From 59af8fc84ff7ccbfa6418ef131f85b3577642d27 Mon Sep 17 00:00:00 2001
From: Derek McQuay
Date: Tue, 10 Apr 2018 19:17:26 -0700
Subject: [PATCH] initial commit

Signed-off-by: Derek McQuay
---
 Gopkg.lock | 99 +
 Gopkg.toml | 54 +
 LICENSE | 8 +
 README.md | 6 +
 cmd/hdxd/main.go | 116 +
 db.go | 53 +
 ip.go | 77 +
 routes.go | 19 +
 server.go | 110 +
 sql.go | 10 +
 vendor/cloud.google.com/go/.travis.yml | 21 +
 vendor/cloud.google.com/go/AUTHORS | 15 +
 vendor/cloud.google.com/go/CONTRIBUTING.md | 152 +
 vendor/cloud.google.com/go/CONTRIBUTORS | 40 +
 vendor/cloud.google.com/go/LICENSE | 202 +
 vendor/cloud.google.com/go/MIGRATION.md | 54 +
 vendor/cloud.google.com/go/README.md | 593 +
 vendor/cloud.google.com/go/appveyor.yml | 32 +
 .../cloud.google.com/go/authexample_test.go | 49 +
 .../go/bigquery/benchmarks/README.md | 8 +
 .../go/bigquery/benchmarks/bench.go | 85 +
 .../go/bigquery/benchmarks/queries.json | 10 +
 .../cloud.google.com/go/bigquery/bigquery.go | 161 +
 vendor/cloud.google.com/go/bigquery/copy.go | 106 +
 .../cloud.google.com/go/bigquery/copy_test.go | 165 +
 .../cloud.google.com/go/bigquery/dataset.go | 518 +
 .../go/bigquery/dataset_test.go | 328 +
 .../apiv1/ListDataSources_smoke_test.go | 67 +
 .../apiv1/data_transfer_client.go | 601 +
 .../data_transfer_client_example_test.go | 288 +
 .../go/bigquery/datatransfer/apiv1/doc.go | 47 +
 .../bigquery/datatransfer/apiv1/mock_test.go | 1146 ++
 .../bigquery/datatransfer/apiv1/path_funcs.go | 135 +
 vendor/cloud.google.com/go/bigquery/doc.go | 303 +
 vendor/cloud.google.com/go/bigquery/error.go | 82 +
 .../go/bigquery/error_test.go | 110 +
 .../go/bigquery/examples_test.go | 829 +
 .../cloud.google.com/go/bigquery/external.go | 398 +
 .../go/bigquery/external_test.go | 143 +
 .../cloud.google.com/go/bigquery/extract.go | 109 +
 .../go/bigquery/extract_test.go | 118 +
 vendor/cloud.google.com/go/bigquery/file.go | 135 +
 .../cloud.google.com/go/bigquery/file_test.go | 98 +
 vendor/cloud.google.com/go/bigquery/gcs.go | 73 +
 .../go/bigquery/integration_test.go | 1858 ++
 .../cloud.google.com/go/bigquery/iterator.go | 215 +
 .../go/bigquery/iterator_test.go | 363 +
 vendor/cloud.google.com/go/bigquery/job.go | 711 +
 .../cloud.google.com/go/bigquery/job_test.go | 95 +
 vendor/cloud.google.com/go/bigquery/load.go | 141 +
 .../cloud.google.com/go/bigquery/load_test.go | 260 +
 vendor/cloud.google.com/go/bigquery/nulls.go | 299 +
 .../go/bigquery/nulls_test.go | 73 +
 .../cloud.google.com/go/bigquery/oc_test.go | 40 +
 vendor/cloud.google.com/go/bigquery/params.go | 346 +
 .../go/bigquery/params_test.go | 361 +
 vendor/cloud.google.com/go/bigquery/query.go | 298 +
 .../go/bigquery/query_test.go | 402 +
 .../cloud.google.com/go/bigquery/read_test.go | 235 +
 vendor/cloud.google.com/go/bigquery/schema.go | 387 +
 .../go/bigquery/schema_test.go | 897 +
 vendor/cloud.google.com/go/bigquery/table.go | 531 +
 .../go/bigquery/table_test.go | 295 +
 .../cloud.google.com/go/bigquery/uploader.go | 231 +
 .../go/bigquery/uploader_test.go | 211 +
 vendor/cloud.google.com/go/bigquery/value.go | 835 +
 .../go/bigquery/value_test.go | 1166 ++
 vendor/cloud.google.com/go/bigtable/admin.go | 885 +
 .../go/bigtable/admin_test.go | 433 +
 .../cloud.google.com/go/bigtable/bigtable.go | 884 +
 .../go/bigtable/bigtable_test.go | 1163 ++
 .../go/bigtable/bttest/example_test.go | 83 +
 .../go/bigtable/bttest/inmem.go | 1316 ++
 .../go/bigtable/bttest/inmem_test.go | 718 +
 .../go/bigtable/cmd/cbt/cbt.go | 1323 ++
 .../go/bigtable/cmd/cbt/cbt_test.go | 113 +
 .../go/bigtable/cmd/cbt/cbtdoc.go | 316 +
 .../go/bigtable/cmd/emulator/cbtemulator.go | 44 +
 .../go/bigtable/cmd/loadtest/loadtest.go | 205 +
 .../go/bigtable/cmd/scantest/scantest.go | 155 +
 vendor/cloud.google.com/go/bigtable/doc.go | 125 +
 .../go/bigtable/export_test.go | 222 +
 vendor/cloud.google.com/go/bigtable/filter.go | 317 +
 vendor/cloud.google.com/go/bigtable/gc.go | 158 +
 .../cloud.google.com/go/bigtable/gc_test.go | 46 +
 vendor/cloud.google.com/go/bigtable/go18.go | 68 +
 .../bigtable/internal/cbtconfig/cbtconfig.go | 246 +
 .../go/bigtable/internal/gax/call_option.go | 106 +
 .../go/bigtable/internal/gax/invoke.go | 84 +
 .../go/bigtable/internal/gax/invoke_test.go | 49 +
 .../go/bigtable/internal/option/option.go | 48 +
 .../go/bigtable/internal/stat/stats.go | 144 +
 .../cloud.google.com/go/bigtable/not_go18.go | 36 +
 vendor/cloud.google.com/go/bigtable/reader.go | 250 +
 .../go/bigtable/reader_test.go | 344 +
 .../go/bigtable/retry_test.go | 372 +
 .../testdata/read-rows-acceptance-test.json | 1178 ++
 vendor/cloud.google.com/go/civil/civil.go | 277 +
 .../cloud.google.com/go/civil/civil_test.go | 442 +
 vendor/cloud.google.com/go/cloud.go | 40 +
 .../go/cmd/go-cloud-debug-agent/debuglet.go | 450 +
 .../internal/breakpoints/breakpoints.go | 174 +
 .../internal/breakpoints/breakpoints_test.go | 168 +
 .../internal/controller/client.go | 291 +
 .../internal/controller/client_test.go | 254 +
 .../internal/valuecollector/valuecollector.go | 460 +
 .../valuecollector/valuecollector_test.go | 418 +
 .../go/compute/metadata/metadata.go | 437 +
 .../go/compute/metadata/metadata_test.go | 48 +
 .../apiv1/ListClusters_smoke_test.go | 68 +
 .../container/apiv1/cluster_manager_client.go | 674 +
 .../cluster_manager_client_example_test.go | 571 +
 .../go/container/apiv1/doc.go | 48 +
 .../go/container/apiv1/mock_test.go | 2912 +++
 .../go/container/container.go | 272 +
 .../dataproc/apiv1/ListClusters_smoke_test.go | 69 +
 .../apiv1/cluster_controller_client.go | 593 +
 .../cluster_controller_client_example_test.go | 160 +
 .../cloud.google.com/go/dataproc/apiv1/doc.go | 46 +
 .../dataproc/apiv1/job_controller_client.go | 285 +
 .../job_controller_client_example_test.go | 146 +
 .../go/dataproc/apiv1/mock_test.go | 1196 ++
 .../cloud.google.com/go/datastore/client.go | 118 +
 .../go/datastore/datastore.go | 627 +
 .../go/datastore/datastore_test.go | 3493 ++++
 vendor/cloud.google.com/go/datastore/doc.go | 491 +
 .../cloud.google.com/go/datastore/errors.go | 47 +
 .../go/datastore/example_test.go | 567 +
 .../go/datastore/integration_test.go | 1277 ++
 vendor/cloud.google.com/go/datastore/key.go | 280 +
 .../cloud.google.com/go/datastore/key_test.go | 210 +
 vendor/cloud.google.com/go/datastore/load.go | 512 +
 .../go/datastore/load_test.go | 886 +
 .../cloud.google.com/go/datastore/mutation.go | 129 +
 .../go/datastore/mutation_test.go | 150 +
 .../cloud.google.com/go/datastore/oc_test.go | 45 +
 vendor/cloud.google.com/go/datastore/prop.go | 342 +
 vendor/cloud.google.com/go/datastore/query.go | 784 +
 .../go/datastore/query_test.go | 547 +
 vendor/cloud.google.com/go/datastore/save.go | 462 +
 .../go/datastore/save_test.go | 285 +
 .../go/datastore/testdata/index.yaml | 41 +
 vendor/cloud.google.com/go/datastore/time.go | 36 +
 .../go/datastore/time_test.go | 75 +
 .../go/datastore/transaction.go | 409 +
 .../go/datastore/transaction_test.go | 78 +
 .../go/debugger/apiv2/controller2_client.go | 215 +
 .../apiv2/controller2_client_example_test.go | 87 +
 .../go/debugger/apiv2/debugger2_client.go | 211 +
 .../apiv2/debugger2_client_example_test.go | 121 +
 .../cloud.google.com/go/debugger/apiv2/doc.go | 50 +
 .../go/debugger/apiv2/mock_test.go | 693 +
 .../go/dlp/apiv2/dlp_client.go | 812 +
 .../go/dlp/apiv2/dlp_client_example_test.go | 498 +
 vendor/cloud.google.com/go/dlp/apiv2/doc.go | 47 +
 .../go/dlp/apiv2/mock_test.go | 1902 ++
 .../apiv2beta1/InspectContent_smoke_test.go | 79 +
 .../go/dlp/apiv2beta1/dlp_client.go | 429 +
 .../dlp/apiv2beta1/dlp_client_example_test.go | 187 +
 .../cloud.google.com/go/dlp/apiv2beta1/doc.go | 48 +
 .../go/dlp/apiv2beta1/mock_test.go | 844 +
 .../go/dlp/apiv2beta1/path_funcs.go | 27 +
 .../apiv1beta1/ReportErrorEvent_smoke_test.go | 88 +
 .../go/errorreporting/apiv1beta1/doc.go | 50 +
 .../apiv1beta1/error_group_client.go | 151 +
 .../error_group_client_example_test.go | 69 +
 .../apiv1beta1/error_stats_client.go | 293 +
 .../error_stats_client_example_test.go | 100 +
 .../go/errorreporting/apiv1beta1/mock_test.go | 587 +
 .../errorreporting/apiv1beta1/path_funcs.go | 51 +
 .../apiv1beta1/report_errors_client.go | 122 +
 .../report_errors_client_example_test.go | 51 +
 .../go/errorreporting/errors.go | 230 +
 .../go/errorreporting/errors_test.go | 113 +
 .../go/errorreporting/example_test.go | 49 +
 .../go/errorreporting/stack_test.go | 56 +
 vendor/cloud.google.com/go/firestore/Makefile | 13 +
 .../go/firestore/apiv1beta1/doc.go | 48 +
 .../firestore/apiv1beta1/firestore_client.go | 497 +
 .../firestore_client_example_test.go | 328 +
 .../go/firestore/apiv1beta1/mock_test.go | 1153 ++
 .../go/firestore/apiv1beta1/path_funcs.go | 78 +
 .../cloud.google.com/go/firestore/client.go | 283 +
 .../go/firestore/client_test.go | 212 +
 .../cloud.google.com/go/firestore/collref.go | 114 +
 .../go/firestore/collref_test.go | 97 +
 .../go/firestore/cross_language_test.go | 357 +
 vendor/cloud.google.com/go/firestore/doc.go | 218 +
 .../cloud.google.com/go/firestore/docref.go | 611 +
 .../go/firestore/docref_test.go | 312 +
 .../cloud.google.com/go/firestore/document.go | 301 +
 .../go/firestore/document_test.go | 239 +
 .../go/firestore/examples_test.go | 528 +
 .../go/firestore/fieldpath.go | 275 +
 .../go/firestore/fieldpath_test.go | 232 +
 .../go/firestore/from_value.go | 431 +
 .../go/firestore/from_value_test.go | 550 +
 .../go/firestore/genproto/test.pb.go | 1243 ++
 .../go/firestore/integration_test.go | 1133 ++
 .../go/firestore/internal/Makefile | 16 +
 .../go/firestore/internal/doc-snippets.go | 161 +
 .../go/firestore/internal/doc.template | 145 +
 .../go/firestore/internal/snipdoc.awk | 116 +
 .../go/firestore/mock_test.go | 207 +
 .../cloud.google.com/go/firestore/options.go | 177 +
 .../go/firestore/options_test.go | 151 +
 vendor/cloud.google.com/go/firestore/order.go | 216 +
 .../go/firestore/order_test.go | 118 +
 vendor/cloud.google.com/go/firestore/query.go | 757 +
 .../go/firestore/query_test.go | 717 +
 .../go/firestore/testdata/VERSION | 1 +
 .../firestore/testdata/create-basic.textproto | 27 +
 .../testdata/create-complex.textproto | 61 +
 .../create-del-noarray-nested.textproto | 13 +
 .../testdata/create-del-noarray.textproto | 13 +
 .../firestore/testdata/create-empty.textproto | 20 +
 .../firestore/testdata/create-nodel.textproto | 11 +
 .../testdata/create-nosplit.textproto | 40 +
 .../testdata/create-special-chars.textproto | 41 +
 .../testdata/create-st-alone.textproto | 26 +
 .../testdata/create-st-multi.textproto | 41 +
 .../testdata/create-st-nested.textproto | 38 +
 .../create-st-noarray-nested.textproto | 12 +
 .../testdata/create-st-noarray.textproto | 12 +
 .../go/firestore/testdata/create-st.textproto | 39 +
 .../testdata/delete-exists-precond.textproto | 21 +
 .../testdata/delete-no-precond.textproto | 15 +
 .../testdata/delete-time-precond.textproto | 25 +
 .../go/firestore/testdata/get-basic.textproto | 12 +
 .../testdata/query-bad-NaN.textproto | 19 +
 .../testdata/query-bad-null.textproto | 19 +
 .../query-cursor-docsnap-order.textproto | 68 +
 ...uery-cursor-docsnap-orderby-name.textproto | 76 +
 .../query-cursor-docsnap-where-eq.textproto | 53 +
 ...cursor-docsnap-where-neq-orderby.textproto | 72 +
 .../query-cursor-docsnap-where-neq.textproto | 64 +
 .../testdata/query-cursor-docsnap.textproto | 34 +
 .../testdata/query-cursor-no-order.textproto | 16 +
 .../testdata/query-cursor-vals-1a.textproto | 50 +
 .../testdata/query-cursor-vals-1b.textproto | 48 +
 .../testdata/query-cursor-vals-2.textproto | 71 +
 .../query-cursor-vals-docid.textproto | 50 +
 .../query-cursor-vals-last-wins.textproto | 60 +
 .../testdata/query-del-cursor.textproto | 23 +
 .../testdata/query-del-where.textproto | 19 +
 .../testdata/query-invalid-operator.textproto | 19 +
 .../query-invalid-path-order.textproto | 19 +
 .../query-invalid-path-select.textproto | 18 +
 .../query-invalid-path-where.textproto | 20 +
 .../query-offset-limit-last-wins.textproto | 30 +
 .../testdata/query-offset-limit.textproto | 24 +
 .../firestore/testdata/query-order.textproto | 42 +
 .../testdata/query-select-empty.textproto | 23 +
 .../testdata/query-select-last-wins.textproto | 36 +
 .../firestore/testdata/query-select.textproto | 32 +
 .../testdata/query-st-cursor.textproto | 23 +
 .../testdata/query-st-where.textproto | 19 +
 .../testdata/query-where-2.textproto | 59 +
 .../testdata/query-where-NaN.textproto | 31 +
 .../testdata/query-where-null.textproto | 31 +
 .../firestore/testdata/query-where.textproto | 34 +
 .../testdata/query-wrong-collection.textproto | 19 +
 .../go/firestore/testdata/set-basic.textproto | 24 +
 .../firestore/testdata/set-complex.textproto | 58 +
 .../testdata/set-del-merge-alone.textproto | 28 +
 .../testdata/set-del-merge.textproto | 37 +
 .../testdata/set-del-mergeall.textproto | 31 +
 .../testdata/set-del-noarray-nested.textproto | 13 +
 .../testdata/set-del-noarray.textproto | 13 +
 .../testdata/set-del-nomerge.textproto | 17 +
 .../testdata/set-del-nonleaf.textproto | 19 +
 .../testdata/set-del-wo-merge.textproto | 12 +
 .../go/firestore/testdata/set-empty.textproto | 17 +
 .../firestore/testdata/set-merge-fp.textproto | 40 +
 .../testdata/set-merge-nested.textproto | 41 +
 .../testdata/set-merge-nonleaf.textproto | 46 +
 .../testdata/set-merge-prefix.textproto | 21 +
 .../testdata/set-merge-present.textproto | 20 +
 .../go/firestore/testdata/set-merge.textproto | 32 +
 .../testdata/set-mergeall-empty.textproto | 15 +
 .../testdata/set-mergeall-nested.textproto | 45 +
 .../firestore/testdata/set-mergeall.textproto | 37 +
 .../go/firestore/testdata/set-nodel.textproto | 11 +
 .../firestore/testdata/set-nosplit.textproto | 37 +
 .../testdata/set-special-chars.textproto | 38 +
 .../testdata/set-st-alone-mergeall.textproto | 26 +
 .../firestore/testdata/set-st-alone.textproto | 28 +
 .../testdata/set-st-merge-both.textproto | 45 +
 .../set-st-merge-nonleaf-alone.textproto | 37 +
 .../testdata/set-st-merge-nonleaf.textproto | 49 +
 .../testdata/set-st-merge-nowrite.textproto | 28 +
 .../testdata/set-st-mergeall.textproto | 40 +
 .../firestore/testdata/set-st-multi.textproto | 38 +
 .../testdata/set-st-nested.textproto | 35 +
 .../testdata/set-st-noarray-nested.textproto | 12 +
 .../testdata/set-st-noarray.textproto | 12 +
 .../testdata/set-st-nomerge.textproto | 33 +
 .../go/firestore/testdata/set-st.textproto | 36 +
 .../testdata/update-badchar.textproto | 12 +
 .../firestore/testdata/update-basic.textproto | 30 +
 .../testdata/update-complex.textproto | 65 +
 .../testdata/update-del-alone.textproto | 25 +
 .../testdata/update-del-dot.textproto | 46 +
 .../testdata/update-del-nested.textproto | 11 +
 .../update-del-noarray-nested.textproto | 13 +
 .../testdata/update-del-noarray.textproto | 13 +
 .../firestore/testdata/update-del.textproto | 32 +
 .../testdata/update-exists-precond.textproto | 14 +
 .../update-fp-empty-component.textproto | 11 +
 .../testdata/update-no-paths.textproto | 11 +
 .../testdata/update-paths-basic.textproto | 33 +
 .../testdata/update-paths-complex.textproto | 72 +
 .../testdata/update-paths-del-alone.textproto | 28 +
 .../update-paths-del-nested.textproto | 14 +
 .../update-paths-del-noarray-nested.textproto | 16 +
 .../update-paths-del-noarray.textproto | 16 +
 .../testdata/update-paths-del.textproto | 39 +
 .../update-paths-exists-precond.textproto | 17 +
 .../testdata/update-paths-fp-del.textproto | 47 +
 .../testdata/update-paths-fp-dup.textproto | 22 +
 .../update-paths-fp-empty-component.textproto | 15 +
 .../testdata/update-paths-fp-empty.textproto | 13 +
 .../testdata/update-paths-fp-multi.textproto | 42 +
 .../update-paths-fp-nosplit.textproto | 48 +
 .../testdata/update-paths-no-paths.textproto | 10 +
 .../testdata/update-paths-prefix-1.textproto | 19 +
 .../testdata/update-paths-prefix-2.textproto | 19 +
 .../testdata/update-paths-prefix-3.textproto | 20 +
 .../update-paths-special-chars.textproto | 53 +
 .../testdata/update-paths-st-alone.textproto | 29 +
 .../testdata/update-paths-st-multi.textproto | 56 +
 .../testdata/update-paths-st-nested.textproto | 49 +
 .../update-paths-st-noarray-nested.textproto | 15 +
 .../update-paths-st-noarray.textproto | 15 +
 .../testdata/update-paths-st.textproto | 49 +
 .../testdata/update-paths-uptime.textproto | 40 +
 .../testdata/update-prefix-1.textproto | 11 +
 .../testdata/update-prefix-2.textproto | 11 +
 .../testdata/update-prefix-3.textproto | 12 +
 .../testdata/update-quoting.textproto | 45 +
 .../testdata/update-split-top-level.textproto | 45 +
 .../firestore/testdata/update-split.textproto | 44 +
 .../testdata/update-st-alone.textproto | 26 +
 .../testdata/update-st-dot.textproto | 27 +
 .../testdata/update-st-multi.textproto | 49 +
 .../testdata/update-st-nested.textproto | 42 +
 .../update-st-noarray-nested.textproto | 12 +
 .../testdata/update-st-noarray.textproto | 12 +
 .../go/firestore/testdata/update-st.textproto | 42 +
 .../testdata/update-uptime.textproto | 37 +
 .../cloud.google.com/go/firestore/to_value.go | 278 +
 .../go/firestore/to_value_test.go | 276 +
 .../go/firestore/transaction.go | 276 +
 .../go/firestore/transaction_test.go | 389 +
 .../go/firestore/util_test.go | 150 +
 vendor/cloud.google.com/go/firestore/watch.go | 515 +
 .../go/firestore/watch_test.go | 326 +
 .../go/firestore/writebatch.go | 82 +
 .../go/firestore/writebatch_test.go | 119 +
 .../go/iam/admin/apiv1/doc.go | 43 +
 .../go/iam/admin/apiv1/iam_client.go | 478 +
 .../admin/apiv1/iam_client_example_test.go | 253 +
 .../go/iam/admin/apiv1/mock_test.go | 1143 ++
 .../go/iam/admin/apiv1/policy_methods.go | 52 +
 vendor/cloud.google.com/go/iam/iam.go | 284 +
 vendor/cloud.google.com/go/iam/iam_test.go | 87 +
 vendor/cloud.google.com/go/import_test.go | 61 +
 .../cloud.google.com/go/internal/annotate.go | 54 +
 .../go/internal/annotate_test.go | 65 +
 .../go/internal/atomiccache/atomiccache.go | 58 +
 .../internal/atomiccache/atomiccache_test.go | 46 +
 .../go/internal/btree/README.md | 11 +
 .../go/internal/btree/benchmarks_test.go | 268 +
 .../go/internal/btree/btree.go | 1018 +
 .../go/internal/btree/btree_test.go | 422 +
 .../go/internal/btree/debug.go | 37 +
 .../go/internal/fields/fields.go | 468 +
 .../go/internal/fields/fields_test.go | 563 +
 .../go/internal/fields/fold.go | 156 +
 .../go/internal/fields/fold_test.go | 129 +
 .../go/internal/kokoro/build.sh | 44 +
 .../go/internal/kokoro/kokoro-key.json.enc | Bin 0 -> 2448 bytes
 .../go/internal/optional/optional.go | 108 +
 .../go/internal/optional/optional_test.go | 64 +
 .../go/internal/pretty/diff.go | 78 +
 .../go/internal/pretty/diff_test.go | 50 +
 .../go/internal/pretty/pretty.go | 254 +
 .../go/internal/pretty/pretty_test.go | 105 +
 .../go/internal/protostruct/protostruct.go | 56 +
 .../internal/protostruct/protostruct_test.go | 58 +
 .../go/internal/readme/Makefile | 48 +
 .../go/internal/readme/snipmd.awk | 123 +
 .../go/internal/readme/snippets.go | 241 +
 .../internal/readme/testdata/bad-no-name.go | 23 +
 .../internal/readme/testdata/bad-no-open.go | 19 +
 .../go/internal/readme/testdata/bad-nosnip.md | 2 +
 .../go/internal/readme/testdata/bad-spec.md | 1 +
 .../internal/readme/testdata/bad-unclosed.go | 21 +
 .../go/internal/readme/testdata/good.md | 18 +
 .../go/internal/readme/testdata/snips.go | 39 +
 .../go/internal/readme/testdata/want.md | 30 +
 vendor/cloud.google.com/go/internal/retry.go | 55 +
 .../go/internal/retry_test.go | 89 +
 .../go/internal/snipdoc/README.md | 29 +
 .../go/internal/snipdoc/sample-makefile | 16 +
 .../go/internal/snipdoc/snipdoc.awk | 116 +
 .../go/internal/testutil/cmp.go | 53 +
 .../go/internal/testutil/context.go | 95 +
 .../go/internal/testutil/go18.go | 64 +
 .../go/internal/testutil/server.go | 105 +
 .../go/internal/testutil/server_test.go | 79 +
 .../go/internal/testutil/unique.go | 101 +
 .../go/internal/testutil/unique_test.go | 69 +
 .../go/internal/trace/go18.go | 83 +
 .../go/internal/trace/go18_test.go | 55 +
 .../go/internal/trace/not_go18.go | 30 +
 .../go/internal/tracecontext/tracecontext.go | 83 +
 .../tracecontext/tracecontext_test.go | 136 +
 .../go/internal/version/update_version.sh | 6 +
 .../go/internal/version/version.go | 71 +
 .../go/internal/version/version_test.go | 35 +
 vendor/cloud.google.com/go/issue_template.md | 17 +
 vendor/cloud.google.com/go/keys.tar.enc | Bin 0 -> 10256 bytes
 .../apiv1/AnalyzeSentiment_smoke_test.go | 73 +
 .../cloud.google.com/go/language/apiv1/doc.go | 47 +
 .../go/language/apiv1/language_client.go | 229 +
 .../apiv1/language_client_example_test.go | 141 +
 .../go/language/apiv1/mock_test.go | 518 +
 .../apiv1beta2/AnalyzeSentiment_smoke_test.go | 73 +
 .../go/language/apiv1beta2/doc.go | 48 +
 .../go/language/apiv1beta2/language_client.go | 229 +
 .../language_client_example_test.go | 141 +
 .../go/language/apiv1beta2/mock_test.go | 518 +
 vendor/cloud.google.com/go/license_test.go | 71 +
 .../go/logging/apiv2/README.md | 11 +
 .../apiv2/WriteLogEntries_smoke_test.go | 68 +
 .../go/logging/apiv2/config_client.go | 421 +
 .../apiv2/config_client_example_test.go | 222 +
 .../cloud.google.com/go/logging/apiv2/doc.go | 52 +
 .../go/logging/apiv2/logging_client.go | 413 +
 .../apiv2/logging_client_example_test.go | 140 +
 .../go/logging/apiv2/metrics_client.go | 264 +
 .../apiv2/metrics_client_example_test.go | 128 +
 .../go/logging/apiv2/mock_test.go | 1677 ++
 .../go/logging/apiv2/path_funcs.go | 107 +
 vendor/cloud.google.com/go/logging/doc.go | 117 +
 .../go/logging/examples_test.go | 166 +
 .../go/logging/internal/common.go | 39 +
 .../go/logging/internal/testing/equal.go | 42 +
 .../go/logging/internal/testing/fake.go | 395 +
 .../go/logging/internal/testing/fake_test.go | 122 +
 .../logadmin/example_entry_iterator_test.go | 66 +
 .../logadmin/example_metric_iterator_test.go | 52 +
 .../logging/logadmin/example_paging_test.go | 92 +
 .../example_resource_iterator_test.go | 52 +
 .../logadmin/example_sink_iterator_test.go | 52 +
 .../go/logging/logadmin/examples_test.go | 161 +
 .../go/logging/logadmin/logadmin.go | 406 +
 .../go/logging/logadmin/logadmin_test.go | 267 +
 .../go/logging/logadmin/metrics.go | 154 +
 .../go/logging/logadmin/metrics_test.go | 154 +
 .../go/logging/logadmin/resources.go | 74 +
 .../go/logging/logadmin/resources_test.go | 46 +
 .../go/logging/logadmin/sinks.go | 168 +
 .../go/logging/logadmin/sinks_test.go | 227 +
 vendor/cloud.google.com/go/logging/logging.go | 814 +
 .../go/logging/logging_test.go | 630 +
 .../go/logging/logging_unexported_test.go | 333 +
 .../go/longrunning/autogen/doc.go | 45 +
 .../go/longrunning/autogen/from_conn.go | 34 +
 .../go/longrunning/autogen/mock_test.go | 381 +
 .../longrunning/autogen/operations_client.go | 267 +
 .../autogen/operations_client_example_test.go | 108 +
 .../go/longrunning/example_test.go | 116 +
 .../go/longrunning/longrunning.go | 181 +
 .../go/longrunning/longrunning_test.go | 215 +
 ...MonitoredResourceDescriptors_smoke_test.go | 67 +
 .../monitoring/apiv3/alert_policy_client.go | 274 +
 .../apiv3/alert_policy_client_example_test.go | 128 +
 .../go/monitoring/apiv3/doc.go | 51 +
 .../go/monitoring/apiv3/group_client.go | 355 +
 .../apiv3/group_client_example_test.go | 152 +
 .../go/monitoring/apiv3/metric_client.go | 444 +
 .../apiv3/metric_client_example_test.go | 192 +
 .../go/monitoring/apiv3/mock_test.go | 2636 +++
 .../apiv3/notification_channel_client.go | 369 +
 ...otification_channel_client_example_test.go | 170 +
 .../go/monitoring/apiv3/path_funcs.go | 107 +
 .../monitoring/apiv3/uptime_check_client.go | 355 +
 .../apiv3/uptime_check_client_example_test.go | 152 +
 vendor/cloud.google.com/go/old-news.md | 596 +
 .../go/oslogin/apiv1beta/doc.go | 49 +
 .../go/oslogin/apiv1beta/mock_test.go | 520 +
 .../go/oslogin/apiv1beta/os_login_client.go | 222 +
 .../apiv1beta/os_login_client_example_test.go | 137 +
 .../go/privacy/dlp/apiv2beta2/dlp_client.go | 681 +
 .../dlp/apiv2beta2/dlp_client_example_test.go | 422 +
 .../go/privacy/dlp/apiv2beta2/doc.go | 48 +
 .../go/privacy/dlp/apiv2beta2/mock_test.go | 1596 ++
 .../go/profiler/busybench/busybench.go | 101 +
 .../go/profiler/integration-test.sh | 36 +
 .../go/profiler/integration_test.go | 266 +
 .../go/profiler/mocks/mock_profiler_client.go | 78 +
 vendor/cloud.google.com/go/profiler/mutex.go | 25 +
 .../go/profiler/mutex_go17.go | 21 +
 .../cloud.google.com/go/profiler/profiler.go | 506 +
 .../go/profiler/profiler_example_test.go | 25 +
 .../go/profiler/profiler_test.go | 876 +
 .../go/profiler/proftest/proftest.go | 501 +
 .../go/profiler/symbolizer.go | 143 +
 .../go/profiler/symbolizer_test.go | 229 +
 .../go/pubsub/apiv1/ListTopics_smoke_test.go | 67 +
 .../go/pubsub/apiv1/README.md | 9 +
 .../cloud.google.com/go/pubsub/apiv1/doc.go | 50 +
 .../go/pubsub/apiv1/mock_test.go | 1878 ++
 .../go/pubsub/apiv1/path_funcs.go | 95 +
 .../go/pubsub/apiv1/publisher_client.go | 398 +
 .../apiv1/publisher_client_example_test.go | 204 +
 .../pubsub/apiv1/pubsub_pull_example_test.go | 106 +
 .../go/pubsub/apiv1/subscriber_client.go | 593 +
 .../apiv1/subscriber_client_example_test.go | 358 +
 vendor/cloud.google.com/go/pubsub/doc.go | 126 +
 .../go/pubsub/endtoend_test.go | 234 +
 .../example_subscription_iterator_test.go | 54 +
 .../go/pubsub/example_test.go | 369 +
 .../go/pubsub/example_topic_iterator_test.go | 53 +
 .../cloud.google.com/go/pubsub/fake_test.go | 322 +
 .../go/pubsub/flow_controller.go | 106 +
 .../go/pubsub/flow_controller_test.go | 236 +
 vendor/cloud.google.com/go/pubsub/go18.go | 150 +
 .../go/pubsub/integration_test.go | 448 +
 .../internal/distribution/distribution.go | 70 +
 .../distribution/distribution_test.go | 94 +
 vendor/cloud.google.com/go/pubsub/iterator.go | 294 +
 .../go/pubsub/loadtest/benchmark_test.go | 176 +
 .../go/pubsub/loadtest/cmd/loadtest.go | 54 +
 .../go/pubsub/loadtest/loadtest.go | 215 +
 .../go/pubsub/loadtest/pb/loadtest.pb.go | 792 +
 vendor/cloud.google.com/go/pubsub/message.go | 100 +
 vendor/cloud.google.com/go/pubsub/not_go18.go | 54 +
 .../go/pubsub/pstest/examples_test.go | 41 +
 .../cloud.google.com/go/pubsub/pstest/fake.go | 771 +
 .../go/pubsub/pstest/fake_test.go | 434 +
 .../cloud.google.com/go/pubsub/pstest_test.go | 76 +
 vendor/cloud.google.com/go/pubsub/pubsub.go | 113 +
 .../cloud.google.com/go/pubsub/pullstream.go | 167 +
 vendor/cloud.google.com/go/pubsub/service.go | 120 +
 .../go/pubsub/service_test.go | 69 +
 vendor/cloud.google.com/go/pubsub/snapshot.go | 160 +
 .../go/pubsub/streaming_pull_test.go | 325 +
 .../go/pubsub/subscription.go | 522 +
 .../go/pubsub/subscription_test.go | 180 +
 .../go/pubsub/timeout_test.go | 88 +
 vendor/cloud.google.com/go/pubsub/topic.go | 397 +
 .../cloud.google.com/go/pubsub/topic_test.go | 148 +
 vendor/cloud.google.com/go/regen-gapic.sh | 64 +
 vendor/cloud.google.com/go/rpcreplay/Makefile | 32 +
 vendor/cloud.google.com/go/rpcreplay/doc.go | 108 +
 .../go/rpcreplay/example_test.go | 47 +
 .../go/rpcreplay/fake_test.go | 122 +
 .../rpcreplay/proto/intstore/intstore.pb.go | 454 +
 .../rpcreplay/proto/intstore/intstore.proto | 54 +
 .../rpcreplay/proto/rpcreplay/rpcreplay.pb.go | 170 +
 .../rpcreplay/proto/rpcreplay/rpcreplay.proto | 71 +
 .../go/rpcreplay/rpcreplay.go | 689 +
 .../go/rpcreplay/rpcreplay_test.go | 362 +
 vendor/cloud.google.com/go/run-tests.sh | 88 +
 .../database/apiv1/database_admin_client.go | 516 +
 .../database_admin_client_example_test.go | 207 +
 .../go/spanner/admin/database/apiv1/doc.go | 46 +
 .../spanner/admin/database/apiv1/mock_test.go | 798 +
 .../admin/database/apiv1/path_funcs.go | 45 +
 .../go/spanner/admin/instance/apiv1/doc.go | 46 +
 .../instance/apiv1/instance_admin_client.go | 700 +
 .../instance_admin_client_example_test.go | 235 +
 .../spanner/admin/instance/apiv1/mock_test.go | 917 +
 .../admin/instance/apiv1/path_funcs.go | 55 +
 .../cloud.google.com/go/spanner/apiv1/doc.go | 50 +
 .../go/spanner/apiv1/mock_test.go | 1085 +
 .../go/spanner/apiv1/path_funcs.go | 49 +
 .../go/spanner/apiv1/spanner_client.go | 498 +
 .../apiv1/spanner_client_example_test.go | 290 +
 .../cloud.google.com/go/spanner/appengine.go | 20 +
 vendor/cloud.google.com/go/spanner/backoff.go | 58 +
 .../go/spanner/backoff_test.go | 62 +
 vendor/cloud.google.com/go/spanner/batch.go | 345 +
 .../cloud.google.com/go/spanner/batch_test.go | 73 +
 vendor/cloud.google.com/go/spanner/client.go | 442 +
 .../go/spanner/client_test.go | 50 +
 vendor/cloud.google.com/go/spanner/doc.go | 316 +
 vendor/cloud.google.com/go/spanner/errors.go | 115 +
 .../go/spanner/errors_test.go | 44 +
 .../go/spanner/examples_test.go | 640 +
 vendor/cloud.google.com/go/spanner/go18.go | 59 +
 .../spanner/internal/testutil/mockclient.go | 383 +
 .../spanner/internal/testutil/mockserver.go | 243 +
 vendor/cloud.google.com/go/spanner/key.go | 398 +
 .../cloud.google.com/go/spanner/key_test.go | 372 +
 .../cloud.google.com/go/spanner/mutation.go | 431 +
 .../go/spanner/mutation_test.go | 571 +
 .../go/spanner/not_appengine.go | 20 +
 .../cloud.google.com/go/spanner/not_go18.go | 31 +
 vendor/cloud.google.com/go/spanner/oc_test.go | 54 +
 .../cloud.google.com/go/spanner/protoutils.go | 113 +
 vendor/cloud.google.com/go/spanner/read.go | 704 +
 .../cloud.google.com/go/spanner/read_test.go | 1733 ++
 vendor/cloud.google.com/go/spanner/retry.go | 198 +
 .../cloud.google.com/go/spanner/retry_test.go | 107 +
 vendor/cloud.google.com/go/spanner/row.go | 305 +
 .../cloud.google.com/go/spanner/row_test.go | 1811 ++
 vendor/cloud.google.com/go/spanner/session.go | 1075 +
 .../go/spanner/session_test.go | 857 +
 .../go/spanner/spanner_test.go | 1879 ++
 .../cloud.google.com/go/spanner/statement.go | 101 +
 .../go/spanner/statement_test.go | 171 +
 .../go/spanner/timestampbound.go | 240 +
 .../go/spanner/timestampbound_test.go | 207 +
 .../go/spanner/transaction.go | 879 +
 .../go/spanner/transaction_test.go | 222 +
 vendor/cloud.google.com/go/spanner/util.go | 33 +
 .../cloud.google.com/go/spanner/util_test.go | 28 +
 vendor/cloud.google.com/go/spanner/value.go | 1442 ++
 .../go/spanner/value_benchmarks_test.go | 214 +
 .../cloud.google.com/go/spanner/value_test.go | 522 +
 .../go/speech/apiv1/Recognize_smoke_test.go | 80 +
 .../cloud.google.com/go/speech/apiv1/doc.go | 45 +
 .../go/speech/apiv1/mock_test.go | 405 +
 .../go/speech/apiv1/speech_client.go | 263 +
 .../apiv1/speech_client_example_test.go | 110 +
 .../apiv1beta1/SyncRecognize_smoke_test.go | 80 +
 .../go/speech/apiv1beta1/doc.go | 46 +
 .../go/speech/apiv1beta1/mock_test.go | 400 +
 .../go/speech/apiv1beta1/speech_client.go | 265 +
 .../apiv1beta1/speech_client_example_test.go | 110 +
 vendor/cloud.google.com/go/storage/acl.go | 245 +
 vendor/cloud.google.com/go/storage/bucket.go | 944 +
 .../go/storage/bucket_test.go | 319 +
 vendor/cloud.google.com/go/storage/copy.go | 207 +
 vendor/cloud.google.com/go/storage/doc.go | 167 +
 .../go/storage/example_test.go | 641 +
 vendor/cloud.google.com/go/storage/go110.go | 30 +
 vendor/cloud.google.com/go/storage/go17.go | 26 +
 vendor/cloud.google.com/go/storage/iam.go | 129 +
 .../go/storage/integration_test.go | 2285 +++
 vendor/cloud.google.com/go/storage/invoke.go | 36 +
 .../go/storage/invoke_test.go | 56 +
 .../cloud.google.com/go/storage/not_go110.go | 40 +
 .../cloud.google.com/go/storage/not_go17.go | 26 +
 .../go/storage/notifications.go | 188 +
 .../go/storage/notifications_test.go | 98 +
 vendor/cloud.google.com/go/storage/oc_test.go | 40 +
 vendor/cloud.google.com/go/storage/reader.go | 245 +
 .../go/storage/reader_test.go | 112 +
 vendor/cloud.google.com/go/storage/storage.go | 1067 +
 .../go/storage/storage_test.go | 912 +
 .../go/storage/testdata/dummy_pem | 39 +
 .../go/storage/testdata/dummy_rsa | 27 +
 vendor/cloud.google.com/go/storage/writer.go | 218 +
 .../go/storage/writer_test.go | 174 +
 .../go/trace/apiv1/ListTraces_smoke_test.go | 67 +
 vendor/cloud.google.com/go/trace/apiv1/doc.go | 54 +
 .../go/trace/apiv1/mock_test.go | 321 +
 .../go/trace/apiv1/trace_client.go | 235 +
 .../trace/apiv1/trace_client_example_test.go | 92 +
 .../trace/apiv2/BatchWriteSpans_smoke_test.go | 66 +
 vendor/cloud.google.com/go/trace/apiv2/doc.go | 51 +
 .../go/trace/apiv2/mock_test.go | 252 +
 .../go/trace/apiv2/path_funcs.go | 43 +
 .../go/trace/apiv2/trace_client.go | 151 +
 .../trace/apiv2/trace_client_example_test.go | 67 +
 vendor/cloud.google.com/go/trace/grpc.go | 108 +
 vendor/cloud.google.com/go/trace/grpc_test.go | 180 +
 vendor/cloud.google.com/go/trace/http.go | 107 +
 vendor/cloud.google.com/go/trace/http_test.go | 151 +
 .../go/trace/httpexample_test.go | 57 +
 vendor/cloud.google.com/go/trace/sampling.go | 117 +
 .../testdata/helloworld/helloworld.pb.go | 161 +
 .../testdata/helloworld/helloworld.proto | 37 +
 vendor/cloud.google.com/go/trace/trace.go | 845 +
 .../cloud.google.com/go/trace/trace_test.go | 969 +
 .../go/translate/examples_test.go | 81 +
 .../go/translate/internal/translate/v2/README | 12 +
 .../translate/internal/translate/v2/regen.sh | 29 +
 .../translate/v2/translate-nov2016-api.json | 285 +
 .../translate/v2/translate-nov2016-gen.go | 790 +
 .../go/translate/translate.go | 237 +
 .../go/translate/translate_test.go | 340 +
 .../go/videointelligence/apiv1/doc.go | 46 +
 .../go/videointelligence/apiv1/mock_test.go | 180 +
 .../apiv1/video_intelligence_client.go | 225 +
 .../video_intelligence_client_example_test.go | 56 +
 .../go/videointelligence/apiv1beta1/doc.go | 46 +
 .../videointelligence/apiv1beta1/mock_test.go | 192 +
 .../apiv1beta1/video_intelligence_client.go | 225 +
 .../video_intelligence_client_example_test.go | 56 +
 .../go/videointelligence/apiv1beta2/doc.go | 46 +
 .../videointelligence/apiv1beta2/mock_test.go | 192 +
 .../apiv1beta2/video_intelligence_client.go | 225 +
 .../video_intelligence_client_example_test.go | 56 +
 .../videointelligence/apiv1beta2/whitelist.go | 16 +
 .../apiv1/BatchAnnotateImages_smoke_test.go | 82 +
 .../go/vision/apiv1/README.md | 9 +
 .../go/vision/apiv1/client.go | 151 +
 .../go/vision/apiv1/client_test.go | 200 +
 .../cloud.google.com/go/vision/apiv1/doc.go | 49 +
 .../go/vision/apiv1/examples_test.go | 92 +
 .../cloud.google.com/go/vision/apiv1/face.go | 153 +
 .../go/vision/apiv1/face_test.go | 225 +
 .../cloud.google.com/go/vision/apiv1/image.go | 37 +
 .../go/vision/apiv1/image_annotator_client.go | 134 +
 .../image_annotator_client_example_test.go | 51 +
 .../go/vision/apiv1/mock_test.go | 159 +
 .../BatchAnnotateImages_smoke_test.go | 82 +
 .../go/vision/apiv1p1beta1/doc.go | 50 +
 .../apiv1p1beta1/image_annotator_client.go | 134 +
 .../image_annotator_client_example_test.go | 51 +
 .../go/vision/apiv1p1beta1/mock_test.go | 159 +
 .../elazarl/go-bindata-assetfs/LICENSE | 23 +
 .../elazarl/go-bindata-assetfs/README.md | 46 +
 .../elazarl/go-bindata-assetfs/assetfs.go | 167 +
 .../elazarl/go-bindata-assetfs/doc.go | 13 +
 .../go-bindata-assetfs/main.go | 100 +
 vendor/github.com/golang/protobuf/.gitignore | 16 +
 vendor/github.com/golang/protobuf/.travis.yml | 18 +
 vendor/github.com/golang/protobuf/AUTHORS | 3 +
 .../github.com/golang/protobuf/CONTRIBUTORS | 3 +
 vendor/github.com/golang/protobuf/LICENSE | 31 +
 .../github.com/golang/protobuf/Make.protobuf | 40 +
 vendor/github.com/golang/protobuf/Makefile | 55 +
 vendor/github.com/golang/protobuf/README.md | 244 +
 .../golang/protobuf/_conformance/Makefile | 33 +
 .../protobuf/_conformance/conformance.go | 161 +
 .../conformance_proto/conformance.pb.go | 1885 ++
 .../conformance_proto/conformance.proto | 285 +
 .../golang/protobuf/descriptor/descriptor.go | 93 +
 .../protobuf/descriptor/descriptor_test.go | 32 +
 .../golang/protobuf/jsonpb/jsonpb.go | 1083 +
 .../golang/protobuf/jsonpb/jsonpb_test.go | 896 +
 .../jsonpb/jsonpb_test_proto/Makefile | 33 +
 .../jsonpb_test_proto/more_test_objects.pb.go | 266 +
 .../jsonpb_test_proto/more_test_objects.proto | 69 +
 .../jsonpb_test_proto/test_objects.pb.go | 852 +
 .../jsonpb_test_proto/test_objects.proto | 147 +
 .../github.com/golang/protobuf/proto/Makefile | 43 +
 .../golang/protobuf/proto/all_test.go | 2278 +++
 .../golang/protobuf/proto/any_test.go | 300 +
 .../github.com/golang/protobuf/proto/clone.go | 229 +
 .../golang/protobuf/proto/clone_test.go | 300 +
 .../golang/protobuf/proto/decode.go | 970 +
 .../golang/protobuf/proto/decode_test.go | 258 +
 .../golang/protobuf/proto/discard.go | 151 +
 .../golang/protobuf/proto/encode.go | 1362 ++
 .../golang/protobuf/proto/encode_test.go | 85 +
 .../github.com/golang/protobuf/proto/equal.go | 300 +
 .../golang/protobuf/proto/equal_test.go | 224 +
 .../golang/protobuf/proto/extensions.go | 587 +
 .../golang/protobuf/proto/extensions_test.go | 536 +
 .../github.com/golang/protobuf/proto/lib.go | 897 +
 .../golang/protobuf/proto/map_test.go | 46 +
 .../golang/protobuf/proto/message_set.go | 311 +
 .../golang/protobuf/proto/message_set_test.go | 66 +
 .../golang/protobuf/proto/pointer_reflect.go | 484 +
 .../golang/protobuf/proto/pointer_unsafe.go | 270 +
 .../golang/protobuf/proto/properties.go | 872 +
 .../protobuf/proto/proto3_proto/proto3.pb.go | 347 +
 .../protobuf/proto/proto3_proto/proto3.proto | 87 +
 .../golang/protobuf/proto/proto3_test.go | 135 +
 .../golang/protobuf/proto/size2_test.go | 63 +
 .../golang/protobuf/proto/size_test.go | 164 +
 .../golang/protobuf/proto/testdata/Makefile | 50 +
 .../protobuf/proto/testdata/golden_test.go | 86 +
 .../golang/protobuf/proto/testdata/test.pb.go | 4147 ++++
 .../golang/protobuf/proto/testdata/test.proto | 548 +
 .../github.com/golang/protobuf/proto/text.go | 854 +
 .../golang/protobuf/proto/text_parser.go | 895 +
 .../golang/protobuf/proto/text_parser_test.go | 673 +
 .../golang/protobuf/proto/text_test.go | 474 +
 .../golang/protobuf/protoc-gen-go/Makefile | 33 +
 .../protoc-gen-go/descriptor/Makefile | 37 +
 .../protoc-gen-go/descriptor/descriptor.pb.go | 2215 ++
 .../protoc-gen-go/descriptor/descriptor.proto | 849 +
 .../golang/protobuf/protoc-gen-go/doc.go | 51 +
 .../protobuf/protoc-gen-go/generator/Makefile | 40 +
 .../protoc-gen-go/generator/generator.go | 2866 +++
 .../protoc-gen-go/generator/name_test.go | 114 +
 .../protobuf/protoc-gen-go/grpc/grpc.go | 463 +
 .../protobuf/protoc-gen-go/link_grpc.go | 34 +
 .../golang/protobuf/protoc-gen-go/main.go | 98 +
 .../protobuf/protoc-gen-go/plugin/Makefile | 45 +
 .../protoc-gen-go/plugin/plugin.pb.go | 293 +
 .../protoc-gen-go/plugin/plugin.pb.golden | 83 +
 .../protoc-gen-go/plugin/plugin.proto | 167 +
 .../protobuf/protoc-gen-go/testdata/Makefile | 73 +
 .../testdata/extension_base.proto | 46 +
 .../testdata/extension_extra.proto | 38 +
 .../protoc-gen-go/testdata/extension_test.go | 210 +
 .../testdata/extension_user.proto | 100 +
 .../protoc-gen-go/testdata/grpc.proto | 59 +
 .../protoc-gen-go/testdata/imp.pb.go.golden | 113 +
 .../protobuf/protoc-gen-go/testdata/imp.proto | 70 +
 .../protoc-gen-go/testdata/imp2.proto | 43 +
 .../protoc-gen-go/testdata/imp3.proto | 38 +
 .../protoc-gen-go/testdata/main_test.go | 46 +
 .../protoc-gen-go/testdata/multi/multi1.proto | 44 +
 .../protoc-gen-go/testdata/multi/multi2.proto | 46 +
 .../protoc-gen-go/testdata/multi/multi3.proto | 43 +
 .../protoc-gen-go/testdata/my_test/test.pb.go | 870 +
 .../testdata/my_test/test.pb.go.golden | 870 +
 .../protoc-gen-go/testdata/my_test/test.proto | 156 +
 .../protoc-gen-go/testdata/proto3.proto | 53 +
 .../github.com/golang/protobuf/ptypes/any.go | 139 +
 .../golang/protobuf/ptypes/any/any.pb.go | 178 +
 .../golang/protobuf/ptypes/any/any.proto | 149 +
 .../golang/protobuf/ptypes/any_test.go | 113 +
 .../github.com/golang/protobuf/ptypes/doc.go | 35 +
 .../golang/protobuf/ptypes/duration.go | 102 +
 .../protobuf/ptypes/duration/duration.pb.go | 144 +
 .../protobuf/ptypes/duration/duration.proto | 117 +
 .../golang/protobuf/ptypes/duration_test.go | 121 +
 .../golang/protobuf/ptypes/empty/empty.pb.go | 66 +
 .../golang/protobuf/ptypes/empty/empty.proto | 52 +
 .../golang/protobuf/ptypes/regen.sh | 43 +
 .../protobuf/ptypes/struct/struct.pb.go | 380 +
 .../protobuf/ptypes/struct/struct.proto | 96 +
 .../golang/protobuf/ptypes/timestamp.go | 134 +
 .../protobuf/ptypes/timestamp/timestamp.pb.go | 160 +
 .../protobuf/ptypes/timestamp/timestamp.proto | 133 +
 .../golang/protobuf/ptypes/timestamp_test.go | 153 +
 .../protobuf/ptypes/wrappers/wrappers.pb.go | 260 +
 .../protobuf/ptypes/wrappers/wrappers.proto | 118 +
 vendor/github.com/gorilla/context/.travis.yml | 19 +
 vendor/github.com/gorilla/context/LICENSE | 27 +
 vendor/github.com/gorilla/context/README.md | 7 +
 vendor/github.com/gorilla/context/context.go | 143 +
 .../gorilla/context/context_test.go | 161 +
 vendor/github.com/gorilla/context/doc.go | 82 +
 .../gorilla/securecookie/.travis.yml | 19 +
 .../github.com/gorilla/securecookie/LICENSE | 27 +
 .../github.com/gorilla/securecookie/README.md | 80 +
 vendor/github.com/gorilla/securecookie/doc.go | 61 +
 .../github.com/gorilla/securecookie/fuzz.go | 25 +
 .../gorilla/securecookie/fuzz/corpus/0.sc | 1 +
 ...05a79f06cf3f67f726dae68d18a2290f6c9a50c9-1 | 1 +
 ...05aefe7b48db1dcf464048449ac4fa6af2fbc73b-5 | 3 +
 .../gorilla/securecookie/fuzz/corpus/1.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/10.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/11.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/12.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/13.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/14.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/15.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/16.sc | 1 +
 .../169c3e89cd10efe9bce3a1fdb69a31229e618fc0 | 1 +
 .../gorilla/securecookie/fuzz/corpus/17.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/18.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/19.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/2.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/20.sc | 1 +
 ...202ad82e80f70c37f893e47d23f91b1de5067219-7 | 3 +
 .../gorilla/securecookie/fuzz/corpus/21.sc | 1 +
 ...21606782c65e44cac7afbb90977d8b6f82140e76-1 | 1 +
 .../gorilla/securecookie/fuzz/corpus/22.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/23.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/24.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/25.sc | 1 +
 ...25c648c4c5161116b9b3b883338ddae51f25a901-1 | 2 +
 .../gorilla/securecookie/fuzz/corpus/26.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/27.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/28.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/29.sc | 1 +
 ...2aad7069353f2b76fa70b9e0b22115bb42025ec0-2 | 2 +
 ...2b28c8193457fb5385d22ef4ca733c4e364f00e7-4 | 3 +
 .../gorilla/securecookie/fuzz/corpus/3.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/30.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/31.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/32.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/33.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/34.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/35.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/36.sc | 1 +
 ...366e3e0397c8ceca170311fb9db5ffcddf228b51-5 | 1 +
 .../gorilla/securecookie/fuzz/corpus/37.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/38.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/39.sc | 1 +
 ...3916f239f9da91baa003ee6dc147cca7f7f95bd7-2 | 1 +
 ...e70a0a4bb1ecd96f554cbef9f20c674ff43e2f6-10 | 9 +
 .../gorilla/securecookie/fuzz/corpus/4.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/40.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/41.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/42.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/43.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/44.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/45.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/46.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/47.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/48.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/49.sc | 1 +
 ...4b6a3b5efec9fd7ff70c713e135f825772ee0c5b-6 | 38 +
 .../gorilla/securecookie/fuzz/corpus/5.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/50.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/51.sc | 1 +
 ...5122906052326fb2d0f65fef576c1437b95256af-5 | 7 +
 .../gorilla/securecookie/fuzz/corpus/52.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/53.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/54.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/55.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/56.sc | 1 +
 ...5601b416f11820e0203c84570e4068cf87acad17-4 | 2 +
 .../gorilla/securecookie/fuzz/corpus/57.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/58.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/59.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/6.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/60.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/61.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/62.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/63.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/64.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/65.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/66.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/67.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/68.sc | 1 +
 ...8c721c56a20c85b4aefdffcd60437cf2902b0fa-10 | 9 +
 .../gorilla/securecookie/fuzz/corpus/69.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/7.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/70.sc | 1 +
 .../7095a5454c9f66801f2b298e577a488a9cadf52d | 1 +
 .../gorilla/securecookie/fuzz/corpus/71.sc | 1 +
 ...71853c6197a6a7f222db0f1978c7cb232b87c5ee-3 | 2 +
 .../gorilla/securecookie/fuzz/corpus/72.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/73.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/74.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/75.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/76.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/77.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/78.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/79.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/8.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/80.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/81.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/82.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/83.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/84.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/85.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/86.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/87.sc | 1 +
 ...878643f2e5fb1c89d90d7b5c65957914bb7fe2c6-1 | 1 +
 .../gorilla/securecookie/fuzz/corpus/88.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/89.sc | 1 +
 ...8ed2598d72255e78e1cdecba1a0a3b0cb4e4d8be-1 | 2 +
 .../gorilla/securecookie/fuzz/corpus/9.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/90.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/91.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/92.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/93.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/94.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/95.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/96.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/97.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/98.sc | 1 +
 .../gorilla/securecookie/fuzz/corpus/99.sc | 1 +
 ...9e34c6aae8f2c610f838fed4a5bab0da097c5135-2 | 2 +
 ...9eecb7ef73e5211948391dfc0c2d586e3822b028-1 | 1 +
 ...adc83b19e793491b1c6ea0fd8b46cd9f32e592fc-2 | 1 +
 ...ae3eb68089a89eb0a707c1de4b60edfeb6efc6e0-4 | 11 +
 .../b4f6322316fe4501272935267ab8b1c26684c884 | 1 +
 ...bf3f814c978c0fc01c46c8d5b337b024697186cc-7 | 1 +
 ...c63ae6dd4fc9f9dda66970e827d13f7c73fe841c-1 | 1 +
 ...cebedf21435b903c4013fb902fb5b753e40a100e-8 | 5 +
 ...da39a3ee5e6b4b0d3255bfef95601890afd80709-1 | 0
 ...da5f06015af7bb09d3e421d086939d888f93271c-3 | 3 +
 ...df60b2ac6f14afbf990d366fa820ee4906f1436e-2 | 2 +
 ...ec54cdb4f33539c9b852b89ebcc67b4ec31a2b01-5 | 17 +
 ...ec80b4b6f256eb0f29955c2bc000931d3b766c57-6 | 1 +
 ...f2c59710b18847b10176f19fb0426cb597bafef0-9 | 9 +
 ...f4de882915d90ead3b18371ab004abb24b3cd320-3 | 2 +
 ...f82d23aaf2be2cfc7aa8e323922208cdfce8d35a-3 | 2 +
 ...fa0f4cd7fee9eb65ebb95a3dc88b6fa198a2c986-1 | 1 +
 .../gorilla/securecookie/fuzz/gencorpus.go | 47 +
 .../gorilla/securecookie/securecookie.go | 646 +
 .../gorilla/securecookie/securecookie_test.go | 301 +
 .../github.com/gorilla/sessions/.travis.yml | 22 +
 vendor/github.com/gorilla/sessions/LICENSE | 27 +
 vendor/github.com/gorilla/sessions/README.md | 79 +
 vendor/github.com/gorilla/sessions/doc.go | 199 +
 vendor/github.com/gorilla/sessions/lex.go | 102 +
 .../github.com/gorilla/sessions/sessions.go | 241 +
 .../gorilla/sessions/sessions_test.go | 160 +
 vendor/github.com/gorilla/sessions/store.go | 295 +
 .../github.com/gorilla/sessions/store_test.go | 125 +
 .../inconshreveable/mousetrap/LICENSE | 13 +
 .../inconshreveable/mousetrap/README.md | 23 +
 .../inconshreveable/mousetrap/trap_others.go | 15 +
 .../inconshreveable/mousetrap/trap_windows.go | 98 +
 .../mousetrap/trap_windows_1.4.go | 46 +
 vendor/github.com/jmoiron/sqlx/.gitignore | 24 +
 vendor/github.com/jmoiron/sqlx/.travis.yml | 27 +
 vendor/github.com/jmoiron/sqlx/LICENSE | 23 +
 vendor/github.com/jmoiron/sqlx/README.md | 185 +
 vendor/github.com/jmoiron/sqlx/bind.go | 208 +
 vendor/github.com/jmoiron/sqlx/doc.go | 12 +
 vendor/github.com/jmoiron/sqlx/named.go | 346 +
 .../github.com/jmoiron/sqlx/named_context.go | 132 +
 .../jmoiron/sqlx/named_context_test.go | 136 +
 vendor/github.com/jmoiron/sqlx/named_test.go | 227 +
 .../jmoiron/sqlx/reflectx/README.md | 17 +
 .../jmoiron/sqlx/reflectx/reflect.go | 441 +
 .../jmoiron/sqlx/reflectx/reflect_test.go | 974 +
 vendor/github.com/jmoiron/sqlx/sqlx.go | 1047 +
 .../github.com/jmoiron/sqlx/sqlx_context.go | 348 +
 .../jmoiron/sqlx/sqlx_context_test.go | 1344 ++
 vendor/github.com/jmoiron/sqlx/sqlx_test.go | 1802 ++
 .../github.com/jmoiron/sqlx/types/README.md | 5 +
 vendor/github.com/jmoiron/sqlx/types/types.go | 172 +
 .../jmoiron/sqlx/types/types_test.go | 127 +
 .../kelseyhightower/envconfig/.travis.yml | 7 +
 .../kelseyhightower/envconfig/LICENSE | 19 +
 .../kelseyhightower/envconfig/MAINTAINERS | 2 +
 .../kelseyhightower/envconfig/README.md | 188 +
 .../kelseyhightower/envconfig/doc.go | 8 +
 .../kelseyhightower/envconfig/env_os.go | 7 +
 .../kelseyhightower/envconfig/env_syscall.go | 7 +
 .../kelseyhightower/envconfig/envconfig.go | 319 +
 .../envconfig/envconfig_test.go | 688 +
 .../envconfig/testdata/custom.txt | 30 +
 .../envconfig/testdata/default_list.txt | 153 +
 .../envconfig/testdata/default_table.txt | 34 +
 .../envconfig/testdata/fault.txt | 30 +
 .../kelseyhightower/envconfig/usage.go | 158 +
 .../kelseyhightower/envconfig/usage_test.go | 155 +
 vendor/github.com/lib/pq/.gitignore | 4 +
 vendor/github.com/lib/pq/.travis.sh | 86 +
 vendor/github.com/lib/pq/.travis.yml | 50 +
 vendor/github.com/lib/pq/CONTRIBUTING.md | 29 +
 vendor/github.com/lib/pq/LICENSE.md | 8 +
 vendor/github.com/lib/pq/README.md | 106 +
 vendor/github.com/lib/pq/array.go | 756 +
 vendor/github.com/lib/pq/array_test.go | 1311 ++
 vendor/github.com/lib/pq/bench_test.go | 436 +
 vendor/github.com/lib/pq/buf.go | 91 +
 vendor/github.com/lib/pq/certs/README | 3 +
 vendor/github.com/lib/pq/certs/bogus_root.crt | 19 +
 vendor/github.com/lib/pq/certs/postgresql.crt | 69 +
 vendor/github.com/lib/pq/certs/postgresql.key | 15 +
 vendor/github.com/lib/pq/certs/root.crt | 24 +
 vendor/github.com/lib/pq/certs/server.crt | 81 +
 vendor/github.com/lib/pq/certs/server.key | 27 +
 vendor/github.com/lib/pq/conn.go | 1854 ++
 vendor/github.com/lib/pq/conn_go18.go | 131 +
 vendor/github.com/lib/pq/conn_test.go | 1659 ++
 vendor/github.com/lib/pq/copy.go | 282 +
 vendor/github.com/lib/pq/copy_test.go | 468 +
 vendor/github.com/lib/pq/doc.go | 245 +
 vendor/github.com/lib/pq/encode.go | 603 +
 vendor/github.com/lib/pq/encode_test.go | 766 +
 vendor/github.com/lib/pq/error.go | 515 +
 .../github.com/lib/pq/example/listen/doc.go | 98 +
 vendor/github.com/lib/pq/go18_test.go | 321 +
 vendor/github.com/lib/pq/hstore/hstore.go | 118 +
 .../github.com/lib/pq/hstore/hstore_test.go | 148 +
 vendor/github.com/lib/pq/issues_test.go | 26 +
 vendor/github.com/lib/pq/notify.go | 794 +
 vendor/github.com/lib/pq/notify_test.go | 570 +
 vendor/github.com/lib/pq/oid/doc.go | 6 +
 vendor/github.com/lib/pq/oid/gen.go | 93 +
 vendor/github.com/lib/pq/oid/types.go | 343 +
 vendor/github.com/lib/pq/rows.go | 93 +
 vendor/github.com/lib/pq/rows_test.go | 220 +
 vendor/github.com/lib/pq/ssl.go | 169 +
 vendor/github.com/lib/pq/ssl_go1.7.go | 14 +
 vendor/github.com/lib/pq/ssl_permissions.go | 20 +
 vendor/github.com/lib/pq/ssl_renegotiation.go | 8 +
 vendor/github.com/lib/pq/ssl_test.go | 279 +
 vendor/github.com/lib/pq/ssl_windows.go | 9 +
 vendor/github.com/lib/pq/url.go | 76 +
 vendor/github.com/lib/pq/url_test.go | 66 +
 vendor/github.com/lib/pq/user_posix.go | 24 +
 vendor/github.com/lib/pq/user_windows.go | 27 +
 vendor/github.com/lib/pq/uuid.go | 23 +
 vendor/github.com/lib/pq/uuid_test.go | 46 +
 .../spf13/cobra/.circleci/config.yml | 38 +
 vendor/github.com/spf13/cobra/.gitignore | 36 +
 vendor/github.com/spf13/cobra/.mailmap | 3 +
 vendor/github.com/spf13/cobra/.travis.yml | 21 +
 vendor/github.com/spf13/cobra/LICENSE.txt | 174 +
 vendor/github.com/spf13/cobra/README.md | 736 +
 vendor/github.com/spf13/cobra/args.go | 89 +
 vendor/github.com/spf13/cobra/args_test.go | 241 +
 .../spf13/cobra/bash_completions.go | 555 +
 .../spf13/cobra/bash_completions.md | 221 +
 .../spf13/cobra/bash_completions_test.go | 217 +
 vendor/github.com/spf13/cobra/cobra.go | 200 +
 vendor/github.com/spf13/cobra/cobra/README.md | 94 +
 .../github.com/spf13/cobra/cobra/cmd/add.go | 179 +
 .../spf13/cobra/cobra/cmd/add_test.go | 109 +
 .../spf13/cobra/cobra/cmd/golden_test.go | 77 +
 .../spf13/cobra/cobra/cmd/helpers.go | 168 +
 .../github.com/spf13/cobra/cobra/cmd/init.go | 234 +
 .../spf13/cobra/cobra/cmd/init_test.go | 83 +
 .../spf13/cobra/cobra/cmd/license_agpl.go | 683 +
 .../spf13/cobra/cobra/cmd/license_apache_2.go | 238 +
 .../cobra/cobra/cmd/license_bsd_clause_2.go | 71 +
 .../cobra/cobra/cmd/license_bsd_clause_3.go | 78 +
 .../spf13/cobra/cobra/cmd/license_gpl_2.go | 376 +
 .../spf13/cobra/cobra/cmd/license_gpl_3.go | 711 +
 .../spf13/cobra/cobra/cmd/license_lgpl.go | 186 +
 .../spf13/cobra/cobra/cmd/license_mit.go | 63 +
 .../spf13/cobra/cobra/cmd/licenses.go | 118 +
 .../spf13/cobra/cobra/cmd/project.go | 200 +
 .../spf13/cobra/cobra/cmd/project_test.go | 24 +
 .../github.com/spf13/cobra/cobra/cmd/root.go | 79 +
 .../cobra/cobra/cmd/testdata/LICENSE.golden | 202 +
 .../cobra/cobra/cmd/testdata/main.go.golden | 21 +
 .../cobra/cobra/cmd/testdata/root.go.golden | 89 +
 .../cobra/cobra/cmd/testdata/test.go.golden | 50 +
 vendor/github.com/spf13/cobra/cobra/main.go | 20 +
 vendor/github.com/spf13/cobra/cobra_test.go | 22 +
 vendor/github.com/spf13/cobra/command.go | 1507 ++
 .../github.com/spf13/cobra/command_notwin.go | 5 +
 vendor/github.com/spf13/cobra/command_test.go | 1628 ++
 vendor/github.com/spf13/cobra/command_win.go | 20 +
 vendor/github.com/spf13/cobra/doc/cmd_test.go | 86 +
 vendor/github.com/spf13/cobra/doc/man_docs.go | 236 +
 vendor/github.com/spf13/cobra/doc/man_docs.md | 31 +
 .../spf13/cobra/doc/man_docs_test.go | 177 +
 .../spf13/cobra/doc/man_examples_test.go | 35 +
 vendor/github.com/spf13/cobra/doc/md_docs.go | 159 +
 vendor/github.com/spf13/cobra/doc/md_docs.md | 115 +
 .../spf13/cobra/doc/md_docs_test.go | 74 +
 .../github.com/spf13/cobra/doc/rest_docs.go | 185 +
 .../github.com/spf13/cobra/doc/rest_docs.md | 114 +
 .../spf13/cobra/doc/rest_docs_test.go | 76 +
 vendor/github.com/spf13/cobra/doc/util.go | 51 +
 .../github.com/spf13/cobra/doc/yaml_docs.go | 169 +
 .../github.com/spf13/cobra/doc/yaml_docs.md | 112 +
 .../spf13/cobra/doc/yaml_docs_test.go | 74 +
 .../github.com/spf13/cobra/zsh_completions.go | 126 +
 .../spf13/cobra/zsh_completions_test.go | 89 +
 vendor/github.com/spf13/pflag/.gitignore | 2 +
 vendor/github.com/spf13/pflag/.travis.yml | 21 +
 vendor/github.com/spf13/pflag/LICENSE | 28 +
 vendor/github.com/spf13/pflag/README.md | 296 +
 vendor/github.com/spf13/pflag/bool.go | 94 +
 vendor/github.com/spf13/pflag/bool_slice.go | 147 +
 .../github.com/spf13/pflag/bool_slice_test.go | 215 +
 vendor/github.com/spf13/pflag/bool_test.go | 179 +
 vendor/github.com/spf13/pflag/count.go | 96 +
 vendor/github.com/spf13/pflag/count_test.go | 52 +
 vendor/github.com/spf13/pflag/duration.go | 86 +
 vendor/github.com/spf13/pflag/example_test.go | 36 +
 vendor/github.com/spf13/pflag/export_test.go | 29 +
 vendor/github.com/spf13/pflag/flag.go | 1128 +
 vendor/github.com/spf13/pflag/flag_test.go | 1085 +
 vendor/github.com/spf13/pflag/float32.go | 88 +
 vendor/github.com/spf13/pflag/float64.go | 84 +
 vendor/github.com/spf13/pflag/golangflag.go | 101 +
 .../github.com/spf13/pflag/golangflag_test.go | 39 +
 vendor/github.com/spf13/pflag/int.go | 84 +
 vendor/github.com/spf13/pflag/int32.go | 88 +
 vendor/github.com/spf13/pflag/int64.go | 84 +
 vendor/github.com/spf13/pflag/int8.go | 88 +
 vendor/github.com/spf13/pflag/int_slice.go | 128 +
 .../github.com/spf13/pflag/int_slice_test.go | 165 +
 vendor/github.com/spf13/pflag/ip.go | 94 +
 vendor/github.com/spf13/pflag/ip_slice.go | 148 +
 .../github.com/spf13/pflag/ip_slice_test.go | 222 +
 vendor/github.com/spf13/pflag/ip_test.go | 63 +
 vendor/github.com/spf13/pflag/ipmask.go | 122 +
 vendor/github.com/spf13/pflag/ipnet.go | 98 +
 vendor/github.com/spf13/pflag/ipnet_test.go | 70 +
 vendor/github.com/spf13/pflag/string.go | 80 +
 vendor/github.com/spf13/pflag/string_array.go | 103 +
 .../spf13/pflag/string_array_test.go | 233 +
 vendor/github.com/spf13/pflag/string_slice.go | 129 +
 .../spf13/pflag/string_slice_test.go | 253 +
 vendor/github.com/spf13/pflag/uint.go | 88 +
 vendor/github.com/spf13/pflag/uint16.go | 88 +
 vendor/github.com/spf13/pflag/uint32.go | 88 +
 vendor/github.com/spf13/pflag/uint64.go | 88 +
 vendor/github.com/spf13/pflag/uint8.go | 88 +
 vendor/github.com/spf13/pflag/uint_slice.go | 126 +
 .../github.com/spf13/pflag/uint_slice_test.go | 161 +
 vendor/github.com/spf13/pflag/verify/all.sh | 69 +
 vendor/github.com/spf13/pflag/verify/gofmt.sh | 19 +
 .../github.com/spf13/pflag/verify/golint.sh | 15 +
 vendor/golang.org/x/net/.gitattributes | 10 +
 vendor/golang.org/x/net/.gitignore | 2 +
 vendor/golang.org/x/net/AUTHORS | 3 +
 vendor/golang.org/x/net/CONTRIBUTING.md | 26 +
 vendor/golang.org/x/net/CONTRIBUTORS | 3 +
 vendor/golang.org/x/net/LICENSE | 27 +
 vendor/golang.org/x/net/PATENTS | 22 +
 vendor/golang.org/x/net/README.md | 16 +
 vendor/golang.org/x/net/bpf/asm.go | 41 +
 vendor/golang.org/x/net/bpf/constants.go | 218 +
 vendor/golang.org/x/net/bpf/doc.go | 82 +
 vendor/golang.org/x/net/bpf/instructions.go | 704 +
 .../golang.org/x/net/bpf/instructions_test.go | 525 +
 vendor/golang.org/x/net/bpf/setter.go | 10 +
 .../x/net/bpf/testdata/all_instructions.bpf | 1 +
 .../x/net/bpf/testdata/all_instructions.txt | 79 +
 vendor/golang.org/x/net/bpf/vm.go | 140 +
 vendor/golang.org/x/net/bpf/vm_aluop_test.go | 512 +
 vendor/golang.org/x/net/bpf/vm_bpf_test.go | 192 +
 .../golang.org/x/net/bpf/vm_extension_test.go | 49 +
 .../golang.org/x/net/bpf/vm_instructions.go | 174 +
 vendor/golang.org/x/net/bpf/vm_jump_test.go | 380 +
 vendor/golang.org/x/net/bpf/vm_load_test.go | 246 +
 vendor/golang.org/x/net/bpf/vm_ret_test.go | 115 +
 .../golang.org/x/net/bpf/vm_scratch_test.go | 247 +
 vendor/golang.org/x/net/bpf/vm_test.go | 144 +
 vendor/golang.org/x/net/codereview.cfg | 1 +
 vendor/golang.org/x/net/context/context.go | 56 +
 .../golang.org/x/net/context/context_test.go | 583 +
 .../x/net/context/ctxhttp/ctxhttp.go | 74 +
 .../x/net/context/ctxhttp/ctxhttp_17_test.go | 29 +
 .../x/net/context/ctxhttp/ctxhttp_pre17.go | 147 +
 .../net/context/ctxhttp/ctxhttp_pre17_test.go | 79 +
 .../x/net/context/ctxhttp/ctxhttp_test.go | 105 +
 vendor/golang.org/x/net/context/go17.go | 72 +
 vendor/golang.org/x/net/context/go19.go | 20 +
 vendor/golang.org/x/net/context/pre_go17.go | 300 +
 vendor/golang.org/x/net/context/pre_go19.go | 109 +
 .../x/net/context/withtimeout_test.go | 31 +
 vendor/golang.org/x/net/dict/dict.go | 210 +
 .../x/net/dns/dnsmessage/example_test.go | 132 +
 .../x/net/dns/dnsmessage/message.go | 2247 ++
 .../x/net/dns/dnsmessage/message_test.go | 1316 ++
 vendor/golang.org/x/net/html/atom/atom.go | 78 +
 .../golang.org/x/net/html/atom/atom_test.go | 109 +
 vendor/golang.org/x/net/html/atom/gen.go | 710 +
 vendor/golang.org/x/net/html/atom/table.go | 779 +
 .../golang.org/x/net/html/atom/table_test.go | 374 +
 .../golang.org/x/net/html/charset/charset.go | 257 +
 .../x/net/html/charset/charset_test.go | 237 +
 .../html/charset/testdata/HTTP-charset.html | 48 +
 .../charset/testdata/HTTP-vs-UTF-8-BOM.html | 48 +
 .../testdata/HTTP-vs-meta-charset.html | 49 +
 .../testdata/HTTP-vs-meta-content.html | 49 +
 .../testdata/No-encoding-declaration.html | 47 +
 .../x/net/html/charset/testdata/README | 9 +
 .../html/charset/testdata/UTF-16BE-BOM.html | Bin 0 -> 2670 bytes
 .../html/charset/testdata/UTF-16LE-BOM.html | Bin 0 -> 2682 bytes
 .../testdata/UTF-8-BOM-vs-meta-charset.html | 49 +
 .../testdata/UTF-8-BOM-vs-meta-content.html | 48 +
 .../testdata/meta-charset-attribute.html | 48 +
 .../testdata/meta-content-attribute.html | 48 +
 vendor/golang.org/x/net/html/const.go | 104 +
 vendor/golang.org/x/net/html/doc.go | 106 +
 vendor/golang.org/x/net/html/doctype.go | 156 +
vendor/golang.org/x/net/html/entity.go | 2253 ++ vendor/golang.org/x/net/html/entity_test.go | 29 + vendor/golang.org/x/net/html/escape.go | 258 + vendor/golang.org/x/net/html/escape_test.go | 97 + vendor/golang.org/x/net/html/example_test.go | 40 + vendor/golang.org/x/net/html/foreign.go | 226 + vendor/golang.org/x/net/html/node.go | 194 + vendor/golang.org/x/net/html/node_test.go | 146 + vendor/golang.org/x/net/html/parse.go | 2094 ++ vendor/golang.org/x/net/html/parse_test.go | 388 + vendor/golang.org/x/net/html/render.go | 271 + vendor/golang.org/x/net/html/render_test.go | 156 + .../golang.org/x/net/html/testdata/go1.html | 2237 ++ .../x/net/html/testdata/webkit/README | 28 + .../x/net/html/testdata/webkit/adoption01.dat | 194 + .../x/net/html/testdata/webkit/adoption02.dat | 31 + .../x/net/html/testdata/webkit/comments01.dat | 135 + .../x/net/html/testdata/webkit/doctype01.dat | 370 + .../x/net/html/testdata/webkit/entities01.dat | 603 + .../x/net/html/testdata/webkit/entities02.dat | 249 + .../html/testdata/webkit/html5test-com.dat | 246 + .../x/net/html/testdata/webkit/inbody01.dat | 43 + .../x/net/html/testdata/webkit/isindex.dat | 40 + ...pending-spec-changes-plain-text-unsafe.dat | Bin 0 -> 115 bytes .../testdata/webkit/pending-spec-changes.dat | 52 + .../testdata/webkit/plain-text-unsafe.dat | Bin 0 -> 4166 bytes .../net/html/testdata/webkit/scriptdata01.dat | 308 + .../testdata/webkit/scripted/adoption01.dat | 15 + .../testdata/webkit/scripted/webkit01.dat | 28 + .../x/net/html/testdata/webkit/tables01.dat | 212 + .../x/net/html/testdata/webkit/tests1.dat | 1952 ++ .../x/net/html/testdata/webkit/tests10.dat | 799 + .../x/net/html/testdata/webkit/tests11.dat | 482 + .../x/net/html/testdata/webkit/tests12.dat | 62 + .../x/net/html/testdata/webkit/tests14.dat | 74 + .../x/net/html/testdata/webkit/tests15.dat | 208 + .../x/net/html/testdata/webkit/tests16.dat | 2299 +++ .../x/net/html/testdata/webkit/tests17.dat | 153 + .../x/net/html/testdata/webkit/tests18.dat | 269 + .../x/net/html/testdata/webkit/tests19.dat | 1237 ++ .../x/net/html/testdata/webkit/tests2.dat | 763 + .../x/net/html/testdata/webkit/tests20.dat | 455 + .../x/net/html/testdata/webkit/tests21.dat | 221 + .../x/net/html/testdata/webkit/tests22.dat | 157 + .../x/net/html/testdata/webkit/tests23.dat | 155 + .../x/net/html/testdata/webkit/tests24.dat | 79 + .../x/net/html/testdata/webkit/tests25.dat | 219 + .../x/net/html/testdata/webkit/tests26.dat | 313 + .../x/net/html/testdata/webkit/tests3.dat | 305 + .../x/net/html/testdata/webkit/tests4.dat | 59 + .../x/net/html/testdata/webkit/tests5.dat | 191 + .../x/net/html/testdata/webkit/tests6.dat | 663 + .../x/net/html/testdata/webkit/tests7.dat | 390 + .../x/net/html/testdata/webkit/tests8.dat | 148 + .../x/net/html/testdata/webkit/tests9.dat | 457 + .../testdata/webkit/tests_innerHTML_1.dat | 741 + .../x/net/html/testdata/webkit/tricky01.dat | 261 + .../x/net/html/testdata/webkit/webkit01.dat | 610 + .../x/net/html/testdata/webkit/webkit02.dat | 159 + vendor/golang.org/x/net/html/token.go | 1219 ++ vendor/golang.org/x/net/html/token_test.go | 748 + .../x/net/http/httpproxy/export_test.go | 7 + .../x/net/http/httpproxy/go19_test.go | 13 + .../golang.org/x/net/http/httpproxy/proxy.go | 239 + .../x/net/http/httpproxy/proxy_test.go | 301 + vendor/golang.org/x/net/http2/.gitignore | 2 + vendor/golang.org/x/net/http2/Dockerfile | 51 + vendor/golang.org/x/net/http2/Makefile | 3 + vendor/golang.org/x/net/http2/README | 20 + vendor/golang.org/x/net/http2/ciphers.go | 641 + 
vendor/golang.org/x/net/http2/ciphers_test.go | 309 + .../x/net/http2/client_conn_pool.go | 256 + .../x/net/http2/configure_transport.go | 80 + vendor/golang.org/x/net/http2/databuffer.go | 146 + .../golang.org/x/net/http2/databuffer_test.go | 157 + vendor/golang.org/x/net/http2/errors.go | 133 + vendor/golang.org/x/net/http2/errors_test.go | 24 + vendor/golang.org/x/net/http2/flow.go | 50 + vendor/golang.org/x/net/http2/flow_test.go | 53 + vendor/golang.org/x/net/http2/frame.go | 1579 ++ vendor/golang.org/x/net/http2/frame_test.go | 1191 ++ vendor/golang.org/x/net/http2/go16.go | 16 + vendor/golang.org/x/net/http2/go17.go | 106 + vendor/golang.org/x/net/http2/go17_not18.go | 36 + vendor/golang.org/x/net/http2/go18.go | 56 + vendor/golang.org/x/net/http2/go18_test.go | 79 + vendor/golang.org/x/net/http2/go19.go | 16 + vendor/golang.org/x/net/http2/go19_test.go | 59 + vendor/golang.org/x/net/http2/gotrack.go | 170 + vendor/golang.org/x/net/http2/gotrack_test.go | 33 + .../golang.org/x/net/http2/h2demo/.gitignore | 6 + .../golang.org/x/net/http2/h2demo/Dockerfile | 11 + .../x/net/http2/h2demo/Dockerfile.0 | 134 + vendor/golang.org/x/net/http2/h2demo/Makefile | 55 + vendor/golang.org/x/net/http2/h2demo/README | 16 + .../x/net/http2/h2demo/deployment-prod.yaml | 28 + .../golang.org/x/net/http2/h2demo/h2demo.go | 543 + .../golang.org/x/net/http2/h2demo/launch.go | 302 + .../golang.org/x/net/http2/h2demo/rootCA.key | 27 + .../golang.org/x/net/http2/h2demo/rootCA.pem | 26 + .../golang.org/x/net/http2/h2demo/rootCA.srl | 1 + .../golang.org/x/net/http2/h2demo/server.crt | 20 + .../golang.org/x/net/http2/h2demo/server.key | 27 + .../x/net/http2/h2demo/service.yaml | 17 + vendor/golang.org/x/net/http2/h2demo/tmpl.go | 1991 ++ vendor/golang.org/x/net/http2/h2i/README.md | 97 + vendor/golang.org/x/net/http2/h2i/h2i.go | 522 + vendor/golang.org/x/net/http2/headermap.go | 78 + vendor/golang.org/x/net/http2/hpack/encode.go | 240 + .../x/net/http2/hpack/encode_test.go | 386 + vendor/golang.org/x/net/http2/hpack/hpack.go | 490 + .../x/net/http2/hpack/hpack_test.go | 722 + .../golang.org/x/net/http2/hpack/huffman.go | 212 + vendor/golang.org/x/net/http2/hpack/tables.go | 479 + .../x/net/http2/hpack/tables_test.go | 214 + vendor/golang.org/x/net/http2/http2.go | 391 + vendor/golang.org/x/net/http2/http2_test.go | 199 + vendor/golang.org/x/net/http2/not_go16.go | 21 + vendor/golang.org/x/net/http2/not_go17.go | 87 + vendor/golang.org/x/net/http2/not_go18.go | 29 + vendor/golang.org/x/net/http2/not_go19.go | 16 + vendor/golang.org/x/net/http2/pipe.go | 163 + vendor/golang.org/x/net/http2/pipe_test.go | 130 + vendor/golang.org/x/net/http2/server.go | 2888 +++ .../x/net/http2/server_push_test.go | 521 + vendor/golang.org/x/net/http2/server_test.go | 3725 ++++ .../testdata/draft-ietf-httpbis-http2.xml | 5021 +++++ vendor/golang.org/x/net/http2/transport.go | 2303 +++ .../golang.org/x/net/http2/transport_test.go | 3847 ++++ vendor/golang.org/x/net/http2/write.go | 365 + vendor/golang.org/x/net/http2/writesched.go | 242 + .../x/net/http2/writesched_priority.go | 452 + .../x/net/http2/writesched_priority_test.go | 541 + .../x/net/http2/writesched_random.go | 72 + .../x/net/http2/writesched_random_test.go | 44 + .../golang.org/x/net/http2/writesched_test.go | 125 + vendor/golang.org/x/net/http2/z_spec_test.go | 356 + vendor/golang.org/x/net/icmp/diag_test.go | 274 + vendor/golang.org/x/net/icmp/dstunreach.go | 41 + vendor/golang.org/x/net/icmp/echo.go | 157 + vendor/golang.org/x/net/icmp/endpoint.go | 113 + 
vendor/golang.org/x/net/icmp/example_test.go | 63 + vendor/golang.org/x/net/icmp/extension.go | 108 + .../golang.org/x/net/icmp/extension_test.go | 333 + vendor/golang.org/x/net/icmp/helper_posix.go | 75 + vendor/golang.org/x/net/icmp/interface.go | 322 + vendor/golang.org/x/net/icmp/ipv4.go | 61 + vendor/golang.org/x/net/icmp/ipv4_test.go | 75 + vendor/golang.org/x/net/icmp/ipv6.go | 23 + vendor/golang.org/x/net/icmp/listen_posix.go | 100 + vendor/golang.org/x/net/icmp/listen_stub.go | 33 + vendor/golang.org/x/net/icmp/message.go | 157 + vendor/golang.org/x/net/icmp/message_test.go | 155 + vendor/golang.org/x/net/icmp/messagebody.go | 41 + vendor/golang.org/x/net/icmp/mpls.go | 77 + vendor/golang.org/x/net/icmp/multipart.go | 121 + .../golang.org/x/net/icmp/multipart_test.go | 575 + vendor/golang.org/x/net/icmp/packettoobig.go | 43 + vendor/golang.org/x/net/icmp/paramprob.go | 63 + vendor/golang.org/x/net/icmp/sys_freebsd.go | 11 + vendor/golang.org/x/net/icmp/timeexceeded.go | 39 + vendor/golang.org/x/net/idna/example_test.go | 70 + vendor/golang.org/x/net/idna/idna.go | 732 + vendor/golang.org/x/net/idna/idna_test.go | 108 + vendor/golang.org/x/net/idna/punycode.go | 203 + vendor/golang.org/x/net/idna/punycode_test.go | 198 + vendor/golang.org/x/net/idna/tables.go | 4557 +++++ vendor/golang.org/x/net/idna/trie.go | 72 + vendor/golang.org/x/net/idna/trieval.go | 119 + .../golang.org/x/net/internal/iana/const.go | 227 + vendor/golang.org/x/net/internal/iana/gen.go | 387 + .../x/net/internal/nettest/helper_bsd.go | 53 + .../x/net/internal/nettest/helper_nobsd.go | 15 + .../x/net/internal/nettest/helper_posix.go | 31 + .../x/net/internal/nettest/helper_stub.go | 32 + .../x/net/internal/nettest/helper_unix.go | 29 + .../x/net/internal/nettest/helper_windows.go | 42 + .../x/net/internal/nettest/interface.go | 94 + .../x/net/internal/nettest/rlimit.go | 11 + .../x/net/internal/nettest/stack.go | 152 + .../x/net/internal/socket/cmsghdr.go | 11 + .../x/net/internal/socket/cmsghdr_bsd.go | 13 + .../internal/socket/cmsghdr_linux_32bit.go | 14 + .../internal/socket/cmsghdr_linux_64bit.go | 14 + .../internal/socket/cmsghdr_solaris_64bit.go | 14 + .../x/net/internal/socket/cmsghdr_stub.go | 17 + .../x/net/internal/socket/defs_darwin.go | 44 + .../x/net/internal/socket/defs_dragonfly.go | 44 + .../x/net/internal/socket/defs_freebsd.go | 44 + .../x/net/internal/socket/defs_linux.go | 49 + .../x/net/internal/socket/defs_netbsd.go | 47 + .../x/net/internal/socket/defs_openbsd.go | 44 + .../x/net/internal/socket/defs_solaris.go | 44 + .../x/net/internal/socket/error_unix.go | 31 + .../x/net/internal/socket/error_windows.go | 26 + .../x/net/internal/socket/iovec_32bit.go | 19 + .../x/net/internal/socket/iovec_64bit.go | 19 + .../internal/socket/iovec_solaris_64bit.go | 19 + .../x/net/internal/socket/iovec_stub.go | 11 + .../x/net/internal/socket/mmsghdr_stub.go | 21 + .../x/net/internal/socket/mmsghdr_unix.go | 42 + .../x/net/internal/socket/msghdr_bsd.go | 39 + .../x/net/internal/socket/msghdr_bsdvar.go | 16 + .../x/net/internal/socket/msghdr_linux.go | 36 + .../net/internal/socket/msghdr_linux_32bit.go | 24 + .../net/internal/socket/msghdr_linux_64bit.go | 24 + .../x/net/internal/socket/msghdr_openbsd.go | 14 + .../internal/socket/msghdr_solaris_64bit.go | 36 + .../x/net/internal/socket/msghdr_stub.go | 14 + .../x/net/internal/socket/rawconn.go | 66 + .../x/net/internal/socket/rawconn_mmsg.go | 74 + .../x/net/internal/socket/rawconn_msg.go | 77 + .../x/net/internal/socket/rawconn_nommsg.go | 18 + 
.../x/net/internal/socket/rawconn_nomsg.go | 18 + .../x/net/internal/socket/rawconn_stub.go | 25 + .../x/net/internal/socket/reflect.go | 62 + .../x/net/internal/socket/socket.go | 285 + .../net/internal/socket/socket_go1_9_test.go | 259 + .../x/net/internal/socket/socket_test.go | 46 + .../golang.org/x/net/internal/socket/sys.go | 33 + .../x/net/internal/socket/sys_bsd.go | 17 + .../x/net/internal/socket/sys_bsdvar.go | 14 + .../x/net/internal/socket/sys_darwin.go | 7 + .../x/net/internal/socket/sys_dragonfly.go | 7 + .../x/net/internal/socket/sys_linux.go | 27 + .../x/net/internal/socket/sys_linux_386.go | 55 + .../x/net/internal/socket/sys_linux_386.s | 11 + .../x/net/internal/socket/sys_linux_amd64.go | 10 + .../x/net/internal/socket/sys_linux_arm.go | 10 + .../x/net/internal/socket/sys_linux_arm64.go | 10 + .../x/net/internal/socket/sys_linux_mips.go | 10 + .../x/net/internal/socket/sys_linux_mips64.go | 10 + .../net/internal/socket/sys_linux_mips64le.go | 10 + .../x/net/internal/socket/sys_linux_mipsle.go | 10 + .../x/net/internal/socket/sys_linux_ppc64.go | 10 + .../net/internal/socket/sys_linux_ppc64le.go | 10 + .../x/net/internal/socket/sys_linux_s390x.go | 55 + .../x/net/internal/socket/sys_linux_s390x.s | 11 + .../x/net/internal/socket/sys_netbsd.go | 25 + .../x/net/internal/socket/sys_posix.go | 168 + .../x/net/internal/socket/sys_solaris.go | 71 + .../x/net/internal/socket/sys_solaris_amd64.s | 11 + .../x/net/internal/socket/sys_stub.go | 64 + .../x/net/internal/socket/sys_unix.go | 33 + .../x/net/internal/socket/sys_windows.go | 70 + .../x/net/internal/socket/zsys_darwin_386.go | 59 + .../net/internal/socket/zsys_darwin_amd64.go | 61 + .../x/net/internal/socket/zsys_darwin_arm.go | 59 + .../net/internal/socket/zsys_darwin_arm64.go | 61 + .../internal/socket/zsys_dragonfly_amd64.go | 61 + .../x/net/internal/socket/zsys_freebsd_386.go | 59 + .../net/internal/socket/zsys_freebsd_amd64.go | 61 + .../x/net/internal/socket/zsys_freebsd_arm.go | 59 + .../x/net/internal/socket/zsys_linux_386.go | 63 + .../x/net/internal/socket/zsys_linux_amd64.go | 66 + .../x/net/internal/socket/zsys_linux_arm.go | 63 + .../x/net/internal/socket/zsys_linux_arm64.go | 66 + .../x/net/internal/socket/zsys_linux_mips.go | 63 + .../net/internal/socket/zsys_linux_mips64.go | 66 + .../internal/socket/zsys_linux_mips64le.go | 66 + .../net/internal/socket/zsys_linux_mipsle.go | 63 + .../x/net/internal/socket/zsys_linux_ppc64.go | 66 + .../net/internal/socket/zsys_linux_ppc64le.go | 66 + .../x/net/internal/socket/zsys_linux_s390x.go | 66 + .../x/net/internal/socket/zsys_netbsd_386.go | 65 + .../net/internal/socket/zsys_netbsd_amd64.go | 68 + .../x/net/internal/socket/zsys_netbsd_arm.go | 65 + .../x/net/internal/socket/zsys_openbsd_386.go | 59 + .../net/internal/socket/zsys_openbsd_amd64.go | 61 + .../x/net/internal/socket/zsys_openbsd_arm.go | 59 + .../net/internal/socket/zsys_solaris_amd64.go | 60 + .../golang.org/x/net/internal/socks/client.go | 168 + .../x/net/internal/socks/dial_test.go | 158 + .../golang.org/x/net/internal/socks/socks.go | 265 + .../x/net/internal/sockstest/server.go | 241 + .../x/net/internal/sockstest/server_test.go | 103 + .../x/net/internal/timeseries/timeseries.go | 525 + .../internal/timeseries/timeseries_test.go | 170 + vendor/golang.org/x/net/ipv4/batch.go | 191 + vendor/golang.org/x/net/ipv4/bpf_test.go | 93 + vendor/golang.org/x/net/ipv4/control.go | 144 + vendor/golang.org/x/net/ipv4/control_bsd.go | 40 + .../golang.org/x/net/ipv4/control_pktinfo.go | 39 + 
vendor/golang.org/x/net/ipv4/control_stub.go | 13 + vendor/golang.org/x/net/ipv4/control_test.go | 21 + vendor/golang.org/x/net/ipv4/control_unix.go | 73 + .../golang.org/x/net/ipv4/control_windows.go | 16 + vendor/golang.org/x/net/ipv4/defs_darwin.go | 77 + .../golang.org/x/net/ipv4/defs_dragonfly.go | 38 + vendor/golang.org/x/net/ipv4/defs_freebsd.go | 75 + vendor/golang.org/x/net/ipv4/defs_linux.go | 122 + vendor/golang.org/x/net/ipv4/defs_netbsd.go | 37 + vendor/golang.org/x/net/ipv4/defs_openbsd.go | 37 + vendor/golang.org/x/net/ipv4/defs_solaris.go | 84 + vendor/golang.org/x/net/ipv4/dgramopt.go | 265 + vendor/golang.org/x/net/ipv4/doc.go | 244 + vendor/golang.org/x/net/ipv4/endpoint.go | 187 + vendor/golang.org/x/net/ipv4/example_test.go | 224 + vendor/golang.org/x/net/ipv4/gen.go | 199 + vendor/golang.org/x/net/ipv4/genericopt.go | 57 + vendor/golang.org/x/net/ipv4/header.go | 159 + vendor/golang.org/x/net/ipv4/header_test.go | 228 + vendor/golang.org/x/net/ipv4/helper.go | 63 + vendor/golang.org/x/net/ipv4/iana.go | 38 + vendor/golang.org/x/net/ipv4/icmp.go | 57 + vendor/golang.org/x/net/ipv4/icmp_linux.go | 25 + vendor/golang.org/x/net/ipv4/icmp_stub.go | 25 + vendor/golang.org/x/net/ipv4/icmp_test.go | 95 + .../golang.org/x/net/ipv4/multicast_test.go | 334 + .../x/net/ipv4/multicastlistener_test.go | 265 + .../x/net/ipv4/multicastsockopt_test.go | 195 + vendor/golang.org/x/net/ipv4/packet.go | 69 + vendor/golang.org/x/net/ipv4/packet_go1_8.go | 56 + vendor/golang.org/x/net/ipv4/packet_go1_9.go | 67 + vendor/golang.org/x/net/ipv4/payload.go | 23 + vendor/golang.org/x/net/ipv4/payload_cmsg.go | 36 + .../x/net/ipv4/payload_cmsg_go1_8.go | 59 + .../x/net/ipv4/payload_cmsg_go1_9.go | 67 + .../golang.org/x/net/ipv4/payload_nocmsg.go | 42 + .../x/net/ipv4/readwrite_go1_8_test.go | 248 + .../x/net/ipv4/readwrite_go1_9_test.go | 388 + .../golang.org/x/net/ipv4/readwrite_test.go | 140 + vendor/golang.org/x/net/ipv4/sockopt.go | 44 + vendor/golang.org/x/net/ipv4/sockopt_posix.go | 71 + vendor/golang.org/x/net/ipv4/sockopt_stub.go | 42 + vendor/golang.org/x/net/ipv4/sys_asmreq.go | 119 + .../golang.org/x/net/ipv4/sys_asmreq_stub.go | 25 + vendor/golang.org/x/net/ipv4/sys_asmreqn.go | 42 + .../golang.org/x/net/ipv4/sys_asmreqn_stub.go | 21 + vendor/golang.org/x/net/ipv4/sys_bpf.go | 23 + vendor/golang.org/x/net/ipv4/sys_bpf_stub.go | 16 + vendor/golang.org/x/net/ipv4/sys_bsd.go | 37 + vendor/golang.org/x/net/ipv4/sys_darwin.go | 93 + vendor/golang.org/x/net/ipv4/sys_dragonfly.go | 35 + vendor/golang.org/x/net/ipv4/sys_freebsd.go | 76 + vendor/golang.org/x/net/ipv4/sys_linux.go | 59 + vendor/golang.org/x/net/ipv4/sys_solaris.go | 57 + vendor/golang.org/x/net/ipv4/sys_ssmreq.go | 54 + .../golang.org/x/net/ipv4/sys_ssmreq_stub.go | 21 + vendor/golang.org/x/net/ipv4/sys_stub.go | 13 + vendor/golang.org/x/net/ipv4/sys_windows.go | 67 + vendor/golang.org/x/net/ipv4/unicast_test.go | 247 + .../x/net/ipv4/unicastsockopt_test.go | 148 + vendor/golang.org/x/net/ipv4/zsys_darwin.go | 99 + .../golang.org/x/net/ipv4/zsys_dragonfly.go | 31 + .../golang.org/x/net/ipv4/zsys_freebsd_386.go | 93 + .../x/net/ipv4/zsys_freebsd_amd64.go | 95 + .../golang.org/x/net/ipv4/zsys_freebsd_arm.go | 95 + .../golang.org/x/net/ipv4/zsys_linux_386.go | 148 + .../golang.org/x/net/ipv4/zsys_linux_amd64.go | 150 + .../golang.org/x/net/ipv4/zsys_linux_arm.go | 148 + .../golang.org/x/net/ipv4/zsys_linux_arm64.go | 150 + .../golang.org/x/net/ipv4/zsys_linux_mips.go | 148 + .../x/net/ipv4/zsys_linux_mips64.go | 150 + 
.../x/net/ipv4/zsys_linux_mips64le.go | 150 + .../x/net/ipv4/zsys_linux_mipsle.go | 148 + .../golang.org/x/net/ipv4/zsys_linux_ppc.go | 148 + .../golang.org/x/net/ipv4/zsys_linux_ppc64.go | 150 + .../x/net/ipv4/zsys_linux_ppc64le.go | 150 + .../golang.org/x/net/ipv4/zsys_linux_s390x.go | 150 + vendor/golang.org/x/net/ipv4/zsys_netbsd.go | 30 + vendor/golang.org/x/net/ipv4/zsys_openbsd.go | 30 + vendor/golang.org/x/net/ipv4/zsys_solaris.go | 100 + vendor/golang.org/x/net/ipv6/batch.go | 119 + vendor/golang.org/x/net/ipv6/bpf_test.go | 96 + vendor/golang.org/x/net/ipv6/control.go | 187 + .../x/net/ipv6/control_rfc2292_unix.go | 48 + .../x/net/ipv6/control_rfc3542_unix.go | 94 + vendor/golang.org/x/net/ipv6/control_stub.go | 13 + vendor/golang.org/x/net/ipv6/control_test.go | 21 + vendor/golang.org/x/net/ipv6/control_unix.go | 55 + .../golang.org/x/net/ipv6/control_windows.go | 16 + vendor/golang.org/x/net/ipv6/defs_darwin.go | 112 + .../golang.org/x/net/ipv6/defs_dragonfly.go | 84 + vendor/golang.org/x/net/ipv6/defs_freebsd.go | 105 + vendor/golang.org/x/net/ipv6/defs_linux.go | 147 + vendor/golang.org/x/net/ipv6/defs_netbsd.go | 80 + vendor/golang.org/x/net/ipv6/defs_openbsd.go | 89 + vendor/golang.org/x/net/ipv6/defs_solaris.go | 114 + vendor/golang.org/x/net/ipv6/dgramopt.go | 302 + vendor/golang.org/x/net/ipv6/doc.go | 243 + vendor/golang.org/x/net/ipv6/endpoint.go | 128 + vendor/golang.org/x/net/ipv6/example_test.go | 216 + vendor/golang.org/x/net/ipv6/gen.go | 199 + vendor/golang.org/x/net/ipv6/genericopt.go | 58 + vendor/golang.org/x/net/ipv6/header.go | 55 + vendor/golang.org/x/net/ipv6/header_test.go | 55 + vendor/golang.org/x/net/ipv6/helper.go | 57 + vendor/golang.org/x/net/ipv6/iana.go | 86 + vendor/golang.org/x/net/ipv6/icmp.go | 60 + vendor/golang.org/x/net/ipv6/icmp_bsd.go | 29 + vendor/golang.org/x/net/ipv6/icmp_linux.go | 27 + vendor/golang.org/x/net/ipv6/icmp_solaris.go | 27 + vendor/golang.org/x/net/ipv6/icmp_stub.go | 23 + vendor/golang.org/x/net/ipv6/icmp_test.go | 96 + vendor/golang.org/x/net/ipv6/icmp_windows.go | 22 + .../x/net/ipv6/mocktransponder_test.go | 32 + .../golang.org/x/net/ipv6/multicast_test.go | 264 + .../x/net/ipv6/multicastlistener_test.go | 261 + .../x/net/ipv6/multicastsockopt_test.go | 157 + vendor/golang.org/x/net/ipv6/payload.go | 23 + vendor/golang.org/x/net/ipv6/payload_cmsg.go | 35 + .../x/net/ipv6/payload_cmsg_go1_8.go | 55 + .../x/net/ipv6/payload_cmsg_go1_9.go | 57 + .../golang.org/x/net/ipv6/payload_nocmsg.go | 41 + .../x/net/ipv6/readwrite_go1_8_test.go | 242 + .../x/net/ipv6/readwrite_go1_9_test.go | 373 + .../golang.org/x/net/ipv6/readwrite_test.go | 148 + vendor/golang.org/x/net/ipv6/sockopt.go | 43 + vendor/golang.org/x/net/ipv6/sockopt_posix.go | 87 + vendor/golang.org/x/net/ipv6/sockopt_stub.go | 46 + vendor/golang.org/x/net/ipv6/sockopt_test.go | 133 + vendor/golang.org/x/net/ipv6/sys_asmreq.go | 24 + .../golang.org/x/net/ipv6/sys_asmreq_stub.go | 17 + vendor/golang.org/x/net/ipv6/sys_bpf.go | 23 + vendor/golang.org/x/net/ipv6/sys_bpf_stub.go | 16 + vendor/golang.org/x/net/ipv6/sys_bsd.go | 57 + vendor/golang.org/x/net/ipv6/sys_darwin.go | 106 + vendor/golang.org/x/net/ipv6/sys_freebsd.go | 92 + vendor/golang.org/x/net/ipv6/sys_linux.go | 74 + vendor/golang.org/x/net/ipv6/sys_solaris.go | 74 + vendor/golang.org/x/net/ipv6/sys_ssmreq.go | 54 + .../golang.org/x/net/ipv6/sys_ssmreq_stub.go | 21 + vendor/golang.org/x/net/ipv6/sys_stub.go | 13 + vendor/golang.org/x/net/ipv6/sys_windows.go | 75 + 
vendor/golang.org/x/net/ipv6/unicast_test.go | 184 + .../x/net/ipv6/unicastsockopt_test.go | 120 + vendor/golang.org/x/net/ipv6/zsys_darwin.go | 131 + .../golang.org/x/net/ipv6/zsys_dragonfly.go | 88 + .../golang.org/x/net/ipv6/zsys_freebsd_386.go | 122 + .../x/net/ipv6/zsys_freebsd_amd64.go | 124 + .../golang.org/x/net/ipv6/zsys_freebsd_arm.go | 124 + .../golang.org/x/net/ipv6/zsys_linux_386.go | 170 + .../golang.org/x/net/ipv6/zsys_linux_amd64.go | 172 + .../golang.org/x/net/ipv6/zsys_linux_arm.go | 170 + .../golang.org/x/net/ipv6/zsys_linux_arm64.go | 172 + .../golang.org/x/net/ipv6/zsys_linux_mips.go | 170 + .../x/net/ipv6/zsys_linux_mips64.go | 172 + .../x/net/ipv6/zsys_linux_mips64le.go | 172 + .../x/net/ipv6/zsys_linux_mipsle.go | 170 + .../golang.org/x/net/ipv6/zsys_linux_ppc.go | 170 + .../golang.org/x/net/ipv6/zsys_linux_ppc64.go | 172 + .../x/net/ipv6/zsys_linux_ppc64le.go | 172 + .../golang.org/x/net/ipv6/zsys_linux_s390x.go | 172 + vendor/golang.org/x/net/ipv6/zsys_netbsd.go | 84 + vendor/golang.org/x/net/ipv6/zsys_openbsd.go | 93 + vendor/golang.org/x/net/ipv6/zsys_solaris.go | 131 + .../golang.org/x/net/lex/httplex/httplex.go | 351 + .../x/net/lex/httplex/httplex_test.go | 119 + vendor/golang.org/x/net/lif/address.go | 105 + vendor/golang.org/x/net/lif/address_test.go | 123 + vendor/golang.org/x/net/lif/binary.go | 115 + vendor/golang.org/x/net/lif/defs_solaris.go | 90 + vendor/golang.org/x/net/lif/lif.go | 43 + vendor/golang.org/x/net/lif/link.go | 126 + vendor/golang.org/x/net/lif/link_test.go | 63 + vendor/golang.org/x/net/lif/sys.go | 21 + .../golang.org/x/net/lif/sys_solaris_amd64.s | 8 + vendor/golang.org/x/net/lif/syscall.go | 28 + .../x/net/lif/zsys_solaris_amd64.go | 103 + vendor/golang.org/x/net/nettest/conntest.go | 456 + .../golang.org/x/net/nettest/conntest_go16.go | 24 + .../golang.org/x/net/nettest/conntest_go17.go | 24 + .../golang.org/x/net/nettest/conntest_test.go | 76 + vendor/golang.org/x/net/netutil/listen.go | 74 + .../golang.org/x/net/netutil/listen_test.go | 147 + vendor/golang.org/x/net/proxy/direct.go | 18 + vendor/golang.org/x/net/proxy/per_host.go | 140 + .../golang.org/x/net/proxy/per_host_test.go | 55 + vendor/golang.org/x/net/proxy/proxy.go | 134 + vendor/golang.org/x/net/proxy/proxy_test.go | 123 + vendor/golang.org/x/net/proxy/socks5.go | 36 + vendor/golang.org/x/net/publicsuffix/gen.go | 713 + vendor/golang.org/x/net/publicsuffix/list.go | 135 + .../x/net/publicsuffix/list_test.go | 416 + vendor/golang.org/x/net/publicsuffix/table.go | 9534 +++++++++ .../x/net/publicsuffix/table_test.go | 16959 ++++++++++++++++ vendor/golang.org/x/net/route/address.go | 425 + .../x/net/route/address_darwin_test.go | 63 + vendor/golang.org/x/net/route/address_test.go | 103 + vendor/golang.org/x/net/route/binary.go | 90 + vendor/golang.org/x/net/route/defs_darwin.go | 114 + .../golang.org/x/net/route/defs_dragonfly.go | 113 + vendor/golang.org/x/net/route/defs_freebsd.go | 337 + vendor/golang.org/x/net/route/defs_netbsd.go | 112 + vendor/golang.org/x/net/route/defs_openbsd.go | 116 + vendor/golang.org/x/net/route/interface.go | 64 + .../x/net/route/interface_announce.go | 32 + .../x/net/route/interface_classic.go | 66 + .../x/net/route/interface_freebsd.go | 78 + .../x/net/route/interface_multicast.go | 30 + .../x/net/route/interface_openbsd.go | 90 + vendor/golang.org/x/net/route/message.go | 72 + .../x/net/route/message_darwin_test.go | 34 + .../x/net/route/message_freebsd_test.go | 92 + vendor/golang.org/x/net/route/message_test.go | 239 + 
vendor/golang.org/x/net/route/route.go | 123 + .../golang.org/x/net/route/route_classic.go | 75 + .../golang.org/x/net/route/route_openbsd.go | 65 + vendor/golang.org/x/net/route/route_test.go | 390 + vendor/golang.org/x/net/route/sys.go | 39 + vendor/golang.org/x/net/route/sys_darwin.go | 87 + .../golang.org/x/net/route/sys_dragonfly.go | 76 + vendor/golang.org/x/net/route/sys_freebsd.go | 155 + vendor/golang.org/x/net/route/sys_netbsd.go | 71 + vendor/golang.org/x/net/route/sys_openbsd.go | 80 + vendor/golang.org/x/net/route/syscall.go | 28 + vendor/golang.org/x/net/route/zsys_darwin.go | 99 + .../golang.org/x/net/route/zsys_dragonfly.go | 98 + .../x/net/route/zsys_freebsd_386.go | 126 + .../x/net/route/zsys_freebsd_amd64.go | 123 + .../x/net/route/zsys_freebsd_arm.go | 123 + vendor/golang.org/x/net/route/zsys_netbsd.go | 97 + vendor/golang.org/x/net/route/zsys_openbsd.go | 101 + vendor/golang.org/x/net/trace/events.go | 532 + vendor/golang.org/x/net/trace/histogram.go | 365 + .../golang.org/x/net/trace/histogram_test.go | 325 + vendor/golang.org/x/net/trace/trace.go | 1103 + vendor/golang.org/x/net/trace/trace_go16.go | 21 + vendor/golang.org/x/net/trace/trace_go17.go | 21 + vendor/golang.org/x/net/trace/trace_test.go | 178 + vendor/golang.org/x/net/webdav/file.go | 796 + vendor/golang.org/x/net/webdav/file_go1.6.go | 17 + vendor/golang.org/x/net/webdav/file_go1.7.go | 16 + vendor/golang.org/x/net/webdav/file_test.go | 1184 ++ vendor/golang.org/x/net/webdav/if.go | 173 + vendor/golang.org/x/net/webdav/if_test.go | 322 + .../x/net/webdav/internal/xml/README | 11 + .../x/net/webdav/internal/xml/atom_test.go | 56 + .../x/net/webdav/internal/xml/example_test.go | 151 + .../x/net/webdav/internal/xml/marshal.go | 1223 ++ .../x/net/webdav/internal/xml/marshal_test.go | 1939 ++ .../x/net/webdav/internal/xml/read.go | 692 + .../x/net/webdav/internal/xml/read_test.go | 744 + .../x/net/webdav/internal/xml/typeinfo.go | 371 + .../x/net/webdav/internal/xml/xml.go | 1998 ++ .../x/net/webdav/internal/xml/xml_test.go | 752 + .../x/net/webdav/litmus_test_server.go | 94 + vendor/golang.org/x/net/webdav/lock.go | 445 + vendor/golang.org/x/net/webdav/lock_test.go | 731 + vendor/golang.org/x/net/webdav/prop.go | 418 + vendor/golang.org/x/net/webdav/prop_test.go | 613 + vendor/golang.org/x/net/webdav/webdav.go | 702 + vendor/golang.org/x/net/webdav/webdav_test.go | 344 + vendor/golang.org/x/net/webdav/xml.go | 519 + vendor/golang.org/x/net/webdav/xml_test.go | 906 + vendor/golang.org/x/net/websocket/client.go | 106 + vendor/golang.org/x/net/websocket/dial.go | 24 + .../golang.org/x/net/websocket/dial_test.go | 43 + .../x/net/websocket/exampledial_test.go | 31 + .../x/net/websocket/examplehandler_test.go | 26 + vendor/golang.org/x/net/websocket/hybi.go | 583 + .../golang.org/x/net/websocket/hybi_test.go | 608 + vendor/golang.org/x/net/websocket/server.go | 113 + .../golang.org/x/net/websocket/websocket.go | 448 + .../x/net/websocket/websocket_test.go | 665 + vendor/golang.org/x/net/xsrftoken/xsrf.go | 94 + .../golang.org/x/net/xsrftoken/xsrf_test.go | 83 + vendor/golang.org/x/oauth2/.travis.yml | 13 + vendor/golang.org/x/oauth2/AUTHORS | 3 + vendor/golang.org/x/oauth2/CONTRIBUTING.md | 26 + vendor/golang.org/x/oauth2/CONTRIBUTORS | 3 + vendor/golang.org/x/oauth2/LICENSE | 27 + vendor/golang.org/x/oauth2/README.md | 77 + vendor/golang.org/x/oauth2/amazon/amazon.go | 16 + .../x/oauth2/bitbucket/bitbucket.go | 16 + .../clientcredentials/clientcredentials.go | 109 + .../clientcredentials_test.go | 97 + 
vendor/golang.org/x/oauth2/example_test.go | 89 + .../golang.org/x/oauth2/facebook/facebook.go | 16 + vendor/golang.org/x/oauth2/fitbit/fitbit.go | 16 + .../x/oauth2/foursquare/foursquare.go | 16 + vendor/golang.org/x/oauth2/github/github.go | 16 + .../golang.org/x/oauth2/google/appengine.go | 89 + .../x/oauth2/google/appengine_hook.go | 14 + .../x/oauth2/google/appengineflex_hook.go | 11 + vendor/golang.org/x/oauth2/google/default.go | 115 + vendor/golang.org/x/oauth2/google/doc_go19.go | 42 + .../x/oauth2/google/doc_not_go19.go | 43 + .../x/oauth2/google/example_test.go | 162 + vendor/golang.org/x/oauth2/google/go19.go | 57 + vendor/golang.org/x/oauth2/google/google.go | 192 + .../golang.org/x/oauth2/google/google_test.go | 116 + vendor/golang.org/x/oauth2/google/jwt.go | 74 + vendor/golang.org/x/oauth2/google/jwt_test.go | 91 + vendor/golang.org/x/oauth2/google/not_go19.go | 54 + vendor/golang.org/x/oauth2/google/sdk.go | 201 + vendor/golang.org/x/oauth2/google/sdk_test.go | 107 + .../oauth2/google/testdata/gcloud/credentials | 122 + .../oauth2/google/testdata/gcloud/properties | 2 + vendor/golang.org/x/oauth2/heroku/heroku.go | 16 + vendor/golang.org/x/oauth2/hipchat/hipchat.go | 60 + .../x/oauth2/internal/client_appengine.go | 13 + vendor/golang.org/x/oauth2/internal/doc.go | 6 + vendor/golang.org/x/oauth2/internal/oauth2.go | 37 + vendor/golang.org/x/oauth2/internal/token.go | 266 + .../x/oauth2/internal/token_test.go | 112 + .../golang.org/x/oauth2/internal/transport.go | 34 + vendor/golang.org/x/oauth2/jira/jira.go | 167 + vendor/golang.org/x/oauth2/jira/jira_test.go | 185 + vendor/golang.org/x/oauth2/jws/jws.go | 182 + vendor/golang.org/x/oauth2/jws/jws_test.go | 46 + .../golang.org/x/oauth2/jwt/example_test.go | 33 + vendor/golang.org/x/oauth2/jwt/jwt.go | 162 + vendor/golang.org/x/oauth2/jwt/jwt_test.go | 221 + .../golang.org/x/oauth2/linkedin/linkedin.go | 16 + .../x/oauth2/mailchimp/mailchimp.go | 17 + vendor/golang.org/x/oauth2/mailru/mailru.go | 16 + .../x/oauth2/mediamath/mediamath.go | 22 + .../x/oauth2/microsoft/microsoft.go | 31 + vendor/golang.org/x/oauth2/oauth2.go | 353 + vendor/golang.org/x/oauth2/oauth2_test.go | 505 + .../x/oauth2/odnoklassniki/odnoklassniki.go | 16 + vendor/golang.org/x/oauth2/paypal/paypal.go | 22 + vendor/golang.org/x/oauth2/slack/slack.go | 16 + vendor/golang.org/x/oauth2/spotify/spotify.go | 16 + vendor/golang.org/x/oauth2/token.go | 175 + vendor/golang.org/x/oauth2/token_test.go | 72 + vendor/golang.org/x/oauth2/transport.go | 132 + vendor/golang.org/x/oauth2/transport_test.go | 108 + vendor/golang.org/x/oauth2/twitch/twitch.go | 19 + vendor/golang.org/x/oauth2/uber/uber.go | 16 + vendor/golang.org/x/oauth2/vk/vk.go | 16 + vendor/golang.org/x/oauth2/yahoo/yahoo.go | 17 + vendor/golang.org/x/oauth2/yandex/yandex.go | 16 + .../google.golang.org/appengine/.travis.yml | 18 + vendor/google.golang.org/appengine/LICENSE | 202 + vendor/google.golang.org/appengine/README.md | 73 + .../google.golang.org/appengine/aetest/doc.go | 42 + .../appengine/aetest/instance.go | 51 + .../appengine/aetest/instance_classic.go | 21 + .../appengine/aetest/instance_test.go | 116 + .../appengine/aetest/instance_vm.go | 276 + .../appengine/aetest/user.go | 36 + .../google.golang.org/appengine/appengine.go | 112 + .../appengine/appengine_test.go | 49 + .../appengine/appengine_vm.go | 20 + .../appengine/blobstore/blobstore.go | 276 + .../appengine/blobstore/blobstore_test.go | 183 + .../appengine/blobstore/read.go | 160 + .../appengine/capability/capability.go | 52 + 
.../appengine/channel/channel.go | 83 + .../appengine/channel/channel_test.go | 21 + .../appengine/cloudsql/cloudsql.go | 62 + .../appengine/cloudsql/cloudsql_classic.go | 17 + .../appengine/cloudsql/cloudsql_vm.go | 16 + .../appengine/cmd/aebundler/aebundler.go | 342 + .../appengine/cmd/aedeploy/aedeploy.go | 268 + .../appengine/cmd/aefix/ae.go | 185 + .../appengine/cmd/aefix/ae_test.go | 144 + .../appengine/cmd/aefix/fix.go | 848 + .../appengine/cmd/aefix/main.go | 258 + .../appengine/cmd/aefix/main_test.go | 129 + .../appengine/cmd/aefix/typecheck.go | 673 + .../appengine/datastore/datastore.go | 406 + .../appengine/datastore/datastore_test.go | 1567 ++ .../appengine/datastore/doc.go | 351 + .../appengine/datastore/key.go | 309 + .../appengine/datastore/key_test.go | 204 + .../appengine/datastore/load.go | 334 + .../appengine/datastore/metadata.go | 78 + .../appengine/datastore/prop.go | 296 + .../appengine/datastore/prop_test.go | 604 + .../appengine/datastore/query.go | 724 + .../appengine/datastore/query_test.go | 583 + .../appengine/datastore/save.go | 300 + .../appengine/datastore/time_test.go | 65 + .../appengine/datastore/transaction.go | 87 + .../appengine/delay/delay.go | 278 + .../appengine/delay/delay_test.go | 375 + .../appengine/demos/guestbook/app.yaml | 14 + .../appengine/demos/guestbook/favicon.ico | Bin 0 -> 1150 bytes .../appengine/demos/guestbook/guestbook.go | 109 + .../appengine/demos/guestbook/index.yaml | 7 + .../demos/guestbook/templates/guestbook.html | 26 + .../appengine/demos/helloworld/app.yaml | 10 + .../appengine/demos/helloworld/favicon.ico | Bin 0 -> 1150 bytes .../appengine/demos/helloworld/helloworld.go | 50 + vendor/google.golang.org/appengine/errors.go | 46 + .../google.golang.org/appengine/file/file.go | 28 + .../google.golang.org/appengine/identity.go | 142 + .../appengine/image/image.go | 67 + .../appengine/internal/aetesting/fake.go | 81 + .../appengine/internal/api.go | 646 + .../appengine/internal/api_classic.go | 159 + .../appengine/internal/api_common.go | 86 + .../appengine/internal/api_race_test.go | 9 + .../appengine/internal/api_test.go | 467 + .../appengine/internal/app_id.go | 28 + .../appengine/internal/app_id_test.go | 34 + .../app_identity/app_identity_service.pb.go | 296 + .../app_identity/app_identity_service.proto | 64 + .../appengine/internal/base/api_base.pb.go | 133 + .../appengine/internal/base/api_base.proto | 33 + .../blobstore/blobstore_service.pb.go | 347 + .../blobstore/blobstore_service.proto | 71 + .../capability/capability_service.pb.go | 125 + .../capability/capability_service.proto | 28 + .../internal/channel/channel_service.pb.go | 154 + .../internal/channel/channel_service.proto | 30 + .../internal/datastore/datastore_v3.pb.go | 2778 +++ .../internal/datastore/datastore_v3.proto | 541 + .../appengine/internal/identity.go | 14 + .../appengine/internal/identity_classic.go | 27 + .../appengine/internal/identity_vm.go | 97 + .../internal/image/images_service.pb.go | 845 + .../internal/image/images_service.proto | 162 + .../appengine/internal/internal.go | 110 + .../appengine/internal/internal_vm_test.go | 60 + .../appengine/internal/log/log_service.pb.go | 899 + .../appengine/internal/log/log_service.proto | 150 + .../internal/mail/mail_service.pb.go | 229 + .../internal/mail/mail_service.proto | 45 + .../appengine/internal/main.go | 15 + .../appengine/internal/main_vm.go | 44 + .../internal/memcache/memcache_service.pb.go | 938 + .../internal/memcache/memcache_service.proto | 165 + .../appengine/internal/metadata.go | 61 +
.../internal/modules/modules_service.pb.go | 375 + .../internal/modules/modules_service.proto | 80 + .../appengine/internal/net.go | 56 + .../appengine/internal/net_test.go | 58 + .../appengine/internal/regen.sh | 40 + .../internal/remote_api/remote_api.pb.go | 231 + .../internal/remote_api/remote_api.proto | 44 + .../appengine/internal/search/search.pb.go | 2127 ++ .../appengine/internal/search/search.proto | 388 + .../internal/socket/socket_service.pb.go | 1858 ++ .../internal/socket/socket_service.proto | 460 + .../internal/system/system_service.pb.go | 198 + .../internal/system/system_service.proto | 49 + .../taskqueue/taskqueue_service.pb.go | 1888 + .../taskqueue/taskqueue_service.proto | 342 + .../appengine/internal/transaction.go | 107 + .../internal/urlfetch/urlfetch_service.pb.go | 355 + .../internal/urlfetch/urlfetch_service.proto | 64 + .../internal/user/user_service.pb.go | 289 + .../internal/user/user_service.proto | 58 + .../internal/xmpp/xmpp_service.pb.go | 427 + .../internal/xmpp/xmpp_service.proto | 83 + vendor/google.golang.org/appengine/log/api.go | 40 + vendor/google.golang.org/appengine/log/log.go | 323 + .../appengine/log/log_test.go | 112 + .../google.golang.org/appengine/mail/mail.go | 123 + .../appengine/mail/mail_test.go | 65 + .../appengine/memcache/memcache.go | 526 + .../appengine/memcache/memcache_test.go | 263 + .../appengine/module/module.go | 113 + .../appengine/module/module_test.go | 124 + .../google.golang.org/appengine/namespace.go | 25 + .../appengine/namespace_test.go | 39 + .../appengine/remote_api/client.go | 174 + .../appengine/remote_api/client_test.go | 24 + .../appengine/remote_api/remote_api.go | 152 + .../appengine/runtime/runtime.go | 148 + .../appengine/runtime/runtime_test.go | 101 + .../google.golang.org/appengine/search/doc.go | 209 + .../appengine/search/field.go | 82 + .../appengine/search/search.go | 1121 + .../appengine/search/search_test.go | 1000 + .../appengine/search/struct.go | 251 + .../appengine/search/struct_test.go | 213 + .../google.golang.org/appengine/socket/doc.go | 10 + .../appengine/socket/socket_classic.go | 290 + .../appengine/socket/socket_vm.go | 64 + .../appengine/taskqueue/taskqueue.go | 496 + .../appengine/taskqueue/taskqueue_test.go | 116 + vendor/google.golang.org/appengine/timeout.go | 20 + .../appengine/urlfetch/urlfetch.go | 210 + .../google.golang.org/appengine/user/oauth.go | 52 + .../google.golang.org/appengine/user/user.go | 84 + .../appengine/user/user_classic.go | 35 + .../appengine/user/user_test.go | 99 + .../appengine/user/user_vm.go | 38 + .../google.golang.org/appengine/xmpp/xmpp.go | 253 + .../appengine/xmpp/xmpp_test.go | 173 +
2025 files changed, 456739 insertions(+)
create mode 100644 Gopkg.lock create mode 100644 Gopkg.toml create mode 100644 LICENSE create mode 100644 README.md create mode 100644 cmd/hdxd/main.go create mode 100644 db.go create mode 100644 ip.go create mode 100644 routes.go create mode 100644 server.go create mode 100644 sql.go create mode 100644 vendor/cloud.google.com/go/.travis.yml create mode 100644 vendor/cloud.google.com/go/AUTHORS create mode 100644 vendor/cloud.google.com/go/CONTRIBUTING.md create mode 100644 vendor/cloud.google.com/go/CONTRIBUTORS create mode 100644 vendor/cloud.google.com/go/LICENSE create mode 100644 vendor/cloud.google.com/go/MIGRATION.md create mode 100644 vendor/cloud.google.com/go/README.md create mode 100644 vendor/cloud.google.com/go/appveyor.yml create mode 100644 vendor/cloud.google.com/go/authexample_test.go create mode 100644 
vendor/cloud.google.com/go/bigquery/benchmarks/README.md create mode 100644 vendor/cloud.google.com/go/bigquery/benchmarks/bench.go create mode 100644 vendor/cloud.google.com/go/bigquery/benchmarks/queries.json create mode 100644 vendor/cloud.google.com/go/bigquery/bigquery.go create mode 100644 vendor/cloud.google.com/go/bigquery/copy.go create mode 100644 vendor/cloud.google.com/go/bigquery/copy_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/dataset.go create mode 100644 vendor/cloud.google.com/go/bigquery/dataset_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/ListDataSources_smoke_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client.go create mode 100644 vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client_example_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/doc.go create mode 100644 vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/mock_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/path_funcs.go create mode 100644 vendor/cloud.google.com/go/bigquery/doc.go create mode 100644 vendor/cloud.google.com/go/bigquery/error.go create mode 100644 vendor/cloud.google.com/go/bigquery/error_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/examples_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/external.go create mode 100644 vendor/cloud.google.com/go/bigquery/external_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/extract.go create mode 100644 vendor/cloud.google.com/go/bigquery/extract_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/file.go create mode 100644 vendor/cloud.google.com/go/bigquery/file_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/gcs.go create mode 100644 vendor/cloud.google.com/go/bigquery/integration_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/iterator.go create mode 100644 vendor/cloud.google.com/go/bigquery/iterator_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/job.go create mode 100644 vendor/cloud.google.com/go/bigquery/job_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/load.go create mode 100644 vendor/cloud.google.com/go/bigquery/load_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/nulls.go create mode 100644 vendor/cloud.google.com/go/bigquery/nulls_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/oc_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/params.go create mode 100644 vendor/cloud.google.com/go/bigquery/params_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/query.go create mode 100644 vendor/cloud.google.com/go/bigquery/query_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/read_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/schema.go create mode 100644 vendor/cloud.google.com/go/bigquery/schema_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/table.go create mode 100644 vendor/cloud.google.com/go/bigquery/table_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/uploader.go create mode 100644 vendor/cloud.google.com/go/bigquery/uploader_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/value.go create mode 100644 vendor/cloud.google.com/go/bigquery/value_test.go create mode 100644 vendor/cloud.google.com/go/bigtable/admin.go create mode 100644 vendor/cloud.google.com/go/bigtable/admin_test.go create mode 100644 
vendor/cloud.google.com/go/bigtable/bigtable.go create mode 100644 vendor/cloud.google.com/go/bigtable/bigtable_test.go create mode 100644 vendor/cloud.google.com/go/bigtable/bttest/example_test.go create mode 100644 vendor/cloud.google.com/go/bigtable/bttest/inmem.go create mode 100644 vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go create mode 100644 vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go create mode 100644 vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt_test.go create mode 100644 vendor/cloud.google.com/go/bigtable/cmd/cbt/cbtdoc.go create mode 100644 vendor/cloud.google.com/go/bigtable/cmd/emulator/cbtemulator.go create mode 100644 vendor/cloud.google.com/go/bigtable/cmd/loadtest/loadtest.go create mode 100644 vendor/cloud.google.com/go/bigtable/cmd/scantest/scantest.go create mode 100644 vendor/cloud.google.com/go/bigtable/doc.go create mode 100644 vendor/cloud.google.com/go/bigtable/export_test.go create mode 100644 vendor/cloud.google.com/go/bigtable/filter.go create mode 100644 vendor/cloud.google.com/go/bigtable/gc.go create mode 100644 vendor/cloud.google.com/go/bigtable/gc_test.go create mode 100644 vendor/cloud.google.com/go/bigtable/go18.go create mode 100644 vendor/cloud.google.com/go/bigtable/internal/cbtconfig/cbtconfig.go create mode 100644 vendor/cloud.google.com/go/bigtable/internal/gax/call_option.go create mode 100644 vendor/cloud.google.com/go/bigtable/internal/gax/invoke.go create mode 100644 vendor/cloud.google.com/go/bigtable/internal/gax/invoke_test.go create mode 100644 vendor/cloud.google.com/go/bigtable/internal/option/option.go create mode 100644 vendor/cloud.google.com/go/bigtable/internal/stat/stats.go create mode 100644 vendor/cloud.google.com/go/bigtable/not_go18.go create mode 100644 vendor/cloud.google.com/go/bigtable/reader.go create mode 100644 vendor/cloud.google.com/go/bigtable/reader_test.go create mode 100644 vendor/cloud.google.com/go/bigtable/retry_test.go create mode 100644 vendor/cloud.google.com/go/bigtable/testdata/read-rows-acceptance-test.json create mode 100644 vendor/cloud.google.com/go/civil/civil.go create mode 100644 vendor/cloud.google.com/go/civil/civil_test.go create mode 100644 vendor/cloud.google.com/go/cloud.go create mode 100644 vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/debuglet.go create mode 100644 vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints/breakpoints.go create mode 100644 vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints/breakpoints_test.go create mode 100644 vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client.go create mode 100644 vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client_test.go create mode 100644 vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector.go create mode 100644 vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector_test.go create mode 100644 vendor/cloud.google.com/go/compute/metadata/metadata.go create mode 100644 vendor/cloud.google.com/go/compute/metadata/metadata_test.go create mode 100644 vendor/cloud.google.com/go/container/apiv1/ListClusters_smoke_test.go create mode 100644 vendor/cloud.google.com/go/container/apiv1/cluster_manager_client.go create mode 100644 vendor/cloud.google.com/go/container/apiv1/cluster_manager_client_example_test.go create mode 100644 vendor/cloud.google.com/go/container/apiv1/doc.go create mode 100644 vendor/cloud.google.com/go/container/apiv1/mock_test.go create 
mode 100644 vendor/cloud.google.com/go/container/container.go create mode 100644 vendor/cloud.google.com/go/dataproc/apiv1/ListClusters_smoke_test.go create mode 100644 vendor/cloud.google.com/go/dataproc/apiv1/cluster_controller_client.go create mode 100644 vendor/cloud.google.com/go/dataproc/apiv1/cluster_controller_client_example_test.go create mode 100644 vendor/cloud.google.com/go/dataproc/apiv1/doc.go create mode 100644 vendor/cloud.google.com/go/dataproc/apiv1/job_controller_client.go create mode 100644 vendor/cloud.google.com/go/dataproc/apiv1/job_controller_client_example_test.go create mode 100644 vendor/cloud.google.com/go/dataproc/apiv1/mock_test.go create mode 100644 vendor/cloud.google.com/go/datastore/client.go create mode 100644 vendor/cloud.google.com/go/datastore/datastore.go create mode 100644 vendor/cloud.google.com/go/datastore/datastore_test.go create mode 100644 vendor/cloud.google.com/go/datastore/doc.go create mode 100644 vendor/cloud.google.com/go/datastore/errors.go create mode 100644 vendor/cloud.google.com/go/datastore/example_test.go create mode 100644 vendor/cloud.google.com/go/datastore/integration_test.go create mode 100644 vendor/cloud.google.com/go/datastore/key.go create mode 100644 vendor/cloud.google.com/go/datastore/key_test.go create mode 100644 vendor/cloud.google.com/go/datastore/load.go create mode 100644 vendor/cloud.google.com/go/datastore/load_test.go create mode 100644 vendor/cloud.google.com/go/datastore/mutation.go create mode 100644 vendor/cloud.google.com/go/datastore/mutation_test.go create mode 100644 vendor/cloud.google.com/go/datastore/oc_test.go create mode 100644 vendor/cloud.google.com/go/datastore/prop.go create mode 100644 vendor/cloud.google.com/go/datastore/query.go create mode 100644 vendor/cloud.google.com/go/datastore/query_test.go create mode 100644 vendor/cloud.google.com/go/datastore/save.go create mode 100644 vendor/cloud.google.com/go/datastore/save_test.go create mode 100644 vendor/cloud.google.com/go/datastore/testdata/index.yaml create mode 100644 vendor/cloud.google.com/go/datastore/time.go create mode 100644 vendor/cloud.google.com/go/datastore/time_test.go create mode 100644 vendor/cloud.google.com/go/datastore/transaction.go create mode 100644 vendor/cloud.google.com/go/datastore/transaction_test.go create mode 100644 vendor/cloud.google.com/go/debugger/apiv2/controller2_client.go create mode 100644 vendor/cloud.google.com/go/debugger/apiv2/controller2_client_example_test.go create mode 100644 vendor/cloud.google.com/go/debugger/apiv2/debugger2_client.go create mode 100644 vendor/cloud.google.com/go/debugger/apiv2/debugger2_client_example_test.go create mode 100644 vendor/cloud.google.com/go/debugger/apiv2/doc.go create mode 100644 vendor/cloud.google.com/go/debugger/apiv2/mock_test.go create mode 100644 vendor/cloud.google.com/go/dlp/apiv2/dlp_client.go create mode 100644 vendor/cloud.google.com/go/dlp/apiv2/dlp_client_example_test.go create mode 100644 vendor/cloud.google.com/go/dlp/apiv2/doc.go create mode 100644 vendor/cloud.google.com/go/dlp/apiv2/mock_test.go create mode 100644 vendor/cloud.google.com/go/dlp/apiv2beta1/InspectContent_smoke_test.go create mode 100644 vendor/cloud.google.com/go/dlp/apiv2beta1/dlp_client.go create mode 100644 vendor/cloud.google.com/go/dlp/apiv2beta1/dlp_client_example_test.go create mode 100644 vendor/cloud.google.com/go/dlp/apiv2beta1/doc.go create mode 100644 vendor/cloud.google.com/go/dlp/apiv2beta1/mock_test.go create mode 100644 
vendor/cloud.google.com/go/dlp/apiv2beta1/path_funcs.go create mode 100644 vendor/cloud.google.com/go/errorreporting/apiv1beta1/ReportErrorEvent_smoke_test.go create mode 100644 vendor/cloud.google.com/go/errorreporting/apiv1beta1/doc.go create mode 100644 vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client.go create mode 100644 vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client_example_test.go create mode 100644 vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client.go create mode 100644 vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client_example_test.go create mode 100644 vendor/cloud.google.com/go/errorreporting/apiv1beta1/mock_test.go create mode 100644 vendor/cloud.google.com/go/errorreporting/apiv1beta1/path_funcs.go create mode 100644 vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client.go create mode 100644 vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client_example_test.go create mode 100644 vendor/cloud.google.com/go/errorreporting/errors.go create mode 100644 vendor/cloud.google.com/go/errorreporting/errors_test.go create mode 100644 vendor/cloud.google.com/go/errorreporting/example_test.go create mode 100644 vendor/cloud.google.com/go/errorreporting/stack_test.go create mode 100644 vendor/cloud.google.com/go/firestore/Makefile create mode 100644 vendor/cloud.google.com/go/firestore/apiv1beta1/doc.go create mode 100644 vendor/cloud.google.com/go/firestore/apiv1beta1/firestore_client.go create mode 100644 vendor/cloud.google.com/go/firestore/apiv1beta1/firestore_client_example_test.go create mode 100644 vendor/cloud.google.com/go/firestore/apiv1beta1/mock_test.go create mode 100644 vendor/cloud.google.com/go/firestore/apiv1beta1/path_funcs.go create mode 100644 vendor/cloud.google.com/go/firestore/client.go create mode 100644 vendor/cloud.google.com/go/firestore/client_test.go create mode 100644 vendor/cloud.google.com/go/firestore/collref.go create mode 100644 vendor/cloud.google.com/go/firestore/collref_test.go create mode 100644 vendor/cloud.google.com/go/firestore/cross_language_test.go create mode 100644 vendor/cloud.google.com/go/firestore/doc.go create mode 100644 vendor/cloud.google.com/go/firestore/docref.go create mode 100644 vendor/cloud.google.com/go/firestore/docref_test.go create mode 100644 vendor/cloud.google.com/go/firestore/document.go create mode 100644 vendor/cloud.google.com/go/firestore/document_test.go create mode 100644 vendor/cloud.google.com/go/firestore/examples_test.go create mode 100644 vendor/cloud.google.com/go/firestore/fieldpath.go create mode 100644 vendor/cloud.google.com/go/firestore/fieldpath_test.go create mode 100644 vendor/cloud.google.com/go/firestore/from_value.go create mode 100644 vendor/cloud.google.com/go/firestore/from_value_test.go create mode 100644 vendor/cloud.google.com/go/firestore/genproto/test.pb.go create mode 100644 vendor/cloud.google.com/go/firestore/integration_test.go create mode 100644 vendor/cloud.google.com/go/firestore/internal/Makefile create mode 100644 vendor/cloud.google.com/go/firestore/internal/doc-snippets.go create mode 100644 vendor/cloud.google.com/go/firestore/internal/doc.template create mode 100644 vendor/cloud.google.com/go/firestore/internal/snipdoc.awk create mode 100644 vendor/cloud.google.com/go/firestore/mock_test.go create mode 100644 vendor/cloud.google.com/go/firestore/options.go create mode 100644 vendor/cloud.google.com/go/firestore/options_test.go create mode 100644 
vendor/cloud.google.com/go/firestore/order.go
 create mode 100644 vendor/cloud.google.com/go/firestore/order_test.go
 create mode 100644 vendor/cloud.google.com/go/firestore/query.go
 create mode 100644 vendor/cloud.google.com/go/firestore/query_test.go
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/VERSION
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/create-basic.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/create-complex.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/create-del-noarray-nested.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/create-del-noarray.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/create-empty.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/create-nodel.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/create-nosplit.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/create-special-chars.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/create-st-alone.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/create-st-multi.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/create-st-nested.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/create-st-noarray-nested.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/create-st-noarray.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/create-st.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/delete-exists-precond.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/delete-no-precond.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/delete-time-precond.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/get-basic.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-bad-NaN.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-bad-null.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-order.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-orderby-name.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-where-eq.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-where-neq-orderby.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-where-neq.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-cursor-no-order.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-1a.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-1b.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-2.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-docid.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-last-wins.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-del-cursor.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-del-where.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-invalid-operator.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-invalid-path-order.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-invalid-path-select.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-invalid-path-where.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-offset-limit-last-wins.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-offset-limit.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-order.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-select-empty.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-select-last-wins.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-select.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-st-cursor.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-st-where.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-where-2.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-where-NaN.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-where-null.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-where.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-wrong-collection.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-basic.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-complex.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-del-merge-alone.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-del-merge.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-del-mergeall.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-del-noarray-nested.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-del-noarray.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-del-nomerge.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-del-nonleaf.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-del-wo-merge.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-empty.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-merge-fp.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-merge-nested.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-merge-nonleaf.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-merge-prefix.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-merge-present.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-merge.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-mergeall-empty.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-mergeall-nested.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-mergeall.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-nodel.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-nosplit.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-special-chars.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-st-alone-mergeall.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-st-alone.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-st-merge-both.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-st-merge-nonleaf-alone.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-st-merge-nonleaf.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-st-merge-nowrite.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-st-mergeall.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-st-multi.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-st-nested.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-st-noarray-nested.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-st-noarray.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-st-nomerge.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/set-st.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-badchar.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-basic.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-complex.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-del-alone.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-del-dot.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-del-nested.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-del-noarray-nested.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-del-noarray.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-del.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-exists-precond.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-fp-empty-component.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-no-paths.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-paths-basic.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-paths-complex.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-paths-del-alone.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-paths-del-nested.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-paths-del-noarray-nested.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-paths-del-noarray.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-paths-del.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-paths-exists-precond.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-del.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-dup.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-empty-component.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-empty.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-multi.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-nosplit.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-paths-no-paths.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-paths-prefix-1.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-paths-prefix-2.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-paths-prefix-3.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-paths-special-chars.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-paths-st-alone.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-paths-st-multi.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-paths-st-nested.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-paths-st-noarray-nested.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-paths-st-noarray.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-paths-st.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-paths-uptime.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-prefix-1.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-prefix-2.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-prefix-3.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-quoting.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-split-top-level.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-split.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-st-alone.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-st-dot.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-st-multi.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-st-nested.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-st-noarray-nested.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-st-noarray.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-st.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/testdata/update-uptime.textproto
 create mode 100644 vendor/cloud.google.com/go/firestore/to_value.go
 create mode 100644 vendor/cloud.google.com/go/firestore/to_value_test.go
 create mode 100644 vendor/cloud.google.com/go/firestore/transaction.go
 create mode 100644 vendor/cloud.google.com/go/firestore/transaction_test.go
 create mode 100644 vendor/cloud.google.com/go/firestore/util_test.go
 create mode 100644 vendor/cloud.google.com/go/firestore/watch.go
 create mode 100644 vendor/cloud.google.com/go/firestore/watch_test.go
 create mode 100644 vendor/cloud.google.com/go/firestore/writebatch.go
 create mode 100644 vendor/cloud.google.com/go/firestore/writebatch_test.go
 create mode 100644 vendor/cloud.google.com/go/iam/admin/apiv1/doc.go
 create mode 100644 vendor/cloud.google.com/go/iam/admin/apiv1/iam_client.go
 create mode 100644 vendor/cloud.google.com/go/iam/admin/apiv1/iam_client_example_test.go
 create mode 100644 vendor/cloud.google.com/go/iam/admin/apiv1/mock_test.go
 create mode 100644 vendor/cloud.google.com/go/iam/admin/apiv1/policy_methods.go
 create mode 100644 vendor/cloud.google.com/go/iam/iam.go
 create mode 100644 vendor/cloud.google.com/go/iam/iam_test.go
 create mode 100644 vendor/cloud.google.com/go/import_test.go
 create mode 100644 vendor/cloud.google.com/go/internal/annotate.go
 create mode 100644 vendor/cloud.google.com/go/internal/annotate_test.go
 create mode 100644 vendor/cloud.google.com/go/internal/atomiccache/atomiccache.go
 create mode 100644 vendor/cloud.google.com/go/internal/atomiccache/atomiccache_test.go
 create mode 100644 vendor/cloud.google.com/go/internal/btree/README.md
 create mode 100644 vendor/cloud.google.com/go/internal/btree/benchmarks_test.go
 create mode 100644 vendor/cloud.google.com/go/internal/btree/btree.go
 create mode 100644 vendor/cloud.google.com/go/internal/btree/btree_test.go
 create mode 100644 vendor/cloud.google.com/go/internal/btree/debug.go
 create mode 100644 vendor/cloud.google.com/go/internal/fields/fields.go
 create mode 100644 vendor/cloud.google.com/go/internal/fields/fields_test.go
 create mode 100644 vendor/cloud.google.com/go/internal/fields/fold.go
 create mode 100644 vendor/cloud.google.com/go/internal/fields/fold_test.go
 create mode 100755 vendor/cloud.google.com/go/internal/kokoro/build.sh
 create mode 100644 vendor/cloud.google.com/go/internal/kokoro/kokoro-key.json.enc
 create mode 100644 vendor/cloud.google.com/go/internal/optional/optional.go
 create mode 100644 vendor/cloud.google.com/go/internal/optional/optional_test.go
 create mode 100644 vendor/cloud.google.com/go/internal/pretty/diff.go
 create mode 100644 vendor/cloud.google.com/go/internal/pretty/diff_test.go
 create mode 100644 vendor/cloud.google.com/go/internal/pretty/pretty.go
 create mode 100644 vendor/cloud.google.com/go/internal/pretty/pretty_test.go
 create mode 100644 vendor/cloud.google.com/go/internal/protostruct/protostruct.go
 create mode 100644 vendor/cloud.google.com/go/internal/protostruct/protostruct_test.go
 create mode 100644 vendor/cloud.google.com/go/internal/readme/Makefile
 create mode 100644 vendor/cloud.google.com/go/internal/readme/snipmd.awk
 create mode 100644 vendor/cloud.google.com/go/internal/readme/snippets.go
 create mode 100644 vendor/cloud.google.com/go/internal/readme/testdata/bad-no-name.go
 create mode 100644 vendor/cloud.google.com/go/internal/readme/testdata/bad-no-open.go
 create mode 100644 vendor/cloud.google.com/go/internal/readme/testdata/bad-nosnip.md
 create mode 100644 vendor/cloud.google.com/go/internal/readme/testdata/bad-spec.md
 create mode 100644 vendor/cloud.google.com/go/internal/readme/testdata/bad-unclosed.go
 create mode 100644 vendor/cloud.google.com/go/internal/readme/testdata/good.md
 create mode 100644 vendor/cloud.google.com/go/internal/readme/testdata/snips.go
 create mode 100644 vendor/cloud.google.com/go/internal/readme/testdata/want.md
 create mode 100644 vendor/cloud.google.com/go/internal/retry.go
 create mode 100644 vendor/cloud.google.com/go/internal/retry_test.go
 create mode 100644 vendor/cloud.google.com/go/internal/snipdoc/README.md
 create mode 100644 vendor/cloud.google.com/go/internal/snipdoc/sample-makefile
 create mode 100644 vendor/cloud.google.com/go/internal/snipdoc/snipdoc.awk
 create mode 100644 vendor/cloud.google.com/go/internal/testutil/cmp.go
 create mode 100644 vendor/cloud.google.com/go/internal/testutil/context.go
 create mode 100644 vendor/cloud.google.com/go/internal/testutil/go18.go
 create mode 100644 vendor/cloud.google.com/go/internal/testutil/server.go
 create mode 100644 vendor/cloud.google.com/go/internal/testutil/server_test.go
 create mode 100644 vendor/cloud.google.com/go/internal/testutil/unique.go
 create mode 100644 vendor/cloud.google.com/go/internal/testutil/unique_test.go
 create mode 100644 vendor/cloud.google.com/go/internal/trace/go18.go
 create mode 100644 vendor/cloud.google.com/go/internal/trace/go18_test.go
 create mode 100644 vendor/cloud.google.com/go/internal/trace/not_go18.go
 create mode 100644 vendor/cloud.google.com/go/internal/tracecontext/tracecontext.go
 create mode 100644 vendor/cloud.google.com/go/internal/tracecontext/tracecontext_test.go
 create mode 100755 vendor/cloud.google.com/go/internal/version/update_version.sh
 create mode 100644 vendor/cloud.google.com/go/internal/version/version.go
 create mode 100644 vendor/cloud.google.com/go/internal/version/version_test.go
 create mode 100644 vendor/cloud.google.com/go/issue_template.md
 create mode 100644 vendor/cloud.google.com/go/keys.tar.enc
 create mode 100644 vendor/cloud.google.com/go/language/apiv1/AnalyzeSentiment_smoke_test.go
 create mode 100644 vendor/cloud.google.com/go/language/apiv1/doc.go
 create mode 100644 vendor/cloud.google.com/go/language/apiv1/language_client.go
 create mode 100644 vendor/cloud.google.com/go/language/apiv1/language_client_example_test.go
 create mode 100644 vendor/cloud.google.com/go/language/apiv1/mock_test.go
 create mode 100644 vendor/cloud.google.com/go/language/apiv1beta2/AnalyzeSentiment_smoke_test.go
 create mode 100644 vendor/cloud.google.com/go/language/apiv1beta2/doc.go
 create mode 100644 vendor/cloud.google.com/go/language/apiv1beta2/language_client.go
 create mode 100644 vendor/cloud.google.com/go/language/apiv1beta2/language_client_example_test.go
 create mode 100644 vendor/cloud.google.com/go/language/apiv1beta2/mock_test.go
 create mode 100644 vendor/cloud.google.com/go/license_test.go
 create mode 100644 vendor/cloud.google.com/go/logging/apiv2/README.md
 create mode 100644 vendor/cloud.google.com/go/logging/apiv2/WriteLogEntries_smoke_test.go
 create mode 100644 vendor/cloud.google.com/go/logging/apiv2/config_client.go
 create mode 100644 vendor/cloud.google.com/go/logging/apiv2/config_client_example_test.go
 create mode 100644 vendor/cloud.google.com/go/logging/apiv2/doc.go
 create mode 100644 vendor/cloud.google.com/go/logging/apiv2/logging_client.go
 create mode 100644 vendor/cloud.google.com/go/logging/apiv2/logging_client_example_test.go
 create mode 100644 vendor/cloud.google.com/go/logging/apiv2/metrics_client.go
 create mode 100644 vendor/cloud.google.com/go/logging/apiv2/metrics_client_example_test.go
 create mode 100644 vendor/cloud.google.com/go/logging/apiv2/mock_test.go
 create mode 100644 vendor/cloud.google.com/go/logging/apiv2/path_funcs.go
 create mode 100644 vendor/cloud.google.com/go/logging/doc.go
 create mode 100644 vendor/cloud.google.com/go/logging/examples_test.go
 create mode 100644 vendor/cloud.google.com/go/logging/internal/common.go
 create mode 100644 vendor/cloud.google.com/go/logging/internal/testing/equal.go
 create mode 100644 vendor/cloud.google.com/go/logging/internal/testing/fake.go
 create mode 100644 vendor/cloud.google.com/go/logging/internal/testing/fake_test.go
 create mode 100644 vendor/cloud.google.com/go/logging/logadmin/example_entry_iterator_test.go
 create mode 100644 vendor/cloud.google.com/go/logging/logadmin/example_metric_iterator_test.go
 create mode 100644 vendor/cloud.google.com/go/logging/logadmin/example_paging_test.go
 create mode 100644 vendor/cloud.google.com/go/logging/logadmin/example_resource_iterator_test.go
 create mode 100644 vendor/cloud.google.com/go/logging/logadmin/example_sink_iterator_test.go
 create mode 100644 vendor/cloud.google.com/go/logging/logadmin/examples_test.go
 create mode 100644 vendor/cloud.google.com/go/logging/logadmin/logadmin.go
 create mode 100644 vendor/cloud.google.com/go/logging/logadmin/logadmin_test.go
 create mode 100644 vendor/cloud.google.com/go/logging/logadmin/metrics.go
 create mode 100644 vendor/cloud.google.com/go/logging/logadmin/metrics_test.go
 create mode 100644 vendor/cloud.google.com/go/logging/logadmin/resources.go
 create mode 100644 vendor/cloud.google.com/go/logging/logadmin/resources_test.go
 create mode 100644 vendor/cloud.google.com/go/logging/logadmin/sinks.go
 create mode 100644 vendor/cloud.google.com/go/logging/logadmin/sinks_test.go
 create mode 100644 vendor/cloud.google.com/go/logging/logging.go
 create mode 100644 vendor/cloud.google.com/go/logging/logging_test.go
 create mode 100644 vendor/cloud.google.com/go/logging/logging_unexported_test.go
 create mode 100644 vendor/cloud.google.com/go/longrunning/autogen/doc.go
 create mode 100644 vendor/cloud.google.com/go/longrunning/autogen/from_conn.go
 create mode 100644 vendor/cloud.google.com/go/longrunning/autogen/mock_test.go
 create mode 100644 vendor/cloud.google.com/go/longrunning/autogen/operations_client.go
 create mode 100644 vendor/cloud.google.com/go/longrunning/autogen/operations_client_example_test.go
 create mode 100644 vendor/cloud.google.com/go/longrunning/example_test.go
 create mode 100644 vendor/cloud.google.com/go/longrunning/longrunning.go
 create mode 100644 vendor/cloud.google.com/go/longrunning/longrunning_test.go
 create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/ListMonitoredResourceDescriptors_smoke_test.go
 create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client.go
 create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client_example_test.go
 create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/doc.go
 create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/group_client.go
 create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/group_client_example_test.go
 create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go
 create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/metric_client_example_test.go
 create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/mock_test.go
 create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client.go
 create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client_example_test.go
 create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/path_funcs.go
 create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/uptime_check_client.go
 create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/uptime_check_client_example_test.go
 create mode 100644 vendor/cloud.google.com/go/old-news.md
 create mode 100644 vendor/cloud.google.com/go/oslogin/apiv1beta/doc.go
 create mode 100644 vendor/cloud.google.com/go/oslogin/apiv1beta/mock_test.go
 create mode 100644 vendor/cloud.google.com/go/oslogin/apiv1beta/os_login_client.go
 create mode 100644 vendor/cloud.google.com/go/oslogin/apiv1beta/os_login_client_example_test.go
 create mode 100644 vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/dlp_client.go
 create mode 100644 vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/dlp_client_example_test.go
 create mode 100644 vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/doc.go
 create mode 100644 vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/mock_test.go
 create mode 100644 vendor/cloud.google.com/go/profiler/busybench/busybench.go
 create mode 100644 vendor/cloud.google.com/go/profiler/integration-test.sh
 create mode 100644 vendor/cloud.google.com/go/profiler/integration_test.go
 create mode 100644 vendor/cloud.google.com/go/profiler/mocks/mock_profiler_client.go
 create mode 100644 vendor/cloud.google.com/go/profiler/mutex.go
 create mode 100644 vendor/cloud.google.com/go/profiler/mutex_go17.go
 create mode 100644 vendor/cloud.google.com/go/profiler/profiler.go
 create mode 100644 vendor/cloud.google.com/go/profiler/profiler_example_test.go
 create mode 100644 vendor/cloud.google.com/go/profiler/profiler_test.go
 create mode 100644 vendor/cloud.google.com/go/profiler/proftest/proftest.go
 create mode 100644 vendor/cloud.google.com/go/profiler/symbolizer.go
 create mode 100644 vendor/cloud.google.com/go/profiler/symbolizer_test.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/apiv1/ListTopics_smoke_test.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/apiv1/README.md
 create mode 100644 vendor/cloud.google.com/go/pubsub/apiv1/doc.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/apiv1/mock_test.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/apiv1/path_funcs.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/apiv1/publisher_client_example_test.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/apiv1/pubsub_pull_example_test.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client_example_test.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/doc.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/endtoend_test.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/example_subscription_iterator_test.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/example_test.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/example_topic_iterator_test.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/fake_test.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/flow_controller.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/flow_controller_test.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/go18.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/integration_test.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/internal/distribution/distribution.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/internal/distribution/distribution_test.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/iterator.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/loadtest/benchmark_test.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/loadtest/cmd/loadtest.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/loadtest/loadtest.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/loadtest/pb/loadtest.pb.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/message.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/not_go18.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/pstest/examples_test.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/pstest/fake.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/pstest/fake_test.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/pstest_test.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/pubsub.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/pullstream.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/service.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/service_test.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/snapshot.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/streaming_pull_test.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/subscription.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/subscription_test.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/timeout_test.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/topic.go
 create mode 100644 vendor/cloud.google.com/go/pubsub/topic_test.go
 create mode 100755 vendor/cloud.google.com/go/regen-gapic.sh
 create mode 100644 vendor/cloud.google.com/go/rpcreplay/Makefile
 create mode 100644 vendor/cloud.google.com/go/rpcreplay/doc.go
 create mode 100644 vendor/cloud.google.com/go/rpcreplay/example_test.go
 create mode 100644 vendor/cloud.google.com/go/rpcreplay/fake_test.go
 create mode 100644 vendor/cloud.google.com/go/rpcreplay/proto/intstore/intstore.pb.go
 create mode 100644 vendor/cloud.google.com/go/rpcreplay/proto/intstore/intstore.proto
 create mode 100644 vendor/cloud.google.com/go/rpcreplay/proto/rpcreplay/rpcreplay.pb.go
 create mode 100644 vendor/cloud.google.com/go/rpcreplay/proto/rpcreplay/rpcreplay.proto
 create mode 100644 vendor/cloud.google.com/go/rpcreplay/rpcreplay.go
 create mode 100644 vendor/cloud.google.com/go/rpcreplay/rpcreplay_test.go
 create mode 100755 vendor/cloud.google.com/go/run-tests.sh
 create mode 100644 vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client.go
 create mode 100644 vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client_example_test.go
 create mode 100644 vendor/cloud.google.com/go/spanner/admin/database/apiv1/doc.go
 create mode 100644 vendor/cloud.google.com/go/spanner/admin/database/apiv1/mock_test.go
 create mode 100644 vendor/cloud.google.com/go/spanner/admin/database/apiv1/path_funcs.go
 create mode 100644 vendor/cloud.google.com/go/spanner/admin/instance/apiv1/doc.go
 create mode 100644 vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client.go
 create mode 100644 vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client_example_test.go
 create mode 100644 vendor/cloud.google.com/go/spanner/admin/instance/apiv1/mock_test.go
 create mode 100644 vendor/cloud.google.com/go/spanner/admin/instance/apiv1/path_funcs.go
 create mode 100644 vendor/cloud.google.com/go/spanner/apiv1/doc.go
 create mode 100644 vendor/cloud.google.com/go/spanner/apiv1/mock_test.go
 create mode 100644 vendor/cloud.google.com/go/spanner/apiv1/path_funcs.go
 create mode 100644 vendor/cloud.google.com/go/spanner/apiv1/spanner_client.go
 create mode 100644 vendor/cloud.google.com/go/spanner/apiv1/spanner_client_example_test.go
 create mode 100644 vendor/cloud.google.com/go/spanner/appengine.go
 create mode 100644 vendor/cloud.google.com/go/spanner/backoff.go
 create mode 100644 vendor/cloud.google.com/go/spanner/backoff_test.go
 create mode 100644 vendor/cloud.google.com/go/spanner/batch.go
 create mode 100644 vendor/cloud.google.com/go/spanner/batch_test.go
 create mode 100644 vendor/cloud.google.com/go/spanner/client.go
 create mode 100644 vendor/cloud.google.com/go/spanner/client_test.go
 create mode 100644 vendor/cloud.google.com/go/spanner/doc.go
 create mode 100644 vendor/cloud.google.com/go/spanner/errors.go
 create mode 100644 vendor/cloud.google.com/go/spanner/errors_test.go
 create mode 100644 vendor/cloud.google.com/go/spanner/examples_test.go
 create mode 100644 vendor/cloud.google.com/go/spanner/go18.go
 create mode 100644 vendor/cloud.google.com/go/spanner/internal/testutil/mockclient.go
 create mode 100644 vendor/cloud.google.com/go/spanner/internal/testutil/mockserver.go
 create mode 100644 vendor/cloud.google.com/go/spanner/key.go
 create mode 100644 vendor/cloud.google.com/go/spanner/key_test.go
 create mode 100644 vendor/cloud.google.com/go/spanner/mutation.go
 create mode 100644 vendor/cloud.google.com/go/spanner/mutation_test.go
 create mode 100644 vendor/cloud.google.com/go/spanner/not_appengine.go
 create mode 100644 vendor/cloud.google.com/go/spanner/not_go18.go
 create mode 100644 vendor/cloud.google.com/go/spanner/oc_test.go
 create mode 100644 vendor/cloud.google.com/go/spanner/protoutils.go
 create mode 100644 vendor/cloud.google.com/go/spanner/read.go
 create mode 100644 vendor/cloud.google.com/go/spanner/read_test.go
 create mode 100644 vendor/cloud.google.com/go/spanner/retry.go
 create mode 100644 vendor/cloud.google.com/go/spanner/retry_test.go
 create mode 100644 vendor/cloud.google.com/go/spanner/row.go
 create mode 100644 vendor/cloud.google.com/go/spanner/row_test.go
 create mode 100644 vendor/cloud.google.com/go/spanner/session.go
 create mode 100644 vendor/cloud.google.com/go/spanner/session_test.go
 create mode 100644 vendor/cloud.google.com/go/spanner/spanner_test.go
 create mode 100644 vendor/cloud.google.com/go/spanner/statement.go
 create mode 100644 vendor/cloud.google.com/go/spanner/statement_test.go
 create mode 100644 vendor/cloud.google.com/go/spanner/timestampbound.go
 create mode 100644 vendor/cloud.google.com/go/spanner/timestampbound_test.go
 create mode 100644 vendor/cloud.google.com/go/spanner/transaction.go
 create mode 100644 vendor/cloud.google.com/go/spanner/transaction_test.go
 create mode 100644 vendor/cloud.google.com/go/spanner/util.go
 create mode 100644 vendor/cloud.google.com/go/spanner/util_test.go
 create mode 100644 vendor/cloud.google.com/go/spanner/value.go
 create mode 100644 vendor/cloud.google.com/go/spanner/value_benchmarks_test.go
 create mode 100644 vendor/cloud.google.com/go/spanner/value_test.go
 create mode 100644 vendor/cloud.google.com/go/speech/apiv1/Recognize_smoke_test.go
 create mode 100644 vendor/cloud.google.com/go/speech/apiv1/doc.go
 create mode 100644 vendor/cloud.google.com/go/speech/apiv1/mock_test.go
 create mode 100644 vendor/cloud.google.com/go/speech/apiv1/speech_client.go
 create mode 100644 vendor/cloud.google.com/go/speech/apiv1/speech_client_example_test.go
 create mode 100644 vendor/cloud.google.com/go/speech/apiv1beta1/SyncRecognize_smoke_test.go
 create mode 100644 vendor/cloud.google.com/go/speech/apiv1beta1/doc.go
 create mode 100644 vendor/cloud.google.com/go/speech/apiv1beta1/mock_test.go
 create mode 100644 vendor/cloud.google.com/go/speech/apiv1beta1/speech_client.go
 create mode 100644 vendor/cloud.google.com/go/speech/apiv1beta1/speech_client_example_test.go
 create mode 100644 vendor/cloud.google.com/go/storage/acl.go
 create mode 100644 vendor/cloud.google.com/go/storage/bucket.go
 create mode 100644 vendor/cloud.google.com/go/storage/bucket_test.go
 create mode 100644 vendor/cloud.google.com/go/storage/copy.go
 create mode 100644 vendor/cloud.google.com/go/storage/doc.go
 create mode 100644 vendor/cloud.google.com/go/storage/example_test.go
 create mode 100644 vendor/cloud.google.com/go/storage/go110.go
 create mode 100644 vendor/cloud.google.com/go/storage/go17.go
 create mode 100644 vendor/cloud.google.com/go/storage/iam.go
 create mode 100644 vendor/cloud.google.com/go/storage/integration_test.go
 create mode 100644 vendor/cloud.google.com/go/storage/invoke.go
 create mode 100644 vendor/cloud.google.com/go/storage/invoke_test.go
 create mode 100644 vendor/cloud.google.com/go/storage/not_go110.go
 create mode 100644 vendor/cloud.google.com/go/storage/not_go17.go
 create mode 100644 vendor/cloud.google.com/go/storage/notifications.go
 create mode 100644 vendor/cloud.google.com/go/storage/notifications_test.go
 create mode 100644 vendor/cloud.google.com/go/storage/oc_test.go
 create mode 100644 vendor/cloud.google.com/go/storage/reader.go
 create mode 100644 vendor/cloud.google.com/go/storage/reader_test.go
 create mode 100644 vendor/cloud.google.com/go/storage/storage.go
 create mode 100644 vendor/cloud.google.com/go/storage/storage_test.go
 create mode 100644 vendor/cloud.google.com/go/storage/testdata/dummy_pem
 create mode 100644 vendor/cloud.google.com/go/storage/testdata/dummy_rsa
 create mode 100644 vendor/cloud.google.com/go/storage/writer.go
 create mode 100644 vendor/cloud.google.com/go/storage/writer_test.go
 create mode 100644 vendor/cloud.google.com/go/trace/apiv1/ListTraces_smoke_test.go
 create mode 100644 vendor/cloud.google.com/go/trace/apiv1/doc.go
 create mode 100644 vendor/cloud.google.com/go/trace/apiv1/mock_test.go
 create mode 100644 vendor/cloud.google.com/go/trace/apiv1/trace_client.go
 create mode 100644 vendor/cloud.google.com/go/trace/apiv1/trace_client_example_test.go
 create mode 100644 vendor/cloud.google.com/go/trace/apiv2/BatchWriteSpans_smoke_test.go
 create mode 100644 vendor/cloud.google.com/go/trace/apiv2/doc.go
 create mode 100644 vendor/cloud.google.com/go/trace/apiv2/mock_test.go
 create mode 100644 vendor/cloud.google.com/go/trace/apiv2/path_funcs.go
 create mode 100644 vendor/cloud.google.com/go/trace/apiv2/trace_client.go
 create mode 100644 vendor/cloud.google.com/go/trace/apiv2/trace_client_example_test.go
 create mode 100644 vendor/cloud.google.com/go/trace/grpc.go
 create mode 100644 vendor/cloud.google.com/go/trace/grpc_test.go
 create mode 100644 vendor/cloud.google.com/go/trace/http.go
 create mode 100644 vendor/cloud.google.com/go/trace/http_test.go
 create mode 100644 vendor/cloud.google.com/go/trace/httpexample_test.go
 create mode 100644 vendor/cloud.google.com/go/trace/sampling.go
 create mode 100644 vendor/cloud.google.com/go/trace/testdata/helloworld/helloworld.pb.go
 create mode 100644 vendor/cloud.google.com/go/trace/testdata/helloworld/helloworld.proto
 create mode 100644 vendor/cloud.google.com/go/trace/trace.go
 create mode 100644 vendor/cloud.google.com/go/trace/trace_test.go
 create mode 100644 vendor/cloud.google.com/go/translate/examples_test.go
 create mode 100644 vendor/cloud.google.com/go/translate/internal/translate/v2/README
 create mode 100755 vendor/cloud.google.com/go/translate/internal/translate/v2/regen.sh
 create mode 100644 vendor/cloud.google.com/go/translate/internal/translate/v2/translate-nov2016-api.json
 create mode 100644 vendor/cloud.google.com/go/translate/internal/translate/v2/translate-nov2016-gen.go
 create mode 100644 vendor/cloud.google.com/go/translate/translate.go
 create mode 100644 vendor/cloud.google.com/go/translate/translate_test.go
 create mode 100644 vendor/cloud.google.com/go/videointelligence/apiv1/doc.go
 create mode 100644 vendor/cloud.google.com/go/videointelligence/apiv1/mock_test.go
 create mode 100644 vendor/cloud.google.com/go/videointelligence/apiv1/video_intelligence_client.go
 create mode 100644 vendor/cloud.google.com/go/videointelligence/apiv1/video_intelligence_client_example_test.go
 create mode 100644 vendor/cloud.google.com/go/videointelligence/apiv1beta1/doc.go
 create mode 100644 vendor/cloud.google.com/go/videointelligence/apiv1beta1/mock_test.go
 create mode 100644 vendor/cloud.google.com/go/videointelligence/apiv1beta1/video_intelligence_client.go
 create mode 100644 vendor/cloud.google.com/go/videointelligence/apiv1beta1/video_intelligence_client_example_test.go
 create mode 100644 vendor/cloud.google.com/go/videointelligence/apiv1beta2/doc.go
 create mode 100644 vendor/cloud.google.com/go/videointelligence/apiv1beta2/mock_test.go
 create mode 100644 vendor/cloud.google.com/go/videointelligence/apiv1beta2/video_intelligence_client.go
 create mode 100644 vendor/cloud.google.com/go/videointelligence/apiv1beta2/video_intelligence_client_example_test.go
 create mode 100644 vendor/cloud.google.com/go/videointelligence/apiv1beta2/whitelist.go
 create mode 100644 vendor/cloud.google.com/go/vision/apiv1/BatchAnnotateImages_smoke_test.go
 create mode 100644 vendor/cloud.google.com/go/vision/apiv1/README.md
 create mode 100644 vendor/cloud.google.com/go/vision/apiv1/client.go
 create mode 100644 vendor/cloud.google.com/go/vision/apiv1/client_test.go
 create mode 100644 vendor/cloud.google.com/go/vision/apiv1/doc.go
 create mode 100644 vendor/cloud.google.com/go/vision/apiv1/examples_test.go
 create mode 100644 vendor/cloud.google.com/go/vision/apiv1/face.go
 create mode 100644 vendor/cloud.google.com/go/vision/apiv1/face_test.go
 create mode 100644 vendor/cloud.google.com/go/vision/apiv1/image.go
 create mode 100644 vendor/cloud.google.com/go/vision/apiv1/image_annotator_client.go
 create mode 100644 vendor/cloud.google.com/go/vision/apiv1/image_annotator_client_example_test.go
 create mode 100644 vendor/cloud.google.com/go/vision/apiv1/mock_test.go
 create mode 100644 vendor/cloud.google.com/go/vision/apiv1p1beta1/BatchAnnotateImages_smoke_test.go
 create mode 100644 vendor/cloud.google.com/go/vision/apiv1p1beta1/doc.go
 create mode 100644 vendor/cloud.google.com/go/vision/apiv1p1beta1/image_annotator_client.go
 create mode 100644 vendor/cloud.google.com/go/vision/apiv1p1beta1/image_annotator_client_example_test.go
 create mode 100644 vendor/cloud.google.com/go/vision/apiv1p1beta1/mock_test.go
 create mode 100644 vendor/github.com/elazarl/go-bindata-assetfs/LICENSE
 create mode 100644 vendor/github.com/elazarl/go-bindata-assetfs/README.md
 create mode 100644 vendor/github.com/elazarl/go-bindata-assetfs/assetfs.go
 create mode 100644 vendor/github.com/elazarl/go-bindata-assetfs/doc.go
 create mode 100644 vendor/github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs/main.go
 create mode 100644 vendor/github.com/golang/protobuf/.gitignore
 create mode 100644 vendor/github.com/golang/protobuf/.travis.yml
 create mode 100644 vendor/github.com/golang/protobuf/AUTHORS
 create mode 100644 vendor/github.com/golang/protobuf/CONTRIBUTORS
 create mode 100644 vendor/github.com/golang/protobuf/LICENSE
 create mode 100644 vendor/github.com/golang/protobuf/Make.protobuf
 create mode 100644 vendor/github.com/golang/protobuf/Makefile
 create mode 100644 vendor/github.com/golang/protobuf/README.md
 create mode 100644 vendor/github.com/golang/protobuf/_conformance/Makefile
 create mode 100644 vendor/github.com/golang/protobuf/_conformance/conformance.go
 create mode 100644 vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.pb.go
 create mode 100644 vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.proto
 create mode 100644 vendor/github.com/golang/protobuf/descriptor/descriptor.go
 create mode 100644 vendor/github.com/golang/protobuf/descriptor/descriptor_test.go
 create mode 100644 vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
 create mode 100644 vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go
 create mode 100644 vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/Makefile
 create mode 100644 vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go
 create mode 100644 vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.proto
 create mode 100644 vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go
 create mode 100644 vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto
 create mode 100644 vendor/github.com/golang/protobuf/proto/Makefile
 create mode 100644 vendor/github.com/golang/protobuf/proto/all_test.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/any_test.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/clone.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/clone_test.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/decode.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/decode_test.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/discard.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/encode.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/encode_test.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/equal.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/equal_test.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/extensions.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/extensions_test.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/lib.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/map_test.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/message_set.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/message_set_test.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/pointer_reflect.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/properties.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto
 create mode 100644 vendor/github.com/golang/protobuf/proto/proto3_test.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/size2_test.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/size_test.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/testdata/Makefile
 create mode 100644 vendor/github.com/golang/protobuf/proto/testdata/golden_test.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/testdata/test.pb.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/testdata/test.proto
 create mode 100644 vendor/github.com/golang/protobuf/proto/text.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/text_parser.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/text_parser_test.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/text_test.go
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/Makefile
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/doc.go
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/generator/Makefile
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/main.go
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/plugin/Makefile
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base.proto
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra.proto
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_test.go
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user.proto
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc.proto
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.pb.go.golden
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.proto
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp2.proto
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp3.proto
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/main_test.go
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3.proto
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/any.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.proto
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/any_test.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/doc.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration_test.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/empty/empty.proto
 create mode 100755 vendor/github.com/golang/protobuf/ptypes/regen.sh
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp_test.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
 create mode 100644 vendor/github.com/gorilla/context/.travis.yml
 create mode 100644 vendor/github.com/gorilla/context/LICENSE
 create mode 100644 vendor/github.com/gorilla/context/README.md
 create mode 100644 vendor/github.com/gorilla/context/context.go
 create mode 100644 vendor/github.com/gorilla/context/context_test.go
 create mode 100644 vendor/github.com/gorilla/context/doc.go
 create mode 100644 vendor/github.com/gorilla/securecookie/.travis.yml
 create mode 100644 vendor/github.com/gorilla/securecookie/LICENSE
 create mode 100644 vendor/github.com/gorilla/securecookie/README.md
 create mode 100644 vendor/github.com/gorilla/securecookie/doc.go
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz.go
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/0.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/05a79f06cf3f67f726dae68d18a2290f6c9a50c9-1
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/05aefe7b48db1dcf464048449ac4fa6af2fbc73b-5
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/1.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/10.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/11.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/12.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/13.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/14.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/15.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/16.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/169c3e89cd10efe9bce3a1fdb69a31229e618fc0
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/17.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/18.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/19.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/2.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/20.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/202ad82e80f70c37f893e47d23f91b1de5067219-7
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/21.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/21606782c65e44cac7afbb90977d8b6f82140e76-1
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/22.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/23.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/24.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/25.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/25c648c4c5161116b9b3b883338ddae51f25a901-1
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/26.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/27.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/28.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/29.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/2aad7069353f2b76fa70b9e0b22115bb42025ec0-2
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/2b28c8193457fb5385d22ef4ca733c4e364f00e7-4
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/3.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/30.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/31.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/32.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/33.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/34.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/35.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/36.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/366e3e0397c8ceca170311fb9db5ffcddf228b51-5
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/37.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/38.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/39.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/3916f239f9da91baa003ee6dc147cca7f7f95bd7-2
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/3e70a0a4bb1ecd96f554cbef9f20c674ff43e2f6-10
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/4.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/40.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/41.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/42.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/43.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/44.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/45.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/46.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/47.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/48.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/49.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/4b6a3b5efec9fd7ff70c713e135f825772ee0c5b-6
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/5.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/50.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/51.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/5122906052326fb2d0f65fef576c1437b95256af-5
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/52.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/53.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/54.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/55.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/56.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/5601b416f11820e0203c84570e4068cf87acad17-4
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/57.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/58.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/59.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/6.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/60.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/61.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/62.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/63.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/64.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/65.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/66.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/67.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/68.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/68c721c56a20c85b4aefdffcd60437cf2902b0fa-10
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/69.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/7.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/70.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/7095a5454c9f66801f2b298e577a488a9cadf52d
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/71.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/71853c6197a6a7f222db0f1978c7cb232b87c5ee-3
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/72.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/73.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/74.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/75.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/76.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/77.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/78.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/79.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/8.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/80.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/81.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/82.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/83.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/84.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/85.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/86.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/87.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/878643f2e5fb1c89d90d7b5c65957914bb7fe2c6-1
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/88.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/89.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/8ed2598d72255e78e1cdecba1a0a3b0cb4e4d8be-1
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/9.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/90.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/91.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/92.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/93.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/94.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/95.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/96.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/97.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/98.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/99.sc
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/9e34c6aae8f2c610f838fed4a5bab0da097c5135-2
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/9eecb7ef73e5211948391dfc0c2d586e3822b028-1
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/adc83b19e793491b1c6ea0fd8b46cd9f32e592fc-2
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/ae3eb68089a89eb0a707c1de4b60edfeb6efc6e0-4
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/b4f6322316fe4501272935267ab8b1c26684c884
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/bf3f814c978c0fc01c46c8d5b337b024697186cc-7
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/c63ae6dd4fc9f9dda66970e827d13f7c73fe841c-1
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/cebedf21435b903c4013fb902fb5b753e40a100e-8
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/da39a3ee5e6b4b0d3255bfef95601890afd80709-1
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/da5f06015af7bb09d3e421d086939d888f93271c-3
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/df60b2ac6f14afbf990d366fa820ee4906f1436e-2
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/ec54cdb4f33539c9b852b89ebcc67b4ec31a2b01-5
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/ec80b4b6f256eb0f29955c2bc000931d3b766c57-6
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/f2c59710b18847b10176f19fb0426cb597bafef0-9
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/f4de882915d90ead3b18371ab004abb24b3cd320-3
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/f82d23aaf2be2cfc7aa8e323922208cdfce8d35a-3
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/corpus/fa0f4cd7fee9eb65ebb95a3dc88b6fa198a2c986-1
 create mode 100644 vendor/github.com/gorilla/securecookie/fuzz/gencorpus.go
 create mode 100644 vendor/github.com/gorilla/securecookie/securecookie.go
 create mode 100644 vendor/github.com/gorilla/securecookie/securecookie_test.go
 create mode 100644 vendor/github.com/gorilla/sessions/.travis.yml
 create mode 100644 vendor/github.com/gorilla/sessions/LICENSE
 create mode 100644 vendor/github.com/gorilla/sessions/README.md
 create mode 100644 vendor/github.com/gorilla/sessions/doc.go
 create mode 100644 vendor/github.com/gorilla/sessions/lex.go
 create mode 100644 vendor/github.com/gorilla/sessions/sessions.go
 create mode 100644 vendor/github.com/gorilla/sessions/sessions_test.go
 create mode 100644 vendor/github.com/gorilla/sessions/store.go
 create mode 100644 vendor/github.com/gorilla/sessions/store_test.go
 create mode 100644 vendor/github.com/inconshreveable/mousetrap/LICENSE
 create mode 100644 vendor/github.com/inconshreveable/mousetrap/README.md
 create mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_others.go
 create mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_windows.go
 create mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
 create mode 100644 vendor/github.com/jmoiron/sqlx/.gitignore
 create mode 100644 vendor/github.com/jmoiron/sqlx/.travis.yml
 create mode 100644 vendor/github.com/jmoiron/sqlx/LICENSE
 create mode 100644 vendor/github.com/jmoiron/sqlx/README.md
 create mode 100644 vendor/github.com/jmoiron/sqlx/bind.go
 create mode 100644 vendor/github.com/jmoiron/sqlx/doc.go
 create mode 100644 vendor/github.com/jmoiron/sqlx/named.go
 create mode 100644 vendor/github.com/jmoiron/sqlx/named_context.go
 create mode 100644 vendor/github.com/jmoiron/sqlx/named_context_test.go
 create mode 100644 vendor/github.com/jmoiron/sqlx/named_test.go
 create mode 100644 vendor/github.com/jmoiron/sqlx/reflectx/README.md
 create mode 100644 vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
 create mode 100644 vendor/github.com/jmoiron/sqlx/reflectx/reflect_test.go
 create mode 100644 vendor/github.com/jmoiron/sqlx/sqlx.go
 create mode 100644 vendor/github.com/jmoiron/sqlx/sqlx_context.go
 create mode 100644 vendor/github.com/jmoiron/sqlx/sqlx_context_test.go
 create mode 100644 vendor/github.com/jmoiron/sqlx/sqlx_test.go
 create mode 100644 vendor/github.com/jmoiron/sqlx/types/README.md
 create mode 100644 vendor/github.com/jmoiron/sqlx/types/types.go
 create mode 100644 vendor/github.com/jmoiron/sqlx/types/types_test.go
 create mode 100644 vendor/github.com/kelseyhightower/envconfig/.travis.yml
 create mode 100644 vendor/github.com/kelseyhightower/envconfig/LICENSE
 create mode 100644 vendor/github.com/kelseyhightower/envconfig/MAINTAINERS
 create mode 100644 vendor/github.com/kelseyhightower/envconfig/README.md
 create mode 100644 vendor/github.com/kelseyhightower/envconfig/doc.go
 create mode 100644 vendor/github.com/kelseyhightower/envconfig/env_os.go
 create mode 100644 vendor/github.com/kelseyhightower/envconfig/env_syscall.go
 create mode 100644 vendor/github.com/kelseyhightower/envconfig/envconfig.go
 create mode 100644 vendor/github.com/kelseyhightower/envconfig/envconfig_test.go
 create mode 100644 vendor/github.com/kelseyhightower/envconfig/testdata/custom.txt
 create mode 100644 vendor/github.com/kelseyhightower/envconfig/testdata/default_list.txt
 create mode 100644 vendor/github.com/kelseyhightower/envconfig/testdata/default_table.txt
 create mode 100644 vendor/github.com/kelseyhightower/envconfig/testdata/fault.txt
 create mode 100644 vendor/github.com/kelseyhightower/envconfig/usage.go
 create mode 100644 vendor/github.com/kelseyhightower/envconfig/usage_test.go
 create mode 100644 vendor/github.com/lib/pq/.gitignore
 create mode 100755 vendor/github.com/lib/pq/.travis.sh
 create mode 100644 vendor/github.com/lib/pq/.travis.yml
 create mode 100644 vendor/github.com/lib/pq/CONTRIBUTING.md
 create mode 100644 vendor/github.com/lib/pq/LICENSE.md
 create mode 100644 vendor/github.com/lib/pq/README.md
 create mode 100644 vendor/github.com/lib/pq/array.go
 create mode 100644 vendor/github.com/lib/pq/array_test.go
 create mode 100644 vendor/github.com/lib/pq/bench_test.go
 create mode 100644 vendor/github.com/lib/pq/buf.go
 create mode 100644 vendor/github.com/lib/pq/certs/README
 create mode 100644 vendor/github.com/lib/pq/certs/bogus_root.crt
 create mode 100644 vendor/github.com/lib/pq/certs/postgresql.crt
 create mode 100644 vendor/github.com/lib/pq/certs/postgresql.key
 create mode 100644 vendor/github.com/lib/pq/certs/root.crt
 create mode 100644 vendor/github.com/lib/pq/certs/server.crt
 create mode 100644 vendor/github.com/lib/pq/certs/server.key
 create mode 100644 vendor/github.com/lib/pq/conn.go
 create mode 100644 vendor/github.com/lib/pq/conn_go18.go
 create mode 100644 vendor/github.com/lib/pq/conn_test.go
 create mode 100644 vendor/github.com/lib/pq/copy.go
 create mode 100644 vendor/github.com/lib/pq/copy_test.go
 create mode 100644 vendor/github.com/lib/pq/doc.go
 create mode 100644 vendor/github.com/lib/pq/encode.go
 create mode 100644 vendor/github.com/lib/pq/encode_test.go
 create mode 100644 vendor/github.com/lib/pq/error.go
 create mode 100644 vendor/github.com/lib/pq/example/listen/doc.go
 create mode 100644 vendor/github.com/lib/pq/go18_test.go
 create mode 100644 vendor/github.com/lib/pq/hstore/hstore.go
 create mode 100644 vendor/github.com/lib/pq/hstore/hstore_test.go
 create mode 100644 vendor/github.com/lib/pq/issues_test.go
 create mode 100644 vendor/github.com/lib/pq/notify.go
 create mode 100644 vendor/github.com/lib/pq/notify_test.go
 create mode 100644 vendor/github.com/lib/pq/oid/doc.go
 create mode 100644 vendor/github.com/lib/pq/oid/gen.go
 create mode 100644 vendor/github.com/lib/pq/oid/types.go
 create mode 100644 vendor/github.com/lib/pq/rows.go
 create mode 100644 vendor/github.com/lib/pq/rows_test.go
 create mode 100644 vendor/github.com/lib/pq/ssl.go
 create mode 100644 vendor/github.com/lib/pq/ssl_go1.7.go
 create mode 100644 vendor/github.com/lib/pq/ssl_permissions.go
 create mode 100644 vendor/github.com/lib/pq/ssl_renegotiation.go
 create mode 100644 vendor/github.com/lib/pq/ssl_test.go
 create mode 100644 vendor/github.com/lib/pq/ssl_windows.go
 create mode 100644 vendor/github.com/lib/pq/url.go
 create mode 100644 vendor/github.com/lib/pq/url_test.go
 create mode 100644 vendor/github.com/lib/pq/user_posix.go
 create mode 100644 vendor/github.com/lib/pq/user_windows.go
 create mode 100644 vendor/github.com/lib/pq/uuid.go
 create mode 100644 vendor/github.com/lib/pq/uuid_test.go
 create mode 100644 vendor/github.com/spf13/cobra/.circleci/config.yml
 create mode 100644 vendor/github.com/spf13/cobra/.gitignore
 create mode 100644 vendor/github.com/spf13/cobra/.mailmap
 create mode 100644 vendor/github.com/spf13/cobra/.travis.yml
 create mode 100644 vendor/github.com/spf13/cobra/LICENSE.txt
 create mode 100644 vendor/github.com/spf13/cobra/README.md
 create mode 100644 vendor/github.com/spf13/cobra/args.go
 create mode 100644 vendor/github.com/spf13/cobra/args_test.go
 create mode 100644 vendor/github.com/spf13/cobra/bash_completions.go
 create mode 100644 vendor/github.com/spf13/cobra/bash_completions.md
 create mode 100644 vendor/github.com/spf13/cobra/bash_completions_test.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra/README.md
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/add.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/add_test.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/golden_test.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/helpers.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/init.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/init_test.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_agpl.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_apache_2.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_2.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_3.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_2.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_3.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_lgpl.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_mit.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/licenses.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/project.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/project_test.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/root.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/testdata/LICENSE.golden
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/testdata/main.go.golden
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/testdata/root.go.golden
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/testdata/test.go.golden
 create mode 100644 vendor/github.com/spf13/cobra/cobra/main.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra_test.go
 create mode 100644 vendor/github.com/spf13/cobra/command.go
 create mode 100644 vendor/github.com/spf13/cobra/command_notwin.go
 create mode 100644 vendor/github.com/spf13/cobra/command_test.go
 create mode 100644 vendor/github.com/spf13/cobra/command_win.go
 create mode 100644 vendor/github.com/spf13/cobra/doc/cmd_test.go
 create mode 100644 vendor/github.com/spf13/cobra/doc/man_docs.go
 create mode 100644 vendor/github.com/spf13/cobra/doc/man_docs.md
 create mode 100644 vendor/github.com/spf13/cobra/doc/man_docs_test.go
 create mode 100644 vendor/github.com/spf13/cobra/doc/man_examples_test.go
 create mode 100644 vendor/github.com/spf13/cobra/doc/md_docs.go
 create mode 100644 vendor/github.com/spf13/cobra/doc/md_docs.md
 create mode 100644 vendor/github.com/spf13/cobra/doc/md_docs_test.go
 create mode 100644 vendor/github.com/spf13/cobra/doc/rest_docs.go
 create mode 100644 vendor/github.com/spf13/cobra/doc/rest_docs.md
 create mode 100644 vendor/github.com/spf13/cobra/doc/rest_docs_test.go
 create mode 100644 vendor/github.com/spf13/cobra/doc/util.go
 create mode 100644 vendor/github.com/spf13/cobra/doc/yaml_docs.go
 create mode 100644 vendor/github.com/spf13/cobra/doc/yaml_docs.md
 create mode 100644 vendor/github.com/spf13/cobra/doc/yaml_docs_test.go
 create mode 100644 vendor/github.com/spf13/cobra/zsh_completions.go
 create mode 100644 vendor/github.com/spf13/cobra/zsh_completions_test.go
 create mode 100644 vendor/github.com/spf13/pflag/.gitignore
 create mode 100644 vendor/github.com/spf13/pflag/.travis.yml
 create mode 100644 vendor/github.com/spf13/pflag/LICENSE
 create mode 100644 vendor/github.com/spf13/pflag/README.md
 create mode 100644 vendor/github.com/spf13/pflag/bool.go
 create mode 100644 vendor/github.com/spf13/pflag/bool_slice.go
 create mode 100644 vendor/github.com/spf13/pflag/bool_slice_test.go
 create mode 100644 vendor/github.com/spf13/pflag/bool_test.go
 create mode 100644 vendor/github.com/spf13/pflag/count.go
 create mode 100644 vendor/github.com/spf13/pflag/count_test.go
 create mode 100644 vendor/github.com/spf13/pflag/duration.go
 create mode 100644 vendor/github.com/spf13/pflag/example_test.go
 create mode 100644 vendor/github.com/spf13/pflag/export_test.go
 create mode 100644 vendor/github.com/spf13/pflag/flag.go
 create mode 
100644 vendor/github.com/spf13/pflag/flag_test.go create mode 100644 vendor/github.com/spf13/pflag/float32.go create mode 100644 vendor/github.com/spf13/pflag/float64.go create mode 100644 vendor/github.com/spf13/pflag/golangflag.go create mode 100644 vendor/github.com/spf13/pflag/golangflag_test.go create mode 100644 vendor/github.com/spf13/pflag/int.go create mode 100644 vendor/github.com/spf13/pflag/int32.go create mode 100644 vendor/github.com/spf13/pflag/int64.go create mode 100644 vendor/github.com/spf13/pflag/int8.go create mode 100644 vendor/github.com/spf13/pflag/int_slice.go create mode 100644 vendor/github.com/spf13/pflag/int_slice_test.go create mode 100644 vendor/github.com/spf13/pflag/ip.go create mode 100644 vendor/github.com/spf13/pflag/ip_slice.go create mode 100644 vendor/github.com/spf13/pflag/ip_slice_test.go create mode 100644 vendor/github.com/spf13/pflag/ip_test.go create mode 100644 vendor/github.com/spf13/pflag/ipmask.go create mode 100644 vendor/github.com/spf13/pflag/ipnet.go create mode 100644 vendor/github.com/spf13/pflag/ipnet_test.go create mode 100644 vendor/github.com/spf13/pflag/string.go create mode 100644 vendor/github.com/spf13/pflag/string_array.go create mode 100644 vendor/github.com/spf13/pflag/string_array_test.go create mode 100644 vendor/github.com/spf13/pflag/string_slice.go create mode 100644 vendor/github.com/spf13/pflag/string_slice_test.go create mode 100644 vendor/github.com/spf13/pflag/uint.go create mode 100644 vendor/github.com/spf13/pflag/uint16.go create mode 100644 vendor/github.com/spf13/pflag/uint32.go create mode 100644 vendor/github.com/spf13/pflag/uint64.go create mode 100644 vendor/github.com/spf13/pflag/uint8.go create mode 100644 vendor/github.com/spf13/pflag/uint_slice.go create mode 100644 vendor/github.com/spf13/pflag/uint_slice_test.go create mode 100755 vendor/github.com/spf13/pflag/verify/all.sh create mode 100755 vendor/github.com/spf13/pflag/verify/gofmt.sh create mode 100755 vendor/github.com/spf13/pflag/verify/golint.sh create mode 100644 vendor/golang.org/x/net/.gitattributes create mode 100644 vendor/golang.org/x/net/.gitignore create mode 100644 vendor/golang.org/x/net/AUTHORS create mode 100644 vendor/golang.org/x/net/CONTRIBUTING.md create mode 100644 vendor/golang.org/x/net/CONTRIBUTORS create mode 100644 vendor/golang.org/x/net/LICENSE create mode 100644 vendor/golang.org/x/net/PATENTS create mode 100644 vendor/golang.org/x/net/README.md create mode 100644 vendor/golang.org/x/net/bpf/asm.go create mode 100644 vendor/golang.org/x/net/bpf/constants.go create mode 100644 vendor/golang.org/x/net/bpf/doc.go create mode 100644 vendor/golang.org/x/net/bpf/instructions.go create mode 100644 vendor/golang.org/x/net/bpf/instructions_test.go create mode 100644 vendor/golang.org/x/net/bpf/setter.go create mode 100644 vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf create mode 100644 vendor/golang.org/x/net/bpf/testdata/all_instructions.txt create mode 100644 vendor/golang.org/x/net/bpf/vm.go create mode 100644 vendor/golang.org/x/net/bpf/vm_aluop_test.go create mode 100644 vendor/golang.org/x/net/bpf/vm_bpf_test.go create mode 100644 vendor/golang.org/x/net/bpf/vm_extension_test.go create mode 100644 vendor/golang.org/x/net/bpf/vm_instructions.go create mode 100644 vendor/golang.org/x/net/bpf/vm_jump_test.go create mode 100644 vendor/golang.org/x/net/bpf/vm_load_test.go create mode 100644 vendor/golang.org/x/net/bpf/vm_ret_test.go create mode 100644 vendor/golang.org/x/net/bpf/vm_scratch_test.go create mode 
100644 vendor/golang.org/x/net/bpf/vm_test.go create mode 100644 vendor/golang.org/x/net/codereview.cfg create mode 100644 vendor/golang.org/x/net/context/context.go create mode 100644 vendor/golang.org/x/net/context/context_test.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go create mode 100644 vendor/golang.org/x/net/context/go17.go create mode 100644 vendor/golang.org/x/net/context/go19.go create mode 100644 vendor/golang.org/x/net/context/pre_go17.go create mode 100644 vendor/golang.org/x/net/context/pre_go19.go create mode 100644 vendor/golang.org/x/net/context/withtimeout_test.go create mode 100644 vendor/golang.org/x/net/dict/dict.go create mode 100644 vendor/golang.org/x/net/dns/dnsmessage/example_test.go create mode 100644 vendor/golang.org/x/net/dns/dnsmessage/message.go create mode 100644 vendor/golang.org/x/net/dns/dnsmessage/message_test.go create mode 100644 vendor/golang.org/x/net/html/atom/atom.go create mode 100644 vendor/golang.org/x/net/html/atom/atom_test.go create mode 100644 vendor/golang.org/x/net/html/atom/gen.go create mode 100644 vendor/golang.org/x/net/html/atom/table.go create mode 100644 vendor/golang.org/x/net/html/atom/table_test.go create mode 100644 vendor/golang.org/x/net/html/charset/charset.go create mode 100644 vendor/golang.org/x/net/html/charset/charset_test.go create mode 100644 vendor/golang.org/x/net/html/charset/testdata/HTTP-charset.html create mode 100644 vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html create mode 100644 vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html create mode 100644 vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html create mode 100644 vendor/golang.org/x/net/html/charset/testdata/No-encoding-declaration.html create mode 100644 vendor/golang.org/x/net/html/charset/testdata/README create mode 100644 vendor/golang.org/x/net/html/charset/testdata/UTF-16BE-BOM.html create mode 100644 vendor/golang.org/x/net/html/charset/testdata/UTF-16LE-BOM.html create mode 100644 vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-charset.html create mode 100644 vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-content.html create mode 100644 vendor/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html create mode 100644 vendor/golang.org/x/net/html/charset/testdata/meta-content-attribute.html create mode 100644 vendor/golang.org/x/net/html/const.go create mode 100644 vendor/golang.org/x/net/html/doc.go create mode 100644 vendor/golang.org/x/net/html/doctype.go create mode 100644 vendor/golang.org/x/net/html/entity.go create mode 100644 vendor/golang.org/x/net/html/entity_test.go create mode 100644 vendor/golang.org/x/net/html/escape.go create mode 100644 vendor/golang.org/x/net/html/escape_test.go create mode 100644 vendor/golang.org/x/net/html/example_test.go create mode 100644 vendor/golang.org/x/net/html/foreign.go create mode 100644 vendor/golang.org/x/net/html/node.go create mode 100644 vendor/golang.org/x/net/html/node_test.go create mode 100644 vendor/golang.org/x/net/html/parse.go create mode 100644 vendor/golang.org/x/net/html/parse_test.go create mode 100644 vendor/golang.org/x/net/html/render.go 
create mode 100644 vendor/golang.org/x/net/html/render_test.go create mode 100644 vendor/golang.org/x/net/html/testdata/go1.html create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/README create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/adoption01.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/adoption02.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/comments01.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/doctype01.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/entities01.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/entities02.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/html5test-com.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/inbody01.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/isindex.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/pending-spec-changes-plain-text-unsafe.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/pending-spec-changes.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/plain-text-unsafe.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/scriptdata01.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/scripted/adoption01.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/scripted/webkit01.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tables01.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests1.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests10.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests11.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests12.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests14.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests15.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests16.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests17.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests18.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests19.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests2.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests20.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests21.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests22.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests23.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests24.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests25.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests26.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests3.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests4.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests5.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests6.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests7.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests8.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests9.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests_innerHTML_1.dat create mode 100644 
vendor/golang.org/x/net/html/testdata/webkit/tricky01.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/webkit01.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/webkit02.dat create mode 100644 vendor/golang.org/x/net/html/token.go create mode 100644 vendor/golang.org/x/net/html/token_test.go create mode 100644 vendor/golang.org/x/net/http/httpproxy/export_test.go create mode 100644 vendor/golang.org/x/net/http/httpproxy/go19_test.go create mode 100644 vendor/golang.org/x/net/http/httpproxy/proxy.go create mode 100644 vendor/golang.org/x/net/http/httpproxy/proxy_test.go create mode 100644 vendor/golang.org/x/net/http2/.gitignore create mode 100644 vendor/golang.org/x/net/http2/Dockerfile create mode 100644 vendor/golang.org/x/net/http2/Makefile create mode 100644 vendor/golang.org/x/net/http2/README create mode 100644 vendor/golang.org/x/net/http2/ciphers.go create mode 100644 vendor/golang.org/x/net/http2/ciphers_test.go create mode 100644 vendor/golang.org/x/net/http2/client_conn_pool.go create mode 100644 vendor/golang.org/x/net/http2/configure_transport.go create mode 100644 vendor/golang.org/x/net/http2/databuffer.go create mode 100644 vendor/golang.org/x/net/http2/databuffer_test.go create mode 100644 vendor/golang.org/x/net/http2/errors.go create mode 100644 vendor/golang.org/x/net/http2/errors_test.go create mode 100644 vendor/golang.org/x/net/http2/flow.go create mode 100644 vendor/golang.org/x/net/http2/flow_test.go create mode 100644 vendor/golang.org/x/net/http2/frame.go create mode 100644 vendor/golang.org/x/net/http2/frame_test.go create mode 100644 vendor/golang.org/x/net/http2/go16.go create mode 100644 vendor/golang.org/x/net/http2/go17.go create mode 100644 vendor/golang.org/x/net/http2/go17_not18.go create mode 100644 vendor/golang.org/x/net/http2/go18.go create mode 100644 vendor/golang.org/x/net/http2/go18_test.go create mode 100644 vendor/golang.org/x/net/http2/go19.go create mode 100644 vendor/golang.org/x/net/http2/go19_test.go create mode 100644 vendor/golang.org/x/net/http2/gotrack.go create mode 100644 vendor/golang.org/x/net/http2/gotrack_test.go create mode 100644 vendor/golang.org/x/net/http2/h2demo/.gitignore create mode 100644 vendor/golang.org/x/net/http2/h2demo/Dockerfile create mode 100644 vendor/golang.org/x/net/http2/h2demo/Dockerfile.0 create mode 100644 vendor/golang.org/x/net/http2/h2demo/Makefile create mode 100644 vendor/golang.org/x/net/http2/h2demo/README create mode 100644 vendor/golang.org/x/net/http2/h2demo/deployment-prod.yaml create mode 100644 vendor/golang.org/x/net/http2/h2demo/h2demo.go create mode 100644 vendor/golang.org/x/net/http2/h2demo/launch.go create mode 100644 vendor/golang.org/x/net/http2/h2demo/rootCA.key create mode 100644 vendor/golang.org/x/net/http2/h2demo/rootCA.pem create mode 100644 vendor/golang.org/x/net/http2/h2demo/rootCA.srl create mode 100644 vendor/golang.org/x/net/http2/h2demo/server.crt create mode 100644 vendor/golang.org/x/net/http2/h2demo/server.key create mode 100644 vendor/golang.org/x/net/http2/h2demo/service.yaml create mode 100644 vendor/golang.org/x/net/http2/h2demo/tmpl.go create mode 100644 vendor/golang.org/x/net/http2/h2i/README.md create mode 100644 vendor/golang.org/x/net/http2/h2i/h2i.go create mode 100644 vendor/golang.org/x/net/http2/headermap.go create mode 100644 vendor/golang.org/x/net/http2/hpack/encode.go create mode 100644 vendor/golang.org/x/net/http2/hpack/encode_test.go create mode 100644 vendor/golang.org/x/net/http2/hpack/hpack.go create mode 
100644 vendor/golang.org/x/net/http2/hpack/hpack_test.go create mode 100644 vendor/golang.org/x/net/http2/hpack/huffman.go create mode 100644 vendor/golang.org/x/net/http2/hpack/tables.go create mode 100644 vendor/golang.org/x/net/http2/hpack/tables_test.go create mode 100644 vendor/golang.org/x/net/http2/http2.go create mode 100644 vendor/golang.org/x/net/http2/http2_test.go create mode 100644 vendor/golang.org/x/net/http2/not_go16.go create mode 100644 vendor/golang.org/x/net/http2/not_go17.go create mode 100644 vendor/golang.org/x/net/http2/not_go18.go create mode 100644 vendor/golang.org/x/net/http2/not_go19.go create mode 100644 vendor/golang.org/x/net/http2/pipe.go create mode 100644 vendor/golang.org/x/net/http2/pipe_test.go create mode 100644 vendor/golang.org/x/net/http2/server.go create mode 100644 vendor/golang.org/x/net/http2/server_push_test.go create mode 100644 vendor/golang.org/x/net/http2/server_test.go create mode 100644 vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml create mode 100644 vendor/golang.org/x/net/http2/transport.go create mode 100644 vendor/golang.org/x/net/http2/transport_test.go create mode 100644 vendor/golang.org/x/net/http2/write.go create mode 100644 vendor/golang.org/x/net/http2/writesched.go create mode 100644 vendor/golang.org/x/net/http2/writesched_priority.go create mode 100644 vendor/golang.org/x/net/http2/writesched_priority_test.go create mode 100644 vendor/golang.org/x/net/http2/writesched_random.go create mode 100644 vendor/golang.org/x/net/http2/writesched_random_test.go create mode 100644 vendor/golang.org/x/net/http2/writesched_test.go create mode 100644 vendor/golang.org/x/net/http2/z_spec_test.go create mode 100644 vendor/golang.org/x/net/icmp/diag_test.go create mode 100644 vendor/golang.org/x/net/icmp/dstunreach.go create mode 100644 vendor/golang.org/x/net/icmp/echo.go create mode 100644 vendor/golang.org/x/net/icmp/endpoint.go create mode 100644 vendor/golang.org/x/net/icmp/example_test.go create mode 100644 vendor/golang.org/x/net/icmp/extension.go create mode 100644 vendor/golang.org/x/net/icmp/extension_test.go create mode 100644 vendor/golang.org/x/net/icmp/helper_posix.go create mode 100644 vendor/golang.org/x/net/icmp/interface.go create mode 100644 vendor/golang.org/x/net/icmp/ipv4.go create mode 100644 vendor/golang.org/x/net/icmp/ipv4_test.go create mode 100644 vendor/golang.org/x/net/icmp/ipv6.go create mode 100644 vendor/golang.org/x/net/icmp/listen_posix.go create mode 100644 vendor/golang.org/x/net/icmp/listen_stub.go create mode 100644 vendor/golang.org/x/net/icmp/message.go create mode 100644 vendor/golang.org/x/net/icmp/message_test.go create mode 100644 vendor/golang.org/x/net/icmp/messagebody.go create mode 100644 vendor/golang.org/x/net/icmp/mpls.go create mode 100644 vendor/golang.org/x/net/icmp/multipart.go create mode 100644 vendor/golang.org/x/net/icmp/multipart_test.go create mode 100644 vendor/golang.org/x/net/icmp/packettoobig.go create mode 100644 vendor/golang.org/x/net/icmp/paramprob.go create mode 100644 vendor/golang.org/x/net/icmp/sys_freebsd.go create mode 100644 vendor/golang.org/x/net/icmp/timeexceeded.go create mode 100644 vendor/golang.org/x/net/idna/example_test.go create mode 100644 vendor/golang.org/x/net/idna/idna.go create mode 100644 vendor/golang.org/x/net/idna/idna_test.go create mode 100644 vendor/golang.org/x/net/idna/punycode.go create mode 100644 vendor/golang.org/x/net/idna/punycode_test.go create mode 100644 vendor/golang.org/x/net/idna/tables.go create mode 
100644 vendor/golang.org/x/net/idna/trie.go create mode 100644 vendor/golang.org/x/net/idna/trieval.go create mode 100644 vendor/golang.org/x/net/internal/iana/const.go create mode 100644 vendor/golang.org/x/net/internal/iana/gen.go create mode 100644 vendor/golang.org/x/net/internal/nettest/helper_bsd.go create mode 100644 vendor/golang.org/x/net/internal/nettest/helper_nobsd.go create mode 100644 vendor/golang.org/x/net/internal/nettest/helper_posix.go create mode 100644 vendor/golang.org/x/net/internal/nettest/helper_stub.go create mode 100644 vendor/golang.org/x/net/internal/nettest/helper_unix.go create mode 100644 vendor/golang.org/x/net/internal/nettest/helper_windows.go create mode 100644 vendor/golang.org/x/net/internal/nettest/interface.go create mode 100644 vendor/golang.org/x/net/internal/nettest/rlimit.go create mode 100644 vendor/golang.org/x/net/internal/nettest/stack.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go create mode 100644 vendor/golang.org/x/net/internal/socket/defs_darwin.go create mode 100644 vendor/golang.org/x/net/internal/socket/defs_dragonfly.go create mode 100644 vendor/golang.org/x/net/internal/socket/defs_freebsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/defs_linux.go create mode 100644 vendor/golang.org/x/net/internal/socket/defs_netbsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/defs_openbsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/defs_solaris.go create mode 100644 vendor/golang.org/x/net/internal/socket/error_unix.go create mode 100644 vendor/golang.org/x/net/internal/socket/error_windows.go create mode 100644 vendor/golang.org/x/net/internal/socket/iovec_32bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/iovec_64bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/iovec_stub.go create mode 100644 vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go create mode 100644 vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_bsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_linux.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_stub.go create mode 100644 vendor/golang.org/x/net/internal/socket/rawconn.go create mode 100644 vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go create mode 100644 vendor/golang.org/x/net/internal/socket/rawconn_msg.go create mode 100644 vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go create mode 100644 vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go create mode 100644 
vendor/golang.org/x/net/internal/socket/rawconn_stub.go create mode 100644 vendor/golang.org/x/net/internal/socket/reflect.go create mode 100644 vendor/golang.org/x/net/internal/socket/socket.go create mode 100644 vendor/golang.org/x/net/internal/socket/socket_go1_9_test.go create mode 100644 vendor/golang.org/x/net/internal/socket/socket_test.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_bsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_bsdvar.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_darwin.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_dragonfly.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_386.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_386.s create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_arm.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_mips.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s create mode 100644 vendor/golang.org/x/net/internal/socket/sys_netbsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_posix.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_solaris.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s create mode 100644 vendor/golang.org/x/net/internal/socket/sys_stub.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_unix.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_windows.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_386.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go create mode 100644 
vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socks/client.go create mode 100644 vendor/golang.org/x/net/internal/socks/dial_test.go create mode 100644 vendor/golang.org/x/net/internal/socks/socks.go create mode 100644 vendor/golang.org/x/net/internal/sockstest/server.go create mode 100644 vendor/golang.org/x/net/internal/sockstest/server_test.go create mode 100644 vendor/golang.org/x/net/internal/timeseries/timeseries.go create mode 100644 vendor/golang.org/x/net/internal/timeseries/timeseries_test.go create mode 100644 vendor/golang.org/x/net/ipv4/batch.go create mode 100644 vendor/golang.org/x/net/ipv4/bpf_test.go create mode 100644 vendor/golang.org/x/net/ipv4/control.go create mode 100644 vendor/golang.org/x/net/ipv4/control_bsd.go create mode 100644 vendor/golang.org/x/net/ipv4/control_pktinfo.go create mode 100644 vendor/golang.org/x/net/ipv4/control_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/control_test.go create mode 100644 vendor/golang.org/x/net/ipv4/control_unix.go create mode 100644 vendor/golang.org/x/net/ipv4/control_windows.go create mode 100644 vendor/golang.org/x/net/ipv4/defs_darwin.go create mode 100644 vendor/golang.org/x/net/ipv4/defs_dragonfly.go create mode 100644 vendor/golang.org/x/net/ipv4/defs_freebsd.go create mode 100644 vendor/golang.org/x/net/ipv4/defs_linux.go create mode 100644 vendor/golang.org/x/net/ipv4/defs_netbsd.go create mode 100644 vendor/golang.org/x/net/ipv4/defs_openbsd.go create mode 100644 vendor/golang.org/x/net/ipv4/defs_solaris.go create mode 100644 vendor/golang.org/x/net/ipv4/dgramopt.go create mode 100644 vendor/golang.org/x/net/ipv4/doc.go create mode 100644 vendor/golang.org/x/net/ipv4/endpoint.go create mode 100644 vendor/golang.org/x/net/ipv4/example_test.go create mode 100644 vendor/golang.org/x/net/ipv4/gen.go create mode 100644 vendor/golang.org/x/net/ipv4/genericopt.go create mode 100644 vendor/golang.org/x/net/ipv4/header.go create mode 100644 vendor/golang.org/x/net/ipv4/header_test.go create mode 100644 vendor/golang.org/x/net/ipv4/helper.go create mode 100644 vendor/golang.org/x/net/ipv4/iana.go create mode 100644 vendor/golang.org/x/net/ipv4/icmp.go create mode 100644 vendor/golang.org/x/net/ipv4/icmp_linux.go create mode 100644 vendor/golang.org/x/net/ipv4/icmp_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/icmp_test.go create mode 100644 vendor/golang.org/x/net/ipv4/multicast_test.go create mode 100644 vendor/golang.org/x/net/ipv4/multicastlistener_test.go create mode 100644 vendor/golang.org/x/net/ipv4/multicastsockopt_test.go create mode 100644 vendor/golang.org/x/net/ipv4/packet.go create mode 100644 vendor/golang.org/x/net/ipv4/packet_go1_8.go create 
mode 100644 vendor/golang.org/x/net/ipv4/packet_go1_9.go create mode 100644 vendor/golang.org/x/net/ipv4/payload.go create mode 100644 vendor/golang.org/x/net/ipv4/payload_cmsg.go create mode 100644 vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go create mode 100644 vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go create mode 100644 vendor/golang.org/x/net/ipv4/payload_nocmsg.go create mode 100644 vendor/golang.org/x/net/ipv4/readwrite_go1_8_test.go create mode 100644 vendor/golang.org/x/net/ipv4/readwrite_go1_9_test.go create mode 100644 vendor/golang.org/x/net/ipv4/readwrite_test.go create mode 100644 vendor/golang.org/x/net/ipv4/sockopt.go create mode 100644 vendor/golang.org/x/net/ipv4/sockopt_posix.go create mode 100644 vendor/golang.org/x/net/ipv4/sockopt_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_asmreq.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_asmreqn.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_bpf.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_bpf_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_bsd.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_darwin.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_dragonfly.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_freebsd.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_linux.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_solaris.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_ssmreq.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_windows.go create mode 100644 vendor/golang.org/x/net/ipv4/unicast_test.go create mode 100644 vendor/golang.org/x/net/ipv4/unicastsockopt_test.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_darwin.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_dragonfly.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_386.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_arm.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_mips.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_netbsd.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_openbsd.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_solaris.go create mode 100644 vendor/golang.org/x/net/ipv6/batch.go create mode 100644 vendor/golang.org/x/net/ipv6/bpf_test.go create mode 100644 vendor/golang.org/x/net/ipv6/control.go create mode 100644 vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go create mode 100644 
vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go create mode 100644 vendor/golang.org/x/net/ipv6/control_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/control_test.go create mode 100644 vendor/golang.org/x/net/ipv6/control_unix.go create mode 100644 vendor/golang.org/x/net/ipv6/control_windows.go create mode 100644 vendor/golang.org/x/net/ipv6/defs_darwin.go create mode 100644 vendor/golang.org/x/net/ipv6/defs_dragonfly.go create mode 100644 vendor/golang.org/x/net/ipv6/defs_freebsd.go create mode 100644 vendor/golang.org/x/net/ipv6/defs_linux.go create mode 100644 vendor/golang.org/x/net/ipv6/defs_netbsd.go create mode 100644 vendor/golang.org/x/net/ipv6/defs_openbsd.go create mode 100644 vendor/golang.org/x/net/ipv6/defs_solaris.go create mode 100644 vendor/golang.org/x/net/ipv6/dgramopt.go create mode 100644 vendor/golang.org/x/net/ipv6/doc.go create mode 100644 vendor/golang.org/x/net/ipv6/endpoint.go create mode 100644 vendor/golang.org/x/net/ipv6/example_test.go create mode 100644 vendor/golang.org/x/net/ipv6/gen.go create mode 100644 vendor/golang.org/x/net/ipv6/genericopt.go create mode 100644 vendor/golang.org/x/net/ipv6/header.go create mode 100644 vendor/golang.org/x/net/ipv6/header_test.go create mode 100644 vendor/golang.org/x/net/ipv6/helper.go create mode 100644 vendor/golang.org/x/net/ipv6/iana.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp_bsd.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp_linux.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp_solaris.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp_test.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp_windows.go create mode 100644 vendor/golang.org/x/net/ipv6/mocktransponder_test.go create mode 100644 vendor/golang.org/x/net/ipv6/multicast_test.go create mode 100644 vendor/golang.org/x/net/ipv6/multicastlistener_test.go create mode 100644 vendor/golang.org/x/net/ipv6/multicastsockopt_test.go create mode 100644 vendor/golang.org/x/net/ipv6/payload.go create mode 100644 vendor/golang.org/x/net/ipv6/payload_cmsg.go create mode 100644 vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go create mode 100644 vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go create mode 100644 vendor/golang.org/x/net/ipv6/payload_nocmsg.go create mode 100644 vendor/golang.org/x/net/ipv6/readwrite_go1_8_test.go create mode 100644 vendor/golang.org/x/net/ipv6/readwrite_go1_9_test.go create mode 100644 vendor/golang.org/x/net/ipv6/readwrite_test.go create mode 100644 vendor/golang.org/x/net/ipv6/sockopt.go create mode 100644 vendor/golang.org/x/net/ipv6/sockopt_posix.go create mode 100644 vendor/golang.org/x/net/ipv6/sockopt_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/sockopt_test.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_asmreq.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_bpf.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_bpf_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_bsd.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_darwin.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_freebsd.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_linux.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_solaris.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_ssmreq.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go create mode 
100644 vendor/golang.org/x/net/ipv6/sys_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_windows.go create mode 100644 vendor/golang.org/x/net/ipv6/unicast_test.go create mode 100644 vendor/golang.org/x/net/ipv6/unicastsockopt_test.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_darwin.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_dragonfly.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_386.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_arm.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_mips.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_netbsd.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_openbsd.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_solaris.go create mode 100644 vendor/golang.org/x/net/lex/httplex/httplex.go create mode 100644 vendor/golang.org/x/net/lex/httplex/httplex_test.go create mode 100644 vendor/golang.org/x/net/lif/address.go create mode 100644 vendor/golang.org/x/net/lif/address_test.go create mode 100644 vendor/golang.org/x/net/lif/binary.go create mode 100644 vendor/golang.org/x/net/lif/defs_solaris.go create mode 100644 vendor/golang.org/x/net/lif/lif.go create mode 100644 vendor/golang.org/x/net/lif/link.go create mode 100644 vendor/golang.org/x/net/lif/link_test.go create mode 100644 vendor/golang.org/x/net/lif/sys.go create mode 100644 vendor/golang.org/x/net/lif/sys_solaris_amd64.s create mode 100644 vendor/golang.org/x/net/lif/syscall.go create mode 100644 vendor/golang.org/x/net/lif/zsys_solaris_amd64.go create mode 100644 vendor/golang.org/x/net/nettest/conntest.go create mode 100644 vendor/golang.org/x/net/nettest/conntest_go16.go create mode 100644 vendor/golang.org/x/net/nettest/conntest_go17.go create mode 100644 vendor/golang.org/x/net/nettest/conntest_test.go create mode 100644 vendor/golang.org/x/net/netutil/listen.go create mode 100644 vendor/golang.org/x/net/netutil/listen_test.go create mode 100644 vendor/golang.org/x/net/proxy/direct.go create mode 100644 vendor/golang.org/x/net/proxy/per_host.go create mode 100644 vendor/golang.org/x/net/proxy/per_host_test.go create mode 100644 vendor/golang.org/x/net/proxy/proxy.go create mode 100644 vendor/golang.org/x/net/proxy/proxy_test.go create mode 100644 vendor/golang.org/x/net/proxy/socks5.go create mode 100644 vendor/golang.org/x/net/publicsuffix/gen.go create mode 100644 vendor/golang.org/x/net/publicsuffix/list.go create mode 100644 vendor/golang.org/x/net/publicsuffix/list_test.go create mode 100644 vendor/golang.org/x/net/publicsuffix/table.go create mode 100644 vendor/golang.org/x/net/publicsuffix/table_test.go create mode 100644 vendor/golang.org/x/net/route/address.go create mode 100644 
vendor/golang.org/x/net/route/address_darwin_test.go create mode 100644 vendor/golang.org/x/net/route/address_test.go create mode 100644 vendor/golang.org/x/net/route/binary.go create mode 100644 vendor/golang.org/x/net/route/defs_darwin.go create mode 100644 vendor/golang.org/x/net/route/defs_dragonfly.go create mode 100644 vendor/golang.org/x/net/route/defs_freebsd.go create mode 100644 vendor/golang.org/x/net/route/defs_netbsd.go create mode 100644 vendor/golang.org/x/net/route/defs_openbsd.go create mode 100644 vendor/golang.org/x/net/route/interface.go create mode 100644 vendor/golang.org/x/net/route/interface_announce.go create mode 100644 vendor/golang.org/x/net/route/interface_classic.go create mode 100644 vendor/golang.org/x/net/route/interface_freebsd.go create mode 100644 vendor/golang.org/x/net/route/interface_multicast.go create mode 100644 vendor/golang.org/x/net/route/interface_openbsd.go create mode 100644 vendor/golang.org/x/net/route/message.go create mode 100644 vendor/golang.org/x/net/route/message_darwin_test.go create mode 100644 vendor/golang.org/x/net/route/message_freebsd_test.go create mode 100644 vendor/golang.org/x/net/route/message_test.go create mode 100644 vendor/golang.org/x/net/route/route.go create mode 100644 vendor/golang.org/x/net/route/route_classic.go create mode 100644 vendor/golang.org/x/net/route/route_openbsd.go create mode 100644 vendor/golang.org/x/net/route/route_test.go create mode 100644 vendor/golang.org/x/net/route/sys.go create mode 100644 vendor/golang.org/x/net/route/sys_darwin.go create mode 100644 vendor/golang.org/x/net/route/sys_dragonfly.go create mode 100644 vendor/golang.org/x/net/route/sys_freebsd.go create mode 100644 vendor/golang.org/x/net/route/sys_netbsd.go create mode 100644 vendor/golang.org/x/net/route/sys_openbsd.go create mode 100644 vendor/golang.org/x/net/route/syscall.go create mode 100644 vendor/golang.org/x/net/route/zsys_darwin.go create mode 100644 vendor/golang.org/x/net/route/zsys_dragonfly.go create mode 100644 vendor/golang.org/x/net/route/zsys_freebsd_386.go create mode 100644 vendor/golang.org/x/net/route/zsys_freebsd_amd64.go create mode 100644 vendor/golang.org/x/net/route/zsys_freebsd_arm.go create mode 100644 vendor/golang.org/x/net/route/zsys_netbsd.go create mode 100644 vendor/golang.org/x/net/route/zsys_openbsd.go create mode 100644 vendor/golang.org/x/net/trace/events.go create mode 100644 vendor/golang.org/x/net/trace/histogram.go create mode 100644 vendor/golang.org/x/net/trace/histogram_test.go create mode 100644 vendor/golang.org/x/net/trace/trace.go create mode 100644 vendor/golang.org/x/net/trace/trace_go16.go create mode 100644 vendor/golang.org/x/net/trace/trace_go17.go create mode 100644 vendor/golang.org/x/net/trace/trace_test.go create mode 100644 vendor/golang.org/x/net/webdav/file.go create mode 100644 vendor/golang.org/x/net/webdav/file_go1.6.go create mode 100644 vendor/golang.org/x/net/webdav/file_go1.7.go create mode 100644 vendor/golang.org/x/net/webdav/file_test.go create mode 100644 vendor/golang.org/x/net/webdav/if.go create mode 100644 vendor/golang.org/x/net/webdav/if_test.go create mode 100644 vendor/golang.org/x/net/webdav/internal/xml/README create mode 100644 vendor/golang.org/x/net/webdav/internal/xml/atom_test.go create mode 100644 vendor/golang.org/x/net/webdav/internal/xml/example_test.go create mode 100644 vendor/golang.org/x/net/webdav/internal/xml/marshal.go create mode 100644 vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go create mode 100644 
vendor/golang.org/x/net/webdav/internal/xml/read.go create mode 100644 vendor/golang.org/x/net/webdav/internal/xml/read_test.go create mode 100644 vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go create mode 100644 vendor/golang.org/x/net/webdav/internal/xml/xml.go create mode 100644 vendor/golang.org/x/net/webdav/internal/xml/xml_test.go create mode 100644 vendor/golang.org/x/net/webdav/litmus_test_server.go create mode 100644 vendor/golang.org/x/net/webdav/lock.go create mode 100644 vendor/golang.org/x/net/webdav/lock_test.go create mode 100644 vendor/golang.org/x/net/webdav/prop.go create mode 100644 vendor/golang.org/x/net/webdav/prop_test.go create mode 100644 vendor/golang.org/x/net/webdav/webdav.go create mode 100644 vendor/golang.org/x/net/webdav/webdav_test.go create mode 100644 vendor/golang.org/x/net/webdav/xml.go create mode 100644 vendor/golang.org/x/net/webdav/xml_test.go create mode 100644 vendor/golang.org/x/net/websocket/client.go create mode 100644 vendor/golang.org/x/net/websocket/dial.go create mode 100644 vendor/golang.org/x/net/websocket/dial_test.go create mode 100644 vendor/golang.org/x/net/websocket/exampledial_test.go create mode 100644 vendor/golang.org/x/net/websocket/examplehandler_test.go create mode 100644 vendor/golang.org/x/net/websocket/hybi.go create mode 100644 vendor/golang.org/x/net/websocket/hybi_test.go create mode 100644 vendor/golang.org/x/net/websocket/server.go create mode 100644 vendor/golang.org/x/net/websocket/websocket.go create mode 100644 vendor/golang.org/x/net/websocket/websocket_test.go create mode 100644 vendor/golang.org/x/net/xsrftoken/xsrf.go create mode 100644 vendor/golang.org/x/net/xsrftoken/xsrf_test.go create mode 100644 vendor/golang.org/x/oauth2/.travis.yml create mode 100644 vendor/golang.org/x/oauth2/AUTHORS create mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTING.md create mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTORS create mode 100644 vendor/golang.org/x/oauth2/LICENSE create mode 100644 vendor/golang.org/x/oauth2/README.md create mode 100644 vendor/golang.org/x/oauth2/amazon/amazon.go create mode 100644 vendor/golang.org/x/oauth2/bitbucket/bitbucket.go create mode 100644 vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go create mode 100644 vendor/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go create mode 100644 vendor/golang.org/x/oauth2/example_test.go create mode 100644 vendor/golang.org/x/oauth2/facebook/facebook.go create mode 100644 vendor/golang.org/x/oauth2/fitbit/fitbit.go create mode 100644 vendor/golang.org/x/oauth2/foursquare/foursquare.go create mode 100644 vendor/golang.org/x/oauth2/github/github.go create mode 100644 vendor/golang.org/x/oauth2/google/appengine.go create mode 100644 vendor/golang.org/x/oauth2/google/appengine_hook.go create mode 100644 vendor/golang.org/x/oauth2/google/appengineflex_hook.go create mode 100644 vendor/golang.org/x/oauth2/google/default.go create mode 100644 vendor/golang.org/x/oauth2/google/doc_go19.go create mode 100644 vendor/golang.org/x/oauth2/google/doc_not_go19.go create mode 100644 vendor/golang.org/x/oauth2/google/example_test.go create mode 100644 vendor/golang.org/x/oauth2/google/go19.go create mode 100644 vendor/golang.org/x/oauth2/google/google.go create mode 100644 vendor/golang.org/x/oauth2/google/google_test.go create mode 100644 vendor/golang.org/x/oauth2/google/jwt.go create mode 100644 vendor/golang.org/x/oauth2/google/jwt_test.go create mode 100644 vendor/golang.org/x/oauth2/google/not_go19.go create mode 100644 
vendor/golang.org/x/oauth2/google/sdk.go create mode 100644 vendor/golang.org/x/oauth2/google/sdk_test.go create mode 100644 vendor/golang.org/x/oauth2/google/testdata/gcloud/credentials create mode 100644 vendor/golang.org/x/oauth2/google/testdata/gcloud/properties create mode 100644 vendor/golang.org/x/oauth2/heroku/heroku.go create mode 100644 vendor/golang.org/x/oauth2/hipchat/hipchat.go create mode 100644 vendor/golang.org/x/oauth2/internal/client_appengine.go create mode 100644 vendor/golang.org/x/oauth2/internal/doc.go create mode 100644 vendor/golang.org/x/oauth2/internal/oauth2.go create mode 100644 vendor/golang.org/x/oauth2/internal/token.go create mode 100644 vendor/golang.org/x/oauth2/internal/token_test.go create mode 100644 vendor/golang.org/x/oauth2/internal/transport.go create mode 100644 vendor/golang.org/x/oauth2/jira/jira.go create mode 100644 vendor/golang.org/x/oauth2/jira/jira_test.go create mode 100644 vendor/golang.org/x/oauth2/jws/jws.go create mode 100644 vendor/golang.org/x/oauth2/jws/jws_test.go create mode 100644 vendor/golang.org/x/oauth2/jwt/example_test.go create mode 100644 vendor/golang.org/x/oauth2/jwt/jwt.go create mode 100644 vendor/golang.org/x/oauth2/jwt/jwt_test.go create mode 100644 vendor/golang.org/x/oauth2/linkedin/linkedin.go create mode 100644 vendor/golang.org/x/oauth2/mailchimp/mailchimp.go create mode 100644 vendor/golang.org/x/oauth2/mailru/mailru.go create mode 100644 vendor/golang.org/x/oauth2/mediamath/mediamath.go create mode 100644 vendor/golang.org/x/oauth2/microsoft/microsoft.go create mode 100644 vendor/golang.org/x/oauth2/oauth2.go create mode 100644 vendor/golang.org/x/oauth2/oauth2_test.go create mode 100644 vendor/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go create mode 100644 vendor/golang.org/x/oauth2/paypal/paypal.go create mode 100644 vendor/golang.org/x/oauth2/slack/slack.go create mode 100644 vendor/golang.org/x/oauth2/spotify/spotify.go create mode 100644 vendor/golang.org/x/oauth2/token.go create mode 100644 vendor/golang.org/x/oauth2/token_test.go create mode 100644 vendor/golang.org/x/oauth2/transport.go create mode 100644 vendor/golang.org/x/oauth2/transport_test.go create mode 100644 vendor/golang.org/x/oauth2/twitch/twitch.go create mode 100644 vendor/golang.org/x/oauth2/uber/uber.go create mode 100644 vendor/golang.org/x/oauth2/vk/vk.go create mode 100644 vendor/golang.org/x/oauth2/yahoo/yahoo.go create mode 100644 vendor/golang.org/x/oauth2/yandex/yandex.go create mode 100644 vendor/google.golang.org/appengine/.travis.yml create mode 100644 vendor/google.golang.org/appengine/LICENSE create mode 100644 vendor/google.golang.org/appengine/README.md create mode 100644 vendor/google.golang.org/appengine/aetest/doc.go create mode 100644 vendor/google.golang.org/appengine/aetest/instance.go create mode 100644 vendor/google.golang.org/appengine/aetest/instance_classic.go create mode 100644 vendor/google.golang.org/appengine/aetest/instance_test.go create mode 100644 vendor/google.golang.org/appengine/aetest/instance_vm.go create mode 100644 vendor/google.golang.org/appengine/aetest/user.go create mode 100644 vendor/google.golang.org/appengine/appengine.go create mode 100644 vendor/google.golang.org/appengine/appengine_test.go create mode 100644 vendor/google.golang.org/appengine/appengine_vm.go create mode 100644 vendor/google.golang.org/appengine/blobstore/blobstore.go create mode 100644 vendor/google.golang.org/appengine/blobstore/blobstore_test.go create mode 100644 
vendor/google.golang.org/appengine/blobstore/read.go create mode 100644 vendor/google.golang.org/appengine/capability/capability.go create mode 100644 vendor/google.golang.org/appengine/channel/channel.go create mode 100644 vendor/google.golang.org/appengine/channel/channel_test.go create mode 100644 vendor/google.golang.org/appengine/cloudsql/cloudsql.go create mode 100644 vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go create mode 100644 vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go create mode 100644 vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go create mode 100644 vendor/google.golang.org/appengine/cmd/aedeploy/aedeploy.go create mode 100644 vendor/google.golang.org/appengine/cmd/aefix/ae.go create mode 100644 vendor/google.golang.org/appengine/cmd/aefix/ae_test.go create mode 100644 vendor/google.golang.org/appengine/cmd/aefix/fix.go create mode 100644 vendor/google.golang.org/appengine/cmd/aefix/main.go create mode 100644 vendor/google.golang.org/appengine/cmd/aefix/main_test.go create mode 100644 vendor/google.golang.org/appengine/cmd/aefix/typecheck.go create mode 100644 vendor/google.golang.org/appengine/datastore/datastore.go create mode 100644 vendor/google.golang.org/appengine/datastore/datastore_test.go create mode 100644 vendor/google.golang.org/appengine/datastore/doc.go create mode 100644 vendor/google.golang.org/appengine/datastore/key.go create mode 100644 vendor/google.golang.org/appengine/datastore/key_test.go create mode 100644 vendor/google.golang.org/appengine/datastore/load.go create mode 100644 vendor/google.golang.org/appengine/datastore/metadata.go create mode 100644 vendor/google.golang.org/appengine/datastore/prop.go create mode 100644 vendor/google.golang.org/appengine/datastore/prop_test.go create mode 100644 vendor/google.golang.org/appengine/datastore/query.go create mode 100644 vendor/google.golang.org/appengine/datastore/query_test.go create mode 100644 vendor/google.golang.org/appengine/datastore/save.go create mode 100644 vendor/google.golang.org/appengine/datastore/time_test.go create mode 100644 vendor/google.golang.org/appengine/datastore/transaction.go create mode 100644 vendor/google.golang.org/appengine/delay/delay.go create mode 100644 vendor/google.golang.org/appengine/delay/delay_test.go create mode 100644 vendor/google.golang.org/appengine/demos/guestbook/app.yaml create mode 100644 vendor/google.golang.org/appengine/demos/guestbook/favicon.ico create mode 100644 vendor/google.golang.org/appengine/demos/guestbook/guestbook.go create mode 100644 vendor/google.golang.org/appengine/demos/guestbook/index.yaml create mode 100644 vendor/google.golang.org/appengine/demos/guestbook/templates/guestbook.html create mode 100644 vendor/google.golang.org/appengine/demos/helloworld/app.yaml create mode 100644 vendor/google.golang.org/appengine/demos/helloworld/favicon.ico create mode 100644 vendor/google.golang.org/appengine/demos/helloworld/helloworld.go create mode 100644 vendor/google.golang.org/appengine/errors.go create mode 100644 vendor/google.golang.org/appengine/file/file.go create mode 100644 vendor/google.golang.org/appengine/identity.go create mode 100644 vendor/google.golang.org/appengine/image/image.go create mode 100644 vendor/google.golang.org/appengine/internal/aetesting/fake.go create mode 100644 vendor/google.golang.org/appengine/internal/api.go create mode 100644 vendor/google.golang.org/appengine/internal/api_classic.go create mode 100644 
vendor/google.golang.org/appengine/internal/api_common.go create mode 100644 vendor/google.golang.org/appengine/internal/api_race_test.go create mode 100644 vendor/google.golang.org/appengine/internal/api_test.go create mode 100644 vendor/google.golang.org/appengine/internal/app_id.go create mode 100644 vendor/google.golang.org/appengine/internal/app_id_test.go create mode 100644 vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.proto create mode 100644 vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/capability/capability_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/capability/capability_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/channel/channel_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/channel/channel_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go create mode 100755 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto create mode 100644 vendor/google.golang.org/appengine/internal/identity.go create mode 100644 vendor/google.golang.org/appengine/internal/identity_classic.go create mode 100644 vendor/google.golang.org/appengine/internal/identity_vm.go create mode 100644 vendor/google.golang.org/appengine/internal/image/images_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/image/images_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/internal.go create mode 100644 vendor/google.golang.org/appengine/internal/internal_vm_test.go create mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/mail/mail_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/mail/mail_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/main.go create mode 100644 vendor/google.golang.org/appengine/internal/main_vm.go create mode 100644 vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/metadata.go create mode 100644 vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/modules/modules_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/net.go create mode 100644 vendor/google.golang.org/appengine/internal/net_test.go create mode 100755 vendor/google.golang.org/appengine/internal/regen.sh create mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto create mode 100644 vendor/google.golang.org/appengine/internal/search/search.pb.go create mode 100644 
vendor/google.golang.org/appengine/internal/search/search.proto create mode 100644 vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/socket/socket_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/system/system_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/system/system_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/transaction.go create mode 100644 vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/user/user_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/user/user_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.proto create mode 100644 vendor/google.golang.org/appengine/log/api.go create mode 100644 vendor/google.golang.org/appengine/log/log.go create mode 100644 vendor/google.golang.org/appengine/log/log_test.go create mode 100644 vendor/google.golang.org/appengine/mail/mail.go create mode 100644 vendor/google.golang.org/appengine/mail/mail_test.go create mode 100644 vendor/google.golang.org/appengine/memcache/memcache.go create mode 100644 vendor/google.golang.org/appengine/memcache/memcache_test.go create mode 100644 vendor/google.golang.org/appengine/module/module.go create mode 100644 vendor/google.golang.org/appengine/module/module_test.go create mode 100644 vendor/google.golang.org/appengine/namespace.go create mode 100644 vendor/google.golang.org/appengine/namespace_test.go create mode 100644 vendor/google.golang.org/appengine/remote_api/client.go create mode 100644 vendor/google.golang.org/appengine/remote_api/client_test.go create mode 100644 vendor/google.golang.org/appengine/remote_api/remote_api.go create mode 100644 vendor/google.golang.org/appengine/runtime/runtime.go create mode 100644 vendor/google.golang.org/appengine/runtime/runtime_test.go create mode 100644 vendor/google.golang.org/appengine/search/doc.go create mode 100644 vendor/google.golang.org/appengine/search/field.go create mode 100644 vendor/google.golang.org/appengine/search/search.go create mode 100644 vendor/google.golang.org/appengine/search/search_test.go create mode 100644 vendor/google.golang.org/appengine/search/struct.go create mode 100644 vendor/google.golang.org/appengine/search/struct_test.go create mode 100644 vendor/google.golang.org/appengine/socket/doc.go create mode 100644 vendor/google.golang.org/appengine/socket/socket_classic.go create mode 100644 vendor/google.golang.org/appengine/socket/socket_vm.go create mode 100644 vendor/google.golang.org/appengine/taskqueue/taskqueue.go create mode 100644 vendor/google.golang.org/appengine/taskqueue/taskqueue_test.go create mode 100644 vendor/google.golang.org/appengine/timeout.go create mode 100644 vendor/google.golang.org/appengine/urlfetch/urlfetch.go create mode 100644 vendor/google.golang.org/appengine/user/oauth.go create mode 100644 vendor/google.golang.org/appengine/user/user.go create mode 100644 
vendor/google.golang.org/appengine/user/user_classic.go create mode 100644 vendor/google.golang.org/appengine/user/user_test.go create mode 100644 vendor/google.golang.org/appengine/user/user_vm.go create mode 100644 vendor/google.golang.org/appengine/xmpp/xmpp.go create mode 100644 vendor/google.golang.org/appengine/xmpp/xmpp_test.go diff --git a/Gopkg.lock b/Gopkg.lock new file mode 100644 index 0000000..4983f6f --- /dev/null +++ b/Gopkg.lock @@ -0,0 +1,99 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + name = "cloud.google.com/go" + packages = ["compute/metadata"] + revision = "29f476ffa9c4cd4fd14336b6043090ac1ad76733" + version = "v0.21.0" + +[[projects]] + name = "github.com/elazarl/go-bindata-assetfs" + packages = ["."] + revision = "30f82fa23fd844bd5bb1e5f216db87fd77b5eb43" + version = "v1.0.0" + +[[projects]] + name = "github.com/golang/protobuf" + packages = ["proto"] + revision = "925541529c1fa6821df4e44ce2723319eb2be768" + version = "v1.0.0" + +[[projects]] + name = "github.com/gorilla/context" + packages = ["."] + revision = "1ea25387ff6f684839d82767c1733ff4d4d15d0a" + version = "v1.1" + +[[projects]] + name = "github.com/gorilla/securecookie" + packages = ["."] + revision = "e59506cc896acb7f7bf732d4fdf5e25f7ccd8983" + version = "v1.1.1" + +[[projects]] + name = "github.com/gorilla/sessions" + packages = ["."] + revision = "ca9ada44574153444b00d3fd9c8559e4cc95f896" + version = "v1.1" + +[[projects]] + name = "github.com/inconshreveable/mousetrap" + packages = ["."] + revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" + version = "v1.0" + +[[projects]] + branch = "master" + name = "github.com/jmoiron/sqlx" + packages = [".","reflectx"] + revision = "2aeb6a910c2b94f2d5eb53d9895d80e27264ec41" + +[[projects]] + name = "github.com/kelseyhightower/envconfig" + packages = ["."] + revision = "f611eb38b3875cc3bd991ca91c51d06446afa14c" + version = "v1.3.0" + +[[projects]] + branch = "master" + name = "github.com/lib/pq" + packages = [".","oid"] + revision = "d34b9ff171c21ad295489235aec8b6626023cd04" + +[[projects]] + name = "github.com/spf13/cobra" + packages = ["."] + revision = "a1f051bc3eba734da4772d60e2d677f47cf93ef4" + version = "v0.0.2" + +[[projects]] + name = "github.com/spf13/pflag" + packages = ["."] + revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66" + version = "v1.0.0" + +[[projects]] + branch = "master" + name = "golang.org/x/net" + packages = ["context","context/ctxhttp"] + revision = "61147c48b25b599e5b561d2e9c4f3e1ef489ca41" + +[[projects]] + branch = "master" + name = "golang.org/x/oauth2" + packages = [".","google","internal","jws","jwt"] + revision = "921ae394b9430ed4fb549668d7b087601bd60a81" + +[[projects]] + name = "google.golang.org/appengine" + packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"] + revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a" + version = "v1.0.0" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "fd75a68c1bc7409dea5a6600e3170f8bd68cfd35ce068ea384e5a2beae0c6353" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml new file mode 100644 index 0000000..da6e54d --- /dev/null +++ b/Gopkg.toml @@ -0,0 +1,54 @@ + +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. 
+# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + + +[[constraint]] + name = "github.com/elazarl/go-bindata-assetfs" + version = "1.0.0" + +[[constraint]] + name = "github.com/gorilla/context" + version = "1.1.0" + +[[constraint]] + name = "github.com/gorilla/sessions" + version = "1.1.0" + +[[constraint]] + branch = "master" + name = "github.com/jmoiron/sqlx" + +[[constraint]] + name = "github.com/kelseyhightower/envconfig" + version = "1.3.0" + +[[constraint]] + branch = "master" + name = "github.com/lib/pq" + +[[constraint]] + name = "github.com/spf13/cobra" + version = "0.0.2" + +[[constraint]] + branch = "master" + name = "golang.org/x/oauth2" diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..472ac23 --- /dev/null +++ b/LICENSE @@ -0,0 +1,8 @@ +MIT License +Copyright (c) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/README.md b/README.md new file mode 100644 index 0000000..1e41d8a --- /dev/null +++ b/README.md @@ -0,0 +1,6 @@ +# chipmunk + +[![GoDoc](https://godoc.org/github.com/dmmcquay/chipmunk?status.svg)](https://godoc.org/github.com/dmmcquay/chipmunk) +[![Go Report Card](https://goreportcard.com/badge/github.com/dmmcquay/chipmunk)](https://goreportcard.com/report/github.com/dmmcquay/chipmunk) + +Budget planning and logging. 
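Since the two dep manifests above drive the vendor/ tree that makes up the bulk of this patch, a brief workflow sketch may help reviewers reproduce it (assuming the `dep` tool from github.com/golang/dep is installed; these commands are illustrative, not part of the patch):

``` sh
# populate vendor/ from the revisions pinned in Gopkg.lock
dep ensure

# build the daemon introduced below
go build ./cmd/hdxd
```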
diff --git a/cmd/hdxd/main.go b/cmd/hdxd/main.go new file mode 100644 index 0000000..f5bbd87 --- /dev/null +++ b/cmd/hdxd/main.go @@ -0,0 +1,116 @@
+package main
+
+import (
+    "fmt"
+    "log"
+    "math/rand"
+    "net/http"
+    "os"
+    "os/signal"
+    "time"
+
+    "github.com/gorilla/context"
+    "github.com/kelseyhightower/envconfig"
+    "github.com/spf13/cobra"
+    "s.mcquay.me/dm/hdx"
+)
+
+type Config struct {
+    Host   string
+    Port   int
+    DBHost string
+    DBName string
+    DBUser string
+}
+
+func init() {
+    rand.Seed(time.Now().UnixNano())
+}
+
+func main() {
+    var host string
+    var port int
+
+    var run = &cobra.Command{
+        Use:   "run",
+        Short: "run command",
+        Long:  `run hdxd with given options`,
+        Run: func(cmd *cobra.Command, args []string) {
+            config := &Config{
+                DBHost: "localhost",
+                DBName: "hdx",
+                DBUser: "dm",
+            }
+            err := envconfig.Process("hdxd", config)
+            if err != nil {
+                log.Fatal(err)
+            }
+            if host != "" {
+                config.Host = host
+            }
+            if port != -1 {
+                config.Port = port
+            } else {
+                if config.Port == 0 {
+                    config.Port = 8080
+                }
+            }
+            log.Printf("%+v", config)
+
+            sigs := make(chan os.Signal, 1)
+            signal.Notify(sigs, os.Interrupt)
+            go func() {
+                s := <-sigs
+                log.Printf("signal: %+v", s)
+                os.Exit(1)
+            }()
+
+            sm := http.NewServeMux()
+            _, err = hdx.NewServer(
+                sm,
+                config.DBHost,
+                config.DBName,
+                config.DBUser,
+                "",
+            )
+            if err != nil {
+                log.Fatalf("problem initializing hdxd server: %+v", err)
+            }
+
+            hostname := "localhost"
+            if config.Host == "" {
+                hostname, err = os.Hostname()
+                if err != nil {
+                    log.Printf("problem getting hostname: %v", err)
+                }
+            }
+            log.Printf("serving at: http://%s:%d/", hostname, config.Port)
+
+            addr := fmt.Sprintf("%s:%d", config.Host, config.Port)
+            err = http.ListenAndServe(addr, context.ClearHandler(sm))
+            if err != nil {
+                log.Printf("%+v", err)
+                os.Exit(1)
+            }
+        },
+    }
+
+    run.Flags().StringVarP(
+        &host,
+        "host",
+        "n",
+        "",
+        "hostname",
+    )
+    run.Flags().IntVarP(
+        &port,
+        "port",
+        "p",
+        -1,
+        "port",
+    )
+
+    var rootCmd = &cobra.Command{Use: "app"}
+    rootCmd.AddCommand(run)
+    rootCmd.Execute()
+}
diff --git a/db.go b/db.go new file mode 100644 index 0000000..4089b3a --- /dev/null +++ b/db.go @@ -0,0 +1,53 @@
+package hdx
+
+import (
+    "fmt"
+
+    "github.com/jmoiron/sqlx"
+    _ "github.com/lib/pq"
+)
+
+type DB struct {
+    db *sqlx.DB
+}
+
+func NewDB(dbhost, dbname, dbuser string) (*DB, error) {
+    var err error
+    config := fmt.Sprintf(
+        "user=%s dbname=%s host=%s sslmode=disable",
+        dbuser,
+        dbname,
+        dbhost,
+    )
+    db, err := sqlx.Connect(
+        "postgres",
+        config,
+    )
+    if err != nil {
+        return nil, err
+    }
+
+    d := &DB{db}
+    err = d.initializeDB()
+    return d, err
+}
+
+func (d *DB) initializeDB() error {
+    // create the ips table if it does not already exist
+    _, err := d.db.Exec(createdb)
+    return err
+
+}
+
+func (d *DB) getcount(ip string) (int, error) {
+    result := 0
+    row := d.db.QueryRow("SELECT COUNT(*) FROM ips WHERE ip = $1",
+        ip,
+    )
+
+    err := row.Scan(&result)
+    if err != nil {
+        return 0, err
+    }
+    return result, nil
+}
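For context, a minimal sketch (not part of the patch) of how `NewDB` and `getcount` fit together. It assumes a local Postgres reachable with the defaults from `cmd/hdxd/main.go` above (`user=dm`, `dbname=hdx`, `host=localhost`); the test name and sample address are made up for illustration:

``` go
package hdx

import "testing"

// TestGetCount is a hypothetical integration-test sketch: it inserts one
// row and checks that getcount sees at least that row. It skips when no
// local Postgres is available.
func TestGetCount(t *testing.T) {
	d, err := NewDB("localhost", "hdx", "dm")
	if err != nil {
		t.Skipf("postgres not available: %v", err)
	}
	if _, err := d.db.Exec(`INSERT INTO ips (ip) VALUES ($1)`, "203.0.113.7"); err != nil {
		t.Fatal(err)
	}
	n, err := d.getcount("203.0.113.7")
	if err != nil {
		t.Fatal(err)
	}
	if n < 1 {
		t.Fatalf("getcount = %d, want >= 1", n)
	}
}
```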
diff --git a/ip.go b/ip.go new file mode 100644 index 0000000..92d6103 --- /dev/null +++ b/ip.go @@ -0,0 +1,77 @@
+package hdx
+
+import (
+    "errors"
+    "net"
+    "net/http"
+    "strings"
+)
+
+var cidrs []*net.IPNet
+
+func init() {
+    maxCidrBlocks := []string{
+        "127.0.0.1/8",    // localhost
+        "10.0.0.0/8",     // 24-bit block
+        "172.16.0.0/12",  // 20-bit block
+        "192.168.0.0/16", // 16-bit block
+        "169.254.0.0/16", // link local address
+        "::1/128",        // localhost IPv6
+        "fc00::/7",       // unique local address IPv6
+        "fe80::/10",      // link local address IPv6
+    }
+
+    cidrs = make([]*net.IPNet, len(maxCidrBlocks))
+    for i, maxCidrBlock := range maxCidrBlocks {
+        _, cidr, _ := net.ParseCIDR(maxCidrBlock)
+        cidrs[i] = cidr
+    }
+}
+
+func getIP(r *http.Request) string {
+    // Fetch header values.
+    xRealIP := r.Header.Get("X-Real-Ip")
+    xForwardedFor := r.Header.Get("X-Forwarded-For")
+
+    // If both are empty, return the IP from the remote address.
+    if xRealIP == "" && xForwardedFor == "" {
+        var remoteIP string
+
+        // If there is a colon in the remote address, remove the port number;
+        // otherwise, return the remote address as is.
+        if strings.ContainsRune(r.RemoteAddr, ':') {
+            remoteIP, _, _ = net.SplitHostPort(r.RemoteAddr)
+        } else {
+            remoteIP = r.RemoteAddr
+        }
+
+        return remoteIP
+    }
+
+    // Check the list of IPs in X-Forwarded-For and return the first global address.
+    for _, address := range strings.Split(xForwardedFor, ",") {
+        address = strings.TrimSpace(address)
+        isPrivate, err := isPrivateAddress(address)
+        if !isPrivate && err == nil {
+            return address
+        }
+    }
+
+    // If nothing succeeds, return X-Real-IP.
+    return xRealIP
+}
+
+func isPrivateAddress(address string) (bool, error) {
+    ipAddress := net.ParseIP(address)
+    if ipAddress == nil {
+        return false, errors.New("address is not valid")
+    }
+
+    for i := range cidrs {
+        if cidrs[i].Contains(ipAddress) {
+            return true, nil
+        }
+    }
+
+    return false, nil
+}
diff --git a/routes.go b/routes.go new file mode 100644 index 0000000..013b4ab --- /dev/null +++ b/routes.go @@ -0,0 +1,19 @@
+package hdx
+
+import (
+    "net/http"
+)
+
+var prefix map[string]string
+
+func addRoutes(sm *http.ServeMux, server *Server, staticFiles string) {
+    prefix = map[string]string{
+        "info":   "/info/",
+        "health": "/healthz",
+        "visit":  "/",
+    }
+
+    sm.HandleFunc(prefix["visit"], server.visit)
+    sm.HandleFunc(prefix["info"], server.serverInfo)
+    sm.HandleFunc(prefix["health"], server.health)
+}
diff --git a/server.go b/server.go new file mode 100644 index 0000000..3be450b --- /dev/null +++ b/server.go @@ -0,0 +1,110 @@
+package hdx
+
+import (
+    "encoding/json"
+    "io"
+    "log"
+    "net"
+    "net/http"
+    "time"
+)
+
+var Version string = "dev"
+var start time.Time
+
+type failure struct {
+    Success bool   `json:"success"`
+    Error   string `json:"error"`
+}
+
+func NewFailure(msg string) *failure {
+    return &failure{
+        Success: false,
+        Error:   msg,
+    }
+}
+
+type Server struct {
+    db *DB
+}
+
+type visit struct {
+    IP    net.IP `json:"ip"`
+    Count int    `json:"count"`
+}
+
+func init() {
+    log.SetFlags(log.Ltime)
+    start = time.Now()
+}
+
+func NewServer(sm *http.ServeMux, dbhost, dbname, dbuser, static string) (*Server, error) {
+    db, err := NewDB(dbhost, dbname, dbuser)
+    if err != nil {
+        return nil, err
+    }
+    db.db.SetMaxOpenConns(32)
+
+    server := &Server{
+        db: db,
+    }
+    addRoutes(sm, server, static)
+    return server, nil
+}
+
+func (s *Server) serverInfo(w http.ResponseWriter, r *http.Request) {
+    output := struct {
+        Version string `json:"version"`
+        Start   string `json:"start"`
+        Uptime  string `json:"uptime"`
+    }{
+        Version: Version,
+        Start:   start.Format("2006-01-02 15:04:05"),
+        Uptime:  time.Since(start).String(),
+    }
+    w.Header().Set("Content-Type", "application/json")
+    json.NewEncoder(w).Encode(output)
+}
+
+func (s *Server) health(w http.ResponseWriter, r *http.Request) {
+    w.Header().Set("Content-Type", "application/json")
+    w.WriteHeader(http.StatusOK)
+    io.WriteString(w, `{"alive": true}`)
+}
+
+func (s *Server) visit(w http.ResponseWriter, req *http.Request) {
+    switch req.Method {
+    default:
+        b, _ := json.Marshal(NewFailure("Allowed methods: GET"))
+        http.Error(w, string(b), http.StatusMethodNotAllowed)
+        return
+    case "GET":
+        //ip := net.ParseIP(getIP(req))
+        ip := getIP(req)
+        if ip == "" {
+            log.Printf("did not receive valid ip")
+            b, _ := json.Marshal(NewFailure("did not receive valid ip"))
+            http.Error(w, string(b), http.StatusBadRequest)
+            return
+        }
+        _, err := s.db.db.Exec(
+            `INSERT INTO ips (ip) VALUES ($1)`,
+            ip,
+        )
+        if err != nil {
+            log.Printf("%+v", err)
+            b, _ := json.Marshal(NewFailure(err.Error()))
+            http.Error(w, string(b), http.StatusInternalServerError)
+            return
+        }
+        c, err := s.db.getcount(ip)
+        if err != nil {
+            log.Printf("%+v", err)
+            b, _ := json.Marshal(NewFailure(err.Error()))
+            http.Error(w, string(b), http.StatusInternalServerError)
+            return
+        }
+        v := visit{IP: net.ParseIP(ip), Count: c}
+        json.NewEncoder(w).Encode(v)
+    }
+}
diff --git a/sql.go b/sql.go new file mode 100644 index 0000000..97df358 --- /dev/null +++ b/sql.go @@ -0,0 +1,10 @@
+package hdx
+
+const createdb = `
+CREATE TABLE IF NOT EXISTS
+ips (
+    id SERIAL PRIMARY KEY,
+    ip VARCHAR(64),
+    time TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+);
+`
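To make the wiring above concrete, here is a hypothetical session against a running hdxd (assuming the defaults above: port 8080 and a local Postgres). The response shapes follow the JSON encodings defined in server.go; the specific values shown are illustrative only:

``` sh
# liveness probe
$ curl -s http://localhost:8080/healthz
{"alive": true}

# record a visit; getIP prefers the first global address in
# X-Forwarded-For over a private RemoteAddr
$ curl -s -H 'X-Forwarded-For: 203.0.113.7, 10.0.0.2' http://localhost:8080/
{"ip":"203.0.113.7","count":1}

# build and uptime metadata
$ curl -s http://localhost:8080/info/
{"version":"dev","start":"2018-04-10 19:20:01","uptime":"2m40.1s"}
```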
diff --git a/vendor/cloud.google.com/go/.travis.yml b/vendor/cloud.google.com/go/.travis.yml new file mode 100644 index 0000000..59594d4 --- /dev/null +++ b/vendor/cloud.google.com/go/.travis.yml @@ -0,0 +1,21 @@ +sudo: false +language: go +go: +- 1.6.x +- 1.7.x +- 1.8.x +- 1.9.x +install: +- go get -v cloud.google.com/go/... +script: +- openssl aes-256-cbc -K $encrypted_a8b3f4fc85f4_key -iv $encrypted_a8b3f4fc85f4_iv -in keys.tar.enc -out keys.tar -d +- tar xvf keys.tar +- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" + GCLOUD_TESTS_GOLANG_KEY="$(pwd)/dulcet-port-762-key.json" + GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID="gcloud-golang-firestore-tests" + GCLOUD_TESTS_GOLANG_FIRESTORE_KEY="$(pwd)/gcloud-golang-firestore-tests-key.json" + ./run-tests.sh $TRAVIS_COMMIT +env: + matrix: + # The GCLOUD_TESTS_API_KEY environment variable. + secure: VdldogUOoubQ60LhuHJ+g/aJoBiujkSkWEWl79Zb8cvQorcQbxISS+JsOOp4QkUOU4WwaHAm8/3pIH1QMWOR6O78DaLmDKi5Q4RpkVdCpUXy+OAfQaZIcBsispMrjxLXnqFjo9ELnrArfjoeCTzaX0QTCfwQwVmigC8rR30JBKI= diff --git a/vendor/cloud.google.com/go/AUTHORS b/vendor/cloud.google.com/go/AUTHORS new file mode 100644 index 0000000..c364af1 --- /dev/null +++ b/vendor/cloud.google.com/go/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of cloud authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as: +# Name or Organization +# The email address is not required for organizations. + +Filippo Valsorda +Google Inc. +Ingo Oeser +Palm Stone Games, Inc. +Paweł Knap +Péter Szilágyi +Tyler Treat diff --git a/vendor/cloud.google.com/go/CONTRIBUTING.md b/vendor/cloud.google.com/go/CONTRIBUTING.md new file mode 100644 index 0000000..95c94a4 --- /dev/null +++ b/vendor/cloud.google.com/go/CONTRIBUTING.md @@ -0,0 +1,152 @@ +# Contributing + +1. Sign one of the contributor license agreements below. +1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool. + 1. You will need to ensure that your `GOBIN` directory (by default + `$GOPATH/bin`) is in your `PATH` so that git can find the command. + 1. If you would like, you may want to set up aliases for git-codereview, + such that `git codereview change` becomes `git change`. See the + [godoc](https://godoc.org/golang.org/x/review/git-codereview) for details. + 1.
Should you run into issues with the git-codereview tool, please note + that all error messages will assume that you have set up these + aliases. +1. Get the cloud package by running `go get -d cloud.google.com/go`. + 1. If you have already checked out the source, make sure that the remote git + origin is https://code.googlesource.com/gocloud: + + git remote set-url origin https://code.googlesource.com/gocloud +1. Make sure your auth is configured correctly by visiting + https://code.googlesource.com, clicking "Generate Password", and following + the directions. +1. Make changes and create a change by running `git codereview change`, +provide a commit message, and use `git codereview mail` to create a Gerrit CL. +1. Keep amending the change with `git codereview change` and mail as you receive +feedback. Each new mailed amendment will create a new patch set for your change in Gerrit. + +## Integration Tests + +In addition to the unit tests, you may run the integration test suite. + +To run the integration tests, you must create and configure a project in the +Google Developers Console. + +After creating a project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount). +Ensure the project-level **Owner** +[IAM role](https://console.cloud.google.com/iam-admin/iam/project) is added to the +service account. Alternatively, the account can be granted all of the following roles: +- **Editor** +- **Logs Configuration Writer** +- **PubSub Admin** + +Once you create a project, set the following environment variables to be able to +run against the actual APIs. + +- **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455) +- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file. +- **GCLOUD_TESTS_API_KEY**: Your API key. + +Firestore requires a different project and key: + +- **GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID**: Developers Console project's ID + supporting Firestore +- **GCLOUD_TESTS_GOLANG_FIRESTORE_KEY**: The path to the JSON key file. + +Install the [gcloud command-line tool][gcloudcli] on your machine and use it +to create some resources used in integration tests. + +From the project's root directory: + +``` sh +# Set the default project in your env. +$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID + +# Authenticate the gcloud tool with your account. +$ gcloud auth login + +# Create the indexes used in the datastore integration tests. +$ gcloud preview datastore create-indexes datastore/testdata/index.yaml + +# Create a Google Cloud storage bucket with the same name as your test project, +# and with the Stackdriver Logging service account as owner, for the sink +# integration tests in logging. +$ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID +$ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID + +# Create a PubSub topic for integration tests of storage notifications. +$ gcloud beta pubsub topics create go-storage-notification-test + +# Create a Spanner instance for the spanner integration tests. +$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 1 --description 'Instance for go client test' +# NOTE: Spanner instances are priced by the node-hour, so you may want to delete +# the instance after testing with 'gcloud beta spanner instances delete'.
+ + +``` + +Once you've set the environment variables, you can run the integration tests by +running: + +``` sh +$ go test -v cloud.google.com/go/... +``` + +## Contributor License Agreements + +Before we can accept your pull requests you'll need to sign a Contributor +License Agreement (CLA): + +- **If you are an individual writing original source code** and **you own the +intellectual property**, then you'll need to sign an [individual CLA][indvcla]. +- **If you work for a company that wants to allow you to contribute your +work**, then you'll need to sign a [corporate CLA][corpcla]. + +You can sign these electronically (just scroll to the bottom). After that, +we'll be able to accept your pull requests. + +## Contributor Code of Conduct + +As contributors and maintainers of this project, +and in the interest of fostering an open and welcoming community, +we pledge to respect all people who contribute through reporting issues, +posting feature requests, updating documentation, +submitting pull requests or patches, and other activities. + +We are committed to making participation in this project +a harassment-free experience for everyone, +regardless of level of experience, gender, gender identity and expression, +sexual orientation, disability, personal appearance, +body size, race, ethnicity, age, religion, or nationality. + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery +* Personal attacks +* Trolling or insulting/derogatory comments +* Public or private harassment +* Publishing other's private information, +such as physical or electronic +addresses, without explicit permission +* Other unethical or unprofessional conduct. + +Project maintainers have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct. +By adopting this Code of Conduct, +project maintainers commit themselves to fairly and consistently +applying these principles to every aspect of managing this project. +Project maintainers who do not follow or enforce the Code of Conduct +may be permanently removed from the project team. + +This code of conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. + +Instances of abusive, harassing, or otherwise unacceptable behavior +may be reported by opening an issue +or contacting one or more of the project maintainers. + +This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, +available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) + +[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/ +[indvcla]: https://developers.google.com/open-source/cla/individual +[corpcla]: https://developers.google.com/open-source/cla/corporate diff --git a/vendor/cloud.google.com/go/CONTRIBUTORS b/vendor/cloud.google.com/go/CONTRIBUTORS new file mode 100644 index 0000000..3b3cbed --- /dev/null +++ b/vendor/cloud.google.com/go/CONTRIBUTORS @@ -0,0 +1,40 @@ +# People who have agreed to one of the CLAs and can contribute patches. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. 
+# +# https://developers.google.com/open-source/cla/individual +# https://developers.google.com/open-source/cla/corporate +# +# Names should be added to this file as: +# Name + +# Keep the list alphabetically sorted. + +Alexis Hunt +Andreas Litt +Andrew Gerrand +Brad Fitzpatrick +Burcu Dogan +Dave Day +David Sansome +David Symonds +Filippo Valsorda +Glenn Lewis +Ingo Oeser +James Hall +Johan Euphrosine +Jonathan Amsterdam +Kunpei Sakai +Luna Duclos +Magnus Hiie +Mario Castro +Michael McGreevy +Omar Jarjur +Paweł Knap +Péter Szilágyi +Sarah Adams +Thanatat Tamtan +Toby Burress +Tuo Shan +Tyler Treat diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/vendor/cloud.google.com/go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner.
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cloud.google.com/go/MIGRATION.md b/vendor/cloud.google.com/go/MIGRATION.md new file mode 100644 index 0000000..791210d --- /dev/null +++ b/vendor/cloud.google.com/go/MIGRATION.md @@ -0,0 +1,54 @@ +# Code Changes + +## v0.10.0 + +- pubsub: Replace + + ``` + sub.ModifyPushConfig(ctx, pubsub.PushConfig{Endpoint: "https://example.com/push"}) + ``` + + with + + ``` + sub.Update(ctx, pubsub.SubscriptionConfigToUpdate{ + PushConfig: &pubsub.PushConfig{Endpoint: "https://example.com/push"}, + }) + ``` + +- trace: trace.GRPCServerInterceptor will be provided from *trace.Client. +Given an initialized `*trace.Client` named `tc`, instead of + + ``` + s := grpc.NewServer(grpc.UnaryInterceptor(trace.GRPCServerInterceptor(tc))) + ``` + + write + + ``` + s := grpc.NewServer(grpc.UnaryInterceptor(tc.GRPCServerInterceptor())) + ``` + +- trace: trace.GRPCClientInterceptor will also be provided from *trace.Client. +Instead of + + ``` + conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(trace.GRPCClientInterceptor())) + ``` + + write + + ``` + conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor())) + ``` + +- trace: We removed the deprecated `trace.EnableGRPCTracing`. Use the gRPC +interceptor as a dial option as shown below when initializing Cloud package +clients: + + ``` + c, err := pubsub.NewClient(ctx, "project-id", option.WithGRPCDialOption(grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor()))) + if err != nil { + ... + } + ``` diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md new file mode 100644 index 0000000..769a731 --- /dev/null +++ b/vendor/cloud.google.com/go/README.md @@ -0,0 +1,593 @@ +# Google Cloud Client Libraries for Go + +[![GoDoc](https://godoc.org/cloud.google.com/go?status.svg)](https://godoc.org/cloud.google.com/go) + +Go packages for [Google Cloud Platform](https://cloud.google.com) services. + +``` go +import "cloud.google.com/go" +``` + +To install the packages on your system, + +``` +$ go get -u cloud.google.com/go/... +``` + +**NOTE:** Some of these packages are under development, and may occasionally +make backwards-incompatible changes. + +**NOTE:** This GitHub repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud).
+ + * [News](#news) + * [Supported APIs](#supported-apis) + * [Go Versions Supported](#go-versions-supported) + * [Authorization](#authorization) + * [Cloud Datastore](#cloud-datastore-) + * [Cloud Storage](#cloud-storage-) + * [Cloud Pub/Sub](#cloud-pub-sub-) + * [Cloud BigQuery](#cloud-bigquery-) + * [Stackdriver Logging](#stackdriver-logging-) + * [Cloud Spanner](#cloud-spanner-) + + +## News + +_April 9, 2018_ + +*v0.21.0* + +- bigquery: + - Add OpenCensus tracing. + +- firestore: + - **BREAKING CHANGE:** If a document does not exist, return a DocumentSnapshot + whose Exists method returns false. DocumentRef.Get and Transaction.Get + return the non-nil DocumentSnapshot in addition to a NotFound error. + **DocumentRef.GetAll and Transaction.GetAll return a non-nil + DocumentSnapshot instead of nil.** + - Add DocumentIterator.Stop. **Call Stop whenever you are done with a + DocumentIterator.** + - Added Query.Snapshots and DocumentRef.Snapshots, which provide realtime + notification of updates. See https://cloud.google.com/firestore/docs/query-data/listen. + - Canceling an RPC now always returns a grpc.Status with codes.Canceled. + +- spanner: + - Add `CommitTimestamp`, which supports inserting the commit timestamp of a + transaction into a column. + +_March 22, 2018_ + +*v0.20.0* + +- bigquery: Support SchemaUpdateOptions for load jobs. + +- bigtable: + - Add SampleRowKeys. + - cbt: Support union, intersection GCPolicy. + - Retry admin RPCs. + - Add trace spans to retries. + +- datastore: Add OpenCensus tracing. + +- firestore: + - Fix queries involving Null and NaN. + - Allow Timestamp protobuffers for time values. + +- logging: Add a WriteTimeout option. + +- spanner: Support Batch API. + +- storage: Add OpenCensus tracing. + + +_February 26, 2018_ + +*v0.19.0* + +- bigquery: + - Support customer-managed encryption keys. + +- bigtable: + - Improved emulator support. + - Support GetCluster. + +- datastore: + - Add general mutations. + - Support pointer struct fields. + - Support transaction options. + +- firestore: + - Add Transaction.GetAll. + - Support document cursors. + +- logging: + - Support concurrent RPCs to the service. + - Support per-entry resources. + +- profiler: + - Add config options to disable heap and thread profiling. + - Read the project ID from $GOOGLE_CLOUD_PROJECT when it's set. + +- pubsub: + - BEHAVIOR CHANGE: Release flow control after ack/nack (instead of after the + callback returns). + - Add SubscriptionInProject. + - Add OpenCensus instrumentation for streaming pull. + +- storage: + - Support CORS. + + +_January 18, 2018_ + +*v0.18.0* + +- bigquery: + - Marked stable. + - Schema inference of nullable fields supported. + - Added TimePartitioning to QueryConfig. + +- firestore: Data provided to DocumentRef.Set with a Merge option can contain + Delete sentinels. + +- logging: Clients can accept parent resources other than projects. + +- pubsub: + - pubsub/pstest: A lightweight fake for pubsub. Experimental; feedback welcome. + - Support updating more subscription metadata: AckDeadline, + RetainAckedMessages and RetentionDuration. + +- oslogin/apiv1beta: New client for the Cloud OS Login API. + +- rpcreplay: A package for recording and replaying gRPC traffic. + +- spanner: + - Add a ReadWithOptions that supports a row limit, as well as an index. + - Support query plan and execution statistics. + - Added [OpenCensus](http://opencensus.io) support.
+ +- storage: Clarify checksum validation for gzipped files (it is not validated + when the file is served uncompressed). + + +_December 11, 2017_ + +*v0.17.0* + +- firestore BREAKING CHANGES: + - Remove UpdateMap and UpdateStruct; rename UpdatePaths to Update. + Change + `docref.UpdateMap(ctx, map[string]interface{}{"a.b", 1})` + to + `docref.Update(ctx, []firestore.Update{{Path: "a.b", Value: 1}})` + + Change + `docref.UpdateStruct(ctx, []string{"Field"}, aStruct)` + to + `docref.Update(ctx, []firestore.Update{{Path: "Field", Value: aStruct.Field}})` + - Rename MergePaths to Merge; require args to be FieldPaths + - A value stored as an integer can be read into a floating-point field, and vice versa. +- bigtable/cmd/cbt: + - Support deleting a column. + - Add regex option for row read. +- spanner: Mark stable. +- storage: + - Add Reader.ContentEncoding method. + - Fix handling of SignedURL headers. +- bigquery: + - If Uploader.Put is called with no rows, it returns nil without making a + call. + - Schema inference supports the "nullable" option in struct tags for + non-required fields. + - TimePartitioning supports "Field". + + +[Older news](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/old-news.md) + +## Supported APIs + +Google API | Status | Package +---------------------------------|--------------|----------------------------------------------------------- +[BigQuery][cloud-bigquery] | stable | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref] +[Bigtable][cloud-bigtable] | stable | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref] +[Container][cloud-container] | alpha | [`cloud.google.com/go/container/apiv1`][cloud-container-ref] +[Data Loss Prevention][cloud-dlp]| alpha | [`cloud.google.com/go/dlp/apiv2beta1`][cloud-dlp-ref] +[Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`][cloud-datastore-ref] +[Debugger][cloud-debugger] | alpha | [`cloud.google.com/go/debugger/apiv2`][cloud-debugger-ref] +[ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errorreporting`][cloud-errors-ref] +[Firestore][cloud-firestore] | beta | [`cloud.google.com/go/firestore`][cloud-firestore-ref] +[Language][cloud-language] | stable | [`cloud.google.com/go/language/apiv1`][cloud-language-ref] +[Logging][cloud-logging] | stable | [`cloud.google.com/go/logging`][cloud-logging-ref] +[Monitoring][cloud-monitoring] | beta | [`cloud.google.com/go/monitoring/apiv3`][cloud-monitoring-ref] +[OS Login][cloud-oslogin] | alpha | [`cloud.google.com/compute/docs/oslogin/rest`][cloud-oslogin-ref] +[Pub/Sub][cloud-pubsub] | beta | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref] +[Spanner][cloud-spanner] | stable | [`cloud.google.com/go/spanner`][cloud-spanner-ref] +[Speech][cloud-speech] | stable | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref] +[Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`][cloud-storage-ref] +[Translation][cloud-translation] | stable | [`cloud.google.com/go/translate`][cloud-translation-ref] +[Video Intelligence][cloud-video]| beta | [`cloud.google.com/go/videointelligence/apiv1beta1`][cloud-video-ref] +[Vision][cloud-vision] | stable | [`cloud.google.com/go/vision/apiv1`][cloud-vision-ref] + + +> **Alpha status**: the API is still being actively developed. As a +> result, it might change in backward-incompatible ways and is not recommended +> for production use. +> +> **Beta status**: the API is largely complete, but still has outstanding +> features and bugs to be addressed. 
There may be minor backwards-incompatible +> changes where necessary. +> +> **Stable status**: the API is mature and ready for production use. We will +> continue addressing bugs and feature requests. + +Documentation and examples are available at +https://godoc.org/cloud.google.com/go + +Visit or join the +[google-api-go-announce group](https://groups.google.com/forum/#!forum/google-api-go-announce) +for updates on these packages. + +## Go Versions Supported + +We support the two most recent major versions of Go. If Google App Engine uses +an older version, we support that as well. You can see which versions are +currently supported by looking at the lines following `go:` in +[`.travis.yml`](.travis.yml). + +## Authorization + +By default, each API will use [Google Application Default Credentials][default-creds] +for authorization credentials used in calling the API endpoints. This will allow your +application to run in many environments without requiring explicit configuration. + +[snip]:# (auth) +```go +client, err := storage.NewClient(ctx) +``` + +To authorize using a +[JSON key file](https://cloud.google.com/iam/docs/managing-service-account-keys), +pass +[`option.WithServiceAccountFile`](https://godoc.org/google.golang.org/api/option#WithServiceAccountFile) +to the `NewClient` function of the desired package. For example: + +[snip]:# (auth-JSON) +```go +client, err := storage.NewClient(ctx, option.WithServiceAccountFile("path/to/keyfile.json")) +``` + +You can exert more control over authorization by using the +[`golang.org/x/oauth2`](https://godoc.org/golang.org/x/oauth2) package to +create an `oauth2.TokenSource`. Then pass +[`option.WithTokenSource`](https://godoc.org/google.golang.org/api/option#WithTokenSource) +to the `NewClient` function: +[snip]:# (auth-ts) +```go +tokenSource := ... 
+client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource)) +``` + +## Cloud Datastore [![GoDoc](https://godoc.org/cloud.google.com/go/datastore?status.svg)](https://godoc.org/cloud.google.com/go/datastore) + +- [About Cloud Datastore][cloud-datastore] +- [Activating the API for your project][cloud-datastore-activation] +- [API documentation][cloud-datastore-docs] +- [Go client documentation](https://godoc.org/cloud.google.com/go/datastore) +- [Complete sample program](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/datastore/tasks) + +### Example Usage + +First create a `datastore.Client` to use throughout your application: + +[snip]:# (datastore-1) +```go +client, err := datastore.NewClient(ctx, "my-project-id") +if err != nil { + log.Fatal(err) +} +``` + +Then use that client to interact with the API: + +[snip]:# (datastore-2) +```go +type Post struct { + Title string + Body string `datastore:",noindex"` + PublishedAt time.Time +} +keys := []*datastore.Key{ + datastore.NameKey("Post", "post1", nil), + datastore.NameKey("Post", "post2", nil), +} +posts := []*Post{ + {Title: "Post 1", Body: "...", PublishedAt: time.Now()}, + {Title: "Post 2", Body: "...", PublishedAt: time.Now()}, +} +if _, err := client.PutMulti(ctx, keys, posts); err != nil { + log.Fatal(err) +} +``` + +## Cloud Storage [![GoDoc](https://godoc.org/cloud.google.com/go/storage?status.svg)](https://godoc.org/cloud.google.com/go/storage) + +- [About Cloud Storage][cloud-storage] +- [API documentation][cloud-storage-docs] +- [Go client documentation](https://godoc.org/cloud.google.com/go/storage) +- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/storage) + +### Example Usage + +First create a `storage.Client` to use throughout your application: + +[snip]:# (storage-1) +```go +client, err := storage.NewClient(ctx) +if err != nil { + log.Fatal(err) +} +``` + +[snip]:# (storage-2) +```go +// Read the object1 from bucket. +rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx) +if err != nil { + log.Fatal(err) +} +defer rc.Close() +body, err := ioutil.ReadAll(rc) +if err != nil { + log.Fatal(err) +} +``` + +## Cloud Pub/Sub [![GoDoc](https://godoc.org/cloud.google.com/go/pubsub?status.svg)](https://godoc.org/cloud.google.com/go/pubsub) + +- [About Cloud Pubsub][cloud-pubsub] +- [API documentation][cloud-pubsub-docs] +- [Go client documentation](https://godoc.org/cloud.google.com/go/pubsub) +- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/pubsub) + +### Example Usage + +First create a `pubsub.Client` to use throughout your application: + +[snip]:# (pubsub-1) +```go +client, err := pubsub.NewClient(ctx, "project-id") +if err != nil { + log.Fatal(err) +} +``` + +Then use the client to publish and subscribe: + +[snip]:# (pubsub-2) +```go +// Publish "hello world" on topic1. +topic := client.Topic("topic1") +res := topic.Publish(ctx, &pubsub.Message{ + Data: []byte("hello world"), +}) +// The publish happens asynchronously. +// Later, you can get the result from res: +... +msgID, err := res.Get(ctx) +if err != nil { + log.Fatal(err) +} + +// Use a callback to receive messages via subscription1. +sub := client.Subscription("subscription1") +err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) { + fmt.Println(m.Data) + m.Ack() // Acknowledge that we've consumed the message. 
+}) +if err != nil { + log.Println(err) +} +``` + +## Cloud BigQuery [![GoDoc](https://godoc.org/cloud.google.com/go/bigquery?status.svg)](https://godoc.org/cloud.google.com/go/bigquery) + +- [About Cloud BigQuery][cloud-bigquery] +- [API documentation][cloud-bigquery-docs] +- [Go client documentation][cloud-bigquery-ref] +- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/bigquery) + +### Example Usage + +First create a `bigquery.Client` to use throughout your application: +[snip]:# (bq-1) +```go +c, err := bigquery.NewClient(ctx, "my-project-ID") +if err != nil { + // TODO: Handle error. +} +``` + +Then use that client to interact with the API: +[snip]:# (bq-2) +```go +// Construct a query. +q := c.Query(` + SELECT year, SUM(number) + FROM [bigquery-public-data:usa_names.usa_1910_2013] + WHERE name = "William" + GROUP BY year + ORDER BY year +`) +// Execute the query. +it, err := q.Read(ctx) +if err != nil { + // TODO: Handle error. +} +// Iterate through the results. +for { + var values []bigquery.Value + err := it.Next(&values) + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(values) +} +``` + + +## Stackdriver Logging [![GoDoc](https://godoc.org/cloud.google.com/go/logging?status.svg)](https://godoc.org/cloud.google.com/go/logging) + +- [About Stackdriver Logging][cloud-logging] +- [API documentation][cloud-logging-docs] +- [Go client documentation][cloud-logging-ref] +- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/logging) + +### Example Usage + +First create a `logging.Client` to use throughout your application: +[snip]:# (logging-1) +```go +ctx := context.Background() +client, err := logging.NewClient(ctx, "my-project") +if err != nil { + // TODO: Handle error. +} +``` + +Usually, you'll want to add log entries to a buffer to be periodically flushed +(automatically and asynchronously) to the Stackdriver Logging service. +[snip]:# (logging-2) +```go +logger := client.Logger("my-log") +logger.Log(logging.Entry{Payload: "something happened!"}) +``` + +Close your client before your program exits, to flush any buffered log entries. +[snip]:# (logging-3) +```go +err = client.Close() +if err != nil { + // TODO: Handle error. +} +``` + +## Cloud Spanner [![GoDoc](https://godoc.org/cloud.google.com/go/spanner?status.svg)](https://godoc.org/cloud.google.com/go/spanner) + +- [About Cloud Spanner][cloud-spanner] +- [API documentation][cloud-spanner-docs] +- [Go client documentation](https://godoc.org/cloud.google.com/go/spanner) + +### Example Usage + +First create a `spanner.Client` to use throughout your application: + +[snip]:# (spanner-1) +```go +client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D") +if err != nil { + log.Fatal(err) +} +``` + +[snip]:# (spanner-2) +```go +// Simple Reads And Writes +_, err = client.Apply(ctx, []*spanner.Mutation{ + spanner.Insert("Users", + []string{"name", "email"}, + []interface{}{"alice", "a@example.com"})}) +if err != nil { + log.Fatal(err) +} +row, err := client.Single().ReadRow(ctx, "Users", + spanner.Key{"alice"}, []string{"email"}) +if err != nil { + log.Fatal(err) +} +``` + + +## Contributing + +Contributions are welcome. Please, see the +[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md) +document for details. We're using Gerrit for our code reviews. 
Please don't open pull
+requests against this repo; new pull requests will be automatically closed.
+
+Please note that this project is released with a Contributor Code of Conduct.
+By participating in this project you agree to abide by its terms.
+See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md#contributor-code-of-conduct)
+for more information.
+
+[cloud-datastore]: https://cloud.google.com/datastore/
+[cloud-datastore-ref]: https://godoc.org/cloud.google.com/go/datastore
+[cloud-datastore-docs]: https://cloud.google.com/datastore/docs
+[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate
+
+[cloud-firestore]: https://cloud.google.com/firestore/
+[cloud-firestore-ref]: https://godoc.org/cloud.google.com/go/firestore
+[cloud-firestore-docs]: https://cloud.google.com/firestore/docs
+[cloud-firestore-activation]: https://cloud.google.com/firestore/docs/activate
+
+[cloud-pubsub]: https://cloud.google.com/pubsub/
+[cloud-pubsub-ref]: https://godoc.org/cloud.google.com/go/pubsub
+[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs
+
+[cloud-storage]: https://cloud.google.com/storage/
+[cloud-storage-ref]: https://godoc.org/cloud.google.com/go/storage
+[cloud-storage-docs]: https://cloud.google.com/storage/docs
+[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets
+
+[cloud-bigtable]: https://cloud.google.com/bigtable/
+[cloud-bigtable-ref]: https://godoc.org/cloud.google.com/go/bigtable
+
+[cloud-bigquery]: https://cloud.google.com/bigquery/
+[cloud-bigquery-docs]: https://cloud.google.com/bigquery/docs
+[cloud-bigquery-ref]: https://godoc.org/cloud.google.com/go/bigquery
+
+[cloud-logging]: https://cloud.google.com/logging/
+[cloud-logging-docs]: https://cloud.google.com/logging/docs
+[cloud-logging-ref]: https://godoc.org/cloud.google.com/go/logging
+
+[cloud-monitoring]: https://cloud.google.com/monitoring/
+[cloud-monitoring-ref]: https://godoc.org/cloud.google.com/go/monitoring/apiv3
+
+[cloud-vision]: https://cloud.google.com/vision
+[cloud-vision-ref]: https://godoc.org/cloud.google.com/go/vision/apiv1
+
+[cloud-language]: https://cloud.google.com/natural-language
+[cloud-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1
+
+[cloud-oslogin]: https://cloud.google.com/compute/docs/oslogin/rest
+[cloud-oslogin-ref]: https://cloud.google.com/compute/docs/oslogin/rest
+
+[cloud-speech]: https://cloud.google.com/speech
+[cloud-speech-ref]: https://godoc.org/cloud.google.com/go/speech/apiv1
+
+[cloud-spanner]: https://cloud.google.com/spanner/
+[cloud-spanner-ref]: https://godoc.org/cloud.google.com/go/spanner
+[cloud-spanner-docs]: https://cloud.google.com/spanner/docs
+
+[cloud-translation]: https://cloud.google.com/translation
+[cloud-translation-ref]: https://godoc.org/cloud.google.com/go/translation
+
+[cloud-video]: https://cloud.google.com/video-intelligence/
+[cloud-video-ref]: https://godoc.org/cloud.google.com/go/videointelligence/apiv1beta1
+
+[cloud-errors]: https://cloud.google.com/error-reporting/
+[cloud-errors-ref]: https://godoc.org/cloud.google.com/go/errorreporting
+
+[cloud-container]: https://cloud.google.com/containers/
+[cloud-container-ref]: https://godoc.org/cloud.google.com/go/container/apiv1
+
+[cloud-debugger]: https://cloud.google.com/debugger/
+[cloud-debugger-ref]: https://godoc.org/cloud.google.com/go/debugger/apiv2
+
+[cloud-dlp]: https://cloud.google.com/dlp/
+[cloud-dlp-ref]: 
https://godoc.org/cloud.google.com/go/dlp/apiv2beta1 + +[default-creds]: https://developers.google.com/identity/protocols/application-default-credentials diff --git a/vendor/cloud.google.com/go/appveyor.yml b/vendor/cloud.google.com/go/appveyor.yml new file mode 100644 index 0000000..e66cd00 --- /dev/null +++ b/vendor/cloud.google.com/go/appveyor.yml @@ -0,0 +1,32 @@ +# This file configures AppVeyor (http://www.appveyor.com), +# a Windows-based CI service similar to Travis. + +# Identifier for this run +version: "{build}" + +# Clone the repo into this path, which conforms to the standard +# Go workspace structure. +clone_folder: c:\gopath\src\cloud.google.com\go + +environment: + GOPATH: c:\gopath + GCLOUD_TESTS_GOLANG_PROJECT_ID: dulcet-port-762 + GCLOUD_TESTS_GOLANG_KEY: c:\gopath\src\cloud.google.com\go\key.json + KEYFILE_CONTENTS: + secure: IvRbDAhM2PIQqzVkjzJ4FjizUvoQ+c3vG/qhJQG+HlZ/L5KEkqLu+x6WjLrExrNMyGku4znB2jmbTrUW3Ob4sGG+R5vvqeQ3YMHCVIkw5CxY+/bUDkW5RZWsVbuCnNa/vKsWmCP+/sZW6ICe29yKJ2ZOb6QaauI4s9R6j+cqBbU9pumMGYFRb0Rw3uUU7DKmVFCy+NjTENZIlDP9rmjANgAzigowJJEb2Tg9sLlQKmQeKiBSRN8lKc5Nq60a+fIzHGKvql4eIitDDDpOpyHv15/Xr1BzFw2yDoiR4X1lng0u7q0X9RgX4VIYa6gT16NXBEmQgbuX8gh7SfPMp9RhiZD9sVUaV+yogEabYpyPnmUURo0hXwkctKaBkQlEmKvjHwF5dvbg8+yqGhwtjAgFNimXG3INrwQsfQsZskkQWanutbJf9xy50GyWWFZZdi0uT4oXP/b5P7aklPXKXsvrJKBh7RjEaqBrhi86IJwOjBspvoR4l2WmcQyxb2xzQS1pjbBJFQfYJJ8+JgsstTL8PBO9d4ybJC0li1Om1qnWxkaewvPxxuoHJ9LpRKof19yRYWBmhTXb2tTASKG/zslvl4fgG4DmQBS93WC7dsiGOhAraGw2eCTgd0lYZOhk1FjWl9TS80aktXxzH/7nTvem5ohm+eDl6O0wnTL4KXjQVNSQ1PyLn4lGRJ5MNGzBTRFWIr2API2rca4Fysyfh/UdmazPGlNbY9JPGqb9+F04QzLfqm+Zz/cHy59E7lOSMBlUI4KD6d6ZNNKNRH+/g9i+fSiyiXKugTfda8KBnWGyPwprxuWGYaiQUGUYOwJY5R6x5c4mjImAB310V+Wo33UbWFJiwxEDsiCNqW1meVkBzt2er26vh4qbgCUIQ3iM3gFPfHgy+QxkmIhic7Q1HYacQElt8AAP41M7cCKWCuZidegP37MBB//mjjiNt047ZSQEvB4tqsX/OvfbByVef+cbtVw9T0yjHvmCdPW1XrhyrCCgclu6oYYdbmc5D7BBDRbjjMWGv6YvceAbfGf6ukdB5PuV+TGEN/FoQ1QTRA6Aqf+3fLMg4mS4oyTfw5xyYNbv3qoyLPrp+BnxI53WB9p0hfMg4n9FD6NntBxjDq+Q3Lk/bjC/Y4MaRWdzbMzF9a0lgGfcw9DURlK5p7uGJC9vg34feNoQprxVEZRQ01cHLeob6eGkYm4HxSRx8JY39Mh+9wzJo+k/aIvFleNC3e35NOrkXr6wb5e42n2DwBdPqdNolTLtLFRglAL1LTpp27UjvjieWJAKfoDTR5CKl01sZqt0wPdLLcvsMj6CiPFmccUIOYeZMe86kLBD61Qa5F1EwkgO3Om2qSjW96FzL4skRc+BmU5RrHlAFSldR1wpUgtkUMv9vH5Cy+UJdcvpZ8KbmhZ2PsjF7ddJ1ve9RAw3cP325AyIMwZ77Ef1mgTM0NJze6eSW1qKlEsgt1FADPyeUu1NQTA2H2dueMPGlArWTSUgyWR9AdfpqouT7eg0JWI5w+yUZZC+/rPglYbt84oLmYpwuli0z8FyEQRPIc3EtkfWIv/yYgDr2TZ0N2KvGfpi/MAUWgxI1gleC2uKgEOEtuJthd3XZjF2NoE7IBqjQOINybcJOjyeB5vRLDY1FLuxYzdg1y1etkV4XQig/vje + +install: + # Info for debugging. + - echo %PATH% + - go version + - go env + - go get -v -d -t ./... + + +# Provide a build script, or AppVeyor will call msbuild. +build_script: + - go install -v ./... + - echo %KEYFILE_CONTENTS% > %GCLOUD_TESTS_GOLANG_KEY% + +test_script: + - go test -v ./... diff --git a/vendor/cloud.google.com/go/authexample_test.go b/vendor/cloud.google.com/go/authexample_test.go new file mode 100644 index 0000000..fe75467 --- /dev/null +++ b/vendor/cloud.google.com/go/authexample_test.go @@ -0,0 +1,49 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cloud_test
+
+import (
+	"cloud.google.com/go/datastore"
+	"golang.org/x/net/context"
+	"google.golang.org/api/option"
+)
+
+func Example_applicationDefaultCredentials() {
+	// Google Application Default Credentials is the recommended way to authorize
+	// and authenticate clients.
+	//
+	// See the following link on how to create and obtain Application Default Credentials:
+	// https://developers.google.com/identity/protocols/application-default-credentials.
+	client, err := datastore.NewClient(context.Background(), "project-id")
+	if err != nil {
+		// TODO: handle error.
+	}
+	_ = client // Use the client.
+}
+
+func Example_serviceAccountFile() {
+	// Use a JSON key file associated with a Google service account to
+	// authenticate and authorize. Service Account keys can be created and
+	// downloaded from https://console.developers.google.com/permissions/serviceaccounts.
+	//
+	// Note: This example uses the datastore client, but the same steps apply to
+	// the other client libraries underneath this package.
+	client, err := datastore.NewClient(context.Background(),
+		"project-id", option.WithServiceAccountFile("/path/to/service-account-key.json"))
+	if err != nil {
+		// TODO: handle error.
+	}
+	_ = client // Use the client.
+}
diff --git a/vendor/cloud.google.com/go/bigquery/benchmarks/README.md b/vendor/cloud.google.com/go/bigquery/benchmarks/README.md
new file mode 100644
index 0000000..c97f9d8
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/benchmarks/README.md
@@ -0,0 +1,8 @@
+# BigQuery Benchmark
+This directory contains benchmarks for the BigQuery client.
+
+## Usage
+`go run bench.go -- <project-id> queries.json`
+
+The BigQuery service caches requests, so the benchmark should be run
+at least twice, disregarding the first result.
diff --git a/vendor/cloud.google.com/go/bigquery/benchmarks/bench.go b/vendor/cloud.google.com/go/bigquery/benchmarks/bench.go
new file mode 100644
index 0000000..56d80ec
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/benchmarks/bench.go
@@ -0,0 +1,85 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
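+
+// bench.go expects two arguments: a billing project ID and a JSON file
+// containing an array of query strings (see queries.json). Each query is
+// timed end to end, and the time to the first row is reported separately.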
+ +//+build ignore + +package main + +import ( + "encoding/json" + "flag" + "io/ioutil" + "log" + "time" + + "cloud.google.com/go/bigquery" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func main() { + flag.Parse() + + ctx := context.Background() + c, err := bigquery.NewClient(ctx, flag.Arg(0)) + if err != nil { + log.Fatal(err) + } + + queriesJSON, err := ioutil.ReadFile(flag.Arg(1)) + if err != nil { + log.Fatal(err) + } + + var queries []string + if err := json.Unmarshal(queriesJSON, &queries); err != nil { + log.Fatal(err) + } + + for _, q := range queries { + doQuery(ctx, c, q) + } +} + +func doQuery(ctx context.Context, c *bigquery.Client, qt string) { + startTime := time.Now() + q := c.Query(qt) + it, err := q.Read(ctx) + if err != nil { + log.Fatal(err) + } + + numRows, numCols := 0, 0 + var firstByte time.Duration + + for { + var values []bigquery.Value + err := it.Next(&values) + if err == iterator.Done { + break + } + if err != nil { + log.Fatal(err) + } + if numRows == 0 { + numCols = len(values) + firstByte = time.Since(startTime) + } else if numCols != len(values) { + log.Fatalf("got %d columns, want %d", len(values), numCols) + } + numRows++ + } + log.Printf("query %q: %d rows, %d cols, first byte %f sec, total %f sec", + qt, numRows, numCols, firstByte.Seconds(), time.Since(startTime).Seconds()) +} diff --git a/vendor/cloud.google.com/go/bigquery/benchmarks/queries.json b/vendor/cloud.google.com/go/bigquery/benchmarks/queries.json new file mode 100644 index 0000000..13fed38 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/benchmarks/queries.json @@ -0,0 +1,10 @@ +[ + "SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 10000", + "SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 100000", + "SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 1000000", + "SELECT title FROM `bigquery-public-data.samples.wikipedia` ORDER BY title LIMIT 1000", + "SELECT title, id, timestamp, contributor_ip FROM `bigquery-public-data.samples.wikipedia` WHERE title like 'Blo%' ORDER BY id", + "SELECT * FROM `bigquery-public-data.baseball.games_post_wide` ORDER BY gameId", + "SELECT * FROM `bigquery-public-data.samples.github_nested` WHERE repository.has_downloads ORDER BY repository.created_at LIMIT 10000", + "SELECT repo_name, path FROM `bigquery-public-data.github_repos.files` WHERE path LIKE '%.java' ORDER BY id LIMIT 1000000" +] diff --git a/vendor/cloud.google.com/go/bigquery/bigquery.go b/vendor/cloud.google.com/go/bigquery/bigquery.go new file mode 100644 index 0000000..6427a5c --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/bigquery.go @@ -0,0 +1,161 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bigquery + +import ( + "fmt" + "io" + "net/http" + "time" + + gax "github.com/googleapis/gax-go" + + "cloud.google.com/go/internal" + "cloud.google.com/go/internal/version" + + "google.golang.org/api/googleapi" + "google.golang.org/api/option" + htransport "google.golang.org/api/transport/http" + + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +const ( + prodAddr = "https://www.googleapis.com/bigquery/v2/" + Scope = "https://www.googleapis.com/auth/bigquery" + userAgent = "gcloud-golang-bigquery/20160429" +) + +var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo) + +func setClientHeader(headers http.Header) { + headers.Set("x-goog-api-client", xGoogHeader) +} + +// Client may be used to perform BigQuery operations. +type Client struct { + // Location, if set, will be used as the default location for all subsequent + // dataset creation and job operations. A location specified directly in one of + // those operations will override this value. + Location string + + projectID string + bqs *bq.Service +} + +// NewClient constructs a new Client which can perform BigQuery operations. +// Operations performed via the client are billed to the specified GCP project. +func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { + o := []option.ClientOption{ + option.WithEndpoint(prodAddr), + option.WithScopes(Scope), + option.WithUserAgent(userAgent), + } + o = append(o, opts...) + httpClient, endpoint, err := htransport.NewClient(ctx, o...) + if err != nil { + return nil, fmt.Errorf("bigquery: dialing: %v", err) + } + bqs, err := bq.New(httpClient) + if err != nil { + return nil, fmt.Errorf("bigquery: constructing client: %v", err) + } + bqs.BasePath = endpoint + c := &Client{ + projectID: projectID, + bqs: bqs, + } + return c, nil +} + +// Close closes any resources held by the client. +// Close should be called when the client is no longer needed. +// It need not be called at program exit. +func (c *Client) Close() error { + return nil +} + +// Calls the Jobs.Insert RPC and returns a Job. +func (c *Client) insertJob(ctx context.Context, job *bq.Job, media io.Reader) (*Job, error) { + call := c.bqs.Jobs.Insert(c.projectID, job).Context(ctx) + setClientHeader(call.Header()) + if media != nil { + call.Media(media) + } + var res *bq.Job + var err error + invoke := func() error { + res, err = call.Do() + return err + } + // A job with a client-generated ID can be retried; the presence of the + // ID makes the insert operation idempotent. + // We don't retry if there is media, because it is an io.Reader. We'd + // have to read the contents and keep it in memory, and that could be expensive. + // TODO(jba): Look into retrying if media != nil. + if job.JobReference != nil && media == nil { + err = runWithRetry(ctx, invoke) + } else { + err = invoke() + } + if err != nil { + return nil, err + } + return bqToJob(res, c) +} + +// Convert a number of milliseconds since the Unix epoch to a time.Time. +// Treat an input of zero specially: convert it to the zero time, +// rather than the start of the epoch. +func unixMillisToTime(m int64) time.Time { + if m == 0 { + return time.Time{} + } + return time.Unix(0, m*1e6) +} + +// runWithRetry calls the function until it returns nil or a non-retryable error, or +// the context is done. +// See the similar function in ../storage/invoke.go. The main difference is the +// reason for retrying. 
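+// Retryable errors are defined by retryableError below: HTTP 502 responses and
+// errors whose first reason is "backendError" or "rateLimitExceeded".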
+func runWithRetry(ctx context.Context, call func() error) error { + // These parameters match the suggestions in https://cloud.google.com/bigquery/sla. + backoff := gax.Backoff{ + Initial: 1 * time.Second, + Max: 32 * time.Second, + Multiplier: 2, + } + return internal.Retry(ctx, backoff, func() (stop bool, err error) { + err = call() + if err == nil { + return true, nil + } + return !retryableError(err), err + }) +} + +// This is the correct definition of retryable according to the BigQuery team. +func retryableError(err error) bool { + e, ok := err.(*googleapi.Error) + if !ok { + return false + } + var reason string + if len(e.Errors) > 0 { + reason = e.Errors[0].Reason + } + return e.Code == http.StatusBadGateway || reason == "backendError" || reason == "rateLimitExceeded" +} diff --git a/vendor/cloud.google.com/go/bigquery/copy.go b/vendor/cloud.google.com/go/bigquery/copy.go new file mode 100644 index 0000000..4f11ef5 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/copy.go @@ -0,0 +1,106 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +// CopyConfig holds the configuration for a copy job. +type CopyConfig struct { + // Srcs are the tables from which data will be copied. + Srcs []*Table + + // Dst is the table into which the data will be copied. + Dst *Table + + // CreateDisposition specifies the circumstances under which the destination table will be created. + // The default is CreateIfNeeded. + CreateDisposition TableCreateDisposition + + // WriteDisposition specifies how existing data in the destination table is treated. + // The default is WriteEmpty. + WriteDisposition TableWriteDisposition + + // The labels associated with this job. + Labels map[string]string + + // Custom encryption configuration (e.g., Cloud KMS keys). 
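+	// If nil, the destination table uses BigQuery's default
+	// Google-managed encryption.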
+ DestinationEncryptionConfig *EncryptionConfig +} + +func (c *CopyConfig) toBQ() *bq.JobConfiguration { + var ts []*bq.TableReference + for _, t := range c.Srcs { + ts = append(ts, t.toBQ()) + } + return &bq.JobConfiguration{ + Labels: c.Labels, + Copy: &bq.JobConfigurationTableCopy{ + CreateDisposition: string(c.CreateDisposition), + WriteDisposition: string(c.WriteDisposition), + DestinationTable: c.Dst.toBQ(), + DestinationEncryptionConfiguration: c.DestinationEncryptionConfig.toBQ(), + SourceTables: ts, + }, + } +} + +func bqToCopyConfig(q *bq.JobConfiguration, c *Client) *CopyConfig { + cc := &CopyConfig{ + Labels: q.Labels, + CreateDisposition: TableCreateDisposition(q.Copy.CreateDisposition), + WriteDisposition: TableWriteDisposition(q.Copy.WriteDisposition), + Dst: bqToTable(q.Copy.DestinationTable, c), + DestinationEncryptionConfig: bqToEncryptionConfig(q.Copy.DestinationEncryptionConfiguration), + } + for _, t := range q.Copy.SourceTables { + cc.Srcs = append(cc.Srcs, bqToTable(t, c)) + } + return cc +} + +// A Copier copies data into a BigQuery table from one or more BigQuery tables. +type Copier struct { + JobIDConfig + CopyConfig + c *Client +} + +// CopierFrom returns a Copier which can be used to copy data into a +// BigQuery table from one or more BigQuery tables. +// The returned Copier may optionally be further configured before its Run method is called. +func (t *Table) CopierFrom(srcs ...*Table) *Copier { + return &Copier{ + c: t.c, + CopyConfig: CopyConfig{ + Srcs: srcs, + Dst: t, + }, + } +} + +// Run initiates a copy job. +func (c *Copier) Run(ctx context.Context) (*Job, error) { + return c.c.insertJob(ctx, c.newJob(), nil) +} + +func (c *Copier) newJob() *bq.Job { + return &bq.Job{ + JobReference: c.JobIDConfig.createJobRef(c.c), + Configuration: c.CopyConfig.toBQ(), + } +} diff --git a/vendor/cloud.google.com/go/bigquery/copy_test.go b/vendor/cloud.google.com/go/bigquery/copy_test.go new file mode 100644 index 0000000..4b2327b --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/copy_test.go @@ -0,0 +1,165 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package bigquery
+
+import (
+	"testing"
+
+	"github.com/google/go-cmp/cmp/cmpopts"
+
+	"cloud.google.com/go/internal/testutil"
+
+	bq "google.golang.org/api/bigquery/v2"
+)
+
+func defaultCopyJob() *bq.Job {
+	return &bq.Job{
+		JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
+		Configuration: &bq.JobConfiguration{
+			Copy: &bq.JobConfigurationTableCopy{
+				DestinationTable: &bq.TableReference{
+					ProjectId: "d-project-id",
+					DatasetId: "d-dataset-id",
+					TableId:   "d-table-id",
+				},
+				SourceTables: []*bq.TableReference{
+					{
+						ProjectId: "s-project-id",
+						DatasetId: "s-dataset-id",
+						TableId:   "s-table-id",
+					},
+				},
+			},
+		},
+	}
+}
+
+func TestCopy(t *testing.T) {
+	defer fixRandomID("RANDOM")()
+	testCases := []struct {
+		dst      *Table
+		srcs     []*Table
+		jobID    string
+		location string
+		config   CopyConfig
+		want     *bq.Job
+	}{
+		{
+			dst: &Table{
+				ProjectID: "d-project-id",
+				DatasetID: "d-dataset-id",
+				TableID:   "d-table-id",
+			},
+			srcs: []*Table{
+				{
+					ProjectID: "s-project-id",
+					DatasetID: "s-dataset-id",
+					TableID:   "s-table-id",
+				},
+			},
+			want: defaultCopyJob(),
+		},
+		{
+			dst: &Table{
+				ProjectID: "d-project-id",
+				DatasetID: "d-dataset-id",
+				TableID:   "d-table-id",
+			},
+			srcs: []*Table{
+				{
+					ProjectID: "s-project-id",
+					DatasetID: "s-dataset-id",
+					TableID:   "s-table-id",
+				},
+			},
+			config: CopyConfig{
+				CreateDisposition:           CreateNever,
+				WriteDisposition:            WriteTruncate,
+				DestinationEncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"},
+				Labels:                      map[string]string{"a": "b"},
+			},
+			want: func() *bq.Job {
+				j := defaultCopyJob()
+				j.Configuration.Labels = map[string]string{"a": "b"}
+				j.Configuration.Copy.CreateDisposition = "CREATE_NEVER"
+				j.Configuration.Copy.WriteDisposition = "WRITE_TRUNCATE"
+				j.Configuration.Copy.DestinationEncryptionConfiguration = &bq.EncryptionConfiguration{KmsKeyName: "keyName"}
+				return j
+			}(),
+		},
+		{
+			dst: &Table{
+				ProjectID: "d-project-id",
+				DatasetID: "d-dataset-id",
+				TableID:   "d-table-id",
+			},
+			srcs: []*Table{
+				{
+					ProjectID: "s-project-id",
+					DatasetID: "s-dataset-id",
+					TableID:   "s-table-id",
+				},
+			},
+			jobID: "job-id",
+			want: func() *bq.Job {
+				j := defaultCopyJob()
+				j.JobReference.JobId = "job-id"
+				return j
+			}(),
+		},
+		{
+			dst: &Table{
+				ProjectID: "d-project-id",
+				DatasetID: "d-dataset-id",
+				TableID:   "d-table-id",
+			},
+			srcs: []*Table{
+				{
+					ProjectID: "s-project-id",
+					DatasetID: "s-dataset-id",
+					TableID:   "s-table-id",
+				},
+			},
+			location: "asia-northeast1",
+			want: func() *bq.Job {
+				j := defaultCopyJob()
+				j.JobReference.Location = "asia-northeast1"
+				return j
+			}(),
+		},
+	}
+	c := &Client{projectID: "client-project-id"}
+	for i, tc := range testCases {
+		tc.dst.c = c
+		copier := tc.dst.CopierFrom(tc.srcs...)
+		copier.JobID = tc.jobID
+		copier.Location = tc.location
+		tc.config.Srcs = tc.srcs
+		tc.config.Dst = tc.dst
+		copier.CopyConfig = tc.config
+		got := copier.newJob()
+		checkJob(t, i, got, tc.want)
+
+		jc, err := bqToJobConfig(got.Configuration, c)
+		if err != nil {
+			t.Fatalf("#%d: %v", i, err)
+		}
+		diff := testutil.Diff(jc.(*CopyConfig), &copier.CopyConfig,
+			cmpopts.IgnoreUnexported(Table{}))
+		if diff != "" {
+			t.Errorf("#%d: got=-, want=+:\n%s", i, diff)
+		}
+	}
+}
diff --git a/vendor/cloud.google.com/go/bigquery/dataset.go b/vendor/cloud.google.com/go/bigquery/dataset.go
new file mode 100644
index 0000000..6a4e56d
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/dataset.go
@@ -0,0 +1,518 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	"cloud.google.com/go/internal/optional"
+	"cloud.google.com/go/internal/trace"
+
+	"golang.org/x/net/context"
+	bq "google.golang.org/api/bigquery/v2"
+	"google.golang.org/api/iterator"
+)
+
+// Dataset is a reference to a BigQuery dataset.
+type Dataset struct {
+	ProjectID string
+	DatasetID string
+	c         *Client
+}
+
+// DatasetMetadata contains information about a BigQuery dataset.
+type DatasetMetadata struct {
+	// These fields can be set when creating a dataset.
+	Name                   string            // The user-friendly name for this dataset.
+	Description            string            // The user-friendly description of this dataset.
+	Location               string            // The geo location of the dataset.
+	DefaultTableExpiration time.Duration     // The default expiration time for new tables.
+	Labels                 map[string]string // User-provided labels.
+	Access                 []*AccessEntry    // Access permissions.
+
+	// These fields are read-only.
+	CreationTime     time.Time
+	LastModifiedTime time.Time // When the dataset or any of its tables were modified.
+	FullID           string    // The full dataset ID in the form projectID:datasetID.
+
+	// ETag is the ETag obtained when reading metadata. Pass it to Dataset.Update to
+	// ensure that the metadata hasn't changed since it was read.
+	ETag string
+}
+
+// DatasetMetadataToUpdate is used when updating a dataset's metadata.
+// Only non-nil fields will be updated.
+type DatasetMetadataToUpdate struct {
+	Description optional.String // The user-friendly description of this dataset.
+	Name        optional.String // The user-friendly name for this dataset.
+
+	// DefaultTableExpiration is the default expiration time for new tables.
+	// If set to time.Duration(0), new tables never expire.
+	DefaultTableExpiration optional.Duration
+
+	// The entire access list. It is not possible to replace individual entries.
+	Access []*AccessEntry
+
+	labelUpdater
+}
+
+// Dataset creates a handle to a BigQuery dataset in the client's project.
+func (c *Client) Dataset(id string) *Dataset {
+	return c.DatasetInProject(c.projectID, id)
+}
+
+// DatasetInProject creates a handle to a BigQuery dataset in the specified project.
+func (c *Client) DatasetInProject(projectID, datasetID string) *Dataset {
+	return &Dataset{
+		ProjectID: projectID,
+		DatasetID: datasetID,
+		c:         c,
+	}
+}
+
+// Create creates a dataset in the BigQuery service. An error will be returned if the
+// dataset already exists. Pass in a DatasetMetadata value to configure the dataset.
+func (d *Dataset) Create(ctx context.Context, md *DatasetMetadata) (err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Create")
+	defer func() { trace.EndSpan(ctx, err) }()
+
+	ds, err := md.toBQ()
+	if err != nil {
+		return err
+	}
+	ds.DatasetReference = &bq.DatasetReference{DatasetId: d.DatasetID}
+	// Use Client.Location as a default.
+ if ds.Location == "" { + ds.Location = d.c.Location + } + call := d.c.bqs.Datasets.Insert(d.ProjectID, ds).Context(ctx) + setClientHeader(call.Header()) + _, err = call.Do() + return err +} + +func (dm *DatasetMetadata) toBQ() (*bq.Dataset, error) { + ds := &bq.Dataset{} + if dm == nil { + return ds, nil + } + ds.FriendlyName = dm.Name + ds.Description = dm.Description + ds.Location = dm.Location + ds.DefaultTableExpirationMs = int64(dm.DefaultTableExpiration / time.Millisecond) + ds.Labels = dm.Labels + var err error + ds.Access, err = accessListToBQ(dm.Access) + if err != nil { + return nil, err + } + if !dm.CreationTime.IsZero() { + return nil, errors.New("bigquery: Dataset.CreationTime is not writable") + } + if !dm.LastModifiedTime.IsZero() { + return nil, errors.New("bigquery: Dataset.LastModifiedTime is not writable") + } + if dm.FullID != "" { + return nil, errors.New("bigquery: Dataset.FullID is not writable") + } + if dm.ETag != "" { + return nil, errors.New("bigquery: Dataset.ETag is not writable") + } + return ds, nil +} + +func accessListToBQ(a []*AccessEntry) ([]*bq.DatasetAccess, error) { + var q []*bq.DatasetAccess + for _, e := range a { + a, err := e.toBQ() + if err != nil { + return nil, err + } + q = append(q, a) + } + return q, nil +} + +// Delete deletes the dataset. +func (d *Dataset) Delete(ctx context.Context) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Delete") + defer func() { trace.EndSpan(ctx, err) }() + + call := d.c.bqs.Datasets.Delete(d.ProjectID, d.DatasetID).Context(ctx) + setClientHeader(call.Header()) + return call.Do() +} + +// Metadata fetches the metadata for the dataset. +func (d *Dataset) Metadata(ctx context.Context) (md *DatasetMetadata, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Metadata") + defer func() { trace.EndSpan(ctx, err) }() + + call := d.c.bqs.Datasets.Get(d.ProjectID, d.DatasetID).Context(ctx) + setClientHeader(call.Header()) + var ds *bq.Dataset + if err := runWithRetry(ctx, func() (err error) { + ds, err = call.Do() + return err + }); err != nil { + return nil, err + } + return bqToDatasetMetadata(ds) +} + +func bqToDatasetMetadata(d *bq.Dataset) (*DatasetMetadata, error) { + dm := &DatasetMetadata{ + CreationTime: unixMillisToTime(d.CreationTime), + LastModifiedTime: unixMillisToTime(d.LastModifiedTime), + DefaultTableExpiration: time.Duration(d.DefaultTableExpirationMs) * time.Millisecond, + Description: d.Description, + Name: d.FriendlyName, + FullID: d.Id, + Location: d.Location, + Labels: d.Labels, + ETag: d.Etag, + } + for _, a := range d.Access { + e, err := bqToAccessEntry(a, nil) + if err != nil { + return nil, err + } + dm.Access = append(dm.Access, e) + } + return dm, nil +} + +// Update modifies specific Dataset metadata fields. +// To perform a read-modify-write that protects against intervening reads, +// set the etag argument to the DatasetMetadata.ETag field from the read. +// Pass the empty string for etag for a "blind write" that will always succeed. 
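+//
+// A sketch of the read-modify-write pattern (assuming ds is a *Dataset and md
+// is the DatasetMetadata returned by a prior ds.Metadata call):
+//
+//	md2, err := ds.Update(ctx, DatasetMetadataToUpdate{Description: "new"}, md.ETag)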
+func (d *Dataset) Update(ctx context.Context, dm DatasetMetadataToUpdate, etag string) (md *DatasetMetadata, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Update") + defer func() { trace.EndSpan(ctx, err) }() + + ds, err := dm.toBQ() + if err != nil { + return nil, err + } + call := d.c.bqs.Datasets.Patch(d.ProjectID, d.DatasetID, ds).Context(ctx) + setClientHeader(call.Header()) + if etag != "" { + call.Header().Set("If-Match", etag) + } + var ds2 *bq.Dataset + if err := runWithRetry(ctx, func() (err error) { + ds2, err = call.Do() + return err + }); err != nil { + return nil, err + } + return bqToDatasetMetadata(ds2) +} + +func (dm *DatasetMetadataToUpdate) toBQ() (*bq.Dataset, error) { + ds := &bq.Dataset{} + forceSend := func(field string) { + ds.ForceSendFields = append(ds.ForceSendFields, field) + } + + if dm.Description != nil { + ds.Description = optional.ToString(dm.Description) + forceSend("Description") + } + if dm.Name != nil { + ds.FriendlyName = optional.ToString(dm.Name) + forceSend("FriendlyName") + } + if dm.DefaultTableExpiration != nil { + dur := optional.ToDuration(dm.DefaultTableExpiration) + if dur == 0 { + // Send a null to delete the field. + ds.NullFields = append(ds.NullFields, "DefaultTableExpirationMs") + } else { + ds.DefaultTableExpirationMs = int64(dur / time.Millisecond) + } + } + if dm.Access != nil { + var err error + ds.Access, err = accessListToBQ(dm.Access) + if err != nil { + return nil, err + } + if len(ds.Access) == 0 { + ds.NullFields = append(ds.NullFields, "Access") + } + } + labels, forces, nulls := dm.update() + ds.Labels = labels + ds.ForceSendFields = append(ds.ForceSendFields, forces...) + ds.NullFields = append(ds.NullFields, nulls...) + return ds, nil +} + +// Table creates a handle to a BigQuery table in the dataset. +// To determine if a table exists, call Table.Metadata. +// If the table does not already exist, use Table.Create to create it. +func (d *Dataset) Table(tableID string) *Table { + return &Table{ProjectID: d.ProjectID, DatasetID: d.DatasetID, TableID: tableID, c: d.c} +} + +// Tables returns an iterator over the tables in the Dataset. +func (d *Dataset) Tables(ctx context.Context) *TableIterator { + it := &TableIterator{ + ctx: ctx, + dataset: d, + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.tables) }, + func() interface{} { b := it.tables; it.tables = nil; return b }) + return it +} + +// A TableIterator is an iterator over Tables. +type TableIterator struct { + ctx context.Context + dataset *Dataset + tables []*Table + pageInfo *iterator.PageInfo + nextFunc func() error +} + +// Next returns the next result. Its second return value is Done if there are +// no more results. Once Next returns Done, all subsequent calls will return +// Done. +func (it *TableIterator) Next() (*Table, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + t := it.tables[0] + it.tables = it.tables[1:] + return t, nil +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *TableIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +// for testing +var listTables = func(it *TableIterator, pageSize int, pageToken string) (*bq.TableList, error) { + call := it.dataset.c.bqs.Tables.List(it.dataset.ProjectID, it.dataset.DatasetID). + PageToken(pageToken). 
+ Context(it.ctx) + setClientHeader(call.Header()) + if pageSize > 0 { + call.MaxResults(int64(pageSize)) + } + var res *bq.TableList + err := runWithRetry(it.ctx, func() (err error) { + res, err = call.Do() + return err + }) + return res, err +} + +func (it *TableIterator) fetch(pageSize int, pageToken string) (string, error) { + res, err := listTables(it, pageSize, pageToken) + if err != nil { + return "", err + } + for _, t := range res.Tables { + it.tables = append(it.tables, bqToTable(t.TableReference, it.dataset.c)) + } + return res.NextPageToken, nil +} + +func bqToTable(tr *bq.TableReference, c *Client) *Table { + return &Table{ + ProjectID: tr.ProjectId, + DatasetID: tr.DatasetId, + TableID: tr.TableId, + c: c, + } +} + +// Datasets returns an iterator over the datasets in a project. +// The Client's project is used by default, but that can be +// changed by setting ProjectID on the returned iterator before calling Next. +func (c *Client) Datasets(ctx context.Context) *DatasetIterator { + return c.DatasetsInProject(ctx, c.projectID) +} + +// DatasetsInProject returns an iterator over the datasets in the provided project. +// +// Deprecated: call Client.Datasets, then set ProjectID on the returned iterator. +func (c *Client) DatasetsInProject(ctx context.Context, projectID string) *DatasetIterator { + it := &DatasetIterator{ + ctx: ctx, + c: c, + ProjectID: projectID, + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + return it +} + +// DatasetIterator iterates over the datasets in a project. +type DatasetIterator struct { + // ListHidden causes hidden datasets to be listed when set to true. + // Set before the first call to Next. + ListHidden bool + + // Filter restricts the datasets returned by label. The filter syntax is described in + // https://cloud.google.com/bigquery/docs/labeling-datasets#filtering_datasets_using_labels + // Set before the first call to Next. + Filter string + + // The project ID of the listed datasets. + // Set before the first call to Next. + ProjectID string + + ctx context.Context + c *Client + pageInfo *iterator.PageInfo + nextFunc func() error + items []*Dataset +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *DatasetIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +func (it *DatasetIterator) Next() (*Dataset, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + item := it.items[0] + it.items = it.items[1:] + return item, nil +} + +// for testing +var listDatasets = func(it *DatasetIterator, pageSize int, pageToken string) (*bq.DatasetList, error) { + call := it.c.bqs.Datasets.List(it.ProjectID). + Context(it.ctx). + PageToken(pageToken). 
+ All(it.ListHidden) + setClientHeader(call.Header()) + if pageSize > 0 { + call.MaxResults(int64(pageSize)) + } + if it.Filter != "" { + call.Filter(it.Filter) + } + var res *bq.DatasetList + err := runWithRetry(it.ctx, func() (err error) { + res, err = call.Do() + return err + }) + return res, err +} + +func (it *DatasetIterator) fetch(pageSize int, pageToken string) (string, error) { + res, err := listDatasets(it, pageSize, pageToken) + if err != nil { + return "", err + } + for _, d := range res.Datasets { + it.items = append(it.items, &Dataset{ + ProjectID: d.DatasetReference.ProjectId, + DatasetID: d.DatasetReference.DatasetId, + c: it.c, + }) + } + return res.NextPageToken, nil +} + +// An AccessEntry describes the permissions that an entity has on a dataset. +type AccessEntry struct { + Role AccessRole // The role of the entity + EntityType EntityType // The type of entity + Entity string // The entity (individual or group) granted access + View *Table // The view granted access (EntityType must be ViewEntity) +} + +// AccessRole is the level of access to grant to a dataset. +type AccessRole string + +const ( + OwnerRole AccessRole = "OWNER" + ReaderRole AccessRole = "READER" + WriterRole AccessRole = "WRITER" +) + +// EntityType is the type of entity in an AccessEntry. +type EntityType int + +const ( + // A domain (e.g. "example.com") + DomainEntity EntityType = iota + 1 + + // Email address of a Google Group + GroupEmailEntity + + // Email address of an individual user. + UserEmailEntity + + // A special group: one of projectOwners, projectReaders, projectWriters or allAuthenticatedUsers. + SpecialGroupEntity + + // A BigQuery view. + ViewEntity +) + +func (e *AccessEntry) toBQ() (*bq.DatasetAccess, error) { + q := &bq.DatasetAccess{Role: string(e.Role)} + switch e.EntityType { + case DomainEntity: + q.Domain = e.Entity + case GroupEmailEntity: + q.GroupByEmail = e.Entity + case UserEmailEntity: + q.UserByEmail = e.Entity + case SpecialGroupEntity: + q.SpecialGroup = e.Entity + case ViewEntity: + q.View = e.View.toBQ() + default: + return nil, fmt.Errorf("bigquery: unknown entity type %d", e.EntityType) + } + return q, nil +} + +func bqToAccessEntry(q *bq.DatasetAccess, c *Client) (*AccessEntry, error) { + e := &AccessEntry{Role: AccessRole(q.Role)} + switch { + case q.Domain != "": + e.Entity = q.Domain + e.EntityType = DomainEntity + case q.GroupByEmail != "": + e.Entity = q.GroupByEmail + e.EntityType = GroupEmailEntity + case q.UserByEmail != "": + e.Entity = q.UserByEmail + e.EntityType = UserEmailEntity + case q.SpecialGroup != "": + e.Entity = q.SpecialGroup + e.EntityType = SpecialGroupEntity + case q.View != nil: + e.View = c.DatasetInProject(q.View.ProjectId, q.View.DatasetId).Table(q.View.TableId) + e.EntityType = ViewEntity + default: + return nil, errors.New("bigquery: invalid access value") + } + return e, nil +} diff --git a/vendor/cloud.google.com/go/bigquery/dataset_test.go b/vendor/cloud.google.com/go/bigquery/dataset_test.go new file mode 100644 index 0000000..a434530 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/dataset_test.go @@ -0,0 +1,328 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"errors"
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/google/go-cmp/cmp"
+
+	"cloud.google.com/go/internal/testutil"
+
+	"golang.org/x/net/context"
+	bq "google.golang.org/api/bigquery/v2"
+	itest "google.golang.org/api/iterator/testing"
+)
+
+// listTablesStub services Tables.List requests by returning data from an in-memory list of values.
+type listTablesStub struct {
+	expectedProject, expectedDataset string
+	tables                           []*bq.TableListTables
+}
+
+func (s *listTablesStub) listTables(it *TableIterator, pageSize int, pageToken string) (*bq.TableList, error) {
+	if it.dataset.ProjectID != s.expectedProject {
+		return nil, errors.New("wrong project id")
+	}
+	if it.dataset.DatasetID != s.expectedDataset {
+		return nil, errors.New("wrong dataset id")
+	}
+	const maxPageSize = 2
+	if pageSize <= 0 || pageSize > maxPageSize {
+		pageSize = maxPageSize
+	}
+	start := 0
+	if pageToken != "" {
+		var err error
+		start, err = strconv.Atoi(pageToken)
+		if err != nil {
+			return nil, err
+		}
+	}
+	end := start + pageSize
+	if end > len(s.tables) {
+		end = len(s.tables)
+	}
+	nextPageToken := ""
+	if end < len(s.tables) {
+		nextPageToken = strconv.Itoa(end)
+	}
+	return &bq.TableList{
+		Tables:        s.tables[start:end],
+		NextPageToken: nextPageToken,
+	}, nil
+}
+
+func TestTables(t *testing.T) {
+	c := &Client{projectID: "p1"}
+	inTables := []*bq.TableListTables{
+		{TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t1"}},
+		{TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t2"}},
+		{TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t3"}},
+	}
+	outTables := []*Table{
+		{ProjectID: "p1", DatasetID: "d1", TableID: "t1", c: c},
+		{ProjectID: "p1", DatasetID: "d1", TableID: "t2", c: c},
+		{ProjectID: "p1", DatasetID: "d1", TableID: "t3", c: c},
+	}
+
+	lts := &listTablesStub{
+		expectedProject: "p1",
+		expectedDataset: "d1",
+		tables:          inTables,
+	}
+	old := listTables
+	listTables = lts.listTables // cannot use t.Parallel with this test
+	defer func() { listTables = old }()
+
+	msg, ok := itest.TestIterator(outTables,
+		func() interface{} { return c.Dataset("d1").Tables(context.Background()) },
+		func(it interface{}) (interface{}, error) { return it.(*TableIterator).Next() })
+	if !ok {
+		t.Error(msg)
+	}
+}
+
+type listDatasetsStub struct {
+	expectedProject string
+	datasets        []*bq.DatasetListDatasets
+	hidden          map[*bq.DatasetListDatasets]bool
+}
+
+func (s *listDatasetsStub) listDatasets(it *DatasetIterator, pageSize int, pageToken string) (*bq.DatasetList, error) {
+	const maxPageSize = 2
+	if pageSize <= 0 || pageSize > maxPageSize {
+		pageSize = maxPageSize
+	}
+	if it.Filter != "" {
+		return nil, errors.New("filter not supported")
+	}
+	if it.ProjectID != s.expectedProject {
+		return nil, errors.New("bad project ID")
+	}
+	start := 0
+	if pageToken != "" {
+		var err error
+		start, err = strconv.Atoi(pageToken)
+		if err != nil {
+			return nil, err
+		}
+	}
+	var (
+		i             int
+		result        []*bq.DatasetListDatasets
+		nextPageToken string
+	)
+	for i = start; len(result) < pageSize && 
i < len(s.datasets); i++ { + if s.hidden[s.datasets[i]] && !it.ListHidden { + continue + } + result = append(result, s.datasets[i]) + } + if i < len(s.datasets) { + nextPageToken = strconv.Itoa(i) + } + return &bq.DatasetList{ + Datasets: result, + NextPageToken: nextPageToken, + }, nil +} + +func TestDatasets(t *testing.T) { + client := &Client{projectID: "p"} + inDatasets := []*bq.DatasetListDatasets{ + {DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "a"}}, + {DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "b"}}, + {DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "hidden"}}, + {DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "c"}}, + } + outDatasets := []*Dataset{ + {"p", "a", client}, + {"p", "b", client}, + {"p", "hidden", client}, + {"p", "c", client}, + } + lds := &listDatasetsStub{ + expectedProject: "p", + datasets: inDatasets, + hidden: map[*bq.DatasetListDatasets]bool{inDatasets[2]: true}, + } + old := listDatasets + listDatasets = lds.listDatasets // cannot use t.Parallel with this test + defer func() { listDatasets = old }() + + msg, ok := itest.TestIterator(outDatasets, + func() interface{} { it := client.Datasets(context.Background()); it.ListHidden = true; return it }, + func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() }) + if !ok { + t.Fatalf("ListHidden=true: %s", msg) + } + + msg, ok = itest.TestIterator([]*Dataset{outDatasets[0], outDatasets[1], outDatasets[3]}, + func() interface{} { it := client.Datasets(context.Background()); it.ListHidden = false; return it }, + func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() }) + if !ok { + t.Fatalf("ListHidden=false: %s", msg) + } +} + +func TestDatasetToBQ(t *testing.T) { + for _, test := range []struct { + in *DatasetMetadata + want *bq.Dataset + }{ + {nil, &bq.Dataset{}}, + {&DatasetMetadata{Name: "name"}, &bq.Dataset{FriendlyName: "name"}}, + {&DatasetMetadata{ + Name: "name", + Description: "desc", + DefaultTableExpiration: time.Hour, + Location: "EU", + Labels: map[string]string{"x": "y"}, + Access: []*AccessEntry{{Role: OwnerRole, Entity: "example.com", EntityType: DomainEntity}}, + }, &bq.Dataset{ + FriendlyName: "name", + Description: "desc", + DefaultTableExpirationMs: 60 * 60 * 1000, + Location: "EU", + Labels: map[string]string{"x": "y"}, + Access: []*bq.DatasetAccess{{Role: "OWNER", Domain: "example.com"}}, + }}, + } { + got, err := test.in.toBQ() + if err != nil { + t.Fatal(err) + } + if !testutil.Equal(got, test.want) { + t.Errorf("%v:\ngot %+v\nwant %+v", test.in, got, test.want) + } + } + + // Check that non-writeable fields are unset. 
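+	// (toBQ must return an error whenever one of the read-only fields is set.)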
+ aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local) + for _, dm := range []*DatasetMetadata{ + {CreationTime: aTime}, + {LastModifiedTime: aTime}, + {FullID: "x"}, + {ETag: "e"}, + } { + if _, err := dm.toBQ(); err == nil { + t.Errorf("%+v: got nil, want error", dm) + } + } +} + +func TestBQToDatasetMetadata(t *testing.T) { + cTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local) + cMillis := cTime.UnixNano() / 1e6 + mTime := time.Date(2017, 10, 31, 0, 0, 0, 0, time.Local) + mMillis := mTime.UnixNano() / 1e6 + q := &bq.Dataset{ + CreationTime: cMillis, + LastModifiedTime: mMillis, + FriendlyName: "name", + Description: "desc", + DefaultTableExpirationMs: 60 * 60 * 1000, + Location: "EU", + Labels: map[string]string{"x": "y"}, + Access: []*bq.DatasetAccess{ + {Role: "READER", UserByEmail: "joe@example.com"}, + {Role: "WRITER", GroupByEmail: "users@example.com"}, + }, + Etag: "etag", + } + want := &DatasetMetadata{ + CreationTime: cTime, + LastModifiedTime: mTime, + Name: "name", + Description: "desc", + DefaultTableExpiration: time.Hour, + Location: "EU", + Labels: map[string]string{"x": "y"}, + Access: []*AccessEntry{ + {Role: ReaderRole, Entity: "joe@example.com", EntityType: UserEmailEntity}, + {Role: WriterRole, Entity: "users@example.com", EntityType: GroupEmailEntity}, + }, + ETag: "etag", + } + got, err := bqToDatasetMetadata(q) + if err != nil { + t.Fatal(err) + } + if diff := testutil.Diff(got, want); diff != "" { + t.Errorf("-got, +want:\n%s", diff) + } +} + +func TestDatasetMetadataToUpdateToBQ(t *testing.T) { + dm := DatasetMetadataToUpdate{ + Description: "desc", + Name: "name", + DefaultTableExpiration: time.Hour, + } + dm.SetLabel("label", "value") + dm.DeleteLabel("del") + + got, err := dm.toBQ() + if err != nil { + t.Fatal(err) + } + want := &bq.Dataset{ + Description: "desc", + FriendlyName: "name", + DefaultTableExpirationMs: 60 * 60 * 1000, + Labels: map[string]string{"label": "value"}, + ForceSendFields: []string{"Description", "FriendlyName"}, + NullFields: []string{"Labels.del"}, + } + if diff := testutil.Diff(got, want); diff != "" { + t.Errorf("-got, +want:\n%s", diff) + } +} + +func TestConvertAccessEntry(t *testing.T) { + c := &Client{projectID: "pid"} + for _, e := range []*AccessEntry{ + {Role: ReaderRole, Entity: "e", EntityType: DomainEntity}, + {Role: WriterRole, Entity: "e", EntityType: GroupEmailEntity}, + {Role: OwnerRole, Entity: "e", EntityType: UserEmailEntity}, + {Role: ReaderRole, Entity: "e", EntityType: SpecialGroupEntity}, + {Role: ReaderRole, EntityType: ViewEntity, + View: &Table{ProjectID: "p", DatasetID: "d", TableID: "t", c: c}}, + } { + q, err := e.toBQ() + if err != nil { + t.Fatal(err) + } + got, err := bqToAccessEntry(q, c) + if err != nil { + t.Fatal(err) + } + if diff := testutil.Diff(got, e, cmp.AllowUnexported(Table{}, Client{})); diff != "" { + t.Errorf("got=-, want=+:\n%s", diff) + } + } + + e := &AccessEntry{Role: ReaderRole, Entity: "e"} + if _, err := e.toBQ(); err == nil { + t.Error("got nil, want error") + } + if _, err := bqToAccessEntry(&bq.DatasetAccess{Role: "WRITER"}, nil); err == nil { + t.Error("got nil, want error") + } +} diff --git a/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/ListDataSources_smoke_test.go b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/ListDataSources_smoke_test.go new file mode 100644 index 0000000..86fdde5 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/ListDataSources_smoke_test.go @@ -0,0 +1,67 @@ +// Copyright 2018 Google LLC +// +// Licensed 
under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package datatransfer + +import ( + datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1" +) + +import ( + "fmt" + "strconv" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var _ = fmt.Sprintf +var _ = iterator.Done +var _ = strconv.FormatUint +var _ = time.Now + +func TestDataTransferServiceSmoke(t *testing.T) { + if testing.Short() { + t.Skip("skipping smoke test in short mode") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) + if ts == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + + projectId := testutil.ProjID() + _ = projectId + + c, err := NewClient(ctx, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + + var formattedParent string = fmt.Sprintf("projects/%s", projectId) + var request = &datatransferpb.ListDataSourcesRequest{ + Parent: formattedParent, + } + + iter := c.ListDataSources(ctx, request) + if _, err := iter.Next(); err != nil && err != iterator.Done { + t.Error(err) + } +} diff --git a/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client.go b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client.go new file mode 100644 index 0000000..5ad6dd4 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client.go @@ -0,0 +1,601 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package datatransfer + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. 
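+// The defaults come from defaultCallOptions below; callers may override them
+// per invocation by passing extra gax.CallOption values to the corresponding method.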
+type CallOptions struct { + GetDataSource []gax.CallOption + ListDataSources []gax.CallOption + CreateTransferConfig []gax.CallOption + UpdateTransferConfig []gax.CallOption + DeleteTransferConfig []gax.CallOption + GetTransferConfig []gax.CallOption + ListTransferConfigs []gax.CallOption + ScheduleTransferRuns []gax.CallOption + GetTransferRun []gax.CallOption + DeleteTransferRun []gax.CallOption + ListTransferRuns []gax.CallOption + ListTransferLogs []gax.CallOption + CheckValidCreds []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("bigquerydatatransfer.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &CallOptions{ + GetDataSource: retry[[2]string{"default", "idempotent"}], + ListDataSources: retry[[2]string{"default", "idempotent"}], + CreateTransferConfig: retry[[2]string{"default", "non_idempotent"}], + UpdateTransferConfig: retry[[2]string{"default", "non_idempotent"}], + DeleteTransferConfig: retry[[2]string{"default", "idempotent"}], + GetTransferConfig: retry[[2]string{"default", "idempotent"}], + ListTransferConfigs: retry[[2]string{"default", "idempotent"}], + ScheduleTransferRuns: retry[[2]string{"default", "non_idempotent"}], + GetTransferRun: retry[[2]string{"default", "idempotent"}], + DeleteTransferRun: retry[[2]string{"default", "idempotent"}], + ListTransferRuns: retry[[2]string{"default", "idempotent"}], + ListTransferLogs: retry[[2]string{"default", "idempotent"}], + CheckValidCreds: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with BigQuery Data Transfer API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client datatransferpb.DataTransferServiceClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new data transfer service client. +// +// The Google BigQuery Data Transfer Service API enables BigQuery users to +// configure the transfer of their data from other Google Products into BigQuery. +// This service contains methods that are end user exposed. It backs up the +// frontend. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: datatransferpb.NewDataTransferServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. 
+func (c *Client) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// GetDataSource retrieves a supported data source and returns its settings, +// which can be used for UI rendering. +func (c *Client) GetDataSource(ctx context.Context, req *datatransferpb.GetDataSourceRequest, opts ...gax.CallOption) (*datatransferpb.DataSource, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetDataSource[0:len(c.CallOptions.GetDataSource):len(c.CallOptions.GetDataSource)], opts...) + var resp *datatransferpb.DataSource + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetDataSource(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListDataSources lists supported data sources and returns their settings, +// which can be used for UI rendering. +func (c *Client) ListDataSources(ctx context.Context, req *datatransferpb.ListDataSourcesRequest, opts ...gax.CallOption) *DataSourceIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListDataSources[0:len(c.CallOptions.ListDataSources):len(c.CallOptions.ListDataSources)], opts...) + it := &DataSourceIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.DataSource, string, error) { + var resp *datatransferpb.ListDataSourcesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListDataSources(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.DataSources, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// CreateTransferConfig creates a new data transfer configuration. +func (c *Client) CreateTransferConfig(ctx context.Context, req *datatransferpb.CreateTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateTransferConfig[0:len(c.CallOptions.CreateTransferConfig):len(c.CallOptions.CreateTransferConfig)], opts...) + var resp *datatransferpb.TransferConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateTransferConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateTransferConfig updates a data transfer configuration. +// All fields must be set, even if they are not updated. 
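+// A read-modify-write flow is therefore the safest way to call this method.
+// A minimal sketch (the config name, new display name, and mask path are
+// placeholders; field_maskpb is the field_mask import alias used in the
+// tests below):
+//
+//    cfg, err := c.GetTransferConfig(ctx, &datatransferpb.GetTransferConfigRequest{Name: name})
+//    // ... handle err ...
+//    cfg.DisplayName = "nightly-load"
+//    cfg, err = c.UpdateTransferConfig(ctx, &datatransferpb.UpdateTransferConfigRequest{
+//        TransferConfig: cfg,
+//        UpdateMask:     &field_maskpb.FieldMask{Paths: []string{"display_name"}},
+//    })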
+func (c *Client) UpdateTransferConfig(ctx context.Context, req *datatransferpb.UpdateTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateTransferConfig[0:len(c.CallOptions.UpdateTransferConfig):len(c.CallOptions.UpdateTransferConfig)], opts...) + var resp *datatransferpb.TransferConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.UpdateTransferConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteTransferConfig deletes a data transfer configuration, +// including any associated transfer runs and logs. +func (c *Client) DeleteTransferConfig(ctx context.Context, req *datatransferpb.DeleteTransferConfigRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteTransferConfig[0:len(c.CallOptions.DeleteTransferConfig):len(c.CallOptions.DeleteTransferConfig)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteTransferConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// GetTransferConfig returns information about a data transfer config. +func (c *Client) GetTransferConfig(ctx context.Context, req *datatransferpb.GetTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetTransferConfig[0:len(c.CallOptions.GetTransferConfig):len(c.CallOptions.GetTransferConfig)], opts...) + var resp *datatransferpb.TransferConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetTransferConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListTransferConfigs returns information about all data transfers in the project. +func (c *Client) ListTransferConfigs(ctx context.Context, req *datatransferpb.ListTransferConfigsRequest, opts ...gax.CallOption) *TransferConfigIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListTransferConfigs[0:len(c.CallOptions.ListTransferConfigs):len(c.CallOptions.ListTransferConfigs)], opts...) + it := &TransferConfigIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferConfig, string, error) { + var resp *datatransferpb.ListTransferConfigsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListTransferConfigs(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.TransferConfigs, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) 
+ return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// ScheduleTransferRuns creates transfer runs for a time range [start_time, end_time]. +// For each date - or whatever granularity the data source supports - in the +// range, one transfer run is created. +// Note that runs are created per UTC time in the time range. +func (c *Client) ScheduleTransferRuns(ctx context.Context, req *datatransferpb.ScheduleTransferRunsRequest, opts ...gax.CallOption) (*datatransferpb.ScheduleTransferRunsResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ScheduleTransferRuns[0:len(c.CallOptions.ScheduleTransferRuns):len(c.CallOptions.ScheduleTransferRuns)], opts...) + var resp *datatransferpb.ScheduleTransferRunsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ScheduleTransferRuns(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetTransferRun returns information about the particular transfer run. +func (c *Client) GetTransferRun(ctx context.Context, req *datatransferpb.GetTransferRunRequest, opts ...gax.CallOption) (*datatransferpb.TransferRun, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetTransferRun[0:len(c.CallOptions.GetTransferRun):len(c.CallOptions.GetTransferRun)], opts...) + var resp *datatransferpb.TransferRun + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetTransferRun(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteTransferRun deletes the specified transfer run. +func (c *Client) DeleteTransferRun(ctx context.Context, req *datatransferpb.DeleteTransferRunRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteTransferRun[0:len(c.CallOptions.DeleteTransferRun):len(c.CallOptions.DeleteTransferRun)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteTransferRun(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ListTransferRuns returns information about running and completed jobs. +func (c *Client) ListTransferRuns(ctx context.Context, req *datatransferpb.ListTransferRunsRequest, opts ...gax.CallOption) *TransferRunIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListTransferRuns[0:len(c.CallOptions.ListTransferRuns):len(c.CallOptions.ListTransferRuns)], opts...) + it := &TransferRunIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferRun, string, error) { + var resp *datatransferpb.ListTransferRunsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListTransferRuns(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.TransferRuns, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ return it
+}
+
+// ListTransferLogs returns user-facing log messages for the data transfer run.
+func (c *Client) ListTransferLogs(ctx context.Context, req *datatransferpb.ListTransferLogsRequest, opts ...gax.CallOption) *TransferMessageIterator {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.ListTransferLogs[0:len(c.CallOptions.ListTransferLogs):len(c.CallOptions.ListTransferLogs)], opts...)
+ it := &TransferMessageIterator{}
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferMessage, string, error) {
+ var resp *datatransferpb.ListTransferLogsResponse
+ req.PageToken = pageToken
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.client.ListTransferLogs(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.TransferMessages, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ return it
+}
+
+// CheckValidCreds returns true if valid credentials exist for the given data source and
+// requesting user.
+// Some data sources don't support service accounts, so we need to talk to
+// them on behalf of the end user. This API just checks whether we have an
+// OAuth token for the particular user, which is a prerequisite before the
+// user can create a transfer config.
+func (c *Client) CheckValidCreds(ctx context.Context, req *datatransferpb.CheckValidCredsRequest, opts ...gax.CallOption) (*datatransferpb.CheckValidCredsResponse, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.CheckValidCreds[0:len(c.CallOptions.CheckValidCreds):len(c.CallOptions.CheckValidCreds)], opts...)
+ var resp *datatransferpb.CheckValidCredsResponse
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.client.CheckValidCreds(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// DataSourceIterator manages a stream of *datatransferpb.DataSource.
+type DataSourceIterator struct {
+ items []*datatransferpb.DataSource
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.DataSource, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *DataSourceIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *DataSourceIterator) Next() (*datatransferpb.DataSource, error) { + var item *datatransferpb.DataSource + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *DataSourceIterator) bufLen() int { + return len(it.items) +} + +func (it *DataSourceIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// TransferConfigIterator manages a stream of *datatransferpb.TransferConfig. +type TransferConfigIterator struct { + items []*datatransferpb.TransferConfig + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferConfig, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *TransferConfigIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *TransferConfigIterator) Next() (*datatransferpb.TransferConfig, error) { + var item *datatransferpb.TransferConfig + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *TransferConfigIterator) bufLen() int { + return len(it.items) +} + +func (it *TransferConfigIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// TransferMessageIterator manages a stream of *datatransferpb.TransferMessage. +type TransferMessageIterator struct { + items []*datatransferpb.TransferMessage + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferMessage, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *TransferMessageIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
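+// The usual consumption loop, matching the examples shipped with this
+// package:
+//
+//    for {
+//        msg, err := it.Next()
+//        if err == iterator.Done {
+//            break
+//        }
+//        if err != nil {
+//            // TODO: Handle error.
+//        }
+//        _ = msg // TODO: Use msg.
+//    }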
+func (it *TransferMessageIterator) Next() (*datatransferpb.TransferMessage, error) { + var item *datatransferpb.TransferMessage + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *TransferMessageIterator) bufLen() int { + return len(it.items) +} + +func (it *TransferMessageIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// TransferRunIterator manages a stream of *datatransferpb.TransferRun. +type TransferRunIterator struct { + items []*datatransferpb.TransferRun + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferRun, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *TransferRunIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *TransferRunIterator) Next() (*datatransferpb.TransferRun, error) { + var item *datatransferpb.TransferRun + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *TransferRunIterator) bufLen() int { + return len(it.items) +} + +func (it *TransferRunIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client_example_test.go b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client_example_test.go new file mode 100644 index 0000000..e8ef24b --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client_example_test.go @@ -0,0 +1,288 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package datatransfer_test + +import ( + "cloud.google.com/go/bigquery/datatransfer/apiv1" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := datatransfer.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_GetDataSource() { + ctx := context.Background() + c, err := datatransfer.NewClient(ctx) + if err != nil { + // TODO: Handle error. 
+ } + + req := &datatransferpb.GetDataSourceRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetDataSource(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListDataSources() { + ctx := context.Background() + c, err := datatransfer.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &datatransferpb.ListDataSourcesRequest{ + // TODO: Fill request struct fields. + } + it := c.ListDataSources(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_CreateTransferConfig() { + ctx := context.Background() + c, err := datatransfer.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &datatransferpb.CreateTransferConfigRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateTransferConfig(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_UpdateTransferConfig() { + ctx := context.Background() + c, err := datatransfer.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &datatransferpb.UpdateTransferConfigRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateTransferConfig(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_DeleteTransferConfig() { + ctx := context.Background() + c, err := datatransfer.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &datatransferpb.DeleteTransferConfigRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteTransferConfig(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_GetTransferConfig() { + ctx := context.Background() + c, err := datatransfer.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &datatransferpb.GetTransferConfigRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetTransferConfig(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListTransferConfigs() { + ctx := context.Background() + c, err := datatransfer.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &datatransferpb.ListTransferConfigsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListTransferConfigs(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_ScheduleTransferRuns() { + ctx := context.Background() + c, err := datatransfer.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &datatransferpb.ScheduleTransferRunsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ScheduleTransferRuns(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_GetTransferRun() { + ctx := context.Background() + c, err := datatransfer.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &datatransferpb.GetTransferRunRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetTransferRun(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp +} + +func ExampleClient_DeleteTransferRun() { + ctx := context.Background() + c, err := datatransfer.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &datatransferpb.DeleteTransferRunRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteTransferRun(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_ListTransferRuns() { + ctx := context.Background() + c, err := datatransfer.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &datatransferpb.ListTransferRunsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListTransferRuns(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_ListTransferLogs() { + ctx := context.Background() + c, err := datatransfer.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &datatransferpb.ListTransferLogsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListTransferLogs(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_CheckValidCreds() { + ctx := context.Background() + c, err := datatransfer.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &datatransferpb.CheckValidCredsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CheckValidCreds(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/doc.go b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/doc.go new file mode 100644 index 0000000..856e8f9 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/doc.go @@ -0,0 +1,47 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package datatransfer is an auto-generated package for the +// BigQuery Data Transfer API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// Transfers data from partner SaaS applications to Google BigQuery on a +// scheduled, managed basis. +package datatransfer // import "cloud.google.com/go/bigquery/datatransfer/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. 
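+// The same scopes can be used to build a token source by hand when the
+// default credential lookup is not wanted. A sketch, assuming the standard
+// golang.org/x/oauth2/google package:
+//
+//    ts, err := google.DefaultTokenSource(ctx, DefaultAuthScopes()...)
+//    // ... handle err ...
+//    c, err := NewClient(ctx, option.WithTokenSource(ts))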
+func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + } +} diff --git a/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/mock_test.go b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/mock_test.go new file mode 100644 index 0000000..5278318 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/mock_test.go @@ -0,0 +1,1146 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package datatransfer + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + timestamppb "github.com/golang/protobuf/ptypes/timestamp" + datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1" + field_maskpb "google.golang.org/genproto/protobuf/field_mask" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockDataTransferServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + datatransferpb.DataTransferServiceServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockDataTransferServer) GetDataSource(ctx context.Context, req *datatransferpb.GetDataSourceRequest) (*datatransferpb.DataSource, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*datatransferpb.DataSource), nil +} + +func (s *mockDataTransferServer) ListDataSources(ctx context.Context, req *datatransferpb.ListDataSourcesRequest) (*datatransferpb.ListDataSourcesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*datatransferpb.ListDataSourcesResponse), nil +} + +func (s *mockDataTransferServer) CreateTransferConfig(ctx context.Context, req *datatransferpb.CreateTransferConfigRequest) (*datatransferpb.TransferConfig, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*datatransferpb.TransferConfig), nil +} + +func (s *mockDataTransferServer) UpdateTransferConfig(ctx context.Context, req *datatransferpb.UpdateTransferConfigRequest) (*datatransferpb.TransferConfig, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*datatransferpb.TransferConfig), nil +} + +func (s *mockDataTransferServer) DeleteTransferConfig(ctx context.Context, req *datatransferpb.DeleteTransferConfigRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockDataTransferServer) GetTransferConfig(ctx context.Context, req *datatransferpb.GetTransferConfigRequest) (*datatransferpb.TransferConfig, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*datatransferpb.TransferConfig), nil +} + +func (s *mockDataTransferServer) ListTransferConfigs(ctx context.Context, req *datatransferpb.ListTransferConfigsRequest) (*datatransferpb.ListTransferConfigsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, 
req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*datatransferpb.ListTransferConfigsResponse), nil +} + +func (s *mockDataTransferServer) ScheduleTransferRuns(ctx context.Context, req *datatransferpb.ScheduleTransferRunsRequest) (*datatransferpb.ScheduleTransferRunsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*datatransferpb.ScheduleTransferRunsResponse), nil +} + +func (s *mockDataTransferServer) GetTransferRun(ctx context.Context, req *datatransferpb.GetTransferRunRequest) (*datatransferpb.TransferRun, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*datatransferpb.TransferRun), nil +} + +func (s *mockDataTransferServer) DeleteTransferRun(ctx context.Context, req *datatransferpb.DeleteTransferRunRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockDataTransferServer) ListTransferRuns(ctx context.Context, req *datatransferpb.ListTransferRunsRequest) (*datatransferpb.ListTransferRunsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*datatransferpb.ListTransferRunsResponse), nil +} + +func (s *mockDataTransferServer) ListTransferLogs(ctx context.Context, req *datatransferpb.ListTransferLogsRequest) (*datatransferpb.ListTransferLogsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*datatransferpb.ListTransferLogsResponse), nil +} + +func (s *mockDataTransferServer) CheckValidCreds(ctx context.Context, req *datatransferpb.CheckValidCredsRequest) (*datatransferpb.CheckValidCredsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*datatransferpb.CheckValidCredsResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
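+// All tests share the single in-process gRPC server registered in TestMain;
+// each test case seeds the mock's err, reqs, and resps fields as needed
+// before issuing calls through a fresh Client.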
+var clientOpt option.ClientOption + +var ( + mockDataTransfer mockDataTransferServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + datatransferpb.RegisterDataTransferServiceServer(serv, &mockDataTransfer) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestDataTransferServiceGetDataSource(t *testing.T) { + var name2 string = "name2-1052831874" + var dataSourceId string = "dataSourceId-1015796374" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var clientId string = "clientId-1904089585" + var supportsMultipleTransfers bool = true + var updateDeadlineSeconds int32 = 991471694 + var defaultSchedule string = "defaultSchedule-800168235" + var supportsCustomSchedule bool = true + var helpUrl string = "helpUrl-789431439" + var defaultDataRefreshWindowDays int32 = 1804935157 + var manualRunsDisabled bool = true + var expectedResponse = &datatransferpb.DataSource{ + Name: name2, + DataSourceId: dataSourceId, + DisplayName: displayName, + Description: description, + ClientId: clientId, + SupportsMultipleTransfers: supportsMultipleTransfers, + UpdateDeadlineSeconds: updateDeadlineSeconds, + DefaultSchedule: defaultSchedule, + SupportsCustomSchedule: supportsCustomSchedule, + HelpUrl: helpUrl, + DefaultDataRefreshWindowDays: defaultDataRefreshWindowDays, + ManualRunsDisabled: manualRunsDisabled, + } + + mockDataTransfer.err = nil + mockDataTransfer.reqs = nil + + mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/dataSources/%s", "[PROJECT]", "[DATA_SOURCE]") + var request = &datatransferpb.GetDataSourceRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDataSource(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDataTransfer.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDataTransferServiceGetDataSourceError(t *testing.T) { + errCode := codes.PermissionDenied + mockDataTransfer.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/dataSources/%s", "[PROJECT]", "[DATA_SOURCE]") + var request = &datatransferpb.GetDataSourceRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDataSource(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDataTransferServiceListDataSources(t *testing.T) { + var nextPageToken string = "" + var dataSourcesElement *datatransferpb.DataSource = &datatransferpb.DataSource{} + var dataSources = []*datatransferpb.DataSource{dataSourcesElement} + var expectedResponse = &datatransferpb.ListDataSourcesResponse{ + NextPageToken: nextPageToken, + DataSources: dataSources, + } + + mockDataTransfer.err 
= nil + mockDataTransfer.reqs = nil + + mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &datatransferpb.ListDataSourcesRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDataSources(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDataTransfer.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.DataSources[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDataTransferServiceListDataSourcesError(t *testing.T) { + errCode := codes.PermissionDenied + mockDataTransfer.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &datatransferpb.ListDataSourcesRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDataSources(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDataTransferServiceCreateTransferConfig(t *testing.T) { + var name string = "name3373707" + var destinationDatasetId string = "destinationDatasetId1541564179" + var displayName string = "displayName1615086568" + var dataSourceId string = "dataSourceId-1015796374" + var schedule string = "schedule-697920873" + var dataRefreshWindowDays int32 = 327632845 + var disabled bool = true + var userId int64 = 147132913 + var datasetRegion string = "datasetRegion959248539" + var expectedResponse = &datatransferpb.TransferConfig{ + Name: name, + DestinationDatasetId: destinationDatasetId, + DisplayName: displayName, + DataSourceId: dataSourceId, + Schedule: schedule, + DataRefreshWindowDays: dataRefreshWindowDays, + Disabled: disabled, + UserId: userId, + DatasetRegion: datasetRegion, + } + + mockDataTransfer.err = nil + mockDataTransfer.reqs = nil + + mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var transferConfig *datatransferpb.TransferConfig = &datatransferpb.TransferConfig{} + var request = &datatransferpb.CreateTransferConfigRequest{ + Parent: formattedParent, + TransferConfig: transferConfig, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateTransferConfig(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDataTransfer.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDataTransferServiceCreateTransferConfigError(t *testing.T) { + errCode := codes.PermissionDenied + mockDataTransfer.err = gstatus.Error(errCode, "test error") + + var formattedParent string = 
fmt.Sprintf("projects/%s", "[PROJECT]") + var transferConfig *datatransferpb.TransferConfig = &datatransferpb.TransferConfig{} + var request = &datatransferpb.CreateTransferConfigRequest{ + Parent: formattedParent, + TransferConfig: transferConfig, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateTransferConfig(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDataTransferServiceUpdateTransferConfig(t *testing.T) { + var name string = "name3373707" + var destinationDatasetId string = "destinationDatasetId1541564179" + var displayName string = "displayName1615086568" + var dataSourceId string = "dataSourceId-1015796374" + var schedule string = "schedule-697920873" + var dataRefreshWindowDays int32 = 327632845 + var disabled bool = true + var userId int64 = 147132913 + var datasetRegion string = "datasetRegion959248539" + var expectedResponse = &datatransferpb.TransferConfig{ + Name: name, + DestinationDatasetId: destinationDatasetId, + DisplayName: displayName, + DataSourceId: dataSourceId, + Schedule: schedule, + DataRefreshWindowDays: dataRefreshWindowDays, + Disabled: disabled, + UserId: userId, + DatasetRegion: datasetRegion, + } + + mockDataTransfer.err = nil + mockDataTransfer.reqs = nil + + mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) + + var transferConfig *datatransferpb.TransferConfig = &datatransferpb.TransferConfig{} + var updateMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{} + var request = &datatransferpb.UpdateTransferConfigRequest{ + TransferConfig: transferConfig, + UpdateMask: updateMask, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateTransferConfig(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDataTransfer.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDataTransferServiceUpdateTransferConfigError(t *testing.T) { + errCode := codes.PermissionDenied + mockDataTransfer.err = gstatus.Error(errCode, "test error") + + var transferConfig *datatransferpb.TransferConfig = &datatransferpb.TransferConfig{} + var updateMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{} + var request = &datatransferpb.UpdateTransferConfigRequest{ + TransferConfig: transferConfig, + UpdateMask: updateMask, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateTransferConfig(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDataTransferServiceDeleteTransferConfig(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockDataTransfer.err = nil + mockDataTransfer.reqs = nil + + mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]") + var request = 
&datatransferpb.DeleteTransferConfigRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteTransferConfig(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDataTransfer.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDataTransferServiceDeleteTransferConfigError(t *testing.T) { + errCode := codes.PermissionDenied + mockDataTransfer.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]") + var request = &datatransferpb.DeleteTransferConfigRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteTransferConfig(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestDataTransferServiceGetTransferConfig(t *testing.T) { + var name2 string = "name2-1052831874" + var destinationDatasetId string = "destinationDatasetId1541564179" + var displayName string = "displayName1615086568" + var dataSourceId string = "dataSourceId-1015796374" + var schedule string = "schedule-697920873" + var dataRefreshWindowDays int32 = 327632845 + var disabled bool = true + var userId int64 = 147132913 + var datasetRegion string = "datasetRegion959248539" + var expectedResponse = &datatransferpb.TransferConfig{ + Name: name2, + DestinationDatasetId: destinationDatasetId, + DisplayName: displayName, + DataSourceId: dataSourceId, + Schedule: schedule, + DataRefreshWindowDays: dataRefreshWindowDays, + Disabled: disabled, + UserId: userId, + DatasetRegion: datasetRegion, + } + + mockDataTransfer.err = nil + mockDataTransfer.reqs = nil + + mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]") + var request = &datatransferpb.GetTransferConfigRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetTransferConfig(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDataTransfer.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDataTransferServiceGetTransferConfigError(t *testing.T) { + errCode := codes.PermissionDenied + mockDataTransfer.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]") + var request = &datatransferpb.GetTransferConfigRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetTransferConfig(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDataTransferServiceListTransferConfigs(t *testing.T) { 
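+ // Seed the mock with a single-element page and an empty next-page token
+ // so the iterator's first Next call yields exactly one TransferConfig.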
+ var nextPageToken string = ""
+ var transferConfigsElement *datatransferpb.TransferConfig = &datatransferpb.TransferConfig{}
+ var transferConfigs = []*datatransferpb.TransferConfig{transferConfigsElement}
+ var expectedResponse = &datatransferpb.ListTransferConfigsResponse{
+ NextPageToken: nextPageToken,
+ TransferConfigs: transferConfigs,
+ }
+
+ mockDataTransfer.err = nil
+ mockDataTransfer.reqs = nil
+
+ mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)
+
+ var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
+ var request = &datatransferpb.ListTransferConfigsRequest{
+ Parent: formattedParent,
+ }
+
+ c, err := NewClient(context.Background(), clientOpt)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err := c.ListTransferConfigs(context.Background(), request).Next()
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if want, got := request, mockDataTransfer.reqs[0]; !proto.Equal(want, got) {
+ t.Errorf("wrong request %q, want %q", got, want)
+ }
+
+ want := (interface{})(expectedResponse.TransferConfigs[0])
+ got := (interface{})(resp)
+ var ok bool
+
+ switch want := (want).(type) {
+ case proto.Message:
+ ok = proto.Equal(want, got.(proto.Message))
+ default:
+ ok = want == got
+ }
+ if !ok {
+ t.Errorf("wrong response %q, want %q)", got, want)
+ }
+}
+
+func TestDataTransferServiceListTransferConfigsError(t *testing.T) {
+ errCode := codes.PermissionDenied
+ mockDataTransfer.err = gstatus.Error(errCode, "test error")
+
+ var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
+ var request = &datatransferpb.ListTransferConfigsRequest{
+ Parent: formattedParent,
+ }
+
+ c, err := NewClient(context.Background(), clientOpt)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err := c.ListTransferConfigs(context.Background(), request).Next()
+
+ if st, ok := gstatus.FromError(err); !ok {
+ t.Errorf("got error %v, expected grpc error", err)
+ } else if c := st.Code(); c != errCode {
+ t.Errorf("got error code %q, want %q", c, errCode)
+ }
+ _ = resp
+}
+func TestDataTransferServiceScheduleTransferRuns(t *testing.T) {
+ var expectedResponse *datatransferpb.ScheduleTransferRunsResponse = &datatransferpb.ScheduleTransferRunsResponse{}
+
+ mockDataTransfer.err = nil
+ mockDataTransfer.reqs = nil
+
+ mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)
+
+ var formattedParent string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]")
+ var startTime *timestamppb.Timestamp = &timestamppb.Timestamp{}
+ var endTime *timestamppb.Timestamp = &timestamppb.Timestamp{}
+ var request = &datatransferpb.ScheduleTransferRunsRequest{
+ Parent: formattedParent,
+ StartTime: startTime,
+ EndTime: endTime,
+ }
+
+ c, err := NewClient(context.Background(), clientOpt)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err := c.ScheduleTransferRuns(context.Background(), request)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if want, got := request, mockDataTransfer.reqs[0]; !proto.Equal(want, got) {
+ t.Errorf("wrong request %q, want %q", got, want)
+ }
+
+ if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+ t.Errorf("wrong response %q, want %q)", got, want)
+ }
+}
+
+func TestDataTransferServiceScheduleTransferRunsError(t *testing.T) {
+ errCode := codes.PermissionDenied
+ mockDataTransfer.err = gstatus.Error(errCode, "test error")
+
+ var formattedParent string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]")
+ var startTime *timestamppb.Timestamp = &timestamppb.Timestamp{}
+ var
endTime *timestamppb.Timestamp = &timestamppb.Timestamp{}
+ var request = &datatransferpb.ScheduleTransferRunsRequest{
+ Parent: formattedParent,
+ StartTime: startTime,
+ EndTime: endTime,
+ }
+
+ c, err := NewClient(context.Background(), clientOpt)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err := c.ScheduleTransferRuns(context.Background(), request)
+
+ if st, ok := gstatus.FromError(err); !ok {
+ t.Errorf("got error %v, expected grpc error", err)
+ } else if c := st.Code(); c != errCode {
+ t.Errorf("got error code %q, want %q", c, errCode)
+ }
+ _ = resp
+}
+func TestDataTransferServiceGetTransferRun(t *testing.T) {
+ var name2 string = "name2-1052831874"
+ var destinationDatasetId string = "destinationDatasetId1541564179"
+ var dataSourceId string = "dataSourceId-1015796374"
+ var userId int64 = 147132913
+ var schedule string = "schedule-697920873"
+ var expectedResponse = &datatransferpb.TransferRun{
+ Name: name2,
+ DestinationDatasetId: destinationDatasetId,
+ DataSourceId: dataSourceId,
+ UserId: userId,
+ Schedule: schedule,
+ }
+
+ mockDataTransfer.err = nil
+ mockDataTransfer.reqs = nil
+
+ mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)
+
+ var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[TRANSFER_CONFIG]", "[RUN]")
+ var request = &datatransferpb.GetTransferRunRequest{
+ Name: formattedName,
+ }
+
+ c, err := NewClient(context.Background(), clientOpt)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err := c.GetTransferRun(context.Background(), request)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if want, got := request, mockDataTransfer.reqs[0]; !proto.Equal(want, got) {
+ t.Errorf("wrong request %q, want %q", got, want)
+ }
+
+ if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+ t.Errorf("wrong response %q, want %q)", got, want)
+ }
+}
+
+func TestDataTransferServiceGetTransferRunError(t *testing.T) {
+ errCode := codes.PermissionDenied
+ mockDataTransfer.err = gstatus.Error(errCode, "test error")
+
+ var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[TRANSFER_CONFIG]", "[RUN]")
+ var request = &datatransferpb.GetTransferRunRequest{
+ Name: formattedName,
+ }
+
+ c, err := NewClient(context.Background(), clientOpt)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err := c.GetTransferRun(context.Background(), request)
+
+ if st, ok := gstatus.FromError(err); !ok {
+ t.Errorf("got error %v, expected grpc error", err)
+ } else if c := st.Code(); c != errCode {
+ t.Errorf("got error code %q, want %q", c, errCode)
+ }
+ _ = resp
+}
+func TestDataTransferServiceDeleteTransferRun(t *testing.T) {
+ var expectedResponse *emptypb.Empty = &emptypb.Empty{}
+
+ mockDataTransfer.err = nil
+ mockDataTransfer.reqs = nil
+
+ mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)
+
+ var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[TRANSFER_CONFIG]", "[RUN]")
+ var request = &datatransferpb.DeleteTransferRunRequest{
+ Name: formattedName,
+ }
+
+ c, err := NewClient(context.Background(), clientOpt)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = c.DeleteTransferRun(context.Background(), request)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if want, got := request, mockDataTransfer.reqs[0]; !proto.Equal(want, got) {
+ t.Errorf("wrong request %q, want %q", got, want)
+ }
+
+}
+
+func TestDataTransferServiceDeleteTransferRunError(t *testing.T) {
+ errCode := codes.PermissionDenied
+ 
mockDataTransfer.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[TRANSFER_CONFIG]", "[RUN]") + var request = &datatransferpb.DeleteTransferRunRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteTransferRun(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestDataTransferServiceListTransferRuns(t *testing.T) { + var nextPageToken string = "" + var transferRunsElement *datatransferpb.TransferRun = &datatransferpb.TransferRun{} + var transferRuns = []*datatransferpb.TransferRun{transferRunsElement} + var expectedResponse = &datatransferpb.ListTransferRunsResponse{ + NextPageToken: nextPageToken, + TransferRuns: transferRuns, + } + + mockDataTransfer.err = nil + mockDataTransfer.reqs = nil + + mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]") + var request = &datatransferpb.ListTransferRunsRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTransferRuns(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDataTransfer.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.TransferRuns[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q", got, want) + } +} + +func TestDataTransferServiceListTransferRunsError(t *testing.T) { + errCode := codes.PermissionDenied + mockDataTransfer.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]") + var request = &datatransferpb.ListTransferRunsRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTransferRuns(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDataTransferServiceListTransferLogs(t *testing.T) { + var nextPageToken string = "" + var transferMessagesElement *datatransferpb.TransferMessage = &datatransferpb.TransferMessage{} + var transferMessages = []*datatransferpb.TransferMessage{transferMessagesElement} + var expectedResponse = &datatransferpb.ListTransferLogsResponse{ + NextPageToken: nextPageToken, + TransferMessages: transferMessages, + } + + mockDataTransfer.err = nil + mockDataTransfer.reqs = nil + + mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[TRANSFER_CONFIG]", "[RUN]") + var request = &datatransferpb.ListTransferLogsRequest{ + Parent: formattedParent, + } + + c, err
:= NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTransferLogs(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDataTransfer.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.TransferMessages[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q", got, want) + } +} + +func TestDataTransferServiceListTransferLogsError(t *testing.T) { + errCode := codes.PermissionDenied + mockDataTransfer.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[TRANSFER_CONFIG]", "[RUN]") + var request = &datatransferpb.ListTransferLogsRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTransferLogs(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDataTransferServiceCheckValidCreds(t *testing.T) { + var hasValidCreds bool = false + var expectedResponse = &datatransferpb.CheckValidCredsResponse{ + HasValidCreds: hasValidCreds, + } + + mockDataTransfer.err = nil + mockDataTransfer.reqs = nil + + mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/dataSources/%s", "[PROJECT]", "[DATA_SOURCE]") + var request = &datatransferpb.CheckValidCredsRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CheckValidCreds(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDataTransfer.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q", got, want) + } +} + +func TestDataTransferServiceCheckValidCredsError(t *testing.T) { + errCode := codes.PermissionDenied + mockDataTransfer.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/dataSources/%s", "[PROJECT]", "[DATA_SOURCE]") + var request = &datatransferpb.CheckValidCredsRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CheckValidCreds(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/path_funcs.go b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/path_funcs.go new file mode 100644 index 0000000..89eb5bb --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/path_funcs.go @@ -0,0 +1,135 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not
use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datatransfer + +// ProjectPath returns the path for the project resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s", project) +// instead. +func ProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// LocationPath returns the path for the location resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/locations/%s", project, location) +// instead. +func LocationPath(project, location string) string { + return "" + + "projects/" + + project + + "/locations/" + + location + + "" +} + +// LocationDataSourcePath returns the path for the location data source resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/locations/%s/dataSources/%s", project, location, dataSource) +// instead. +func LocationDataSourcePath(project, location, dataSource string) string { + return "" + + "projects/" + + project + + "/locations/" + + location + + "/dataSources/" + + dataSource + + "" +} + +// LocationTransferConfigPath returns the path for the location transfer config resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", project, location, transferConfig) +// instead. +func LocationTransferConfigPath(project, location, transferConfig string) string { + return "" + + "projects/" + + project + + "/locations/" + + location + + "/transferConfigs/" + + transferConfig + + "" +} + +// LocationRunPath returns the path for the location run resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", project, location, transferConfig, run) +// instead. +func LocationRunPath(project, location, transferConfig, run string) string { + return "" + + "projects/" + + project + + "/locations/" + + location + + "/transferConfigs/" + + transferConfig + + "/runs/" + + run + + "" +} + +// DataSourcePath returns the path for the data source resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/dataSources/%s", project, dataSource) +// instead. +func DataSourcePath(project, dataSource string) string { + return "" + + "projects/" + + project + + "/dataSources/" + + dataSource + + "" +} + +// TransferConfigPath returns the path for the transfer config resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/transferConfigs/%s", project, transferConfig) +// instead. +func TransferConfigPath(project, transferConfig string) string { + return "" + + "projects/" + + project + + "/transferConfigs/" + + transferConfig + + "" +} + +// RunPath returns the path for the run resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", project, transferConfig, run) +// instead. 
+func RunPath(project, transferConfig, run string) string { + return "" + + "projects/" + + project + + "/transferConfigs/" + + transferConfig + + "/runs/" + + run + + "" +} diff --git a/vendor/cloud.google.com/go/bigquery/doc.go b/vendor/cloud.google.com/go/bigquery/doc.go new file mode 100644 index 0000000..7a19b37 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/doc.go @@ -0,0 +1,303 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package bigquery provides a client for the BigQuery service. + +Note: This package is in beta. Some backwards-incompatible changes may occur. + +The following assumes a basic familiarity with BigQuery concepts. +See https://cloud.google.com/bigquery/docs. + + +Creating a Client + +To start working with this package, create a client: + + ctx := context.Background() + client, err := bigquery.NewClient(ctx, projectID) + if err != nil { + // TODO: Handle error. + } + +Querying + +To query existing tables, create a Query and call its Read method: + + q := client.Query(` + SELECT year, SUM(number) as num + FROM [bigquery-public-data:usa_names.usa_1910_2013] + WHERE name = "William" + GROUP BY year + ORDER BY year + `) + it, err := q.Read(ctx) + if err != nil { + // TODO: Handle error. + } + +Then iterate through the resulting rows. You can store a row using +anything that implements the ValueLoader interface, or with a slice or map of bigquery.Value. +A slice is simplest: + + for { + var values []bigquery.Value + err := it.Next(&values) + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(values) + } + +You can also use a struct whose exported fields match the query: + + type Count struct { + Year int + Num int + } + for { + var c Count + err := it.Next(&c) + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(c) + } + +You can also start the query running and get the results later. +Create the query as above, but call Run instead of Read. This returns a Job, +which represents an asynchronous operation. + + job, err := q.Run(ctx) + if err != nil { + // TODO: Handle error. + } + +Get the job's ID, a printable string. You can save this string to retrieve +the results at a later time, even in another process. + + jobID := job.ID() + fmt.Printf("The job ID is %s\n", jobID) + +To retrieve the job's results from the ID, first look up the Job: + + job, err = client.JobFromID(ctx, jobID) + if err != nil { + // TODO: Handle error. + } + +Use the Job.Read method to obtain an iterator, and loop over the rows. +Query.Read is just a convenience method that combines Query.Run and Job.Read. + + it, err = job.Read(ctx) + if err != nil { + // TODO: Handle error. + } + // Proceed with iteration as above.
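+ +As a condensed sketch of the run-now, read-later flow just described (this combines only the calls shown above; how the job ID is handed between processes is left abstract): + + // In one process: start the query and record its ID. + job, err := q.Run(ctx) + if err != nil { + // TODO: Handle error. + } + jobID := job.ID() // Persist this string somewhere durable. + + // Later, possibly in another process: look the job up and read its rows. + job, err = client.JobFromID(ctx, jobID) + if err != nil { + // TODO: Handle error. + } + it, err = job.Read(ctx) + if err != nil { + // TODO: Handle error. + } + // Iterate with it.Next as shown earlier.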
+ +Datasets and Tables + +You can refer to datasets in the client's project with the Dataset method, and +in other projects with the DatasetInProject method: + + myDataset := client.Dataset("my_dataset") + yourDataset := client.DatasetInProject("your-project-id", "your_dataset") + +These methods create references to datasets, not the datasets themselves. You can have +a dataset reference even if the dataset doesn't exist yet. Use Dataset.Create to +create a dataset from a reference: + + if err := myDataset.Create(ctx, nil); err != nil { + // TODO: Handle error. + } + +You can refer to tables with Dataset.Table. Like bigquery.Dataset, bigquery.Table is a reference +to an object in BigQuery that may or may not exist. + + table := myDataset.Table("my_table") + +You can create, delete and update the metadata of tables with methods on Table. +For instance, you could create a temporary table with: + + err = myDataset.Table("temp").Create(ctx, &bigquery.TableMetadata{ + ExpirationTime: time.Now().Add(1*time.Hour)}) + if err != nil { + // TODO: Handle error. + } + +We'll see how to create a table with a schema in the next section. + +Schemas + +There are two ways to construct schemas with this package. +You can build a schema by hand, like so: + + schema1 := bigquery.Schema{ + {Name: "Name", Required: true, Type: bigquery.StringFieldType}, + {Name: "Grades", Repeated: true, Type: bigquery.IntegerFieldType}, + {Name: "Optional", Required: false, Type: bigquery.IntegerFieldType}, + } + +Or you can infer the schema from a struct: + + type student struct { + Name string + Grades []int + Optional bigquery.NullInt64 + } + schema2, err := bigquery.InferSchema(student{}) + if err != nil { + // TODO: Handle error. + } + // schema1 and schema2 are identical. + +Struct inference supports tags like those of the encoding/json package, so you can +change names, ignore fields, or mark a field as nullable (non-required). Fields +declared as one of the Null types (NullInt64, NullFloat64, NullString, NullBool, +NullTimestamp, NullDate, NullTime and NullDateTime) are automatically inferred as +nullable, so the "nullable" tag is only needed for []byte and pointer-to-struct +fields. + + type student2 struct { + Name string `bigquery:"full_name"` + Grades []int + Secret string `bigquery:"-"` + Optional []byte `bigquery:",nullable"` + } + schema3, err := bigquery.InferSchema(student2{}) + if err != nil { + // TODO: Handle error. + } + // schema3 has required fields "full_name" and "Grades", and nullable BYTES field "Optional". + +Having constructed a schema, you can create a table with it like so: + + if err := table.Create(ctx, &bigquery.TableMetadata{Schema: schema1}); err != nil { + // TODO: Handle error. + } + +Copying + +You can copy one or more tables to another table. Begin by constructing a Copier +describing the copy. Then set any desired copy options, and finally call Run to get a Job: + + copier := myDataset.Table("dest").CopierFrom(myDataset.Table("src")) + copier.WriteDisposition = bigquery.WriteTruncate + job, err = copier.Run(ctx) + if err != nil { + // TODO: Handle error. + } + +You can chain the call to Run if you don't want to set options: + + job, err = myDataset.Table("dest").CopierFrom(myDataset.Table("src")).Run(ctx) + if err != nil { + // TODO: Handle error. + } + +You can wait for your job to complete: + + status, err := job.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + +Job.Wait polls with exponential backoff.
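+Because Wait takes a context, one way to bound how long you are willing to +poll is to pass a context with a deadline. This is a sketch of ordinary +context usage rather than anything specific to this package: + + ctx, cancel := context.WithTimeout(ctx, 5*time.Minute) + defer cancel() + status, err := job.Wait(ctx) + if err != nil { + // TODO: Handle error. The error may reflect the expired deadline. + } + _ = status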
You can also poll yourself, if you +wish: + + for { + status, err := job.Status(ctx) + if err != nil { + // TODO: Handle error. + } + if status.Done() { + if status.Err() != nil { + log.Fatalf("Job failed with error %v", status.Err()) + } + break + } + time.Sleep(pollInterval) + } + +Loading and Uploading + +There are two ways to populate a table with this package: load the data from a Google Cloud Storage +object, or upload rows directly from your program. + +For loading, first create a GCSReference, configuring it if desired. Then make a Loader, optionally configure +it as well, and call its Run method. + + gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object") + gcsRef.AllowJaggedRows = true + loader := myDataset.Table("dest").LoaderFrom(gcsRef) + loader.CreateDisposition = bigquery.CreateNever + job, err = loader.Run(ctx) + // Poll the job for completion if desired, as above. + +To upload, first define a type that implements the ValueSaver interface, which has a single method named Save. +Then create an Uploader, and call its Put method with a slice of values. + + u := table.Uploader() + // Item implements the ValueSaver interface. + items := []*Item{ + {Name: "n1", Size: 32.6, Count: 7}, + {Name: "n2", Size: 4, Count: 2}, + {Name: "n3", Size: 101.5, Count: 1}, + } + if err := u.Put(ctx, items); err != nil { + // TODO: Handle error. + } + +You can also upload a struct that doesn't implement ValueSaver. Use the StructSaver type +to specify the schema and insert ID by hand, or just supply the struct or struct pointer +directly and the schema will be inferred: + + type Item2 struct { + Name string + Size float64 + Count int + } + // Item2 does not implement the ValueSaver interface; its schema is inferred. + items2 := []*Item2{ + {Name: "n1", Size: 32.6, Count: 7}, + {Name: "n2", Size: 4, Count: 2}, + {Name: "n3", Size: 101.5, Count: 1}, + } + if err := u.Put(ctx, items2); err != nil { + // TODO: Handle error. + } + +Extracting + +If you've been following so far, extracting data from a BigQuery table +into a Google Cloud Storage object will feel familiar. First create an +Extractor, then optionally configure it, and lastly call its Run method. + + extractor := table.ExtractorTo(gcsRef) + extractor.DisableHeader = true + job, err = extractor.Run(ctx) + // Poll the job for completion if desired, as above. + +Authentication + +See examples of authorization and authentication at +https://godoc.org/cloud.google.com/go#pkg-examples. +*/ +package bigquery // import "cloud.google.com/go/bigquery" diff --git a/vendor/cloud.google.com/go/bigquery/error.go b/vendor/cloud.google.com/go/bigquery/error.go new file mode 100644 index 0000000..b5abd15 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/error.go @@ -0,0 +1,82 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "fmt" + + bq "google.golang.org/api/bigquery/v2" +) + +// An Error contains detailed information about a failed bigquery operation.
+type Error struct { + // Mirrors bq.ErrorProto, but drops DebugInfo + Location, Message, Reason string +} + +func (e Error) Error() string { + return fmt.Sprintf("{Location: %q; Message: %q; Reason: %q}", e.Location, e.Message, e.Reason) +} + +func bqToError(ep *bq.ErrorProto) *Error { + if ep == nil { + return nil + } + return &Error{ + Location: ep.Location, + Message: ep.Message, + Reason: ep.Reason, + } +} + +// A MultiError contains multiple related errors. +type MultiError []error + +func (m MultiError) Error() string { + switch len(m) { + case 0: + return "(0 errors)" + case 1: + return m[0].Error() + case 2: + return m[0].Error() + " (and 1 other error)" + } + return fmt.Sprintf("%s (and %d other errors)", m[0].Error(), len(m)-1) +} + +// RowInsertionError contains all errors that occurred when attempting to insert a row. +type RowInsertionError struct { + InsertID string // The InsertID associated with the affected row. + RowIndex int // The 0-based index of the affected row in the batch of rows being inserted. + Errors MultiError +} + +func (e *RowInsertionError) Error() string { + errFmt := "insertion of row [insertID: %q; insertIndex: %v] failed with error: %s" + return fmt.Sprintf(errFmt, e.InsertID, e.RowIndex, e.Errors.Error()) +} + +// PutMultiError contains an error for each row which was not successfully inserted +// into a BigQuery table. +type PutMultiError []RowInsertionError + +func (pme PutMultiError) Error() string { + plural := "s" + if len(pme) == 1 { + plural = "" + } + + return fmt.Sprintf("%v row insertion%s failed", len(pme), plural) +} diff --git a/vendor/cloud.google.com/go/bigquery/error_test.go b/vendor/cloud.google.com/go/bigquery/error_test.go new file mode 100644 index 0000000..1745eec --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/error_test.go @@ -0,0 +1,110 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bigquery + +import ( + "errors" + "strings" + "testing" + + "cloud.google.com/go/internal/testutil" + + bq "google.golang.org/api/bigquery/v2" +) + +func rowInsertionError(msg string) RowInsertionError { + return RowInsertionError{Errors: []error{errors.New(msg)}} +} + +func TestPutMultiErrorString(t *testing.T) { + testCases := []struct { + errs PutMultiError + want string + }{ + { + errs: PutMultiError{}, + want: "0 row insertions failed", + }, + { + errs: PutMultiError{rowInsertionError("a")}, + want: "1 row insertion failed", + }, + { + errs: PutMultiError{rowInsertionError("a"), rowInsertionError("b")}, + want: "2 row insertions failed", + }, + } + + for _, tc := range testCases { + if tc.errs.Error() != tc.want { + t.Errorf("PutMultiError string: got:\n%v\nwant:\n%v", tc.errs.Error(), tc.want) + } + } +} + +func TestMultiErrorString(t *testing.T) { + testCases := []struct { + errs MultiError + want string + }{ + { + errs: MultiError{}, + want: "(0 errors)", + }, + { + errs: MultiError{errors.New("a")}, + want: "a", + }, + { + errs: MultiError{errors.New("a"), errors.New("b")}, + want: "a (and 1 other error)", + }, + { + errs: MultiError{errors.New("a"), errors.New("b"), errors.New("c")}, + want: "a (and 2 other errors)", + }, + } + + for _, tc := range testCases { + if tc.errs.Error() != tc.want { + t.Errorf("MultiError string: got:\n%v\nwant:\n%v", tc.errs.Error(), tc.want) + } + } +} + +func TestErrorFromErrorProto(t *testing.T) { + for _, test := range []struct { + in *bq.ErrorProto + want *Error + }{ + {nil, nil}, + { + in: &bq.ErrorProto{Location: "L", Message: "M", Reason: "R"}, + want: &Error{Location: "L", Message: "M", Reason: "R"}, + }, + } { + if got := bqToError(test.in); !testutil.Equal(got, test.want) { + t.Errorf("%v: got %v, want %v", test.in, got, test.want) + } + } +} + +func TestErrorString(t *testing.T) { + e := &Error{Location: "<L>", Message: "<M>", Reason: "<R>"} + got := e.Error() + if !strings.Contains(got, "<L>") || !strings.Contains(got, "<M>") || !strings.Contains(got, "<R>") { + t.Errorf(`got %q, expected to see "<L>", "<M>" and "<R>"`, got) + } +} diff --git a/vendor/cloud.google.com/go/bigquery/examples_test.go b/vendor/cloud.google.com/go/bigquery/examples_test.go new file mode 100644 index 0000000..2750b8d --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/examples_test.go @@ -0,0 +1,829 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery_test + +import ( + "fmt" + "os" + "time" + + "cloud.google.com/go/bigquery" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func ExampleNewClient() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + _ = client // TODO: Use client. +} + +func ExampleClient_Dataset() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error.
+ } + ds := client.Dataset("my_dataset") + fmt.Println(ds) +} + +func ExampleClient_DatasetInProject() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + ds := client.DatasetInProject("their-project-id", "their-dataset") + fmt.Println(ds) +} + +func ExampleClient_Datasets() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it := client.Datasets(ctx) + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func ExampleClient_DatasetsInProject() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it := client.DatasetsInProject(ctx, "their-project-id") + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func getJobID() string { return "" } + +func ExampleClient_JobFromID() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + jobID := getJobID() // Get a job ID using Job.ID, the console or elsewhere. + job, err := client.JobFromID(ctx, jobID) + if err != nil { + // TODO: Handle error. + } + fmt.Println(job.LastStatus()) // Display the job's status. +} + +func ExampleClient_Jobs() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it := client.Jobs(ctx) + it.State = bigquery.Running // list only running jobs. + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func ExampleNewGCSReference() { + gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object") + fmt.Println(gcsRef) +} + +func ExampleClient_Query() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + q := client.Query("select name, num from t1") + q.DefaultProjectID = "project-id" + // TODO: set other options on the Query. + // TODO: Call Query.Run or Query.Read. +} + +func ExampleClient_Query_parameters() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + q := client.Query("select num from t1 where name = @user") + q.Parameters = []bigquery.QueryParameter{ + {Name: "user", Value: "Elizabeth"}, + } + // TODO: set other options on the Query. + // TODO: Call Query.Run or Query.Read. +} + +// This example demonstrates how to run a query job on a table +// with a customer-managed encryption key. The same +// applies to load and copy jobs as well. +func ExampleClient_Query_encryptionKey() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + q := client.Query("select name, num from t1") + // TODO: Replace this key with a key you have created in Cloud KMS. + keyName := "projects/P/locations/L/keyRings/R/cryptoKeys/K" + q.DestinationEncryptionConfig = &bigquery.EncryptionConfig{KMSKeyName: keyName} + // TODO: set other options on the Query. + // TODO: Call Query.Run or Query.Read. +} + +func ExampleQuery_Read() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + q := client.Query("select name, num from t1") + it, err := q.Read(ctx) + if err != nil { + // TODO: Handle error. + } + _ = it // TODO: iterate using Next or iterator.Pager. 
+} + +func ExampleRowIterator_Next() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + q := client.Query("select name, num from t1") + it, err := q.Read(ctx) + if err != nil { + // TODO: Handle error. + } + for { + var row []bigquery.Value + err := it.Next(&row) + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(row) + } +} + +func ExampleRowIterator_Next_struct() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + type score struct { + Name string + Num int + } + + q := client.Query("select name, num from t1") + it, err := q.Read(ctx) + if err != nil { + // TODO: Handle error. + } + for { + var s score + err := it.Next(&s) + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(s) + } +} + +func ExampleJob_Read() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + q := client.Query("select name, num from t1") + // Call Query.Run to get a Job, then call Read on the job. + // Note: Query.Read is a shorthand for this. + job, err := q.Run(ctx) + if err != nil { + // TODO: Handle error. + } + it, err := job.Read(ctx) + if err != nil { + // TODO: Handle error. + } + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func ExampleJob_Wait() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + ds := client.Dataset("my_dataset") + job, err := ds.Table("t1").CopierFrom(ds.Table("t2")).Run(ctx) + if err != nil { + // TODO: Handle error. + } + status, err := job.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + if status.Err() != nil { + // TODO: Handle error. + } +} + +func ExampleJob_Config() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + ds := client.Dataset("my_dataset") + job, err := ds.Table("t1").CopierFrom(ds.Table("t2")).Run(ctx) + if err != nil { + // TODO: Handle error. + } + jc, err := job.Config() + if err != nil { + // TODO: Handle error. + } + copyConfig := jc.(*bigquery.CopyConfig) + fmt.Println(copyConfig.Dst, copyConfig.CreateDisposition) +} + +func ExampleDataset_Create() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + ds := client.Dataset("my_dataset") + if err := ds.Create(ctx, &bigquery.DatasetMetadata{Location: "EU"}); err != nil { + // TODO: Handle error. + } +} + +func ExampleDataset_Delete() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + if err := client.Dataset("my_dataset").Delete(ctx); err != nil { + // TODO: Handle error. + } +} + +func ExampleDataset_Metadata() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + md, err := client.Dataset("my_dataset").Metadata(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Println(md) +} + +// This example illustrates how to perform a read-modify-write sequence on dataset +// metadata. Passing the metadata's ETag to the Update call ensures that the call +// will fail if the metadata was changed since the read. 
+func ExampleDataset_Update_readModifyWrite() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + ds := client.Dataset("my_dataset") + md, err := ds.Metadata(ctx) + if err != nil { + // TODO: Handle error. + } + md2, err := ds.Update(ctx, + bigquery.DatasetMetadataToUpdate{Name: "new " + md.Name}, + md.ETag) + if err != nil { + // TODO: Handle error. + } + fmt.Println(md2) +} + +// To perform a blind write, ignoring the existing state (and possibly overwriting +// other updates), pass the empty string as the etag. +func ExampleDataset_Update_blindWrite() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + md, err := client.Dataset("my_dataset").Update(ctx, bigquery.DatasetMetadataToUpdate{Name: "blind"}, "") + if err != nil { + // TODO: Handle error. + } + fmt.Println(md) +} + +func ExampleDataset_Table() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + // Table creates a reference to the table. It does not create the actual + // table in BigQuery; to do so, use Table.Create. + t := client.Dataset("my_dataset").Table("my_table") + fmt.Println(t) +} + +func ExampleDataset_Tables() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it := client.Dataset("my_dataset").Tables(ctx) + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func ExampleDatasetIterator_Next() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it := client.Datasets(ctx) + for { + ds, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(ds) + } +} + +func ExampleInferSchema() { + type Item struct { + Name string + Size float64 + Count int + } + schema, err := bigquery.InferSchema(Item{}) + if err != nil { + fmt.Println(err) + // TODO: Handle error. + } + for _, fs := range schema { + fmt.Println(fs.Name, fs.Type) + } + // Output: + // Name STRING + // Size FLOAT + // Count INTEGER +} + +func ExampleInferSchema_tags() { + type Item struct { + Name string + Size float64 + Count int `bigquery:"number"` + Secret []byte `bigquery:"-"` + Optional bigquery.NullBool + OptBytes []byte `bigquery:",nullable"` + } + schema, err := bigquery.InferSchema(Item{}) + if err != nil { + fmt.Println(err) + // TODO: Handle error. + } + for _, fs := range schema { + fmt.Println(fs.Name, fs.Type, fs.Required) + } + // Output: + // Name STRING true + // Size FLOAT true + // number INTEGER true + // Optional BOOLEAN false + // OptBytes BYTES false +} + +func ExampleTable_Create() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + t := client.Dataset("my_dataset").Table("new-table") + if err := t.Create(ctx, nil); err != nil { + // TODO: Handle error. + } +} + +// Initialize a new table by passing TableMetadata to Table.Create. +func ExampleTable_Create_initialize() { + ctx := context.Background() + // Infer table schema from a Go type. + schema, err := bigquery.InferSchema(Item{}) + if err != nil { + // TODO: Handle error. + } + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. 
+ } + t := client.Dataset("my_dataset").Table("new-table") + if err := t.Create(ctx, + &bigquery.TableMetadata{ + Name: "My New Table", + Schema: schema, + ExpirationTime: time.Now().Add(24 * time.Hour), + }); err != nil { + // TODO: Handle error. + } +} + +// This example demonstrates how to create a table with +// a customer-managed encryption key. +func ExampleTable_Create_encryptionKey() { + ctx := context.Background() + // Infer table schema from a Go type. + schema, err := bigquery.InferSchema(Item{}) + if err != nil { + // TODO: Handle error. + } + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + t := client.Dataset("my_dataset").Table("new-table") + + // TODO: Replace this key with a key you have created in Cloud KMS. + keyName := "projects/P/locations/L/keyRings/R/cryptoKeys/K" + if err := t.Create(ctx, + &bigquery.TableMetadata{ + Name: "My New Table", + Schema: schema, + EncryptionConfig: &bigquery.EncryptionConfig{KMSKeyName: keyName}, + }); err != nil { + // TODO: Handle error. + } +} + +func ExampleTable_Delete() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + if err := client.Dataset("my_dataset").Table("my_table").Delete(ctx); err != nil { + // TODO: Handle error. + } +} + +func ExampleTable_Metadata() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + md, err := client.Dataset("my_dataset").Table("my_table").Metadata(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Println(md) +} + +func ExampleTable_Uploader() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + u := client.Dataset("my_dataset").Table("my_table").Uploader() + _ = u // TODO: Use u. +} + +func ExampleTable_Uploader_options() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + u := client.Dataset("my_dataset").Table("my_table").Uploader() + u.SkipInvalidRows = true + u.IgnoreUnknownValues = true + _ = u // TODO: Use u. +} + +func ExampleTable_CopierFrom() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + ds := client.Dataset("my_dataset") + c := ds.Table("combined").CopierFrom(ds.Table("t1"), ds.Table("t2")) + c.WriteDisposition = bigquery.WriteTruncate + // TODO: set other options on the Copier. + job, err := c.Run(ctx) + if err != nil { + // TODO: Handle error. + } + status, err := job.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + if status.Err() != nil { + // TODO: Handle error. + } +} + +func ExampleTable_ExtractorTo() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object") + gcsRef.FieldDelimiter = ":" + // TODO: set other options on the GCSReference. + ds := client.Dataset("my_dataset") + extractor := ds.Table("my_table").ExtractorTo(gcsRef) + extractor.DisableHeader = true + // TODO: set other options on the Extractor. + job, err := extractor.Run(ctx) + if err != nil { + // TODO: Handle error. + } + status, err := job.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + if status.Err() != nil { + // TODO: Handle error. 
+ } +} + +func ExampleTable_LoaderFrom() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object") + gcsRef.AllowJaggedRows = true + gcsRef.MaxBadRecords = 5 + gcsRef.Schema = schema + // TODO: set other options on the GCSReference. + ds := client.Dataset("my_dataset") + loader := ds.Table("my_table").LoaderFrom(gcsRef) + loader.CreateDisposition = bigquery.CreateNever + // TODO: set other options on the Loader. + job, err := loader.Run(ctx) + if err != nil { + // TODO: Handle error. + } + status, err := job.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + if status.Err() != nil { + // TODO: Handle error. + } +} + +func ExampleTable_LoaderFrom_reader() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + f, err := os.Open("data.csv") + if err != nil { + // TODO: Handle error. + } + rs := bigquery.NewReaderSource(f) + rs.AllowJaggedRows = true + rs.MaxBadRecords = 5 + rs.Schema = schema + // TODO: set other options on the GCSReference. + ds := client.Dataset("my_dataset") + loader := ds.Table("my_table").LoaderFrom(rs) + loader.CreateDisposition = bigquery.CreateNever + // TODO: set other options on the Loader. + job, err := loader.Run(ctx) + if err != nil { + // TODO: Handle error. + } + status, err := job.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + if status.Err() != nil { + // TODO: Handle error. + } +} + +func ExampleTable_Read() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it := client.Dataset("my_dataset").Table("my_table").Read(ctx) + _ = it // TODO: iterate using Next or iterator.Pager. +} + +// This example illustrates how to perform a read-modify-write sequence on table +// metadata. Passing the metadata's ETag to the Update call ensures that the call +// will fail if the metadata was changed since the read. +func ExampleTable_Update_readModifyWrite() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + t := client.Dataset("my_dataset").Table("my_table") + md, err := t.Metadata(ctx) + if err != nil { + // TODO: Handle error. + } + md2, err := t.Update(ctx, + bigquery.TableMetadataToUpdate{Name: "new " + md.Name}, + md.ETag) + if err != nil { + // TODO: Handle error. + } + fmt.Println(md2) +} + +// To perform a blind write, ignoring the existing state (and possibly overwriting +// other updates), pass the empty string as the etag. +func ExampleTable_Update_blindWrite() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + t := client.Dataset("my_dataset").Table("my_table") + tm, err := t.Update(ctx, bigquery.TableMetadataToUpdate{ + Description: "my favorite table", + }, "") + if err != nil { + // TODO: Handle error. + } + fmt.Println(tm) +} + +func ExampleTableIterator_Next() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it := client.Dataset("my_dataset").Tables(ctx) + for { + t, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. 
+ } + fmt.Println(t) + } +} + +type Item struct { + Name string + Size float64 + Count int +} + +// Save implements the ValueSaver interface. +func (i *Item) Save() (map[string]bigquery.Value, string, error) { + return map[string]bigquery.Value{ + "Name": i.Name, + "Size": i.Size, + "Count": i.Count, + }, "", nil +} + +func ExampleUploader_Put() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + u := client.Dataset("my_dataset").Table("my_table").Uploader() + // Item implements the ValueSaver interface. + items := []*Item{ + {Name: "n1", Size: 32.6, Count: 7}, + {Name: "n2", Size: 4, Count: 2}, + {Name: "n3", Size: 101.5, Count: 1}, + } + if err := u.Put(ctx, items); err != nil { + // TODO: Handle error. + } +} + +var schema bigquery.Schema + +func ExampleUploader_Put_structSaver() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + u := client.Dataset("my_dataset").Table("my_table").Uploader() + + type score struct { + Name string + Num int + } + + // Assume schema holds the table's schema. + savers := []*bigquery.StructSaver{ + {Struct: score{Name: "n1", Num: 12}, Schema: schema, InsertID: "id1"}, + {Struct: score{Name: "n2", Num: 31}, Schema: schema, InsertID: "id2"}, + {Struct: score{Name: "n3", Num: 7}, Schema: schema, InsertID: "id3"}, + } + if err := u.Put(ctx, savers); err != nil { + // TODO: Handle error. + } +} + +func ExampleUploader_Put_struct() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + u := client.Dataset("my_dataset").Table("my_table").Uploader() + + type score struct { + Name string + Num int + } + scores := []score{ + {Name: "n1", Num: 12}, + {Name: "n2", Num: 31}, + {Name: "n3", Num: 7}, + } + // Schema is inferred from the score type. + if err := u.Put(ctx, scores); err != nil { + // TODO: Handle error. + } +} + +func ExampleUploader_Put_valuesSaver() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + u := client.Dataset("my_dataset").Table("my_table").Uploader() + + var vss []*bigquery.ValuesSaver + for i, name := range []string{"n1", "n2", "n3"} { + // Assume schema holds the table's schema. + vss = append(vss, &bigquery.ValuesSaver{ + Schema: schema, + InsertID: name, + Row: []bigquery.Value{name, int64(i)}, + }) + } + + if err := u.Put(ctx, vss); err != nil { + // TODO: Handle error. + } +} diff --git a/vendor/cloud.google.com/go/bigquery/external.go b/vendor/cloud.google.com/go/bigquery/external.go new file mode 100644 index 0000000..36eb9d9 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/external.go @@ -0,0 +1,398 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bigquery + +import ( + "encoding/base64" + "unicode/utf8" + + bq "google.golang.org/api/bigquery/v2" +) + +// DataFormat describes the format of BigQuery table data. +type DataFormat string + +// Constants describing the format of BigQuery table data. +const ( + CSV DataFormat = "CSV" + Avro DataFormat = "AVRO" + JSON DataFormat = "NEWLINE_DELIMITED_JSON" + DatastoreBackup DataFormat = "DATASTORE_BACKUP" + GoogleSheets DataFormat = "GOOGLE_SHEETS" + Bigtable DataFormat = "BIGTABLE" +) + +// ExternalData is a table which is stored outside of BigQuery. It is implemented by +// *ExternalDataConfig. +// GCSReference also implements it, for backwards compatibility. +type ExternalData interface { + toBQ() bq.ExternalDataConfiguration +} + +// ExternalDataConfig describes data external to BigQuery that can be used +// in queries and to create external tables. +type ExternalDataConfig struct { + // The format of the data. Required. + SourceFormat DataFormat + + // The fully-qualified URIs that point to your + // data in Google Cloud. Required. + // + // For Google Cloud Storage URIs, each URI can contain one '*' wildcard character + // and it must come after the 'bucket' name. Size limits related to load jobs + // apply to external data sources. + // + // For Google Cloud Bigtable URIs, exactly one URI can be specified and it has to be + // a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. + // + // For Google Cloud Datastore backups, exactly one URI can be specified. Also, + // the '*' wildcard character is not allowed. + SourceURIs []string + + // The schema of the data. Required for CSV and JSON; disallowed for the + // other formats. + Schema Schema + + // Try to detect schema and format options automatically. + // Any option specified explicitly will be honored. + AutoDetect bool + + // The compression type of the data. + Compression Compression + + // IgnoreUnknownValues causes values not matching the schema to be + // tolerated. Unknown values are ignored. For CSV this ignores extra values + // at the end of a line. For JSON this ignores named values that do not + // match any column name. If this field is not set, records containing + // unknown values are treated as bad records. The MaxBadRecords field can + // be used to customize how bad records are handled. + IgnoreUnknownValues bool + + // MaxBadRecords is the maximum number of bad records that will be ignored + // when reading data. + MaxBadRecords int64 + + // Additional options for CSV, GoogleSheets and Bigtable formats.
+ Options ExternalDataConfigOptions +} + +func (e *ExternalDataConfig) toBQ() bq.ExternalDataConfiguration { + q := bq.ExternalDataConfiguration{ + SourceFormat: string(e.SourceFormat), + SourceUris: e.SourceURIs, + Autodetect: e.AutoDetect, + Compression: string(e.Compression), + IgnoreUnknownValues: e.IgnoreUnknownValues, + MaxBadRecords: e.MaxBadRecords, + } + if e.Schema != nil { + q.Schema = e.Schema.toBQ() + } + if e.Options != nil { + e.Options.populateExternalDataConfig(&q) + } + return q +} + +func bqToExternalDataConfig(q *bq.ExternalDataConfiguration) (*ExternalDataConfig, error) { + e := &ExternalDataConfig{ + SourceFormat: DataFormat(q.SourceFormat), + SourceURIs: q.SourceUris, + AutoDetect: q.Autodetect, + Compression: Compression(q.Compression), + IgnoreUnknownValues: q.IgnoreUnknownValues, + MaxBadRecords: q.MaxBadRecords, + Schema: bqToSchema(q.Schema), + } + switch { + case q.CsvOptions != nil: + e.Options = bqToCSVOptions(q.CsvOptions) + case q.GoogleSheetsOptions != nil: + e.Options = bqToGoogleSheetsOptions(q.GoogleSheetsOptions) + case q.BigtableOptions != nil: + var err error + e.Options, err = bqToBigtableOptions(q.BigtableOptions) + if err != nil { + return nil, err + } + } + return e, nil +} + +// ExternalDataConfigOptions are additional options for external data configurations. +// This interface is implemented by CSVOptions, GoogleSheetsOptions and BigtableOptions. +type ExternalDataConfigOptions interface { + populateExternalDataConfig(*bq.ExternalDataConfiguration) +} + +// CSVOptions are additional options for CSV external data sources. +type CSVOptions struct { + // AllowJaggedRows causes missing trailing optional columns to be tolerated + // when reading CSV data. Missing values are treated as nulls. + AllowJaggedRows bool + + // AllowQuotedNewlines sets whether quoted data sections containing + // newlines are allowed when reading CSV data. + AllowQuotedNewlines bool + + // Encoding is the character encoding of data to be read. + Encoding Encoding + + // FieldDelimiter is the separator for fields in a CSV file, used when + // reading or exporting data. The default is ",". + FieldDelimiter string + + // Quote is the value used to quote data sections in a CSV file. The + // default quotation character is the double quote ("), which is used if + // both Quote and ForceZeroQuote are unset. + // To specify that no character should be interpreted as a quotation + // character, set ForceZeroQuote to true. + // Only used when reading data. + Quote string + ForceZeroQuote bool + + // The number of rows at the top of a CSV file that BigQuery will skip when + // reading data. + SkipLeadingRows int64 +} + +func (o *CSVOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) { + c.CsvOptions = &bq.CsvOptions{ + AllowJaggedRows: o.AllowJaggedRows, + AllowQuotedNewlines: o.AllowQuotedNewlines, + Encoding: string(o.Encoding), + FieldDelimiter: o.FieldDelimiter, + Quote: o.quote(), + SkipLeadingRows: o.SkipLeadingRows, + } +} + +// quote returns the CSV quote character, or nil if unset. 
+func (o *CSVOptions) quote() *string { + if o.ForceZeroQuote { + quote := "" + return &quote + } + if o.Quote == "" { + return nil + } + return &o.Quote +} + +func (o *CSVOptions) setQuote(ps *string) { + if ps != nil { + o.Quote = *ps + if o.Quote == "" { + o.ForceZeroQuote = true + } + } +} + +func bqToCSVOptions(q *bq.CsvOptions) *CSVOptions { + o := &CSVOptions{ + AllowJaggedRows: q.AllowJaggedRows, + AllowQuotedNewlines: q.AllowQuotedNewlines, + Encoding: Encoding(q.Encoding), + FieldDelimiter: q.FieldDelimiter, + SkipLeadingRows: q.SkipLeadingRows, + } + o.setQuote(q.Quote) + return o +} + +// GoogleSheetsOptions are additional options for GoogleSheets external data sources. +type GoogleSheetsOptions struct { + // The number of rows at the top of a sheet that BigQuery will skip when + // reading data. + SkipLeadingRows int64 +} + +func (o *GoogleSheetsOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) { + c.GoogleSheetsOptions = &bq.GoogleSheetsOptions{ + SkipLeadingRows: o.SkipLeadingRows, + } +} + +func bqToGoogleSheetsOptions(q *bq.GoogleSheetsOptions) *GoogleSheetsOptions { + return &GoogleSheetsOptions{ + SkipLeadingRows: q.SkipLeadingRows, + } +} + +// BigtableOptions are additional options for Bigtable external data sources. +type BigtableOptions struct { + // A list of column families to expose in the table schema along with their + // types. If omitted, all column families are present in the table schema and + // their values are read as BYTES. + ColumnFamilies []*BigtableColumnFamily + + // If true, then the column families that are not specified in columnFamilies + // list are not exposed in the table schema. Otherwise, they are read with BYTES + // type values. The default is false. + IgnoreUnspecifiedColumnFamilies bool + + // If true, then the rowkey column families will be read and converted to string. + // Otherwise they are read with BYTES type values and users need to manually cast + // them with CAST if necessary. The default is false. + ReadRowkeyAsString bool +} + +func (o *BigtableOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) { + q := &bq.BigtableOptions{ + IgnoreUnspecifiedColumnFamilies: o.IgnoreUnspecifiedColumnFamilies, + ReadRowkeyAsString: o.ReadRowkeyAsString, + } + for _, f := range o.ColumnFamilies { + q.ColumnFamilies = append(q.ColumnFamilies, f.toBQ()) + } + c.BigtableOptions = q +} + +func bqToBigtableOptions(q *bq.BigtableOptions) (*BigtableOptions, error) { + b := &BigtableOptions{ + IgnoreUnspecifiedColumnFamilies: q.IgnoreUnspecifiedColumnFamilies, + ReadRowkeyAsString: q.ReadRowkeyAsString, + } + for _, f := range q.ColumnFamilies { + f2, err := bqToBigtableColumnFamily(f) + if err != nil { + return nil, err + } + b.ColumnFamilies = append(b.ColumnFamilies, f2) + } + return b, nil +} + +// BigtableColumnFamily describes how BigQuery should access a Bigtable column family. +type BigtableColumnFamily struct { + // Identifier of the column family. + FamilyID string + + // Lists of columns that should be exposed as individual fields as opposed to a + // list of (column name, value) pairs. All columns whose qualifier matches a + // qualifier in this list can be accessed as <family field name>.<column field name>. + // Other columns can be accessed as a list through the <family field name>.Column field. + Columns []*BigtableColumn + + // The encoding of the values when the type is not STRING. Acceptable encoding values are: + // - TEXT - indicates values are alphanumeric text strings.
+ // - BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. + // This can be overridden for a specific column by listing that column in 'columns' and + // specifying an encoding for it. + Encoding string + + // If true, only the latest version of values are exposed for all columns in this + // column family. This can be overridden for a specific column by listing that + // column in 'columns' and specifying a different setting for that column. + OnlyReadLatest bool + + // The type to convert the value in cells of this + // column family. The values are expected to be encoded using HBase + // Bytes.toBytes function when using the BINARY encoding value. + // Following BigQuery types are allowed (case-sensitive): + // BYTES STRING INTEGER FLOAT BOOLEAN. + // The default type is BYTES. This can be overridden for a specific column by + // listing that column in 'columns' and specifying a type for it. + Type string +} + +func (b *BigtableColumnFamily) toBQ() *bq.BigtableColumnFamily { + q := &bq.BigtableColumnFamily{ + FamilyId: b.FamilyID, + Encoding: b.Encoding, + OnlyReadLatest: b.OnlyReadLatest, + Type: b.Type, + } + for _, col := range b.Columns { + q.Columns = append(q.Columns, col.toBQ()) + } + return q +} + +func bqToBigtableColumnFamily(q *bq.BigtableColumnFamily) (*BigtableColumnFamily, error) { + b := &BigtableColumnFamily{ + FamilyID: q.FamilyId, + Encoding: q.Encoding, + OnlyReadLatest: q.OnlyReadLatest, + Type: q.Type, + } + for _, col := range q.Columns { + c, err := bqToBigtableColumn(col) + if err != nil { + return nil, err + } + b.Columns = append(b.Columns, c) + } + return b, nil +} + +// BigtableColumn describes how BigQuery should access a Bigtable column. +type BigtableColumn struct { + // Qualifier of the column. Columns in the parent column family that have this + // exact qualifier are exposed as the <family field name>.<qualifier> field. The column field name is the + // same as the column qualifier. + Qualifier string + + // If the qualifier is not a valid BigQuery field identifier i.e. does not match + // [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field + // name and is used as field name in queries. + FieldName string + + // If true, only the latest version of values are exposed for this column. + // See BigtableColumnFamily.OnlyReadLatest. + OnlyReadLatest bool + + // The encoding of the values when the type is not STRING. + // See BigtableColumnFamily.Encoding + Encoding string + + // The type to convert the value in cells of this column.
+ // See BigtableColumnFamily.Type + Type string +} + +func (b *BigtableColumn) toBQ() *bq.BigtableColumn { + q := &bq.BigtableColumn{ + FieldName: b.FieldName, + OnlyReadLatest: b.OnlyReadLatest, + Encoding: b.Encoding, + Type: b.Type, + } + if utf8.ValidString(b.Qualifier) { + q.QualifierString = b.Qualifier + } else { + q.QualifierEncoded = base64.RawStdEncoding.EncodeToString([]byte(b.Qualifier)) + } + return q +} + +func bqToBigtableColumn(q *bq.BigtableColumn) (*BigtableColumn, error) { + b := &BigtableColumn{ + FieldName: q.FieldName, + OnlyReadLatest: q.OnlyReadLatest, + Encoding: q.Encoding, + Type: q.Type, + } + if q.QualifierString != "" { + b.Qualifier = q.QualifierString + } else { + bytes, err := base64.RawStdEncoding.DecodeString(q.QualifierEncoded) + if err != nil { + return nil, err + } + b.Qualifier = string(bytes) + } + return b, nil +} diff --git a/vendor/cloud.google.com/go/bigquery/external_test.go b/vendor/cloud.google.com/go/bigquery/external_test.go new file mode 100644 index 0000000..54aae2d --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/external_test.go @@ -0,0 +1,143 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "testing" + + "cloud.google.com/go/internal/pretty" + "cloud.google.com/go/internal/testutil" +) + +func TestExternalDataConfig(t *testing.T) { + // Round-trip of ExternalDataConfig to underlying representation. 
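+	// Each case below is converted to the wire type with toBQ and converted
+	// back with bqToExternalDataConfig; the round-tripped value must be
+	// identical to the original. A sketch of the pattern exercised here:
+	//
+	//	q := want.toBQ()
+	//	got, err := bqToExternalDataConfig(&q)
+	//	// got must equal want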
+ for i, want := range []*ExternalDataConfig{ + { + SourceFormat: CSV, + SourceURIs: []string{"uri"}, + Schema: Schema{{Name: "n", Type: IntegerFieldType}}, + AutoDetect: true, + Compression: Gzip, + IgnoreUnknownValues: true, + MaxBadRecords: 17, + Options: &CSVOptions{ + AllowJaggedRows: true, + AllowQuotedNewlines: true, + Encoding: UTF_8, + FieldDelimiter: "f", + Quote: "q", + SkipLeadingRows: 3, + }, + }, + { + SourceFormat: GoogleSheets, + Options: &GoogleSheetsOptions{SkipLeadingRows: 4}, + }, + { + SourceFormat: Bigtable, + Options: &BigtableOptions{ + IgnoreUnspecifiedColumnFamilies: true, + ReadRowkeyAsString: true, + ColumnFamilies: []*BigtableColumnFamily{ + { + FamilyID: "f1", + Encoding: "TEXT", + OnlyReadLatest: true, + Type: "FLOAT", + Columns: []*BigtableColumn{ + { + Qualifier: "valid-utf-8", + FieldName: "fn", + OnlyReadLatest: true, + Encoding: "BINARY", + Type: "STRING", + }, + }, + }, + }, + }, + }, + } { + q := want.toBQ() + got, err := bqToExternalDataConfig(&q) + if err != nil { + t.Fatal(err) + } + if diff := testutil.Diff(got, want); diff != "" { + t.Errorf("#%d: got=-, want=+:\n%s", i, diff) + } + } +} + +func TestQuote(t *testing.T) { + ptr := func(s string) *string { return &s } + + for _, test := range []struct { + quote string + force bool + want *string + }{ + {"", false, nil}, + {"", true, ptr("")}, + {"-", false, ptr("-")}, + {"-", true, ptr("")}, + } { + o := CSVOptions{ + Quote: test.quote, + ForceZeroQuote: test.force, + } + got := o.quote() + if (got == nil) != (test.want == nil) { + t.Errorf("%+v\ngot %v\nwant %v", test, pretty.Value(got), pretty.Value(test.want)) + } + if got != nil && test.want != nil && *got != *test.want { + t.Errorf("%+v: got %q, want %q", test, *got, *test.want) + } + } +} + +func TestQualifier(t *testing.T) { + b := BigtableColumn{Qualifier: "a"} + q := b.toBQ() + if q.QualifierString != b.Qualifier || q.QualifierEncoded != "" { + t.Errorf("got (%q, %q), want (%q, %q)", + q.QualifierString, q.QualifierEncoded, b.Qualifier, "") + } + b2, err := bqToBigtableColumn(q) + if err != nil { + t.Fatal(err) + } + if got, want := b2.Qualifier, b.Qualifier; got != want { + t.Errorf("got %q, want %q", got, want) + } + + const ( + invalidUTF8 = "\xDF\xFF" + invalidEncoded = "3/8" + ) + b = BigtableColumn{Qualifier: invalidUTF8} + q = b.toBQ() + if q.QualifierString != "" || q.QualifierEncoded != invalidEncoded { + t.Errorf("got (%q, %q), want (%q, %q)", + q.QualifierString, "", b.Qualifier, invalidEncoded) + } + b2, err = bqToBigtableColumn(q) + if err != nil { + t.Fatal(err) + } + if got, want := b2.Qualifier, b.Qualifier; got != want { + t.Errorf("got %q, want %q", got, want) + } +} diff --git a/vendor/cloud.google.com/go/bigquery/extract.go b/vendor/cloud.google.com/go/bigquery/extract.go new file mode 100644 index 0000000..0ffe91a --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/extract.go @@ -0,0 +1,109 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
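+
+// This file implements extract jobs, which export a BigQuery table to Google
+// Cloud Storage. A minimal sketch of the flow, assuming an existing Client
+// named client and a context ctx (the dataset, table, and bucket names below
+// are hypothetical):
+//
+//	gcsRef := NewGCSReference("gs://my-bucket/data-*.csv")
+//	extractor := client.Dataset("my_dataset").Table("my_table").ExtractorTo(gcsRef)
+//	job, err := extractor.Run(ctx)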
+ +package bigquery + +import ( + "cloud.google.com/go/internal/trace" + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +// ExtractConfig holds the configuration for an extract job. +type ExtractConfig struct { + // Src is the table from which data will be extracted. + Src *Table + + // Dst is the destination into which the data will be extracted. + Dst *GCSReference + + // DisableHeader disables the printing of a header row in exported data. + DisableHeader bool + + // The labels associated with this job. + Labels map[string]string +} + +func (e *ExtractConfig) toBQ() *bq.JobConfiguration { + var printHeader *bool + if e.DisableHeader { + f := false + printHeader = &f + } + return &bq.JobConfiguration{ + Labels: e.Labels, + Extract: &bq.JobConfigurationExtract{ + DestinationUris: append([]string{}, e.Dst.URIs...), + Compression: string(e.Dst.Compression), + DestinationFormat: string(e.Dst.DestinationFormat), + FieldDelimiter: e.Dst.FieldDelimiter, + SourceTable: e.Src.toBQ(), + PrintHeader: printHeader, + }, + } +} + +func bqToExtractConfig(q *bq.JobConfiguration, c *Client) *ExtractConfig { + qe := q.Extract + return &ExtractConfig{ + Labels: q.Labels, + Dst: &GCSReference{ + URIs: qe.DestinationUris, + Compression: Compression(qe.Compression), + DestinationFormat: DataFormat(qe.DestinationFormat), + FileConfig: FileConfig{ + CSVOptions: CSVOptions{ + FieldDelimiter: qe.FieldDelimiter, + }, + }, + }, + DisableHeader: qe.PrintHeader != nil && !*qe.PrintHeader, + Src: bqToTable(qe.SourceTable, c), + } +} + +// An Extractor extracts data from a BigQuery table into Google Cloud Storage. +type Extractor struct { + JobIDConfig + ExtractConfig + c *Client +} + +// ExtractorTo returns an Extractor which can be used to extract data from a +// BigQuery table into Google Cloud Storage. +// The returned Extractor may optionally be further configured before its Run method is called. +func (t *Table) ExtractorTo(dst *GCSReference) *Extractor { + return &Extractor{ + c: t.c, + ExtractConfig: ExtractConfig{ + Src: t, + Dst: dst, + }, + } +} + +// Run initiates an extract job. +func (e *Extractor) Run(ctx context.Context) (j *Job, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Extractor.Run") + defer func() { trace.EndSpan(ctx, err) }() + + return e.c.insertJob(ctx, e.newJob(), nil) +} + +func (e *Extractor) newJob() *bq.Job { + return &bq.Job{ + JobReference: e.JobIDConfig.createJobRef(e.c), + Configuration: e.ExtractConfig.toBQ(), + } +} diff --git a/vendor/cloud.google.com/go/bigquery/extract_test.go b/vendor/cloud.google.com/go/bigquery/extract_test.go new file mode 100644 index 0000000..95d5b79 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/extract_test.go @@ -0,0 +1,118 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package bigquery
+
+import (
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+
+	"cloud.google.com/go/internal/testutil"
+
+	bq "google.golang.org/api/bigquery/v2"
+)
+
+func defaultExtractJob() *bq.Job {
+	return &bq.Job{
+		JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
+		Configuration: &bq.JobConfiguration{
+			Extract: &bq.JobConfigurationExtract{
+				SourceTable: &bq.TableReference{
+					ProjectId: "client-project-id",
+					DatasetId: "dataset-id",
+					TableId:   "table-id",
+				},
+				DestinationUris: []string{"uri"},
+			},
+		},
+	}
+}
+
+func defaultGCS() *GCSReference {
+	return &GCSReference{
+		URIs: []string{"uri"},
+	}
+}
+
+func TestExtract(t *testing.T) {
+	defer fixRandomID("RANDOM")()
+	c := &Client{
+		projectID: "client-project-id",
+	}
+
+	testCases := []struct {
+		dst    *GCSReference
+		src    *Table
+		config ExtractConfig
+		want   *bq.Job
+	}{
+		{
+			dst:  defaultGCS(),
+			src:  c.Dataset("dataset-id").Table("table-id"),
+			want: defaultExtractJob(),
+		},
+		{
+			dst: defaultGCS(),
+			src: c.Dataset("dataset-id").Table("table-id"),
+			config: ExtractConfig{
+				DisableHeader: true,
+				Labels:        map[string]string{"a": "b"},
+			},
+			want: func() *bq.Job {
+				j := defaultExtractJob()
+				j.Configuration.Labels = map[string]string{"a": "b"}
+				f := false
+				j.Configuration.Extract.PrintHeader = &f
+				return j
+			}(),
+		},
+		{
+			dst: func() *GCSReference {
+				g := NewGCSReference("uri")
+				g.Compression = Gzip
+				g.DestinationFormat = JSON
+				g.FieldDelimiter = "\t"
+				return g
+			}(),
+			src: c.Dataset("dataset-id").Table("table-id"),
+			want: func() *bq.Job {
+				j := defaultExtractJob()
+				j.Configuration.Extract.Compression = "GZIP"
+				j.Configuration.Extract.DestinationFormat = "NEWLINE_DELIMITED_JSON"
+				j.Configuration.Extract.FieldDelimiter = "\t"
+				return j
+			}(),
+		},
+	}
+
+	for i, tc := range testCases {
+		ext := tc.src.ExtractorTo(tc.dst)
+		tc.config.Src = ext.Src
+		tc.config.Dst = ext.Dst
+		ext.ExtractConfig = tc.config
+		got := ext.newJob()
+		checkJob(t, i, got, tc.want)
+
+		jc, err := bqToJobConfig(got.Configuration, c)
+		if err != nil {
+			t.Fatalf("#%d: %v", i, err)
+		}
+		diff := testutil.Diff(jc, &ext.ExtractConfig,
+			cmp.AllowUnexported(Table{}, Client{}))
+		if diff != "" {
+			t.Errorf("#%d: got=-, want=+:\n%s", i, diff)
+		}
+	}
+}
diff --git a/vendor/cloud.google.com/go/bigquery/file.go b/vendor/cloud.google.com/go/bigquery/file.go
new file mode 100644
index 0000000..c44c902
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/file.go
@@ -0,0 +1,135 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"io"
+
+	bq "google.golang.org/api/bigquery/v2"
+)
+
+// A ReaderSource is a source for a load operation that gets
+// data from an io.Reader.
+//
+// When a ReaderSource is part of a LoadConfig obtained via Job.Config,
+// its internal io.Reader will be nil, so it cannot be used for a
+// subsequent load operation.
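+//
+// A minimal usage sketch (illustrative only; table and ctx are assumed to
+// exist, and the strings package must be imported):
+//
+//	rs := NewReaderSource(strings.NewReader("name,num\na,1\n"))
+//	rs.SkipLeadingRows = 1
+//	loader := table.LoaderFrom(rs)
+//	job, err := loader.Run(ctx)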
+type ReaderSource struct {
+	r io.Reader
+	FileConfig
+}
+
+// NewReaderSource creates a ReaderSource from an io.Reader. You may
+// optionally configure properties on the ReaderSource that describe the
+// data being read, before passing it to Table.LoaderFrom.
+func NewReaderSource(r io.Reader) *ReaderSource {
+	return &ReaderSource{r: r}
+}
+
+func (r *ReaderSource) populateLoadConfig(lc *bq.JobConfigurationLoad) io.Reader {
+	r.FileConfig.populateLoadConfig(lc)
+	return r.r
+}
+
+// FileConfig contains configuration options that pertain to files, typically
+// text files that require interpretation to be used as a BigQuery table. A
+// file may live in Google Cloud Storage (see GCSReference), or it may be
+// loaded into a table via Table.LoaderFrom and a ReaderSource.
+type FileConfig struct {
+	// SourceFormat is the format of the GCS data to be read.
+	// Allowed values are: CSV, Avro, JSON, DatastoreBackup. The default is CSV.
+	SourceFormat DataFormat
+
+	// AutoDetect indicates whether we should automatically infer the options
+	// and schema for CSV and JSON sources.
+	AutoDetect bool
+
+	// MaxBadRecords is the maximum number of bad records that will be ignored
+	// when reading data.
+	MaxBadRecords int64
+
+	// IgnoreUnknownValues causes values not matching the schema to be
+	// tolerated. Unknown values are ignored. For CSV this ignores extra values
+	// at the end of a line. For JSON this ignores named values that do not
+	// match any column name. If this field is not set, records containing
+	// unknown values are treated as bad records. The MaxBadRecords field can
+	// be used to customize how bad records are handled.
+	IgnoreUnknownValues bool
+
+	// Schema describes the data. It is required when reading CSV or JSON data,
+	// unless the data is being loaded into a table that already exists.
+	Schema Schema
+
+	// Additional options for CSV files.
+	CSVOptions
+}
+
+func (fc *FileConfig) populateLoadConfig(conf *bq.JobConfigurationLoad) {
+	conf.SkipLeadingRows = fc.SkipLeadingRows
+	conf.SourceFormat = string(fc.SourceFormat)
+	conf.Autodetect = fc.AutoDetect
+	conf.AllowJaggedRows = fc.AllowJaggedRows
+	conf.AllowQuotedNewlines = fc.AllowQuotedNewlines
+	conf.Encoding = string(fc.Encoding)
+	conf.FieldDelimiter = fc.FieldDelimiter
+	conf.IgnoreUnknownValues = fc.IgnoreUnknownValues
+	conf.MaxBadRecords = fc.MaxBadRecords
+	if fc.Schema != nil {
+		conf.Schema = fc.Schema.toBQ()
+	}
+	conf.Quote = fc.quote()
+}
+
+func bqPopulateFileConfig(conf *bq.JobConfigurationLoad, fc *FileConfig) {
+	fc.SourceFormat = DataFormat(conf.SourceFormat)
+	fc.AutoDetect = conf.Autodetect
+	fc.MaxBadRecords = conf.MaxBadRecords
+	fc.IgnoreUnknownValues = conf.IgnoreUnknownValues
+	fc.Schema = bqToSchema(conf.Schema)
+	fc.SkipLeadingRows = conf.SkipLeadingRows
+	fc.AllowJaggedRows = conf.AllowJaggedRows
+	fc.AllowQuotedNewlines = conf.AllowQuotedNewlines
+	fc.Encoding = Encoding(conf.Encoding)
+	fc.FieldDelimiter = conf.FieldDelimiter
+	fc.CSVOptions.setQuote(conf.Quote)
+}
+
+func (fc *FileConfig) populateExternalDataConfig(conf *bq.ExternalDataConfiguration) {
+	format := fc.SourceFormat
+	if format == "" {
+		// Format must be explicitly set for external data sources.
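+		// Falling back to CSV here matches the documented FileConfig default;
+		// set SourceFormat explicitly to use any other format.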
+ format = CSV + } + conf.Autodetect = fc.AutoDetect + conf.IgnoreUnknownValues = fc.IgnoreUnknownValues + conf.MaxBadRecords = fc.MaxBadRecords + conf.SourceFormat = string(format) + if fc.Schema != nil { + conf.Schema = fc.Schema.toBQ() + } + if format == CSV { + fc.CSVOptions.populateExternalDataConfig(conf) + } +} + +// Encoding specifies the character encoding of data to be loaded into BigQuery. +// See https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding +// for more details about how this is used. +type Encoding string + +const ( + UTF_8 Encoding = "UTF-8" + ISO_8859_1 Encoding = "ISO-8859-1" +) diff --git a/vendor/cloud.google.com/go/bigquery/file_test.go b/vendor/cloud.google.com/go/bigquery/file_test.go new file mode 100644 index 0000000..ad24415 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/file_test.go @@ -0,0 +1,98 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "testing" + + "cloud.google.com/go/internal/pretty" + "cloud.google.com/go/internal/testutil" + bq "google.golang.org/api/bigquery/v2" +) + +var ( + hyphen = "-" + fc = FileConfig{ + SourceFormat: CSV, + AutoDetect: true, + MaxBadRecords: 7, + IgnoreUnknownValues: true, + Schema: Schema{ + stringFieldSchema(), + nestedFieldSchema(), + }, + CSVOptions: CSVOptions{ + Quote: hyphen, + FieldDelimiter: "\t", + SkipLeadingRows: 8, + AllowJaggedRows: true, + AllowQuotedNewlines: true, + Encoding: UTF_8, + }, + } +) + +func TestFileConfigPopulateLoadConfig(t *testing.T) { + want := &bq.JobConfigurationLoad{ + SourceFormat: "CSV", + FieldDelimiter: "\t", + SkipLeadingRows: 8, + AllowJaggedRows: true, + AllowQuotedNewlines: true, + Autodetect: true, + Encoding: "UTF-8", + MaxBadRecords: 7, + IgnoreUnknownValues: true, + Schema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqStringFieldSchema(), + bqNestedFieldSchema(), + }}, + Quote: &hyphen, + } + got := &bq.JobConfigurationLoad{} + fc.populateLoadConfig(got) + if !testutil.Equal(got, want) { + t.Errorf("got:\n%v\nwant:\n%v", pretty.Value(got), pretty.Value(want)) + } +} + +func TestFileConfigPopulateExternalDataConfig(t *testing.T) { + got := &bq.ExternalDataConfiguration{} + fc.populateExternalDataConfig(got) + + want := &bq.ExternalDataConfiguration{ + SourceFormat: "CSV", + Autodetect: true, + MaxBadRecords: 7, + IgnoreUnknownValues: true, + Schema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqStringFieldSchema(), + bqNestedFieldSchema(), + }}, + CsvOptions: &bq.CsvOptions{ + AllowJaggedRows: true, + AllowQuotedNewlines: true, + Encoding: "UTF-8", + FieldDelimiter: "\t", + Quote: &hyphen, + SkipLeadingRows: 8, + }, + } + if diff := testutil.Diff(got, want); diff != "" { + t.Errorf("got=-, want=+:\n%s", diff) + } +} diff --git a/vendor/cloud.google.com/go/bigquery/gcs.go b/vendor/cloud.google.com/go/bigquery/gcs.go new file mode 100644 index 0000000..6936b4f --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/gcs.go 
@@ -0,0 +1,73 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"io"
+
+	bq "google.golang.org/api/bigquery/v2"
+)
+
+// GCSReference is a reference to one or more Google Cloud Storage objects, which together constitute
+// an input or output to a BigQuery operation.
+type GCSReference struct {
+	// URIs refer to Google Cloud Storage objects.
+	URIs []string
+
+	FileConfig
+
+	// DestinationFormat is the format to use when writing exported files.
+	// Allowed values are: CSV, Avro, JSON. The default is CSV.
+	// CSV is not supported for tables with nested or repeated fields.
+	DestinationFormat DataFormat
+
+	// Compression specifies the type of compression to apply when writing data
+	// to Google Cloud Storage, or using this GCSReference as an ExternalData
+	// source with CSV or JSON SourceFormat. Default is None.
+	Compression Compression
+}
+
+// NewGCSReference constructs a reference to one or more Google Cloud Storage objects, which together constitute a data source or destination.
+// In the simple case, a single URI in the form gs://bucket/object may refer to a single GCS object.
+// Data may also be split into multiple files, if multiple URIs or URIs containing wildcards are provided.
+// Each URI may contain one '*' wildcard character, which (if present) must come after the bucket name.
+// For more information about the treatment of wildcards and multiple URIs,
+// see https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
+func NewGCSReference(uri ...string) *GCSReference {
+	return &GCSReference{URIs: uri}
+}
+
+// Compression is the type of compression to apply when writing data to Google Cloud Storage.
+type Compression string
+
+const (
+	None Compression = "NONE"
+	Gzip Compression = "GZIP"
+)
+
+func (gcs *GCSReference) populateLoadConfig(lc *bq.JobConfigurationLoad) io.Reader {
+	lc.SourceUris = gcs.URIs
+	gcs.FileConfig.populateLoadConfig(lc)
+	return nil
+}
+
+func (gcs *GCSReference) toBQ() bq.ExternalDataConfiguration {
+	conf := bq.ExternalDataConfiguration{
+		Compression: string(gcs.Compression),
+		SourceUris:  append([]string{}, gcs.URIs...),
+	}
+	gcs.FileConfig.populateExternalDataConfig(&conf)
+	return conf
+}
diff --git a/vendor/cloud.google.com/go/bigquery/integration_test.go b/vendor/cloud.google.com/go/bigquery/integration_test.go
new file mode 100644
index 0000000..622998e
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/integration_test.go
@@ -0,0 +1,1858 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"errors"
+	"flag"
+	"fmt"
+	"log"
+	"net/http"
+	"os"
+	"sort"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/google/go-cmp/cmp"
+	"github.com/google/go-cmp/cmp/cmpopts"
+	gax "github.com/googleapis/gax-go"
+
+	"cloud.google.com/go/civil"
+	"cloud.google.com/go/internal"
+	"cloud.google.com/go/internal/pretty"
+	"cloud.google.com/go/internal/testutil"
+	"cloud.google.com/go/storage"
+	"golang.org/x/net/context"
+	"google.golang.org/api/googleapi"
+	"google.golang.org/api/iterator"
+	"google.golang.org/api/option"
+)
+
+var (
+	client        *Client
+	storageClient *storage.Client
+	dataset       *Dataset
+	schema        = Schema{
+		{Name: "name", Type: StringFieldType},
+		{Name: "nums", Type: IntegerFieldType, Repeated: true},
+		{Name: "rec", Type: RecordFieldType, Schema: Schema{
+			{Name: "bool", Type: BooleanFieldType},
+		}},
+	}
+	testTableExpiration time.Time
+	// BigQuery does not accept hyphens in dataset or table IDs, so we create IDs
+	// with underscores.
+	datasetIDs = testutil.NewUIDSpaceSep("dataset", '_')
+	tableIDs   = testutil.NewUIDSpaceSep("table", '_')
+)
+
+// Note: integration tests cannot be run in parallel, because TestIntegration_Location
+// modifies the client.
+
+func TestMain(m *testing.M) {
+	cleanup := initIntegrationTest()
+	r := m.Run()
+	cleanup()
+	os.Exit(r)
+}
+
+func getClient(t *testing.T) *Client {
+	if client == nil {
+		t.Skip("Integration tests skipped")
+	}
+	return client
+}
+
+// If integration tests will be run, create a unique dataset for them.
+func initIntegrationTest() func() {
+	flag.Parse() // needed for testing.Short()
+	if testing.Short() {
+		return func() {}
+	}
+	ctx := context.Background()
+	ts := testutil.TokenSource(ctx, Scope)
+	if ts == nil {
+		log.Println("Integration tests skipped. See CONTRIBUTING.md for details")
+		return func() {}
+	}
+	projID := testutil.ProjID()
+	var err error
+	client, err = NewClient(ctx, projID, option.WithTokenSource(ts))
+	if err != nil {
+		log.Fatalf("NewClient: %v", err)
+	}
+	storageClient, err = storage.NewClient(ctx,
+		option.WithTokenSource(testutil.TokenSource(ctx, storage.ScopeFullControl)))
+	if err != nil {
+		log.Fatalf("storage.NewClient: %v", err)
+	}
+	dataset = client.Dataset(datasetIDs.New())
+	if err := dataset.Create(ctx, nil); err != nil {
+		log.Fatalf("creating dataset %s: %v", dataset.DatasetID, err)
+	}
+	testTableExpiration = time.Now().Add(10 * time.Minute).Round(time.Second)
+	return func() {
+		if err := deleteDataset(ctx, dataset); err != nil {
+			log.Printf("could not delete %s", dataset.DatasetID)
+		}
+	}
+}
+
+func deleteDataset(ctx context.Context, ds *Dataset) error {
+	it := ds.Tables(ctx)
+	for {
+		tbl, err := it.Next()
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			return err
+		}
+		if err := tbl.Delete(ctx); err != nil {
+			return err
+		}
+	}
+	return ds.Delete(ctx)
+}
+
+func TestIntegration_TableCreate(t *testing.T) {
+	// Check that creating a record field with an empty schema is an error.
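+	// BigQuery rejects RECORD fields that declare no nested fields, so the
+	// Create call below is expected to fail with a 400 Bad Request.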
+	if client == nil {
+		t.Skip("Integration tests skipped")
+	}
+	table := dataset.Table("t_bad")
+	schema := Schema{
+		{Name: "rec", Type: RecordFieldType, Schema: Schema{}},
+	}
+	err := table.Create(context.Background(), &TableMetadata{
+		Schema:         schema,
+		ExpirationTime: time.Now().Add(5 * time.Minute),
+	})
+	if err == nil {
+		t.Fatal("want error, got nil")
+	}
+	if !hasStatusCode(err, http.StatusBadRequest) {
+		t.Fatalf("want a 400 error, got %v", err)
+	}
+}
+
+func TestIntegration_TableCreateView(t *testing.T) {
+	if client == nil {
+		t.Skip("Integration tests skipped")
+	}
+	ctx := context.Background()
+	table := newTable(t, schema)
+	defer table.Delete(ctx)
+
+	// Test that standard SQL views work.
+	view := dataset.Table("t_view_standardsql")
+	query := fmt.Sprintf("SELECT APPROX_COUNT_DISTINCT(name) FROM `%s.%s.%s`",
+		dataset.ProjectID, dataset.DatasetID, table.TableID)
+	err := view.Create(context.Background(), &TableMetadata{
+		ViewQuery:      query,
+		UseStandardSQL: true,
+	})
+	if err != nil {
+		t.Fatalf("table.create: Did not expect an error, got: %v", err)
+	}
+	view.Delete(ctx)
+}
+
+func TestIntegration_TableMetadata(t *testing.T) {
+	if client == nil {
+		t.Skip("Integration tests skipped")
+	}
+	ctx := context.Background()
+	table := newTable(t, schema)
+	defer table.Delete(ctx)
+	// Check table metadata.
+	md, err := table.Metadata(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// TODO(jba): check md more thoroughly.
+	if got, want := md.FullID, fmt.Sprintf("%s:%s.%s", dataset.ProjectID, dataset.DatasetID, table.TableID); got != want {
+		t.Errorf("metadata.FullID: got %q, want %q", got, want)
+	}
+	if got, want := md.Type, RegularTable; got != want {
+		t.Errorf("metadata.Type: got %v, want %v", got, want)
+	}
+	if got, want := md.ExpirationTime, testTableExpiration; !got.Equal(want) {
+		t.Errorf("metadata.ExpirationTime: got %v, want %v", got, want)
+	}
+
+	// Check that timePartitioning is nil by default.
+	if md.TimePartitioning != nil {
+		t.Errorf("metadata.TimePartitioning: got %v, want %v", md.TimePartitioning, nil)
+	}
+
+	// Create tables that have time partitioning.
+	partitionCases := []struct {
+		timePartitioning TimePartitioning
+		wantExpiration   time.Duration
+		wantField        string
+	}{
+		{TimePartitioning{}, time.Duration(0), ""},
+		{TimePartitioning{Expiration: time.Second}, time.Second, ""},
+		{
+			TimePartitioning{
+				Expiration: time.Second,
+				Field:      "date",
+			}, time.Second, "date"},
+	}
+
+	schema2 := Schema{
+		{Name: "name", Type: StringFieldType},
+		{Name: "date", Type: DateFieldType},
+	}
+
+	for i, c := range partitionCases {
+		table := dataset.Table(fmt.Sprintf("t_metadata_partition_%v", i))
+		err = table.Create(context.Background(), &TableMetadata{
+			Schema:           schema2,
+			TimePartitioning: &c.timePartitioning,
+			ExpirationTime:   time.Now().Add(5 * time.Minute),
+		})
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer table.Delete(ctx)
+		md, err = table.Metadata(ctx)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		got := md.TimePartitioning
+		want := &TimePartitioning{
+			Expiration: c.wantExpiration,
+			Field:      c.wantField,
+		}
+		if !testutil.Equal(got, want) {
+			t.Errorf("metadata.TimePartitioning: got %v, want %v", got, want)
+		}
+	}
+}
+
+func TestIntegration_DatasetCreate(t *testing.T) {
+	if client == nil {
+		t.Skip("Integration tests skipped")
+	}
+	ctx := context.Background()
+	ds := client.Dataset(datasetIDs.New())
+	wmd := &DatasetMetadata{Name: "name", Location: "EU"}
+	err := ds.Create(ctx, wmd)
+	if err != nil {
+		t.Fatal(err)
+	}
+	gmd, err := ds.Metadata(ctx)
+	if err != nil {
+ t.Fatal(err) + } + if got, want := gmd.Name, wmd.Name; got != want { + t.Errorf("name: got %q, want %q", got, want) + } + if got, want := gmd.Location, wmd.Location; got != want { + t.Errorf("location: got %q, want %q", got, want) + } + if err := ds.Delete(ctx); err != nil { + t.Fatalf("deleting dataset %v: %v", ds, err) + } +} + +func TestIntegration_DatasetMetadata(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + md, err := dataset.Metadata(ctx) + if err != nil { + t.Fatal(err) + } + if got, want := md.FullID, fmt.Sprintf("%s:%s", dataset.ProjectID, dataset.DatasetID); got != want { + t.Errorf("FullID: got %q, want %q", got, want) + } + jan2016 := time.Date(2016, 1, 1, 0, 0, 0, 0, time.UTC) + if md.CreationTime.Before(jan2016) { + t.Errorf("CreationTime: got %s, want > 2016-1-1", md.CreationTime) + } + if md.LastModifiedTime.Before(jan2016) { + t.Errorf("LastModifiedTime: got %s, want > 2016-1-1", md.LastModifiedTime) + } + + // Verify that we get a NotFound for a nonexistent dataset. + _, err = client.Dataset("does_not_exist").Metadata(ctx) + if err == nil || !hasStatusCode(err, http.StatusNotFound) { + t.Errorf("got %v, want NotFound error", err) + } +} + +func TestIntegration_DatasetDelete(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + ds := client.Dataset(datasetIDs.New()) + if err := ds.Create(ctx, nil); err != nil { + t.Fatalf("creating dataset %s: %v", ds.DatasetID, err) + } + if err := ds.Delete(ctx); err != nil { + t.Fatalf("deleting dataset %s: %v", ds.DatasetID, err) + } +} + +func TestIntegration_DatasetUpdateETags(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + + check := func(md *DatasetMetadata, wantDesc, wantName string) { + if md.Description != wantDesc { + t.Errorf("description: got %q, want %q", md.Description, wantDesc) + } + if md.Name != wantName { + t.Errorf("name: got %q, want %q", md.Name, wantName) + } + } + + ctx := context.Background() + md, err := dataset.Metadata(ctx) + if err != nil { + t.Fatal(err) + } + if md.ETag == "" { + t.Fatal("empty ETag") + } + // Write without ETag succeeds. + desc := md.Description + "d2" + name := md.Name + "n2" + md2, err := dataset.Update(ctx, DatasetMetadataToUpdate{Description: desc, Name: name}, "") + if err != nil { + t.Fatal(err) + } + check(md2, desc, name) + + // Write with original ETag fails because of intervening write. + _, err = dataset.Update(ctx, DatasetMetadataToUpdate{Description: "d", Name: "n"}, md.ETag) + if err == nil { + t.Fatal("got nil, want error") + } + + // Write with most recent ETag succeeds. + md3, err := dataset.Update(ctx, DatasetMetadataToUpdate{Description: "", Name: ""}, md2.ETag) + if err != nil { + t.Fatal(err) + } + check(md3, "", "") +} + +func TestIntegration_DatasetUpdateDefaultExpiration(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + md, err := dataset.Metadata(ctx) + if err != nil { + t.Fatal(err) + } + // Set the default expiration time. + md, err = dataset.Update(ctx, DatasetMetadataToUpdate{DefaultTableExpiration: time.Hour}, "") + if err != nil { + t.Fatal(err) + } + if md.DefaultTableExpiration != time.Hour { + t.Fatalf("got %s, want 1h", md.DefaultTableExpiration) + } + // Omitting DefaultTableExpiration doesn't change it. 
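+	// (DatasetMetadataToUpdate sends only the fields that were explicitly
+	// set, so updating Name alone leaves the expiration at one hour.)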
+ md, err = dataset.Update(ctx, DatasetMetadataToUpdate{Name: "xyz"}, "") + if err != nil { + t.Fatal(err) + } + if md.DefaultTableExpiration != time.Hour { + t.Fatalf("got %s, want 1h", md.DefaultTableExpiration) + } + // Setting it to 0 deletes it (which looks like a 0 duration). + md, err = dataset.Update(ctx, DatasetMetadataToUpdate{DefaultTableExpiration: time.Duration(0)}, "") + if err != nil { + t.Fatal(err) + } + if md.DefaultTableExpiration != 0 { + t.Fatalf("got %s, want 0", md.DefaultTableExpiration) + } +} + +func TestIntegration_DatasetUpdateAccess(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + md, err := dataset.Metadata(ctx) + if err != nil { + t.Fatal(err) + } + origAccess := append([]*AccessEntry(nil), md.Access...) + newEntry := &AccessEntry{ + Role: ReaderRole, + Entity: "Joe@example.com", + EntityType: UserEmailEntity, + } + newAccess := append(md.Access, newEntry) + dm := DatasetMetadataToUpdate{Access: newAccess} + md, err = dataset.Update(ctx, dm, md.ETag) + if err != nil { + t.Fatal(err) + } + defer func() { + _, err := dataset.Update(ctx, DatasetMetadataToUpdate{Access: origAccess}, md.ETag) + if err != nil { + t.Log("could not restore dataset access list") + } + }() + if diff := testutil.Diff(md.Access, newAccess); diff != "" { + t.Fatalf("got=-, want=+:\n%s", diff) + } +} + +func TestIntegration_DatasetUpdateLabels(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + md, err := dataset.Metadata(ctx) + if err != nil { + t.Fatal(err) + } + var dm DatasetMetadataToUpdate + dm.SetLabel("label", "value") + md, err = dataset.Update(ctx, dm, "") + if err != nil { + t.Fatal(err) + } + if got, want := md.Labels["label"], "value"; got != want { + t.Errorf("got %q, want %q", got, want) + } + dm = DatasetMetadataToUpdate{} + dm.DeleteLabel("label") + md, err = dataset.Update(ctx, dm, "") + if err != nil { + t.Fatal(err) + } + if _, ok := md.Labels["label"]; ok { + t.Error("label still present after deletion") + } +} + +func TestIntegration_TableUpdateLabels(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + table := newTable(t, schema) + defer table.Delete(ctx) + + var tm TableMetadataToUpdate + tm.SetLabel("label", "value") + md, err := table.Update(ctx, tm, "") + if err != nil { + t.Fatal(err) + } + if got, want := md.Labels["label"], "value"; got != want { + t.Errorf("got %q, want %q", got, want) + } + tm = TableMetadataToUpdate{} + tm.DeleteLabel("label") + md, err = table.Update(ctx, tm, "") + if err != nil { + t.Fatal(err) + } + if _, ok := md.Labels["label"]; ok { + t.Error("label still present after deletion") + } +} + +func TestIntegration_Tables(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + table := newTable(t, schema) + defer table.Delete(ctx) + wantName := table.FullyQualifiedName() + + // This test is flaky due to eventual consistency. + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + err := internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) { + // Iterate over tables in the dataset. 
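+		// The standard iterator pattern for this package: call Next until it
+		// returns iterator.Done; any other error aborts this retry attempt.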
+ it := dataset.Tables(ctx) + var tableNames []string + for { + tbl, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return false, err + } + tableNames = append(tableNames, tbl.FullyQualifiedName()) + } + // Other tests may be running with this dataset, so there might be more + // than just our table in the list. So don't try for an exact match; just + // make sure that our table is there somewhere. + for _, tn := range tableNames { + if tn == wantName { + return true, nil + } + } + return false, fmt.Errorf("got %v\nwant %s in the list", tableNames, wantName) + }) + if err != nil { + t.Fatal(err) + } +} + +func TestIntegration_UploadAndRead(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + table := newTable(t, schema) + defer table.Delete(ctx) + + // Populate the table. + upl := table.Uploader() + var ( + wantRows [][]Value + saverRows []*ValuesSaver + ) + for i, name := range []string{"a", "b", "c"} { + row := []Value{name, []Value{int64(i)}, []Value{true}} + wantRows = append(wantRows, row) + saverRows = append(saverRows, &ValuesSaver{ + Schema: schema, + InsertID: name, + Row: row, + }) + } + if err := upl.Put(ctx, saverRows); err != nil { + t.Fatal(putError(err)) + } + + // Wait until the data has been uploaded. This can take a few seconds, according + // to https://cloud.google.com/bigquery/streaming-data-into-bigquery. + if err := waitForRow(ctx, table); err != nil { + t.Fatal(err) + } + + // Read the table. + checkRead(t, "upload", table.Read(ctx), wantRows) + + // Query the table. + q := client.Query(fmt.Sprintf("select name, nums, rec from %s", table.TableID)) + q.DefaultProjectID = dataset.ProjectID + q.DefaultDatasetID = dataset.DatasetID + + rit, err := q.Read(ctx) + if err != nil { + t.Fatal(err) + } + checkRead(t, "query", rit, wantRows) + + // Query the long way. + job1, err := q.Run(ctx) + if err != nil { + t.Fatal(err) + } + if job1.LastStatus() == nil { + t.Error("no LastStatus") + } + job2, err := client.JobFromID(ctx, job1.ID()) + if err != nil { + t.Fatal(err) + } + if job2.LastStatus() == nil { + t.Error("no LastStatus") + } + rit, err = job2.Read(ctx) + if err != nil { + t.Fatal(err) + } + checkRead(t, "job.Read", rit, wantRows) + + // Get statistics. + jobStatus, err := job2.Status(ctx) + if err != nil { + t.Fatal(err) + } + if jobStatus.Statistics == nil { + t.Fatal("jobStatus missing statistics") + } + if _, ok := jobStatus.Statistics.Details.(*QueryStatistics); !ok { + t.Errorf("expected QueryStatistics, got %T", jobStatus.Statistics.Details) + } + + // Test reading directly into a []Value. + valueLists, schema, _, err := readAll(table.Read(ctx)) + if err != nil { + t.Fatal(err) + } + it := table.Read(ctx) + for i, vl := range valueLists { + var got []Value + if err := it.Next(&got); err != nil { + t.Fatal(err) + } + if !testutil.Equal(it.Schema, schema) { + t.Fatalf("got schema %v, want %v", it.Schema, schema) + } + want := []Value(vl) + if !testutil.Equal(got, want) { + t.Errorf("%d: got %v, want %v", i, got, want) + } + } + + // Test reading into a map. + it = table.Read(ctx) + for _, vl := range valueLists { + var vm map[string]Value + if err := it.Next(&vm); err != nil { + t.Fatal(err) + } + if got, want := len(vm), len(vl); got != want { + t.Fatalf("valueMap len: got %d, want %d", got, want) + } + // With maps, structs become nested maps. 
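+		// vl was read as a flat []Value row, where the "rec" record column is
+		// a nested []Value. Rewrite it into the map shape that it.Next(&vm)
+		// produces, so the two can be compared field by field.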
+ vl[2] = map[string]Value{"bool": vl[2].([]Value)[0]} + for i, v := range vl { + if got, want := vm[schema[i].Name], v; !testutil.Equal(got, want) { + t.Errorf("%d, name=%s: got %#v, want %#v", + i, schema[i].Name, got, want) + } + } + } +} + +type SubSubTestStruct struct { + Integer int64 +} + +type SubTestStruct struct { + String string + Record SubSubTestStruct + RecordArray []SubSubTestStruct +} + +type TestStruct struct { + Name string + Bytes []byte + Integer int64 + Float float64 + Boolean bool + Timestamp time.Time + Date civil.Date + Time civil.Time + DateTime civil.DateTime + + StringArray []string + IntegerArray []int64 + FloatArray []float64 + BooleanArray []bool + TimestampArray []time.Time + DateArray []civil.Date + TimeArray []civil.Time + DateTimeArray []civil.DateTime + + Record SubTestStruct + RecordArray []SubTestStruct +} + +// Round times to the microsecond for comparison purposes. +var roundToMicros = cmp.Transformer("RoundToMicros", + func(t time.Time) time.Time { return t.Round(time.Microsecond) }) + +func TestIntegration_UploadAndReadStructs(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + schema, err := InferSchema(TestStruct{}) + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + table := newTable(t, schema) + defer table.Delete(ctx) + + d := civil.Date{Year: 2016, Month: 3, Day: 20} + tm := civil.Time{Hour: 15, Minute: 4, Second: 5, Nanosecond: 6000} + ts := time.Date(2016, 3, 20, 15, 4, 5, 6000, time.UTC) + dtm := civil.DateTime{Date: d, Time: tm} + d2 := civil.Date{Year: 1994, Month: 5, Day: 15} + tm2 := civil.Time{Hour: 1, Minute: 2, Second: 4, Nanosecond: 0} + ts2 := time.Date(1994, 5, 15, 1, 2, 4, 0, time.UTC) + dtm2 := civil.DateTime{Date: d2, Time: tm2} + + // Populate the table. + upl := table.Uploader() + want := []*TestStruct{ + { + "a", + []byte("byte"), + 42, + 3.14, + true, + ts, + d, + tm, + dtm, + []string{"a", "b"}, + []int64{1, 2}, + []float64{1, 1.41}, + []bool{true, false}, + []time.Time{ts, ts2}, + []civil.Date{d, d2}, + []civil.Time{tm, tm2}, + []civil.DateTime{dtm, dtm2}, + SubTestStruct{ + "string", + SubSubTestStruct{24}, + []SubSubTestStruct{{1}, {2}}, + }, + []SubTestStruct{ + {String: "empty"}, + { + "full", + SubSubTestStruct{1}, + []SubSubTestStruct{{1}, {2}}, + }, + }, + }, + { + Name: "b", + Bytes: []byte("byte2"), + Integer: 24, + Float: 4.13, + Boolean: false, + Timestamp: ts, + Date: d, + Time: tm, + DateTime: dtm, + }, + } + var savers []*StructSaver + for _, s := range want { + savers = append(savers, &StructSaver{Schema: schema, Struct: s}) + } + if err := upl.Put(ctx, savers); err != nil { + t.Fatal(putError(err)) + } + + // Wait until the data has been uploaded. This can take a few seconds, according + // to https://cloud.google.com/bigquery/streaming-data-into-bigquery. + if err := waitForRow(ctx, table); err != nil { + t.Fatal(err) + } + + // Test iteration with structs. + it := table.Read(ctx) + var got []*TestStruct + for { + var g TestStruct + err := it.Next(&g) + if err == iterator.Done { + break + } + if err != nil { + t.Fatal(err) + } + got = append(got, &g) + } + sort.Sort(byName(got)) + + // BigQuery does not elide nils. It reports an error for nil fields. 
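+	// got was sorted by name above, so got[i] should line up with want[i].
+	// roundToMicros compensates for BigQuery storing timestamps with
+	// microsecond, not nanosecond, precision.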
+ for i, g := range got { + if i >= len(want) { + t.Errorf("%d: got %v, past end of want", i, pretty.Value(g)) + } else if diff := testutil.Diff(g, want[i], roundToMicros); diff != "" { + t.Errorf("%d: got=-, want=+:\n%s", i, diff) + } + } +} + +type byName []*TestStruct + +func (b byName) Len() int { return len(b) } +func (b byName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byName) Less(i, j int) bool { return b[i].Name < b[j].Name } + +func TestIntegration_UploadAndReadNullable(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctm := civil.Time{Hour: 15, Minute: 4, Second: 5, Nanosecond: 6000} + cdt := civil.DateTime{Date: testDate, Time: ctm} + testUploadAndReadNullable(t, testStructNullable{}, make([]Value, len(testStructNullableSchema))) + testUploadAndReadNullable(t, testStructNullable{ + String: NullString{"x", true}, + Bytes: []byte{1, 2, 3}, + Integer: NullInt64{1, true}, + Float: NullFloat64{2.3, true}, + Boolean: NullBool{true, true}, + Timestamp: NullTimestamp{testTimestamp, true}, + Date: NullDate{testDate, true}, + Time: NullTime{ctm, true}, + DateTime: NullDateTime{cdt, true}, + Record: &subNullable{X: NullInt64{4, true}}, + }, + []Value{"x", []byte{1, 2, 3}, int64(1), 2.3, true, testTimestamp, testDate, ctm, cdt, []Value{int64(4)}}) +} + +func testUploadAndReadNullable(t *testing.T, ts testStructNullable, wantRow []Value) { + ctx := context.Background() + table := newTable(t, testStructNullableSchema) + defer table.Delete(ctx) + + // Populate the table. + upl := table.Uploader() + if err := upl.Put(ctx, []*StructSaver{{Schema: testStructNullableSchema, Struct: ts}}); err != nil { + t.Fatal(putError(err)) + } + // Wait until the data has been uploaded. This can take a few seconds, according + // to https://cloud.google.com/bigquery/streaming-data-into-bigquery. + if err := waitForRow(ctx, table); err != nil { + t.Fatal(err) + } + + // Read into a []Value. + iter := table.Read(ctx) + gotRows, _, _, err := readAll(iter) + if err != nil { + t.Fatal(err) + } + if len(gotRows) != 1 { + t.Fatalf("got %d rows, want 1", len(gotRows)) + } + if diff := testutil.Diff(gotRows[0], wantRow, roundToMicros); diff != "" { + t.Error(diff) + } + + // Read into a struct. + want := ts + var sn testStructNullable + it := table.Read(ctx) + if err := it.Next(&sn); err != nil { + t.Fatal(err) + } + if diff := testutil.Diff(sn, want, roundToMicros); diff != "" { + t.Error(diff) + } +} + +func TestIntegration_TableUpdate(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + table := newTable(t, schema) + defer table.Delete(ctx) + + // Test Update of non-schema fields. 
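+	// The update below passes tm.ETag, making it conditional on the table
+	// being unchanged since the metadata was read; the stale-ETag failure
+	// case is exercised further down.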
+ tm, err := table.Metadata(ctx) + if err != nil { + t.Fatal(err) + } + wantDescription := tm.Description + "more" + wantName := tm.Name + "more" + wantExpiration := tm.ExpirationTime.Add(time.Hour * 24) + got, err := table.Update(ctx, TableMetadataToUpdate{ + Description: wantDescription, + Name: wantName, + ExpirationTime: wantExpiration, + }, tm.ETag) + if err != nil { + t.Fatal(err) + } + if got.Description != wantDescription { + t.Errorf("Description: got %q, want %q", got.Description, wantDescription) + } + if got.Name != wantName { + t.Errorf("Name: got %q, want %q", got.Name, wantName) + } + if got.ExpirationTime != wantExpiration { + t.Errorf("ExpirationTime: got %q, want %q", got.ExpirationTime, wantExpiration) + } + if !testutil.Equal(got.Schema, schema) { + t.Errorf("Schema: got %v, want %v", pretty.Value(got.Schema), pretty.Value(schema)) + } + + // Blind write succeeds. + _, err = table.Update(ctx, TableMetadataToUpdate{Name: "x"}, "") + if err != nil { + t.Fatal(err) + } + // Write with old etag fails. + _, err = table.Update(ctx, TableMetadataToUpdate{Name: "y"}, got.ETag) + if err == nil { + t.Fatal("Update with old ETag succeeded, wanted failure") + } + + // Test schema update. + // Columns can be added. schema2 is the same as schema, except for the + // added column in the middle. + nested := Schema{ + {Name: "nested", Type: BooleanFieldType}, + {Name: "other", Type: StringFieldType}, + } + schema2 := Schema{ + schema[0], + {Name: "rec2", Type: RecordFieldType, Schema: nested}, + schema[1], + schema[2], + } + + got, err = table.Update(ctx, TableMetadataToUpdate{Schema: schema2}, "") + if err != nil { + t.Fatal(err) + } + + // Wherever you add the column, it appears at the end. + schema3 := Schema{schema2[0], schema2[2], schema2[3], schema2[1]} + if !testutil.Equal(got.Schema, schema3) { + t.Errorf("add field:\ngot %v\nwant %v", + pretty.Value(got.Schema), pretty.Value(schema3)) + } + + // Updating with the empty schema succeeds, but is a no-op. + got, err = table.Update(ctx, TableMetadataToUpdate{Schema: Schema{}}, "") + if err != nil { + t.Fatal(err) + } + if !testutil.Equal(got.Schema, schema3) { + t.Errorf("empty schema:\ngot %v\nwant %v", + pretty.Value(got.Schema), pretty.Value(schema3)) + } + + // Error cases when updating schema. + for _, test := range []struct { + desc string + fields Schema + }{ + {"change from optional to required", Schema{ + {Name: "name", Type: StringFieldType, Required: true}, + schema3[1], + schema3[2], + schema3[3], + }}, + {"add a required field", Schema{ + schema3[0], schema3[1], schema3[2], schema3[3], + {Name: "req", Type: StringFieldType, Required: true}, + }}, + {"remove a field", Schema{schema3[0], schema3[1], schema3[2]}}, + {"remove a nested field", Schema{ + schema3[0], schema3[1], schema3[2], + {Name: "rec2", Type: RecordFieldType, Schema: Schema{nested[0]}}}}, + {"remove all nested fields", Schema{ + schema3[0], schema3[1], schema3[2], + {Name: "rec2", Type: RecordFieldType, Schema: Schema{}}}}, + } { + _, err = table.Update(ctx, TableMetadataToUpdate{Schema: Schema(test.fields)}, "") + if err == nil { + t.Errorf("%s: want error, got nil", test.desc) + } else if !hasStatusCode(err, 400) { + t.Errorf("%s: want 400, got %v", test.desc, err) + } + } +} + +func TestIntegration_Load(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + // CSV data can't be loaded into a repeated field, so we use a different schema. 
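+	// (The package-level schema declares "nums" as a repeated field; here it
+	// is a plain INTEGER column instead.)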
+ table := newTable(t, Schema{ + {Name: "name", Type: StringFieldType}, + {Name: "nums", Type: IntegerFieldType}, + }) + defer table.Delete(ctx) + + // Load the table from a reader. + r := strings.NewReader("a,0\nb,1\nc,2\n") + wantRows := [][]Value{ + []Value{"a", int64(0)}, + []Value{"b", int64(1)}, + []Value{"c", int64(2)}, + } + rs := NewReaderSource(r) + loader := table.LoaderFrom(rs) + loader.WriteDisposition = WriteTruncate + loader.Labels = map[string]string{"test": "go"} + job, err := loader.Run(ctx) + if err != nil { + t.Fatal(err) + } + if job.LastStatus() == nil { + t.Error("no LastStatus") + } + conf, err := job.Config() + if err != nil { + t.Fatal(err) + } + config, ok := conf.(*LoadConfig) + if !ok { + t.Fatalf("got %T, want LoadConfig", conf) + } + diff := testutil.Diff(config, &loader.LoadConfig, + cmp.AllowUnexported(Table{}), + cmpopts.IgnoreUnexported(Client{}, ReaderSource{}), + // returned schema is at top level, not in the config + cmpopts.IgnoreFields(FileConfig{}, "Schema")) + if diff != "" { + t.Errorf("got=-, want=+:\n%s", diff) + } + if err := wait(ctx, job); err != nil { + t.Fatal(err) + } + checkReadAndTotalRows(t, "reader load", table.Read(ctx), wantRows) + +} + +func TestIntegration_DML(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + table := newTable(t, schema) + defer table.Delete(ctx) + + sql := fmt.Sprintf(`INSERT %s.%s (name, nums, rec) + VALUES ('a', [0], STRUCT(TRUE)), + ('b', [1], STRUCT(FALSE)), + ('c', [2], STRUCT(TRUE))`, + table.DatasetID, table.TableID) + if err := dmlInsert(ctx, sql); err != nil { + t.Fatal(err) + } + wantRows := [][]Value{ + []Value{"a", []Value{int64(0)}, []Value{true}}, + []Value{"b", []Value{int64(1)}, []Value{false}}, + []Value{"c", []Value{int64(2)}, []Value{true}}, + } + checkRead(t, "DML", table.Read(ctx), wantRows) +} + +func dmlInsert(ctx context.Context, sql string) error { + // Retry insert; sometimes it fails with INTERNAL. + return internal.Retry(ctx, gax.Backoff{}, func() (bool, error) { + // Use DML to insert. + q := client.Query(sql) + job, err := q.Run(ctx) + if err != nil { + if e, ok := err.(*googleapi.Error); ok && e.Code < 500 { + return true, err // fail on 4xx + } + return false, err + } + if err := wait(ctx, job); err != nil { + if e, ok := err.(*googleapi.Error); ok && e.Code < 500 { + return true, err // fail on 4xx + } + return false, err + } + return true, nil + }) +} + +func TestIntegration_TimeTypes(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + dtSchema := Schema{ + {Name: "d", Type: DateFieldType}, + {Name: "t", Type: TimeFieldType}, + {Name: "dt", Type: DateTimeFieldType}, + {Name: "ts", Type: TimestampFieldType}, + } + table := newTable(t, dtSchema) + defer table.Delete(ctx) + + d := civil.Date{Year: 2016, Month: 3, Day: 20} + tm := civil.Time{Hour: 12, Minute: 30, Second: 0, Nanosecond: 6000} + dtm := civil.DateTime{Date: d, Time: tm} + ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC) + wantRows := [][]Value{ + []Value{d, tm, dtm, ts}, + } + upl := table.Uploader() + if err := upl.Put(ctx, []*ValuesSaver{ + {Schema: dtSchema, Row: wantRows[0]}, + }); err != nil { + t.Fatal(putError(err)) + } + if err := waitForRow(ctx, table); err != nil { + t.Fatal(err) + } + + // SQL wants DATETIMEs with a space between date and time, but the service + // returns them in RFC3339 form, with a "T" between. 
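+	// CivilTimeString and CivilDateTimeString produce the space-separated,
+	// microsecond-precision form that DML accepts; illustrative output for
+	// the values above:
+	//
+	//	CivilDateTimeString(dtm) // "2016-03-20 12:30:00.000006"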
+ query := fmt.Sprintf("INSERT %s.%s (d, t, dt, ts) "+ + "VALUES ('%s', '%s', '%s', '%s')", + table.DatasetID, table.TableID, + d, CivilTimeString(tm), CivilDateTimeString(dtm), ts.Format("2006-01-02 15:04:05")) + if err := dmlInsert(ctx, query); err != nil { + t.Fatal(err) + } + wantRows = append(wantRows, wantRows[0]) + checkRead(t, "TimeTypes", table.Read(ctx), wantRows) +} + +func TestIntegration_StandardQuery(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + + d := civil.Date{Year: 2016, Month: 3, Day: 20} + tm := civil.Time{Hour: 15, Minute: 04, Second: 05, Nanosecond: 0} + ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC) + dtm := ts.Format("2006-01-02 15:04:05") + + // Constructs Value slices made up of int64s. + ints := func(args ...int) []Value { + vals := make([]Value, len(args)) + for i, arg := range args { + vals[i] = int64(arg) + } + return vals + } + + testCases := []struct { + query string + wantRow []Value + }{ + {"SELECT 1", ints(1)}, + {"SELECT 1.3", []Value{1.3}}, + {"SELECT TRUE", []Value{true}}, + {"SELECT 'ABC'", []Value{"ABC"}}, + {"SELECT CAST('foo' AS BYTES)", []Value{[]byte("foo")}}, + {fmt.Sprintf("SELECT TIMESTAMP '%s'", dtm), []Value{ts}}, + {fmt.Sprintf("SELECT [TIMESTAMP '%s', TIMESTAMP '%s']", dtm, dtm), []Value{[]Value{ts, ts}}}, + {fmt.Sprintf("SELECT ('hello', TIMESTAMP '%s')", dtm), []Value{[]Value{"hello", ts}}}, + {fmt.Sprintf("SELECT DATETIME(TIMESTAMP '%s')", dtm), []Value{civil.DateTime{Date: d, Time: tm}}}, + {fmt.Sprintf("SELECT DATE(TIMESTAMP '%s')", dtm), []Value{d}}, + {fmt.Sprintf("SELECT TIME(TIMESTAMP '%s')", dtm), []Value{tm}}, + {"SELECT (1, 2)", []Value{ints(1, 2)}}, + {"SELECT [1, 2, 3]", []Value{ints(1, 2, 3)}}, + {"SELECT ([1, 2], 3, [4, 5])", []Value{[]Value{ints(1, 2), int64(3), ints(4, 5)}}}, + {"SELECT [(1, 2, 3), (4, 5, 6)]", []Value{[]Value{ints(1, 2, 3), ints(4, 5, 6)}}}, + {"SELECT [([1, 2, 3], 4), ([5, 6], 7)]", []Value{[]Value{[]Value{ints(1, 2, 3), int64(4)}, []Value{ints(5, 6), int64(7)}}}}, + {"SELECT ARRAY(SELECT STRUCT([1, 2]))", []Value{[]Value{[]Value{ints(1, 2)}}}}, + } + for _, c := range testCases { + q := client.Query(c.query) + it, err := q.Read(ctx) + if err != nil { + t.Fatal(err) + } + checkRead(t, "StandardQuery", it, [][]Value{c.wantRow}) + } +} + +func TestIntegration_LegacyQuery(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + + ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC) + dtm := ts.Format("2006-01-02 15:04:05") + + testCases := []struct { + query string + wantRow []Value + }{ + {"SELECT 1", []Value{int64(1)}}, + {"SELECT 1.3", []Value{1.3}}, + {"SELECT TRUE", []Value{true}}, + {"SELECT 'ABC'", []Value{"ABC"}}, + {"SELECT CAST('foo' AS BYTES)", []Value{[]byte("foo")}}, + {fmt.Sprintf("SELECT TIMESTAMP('%s')", dtm), []Value{ts}}, + {fmt.Sprintf("SELECT DATE(TIMESTAMP('%s'))", dtm), []Value{"2016-03-20"}}, + {fmt.Sprintf("SELECT TIME(TIMESTAMP('%s'))", dtm), []Value{"15:04:05"}}, + } + for _, c := range testCases { + q := client.Query(c.query) + q.UseLegacySQL = true + it, err := q.Read(ctx) + if err != nil { + t.Fatal(err) + } + checkRead(t, "LegacyQuery", it, [][]Value{c.wantRow}) + } +} + +func TestIntegration_QueryParameters(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + + d := civil.Date{Year: 2016, Month: 3, Day: 20} + tm := civil.Time{Hour: 15, Minute: 04, Second: 05, Nanosecond: 3008} + rtm := tm + 
rtm.Nanosecond = 3000 // round to microseconds + dtm := civil.DateTime{Date: d, Time: tm} + ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC) + + type ss struct { + String string + } + + type s struct { + Timestamp time.Time + StringArray []string + SubStruct ss + SubStructArray []ss + } + + testCases := []struct { + query string + parameters []QueryParameter + wantRow []Value + wantConfig interface{} + }{ + { + "SELECT @val", + []QueryParameter{{"val", 1}}, + []Value{int64(1)}, + int64(1), + }, + { + "SELECT @val", + []QueryParameter{{"val", 1.3}}, + []Value{1.3}, + 1.3, + }, + { + "SELECT @val", + []QueryParameter{{"val", true}}, + []Value{true}, + true, + }, + { + "SELECT @val", + []QueryParameter{{"val", "ABC"}}, + []Value{"ABC"}, + "ABC", + }, + { + "SELECT @val", + []QueryParameter{{"val", []byte("foo")}}, + []Value{[]byte("foo")}, + []byte("foo"), + }, + { + "SELECT @val", + []QueryParameter{{"val", ts}}, + []Value{ts}, + ts, + }, + { + "SELECT @val", + []QueryParameter{{"val", []time.Time{ts, ts}}}, + []Value{[]Value{ts, ts}}, + []interface{}{ts, ts}, + }, + { + "SELECT @val", + []QueryParameter{{"val", dtm}}, + []Value{civil.DateTime{Date: d, Time: rtm}}, + civil.DateTime{Date: d, Time: rtm}, + }, + { + "SELECT @val", + []QueryParameter{{"val", d}}, + []Value{d}, + d, + }, + { + "SELECT @val", + []QueryParameter{{"val", tm}}, + []Value{rtm}, + rtm, + }, + { + "SELECT @val", + []QueryParameter{{"val", s{ts, []string{"a", "b"}, ss{"c"}, []ss{{"d"}, {"e"}}}}}, + []Value{[]Value{ts, []Value{"a", "b"}, []Value{"c"}, []Value{[]Value{"d"}, []Value{"e"}}}}, + map[string]interface{}{ + "Timestamp": ts, + "StringArray": []interface{}{"a", "b"}, + "SubStruct": map[string]interface{}{"String": "c"}, + "SubStructArray": []interface{}{ + map[string]interface{}{"String": "d"}, + map[string]interface{}{"String": "e"}, + }, + }, + }, + { + "SELECT @val.Timestamp, @val.SubStruct.String", + []QueryParameter{{"val", s{Timestamp: ts, SubStruct: ss{"a"}}}}, + []Value{ts, "a"}, + map[string]interface{}{ + "Timestamp": ts, + "SubStruct": map[string]interface{}{"String": "a"}, + "StringArray": nil, + "SubStructArray": nil, + }, + }, + } + for _, c := range testCases { + q := client.Query(c.query) + q.Parameters = c.parameters + job, err := q.Run(ctx) + if err != nil { + t.Fatal(err) + } + if job.LastStatus() == nil { + t.Error("no LastStatus") + } + it, err := job.Read(ctx) + if err != nil { + t.Fatal(err) + } + checkRead(t, "QueryParameters", it, [][]Value{c.wantRow}) + config, err := job.Config() + if err != nil { + t.Fatal(err) + } + got := config.(*QueryConfig).Parameters[0].Value + if !testutil.Equal(got, c.wantConfig) { + t.Errorf("param %[1]v (%[1]T): config:\ngot %[2]v (%[2]T)\nwant %[3]v (%[3]T)", + c.parameters[0].Value, got, c.wantConfig) + } + } +} + +func TestIntegration_QueryDryRun(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + q := client.Query("SELECT word from " + stdName + " LIMIT 10") + q.DryRun = true + job, err := q.Run(ctx) + if err != nil { + t.Fatal(err) + } + + s := job.LastStatus() + if s.State != Done { + t.Errorf("state is %v, expected Done", s.State) + } + if s.Statistics == nil { + t.Fatal("no statistics") + } + if s.Statistics.Details.(*QueryStatistics).Schema == nil { + t.Fatal("no schema") + } +} + +func TestIntegration_ExtractExternal(t *testing.T) { + // Create a table, extract it to GCS, then query it externally. 
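+	// The extract job writes a header row (DisableHeader is left false), so
+	// the external data options below use SkipLeadingRows: 1 to skip it.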
+ if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + schema := Schema{ + {Name: "name", Type: StringFieldType}, + {Name: "num", Type: IntegerFieldType}, + } + table := newTable(t, schema) + defer table.Delete(ctx) + + // Insert table data. + sql := fmt.Sprintf(`INSERT %s.%s (name, num) + VALUES ('a', 1), ('b', 2), ('c', 3)`, + table.DatasetID, table.TableID) + if err := dmlInsert(ctx, sql); err != nil { + t.Fatal(err) + } + // Extract to a GCS object as CSV. + bucketName := testutil.ProjID() + objectName := fmt.Sprintf("bq-test-%s.csv", table.TableID) + uri := fmt.Sprintf("gs://%s/%s", bucketName, objectName) + defer storageClient.Bucket(bucketName).Object(objectName).Delete(ctx) + gr := NewGCSReference(uri) + gr.DestinationFormat = CSV + e := table.ExtractorTo(gr) + job, err := e.Run(ctx) + if err != nil { + t.Fatal(err) + } + conf, err := job.Config() + if err != nil { + t.Fatal(err) + } + config, ok := conf.(*ExtractConfig) + if !ok { + t.Fatalf("got %T, want ExtractConfig", conf) + } + diff := testutil.Diff(config, &e.ExtractConfig, + cmp.AllowUnexported(Table{}), + cmpopts.IgnoreUnexported(Client{})) + if diff != "" { + t.Errorf("got=-, want=+:\n%s", diff) + } + if err := wait(ctx, job); err != nil { + t.Fatal(err) + } + + edc := &ExternalDataConfig{ + SourceFormat: CSV, + SourceURIs: []string{uri}, + Schema: schema, + Options: &CSVOptions{SkipLeadingRows: 1}, + } + // Query that CSV file directly. + q := client.Query("SELECT * FROM csv") + q.TableDefinitions = map[string]ExternalData{"csv": edc} + wantRows := [][]Value{ + []Value{"a", int64(1)}, + []Value{"b", int64(2)}, + []Value{"c", int64(3)}, + } + iter, err := q.Read(ctx) + if err != nil { + t.Fatal(err) + } + checkReadAndTotalRows(t, "external query", iter, wantRows) + + // Make a table pointing to the file, and query it. + // BigQuery does not allow a Table.Read on an external table. + table = dataset.Table(tableIDs.New()) + err = table.Create(context.Background(), &TableMetadata{ + Schema: schema, + ExpirationTime: testTableExpiration, + ExternalDataConfig: edc, + }) + if err != nil { + t.Fatal(err) + } + q = client.Query(fmt.Sprintf("SELECT * FROM %s.%s", table.DatasetID, table.TableID)) + iter, err = q.Read(ctx) + if err != nil { + t.Fatal(err) + } + checkReadAndTotalRows(t, "external table", iter, wantRows) + + // While we're here, check that the table metadata is correct. + md, err := table.Metadata(ctx) + if err != nil { + t.Fatal(err) + } + // One difference: since BigQuery returns the schema as part of the ordinary + // table metadata, it does not populate ExternalDataConfig.Schema. + md.ExternalDataConfig.Schema = md.Schema + if diff := testutil.Diff(md.ExternalDataConfig, edc); diff != "" { + t.Errorf("got=-, want=+\n%s", diff) + } +} + +func TestIntegration_ReadNullIntoStruct(t *testing.T) { + // Reading a null into a struct field should return an error (not panic). 
+ if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + table := newTable(t, schema) + defer table.Delete(ctx) + + upl := table.Uploader() + row := &ValuesSaver{ + Schema: schema, + Row: []Value{nil, []Value{}, []Value{nil}}, + } + if err := upl.Put(ctx, []*ValuesSaver{row}); err != nil { + t.Fatal(putError(err)) + } + if err := waitForRow(ctx, table); err != nil { + t.Fatal(err) + } + + q := client.Query(fmt.Sprintf("select name from %s", table.TableID)) + q.DefaultProjectID = dataset.ProjectID + q.DefaultDatasetID = dataset.DatasetID + it, err := q.Read(ctx) + if err != nil { + t.Fatal(err) + } + type S struct{ Name string } + var s S + if err := it.Next(&s); err == nil { + t.Fatal("got nil, want error") + } +} + +const ( + stdName = "`bigquery-public-data.samples.shakespeare`" + legacyName = "[bigquery-public-data:samples.shakespeare]" +) + +// These tests exploit the fact that the two SQL versions have different syntaxes for +// fully-qualified table names. +var useLegacySqlTests = []struct { + t string // name of table + std, legacy bool // use standard/legacy SQL + err bool // do we expect an error? +}{ + {t: legacyName, std: false, legacy: true, err: false}, + {t: legacyName, std: true, legacy: false, err: true}, + {t: legacyName, std: false, legacy: false, err: true}, // standard SQL is default + {t: legacyName, std: true, legacy: true, err: true}, + {t: stdName, std: false, legacy: true, err: true}, + {t: stdName, std: true, legacy: false, err: false}, + {t: stdName, std: false, legacy: false, err: false}, // standard SQL is default + {t: stdName, std: true, legacy: true, err: true}, +} + +func TestIntegration_QueryUseLegacySQL(t *testing.T) { + // Test the UseLegacySQL and UseStandardSQL options for queries. + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + for _, test := range useLegacySqlTests { + q := client.Query(fmt.Sprintf("select word from %s limit 1", test.t)) + q.UseStandardSQL = test.std + q.UseLegacySQL = test.legacy + _, err := q.Read(ctx) + gotErr := err != nil + if gotErr && !test.err { + t.Errorf("%+v:\nunexpected error: %v", test, err) + } else if !gotErr && test.err { + t.Errorf("%+v:\nsucceeded, but want error", test) + } + } +} + +func TestIntegration_TableUseLegacySQL(t *testing.T) { + // Test UseLegacySQL and UseStandardSQL for Table.Create. + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + table := newTable(t, schema) + defer table.Delete(ctx) + for i, test := range useLegacySqlTests { + view := dataset.Table(fmt.Sprintf("t_view_%d", i)) + tm := &TableMetadata{ + ViewQuery: fmt.Sprintf("SELECT word from %s", test.t), + UseStandardSQL: test.std, + UseLegacySQL: test.legacy, + } + err := view.Create(ctx, tm) + gotErr := err != nil + if gotErr && !test.err { + t.Errorf("%+v:\nunexpected error: %v", test, err) + } else if !gotErr && test.err { + t.Errorf("%+v:\nsucceeded, but want error", test) + } + view.Delete(ctx) + } +} + +func TestIntegration_ListJobs(t *testing.T) { + // It's difficult to test the list of jobs, because we can't easily + // control what's in it. Also, there are many jobs in the test project, + // and it takes considerable time to list them all. + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + + // About all we can do is list a few jobs. 
+	const max = 20
+	var jobs []*Job
+	it := client.Jobs(ctx)
+	for {
+		job, err := it.Next()
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			t.Fatal(err)
+		}
+		jobs = append(jobs, job)
+		if len(jobs) >= max {
+			break
+		}
+	}
+	// We expect that there is at least one job in the last few months.
+	if len(jobs) == 0 {
+		t.Fatal("did not get any jobs")
+	}
+}
+
+const tokyo = "asia-northeast1"
+
+func TestIntegration_Location(t *testing.T) {
+	if client == nil {
+		t.Skip("Integration tests skipped")
+	}
+	client.Location = ""
+	testLocation(t, tokyo)
+	client.Location = tokyo
+	defer func() {
+		client.Location = ""
+	}()
+	testLocation(t, "")
+}
+
+func testLocation(t *testing.T, loc string) {
+	ctx := context.Background()
+	tokyoDataset := client.Dataset("tokyo")
+	err := tokyoDataset.Create(ctx, &DatasetMetadata{Location: loc})
+	if err != nil && !hasStatusCode(err, 409) { // 409 = already exists
+		t.Fatal(err)
+	}
+	md, err := tokyoDataset.Metadata(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if md.Location != tokyo {
+		t.Fatalf("dataset location: got %s, want %s", md.Location, tokyo)
+	}
+	table := tokyoDataset.Table(tableIDs.New())
+	err = table.Create(context.Background(), &TableMetadata{
+		Schema: Schema{
+			{Name: "name", Type: StringFieldType},
+			{Name: "nums", Type: IntegerFieldType},
+		},
+		ExpirationTime: testTableExpiration,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer table.Delete(ctx)
+	loader := table.LoaderFrom(NewReaderSource(strings.NewReader("a,0\nb,1\nc,2\n")))
+	loader.Location = loc
+	job, err := loader.Run(ctx)
+	if err != nil {
+		t.Fatal("loader.Run", err)
+	}
+	if job.Location() != tokyo {
+		t.Fatalf("job location: got %s, want %s", job.Location(), tokyo)
+	}
+	_, err = client.JobFromID(ctx, job.ID())
+	if client.Location == "" && err == nil {
+		t.Error("JobFromID with Tokyo job, no client location: want error, got nil")
+	}
+	if client.Location != "" && err != nil {
+		t.Errorf("JobFromID with Tokyo job, with client location: want nil, got %v", err)
+	}
+	_, err = client.JobFromIDLocation(ctx, job.ID(), "US")
+	if err == nil {
+		t.Error("JobFromIDLocation with US: want error, got nil")
+	}
+	job2, err := client.JobFromIDLocation(ctx, job.ID(), loc)
+	if loc == tokyo && err != nil {
+		t.Errorf("loc=tokyo: %v", err)
+	}
+	if loc == "" && err == nil {
+		t.Error("loc empty: got nil, want error")
+	}
+	if job2 != nil && (job2.ID() != job.ID() || job2.Location() != tokyo) {
+		t.Errorf("got id %s loc %s, want id %s loc %s", job2.ID(), job2.Location(), job.ID(), tokyo)
+	}
+	if err := wait(ctx, job); err != nil {
+		t.Fatal(err)
+	}
+	// Cancel should succeed even if the job is done.
+ if err := job.Cancel(ctx); err != nil { + t.Fatal(err) + } + + q := client.Query(fmt.Sprintf("SELECT * FROM %s.%s", table.DatasetID, table.TableID)) + q.Location = loc + iter, err := q.Read(ctx) + if err != nil { + t.Fatal(err) + } + wantRows := [][]Value{ + []Value{"a", int64(0)}, + []Value{"b", int64(1)}, + []Value{"c", int64(2)}, + } + checkRead(t, "location", iter, wantRows) + + table2 := tokyoDataset.Table(tableIDs.New()) + copier := table2.CopierFrom(table) + copier.Location = loc + if _, err := copier.Run(ctx); err != nil { + t.Fatal(err) + } + bucketName := testutil.ProjID() + objectName := fmt.Sprintf("bq-test-%s.csv", table.TableID) + uri := fmt.Sprintf("gs://%s/%s", bucketName, objectName) + defer storageClient.Bucket(bucketName).Object(objectName).Delete(ctx) + gr := NewGCSReference(uri) + gr.DestinationFormat = CSV + e := table.ExtractorTo(gr) + e.Location = loc + if _, err := e.Run(ctx); err != nil { + t.Fatal(err) + } +} + +// Creates a new, temporary table with a unique name and the given schema. +func newTable(t *testing.T, s Schema) *Table { + table := dataset.Table(tableIDs.New()) + err := table.Create(context.Background(), &TableMetadata{ + Schema: s, + ExpirationTime: testTableExpiration, + }) + if err != nil { + t.Fatal(err) + } + return table +} + +func checkRead(t *testing.T, msg string, it *RowIterator, want [][]Value) { + if msg2, ok := compareRead(it, want, false); !ok { + t.Errorf("%s: %s", msg, msg2) + } +} + +func checkReadAndTotalRows(t *testing.T, msg string, it *RowIterator, want [][]Value) { + if msg2, ok := compareRead(it, want, true); !ok { + t.Errorf("%s: %s", msg, msg2) + } +} + +func compareRead(it *RowIterator, want [][]Value, compareTotalRows bool) (msg string, ok bool) { + got, _, totalRows, err := readAll(it) + if err != nil { + return err.Error(), false + } + if len(got) != len(want) { + return fmt.Sprintf("got %d rows, want %d", len(got), len(want)), false + } + if compareTotalRows && len(got) != int(totalRows) { + return fmt.Sprintf("got %d rows, but totalRows = %d", len(got), totalRows), false + } + sort.Sort(byCol0(got)) + for i, r := range got { + gotRow := []Value(r) + wantRow := want[i] + if !testutil.Equal(gotRow, wantRow) { + return fmt.Sprintf("#%d: got %#v, want %#v", i, gotRow, wantRow), false + } + } + return "", true +} + +func readAll(it *RowIterator) ([][]Value, Schema, uint64, error) { + var ( + rows [][]Value + schema Schema + totalRows uint64 + ) + for { + var vals []Value + err := it.Next(&vals) + if err == iterator.Done { + return rows, schema, totalRows, nil + } + if err != nil { + return nil, nil, 0, err + } + rows = append(rows, vals) + schema = it.Schema + totalRows = it.TotalRows + } +} + +type byCol0 [][]Value + +func (b byCol0) Len() int { return len(b) } +func (b byCol0) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byCol0) Less(i, j int) bool { + switch a := b[i][0].(type) { + case string: + return a < b[j][0].(string) + case civil.Date: + return a.Before(b[j][0].(civil.Date)) + default: + panic("unknown type") + } +} + +func hasStatusCode(err error, code int) bool { + if e, ok := err.(*googleapi.Error); ok && e.Code == code { + return true + } + return false +} + +// wait polls the job until it is complete or an error is returned. 
+func wait(ctx context.Context, job *Job) error {
+	status, err := job.Wait(ctx)
+	if err != nil {
+		return err
+	}
+	if status.Err() != nil {
+		return fmt.Errorf("job status error: %#v", status.Err())
+	}
+	if status.Statistics == nil {
+		return errors.New("nil Statistics")
+	}
+	if status.Statistics.EndTime.IsZero() {
+		return errors.New("EndTime is zero")
+	}
+	if status.Statistics.Details == nil {
+		return errors.New("nil Statistics.Details")
+	}
+	return nil
+}
+
+// waitForRow polls the table until it contains a row.
+// TODO(jba): use internal.Retry.
+func waitForRow(ctx context.Context, table *Table) error {
+	for {
+		it := table.Read(ctx)
+		var v []Value
+		err := it.Next(&v)
+		if err == nil {
+			return nil
+		}
+		if err != iterator.Done {
+			return err
+		}
+		time.Sleep(1 * time.Second)
+	}
+}
+
+func putError(err error) string {
+	pme, ok := err.(PutMultiError)
+	if !ok {
+		return err.Error()
+	}
+	var msgs []string
+	for _, err := range pme {
+		msgs = append(msgs, err.Error())
+	}
+	return strings.Join(msgs, "\n")
+}
diff --git a/vendor/cloud.google.com/go/bigquery/iterator.go b/vendor/cloud.google.com/go/bigquery/iterator.go
new file mode 100644
index 0000000..2c10f7a
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/iterator.go
@@ -0,0 +1,215 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"fmt"
+	"reflect"
+
+	"golang.org/x/net/context"
+	bq "google.golang.org/api/bigquery/v2"
+	"google.golang.org/api/iterator"
+)
+
+func newRowIterator(ctx context.Context, t *Table, pf pageFetcher) *RowIterator {
+	it := &RowIterator{
+		ctx:   ctx,
+		table: t,
+		pf:    pf,
+	}
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
+		it.fetch,
+		func() int { return len(it.rows) },
+		func() interface{} { r := it.rows; it.rows = nil; return r })
+	return it
+}
+
+// A RowIterator provides access to the result of a BigQuery lookup.
+type RowIterator struct {
+	ctx      context.Context
+	table    *Table
+	pf       pageFetcher
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+
+	// StartIndex can be set before the first call to Next. If PageInfo().Token
+	// is also set, StartIndex is ignored.
+	StartIndex uint64
+
+	// The schema of the table. Available after the first call to Next.
+	Schema Schema
+
+	// The total number of rows in the result. Available after the first call to Next.
+	// May be zero just after rows were inserted.
+	TotalRows uint64
+
+	rows         [][]Value
+	structLoader structLoader // used to populate a pointer to a struct
+}
+
+// Next loads the next row into dst. Its return value is iterator.Done if there
+// are no more results. Once Next returns iterator.Done, all subsequent calls
+// will return iterator.Done.
+//
+// dst may implement ValueLoader, or may be a *[]Value, *map[string]Value, or struct pointer.
+//
+// If dst is a *[]Value, it will be set to a new []Value whose i'th element
+// will be populated with the i'th column of the row.
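+//
+// For illustration, a minimal read loop into a *[]Value might look like this
+// (a sketch; it stands for a *RowIterator obtained from a query or table read):
+//
+//	for {
+//		var row []Value
+//		err := it.Next(&row)
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			// ... handle err and stop
+//		}
+//		// ... use row
+//	}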
+//
+// If dst is a *map[string]Value, a new map will be created if dst is nil. Then
+// for each schema column name, the map key of that name will be set to the column's
+// value. STRUCT types (RECORD types or nested schemas) become nested maps.
+//
+// If dst is a pointer to a struct, each column in the schema will be matched
+// with an exported field of the struct that has the same name, ignoring case.
+// Unmatched schema columns and struct fields will be ignored.
+//
+// Each BigQuery column type corresponds to one or more Go types; a matching struct
+// field must be of the correct type. The correspondences are:
+//
+//   STRING      string
+//   BOOL        bool
+//   INTEGER     int, int8, int16, int32, int64, uint8, uint16, uint32
+//   FLOAT       float32, float64
+//   BYTES       []byte
+//   TIMESTAMP   time.Time
+//   DATE        civil.Date
+//   TIME        civil.Time
+//   DATETIME    civil.DateTime
+//
+// A repeated field corresponds to a slice or array of the element type. A STRUCT
+// type (RECORD or nested schema) corresponds to a nested struct or struct pointer.
+// All calls to Next on the same iterator must use the same struct type.
+//
+// It is an error to attempt to read a BigQuery NULL value into a struct field,
+// unless the field is of type []byte or is one of the special Null types: NullInt64,
+// NullFloat64, NullBool, NullString, NullTimestamp, NullDate, NullTime or
+// NullDateTime. You can also use a *[]Value or *map[string]Value to read from a
+// table with NULLs.
+func (it *RowIterator) Next(dst interface{}) error {
+	var vl ValueLoader
+	switch dst := dst.(type) {
+	case ValueLoader:
+		vl = dst
+	case *[]Value:
+		vl = (*valueList)(dst)
+	case *map[string]Value:
+		vl = (*valueMap)(dst)
+	default:
+		if !isStructPtr(dst) {
+			return fmt.Errorf("bigquery: cannot convert %T to ValueLoader (need pointer to []Value, map[string]Value, or struct)", dst)
+		}
+	}
+	if err := it.nextFunc(); err != nil {
+		return err
+	}
+	row := it.rows[0]
+	it.rows = it.rows[1:]
+
+	if vl == nil {
+		// This can only happen if dst is a pointer to a struct. We couldn't
+		// set vl above because we need the schema.
+		if err := it.structLoader.set(dst, it.Schema); err != nil {
+			return err
+		}
+		vl = &it.structLoader
+	}
+	return vl.Load(row, it.Schema)
+}
+
+func isStructPtr(x interface{}) bool {
+	t := reflect.TypeOf(x)
+	return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *RowIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
+
+func (it *RowIterator) fetch(pageSize int, pageToken string) (string, error) {
+	res, err := it.pf(it.ctx, it.table, it.Schema, it.StartIndex, int64(pageSize), pageToken)
+	if err != nil {
+		return "", err
+	}
+	it.rows = append(it.rows, res.rows...)
+	it.Schema = res.schema
+	it.TotalRows = res.totalRows
+	return res.pageToken, nil
+}
+
+// A pageFetcher returns a page of rows from a destination table.
+type pageFetcher func(ctx context.Context, _ *Table, _ Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error)
+
+type fetchPageResult struct {
+	pageToken string
+	rows      [][]Value
+	totalRows uint64
+	schema    Schema
+}
+
+// fetchPage gets a page of rows from t.
+func fetchPage(ctx context.Context, t *Table, schema Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error) {
+	// Fetch the table schema in the background, if necessary.
+ errc := make(chan error, 1) + if schema != nil { + errc <- nil + } else { + go func() { + var bqt *bq.Table + err := runWithRetry(ctx, func() (err error) { + bqt, err = t.c.bqs.Tables.Get(t.ProjectID, t.DatasetID, t.TableID). + Fields("schema"). + Context(ctx). + Do() + return err + }) + if err == nil && bqt.Schema != nil { + schema = bqToSchema(bqt.Schema) + } + errc <- err + }() + } + call := t.c.bqs.Tabledata.List(t.ProjectID, t.DatasetID, t.TableID) + setClientHeader(call.Header()) + if pageToken != "" { + call.PageToken(pageToken) + } else { + call.StartIndex(startIndex) + } + if pageSize > 0 { + call.MaxResults(pageSize) + } + var res *bq.TableDataList + err := runWithRetry(ctx, func() (err error) { + res, err = call.Context(ctx).Do() + return err + }) + if err != nil { + return nil, err + } + err = <-errc + if err != nil { + return nil, err + } + rows, err := convertRows(res.Rows, schema) + if err != nil { + return nil, err + } + return &fetchPageResult{ + pageToken: res.PageToken, + rows: rows, + totalRows: uint64(res.TotalRows), + schema: schema, + }, nil +} diff --git a/vendor/cloud.google.com/go/bigquery/iterator_test.go b/vendor/cloud.google.com/go/bigquery/iterator_test.go new file mode 100644 index 0000000..50cf94f --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/iterator_test.go @@ -0,0 +1,363 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "errors" + "fmt" + "testing" + + "cloud.google.com/go/internal/testutil" + + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +type fetchResponse struct { + result *fetchPageResult // The result to return. + err error // The error to return. +} + +// pageFetcherStub services fetch requests by returning data from an in-memory list of values. 
+type pageFetcherStub struct {
+	fetchResponses map[string]fetchResponse
+	err            error
+}
+
+func (pf *pageFetcherStub) fetchPage(ctx context.Context, _ *Table, _ Schema, _ uint64, _ int64, pageToken string) (*fetchPageResult, error) {
+	call, ok := pf.fetchResponses[pageToken]
+	if !ok {
+		pf.err = fmt.Errorf("Unexpected page token: %q", pageToken)
+	}
+	return call.result, call.err
+}
+
+func TestIterator(t *testing.T) {
+	var (
+		iiSchema = Schema{
+			{Type: IntegerFieldType},
+			{Type: IntegerFieldType},
+		}
+		siSchema = Schema{
+			{Type: StringFieldType},
+			{Type: IntegerFieldType},
+		}
+	)
+	fetchFailure := errors.New("fetch failure")
+
+	testCases := []struct {
+		desc           string
+		pageToken      string
+		fetchResponses map[string]fetchResponse
+		want           [][]Value
+		wantErr        error
+		wantSchema     Schema
+		wantTotalRows  uint64
+	}{
+		{
+			desc: "Iteration over single empty page",
+			fetchResponses: map[string]fetchResponse{
+				"": {
+					result: &fetchPageResult{
+						pageToken: "",
+						rows:      [][]Value{},
+						schema:    Schema{},
+					},
+				},
+			},
+			want:       [][]Value{},
+			wantSchema: Schema{},
+		},
+		{
+			desc: "Iteration over single page",
+			fetchResponses: map[string]fetchResponse{
+				"": {
+					result: &fetchPageResult{
+						pageToken: "",
+						rows:      [][]Value{{1, 2}, {11, 12}},
+						schema:    iiSchema,
+						totalRows: 4,
+					},
+				},
+			},
+			want:          [][]Value{{1, 2}, {11, 12}},
+			wantSchema:    iiSchema,
+			wantTotalRows: 4,
+		},
+		{
+			desc: "Iteration over single page with different schema",
+			fetchResponses: map[string]fetchResponse{
+				"": {
+					result: &fetchPageResult{
+						pageToken: "",
+						rows:      [][]Value{{"1", 2}, {"11", 12}},
+						schema:    siSchema,
+					},
+				},
+			},
+			want:       [][]Value{{"1", 2}, {"11", 12}},
+			wantSchema: siSchema,
+		},
+		{
+			desc: "Iteration over two pages",
+			fetchResponses: map[string]fetchResponse{
+				"": {
+					result: &fetchPageResult{
+						pageToken: "a",
+						rows:      [][]Value{{1, 2}, {11, 12}},
+						schema:    iiSchema,
+						totalRows: 4,
+					},
+				},
+				"a": {
+					result: &fetchPageResult{
+						pageToken: "",
+						rows:      [][]Value{{101, 102}, {111, 112}},
+						schema:    iiSchema,
+						totalRows: 4,
+					},
+				},
+			},
+			want:          [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
+			wantSchema:    iiSchema,
+			wantTotalRows: 4,
+		},
+		{
+			desc: "Server response includes empty page",
+			fetchResponses: map[string]fetchResponse{
+				"": {
+					result: &fetchPageResult{
+						pageToken: "a",
+						rows:      [][]Value{{1, 2}, {11, 12}},
+						schema:    iiSchema,
+					},
+				},
+				"a": {
+					result: &fetchPageResult{
+						pageToken: "b",
+						rows:      [][]Value{},
+						schema:    iiSchema,
+					},
+				},
+				"b": {
+					result: &fetchPageResult{
+						pageToken: "",
+						rows:      [][]Value{{101, 102}, {111, 112}},
+						schema:    iiSchema,
+					},
+				},
+			},
+			want:       [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
+			wantSchema: iiSchema,
+		},
+		{
+			desc: "Fetch error",
+			fetchResponses: map[string]fetchResponse{
+				"": {
+					result: &fetchPageResult{
+						pageToken: "a",
+						rows:      [][]Value{{1, 2}, {11, 12}},
+						schema:    iiSchema,
+					},
+				},
+				"a": {
+					// We return some data from this fetch, but also an error.
+					// So the end result should include only data from the previous fetch.
+					err: fetchFailure,
+					result: &fetchPageResult{
+						pageToken: "b",
+						rows:      [][]Value{{101, 102}, {111, 112}},
+						schema:    iiSchema,
+					},
+				},
+			},
+			want:       [][]Value{{1, 2}, {11, 12}},
+			wantErr:    fetchFailure,
+			wantSchema: iiSchema,
+		},
+
+		{
+			desc:      "Skip over an entire page",
+			pageToken: "a",
+			fetchResponses: map[string]fetchResponse{
+				"": {
+					result: &fetchPageResult{
+						pageToken: "a",
+						rows:      [][]Value{{1, 2}, {11, 12}},
+						schema:    iiSchema,
+					},
+				},
+				"a": {
+					result: &fetchPageResult{
+						pageToken: "",
+						rows:      [][]Value{{101, 102}, {111, 112}},
+						schema:    iiSchema,
+					},
+				},
+			},
+			want:       [][]Value{{101, 102}, {111, 112}},
+			wantSchema: iiSchema,
+		},
+
+		{
+			desc:      "Skip beyond all data",
+			pageToken: "b",
+			fetchResponses: map[string]fetchResponse{
+				"": {
+					result: &fetchPageResult{
+						pageToken: "a",
+						rows:      [][]Value{{1, 2}, {11, 12}},
+						schema:    iiSchema,
+					},
+				},
+				"a": {
+					result: &fetchPageResult{
+						pageToken: "b",
+						rows:      [][]Value{{101, 102}, {111, 112}},
+						schema:    iiSchema,
+					},
+				},
+				"b": {
+					result: &fetchPageResult{},
+				},
+			},
+			// In this test case, Next will return iterator.Done on its first call,
+			// so we won't read any values at all.
+			want:       [][]Value{},
+			wantSchema: Schema{},
+		},
+	}
+
+	for _, tc := range testCases {
+		pf := &pageFetcherStub{
+			fetchResponses: tc.fetchResponses,
+		}
+		it := newRowIterator(context.Background(), nil, pf.fetchPage)
+		it.PageInfo().Token = tc.pageToken
+		values, schema, totalRows, err := consumeRowIterator(it)
+		if err != tc.wantErr {
+			t.Fatalf("%s: got %v, want %v", tc.desc, err, tc.wantErr)
+		}
+		if (len(values) != 0 || len(tc.want) != 0) && !testutil.Equal(values, tc.want) {
+			t.Errorf("%s: values:\ngot: %v\nwant:%v", tc.desc, values, tc.want)
+		}
+		if (len(schema) != 0 || len(tc.wantSchema) != 0) && !testutil.Equal(schema, tc.wantSchema) {
+			t.Errorf("%s: iterator.Schema:\ngot: %v\nwant: %v", tc.desc, schema, tc.wantSchema)
+		}
+		if totalRows != tc.wantTotalRows {
+			t.Errorf("%s: totalRows: got %d, want %d", tc.desc, totalRows, tc.wantTotalRows)
+		}
+	}
+}
+
+// consumeRowIterator reads the schema and all values from a RowIterator and returns them.
+func consumeRowIterator(it *RowIterator) ([][]Value, Schema, uint64, error) {
+	var (
+		got       [][]Value
+		schema    Schema
+		totalRows uint64
+	)
+	for {
+		var vls []Value
+		err := it.Next(&vls)
+		if err == iterator.Done {
+			return got, schema, totalRows, nil
+		}
+		if err != nil {
+			return got, schema, totalRows, err
+		}
+		got = append(got, vls)
+		schema = it.Schema
+		totalRows = it.TotalRows
+	}
+}
+
+func TestNextDuringErrorState(t *testing.T) {
+	pf := &pageFetcherStub{
+		fetchResponses: map[string]fetchResponse{
+			"": {err: errors.New("bang")},
+		},
+	}
+	it := newRowIterator(context.Background(), nil, pf.fetchPage)
+	var vals []Value
+	if err := it.Next(&vals); err == nil {
+		t.Errorf("Expected error after calling Next")
+	}
+	if err := it.Next(&vals); err == nil {
+		t.Errorf("Expected error calling Next again when iterator has a non-nil error.")
+	}
+}
+
+func TestNextAfterFinished(t *testing.T) {
+	testCases := []struct {
+		fetchResponses map[string]fetchResponse
+		want           [][]Value
+	}{
+		{
+			fetchResponses: map[string]fetchResponse{
+				"": {
+					result: &fetchPageResult{
+						pageToken: "",
+						rows:      [][]Value{{1, 2}, {11, 12}},
+					},
+				},
+			},
+			want: [][]Value{{1, 2}, {11, 12}},
+		},
+		{
+			fetchResponses: map[string]fetchResponse{
+				"": {
+					result: &fetchPageResult{
+						pageToken: "",
+						rows:      [][]Value{},
+					},
+				},
+			},
+			want: [][]Value{},
+		},
+	}
+
+	for _, tc := range testCases {
+		pf := &pageFetcherStub{
+			fetchResponses: tc.fetchResponses,
+		}
+		it := newRowIterator(context.Background(), nil, pf.fetchPage)
+
+		values, _, _, err := consumeRowIterator(it)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if (len(values) != 0 || len(tc.want) != 0) && !testutil.Equal(values, tc.want) {
+			t.Errorf("values: got:\n%v\nwant:\n%v", values, tc.want)
+		}
+		// Try calling Next again.
+		var vals []Value
+		if err := it.Next(&vals); err != iterator.Done {
+			t.Errorf("Expected Done calling Next when there are no more values")
+		}
+	}
+}
+
+func TestIteratorNextTypes(t *testing.T) {
+	it := newRowIterator(context.Background(), nil, nil)
+	for _, v := range []interface{}{3, "s", []int{}, &[]int{},
+		map[string]Value{}, &map[string]interface{}{},
+		struct{}{},
+	} {
+		if err := it.Next(v); err == nil {
+			t.Errorf("%v: want error, got nil", v)
+		}
+	}
+}
diff --git a/vendor/cloud.google.com/go/bigquery/job.go b/vendor/cloud.google.com/go/bigquery/job.go
new file mode 100644
index 0000000..f01a031
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/job.go
@@ -0,0 +1,711 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"errors"
+	"fmt"
+	"math/rand"
+	"os"
+	"sync"
+	"time"
+
+	"cloud.google.com/go/internal"
+	"cloud.google.com/go/internal/trace"
+	gax "github.com/googleapis/gax-go"
+	"golang.org/x/net/context"
+	bq "google.golang.org/api/bigquery/v2"
+	"google.golang.org/api/googleapi"
+	"google.golang.org/api/iterator"
+)
+
+// A Job represents an operation which has been submitted to BigQuery for processing.
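+//
+// A Job is obtained from a Run method (for example, Query.Run or Loader.Run)
+// or from JobFromID. As a minimal sketch, assuming ctx is a context.Context
+// and client is a configured *Client:
+//
+//	q := client.Query("SELECT 17")
+//	job, err := q.Run(ctx)
+//	if err != nil {
+//		// ... handle err
+//	}
+//	status, err := job.Wait(ctx)
+//	// Check err as well as status.Err(); see Wait below.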
+type Job struct { + c *Client + projectID string + jobID string + location string + + config *bq.JobConfiguration + lastStatus *JobStatus +} + +// JobFromID creates a Job which refers to an existing BigQuery job. The job +// need not have been created by this package. For example, the job may have +// been created in the BigQuery console. +// +// For jobs whose location is other than "US" or "EU", set Client.Location or use +// JobFromIDLocation. +func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) { + return c.JobFromIDLocation(ctx, id, c.Location) +} + +// JobFromIDLocation creates a Job which refers to an existing BigQuery job. The job +// need not have been created by this package (for example, it may have +// been created in the BigQuery console), but it must exist in the specified location. +func (c *Client) JobFromIDLocation(ctx context.Context, id, location string) (j *Job, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.JobFromIDLocation") + defer func() { trace.EndSpan(ctx, err) }() + + bqjob, err := c.getJobInternal(ctx, id, location, "configuration", "jobReference", "status", "statistics") + if err != nil { + return nil, err + } + return bqToJob(bqjob, c) +} + +// ID returns the job's ID. +func (j *Job) ID() string { + return j.jobID +} + +// Location returns the job's location. +func (j *Job) Location() string { + return j.location +} + +// State is one of a sequence of states that a Job progresses through as it is processed. +type State int + +const ( + StateUnspecified State = iota // used only as a default in JobIterator + Pending + Running + Done +) + +// JobStatus contains the current State of a job, and errors encountered while processing that job. +type JobStatus struct { + State State + + err error + + // All errors encountered during the running of the job. + // Not all Errors are fatal, so errors here do not necessarily mean that the job has completed or was unsuccessful. + Errors []*Error + + // Statistics about the job. + Statistics *JobStatistics +} + +// JobConfig contains configuration information for a job. It is implemented by +// *CopyConfig, *ExtractConfig, *LoadConfig and *QueryConfig. +type JobConfig interface { + isJobConfig() +} + +func (*CopyConfig) isJobConfig() {} +func (*ExtractConfig) isJobConfig() {} +func (*LoadConfig) isJobConfig() {} +func (*QueryConfig) isJobConfig() {} + +// Config returns the configuration information for j. +func (j *Job) Config() (JobConfig, error) { + return bqToJobConfig(j.config, j.c) +} + +func bqToJobConfig(q *bq.JobConfiguration, c *Client) (JobConfig, error) { + switch { + case q == nil: + return nil, nil + case q.Copy != nil: + return bqToCopyConfig(q, c), nil + case q.Extract != nil: + return bqToExtractConfig(q, c), nil + case q.Load != nil: + return bqToLoadConfig(q, c), nil + case q.Query != nil: + return bqToQueryConfig(q, c) + default: + return nil, nil + } +} + +// JobIDConfig describes how to create an ID for a job. +type JobIDConfig struct { + // JobID is the ID to use for the job. If empty, a random job ID will be generated. + JobID string + + // If AddJobIDSuffix is true, then a random string will be appended to JobID. + AddJobIDSuffix bool + + // Location is the location for the job. + Location string +} + +// createJobRef creates a JobReference. +func (j *JobIDConfig) createJobRef(c *Client) *bq.JobReference { + // We don't check whether projectID is empty; the server will return an + // error when it encounters the resulting JobReference. 
+	loc := j.Location
+	if loc == "" { // Use Client.Location as a default.
+		loc = c.Location
+	}
+	jr := &bq.JobReference{ProjectId: c.projectID, Location: loc}
+	if j.JobID == "" {
+		jr.JobId = randomIDFn()
+	} else if j.AddJobIDSuffix {
+		jr.JobId = j.JobID + "-" + randomIDFn()
+	} else {
+		jr.JobId = j.JobID
+	}
+	return jr
+}
+
+const alphanum = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
+
+var (
+	rngMu sync.Mutex
+	rng   = rand.New(rand.NewSource(time.Now().UnixNano() ^ int64(os.Getpid())))
+)
+
+// For testing.
+var randomIDFn = randomID
+
+// As of August 2017, the BigQuery service uses 27 alphanumeric characters for
+// suffixes.
+const randomIDLen = 27
+
+func randomID() string {
+	// This is used for both job IDs and insert IDs.
+	var b [randomIDLen]byte
+	rngMu.Lock()
+	for i := 0; i < len(b); i++ {
+		b[i] = alphanum[rng.Intn(len(alphanum))]
+	}
+	rngMu.Unlock()
+	return string(b[:])
+}
+
+// Done reports whether the job has completed.
+// After Done returns true, the Err method will return an error if the job completed unsuccessfully.
+func (s *JobStatus) Done() bool {
+	return s.State == Done
+}
+
+// Err returns the error that caused the job to complete unsuccessfully (if any).
+func (s *JobStatus) Err() error {
+	return s.err
+}
+
+// Status retrieves the current status of the job from BigQuery. It fails if the Status could not be determined.
+func (j *Job) Status(ctx context.Context) (js *JobStatus, err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Job.Status")
+	defer func() { trace.EndSpan(ctx, err) }()
+
+	bqjob, err := j.c.getJobInternal(ctx, j.jobID, j.location, "status", "statistics")
+	if err != nil {
+		return nil, err
+	}
+	if err := j.setStatus(bqjob.Status); err != nil {
+		return nil, err
+	}
+	j.setStatistics(bqjob.Statistics, j.c)
+	return j.lastStatus, nil
+}
+
+// LastStatus returns the most recently retrieved status of the job. The status is
+// retrieved when a new job is created, or when JobFromID or Job.Status is called.
+// Call Job.Status to get the most up-to-date information about a job.
+func (j *Job) LastStatus() *JobStatus {
+	return j.lastStatus
+}
+
+// Cancel requests that a job be cancelled. This method returns without waiting for
+// cancellation to take effect. To check whether the job has terminated, use Job.Status.
+// Cancelled jobs may still incur costs.
+func (j *Job) Cancel(ctx context.Context) error {
+	// Jobs.Cancel returns a job entity, but the only relevant piece of
+	// data it may contain (the status of the job) is unreliable. From the
+	// docs: "This call will return immediately, and the client will need
+	// to poll for the job status to see if the cancel completed
+	// successfully". So it would be misleading to return a status.
+	call := j.c.bqs.Jobs.Cancel(j.projectID, j.jobID).
+		Location(j.location).
+		Fields(). // We don't need any of the response data.
+		Context(ctx)
+	setClientHeader(call.Header())
+	return runWithRetry(ctx, func() error {
+		_, err := call.Do()
+		return err
+	})
+}
+
+// Wait blocks until the job or the context is done. It returns the final status
+// of the job.
+// If an error occurs while retrieving the status, Wait returns that error. But
+// Wait returns nil if the status was retrieved successfully, even if
+// status.Err() != nil. So callers must check both errors. See the example.
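+//
+// A sketch of the two-step error check (the integration tests use the same
+// pattern in their wait helper):
+//
+//	status, err := job.Wait(ctx)
+//	if err != nil {
+//		// ... the status could not be retrieved
+//	}
+//	if status.Err() != nil {
+//		// ... the job completed, but unsuccessfully
+//	}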
+func (j *Job) Wait(ctx context.Context) (js *JobStatus, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Job.Wait") + defer func() { trace.EndSpan(ctx, err) }() + + if j.isQuery() { + // We can avoid polling for query jobs. + if _, err := j.waitForQuery(ctx, j.projectID); err != nil { + return nil, err + } + // Note: extra RPC even if you just want to wait for the query to finish. + js, err := j.Status(ctx) + if err != nil { + return nil, err + } + return js, nil + } + // Non-query jobs must poll. + err = internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) { + js, err = j.Status(ctx) + if err != nil { + return true, err + } + if js.Done() { + return true, nil + } + return false, nil + }) + if err != nil { + return nil, err + } + return js, nil +} + +// Read fetches the results of a query job. +// If j is not a query job, Read returns an error. +func (j *Job) Read(ctx context.Context) (ri *RowIterator, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Job.Read") + defer func() { trace.EndSpan(ctx, err) }() + + return j.read(ctx, j.waitForQuery, fetchPage) +} + +func (j *Job) read(ctx context.Context, waitForQuery func(context.Context, string) (Schema, error), pf pageFetcher) (*RowIterator, error) { + if !j.isQuery() { + return nil, errors.New("bigquery: cannot read from a non-query job") + } + destTable := j.config.Query.DestinationTable + // The destination table should only be nil if there was a query error. + projectID := j.projectID + if destTable != nil && projectID != destTable.ProjectId { + return nil, fmt.Errorf("bigquery: job project ID is %q, but destination table's is %q", projectID, destTable.ProjectId) + } + schema, err := waitForQuery(ctx, projectID) + if err != nil { + return nil, err + } + if destTable == nil { + return nil, errors.New("bigquery: query job missing destination table") + } + dt := bqToTable(destTable, j.c) + it := newRowIterator(ctx, dt, pf) + it.Schema = schema + return it, nil +} + +// waitForQuery waits for the query job to complete and returns its schema. +func (j *Job) waitForQuery(ctx context.Context, projectID string) (Schema, error) { + // Use GetQueryResults only to wait for completion, not to read results. + call := j.c.bqs.Jobs.GetQueryResults(projectID, j.jobID).Location(j.location).Context(ctx).MaxResults(0) + setClientHeader(call.Header()) + backoff := gax.Backoff{ + Initial: 1 * time.Second, + Multiplier: 2, + Max: 60 * time.Second, + } + var res *bq.GetQueryResultsResponse + err := internal.Retry(ctx, backoff, func() (stop bool, err error) { + res, err = call.Do() + if err != nil { + return !retryableError(err), err + } + if !res.JobComplete { // GetQueryResults may return early without error; retry. + return false, nil + } + return true, nil + }) + if err != nil { + return nil, err + } + return bqToSchema(res.Schema), nil +} + +// JobStatistics contains statistics about a job. +type JobStatistics struct { + CreationTime time.Time + StartTime time.Time + EndTime time.Time + TotalBytesProcessed int64 + + Details Statistics +} + +// Statistics is one of ExtractStatistics, LoadStatistics or QueryStatistics. +type Statistics interface { + implementsStatistics() +} + +// ExtractStatistics contains statistics about an extract job. +type ExtractStatistics struct { + // The number of files per destination URI or URI pattern specified in the + // extract configuration. These values will be in the same order as the + // URIs specified in the 'destinationUris' field. 
+ DestinationURIFileCounts []int64 +} + +// LoadStatistics contains statistics about a load job. +type LoadStatistics struct { + // The number of bytes of source data in a load job. + InputFileBytes int64 + + // The number of source files in a load job. + InputFiles int64 + + // Size of the loaded data in bytes. Note that while a load job is in the + // running state, this value may change. + OutputBytes int64 + + // The number of rows imported in a load job. Note that while an import job is + // in the running state, this value may change. + OutputRows int64 +} + +// QueryStatistics contains statistics about a query job. +type QueryStatistics struct { + // Billing tier for the job. + BillingTier int64 + + // Whether the query result was fetched from the query cache. + CacheHit bool + + // The type of query statement, if valid. + StatementType string + + // Total bytes billed for the job. + TotalBytesBilled int64 + + // Total bytes processed for the job. + TotalBytesProcessed int64 + + // Describes execution plan for the query. + QueryPlan []*ExplainQueryStage + + // The number of rows affected by a DML statement. Present only for DML + // statements INSERT, UPDATE or DELETE. + NumDMLAffectedRows int64 + + // ReferencedTables: [Output-only, Experimental] Referenced tables for + // the job. Queries that reference more than 50 tables will not have a + // complete list. + ReferencedTables []*Table + + // The schema of the results. Present only for successful dry run of + // non-legacy SQL queries. + Schema Schema + + // Standard SQL: list of undeclared query parameter names detected during a + // dry run validation. + UndeclaredQueryParameterNames []string +} + +// ExplainQueryStage describes one stage of a query. +type ExplainQueryStage struct { + // Relative amount of the total time the average shard spent on CPU-bound tasks. + ComputeRatioAvg float64 + + // Relative amount of the total time the slowest shard spent on CPU-bound tasks. + ComputeRatioMax float64 + + // Unique ID for stage within plan. + ID int64 + + // Human-readable name for stage. + Name string + + // Relative amount of the total time the average shard spent reading input. + ReadRatioAvg float64 + + // Relative amount of the total time the slowest shard spent reading input. + ReadRatioMax float64 + + // Number of records read into the stage. + RecordsRead int64 + + // Number of records written by the stage. + RecordsWritten int64 + + // Current status for the stage. + Status string + + // List of operations within the stage in dependency order (approximately + // chronological). + Steps []*ExplainQueryStep + + // Relative amount of the total time the average shard spent waiting to be scheduled. + WaitRatioAvg float64 + + // Relative amount of the total time the slowest shard spent waiting to be scheduled. + WaitRatioMax float64 + + // Relative amount of the total time the average shard spent on writing output. + WriteRatioAvg float64 + + // Relative amount of the total time the slowest shard spent on writing output. + WriteRatioMax float64 +} + +// ExplainQueryStep describes one step of a query stage. +type ExplainQueryStep struct { + // Machine-readable operation type. + Kind string + + // Human-readable stage descriptions. + Substeps []string +} + +func (*ExtractStatistics) implementsStatistics() {} +func (*LoadStatistics) implementsStatistics() {} +func (*QueryStatistics) implementsStatistics() {} + +// Jobs lists jobs within a project. 
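+//
+// Iterate with Next until iterator.Done, as in this sketch (client is an
+// assumed, configured *Client):
+//
+//	it := client.Jobs(ctx)
+//	for {
+//		job, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			// ... handle err
+//		}
+//		// ... use job
+//	}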
+func (c *Client) Jobs(ctx context.Context) *JobIterator { + it := &JobIterator{ + ctx: ctx, + c: c, + ProjectID: c.projectID, + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + return it +} + +// JobIterator iterates over jobs in a project. +type JobIterator struct { + ProjectID string // Project ID of the jobs to list. Default is the client's project. + AllUsers bool // Whether to list jobs owned by all users in the project, or just the current caller. + State State // List only jobs in the given state. Defaults to all states. + + ctx context.Context + c *Client + pageInfo *iterator.PageInfo + nextFunc func() error + items []*Job +} + +func (it *JobIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +func (it *JobIterator) Next() (*Job, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + item := it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *JobIterator) fetch(pageSize int, pageToken string) (string, error) { + var st string + switch it.State { + case StateUnspecified: + st = "" + case Pending: + st = "pending" + case Running: + st = "running" + case Done: + st = "done" + default: + return "", fmt.Errorf("bigquery: invalid value for JobIterator.State: %d", it.State) + } + + req := it.c.bqs.Jobs.List(it.ProjectID). + Context(it.ctx). + PageToken(pageToken). + Projection("full"). + AllUsers(it.AllUsers) + if st != "" { + req.StateFilter(st) + } + setClientHeader(req.Header()) + if pageSize > 0 { + req.MaxResults(int64(pageSize)) + } + res, err := req.Do() + if err != nil { + return "", err + } + for _, j := range res.Jobs { + job, err := convertListedJob(j, it.c) + if err != nil { + return "", err + } + it.items = append(it.items, job) + } + return res.NextPageToken, nil +} + +func convertListedJob(j *bq.JobListJobs, c *Client) (*Job, error) { + return bqToJob2(j.JobReference, j.Configuration, j.Status, j.Statistics, c) +} + +func (c *Client) getJobInternal(ctx context.Context, jobID, location string, fields ...googleapi.Field) (*bq.Job, error) { + var job *bq.Job + call := c.bqs.Jobs.Get(c.projectID, jobID).Context(ctx) + if location != "" { + call = call.Location(location) + } + if len(fields) > 0 { + call = call.Fields(fields...) 
+ } + setClientHeader(call.Header()) + err := runWithRetry(ctx, func() (err error) { + job, err = call.Do() + return err + }) + if err != nil { + return nil, err + } + return job, nil +} + +func bqToJob(q *bq.Job, c *Client) (*Job, error) { + return bqToJob2(q.JobReference, q.Configuration, q.Status, q.Statistics, c) +} + +func bqToJob2(qr *bq.JobReference, qc *bq.JobConfiguration, qs *bq.JobStatus, qt *bq.JobStatistics, c *Client) (*Job, error) { + j := &Job{ + projectID: qr.ProjectId, + jobID: qr.JobId, + location: qr.Location, + c: c, + } + j.setConfig(qc) + if err := j.setStatus(qs); err != nil { + return nil, err + } + j.setStatistics(qt, c) + return j, nil +} + +func (j *Job) setConfig(config *bq.JobConfiguration) { + if config == nil { + return + } + j.config = config +} + +func (j *Job) isQuery() bool { + return j.config != nil && j.config.Query != nil +} + +var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done} + +func (j *Job) setStatus(qs *bq.JobStatus) error { + if qs == nil { + return nil + } + state, ok := stateMap[qs.State] + if !ok { + return fmt.Errorf("unexpected job state: %v", qs.State) + } + j.lastStatus = &JobStatus{ + State: state, + err: nil, + } + if err := bqToError(qs.ErrorResult); state == Done && err != nil { + j.lastStatus.err = err + } + for _, ep := range qs.Errors { + j.lastStatus.Errors = append(j.lastStatus.Errors, bqToError(ep)) + } + return nil +} + +func (j *Job) setStatistics(s *bq.JobStatistics, c *Client) { + if s == nil || j.lastStatus == nil { + return + } + js := &JobStatistics{ + CreationTime: unixMillisToTime(s.CreationTime), + StartTime: unixMillisToTime(s.StartTime), + EndTime: unixMillisToTime(s.EndTime), + TotalBytesProcessed: s.TotalBytesProcessed, + } + switch { + case s.Extract != nil: + js.Details = &ExtractStatistics{ + DestinationURIFileCounts: []int64(s.Extract.DestinationUriFileCounts), + } + case s.Load != nil: + js.Details = &LoadStatistics{ + InputFileBytes: s.Load.InputFileBytes, + InputFiles: s.Load.InputFiles, + OutputBytes: s.Load.OutputBytes, + OutputRows: s.Load.OutputRows, + } + case s.Query != nil: + var names []string + for _, qp := range s.Query.UndeclaredQueryParameters { + names = append(names, qp.Name) + } + var tables []*Table + for _, tr := range s.Query.ReferencedTables { + tables = append(tables, bqToTable(tr, c)) + } + js.Details = &QueryStatistics{ + BillingTier: s.Query.BillingTier, + CacheHit: s.Query.CacheHit, + StatementType: s.Query.StatementType, + TotalBytesBilled: s.Query.TotalBytesBilled, + TotalBytesProcessed: s.Query.TotalBytesProcessed, + NumDMLAffectedRows: s.Query.NumDmlAffectedRows, + QueryPlan: queryPlanFromProto(s.Query.QueryPlan), + Schema: bqToSchema(s.Query.Schema), + ReferencedTables: tables, + UndeclaredQueryParameterNames: names, + } + } + j.lastStatus.Statistics = js +} + +func queryPlanFromProto(stages []*bq.ExplainQueryStage) []*ExplainQueryStage { + var res []*ExplainQueryStage + for _, s := range stages { + var steps []*ExplainQueryStep + for _, p := range s.Steps { + steps = append(steps, &ExplainQueryStep{ + Kind: p.Kind, + Substeps: p.Substeps, + }) + } + res = append(res, &ExplainQueryStage{ + ComputeRatioAvg: s.ComputeRatioAvg, + ComputeRatioMax: s.ComputeRatioMax, + ID: s.Id, + Name: s.Name, + ReadRatioAvg: s.ReadRatioAvg, + ReadRatioMax: s.ReadRatioMax, + RecordsRead: s.RecordsRead, + RecordsWritten: s.RecordsWritten, + Status: s.Status, + Steps: steps, + WaitRatioAvg: s.WaitRatioAvg, + WaitRatioMax: s.WaitRatioMax, + WriteRatioAvg: 
s.WriteRatioAvg, + WriteRatioMax: s.WriteRatioMax, + }) + } + return res +} diff --git a/vendor/cloud.google.com/go/bigquery/job_test.go b/vendor/cloud.google.com/go/bigquery/job_test.go new file mode 100644 index 0000000..d2d8bec --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/job_test.go @@ -0,0 +1,95 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "testing" + + "cloud.google.com/go/internal/testutil" + bq "google.golang.org/api/bigquery/v2" +) + +func TestCreateJobRef(t *testing.T) { + defer fixRandomID("RANDOM")() + cNoLoc := &Client{projectID: "projectID"} + cLoc := &Client{projectID: "projectID", Location: "defaultLoc"} + for _, test := range []struct { + in JobIDConfig + client *Client + want *bq.JobReference + }{ + { + in: JobIDConfig{JobID: "foo"}, + want: &bq.JobReference{JobId: "foo"}, + }, + { + in: JobIDConfig{}, + want: &bq.JobReference{JobId: "RANDOM"}, + }, + { + in: JobIDConfig{AddJobIDSuffix: true}, + want: &bq.JobReference{JobId: "RANDOM"}, + }, + { + in: JobIDConfig{JobID: "foo", AddJobIDSuffix: true}, + want: &bq.JobReference{JobId: "foo-RANDOM"}, + }, + { + in: JobIDConfig{JobID: "foo", Location: "loc"}, + want: &bq.JobReference{JobId: "foo", Location: "loc"}, + }, + { + in: JobIDConfig{JobID: "foo"}, + client: cLoc, + want: &bq.JobReference{JobId: "foo", Location: "defaultLoc"}, + }, + { + in: JobIDConfig{JobID: "foo", Location: "loc"}, + client: cLoc, + want: &bq.JobReference{JobId: "foo", Location: "loc"}, + }, + } { + client := test.client + if client == nil { + client = cNoLoc + } + got := test.in.createJobRef(client) + test.want.ProjectId = "projectID" + if !testutil.Equal(got, test.want) { + t.Errorf("%+v: got %+v, want %+v", test.in, got, test.want) + } + } +} + +func fixRandomID(s string) func() { + prev := randomIDFn + randomIDFn = func() string { return s } + return func() { randomIDFn = prev } +} + +func checkJob(t *testing.T, i int, got, want *bq.Job) { + if got.JobReference == nil { + t.Errorf("#%d: empty job reference", i) + return + } + if got.JobReference.JobId == "" { + t.Errorf("#%d: empty job ID", i) + return + } + d := testutil.Diff(got, want) + if d != "" { + t.Errorf("#%d: (got=-, want=+) %s", i, d) + } +} diff --git a/vendor/cloud.google.com/go/bigquery/load.go b/vendor/cloud.google.com/go/bigquery/load.go new file mode 100644 index 0000000..c09d4cc --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/load.go @@ -0,0 +1,141 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "io" + + "cloud.google.com/go/internal/trace" + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +// LoadConfig holds the configuration for a load job. +type LoadConfig struct { + // Src is the source from which data will be loaded. + Src LoadSource + + // Dst is the table into which the data will be loaded. + Dst *Table + + // CreateDisposition specifies the circumstances under which the destination table will be created. + // The default is CreateIfNeeded. + CreateDisposition TableCreateDisposition + + // WriteDisposition specifies how existing data in the destination table is treated. + // The default is WriteAppend. + WriteDisposition TableWriteDisposition + + // The labels associated with this job. + Labels map[string]string + + // If non-nil, the destination table is partitioned by time. + TimePartitioning *TimePartitioning + + // Custom encryption configuration (e.g., Cloud KMS keys). + DestinationEncryptionConfig *EncryptionConfig + + // SchemaUpdateOptions allows the schema of the destination table to be + // updated as a side effect of the load job. + SchemaUpdateOptions []string +} + +func (l *LoadConfig) toBQ() (*bq.JobConfiguration, io.Reader) { + config := &bq.JobConfiguration{ + Labels: l.Labels, + Load: &bq.JobConfigurationLoad{ + CreateDisposition: string(l.CreateDisposition), + WriteDisposition: string(l.WriteDisposition), + DestinationTable: l.Dst.toBQ(), + TimePartitioning: l.TimePartitioning.toBQ(), + DestinationEncryptionConfiguration: l.DestinationEncryptionConfig.toBQ(), + SchemaUpdateOptions: l.SchemaUpdateOptions, + }, + } + media := l.Src.populateLoadConfig(config.Load) + return config, media +} + +func bqToLoadConfig(q *bq.JobConfiguration, c *Client) *LoadConfig { + lc := &LoadConfig{ + Labels: q.Labels, + CreateDisposition: TableCreateDisposition(q.Load.CreateDisposition), + WriteDisposition: TableWriteDisposition(q.Load.WriteDisposition), + Dst: bqToTable(q.Load.DestinationTable, c), + TimePartitioning: bqToTimePartitioning(q.Load.TimePartitioning), + DestinationEncryptionConfig: bqToEncryptionConfig(q.Load.DestinationEncryptionConfiguration), + SchemaUpdateOptions: q.Load.SchemaUpdateOptions, + } + var fc *FileConfig + if len(q.Load.SourceUris) == 0 { + s := NewReaderSource(nil) + fc = &s.FileConfig + lc.Src = s + } else { + s := NewGCSReference(q.Load.SourceUris...) + fc = &s.FileConfig + lc.Src = s + } + bqPopulateFileConfig(q.Load, fc) + return lc +} + +// A Loader loads data from Google Cloud Storage into a BigQuery table. +type Loader struct { + JobIDConfig + LoadConfig + c *Client +} + +// A LoadSource represents a source of data that can be loaded into +// a BigQuery table. +// +// This package defines two LoadSources: GCSReference, for Google Cloud Storage +// objects, and ReaderSource, for data read from an io.Reader. +type LoadSource interface { + // populates config, returns media + populateLoadConfig(*bq.JobConfigurationLoad) io.Reader +} + +// LoaderFrom returns a Loader which can be used to load data into a BigQuery table. +// The returned Loader may optionally be further configured before its Run method is called. +// See GCSReference and ReaderSource for additional configuration options that +// affect loading. 
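+//
+// A minimal sketch, loading CSV data from an io.Reader into an existing
+// *Table named table (both placeholders):
+//
+//	loader := table.LoaderFrom(NewReaderSource(strings.NewReader("a,0\nb,1\n")))
+//	job, err := loader.Run(ctx)
+//	if err != nil {
+//		// ... handle err
+//	}
+//	// Block until done with job.Wait(ctx), then check the returned status.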
+func (t *Table) LoaderFrom(src LoadSource) *Loader { + return &Loader{ + c: t.c, + LoadConfig: LoadConfig{ + Src: src, + Dst: t, + }, + } +} + +// Run initiates a load job. +func (l *Loader) Run(ctx context.Context) (j *Job, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Load.Run") + defer func() { trace.EndSpan(ctx, err) }() + + job, media := l.newJob() + return l.c.insertJob(ctx, job, media) +} + +func (l *Loader) newJob() (*bq.Job, io.Reader) { + config, media := l.LoadConfig.toBQ() + return &bq.Job{ + JobReference: l.JobIDConfig.createJobRef(l.c), + Configuration: config, + }, media +} diff --git a/vendor/cloud.google.com/go/bigquery/load_test.go b/vendor/cloud.google.com/go/bigquery/load_test.go new file mode 100644 index 0000000..385269f --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/load_test.go @@ -0,0 +1,260 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "strings" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + + bq "google.golang.org/api/bigquery/v2" +) + +func defaultLoadJob() *bq.Job { + return &bq.Job{ + JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"}, + Configuration: &bq.JobConfiguration{ + Load: &bq.JobConfigurationLoad{ + DestinationTable: &bq.TableReference{ + ProjectId: "client-project-id", + DatasetId: "dataset-id", + TableId: "table-id", + }, + SourceUris: []string{"uri"}, + }, + }, + } +} + +func stringFieldSchema() *FieldSchema { + return &FieldSchema{Name: "fieldname", Type: StringFieldType} +} + +func nestedFieldSchema() *FieldSchema { + return &FieldSchema{ + Name: "nested", + Type: RecordFieldType, + Schema: Schema{stringFieldSchema()}, + } +} + +func bqStringFieldSchema() *bq.TableFieldSchema { + return &bq.TableFieldSchema{ + Name: "fieldname", + Type: "STRING", + } +} + +func bqNestedFieldSchema() *bq.TableFieldSchema { + return &bq.TableFieldSchema{ + Name: "nested", + Type: "RECORD", + Fields: []*bq.TableFieldSchema{bqStringFieldSchema()}, + } +} + +func TestLoad(t *testing.T) { + defer fixRandomID("RANDOM")() + c := &Client{projectID: "client-project-id"} + + testCases := []struct { + dst *Table + src LoadSource + jobID string + location string + config LoadConfig + want *bq.Job + }{ + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: NewGCSReference("uri"), + want: defaultLoadJob(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: NewGCSReference("uri"), + location: "loc", + want: func() *bq.Job { + j := defaultLoadJob() + j.JobReference.Location = "loc" + return j + }(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + jobID: "ajob", + config: LoadConfig{ + CreateDisposition: CreateNever, + WriteDisposition: WriteTruncate, + Labels: map[string]string{"a": "b"}, + TimePartitioning: &TimePartitioning{Expiration: 1234 * time.Millisecond}, + 
DestinationEncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"}, + SchemaUpdateOptions: []string{"ALLOW_FIELD_ADDITION"}, + }, + src: NewGCSReference("uri"), + want: func() *bq.Job { + j := defaultLoadJob() + j.Configuration.Labels = map[string]string{"a": "b"} + j.Configuration.Load.CreateDisposition = "CREATE_NEVER" + j.Configuration.Load.WriteDisposition = "WRITE_TRUNCATE" + j.Configuration.Load.TimePartitioning = &bq.TimePartitioning{ + Type: "DAY", + ExpirationMs: 1234, + } + j.Configuration.Load.DestinationEncryptionConfiguration = &bq.EncryptionConfiguration{KmsKeyName: "keyName"} + j.JobReference = &bq.JobReference{ + JobId: "ajob", + ProjectId: "client-project-id", + } + j.Configuration.Load.SchemaUpdateOptions = []string{"ALLOW_FIELD_ADDITION"} + return j + }(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: func() *GCSReference { + g := NewGCSReference("uri") + g.MaxBadRecords = 1 + g.AllowJaggedRows = true + g.AllowQuotedNewlines = true + g.IgnoreUnknownValues = true + return g + }(), + want: func() *bq.Job { + j := defaultLoadJob() + j.Configuration.Load.MaxBadRecords = 1 + j.Configuration.Load.AllowJaggedRows = true + j.Configuration.Load.AllowQuotedNewlines = true + j.Configuration.Load.IgnoreUnknownValues = true + return j + }(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: func() *GCSReference { + g := NewGCSReference("uri") + g.Schema = Schema{ + stringFieldSchema(), + nestedFieldSchema(), + } + return g + }(), + want: func() *bq.Job { + j := defaultLoadJob() + j.Configuration.Load.Schema = &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqStringFieldSchema(), + bqNestedFieldSchema(), + }} + return j + }(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: func() *GCSReference { + g := NewGCSReference("uri") + g.SkipLeadingRows = 1 + g.SourceFormat = JSON + g.Encoding = UTF_8 + g.FieldDelimiter = "\t" + g.Quote = "-" + return g + }(), + want: func() *bq.Job { + j := defaultLoadJob() + j.Configuration.Load.SkipLeadingRows = 1 + j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON" + j.Configuration.Load.Encoding = "UTF-8" + j.Configuration.Load.FieldDelimiter = "\t" + hyphen := "-" + j.Configuration.Load.Quote = &hyphen + return j + }(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: NewGCSReference("uri"), + want: func() *bq.Job { + j := defaultLoadJob() + // Quote is left unset in GCSReference, so should be nil here. 
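+				// (The API models Quote as a *string so that an unset quote and an
+				// explicitly empty one stay distinguishable; the ForceZeroQuote case
+				// below exercises the empty form.)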
+ j.Configuration.Load.Quote = nil + return j + }(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: func() *GCSReference { + g := NewGCSReference("uri") + g.ForceZeroQuote = true + return g + }(), + want: func() *bq.Job { + j := defaultLoadJob() + empty := "" + j.Configuration.Load.Quote = &empty + return j + }(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: func() *ReaderSource { + r := NewReaderSource(strings.NewReader("foo")) + r.SkipLeadingRows = 1 + r.SourceFormat = JSON + r.Encoding = UTF_8 + r.FieldDelimiter = "\t" + r.Quote = "-" + return r + }(), + want: func() *bq.Job { + j := defaultLoadJob() + j.Configuration.Load.SourceUris = nil + j.Configuration.Load.SkipLeadingRows = 1 + j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON" + j.Configuration.Load.Encoding = "UTF-8" + j.Configuration.Load.FieldDelimiter = "\t" + hyphen := "-" + j.Configuration.Load.Quote = &hyphen + return j + }(), + }, + } + + for i, tc := range testCases { + loader := tc.dst.LoaderFrom(tc.src) + loader.JobID = tc.jobID + loader.Location = tc.location + tc.config.Src = tc.src + tc.config.Dst = tc.dst + loader.LoadConfig = tc.config + got, _ := loader.newJob() + checkJob(t, i, got, tc.want) + + jc, err := bqToJobConfig(got.Configuration, c) + if err != nil { + t.Fatalf("#%d: %v", i, err) + } + diff := testutil.Diff(jc.(*LoadConfig), &loader.LoadConfig, + cmp.AllowUnexported(Table{}, Client{}), + cmpopts.IgnoreUnexported(ReaderSource{})) + if diff != "" { + t.Errorf("#%d: (got=-, want=+:\n%s", i, diff) + } + } +} diff --git a/vendor/cloud.google.com/go/bigquery/nulls.go b/vendor/cloud.google.com/go/bigquery/nulls.go new file mode 100644 index 0000000..ae30455 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/nulls.go @@ -0,0 +1,299 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "strconv" + "time" + + "cloud.google.com/go/civil" +) + +// NullInt64 represents a BigQuery INT64 that may be NULL. +type NullInt64 struct { + Int64 int64 + Valid bool // Valid is true if Int64 is not NULL. +} + +func (n NullInt64) String() string { return nullstr(n.Valid, n.Int64) } + +// NullString represents a BigQuery STRING that may be NULL. +type NullString struct { + StringVal string + Valid bool // Valid is true if StringVal is not NULL. +} + +func (n NullString) String() string { return nullstr(n.Valid, n.StringVal) } + +// NullFloat64 represents a BigQuery FLOAT64 that may be NULL. +type NullFloat64 struct { + Float64 float64 + Valid bool // Valid is true if Float64 is not NULL. +} + +func (n NullFloat64) String() string { return nullstr(n.Valid, n.Float64) } + +// NullBool represents a BigQuery BOOL that may be NULL. +type NullBool struct { + Bool bool + Valid bool // Valid is true if Bool is not NULL. 
+} + +func (n NullBool) String() string { return nullstr(n.Valid, n.Bool) } + +// NullTimestamp represents a BigQuery TIMESTAMP that may be null. +type NullTimestamp struct { + Timestamp time.Time + Valid bool // Valid is true if Time is not NULL. +} + +func (n NullTimestamp) String() string { return nullstr(n.Valid, n.Timestamp) } + +// NullDate represents a BigQuery DATE that may be null. +type NullDate struct { + Date civil.Date + Valid bool // Valid is true if Date is not NULL. +} + +func (n NullDate) String() string { return nullstr(n.Valid, n.Date) } + +// NullTime represents a BigQuery TIME that may be null. +type NullTime struct { + Time civil.Time + Valid bool // Valid is true if Time is not NULL. +} + +func (n NullTime) String() string { + if !n.Valid { + return "" + } + return CivilTimeString(n.Time) +} + +// NullDateTime represents a BigQuery DATETIME that may be null. +type NullDateTime struct { + DateTime civil.DateTime + Valid bool // Valid is true if DateTime is not NULL. +} + +func (n NullDateTime) String() string { + if !n.Valid { + return "" + } + return CivilDateTimeString(n.DateTime) +} + +func (n NullInt64) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Int64) } +func (n NullFloat64) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Float64) } +func (n NullBool) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Bool) } +func (n NullString) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.StringVal) } +func (n NullTimestamp) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Timestamp) } +func (n NullDate) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Date) } + +func (n NullTime) MarshalJSON() ([]byte, error) { + if !n.Valid { + return jsonNull, nil + } + return []byte(`"` + CivilTimeString(n.Time) + `"`), nil +} + +func (n NullDateTime) MarshalJSON() ([]byte, error) { + if !n.Valid { + return jsonNull, nil + } + return []byte(`"` + CivilDateTimeString(n.DateTime) + `"`), nil +} + +func nullstr(valid bool, v interface{}) string { + if !valid { + return "NULL" + } + return fmt.Sprint(v) +} + +var jsonNull = []byte("null") + +func nulljson(valid bool, v interface{}) ([]byte, error) { + if !valid { + return jsonNull, nil + } + return json.Marshal(v) +} + +func (n *NullInt64) UnmarshalJSON(b []byte) error { + n.Valid = false + n.Int64 = 0 + if bytes.Equal(b, jsonNull) { + return nil + } + + if err := json.Unmarshal(b, &n.Int64); err != nil { + return err + } + n.Valid = true + return nil +} + +func (n *NullFloat64) UnmarshalJSON(b []byte) error { + n.Valid = false + n.Float64 = 0 + if bytes.Equal(b, jsonNull) { + return nil + } + + if err := json.Unmarshal(b, &n.Float64); err != nil { + return err + } + n.Valid = true + return nil +} + +func (n *NullBool) UnmarshalJSON(b []byte) error { + n.Valid = false + n.Bool = false + if bytes.Equal(b, jsonNull) { + return nil + } + + if err := json.Unmarshal(b, &n.Bool); err != nil { + return err + } + n.Valid = true + return nil +} + +func (n *NullString) UnmarshalJSON(b []byte) error { + n.Valid = false + n.StringVal = "" + if bytes.Equal(b, jsonNull) { + return nil + } + + if err := json.Unmarshal(b, &n.StringVal); err != nil { + return err + } + n.Valid = true + return nil +} + +func (n *NullTimestamp) UnmarshalJSON(b []byte) error { + n.Valid = false + n.Timestamp = time.Time{} + if bytes.Equal(b, jsonNull) { + return nil + } + + if err := json.Unmarshal(b, &n.Timestamp); err != nil { + return err + } + n.Valid = true + return nil +} + +func (n *NullDate) 
UnmarshalJSON(b []byte) error { + n.Valid = false + n.Date = civil.Date{} + if bytes.Equal(b, jsonNull) { + return nil + } + + if err := json.Unmarshal(b, &n.Date); err != nil { + return err + } + n.Valid = true + return nil +} + +func (n *NullTime) UnmarshalJSON(b []byte) error { + n.Valid = false + n.Time = civil.Time{} + if bytes.Equal(b, jsonNull) { + return nil + } + + s, err := strconv.Unquote(string(b)) + if err != nil { + return err + } + + t, err := civil.ParseTime(s) + if err != nil { + return err + } + n.Time = t + + n.Valid = true + return nil +} + +func (n *NullDateTime) UnmarshalJSON(b []byte) error { + n.Valid = false + n.DateTime = civil.DateTime{} + if bytes.Equal(b, jsonNull) { + return nil + } + + s, err := strconv.Unquote(string(b)) + if err != nil { + return err + } + + dt, err := parseCivilDateTime(s) + if err != nil { + return err + } + n.DateTime = dt + + n.Valid = true + return nil +} + +var ( + typeOfNullInt64 = reflect.TypeOf(NullInt64{}) + typeOfNullFloat64 = reflect.TypeOf(NullFloat64{}) + typeOfNullBool = reflect.TypeOf(NullBool{}) + typeOfNullString = reflect.TypeOf(NullString{}) + typeOfNullTimestamp = reflect.TypeOf(NullTimestamp{}) + typeOfNullDate = reflect.TypeOf(NullDate{}) + typeOfNullTime = reflect.TypeOf(NullTime{}) + typeOfNullDateTime = reflect.TypeOf(NullDateTime{}) +) + +func nullableFieldType(t reflect.Type) FieldType { + switch t { + case typeOfNullInt64: + return IntegerFieldType + case typeOfNullFloat64: + return FloatFieldType + case typeOfNullBool: + return BooleanFieldType + case typeOfNullString: + return StringFieldType + case typeOfNullTimestamp: + return TimestampFieldType + case typeOfNullDate: + return DateFieldType + case typeOfNullTime: + return TimeFieldType + case typeOfNullDateTime: + return DateTimeFieldType + default: + return "" + } +} diff --git a/vendor/cloud.google.com/go/bigquery/nulls_test.go b/vendor/cloud.google.com/go/bigquery/nulls_test.go new file mode 100644 index 0000000..87fcfb2 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/nulls_test.go @@ -0,0 +1,73 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
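The Null wrapper types defined above also implement json.Marshaler and json.Unmarshaler, so they round-trip cleanly through encoding/json: an invalid value marshals to the JSON null literal, and unmarshaling a non-null value sets Valid. A minimal sketch of that behavior, outside the vendored tree (the struct and its field names are illustrative only):

package main

import (
	"encoding/json"
	"fmt"

	"cloud.google.com/go/bigquery"
)

func main() {
	// Score is left as its zero value, so Valid is false and it is NULL.
	row := struct {
		Name  bigquery.NullString `json:"name"`
		Score bigquery.NullInt64  `json:"score"`
	}{Name: bigquery.NullString{StringVal: "alice", Valid: true}}

	b, err := json.Marshal(row)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"name":"alice","score":null}

	// Unmarshaling a non-null value sets Valid to true.
	var n bigquery.NullInt64
	if err := json.Unmarshal([]byte("7"), &n); err != nil {
		panic(err)
	}
	fmt.Println(n.Valid, n.Int64) // true 7
}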
+ +package bigquery + +import ( + "encoding/json" + "reflect" + "testing" + + "cloud.google.com/go/civil" + "cloud.google.com/go/internal/testutil" +) + +var ( + nullsTestTime = civil.Time{Hour: 7, Minute: 50, Second: 22, Nanosecond: 1000} + nullsTestDateTime = civil.DateTime{Date: civil.Date{Year: 2016, Month: 11, Day: 5}, Time: nullsTestTime} +) + +func TestNullsJSON(t *testing.T) { + for _, test := range []struct { + in interface{} + want string + }{ + {&NullInt64{Valid: true, Int64: 3}, `3`}, + {&NullFloat64{Valid: true, Float64: 3.14}, `3.14`}, + {&NullBool{Valid: true, Bool: true}, `true`}, + {&NullString{Valid: true, StringVal: "foo"}, `"foo"`}, + {&NullTimestamp{Valid: true, Timestamp: testTimestamp}, `"2016-11-05T07:50:22.000000008Z"`}, + {&NullDate{Valid: true, Date: testDate}, `"2016-11-05"`}, + {&NullTime{Valid: true, Time: nullsTestTime}, `"07:50:22.000001"`}, + {&NullDateTime{Valid: true, DateTime: nullsTestDateTime}, `"2016-11-05 07:50:22.000001"`}, + + {&NullInt64{}, `null`}, + {&NullFloat64{}, `null`}, + {&NullBool{}, `null`}, + {&NullString{}, `null`}, + {&NullTimestamp{}, `null`}, + {&NullDate{}, `null`}, + {&NullTime{}, `null`}, + {&NullDateTime{}, `null`}, + } { + bytes, err := json.Marshal(test.in) + if err != nil { + t.Fatal(err) + } + if got, want := string(bytes), test.want; got != want { + t.Errorf("%#v: got %s, want %s", test.in, got, want) + } + + typ := reflect.Indirect(reflect.ValueOf(test.in)).Type() + value := reflect.New(typ).Interface() + err = json.Unmarshal(bytes, value) + if err != nil { + t.Fatal(err) + } + + if !testutil.Equal(value, test.in) { + t.Errorf("%#v: got %#v, want %#v", test.in, value, test.in) + } + } +} diff --git a/vendor/cloud.google.com/go/bigquery/oc_test.go b/vendor/cloud.google.com/go/bigquery/oc_test.go new file mode 100644 index 0000000..5ea53ba --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/oc_test.go @@ -0,0 +1,40 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +package bigquery + +import ( + "testing" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" +) + +func TestOCTracing(t *testing.T) { + ctx := context.Background() + client := getClient(t) + defer client.Close() + + te := testutil.NewTestExporter() + defer te.Unregister() + + q := client.Query("select *") + q.Run(ctx) // Doesn't matter if we get an error; span should be created either way + + if len(te.Spans) == 0 { + t.Fatalf("Expected some spans to be created, but got %d", 0) + } +} diff --git a/vendor/cloud.google.com/go/bigquery/params.go b/vendor/cloud.google.com/go/bigquery/params.go new file mode 100644 index 0000000..bb9fa27 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/params.go @@ -0,0 +1,346 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "encoding/base64" + "errors" + "fmt" + "reflect" + "regexp" + "time" + + "cloud.google.com/go/civil" + "cloud.google.com/go/internal/fields" + + bq "google.golang.org/api/bigquery/v2" +) + +var ( + // See https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#timestamp-type. + timestampFormat = "2006-01-02 15:04:05.999999-07:00" + + // See https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#schema.fields.name + validFieldName = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]{0,127}$") +) + +const nullableTagOption = "nullable" + +func bqTagParser(t reflect.StructTag) (name string, keep bool, other interface{}, err error) { + name, keep, opts, err := fields.ParseStandardTag("bigquery", t) + if err != nil { + return "", false, nil, err + } + if name != "" && !validFieldName.MatchString(name) { + return "", false, nil, errInvalidFieldName + } + for _, opt := range opts { + if opt != nullableTagOption { + return "", false, nil, fmt.Errorf( + "bigquery: invalid tag option %q. The only valid option is %q", + opt, nullableTagOption) + } + } + return name, keep, opts, nil +} + +var fieldCache = fields.NewCache(bqTagParser, nil, nil) + +var ( + int64ParamType = &bq.QueryParameterType{Type: "INT64"} + float64ParamType = &bq.QueryParameterType{Type: "FLOAT64"} + boolParamType = &bq.QueryParameterType{Type: "BOOL"} + stringParamType = &bq.QueryParameterType{Type: "STRING"} + bytesParamType = &bq.QueryParameterType{Type: "BYTES"} + dateParamType = &bq.QueryParameterType{Type: "DATE"} + timeParamType = &bq.QueryParameterType{Type: "TIME"} + dateTimeParamType = &bq.QueryParameterType{Type: "DATETIME"} + timestampParamType = &bq.QueryParameterType{Type: "TIMESTAMP"} +) + +var ( + typeOfDate = reflect.TypeOf(civil.Date{}) + typeOfTime = reflect.TypeOf(civil.Time{}) + typeOfDateTime = reflect.TypeOf(civil.DateTime{}) + typeOfGoTime = reflect.TypeOf(time.Time{}) +) + +// A QueryParameter is a parameter to a query. +type QueryParameter struct { + // Name is used for named parameter mode. + // It must match the name in the query case-insensitively. + Name string + + // Value is the value of the parameter. + // + // When you create a QueryParameter to send to BigQuery, the following Go types + // are supported, with their corresponding Bigquery types: + // int, int8, int16, int32, int64, uint8, uint16, uint32: INT64 + // Note that uint, uint64 and uintptr are not supported, because + // they may contain values that cannot fit into a 64-bit signed integer. + // float32, float64: FLOAT64 + // bool: BOOL + // string: STRING + // []byte: BYTES + // time.Time: TIMESTAMP + // Arrays and slices of the above. + // Structs of the above. Only the exported fields are used. + // + // When a QueryParameter is returned inside a QueryConfig from a call to + // Job.Config: + // Integers are of type int64. + // Floating-point values are of type float64. + // Arrays are of type []interface{}, regardless of the array element type. + // Structs are of type map[string]interface{}. 
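+	//
+	// For example, a []int value is sent as an ARRAY<INT64> parameter, and a
+	// Job.Config round trip returns it as a []interface{} holding int64s.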
+ Value interface{} +} + +func (p QueryParameter) toBQ() (*bq.QueryParameter, error) { + pv, err := paramValue(reflect.ValueOf(p.Value)) + if err != nil { + return nil, err + } + pt, err := paramType(reflect.TypeOf(p.Value)) + if err != nil { + return nil, err + } + return &bq.QueryParameter{ + Name: p.Name, + ParameterValue: &pv, + ParameterType: pt, + }, nil +} + +func paramType(t reflect.Type) (*bq.QueryParameterType, error) { + if t == nil { + return nil, errors.New("bigquery: nil parameter") + } + switch t { + case typeOfDate: + return dateParamType, nil + case typeOfTime: + return timeParamType, nil + case typeOfDateTime: + return dateTimeParamType, nil + case typeOfGoTime: + return timestampParamType, nil + } + switch t.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uint16, reflect.Uint32: + return int64ParamType, nil + + case reflect.Float32, reflect.Float64: + return float64ParamType, nil + + case reflect.Bool: + return boolParamType, nil + + case reflect.String: + return stringParamType, nil + + case reflect.Slice: + if t.Elem().Kind() == reflect.Uint8 { + return bytesParamType, nil + } + fallthrough + + case reflect.Array: + et, err := paramType(t.Elem()) + if err != nil { + return nil, err + } + return &bq.QueryParameterType{Type: "ARRAY", ArrayType: et}, nil + + case reflect.Ptr: + if t.Elem().Kind() != reflect.Struct { + break + } + t = t.Elem() + fallthrough + + case reflect.Struct: + var fts []*bq.QueryParameterTypeStructTypes + fields, err := fieldCache.Fields(t) + if err != nil { + return nil, err + } + for _, f := range fields { + pt, err := paramType(f.Type) + if err != nil { + return nil, err + } + fts = append(fts, &bq.QueryParameterTypeStructTypes{ + Name: f.Name, + Type: pt, + }) + } + return &bq.QueryParameterType{Type: "STRUCT", StructTypes: fts}, nil + } + return nil, fmt.Errorf("bigquery: Go type %s cannot be represented as a parameter type", t) +} + +func paramValue(v reflect.Value) (bq.QueryParameterValue, error) { + var res bq.QueryParameterValue + if !v.IsValid() { + return res, errors.New("bigquery: nil parameter") + } + t := v.Type() + switch t { + case typeOfDate: + res.Value = v.Interface().(civil.Date).String() + return res, nil + + case typeOfTime: + // civil.Time has nanosecond resolution, but BigQuery TIME only microsecond. + // (If we send nanoseconds, then when we try to read the result we get "query job + // missing destination table"). 
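+		// CivilTimeString emits only microsecond precision for this reason.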
+		res.Value = CivilTimeString(v.Interface().(civil.Time))
+		return res, nil
+
+	case typeOfDateTime:
+		res.Value = CivilDateTimeString(v.Interface().(civil.DateTime))
+		return res, nil
+
+	case typeOfGoTime:
+		res.Value = v.Interface().(time.Time).Format(timestampFormat)
+		return res, nil
+	}
+	switch t.Kind() {
+	case reflect.Slice:
+		if t.Elem().Kind() == reflect.Uint8 {
+			res.Value = base64.StdEncoding.EncodeToString(v.Interface().([]byte))
+			return res, nil
+		}
+		fallthrough
+
+	case reflect.Array:
+		var vals []*bq.QueryParameterValue
+		for i := 0; i < v.Len(); i++ {
+			val, err := paramValue(v.Index(i))
+			if err != nil {
+				return bq.QueryParameterValue{}, err
+			}
+			vals = append(vals, &val)
+		}
+		return bq.QueryParameterValue{ArrayValues: vals}, nil
+
+	case reflect.Ptr:
+		if t.Elem().Kind() != reflect.Struct {
+			return res, fmt.Errorf("bigquery: Go type %s cannot be represented as a parameter value", t)
+		}
+		t = t.Elem()
+		v = v.Elem()
+		if !v.IsValid() {
+			// A nil pointer becomes the empty value.
+			return res, nil
+		}
+		fallthrough
+
+	case reflect.Struct:
+		fields, err := fieldCache.Fields(t)
+		if err != nil {
+			return bq.QueryParameterValue{}, err
+		}
+		res.StructValues = map[string]bq.QueryParameterValue{}
+		for _, f := range fields {
+			fv := v.FieldByIndex(f.Index)
+			fp, err := paramValue(fv)
+			if err != nil {
+				return bq.QueryParameterValue{}, err
+			}
+			res.StructValues[f.Name] = fp
+		}
+		return res, nil
+	}
+	// None of the above: assume a scalar type. (If it's not a valid type,
+	// paramType will catch the error.)
+	res.Value = fmt.Sprint(v.Interface())
+	return res, nil
+}
+
+func bqToQueryParameter(q *bq.QueryParameter) (QueryParameter, error) {
+	p := QueryParameter{Name: q.Name}
+	val, err := convertParamValue(q.ParameterValue, q.ParameterType)
+	if err != nil {
+		return QueryParameter{}, err
+	}
+	p.Value = val
+	return p, nil
+}
+
+var paramTypeToFieldType = map[string]FieldType{
+	int64ParamType.Type:   IntegerFieldType,
+	float64ParamType.Type: FloatFieldType,
+	boolParamType.Type:    BooleanFieldType,
+	stringParamType.Type:  StringFieldType,
+	bytesParamType.Type:   BytesFieldType,
+	dateParamType.Type:    DateFieldType,
+	timeParamType.Type:    TimeFieldType,
+}
+
+// convertParamValue converts a parameter value from the service to a Go value. This
+// is similar to, but not quite the same as, converting data values.
+func convertParamValue(qval *bq.QueryParameterValue, qtype *bq.QueryParameterType) (interface{}, error) {
+	switch qtype.Type {
+	case "ARRAY":
+		if qval == nil {
+			return []interface{}(nil), nil
+		}
+		return convertParamArray(qval.ArrayValues, qtype.ArrayType)
+	case "STRUCT":
+		if qval == nil {
+			return map[string]interface{}(nil), nil
+		}
+		return convertParamStruct(qval.StructValues, qtype.StructTypes)
+	case "TIMESTAMP":
+		return time.Parse(timestampFormat, qval.Value)
+	case "DATETIME":
+		return parseCivilDateTime(qval.Value)
+	default:
+		return convertBasicType(qval.Value, paramTypeToFieldType[qtype.Type])
+	}
+}
+
+// convertParamArray converts a query parameter array value to a Go value. It
+// always returns a []interface{}.
+func convertParamArray(elVals []*bq.QueryParameterValue, elType *bq.QueryParameterType) ([]interface{}, error) {
+	var vals []interface{}
+	for _, el := range elVals {
+		val, err := convertParamValue(el, elType)
+		if err != nil {
+			return nil, err
+		}
+		vals = append(vals, val)
+	}
+	return vals, nil
+}
+
+// convertParamStruct converts a query parameter struct value into a Go value. It
+// always returns a map[string]interface{}.
+func convertParamStruct(sVals map[string]bq.QueryParameterValue, sTypes []*bq.QueryParameterTypeStructTypes) (map[string]interface{}, error) { + vals := map[string]interface{}{} + for _, st := range sTypes { + if sv, ok := sVals[st.Name]; ok { + val, err := convertParamValue(&sv, st.Type) + if err != nil { + return nil, err + } + vals[st.Name] = val + } else { + vals[st.Name] = nil + } + } + return vals, nil +} diff --git a/vendor/cloud.google.com/go/bigquery/params_test.go b/vendor/cloud.google.com/go/bigquery/params_test.go new file mode 100644 index 0000000..327b1c7 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/params_test.go @@ -0,0 +1,361 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "errors" + "math" + "reflect" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + + "cloud.google.com/go/civil" + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +var scalarTests = []struct { + val interface{} // The Go value + wantVal string // paramValue's desired output + wantType *bq.QueryParameterType // paramType's desired output +}{ + {int64(0), "0", int64ParamType}, + {3.14, "3.14", float64ParamType}, + {3.14159e-87, "3.14159e-87", float64ParamType}, + {true, "true", boolParamType}, + {"string", "string", stringParamType}, + {"\u65e5\u672c\u8a9e\n", "\u65e5\u672c\u8a9e\n", stringParamType}, + {math.NaN(), "NaN", float64ParamType}, + {[]byte("foo"), "Zm9v", bytesParamType}, // base64 encoding of "foo" + {time.Date(2016, 3, 20, 4, 22, 9, 5000, time.FixedZone("neg1-2", -3720)), + "2016-03-20 04:22:09.000005-01:02", + timestampParamType}, + {civil.Date{Year: 2016, Month: 3, Day: 20}, "2016-03-20", dateParamType}, + {civil.Time{Hour: 4, Minute: 5, Second: 6, Nanosecond: 789000000}, "04:05:06.789000", timeParamType}, + {civil.DateTime{Date: civil.Date{Year: 2016, Month: 3, Day: 20}, Time: civil.Time{Hour: 4, Minute: 5, Second: 6, Nanosecond: 789000000}}, + "2016-03-20 04:05:06.789000", + dateTimeParamType}, +} + +type ( + S1 struct { + A int + B *S2 + C bool + } + S2 struct { + D string + e int + } +) + +var ( + s1 = S1{ + A: 1, + B: &S2{D: "s"}, + C: true, + } + + s1ParamType = &bq.QueryParameterType{ + Type: "STRUCT", + StructTypes: []*bq.QueryParameterTypeStructTypes{ + {Name: "A", Type: int64ParamType}, + {Name: "B", Type: &bq.QueryParameterType{ + Type: "STRUCT", + StructTypes: []*bq.QueryParameterTypeStructTypes{ + {Name: "D", Type: stringParamType}, + }, + }}, + {Name: "C", Type: boolParamType}, + }, + } + + s1ParamValue = bq.QueryParameterValue{ + StructValues: map[string]bq.QueryParameterValue{ + "A": sval("1"), + "B": bq.QueryParameterValue{ + StructValues: map[string]bq.QueryParameterValue{ + "D": sval("s"), + }, + }, + "C": sval("true"), + }, + } + + s1ParamReturnValue = map[string]interface{}{ + "A": int64(1), + "B": map[string]interface{}{"D": "s"}, + "C": true, + } +) + +func sval(s string) 
bq.QueryParameterValue { + return bq.QueryParameterValue{Value: s} +} + +func TestParamValueScalar(t *testing.T) { + for _, test := range scalarTests { + got, err := paramValue(reflect.ValueOf(test.val)) + if err != nil { + t.Errorf("%v: got %v, want nil", test.val, err) + continue + } + want := sval(test.wantVal) + if !testutil.Equal(got, want) { + t.Errorf("%v:\ngot %+v\nwant %+v", test.val, got, want) + } + } +} + +func TestParamValueArray(t *testing.T) { + qpv := bq.QueryParameterValue{ArrayValues: []*bq.QueryParameterValue{ + {Value: "1"}, + {Value: "2"}, + }, + } + for _, test := range []struct { + val interface{} + want bq.QueryParameterValue + }{ + {[]int(nil), bq.QueryParameterValue{}}, + {[]int{}, bq.QueryParameterValue{}}, + {[]int{1, 2}, qpv}, + {[2]int{1, 2}, qpv}, + } { + got, err := paramValue(reflect.ValueOf(test.val)) + if err != nil { + t.Fatal(err) + } + if !testutil.Equal(got, test.want) { + t.Errorf("%#v:\ngot %+v\nwant %+v", test.val, got, test.want) + } + } +} + +func TestParamValueStruct(t *testing.T) { + got, err := paramValue(reflect.ValueOf(s1)) + if err != nil { + t.Fatal(err) + } + if !testutil.Equal(got, s1ParamValue) { + t.Errorf("got %+v\nwant %+v", got, s1ParamValue) + } +} + +func TestParamValueErrors(t *testing.T) { + // paramValue lets a few invalid types through, but paramType catches them. + // Since we never call one without the other that's fine. + for _, val := range []interface{}{nil, new([]int)} { + _, err := paramValue(reflect.ValueOf(val)) + if err == nil { + t.Errorf("%v (%T): got nil, want error", val, val) + } + } +} + +func TestParamType(t *testing.T) { + for _, test := range scalarTests { + got, err := paramType(reflect.TypeOf(test.val)) + if err != nil { + t.Fatal(err) + } + if !testutil.Equal(got, test.wantType) { + t.Errorf("%v (%T): got %v, want %v", test.val, test.val, got, test.wantType) + } + } + for _, test := range []struct { + val interface{} + want *bq.QueryParameterType + }{ + {uint32(32767), int64ParamType}, + {[]byte("foo"), bytesParamType}, + {[]int{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: int64ParamType}}, + {[3]bool{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: boolParamType}}, + {S1{}, s1ParamType}, + } { + got, err := paramType(reflect.TypeOf(test.val)) + if err != nil { + t.Fatal(err) + } + if !testutil.Equal(got, test.want) { + t.Errorf("%v (%T): got %v, want %v", test.val, test.val, got, test.want) + } + } +} + +func TestParamTypeErrors(t *testing.T) { + for _, val := range []interface{}{ + nil, uint(0), new([]int), make(chan int), + } { + _, err := paramType(reflect.TypeOf(val)) + if err == nil { + t.Errorf("%v (%T): got nil, want error", val, val) + } + } +} + +func TestConvertParamValue(t *testing.T) { + // Scalars. + for _, test := range scalarTests { + pval, err := paramValue(reflect.ValueOf(test.val)) + if err != nil { + t.Fatal(err) + } + ptype, err := paramType(reflect.TypeOf(test.val)) + if err != nil { + t.Fatal(err) + } + got, err := convertParamValue(&pval, ptype) + if err != nil { + t.Fatalf("convertParamValue(%+v, %+v): %v", pval, ptype, err) + } + if !testutil.Equal(got, test.val) { + t.Errorf("%#v: got %#v", test.val, got) + } + } + // Arrays. 
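+	// A bq.QueryParameterValue with nil ArrayValues converts to a nil
+	// []interface{}.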
+ for _, test := range []struct { + pval *bq.QueryParameterValue + want []interface{} + }{ + { + &bq.QueryParameterValue{}, + nil, + }, + { + &bq.QueryParameterValue{ + ArrayValues: []*bq.QueryParameterValue{{Value: "1"}, {Value: "2"}}, + }, + []interface{}{int64(1), int64(2)}, + }, + } { + ptype := &bq.QueryParameterType{Type: "ARRAY", ArrayType: int64ParamType} + got, err := convertParamValue(test.pval, ptype) + if err != nil { + t.Fatalf("%+v: %v", test.pval, err) + } + if !testutil.Equal(got, test.want) { + t.Errorf("%+v: got %+v, want %+v", test.pval, got, test.want) + } + } + // Structs. + got, err := convertParamValue(&s1ParamValue, s1ParamType) + if err != nil { + t.Fatal(err) + } + if !testutil.Equal(got, s1ParamReturnValue) { + t.Errorf("got %+v, want %+v", got, s1ParamReturnValue) + } +} + +func TestIntegration_ScalarParam(t *testing.T) { + roundToMicros := cmp.Transformer("RoundToMicros", + func(t time.Time) time.Time { return t.Round(time.Microsecond) }) + c := getClient(t) + for _, test := range scalarTests { + gotData, gotParam, err := paramRoundTrip(c, test.val) + if err != nil { + t.Fatal(err) + } + if !testutil.Equal(gotData, test.val, roundToMicros) { + t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", gotData, gotData, test.val, test.val) + } + if !testutil.Equal(gotParam, test.val, roundToMicros) { + t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", gotParam, gotParam, test.val, test.val) + } + } +} + +func TestIntegration_OtherParam(t *testing.T) { + c := getClient(t) + for _, test := range []struct { + val interface{} + wantData interface{} + wantParam interface{} + }{ + {[]int(nil), []Value(nil), []interface{}(nil)}, + {[]int{}, []Value(nil), []interface{}(nil)}, + { + []int{1, 2}, + []Value{int64(1), int64(2)}, + []interface{}{int64(1), int64(2)}, + }, + { + [3]int{1, 2, 3}, + []Value{int64(1), int64(2), int64(3)}, + []interface{}{int64(1), int64(2), int64(3)}, + }, + { + S1{}, + []Value{int64(0), nil, false}, + map[string]interface{}{ + "A": int64(0), + "B": nil, + "C": false, + }, + }, + { + s1, + []Value{int64(1), []Value{"s"}, true}, + s1ParamReturnValue, + }, + } { + gotData, gotParam, err := paramRoundTrip(c, test.val) + if err != nil { + t.Fatal(err) + } + if !testutil.Equal(gotData, test.wantData) { + t.Errorf("%#v:\ngot %#v (%T)\nwant %#v (%T)", + test.val, gotData, gotData, test.wantData, test.wantData) + } + if !testutil.Equal(gotParam, test.wantParam) { + t.Errorf("%#v:\ngot %#v (%T)\nwant %#v (%T)", + test.val, gotParam, gotParam, test.wantParam, test.wantParam) + } + } +} + +// paramRoundTrip passes x as a query parameter to BigQuery. It returns +// the resulting data value from running the query and the parameter value from +// the returned job configuration. 
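+// Both integration tests above use it to check each direction of that
+// conversion against the same input value.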
+func paramRoundTrip(c *Client, x interface{}) (data Value, param interface{}, err error) { + ctx := context.Background() + q := c.Query("select ?") + q.Parameters = []QueryParameter{{Value: x}} + job, err := q.Run(ctx) + if err != nil { + return nil, nil, err + } + it, err := job.Read(ctx) + if err != nil { + return nil, nil, err + } + var val []Value + err = it.Next(&val) + if err != nil { + return nil, nil, err + } + if len(val) != 1 { + return nil, nil, errors.New("wrong number of values") + } + conf, err := job.Config() + if err != nil { + return nil, nil, err + } + return val[0], conf.(*QueryConfig).Parameters[0].Value, nil +} diff --git a/vendor/cloud.google.com/go/bigquery/query.go b/vendor/cloud.google.com/go/bigquery/query.go new file mode 100644 index 0000000..b0a473b --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/query.go @@ -0,0 +1,298 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "errors" + + "cloud.google.com/go/internal/trace" + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +// QueryConfig holds the configuration for a query job. +type QueryConfig struct { + // Dst is the table into which the results of the query will be written. + // If this field is nil, a temporary table will be created. + Dst *Table + + // The query to execute. See https://cloud.google.com/bigquery/query-reference for details. + Q string + + // DefaultProjectID and DefaultDatasetID specify the dataset to use for unqualified table names in the query. + // If DefaultProjectID is set, DefaultDatasetID must also be set. + DefaultProjectID string + DefaultDatasetID string + + // TableDefinitions describes data sources outside of BigQuery. + // The map keys may be used as table names in the query string. + // + // When a QueryConfig is returned from Job.Config, the map values + // are always of type *ExternalDataConfig. + TableDefinitions map[string]ExternalData + + // CreateDisposition specifies the circumstances under which the destination table will be created. + // The default is CreateIfNeeded. + CreateDisposition TableCreateDisposition + + // WriteDisposition specifies how existing data in the destination table is treated. + // The default is WriteEmpty. + WriteDisposition TableWriteDisposition + + // DisableQueryCache prevents results being fetched from the query cache. + // If this field is false, results are fetched from the cache if they are available. + // The query cache is a best-effort cache that is flushed whenever tables in the query are modified. + // Cached results are only available when TableID is unspecified in the query's destination Table. + // For more information, see https://cloud.google.com/bigquery/querying-data#querycaching + DisableQueryCache bool + + // DisableFlattenedResults prevents results being flattened. + // If this field is false, results from nested and repeated fields are flattened. 
+ // DisableFlattenedResults implies AllowLargeResults + // For more information, see https://cloud.google.com/bigquery/docs/data#nested + DisableFlattenedResults bool + + // AllowLargeResults allows the query to produce arbitrarily large result tables. + // The destination must be a table. + // When using this option, queries will take longer to execute, even if the result set is small. + // For additional limitations, see https://cloud.google.com/bigquery/querying-data#largequeryresults + AllowLargeResults bool + + // Priority specifies the priority with which to schedule the query. + // The default priority is InteractivePriority. + // For more information, see https://cloud.google.com/bigquery/querying-data#batchqueries + Priority QueryPriority + + // MaxBillingTier sets the maximum billing tier for a Query. + // Queries that have resource usage beyond this tier will fail (without + // incurring a charge). If this field is zero, the project default will be used. + MaxBillingTier int + + // MaxBytesBilled limits the number of bytes billed for + // this job. Queries that would exceed this limit will fail (without incurring + // a charge). + // If this field is less than 1, the project default will be + // used. + MaxBytesBilled int64 + + // UseStandardSQL causes the query to use standard SQL. The default. + // Deprecated: use UseLegacySQL. + UseStandardSQL bool + + // UseLegacySQL causes the query to use legacy SQL. + UseLegacySQL bool + + // Parameters is a list of query parameters. The presence of parameters + // implies the use of standard SQL. + // If the query uses positional syntax ("?"), then no parameter may have a name. + // If the query uses named syntax ("@p"), then all parameters must have names. + // It is illegal to mix positional and named syntax. + Parameters []QueryParameter + + // TimePartitioning specifies time-based partitioning + // for the destination table. + TimePartitioning *TimePartitioning + + // The labels associated with this job. + Labels map[string]string + + // If true, don't actually run this job. A valid query will return a mostly + // empty response with some processing statistics, while an invalid query will + // return the same error it would if it wasn't a dry run. + // + // Query.Read will fail with dry-run queries. Call Query.Run instead, and then + // call LastStatus on the returned job to get statistics. Calling Status on a + // dry-run job will fail. + DryRun bool + + // Custom encryption configuration (e.g., Cloud KMS keys). 
+ DestinationEncryptionConfig *EncryptionConfig +} + +func (qc *QueryConfig) toBQ() (*bq.JobConfiguration, error) { + qconf := &bq.JobConfigurationQuery{ + Query: qc.Q, + CreateDisposition: string(qc.CreateDisposition), + WriteDisposition: string(qc.WriteDisposition), + AllowLargeResults: qc.AllowLargeResults, + Priority: string(qc.Priority), + MaximumBytesBilled: qc.MaxBytesBilled, + TimePartitioning: qc.TimePartitioning.toBQ(), + DestinationEncryptionConfiguration: qc.DestinationEncryptionConfig.toBQ(), + } + if len(qc.TableDefinitions) > 0 { + qconf.TableDefinitions = make(map[string]bq.ExternalDataConfiguration) + } + for name, data := range qc.TableDefinitions { + qconf.TableDefinitions[name] = data.toBQ() + } + if qc.DefaultProjectID != "" || qc.DefaultDatasetID != "" { + qconf.DefaultDataset = &bq.DatasetReference{ + DatasetId: qc.DefaultDatasetID, + ProjectId: qc.DefaultProjectID, + } + } + if tier := int64(qc.MaxBillingTier); tier > 0 { + qconf.MaximumBillingTier = &tier + } + f := false + if qc.DisableQueryCache { + qconf.UseQueryCache = &f + } + if qc.DisableFlattenedResults { + qconf.FlattenResults = &f + // DisableFlattenResults implies AllowLargeResults. + qconf.AllowLargeResults = true + } + if qc.UseStandardSQL && qc.UseLegacySQL { + return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL") + } + if len(qc.Parameters) > 0 && qc.UseLegacySQL { + return nil, errors.New("bigquery: cannot provide both Parameters (implying standard SQL) and UseLegacySQL") + } + if qc.UseLegacySQL { + qconf.UseLegacySql = true + } else { + qconf.UseLegacySql = false + qconf.ForceSendFields = append(qconf.ForceSendFields, "UseLegacySql") + } + if qc.Dst != nil && !qc.Dst.implicitTable() { + qconf.DestinationTable = qc.Dst.toBQ() + } + for _, p := range qc.Parameters { + qp, err := p.toBQ() + if err != nil { + return nil, err + } + qconf.QueryParameters = append(qconf.QueryParameters, qp) + } + return &bq.JobConfiguration{ + Labels: qc.Labels, + DryRun: qc.DryRun, + Query: qconf, + }, nil +} + +func bqToQueryConfig(q *bq.JobConfiguration, c *Client) (*QueryConfig, error) { + qq := q.Query + qc := &QueryConfig{ + Labels: q.Labels, + DryRun: q.DryRun, + Q: qq.Query, + CreateDisposition: TableCreateDisposition(qq.CreateDisposition), + WriteDisposition: TableWriteDisposition(qq.WriteDisposition), + AllowLargeResults: qq.AllowLargeResults, + Priority: QueryPriority(qq.Priority), + MaxBytesBilled: qq.MaximumBytesBilled, + UseLegacySQL: qq.UseLegacySql, + UseStandardSQL: !qq.UseLegacySql, + TimePartitioning: bqToTimePartitioning(qq.TimePartitioning), + } + if len(qq.TableDefinitions) > 0 { + qc.TableDefinitions = make(map[string]ExternalData) + } + for name, qedc := range qq.TableDefinitions { + edc, err := bqToExternalDataConfig(&qedc) + if err != nil { + return nil, err + } + qc.TableDefinitions[name] = edc + } + if qq.DefaultDataset != nil { + qc.DefaultProjectID = qq.DefaultDataset.ProjectId + qc.DefaultDatasetID = qq.DefaultDataset.DatasetId + } + if qq.MaximumBillingTier != nil { + qc.MaxBillingTier = int(*qq.MaximumBillingTier) + } + if qq.UseQueryCache != nil && !*qq.UseQueryCache { + qc.DisableQueryCache = true + } + if qq.FlattenResults != nil && !*qq.FlattenResults { + qc.DisableFlattenedResults = true + } + if qq.DestinationTable != nil { + qc.Dst = bqToTable(qq.DestinationTable, c) + } + for _, qp := range qq.QueryParameters { + p, err := bqToQueryParameter(qp) + if err != nil { + return nil, err + } + qc.Parameters = append(qc.Parameters, p) + } + return qc, nil 
+} + +// QueryPriority specifies a priority with which a query is to be executed. +type QueryPriority string + +const ( + BatchPriority QueryPriority = "BATCH" + InteractivePriority QueryPriority = "INTERACTIVE" +) + +// A Query queries data from a BigQuery table. Use Client.Query to create a Query. +type Query struct { + JobIDConfig + QueryConfig + client *Client +} + +// Query creates a query with string q. +// The returned Query may optionally be further configured before its Run method is called. +func (c *Client) Query(q string) *Query { + return &Query{ + client: c, + QueryConfig: QueryConfig{Q: q}, + } +} + +// Run initiates a query job. +func (q *Query) Run(ctx context.Context) (j *Job, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Query.Run") + defer func() { trace.EndSpan(ctx, err) }() + + job, err := q.newJob() + if err != nil { + return nil, err + } + j, err = q.client.insertJob(ctx, job, nil) + if err != nil { + return nil, err + } + return j, nil +} + +func (q *Query) newJob() (*bq.Job, error) { + config, err := q.QueryConfig.toBQ() + if err != nil { + return nil, err + } + return &bq.Job{ + JobReference: q.JobIDConfig.createJobRef(q.client), + Configuration: config, + }, nil +} + +// Read submits a query for execution and returns the results via a RowIterator. +// It is a shorthand for Query.Run followed by Job.Read. +func (q *Query) Read(ctx context.Context) (*RowIterator, error) { + job, err := q.Run(ctx) + if err != nil { + return nil, err + } + return job.Read(ctx) +} diff --git a/vendor/cloud.google.com/go/bigquery/query_test.go b/vendor/cloud.google.com/go/bigquery/query_test.go new file mode 100644 index 0000000..68ed63e --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/query_test.go @@ -0,0 +1,402 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
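Before the tests, here is how Query, QueryConfig, and QueryParameter from the files above fit together for a parameterized query. A minimal sketch, not part of this commit; the project ID is a placeholder and the public Shakespeare sample table is used only for illustration:

package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "my-project") // placeholder project ID
	if err != nil {
		log.Fatal(err)
	}
	// Named parameters imply standard SQL; also setting UseLegacySQL would
	// make job creation fail.
	q := client.Query(
		"SELECT word, word_count FROM `bigquery-public-data.samples.shakespeare` " +
			"WHERE corpus = @corpus LIMIT 5")
	q.Parameters = []bigquery.QueryParameter{{Name: "corpus", Value: "hamlet"}}

	it, err := q.Read(ctx) // Query.Run followed by Job.Read
	if err != nil {
		log.Fatal(err)
	}
	for {
		var row []bigquery.Value
		err := it.Next(&row)
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(row)
	}
}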
+ +package bigquery + +import ( + "testing" + "time" + + "github.com/google/go-cmp/cmp" + + "cloud.google.com/go/internal/testutil" + + bq "google.golang.org/api/bigquery/v2" +) + +func defaultQueryJob() *bq.Job { + return &bq.Job{ + JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"}, + Configuration: &bq.JobConfiguration{ + Query: &bq.JobConfigurationQuery{ + DestinationTable: &bq.TableReference{ + ProjectId: "client-project-id", + DatasetId: "dataset-id", + TableId: "table-id", + }, + Query: "query string", + DefaultDataset: &bq.DatasetReference{ + ProjectId: "def-project-id", + DatasetId: "def-dataset-id", + }, + UseLegacySql: false, + ForceSendFields: []string{"UseLegacySql"}, + }, + }, + } +} + +var defaultQuery = &QueryConfig{ + Q: "query string", + DefaultProjectID: "def-project-id", + DefaultDatasetID: "def-dataset-id", +} + +func TestQuery(t *testing.T) { + defer fixRandomID("RANDOM")() + c := &Client{ + projectID: "client-project-id", + } + testCases := []struct { + dst *Table + src *QueryConfig + jobIDConfig JobIDConfig + want *bq.Job + }{ + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: defaultQuery, + want: defaultQueryJob(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: &QueryConfig{ + Q: "query string", + Labels: map[string]string{"a": "b"}, + DryRun: true, + }, + want: func() *bq.Job { + j := defaultQueryJob() + j.Configuration.Labels = map[string]string{"a": "b"} + j.Configuration.DryRun = true + j.Configuration.Query.DefaultDataset = nil + return j + }(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + jobIDConfig: JobIDConfig{JobID: "jobID", AddJobIDSuffix: true}, + src: &QueryConfig{Q: "query string"}, + want: func() *bq.Job { + j := defaultQueryJob() + j.Configuration.Query.DefaultDataset = nil + j.JobReference.JobId = "jobID-RANDOM" + return j + }(), + }, + { + dst: &Table{}, + src: defaultQuery, + want: func() *bq.Job { + j := defaultQueryJob() + j.Configuration.Query.DestinationTable = nil + return j + }(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: &QueryConfig{ + Q: "query string", + TableDefinitions: map[string]ExternalData{ + "atable": func() *GCSReference { + g := NewGCSReference("uri") + g.AllowJaggedRows = true + g.AllowQuotedNewlines = true + g.Compression = Gzip + g.Encoding = UTF_8 + g.FieldDelimiter = ";" + g.IgnoreUnknownValues = true + g.MaxBadRecords = 1 + g.Quote = "'" + g.SkipLeadingRows = 2 + g.Schema = Schema{{Name: "name", Type: StringFieldType}} + return g + }(), + }, + }, + want: func() *bq.Job { + j := defaultQueryJob() + j.Configuration.Query.DefaultDataset = nil + td := make(map[string]bq.ExternalDataConfiguration) + quote := "'" + td["atable"] = bq.ExternalDataConfiguration{ + Compression: "GZIP", + IgnoreUnknownValues: true, + MaxBadRecords: 1, + SourceFormat: "CSV", // must be explicitly set. 
+					SourceUris: []string{"uri"},
+					CsvOptions: &bq.CsvOptions{
+						AllowJaggedRows: true,
+						AllowQuotedNewlines: true,
+						Encoding: "UTF-8",
+						FieldDelimiter: ";",
+						SkipLeadingRows: 2,
+						Quote: &quote,
+					},
+					Schema: &bq.TableSchema{
+						Fields: []*bq.TableFieldSchema{
+							{Name: "name", Type: "STRING"},
+						},
+					},
+				}
+				j.Configuration.Query.TableDefinitions = td
+				return j
+			}(),
+		},
+		{
+			dst: &Table{
+				ProjectID: "project-id",
+				DatasetID: "dataset-id",
+				TableID: "table-id",
+			},
+			src: &QueryConfig{
+				Q: "query string",
+				DefaultProjectID: "def-project-id",
+				DefaultDatasetID: "def-dataset-id",
+				CreateDisposition: CreateNever,
+				WriteDisposition: WriteTruncate,
+			},
+			want: func() *bq.Job {
+				j := defaultQueryJob()
+				j.Configuration.Query.DestinationTable.ProjectId = "project-id"
+				j.Configuration.Query.WriteDisposition = "WRITE_TRUNCATE"
+				j.Configuration.Query.CreateDisposition = "CREATE_NEVER"
+				return j
+			}(),
+		},
+		{
+			dst: c.Dataset("dataset-id").Table("table-id"),
+			src: &QueryConfig{
+				Q: "query string",
+				DefaultProjectID: "def-project-id",
+				DefaultDatasetID: "def-dataset-id",
+				DisableQueryCache: true,
+			},
+			want: func() *bq.Job {
+				j := defaultQueryJob()
+				f := false
+				j.Configuration.Query.UseQueryCache = &f
+				return j
+			}(),
+		},
+		{
+			dst: c.Dataset("dataset-id").Table("table-id"),
+			src: &QueryConfig{
+				Q: "query string",
+				DefaultProjectID: "def-project-id",
+				DefaultDatasetID: "def-dataset-id",
+				AllowLargeResults: true,
+			},
+			want: func() *bq.Job {
+				j := defaultQueryJob()
+				j.Configuration.Query.AllowLargeResults = true
+				return j
+			}(),
+		},
+		{
+			dst: c.Dataset("dataset-id").Table("table-id"),
+			src: &QueryConfig{
+				Q: "query string",
+				DefaultProjectID: "def-project-id",
+				DefaultDatasetID: "def-dataset-id",
+				DisableFlattenedResults: true,
+			},
+			want: func() *bq.Job {
+				j := defaultQueryJob()
+				f := false
+				j.Configuration.Query.FlattenResults = &f
+				j.Configuration.Query.AllowLargeResults = true
+				return j
+			}(),
+		},
+		{
+			dst: c.Dataset("dataset-id").Table("table-id"),
+			src: &QueryConfig{
+				Q: "query string",
+				DefaultProjectID: "def-project-id",
+				DefaultDatasetID: "def-dataset-id",
+				Priority: QueryPriority("low"),
+			},
+			want: func() *bq.Job {
+				j := defaultQueryJob()
+				j.Configuration.Query.Priority = "low"
+				return j
+			}(),
+		},
+		{
+			dst: c.Dataset("dataset-id").Table("table-id"),
+			src: &QueryConfig{
+				Q: "query string",
+				DefaultProjectID: "def-project-id",
+				DefaultDatasetID: "def-dataset-id",
+				MaxBillingTier: 3,
+				MaxBytesBilled: 5,
+			},
+			want: func() *bq.Job {
+				j := defaultQueryJob()
+				tier := int64(3)
+				j.Configuration.Query.MaximumBillingTier = &tier
+				j.Configuration.Query.MaximumBytesBilled = 5
+				return j
+			}(),
+		},
+		{
+			dst: c.Dataset("dataset-id").Table("table-id"),
+			src: &QueryConfig{
+				Q: "query string",
+				DefaultProjectID: "def-project-id",
+				DefaultDatasetID: "def-dataset-id",
+				UseStandardSQL: true,
+			},
+			want: defaultQueryJob(),
+		},
+		{
+			dst: c.Dataset("dataset-id").Table("table-id"),
+			src: &QueryConfig{
+				Q: "query string",
+				DefaultProjectID: "def-project-id",
+				DefaultDatasetID: "def-dataset-id",
+				UseLegacySQL: true,
+			},
+			want: func() *bq.Job {
+				j := defaultQueryJob()
+				j.Configuration.Query.UseLegacySql = true
+				j.Configuration.Query.ForceSendFields = nil
+				return j
+			}(),
+		},
+	}
+	for i, tc := range testCases {
+		query := c.Query("")
+		query.JobIDConfig = tc.jobIDConfig
+		query.QueryConfig = *tc.src
+		query.Dst = tc.dst
+		got, err := query.newJob()
+		if err != nil {
+			t.Errorf("#%d: err calling query: %v", i, err)
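+			// The job could not be built; skip the round-trip check below.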
continue + } + checkJob(t, i, got, tc.want) + + // Round-trip. + jc, err := bqToJobConfig(got.Configuration, c) + if err != nil { + t.Fatalf("#%d: %v", i, err) + } + wantConfig := query.QueryConfig + // We set AllowLargeResults to true when DisableFlattenedResults is true. + if wantConfig.DisableFlattenedResults { + wantConfig.AllowLargeResults = true + } + // A QueryConfig with neither UseXXXSQL field set is equivalent + // to one where UseStandardSQL = true. + if !wantConfig.UseLegacySQL && !wantConfig.UseStandardSQL { + wantConfig.UseStandardSQL = true + } + // Treat nil and empty tables the same, and ignore the client. + tableEqual := func(t1, t2 *Table) bool { + if t1 == nil { + t1 = &Table{} + } + if t2 == nil { + t2 = &Table{} + } + return t1.ProjectID == t2.ProjectID && t1.DatasetID == t2.DatasetID && t1.TableID == t2.TableID + } + // A table definition that is a GCSReference round-trips as an ExternalDataConfig. + // TODO(jba): see if there is a way to express this with a transformer. + gcsRefToEDC := func(g *GCSReference) *ExternalDataConfig { + q := g.toBQ() + e, _ := bqToExternalDataConfig(&q) + return e + } + externalDataEqual := func(e1, e2 ExternalData) bool { + if r, ok := e1.(*GCSReference); ok { + e1 = gcsRefToEDC(r) + } + if r, ok := e2.(*GCSReference); ok { + e2 = gcsRefToEDC(r) + } + return cmp.Equal(e1, e2) + } + diff := testutil.Diff(jc.(*QueryConfig), &wantConfig, + cmp.Comparer(tableEqual), + cmp.Comparer(externalDataEqual), + ) + if diff != "" { + t.Errorf("#%d: (got=-, want=+:\n%s", i, diff) + } + } +} + +func TestConfiguringQuery(t *testing.T) { + c := &Client{ + projectID: "project-id", + } + + query := c.Query("q") + query.JobID = "ajob" + query.DefaultProjectID = "def-project-id" + query.DefaultDatasetID = "def-dataset-id" + query.TimePartitioning = &TimePartitioning{Expiration: 1234 * time.Second, Field: "f"} + query.DestinationEncryptionConfig = &EncryptionConfig{KMSKeyName: "keyName"} + // Note: Other configuration fields are tested in other tests above. + // A lot of that can be consolidated once Client.Copy is gone. 
+ + want := &bq.Job{ + Configuration: &bq.JobConfiguration{ + Query: &bq.JobConfigurationQuery{ + Query: "q", + DefaultDataset: &bq.DatasetReference{ + ProjectId: "def-project-id", + DatasetId: "def-dataset-id", + }, + UseLegacySql: false, + TimePartitioning: &bq.TimePartitioning{ExpirationMs: 1234000, Field: "f", Type: "DAY"}, + DestinationEncryptionConfiguration: &bq.EncryptionConfiguration{KmsKeyName: "keyName"}, + ForceSendFields: []string{"UseLegacySql"}, + }, + }, + JobReference: &bq.JobReference{ + JobId: "ajob", + ProjectId: "project-id", + }, + } + + got, err := query.newJob() + if err != nil { + t.Fatalf("err calling Query.newJob: %v", err) + } + if diff := testutil.Diff(got, want); diff != "" { + t.Errorf("querying: -got +want:\n%s", diff) + } +} + +func TestQueryLegacySQL(t *testing.T) { + c := &Client{projectID: "project-id"} + q := c.Query("q") + q.UseStandardSQL = true + q.UseLegacySQL = true + _, err := q.newJob() + if err == nil { + t.Error("UseStandardSQL and UseLegacySQL: got nil, want error") + } + q = c.Query("q") + q.Parameters = []QueryParameter{{Name: "p", Value: 3}} + q.UseLegacySQL = true + _, err = q.newJob() + if err == nil { + t.Error("Parameters and UseLegacySQL: got nil, want error") + } +} diff --git a/vendor/cloud.google.com/go/bigquery/read_test.go b/vendor/cloud.google.com/go/bigquery/read_test.go new file mode 100644 index 0000000..129348a --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/read_test.go @@ -0,0 +1,235 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "errors" + "testing" + + "github.com/google/go-cmp/cmp" + + "cloud.google.com/go/internal/testutil" + + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" + "google.golang.org/api/iterator" +) + +type pageFetcherArgs struct { + table *Table + schema Schema + startIndex uint64 + pageSize int64 + pageToken string +} + +// pageFetcherReadStub services read requests by returning data from an in-memory list of values. +type pageFetcherReadStub struct { + // values and pageTokens are used as sources of data to return in response to calls to readTabledata or readQuery. + values [][][]Value // contains pages / rows / columns. + pageTokens map[string]string // maps incoming page token to returned page token. + + // arguments are recorded for later inspection. 
+ calls []pageFetcherArgs +} + +func (s *pageFetcherReadStub) fetchPage(ctx context.Context, t *Table, schema Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error) { + s.calls = append(s.calls, + pageFetcherArgs{t, schema, startIndex, pageSize, pageToken}) + result := &fetchPageResult{ + pageToken: s.pageTokens[pageToken], + rows: s.values[0], + } + s.values = s.values[1:] + return result, nil +} + +func waitForQueryStub(context.Context, string) (Schema, error) { + return nil, nil +} + +func TestRead(t *testing.T) { + // The data for the service stub to return is populated for each test case in the testCases for loop. + ctx := context.Background() + c := &Client{projectID: "project-id"} + pf := &pageFetcherReadStub{} + queryJob := &Job{ + projectID: "project-id", + jobID: "job-id", + c: c, + config: &bq.JobConfiguration{ + Query: &bq.JobConfigurationQuery{ + DestinationTable: &bq.TableReference{ + ProjectId: "project-id", + DatasetId: "dataset-id", + TableId: "table-id", + }, + }, + }, + } + + for _, readFunc := range []func() *RowIterator{ + func() *RowIterator { + return c.Dataset("dataset-id").Table("table-id").read(ctx, pf.fetchPage) + }, + func() *RowIterator { + it, err := queryJob.read(ctx, waitForQueryStub, pf.fetchPage) + if err != nil { + t.Fatal(err) + } + return it + }, + } { + testCases := []struct { + data [][][]Value + pageTokens map[string]string + want [][]Value + }{ + { + data: [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}}, + pageTokens: map[string]string{"": "a", "a": ""}, + want: [][]Value{{1, 2}, {11, 12}, {30, 40}, {31, 41}}, + }, + { + data: [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}}, + pageTokens: map[string]string{"": ""}, // no more pages after first one. + want: [][]Value{{1, 2}, {11, 12}}, + }, + } + for _, tc := range testCases { + pf.values = tc.data + pf.pageTokens = tc.pageTokens + if got, ok := collectValues(t, readFunc()); ok { + if !testutil.Equal(got, tc.want) { + t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want) + } + } + } + } +} + +func collectValues(t *testing.T, it *RowIterator) ([][]Value, bool) { + var got [][]Value + for { + var vals []Value + err := it.Next(&vals) + if err == iterator.Done { + break + } + if err != nil { + t.Errorf("err calling Next: %v", err) + return nil, false + } + got = append(got, vals) + } + return got, true +} + +func TestNoMoreValues(t *testing.T) { + c := &Client{projectID: "project-id"} + pf := &pageFetcherReadStub{ + values: [][][]Value{{{1, 2}, {11, 12}}}, + } + it := c.Dataset("dataset-id").Table("table-id").read(context.Background(), pf.fetchPage) + var vals []Value + // We expect to retrieve two values and then fail on the next attempt. + if err := it.Next(&vals); err != nil { + t.Fatalf("Next: got: %v: want: nil", err) + } + if err := it.Next(&vals); err != nil { + t.Fatalf("Next: got: %v: want: nil", err) + } + if err := it.Next(&vals); err != iterator.Done { + t.Fatalf("Next: got: %v: want: iterator.Done", err) + } +} + +var errBang = errors.New("bang!") + +func errorFetchPage(context.Context, *Table, Schema, uint64, int64, string) (*fetchPageResult, error) { + return nil, errBang +} + +func TestReadError(t *testing.T) { + // test that service read errors are propagated back to the caller. 
+ c := &Client{projectID: "project-id"} + it := c.Dataset("dataset-id").Table("table-id").read(context.Background(), errorFetchPage) + var vals []Value + if err := it.Next(&vals); err != errBang { + t.Fatalf("Get: got: %v: want: %v", err, errBang) + } +} + +func TestReadTabledataOptions(t *testing.T) { + // test that read options are propagated. + s := &pageFetcherReadStub{ + values: [][][]Value{{{1, 2}}}, + } + c := &Client{projectID: "project-id"} + tr := c.Dataset("dataset-id").Table("table-id") + it := tr.read(context.Background(), s.fetchPage) + it.PageInfo().MaxSize = 5 + var vals []Value + if err := it.Next(&vals); err != nil { + t.Fatal(err) + } + want := []pageFetcherArgs{{ + table: tr, + pageSize: 5, + pageToken: "", + }} + if diff := testutil.Diff(s.calls, want, cmp.AllowUnexported(pageFetcherArgs{}, pageFetcherReadStub{}, Table{}, Client{})); diff != "" { + t.Errorf("reading (got=-, want=+):\n%s", diff) + } +} + +func TestReadQueryOptions(t *testing.T) { + // test that read options are propagated. + c := &Client{projectID: "project-id"} + pf := &pageFetcherReadStub{ + values: [][][]Value{{{1, 2}}}, + } + tr := &bq.TableReference{ + ProjectId: "project-id", + DatasetId: "dataset-id", + TableId: "table-id", + } + queryJob := &Job{ + projectID: "project-id", + jobID: "job-id", + c: c, + config: &bq.JobConfiguration{ + Query: &bq.JobConfigurationQuery{DestinationTable: tr}, + }, + } + it, err := queryJob.read(context.Background(), waitForQueryStub, pf.fetchPage) + if err != nil { + t.Fatalf("err calling Read: %v", err) + } + it.PageInfo().MaxSize = 5 + var vals []Value + if err := it.Next(&vals); err != nil { + t.Fatalf("Next: got: %v: want: nil", err) + } + + want := []pageFetcherArgs{{ + table: bqToTable(tr, c), + pageSize: 5, + pageToken: "", + }} + if !testutil.Equal(pf.calls, want, cmp.AllowUnexported(pageFetcherArgs{}, Table{}, Client{})) { + t.Errorf("reading: got:\n%v\nwant:\n%v", pf.calls, want) + } +} diff --git a/vendor/cloud.google.com/go/bigquery/schema.go b/vendor/cloud.google.com/go/bigquery/schema.go new file mode 100644 index 0000000..7371f84 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/schema.go @@ -0,0 +1,387 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "errors" + "fmt" + "reflect" + + "cloud.google.com/go/internal/atomiccache" + bq "google.golang.org/api/bigquery/v2" +) + +// Schema describes the fields in a table or query result. +type Schema []*FieldSchema + +type FieldSchema struct { + // The field name. + // Must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_), + // and must start with a letter or underscore. + // The maximum length is 128 characters. + Name string + + // A description of the field. The maximum length is 16,384 characters. + Description string + + // Whether the field may contain multiple values. + Repeated bool + // Whether the field is required. Ignored if Repeated is true. 
+	Required bool
+
+	// The field data type. If Type is Record, then this field contains a nested schema,
+	// which is described by Schema.
+	Type FieldType
+	// Describes the nested schema if Type is set to Record.
+	Schema Schema
+}
+
+func (fs *FieldSchema) toBQ() *bq.TableFieldSchema {
+	tfs := &bq.TableFieldSchema{
+		Description: fs.Description,
+		Name:        fs.Name,
+		Type:        string(fs.Type),
+	}
+
+	if fs.Repeated {
+		tfs.Mode = "REPEATED"
+	} else if fs.Required {
+		tfs.Mode = "REQUIRED"
+	} // else leave as default, which is interpreted as NULLABLE.
+
+	for _, f := range fs.Schema {
+		tfs.Fields = append(tfs.Fields, f.toBQ())
+	}
+
+	return tfs
+}
+
+func (s Schema) toBQ() *bq.TableSchema {
+	var fields []*bq.TableFieldSchema
+	for _, f := range s {
+		fields = append(fields, f.toBQ())
+	}
+	return &bq.TableSchema{Fields: fields}
+}
+
+func bqToFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema {
+	fs := &FieldSchema{
+		Description: tfs.Description,
+		Name:        tfs.Name,
+		Repeated:    tfs.Mode == "REPEATED",
+		Required:    tfs.Mode == "REQUIRED",
+		Type:        FieldType(tfs.Type),
+	}
+
+	for _, f := range tfs.Fields {
+		fs.Schema = append(fs.Schema, bqToFieldSchema(f))
+	}
+	return fs
+}
+
+func bqToSchema(ts *bq.TableSchema) Schema {
+	if ts == nil {
+		return nil
+	}
+	var s Schema
+	for _, f := range ts.Fields {
+		s = append(s, bqToFieldSchema(f))
+	}
+	return s
+}
+
+type FieldType string
+
+const (
+	StringFieldType    FieldType = "STRING"
+	BytesFieldType     FieldType = "BYTES"
+	IntegerFieldType   FieldType = "INTEGER"
+	FloatFieldType     FieldType = "FLOAT"
+	BooleanFieldType   FieldType = "BOOLEAN"
+	TimestampFieldType FieldType = "TIMESTAMP"
+	RecordFieldType    FieldType = "RECORD"
+	DateFieldType      FieldType = "DATE"
+	TimeFieldType      FieldType = "TIME"
+	DateTimeFieldType  FieldType = "DATETIME"
+)
+
+var (
+	errNoStruct             = errors.New("bigquery: can only infer schema from struct or pointer to struct")
+	errUnsupportedFieldType = errors.New("bigquery: unsupported type of field in struct")
+	errInvalidFieldName     = errors.New("bigquery: invalid name of field in struct")
+	errBadNullable          = errors.New(`bigquery: use "nullable" only for []byte and struct pointers; for all other types, use a NullXXX type`)
+)
+
+var typeOfByteSlice = reflect.TypeOf([]byte{})
+
+// InferSchema tries to derive a BigQuery schema from the supplied struct value.
+// Each exported struct field is mapped to a field in the schema.
+//
+// The following BigQuery types are inferred from the corresponding Go types.
+// (This is the same mapping as that used for RowIterator.Next.) Fields inferred
+// from these types are marked required (non-nullable).
+//
+//   STRING      string
+//   BOOL        bool
+//   INTEGER     int, int8, int16, int32, int64, uint8, uint16, uint32
+//   FLOAT       float32, float64
+//   BYTES       []byte
+//   TIMESTAMP   time.Time
+//   DATE        civil.Date
+//   TIME        civil.Time
+//   DATETIME    civil.DateTime
+//
+// A Go slice or array type is inferred to be a BigQuery repeated field of the
+// element type. The element type must be one of the above listed types.
+//
+// Nullable fields are inferred from the NullXXX types, declared in this package:
+//
+//   STRING      NullString
+//   BOOL        NullBool
+//   INTEGER     NullInt64
+//   FLOAT       NullFloat64
+//   TIMESTAMP   NullTimestamp
+//   DATE        NullDate
+//   TIME        NullTime
+//   DATETIME    NullDateTime
+//
+// For a nullable BYTES field, use the type []byte and tag the field "nullable" (see below).
+//
+// A struct field that is of struct type is inferred to be a required field of type
+// RECORD with a schema inferred recursively. 
For backwards compatibility, a field of +// type pointer to struct is also inferred to be required. To get a nullable RECORD +// field, use the "nullable" tag (see below). +// +// InferSchema returns an error if any of the examined fields is of type uint, +// uint64, uintptr, map, interface, complex64, complex128, func, or chan. Future +// versions may handle these cases without error. +// +// Recursively defined structs are also disallowed. +// +// Struct fields may be tagged in a way similar to the encoding/json package. +// A tag of the form +// bigquery:"name" +// uses "name" instead of the struct field name as the BigQuery field name. +// A tag of the form +// bigquery:"-" +// omits the field from the inferred schema. +// The "nullable" option marks the field as nullable (not required). It is only +// needed for []byte and pointer-to-struct fields, and cannot appear on other +// fields. In this example, the Go name of the field is retained: +// bigquery:",nullable" +func InferSchema(st interface{}) (Schema, error) { + return inferSchemaReflectCached(reflect.TypeOf(st)) +} + +// TODO(jba): replace with sync.Map for Go 1.9. +var schemaCache atomiccache.Cache + +type cacheVal struct { + schema Schema + err error +} + +func inferSchemaReflectCached(t reflect.Type) (Schema, error) { + cv := schemaCache.Get(t, func() interface{} { + s, err := inferSchemaReflect(t) + return cacheVal{s, err} + }).(cacheVal) + return cv.schema, cv.err +} + +func inferSchemaReflect(t reflect.Type) (Schema, error) { + rec, err := hasRecursiveType(t, nil) + if err != nil { + return nil, err + } + if rec { + return nil, fmt.Errorf("bigquery: schema inference for recursive type %s", t) + } + return inferStruct(t) +} + +func inferStruct(t reflect.Type) (Schema, error) { + switch t.Kind() { + case reflect.Ptr: + if t.Elem().Kind() != reflect.Struct { + return nil, errNoStruct + } + t = t.Elem() + fallthrough + + case reflect.Struct: + return inferFields(t) + default: + return nil, errNoStruct + } +} + +// inferFieldSchema infers the FieldSchema for a Go type +func inferFieldSchema(rt reflect.Type, nullable bool) (*FieldSchema, error) { + // Only []byte and struct pointers can be tagged nullable. + if nullable && !(rt == typeOfByteSlice || rt.Kind() == reflect.Ptr && rt.Elem().Kind() == reflect.Struct) { + return nil, errBadNullable + } + switch rt { + case typeOfByteSlice: + return &FieldSchema{Required: !nullable, Type: BytesFieldType}, nil + case typeOfGoTime: + return &FieldSchema{Required: true, Type: TimestampFieldType}, nil + case typeOfDate: + return &FieldSchema{Required: true, Type: DateFieldType}, nil + case typeOfTime: + return &FieldSchema{Required: true, Type: TimeFieldType}, nil + case typeOfDateTime: + return &FieldSchema{Required: true, Type: DateTimeFieldType}, nil + } + if ft := nullableFieldType(rt); ft != "" { + return &FieldSchema{Required: false, Type: ft}, nil + } + if isSupportedIntType(rt) || isSupportedUintType(rt) { + return &FieldSchema{Required: true, Type: IntegerFieldType}, nil + } + switch rt.Kind() { + case reflect.Slice, reflect.Array: + et := rt.Elem() + if et != typeOfByteSlice && (et.Kind() == reflect.Slice || et.Kind() == reflect.Array) { + // Multi dimensional slices/arrays are not supported by BigQuery + return nil, errUnsupportedFieldType + } + if nullableFieldType(et) != "" { + // Repeated nullable types are not supported by BigQuery. 
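+			// For example, a []NullInt64 field is rejected here; to get a
+			// REPEATED INTEGER column, declare the field as []int64 instead
+			// (a REPEATED column cannot also be NULLABLE in BigQuery).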
+			return nil, errUnsupportedFieldType
+		}
+		f, err := inferFieldSchema(et, false)
+		if err != nil {
+			return nil, err
+		}
+		f.Repeated = true
+		f.Required = false
+		return f, nil
+	case reflect.Ptr:
+		if rt.Elem().Kind() != reflect.Struct {
+			return nil, errUnsupportedFieldType
+		}
+		fallthrough
+	case reflect.Struct:
+		nested, err := inferStruct(rt)
+		if err != nil {
+			return nil, err
+		}
+		return &FieldSchema{Required: !nullable, Type: RecordFieldType, Schema: nested}, nil
+	case reflect.String:
+		return &FieldSchema{Required: !nullable, Type: StringFieldType}, nil
+	case reflect.Bool:
+		return &FieldSchema{Required: !nullable, Type: BooleanFieldType}, nil
+	case reflect.Float32, reflect.Float64:
+		return &FieldSchema{Required: !nullable, Type: FloatFieldType}, nil
+	default:
+		return nil, errUnsupportedFieldType
+	}
+}
+
+// inferFields extracts all exported field types from a struct type.
+func inferFields(rt reflect.Type) (Schema, error) {
+	var s Schema
+	fields, err := fieldCache.Fields(rt)
+	if err != nil {
+		return nil, err
+	}
+	for _, field := range fields {
+		var nullable bool
+		for _, opt := range field.ParsedTag.([]string) {
+			if opt == nullableTagOption {
+				nullable = true
+				break
+			}
+		}
+		f, err := inferFieldSchema(field.Type, nullable)
+		if err != nil {
+			return nil, err
+		}
+		f.Name = field.Name
+		s = append(s, f)
+	}
+	return s, nil
+}
+
+// isSupportedIntType reports whether t is an int type that can be properly
+// represented by the BigQuery INTEGER/INT64 type.
+func isSupportedIntType(t reflect.Type) bool {
+	switch t.Kind() {
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		return true
+	default:
+		return false
+	}
+}
+
+// isSupportedUintType reports whether t is a uint type that can be properly
+// represented by the BigQuery INTEGER/INT64 type.
+func isSupportedUintType(t reflect.Type) bool {
+	switch t.Kind() {
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32:
+		return true
+	default:
+		return false
+	}
+}
+
+// typeList is a linked list of reflect.Types.
+type typeList struct {
+	t    reflect.Type
+	next *typeList
+}
+
+func (l *typeList) has(t reflect.Type) bool {
+	for l != nil {
+		if l.t == t {
+			return true
+		}
+		l = l.next
+	}
+	return false
+}
+
+// hasRecursiveType reports whether t or any type inside t refers to itself, directly or indirectly,
+// via exported fields. (Schema inference ignores unexported fields.)
+func hasRecursiveType(t reflect.Type, seen *typeList) (bool, error) {
+	for t.Kind() == reflect.Ptr || t.Kind() == reflect.Slice || t.Kind() == reflect.Array {
+		t = t.Elem()
+	}
+	if t.Kind() != reflect.Struct {
+		return false, nil
+	}
+	if seen.has(t) {
+		return true, nil
+	}
+	fields, err := fieldCache.Fields(t)
+	if err != nil {
+		return false, err
+	}
+	seen = &typeList{t, seen}
+	// Because seen is a linked list, additions to it from one field's
+	// recursive call will not affect the value for subsequent fields' calls.
+	for _, field := range fields {
+		ok, err := hasRecursiveType(field.Type, seen)
+		if err != nil {
+			return false, err
+		}
+		if ok {
+			return true, nil
+		}
+	}
+	return false, nil
+}
diff --git a/vendor/cloud.google.com/go/bigquery/schema_test.go b/vendor/cloud.google.com/go/bigquery/schema_test.go
new file mode 100644
index 0000000..e30e2c3
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/schema_test.go
@@ -0,0 +1,897 @@
+// Copyright 2015 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "fmt" + "reflect" + "testing" + "time" + + "cloud.google.com/go/civil" + "cloud.google.com/go/internal/pretty" + "cloud.google.com/go/internal/testutil" + + bq "google.golang.org/api/bigquery/v2" +) + +func (fs *FieldSchema) GoString() string { + if fs == nil { + return "" + } + + return fmt.Sprintf("{Name:%s Description:%s Repeated:%t Required:%t Type:%s Schema:%s}", + fs.Name, + fs.Description, + fs.Repeated, + fs.Required, + fs.Type, + fmt.Sprintf("%#v", fs.Schema), + ) +} + +func bqTableFieldSchema(desc, name, typ, mode string) *bq.TableFieldSchema { + return &bq.TableFieldSchema{ + Description: desc, + Name: name, + Mode: mode, + Type: typ, + } +} + +func fieldSchema(desc, name, typ string, repeated, required bool) *FieldSchema { + return &FieldSchema{ + Description: desc, + Name: name, + Repeated: repeated, + Required: required, + Type: FieldType(typ), + } +} + +func TestSchemaConversion(t *testing.T) { + testCases := []struct { + schema Schema + bqSchema *bq.TableSchema + }{ + { + // required + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "STRING", false, true), + }, + }, + { + // repeated + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "STRING", "REPEATED"), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "STRING", true, false), + }, + }, + { + // nullable, string + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "STRING", ""), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "STRING", false, false), + }, + }, + { + // integer + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "INTEGER", ""), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "INTEGER", false, false), + }, + }, + { + // float + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "FLOAT", ""), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "FLOAT", false, false), + }, + }, + { + // boolean + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "BOOLEAN", ""), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "BOOLEAN", false, false), + }, + }, + { + // timestamp + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "TIMESTAMP", ""), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "TIMESTAMP", false, false), + }, + }, + { + // civil times + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "f1", "TIME", ""), + bqTableFieldSchema("desc", "f2", "DATE", ""), + bqTableFieldSchema("desc", "f3", "DATETIME", ""), + }, + }, + schema: Schema{ + fieldSchema("desc", "f1", "TIME", false, false), + fieldSchema("desc", "f2", "DATE", 
false, false), + fieldSchema("desc", "f3", "DATETIME", false, false), + }, + }, + { + // nested + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + { + Description: "An outer schema wrapping a nested schema", + Name: "outer", + Mode: "REQUIRED", + Type: "RECORD", + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("inner field", "inner", "STRING", ""), + }, + }, + }, + }, + schema: Schema{ + &FieldSchema{ + Description: "An outer schema wrapping a nested schema", + Name: "outer", + Required: true, + Type: "RECORD", + Schema: Schema{ + { + Description: "inner field", + Name: "inner", + Type: "STRING", + }, + }, + }, + }, + }, + } + + for _, tc := range testCases { + bqSchema := tc.schema.toBQ() + if !testutil.Equal(bqSchema, tc.bqSchema) { + t.Errorf("converting to TableSchema: got:\n%v\nwant:\n%v", + pretty.Value(bqSchema), pretty.Value(tc.bqSchema)) + } + schema := bqToSchema(tc.bqSchema) + if !testutil.Equal(schema, tc.schema) { + t.Errorf("converting to Schema: got:\n%v\nwant:\n%v", schema, tc.schema) + } + } +} + +type allStrings struct { + String string + ByteSlice []byte +} + +type allSignedIntegers struct { + Int64 int64 + Int32 int32 + Int16 int16 + Int8 int8 + Int int +} + +type allUnsignedIntegers struct { + Uint32 uint32 + Uint16 uint16 + Uint8 uint8 +} + +type allFloat struct { + Float64 float64 + Float32 float32 + // NOTE: Complex32 and Complex64 are unsupported by BigQuery +} + +type allBoolean struct { + Bool bool +} + +type allTime struct { + Timestamp time.Time + Time civil.Time + Date civil.Date + DateTime civil.DateTime +} + +func reqField(name, typ string) *FieldSchema { + return &FieldSchema{ + Name: name, + Type: FieldType(typ), + Required: true, + } +} + +func optField(name, typ string) *FieldSchema { + return &FieldSchema{ + Name: name, + Type: FieldType(typ), + Required: false, + } +} + +func TestSimpleInference(t *testing.T) { + testCases := []struct { + in interface{} + want Schema + }{ + { + in: allSignedIntegers{}, + want: Schema{ + reqField("Int64", "INTEGER"), + reqField("Int32", "INTEGER"), + reqField("Int16", "INTEGER"), + reqField("Int8", "INTEGER"), + reqField("Int", "INTEGER"), + }, + }, + { + in: allUnsignedIntegers{}, + want: Schema{ + reqField("Uint32", "INTEGER"), + reqField("Uint16", "INTEGER"), + reqField("Uint8", "INTEGER"), + }, + }, + { + in: allFloat{}, + want: Schema{ + reqField("Float64", "FLOAT"), + reqField("Float32", "FLOAT"), + }, + }, + { + in: allBoolean{}, + want: Schema{ + reqField("Bool", "BOOLEAN"), + }, + }, + { + in: &allBoolean{}, + want: Schema{ + reqField("Bool", "BOOLEAN"), + }, + }, + { + in: allTime{}, + want: Schema{ + reqField("Timestamp", "TIMESTAMP"), + reqField("Time", "TIME"), + reqField("Date", "DATE"), + reqField("DateTime", "DATETIME"), + }, + }, + { + in: allStrings{}, + want: Schema{ + reqField("String", "STRING"), + reqField("ByteSlice", "BYTES"), + }, + }, + } + for _, tc := range testCases { + got, err := InferSchema(tc.in) + if err != nil { + t.Fatalf("%T: error inferring TableSchema: %v", tc.in, err) + } + if !testutil.Equal(got, tc.want) { + t.Errorf("%T: inferring TableSchema: got:\n%#v\nwant:\n%#v", tc.in, + pretty.Value(got), pretty.Value(tc.want)) + } + } +} + +type containsNested struct { + hidden string + NotNested int + Nested struct { + Inside int + } +} + +type containsDoubleNested struct { + NotNested int + Nested struct { + InsideNested struct { + Inside int + } + } +} + +type ptrNested struct { + Ptr *struct{ Inside int } +} + +type dup struct { // more than one field of the same 
struct type + A, B allBoolean +} + +func TestNestedInference(t *testing.T) { + testCases := []struct { + in interface{} + want Schema + }{ + { + in: containsNested{}, + want: Schema{ + reqField("NotNested", "INTEGER"), + &FieldSchema{ + Name: "Nested", + Required: true, + Type: "RECORD", + Schema: Schema{reqField("Inside", "INTEGER")}, + }, + }, + }, + { + in: containsDoubleNested{}, + want: Schema{ + reqField("NotNested", "INTEGER"), + &FieldSchema{ + Name: "Nested", + Required: true, + Type: "RECORD", + Schema: Schema{ + { + Name: "InsideNested", + Required: true, + Type: "RECORD", + Schema: Schema{reqField("Inside", "INTEGER")}, + }, + }, + }, + }, + }, + { + in: ptrNested{}, + want: Schema{ + &FieldSchema{ + Name: "Ptr", + Required: true, + Type: "RECORD", + Schema: Schema{reqField("Inside", "INTEGER")}, + }, + }, + }, + { + in: dup{}, + want: Schema{ + &FieldSchema{ + Name: "A", + Required: true, + Type: "RECORD", + Schema: Schema{reqField("Bool", "BOOLEAN")}, + }, + &FieldSchema{ + Name: "B", + Required: true, + Type: "RECORD", + Schema: Schema{reqField("Bool", "BOOLEAN")}, + }, + }, + }, + } + + for _, tc := range testCases { + got, err := InferSchema(tc.in) + if err != nil { + t.Fatalf("%T: error inferring TableSchema: %v", tc.in, err) + } + if !testutil.Equal(got, tc.want) { + t.Errorf("%T: inferring TableSchema: got:\n%#v\nwant:\n%#v", tc.in, + pretty.Value(got), pretty.Value(tc.want)) + } + } +} + +type repeated struct { + NotRepeated []byte + RepeatedByteSlice [][]byte + Slice []int + Array [5]bool +} + +type nestedRepeated struct { + NotRepeated int + Repeated []struct { + Inside int + } + RepeatedPtr []*struct{ Inside int } +} + +func repField(name, typ string) *FieldSchema { + return &FieldSchema{ + Name: name, + Type: FieldType(typ), + Repeated: true, + } +} + +func TestRepeatedInference(t *testing.T) { + testCases := []struct { + in interface{} + want Schema + }{ + { + in: repeated{}, + want: Schema{ + reqField("NotRepeated", "BYTES"), + repField("RepeatedByteSlice", "BYTES"), + repField("Slice", "INTEGER"), + repField("Array", "BOOLEAN"), + }, + }, + { + in: nestedRepeated{}, + want: Schema{ + reqField("NotRepeated", "INTEGER"), + { + Name: "Repeated", + Repeated: true, + Type: "RECORD", + Schema: Schema{reqField("Inside", "INTEGER")}, + }, + { + Name: "RepeatedPtr", + Repeated: true, + Type: "RECORD", + Schema: Schema{reqField("Inside", "INTEGER")}, + }, + }, + }, + } + + for i, tc := range testCases { + got, err := InferSchema(tc.in) + if err != nil { + t.Fatalf("%d: error inferring TableSchema: %v", i, err) + } + if !testutil.Equal(got, tc.want) { + t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, + pretty.Value(got), pretty.Value(tc.want)) + } + } +} + +type allNulls struct { + A NullInt64 + B NullFloat64 + C NullBool + D NullString + E NullTimestamp + F NullTime + G NullDate + H NullDateTime +} + +func TestNullInference(t *testing.T) { + got, err := InferSchema(allNulls{}) + if err != nil { + t.Fatal(err) + } + want := Schema{ + optField("A", "INTEGER"), + optField("B", "FLOAT"), + optField("C", "BOOLEAN"), + optField("D", "STRING"), + optField("E", "TIMESTAMP"), + optField("F", "TIME"), + optField("G", "DATE"), + optField("H", "DATETIME"), + } + if diff := testutil.Diff(got, want); diff != "" { + t.Error(diff) + } +} + +type Embedded struct { + Embedded int +} + +type embedded struct { + Embedded2 int +} + +type nestedEmbedded struct { + Embedded + embedded +} + +func TestEmbeddedInference(t *testing.T) { + got, err := InferSchema(nestedEmbedded{}) + 
if err != nil { + t.Fatal(err) + } + want := Schema{ + reqField("Embedded", "INTEGER"), + reqField("Embedded2", "INTEGER"), + } + if !testutil.Equal(got, want) { + t.Errorf("got %v, want %v", pretty.Value(got), pretty.Value(want)) + } +} + +func TestRecursiveInference(t *testing.T) { + type List struct { + Val int + Next *List + } + + _, err := InferSchema(List{}) + if err == nil { + t.Fatal("got nil, want error") + } +} + +type withTags struct { + NoTag int + ExcludeTag int `bigquery:"-"` + SimpleTag int `bigquery:"simple_tag"` + UnderscoreTag int `bigquery:"_id"` + MixedCase int `bigquery:"MIXEDcase"` + Nullable []byte `bigquery:",nullable"` +} + +type withTagsNested struct { + Nested withTags `bigquery:"nested"` + NestedAnonymous struct { + ExcludeTag int `bigquery:"-"` + Inside int `bigquery:"inside"` + } `bigquery:"anon"` + PNested *struct{ X int } // not nullable, for backwards compatibility + PNestedNullable *struct{ X int } `bigquery:",nullable"` +} + +type withTagsRepeated struct { + Repeated []withTags `bigquery:"repeated"` + RepeatedAnonymous []struct { + ExcludeTag int `bigquery:"-"` + Inside int `bigquery:"inside"` + } `bigquery:"anon"` +} + +type withTagsEmbedded struct { + withTags +} + +var withTagsSchema = Schema{ + reqField("NoTag", "INTEGER"), + reqField("simple_tag", "INTEGER"), + reqField("_id", "INTEGER"), + reqField("MIXEDcase", "INTEGER"), + optField("Nullable", "BYTES"), +} + +func TestTagInference(t *testing.T) { + testCases := []struct { + in interface{} + want Schema + }{ + { + in: withTags{}, + want: withTagsSchema, + }, + { + in: withTagsNested{}, + want: Schema{ + &FieldSchema{ + Name: "nested", + Required: true, + Type: "RECORD", + Schema: withTagsSchema, + }, + &FieldSchema{ + Name: "anon", + Required: true, + Type: "RECORD", + Schema: Schema{reqField("inside", "INTEGER")}, + }, + &FieldSchema{ + Name: "PNested", + Required: true, + Type: "RECORD", + Schema: Schema{reqField("X", "INTEGER")}, + }, + &FieldSchema{ + Name: "PNestedNullable", + Required: false, + Type: "RECORD", + Schema: Schema{reqField("X", "INTEGER")}, + }, + }, + }, + { + in: withTagsRepeated{}, + want: Schema{ + &FieldSchema{ + Name: "repeated", + Repeated: true, + Type: "RECORD", + Schema: withTagsSchema, + }, + &FieldSchema{ + Name: "anon", + Repeated: true, + Type: "RECORD", + Schema: Schema{reqField("inside", "INTEGER")}, + }, + }, + }, + { + in: withTagsEmbedded{}, + want: withTagsSchema, + }, + } + for i, tc := range testCases { + got, err := InferSchema(tc.in) + if err != nil { + t.Fatalf("%d: error inferring TableSchema: %v", i, err) + } + if !testutil.Equal(got, tc.want) { + t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, + pretty.Value(got), pretty.Value(tc.want)) + } + } +} + +func TestTagInferenceErrors(t *testing.T) { + testCases := []struct { + in interface{} + err error + }{ + { + in: struct { + LongTag int `bigquery:"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxy"` + }{}, + err: errInvalidFieldName, + }, + { + in: struct { + UnsupporedStartChar int `bigquery:"øab"` + }{}, + err: errInvalidFieldName, + }, + { + in: struct { + UnsupportedEndChar int `bigquery:"abø"` + }{}, + err: errInvalidFieldName, + }, + { + in: struct { + UnsupportedMiddleChar int `bigquery:"aøb"` + }{}, + err: errInvalidFieldName, + }, + { + in: struct { + StartInt int `bigquery:"1abc"` + }{}, + err: errInvalidFieldName, + }, + { + in: struct { + Hyphens int `bigquery:"a-b"` + }{}, + err: 
errInvalidFieldName, + }, + } + for i, tc := range testCases { + want := tc.err + _, got := InferSchema(tc.in) + if got != want { + t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, got, want) + } + } + + _, err := InferSchema(struct { + X int `bigquery:",optional"` + }{}) + if err == nil { + t.Error("got nil, want error") + } +} + +func TestSchemaErrors(t *testing.T) { + testCases := []struct { + in interface{} + err error + }{ + { + in: []byte{}, + err: errNoStruct, + }, + { + in: new(int), + err: errNoStruct, + }, + { + in: struct{ Uint uint }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ Uint64 uint64 }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ Uintptr uintptr }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ Complex complex64 }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ Map map[string]int }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ Chan chan bool }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ Ptr *int }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ Interface interface{} }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ MultiDimensional [][]int }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ MultiDimensional [][][]byte }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ SliceOfPointer []*int }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ SliceOfNull []NullInt64 }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ ChanSlice []chan bool }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ NestedChan struct{ Chan []chan bool } }{}, + err: errUnsupportedFieldType, + }, + { + in: struct { + X int `bigquery:",nullable"` + }{}, + err: errBadNullable, + }, + { + in: struct { + X bool `bigquery:",nullable"` + }{}, + err: errBadNullable, + }, + { + in: struct { + X struct{ N int } `bigquery:",nullable"` + }{}, + err: errBadNullable, + }, + { + in: struct { + X []int `bigquery:",nullable"` + }{}, + err: errBadNullable, + }, + { + in: struct{ X *[]byte }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ X *[]int }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ X *int }{}, + err: errUnsupportedFieldType, + }, + } + for _, tc := range testCases { + want := tc.err + _, got := InferSchema(tc.in) + if got != want { + t.Errorf("%#v: got:\n%#v\nwant:\n%#v", tc.in, got, want) + } + } +} + +func TestHasRecursiveType(t *testing.T) { + type ( + nonStruct int + nonRec struct{ A string } + dup struct{ A, B nonRec } + rec struct { + A int + B *rec + } + recUnexported struct { + A int + b *rec + } + hasRec struct { + A int + R *rec + } + recSlicePointer struct { + A []*recSlicePointer + } + ) + for _, test := range []struct { + in interface{} + want bool + }{ + {nonStruct(0), false}, + {nonRec{}, false}, + {dup{}, false}, + {rec{}, true}, + {recUnexported{}, false}, + {hasRec{}, true}, + {&recSlicePointer{}, true}, + } { + got, err := hasRecursiveType(reflect.TypeOf(test.in), nil) + if err != nil { + t.Fatal(err) + } + if got != test.want { + t.Errorf("%T: got %t, want %t", test.in, got, test.want) + } + } +} diff --git a/vendor/cloud.google.com/go/bigquery/table.go b/vendor/cloud.google.com/go/bigquery/table.go new file mode 100644 index 0000000..7707b29 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/table.go @@ -0,0 +1,531 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	"cloud.google.com/go/internal/trace"
+	"golang.org/x/net/context"
+
+	"cloud.google.com/go/internal/optional"
+	bq "google.golang.org/api/bigquery/v2"
+)
+
+// A Table is a reference to a BigQuery table.
+type Table struct {
+	// ProjectID, DatasetID and TableID may be omitted if the Table is the destination for a query.
+	// In this case the result will be stored in an ephemeral table.
+	ProjectID string
+	DatasetID string
+	// TableID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).
+	// The maximum length is 1,024 characters.
+	TableID string
+
+	c *Client
+}
+
+// TableMetadata contains information about a BigQuery table.
+type TableMetadata struct {
+	// The following fields can be set when creating a table.
+
+	// The user-friendly name for the table.
+	Name string
+
+	// The user-friendly description of the table.
+	Description string
+
+	// The table schema. If provided on create, ViewQuery must be empty.
+	Schema Schema
+
+	// The query to use for a view. If provided on create, Schema must be nil.
+	ViewQuery string
+
+	// Use Legacy SQL for the view query.
+	// At most one of UseLegacySQL and UseStandardSQL can be true.
+	UseLegacySQL bool
+
+	// Use Standard SQL for the view query. The default.
+	// At most one of UseLegacySQL and UseStandardSQL can be true.
+	// Deprecated: use UseLegacySQL.
+	UseStandardSQL bool
+
+	// If non-nil, the table is partitioned by time.
+	TimePartitioning *TimePartitioning
+
+	// The time when this table expires. If not set, the table will persist
+	// indefinitely. Expired tables will be deleted and their storage reclaimed.
+	ExpirationTime time.Time
+
+	// User-provided labels.
+	Labels map[string]string
+
+	// Information about a table stored outside of BigQuery.
+	ExternalDataConfig *ExternalDataConfig
+
+	// Custom encryption configuration (e.g., Cloud KMS keys).
+	EncryptionConfig *EncryptionConfig
+
+	// All the fields below are read-only.
+
+	FullID           string // An opaque ID uniquely identifying the table.
+	Type             TableType
+	CreationTime     time.Time
+	LastModifiedTime time.Time
+
+	// The size of the table in bytes.
+	// This does not include data that is being buffered during a streaming insert.
+	NumBytes int64
+
+	// The number of rows of data in this table.
+	// This does not include data that is being buffered during a streaming insert.
+	NumRows uint64
+
+	// Contains information regarding this table's streaming buffer, if one is
+	// present. This field will be nil if the table is not being streamed to or if
+	// there is no data in the streaming buffer.
+	StreamingBuffer *StreamingBuffer
+
+	// ETag is the ETag obtained when reading metadata. Pass it to Table.Update to
+	// ensure that the metadata hasn't changed since it was read.
+	ETag string
+}
+
+// TableCreateDisposition specifies the circumstances under which the destination table will be created.
+// Default is CreateIfNeeded.
+type TableCreateDisposition string
+
+const (
+	// CreateIfNeeded will create the table if it does not already exist. 
+ // Tables are created atomically on successful completion of a job. + CreateIfNeeded TableCreateDisposition = "CREATE_IF_NEEDED" + + // CreateNever ensures the table must already exist and will not be + // automatically created. + CreateNever TableCreateDisposition = "CREATE_NEVER" +) + +// TableWriteDisposition specifies how existing data in a destination table is treated. +// Default is WriteAppend. +type TableWriteDisposition string + +const ( + // WriteAppend will append to any existing data in the destination table. + // Data is appended atomically on successful completion of a job. + WriteAppend TableWriteDisposition = "WRITE_APPEND" + + // WriteTruncate overrides the existing data in the destination table. + // Data is overwritten atomically on successful completion of a job. + WriteTruncate TableWriteDisposition = "WRITE_TRUNCATE" + + // WriteEmpty fails writes if the destination table already contains data. + WriteEmpty TableWriteDisposition = "WRITE_EMPTY" +) + +// TableType is the type of table. +type TableType string + +const ( + RegularTable TableType = "TABLE" + ViewTable TableType = "VIEW" + ExternalTable TableType = "EXTERNAL" +) + +// TimePartitioning describes the time-based date partitioning on a table. +// For more information see: https://cloud.google.com/bigquery/docs/creating-partitioned-tables. +type TimePartitioning struct { + // The amount of time to keep the storage for a partition. + // If the duration is empty (0), the data in the partitions do not expire. + Expiration time.Duration + + // If empty, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the + // table is partitioned by this field. The field must be a top-level TIMESTAMP or + // DATE field. Its mode must be NULLABLE or REQUIRED. + Field string +} + +func (p *TimePartitioning) toBQ() *bq.TimePartitioning { + if p == nil { + return nil + } + return &bq.TimePartitioning{ + Type: "DAY", + ExpirationMs: int64(p.Expiration / time.Millisecond), + Field: p.Field, + } +} + +func bqToTimePartitioning(q *bq.TimePartitioning) *TimePartitioning { + if q == nil { + return nil + } + return &TimePartitioning{ + Expiration: time.Duration(q.ExpirationMs) * time.Millisecond, + Field: q.Field, + } +} + +// EncryptionConfig configures customer-managed encryption on tables. +type EncryptionConfig struct { + // Describes the Cloud KMS encryption key that will be used to protect + // destination BigQuery table. The BigQuery Service Account associated with your + // project requires access to this encryption key. + KMSKeyName string +} + +func (e *EncryptionConfig) toBQ() *bq.EncryptionConfiguration { + if e == nil { + return nil + } + return &bq.EncryptionConfiguration{ + KmsKeyName: e.KMSKeyName, + } +} + +func bqToEncryptionConfig(q *bq.EncryptionConfiguration) *EncryptionConfig { + if q == nil { + return nil + } + return &EncryptionConfig{ + KMSKeyName: q.KmsKeyName, + } +} + +// StreamingBuffer holds information about the streaming buffer. +type StreamingBuffer struct { + // A lower-bound estimate of the number of bytes currently in the streaming + // buffer. + EstimatedBytes uint64 + + // A lower-bound estimate of the number of rows currently in the streaming + // buffer. + EstimatedRows uint64 + + // The time of the oldest entry in the streaming buffer. 
+ OldestEntryTime time.Time +} + +func (t *Table) toBQ() *bq.TableReference { + return &bq.TableReference{ + ProjectId: t.ProjectID, + DatasetId: t.DatasetID, + TableId: t.TableID, + } +} + +// FullyQualifiedName returns the ID of the table in projectID:datasetID.tableID format. +func (t *Table) FullyQualifiedName() string { + return fmt.Sprintf("%s:%s.%s", t.ProjectID, t.DatasetID, t.TableID) +} + +// implicitTable reports whether Table is an empty placeholder, which signifies that a new table should be created with an auto-generated Table ID. +func (t *Table) implicitTable() bool { + return t.ProjectID == "" && t.DatasetID == "" && t.TableID == "" +} + +// Create creates a table in the BigQuery service. +// Pass in a TableMetadata value to configure the table. +// If tm.View.Query is non-empty, the created table will be of type VIEW. +// Expiration can only be set during table creation. +// After table creation, a view can be modified only if its table was initially created +// with a view. +func (t *Table) Create(ctx context.Context, tm *TableMetadata) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Create") + defer func() { trace.EndSpan(ctx, err) }() + + table, err := tm.toBQ() + if err != nil { + return err + } + table.TableReference = &bq.TableReference{ + ProjectId: t.ProjectID, + DatasetId: t.DatasetID, + TableId: t.TableID, + } + req := t.c.bqs.Tables.Insert(t.ProjectID, t.DatasetID, table).Context(ctx) + setClientHeader(req.Header()) + _, err = req.Do() + return err +} + +func (tm *TableMetadata) toBQ() (*bq.Table, error) { + t := &bq.Table{} + if tm == nil { + return t, nil + } + if tm.Schema != nil && tm.ViewQuery != "" { + return nil, errors.New("bigquery: provide Schema or ViewQuery, not both") + } + t.FriendlyName = tm.Name + t.Description = tm.Description + t.Labels = tm.Labels + if tm.Schema != nil { + t.Schema = tm.Schema.toBQ() + } + if tm.ViewQuery != "" { + if tm.UseStandardSQL && tm.UseLegacySQL { + return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL") + } + t.View = &bq.ViewDefinition{Query: tm.ViewQuery} + if tm.UseLegacySQL { + t.View.UseLegacySql = true + } else { + t.View.UseLegacySql = false + t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql") + } + } else if tm.UseLegacySQL || tm.UseStandardSQL { + return nil, errors.New("bigquery: UseLegacy/StandardSQL requires ViewQuery") + } + t.TimePartitioning = tm.TimePartitioning.toBQ() + if !tm.ExpirationTime.IsZero() { + t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6 + } + if tm.ExternalDataConfig != nil { + edc := tm.ExternalDataConfig.toBQ() + t.ExternalDataConfiguration = &edc + } + t.EncryptionConfiguration = tm.EncryptionConfig.toBQ() + if tm.FullID != "" { + return nil, errors.New("cannot set FullID on create") + } + if tm.Type != "" { + return nil, errors.New("cannot set Type on create") + } + if !tm.CreationTime.IsZero() { + return nil, errors.New("cannot set CreationTime on create") + } + if !tm.LastModifiedTime.IsZero() { + return nil, errors.New("cannot set LastModifiedTime on create") + } + if tm.NumBytes != 0 { + return nil, errors.New("cannot set NumBytes on create") + } + if tm.NumRows != 0 { + return nil, errors.New("cannot set NumRows on create") + } + if tm.StreamingBuffer != nil { + return nil, errors.New("cannot set StreamingBuffer on create") + } + if tm.ETag != "" { + return nil, errors.New("cannot set ETag on create") + } + return t, nil +} + +// Metadata fetches the metadata for the table. 
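+// A minimal usage sketch (client stands for any existing *Client; the dataset
+// and table IDs are placeholders):
+//
+//	md, err := client.Dataset("my-dataset").Table("my-table").Metadata(ctx)
+//	if err != nil {
+//		// handle the error
+//	}
+//	fmt.Println(md.NumRows, md.Schema)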
+func (t *Table) Metadata(ctx context.Context) (md *TableMetadata, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Metadata") + defer func() { trace.EndSpan(ctx, err) }() + + req := t.c.bqs.Tables.Get(t.ProjectID, t.DatasetID, t.TableID).Context(ctx) + setClientHeader(req.Header()) + var table *bq.Table + err = runWithRetry(ctx, func() (err error) { + table, err = req.Do() + return err + }) + if err != nil { + return nil, err + } + return bqToTableMetadata(table) +} + +func bqToTableMetadata(t *bq.Table) (*TableMetadata, error) { + md := &TableMetadata{ + Description: t.Description, + Name: t.FriendlyName, + Type: TableType(t.Type), + FullID: t.Id, + Labels: t.Labels, + NumBytes: t.NumBytes, + NumRows: t.NumRows, + ExpirationTime: unixMillisToTime(t.ExpirationTime), + CreationTime: unixMillisToTime(t.CreationTime), + LastModifiedTime: unixMillisToTime(int64(t.LastModifiedTime)), + ETag: t.Etag, + EncryptionConfig: bqToEncryptionConfig(t.EncryptionConfiguration), + } + if t.Schema != nil { + md.Schema = bqToSchema(t.Schema) + } + if t.View != nil { + md.ViewQuery = t.View.Query + md.UseLegacySQL = t.View.UseLegacySql + } + md.TimePartitioning = bqToTimePartitioning(t.TimePartitioning) + if t.StreamingBuffer != nil { + md.StreamingBuffer = &StreamingBuffer{ + EstimatedBytes: t.StreamingBuffer.EstimatedBytes, + EstimatedRows: t.StreamingBuffer.EstimatedRows, + OldestEntryTime: unixMillisToTime(int64(t.StreamingBuffer.OldestEntryTime)), + } + } + if t.ExternalDataConfiguration != nil { + edc, err := bqToExternalDataConfig(t.ExternalDataConfiguration) + if err != nil { + return nil, err + } + md.ExternalDataConfig = edc + } + return md, nil +} + +// Delete deletes the table. +func (t *Table) Delete(ctx context.Context) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Delete") + defer func() { trace.EndSpan(ctx, err) }() + + req := t.c.bqs.Tables.Delete(t.ProjectID, t.DatasetID, t.TableID).Context(ctx) + setClientHeader(req.Header()) + return req.Do() +} + +// Read fetches the contents of the table. +func (t *Table) Read(ctx context.Context) *RowIterator { + return t.read(ctx, fetchPage) +} + +func (t *Table) read(ctx context.Context, pf pageFetcher) *RowIterator { + return newRowIterator(ctx, t, pf) +} + +// Update modifies specific Table metadata fields. 
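+// A sketch of an etag-guarded update (field values are placeholders; pass ""
+// as etag to skip the If-Match check):
+//
+//	md, err := t.Metadata(ctx)
+//	if err != nil {
+//		// handle the error
+//	}
+//	md, err = t.Update(ctx, TableMetadataToUpdate{Description: "new description"}, md.ETag)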
+func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate, etag string) (md *TableMetadata, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Update") + defer func() { trace.EndSpan(ctx, err) }() + + bqt := tm.toBQ() + call := t.c.bqs.Tables.Patch(t.ProjectID, t.DatasetID, t.TableID, bqt).Context(ctx) + setClientHeader(call.Header()) + if etag != "" { + call.Header().Set("If-Match", etag) + } + var res *bq.Table + if err := runWithRetry(ctx, func() (err error) { + res, err = call.Do() + return err + }); err != nil { + return nil, err + } + return bqToTableMetadata(res) +} + +func (tm *TableMetadataToUpdate) toBQ() *bq.Table { + t := &bq.Table{} + forceSend := func(field string) { + t.ForceSendFields = append(t.ForceSendFields, field) + } + + if tm.Description != nil { + t.Description = optional.ToString(tm.Description) + forceSend("Description") + } + if tm.Name != nil { + t.FriendlyName = optional.ToString(tm.Name) + forceSend("FriendlyName") + } + if tm.Schema != nil { + t.Schema = tm.Schema.toBQ() + forceSend("Schema") + } + if !tm.ExpirationTime.IsZero() { + t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6 + forceSend("ExpirationTime") + } + if tm.ViewQuery != nil { + t.View = &bq.ViewDefinition{ + Query: optional.ToString(tm.ViewQuery), + ForceSendFields: []string{"Query"}, + } + } + if tm.UseLegacySQL != nil { + if t.View == nil { + t.View = &bq.ViewDefinition{} + } + t.View.UseLegacySql = optional.ToBool(tm.UseLegacySQL) + t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql") + } + labels, forces, nulls := tm.update() + t.Labels = labels + t.ForceSendFields = append(t.ForceSendFields, forces...) + t.NullFields = append(t.NullFields, nulls...) + return t +} + +// TableMetadataToUpdate is used when updating a table's metadata. +// Only non-nil fields will be updated. +type TableMetadataToUpdate struct { + // The user-friendly description of this table. + Description optional.String + + // The user-friendly name for this table. + Name optional.String + + // The table's schema. + // When updating a schema, you can add columns but not remove them. + Schema Schema + + // The time when this table expires. + ExpirationTime time.Time + + // The query to use for a view. + ViewQuery optional.String + + // Use Legacy SQL for the view query. + UseLegacySQL optional.Bool + + labelUpdater +} + +// labelUpdater contains common code for updating labels. +type labelUpdater struct { + setLabels map[string]string + deleteLabels map[string]bool +} + +// SetLabel causes a label to be added or modified on a call to Update. +func (u *labelUpdater) SetLabel(name, value string) { + if u.setLabels == nil { + u.setLabels = map[string]string{} + } + u.setLabels[name] = value +} + +// DeleteLabel causes a label to be deleted on a call to Update. 
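+// A sketch of both label operations together (keys and values are placeholders):
+//
+//	var tm TableMetadataToUpdate
+//	tm.SetLabel("cost-center", "research")
+//	tm.DeleteLabel("obsolete-key")
+//	md, err := t.Update(ctx, tm, "")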
+func (u *labelUpdater) DeleteLabel(name string) {
+	if u.deleteLabels == nil {
+		u.deleteLabels = map[string]bool{}
+	}
+	u.deleteLabels[name] = true
+}
+
+func (u *labelUpdater) update() (labels map[string]string, forces, nulls []string) {
+	if u.setLabels == nil && u.deleteLabels == nil {
+		return nil, nil, nil
+	}
+	labels = map[string]string{}
+	for k, v := range u.setLabels {
+		labels[k] = v
+	}
+	if len(labels) == 0 && len(u.deleteLabels) > 0 {
+		forces = []string{"Labels"}
+	}
+	for l := range u.deleteLabels {
+		nulls = append(nulls, "Labels."+l)
+	}
+	return labels, forces, nulls
+}
diff --git a/vendor/cloud.google.com/go/bigquery/table_test.go b/vendor/cloud.google.com/go/bigquery/table_test.go
new file mode 100644
index 0000000..553c1e3
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/table_test.go
@@ -0,0 +1,295 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"testing"
+	"time"
+
+	"cloud.google.com/go/internal/testutil"
+	bq "google.golang.org/api/bigquery/v2"
+)
+
+func TestBQToTableMetadata(t *testing.T) {
+	aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
+	aTimeMillis := aTime.UnixNano() / 1e6
+	for _, test := range []struct {
+		in   *bq.Table
+		want *TableMetadata
+	}{
+		{&bq.Table{}, &TableMetadata{}}, // test minimal case
+		{
+			&bq.Table{
+				CreationTime:     aTimeMillis,
+				Description:      "desc",
+				Etag:             "etag",
+				ExpirationTime:   aTimeMillis,
+				FriendlyName:     "fname",
+				Id:               "id",
+				LastModifiedTime: uint64(aTimeMillis),
+				Location:         "loc",
+				NumBytes:         123,
+				NumLongTermBytes: 23,
+				NumRows:          7,
+				StreamingBuffer: &bq.Streamingbuffer{
+					EstimatedBytes:  11,
+					EstimatedRows:   3,
+					OldestEntryTime: uint64(aTimeMillis),
+				},
+				TimePartitioning: &bq.TimePartitioning{
+					ExpirationMs: 7890,
+					Type:         "DAY",
+					Field:        "pfield",
+				},
+				EncryptionConfiguration: &bq.EncryptionConfiguration{KmsKeyName: "keyName"},
+				Type:                    "EXTERNAL",
+				View:                    &bq.ViewDefinition{Query: "view-query"},
+				Labels:                  map[string]string{"a": "b"},
+				ExternalDataConfiguration: &bq.ExternalDataConfiguration{
+					SourceFormat: "GOOGLE_SHEETS",
+				},
+			},
+			&TableMetadata{
+				Description:        "desc",
+				Name:               "fname",
+				ViewQuery:          "view-query",
+				FullID:             "id",
+				Type:               ExternalTable,
+				Labels:             map[string]string{"a": "b"},
+				ExternalDataConfig: &ExternalDataConfig{SourceFormat: GoogleSheets},
+				ExpirationTime:     aTime.Truncate(time.Millisecond),
+				CreationTime:       aTime.Truncate(time.Millisecond),
+				LastModifiedTime:   aTime.Truncate(time.Millisecond),
+				NumBytes:           123,
+				NumRows:            7,
+				TimePartitioning: &TimePartitioning{
+					Expiration: 7890 * time.Millisecond,
+					Field:      "pfield",
+				},
+				StreamingBuffer: &StreamingBuffer{
+					EstimatedBytes:  11,
+					EstimatedRows:   3,
+					OldestEntryTime: aTime,
+				},
+				EncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"},
+				ETag:             "etag",
+			},
+		},
+	} {
+		got, err := bqToTableMetadata(test.in)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if diff := testutil.Diff(got, test.want); diff != "" {
+			t.Errorf("%+v:\n
-got, +want:\n%s", test.in, diff) + } + } +} + +func TestTableMetadataToBQ(t *testing.T) { + aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local) + aTimeMillis := aTime.UnixNano() / 1e6 + sc := Schema{fieldSchema("desc", "name", "STRING", false, true)} + + for _, test := range []struct { + in *TableMetadata + want *bq.Table + }{ + {nil, &bq.Table{}}, + {&TableMetadata{}, &bq.Table{}}, + { + &TableMetadata{ + Name: "n", + Description: "d", + Schema: sc, + ExpirationTime: aTime, + Labels: map[string]string{"a": "b"}, + ExternalDataConfig: &ExternalDataConfig{SourceFormat: Bigtable}, + EncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"}, + }, + &bq.Table{ + FriendlyName: "n", + Description: "d", + Schema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"), + }, + }, + ExpirationTime: aTimeMillis, + Labels: map[string]string{"a": "b"}, + ExternalDataConfiguration: &bq.ExternalDataConfiguration{SourceFormat: "BIGTABLE"}, + EncryptionConfiguration: &bq.EncryptionConfiguration{KmsKeyName: "keyName"}, + }, + }, + { + &TableMetadata{ViewQuery: "q"}, + &bq.Table{ + View: &bq.ViewDefinition{ + Query: "q", + UseLegacySql: false, + ForceSendFields: []string{"UseLegacySql"}, + }, + }, + }, + { + &TableMetadata{ + ViewQuery: "q", + UseLegacySQL: true, + TimePartitioning: &TimePartitioning{}, + }, + &bq.Table{ + View: &bq.ViewDefinition{ + Query: "q", + UseLegacySql: true, + }, + TimePartitioning: &bq.TimePartitioning{ + Type: "DAY", + ExpirationMs: 0, + }, + }, + }, + { + &TableMetadata{ + ViewQuery: "q", + UseStandardSQL: true, + TimePartitioning: &TimePartitioning{ + Expiration: time.Second, + Field: "ofDreams", + }, + }, + &bq.Table{ + View: &bq.ViewDefinition{ + Query: "q", + UseLegacySql: false, + ForceSendFields: []string{"UseLegacySql"}, + }, + TimePartitioning: &bq.TimePartitioning{ + Type: "DAY", + ExpirationMs: 1000, + Field: "ofDreams", + }, + }, + }, + } { + got, err := test.in.toBQ() + if err != nil { + t.Fatalf("%+v: %v", test.in, err) + } + if diff := testutil.Diff(got, test.want); diff != "" { + t.Errorf("%+v:\n-got, +want:\n%s", test.in, diff) + } + } + + // Errors + for _, in := range []*TableMetadata{ + {Schema: sc, ViewQuery: "q"}, // can't have both schema and query + {UseLegacySQL: true}, // UseLegacySQL without query + {UseStandardSQL: true}, // UseStandardSQL without query + // read-only fields + {FullID: "x"}, + {Type: "x"}, + {CreationTime: aTime}, + {LastModifiedTime: aTime}, + {NumBytes: 1}, + {NumRows: 1}, + {StreamingBuffer: &StreamingBuffer{}}, + {ETag: "x"}, + } { + _, err := in.toBQ() + if err == nil { + t.Errorf("%+v: got nil, want error", in) + } + } +} + +func TestTableMetadataToUpdateToBQ(t *testing.T) { + aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local) + for _, test := range []struct { + tm TableMetadataToUpdate + want *bq.Table + }{ + { + tm: TableMetadataToUpdate{}, + want: &bq.Table{}, + }, + { + tm: TableMetadataToUpdate{ + Description: "d", + Name: "n", + }, + want: &bq.Table{ + Description: "d", + FriendlyName: "n", + ForceSendFields: []string{"Description", "FriendlyName"}, + }, + }, + { + tm: TableMetadataToUpdate{ + Schema: Schema{fieldSchema("desc", "name", "STRING", false, true)}, + ExpirationTime: aTime, + }, + want: &bq.Table{ + Schema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"), + }, + }, + ExpirationTime: aTime.UnixNano() / 1e6, + ForceSendFields: []string{"Schema", "ExpirationTime"}, + }, + }, + { + tm: 
TableMetadataToUpdate{ViewQuery: "q"}, + want: &bq.Table{ + View: &bq.ViewDefinition{Query: "q", ForceSendFields: []string{"Query"}}, + }, + }, + { + tm: TableMetadataToUpdate{UseLegacySQL: false}, + want: &bq.Table{ + View: &bq.ViewDefinition{ + UseLegacySql: false, + ForceSendFields: []string{"UseLegacySql"}, + }, + }, + }, + { + tm: TableMetadataToUpdate{ViewQuery: "q", UseLegacySQL: true}, + want: &bq.Table{ + View: &bq.ViewDefinition{ + Query: "q", + UseLegacySql: true, + ForceSendFields: []string{"Query", "UseLegacySql"}, + }, + }, + }, + { + tm: func() (tm TableMetadataToUpdate) { + tm.SetLabel("L", "V") + tm.DeleteLabel("D") + return tm + }(), + want: &bq.Table{ + Labels: map[string]string{"L": "V"}, + NullFields: []string{"Labels.D"}, + }, + }, + } { + got := test.tm.toBQ() + if !testutil.Equal(got, test.want) { + t.Errorf("%+v:\ngot %+v\nwant %+v", test.tm, got, test.want) + } + } +} diff --git a/vendor/cloud.google.com/go/bigquery/uploader.go b/vendor/cloud.google.com/go/bigquery/uploader.go new file mode 100644 index 0000000..de16456 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/uploader.go @@ -0,0 +1,231 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "errors" + "fmt" + "reflect" + + "cloud.google.com/go/internal/trace" + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +// An Uploader does streaming inserts into a BigQuery table. +// It is safe for concurrent use. +type Uploader struct { + t *Table + + // SkipInvalidRows causes rows containing invalid data to be silently + // ignored. The default value is false, which causes the entire request to + // fail if there is an attempt to insert an invalid row. + SkipInvalidRows bool + + // IgnoreUnknownValues causes values not matching the schema to be ignored. + // The default value is false, which causes records containing such values + // to be treated as invalid records. + IgnoreUnknownValues bool + + // A TableTemplateSuffix allows Uploaders to create tables automatically. + // + // Experimental: this option is experimental and may be modified or removed in future versions, + // regardless of any other documented package stability guarantees. + // + // When you specify a suffix, the table you upload data to + // will be used as a template for creating a new table, with the same schema, + // called + . + // + // More information is available at + // https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables + TableTemplateSuffix string +} + +// Uploader returns an Uploader that can be used to append rows to t. +// The returned Uploader may optionally be further configured before its Put method is called. +// +// To stream rows into a date-partitioned table at a particular date, add the +// $yyyymmdd suffix to the table name when constructing the Table. +func (t *Table) Uploader() *Uploader { + return &Uploader{t: t} +} + +// Put uploads one or more rows to the BigQuery service. 
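+//
+// A minimal sketch of a typical call (editor's addition; ctx, client and
+// the Item type are assumed, not defined in this file):
+//
+//    type Item struct {
+//        Name  string
+//        Count int
+//    }
+//    u := client.Dataset("mydataset").Table("mytable").Uploader()
+//    if err := u.Put(ctx, []Item{{"a", 1}, {"b", 2}}); err != nil {
+//        // On partial failure this is a PutMultiError; see below.
+//    }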
+// +// If src is ValueSaver, then its Save method is called to produce a row for uploading. +// +// If src is a struct or pointer to a struct, then a schema is inferred from it +// and used to create a StructSaver. The InsertID of the StructSaver will be +// empty. +// +// If src is a slice of ValueSavers, structs, or struct pointers, then each +// element of the slice is treated as above, and multiple rows are uploaded. +// +// Put returns a PutMultiError if one or more rows failed to be uploaded. +// The PutMultiError contains a RowInsertionError for each failed row. +// +// Put will retry on temporary errors (see +// https://cloud.google.com/bigquery/troubleshooting-errors). This can result +// in duplicate rows if you do not use insert IDs. Also, if the error persists, +// the call will run indefinitely. Pass a context with a timeout to prevent +// hanging calls. +func (u *Uploader) Put(ctx context.Context, src interface{}) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Uploader.Put") + defer func() { trace.EndSpan(ctx, err) }() + + savers, err := valueSavers(src) + if err != nil { + return err + } + return u.putMulti(ctx, savers) +} + +func valueSavers(src interface{}) ([]ValueSaver, error) { + saver, ok, err := toValueSaver(src) + if err != nil { + return nil, err + } + if ok { + return []ValueSaver{saver}, nil + } + srcVal := reflect.ValueOf(src) + if srcVal.Kind() != reflect.Slice { + return nil, fmt.Errorf("%T is not a ValueSaver, struct, struct pointer, or slice", src) + + } + var savers []ValueSaver + for i := 0; i < srcVal.Len(); i++ { + s := srcVal.Index(i).Interface() + saver, ok, err := toValueSaver(s) + if err != nil { + return nil, err + } + if !ok { + return nil, fmt.Errorf("src[%d] has type %T, which is not a ValueSaver, struct or struct pointer", i, s) + } + savers = append(savers, saver) + } + return savers, nil +} + +// Make a ValueSaver from x, which must implement ValueSaver already +// or be a struct or pointer to struct. +func toValueSaver(x interface{}) (ValueSaver, bool, error) { + if _, ok := x.(StructSaver); ok { + return nil, false, errors.New("bigquery: use &StructSaver, not StructSaver") + } + var insertID string + // Handle StructSavers specially so we can infer the schema if necessary. + if ss, ok := x.(*StructSaver); ok && ss.Schema == nil { + x = ss.Struct + insertID = ss.InsertID + // Fall through so we can infer the schema. 
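+	// (Editor's note) From here x is the wrapped struct value, so the
+	// reflection path below infers its schema while preserving the
+	// caller's InsertID. A *StructSaver that already carries a Schema
+	// skips this block and is returned as-is by the ValueSaver check
+	// just below.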
+	}
+	if saver, ok := x.(ValueSaver); ok {
+		return saver, ok, nil
+	}
+	v := reflect.ValueOf(x)
+	// Support Put with []interface{}
+	if v.Kind() == reflect.Interface {
+		v = v.Elem()
+	}
+	if v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+	if v.Kind() != reflect.Struct {
+		return nil, false, nil
+	}
+	schema, err := inferSchemaReflectCached(v.Type())
+	if err != nil {
+		return nil, false, err
+	}
+	return &StructSaver{
+		Struct:   x,
+		InsertID: insertID,
+		Schema:   schema,
+	}, true, nil
+}
+
+func (u *Uploader) putMulti(ctx context.Context, src []ValueSaver) error {
+	req, err := u.newInsertRequest(src)
+	if err != nil {
+		return err
+	}
+	if req == nil {
+		return nil
+	}
+	call := u.t.c.bqs.Tabledata.InsertAll(u.t.ProjectID, u.t.DatasetID, u.t.TableID, req)
+	call = call.Context(ctx)
+	setClientHeader(call.Header())
+	var res *bq.TableDataInsertAllResponse
+	err = runWithRetry(ctx, func() (err error) {
+		res, err = call.Do()
+		return err
+	})
+	if err != nil {
+		return err
+	}
+	return handleInsertErrors(res.InsertErrors, req.Rows)
+}
+
+func (u *Uploader) newInsertRequest(savers []ValueSaver) (*bq.TableDataInsertAllRequest, error) {
+	if savers == nil { // If there are no rows, do nothing.
+		return nil, nil
+	}
+	req := &bq.TableDataInsertAllRequest{
+		TemplateSuffix:      u.TableTemplateSuffix,
+		IgnoreUnknownValues: u.IgnoreUnknownValues,
+		SkipInvalidRows:     u.SkipInvalidRows,
+	}
+	for _, saver := range savers {
+		row, insertID, err := saver.Save()
+		if err != nil {
+			return nil, err
+		}
+		if insertID == "" {
+			insertID = randomIDFn()
+		}
+		m := make(map[string]bq.JsonValue)
+		for k, v := range row {
+			m[k] = bq.JsonValue(v)
+		}
+		req.Rows = append(req.Rows, &bq.TableDataInsertAllRequestRows{
+			InsertId: insertID,
+			Json:     m,
+		})
+	}
+	return req, nil
+}
+
+func handleInsertErrors(ierrs []*bq.TableDataInsertAllResponseInsertErrors, rows []*bq.TableDataInsertAllRequestRows) error {
+	if len(ierrs) == 0 {
+		return nil
+	}
+	var errs PutMultiError
+	for _, e := range ierrs {
+		// rows[e.Index] is accessed below, so e.Index == len(rows) would
+		// also be out of range; the bounds check must use >=.
+		if int(e.Index) >= len(rows) {
+			return fmt.Errorf("internal error: unexpected row index: %v", e.Index)
+		}
+		rie := RowInsertionError{
+			InsertID: rows[e.Index].InsertId,
+			RowIndex: int(e.Index),
+		}
+		for _, errp := range e.Errors {
+			rie.Errors = append(rie.Errors, bqToError(errp))
+		}
+		errs = append(errs, rie)
+	}
+	return errs
+}
diff --git a/vendor/cloud.google.com/go/bigquery/uploader_test.go b/vendor/cloud.google.com/go/bigquery/uploader_test.go
new file mode 100644
index 0000000..d0fbab2
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/uploader_test.go
@@ -0,0 +1,211 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package bigquery + +import ( + "errors" + "strconv" + "testing" + + "github.com/google/go-cmp/cmp" + + "cloud.google.com/go/internal/pretty" + "cloud.google.com/go/internal/testutil" + bq "google.golang.org/api/bigquery/v2" +) + +type testSaver struct { + row map[string]Value + insertID string + err error +} + +func (ts testSaver) Save() (map[string]Value, string, error) { + return ts.row, ts.insertID, ts.err +} + +func TestNewInsertRequest(t *testing.T) { + prev := randomIDFn + n := 0 + randomIDFn = func() string { n++; return strconv.Itoa(n) } + defer func() { randomIDFn = prev }() + + tests := []struct { + ul *Uploader + savers []ValueSaver + req *bq.TableDataInsertAllRequest + }{ + { + ul: &Uploader{}, + savers: nil, + req: nil, + }, + { + ul: &Uploader{}, + savers: []ValueSaver{ + testSaver{row: map[string]Value{"one": 1}}, + testSaver{row: map[string]Value{"two": 2}}, + }, + req: &bq.TableDataInsertAllRequest{ + Rows: []*bq.TableDataInsertAllRequestRows{ + {InsertId: "1", Json: map[string]bq.JsonValue{"one": 1}}, + {InsertId: "2", Json: map[string]bq.JsonValue{"two": 2}}, + }, + }, + }, + { + ul: &Uploader{ + TableTemplateSuffix: "suffix", + IgnoreUnknownValues: true, + SkipInvalidRows: true, + }, + savers: []ValueSaver{ + testSaver{insertID: "a", row: map[string]Value{"one": 1}}, + testSaver{insertID: "", row: map[string]Value{"two": 2}}, + }, + req: &bq.TableDataInsertAllRequest{ + Rows: []*bq.TableDataInsertAllRequestRows{ + {InsertId: "a", Json: map[string]bq.JsonValue{"one": 1}}, + {InsertId: "3", Json: map[string]bq.JsonValue{"two": 2}}, + }, + TemplateSuffix: "suffix", + SkipInvalidRows: true, + IgnoreUnknownValues: true, + }, + }, + } + for i, tc := range tests { + got, err := tc.ul.newInsertRequest(tc.savers) + if err != nil { + t.Fatal(err) + } + want := tc.req + if !testutil.Equal(got, want) { + t.Errorf("%d: %#v: got %#v, want %#v", i, tc.ul, got, want) + } + } +} + +func TestNewInsertRequestErrors(t *testing.T) { + var u Uploader + _, err := u.newInsertRequest([]ValueSaver{testSaver{err: errors.New("!")}}) + if err == nil { + t.Error("got nil, want error") + } +} + +func TestHandleInsertErrors(t *testing.T) { + rows := []*bq.TableDataInsertAllRequestRows{ + {InsertId: "a"}, + {InsertId: "b"}, + } + for _, test := range []struct { + in []*bq.TableDataInsertAllResponseInsertErrors + want error + }{ + { + in: nil, + want: nil, + }, + { + in: []*bq.TableDataInsertAllResponseInsertErrors{{Index: 1}}, + want: PutMultiError{RowInsertionError{InsertID: "b", RowIndex: 1}}, + }, + { + in: []*bq.TableDataInsertAllResponseInsertErrors{{Index: 1}}, + want: PutMultiError{RowInsertionError{InsertID: "b", RowIndex: 1}}, + }, + { + in: []*bq.TableDataInsertAllResponseInsertErrors{ + {Errors: []*bq.ErrorProto{{Message: "m0"}}, Index: 0}, + {Errors: []*bq.ErrorProto{{Message: "m1"}}, Index: 1}, + }, + want: PutMultiError{ + RowInsertionError{InsertID: "a", RowIndex: 0, Errors: []error{&Error{Message: "m0"}}}, + RowInsertionError{InsertID: "b", RowIndex: 1, Errors: []error{&Error{Message: "m1"}}}, + }, + }, + } { + got := handleInsertErrors(test.in, rows) + if !testutil.Equal(got, test.want) { + t.Errorf("%#v:\ngot\n%#v\nwant\n%#v", test.in, got, test.want) + } + } +} + +func TestValueSavers(t *testing.T) { + ts := &testSaver{} + type T struct{ I int } + schema, err := InferSchema(T{}) + if err != nil { + t.Fatal(err) + } + for _, test := range []struct { + in interface{} + want []ValueSaver + }{ + {[]interface{}(nil), nil}, + {[]interface{}{}, nil}, + {ts, []ValueSaver{ts}}, + {T{I: 1}, 
[]ValueSaver{&StructSaver{Schema: schema, Struct: T{I: 1}}}}, + {[]ValueSaver{ts, ts}, []ValueSaver{ts, ts}}, + {[]interface{}{ts, ts}, []ValueSaver{ts, ts}}, + {[]T{{I: 1}, {I: 2}}, []ValueSaver{ + &StructSaver{Schema: schema, Struct: T{I: 1}}, + &StructSaver{Schema: schema, Struct: T{I: 2}}, + }}, + {[]interface{}{T{I: 1}, &T{I: 2}}, []ValueSaver{ + &StructSaver{Schema: schema, Struct: T{I: 1}}, + &StructSaver{Schema: schema, Struct: &T{I: 2}}, + }}, + {&StructSaver{Struct: T{I: 3}, InsertID: "foo"}, + []ValueSaver{ + &StructSaver{Schema: schema, Struct: T{I: 3}, InsertID: "foo"}, + }}, + } { + got, err := valueSavers(test.in) + if err != nil { + t.Fatal(err) + } + if !testutil.Equal(got, test.want, cmp.AllowUnexported(testSaver{})) { + t.Errorf("%+v: got %v, want %v", test.in, pretty.Value(got), pretty.Value(test.want)) + } + // Make sure Save is successful. + for i, vs := range got { + _, _, err := vs.Save() + if err != nil { + t.Fatalf("%+v, #%d: got error %v, want nil", test.in, i, err) + } + } + } +} + +func TestValueSaversErrors(t *testing.T) { + inputs := []interface{}{ + nil, + 1, + []int{1, 2}, + []interface{}{ + testSaver{row: map[string]Value{"one": 1}, insertID: "a"}, + 1, + }, + StructSaver{}, + } + for _, in := range inputs { + if _, err := valueSavers(in); err == nil { + t.Errorf("%#v: got nil, want error", in) + } + } +} diff --git a/vendor/cloud.google.com/go/bigquery/value.go b/vendor/cloud.google.com/go/bigquery/value.go new file mode 100644 index 0000000..bf1d1f3 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/value.go @@ -0,0 +1,835 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "encoding/base64" + "errors" + "fmt" + "math" + "reflect" + "strconv" + "strings" + "time" + + "cloud.google.com/go/civil" + + bq "google.golang.org/api/bigquery/v2" +) + +// Value stores the contents of a single cell from a BigQuery result. +type Value interface{} + +// ValueLoader stores a slice of Values representing a result row from a Read operation. +// See RowIterator.Next for more information. +type ValueLoader interface { + Load(v []Value, s Schema) error +} + +// valueList converts a []Value to implement ValueLoader. +type valueList []Value + +// Load stores a sequence of values in a valueList. +// It resets the slice length to zero, then appends each value to it. +func (vs *valueList) Load(v []Value, _ Schema) error { + *vs = append((*vs)[:0], v...) + return nil +} + +// valueMap converts a map[string]Value to implement ValueLoader. +type valueMap map[string]Value + +// Load stores a sequence of values in a valueMap. 
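+// Nested records become nested map[string]Value values and repeated
+// fields become []Value, mirroring loadMap below.
+//
+// A minimal sketch (editor's addition; "it" is an assumed *RowIterator,
+// e.g. obtained from a query):
+//
+//    row := map[string]Value{}
+//    err := it.Next(&row) // Next accepts a *map[string]Value and calls Load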
+func (vm *valueMap) Load(v []Value, s Schema) error { + if *vm == nil { + *vm = map[string]Value{} + } + loadMap(*vm, v, s) + return nil +} + +func loadMap(m map[string]Value, vals []Value, s Schema) { + for i, f := range s { + val := vals[i] + var v interface{} + switch { + case val == nil: + v = val + case f.Schema == nil: + v = val + case !f.Repeated: + m2 := map[string]Value{} + loadMap(m2, val.([]Value), f.Schema) + v = m2 + default: // repeated and nested + sval := val.([]Value) + vs := make([]Value, len(sval)) + for j, e := range sval { + m2 := map[string]Value{} + loadMap(m2, e.([]Value), f.Schema) + vs[j] = m2 + } + v = vs + } + + m[f.Name] = v + } +} + +type structLoader struct { + typ reflect.Type // type of struct + err error + ops []structLoaderOp + + vstructp reflect.Value // pointer to current struct value; changed by set +} + +// A setFunc is a function that sets a struct field or slice/array +// element to a value. +type setFunc func(v reflect.Value, val interface{}) error + +// A structLoaderOp instructs the loader to set a struct field to a row value. +type structLoaderOp struct { + fieldIndex []int + valueIndex int + setFunc setFunc + repeated bool +} + +var errNoNulls = errors.New("bigquery: NULL values cannot be read into structs") + +func setAny(v reflect.Value, x interface{}) error { + if x == nil { + return errNoNulls + } + v.Set(reflect.ValueOf(x)) + return nil +} + +func setInt(v reflect.Value, x interface{}) error { + if x == nil { + return errNoNulls + } + xx := x.(int64) + if v.OverflowInt(xx) { + return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type()) + } + v.SetInt(xx) + return nil +} + +func setUint(v reflect.Value, x interface{}) error { + if x == nil { + return errNoNulls + } + xx := x.(int64) + if xx < 0 || v.OverflowUint(uint64(xx)) { + return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type()) + } + v.SetUint(uint64(xx)) + return nil +} + +func setFloat(v reflect.Value, x interface{}) error { + if x == nil { + return errNoNulls + } + xx := x.(float64) + if v.OverflowFloat(xx) { + return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type()) + } + v.SetFloat(xx) + return nil +} + +func setBool(v reflect.Value, x interface{}) error { + if x == nil { + return errNoNulls + } + v.SetBool(x.(bool)) + return nil +} + +func setString(v reflect.Value, x interface{}) error { + if x == nil { + return errNoNulls + } + v.SetString(x.(string)) + return nil +} + +func setBytes(v reflect.Value, x interface{}) error { + if x == nil { + v.SetBytes(nil) + } else { + v.SetBytes(x.([]byte)) + } + return nil +} + +func setNull(v reflect.Value, x interface{}, build func() interface{}) error { + if x == nil { + v.Set(reflect.Zero(v.Type())) + } else { + n := build() + v.Set(reflect.ValueOf(n)) + } + return nil +} + +// set remembers a value for the next call to Load. The value must be +// a pointer to a struct. (This is checked in RowIterator.Next.) +func (sl *structLoader) set(structp interface{}, schema Schema) error { + if sl.err != nil { + return sl.err + } + sl.vstructp = reflect.ValueOf(structp) + typ := sl.vstructp.Type().Elem() + if sl.typ == nil { + // First call: remember the type and compile the schema. 
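+		// (Editor's note) Compiling the schema into ops once per loader keeps
+		// the reflection work off the per-row path; Load merely replays
+		// sl.ops. The else-branch below rejects a different struct type on a
+		// later call, since the cached ops would not match its fields.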
+ sl.typ = typ + ops, err := compileToOps(typ, schema) + if err != nil { + sl.err = err + return err + } + sl.ops = ops + } else if sl.typ != typ { + return fmt.Errorf("bigquery: struct type changed from %s to %s", sl.typ, typ) + } + return nil +} + +// compileToOps produces a sequence of operations that will set the fields of a +// value of structType to the contents of a row with schema. +func compileToOps(structType reflect.Type, schema Schema) ([]structLoaderOp, error) { + var ops []structLoaderOp + fields, err := fieldCache.Fields(structType) + if err != nil { + return nil, err + } + for i, schemaField := range schema { + // Look for an exported struct field with the same name as the schema + // field, ignoring case (BigQuery column names are case-insensitive, + // and we want to act like encoding/json anyway). + structField := fields.Match(schemaField.Name) + if structField == nil { + // Ignore schema fields with no corresponding struct field. + continue + } + op := structLoaderOp{ + fieldIndex: structField.Index, + valueIndex: i, + } + t := structField.Type + if schemaField.Repeated { + if t.Kind() != reflect.Slice && t.Kind() != reflect.Array { + return nil, fmt.Errorf("bigquery: repeated schema field %s requires slice or array, but struct field %s has type %s", + schemaField.Name, structField.Name, t) + } + t = t.Elem() + op.repeated = true + } + if schemaField.Type == RecordFieldType { + // Field can be a struct or a pointer to a struct. + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return nil, fmt.Errorf("bigquery: field %s has type %s, expected struct or *struct", + structField.Name, structField.Type) + } + nested, err := compileToOps(t, schemaField.Schema) + if err != nil { + return nil, err + } + op.setFunc = func(v reflect.Value, val interface{}) error { + return setNested(nested, v, val) + } + } else { + op.setFunc = determineSetFunc(t, schemaField.Type) + if op.setFunc == nil { + return nil, fmt.Errorf("bigquery: schema field %s of type %s is not assignable to struct field %s of type %s", + schemaField.Name, schemaField.Type, structField.Name, t) + } + } + ops = append(ops, op) + } + return ops, nil +} + +// determineSetFunc chooses the best function for setting a field of type ftype +// to a value whose schema field type is stype. It returns nil if stype +// is not assignable to ftype. +// determineSetFunc considers only basic types. See compileToOps for +// handling of repetition and nesting. 
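+//
+// (Editor's summary of the cases below) An INTEGER column can populate any
+// supported Go int/uint type or a NullInt64; FLOAT requires float32,
+// float64, or NullFloat64. There is no implicit INTEGER<->FLOAT
+// conversion: a mismatched pair yields a nil setFunc, which compileToOps
+// reports as an error.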
+func determineSetFunc(ftype reflect.Type, stype FieldType) setFunc { + switch stype { + case StringFieldType: + if ftype.Kind() == reflect.String { + return setString + } + if ftype == typeOfNullString { + return func(v reflect.Value, x interface{}) error { + return setNull(v, x, func() interface{} { + return NullString{StringVal: x.(string), Valid: true} + }) + } + } + + case BytesFieldType: + if ftype == typeOfByteSlice { + return setBytes + } + + case IntegerFieldType: + if isSupportedUintType(ftype) { + return setUint + } else if isSupportedIntType(ftype) { + return setInt + } + if ftype == typeOfNullInt64 { + return func(v reflect.Value, x interface{}) error { + return setNull(v, x, func() interface{} { + return NullInt64{Int64: x.(int64), Valid: true} + }) + } + } + + case FloatFieldType: + switch ftype.Kind() { + case reflect.Float32, reflect.Float64: + return setFloat + } + if ftype == typeOfNullFloat64 { + return func(v reflect.Value, x interface{}) error { + return setNull(v, x, func() interface{} { + return NullFloat64{Float64: x.(float64), Valid: true} + }) + } + } + + case BooleanFieldType: + if ftype.Kind() == reflect.Bool { + return setBool + } + if ftype == typeOfNullBool { + return func(v reflect.Value, x interface{}) error { + return setNull(v, x, func() interface{} { + return NullBool{Bool: x.(bool), Valid: true} + }) + } + } + + case TimestampFieldType: + if ftype == typeOfGoTime { + return setAny + } + if ftype == typeOfNullTimestamp { + return func(v reflect.Value, x interface{}) error { + return setNull(v, x, func() interface{} { + return NullTimestamp{Timestamp: x.(time.Time), Valid: true} + }) + } + } + + case DateFieldType: + if ftype == typeOfDate { + return setAny + } + if ftype == typeOfNullDate { + return func(v reflect.Value, x interface{}) error { + return setNull(v, x, func() interface{} { + return NullDate{Date: x.(civil.Date), Valid: true} + }) + } + } + + case TimeFieldType: + if ftype == typeOfTime { + return setAny + } + if ftype == typeOfNullTime { + return func(v reflect.Value, x interface{}) error { + return setNull(v, x, func() interface{} { + return NullTime{Time: x.(civil.Time), Valid: true} + }) + } + } + + case DateTimeFieldType: + if ftype == typeOfDateTime { + return setAny + } + if ftype == typeOfNullDateTime { + return func(v reflect.Value, x interface{}) error { + return setNull(v, x, func() interface{} { + return NullDateTime{DateTime: x.(civil.DateTime), Valid: true} + }) + } + } + } + return nil +} + +func (sl *structLoader) Load(values []Value, _ Schema) error { + if sl.err != nil { + return sl.err + } + return runOps(sl.ops, sl.vstructp.Elem(), values) +} + +// runOps executes a sequence of ops, setting the fields of vstruct to the +// supplied values. +func runOps(ops []structLoaderOp, vstruct reflect.Value, values []Value) error { + for _, op := range ops { + field := vstruct.FieldByIndex(op.fieldIndex) + var err error + if op.repeated { + err = setRepeated(field, values[op.valueIndex].([]Value), op.setFunc) + } else { + err = op.setFunc(field, values[op.valueIndex]) + } + if err != nil { + return err + } + } + return nil +} + +func setNested(ops []structLoaderOp, v reflect.Value, val interface{}) error { + // v is either a struct or a pointer to a struct. + if v.Kind() == reflect.Ptr { + // If the value is nil, set the pointer to nil. + if val == nil { + v.Set(reflect.Zero(v.Type())) + return nil + } + // If the pointer is nil, set it to a zero struct value. 
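+		// (Editor's note) Allocation happens only when the pointer is nil,
+		// so a struct the caller already hung off the pointer is reused
+		// across rows rather than reallocated.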
+ if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + return runOps(ops, v, val.([]Value)) +} + +func setRepeated(field reflect.Value, vslice []Value, setElem setFunc) error { + vlen := len(vslice) + var flen int + switch field.Type().Kind() { + case reflect.Slice: + // Make a slice of the right size, avoiding allocation if possible. + switch { + case field.Len() < vlen: + field.Set(reflect.MakeSlice(field.Type(), vlen, vlen)) + case field.Len() > vlen: + field.SetLen(vlen) + } + flen = vlen + + case reflect.Array: + flen = field.Len() + if flen > vlen { + // Set extra elements to their zero value. + z := reflect.Zero(field.Type().Elem()) + for i := vlen; i < flen; i++ { + field.Index(i).Set(z) + } + } + default: + return fmt.Errorf("bigquery: impossible field type %s", field.Type()) + } + for i, val := range vslice { + if i < flen { // avoid writing past the end of a short array + if err := setElem(field.Index(i), val); err != nil { + return err + } + } + } + return nil +} + +// A ValueSaver returns a row of data to be inserted into a table. +type ValueSaver interface { + // Save returns a row to be inserted into a BigQuery table, represented + // as a map from field name to Value. + // If insertID is non-empty, BigQuery will use it to de-duplicate + // insertions of this row on a best-effort basis. + Save() (row map[string]Value, insertID string, err error) +} + +// ValuesSaver implements ValueSaver for a slice of Values. +type ValuesSaver struct { + Schema Schema + + // If non-empty, BigQuery will use InsertID to de-duplicate insertions + // of this row on a best-effort basis. + InsertID string + + Row []Value +} + +// Save implements ValueSaver. +func (vls *ValuesSaver) Save() (map[string]Value, string, error) { + m, err := valuesToMap(vls.Row, vls.Schema) + return m, vls.InsertID, err +} + +func valuesToMap(vs []Value, schema Schema) (map[string]Value, error) { + if len(vs) != len(schema) { + return nil, errors.New("Schema does not match length of row to be inserted") + } + + m := make(map[string]Value) + for i, fieldSchema := range schema { + if vs[i] == nil { + m[fieldSchema.Name] = nil + continue + } + if fieldSchema.Type != RecordFieldType { + m[fieldSchema.Name] = toUploadValue(vs[i], fieldSchema) + continue + } + // Nested record, possibly repeated. + vals, ok := vs[i].([]Value) + if !ok { + return nil, errors.New("nested record is not a []Value") + } + if !fieldSchema.Repeated { + value, err := valuesToMap(vals, fieldSchema.Schema) + if err != nil { + return nil, err + } + m[fieldSchema.Name] = value + continue + } + // A repeated nested field is converted into a slice of maps. + var maps []Value + for _, v := range vals { + sv, ok := v.([]Value) + if !ok { + return nil, errors.New("nested record in slice is not a []Value") + } + value, err := valuesToMap(sv, fieldSchema.Schema) + if err != nil { + return nil, err + } + maps = append(maps, value) + } + m[fieldSchema.Name] = maps + } + return m, nil +} + +// StructSaver implements ValueSaver for a struct. +// The struct is converted to a map of values by using the values of struct +// fields corresponding to schema fields. Additional and missing +// fields are ignored, as are nested struct pointers that are nil. +type StructSaver struct { + // Schema determines what fields of the struct are uploaded. It should + // match the table's schema. + // Schema is optional for StructSavers that are passed to Uploader.Put. 
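+	// (Editor's note, derived from toValueSaver in uploader.go) A nil
+	// Schema is inferred from Struct when the saver is passed to
+	// Uploader.Put; calling Save directly with a nil Schema produces an
+	// empty row, because structToMap only visits fields named by the
+	// schema.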
+ Schema Schema + + // If non-empty, BigQuery will use InsertID to de-duplicate insertions + // of this row on a best-effort basis. + InsertID string + + // Struct should be a struct or a pointer to a struct. + Struct interface{} +} + +// Save implements ValueSaver. +func (ss *StructSaver) Save() (row map[string]Value, insertID string, err error) { + vstruct := reflect.ValueOf(ss.Struct) + row, err = structToMap(vstruct, ss.Schema) + if err != nil { + return nil, "", err + } + return row, ss.InsertID, nil +} + +func structToMap(vstruct reflect.Value, schema Schema) (map[string]Value, error) { + if vstruct.Kind() == reflect.Ptr { + vstruct = vstruct.Elem() + } + if !vstruct.IsValid() { + return nil, nil + } + m := map[string]Value{} + if vstruct.Kind() != reflect.Struct { + return nil, fmt.Errorf("bigquery: type is %s, need struct or struct pointer", vstruct.Type()) + } + fields, err := fieldCache.Fields(vstruct.Type()) + if err != nil { + return nil, err + } + for _, schemaField := range schema { + // Look for an exported struct field with the same name as the schema + // field, ignoring case. + structField := fields.Match(schemaField.Name) + if structField == nil { + continue + } + val, err := structFieldToUploadValue(vstruct.FieldByIndex(structField.Index), schemaField) + if err != nil { + return nil, err + } + // Add the value to the map, unless it is nil. + if val != nil { + m[schemaField.Name] = val + } + } + return m, nil +} + +// structFieldToUploadValue converts a struct field to a value suitable for ValueSaver.Save, using +// the schemaField as a guide. +// structFieldToUploadValue is careful to return a true nil interface{} when needed, so its +// caller can easily identify a nil value. +func structFieldToUploadValue(vfield reflect.Value, schemaField *FieldSchema) (interface{}, error) { + if schemaField.Repeated && (vfield.Kind() != reflect.Slice && vfield.Kind() != reflect.Array) { + return nil, fmt.Errorf("bigquery: repeated schema field %s requires slice or array, but value has type %s", + schemaField.Name, vfield.Type()) + } + + // A non-nested field can be represented by its Go value, except for civil times. + if schemaField.Type != RecordFieldType { + return toUploadValueReflect(vfield, schemaField), nil + } + // A non-repeated nested field is converted into a map[string]Value. + if !schemaField.Repeated { + m, err := structToMap(vfield, schemaField.Schema) + if err != nil { + return nil, err + } + if m == nil { + return nil, nil + } + return m, nil + } + // A repeated nested field is converted into a slice of maps. 
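+	// (Editor's note) Each element is flattened with structToMap, so a
+	// []*Inner field becomes []Value{map[string]Value{...}, ...}. An empty
+	// slice returns a true nil and the field is omitted from the row; the
+	// service treats an explicit null repeated field as an error.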
+ if vfield.Len() == 0 { + return nil, nil + } + var vals []Value + for i := 0; i < vfield.Len(); i++ { + m, err := structToMap(vfield.Index(i), schemaField.Schema) + if err != nil { + return nil, err + } + vals = append(vals, m) + } + return vals, nil +} + +func toUploadValue(val interface{}, fs *FieldSchema) interface{} { + if fs.Type == TimeFieldType || fs.Type == DateTimeFieldType { + return toUploadValueReflect(reflect.ValueOf(val), fs) + } + return val +} + +func toUploadValueReflect(v reflect.Value, fs *FieldSchema) interface{} { + switch fs.Type { + case TimeFieldType: + if v.Type() == typeOfNullTime { + return v.Interface() + } + return civilToUploadValue(v, fs, func(v reflect.Value) string { + return CivilTimeString(v.Interface().(civil.Time)) + }) + case DateTimeFieldType: + if v.Type() == typeOfNullDateTime { + return v.Interface() + } + return civilToUploadValue(v, fs, func(v reflect.Value) string { + return CivilDateTimeString(v.Interface().(civil.DateTime)) + }) + default: + if !fs.Repeated || v.Len() > 0 { + return v.Interface() + } + // The service treats a null repeated field as an error. Return + // nil to omit the field entirely. + return nil + } +} + +func civilToUploadValue(v reflect.Value, fs *FieldSchema, cvt func(reflect.Value) string) interface{} { + if !fs.Repeated { + return cvt(v) + } + if v.Len() == 0 { + return nil + } + s := make([]string, v.Len()) + for i := 0; i < v.Len(); i++ { + s[i] = cvt(v.Index(i)) + } + return s +} + +// CivilTimeString returns a string representing a civil.Time in a format compatible +// with BigQuery SQL. It rounds the time to the nearest microsecond and returns a +// string with six digits of sub-second precision. +// +// Use CivilTimeString when using civil.Time in DML, for example in INSERT +// statements. +func CivilTimeString(t civil.Time) string { + if t.Nanosecond == 0 { + return t.String() + } else { + micro := (t.Nanosecond + 500) / 1000 // round to nearest microsecond + t.Nanosecond = 0 + return t.String() + fmt.Sprintf(".%06d", micro) + } +} + +// CivilDateTimeString returns a string representing a civil.DateTime in a format compatible +// with BigQuery SQL. It separate the date and time with a space, and formats the time +// with CivilTimeString. +// +// Use CivilDateTimeString when using civil.DateTime in DML, for example in INSERT +// statements. +func CivilDateTimeString(dt civil.DateTime) string { + return dt.Date.String() + " " + CivilTimeString(dt.Time) +} + +// parseCivilDateTime parses a date-time represented in a BigQuery SQL +// compatible format and returns a civil.DateTime. +func parseCivilDateTime(s string) (civil.DateTime, error) { + parts := strings.Fields(s) + if len(parts) != 2 { + return civil.DateTime{}, fmt.Errorf("bigquery: bad DATETIME value %q", s) + } + return civil.ParseDateTime(parts[0] + "T" + parts[1]) +} + +// convertRows converts a series of TableRows into a series of Value slices. +// schema is used to interpret the data from rows; its length must match the +// length of each row. 
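+//
+// (Editor's sketch) The REST API returns every cell as a string, e.g.
+// {"f": [{"v": "7"}]} for a row with one INTEGER column; convertRow
+// unwraps the cell and convertBasicType parses it, so the resulting
+// row is []Value{int64(7)}.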
+func convertRows(rows []*bq.TableRow, schema Schema) ([][]Value, error) { + var rs [][]Value + for _, r := range rows { + row, err := convertRow(r, schema) + if err != nil { + return nil, err + } + rs = append(rs, row) + } + return rs, nil +} + +func convertRow(r *bq.TableRow, schema Schema) ([]Value, error) { + if len(schema) != len(r.F) { + return nil, errors.New("schema length does not match row length") + } + var values []Value + for i, cell := range r.F { + fs := schema[i] + v, err := convertValue(cell.V, fs.Type, fs.Schema) + if err != nil { + return nil, err + } + values = append(values, v) + } + return values, nil +} + +func convertValue(val interface{}, typ FieldType, schema Schema) (Value, error) { + switch val := val.(type) { + case nil: + return nil, nil + case []interface{}: + return convertRepeatedRecord(val, typ, schema) + case map[string]interface{}: + return convertNestedRecord(val, schema) + case string: + return convertBasicType(val, typ) + default: + return nil, fmt.Errorf("got value %v; expected a value of type %s", val, typ) + } +} + +func convertRepeatedRecord(vals []interface{}, typ FieldType, schema Schema) (Value, error) { + var values []Value + for _, cell := range vals { + // each cell contains a single entry, keyed by "v" + val := cell.(map[string]interface{})["v"] + v, err := convertValue(val, typ, schema) + if err != nil { + return nil, err + } + values = append(values, v) + } + return values, nil +} + +func convertNestedRecord(val map[string]interface{}, schema Schema) (Value, error) { + // convertNestedRecord is similar to convertRow, as a record has the same structure as a row. + + // Nested records are wrapped in a map with a single key, "f". + record := val["f"].([]interface{}) + if len(record) != len(schema) { + return nil, errors.New("schema length does not match record length") + } + + var values []Value + for i, cell := range record { + // each cell contains a single entry, keyed by "v" + val := cell.(map[string]interface{})["v"] + fs := schema[i] + v, err := convertValue(val, fs.Type, fs.Schema) + if err != nil { + return nil, err + } + values = append(values, v) + } + return values, nil +} + +// convertBasicType returns val as an interface with a concrete type specified by typ. +func convertBasicType(val string, typ FieldType) (Value, error) { + switch typ { + case StringFieldType: + return val, nil + case BytesFieldType: + return base64.StdEncoding.DecodeString(val) + case IntegerFieldType: + return strconv.ParseInt(val, 10, 64) + case FloatFieldType: + return strconv.ParseFloat(val, 64) + case BooleanFieldType: + return strconv.ParseBool(val) + case TimestampFieldType: + f, err := strconv.ParseFloat(val, 64) + if err != nil { + return nil, err + } + secs := math.Trunc(f) + nanos := (f - secs) * 1e9 + return Value(time.Unix(int64(secs), int64(nanos)).UTC()), nil + case DateFieldType: + return civil.ParseDate(val) + case TimeFieldType: + return civil.ParseTime(val) + case DateTimeFieldType: + return civil.ParseDateTime(val) + default: + return nil, fmt.Errorf("unrecognized type: %s", typ) + } +} diff --git a/vendor/cloud.google.com/go/bigquery/value_test.go b/vendor/cloud.google.com/go/bigquery/value_test.go new file mode 100644 index 0000000..43c4cb5 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/value_test.go @@ -0,0 +1,1166 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "encoding/base64" + "fmt" + "math" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + + "cloud.google.com/go/civil" + "cloud.google.com/go/internal/testutil" + + bq "google.golang.org/api/bigquery/v2" +) + +func TestConvertBasicValues(t *testing.T) { + schema := Schema{ + {Type: StringFieldType}, + {Type: IntegerFieldType}, + {Type: FloatFieldType}, + {Type: BooleanFieldType}, + {Type: BytesFieldType}, + } + row := &bq.TableRow{ + F: []*bq.TableCell{ + {V: "a"}, + {V: "1"}, + {V: "1.2"}, + {V: "true"}, + {V: base64.StdEncoding.EncodeToString([]byte("foo"))}, + }, + } + got, err := convertRow(row, schema) + if err != nil { + t.Fatalf("error converting: %v", err) + } + want := []Value{"a", int64(1), 1.2, true, []byte("foo")} + if !testutil.Equal(got, want) { + t.Errorf("converting basic values: got:\n%v\nwant:\n%v", got, want) + } +} + +func TestConvertTime(t *testing.T) { + schema := Schema{ + {Type: TimestampFieldType}, + {Type: DateFieldType}, + {Type: TimeFieldType}, + {Type: DateTimeFieldType}, + } + ts := testTimestamp.Round(time.Millisecond) + row := &bq.TableRow{ + F: []*bq.TableCell{ + {V: fmt.Sprintf("%.10f", float64(ts.UnixNano())/1e9)}, + {V: testDate.String()}, + {V: testTime.String()}, + {V: testDateTime.String()}, + }, + } + got, err := convertRow(row, schema) + if err != nil { + t.Fatalf("error converting: %v", err) + } + want := []Value{ts, testDate, testTime, testDateTime} + for i, g := range got { + w := want[i] + if !testutil.Equal(g, w) { + t.Errorf("#%d: got:\n%v\nwant:\n%v", i, g, w) + } + } + if got[0].(time.Time).Location() != time.UTC { + t.Errorf("expected time zone UTC: got:\n%v", got) + } +} + +func TestConvertSmallTimes(t *testing.T) { + for _, year := range []int{1600, 1066, 1} { + want := time.Date(year, time.January, 1, 0, 0, 0, 0, time.UTC) + s := fmt.Sprintf("%.10f", float64(want.Unix())) + got, err := convertBasicType(s, TimestampFieldType) + if err != nil { + t.Fatal(err) + } + if !got.(time.Time).Equal(want) { + t.Errorf("got %v, want %v", got, want) + } + } +} + +func TestConvertNullValues(t *testing.T) { + schema := Schema{{Type: StringFieldType}} + row := &bq.TableRow{ + F: []*bq.TableCell{ + {V: nil}, + }, + } + got, err := convertRow(row, schema) + if err != nil { + t.Fatalf("error converting: %v", err) + } + want := []Value{nil} + if !testutil.Equal(got, want) { + t.Errorf("converting null values: got:\n%v\nwant:\n%v", got, want) + } +} + +func TestBasicRepetition(t *testing.T) { + schema := Schema{ + {Type: IntegerFieldType, Repeated: true}, + } + row := &bq.TableRow{ + F: []*bq.TableCell{ + { + V: []interface{}{ + map[string]interface{}{ + "v": "1", + }, + map[string]interface{}{ + "v": "2", + }, + map[string]interface{}{ + "v": "3", + }, + }, + }, + }, + } + got, err := convertRow(row, schema) + if err != nil { + t.Fatalf("error converting: %v", err) + } + want := []Value{[]Value{int64(1), int64(2), int64(3)}} + if !testutil.Equal(got, want) { + t.Errorf("converting basic repeated values: got:\n%v\nwant:\n%v", got, want) + } +} + +func TestNestedRecordContainingRepetition(t *testing.T) { 
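+	// (Editor's note) The bq.TableRow below mirrors the REST wire format:
+	// a record arrives as {"f": [...]} with one entry per field, and every
+	// cell or list element is wrapped in a single-key {"v": ...} map,
+	// which convertValue and convertNestedRecord unwrap layer by layer.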
+ schema := Schema{ + { + Type: RecordFieldType, + Schema: Schema{ + {Type: IntegerFieldType, Repeated: true}, + }, + }, + } + row := &bq.TableRow{ + F: []*bq.TableCell{ + { + V: map[string]interface{}{ + "f": []interface{}{ + map[string]interface{}{ + "v": []interface{}{ + map[string]interface{}{"v": "1"}, + map[string]interface{}{"v": "2"}, + map[string]interface{}{"v": "3"}, + }, + }, + }, + }, + }, + }, + } + + got, err := convertRow(row, schema) + if err != nil { + t.Fatalf("error converting: %v", err) + } + want := []Value{[]Value{[]Value{int64(1), int64(2), int64(3)}}} + if !testutil.Equal(got, want) { + t.Errorf("converting basic repeated values: got:\n%v\nwant:\n%v", got, want) + } +} + +func TestRepeatedRecordContainingRepetition(t *testing.T) { + schema := Schema{ + { + Type: RecordFieldType, + Repeated: true, + Schema: Schema{ + {Type: IntegerFieldType, Repeated: true}, + }, + }, + } + row := &bq.TableRow{F: []*bq.TableCell{ + { + V: []interface{}{ // repeated records. + map[string]interface{}{ // first record. + "v": map[string]interface{}{ // pointless single-key-map wrapper. + "f": []interface{}{ // list of record fields. + map[string]interface{}{ // only record (repeated ints) + "v": []interface{}{ // pointless wrapper. + map[string]interface{}{ + "v": "1", + }, + map[string]interface{}{ + "v": "2", + }, + map[string]interface{}{ + "v": "3", + }, + }, + }, + }, + }, + }, + map[string]interface{}{ // second record. + "v": map[string]interface{}{ + "f": []interface{}{ + map[string]interface{}{ + "v": []interface{}{ + map[string]interface{}{ + "v": "4", + }, + map[string]interface{}{ + "v": "5", + }, + map[string]interface{}{ + "v": "6", + }, + }, + }, + }, + }, + }, + }, + }, + }} + + got, err := convertRow(row, schema) + if err != nil { + t.Fatalf("error converting: %v", err) + } + want := []Value{ // the row is a list of length 1, containing an entry for the repeated record. + []Value{ // the repeated record is a list of length 2, containing an entry for each repetition. + []Value{ // the record is a list of length 1, containing an entry for the repeated integer field. + []Value{int64(1), int64(2), int64(3)}, // the repeated integer field is a list of length 3. + }, + []Value{ // second record + []Value{int64(4), int64(5), int64(6)}, + }, + }, + } + if !testutil.Equal(got, want) { + t.Errorf("converting repeated records with repeated values: got:\n%v\nwant:\n%v", got, want) + } +} + +func TestRepeatedRecordContainingRecord(t *testing.T) { + schema := Schema{ + { + Type: RecordFieldType, + Repeated: true, + Schema: Schema{ + { + Type: StringFieldType, + }, + { + Type: RecordFieldType, + Schema: Schema{ + {Type: IntegerFieldType}, + {Type: StringFieldType}, + }, + }, + }, + }, + } + row := &bq.TableRow{F: []*bq.TableCell{ + { + V: []interface{}{ // repeated records. + map[string]interface{}{ // first record. + "v": map[string]interface{}{ // pointless single-key-map wrapper. + "f": []interface{}{ // list of record fields. + map[string]interface{}{ // first record field (name) + "v": "first repeated record", + }, + map[string]interface{}{ // second record field (nested record). + "v": map[string]interface{}{ // pointless single-key-map wrapper. + "f": []interface{}{ // nested record fields + map[string]interface{}{ + "v": "1", + }, + map[string]interface{}{ + "v": "two", + }, + }, + }, + }, + }, + }, + }, + map[string]interface{}{ // second record. 
+ "v": map[string]interface{}{ + "f": []interface{}{ + map[string]interface{}{ + "v": "second repeated record", + }, + map[string]interface{}{ + "v": map[string]interface{}{ + "f": []interface{}{ + map[string]interface{}{ + "v": "3", + }, + map[string]interface{}{ + "v": "four", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }} + + got, err := convertRow(row, schema) + if err != nil { + t.Fatalf("error converting: %v", err) + } + // TODO: test with flattenresults. + want := []Value{ // the row is a list of length 1, containing an entry for the repeated record. + []Value{ // the repeated record is a list of length 2, containing an entry for each repetition. + []Value{ // record contains a string followed by a nested record. + "first repeated record", + []Value{ + int64(1), + "two", + }, + }, + []Value{ // second record. + "second repeated record", + []Value{ + int64(3), + "four", + }, + }, + }, + } + if !testutil.Equal(got, want) { + t.Errorf("converting repeated records containing record : got:\n%v\nwant:\n%v", got, want) + } +} + +func TestConvertRowErrors(t *testing.T) { + // mismatched lengths + if _, err := convertRow(&bq.TableRow{F: []*bq.TableCell{{V: ""}}}, Schema{}); err == nil { + t.Error("got nil, want error") + } + v3 := map[string]interface{}{"v": 3} + for _, test := range []struct { + value interface{} + fs FieldSchema + }{ + {3, FieldSchema{Type: IntegerFieldType}}, // not a string + {[]interface{}{v3}, // not a string, repeated + FieldSchema{Type: IntegerFieldType, Repeated: true}}, + {map[string]interface{}{"f": []interface{}{v3}}, // not a string, nested + FieldSchema{Type: RecordFieldType, Schema: Schema{{Type: IntegerFieldType}}}}, + {map[string]interface{}{"f": []interface{}{v3}}, // wrong length, nested + FieldSchema{Type: RecordFieldType, Schema: Schema{}}}, + } { + _, err := convertRow( + &bq.TableRow{F: []*bq.TableCell{{V: test.value}}}, + Schema{&test.fs}) + if err == nil { + t.Errorf("value %v, fs %v: got nil, want error", test.value, test.fs) + } + } + + // bad field type + if _, err := convertBasicType("", FieldType("BAD")); err == nil { + t.Error("got nil, want error") + } +} + +func TestValuesSaverConvertsToMap(t *testing.T) { + testCases := []struct { + vs ValuesSaver + wantInsertID string + wantRow map[string]Value + }{ + { + vs: ValuesSaver{ + Schema: Schema{ + {Name: "intField", Type: IntegerFieldType}, + {Name: "strField", Type: StringFieldType}, + {Name: "dtField", Type: DateTimeFieldType}, + }, + InsertID: "iid", + Row: []Value{1, "a", + civil.DateTime{ + Date: civil.Date{Year: 1, Month: 2, Day: 3}, + Time: civil.Time{Hour: 4, Minute: 5, Second: 6, Nanosecond: 7000}}, + }, + }, + wantInsertID: "iid", + wantRow: map[string]Value{"intField": 1, "strField": "a", + "dtField": "0001-02-03 04:05:06.000007"}, + }, + { + vs: ValuesSaver{ + Schema: Schema{ + {Name: "intField", Type: IntegerFieldType}, + { + Name: "recordField", + Type: RecordFieldType, + Schema: Schema{ + {Name: "nestedInt", Type: IntegerFieldType, Repeated: true}, + }, + }, + }, + InsertID: "iid", + Row: []Value{1, []Value{[]Value{2, 3}}}, + }, + wantInsertID: "iid", + wantRow: map[string]Value{ + "intField": 1, + "recordField": map[string]Value{ + "nestedInt": []Value{2, 3}, + }, + }, + }, + { // repeated nested field + vs: ValuesSaver{ + Schema: Schema{ + { + Name: "records", + Type: RecordFieldType, + Schema: Schema{ + {Name: "x", Type: IntegerFieldType}, + {Name: "y", Type: IntegerFieldType}, + }, + Repeated: true, + }, + }, + InsertID: "iid", + Row: []Value{ // a row is a []Value + 
[]Value{ // repeated field's value is a []Value + []Value{1, 2}, // first record of the repeated field + []Value{3, 4}, // second record + }, + }, + }, + wantInsertID: "iid", + wantRow: map[string]Value{ + "records": []Value{ + map[string]Value{"x": 1, "y": 2}, + map[string]Value{"x": 3, "y": 4}, + }, + }, + }, + } + for _, tc := range testCases { + gotRow, gotInsertID, err := tc.vs.Save() + if err != nil { + t.Errorf("Expected successful save; got: %v", err) + continue + } + if !testutil.Equal(gotRow, tc.wantRow) { + t.Errorf("%v row:\ngot:\n%+v\nwant:\n%+v", tc.vs, gotRow, tc.wantRow) + } + if !testutil.Equal(gotInsertID, tc.wantInsertID) { + t.Errorf("%v ID:\ngot:\n%+v\nwant:\n%+v", tc.vs, gotInsertID, tc.wantInsertID) + } + } +} + +func TestValuesToMapErrors(t *testing.T) { + for _, test := range []struct { + values []Value + schema Schema + }{ + { // mismatched length + []Value{1}, + Schema{}, + }, + { // nested record not a slice + []Value{1}, + Schema{{Type: RecordFieldType}}, + }, + { // nested record mismatched length + []Value{[]Value{1}}, + Schema{{Type: RecordFieldType}}, + }, + { // nested repeated record not a slice + []Value{[]Value{1}}, + Schema{{Type: RecordFieldType, Repeated: true}}, + }, + { // nested repeated record mismatched length + []Value{[]Value{[]Value{1}}}, + Schema{{Type: RecordFieldType, Repeated: true}}, + }, + } { + _, err := valuesToMap(test.values, test.schema) + if err == nil { + t.Errorf("%v, %v: got nil, want error", test.values, test.schema) + } + } +} + +func TestStructSaver(t *testing.T) { + schema := Schema{ + {Name: "s", Type: StringFieldType}, + {Name: "r", Type: IntegerFieldType, Repeated: true}, + {Name: "t", Type: TimeFieldType}, + {Name: "tr", Type: TimeFieldType, Repeated: true}, + {Name: "nested", Type: RecordFieldType, Schema: Schema{ + {Name: "b", Type: BooleanFieldType}, + }}, + {Name: "rnested", Type: RecordFieldType, Repeated: true, Schema: Schema{ + {Name: "b", Type: BooleanFieldType}, + }}, + {Name: "p", Type: IntegerFieldType, Required: false}, + } + + type ( + N struct{ B bool } + T struct { + S string + R []int + T civil.Time + TR []civil.Time + Nested *N + Rnested []*N + P NullInt64 + } + ) + + check := func(msg string, in interface{}, want map[string]Value) { + ss := StructSaver{ + Schema: schema, + InsertID: "iid", + Struct: in, + } + got, gotIID, err := ss.Save() + if err != nil { + t.Fatalf("%s: %v", msg, err) + } + if wantIID := "iid"; gotIID != wantIID { + t.Errorf("%s: InsertID: got %q, want %q", msg, gotIID, wantIID) + } + if diff := testutil.Diff(got, want); diff != "" { + t.Errorf("%s: %s", msg, diff) + } + } + + ct1 := civil.Time{Hour: 1, Minute: 2, Second: 3, Nanosecond: 4000} + ct2 := civil.Time{Hour: 5, Minute: 6, Second: 7, Nanosecond: 8000} + in := T{ + S: "x", + R: []int{1, 2}, + T: ct1, + TR: []civil.Time{ct1, ct2}, + Nested: &N{B: true}, + Rnested: []*N{{true}, {false}}, + P: NullInt64{Valid: true, Int64: 17}, + } + want := map[string]Value{ + "s": "x", + "r": []int{1, 2}, + "t": "01:02:03.000004", + "tr": []string{"01:02:03.000004", "05:06:07.000008"}, + "nested": map[string]Value{"b": true}, + "rnested": []Value{map[string]Value{"b": true}, map[string]Value{"b": false}}, + "p": NullInt64{Valid: true, Int64: 17}, + } + check("all values", in, want) + check("all values, ptr", &in, want) + check("empty struct", T{}, map[string]Value{"s": "", "t": "00:00:00", "p": NullInt64{}}) + + // Missing and extra fields ignored. 
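+	// (Editor's note) T2 below both omits schema fields and adds one the
+	// schema lacks; Save drops both silently, matching the StructSaver
+	// doc: "Additional and missing fields are ignored."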
+ type T2 struct { + S string + // missing R, Nested, RNested + Extra int + } + check("missing and extra", T2{S: "x"}, map[string]Value{"s": "x"}) + + check("nils in slice", T{Rnested: []*N{{true}, nil, {false}}}, + map[string]Value{ + "s": "", + "t": "00:00:00", + "p": NullInt64{}, + "rnested": []Value{map[string]Value{"b": true}, map[string]Value(nil), map[string]Value{"b": false}}, + }) +} + +func TestStructSaverErrors(t *testing.T) { + type ( + badField struct { + I int `bigquery:"@"` + } + badR struct{ R int } + badRN struct{ R []int } + ) + + for i, test := range []struct { + struct_ interface{} + schema Schema + }{ + {0, nil}, // not a struct + {&badField{}, nil}, // bad field name + {&badR{}, Schema{{Name: "r", Repeated: true}}}, // repeated field has bad type + {&badR{}, Schema{{Name: "r", Type: RecordFieldType}}}, // nested field has bad type + {&badRN{[]int{0}}, // nested repeated field has bad type + Schema{{Name: "r", Type: RecordFieldType, Repeated: true}}}, + } { + ss := &StructSaver{Struct: test.struct_, Schema: test.schema} + _, _, err := ss.Save() + if err == nil { + t.Errorf("#%d, %v, %v: got nil, want error", i, test.struct_, test.schema) + } + } +} + +func TestConvertRows(t *testing.T) { + schema := Schema{ + {Type: StringFieldType}, + {Type: IntegerFieldType}, + {Type: FloatFieldType}, + {Type: BooleanFieldType}, + } + rows := []*bq.TableRow{ + {F: []*bq.TableCell{ + {V: "a"}, + {V: "1"}, + {V: "1.2"}, + {V: "true"}, + }}, + {F: []*bq.TableCell{ + {V: "b"}, + {V: "2"}, + {V: "2.2"}, + {V: "false"}, + }}, + } + want := [][]Value{ + {"a", int64(1), 1.2, true}, + {"b", int64(2), 2.2, false}, + } + got, err := convertRows(rows, schema) + if err != nil { + t.Fatalf("got %v, want nil", err) + } + if !testutil.Equal(got, want) { + t.Errorf("\ngot %v\nwant %v", got, want) + } + + rows[0].F[0].V = 1 + _, err = convertRows(rows, schema) + if err == nil { + t.Error("got nil, want error") + } +} + +func TestValueList(t *testing.T) { + schema := Schema{ + {Name: "s", Type: StringFieldType}, + {Name: "i", Type: IntegerFieldType}, + {Name: "f", Type: FloatFieldType}, + {Name: "b", Type: BooleanFieldType}, + } + want := []Value{"x", 7, 3.14, true} + var got []Value + vl := (*valueList)(&got) + if err := vl.Load(want, schema); err != nil { + t.Fatal(err) + } + + if !testutil.Equal(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } + + // Load truncates, not appends. 
+ // https://github.com/GoogleCloudPlatform/google-cloud-go/issues/437 + if err := vl.Load(want, schema); err != nil { + t.Fatal(err) + } + if !testutil.Equal(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } +} + +func TestValueMap(t *testing.T) { + ns := Schema{ + {Name: "x", Type: IntegerFieldType}, + {Name: "y", Type: IntegerFieldType}, + } + schema := Schema{ + {Name: "s", Type: StringFieldType}, + {Name: "i", Type: IntegerFieldType}, + {Name: "f", Type: FloatFieldType}, + {Name: "b", Type: BooleanFieldType}, + {Name: "n", Type: RecordFieldType, Schema: ns}, + {Name: "rn", Type: RecordFieldType, Schema: ns, Repeated: true}, + } + in := []Value{"x", 7, 3.14, true, + []Value{1, 2}, + []Value{[]Value{3, 4}, []Value{5, 6}}, + } + var vm valueMap + if err := vm.Load(in, schema); err != nil { + t.Fatal(err) + } + want := map[string]Value{ + "s": "x", + "i": 7, + "f": 3.14, + "b": true, + "n": map[string]Value{"x": 1, "y": 2}, + "rn": []Value{ + map[string]Value{"x": 3, "y": 4}, + map[string]Value{"x": 5, "y": 6}, + }, + } + if !testutil.Equal(vm, valueMap(want)) { + t.Errorf("got\n%+v\nwant\n%+v", vm, want) + } + + in = make([]Value, len(schema)) + want = map[string]Value{ + "s": nil, + "i": nil, + "f": nil, + "b": nil, + "n": nil, + "rn": nil, + } + var vm2 valueMap + if err := vm2.Load(in, schema); err != nil { + t.Fatal(err) + } + if !testutil.Equal(vm2, valueMap(want)) { + t.Errorf("got\n%+v\nwant\n%+v", vm2, want) + } +} + +var ( + // For testing StructLoader + schema2 = Schema{ + {Name: "s", Type: StringFieldType}, + {Name: "s2", Type: StringFieldType}, + {Name: "by", Type: BytesFieldType}, + {Name: "I", Type: IntegerFieldType}, + {Name: "U", Type: IntegerFieldType}, + {Name: "F", Type: FloatFieldType}, + {Name: "B", Type: BooleanFieldType}, + {Name: "TS", Type: TimestampFieldType}, + {Name: "D", Type: DateFieldType}, + {Name: "T", Type: TimeFieldType}, + {Name: "DT", Type: DateTimeFieldType}, + {Name: "nested", Type: RecordFieldType, Schema: Schema{ + {Name: "nestS", Type: StringFieldType}, + {Name: "nestI", Type: IntegerFieldType}, + }}, + {Name: "t", Type: StringFieldType}, + } + + testTimestamp = time.Date(2016, 11, 5, 7, 50, 22, 8, time.UTC) + testDate = civil.Date{Year: 2016, Month: 11, Day: 5} + testTime = civil.Time{Hour: 7, Minute: 50, Second: 22, Nanosecond: 8} + testDateTime = civil.DateTime{Date: testDate, Time: testTime} + + testValues = []Value{"x", "y", []byte{1, 2, 3}, int64(7), int64(8), 3.14, true, + testTimestamp, testDate, testTime, testDateTime, + []Value{"nested", int64(17)}, "z"} +) + +type testStruct1 struct { + B bool + I int + U uint16 + times + S string + S2 String + By []byte + s string + F float64 + Nested nested + Tagged string `bigquery:"t"` +} + +type String string + +type nested struct { + NestS string + NestI int +} + +type times struct { + TS time.Time + T civil.Time + D civil.Date + DT civil.DateTime +} + +func TestStructLoader(t *testing.T) { + var ts1 testStruct1 + mustLoad(t, &ts1, schema2, testValues) + // Note: the schema field named "s" gets matched to the exported struct + // field "S", not the unexported "s". + want := &testStruct1{ + B: true, + I: 7, + U: 8, + F: 3.14, + times: times{TS: testTimestamp, T: testTime, D: testDate, DT: testDateTime}, + S: "x", + S2: "y", + By: []byte{1, 2, 3}, + Nested: nested{NestS: "nested", NestI: 17}, + Tagged: "z", + } + if diff := testutil.Diff(&ts1, want, cmp.AllowUnexported(testStruct1{})); diff != "" { + t.Error(diff) + } + + // Test pointers to nested structs. 
+ type nestedPtr struct{ Nested *nested } + var np nestedPtr + mustLoad(t, &np, schema2, testValues) + want2 := &nestedPtr{Nested: &nested{NestS: "nested", NestI: 17}} + if diff := testutil.Diff(&np, want2); diff != "" { + t.Error(diff) + } + + // Existing values should be reused. + nst := &nested{NestS: "x", NestI: -10} + np = nestedPtr{Nested: nst} + mustLoad(t, &np, schema2, testValues) + if diff := testutil.Diff(&np, want2); diff != "" { + t.Error(diff) + } + if np.Nested != nst { + t.Error("nested struct pointers not equal") + } +} + +type repStruct struct { + Nums []int + ShortNums [2]int // to test truncation + LongNums [5]int // to test padding with zeroes + Nested []*nested +} + +var ( + repSchema = Schema{ + {Name: "nums", Type: IntegerFieldType, Repeated: true}, + {Name: "shortNums", Type: IntegerFieldType, Repeated: true}, + {Name: "longNums", Type: IntegerFieldType, Repeated: true}, + {Name: "nested", Type: RecordFieldType, Repeated: true, Schema: Schema{ + {Name: "nestS", Type: StringFieldType}, + {Name: "nestI", Type: IntegerFieldType}, + }}, + } + v123 = []Value{int64(1), int64(2), int64(3)} + repValues = []Value{v123, v123, v123, + []Value{ + []Value{"x", int64(1)}, + []Value{"y", int64(2)}, + }, + } +) + +func TestStructLoaderRepeated(t *testing.T) { + var r1 repStruct + mustLoad(t, &r1, repSchema, repValues) + want := repStruct{ + Nums: []int{1, 2, 3}, + ShortNums: [...]int{1, 2}, // extra values discarded + LongNums: [...]int{1, 2, 3, 0, 0}, + Nested: []*nested{{"x", 1}, {"y", 2}}, + } + if diff := testutil.Diff(r1, want); diff != "" { + t.Error(diff) + } + r2 := repStruct{ + Nums: []int{-1, -2, -3, -4, -5}, // truncated to zero and appended to + LongNums: [...]int{-1, -2, -3, -4, -5}, // unset elements are zeroed + } + mustLoad(t, &r2, repSchema, repValues) + if diff := testutil.Diff(r2, want); diff != "" { + t.Error(diff) + } + if got, want := cap(r2.Nums), 5; got != want { + t.Errorf("cap(r2.Nums) = %d, want %d", got, want) + } + + // Short slice case. 
+ r3 := repStruct{Nums: []int{-1}} + mustLoad(t, &r3, repSchema, repValues) + if diff := testutil.Diff(r3, want); diff != "" { + t.Error(diff) + } + if got, want := cap(r3.Nums), 3; got != want { + t.Errorf("cap(r3.Nums) = %d, want %d", got, want) + } +} + +type testStructNullable struct { + String NullString + Bytes []byte + Integer NullInt64 + Float NullFloat64 + Boolean NullBool + Timestamp NullTimestamp + Date NullDate + Time NullTime + DateTime NullDateTime + Record *subNullable +} + +type subNullable struct { + X NullInt64 +} + +var testStructNullableSchema = Schema{ + {Name: "String", Type: StringFieldType, Required: false}, + {Name: "Bytes", Type: BytesFieldType, Required: false}, + {Name: "Integer", Type: IntegerFieldType, Required: false}, + {Name: "Float", Type: FloatFieldType, Required: false}, + {Name: "Boolean", Type: BooleanFieldType, Required: false}, + {Name: "Timestamp", Type: TimestampFieldType, Required: false}, + {Name: "Date", Type: DateFieldType, Required: false}, + {Name: "Time", Type: TimeFieldType, Required: false}, + {Name: "DateTime", Type: DateTimeFieldType, Required: false}, + {Name: "Record", Type: RecordFieldType, Required: false, Schema: Schema{ + {Name: "X", Type: IntegerFieldType, Required: false}, + }}, +} + +func TestStructLoaderNullable(t *testing.T) { + var ts testStructNullable + nilVals := []Value{nil, nil, nil, nil, nil, nil, nil, nil, nil, nil} + mustLoad(t, &ts, testStructNullableSchema, nilVals) + want := testStructNullable{} + if diff := testutil.Diff(ts, want); diff != "" { + t.Error(diff) + } + + nonnilVals := []Value{"x", []byte{1, 2, 3}, int64(1), 2.3, true, testTimestamp, testDate, testTime, testDateTime, []Value{int64(4)}} + + // All ts fields are nil. Loading non-nil values will cause them all to + // be allocated. + mustLoad(t, &ts, testStructNullableSchema, nonnilVals) + want = testStructNullable{ + String: NullString{StringVal: "x", Valid: true}, + Bytes: []byte{1, 2, 3}, + Integer: NullInt64{Int64: 1, Valid: true}, + Float: NullFloat64{Float64: 2.3, Valid: true}, + Boolean: NullBool{Bool: true, Valid: true}, + Timestamp: NullTimestamp{Timestamp: testTimestamp, Valid: true}, + Date: NullDate{Date: testDate, Valid: true}, + Time: NullTime{Time: testTime, Valid: true}, + DateTime: NullDateTime{DateTime: testDateTime, Valid: true}, + Record: &subNullable{X: NullInt64{Int64: 4, Valid: true}}, + } + if diff := testutil.Diff(ts, want); diff != "" { + t.Error(diff) + } + + // Struct pointers are reused, byte slices are not. + want = ts + want.Bytes = []byte{17} + vals2 := []Value{nil, []byte{17}, nil, nil, nil, nil, nil, nil, nil, []Value{int64(7)}} + mustLoad(t, &ts, testStructNullableSchema, vals2) + if ts.Record != want.Record { + t.Error("record pointers not identical") + } +} + +func TestStructLoaderOverflow(t *testing.T) { + type S struct { + I int16 + U uint16 + F float32 + } + schema := Schema{ + {Name: "I", Type: IntegerFieldType}, + {Name: "U", Type: IntegerFieldType}, + {Name: "F", Type: FloatFieldType}, + } + var s S + z64 := int64(0) + for _, vals := range [][]Value{ + {int64(math.MaxInt16 + 1), z64, 0}, + {z64, int64(math.MaxInt32), 0}, + {z64, int64(-1), 0}, + {z64, z64, math.MaxFloat32 * 2}, + } { + if err := load(&s, schema, vals); err == nil { + t.Errorf("%+v: got nil, want error", vals) + } + } +} + +func TestStructLoaderFieldOverlap(t *testing.T) { + // It's OK if the struct has fields that the schema does not, and vice versa. 
+ type S1 struct { + I int + X [][]int // not in the schema; does not even correspond to a valid BigQuery type + // many schema fields missing + } + var s1 S1 + if err := load(&s1, schema2, testValues); err != nil { + t.Fatal(err) + } + want1 := S1{I: 7} + if diff := testutil.Diff(s1, want1); diff != "" { + t.Error(diff) + } + + // It's even valid to have no overlapping fields at all. + type S2 struct{ Z int } + + var s2 S2 + mustLoad(t, &s2, schema2, testValues) + want2 := S2{} + if diff := testutil.Diff(s2, want2); diff != "" { + t.Error(diff) + } +} + +func TestStructLoaderErrors(t *testing.T) { + check := func(sp interface{}) { + var sl structLoader + err := sl.set(sp, schema2) + if err == nil { + t.Errorf("%T: got nil, want error", sp) + } + } + + type bad1 struct{ F int32 } // wrong type for FLOAT column + check(&bad1{}) + + type bad2 struct{ I uint } // unsupported integer type + check(&bad2{}) + + type bad3 struct { + I int `bigquery:"@"` + } // bad field name + check(&bad3{}) + + type bad4 struct{ Nested int } // non-struct for nested field + check(&bad4{}) + + type bad5 struct{ Nested struct{ NestS int } } // bad nested struct + check(&bad5{}) + + bad6 := &struct{ Nums int }{} // non-slice for repeated field + sl := structLoader{} + err := sl.set(bad6, repSchema) + if err == nil { + t.Errorf("%T: got nil, want error", bad6) + } + + // sl.set's error is sticky, even with good input. + err2 := sl.set(&repStruct{}, repSchema) + if err2 != err { + t.Errorf("%v != %v, expected equal", err2, err) + } + // sl.Load is similarly sticky + err2 = sl.Load(nil, nil) + if err2 != err { + t.Errorf("%v != %v, expected equal", err2, err) + } + + // Null values. + schema := Schema{ + {Name: "i", Type: IntegerFieldType}, + {Name: "f", Type: FloatFieldType}, + {Name: "b", Type: BooleanFieldType}, + {Name: "s", Type: StringFieldType}, + {Name: "d", Type: DateFieldType}, + {Name: "r", Type: RecordFieldType, Schema: Schema{{Name: "X", Type: IntegerFieldType}}}, + } + type s struct { + I int + F float64 + B bool + S string + D civil.Date + } + vals := []Value{int64(0), 0.0, false, "", testDate} + mustLoad(t, &s{}, schema, vals) + for i, e := range vals { + vals[i] = nil + got := load(&s{}, schema, vals) + if got != errNoNulls { + t.Errorf("#%d: got %v, want %v", i, got, errNoNulls) + } + vals[i] = e + } + + // Using more than one struct type with the same structLoader. 
+ type different struct { + B bool + I int + times + S string + s string + Nums []int + } + + sl = structLoader{} + if err := sl.set(&testStruct1{}, schema2); err != nil { + t.Fatal(err) + } + err = sl.set(&different{}, schema2) + if err == nil { + t.Error("different struct types: got nil, want error") + } +} + +func mustLoad(t *testing.T, pval interface{}, schema Schema, vals []Value) { + if err := load(pval, schema, vals); err != nil { + t.Fatalf("loading: %v", err) + } +} + +func load(pval interface{}, schema Schema, vals []Value) error { + var sl structLoader + if err := sl.set(pval, schema); err != nil { + return err + } + return sl.Load(vals, nil) +} + +func BenchmarkStructLoader_NoCompile(b *testing.B) { + benchmarkStructLoader(b, false) +} + +func BenchmarkStructLoader_Compile(b *testing.B) { + benchmarkStructLoader(b, true) +} + +func benchmarkStructLoader(b *testing.B, compile bool) { + var ts1 testStruct1 + for i := 0; i < b.N; i++ { + var sl structLoader + for j := 0; j < 10; j++ { + if err := load(&ts1, schema2, testValues); err != nil { + b.Fatal(err) + } + if !compile { + sl.typ = nil + } + } + } +} diff --git a/vendor/cloud.google.com/go/bigtable/admin.go b/vendor/cloud.google.com/go/bigtable/admin.go new file mode 100644 index 0000000..2d81b32 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/admin.go @@ -0,0 +1,885 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bigtable + +import ( + "fmt" + "math" + "regexp" + "strings" + "time" + + "cloud.google.com/go/bigtable/internal/gax" + btopt "cloud.google.com/go/bigtable/internal/option" + "cloud.google.com/go/longrunning" + lroauto "cloud.google.com/go/longrunning/autogen" + "github.com/golang/protobuf/ptypes" + durpb "github.com/golang/protobuf/ptypes/duration" + "golang.org/x/net/context" + "google.golang.org/api/cloudresourcemanager/v1" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + gtransport "google.golang.org/api/transport/grpc" + btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +const adminAddr = "bigtableadmin.googleapis.com:443" + +// AdminClient is a client type for performing admin operations within a specific instance. +type AdminClient struct { + conn *grpc.ClientConn + tClient btapb.BigtableTableAdminClient + lroClient *lroauto.OperationsClient + + project, instance string + + // Metadata to be sent with each request. + md metadata.MD +} + +// NewAdminClient creates a new AdminClient for a given project and instance. 
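+//
+// A minimal usage sketch (not part of the upstream file): the project and
+// instance IDs below are placeholders, and error handling is elided.
+//
+//	ac, err := bigtable.NewAdminClient(ctx, "my-project", "my-instance")
+//	if err != nil { /* handle err */ }
+//	defer ac.Close()
+//	tables, _ := ac.Tables(ctx) // list the instance's tables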
+func NewAdminClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*AdminClient, error) { + o, err := btopt.DefaultClientOptions(adminAddr, AdminScope, clientUserAgent) + if err != nil { + return nil, err + } + // Need to add scopes for long running operations (for create table & snapshots) + o = append(o, option.WithScopes(cloudresourcemanager.CloudPlatformScope)) + o = append(o, opts...) + conn, err := gtransport.Dial(ctx, o...) + if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + + lroClient, err := lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn)) + if err != nil { + // This error "should not happen", since we are just reusing old connection + // and never actually need to dial. + // If this does happen, we could leak conn. However, we cannot close conn: + // If the user invoked the function with option.WithGRPCConn, + // we would close a connection that's still in use. + // TODO(pongad): investigate error conditions. + return nil, err + } + + return &AdminClient{ + conn: conn, + tClient: btapb.NewBigtableTableAdminClient(conn), + lroClient: lroClient, + project: project, + instance: instance, + md: metadata.Pairs(resourcePrefixHeader, fmt.Sprintf("projects/%s/instances/%s", project, instance)), + }, nil +} + +// Close closes the AdminClient. +func (ac *AdminClient) Close() error { + return ac.conn.Close() +} + +func (ac *AdminClient) instancePrefix() string { + return fmt.Sprintf("projects/%s/instances/%s", ac.project, ac.instance) +} + +// Tables returns a list of the tables in the instance. +func (ac *AdminClient) Tables(ctx context.Context) ([]string, error) { + ctx = mergeOutgoingMetadata(ctx, ac.md) + prefix := ac.instancePrefix() + req := &btapb.ListTablesRequest{ + Parent: prefix, + } + + var res *btapb.ListTablesResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + res, err = ac.tClient.ListTables(ctx, req) + return err + }, retryOptions...) + if err != nil { + return nil, err + } + + names := make([]string, 0, len(res.Tables)) + for _, tbl := range res.Tables { + names = append(names, strings.TrimPrefix(tbl.Name, prefix+"/tables/")) + } + return names, nil +} + +// TableConf contains all of the information necessary to create a table with column families. +type TableConf struct { + TableID string + SplitKeys []string + // Families is a map from family name to GCPolicy + Families map[string]GCPolicy +} + +// CreateTable creates a new table in the instance. +// This method may return before the table's creation is complete. +func (ac *AdminClient) CreateTable(ctx context.Context, table string) error { + return ac.CreateTableFromConf(ctx, &TableConf{TableID: table}) +} + +// CreatePresplitTable creates a new table in the instance. +// The list of row keys will be used to initially split the table into multiple tablets. +// Given two split keys, "s1" and "s2", three tablets will be created, +// spanning the key ranges: [, s1), [s1, s2), [s2, ). +// This method may return before the table's creation is complete. +func (ac *AdminClient) CreatePresplitTable(ctx context.Context, table string, splitKeys []string) error { + return ac.CreateTableFromConf(ctx, &TableConf{TableID: table, SplitKeys: splitKeys}) +} + +// CreateTableFromConf creates a new table in the instance from the given configuration. 
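+//
+// For example, a sketch (not part of the upstream file) that creates a table
+// with a split key and two column families; the IDs and GC policies below are
+// placeholders:
+//
+//	err := ac.CreateTableFromConf(ctx, &bigtable.TableConf{
+//		TableID:   "my-table",
+//		SplitKeys: []string{"m"},
+//		Families: map[string]bigtable.GCPolicy{
+//			"fam1": bigtable.MaxVersionsPolicy(1),
+//			"fam2": bigtable.MaxVersionsPolicy(2),
+//		},
+//	})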
+func (ac *AdminClient) CreateTableFromConf(ctx context.Context, conf *TableConf) error {
+	ctx = mergeOutgoingMetadata(ctx, ac.md)
+	var reqSplits []*btapb.CreateTableRequest_Split
+	for _, split := range conf.SplitKeys {
+		reqSplits = append(reqSplits, &btapb.CreateTableRequest_Split{Key: []byte(split)})
+	}
+	var tbl btapb.Table
+	if conf.Families != nil {
+		tbl.ColumnFamilies = make(map[string]*btapb.ColumnFamily)
+		for fam, policy := range conf.Families {
+			tbl.ColumnFamilies[fam] = &btapb.ColumnFamily{GcRule: policy.proto()}
+		}
+	}
+	prefix := ac.instancePrefix()
+	req := &btapb.CreateTableRequest{
+		Parent:        prefix,
+		TableId:       conf.TableID,
+		Table:         &tbl,
+		InitialSplits: reqSplits,
+	}
+	_, err := ac.tClient.CreateTable(ctx, req)
+	return err
+}
+
+// CreateColumnFamily creates a new column family in a table.
+func (ac *AdminClient) CreateColumnFamily(ctx context.Context, table, family string) error {
+	// TODO(dsymonds): Permit specifying gcexpr and any other family settings.
+	ctx = mergeOutgoingMetadata(ctx, ac.md)
+	prefix := ac.instancePrefix()
+	req := &btapb.ModifyColumnFamiliesRequest{
+		Name: prefix + "/tables/" + table,
+		Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
+			Id:  family,
+			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{Create: &btapb.ColumnFamily{}},
+		}},
+	}
+	_, err := ac.tClient.ModifyColumnFamilies(ctx, req)
+	return err
+}
+
+// DeleteTable deletes a table and all of its data.
+func (ac *AdminClient) DeleteTable(ctx context.Context, table string) error {
+	ctx = mergeOutgoingMetadata(ctx, ac.md)
+	prefix := ac.instancePrefix()
+	req := &btapb.DeleteTableRequest{
+		Name: prefix + "/tables/" + table,
+	}
+	_, err := ac.tClient.DeleteTable(ctx, req)
+	return err
+}
+
+// DeleteColumnFamily deletes a column family in a table and all of its data.
+func (ac *AdminClient) DeleteColumnFamily(ctx context.Context, table, family string) error {
+	ctx = mergeOutgoingMetadata(ctx, ac.md)
+	prefix := ac.instancePrefix()
+	req := &btapb.ModifyColumnFamiliesRequest{
+		Name: prefix + "/tables/" + table,
+		Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
+			Id:  family,
+			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Drop{Drop: true},
+		}},
+	}
+	_, err := ac.tClient.ModifyColumnFamilies(ctx, req)
+	return err
+}
+
+// TableInfo represents information about a table.
+type TableInfo struct {
+	// DEPRECATED - This field is deprecated. Please use FamilyInfos instead.
+	Families    []string
+	FamilyInfos []FamilyInfo
+}
+
+// FamilyInfo represents information about a column family.
+type FamilyInfo struct {
+	Name     string
+	GCPolicy string
+}
+
+// TableInfo retrieves information about a table.
+func (ac *AdminClient) TableInfo(ctx context.Context, table string) (*TableInfo, error) {
+	ctx = mergeOutgoingMetadata(ctx, ac.md)
+	prefix := ac.instancePrefix()
+	req := &btapb.GetTableRequest{
+		Name: prefix + "/tables/" + table,
+	}
+
+	var res *btapb.Table
+
+	err := gax.Invoke(ctx, func(ctx context.Context) error {
+		var err error
+		res, err = ac.tClient.GetTable(ctx, req)
+		return err
+	}, retryOptions...)
+	if err != nil {
+		return nil, err
+	}
+
+	ti := &TableInfo{}
+	for name, fam := range res.ColumnFamilies {
+		ti.Families = append(ti.Families, name)
+		ti.FamilyInfos = append(ti.FamilyInfos, FamilyInfo{Name: name, GCPolicy: GCRuleToString(fam.GcRule)})
+	}
+	return ti, nil
+}
+
+// SetGCPolicy specifies which cells in a column family should be garbage collected.
+// GC executes opportunistically in the background; table reads may return data
+// matching the GC policy.
+func (ac *AdminClient) SetGCPolicy(ctx context.Context, table, family string, policy GCPolicy) error {
+	ctx = mergeOutgoingMetadata(ctx, ac.md)
+	prefix := ac.instancePrefix()
+	req := &btapb.ModifyColumnFamiliesRequest{
+		Name: prefix + "/tables/" + table,
+		Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
+			Id:  family,
+			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{Update: &btapb.ColumnFamily{GcRule: policy.proto()}},
+		}},
+	}
+	_, err := ac.tClient.ModifyColumnFamilies(ctx, req)
+	return err
+}
+
+// DropRowRange permanently deletes a row range from the specified table.
+func (ac *AdminClient) DropRowRange(ctx context.Context, table, rowKeyPrefix string) error {
+	ctx = mergeOutgoingMetadata(ctx, ac.md)
+	prefix := ac.instancePrefix()
+	req := &btapb.DropRowRangeRequest{
+		Name:   prefix + "/tables/" + table,
+		Target: &btapb.DropRowRangeRequest_RowKeyPrefix{RowKeyPrefix: []byte(rowKeyPrefix)},
+	}
+	_, err := ac.tClient.DropRowRange(ctx, req)
+	return err
+}
+
+// CreateTableFromSnapshot creates a table from a snapshot.
+// The table will be created in the same cluster as the snapshot.
+//
+// This is a private alpha release of Cloud Bigtable snapshots. This feature
+// is not currently available to most Cloud Bigtable customers. This feature
+// might be changed in backward-incompatible ways and is not recommended for
+// production use. It is not subject to any SLA or deprecation policy.
+func (ac *AdminClient) CreateTableFromSnapshot(ctx context.Context, table, cluster, snapshot string) error {
+	ctx = mergeOutgoingMetadata(ctx, ac.md)
+	prefix := ac.instancePrefix()
+	snapshotPath := prefix + "/clusters/" + cluster + "/snapshots/" + snapshot
+
+	req := &btapb.CreateTableFromSnapshotRequest{
+		Parent:         prefix,
+		TableId:        table,
+		SourceSnapshot: snapshotPath,
+	}
+	op, err := ac.tClient.CreateTableFromSnapshot(ctx, req)
+	if err != nil {
+		return err
+	}
+	resp := btapb.Table{}
+	return longrunning.InternalNewOperation(ac.lroClient, op).Wait(ctx, &resp)
+}
+
+// DefaultSnapshotDuration tells SnapshotTable to use the server-side default
+// for the snapshot's duration.
+const DefaultSnapshotDuration time.Duration = 0
+
+// SnapshotTable creates a new snapshot in the specified cluster from the specified source table.
+// Setting the ttl to `DefaultSnapshotDuration` will use the server-side default for the duration.
+//
+// This is a private alpha release of Cloud Bigtable snapshots. This feature
+// is not currently available to most Cloud Bigtable customers. This feature
+// might be changed in backward-incompatible ways and is not recommended for
+// production use. It is not subject to any SLA or deprecation policy.
+func (ac *AdminClient) SnapshotTable(ctx context.Context, table, cluster, snapshot string, ttl time.Duration) error {
+	ctx = mergeOutgoingMetadata(ctx, ac.md)
+	prefix := ac.instancePrefix()
+
+	var ttlProto *durpb.Duration
+
+	if ttl > 0 {
+		ttlProto = ptypes.DurationProto(ttl)
+	}
+
+	req := &btapb.SnapshotTableRequest{
+		Name:       prefix + "/tables/" + table,
+		Cluster:    prefix + "/clusters/" + cluster,
+		SnapshotId: snapshot,
+		Ttl:        ttlProto,
+	}
+
+	op, err := ac.tClient.SnapshotTable(ctx, req)
+	if err != nil {
+		return err
+	}
+	resp := btapb.Snapshot{}
+	return longrunning.InternalNewOperation(ac.lroClient, op).Wait(ctx, &resp)
+}
+
+// ListSnapshots returns a SnapshotIterator for iterating over the snapshots in a cluster.
+// To list snapshots across all of the clusters in the instance, specify "-" as the cluster.
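+//
+// A hedged iteration sketch (not part of the upstream file); "my-cluster" is a
+// placeholder, ac is an *AdminClient, and iterator is the
+// google.golang.org/api/iterator package:
+//
+//	it := ac.ListSnapshots(ctx, "my-cluster")
+//	for {
+//		snap, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil { /* handle err */ }
+//		fmt.Println(snap.Name)
+//	}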
+//
+// This is a private alpha release of Cloud Bigtable snapshots. This feature
+// is not currently available to most Cloud Bigtable customers. This feature
+// might be changed in backward-incompatible ways and is not recommended for
+// production use. It is not subject to any SLA or deprecation policy.
+func (ac *AdminClient) ListSnapshots(ctx context.Context, cluster string) *SnapshotIterator {
+	ctx = mergeOutgoingMetadata(ctx, ac.md)
+	prefix := ac.instancePrefix()
+	clusterPath := prefix + "/clusters/" + cluster
+
+	it := &SnapshotIterator{}
+	req := &btapb.ListSnapshotsRequest{
+		Parent: clusterPath,
+	}
+
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		req.PageToken = pageToken
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else {
+			req.PageSize = int32(pageSize)
+		}
+
+		resp, err := ac.tClient.ListSnapshots(ctx, req)
+		if err != nil {
+			return "", err
+		}
+		for _, s := range resp.Snapshots {
+			snapshotInfo, err := newSnapshotInfo(s)
+			if err != nil {
+				return "", fmt.Errorf("failed to parse snapshot proto: %v", err)
+			}
+			it.items = append(it.items, snapshotInfo)
+		}
+		return resp.NextPageToken, nil
+	}
+	bufLen := func() int { return len(it.items) }
+	takeBuf := func() interface{} { b := it.items; it.items = nil; return b }
+
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, bufLen, takeBuf)
+
+	return it
+}
+
+func newSnapshotInfo(snapshot *btapb.Snapshot) (*SnapshotInfo, error) {
+	nameParts := strings.Split(snapshot.Name, "/")
+	name := nameParts[len(nameParts)-1]
+	tablePathParts := strings.Split(snapshot.SourceTable.Name, "/")
+	tableID := tablePathParts[len(tablePathParts)-1]
+
+	createTime, err := ptypes.Timestamp(snapshot.CreateTime)
+	if err != nil {
+		return nil, fmt.Errorf("invalid createTime: %v", err)
+	}
+
+	deleteTime, err := ptypes.Timestamp(snapshot.DeleteTime)
+	if err != nil {
+		return nil, fmt.Errorf("invalid deleteTime: %v", err)
+	}
+
+	return &SnapshotInfo{
+		Name:        name,
+		SourceTable: tableID,
+		DataSize:    snapshot.DataSizeBytes,
+		CreateTime:  createTime,
+		DeleteTime:  deleteTime,
+	}, nil
+}
+
+// A SnapshotIterator iterates over the snapshots in a cluster.
+//
+// This is a private alpha release of Cloud Bigtable snapshots. This feature
+// is not currently available to most Cloud Bigtable customers. This feature
+// might be changed in backward-incompatible ways and is not recommended for
+// production use. It is not subject to any SLA or deprecation policy.
+type SnapshotIterator struct {
+	items    []*SnapshotInfo
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+}
+
+// PageInfo supports pagination. See https://godoc.org/google.golang.org/api/iterator for details.
+func (it *SnapshotIterator) PageInfo() *iterator.PageInfo {
+	return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done
+// (https://godoc.org/google.golang.org/api/iterator) if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *SnapshotIterator) Next() (*SnapshotInfo, error) {
+	if err := it.nextFunc(); err != nil {
+		return nil, err
+	}
+	item := it.items[0]
+	it.items = it.items[1:]
+	return item, nil
+}
+
+// SnapshotInfo represents information about a snapshot.
+type SnapshotInfo struct {
+	Name        string
+	SourceTable string
+	DataSize    int64
+	CreateTime  time.Time
+	DeleteTime  time.Time
+}
+
+// SnapshotInfo gets snapshot metadata.
+//
+// This is a private alpha release of Cloud Bigtable snapshots. This feature
+// is not currently available to most Cloud Bigtable customers. This feature
+// might be changed in backward-incompatible ways and is not recommended for
+// production use. It is not subject to any SLA or deprecation policy.
+func (ac *AdminClient) SnapshotInfo(ctx context.Context, cluster, snapshot string) (*SnapshotInfo, error) {
+	ctx = mergeOutgoingMetadata(ctx, ac.md)
+	prefix := ac.instancePrefix()
+	clusterPath := prefix + "/clusters/" + cluster
+	snapshotPath := clusterPath + "/snapshots/" + snapshot
+
+	req := &btapb.GetSnapshotRequest{
+		Name: snapshotPath,
+	}
+
+	resp, err := ac.tClient.GetSnapshot(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+
+	return newSnapshotInfo(resp)
+}
+
+// DeleteSnapshot deletes a snapshot in a cluster.
+//
+// This is a private alpha release of Cloud Bigtable snapshots. This feature
+// is not currently available to most Cloud Bigtable customers. This feature
+// might be changed in backward-incompatible ways and is not recommended for
+// production use. It is not subject to any SLA or deprecation policy.
+func (ac *AdminClient) DeleteSnapshot(ctx context.Context, cluster, snapshot string) error {
+	ctx = mergeOutgoingMetadata(ctx, ac.md)
+	prefix := ac.instancePrefix()
+	clusterPath := prefix + "/clusters/" + cluster
+	snapshotPath := clusterPath + "/snapshots/" + snapshot
+
+	req := &btapb.DeleteSnapshotRequest{
+		Name: snapshotPath,
+	}
+	_, err := ac.tClient.DeleteSnapshot(ctx, req)
+	return err
+}
+
+// getConsistencyToken gets the consistency token for a table.
+func (ac *AdminClient) getConsistencyToken(ctx context.Context, tableName string) (string, error) {
+	req := &btapb.GenerateConsistencyTokenRequest{
+		Name: tableName,
+	}
+	resp, err := ac.tClient.GenerateConsistencyToken(ctx, req)
+	if err != nil {
+		return "", err
+	}
+	return resp.GetConsistencyToken(), nil
+}
+
+// isConsistent checks if a token is consistent for a table.
+func (ac *AdminClient) isConsistent(ctx context.Context, tableName, token string) (bool, error) {
+	req := &btapb.CheckConsistencyRequest{
+		Name:             tableName,
+		ConsistencyToken: token,
+	}
+	var resp *btapb.CheckConsistencyResponse
+
+	// Retry calls on retryable errors to avoid losing the token gathered before.
+	err := gax.Invoke(ctx, func(ctx context.Context) error {
+		var err error
+		resp, err = ac.tClient.CheckConsistency(ctx, req)
+		return err
+	}, retryOptions...)
+	if err != nil {
+		return false, err
+	}
+	return resp.GetConsistent(), nil
+}
+
+// WaitForReplication waits until all the writes committed before the call
+// started have been propagated to all the clusters in the instance via replication.
+//
+// This is a private alpha release of Cloud Bigtable replication. This feature
+// is not currently available to most Cloud Bigtable customers. This feature
+// might be changed in backward-incompatible ways and is not recommended for
+// production use. It is not subject to any SLA or deprecation policy.
+func (ac *AdminClient) WaitForReplication(ctx context.Context, table string) error {
+	// Get the token.
+	prefix := ac.instancePrefix()
+	tableName := prefix + "/tables/" + table
+	token, err := ac.getConsistencyToken(ctx, tableName)
+	if err != nil {
+		return err
+	}
+
+	// Periodically check if the token is consistent.
+	timer := time.NewTicker(time.Second * 10)
+	defer timer.Stop()
+	for {
+		consistent, err := ac.isConsistent(ctx, tableName, token)
+		if err != nil {
+			return err
+		}
+		if consistent {
+			return nil
+		}
+		// Wait for the next tick or until the ctx is cancelled.
+ select { + case <-ctx.Done(): + return ctx.Err() + case <-timer.C: + } + } +} + +const instanceAdminAddr = "bigtableadmin.googleapis.com:443" + +// InstanceAdminClient is a client type for performing admin operations on instances. +// These operations can be substantially more dangerous than those provided by AdminClient. +type InstanceAdminClient struct { + conn *grpc.ClientConn + iClient btapb.BigtableInstanceAdminClient + lroClient *lroauto.OperationsClient + + project string + + // Metadata to be sent with each request. + md metadata.MD +} + +// NewInstanceAdminClient creates a new InstanceAdminClient for a given project. +func NewInstanceAdminClient(ctx context.Context, project string, opts ...option.ClientOption) (*InstanceAdminClient, error) { + o, err := btopt.DefaultClientOptions(instanceAdminAddr, InstanceAdminScope, clientUserAgent) + if err != nil { + return nil, err + } + o = append(o, opts...) + conn, err := gtransport.Dial(ctx, o...) + if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + + lroClient, err := lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn)) + if err != nil { + // This error "should not happen", since we are just reusing old connection + // and never actually need to dial. + // If this does happen, we could leak conn. However, we cannot close conn: + // If the user invoked the function with option.WithGRPCConn, + // we would close a connection that's still in use. + // TODO(pongad): investigate error conditions. + return nil, err + } + + return &InstanceAdminClient{ + conn: conn, + iClient: btapb.NewBigtableInstanceAdminClient(conn), + lroClient: lroClient, + + project: project, + md: metadata.Pairs(resourcePrefixHeader, "projects/"+project), + }, nil +} + +// Close closes the InstanceAdminClient. +func (iac *InstanceAdminClient) Close() error { + return iac.conn.Close() +} + +// StorageType is the type of storage used for all tables in an instance +type StorageType int + +const ( + SSD StorageType = iota + HDD +) + +func (st StorageType) proto() btapb.StorageType { + if st == HDD { + return btapb.StorageType_HDD + } + return btapb.StorageType_SSD +} + +// InstanceType is the type of the instance +type InstanceType int32 + +const ( + PRODUCTION InstanceType = InstanceType(btapb.Instance_PRODUCTION) + DEVELOPMENT = InstanceType(btapb.Instance_DEVELOPMENT) +) + +// InstanceInfo represents information about an instance +type InstanceInfo struct { + Name string // name of the instance + DisplayName string // display name for UIs +} + +// InstanceConf contains the information necessary to create an Instance +type InstanceConf struct { + InstanceId, DisplayName, ClusterId, Zone string + // NumNodes must not be specified for DEVELOPMENT instance types + NumNodes int32 + StorageType StorageType + InstanceType InstanceType +} + +// InstanceWithClustersConfig contains the information necessary to create an Instance +type InstanceWithClustersConfig struct { + InstanceID, DisplayName string + Clusters []ClusterConfig + InstanceType InstanceType +} + +var instanceNameRegexp = regexp.MustCompile(`^projects/([^/]+)/instances/([a-z][-a-z0-9]*)$`) + +// CreateInstance creates a new instance in the project. +// This method will return when the instance has been created or when an error occurs. 
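+//
+// A minimal sketch (not part of the upstream file); iac is an
+// *InstanceAdminClient, and all IDs and the zone below are placeholders:
+//
+//	err := iac.CreateInstance(ctx, &bigtable.InstanceConf{
+//		InstanceId:  "my-instance",
+//		DisplayName: "My Instance",
+//		ClusterId:   "my-instance-c1",
+//		Zone:        "us-central1-b",
+//		NumNodes:    3,
+//		StorageType: bigtable.SSD,
+//	})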
+func (iac *InstanceAdminClient) CreateInstance(ctx context.Context, conf *InstanceConf) error {
+	newConfig := InstanceWithClustersConfig{
+		InstanceID:   conf.InstanceId,
+		DisplayName:  conf.DisplayName,
+		InstanceType: conf.InstanceType,
+		Clusters: []ClusterConfig{
+			{
+				InstanceID:  conf.InstanceId,
+				ClusterID:   conf.ClusterId,
+				Zone:        conf.Zone,
+				NumNodes:    conf.NumNodes,
+				StorageType: conf.StorageType,
+			},
+		},
+	}
+	return iac.CreateInstanceWithClusters(ctx, &newConfig)
+}
+
+// CreateInstanceWithClusters creates a new instance with configured clusters in the project.
+// This method will return when the instance has been created or when an error occurs.
+//
+// Instances with multiple clusters are part of a private alpha release of Cloud Bigtable replication.
+// This feature is not currently available to most Cloud Bigtable customers. This feature
+// might be changed in backward-incompatible ways and is not recommended for
+// production use. It is not subject to any SLA or deprecation policy.
+func (iac *InstanceAdminClient) CreateInstanceWithClusters(ctx context.Context, conf *InstanceWithClustersConfig) error {
+	ctx = mergeOutgoingMetadata(ctx, iac.md)
+	clusters := make(map[string]*btapb.Cluster)
+	for _, cluster := range conf.Clusters {
+		clusters[cluster.ClusterID] = cluster.proto(iac.project)
+	}
+
+	req := &btapb.CreateInstanceRequest{
+		Parent:     "projects/" + iac.project,
+		InstanceId: conf.InstanceID,
+		Instance:   &btapb.Instance{DisplayName: conf.DisplayName, Type: btapb.Instance_Type(conf.InstanceType)},
+		Clusters:   clusters,
+	}
+
+	lro, err := iac.iClient.CreateInstance(ctx, req)
+	if err != nil {
+		return err
+	}
+	resp := btapb.Instance{}
+	return longrunning.InternalNewOperation(iac.lroClient, lro).Wait(ctx, &resp)
+}
+
+// DeleteInstance deletes an instance from the project.
+func (iac *InstanceAdminClient) DeleteInstance(ctx context.Context, instanceId string) error {
+	ctx = mergeOutgoingMetadata(ctx, iac.md)
+	req := &btapb.DeleteInstanceRequest{Name: "projects/" + iac.project + "/instances/" + instanceId}
+	_, err := iac.iClient.DeleteInstance(ctx, req)
+	return err
+}
+
+// Instances returns a list of instances in the project.
+func (iac *InstanceAdminClient) Instances(ctx context.Context) ([]*InstanceInfo, error) {
+	ctx = mergeOutgoingMetadata(ctx, iac.md)
+	req := &btapb.ListInstancesRequest{
+		Parent: "projects/" + iac.project,
+	}
+	res, err := iac.iClient.ListInstances(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	if len(res.FailedLocations) > 0 {
+		// We don't have a good way to return a partial result in the face of some zones being unavailable.
+		// Fail the entire request.
+		return nil, status.Errorf(codes.Unavailable, "Failed locations: %v", res.FailedLocations)
+	}
+
+	var is []*InstanceInfo
+	for _, i := range res.Instances {
+		m := instanceNameRegexp.FindStringSubmatch(i.Name)
+		if m == nil {
+			return nil, fmt.Errorf("malformed instance name %q", i.Name)
+		}
+		is = append(is, &InstanceInfo{
+			Name:        m[2],
+			DisplayName: i.DisplayName,
+		})
+	}
+	return is, nil
+}
+
+// InstanceInfo returns information about an instance.
+func (iac *InstanceAdminClient) InstanceInfo(ctx context.Context, instanceId string) (*InstanceInfo, error) { + ctx = mergeOutgoingMetadata(ctx, iac.md) + req := &btapb.GetInstanceRequest{ + Name: "projects/" + iac.project + "/instances/" + instanceId, + } + res, err := iac.iClient.GetInstance(ctx, req) + if err != nil { + return nil, err + } + + m := instanceNameRegexp.FindStringSubmatch(res.Name) + if m == nil { + return nil, fmt.Errorf("malformed instance name %q", res.Name) + } + return &InstanceInfo{ + Name: m[2], + DisplayName: res.DisplayName, + }, nil +} + +// ClusterConfig contains the information necessary to create a cluster +type ClusterConfig struct { + InstanceID, ClusterID, Zone string + NumNodes int32 + StorageType StorageType +} + +func (cc *ClusterConfig) proto(project string) *btapb.Cluster { + return &btapb.Cluster{ + ServeNodes: cc.NumNodes, + DefaultStorageType: cc.StorageType.proto(), + Location: "projects/" + project + "/locations/" + cc.Zone, + } +} + +// ClusterInfo represents information about a cluster. +type ClusterInfo struct { + Name string // name of the cluster + Zone string // GCP zone of the cluster (e.g. "us-central1-a") + ServeNodes int // number of allocated serve nodes + State string // state of the cluster +} + +// CreateCluster creates a new cluster in an instance. +// This method will return when the cluster has been created or when an error occurs. +// +// This is a private alpha release of Cloud Bigtable replication. This feature +// is not currently available to most Cloud Bigtable customers. This feature +// might be changed in backward-incompatible ways and is not recommended for +// production use. It is not subject to any SLA or deprecation policy. +func (iac *InstanceAdminClient) CreateCluster(ctx context.Context, conf *ClusterConfig) error { + ctx = mergeOutgoingMetadata(ctx, iac.md) + + req := &btapb.CreateClusterRequest{ + Parent: "projects/" + iac.project + "/instances/" + conf.InstanceID, + ClusterId: conf.ClusterID, + Cluster: conf.proto(iac.project), + } + + lro, err := iac.iClient.CreateCluster(ctx, req) + if err != nil { + return err + } + resp := btapb.Cluster{} + return longrunning.InternalNewOperation(iac.lroClient, lro).Wait(ctx, &resp) +} + +// DeleteCluster deletes a cluster from an instance. +// +// This is a private alpha release of Cloud Bigtable replication. This feature +// is not currently available to most Cloud Bigtable customers. This feature +// might be changed in backward-incompatible ways and is not recommended for +// production use. It is not subject to any SLA or deprecation policy. +func (iac *InstanceAdminClient) DeleteCluster(ctx context.Context, instanceId, clusterId string) error { + ctx = mergeOutgoingMetadata(ctx, iac.md) + req := &btapb.DeleteClusterRequest{Name: "projects/" + iac.project + "/instances/" + instanceId + "/clusters/" + clusterId} + _, err := iac.iClient.DeleteCluster(ctx, req) + return err +} + +// UpdateCluster updates attributes of a cluster +func (iac *InstanceAdminClient) UpdateCluster(ctx context.Context, instanceId, clusterId string, serveNodes int32) error { + ctx = mergeOutgoingMetadata(ctx, iac.md) + cluster := &btapb.Cluster{ + Name: "projects/" + iac.project + "/instances/" + instanceId + "/clusters/" + clusterId, + ServeNodes: serveNodes} + lro, err := iac.iClient.UpdateCluster(ctx, cluster) + if err != nil { + return err + } + return longrunning.InternalNewOperation(iac.lroClient, lro).Wait(ctx, nil) +} + +// Clusters lists the clusters in an instance. 
+func (iac *InstanceAdminClient) Clusters(ctx context.Context, instanceId string) ([]*ClusterInfo, error) {
+	ctx = mergeOutgoingMetadata(ctx, iac.md)
+	req := &btapb.ListClustersRequest{Parent: "projects/" + iac.project + "/instances/" + instanceId}
+	res, err := iac.iClient.ListClusters(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	// TODO(garyelliott): Deal with failed_locations.
+	var cis []*ClusterInfo
+	for _, c := range res.Clusters {
+		nameParts := strings.Split(c.Name, "/")
+		locParts := strings.Split(c.Location, "/")
+		cis = append(cis, &ClusterInfo{
+			Name:       nameParts[len(nameParts)-1],
+			Zone:       locParts[len(locParts)-1],
+			ServeNodes: int(c.ServeNodes),
+			State:      c.State.String(),
+		})
+	}
+	return cis, nil
+}
+
+// GetCluster fetches a cluster in an instance.
+func (iac *InstanceAdminClient) GetCluster(ctx context.Context, instanceID, clusterID string) (*ClusterInfo, error) {
+	ctx = mergeOutgoingMetadata(ctx, iac.md)
+	req := &btapb.GetClusterRequest{Name: "projects/" + iac.project + "/instances/" + instanceID + "/clusters/" + clusterID}
+	c, err := iac.iClient.GetCluster(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+
+	nameParts := strings.Split(c.Name, "/")
+	locParts := strings.Split(c.Location, "/")
+	cis := &ClusterInfo{
+		Name:       nameParts[len(nameParts)-1],
+		Zone:       locParts[len(locParts)-1],
+		ServeNodes: int(c.ServeNodes),
+		State:      c.State.String(),
+	}
+	return cis, nil
+}
diff --git a/vendor/cloud.google.com/go/bigtable/admin_test.go b/vendor/cloud.google.com/go/bigtable/admin_test.go
new file mode 100644
index 0000000..0528441
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigtable/admin_test.go
@@ -0,0 +1,433 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigtable
+
+import (
+	"fmt"
+	"math"
+	"sort"
+	"strings"
+	"testing"
+	"time"
+
+	"cloud.google.com/go/internal/testutil"
+	"golang.org/x/net/context"
+	"google.golang.org/api/iterator"
+	btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
+)
+
+func TestAdminIntegration(t *testing.T) {
+	testEnv, err := NewIntegrationEnv()
+	if err != nil {
+		t.Fatalf("IntegrationEnv: %v", err)
+	}
+	defer testEnv.Close()
+
+	timeout := 2 * time.Second
+	if testEnv.Config().UseProd {
+		timeout = 5 * time.Minute
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), timeout)
+	defer cancel()
+
+	adminClient, err := testEnv.NewAdminClient()
+	if err != nil {
+		t.Fatalf("NewAdminClient: %v", err)
+	}
+	defer adminClient.Close()
+
+	iAdminClient, err := testEnv.NewInstanceAdminClient()
+	if err != nil {
+		t.Fatalf("NewInstanceAdminClient: %v", err)
+	}
+	if iAdminClient != nil {
+		defer iAdminClient.Close()
+
+		iInfo, err := iAdminClient.InstanceInfo(ctx, adminClient.instance)
+		if err != nil {
+			t.Errorf("InstanceInfo: %v", err)
+		}
+		if iInfo.Name != adminClient.instance {
+			t.Errorf("InstanceInfo returned name %#v, want %#v", iInfo.Name, adminClient.instance)
+		}
+	}
+
+	list := func() []string {
+		tbls, err := adminClient.Tables(ctx)
+		if err != nil {
+			t.Fatalf("Fetching list of tables: %v", err)
+		}
+		sort.Strings(tbls)
+		return tbls
+	}
+	containsAll := func(got, want []string) bool {
+		gotSet := make(map[string]bool)
+
+		for _, s := range got {
+			gotSet[s] = true
+		}
+		for _, s := range want {
+			if !gotSet[s] {
+				return false
+			}
+		}
+		return true
+	}
+
+	defer adminClient.DeleteTable(ctx, "mytable")
+
+	if err := adminClient.CreateTable(ctx, "mytable"); err != nil {
+		t.Fatalf("Creating table: %v", err)
+	}
+
+	defer adminClient.DeleteTable(ctx, "myothertable")
+
+	if err := adminClient.CreateTable(ctx, "myothertable"); err != nil {
+		t.Fatalf("Creating table: %v", err)
+	}
+
+	if got, want := list(), []string{"myothertable", "mytable"}; !containsAll(got, want) {
+		t.Errorf("adminClient.Tables returned %#v, want %#v", got, want)
+	}
+
+	adminClient.WaitForReplication(ctx, "mytable")
+
+	if err := adminClient.DeleteTable(ctx, "myothertable"); err != nil {
+		t.Fatalf("Deleting table: %v", err)
+	}
+	tables := list()
+	if got, want := tables, []string{"mytable"}; !containsAll(got, want) {
+		t.Errorf("adminClient.Tables returned %#v, want %#v", got, want)
+	}
+	if got, unwanted := tables, []string{"myothertable"}; containsAll(got, unwanted) {
+		t.Errorf("adminClient.Tables returned %#v, unwanted %#v", got, unwanted)
+	}
+
+	tblConf := TableConf{
+		TableID: "conftable",
+		Families: map[string]GCPolicy{
+			"fam1": MaxVersionsPolicy(1),
+			"fam2": MaxVersionsPolicy(2),
+		},
+	}
+	if err := adminClient.CreateTableFromConf(ctx, &tblConf); err != nil {
+		t.Fatalf("Creating table from TableConf: %v", err)
+	}
+	defer adminClient.DeleteTable(ctx, tblConf.TableID)
+
+	tblInfo, err := adminClient.TableInfo(ctx, tblConf.TableID)
+	if err != nil {
+		t.Fatalf("Getting table info: %v", err)
+	}
+	sort.Strings(tblInfo.Families)
+	wantFams := []string{"fam1", "fam2"}
+	if !testutil.Equal(tblInfo.Families, wantFams) {
+		t.Errorf("Column family mismatch, got %v, want %v", tblInfo.Families, wantFams)
+	}
+
+	// Populate mytable and drop row ranges
+	if err = adminClient.CreateColumnFamily(ctx, "mytable", "cf"); err != nil {
+		t.Fatalf("Creating column family: %v", err)
+	}
+
+	client, err := testEnv.NewClient()
+	if err != nil {
+		t.Fatalf("NewClient: %v", err)
+	}
+	defer client.Close()
+
+	tbl := client.Open("mytable")
+
+	prefixes := []string{"a", "b", "c"}
+	for _, prefix := range prefixes {
+		for i := 0; i < 5; i++ {
+			mut := NewMutation()
+			mut.Set("cf", "col", 0, []byte("1"))
+			if err := tbl.Apply(ctx, fmt.Sprintf("%v-%v", prefix, i), mut); err != nil {
+				t.Fatalf("Mutating row: %v", err)
+			}
+		}
+	}
+
+	if err = adminClient.DropRowRange(ctx, "mytable", "a"); err != nil {
+		t.Errorf("DropRowRange a: %v", err)
+	}
+	if err = adminClient.DropRowRange(ctx, "mytable", "c"); err != nil {
+		t.Errorf("DropRowRange c: %v", err)
+	}
+	if err = adminClient.DropRowRange(ctx, "mytable", "x"); err != nil {
+		t.Errorf("DropRowRange x: %v", err)
+	}
+
+	var gotRowCount int
+	tbl.ReadRows(ctx, RowRange{}, func(row Row) bool {
+		gotRowCount++
+		if !strings.HasPrefix(row.Key(), "b") {
+			t.Errorf("Invalid row after dropping range: %v", row)
+		}
+		return true
+	})
+	if gotRowCount != 5 {
+		t.Errorf("Invalid row count after dropping range: got %v, want %v", gotRowCount, 5)
+	}
+}
+
+func TestInstanceUpdate(t *testing.T) {
+	testEnv, err := NewIntegrationEnv()
+	if err != nil {
+		t.Fatalf("IntegrationEnv: %v", err)
+	}
+	defer testEnv.Close()
+
+	timeout := 2 * time.Second
+	if testEnv.Config().UseProd {
+		timeout = 5 * time.Minute
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), timeout)
+	defer cancel()
+
+	adminClient, err := testEnv.NewAdminClient()
+	if err != nil {
+		t.Fatalf("NewAdminClient: %v", err)
+	}
+
+	defer adminClient.Close()
+
+	iAdminClient, err := testEnv.NewInstanceAdminClient()
+	if err != nil {
+		t.Fatalf("NewInstanceAdminClient: %v", err)
+	}
+
+	if iAdminClient == nil {
+		return
+	}
+
+	defer iAdminClient.Close()
+
+	iInfo, err := iAdminClient.InstanceInfo(ctx, adminClient.instance)
+	if err != nil {
+		t.Errorf("InstanceInfo: %v", err)
+	}
+
+	if iInfo.Name != adminClient.instance {
+		t.Errorf("InstanceInfo returned name %#v, want %#v", iInfo.Name, adminClient.instance)
+	}
+
+	if iInfo.DisplayName != adminClient.instance {
+		t.Errorf("InstanceInfo returned display name %#v, want %#v", iInfo.DisplayName, adminClient.instance)
+	}
+
+	const numNodes = 4
+	// update cluster nodes
+	if err := iAdminClient.UpdateCluster(ctx, adminClient.instance, testEnv.Config().Cluster, int32(numNodes)); err != nil {
+		t.Errorf("UpdateCluster: %v", err)
+	}
+
+	// get cluster after updating
+	cis, err := iAdminClient.GetCluster(ctx, adminClient.instance, testEnv.Config().Cluster)
+	if err != nil {
+		t.Errorf("GetCluster: %v", err)
+	}
+	if cis.ServeNodes != int(numNodes) {
+		t.Errorf("ServeNodes returned %d, want %d", cis.ServeNodes, int(numNodes))
+	}
+}
+
+func TestAdminSnapshotIntegration(t *testing.T) {
+	testEnv, err := NewIntegrationEnv()
+	if err != nil {
+		t.Fatalf("IntegrationEnv: %v", err)
+	}
+	defer testEnv.Close()
+
+	if !testEnv.Config().UseProd {
+		t.Skip("emulator doesn't support snapshots")
+	}
+
+	timeout := 2 * time.Second
+	if testEnv.Config().UseProd {
+		timeout = 5 * time.Minute
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), timeout)
+	defer cancel()
+
+	adminClient, err := testEnv.NewAdminClient()
+	if err != nil {
+		t.Fatalf("NewAdminClient: %v", err)
+	}
+	defer adminClient.Close()
+
+	table := testEnv.Config().Table
+	cluster := testEnv.Config().Cluster
+
+	list := func(cluster string) ([]*SnapshotInfo, error) {
+		var infos []*SnapshotInfo
+
+		it := adminClient.ListSnapshots(ctx, cluster)
+		for {
+			s, err := it.Next()
+			if err == iterator.Done {
+				break
+			}
+			if err != nil {
+				return nil, err
+			}
+			infos = append(infos, s)
+		}
+		return infos, nil
+	}
+
+	// Delete the table at the end of the test. Schedule ahead of time
+	// in case the client fails
+	defer adminClient.DeleteTable(ctx, table)
+
+	if err := adminClient.CreateTable(ctx, table); err != nil {
+		t.Fatalf("Creating table: %v", err)
+	}
+
+	// Precondition: no snapshots
+	snapshots, err := list(cluster)
+	if err != nil {
+		t.Fatalf("Initial snapshot list: %v", err)
+	}
+	if got, want := len(snapshots), 0; got != want {
+		t.Fatalf("Initial snapshot list len: %d, want: %d", got, want)
+	}
+
+	// Create snapshot
+	defer adminClient.DeleteSnapshot(ctx, cluster, "mysnapshot")
+
+	if err = adminClient.SnapshotTable(ctx, table, cluster, "mysnapshot", 5*time.Hour); err != nil {
+		t.Fatalf("Creating snapshot: %v", err)
+	}
+
+	// List snapshot
+	snapshots, err = list(cluster)
+	if err != nil {
+		t.Fatalf("Listing snapshots: %v", err)
+	}
+	if got, want := len(snapshots), 1; got != want {
+		t.Fatalf("Listing snapshot count: %d, want: %d", got, want)
+	}
+	if got, want := snapshots[0].Name, "mysnapshot"; got != want {
+		t.Fatalf("Snapshot name: %s, want: %s", got, want)
+	}
+	if got, want := snapshots[0].SourceTable, table; got != want {
+		t.Fatalf("Snapshot SourceTable: %s, want: %s", got, want)
+	}
+	if got, want := snapshots[0].DeleteTime, snapshots[0].CreateTime.Add(5*time.Hour); math.Abs(got.Sub(want).Minutes()) > 1 {
+		t.Fatalf("Snapshot DeleteTime: %s, want: %s", got, want)
+	}
+
+	// Get snapshot
+	snapshot, err := adminClient.SnapshotInfo(ctx, cluster, "mysnapshot")
+	if err != nil {
+		t.Fatalf("SnapshotInfo: %v", err)
+	}
+	if got, want := *snapshot, *snapshots[0]; got != want {
+		t.Fatalf("SnapshotInfo: %v, want: %v", got, want)
+	}
+
+	// Restore
+	restoredTable := table + "-restored"
+	defer adminClient.DeleteTable(ctx, restoredTable)
+	if err = adminClient.CreateTableFromSnapshot(ctx, restoredTable, cluster, "mysnapshot"); err != nil {
+		t.Fatalf("CreateTableFromSnapshot: %v", err)
+	}
+	if _, err := adminClient.TableInfo(ctx, restoredTable); err != nil {
+		t.Fatalf("Restored TableInfo: %v", err)
+	}
+
+	// Delete snapshot
+	if err = adminClient.DeleteSnapshot(ctx, cluster, "mysnapshot"); err != nil {
+		t.Fatalf("DeleteSnapshot: %v", err)
+	}
+	snapshots, err = list(cluster)
+	if err != nil {
+		t.Fatalf("List after Delete: %v", err)
+	}
+	if got, want := len(snapshots), 0; got != want {
+		t.Fatalf("List after delete len: %d, want: %d", got, want)
+	}
+}
+
+func TestGranularity(t *testing.T) {
+	testEnv, err := NewIntegrationEnv()
+	if err != nil {
+		t.Fatalf("IntegrationEnv: %v", err)
+	}
+	defer testEnv.Close()
+
+	timeout := 2 * time.Second
+	if testEnv.Config().UseProd {
+		timeout = 5 * time.Minute
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), timeout)
+	defer cancel()
+
+	adminClient, err := testEnv.NewAdminClient()
+	if err != nil {
+		t.Fatalf("NewAdminClient: %v", err)
+	}
+	defer adminClient.Close()
+
+	list := func() []string {
+		tbls, err := adminClient.Tables(ctx)
+		if err != nil {
+			t.Fatalf("Fetching list of tables: %v", err)
+		}
+		sort.Strings(tbls)
+		return tbls
+	}
+	containsAll := func(got, want []string) bool {
+		gotSet := make(map[string]bool)
+
+		for _, s := range got {
+			gotSet[s] = true
+		}
+		for _, s := range want {
+			if !gotSet[s] {
+				return false
+			}
+		}
+		return true
+	}
+
+	defer adminClient.DeleteTable(ctx, "mytable")
+
+	if err := adminClient.CreateTable(ctx, "mytable"); err != nil {
+		t.Fatalf("Creating table: %v", err)
+	}
+
+	tables := list()
+	if got, want := tables, []string{"mytable"}; !containsAll(got, want) {
+		t.Errorf("adminClient.Tables returned %#v, want %#v", got, want)
+	}
+
+	// calling ModifyColumnFamilies to check the granularity of table
+	prefix := adminClient.instancePrefix()
+	req := &btapb.ModifyColumnFamiliesRequest{
+		Name: prefix + "/tables/" + "mytable",
+		Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
+			Id:  "cf",
+			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{Create: &btapb.ColumnFamily{}},
+		}},
+	}
+	table, err := adminClient.tClient.ModifyColumnFamilies(ctx, req)
+	if err != nil {
+		t.Fatalf("Creating column family: %v", err)
+	}
+	if table.Granularity != btapb.Table_TimestampGranularity(btapb.Table_MILLIS) {
+		t.Errorf("ModifyColumnFamilies returned granularity %#v, want %#v", table.Granularity, btapb.Table_TimestampGranularity(btapb.Table_MILLIS))
+	}
+}
diff --git a/vendor/cloud.google.com/go/bigtable/bigtable.go b/vendor/cloud.google.com/go/bigtable/bigtable.go
new file mode 100644
index 0000000..69e261d
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigtable/bigtable.go
@@ -0,0 +1,884 @@
+/*
+Copyright 2015 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package bigtable // import "cloud.google.com/go/bigtable"
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+	"time"
+
+	"cloud.google.com/go/bigtable/internal/gax"
+	btopt "cloud.google.com/go/bigtable/internal/option"
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+	"google.golang.org/api/option"
+	gtransport "google.golang.org/api/transport/grpc"
+	btpb "google.golang.org/genproto/googleapis/bigtable/v2"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+const prodAddr = "bigtable.googleapis.com:443"
+
+// Client is a client for reading and writing data to tables in an instance.
+//
+// A Client is safe to use concurrently, except for its Close method.
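+//
+// A typical setup sketch (not part of the upstream file); the project,
+// instance, and table IDs below are placeholders:
+//
+//	client, err := bigtable.NewClient(ctx, "my-project", "my-instance")
+//	if err != nil { /* handle err */ }
+//	defer client.Close()
+//	tbl := client.Open("my-table")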
+type Client struct { + conn *grpc.ClientConn + client btpb.BigtableClient + project, instance string + // App Profiles are part of the private alpha release of Cloud Bigtable replication. + // This feature + // is not currently available to most Cloud Bigtable customers. This feature + // might be changed in backward-incompatible ways and is not recommended for + // production use. It is not subject to any SLA or deprecation policy. + appProfile string +} + +// ClientConfig has configurations for the client. +type ClientConfig struct { + // The id of the app profile to associate with all data operations sent from this client. + // If unspecified, the default app profile for the instance will be used. + AppProfile string +} + +// NewClient creates a new Client for a given project and instance. +// The default ClientConfig will be used. +func NewClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*Client, error) { + return NewClientWithConfig(ctx, project, instance, ClientConfig{}, opts...) +} + +func NewClientWithConfig(ctx context.Context, project, instance string, config ClientConfig, opts ...option.ClientOption) (*Client, error) { + o, err := btopt.DefaultClientOptions(prodAddr, Scope, clientUserAgent) + if err != nil { + return nil, err + } + // Default to a small connection pool that can be overridden. + o = append(o, + option.WithGRPCConnectionPool(4), + // Set the max size to correspond to server-side limits. + option.WithGRPCDialOption(grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(100<<20), grpc.MaxCallRecvMsgSize(100<<20))), + // TODO(grpc/grpc-go#1388) using connection pool without WithBlock + // can cause RPCs to fail randomly. We can delete this after the issue is fixed. + option.WithGRPCDialOption(grpc.WithBlock())) + o = append(o, opts...) + conn, err := gtransport.Dial(ctx, o...) + if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + + return &Client{ + conn: conn, + client: btpb.NewBigtableClient(conn), + project: project, + instance: instance, + appProfile: config.AppProfile, + }, nil +} + +// Close closes the Client. +func (c *Client) Close() error { + return c.conn.Close() +} + +var ( + idempotentRetryCodes = []codes.Code{codes.DeadlineExceeded, codes.Unavailable, codes.Aborted} + isIdempotentRetryCode = make(map[codes.Code]bool) + retryOptions = []gax.CallOption{ + gax.WithDelayTimeoutSettings(100*time.Millisecond, 2000*time.Millisecond, 1.2), + gax.WithRetryCodes(idempotentRetryCodes), + } +) + +func init() { + for _, code := range idempotentRetryCodes { + isIdempotentRetryCode[code] = true + } +} + +func (c *Client) fullTableName(table string) string { + return fmt.Sprintf("projects/%s/instances/%s/tables/%s", c.project, c.instance, table) +} + +// A Table refers to a table. +// +// A Table is safe to use concurrently. +type Table struct { + c *Client + table string + + // Metadata to be sent with each request. + md metadata.MD +} + +// Open opens a table. +func (c *Client) Open(table string) *Table { + return &Table{ + c: c, + table: table, + md: metadata.Pairs(resourcePrefixHeader, c.fullTableName(table)), + } +} + +// TODO(dsymonds): Read method that returns a sequence of ReadItems. + +// ReadRows reads rows from a table. f is called for each row. +// If f returns false, the stream is shut down and ReadRows returns. +// f owns its argument, and f is called serially in order by row key. +// +// By default, the yielded rows will contain all values in all cells. +// Use RowFilter to limit the cells returned. 
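+//
+// A sketch of a prefix scan; the "user-" prefix and the filter are
+// illustrative only:
+//
+//	err := tbl.ReadRows(ctx, bigtable.PrefixRange("user-"), func(r bigtable.Row) bool {
+//		fmt.Println(r.Key())
+//		return true // returning false stops the scan early
+//	}, bigtable.RowFilter(bigtable.LatestNFilter(1)))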
+func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts ...ReadOption) error { + ctx = mergeOutgoingMetadata(ctx, t.md) + + var prevRowKey string + var err error + ctx = traceStartSpan(ctx, "cloud.google.com/go/bigtable.ReadRows") + defer func() { traceEndSpan(ctx, err) }() + attrMap := make(map[string]interface{}) + err = gax.Invoke(ctx, func(ctx context.Context) error { + if !arg.valid() { + // Empty row set, no need to make an API call. + // NOTE: we must return early if arg == RowList{} because reading + // an empty RowList from bigtable returns all rows from that table. + return nil + } + req := &btpb.ReadRowsRequest{ + TableName: t.c.fullTableName(t.table), + AppProfileId: t.c.appProfile, + Rows: arg.proto(), + } + for _, opt := range opts { + opt.set(req) + } + ctx, cancel := context.WithCancel(ctx) // for aborting the stream + defer cancel() + + startTime := time.Now() + stream, err := t.c.client.ReadRows(ctx, req) + if err != nil { + return err + } + cr := newChunkReader() + for { + res, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + // Reset arg for next Invoke call. + arg = arg.retainRowsAfter(prevRowKey) + attrMap["rowKey"] = prevRowKey + attrMap["error"] = err.Error() + attrMap["time_secs"] = time.Since(startTime).Seconds() + tracePrintf(ctx, attrMap, "Retry details in ReadRows") + return err + } + + for _, cc := range res.Chunks { + row, err := cr.Process(cc) + if err != nil { + // No need to prepare for a retry, this is an unretryable error. + return err + } + if row == nil { + continue + } + prevRowKey = row.Key() + if !f(row) { + // Cancel and drain stream. + cancel() + for { + if _, err := stream.Recv(); err != nil { + // The stream has ended. We don't return an error + // because the caller has intentionally interrupted the scan. + return nil + } + } + } + } + if err := cr.Close(); err != nil { + // No need to prepare for a retry, this is an unretryable error. + return err + } + } + return err + }, retryOptions...) + + return err +} + +// ReadRow is a convenience implementation of a single-row reader. +// A missing row will return a zero-length map and a nil error. +func (t *Table) ReadRow(ctx context.Context, row string, opts ...ReadOption) (Row, error) { + var r Row + err := t.ReadRows(ctx, SingleRow(row), func(rr Row) bool { + r = rr + return true + }, opts...) + return r, err +} + +// decodeFamilyProto adds the cell data from f to the given row. +func decodeFamilyProto(r Row, row string, f *btpb.Family) { + fam := f.Name // does not have colon + for _, col := range f.Columns { + for _, cell := range col.Cells { + ri := ReadItem{ + Row: row, + Column: fam + ":" + string(col.Qualifier), + Timestamp: Timestamp(cell.TimestampMicros), + Value: cell.Value, + } + r[fam] = append(r[fam], ri) + } + } +} + +// RowSet is a set of rows to be read. It is satisfied by RowList, RowRange and RowRangeList. +// The serialized size of the RowSet must be no larger than 1MiB. +type RowSet interface { + proto() *btpb.RowSet + + // retainRowsAfter returns a new RowSet that does not include the + // given row key or any row key lexicographically less than it. + retainRowsAfter(lastRowKey string) RowSet + + // Valid reports whether this set can cover at least one row. + valid() bool +} + +// RowList is a sequence of row keys. 
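+// For example, RowList{"alice", "bob"} (hypothetical keys) reads exactly those
+// two rows when passed to ReadRows with a callback f:
+//
+//	err := tbl.ReadRows(ctx, bigtable.RowList{"alice", "bob"}, f)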
+type RowList []string + +func (r RowList) proto() *btpb.RowSet { + keys := make([][]byte, len(r)) + for i, row := range r { + keys[i] = []byte(row) + } + return &btpb.RowSet{RowKeys: keys} +} + +func (r RowList) retainRowsAfter(lastRowKey string) RowSet { + var retryKeys RowList + for _, key := range r { + if key > lastRowKey { + retryKeys = append(retryKeys, key) + } + } + return retryKeys +} + +func (r RowList) valid() bool { + return len(r) > 0 +} + +// A RowRange is a half-open interval [Start, Limit) encompassing +// all the rows with keys at least as large as Start, and less than Limit. +// (Bigtable string comparison is the same as Go's.) +// A RowRange can be unbounded, encompassing all keys at least as large as Start. +type RowRange struct { + start string + limit string +} + +// NewRange returns the new RowRange [begin, end). +func NewRange(begin, end string) RowRange { + return RowRange{ + start: begin, + limit: end, + } +} + +// Unbounded tests whether a RowRange is unbounded. +func (r RowRange) Unbounded() bool { + return r.limit == "" +} + +// Contains says whether the RowRange contains the key. +func (r RowRange) Contains(row string) bool { + return r.start <= row && (r.limit == "" || r.limit > row) +} + +// String provides a printable description of a RowRange. +func (r RowRange) String() string { + a := strconv.Quote(r.start) + if r.Unbounded() { + return fmt.Sprintf("[%s,∞)", a) + } + return fmt.Sprintf("[%s,%q)", a, r.limit) +} + +func (r RowRange) proto() *btpb.RowSet { + rr := &btpb.RowRange{ + StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte(r.start)}, + } + if !r.Unbounded() { + rr.EndKey = &btpb.RowRange_EndKeyOpen{EndKeyOpen: []byte(r.limit)} + } + return &btpb.RowSet{RowRanges: []*btpb.RowRange{rr}} +} + +func (r RowRange) retainRowsAfter(lastRowKey string) RowSet { + if lastRowKey == "" || lastRowKey < r.start { + return r + } + // Set the beginning of the range to the row after the last scanned. + start := lastRowKey + "\x00" + if r.Unbounded() { + return InfiniteRange(start) + } + return NewRange(start, r.limit) +} + +func (r RowRange) valid() bool { + return r.Unbounded() || r.start < r.limit +} + +// RowRangeList is a sequence of RowRanges representing the union of the ranges. +type RowRangeList []RowRange + +func (r RowRangeList) proto() *btpb.RowSet { + ranges := make([]*btpb.RowRange, len(r)) + for i, rr := range r { + // RowRange.proto() returns a RowSet with a single element RowRange array + ranges[i] = rr.proto().RowRanges[0] + } + return &btpb.RowSet{RowRanges: ranges} +} + +func (r RowRangeList) retainRowsAfter(lastRowKey string) RowSet { + if lastRowKey == "" { + return r + } + // Return a list of any range that has not yet been completely processed + var ranges RowRangeList + for _, rr := range r { + retained := rr.retainRowsAfter(lastRowKey) + if retained.valid() { + ranges = append(ranges, retained.(RowRange)) + } + } + return ranges +} + +func (r RowRangeList) valid() bool { + for _, rr := range r { + if rr.valid() { + return true + } + } + return false +} + +// SingleRow returns a RowSet for reading a single row. +func SingleRow(row string) RowSet { + return RowList{row} +} + +// PrefixRange returns a RowRange consisting of all keys starting with the prefix. +func PrefixRange(prefix string) RowRange { + return RowRange{ + start: prefix, + limit: prefixSuccessor(prefix), + } +} + +// InfiniteRange returns the RowRange consisting of all keys at least as +// large as start. 
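+// For example, InfiniteRange("user-") ("user-" being a placeholder key) covers
+// "user-", "user-0", and every other key that sorts at or after "user-".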
+func InfiniteRange(start string) RowRange { + return RowRange{ + start: start, + limit: "", + } +} + +// prefixSuccessor returns the lexically smallest string greater than the +// prefix, if it exists, or "" otherwise. In either case, it is the string +// needed for the Limit of a RowRange. +func prefixSuccessor(prefix string) string { + if prefix == "" { + return "" // infinite range + } + n := len(prefix) + for n--; n >= 0 && prefix[n] == '\xff'; n-- { + } + if n == -1 { + return "" + } + ans := []byte(prefix[:n]) + ans = append(ans, prefix[n]+1) + return string(ans) +} + +// A ReadOption is an optional argument to ReadRows. +type ReadOption interface { + set(req *btpb.ReadRowsRequest) +} + +// RowFilter returns a ReadOption that applies f to the contents of read rows. +// +// If multiple RowFilters are provided, only the last is used. To combine filters, +// use ChainFilters or InterleaveFilters instead. +func RowFilter(f Filter) ReadOption { return rowFilter{f} } + +type rowFilter struct{ f Filter } + +func (rf rowFilter) set(req *btpb.ReadRowsRequest) { req.Filter = rf.f.proto() } + +// LimitRows returns a ReadOption that will limit the number of rows to be read. +func LimitRows(limit int64) ReadOption { return limitRows{limit} } + +type limitRows struct{ limit int64 } + +func (lr limitRows) set(req *btpb.ReadRowsRequest) { req.RowsLimit = lr.limit } + +// mutationsAreRetryable returns true if all mutations are idempotent +// and therefore retryable. A mutation is idempotent iff all cell timestamps +// have an explicit timestamp set and do not rely on the timestamp being set on the server. +func mutationsAreRetryable(muts []*btpb.Mutation) bool { + serverTime := int64(ServerTime) + for _, mut := range muts { + setCell := mut.GetSetCell() + if setCell != nil && setCell.TimestampMicros == serverTime { + return false + } + } + return true +} + +// Apply applies a Mutation to a specific row. +func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...ApplyOption) error { + ctx = mergeOutgoingMetadata(ctx, t.md) + after := func(res proto.Message) { + for _, o := range opts { + o.after(res) + } + } + + var err error + ctx = traceStartSpan(ctx, "cloud.google.com/go/bigtable/Apply") + defer func() { traceEndSpan(ctx, err) }() + var callOptions []gax.CallOption + if m.cond == nil { + req := &btpb.MutateRowRequest{ + TableName: t.c.fullTableName(t.table), + AppProfileId: t.c.appProfile, + RowKey: []byte(row), + Mutations: m.ops, + } + if mutationsAreRetryable(m.ops) { + callOptions = retryOptions + } + var res *btpb.MutateRowResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + res, err = t.c.client.MutateRow(ctx, req) + return err + }, callOptions...) 
+ if err == nil { + after(res) + } + return err + } + + req := &btpb.CheckAndMutateRowRequest{ + TableName: t.c.fullTableName(t.table), + AppProfileId: t.c.appProfile, + RowKey: []byte(row), + PredicateFilter: m.cond.proto(), + } + if m.mtrue != nil { + if m.mtrue.cond != nil { + return errors.New("bigtable: conditional mutations cannot be nested") + } + req.TrueMutations = m.mtrue.ops + } + if m.mfalse != nil { + if m.mfalse.cond != nil { + return errors.New("bigtable: conditional mutations cannot be nested") + } + req.FalseMutations = m.mfalse.ops + } + if mutationsAreRetryable(req.TrueMutations) && mutationsAreRetryable(req.FalseMutations) { + callOptions = retryOptions + } + var cmRes *btpb.CheckAndMutateRowResponse + err = gax.Invoke(ctx, func(ctx context.Context) error { + var err error + cmRes, err = t.c.client.CheckAndMutateRow(ctx, req) + return err + }, callOptions...) + if err == nil { + after(cmRes) + } + return err +} + +// An ApplyOption is an optional argument to Apply. +type ApplyOption interface { + after(res proto.Message) +} + +type applyAfterFunc func(res proto.Message) + +func (a applyAfterFunc) after(res proto.Message) { a(res) } + +// GetCondMutationResult returns an ApplyOption that reports whether the conditional +// mutation's condition matched. +func GetCondMutationResult(matched *bool) ApplyOption { + return applyAfterFunc(func(res proto.Message) { + if res, ok := res.(*btpb.CheckAndMutateRowResponse); ok { + *matched = res.PredicateMatched + } + }) +} + +// Mutation represents a set of changes for a single row of a table. +type Mutation struct { + ops []*btpb.Mutation + + // for conditional mutations + cond Filter + mtrue, mfalse *Mutation +} + +// NewMutation returns a new mutation. +func NewMutation() *Mutation { + return new(Mutation) +} + +// NewCondMutation returns a conditional mutation. +// The given row filter determines which mutation is applied: +// If the filter matches any cell in the row, mtrue is applied; +// otherwise, mfalse is applied. +// Either given mutation may be nil. +func NewCondMutation(cond Filter, mtrue, mfalse *Mutation) *Mutation { + return &Mutation{cond: cond, mtrue: mtrue, mfalse: mfalse} +} + +// Set sets a value in a specified column, with the given timestamp. +// The timestamp will be truncated to millisecond granularity. +// A timestamp of ServerTime means to use the server timestamp. +func (m *Mutation) Set(family, column string, ts Timestamp, value []byte) { + m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{ + FamilyName: family, + ColumnQualifier: []byte(column), + TimestampMicros: int64(ts.TruncateToMilliseconds()), + Value: value, + }}}) +} + +// DeleteCellsInColumn will delete all the cells whose columns are family:column. +func (m *Mutation) DeleteCellsInColumn(family, column string) { + m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{DeleteFromColumn: &btpb.Mutation_DeleteFromColumn{ + FamilyName: family, + ColumnQualifier: []byte(column), + }}}) +} + +// DeleteTimestampRange deletes all cells whose columns are family:column +// and whose timestamps are in the half-open interval [start, end). +// If end is zero, it will be interpreted as infinity. +// The timestamps will be truncated to millisecond granularity. 
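+//
+// A sketch with placeholder family/column names: given cells at microsecond
+// timestamps 1000, 2000, and 3000,
+//
+//	mut := bigtable.NewMutation()
+//	mut.DeleteTimestampRange("cf", "col", 2000, 3000) // removes only the ts=2000 cell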
+func (m *Mutation) DeleteTimestampRange(family, column string, start, end Timestamp) { + m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{DeleteFromColumn: &btpb.Mutation_DeleteFromColumn{ + FamilyName: family, + ColumnQualifier: []byte(column), + TimeRange: &btpb.TimestampRange{ + StartTimestampMicros: int64(start.TruncateToMilliseconds()), + EndTimestampMicros: int64(end.TruncateToMilliseconds()), + }, + }}}) +} + +// DeleteCellsInFamily will delete all the cells whose columns are family:*. +func (m *Mutation) DeleteCellsInFamily(family string) { + m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromFamily_{DeleteFromFamily: &btpb.Mutation_DeleteFromFamily{ + FamilyName: family, + }}}) +} + +// DeleteRow deletes the entire row. +func (m *Mutation) DeleteRow() { + m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromRow_{DeleteFromRow: &btpb.Mutation_DeleteFromRow{}}}) +} + +// entryErr is a container that combines an entry with the error that was returned for it. +// Err may be nil if no error was returned for the Entry, or if the Entry has not yet been processed. +type entryErr struct { + Entry *btpb.MutateRowsRequest_Entry + Err error +} + +// ApplyBulk applies multiple Mutations, up to a maximum of 100,000. +// Each mutation is individually applied atomically, +// but the set of mutations may be applied in any order. +// +// Two types of failures may occur. If the entire process +// fails, (nil, err) will be returned. If specific mutations +// fail to apply, ([]err, nil) will be returned, and the errors +// will correspond to the relevant rowKeys/muts arguments. +// +// Conditional mutations cannot be applied in bulk and providing one will result in an error. +func (t *Table) ApplyBulk(ctx context.Context, rowKeys []string, muts []*Mutation, opts ...ApplyOption) ([]error, error) { + ctx = mergeOutgoingMetadata(ctx, t.md) + if len(rowKeys) != len(muts) { + return nil, fmt.Errorf("mismatched rowKeys and mutation array lengths: %d, %d", len(rowKeys), len(muts)) + } + + origEntries := make([]*entryErr, len(rowKeys)) + for i, key := range rowKeys { + mut := muts[i] + if mut.cond != nil { + return nil, errors.New("conditional mutations cannot be applied in bulk") + } + origEntries[i] = &entryErr{Entry: &btpb.MutateRowsRequest_Entry{RowKey: []byte(key), Mutations: mut.ops}} + } + + // entries will be reduced after each invocation to just what needs to be retried. + entries := make([]*entryErr, len(rowKeys)) + copy(entries, origEntries) + var err error + ctx = traceStartSpan(ctx, "cloud.google.com/go/bigtable/ApplyBulk") + defer func() { traceEndSpan(ctx, err) }() + attrMap := make(map[string]interface{}) + err = gax.Invoke(ctx, func(ctx context.Context) error { + attrMap["rowCount"] = len(entries) + tracePrintf(ctx, attrMap, "Row count in ApplyBulk") + err := t.doApplyBulk(ctx, entries, opts...) + if err != nil { + // We want to retry the entire request with the current entries + return err + } + entries = t.getApplyBulkRetries(entries) + if len(entries) > 0 && len(idempotentRetryCodes) > 0 { + // We have at least one mutation that needs to be retried. + // Return an arbitrary error that is retryable according to callOptions. + return status.Errorf(idempotentRetryCodes[0], "Synthetic error: partial failure of ApplyBulk") + } + return nil + }, retryOptions...) + if err != nil { + return nil, err + } + + // Accumulate all of the errors into an array to return, interspersed with nils for successful + // entries. 
The absence of any errors means we should return nil. + var errs []error + var foundErr bool + for _, entry := range origEntries { + if entry.Err != nil { + foundErr = true + } + errs = append(errs, entry.Err) + } + if foundErr { + return errs, nil + } + return nil, nil +} + +// getApplyBulkRetries returns the entries that need to be retried +func (t *Table) getApplyBulkRetries(entries []*entryErr) []*entryErr { + var retryEntries []*entryErr + for _, entry := range entries { + err := entry.Err + if err != nil && isIdempotentRetryCode[grpc.Code(err)] && mutationsAreRetryable(entry.Entry.Mutations) { + // There was an error and the entry is retryable. + retryEntries = append(retryEntries, entry) + } + } + return retryEntries +} + +// doApplyBulk does the work of a single ApplyBulk invocation +func (t *Table) doApplyBulk(ctx context.Context, entryErrs []*entryErr, opts ...ApplyOption) error { + after := func(res proto.Message) { + for _, o := range opts { + o.after(res) + } + } + + entries := make([]*btpb.MutateRowsRequest_Entry, len(entryErrs)) + for i, entryErr := range entryErrs { + entries[i] = entryErr.Entry + } + req := &btpb.MutateRowsRequest{ + TableName: t.c.fullTableName(t.table), + AppProfileId: t.c.appProfile, + Entries: entries, + } + stream, err := t.c.client.MutateRows(ctx, req) + if err != nil { + return err + } + for { + res, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + + for i, entry := range res.Entries { + s := entry.Status + if s.Code == int32(codes.OK) { + entryErrs[i].Err = nil + } else { + entryErrs[i].Err = status.Errorf(codes.Code(s.Code), s.Message) + } + } + after(res) + } + return nil +} + +// Timestamp is in units of microseconds since 1 January 1970. +type Timestamp int64 + +// ServerTime is a specific Timestamp that may be passed to (*Mutation).Set. +// It indicates that the server's timestamp should be used. +const ServerTime Timestamp = -1 + +// Time converts a time.Time into a Timestamp. +func Time(t time.Time) Timestamp { return Timestamp(t.UnixNano() / 1e3) } + +// Now returns the Timestamp representation of the current time on the client. +func Now() Timestamp { return Time(time.Now()) } + +// Time converts a Timestamp into a time.Time. +func (ts Timestamp) Time() time.Time { return time.Unix(0, int64(ts)*1e3) } + +// TruncateToMilliseconds truncates a Timestamp to millisecond granularity, +// which is currently the only granularity supported. +func (ts Timestamp) TruncateToMilliseconds() Timestamp { + if ts == ServerTime { + return ts + } + return ts - ts%1000 +} + +// ApplyReadModifyWrite applies a ReadModifyWrite to a specific row. +// It returns the newly written cells. +func (t *Table) ApplyReadModifyWrite(ctx context.Context, row string, m *ReadModifyWrite) (Row, error) { + ctx = mergeOutgoingMetadata(ctx, t.md) + req := &btpb.ReadModifyWriteRowRequest{ + TableName: t.c.fullTableName(t.table), + AppProfileId: t.c.appProfile, + RowKey: []byte(row), + Rules: m.ops, + } + res, err := t.c.client.ReadModifyWriteRow(ctx, req) + if err != nil { + return nil, err + } + if res.Row == nil { + return nil, errors.New("unable to apply ReadModifyWrite: res.Row=nil") + } + r := make(Row) + for _, fam := range res.Row.Families { // res is *btpb.Row, fam is *btpb.Family + decodeFamilyProto(r, row, fam) + } + return r, nil +} + +// ReadModifyWrite represents a set of operations on a single row of a table. +// It is like Mutation but for non-idempotent changes. 
+// When applied, these operations operate on the latest values of the row's cells, +// and result in a new value being written to the relevant cell with a timestamp +// that is max(existing timestamp, current server time). +// +// The application of a ReadModifyWrite is atomic; concurrent ReadModifyWrites will +// be executed serially by the server. +type ReadModifyWrite struct { + ops []*btpb.ReadModifyWriteRule +} + +// NewReadModifyWrite returns a new ReadModifyWrite. +func NewReadModifyWrite() *ReadModifyWrite { return new(ReadModifyWrite) } + +// AppendValue appends a value to a specific cell's value. +// If the cell is unset, it will be treated as an empty value. +func (m *ReadModifyWrite) AppendValue(family, column string, v []byte) { + m.ops = append(m.ops, &btpb.ReadModifyWriteRule{ + FamilyName: family, + ColumnQualifier: []byte(column), + Rule: &btpb.ReadModifyWriteRule_AppendValue{AppendValue: v}, + }) +} + +// Increment interprets the value in a specific cell as a 64-bit big-endian signed integer, +// and adds a value to it. If the cell is unset, it will be treated as zero. +// If the cell is set and is not an 8-byte value, the entire ApplyReadModifyWrite +// operation will fail. +func (m *ReadModifyWrite) Increment(family, column string, delta int64) { + m.ops = append(m.ops, &btpb.ReadModifyWriteRule{ + FamilyName: family, + ColumnQualifier: []byte(column), + Rule: &btpb.ReadModifyWriteRule_IncrementAmount{IncrementAmount: delta}, + }) +} + +// mergeOutgoingMetadata returns a context populated by the existing outgoing metadata, +// if any, joined with internal metadata. +func mergeOutgoingMetadata(ctx context.Context, md metadata.MD) context.Context { + mdCopy, _ := metadata.FromOutgoingContext(ctx) + return metadata.NewOutgoingContext(ctx, metadata.Join(mdCopy, md)) +} + +func (t *Table) SampleRowKeys(ctx context.Context) ([]string, error) { + ctx = mergeOutgoingMetadata(ctx, t.md) + var sampledRowKeys []string + err := gax.Invoke(ctx, func(ctx context.Context) error { + sampledRowKeys = nil + req := &btpb.SampleRowKeysRequest{ + TableName: t.c.fullTableName(t.table), + AppProfileId: t.c.appProfile, + } + ctx, cancel := context.WithCancel(ctx) // for aborting the stream + defer cancel() + + stream, err := t.c.client.SampleRowKeys(ctx, req) + if err != nil { + return err + } + for { + res, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + + key := string(res.RowKey) + if key == "" { + continue + } + + sampledRowKeys = append(sampledRowKeys, key) + } + return nil + }, retryOptions...) + return sampledRowKeys, err +} diff --git a/vendor/cloud.google.com/go/bigtable/bigtable_test.go b/vendor/cloud.google.com/go/bigtable/bigtable_test.go new file mode 100644 index 0000000..0dec53f --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/bigtable_test.go @@ -0,0 +1,1163 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package bigtable + +import ( + "fmt" + "math/rand" + "strings" + "sync" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +func TestPrefix(t *testing.T) { + tests := []struct { + prefix, succ string + }{ + {"", ""}, + {"\xff", ""}, // when used, "" means Infinity + {"x\xff", "y"}, + {"\xfe", "\xff"}, + } + for _, tc := range tests { + got := prefixSuccessor(tc.prefix) + if got != tc.succ { + t.Errorf("prefixSuccessor(%q) = %q, want %s", tc.prefix, got, tc.succ) + continue + } + r := PrefixRange(tc.prefix) + if tc.succ == "" && r.limit != "" { + t.Errorf("PrefixRange(%q) got limit %q", tc.prefix, r.limit) + } + if tc.succ != "" && r.limit != tc.succ { + t.Errorf("PrefixRange(%q) got limit %q, want %q", tc.prefix, r.limit, tc.succ) + } + } +} + +func TestApplyErrors(t *testing.T) { + ctx := context.Background() + table := &Table{ + c: &Client{ + project: "P", + instance: "I", + }, + table: "t", + } + f := ColumnFilter("C") + m := NewMutation() + m.DeleteRow() + // Test nested conditional mutations. + cm := NewCondMutation(f, NewCondMutation(f, m, nil), nil) + if err := table.Apply(ctx, "x", cm); err == nil { + t.Error("got nil, want error") + } + cm = NewCondMutation(f, nil, NewCondMutation(f, m, nil)) + if err := table.Apply(ctx, "x", cm); err == nil { + t.Error("got nil, want error") + } +} + +func TestClientIntegration(t *testing.T) { + start := time.Now() + lastCheckpoint := start + checkpoint := func(s string) { + n := time.Now() + t.Logf("[%s] %v since start, %v since last checkpoint", s, n.Sub(start), n.Sub(lastCheckpoint)) + lastCheckpoint = n + } + + testEnv, err := NewIntegrationEnv() + if err != nil { + t.Fatalf("IntegrationEnv: %v", err) + } + + var timeout time.Duration + if testEnv.Config().UseProd { + timeout = 10 * time.Minute + t.Logf("Running test against production") + } else { + timeout = 1 * time.Minute + t.Logf("bttest.Server running on %s", testEnv.Config().AdminEndpoint) + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + client, err := testEnv.NewClient() + if err != nil { + t.Fatalf("Client: %v", err) + } + defer client.Close() + checkpoint("dialed Client") + + adminClient, err := testEnv.NewAdminClient() + if err != nil { + t.Fatalf("AdminClient: %v", err) + } + defer adminClient.Close() + checkpoint("dialed AdminClient") + + table := testEnv.Config().Table + + // Delete the table at the end of the test. + // Do this even before creating the table so that if this is running + // against production and CreateTable fails there's a chance of cleaning it up. + defer adminClient.DeleteTable(ctx, table) + + if err := adminClient.CreateTable(ctx, table); err != nil { + t.Fatalf("Creating table: %v", err) + } + checkpoint("created table") + if err := adminClient.CreateColumnFamily(ctx, table, "follows"); err != nil { + t.Fatalf("Creating column family: %v", err) + } + checkpoint(`created "follows" column family`) + + tbl := client.Open(table) + + // Insert some data. 
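+	// The rows form a small "follows" graph: each row key is a user, and each
+	// cell in the "follows" family marks another user they follow. The read
+	// tests below assert against this exact graph.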
+	initialData := map[string][]string{
+		"wmckinley":   {"tjefferson"},
+		"gwashington": {"jadams"},
+		"tjefferson":  {"gwashington", "jadams"}, // wmckinley set conditionally below
+		"jadams":      {"gwashington", "tjefferson"},
+	}
+	for row, ss := range initialData {
+		mut := NewMutation()
+		for _, name := range ss {
+			mut.Set("follows", name, 0, []byte("1"))
+		}
+		if err := tbl.Apply(ctx, row, mut); err != nil {
+			t.Errorf("Mutating row %q: %v", row, err)
+		}
+	}
+	checkpoint("inserted initial data")
+
+	// TODO(igorbernstein): re-enable this when ready
+	//if err := adminClient.WaitForReplication(ctx, table); err != nil {
+	//	t.Errorf("Waiting for replication for table %q: %v", table, err)
+	//}
+	//checkpoint("waited for replication")
+
+	// Do a conditional mutation with a complex filter.
+	mutTrue := NewMutation()
+	mutTrue.Set("follows", "wmckinley", 0, []byte("1"))
+	filter := ChainFilters(ColumnFilter("gwash[iz].*"), ValueFilter("."))
+	mut := NewCondMutation(filter, mutTrue, nil)
+	if err := tbl.Apply(ctx, "tjefferson", mut); err != nil {
+		t.Errorf("Conditionally mutating row: %v", err)
+	}
+	// Do a second conditional mutation with a filter that does not match,
+	// and thus no changes should be made.
+	mutTrue = NewMutation()
+	mutTrue.DeleteRow()
+	filter = ColumnFilter("snoop.dogg")
+	mut = NewCondMutation(filter, mutTrue, nil)
+	if err := tbl.Apply(ctx, "tjefferson", mut); err != nil {
+		t.Errorf("Conditionally mutating row: %v", err)
+	}
+	checkpoint("did two conditional mutations")
+
+	// Fetch a row.
+	row, err := tbl.ReadRow(ctx, "jadams")
+	if err != nil {
+		t.Fatalf("Reading a row: %v", err)
+	}
+	wantRow := Row{
+		"follows": []ReadItem{
+			{Row: "jadams", Column: "follows:gwashington", Value: []byte("1")},
+			{Row: "jadams", Column: "follows:tjefferson", Value: []byte("1")},
+		},
+	}
+	if !testutil.Equal(row, wantRow) {
+		t.Errorf("Read row mismatch.\n got %#v\nwant %#v", row, wantRow)
+	}
+	checkpoint("tested ReadRow")
+
+	// Do a bunch of reads with filters.
+	readTests := []struct {
+		desc   string
+		rr     RowSet
+		filter Filter     // may be nil
+		limit  ReadOption // may be nil
+
+		// We do the read, grab all the cells, turn them into "<row>-<col>-<val>",
+		// and join with a comma.
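+		// For example, ReadItem{Row: "jadams", Column: "follows:gwashington",
+		// Value: []byte("1")} becomes "jadams-gwashington-1".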
+ want string + }{ + { + desc: "read all, unfiltered", + rr: RowRange{}, + want: "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,tjefferson-gwashington-1,tjefferson-jadams-1,tjefferson-wmckinley-1,wmckinley-tjefferson-1", + }, + { + desc: "read with InfiniteRange, unfiltered", + rr: InfiniteRange("tjefferson"), + want: "tjefferson-gwashington-1,tjefferson-jadams-1,tjefferson-wmckinley-1,wmckinley-tjefferson-1", + }, + { + desc: "read with NewRange, unfiltered", + rr: NewRange("gargamel", "hubbard"), + want: "gwashington-jadams-1", + }, + { + desc: "read with PrefixRange, unfiltered", + rr: PrefixRange("jad"), + want: "jadams-gwashington-1,jadams-tjefferson-1", + }, + { + desc: "read with SingleRow, unfiltered", + rr: SingleRow("wmckinley"), + want: "wmckinley-tjefferson-1", + }, + { + desc: "read all, with ColumnFilter", + rr: RowRange{}, + filter: ColumnFilter(".*j.*"), // matches "jadams" and "tjefferson" + want: "gwashington-jadams-1,jadams-tjefferson-1,tjefferson-jadams-1,wmckinley-tjefferson-1", + }, + { + desc: "read range, with ColumnRangeFilter", + rr: RowRange{}, + filter: ColumnRangeFilter("follows", "h", "k"), + want: "gwashington-jadams-1,tjefferson-jadams-1", + }, + { + desc: "read range from empty, with ColumnRangeFilter", + rr: RowRange{}, + filter: ColumnRangeFilter("follows", "", "u"), + want: "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,tjefferson-gwashington-1,tjefferson-jadams-1,wmckinley-tjefferson-1", + }, + { + desc: "read range from start to empty, with ColumnRangeFilter", + rr: RowRange{}, + filter: ColumnRangeFilter("follows", "h", ""), + want: "gwashington-jadams-1,jadams-tjefferson-1,tjefferson-jadams-1,tjefferson-wmckinley-1,wmckinley-tjefferson-1", + }, + { + desc: "read with RowKeyFilter", + rr: RowRange{}, + filter: RowKeyFilter(".*wash.*"), + want: "gwashington-jadams-1", + }, + { + desc: "read with RowKeyFilter, no matches", + rr: RowRange{}, + filter: RowKeyFilter(".*xxx.*"), + want: "", + }, + { + desc: "read with FamilyFilter, no matches", + rr: RowRange{}, + filter: FamilyFilter(".*xxx.*"), + want: "", + }, + { + desc: "read with ColumnFilter + row limit", + rr: RowRange{}, + filter: ColumnFilter(".*j.*"), // matches "jadams" and "tjefferson" + limit: LimitRows(2), + want: "gwashington-jadams-1,jadams-tjefferson-1", + }, + { + desc: "read all, strip values", + rr: RowRange{}, + filter: StripValueFilter(), + want: "gwashington-jadams-,jadams-gwashington-,jadams-tjefferson-,tjefferson-gwashington-,tjefferson-jadams-,tjefferson-wmckinley-,wmckinley-tjefferson-", + }, + { + desc: "read with ColumnFilter + row limit + strip values", + rr: RowRange{}, + filter: ChainFilters(ColumnFilter(".*j.*"), StripValueFilter()), // matches "jadams" and "tjefferson" + limit: LimitRows(2), + want: "gwashington-jadams-,jadams-tjefferson-", + }, + { + desc: "read with condition, strip values on true", + rr: RowRange{}, + filter: ConditionFilter(ColumnFilter(".*j.*"), StripValueFilter(), nil), + want: "gwashington-jadams-,jadams-gwashington-,jadams-tjefferson-,tjefferson-gwashington-,tjefferson-jadams-,tjefferson-wmckinley-,wmckinley-tjefferson-", + }, + { + desc: "read with condition, strip values on false", + rr: RowRange{}, + filter: ConditionFilter(ColumnFilter(".*xxx.*"), nil, StripValueFilter()), + want: "gwashington-jadams-,jadams-gwashington-,jadams-tjefferson-,tjefferson-gwashington-,tjefferson-jadams-,tjefferson-wmckinley-,wmckinley-tjefferson-", + }, + { + desc: "read with ValueRangeFilter + row limit", + rr: RowRange{}, + filter: 
ValueRangeFilter([]byte("1"), []byte("5")), // matches our value of "1" + limit: LimitRows(2), + want: "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1", + }, + { + desc: "read with ValueRangeFilter, no match on exclusive end", + rr: RowRange{}, + filter: ValueRangeFilter([]byte("0"), []byte("1")), // no match + want: "", + }, + { + desc: "read with ValueRangeFilter, no matches", + rr: RowRange{}, + filter: ValueRangeFilter([]byte("3"), []byte("5")), // matches nothing + want: "", + }, + { + desc: "read with InterleaveFilter, no matches on all filters", + rr: RowRange{}, + filter: InterleaveFilters(ColumnFilter(".*x.*"), ColumnFilter(".*z.*")), + want: "", + }, + { + desc: "read with InterleaveFilter, no duplicate cells", + rr: RowRange{}, + filter: InterleaveFilters(ColumnFilter(".*g.*"), ColumnFilter(".*j.*")), + want: "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,tjefferson-gwashington-1,tjefferson-jadams-1,wmckinley-tjefferson-1", + }, + { + desc: "read with InterleaveFilter, with duplicate cells", + rr: RowRange{}, + filter: InterleaveFilters(ColumnFilter(".*g.*"), ColumnFilter(".*g.*")), + want: "jadams-gwashington-1,jadams-gwashington-1,tjefferson-gwashington-1,tjefferson-gwashington-1", + }, + { + desc: "read with a RowRangeList and no filter", + rr: RowRangeList{NewRange("gargamel", "hubbard"), InfiniteRange("wmckinley")}, + want: "gwashington-jadams-1,wmckinley-tjefferson-1", + }, + { + desc: "chain that excludes rows and matches nothing, in a condition", + rr: RowRange{}, + filter: ConditionFilter(ChainFilters(ColumnFilter(".*j.*"), ColumnFilter(".*mckinley.*")), StripValueFilter(), nil), + want: "", + }, + { + desc: "chain that ends with an interleave that has no match. covers #804", + rr: RowRange{}, + filter: ConditionFilter(ChainFilters(ColumnFilter(".*j.*"), InterleaveFilters(ColumnFilter(".*x.*"), ColumnFilter(".*z.*"))), StripValueFilter(), nil), + want: "", + }, + } + for _, tc := range readTests { + var opts []ReadOption + if tc.filter != nil { + opts = append(opts, RowFilter(tc.filter)) + } + if tc.limit != nil { + opts = append(opts, tc.limit) + } + var elt []string + err := tbl.ReadRows(context.Background(), tc.rr, func(r Row) bool { + for _, ris := range r { + for _, ri := range ris { + elt = append(elt, formatReadItem(ri)) + } + } + return true + }, opts...) + if err != nil { + t.Errorf("%s: %v", tc.desc, err) + continue + } + if got := strings.Join(elt, ","); got != tc.want { + t.Errorf("%s: wrong reads.\n got %q\nwant %q", tc.desc, got, tc.want) + } + } + // Read a RowList + var elt []string + keys := RowList{"wmckinley", "gwashington", "jadams"} + want := "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,wmckinley-tjefferson-1" + err = tbl.ReadRows(ctx, keys, func(r Row) bool { + for _, ris := range r { + for _, ri := range ris { + elt = append(elt, formatReadItem(ri)) + } + } + return true + }) + if err != nil { + t.Errorf("read RowList: %v", err) + } + + if got := strings.Join(elt, ","); got != want { + t.Errorf("bulk read: wrong reads.\n got %q\nwant %q", got, want) + } + checkpoint("tested ReadRows in a few ways") + + // Do a scan and stop part way through. + // Verify that the ReadRows callback doesn't keep running. 
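+	// (Per the ReadRows contract, returning false from the callback cancels the
+	// underlying stream, and ReadRows returns nil.)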
+ stopped := false + err = tbl.ReadRows(ctx, InfiniteRange(""), func(r Row) bool { + if r.Key() < "h" { + return true + } + if !stopped { + stopped = true + return false + } + t.Errorf("ReadRows kept scanning to row %q after being told to stop", r.Key()) + return false + }) + if err != nil { + t.Errorf("Partial ReadRows: %v", err) + } + checkpoint("did partial ReadRows test") + + // Delete a row and check it goes away. + mut = NewMutation() + mut.DeleteRow() + if err := tbl.Apply(ctx, "wmckinley", mut); err != nil { + t.Errorf("Apply DeleteRow: %v", err) + } + row, err = tbl.ReadRow(ctx, "wmckinley") + if err != nil { + t.Fatalf("Reading a row after DeleteRow: %v", err) + } + if len(row) != 0 { + t.Fatalf("Read non-zero row after DeleteRow: %v", row) + } + checkpoint("exercised DeleteRow") + + // Check ReadModifyWrite. + + if err := adminClient.CreateColumnFamily(ctx, table, "counter"); err != nil { + t.Fatalf("Creating column family: %v", err) + } + + appendRMW := func(b []byte) *ReadModifyWrite { + rmw := NewReadModifyWrite() + rmw.AppendValue("counter", "likes", b) + return rmw + } + incRMW := func(n int64) *ReadModifyWrite { + rmw := NewReadModifyWrite() + rmw.Increment("counter", "likes", n) + return rmw + } + rmwSeq := []struct { + desc string + rmw *ReadModifyWrite + want []byte + }{ + { + desc: "append #1", + rmw: appendRMW([]byte{0, 0, 0}), + want: []byte{0, 0, 0}, + }, + { + desc: "append #2", + rmw: appendRMW([]byte{0, 0, 0, 0, 17}), // the remaining 40 bits to make a big-endian 17 + want: []byte{0, 0, 0, 0, 0, 0, 0, 17}, + }, + { + desc: "increment", + rmw: incRMW(8), + want: []byte{0, 0, 0, 0, 0, 0, 0, 25}, + }, + } + for _, step := range rmwSeq { + row, err := tbl.ApplyReadModifyWrite(ctx, "gwashington", step.rmw) + if err != nil { + t.Fatalf("ApplyReadModifyWrite %+v: %v", step.rmw, err) + } + // Make sure the modified cell returned by the RMW operation has a timestamp. + if row["counter"][0].Timestamp == 0 { + t.Errorf("RMW returned cell timestamp: got %v, want > 0", row["counter"][0].Timestamp) + } + clearTimestamps(row) + wantRow := Row{"counter": []ReadItem{{Row: "gwashington", Column: "counter:likes", Value: step.want}}} + if !testutil.Equal(row, wantRow) { + t.Fatalf("After %s,\n got %v\nwant %v", step.desc, row, wantRow) + } + } + + // Check for google-cloud-go/issues/723. RMWs that insert new rows should keep row order sorted in the emulator. + row, err = tbl.ApplyReadModifyWrite(ctx, "issue-723-2", appendRMW([]byte{0})) + if err != nil { + t.Fatalf("ApplyReadModifyWrite null string: %v", err) + } + row, err = tbl.ApplyReadModifyWrite(ctx, "issue-723-1", appendRMW([]byte{0})) + if err != nil { + t.Fatalf("ApplyReadModifyWrite null string: %v", err) + } + // Get only the correct row back on read. + r, err := tbl.ReadRow(ctx, "issue-723-1") + if err != nil { + t.Fatalf("Reading row: %v", err) + } + if r.Key() != "issue-723-1" { + t.Errorf("ApplyReadModifyWrite: incorrect read after RMW,\n got %v\nwant %v", r.Key(), "issue-723-1") + } + checkpoint("tested ReadModifyWrite") + + // Test arbitrary timestamps more thoroughly. + if err := adminClient.CreateColumnFamily(ctx, table, "ts"); err != nil { + t.Fatalf("Creating column family: %v", err) + } + const numVersions = 4 + mut = NewMutation() + for i := 0; i < numVersions; i++ { + // Timestamps are used in thousands because the server + // only permits that granularity. 
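+		// (A Timestamp is in microseconds, so multiples of 1000 are whole
+		// milliseconds; see Timestamp.TruncateToMilliseconds.)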
+ mut.Set("ts", "col", Timestamp(i*1000), []byte(fmt.Sprintf("val-%d", i))) + mut.Set("ts", "col2", Timestamp(i*1000), []byte(fmt.Sprintf("val-%d", i))) + } + if err := tbl.Apply(ctx, "testrow", mut); err != nil { + t.Fatalf("Mutating row: %v", err) + } + r, err = tbl.ReadRow(ctx, "testrow") + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{"ts": []ReadItem{ + // These should be returned in descending timestamp order. + {Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")}, + {Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")}, + {Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")}, + {Row: "testrow", Column: "ts:col", Timestamp: 0, Value: []byte("val-0")}, + {Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")}, + {Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")}, + {Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")}, + {Row: "testrow", Column: "ts:col2", Timestamp: 0, Value: []byte("val-0")}, + }} + if !testutil.Equal(r, wantRow) { + t.Errorf("Cell with multiple versions,\n got %v\nwant %v", r, wantRow) + } + // Do the same read, but filter to the latest two versions. + r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(2))) + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{"ts": []ReadItem{ + {Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")}, + {Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")}, + {Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")}, + {Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")}, + }} + if !testutil.Equal(r, wantRow) { + t.Errorf("Cell with multiple versions and LatestNFilter(2),\n got %v\nwant %v", r, wantRow) + } + // Check cell offset / limit + r, err = tbl.ReadRow(ctx, "testrow", RowFilter(CellsPerRowLimitFilter(3))) + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{"ts": []ReadItem{ + {Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")}, + {Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")}, + {Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")}, + }} + if !testutil.Equal(r, wantRow) { + t.Errorf("Cell with multiple versions and CellsPerRowLimitFilter(3),\n got %v\nwant %v", r, wantRow) + } + r, err = tbl.ReadRow(ctx, "testrow", RowFilter(CellsPerRowOffsetFilter(3))) + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{"ts": []ReadItem{ + {Row: "testrow", Column: "ts:col", Timestamp: 0, Value: []byte("val-0")}, + {Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")}, + {Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")}, + {Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")}, + {Row: "testrow", Column: "ts:col2", Timestamp: 0, Value: []byte("val-0")}, + }} + if !testutil.Equal(r, wantRow) { + t.Errorf("Cell with multiple versions and CellsPerRowOffsetFilter(3),\n got %v\nwant %v", r, wantRow) + } + // Check timestamp range filtering (with truncation) + r, err = tbl.ReadRow(ctx, "testrow", RowFilter(TimestampRangeFilterMicros(1001, 3000))) + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{"ts": []ReadItem{ + {Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")}, + {Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: 
[]byte("val-1")}, + {Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")}, + {Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")}, + }} + if !testutil.Equal(r, wantRow) { + t.Errorf("Cell with multiple versions and TimestampRangeFilter(1000, 3000),\n got %v\nwant %v", r, wantRow) + } + r, err = tbl.ReadRow(ctx, "testrow", RowFilter(TimestampRangeFilterMicros(1000, 0))) + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{"ts": []ReadItem{ + {Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")}, + {Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")}, + {Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")}, + {Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")}, + {Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")}, + {Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")}, + }} + if !testutil.Equal(r, wantRow) { + t.Errorf("Cell with multiple versions and TimestampRangeFilter(1000, 0),\n got %v\nwant %v", r, wantRow) + } + // Delete non-existing cells, no such column family in this row + // Should not delete anything + if err := adminClient.CreateColumnFamily(ctx, table, "non-existing"); err != nil { + t.Fatalf("Creating column family: %v", err) + } + mut = NewMutation() + mut.DeleteTimestampRange("non-existing", "col", 2000, 3000) // half-open interval + if err := tbl.Apply(ctx, "testrow", mut); err != nil { + t.Fatalf("Mutating row: %v", err) + } + r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(3))) + if err != nil { + t.Fatalf("Reading row: %v", err) + } + if !testutil.Equal(r, wantRow) { + t.Errorf("Cell was deleted unexpectly,\n got %v\nwant %v", r, wantRow) + } + // Delete non-existing cells, no such column in this column family + // Should not delete anything + mut = NewMutation() + mut.DeleteTimestampRange("ts", "non-existing", 2000, 3000) // half-open interval + if err := tbl.Apply(ctx, "testrow", mut); err != nil { + t.Fatalf("Mutating row: %v", err) + } + r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(3))) + if err != nil { + t.Fatalf("Reading row: %v", err) + } + if !testutil.Equal(r, wantRow) { + t.Errorf("Cell was deleted unexpectly,\n got %v\nwant %v", r, wantRow) + } + // Delete the cell with timestamp 2000 and repeat the last read, + // checking that we get ts 3000 and ts 1000. 
+	mut = NewMutation()
+	mut.DeleteTimestampRange("ts", "col", 2001, 3000) // half-open interval
+	if err := tbl.Apply(ctx, "testrow", mut); err != nil {
+		t.Fatalf("Mutating row: %v", err)
+	}
+	r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(2)))
+	if err != nil {
+		t.Fatalf("Reading row: %v", err)
+	}
+	wantRow = Row{"ts": []ReadItem{
+		{Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")},
+		{Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")},
+		{Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")},
+		{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
+	}}
+	if !testutil.Equal(r, wantRow) {
+		t.Errorf("Cell with multiple versions and LatestNFilter(2), after deleting timestamp 2000,\n got %v\nwant %v", r, wantRow)
+	}
+	checkpoint("tested multiple versions in a cell")
+
+	// Check DeleteCellsInFamily
+	if err := adminClient.CreateColumnFamily(ctx, table, "status"); err != nil {
+		t.Fatalf("Creating column family: %v", err)
+	}
+
+	mut = NewMutation()
+	mut.Set("status", "start", 0, []byte("1"))
+	mut.Set("status", "end", 0, []byte("2"))
+	mut.Set("ts", "col", 0, []byte("3"))
+	if err := tbl.Apply(ctx, "row1", mut); err != nil {
+		t.Errorf("Mutating row: %v", err)
+	}
+	if err := tbl.Apply(ctx, "row2", mut); err != nil {
+		t.Errorf("Mutating row: %v", err)
+	}
+
+	mut = NewMutation()
+	mut.DeleteCellsInFamily("status")
+	if err := tbl.Apply(ctx, "row1", mut); err != nil {
+		t.Errorf("Delete cf: %v", err)
+	}
+
+	// ColumnFamily removed
+	r, err = tbl.ReadRow(ctx, "row1")
+	if err != nil {
+		t.Fatalf("Reading row: %v", err)
+	}
+	wantRow = Row{"ts": []ReadItem{
+		{Row: "row1", Column: "ts:col", Timestamp: 0, Value: []byte("3")},
+	}}
+	if !testutil.Equal(r, wantRow) {
+		t.Errorf("Column family was not deleted.\n got %v\n want %v", r, wantRow)
+	}
+
+	// ColumnFamily not removed
+	r, err = tbl.ReadRow(ctx, "row2")
+	if err != nil {
+		t.Fatalf("Reading row: %v", err)
+	}
+	wantRow = Row{
+		"ts": []ReadItem{
+			{Row: "row2", Column: "ts:col", Timestamp: 0, Value: []byte("3")},
+		},
+		"status": []ReadItem{
+			{Row: "row2", Column: "status:end", Timestamp: 0, Value: []byte("2")},
+			{Row: "row2", Column: "status:start", Timestamp: 0, Value: []byte("1")},
+		},
+	}
+	if !testutil.Equal(r, wantRow) {
+		t.Errorf("Column family was deleted unexpectedly.\n got %v\n want %v", r, wantRow)
+	}
+	checkpoint("tested family delete")
+
+	// Check DeleteCellsInColumn
+	mut = NewMutation()
+	mut.Set("status", "start", 0, []byte("1"))
+	mut.Set("status", "middle", 0, []byte("2"))
+	mut.Set("status", "end", 0, []byte("3"))
+	if err := tbl.Apply(ctx, "row3", mut); err != nil {
+		t.Errorf("Mutating row: %v", err)
+	}
+	mut = NewMutation()
+	mut.DeleteCellsInColumn("status", "middle")
+	if err := tbl.Apply(ctx, "row3", mut); err != nil {
+		t.Errorf("Delete column: %v", err)
+	}
+	r, err = tbl.ReadRow(ctx, "row3")
+	if err != nil {
+		t.Fatalf("Reading row: %v", err)
+	}
+	wantRow = Row{
+		"status": []ReadItem{
+			{Row: "row3", Column: "status:end", Timestamp: 0, Value: []byte("3")},
+			{Row: "row3", Column: "status:start", Timestamp: 0, Value: []byte("1")},
+		},
+	}
+	if !testutil.Equal(r, wantRow) {
+		t.Errorf("Column was not deleted.\n got %v\n want %v", r, wantRow)
+	}
+	mut = NewMutation()
+	mut.DeleteCellsInColumn("status", "start")
+	if err := tbl.Apply(ctx, "row3", mut); err != nil {
+		t.Errorf("Delete column: %v", err)
+	}
+	r, err = tbl.ReadRow(ctx, "row3")
+	if err != nil {
+		t.Fatalf("Reading row: %v",
err) + } + wantRow = Row{ + "status": []ReadItem{ + {Row: "row3", Column: "status:end", Timestamp: 0, Value: []byte("3")}, + }, + } + if !testutil.Equal(r, wantRow) { + t.Errorf("Column was not deleted.\n got %v\n want %v", r, wantRow) + } + mut = NewMutation() + mut.DeleteCellsInColumn("status", "end") + if err := tbl.Apply(ctx, "row3", mut); err != nil { + t.Errorf("Delete column: %v", err) + } + r, err = tbl.ReadRow(ctx, "row3") + if err != nil { + t.Fatalf("Reading row: %v", err) + } + if len(r) != 0 { + t.Errorf("Delete column: got %v, want empty row", r) + } + // Add same cell after delete + mut = NewMutation() + mut.Set("status", "end", 0, []byte("3")) + if err := tbl.Apply(ctx, "row3", mut); err != nil { + t.Errorf("Mutating row: %v", err) + } + r, err = tbl.ReadRow(ctx, "row3") + if err != nil { + t.Fatalf("Reading row: %v", err) + } + if !testutil.Equal(r, wantRow) { + t.Errorf("Column was not deleted correctly.\n got %v\n want %v", r, wantRow) + } + checkpoint("tested column delete") + + // Do highly concurrent reads/writes. + // TODO(dsymonds): Raise this to 1000 when https://github.com/grpc/grpc-go/issues/205 is resolved. + const maxConcurrency = 100 + var wg sync.WaitGroup + for i := 0; i < maxConcurrency; i++ { + wg.Add(1) + go func() { + defer wg.Done() + switch r := rand.Intn(100); { // r ∈ [0,100) + case 0 <= r && r < 30: + // Do a read. + _, err := tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(1))) + if err != nil { + t.Errorf("Concurrent read: %v", err) + } + case 30 <= r && r < 100: + // Do a write. + mut := NewMutation() + mut.Set("ts", "col", 0, []byte("data")) + if err := tbl.Apply(ctx, "testrow", mut); err != nil { + t.Errorf("Concurrent write: %v", err) + } + } + }() + } + wg.Wait() + checkpoint("tested high concurrency") + + // Large reads, writes and scans. + bigBytes := make([]byte, 5<<20) // 5 MB is larger than current default gRPC max of 4 MB, but less than the max we set. + nonsense := []byte("lorem ipsum dolor sit amet, ") + fill(bigBytes, nonsense) + mut = NewMutation() + mut.Set("ts", "col", 0, bigBytes) + if err := tbl.Apply(ctx, "bigrow", mut); err != nil { + t.Errorf("Big write: %v", err) + } + r, err = tbl.ReadRow(ctx, "bigrow") + if err != nil { + t.Errorf("Big read: %v", err) + } + wantRow = Row{"ts": []ReadItem{ + {Row: "bigrow", Column: "ts:col", Value: bigBytes}, + }} + if !testutil.Equal(r, wantRow) { + t.Errorf("Big read returned incorrect bytes: %v", r) + } + // Now write 1000 rows, each with 82 KB values, then scan them all. + medBytes := make([]byte, 82<<10) + fill(medBytes, nonsense) + sem := make(chan int, 50) // do up to 50 mutations at a time. + for i := 0; i < 1000; i++ { + mut := NewMutation() + mut.Set("ts", "big-scan", 0, medBytes) + row := fmt.Sprintf("row-%d", i) + wg.Add(1) + go func() { + defer wg.Done() + defer func() { <-sem }() + sem <- 1 + if err := tbl.Apply(ctx, row, mut); err != nil { + t.Errorf("Preparing large scan: %v", err) + } + }() + } + wg.Wait() + n := 0 + err = tbl.ReadRows(ctx, PrefixRange("row-"), func(r Row) bool { + for _, ris := range r { + for _, ri := range ris { + n += len(ri.Value) + } + } + return true + }, RowFilter(ColumnFilter("big-scan"))) + if err != nil { + t.Errorf("Doing large scan: %v", err) + } + if want := 1000 * len(medBytes); n != want { + t.Errorf("Large scan returned %d bytes, want %d", n, want) + } + // Scan a subset of the 1000 rows that we just created, using a LimitRows ReadOption. 
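+	// (LimitRows caps how many rows the server returns, so the callback below
+	// should run exactly wantRc times.)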
+	rc := 0
+	wantRc := 3
+	err = tbl.ReadRows(ctx, PrefixRange("row-"), func(r Row) bool {
+		rc++
+		return true
+	}, LimitRows(int64(wantRc)))
+	if err != nil {
+		t.Errorf("Scan with row limit: %v", err)
+	}
+	if rc != wantRc {
+		t.Errorf("Scan with row limit returned %d rows, want %d", rc, wantRc)
+	}
+	checkpoint("tested big read/write/scan")
+
+	// Test bulk mutations
+	if err := adminClient.CreateColumnFamily(ctx, table, "bulk"); err != nil {
+		t.Fatalf("Creating column family: %v", err)
+	}
+	bulkData := map[string][]string{
+		"red sox":  {"2004", "2007", "2013"},
+		"patriots": {"2001", "2003", "2004", "2014"},
+		"celtics":  {"1981", "1984", "1986", "2008"},
+	}
+	var rowKeys []string
+	var muts []*Mutation
+	for row, ss := range bulkData {
+		mut := NewMutation()
+		for _, name := range ss {
+			mut.Set("bulk", name, 0, []byte("1"))
+		}
+		rowKeys = append(rowKeys, row)
+		muts = append(muts, mut)
+	}
+	status, err := tbl.ApplyBulk(ctx, rowKeys, muts)
+	if err != nil {
+		t.Fatalf("Bulk mutating rows %q: %v", rowKeys, err)
+	}
+	if status != nil {
+		t.Errorf("non-nil errors: %v", status)
+	}
+	checkpoint("inserted bulk data")
+
+	// Read each row back
+	for rowKey, ss := range bulkData {
+		row, err := tbl.ReadRow(ctx, rowKey)
+		if err != nil {
+			t.Fatalf("Reading a bulk row: %v", err)
+		}
+		var wantItems []ReadItem
+		for _, val := range ss {
+			wantItems = append(wantItems, ReadItem{Row: rowKey, Column: "bulk:" + val, Value: []byte("1")})
+		}
+		wantRow := Row{"bulk": wantItems}
+		if !testutil.Equal(row, wantRow) {
+			t.Errorf("Read row mismatch.\n got %#v\nwant %#v", row, wantRow)
+		}
+	}
+	checkpoint("tested reading from bulk insert")
+
+	// Test bulk write errors.
+	// Note: Setting timestamps as ServerTime makes sure the mutations are not retried on error.
+	badMut := NewMutation()
+	badMut.Set("badfamily", "col", ServerTime, nil)
+	badMut2 := NewMutation()
+	badMut2.Set("badfamily2", "goodcol", ServerTime, []byte("1"))
+	status, err = tbl.ApplyBulk(ctx, []string{"badrow", "badrow2"}, []*Mutation{badMut, badMut2})
+	if err != nil {
+		t.Fatalf("Bulk mutating bad rows: %v", err)
+	}
+	if status == nil {
+		t.Errorf("No errors for bad bulk mutation")
+	} else if status[0] == nil || status[1] == nil {
+		t.Errorf("No error for bad bulk mutation")
+	}
+}
+
+type requestCountingInterceptor struct {
+	grpc.ClientStream
+	requestCallback func()
+}
+
+func (i *requestCountingInterceptor) SendMsg(m interface{}) error {
+	i.requestCallback()
+	return i.ClientStream.SendMsg(m)
+}
+
+func (i *requestCountingInterceptor) RecvMsg(m interface{}) error {
+	return i.ClientStream.RecvMsg(m)
+}
+
+func requestCallback(callback func()) func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+	return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+		clientStream, err := streamer(ctx, desc, cc, method, opts...)
+		return &requestCountingInterceptor{
+			ClientStream:    clientStream,
+			requestCallback: callback,
+		}, err
+	}
+}
+
+// TestReadRowsInvalidRowSet verifies that the client doesn't send ReadRows() requests with invalid RowSets.
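+// It counts outgoing requests with a gRPC stream interceptor; a RowSet whose
+// valid() reports false should produce zero ReadRows RPCs.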
+func TestReadRowsInvalidRowSet(t *testing.T) {
+	testEnv, err := NewEmulatedEnv(IntegrationTestConfig{})
+	if err != nil {
+		t.Fatalf("NewEmulatedEnv failed: %v", err)
+	}
+	var requestCount int
+	incrementRequestCount := func() { requestCount++ }
+	conn, err := grpc.Dial(testEnv.server.Addr, grpc.WithInsecure(), grpc.WithBlock(),
+		grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(100<<20), grpc.MaxCallRecvMsgSize(100<<20)),
+		grpc.WithStreamInterceptor(requestCallback(incrementRequestCount)),
+	)
+	if err != nil {
+		t.Fatalf("grpc.Dial failed: %v", err)
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
+	defer cancel()
+	adminClient, err := NewAdminClient(ctx, testEnv.config.Project, testEnv.config.Instance, option.WithGRPCConn(conn))
+	if err != nil {
+		t.Fatalf("NewAdminClient failed: %v", err)
+	}
+	defer adminClient.Close()
+	if err := adminClient.CreateTable(ctx, testEnv.config.Table); err != nil {
+		t.Fatalf("CreateTable(%v) failed: %v", testEnv.config.Table, err)
+	}
+	client, err := NewClient(ctx, testEnv.config.Project, testEnv.config.Instance, option.WithGRPCConn(conn))
+	if err != nil {
+		t.Fatalf("NewClient failed: %v", err)
+	}
+	defer client.Close()
+	table := client.Open(testEnv.config.Table)
+	tests := []struct {
+		rr    RowSet
+		valid bool
+	}{
+		{
+			rr:    RowRange{},
+			valid: true,
+		},
+		{
+			rr:    RowRange{start: "b"},
+			valid: true,
+		},
+		{
+			rr:    RowRange{start: "b", limit: "c"},
+			valid: true,
+		},
+		{
+			rr:    RowRange{start: "b", limit: "a"},
+			valid: false,
+		},
+		{
+			rr:    RowList{"a"},
+			valid: true,
+		},
+		{
+			rr:    RowList{},
+			valid: false,
+		},
+	}
+	for _, test := range tests {
+		requestCount = 0
+		err = table.ReadRows(ctx, test.rr, func(r Row) bool { return true })
+		if err != nil {
+			t.Fatalf("ReadRows(%v) failed: %v", test.rr, err)
+		}
+		requestValid := requestCount != 0
+		if requestValid != test.valid {
+			t.Errorf("%s: got %v, want %v", test.rr, requestValid, test.valid)
+		}
+	}
+}
+
+func formatReadItem(ri ReadItem) string {
+	// Use the column qualifier only to make the test data briefer.
+	col := ri.Column[strings.Index(ri.Column, ":")+1:]
+	return fmt.Sprintf("%s-%s-%s", ri.Row, col, ri.Value)
+}
+
+func fill(b, sub []byte) {
+	for len(b) > len(sub) {
+		n := copy(b, sub)
+		b = b[n:]
+	}
+}
+
+func clearTimestamps(r Row) {
+	for _, ris := range r {
+		for i := range ris {
+			ris[i].Timestamp = 0
+		}
+	}
+}
+
+func TestSampleRowKeys(t *testing.T) {
+	start := time.Now()
+	lastCheckpoint := start
+	checkpoint := func(s string) {
+		n := time.Now()
+		t.Logf("[%s] %v since start, %v since last checkpoint", s, n.Sub(start), n.Sub(lastCheckpoint))
+		lastCheckpoint = n
+	}
+	ctx := context.Background()
+	client, adminClient, table, err := doSetup(ctx)
+	if err != nil {
+		t.Fatalf("%v", err)
+	}
+	defer client.Close()
+	defer adminClient.Close()
+	tbl := client.Open(table)
+	// Delete the table at the end of the test. doSetup has already
+	// created it, so this also cleans it up if the test fails partway
+	// when running against production.
+	defer adminClient.DeleteTable(ctx, table)

+	// Insert some data.
+	initialData := map[string][]string{
+		"wmckinley11":   {"tjefferson11"},
+		"gwashington77": {"jadams77"},
+		"tjefferson0":   {"gwashington0", "jadams0"},
+	}
+
+	for row, ss := range initialData {
+		mut := NewMutation()
+		for _, name := range ss {
+			mut.Set("follows", name, 0, []byte("1"))
+		}
+		if err := tbl.Apply(ctx, row, mut); err != nil {
+			t.Errorf("Mutating row %q: %v", row, err)
+		}
+	}
+	checkpoint("inserted initial data")
+	sampleKeys, err := tbl.SampleRowKeys(context.Background())
+	if err != nil {
+		t.Errorf("SampleRowKeys: %v", err)
+	}
+	if len(sampleKeys) == 0 {
+		t.Error("SampleRowKeys length 0")
+	}
+	checkpoint("tested SampleRowKeys")
+}
+
+func doSetup(ctx context.Context) (*Client, *AdminClient, string, error) {
+	start := time.Now()
+	lastCheckpoint := start
+	checkpoint := func(s string) {
+		n := time.Now()
+		fmt.Printf("[%s] %v since start, %v since last checkpoint\n", s, n.Sub(start), n.Sub(lastCheckpoint))
+		lastCheckpoint = n
+	}
+
+	testEnv, err := NewIntegrationEnv()
+	if err != nil {
+		return nil, nil, "", fmt.Errorf("IntegrationEnv: %v", err)
+	}
+
+	var timeout time.Duration
+	if testEnv.Config().UseProd {
+		timeout = 10 * time.Minute
+		fmt.Printf("Running test against production\n")
+	} else {
+		timeout = 1 * time.Minute
+		fmt.Printf("bttest.Server running on %s\n", testEnv.Config().AdminEndpoint)
+	}
+	ctx, cancel := context.WithTimeout(ctx, timeout)
+	defer cancel()
+
+	client, err := testEnv.NewClient()
+	if err != nil {
+		return nil, nil, "", fmt.Errorf("Client: %v", err)
+	}
+	checkpoint("dialed Client")
+
+	adminClient, err := testEnv.NewAdminClient()
+	if err != nil {
+		return nil, nil, "", fmt.Errorf("AdminClient: %v", err)
+	}
+	checkpoint("dialed AdminClient")
+
+	table := testEnv.Config().Table
+	if err := adminClient.CreateTable(ctx, table); err != nil {
+		return nil, nil, "", fmt.Errorf("Creating table: %v", err)
+	}
+	checkpoint("created table")
+	if err := adminClient.CreateColumnFamily(ctx, table, "follows"); err != nil {
+		return nil, nil, "", fmt.Errorf("Creating column family: %v", err)
+	}
+	checkpoint(`created "follows" column family`)
+
+	return client, adminClient, table, nil
+}
diff --git a/vendor/cloud.google.com/go/bigtable/bttest/example_test.go b/vendor/cloud.google.com/go/bigtable/bttest/example_test.go
new file mode 100644
index 0000000..3725f28
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigtable/bttest/example_test.go
@@ -0,0 +1,83 @@
+/*
+Copyright 2016 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ +package bttest_test + +import ( + "fmt" + "log" + + "cloud.google.com/go/bigtable" + "cloud.google.com/go/bigtable/bttest" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +func ExampleNewServer() { + + srv, err := bttest.NewServer("localhost:0") + + if err != nil { + log.Fatalln(err) + } + + ctx := context.Background() + + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) + if err != nil { + log.Fatalln(err) + } + + proj, instance := "proj", "instance" + + adminClient, err := bigtable.NewAdminClient(ctx, proj, instance, option.WithGRPCConn(conn)) + if err != nil { + log.Fatalln(err) + } + + if err = adminClient.CreateTable(ctx, "example"); err != nil { + log.Fatalln(err) + } + + if err = adminClient.CreateColumnFamily(ctx, "example", "links"); err != nil { + log.Fatalln(err) + } + + client, err := bigtable.NewClient(ctx, proj, instance, option.WithGRPCConn(conn)) + if err != nil { + log.Fatalln(err) + } + tbl := client.Open("example") + + mut := bigtable.NewMutation() + mut.Set("links", "golang.org", bigtable.Now(), []byte("Gophers!")) + if err = tbl.Apply(ctx, "com.google.cloud", mut); err != nil { + log.Fatalln(err) + } + + if row, err := tbl.ReadRow(ctx, "com.google.cloud"); err != nil { + log.Fatalln(err) + } else { + for _, column := range row["links"] { + fmt.Println(column.Column) + fmt.Println(string(column.Value)) + } + } + + // Output: + // links:golang.org + // Gophers! +} diff --git a/vendor/cloud.google.com/go/bigtable/bttest/inmem.go b/vendor/cloud.google.com/go/bigtable/bttest/inmem.go new file mode 100644 index 0000000..c925e25 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/bttest/inmem.go @@ -0,0 +1,1316 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package bttest contains test helpers for working with the bigtable package. + +To use a Server, create it, and then connect to it with no security: +(The project/instance values are ignored.) + srv, err := bttest.NewServer("localhost:0") + ... + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) + ... + client, err := bigtable.NewClient(ctx, proj, instance, + option.WithGRPCConn(conn)) + ... +*/ +package bttest // import "cloud.google.com/go/bigtable/bttest" + +import ( + "encoding/binary" + "fmt" + "log" + "math/rand" + "net" + "regexp" + "sort" + "strings" + "sync" + "time" + + "bytes" + + emptypb "github.com/golang/protobuf/ptypes/empty" + "github.com/golang/protobuf/ptypes/wrappers" + "github.com/google/btree" + "golang.org/x/net/context" + btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2" + btpb "google.golang.org/genproto/googleapis/bigtable/v2" + statpb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// Server is an in-memory Cloud Bigtable fake. +// It is unauthenticated, and only a rough approximation. 
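+// Rows live in an in-memory btree keyed by row key, and garbage
+// collection runs on a coarse background timer, so it is suitable for
+// tests but not for reproducing production behavior exactly.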
+type Server struct { + Addr string + + l net.Listener + srv *grpc.Server + s *server +} + +// server is the real implementation of the fake. +// It is a separate and unexported type so the API won't be cluttered with +// methods that are only relevant to the fake's implementation. +type server struct { + mu sync.Mutex + tables map[string]*table // keyed by fully qualified name + gcc chan int // set when gcloop starts, closed when server shuts down + + // Any unimplemented methods will cause a panic. + btapb.BigtableTableAdminServer + btpb.BigtableServer +} + +// NewServer creates a new Server. +// The Server will be listening for gRPC connections, without TLS, +// on the provided address. The resolved address is named by the Addr field. +func NewServer(laddr string, opt ...grpc.ServerOption) (*Server, error) { + l, err := net.Listen("tcp", laddr) + if err != nil { + return nil, err + } + + s := &Server{ + Addr: l.Addr().String(), + l: l, + srv: grpc.NewServer(opt...), + s: &server{ + tables: make(map[string]*table), + }, + } + btapb.RegisterBigtableTableAdminServer(s.srv, s.s) + btpb.RegisterBigtableServer(s.srv, s.s) + + go s.srv.Serve(s.l) + + return s, nil +} + +// Close shuts down the server. +func (s *Server) Close() { + s.s.mu.Lock() + if s.s.gcc != nil { + close(s.s.gcc) + } + s.s.mu.Unlock() + + s.srv.Stop() + s.l.Close() +} + +func (s *server) CreateTable(ctx context.Context, req *btapb.CreateTableRequest) (*btapb.Table, error) { + tbl := req.Parent + "/tables/" + req.TableId + + s.mu.Lock() + if _, ok := s.tables[tbl]; ok { + s.mu.Unlock() + return nil, status.Errorf(codes.AlreadyExists, "table %q already exists", tbl) + } + s.tables[tbl] = newTable(req) + s.mu.Unlock() + + return &btapb.Table{Name: tbl}, nil +} + +func (s *server) ListTables(ctx context.Context, req *btapb.ListTablesRequest) (*btapb.ListTablesResponse, error) { + res := &btapb.ListTablesResponse{} + prefix := req.Parent + "/tables/" + + s.mu.Lock() + for tbl := range s.tables { + if strings.HasPrefix(tbl, prefix) { + res.Tables = append(res.Tables, &btapb.Table{Name: tbl}) + } + } + s.mu.Unlock() + + return res, nil +} + +func (s *server) GetTable(ctx context.Context, req *btapb.GetTableRequest) (*btapb.Table, error) { + tbl := req.Name + + s.mu.Lock() + tblIns, ok := s.tables[tbl] + s.mu.Unlock() + if !ok { + return nil, status.Errorf(codes.NotFound, "table %q not found", tbl) + } + + return &btapb.Table{ + Name: tbl, + ColumnFamilies: toColumnFamilies(tblIns.columnFamilies()), + }, nil +} + +func (s *server) DeleteTable(ctx context.Context, req *btapb.DeleteTableRequest) (*emptypb.Empty, error) { + s.mu.Lock() + defer s.mu.Unlock() + if _, ok := s.tables[req.Name]; !ok { + return nil, status.Errorf(codes.NotFound, "table %q not found", req.Name) + } + delete(s.tables, req.Name) + return &emptypb.Empty{}, nil +} + +func (s *server) ModifyColumnFamilies(ctx context.Context, req *btapb.ModifyColumnFamiliesRequest) (*btapb.Table, error) { + tblName := req.Name[strings.LastIndex(req.Name, "/")+1:] + + s.mu.Lock() + tbl, ok := s.tables[req.Name] + s.mu.Unlock() + if !ok { + return nil, status.Errorf(codes.NotFound, "table %q not found", req.Name) + } + + tbl.mu.Lock() + defer tbl.mu.Unlock() + + for _, mod := range req.Modifications { + if create := mod.GetCreate(); create != nil { + if _, ok := tbl.families[mod.Id]; ok { + return nil, status.Errorf(codes.AlreadyExists, "family %q already exists", mod.Id) + } + newcf := &columnFamily{ + name: req.Name + "/columnFamilies/" + mod.Id, + order: tbl.counter, + gcRule: 
create.GcRule,
+			}
+			tbl.counter++
+			tbl.families[mod.Id] = newcf
+		} else if mod.GetDrop() {
+			if _, ok := tbl.families[mod.Id]; !ok {
+				return nil, fmt.Errorf("can't delete unknown family %q", mod.Id)
+			}
+			delete(tbl.families, mod.Id)
+		} else if modify := mod.GetUpdate(); modify != nil {
+			if _, ok := tbl.families[mod.Id]; !ok {
+				return nil, fmt.Errorf("no such family %q", mod.Id)
+			}
+			newcf := &columnFamily{
+				name:   req.Name + "/columnFamilies/" + mod.Id,
+				gcRule: modify.GcRule,
+			}
+			// Assume that we ALWAYS want to replace the family with the new
+			// setting; we may need partial updates though.
+			tbl.families[mod.Id] = newcf
+		}
+	}
+
+	s.needGC()
+	return &btapb.Table{
+		Name:           tblName,
+		ColumnFamilies: toColumnFamilies(tbl.families),
+		Granularity:    btapb.Table_TimestampGranularity(btapb.Table_MILLIS),
+	}, nil
+}
+
+func (s *server) DropRowRange(ctx context.Context, req *btapb.DropRowRangeRequest) (*emptypb.Empty, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	tbl, ok := s.tables[req.Name]
+	if !ok {
+		return nil, status.Errorf(codes.NotFound, "table %q not found", req.Name)
+	}
+
+	if req.GetDeleteAllDataFromTable() {
+		tbl.rows = btree.New(btreeDegree)
+	} else {
+		// Delete rows by prefix.
+		prefixBytes := req.GetRowKeyPrefix()
+		if prefixBytes == nil {
+			return nil, fmt.Errorf("missing row key prefix")
+		}
+		prefix := string(prefixBytes)
+
+		// The BTree does not specify what happens if rows are deleted during
+		// iteration, and it provides no "delete range" method.
+		// So we collect the rows first, then delete them one by one.
+		var rowsToDelete []*row
+		tbl.rows.AscendGreaterOrEqual(btreeKey(prefix), func(i btree.Item) bool {
+			r := i.(*row)
+			if strings.HasPrefix(r.key, prefix) {
+				rowsToDelete = append(rowsToDelete, r)
+				return true
+			} else {
+				return false // stop iteration
+			}
+		})
+		for _, r := range rowsToDelete {
+			tbl.rows.Delete(r)
+		}
+	}
+	return &emptypb.Empty{}, nil
+}
+
+// This is a private alpha release of Cloud Bigtable replication. This feature
+// is not currently available to most Cloud Bigtable customers. This feature
+// might be changed in backward-incompatible ways and is not recommended for
+// production use. It is not subject to any SLA or deprecation policy.
+func (s *server) GenerateConsistencyToken(ctx context.Context, req *btapb.GenerateConsistencyTokenRequest) (*btapb.GenerateConsistencyTokenResponse, error) {
+	// Check that the table exists.
+	_, ok := s.tables[req.Name]
+	if !ok {
+		return nil, status.Errorf(codes.NotFound, "table %q not found", req.Name)
+	}
+
+	return &btapb.GenerateConsistencyTokenResponse{
+		ConsistencyToken: "TokenFor-" + req.Name,
+	}, nil
+}
+
+// This is a private alpha release of Cloud Bigtable replication. This feature
+// is not currently available to most Cloud Bigtable customers. This feature
+// might be changed in backward-incompatible ways and is not recommended for
+// production use. It is not subject to any SLA or deprecation policy.
+func (s *server) CheckConsistency(ctx context.Context, req *btapb.CheckConsistencyRequest) (*btapb.CheckConsistencyResponse, error) {
+	// Check that the table exists.
+	_, ok := s.tables[req.Name]
+	if !ok {
+		return nil, status.Errorf(codes.NotFound, "table %q not found", req.Name)
+	}
+
+	// Check this is the right token.
+	if req.ConsistencyToken != "TokenFor-"+req.Name {
+		return nil, status.Errorf(codes.InvalidArgument, "token %q not valid", req.ConsistencyToken)
+	}
+
+	// Single cluster instances are always consistent.
+ return &btapb.CheckConsistencyResponse{ + Consistent: true, + }, nil +} + +func (s *server) ReadRows(req *btpb.ReadRowsRequest, stream btpb.Bigtable_ReadRowsServer) error { + s.mu.Lock() + tbl, ok := s.tables[req.TableName] + s.mu.Unlock() + if !ok { + return status.Errorf(codes.NotFound, "table %q not found", req.TableName) + } + + // Rows to read can be specified by a set of row keys and/or a set of row ranges. + // Output is a stream of sorted, de-duped rows. + tbl.mu.RLock() + rowSet := make(map[string]*row) + + addRow := func(i btree.Item) bool { + r := i.(*row) + rowSet[r.key] = r + return true + } + + if req.Rows != nil { + // Add the explicitly given keys + for _, key := range req.Rows.RowKeys { + k := string(key) + if i := tbl.rows.Get(btreeKey(k)); i != nil { + addRow(i) + } + } + + // Add keys from row ranges + for _, rr := range req.Rows.RowRanges { + var start, end string + switch sk := rr.StartKey.(type) { + case *btpb.RowRange_StartKeyClosed: + start = string(sk.StartKeyClosed) + case *btpb.RowRange_StartKeyOpen: + start = string(sk.StartKeyOpen) + "\x00" + } + switch ek := rr.EndKey.(type) { + case *btpb.RowRange_EndKeyClosed: + end = string(ek.EndKeyClosed) + "\x00" + case *btpb.RowRange_EndKeyOpen: + end = string(ek.EndKeyOpen) + } + switch { + case start == "" && end == "": + tbl.rows.Ascend(addRow) // all rows + case start == "": + tbl.rows.AscendLessThan(btreeKey(end), addRow) + case end == "": + tbl.rows.AscendGreaterOrEqual(btreeKey(start), addRow) + default: + tbl.rows.AscendRange(btreeKey(start), btreeKey(end), addRow) + } + } + } else { + // Read all rows + tbl.rows.Ascend(addRow) + } + tbl.mu.RUnlock() + + rows := make([]*row, 0, len(rowSet)) + for _, r := range rowSet { + rows = append(rows, r) + } + sort.Sort(byRowKey(rows)) + + limit := int(req.RowsLimit) + count := 0 + for _, r := range rows { + if limit > 0 && count >= limit { + return nil + } + streamed, err := streamRow(stream, r, req.Filter) + if err != nil { + return err + } + if streamed { + count++ + } + } + return nil +} + +// streamRow filters the given row and sends it via the given stream. +// Returns true if at least one cell matched the filter and was streamed, false otherwise. +func streamRow(stream btpb.Bigtable_ReadRowsServer, r *row, f *btpb.RowFilter) (bool, error) { + r.mu.Lock() + nr := r.copy() + r.mu.Unlock() + r = nr + + if !filterRow(f, r) { + return false, nil + } + + rrr := &btpb.ReadRowsResponse{} + families := r.sortedFamilies() + for _, fam := range families { + for _, colName := range fam.colNames { + cells := fam.cells[colName] + if len(cells) == 0 { + continue + } + // TODO(dsymonds): Apply transformers. + for _, cell := range cells { + rrr.Chunks = append(rrr.Chunks, &btpb.ReadRowsResponse_CellChunk{ + RowKey: []byte(r.key), + FamilyName: &wrappers.StringValue{Value: fam.name}, + Qualifier: &wrappers.BytesValue{Value: []byte(colName)}, + TimestampMicros: cell.ts, + Value: cell.value, + }) + } + } + } + // We can't have a cell with just COMMIT set, which would imply a new empty cell. + // So modify the last cell to have the COMMIT flag set. + if len(rrr.Chunks) > 0 { + rrr.Chunks[len(rrr.Chunks)-1].RowStatus = &btpb.ReadRowsResponse_CellChunk_CommitRow{CommitRow: true} + } + + return true, stream.Send(rrr) +} + +// filterRow modifies a row with the given filter. Returns true if at least one cell from the row matches, +// false otherwise. 
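+// Composite filters (chain, interleave, condition) recurse into their
+// sub-filters. Cell-level filters are applied by trimming the row's
+// cells in place, so callers that need the original row intact must
+// pass in a copy (see streamRow and CheckAndMutateRow).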
+func filterRow(f *btpb.RowFilter, r *row) bool { + if f == nil { + return true + } + // Handle filters that apply beyond just including/excluding cells. + switch f := f.Filter.(type) { + case *btpb.RowFilter_BlockAllFilter: + return !f.BlockAllFilter + case *btpb.RowFilter_PassAllFilter: + return f.PassAllFilter + case *btpb.RowFilter_Chain_: + for _, sub := range f.Chain.Filters { + if !filterRow(sub, r) { + return false + } + } + return true + case *btpb.RowFilter_Interleave_: + srs := make([]*row, 0, len(f.Interleave.Filters)) + for _, sub := range f.Interleave.Filters { + sr := r.copy() + filterRow(sub, sr) + srs = append(srs, sr) + } + // merge + // TODO(dsymonds): is this correct? + r.families = make(map[string]*family) + for _, sr := range srs { + for _, fam := range sr.families { + f := r.getOrCreateFamily(fam.name, fam.order) + for colName, cs := range fam.cells { + f.cells[colName] = append(f.cellsByColumn(colName), cs...) + } + } + } + var count int + for _, fam := range r.families { + for _, cs := range fam.cells { + sort.Sort(byDescTS(cs)) + count += len(cs) + } + } + return count > 0 + case *btpb.RowFilter_CellsPerColumnLimitFilter: + lim := int(f.CellsPerColumnLimitFilter) + for _, fam := range r.families { + for col, cs := range fam.cells { + if len(cs) > lim { + fam.cells[col] = cs[:lim] + } + } + } + return true + case *btpb.RowFilter_Condition_: + if filterRow(f.Condition.PredicateFilter, r.copy()) { + if f.Condition.TrueFilter == nil { + return false + } + return filterRow(f.Condition.TrueFilter, r) + } + if f.Condition.FalseFilter == nil { + return false + } + return filterRow(f.Condition.FalseFilter, r) + case *btpb.RowFilter_RowKeyRegexFilter: + pat := string(f.RowKeyRegexFilter) + rx, err := regexp.Compile(pat) + if err != nil { + log.Printf("Bad rowkey_regex_filter pattern %q: %v", pat, err) + return false + } + if !rx.MatchString(r.key) { + return false + } + case *btpb.RowFilter_CellsPerRowLimitFilter: + // Grab the first n cells in the row. + lim := int(f.CellsPerRowLimitFilter) + for _, fam := range r.families { + for _, col := range fam.colNames { + cs := fam.cells[col] + if len(cs) > lim { + fam.cells[col] = cs[:lim] + lim = 0 + } else { + lim -= len(cs) + } + } + } + return true + case *btpb.RowFilter_CellsPerRowOffsetFilter: + // Skip the first n cells in the row. + offset := int(f.CellsPerRowOffsetFilter) + for _, fam := range r.families { + for _, col := range fam.colNames { + cs := fam.cells[col] + if len(cs) > offset { + fam.cells[col] = cs[offset:] + offset = 0 + return true + } else { + fam.cells[col] = cs[:0] + offset -= len(cs) + } + } + } + return true + } + + // Any other case, operate on a per-cell basis. 
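+	// The remaining filter types act on individual cells: filterCells
+	// keeps only the cells that includeCell accepts, after letting
+	// modifyCell apply any transformation (e.g. stripping values).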
+ cellCount := 0 + for _, fam := range r.families { + for colName, cs := range fam.cells { + fam.cells[colName] = filterCells(f, fam.name, colName, cs) + cellCount += len(fam.cells[colName]) + } + } + return cellCount > 0 +} + +func filterCells(f *btpb.RowFilter, fam, col string, cs []cell) []cell { + var ret []cell + for _, cell := range cs { + if includeCell(f, fam, col, cell) { + cell = modifyCell(f, cell) + ret = append(ret, cell) + } + } + return ret +} + +func modifyCell(f *btpb.RowFilter, c cell) cell { + if f == nil { + return c + } + // Consider filters that may modify the cell contents + switch f.Filter.(type) { + case *btpb.RowFilter_StripValueTransformer: + return cell{ts: c.ts} + default: + return c + } +} + +func includeCell(f *btpb.RowFilter, fam, col string, cell cell) bool { + if f == nil { + return true + } + // TODO(dsymonds): Implement many more filters. + switch f := f.Filter.(type) { + case *btpb.RowFilter_CellsPerColumnLimitFilter: + // Don't log, row-level filter + return true + case *btpb.RowFilter_RowKeyRegexFilter: + // Don't log, row-level filter + return true + case *btpb.RowFilter_StripValueTransformer: + // Don't log, cell-modifying filter + return true + default: + log.Printf("WARNING: don't know how to handle filter of type %T (ignoring it)", f) + return true + case *btpb.RowFilter_FamilyNameRegexFilter: + pat := string(f.FamilyNameRegexFilter) + rx, err := regexp.Compile(pat) + if err != nil { + log.Printf("Bad family_name_regex_filter pattern %q: %v", pat, err) + return false + } + return rx.MatchString(fam) + case *btpb.RowFilter_ColumnQualifierRegexFilter: + pat := string(f.ColumnQualifierRegexFilter) + rx, err := regexp.Compile(pat) + if err != nil { + log.Printf("Bad column_qualifier_regex_filter pattern %q: %v", pat, err) + return false + } + return rx.MatchString(col) + case *btpb.RowFilter_ValueRegexFilter: + pat := string(f.ValueRegexFilter) + rx, err := regexp.Compile(pat) + if err != nil { + log.Printf("Bad value_regex_filter pattern %q: %v", pat, err) + return false + } + return rx.Match(cell.value) + case *btpb.RowFilter_ColumnRangeFilter: + if fam != f.ColumnRangeFilter.FamilyName { + return false + } + // Start qualifier defaults to empty string closed + inRangeStart := func() bool { return col >= "" } + switch sq := f.ColumnRangeFilter.StartQualifier.(type) { + case *btpb.ColumnRange_StartQualifierOpen: + inRangeStart = func() bool { return col > string(sq.StartQualifierOpen) } + case *btpb.ColumnRange_StartQualifierClosed: + inRangeStart = func() bool { return col >= string(sq.StartQualifierClosed) } + } + // End qualifier defaults to no upper boundary + inRangeEnd := func() bool { return true } + switch eq := f.ColumnRangeFilter.EndQualifier.(type) { + case *btpb.ColumnRange_EndQualifierClosed: + inRangeEnd = func() bool { return col <= string(eq.EndQualifierClosed) } + case *btpb.ColumnRange_EndQualifierOpen: + inRangeEnd = func() bool { return col < string(eq.EndQualifierOpen) } + } + return inRangeStart() && inRangeEnd() + case *btpb.RowFilter_TimestampRangeFilter: + // Lower bound is inclusive and defaults to 0, upper bound is exclusive and defaults to infinity. 
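+		// In other words, a cell matches when start <= ts < end, where
+		// end == 0 stands in for +infinity.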
+ return cell.ts >= f.TimestampRangeFilter.StartTimestampMicros && + (f.TimestampRangeFilter.EndTimestampMicros == 0 || cell.ts < f.TimestampRangeFilter.EndTimestampMicros) + case *btpb.RowFilter_ValueRangeFilter: + v := cell.value + // Start value defaults to empty string closed + inRangeStart := func() bool { return bytes.Compare(v, []byte{}) >= 0 } + switch sv := f.ValueRangeFilter.StartValue.(type) { + case *btpb.ValueRange_StartValueOpen: + inRangeStart = func() bool { return bytes.Compare(v, sv.StartValueOpen) > 0 } + case *btpb.ValueRange_StartValueClosed: + inRangeStart = func() bool { return bytes.Compare(v, sv.StartValueClosed) >= 0 } + } + // End value defaults to no upper boundary + inRangeEnd := func() bool { return true } + switch ev := f.ValueRangeFilter.EndValue.(type) { + case *btpb.ValueRange_EndValueClosed: + inRangeEnd = func() bool { return bytes.Compare(v, ev.EndValueClosed) <= 0 } + case *btpb.ValueRange_EndValueOpen: + inRangeEnd = func() bool { return bytes.Compare(v, ev.EndValueOpen) < 0 } + } + return inRangeStart() && inRangeEnd() + } +} + +func (s *server) MutateRow(ctx context.Context, req *btpb.MutateRowRequest) (*btpb.MutateRowResponse, error) { + s.mu.Lock() + tbl, ok := s.tables[req.TableName] + s.mu.Unlock() + if !ok { + return nil, status.Errorf(codes.NotFound, "table %q not found", req.TableName) + } + fs := tbl.columnFamilies() + r := tbl.mutableRow(string(req.RowKey)) + r.mu.Lock() + defer r.mu.Unlock() + if err := applyMutations(tbl, r, req.Mutations, fs); err != nil { + return nil, err + } + return &btpb.MutateRowResponse{}, nil +} + +func (s *server) MutateRows(req *btpb.MutateRowsRequest, stream btpb.Bigtable_MutateRowsServer) error { + s.mu.Lock() + tbl, ok := s.tables[req.TableName] + s.mu.Unlock() + if !ok { + return status.Errorf(codes.NotFound, "table %q not found", req.TableName) + } + res := &btpb.MutateRowsResponse{Entries: make([]*btpb.MutateRowsResponse_Entry, len(req.Entries))} + + fs := tbl.columnFamilies() + + for i, entry := range req.Entries { + r := tbl.mutableRow(string(entry.RowKey)) + r.mu.Lock() + code, msg := int32(codes.OK), "" + if err := applyMutations(tbl, r, entry.Mutations, fs); err != nil { + code = int32(codes.Internal) + msg = err.Error() + } + res.Entries[i] = &btpb.MutateRowsResponse_Entry{ + Index: int64(i), + Status: &statpb.Status{Code: code, Message: msg}, + } + r.mu.Unlock() + } + stream.Send(res) + return nil +} + +func (s *server) CheckAndMutateRow(ctx context.Context, req *btpb.CheckAndMutateRowRequest) (*btpb.CheckAndMutateRowResponse, error) { + s.mu.Lock() + tbl, ok := s.tables[req.TableName] + s.mu.Unlock() + if !ok { + return nil, status.Errorf(codes.NotFound, "table %q not found", req.TableName) + } + res := &btpb.CheckAndMutateRowResponse{} + + fs := tbl.columnFamilies() + + r := tbl.mutableRow(string(req.RowKey)) + r.mu.Lock() + defer r.mu.Unlock() + + // Figure out which mutation to apply. + whichMut := false + if req.PredicateFilter == nil { + // Use true_mutations iff row contains any cells. + whichMut = !r.isEmpty() + } else { + // Use true_mutations iff any cells in the row match the filter. + // TODO(dsymonds): This could be cheaper. + nr := r.copy() + filterRow(req.PredicateFilter, nr) + whichMut = !nr.isEmpty() + } + res.PredicateMatched = whichMut + muts := req.FalseMutations + if whichMut { + muts = req.TrueMutations + } + + if err := applyMutations(tbl, r, muts, fs); err != nil { + return nil, err + } + return res, nil +} + +// applyMutations applies a sequence of mutations to a row. 
+// fs should be a snapshot of tbl.families.
+// It assumes r.mu is locked.
+func applyMutations(tbl *table, r *row, muts []*btpb.Mutation, fs map[string]*columnFamily) error {
+	for _, mut := range muts {
+		switch mut := mut.Mutation.(type) {
+		default:
+			return fmt.Errorf("can't handle mutation type %T", mut)
+		case *btpb.Mutation_SetCell_:
+			set := mut.SetCell
+			if _, ok := fs[set.FamilyName]; !ok {
+				return fmt.Errorf("unknown family %q", set.FamilyName)
+			}
+			ts := set.TimestampMicros
+			if ts == -1 { // bigtable.ServerTime
+				ts = newTimestamp()
+			}
+			if !tbl.validTimestamp(ts) {
+				return fmt.Errorf("invalid timestamp %d", ts)
+			}
+			fam := set.FamilyName
+			col := string(set.ColumnQualifier)
+
+			newCell := cell{ts: ts, value: set.Value}
+			f := r.getOrCreateFamily(fam, fs[fam].order)
+			f.cells[col] = appendOrReplaceCell(f.cellsByColumn(col), newCell)
+		case *btpb.Mutation_DeleteFromColumn_:
+			del := mut.DeleteFromColumn
+			if _, ok := fs[del.FamilyName]; !ok {
+				return fmt.Errorf("unknown family %q", del.FamilyName)
+			}
+			fam := del.FamilyName
+			col := string(del.ColumnQualifier)
+			if _, ok := r.families[fam]; ok {
+				cs := r.families[fam].cells[col]
+				if del.TimeRange != nil {
+					tsr := del.TimeRange
+					if !tbl.validTimestamp(tsr.StartTimestampMicros) {
+						return fmt.Errorf("invalid timestamp %d", tsr.StartTimestampMicros)
+					}
+					if !tbl.validTimestamp(tsr.EndTimestampMicros) {
+						return fmt.Errorf("invalid timestamp %d", tsr.EndTimestampMicros)
+					}
+					// Find half-open interval to remove.
+					// Cells are in descending timestamp order,
+					// so the predicates to sort.Search are inverted.
+					si, ei := 0, len(cs)
+					if tsr.StartTimestampMicros > 0 {
+						ei = sort.Search(len(cs), func(i int) bool { return cs[i].ts < tsr.StartTimestampMicros })
+					}
+					if tsr.EndTimestampMicros > 0 {
+						si = sort.Search(len(cs), func(i int) bool { return cs[i].ts < tsr.EndTimestampMicros })
+					}
+					if si < ei {
+						copy(cs[si:], cs[ei:])
+						cs = cs[:len(cs)-(ei-si)]
+					}
+				} else {
+					cs = nil
+				}
+				if len(cs) == 0 {
+					delete(r.families[fam].cells, col)
+					colNames := r.families[fam].colNames
+					i := sort.Search(len(colNames), func(i int) bool { return colNames[i] >= col })
+					if i < len(colNames) && colNames[i] == col {
+						r.families[fam].colNames = append(colNames[:i], colNames[i+1:]...)
+ } + if len(r.families[fam].cells) == 0 { + delete(r.families, fam) + } + } else { + r.families[fam].cells[col] = cs + } + } + case *btpb.Mutation_DeleteFromRow_: + r.families = make(map[string]*family) + case *btpb.Mutation_DeleteFromFamily_: + fampre := mut.DeleteFromFamily.FamilyName + delete(r.families, fampre) + } + } + return nil +} + +func maxTimestamp(x, y int64) int64 { + if x > y { + return x + } + return y +} + +func newTimestamp() int64 { + ts := time.Now().UnixNano() / 1e3 + ts -= ts % 1000 // round to millisecond granularity + return ts +} + +func appendOrReplaceCell(cs []cell, newCell cell) []cell { + replaced := false + for i, cell := range cs { + if cell.ts == newCell.ts { + cs[i] = newCell + replaced = true + break + } + } + if !replaced { + cs = append(cs, newCell) + } + sort.Sort(byDescTS(cs)) + return cs +} + +func (s *server) ReadModifyWriteRow(ctx context.Context, req *btpb.ReadModifyWriteRowRequest) (*btpb.ReadModifyWriteRowResponse, error) { + s.mu.Lock() + tbl, ok := s.tables[req.TableName] + s.mu.Unlock() + if !ok { + return nil, status.Errorf(codes.NotFound, "table %q not found", req.TableName) + } + updates := make(map[string]cell) // copy of updated cells; keyed by full column name + + fs := tbl.columnFamilies() + + rowKey := string(req.RowKey) + r := tbl.mutableRow(rowKey) + // This must be done before the row lock, acquired below, is released. + r.mu.Lock() + defer r.mu.Unlock() + // Assume all mutations apply to the most recent version of the cell. + // TODO(dsymonds): Verify this assumption and document it in the proto. + for _, rule := range req.Rules { + if _, ok := fs[rule.FamilyName]; !ok { + return nil, fmt.Errorf("unknown family %q", rule.FamilyName) + } + + fam := rule.FamilyName + col := string(rule.ColumnQualifier) + isEmpty := false + f := r.getOrCreateFamily(fam, fs[fam].order) + cs := f.cells[col] + isEmpty = len(cs) == 0 + + ts := newTimestamp() + var newCell, prevCell cell + if !isEmpty { + cells := r.families[fam].cells[col] + prevCell = cells[0] + + // ts is the max of now or the prev cell's timestamp in case the + // prev cell is in the future + ts = maxTimestamp(ts, prevCell.ts) + } + + switch rule := rule.Rule.(type) { + default: + return nil, fmt.Errorf("unknown RMW rule oneof %T", rule) + case *btpb.ReadModifyWriteRule_AppendValue: + newCell = cell{ts: ts, value: append(prevCell.value, rule.AppendValue...)} + case *btpb.ReadModifyWriteRule_IncrementAmount: + var v int64 + if !isEmpty { + prevVal := prevCell.value + if len(prevVal) != 8 { + return nil, fmt.Errorf("increment on non-64-bit value") + } + v = int64(binary.BigEndian.Uint64(prevVal)) + } + v += rule.IncrementAmount + var val [8]byte + binary.BigEndian.PutUint64(val[:], uint64(v)) + newCell = cell{ts: ts, value: val[:]} + } + key := strings.Join([]string{fam, col}, ":") + updates[key] = newCell + f.cells[col] = appendOrReplaceCell(f.cellsByColumn(col), newCell) + } + + res := &btpb.Row{ + Key: req.RowKey, + } + for col, cell := range updates { + i := strings.Index(col, ":") + fam, qual := col[:i], col[i+1:] + var f *btpb.Family + for _, ff := range res.Families { + if ff.Name == fam { + f = ff + break + } + } + if f == nil { + f = &btpb.Family{Name: fam} + res.Families = append(res.Families, f) + } + f.Columns = append(f.Columns, &btpb.Column{ + Qualifier: []byte(qual), + Cells: []*btpb.Cell{{ + TimestampMicros: cell.ts, + Value: cell.value, + }}, + }) + } + return &btpb.ReadModifyWriteRowResponse{Row: res}, nil +} + +func (s *server) SampleRowKeys(req 
*btpb.SampleRowKeysRequest, stream btpb.Bigtable_SampleRowKeysServer) error { + s.mu.Lock() + tbl, ok := s.tables[req.TableName] + s.mu.Unlock() + if !ok { + return status.Errorf(codes.NotFound, "table %q not found", req.TableName) + } + + tbl.mu.RLock() + defer tbl.mu.RUnlock() + + // The return value of SampleRowKeys is very loosely defined. Return at least the + // final row key in the table and choose other row keys randomly. + var offset int64 + var err error + i := 0 + tbl.rows.Ascend(func(it btree.Item) bool { + row := it.(*row) + if i == tbl.rows.Len()-1 || rand.Int31n(100) == 0 { + resp := &btpb.SampleRowKeysResponse{ + RowKey: []byte(row.key), + OffsetBytes: offset, + } + err = stream.Send(resp) + if err != nil { + return false + } + } + offset += int64(row.size()) + i++ + return true + }) + return err +} + +// needGC is invoked whenever the server needs gcloop running. +func (s *server) needGC() { + s.mu.Lock() + if s.gcc == nil { + s.gcc = make(chan int) + go s.gcloop(s.gcc) + } + s.mu.Unlock() +} + +func (s *server) gcloop(done <-chan int) { + const ( + minWait = 500 // ms + maxWait = 1500 // ms + ) + + for { + // Wait for a random time interval. + d := time.Duration(minWait+rand.Intn(maxWait-minWait)) * time.Millisecond + select { + case <-time.After(d): + case <-done: + return // server has been closed + } + + // Do a GC pass over all tables. + var tables []*table + s.mu.Lock() + for _, tbl := range s.tables { + tables = append(tables, tbl) + } + s.mu.Unlock() + for _, tbl := range tables { + tbl.gc() + } + } +} + +type table struct { + mu sync.RWMutex + counter uint64 // increment by 1 when a new family is created + families map[string]*columnFamily // keyed by plain family name + rows *btree.BTree // indexed by row key +} + +const btreeDegree = 16 + +func newTable(ctr *btapb.CreateTableRequest) *table { + fams := make(map[string]*columnFamily) + c := uint64(0) + if ctr.Table != nil { + for id, cf := range ctr.Table.ColumnFamilies { + fams[id] = &columnFamily{ + name: ctr.Parent + "/columnFamilies/" + id, + order: c, + gcRule: cf.GcRule, + } + c++ + } + } + return &table{ + families: fams, + counter: c, + rows: btree.New(btreeDegree), + } +} + +func (t *table) validTimestamp(ts int64) bool { + // Assume millisecond granularity is required. + return ts%1000 == 0 +} + +func (t *table) columnFamilies() map[string]*columnFamily { + cp := make(map[string]*columnFamily) + t.mu.RLock() + for fam, cf := range t.families { + cp[fam] = cf + } + t.mu.RUnlock() + return cp +} + +func (t *table) mutableRow(key string) *row { + bkey := btreeKey(key) + // Try fast path first. + t.mu.RLock() + i := t.rows.Get(bkey) + t.mu.RUnlock() + if i != nil { + return i.(*row) + } + + // We probably need to create the row. + t.mu.Lock() + defer t.mu.Unlock() + i = t.rows.Get(bkey) + if i != nil { + return i.(*row) + } + r := newRow(key) + t.rows.ReplaceOrInsert(r) + return r +} + +func (t *table) gc() { + // This method doesn't add or remove rows, so we only need a read lock for the table. + t.mu.RLock() + defer t.mu.RUnlock() + + // Gather GC rules we'll apply. 
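+	// Each family contributes at most one rule; rows are then locked one
+	// at a time while their cells are trimmed, so concurrent reads of
+	// other rows can proceed under the table's read lock.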
+	rules := make(map[string]*btapb.GcRule) // keyed by "fam"
+	for fam, cf := range t.families {
+		if cf.gcRule != nil {
+			rules[fam] = cf.gcRule
+		}
+	}
+	if len(rules) == 0 {
+		return
+	}
+
+	t.rows.Ascend(func(i btree.Item) bool {
+		r := i.(*row)
+		r.mu.Lock()
+		r.gc(rules)
+		r.mu.Unlock()
+		return true
+	})
+}
+
+type byRowKey []*row
+
+func (b byRowKey) Len() int           { return len(b) }
+func (b byRowKey) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
+func (b byRowKey) Less(i, j int) bool { return b[i].key < b[j].key }
+
+type row struct {
+	key string
+
+	mu       sync.Mutex
+	families map[string]*family // keyed by family name
+}
+
+func newRow(key string) *row {
+	return &row{
+		key:      key,
+		families: make(map[string]*family),
+	}
+}
+
+// copy returns a copy of the row.
+// Cell values are aliased.
+// r.mu should be held.
+func (r *row) copy() *row {
+	nr := newRow(r.key)
+	for _, fam := range r.families {
+		nr.families[fam.name] = &family{
+			name:     fam.name,
+			order:    fam.order,
+			colNames: fam.colNames,
+			cells:    make(map[string][]cell),
+		}
+		for col, cs := range fam.cells {
+			// Copy the []cell slice, but not the []byte inside each cell.
+			nr.families[fam.name].cells[col] = append([]cell(nil), cs...)
+		}
+	}
+	return nr
+}
+
+// isEmpty returns true if a row doesn't contain any cell
+func (r *row) isEmpty() bool {
+	for _, fam := range r.families {
+		for _, cs := range fam.cells {
+			if len(cs) > 0 {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// sortedFamilies returns a column family set
+// sorted in ascending creation order in a row.
+func (r *row) sortedFamilies() []*family {
+	var families []*family
+	for _, fam := range r.families {
+		families = append(families, fam)
+	}
+	sort.Sort(byCreationOrder(families))
+	return families
+}
+
+func (r *row) getOrCreateFamily(name string, order uint64) *family {
+	if _, ok := r.families[name]; !ok {
+		r.families[name] = &family{
+			name:  name,
+			order: order,
+			cells: make(map[string][]cell),
+		}
+	}
+	return r.families[name]
+}
+
+// gc applies the given GC rules to the row.
+// r.mu should be held.
+func (r *row) gc(rules map[string]*btapb.GcRule) {
+	for _, fam := range r.families {
+		rule, ok := rules[fam.name]
+		if !ok {
+			continue
+		}
+		for col, cs := range fam.cells {
+			r.families[fam.name].cells[col] = applyGC(cs, rule)
+		}
+	}
+}
+
+// size returns the total size of all cell values in the row.
+func (r *row) size() int {
+	size := 0
+	for _, fam := range r.families {
+		for _, cells := range fam.cells {
+			for _, cell := range cells {
+				size += len(cell.value)
+			}
+		}
+	}
+	return size
+}
+
+// Less implements btree.Less.
+func (r *row) Less(i btree.Item) bool {
+	return r.key < i.(*row).key
+}
+
+// btreeKey returns a row for use as a key into the BTree.
+func btreeKey(s string) *row { return &row{key: s} }
+
+func (r *row) String() string {
+	return r.key
+}
+
+var gcTypeWarn sync.Once
+
+// applyGC applies the given GC rule to the cells.
+func applyGC(cells []cell, rule *btapb.GcRule) []cell {
+	switch rule := rule.Rule.(type) {
+	default:
+		// TODO(dsymonds): Support GcRule_Intersection_
+		gcTypeWarn.Do(func() {
+			log.Printf("Unsupported GC rule type %T", rule)
+		})
+	case *btapb.GcRule_Union_:
+		for _, sub := range rule.Union.Rules {
+			cells = applyGC(cells, sub)
+		}
+		return cells
+	case *btapb.GcRule_MaxAge:
+		// Timestamps are in microseconds.
+		cutoff := time.Now().UnixNano() / 1e3
+		cutoff -= rule.MaxAge.Seconds * 1e6
+		cutoff -= int64(rule.MaxAge.Nanos) / 1e3
+		// The slice of cells is in descending timestamp order.
+		// This sort.Search will return the index of the first cell whose timestamp is chronologically before the cutoff.
+		si := sort.Search(len(cells), func(i int) bool { return cells[i].ts < cutoff })
+		if si < len(cells) {
+			log.Printf("bttest: GC MaxAge(%v) deleted %d cells.", rule.MaxAge, len(cells)-si)
+		}
+		return cells[:si]
+	case *btapb.GcRule_MaxNumVersions:
+		n := int(rule.MaxNumVersions)
+		if len(cells) > n {
+			cells = cells[:n]
+		}
+		return cells
+	}
+	return cells
+}
+
+type family struct {
+	name     string            // Column family name
+	order    uint64            // Creation order of column family
+	colNames []string          // Column names are sorted in lexicographical ascending order
+	cells    map[string][]cell // Keyed by column name; cells are in descending timestamp order
+}
+
+type byCreationOrder []*family
+
+func (b byCreationOrder) Len() int           { return len(b) }
+func (b byCreationOrder) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
+func (b byCreationOrder) Less(i, j int) bool { return b[i].order < b[j].order }
+
+// cellsByColumn adds the column name to colNames set if it does not exist
+// and returns all cells within a column
+func (f *family) cellsByColumn(name string) []cell {
+	if _, ok := f.cells[name]; !ok {
+		f.colNames = append(f.colNames, name)
+		sort.Strings(f.colNames)
+	}
+	return f.cells[name]
+}
+
+type cell struct {
+	ts    int64
+	value []byte
+}
+
+type byDescTS []cell
+
+func (b byDescTS) Len() int           { return len(b) }
+func (b byDescTS) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
+func (b byDescTS) Less(i, j int) bool { return b[i].ts > b[j].ts }
+
+type columnFamily struct {
+	name   string
+	order  uint64 // Creation order of column family
+	gcRule *btapb.GcRule
+}
+
+func (c *columnFamily) proto() *btapb.ColumnFamily {
+	return &btapb.ColumnFamily{
+		GcRule: c.gcRule,
+	}
+}
+
+func toColumnFamilies(families map[string]*columnFamily) map[string]*btapb.ColumnFamily {
+	fs := make(map[string]*btapb.ColumnFamily)
+	for k, v := range families {
+		fs[k] = v.proto()
+	}
+	return fs
+}
diff --git a/vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go b/vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go
new file mode 100644
index 0000000..8b5c6d8
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go
@@ -0,0 +1,718 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package bttest + +import ( + "fmt" + "math/rand" + "strconv" + "sync" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/context" + btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2" + btpb "google.golang.org/genproto/googleapis/bigtable/v2" + "google.golang.org/grpc" +) + +func TestConcurrentMutationsReadModifyAndGC(t *testing.T) { + s := &server{ + tables: make(map[string]*table), + } + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + if _, err := s.CreateTable( + ctx, + &btapb.CreateTableRequest{Parent: "cluster", TableId: "t"}); err != nil { + t.Fatal(err) + } + const name = `cluster/tables/t` + tbl := s.tables[name] + req := &btapb.ModifyColumnFamiliesRequest{ + Name: name, + Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ + Id: "cf", + Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{Create: &btapb.ColumnFamily{}}, + }}, + } + _, err := s.ModifyColumnFamilies(ctx, req) + if err != nil { + t.Fatal(err) + } + req = &btapb.ModifyColumnFamiliesRequest{ + Name: name, + Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ + Id: "cf", + Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{Update: &btapb.ColumnFamily{ + GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}, + }}, + }}, + } + if _, err := s.ModifyColumnFamilies(ctx, req); err != nil { + t.Fatal(err) + } + + var wg sync.WaitGroup + var ts int64 + ms := func() []*btpb.Mutation { + return []*btpb.Mutation{{ + Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{ + FamilyName: "cf", + ColumnQualifier: []byte(`col`), + TimestampMicros: atomic.AddInt64(&ts, 1000), + }}, + }} + } + + rmw := func() *btpb.ReadModifyWriteRowRequest { + return &btpb.ReadModifyWriteRowRequest{ + TableName: name, + RowKey: []byte(fmt.Sprint(rand.Intn(100))), + Rules: []*btpb.ReadModifyWriteRule{{ + FamilyName: "cf", + ColumnQualifier: []byte("col"), + Rule: &btpb.ReadModifyWriteRule_IncrementAmount{IncrementAmount: 1}, + }}, + } + } + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for ctx.Err() == nil { + req := &btpb.MutateRowRequest{ + TableName: name, + RowKey: []byte(fmt.Sprint(rand.Intn(100))), + Mutations: ms(), + } + s.MutateRow(ctx, req) + } + }() + wg.Add(1) + go func() { + defer wg.Done() + for ctx.Err() == nil { + _, _ = s.ReadModifyWriteRow(ctx, rmw()) + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + tbl.gc() + }() + } + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + select { + case <-done: + case <-time.After(1 * time.Second): + t.Error("Concurrent mutations and GCs haven't completed after 1s") + } +} + +func TestCreateTableWithFamily(t *testing.T) { + // The Go client currently doesn't support creating a table with column families + // in one operation but it is allowed by the API. This must still be supported by the + // fake server so this test lives here instead of in the main bigtable + // integration test. 
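+	// newTable copies any ColumnFamilies set on CreateTableRequest.Table,
+	// which is the code path exercised here.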
+ s := &server{ + tables: make(map[string]*table), + } + ctx := context.Background() + newTbl := btapb.Table{ + ColumnFamilies: map[string]*btapb.ColumnFamily{ + "cf1": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 123}}}, + "cf2": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 456}}}, + }, + } + cTbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) + if err != nil { + t.Fatalf("Creating table: %v", err) + } + tbl, err := s.GetTable(ctx, &btapb.GetTableRequest{Name: cTbl.Name}) + if err != nil { + t.Fatalf("Getting table: %v", err) + } + cf := tbl.ColumnFamilies["cf1"] + if cf == nil { + t.Fatalf("Missing col family cf1") + } + if got, want := cf.GcRule.GetMaxNumVersions(), int32(123); got != want { + t.Errorf("Invalid MaxNumVersions: wanted:%d, got:%d", want, got) + } + cf = tbl.ColumnFamilies["cf2"] + if cf == nil { + t.Fatalf("Missing col family cf2") + } + if got, want := cf.GcRule.GetMaxNumVersions(), int32(456); got != want { + t.Errorf("Invalid MaxNumVersions: wanted:%d, got:%d", want, got) + } +} + +type MockSampleRowKeysServer struct { + responses []*btpb.SampleRowKeysResponse + grpc.ServerStream +} + +func (s *MockSampleRowKeysServer) Send(resp *btpb.SampleRowKeysResponse) error { + s.responses = append(s.responses, resp) + return nil +} + +func TestSampleRowKeys(t *testing.T) { + s := &server{ + tables: make(map[string]*table), + } + ctx := context.Background() + newTbl := btapb.Table{ + ColumnFamilies: map[string]*btapb.ColumnFamily{ + "cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}}, + }, + } + tbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) + if err != nil { + t.Fatalf("Creating table: %v", err) + } + + // Populate the table + val := []byte("value") + rowCount := 1000 + for i := 0; i < rowCount; i++ { + req := &btpb.MutateRowRequest{ + TableName: tbl.Name, + RowKey: []byte("row-" + strconv.Itoa(i)), + Mutations: []*btpb.Mutation{{ + Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{ + FamilyName: "cf", + ColumnQualifier: []byte("col"), + TimestampMicros: 0, + Value: val, + }}, + }}, + } + if _, err := s.MutateRow(ctx, req); err != nil { + t.Fatalf("Populating table: %v", err) + } + } + + mock := &MockSampleRowKeysServer{} + if err := s.SampleRowKeys(&btpb.SampleRowKeysRequest{TableName: tbl.Name}, mock); err != nil { + t.Errorf("SampleRowKeys error: %v", err) + } + if len(mock.responses) == 0 { + t.Fatal("Response count: got 0, want > 0") + } + // Make sure the offset of the final response is the offset of the final row + got := mock.responses[len(mock.responses)-1].OffsetBytes + want := int64((rowCount - 1) * len(val)) + if got != want { + t.Errorf("Invalid offset: got %d, want %d", got, want) + } +} + +func TestDropRowRange(t *testing.T) { + s := &server{ + tables: make(map[string]*table), + } + ctx := context.Background() + newTbl := btapb.Table{ + ColumnFamilies: map[string]*btapb.ColumnFamily{ + "cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}}, + }, + } + tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) + if err != nil { + t.Fatalf("Creating table: %v", err) + } + + tbl := s.tables[tblInfo.Name] + + // Populate the table + prefixes := []string{"AAA", "BBB", "CCC", "DDD"} + count := 3 + doWrite := func() { + for _, prefix := range prefixes { + for i := 0; i < 
count; i++ { + req := &btpb.MutateRowRequest{ + TableName: tblInfo.Name, + RowKey: []byte(prefix + strconv.Itoa(i)), + Mutations: []*btpb.Mutation{{ + Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{ + FamilyName: "cf", + ColumnQualifier: []byte("col"), + TimestampMicros: 0, + Value: []byte{}, + }}, + }}, + } + if _, err := s.MutateRow(ctx, req); err != nil { + t.Fatalf("Populating table: %v", err) + } + } + } + } + + doWrite() + tblSize := tbl.rows.Len() + req := &btapb.DropRowRangeRequest{ + Name: tblInfo.Name, + Target: &btapb.DropRowRangeRequest_RowKeyPrefix{RowKeyPrefix: []byte("AAA")}, + } + if _, err = s.DropRowRange(ctx, req); err != nil { + t.Fatalf("Dropping first range: %v", err) + } + got, want := tbl.rows.Len(), tblSize-count + if got != want { + t.Errorf("Row count after first drop: got %d (%v), want %d", got, tbl.rows, want) + } + + req = &btapb.DropRowRangeRequest{ + Name: tblInfo.Name, + Target: &btapb.DropRowRangeRequest_RowKeyPrefix{RowKeyPrefix: []byte("DDD")}, + } + if _, err = s.DropRowRange(ctx, req); err != nil { + t.Fatalf("Dropping second range: %v", err) + } + got, want = tbl.rows.Len(), tblSize-(2*count) + if got != want { + t.Errorf("Row count after second drop: got %d (%v), want %d", got, tbl.rows, want) + } + + req = &btapb.DropRowRangeRequest{ + Name: tblInfo.Name, + Target: &btapb.DropRowRangeRequest_RowKeyPrefix{RowKeyPrefix: []byte("XXX")}, + } + if _, err = s.DropRowRange(ctx, req); err != nil { + t.Fatalf("Dropping invalid range: %v", err) + } + got, want = tbl.rows.Len(), tblSize-(2*count) + if got != want { + t.Errorf("Row count after invalid drop: got %d (%v), want %d", got, tbl.rows, want) + } + + req = &btapb.DropRowRangeRequest{ + Name: tblInfo.Name, + Target: &btapb.DropRowRangeRequest_DeleteAllDataFromTable{DeleteAllDataFromTable: true}, + } + if _, err = s.DropRowRange(ctx, req); err != nil { + t.Fatalf("Dropping all data: %v", err) + } + got, want = tbl.rows.Len(), 0 + if got != want { + t.Errorf("Row count after drop all: got %d, want %d", got, want) + } + + // Test that we can write rows, delete some and then write them again. 
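+	// DeleteAllDataFromTable swaps in a fresh btree rather than removing
+	// rows one by one, so this checks that no stale row state survives a
+	// full drop followed by new writes.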
+ count = 1 + doWrite() + + req = &btapb.DropRowRangeRequest{ + Name: tblInfo.Name, + Target: &btapb.DropRowRangeRequest_DeleteAllDataFromTable{DeleteAllDataFromTable: true}, + } + if _, err = s.DropRowRange(ctx, req); err != nil { + t.Fatalf("Dropping all data: %v", err) + } + got, want = tbl.rows.Len(), 0 + if got != want { + t.Errorf("Row count after drop all: got %d, want %d", got, want) + } + + doWrite() + got, want = tbl.rows.Len(), len(prefixes) + if got != want { + t.Errorf("Row count after rewrite: got %d, want %d", got, want) + } + + req = &btapb.DropRowRangeRequest{ + Name: tblInfo.Name, + Target: &btapb.DropRowRangeRequest_RowKeyPrefix{RowKeyPrefix: []byte("BBB")}, + } + if _, err = s.DropRowRange(ctx, req); err != nil { + t.Fatalf("Dropping range: %v", err) + } + doWrite() + got, want = tbl.rows.Len(), len(prefixes) + if got != want { + t.Errorf("Row count after drop range: got %d, want %d", got, want) + } +} + +type MockReadRowsServer struct { + responses []*btpb.ReadRowsResponse + grpc.ServerStream +} + +func (s *MockReadRowsServer) Send(resp *btpb.ReadRowsResponse) error { + s.responses = append(s.responses, resp) + return nil +} + +func TestReadRows(t *testing.T) { + ctx := context.Background() + s := &server{ + tables: make(map[string]*table), + } + newTbl := btapb.Table{ + ColumnFamilies: map[string]*btapb.ColumnFamily{ + "cf0": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}}, + }, + } + tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) + if err != nil { + t.Fatalf("Creating table: %v", err) + } + mreq := &btpb.MutateRowRequest{ + TableName: tblInfo.Name, + RowKey: []byte("row"), + Mutations: []*btpb.Mutation{{ + Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{ + FamilyName: "cf0", + ColumnQualifier: []byte("col"), + TimestampMicros: 1000, + Value: []byte{}, + }}, + }}, + } + if _, err := s.MutateRow(ctx, mreq); err != nil { + t.Fatalf("Populating table: %v", err) + } + + for _, rowset := range []*btpb.RowSet{ + {RowKeys: [][]byte{[]byte("row")}}, + {RowRanges: []*btpb.RowRange{{StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte("")}}}}, + {RowRanges: []*btpb.RowRange{{StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte("r")}}}}, + {RowRanges: []*btpb.RowRange{{ + StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte("")}, + EndKey: &btpb.RowRange_EndKeyOpen{EndKeyOpen: []byte("s")}, + }}}, + } { + mock := &MockReadRowsServer{} + req := &btpb.ReadRowsRequest{TableName: tblInfo.Name, Rows: rowset} + if err = s.ReadRows(req, mock); err != nil { + t.Fatalf("ReadRows error: %v", err) + } + if got, want := len(mock.responses), 1; got != want { + t.Errorf("%+v: response count: got %d, want %d", rowset, got, want) + } + } +} + +func TestReadRowsOrder(t *testing.T) { + s := &server{ + tables: make(map[string]*table), + } + ctx := context.Background() + newTbl := btapb.Table{ + ColumnFamilies: map[string]*btapb.ColumnFamily{ + "cf0": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}}, + }, + } + tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) + if err != nil { + t.Fatalf("Creating table: %v", err) + } + count := 3 + mcf := func(i int) *btapb.ModifyColumnFamiliesRequest { + return &btapb.ModifyColumnFamiliesRequest{ + Name: tblInfo.Name, + Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ + Id: "cf" + strconv.Itoa(i), + Mod: 
&btapb.ModifyColumnFamiliesRequest_Modification_Create{Create: &btapb.ColumnFamily{}}, + }}, + } + } + for i := 1; i <= count; i++ { + _, err = s.ModifyColumnFamilies(ctx, mcf(i)) + if err != nil { + t.Fatal(err) + } + } + // Populate the table + for fc := 0; fc < count; fc++ { + for cc := count; cc > 0; cc-- { + for tc := 0; tc < count; tc++ { + req := &btpb.MutateRowRequest{ + TableName: tblInfo.Name, + RowKey: []byte("row"), + Mutations: []*btpb.Mutation{{ + Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{ + FamilyName: "cf" + strconv.Itoa(fc), + ColumnQualifier: []byte("col" + strconv.Itoa(cc)), + TimestampMicros: int64((tc + 1) * 1000), + Value: []byte{}, + }}, + }}, + } + if _, err := s.MutateRow(ctx, req); err != nil { + t.Fatalf("Populating table: %v", err) + } + } + } + } + req := &btpb.ReadRowsRequest{ + TableName: tblInfo.Name, + Rows: &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}}, + } + mock := &MockReadRowsServer{} + if err = s.ReadRows(req, mock); err != nil { + t.Errorf("ReadRows error: %v", err) + } + if len(mock.responses) == 0 { + t.Fatal("Response count: got 0, want > 0") + } + // 3 families x 3 columns x 3 timestamps should yield 27 chunks. + if len(mock.responses[0].Chunks) != 27 { + t.Fatalf("Chunk count: got %d, want 27", len(mock.responses[0].Chunks)) + } + testOrder := func(ms *MockReadRowsServer) { + var prevFam, prevCol string + var prevTime int64 + for _, cc := range ms.responses[0].Chunks { + if prevFam == "" { + prevFam = cc.FamilyName.Value + prevCol = string(cc.Qualifier.Value) + prevTime = cc.TimestampMicros + continue + } + if cc.FamilyName.Value < prevFam { + t.Errorf("Family order is not correct: got %s < %s", cc.FamilyName.Value, prevFam) + } else if cc.FamilyName.Value == prevFam { + if string(cc.Qualifier.Value) < prevCol { + t.Errorf("Column order is not correct: got %s < %s", string(cc.Qualifier.Value), prevCol) + } else if string(cc.Qualifier.Value) == prevCol { + if cc.TimestampMicros > prevTime { + t.Errorf("Cell order is not correct: got %d > %d", cc.TimestampMicros, prevTime) + } + } + } + prevFam = cc.FamilyName.Value + prevCol = string(cc.Qualifier.Value) + prevTime = cc.TimestampMicros + } + } + testOrder(mock) + + // Read with interleave filter + inter := &btpb.RowFilter_Interleave{} + fnr := &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{FamilyNameRegexFilter: "1"}} + cqr := &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{ColumnQualifierRegexFilter: []byte("2")}} + inter.Filters = append(inter.Filters, fnr, cqr) + req = &btpb.ReadRowsRequest{ + TableName: tblInfo.Name, + Rows: &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}}, + Filter: &btpb.RowFilter{ + Filter: &btpb.RowFilter_Interleave_{Interleave: inter}, + }, + } + + mock = &MockReadRowsServer{} + if err = s.ReadRows(req, mock); err != nil { + t.Errorf("ReadRows error: %v", err) + } + if len(mock.responses) == 0 { + t.Fatal("Response count: got 0, want > 0") + } + // The family filter matches the 9 cells of cf1 and the qualifier filter matches the 9 col2 cells; interleave keeps duplicates, so expect 18 chunks. + if len(mock.responses[0].Chunks) != 18 { + t.Fatalf("Chunk count: got %d, want 18", len(mock.responses[0].Chunks)) + } + testOrder(mock) + + // Check order after ReadModifyWriteRow + rmw := func(i int) *btpb.ReadModifyWriteRowRequest { + return &btpb.ReadModifyWriteRowRequest{ + TableName: tblInfo.Name, + RowKey: []byte("row"), + Rules: []*btpb.ReadModifyWriteRule{{ + FamilyName: "cf3", + ColumnQualifier: []byte("col" + strconv.Itoa(i)), + Rule: &btpb.ReadModifyWriteRule_IncrementAmount{IncrementAmount: 1}, + }}, + } + } + for i := count; i > 0; i-- { + if _, err := s.ReadModifyWriteRow(ctx, rmw(i)); err != nil { + t.Fatalf("ReadModifyWriteRow error: %v", err) + } + } + req = &btpb.ReadRowsRequest{ + TableName:
tblInfo.Name, + Rows: &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}}, + } + mock = &MockReadRowsServer{} + if err = s.ReadRows(req, mock); err != nil { + t.Errorf("ReadRows error: %v", err) + } + if len(mock.responses) == 0 { + t.Fatal("Response count: got 0, want > 0") + } + // The ReadModifyWriteRow calls add one cell in each of cf3:col1..col3, so 27 + 3 = 30 chunks. + if len(mock.responses[0].Chunks) != 30 { + t.Fatalf("Chunk count: got %d, want 30", len(mock.responses[0].Chunks)) + } + testOrder(mock) +} + +func TestCheckAndMutateRowWithoutPredicate(t *testing.T) { + s := &server{ + tables: make(map[string]*table), + } + ctx := context.Background() + newTbl := btapb.Table{ + ColumnFamilies: map[string]*btapb.ColumnFamily{ + "cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}}, + }, + } + tbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) + if err != nil { + t.Fatalf("Creating table: %v", err) + } + + // Populate the table + val := []byte("value") + mrreq := &btpb.MutateRowRequest{ + TableName: tbl.Name, + RowKey: []byte("row-present"), + Mutations: []*btpb.Mutation{{ + Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{ + FamilyName: "cf", + ColumnQualifier: []byte("col"), + TimestampMicros: 0, + Value: val, + }}, + }}, + } + if _, err := s.MutateRow(ctx, mrreq); err != nil { + t.Fatalf("Populating table: %v", err) + } + + req := &btpb.CheckAndMutateRowRequest{ + TableName: tbl.Name, + RowKey: []byte("row-not-present"), + } + if res, err := s.CheckAndMutateRow(ctx, req); err != nil { + t.Errorf("CheckAndMutateRow error: %v", err) + } else if got, want := res.PredicateMatched, false; got != want { + t.Errorf("Invalid PredicateMatched value: got %t, want %t", got, want) + } + + req = &btpb.CheckAndMutateRowRequest{ + TableName: tbl.Name, + RowKey: []byte("row-present"), + } + if res, err := s.CheckAndMutateRow(ctx, req); err != nil { + t.Errorf("CheckAndMutateRow error: %v", err) + } else if got, want := res.PredicateMatched, true; got != want { + t.Errorf("Invalid PredicateMatched value: got %t, want %t", got, want) + } +} + +// populateTable creates a table with several column families and fills one row with test data. +func populateTable(ctx context.Context, s *server) (*btapb.Table, error) { + newTbl := btapb.Table{ + ColumnFamilies: map[string]*btapb.ColumnFamily{ + "cf0": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}}, + }, + } + tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) + if err != nil { + return nil, err + } + count := 3 + mcf := func(i int) *btapb.ModifyColumnFamiliesRequest { + return &btapb.ModifyColumnFamiliesRequest{ + Name: tblInfo.Name, + Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ + Id: "cf" + strconv.Itoa(i), + Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{Create: &btapb.ColumnFamily{}}, + }}, + } + } + for i := 1; i <= count; i++ { + _, err = s.ModifyColumnFamilies(ctx, mcf(i)) + if err != nil { + return nil, err + } + } + // Populate the table + for fc := 0; fc < count; fc++ { + for cc := count; cc > 0; cc-- { + for tc := 0; tc < count; tc++ { + req := &btpb.MutateRowRequest{ + TableName: tblInfo.Name, + RowKey: []byte("row"), + Mutations: []*btpb.Mutation{{ + Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{ + FamilyName: "cf" + strconv.Itoa(fc), + ColumnQualifier: []byte("col" + strconv.Itoa(cc)), + TimestampMicros: int64((tc + 1) * 1000), + Value: []byte{}, + }}, + }}, + } + if _, err := s.MutateRow(ctx, req); err != nil { + return nil, err + } + } + } + } + + return tblInfo, nil +} + +func
TestFilters(t *testing.T) { + tests := []struct { + in *btpb.RowFilter + out int + }{ + {in: &btpb.RowFilter{Filter: &btpb.RowFilter_BlockAllFilter{BlockAllFilter: true}}, out: 0}, + {in: &btpb.RowFilter{Filter: &btpb.RowFilter_BlockAllFilter{BlockAllFilter: false}}, out: 1}, + {in: &btpb.RowFilter{Filter: &btpb.RowFilter_PassAllFilter{PassAllFilter: true}}, out: 1}, + {in: &btpb.RowFilter{Filter: &btpb.RowFilter_PassAllFilter{PassAllFilter: false}}, out: 0}, + } + + ctx := context.Background() + + s := &server{ + tables: make(map[string]*table), + } + + tblInfo, err := populateTable(ctx, s) + if err != nil { + t.Fatal(err) + } + + req := &btpb.ReadRowsRequest{ + TableName: tblInfo.Name, + Rows: &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}}, + } + + for _, tc := range tests { + req.Filter = tc.in + + mock := &MockReadRowsServer{} + if err = s.ReadRows(req, mock); err != nil { + t.Errorf("ReadRows error: %v", err) + continue + } + + if len(mock.responses) != tc.out { + t.Errorf("Response count: got %d, want %d", len(mock.responses), tc.out) + continue + } + } +} diff --git a/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go b/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go new file mode 100644 index 0000000..2035647 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go @@ -0,0 +1,1323 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +// Command docs are in cbtdoc.go. + +import ( + "bytes" + "encoding/csv" + "flag" + "fmt" + "go/format" + "io" + "log" + "os" + "regexp" + "sort" + "strconv" + "strings" + "text/tabwriter" + "text/template" + "time" + + "cloud.google.com/go/bigtable" + "cloud.google.com/go/bigtable/internal/cbtconfig" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +var ( + oFlag = flag.String("o", "", "if set, redirect stdout to this file") + + config *cbtconfig.Config + client *bigtable.Client + adminClient *bigtable.AdminClient + instanceAdminClient *bigtable.InstanceAdminClient + + version = "" + revision = "" + revisionDate = "" +) + +func getCredentialOpts(opts []option.ClientOption) []option.ClientOption { + if ts := config.TokenSource; ts != nil { + opts = append(opts, option.WithTokenSource(ts)) + } + if tlsCreds := config.TLSCreds; tlsCreds != nil { + opts = append(opts, option.WithGRPCDialOption(grpc.WithTransportCredentials(tlsCreds))) + } + return opts +} + +func getClient(clientConf bigtable.ClientConfig) *bigtable.Client { + if client == nil { + var opts []option.ClientOption + if ep := config.DataEndpoint; ep != "" { + opts = append(opts, option.WithEndpoint(ep)) + } + opts = getCredentialOpts(opts) + var err error + client, err = bigtable.NewClientWithConfig(context.Background(), config.Project, config.Instance, clientConf, opts...)
+ if err != nil { + log.Fatalf("Making bigtable.Client: %v", err) + } + } + return client +} + +func getAdminClient() *bigtable.AdminClient { + if adminClient == nil { + var opts []option.ClientOption + if ep := config.AdminEndpoint; ep != "" { + opts = append(opts, option.WithEndpoint(ep)) + } + opts = getCredentialOpts(opts) + var err error + adminClient, err = bigtable.NewAdminClient(context.Background(), config.Project, config.Instance, opts...) + if err != nil { + log.Fatalf("Making bigtable.AdminClient: %v", err) + } + } + return adminClient +} + +func getInstanceAdminClient() *bigtable.InstanceAdminClient { + if instanceAdminClient == nil { + var opts []option.ClientOption + if ep := config.AdminEndpoint; ep != "" { + opts = append(opts, option.WithEndpoint(ep)) + } + opts = getCredentialOpts(opts) + var err error + instanceAdminClient, err = bigtable.NewInstanceAdminClient(context.Background(), config.Project, opts...) + if err != nil { + log.Fatalf("Making bigtable.InstanceAdminClient: %v", err) + } + } + return instanceAdminClient +} + +func main() { + var err error + config, err = cbtconfig.Load() + if err != nil { + log.Fatal(err) + } + config.RegisterFlags() + + flag.Usage = func() { usage(os.Stderr) } + flag.Parse() + if flag.NArg() == 0 { + usage(os.Stderr) + os.Exit(1) + } + + if *oFlag != "" { + f, err := os.Create(*oFlag) + if err != nil { + log.Fatal(err) + } + defer func() { + if err := f.Close(); err != nil { + log.Fatal(err) + } + }() + os.Stdout = f + } + + ctx := context.Background() + for _, cmd := range commands { + if cmd.Name == flag.Arg(0) { + if err := config.CheckFlags(cmd.Required); err != nil { + log.Fatal(err) + } + cmd.do(ctx, flag.Args()[1:]...) + return + } + } + log.Fatalf("Unknown command %q", flag.Arg(0)) +} + +func usage(w io.Writer) { + fmt.Fprintf(w, "Usage: %s [flags] <command> ...\n", os.Args[0]) + flag.CommandLine.SetOutput(w) + flag.CommandLine.PrintDefaults() + fmt.Fprintf(w, "\n%s", cmdSummary) +} + +var cmdSummary string // generated in init, below + +func init() { + var buf bytes.Buffer + tw := tabwriter.NewWriter(&buf, 10, 8, 4, '\t', 0) + for _, cmd := range commands { + fmt.Fprintf(tw, "cbt %s\t%s\n", cmd.Name, cmd.Desc) + } + tw.Flush() + buf.WriteString(configHelp) + buf.WriteString("\ncbt " + version + " " + revision + " " + revisionDate + "\n") + cmdSummary = buf.String() +} + +var configHelp = ` +Alpha features are not currently available to most Cloud Bigtable customers. The +features might be changed in backward-incompatible ways and are not recommended +for production use. They are not subject to any SLA or deprecation policy. + +For convenience, values of the -project, -instance, -creds, +-admin-endpoint and -data-endpoint flags may be specified in +` + cbtconfig.Filename() + ` in this format: + project = my-project-123 + instance = my-instance + creds = path-to-account-key.json + admin-endpoint = hostname:port + data-endpoint = hostname:port +All values are optional, and all will be overridden by flags. +` + +var commands = []struct { + Name, Desc string + do func(context.Context, ...string) + Usage string + Required cbtconfig.RequiredFlags +}{ + { + Name: "count", + Desc: "Count rows in a table", + do: doCount, + Usage: "cbt count <table>
", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "createinstance", + Desc: "Create an instance with an initial cluster", + do: doCreateInstance, + Usage: "cbt createinstance \n" + + " instance-id Permanent, unique id for the instance\n" + + " display-name Description of the instance\n" + + " cluster-id Permanent, unique id for the cluster in the instance\n" + + " zone The zone in which to create the cluster\n" + + " num-nodes The number of nodes to create\n" + + " storage-type SSD or HDD\n", + Required: cbtconfig.ProjectRequired, + }, + { + Name: "createcluster", + Desc: "Create a cluster in the configured instance (replication alpha)", + do: doCreateCluster, + Usage: "cbt createcluster \n" + + " cluster-id Permanent, unique id for the cluster in the instance\n" + + " zone The zone in which to create the cluster\n" + + " num-nodes The number of nodes to create\n" + + " storage-type SSD or HDD\n", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "createfamily", + Desc: "Create a column family", + do: doCreateFamily, + Usage: "cbt createfamily
", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "createtable", + Desc: "Create a table", + do: doCreateTable, + Usage: "cbt createtable
[families=family[:(maxage=<d> | maxversions=<n>)],...] [splits=split,...]\n" + + " families: Column families and their associated GC policies. See \"setgcpolicy\".\n" + + " Example: families=family1:maxage=1w,family2:maxversions=1\n" + + " splits: Row key to be used to initially split the table", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "updatecluster", + Desc: "Update a cluster in the configured instance", + do: doUpdateCluster, + Usage: "cbt updatecluster <cluster-id> [num-nodes=num-nodes]\n" + + " cluster-id Permanent, unique id for the cluster in the instance\n" + + " num-nodes The number of nodes to update to", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "deleteinstance", + Desc: "Deletes an instance", + do: doDeleteInstance, + Usage: "cbt deleteinstance <instance>", + Required: cbtconfig.ProjectRequired, + }, + { + Name: "deletecluster", + Desc: "Deletes a cluster from the configured instance (replication alpha)", + do: doDeleteCluster, + Usage: "cbt deletecluster <cluster>", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "deletecolumn", + Desc: "Delete all cells in a column", + do: doDeleteColumn, + Usage: "cbt deletecolumn <table> <row> <family> <column>
[app-profile=]\n" + + " app-profile= The app profile id to use for the request (replication alpha)\n", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "deletefamily", + Desc: "Delete a column family", + do: doDeleteFamily, + Usage: "cbt deletefamily
", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "deleterow", + Desc: "Delete a row", + do: doDeleteRow, + Usage: "cbt deleterow
[app-profile=]\n" + + " app-profile= The app profile id to use for the request (replication alpha)\n", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "deletetable", + Desc: "Delete a table", + do: doDeleteTable, + Usage: "cbt deletetable
", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "doc", + Desc: "Print godoc-suitable documentation for cbt", + do: doDoc, + Usage: "cbt doc", + Required: cbtconfig.NoneRequired, + }, + { + Name: "help", + Desc: "Print help text", + do: doHelp, + Usage: "cbt help [command]", + Required: cbtconfig.NoneRequired, + }, + { + Name: "listinstances", + Desc: "List instances in a project", + do: doListInstances, + Usage: "cbt listinstances", + Required: cbtconfig.ProjectRequired, + }, + { + Name: "listclusters", + Desc: "List instances in an instance", + do: doListClusters, + Usage: "cbt listclusters", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "lookup", + Desc: "Read from a single row", + do: doLookup, + Usage: "cbt lookup
[app-profile=]\n" + + " app-profile= The app profile id to use for the request (replication alpha)\n", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "ls", + Desc: "List tables and column families", + do: doLS, + Usage: "cbt ls List tables\n" + + "cbt ls
List column families in <table>
", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "mddoc", + Desc: "Print documentation for cbt in Markdown format", + do: doMDDoc, + Usage: "cbt mddoc", + Required: cbtconfig.NoneRequired, + }, + { + Name: "read", + Desc: "Read rows", + do: doRead, + Usage: "cbt read
[start=<row>] [end=<row>] [prefix=<prefix>]" + + " [regex=<regex>] [count=<n>] [app-profile=<app profile id>]\n" + + " start=<row> Start reading at this row\n" + + " end=<row> Stop reading before this row\n" + + " prefix=<prefix> Read rows with this prefix\n" + + " regex=<regex> Read rows with keys matching this regex\n" + + " count=<n> Read only this many rows\n" + + " app-profile=<app profile id> The app profile id to use for the request (replication alpha)\n", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "set", + Desc: "Set value of a cell", + do: doSet, + Usage: "cbt set <table> <row>
[app-profile=<app profile id>] family:column=val[@ts] ...\n" + + " app-profile=<app profile id> The app profile id to use for the request (replication alpha)\n" + + " family:column=val[@ts] may be repeated to set multiple cells.\n" + + "\n" + + " ts is an optional integer timestamp.\n" + + " If it cannot be parsed, the `@ts` part will be\n" + + " interpreted as part of the value.", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "setgcpolicy", + Desc: "Set the GC policy for a column family", + do: doSetGCPolicy, + Usage: "cbt setgcpolicy <table> <family>
( maxage=<d> | maxversions=<n> )\n" + + "\n" + + ` maxage=<d> Maximum timestamp age to preserve (e.g. "1h", "4d")` + "\n" + + " maxversions=<n> Maximum number of versions to preserve", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "waitforreplication", + Desc: "Blocks until all the completed writes have been replicated to all the clusters (replication alpha)", + do: doWaitForReplication, + Usage: "cbt waitforreplication <table>
", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "createtablefromsnapshot", + Desc: "Create a table from a snapshot (snapshots alpha)", + do: doCreateTableFromSnapshot, + Usage: "cbt createtablefromsnapshot
\n" + + " table The name of the table to create\n" + + " cluster The cluster where the snapshot is located\n" + + " snapshot The snapshot to restore", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "createsnapshot", + Desc: "Create a snapshot from a source table (snapshots alpha)", + do: doSnapshotTable, + Usage: "cbt createsnapshot
[ttl=]\n" + + "\n" + + ` [ttl=] Lifespan of the snapshot (e.g. "1h", "4d")` + "\n", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "listsnapshots", + Desc: "List snapshots in a cluster (snapshots alpha)", + do: doListSnapshots, + Usage: "cbt listsnapshots []", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "getsnapshot", + Desc: "Get snapshot info (snapshots alpha)", + do: doGetSnapshot, + Usage: "cbt getsnapshot ", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "deletesnapshot", + Desc: "Delete snapshot in a cluster (snapshots alpha)", + do: doDeleteSnapshot, + Usage: "cbt deletesnapshot ", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "version", + Desc: "Print the current cbt version", + do: doVersion, + Usage: "cbt version", + Required: cbtconfig.NoneRequired, + }, +} + +func doCount(ctx context.Context, args ...string) { + if len(args) != 1 { + log.Fatal("usage: cbt count
") + } + tbl := getClient(bigtable.ClientConfig{}).Open(args[0]) + + n := 0 + err := tbl.ReadRows(ctx, bigtable.InfiniteRange(""), func(_ bigtable.Row) bool { + n++ + return true + }, bigtable.RowFilter(bigtable.StripValueFilter())) + if err != nil { + log.Fatalf("Reading rows: %v", err) + } + fmt.Println(n) +} + +func doCreateTable(ctx context.Context, args ...string) { + if len(args) < 1 { + log.Fatal("usage: cbt createtable
<table> [families=family[:gcpolicy],...] [splits=split,...]") + } + + tblConf := bigtable.TableConf{TableID: args[0]} + for _, arg := range args[1:] { + i := strings.Index(arg, "=") + if i < 0 { + log.Fatalf("Bad arg %q", arg) + } + key, val := arg[:i], arg[i+1:] + chunks, err := csv.NewReader(strings.NewReader(val)).Read() + if err != nil { + log.Fatalf("Invalid families arg format: %v", err) + } + switch key { + default: + log.Fatalf("Unknown arg key %q", key) + case "families": + tblConf.Families = make(map[string]bigtable.GCPolicy) + for _, family := range chunks { + famPolicy := strings.Split(family, ":") + var gcPolicy bigtable.GCPolicy + if len(famPolicy) < 2 { + gcPolicy = bigtable.MaxVersionsPolicy(1) + log.Printf("Using default GC Policy of %v for family %v", gcPolicy, family) + } else { + gcPolicy, err = parseGCPolicy(famPolicy[1]) + if err != nil { + log.Fatal(err) + } + } + tblConf.Families[famPolicy[0]] = gcPolicy + } + case "splits": + tblConf.SplitKeys = chunks + } + } + + if err := getAdminClient().CreateTableFromConf(ctx, &tblConf); err != nil { + log.Fatalf("Creating table: %v", err) + } +} + +func doCreateFamily(ctx context.Context, args ...string) { + if len(args) != 2 { + log.Fatal("usage: cbt createfamily <table> <family>
") + } + err := getAdminClient().CreateColumnFamily(ctx, args[0], args[1]) + if err != nil { + log.Fatalf("Creating column family: %v", err) + } +} + +func doCreateInstance(ctx context.Context, args ...string) { + if len(args) < 6 { + log.Fatal("cbt createinstance ") + } + + numNodes, err := strconv.ParseInt(args[4], 0, 32) + if err != nil { + log.Fatalf("Bad num-nodes %q: %v", args[4], err) + } + + sType, err := parseStorageType(args[5]) + if err != nil { + log.Fatal(err) + } + + ic := bigtable.InstanceWithClustersConfig{ + InstanceID: args[0], + DisplayName: args[1], + Clusters: []bigtable.ClusterConfig{{ + ClusterID: args[2], + Zone: args[3], + NumNodes: int32(numNodes), + StorageType: sType, + }}, + } + err = getInstanceAdminClient().CreateInstanceWithClusters(ctx, &ic) + if err != nil { + log.Fatalf("Creating instance: %v", err) + } +} + +func doCreateCluster(ctx context.Context, args ...string) { + if len(args) < 4 { + log.Fatal("usage: cbt createcluster ") + } + + numNodes, err := strconv.ParseInt(args[2], 0, 32) + if err != nil { + log.Fatalf("Bad num_nodes %q: %v", args[2], err) + } + + sType, err := parseStorageType(args[3]) + if err != nil { + log.Fatal(err) + } + + cc := bigtable.ClusterConfig{ + InstanceID: config.Instance, + ClusterID: args[0], + Zone: args[1], + NumNodes: int32(numNodes), + StorageType: sType, + } + err = getInstanceAdminClient().CreateCluster(ctx, &cc) + if err != nil { + log.Fatalf("Creating cluster: %v", err) + } +} + +func doUpdateCluster(ctx context.Context, args ...string) { + if len(args) < 2 { + log.Fatal("cbt updatecluster [num-nodes=num-nodes]") + } + + numNodes := int64(0) + var err error + for _, arg := range args[1:] { + i := strings.Index(arg, "=") + if i < 0 { + log.Fatalf("Bad arg %q", arg) + } + key, val := arg[:i], arg[i+1:] + switch key { + default: + log.Fatalf("Unknown arg key %q", key) + case "num-nodes": + numNodes, err = strconv.ParseInt(val, 0, 32) + if err != nil { + log.Fatalf("Bad num-nodes %q: %v", val, err) + } + } + } + if numNodes > 0 { + err = getInstanceAdminClient().UpdateCluster(ctx, config.Instance, args[0], int32(numNodes)) + if err != nil { + log.Fatalf("Updating cluster: %v", err) + } + } else { + log.Fatal("Updating cluster: nothing to update") + } +} + +func doDeleteInstance(ctx context.Context, args ...string) { + if len(args) != 1 { + log.Fatal("usage: cbt deleteinstance ") + } + err := getInstanceAdminClient().DeleteInstance(ctx, args[0]) + if err != nil { + log.Fatalf("Deleting instance: %v", err) + } +} + +func doDeleteCluster(ctx context.Context, args ...string) { + if len(args) != 1 { + log.Fatal("usage: cbt deletecluster ") + } + err := getInstanceAdminClient().DeleteCluster(ctx, config.Instance, args[0]) + if err != nil { + log.Fatalf("Deleting cluster: %v", err) + } +} + +func doDeleteColumn(ctx context.Context, args ...string) { + usage := "usage: cbt deletecolumn
[app-profile=]" + if len(args) != 4 || len(args) != 5 { + log.Fatal(usage) + } + var appProfile string + if len(args) == 5 { + if !strings.HasPrefix(args[4], "app-profile=") { + log.Fatal(usage) + } + appProfile = strings.Split(args[4], "=")[1] + } + tbl := getClient(bigtable.ClientConfig{AppProfile: appProfile}).Open(args[0]) + mut := bigtable.NewMutation() + mut.DeleteCellsInColumn(args[2], args[3]) + if err := tbl.Apply(ctx, args[1], mut); err != nil { + log.Fatalf("Deleting cells in column: %v", err) + } +} + +func doDeleteFamily(ctx context.Context, args ...string) { + if len(args) != 2 { + log.Fatal("usage: cbt deletefamily
") + } + err := getAdminClient().DeleteColumnFamily(ctx, args[0], args[1]) + if err != nil { + log.Fatalf("Deleting column family: %v", err) + } +} + +func doDeleteRow(ctx context.Context, args ...string) { + usage := "usage: cbt deleterow
[app-profile=]" + if len(args) != 2 || len(args) != 3 { + log.Fatal(usage) + } + var appProfile string + if len(args) == 3 { + if !strings.HasPrefix(args[2], "app-profile=") { + log.Fatal(usage) + } + appProfile = strings.Split(args[2], "=")[1] + } + tbl := getClient(bigtable.ClientConfig{AppProfile: appProfile}).Open(args[0]) + mut := bigtable.NewMutation() + mut.DeleteRow() + if err := tbl.Apply(ctx, args[1], mut); err != nil { + log.Fatalf("Deleting row: %v", err) + } +} + +func doDeleteTable(ctx context.Context, args ...string) { + if len(args) != 1 { + log.Fatalf("Can't do `cbt deletetable %s`", args) + } + err := getAdminClient().DeleteTable(ctx, args[0]) + if err != nil { + log.Fatalf("Deleting table: %v", err) + } +} + +// to break circular dependencies +var ( + doDocFn func(ctx context.Context, args ...string) + doHelpFn func(ctx context.Context, args ...string) + doMDDocFn func(ctx context.Context, args ...string) +) + +func init() { + doDocFn = doDocReal + doHelpFn = doHelpReal + doMDDocFn = doMDDocReal +} + +func doDoc(ctx context.Context, args ...string) { doDocFn(ctx, args...) } +func doHelp(ctx context.Context, args ...string) { doHelpFn(ctx, args...) } +func doMDDoc(ctx context.Context, args ...string) { doMDDocFn(ctx, args...) } + +func docFlags() []*flag.Flag { + // Only include specific flags, in a specific order. + var flags []*flag.Flag + for _, name := range []string{"project", "instance", "creds"} { + f := flag.Lookup(name) + if f == nil { + log.Fatalf("Flag not linked: -%s", name) + } + flags = append(flags, f) + } + return flags +} + +func doDocReal(ctx context.Context, args ...string) { + data := map[string]interface{}{ + "Commands": commands, + "Flags": docFlags(), + "ConfigHelp": configHelp, + } + var buf bytes.Buffer + if err := docTemplate.Execute(&buf, data); err != nil { + log.Fatalf("Bad doc template: %v", err) + } + out, err := format.Source(buf.Bytes()) + if err != nil { + log.Fatalf("Bad doc output: %v", err) + } + os.Stdout.Write(out) +} + +func indentLines(s, ind string) string { + ss := strings.Split(s, "\n") + for i, p := range ss { + ss[i] = ind + p + } + return strings.Join(ss, "\n") +} + +var docTemplate = template.Must(template.New("doc").Funcs(template.FuncMap{ + "indent": indentLines, +}). + Parse(` +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED. +// Run "go generate" to regenerate. +//go:generate go run cbt.go -o cbtdoc.go doc + +/* +Cbt is a tool for doing basic interactions with Cloud Bigtable. To learn how to +install the cbt tool, see the +[cbt overview](https://cloud.google.com/bigtable/docs/go/cbt-overview). + +Usage: + + cbt [options] command [arguments] + +The commands are: +{{range .Commands}} + {{printf "%-25s %s" .Name .Desc}}{{end}} + +Use "cbt help " for more information about a command. 
+ +The options are: +{{range .Flags}} + -{{.Name}} string + {{.Usage}}{{end}} + +{{.ConfigHelp}} + +{{range .Commands}} +{{.Desc}} + +Usage: +{{indent .Usage "\t"}} + + + +{{end}} +*/ +package main +`)) + +func doHelpReal(ctx context.Context, args ...string) { + if len(args) == 0 { + usage(os.Stdout) + return + } + for _, cmd := range commands { + if cmd.Name == args[0] { + fmt.Println(cmd.Usage) + return + } + } + log.Fatalf("Don't know command %q", args[0]) +} + +func doListInstances(ctx context.Context, args ...string) { + if len(args) != 0 { + log.Fatalf("usage: cbt listinstances") + } + is, err := getInstanceAdminClient().Instances(ctx) + if err != nil { + log.Fatalf("Getting list of instances: %v", err) + } + tw := tabwriter.NewWriter(os.Stdout, 10, 8, 4, '\t', 0) + fmt.Fprintf(tw, "Instance Name\tInfo\n") + fmt.Fprintf(tw, "-------------\t----\n") + for _, i := range is { + fmt.Fprintf(tw, "%s\t%s\n", i.Name, i.DisplayName) + } + tw.Flush() +} + +func doListClusters(ctx context.Context, args ...string) { + if len(args) != 0 { + log.Fatalf("usage: cbt listclusters") + } + cis, err := getInstanceAdminClient().Clusters(ctx, config.Instance) + if err != nil { + log.Fatalf("Getting list of clusters: %v", err) + } + tw := tabwriter.NewWriter(os.Stdout, 10, 8, 4, '\t', 0) + fmt.Fprintf(tw, "Cluster Name\tZone\tState\n") + fmt.Fprintf(tw, "------------\t----\t----\n") + for _, ci := range cis { + fmt.Fprintf(tw, "%s\t%s\t%s (%d serve nodes)\n", ci.Name, ci.Zone, ci.State, ci.ServeNodes) + } + tw.Flush() +} + +func doLookup(ctx context.Context, args ...string) { + if len(args) < 2 { + log.Fatalf("usage: cbt lookup
[app-profile=]") + } + var appProfile string + if len(args) > 2 { + i := strings.Index(args[2], "=") + if i < 0 { + log.Fatalf("Bad arg %q", args[2]) + } + appProfile = strings.Split(args[2], "=")[1] + } + table, row := args[0], args[1] + tbl := getClient(bigtable.ClientConfig{AppProfile: appProfile}).Open(table) + r, err := tbl.ReadRow(ctx, row) + if err != nil { + log.Fatalf("Reading row: %v", err) + } + printRow(r) +} + +func printRow(r bigtable.Row) { + fmt.Println(strings.Repeat("-", 40)) + fmt.Println(r.Key()) + + var fams []string + for fam := range r { + fams = append(fams, fam) + } + sort.Strings(fams) + for _, fam := range fams { + ris := r[fam] + sort.Sort(byColumn(ris)) + for _, ri := range ris { + ts := time.Unix(0, int64(ri.Timestamp)*1e3) + fmt.Printf(" %-40s @ %s\n", ri.Column, ts.Format("2006/01/02-15:04:05.000000")) + fmt.Printf(" %q\n", ri.Value) + } + } +} + +type byColumn []bigtable.ReadItem + +func (b byColumn) Len() int { return len(b) } +func (b byColumn) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byColumn) Less(i, j int) bool { return b[i].Column < b[j].Column } + +type byFamilyName []bigtable.FamilyInfo + +func (b byFamilyName) Len() int { return len(b) } +func (b byFamilyName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byFamilyName) Less(i, j int) bool { return b[i].Name < b[j].Name } + +func doLS(ctx context.Context, args ...string) { + switch len(args) { + default: + log.Fatalf("Can't do `cbt ls %s`", args) + case 0: + tables, err := getAdminClient().Tables(ctx) + if err != nil { + log.Fatalf("Getting list of tables: %v", err) + } + sort.Strings(tables) + for _, table := range tables { + fmt.Println(table) + } + case 1: + table := args[0] + ti, err := getAdminClient().TableInfo(ctx, table) + if err != nil { + log.Fatalf("Getting table info: %v", err) + } + sort.Sort(byFamilyName(ti.FamilyInfos)) + tw := tabwriter.NewWriter(os.Stdout, 10, 8, 4, '\t', 0) + fmt.Fprintf(tw, "Family Name\tGC Policy\n") + fmt.Fprintf(tw, "-----------\t---------\n") + for _, fam := range ti.FamilyInfos { + fmt.Fprintf(tw, "%s\t%s\n", fam.Name, fam.GCPolicy) + } + tw.Flush() + } +} + +func doMDDocReal(ctx context.Context, args ...string) { + data := map[string]interface{}{ + "Commands": commands, + "Flags": docFlags(), + "ConfigHelp": configHelp, + } + var buf bytes.Buffer + if err := mddocTemplate.Execute(&buf, data); err != nil { + log.Fatalf("Bad mddoc template: %v", err) + } + io.Copy(os.Stdout, &buf) +} + +var mddocTemplate = template.Must(template.New("mddoc").Funcs(template.FuncMap{ + "indent": indentLines, +}). + Parse(` +Cbt is a tool for doing basic interactions with Cloud Bigtable. + +Usage: + + cbt [options] command [arguments] + +The commands are: +{{range .Commands}} + {{printf "%-25s %s" .Name .Desc}}{{end}} + +Use "cbt help " for more information about a command. + +The options are: +{{range .Flags}} + -{{.Name}} string + {{.Usage}}{{end}} + +{{.ConfigHelp}} + +{{range .Commands}} +## {{.Desc}} + +{{indent .Usage "\t"}} + + + +{{end}} +`)) + +func doRead(ctx context.Context, args ...string) { + if len(args) < 1 { + log.Fatalf("usage: cbt read
[args ...]") + } + + parsed := make(map[string]string) + for _, arg := range args[1:] { + i := strings.Index(arg, "=") + if i < 0 { + log.Fatalf("Bad arg %q", arg) + } + key, val := arg[:i], arg[i+1:] + switch key { + default: + log.Fatalf("Unknown arg key %q", key) + case "limit": + // Be nicer; we used to support this, but renamed it to "end". + log.Fatalf("Unknown arg key %q; did you mean %q?", key, "end") + case "start", "end", "prefix", "count", "regex", "app-profile": + parsed[key] = val + } + } + if (parsed["start"] != "" || parsed["end"] != "") && parsed["prefix"] != "" { + log.Fatal(`"start"/"end" may not be mixed with "prefix"`) + } + + var rr bigtable.RowRange + if start, end := parsed["start"], parsed["end"]; end != "" { + rr = bigtable.NewRange(start, end) + } else if start != "" { + rr = bigtable.InfiniteRange(start) + } + if prefix := parsed["prefix"]; prefix != "" { + rr = bigtable.PrefixRange(prefix) + } + + var opts []bigtable.ReadOption + if count := parsed["count"]; count != "" { + n, err := strconv.ParseInt(count, 0, 64) + if err != nil { + log.Fatalf("Bad count %q: %v", count, err) + } + opts = append(opts, bigtable.LimitRows(n)) + } + if regex := parsed["regex"]; regex != "" { + opts = append(opts, bigtable.RowFilter(bigtable.RowKeyFilter(regex))) + } + + // TODO(dsymonds): Support filters. + tbl := getClient(bigtable.ClientConfig{AppProfile: parsed["app-profile"]}).Open(args[0]) + err := tbl.ReadRows(ctx, rr, func(r bigtable.Row) bool { + printRow(r) + return true + }, opts...) + if err != nil { + log.Fatalf("Reading rows: %v", err) + } +} + +var setArg = regexp.MustCompile(`([^:]+):([^=]*)=(.*)`) + +func doSet(ctx context.Context, args ...string) { + if len(args) < 3 { + log.Fatalf("usage: cbt set
<table> <row> [app-profile=<app profile id>] family:[column]=val[@ts] ...") + } + var appProfile string + row := args[1] + mut := bigtable.NewMutation() + for _, arg := range args[2:] { + if strings.HasPrefix(arg, "app-profile=") { + appProfile = strings.Split(arg, "=")[1] + continue + } + m := setArg.FindStringSubmatch(arg) + if m == nil { + log.Fatalf("Bad set arg %q", arg) + } + val := m[3] + ts := bigtable.Now() + if i := strings.LastIndex(val, "@"); i >= 0 { + // Try parsing a timestamp. + n, err := strconv.ParseInt(val[i+1:], 0, 64) + if err == nil { + val = val[:i] + ts = bigtable.Timestamp(n) + } + } + mut.Set(m[1], m[2], ts, []byte(val)) + } + tbl := getClient(bigtable.ClientConfig{AppProfile: appProfile}).Open(args[0]) + if err := tbl.Apply(ctx, row, mut); err != nil { + log.Fatalf("Applying mutation: %v", err) + } +} + +func doSetGCPolicy(ctx context.Context, args ...string) { + if len(args) < 3 { + log.Fatalf("usage: cbt setgcpolicy
<table> <family> ( maxage=<d> | maxversions=<n> | maxage=<d> (and|or) maxversions=<n> )") + } + table := args[0] + fam := args[1] + + pol, err := parseGCPolicy(strings.Join(args[2:], " ")) + if err != nil { + log.Fatal(err) + } + if err := getAdminClient().SetGCPolicy(ctx, table, fam, pol); err != nil { + log.Fatalf("Setting GC policy: %v", err) + } +} + +func doWaitForReplication(ctx context.Context, args ...string) { + if len(args) != 1 { + log.Fatalf("usage: cbt waitforreplication <table>
") + } + table := args[0] + + fmt.Printf("Waiting for all writes up to %s to be replicated.\n", time.Now().Format("2006/01/02-15:04:05")) + if err := getAdminClient().WaitForReplication(ctx, table); err != nil { + log.Fatalf("Waiting for replication: %v", err) + } +} + +func parseGCPolicy(policyStr string) (bigtable.GCPolicy, error) { + words := strings.Fields(policyStr) + switch len(words) { + case 1: + return parseSinglePolicy(words[0]) + case 3: + p1, err := parseSinglePolicy(words[0]) + if err != nil { + return nil, err + } + p2, err := parseSinglePolicy(words[2]) + if err != nil { + return nil, err + } + switch words[1] { + case "and": + return bigtable.IntersectionPolicy(p1, p2), nil + case "or": + return bigtable.UnionPolicy(p1, p2), nil + default: + return nil, fmt.Errorf("Expected 'and' or 'or', saw %q", words[1]) + } + default: + return nil, fmt.Errorf("Expected '1' or '3' parameter count, saw %d", len(words)) + } + return nil, nil +} + +func parseSinglePolicy(s string) (bigtable.GCPolicy, error) { + words := strings.Split(s, "=") + if len(words) != 2 { + return nil, fmt.Errorf("Expected 'name=value', got %q", words) + } + switch words[0] { + case "maxage": + d, err := parseDuration(words[1]) + if err != nil { + return nil, err + } + return bigtable.MaxAgePolicy(d), nil + case "maxversions": + n, err := strconv.ParseUint(words[1], 10, 16) + if err != nil { + return nil, err + } + return bigtable.MaxVersionsPolicy(int(n)), nil + default: + return nil, fmt.Errorf("Expected 'maxage' or 'maxversions', got %q", words[1]) + } + return nil, nil +} + +func parseStorageType(storageTypeStr string) (bigtable.StorageType, error) { + switch storageTypeStr { + case "SSD": + return bigtable.SSD, nil + case "HDD": + return bigtable.HDD, nil + } + return -1, fmt.Errorf("Invalid storage type: %v, must be SSD or HDD", storageTypeStr) +} + +func doCreateTableFromSnapshot(ctx context.Context, args ...string) { + if len(args) != 3 { + log.Fatal("usage: cbt createtablefromsnapshot
") + } + tableName := args[0] + clusterName := args[1] + snapshotName := args[2] + err := getAdminClient().CreateTableFromSnapshot(ctx, tableName, clusterName, snapshotName) + + if err != nil { + log.Fatalf("Creating table: %v", err) + } +} + +func doSnapshotTable(ctx context.Context, args ...string) { + if len(args) != 3 && len(args) != 4 { + log.Fatal("usage: cbt createsnapshot
[ttl=]") + } + clusterName := args[0] + snapshotName := args[1] + tableName := args[2] + ttl := bigtable.DefaultSnapshotDuration + + for _, arg := range args[3:] { + i := strings.Index(arg, "=") + if i < 0 { + log.Fatalf("Bad arg %q", arg) + } + key, val := arg[:i], arg[i+1:] + switch key { + default: + log.Fatalf("Unknown arg key %q", key) + case "ttl": + var err error + ttl, err = parseDuration(val) + if err != nil { + log.Fatalf("Invalid snapshot ttl value %q: %v", val, err) + } + } + } + + err := getAdminClient().SnapshotTable(ctx, tableName, clusterName, snapshotName, ttl) + if err != nil { + log.Fatalf("Failed to create Snapshot: %v", err) + } +} + +func doListSnapshots(ctx context.Context, args ...string) { + if len(args) != 0 && len(args) != 1 { + log.Fatal("usage: cbt listsnapshots []") + } + + var cluster string + + if len(args) == 0 { + cluster = "-" + } else { + cluster = args[0] + } + + it := getAdminClient().ListSnapshots(ctx, cluster) + + tw := tabwriter.NewWriter(os.Stdout, 10, 8, 4, '\t', 0) + fmt.Fprintf(tw, "Snapshot\tSource Table\tCreated At\tExpires At\n") + fmt.Fprintf(tw, "--------\t------------\t----------\t----------\n") + timeLayout := "2006-01-02 15:04 MST" + + for { + snapshot, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + log.Fatalf("Failed to fetch snapshots %v", err) + } + fmt.Fprintf(tw, "%s\t%s\t%s\t%s\n", snapshot.Name, snapshot.SourceTable, snapshot.CreateTime.Format(timeLayout), snapshot.DeleteTime.Format(timeLayout)) + } + tw.Flush() +} + +func doGetSnapshot(ctx context.Context, args ...string) { + if len(args) != 2 { + log.Fatalf("usage: cbt getsnapshot ") + } + clusterName := args[0] + snapshotName := args[1] + + snapshot, err := getAdminClient().SnapshotInfo(ctx, clusterName, snapshotName) + if err != nil { + log.Fatalf("Failed to get snapshot: %v", err) + } + + timeLayout := "2006-01-02 15:04 MST" + + fmt.Printf("Name: %s\n", snapshot.Name) + fmt.Printf("Source table: %s\n", snapshot.SourceTable) + fmt.Printf("Created at: %s\n", snapshot.CreateTime.Format(timeLayout)) + fmt.Printf("Expires at: %s\n", snapshot.DeleteTime.Format(timeLayout)) +} + +func doDeleteSnapshot(ctx context.Context, args ...string) { + if len(args) != 2 { + log.Fatal("usage: cbt deletesnapshot ") + } + cluster := args[0] + snapshot := args[1] + + err := getAdminClient().DeleteSnapshot(ctx, cluster, snapshot) + + if err != nil { + log.Fatalf("Failed to delete snapshot: %v", err) + } +} + +// parseDuration parses a duration string. +// It is similar to Go's time.ParseDuration, except with a different set of supported units, +// and only simple formats supported. +func parseDuration(s string) (time.Duration, error) { + // [0-9]+[a-z]+ + + // Split [0-9]+ from [a-z]+. + i := 0 + for ; i < len(s); i++ { + c := s[i] + if c < '0' || c > '9' { + break + } + } + ds, u := s[:i], s[i:] + if ds == "" || u == "" { + return 0, fmt.Errorf("invalid duration %q", s) + } + // Parse them. 
+ d, err := strconv.ParseUint(ds, 10, 32) + if err != nil { + return 0, fmt.Errorf("invalid duration %q: %v", s, err) + } + unit, ok := unitMap[u] + if !ok { + return 0, fmt.Errorf("unknown unit %q in duration %q", u, s) + } + if d > uint64((1<<63-1)/unit) { + // overflow + return 0, fmt.Errorf("invalid duration %q overflows", s) + } + return time.Duration(d) * unit, nil +} + +var unitMap = map[string]time.Duration{ + "ms": time.Millisecond, + "s": time.Second, + "m": time.Minute, + "h": time.Hour, + "d": 24 * time.Hour, +} + +func doVersion(ctx context.Context, args ...string) { + fmt.Printf("%s %s %s\n", version, revision, revisionDate) +} diff --git a/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt_test.go b/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt_test.go new file mode 100644 index 0000000..2616fb4 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt_test.go @@ -0,0 +1,113 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "testing" + "time" + + "cloud.google.com/go/bigtable" + "github.com/google/go-cmp/cmp" +) + +func TestParseDuration(t *testing.T) { + tests := []struct { + in string + // out or fail are mutually exclusive + out time.Duration + fail bool + }{ + {in: "10ms", out: 10 * time.Millisecond}, + {in: "3s", out: 3 * time.Second}, + {in: "60m", out: 60 * time.Minute}, + {in: "12h", out: 12 * time.Hour}, + {in: "7d", out: 168 * time.Hour}, + + {in: "", fail: true}, + {in: "0", fail: true}, + {in: "7ns", fail: true}, + {in: "14mo", fail: true}, + {in: "3.5h", fail: true}, + {in: "106752d", fail: true}, // overflow + } + for _, tc := range tests { + got, err := parseDuration(tc.in) + if !tc.fail && err != nil { + t.Errorf("parseDuration(%q) unexpectedly failed: %v", tc.in, err) + continue + } + if tc.fail && err == nil { + t.Errorf("parseDuration(%q) did not fail", tc.in) + continue + } + if tc.fail { + continue + } + if got != tc.out { + t.Errorf("parseDuration(%q) = %v, want %v", tc.in, got, tc.out) + } + } +} + +func TestParseGCPolicy(t *testing.T) { + tests := []struct { + in string + out bigtable.GCPolicy + fail bool + }{ + {in: "maxage=1h", out: bigtable.MaxAgePolicy(time.Hour * 1)}, + {in: "maxversions=2", out: bigtable.MaxVersionsPolicy(int(2))}, + {in: "maxversions=2 and maxage=1h", out: bigtable.IntersectionPolicy([]bigtable.GCPolicy{bigtable.MaxVersionsPolicy(int(2)), bigtable.MaxAgePolicy(time.Hour * 1)}...)}, + {in: "maxversions=2 or maxage=1h", out: bigtable.UnionPolicy([]bigtable.GCPolicy{bigtable.MaxVersionsPolicy(int(2)), bigtable.MaxAgePolicy(time.Hour * 1)}...)}, + + {in: "maxage=1", fail: true}, + {in: "maxage = 1h", fail: true}, + {in: "maxage =1h", fail: true}, + {in: "maxage= 1h", fail: true}, + {in: "foomaxage=1h", fail: true}, + {in: "maxversions=1h", fail: true}, + {in: "maxversions= 1", fail: true}, + {in: "maxversions = 1", fail: true}, + {in: "maxversions =1", fail: true}, + {in: "barmaxversions=1", fail: true}, + {in: "maxage = 1h or 
maxversions=1h", fail: true}, + {in: "foomaxversions=2 or maxage=1h", fail: true}, + {in: "maxversions=2 or barmaxage=1h", fail: true}, + {in: "foomaxversions=2 or barmaxage=1h", fail: true}, + {in: "maxage = 1h and maxversions=1h", fail: true}, + {in: "foomaxage=1h and maxversions=1", fail: true}, + {in: "maxage=1h and barmaxversions=1", fail: true}, + {in: "foomaxage=1h and barmaxversions=1", fail: true}, + } + for _, tc := range tests { + got, err := parseGCPolicy(tc.in) + if !tc.fail && err != nil { + t.Errorf("parseGCPolicy(%q) unexpectedly failed: %v", tc.in, err) + continue + } + if tc.fail && err == nil { + t.Errorf("parseGCPolicy(%q) did not fail", tc.in) + continue + } + if tc.fail { + continue + } + var cmpOpts cmp.Options + cmpOpts = append(cmpOpts, cmp.AllowUnexported(bigtable.IntersectionPolicy([]bigtable.GCPolicy{}...)), cmp.AllowUnexported(bigtable.UnionPolicy([]bigtable.GCPolicy{}...))) + if !cmp.Equal(got, tc.out, cmpOpts) { + t.Errorf("parseGCPolicy(%q) =%v, want %v", tc.in, got, tc.out) + } + } +} diff --git a/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbtdoc.go b/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbtdoc.go new file mode 100644 index 0000000..317ea08 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbtdoc.go @@ -0,0 +1,316 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED. +// Run "go generate" to regenerate. +//go:generate go run cbt.go -o cbtdoc.go doc + +/* +Cbt is a tool for doing basic interactions with Cloud Bigtable. To learn how to +install the cbt tool, see the +[cbt overview](https://cloud.google.com/bigtable/docs/go/cbt-overview). + +Usage: + + cbt [options] command [arguments] + +The commands are: + + count Count rows in a table + createinstance Create an instance with an initial cluster + createcluster Create a cluster in the configured instance (replication alpha) + createfamily Create a column family + createtable Create a table + updatecluster Update a cluster in the configured instance + deleteinstance Deletes an instance + deletecluster Deletes a cluster from the configured instance (replication alpha) + deletecolumn Delete all cells in a column + deletefamily Delete a column family + deleterow Delete a row + deletetable Delete a table + doc Print godoc-suitable documentation for cbt + help Print help text + listinstances List instances in a project + listclusters List instances in an instance + lookup Read from a single row + ls List tables and column families + mddoc Print documentation for cbt in Markdown format + read Read rows + set Set value of a cell + setgcpolicy Set the GC policy for a column family + waitforreplication Blocks until all the completed writes have been replicated to all the clusters (replication alpha) + version Print the current cbt version + +Use "cbt help " for more information about a command. 
+ +The options are: + + -project string + project ID, if unset uses gcloud configured project + -instance string + Cloud Bigtable instance + -creds string + if set, use application credentials in this file + + +Alpha features are not currently available to most Cloud Bigtable customers. The +features might be changed in backward-incompatible ways and are not recommended +for production use. They are not subject to any SLA or deprecation policy. + +For convenience, values of the -project, -instance, -creds, +-admin-endpoint and -data-endpoint flags may be specified in +/usr/local/google/home/igorbernstein/.cbtrc in this format: + project = my-project-123 + instance = my-instance + creds = path-to-account-key.json + admin-endpoint = hostname:port + data-endpoint = hostname:port +All values are optional, and all will be overridden by flags. + + + +Count rows in a table + +Usage: + cbt count <table>
+ + + + +Create an instance with an initial cluster + +Usage: + cbt createinstance <instance-id> <display-name> <cluster-id> <zone> <num-nodes> <storage-type> + instance-id Permanent, unique id for the instance + display-name Description of the instance + cluster-id Permanent, unique id for the cluster in the instance + zone The zone in which to create the cluster + num-nodes The number of nodes to create + storage-type SSD or HDD + + + + + +Create a cluster in the configured instance (replication alpha) + +Usage: + cbt createcluster <cluster-id> <zone> <num-nodes> <storage-type> + cluster-id Permanent, unique id for the cluster in the instance + zone The zone in which to create the cluster + num-nodes The number of nodes to create + storage-type SSD or HDD + + + + + +Create a column family + +Usage: + cbt createfamily <table> <family>
+ + + + +Create a table + +Usage: + cbt createtable <table>
[families=family[:(maxage=<d> | maxversions=<n>)],...] [splits=split,...] + families: Column families and their associated GC policies. See "setgcpolicy". + Example: families=family1:maxage=1w,family2:maxversions=1 + splits: Row key to be used to initially split the table + + + + +Update a cluster in the configured instance + +Usage: + cbt updatecluster <cluster-id> [num-nodes=num-nodes] + cluster-id Permanent, unique id for the cluster in the instance + num-nodes The number of nodes to update to + + + + +Deletes an instance + +Usage: + cbt deleteinstance <instance> + + + + +Deletes a cluster from the configured instance (replication alpha) + +Usage: + cbt deletecluster <cluster> + + + + +Delete all cells in a column + +Usage: + cbt deletecolumn <table> <row> <family> <column>
[app-profile=<app profile id>] + app-profile=<app profile id> The app profile id to use for the request (replication alpha) + + + + + +Delete a column family + +Usage: + cbt deletefamily <table> <family>
+ + + + +Delete a row + +Usage: + cbt deleterow <table> <row>
[app-profile=<app profile id>] + app-profile=<app profile id> The app profile id to use for the request (replication alpha) + + + + + +Delete a table + +Usage: + cbt deletetable <table>
+ + + + +Print godoc-suitable documentation for cbt + +Usage: + cbt doc + + + + +Print help text + +Usage: + cbt help [command] + + + + +List instances in a project + +Usage: + cbt listinstances + + + + +List clusters in an instance + +Usage: + cbt listclusters + + + + +Read from a single row + +Usage: + cbt lookup <table> <row>
[app-profile=<app profile id>] + app-profile=<app profile id> The app profile id to use for the request (replication alpha) + + + + + +List tables and column families + +Usage: + cbt ls List tables + cbt ls <table>
List column families in <table>
+ + + + +Print documentation for cbt in Markdown format + +Usage: + cbt mddoc + + + + +Read rows + +Usage: + cbt read <table>
[start=<row>] [end=<row>] [prefix=<prefix>] [regex=<regex>] [count=<n>] [app-profile=<app profile id>] + start=<row> Start reading at this row + end=<row> Stop reading before this row + prefix=<prefix> Read rows with this prefix + regex=<regex> Read rows with keys matching this regex + count=<n> Read only this many rows + app-profile=<app profile id> The app profile id to use for the request (replication alpha) + + + + + +Set value of a cell + +Usage: + cbt set <table> <row>
[app-profile=<app profile id>] family:column=val[@ts] ... + app-profile=<app profile id> The app profile id to use for the request (replication alpha) + family:column=val[@ts] may be repeated to set multiple cells. + + ts is an optional integer timestamp. + If it cannot be parsed, the `@ts` part will be + interpreted as part of the value. + + + + +Set the GC policy for a column family + +Usage: + cbt setgcpolicy <table> <family>
( maxage=<d> | maxversions=<n> ) + + maxage=<d> Maximum timestamp age to preserve (e.g. "1h", "4d") + maxversions=<n> Maximum number of versions to preserve + + + + +Blocks until all the completed writes have been replicated to all the clusters (replication alpha) + +Usage: + cbt waitforreplication <table>
+ + + + +Print the current cbt version + +Usage: + cbt version + + + + +*/ +package main diff --git a/vendor/cloud.google.com/go/bigtable/cmd/emulator/cbtemulator.go b/vendor/cloud.google.com/go/bigtable/cmd/emulator/cbtemulator.go new file mode 100644 index 0000000..f561c14 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/cmd/emulator/cbtemulator.go @@ -0,0 +1,44 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +cbtemulator launches the in-memory Cloud Bigtable server on the given address. +*/ +package main + +import ( + "flag" + "fmt" + "log" + + "cloud.google.com/go/bigtable/bttest" + "google.golang.org/grpc" +) + +var ( + host = flag.String("host", "localhost", "the address to bind to on the local machine") + port = flag.Int("port", 9000, "the port number to bind to on the local machine") +) + +func main() { + grpc.EnableTracing = false + flag.Parse() + srv, err := bttest.NewServer(fmt.Sprintf("%s:%d", *host, *port)) + if err != nil { + log.Fatalf("failed to start emulator: %v", err) + } + + fmt.Printf("Cloud Bigtable emulator running on %s\n", srv.Addr) + select {} +} diff --git a/vendor/cloud.google.com/go/bigtable/cmd/loadtest/loadtest.go b/vendor/cloud.google.com/go/bigtable/cmd/loadtest/loadtest.go new file mode 100644 index 0000000..1c86ed8 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/cmd/loadtest/loadtest.go @@ -0,0 +1,205 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Loadtest does some load testing through the Go client library for Cloud Bigtable. +*/ +package main + +import ( + "bytes" + "flag" + "fmt" + "log" + "math/rand" + "os" + "os/signal" + "sync" + "sync/atomic" + "time" + + "cloud.google.com/go/bigtable" + "cloud.google.com/go/bigtable/internal/cbtconfig" + "cloud.google.com/go/bigtable/internal/stat" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +var ( + runFor = flag.Duration("run_for", 5*time.Second, + "how long to run the load test for; 0 to run forever until SIGTERM") + scratchTable = flag.String("scratch_table", "loadtest-scratch", "name of table to use; should not already exist") + csvOutput = flag.String("csv_output", "", + "output path for statistics in .csv format. 
If this file already exists it will be overwritten.") + poolSize = flag.Int("pool_size", 1, "size of the gRPC connection pool to use for the data client") + reqCount = flag.Int("req_count", 100, "number of concurrent requests") + + config *cbtconfig.Config + client *bigtable.Client + adminClient *bigtable.AdminClient +) + +func main() { + var err error + config, err = cbtconfig.Load() + if err != nil { + log.Fatal(err) + } + config.RegisterFlags() + + flag.Parse() + if err := config.CheckFlags(cbtconfig.ProjectAndInstanceRequired); err != nil { + log.Fatal(err) + } + if config.Creds != "" { + os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds) + } + if flag.NArg() != 0 { + flag.Usage() + os.Exit(1) + } + + var options []option.ClientOption + if *poolSize > 1 { + options = append(options, + option.WithGRPCConnectionPool(*poolSize), + + // TODO(grpc/grpc-go#1388) using connection pool without WithBlock + // can cause RPCs to fail randomly. We can delete this after the issue is fixed. + option.WithGRPCDialOption(grpc.WithBlock())) + } + + var csvFile *os.File + if *csvOutput != "" { + csvFile, err = os.Create(*csvOutput) + if err != nil { + log.Fatalf("creating csv output file: %v", err) + } + defer csvFile.Close() + log.Printf("Writing statistics to %q ...", *csvOutput) + } + + log.Printf("Dialing connections...") + client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance, options...) + if err != nil { + log.Fatalf("Making bigtable.Client: %v", err) + } + defer client.Close() + adminClient, err = bigtable.NewAdminClient(context.Background(), config.Project, config.Instance) + if err != nil { + log.Fatalf("Making bigtable.AdminClient: %v", err) + } + defer adminClient.Close() + + // Create a scratch table. + log.Printf("Setting up scratch table...") + tblConf := bigtable.TableConf{ + TableID: *scratchTable, + Families: map[string]bigtable.GCPolicy{"f": bigtable.MaxVersionsPolicy(1)}, + } + if err := adminClient.CreateTableFromConf(context.Background(), &tblConf); err != nil { + log.Fatalf("Making scratch table %q: %v", *scratchTable, err) + } + // Upon a successful run, delete the table. Don't bother checking for errors. + defer adminClient.DeleteTable(context.Background(), *scratchTable) + + // Also delete the table on SIGTERM. + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt) + go func() { + s := <-c + log.Printf("Caught %v, cleaning scratch table.", s) + adminClient.DeleteTable(context.Background(), *scratchTable) + os.Exit(1) + }() + + log.Printf("Starting load test... 
(run for %v)", *runFor) + tbl := client.Open(*scratchTable) + sem := make(chan int, *reqCount) // limit the number of requests happening at once + var reads, writes stats + stopTime := time.Now().Add(*runFor) + var wg sync.WaitGroup + for time.Now().Before(stopTime) || *runFor == 0 { + sem <- 1 + wg.Add(1) + go func() { + defer wg.Done() + defer func() { <-sem }() + + ok := true + opStart := time.Now() + var stats *stats + defer func() { + stats.Record(ok, time.Since(opStart)) + }() + + row := fmt.Sprintf("row%d", rand.Intn(100)) // operate on 1 of 100 rows + + switch rand.Intn(10) { + default: + // read + stats = &reads + _, err := tbl.ReadRow(context.Background(), row, bigtable.RowFilter(bigtable.LatestNFilter(1))) + if err != nil { + log.Printf("Error doing read: %v", err) + ok = false + } + case 0, 1, 2, 3, 4: + // write + stats = &writes + mut := bigtable.NewMutation() + mut.Set("f", "col", bigtable.Now(), bytes.Repeat([]byte("0"), 1<<10)) // 1 KB write + if err := tbl.Apply(context.Background(), row, mut); err != nil { + log.Printf("Error doing mutation: %v", err) + ok = false + } + } + }() + } + wg.Wait() + + readsAgg := stat.NewAggregate("reads", reads.ds, reads.tries-reads.ok) + writesAgg := stat.NewAggregate("writes", writes.ds, writes.tries-writes.ok) + log.Printf("Reads (%d ok / %d tries):\n%v", reads.ok, reads.tries, readsAgg) + log.Printf("Writes (%d ok / %d tries):\n%v", writes.ok, writes.tries, writesAgg) + + if csvFile != nil { + stat.WriteCSV([]*stat.Aggregate{readsAgg, writesAgg}, csvFile) + } +} + +var allStats int64 // atomic + +type stats struct { + mu sync.Mutex + tries, ok int + ds []time.Duration +} + +func (s *stats) Record(ok bool, d time.Duration) { + s.mu.Lock() + s.tries++ + if ok { + s.ok++ + } + s.ds = append(s.ds, d) + s.mu.Unlock() + + if n := atomic.AddInt64(&allStats, 1); n%1000 == 0 { + log.Printf("Progress: done %d ops", n) + } +} diff --git a/vendor/cloud.google.com/go/bigtable/cmd/scantest/scantest.go b/vendor/cloud.google.com/go/bigtable/cmd/scantest/scantest.go new file mode 100644 index 0000000..72e3743 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/cmd/scantest/scantest.go @@ -0,0 +1,155 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Scantest does scan-related load testing against Cloud Bigtable. The logic here +mimics a similar test written using the Java client. 
+*/
+package main
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"log"
+	"math/rand"
+	"os"
+	"sync"
+	"sync/atomic"
+	"text/tabwriter"
+	"time"
+
+	"cloud.google.com/go/bigtable"
+	"cloud.google.com/go/bigtable/internal/cbtconfig"
+	"cloud.google.com/go/bigtable/internal/stat"
+	"golang.org/x/net/context"
+)
+
+var (
+	runFor   = flag.Duration("run_for", 5*time.Second, "how long to run the load test for")
+	numScans = flag.Int("concurrent_scans", 1, "number of concurrent scans")
+	rowLimit = flag.Int("row_limit", 10000, "max number of records per scan")
+
+	config *cbtconfig.Config
+	client *bigtable.Client
+)
+
+func main() {
+	flag.Usage = func() {
+		fmt.Printf("Usage: scantest [options] <table_name>\n\n")
+		flag.PrintDefaults()
+	}
+
+	var err error
+	config, err = cbtconfig.Load()
+	if err != nil {
+		log.Fatal(err)
+	}
+	config.RegisterFlags()
+
+	flag.Parse()
+	if err := config.CheckFlags(cbtconfig.ProjectAndInstanceRequired); err != nil {
+		log.Fatal(err)
+	}
+	if config.Creds != "" {
+		os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds)
+	}
+	if flag.NArg() != 1 {
+		flag.Usage()
+		os.Exit(1)
+	}
+
+	table := flag.Arg(0)
+
+	log.Printf("Dialing connections...")
+	client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance)
+	if err != nil {
+		log.Fatalf("Making bigtable.Client: %v", err)
+	}
+	defer client.Close()
+
+	log.Printf("Starting scan test... (run for %v)", *runFor)
+	tbl := client.Open(table)
+	sem := make(chan int, *numScans) // limit the number of requests happening at once
+	var scans stats
+
+	stopTime := time.Now().Add(*runFor)
+	var wg sync.WaitGroup
+	for time.Now().Before(stopTime) {
+		sem <- 1
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			defer func() { <-sem }()
+
+			ok := true
+			opStart := time.Now()
+			defer func() {
+				scans.Record(ok, time.Since(opStart))
+			}()
+
+			// Start at a random row key
+			key := fmt.Sprintf("user%d", rand.Int63())
+			limit := bigtable.LimitRows(int64(*rowLimit))
+			noop := func(bigtable.Row) bool { return true }
+			if err := tbl.ReadRows(context.Background(), bigtable.NewRange(key, ""), noop, limit); err != nil {
+				log.Printf("Error during scan: %v", err)
+				ok = false
+			}
+		}()
+	}
+	wg.Wait()
+
+	agg := stat.NewAggregate("scans", scans.ds, scans.tries-scans.ok)
+	log.Printf("Scans (%d ok / %d tries):\nscan times:\n%v\nthroughput (rows/second):\n%v",
+		scans.ok, scans.tries, agg, throughputString(agg))
+}
+
+func throughputString(agg *stat.Aggregate) string {
+	var buf bytes.Buffer
+	tw := tabwriter.NewWriter(&buf, 0, 0, 1, ' ', 0) // one-space padding
+	rowLimitF := float64(*rowLimit)
+	fmt.Fprintf(
+		tw,
+		"min:\t%.2f\nmedian:\t%.2f\nmax:\t%.2f\n",
+		rowLimitF/agg.Max.Seconds(),
+		rowLimitF/agg.Median.Seconds(),
+		rowLimitF/agg.Min.Seconds())
+	tw.Flush()
+	return buf.String()
+}
+
+var allStats int64 // atomic
+
+type stats struct {
+	mu        sync.Mutex
+	tries, ok int
+	ds        []time.Duration
+}
+
+func (s *stats) Record(ok bool, d time.Duration) {
+	s.mu.Lock()
+	s.tries++
+	if ok {
+		s.ok++
+	}
+	s.ds = append(s.ds, d)
+	s.mu.Unlock()
+
+	if n := atomic.AddInt64(&allStats, 1); n%1000 == 0 {
+		log.Printf("Progress: done %d ops", n)
+	}
+}
diff --git a/vendor/cloud.google.com/go/bigtable/doc.go b/vendor/cloud.google.com/go/bigtable/doc.go
new file mode 100644
index 0000000..0d7706f
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigtable/doc.go
@@ -0,0 +1,125 @@
+/*
+Copyright 2015 Google Inc. All Rights Reserved.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package bigtable is an API to Google Cloud Bigtable. + +See https://cloud.google.com/bigtable/docs/ for general product documentation. + +Setup and Credentials + +Use NewClient or NewAdminClient to create a client that can be used to access +the data or admin APIs respectively. Both require credentials that have permission +to access the Cloud Bigtable API. + +If your program is run on Google App Engine or Google Compute Engine, using the Application Default Credentials +(https://developers.google.com/accounts/docs/application-default-credentials) +is the simplest option. Those credentials will be used by default when NewClient or NewAdminClient are called. + +To use alternate credentials, pass them to NewClient or NewAdminClient using option.WithTokenSource. +For instance, you can use service account credentials by visiting +https://cloud.google.com/console/project/MYPROJECT/apiui/credential, +creating a new OAuth "Client ID", storing the JSON key somewhere accessible, and writing + jsonKey, err := ioutil.ReadFile(pathToKeyFile) + ... + config, err := google.JWTConfigFromJSON(jsonKey, bigtable.Scope) // or bigtable.AdminScope, etc. + ... + client, err := bigtable.NewClient(ctx, project, instance, option.WithTokenSource(config.TokenSource(ctx))) + ... +Here, `google` means the golang.org/x/oauth2/google package +and `option` means the google.golang.org/api/option package. + +Reading + +The principal way to read from a Bigtable is to use the ReadRows method on *Table. +A RowRange specifies a contiguous portion of a table. A Filter may be provided through +RowFilter to limit or transform the data that is returned. + tbl := client.Open("mytable") + ... + // Read all the rows starting with "com.google.", + // but only fetch the columns in the "links" family. + rr := bigtable.PrefixRange("com.google.") + err := tbl.ReadRows(ctx, rr, func(r Row) bool { + // do something with r + return true // keep going + }, bigtable.RowFilter(bigtable.FamilyFilter("links"))) + ... + +To read a single row, use the ReadRow helper method. + r, err := tbl.ReadRow(ctx, "com.google.cloud") // "com.google.cloud" is the entire row key + ... + +Writing + +This API exposes two distinct forms of writing to a Bigtable: a Mutation and a ReadModifyWrite. +The former expresses idempotent operations. +The latter expresses non-idempotent operations and returns the new values of updated cells. +These operations are performed by creating a Mutation or ReadModifyWrite (with NewMutation or NewReadModifyWrite), +building up one or more operations on that, and then using the Apply or ApplyReadModifyWrite +methods on a Table. + +For instance, to set a couple of cells in a table, + tbl := client.Open("mytable") + mut := bigtable.NewMutation() + mut.Set("links", "maps.google.com", bigtable.Now(), []byte("1")) + mut.Set("links", "golang.org", bigtable.Now(), []byte("1")) + err := tbl.Apply(ctx, "com.google.cloud", mut) + ... 
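+
+A Mutation can also be applied conditionally. As a hedged sketch (the row key,
+family name and predicate are illustrative; NewCondMutation is this package's
+helper that wraps a predicate Filter together with the mutations to apply when
+the predicate does or does not match the row):
+	mut := bigtable.NewMutation()
+	mut.DeleteCellsInFamily("links")
+	condMut := bigtable.NewCondMutation(bigtable.FamilyFilter("links"), mut, nil)
+	err := tbl.Apply(ctx, "com.google.cloud", condMut)
+	...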
+ +To increment an encoded value in one cell, + tbl := client.Open("mytable") + rmw := bigtable.NewReadModifyWrite() + rmw.Increment("links", "golang.org", 12) // add 12 to the cell in column "links:golang.org" + r, err := tbl.ApplyReadModifyWrite(ctx, "com.google.cloud", rmw) + ... + +Retries + +If a read or write operation encounters a transient error it will be retried until a successful +response, an unretryable error or the context deadline is reached. Non-idempotent writes (where +the timestamp is set to ServerTime) will not be retried. In the case of ReadRows, retried calls +will not re-scan rows that have already been processed. + +Authentication + +See examples of authorization and authentication at +https://godoc.org/cloud.google.com/go#pkg-examples. + +*/ +package bigtable // import "cloud.google.com/go/bigtable" + +// Scope constants for authentication credentials. +// These should be used when using credential creation functions such as oauth.NewServiceAccountFromFile. +const ( + // Scope is the OAuth scope for Cloud Bigtable data operations. + Scope = "https://www.googleapis.com/auth/bigtable.data" + // ReadonlyScope is the OAuth scope for Cloud Bigtable read-only data operations. + ReadonlyScope = "https://www.googleapis.com/auth/bigtable.readonly" + + // AdminScope is the OAuth scope for Cloud Bigtable table admin operations. + AdminScope = "https://www.googleapis.com/auth/bigtable.admin.table" + + // InstanceAdminScope is the OAuth scope for Cloud Bigtable instance (and cluster) admin operations. + InstanceAdminScope = "https://www.googleapis.com/auth/bigtable.admin.cluster" +) + +// clientUserAgent identifies the version of this package. +// It should be bumped upon significant changes only. +const clientUserAgent = "cbt-go/20160628" + +// resourcePrefixHeader is the name of the metadata header used to indicate +// the resource being operated on. +const resourcePrefixHeader = "google-cloud-resource-prefix" diff --git a/vendor/cloud.google.com/go/bigtable/export_test.go b/vendor/cloud.google.com/go/bigtable/export_test.go new file mode 100644 index 0000000..888e4af --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/export_test.go @@ -0,0 +1,222 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package bigtable
+
+import (
+	"errors"
+	"flag"
+	"fmt"
+	"strings"
+	"time"
+
+	"cloud.google.com/go/bigtable/bttest"
+	"golang.org/x/net/context"
+	"google.golang.org/api/option"
+	"google.golang.org/grpc"
+)
+
+var legacyUseProd string
+var integrationConfig IntegrationTestConfig
+
+func init() {
+	c := &integrationConfig
+
+	flag.BoolVar(&c.UseProd, "it.use-prod", false, "Use remote bigtable instead of local emulator")
+	flag.StringVar(&c.AdminEndpoint, "it.admin-endpoint", "", "Admin api host and port")
+	flag.StringVar(&c.DataEndpoint, "it.data-endpoint", "", "Data api host and port")
+	flag.StringVar(&c.Project, "it.project", "", "Project to use for integration test")
+	flag.StringVar(&c.Instance, "it.instance", "", "Bigtable instance to use")
+	flag.StringVar(&c.Cluster, "it.cluster", "", "Bigtable cluster to use")
+	flag.StringVar(&c.Table, "it.table", "", "Bigtable table to create")
+
+	// Backwards compat
+	flag.StringVar(&legacyUseProd, "use_prod", "", `DEPRECATED: if set to "proj,instance,table", run integration test against production`)
+}
+
+// IntegrationTestConfig contains parameters to pick and set up an IntegrationEnv for testing
+type IntegrationTestConfig struct {
+	UseProd       bool
+	AdminEndpoint string
+	DataEndpoint  string
+	Project       string
+	Instance      string
+	Cluster       string
+	Table         string
+}
+
+// IntegrationEnv represents a testing environment.
+// The environment can be implemented using production or an emulator.
+type IntegrationEnv interface {
+	Config() IntegrationTestConfig
+	NewAdminClient() (*AdminClient, error)
+	// NewInstanceAdminClient will return nil if instance administration is unsupported in this environment
+	NewInstanceAdminClient() (*InstanceAdminClient, error)
+	NewClient() (*Client, error)
+	Close()
+}
+
+// NewIntegrationEnv creates a new environment based on the command line args
+func NewIntegrationEnv() (IntegrationEnv, error) {
+	c := integrationConfig
+
+	if legacyUseProd != "" {
+		fmt.Println("WARNING: using legacy commandline arg -use_prod, please switch to -it.*")
+		parts := strings.SplitN(legacyUseProd, ",", 3)
+		c.UseProd = true
+		c.Project = parts[0]
+		c.Instance = parts[1]
+		c.Table = parts[2]
+	}
+
+	if integrationConfig.UseProd {
+		return NewProdEnv(c)
+	}
+	return NewEmulatedEnv(c)
+}
+
+// EmulatedEnv encapsulates the state of an emulator
+type EmulatedEnv struct {
+	config IntegrationTestConfig
+	server *bttest.Server
+}
+
+// NewEmulatedEnv builds and starts the emulator based environment
+func NewEmulatedEnv(config IntegrationTestConfig) (*EmulatedEnv, error) {
+	srv, err := bttest.NewServer("localhost:0", grpc.MaxRecvMsgSize(200<<20), grpc.MaxSendMsgSize(100<<20))
+	if err != nil {
+		return nil, err
+	}
+
+	if config.Project == "" {
+		config.Project = "project"
+	}
+	if config.Instance == "" {
+		config.Instance = "instance"
+	}
+	if config.Table == "" {
+		config.Table = "mytable"
+	}
+	config.AdminEndpoint = srv.Addr
+	config.DataEndpoint = srv.Addr
+
+	env := &EmulatedEnv{
+		config: config,
+		server: srv,
+	}
+	return env, nil
+}
+
+// Close stops & cleans up the emulator
+func (e *EmulatedEnv) Close() {
+	e.server.Close()
+}
+
+// Config gets the config used to build this environment
+func (e *EmulatedEnv) Config() IntegrationTestConfig {
+	return e.config
+}
+
+// NewAdminClient builds a new connected admin client for this environment
+func (e *EmulatedEnv) NewAdminClient() (*AdminClient, error) {
+	timeout := 20 * time.Second
+	ctx, _ := context.WithTimeout(context.Background(), timeout)
+	conn, err :=
grpc.Dial(e.server.Addr, grpc.WithInsecure(), grpc.WithBlock()) + if err != nil { + return nil, err + } + return NewAdminClient(ctx, e.config.Project, e.config.Instance, option.WithGRPCConn(conn)) +} + +// NewInstanceAdminClient returns nil for the emulated environment since the API is not implemented. +func (e *EmulatedEnv) NewInstanceAdminClient() (*InstanceAdminClient, error) { + return nil, nil +} + +// NewClient builds a new connected data client for this environment +func (e *EmulatedEnv) NewClient() (*Client, error) { + timeout := 20 * time.Second + ctx, _ := context.WithTimeout(context.Background(), timeout) + conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure(), grpc.WithBlock(), + grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(100<<20), grpc.MaxCallRecvMsgSize(100<<20))) + if err != nil { + return nil, err + } + return NewClient(ctx, e.config.Project, e.config.Instance, option.WithGRPCConn(conn)) +} + +// ProdEnv encapsulates the state necessary to connect to the external Bigtable service +type ProdEnv struct { + config IntegrationTestConfig +} + +// NewProdEnv builds the environment representation +func NewProdEnv(config IntegrationTestConfig) (*ProdEnv, error) { + if config.Project == "" { + return nil, errors.New("Project not set") + } + if config.Instance == "" { + return nil, errors.New("Instance not set") + } + if config.Table == "" { + return nil, errors.New("Table not set") + } + + return &ProdEnv{config}, nil +} + +// Close is a no-op for production environments +func (e *ProdEnv) Close() {} + +// Config gets the config used to build this environment +func (e *ProdEnv) Config() IntegrationTestConfig { + return e.config +} + +// NewAdminClient builds a new connected admin client for this environment +func (e *ProdEnv) NewAdminClient() (*AdminClient, error) { + timeout := 20 * time.Second + ctx, _ := context.WithTimeout(context.Background(), timeout) + var clientOpts []option.ClientOption + if endpoint := e.config.AdminEndpoint; endpoint != "" { + clientOpts = append(clientOpts, option.WithEndpoint(endpoint)) + } + return NewAdminClient(ctx, e.config.Project, e.config.Instance, clientOpts...) +} + +// NewInstanceAdminClient returns a new connected instance admin client for this environment +func (e *ProdEnv) NewInstanceAdminClient() (*InstanceAdminClient, error) { + timeout := 20 * time.Second + ctx, _ := context.WithTimeout(context.Background(), timeout) + var clientOpts []option.ClientOption + if endpoint := e.config.AdminEndpoint; endpoint != "" { + clientOpts = append(clientOpts, option.WithEndpoint(endpoint)) + } + return NewInstanceAdminClient(ctx, e.config.Project, clientOpts...) +} + +// NewClient builds a connected data client for this environment +func (e *ProdEnv) NewClient() (*Client, error) { + timeout := 20 * time.Second + ctx, _ := context.WithTimeout(context.Background(), timeout) + var clientOpts []option.ClientOption + if endpoint := e.config.DataEndpoint; endpoint != "" { + clientOpts = append(clientOpts, option.WithEndpoint(endpoint)) + } + return NewClient(ctx, e.config.Project, e.config.Instance, clientOpts...) +} diff --git a/vendor/cloud.google.com/go/bigtable/filter.go b/vendor/cloud.google.com/go/bigtable/filter.go new file mode 100644 index 0000000..1fea1d2 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/filter.go @@ -0,0 +1,317 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bigtable + +import ( + "fmt" + "strings" + "time" + + btpb "google.golang.org/genproto/googleapis/bigtable/v2" +) + +// A Filter represents a row filter. +type Filter interface { + String() string + proto() *btpb.RowFilter +} + +// ChainFilters returns a filter that applies a sequence of filters. +func ChainFilters(sub ...Filter) Filter { return chainFilter{sub} } + +type chainFilter struct { + sub []Filter +} + +func (cf chainFilter) String() string { + var ss []string + for _, sf := range cf.sub { + ss = append(ss, sf.String()) + } + return "(" + strings.Join(ss, " | ") + ")" +} + +func (cf chainFilter) proto() *btpb.RowFilter { + chain := &btpb.RowFilter_Chain{} + for _, sf := range cf.sub { + chain.Filters = append(chain.Filters, sf.proto()) + } + return &btpb.RowFilter{ + Filter: &btpb.RowFilter_Chain_{Chain: chain}, + } +} + +// InterleaveFilters returns a filter that applies a set of filters in parallel +// and interleaves the results. +func InterleaveFilters(sub ...Filter) Filter { return interleaveFilter{sub} } + +type interleaveFilter struct { + sub []Filter +} + +func (ilf interleaveFilter) String() string { + var ss []string + for _, sf := range ilf.sub { + ss = append(ss, sf.String()) + } + return "(" + strings.Join(ss, " + ") + ")" +} + +func (ilf interleaveFilter) proto() *btpb.RowFilter { + inter := &btpb.RowFilter_Interleave{} + for _, sf := range ilf.sub { + inter.Filters = append(inter.Filters, sf.proto()) + } + return &btpb.RowFilter{ + Filter: &btpb.RowFilter_Interleave_{Interleave: inter}, + } +} + +// RowKeyFilter returns a filter that matches cells from rows whose +// key matches the provided RE2 pattern. +// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. +func RowKeyFilter(pattern string) Filter { return rowKeyFilter(pattern) } + +type rowKeyFilter string + +func (rkf rowKeyFilter) String() string { return fmt.Sprintf("row(%s)", string(rkf)) } + +func (rkf rowKeyFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{Filter: &btpb.RowFilter_RowKeyRegexFilter{RowKeyRegexFilter: []byte(rkf)}} +} + +// FamilyFilter returns a filter that matches cells whose family name +// matches the provided RE2 pattern. +// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. +func FamilyFilter(pattern string) Filter { return familyFilter(pattern) } + +type familyFilter string + +func (ff familyFilter) String() string { return fmt.Sprintf("col(%s:)", string(ff)) } + +func (ff familyFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{FamilyNameRegexFilter: string(ff)}} +} + +// ColumnFilter returns a filter that matches cells whose column name +// matches the provided RE2 pattern. +// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. 
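+//
+// Filters compose via ChainFilters and InterleaveFilters above; a hedged
+// sketch (the family and qualifier patterns, rr and handleRow are
+// illustrative):
+//
+//	f := bigtable.ChainFilters(
+//		bigtable.FamilyFilter("links"),
+//		bigtable.ColumnFilter("maps.*"),
+//		bigtable.LatestNFilter(1),
+//	)
+//	err := tbl.ReadRows(ctx, rr, handleRow, bigtable.RowFilter(f))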
+func ColumnFilter(pattern string) Filter { return columnFilter(pattern) } + +type columnFilter string + +func (cf columnFilter) String() string { return fmt.Sprintf("col(.*:%s)", string(cf)) } + +func (cf columnFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{ColumnQualifierRegexFilter: []byte(cf)}} +} + +// ValueFilter returns a filter that matches cells whose value +// matches the provided RE2 pattern. +// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. +func ValueFilter(pattern string) Filter { return valueFilter(pattern) } + +type valueFilter string + +func (vf valueFilter) String() string { return fmt.Sprintf("value_match(%s)", string(vf)) } + +func (vf valueFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{Filter: &btpb.RowFilter_ValueRegexFilter{ValueRegexFilter: []byte(vf)}} +} + +// LatestNFilter returns a filter that matches the most recent N cells in each column. +func LatestNFilter(n int) Filter { return latestNFilter(n) } + +type latestNFilter int32 + +func (lnf latestNFilter) String() string { return fmt.Sprintf("col(*,%d)", lnf) } + +func (lnf latestNFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerColumnLimitFilter{CellsPerColumnLimitFilter: int32(lnf)}} +} + +// StripValueFilter returns a filter that replaces each value with the empty string. +func StripValueFilter() Filter { return stripValueFilter{} } + +type stripValueFilter struct{} + +func (stripValueFilter) String() string { return "strip_value()" } +func (stripValueFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{Filter: &btpb.RowFilter_StripValueTransformer{StripValueTransformer: true}} +} + +// TimestampRangeFilter returns a filter that matches any cells whose timestamp is within the given time bounds. A zero +// time means no bound. +// The timestamp will be truncated to millisecond granularity. +func TimestampRangeFilter(startTime time.Time, endTime time.Time) Filter { + trf := timestampRangeFilter{} + if !startTime.IsZero() { + trf.startTime = Time(startTime) + } + if !endTime.IsZero() { + trf.endTime = Time(endTime) + } + return trf +} + +// TimestampRangeFilterMicros returns a filter that matches any cells whose timestamp is within the given time bounds, +// specified in units of microseconds since 1 January 1970. A zero value for the end time is interpreted as no bound. +// The timestamp will be truncated to millisecond granularity. +func TimestampRangeFilterMicros(startTime Timestamp, endTime Timestamp) Filter { + return timestampRangeFilter{startTime, endTime} +} + +type timestampRangeFilter struct { + startTime Timestamp + endTime Timestamp +} + +func (trf timestampRangeFilter) String() string { + return fmt.Sprintf("timestamp_range(%v,%v)", trf.startTime, trf.endTime) +} + +func (trf timestampRangeFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{ + Filter: &btpb.RowFilter_TimestampRangeFilter{TimestampRangeFilter: &btpb.TimestampRange{ + StartTimestampMicros: int64(trf.startTime.TruncateToMilliseconds()), + EndTimestampMicros: int64(trf.endTime.TruncateToMilliseconds()), + }, + }} +} + +// ColumnRangeFilter returns a filter that matches a contiguous range of columns within a single +// family, as specified by an inclusive start qualifier and exclusive end qualifier. 
+func ColumnRangeFilter(family, start, end string) Filter { + return columnRangeFilter{family, start, end} +} + +type columnRangeFilter struct { + family string + start string + end string +} + +func (crf columnRangeFilter) String() string { + return fmt.Sprintf("columnRangeFilter(%s,%s,%s)", crf.family, crf.start, crf.end) +} + +func (crf columnRangeFilter) proto() *btpb.RowFilter { + r := &btpb.ColumnRange{FamilyName: crf.family} + if crf.start != "" { + r.StartQualifier = &btpb.ColumnRange_StartQualifierClosed{StartQualifierClosed: []byte(crf.start)} + } + if crf.end != "" { + r.EndQualifier = &btpb.ColumnRange_EndQualifierOpen{EndQualifierOpen: []byte(crf.end)} + } + return &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnRangeFilter{ColumnRangeFilter: r}} +} + +// ValueRangeFilter returns a filter that matches cells with values that fall within +// the given range, as specified by an inclusive start value and exclusive end value. +func ValueRangeFilter(start, end []byte) Filter { + return valueRangeFilter{start, end} +} + +type valueRangeFilter struct { + start []byte + end []byte +} + +func (vrf valueRangeFilter) String() string { + return fmt.Sprintf("valueRangeFilter(%s,%s)", vrf.start, vrf.end) +} + +func (vrf valueRangeFilter) proto() *btpb.RowFilter { + r := &btpb.ValueRange{} + if vrf.start != nil { + r.StartValue = &btpb.ValueRange_StartValueClosed{StartValueClosed: vrf.start} + } + if vrf.end != nil { + r.EndValue = &btpb.ValueRange_EndValueOpen{EndValueOpen: vrf.end} + } + return &btpb.RowFilter{Filter: &btpb.RowFilter_ValueRangeFilter{ValueRangeFilter: r}} +} + +// ConditionFilter returns a filter that evaluates to one of two possible filters depending +// on whether or not the given predicate filter matches at least one cell. +// If the matched filter is nil then no results will be returned. +// IMPORTANT NOTE: The predicate filter does not execute atomically with the +// true and false filters, which may lead to inconsistent or unexpected +// results. Additionally, condition filters have poor performance, especially +// when filters are set for the false condition. +func ConditionFilter(predicateFilter, trueFilter, falseFilter Filter) Filter { + return conditionFilter{predicateFilter, trueFilter, falseFilter} +} + +type conditionFilter struct { + predicateFilter Filter + trueFilter Filter + falseFilter Filter +} + +func (cf conditionFilter) String() string { + return fmt.Sprintf("conditionFilter(%s,%s,%s)", cf.predicateFilter, cf.trueFilter, cf.falseFilter) +} + +func (cf conditionFilter) proto() *btpb.RowFilter { + var tf *btpb.RowFilter + var ff *btpb.RowFilter + if cf.trueFilter != nil { + tf = cf.trueFilter.proto() + } + if cf.falseFilter != nil { + ff = cf.falseFilter.proto() + } + return &btpb.RowFilter{ + Filter: &btpb.RowFilter_Condition_{Condition: &btpb.RowFilter_Condition{ + PredicateFilter: cf.predicateFilter.proto(), + TrueFilter: tf, + FalseFilter: ff, + }}} +} + +// CellsPerRowOffsetFilter returns a filter that skips the first N cells of each row, matching all subsequent cells. 
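+//
+// Chained with CellsPerRowLimitFilter (defined below), this can page through
+// a wide row; an illustrative sketch that reads the second hundred cells:
+//
+//	page2 := bigtable.ChainFilters(
+//		bigtable.CellsPerRowOffsetFilter(100),
+//		bigtable.CellsPerRowLimitFilter(100),
+//	)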
+func CellsPerRowOffsetFilter(n int) Filter { + return cellsPerRowOffsetFilter(n) +} + +type cellsPerRowOffsetFilter int32 + +func (cof cellsPerRowOffsetFilter) String() string { + return fmt.Sprintf("cells_per_row_offset(%d)", cof) +} + +func (cof cellsPerRowOffsetFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerRowOffsetFilter{CellsPerRowOffsetFilter: int32(cof)}} +} + +// CellsPerRowLimitFilter returns a filter that matches only the first N cells of each row. +func CellsPerRowLimitFilter(n int) Filter { + return cellsPerRowLimitFilter(n) +} + +type cellsPerRowLimitFilter int32 + +func (clf cellsPerRowLimitFilter) String() string { + return fmt.Sprintf("cells_per_row_limit(%d)", clf) +} + +func (clf cellsPerRowLimitFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerRowLimitFilter{CellsPerRowLimitFilter: int32(clf)}} +} + +// TODO(dsymonds): More filters: sampling diff --git a/vendor/cloud.google.com/go/bigtable/gc.go b/vendor/cloud.google.com/go/bigtable/gc.go new file mode 100644 index 0000000..74510da --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/gc.go @@ -0,0 +1,158 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bigtable + +import ( + "fmt" + "strings" + "time" + + durpb "github.com/golang/protobuf/ptypes/duration" + bttdpb "google.golang.org/genproto/googleapis/bigtable/admin/v2" +) + +// A GCPolicy represents a rule that determines which cells are eligible for garbage collection. +type GCPolicy interface { + String() string + proto() *bttdpb.GcRule +} + +// IntersectionPolicy returns a GC policy that only applies when all its sub-policies apply. +func IntersectionPolicy(sub ...GCPolicy) GCPolicy { return intersectionPolicy{sub} } + +type intersectionPolicy struct { + sub []GCPolicy +} + +func (ip intersectionPolicy) String() string { + var ss []string + for _, sp := range ip.sub { + ss = append(ss, sp.String()) + } + return "(" + strings.Join(ss, " && ") + ")" +} + +func (ip intersectionPolicy) proto() *bttdpb.GcRule { + inter := &bttdpb.GcRule_Intersection{} + for _, sp := range ip.sub { + inter.Rules = append(inter.Rules, sp.proto()) + } + return &bttdpb.GcRule{ + Rule: &bttdpb.GcRule_Intersection_{Intersection: inter}, + } +} + +// UnionPolicy returns a GC policy that applies when any of its sub-policies apply. 
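+//
+// For example (the values are illustrative), cells could become eligible for
+// collection once they are either older than 30 days or beyond the newest
+// three versions:
+//
+//	policy := bigtable.UnionPolicy(
+//		bigtable.MaxAgePolicy(30*24*time.Hour),
+//		bigtable.MaxVersionsPolicy(3),
+//	)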
+func UnionPolicy(sub ...GCPolicy) GCPolicy { return unionPolicy{sub} } + +type unionPolicy struct { + sub []GCPolicy +} + +func (up unionPolicy) String() string { + var ss []string + for _, sp := range up.sub { + ss = append(ss, sp.String()) + } + return "(" + strings.Join(ss, " || ") + ")" +} + +func (up unionPolicy) proto() *bttdpb.GcRule { + union := &bttdpb.GcRule_Union{} + for _, sp := range up.sub { + union.Rules = append(union.Rules, sp.proto()) + } + return &bttdpb.GcRule{ + Rule: &bttdpb.GcRule_Union_{Union: union}, + } +} + +// MaxVersionsPolicy returns a GC policy that applies to all versions of a cell +// except for the most recent n. +func MaxVersionsPolicy(n int) GCPolicy { return maxVersionsPolicy(n) } + +type maxVersionsPolicy int + +func (mvp maxVersionsPolicy) String() string { return fmt.Sprintf("versions() > %d", int(mvp)) } + +func (mvp maxVersionsPolicy) proto() *bttdpb.GcRule { + return &bttdpb.GcRule{Rule: &bttdpb.GcRule_MaxNumVersions{MaxNumVersions: int32(mvp)}} +} + +// MaxAgePolicy returns a GC policy that applies to all cells +// older than the given age. +func MaxAgePolicy(d time.Duration) GCPolicy { return maxAgePolicy(d) } + +type maxAgePolicy time.Duration + +var units = []struct { + d time.Duration + suffix string +}{ + {24 * time.Hour, "d"}, + {time.Hour, "h"}, + {time.Minute, "m"}, +} + +func (ma maxAgePolicy) String() string { + d := time.Duration(ma) + for _, u := range units { + if d%u.d == 0 { + return fmt.Sprintf("age() > %d%s", d/u.d, u.suffix) + } + } + return fmt.Sprintf("age() > %d", d/time.Microsecond) +} + +func (ma maxAgePolicy) proto() *bttdpb.GcRule { + // This doesn't handle overflows, etc. + // Fix this if people care about GC policies over 290 years. + ns := time.Duration(ma).Nanoseconds() + return &bttdpb.GcRule{ + Rule: &bttdpb.GcRule_MaxAge{MaxAge: &durpb.Duration{ + Seconds: ns / 1e9, + Nanos: int32(ns % 1e9), + }}, + } +} + +// GCRuleToString converts the given GcRule proto to a user-visible string. +func GCRuleToString(rule *bttdpb.GcRule) string { + if rule == nil { + return "" + } + switch r := rule.Rule.(type) { + case *bttdpb.GcRule_MaxNumVersions: + return MaxVersionsPolicy(int(r.MaxNumVersions)).String() + case *bttdpb.GcRule_MaxAge: + return MaxAgePolicy(time.Duration(r.MaxAge.Seconds) * time.Second).String() + case *bttdpb.GcRule_Intersection_: + return joinRules(r.Intersection.Rules, " && ") + case *bttdpb.GcRule_Union_: + return joinRules(r.Union.Rules, " || ") + default: + return "" + } +} + +func joinRules(rules []*bttdpb.GcRule, sep string) string { + var chunks []string + for _, r := range rules { + chunks = append(chunks, GCRuleToString(r)) + } + return "(" + strings.Join(chunks, sep) + ")" +} diff --git a/vendor/cloud.google.com/go/bigtable/gc_test.go b/vendor/cloud.google.com/go/bigtable/gc_test.go new file mode 100644 index 0000000..0c77958 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/gc_test.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package bigtable + +import ( + "testing" + "time" + + bttdpb "google.golang.org/genproto/googleapis/bigtable/admin/v2" +) + +func TestGcRuleToString(t *testing.T) { + + intersection := IntersectionPolicy(MaxVersionsPolicy(5), MaxVersionsPolicy(10), MaxAgePolicy(16*time.Hour)) + + var tests = []struct { + proto *bttdpb.GcRule + want string + }{ + {MaxAgePolicy(72 * time.Hour).proto(), "age() > 3d"}, + {MaxVersionsPolicy(5).proto(), "versions() > 5"}, + {intersection.proto(), "(versions() > 5 && versions() > 10 && age() > 16h)"}, + {UnionPolicy(intersection, MaxAgePolicy(72*time.Hour)).proto(), + "((versions() > 5 && versions() > 10 && age() > 16h) || age() > 3d)"}, + } + + for _, test := range tests { + got := GCRuleToString(test.proto) + if got != test.want { + t.Errorf("got gc rule string: %v, wanted: %v", got, test.want) + } + } +} diff --git a/vendor/cloud.google.com/go/bigtable/go18.go b/vendor/cloud.google.com/go/bigtable/go18.go new file mode 100644 index 0000000..552b7b6 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/go18.go @@ -0,0 +1,68 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +package bigtable + +import ( + "fmt" + + "go.opencensus.io/plugin/ocgrpc" + "go.opencensus.io/trace" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +func openCensusOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithGRPCDialOption(grpc.WithStatsHandler(&ocgrpc.ClientHandler{})), + } +} + +func traceStartSpan(ctx context.Context, name string) context.Context { + ctx, _ = trace.StartSpan(ctx, name) + return ctx +} + +func traceEndSpan(ctx context.Context, err error) { + span := trace.FromContext(ctx) + if err != nil { + span.SetStatus(trace.Status{Message: err.Error()}) + } + + span.End() +} + +func tracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) { + var attrs []trace.Attribute + for k, v := range attrMap { + var a trace.Attribute + switch v := v.(type) { + case string: + a = trace.StringAttribute(k, v) + case bool: + a = trace.BoolAttribute(k, v) + case int: + a = trace.Int64Attribute(k, int64(v)) + case int64: + a = trace.Int64Attribute(k, v) + default: + a = trace.StringAttribute(k, fmt.Sprintf("%#v", v)) + } + attrs = append(attrs, a) + } + trace.FromContext(ctx).Annotatef(attrs, format, args...) +} diff --git a/vendor/cloud.google.com/go/bigtable/internal/cbtconfig/cbtconfig.go b/vendor/cloud.google.com/go/bigtable/internal/cbtconfig/cbtconfig.go new file mode 100644 index 0000000..073406f --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/internal/cbtconfig/cbtconfig.go @@ -0,0 +1,246 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cbtconfig encapsulates common code for reading configuration from .cbtrc and gcloud. +package cbtconfig + +import ( + "bufio" + "bytes" + "crypto/tls" + "crypto/x509" + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "time" + + "golang.org/x/oauth2" + "google.golang.org/grpc/credentials" +) + +// Config represents a configuration. +type Config struct { + Project, Instance string // required + Creds string // optional + AdminEndpoint string // optional + DataEndpoint string // optional + CertFile string // optional + TokenSource oauth2.TokenSource // derived + TLSCreds credentials.TransportCredentials // derived +} + +type RequiredFlags uint + +const NoneRequired RequiredFlags = 0 +const ( + ProjectRequired RequiredFlags = 1 << iota + InstanceRequired +) +const ProjectAndInstanceRequired RequiredFlags = ProjectRequired | InstanceRequired + +// RegisterFlags registers a set of standard flags for this config. +// It should be called before flag.Parse. +func (c *Config) RegisterFlags() { + flag.StringVar(&c.Project, "project", c.Project, "project ID, if unset uses gcloud configured project") + flag.StringVar(&c.Instance, "instance", c.Instance, "Cloud Bigtable instance") + flag.StringVar(&c.Creds, "creds", c.Creds, "if set, use application credentials in this file") + flag.StringVar(&c.AdminEndpoint, "admin-endpoint", c.AdminEndpoint, "Override the admin api endpoint") + flag.StringVar(&c.DataEndpoint, "data-endpoint", c.DataEndpoint, "Override the data api endpoint") + flag.StringVar(&c.CertFile, "cert-file", c.CertFile, "Override the TLS certificates file") +} + +// CheckFlags checks that the required config values are set. +func (c *Config) CheckFlags(required RequiredFlags) error { + var missing []string + if c.CertFile != "" { + b, err := ioutil.ReadFile(c.CertFile) + if err != nil { + return fmt.Errorf("Failed to load certificates from %s: %v", c.CertFile, err) + } + + cp := x509.NewCertPool() + if !cp.AppendCertsFromPEM(b) { + return fmt.Errorf("Failed to append certificates from %s", c.CertFile) + } + + c.TLSCreds = credentials.NewTLS(&tls.Config{RootCAs: cp}) + } + if required != NoneRequired { + c.SetFromGcloud() + } + if required&ProjectRequired != 0 && c.Project == "" { + missing = append(missing, "-project") + } + if required&InstanceRequired != 0 && c.Instance == "" { + missing = append(missing, "-instance") + } + if len(missing) > 0 { + return fmt.Errorf("Missing %s", strings.Join(missing, " and ")) + } + return nil +} + +// Filename returns the filename consulted for standard configuration. +func Filename() string { + // TODO(dsymonds): Might need tweaking for Windows. + return filepath.Join(os.Getenv("HOME"), ".cbtrc") +} + +// Load loads a .cbtrc file. +// If the file is not present, an empty config is returned. 
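+//
+// The format is one "key = value" pair per line; an illustrative sketch of a
+// .cbtrc (all values here are hypothetical):
+//
+//	project = my-project
+//	instance = my-instance
+//	creds = /path/to/key.json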
+func Load() (*Config, error) {
+	filename := Filename()
+	data, err := ioutil.ReadFile(filename)
+	if err != nil {
+		// Silently fail if the file isn't there.
+		if os.IsNotExist(err) {
+			return &Config{}, nil
+		}
+		return nil, fmt.Errorf("Reading %s: %v", filename, err)
+	}
+	c := new(Config)
+	s := bufio.NewScanner(bytes.NewReader(data))
+	for s.Scan() {
+		line := s.Text()
+		i := strings.Index(line, "=")
+		if i < 0 {
+			return nil, fmt.Errorf("Bad line in %s: %q", filename, line)
+		}
+		key, val := strings.TrimSpace(line[:i]), strings.TrimSpace(line[i+1:])
+		switch key {
+		default:
+			return nil, fmt.Errorf("Unknown key in %s: %q", filename, key)
+		case "project":
+			c.Project = val
+		case "instance":
+			c.Instance = val
+		case "creds":
+			c.Creds = val
+		case "admin-endpoint":
+			c.AdminEndpoint = val
+		case "data-endpoint":
+			c.DataEndpoint = val
+		}
+	}
+	return c, s.Err()
+}
+
+type GcloudCredential struct {
+	AccessToken string    `json:"access_token"`
+	Expiry      time.Time `json:"token_expiry"`
+}
+
+func (cred *GcloudCredential) Token() *oauth2.Token {
+	return &oauth2.Token{AccessToken: cred.AccessToken, TokenType: "Bearer", Expiry: cred.Expiry}
+}
+
+type GcloudConfig struct {
+	Configuration struct {
+		Properties struct {
+			Core struct {
+				Project string `json:"project"`
+			} `json:"core"`
+		} `json:"properties"`
+	} `json:"configuration"`
+	Credential GcloudCredential `json:"credential"`
+}
+
+type GcloudCmdTokenSource struct {
+	Command string
+	Args    []string
+}
+
+// Token implements the oauth2.TokenSource interface.
+func (g *GcloudCmdTokenSource) Token() (*oauth2.Token, error) {
+	gcloudConfig, err := LoadGcloudConfig(g.Command, g.Args)
+	if err != nil {
+		return nil, err
+	}
+	return gcloudConfig.Credential.Token(), nil
+}
+
+// LoadGcloudConfig retrieves the gcloud configuration values we need via the
+// 'config-helper' command
+func LoadGcloudConfig(gcloudCmd string, gcloudCmdArgs []string) (*GcloudConfig, error) {
+	out, err := exec.Command(gcloudCmd, gcloudCmdArgs...).Output()
+	if err != nil {
+		return nil, fmt.Errorf("Could not retrieve gcloud configuration")
+	}
+
+	var gcloudConfig GcloudConfig
+	if err := json.Unmarshal(out, &gcloudConfig); err != nil {
+		return nil, fmt.Errorf("Could not parse gcloud configuration")
+	}
+
+	return &gcloudConfig, nil
+}
+
+// SetFromGcloud retrieves and sets any missing config values from the gcloud
+// configuration if possible
+func (c *Config) SetFromGcloud() error {
+	if c.Creds == "" {
+		c.Creds = os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
+		if c.Creds == "" {
+			log.Printf("-creds flag unset, will use gcloud credential")
+		}
+	} else {
+		os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", c.Creds)
+	}
+
+	if c.Project == "" {
+		log.Printf("-project flag unset, will use gcloud active project")
+	}
+
+	if c.Creds != "" && c.Project != "" {
+		return nil
+	}
+
+	gcloudCmd := "gcloud"
+	if runtime.GOOS == "windows" {
+		gcloudCmd = gcloudCmd + ".cmd"
+	}
+
+	gcloudCmdArgs := []string{"config", "config-helper",
+		"--format=json(configuration.properties.core.project,credential)"}
+
+	gcloudConfig, err := LoadGcloudConfig(gcloudCmd, gcloudCmdArgs)
+	if err != nil {
+		return err
+	}
+
+	if c.Project == "" && gcloudConfig.Configuration.Properties.Core.Project != "" {
+		log.Printf("gcloud active project is \"%s\"",
+			gcloudConfig.Configuration.Properties.Core.Project)
+		c.Project = gcloudConfig.Configuration.Properties.Core.Project
+	}
+
+	if c.Creds == "" {
+		c.TokenSource = oauth2.ReuseTokenSource(
+			gcloudConfig.Credential.Token(),
&GcloudCmdTokenSource{Command: gcloudCmd, Args: gcloudCmdArgs}) + } + + return nil +} diff --git a/vendor/cloud.google.com/go/bigtable/internal/gax/call_option.go b/vendor/cloud.google.com/go/bigtable/internal/gax/call_option.go new file mode 100644 index 0000000..60a18be --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/internal/gax/call_option.go @@ -0,0 +1,106 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This is ia snapshot from github.com/googleapis/gax-go with minor modifications. +package gax + +import ( + "time" + + "google.golang.org/grpc/codes" +) + +type CallOption interface { + Resolve(*CallSettings) +} + +type callOptions []CallOption + +func (opts callOptions) Resolve(s *CallSettings) *CallSettings { + for _, opt := range opts { + opt.Resolve(s) + } + return s +} + +// Encapsulates the call settings for a particular API call. +type CallSettings struct { + Timeout time.Duration + RetrySettings RetrySettings +} + +// Per-call configurable settings for retrying upon transient failure. +type RetrySettings struct { + RetryCodes map[codes.Code]bool + BackoffSettings BackoffSettings +} + +// Parameters to the exponential backoff algorithm for retrying. +type BackoffSettings struct { + DelayTimeoutSettings MultipliableDuration + RPCTimeoutSettings MultipliableDuration +} + +type MultipliableDuration struct { + Initial time.Duration + Max time.Duration + Multiplier float64 +} + +func (w CallSettings) Resolve(s *CallSettings) { + s.Timeout = w.Timeout + s.RetrySettings = w.RetrySettings + + s.RetrySettings.RetryCodes = make(map[codes.Code]bool, len(w.RetrySettings.RetryCodes)) + for key, value := range w.RetrySettings.RetryCodes { + s.RetrySettings.RetryCodes[key] = value + } +} + +type withRetryCodes []codes.Code + +func (w withRetryCodes) Resolve(s *CallSettings) { + s.RetrySettings.RetryCodes = make(map[codes.Code]bool) + for _, code := range w { + s.RetrySettings.RetryCodes[code] = true + } +} + +// WithRetryCodes sets a list of Google API canonical error codes upon which a +// retry should be attempted. +func WithRetryCodes(retryCodes []codes.Code) CallOption { + return withRetryCodes(retryCodes) +} + +type withDelayTimeoutSettings MultipliableDuration + +func (w withDelayTimeoutSettings) Resolve(s *CallSettings) { + s.RetrySettings.BackoffSettings.DelayTimeoutSettings = MultipliableDuration(w) +} + +// WithDelayTimeoutSettings specifies: +// - The initial delay time, in milliseconds, between the completion of +// the first failed request and the initiation of the first retrying +// request. +// - The multiplier by which to increase the delay time between the +// completion of failed requests, and the initiation of the subsequent +// retrying request. +// - The maximum delay time, in milliseconds, between requests. When this +// value is reached, `RetryDelayMultiplier` will no longer be used to +// increase delay time. 
+func WithDelayTimeoutSettings(initial time.Duration, max time.Duration, multiplier float64) CallOption {
+	return withDelayTimeoutSettings(MultipliableDuration{initial, max, multiplier})
+}
diff --git a/vendor/cloud.google.com/go/bigtable/internal/gax/invoke.go b/vendor/cloud.google.com/go/bigtable/internal/gax/invoke.go
new file mode 100644
index 0000000..b7be7d4
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigtable/internal/gax/invoke.go
@@ -0,0 +1,84 @@
+/*
+Copyright 2015 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This is a snapshot from github.com/googleapis/gax-go with minor modifications.
+package gax
+
+import (
+	"log"
+	"math/rand"
+	"os"
+	"time"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+)
+
+var logger = log.New(os.Stderr, "", log.LstdFlags)
+
+// APICall is a user defined call stub.
+type APICall func(context.Context) error
+
+// scaleDuration returns the product of a and mult.
+func scaleDuration(a time.Duration, mult float64) time.Duration {
+	ns := float64(a) * mult
+	return time.Duration(ns)
+}
+
+// invokeWithRetry calls stub using an exponential backoff retry mechanism
+// based on the values provided in callSettings.
+func invokeWithRetry(ctx context.Context, stub APICall, callSettings CallSettings) error {
+	retrySettings := callSettings.RetrySettings
+	backoffSettings := callSettings.RetrySettings.BackoffSettings
+	delay := backoffSettings.DelayTimeoutSettings.Initial
+	for {
+		// Stop if the context deadline is exceeded or the context is canceled.
+		if ctx.Err() != nil {
+			return ctx.Err()
+		}
+		err := stub(ctx)
+		code := grpc.Code(err)
+		if code == codes.OK {
+			return nil
+		}
+
+		if !retrySettings.RetryCodes[code] {
+			return err
+		}
+
+		// Sleep a random amount up to the current delay
+		d := time.Duration(rand.Int63n(int64(delay)))
+		delayCtx, _ := context.WithTimeout(ctx, d)
+		logger.Printf("Retryable error: %v, retrying in %v", err, d)
+		<-delayCtx.Done()
+
+		delay = scaleDuration(delay, backoffSettings.DelayTimeoutSettings.Multiplier)
+		if delay > backoffSettings.DelayTimeoutSettings.Max {
+			delay = backoffSettings.DelayTimeoutSettings.Max
+		}
+	}
+}
+
+// Invoke calls stub with a child of context modified by the specified options.
+func Invoke(ctx context.Context, stub APICall, opts ...CallOption) error {
+	settings := &CallSettings{}
+	callOptions(opts).Resolve(settings)
+	if len(settings.RetrySettings.RetryCodes) > 0 {
+		return invokeWithRetry(ctx, stub, *settings)
+	}
+	return stub(ctx)
+}
diff --git a/vendor/cloud.google.com/go/bigtable/internal/gax/invoke_test.go b/vendor/cloud.google.com/go/bigtable/internal/gax/invoke_test.go
new file mode 100644
index 0000000..6d3c67e
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigtable/internal/gax/invoke_test.go
@@ -0,0 +1,49 @@
+/*
+Copyright 2015 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package gax + +import ( + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestRandomizedDelays(t *testing.T) { + max := 200 * time.Millisecond + settings := []CallOption{ + WithRetryCodes([]codes.Code{codes.Unavailable, codes.DeadlineExceeded}), + WithDelayTimeoutSettings(10*time.Millisecond, max, 1.5), + } + + deadline := time.Now().Add(1 * time.Second) + ctx, _ := context.WithDeadline(context.Background(), deadline) + var invokeTime time.Time + Invoke(ctx, func(childCtx context.Context) error { + // Keep failing, make sure we never slept more than max (plus a fudge factor) + if !invokeTime.IsZero() { + if got, want := time.Since(invokeTime), max; got > (want + 20*time.Millisecond) { + t.Logf("Slept too long. Got: %v, want: %v", got, max) + } + } + invokeTime = time.Now() + // Workaround for `go vet`: https://github.com/grpc/grpc-go/issues/90 + errf := status.Errorf + return errf(codes.Unavailable, "") + }, settings...) +} diff --git a/vendor/cloud.google.com/go/bigtable/internal/option/option.go b/vendor/cloud.google.com/go/bigtable/internal/option/option.go new file mode 100644 index 0000000..3b9072e --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/internal/option/option.go @@ -0,0 +1,48 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package option contains common code for dealing with client options. +package option + +import ( + "fmt" + "os" + + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +// DefaultClientOptions returns the default client options to use for the +// client's gRPC connection. +func DefaultClientOptions(endpoint, scope, userAgent string) ([]option.ClientOption, error) { + var o []option.ClientOption + // Check the environment variables for the bigtable emulator. + // Dial it directly and don't pass any credentials. + if addr := os.Getenv("BIGTABLE_EMULATOR_HOST"); addr != "" { + conn, err := grpc.Dial(addr, grpc.WithInsecure()) + if err != nil { + return nil, fmt.Errorf("emulator grpc.Dial: %v", err) + } + o = []option.ClientOption{option.WithGRPCConn(conn)} + } else { + o = []option.ClientOption{ + option.WithEndpoint(endpoint), + option.WithScopes(scope), + option.WithUserAgent(userAgent), + } + } + return o, nil +} diff --git a/vendor/cloud.google.com/go/bigtable/internal/stat/stats.go b/vendor/cloud.google.com/go/bigtable/internal/stat/stats.go new file mode 100644 index 0000000..5fb047f --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/internal/stat/stats.go @@ -0,0 +1,144 @@ +// Copyright 2016 Google Inc. 
All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stat
+
+import (
+	"bytes"
+	"encoding/csv"
+	"fmt"
+	"io"
+	"math"
+	"sort"
+	"strconv"
+	"text/tabwriter"
+	"time"
+)
+
+type byDuration []time.Duration
+
+func (data byDuration) Len() int           { return len(data) }
+func (data byDuration) Swap(i, j int)      { data[i], data[j] = data[j], data[i] }
+func (data byDuration) Less(i, j int) bool { return data[i] < data[j] }
+
+// quantile returns a value representing the kth of q quantiles.
+// May alter the order of data.
+//
+// For example, with len(data) == 5, k == 1 and q == 2, the index i works out
+// to 2, so the median is exactly data[2]; when i falls between two indices,
+// the result linearly interpolates between the two neighboring elements.
+func quantile(data []time.Duration, k, q int) (quantile time.Duration, ok bool) {
+	if len(data) < 1 {
+		return 0, false
+	}
+	if k > q {
+		return 0, false
+	}
+	if k < 0 || q < 1 {
+		return 0, false
+	}
+
+	sort.Sort(byDuration(data))
+
+	if k == 0 {
+		return data[0], true
+	}
+	if k == q {
+		return data[len(data)-1], true
+	}
+
+	bucketSize := float64(len(data)-1) / float64(q)
+	i := float64(k) * bucketSize
+
+	lower := int(math.Trunc(i))
+	var upper int
+	if i > float64(lower) && lower+1 < len(data) {
+		// If the quantile lies between two elements
+		upper = lower + 1
+	} else {
+		upper = lower
+	}
+	weightUpper := i - float64(lower)
+	weightLower := 1 - weightUpper
+	return time.Duration(weightLower*float64(data[lower]) + weightUpper*float64(data[upper])), true
+}
+
+// An Aggregate summarizes the latency distribution of one named operation.
+type Aggregate struct {
+	Name               string
+	Count, Errors      int
+	Min, Median, Max   time.Duration
+	P75, P90, P95, P99 time.Duration // percentiles
+}
+
+// NewAggregate constructs an aggregate from latencies. Returns nil if latencies does not contain aggregatable data.
+func NewAggregate(name string, latencies []time.Duration, errorCount int) *Aggregate {
+	agg := Aggregate{Name: name, Count: len(latencies), Errors: errorCount}
+
+	if len(latencies) == 0 {
+		return nil
+	}
+	var ok bool
+	if agg.Min, ok = quantile(latencies, 0, 2); !ok {
+		return nil
+	}
+	if agg.Median, ok = quantile(latencies, 1, 2); !ok {
+		return nil
+	}
+	if agg.Max, ok = quantile(latencies, 2, 2); !ok {
+		return nil
+	}
+	if agg.P75, ok = quantile(latencies, 75, 100); !ok {
+		return nil
+	}
+	if agg.P90, ok = quantile(latencies, 90, 100); !ok {
+		return nil
+	}
+	if agg.P95, ok = quantile(latencies, 95, 100); !ok {
+		return nil
+	}
+	if agg.P99, ok = quantile(latencies, 99, 100); !ok {
+		return nil
+	}
+	return &agg
+}
+
+func (agg *Aggregate) String() string {
+	if agg == nil {
+		return "no data"
+	}
+	var buf bytes.Buffer
+	tw := tabwriter.NewWriter(&buf, 0, 0, 1, ' ', 0) // one-space padding
+	fmt.Fprintf(tw, "min:\t%v\nmedian:\t%v\nmax:\t%v\n95th percentile:\t%v\n99th percentile:\t%v\n",
+		agg.Min, agg.Median, agg.Max, agg.P95, agg.P99)
+	tw.Flush()
+	return buf.String()
+}
+
+// WriteCSV writes a csv file to the given Writer,
+// with a header row and one row per aggregate.
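+//
+// A minimal usage sketch (illustrative only, not part of the vendored source;
+// the name, the sample durations, and the os.Stdout destination are arbitrary):
+//
+//	agg := NewAggregate("reads", []time.Duration{
+//		10 * time.Millisecond,
+//		20 * time.Millisecond,
+//		40 * time.Millisecond,
+//	}, 0)
+//	if agg != nil {
+//		_ = WriteCSV([]*Aggregate{agg}, os.Stdout)
+//	}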
+func WriteCSV(aggs []*Aggregate, iow io.Writer) error { + w := csv.NewWriter(iow) + defer w.Flush() + err := w.Write([]string{"name", "count", "errors", "min", "median", "max", "p75", "p90", "p95", "p99"}) + if err != nil { + return err + } + for _, agg := range aggs { + err = w.Write([]string{ + agg.Name, strconv.Itoa(agg.Count), strconv.Itoa(agg.Errors), + agg.Min.String(), agg.Median.String(), agg.Max.String(), + agg.P75.String(), agg.P90.String(), agg.P95.String(), agg.P99.String(), + }) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/cloud.google.com/go/bigtable/not_go18.go b/vendor/cloud.google.com/go/bigtable/not_go18.go new file mode 100644 index 0000000..f86700d --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/not_go18.go @@ -0,0 +1,36 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.8 + +package bigtable + +import ( + "golang.org/x/net/context" + "google.golang.org/api/option" +) + +// OpenCensus only supports go 1.8 and higher. + +func openCensusOptions() []option.ClientOption { return nil } + +func traceStartSpan(ctx context.Context, _ string) context.Context { + return ctx +} + +func traceEndSpan(context.Context, error) { +} + +func tracePrintf(context.Context, map[string]interface{}, string, ...interface{}) { +} diff --git a/vendor/cloud.google.com/go/bigtable/reader.go b/vendor/cloud.google.com/go/bigtable/reader.go new file mode 100644 index 0000000..2991934 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/reader.go @@ -0,0 +1,250 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bigtable + +import ( + "bytes" + "fmt" + + btpb "google.golang.org/genproto/googleapis/bigtable/v2" +) + +// A Row is returned by ReadRows. The map is keyed by column family (the prefix +// of the column name before the colon). The values are the returned ReadItems +// for that column family in the order returned by Read. +type Row map[string][]ReadItem + +// Key returns the row's key, or "" if the row is empty. +func (r Row) Key() string { + for _, items := range r { + if len(items) > 0 { + return items[0].Row + } + } + return "" +} + +// A ReadItem is returned by Read. A ReadItem contains data from a specific row and column. +type ReadItem struct { + Row, Column string + Timestamp Timestamp + Value []byte +} + +// The current state of the read rows state machine. 
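+//
+// In summary, Process below implements these transitions: a chunk that opens
+// a row moves newRow to rowInProgress, or to cellInProgress when its value is
+// split across chunks (signaled by a nonzero value_size); a chunk carrying
+// commit_row completes the row and returns to newRow; reset_row discards the
+// partial row and also returns to newRow.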
+type rrState int64 + +const ( + newRow rrState = iota + rowInProgress + cellInProgress +) + +// chunkReader handles cell chunks from the read rows response and combines +// them into full Rows. +type chunkReader struct { + state rrState + curKey []byte + curFam string + curQual []byte + curTS int64 + curVal []byte + curRow Row + lastKey string +} + +// newChunkReader returns a new chunkReader for handling read rows responses. +func newChunkReader() *chunkReader { + return &chunkReader{state: newRow} +} + +// Process takes a cell chunk and returns a new Row if the given chunk +// completes a Row, or nil otherwise. +func (cr *chunkReader) Process(cc *btpb.ReadRowsResponse_CellChunk) (Row, error) { + var row Row + switch cr.state { + case newRow: + if err := cr.validateNewRow(cc); err != nil { + return nil, err + } + + cr.curRow = make(Row) + cr.curKey = cc.RowKey + cr.curFam = cc.FamilyName.Value + cr.curQual = cc.Qualifier.Value + cr.curTS = cc.TimestampMicros + row = cr.handleCellValue(cc) + + case rowInProgress: + if err := cr.validateRowInProgress(cc); err != nil { + return nil, err + } + + if cc.GetResetRow() { + cr.resetToNewRow() + return nil, nil + } + + if cc.FamilyName != nil { + cr.curFam = cc.FamilyName.Value + } + if cc.Qualifier != nil { + cr.curQual = cc.Qualifier.Value + } + cr.curTS = cc.TimestampMicros + row = cr.handleCellValue(cc) + + case cellInProgress: + if err := cr.validateCellInProgress(cc); err != nil { + return nil, err + } + if cc.GetResetRow() { + cr.resetToNewRow() + return nil, nil + } + row = cr.handleCellValue(cc) + } + + return row, nil +} + +// Close must be called after all cell chunks from the response +// have been processed. An error will be returned if the reader is +// in an invalid state, in which case the error should be propagated to the caller. +func (cr *chunkReader) Close() error { + if cr.state != newRow { + return fmt.Errorf("invalid state for end of stream %q", cr.state) + } + return nil +} + +// handleCellValue returns a Row if the cell value includes a commit, otherwise nil. +func (cr *chunkReader) handleCellValue(cc *btpb.ReadRowsResponse_CellChunk) Row { + if cc.ValueSize > 0 { + // ValueSize is specified so expect a split value of ValueSize bytes + if cr.curVal == nil { + cr.curVal = make([]byte, 0, cc.ValueSize) + } + cr.curVal = append(cr.curVal, cc.Value...) + cr.state = cellInProgress + } else { + // This cell is either the complete value or the last chunk of a split + if cr.curVal == nil { + cr.curVal = cc.Value + } else { + cr.curVal = append(cr.curVal, cc.Value...) 
+ } + cr.finishCell() + + if cc.GetCommitRow() { + return cr.commitRow() + } else { + cr.state = rowInProgress + } + } + + return nil +} + +func (cr *chunkReader) finishCell() { + ri := ReadItem{ + Row: string(cr.curKey), + Column: string(cr.curFam) + ":" + string(cr.curQual), + Timestamp: Timestamp(cr.curTS), + Value: cr.curVal, + } + cr.curRow[cr.curFam] = append(cr.curRow[cr.curFam], ri) + cr.curVal = nil +} + +func (cr *chunkReader) commitRow() Row { + row := cr.curRow + cr.lastKey = cr.curRow.Key() + cr.resetToNewRow() + return row +} + +func (cr *chunkReader) resetToNewRow() { + cr.curKey = nil + cr.curFam = "" + cr.curQual = nil + cr.curVal = nil + cr.curRow = nil + cr.curTS = 0 + cr.state = newRow +} + +func (cr *chunkReader) validateNewRow(cc *btpb.ReadRowsResponse_CellChunk) error { + if cc.GetResetRow() { + return fmt.Errorf("reset_row not allowed between rows") + } + if cc.RowKey == nil || cc.FamilyName == nil || cc.Qualifier == nil { + return fmt.Errorf("missing key field for new row %v", cc) + } + if cr.lastKey != "" && cr.lastKey >= string(cc.RowKey) { + return fmt.Errorf("out of order row key: %q, %q", cr.lastKey, string(cc.RowKey)) + } + return nil +} + +func (cr *chunkReader) validateRowInProgress(cc *btpb.ReadRowsResponse_CellChunk) error { + if err := cr.validateRowStatus(cc); err != nil { + return err + } + if cc.RowKey != nil && !bytes.Equal(cc.RowKey, cr.curKey) { + return fmt.Errorf("received new row key %q during existing row %q", cc.RowKey, cr.curKey) + } + if cc.FamilyName != nil && cc.Qualifier == nil { + return fmt.Errorf("family name %q specified without a qualifier", cc.FamilyName) + } + return nil +} + +func (cr *chunkReader) validateCellInProgress(cc *btpb.ReadRowsResponse_CellChunk) error { + if err := cr.validateRowStatus(cc); err != nil { + return err + } + if cr.curVal == nil { + return fmt.Errorf("no cached cell while CELL_IN_PROGRESS %v", cc) + } + if cc.GetResetRow() == false && cr.isAnyKeyPresent(cc) { + return fmt.Errorf("cell key components found while CELL_IN_PROGRESS %v", cc) + } + return nil +} + +func (cr *chunkReader) isAnyKeyPresent(cc *btpb.ReadRowsResponse_CellChunk) bool { + return cc.RowKey != nil || + cc.FamilyName != nil || + cc.Qualifier != nil || + cc.TimestampMicros != 0 +} + +// Validate a RowStatus, commit or reset, if present. +func (cr *chunkReader) validateRowStatus(cc *btpb.ReadRowsResponse_CellChunk) error { + // Resets can't be specified with any other part of a cell + if cc.GetResetRow() && (cr.isAnyKeyPresent(cc) || + cc.Value != nil || + cc.ValueSize != 0 || + cc.Labels != nil) { + return fmt.Errorf("reset must not be specified with other fields %v", cc) + } + if cc.GetCommitRow() && cc.ValueSize > 0 { + return fmt.Errorf("commit row found in between chunks in a cell") + } + return nil +} diff --git a/vendor/cloud.google.com/go/bigtable/reader_test.go b/vendor/cloud.google.com/go/bigtable/reader_test.go new file mode 100644 index 0000000..f202891 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/reader_test.go @@ -0,0 +1,344 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bigtable + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "strings" + "testing" + + "cloud.google.com/go/internal/testutil" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/wrappers" + btspb "google.golang.org/genproto/googleapis/bigtable/v2" +) + +// Indicates that a field in the proto should be omitted, rather than included +// as a wrapped empty string. +const nilStr = "<>" + +func TestSingleCell(t *testing.T) { + cr := newChunkReader() + + // All in one cell + row, err := cr.Process(cc("rk", "fm", "col", 1, "value", 0, true)) + if err != nil { + t.Fatalf("Processing chunk: %v", err) + } + if row == nil { + t.Fatalf("Missing row") + } + if len(row["fm"]) != 1 { + t.Fatalf("Family name length mismatch %d, %d", 1, len(row["fm"])) + } + want := []ReadItem{ri("rk", "fm", "col", 1, "value")} + if !testutil.Equal(row["fm"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm"], want) + } + if err := cr.Close(); err != nil { + t.Fatalf("Close: %v", err) + } +} + +func TestMultipleCells(t *testing.T) { + cr := newChunkReader() + + cr.Process(cc("rs", "fm1", "col1", 0, "val1", 0, false)) + cr.Process(cc("rs", "fm1", "col1", 1, "val2", 0, false)) + cr.Process(cc("rs", "fm1", "col2", 0, "val3", 0, false)) + cr.Process(cc("rs", "fm2", "col1", 0, "val4", 0, false)) + row, err := cr.Process(cc("rs", "fm2", "col2", 1, "extralongval5", 0, true)) + if err != nil { + t.Fatalf("Processing chunk: %v", err) + } + if row == nil { + t.Fatalf("Missing row") + } + + want := []ReadItem{ + ri("rs", "fm1", "col1", 0, "val1"), + ri("rs", "fm1", "col1", 1, "val2"), + ri("rs", "fm1", "col2", 0, "val3"), + } + if !testutil.Equal(row["fm1"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want) + } + want = []ReadItem{ + ri("rs", "fm2", "col1", 0, "val4"), + ri("rs", "fm2", "col2", 1, "extralongval5"), + } + if !testutil.Equal(row["fm2"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want) + } + if err := cr.Close(); err != nil { + t.Fatalf("Close: %v", err) + } +} + +func TestSplitCells(t *testing.T) { + cr := newChunkReader() + + cr.Process(cc("rs", "fm1", "col1", 0, "hello ", 11, false)) + cr.Process(ccData("world", 0, false)) + row, err := cr.Process(cc("rs", "fm1", "col2", 0, "val2", 0, true)) + if err != nil { + t.Fatalf("Processing chunk: %v", err) + } + if row == nil { + t.Fatalf("Missing row") + } + + want := []ReadItem{ + ri("rs", "fm1", "col1", 0, "hello world"), + ri("rs", "fm1", "col2", 0, "val2"), + } + if !testutil.Equal(row["fm1"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want) + } + if err := cr.Close(); err != nil { + t.Fatalf("Close: %v", err) + } +} + +func TestMultipleRows(t *testing.T) { + cr := newChunkReader() + + row, err := cr.Process(cc("rs1", "fm1", "col1", 1, "val1", 0, true)) + if err != nil { + t.Fatalf("Processing chunk: %v", err) + } + want := []ReadItem{ri("rs1", "fm1", "col1", 1, "val1")} + if !testutil.Equal(row["fm1"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want) + } + + row, err = cr.Process(cc("rs2", "fm2", "col2", 2, "val2", 0, true)) + if err != nil { + t.Fatalf("Processing chunk: %v", err) + } + want = []ReadItem{ri("rs2", "fm2", "col2", 2, "val2")} + if !testutil.Equal(row["fm2"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want) + } + + if err := cr.Close(); err 
!= nil { + t.Fatalf("Close: %v", err) + } +} + +func TestBlankQualifier(t *testing.T) { + cr := newChunkReader() + + row, err := cr.Process(cc("rs1", "fm1", "", 1, "val1", 0, true)) + if err != nil { + t.Fatalf("Processing chunk: %v", err) + } + want := []ReadItem{ri("rs1", "fm1", "", 1, "val1")} + if !testutil.Equal(row["fm1"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want) + } + + row, err = cr.Process(cc("rs2", "fm2", "col2", 2, "val2", 0, true)) + if err != nil { + t.Fatalf("Processing chunk: %v", err) + } + want = []ReadItem{ri("rs2", "fm2", "col2", 2, "val2")} + if !testutil.Equal(row["fm2"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want) + } + + if err := cr.Close(); err != nil { + t.Fatalf("Close: %v", err) + } +} + +func TestReset(t *testing.T) { + cr := newChunkReader() + + cr.Process(cc("rs", "fm1", "col1", 0, "val1", 0, false)) + cr.Process(cc("rs", "fm1", "col1", 1, "val2", 0, false)) + cr.Process(cc("rs", "fm1", "col2", 0, "val3", 0, false)) + cr.Process(ccReset()) + row, _ := cr.Process(cc("rs1", "fm1", "col1", 1, "val1", 0, true)) + want := []ReadItem{ri("rs1", "fm1", "col1", 1, "val1")} + if !testutil.Equal(row["fm1"], want) { + t.Fatalf("Reset: got: %v\nwant: %v\n", row["fm1"], want) + } + if err := cr.Close(); err != nil { + t.Fatalf("Close: %v", err) + } +} + +func TestNewFamEmptyQualifier(t *testing.T) { + cr := newChunkReader() + + cr.Process(cc("rs", "fm1", "col1", 0, "val1", 0, false)) + _, err := cr.Process(cc(nilStr, "fm2", nilStr, 0, "val2", 0, true)) + if err == nil { + t.Fatalf("Expected error on second chunk with no qualifier set") + } +} + +// The read rows acceptance test reads a json file specifying a number of tests, +// each consisting of one or more cell chunk text protos and one or more resulting +// cells or errors. +type AcceptanceTest struct { + Tests []TestCase `json:"tests"` +} + +type TestCase struct { + Name string `json:"name"` + Chunks []string `json:"chunks"` + Results []TestResult `json:"results"` +} + +type TestResult struct { + RK string `json:"rk"` + FM string `json:"fm"` + Qual string `json:"qual"` + TS int64 `json:"ts"` + Value string `json:"value"` + Error bool `json:"error"` // If true, expect an error. Ignore any other field. 
+}
+
+func TestAcceptance(t *testing.T) {
+	testJson, err := ioutil.ReadFile("./testdata/read-rows-acceptance-test.json")
+	if err != nil {
+		t.Fatalf("could not open acceptance test file: %v", err)
+	}
+
+	var accTest AcceptanceTest
+	err = json.Unmarshal(testJson, &accTest)
+	if err != nil {
+		t.Fatalf("could not parse acceptance test file: %v", err)
+	}
+
+	for _, test := range accTest.Tests {
+		runTestCase(t, test)
+	}
+}
+
+func runTestCase(t *testing.T, test TestCase) {
+	// Feed each chunk to a ChunkReader and collect the results it produces.
+	cr := newChunkReader()
+	var results []TestResult
+	var seenErr bool
+	for _, chunkText := range test.Chunks {
+		// Parse and pass each cell chunk to the ChunkReader
+		cc := &btspb.ReadRowsResponse_CellChunk{}
+		err := proto.UnmarshalText(chunkText, cc)
+		if err != nil {
+			t.Errorf("[%s] failed to unmarshal text proto: %s\n%s", test.Name, chunkText, err)
+			return
+		}
+		row, err := cr.Process(cc)
+		if err != nil {
+			results = append(results, TestResult{Error: true})
+			seenErr = true
+			break
+		} else {
+			// Turn the Row into TestResults
+			for fm, ris := range row {
+				for _, ri := range ris {
+					tr := TestResult{
+						RK:    ri.Row,
+						FM:    fm,
+						Qual:  strings.Split(ri.Column, ":")[1],
+						TS:    int64(ri.Timestamp),
+						Value: string(ri.Value),
+					}
+					results = append(results, tr)
+				}
+			}
+		}
+	}
+
+	// Only Close if we don't have an error yet; otherwise an error from Close is expected.
+	if !seenErr {
+		err := cr.Close()
+		if err != nil {
+			results = append(results, TestResult{Error: true})
+		}
+	}
+
+	got := toSet(results)
+	want := toSet(test.Results)
+	if !testutil.Equal(got, want) {
+		t.Fatalf("[%s]: got: %v\nwant: %v\n", test.Name, got, want)
+	}
+}
+
+func toSet(res []TestResult) map[TestResult]bool {
+	set := make(map[TestResult]bool)
+	for _, tr := range res {
+		set[tr] = true
+	}
+	return set
+}
+
+// ri returns a ReadItem for the given components
+func ri(rk string, fm string, qual string, ts int64, val string) ReadItem {
+	return ReadItem{Row: rk, Column: fmt.Sprintf("%s:%s", fm, qual), Value: []byte(val), Timestamp: Timestamp(ts)}
+}
+
+// cc returns a CellChunk proto
+func cc(rk string, fm string, qual string, ts int64, val string, size int32, commit bool) *btspb.ReadRowsResponse_CellChunk {
+	// The components of the cell key are wrapped and can be null or empty
+	var rkWrapper []byte
+	if rk == nilStr {
+		rkWrapper = nil
+	} else {
+		rkWrapper = []byte(rk)
+	}
+
+	var fmWrapper *wrappers.StringValue
+	if fm != nilStr {
+		fmWrapper = &wrappers.StringValue{Value: fm}
+	} else {
+		fmWrapper = nil
+	}
+
+	var qualWrapper *wrappers.BytesValue
+	if qual != nilStr {
+		qualWrapper = &wrappers.BytesValue{Value: []byte(qual)}
+	} else {
+		qualWrapper = nil
+	}
+
+	return &btspb.ReadRowsResponse_CellChunk{
+		RowKey:          rkWrapper,
+		FamilyName:      fmWrapper,
+		Qualifier:       qualWrapper,
+		TimestampMicros: ts,
+		Value:           []byte(val),
+		ValueSize:       size,
+		RowStatus:       &btspb.ReadRowsResponse_CellChunk_CommitRow{CommitRow: commit}}
+}
+
+// ccData returns a CellChunk with only a value and size
+func ccData(val string, size int32, commit bool) *btspb.ReadRowsResponse_CellChunk {
+	return cc(nilStr, nilStr, nilStr, 0, val, size, commit)
+}
+
+// ccReset returns a CellChunk with ResetRow set to true
+func ccReset() *btspb.ReadRowsResponse_CellChunk {
+	return &btspb.ReadRowsResponse_CellChunk{
+		RowStatus: &btspb.ReadRowsResponse_CellChunk_ResetRow{ResetRow: true}}
+}
diff --git a/vendor/cloud.google.com/go/bigtable/retry_test.go b/vendor/cloud.google.com/go/bigtable/retry_test.go
new file mode 100644
index 0000000..03a9389 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/retry_test.go @@ -0,0 +1,372 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package bigtable + +import ( + "strings" + "testing" + "time" + + "cloud.google.com/go/bigtable/bttest" + "cloud.google.com/go/internal/testutil" + "github.com/golang/protobuf/ptypes/wrappers" + "github.com/google/go-cmp/cmp" + "golang.org/x/net/context" + "google.golang.org/api/option" + btpb "google.golang.org/genproto/googleapis/bigtable/v2" + rpcpb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func setupFakeServer(opt ...grpc.ServerOption) (tbl *Table, cleanup func(), err error) { + srv, err := bttest.NewServer("localhost:0", opt...) + if err != nil { + return nil, nil, err + } + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure(), grpc.WithBlock()) + if err != nil { + return nil, nil, err + } + + client, err := NewClient(context.Background(), "client", "instance", option.WithGRPCConn(conn), option.WithGRPCDialOption(grpc.WithBlock())) + if err != nil { + return nil, nil, err + } + + adminClient, err := NewAdminClient(context.Background(), "client", "instance", option.WithGRPCConn(conn), option.WithGRPCDialOption(grpc.WithBlock())) + if err != nil { + return nil, nil, err + } + if err := adminClient.CreateTable(context.Background(), "table"); err != nil { + return nil, nil, err + } + if err := adminClient.CreateColumnFamily(context.Background(), "table", "cf"); err != nil { + return nil, nil, err + } + t := client.Open("table") + + cleanupFunc := func() { + adminClient.Close() + client.Close() + srv.Close() + } + return t, cleanupFunc, nil +} + +func TestRetryApply(t *testing.T) { + ctx := context.Background() + + errCount := 0 + code := codes.Unavailable // Will be retried + // Intercept requests and return an error or defer to the underlying handler + errInjector := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + if strings.HasSuffix(info.FullMethod, "MutateRow") && errCount < 3 { + errCount++ + return nil, status.Errorf(code, "") + } + return handler(ctx, req) + } + tbl, cleanup, err := setupFakeServer(grpc.UnaryInterceptor(errInjector)) + if err != nil { + t.Fatalf("fake server setup: %v", err) + } + defer cleanup() + + mut := NewMutation() + mut.Set("cf", "col", 1, []byte("val")) + if err := tbl.Apply(ctx, "row1", mut); err != nil { + t.Errorf("applying single mutation with retries: %v", err) + } + row, err := tbl.ReadRow(ctx, "row1") + if err != nil { + t.Errorf("reading single value with retries: %v", err) + } + if row == nil { + t.Errorf("applying single mutation with retries: could not read back row") + } + + code = codes.FailedPrecondition // Won't be retried + errCount = 0 + if err := tbl.Apply(ctx, "row", mut); err == nil { + t.Errorf("applying single mutation with no retries: no error") + } + + // Check 
and mutate + mutTrue := NewMutation() + mutTrue.DeleteRow() + mutFalse := NewMutation() + mutFalse.Set("cf", "col", 1, []byte("val")) + condMut := NewCondMutation(ValueFilter("."), mutTrue, mutFalse) + + errCount = 0 + code = codes.Unavailable // Will be retried + if err := tbl.Apply(ctx, "row1", condMut); err != nil { + t.Errorf("conditionally mutating row with retries: %v", err) + } + row, err = tbl.ReadRow(ctx, "row1") // row1 already in the table + if err != nil { + t.Errorf("reading single value after conditional mutation: %v", err) + } + if row != nil { + t.Errorf("reading single value after conditional mutation: row not deleted") + } + + errCount = 0 + code = codes.FailedPrecondition // Won't be retried + if err := tbl.Apply(ctx, "row", condMut); err == nil { + t.Errorf("conditionally mutating row with no retries: no error") + } +} + +func TestRetryApplyBulk(t *testing.T) { + ctx := context.Background() + + // Intercept requests and delegate to an interceptor defined by the test case + errCount := 0 + var f func(grpc.ServerStream) error + errInjector := func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + if strings.HasSuffix(info.FullMethod, "MutateRows") { + return f(ss) + } + return handler(ctx, ss) + } + + tbl, cleanup, err := setupFakeServer(grpc.StreamInterceptor(errInjector)) + defer cleanup() + if err != nil { + t.Fatalf("fake server setup: %v", err) + } + + errCount = 0 + // Test overall request failure and retries + f = func(ss grpc.ServerStream) error { + if errCount < 3 { + errCount++ + return status.Errorf(codes.Aborted, "") + } + return nil + } + mut := NewMutation() + mut.Set("cf", "col", 1, []byte{}) + errors, err := tbl.ApplyBulk(ctx, []string{"row2"}, []*Mutation{mut}) + if errors != nil || err != nil { + t.Errorf("bulk with request failure: got: %v, %v, want: nil", errors, err) + } + + // Test failures and retries in one request + errCount = 0 + m1 := NewMutation() + m1.Set("cf", "col", 1, []byte{}) + m2 := NewMutation() + m2.Set("cf", "col2", 1, []byte{}) + m3 := NewMutation() + m3.Set("cf", "col3", 1, []byte{}) + f = func(ss grpc.ServerStream) error { + var err error + req := new(btpb.MutateRowsRequest) + ss.RecvMsg(req) + switch errCount { + case 0: + // Retryable request failure + err = status.Errorf(codes.Unavailable, "") + case 1: + // Two mutations fail + writeMutateRowsResponse(ss, codes.Unavailable, codes.OK, codes.Aborted) + err = nil + case 2: + // Two failures were retried. One will succeed. + if want, got := 2, len(req.Entries); want != got { + t.Errorf("2 bulk retries, got: %d, want %d", got, want) + } + writeMutateRowsResponse(ss, codes.OK, codes.Aborted) + err = nil + case 3: + // One failure was retried and will succeed. + if want, got := 1, len(req.Entries); want != got { + t.Errorf("1 bulk retry, got: %d, want %d", got, want) + } + writeMutateRowsResponse(ss, codes.OK) + err = nil + } + errCount++ + return err + } + errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2", "row3"}, []*Mutation{m1, m2, m3}) + if errors != nil || err != nil { + t.Errorf("bulk with retries: got: %v, %v, want: nil", errors, err) + } + + // Test unretryable errors + niMut := NewMutation() + niMut.Set("cf", "col", ServerTime, []byte{}) // Non-idempotent + errCount = 0 + f = func(ss grpc.ServerStream) error { + var err error + req := new(btpb.MutateRowsRequest) + ss.RecvMsg(req) + switch errCount { + case 0: + // Give non-idempotent mutation a retryable error code. + // Nothing should be retried. 
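+			// A ServerTime mutation is not idempotent: replaying it would
+			// write a cell with a different server-assigned timestamp, so
+			// the client must surface the error rather than retry it.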
+ writeMutateRowsResponse(ss, codes.FailedPrecondition, codes.Aborted) + err = nil + case 1: + t.Errorf("unretryable errors: got one retry, want no retries") + } + errCount++ + return err + } + errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2"}, []*Mutation{m1, niMut}) + if err != nil { + t.Errorf("unretryable errors: request failed %v", err) + } + want := []error{ + status.Errorf(codes.FailedPrecondition, ""), + status.Errorf(codes.Aborted, ""), + } + if !testutil.Equal(want, errors) { + t.Errorf("unretryable errors: got: %v, want: %v", errors, want) + } + + // Test individual errors and a deadline exceeded + f = func(ss grpc.ServerStream) error { + writeMutateRowsResponse(ss, codes.FailedPrecondition, codes.OK, codes.Aborted) + return nil + } + ctx, _ = context.WithTimeout(ctx, 100*time.Millisecond) + errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2", "row3"}, []*Mutation{m1, m2, m3}) + wantErr := context.DeadlineExceeded + if wantErr != err { + t.Errorf("deadline exceeded error: got: %v, want: %v", err, wantErr) + } + if errors != nil { + t.Errorf("deadline exceeded errors: got: %v, want: nil", err) + } +} + +func writeMutateRowsResponse(ss grpc.ServerStream, codes ...codes.Code) error { + res := &btpb.MutateRowsResponse{Entries: make([]*btpb.MutateRowsResponse_Entry, len(codes))} + for i, code := range codes { + res.Entries[i] = &btpb.MutateRowsResponse_Entry{ + Index: int64(i), + Status: &rpcpb.Status{Code: int32(code), Message: ""}, + } + } + return ss.SendMsg(res) +} + +func TestRetainRowsAfter(t *testing.T) { + prevRowRange := NewRange("a", "z") + prevRowKey := "m" + want := NewRange("m\x00", "z") + got := prevRowRange.retainRowsAfter(prevRowKey) + if !testutil.Equal(want, got, cmp.AllowUnexported(RowRange{})) { + t.Errorf("range retry: got %v, want %v", got, want) + } + + prevRowRangeList := RowRangeList{NewRange("a", "d"), NewRange("e", "g"), NewRange("h", "l")} + prevRowKey = "f" + wantRowRangeList := RowRangeList{NewRange("f\x00", "g"), NewRange("h", "l")} + got = prevRowRangeList.retainRowsAfter(prevRowKey) + if !testutil.Equal(wantRowRangeList, got, cmp.AllowUnexported(RowRange{})) { + t.Errorf("range list retry: got %v, want %v", got, wantRowRangeList) + } + + prevRowList := RowList{"a", "b", "c", "d", "e", "f"} + prevRowKey = "b" + wantList := RowList{"c", "d", "e", "f"} + got = prevRowList.retainRowsAfter(prevRowKey) + if !testutil.Equal(wantList, got) { + t.Errorf("list retry: got %v, want %v", got, wantList) + } +} + +func TestRetryReadRows(t *testing.T) { + ctx := context.Background() + + // Intercept requests and delegate to an interceptor defined by the test case + errCount := 0 + var f func(grpc.ServerStream) error + errInjector := func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + if strings.HasSuffix(info.FullMethod, "ReadRows") { + return f(ss) + } + return handler(ctx, ss) + } + + tbl, cleanup, err := setupFakeServer(grpc.StreamInterceptor(errInjector)) + defer cleanup() + if err != nil { + t.Fatalf("fake server setup: %v", err) + } + + errCount = 0 + // Test overall request failure and retries + f = func(ss grpc.ServerStream) error { + var err error + req := new(btpb.ReadRowsRequest) + ss.RecvMsg(req) + switch errCount { + case 0: + // Retryable request failure + err = status.Errorf(codes.Unavailable, "") + case 1: + // Write two rows then error + if want, got := "a", string(req.Rows.RowRanges[0].GetStartKeyClosed()); want != got { + t.Errorf("first retry, no data received yet: got %q, 
want %q", got, want) + } + writeReadRowsResponse(ss, "a", "b") + err = status.Errorf(codes.Unavailable, "") + case 2: + // Retryable request failure + if want, got := "b\x00", string(req.Rows.RowRanges[0].GetStartKeyClosed()); want != got { + t.Errorf("2 range retries: got %q, want %q", got, want) + } + err = status.Errorf(codes.Unavailable, "") + case 3: + // Write two more rows + writeReadRowsResponse(ss, "c", "d") + err = nil + } + errCount++ + return err + } + + var got []string + tbl.ReadRows(ctx, NewRange("a", "z"), func(r Row) bool { + got = append(got, r.Key()) + return true + }) + want := []string{"a", "b", "c", "d"} + if !testutil.Equal(got, want) { + t.Errorf("retry range integration: got %v, want %v", got, want) + } +} + +func writeReadRowsResponse(ss grpc.ServerStream, rowKeys ...string) error { + var chunks []*btpb.ReadRowsResponse_CellChunk + for _, key := range rowKeys { + chunks = append(chunks, &btpb.ReadRowsResponse_CellChunk{ + RowKey: []byte(key), + FamilyName: &wrappers.StringValue{Value: "fm"}, + Qualifier: &wrappers.BytesValue{Value: []byte("col")}, + RowStatus: &btpb.ReadRowsResponse_CellChunk_CommitRow{CommitRow: true}, + }) + } + return ss.SendMsg(&btpb.ReadRowsResponse{Chunks: chunks}) +} diff --git a/vendor/cloud.google.com/go/bigtable/testdata/read-rows-acceptance-test.json b/vendor/cloud.google.com/go/bigtable/testdata/read-rows-acceptance-test.json new file mode 100644 index 0000000..4973831 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/testdata/read-rows-acceptance-test.json @@ -0,0 +1,1178 @@ +{ + "tests": [ + { + "name": "invalid - no commit", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - no cell key before commit", + "chunks": [ + "commit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - no cell key before value", + "chunks": [ + "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - new col family must specify qualifier", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"B\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "bare commit implies ts=0", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "commit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + } + ] + }, + { + "name": "simple row with timestamp", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: 
\"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "missing timestamp, implied ts=0", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "empty cell value", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + } + ] + }, + { + "name": "two unsplit cells", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "two qualifiers", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "D", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "two families", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "B", + "qual": "E", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "with labels", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nlabels: \"L_1\"\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nlabels: \"L_2\"\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "L_1", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "L_2", + "error": false + } + ] + }, + { + "name": "split cell, bare commit", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: 
\"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL\"\ncommit_row: false\n", + "commit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + } + ] + }, + { + "name": "split cell", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "split four ways", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"l\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"ue-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "L", + "error": false + } + ] + }, + { + "name": "two split cells", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "multi-qualifier splits", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_1\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "D", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "multi-qualifier multi-split", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"lue-VAL_1\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"lue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "D", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + 
}, + { + "name": "multi-family split", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_1\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "B", + "qual": "E", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "invalid - no commit between rows", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - no commit after first row", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - last row missing commit", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - duplicate row key", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - new row missing row key", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", + "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": 
"C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "two rows", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows implicit timestamp", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\nvalue: \"value-VAL\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows empty value", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows, one with multiple cells", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "D", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows, multiple cells", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: false\n", + "qualifier: \u003c\n 
value: \"F\"\n\u003e\ntimestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK_1", + "fm": "A", + "qual": "D", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "E", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "F", + "ts": 104, + "value": "value-VAL_4", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows, multiple cells, multiple families", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"M\"\n\u003e\nqualifier: \u003c\n value: \"O\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"N\"\n\u003e\nqualifier: \u003c\n value: \"P\"\n\u003e\ntimestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK_1", + "fm": "B", + "qual": "E", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "M", + "qual": "O", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "N", + "qual": "P", + "ts": 104, + "value": "value-VAL_4", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows, four cells, 2 labels", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nlabels: \"L_1\"\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 103\nlabels: \"L_3\"\nvalue: \"value-VAL_3\"\ncommit_row: false\n", + "timestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "L_1", + "error": false + }, + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "D", + "ts": 103, + "value": "value-VAL_3", + "label": "L_3", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "D", + "ts": 104, + "value": "value-VAL_4", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows with splits, same timestamp", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_1\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + 
"rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "invalid - bare reset", + "chunks": [ + "reset_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - bad reset, no commit", + "chunks": [ + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - missing key after reset", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "reset_row: true\n", + "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "no data after reset", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "reset_row: true\n" + ], + "results": null + }, + { + "name": "simple reset", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "reset to new val", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "reset to new qual", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "D", + "ts": 100, + "value": "value-VAL_1", + "label": "", + "error": false + } + ] + }, + { + "name": "reset with splits", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: 
\"value-VAL_2\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "reset two cells", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n", + "timestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + } + ] + }, + { + "name": "two resets", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_3", + "label": "", + "error": false + } + ] + }, + { + "name": "reset then two cells", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "B", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "B", + "qual": "D", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + } + ] + }, + { + "name": "reset to new row", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "reset in between chunks", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: 
\"a\"\nvalue_size: 10\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_1", + "label": "", + "error": false + } + ] + }, + { + "name": "invalid - reset with chunk", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\nreset_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - commit with chunk", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "empty cell chunk", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "commit_row: false\n", + "commit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + } + ] + } + ] +} \ No newline at end of file diff --git a/vendor/cloud.google.com/go/civil/civil.go b/vendor/cloud.google.com/go/civil/civil.go new file mode 100644 index 0000000..1cb2675 --- /dev/null +++ b/vendor/cloud.google.com/go/civil/civil.go @@ -0,0 +1,277 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package civil implements types for civil time, a time-zone-independent +// representation of time that follows the rules of the proleptic +// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second +// minutes. +// +// Because they lack location information, these types do not represent unique +// moments or intervals of time. Use time.Time for that purpose. +package civil + +import ( + "fmt" + "time" +) + +// A Date represents a date (year, month, day). +// +// This type does not include location information, and therefore does not +// describe a unique 24-hour timespan. +type Date struct { + Year int // Year (e.g., 2014). + Month time.Month // Month of the year (January = 1, ...). + Day int // Day of the month, starting at 1. 
+} + +// DateOf returns the Date in which a time occurs in that time's location. +func DateOf(t time.Time) Date { + var d Date + d.Year, d.Month, d.Day = t.Date() + return d +} + +// ParseDate parses a string in RFC3339 full-date format and returns the date value it represents. +func ParseDate(s string) (Date, error) { + t, err := time.Parse("2006-01-02", s) + if err != nil { + return Date{}, err + } + return DateOf(t), nil +} + +// String returns the date in RFC3339 full-date format. +func (d Date) String() string { + return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day) +} + +// IsValid reports whether the date is valid. +func (d Date) IsValid() bool { + return DateOf(d.In(time.UTC)) == d +} + +// In returns the time corresponding to time 00:00:00 of the date in the location. +// +// In is always consistent with time.Date, even when time.Date returns a time +// on a different day. For example, if loc is America/Indiana/Vincennes, then both +// time.Date(1955, time.May, 1, 0, 0, 0, 0, loc) +// and +// civil.Date{Year: 1955, Month: time.May, Day: 1}.In(loc) +// return 23:00:00 on April 30, 1955. +// +// In panics if loc is nil. +func (d Date) In(loc *time.Location) time.Time { + return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc) +} + +// AddDays returns the date that is n days in the future. +// n can also be negative to go into the past. +func (d Date) AddDays(n int) Date { + return DateOf(d.In(time.UTC).AddDate(0, 0, n)) +} + +// DaysSince returns the signed number of days between the date and s, not including the end day. +// This is the inverse operation to AddDays. +func (d Date) DaysSince(s Date) (days int) { + // We convert to Unix time so we do not have to worry about leap seconds: + // Unix time increases by exactly 86400 seconds per day. + deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix() + return int(deltaUnix / 86400) +} + +// Before reports whether d1 occurs before d2. +func (d1 Date) Before(d2 Date) bool { + if d1.Year != d2.Year { + return d1.Year < d2.Year + } + if d1.Month != d2.Month { + return d1.Month < d2.Month + } + return d1.Day < d2.Day +} + +// After reports whether d1 occurs after d2. +func (d1 Date) After(d2 Date) bool { + return d2.Before(d1) +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of d.String(). +func (d Date) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// The date is expected to be a string in a format accepted by ParseDate. +func (d *Date) UnmarshalText(data []byte) error { + var err error + *d, err = ParseDate(string(data)) + return err +} + +// A Time represents a time with nanosecond precision. +// +// This type does not include location information, and therefore does not +// describe a unique moment in time. +// +// This type exists to represent the TIME type in storage-based APIs like BigQuery. +// Most operations on Times are unlikely to be meaningful. Prefer the DateTime type. +type Time struct { + Hour int // The hour of the day in 24-hour format; range [0-23] + Minute int // The minute of the hour; range [0-59] + Second int // The second of the minute; range [0-59] + Nanosecond int // The nanosecond of the second; range [0-999999999] +} + +// TimeOf returns the Time representing the time of day in which a time occurs +// in that time's location. It ignores the date. 
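+// For example, a time one nanosecond past 15:08:43 round-trips through
+// String as a nine-digit fraction:
+//
+//	civil.TimeOf(time.Date(2014, 8, 20, 15, 8, 43, 1, time.UTC)).String() // "15:08:43.000000001"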
+func TimeOf(t time.Time) Time {
+	var tm Time
+	tm.Hour, tm.Minute, tm.Second = t.Clock()
+	tm.Nanosecond = t.Nanosecond()
+	return tm
+}
+
+// ParseTime parses a string and returns the time value it represents.
+// ParseTime accepts an extended form of the RFC3339 partial-time format. After
+// the HH:MM:SS part of the string, an optional fractional part may appear,
+// consisting of a decimal point followed by one to nine decimal digits.
+// (RFC3339 admits only one digit after the decimal point).
+func ParseTime(s string) (Time, error) {
+	t, err := time.Parse("15:04:05.999999999", s)
+	if err != nil {
+		return Time{}, err
+	}
+	return TimeOf(t), nil
+}
+
+// String returns the time in the format described in ParseTime. If Nanosecond
+// is zero, no fractional part will be generated. Otherwise, the result will
+// end with a fractional part consisting of a decimal point and nine digits.
+func (t Time) String() string {
+	s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second)
+	if t.Nanosecond == 0 {
+		return s
+	}
+	return s + fmt.Sprintf(".%09d", t.Nanosecond)
+}
+
+// IsValid reports whether the time is valid.
+func (t Time) IsValid() bool {
+	// Construct a non-zero time.
+	tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC)
+	return TimeOf(tm) == t
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The output is the result of t.String().
+func (t Time) MarshalText() ([]byte, error) {
+	return []byte(t.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The time is expected to be a string in a format accepted by ParseTime.
+func (t *Time) UnmarshalText(data []byte) error {
+	var err error
+	*t, err = ParseTime(string(data))
+	return err
+}
+
+// A DateTime represents a date and time.
+//
+// This type does not include location information, and therefore does not
+// describe a unique moment in time.
+type DateTime struct {
+	Date Date
+	Time Time
+}
+
+// Note: We deliberately do not embed Date into DateTime, to avoid promoting AddDays and DaysSince.
+
+// DateTimeOf returns the DateTime in which a time occurs in that time's location.
+func DateTimeOf(t time.Time) DateTime {
+	return DateTime{
+		Date: DateOf(t),
+		Time: TimeOf(t),
+	}
+}
+
+// ParseDateTime parses a string and returns the DateTime it represents.
+// ParseDateTime accepts a variant of the RFC3339 date-time format that omits
+// the time offset but includes an optional fractional time, as described in
+// ParseTime. Informally, the accepted format is
+//	YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF]
+// where the 'T' may be a lower-case 't'.
+func ParseDateTime(s string) (DateTime, error) {
+	t, err := time.Parse("2006-01-02T15:04:05.999999999", s)
+	if err != nil {
+		t, err = time.Parse("2006-01-02t15:04:05.999999999", s)
+		if err != nil {
+			return DateTime{}, err
+		}
+	}
+	return DateTimeOf(t), nil
+}
+
+// String returns the datetime in the format described in ParseDateTime.
+func (dt DateTime) String() string {
+	return dt.Date.String() + "T" + dt.Time.String()
+}
+
+// IsValid reports whether the datetime is valid.
+func (dt DateTime) IsValid() bool {
+	return dt.Date.IsValid() && dt.Time.IsValid()
+}
+
+// In returns the time corresponding to the DateTime in the given location.
+//
+// If the time is missing or ambiguous at the location, In returns the same
+// result as time.Date. For example, if loc is America/Indiana/Vincennes, then
+// both
+//	time.Date(1955, time.May, 1, 0, 30, 0, 0, loc)
+// and
+//	civil.DateTime{
+//		civil.Date{Year: 1955, Month: time.May, Day: 1},
+//		civil.Time{Minute: 30}}.In(loc)
+// return 23:30:00 on April 30, 1955.
+//
+// In panics if loc is nil.
+func (dt DateTime) In(loc *time.Location) time.Time {
+	return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc)
+}
+
+// Before reports whether dt1 occurs before dt2.
+func (dt1 DateTime) Before(dt2 DateTime) bool {
+	return dt1.In(time.UTC).Before(dt2.In(time.UTC))
+}
+
+// After reports whether dt1 occurs after dt2.
+func (dt1 DateTime) After(dt2 DateTime) bool {
+	return dt2.Before(dt1)
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The output is the result of dt.String().
+func (dt DateTime) MarshalText() ([]byte, error) {
+	return []byte(dt.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The datetime is expected to be a string in a format accepted by ParseDateTime.
+func (dt *DateTime) UnmarshalText(data []byte) error {
+	var err error
+	*dt, err = ParseDateTime(string(data))
+	return err
+}
diff --git a/vendor/cloud.google.com/go/civil/civil_test.go b/vendor/cloud.google.com/go/civil/civil_test.go
new file mode 100644
index 0000000..f50899c
--- /dev/null
+++ b/vendor/cloud.google.com/go/civil/civil_test.go
@@ -0,0 +1,442 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
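+
+// A brief sketch of typical Date usage exercised by these tests (the dates
+// are arbitrary examples):
+//
+//	d, _ := civil.ParseDate("2016-01-02")
+//	d.AddDays(30)                                           // 2016-02-01
+//	d.DaysSince(civil.Date{Year: 2015, Month: 12, Day: 31}) // 2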
+ +package civil + +import ( + "encoding/json" + "testing" + "time" + + "github.com/google/go-cmp/cmp" +) + +func TestDates(t *testing.T) { + for _, test := range []struct { + date Date + loc *time.Location + wantStr string + wantTime time.Time + }{ + { + date: Date{2014, 7, 29}, + loc: time.Local, + wantStr: "2014-07-29", + wantTime: time.Date(2014, time.July, 29, 0, 0, 0, 0, time.Local), + }, + { + date: DateOf(time.Date(2014, 8, 20, 15, 8, 43, 1, time.Local)), + loc: time.UTC, + wantStr: "2014-08-20", + wantTime: time.Date(2014, 8, 20, 0, 0, 0, 0, time.UTC), + }, + { + date: DateOf(time.Date(999, time.January, 26, 0, 0, 0, 0, time.Local)), + loc: time.UTC, + wantStr: "0999-01-26", + wantTime: time.Date(999, 1, 26, 0, 0, 0, 0, time.UTC), + }, + } { + if got := test.date.String(); got != test.wantStr { + t.Errorf("%#v.String() = %q, want %q", test.date, got, test.wantStr) + } + if got := test.date.In(test.loc); !got.Equal(test.wantTime) { + t.Errorf("%#v.In(%v) = %v, want %v", test.date, test.loc, got, test.wantTime) + } + } +} + +func TestDateIsValid(t *testing.T) { + for _, test := range []struct { + date Date + want bool + }{ + {Date{2014, 7, 29}, true}, + {Date{2000, 2, 29}, true}, + {Date{10000, 12, 31}, true}, + {Date{1, 1, 1}, true}, + {Date{0, 1, 1}, true}, // year zero is OK + {Date{-1, 1, 1}, true}, // negative year is OK + {Date{1, 0, 1}, false}, + {Date{1, 1, 0}, false}, + {Date{2016, 1, 32}, false}, + {Date{2016, 13, 1}, false}, + {Date{1, -1, 1}, false}, + {Date{1, 1, -1}, false}, + } { + got := test.date.IsValid() + if got != test.want { + t.Errorf("%#v: got %t, want %t", test.date, got, test.want) + } + } +} + +func TestParseDate(t *testing.T) { + for _, test := range []struct { + str string + want Date // if empty, expect an error + }{ + {"2016-01-02", Date{2016, 1, 2}}, + {"2016-12-31", Date{2016, 12, 31}}, + {"0003-02-04", Date{3, 2, 4}}, + {"999-01-26", Date{}}, + {"", Date{}}, + {"2016-01-02x", Date{}}, + } { + got, err := ParseDate(test.str) + if got != test.want { + t.Errorf("ParseDate(%q) = %+v, want %+v", test.str, got, test.want) + } + if err != nil && test.want != (Date{}) { + t.Errorf("Unexpected error %v from ParseDate(%q)", err, test.str) + } + } +} + +func TestDateArithmetic(t *testing.T) { + for _, test := range []struct { + desc string + start Date + end Date + days int + }{ + { + desc: "zero days noop", + start: Date{2014, 5, 9}, + end: Date{2014, 5, 9}, + days: 0, + }, + { + desc: "crossing a year boundary", + start: Date{2014, 12, 31}, + end: Date{2015, 1, 1}, + days: 1, + }, + { + desc: "negative number of days", + start: Date{2015, 1, 1}, + end: Date{2014, 12, 31}, + days: -1, + }, + { + desc: "full leap year", + start: Date{2004, 1, 1}, + end: Date{2005, 1, 1}, + days: 366, + }, + { + desc: "full non-leap year", + start: Date{2001, 1, 1}, + end: Date{2002, 1, 1}, + days: 365, + }, + { + desc: "crossing a leap second", + start: Date{1972, 6, 30}, + end: Date{1972, 7, 1}, + days: 1, + }, + { + desc: "dates before the unix epoch", + start: Date{101, 1, 1}, + end: Date{102, 1, 1}, + days: 365, + }, + } { + if got := test.start.AddDays(test.days); got != test.end { + t.Errorf("[%s] %#v.AddDays(%v) = %#v, want %#v", test.desc, test.start, test.days, got, test.end) + } + if got := test.end.DaysSince(test.start); got != test.days { + t.Errorf("[%s] %#v.Sub(%#v) = %v, want %v", test.desc, test.end, test.start, got, test.days) + } + } +} + +func TestDateBefore(t *testing.T) { + for _, test := range []struct { + d1, d2 Date + want bool + }{ + {Date{2016, 12, 
31}, Date{2017, 1, 1}, true}, + {Date{2016, 1, 1}, Date{2016, 1, 1}, false}, + {Date{2016, 12, 30}, Date{2016, 12, 31}, true}, + } { + if got := test.d1.Before(test.d2); got != test.want { + t.Errorf("%v.Before(%v): got %t, want %t", test.d1, test.d2, got, test.want) + } + } +} + +func TestDateAfter(t *testing.T) { + for _, test := range []struct { + d1, d2 Date + want bool + }{ + {Date{2016, 12, 31}, Date{2017, 1, 1}, false}, + {Date{2016, 1, 1}, Date{2016, 1, 1}, false}, + {Date{2016, 12, 30}, Date{2016, 12, 31}, false}, + } { + if got := test.d1.After(test.d2); got != test.want { + t.Errorf("%v.After(%v): got %t, want %t", test.d1, test.d2, got, test.want) + } + } +} + +func TestTimeToString(t *testing.T) { + for _, test := range []struct { + str string + time Time + roundTrip bool // ParseTime(str).String() == str? + }{ + {"13:26:33", Time{13, 26, 33, 0}, true}, + {"01:02:03.000023456", Time{1, 2, 3, 23456}, true}, + {"00:00:00.000000001", Time{0, 0, 0, 1}, true}, + {"13:26:03.1", Time{13, 26, 3, 100000000}, false}, + {"13:26:33.0000003", Time{13, 26, 33, 300}, false}, + } { + gotTime, err := ParseTime(test.str) + if err != nil { + t.Errorf("ParseTime(%q): got error: %v", test.str, err) + continue + } + if gotTime != test.time { + t.Errorf("ParseTime(%q) = %+v, want %+v", test.str, gotTime, test.time) + } + if test.roundTrip { + gotStr := test.time.String() + if gotStr != test.str { + t.Errorf("%#v.String() = %q, want %q", test.time, gotStr, test.str) + } + } + } +} + +func TestTimeOf(t *testing.T) { + for _, test := range []struct { + time time.Time + want Time + }{ + {time.Date(2014, 8, 20, 15, 8, 43, 1, time.Local), Time{15, 8, 43, 1}}, + {time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), Time{0, 0, 0, 0}}, + } { + if got := TimeOf(test.time); got != test.want { + t.Errorf("TimeOf(%v) = %+v, want %+v", test.time, got, test.want) + } + } +} + +func TestTimeIsValid(t *testing.T) { + for _, test := range []struct { + time Time + want bool + }{ + {Time{0, 0, 0, 0}, true}, + {Time{23, 0, 0, 0}, true}, + {Time{23, 59, 59, 999999999}, true}, + {Time{24, 59, 59, 999999999}, false}, + {Time{23, 60, 59, 999999999}, false}, + {Time{23, 59, 60, 999999999}, false}, + {Time{23, 59, 59, 1000000000}, false}, + {Time{-1, 0, 0, 0}, false}, + {Time{0, -1, 0, 0}, false}, + {Time{0, 0, -1, 0}, false}, + {Time{0, 0, 0, -1}, false}, + } { + got := test.time.IsValid() + if got != test.want { + t.Errorf("%#v: got %t, want %t", test.time, got, test.want) + } + } +} + +func TestDateTimeToString(t *testing.T) { + for _, test := range []struct { + str string + dateTime DateTime + roundTrip bool // ParseDateTime(str).String() == str? 
+ }{ + {"2016-03-22T13:26:33", DateTime{Date{2016, 03, 22}, Time{13, 26, 33, 0}}, true}, + {"2016-03-22T13:26:33.000000600", DateTime{Date{2016, 03, 22}, Time{13, 26, 33, 600}}, true}, + {"2016-03-22t13:26:33", DateTime{Date{2016, 03, 22}, Time{13, 26, 33, 0}}, false}, + } { + gotDateTime, err := ParseDateTime(test.str) + if err != nil { + t.Errorf("ParseDateTime(%q): got error: %v", test.str, err) + continue + } + if gotDateTime != test.dateTime { + t.Errorf("ParseDateTime(%q) = %+v, want %+v", test.str, gotDateTime, test.dateTime) + } + if test.roundTrip { + gotStr := test.dateTime.String() + if gotStr != test.str { + t.Errorf("%#v.String() = %q, want %q", test.dateTime, gotStr, test.str) + } + } + } +} + +func TestParseDateTimeErrors(t *testing.T) { + for _, str := range []string{ + "", + "2016-03-22", // just a date + "13:26:33", // just a time + "2016-03-22 13:26:33", // wrong separating character + "2016-03-22T13:26:33x", // extra at end + } { + if _, err := ParseDateTime(str); err == nil { + t.Errorf("ParseDateTime(%q) succeeded, want error", str) + } + } +} + +func TestDateTimeOf(t *testing.T) { + for _, test := range []struct { + time time.Time + want DateTime + }{ + {time.Date(2014, 8, 20, 15, 8, 43, 1, time.Local), + DateTime{Date{2014, 8, 20}, Time{15, 8, 43, 1}}}, + {time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), + DateTime{Date{1, 1, 1}, Time{0, 0, 0, 0}}}, + } { + if got := DateTimeOf(test.time); got != test.want { + t.Errorf("DateTimeOf(%v) = %+v, want %+v", test.time, got, test.want) + } + } +} + +func TestDateTimeIsValid(t *testing.T) { + // No need to be exhaustive here; it's just Date.IsValid && Time.IsValid. + for _, test := range []struct { + dt DateTime + want bool + }{ + {DateTime{Date{2016, 3, 20}, Time{0, 0, 0, 0}}, true}, + {DateTime{Date{2016, -3, 20}, Time{0, 0, 0, 0}}, false}, + {DateTime{Date{2016, 3, 20}, Time{24, 0, 0, 0}}, false}, + } { + got := test.dt.IsValid() + if got != test.want { + t.Errorf("%#v: got %t, want %t", test.dt, got, test.want) + } + } +} + +func TestDateTimeIn(t *testing.T) { + dt := DateTime{Date{2016, 1, 2}, Time{3, 4, 5, 6}} + got := dt.In(time.UTC) + want := time.Date(2016, 1, 2, 3, 4, 5, 6, time.UTC) + if !got.Equal(want) { + t.Errorf("got %v, want %v", got, want) + } +} + +func TestDateTimeBefore(t *testing.T) { + d1 := Date{2016, 12, 31} + d2 := Date{2017, 1, 1} + t1 := Time{5, 6, 7, 8} + t2 := Time{5, 6, 7, 9} + for _, test := range []struct { + dt1, dt2 DateTime + want bool + }{ + {DateTime{d1, t1}, DateTime{d2, t1}, true}, + {DateTime{d1, t1}, DateTime{d1, t2}, true}, + {DateTime{d2, t1}, DateTime{d1, t1}, false}, + {DateTime{d2, t1}, DateTime{d2, t1}, false}, + } { + if got := test.dt1.Before(test.dt2); got != test.want { + t.Errorf("%v.Before(%v): got %t, want %t", test.dt1, test.dt2, got, test.want) + } + } +} + +func TestDateTimeAfter(t *testing.T) { + d1 := Date{2016, 12, 31} + d2 := Date{2017, 1, 1} + t1 := Time{5, 6, 7, 8} + t2 := Time{5, 6, 7, 9} + for _, test := range []struct { + dt1, dt2 DateTime + want bool + }{ + {DateTime{d1, t1}, DateTime{d2, t1}, false}, + {DateTime{d1, t1}, DateTime{d1, t2}, false}, + {DateTime{d2, t1}, DateTime{d1, t1}, true}, + {DateTime{d2, t1}, DateTime{d2, t1}, false}, + } { + if got := test.dt1.After(test.dt2); got != test.want { + t.Errorf("%v.After(%v): got %t, want %t", test.dt1, test.dt2, got, test.want) + } + } +} + +func TestMarshalJSON(t *testing.T) { + for _, test := range []struct { + value interface{} + want string + }{ + {Date{1987, 4, 15}, `"1987-04-15"`}, + {Time{18, 54, 2, 0}, 
`"18:54:02"`},
+		{DateTime{Date{1987, 4, 15}, Time{18, 54, 2, 0}}, `"1987-04-15T18:54:02"`},
+	} {
+		bgot, err := json.Marshal(test.value)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if got := string(bgot); got != test.want {
+			t.Errorf("%#v: got %s, want %s", test.value, got, test.want)
+		}
+	}
+}
+
+func TestUnmarshalJSON(t *testing.T) {
+	var d Date
+	var tm Time
+	var dt DateTime
+	for _, test := range []struct {
+		data string
+		ptr  interface{}
+		want interface{}
+	}{
+		{`"1987-04-15"`, &d, &Date{1987, 4, 15}},
+		{`"1987-04-\u0031\u0035"`, &d, &Date{1987, 4, 15}},
+		{`"18:54:02"`, &tm, &Time{18, 54, 2, 0}},
+		{`"1987-04-15T18:54:02"`, &dt, &DateTime{Date{1987, 4, 15}, Time{18, 54, 2, 0}}},
+	} {
+		if err := json.Unmarshal([]byte(test.data), test.ptr); err != nil {
+			t.Fatalf("%s: %v", test.data, err)
+		}
+		if !cmp.Equal(test.ptr, test.want) {
+			t.Errorf("%s: got %#v, want %#v", test.data, test.ptr, test.want)
+		}
+	}
+
+	for _, bad := range []string{"", `""`, `"bad"`, `"1987-04-15x"`,
+		`19870415`,     // a JSON number
+		`11987-04-15x`, // not a JSON string
+	} {
+		if json.Unmarshal([]byte(bad), &d) == nil {
+			t.Errorf("%q, Date: got nil, want error", bad)
+		}
+		if json.Unmarshal([]byte(bad), &tm) == nil {
+			t.Errorf("%q, Time: got nil, want error", bad)
+		}
+		if json.Unmarshal([]byte(bad), &dt) == nil {
+			t.Errorf("%q, DateTime: got nil, want error", bad)
+		}
+	}
+}
diff --git a/vendor/cloud.google.com/go/cloud.go b/vendor/cloud.google.com/go/cloud.go
new file mode 100644
index 0000000..0be0df3
--- /dev/null
+++ b/vendor/cloud.google.com/go/cloud.go
@@ -0,0 +1,40 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package cloud is the root of the packages used to access Google Cloud
+Services. See https://godoc.org/cloud.google.com/go for a full list
+of sub-packages.
+
+Examples in this package show ways to authorize and authenticate the
+sub-packages.
+
+Connection Pooling
+
+Connection pooling differs in clients based on their transport. Cloud
+clients either rely on HTTP or gRPC transports to communicate
+with Google Cloud.
+
+Cloud clients that use HTTP (bigquery, compute, storage, and translate) rely on the
+underlying HTTP transport to cache connections for later re-use. These are cached to
+the default http.MaxIdleConns and http.MaxIdleConnsPerHost settings in
+http.DefaultTransport.
+
+For gRPC clients (all others in this repo), connection pooling is configurable. Users
+of cloud client libraries may specify option.WithGRPCConnectionPool(n) as a client
+option to NewClient calls. This configures the underlying gRPC connections to be
+pooled and addressed in a round-robin fashion.
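+
+For example, a gRPC-based client could be constructed with a pool of four
+connections (a sketch; the pubsub client, ctx, and project ID stand in for any
+gRPC-based client in this repo):
+
+	client, err := pubsub.NewClient(ctx, "my-project",
+		option.WithGRPCConnectionPool(4))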
+ +*/ +package cloud // import "cloud.google.com/go" diff --git a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/debuglet.go b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/debuglet.go new file mode 100644 index 0000000..cbed2be --- /dev/null +++ b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/debuglet.go @@ -0,0 +1,450 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux,go1.7 + +package main + +import ( + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "log" + "math/rand" + "os" + "sync" + "time" + + "cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints" + debuglet "cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller" + "cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector" + "cloud.google.com/go/compute/metadata" + "golang.org/x/debug" + "golang.org/x/debug/local" + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + cd "google.golang.org/api/clouddebugger/v2" +) + +var ( + appModule = flag.String("appmodule", "", "Optional application module name.") + appVersion = flag.String("appversion", "", "Optional application module version name.") + sourceContextFile = flag.String("sourcecontext", "", "File containing JSON-encoded source context.") + verbose = flag.Bool("v", false, "Output verbose log messages.") + projectNumber = flag.String("projectnumber", "", "Project number."+ + " If this is not set, it is read from the GCP metadata server.") + projectID = flag.String("projectid", "", "Project ID."+ + " If this is not set, it is read from the GCP metadata server.") + serviceAccountFile = flag.String("serviceaccountfile", "", "File containing JSON service account credentials.") +) + +const ( + maxCapturedStackFrames = 50 + maxCapturedVariables = 1000 +) + +func main() { + flag.Usage = usage + flag.Parse() + args := flag.Args() + if len(args) == 0 { + // The user needs to supply the name of the executable to run. 
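+		// For example: go-cloud-debug-agent [flags...] -- ./myprog -myflag
+		// (the target binary and its flags here are placeholders).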
+ flag.Usage() + return + } + if *projectNumber == "" { + var err error + *projectNumber, err = metadata.NumericProjectID() + if err != nil { + log.Print("Debuglet initialization: ", err) + } + } + if *projectID == "" { + var err error + *projectID, err = metadata.ProjectID() + if err != nil { + log.Print("Debuglet initialization: ", err) + } + } + sourceContexts, err := readSourceContextFile(*sourceContextFile) + if err != nil { + log.Print("Reading source context file: ", err) + } + var ts oauth2.TokenSource + ctx := context.Background() + if *serviceAccountFile != "" { + if ts, err = serviceAcctTokenSource(ctx, *serviceAccountFile, cd.CloudDebuggerScope); err != nil { + log.Fatalf("Error getting credentials from file %s: %v", *serviceAccountFile, err) + } + } else if ts, err = google.DefaultTokenSource(ctx, cd.CloudDebuggerScope); err != nil { + log.Print("Error getting application default credentials for Cloud Debugger:", err) + os.Exit(103) + } + c, err := debuglet.NewController(ctx, debuglet.Options{ + ProjectNumber: *projectNumber, + ProjectID: *projectID, + AppModule: *appModule, + AppVersion: *appVersion, + SourceContexts: sourceContexts, + Verbose: *verbose, + TokenSource: ts, + }) + if err != nil { + log.Fatal("Error connecting to Cloud Debugger: ", err) + } + prog, err := local.New(args[0]) + if err != nil { + log.Fatal("Error loading program: ", err) + } + // Load the program, but don't actually start it running yet. + if _, err = prog.Run(args[1:]...); err != nil { + log.Fatal("Error loading program: ", err) + } + bs := breakpoints.NewBreakpointStore(prog) + + // Seed the random number generator. + rand.Seed(time.Now().UnixNano()) + + // Now we want to do two things: run the user's program, and start sending + // List requests periodically to the Debuglet Controller to get breakpoints + // to set. + // + // We want to give the Debuglet Controller a chance to give us breakpoints + // before we start the program, otherwise we would miss any breakpoint + // triggers that occur during program startup -- for example, a breakpoint on + // the first line of main. But if the Debuglet Controller is not responding or + // is returning errors, we don't want to delay starting the program + // indefinitely. + // + // We pass a channel to breakpointListLoop, which will close it when the first + // List call finishes. Then we wait until either the channel is closed or a + // 5-second timer has finished before starting the program. + ch := make(chan bool) + // Start a goroutine that sends List requests to the Debuglet Controller, and + // sets any breakpoints it gets back. + go breakpointListLoop(ctx, c, bs, ch) + // Wait until 5 seconds have passed or breakpointListLoop has closed ch. + select { + case <-time.After(5 * time.Second): + case <-ch: + } + // Run the debuggee. + programLoop(ctx, c, bs, prog) +} + +// usage prints a usage message to stderr and exits. +func usage() { + me := "a.out" + if len(os.Args) >= 1 { + me = os.Args[0] + } + fmt.Fprintf(os.Stderr, "Usage of %s:\n", me) + fmt.Fprintf(os.Stderr, "\t%s [flags...] -- args...\n", me) + fmt.Fprintf(os.Stderr, "Flags:\n") + flag.PrintDefaults() + fmt.Fprintf(os.Stderr, + "See https://cloud.google.com/tools/cloud-debugger/setting-up-on-compute-engine for more information.\n") + os.Exit(2) +} + +// readSourceContextFile reads a JSON-encoded source context from the given file. +// It returns a non-empty slice on success. 
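+// A minimal file could look like this (one possible shape, using the API's
+// git source context; the URL and revision ID are placeholders):
+//
+//	{"git": {"url": "https://github.com/example/repo", "revisionId": "abc123"}}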
+func readSourceContextFile(filename string) ([]*cd.SourceContext, error) { + if filename == "" { + return nil, nil + } + scJSON, err := ioutil.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("reading file %q: %v", filename, err) + } + var sc cd.SourceContext + if err = json.Unmarshal(scJSON, &sc); err != nil { + return nil, fmt.Errorf("parsing file %q: %v", filename, err) + } + return []*cd.SourceContext{&sc}, nil +} + +// breakpointListLoop repeatedly calls the Debuglet Controller's List RPC, and +// passes the results to the BreakpointStore so it can set and unset breakpoints +// in the program. +// +// After the first List call finishes, ch is closed. +func breakpointListLoop(ctx context.Context, c *debuglet.Controller, bs *breakpoints.BreakpointStore, first chan bool) { + const ( + avgTimeBetweenCalls = time.Second + errorDelay = 5 * time.Second + ) + + // randomDuration returns a random duration with expected value avg. + randomDuration := func(avg time.Duration) time.Duration { + return time.Duration(rand.Int63n(int64(2*avg + 1))) + } + + var consecutiveFailures uint + + for { + callStart := time.Now() + resp, err := c.List(ctx) + if err != nil && err != debuglet.ErrListUnchanged { + log.Printf("Debuglet controller server error: %v", err) + } + if err == nil { + bs.ProcessBreakpointList(resp.Breakpoints) + } + + if first != nil { + // We've finished one call to List and set any breakpoints we received. + close(first) + first = nil + } + + // Asynchronously send updates for any breakpoints that caused an error when + // the BreakpointStore tried to process them. We don't wait for the update + // to finish before the program can exit, as we do for normal updates. + errorBps := bs.ErrorBreakpoints() + for _, bp := range errorBps { + go func(bp *cd.Breakpoint) { + if err := c.Update(ctx, bp.Id, bp); err != nil { + log.Printf("Failed to send breakpoint update for %s: %s", bp.Id, err) + } + }(bp) + } + + // Make the next call not too soon after the one we just did. + delay := randomDuration(avgTimeBetweenCalls) + + // If the call returned an error other than ErrListUnchanged, wait longer. + if err != nil && err != debuglet.ErrListUnchanged { + // Wait twice as long after each consecutive failure, to a maximum of 16x. + delay += randomDuration(errorDelay * (1 << consecutiveFailures)) + if consecutiveFailures < 4 { + consecutiveFailures++ + } + } else { + consecutiveFailures = 0 + } + + // Sleep until we reach time callStart+delay. If we've already passed that + // time, time.Sleep will return immediately -- this should be the common + // case, since the server will delay responding to List for a while when + // there are no changes to report. + time.Sleep(callStart.Add(delay).Sub(time.Now())) + } +} + +// programLoop runs the program being debugged to completion. When a breakpoint's +// conditions are satisfied, it sends an Update RPC to the Debuglet Controller. +// The function returns when the program exits and all Update RPCs have finished. +func programLoop(ctx context.Context, c *debuglet.Controller, bs *breakpoints.BreakpointStore, prog debug.Program) { + var wg sync.WaitGroup + for { + // Run the program until it hits a breakpoint or exits. + status, err := prog.Resume() + if err != nil { + break + } + + // Get the breakpoints at this address whose conditions were satisfied, + // and remove the ones that aren't logpoints. 
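+		// (LOG breakpoints stay set so they can fire again on later hits;
+		// any other action is one-shot, so the breakpoint is removed.)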
+ bps := bs.BreakpointsAtPC(status.PC) + bps = bpsWithConditionSatisfied(bps, prog) + for _, bp := range bps { + if bp.Action != "LOG" { + bs.RemoveBreakpoint(bp) + } + } + + if len(bps) == 0 { + continue + } + + // Evaluate expressions and get the stack. + vc := valuecollector.NewCollector(prog, maxCapturedVariables) + needStackFrames := false + for _, bp := range bps { + // If evaluating bp's condition didn't return an error, evaluate bp's + // expressions, and later get the stack frames. + if bp.Status == nil { + bp.EvaluatedExpressions = expressionValues(bp.Expressions, prog, vc) + needStackFrames = true + } + } + var ( + stack []*cd.StackFrame + stackFramesStatusMessage *cd.StatusMessage + ) + if needStackFrames { + stack, stackFramesStatusMessage = stackFrames(prog, vc) + } + + // Read variable values from the program. + variableTable := vc.ReadValues() + + // Start a goroutine to send updates to the Debuglet Controller or write + // to logs, concurrently with resuming the program. + // TODO: retry Update on failure. + for _, bp := range bps { + wg.Add(1) + switch bp.Action { + case "LOG": + go func(format string, evaluatedExpressions []*cd.Variable) { + s := valuecollector.LogString(format, evaluatedExpressions, variableTable) + log.Print(s) + wg.Done() + }(bp.LogMessageFormat, bp.EvaluatedExpressions) + bp.Status = nil + bp.EvaluatedExpressions = nil + default: + go func(bp *cd.Breakpoint) { + defer wg.Done() + bp.IsFinalState = true + if bp.Status == nil { + // If evaluating bp's condition didn't return an error, include the + // stack frames, variable table, and any status message produced when + // getting the stack frames. + bp.StackFrames = stack + bp.VariableTable = variableTable + bp.Status = stackFramesStatusMessage + } + if err := c.Update(ctx, bp.Id, bp); err != nil { + log.Printf("Failed to send breakpoint update for %s: %s", bp.Id, err) + } + }(bp) + } + } + } + + // Wait for all updates to finish before returning. + wg.Wait() +} + +// bpsWithConditionSatisfied returns the breakpoints whose conditions are true +// (or that do not have a condition.) +func bpsWithConditionSatisfied(bpsIn []*cd.Breakpoint, prog debug.Program) []*cd.Breakpoint { + var bpsOut []*cd.Breakpoint + for _, bp := range bpsIn { + cond, err := condTruth(bp.Condition, prog) + if err != nil { + bp.Status = errorStatusMessage(err.Error(), refersToBreakpointCondition) + // Include bp in the list to be updated when there's an error, so that + // the user gets a response. + bpsOut = append(bpsOut, bp) + } else if cond { + bpsOut = append(bpsOut, bp) + } + } + return bpsOut +} + +// condTruth evaluates a condition. +func condTruth(condition string, prog debug.Program) (bool, error) { + if condition == "" { + // A condition wasn't set. + return true, nil + } + val, err := prog.Evaluate(condition) + if err != nil { + return false, err + } + if v, ok := val.(bool); !ok { + return false, fmt.Errorf("condition expression has type %T, should be bool", val) + } else { + return v, nil + } +} + +// expressionValues evaluates a slice of expressions and returns a []*cd.Variable +// containing the results. +// If the result of an expression evaluation refers to values from the program's +// memory (e.g., the expression evaluates to a slice) a corresponding variable is +// added to the value collector, to be read later. 
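+// For example (the expressions are hypothetical; each result fills the
+// corresponding slot of the returned slice):
+//
+//	vals := expressionValues([]string{"x", "s"}, prog, vc)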
+func expressionValues(expressions []string, prog debug.Program, vc *valuecollector.Collector) []*cd.Variable { + evaluatedExpressions := make([]*cd.Variable, len(expressions)) + for i, exp := range expressions { + ee := &cd.Variable{Name: exp} + evaluatedExpressions[i] = ee + if val, err := prog.Evaluate(exp); err != nil { + ee.Status = errorStatusMessage(err.Error(), refersToBreakpointExpression) + } else { + vc.FillValue(val, ee) + } + } + return evaluatedExpressions +} + +// stackFrames returns a stack trace for the program. It passes references to +// function parameters and local variables to the value collector, so it can read +// their values later. +func stackFrames(prog debug.Program, vc *valuecollector.Collector) ([]*cd.StackFrame, *cd.StatusMessage) { + frames, err := prog.Frames(maxCapturedStackFrames) + if err != nil { + return nil, errorStatusMessage("Error getting stack: "+err.Error(), refersToUnspecified) + } + stackFrames := make([]*cd.StackFrame, len(frames)) + for i, f := range frames { + frame := &cd.StackFrame{} + frame.Function = f.Function + for _, v := range f.Params { + frame.Arguments = append(frame.Arguments, vc.AddVariable(debug.LocalVar(v))) + } + for _, v := range f.Vars { + frame.Locals = append(frame.Locals, vc.AddVariable(v)) + } + frame.Location = &cd.SourceLocation{ + Path: f.File, + Line: int64(f.Line), + } + stackFrames[i] = frame + } + return stackFrames, nil +} + +// errorStatusMessage returns a *cd.StatusMessage indicating an error, +// with the given message and refersTo field. +func errorStatusMessage(msg string, refersTo int) *cd.StatusMessage { + return &cd.StatusMessage{ + Description: &cd.FormatMessage{Format: "$0", Parameters: []string{msg}}, + IsError: true, + RefersTo: refersToString[refersTo], + } +} + +const ( + // RefersTo values for cd.StatusMessage. + refersToUnspecified = iota + refersToBreakpointCondition + refersToBreakpointExpression +) + +// refersToString contains the strings for each refersTo value. +// See the definition of StatusMessage in the v2/clouddebugger package. +var refersToString = map[int]string{ + refersToUnspecified: "UNSPECIFIED", + refersToBreakpointCondition: "BREAKPOINT_CONDITION", + refersToBreakpointExpression: "BREAKPOINT_EXPRESSION", +} + +func serviceAcctTokenSource(ctx context.Context, filename string, scope ...string) (oauth2.TokenSource, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("cannot read service account file: %v", err) + } + cfg, err := google.JWTConfigFromJSON(data, scope...) + if err != nil { + return nil, fmt.Errorf("google.JWTConfigFromJSON: %v", err) + } + return cfg.TokenSource(ctx), nil +} diff --git a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints/breakpoints.go b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints/breakpoints.go new file mode 100644 index 0000000..afe07cb --- /dev/null +++ b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints/breakpoints.go @@ -0,0 +1,174 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package breakpoints handles breakpoint requests we get from the user through +// the Debuglet Controller, and manages corresponding breakpoints set in the code. +package breakpoints + +import ( + "log" + "sync" + + "golang.org/x/debug" + cd "google.golang.org/api/clouddebugger/v2" +) + +// BreakpointStore stores the set of breakpoints for a program. +type BreakpointStore struct { + mu sync.Mutex + // prog is the program being debugged. + prog debug.Program + // idToBreakpoint is a map from breakpoint identifier to *cd.Breakpoint. The + // map value is nil if the breakpoint is inactive. A breakpoint is active if: + // - We received it from the Debuglet Controller, and it was active at the time; + // - We were able to set code breakpoints for it; + // - We have not reached any of those code breakpoints while satisfying the + // breakpoint's conditions, or the breakpoint has action LOG; and + // - The Debuglet Controller hasn't informed us the breakpoint has become inactive. + idToBreakpoint map[string]*cd.Breakpoint + // pcToBps and bpToPCs store the many-to-many relationship between breakpoints we + // received from the Debuglet Controller and the code breakpoints we set for them. + pcToBps map[uint64][]*cd.Breakpoint + bpToPCs map[*cd.Breakpoint][]uint64 + // errors contains any breakpoints which couldn't be set because they caused an + // error. These are retrieved with ErrorBreakpoints, and the caller is + // expected to handle sending updates for them. + errors []*cd.Breakpoint +} + +// NewBreakpointStore returns a BreakpointStore for the given program. +func NewBreakpointStore(prog debug.Program) *BreakpointStore { + return &BreakpointStore{ + idToBreakpoint: make(map[string]*cd.Breakpoint), + pcToBps: make(map[uint64][]*cd.Breakpoint), + bpToPCs: make(map[*cd.Breakpoint][]uint64), + prog: prog, + } +} + +// ProcessBreakpointList applies updates received from the Debuglet Controller through a List call. +func (bs *BreakpointStore) ProcessBreakpointList(bps []*cd.Breakpoint) { + bs.mu.Lock() + defer bs.mu.Unlock() + for _, bp := range bps { + if storedBp, ok := bs.idToBreakpoint[bp.Id]; ok { + if storedBp != nil && bp.IsFinalState { + // IsFinalState indicates that the breakpoint has been made inactive. + bs.removeBreakpointLocked(storedBp) + } + } else { + if bp.IsFinalState { + // The controller is notifying us that the breakpoint is no longer active, + // but we didn't know about it anyway. + continue + } + if bp.Action != "" && bp.Action != "CAPTURE" && bp.Action != "LOG" { + bp.IsFinalState = true + bp.Status = &cd.StatusMessage{ + Description: &cd.FormatMessage{Format: "Action is not supported"}, + IsError: true, + } + bs.errors = append(bs.errors, bp) + // Note in idToBreakpoint that we've already seen this breakpoint, so that we + // don't try to report it as an error multiple times. 
+ bs.idToBreakpoint[bp.Id] = nil + continue + } + pcs, err := bs.prog.BreakpointAtLine(bp.Location.Path, uint64(bp.Location.Line)) + if err != nil { + log.Printf("error setting breakpoint at %s:%d: %v", bp.Location.Path, bp.Location.Line, err) + } + if len(pcs) == 0 { + // We can't find a PC for this breakpoint's source line, so don't make it active. + // TODO: we could snap the line to a location where we can break, or report an error to the user. + bs.idToBreakpoint[bp.Id] = nil + } else { + bs.idToBreakpoint[bp.Id] = bp + for _, pc := range pcs { + bs.pcToBps[pc] = append(bs.pcToBps[pc], bp) + } + bs.bpToPCs[bp] = pcs + } + } + } +} + +// ErrorBreakpoints returns a slice of Breakpoints that caused errors when the +// BreakpointStore tried to process them, and resets the list of such +// breakpoints. +// The caller is expected to send updates to the server to indicate the errors. +func (bs *BreakpointStore) ErrorBreakpoints() []*cd.Breakpoint { + bs.mu.Lock() + defer bs.mu.Unlock() + bps := bs.errors + bs.errors = nil + return bps +} + +// BreakpointsAtPC returns all the breakpoints for which we set a code +// breakpoint at the given address. +func (bs *BreakpointStore) BreakpointsAtPC(pc uint64) []*cd.Breakpoint { + bs.mu.Lock() + defer bs.mu.Unlock() + return bs.pcToBps[pc] +} + +// RemoveBreakpoint makes the given breakpoint inactive. +// This is called when either the debugged program hits the breakpoint, or the Debuglet +// Controller informs us that the breakpoint is now inactive. +func (bs *BreakpointStore) RemoveBreakpoint(bp *cd.Breakpoint) { + bs.mu.Lock() + bs.removeBreakpointLocked(bp) + bs.mu.Unlock() +} + +func (bs *BreakpointStore) removeBreakpointLocked(bp *cd.Breakpoint) { + // Set the ID's corresponding breakpoint to nil, so that we won't activate it + // if we see it again. + // TODO: we could delete it after a few seconds. + bs.idToBreakpoint[bp.Id] = nil + + // Delete bp from the list of cd breakpoints at each of its corresponding + // code breakpoint locations, and delete any code breakpoints which no longer + // have a corresponding cd breakpoint. + var codeBreakpointsToDelete []uint64 + for _, pc := range bs.bpToPCs[bp] { + bps := remove(bs.pcToBps[pc], bp) + if len(bps) == 0 { + // bp was the last breakpoint set at this PC, so delete the code breakpoint. + codeBreakpointsToDelete = append(codeBreakpointsToDelete, pc) + delete(bs.pcToBps, pc) + } else { + bs.pcToBps[pc] = bps + } + } + if len(codeBreakpointsToDelete) > 0 { + bs.prog.DeleteBreakpoints(codeBreakpointsToDelete) + } + delete(bs.bpToPCs, bp) +} + +// remove updates rs by removing r, then returns rs. +// The mutex in the BreakpointStore which contains rs should be held. +func remove(rs []*cd.Breakpoint, r *cd.Breakpoint) []*cd.Breakpoint { + for i := range rs { + if rs[i] == r { + rs[i] = rs[len(rs)-1] + rs = rs[0 : len(rs)-1] + return rs + } + } + // We shouldn't reach here. + return rs +} diff --git a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints/breakpoints_test.go b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints/breakpoints_test.go new file mode 100644 index 0000000..d3b7750 --- /dev/null +++ b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints/breakpoints_test.go @@ -0,0 +1,168 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package breakpoints
+
+import (
+	"testing"
+
+	"cloud.google.com/go/internal/testutil"
+	"golang.org/x/debug"
+	cd "google.golang.org/api/clouddebugger/v2"
+)
+
+var (
+	testPC1     uint64 = 0x1234
+	testPC2     uint64 = 0x5678
+	testPC3     uint64 = 0x3333
+	testFile           = "foo.go"
+	testLine    uint64 = 42
+	testLine2   uint64 = 99
+	testLogPC   uint64 = 0x9abc
+	testLogLine uint64 = 43
+	testBadPC   uint64 = 0xdef0
+	testBadLine uint64 = 44
+	testBP             = &cd.Breakpoint{
+		Action:       "CAPTURE",
+		Id:           "TestBreakpoint",
+		IsFinalState: false,
+		Location:     &cd.SourceLocation{Path: testFile, Line: int64(testLine)},
+	}
+	testBP2 = &cd.Breakpoint{
+		Action:       "CAPTURE",
+		Id:           "TestBreakpoint2",
+		IsFinalState: false,
+		Location:     &cd.SourceLocation{Path: testFile, Line: int64(testLine2)},
+	}
+	testLogBP = &cd.Breakpoint{
+		Action:       "LOG",
+		Id:           "TestLogBreakpoint",
+		IsFinalState: false,
+		Location:     &cd.SourceLocation{Path: testFile, Line: int64(testLogLine)},
+	}
+	testBadBP = &cd.Breakpoint{
+		Action:       "BEEP",
+		Id:           "TestBadBreakpoint",
+		IsFinalState: false,
+		Location:     &cd.SourceLocation{Path: testFile, Line: int64(testBadLine)},
+	}
+)
+
+func TestBreakpointStore(t *testing.T) {
+	p := &Program{breakpointPCs: make(map[uint64]bool)}
+	bs := NewBreakpointStore(p)
+	checkPCs := func(expected map[uint64]bool) {
+		if !testutil.Equal(p.breakpointPCs, expected) {
+			t.Errorf("got breakpoint map %v want %v", p.breakpointPCs, expected)
+		}
+	}
+	bs.ProcessBreakpointList([]*cd.Breakpoint{testBP, testBP2, testLogBP, testBadBP})
+	checkPCs(map[uint64]bool{
+		testPC1:   true,
+		testPC2:   true,
+		testPC3:   true,
+		testLogPC: true,
+	})
+	for _, test := range []struct {
+		pc       uint64
+		expected []*cd.Breakpoint
+	}{
+		{testPC1, []*cd.Breakpoint{testBP}},
+		{testPC2, []*cd.Breakpoint{testBP}},
+		{testPC3, []*cd.Breakpoint{testBP2}},
+		{testLogPC, []*cd.Breakpoint{testLogBP}},
+	} {
+		if bps := bs.BreakpointsAtPC(test.pc); !testutil.Equal(bps, test.expected) {
+			t.Errorf("BreakpointsAtPC(%x): got %v want %v", test.pc, bps, test.expected)
+		}
+	}
+	testBP2.IsFinalState = true
+	bs.ProcessBreakpointList([]*cd.Breakpoint{testBP, testBP2, testLogBP, testBadBP})
+	checkPCs(map[uint64]bool{
+		testPC1:   true,
+		testPC2:   true,
+		testPC3:   false,
+		testLogPC: true,
+	})
+	bs.RemoveBreakpoint(testBP)
+	checkPCs(map[uint64]bool{
+		testPC1:   false,
+		testPC2:   false,
+		testPC3:   false,
+		testLogPC: true,
+	})
+	for _, pc := range []uint64{testPC1, testPC2, testPC3} {
+		if bps := bs.BreakpointsAtPC(pc); len(bps) != 0 {
+			t.Errorf("BreakpointsAtPC(%x): got %v want []", pc, bps)
+		}
+	}
+	// bs.ErrorBreakpoints should return testBadBP.
+	errorBps := bs.ErrorBreakpoints()
+	if len(errorBps) != 1 {
+		t.Errorf("ErrorBreakpoints: got %d want 1", len(errorBps))
+	} else {
+		bp := errorBps[0]
+		if bp.Id != testBadBP.Id {
+			t.Errorf("ErrorBreakpoints: got id %q want %q", bp.Id, testBadBP.Id)
+		}
+		if bp.Status == nil || !bp.Status.IsError {
+			t.Errorf("ErrorBreakpoints: got %v, want error", bp.Status)
+		}
+	}
+	// The error should have been removed by the last call to bs.ErrorBreakpoints.
+	errorBps = bs.ErrorBreakpoints()
+	if len(errorBps) != 0 {
+		t.Errorf("ErrorBreakpoints: got %d want 0", len(errorBps))
+	}
+	// Even if testBadBP is sent in a new list, it should not be returned again.
+	bs.ProcessBreakpointList([]*cd.Breakpoint{testBadBP})
+	errorBps = bs.ErrorBreakpoints()
+	if len(errorBps) != 0 {
+		t.Errorf("ErrorBreakpoints: got %d want 0", len(errorBps))
+	}
+}
+
+// Program implements the similarly-named interface in x/debug.
+// BreakpointStore should only call its BreakpointAtLine and DeleteBreakpoints methods.
+type Program struct {
+	debug.Program
+	// breakpointPCs contains the state of code breakpoints -- true if the
+	// breakpoint is currently set, false if it has been deleted.
+	breakpointPCs map[uint64]bool
+}
+
+func (p *Program) BreakpointAtLine(file string, line uint64) ([]uint64, error) {
+	var pcs []uint64
+	switch {
+	case file == testFile && line == testLine:
+		pcs = []uint64{testPC1, testPC2}
+	case file == testFile && line == testLine2:
+		pcs = []uint64{testPC3}
+	case file == testFile && line == testLogLine:
+		pcs = []uint64{testLogPC}
+	default:
+		pcs = []uint64{0xbad}
+	}
+	for _, pc := range pcs {
+		p.breakpointPCs[pc] = true
+	}
+	return pcs, nil
+}
+
+func (p *Program) DeleteBreakpoints(pcs []uint64) error {
+	for _, pc := range pcs {
+		p.breakpointPCs[pc] = false
+	}
+	return nil
+}
diff --git a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client.go b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client.go
new file mode 100644
index 0000000..2571583
--- /dev/null
+++ b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client.go
@@ -0,0 +1,291 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package controller is a library for interacting with the Google Cloud Debugger's Debuglet Controller service.
+package controller
+
+import (
+	"crypto/sha256"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"log"
+	"sync"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2"
+	cd "google.golang.org/api/clouddebugger/v2"
+	"google.golang.org/api/googleapi"
+	"google.golang.org/api/option"
+	htransport "google.golang.org/api/transport/http"
+)
+
+const (
+	// agentVersionString identifies the agent to the service.
+	agentVersionString = "google.com/go-gcp/v0.2"
+	// initWaitToken is the wait token sent in the first List request to a server.
+	initWaitToken = "init"
+)
+
+var (
+	// ErrListUnchanged is returned by List if the server time limit is reached
+	// before the list of breakpoints changes.
+	ErrListUnchanged = errors.New("breakpoint list unchanged")
+	// ErrDebuggeeDisabled is returned by List or Update if the server has disabled
+	// this Debuggee. The caller can retry later.
+	ErrDebuggeeDisabled = errors.New("debuglet disabled by server")
+)
+
+// Controller manages a connection to the Debuglet Controller service.
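+// A minimal polling loop might look like this (a sketch; the Options values
+// are placeholders and error handling is elided):
+//
+//	c, _ := NewController(ctx, Options{ProjectID: "my-project", TokenSource: ts})
+//	for {
+//		if resp, err := c.List(ctx); err == nil {
+//			// ...set breakpoints from resp.Breakpoints...
+//		}
+//	}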
+type Controller struct { + s serviceInterface + // waitToken is sent with List requests so the server knows which set of + // breakpoints this client has already seen. Each successful List request + // returns a new waitToken to send in the next request. + waitToken string + // verbose determines whether to do some logging + verbose bool + // options, uniquifier and description are used in register. + options Options + uniquifier string + description string + // labels are included when registering the debuggee. They should contain + // the module name, version and minorversion, and are used by the debug UI + // to label the correct version active for debugging. + labels map[string]string + // mu protects debuggeeID + mu sync.Mutex + // debuggeeID is returned from the server on registration, and is passed back + // to the server in List and Update requests. + debuggeeID string +} + +// Options controls how the Debuglet Controller client identifies itself to the server. +// See https://cloud.google.com/storage/docs/projects and +// https://cloud.google.com/tools/cloud-debugger/setting-up-on-compute-engine +// for further documentation of these parameters. +type Options struct { + ProjectNumber string // GCP Project Number. + ProjectID string // GCP Project ID. + AppModule string // Module name for the debugged program. + AppVersion string // Version number for this module. + SourceContexts []*cd.SourceContext // Description of source. + Verbose bool + TokenSource oauth2.TokenSource // Source of Credentials used for Stackdriver Debugger. +} + +type serviceInterface interface { + Register(ctx context.Context, req *cd.RegisterDebuggeeRequest) (*cd.RegisterDebuggeeResponse, error) + Update(ctx context.Context, debuggeeID, breakpointID string, req *cd.UpdateActiveBreakpointRequest) (*cd.UpdateActiveBreakpointResponse, error) + List(ctx context.Context, debuggeeID, waitToken string) (*cd.ListActiveBreakpointsResponse, error) +} + +var newService = func(ctx context.Context, tokenSource oauth2.TokenSource) (serviceInterface, error) { + httpClient, endpoint, err := htransport.NewClient(ctx, option.WithTokenSource(tokenSource)) + if err != nil { + return nil, err + } + s, err := cd.New(httpClient) + if err != nil { + return nil, err + } + if endpoint != "" { + s.BasePath = endpoint + } + return &service{s: s}, nil +} + +type service struct { + s *cd.Service +} + +func (s service) Register(ctx context.Context, req *cd.RegisterDebuggeeRequest) (*cd.RegisterDebuggeeResponse, error) { + call := cd.NewControllerDebuggeesService(s.s).Register(req) + return call.Context(ctx).Do() +} + +func (s service) Update(ctx context.Context, debuggeeID, breakpointID string, req *cd.UpdateActiveBreakpointRequest) (*cd.UpdateActiveBreakpointResponse, error) { + call := cd.NewControllerDebuggeesBreakpointsService(s.s).Update(debuggeeID, breakpointID, req) + return call.Context(ctx).Do() +} + +func (s service) List(ctx context.Context, debuggeeID, waitToken string) (*cd.ListActiveBreakpointsResponse, error) { + call := cd.NewControllerDebuggeesBreakpointsService(s.s).List(debuggeeID) + call.WaitToken(waitToken) + return call.Context(ctx).Do() +} + +// NewController connects to the Debuglet Controller server using the given options, +// and returns a Controller for that connection. 
+// Google Application Default Credentials are used to connect to the Debuglet Controller; +// see https://developers.google.com/identity/protocols/application-default-credentials +func NewController(ctx context.Context, o Options) (*Controller, error) { + // We build a JSON encoding of o.SourceContexts so we can hash it. + scJSON, err := json.Marshal(o.SourceContexts) + if err != nil { + scJSON = nil + o.SourceContexts = nil + } + const minorversion = "107157" // any arbitrary numeric string + + // Compute a uniquifier string by hashing the project number, app module name, + // app module version, debuglet version, and source context. + // The choice of hash function is arbitrary. + h := sha256.Sum256([]byte(fmt.Sprintf("%d %s %d %s %d %s %d %s %d %s %d %s", + len(o.ProjectNumber), o.ProjectNumber, + len(o.AppModule), o.AppModule, + len(o.AppVersion), o.AppVersion, + len(agentVersionString), agentVersionString, + len(scJSON), scJSON, + len(minorversion), minorversion))) + uniquifier := fmt.Sprintf("%X", h[0:16]) // 32 hex characters + + description := o.ProjectID + if o.AppModule != "" { + description += "-" + o.AppModule + } + if o.AppVersion != "" { + description += "-" + o.AppVersion + } + + s, err := newService(ctx, o.TokenSource) + if err != nil { + return nil, err + } + + // Construct client. + c := &Controller{ + s: s, + waitToken: initWaitToken, + verbose: o.Verbose, + options: o, + uniquifier: uniquifier, + description: description, + labels: map[string]string{ + "module": o.AppModule, + "version": o.AppVersion, + "minorversion": minorversion, + }, + } + + return c, nil +} + +func (c *Controller) getDebuggeeID(ctx context.Context) (string, error) { + c.mu.Lock() + defer c.mu.Unlock() + if c.debuggeeID != "" { + return c.debuggeeID, nil + } + // The debuglet hasn't been registered yet, or it is disabled and we should try registering again. + if err := c.register(ctx); err != nil { + return "", err + } + return c.debuggeeID, nil +} + +// List retrieves the current list of breakpoints from the server. +// If the set of breakpoints on the server is the same as the one returned in +// the previous call to List, the server can delay responding until it changes, +// and return an error instead if no change occurs before a time limit the +// server sets. List can't be called concurrently with itself. +func (c *Controller) List(ctx context.Context) (*cd.ListActiveBreakpointsResponse, error) { + id, err := c.getDebuggeeID(ctx) + if err != nil { + return nil, err + } + resp, err := c.s.List(ctx, id, c.waitToken) + if err != nil { + if isAbortedError(err) { + return nil, ErrListUnchanged + } + // For other errors, the protocol requires that we attempt to re-register. + c.mu.Lock() + defer c.mu.Unlock() + if regError := c.register(ctx); regError != nil { + return nil, regError + } + return nil, err + } + if resp == nil { + return nil, errors.New("no response") + } + if c.verbose { + log.Printf("List response: %v", resp) + } + c.waitToken = resp.NextWaitToken + return resp, nil +} + +// isAbortedError tests if err is a *googleapi.Error, that it contains one error +// in Errors, and that that error's Reason is "aborted". +func isAbortedError(err error) bool { + e, _ := err.(*googleapi.Error) + if e == nil { + return false + } + if len(e.Errors) != 1 { + return false + } + return e.Errors[0].Reason == "aborted" +} + +// Update reports information to the server about a breakpoint that was hit. +// Update can be called concurrently with List and Update. 
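+//
+// For example, once a CAPTURE breakpoint has been evaluated, a caller might
+// mark it final and report it (a sketch; populating stack frames and the
+// variable table is elided):
+//
+//	bp.IsFinalState = true
+//	if err := c.Update(ctx, bp.Id, bp); err != nil {
+//		log.Print(err)
+//	}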
+func (c *Controller) Update(ctx context.Context, breakpointID string, bp *cd.Breakpoint) error { + req := &cd.UpdateActiveBreakpointRequest{Breakpoint: bp} + if c.verbose { + log.Printf("sending update for %s: %v", breakpointID, req) + } + id, err := c.getDebuggeeID(ctx) + if err != nil { + return err + } + _, err = c.s.Update(ctx, id, breakpointID, req) + return err +} + +// register calls the Debuglet Controller Register method, and sets c.debuggeeID. +// c.mu should be locked while calling this function. List and Update can't +// make progress until it returns. +func (c *Controller) register(ctx context.Context) error { + req := cd.RegisterDebuggeeRequest{ + Debuggee: &cd.Debuggee{ + AgentVersion: agentVersionString, + Description: c.description, + Project: c.options.ProjectNumber, + SourceContexts: c.options.SourceContexts, + Uniquifier: c.uniquifier, + Labels: c.labels, + }, + } + resp, err := c.s.Register(ctx, &req) + if err != nil { + return err + } + if resp == nil { + return errors.New("register: no response") + } + if resp.Debuggee.IsDisabled { + // Setting c.debuggeeID to empty makes sure future List and Update calls + // will call register first. + c.debuggeeID = "" + } else { + c.debuggeeID = resp.Debuggee.Id + } + if c.debuggeeID == "" { + return ErrDebuggeeDisabled + } + return nil +} diff --git a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client_test.go b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client_test.go new file mode 100644 index 0000000..fa06347 --- /dev/null +++ b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client_test.go @@ -0,0 +1,254 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package controller + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "testing" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + + cd "google.golang.org/api/clouddebugger/v2" + "google.golang.org/api/googleapi" +) + +const ( + testDebuggeeID = "d12345" + testBreakpointID = "bp12345" +) + +var ( + // The sequence of wait tokens in List requests and responses. + expectedWaitToken = []string{"init", "token1", "token2", "token1", "token1"} + // The set of breakpoints returned from each List call. 
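+	// Only the first three List calls return breakpoint lists; the mock makes
+	// the fourth call fail with backendError and the fifth with abortedError.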
+ expectedBreakpoints = [][]*cd.Breakpoint{ + nil, + { + &cd.Breakpoint{ + Id: testBreakpointID, + IsFinalState: false, + Location: &cd.SourceLocation{Line: 42, Path: "foo.go"}, + }, + }, + nil, + } + abortedError error = &googleapi.Error{ + Code: 409, + Message: "Conflict", + Body: `{ + "error": { + "errors": [ + { + "domain": "global", + "reason": "aborted", + "message": "Conflict" + } + ], + "code": 409, + "message": "Conflict" + } + }`, + Errors: []googleapi.ErrorItem{ + {Reason: "aborted", Message: "Conflict"}, + }, + } + backendError error = &googleapi.Error{ + Code: 503, + Message: "Backend Error", + Body: `{ + "error": { + "errors": [ + { + "domain": "global", + "reason": "backendError", + "message": "Backend Error" + } + ], + "code": 503, + "message": "Backend Error" + } + }`, + Errors: []googleapi.ErrorItem{ + {Reason: "backendError", Message: "Backend Error"}, + }, + } +) + +type mockService struct { + t *testing.T + listCallsSeen int + registerCallsSeen int +} + +func (s *mockService) Register(ctx context.Context, req *cd.RegisterDebuggeeRequest) (*cd.RegisterDebuggeeResponse, error) { + s.registerCallsSeen++ + if req.Debuggee == nil { + s.t.Errorf("missing debuggee") + return nil, nil + } + if req.Debuggee.AgentVersion == "" { + s.t.Errorf("missing agent version") + } + if req.Debuggee.Description == "" { + s.t.Errorf("missing debuglet description") + } + if req.Debuggee.Project == "" { + s.t.Errorf("missing project id") + } + if req.Debuggee.Uniquifier == "" { + s.t.Errorf("missing uniquifier") + } + return &cd.RegisterDebuggeeResponse{ + Debuggee: &cd.Debuggee{Id: testDebuggeeID}, + }, nil +} + +func (s *mockService) Update(ctx context.Context, id, breakpointID string, req *cd.UpdateActiveBreakpointRequest) (*cd.UpdateActiveBreakpointResponse, error) { + if id != testDebuggeeID { + s.t.Errorf("got debuggee ID %s want %s", id, testDebuggeeID) + } + if breakpointID != testBreakpointID { + s.t.Errorf("got breakpoint ID %s want %s", breakpointID, testBreakpointID) + } + if !req.Breakpoint.IsFinalState { + s.t.Errorf("got IsFinalState = false, want true") + } + return nil, nil +} + +func (s *mockService) List(ctx context.Context, id, waitToken string) (*cd.ListActiveBreakpointsResponse, error) { + if id != testDebuggeeID { + s.t.Errorf("got debuggee ID %s want %s", id, testDebuggeeID) + } + if waitToken != expectedWaitToken[s.listCallsSeen] { + s.t.Errorf("got wait token %s want %s", waitToken, expectedWaitToken[s.listCallsSeen]) + } + s.listCallsSeen++ + if s.listCallsSeen == 4 { + return nil, backendError + } + if s.listCallsSeen == 5 { + return nil, abortedError + } + resp := &cd.ListActiveBreakpointsResponse{ + Breakpoints: expectedBreakpoints[s.listCallsSeen-1], + NextWaitToken: expectedWaitToken[s.listCallsSeen], + } + return resp, nil +} + +func TestDebugletControllerClientLibrary(t *testing.T) { + var ( + m *mockService + c *Controller + list *cd.ListActiveBreakpointsResponse + err error + ) + m = &mockService{t: t} + newService = func(context.Context, oauth2.TokenSource) (serviceInterface, error) { return m, nil } + opts := Options{ + ProjectNumber: "5", + ProjectID: "p1", + AppModule: "mod1", + AppVersion: "v1", + } + ctx := context.Background() + if c, err = NewController(ctx, opts); err != nil { + t.Fatal("Initializing Controller client:", err) + } + if err := validateLabels(c, opts); err != nil { + t.Fatalf("Invalid labels:\n%v", err) + } + if list, err = c.List(ctx); err != nil { + t.Fatal("List:", err) + } + if m.registerCallsSeen != 1 { + t.Errorf("saw %d Register 
calls, want 1", m.registerCallsSeen) + } + if list, err = c.List(ctx); err != nil { + t.Fatal("List:", err) + } + if len(list.Breakpoints) != 1 { + t.Fatalf("got %d breakpoints, want 1", len(list.Breakpoints)) + } + if err = c.Update(ctx, list.Breakpoints[0].Id, &cd.Breakpoint{Id: testBreakpointID, IsFinalState: true}); err != nil { + t.Fatal("Update:", err) + } + if list, err = c.List(ctx); err != nil { + t.Fatal("List:", err) + } + if m.registerCallsSeen != 1 { + t.Errorf("saw %d Register calls, want 1", m.registerCallsSeen) + } + // The next List call produces an error that should cause a Register call. + if list, err = c.List(ctx); err == nil { + t.Fatal("List should have returned an error") + } + if m.registerCallsSeen != 2 { + t.Errorf("saw %d Register calls, want 2", m.registerCallsSeen) + } + // The next List call produces an error that should not cause a Register call. + if list, err = c.List(ctx); err == nil { + t.Fatal("List should have returned an error") + } + if m.registerCallsSeen != 2 { + t.Errorf("saw %d Register calls, want 2", m.registerCallsSeen) + } + if m.listCallsSeen != 5 { + t.Errorf("saw %d list calls, want 5", m.listCallsSeen) + } +} + +func validateLabels(c *Controller, o Options) error { + errMsg := new(bytes.Buffer) + if m, ok := c.labels["module"]; ok { + if m != o.AppModule { + errMsg.WriteString(fmt.Sprintf("label module: want %s, got %s\n", o.AppModule, m)) + } + } else { + errMsg.WriteString("Missing \"module\" label\n") + } + if v, ok := c.labels["version"]; ok { + if v != o.AppVersion { + errMsg.WriteString(fmt.Sprintf("label version: want %s, got %s\n", o.AppVersion, v)) + } + } else { + errMsg.WriteString("Missing \"version\" label\n") + } + if mv, ok := c.labels["minorversion"]; ok { + if _, err := strconv.Atoi(mv); err != nil { + errMsg.WriteString(fmt.Sprintln("label minorversion: not a numeric string:", mv)) + } + } else { + errMsg.WriteString("Missing \"minorversion\" label\n") + } + if errMsg.Len() != 0 { + return errors.New(errMsg.String()) + } + return nil +} + +func TestIsAbortedError(t *testing.T) { + if !isAbortedError(abortedError) { + t.Errorf("isAborted(%+v): got false, want true", abortedError) + } + if isAbortedError(backendError) { + t.Errorf("isAborted(%+v): got true, want false", backendError) + } +} diff --git a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector.go b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector.go new file mode 100644 index 0000000..8dadc2f --- /dev/null +++ b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector.go @@ -0,0 +1,460 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package valuecollector is used to collect the values of variables in a program. 
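+// Starting from a set of root variables, it walks the graph of reachable
+// values breadth-first, recording each value in an output table, until the
+// graph is exhausted or a caller-chosen size limit is reached.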
+package valuecollector + +import ( + "bytes" + "fmt" + "strconv" + "strings" + + "golang.org/x/debug" + cd "google.golang.org/api/clouddebugger/v2" +) + +const ( + maxArrayLength = 50 + maxMapLength = 20 +) + +// Collector is given references to variables from a program being debugged +// using AddVariable. Then when ReadValues is called, the Collector will fetch +// the values of those variables. Any variables referred to by those values +// will also be fetched; e.g. the targets of pointers, members of structs, +// elements of slices, etc. This continues iteratively, building a graph of +// values, until all the reachable values are fetched, or a size limit is +// reached. +// +// Variables are passed to the Collector as debug.Var, which is used by x/debug +// to represent references to variables. Values are returned as cd.Variable, +// which is used by the Debuglet Controller to represent the graph of values. +// +// For example, if the program has a struct variable: +// +// foo := SomeStruct{a:42, b:"xyz"} +// +// and we call AddVariable with a reference to foo, we will get back a result +// like: +// +// cd.Variable{Name:"foo", VarTableIndex:10} +// +// which denotes a variable named "foo" which will have its value stored in +// element 10 of the table that will later be returned by ReadValues. That +// element might be: +// +// out[10] = &cd.Variable{Members:{{Name:"a", VarTableIndex:11},{Name:"b", VarTableIndex:12}}} +// +// which denotes a struct with two members a and b, whose values are in elements +// 11 and 12 of the output table: +// +// out[11] = &cd.Variable{Value:"42"} +// out[12] = &cd.Variable{Value:"xyz"} +type Collector struct { + // prog is the program being debugged. + prog debug.Program + // limit is the maximum size of the output slice of values. + limit int + // index is a map from references (variables and map elements) to their + // locations in the table. + index map[reference]int + // table contains the references, including those given to the + // Collector directly and those the Collector itself found. + // If VarTableIndex is set to 0 in a cd.Variable, it is ignored, so the first entry + // of table can't be used. On initialization we put a dummy value there. + table []reference +} + +// reference represents a value which is in the queue to be read by the +// collector. It is either a debug.Var, or a mapElement. +type reference interface{} + +// mapElement represents an element of a map in the debugged program's memory. +type mapElement struct { + debug.Map + index uint64 +} + +// NewCollector returns a Collector for the given program and size limit. +// The limit is the maximum size of the slice of values returned by ReadValues. +func NewCollector(prog debug.Program, limit int) *Collector { + return &Collector{ + prog: prog, + limit: limit, + index: make(map[reference]int), + table: []reference{debug.Var{}}, + } +} + +// AddVariable adds another variable to be collected. +// The Collector doesn't get the value immediately; it returns a cd.Variable +// that contains an index into the table which will later be returned by +// ReadValues. +func (c *Collector) AddVariable(lv debug.LocalVar) *cd.Variable { + ret := &cd.Variable{Name: lv.Name} + if index, ok := c.add(lv.Var); !ok { + // If the add call failed, it's because we reached the size limit. + // The Debuglet Controller's convention is to pass it a "Not Captured" error + // in this case. 
+ ret.Status = statusMessage(messageNotCaptured, true, refersToVariableName) + } else { + ret.VarTableIndex = int64(index) + } + return ret +} + +// add adds a reference to the set of values to be read from the +// program. It returns the index in the output table that will contain the +// corresponding value. It fails if the table has reached the size limit. +// It deduplicates references, so the index may be the same as one that was +// returned from an earlier add call. +func (c *Collector) add(r reference) (outputIndex int, ok bool) { + if i, ok := c.index[r]; ok { + return i, true + } + i := len(c.table) + if i >= c.limit { + return 0, false + } + c.index[r] = i + c.table = append(c.table, r) + return i, true +} + +func addMember(v *cd.Variable, name string) *cd.Variable { + v2 := &cd.Variable{Name: name} + v.Members = append(v.Members, v2) + return v2 +} + +// ReadValues fetches values of the variables that were passed to the Collector +// with AddVariable. The values of any new variables found are also fetched, +// e.g. the targets of pointers or the members of structs, until we reach the +// size limit or we run out of values to fetch. +// The results are output as a []*cd.Variable, which is the type we need to send +// to the Debuglet Controller after we trigger a breakpoint. +func (c *Collector) ReadValues() (out []*cd.Variable) { + for i := 0; i < len(c.table); i++ { + // Create a new cd.Variable for this value, and append it to the output. + dcv := new(cd.Variable) + out = append(out, dcv) + if i == 0 { + // The first element is unused. + continue + } + switch x := c.table[i].(type) { + case mapElement: + key, value, err := c.prog.MapElement(x.Map, x.index) + if err != nil { + dcv.Status = statusMessage(err.Error(), true, refersToVariableValue) + continue + } + // Add a member for the key. + member := addMember(dcv, "key") + if index, ok := c.add(key); !ok { + // The table is full. + member.Status = statusMessage(messageNotCaptured, true, refersToVariableName) + continue + } else { + member.VarTableIndex = int64(index) + } + // Add a member for the value. + member = addMember(dcv, "value") + if index, ok := c.add(value); !ok { + // The table is full. + member.Status = statusMessage(messageNotCaptured, true, refersToVariableName) + } else { + member.VarTableIndex = int64(index) + } + case debug.Var: + if v, err := c.prog.Value(x); err != nil { + dcv.Status = statusMessage(err.Error(), true, refersToVariableValue) + } else { + c.FillValue(v, dcv) + } + } + } + return out +} + +// indexable is an interface for arrays, slices and channels. +type indexable interface { + Len() uint64 + Element(uint64) debug.Var +} + +// channel implements indexable. +type channel struct { + debug.Channel +} + +func (c channel) Len() uint64 { + return c.Length +} + +var ( + _ indexable = debug.Array{} + _ indexable = debug.Slice{} + _ indexable = channel{} +) + +// FillValue copies a value into a cd.Variable. Any variables referred to by +// that value, e.g. struct members and pointer targets, are added to the +// collector's queue, to be fetched later by ReadValues. +func (c *Collector) FillValue(v debug.Value, dcv *cd.Variable) { + if c, ok := v.(debug.Channel); ok { + // Convert to channel, which implements indexable. + v = channel{c} + } + // Fill in dcv in a manner depending on the type of the value we got. 
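+	// Scalars are printed straight into dcv.Value and strings are quoted;
+	// composite values (structs, maps, pointers, arrays, slices, channels)
+	// get one member per component, each enqueued via c.add so that
+	// ReadValues fetches it on a later iteration.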
+ switch val := v.(type) { + case int8, int16, int32, int64, bool, uint8, uint16, uint32, uint64, float32, float64, complex64, complex128: + // For simple types, we just print the value to dcv.Value. + dcv.Value = fmt.Sprint(val) + case string: + // Put double quotes around strings. + dcv.Value = strconv.Quote(val) + case debug.String: + if uint64(len(val.String)) < val.Length { + // This string value was truncated. + dcv.Value = strconv.Quote(val.String + "...") + } else { + dcv.Value = strconv.Quote(val.String) + } + case debug.Struct: + // For structs, we add an entry to dcv.Members for each field in the + // struct. + // Each member will contain the name of the field, and the index in the + // output table which will contain the value of that field. + for _, f := range val.Fields { + member := addMember(dcv, f.Name) + if index, ok := c.add(f.Var); !ok { + // The table is full. + member.Status = statusMessage(messageNotCaptured, true, refersToVariableName) + } else { + member.VarTableIndex = int64(index) + } + } + case debug.Map: + dcv.Value = fmt.Sprintf("len = %d", val.Length) + for i := uint64(0); i < val.Length; i++ { + field := addMember(dcv, `⚫`) + if i == maxMapLength { + field.Name = "..." + field.Status = statusMessage(messageTruncated, true, refersToVariableName) + break + } + if index, ok := c.add(mapElement{val, i}); !ok { + // The value table is full; add a member to contain the error message. + field.Name = "..." + field.Status = statusMessage(messageNotCaptured, true, refersToVariableName) + break + } else { + field.VarTableIndex = int64(index) + } + } + case debug.Pointer: + if val.Address == 0 { + dcv.Value = "" + } else if val.TypeID == 0 { + // We don't know the type of the pointer, so just output the address as + // the value. + dcv.Value = fmt.Sprintf("0x%X", val.Address) + dcv.Status = statusMessage(messageUnknownPointerType, false, refersToVariableName) + } else { + // Adds the pointed-to variable to the table, and links this value to + // that table entry through VarTableIndex. + dcv.Value = fmt.Sprintf("0x%X", val.Address) + target := addMember(dcv, "") + if index, ok := c.add(debug.Var(val)); !ok { + target.Status = statusMessage(messageNotCaptured, true, refersToVariableName) + } else { + target.VarTableIndex = int64(index) + } + } + case indexable: + // Arrays, slices and channels. + dcv.Value = "len = " + fmt.Sprint(val.Len()) + for j := uint64(0); j < val.Len(); j++ { + field := addMember(dcv, fmt.Sprint(`[`, j, `]`)) + if j == maxArrayLength { + field.Name = "..." + field.Status = statusMessage(messageTruncated, true, refersToVariableName) + break + } + vr := val.Element(j) + if index, ok := c.add(vr); !ok { + // The value table is full; add a member to contain the error message. + field.Name = "..." + field.Status = statusMessage(messageNotCaptured, true, refersToVariableName) + break + } else { + // Add a member with the index as the name. + field.VarTableIndex = int64(index) + } + } + default: + dcv.Status = statusMessage(messageUnknownType, false, refersToVariableName) + } +} + +// statusMessage returns a *cd.StatusMessage with the given message, IsError +// field and refersTo field. +func statusMessage(msg string, isError bool, refersTo int) *cd.StatusMessage { + return &cd.StatusMessage{ + Description: &cd.FormatMessage{Format: "$0", Parameters: []string{msg}}, + IsError: isError, + RefersTo: refersToString[refersTo], + } +} + +// LogString produces a string for a logpoint, substituting in variable values +// using evaluatedExpressions and varTable. 
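+// For example, assuming evaluatedExpressions[0] resolves to the value 42,
+// the format string "x = $0" produces "LOGPOINT: x = 42"; "$$" emits a
+// literal dollar sign.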
+func LogString(s string, evaluatedExpressions []*cd.Variable, varTable []*cd.Variable) string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "LOGPOINT: ") + seen := make(map[*cd.Variable]bool) + for i := 0; i < len(s); { + if s[i] == '$' { + i++ + if num, n, ok := parseToken(s[i:], len(evaluatedExpressions)-1); ok { + // This token is one of $0, $1, etc. Write the corresponding expression. + writeExpression(&buf, evaluatedExpressions[num], false, varTable, seen) + i += n + } else { + // Something else, like $$. + buf.WriteByte(s[i]) + i++ + } + } else { + buf.WriteByte(s[i]) + i++ + } + } + return buf.String() +} + +func parseToken(s string, max int) (num int, bytesRead int, ok bool) { + var i int + for i < len(s) && s[i] >= '0' && s[i] <= '9' { + i++ + } + num, err := strconv.Atoi(s[:i]) + return num, i, err == nil && num <= max +} + +// writeExpression recursively writes variables to buf, in a format suitable +// for logging. If printName is true, writes the name of the variable. +func writeExpression(buf *bytes.Buffer, v *cd.Variable, printName bool, varTable []*cd.Variable, seen map[*cd.Variable]bool) { + if v == nil { + // Shouldn't happen. + return + } + name, value, status, members := v.Name, v.Value, v.Status, v.Members + + // If v.VarTableIndex is not zero, it refers to an element of varTable. + // We merge its fields with the fields we got from v. + var other *cd.Variable + if idx := int(v.VarTableIndex); idx > 0 && idx < len(varTable) { + other = varTable[idx] + } + if other != nil { + if name == "" { + name = other.Name + } + if value == "" { + value = other.Value + } + if status == nil { + status = other.Status + } + if len(members) == 0 { + members = other.Members + } + } + if printName && name != "" { + buf.WriteString(name) + buf.WriteByte(':') + } + + // If we have seen this value before, write "..." rather than repeating it. + if seen[v] { + buf.WriteString("...") + return + } + seen[v] = true + if other != nil { + if seen[other] { + buf.WriteString("...") + return + } + seen[other] = true + } + + if value != "" && !strings.HasPrefix(value, "len = ") { + // A plain value. + buf.WriteString(value) + } else if status != nil && status.Description != nil { + // An error. + for _, p := range status.Description.Parameters { + buf.WriteByte('(') + buf.WriteString(p) + buf.WriteByte(')') + } + } else if name == `⚫` { + // A map element. + first := true + for _, member := range members { + if first { + first = false + } else { + buf.WriteByte(':') + } + writeExpression(buf, member, false, varTable, seen) + } + } else { + // A map, array, slice, channel, or struct. + isStruct := value == "" + first := true + buf.WriteByte('{') + for _, member := range members { + if first { + first = false + } else { + buf.WriteString(", ") + } + writeExpression(buf, member, isStruct, varTable, seen) + } + buf.WriteByte('}') + } +} + +const ( + // Error messages for cd.StatusMessage + messageNotCaptured = "Not captured" + messageTruncated = "Truncated" + messageUnknownPointerType = "Unknown pointer type" + messageUnknownType = "Unknown type" + // RefersTo values for cd.StatusMessage. + refersToVariableName = iota + refersToVariableValue +) + +// refersToString contains the strings for each refersTo value. +// See the definition of StatusMessage in the v2/clouddebugger package. 
+var refersToString = map[int]string{ + refersToVariableName: "VARIABLE_NAME", + refersToVariableValue: "VARIABLE_VALUE", +} diff --git a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector_test.go b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector_test.go new file mode 100644 index 0000000..980cbfa --- /dev/null +++ b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector_test.go @@ -0,0 +1,418 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package valuecollector + +import ( + "fmt" + "testing" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/debug" + cd "google.golang.org/api/clouddebugger/v2" +) + +const ( + // Some arbitrary type IDs for the test, for use in debug.Var's TypeID field. + // A TypeID of 0 means the type is unknown, so we start at 1. + int16Type = iota + 1 + stringType + structType + pointerType + arrayType + int32Type + debugStringType + mapType + channelType + sliceType +) + +func TestValueCollector(t *testing.T) { + // Construct the collector. + c := NewCollector(&Program{}, 26) + // Add some variables of various types, whose values we want the collector to read. + variablesToAdd := []debug.LocalVar{ + {Name: "a", Var: debug.Var{TypeID: int16Type, Address: 0x1}}, + {Name: "b", Var: debug.Var{TypeID: stringType, Address: 0x2}}, + {Name: "c", Var: debug.Var{TypeID: structType, Address: 0x3}}, + {Name: "d", Var: debug.Var{TypeID: pointerType, Address: 0x4}}, + {Name: "e", Var: debug.Var{TypeID: arrayType, Address: 0x5}}, + {Name: "f", Var: debug.Var{TypeID: debugStringType, Address: 0x6}}, + {Name: "g", Var: debug.Var{TypeID: mapType, Address: 0x7}}, + {Name: "h", Var: debug.Var{TypeID: channelType, Address: 0x8}}, + {Name: "i", Var: debug.Var{TypeID: sliceType, Address: 0x9}}, + } + expectedResults := []*cd.Variable{ + &cd.Variable{Name: "a", VarTableIndex: 1}, + &cd.Variable{Name: "b", VarTableIndex: 2}, + &cd.Variable{Name: "c", VarTableIndex: 3}, + &cd.Variable{Name: "d", VarTableIndex: 4}, + &cd.Variable{Name: "e", VarTableIndex: 5}, + &cd.Variable{Name: "f", VarTableIndex: 6}, + &cd.Variable{Name: "g", VarTableIndex: 7}, + &cd.Variable{Name: "h", VarTableIndex: 8}, + &cd.Variable{Name: "i", VarTableIndex: 9}, + } + for i, v := range variablesToAdd { + added := c.AddVariable(v) + if !testutil.Equal(added, expectedResults[i]) { + t.Errorf("AddVariable: got %+v want %+v", *added, *expectedResults[i]) + } + } + // Read the values, compare the output to what we expect. 
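+	// Entry 0 of the output table is the unused dummy slot; entries 1-9 hold
+	// the nine variables added above, and entries 10 and up hold values the
+	// collector discovered while traversing them.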
+ v := c.ReadValues() + expectedValues := []*cd.Variable{ + &cd.Variable{}, + &cd.Variable{Value: "1"}, + &cd.Variable{Value: `"hello"`}, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "x", VarTableIndex: 1}, + &cd.Variable{Name: "y", VarTableIndex: 2}, + }, + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{VarTableIndex: 1}, + }, + Value: "0x1", + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "[0]", VarTableIndex: 10}, + &cd.Variable{Name: "[1]", VarTableIndex: 11}, + &cd.Variable{Name: "[2]", VarTableIndex: 12}, + &cd.Variable{Name: "[3]", VarTableIndex: 13}, + }, + Value: "len = 4", + }, + &cd.Variable{Value: `"world"`}, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "⚫", VarTableIndex: 14}, + &cd.Variable{Name: "⚫", VarTableIndex: 15}, + &cd.Variable{Name: "⚫", VarTableIndex: 16}, + }, + Value: "len = 3", + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "[0]", VarTableIndex: 17}, + &cd.Variable{Name: "[1]", VarTableIndex: 18}, + }, + Value: "len = 2", + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "[0]", VarTableIndex: 19}, + &cd.Variable{Name: "[1]", VarTableIndex: 20}, + }, + Value: "len = 2", + }, + &cd.Variable{Value: "100"}, + &cd.Variable{Value: "104"}, + &cd.Variable{Value: "108"}, + &cd.Variable{Value: "112"}, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "key", VarTableIndex: 21}, + &cd.Variable{Name: "value", VarTableIndex: 22}, + }, + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "key", VarTableIndex: 23}, + &cd.Variable{Name: "value", VarTableIndex: 24}, + }, + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "key", VarTableIndex: 25}, + &cd.Variable{ + Name: "value", + Status: &cd.StatusMessage{ + Description: &cd.FormatMessage{ + Format: "$0", + Parameters: []string{"Not captured"}, + }, + IsError: true, + RefersTo: "VARIABLE_NAME", + }, + }, + }, + }, + &cd.Variable{Value: "246"}, + &cd.Variable{Value: "210"}, + &cd.Variable{Value: "300"}, + &cd.Variable{Value: "304"}, + &cd.Variable{Value: "400"}, + &cd.Variable{Value: "404"}, + &cd.Variable{Value: "1400"}, + &cd.Variable{Value: "1404"}, + &cd.Variable{Value: "2400"}, + } + if !testutil.Equal(v, expectedValues) { + t.Errorf("ReadValues: got %v want %v", v, expectedValues) + // Do element-by-element comparisons, for more useful error messages. + for i := range v { + if i < len(expectedValues) && !testutil.Equal(v[i], expectedValues[i]) { + t.Errorf("element %d: got %+v want %+v", i, *v[i], *expectedValues[i]) + } + } + } +} + +// Program implements the similarly-named interface in x/debug. +// ValueCollector should only call its Value and MapElement methods. +type Program struct { + debug.Program +} + +func (p *Program) Value(v debug.Var) (debug.Value, error) { + // We determine what to return using v.TypeID. + switch v.TypeID { + case int16Type: + // We use the address as the value, so that we're testing whether the right + // address was calculated. + return int16(v.Address), nil + case stringType: + // A string. + return "hello", nil + case structType: + // A struct with two elements. + return debug.Struct{ + Fields: []debug.StructField{ + { + Name: "x", + Var: debug.Var{TypeID: int16Type, Address: 0x1}, + }, + { + Name: "y", + Var: debug.Var{TypeID: stringType, Address: 0x2}, + }, + }, + }, nil + case pointerType: + // A pointer to the first variable above. 
+ return debug.Pointer{TypeID: int16Type, Address: 0x1}, nil + case arrayType: + // An array of 4 32-bit-wide elements. + return debug.Array{ + ElementTypeID: int32Type, + Address: 0x64, + Length: 4, + StrideBits: 32, + }, nil + case debugStringType: + return debug.String{ + Length: 5, + String: "world", + }, nil + case mapType: + return debug.Map{ + TypeID: 99, + Address: 0x100, + Length: 3, + }, nil + case channelType: + return debug.Channel{ + ElementTypeID: int32Type, + Address: 200, + Buffer: 210, + Length: 2, + Capacity: 10, + Stride: 4, + BufferStart: 9, + }, nil + case sliceType: + // A slice of 2 32-bit-wide elements. + return debug.Slice{ + Array: debug.Array{ + ElementTypeID: int32Type, + Address: 300, + Length: 2, + StrideBits: 32, + }, + Capacity: 50, + }, nil + case int32Type: + // We use the address as the value, so that we're testing whether the right + // address was calculated. + return int32(v.Address), nil + } + return nil, fmt.Errorf("unexpected Value request") +} + +func (p *Program) MapElement(m debug.Map, index uint64) (debug.Var, debug.Var, error) { + return debug.Var{TypeID: int16Type, Address: 1000*index + 400}, + debug.Var{TypeID: int32Type, Address: 1000*index + 404}, + nil +} + +func TestLogString(t *testing.T) { + bp := cd.Breakpoint{ + Action: "LOG", + LogMessageFormat: "$0 hello, $$7world! $1 $2 $3 $4 $5$6 $7 $8", + EvaluatedExpressions: []*cd.Variable{ + &cd.Variable{Name: "a", VarTableIndex: 1}, + &cd.Variable{Name: "b", VarTableIndex: 2}, + &cd.Variable{Name: "c", VarTableIndex: 3}, + &cd.Variable{Name: "d", VarTableIndex: 4}, + &cd.Variable{Name: "e", VarTableIndex: 5}, + &cd.Variable{Name: "f", VarTableIndex: 6}, + &cd.Variable{Name: "g", VarTableIndex: 7}, + &cd.Variable{Name: "h", VarTableIndex: 8}, + &cd.Variable{Name: "i", VarTableIndex: 9}, + }, + } + varTable := []*cd.Variable{ + &cd.Variable{}, + &cd.Variable{Value: "1"}, + &cd.Variable{Value: `"hello"`}, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "x", Value: "1"}, + &cd.Variable{Name: "y", Value: `"hello"`}, + &cd.Variable{Name: "z", VarTableIndex: 3}, + }, + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{VarTableIndex: 1}, + }, + Value: "0x1", + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "[0]", VarTableIndex: 10}, + &cd.Variable{Name: "[1]", VarTableIndex: 11}, + &cd.Variable{Name: "[2]", VarTableIndex: 12}, + &cd.Variable{Name: "[3]", VarTableIndex: 13}, + }, + Value: "len = 4", + }, + &cd.Variable{Value: `"world"`}, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "⚫", VarTableIndex: 14}, + &cd.Variable{Name: "⚫", VarTableIndex: 15}, + &cd.Variable{Name: "⚫", VarTableIndex: 16}, + }, + Value: "len = 3", + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "[0]", VarTableIndex: 17}, + &cd.Variable{Name: "[1]", VarTableIndex: 18}, + }, + Value: "len = 2", + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "[0]", VarTableIndex: 19}, + &cd.Variable{Name: "[1]", VarTableIndex: 20}, + }, + Value: "len = 2", + }, + &cd.Variable{Value: "100"}, + &cd.Variable{Value: "104"}, + &cd.Variable{Value: "108"}, + &cd.Variable{Value: "112"}, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "key", VarTableIndex: 21}, + &cd.Variable{Name: "value", VarTableIndex: 22}, + }, + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "key", VarTableIndex: 23}, + &cd.Variable{Name: "value", VarTableIndex: 24}, + }, + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: 
"key", VarTableIndex: 25}, + &cd.Variable{ + Name: "value", + Status: &cd.StatusMessage{ + Description: &cd.FormatMessage{ + Format: "$0", + Parameters: []string{"Not captured"}, + }, + IsError: true, + RefersTo: "VARIABLE_NAME", + }, + }, + }, + }, + &cd.Variable{Value: "246"}, + &cd.Variable{Value: "210"}, + &cd.Variable{Value: "300"}, + &cd.Variable{Value: "304"}, + &cd.Variable{Value: "400"}, + &cd.Variable{Value: "404"}, + &cd.Variable{Value: "1400"}, + &cd.Variable{Value: "1404"}, + &cd.Variable{Value: "2400"}, + } + s := LogString(bp.LogMessageFormat, bp.EvaluatedExpressions, varTable) + expected := `LOGPOINT: 1 hello, $7world! "hello" {x:1, y:"hello", z:...} ` + + `0x1 {100, 104, 108, 112} "world"{400:404, 1400:1404, 2400:(Not captured)} ` + + `{246, 210} {300, 304}` + if s != expected { + t.Errorf("LogString: got %q want %q", s, expected) + } +} + +func TestParseToken(t *testing.T) { + for _, c := range []struct { + s string + max int + num int + n int + ok bool + }{ + {"", 0, 0, 0, false}, + {".", 0, 0, 0, false}, + {"0", 0, 0, 1, true}, + {"0", 1, 0, 1, true}, + {"00", 0, 0, 2, true}, + {"1.", 1, 1, 1, true}, + {"1.", 0, 0, 0, false}, + {"10", 10, 10, 2, true}, + {"10..", 10, 10, 2, true}, + {"10", 11, 10, 2, true}, + {"10..", 11, 10, 2, true}, + {"10", 9, 0, 0, false}, + {"10..", 9, 0, 0, false}, + {" 10", 10, 0, 0, false}, + {"010", 10, 10, 3, true}, + {"123456789", 123456789, 123456789, 9, true}, + {"123456789", 123456788, 0, 0, false}, + {"123456789123456789123456789", 999999999, 0, 0, false}, + } { + num, n, ok := parseToken(c.s, c.max) + if ok != c.ok { + t.Errorf("parseToken(%q, %d): got ok=%t want ok=%t", c.s, c.max, ok, c.ok) + continue + } + if !ok { + continue + } + if num != c.num || n != c.n { + t.Errorf("parseToken(%q, %d): got %d,%d,%t want %d,%d,%t", c.s, c.max, num, n, ok, c.num, c.n, c.ok) + } + } +} diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go new file mode 100644 index 0000000..e708c03 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -0,0 +1,437 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metadata provides access to Google Compute Engine (GCE) +// metadata and API service accounts. +// +// This package is a wrapper around the GCE metadata service, +// as documented at https://developers.google.com/compute/docs/metadata. +package metadata // import "cloud.google.com/go/compute/metadata" + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "runtime" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" +) + +const ( + // metadataIP is the documented metadata server IP address. + metadataIP = "169.254.169.254" + + // metadataHostEnv is the environment variable specifying the + // GCE metadata hostname. If empty, the default value of + // metadataIP ("169.254.169.254") is used instead. 
+	// This variable name is not defined by any spec, as far as
+	// I know; it was made up for the Go package.
+	metadataHostEnv = "GCE_METADATA_HOST"
+
+	userAgent = "gcloud-golang/0.1"
+)
+
+type cachedValue struct {
+	k    string
+	trim bool
+	mu   sync.Mutex
+	v    string
+}
+
+var (
+	projID  = &cachedValue{k: "project/project-id", trim: true}
+	projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
+	instID  = &cachedValue{k: "instance/id", trim: true}
+)
+
+var (
+	metaClient = &http.Client{
+		Transport: &http.Transport{
+			Dial: (&net.Dialer{
+				Timeout:   2 * time.Second,
+				KeepAlive: 30 * time.Second,
+			}).Dial,
+			ResponseHeaderTimeout: 2 * time.Second,
+		},
+	}
+	subscribeClient = &http.Client{
+		Transport: &http.Transport{
+			Dial: (&net.Dialer{
+				Timeout:   2 * time.Second,
+				KeepAlive: 30 * time.Second,
+			}).Dial,
+		},
+	}
+)
+
+// NotDefinedError is returned when requested metadata is not defined.
+//
+// The underlying string is the suffix after "/computeMetadata/v1/".
+//
+// This error is not returned if the value is defined to be the empty
+// string.
+type NotDefinedError string
+
+func (suffix NotDefinedError) Error() string {
+	return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
+}
+
+// Get returns a value from the metadata service.
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
+//
+// If the GCE_METADATA_HOST environment variable is not defined, a default of
+// 169.254.169.254 will be used instead.
+//
+// If the requested metadata is not defined, the returned error will
+// be of type NotDefinedError.
+func Get(suffix string) (string, error) {
+	val, _, err := getETag(metaClient, suffix)
+	return val, err
+}
+
+// getETag returns a value from the metadata service as well as the associated
+// ETag using the provided client. This func is otherwise equivalent to Get.
+func getETag(client *http.Client, suffix string) (value, etag string, err error) {
+	// Using a fixed IP makes it very difficult to spoof the metadata service in
+	// a container, which is an important use-case for local testing of cloud
+	// deployments. To enable spoofing of the metadata service, the environment
+	// variable GCE_METADATA_HOST is first inspected to decide where metadata
+	// requests shall go.
+	host := os.Getenv(metadataHostEnv)
+	if host == "" {
+		// Using 169.254.169.254 instead of "metadata" here because Go
+		// binaries built with the "netgo" tag and without cgo won't
+		// know the search suffix for "metadata" is
+		// ".google.internal", and this IP address is documented as
+		// being stable anyway.
+ host = metadataIP + } + url := "http://" + host + "/computeMetadata/v1/" + suffix + req, _ := http.NewRequest("GET", url, nil) + req.Header.Set("Metadata-Flavor", "Google") + req.Header.Set("User-Agent", userAgent) + res, err := client.Do(req) + if err != nil { + return "", "", err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return "", "", NotDefinedError(suffix) + } + if res.StatusCode != 200 { + return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url) + } + all, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + return string(all), res.Header.Get("Etag"), nil +} + +func getTrimmed(suffix string) (s string, err error) { + s, err = Get(suffix) + s = strings.TrimSpace(s) + return +} + +func (c *cachedValue) get() (v string, err error) { + defer c.mu.Unlock() + c.mu.Lock() + if c.v != "" { + return c.v, nil + } + if c.trim { + v, err = getTrimmed(c.k) + } else { + v, err = Get(c.k) + } + if err == nil { + c.v = v + } + return +} + +var ( + onGCEOnce sync.Once + onGCE bool +) + +// OnGCE reports whether this process is running on Google Compute Engine. +func OnGCE() bool { + onGCEOnce.Do(initOnGCE) + return onGCE +} + +func initOnGCE() { + onGCE = testOnGCE() +} + +func testOnGCE() bool { + // The user explicitly said they're on GCE, so trust them. + if os.Getenv(metadataHostEnv) != "" { + return true + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + resc := make(chan bool, 2) + + // Try two strategies in parallel. + // See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194 + go func() { + req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) + req.Header.Set("User-Agent", userAgent) + res, err := ctxhttp.Do(ctx, metaClient, req) + if err != nil { + resc <- false + return + } + defer res.Body.Close() + resc <- res.Header.Get("Metadata-Flavor") == "Google" + }() + + go func() { + addrs, err := net.LookupHost("metadata.google.internal") + if err != nil || len(addrs) == 0 { + resc <- false + return + } + resc <- strsContains(addrs, metadataIP) + }() + + tryHarder := systemInfoSuggestsGCE() + if tryHarder { + res := <-resc + if res { + // The first strategy succeeded, so let's use it. + return true + } + // Wait for either the DNS or metadata server probe to + // contradict the other one and say we are running on + // GCE. Give it a lot of time to do so, since the system + // info already suggests we're running on a GCE BIOS. + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case res = <-resc: + return res + case <-timer.C: + // Too slow. Who knows what this system is. + return false + } + } + + // There's no hint from the system info that we're running on + // GCE, so use the first probe's result as truth, whether it's + // true or false. The goal here is to optimize for speed for + // users who are NOT running on GCE. We can't assume that + // either a DNS lookup or an HTTP request to a blackholed IP + // address is fast. Worst case this should return when the + // metaClient's Transport.ResponseHeaderTimeout or + // Transport.Dial.Timeout fires (in two seconds). + return <-resc +} + +// systemInfoSuggestsGCE reports whether the local system (without +// doing network requests) suggests that we're running on GCE. If this +// returns true, testOnGCE tries a bit harder to reach its metadata +// server. 
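+// The only clue currently consulted is the DMI product name that Linux
+// exposes under /sys/class/dmi/id/product_name.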
+func systemInfoSuggestsGCE() bool {
+	if runtime.GOOS != "linux" {
+		// We don't have any non-Linux clues available, at least yet.
+		return false
+	}
+	slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
+	name := strings.TrimSpace(string(slurp))
+	return name == "Google" || name == "Google Compute Engine"
+}
+
+// Subscribe subscribes to a value from the metadata service.
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
+// The suffix may contain query parameters.
+//
+// Subscribe calls fn with the latest metadata value indicated by the provided
+// suffix. If the metadata value is deleted, fn is called with the empty string
+// and ok false. Subscribe blocks until fn returns a non-nil error or the value
+// is deleted. Subscribe returns the error value returned from the last call to
+// fn, which may be nil when ok == false.
+func Subscribe(suffix string, fn func(v string, ok bool) error) error {
+	const failedSubscribeSleep = time.Second * 5
+
+	// First check to see if the metadata value exists at all.
+	val, lastETag, err := getETag(subscribeClient, suffix)
+	if err != nil {
+		return err
+	}
+
+	if err := fn(val, true); err != nil {
+		return err
+	}
+
+	ok := true
+	if strings.ContainsRune(suffix, '?') {
+		suffix += "&wait_for_change=true&last_etag="
+	} else {
+		suffix += "?wait_for_change=true&last_etag="
+	}
+	for {
+		val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
+		if err != nil {
+			if _, deleted := err.(NotDefinedError); !deleted {
+				time.Sleep(failedSubscribeSleep)
+				continue // Retry on other errors.
+			}
+			ok = false
+		}
+		lastETag = etag
+
+		if err := fn(val, ok); err != nil || !ok {
+			return err
+		}
+	}
+}
+
+// ProjectID returns the current instance's project ID string.
+func ProjectID() (string, error) { return projID.get() }
+
+// NumericProjectID returns the current instance's numeric project ID.
+func NumericProjectID() (string, error) { return projNum.get() }
+
+// InternalIP returns the instance's primary internal IP address.
+func InternalIP() (string, error) {
+	return getTrimmed("instance/network-interfaces/0/ip")
+}
+
+// ExternalIP returns the instance's primary external (public) IP address.
+func ExternalIP() (string, error) {
+	return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
+}
+
+// Hostname returns the instance's hostname. This will be of the form
+// "<instanceID>.c.<projID>.internal".
+func Hostname() (string, error) {
+	return getTrimmed("instance/hostname")
+}
+
+// InstanceTags returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func InstanceTags() ([]string, error) {
+	var s []string
+	j, err := Get("instance/tags")
+	if err != nil {
+		return nil, err
+	}
+	if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
+		return nil, err
+	}
+	return s, nil
+}
+
+// InstanceID returns the current VM's numeric instance ID.
+func InstanceID() (string, error) {
+	return instID.get()
+}
+
+// InstanceName returns the current VM's instance ID string.
+func InstanceName() (string, error) {
+	host, err := Hostname()
+	if err != nil {
+		return "", err
+	}
+	return strings.Split(host, ".")[0], nil
+}
+
+// Zone returns the current VM's zone, such as "us-central1-b".
+func Zone() (string, error) {
+	zone, err := getTrimmed("instance/zone")
+	// zone is of the form "projects/<projNum>/zones/<zoneName>".
+ if err != nil { + return "", err + } + return zone[strings.LastIndex(zone, "/")+1:], nil +} + +// InstanceAttributes returns the list of user-defined attributes, +// assigned when initially creating a GCE VM instance. The value of an +// attribute can be obtained with InstanceAttributeValue. +func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") } + +// ProjectAttributes returns the list of user-defined attributes +// applying to the project as a whole, not just this VM. The value of +// an attribute can be obtained with ProjectAttributeValue. +func ProjectAttributes() ([]string, error) { return lines("project/attributes/") } + +func lines(suffix string) ([]string, error) { + j, err := Get(suffix) + if err != nil { + return nil, err + } + s := strings.Split(strings.TrimSpace(j), "\n") + for i := range s { + s[i] = strings.TrimSpace(s[i]) + } + return s, nil +} + +// InstanceAttributeValue returns the value of the provided VM +// instance attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// InstanceAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func InstanceAttributeValue(attr string) (string, error) { + return Get("instance/attributes/" + attr) +} + +// ProjectAttributeValue returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func ProjectAttributeValue(attr string) (string, error) { + return Get("project/attributes/" + attr) +} + +// Scopes returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. +func Scopes(serviceAccount string) ([]string, error) { + if serviceAccount == "" { + serviceAccount = "default" + } + return lines("instance/service-accounts/" + serviceAccount + "/scopes") +} + +func strsContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata_test.go b/vendor/cloud.google.com/go/compute/metadata/metadata_test.go new file mode 100644 index 0000000..9ac5926 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/metadata_test.go @@ -0,0 +1,48 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metadata + +import ( + "os" + "sync" + "testing" +) + +func TestOnGCE_Stress(t *testing.T) { + if testing.Short() { + t.Skip("skipping in -short mode") + } + var last bool + for i := 0; i < 100; i++ { + onGCEOnce = sync.Once{} + + now := OnGCE() + if i > 0 && now != last { + t.Errorf("%d. 
changed from %v to %v", i, last, now) + } + last = now + } + t.Logf("OnGCE() = %v", last) +} + +func TestOnGCE_Force(t *testing.T) { + onGCEOnce = sync.Once{} + old := os.Getenv(metadataHostEnv) + defer os.Setenv(metadataHostEnv, old) + os.Setenv(metadataHostEnv, "127.0.0.1") + if !OnGCE() { + t.Error("OnGCE() = false; want true") + } +} diff --git a/vendor/cloud.google.com/go/container/apiv1/ListClusters_smoke_test.go b/vendor/cloud.google.com/go/container/apiv1/ListClusters_smoke_test.go new file mode 100644 index 0000000..a8e1af9 --- /dev/null +++ b/vendor/cloud.google.com/go/container/apiv1/ListClusters_smoke_test.go @@ -0,0 +1,68 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package container + +import ( + containerpb "google.golang.org/genproto/googleapis/container/v1" +) + +import ( + "fmt" + "strconv" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var _ = fmt.Sprintf +var _ = iterator.Done +var _ = strconv.FormatUint +var _ = time.Now + +func TestClusterManagerSmoke(t *testing.T) { + if testing.Short() { + t.Skip("skipping smoke test in short mode") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) + if ts == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + + projectId := testutil.ProjID() + _ = projectId + + c, err := NewClusterManagerClient(ctx, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + + var projectId2 string = projectId + var zone string = "us-central1-a" + var request = &containerpb.ListClustersRequest{ + ProjectId: projectId2, + Zone: zone, + } + + if _, err := c.ListClusters(ctx, request); err != nil { + t.Error(err) + } +} diff --git a/vendor/cloud.google.com/go/container/apiv1/cluster_manager_client.go b/vendor/cloud.google.com/go/container/apiv1/cluster_manager_client.go new file mode 100644 index 0000000..e1e5dfc --- /dev/null +++ b/vendor/cloud.google.com/go/container/apiv1/cluster_manager_client.go @@ -0,0 +1,674 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package container + +import ( + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + containerpb "google.golang.org/genproto/googleapis/container/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// ClusterManagerCallOptions contains the retry settings for each method of ClusterManagerClient. +type ClusterManagerCallOptions struct { + ListClusters []gax.CallOption + GetCluster []gax.CallOption + CreateCluster []gax.CallOption + UpdateCluster []gax.CallOption + UpdateNodePool []gax.CallOption + SetNodePoolAutoscaling []gax.CallOption + SetLoggingService []gax.CallOption + SetMonitoringService []gax.CallOption + SetAddonsConfig []gax.CallOption + SetLocations []gax.CallOption + UpdateMaster []gax.CallOption + SetMasterAuth []gax.CallOption + DeleteCluster []gax.CallOption + ListOperations []gax.CallOption + GetOperation []gax.CallOption + CancelOperation []gax.CallOption + GetServerConfig []gax.CallOption + ListNodePools []gax.CallOption + GetNodePool []gax.CallOption + CreateNodePool []gax.CallOption + DeleteNodePool []gax.CallOption + RollbackNodePoolUpgrade []gax.CallOption + SetNodePoolManagement []gax.CallOption + SetLabels []gax.CallOption + SetLegacyAbac []gax.CallOption + StartIPRotation []gax.CallOption + CompleteIPRotation []gax.CallOption + SetNodePoolSize []gax.CallOption + SetNetworkPolicy []gax.CallOption + SetMaintenancePolicy []gax.CallOption +} + +func defaultClusterManagerClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("container.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultClusterManagerCallOptions() *ClusterManagerCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &ClusterManagerCallOptions{ + ListClusters: retry[[2]string{"default", "idempotent"}], + GetCluster: retry[[2]string{"default", "idempotent"}], + CreateCluster: retry[[2]string{"default", "non_idempotent"}], + UpdateCluster: retry[[2]string{"default", "non_idempotent"}], + UpdateNodePool: retry[[2]string{"default", "non_idempotent"}], + SetNodePoolAutoscaling: retry[[2]string{"default", "non_idempotent"}], + SetLoggingService: retry[[2]string{"default", "non_idempotent"}], + SetMonitoringService: retry[[2]string{"default", "non_idempotent"}], + SetAddonsConfig: retry[[2]string{"default", "non_idempotent"}], + SetLocations: retry[[2]string{"default", "non_idempotent"}], + UpdateMaster: retry[[2]string{"default", "non_idempotent"}], + SetMasterAuth: retry[[2]string{"default", "non_idempotent"}], + DeleteCluster: retry[[2]string{"default", "idempotent"}], + ListOperations: retry[[2]string{"default", "idempotent"}], + GetOperation: retry[[2]string{"default", "idempotent"}], + CancelOperation: retry[[2]string{"default", "non_idempotent"}], + GetServerConfig: retry[[2]string{"default", "idempotent"}], + ListNodePools: retry[[2]string{"default", "idempotent"}], + GetNodePool: retry[[2]string{"default", "idempotent"}], + CreateNodePool: retry[[2]string{"default", "non_idempotent"}], + DeleteNodePool: retry[[2]string{"default", "idempotent"}], + 
RollbackNodePoolUpgrade: retry[[2]string{"default", "non_idempotent"}], + SetNodePoolManagement: retry[[2]string{"default", "non_idempotent"}], + SetLabels: retry[[2]string{"default", "non_idempotent"}], + SetLegacyAbac: retry[[2]string{"default", "non_idempotent"}], + StartIPRotation: retry[[2]string{"default", "non_idempotent"}], + CompleteIPRotation: retry[[2]string{"default", "non_idempotent"}], + SetNodePoolSize: retry[[2]string{"default", "non_idempotent"}], + SetNetworkPolicy: retry[[2]string{"default", "non_idempotent"}], + SetMaintenancePolicy: retry[[2]string{"default", "non_idempotent"}], + } +} + +// ClusterManagerClient is a client for interacting with Google Container Engine API. +type ClusterManagerClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + clusterManagerClient containerpb.ClusterManagerClient + + // The call options for this service. + CallOptions *ClusterManagerCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClusterManagerClient creates a new cluster manager client. +// +// Google Container Engine Cluster Manager v1 +func NewClusterManagerClient(ctx context.Context, opts ...option.ClientOption) (*ClusterManagerClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClusterManagerClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &ClusterManagerClient{ + conn: conn, + CallOptions: defaultClusterManagerCallOptions(), + + clusterManagerClient: containerpb.NewClusterManagerClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *ClusterManagerClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ClusterManagerClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *ClusterManagerClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListClusters lists all clusters owned by a project in either the specified zone or all +// zones. +func (c *ClusterManagerClient) ListClusters(ctx context.Context, req *containerpb.ListClustersRequest, opts ...gax.CallOption) (*containerpb.ListClustersResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListClusters[0:len(c.CallOptions.ListClusters):len(c.CallOptions.ListClusters)], opts...) + var resp *containerpb.ListClustersResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.ListClusters(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetCluster gets the details of a specific cluster. 
+func (c *ClusterManagerClient) GetCluster(ctx context.Context, req *containerpb.GetClusterRequest, opts ...gax.CallOption) (*containerpb.Cluster, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetCluster[0:len(c.CallOptions.GetCluster):len(c.CallOptions.GetCluster)], opts...) + var resp *containerpb.Cluster + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.GetCluster(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateCluster creates a cluster, consisting of the specified number and type of Google +// Compute Engine instances. +// +// By default, the cluster is created in the project's +// default network (at /compute/docs/networks-and-firewalls#networks). +// +// One firewall is added for the cluster. After cluster creation, +// the cluster creates routes for each node to allow the containers +// on that node to communicate with all other instances in the +// cluster. +// +// Finally, an entry is added to the project's global metadata indicating +// which CIDR range is being used by the cluster. +func (c *ClusterManagerClient) CreateCluster(ctx context.Context, req *containerpb.CreateClusterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateCluster[0:len(c.CallOptions.CreateCluster):len(c.CallOptions.CreateCluster)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.CreateCluster(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateCluster updates the settings of a specific cluster. +func (c *ClusterManagerClient) UpdateCluster(ctx context.Context, req *containerpb.UpdateClusterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateCluster[0:len(c.CallOptions.UpdateCluster):len(c.CallOptions.UpdateCluster)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.UpdateCluster(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateNodePool updates the version and/or image type of a specific node pool. +func (c *ClusterManagerClient) UpdateNodePool(ctx context.Context, req *containerpb.UpdateNodePoolRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateNodePool[0:len(c.CallOptions.UpdateNodePool):len(c.CallOptions.UpdateNodePool)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.UpdateNodePool(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SetNodePoolAutoscaling sets the autoscaling settings of a specific node pool. 
+func (c *ClusterManagerClient) SetNodePoolAutoscaling(ctx context.Context, req *containerpb.SetNodePoolAutoscalingRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetNodePoolAutoscaling[0:len(c.CallOptions.SetNodePoolAutoscaling):len(c.CallOptions.SetNodePoolAutoscaling)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.SetNodePoolAutoscaling(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SetLoggingService sets the logging service of a specific cluster. +func (c *ClusterManagerClient) SetLoggingService(ctx context.Context, req *containerpb.SetLoggingServiceRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetLoggingService[0:len(c.CallOptions.SetLoggingService):len(c.CallOptions.SetLoggingService)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.SetLoggingService(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SetMonitoringService sets the monitoring service of a specific cluster. +func (c *ClusterManagerClient) SetMonitoringService(ctx context.Context, req *containerpb.SetMonitoringServiceRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetMonitoringService[0:len(c.CallOptions.SetMonitoringService):len(c.CallOptions.SetMonitoringService)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.SetMonitoringService(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SetAddonsConfig sets the addons of a specific cluster. +func (c *ClusterManagerClient) SetAddonsConfig(ctx context.Context, req *containerpb.SetAddonsConfigRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetAddonsConfig[0:len(c.CallOptions.SetAddonsConfig):len(c.CallOptions.SetAddonsConfig)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.SetAddonsConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SetLocations sets the locations of a specific cluster. +func (c *ClusterManagerClient) SetLocations(ctx context.Context, req *containerpb.SetLocationsRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetLocations[0:len(c.CallOptions.SetLocations):len(c.CallOptions.SetLocations)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.SetLocations(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// UpdateMaster updates the master of a specific cluster.
+func (c *ClusterManagerClient) UpdateMaster(ctx context.Context, req *containerpb.UpdateMasterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.UpdateMaster[0:len(c.CallOptions.UpdateMaster):len(c.CallOptions.UpdateMaster)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.UpdateMaster(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// SetMasterAuth sets master auth materials. Currently it supports only
+// changing the admin password of a specific cluster; the password can
+// be either generated or explicitly set.
+func (c *ClusterManagerClient) SetMasterAuth(ctx context.Context, req *containerpb.SetMasterAuthRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.SetMasterAuth[0:len(c.CallOptions.SetMasterAuth):len(c.CallOptions.SetMasterAuth)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.SetMasterAuth(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// DeleteCluster deletes the cluster, including the Kubernetes endpoint and all worker
+// nodes.
+//
+// Firewalls and routes that were configured during cluster creation
+// are also deleted.
+//
+// Other Google Compute Engine resources that might be in use by the cluster
+// (e.g. load balancer resources) will not be deleted if they weren't present
+// at the initial create time.
+func (c *ClusterManagerClient) DeleteCluster(ctx context.Context, req *containerpb.DeleteClusterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.DeleteCluster[0:len(c.CallOptions.DeleteCluster):len(c.CallOptions.DeleteCluster)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.DeleteCluster(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// ListOperations lists all operations in a project in a specific zone or all zones.
+func (c *ClusterManagerClient) ListOperations(ctx context.Context, req *containerpb.ListOperationsRequest, opts ...gax.CallOption) (*containerpb.ListOperationsResponse, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.ListOperations[0:len(c.CallOptions.ListOperations):len(c.CallOptions.ListOperations)], opts...)
+ var resp *containerpb.ListOperationsResponse
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.ListOperations(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// GetOperation gets the specified operation.
+func (c *ClusterManagerClient) GetOperation(ctx context.Context, req *containerpb.GetOperationRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetOperation[0:len(c.CallOptions.GetOperation):len(c.CallOptions.GetOperation)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.GetOperation(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CancelOperation cancels the specified operation. +func (c *ClusterManagerClient) CancelOperation(ctx context.Context, req *containerpb.CancelOperationRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CancelOperation[0:len(c.CallOptions.CancelOperation):len(c.CallOptions.CancelOperation)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.clusterManagerClient.CancelOperation(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// GetServerConfig returns configuration info about the Container Engine service. +func (c *ClusterManagerClient) GetServerConfig(ctx context.Context, req *containerpb.GetServerConfigRequest, opts ...gax.CallOption) (*containerpb.ServerConfig, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetServerConfig[0:len(c.CallOptions.GetServerConfig):len(c.CallOptions.GetServerConfig)], opts...) + var resp *containerpb.ServerConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.GetServerConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListNodePools lists the node pools for a cluster. +func (c *ClusterManagerClient) ListNodePools(ctx context.Context, req *containerpb.ListNodePoolsRequest, opts ...gax.CallOption) (*containerpb.ListNodePoolsResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListNodePools[0:len(c.CallOptions.ListNodePools):len(c.CallOptions.ListNodePools)], opts...) + var resp *containerpb.ListNodePoolsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.ListNodePools(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetNodePool retrieves the node pool requested. +func (c *ClusterManagerClient) GetNodePool(ctx context.Context, req *containerpb.GetNodePoolRequest, opts ...gax.CallOption) (*containerpb.NodePool, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetNodePool[0:len(c.CallOptions.GetNodePool):len(c.CallOptions.GetNodePool)], opts...) + var resp *containerpb.NodePool + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.GetNodePool(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateNodePool creates a node pool for a cluster. 
+func (c *ClusterManagerClient) CreateNodePool(ctx context.Context, req *containerpb.CreateNodePoolRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.CreateNodePool[0:len(c.CallOptions.CreateNodePool):len(c.CallOptions.CreateNodePool)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.CreateNodePool(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// DeleteNodePool deletes a node pool from a cluster.
+func (c *ClusterManagerClient) DeleteNodePool(ctx context.Context, req *containerpb.DeleteNodePoolRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.DeleteNodePool[0:len(c.CallOptions.DeleteNodePool):len(c.CallOptions.DeleteNodePool)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.DeleteNodePool(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// RollbackNodePoolUpgrade rolls back a previously Aborted or Failed NodePool upgrade.
+// This is a no-op if the last upgrade successfully completed.
+func (c *ClusterManagerClient) RollbackNodePoolUpgrade(ctx context.Context, req *containerpb.RollbackNodePoolUpgradeRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.RollbackNodePoolUpgrade[0:len(c.CallOptions.RollbackNodePoolUpgrade):len(c.CallOptions.RollbackNodePoolUpgrade)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.RollbackNodePoolUpgrade(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// SetNodePoolManagement sets the NodeManagement options for a node pool.
+func (c *ClusterManagerClient) SetNodePoolManagement(ctx context.Context, req *containerpb.SetNodePoolManagementRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.SetNodePoolManagement[0:len(c.CallOptions.SetNodePoolManagement):len(c.CallOptions.SetNodePoolManagement)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.SetNodePoolManagement(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// SetLabels sets labels on a cluster.
+func (c *ClusterManagerClient) SetLabels(ctx context.Context, req *containerpb.SetLabelsRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.SetLabels[0:len(c.CallOptions.SetLabels):len(c.CallOptions.SetLabels)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.SetLabels(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// SetLegacyAbac enables or disables the ABAC authorization mechanism on a cluster.
+func (c *ClusterManagerClient) SetLegacyAbac(ctx context.Context, req *containerpb.SetLegacyAbacRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.SetLegacyAbac[0:len(c.CallOptions.SetLegacyAbac):len(c.CallOptions.SetLegacyAbac)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.SetLegacyAbac(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// StartIPRotation starts master IP rotation.
+func (c *ClusterManagerClient) StartIPRotation(ctx context.Context, req *containerpb.StartIPRotationRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.StartIPRotation[0:len(c.CallOptions.StartIPRotation):len(c.CallOptions.StartIPRotation)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.StartIPRotation(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// CompleteIPRotation completes master IP rotation.
+func (c *ClusterManagerClient) CompleteIPRotation(ctx context.Context, req *containerpb.CompleteIPRotationRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.CompleteIPRotation[0:len(c.CallOptions.CompleteIPRotation):len(c.CallOptions.CompleteIPRotation)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.CompleteIPRotation(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// SetNodePoolSize sets the size of a specific node pool.
+func (c *ClusterManagerClient) SetNodePoolSize(ctx context.Context, req *containerpb.SetNodePoolSizeRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.SetNodePoolSize[0:len(c.CallOptions.SetNodePoolSize):len(c.CallOptions.SetNodePoolSize)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.SetNodePoolSize(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// SetNetworkPolicy enables or disables Network Policy for a cluster.
+func (c *ClusterManagerClient) SetNetworkPolicy(ctx context.Context, req *containerpb.SetNetworkPolicyRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.SetNetworkPolicy[0:len(c.CallOptions.SetNetworkPolicy):len(c.CallOptions.SetNetworkPolicy)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.SetNetworkPolicy(ctx, req, settings.GRPC...)
+ return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SetMaintenancePolicy sets the maintenance policy for a cluster. +func (c *ClusterManagerClient) SetMaintenancePolicy(ctx context.Context, req *containerpb.SetMaintenancePolicyRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetMaintenancePolicy[0:len(c.CallOptions.SetMaintenancePolicy):len(c.CallOptions.SetMaintenancePolicy)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.SetMaintenancePolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/container/apiv1/cluster_manager_client_example_test.go b/vendor/cloud.google.com/go/container/apiv1/cluster_manager_client_example_test.go new file mode 100644 index 0000000..e76586a --- /dev/null +++ b/vendor/cloud.google.com/go/container/apiv1/cluster_manager_client_example_test.go @@ -0,0 +1,571 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package container_test + +import ( + "cloud.google.com/go/container/apiv1" + "golang.org/x/net/context" + containerpb "google.golang.org/genproto/googleapis/container/v1" +) + +func ExampleNewClusterManagerClient() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClusterManagerClient_ListClusters() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.ListClustersRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ListClusters(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_GetCluster() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.GetClusterRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetCluster(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_CreateCluster() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.CreateClusterRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateCluster(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_UpdateCluster() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. 
+ } + + req := &containerpb.UpdateClusterRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateCluster(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_UpdateNodePool() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.UpdateNodePoolRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateNodePool(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_SetNodePoolAutoscaling() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.SetNodePoolAutoscalingRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetNodePoolAutoscaling(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_SetLoggingService() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.SetLoggingServiceRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetLoggingService(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_SetMonitoringService() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.SetMonitoringServiceRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetMonitoringService(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_SetAddonsConfig() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.SetAddonsConfigRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetAddonsConfig(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_SetLocations() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.SetLocationsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetLocations(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_UpdateMaster() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.UpdateMasterRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateMaster(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_SetMasterAuth() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.SetMasterAuthRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetMasterAuth(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp +} + +func ExampleClusterManagerClient_DeleteCluster() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.DeleteClusterRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.DeleteCluster(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_ListOperations() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.ListOperationsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ListOperations(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_GetOperation() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.GetOperationRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetOperation(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_CancelOperation() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.CancelOperationRequest{ + // TODO: Fill request struct fields. + } + err = c.CancelOperation(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClusterManagerClient_GetServerConfig() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.GetServerConfigRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetServerConfig(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_ListNodePools() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.ListNodePoolsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ListNodePools(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_GetNodePool() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.GetNodePoolRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetNodePool(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_CreateNodePool() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.CreateNodePoolRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateNodePool(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_DeleteNodePool() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.DeleteNodePoolRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.DeleteNodePool(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp +} + +func ExampleClusterManagerClient_RollbackNodePoolUpgrade() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.RollbackNodePoolUpgradeRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.RollbackNodePoolUpgrade(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_SetNodePoolManagement() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.SetNodePoolManagementRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetNodePoolManagement(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_SetLabels() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.SetLabelsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetLabels(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_SetLegacyAbac() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.SetLegacyAbacRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetLegacyAbac(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_StartIPRotation() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.StartIPRotationRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.StartIPRotation(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_CompleteIPRotation() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.CompleteIPRotationRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CompleteIPRotation(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_SetNodePoolSize() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.SetNodePoolSizeRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetNodePoolSize(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_SetNetworkPolicy() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.SetNetworkPolicyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetNetworkPolicy(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_SetMaintenancePolicy() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.SetMaintenancePolicyRequest{ + // TODO: Fill request struct fields. 
+ }
+ resp, err := c.SetMaintenancePolicy(ctx, req)
+ if err != nil {
+ // TODO: Handle error.
+ }
+ // TODO: Use resp.
+ _ = resp
+}
diff --git a/vendor/cloud.google.com/go/container/apiv1/doc.go b/vendor/cloud.google.com/go/container/apiv1/doc.go
new file mode 100644
index 0000000..a963842
--- /dev/null
+++ b/vendor/cloud.google.com/go/container/apiv1/doc.go
@@ -0,0 +1,48 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+// Package container is an auto-generated package for the
+// Google Container Engine API.
+//
+// NOTE: This package is in alpha. It is not stable, and is likely to change.
+//
+// The Google Kubernetes Engine API is used for building and managing
+// container-based applications, powered by the open source Kubernetes
+// technology.
package container // import "cloud.google.com/go/container/apiv1"
+
+import (
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/metadata"
+)
+
+func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
+ out, _ := metadata.FromOutgoingContext(ctx)
+ out = out.Copy()
+ for _, md := range mds {
+ for k, v := range md {
+ out[k] = append(out[k], v...)
+ }
+ }
+ return metadata.NewOutgoingContext(ctx, out)
+}
+
+// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
+func DefaultAuthScopes() []string {
+ return []string{
+ "https://www.googleapis.com/auth/cloud-platform",
+ }
+}
diff --git a/vendor/cloud.google.com/go/container/apiv1/mock_test.go b/vendor/cloud.google.com/go/container/apiv1/mock_test.go
new file mode 100644
index 0000000..002f755
--- /dev/null
+++ b/vendor/cloud.google.com/go/container/apiv1/mock_test.go
@@ -0,0 +1,2912 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
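
[Editorial sketch, not part of the patch: what insertMetadata in doc.go above does with outgoing gRPC metadata. It merges the client's x-goog-api-client pairs into whatever metadata the caller already attached, rather than replacing it; the "custom-key" pair and the header value are illustrative only.]

    import (
        "golang.org/x/net/context"
        "google.golang.org/grpc/metadata"
    )

    func example() {
        // Caller-supplied metadata...
        ctx := metadata.NewOutgoingContext(context.Background(),
            metadata.Pairs("custom-key", "v1")) // illustrative key
        // ...survives the merge; the client's header is appended alongside it.
        ctx = insertMetadata(ctx, metadata.Pairs("x-goog-api-client", "gl-go/x"))
        md, _ := metadata.FromOutgoingContext(ctx)
        _ = md["custom-key"]        // ["v1"]
        _ = md["x-goog-api-client"] // ["gl-go/x"]
    }
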
+ +package container + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + containerpb "google.golang.org/genproto/googleapis/container/v1" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockClusterManagerServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + containerpb.ClusterManagerServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockClusterManagerServer) ListClusters(ctx context.Context, req *containerpb.ListClustersRequest) (*containerpb.ListClustersResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.ListClustersResponse), nil +} + +func (s *mockClusterManagerServer) GetCluster(ctx context.Context, req *containerpb.GetClusterRequest) (*containerpb.Cluster, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Cluster), nil +} + +func (s *mockClusterManagerServer) CreateCluster(ctx context.Context, req *containerpb.CreateClusterRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) UpdateCluster(ctx context.Context, req *containerpb.UpdateClusterRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) UpdateNodePool(ctx context.Context, req *containerpb.UpdateNodePoolRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) SetNodePoolAutoscaling(ctx context.Context, req 
*containerpb.SetNodePoolAutoscalingRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) SetLoggingService(ctx context.Context, req *containerpb.SetLoggingServiceRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) SetMonitoringService(ctx context.Context, req *containerpb.SetMonitoringServiceRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) SetAddonsConfig(ctx context.Context, req *containerpb.SetAddonsConfigRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) SetLocations(ctx context.Context, req *containerpb.SetLocationsRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) UpdateMaster(ctx context.Context, req *containerpb.UpdateMasterRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) SetMasterAuth(ctx context.Context, req *containerpb.SetMasterAuthRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) DeleteCluster(ctx context.Context, req *containerpb.DeleteClusterRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) 
+ if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) ListOperations(ctx context.Context, req *containerpb.ListOperationsRequest) (*containerpb.ListOperationsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.ListOperationsResponse), nil +} + +func (s *mockClusterManagerServer) GetOperation(ctx context.Context, req *containerpb.GetOperationRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) CancelOperation(ctx context.Context, req *containerpb.CancelOperationRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockClusterManagerServer) GetServerConfig(ctx context.Context, req *containerpb.GetServerConfigRequest) (*containerpb.ServerConfig, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.ServerConfig), nil +} + +func (s *mockClusterManagerServer) ListNodePools(ctx context.Context, req *containerpb.ListNodePoolsRequest) (*containerpb.ListNodePoolsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.ListNodePoolsResponse), nil +} + +func (s *mockClusterManagerServer) GetNodePool(ctx context.Context, req *containerpb.GetNodePoolRequest) (*containerpb.NodePool, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.NodePool), nil +} + +func (s *mockClusterManagerServer) CreateNodePool(ctx context.Context, req *containerpb.CreateNodePoolRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return 
nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) DeleteNodePool(ctx context.Context, req *containerpb.DeleteNodePoolRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) RollbackNodePoolUpgrade(ctx context.Context, req *containerpb.RollbackNodePoolUpgradeRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) SetNodePoolManagement(ctx context.Context, req *containerpb.SetNodePoolManagementRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) SetLabels(ctx context.Context, req *containerpb.SetLabelsRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) SetLegacyAbac(ctx context.Context, req *containerpb.SetLegacyAbacRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) StartIPRotation(ctx context.Context, req *containerpb.StartIPRotationRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) CompleteIPRotation(ctx context.Context, req *containerpb.CompleteIPRotationRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) 
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*containerpb.Operation), nil
+}
+
+func (s *mockClusterManagerServer) SetNodePoolSize(ctx context.Context, req *containerpb.SetNodePoolSizeRequest) (*containerpb.Operation, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*containerpb.Operation), nil
+}
+
+func (s *mockClusterManagerServer) SetNetworkPolicy(ctx context.Context, req *containerpb.SetNetworkPolicyRequest) (*containerpb.Operation, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*containerpb.Operation), nil
+}
+
+func (s *mockClusterManagerServer) SetMaintenancePolicy(ctx context.Context, req *containerpb.SetMaintenancePolicyRequest) (*containerpb.Operation, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*containerpb.Operation), nil
+}
+
+// clientOpt is the option tests should use to connect to the test server.
+// It is initialized by TestMain.
+var clientOpt option.ClientOption
+
+var (
+	mockClusterManager mockClusterManagerServer
+)
+
+func TestMain(m *testing.M) {
+	flag.Parse()
+
+	serv := grpc.NewServer()
+	containerpb.RegisterClusterManagerServer(serv, &mockClusterManager)
+
+	lis, err := net.Listen("tcp", "localhost:0")
+	if err != nil {
+		log.Fatal(err)
+	}
+	go serv.Serve(lis)
+
+	conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
+	if err != nil {
+		log.Fatal(err)
+	}
+	clientOpt = option.WithGRPCConn(conn)
+
+	os.Exit(m.Run())
+}
+
+func TestClusterManagerListClusters(t *testing.T) {
+	var expectedResponse *containerpb.ListClustersResponse = &containerpb.ListClustersResponse{}
+
+	mockClusterManager.err = nil
+	mockClusterManager.reqs = nil
+
+	mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse)
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var request = &containerpb.ListClustersRequest{
+		ProjectId: projectId,
+		Zone: zone,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.ListClusters(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestClusterManagerListClustersError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockClusterManager.err = gstatus.Error(errCode, "test error")
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var request = &containerpb.ListClustersRequest{
+		ProjectId: projectId,
+		Zone: zone,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.ListClusters(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestClusterManagerGetCluster(t *testing.T) {
+	var name string = "name3373707"
+	var description string = "description-1724546052"
+	var initialNodeCount int32 = 1682564205
+	var loggingService string = "loggingService-1700501035"
+	var monitoringService string = "monitoringService1469270462"
+	var network string = "network1843485230"
+	var clusterIpv4Cidr string = "clusterIpv4Cidr-141875831"
+	var subnetwork string = "subnetwork-1302785042"
+	var enableKubernetesAlpha bool = false
+	var labelFingerprint string = "labelFingerprint714995737"
+	var selfLink string = "selfLink-1691268851"
+	var zone2 string = "zone2-696322977"
+	var endpoint string = "endpoint1741102485"
+	var initialClusterVersion string = "initialClusterVersion-276373352"
+	var currentMasterVersion string = "currentMasterVersion-920953983"
+	var currentNodeVersion string = "currentNodeVersion-407476063"
+	var createTime string = "createTime-493574096"
+	var statusMessage string = "statusMessage-239442758"
+	var nodeIpv4CidrSize int32 = 1181176815
+	var servicesIpv4Cidr string = "servicesIpv4Cidr1966438125"
+	var currentNodeCount int32 = 178977560
+	var expireTime string = "expireTime-96179731"
+	var expectedResponse = &containerpb.Cluster{
+		Name: name,
+		Description: description,
+		InitialNodeCount: initialNodeCount,
+		LoggingService: loggingService,
+		MonitoringService: monitoringService,
+		Network: network,
+		ClusterIpv4Cidr: clusterIpv4Cidr,
+		Subnetwork: subnetwork,
+		EnableKubernetesAlpha: enableKubernetesAlpha,
+		LabelFingerprint: labelFingerprint,
+		SelfLink: selfLink,
+		Zone: zone2,
+		Endpoint: endpoint,
+		InitialClusterVersion: initialClusterVersion,
+		CurrentMasterVersion: currentMasterVersion,
+		CurrentNodeVersion: currentNodeVersion,
+		CreateTime: createTime,
+		StatusMessage: statusMessage,
+		NodeIpv4CidrSize: nodeIpv4CidrSize,
+		ServicesIpv4Cidr: servicesIpv4Cidr,
+		CurrentNodeCount: currentNodeCount,
+		ExpireTime: expireTime,
+	}
+
+	mockClusterManager.err = nil
+	mockClusterManager.reqs = nil
+
+	mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse)
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var clusterId string = "clusterId240280960"
+	var request = &containerpb.GetClusterRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		ClusterId: clusterId,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.GetCluster(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestClusterManagerGetClusterError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockClusterManager.err = gstatus.Error(errCode, "test error")
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var clusterId string = "clusterId240280960"
+	var request = &containerpb.GetClusterRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		ClusterId: clusterId,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.GetCluster(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestClusterManagerCreateCluster(t *testing.T) {
+	var name string = "name3373707"
+	var zone2 string = "zone2-696322977"
+	var detail string = "detail-1335224239"
+	var statusMessage string = "statusMessage-239442758"
+	var selfLink string = "selfLink-1691268851"
+	var targetLink string = "targetLink-2084812312"
+	var startTime string = "startTime-1573145462"
+	var endTime string = "endTime1725551537"
+	var expectedResponse = &containerpb.Operation{
+		Name: name,
+		Zone: zone2,
+		Detail: detail,
+		StatusMessage: statusMessage,
+		SelfLink: selfLink,
+		TargetLink: targetLink,
+		StartTime: startTime,
+		EndTime: endTime,
+	}
+
+	mockClusterManager.err = nil
+	mockClusterManager.reqs = nil
+
+	mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse)
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var cluster *containerpb.Cluster = &containerpb.Cluster{}
+	var request = &containerpb.CreateClusterRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		Cluster: cluster,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.CreateCluster(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestClusterManagerCreateClusterError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockClusterManager.err = gstatus.Error(errCode, "test error")
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var cluster *containerpb.Cluster = &containerpb.Cluster{}
+	var request = &containerpb.CreateClusterRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		Cluster: cluster,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.CreateCluster(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestClusterManagerUpdateCluster(t *testing.T) {
+	var name string = "name3373707"
+	var zone2 string = "zone2-696322977"
+	var detail string = "detail-1335224239"
+	var statusMessage string = "statusMessage-239442758"
+	var selfLink string = "selfLink-1691268851"
+	var targetLink string = "targetLink-2084812312"
+	var startTime string = "startTime-1573145462"
+	var endTime string = "endTime1725551537"
+	var expectedResponse = &containerpb.Operation{
+		Name: name,
+		Zone: zone2,
+		Detail: detail,
+		StatusMessage: statusMessage,
+		SelfLink: selfLink,
+		TargetLink: targetLink,
+		StartTime: startTime,
+		EndTime: endTime,
+	}
+
+	mockClusterManager.err = nil
+	mockClusterManager.reqs = nil
+
+	mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse)
+
+	var projectId string = "projectId-1969970175"
string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var update *containerpb.ClusterUpdate = &containerpb.ClusterUpdate{} + var request = &containerpb.UpdateClusterRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + Update: update, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateCluster(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerUpdateClusterError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var update *containerpb.ClusterUpdate = &containerpb.ClusterUpdate{} + var request = &containerpb.UpdateClusterRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + Update: update, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateCluster(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerUpdateNodePool(t *testing.T) { + var name string = "name3373707" + var zone2 string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var nodePoolId string = "nodePoolId1043384033" + var nodeVersion string = "nodeVersion1790136219" + var imageType string = "imageType-1442758754" + var request = &containerpb.UpdateNodePoolRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + NodePoolId: nodePoolId, + NodeVersion: nodeVersion, + ImageType: imageType, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateNodePool(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerUpdateNodePoolError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = 
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var clusterId string = "clusterId240280960"
+	var nodePoolId string = "nodePoolId1043384033"
+	var nodeVersion string = "nodeVersion1790136219"
+	var imageType string = "imageType-1442758754"
+	var request = &containerpb.UpdateNodePoolRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		ClusterId: clusterId,
+		NodePoolId: nodePoolId,
+		NodeVersion: nodeVersion,
+		ImageType: imageType,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.UpdateNodePool(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestClusterManagerSetNodePoolAutoscaling(t *testing.T) {
+	var name string = "name3373707"
+	var zone2 string = "zone2-696322977"
+	var detail string = "detail-1335224239"
+	var statusMessage string = "statusMessage-239442758"
+	var selfLink string = "selfLink-1691268851"
+	var targetLink string = "targetLink-2084812312"
+	var startTime string = "startTime-1573145462"
+	var endTime string = "endTime1725551537"
+	var expectedResponse = &containerpb.Operation{
+		Name: name,
+		Zone: zone2,
+		Detail: detail,
+		StatusMessage: statusMessage,
+		SelfLink: selfLink,
+		TargetLink: targetLink,
+		StartTime: startTime,
+		EndTime: endTime,
+	}
+
+	mockClusterManager.err = nil
+	mockClusterManager.reqs = nil
+
+	mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse)
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var clusterId string = "clusterId240280960"
+	var nodePoolId string = "nodePoolId1043384033"
+	var autoscaling *containerpb.NodePoolAutoscaling = &containerpb.NodePoolAutoscaling{}
+	var request = &containerpb.SetNodePoolAutoscalingRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		ClusterId: clusterId,
+		NodePoolId: nodePoolId,
+		Autoscaling: autoscaling,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.SetNodePoolAutoscaling(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestClusterManagerSetNodePoolAutoscalingError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockClusterManager.err = gstatus.Error(errCode, "test error")
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var clusterId string = "clusterId240280960"
+	var nodePoolId string = "nodePoolId1043384033"
+	var autoscaling *containerpb.NodePoolAutoscaling = &containerpb.NodePoolAutoscaling{}
+	var request = &containerpb.SetNodePoolAutoscalingRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		ClusterId: clusterId,
+		NodePoolId: nodePoolId,
+		Autoscaling: autoscaling,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.SetNodePoolAutoscaling(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestClusterManagerSetLoggingService(t *testing.T) {
+	var name string = "name3373707"
+	var zone2 string = "zone2-696322977"
+	var detail string = "detail-1335224239"
+	var statusMessage string = "statusMessage-239442758"
+	var selfLink string = "selfLink-1691268851"
+	var targetLink string = "targetLink-2084812312"
+	var startTime string = "startTime-1573145462"
+	var endTime string = "endTime1725551537"
+	var expectedResponse = &containerpb.Operation{
+		Name: name,
+		Zone: zone2,
+		Detail: detail,
+		StatusMessage: statusMessage,
+		SelfLink: selfLink,
+		TargetLink: targetLink,
+		StartTime: startTime,
+		EndTime: endTime,
+	}
+
+	mockClusterManager.err = nil
+	mockClusterManager.reqs = nil
+
+	mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse)
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var clusterId string = "clusterId240280960"
+	var loggingService string = "loggingService-1700501035"
+	var request = &containerpb.SetLoggingServiceRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		ClusterId: clusterId,
+		LoggingService: loggingService,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.SetLoggingService(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestClusterManagerSetLoggingServiceError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockClusterManager.err = gstatus.Error(errCode, "test error")
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var clusterId string = "clusterId240280960"
+	var loggingService string = "loggingService-1700501035"
+	var request = &containerpb.SetLoggingServiceRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		ClusterId: clusterId,
+		LoggingService: loggingService,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.SetLoggingService(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestClusterManagerSetMonitoringService(t *testing.T) {
+	var name string = "name3373707"
+	var zone2 string = "zone2-696322977"
+	var detail string = "detail-1335224239"
+	var statusMessage string = "statusMessage-239442758"
+	var selfLink string = "selfLink-1691268851"
+	var targetLink string = "targetLink-2084812312"
+	var startTime string = "startTime-1573145462"
+	var endTime string = "endTime1725551537"
+	var expectedResponse = &containerpb.Operation{
+		Name: name,
+		Zone: zone2,
+		Detail: detail,
+		StatusMessage: statusMessage,
+		SelfLink: selfLink,
+		TargetLink: targetLink,
+		StartTime: startTime,
+		EndTime: endTime,
+	}
+
+	mockClusterManager.err = nil
+	mockClusterManager.reqs = nil
+
+	mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse)
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var clusterId string = "clusterId240280960"
+	var monitoringService string = "monitoringService1469270462"
= "monitoringService1469270462" + var request = &containerpb.SetMonitoringServiceRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + MonitoringService: monitoringService, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetMonitoringService(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerSetMonitoringServiceError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var monitoringService string = "monitoringService1469270462" + var request = &containerpb.SetMonitoringServiceRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + MonitoringService: monitoringService, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetMonitoringService(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerSetAddonsConfig(t *testing.T) { + var name string = "name3373707" + var zone2 string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var addonsConfig *containerpb.AddonsConfig = &containerpb.AddonsConfig{} + var request = &containerpb.SetAddonsConfigRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + AddonsConfig: addonsConfig, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetAddonsConfig(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerSetAddonsConfigError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var addonsConfig 
+	var request = &containerpb.SetAddonsConfigRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		ClusterId: clusterId,
+		AddonsConfig: addonsConfig,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.SetAddonsConfig(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestClusterManagerSetLocations(t *testing.T) {
+	var name string = "name3373707"
+	var zone2 string = "zone2-696322977"
+	var detail string = "detail-1335224239"
+	var statusMessage string = "statusMessage-239442758"
+	var selfLink string = "selfLink-1691268851"
+	var targetLink string = "targetLink-2084812312"
+	var startTime string = "startTime-1573145462"
+	var endTime string = "endTime1725551537"
+	var expectedResponse = &containerpb.Operation{
+		Name: name,
+		Zone: zone2,
+		Detail: detail,
+		StatusMessage: statusMessage,
+		SelfLink: selfLink,
+		TargetLink: targetLink,
+		StartTime: startTime,
+		EndTime: endTime,
+	}
+
+	mockClusterManager.err = nil
+	mockClusterManager.reqs = nil
+
+	mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse)
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var clusterId string = "clusterId240280960"
+	var locations []string = nil
+	var request = &containerpb.SetLocationsRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		ClusterId: clusterId,
+		Locations: locations,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.SetLocations(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestClusterManagerSetLocationsError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockClusterManager.err = gstatus.Error(errCode, "test error")
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var clusterId string = "clusterId240280960"
+	var locations []string = nil
+	var request = &containerpb.SetLocationsRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		ClusterId: clusterId,
+		Locations: locations,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.SetLocations(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestClusterManagerUpdateMaster(t *testing.T) {
+	var name string = "name3373707"
+	var zone2 string = "zone2-696322977"
+	var detail string = "detail-1335224239"
+	var statusMessage string = "statusMessage-239442758"
+	var selfLink string = "selfLink-1691268851"
+	var targetLink string = "targetLink-2084812312"
+	var startTime string = "startTime-1573145462"
+	var endTime string = "endTime1725551537"
+	var expectedResponse = &containerpb.Operation{
+		Name: name,
+		Zone: zone2,
+		Detail: detail,
+		StatusMessage: statusMessage,
+		SelfLink: selfLink,
+		TargetLink: targetLink,
+		StartTime: startTime,
+		EndTime: endTime,
+	}
+
+	mockClusterManager.err = nil
+	mockClusterManager.reqs = nil
+
+	mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse)
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var clusterId string = "clusterId240280960"
+	var masterVersion string = "masterVersion-2139460613"
+	var request = &containerpb.UpdateMasterRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		ClusterId: clusterId,
+		MasterVersion: masterVersion,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.UpdateMaster(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestClusterManagerUpdateMasterError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockClusterManager.err = gstatus.Error(errCode, "test error")
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var clusterId string = "clusterId240280960"
+	var masterVersion string = "masterVersion-2139460613"
+	var request = &containerpb.UpdateMasterRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		ClusterId: clusterId,
+		MasterVersion: masterVersion,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.UpdateMaster(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestClusterManagerSetMasterAuth(t *testing.T) {
+	var name string = "name3373707"
+	var zone2 string = "zone2-696322977"
+	var detail string = "detail-1335224239"
+	var statusMessage string = "statusMessage-239442758"
+	var selfLink string = "selfLink-1691268851"
+	var targetLink string = "targetLink-2084812312"
+	var startTime string = "startTime-1573145462"
+	var endTime string = "endTime1725551537"
+	var expectedResponse = &containerpb.Operation{
+		Name: name,
+		Zone: zone2,
+		Detail: detail,
+		StatusMessage: statusMessage,
+		SelfLink: selfLink,
+		TargetLink: targetLink,
+		StartTime: startTime,
+		EndTime: endTime,
+	}
+
+	mockClusterManager.err = nil
+	mockClusterManager.reqs = nil
+
+	mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse)
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var clusterId string = "clusterId240280960"
+	var action containerpb.SetMasterAuthRequest_Action = containerpb.SetMasterAuthRequest_UNKNOWN
+	var update *containerpb.MasterAuth = &containerpb.MasterAuth{}
+	var request = &containerpb.SetMasterAuthRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		ClusterId: clusterId,
+		Action: action,
+		Update: update,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.SetMasterAuth(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestClusterManagerSetMasterAuthError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockClusterManager.err = gstatus.Error(errCode, "test error")
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var clusterId string = "clusterId240280960"
+	var action containerpb.SetMasterAuthRequest_Action = containerpb.SetMasterAuthRequest_UNKNOWN
+	var update *containerpb.MasterAuth = &containerpb.MasterAuth{}
+	var request = &containerpb.SetMasterAuthRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		ClusterId: clusterId,
+		Action: action,
+		Update: update,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.SetMasterAuth(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestClusterManagerDeleteCluster(t *testing.T) {
+	var name string = "name3373707"
+	var zone2 string = "zone2-696322977"
+	var detail string = "detail-1335224239"
+	var statusMessage string = "statusMessage-239442758"
+	var selfLink string = "selfLink-1691268851"
+	var targetLink string = "targetLink-2084812312"
+	var startTime string = "startTime-1573145462"
+	var endTime string = "endTime1725551537"
+	var expectedResponse = &containerpb.Operation{
+		Name: name,
+		Zone: zone2,
+		Detail: detail,
+		StatusMessage: statusMessage,
+		SelfLink: selfLink,
+		TargetLink: targetLink,
+		StartTime: startTime,
+		EndTime: endTime,
+	}
+
+	mockClusterManager.err = nil
+	mockClusterManager.reqs = nil
+
+	mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse)
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var clusterId string = "clusterId240280960"
+	var request = &containerpb.DeleteClusterRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		ClusterId: clusterId,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.DeleteCluster(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestClusterManagerDeleteClusterError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockClusterManager.err = gstatus.Error(errCode, "test error")
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var clusterId string = "clusterId240280960"
+	var request = &containerpb.DeleteClusterRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		ClusterId: clusterId,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.DeleteCluster(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestClusterManagerListOperations(t *testing.T) {
+	var expectedResponse *containerpb.ListOperationsResponse = &containerpb.ListOperationsResponse{}
+
+	mockClusterManager.err = nil
+	mockClusterManager.reqs = nil
+
+	mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse)
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var request = &containerpb.ListOperationsRequest{
+		ProjectId: projectId,
+		Zone: zone,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.ListOperations(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestClusterManagerListOperationsError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockClusterManager.err = gstatus.Error(errCode, "test error")
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var request = &containerpb.ListOperationsRequest{
+		ProjectId: projectId,
+		Zone: zone,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.ListOperations(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestClusterManagerGetOperation(t *testing.T) {
+	var name string = "name3373707"
+	var zone2 string = "zone2-696322977"
+	var detail string = "detail-1335224239"
+	var statusMessage string = "statusMessage-239442758"
+	var selfLink string = "selfLink-1691268851"
+	var targetLink string = "targetLink-2084812312"
+	var startTime string = "startTime-1573145462"
+	var endTime string = "endTime1725551537"
+	var expectedResponse = &containerpb.Operation{
+		Name: name,
+		Zone: zone2,
+		Detail: detail,
+		StatusMessage: statusMessage,
+		SelfLink: selfLink,
+		TargetLink: targetLink,
+		StartTime: startTime,
+		EndTime: endTime,
+	}
+
+	mockClusterManager.err = nil
+	mockClusterManager.reqs = nil
+
+	mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse)
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var operationId string = "operationId-274116877"
+	var request = &containerpb.GetOperationRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		OperationId: operationId,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.GetOperation(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestClusterManagerGetOperationError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockClusterManager.err = gstatus.Error(errCode, "test error")
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var operationId string = "operationId-274116877"
+	var request = &containerpb.GetOperationRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		OperationId: operationId,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.GetOperation(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestClusterManagerCancelOperation(t *testing.T) {
+	var expectedResponse *emptypb.Empty = &emptypb.Empty{}
+
+	mockClusterManager.err = nil
+	mockClusterManager.reqs = nil
+
+	mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse)
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var operationId string = "operationId-274116877"
+	var request = &containerpb.CancelOperationRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		OperationId: operationId,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = c.CancelOperation(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+}
+
+func TestClusterManagerCancelOperationError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockClusterManager.err = gstatus.Error(errCode, "test error")
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var operationId string = "operationId-274116877"
+	var request = &containerpb.CancelOperationRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		OperationId: operationId,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = c.CancelOperation(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+}
+func TestClusterManagerGetServerConfig(t *testing.T) {
+	var defaultClusterVersion string = "defaultClusterVersion111003029"
+	var defaultImageType string = "defaultImageType-918225828"
+	var expectedResponse = &containerpb.ServerConfig{
+		DefaultClusterVersion: defaultClusterVersion,
+		DefaultImageType: defaultImageType,
+	}
+
+	mockClusterManager.err = nil
+	mockClusterManager.reqs = nil
+
+	mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse)
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var request = &containerpb.GetServerConfigRequest{
+		ProjectId: projectId,
+		Zone: zone,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.GetServerConfig(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestClusterManagerGetServerConfigError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockClusterManager.err = gstatus.Error(errCode, "test error")
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var request = &containerpb.GetServerConfigRequest{
+		ProjectId: projectId,
+		Zone: zone,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.GetServerConfig(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestClusterManagerListNodePools(t *testing.T) {
+	var expectedResponse *containerpb.ListNodePoolsResponse = &containerpb.ListNodePoolsResponse{}
+
+	mockClusterManager.err = nil
+	mockClusterManager.reqs = nil
+
+	mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse)
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var clusterId string = "clusterId240280960"
+	var request = &containerpb.ListNodePoolsRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		ClusterId: clusterId,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.ListNodePools(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestClusterManagerListNodePoolsError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockClusterManager.err = gstatus.Error(errCode, "test error")
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var clusterId string = "clusterId240280960"
+	var request = &containerpb.ListNodePoolsRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		ClusterId: clusterId,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.ListNodePools(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestClusterManagerGetNodePool(t *testing.T) {
+	var name string = "name3373707"
+	var initialNodeCount int32 = 1682564205
+	var selfLink string = "selfLink-1691268851"
+	var version string = "version351608024"
+	var statusMessage string = "statusMessage-239442758"
+	var expectedResponse = &containerpb.NodePool{
+		Name: name,
+		InitialNodeCount: initialNodeCount,
+		SelfLink: selfLink,
+		Version: version,
+		StatusMessage: statusMessage,
+	}
+
+	mockClusterManager.err = nil
+	mockClusterManager.reqs = nil
+
+	mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse)
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var clusterId string = "clusterId240280960"
+	var nodePoolId string = "nodePoolId1043384033"
+	var request = &containerpb.GetNodePoolRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		ClusterId: clusterId,
+		NodePoolId: nodePoolId,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.GetNodePool(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestClusterManagerGetNodePoolError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockClusterManager.err = gstatus.Error(errCode, "test error")
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var clusterId string = "clusterId240280960"
+	var nodePoolId string = "nodePoolId1043384033"
+	var request = &containerpb.GetNodePoolRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		ClusterId: clusterId,
+		NodePoolId: nodePoolId,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.GetNodePool(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestClusterManagerCreateNodePool(t *testing.T) {
+	var name string = "name3373707"
+	var zone2 string = "zone2-696322977"
+	var detail string = "detail-1335224239"
+	var statusMessage string = "statusMessage-239442758"
+	var selfLink string = "selfLink-1691268851"
+	var targetLink string = "targetLink-2084812312"
+	var startTime string = "startTime-1573145462"
+	var endTime string = "endTime1725551537"
+	var expectedResponse = &containerpb.Operation{
+		Name: name,
+		Zone: zone2,
+		Detail: detail,
+		StatusMessage: statusMessage,
+		SelfLink: selfLink,
+		TargetLink: targetLink,
+		StartTime: startTime,
+		EndTime: endTime,
+	}
+
+	mockClusterManager.err = nil
+	mockClusterManager.reqs = nil
+
+	mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse)
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var clusterId string = "clusterId240280960"
+	var nodePool *containerpb.NodePool = &containerpb.NodePool{}
+	var request = &containerpb.CreateNodePoolRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		ClusterId: clusterId,
+		NodePool: nodePool,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.CreateNodePool(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestClusterManagerCreateNodePoolError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockClusterManager.err = gstatus.Error(errCode, "test error")
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var clusterId string = "clusterId240280960"
+	var nodePool *containerpb.NodePool = &containerpb.NodePool{}
+	var request = &containerpb.CreateNodePoolRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		ClusterId: clusterId,
+		NodePool: nodePool,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.CreateNodePool(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestClusterManagerDeleteNodePool(t *testing.T) {
+	var name string = "name3373707"
+	var zone2 string = "zone2-696322977"
+	var detail string = "detail-1335224239"
+	var statusMessage string = "statusMessage-239442758"
+	var selfLink string = "selfLink-1691268851"
"selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var nodePoolId string = "nodePoolId1043384033" + var request = &containerpb.DeleteNodePoolRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + NodePoolId: nodePoolId, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.DeleteNodePool(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerDeleteNodePoolError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var nodePoolId string = "nodePoolId1043384033" + var request = &containerpb.DeleteNodePoolRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + NodePoolId: nodePoolId, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.DeleteNodePool(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerRollbackNodePoolUpgrade(t *testing.T) { + var name string = "name3373707" + var zone2 string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var nodePoolId string = "nodePoolId1043384033" + var request = &containerpb.RollbackNodePoolUpgradeRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + NodePoolId: nodePoolId, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.RollbackNodePoolUpgrade(context.Background(), request) + + if err != nil { + 
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestClusterManagerRollbackNodePoolUpgradeError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockClusterManager.err = gstatus.Error(errCode, "test error")
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var clusterId string = "clusterId240280960"
+	var nodePoolId string = "nodePoolId1043384033"
+	var request = &containerpb.RollbackNodePoolUpgradeRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		ClusterId: clusterId,
+		NodePoolId: nodePoolId,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.RollbackNodePoolUpgrade(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestClusterManagerSetNodePoolManagement(t *testing.T) {
+	var name string = "name3373707"
+	var zone2 string = "zone2-696322977"
+	var detail string = "detail-1335224239"
+	var statusMessage string = "statusMessage-239442758"
+	var selfLink string = "selfLink-1691268851"
+	var targetLink string = "targetLink-2084812312"
+	var startTime string = "startTime-1573145462"
+	var endTime string = "endTime1725551537"
+	var expectedResponse = &containerpb.Operation{
+		Name: name,
+		Zone: zone2,
+		Detail: detail,
+		StatusMessage: statusMessage,
+		SelfLink: selfLink,
+		TargetLink: targetLink,
+		StartTime: startTime,
+		EndTime: endTime,
+	}
+
+	mockClusterManager.err = nil
+	mockClusterManager.reqs = nil
+
+	mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse)
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var clusterId string = "clusterId240280960"
+	var nodePoolId string = "nodePoolId1043384033"
+	var management *containerpb.NodeManagement = &containerpb.NodeManagement{}
+	var request = &containerpb.SetNodePoolManagementRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		ClusterId: clusterId,
+		NodePoolId: nodePoolId,
+		Management: management,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.SetNodePoolManagement(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestClusterManagerSetNodePoolManagementError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockClusterManager.err = gstatus.Error(errCode, "test error")
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var clusterId string = "clusterId240280960"
+	var nodePoolId string = "nodePoolId1043384033"
+	var management *containerpb.NodeManagement = &containerpb.NodeManagement{}
+	var request = &containerpb.SetNodePoolManagementRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		ClusterId: clusterId,
+		NodePoolId: nodePoolId,
+		Management: management,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.SetNodePoolManagement(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestClusterManagerSetLabels(t *testing.T) {
+	var name string = "name3373707"
+	var zone2 string = "zone2-696322977"
+	var detail string = "detail-1335224239"
+	var statusMessage string = "statusMessage-239442758"
+	var selfLink string = "selfLink-1691268851"
+	var targetLink string = "targetLink-2084812312"
+	var startTime string = "startTime-1573145462"
+	var endTime string = "endTime1725551537"
+	var expectedResponse = &containerpb.Operation{
+		Name: name,
+		Zone: zone2,
+		Detail: detail,
+		StatusMessage: statusMessage,
+		SelfLink: selfLink,
+		TargetLink: targetLink,
+		StartTime: startTime,
+		EndTime: endTime,
+	}
+
+	mockClusterManager.err = nil
+	mockClusterManager.reqs = nil
+
+	mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse)
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var clusterId string = "clusterId240280960"
+	var resourceLabels map[string]string = nil
+	var labelFingerprint string = "labelFingerprint714995737"
+	var request = &containerpb.SetLabelsRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		ClusterId: clusterId,
+		ResourceLabels: resourceLabels,
+		LabelFingerprint: labelFingerprint,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.SetLabels(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q)", got, want)
+	}
+}
+
+func TestClusterManagerSetLabelsError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockClusterManager.err = gstatus.Error(errCode, "test error")
+
+	var projectId string = "projectId-1969970175"
+	var zone string = "zone3744684"
+	var clusterId string = "clusterId240280960"
+	var resourceLabels map[string]string = nil
+	var labelFingerprint string = "labelFingerprint714995737"
+	var request = &containerpb.SetLabelsRequest{
+		ProjectId: projectId,
+		Zone: zone,
+		ClusterId: clusterId,
+		ResourceLabels: resourceLabels,
+		LabelFingerprint: labelFingerprint,
+	}
+
+	c, err := NewClusterManagerClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.SetLabels(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestClusterManagerSetLegacyAbac(t *testing.T) {
+	var name string = "name3373707"
+	var zone2 string = "zone2-696322977"
+	var detail string = "detail-1335224239"
+	var statusMessage string = "statusMessage-239442758"
+	var selfLink string = "selfLink-1691268851"
+	var targetLink string = "targetLink-2084812312"
+	var startTime string = "startTime-1573145462"
+	var endTime string = "endTime1725551537"
+	var expectedResponse = &containerpb.Operation{
+		Name: name,
+		Zone: zone2,
+		Detail: detail,
+		StatusMessage: statusMessage,
+ SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var enabled bool = false + var request = &containerpb.SetLegacyAbacRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + Enabled: enabled, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetLegacyAbac(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerSetLegacyAbacError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var enabled bool = false + var request = &containerpb.SetLegacyAbacRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + Enabled: enabled, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetLegacyAbac(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerStartIPRotation(t *testing.T) { + var name string = "name3373707" + var zone2 string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var request = &containerpb.StartIPRotationRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.StartIPRotation(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerStartIPRotationError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var 
projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var request = &containerpb.StartIPRotationRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.StartIPRotation(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerCompleteIPRotation(t *testing.T) { + var name string = "name3373707" + var zone2 string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var request = &containerpb.CompleteIPRotationRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CompleteIPRotation(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerCompleteIPRotationError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var request = &containerpb.CompleteIPRotationRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CompleteIPRotation(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerSetNodePoolSize(t *testing.T) { + var name string = "name3373707" + var zone2 string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, 
+ StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var nodePoolId string = "nodePoolId1043384033" + var nodeCount int32 = 1539922066 + var request = &containerpb.SetNodePoolSizeRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + NodePoolId: nodePoolId, + NodeCount: nodeCount, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetNodePoolSize(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerSetNodePoolSizeError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var nodePoolId string = "nodePoolId1043384033" + var nodeCount int32 = 1539922066 + var request = &containerpb.SetNodePoolSizeRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + NodePoolId: nodePoolId, + NodeCount: nodeCount, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetNodePoolSize(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerSetNetworkPolicy(t *testing.T) { + var name string = "name3373707" + var zone2 string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var networkPolicy *containerpb.NetworkPolicy = &containerpb.NetworkPolicy{} + var request = &containerpb.SetNetworkPolicyRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + NetworkPolicy: networkPolicy, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetNetworkPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; 
!proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerSetNetworkPolicyError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var networkPolicy *containerpb.NetworkPolicy = &containerpb.NetworkPolicy{} + var request = &containerpb.SetNetworkPolicyRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + NetworkPolicy: networkPolicy, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetNetworkPolicy(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerSetMaintenancePolicy(t *testing.T) { + var name string = "name3373707" + var zone2 string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var maintenancePolicy *containerpb.MaintenancePolicy = &containerpb.MaintenancePolicy{} + var request = &containerpb.SetMaintenancePolicyRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + MaintenancePolicy: maintenancePolicy, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetMaintenancePolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerSetMaintenancePolicyError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var maintenancePolicy *containerpb.MaintenancePolicy = &containerpb.MaintenancePolicy{} + var request = &containerpb.SetMaintenancePolicyRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + MaintenancePolicy: maintenancePolicy, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetMaintenancePolicy(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got 
error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/container/container.go b/vendor/cloud.google.com/go/container/container.go new file mode 100644 index 0000000..44af7cb --- /dev/null +++ b/vendor/cloud.google.com/go/container/container.go @@ -0,0 +1,272 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package container contains a deprecated Google Container Engine client. +// +// Deprecated: Use cloud.google.com/go/container/apiv1 instead. +package container // import "cloud.google.com/go/container" + +import ( + "errors" + "fmt" + "time" + + "golang.org/x/net/context" + raw "google.golang.org/api/container/v1" + "google.golang.org/api/option" + htransport "google.golang.org/api/transport/http" +) + +type Type string + +const ( + TypeCreate = Type("createCluster") + TypeDelete = Type("deleteCluster") +) + +type Status string + +const ( + StatusDone = Status("done") + StatusPending = Status("pending") + StatusRunning = Status("running") + StatusError = Status("error") + StatusProvisioning = Status("provisioning") + StatusStopping = Status("stopping") +) + +const prodAddr = "https://container.googleapis.com/" +const userAgent = "gcloud-golang-container/20151008" + +// Client is a Google Container Engine client, which may be used to manage +// clusters with a project. It must be constructed via NewClient. +type Client struct { + projectID string + svc *raw.Service +} + +// NewClient creates a new Google Container Engine client. +func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { + o := []option.ClientOption{ + option.WithEndpoint(prodAddr), + option.WithScopes(raw.CloudPlatformScope), + option.WithUserAgent(userAgent), + } + o = append(o, opts...) + httpClient, endpoint, err := htransport.NewClient(ctx, o...) + if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + + svc, err := raw.New(httpClient) + if err != nil { + return nil, fmt.Errorf("constructing container client: %v", err) + } + svc.BasePath = endpoint + + c := &Client{ + projectID: projectID, + svc: svc, + } + + return c, nil +} + +// Resource is a Google Container Engine cluster resource. +type Resource struct { + // Name is the name of this cluster. The name must be unique + // within this project and zone, and can be up to 40 characters. + Name string + + // Description is the description of the cluster. Optional. + Description string + + // Zone is the Google Compute Engine zone in which the cluster resides. + Zone string + + // Status is the current status of the cluster. It could either be + // StatusError, StatusProvisioning, StatusRunning or StatusStopping. + Status Status + + // Num is the number of the nodes in this cluster resource. + Num int64 + + // APIVersion is the version of the Kubernetes master and kubelets running + // in this cluster. Allowed value is 0.4.2, or leave blank to + // pick up the latest stable release. 
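Stepping back to the mock_test.go hunks that end just above: every generated test follows one prime-and-inspect pattern. It clears mockClusterManager.err and mockClusterManager.reqs, queues the expected response on resps, invokes the RPC, and then compares the recorded request and the returned response with proto.Equal; the *Error variants instead prime err and assert on the gRPC status code via gstatus.FromError. A minimal sketch of that mock shape, with hypothetical names (mockService, handle) and assuming only the github.com/golang/protobuf/proto package:

package mocksketch

import "github.com/golang/protobuf/proto"

// mockService mirrors the primed-state shape of mockClusterManager: tests
// set err or resps before an RPC and inspect reqs afterwards.
type mockService struct {
	reqs  []proto.Message // every request the handler has seen
	err   error           // if non-nil, each RPC fails with this error
	resps []proto.Message // canned responses, consumed in order
}

// handle is what each generated RPC stub boils down to: record the request,
// then return either the primed error or the next canned response.
func (s *mockService) handle(req proto.Message) (proto.Message, error) {
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	resp := s.resps[0]
	s.resps = s.resps[1:]
	return resp, nil
}

Because the mock is shared package state rather than per-test, the reset of err and reqs at the top of each test is what keeps the tests independent.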
+ APIVersion string + + // Endpoint is the IP address of this cluster's Kubernetes master. + // The endpoint can be accessed at https://username:password@endpoint/. + // See Username and Password fields for the username and password information. + Endpoint string + + // Username is the username to use when accessing the Kubernetes master endpoint. + Username string + + // Password is the password to use when accessing the Kubernetes master endpoint. + Password string + + // ContainerIPv4CIDR is the IP addresses of the container pods in + // this cluster, in CIDR notation (e.g. 1.2.3.4/29). + ContainerIPv4CIDR string + + // ServicesIPv4CIDR is the IP addresses of the Kubernetes services in this + // cluster, in CIDR notation (e.g. 1.2.3.4/29). Service addresses are + // always in the 10.0.0.0/16 range. + ServicesIPv4CIDR string + + // MachineType is a Google Compute Engine machine type (e.g. n1-standard-1). + // If none set, the default type is used while creating a new cluster. + MachineType string + + // This field is ignored. It was removed from the underlying container API in v1. + SourceImage string + + // Created is the creation time of this cluster. + Created time.Time +} + +func resourceFromRaw(c *raw.Cluster) *Resource { + if c == nil { + return nil + } + r := &Resource{ + Name: c.Name, + Description: c.Description, + Zone: c.Zone, + Status: Status(c.Status), + Num: c.CurrentNodeCount, + APIVersion: c.InitialClusterVersion, + Endpoint: c.Endpoint, + Username: c.MasterAuth.Username, + Password: c.MasterAuth.Password, + ContainerIPv4CIDR: c.ClusterIpv4Cidr, + ServicesIPv4CIDR: c.ServicesIpv4Cidr, + MachineType: c.NodeConfig.MachineType, + } + r.Created, _ = time.Parse(time.RFC3339, c.CreateTime) + return r +} + +func resourcesFromRaw(c []*raw.Cluster) []*Resource { + r := make([]*Resource, len(c)) + for i, val := range c { + r[i] = resourceFromRaw(val) + } + return r +} + +// Op represents a Google Container Engine API operation. +type Op struct { + // Name is the name of the operation. + Name string + + // Zone is the Google Compute Engine zone. + Zone string + + // This field is ignored. It was removed from the underlying container API in v1. + TargetURL string + + // Type is the operation type. It could be either be TypeCreate or TypeDelete. + Type Type + + // Status is the current status of this operation. It could be either + // OpDone or OpPending. + Status Status +} + +func opFromRaw(o *raw.Operation) *Op { + if o == nil { + return nil + } + return &Op{ + Name: o.Name, + Zone: o.Zone, + Type: Type(o.OperationType), + Status: Status(o.Status), + } +} + +func opsFromRaw(o []*raw.Operation) []*Op { + ops := make([]*Op, len(o)) + for i, val := range o { + ops[i] = opFromRaw(val) + } + return ops +} + +// Clusters returns a list of cluster resources from the specified zone. +// If no zone is specified, it returns all clusters under the user project. +func (c *Client) Clusters(ctx context.Context, zone string) ([]*Resource, error) { + if zone == "" { + zone = "-" + } + resp, err := c.svc.Projects.Zones.Clusters.List(c.projectID, zone).Do() + if err != nil { + return nil, err + } + return resourcesFromRaw(resp.Clusters), nil +} + +// Cluster returns metadata about the specified cluster. 
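Since this deprecated container package predates the generated example files, a short usage sketch may help: listing every cluster in a project with the NewClient and Clusters calls defined in this file. The project ID and output format are placeholders:

package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/container"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	client, err := container.NewClient(ctx, "my-project") // placeholder project ID
	if err != nil {
		log.Fatal(err)
	}
	// An empty zone falls through to the "-" sentinel, i.e. all zones.
	clusters, err := client.Clusters(ctx, "")
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range clusters {
		fmt.Printf("%s (%s): %s\n", c.Name, c.Zone, c.Status)
	}
}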
+func (c *Client) Cluster(ctx context.Context, zone, name string) (*Resource, error) { + resp, err := c.svc.Projects.Zones.Clusters.Get(c.projectID, zone, name).Do() + if err != nil { + return nil, err + } + return resourceFromRaw(resp), nil +} + +// CreateCluster creates a new cluster with the provided metadata +// in the specified zone. +func (c *Client) CreateCluster(ctx context.Context, zone string, resource *Resource) (*Resource, error) { + panic("not implemented") +} + +// DeleteCluster deletes a cluster. +func (c *Client) DeleteCluster(ctx context.Context, zone, name string) error { + _, err := c.svc.Projects.Zones.Clusters.Delete(c.projectID, zone, name).Do() + return err +} + +// Operations returns a list of operations from the specified zone. +// If no zone is specified, it looks up for all of the operations +// that are running under the user's project. +func (c *Client) Operations(ctx context.Context, zone string) ([]*Op, error) { + if zone == "" { + resp, err := c.svc.Projects.Zones.Operations.List(c.projectID, "-").Do() + if err != nil { + return nil, err + } + return opsFromRaw(resp.Operations), nil + } + resp, err := c.svc.Projects.Zones.Operations.List(c.projectID, zone).Do() + if err != nil { + return nil, err + } + return opsFromRaw(resp.Operations), nil +} + +// Operation returns an operation. +func (c *Client) Operation(ctx context.Context, zone, name string) (*Op, error) { + resp, err := c.svc.Projects.Zones.Operations.Get(c.projectID, zone, name).Do() + if err != nil { + return nil, err + } + if resp.StatusMessage != "" { + return nil, errors.New(resp.StatusMessage) + } + return opFromRaw(resp), nil +} diff --git a/vendor/cloud.google.com/go/dataproc/apiv1/ListClusters_smoke_test.go b/vendor/cloud.google.com/go/dataproc/apiv1/ListClusters_smoke_test.go new file mode 100644 index 0000000..732c4b0 --- /dev/null +++ b/vendor/cloud.google.com/go/dataproc/apiv1/ListClusters_smoke_test.go @@ -0,0 +1,69 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package dataproc + +import ( + dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1" +) + +import ( + "fmt" + "strconv" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var _ = fmt.Sprintf +var _ = iterator.Done +var _ = strconv.FormatUint +var _ = time.Now + +func TestClusterControllerSmoke(t *testing.T) { + if testing.Short() { + t.Skip("skipping smoke test in short mode") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) + if ts == nil { + t.Skip("Integration tests skipped. 
See CONTRIBUTING.md for details") + } + + projectId := testutil.ProjID() + _ = projectId + + c, err := NewClusterControllerClient(ctx, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + + var projectId2 string = projectId + var region string = "global" + var request = &dataprocpb.ListClustersRequest{ + ProjectId: projectId2, + Region: region, + } + + iter := c.ListClusters(ctx, request) + if _, err := iter.Next(); err != nil && err != iterator.Done { + t.Error(err) + } +} diff --git a/vendor/cloud.google.com/go/dataproc/apiv1/cluster_controller_client.go b/vendor/cloud.google.com/go/dataproc/apiv1/cluster_controller_client.go new file mode 100644 index 0000000..015d74c --- /dev/null +++ b/vendor/cloud.google.com/go/dataproc/apiv1/cluster_controller_client.go @@ -0,0 +1,593 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package dataproc + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + "cloud.google.com/go/longrunning" + lroauto "cloud.google.com/go/longrunning/autogen" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// ClusterControllerCallOptions contains the retry settings for each method of ClusterControllerClient. +type ClusterControllerCallOptions struct { + CreateCluster []gax.CallOption + UpdateCluster []gax.CallOption + DeleteCluster []gax.CallOption + GetCluster []gax.CallOption + ListClusters []gax.CallOption + DiagnoseCluster []gax.CallOption +} + +func defaultClusterControllerClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("dataproc.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultClusterControllerCallOptions() *ClusterControllerCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &ClusterControllerCallOptions{ + CreateCluster: retry[[2]string{"default", "non_idempotent"}], + UpdateCluster: retry[[2]string{"default", "non_idempotent"}], + DeleteCluster: retry[[2]string{"default", "idempotent"}], + GetCluster: retry[[2]string{"default", "idempotent"}], + ListClusters: retry[[2]string{"default", "idempotent"}], + DiagnoseCluster: retry[[2]string{"default", "non_idempotent"}], + } +} + +// ClusterControllerClient is a client for interacting with Google Cloud Dataproc API. 
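defaultClusterControllerCallOptions above wires retries only onto the idempotent methods (DeleteCluster, GetCluster, ListClusters), backing off from 100 ms toward 60 s with a 1.3 multiplier on DeadlineExceeded and Unavailable. The same gax primitives can override the policy for a single call instead of editing CallOptions; a sketch reusing the gax-go v1 API seen in this file, with a deliberately tighter, hypothetical backoff:

package dataprocsketch

import (
	"time"

	dataproc "cloud.google.com/go/dataproc/apiv1"
	gax "github.com/googleapis/gax-go"
	"golang.org/x/net/context"
	dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1"
	"google.golang.org/grpc/codes"
)

// getClusterQuickRetry retries GetCluster only on Unavailable, with a much
// shorter backoff than the package default.
func getClusterQuickRetry(ctx context.Context, c *dataproc.ClusterControllerClient, req *dataprocpb.GetClusterRequest) (*dataprocpb.Cluster, error) {
	retry := gax.WithRetry(func() gax.Retryer {
		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
			Initial:    50 * time.Millisecond,
			Max:        2 * time.Second,
			Multiplier: 2,
		})
	})
	return c.GetCluster(ctx, req, retry)
}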
+type ClusterControllerClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + clusterControllerClient dataprocpb.ClusterControllerClient + + // LROClient is used internally to handle longrunning operations. + // It is exposed so that its CallOptions can be modified if required. + // Users should not Close this client. + LROClient *lroauto.OperationsClient + + // The call options for this service. + CallOptions *ClusterControllerCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClusterControllerClient creates a new cluster controller client. +// +// The ClusterControllerService provides methods to manage clusters +// of Google Compute Engine instances. +func NewClusterControllerClient(ctx context.Context, opts ...option.ClientOption) (*ClusterControllerClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClusterControllerClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &ClusterControllerClient{ + conn: conn, + CallOptions: defaultClusterControllerCallOptions(), + + clusterControllerClient: dataprocpb.NewClusterControllerClient(conn), + } + c.setGoogleClientInfo() + + c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn)) + if err != nil { + // This error "should not happen", since we are just reusing old connection + // and never actually need to dial. + // If this does happen, we could leak conn. However, we cannot close conn: + // If the user invoked the function with option.WithGRPCConn, + // we would close a connection that's still in use. + // TODO(pongad): investigate error conditions. + return nil, err + } + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *ClusterControllerClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ClusterControllerClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *ClusterControllerClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// CreateCluster creates a cluster in a project. +func (c *ClusterControllerClient) CreateCluster(ctx context.Context, req *dataprocpb.CreateClusterRequest, opts ...gax.CallOption) (*CreateClusterOperation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateCluster[0:len(c.CallOptions.CreateCluster):len(c.CallOptions.CreateCluster)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterControllerClient.CreateCluster(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return &CreateClusterOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, resp), + }, nil +} + +// UpdateCluster updates a cluster in a project. 
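One subtlety worth calling out in CreateCluster above (and repeated in every method that follows) is the three-index slice c.CallOptions.CreateCluster[0:len(...):len(...)]: capping capacity at length forces the subsequent append of per-call options to allocate a fresh backing array instead of scribbling over the shared defaults. A standalone illustration of the difference:

package main

import "fmt"

func main() {
	base := make([]string, 2, 4) // spare capacity, like a shared options slice
	base[0], base[1] = "a", "b"

	aliased := append(base, "x")         // reuses base's backing array
	isolated := append(base[0:2:2], "y") // cap == len, so append must copy

	aliased[0] = "A"
	fmt.Println(base[0]) // "A": aliased still shares memory with base

	isolated[0] = "B"
	fmt.Println(base[0]) // still "A": isolated is a private copy
}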
+func (c *ClusterControllerClient) UpdateCluster(ctx context.Context, req *dataprocpb.UpdateClusterRequest, opts ...gax.CallOption) (*UpdateClusterOperation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateCluster[0:len(c.CallOptions.UpdateCluster):len(c.CallOptions.UpdateCluster)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterControllerClient.UpdateCluster(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return &UpdateClusterOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, resp), + }, nil +} + +// DeleteCluster deletes a cluster in a project. +func (c *ClusterControllerClient) DeleteCluster(ctx context.Context, req *dataprocpb.DeleteClusterRequest, opts ...gax.CallOption) (*DeleteClusterOperation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteCluster[0:len(c.CallOptions.DeleteCluster):len(c.CallOptions.DeleteCluster)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterControllerClient.DeleteCluster(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return &DeleteClusterOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, resp), + }, nil +} + +// GetCluster gets the resource representation for a cluster in a project. +func (c *ClusterControllerClient) GetCluster(ctx context.Context, req *dataprocpb.GetClusterRequest, opts ...gax.CallOption) (*dataprocpb.Cluster, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetCluster[0:len(c.CallOptions.GetCluster):len(c.CallOptions.GetCluster)], opts...) + var resp *dataprocpb.Cluster + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterControllerClient.GetCluster(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListClusters lists all regions/{region}/clusters in a project. +func (c *ClusterControllerClient) ListClusters(ctx context.Context, req *dataprocpb.ListClustersRequest, opts ...gax.CallOption) *ClusterIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListClusters[0:len(c.CallOptions.ListClusters):len(c.CallOptions.ListClusters)], opts...) + it := &ClusterIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*dataprocpb.Cluster, string, error) { + var resp *dataprocpb.ListClustersResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterControllerClient.ListClusters(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Clusters, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) 
+ return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// DiagnoseCluster gets cluster diagnostic information. +// After the operation completes, the Operation.response field +// contains DiagnoseClusterOutputLocation. +func (c *ClusterControllerClient) DiagnoseCluster(ctx context.Context, req *dataprocpb.DiagnoseClusterRequest, opts ...gax.CallOption) (*DiagnoseClusterOperation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DiagnoseCluster[0:len(c.CallOptions.DiagnoseCluster):len(c.CallOptions.DiagnoseCluster)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterControllerClient.DiagnoseCluster(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return &DiagnoseClusterOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, resp), + }, nil +} + +// ClusterIterator manages a stream of *dataprocpb.Cluster. +type ClusterIterator struct { + items []*dataprocpb.Cluster + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*dataprocpb.Cluster, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ClusterIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *ClusterIterator) Next() (*dataprocpb.Cluster, error) { + var item *dataprocpb.Cluster + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ClusterIterator) bufLen() int { + return len(it.items) +} + +func (it *ClusterIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// CreateClusterOperation manages a long-running operation from CreateCluster. +type CreateClusterOperation struct { + lro *longrunning.Operation +} + +// CreateClusterOperation returns a new CreateClusterOperation from a given name. +// The name must be that of a previously created CreateClusterOperation, possibly from a different process. +func (c *ClusterControllerClient) CreateClusterOperation(name string) *CreateClusterOperation { + return &CreateClusterOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning the response and any errors encountered. +// +// See documentation of Poll for error-handling information. +func (op *CreateClusterOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*dataprocpb.Cluster, error) { + var resp dataprocpb.Cluster + if err := op.lro.WaitWithInterval(ctx, &resp, 10000*time.Millisecond, opts...); err != nil { + return nil, err + } + return &resp, nil +} + +// Poll fetches the latest state of the long-running operation. 
+// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true, and the response of the operation is returned. +// If Poll succeeds and the operation has not completed, the returned response and error are both nil. +func (op *CreateClusterOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*dataprocpb.Cluster, error) { + var resp dataprocpb.Cluster + if err := op.lro.Poll(ctx, &resp, opts...); err != nil { + return nil, err + } + if !op.Done() { + return nil, nil + } + return &resp, nil +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. +func (op *CreateClusterOperation) Metadata() (*dataprocpb.ClusterOperationMetadata, error) { + var meta dataprocpb.ClusterOperationMetadata + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *CreateClusterOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *CreateClusterOperation) Name() string { + return op.lro.Name() +} + +// Delete deletes a long-running operation. +// This method indicates that the client is no longer interested in the operation result. +// It does not cancel the operation. +func (op *CreateClusterOperation) Delete(ctx context.Context, opts ...gax.CallOption) error { + return op.lro.Delete(ctx, opts...) +} + +// DeleteClusterOperation manages a long-running operation from DeleteCluster. +type DeleteClusterOperation struct { + lro *longrunning.Operation +} + +// DeleteClusterOperation returns a new DeleteClusterOperation from a given name. +// The name must be that of a previously created DeleteClusterOperation, possibly from a different process. +func (c *ClusterControllerClient) DeleteClusterOperation(name string) *DeleteClusterOperation { + return &DeleteClusterOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning any error encountered. +// +// See documentation of Poll for error-handling information. +func (op *DeleteClusterOperation) Wait(ctx context.Context, opts ...gax.CallOption) error { + return op.lro.WaitWithInterval(ctx, nil, 10000*time.Millisecond, opts...) +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, op.Done will return true. 
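Wait above polls on a fixed 10-second cadence (WaitWithInterval with 10000 ms). When that is too coarse, the Poll/Done pair supports a hand-rolled loop; a sketch with a hypothetical 2-second interval and context-aware cancellation:

package dataprocsketch

import (
	"time"

	dataproc "cloud.google.com/go/dataproc/apiv1"
	"golang.org/x/net/context"
	dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1"
)

// pollCreate drives a CreateClusterOperation by hand instead of calling Wait.
func pollCreate(ctx context.Context, op *dataproc.CreateClusterOperation) (*dataprocpb.Cluster, error) {
	for {
		resp, err := op.Poll(ctx)
		if err != nil {
			return nil, err // RPC failure, or the operation completed with an error
		}
		if op.Done() {
			return resp, nil // completed successfully
		}
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-time.After(2 * time.Second): // hypothetical interval
		}
	}
}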
+func (op *DeleteClusterOperation) Poll(ctx context.Context, opts ...gax.CallOption) error { + return op.lro.Poll(ctx, nil, opts...) +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. +func (op *DeleteClusterOperation) Metadata() (*dataprocpb.ClusterOperationMetadata, error) { + var meta dataprocpb.ClusterOperationMetadata + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *DeleteClusterOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *DeleteClusterOperation) Name() string { + return op.lro.Name() +} + +// Delete deletes a long-running operation. +// This method indicates that the client is no longer interested in the operation result. +// It does not cancel the operation. +func (op *DeleteClusterOperation) Delete(ctx context.Context, opts ...gax.CallOption) error { + return op.lro.Delete(ctx, opts...) +} + +// DiagnoseClusterOperation manages a long-running operation from DiagnoseCluster. +type DiagnoseClusterOperation struct { + lro *longrunning.Operation +} + +// DiagnoseClusterOperation returns a new DiagnoseClusterOperation from a given name. +// The name must be that of a previously created DiagnoseClusterOperation, possibly from a different process. +func (c *ClusterControllerClient) DiagnoseClusterOperation(name string) *DiagnoseClusterOperation { + return &DiagnoseClusterOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning any error encountered. +// +// See documentation of Poll for error-handling information. +func (op *DiagnoseClusterOperation) Wait(ctx context.Context, opts ...gax.CallOption) error { + return op.lro.WaitWithInterval(ctx, nil, 10000*time.Millisecond, opts...) +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, op.Done will return true. +func (op *DiagnoseClusterOperation) Poll(ctx context.Context, opts ...gax.CallOption) error { + return op.lro.Poll(ctx, nil, opts...) +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. 
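Because the *Operation constructors above take a bare name, one process can persist op.Name() and another (or the same one after a restart) can reattach and block until completion. A minimal sketch, where opName is assumed to have been recorded by the process that started the operation:

package dataprocsketch

import (
	dataproc "cloud.google.com/go/dataproc/apiv1"
	"golang.org/x/net/context"
)

// resumeDelete reattaches to a DeleteCluster operation started elsewhere
// (opName would come from op.Name() in the originating process).
func resumeDelete(ctx context.Context, c *dataproc.ClusterControllerClient, opName string) error {
	op := c.DeleteClusterOperation(opName)
	return op.Wait(ctx)
}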
+func (op *DiagnoseClusterOperation) Metadata() (*dataprocpb.DiagnoseClusterResults, error) { + var meta dataprocpb.DiagnoseClusterResults + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *DiagnoseClusterOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *DiagnoseClusterOperation) Name() string { + return op.lro.Name() +} + +// Delete deletes a long-running operation. +// This method indicates that the client is no longer interested in the operation result. +// It does not cancel the operation. +func (op *DiagnoseClusterOperation) Delete(ctx context.Context, opts ...gax.CallOption) error { + return op.lro.Delete(ctx, opts...) +} + +// UpdateClusterOperation manages a long-running operation from UpdateCluster. +type UpdateClusterOperation struct { + lro *longrunning.Operation +} + +// UpdateClusterOperation returns a new UpdateClusterOperation from a given name. +// The name must be that of a previously created UpdateClusterOperation, possibly from a different process. +func (c *ClusterControllerClient) UpdateClusterOperation(name string) *UpdateClusterOperation { + return &UpdateClusterOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning the response and any errors encountered. +// +// See documentation of Poll for error-handling information. +func (op *UpdateClusterOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*dataprocpb.Cluster, error) { + var resp dataprocpb.Cluster + if err := op.lro.WaitWithInterval(ctx, &resp, 10000*time.Millisecond, opts...); err != nil { + return nil, err + } + return &resp, nil +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true, and the response of the operation is returned. +// If Poll succeeds and the operation has not completed, the returned response and error are both nil. +func (op *UpdateClusterOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*dataprocpb.Cluster, error) { + var resp dataprocpb.Cluster + if err := op.lro.Poll(ctx, &resp, opts...); err != nil { + return nil, err + } + if !op.Done() { + return nil, nil + } + return &resp, nil +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. 
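Note that the Metadata methods deliberately flatten longrunning.ErrNoMetadata into a (nil, nil) return, so callers need a nil check before touching fields. A sketch, assuming the ClusterName field of dataprocpb.ClusterOperationMetadata:

package dataprocsketch

import (
	dataproc "cloud.google.com/go/dataproc/apiv1"
)

// operationTarget names the cluster an operation is acting on, or returns
// "" when the server has not published metadata yet.
func operationTarget(op *dataproc.CreateClusterOperation) (string, error) {
	meta, err := op.Metadata()
	if err != nil {
		return "", err
	}
	if meta == nil {
		return "", nil // the ErrNoMetadata case: nothing attached yet
	}
	return meta.ClusterName, nil
}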
+func (op *UpdateClusterOperation) Metadata() (*dataprocpb.ClusterOperationMetadata, error) { + var meta dataprocpb.ClusterOperationMetadata + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *UpdateClusterOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *UpdateClusterOperation) Name() string { + return op.lro.Name() +} + +// Delete deletes a long-running operation. +// This method indicates that the client is no longer interested in the operation result. +// It does not cancel the operation. +func (op *UpdateClusterOperation) Delete(ctx context.Context, opts ...gax.CallOption) error { + return op.lro.Delete(ctx, opts...) +} diff --git a/vendor/cloud.google.com/go/dataproc/apiv1/cluster_controller_client_example_test.go b/vendor/cloud.google.com/go/dataproc/apiv1/cluster_controller_client_example_test.go new file mode 100644 index 0000000..7a99093 --- /dev/null +++ b/vendor/cloud.google.com/go/dataproc/apiv1/cluster_controller_client_example_test.go @@ -0,0 +1,160 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package dataproc_test + +import ( + "cloud.google.com/go/dataproc/apiv1" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1" +) + +func ExampleNewClusterControllerClient() { + ctx := context.Background() + c, err := dataproc.NewClusterControllerClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClusterControllerClient_CreateCluster() { + ctx := context.Background() + c, err := dataproc.NewClusterControllerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dataprocpb.CreateClusterRequest{ + // TODO: Fill request struct fields. + } + op, err := c.CreateCluster(ctx, req) + if err != nil { + // TODO: Handle error. + } + + resp, err := op.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterControllerClient_UpdateCluster() { + ctx := context.Background() + c, err := dataproc.NewClusterControllerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dataprocpb.UpdateClusterRequest{ + // TODO: Fill request struct fields. + } + op, err := c.UpdateCluster(ctx, req) + if err != nil { + // TODO: Handle error. + } + + resp, err := op.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterControllerClient_DeleteCluster() { + ctx := context.Background() + c, err := dataproc.NewClusterControllerClient(ctx) + if err != nil { + // TODO: Handle error. 
+ } + + req := &dataprocpb.DeleteClusterRequest{ + // TODO: Fill request struct fields. + } + op, err := c.DeleteCluster(ctx, req) + if err != nil { + // TODO: Handle error. + } + + err = op.Wait(ctx) + // TODO: Handle error. +} + +func ExampleClusterControllerClient_GetCluster() { + ctx := context.Background() + c, err := dataproc.NewClusterControllerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dataprocpb.GetClusterRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetCluster(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterControllerClient_ListClusters() { + ctx := context.Background() + c, err := dataproc.NewClusterControllerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dataprocpb.ListClustersRequest{ + // TODO: Fill request struct fields. + } + it := c.ListClusters(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClusterControllerClient_DiagnoseCluster() { + ctx := context.Background() + c, err := dataproc.NewClusterControllerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dataprocpb.DiagnoseClusterRequest{ + // TODO: Fill request struct fields. + } + op, err := c.DiagnoseCluster(ctx, req) + if err != nil { + // TODO: Handle error. + } + + err = op.Wait(ctx) + // TODO: Handle error. +} diff --git a/vendor/cloud.google.com/go/dataproc/apiv1/doc.go b/vendor/cloud.google.com/go/dataproc/apiv1/doc.go new file mode 100644 index 0000000..80c61ee --- /dev/null +++ b/vendor/cloud.google.com/go/dataproc/apiv1/doc.go @@ -0,0 +1,46 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package dataproc is an auto-generated package for the +// Google Cloud Dataproc API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// Manages Hadoop-based clusters and jobs on Google Cloud Platform. +package dataproc // import "cloud.google.com/go/dataproc/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. 
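insertMetadata in doc.go above merges into whatever outgoing metadata is already on the context rather than replacing it, so caller-supplied headers coexist with x-goog-api-client. A self-contained sketch of that merge behavior; the header names and values here are illustrative only:

package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

func main() {
	// The caller attaches its own outgoing metadata first.
	ctx := metadata.NewOutgoingContext(context.Background(),
		metadata.Pairs("x-example-caller", "demo")) // illustrative header

	// The same merge insertMetadata performs: copy, then append per key.
	extra := metadata.Pairs("x-goog-api-client", "gl-go/1.x") // illustrative value
	out, _ := metadata.FromOutgoingContext(ctx)
	out = out.Copy()
	for k, v := range extra {
		out[k] = append(out[k], v...)
	}
	ctx = metadata.NewOutgoingContext(ctx, out)

	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md["x-example-caller"], md["x-goog-api-client"]) // both survive
}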
+func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + } +} diff --git a/vendor/cloud.google.com/go/dataproc/apiv1/job_controller_client.go b/vendor/cloud.google.com/go/dataproc/apiv1/job_controller_client.go new file mode 100644 index 0000000..0022840 --- /dev/null +++ b/vendor/cloud.google.com/go/dataproc/apiv1/job_controller_client.go @@ -0,0 +1,285 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package dataproc + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// JobControllerCallOptions contains the retry settings for each method of JobControllerClient. +type JobControllerCallOptions struct { + SubmitJob []gax.CallOption + GetJob []gax.CallOption + ListJobs []gax.CallOption + UpdateJob []gax.CallOption + CancelJob []gax.CallOption + DeleteJob []gax.CallOption +} + +func defaultJobControllerClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("dataproc.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultJobControllerCallOptions() *JobControllerCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &JobControllerCallOptions{ + SubmitJob: retry[[2]string{"default", "non_idempotent"}], + GetJob: retry[[2]string{"default", "idempotent"}], + ListJobs: retry[[2]string{"default", "idempotent"}], + UpdateJob: retry[[2]string{"default", "non_idempotent"}], + CancelJob: retry[[2]string{"default", "non_idempotent"}], + DeleteJob: retry[[2]string{"default", "idempotent"}], + } +} + +// JobControllerClient is a client for interacting with Google Cloud Dataproc API. +type JobControllerClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + jobControllerClient dataprocpb.JobControllerClient + + // The call options for this service. + CallOptions *JobControllerCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewJobControllerClient creates a new job controller client. +// +// The JobController provides methods to manage jobs. +func NewJobControllerClient(ctx context.Context, opts ...option.ClientOption) (*JobControllerClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultJobControllerClientOptions(), opts...)...) 
+ if err != nil { + return nil, err + } + c := &JobControllerClient{ + conn: conn, + CallOptions: defaultJobControllerCallOptions(), + + jobControllerClient: dataprocpb.NewJobControllerClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *JobControllerClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *JobControllerClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *JobControllerClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// SubmitJob submits a job to a cluster. +func (c *JobControllerClient) SubmitJob(ctx context.Context, req *dataprocpb.SubmitJobRequest, opts ...gax.CallOption) (*dataprocpb.Job, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SubmitJob[0:len(c.CallOptions.SubmitJob):len(c.CallOptions.SubmitJob)], opts...) + var resp *dataprocpb.Job + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.jobControllerClient.SubmitJob(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetJob gets the resource representation for a job in a project. +func (c *JobControllerClient) GetJob(ctx context.Context, req *dataprocpb.GetJobRequest, opts ...gax.CallOption) (*dataprocpb.Job, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetJob[0:len(c.CallOptions.GetJob):len(c.CallOptions.GetJob)], opts...) + var resp *dataprocpb.Job + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.jobControllerClient.GetJob(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListJobs lists regions/{region}/jobs in a project. +func (c *JobControllerClient) ListJobs(ctx context.Context, req *dataprocpb.ListJobsRequest, opts ...gax.CallOption) *JobIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListJobs[0:len(c.CallOptions.ListJobs):len(c.CallOptions.ListJobs)], opts...) + it := &JobIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*dataprocpb.Job, string, error) { + var resp *dataprocpb.ListJobsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.jobControllerClient.ListJobs(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Jobs, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) 
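+		// Buffer the page's items on the iterator; Next pops them off one
+		// at a time, while bufLen/takeBuf let the iterator package track
+		// page boundaries.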
+ return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// UpdateJob updates a job in a project. +func (c *JobControllerClient) UpdateJob(ctx context.Context, req *dataprocpb.UpdateJobRequest, opts ...gax.CallOption) (*dataprocpb.Job, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateJob[0:len(c.CallOptions.UpdateJob):len(c.CallOptions.UpdateJob)], opts...) + var resp *dataprocpb.Job + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.jobControllerClient.UpdateJob(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CancelJob starts a job cancellation request. To access the job resource +// after cancellation, call +// regions/{region}/jobs.list (at /dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or +// regions/{region}/jobs.get (at /dataproc/docs/reference/rest/v1/projects.regions.jobs/get). +func (c *JobControllerClient) CancelJob(ctx context.Context, req *dataprocpb.CancelJobRequest, opts ...gax.CallOption) (*dataprocpb.Job, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CancelJob[0:len(c.CallOptions.CancelJob):len(c.CallOptions.CancelJob)], opts...) + var resp *dataprocpb.Job + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.jobControllerClient.CancelJob(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteJob deletes the job from the project. If the job is active, the delete fails, +// and the response returns FAILED_PRECONDITION. +func (c *JobControllerClient) DeleteJob(ctx context.Context, req *dataprocpb.DeleteJobRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteJob[0:len(c.CallOptions.DeleteJob):len(c.CallOptions.DeleteJob)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.jobControllerClient.DeleteJob(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// JobIterator manages a stream of *dataprocpb.Job. +type JobIterator struct { + items []*dataprocpb.Job + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*dataprocpb.Job, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *JobIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
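+//
+// A typical consumption loop, mirroring the generated examples in this
+// package (sketch):
+//
+//	for {
+//		job, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			// TODO: Handle error.
+//		}
+//		_ = job // use job
+//	}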
+func (it *JobIterator) Next() (*dataprocpb.Job, error) { + var item *dataprocpb.Job + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *JobIterator) bufLen() int { + return len(it.items) +} + +func (it *JobIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/dataproc/apiv1/job_controller_client_example_test.go b/vendor/cloud.google.com/go/dataproc/apiv1/job_controller_client_example_test.go new file mode 100644 index 0000000..50c1f26 --- /dev/null +++ b/vendor/cloud.google.com/go/dataproc/apiv1/job_controller_client_example_test.go @@ -0,0 +1,146 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package dataproc_test + +import ( + "cloud.google.com/go/dataproc/apiv1" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1" +) + +func ExampleNewJobControllerClient() { + ctx := context.Background() + c, err := dataproc.NewJobControllerClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleJobControllerClient_SubmitJob() { + ctx := context.Background() + c, err := dataproc.NewJobControllerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dataprocpb.SubmitJobRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SubmitJob(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleJobControllerClient_GetJob() { + ctx := context.Background() + c, err := dataproc.NewJobControllerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dataprocpb.GetJobRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetJob(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleJobControllerClient_ListJobs() { + ctx := context.Background() + c, err := dataproc.NewJobControllerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dataprocpb.ListJobsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListJobs(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleJobControllerClient_UpdateJob() { + ctx := context.Background() + c, err := dataproc.NewJobControllerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dataprocpb.UpdateJobRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateJob(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleJobControllerClient_CancelJob() { + ctx := context.Background() + c, err := dataproc.NewJobControllerClient(ctx) + if err != nil { + // TODO: Handle error. 
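+		// For a short-lived tool, log.Fatal(err) would be one reasonable
+		// choice; the generated examples deliberately leave this open.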
+ } + + req := &dataprocpb.CancelJobRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CancelJob(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleJobControllerClient_DeleteJob() { + ctx := context.Background() + c, err := dataproc.NewJobControllerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dataprocpb.DeleteJobRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteJob(ctx, req) + if err != nil { + // TODO: Handle error. + } +} diff --git a/vendor/cloud.google.com/go/dataproc/apiv1/mock_test.go b/vendor/cloud.google.com/go/dataproc/apiv1/mock_test.go new file mode 100644 index 0000000..1f4669f --- /dev/null +++ b/vendor/cloud.google.com/go/dataproc/apiv1/mock_test.go @@ -0,0 +1,1196 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package dataproc + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + field_maskpb "google.golang.org/genproto/protobuf/field_mask" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockClusterControllerServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + dataprocpb.ClusterControllerServer + + reqs []proto.Message + + // If set, all calls return this error. 
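+	// The *Error tests below either set this or enqueue an Operation
+	// carrying an error status, to exercise the client's gRPC error paths.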
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockClusterControllerServer) CreateCluster(ctx context.Context, req *dataprocpb.CreateClusterRequest) (*longrunningpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +func (s *mockClusterControllerServer) UpdateCluster(ctx context.Context, req *dataprocpb.UpdateClusterRequest) (*longrunningpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +func (s *mockClusterControllerServer) DeleteCluster(ctx context.Context, req *dataprocpb.DeleteClusterRequest) (*longrunningpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +func (s *mockClusterControllerServer) GetCluster(ctx context.Context, req *dataprocpb.GetClusterRequest) (*dataprocpb.Cluster, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dataprocpb.Cluster), nil +} + +func (s *mockClusterControllerServer) ListClusters(ctx context.Context, req *dataprocpb.ListClustersRequest) (*dataprocpb.ListClustersResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dataprocpb.ListClustersResponse), nil +} + +func (s *mockClusterControllerServer) DiagnoseCluster(ctx context.Context, req *dataprocpb.DiagnoseClusterRequest) (*longrunningpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +type mockJobControllerServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + dataprocpb.JobControllerServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockJobControllerServer) SubmitJob(ctx context.Context, req *dataprocpb.SubmitJobRequest) (*dataprocpb.Job, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dataprocpb.Job), nil +} + +func (s *mockJobControllerServer) GetJob(ctx context.Context, req *dataprocpb.GetJobRequest) (*dataprocpb.Job, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dataprocpb.Job), nil +} + +func (s *mockJobControllerServer) ListJobs(ctx context.Context, req *dataprocpb.ListJobsRequest) (*dataprocpb.ListJobsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dataprocpb.ListJobsResponse), nil +} + +func (s *mockJobControllerServer) UpdateJob(ctx context.Context, req *dataprocpb.UpdateJobRequest) (*dataprocpb.Job, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dataprocpb.Job), nil +} + +func (s *mockJobControllerServer) CancelJob(ctx context.Context, req *dataprocpb.CancelJobRequest) (*dataprocpb.Job, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dataprocpb.Job), nil +} + +func (s *mockJobControllerServer) DeleteJob(ctx context.Context, req *dataprocpb.DeleteJobRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
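+// TestMain below starts an in-process gRPC server backed by the mock
+// services and points clientOpt at it via option.WithGRPCConn, so the tests
+// never touch the real API.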
+var clientOpt option.ClientOption + +var ( + mockClusterController mockClusterControllerServer + mockJobController mockJobControllerServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + dataprocpb.RegisterClusterControllerServer(serv, &mockClusterController) + dataprocpb.RegisterJobControllerServer(serv, &mockJobController) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestClusterControllerCreateCluster(t *testing.T) { + var projectId2 string = "projectId2939242356" + var clusterName string = "clusterName-1018081872" + var clusterUuid string = "clusterUuid-1017854240" + var expectedResponse = &dataprocpb.Cluster{ + ProjectId: projectId2, + ClusterName: clusterName, + ClusterUuid: clusterUuid, + } + + mockClusterController.err = nil + mockClusterController.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockClusterController.resps = append(mockClusterController.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var projectId string = "projectId-1969970175" + var region string = "region-934795532" + var cluster *dataprocpb.Cluster = &dataprocpb.Cluster{} + var request = &dataprocpb.CreateClusterRequest{ + ProjectId: projectId, + Region: region, + Cluster: cluster, + } + + c, err := NewClusterControllerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.CreateCluster(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterController.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterControllerCreateClusterError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterController.err = nil + mockClusterController.resps = append(mockClusterController.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var projectId string = "projectId-1969970175" + var region string = "region-934795532" + var cluster *dataprocpb.Cluster = &dataprocpb.Cluster{} + var request = &dataprocpb.CreateClusterRequest{ + ProjectId: projectId, + Region: region, + Cluster: cluster, + } + + c, err := NewClusterControllerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.CreateCluster(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterControllerUpdateCluster(t *testing.T) { + var projectId2 string = "projectId2939242356" + var clusterName2 string = "clusterName2875867491" + var clusterUuid string = "clusterUuid-1017854240" + var 
expectedResponse = &dataprocpb.Cluster{ + ProjectId: projectId2, + ClusterName: clusterName2, + ClusterUuid: clusterUuid, + } + + mockClusterController.err = nil + mockClusterController.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockClusterController.resps = append(mockClusterController.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var projectId string = "projectId-1969970175" + var region string = "region-934795532" + var clusterName string = "clusterName-1018081872" + var cluster *dataprocpb.Cluster = &dataprocpb.Cluster{} + var updateMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{} + var request = &dataprocpb.UpdateClusterRequest{ + ProjectId: projectId, + Region: region, + ClusterName: clusterName, + Cluster: cluster, + UpdateMask: updateMask, + } + + c, err := NewClusterControllerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.UpdateCluster(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterController.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterControllerUpdateClusterError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterController.err = nil + mockClusterController.resps = append(mockClusterController.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var projectId string = "projectId-1969970175" + var region string = "region-934795532" + var clusterName string = "clusterName-1018081872" + var cluster *dataprocpb.Cluster = &dataprocpb.Cluster{} + var updateMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{} + var request = &dataprocpb.UpdateClusterRequest{ + ProjectId: projectId, + Region: region, + ClusterName: clusterName, + Cluster: cluster, + UpdateMask: updateMask, + } + + c, err := NewClusterControllerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.UpdateCluster(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterControllerDeleteCluster(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockClusterController.err = nil + mockClusterController.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockClusterController.resps = append(mockClusterController.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var projectId string = "projectId-1969970175" + var region string = "region-934795532" + var clusterName string = "clusterName-1018081872" + var request = &dataprocpb.DeleteClusterRequest{ + ProjectId: projectId, + Region: region, + 
ClusterName: clusterName, + } + + c, err := NewClusterControllerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.DeleteCluster(context.Background(), request) + if err != nil { + t.Fatal(err) + } + err = respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterController.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestClusterControllerDeleteClusterError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterController.err = nil + mockClusterController.resps = append(mockClusterController.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var projectId string = "projectId-1969970175" + var region string = "region-934795532" + var clusterName string = "clusterName-1018081872" + var request = &dataprocpb.DeleteClusterRequest{ + ProjectId: projectId, + Region: region, + ClusterName: clusterName, + } + + c, err := NewClusterControllerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.DeleteCluster(context.Background(), request) + if err != nil { + t.Fatal(err) + } + err = respLRO.Wait(context.Background()) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestClusterControllerGetCluster(t *testing.T) { + var projectId2 string = "projectId2939242356" + var clusterName2 string = "clusterName2875867491" + var clusterUuid string = "clusterUuid-1017854240" + var expectedResponse = &dataprocpb.Cluster{ + ProjectId: projectId2, + ClusterName: clusterName2, + ClusterUuid: clusterUuid, + } + + mockClusterController.err = nil + mockClusterController.reqs = nil + + mockClusterController.resps = append(mockClusterController.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var region string = "region-934795532" + var clusterName string = "clusterName-1018081872" + var request = &dataprocpb.GetClusterRequest{ + ProjectId: projectId, + Region: region, + ClusterName: clusterName, + } + + c, err := NewClusterControllerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetCluster(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterController.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterControllerGetClusterError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterController.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var region string = "region-934795532" + var clusterName string = "clusterName-1018081872" + var request = &dataprocpb.GetClusterRequest{ + ProjectId: projectId, + Region: region, + ClusterName: clusterName, + } + + c, err := NewClusterControllerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetCluster(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + 
} else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterControllerListClusters(t *testing.T) { + var nextPageToken string = "" + var clustersElement *dataprocpb.Cluster = &dataprocpb.Cluster{} + var clusters = []*dataprocpb.Cluster{clustersElement} + var expectedResponse = &dataprocpb.ListClustersResponse{ + NextPageToken: nextPageToken, + Clusters: clusters, + } + + mockClusterController.err = nil + mockClusterController.reqs = nil + + mockClusterController.resps = append(mockClusterController.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var region string = "region-934795532" + var request = &dataprocpb.ListClustersRequest{ + ProjectId: projectId, + Region: region, + } + + c, err := NewClusterControllerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListClusters(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterController.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Clusters[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterControllerListClustersError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterController.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var region string = "region-934795532" + var request = &dataprocpb.ListClustersRequest{ + ProjectId: projectId, + Region: region, + } + + c, err := NewClusterControllerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListClusters(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterControllerDiagnoseCluster(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockClusterController.err = nil + mockClusterController.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockClusterController.resps = append(mockClusterController.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var projectId string = "projectId-1969970175" + var region string = "region-934795532" + var clusterName string = "clusterName-1018081872" + var request = &dataprocpb.DiagnoseClusterRequest{ + ProjectId: projectId, + Region: region, + ClusterName: clusterName, + } + + c, err := NewClusterControllerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.DiagnoseCluster(context.Background(), request) + if err != nil { + t.Fatal(err) + } + err = respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterController.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestClusterControllerDiagnoseClusterError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterController.err = nil + 
mockClusterController.resps = append(mockClusterController.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var projectId string = "projectId-1969970175" + var region string = "region-934795532" + var clusterName string = "clusterName-1018081872" + var request = &dataprocpb.DiagnoseClusterRequest{ + ProjectId: projectId, + Region: region, + ClusterName: clusterName, + } + + c, err := NewClusterControllerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.DiagnoseCluster(context.Background(), request) + if err != nil { + t.Fatal(err) + } + err = respLRO.Wait(context.Background()) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestJobControllerSubmitJob(t *testing.T) { + var driverOutputResourceUri string = "driverOutputResourceUri-542229086" + var driverControlFilesUri string = "driverControlFilesUri207057643" + var expectedResponse = &dataprocpb.Job{ + DriverOutputResourceUri: driverOutputResourceUri, + DriverControlFilesUri: driverControlFilesUri, + } + + mockJobController.err = nil + mockJobController.reqs = nil + + mockJobController.resps = append(mockJobController.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var region string = "region-934795532" + var job *dataprocpb.Job = &dataprocpb.Job{} + var request = &dataprocpb.SubmitJobRequest{ + ProjectId: projectId, + Region: region, + Job: job, + } + + c, err := NewJobControllerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SubmitJob(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockJobController.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestJobControllerSubmitJobError(t *testing.T) { + errCode := codes.PermissionDenied + mockJobController.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var region string = "region-934795532" + var job *dataprocpb.Job = &dataprocpb.Job{} + var request = &dataprocpb.SubmitJobRequest{ + ProjectId: projectId, + Region: region, + Job: job, + } + + c, err := NewJobControllerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SubmitJob(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestJobControllerGetJob(t *testing.T) { + var driverOutputResourceUri string = "driverOutputResourceUri-542229086" + var driverControlFilesUri string = "driverControlFilesUri207057643" + var expectedResponse = &dataprocpb.Job{ + DriverOutputResourceUri: driverOutputResourceUri, + DriverControlFilesUri: driverControlFilesUri, + } + + mockJobController.err = nil + mockJobController.reqs = nil + + mockJobController.resps = append(mockJobController.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var region string = "region-934795532" 
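+	// (These literal values are arbitrary fixtures emitted by the GAPIC
+	// generator; the test only asserts that the request reaches the mock
+	// unchanged and the canned response comes back intact.)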
+ var jobId string = "jobId-1154752291" + var request = &dataprocpb.GetJobRequest{ + ProjectId: projectId, + Region: region, + JobId: jobId, + } + + c, err := NewJobControllerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetJob(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockJobController.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestJobControllerGetJobError(t *testing.T) { + errCode := codes.PermissionDenied + mockJobController.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var region string = "region-934795532" + var jobId string = "jobId-1154752291" + var request = &dataprocpb.GetJobRequest{ + ProjectId: projectId, + Region: region, + JobId: jobId, + } + + c, err := NewJobControllerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetJob(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestJobControllerListJobs(t *testing.T) { + var nextPageToken string = "" + var jobsElement *dataprocpb.Job = &dataprocpb.Job{} + var jobs = []*dataprocpb.Job{jobsElement} + var expectedResponse = &dataprocpb.ListJobsResponse{ + NextPageToken: nextPageToken, + Jobs: jobs, + } + + mockJobController.err = nil + mockJobController.reqs = nil + + mockJobController.resps = append(mockJobController.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var region string = "region-934795532" + var request = &dataprocpb.ListJobsRequest{ + ProjectId: projectId, + Region: region, + } + + c, err := NewJobControllerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListJobs(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockJobController.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Jobs[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestJobControllerListJobsError(t *testing.T) { + errCode := codes.PermissionDenied + mockJobController.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var region string = "region-934795532" + var request = &dataprocpb.ListJobsRequest{ + ProjectId: projectId, + Region: region, + } + + c, err := NewJobControllerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListJobs(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestJobControllerUpdateJob(t *testing.T) { + var driverOutputResourceUri string = "driverOutputResourceUri-542229086" + var driverControlFilesUri string = 
"driverControlFilesUri207057643" + var expectedResponse = &dataprocpb.Job{ + DriverOutputResourceUri: driverOutputResourceUri, + DriverControlFilesUri: driverControlFilesUri, + } + + mockJobController.err = nil + mockJobController.reqs = nil + + mockJobController.resps = append(mockJobController.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var region string = "region-934795532" + var jobId string = "jobId-1154752291" + var job *dataprocpb.Job = &dataprocpb.Job{} + var updateMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{} + var request = &dataprocpb.UpdateJobRequest{ + ProjectId: projectId, + Region: region, + JobId: jobId, + Job: job, + UpdateMask: updateMask, + } + + c, err := NewJobControllerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateJob(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockJobController.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestJobControllerUpdateJobError(t *testing.T) { + errCode := codes.PermissionDenied + mockJobController.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var region string = "region-934795532" + var jobId string = "jobId-1154752291" + var job *dataprocpb.Job = &dataprocpb.Job{} + var updateMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{} + var request = &dataprocpb.UpdateJobRequest{ + ProjectId: projectId, + Region: region, + JobId: jobId, + Job: job, + UpdateMask: updateMask, + } + + c, err := NewJobControllerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateJob(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestJobControllerCancelJob(t *testing.T) { + var driverOutputResourceUri string = "driverOutputResourceUri-542229086" + var driverControlFilesUri string = "driverControlFilesUri207057643" + var expectedResponse = &dataprocpb.Job{ + DriverOutputResourceUri: driverOutputResourceUri, + DriverControlFilesUri: driverControlFilesUri, + } + + mockJobController.err = nil + mockJobController.reqs = nil + + mockJobController.resps = append(mockJobController.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var region string = "region-934795532" + var jobId string = "jobId-1154752291" + var request = &dataprocpb.CancelJobRequest{ + ProjectId: projectId, + Region: region, + JobId: jobId, + } + + c, err := NewJobControllerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CancelJob(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockJobController.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestJobControllerCancelJobError(t *testing.T) { + errCode := codes.PermissionDenied + mockJobController.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var region string = 
"region-934795532" + var jobId string = "jobId-1154752291" + var request = &dataprocpb.CancelJobRequest{ + ProjectId: projectId, + Region: region, + JobId: jobId, + } + + c, err := NewJobControllerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CancelJob(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestJobControllerDeleteJob(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockJobController.err = nil + mockJobController.reqs = nil + + mockJobController.resps = append(mockJobController.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var region string = "region-934795532" + var jobId string = "jobId-1154752291" + var request = &dataprocpb.DeleteJobRequest{ + ProjectId: projectId, + Region: region, + JobId: jobId, + } + + c, err := NewJobControllerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteJob(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockJobController.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestJobControllerDeleteJobError(t *testing.T) { + errCode := codes.PermissionDenied + mockJobController.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var region string = "region-934795532" + var jobId string = "jobId-1154752291" + var request = &dataprocpb.DeleteJobRequest{ + ProjectId: projectId, + Region: region, + JobId: jobId, + } + + c, err := NewJobControllerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteJob(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} diff --git a/vendor/cloud.google.com/go/datastore/client.go b/vendor/cloud.google.com/go/datastore/client.go new file mode 100644 index 0000000..940bfec --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/client.go @@ -0,0 +1,118 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "fmt" + + gax "github.com/googleapis/gax-go" + + "cloud.google.com/go/internal" + "cloud.google.com/go/internal/version" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/datastore/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// datastoreClient is a wrapper for the pb.DatastoreClient that includes gRPC +// metadata to be sent in each request for server-side traffic management. 
+type datastoreClient struct { + // Embed so we still implement the DatastoreClient interface, + // if the interface adds more methods. + pb.DatastoreClient + + c pb.DatastoreClient + md metadata.MD +} + +func newDatastoreClient(conn *grpc.ClientConn, projectID string) pb.DatastoreClient { + return &datastoreClient{ + c: pb.NewDatastoreClient(conn), + md: metadata.Pairs( + resourcePrefixHeader, "projects/"+projectID, + "x-goog-api-client", fmt.Sprintf("gl-go/%s gccl/%s grpc/", version.Go(), version.Repo)), + } +} + +func (dc *datastoreClient) Lookup(ctx context.Context, in *pb.LookupRequest, opts ...grpc.CallOption) (res *pb.LookupResponse, err error) { + err = dc.invoke(ctx, func(ctx context.Context) error { + res, err = dc.c.Lookup(ctx, in, opts...) + return err + }) + return res, err +} + +func (dc *datastoreClient) RunQuery(ctx context.Context, in *pb.RunQueryRequest, opts ...grpc.CallOption) (res *pb.RunQueryResponse, err error) { + err = dc.invoke(ctx, func(ctx context.Context) error { + res, err = dc.c.RunQuery(ctx, in, opts...) + return err + }) + return res, err +} + +func (dc *datastoreClient) BeginTransaction(ctx context.Context, in *pb.BeginTransactionRequest, opts ...grpc.CallOption) (res *pb.BeginTransactionResponse, err error) { + err = dc.invoke(ctx, func(ctx context.Context) error { + res, err = dc.c.BeginTransaction(ctx, in, opts...) + return err + }) + return res, err +} + +func (dc *datastoreClient) Commit(ctx context.Context, in *pb.CommitRequest, opts ...grpc.CallOption) (res *pb.CommitResponse, err error) { + err = dc.invoke(ctx, func(ctx context.Context) error { + res, err = dc.c.Commit(ctx, in, opts...) + return err + }) + return res, err +} + +func (dc *datastoreClient) Rollback(ctx context.Context, in *pb.RollbackRequest, opts ...grpc.CallOption) (res *pb.RollbackResponse, err error) { + err = dc.invoke(ctx, func(ctx context.Context) error { + res, err = dc.c.Rollback(ctx, in, opts...) + return err + }) + return res, err +} + +func (dc *datastoreClient) AllocateIds(ctx context.Context, in *pb.AllocateIdsRequest, opts ...grpc.CallOption) (res *pb.AllocateIdsResponse, err error) { + err = dc.invoke(ctx, func(ctx context.Context) error { + res, err = dc.c.AllocateIds(ctx, in, opts...) + return err + }) + return res, err +} + +func (dc *datastoreClient) invoke(ctx context.Context, f func(ctx context.Context) error) error { + ctx = metadata.NewOutgoingContext(ctx, dc.md) + return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) { + err = f(ctx) + return !shouldRetry(err), err + }) +} + +func shouldRetry(err error) bool { + if err == nil { + return false + } + s, ok := status.FromError(err) + if !ok { + return false + } + // See https://cloud.google.com/datastore/docs/concepts/errors. + return s.Code() == codes.Unavailable || s.Code() == codes.DeadlineExceeded +} diff --git a/vendor/cloud.google.com/go/datastore/datastore.go b/vendor/cloud.google.com/go/datastore/datastore.go new file mode 100644 index 0000000..1a80dc4 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/datastore.go @@ -0,0 +1,627 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "errors" + "fmt" + "log" + "os" + "reflect" + + "cloud.google.com/go/internal/trace" + "golang.org/x/net/context" + "google.golang.org/api/option" + gtransport "google.golang.org/api/transport/grpc" + pb "google.golang.org/genproto/googleapis/datastore/v1" + "google.golang.org/grpc" +) + +const ( + prodAddr = "datastore.googleapis.com:443" + userAgent = "gcloud-golang-datastore/20160401" +) + +// ScopeDatastore grants permissions to view and/or manage datastore entities +const ScopeDatastore = "https://www.googleapis.com/auth/datastore" + +// resourcePrefixHeader is the name of the metadata header used to indicate +// the resource being operated on. +const resourcePrefixHeader = "google-cloud-resource-prefix" + +// Client is a client for reading and writing data in a datastore dataset. +type Client struct { + conn *grpc.ClientConn + client pb.DatastoreClient + endpoint string + dataset string // Called dataset by the datastore API, synonym for project ID. +} + +// NewClient creates a new Client for a given dataset. +// If the project ID is empty, it is derived from the DATASTORE_PROJECT_ID environment variable. +// If the DATASTORE_EMULATOR_HOST environment variable is set, client will use its value +// to connect to a locally-running datastore emulator. +func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { + var o []option.ClientOption + // Environment variables for gcd emulator: + // https://cloud.google.com/datastore/docs/tools/datastore-emulator + // If the emulator is available, dial it directly (and don't pass any credentials). + if addr := os.Getenv("DATASTORE_EMULATOR_HOST"); addr != "" { + conn, err := grpc.Dial(addr, grpc.WithInsecure()) + if err != nil { + return nil, fmt.Errorf("grpc.Dial: %v", err) + } + o = []option.ClientOption{option.WithGRPCConn(conn)} + } else { + o = []option.ClientOption{ + option.WithEndpoint(prodAddr), + option.WithScopes(ScopeDatastore), + option.WithUserAgent(userAgent), + } + } + // Warn if we see the legacy emulator environment variables. + if os.Getenv("DATASTORE_HOST") != "" && os.Getenv("DATASTORE_EMULATOR_HOST") == "" { + log.Print("WARNING: legacy environment variable DATASTORE_HOST is ignored. Use DATASTORE_EMULATOR_HOST instead.") + } + if os.Getenv("DATASTORE_DATASET") != "" && os.Getenv("DATASTORE_PROJECT_ID") == "" { + log.Print("WARNING: legacy environment variable DATASTORE_DATASET is ignored. Use DATASTORE_PROJECT_ID instead.") + } + if projectID == "" { + projectID = os.Getenv("DATASTORE_PROJECT_ID") + } + if projectID == "" { + return nil, errors.New("datastore: missing project/dataset id") + } + o = append(o, opts...) + conn, err := gtransport.Dial(ctx, o...) + if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + return &Client{ + conn: conn, + client: newDatastoreClient(conn, projectID), + dataset: projectID, + }, nil + +} + +var ( + // ErrInvalidEntityType is returned when functions like Get or Next are + // passed a dst or src argument of invalid type. 
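+	// (Get, for instance, returns it when dst is nil.)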
+	ErrInvalidEntityType = errors.New("datastore: invalid entity type")
+	// ErrInvalidKey is returned when an invalid key is presented.
+	ErrInvalidKey = errors.New("datastore: invalid key")
+	// ErrNoSuchEntity is returned when no entity was found for a given key.
+	ErrNoSuchEntity = errors.New("datastore: no such entity")
+)
+
+type multiArgType int
+
+const (
+	multiArgTypeInvalid multiArgType = iota
+	multiArgTypePropertyLoadSaver
+	multiArgTypeStruct
+	multiArgTypeStructPtr
+	multiArgTypeInterface
+)
+
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct.
+// StructType is the type of the struct pointed to by the destination argument
+// passed to Get or to Iterator.Next.
+type ErrFieldMismatch struct {
+	StructType reflect.Type
+	FieldName  string
+	Reason     string
+}
+
+func (e *ErrFieldMismatch) Error() string {
+	return fmt.Sprintf("datastore: cannot load field %q into a %q: %s",
+		e.FieldName, e.StructType, e.Reason)
+}
+
+// GeoPoint represents a location as latitude/longitude in degrees.
+type GeoPoint struct {
+	Lat, Lng float64
+}
+
+// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude.
+func (g GeoPoint) Valid() bool {
+	return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180
+}
+
+func keyToProto(k *Key) *pb.Key {
+	if k == nil {
+		return nil
+	}
+
+	var path []*pb.Key_PathElement
+	for {
+		el := &pb.Key_PathElement{Kind: k.Kind}
+		if k.ID != 0 {
+			el.IdType = &pb.Key_PathElement_Id{Id: k.ID}
+		} else if k.Name != "" {
+			el.IdType = &pb.Key_PathElement_Name{Name: k.Name}
+		}
+		path = append(path, el)
+		if k.Parent == nil {
+			break
+		}
+		k = k.Parent
+	}
+
+	// The path should be in order [grandparent, parent, child].
+	// We built it backward above, so reverse it.
+	for i := 0; i < len(path)/2; i++ {
+		path[i], path[len(path)-i-1] = path[len(path)-i-1], path[i]
+	}
+
+	key := &pb.Key{Path: path}
+	if k.Namespace != "" {
+		key.PartitionId = &pb.PartitionId{
+			NamespaceId: k.Namespace,
+		}
+	}
+	return key
+}
+
+// protoToKey decodes a protocol buffer representation of a key into an
+// equivalent *Key object. If the key is invalid, protoToKey will return the
+// invalid key along with ErrInvalidKey.
+func protoToKey(p *pb.Key) (*Key, error) {
+	var key *Key
+	var namespace string
+	if partition := p.PartitionId; partition != nil {
+		namespace = partition.NamespaceId
+	}
+	for _, el := range p.Path {
+		key = &Key{
+			Namespace: namespace,
+			Kind:      el.Kind,
+			ID:        el.GetId(),
+			Name:      el.GetName(),
+			Parent:    key,
+		}
+	}
+	if !key.valid() { // Also detects key == nil.
+		return key, ErrInvalidKey
+	}
+	return key, nil
+}
+
+// multiKeyToProto is a batch version of keyToProto.
+func multiKeyToProto(keys []*Key) []*pb.Key {
+	ret := make([]*pb.Key, len(keys))
+	for i, k := range keys {
+		ret[i] = keyToProto(k)
+	}
+	return ret
+}
+
+// multiProtoToKey is a batch version of protoToKey.
+func multiProtoToKey(keys []*pb.Key) ([]*Key, error) {
+	hasErr := false
+	ret := make([]*Key, len(keys))
+	err := make(MultiError, len(keys))
+	for i, k := range keys {
+		ret[i], err[i] = protoToKey(k)
+		if err[i] != nil {
+			hasErr = true
+		}
+	}
+	if hasErr {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// multiValid is a batch version of Key.valid. It returns an error, not a
+// []bool.
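+//
+// A small sketch of the contract (NameKey is this package's keyed constructor):
+//
+//	err := multiValid([]*Key{NameKey("Kind", "name", nil), nil})
+//	// err is a MultiError; err[0] == nil, err[1] == ErrInvalidKey.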
+func multiValid(key []*Key) error { + invalid := false + for _, k := range key { + if !k.valid() { + invalid = true + break + } + } + if !invalid { + return nil + } + err := make(MultiError, len(key)) + for i, k := range key { + if !k.valid() { + err[i] = ErrInvalidKey + } + } + return err +} + +// checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct +// type S, for some interface type I, or some non-interface non-pointer type P +// such that P or *P implements PropertyLoadSaver. +// +// It returns what category the slice's elements are, and the reflect.Type +// that represents S, I or P. +// +// As a special case, PropertyList is an invalid type for v. +// +// TODO(djd): multiArg is very confusing. Fold this logic into the +// relevant Put/Get methods to make the logic less opaque. +func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) { + if v.Kind() != reflect.Slice { + return multiArgTypeInvalid, nil + } + if v.Type() == typeOfPropertyList { + return multiArgTypeInvalid, nil + } + elemType = v.Type().Elem() + if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) { + return multiArgTypePropertyLoadSaver, elemType + } + switch elemType.Kind() { + case reflect.Struct: + return multiArgTypeStruct, elemType + case reflect.Interface: + return multiArgTypeInterface, elemType + case reflect.Ptr: + elemType = elemType.Elem() + if elemType.Kind() == reflect.Struct { + return multiArgTypeStructPtr, elemType + } + } + return multiArgTypeInvalid, nil +} + +// Close closes the Client. +func (c *Client) Close() error { + return c.conn.Close() +} + +// Get loads the entity stored for key into dst, which must be a struct pointer +// or implement PropertyLoadSaver. If there is no such entity for the key, Get +// returns ErrNoSuchEntity. +// +// The values of dst's unmatched struct fields are not modified, and matching +// slice-typed fields are not reset before appending to them. In particular, it +// is recommended to pass a pointer to a zero valued struct on each Get call. +// +// ErrFieldMismatch is returned when a field is to be loaded into a different +// type than the one it was stored from, or when a field is missing or +// unexported in the destination struct. ErrFieldMismatch is only returned if +// dst is a struct pointer. +func (c *Client) Get(ctx context.Context, key *Key, dst interface{}) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.Get") + defer func() { trace.EndSpan(ctx, err) }() + + if dst == nil { // get catches nil interfaces; we need to catch nil ptr here + return ErrInvalidEntityType + } + err = c.get(ctx, []*Key{key}, []interface{}{dst}, nil) + if me, ok := err.(MultiError); ok { + return me[0] + } + return err +} + +// GetMulti is a batch version of Get. +// +// dst must be a []S, []*S, []I or []P, for some struct type S, some interface +// type I, or some non-interface non-pointer type P such that P or *P +// implements PropertyLoadSaver. If an []I, each element must be a valid dst +// for Get: it must be a struct pointer or implement PropertyLoadSaver. +// +// As a special case, PropertyList is an invalid type for dst, even though a +// PropertyList is a slice of structs. It is treated as invalid to avoid being +// mistakenly passed when []PropertyList was intended. 
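+//
+// Illustrative example (Task is a hypothetical exported struct type; client
+// and ctx come from NewClient and the caller):
+//
+//	keys := []*Key{NameKey("Task", "a", nil), NameKey("Task", "b", nil)}
+//	tasks := make([]Task, len(keys))
+//	err := client.GetMulti(ctx, keys, tasks)
+//	if me, ok := err.(MultiError); ok {
+//		// me[i] is the error (if any) for keys[i], e.g. ErrNoSuchEntity.
+//	}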
+func (c *Client) GetMulti(ctx context.Context, keys []*Key, dst interface{}) (err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.GetMulti")
+	defer func() { trace.EndSpan(ctx, err) }()
+
+	return c.get(ctx, keys, dst, nil)
+}
+
+func (c *Client) get(ctx context.Context, keys []*Key, dst interface{}, opts *pb.ReadOptions) error {
+	v := reflect.ValueOf(dst)
+	multiArgType, _ := checkMultiArg(v)
+
+	// Sanity checks.
+	if multiArgType == multiArgTypeInvalid {
+		return errors.New("datastore: dst has invalid type")
+	}
+	if len(keys) != v.Len() {
+		return errors.New("datastore: keys and dst slices have different length")
+	}
+	if len(keys) == 0 {
+		return nil
+	}
+
+	// Go through the keys: validate them, serialize them, and build a map
+	// from each key to its indices in dst. Equal keys are deduped.
+	multiErr, any := make(MultiError, len(keys)), false
+	keyMap := make(map[string][]int, len(keys))
+	pbKeys := make([]*pb.Key, 0, len(keys))
+	for i, k := range keys {
+		if !k.valid() {
+			multiErr[i] = ErrInvalidKey
+			any = true
+		} else {
+			ks := k.String()
+			if _, ok := keyMap[ks]; !ok {
+				pbKeys = append(pbKeys, keyToProto(k))
+			}
+			keyMap[ks] = append(keyMap[ks], i)
+		}
+	}
+	if any {
+		return multiErr
+	}
+	req := &pb.LookupRequest{
+		ProjectId:   c.dataset,
+		Keys:        pbKeys,
+		ReadOptions: opts,
+	}
+	resp, err := c.client.Lookup(ctx, req)
+	if err != nil {
+		return err
+	}
+	found := resp.Found
+	missing := resp.Missing
+	// Upper bound of 100 iterations to prevent an infinite loop.
+	// The bound of 100 follows from the service limits:
+	// the max number of entities you can request from Datastore is 1,000;
+	// the max size of a Datastore entity is 1 MiB;
+	// the max request size is 10 MiB, so we assume the max response size is also 10 MiB.
+	// A full 10 MiB response therefore carries at least 10 entities, so at
+	// most 1,000 / 10 = 100 round trips are needed.
+	// Note that if ctx has a deadline, the deadline will probably
+	// be hit before we reach 100 iterations.
+	for i := 0; len(resp.Deferred) > 0 && i < 100; i++ {
+		req.Keys = resp.Deferred
+		resp, err = c.client.Lookup(ctx, req)
+		if err != nil {
+			return err
+		}
+		found = append(found, resp.Found...)
+		missing = append(missing, resp.Missing...)
+	}
+
+	filled := 0
+	for _, e := range found {
+		k, err := protoToKey(e.Entity.Key)
+		if err != nil {
+			return errors.New("datastore: internal error: server returned an invalid key")
+		}
+		filled += len(keyMap[k.String()])
+		for _, index := range keyMap[k.String()] {
+			elem := v.Index(index)
+			if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
+				elem = elem.Addr()
+			}
+			if multiArgType == multiArgTypeStructPtr && elem.IsNil() {
+				elem.Set(reflect.New(elem.Type().Elem()))
+			}
+			if err := loadEntityProto(elem.Interface(), e.Entity); err != nil {
+				multiErr[index] = err
+				any = true
+			}
+		}
+	}
+	for _, e := range missing {
+		k, err := protoToKey(e.Entity.Key)
+		if err != nil {
+			return errors.New("datastore: internal error: server returned an invalid key")
+		}
+		filled += len(keyMap[k.String()])
+		for _, index := range keyMap[k.String()] {
+			multiErr[index] = ErrNoSuchEntity
+		}
+		any = true
+	}
+
+	if filled != len(keys) {
+		return errors.New("datastore: internal error: server returned the wrong number of entities")
+	}
+
+	if any {
+		return multiErr
+	}
+	return nil
+}
+
+// Put saves the entity src into the datastore with key k. src must be a struct
+// pointer or implement PropertyLoadSaver; if a struct pointer then any
+// unexported fields of that struct will be skipped.
If k is an incomplete key, +// the returned key will be a unique key generated by the datastore. +func (c *Client) Put(ctx context.Context, key *Key, src interface{}) (*Key, error) { + k, err := c.PutMulti(ctx, []*Key{key}, []interface{}{src}) + if err != nil { + if me, ok := err.(MultiError); ok { + return nil, me[0] + } + return nil, err + } + return k[0], nil +} + +// PutMulti is a batch version of Put. +// +// src must satisfy the same conditions as the dst argument to GetMulti. +// TODO(jba): rewrite in terms of Mutate. +func (c *Client) PutMulti(ctx context.Context, keys []*Key, src interface{}) (ret []*Key, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.PutMulti") + defer func() { trace.EndSpan(ctx, err) }() + + mutations, err := putMutations(keys, src) + if err != nil { + return nil, err + } + + // Make the request. + req := &pb.CommitRequest{ + ProjectId: c.dataset, + Mutations: mutations, + Mode: pb.CommitRequest_NON_TRANSACTIONAL, + } + resp, err := c.client.Commit(ctx, req) + if err != nil { + return nil, err + } + + // Copy any newly minted keys into the returned keys. + ret = make([]*Key, len(keys)) + for i, key := range keys { + if key.Incomplete() { + // This key is in the mutation results. + ret[i], err = protoToKey(resp.MutationResults[i].Key) + if err != nil { + return nil, errors.New("datastore: internal error: server returned an invalid key") + } + } else { + ret[i] = key + } + } + return ret, nil +} + +func putMutations(keys []*Key, src interface{}) ([]*pb.Mutation, error) { + v := reflect.ValueOf(src) + multiArgType, _ := checkMultiArg(v) + if multiArgType == multiArgTypeInvalid { + return nil, errors.New("datastore: src has invalid type") + } + if len(keys) != v.Len() { + return nil, errors.New("datastore: key and src slices have different length") + } + if len(keys) == 0 { + return nil, nil + } + if err := multiValid(keys); err != nil { + return nil, err + } + mutations := make([]*pb.Mutation, 0, len(keys)) + multiErr := make(MultiError, len(keys)) + hasErr := false + for i, k := range keys { + elem := v.Index(i) + // Two cases where we need to take the address: + // 1) multiArgTypePropertyLoadSaver => &elem implements PLS + // 2) multiArgTypeStruct => saveEntity needs *struct + if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct { + elem = elem.Addr() + } + p, err := saveEntity(k, elem.Interface()) + if err != nil { + multiErr[i] = err + hasErr = true + } + var mut *pb.Mutation + if k.Incomplete() { + mut = &pb.Mutation{Operation: &pb.Mutation_Insert{Insert: p}} + } else { + mut = &pb.Mutation{Operation: &pb.Mutation_Upsert{Upsert: p}} + } + mutations = append(mutations, mut) + } + if hasErr { + return nil, multiErr + } + return mutations, nil +} + +// Delete deletes the entity for the given key. +func (c *Client) Delete(ctx context.Context, key *Key) error { + err := c.DeleteMulti(ctx, []*Key{key}) + if me, ok := err.(MultiError); ok { + return me[0] + } + return err +} + +// DeleteMulti is a batch version of Delete. +// TODO(jba): rewrite in terms of Mutate. 
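+//
+// Note (from deleteMutations below): keys that appear more than once are
+// deduped into a single delete mutation, and any incomplete key causes the
+// whole call to fail.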
+func (c *Client) DeleteMulti(ctx context.Context, keys []*Key) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.DeleteMulti") + defer func() { trace.EndSpan(ctx, err) }() + + mutations, err := deleteMutations(keys) + if err != nil { + return err + } + + req := &pb.CommitRequest{ + ProjectId: c.dataset, + Mutations: mutations, + Mode: pb.CommitRequest_NON_TRANSACTIONAL, + } + _, err = c.client.Commit(ctx, req) + return err +} + +func deleteMutations(keys []*Key) ([]*pb.Mutation, error) { + mutations := make([]*pb.Mutation, 0, len(keys)) + set := make(map[string]bool, len(keys)) + for _, k := range keys { + if k.Incomplete() { + return nil, fmt.Errorf("datastore: can't delete the incomplete key: %v", k) + } + ks := k.String() + if !set[ks] { + mutations = append(mutations, &pb.Mutation{ + Operation: &pb.Mutation_Delete{Delete: keyToProto(k)}, + }) + } + set[ks] = true + } + return mutations, nil +} + +// Mutate applies one or more mutations atomically. +// It returns the keys of the argument Mutations, in the same order. +// +// If any of the mutations are invalid, Mutate returns a MultiError with the errors. +// Mutate returns a MultiError in this case even if there is only one Mutation. +func (c *Client) Mutate(ctx context.Context, muts ...*Mutation) (ret []*Key, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.Mutate") + defer func() { trace.EndSpan(ctx, err) }() + + pmuts, err := mutationProtos(muts) + if err != nil { + return nil, err + } + req := &pb.CommitRequest{ + ProjectId: c.dataset, + Mutations: pmuts, + Mode: pb.CommitRequest_NON_TRANSACTIONAL, + } + resp, err := c.client.Commit(ctx, req) + if err != nil { + return nil, err + } + // Copy any newly minted keys into the returned keys. + ret = make([]*Key, len(muts)) + for i, mut := range muts { + if mut.key.Incomplete() { + // This key is in the mutation results. + ret[i], err = protoToKey(resp.MutationResults[i].Key) + if err != nil { + return nil, errors.New("datastore: internal error: server returned an invalid key") + } + } else { + ret[i] = mut.key + } + } + return ret, nil +} diff --git a/vendor/cloud.google.com/go/datastore/datastore_test.go b/vendor/cloud.google.com/go/datastore/datastore_test.go new file mode 100644 index 0000000..7b184fd --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/datastore_test.go @@ -0,0 +1,3493 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package datastore + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" + "strings" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + + "github.com/golang/protobuf/proto" + "github.com/google/go-cmp/cmp" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/datastore/v1" + "google.golang.org/grpc" +) + +type ( + myBlob []byte + myByte byte + myString string +) + +func makeMyByteSlice(n int) []myByte { + b := make([]myByte, n) + for i := range b { + b[i] = myByte(i) + } + return b +} + +func makeInt8Slice(n int) []int8 { + b := make([]int8, n) + for i := range b { + b[i] = int8(i) + } + return b +} + +func makeUint8Slice(n int) []uint8 { + b := make([]uint8, n) + for i := range b { + b[i] = uint8(i) + } + return b +} + +func newKey(stringID string, parent *Key) *Key { + return NameKey("kind", stringID, parent) +} + +var ( + testKey0 = newKey("name0", nil) + testKey1a = newKey("name1", nil) + testKey1b = newKey("name1", nil) + testKey2a = newKey("name2", testKey0) + testKey2b = newKey("name2", testKey0) + testGeoPt0 = GeoPoint{Lat: 1.2, Lng: 3.4} + testGeoPt1 = GeoPoint{Lat: 5, Lng: 10} + testBadGeoPt = GeoPoint{Lat: 1000, Lng: 34} + + ts = time.Unix(1e9, 0).UTC() +) + +type B0 struct { + B []byte `datastore:",noindex"` +} + +type B1 struct { + B []int8 +} + +type B2 struct { + B myBlob `datastore:",noindex"` +} + +type B3 struct { + B []myByte `datastore:",noindex"` +} + +type B4 struct { + B [][]byte +} + +type C0 struct { + I int + C chan int +} + +type C1 struct { + I int + C *chan int +} + +type C2 struct { + I int + C []chan int +} + +type C3 struct { + C string +} + +type c4 struct { + C string +} + +type E struct{} + +type G0 struct { + G GeoPoint +} + +type G1 struct { + G []GeoPoint +} + +type K0 struct { + K *Key +} + +type K1 struct { + K []*Key +} + +type S struct { + St string +} + +type NoOmit struct { + A string + B int `datastore:"Bb"` + C bool `datastore:",noindex"` +} + +type OmitAll struct { + A string `datastore:",omitempty"` + B int `datastore:"Bb,omitempty"` + C bool `datastore:",omitempty,noindex"` + F []int `datastore:",omitempty"` +} + +type Omit struct { + A string `datastore:",omitempty"` + B int `datastore:"Bb,omitempty"` + C bool `datastore:",omitempty,noindex"` + F []int `datastore:",omitempty"` + S `datastore:",omitempty"` +} + +type NoOmits struct { + No []NoOmit `datastore:",omitempty"` + S `datastore:",omitempty"` + Ss S `datastore:",omitempty"` +} + +type N0 struct { + X0 + Nonymous X0 + Ignore string `datastore:"-"` + Other string +} + +type N1 struct { + X0 + Nonymous []X0 + Ignore string `datastore:"-"` + Other string +} + +type N2 struct { + N1 `datastore:"red"` + Green N1 `datastore:"green"` + Blue N1 + White N1 `datastore:"-"` +} + +type N3 struct { + C3 `datastore:"red"` +} + +type N4 struct { + c4 +} + +type N5 struct { + c4 `datastore:"red"` +} + +type O0 struct { + I int64 +} + +type O1 struct { + I int32 +} + +type U0 struct { + U uint +} + +type U1 struct { + U string +} + +type T struct { + T time.Time +} + +type X0 struct { + S string + I int + i int +} + +type X1 struct { + S myString + I int32 + J int64 +} + +type X2 struct { + Z string + i int +} + +type X3 struct { + S bool + I int +} + +type Y0 struct { + B bool + F []float64 + G []float64 +} + +type Y1 struct { + B bool + F float64 +} + +type Y2 struct { + B bool + F []int64 +} + +type Pointers struct { + Pi *int + Ps *string + Pb *bool + Pf *float64 + Pg *GeoPoint + Pt *time.Time +} + +type PointersOmitEmpty struct { + Pi *int 
`datastore:",omitempty"` + Ps *string `datastore:",omitempty"` + Pb *bool `datastore:",omitempty"` + Pf *float64 `datastore:",omitempty"` + Pg *GeoPoint `datastore:",omitempty"` + Pt *time.Time `datastore:",omitempty"` +} + +func populatedPointers() *Pointers { + var ( + i int + s string + b bool + f float64 + g GeoPoint + t time.Time + ) + return &Pointers{ + Pi: &i, + Ps: &s, + Pb: &b, + Pf: &f, + Pg: &g, + Pt: &t, + } +} + +type Tagged struct { + A int `datastore:"a,noindex"` + B []int `datastore:"b"` + C int `datastore:",noindex"` + D int `datastore:""` + E int + I int `datastore:"-"` + J int `datastore:",noindex" json:"j"` + + Y0 `datastore:"-"` + Z chan int `datastore:"-"` +} + +type InvalidTagged1 struct { + I int `datastore:"\t"` +} + +type InvalidTagged2 struct { + I int + J int `datastore:"I"` +} + +type InvalidTagged3 struct { + X string `datastore:"-,noindex"` +} + +type InvalidTagged4 struct { + X string `datastore:",garbage"` +} + +type Inner1 struct { + W int32 + X string +} + +type Inner2 struct { + Y float64 +} + +type Inner3 struct { + Z bool +} + +type Inner5 struct { + WW int +} + +type Inner4 struct { + X Inner5 +} + +type Outer struct { + A int16 + I []Inner1 + J Inner2 + Inner3 +} + +type OuterFlatten struct { + A int16 + I []Inner1 `datastore:",flatten"` + J Inner2 `datastore:",flatten,noindex"` + Inner3 `datastore:",flatten"` + K Inner4 `datastore:",flatten"` +} + +type OuterEquivalent struct { + A int16 + IDotW []int32 `datastore:"I.W"` + IDotX []string `datastore:"I.X"` + JDotY float64 `datastore:"J.Y"` + Z bool +} + +type Dotted struct { + A DottedA `datastore:"A0.A1.A2"` +} + +type DottedA struct { + B DottedB `datastore:"B3"` +} + +type DottedB struct { + C int `datastore:"C4.C5"` +} + +type SliceOfSlices struct { + I int + S []struct { + J int + F []float64 + } `datastore:",flatten"` +} + +type Recursive struct { + I int + R []Recursive +} + +type MutuallyRecursive0 struct { + I int + R []MutuallyRecursive1 +} + +type MutuallyRecursive1 struct { + I int + R []MutuallyRecursive0 +} + +type EntityWithKey struct { + I int + S string + K *Key `datastore:"__key__"` +} + +type EntityWithKey2 EntityWithKey + +type WithNestedEntityWithKey struct { + N EntityWithKey +} + +type WithNonKeyField struct { + I int + K string `datastore:"__key__"` +} + +type NestedWithNonKeyField struct { + N WithNonKeyField +} + +type Basic struct { + A string +} + +type PtrToStructField struct { + B *Basic + C *Basic `datastore:"c,noindex"` + *Basic + D []*Basic +} + +var two int = 2 + +type EmbeddedTime struct { + time.Time +} + +type SpecialTime struct { + MyTime EmbeddedTime +} + +type Doubler struct { + S string + I int64 + B bool +} + +type Repeat struct { + Key string + Value []byte +} + +type Repeated struct { + Repeats []Repeat +} + +func (d *Doubler) Load(props []Property) error { + return LoadStruct(d, props) +} + +func (d *Doubler) Save() ([]Property, error) { + // Save the default Property slice to an in-memory buffer (a PropertyList). + props, err := SaveStruct(d) + if err != nil { + return nil, err + } + var list PropertyList + if err := list.Load(props); err != nil { + return nil, err + } + + // Edit that PropertyList, and send it on. + for i := range list { + switch v := list[i].Value.(type) { + case string: + // + means string concatenation. + list[i].Value = v + v + case int64: + // + means integer addition. 
+ list[i].Value = v + v + } + } + return list.Save() +} + +var _ PropertyLoadSaver = (*Doubler)(nil) + +type Deriver struct { + S, Derived, Ignored string +} + +func (e *Deriver) Load(props []Property) error { + for _, p := range props { + if p.Name != "S" { + continue + } + e.S = p.Value.(string) + e.Derived = "derived+" + e.S + } + return nil +} + +func (e *Deriver) Save() ([]Property, error) { + return []Property{ + { + Name: "S", + Value: e.S, + }, + }, nil +} + +var _ PropertyLoadSaver = (*Deriver)(nil) + +type BadMultiPropEntity struct{} + +func (e *BadMultiPropEntity) Load(props []Property) error { + return errors.New("unimplemented") +} + +func (e *BadMultiPropEntity) Save() ([]Property, error) { + // Write multiple properties with the same name "I". + var props []Property + for i := 0; i < 3; i++ { + props = append(props, Property{ + Name: "I", + Value: int64(i), + }) + } + return props, nil +} + +var _ PropertyLoadSaver = (*BadMultiPropEntity)(nil) + +type testCase struct { + desc string + src interface{} + want interface{} + putErr string + getErr string +} + +var testCases = []testCase{ + { + "chan save fails", + &C0{I: -1}, + &E{}, + "unsupported struct field", + "", + }, + { + "*chan save fails", + &C1{I: -1}, + &E{}, + "unsupported struct field", + "", + }, + { + "[]chan save fails", + &C2{I: -1, C: make([]chan int, 8)}, + &E{}, + "unsupported struct field", + "", + }, + { + "chan load fails", + &C3{C: "not a chan"}, + &C0{}, + "", + "type mismatch", + }, + { + "*chan load fails", + &C3{C: "not a *chan"}, + &C1{}, + "", + "type mismatch", + }, + { + "[]chan load fails", + &C3{C: "not a []chan"}, + &C2{}, + "", + "type mismatch", + }, + { + "empty struct", + &E{}, + &E{}, + "", + "", + }, + { + "geopoint", + &G0{G: testGeoPt0}, + &G0{G: testGeoPt0}, + "", + "", + }, + { + "geopoint invalid", + &G0{G: testBadGeoPt}, + &G0{}, + "invalid GeoPoint value", + "", + }, + { + "geopoint as props", + &G0{G: testGeoPt0}, + &PropertyList{ + Property{Name: "G", Value: testGeoPt0, NoIndex: false}, + }, + "", + "", + }, + { + "geopoint slice", + &G1{G: []GeoPoint{testGeoPt0, testGeoPt1}}, + &G1{G: []GeoPoint{testGeoPt0, testGeoPt1}}, + "", + "", + }, + { + "omit empty, all", + &OmitAll{}, + new(PropertyList), + "", + "", + }, + { + "omit empty", + &Omit{}, + &PropertyList{ + Property{Name: "St", Value: "", NoIndex: false}, + }, + "", + "", + }, + { + "omit empty, fields populated", + &Omit{ + A: "a", + B: 10, + C: true, + F: []int{11}, + }, + &PropertyList{ + Property{Name: "A", Value: "a", NoIndex: false}, + Property{Name: "Bb", Value: int64(10), NoIndex: false}, + Property{Name: "C", Value: true, NoIndex: true}, + Property{Name: "F", Value: []interface{}{int64(11)}, NoIndex: false}, + Property{Name: "St", Value: "", NoIndex: false}, + }, + "", + "", + }, + { + "omit empty, fields populated", + &Omit{ + A: "a", + B: 10, + C: true, + F: []int{11}, + S: S{St: "string"}, + }, + &PropertyList{ + Property{Name: "A", Value: "a", NoIndex: false}, + Property{Name: "Bb", Value: int64(10), NoIndex: false}, + Property{Name: "C", Value: true, NoIndex: true}, + Property{Name: "F", Value: []interface{}{int64(11)}, NoIndex: false}, + Property{Name: "St", Value: "string", NoIndex: false}, + }, + "", + "", + }, + { + "omit empty does not propagate", + &NoOmits{ + No: []NoOmit{ + NoOmit{}, + }, + S: S{}, + Ss: S{}, + }, + &PropertyList{ + Property{Name: "No", Value: []interface{}{ + &Entity{ + Properties: []Property{ + Property{Name: "A", Value: "", NoIndex: false}, + Property{Name: "Bb", Value: int64(0), 
NoIndex: false}, + Property{Name: "C", Value: false, NoIndex: true}, + }, + }, + }, NoIndex: false}, + Property{Name: "Ss", Value: &Entity{ + Properties: []Property{ + Property{Name: "St", Value: "", NoIndex: false}, + }, + }, NoIndex: false}, + Property{Name: "St", Value: "", NoIndex: false}, + }, + "", + "", + }, + { + "key", + &K0{K: testKey1a}, + &K0{K: testKey1b}, + "", + "", + }, + { + "key with parent", + &K0{K: testKey2a}, + &K0{K: testKey2b}, + "", + "", + }, + { + "nil key", + &K0{}, + &K0{}, + "", + "", + }, + { + "all nil keys in slice", + &K1{[]*Key{nil, nil}}, + &K1{[]*Key{nil, nil}}, + "", + "", + }, + { + "some nil keys in slice", + &K1{[]*Key{testKey1a, nil, testKey2a}}, + &K1{[]*Key{testKey1b, nil, testKey2b}}, + "", + "", + }, + { + "overflow", + &O0{I: 1 << 48}, + &O1{}, + "", + "overflow", + }, + { + "time", + &T{T: time.Unix(1e9, 0)}, + &T{T: time.Unix(1e9, 0)}, + "", + "", + }, + { + "time as props", + &T{T: time.Unix(1e9, 0)}, + &PropertyList{ + Property{Name: "T", Value: time.Unix(1e9, 0), NoIndex: false}, + }, + "", + "", + }, + { + "uint save", + &U0{U: 1}, + &U0{}, + "unsupported struct field", + "", + }, + { + "uint load", + &U1{U: "not a uint"}, + &U0{}, + "", + "type mismatch", + }, + { + "zero", + &X0{}, + &X0{}, + "", + "", + }, + { + "basic", + &X0{S: "one", I: 2, i: 3}, + &X0{S: "one", I: 2}, + "", + "", + }, + { + "save string/int load myString/int32", + &X0{S: "one", I: 2, i: 3}, + &X1{S: "one", I: 2}, + "", + "", + }, + { + "missing fields", + &X0{S: "one", I: 2, i: 3}, + &X2{}, + "", + "no such struct field", + }, + { + "save string load bool", + &X0{S: "one", I: 2, i: 3}, + &X3{I: 2}, + "", + "type mismatch", + }, + { + "basic slice", + &Y0{B: true, F: []float64{7, 8, 9}}, + &Y0{B: true, F: []float64{7, 8, 9}}, + "", + "", + }, + { + "save []float64 load float64", + &Y0{B: true, F: []float64{7, 8, 9}}, + &Y1{B: true}, + "", + "requires a slice", + }, + { + "save []float64 load []int64", + &Y0{B: true, F: []float64{7, 8, 9}}, + &Y2{B: true}, + "", + "type mismatch", + }, + { + "single slice is too long", + &Y0{F: make([]float64, maxIndexedProperties+1)}, + &Y0{}, + "too many indexed properties", + "", + }, + { + "two slices are too long", + &Y0{F: make([]float64, maxIndexedProperties), G: make([]float64, maxIndexedProperties)}, + &Y0{}, + "too many indexed properties", + "", + }, + { + "one slice and one scalar are too long", + &Y0{F: make([]float64, maxIndexedProperties), B: true}, + &Y0{}, + "too many indexed properties", + "", + }, + { + "slice of slices of bytes", + &Repeated{ + Repeats: []Repeat{ + { + Key: "key 1", + Value: []byte("value 1"), + }, + { + Key: "key 2", + Value: []byte("value 2"), + }, + }, + }, + &Repeated{ + Repeats: []Repeat{ + { + Key: "key 1", + Value: []byte("value 1"), + }, + { + Key: "key 2", + Value: []byte("value 2"), + }, + }, + }, + "", + "", + }, + { + "long blob", + &B0{B: makeUint8Slice(maxIndexedProperties + 1)}, + &B0{B: makeUint8Slice(maxIndexedProperties + 1)}, + "", + "", + }, + { + "long []int8 is too long", + &B1{B: makeInt8Slice(maxIndexedProperties + 1)}, + &B1{}, + "too many indexed properties", + "", + }, + { + "short []int8", + &B1{B: makeInt8Slice(3)}, + &B1{B: makeInt8Slice(3)}, + "", + "", + }, + { + "long myBlob", + &B2{B: makeUint8Slice(maxIndexedProperties + 1)}, + &B2{B: makeUint8Slice(maxIndexedProperties + 1)}, + "", + "", + }, + { + "short myBlob", + &B2{B: makeUint8Slice(3)}, + &B2{B: makeUint8Slice(3)}, + "", + "", + }, + { + "long []myByte", + &B3{B: makeMyByteSlice(maxIndexedProperties + 
1)}, + &B3{B: makeMyByteSlice(maxIndexedProperties + 1)}, + "", + "", + }, + { + "short []myByte", + &B3{B: makeMyByteSlice(3)}, + &B3{B: makeMyByteSlice(3)}, + "", + "", + }, + { + "slice of blobs", + &B4{B: [][]byte{ + makeUint8Slice(3), + makeUint8Slice(4), + makeUint8Slice(5), + }}, + &B4{B: [][]byte{ + makeUint8Slice(3), + makeUint8Slice(4), + makeUint8Slice(5), + }}, + "", + "", + }, + { + "[]byte must be noindex", + &PropertyList{ + Property{Name: "B", Value: makeUint8Slice(1501), NoIndex: false}, + }, + nil, + "[]byte property too long to index", + "", + }, + { + "string must be noindex", + &PropertyList{ + Property{Name: "B", Value: strings.Repeat("x", 1501), NoIndex: false}, + }, + nil, + "string property too long to index", + "", + }, + { + "slice of []byte must be noindex", + &PropertyList{ + Property{Name: "B", Value: []interface{}{ + []byte("short"), + makeUint8Slice(1501), + }, NoIndex: false}, + }, + nil, + "[]byte property too long to index", + "", + }, + { + "slice of string must be noindex", + &PropertyList{ + Property{Name: "B", Value: []interface{}{ + "short", + strings.Repeat("x", 1501), + }, NoIndex: false}, + }, + nil, + "string property too long to index", + "", + }, + { + "save tagged load props", + &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, I: 6, J: 7}, + &PropertyList{ + // A and B are renamed to a and b; A and C are noindex, I is ignored. + // Order is sorted as per byName. + Property{Name: "C", Value: int64(3), NoIndex: true}, + Property{Name: "D", Value: int64(4), NoIndex: false}, + Property{Name: "E", Value: int64(5), NoIndex: false}, + Property{Name: "J", Value: int64(7), NoIndex: true}, + Property{Name: "a", Value: int64(1), NoIndex: true}, + Property{Name: "b", Value: []interface{}{int64(21), int64(22), int64(23)}, NoIndex: false}, + }, + "", + "", + }, + { + "save tagged load tagged", + &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, I: 6, J: 7}, + &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, J: 7}, + "", + "", + }, + { + "invalid tagged1", + &InvalidTagged1{I: 1}, + &InvalidTagged1{}, + "struct tag has invalid property name", + "", + }, + { + "invalid tagged2", + &InvalidTagged2{I: 1, J: 2}, + &InvalidTagged2{J: 2}, + "", + "", + }, + { + "invalid tagged3", + &InvalidTagged3{X: "hello"}, + &InvalidTagged3{}, + "struct tag has invalid property name: \"-\"", + "", + }, + { + "invalid tagged4", + &InvalidTagged4{X: "hello"}, + &InvalidTagged4{}, + "struct tag has invalid option: \"garbage\"", + "", + }, + { + "doubler", + &Doubler{S: "s", I: 1, B: true}, + &Doubler{S: "ss", I: 2, B: true}, + "", + "", + }, + { + "save struct load props", + &X0{S: "s", I: 1}, + &PropertyList{ + Property{Name: "I", Value: int64(1), NoIndex: false}, + Property{Name: "S", Value: "s", NoIndex: false}, + }, + "", + "", + }, + { + "save props load struct", + &PropertyList{ + Property{Name: "I", Value: int64(1), NoIndex: false}, + Property{Name: "S", Value: "s", NoIndex: false}, + }, + &X0{S: "s", I: 1}, + "", + "", + }, + { + "nil-value props", + &PropertyList{ + Property{Name: "I", Value: nil, NoIndex: false}, + Property{Name: "B", Value: nil, NoIndex: false}, + Property{Name: "S", Value: nil, NoIndex: false}, + Property{Name: "F", Value: nil, NoIndex: false}, + Property{Name: "K", Value: nil, NoIndex: false}, + Property{Name: "T", Value: nil, NoIndex: false}, + Property{Name: "J", Value: []interface{}{nil, int64(7), nil}, NoIndex: false}, + }, + &struct { + I int64 + B bool + S string + F float64 + K *Key + T time.Time + J []int64 + }{ + J: []int64{0, 
7, 0}, + }, + "", + "", + }, + { + "save outer load props flatten", + &OuterFlatten{ + A: 1, + I: []Inner1{ + {10, "ten"}, + {20, "twenty"}, + {30, "thirty"}, + }, + J: Inner2{ + Y: 3.14, + }, + Inner3: Inner3{ + Z: true, + }, + K: Inner4{ + X: Inner5{ + WW: 12, + }, + }, + }, + &PropertyList{ + Property{Name: "A", Value: int64(1), NoIndex: false}, + Property{Name: "I.W", Value: []interface{}{int64(10), int64(20), int64(30)}, NoIndex: false}, + Property{Name: "I.X", Value: []interface{}{"ten", "twenty", "thirty"}, NoIndex: false}, + Property{Name: "J.Y", Value: float64(3.14), NoIndex: true}, + Property{Name: "K.X.WW", Value: int64(12), NoIndex: false}, + Property{Name: "Z", Value: true, NoIndex: false}, + }, + "", + "", + }, + { + "load outer props flatten", + &PropertyList{ + Property{Name: "A", Value: int64(1), NoIndex: false}, + Property{Name: "I.W", Value: []interface{}{int64(10), int64(20), int64(30)}, NoIndex: false}, + Property{Name: "I.X", Value: []interface{}{"ten", "twenty", "thirty"}, NoIndex: false}, + Property{Name: "J.Y", Value: float64(3.14), NoIndex: true}, + Property{Name: "Z", Value: true, NoIndex: false}, + }, + &OuterFlatten{ + A: 1, + I: []Inner1{ + {10, "ten"}, + {20, "twenty"}, + {30, "thirty"}, + }, + J: Inner2{ + Y: 3.14, + }, + Inner3: Inner3{ + Z: true, + }, + }, + "", + "", + }, + { + "save outer load props", + &Outer{ + A: 1, + I: []Inner1{ + {10, "ten"}, + {20, "twenty"}, + {30, "thirty"}, + }, + J: Inner2{ + Y: 3.14, + }, + Inner3: Inner3{ + Z: true, + }, + }, + &PropertyList{ + Property{Name: "A", Value: int64(1), NoIndex: false}, + Property{Name: "I", Value: []interface{}{ + &Entity{ + Properties: []Property{ + Property{Name: "W", Value: int64(10), NoIndex: false}, + Property{Name: "X", Value: "ten", NoIndex: false}, + }, + }, + &Entity{ + Properties: []Property{ + Property{Name: "W", Value: int64(20), NoIndex: false}, + Property{Name: "X", Value: "twenty", NoIndex: false}, + }, + }, + &Entity{ + Properties: []Property{ + Property{Name: "W", Value: int64(30), NoIndex: false}, + Property{Name: "X", Value: "thirty", NoIndex: false}, + }, + }, + }, NoIndex: false}, + Property{Name: "J", Value: &Entity{ + Properties: []Property{ + Property{Name: "Y", Value: float64(3.14), NoIndex: false}, + }, + }, NoIndex: false}, + Property{Name: "Z", Value: true, NoIndex: false}, + }, + "", + "", + }, + { + "save props load outer-equivalent", + &PropertyList{ + Property{Name: "A", Value: int64(1), NoIndex: false}, + Property{Name: "I.W", Value: []interface{}{int64(10), int64(20), int64(30)}, NoIndex: false}, + Property{Name: "I.X", Value: []interface{}{"ten", "twenty", "thirty"}, NoIndex: false}, + Property{Name: "J.Y", Value: float64(3.14), NoIndex: false}, + Property{Name: "Z", Value: true, NoIndex: false}, + }, + &OuterEquivalent{ + A: 1, + IDotW: []int32{10, 20, 30}, + IDotX: []string{"ten", "twenty", "thirty"}, + JDotY: 3.14, + Z: true, + }, + "", + "", + }, + { + "dotted names save", + &Dotted{A: DottedA{B: DottedB{C: 88}}}, + &PropertyList{ + Property{Name: "A0.A1.A2", Value: &Entity{ + Properties: []Property{ + Property{Name: "B3", Value: &Entity{ + Properties: []Property{ + Property{Name: "C4.C5", Value: int64(88), NoIndex: false}, + }, + }, NoIndex: false}, + }, + }, NoIndex: false}, + }, + "", + "", + }, + { + "dotted names load", + &PropertyList{ + Property{Name: "A0.A1.A2", Value: &Entity{ + Properties: []Property{ + Property{Name: "B3", Value: &Entity{ + Properties: []Property{ + Property{Name: "C4.C5", Value: 99, NoIndex: false}, + }, + }, NoIndex: false}, + 
}, + }, NoIndex: false}, + }, + &Dotted{A: DottedA{B: DottedB{C: 99}}}, + "", + "", + }, + { + "save struct load deriver", + &X0{S: "s", I: 1}, + &Deriver{S: "s", Derived: "derived+s"}, + "", + "", + }, + { + "save deriver load struct", + &Deriver{S: "s", Derived: "derived+s", Ignored: "ignored"}, + &X0{S: "s"}, + "", + "", + }, + { + "zero time.Time", + &T{T: time.Time{}}, + &T{T: time.Time{}}, + "", + "", + }, + { + "time.Time near Unix zero time", + &T{T: time.Unix(0, 4e3)}, + &T{T: time.Unix(0, 4e3)}, + "", + "", + }, + { + "time.Time, far in the future", + &T{T: time.Date(99999, 1, 1, 0, 0, 0, 0, time.UTC)}, + &T{T: time.Date(99999, 1, 1, 0, 0, 0, 0, time.UTC)}, + "", + "", + }, + { + "time.Time, very far in the past", + &T{T: time.Date(-300000, 1, 1, 0, 0, 0, 0, time.UTC)}, + &T{}, + "time value out of range", + "", + }, + { + "time.Time, very far in the future", + &T{T: time.Date(294248, 1, 1, 0, 0, 0, 0, time.UTC)}, + &T{}, + "time value out of range", + "", + }, + { + "structs", + &N0{ + X0: X0{S: "one", I: 2, i: 3}, + Nonymous: X0{S: "four", I: 5, i: 6}, + Ignore: "ignore", + Other: "other", + }, + &N0{ + X0: X0{S: "one", I: 2}, + Nonymous: X0{S: "four", I: 5}, + Other: "other", + }, + "", + "", + }, + { + "slice of structs", + &N1{ + X0: X0{S: "one", I: 2, i: 3}, + Nonymous: []X0{ + {S: "four", I: 5, i: 6}, + {S: "seven", I: 8, i: 9}, + {S: "ten", I: 11, i: 12}, + {S: "thirteen", I: 14, i: 15}, + }, + Ignore: "ignore", + Other: "other", + }, + &N1{ + X0: X0{S: "one", I: 2}, + Nonymous: []X0{ + {S: "four", I: 5}, + {S: "seven", I: 8}, + {S: "ten", I: 11}, + {S: "thirteen", I: 14}, + }, + Other: "other", + }, + "", + "", + }, + { + "structs with slices of structs", + &N2{ + N1: N1{ + X0: X0{S: "rouge"}, + Nonymous: []X0{ + {S: "rosso0"}, + {S: "rosso1"}, + }, + }, + Green: N1{ + X0: X0{S: "vert"}, + Nonymous: []X0{ + {S: "verde0"}, + {S: "verde1"}, + {S: "verde2"}, + }, + }, + Blue: N1{ + X0: X0{S: "bleu"}, + Nonymous: []X0{ + {S: "blu0"}, + {S: "blu1"}, + {S: "blu2"}, + {S: "blu3"}, + }, + }, + }, + &N2{ + N1: N1{ + X0: X0{S: "rouge"}, + Nonymous: []X0{ + {S: "rosso0"}, + {S: "rosso1"}, + }, + }, + Green: N1{ + X0: X0{S: "vert"}, + Nonymous: []X0{ + {S: "verde0"}, + {S: "verde1"}, + {S: "verde2"}, + }, + }, + Blue: N1{ + X0: X0{S: "bleu"}, + Nonymous: []X0{ + {S: "blu0"}, + {S: "blu1"}, + {S: "blu2"}, + {S: "blu3"}, + }, + }, + }, + "", + "", + }, + { + "save structs load props", + &N2{ + N1: N1{ + X0: X0{S: "rouge"}, + Nonymous: []X0{ + {S: "rosso0"}, + {S: "rosso1"}, + }, + }, + Green: N1{ + X0: X0{S: "vert"}, + Nonymous: []X0{ + {S: "verde0"}, + {S: "verde1"}, + {S: "verde2"}, + }, + }, + Blue: N1{ + X0: X0{S: "bleu"}, + Nonymous: []X0{ + {S: "blu0"}, + {S: "blu1"}, + {S: "blu2"}, + {S: "blu3"}, + }, + }, + }, + &PropertyList{ + Property{Name: "Blue", Value: &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "Nonymous", Value: []interface{}{ + &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "S", Value: "blu0", NoIndex: false}, + }, + }, + &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "S", Value: "blu1", NoIndex: false}, + }, + }, + &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "S", Value: "blu2", NoIndex: false}, + }, + }, + &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "S", 
Value: "blu3", NoIndex: false}, + }, + }, + }, NoIndex: false}, + Property{Name: "Other", Value: "", NoIndex: false}, + Property{Name: "S", Value: "bleu", NoIndex: false}, + }, + }, NoIndex: false}, + Property{Name: "green", Value: &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "Nonymous", Value: []interface{}{ + &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "S", Value: "verde0", NoIndex: false}, + }, + }, + &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "S", Value: "verde1", NoIndex: false}, + }, + }, + &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "S", Value: "verde2", NoIndex: false}, + }, + }, + }, NoIndex: false}, + Property{Name: "Other", Value: "", NoIndex: false}, + Property{Name: "S", Value: "vert", NoIndex: false}, + }, + }, NoIndex: false}, + Property{Name: "red", Value: &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "Nonymous", Value: []interface{}{ + &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "S", Value: "rosso0", NoIndex: false}, + }, + }, + &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "S", Value: "rosso1", NoIndex: false}, + }, + }, + }, NoIndex: false}, + Property{Name: "Other", Value: "", NoIndex: false}, + Property{Name: "S", Value: "rouge", NoIndex: false}, + }, + }, NoIndex: false}, + }, + "", + "", + }, + { + "nested entity with key", + &WithNestedEntityWithKey{ + N: EntityWithKey{ + I: 12, + S: "abcd", + K: testKey0, + }, + }, + &WithNestedEntityWithKey{ + N: EntityWithKey{ + I: 12, + S: "abcd", + K: testKey0, + }, + }, + "", + "", + }, + { + "entity with key at top level", + &EntityWithKey{ + I: 12, + S: "abc", + K: testKey0, + }, + &EntityWithKey{ + I: 12, + S: "abc", + K: testKey0, + }, + "", + "", + }, + { + "entity with key at top level (key is populated on load)", + &EntityWithKey{ + I: 12, + S: "abc", + }, + &EntityWithKey{ + I: 12, + S: "abc", + K: testKey0, + }, + "", + "", + }, + { + "__key__ field not a *Key", + &NestedWithNonKeyField{ + N: WithNonKeyField{ + I: 12, + K: "abcd", + }, + }, + &NestedWithNonKeyField{ + N: WithNonKeyField{ + I: 12, + K: "abcd", + }, + }, + "datastore: __key__ field on struct datastore.WithNonKeyField is not a *datastore.Key", + "", + }, + { + "save struct with ptr to struct fields", + &PtrToStructField{ + &Basic{ + A: "b", + }, + &Basic{ + A: "c", + }, + &Basic{ + A: "anon", + }, + []*Basic{ + &Basic{ + A: "slice0", + }, + &Basic{ + A: "slice1", + }, + }, + }, + &PropertyList{ + Property{Name: "A", Value: "anon", NoIndex: false}, + Property{Name: "B", Value: &Entity{ + Properties: []Property{ + Property{Name: "A", Value: "b", NoIndex: false}, + }, + }}, + Property{Name: "D", Value: []interface{}{ + &Entity{ + Properties: []Property{ + Property{Name: "A", Value: "slice0", NoIndex: false}, + }, + }, + &Entity{ + Properties: []Property{ + Property{Name: "A", Value: "slice1", NoIndex: false}, + }, + }, + }, NoIndex: false}, + Property{Name: "c", Value: &Entity{ + Properties: []Property{ + Property{Name: "A", Value: "c", NoIndex: true}, + }, + }, NoIndex: true}, + }, + "", + "", + }, + { + "save and load struct with ptr to struct fields", + &PtrToStructField{ + &Basic{ + A: "b", + }, + &Basic{ + A: "c", + 
}, + &Basic{ + A: "anon", + }, + []*Basic{ + &Basic{ + A: "slice0", + }, + &Basic{ + A: "slice1", + }, + }, + }, + &PtrToStructField{ + &Basic{ + A: "b", + }, + &Basic{ + A: "c", + }, + &Basic{ + A: "anon", + }, + []*Basic{ + &Basic{ + A: "slice0", + }, + &Basic{ + A: "slice1", + }, + }, + }, + "", + "", + }, + { + "struct with nil ptr to struct fields", + &PtrToStructField{ + nil, + nil, + nil, + nil, + }, + new(PropertyList), + "", + "", + }, + { + "nested load entity with key", + &WithNestedEntityWithKey{ + N: EntityWithKey{ + I: 12, + S: "abcd", + K: testKey0, + }, + }, + &PropertyList{ + Property{Name: "N", Value: &Entity{ + Key: testKey0, + Properties: []Property{ + Property{Name: "I", Value: int64(12), NoIndex: false}, + Property{Name: "S", Value: "abcd", NoIndex: false}, + }, + }, + NoIndex: false}, + }, + "", + "", + }, + { + "nested save entity with key", + &PropertyList{ + Property{Name: "N", Value: &Entity{ + Key: testKey0, + Properties: []Property{ + Property{Name: "I", Value: int64(12), NoIndex: false}, + Property{Name: "S", Value: "abcd", NoIndex: false}, + }, + }, NoIndex: false}, + }, + + &WithNestedEntityWithKey{ + N: EntityWithKey{ + I: 12, + S: "abcd", + K: testKey0, + }, + }, + "", + "", + }, + { + "anonymous field with tag", + &N3{ + C3: C3{C: "s"}, + }, + &PropertyList{ + Property{Name: "red", Value: &Entity{ + Properties: []Property{ + Property{Name: "C", Value: "s", NoIndex: false}, + }, + }, NoIndex: false}, + }, + "", + "", + }, + { + "unexported anonymous field", + &N4{ + c4: c4{C: "s"}, + }, + &PropertyList{ + Property{Name: "C", Value: "s", NoIndex: false}, + }, + "", + "", + }, + { + "unexported anonymous field with tag", + &N5{ + c4: c4{C: "s"}, + }, + new(PropertyList), + "", + "", + }, + { + "save props load structs with ragged fields", + &PropertyList{ + Property{Name: "red.S", Value: "rot", NoIndex: false}, + Property{Name: "green.Nonymous.I", Value: []interface{}{int64(10), int64(11), int64(12), int64(13)}, NoIndex: false}, + Property{Name: "Blue.Nonymous.I", Value: []interface{}{int64(20), int64(21)}, NoIndex: false}, + Property{Name: "Blue.Nonymous.S", Value: []interface{}{"blau0", "blau1", "blau2"}, NoIndex: false}, + }, + &N2{ + N1: N1{ + X0: X0{S: "rot"}, + }, + Green: N1{ + Nonymous: []X0{ + {I: 10}, + {I: 11}, + {I: 12}, + {I: 13}, + }, + }, + Blue: N1{ + Nonymous: []X0{ + {S: "blau0", I: 20}, + {S: "blau1", I: 21}, + {S: "blau2"}, + }, + }, + }, + "", + "", + }, + { + "save structs with noindex tags", + &struct { + A struct { + X string `datastore:",noindex"` + Y string + } `datastore:",noindex"` + B struct { + X string `datastore:",noindex"` + Y string + } + }{}, + &PropertyList{ + Property{Name: "A", Value: &Entity{ + Properties: []Property{ + Property{Name: "X", Value: "", NoIndex: true}, + Property{Name: "Y", Value: "", NoIndex: true}, + }, + }, NoIndex: true}, + Property{Name: "B", Value: &Entity{ + Properties: []Property{ + Property{Name: "X", Value: "", NoIndex: true}, + Property{Name: "Y", Value: "", NoIndex: false}, + }, + }, NoIndex: false}, + }, + "", + "", + }, + { + "embedded struct with name override", + &struct { + Inner1 `datastore:"foo"` + }{}, + &PropertyList{ + Property{Name: "foo", Value: &Entity{ + Properties: []Property{ + Property{Name: "W", Value: int64(0), NoIndex: false}, + Property{Name: "X", Value: "", NoIndex: false}, + }, + }, NoIndex: false}, + }, + "", + "", + }, + { + "slice of slices", + &SliceOfSlices{}, + nil, + "flattening nested structs leads to a slice of slices", + "", + }, + { + "recursive struct", + 
&Recursive{}, + &Recursive{}, + "", + "", + }, + { + "mutually recursive struct", + &MutuallyRecursive0{}, + &MutuallyRecursive0{}, + "", + "", + }, + { + "non-exported struct fields", + &struct { + i, J int64 + }{i: 1, J: 2}, + &PropertyList{ + Property{Name: "J", Value: int64(2), NoIndex: false}, + }, + "", + "", + }, + { + "json.RawMessage", + &struct { + J json.RawMessage + }{ + J: json.RawMessage("rawr"), + }, + &PropertyList{ + Property{Name: "J", Value: []byte("rawr"), NoIndex: false}, + }, + "", + "", + }, + { + "json.RawMessage to myBlob", + &struct { + B json.RawMessage + }{ + B: json.RawMessage("rawr"), + }, + &B2{B: myBlob("rawr")}, + "", + "", + }, + { + "repeated property names", + &PropertyList{ + Property{Name: "A", Value: ""}, + Property{Name: "A", Value: ""}, + }, + nil, + "duplicate Property", + "", + }, + { + "embedded time field", + &SpecialTime{MyTime: EmbeddedTime{ts}}, + &SpecialTime{MyTime: EmbeddedTime{ts}}, + "", + "", + }, + { + "embedded time load", + &PropertyList{ + Property{Name: "MyTime.Time", Value: ts}, + }, + &SpecialTime{MyTime: EmbeddedTime{ts}}, + "", + "", + }, + { + "pointer fields: nil", + &Pointers{}, + &Pointers{}, + "", + "", + }, + { + "pointer fields: populated with zeroes", + populatedPointers(), + populatedPointers(), + "", + "", + }, +} + +// checkErr returns the empty string if either both want and err are zero, +// or if want is a non-empty substring of err's string representation. +func checkErr(want string, err error) string { + if err != nil { + got := err.Error() + if want == "" || strings.Index(got, want) == -1 { + return got + } + } else if want != "" { + return fmt.Sprintf("want error %q", want) + } + return "" +} + +func TestRoundTrip(t *testing.T) { + for _, tc := range testCases { + p, err := saveEntity(testKey0, tc.src) + if s := checkErr(tc.putErr, err); s != "" { + t.Errorf("%s: save: %s", tc.desc, s) + continue + } + if p == nil { + continue + } + var got interface{} + if _, ok := tc.want.(*PropertyList); ok { + got = new(PropertyList) + } else { + got = reflect.New(reflect.TypeOf(tc.want).Elem()).Interface() + } + err = loadEntityProto(got, p) + if s := checkErr(tc.getErr, err); s != "" { + t.Errorf("%s: load: %s", tc.desc, s) + continue + } + if pl, ok := got.(*PropertyList); ok { + // Sort by name to make sure we have a deterministic order. 
+ sortPL(*pl) + } + + if !testutil.Equal(got, tc.want, cmp.AllowUnexported(X0{}, X2{})) { + t.Errorf("%s: compare:\ngot: %+#v\nwant: %+#v", tc.desc, got, tc.want) + continue + } + } +} + +type aPtrPLS struct { + Count int +} + +func (pls *aPtrPLS) Load([]Property) error { + pls.Count += 1 + return nil +} + +func (pls *aPtrPLS) Save() ([]Property, error) { + return []Property{{Name: "Count", Value: 4}}, nil +} + +type aValuePLS struct { + Count int +} + +func (pls aValuePLS) Load([]Property) error { + pls.Count += 2 + return nil +} + +func (pls aValuePLS) Save() ([]Property, error) { + return []Property{{Name: "Count", Value: 8}}, nil +} + +type aValuePtrPLS struct { + Count int +} + +func (pls *aValuePtrPLS) Load([]Property) error { + pls.Count = 11 + return nil +} + +func (pls *aValuePtrPLS) Save() ([]Property, error) { + return []Property{{Name: "Count", Value: 12}}, nil +} + +type aNotPLS struct { + Count int +} + +type plsString string + +func (s *plsString) Load([]Property) error { + *s = "LOADED" + return nil +} + +func (s *plsString) Save() ([]Property, error) { + return []Property{{Name: "SS", Value: "SAVED"}}, nil +} + +func ptrToplsString(s string) *plsString { + plsStr := plsString(s) + return &plsStr +} + +type aSubPLS struct { + Foo string + Bar *aPtrPLS + Baz aValuePtrPLS + S plsString +} + +type aSubNotPLS struct { + Foo string + Bar *aNotPLS +} + +type aSubPLSErr struct { + Foo string + Bar aValuePLS +} + +type aSubPLSNoErr struct { + Foo string + Bar aPtrPLS +} + +type GrandparentFlatten struct { + Parent Parent `datastore:",flatten"` +} + +type GrandparentOfPtrFlatten struct { + Parent ParentOfPtr `datastore:",flatten"` +} + +type GrandparentOfSlice struct { + Parent ParentOfSlice +} + +type GrandparentOfSlicePtrs struct { + Parent ParentOfSlicePtrs +} + +type GrandparentOfSliceFlatten struct { + Parent ParentOfSlice `datastore:",flatten"` +} + +type GrandparentOfSlicePtrsFlatten struct { + Parent ParentOfSlicePtrs `datastore:",flatten"` +} + +type Grandparent struct { + Parent Parent +} + +type Parent struct { + Child Child + String plsString +} + +type ParentOfPtr struct { + Child *Child + String *plsString +} + +type ParentOfSlice struct { + Children []Child + Strings []plsString +} + +type ParentOfSlicePtrs struct { + Children []*Child + Strings []*plsString +} + +type Child struct { + I int + Grandchild Grandchild +} + +type Grandchild struct { + S string +} + +func (c *Child) Load(props []Property) error { + for _, p := range props { + if p.Name == "I" { + c.I += 1 + } else if p.Name == "Grandchild.S" { + c.Grandchild.S = "grandchild loaded" + } + } + + return nil +} + +func (c *Child) Save() ([]Property, error) { + v := c.I + 1 + return []Property{ + {Name: "I", Value: v}, + {Name: "Grandchild.S", Value: fmt.Sprintf("grandchild saved %d", v)}, + }, nil +} + +func TestLoadSavePLS(t *testing.T) { + type testCase struct { + desc string + src interface{} + wantSave *pb.Entity + wantLoad interface{} + saveErr string + loadErr string + } + + testCases := []testCase{ + { + desc: "non-struct implements PLS (top-level)", + src: ptrToplsString("hello"), + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, + }, + }, + wantLoad: ptrToplsString("LOADED"), + }, + { + desc: "substructs do implement PLS", + src: &aSubPLS{Foo: "foo", Bar: &aPtrPLS{Count: 2}, Baz: aValuePtrPLS{Count: 15}, S: "something"}, + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: 
map[string]*pb.Value{ + "Foo": {ValueType: &pb.Value_StringValue{StringValue: "foo"}}, + "Bar": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "Count": {ValueType: &pb.Value_IntegerValue{IntegerValue: 4}}, + }, + }, + }}, + "Baz": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "Count": {ValueType: &pb.Value_IntegerValue{IntegerValue: 12}}, + }, + }, + }}, + "S": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, + }, + }, + }}, + }, + }, + wantLoad: &aSubPLS{Foo: "foo", Bar: &aPtrPLS{Count: 1}, Baz: aValuePtrPLS{Count: 11}, S: "LOADED"}, + }, + { + desc: "substruct (ptr) does implement PLS, nil valued substruct", + src: &aSubPLS{Foo: "foo", S: "something"}, + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Foo": {ValueType: &pb.Value_StringValue{StringValue: "foo"}}, + "Baz": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "Count": {ValueType: &pb.Value_IntegerValue{IntegerValue: 12}}, + }, + }, + }}, + "S": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, + }, + }, + }}, + }, + }, + wantLoad: &aSubPLS{Foo: "foo", Baz: aValuePtrPLS{Count: 11}, S: "LOADED"}, + }, + { + desc: "substruct (ptr) does not implement PLS", + src: &aSubNotPLS{Foo: "foo", Bar: &aNotPLS{Count: 2}}, + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Foo": {ValueType: &pb.Value_StringValue{StringValue: "foo"}}, + "Bar": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "Count": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, + }, + }, + }}, + }, + }, + wantLoad: &aSubNotPLS{Foo: "foo", Bar: &aNotPLS{Count: 2}}, + }, + { + desc: "substruct (value) does implement PLS, error on save", + src: &aSubPLSErr{Foo: "foo", Bar: aValuePLS{Count: 2}}, + wantSave: (*pb.Entity)(nil), + wantLoad: &aSubPLSErr{}, + saveErr: "PropertyLoadSaver methods must be implemented on a pointer", + }, + { + desc: "substruct (value) does implement PLS, error on load", + src: &aSubPLSNoErr{Foo: "foo", Bar: aPtrPLS{Count: 2}}, + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Foo": {ValueType: &pb.Value_StringValue{StringValue: "foo"}}, + "Bar": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "Count": {ValueType: &pb.Value_IntegerValue{IntegerValue: 4}}, + }, + }, + }}, + }, + }, + wantLoad: &aSubPLSErr{}, + loadErr: "PropertyLoadSaver methods must be implemented on a pointer", + }, + + { + desc: "parent does not have flatten option, child impl PLS", + src: &Grandparent{ + Parent: Parent{ + Child: Child{ + I: 9, + Grandchild: Grandchild{ + S: "BAD", + }, + }, + String: plsString("something"), + }, + }, + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Parent": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "Child": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 10}}, + "Grandchild.S": {ValueType: &pb.Value_StringValue{StringValue: "grandchild 
saved 10"}}, + }, + }, + }}, + "String": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, + }, + }, + }}, + }, + }, + }}, + }, + }, + wantLoad: &Grandparent{ + Parent: Parent{ + Child: Child{ + I: 1, + Grandchild: Grandchild{ + S: "grandchild loaded", + }, + }, + String: "LOADED", + }, + }, + }, + { + desc: "parent has flatten option enabled, child impl PLS", + src: &GrandparentFlatten{ + Parent: Parent{ + Child: Child{ + I: 7, + Grandchild: Grandchild{ + S: "BAD", + }, + }, + String: plsString("something"), + }, + }, + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Parent.Child.I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 8}}, + "Parent.Child.Grandchild.S": {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 8"}}, + "Parent.String.SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, + }, + }, + wantLoad: &GrandparentFlatten{ + Parent: Parent{ + Child: Child{ + I: 1, + Grandchild: Grandchild{ + S: "grandchild loaded", + }, + }, + String: "LOADED", + }, + }, + }, + + { + desc: "parent has flatten option enabled, child (ptr to) impl PLS", + src: &GrandparentOfPtrFlatten{ + Parent: ParentOfPtr{ + Child: &Child{ + I: 7, + Grandchild: Grandchild{ + S: "BAD", + }, + }, + String: ptrToplsString("something"), + }, + }, + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Parent.Child.I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 8}}, + "Parent.Child.Grandchild.S": {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 8"}}, + "Parent.String.SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, + }, + }, + wantLoad: &GrandparentOfPtrFlatten{ + Parent: ParentOfPtr{ + Child: &Child{ + I: 1, + Grandchild: Grandchild{ + S: "grandchild loaded", + }, + }, + String: ptrToplsString("LOADED"), + }, + }, + }, + { + desc: "children (slice of) impl PLS", + src: &GrandparentOfSlice{ + Parent: ParentOfSlice{ + Children: []Child{ + { + I: 7, + Grandchild: Grandchild{ + S: "BAD", + }, + }, + { + I: 9, + Grandchild: Grandchild{ + S: "BAD2", + }, + }, + }, + Strings: []plsString{ + "something1", + "something2", + }, + }, + }, + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Parent": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "Children": {ValueType: &pb.Value_ArrayValue{ + ArrayValue: &pb.ArrayValue{Values: []*pb.Value{ + {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 8}}, + "Grandchild.S": {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 8"}}, + }, + }, + }}, + {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 10}}, + "Grandchild.S": {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 10"}}, + }, + }, + }}, + }}, + }}, + "Strings": {ValueType: &pb.Value_ArrayValue{ + ArrayValue: &pb.ArrayValue{Values: []*pb.Value{ + {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, + }, + }, + }}, + {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "SS": {ValueType: 
&pb.Value_StringValue{StringValue: "SAVED"}}, + }, + }, + }}, + }}, + }}, + }, + }, + }}, + }, + }, + wantLoad: &GrandparentOfSlice{ + Parent: ParentOfSlice{ + Children: []Child{ + { + I: 1, + Grandchild: Grandchild{ + S: "grandchild loaded", + }, + }, + { + I: 1, + Grandchild: Grandchild{ + S: "grandchild loaded", + }, + }, + }, + Strings: []plsString{ + "LOADED", + "LOADED", + }, + }, + }, + }, + { + desc: "children (slice of ptrs) impl PLS", + src: &GrandparentOfSlicePtrs{ + Parent: ParentOfSlicePtrs{ + Children: []*Child{ + { + I: 7, + Grandchild: Grandchild{ + S: "BAD", + }, + }, + { + I: 9, + Grandchild: Grandchild{ + S: "BAD2", + }, + }, + }, + Strings: []*plsString{ + ptrToplsString("something1"), + ptrToplsString("something2"), + }, + }, + }, + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Parent": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "Children": {ValueType: &pb.Value_ArrayValue{ + ArrayValue: &pb.ArrayValue{Values: []*pb.Value{ + {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 8}}, + "Grandchild.S": {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 8"}}, + }, + }, + }}, + {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 10}}, + "Grandchild.S": {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 10"}}, + }, + }, + }}, + }}, + }}, + "Strings": {ValueType: &pb.Value_ArrayValue{ + ArrayValue: &pb.ArrayValue{Values: []*pb.Value{ + {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, + }, + }, + }}, + {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, + }, + }, + }}, + }}, + }}, + }, + }, + }}, + }, + }, + wantLoad: &GrandparentOfSlicePtrs{ + Parent: ParentOfSlicePtrs{ + Children: []*Child{ + { + I: 1, + Grandchild: Grandchild{ + S: "grandchild loaded", + }, + }, + { + I: 1, + Grandchild: Grandchild{ + S: "grandchild loaded", + }, + }, + }, + Strings: []*plsString{ + ptrToplsString("LOADED"), + ptrToplsString("LOADED"), + }, + }, + }, + }, + { + desc: "parent has flatten option, children (slice of) impl PLS", + src: &GrandparentOfSliceFlatten{ + Parent: ParentOfSlice{ + Children: []Child{ + { + I: 7, + Grandchild: Grandchild{ + S: "BAD", + }, + }, + { + I: 9, + Grandchild: Grandchild{ + S: "BAD2", + }, + }, + }, + Strings: []plsString{ + "something1", + "something2", + }, + }, + }, + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Parent.Children.I": {ValueType: &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{ + Values: []*pb.Value{ + {ValueType: &pb.Value_IntegerValue{IntegerValue: 8}}, + {ValueType: &pb.Value_IntegerValue{IntegerValue: 10}}, + }, + }, + }}, + "Parent.Children.Grandchild.S": {ValueType: &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{ + Values: []*pb.Value{ + {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 8"}}, + {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 10"}}, + }, + }, + }}, + "Parent.Strings.SS": {ValueType: &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{ + Values: []*pb.Value{ + {ValueType: 
&pb.Value_StringValue{StringValue: "SAVED"}}, + {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, + }, + }, + }}, + }, + }, + wantLoad: &GrandparentOfSliceFlatten{ + Parent: ParentOfSlice{ + Children: []Child{ + { + I: 1, + Grandchild: Grandchild{ + S: "grandchild loaded", + }, + }, + { + I: 1, + Grandchild: Grandchild{ + S: "grandchild loaded", + }, + }, + }, + Strings: []plsString{ + "LOADED", + "LOADED", + }, + }, + }, + }, + { + desc: "parent has flatten option, children (slice of ptrs) impl PLS", + src: &GrandparentOfSlicePtrsFlatten{ + Parent: ParentOfSlicePtrs{ + Children: []*Child{ + { + I: 7, + Grandchild: Grandchild{ + S: "BAD", + }, + }, + { + I: 9, + Grandchild: Grandchild{ + S: "BAD2", + }, + }, + }, + Strings: []*plsString{ + ptrToplsString("something1"), + ptrToplsString("something1"), + }, + }, + }, + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Parent.Children.I": {ValueType: &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{ + Values: []*pb.Value{ + {ValueType: &pb.Value_IntegerValue{IntegerValue: 8}}, + {ValueType: &pb.Value_IntegerValue{IntegerValue: 10}}, + }, + }, + }}, + "Parent.Children.Grandchild.S": {ValueType: &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{ + Values: []*pb.Value{ + {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 8"}}, + {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 10"}}, + }, + }, + }}, + "Parent.Strings.SS": {ValueType: &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{ + Values: []*pb.Value{ + {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, + {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, + }, + }, + }}, + }, + }, + wantLoad: &GrandparentOfSlicePtrsFlatten{ + Parent: ParentOfSlicePtrs{ + Children: []*Child{ + { + I: 1, + Grandchild: Grandchild{ + S: "grandchild loaded", + }, + }, + { + I: 1, + Grandchild: Grandchild{ + S: "grandchild loaded", + }, + }, + }, + Strings: []*plsString{ + ptrToplsString("LOADED"), + ptrToplsString("LOADED"), + }, + }, + }, + }, + } + + for _, tc := range testCases { + e, err := saveEntity(testKey0, tc.src) + if tc.saveErr == "" { // Want no error. + if err != nil { + t.Errorf("%s: save: %v", tc.desc, err) + continue + } + if !testutil.Equal(e, tc.wantSave) { + t.Errorf("%s: save: \ngot: %+v\nwant: %+v", tc.desc, e, tc.wantSave) + continue + } + } else { // Want error. + if err == nil { + t.Errorf("%s: save: want err", tc.desc) + continue + } + if !strings.Contains(err.Error(), tc.saveErr) { + t.Errorf("%s: save: \ngot err '%s'\nwant err '%s'", tc.desc, err.Error(), tc.saveErr) + } + continue + } + + gota := reflect.New(reflect.TypeOf(tc.wantLoad).Elem()).Interface() + err = loadEntityProto(gota, e) + if tc.loadErr == "" { // Want no error. + if err != nil { + t.Errorf("%s: load: %v", tc.desc, err) + continue + } + if !testutil.Equal(gota, tc.wantLoad) { + t.Errorf("%s: load: \ngot: %+v\nwant: %+v", tc.desc, gota, tc.wantLoad) + continue + } + } else { // Want error. + if err == nil { + t.Errorf("%s: load: want err", tc.desc) + continue + } + if !strings.Contains(err.Error(), tc.loadErr) { + t.Errorf("%s: load: \ngot err '%s'\nwant err '%s'", tc.desc, err.Error(), tc.loadErr) + } + } + } +} + +func TestQueryConstruction(t *testing.T) { + tests := []struct { + q, exp *Query + err string + }{ + { + q: NewQuery("Foo"), + exp: &Query{ + kind: "Foo", + limit: -1, + }, + }, + { + // Regular filtered query with standard spacing. 
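+			// (Filter splits "foo >" into the field name "foo" and
+			// the operator ">", as the expected filter below shows.)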
+ q: NewQuery("Foo").Filter("foo >", 7), + exp: &Query{ + kind: "Foo", + filter: []filter{ + { + FieldName: "foo", + Op: greaterThan, + Value: 7, + }, + }, + limit: -1, + }, + }, + { + // Filtered query with no spacing. + q: NewQuery("Foo").Filter("foo=", 6), + exp: &Query{ + kind: "Foo", + filter: []filter{ + { + FieldName: "foo", + Op: equal, + Value: 6, + }, + }, + limit: -1, + }, + }, + { + // Filtered query with funky spacing. + q: NewQuery("Foo").Filter(" foo< ", 8), + exp: &Query{ + kind: "Foo", + filter: []filter{ + { + FieldName: "foo", + Op: lessThan, + Value: 8, + }, + }, + limit: -1, + }, + }, + { + // Filtered query with multicharacter op. + q: NewQuery("Foo").Filter("foo >=", 9), + exp: &Query{ + kind: "Foo", + filter: []filter{ + { + FieldName: "foo", + Op: greaterEq, + Value: 9, + }, + }, + limit: -1, + }, + }, + { + // Query with ordering. + q: NewQuery("Foo").Order("bar"), + exp: &Query{ + kind: "Foo", + order: []order{ + { + FieldName: "bar", + Direction: ascending, + }, + }, + limit: -1, + }, + }, + { + // Query with reverse ordering, and funky spacing. + q: NewQuery("Foo").Order(" - bar"), + exp: &Query{ + kind: "Foo", + order: []order{ + { + FieldName: "bar", + Direction: descending, + }, + }, + limit: -1, + }, + }, + { + // Query with an empty ordering. + q: NewQuery("Foo").Order(""), + err: "empty order", + }, + { + // Query with a + ordering. + q: NewQuery("Foo").Order("+bar"), + err: "invalid order", + }, + } + for i, test := range tests { + if test.q.err != nil { + got := test.q.err.Error() + if !strings.Contains(got, test.err) { + t.Errorf("%d: error mismatch: got %q want something containing %q", i, got, test.err) + } + continue + } + if !testutil.Equal(test.q, test.exp, cmp.AllowUnexported(Query{})) { + t.Errorf("%d: mismatch: got %v want %v", i, test.q, test.exp) + } + } +} + +func TestPutMultiTypes(t *testing.T) { + ctx := context.Background() + type S struct { + A int + B string + } + + testCases := []struct { + desc string + src interface{} + wantErr bool + }{ + // Test cases to check each of the valid input types for src. + // Each case has the same elements. + { + desc: "type []struct", + src: []S{ + {1, "one"}, {2, "two"}, + }, + }, + { + desc: "type []*struct", + src: []*S{ + {1, "one"}, {2, "two"}, + }, + }, + { + desc: "type []interface{} with PLS elems", + src: []interface{}{ + &PropertyList{Property{Name: "A", Value: 1}, Property{Name: "B", Value: "one"}}, + &PropertyList{Property{Name: "A", Value: 2}, Property{Name: "B", Value: "two"}}, + }, + }, + { + desc: "type []interface{} with struct ptr elems", + src: []interface{}{ + &S{1, "one"}, &S{2, "two"}, + }, + }, + { + desc: "type []PropertyLoadSaver{}", + src: []PropertyLoadSaver{ + &PropertyList{Property{Name: "A", Value: 1}, Property{Name: "B", Value: "one"}}, + &PropertyList{Property{Name: "A", Value: 2}, Property{Name: "B", Value: "two"}}, + }, + }, + { + desc: "type []P (non-pointer, *P implements PropertyLoadSaver)", + src: []PropertyList{ + {Property{Name: "A", Value: 1}, Property{Name: "B", Value: "one"}}, + {Property{Name: "A", Value: 2}, Property{Name: "B", Value: "two"}}, + }, + }, + // Test some invalid cases. 
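+		// PutMulti rejects struct values (as opposed to pointers to
+		// structs), a bare PropertyList, slices of primitive types, and
+		// values that are not slices at all.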
+		{
+			desc: "type []interface{} with struct elems",
+			src: []interface{}{
+				S{1, "one"}, S{2, "two"},
+			},
+			wantErr: true,
+		},
+		{
+			desc: "PropertyList",
+			src: PropertyList{
+				Property{Name: "A", Value: 1},
+				Property{Name: "B", Value: "one"},
+			},
+			wantErr: true,
+		},
+		{
+			desc:    "type []int",
+			src:     []int{1, 2},
+			wantErr: true,
+		},
+		{
+			desc:    "not a slice",
+			src:     S{1, "one"},
+			wantErr: true,
+		},
+	}
+
+	// Use the same keys and expected entities for all tests.
+	keys := []*Key{
+		NameKey("testKind", "first", nil),
+		NameKey("testKind", "second", nil),
+	}
+	want := []*pb.Mutation{
+		{Operation: &pb.Mutation_Upsert{
+			Upsert: &pb.Entity{
+				Key: keyToProto(keys[0]),
+				Properties: map[string]*pb.Value{
+					"A": {ValueType: &pb.Value_IntegerValue{IntegerValue: 1}},
+					"B": {ValueType: &pb.Value_StringValue{StringValue: "one"}},
+				},
+			}}},
+		{Operation: &pb.Mutation_Upsert{
+			Upsert: &pb.Entity{
+				Key: keyToProto(keys[1]),
+				Properties: map[string]*pb.Value{
+					"A": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}},
+					"B": {ValueType: &pb.Value_StringValue{StringValue: "two"}},
+				},
+			}}},
+	}
+
+	for _, tt := range testCases {
+		// Set up a fake client which captures upserts.
+		var got []*pb.Mutation
+		client := &Client{
+			client: &fakeClient{
+				commitFn: func(req *pb.CommitRequest) (*pb.CommitResponse, error) {
+					got = req.Mutations
+					return &pb.CommitResponse{}, nil
+				},
+			},
+		}
+
+		_, err := client.PutMulti(ctx, keys, tt.src)
+		if err != nil {
+			if !tt.wantErr {
+				t.Errorf("%s: error %v", tt.desc, err)
+			}
+			continue
+		}
+		if tt.wantErr {
+			t.Errorf("%s: wanted error, but none returned", tt.desc)
+			continue
+		}
+		if len(got) != len(want) {
+			t.Errorf("%s: got %d entities, want %d", tt.desc, len(got), len(want))
+			continue
+		}
+		for i, e := range got {
+			if !proto.Equal(e, want[i]) {
+				t.Errorf("%s: entity %d doesn't match\ngot: %v\nwant: %v", tt.desc, i, e, want[i])
+			}
+		}
+	}
+}
+
+func TestNoIndexOnSliceProperties(t *testing.T) {
+	// Check that ExcludeFromIndexes is set on the inner elements,
+	// rather than the top-level ArrayValue value.
+	pl := PropertyList{
+		Property{
+			Name: "repeated",
+			Value: []interface{}{
+				123,
+				false,
+				"short",
+				strings.Repeat("a", 1503),
+			},
+			NoIndex: true,
+		},
+	}
+	key := NameKey("dummy", "dummy", nil)
+
+	entity, err := saveEntity(key, &pl)
+	if err != nil {
+		t.Fatalf("saveEntity: %v", err)
+	}
+
+	want := &pb.Value{
+		ValueType: &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{Values: []*pb.Value{
+			{ValueType: &pb.Value_IntegerValue{IntegerValue: 123}, ExcludeFromIndexes: true},
+			{ValueType: &pb.Value_BooleanValue{BooleanValue: false}, ExcludeFromIndexes: true},
+			{ValueType: &pb.Value_StringValue{StringValue: "short"}, ExcludeFromIndexes: true},
+			{ValueType: &pb.Value_StringValue{StringValue: strings.Repeat("a", 1503)}, ExcludeFromIndexes: true},
+		}}},
+	}
+	if got := entity.Properties["repeated"]; !proto.Equal(got, want) {
+		t.Errorf("Entity proto differs\ngot: %v\nwant: %v", got, want)
+	}
+}
+
+type byName PropertyList
+
+func (s byName) Len() int           { return len(s) }
+func (s byName) Less(i, j int) bool { return s[i].Name < s[j].Name }
+func (s byName) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// sortPL sorts the property list by property name, and
+// recursively sorts any nested property lists, or nested slices of
+// property lists.
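+// Sorting normalizes the order so that two property lists can be compared
+// for equality regardless of the order in which their properties were
+// produced.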
+func sortPL(pl PropertyList) {
+	sort.Stable(byName(pl))
+	for _, p := range pl {
+		switch p.Value.(type) {
+		case *Entity:
+			sortPL(p.Value.(*Entity).Properties)
+		case []interface{}:
+			for _, p2 := range p.Value.([]interface{}) {
+				if nent, ok := p2.(*Entity); ok {
+					sortPL(nent.Properties)
+				}
+			}
+		}
+	}
+}
+
+func TestValidGeoPoint(t *testing.T) {
+	testCases := []struct {
+		desc string
+		pt   GeoPoint
+		want bool
+	}{
+		{
+			"valid",
+			GeoPoint{67.21, 13.37},
+			true,
+		},
+		{
+			"high lat",
+			GeoPoint{90.01, 13.37},
+			false,
+		},
+		{
+			"low lat",
+			GeoPoint{-90.01, 13.37},
+			false,
+		},
+		{
+			"high lng",
+			GeoPoint{67.21, 182},
+			false,
+		},
+		{
+			"low lng",
+			GeoPoint{67.21, -181},
+			false,
+		},
+	}
+
+	for _, tc := range testCases {
+		if got := tc.pt.Valid(); got != tc.want {
+			t.Errorf("%s: got %v, want %v", tc.desc, got, tc.want)
+		}
+	}
+}
+
+func TestPutInvalidEntity(t *testing.T) {
+	// Test that trying to put an invalid entity always returns the correct error
+	// type.
+
+	// Fake client that can pretend to start a transaction.
+	fakeClient := &fakeDatastoreClient{
+		beginTransaction: func(*pb.BeginTransactionRequest) (*pb.BeginTransactionResponse, error) {
+			return &pb.BeginTransactionResponse{
+				Transaction: []byte("deadbeef"),
+			}, nil
+		},
+	}
+	client := &Client{
+		client: fakeClient,
+	}
+
+	ctx := context.Background()
+	key := IncompleteKey("kind", nil)
+
+	_, err := client.Put(ctx, key, "invalid entity")
+	if err != ErrInvalidEntityType {
+		t.Errorf("client.Put returned err %v, want %v", err, ErrInvalidEntityType)
+	}
+
+	_, err = client.PutMulti(ctx, []*Key{key}, []interface{}{"invalid entity"})
+	if me, ok := err.(MultiError); !ok {
+		t.Errorf("client.PutMulti returned err %v, want MultiError type", err)
+	} else if len(me) != 1 || me[0] != ErrInvalidEntityType {
+		t.Errorf("client.PutMulti returned err %v, want MultiError{ErrInvalidEntityType}", err)
+	}
+
+	client.RunInTransaction(ctx, func(tx *Transaction) error {
+		_, err := tx.Put(key, "invalid entity")
+		if err != ErrInvalidEntityType {
+			t.Errorf("tx.Put returned err %v, want %v", err, ErrInvalidEntityType)
+		}
+
+		_, err = tx.PutMulti([]*Key{key}, []interface{}{"invalid entity"})
+		if me, ok := err.(MultiError); !ok {
+			t.Errorf("tx.PutMulti returned err %v, want MultiError type", err)
+		} else if len(me) != 1 || me[0] != ErrInvalidEntityType {
+			t.Errorf("tx.PutMulti returned err %v, want MultiError{ErrInvalidEntityType}", err)
+		}
+
+		return errors.New("bang!") // Return error: we don't actually want to commit.
+	})
+}
+
+func TestDeferred(t *testing.T) {
+	type Ent struct {
+		A int
+		B string
+	}
+
+	keys := []*Key{
+		NameKey("testKind", "first", nil),
+		NameKey("testKind", "second", nil),
+	}
+
+	entity1 := &pb.Entity{
+		Key: keyToProto(keys[0]),
+		Properties: map[string]*pb.Value{
+			"A": {ValueType: &pb.Value_IntegerValue{IntegerValue: 1}},
+			"B": {ValueType: &pb.Value_StringValue{StringValue: "one"}},
+		},
+	}
+	entity2 := &pb.Entity{
+		Key: keyToProto(keys[1]),
+		Properties: map[string]*pb.Value{
+			"A": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}},
+			"B": {ValueType: &pb.Value_StringValue{StringValue: "two"}},
+		},
+	}
+
+	// count keeps track of the number of times fakeClient.lookup has been
+	// called.
+	var count int
+	// Fake client that will return deferred keys in the response on the first call.
+	fakeClient := &fakeDatastoreClient{
+		lookup: func(*pb.LookupRequest) (*pb.LookupResponse, error) {
+			count++
+			// On the first call, we return deferred keys.
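+			// Deferred keys signal a partially satisfied lookup: the
+			// client is expected to issue a follow-up LookupRequest
+			// for the keys that were deferred.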
+			if count == 1 {
+				return &pb.LookupResponse{
+					Found: []*pb.EntityResult{
+						{
+							Entity:  entity1,
+							Version: 1,
+						},
+					},
+					Deferred: []*pb.Key{
+						keyToProto(keys[1]),
+					},
+				}, nil
+			}
+
+			// On the second call, we do not return any more deferred keys.
+			return &pb.LookupResponse{
+				Found: []*pb.EntityResult{
+					{
+						Entity:  entity2,
+						Version: 1,
+					},
+				},
+			}, nil
+		},
+	}
+	client := &Client{
+		client: fakeClient,
+	}
+
+	ctx := context.Background()
+
+	dst := make([]Ent, len(keys))
+	err := client.GetMulti(ctx, keys, dst)
+	if err != nil {
+		t.Fatalf("client.GetMulti: %v", err)
+	}
+
+	if count != 2 {
+		t.Fatalf("expected client.lookup to be called 2 times. Got %d", count)
+	}
+
+	if len(dst) != 2 {
+		t.Fatalf("expected 2 entities returned, got %d", len(dst))
+	}
+
+	for _, e := range dst {
+		if e.A == 1 {
+			if e.B != "one" {
+				t.Fatalf("unexpected entity %+v", e)
+			}
+		} else if e.A == 2 {
+			if e.B != "two" {
+				t.Fatalf("unexpected entity %+v", e)
+			}
+		} else {
+			t.Fatalf("unexpected entity %+v", e)
+		}
+	}
+}
+
+type KeyLoaderEnt struct {
+	A int
+	K *Key
+}
+
+func (e *KeyLoaderEnt) Load(p []Property) error {
+	e.A = 2
+	return nil
+}
+
+func (e *KeyLoaderEnt) LoadKey(k *Key) error {
+	e.K = k
+	return nil
+}
+
+func (e *KeyLoaderEnt) Save() ([]Property, error) {
+	return []Property{{Name: "A", Value: int64(3)}}, nil
+}
+
+func TestKeyLoaderEndToEnd(t *testing.T) {
+	keys := []*Key{
+		NameKey("testKind", "first", nil),
+		NameKey("testKind", "second", nil),
+	}
+
+	entity1 := &pb.Entity{
+		Key: keyToProto(keys[0]),
+		Properties: map[string]*pb.Value{
+			"A": {ValueType: &pb.Value_IntegerValue{IntegerValue: 1}},
+			"B": {ValueType: &pb.Value_StringValue{StringValue: "one"}},
+		},
+	}
+	entity2 := &pb.Entity{
+		Key: keyToProto(keys[1]),
+		Properties: map[string]*pb.Value{
+			"A": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}},
+			"B": {ValueType: &pb.Value_StringValue{StringValue: "two"}},
+		},
+	}
+
+	fakeClient := &fakeDatastoreClient{
+		lookup: func(*pb.LookupRequest) (*pb.LookupResponse, error) {
+			return &pb.LookupResponse{
+				Found: []*pb.EntityResult{
+					{
+						Entity:  entity1,
+						Version: 1,
+					},
+					{
+						Entity:  entity2,
+						Version: 1,
+					},
+				},
+			}, nil
+		},
+	}
+	client := &Client{
+		client: fakeClient,
+	}
+
+	ctx := context.Background()
+
+	dst := make([]*KeyLoaderEnt, len(keys))
+	err := client.GetMulti(ctx, keys, dst)
+	if err != nil {
+		t.Fatalf("client.GetMulti: %v", err)
+	}
+
+	for i := range dst {
+		if !testutil.Equal(dst[i].K, keys[i]) {
+			t.Fatalf("expected entity %d to have key %+v, got %+v", i, keys[i], dst[i].K)
+		}
+	}
+}
+
+func TestDeferredMissing(t *testing.T) {
+	type Ent struct {
+		A int
+		B string
+	}
+
+	keys := []*Key{
+		NameKey("testKind", "first", nil),
+		NameKey("testKind", "second", nil),
+	}
+
+	entity1 := &pb.Entity{
+		Key: keyToProto(keys[0]),
+	}
+	entity2 := &pb.Entity{
+		Key: keyToProto(keys[1]),
+	}
+
+	var count int
+	fakeClient := &fakeDatastoreClient{
+		lookup: func(*pb.LookupRequest) (*pb.LookupResponse, error) {
+			count++
+
+			if count == 1 {
+				return &pb.LookupResponse{
+					Missing: []*pb.EntityResult{
+						{
+							Entity:  entity1,
+							Version: 1,
+						},
+					},
+					Deferred: []*pb.Key{
+						keyToProto(keys[1]),
+					},
+				}, nil
+			}
+
+			return &pb.LookupResponse{
+				Missing: []*pb.EntityResult{
+					{
+						Entity:  entity2,
+						Version: 1,
+					},
+				},
+			}, nil
+		},
+	}
+	client := &Client{
+		client: fakeClient,
+	}
+
+	ctx := context.Background()
+
+	dst := make([]Ent, len(keys))
+	err := client.GetMulti(ctx, keys, dst)
+	errs, ok := err.(MultiError)
+	if !ok {
+		t.Fatalf("expected returned error to be a MultiError; got %v", err)
+	}
+	if len(errs) != 2 {
+		t.Fatalf("expected 2 errors returned, got %d", len(errs))
+	}
+	if errs[0] != ErrNoSuchEntity {
+		t.Fatalf("expected error to be ErrNoSuchEntity; got %v", errs[0])
+	}
+	if errs[1] != ErrNoSuchEntity {
+		t.Fatalf("expected error to be ErrNoSuchEntity; got %v", errs[1])
+	}
+
+	if count != 2 {
+		t.Fatalf("expected client.lookup to be called 2 times. Got %d", count)
+	}
+
+	if len(dst) != 2 {
+		t.Fatalf("expected 2 entities returned, got %d", len(dst))
+	}
+
+	for _, e := range dst {
+		if e.A != 0 || e.B != "" {
+			t.Fatalf("unexpected entity %+v", e)
+		}
+	}
+}
+
+type fakeDatastoreClient struct {
+	pb.DatastoreClient
+
+	// Optional handlers for the datastore methods.
+	// Any handlers left undefined will return an error.
+	lookup           func(*pb.LookupRequest) (*pb.LookupResponse, error)
+	runQuery         func(*pb.RunQueryRequest) (*pb.RunQueryResponse, error)
+	beginTransaction func(*pb.BeginTransactionRequest) (*pb.BeginTransactionResponse, error)
+	commit           func(*pb.CommitRequest) (*pb.CommitResponse, error)
+	rollback         func(*pb.RollbackRequest) (*pb.RollbackResponse, error)
+	allocateIds      func(*pb.AllocateIdsRequest) (*pb.AllocateIdsResponse, error)
+}
+
+func (c *fakeDatastoreClient) Lookup(ctx context.Context, in *pb.LookupRequest, opts ...grpc.CallOption) (*pb.LookupResponse, error) {
+	if c.lookup == nil {
+		return nil, errors.New("no lookup handler defined")
+	}
+	return c.lookup(in)
+}
+func (c *fakeDatastoreClient) RunQuery(ctx context.Context, in *pb.RunQueryRequest, opts ...grpc.CallOption) (*pb.RunQueryResponse, error) {
+	if c.runQuery == nil {
+		return nil, errors.New("no runQuery handler defined")
+	}
+	return c.runQuery(in)
+}
+func (c *fakeDatastoreClient) BeginTransaction(ctx context.Context, in *pb.BeginTransactionRequest, opts ...grpc.CallOption) (*pb.BeginTransactionResponse, error) {
+	if c.beginTransaction == nil {
+		return nil, errors.New("no beginTransaction handler defined")
+	}
+	return c.beginTransaction(in)
+}
+func (c *fakeDatastoreClient) Commit(ctx context.Context, in *pb.CommitRequest, opts ...grpc.CallOption) (*pb.CommitResponse, error) {
+	if c.commit == nil {
+		return nil, errors.New("no commit handler defined")
+	}
+	return c.commit(in)
+}
+func (c *fakeDatastoreClient) Rollback(ctx context.Context, in *pb.RollbackRequest, opts ...grpc.CallOption) (*pb.RollbackResponse, error) {
+	if c.rollback == nil {
+		return nil, errors.New("no rollback handler defined")
+	}
+	return c.rollback(in)
+}
+func (c *fakeDatastoreClient) AllocateIds(ctx context.Context, in *pb.AllocateIdsRequest, opts ...grpc.CallOption) (*pb.AllocateIdsResponse, error) {
+	if c.allocateIds == nil {
+		return nil, errors.New("no allocateIds handler defined")
+	}
+	return c.allocateIds(in)
+}
diff --git a/vendor/cloud.google.com/go/datastore/doc.go b/vendor/cloud.google.com/go/datastore/doc.go
new file mode 100644
index 0000000..1a05abb
--- /dev/null
+++ b/vendor/cloud.google.com/go/datastore/doc.go
@@ -0,0 +1,491 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package datastore provides a client for Google Cloud Datastore. + + +Basic Operations + +Entities are the unit of storage and are associated with a key. A key +consists of an optional parent key, a string application ID, a string kind +(also known as an entity type), and either a StringID or an IntID. A +StringID is also known as an entity name or key name. + +It is valid to create a key with a zero StringID and a zero IntID; this is +called an incomplete key, and does not refer to any saved entity. Putting an +entity into the datastore under an incomplete key will cause a unique key +to be generated for that entity, with a non-zero IntID. + +An entity's contents are a mapping from case-sensitive field names to values. +Valid value types are: + - signed integers (int, int8, int16, int32 and int64), + - bool, + - string, + - float32 and float64, + - []byte (up to 1 megabyte in length), + - any type whose underlying type is one of the above predeclared types, + - *Key, + - GeoPoint, + - time.Time (stored with microsecond precision), + - structs whose fields are all valid value types, + - pointers to structs whose fields are all valid value types, + - slices of any of the above, + - pointers to a signed integer, bool, string, float32, or float64. + +Slices of structs are valid, as are structs that contain slices. + +The Get and Put functions load and save an entity's contents. An entity's +contents are typically represented by a struct pointer. + +Example code: + + type Entity struct { + Value string + } + + func main() { + ctx := context.Background() + + // Create a datastore client. In a typical application, you would create + // a single client which is reused for every datastore operation. + dsClient, err := datastore.NewClient(ctx, "my-project") + if err != nil { + // Handle error. + } + + k := datastore.NameKey("Entity", "stringID", nil) + e := new(Entity) + if err := dsClient.Get(ctx, k, e); err != nil { + // Handle error. + } + + old := e.Value + e.Value = "Hello World!" + + if _, err := dsClient.Put(ctx, k, e); err != nil { + // Handle error. + } + + fmt.Printf("Updated value from %q to %q\n", old, e.Value) + } + +GetMulti, PutMulti and DeleteMulti are batch versions of the Get, Put and +Delete functions. They take a []*Key instead of a *Key, and may return a +datastore.MultiError when encountering partial failure. + +Mutate generalizes PutMulti and DeleteMulti to a sequence of any Datastore mutations. +It takes a series of mutations created with NewInsert, NewUpdate, NewUpsert and +NewDelete and applies them atomically. + + +Properties + +An entity's contents can be represented by a variety of types. These are +typically struct pointers, but can also be any type that implements the +PropertyLoadSaver interface. If using a struct pointer, you do not have to +explicitly implement the PropertyLoadSaver interface; the datastore will +automatically convert via reflection. If a struct pointer does implement that +interface then those methods will be used in preference to the default +behavior for struct pointers. Struct pointers are more strongly typed and are +easier to use; PropertyLoadSavers are more flexible. + +The actual types passed do not have to match between Get and Put calls or even +across different calls to datastore. It is valid to put a *PropertyList and +get that same entity as a *myStruct, or put a *myStruct0 and get a *myStruct1. 
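+For example, an entity saved from a struct can be read back into a
+PropertyList to inspect its raw properties (a sketch, reusing the dsClient
+and ctx from the example above):
+
+	type T struct{ A int }
+
+	k := datastore.NameKey("T", "t1", nil)
+	if _, err := dsClient.Put(ctx, k, &T{A: 7}); err != nil {
+		// Handle error.
+	}
+	var pl datastore.PropertyList
+	if err := dsClient.Get(ctx, k, &pl); err != nil {
+		// Handle error.
+	}
+	// pl now holds a single Property named "A" with value int64(7).
+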
+Conceptually, any entity is saved as a sequence of properties, and is loaded
+into the destination value on a property-by-property basis. When loading into
+a struct pointer, an entity that cannot be completely represented (such as a
+missing field) will result in an ErrFieldMismatch error, but it is up to the
+caller whether this error is fatal, recoverable or ignorable.
+
+By default, for struct pointers, all properties are potentially indexed, and
+the property name is the same as the field name (and hence must start with an
+upper case letter).
+
+Fields may have a `datastore:"name,options"` tag. The tag name is the
+property name, which must be one or more valid Go identifiers joined by ".",
+but may start with a lower case letter. An empty tag name means to just use the
+field name. A "-" tag name means that the datastore will ignore that field.
+
+The only valid options are "omitempty", "noindex" and "flatten".
+
+If the options include "omitempty" and the value of the field is empty, then the
+field will be omitted on Save. The empty values are false, 0, any nil pointer or
+interface value, and any array, slice, map, or string of length zero. Struct field
+values will never be empty, except for nil pointers.
+
+If the options include "noindex" then the field will not be indexed. All fields
+are indexed by default. Strings or byte slices longer than 1500 bytes cannot be
+indexed; fields used to store long strings and byte slices must be tagged with
+"noindex" or they will cause Put operations to fail.
+
+For a nested struct field, the options may also include "flatten". This indicates
+that the immediate fields and any nested substruct fields of the nested struct
+should be flattened. See below for examples.
+
+To use multiple options together, separate them by a comma.
+The order does not matter.
+
+If the options string is "", the comma may be omitted.
+
+Example code:
+
+	// A and B are renamed to a and b.
+	// A, C and J are not indexed.
+	// D's tag is equivalent to having no tag at all (E).
+	// I is ignored entirely by the datastore.
+	// J has tag information for both the datastore and json packages.
+	type TaggedStruct struct {
+		A int `datastore:"a,noindex"`
+		B int `datastore:"b"`
+		C int `datastore:",noindex"`
+		D int `datastore:""`
+		E int
+		I int `datastore:"-"`
+		J int `datastore:",noindex" json:"j"`
+	}
+
+
+Slice Fields
+
+A field of slice type corresponds to a Datastore array property, except for
+[]byte, which corresponds to a Datastore blob.
+
+Zero-length slice fields are not saved. Slice fields of length 1 or greater are
+saved as Datastore arrays. When a zero-length Datastore array is loaded into a
+slice field, the slice field remains unchanged.
+
+If a non-array value is loaded into a slice field, the result will be a slice
+with one element, containing the value.
+
+Loading Nulls
+
+Loading a Datastore Null into a basic type (int, float, etc.) results in a zero value.
+Loading a Null into a slice of basic type results in a slice of size 1 containing the zero value.
+Loading a Null into a pointer field results in nil.
+Loading a Null into a field of struct type is an error.
+
+Pointer Fields
+
+A struct field can be a pointer to a signed integer, floating-point number, string or
+bool. Putting a non-nil pointer will store its dereferenced value. Putting a nil
+pointer will store a Datastore Null property, unless the field is marked omitempty,
+in which case no property will be stored.
+
+Loading a Null into a pointer field sets the pointer to nil. Loading any other value
+allocates new storage with the value, and sets the field to point to it.
+
+
+Key Field
+
+If the struct contains a *datastore.Key field tagged with the name "__key__",
+its value will be ignored on Put. When reading the Entity back into the Go struct,
+the field will be populated with the *datastore.Key value used to query for
+the Entity.
+
+Example code:
+
+	type MyEntity struct {
+		A int
+		K *datastore.Key `datastore:"__key__"`
+	}
+
+	k := datastore.NameKey("Entity", "stringID", nil)
+	e := MyEntity{A: 12}
+	k, err := dsClient.Put(ctx, k, &e)
+	if err != nil {
+		// Handle error.
+	}
+
+	var entities []MyEntity
+	q := datastore.NewQuery("Entity").Filter("A =", 12).Limit(1)
+	_, err = dsClient.GetAll(ctx, q, &entities)
+	if err != nil {
+		// Handle error.
+	}
+
+	log.Println(entities[0])
+	// Prints {12 /Entity,stringID}
+
+
+
+Structured Properties
+
+If the struct pointed to contains other structs, then the nested or embedded
+structs are themselves saved as Entity values. For example, given these definitions:
+
+	type Inner struct {
+		W int32
+		X string
+	}
+
+	type Outer struct {
+		I Inner
+	}
+
+then an Outer would have one property, Inner, encoded as an Entity value.
+
+If an outer struct is tagged "noindex" then all of its implicit flattened
+fields are effectively "noindex".
+
+If the Inner struct contains a *Key field with the name "__key__", like so:
+
+	type Inner struct {
+		W int32
+		X string
+		K *datastore.Key `datastore:"__key__"`
+	}
+
+	type Outer struct {
+		I Inner
+	}
+
+then the value of K will be used as the Key for Inner, represented
+as an Entity value in datastore.
+
+If any nested struct fields should be flattened, instead of encoded as
+Entity values, the nested struct field should be tagged with the "flatten"
+option. For example, given the following:
+
+	type Inner1 struct {
+		W int32
+		X string
+	}
+
+	type Inner2 struct {
+		Y float64
+	}
+
+	type Inner3 struct {
+		Z bool
+	}
+
+	type Inner4 struct {
+		WW int
+	}
+
+	type Inner5 struct {
+		X Inner4
+	}
+
+	type Outer struct {
+		A      int16
+		I      []Inner1 `datastore:",flatten"`
+		J      Inner2   `datastore:",flatten"`
+		K      Inner5   `datastore:",flatten"`
+		Inner3          `datastore:",flatten"`
+	}
+
+an Outer's properties would be equivalent to those of:
+
+	type OuterEquivalent struct {
+		A          int16
+		IDotW      []int32  `datastore:"I.W"`
+		IDotX      []string `datastore:"I.X"`
+		JDotY      float64  `datastore:"J.Y"`
+		KDotXDotWW int      `datastore:"K.X.WW"`
+		Z          bool
+	}
+
+Note that the "flatten" option cannot be used for Entity value fields.
+The server will reject any dotted field names for an Entity value.
+
+
+The PropertyLoadSaver Interface
+
+An entity's contents can also be represented by any type that implements the
+PropertyLoadSaver interface. This type may be a struct pointer, but it does
+not have to be. The datastore package will call Load when getting the entity's
+contents, and Save when putting the entity's contents.
+Possible uses include deriving non-stored fields, verifying fields, or indexing
+a field only if its value is positive.
+
+Example code:
+
+	type CustomPropsExample struct {
+		I, J int
+		// Sum is not stored, but should always be equal to I + J.
+		Sum int `datastore:"-"`
+	}
+
+	func (x *CustomPropsExample) Load(ps []datastore.Property) error {
+		// Load I and J as usual.
+		if err := datastore.LoadStruct(x, ps); err != nil {
+			return err
+		}
+		// Derive the Sum field.
+		x.Sum = x.I + x.J
+		return nil
+	}
+
+	func (x *CustomPropsExample) Save() ([]datastore.Property, error) {
+		// Validate the Sum field.
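+		// (Returning a non-nil error from Save makes the Put that
+		// triggered it fail with that error.)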
+ if x.Sum != x.I + x.J { + return nil, errors.New("CustomPropsExample has inconsistent sum") + } + // Save I and J as usual. The code below is equivalent to calling + // "return datastore.SaveStruct(x)", but is done manually for + // demonstration purposes. + return []datastore.Property{ + { + Name: "I", + Value: int64(x.I), + }, + { + Name: "J", + Value: int64(x.J), + }, + }, nil + } + +The *PropertyList type implements PropertyLoadSaver, and can therefore hold an +arbitrary entity's contents. + +The KeyLoader Interface + +If a type implements the PropertyLoadSaver interface, it may +also want to implement the KeyLoader interface. +The KeyLoader interface exists to allow implementations of PropertyLoadSaver +to also load an Entity's Key into the Go type. This type may be a struct +pointer, but it does not have to be. The datastore package will call LoadKey +when getting the entity's contents, after calling Load. + +Example code: + + type WithKeyExample struct { + I int + Key *datastore.Key + } + + func (x *WithKeyExample) LoadKey(k *datastore.Key) error { + x.Key = k + return nil + } + + func (x *WithKeyExample) Load(ps []datastore.Property) error { + // Load I as usual. + return datastore.LoadStruct(x, ps) + } + + func (x *WithKeyExample) Save() ([]datastore.Property, error) { + // Save I as usual. + return datastore.SaveStruct(x) + } + +To load a Key into a struct which does not implement the PropertyLoadSaver +interface, see the "Key Field" section above. + + +Queries + +Queries retrieve entities based on their properties or key's ancestry. Running +a query yields an iterator of results: either keys or (key, entity) pairs. +Queries are re-usable and it is safe to call Query.Run from concurrent +goroutines. Iterators are not safe for concurrent use. + +Queries are immutable, and are either created by calling NewQuery, or derived +from an existing query by calling a method like Filter or Order that returns a +new query value. A query is typically constructed by calling NewQuery followed +by a chain of zero or more such methods. These methods are: + - Ancestor and Filter constrain the entities returned by running a query. + - Order affects the order in which they are returned. + - Project constrains the fields returned. + - Distinct de-duplicates projected entities. + - KeysOnly makes the iterator return only keys, not (key, entity) pairs. + - Start, End, Offset and Limit define which sub-sequence of matching entities + to return. Start and End take cursors, Offset and Limit take integers. Start + and Offset affect the first result, End and Limit affect the last result. + If both Start and Offset are set, then the offset is relative to Start. + If both End and Limit are set, then the earliest constraint wins. Limit is + relative to Start+Offset, not relative to End. As a special case, a + negative limit means unlimited. + +Example code: + + type Widget struct { + Description string + Price int + } + + func printWidgets(ctx context.Context, client *datastore.Client) { + q := datastore.NewQuery("Widget"). + Filter("Price <", 1000). + Order("-Price") + for t := client.Run(ctx, q); ; { + var x Widget + key, err := t.Next(&x) + if err == iterator.Done { + break + } + if err != nil { + // Handle error. + } + fmt.Printf("Key=%v\nWidget=%#v\n\n", key, x) + } + } + + +Transactions + +Client.RunInTransaction runs a function in a transaction. 
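+If the function returns nil, RunInTransaction commits the transaction,
+retrying it if the commit fails because of a conflicting transaction; after
+several failed attempts it gives up and returns ErrConcurrentTransaction.
+If the function returns a non-nil error, the transaction is rolled back and
+that error is returned.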
+ +Example code: + + type Counter struct { + Count int + } + + func incCount(ctx context.Context, client *datastore.Client) { + var count int + key := datastore.NameKey("Counter", "singleton", nil) + _, err := client.RunInTransaction(ctx, func(tx *datastore.Transaction) error { + var x Counter + if err := tx.Get(key, &x); err != nil && err != datastore.ErrNoSuchEntity { + return err + } + x.Count++ + if _, err := tx.Put(key, &x); err != nil { + return err + } + count = x.Count + return nil + }) + if err != nil { + // Handle error. + } + // The value of count is only valid once the transaction is successful + // (RunInTransaction has returned nil). + fmt.Printf("Count=%d\n", count) + } + +Pass the ReadOnly option to RunInTransaction if your transaction is used only for Get, +GetMulti or queries. Read-only transactions are more efficient. + +Google Cloud Datastore Emulator + +This package supports the Cloud Datastore emulator, which is useful for testing and +development. Environment variables are used to indicate that datastore traffic should be +directed to the emulator instead of the production Datastore service. + +To install and set up the emulator and its environment variables, see the documentation +at https://cloud.google.com/datastore/docs/tools/datastore-emulator. + +Authentication + +See examples of authorization and authentication at +https://godoc.org/cloud.google.com/go#pkg-examples. + +*/ +package datastore // import "cloud.google.com/go/datastore" diff --git a/vendor/cloud.google.com/go/datastore/errors.go b/vendor/cloud.google.com/go/datastore/errors.go new file mode 100644 index 0000000..3077f80 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/errors.go @@ -0,0 +1,47 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file provides error functions for common API failure modes. + +package datastore + +import ( + "fmt" +) + +// MultiError is returned by batch operations when there are errors with +// particular elements. Errors will be in a one-to-one correspondence with +// the input elements; successful elements will have a nil entry. +type MultiError []error + +func (m MultiError) Error() string { + s, n := "", 0 + for _, e := range m { + if e != nil { + if n == 0 { + s = e.Error() + } + n++ + } + } + switch n { + case 0: + return "(0 errors)" + case 1: + return s + case 2: + return s + " (and 1 other error)" + } + return fmt.Sprintf("%s (and %d other errors)", s, n-1) +} diff --git a/vendor/cloud.google.com/go/datastore/example_test.go b/vendor/cloud.google.com/go/datastore/example_test.go new file mode 100644 index 0000000..88edbe9 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/example_test.go @@ -0,0 +1,567 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore_test + +import ( + "fmt" + "log" + "time" + + "cloud.google.com/go/datastore" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func ExampleNewClient() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + _ = client // TODO: Use client. +} + +func ExampleClient_Get() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + type Article struct { + Title string + Description string + Body string `datastore:",noindex"` + Author *datastore.Key + PublishedAt time.Time + } + key := datastore.NameKey("Article", "articled1", nil) + article := &Article{} + if err := client.Get(ctx, key, article); err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_Put() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + type Article struct { + Title string + Description string + Body string `datastore:",noindex"` + Author *datastore.Key + PublishedAt time.Time + } + newKey := datastore.IncompleteKey("Article", nil) + _, err = client.Put(ctx, newKey, &Article{ + Title: "The title of the article", + Description: "The description of the article...", + Body: "...", + Author: datastore.NameKey("Author", "jbd", nil), + PublishedAt: time.Now(), + }) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_Put_flatten() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + log.Fatal(err) + } + + type Animal struct { + Name string + Type string + Breed string + } + + type Human struct { + Name string + Height int + Pet Animal `datastore:",flatten"` + } + + newKey := datastore.IncompleteKey("Human", nil) + _, err = client.Put(ctx, newKey, &Human{ + Name: "Susan", + Height: 67, + Pet: Animal{ + Name: "Fluffy", + Type: "Cat", + Breed: "Sphynx", + }, + }) + if err != nil { + log.Fatal(err) + } +} + +func ExampleClient_Delete() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + key := datastore.NameKey("Article", "articled1", nil) + if err := client.Delete(ctx, key); err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_DeleteMulti() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + var keys []*datastore.Key + for i := 1; i <= 10; i++ { + keys = append(keys, datastore.IDKey("Article", int64(i), nil)) + } + if err := client.DeleteMulti(ctx, keys); err != nil { + // TODO: Handle error. + } +} + +type Post struct { + Title string + PublishedAt time.Time + Comments int +} + +func ExampleClient_GetMulti() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. 
+	}
+
+	keys := []*datastore.Key{
+		datastore.NameKey("Post", "post1", nil),
+		datastore.NameKey("Post", "post2", nil),
+		datastore.NameKey("Post", "post3", nil),
+	}
+	posts := make([]Post, 3)
+	if err := client.GetMulti(ctx, keys, posts); err != nil {
+		// TODO: Handle error.
+	}
+}
+
+func ExampleClient_PutMulti_slice() {
+	ctx := context.Background()
+	client, err := datastore.NewClient(ctx, "project-id")
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	keys := []*datastore.Key{
+		datastore.NameKey("Post", "post1", nil),
+		datastore.NameKey("Post", "post2", nil),
+	}
+
+	// PutMulti with a Post slice.
+	posts := []*Post{
+		{Title: "Post 1", PublishedAt: time.Now()},
+		{Title: "Post 2", PublishedAt: time.Now()},
+	}
+	if _, err := client.PutMulti(ctx, keys, posts); err != nil {
+		// TODO: Handle error.
+	}
+}
+
+func ExampleClient_PutMulti_interfaceSlice() {
+	ctx := context.Background()
+	client, err := datastore.NewClient(ctx, "project-id")
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	keys := []*datastore.Key{
+		datastore.NameKey("Post", "post1", nil),
+		datastore.NameKey("Post", "post2", nil),
+	}
+
+	// PutMulti with an empty interface slice.
+	posts := []interface{}{
+		&Post{Title: "Post 1", PublishedAt: time.Now()},
+		&Post{Title: "Post 2", PublishedAt: time.Now()},
+	}
+	if _, err := client.PutMulti(ctx, keys, posts); err != nil {
+		// TODO: Handle error.
+	}
+}
+
+func ExampleNewQuery() {
+	// Query for Post entities.
+	q := datastore.NewQuery("Post")
+	_ = q // TODO: Use the query with Client.Run.
+}
+
+func ExampleNewQuery_options() {
+	// Query to order the posts by the number of comments they have received.
+	q := datastore.NewQuery("Post").Order("-Comments")
+	// Start listing from an offset and limit the results.
+	q = q.Offset(20).Limit(10)
+	_ = q // TODO: Use the query.
+}
+
+func ExampleClient_Count() {
+	ctx := context.Background()
+	client, err := datastore.NewClient(ctx, "project-id")
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// Count the number of Post entities.
+	q := datastore.NewQuery("Post")
+	n, err := client.Count(ctx, q)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	fmt.Printf("There are %d posts.", n)
+}
+
+func ExampleClient_Run() {
+	ctx := context.Background()
+	client, err := datastore.NewClient(ctx, "project-id")
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// List the posts published since yesterday.
+	yesterday := time.Now().Add(-24 * time.Hour)
+	q := datastore.NewQuery("Post").Filter("PublishedAt >", yesterday)
+	it := client.Run(ctx, q)
+	_ = it // TODO: Iterate using Next.
+}
+
+func ExampleClient_NewTransaction() {
+	ctx := context.Background()
+	client, err := datastore.NewClient(ctx, "project-id")
+	if err != nil {
+		// TODO: Handle error.
+	}
+	const retries = 3
+
+	// Increment a counter.
+	// See https://cloud.google.com/appengine/articles/sharding_counters for
+	// a more scalable solution.
+	type Counter struct {
+		Count int
+	}
+
+	key := datastore.NameKey("counter", "CounterA", nil)
+	var tx *datastore.Transaction
+	for i := 0; i < retries; i++ {
+		tx, err = client.NewTransaction(ctx)
+		if err != nil {
+			break
+		}
+
+		var c Counter
+		if err = tx.Get(key, &c); err != nil && err != datastore.ErrNoSuchEntity {
+			break
+		}
+		c.Count++
+		if _, err = tx.Put(key, &c); err != nil {
+			break
+		}
+
+		// Attempt to commit the transaction. If there's a conflict, try again.
+		if _, err = tx.Commit(); err != datastore.ErrConcurrentTransaction {
+			break
+		}
+	}
+	if err != nil {
+		// TODO: Handle error.
+ } +} + +func ExampleClient_RunInTransaction() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + // Increment a counter. + // See https://cloud.google.com/appengine/articles/sharding_counters for + // a more scalable solution. + type Counter struct { + Count int + } + + var count int + key := datastore.NameKey("Counter", "singleton", nil) + _, err = client.RunInTransaction(ctx, func(tx *datastore.Transaction) error { + var x Counter + if err := tx.Get(key, &x); err != nil && err != datastore.ErrNoSuchEntity { + return err + } + x.Count++ + if _, err := tx.Put(key, &x); err != nil { + return err + } + count = x.Count + return nil + }) + if err != nil { + // TODO: Handle error. + } + // The value of count is only valid once the transaction is successful + // (RunInTransaction has returned nil). + fmt.Printf("Count=%d\n", count) +} + +func ExampleClient_AllocateIDs() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + var keys []*datastore.Key + for i := 0; i < 10; i++ { + keys = append(keys, datastore.IncompleteKey("Article", nil)) + } + keys, err = client.AllocateIDs(ctx, keys) + if err != nil { + // TODO: Handle error. + } + _ = keys // TODO: Use keys. +} + +func ExampleKey_Encode() { + key := datastore.IDKey("Article", 1, nil) + encoded := key.Encode() + fmt.Println(encoded) + // Output: EgsKB0FydGljbGUQAQ +} + +func ExampleDecodeKey() { + const encoded = "EgsKB0FydGljbGUQAQ" + key, err := datastore.DecodeKey(encoded) + if err != nil { + // TODO: Handle error. + } + fmt.Println(key) + // Output: /Article,1 +} + +func ExampleIDKey() { + // Key with numeric ID. + k := datastore.IDKey("Article", 1, nil) + _ = k // TODO: Use key. +} + +func ExampleNameKey() { + // Key with string ID. + k := datastore.NameKey("Article", "article8", nil) + _ = k // TODO: Use key. +} + +func ExampleIncompleteKey() { + k := datastore.IncompleteKey("Article", nil) + _ = k // TODO: Use incomplete key. +} + +func ExampleClient_GetAll() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + var posts []*Post + keys, err := client.GetAll(ctx, datastore.NewQuery("Post"), &posts) + for i, key := range keys { + fmt.Println(key) + fmt.Println(posts[i]) + } +} + +func ExampleClient_Mutate() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + key1 := datastore.NameKey("Post", "post1", nil) + key2 := datastore.NameKey("Post", "post2", nil) + key3 := datastore.NameKey("Post", "post3", nil) + key4 := datastore.NameKey("Post", "post4", nil) + + _, err = client.Mutate(ctx, + datastore.NewInsert(key1, Post{Title: "Post 1"}), + datastore.NewUpsert(key2, Post{Title: "Post 2"}), + datastore.NewUpdate(key3, Post{Title: "Post 3"}), + datastore.NewDelete(key4)) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleCommit_Key() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "") + if err != nil { + // TODO: Handle error. + } + var pk1, pk2 *datastore.PendingKey + // Create two posts in a single transaction. 
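+	// Inside the transaction the final keys are not yet known, so tx.Put
+	// returns PendingKeys; they are resolved to real keys through the
+	// Commit value once the transaction has committed.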
+ commit, err := client.RunInTransaction(ctx, func(tx *datastore.Transaction) error { + var err error + pk1, err = tx.Put(datastore.IncompleteKey("Post", nil), &Post{Title: "Post 1", PublishedAt: time.Now()}) + if err != nil { + return err + } + pk2, err = tx.Put(datastore.IncompleteKey("Post", nil), &Post{Title: "Post 2", PublishedAt: time.Now()}) + if err != nil { + return err + } + return nil + }) + if err != nil { + // TODO: Handle error. + } + // Now pk1, pk2 are valid PendingKeys. Let's convert them into real keys + // using the Commit object. + k1 := commit.Key(pk1) + k2 := commit.Key(pk2) + fmt.Println(k1, k2) +} + +func ExampleIterator_Next() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it := client.Run(ctx, datastore.NewQuery("Post")) + for { + var p Post + key, err := it.Next(&p) + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(key, p) + } +} + +func ExampleIterator_Cursor() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it := client.Run(ctx, datastore.NewQuery("Post")) + for { + var p Post + _, err := it.Next(&p) + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(p) + cursor, err := it.Cursor() + if err != nil { + // TODO: Handle error. + } + // When printed, a cursor will display as a string that can be passed + // to datastore.NewCursor. + fmt.Printf("to resume with this post, use cursor %s\n", cursor) + } +} + +func ExampleDecodeCursor() { + // See Query.Start for a fuller example of DecodeCursor. + // getCursor represents a function that returns a cursor from a previous + // iteration in string form. + cursorString := getCursor() + cursor, err := datastore.DecodeCursor(cursorString) + if err != nil { + // TODO: Handle error. + } + _ = cursor // TODO: Use the cursor with Query.Start or Query.End. +} + +func getCursor() string { return "" } + +func ExampleQuery_Start() { + // This example demonstrates how to use cursors and Query.Start + // to resume an iteration. + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + // getCursor represents a function that returns a cursor from a previous + // iteration in string form. + cursorString := getCursor() + cursor, err := datastore.DecodeCursor(cursorString) + if err != nil { + // TODO: Handle error. + } + it := client.Run(ctx, datastore.NewQuery("Post").Start(cursor)) + _ = it // TODO: Use iterator. +} + +func ExampleLoadStruct() { + type Player struct { + User string + Score int + } + // Normally LoadStruct would only be used inside a custom implementation of + // PropertyLoadSaver; this is for illustrative purposes only. + props := []datastore.Property{ + {Name: "User", Value: "Alice"}, + {Name: "Score", Value: int64(97)}, + } + + var p Player + if err := datastore.LoadStruct(&p, props); err != nil { + // TODO: Handle error. + } + fmt.Println(p) + // Output: {Alice 97} +} + +func ExampleSaveStruct() { + type Player struct { + User string + Score int + } + + p := &Player{ + User: "Alice", + Score: 97, + } + props, err := datastore.SaveStruct(p) + if err != nil { + // TODO: Handle error. 
+ } + fmt.Println(props) + // TODO(jba): make this output stable: Output: [{User Alice false} {Score 97 false}] +} diff --git a/vendor/cloud.google.com/go/datastore/integration_test.go b/vendor/cloud.google.com/go/datastore/integration_test.go new file mode 100644 index 0000000..3345857 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/integration_test.go @@ -0,0 +1,1277 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "encoding/json" + "errors" + "flag" + "fmt" + "log" + "net" + "os" + "reflect" + "sort" + "strings" + "sync" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "cloud.google.com/go/rpcreplay" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// TODO(djd): Make test entity clean up more robust: some test entities may +// be left behind if tests are aborted, the transport fails, etc. + +var timeNow = time.Now() + +// suffix is a timestamp-based suffix which is appended to key names, +// particularly for the root keys of entity groups. This reduces flakiness +// when the tests are run in parallel. 
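+// The suffix is assigned in testMain from the run's start time (UnixNano),
+// so each test run gets a distinct value.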
+var suffix string + +const replayFilename = "datastore.replay" + +type replayInfo struct { + ProjectID string + Time time.Time +} + +var ( + record = flag.Bool("record", false, "record RPCs") + + newTestClient = func(ctx context.Context, t *testing.T) *Client { + return newClient(ctx, t, nil) + } +) + +func TestMain(m *testing.M) { + os.Exit(testMain(m)) +} + +func testMain(m *testing.M) int { + flag.Parse() + if testing.Short() { + if *record { + log.Fatal("cannot combine -short and -record") + } + if _, err := os.Stat(replayFilename); err == nil { + initReplay() + } + } else if *record { + if testutil.ProjID() == "" { + log.Fatal("must record with a project ID") + } + b, err := json.Marshal(replayInfo{ + ProjectID: testutil.ProjID(), + Time: timeNow, + }) + if err != nil { + log.Fatal(err) + } + rec, err := rpcreplay.NewRecorder(replayFilename, b) + if err != nil { + log.Fatal(err) + } + defer func() { + if err := rec.Close(); err != nil { + log.Fatalf("closing recorder: %v", err) + } + }() + newTestClient = func(ctx context.Context, t *testing.T) *Client { + return newClient(ctx, t, rec.DialOptions()) + } + log.Printf("recording to %s", replayFilename) + } + suffix = fmt.Sprintf("-t%d", timeNow.UnixNano()) + return m.Run() +} + +func initReplay() { + rep, err := rpcreplay.NewReplayer(replayFilename) + if err != nil { + log.Fatal(err) + } + defer rep.Close() + + var ri replayInfo + if err := json.Unmarshal(rep.Initial(), &ri); err != nil { + log.Fatalf("unmarshaling initial replay info: %v", err) + } + timeNow = ri.Time.In(time.Local) + + conn, err := replayConn(rep) + if err != nil { + log.Fatal(err) + } + newTestClient = func(ctx context.Context, t *testing.T) *Client { + client, err := NewClient(ctx, ri.ProjectID, option.WithGRPCConn(conn)) + if err != nil { + t.Fatalf("NewClient: %v", err) + } + return client + } + log.Printf("replaying from %s", replayFilename) +} + +func replayConn(rep *rpcreplay.Replayer) (*grpc.ClientConn, error) { + // If we make a real connection we need creds from somewhere, and they + // might not be available, for instance on Travis. + // Replaying doesn't require a connection live at all, but we need + // something to attach gRPC interceptors to. + // So we start a local listener and connect to it, then close them down. + // TODO(jba): build something like this into the replayer? + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, err + } + conn, err := grpc.Dial(l.Addr().String(), + append([]grpc.DialOption{grpc.WithInsecure()}, rep.DialOptions()...)...) + if err != nil { + return nil, err + } + conn.Close() + l.Close() + return conn, nil +} + +func newClient(ctx context.Context, t *testing.T, dialOpts []grpc.DialOption) *Client { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ts := testutil.TokenSource(ctx, ScopeDatastore) + if ts == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + opts := []option.ClientOption{option.WithTokenSource(ts)} + for _, opt := range dialOpts { + opts = append(opts, option.WithGRPCDialOption(opt)) + } + client, err := NewClient(ctx, testutil.ProjID(), opts...) 
+	if err != nil {
+		t.Fatalf("NewClient: %v", err)
+	}
+	return client
+}
+
+func TestBasics(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*20)
+	defer cancel()
+	client := newTestClient(ctx, t)
+	defer client.Close()
+
+	type X struct {
+		I int
+		S string
+		T time.Time
+	}
+
+	x0 := X{66, "99", timeNow.Truncate(time.Millisecond)}
+	k, err := client.Put(ctx, IncompleteKey("BasicsX", nil), &x0)
+	if err != nil {
+		t.Fatalf("client.Put: %v", err)
+	}
+	x1 := X{}
+	err = client.Get(ctx, k, &x1)
+	if err != nil {
+		t.Errorf("client.Get: %v", err)
+	}
+	err = client.Delete(ctx, k)
+	if err != nil {
+		t.Errorf("client.Delete: %v", err)
+	}
+	if !testutil.Equal(x0, x1) {
+		t.Errorf("compare: x0=%v, x1=%v", x0, x1)
+	}
+}
+
+func TestTopLevelKeyLoaded(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*20)
+	defer cancel()
+	client := newTestClient(ctx, t)
+	defer client.Close()
+
+	completeKey := NameKey("EntityWithKey", "myent", nil)
+
+	type EntityWithKey struct {
+		I int
+		S string
+		K *Key `datastore:"__key__"`
+	}
+
+	in := &EntityWithKey{
+		I: 12,
+		S: "abcd",
+	}
+
+	k, err := client.Put(ctx, completeKey, in)
+	if err != nil {
+		t.Fatalf("client.Put: %v", err)
+	}
+
+	var e EntityWithKey
+	err = client.Get(ctx, k, &e)
+	if err != nil {
+		t.Fatalf("client.Get: %v", err)
+	}
+
+	// The two keys should be absolutely identical.
+	if !testutil.Equal(e.K, k) {
+		t.Fatalf("e.K not equal to k; got %#v, want %#v", e.K, k)
+	}
+}
+
+func TestListValues(t *testing.T) {
+	ctx := context.Background()
+	client := newTestClient(ctx, t)
+	defer client.Close()
+
+	p0 := PropertyList{
+		{Name: "L", Value: []interface{}{int64(12), "string", true}},
+	}
+	k, err := client.Put(ctx, IncompleteKey("ListValue", nil), &p0)
+	if err != nil {
+		t.Fatalf("client.Put: %v", err)
+	}
+	var p1 PropertyList
+	if err := client.Get(ctx, k, &p1); err != nil {
+		t.Errorf("client.Get: %v", err)
+	}
+	if !testutil.Equal(p0, p1) {
+		t.Errorf("compare:\np0=%v\np1=%#v", p0, p1)
+	}
+	if err = client.Delete(ctx, k); err != nil {
+		t.Errorf("client.Delete: %v", err)
+	}
+}
+
+func TestGetMulti(t *testing.T) {
+	ctx := context.Background()
+	client := newTestClient(ctx, t)
+	defer client.Close()
+
+	type X struct {
+		I int
+	}
+	p := NameKey("X", "x"+suffix, nil)
+
+	cases := []struct {
+		key *Key
+		put bool
+	}{
+		{key: NameKey("X", "item1", p), put: true},
+		{key: NameKey("X", "item2", p), put: false},
+		{key: NameKey("X", "item3", p), put: false},
+		{key: NameKey("X", "item3", p), put: false},
+		{key: NameKey("X", "item4", p), put: true},
+	}
+
+	var src, dst []*X
+	var srcKeys, dstKeys []*Key
+	for _, c := range cases {
+		dst = append(dst, &X{})
+		dstKeys = append(dstKeys, c.key)
+		if c.put {
+			src = append(src, &X{})
+			srcKeys = append(srcKeys, c.key)
+		}
+	}
+	if _, err := client.PutMulti(ctx, srcKeys, src); err != nil {
+		t.Error(err)
+	}
+	err := client.GetMulti(ctx, dstKeys, dst)
+	if err == nil {
+		t.Errorf("client.GetMulti got %v, expected error", err)
+	}
+	e, ok := err.(MultiError)
+	if !ok {
+		t.Errorf("client.GetMulti got %T, expected MultiError", err)
+	}
+	for i, err := range e {
+		got, want := err, (error)(nil)
+		if !cases[i].put {
+			got, want = err, ErrNoSuchEntity
+		}
+		if got != want {
+			t.Errorf("MultiError[%d] == %v, want %v", i, got, want)
+		}
+	}
+}
+
+type Z struct {
+	S string
+	T string `datastore:",noindex"`
+	P []byte
+	K []byte `datastore:",noindex"`
+}
+
+func (z Z) String() string {
+	var lens []string
+	v := reflect.ValueOf(z)
+	for i := 0; i < v.NumField(); i++ {
if l := v.Field(i).Len(); l > 0 { + lens = append(lens, fmt.Sprintf("len(%s)=%d", v.Type().Field(i).Name, l)) + } + } + return fmt.Sprintf("Z{ %s }", strings.Join(lens, ",")) +} + +func TestUnindexableValues(t *testing.T) { + ctx := context.Background() + client := newTestClient(ctx, t) + defer client.Close() + + x1500 := strings.Repeat("x", 1500) + x1501 := strings.Repeat("x", 1501) + testCases := []struct { + in Z + wantErr bool + }{ + {in: Z{S: x1500}, wantErr: false}, + {in: Z{S: x1501}, wantErr: true}, + {in: Z{T: x1500}, wantErr: false}, + {in: Z{T: x1501}, wantErr: false}, + {in: Z{P: []byte(x1500)}, wantErr: false}, + {in: Z{P: []byte(x1501)}, wantErr: true}, + {in: Z{K: []byte(x1500)}, wantErr: false}, + {in: Z{K: []byte(x1501)}, wantErr: false}, + } + for _, tt := range testCases { + _, err := client.Put(ctx, IncompleteKey("BasicsZ", nil), &tt.in) + if (err != nil) != tt.wantErr { + t.Errorf("client.Put %s got err %v, want err %t", tt.in, err, tt.wantErr) + } + } +} + +func TestNilKey(t *testing.T) { + ctx := context.Background() + client := newTestClient(ctx, t) + defer client.Close() + + testCases := []struct { + in K0 + wantErr bool + }{ + {in: K0{K: testKey0}, wantErr: false}, + {in: K0{}, wantErr: false}, + } + for _, tt := range testCases { + _, err := client.Put(ctx, IncompleteKey("NilKey", nil), &tt.in) + if (err != nil) != tt.wantErr { + t.Errorf("client.Put %s got err %v, want err %t", tt.in, err, tt.wantErr) + } + } +} + +type SQChild struct { + I, J int + T, U int64 +} + +type SQTestCase struct { + desc string + q *Query + wantCount int + wantSum int +} + +func testSmallQueries(t *testing.T, ctx context.Context, client *Client, parent *Key, children []*SQChild, + testCases []SQTestCase, extraTests ...func()) { + keys := make([]*Key, len(children)) + for i := range keys { + keys[i] = IncompleteKey("SQChild", parent) + } + keys, err := client.PutMulti(ctx, keys, children) + if err != nil { + t.Fatalf("client.PutMulti: %v", err) + } + defer func() { + err := client.DeleteMulti(ctx, keys) + if err != nil { + t.Errorf("client.DeleteMulti: %v", err) + } + }() + + for _, tc := range testCases { + count, err := client.Count(ctx, tc.q) + if err != nil { + t.Errorf("Count %q: %v", tc.desc, err) + continue + } + if count != tc.wantCount { + t.Errorf("Count %q: got %d want %d", tc.desc, count, tc.wantCount) + continue + } + } + + for _, tc := range testCases { + var got []SQChild + _, err := client.GetAll(ctx, tc.q, &got) + if err != nil { + t.Errorf("client.GetAll %q: %v", tc.desc, err) + continue + } + sum := 0 + for _, c := range got { + sum += c.I + c.J + } + if sum != tc.wantSum { + t.Errorf("sum %q: got %d want %d", tc.desc, sum, tc.wantSum) + continue + } + } + for _, x := range extraTests { + x() + } +} + +func TestFilters(t *testing.T) { + ctx := context.Background() + client := newTestClient(ctx, t) + defer client.Close() + + parent := NameKey("SQParent", "TestFilters"+suffix, nil) + now := timeNow.Truncate(time.Millisecond).Unix() + children := []*SQChild{ + {I: 0, T: now, U: now}, + {I: 1, T: now, U: now}, + {I: 2, T: now, U: now}, + {I: 3, T: now, U: now}, + {I: 4, T: now, U: now}, + {I: 5, T: now, U: now}, + {I: 6, T: now, U: now}, + {I: 7, T: now, U: now}, + } + baseQuery := NewQuery("SQChild").Ancestor(parent).Filter("T=", now) + testSmallQueries(t, ctx, client, parent, children, []SQTestCase{ + { + "I>1", + baseQuery.Filter("I>", 1), + 6, + 2 + 3 + 4 + 5 + 6 + 7, + }, + { + "I>2 AND I<=5", + baseQuery.Filter("I>", 2).Filter("I<=", 5), + 3, + 3 + 4 + 5, + }, + { + 
"I>=3 AND I<3", + baseQuery.Filter("I>=", 3).Filter("I<", 3), + 0, + 0, + }, + { + "I=4", + baseQuery.Filter("I=", 4), + 1, + 4, + }, + }, func() { + got := []*SQChild{} + want := []*SQChild{ + {I: 0, T: now, U: now}, + {I: 1, T: now, U: now}, + {I: 2, T: now, U: now}, + {I: 3, T: now, U: now}, + {I: 4, T: now, U: now}, + {I: 5, T: now, U: now}, + {I: 6, T: now, U: now}, + {I: 7, T: now, U: now}, + } + _, err := client.GetAll(ctx, baseQuery.Order("I"), &got) + if err != nil { + t.Errorf("client.GetAll: %v", err) + } + if !testutil.Equal(got, want) { + t.Errorf("compare: got=%v, want=%v", got, want) + } + }, func() { + got := []*SQChild{} + want := []*SQChild{ + {I: 7, T: now, U: now}, + {I: 6, T: now, U: now}, + {I: 5, T: now, U: now}, + {I: 4, T: now, U: now}, + {I: 3, T: now, U: now}, + {I: 2, T: now, U: now}, + {I: 1, T: now, U: now}, + {I: 0, T: now, U: now}, + } + _, err := client.GetAll(ctx, baseQuery.Order("-I"), &got) + if err != nil { + t.Errorf("client.GetAll: %v", err) + } + if !testutil.Equal(got, want) { + t.Errorf("compare: got=%v, want=%v", got, want) + } + }) +} + +type ckey struct{} + +func TestLargeQuery(t *testing.T) { + ctx := context.Background() + client := newTestClient(ctx, t) + defer client.Close() + + parent := NameKey("LQParent", "TestFilters"+suffix, nil) + now := timeNow.Truncate(time.Millisecond).Unix() + + // Make a large number of children entities. + const n = 800 + children := make([]*SQChild, 0, n) + keys := make([]*Key, 0, n) + for i := 0; i < n; i++ { + children = append(children, &SQChild{I: i, T: now, U: now}) + keys = append(keys, IncompleteKey("SQChild", parent)) + } + + // Store using PutMulti in batches. + const batchSize = 500 + for i := 0; i < n; i = i + 500 { + j := i + batchSize + if j > n { + j = n + } + fullKeys, err := client.PutMulti(ctx, keys[i:j], children[i:j]) + if err != nil { + t.Fatalf("PutMulti(%d, %d): %v", i, j, err) + } + defer func() { + err := client.DeleteMulti(ctx, fullKeys) + if err != nil { + t.Errorf("client.DeleteMulti: %v", err) + } + }() + } + + q := NewQuery("SQChild").Ancestor(parent).Filter("T=", now).Order("I") + + // Wait group to allow us to run query tests in parallel below. + var wg sync.WaitGroup + + // Check we get the expected count and results for various limits/offsets. + queryTests := []struct { + limit, offset, want int + }{ + // Just limit. + {limit: 0, want: 0}, + {limit: 100, want: 100}, + {limit: 501, want: 501}, + {limit: n, want: n}, + {limit: n * 2, want: n}, + {limit: -1, want: n}, + // Just offset. + {limit: -1, offset: 100, want: n - 100}, + {limit: -1, offset: 500, want: n - 500}, + {limit: -1, offset: n, want: 0}, + // Limit and offset. + {limit: 100, offset: 100, want: 100}, + {limit: 1000, offset: 100, want: n - 100}, + {limit: 500, offset: 500, want: n - 500}, + } + for _, tt := range queryTests { + q := q.Limit(tt.limit).Offset(tt.offset) + wg.Add(1) + + go func(limit, offset, want int) { + defer wg.Done() + // Check Count returns the expected number of results. 
+ count, err := client.Count(ctx, q) + if err != nil { + t.Errorf("client.Count(limit=%d offset=%d): %v", limit, offset, err) + return + } + if count != want { + t.Errorf("Count(limit=%d offset=%d) returned %d, want %d", limit, offset, count, want) + } + + var got []SQChild + _, err = client.GetAll(ctx, q, &got) + if err != nil { + t.Errorf("client.GetAll(limit=%d offset=%d): %v", limit, offset, err) + return + } + if len(got) != want { + t.Errorf("GetAll(limit=%d offset=%d) returned %d, want %d", limit, offset, len(got), want) + } + for i, child := range got { + if got, want := child.I, i+offset; got != want { + t.Errorf("GetAll(limit=%d offset=%d) got[%d].I == %d; want %d", limit, offset, i, got, want) + break + } + } + }(tt.limit, tt.offset, tt.want) + } + + // Also check iterator cursor behaviour. + cursorTests := []struct { + limit, offset int // Query limit and offset. + count int // The number of times to call "next" + want int // The I value of the desired element, -1 for "Done". + }{ + // No limits. + {count: 0, limit: -1, want: 0}, + {count: 5, limit: -1, want: 5}, + {count: 500, limit: -1, want: 500}, + {count: 1000, limit: -1, want: -1}, // No more results. + // Limits. + {count: 5, limit: 5, want: 5}, + {count: 500, limit: 5, want: 5}, + {count: 1000, limit: 1000, want: -1}, // No more results. + // Offsets. + {count: 0, offset: 5, limit: -1, want: 5}, + {count: 5, offset: 5, limit: -1, want: 10}, + {count: 200, offset: 500, limit: -1, want: 700}, + {count: 200, offset: 1000, limit: -1, want: -1}, // No more results. + } + for _, tt := range cursorTests { + wg.Add(1) + + go func(count, limit, offset, want int) { + defer wg.Done() + + ctx := context.WithValue(ctx, ckey{}, fmt.Sprintf("c=%d,l=%d,o=%d", count, limit, offset)) + // Run iterator through count calls to Next. + it := client.Run(ctx, q.Limit(limit).Offset(offset).KeysOnly()) + for i := 0; i < count; i++ { + _, err := it.Next(nil) + if err == iterator.Done { + break + } + if err != nil { + t.Errorf("count=%d, limit=%d, offset=%d: it.Next failed at i=%d", count, limit, offset, i) + return + } + } + + // Grab the cursor. + cursor, err := it.Cursor() + if err != nil { + t.Errorf("count=%d, limit=%d, offset=%d: it.Cursor: %v", count, limit, offset, err) + return + } + + // Make a request for the next element. + it = client.Run(ctx, q.Limit(1).Start(cursor)) + var entity SQChild + _, err = it.Next(&entity) + switch { + case want == -1: + if err != iterator.Done { + t.Errorf("count=%d, limit=%d, offset=%d: it.Next from cursor %v, want Done", count, limit, offset, err) + } + case err != nil: + t.Errorf("count=%d, limit=%d, offset=%d: it.Next from cursor: %v, want nil", count, limit, offset, err) + case entity.I != want: + t.Errorf("count=%d, limit=%d, offset=%d: got.I = %d, want %d", count, limit, offset, entity.I, want) + } + }(tt.count, tt.limit, tt.offset, tt.want) + } + wg.Wait() +} + +func TestEventualConsistency(t *testing.T) { + // TODO(jba): either make this actually test eventual consistency, or + // delete it. Currently it behaves the same with or without the + // EventualConsistency call. 
+	ctx := context.Background()
+	client := newTestClient(ctx, t)
+	defer client.Close()
+
+	parent := NameKey("SQParent", "TestEventualConsistency"+suffix, nil)
+	now := timeNow.Truncate(time.Millisecond).Unix()
+	children := []*SQChild{
+		{I: 0, T: now, U: now},
+		{I: 1, T: now, U: now},
+		{I: 2, T: now, U: now},
+	}
+	query := NewQuery("SQChild").Ancestor(parent).Filter("T =", now).EventualConsistency()
+	testSmallQueries(t, ctx, client, parent, children, nil, func() {
+		got, err := client.Count(ctx, query)
+		if err != nil {
+			t.Fatalf("Count: %v", err)
+		}
+		if got < 0 || 3 < got {
+			t.Errorf("Count: got %d, want [0,3]", got)
+		}
+	})
+}
+
+func TestProjection(t *testing.T) {
+	ctx := context.Background()
+	client := newTestClient(ctx, t)
+	defer client.Close()
+
+	parent := NameKey("SQParent", "TestProjection"+suffix, nil)
+	now := timeNow.Truncate(time.Millisecond).Unix()
+	children := []*SQChild{
+		{I: 1 << 0, J: 100, T: now, U: now},
+		{I: 1 << 1, J: 100, T: now, U: now},
+		{I: 1 << 2, J: 200, T: now, U: now},
+		{I: 1 << 3, J: 300, T: now, U: now},
+		{I: 1 << 4, J: 300, T: now, U: now},
+	}
+	baseQuery := NewQuery("SQChild").Ancestor(parent).Filter("T=", now).Filter("J>", 150)
+	testSmallQueries(t, ctx, client, parent, children, []SQTestCase{
+		{
+			"project",
+			baseQuery.Project("J"),
+			3,
+			200 + 300 + 300,
+		},
+		{
+			"distinct",
+			baseQuery.Project("J").Distinct(),
+			2,
+			200 + 300,
+		},
+		{
+			"distinct on",
+			baseQuery.Project("J").DistinctOn("J"),
+			2,
+			200 + 300,
+		},
+		{
+			"project on meaningful (GD_WHEN) field",
+			baseQuery.Project("U"),
+			3,
+			0,
+		},
+	})
+}
+
+func TestAllocateIDs(t *testing.T) {
+	ctx := context.Background()
+	client := newTestClient(ctx, t)
+	defer client.Close()
+
+	keys := make([]*Key, 5)
+	for i := range keys {
+		keys[i] = IncompleteKey("AllocID", nil)
+	}
+	keys, err := client.AllocateIDs(ctx, keys)
+	if err != nil {
+		t.Errorf("AllocID #0 failed: %v", err)
+	}
+	if got := len(keys); got != 5 {
+		t.Errorf("Expected to allocate 5 keys, got %d", got)
+	}
+	for _, k := range keys {
+		if k.Incomplete() {
+			t.Errorf("Unexpected incomplete key found: %v", k)
+		}
+	}
+}
+
+func TestGetAllWithFieldMismatch(t *testing.T) {
+	ctx := context.Background()
+	client := newTestClient(ctx, t)
+	defer client.Close()
+
+	type Fat struct {
+		X, Y int
+	}
+	type Thin struct {
+		X int
+	}
+
+	// Ancestor queries (those within an entity group) are strongly consistent
+	// by default, which prevents a test from being flaky.
+	// See https://cloud.google.com/appengine/docs/go/datastore/queries#Go_Data_consistency
+	// for more information.
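+	// All reads below therefore go through an ancestor query rooted at parent.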
+ parent := NameKey("SQParent", "TestGetAllWithFieldMismatch"+suffix, nil) + putKeys := make([]*Key, 3) + for i := range putKeys { + putKeys[i] = IDKey("GetAllThing", int64(10+i), parent) + _, err := client.Put(ctx, putKeys[i], &Fat{X: 20 + i, Y: 30 + i}) + if err != nil { + t.Fatalf("client.Put: %v", err) + } + } + + var got []Thin + want := []Thin{ + {X: 20}, + {X: 21}, + {X: 22}, + } + getKeys, err := client.GetAll(ctx, NewQuery("GetAllThing").Ancestor(parent), &got) + if len(getKeys) != 3 && !testutil.Equal(getKeys, putKeys) { + t.Errorf("client.GetAll: keys differ\ngetKeys=%v\nputKeys=%v", getKeys, putKeys) + } + if !testutil.Equal(got, want) { + t.Errorf("client.GetAll: entities differ\ngot =%v\nwant=%v", got, want) + } + if _, ok := err.(*ErrFieldMismatch); !ok { + t.Errorf("client.GetAll: got err=%v, want ErrFieldMismatch", err) + } +} + +func TestKindlessQueries(t *testing.T) { + ctx := context.Background() + client := newTestClient(ctx, t) + defer client.Close() + + type Dee struct { + I int + Why string + } + type Dum struct { + I int + Pling string + } + + parent := NameKey("Tweedle", "tweedle"+suffix, nil) + + keys := []*Key{ + NameKey("Dee", "dee0", parent), + NameKey("Dum", "dum1", parent), + NameKey("Dum", "dum2", parent), + NameKey("Dum", "dum3", parent), + } + src := []interface{}{ + &Dee{1, "binary0001"}, + &Dum{2, "binary0010"}, + &Dum{4, "binary0100"}, + &Dum{8, "binary1000"}, + } + keys, err := client.PutMulti(ctx, keys, src) + if err != nil { + t.Fatalf("put: %v", err) + } + + testCases := []struct { + desc string + query *Query + want []int + wantErr string + }{ + { + desc: "Dee", + query: NewQuery("Dee"), + want: []int{1}, + }, + { + desc: "Doh", + query: NewQuery("Doh"), + want: nil}, + { + desc: "Dum", + query: NewQuery("Dum"), + want: []int{2, 4, 8}, + }, + { + desc: "", + query: NewQuery(""), + want: []int{1, 2, 4, 8}, + }, + { + desc: "Kindless filter", + query: NewQuery("").Filter("__key__ =", keys[2]), + want: []int{4}, + }, + { + desc: "Kindless order", + query: NewQuery("").Order("__key__"), + want: []int{1, 2, 4, 8}, + }, + { + desc: "Kindless bad filter", + query: NewQuery("").Filter("I =", 4), + wantErr: "kind is required", + }, + { + desc: "Kindless bad order", + query: NewQuery("").Order("-__key__"), + wantErr: "kind is required for all orders except __key__ ascending", + }, + } +loop: + for _, tc := range testCases { + q := tc.query.Ancestor(parent) + gotCount, err := client.Count(ctx, q) + if err != nil { + if tc.wantErr == "" || !strings.Contains(err.Error(), tc.wantErr) { + t.Errorf("count %q: err %v, want err %q", tc.desc, err, tc.wantErr) + } + continue + } + if tc.wantErr != "" { + t.Errorf("count %q: want err %q", tc.desc, tc.wantErr) + continue + } + if gotCount != len(tc.want) { + t.Errorf("count %q: got %d want %d", tc.desc, gotCount, len(tc.want)) + continue + } + var got []int + for iter := client.Run(ctx, q); ; { + var dst struct { + I int + Why, Pling string + } + _, err := iter.Next(&dst) + if err == iterator.Done { + break + } + if err != nil { + t.Errorf("iter.Next %q: %v", tc.desc, err) + continue loop + } + got = append(got, dst.I) + } + sort.Ints(got) + if !testutil.Equal(got, tc.want) { + t.Errorf("elems %q: got %+v want %+v", tc.desc, got, tc.want) + continue + } + } +} + +func TestTransaction(t *testing.T) { + ctx := context.Background() + client := newTestClient(ctx, t) + defer client.Close() + + type Counter struct { + N int + T time.Time + } + + bangErr := errors.New("bang") + tests := []struct { + desc string + 
causeConflict []bool + retErr []error + want int + wantErr error + }{ + { + desc: "3 attempts, no conflicts", + causeConflict: []bool{false}, + retErr: []error{nil}, + want: 11, + }, + { + desc: "1 attempt, user error", + causeConflict: []bool{false}, + retErr: []error{bangErr}, + wantErr: bangErr, + }, + { + desc: "2 attempts, 1 conflict", + causeConflict: []bool{true, false}, + retErr: []error{nil, nil}, + want: 13, // Each conflict increments by 2. + }, + { + desc: "3 attempts, 3 conflicts", + causeConflict: []bool{true, true, true}, + retErr: []error{nil, nil, nil}, + wantErr: ErrConcurrentTransaction, + }, + } + + for i, tt := range tests { + // Put a new counter. + c := &Counter{N: 10, T: timeNow} + key, err := client.Put(ctx, IncompleteKey("TransCounter", nil), c) + if err != nil { + t.Errorf("%s: client.Put: %v", tt.desc, err) + continue + } + defer client.Delete(ctx, key) + + // Increment the counter in a transaction. + // The test case can manually cause a conflict or return an + // error at each attempt. + var attempts int + _, err = client.RunInTransaction(ctx, func(tx *Transaction) error { + attempts++ + if attempts > len(tt.causeConflict) { + return fmt.Errorf("too many attempts. Got %d, max %d", attempts, len(tt.causeConflict)) + } + + var c Counter + if err := tx.Get(key, &c); err != nil { + return err + } + c.N++ + if _, err := tx.Put(key, &c); err != nil { + return err + } + + if tt.causeConflict[attempts-1] { + c.N += 1 + if _, err := client.Put(ctx, key, &c); err != nil { + return err + } + } + + return tt.retErr[attempts-1] + }, MaxAttempts(i)) + + // Check the error returned by RunInTransaction. + if err != tt.wantErr { + t.Errorf("%s: got err %v, want %v", tt.desc, err, tt.wantErr) + continue + } + if err != nil { + continue + } + + // Check the final value of the counter. + if err := client.Get(ctx, key, c); err != nil { + t.Errorf("%s: client.Get: %v", tt.desc, err) + continue + } + if c.N != tt.want { + t.Errorf("%s: counter N=%d, want N=%d", tt.desc, c.N, tt.want) + } + } +} + +func TestReadOnlyTransaction(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t, nil) + defer client.Close() + + type value struct{ N int } + + // Put a value. + const n = 5 + v := &value{N: n} + key, err := client.Put(ctx, IncompleteKey("roTxn", nil), v) + if err != nil { + t.Fatal(err) + } + defer client.Delete(ctx, key) + + // Read it from a read-only transaction. + _, err = client.RunInTransaction(ctx, func(tx *Transaction) error { + if err := tx.Get(key, v); err != nil { + return err + } + return nil + }, ReadOnly) + if err != nil { + t.Fatal(err) + } + if v.N != n { + t.Fatalf("got %d, want %d", v.N, n) + } + + // Attempting to write from a read-only transaction is an error. + _, err = client.RunInTransaction(ctx, func(tx *Transaction) error { + if _, err := tx.Put(key, v); err != nil { + return err + } + return nil + }, ReadOnly) + if err == nil { + t.Fatal("got nil, want error") + } +} + +func TestNilPointers(t *testing.T) { + ctx := context.Background() + client := newTestClient(ctx, t) + defer client.Close() + + type X struct { + S string + } + + src := []*X{{"zero"}, {"one"}} + keys := []*Key{IncompleteKey("NilX", nil), IncompleteKey("NilX", nil)} + keys, err := client.PutMulti(ctx, keys, src) + if err != nil { + t.Fatalf("PutMulti: %v", err) + } + + // It's okay to store into a slice of nil *X. 
+	xs := make([]*X, 2)
+	if err := client.GetMulti(ctx, keys, xs); err != nil {
+		t.Errorf("GetMulti: %v", err)
+	} else if !testutil.Equal(xs, src) {
+		t.Errorf("GetMulti fetched %v, want %v", xs, src)
+	}
+
+	// It isn't okay to store into a single nil *X.
+	var x0 *X
+	if err, want := client.Get(ctx, keys[0], x0), ErrInvalidEntityType; err != want {
+		t.Errorf("Get: err %v; want %v", err, want)
+	}
+
+	// Test that deleting with duplicate keys works.
+	keys = append(keys, keys...)
+	if err := client.DeleteMulti(ctx, keys); err != nil {
+		t.Errorf("Delete: %v", err)
+	}
+}
+
+func TestNestedRepeatedElementNoIndex(t *testing.T) {
+	ctx := context.Background()
+	client := newTestClient(ctx, t)
+	defer client.Close()
+
+	type Inner struct {
+		Name  string
+		Value string `datastore:",noindex"`
+	}
+	type Outer struct {
+		Config []Inner
+	}
+	m := &Outer{
+		Config: []Inner{
+			{Name: "short", Value: "a"},
+			{Name: "long", Value: strings.Repeat("a", 2000)},
+		},
+	}
+
+	key := NameKey("Nested", "Nested"+suffix, nil)
+	if _, err := client.Put(ctx, key, m); err != nil {
+		t.Fatalf("client.Put: %v", err)
+	}
+	if err := client.Delete(ctx, key); err != nil {
+		t.Fatalf("client.Delete: %v", err)
+	}
+}
+
+func TestPointerFields(t *testing.T) {
+	ctx := context.Background()
+	client := newTestClient(ctx, t)
+	defer client.Close()
+
+	want := populatedPointers()
+	key, err := client.Put(ctx, IncompleteKey("pointers", nil), want)
+	if err != nil {
+		t.Fatal(err)
+	}
+	var got Pointers
+	if err := client.Get(ctx, key, &got); err != nil {
+		t.Fatal(err)
+	}
+	if got.Pi == nil || *got.Pi != *want.Pi {
+		t.Errorf("Pi: got %v, want %v", got.Pi, *want.Pi)
+	}
+	if got.Ps == nil || *got.Ps != *want.Ps {
+		t.Errorf("Ps: got %v, want %v", got.Ps, *want.Ps)
+	}
+	if got.Pb == nil || *got.Pb != *want.Pb {
+		t.Errorf("Pb: got %v, want %v", got.Pb, *want.Pb)
+	}
+	if got.Pf == nil || *got.Pf != *want.Pf {
+		t.Errorf("Pf: got %v, want %v", got.Pf, *want.Pf)
+	}
+	if got.Pg == nil || *got.Pg != *want.Pg {
+		t.Errorf("Pg: got %v, want %v", got.Pg, *want.Pg)
+	}
+	if got.Pt == nil || !got.Pt.Equal(*want.Pt) {
+		t.Errorf("Pt: got %v, want %v", got.Pt, *want.Pt)
+	}
+}
+
+func TestMutate(t *testing.T) {
+	// test Client.Mutate
+	testMutate(t, func(ctx context.Context, client *Client, muts ...*Mutation) ([]*Key, error) {
+		return client.Mutate(ctx, muts...)
+	})
+	// test Transaction.Mutate
+	testMutate(t, func(ctx context.Context, client *Client, muts ...*Mutation) ([]*Key, error) {
+		var pkeys []*PendingKey
+		commit, err := client.RunInTransaction(ctx, func(tx *Transaction) error {
+			var err error
+			pkeys, err = tx.Mutate(muts...)
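+			// Inside a transaction, Mutate returns PendingKeys; they are
+			// resolved into real keys below via the Commit returned by
+			// RunInTransaction (commit.Key).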
+			return err
+		})
+		if err != nil {
+			return nil, err
+		}
+		var keys []*Key
+		for _, pk := range pkeys {
+			keys = append(keys, commit.Key(pk))
+		}
+		return keys, nil
+	})
+}
+
+func testMutate(t *testing.T, mutate func(ctx context.Context, client *Client, muts ...*Mutation) ([]*Key, error)) {
+	ctx := context.Background()
+	client := newTestClient(ctx, t)
+	defer client.Close()
+
+	type T struct{ I int }
+
+	check := func(k *Key, want interface{}) {
+		var x T
+		err := client.Get(ctx, k, &x)
+		switch want := want.(type) {
+		case error:
+			if err != want {
+				t.Errorf("key %s: got error %v, want %v", k, err, want)
+			}
+		case int:
+			if err != nil {
+				t.Fatalf("key %s: %v", k, err)
+			}
+			if x.I != want {
+				t.Errorf("key %s: got %d, want %d", k, x.I, want)
+			}
+		default:
+			panic("check: bad arg")
+		}
+	}
+
+	keys, err := mutate(ctx, client,
+		NewInsert(IncompleteKey("t", nil), &T{1}),
+		NewUpsert(IncompleteKey("t", nil), &T{2}),
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+	check(keys[0], 1)
+	check(keys[1], 2)
+
+	_, err = mutate(ctx, client,
+		NewUpdate(keys[0], &T{3}),
+		NewDelete(keys[1]),
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+	check(keys[0], 3)
+	check(keys[1], ErrNoSuchEntity)
+
+	_, err = mutate(ctx, client, NewInsert(keys[0], &T{4}))
+	if got, want := status.Code(err), codes.AlreadyExists; got != want {
+		t.Errorf("Insert existing key: got %s, want %s", got, want)
+	}
+
+	_, err = mutate(ctx, client, NewUpdate(keys[1], &T{4}))
+	if got, want := status.Code(err), codes.NotFound; got != want {
+		t.Errorf("Update non-existing key: got %s, want %s", got, want)
+	}
+}
diff --git a/vendor/cloud.google.com/go/datastore/key.go b/vendor/cloud.google.com/go/datastore/key.go
new file mode 100644
index 0000000..b9f2cf5
--- /dev/null
+++ b/vendor/cloud.google.com/go/datastore/key.go
@@ -0,0 +1,280 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package datastore
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/gob"
+	"errors"
+	"strconv"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+	pb "google.golang.org/genproto/googleapis/datastore/v1"
+)
+
+// Key represents the datastore key for a stored entity.
+type Key struct {
+	// Kind cannot be empty.
+	Kind string
+	// Either ID or Name must be zero for the Key to be valid.
+	// If both are zero, the Key is incomplete.
+	ID   int64
+	Name string
+	// Parent must either be a complete Key or nil.
+	Parent *Key
+
+	// Namespace provides the ability to partition your data for multiple
+	// tenants. In most cases, it is not necessary to specify a namespace.
+	// See docs on datastore multitenancy for details:
+	// https://cloud.google.com/datastore/docs/concepts/multitenancy
+	Namespace string
+}
+
+// Incomplete reports whether the key does not refer to a stored entity.
+func (k *Key) Incomplete() bool {
+	return k.Name == "" && k.ID == 0
+}
+
+// valid returns whether the key is valid.
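+// A key is valid when every key in its ancestor chain has a non-empty Kind,
+// sets at most one of Name and ID, has a complete parent (if any), and
+// shares its parent's Namespace.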
+func (k *Key) valid() bool { + if k == nil { + return false + } + for ; k != nil; k = k.Parent { + if k.Kind == "" { + return false + } + if k.Name != "" && k.ID != 0 { + return false + } + if k.Parent != nil { + if k.Parent.Incomplete() { + return false + } + if k.Parent.Namespace != k.Namespace { + return false + } + } + } + return true +} + +// Equal reports whether two keys are equal. Two keys are equal if they are +// both nil, or if their kinds, IDs, names, namespaces and parents are equal. +func (k *Key) Equal(o *Key) bool { + for { + if k == nil || o == nil { + return k == o // if either is nil, both must be nil + } + if k.Namespace != o.Namespace || k.Name != o.Name || k.ID != o.ID || k.Kind != o.Kind { + return false + } + if k.Parent == nil && o.Parent == nil { + return true + } + k = k.Parent + o = o.Parent + } +} + +// marshal marshals the key's string representation to the buffer. +func (k *Key) marshal(b *bytes.Buffer) { + if k.Parent != nil { + k.Parent.marshal(b) + } + b.WriteByte('/') + b.WriteString(k.Kind) + b.WriteByte(',') + if k.Name != "" { + b.WriteString(k.Name) + } else { + b.WriteString(strconv.FormatInt(k.ID, 10)) + } +} + +// String returns a string representation of the key. +func (k *Key) String() string { + if k == nil { + return "" + } + b := bytes.NewBuffer(make([]byte, 0, 512)) + k.marshal(b) + return b.String() +} + +// Note: Fields not renamed compared to appengine gobKey struct +// This ensures gobs created by appengine can be read here, and vice/versa +type gobKey struct { + Kind string + StringID string + IntID int64 + Parent *gobKey + AppID string + Namespace string +} + +func keyToGobKey(k *Key) *gobKey { + if k == nil { + return nil + } + return &gobKey{ + Kind: k.Kind, + StringID: k.Name, + IntID: k.ID, + Parent: keyToGobKey(k.Parent), + Namespace: k.Namespace, + } +} + +func gobKeyToKey(gk *gobKey) *Key { + if gk == nil { + return nil + } + return &Key{ + Kind: gk.Kind, + Name: gk.StringID, + ID: gk.IntID, + Parent: gobKeyToKey(gk.Parent), + Namespace: gk.Namespace, + } +} + +// GobEncode marshals the key into a sequence of bytes +// using an encoding/gob.Encoder. +func (k *Key) GobEncode() ([]byte, error) { + buf := new(bytes.Buffer) + if err := gob.NewEncoder(buf).Encode(keyToGobKey(k)); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// GobDecode unmarshals a sequence of bytes using an encoding/gob.Decoder. +func (k *Key) GobDecode(buf []byte) error { + gk := new(gobKey) + if err := gob.NewDecoder(bytes.NewBuffer(buf)).Decode(gk); err != nil { + return err + } + *k = *gobKeyToKey(gk) + return nil +} + +// MarshalJSON marshals the key into JSON. +func (k *Key) MarshalJSON() ([]byte, error) { + return []byte(`"` + k.Encode() + `"`), nil +} + +// UnmarshalJSON unmarshals a key JSON object into a Key. +func (k *Key) UnmarshalJSON(buf []byte) error { + if len(buf) < 2 || buf[0] != '"' || buf[len(buf)-1] != '"' { + return errors.New("datastore: bad JSON key") + } + k2, err := DecodeKey(string(buf[1 : len(buf)-1])) + if err != nil { + return err + } + *k = *k2 + return nil +} + +// Encode returns an opaque representation of the key +// suitable for use in HTML and URLs. +// This is compatible with the Python and Java runtimes. +func (k *Key) Encode() string { + pKey := keyToProto(k) + + b, err := proto.Marshal(pKey) + if err != nil { + panic(err) + } + + // Trailing padding is stripped. 
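+	// (DecodeKey re-adds the padding before base64-decoding.)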
+ return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// DecodeKey decodes a key from the opaque representation returned by Encode. +func DecodeKey(encoded string) (*Key, error) { + // Re-add padding. + if m := len(encoded) % 4; m != 0 { + encoded += strings.Repeat("=", 4-m) + } + + b, err := base64.URLEncoding.DecodeString(encoded) + if err != nil { + return nil, err + } + + pKey := new(pb.Key) + if err := proto.Unmarshal(b, pKey); err != nil { + return nil, err + } + return protoToKey(pKey) +} + +// AllocateIDs accepts a slice of incomplete keys and returns a +// slice of complete keys that are guaranteed to be valid in the datastore. +func (c *Client) AllocateIDs(ctx context.Context, keys []*Key) ([]*Key, error) { + if keys == nil { + return nil, nil + } + + req := &pb.AllocateIdsRequest{ + ProjectId: c.dataset, + Keys: multiKeyToProto(keys), + } + resp, err := c.client.AllocateIds(ctx, req) + if err != nil { + return nil, err + } + + return multiProtoToKey(resp.Keys) +} + +// IncompleteKey creates a new incomplete key. +// The supplied kind cannot be empty. +// The namespace of the new key is empty. +func IncompleteKey(kind string, parent *Key) *Key { + return &Key{ + Kind: kind, + Parent: parent, + } +} + +// NameKey creates a new key with a name. +// The supplied kind cannot be empty. +// The supplied parent must either be a complete key or nil. +// The namespace of the new key is empty. +func NameKey(kind, name string, parent *Key) *Key { + return &Key{ + Kind: kind, + Name: name, + Parent: parent, + } +} + +// IDKey creates a new key with an ID. +// The supplied kind cannot be empty. +// The supplied parent must either be a complete key or nil. +// The namespace of the new key is empty. +func IDKey(kind string, id int64, parent *Key) *Key { + return &Key{ + Kind: kind, + ID: id, + Parent: parent, + } +} diff --git a/vendor/cloud.google.com/go/datastore/key_test.go b/vendor/cloud.google.com/go/datastore/key_test.go new file mode 100644 index 0000000..5f2ddcb --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/key_test.go @@ -0,0 +1,210 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package datastore + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "testing" +) + +func TestEqual(t *testing.T) { + testCases := []struct { + x, y *Key + equal bool + }{ + { + x: nil, + y: nil, + equal: true, + }, + { + x: &Key{Kind: "kindA"}, + y: &Key{Kind: "kindA"}, + equal: true, + }, + { + x: &Key{Kind: "kindA", Name: "nameA"}, + y: &Key{Kind: "kindA", Name: "nameA"}, + equal: true, + }, + { + x: &Key{Kind: "kindA", Name: "nameA", Namespace: "gopherspace"}, + y: &Key{Kind: "kindA", Name: "nameA", Namespace: "gopherspace"}, + equal: true, + }, + { + x: &Key{Kind: "kindA", ID: 1337, Parent: &Key{Kind: "kindX", Name: "nameX"}}, + y: &Key{Kind: "kindA", ID: 1337, Parent: &Key{Kind: "kindX", Name: "nameX"}}, + equal: true, + }, + { + x: &Key{Kind: "kindA", Name: "nameA"}, + y: &Key{Kind: "kindB", Name: "nameA"}, + equal: false, + }, + { + x: &Key{Kind: "kindA", Name: "nameA"}, + y: &Key{Kind: "kindA", Name: "nameB"}, + equal: false, + }, + { + x: &Key{Kind: "kindA", Name: "nameA"}, + y: &Key{Kind: "kindA", ID: 1337}, + equal: false, + }, + { + x: &Key{Kind: "kindA", Name: "nameA"}, + y: &Key{Kind: "kindA", Name: "nameA", Namespace: "gopherspace"}, + equal: false, + }, + { + x: &Key{Kind: "kindA", ID: 1337, Parent: &Key{Kind: "kindX", Name: "nameX"}}, + y: &Key{Kind: "kindA", ID: 1337, Parent: &Key{Kind: "kindY", Name: "nameX"}}, + equal: false, + }, + { + x: &Key{Kind: "kindA", ID: 1337, Parent: &Key{Kind: "kindX", Name: "nameX"}}, + y: &Key{Kind: "kindA", ID: 1337}, + equal: false, + }, + } + + for _, tt := range testCases { + if got := tt.x.Equal(tt.y); got != tt.equal { + t.Errorf("Equal(%v, %v) = %t; want %t", tt.x, tt.y, got, tt.equal) + } + if got := tt.y.Equal(tt.x); got != tt.equal { + t.Errorf("Equal(%v, %v) = %t; want %t", tt.y, tt.x, got, tt.equal) + } + } +} + +func TestEncoding(t *testing.T) { + testCases := []struct { + k *Key + valid bool + }{ + { + k: nil, + valid: false, + }, + { + k: &Key{}, + valid: false, + }, + { + k: &Key{Kind: "kindA"}, + valid: true, + }, + { + k: &Key{Kind: "kindA", Namespace: "gopherspace"}, + valid: true, + }, + { + k: &Key{Kind: "kindA", Name: "nameA"}, + valid: true, + }, + { + k: &Key{Kind: "kindA", ID: 1337}, + valid: true, + }, + { + k: &Key{Kind: "kindA", Name: "nameA", ID: 1337}, + valid: false, + }, + { + k: &Key{Kind: "kindA", Parent: &Key{Kind: "kindB", Name: "nameB"}}, + valid: true, + }, + { + k: &Key{Kind: "kindA", Parent: &Key{Kind: "kindB"}}, + valid: false, + }, + { + k: &Key{Kind: "kindA", Parent: &Key{Kind: "kindB", Name: "nameB", Namespace: "gopherspace"}}, + valid: false, + }, + } + + for _, tt := range testCases { + if got := tt.k.valid(); got != tt.valid { + t.Errorf("valid(%v) = %t; want %t", tt.k, got, tt.valid) + } + + // Check encoding/decoding for valid keys. 
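+		// Each valid key should round-trip unchanged through
+		// Encode/DecodeKey, JSON, and gob below.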
+ if !tt.valid { + continue + } + enc := tt.k.Encode() + dec, err := DecodeKey(enc) + if err != nil { + t.Errorf("DecodeKey(%q) from %v: %v", enc, tt.k, err) + continue + } + if !tt.k.Equal(dec) { + t.Logf("Proto: %s", keyToProto(tt.k)) + t.Errorf("Decoded key %v not equal to %v", dec, tt.k) + } + + b, err := json.Marshal(tt.k) + if err != nil { + t.Errorf("json.Marshal(%v): %v", tt.k, err) + continue + } + key := &Key{} + if err := json.Unmarshal(b, key); err != nil { + t.Errorf("json.Unmarshal(%s) for key %v: %v", b, tt.k, err) + continue + } + if !tt.k.Equal(key) { + t.Errorf("JSON decoded key %v not equal to %v", dec, tt.k) + } + + buf := &bytes.Buffer{} + gobEnc := gob.NewEncoder(buf) + if err := gobEnc.Encode(tt.k); err != nil { + t.Errorf("gobEnc.Encode(%v): %v", tt.k, err) + continue + } + gobDec := gob.NewDecoder(buf) + key = &Key{} + if err := gobDec.Decode(key); err != nil { + t.Errorf("gobDec.Decode() for key %v: %v", tt.k, err) + } + if !tt.k.Equal(key) { + t.Errorf("gob decoded key %v not equal to %v", dec, tt.k) + } + } +} + +func TestInvalidKeyDecode(t *testing.T) { + // Check that decoding an invalid key returns an err and doesn't panic. + enc := NameKey("Kind", "Foo", nil).Encode() + + invalid := []string{ + "", + "Laboratorio", + enc + "Junk", + enc[:len(enc)-4], + } + for _, enc := range invalid { + key, err := DecodeKey(enc) + if err == nil || key != nil { + t.Errorf("DecodeKey(%q) = %v, %v; want nil, error", enc, key, err) + } + } +} diff --git a/vendor/cloud.google.com/go/datastore/load.go b/vendor/cloud.google.com/go/datastore/load.go new file mode 100644 index 0000000..652b0da --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/load.go @@ -0,0 +1,512 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "fmt" + "reflect" + "strings" + "time" + + "cloud.google.com/go/internal/fields" + pb "google.golang.org/genproto/googleapis/datastore/v1" +) + +var ( + typeOfByteSlice = reflect.TypeOf([]byte(nil)) + typeOfTime = reflect.TypeOf(time.Time{}) + typeOfGeoPoint = reflect.TypeOf(GeoPoint{}) + typeOfKeyPtr = reflect.TypeOf(&Key{}) + typeOfEntityPtr = reflect.TypeOf(&Entity{}) +) + +// typeMismatchReason returns a string explaining why the property p could not +// be stored in an entity field of type v.Type(). 
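+// For example, loading an int64 property into a string field produces
+// "type mismatch: int versus string".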
+func typeMismatchReason(p Property, v reflect.Value) string { + entityType := "empty" + switch p.Value.(type) { + case int64: + entityType = "int" + case bool: + entityType = "bool" + case string: + entityType = "string" + case float64: + entityType = "float" + case *Key: + entityType = "*datastore.Key" + case *Entity: + entityType = "*datastore.Entity" + case GeoPoint: + entityType = "GeoPoint" + case time.Time: + entityType = "time.Time" + case []byte: + entityType = "[]byte" + } + + return fmt.Sprintf("type mismatch: %s versus %v", entityType, v.Type()) +} + +func overflowReason(x interface{}, v reflect.Value) string { + return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type()) +} + +type propertyLoader struct { + // m holds the number of times a substruct field like "Foo.Bar.Baz" has + // been seen so far. The map is constructed lazily. + m map[string]int +} + +func (l *propertyLoader) load(codec fields.List, structValue reflect.Value, p Property, prev map[string]struct{}) string { + sl, ok := p.Value.([]interface{}) + if !ok { + return l.loadOneElement(codec, structValue, p, prev) + } + for _, val := range sl { + p.Value = val + if errStr := l.loadOneElement(codec, structValue, p, prev); errStr != "" { + return errStr + } + } + return "" +} + +// loadOneElement loads the value of Property p into structValue based on the provided +// codec. codec is used to find the field in structValue into which p should be loaded. +// prev is the set of property names already seen for structValue. +func (l *propertyLoader) loadOneElement(codec fields.List, structValue reflect.Value, p Property, prev map[string]struct{}) string { + var sliceOk bool + var sliceIndex int + var v reflect.Value + + name := p.Name + fieldNames := strings.Split(name, ".") + + for len(fieldNames) > 0 { + var field *fields.Field + + // Start by trying to find a field with name. If none found, + // cut off the last field (delimited by ".") and find its parent + // in the codec. + // eg. for name "A.B.C.D", split off "A.B.C" and try to + // find a field in the codec with this name. + // Loop again with "A.B", etc. + for i := len(fieldNames); i > 0; i-- { + parent := strings.Join(fieldNames[:i], ".") + field = codec.Match(parent) + if field != nil { + fieldNames = fieldNames[i:] + break + } + } + + // If we never found a matching field in the codec, return + // error message. + if field == nil { + return "no such struct field" + } + + v = initField(structValue, field.Index) + if !v.IsValid() { + return "no such struct field" + } + if !v.CanSet() { + return "cannot set struct field" + } + + // If field implements PLS, we delegate loading to the PLS's Load early, + // and stop iterating through fields. + ok, err := plsFieldLoad(v, p, fieldNames) + if err != nil { + return err.Error() + } + if ok { + return "" + } + + if field.Type.Kind() == reflect.Struct { + codec, err = structCache.Fields(field.Type) + if err != nil { + return err.Error() + } + structValue = v + } + + // If the element is a slice, we need to accommodate it. + if v.Kind() == reflect.Slice && v.Type() != typeOfByteSlice { + if l.m == nil { + l.m = make(map[string]int) + } + sliceIndex = l.m[p.Name] + l.m[p.Name] = sliceIndex + 1 + for v.Len() <= sliceIndex { + v.Set(reflect.Append(v, reflect.New(v.Type().Elem()).Elem())) + } + structValue = v.Index(sliceIndex) + + // If structValue implements PLS, we delegate loading to the PLS's + // Load early, and stop iterating through fields. 
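+			// (This mirrors the PLS check on v above, applied to the
+			// current slice element.)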
+			ok, err := plsFieldLoad(structValue, p, fieldNames)
+			if err != nil {
+				return err.Error()
+			}
+			if ok {
+				return ""
+			}
+
+			if structValue.Type().Kind() == reflect.Struct {
+				codec, err = structCache.Fields(structValue.Type())
+				if err != nil {
+					return err.Error()
+				}
+			}
+			sliceOk = true
+		}
+	}
+
+	var slice reflect.Value
+	if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
+		slice = v
+		v = reflect.New(v.Type().Elem()).Elem()
+	} else if _, ok := prev[p.Name]; ok && !sliceOk {
+		// Zero out the field that was set previously; it turns out
+		// it's a slice and we don't know what to do with it.
+		v.Set(reflect.Zero(v.Type()))
+		return "multiple-valued property requires a slice field type"
+	}
+
+	prev[p.Name] = struct{}{}
+
+	if errReason := setVal(v, p); errReason != "" {
+		// Set the slice back to its zero value.
+		if slice.IsValid() {
+			slice.Set(reflect.Zero(slice.Type()))
+		}
+		return errReason
+	}
+
+	if slice.IsValid() {
+		slice.Index(sliceIndex).Set(v)
+	}
+
+	return ""
+}
+
+// plsFieldLoad first tries to convert v's value to a PLS, then v's addressed
+// value to a PLS. If neither succeeds, plsFieldLoad returns false for the
+// first return value. Otherwise, the first return value will be true.
+// If v is successfully converted to a PLS, plsFieldLoad will then try to Load
+// the property p into v (by way of the PLS's Load method).
+//
+// If the field v has been flattened, the Property's name must be altered
+// before calling Load to reflect the field v.
+// For example, if our original field name was "A.B.C.D",
+// and at this point in iteration we had initialized the field
+// corresponding to "A" and have moved into the struct, so that now
+// v corresponds to the field named "B", then we want to let the
+// PLS handle this field (B)'s subfields ("C", "D"),
+// so we send the property to the PLS's Load, renamed to "C.D".
+//
+// If subfields are present, the field v has been flattened.
+func plsFieldLoad(v reflect.Value, p Property, subfields []string) (ok bool, err error) {
+	vpls, err := plsForLoad(v)
+	if err != nil {
+		return false, err
+	}
+
+	if vpls == nil {
+		return false, nil
+	}
+
+	// If Entity, load properties as well as key.
+	if e, ok := p.Value.(*Entity); ok {
+		err = loadEntity(vpls, e)
+		return true, err
+	}
+
+	// If flattened, we must alter the property's name to reflect
+	// the field v.
+	if len(subfields) > 0 {
+		p.Name = strings.Join(subfields, ".")
+	}
+
+	return true, vpls.Load([]Property{p})
+}
+
+// setVal sets 'v' to the value of the Property 'p'.
+func setVal(v reflect.Value, p Property) (s string) {
+	pValue := p.Value
+	switch v.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		x, ok := pValue.(int64)
+		if !ok && pValue != nil {
+			return typeMismatchReason(p, v)
+		}
+		if v.OverflowInt(x) {
+			return overflowReason(x, v)
+		}
+		v.SetInt(x)
+	case reflect.Bool:
+		x, ok := pValue.(bool)
+		if !ok && pValue != nil {
+			return typeMismatchReason(p, v)
+		}
+		v.SetBool(x)
+	case reflect.String:
+		x, ok := pValue.(string)
+		if !ok && pValue != nil {
+			return typeMismatchReason(p, v)
+		}
+		v.SetString(x)
+	case reflect.Float32, reflect.Float64:
+		x, ok := pValue.(float64)
+		if !ok && pValue != nil {
+			return typeMismatchReason(p, v)
+		}
+		if v.OverflowFloat(x) {
+			return overflowReason(x, v)
+		}
+		v.SetFloat(x)
+	case reflect.Ptr:
+		// v must be a pointer to either a Key, an Entity, or one of the supported basic types.
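+		// (The supported basic types are the ones handled in the switch
+		// below: int64, float64, bool, string, GeoPoint and time.Time.)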
+ if v.Type() != typeOfKeyPtr && v.Type().Elem().Kind() != reflect.Struct && !isValidPointerType(v.Type().Elem()) { + return typeMismatchReason(p, v) + } + + if pValue == nil { + // If v is populated already, set it to nil. + if !v.IsNil() { + v.Set(reflect.New(v.Type()).Elem()) + } + return "" + } + + if x, ok := p.Value.(*Key); ok { + if _, ok := v.Interface().(*Key); !ok { + return typeMismatchReason(p, v) + } + v.Set(reflect.ValueOf(x)) + return "" + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + switch x := pValue.(type) { + case *Entity: + err := loadEntity(v.Interface(), x) + if err != nil { + return err.Error() + } + case int64: + if v.Elem().OverflowInt(x) { + return overflowReason(x, v.Elem()) + } + v.Elem().SetInt(x) + case float64: + if v.Elem().OverflowFloat(x) { + return overflowReason(x, v.Elem()) + } + v.Elem().SetFloat(x) + case bool: + v.Elem().SetBool(x) + case string: + v.Elem().SetString(x) + case GeoPoint, time.Time: + v.Elem().Set(reflect.ValueOf(x)) + default: + return typeMismatchReason(p, v) + } + case reflect.Struct: + switch v.Type() { + case typeOfTime: + x, ok := pValue.(time.Time) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + v.Set(reflect.ValueOf(x)) + case typeOfGeoPoint: + x, ok := pValue.(GeoPoint) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + v.Set(reflect.ValueOf(x)) + default: + ent, ok := pValue.(*Entity) + if !ok { + return typeMismatchReason(p, v) + } + err := loadEntity(v.Addr().Interface(), ent) + if err != nil { + return err.Error() + } + } + case reflect.Slice: + x, ok := pValue.([]byte) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + if v.Type().Elem().Kind() != reflect.Uint8 { + return typeMismatchReason(p, v) + } + v.SetBytes(x) + default: + return typeMismatchReason(p, v) + } + return "" +} + +// initField is similar to reflect's Value.FieldByIndex, in that it +// returns the nested struct field corresponding to index, but it +// initialises any nil pointers encountered when traversing the structure. +func initField(val reflect.Value, index []int) reflect.Value { + for _, i := range index[:len(index)-1] { + val = val.Field(i) + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + val = val.Elem() + } + } + return val.Field(index[len(index)-1]) +} + +// loadEntityProto loads an EntityProto into PropertyLoadSaver or struct pointer. +func loadEntityProto(dst interface{}, src *pb.Entity) error { + ent, err := protoToEntity(src) + if err != nil { + return err + } + return loadEntity(dst, ent) +} + +func loadEntity(dst interface{}, ent *Entity) error { + if pls, ok := dst.(PropertyLoadSaver); ok { + err := pls.Load(ent.Properties) + if err != nil { + return err + } + if e, ok := dst.(KeyLoader); ok { + err = e.LoadKey(ent.Key) + } + return err + } + return loadEntityToStruct(dst, ent) +} + +func loadEntityToStruct(dst interface{}, ent *Entity) error { + pls, err := newStructPLS(dst) + if err != nil { + return err + } + // Load properties. + err = pls.Load(ent.Properties) + if err != nil { + return err + } + // Load key. 
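+	// If the struct has a field tagged `datastore:"__key__"`, set it to
+	// the entity's key.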
+ keyField := pls.codec.Match(keyFieldName) + if keyField != nil && ent.Key != nil { + pls.v.FieldByIndex(keyField.Index).Set(reflect.ValueOf(ent.Key)) + } + + return nil +} + +func (s structPLS) Load(props []Property) error { + var fieldName, errReason string + var l propertyLoader + + prev := make(map[string]struct{}) + for _, p := range props { + if errStr := l.load(s.codec, s.v, p, prev); errStr != "" { + // We don't return early, as we try to load as many properties as possible. + // It is valid to load an entity into a struct that cannot fully represent it. + // That case returns an error, but the caller is free to ignore it. + fieldName, errReason = p.Name, errStr + } + } + if errReason != "" { + return &ErrFieldMismatch{ + StructType: s.v.Type(), + FieldName: fieldName, + Reason: errReason, + } + } + return nil +} + +func protoToEntity(src *pb.Entity) (*Entity, error) { + props := make([]Property, 0, len(src.Properties)) + for name, val := range src.Properties { + v, err := propToValue(val) + if err != nil { + return nil, err + } + props = append(props, Property{ + Name: name, + Value: v, + NoIndex: val.ExcludeFromIndexes, + }) + } + var key *Key + if src.Key != nil { + // Ignore any error, since nested entity values + // are allowed to have an invalid key. + key, _ = protoToKey(src.Key) + } + + return &Entity{key, props}, nil +} + +// propToValue returns a Go value that represents the PropertyValue. For +// example, a TimestampValue becomes a time.Time. +func propToValue(v *pb.Value) (interface{}, error) { + switch v := v.ValueType.(type) { + case *pb.Value_NullValue: + return nil, nil + case *pb.Value_BooleanValue: + return v.BooleanValue, nil + case *pb.Value_IntegerValue: + return v.IntegerValue, nil + case *pb.Value_DoubleValue: + return v.DoubleValue, nil + case *pb.Value_TimestampValue: + return time.Unix(v.TimestampValue.Seconds, int64(v.TimestampValue.Nanos)), nil + case *pb.Value_KeyValue: + return protoToKey(v.KeyValue) + case *pb.Value_StringValue: + return v.StringValue, nil + case *pb.Value_BlobValue: + return []byte(v.BlobValue), nil + case *pb.Value_GeoPointValue: + return GeoPoint{Lat: v.GeoPointValue.Latitude, Lng: v.GeoPointValue.Longitude}, nil + case *pb.Value_EntityValue: + return protoToEntity(v.EntityValue) + case *pb.Value_ArrayValue: + arr := make([]interface{}, 0, len(v.ArrayValue.Values)) + for _, v := range v.ArrayValue.Values { + vv, err := propToValue(v) + if err != nil { + return nil, err + } + arr = append(arr, vv) + } + return arr, nil + default: + return nil, nil + } +} diff --git a/vendor/cloud.google.com/go/datastore/load_test.go b/vendor/cloud.google.com/go/datastore/load_test.go new file mode 100644 index 0000000..1755c1c --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/load_test.go @@ -0,0 +1,886 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package datastore + +import ( + "reflect" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + + pb "google.golang.org/genproto/googleapis/datastore/v1" +) + +type Simple struct { + I int64 +} + +type SimpleWithTag struct { + I int64 `datastore:"II"` +} + +type NestedSimpleWithTag struct { + A SimpleWithTag `datastore:"AA"` +} + +type NestedSliceOfSimple struct { + A []Simple +} + +type SimpleTwoFields struct { + S string + SS string +} + +type NestedSimpleAnonymous struct { + Simple + X string +} + +type NestedSimple struct { + A Simple + I int +} + +type NestedSimple1 struct { + A Simple + X string +} + +type NestedSimple2X struct { + AA NestedSimple + A SimpleTwoFields + S string +} + +type BDotB struct { + B string `datastore:"B.B"` +} + +type ABDotB struct { + A BDotB +} + +type MultiAnonymous struct { + Simple + SimpleTwoFields + X string +} + +func TestLoadEntityNestedLegacy(t *testing.T) { + testCases := []struct { + desc string + src *pb.Entity + want interface{} + }{ + { + desc: "nested", + src: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "X": {ValueType: &pb.Value_StringValue{StringValue: "two"}}, + "A.I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, + }, + }, + want: &NestedSimple1{ + A: Simple{I: 2}, + X: "two", + }, + }, + { + desc: "nested with tag", + src: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "AA.II": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, + }, + }, + want: &NestedSimpleWithTag{ + A: SimpleWithTag{I: 2}, + }, + }, + { + desc: "nested with anonymous struct field", + src: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "X": {ValueType: &pb.Value_StringValue{StringValue: "two"}}, + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, + }, + }, + want: &NestedSimpleAnonymous{ + Simple: Simple{I: 2}, + X: "two", + }, + }, + { + desc: "nested with dotted field tag", + src: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "A.B.B": {ValueType: &pb.Value_StringValue{StringValue: "bb"}}, + }, + }, + want: &ABDotB{ + A: BDotB{ + B: "bb", + }, + }, + }, + { + desc: "nested with multiple anonymous fields", + src: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 3}}, + "S": {ValueType: &pb.Value_StringValue{StringValue: "S"}}, + "SS": {ValueType: &pb.Value_StringValue{StringValue: "s"}}, + "X": {ValueType: &pb.Value_StringValue{StringValue: "s"}}, + }, + }, + want: &MultiAnonymous{ + Simple: Simple{I: 3}, + SimpleTwoFields: SimpleTwoFields{S: "S", SS: "s"}, + X: "s", + }, + }, + } + + for _, tc := range testCases { + dst := reflect.New(reflect.TypeOf(tc.want).Elem()).Interface() + err := loadEntityProto(dst, tc.src) + if err != nil { + t.Errorf("loadEntityProto: %s: %v", tc.desc, err) + continue + } + + if !testutil.Equal(tc.want, dst) { + t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, dst, tc.want) + } + } +} + +type WithKey struct { + X string + I int + K *Key `datastore:"__key__"` +} + +type NestedWithKey struct { + Y string + N WithKey +} + +var ( + incompleteKey = newKey("", nil) + invalidKey = newKey("s", incompleteKey) +) + +func TestLoadEntityNested(t *testing.T) { + testCases := []struct { + desc string + src *pb.Entity + want interface{} + }{ + { + desc: "nested basic", + src: &pb.Entity{ + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: 
map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 3}}, + }, + }, + }}, + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 10}}, + }, + }, + want: &NestedSimple{ + A: Simple{I: 3}, + I: 10, + }, + }, + { + desc: "nested with struct tags", + src: &pb.Entity{ + Properties: map[string]*pb.Value{ + "AA": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "II": {ValueType: &pb.Value_IntegerValue{IntegerValue: 1}}, + }, + }, + }}, + }, + }, + want: &NestedSimpleWithTag{ + A: SimpleWithTag{I: 1}, + }, + }, + { + desc: "nested 2x", + src: &pb.Entity{ + Properties: map[string]*pb.Value{ + "AA": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 3}}, + }, + }, + }}, + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 1}}, + }, + }, + }}, + "A": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "S": {ValueType: &pb.Value_StringValue{StringValue: "S"}}, + "SS": {ValueType: &pb.Value_StringValue{StringValue: "s"}}, + }, + }, + }}, + "S": {ValueType: &pb.Value_StringValue{StringValue: "SS"}}, + }, + }, + want: &NestedSimple2X{ + AA: NestedSimple{ + A: Simple{I: 3}, + I: 1, + }, + A: SimpleTwoFields{S: "S", SS: "s"}, + S: "SS", + }, + }, + { + desc: "nested anonymous", + src: &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 3}}, + "X": {ValueType: &pb.Value_StringValue{StringValue: "SomeX"}}, + }, + }, + want: &NestedSimpleAnonymous{ + Simple: Simple{I: 3}, + X: "SomeX", + }, + }, + { + desc: "nested simple with slice", + src: &pb.Entity{ + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_ArrayValue{ + ArrayValue: &pb.ArrayValue{ + Values: []*pb.Value{ + {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 3}}, + }, + }, + }}, + {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 4}}, + }, + }, + }}, + }, + }, + }}, + }, + }, + + want: &NestedSliceOfSimple{ + A: []Simple{Simple{I: 3}, Simple{I: 4}}, + }, + }, + { + desc: "nested with multiple anonymous fields", + src: &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 3}}, + "S": {ValueType: &pb.Value_StringValue{StringValue: "S"}}, + "SS": {ValueType: &pb.Value_StringValue{StringValue: "s"}}, + "X": {ValueType: &pb.Value_StringValue{StringValue: "ss"}}, + }, + }, + want: &MultiAnonymous{ + Simple: Simple{I: 3}, + SimpleTwoFields: SimpleTwoFields{S: "S", SS: "s"}, + X: "ss", + }, + }, + { + desc: "nested with dotted field tag", + src: &pb.Entity{ + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "B.B": {ValueType: &pb.Value_StringValue{StringValue: "bb"}}, + }, + }, + }}, + }, + }, + want: &ABDotB{ + A: BDotB{ + B: "bb", + }, + }, + }, + { + desc: "nested entity with key", + src: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Y": {ValueType: &pb.Value_StringValue{StringValue: "yyy"}}, + "N": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Key: 
keyToProto(testKey1a), + Properties: map[string]*pb.Value{ + "X": {ValueType: &pb.Value_StringValue{StringValue: "two"}}, + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, + }, + }, + }}, + }, + }, + want: &NestedWithKey{ + Y: "yyy", + N: WithKey{ + X: "two", + I: 2, + K: testKey1a, + }, + }, + }, + { + desc: "nested entity with invalid key", + src: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Y": {ValueType: &pb.Value_StringValue{StringValue: "yyy"}}, + "N": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Key: keyToProto(invalidKey), + Properties: map[string]*pb.Value{ + "X": {ValueType: &pb.Value_StringValue{StringValue: "two"}}, + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, + }, + }, + }}, + }, + }, + want: &NestedWithKey{ + Y: "yyy", + N: WithKey{ + X: "two", + I: 2, + K: invalidKey, + }, + }, + }, + } + + for _, tc := range testCases { + dst := reflect.New(reflect.TypeOf(tc.want).Elem()).Interface() + err := loadEntityProto(dst, tc.src) + if err != nil { + t.Errorf("loadEntityProto: %s: %v", tc.desc, err) + continue + } + + if !testutil.Equal(tc.want, dst) { + t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, dst, tc.want) + } + } +} + +type NestedStructPtrs struct { + *SimpleTwoFields + Nest *SimpleTwoFields + TwiceNest *NestedSimple2 + I int +} + +type NestedSimple2 struct { + A *Simple + I int +} + +func TestAlreadyPopulatedDst(t *testing.T) { + testCases := []struct { + desc string + src *pb.Entity + dst interface{} + want interface{} + }{ + { + desc: "simple already populated, nil properties", + src: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_NullValue{}}, + }, + }, + dst: &Simple{ + I: 12, + }, + want: &Simple{}, + }, + { + desc: "nested structs already populated", + src: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "SS": {ValueType: &pb.Value_StringValue{StringValue: "world"}}, + }, + }, + dst: &SimpleTwoFields{S: "hello" /* SS: "" */}, + want: &SimpleTwoFields{S: "hello", SS: "world"}, + }, + { + desc: "nested structs already populated, pValues nil", + src: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "S": {ValueType: &pb.Value_NullValue{}}, + "SS": {ValueType: &pb.Value_StringValue{StringValue: "ss hello"}}, + "Nest": {ValueType: &pb.Value_NullValue{}}, + "TwiceNest": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_NullValue{}}, + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, + }, + }, + }}, + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 5}}, + }, + }, + dst: &NestedStructPtrs{ + &SimpleTwoFields{S: "hello" /* SS: "" */}, + &SimpleTwoFields{ /* S: "" */ SS: "twice hello"}, + &NestedSimple2{ + A: &Simple{I: 2}, + /* I: 0 */ + }, + 0, + }, + want: &NestedStructPtrs{ + &SimpleTwoFields{ /* S: "" */ SS: "ss hello"}, + nil, + &NestedSimple2{ + /* A: nil, */ + I: 2, + }, + 5, + }, + }, + } + + for _, tc := range testCases { + err := loadEntityProto(tc.dst, tc.src) + if err != nil { + t.Errorf("loadEntityProto: %s: %v", tc.desc, err) + continue + } + + if !testutil.Equal(tc.want, tc.dst) { + t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, tc.dst, tc.want) + } + } +} + +type PLS0 struct { + A string +} + +func (p *PLS0) Load(props []Property) error { + for _, pp := range props { + if pp.Name == "A" { + p.A = pp.Value.(string) + } + } + return nil +} + +func (p *PLS0) 
Save() (props []Property, err error) { + return []Property{{Name: "A", Value: p.A}}, nil +} + +type KeyLoader1 struct { + A string + K *Key +} + +func (kl *KeyLoader1) Load(props []Property) error { + for _, pp := range props { + if pp.Name == "A" { + kl.A = pp.Value.(string) + } + } + return nil +} + +func (kl *KeyLoader1) Save() (props []Property, err error) { + return []Property{{Name: "A", Value: kl.A}}, nil +} + +func (kl *KeyLoader1) LoadKey(k *Key) error { + kl.K = k + return nil +} + +type KeyLoader2 struct { + B int + Key *Key +} + +func (kl *KeyLoader2) Load(props []Property) error { + for _, pp := range props { + if pp.Name == "B" { + kl.B = int(pp.Value.(int64)) + } + } + return nil +} + +func (kl *KeyLoader2) Save() (props []Property, err error) { + return []Property{{Name: "B", Value: int64(kl.B)}}, nil +} + +func (kl *KeyLoader2) LoadKey(k *Key) error { + kl.Key = k + return nil +} + +type KeyLoader3 struct { + C bool + K *Key +} + +func (kl *KeyLoader3) Load(props []Property) error { + for _, pp := range props { + if pp.Name == "C" { + kl.C = pp.Value.(bool) + } + } + return nil +} + +func (kl *KeyLoader3) Save() (props []Property, err error) { + return []Property{{Name: "C", Value: kl.C}}, nil +} + +func (kl *KeyLoader3) LoadKey(k *Key) error { + kl.K = k + return nil +} + +type KeyLoader4 struct { + PLS0 + K *Key +} + +func (kl *KeyLoader4) LoadKey(k *Key) error { + kl.K = k + return nil +} + +type NotKeyLoader struct { + A string + K *Key +} + +func (p *NotKeyLoader) Load(props []Property) error { + for _, pp := range props { + if pp.Name == "A" { + p.A = pp.Value.(string) + } + } + return nil +} + +func (p *NotKeyLoader) Save() (props []Property, err error) { + return []Property{{Name: "A", Value: p.A}}, nil +} + +type NestedKeyLoaders struct { + Two *KeyLoader2 + Three []*KeyLoader3 + Four *KeyLoader4 + PLS *NotKeyLoader +} + +func TestKeyLoader(t *testing.T) { + testCases := []struct { + desc string + src *pb.Entity + dst interface{} + want interface{} + }{ + { + desc: "simple key loader", + src: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_StringValue{StringValue: "hello"}}, + }, + }, + dst: &KeyLoader1{}, + want: &KeyLoader1{ + A: "hello", + K: testKey0, + }, + }, + { + desc: "embedded PLS key loader", + src: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_StringValue{StringValue: "hello"}}, + }, + }, + dst: &KeyLoader4{}, + want: &KeyLoader4{ + PLS0: PLS0{A: "hello"}, + K: testKey0, + }, + }, + { + desc: "nested key loaders", + src: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Two": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "B": {ValueType: &pb.Value_IntegerValue{IntegerValue: 12}}, + }, + Key: keyToProto(testKey1a), + }, + }}, + "Three": {ValueType: &pb.Value_ArrayValue{ + ArrayValue: &pb.ArrayValue{ + Values: []*pb.Value{ + {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "C": {ValueType: &pb.Value_BooleanValue{BooleanValue: true}}, + }, + Key: keyToProto(testKey1b), + }, + }}, + {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "C": {ValueType: &pb.Value_BooleanValue{BooleanValue: false}}, + }, + Key: keyToProto(testKey0), + }, + }}, + }, + }, + }}, + "Four": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ 
+ "A": {ValueType: &pb.Value_StringValue{StringValue: "testing"}}, + }, + Key: keyToProto(testKey2a), + }, + }}, + "PLS": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_StringValue{StringValue: "something"}}, + }, + + Key: keyToProto(testKey1a), + }, + }}, + }, + }, + dst: &NestedKeyLoaders{}, + want: &NestedKeyLoaders{ + Two: &KeyLoader2{B: 12, Key: testKey1a}, + Three: []*KeyLoader3{ + { + C: true, + K: testKey1b, + }, + { + C: false, + K: testKey0, + }, + }, + Four: &KeyLoader4{ + PLS0: PLS0{A: "testing"}, + K: testKey2a, + }, + PLS: &NotKeyLoader{A: "something"}, + }, + }, + } + + for _, tc := range testCases { + err := loadEntityProto(tc.dst, tc.src) + if err != nil { + t.Errorf("loadEntityProto: %s: %v", tc.desc, err) + continue + } + + if !testutil.Equal(tc.want, tc.dst) { + t.Errorf("%s: compare:\ngot: %+v\nwant: %+v", tc.desc, tc.dst, tc.want) + } + } +} + +func TestLoadPointers(t *testing.T) { + for _, test := range []struct { + desc string + in []Property + want Pointers + }{ + { + desc: "nil properties load as nil pointers", + in: []Property{ + Property{Name: "Pi", Value: nil}, + Property{Name: "Ps", Value: nil}, + Property{Name: "Pb", Value: nil}, + Property{Name: "Pf", Value: nil}, + Property{Name: "Pg", Value: nil}, + Property{Name: "Pt", Value: nil}, + }, + want: Pointers{}, + }, + { + desc: "missing properties load as nil pointers", + in: []Property(nil), + want: Pointers{}, + }, + { + desc: "non-nil properties load as the appropriate values", + in: []Property{ + Property{Name: "Pi", Value: int64(1)}, + Property{Name: "Ps", Value: "x"}, + Property{Name: "Pb", Value: true}, + Property{Name: "Pf", Value: 3.14}, + Property{Name: "Pg", Value: GeoPoint{Lat: 1, Lng: 2}}, + Property{Name: "Pt", Value: time.Unix(100, 0)}, + }, + want: func() Pointers { + p := populatedPointers() + *p.Pi = 1 + *p.Ps = "x" + *p.Pb = true + *p.Pf = 3.14 + *p.Pg = GeoPoint{Lat: 1, Lng: 2} + *p.Pt = time.Unix(100, 0) + return *p + }(), + }, + } { + var got Pointers + if err := LoadStruct(&got, test.in); err != nil { + t.Fatalf("%s: %v", test.desc, err) + } + if !testutil.Equal(got, test.want) { + t.Errorf("%s:\ngot %+v\nwant %+v", test.desc, got, test.want) + } + } +} + +func TestLoadNonArrayIntoSlice(t *testing.T) { + // Loading a non-array value into a slice field results in a slice of size 1. + var got struct{ S []string } + if err := LoadStruct(&got, []Property{{Name: "S", Value: "x"}}); err != nil { + t.Fatal(err) + } + if want := []string{"x"}; !testutil.Equal(got.S, want) { + t.Errorf("got %#v, want %#v", got.S, want) + } +} + +func TestLoadEmptyArrayIntoSlice(t *testing.T) { + // Loading an empty array into a slice field is a no-op. + var got = struct{ S []string }{[]string{"x"}} + if err := LoadStruct(&got, []Property{{Name: "S", Value: []interface{}{}}}); err != nil { + t.Fatal(err) + } + if want := []string{"x"}; !testutil.Equal(got.S, want) { + t.Errorf("got %#v, want %#v", got.S, want) + } +} + +func TestLoadNull(t *testing.T) { + // Loading a Datastore Null into a basic type (int, float, etc.) results in a zero value. + // Loading a Null into a slice of basic type results in a slice of size 1 containing the zero value. + // (As expected from the behavior of slices and nulls with basic types.) 
+ type S struct { + I int64 + F float64 + S string + B bool + A []string + } + got := S{ + I: 1, + F: 1.0, + S: "1", + B: true, + A: []string{"X"}, + } + want := S{A: []string{""}} + props := []Property{{Name: "I"}, {Name: "F"}, {Name: "S"}, {Name: "B"}, {Name: "A"}} + if err := LoadStruct(&got, props); err != nil { + t.Fatal(err) + } + if !testutil.Equal(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } + + // Loading a Null into a pointer to struct field results in a nil field. + got2 := struct{ X *S }{X: &S{}} + if err := LoadStruct(&got2, []Property{{Name: "X"}}); err != nil { + t.Fatal(err) + } + if got2.X != nil { + t.Errorf("got %v, want nil", got2.X) + } + + // Loading a Null into a struct field is an error. + got3 := struct{ X S }{} + err := LoadStruct(&got3, []Property{{Name: "X"}}) + if err == nil { + t.Error("got nil, want error") + } +} + +// var got2 struct{ S []Pet } +// if err := LoadStruct(&got2, []Property{{Name: "S", Value: nil}}); err != nil { +// t.Fatal(err) +// } + +// } diff --git a/vendor/cloud.google.com/go/datastore/mutation.go b/vendor/cloud.google.com/go/datastore/mutation.go new file mode 100644 index 0000000..894c80d --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/mutation.go @@ -0,0 +1,129 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "fmt" + + pb "google.golang.org/genproto/googleapis/datastore/v1" +) + +// A Mutation represents a change to a Datastore entity. +type Mutation struct { + key *Key // needed for transaction PendingKeys and to dedup deletions + mut *pb.Mutation + err error +} + +func (m *Mutation) isDelete() bool { + _, ok := m.mut.Operation.(*pb.Mutation_Delete) + return ok +} + +// NewInsert creates a mutation that will save the entity src into the datastore with +// key k, returning an error if k already exists. +// See Client.Put for valid values of src. +func NewInsert(k *Key, src interface{}) *Mutation { + if !k.valid() { + return &Mutation{err: ErrInvalidKey} + } + p, err := saveEntity(k, src) + if err != nil { + return &Mutation{err: err} + } + return &Mutation{ + key: k, + mut: &pb.Mutation{Operation: &pb.Mutation_Insert{Insert: p}}, + } +} + +// NewUpsert creates a mutation that saves the entity src into the datastore with key +// k, whether or not k exists. See Client.Put for valid values of src. +func NewUpsert(k *Key, src interface{}) *Mutation { + if !k.valid() { + return &Mutation{err: ErrInvalidKey} + } + p, err := saveEntity(k, src) + if err != nil { + return &Mutation{err: err} + } + return &Mutation{ + key: k, + mut: &pb.Mutation{Operation: &pb.Mutation_Upsert{Upsert: p}}, + } +} + +// NewUpdate creates a mutation that replaces the entity in the datastore with key k, +// returning an error if k does not exist. See Client.Put for valid values of src. 
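+//
+// An invalid or incomplete key does not fail immediately: the error is
+// carried inside the returned Mutation and reported when the mutation is
+// applied, e.g. (illustrative; src is a caller-supplied entity):
+//
+//	m := NewUpdate(IncompleteKey("kind", nil), src) // error surfaces on apply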
+func NewUpdate(k *Key, src interface{}) *Mutation { + if !k.valid() { + return &Mutation{err: ErrInvalidKey} + } + if k.Incomplete() { + return &Mutation{err: fmt.Errorf("datastore: can't update the incomplete key: %v", k)} + } + p, err := saveEntity(k, src) + if err != nil { + return &Mutation{err: err} + } + return &Mutation{ + key: k, + mut: &pb.Mutation{Operation: &pb.Mutation_Update{Update: p}}, + } +} + +// NewDelete creates a mutation that deletes the entity with key k. +func NewDelete(k *Key) *Mutation { + if !k.valid() { + return &Mutation{err: ErrInvalidKey} + } + if k.Incomplete() { + return &Mutation{err: fmt.Errorf("datastore: can't delete the incomplete key: %v", k)} + } + return &Mutation{ + key: k, + mut: &pb.Mutation{Operation: &pb.Mutation_Delete{Delete: keyToProto(k)}}, + } +} + +func mutationProtos(muts []*Mutation) ([]*pb.Mutation, error) { + // If any of the mutations have errors, collect and return them. + var merr MultiError + for i, m := range muts { + if m.err != nil { + if merr == nil { + merr = make(MultiError, len(muts)) + } + merr[i] = m.err + } + } + if merr != nil { + return nil, merr + } + var protos []*pb.Mutation + // Collect protos. Remove duplicate deletions (see deleteMutations). + seen := map[string]bool{} + for _, m := range muts { + if m.isDelete() { + ks := m.key.String() + if seen[ks] { + continue + } + seen[ks] = true + } + protos = append(protos, m.mut) + } + return protos, nil +} diff --git a/vendor/cloud.google.com/go/datastore/mutation_test.go b/vendor/cloud.google.com/go/datastore/mutation_test.go new file mode 100644 index 0000000..a434bb1 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/mutation_test.go @@ -0,0 +1,150 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
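+
+// The tests below verify both the happy path and the error-collection
+// behavior of mutationProtos, including the removal of duplicate deletions.
+// An illustrative sketch of the dedup behavior (k, k2 and entity as in the
+// tests):
+//
+//	muts := []*Mutation{NewDelete(k), NewInsert(k2, entity), NewDelete(k)}
+//	protos, _ := mutationProtos(muts) // two protos; second delete of k is dropped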
+ +package datastore + +import ( + "testing" + + "cloud.google.com/go/internal/testutil" + pb "google.golang.org/genproto/googleapis/datastore/v1" +) + +func TestMutationProtos(t *testing.T) { + var keys []*Key + for i := 1; i <= 4; i++ { + k := IDKey("kind", int64(i), nil) + keys = append(keys, k) + } + entity := &PropertyList{{Name: "n", Value: "v"}} + entityForKey := func(k *Key) *pb.Entity { + return &pb.Entity{ + Key: keyToProto(k), + Properties: map[string]*pb.Value{ + "n": &pb.Value{ValueType: &pb.Value_StringValue{StringValue: "v"}}, + }, + } + } + for _, test := range []struct { + desc string + in []*Mutation + want []*pb.Mutation + }{ + { + desc: "nil", + in: nil, + want: nil, + }, + { + desc: "empty", + in: []*Mutation{}, + want: nil, + }, + { + desc: "various", + in: []*Mutation{ + NewInsert(keys[0], entity), + NewUpsert(keys[1], entity), + NewUpdate(keys[2], entity), + NewDelete(keys[3]), + }, + want: []*pb.Mutation{ + &pb.Mutation{Operation: &pb.Mutation_Insert{Insert: entityForKey(keys[0])}}, + &pb.Mutation{Operation: &pb.Mutation_Upsert{Upsert: entityForKey(keys[1])}}, + &pb.Mutation{Operation: &pb.Mutation_Update{Update: entityForKey(keys[2])}}, + &pb.Mutation{Operation: &pb.Mutation_Delete{Delete: keyToProto(keys[3])}}, + }, + }, + { + desc: "duplicate deletes", + in: []*Mutation{ + NewDelete(keys[0]), + NewInsert(keys[1], entity), + NewDelete(keys[0]), + NewDelete(keys[2]), + NewDelete(keys[0]), + }, + want: []*pb.Mutation{ + &pb.Mutation{Operation: &pb.Mutation_Delete{Delete: keyToProto(keys[0])}}, + &pb.Mutation{Operation: &pb.Mutation_Insert{Insert: entityForKey(keys[1])}}, + &pb.Mutation{Operation: &pb.Mutation_Delete{Delete: keyToProto(keys[2])}}, + }, + }, + } { + got, err := mutationProtos(test.in) + if err != nil { + t.Errorf("%s: %v", test.desc, err) + continue + } + if diff := testutil.Diff(got, test.want); diff != "" { + t.Errorf("%s: %s", test.desc, diff) + } + } +} + +func TestMutationProtosErrors(t *testing.T) { + entity := &PropertyList{{Name: "n", Value: "v"}} + k := IDKey("kind", 1, nil) + ik := IncompleteKey("kind", nil) + for _, test := range []struct { + desc string + in []*Mutation + want []int // non-nil indexes of MultiError + }{ + { + desc: "invalid key", + in: []*Mutation{ + NewInsert(nil, entity), + NewUpdate(nil, entity), + NewUpsert(nil, entity), + NewDelete(nil), + }, + want: []int{0, 1, 2, 3}, + }, + { + desc: "incomplete key", + in: []*Mutation{ + NewInsert(ik, entity), + NewUpdate(ik, entity), + NewUpsert(ik, entity), + NewDelete(ik), + }, + want: []int{1, 3}, + }, + { + desc: "bad entity", + in: []*Mutation{ + NewInsert(k, 1), + NewUpdate(k, 2), + NewUpsert(k, 3), + }, + want: []int{0, 1, 2}, + }, + } { + _, err := mutationProtos(test.in) + if err == nil { + t.Errorf("%s: got nil, want error", test.desc) + continue + } + var got []int + for i, err := range err.(MultiError) { + if err != nil { + got = append(got, i) + } + } + if !testutil.Equal(got, test.want) { + t.Errorf("%s: got errors at %v, want at %v", test.desc, got, test.want) + } + } +} diff --git a/vendor/cloud.google.com/go/datastore/oc_test.go b/vendor/cloud.google.com/go/datastore/oc_test.go new file mode 100644 index 0000000..49a4bf1 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/oc_test.go @@ -0,0 +1,45 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +package datastore + +import ( + "testing" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" +) + +func TestOCTracing(t *testing.T) { + ctx := context.Background() + client := newTestClient(ctx, t) + defer client.Close() + + te := testutil.NewTestExporter() + defer te.Unregister() + + type SomeValue struct { + S string + } + _, err := client.Put(ctx, IncompleteKey("SomeKey", nil), &SomeValue{"foo"}) + if err != nil { + t.Fatalf("client.Put: %v", err) + } + + if len(te.Spans) == 0 { + t.Fatalf("Expected some span to be created, but got %d", 0) + } +} diff --git a/vendor/cloud.google.com/go/datastore/prop.go b/vendor/cloud.google.com/go/datastore/prop.go new file mode 100644 index 0000000..628ccc9 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/prop.go @@ -0,0 +1,342 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "fmt" + "reflect" + "strings" + "unicode" + + "cloud.google.com/go/internal/fields" +) + +// Entities with more than this many indexed properties will not be saved. +const maxIndexedProperties = 20000 + +// []byte fields more than 1 megabyte long will not be loaded or saved. +const maxBlobLen = 1 << 20 + +// Property is a name/value pair plus some metadata. A datastore entity's +// contents are loaded and saved as a sequence of Properties. Each property +// name must be unique within an entity. +type Property struct { + // Name is the property name. + Name string + // Value is the property value. The valid types are: + // - int64 + // - bool + // - string + // - float64 + // - *Key + // - time.Time + // - GeoPoint + // - []byte (up to 1 megabyte in length) + // - *Entity (representing a nested struct) + // Value can also be: + // - []interface{} where each element is one of the above types + // This set is smaller than the set of valid struct field types that the + // datastore can load and save. A Value's type must be explicitly on + // the list above; it is not sufficient for the underlying type to be + // on that list. For example, a Value of "type myInt64 int64" is + // invalid. Smaller-width integers and floats are also invalid. Again, + // this is more restrictive than the set of valid struct field types. + // + // A Value will have an opaque type when loading entities from an index, + // such as via a projection query. Load entities into a struct instead + // of a PropertyLoadSaver when using a projection query. 
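+	//
+	// For example (illustrative), int64 is a valid Value type while
+	// narrower widths are not:
+	//
+	//	Property{Name: "Age", Value: int64(7)} // valid
+	//	Property{Name: "Age", Value: int32(7)} // invalid: not in the list above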
+ // + // A Value may also be the nil interface value; this is equivalent to + // Python's None but not directly representable by a Go struct. Loading + // a nil-valued property into a struct will set that field to the zero + // value. + Value interface{} + // NoIndex is whether the datastore cannot index this property. + // If NoIndex is set to false, []byte and string values are limited to + // 1500 bytes. + NoIndex bool +} + +// An Entity is the value type for a nested struct. +// This type is only used for a Property's Value. +type Entity struct { + Key *Key + Properties []Property +} + +// PropertyLoadSaver can be converted from and to a slice of Properties. +type PropertyLoadSaver interface { + Load([]Property) error + Save() ([]Property, error) +} + +// KeyLoader can store a Key. +type KeyLoader interface { + // PropertyLoadSaver is embedded because a KeyLoader + // must also always implement PropertyLoadSaver. + PropertyLoadSaver + LoadKey(k *Key) error +} + +// PropertyList converts a []Property to implement PropertyLoadSaver. +type PropertyList []Property + +var ( + typeOfPropertyLoadSaver = reflect.TypeOf((*PropertyLoadSaver)(nil)).Elem() + typeOfPropertyList = reflect.TypeOf(PropertyList(nil)) +) + +// Load loads all of the provided properties into l. +// It does not first reset *l to an empty slice. +func (l *PropertyList) Load(p []Property) error { + *l = append(*l, p...) + return nil +} + +// Save saves all of l's properties as a slice of Properties. +func (l *PropertyList) Save() ([]Property, error) { + return *l, nil +} + +// validPropertyName returns whether name consists of one or more valid Go +// identifiers joined by ".". +func validPropertyName(name string) bool { + if name == "" { + return false + } + for _, s := range strings.Split(name, ".") { + if s == "" { + return false + } + first := true + for _, c := range s { + if first { + first = false + if c != '_' && !unicode.IsLetter(c) { + return false + } + } else { + if c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + } + return true +} + +// parseTag interprets datastore struct field tags +func parseTag(t reflect.StructTag) (name string, keep bool, other interface{}, err error) { + s := t.Get("datastore") + parts := strings.Split(s, ",") + if parts[0] == "-" && len(parts) == 1 { + return "", false, nil, nil + } + if parts[0] != "" && !validPropertyName(parts[0]) { + err = fmt.Errorf("datastore: struct tag has invalid property name: %q", parts[0]) + return "", false, nil, err + } + + var opts saveOpts + if len(parts) > 1 { + for _, p := range parts[1:] { + switch p { + case "flatten": + opts.flatten = true + case "omitempty": + opts.omitEmpty = true + case "noindex": + opts.noIndex = true + default: + err = fmt.Errorf("datastore: struct tag has invalid option: %q", p) + return "", false, nil, err + } + } + other = opts + } + return parts[0], true, other, nil +} + +func validateType(t reflect.Type) error { + if t.Kind() != reflect.Struct { + return fmt.Errorf("datastore: validate called with non-struct type %s", t) + } + + return validateChildType(t, "", false, false, map[reflect.Type]bool{}) +} + +// validateChildType is a recursion helper func for validateType +func validateChildType(t reflect.Type, fieldName string, flatten, prevSlice bool, prevTypes map[reflect.Type]bool) error { + if prevTypes[t] { + return nil + } + prevTypes[t] = true + + switch t.Kind() { + case reflect.Slice: + if flatten && prevSlice { + return fmt.Errorf("datastore: flattening nested structs leads to a 
slice of slices: field %q", fieldName) + } + return validateChildType(t.Elem(), fieldName, flatten, true, prevTypes) + case reflect.Struct: + if t == typeOfTime || t == typeOfGeoPoint { + return nil + } + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + + // If a named field is unexported, ignore it. An anonymous + // unexported field is processed, because it may contain + // exported fields, which are visible. + exported := (f.PkgPath == "") + if !exported && !f.Anonymous { + continue + } + + _, keep, other, err := parseTag(f.Tag) + // Handle error from parseTag now instead of later (in cache.Fields call). + if err != nil { + return err + } + if !keep { + continue + } + if other != nil { + opts := other.(saveOpts) + flatten = flatten || opts.flatten + } + if err := validateChildType(f.Type, f.Name, flatten, prevSlice, prevTypes); err != nil { + return err + } + } + case reflect.Ptr: + if t == typeOfKeyPtr { + return nil + } + return validateChildType(t.Elem(), fieldName, flatten, prevSlice, prevTypes) + } + return nil +} + +// isLeafType determines whether or not a type is a 'leaf type' +// and should not be recursed into, but considered one field. +func isLeafType(t reflect.Type) bool { + return t == typeOfTime || t == typeOfGeoPoint +} + +// structCache collects the structs whose fields have already been calculated. +var structCache = fields.NewCache(parseTag, validateType, isLeafType) + +// structPLS adapts a struct to be a PropertyLoadSaver. +type structPLS struct { + v reflect.Value + codec fields.List +} + +// newStructPLS returns a structPLS, which implements the +// PropertyLoadSaver interface, for the struct pointer p. +func newStructPLS(p interface{}) (*structPLS, error) { + v := reflect.ValueOf(p) + if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { + return nil, ErrInvalidEntityType + } + v = v.Elem() + f, err := structCache.Fields(v.Type()) + if err != nil { + return nil, err + } + return &structPLS{v, f}, nil +} + +// LoadStruct loads the properties from p to dst. +// dst must be a struct pointer. +// +// The values of dst's unmatched struct fields are not modified, +// and matching slice-typed fields are not reset before appending to +// them. In particular, it is recommended to pass a pointer to a zero +// valued struct on each LoadStruct call. +func LoadStruct(dst interface{}, p []Property) error { + x, err := newStructPLS(dst) + if err != nil { + return err + } + return x.Load(p) +} + +// SaveStruct returns the properties from src as a slice of Properties. +// src must be a struct pointer. +func SaveStruct(src interface{}) ([]Property, error) { + x, err := newStructPLS(src) + if err != nil { + return nil, err + } + return x.Save() +} + +// plsForLoad tries to convert v to a PropertyLoadSaver. +// If successful, plsForLoad returns a settable v as a PropertyLoadSaver. +// +// plsForLoad is intended to be used with nested struct fields which +// may implement PropertyLoadSaver. +// +// v must be settable. +func plsForLoad(v reflect.Value) (PropertyLoadSaver, error) { + var nilPtr bool + if v.Kind() == reflect.Ptr && v.IsNil() { + nilPtr = true + v.Set(reflect.New(v.Type().Elem())) + } + + vpls, err := pls(v) + if nilPtr && (vpls == nil || err != nil) { + // unset v + v.Set(reflect.Zero(v.Type())) + } + + return vpls, err +} + +// plsForSave tries to convert v to a PropertyLoadSaver. +// If successful, plsForSave returns v as a PropertyLoadSaver. 
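+// For the nil-able kinds (pointer, slice, map, interface, chan, func) a nil
+// v carries no data to save, so plsForSave returns (nil, nil) without error.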
+// +// plsForSave is intended to be used with nested struct fields which +// may implement PropertyLoadSaver. +// +// v must be settable. +func plsForSave(v reflect.Value) (PropertyLoadSaver, error) { + switch v.Kind() { + case reflect.Ptr, reflect.Slice, reflect.Map, reflect.Interface, reflect.Chan, reflect.Func: + // If v is nil, return early. v contains no data to save. + if v.IsNil() { + return nil, nil + } + } + + return pls(v) +} + +func pls(v reflect.Value) (PropertyLoadSaver, error) { + if v.Kind() != reflect.Ptr { + if _, ok := v.Interface().(PropertyLoadSaver); ok { + return nil, fmt.Errorf("datastore: PropertyLoadSaver methods must be implemented on a pointer to %T.", v.Interface()) + } + + v = v.Addr() + } + + vpls, _ := v.Interface().(PropertyLoadSaver) + return vpls, nil +} diff --git a/vendor/cloud.google.com/go/datastore/query.go b/vendor/cloud.google.com/go/datastore/query.go new file mode 100644 index 0000000..7bf5840 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/query.go @@ -0,0 +1,784 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "encoding/base64" + "errors" + "fmt" + "math" + "reflect" + "strconv" + "strings" + + "cloud.google.com/go/internal/trace" + wrapperspb "github.com/golang/protobuf/ptypes/wrappers" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + pb "google.golang.org/genproto/googleapis/datastore/v1" +) + +type operator int + +const ( + lessThan operator = iota + 1 + lessEq + equal + greaterEq + greaterThan + + keyFieldName = "__key__" +) + +var operatorToProto = map[operator]pb.PropertyFilter_Operator{ + lessThan: pb.PropertyFilter_LESS_THAN, + lessEq: pb.PropertyFilter_LESS_THAN_OR_EQUAL, + equal: pb.PropertyFilter_EQUAL, + greaterEq: pb.PropertyFilter_GREATER_THAN_OR_EQUAL, + greaterThan: pb.PropertyFilter_GREATER_THAN, +} + +// filter is a conditional filter on query results. +type filter struct { + FieldName string + Op operator + Value interface{} +} + +type sortDirection bool + +const ( + ascending sortDirection = false + descending sortDirection = true +) + +var sortDirectionToProto = map[sortDirection]pb.PropertyOrder_Direction{ + ascending: pb.PropertyOrder_ASCENDING, + descending: pb.PropertyOrder_DESCENDING, +} + +// order is a sort order on query results. +type order struct { + FieldName string + Direction sortDirection +} + +// NewQuery creates a new Query for a specific entity kind. +// +// An empty kind means to return all entities, including entities created and +// managed by other App Engine features, and is called a kindless query. +// Kindless queries cannot include filters or sort orders on property values. +func NewQuery(kind string) *Query { + return &Query{ + kind: kind, + limit: -1, + } +} + +// Query represents a datastore query. 
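+//
+// The derivative methods below (Filter, Order, Limit, and so on) each return
+// a modified copy, so queries are built by chaining, e.g. (illustrative):
+//
+//	q := NewQuery("Gopher").
+//		Filter("Height >", 10).
+//		Order("-Height").
+//		Limit(5)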
+type Query struct { + kind string + ancestor *Key + filter []filter + order []order + projection []string + + distinct bool + distinctOn []string + keysOnly bool + eventual bool + limit int32 + offset int32 + start []byte + end []byte + + namespace string + + trans *Transaction + + err error +} + +func (q *Query) clone() *Query { + x := *q + // Copy the contents of the slice-typed fields to a new backing store. + if len(q.filter) > 0 { + x.filter = make([]filter, len(q.filter)) + copy(x.filter, q.filter) + } + if len(q.order) > 0 { + x.order = make([]order, len(q.order)) + copy(x.order, q.order) + } + return &x +} + +// Ancestor returns a derivative query with an ancestor filter. +// The ancestor should not be nil. +func (q *Query) Ancestor(ancestor *Key) *Query { + q = q.clone() + if ancestor == nil { + q.err = errors.New("datastore: nil query ancestor") + return q + } + q.ancestor = ancestor + return q +} + +// EventualConsistency returns a derivative query that returns eventually +// consistent results. +// It only has an effect on ancestor queries. +func (q *Query) EventualConsistency() *Query { + q = q.clone() + q.eventual = true + return q +} + +// Namespace returns a derivative query that is associated with the given +// namespace. +// +// A namespace may be used to partition data for multi-tenant applications. +// For details, see https://cloud.google.com/datastore/docs/concepts/multitenancy. +func (q *Query) Namespace(ns string) *Query { + q = q.clone() + q.namespace = ns + return q +} + +// Transaction returns a derivative query that is associated with the given +// transaction. +// +// All reads performed as part of the transaction will come from a single +// consistent snapshot. Furthermore, if the transaction is set to a +// serializable isolation level, another transaction cannot concurrently modify +// the data that is read or modified by this transaction. +func (q *Query) Transaction(t *Transaction) *Query { + q = q.clone() + q.trans = t + return q +} + +// Filter returns a derivative query with a field-based filter. +// The filterStr argument must be a field name followed by optional space, +// followed by an operator, one of ">", "<", ">=", "<=", or "=". +// Fields are compared against the provided value using the operator. +// Multiple filters are AND'ed together. +// Field names which contain spaces, quote marks, or operator characters +// should be passed as quoted Go string literals as returned by strconv.Quote +// or the fmt package's %q verb. +func (q *Query) Filter(filterStr string, value interface{}) *Query { + q = q.clone() + filterStr = strings.TrimSpace(filterStr) + if filterStr == "" { + q.err = fmt.Errorf("datastore: invalid filter %q", filterStr) + return q + } + f := filter{ + FieldName: strings.TrimRight(filterStr, " ><=!"), + Value: value, + } + switch op := strings.TrimSpace(filterStr[len(f.FieldName):]); op { + case "<=": + f.Op = lessEq + case ">=": + f.Op = greaterEq + case "<": + f.Op = lessThan + case ">": + f.Op = greaterThan + case "=": + f.Op = equal + default: + q.err = fmt.Errorf("datastore: invalid operator %q in filter %q", op, filterStr) + return q + } + var err error + f.FieldName, err = unquote(f.FieldName) + if err != nil { + q.err = fmt.Errorf("datastore: invalid syntax for quoted field name %q", f.FieldName) + return q + } + q.filter = append(q.filter, f) + return q +} + +// Order returns a derivative query with a field-based sort order. Orders are +// applied in the order they are added. 
The default order is ascending; to sort +// in descending order prefix the fieldName with a minus sign (-). +// Field names which contain spaces, quote marks, or the minus sign +// should be passed as quoted Go string literals as returned by strconv.Quote +// or the fmt package's %q verb. +func (q *Query) Order(fieldName string) *Query { + q = q.clone() + fieldName, dir := strings.TrimSpace(fieldName), ascending + if strings.HasPrefix(fieldName, "-") { + fieldName, dir = strings.TrimSpace(fieldName[1:]), descending + } else if strings.HasPrefix(fieldName, "+") { + q.err = fmt.Errorf("datastore: invalid order: %q", fieldName) + return q + } + fieldName, err := unquote(fieldName) + if err != nil { + q.err = fmt.Errorf("datastore: invalid syntax for quoted field name %q", fieldName) + return q + } + if fieldName == "" { + q.err = errors.New("datastore: empty order") + return q + } + q.order = append(q.order, order{ + Direction: dir, + FieldName: fieldName, + }) + return q +} + +// unquote optionally interprets s as a double-quoted or backquoted Go +// string literal if it begins with the relevant character. +func unquote(s string) (string, error) { + if s == "" || (s[0] != '`' && s[0] != '"') { + return s, nil + } + return strconv.Unquote(s) +} + +// Project returns a derivative query that yields only the given fields. It +// cannot be used with KeysOnly. +func (q *Query) Project(fieldNames ...string) *Query { + q = q.clone() + q.projection = append([]string(nil), fieldNames...) + return q +} + +// Distinct returns a derivative query that yields de-duplicated entities with +// respect to the set of projected fields. It is only used for projection +// queries. Distinct cannot be used with DistinctOn. +func (q *Query) Distinct() *Query { + q = q.clone() + q.distinct = true + return q +} + +// DistinctOn returns a derivative query that yields de-duplicated entities with +// respect to the set of the specified fields. It is only used for projection +// queries. The field list should be a subset of the projected field list. +// DistinctOn cannot be used with Distinct. +func (q *Query) DistinctOn(fieldNames ...string) *Query { + q = q.clone() + q.distinctOn = fieldNames + return q +} + +// KeysOnly returns a derivative query that yields only keys, not keys and +// entities. It cannot be used with projection queries. +func (q *Query) KeysOnly() *Query { + q = q.clone() + q.keysOnly = true + return q +} + +// Limit returns a derivative query that has a limit on the number of results +// returned. A negative value means unlimited. +func (q *Query) Limit(limit int) *Query { + q = q.clone() + if limit < math.MinInt32 || limit > math.MaxInt32 { + q.err = errors.New("datastore: query limit overflow") + return q + } + q.limit = int32(limit) + return q +} + +// Offset returns a derivative query that has an offset of how many keys to +// skip over before returning results. A negative value is invalid. +func (q *Query) Offset(offset int) *Query { + q = q.clone() + if offset < 0 { + q.err = errors.New("datastore: negative query offset") + return q + } + if offset > math.MaxInt32 { + q.err = errors.New("datastore: query offset overflow") + return q + } + q.offset = int32(offset) + return q +} + +// Start returns a derivative query with the given start point. +func (q *Query) Start(c Cursor) *Query { + q = q.clone() + q.start = c.cc + return q +} + +// End returns a derivative query with the given end point. 
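+// Start and End together bound iteration to a window between two cursors
+// previously obtained from Iterator.Cursor, e.g. (illustrative):
+//
+//	q := NewQuery("Gopher").Start(from).End(to)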
+func (q *Query) End(c Cursor) *Query { + q = q.clone() + q.end = c.cc + return q +} + +// toProto converts the query to a protocol buffer. +func (q *Query) toProto(req *pb.RunQueryRequest) error { + if len(q.projection) != 0 && q.keysOnly { + return errors.New("datastore: query cannot both project and be keys-only") + } + if len(q.distinctOn) != 0 && q.distinct { + return errors.New("datastore: query cannot be both distinct and distinct-on") + } + dst := &pb.Query{} + if q.kind != "" { + dst.Kind = []*pb.KindExpression{{Name: q.kind}} + } + if q.projection != nil { + for _, propertyName := range q.projection { + dst.Projection = append(dst.Projection, &pb.Projection{Property: &pb.PropertyReference{Name: propertyName}}) + } + + for _, propertyName := range q.distinctOn { + dst.DistinctOn = append(dst.DistinctOn, &pb.PropertyReference{Name: propertyName}) + } + + if q.distinct { + for _, propertyName := range q.projection { + dst.DistinctOn = append(dst.DistinctOn, &pb.PropertyReference{Name: propertyName}) + } + } + } + if q.keysOnly { + dst.Projection = []*pb.Projection{{Property: &pb.PropertyReference{Name: keyFieldName}}} + } + + var filters []*pb.Filter + for _, qf := range q.filter { + if qf.FieldName == "" { + return errors.New("datastore: empty query filter field name") + } + v, err := interfaceToProto(reflect.ValueOf(qf.Value).Interface(), false) + if err != nil { + return fmt.Errorf("datastore: bad query filter value type: %v", err) + } + op, ok := operatorToProto[qf.Op] + if !ok { + return errors.New("datastore: unknown query filter operator") + } + xf := &pb.PropertyFilter{ + Op: op, + Property: &pb.PropertyReference{Name: qf.FieldName}, + Value: v, + } + filters = append(filters, &pb.Filter{ + FilterType: &pb.Filter_PropertyFilter{PropertyFilter: xf}, + }) + } + + if q.ancestor != nil { + filters = append(filters, &pb.Filter{ + FilterType: &pb.Filter_PropertyFilter{PropertyFilter: &pb.PropertyFilter{ + Property: &pb.PropertyReference{Name: keyFieldName}, + Op: pb.PropertyFilter_HAS_ANCESTOR, + Value: &pb.Value{ValueType: &pb.Value_KeyValue{KeyValue: keyToProto(q.ancestor)}}, + }}}) + } + + if len(filters) == 1 { + dst.Filter = filters[0] + } else if len(filters) > 1 { + dst.Filter = &pb.Filter{FilterType: &pb.Filter_CompositeFilter{CompositeFilter: &pb.CompositeFilter{ + Op: pb.CompositeFilter_AND, + Filters: filters, + }}} + } + + for _, qo := range q.order { + if qo.FieldName == "" { + return errors.New("datastore: empty query order field name") + } + xo := &pb.PropertyOrder{ + Property: &pb.PropertyReference{Name: qo.FieldName}, + Direction: sortDirectionToProto[qo.Direction], + } + dst.Order = append(dst.Order, xo) + } + if q.limit >= 0 { + dst.Limit = &wrapperspb.Int32Value{Value: q.limit} + } + dst.Offset = q.offset + dst.StartCursor = q.start + dst.EndCursor = q.end + + if t := q.trans; t != nil { + if t.id == nil { + return errExpiredTransaction + } + if q.eventual { + return errors.New("datastore: cannot use EventualConsistency query in a transaction") + } + req.ReadOptions = &pb.ReadOptions{ + ConsistencyType: &pb.ReadOptions_Transaction{Transaction: t.id}, + } + } + + if q.eventual { + req.ReadOptions = &pb.ReadOptions{ConsistencyType: &pb.ReadOptions_ReadConsistency_{ReadConsistency: pb.ReadOptions_EVENTUAL}} + } + + req.QueryType = &pb.RunQueryRequest_Query{Query: dst} + return nil +} + +// Count returns the number of results for the given query. 
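+// For example (illustrative):
+//
+//	n, err := client.Count(ctx, NewQuery("Gopher").Limit(1000))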
+//
+// The running time and number of API calls made by Count scale linearly with
+// the sum of the query's offset and limit. Unless the result count is
+// expected to be small, it is best to specify a limit; otherwise Count will
+// continue until it finishes counting or the provided context expires.
+func (c *Client) Count(ctx context.Context, q *Query) (n int, err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.Query.Count")
+	defer func() { trace.EndSpan(ctx, err) }()
+
+	// Check that the query is well-formed.
+	if q.err != nil {
+		return 0, q.err
+	}
+
+	// Create a copy of the query, with keysOnly true (if we're not a projection,
+	// since the two are incompatible).
+	newQ := q.clone()
+	newQ.keysOnly = len(newQ.projection) == 0
+
+	// Create an iterator and use it to walk through the batches of results
+	// directly.
+	it := c.Run(ctx, newQ)
+	for {
+		err := it.nextBatch()
+		if err == iterator.Done {
+			return n, nil
+		}
+		if err != nil {
+			return 0, err
+		}
+		n += len(it.results)
+	}
+}
+
+// GetAll runs the provided query in the given context and returns all keys
+// that match that query, as well as appending the values to dst.
+//
+// dst must have type *[]S or *[]*S or *[]P, for some struct type S or some non-
+// interface, non-pointer type P such that P or *P implements PropertyLoadSaver.
+//
+// As a special case, *PropertyList is an invalid type for dst, even though a
+// PropertyList is a slice of structs. It is treated as invalid to avoid being
+// mistakenly passed when *[]PropertyList was intended.
+//
+// The keys returned by GetAll will be in a 1-1 correspondence with the entities
+// added to dst.
+//
+// If q is a ``keys-only'' query, GetAll ignores dst and only returns the keys.
+//
+// The running time and number of API calls made by GetAll scale linearly with
+// the sum of the query's offset and limit. Unless the result count is
+// expected to be small, it is best to specify a limit; otherwise GetAll will
+// continue until it finishes collecting results or the provided context
+// expires.
+func (c *Client) GetAll(ctx context.Context, q *Query, dst interface{}) (keys []*Key, err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.Query.GetAll")
+	defer func() { trace.EndSpan(ctx, err) }()
+
+	var (
+		dv               reflect.Value
+		mat              multiArgType
+		elemType         reflect.Type
+		errFieldMismatch error
+	)
+	if !q.keysOnly {
+		dv = reflect.ValueOf(dst)
+		if dv.Kind() != reflect.Ptr || dv.IsNil() {
+			return nil, ErrInvalidEntityType
+		}
+		dv = dv.Elem()
+		mat, elemType = checkMultiArg(dv)
+		if mat == multiArgTypeInvalid || mat == multiArgTypeInterface {
+			return nil, ErrInvalidEntityType
+		}
+	}
+
+	for t := c.Run(ctx, q); ; {
+		k, e, err := t.next()
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			return keys, err
+		}
+		if !q.keysOnly {
+			ev := reflect.New(elemType)
+			if elemType.Kind() == reflect.Map {
+				// This is a special case. The zero values of a map type are
+				// not immediately useful; they have to be make'd.
+				//
+				// Funcs and channels are similar, in that a zero value is not useful,
+				// but even a freshly make'd channel isn't useful: there's no fixed
+				// channel buffer size that is always going to be large enough, and
+				// there's no goroutine to drain the other end. Theoretically, these
+				// types could be supported, for example by sniffing for a constructor
+				// method or requiring prior registration, but for now it's not a
+				// frequent enough concern to be worth it.
Programmers can work around + // it by explicitly using Iterator.Next instead of the Query.GetAll + // convenience method. + x := reflect.MakeMap(elemType) + ev.Elem().Set(x) + } + if err = loadEntityProto(ev.Interface(), e); err != nil { + if _, ok := err.(*ErrFieldMismatch); ok { + // We continue loading entities even in the face of field mismatch errors. + // If we encounter any other error, that other error is returned. Otherwise, + // an ErrFieldMismatch is returned. + errFieldMismatch = err + } else { + return keys, err + } + } + if mat != multiArgTypeStructPtr { + ev = ev.Elem() + } + dv.Set(reflect.Append(dv, ev)) + } + keys = append(keys, k) + } + return keys, errFieldMismatch +} + +// Run runs the given query in the given context. +func (c *Client) Run(ctx context.Context, q *Query) *Iterator { + if q.err != nil { + return &Iterator{err: q.err} + } + t := &Iterator{ + ctx: ctx, + client: c, + limit: q.limit, + offset: q.offset, + keysOnly: q.keysOnly, + pageCursor: q.start, + entityCursor: q.start, + req: &pb.RunQueryRequest{ + ProjectId: c.dataset, + }, + } + + ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.Query.Run") + defer func() { trace.EndSpan(ctx, t.err) }() + if q.namespace != "" { + t.req.PartitionId = &pb.PartitionId{ + NamespaceId: q.namespace, + } + } + + if err := q.toProto(t.req); err != nil { + t.err = err + } + return t +} + +// Iterator is the result of running a query. +type Iterator struct { + ctx context.Context + client *Client + err error + + // results is the list of EntityResults still to be iterated over from the + // most recent API call. It will be nil if no requests have yet been issued. + results []*pb.EntityResult + // req is the request to send. It may be modified and used multiple times. + req *pb.RunQueryRequest + + // limit is the limit on the number of results this iterator should return. + // The zero value is used to prevent further fetches from the server. + // A negative value means unlimited. + limit int32 + // offset is the number of results that still need to be skipped. + offset int32 + // keysOnly records whether the query was keys-only (skip entity loading). + keysOnly bool + + // pageCursor is the compiled cursor for the next batch/page of result. + // TODO(djd): Can we delete this in favour of paging with the last + // entityCursor from each batch? + pageCursor []byte + // entityCursor is the compiled cursor of the next result. + entityCursor []byte +} + +// Next returns the key of the next result. When there are no more results, +// iterator.Done is returned as the error. +// +// If the query is not keys only and dst is non-nil, it also loads the entity +// stored for that key into the struct pointer or PropertyLoadSaver dst, with +// the same semantics and possible errors as for the Get function. +func (t *Iterator) Next(dst interface{}) (k *Key, err error) { + k, e, err := t.next() + if err != nil { + return nil, err + } + if dst != nil && !t.keysOnly { + err = loadEntityProto(dst, e) + } + return k, err +} + +func (t *Iterator) next() (*Key, *pb.Entity, error) { + // Fetch additional batches while there are no more results. + for t.err == nil && len(t.results) == 0 { + t.err = t.nextBatch() + } + if t.err != nil { + return nil, nil, t.err + } + + // Extract the next result, update cursors, and parse the entity's key. + e := t.results[0] + t.results = t.results[1:] + t.entityCursor = e.Cursor + if len(t.results) == 0 { + t.entityCursor = t.pageCursor // At the end of the batch. 
+	}
+	if e.Entity.Key == nil {
+		return nil, nil, errors.New("datastore: internal error: server did not return a key")
+	}
+	k, err := protoToKey(e.Entity.Key)
+	if err != nil || k.Incomplete() {
+		return nil, nil, errors.New("datastore: internal error: server returned an invalid key")
+	}
+
+	return k, e.Entity, nil
+}
+
+// nextBatch makes a single call to the server for a batch of results.
+func (t *Iterator) nextBatch() error {
+	if t.limit == 0 {
+		return iterator.Done // Short-circuits the zero-item response.
+	}
+
+	// Adjust the query with the latest start cursor, limit and offset.
+	q := t.req.GetQuery()
+	q.StartCursor = t.pageCursor
+	q.Offset = t.offset
+	if t.limit >= 0 {
+		q.Limit = &wrapperspb.Int32Value{Value: t.limit}
+	} else {
+		q.Limit = nil
+	}
+
+	// Run the query.
+	resp, err := t.client.client.RunQuery(t.ctx, t.req)
+	if err != nil {
+		return err
+	}
+
+	// Adjust any offset from skipped results.
+	skip := resp.Batch.SkippedResults
+	if skip < 0 {
+		return errors.New("datastore: internal error: negative number of skipped_results")
+	}
+	t.offset -= skip
+	if t.offset < 0 {
+		return errors.New("datastore: internal error: query skipped too many results")
+	}
+	if t.offset > 0 && len(resp.Batch.EntityResults) > 0 {
+		return errors.New("datastore: internal error: query returned results before requested offset")
+	}
+
+	// Adjust the limit.
+	if t.limit >= 0 {
+		t.limit -= int32(len(resp.Batch.EntityResults))
+		if t.limit < 0 {
+			return errors.New("datastore: internal error: query returned more results than the limit")
+		}
+	}
+
+	// If there are no more results available, set limit to zero to prevent
+	// further fetches. Otherwise, check that there is a next page cursor available.
+	if resp.Batch.MoreResults != pb.QueryResultBatch_NOT_FINISHED {
+		t.limit = 0
+	} else if resp.Batch.EndCursor == nil {
+		return errors.New("datastore: internal error: server did not return a cursor")
+	}
+
+	// Update cursors.
+	// If any results were skipped, use the SkippedCursor as the next entity cursor.
+	if skip > 0 {
+		t.entityCursor = resp.Batch.SkippedCursor
+	} else {
+		t.entityCursor = q.StartCursor
+	}
+	t.pageCursor = resp.Batch.EndCursor
+
+	t.results = resp.Batch.EntityResults
+	return nil
+}
+
+// Cursor returns a cursor for the iterator's current location.
+func (t *Iterator) Cursor() (c Cursor, err error) {
+	t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Query.Cursor")
+	defer func() { trace.EndSpan(t.ctx, err) }()
+
+	// If there is still an offset, we need to skip those results first.
+	for t.err == nil && t.offset > 0 {
+		t.err = t.nextBatch()
+	}
+
+	if t.err != nil && t.err != iterator.Done {
+		return Cursor{}, t.err
+	}
+
+	return Cursor{t.entityCursor}, nil
+}
+
+// Cursor is an iterator's position. It can be converted to and from an opaque
+// string. A cursor can be used from different HTTP requests, but only with a
+// query with the same kind, ancestor, filter and order constraints.
+//
+// The zero Cursor can be used to indicate that there is no start and/or end
+// constraint for a query.
+type Cursor struct {
+	cc []byte
+}
+
+// String returns a base-64 string representation of a cursor.
+func (c Cursor) String() string {
+	if c.cc == nil {
+		return ""
+	}
+
+	return strings.TrimRight(base64.URLEncoding.EncodeToString(c.cc), "=")
+}
+
+// DecodeCursor decodes a cursor from its base-64 string representation.
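//
// An illustrative round trip, assuming an initialized *Client and the Gopher
// type from this package's tests (a sketch, not part of this file; error
// handling elided):
//
//	it := client.Run(ctx, NewQuery("Gopher"))
//	for {
//		var g Gopher
//		if _, err := it.Next(&g); err != nil {
//			break // iterator.Done, or a real error
//		}
//	}
//	cur, _ := it.Cursor()      // position after the last result
//	s := cur.String()          // opaque, URL-safe representation
//	cur2, _ := DecodeCursor(s) // later, possibly in another HTTP request
//	it2 := client.Run(ctx, NewQuery("Gopher").Start(cur2))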
+func DecodeCursor(s string) (Cursor, error) { + if s == "" { + return Cursor{}, nil + } + if n := len(s) % 4; n != 0 { + s += strings.Repeat("=", 4-n) + } + b, err := base64.URLEncoding.DecodeString(s) + if err != nil { + return Cursor{}, err + } + return Cursor{b}, nil +} diff --git a/vendor/cloud.google.com/go/datastore/query_test.go b/vendor/cloud.google.com/go/datastore/query_test.go new file mode 100644 index 0000000..795fa6d --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/query_test.go @@ -0,0 +1,547 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "errors" + "fmt" + "reflect" + "sort" + "testing" + + "cloud.google.com/go/internal/testutil" + + "github.com/golang/protobuf/proto" + "github.com/google/go-cmp/cmp" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/datastore/v1" + "google.golang.org/grpc" +) + +var ( + key1 = &pb.Key{ + Path: []*pb.Key_PathElement{ + { + Kind: "Gopher", + IdType: &pb.Key_PathElement_Id{Id: 6}, + }, + }, + } + key2 = &pb.Key{ + Path: []*pb.Key_PathElement{ + { + Kind: "Gopher", + IdType: &pb.Key_PathElement_Id{Id: 6}, + }, + { + Kind: "Gopher", + IdType: &pb.Key_PathElement_Id{Id: 8}, + }, + }, + } +) + +type fakeClient struct { + pb.DatastoreClient + queryFn func(*pb.RunQueryRequest) (*pb.RunQueryResponse, error) + commitFn func(*pb.CommitRequest) (*pb.CommitResponse, error) +} + +func (c *fakeClient) RunQuery(_ context.Context, req *pb.RunQueryRequest, _ ...grpc.CallOption) (*pb.RunQueryResponse, error) { + return c.queryFn(req) +} + +func (c *fakeClient) Commit(_ context.Context, req *pb.CommitRequest, _ ...grpc.CallOption) (*pb.CommitResponse, error) { + return c.commitFn(req) +} + +func fakeRunQuery(in *pb.RunQueryRequest) (*pb.RunQueryResponse, error) { + expectedIn := &pb.RunQueryRequest{ + QueryType: &pb.RunQueryRequest_Query{Query: &pb.Query{ + Kind: []*pb.KindExpression{{Name: "Gopher"}}, + }}, + } + if !proto.Equal(in, expectedIn) { + return nil, fmt.Errorf("unsupported argument: got %v want %v", in, expectedIn) + } + return &pb.RunQueryResponse{ + Batch: &pb.QueryResultBatch{ + MoreResults: pb.QueryResultBatch_NO_MORE_RESULTS, + EntityResultType: pb.EntityResult_FULL, + EntityResults: []*pb.EntityResult{ + { + Entity: &pb.Entity{ + Key: key1, + Properties: map[string]*pb.Value{ + "Name": {ValueType: &pb.Value_StringValue{StringValue: "George"}}, + "Height": {ValueType: &pb.Value_IntegerValue{IntegerValue: 32}}, + }, + }, + }, + { + Entity: &pb.Entity{ + Key: key2, + Properties: map[string]*pb.Value{ + "Name": {ValueType: &pb.Value_StringValue{StringValue: "Rufus"}}, + // No height for Rufus. 
+ }, + }, + }, + }, + }, + }, nil +} + +type StructThatImplementsPLS struct{} + +func (StructThatImplementsPLS) Load(p []Property) error { return nil } +func (StructThatImplementsPLS) Save() ([]Property, error) { return nil, nil } + +var _ PropertyLoadSaver = StructThatImplementsPLS{} + +type StructPtrThatImplementsPLS struct{} + +func (*StructPtrThatImplementsPLS) Load(p []Property) error { return nil } +func (*StructPtrThatImplementsPLS) Save() ([]Property, error) { return nil, nil } + +var _ PropertyLoadSaver = &StructPtrThatImplementsPLS{} + +type PropertyMap map[string]Property + +func (m PropertyMap) Load(props []Property) error { + for _, p := range props { + m[p.Name] = p + } + return nil +} + +func (m PropertyMap) Save() ([]Property, error) { + props := make([]Property, 0, len(m)) + for _, p := range m { + props = append(props, p) + } + return props, nil +} + +var _ PropertyLoadSaver = PropertyMap{} + +type Gopher struct { + Name string + Height int +} + +// typeOfEmptyInterface is the type of interface{}, but we can't use +// reflect.TypeOf((interface{})(nil)) directly because TypeOf takes an +// interface{}. +var typeOfEmptyInterface = reflect.TypeOf((*interface{})(nil)).Elem() + +func TestCheckMultiArg(t *testing.T) { + testCases := []struct { + v interface{} + mat multiArgType + elemType reflect.Type + }{ + // Invalid cases. + {nil, multiArgTypeInvalid, nil}, + {Gopher{}, multiArgTypeInvalid, nil}, + {&Gopher{}, multiArgTypeInvalid, nil}, + {PropertyList{}, multiArgTypeInvalid, nil}, // This is a special case. + {PropertyMap{}, multiArgTypeInvalid, nil}, + {[]*PropertyList(nil), multiArgTypeInvalid, nil}, + {[]*PropertyMap(nil), multiArgTypeInvalid, nil}, + {[]**Gopher(nil), multiArgTypeInvalid, nil}, + {[]*interface{}(nil), multiArgTypeInvalid, nil}, + // Valid cases. + { + []PropertyList(nil), + multiArgTypePropertyLoadSaver, + reflect.TypeOf(PropertyList{}), + }, + { + []PropertyMap(nil), + multiArgTypePropertyLoadSaver, + reflect.TypeOf(PropertyMap{}), + }, + { + []StructThatImplementsPLS(nil), + multiArgTypePropertyLoadSaver, + reflect.TypeOf(StructThatImplementsPLS{}), + }, + { + []StructPtrThatImplementsPLS(nil), + multiArgTypePropertyLoadSaver, + reflect.TypeOf(StructPtrThatImplementsPLS{}), + }, + { + []Gopher(nil), + multiArgTypeStruct, + reflect.TypeOf(Gopher{}), + }, + { + []*Gopher(nil), + multiArgTypeStructPtr, + reflect.TypeOf(Gopher{}), + }, + { + []interface{}(nil), + multiArgTypeInterface, + typeOfEmptyInterface, + }, + } + for _, tc := range testCases { + mat, elemType := checkMultiArg(reflect.ValueOf(tc.v)) + if mat != tc.mat || elemType != tc.elemType { + t.Errorf("checkMultiArg(%T): got %v, %v want %v, %v", + tc.v, mat, elemType, tc.mat, tc.elemType) + } + } +} + +func TestSimpleQuery(t *testing.T) { + struct1 := Gopher{Name: "George", Height: 32} + struct2 := Gopher{Name: "Rufus"} + pList1 := PropertyList{ + { + Name: "Height", + Value: int64(32), + }, + { + Name: "Name", + Value: "George", + }, + } + pList2 := PropertyList{ + { + Name: "Name", + Value: "Rufus", + }, + } + pMap1 := PropertyMap{ + "Name": Property{ + Name: "Name", + Value: "George", + }, + "Height": Property{ + Name: "Height", + Value: int64(32), + }, + } + pMap2 := PropertyMap{ + "Name": Property{ + Name: "Name", + Value: "Rufus", + }, + } + + testCases := []struct { + dst interface{} + want interface{} + }{ + // The destination must have type *[]P, *[]S or *[]*S, for some non-interface + // type P such that *P implements PropertyLoadSaver, or for some struct type S. 
+ {new([]Gopher), &[]Gopher{struct1, struct2}}, + {new([]*Gopher), &[]*Gopher{&struct1, &struct2}}, + {new([]PropertyList), &[]PropertyList{pList1, pList2}}, + {new([]PropertyMap), &[]PropertyMap{pMap1, pMap2}}, + + // Any other destination type is invalid. + {0, nil}, + {Gopher{}, nil}, + {PropertyList{}, nil}, + {PropertyMap{}, nil}, + {[]int{}, nil}, + {[]Gopher{}, nil}, + {[]PropertyList{}, nil}, + {new(int), nil}, + {new(Gopher), nil}, + {new(PropertyList), nil}, // This is a special case. + {new(PropertyMap), nil}, + {new([]int), nil}, + {new([]map[int]int), nil}, + {new([]map[string]Property), nil}, + {new([]map[string]interface{}), nil}, + {new([]*int), nil}, + {new([]*map[int]int), nil}, + {new([]*map[string]Property), nil}, + {new([]*map[string]interface{}), nil}, + {new([]**Gopher), nil}, + {new([]*PropertyList), nil}, + {new([]*PropertyMap), nil}, + } + for _, tc := range testCases { + nCall := 0 + client := &Client{ + client: &fakeClient{ + queryFn: func(req *pb.RunQueryRequest) (*pb.RunQueryResponse, error) { + nCall++ + return fakeRunQuery(req) + }, + }, + } + ctx := context.Background() + + var ( + expectedErr error + expectedNCall int + ) + if tc.want == nil { + expectedErr = ErrInvalidEntityType + } else { + expectedNCall = 1 + } + keys, err := client.GetAll(ctx, NewQuery("Gopher"), tc.dst) + if err != expectedErr { + t.Errorf("dst type %T: got error %v, want %v", tc.dst, err, expectedErr) + continue + } + if nCall != expectedNCall { + t.Errorf("dst type %T: Context.Call was called an incorrect number of times: got %d want %d", tc.dst, nCall, expectedNCall) + continue + } + if err != nil { + continue + } + + key1 := IDKey("Gopher", 6, nil) + expectedKeys := []*Key{ + key1, + IDKey("Gopher", 8, key1), + } + if l1, l2 := len(keys), len(expectedKeys); l1 != l2 { + t.Errorf("dst type %T: got %d keys, want %d keys", tc.dst, l1, l2) + continue + } + for i, key := range keys { + if !keysEqual(key, expectedKeys[i]) { + t.Errorf("dst type %T: got key #%d %v, want %v", tc.dst, i, key, expectedKeys[i]) + continue + } + } + + // Make sure we sort any PropertyList items (the order is not deterministic). + if pLists, ok := tc.dst.(*[]PropertyList); ok { + for _, p := range *pLists { + sort.Sort(byName(p)) + } + } + + if !testutil.Equal(tc.dst, tc.want) { + t.Errorf("dst type %T: Entities\ngot %+v\nwant %+v", tc.dst, tc.dst, tc.want) + continue + } + } +} + +// keysEqual is like (*Key).Equal, but ignores the App ID. +func keysEqual(a, b *Key) bool { + for a != nil && b != nil { + if a.Kind != b.Kind || a.Name != b.Name || a.ID != b.ID { + return false + } + a, b = a.Parent, b.Parent + } + return a == b +} + +func TestQueriesAreImmutable(t *testing.T) { + // Test that deriving q2 from q1 does not modify q1. + q0 := NewQuery("foo") + q1 := NewQuery("foo") + q2 := q1.Offset(2) + if !testutil.Equal(q0, q1, cmp.AllowUnexported(Query{})) { + t.Errorf("q0 and q1 were not equal") + } + if testutil.Equal(q1, q2, cmp.AllowUnexported(Query{})) { + t.Errorf("q1 and q2 were equal") + } + + // Test that deriving from q4 twice does not conflict, even though + // q4 has a long list of order clauses. This tests that the arrays + // backed by a query's slice of orders are not shared. + f := func() *Query { + q := NewQuery("bar") + // 47 is an ugly number that is unlikely to be near a re-allocation + // point in repeated append calls. For example, it's not near a power + // of 2 or a multiple of 10. 
+ for i := 0; i < 47; i++ { + q = q.Order(fmt.Sprintf("x%d", i)) + } + return q + } + q3 := f().Order("y") + q4 := f() + q5 := q4.Order("y") + q6 := q4.Order("z") + if !testutil.Equal(q3, q5, cmp.AllowUnexported(Query{})) { + t.Errorf("q3 and q5 were not equal") + } + if testutil.Equal(q5, q6, cmp.AllowUnexported(Query{})) { + t.Errorf("q5 and q6 were equal") + } +} + +func TestFilterParser(t *testing.T) { + testCases := []struct { + filterStr string + wantOK bool + wantFieldName string + wantOp operator + }{ + // Supported ops. + {"x<", true, "x", lessThan}, + {"x <", true, "x", lessThan}, + {"x <", true, "x", lessThan}, + {" x < ", true, "x", lessThan}, + {"x <=", true, "x", lessEq}, + {"x =", true, "x", equal}, + {"x >=", true, "x", greaterEq}, + {"x >", true, "x", greaterThan}, + {"in >", true, "in", greaterThan}, + {"in>", true, "in", greaterThan}, + // Valid but (currently) unsupported ops. + {"x!=", false, "", 0}, + {"x !=", false, "", 0}, + {" x != ", false, "", 0}, + {"x IN", false, "", 0}, + {"x in", false, "", 0}, + // Invalid ops. + {"x EQ", false, "", 0}, + {"x lt", false, "", 0}, + {"x <>", false, "", 0}, + {"x >>", false, "", 0}, + {"x ==", false, "", 0}, + {"x =<", false, "", 0}, + {"x =>", false, "", 0}, + {"x !", false, "", 0}, + {"x ", false, "", 0}, + {"x", false, "", 0}, + // Quoted and interesting field names. + {"x > y =", true, "x > y", equal}, + {"` x ` =", true, " x ", equal}, + {`" x " =`, true, " x ", equal}, + {`" \"x " =`, true, ` "x `, equal}, + {`" x =`, false, "", 0}, + {`" x ="`, false, "", 0}, + {"` x \" =", false, "", 0}, + } + for _, tc := range testCases { + q := NewQuery("foo").Filter(tc.filterStr, 42) + if ok := q.err == nil; ok != tc.wantOK { + t.Errorf("%q: ok=%t, want %t", tc.filterStr, ok, tc.wantOK) + continue + } + if !tc.wantOK { + continue + } + if len(q.filter) != 1 { + t.Errorf("%q: len=%d, want %d", tc.filterStr, len(q.filter), 1) + continue + } + got, want := q.filter[0], filter{tc.wantFieldName, tc.wantOp, 42} + if got != want { + t.Errorf("%q: got %v, want %v", tc.filterStr, got, want) + continue + } + } +} + +func TestNamespaceQuery(t *testing.T) { + gotNamespace := make(chan string, 1) + ctx := context.Background() + client := &Client{ + client: &fakeClient{ + queryFn: func(req *pb.RunQueryRequest) (*pb.RunQueryResponse, error) { + if part := req.PartitionId; part != nil { + gotNamespace <- part.NamespaceId + } else { + gotNamespace <- "" + } + return nil, errors.New("not implemented") + }, + }, + } + + var gs []Gopher + + client.GetAll(ctx, NewQuery("gopher"), &gs) + if got, want := <-gotNamespace, ""; got != want { + t.Errorf("GetAll: got namespace %q, want %q", got, want) + } + client.Count(ctx, NewQuery("gopher")) + if got, want := <-gotNamespace, ""; got != want { + t.Errorf("Count: got namespace %q, want %q", got, want) + } + + const ns = "not_default" + client.GetAll(ctx, NewQuery("gopher").Namespace(ns), &gs) + if got, want := <-gotNamespace, ns; got != want { + t.Errorf("GetAll: got namespace %q, want %q", got, want) + } + client.Count(ctx, NewQuery("gopher").Namespace(ns)) + if got, want := <-gotNamespace, ns; got != want { + t.Errorf("Count: got namespace %q, want %q", got, want) + } +} + +func TestReadOptions(t *testing.T) { + tid := []byte{1} + for _, test := range []struct { + q *Query + want *pb.ReadOptions + }{ + { + q: NewQuery(""), + want: nil, + }, + { + q: NewQuery("").Transaction(nil), + want: nil, + }, + { + q: NewQuery("").Transaction(&Transaction{id: tid}), + want: &pb.ReadOptions{ + ConsistencyType: 
&pb.ReadOptions_Transaction{
+					Transaction: tid,
+				},
+			},
+		},
+		{
+			q: NewQuery("").EventualConsistency(),
+			want: &pb.ReadOptions{
+				ConsistencyType: &pb.ReadOptions_ReadConsistency_{
+					ReadConsistency: pb.ReadOptions_EVENTUAL,
+				},
+			},
+		},
+	} {
+		req := &pb.RunQueryRequest{}
+		if err := test.q.toProto(req); err != nil {
+			t.Fatalf("%+v: got %v, want no error", test.q, err)
+		}
+		if got := req.ReadOptions; !proto.Equal(got, test.want) {
+			t.Errorf("%+v:\ngot %+v\nwant %+v", test.q, got, test.want)
+		}
+	}
+	// Test errors.
+	for _, q := range []*Query{
+		NewQuery("").Transaction(&Transaction{id: nil}),
+		NewQuery("").Transaction(&Transaction{id: tid}).EventualConsistency(),
+	} {
+		req := &pb.RunQueryRequest{}
+		if err := q.toProto(req); err == nil {
+			t.Errorf("%+v: got nil, wanted error", q)
+		}
+	}
+}
diff --git a/vendor/cloud.google.com/go/datastore/save.go b/vendor/cloud.google.com/go/datastore/save.go
new file mode 100644
index 0000000..b96d07c
--- /dev/null
+++ b/vendor/cloud.google.com/go/datastore/save.go
@@ -0,0 +1,462 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package datastore
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"time"
+	"unicode/utf8"
+
+	timepb "github.com/golang/protobuf/ptypes/timestamp"
+	pb "google.golang.org/genproto/googleapis/datastore/v1"
+	llpb "google.golang.org/genproto/googleapis/type/latlng"
+)
+
+type saveOpts struct {
+	noIndex   bool
+	flatten   bool
+	omitEmpty bool
+}
+
+// saveEntity converts a PropertyLoadSaver or struct pointer into a pb.Entity.
+func saveEntity(key *Key, src interface{}) (*pb.Entity, error) {
+	var err error
+	var props []Property
+	if e, ok := src.(PropertyLoadSaver); ok {
+		props, err = e.Save()
+	} else {
+		props, err = SaveStruct(src)
+	}
+	if err != nil {
+		return nil, err
+	}
+	return propertiesToProto(key, props)
+}
+
+// TODO(djd): Convert this and below to return ([]Property, error).
+func saveStructProperty(props *[]Property, name string, opts saveOpts, v reflect.Value) error {
+	p := Property{
+		Name:    name,
+		NoIndex: opts.noIndex,
+	}
+
+	if opts.omitEmpty && isEmptyValue(v) {
+		return nil
+	}
+
+	// First check if field type implements PLS. If so, use PLS to
+	// save.
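	// (A PropertyLoadSaver implementation therefore takes precedence over
	// the reflection-based handling below.)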
+	ok, err := plsFieldSave(props, p, name, opts, v)
+	if err != nil {
+		return err
+	}
+	if ok {
+		return nil
+	}
+
+	switch x := v.Interface().(type) {
+	case *Key, time.Time, GeoPoint:
+		p.Value = x
+	default:
+		switch v.Kind() {
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			p.Value = v.Int()
+		case reflect.Bool:
+			p.Value = v.Bool()
+		case reflect.String:
+			p.Value = v.String()
+		case reflect.Float32, reflect.Float64:
+			p.Value = v.Float()
+		case reflect.Slice:
+			if v.Type().Elem().Kind() == reflect.Uint8 {
+				p.Value = v.Bytes()
+			} else {
+				return saveSliceProperty(props, name, opts, v)
+			}
+		case reflect.Ptr:
+			if isValidPointerType(v.Type().Elem()) {
+				if v.IsNil() {
+					// Nil pointer becomes a nil property value (unless omitempty, handled above).
+					p.Value = nil
+					*props = append(*props, p)
+					return nil
+				}
+				return saveStructProperty(props, name, opts, v.Elem())
+			}
+			if v.Type().Elem().Kind() != reflect.Struct {
+				return fmt.Errorf("datastore: unsupported struct field type: %s", v.Type())
+			}
+			// Pointer to struct is a special case.
+			if v.IsNil() {
+				return nil
+			}
+			v = v.Elem()
+			fallthrough
+		case reflect.Struct:
+			if !v.CanAddr() {
+				return fmt.Errorf("datastore: unsupported struct field: value is unaddressable")
+			}
+			vi := v.Addr().Interface()
+
+			sub, err := newStructPLS(vi)
+			if err != nil {
+				return fmt.Errorf("datastore: unsupported struct field: %v", err)
+			}
+
+			if opts.flatten {
+				return sub.save(props, opts, name+".")
+			}
+
+			var subProps []Property
+			err = sub.save(&subProps, opts, "")
+			if err != nil {
+				return err
+			}
+			subKey, err := sub.key(v)
+			if err != nil {
+				return err
+			}
+
+			p.Value = &Entity{
+				Key:        subKey,
+				Properties: subProps,
+			}
+		}
+	}
+	if p.Value == nil {
+		return fmt.Errorf("datastore: unsupported struct field type: %v", v.Type())
+	}
+	*props = append(*props, p)
+	return nil
+}
+
+// plsFieldSave first tries to convert v's value to a PLS, then v's addressed
+// value to a PLS. If neither succeeds, plsFieldSave returns false for the
+// first return value.
+// If v is successfully converted to a PLS, plsFieldSave will then add the
+// Value to property p by way of the PLS's Save method, and append it to props.
+//
+// If the flatten option is present in opts, name must be prepended to each property's
+// name before it is appended to props. E.g. if name were "A" and a subproperty's name
+// were "B", the resultant name of the property to be appended to props would be "A.B".
+func plsFieldSave(props *[]Property, p Property, name string, opts saveOpts, v reflect.Value) (ok bool, err error) {
+	vpls, err := plsForSave(v)
+	if err != nil {
+		return false, err
+	}
+
+	if vpls == nil {
+		return false, nil
+	}
+
+	subProps, err := vpls.Save()
+	if err != nil {
+		return true, err
+	}
+
+	if opts.flatten {
+		for _, subp := range subProps {
+			subp.Name = name + "." + subp.Name
+			*props = append(*props, subp)
+		}
+		return true, nil
+	}
+
+	p.Value = &Entity{Properties: subProps}
+	*props = append(*props, p)
+
+	return true, nil
+}
+
+// key extracts the *Key struct field from struct v based on the structCodec of s.
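//
// For example (a sketch; the Task type is hypothetical, but the tag matches
// keyFieldName, "__key__", as used in this package's tests):
//
//	type Task struct {
//		Description string
//		K           *Key `datastore:"__key__"`
//	}
//
// key then returns the value of K for use as the nested entity's key.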
+func (s structPLS) key(v reflect.Value) (*Key, error) {
+	if v.Kind() != reflect.Struct {
+		return nil, errors.New("datastore: cannot save key of non-struct type")
+	}
+
+	keyField := s.codec.Match(keyFieldName)
+
+	if keyField == nil {
+		return nil, nil
+	}
+
+	f := v.FieldByIndex(keyField.Index)
+	k, ok := f.Interface().(*Key)
+	if !ok {
+		return nil, fmt.Errorf("datastore: %s field on struct %T is not a *datastore.Key", keyFieldName, v.Interface())
+	}
+
+	return k, nil
+}
+
+func saveSliceProperty(props *[]Property, name string, opts saveOpts, v reflect.Value) error {
+	// Easy case: if the slice is empty, we're done.
+	if v.Len() == 0 {
+		return nil
+	}
+	// Work out the properties generated by the first element in the slice. This will
+	// usually be a single property, but will be more if this is a slice of structs.
+	var headProps []Property
+	if err := saveStructProperty(&headProps, name, opts, v.Index(0)); err != nil {
+		return err
+	}
+
+	// Convert the first element's properties into slice properties, and
+	// keep track of the values in a map.
+	values := make(map[string][]interface{}, len(headProps))
+	for _, p := range headProps {
+		values[p.Name] = append(make([]interface{}, 0, v.Len()), p.Value)
+	}
+
+	// Collect the properties for the subsequent elements.
+	for i := 1; i < v.Len(); i++ {
+		elemProps := make([]Property, 0, len(headProps))
+		if err := saveStructProperty(&elemProps, name, opts, v.Index(i)); err != nil {
+			return err
+		}
+		for _, p := range elemProps {
+			v, ok := values[p.Name]
+			if !ok {
+				return fmt.Errorf("datastore: unexpected property %q in elem %d of slice", p.Name, i)
+			}
+			values[p.Name] = append(v, p.Value)
+		}
+	}
+
+	// Convert to the final properties.
+	for _, p := range headProps {
+		p.Value = values[p.Name]
+		*props = append(*props, p)
+	}
+	return nil
+}
+
+func (s structPLS) Save() ([]Property, error) {
+	var props []Property
+	if err := s.save(&props, saveOpts{}, ""); err != nil {
+		return nil, err
+	}
+	return props, nil
+}
+
+func (s structPLS) save(props *[]Property, opts saveOpts, prefix string) error {
+	for _, f := range s.codec {
+		name := prefix + f.Name
+		v := getField(s.v, f.Index)
+		if !v.IsValid() || !v.CanSet() {
+			continue
+		}
+
+		var tagOpts saveOpts
+		if f.ParsedTag != nil {
+			tagOpts = f.ParsedTag.(saveOpts)
+		}
+
+		var opts1 saveOpts
+		opts1.noIndex = opts.noIndex || tagOpts.noIndex
+		opts1.flatten = opts.flatten || tagOpts.flatten
+		opts1.omitEmpty = tagOpts.omitEmpty // don't propagate
+		if err := saveStructProperty(props, name, opts1, v); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// getField returns the field from v at the given index path.
+// If it encounters a nil-valued field in the path, getField
+// stops and returns a zero-valued reflect.Value, preventing the
+// panic that would have been caused by reflect's FieldByIndex.
+func getField(v reflect.Value, index []int) reflect.Value {
+	var zero reflect.Value
+	if v.Type().Kind() != reflect.Struct {
+		return zero
+	}
+
+	for _, i := range index {
+		if v.Kind() == reflect.Ptr && v.Type().Elem().Kind() == reflect.Struct {
+			if v.IsNil() {
+				return zero
+			}
+			v = v.Elem()
+		}
+		v = v.Field(i)
+	}
+	return v
+}
+
+func propertiesToProto(key *Key, props []Property) (*pb.Entity, error) {
+	e := &pb.Entity{
+		Key:        keyToProto(key),
+		Properties: map[string]*pb.Value{},
+	}
+	indexedProps := 0
+	for _, p := range props {
+		// Do not send a Key value as a field to datastore.
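		// (The key travels on the entity itself, via keyToProto above, so a
		// separate __key__ property would be redundant.)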
+ if p.Name == keyFieldName { + continue + } + + val, err := interfaceToProto(p.Value, p.NoIndex) + if err != nil { + return nil, fmt.Errorf("datastore: %v for a Property with Name %q", err, p.Name) + } + if !p.NoIndex { + rVal := reflect.ValueOf(p.Value) + if rVal.Kind() == reflect.Slice && rVal.Type().Elem().Kind() != reflect.Uint8 { + indexedProps += rVal.Len() + } else { + indexedProps++ + } + } + if indexedProps > maxIndexedProperties { + return nil, errors.New("datastore: too many indexed properties") + } + + if _, ok := e.Properties[p.Name]; ok { + return nil, fmt.Errorf("datastore: duplicate Property with Name %q", p.Name) + } + e.Properties[p.Name] = val + } + return e, nil +} + +func interfaceToProto(iv interface{}, noIndex bool) (*pb.Value, error) { + val := &pb.Value{ExcludeFromIndexes: noIndex} + switch v := iv.(type) { + case int: + val.ValueType = &pb.Value_IntegerValue{IntegerValue: int64(v)} + case int32: + val.ValueType = &pb.Value_IntegerValue{IntegerValue: int64(v)} + case int64: + val.ValueType = &pb.Value_IntegerValue{IntegerValue: v} + case bool: + val.ValueType = &pb.Value_BooleanValue{BooleanValue: v} + case string: + if len(v) > 1500 && !noIndex { + return nil, errors.New("string property too long to index") + } + if !utf8.ValidString(v) { + return nil, fmt.Errorf("string is not valid utf8: %q", v) + } + val.ValueType = &pb.Value_StringValue{StringValue: v} + case float32: + val.ValueType = &pb.Value_DoubleValue{DoubleValue: float64(v)} + case float64: + val.ValueType = &pb.Value_DoubleValue{DoubleValue: v} + case *Key: + if v == nil { + val.ValueType = &pb.Value_NullValue{} + } else { + val.ValueType = &pb.Value_KeyValue{KeyValue: keyToProto(v)} + } + case GeoPoint: + if !v.Valid() { + return nil, errors.New("invalid GeoPoint value") + } + val.ValueType = &pb.Value_GeoPointValue{GeoPointValue: &llpb.LatLng{ + Latitude: v.Lat, + Longitude: v.Lng, + }} + case time.Time: + if v.Before(minTime) || v.After(maxTime) { + return nil, errors.New("time value out of range") + } + val.ValueType = &pb.Value_TimestampValue{TimestampValue: &timepb.Timestamp{ + Seconds: v.Unix(), + Nanos: int32(v.Nanosecond()), + }} + case []byte: + if len(v) > 1500 && !noIndex { + return nil, errors.New("[]byte property too long to index") + } + val.ValueType = &pb.Value_BlobValue{BlobValue: v} + case *Entity: + e, err := propertiesToProto(v.Key, v.Properties) + if err != nil { + return nil, err + } + val.ValueType = &pb.Value_EntityValue{EntityValue: e} + case []interface{}: + arr := make([]*pb.Value, 0, len(v)) + for i, v := range v { + elem, err := interfaceToProto(v, noIndex) + if err != nil { + return nil, fmt.Errorf("%v at index %d", err, i) + } + arr = append(arr, elem) + } + val.ValueType = &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{Values: arr}} + // ArrayValues have ExcludeFromIndexes set on the individual items, rather + // than the top-level value. + val.ExcludeFromIndexes = false + default: + rv := reflect.ValueOf(iv) + if !rv.IsValid() { + val.ValueType = &pb.Value_NullValue{} + } else if rv.Kind() == reflect.Ptr { // non-nil pointer: dereference + if rv.IsNil() { + val.ValueType = &pb.Value_NullValue{} + return val, nil + } + return interfaceToProto(rv.Elem().Interface(), noIndex) + } else { + return nil, fmt.Errorf("invalid Value type %T", iv) + } + } + // TODO(jbd): Support EntityValue. + return val, nil +} + +// isEmptyValue is taken from the encoding/json package in the +// standard library. 
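// It reports whether v holds its type's zero-ish value (an empty collection,
// zero number, false, or a nil pointer/interface); saveStructProperty consults
// it when the omitempty tag option is set, skipping the field entirely.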
+func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +// isValidPointerType reports whether a struct field can be a pointer to type t +// for the purposes of saving and loading. +func isValidPointerType(t reflect.Type) bool { + if t == typeOfTime || t == typeOfGeoPoint { + return true + } + switch t.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return true + case reflect.Bool: + return true + case reflect.String: + return true + case reflect.Float32, reflect.Float64: + return true + } + return false +} diff --git a/vendor/cloud.google.com/go/datastore/save_test.go b/vendor/cloud.google.com/go/datastore/save_test.go new file mode 100644 index 0000000..fbef3b7 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/save_test.go @@ -0,0 +1,285 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + + pb "google.golang.org/genproto/googleapis/datastore/v1" +) + +func TestInterfaceToProtoNil(t *testing.T) { + // A nil *Key, or a nil value of any other pointer type, should convert to a NullValue. 
+ for _, in := range []interface{}{ + (*Key)(nil), + (*int)(nil), + (*string)(nil), + (*bool)(nil), + (*float64)(nil), + (*GeoPoint)(nil), + (*time.Time)(nil), + } { + got, err := interfaceToProto(in, false) + if err != nil { + t.Fatalf("%T: %v", in, err) + } + _, ok := got.ValueType.(*pb.Value_NullValue) + if !ok { + t.Errorf("%T: got: %T\nwant: %T", in, got.ValueType, &pb.Value_NullValue{}) + } + } +} + +func TestSaveEntityNested(t *testing.T) { + type WithKey struct { + X string + I int + K *Key `datastore:"__key__"` + } + + type NestedWithKey struct { + Y string + N WithKey + } + + type WithoutKey struct { + X string + I int + } + + type NestedWithoutKey struct { + Y string + N WithoutKey + } + + type a struct { + S string + } + + type UnexpAnonym struct { + a + } + + testCases := []struct { + desc string + src interface{} + key *Key + want *pb.Entity + }{ + { + desc: "nested entity with key", + src: &NestedWithKey{ + Y: "yyy", + N: WithKey{ + X: "two", + I: 2, + K: testKey1a, + }, + }, + key: testKey0, + want: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Y": {ValueType: &pb.Value_StringValue{StringValue: "yyy"}}, + "N": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Key: keyToProto(testKey1a), + Properties: map[string]*pb.Value{ + "X": {ValueType: &pb.Value_StringValue{StringValue: "two"}}, + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, + }, + }, + }}, + }, + }, + }, + { + desc: "nested entity with incomplete key", + src: &NestedWithKey{ + Y: "yyy", + N: WithKey{ + X: "two", + I: 2, + K: incompleteKey, + }, + }, + key: testKey0, + want: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Y": {ValueType: &pb.Value_StringValue{StringValue: "yyy"}}, + "N": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Key: keyToProto(incompleteKey), + Properties: map[string]*pb.Value{ + "X": {ValueType: &pb.Value_StringValue{StringValue: "two"}}, + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, + }, + }, + }}, + }, + }, + }, + { + desc: "nested entity without key", + src: &NestedWithoutKey{ + Y: "yyy", + N: WithoutKey{ + X: "two", + I: 2, + }, + }, + key: testKey0, + want: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Y": {ValueType: &pb.Value_StringValue{StringValue: "yyy"}}, + "N": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "X": {ValueType: &pb.Value_StringValue{StringValue: "two"}}, + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, + }, + }, + }}, + }, + }, + }, + { + desc: "key at top level", + src: &WithKey{ + X: "three", + I: 3, + K: testKey0, + }, + key: testKey0, + want: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "X": {ValueType: &pb.Value_StringValue{StringValue: "three"}}, + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 3}}, + }, + }, + }, + { + desc: "nested unexported anonymous struct field", + src: &UnexpAnonym{ + a{S: "hello"}, + }, + key: testKey0, + want: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "S": {ValueType: &pb.Value_StringValue{StringValue: "hello"}}, + }, + }, + }, + } + + for _, tc := range testCases { + got, err := saveEntity(tc.key, tc.src) + if err != nil { + t.Errorf("saveEntity: %s: %v", tc.desc, err) + continue + } + + if !testutil.Equal(tc.want, got) { + t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, got, tc.want) + } + } +} + +func TestSavePointers(t *testing.T) 
{ + for _, test := range []struct { + desc string + in interface{} + want []Property + }{ + { + desc: "nil pointers save as nil-valued properties", + in: &Pointers{}, + want: []Property{ + Property{Name: "Pi", Value: nil}, + Property{Name: "Ps", Value: nil}, + Property{Name: "Pb", Value: nil}, + Property{Name: "Pf", Value: nil}, + Property{Name: "Pg", Value: nil}, + Property{Name: "Pt", Value: nil}, + }, + }, + { + desc: "nil omitempty pointers not saved", + in: &PointersOmitEmpty{}, + want: []Property(nil), + }, + { + desc: "non-nil zero-valued pointers save as zero values", + in: populatedPointers(), + want: []Property{ + Property{Name: "Pi", Value: int64(0)}, + Property{Name: "Ps", Value: ""}, + Property{Name: "Pb", Value: false}, + Property{Name: "Pf", Value: 0.0}, + Property{Name: "Pg", Value: GeoPoint{}}, + Property{Name: "Pt", Value: time.Time{}}, + }, + }, + { + desc: "non-nil non-zero-valued pointers save as the appropriate values", + in: func() *Pointers { + p := populatedPointers() + *p.Pi = 1 + *p.Ps = "x" + *p.Pb = true + *p.Pf = 3.14 + *p.Pg = GeoPoint{Lat: 1, Lng: 2} + *p.Pt = time.Unix(100, 0) + return p + }(), + want: []Property{ + Property{Name: "Pi", Value: int64(1)}, + Property{Name: "Ps", Value: "x"}, + Property{Name: "Pb", Value: true}, + Property{Name: "Pf", Value: 3.14}, + Property{Name: "Pg", Value: GeoPoint{Lat: 1, Lng: 2}}, + Property{Name: "Pt", Value: time.Unix(100, 0)}, + }, + }, + } { + got, err := SaveStruct(test.in) + if err != nil { + t.Fatalf("%s: %v", test.desc, err) + } + if !testutil.Equal(got, test.want) { + t.Errorf("%s\ngot %#v\nwant %#v\n", test.desc, got, test.want) + } + } +} + +func TestSaveEmptySlice(t *testing.T) { + // Zero-length slice fields are not saved. + for _, slice := range [][]string{nil, {}} { + got, err := SaveStruct(&struct{ S []string }{S: slice}) + if err != nil { + t.Fatal(err) + } + if len(got) != 0 { + t.Errorf("%#v: got %d properties, wanted zero", slice, len(got)) + } + } +} diff --git a/vendor/cloud.google.com/go/datastore/testdata/index.yaml b/vendor/cloud.google.com/go/datastore/testdata/index.yaml new file mode 100644 index 0000000..47bc9de --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/testdata/index.yaml @@ -0,0 +1,41 @@ +indexes: + +- kind: SQChild + ancestor: yes + properties: + - name: T + - name: I + +- kind: SQChild + ancestor: yes + properties: + - name: T + - name: I + direction: desc + +- kind: SQChild + ancestor: yes + properties: + - name: I + - name: T + - name: U + +- kind: SQChild + ancestor: yes + properties: + - name: I + - name: T + - name: U + +- kind: SQChild + ancestor: yes + properties: + - name: T + - name: J + +- kind: SQChild + ancestor: yes + properties: + - name: T + - name: J + - name: U \ No newline at end of file diff --git a/vendor/cloud.google.com/go/datastore/time.go b/vendor/cloud.google.com/go/datastore/time.go new file mode 100644 index 0000000..e7f6a19 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/time.go @@ -0,0 +1,36 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "math" + "time" +) + +var ( + minTime = time.Unix(int64(math.MinInt64)/1e6, (int64(math.MinInt64)%1e6)*1e3) + maxTime = time.Unix(int64(math.MaxInt64)/1e6, (int64(math.MaxInt64)%1e6)*1e3) +) + +func toUnixMicro(t time.Time) int64 { + // We cannot use t.UnixNano() / 1e3 because we want to handle times more than + // 2^63 nanoseconds (which is about 292 years) away from 1970, and those cannot + // be represented in the numerator of a single int64 divide. + return t.Unix()*1e6 + int64(t.Nanosecond()/1e3) +} + +func fromUnixMicro(t int64) time.Time { + return time.Unix(t/1e6, (t%1e6)*1e3) +} diff --git a/vendor/cloud.google.com/go/datastore/time_test.go b/vendor/cloud.google.com/go/datastore/time_test.go new file mode 100644 index 0000000..5cc846c --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/time_test.go @@ -0,0 +1,75 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "testing" + "time" +) + +func TestUnixMicro(t *testing.T) { + // Test that all these time.Time values survive a round trip to unix micros. + testCases := []time.Time{ + {}, + time.Date(2, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(23, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(234, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1000, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1600, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1700, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1800, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC), + time.Unix(-1e6, -1000), + time.Unix(-1e6, 0), + time.Unix(-1e6, +1000), + time.Unix(-60, -1000), + time.Unix(-60, 0), + time.Unix(-60, +1000), + time.Unix(-1, -1000), + time.Unix(-1, 0), + time.Unix(-1, +1000), + time.Unix(0, -3000), + time.Unix(0, -2000), + time.Unix(0, -1000), + time.Unix(0, 0), + time.Unix(0, +1000), + time.Unix(0, +2000), + time.Unix(+60, -1000), + time.Unix(+60, 0), + time.Unix(+60, +1000), + time.Unix(+1e6, -1000), + time.Unix(+1e6, 0), + time.Unix(+1e6, +1000), + time.Date(1999, 12, 31, 23, 59, 59, 999000, time.UTC), + time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(2006, 1, 2, 15, 4, 5, 678000, time.UTC), + time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), + time.Date(3456, 1, 1, 0, 0, 0, 0, time.UTC), + } + for _, tc := range testCases { + got := fromUnixMicro(toUnixMicro(tc)) + if !got.Equal(tc) { + t.Errorf("got %q, want %q", got, tc) + } + } + + // Test that a time.Time that isn't an integral number of microseconds + // is not perfectly reconstructed after a round trip. 
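	// (123ns is below the 1µs resolution, so toUnixMicro truncates it to 0
	// and the round trip yields time.Unix(0, 0).)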
+	t0 := time.Unix(0, 123)
+	t1 := fromUnixMicro(toUnixMicro(t0))
+	if t1.Nanosecond()%1000 != 0 || t0.Nanosecond()%1000 == 0 {
+		t.Errorf("quantization to µs: got %q with %d ns, started with %d ns", t1, t1.Nanosecond(), t0.Nanosecond())
+	}
+}
diff --git a/vendor/cloud.google.com/go/datastore/transaction.go b/vendor/cloud.google.com/go/datastore/transaction.go
new file mode 100644
index 0000000..b480d60
--- /dev/null
+++ b/vendor/cloud.google.com/go/datastore/transaction.go
@@ -0,0 +1,409 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package datastore
+
+import (
+	"errors"
+
+	"cloud.google.com/go/internal/trace"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+
+	pb "google.golang.org/genproto/googleapis/datastore/v1"
+)
+
+// ErrConcurrentTransaction is returned when a transaction is rolled back due
+// to a conflict with a concurrent transaction.
+var ErrConcurrentTransaction = errors.New("datastore: concurrent transaction")
+
+var errExpiredTransaction = errors.New("datastore: transaction expired")
+
+type transactionSettings struct {
+	attempts int
+	readOnly bool
+	prevID   []byte // ID of the transaction to retry
+}
+
+// newTransactionSettings creates a transactionSettings with a given TransactionOption slice.
+// Unconfigured options will be set to default values.
+func newTransactionSettings(opts []TransactionOption) *transactionSettings {
+	s := &transactionSettings{attempts: 3}
+	for _, o := range opts {
+		o.apply(s)
+	}
+	return s
+}
+
+// TransactionOption configures the way a transaction is executed.
+type TransactionOption interface {
+	apply(*transactionSettings)
+}
+
+// MaxAttempts returns a TransactionOption that overrides the default of 3
+// attempts.
+func MaxAttempts(attempts int) TransactionOption {
+	return maxAttempts(attempts)
+}
+
+type maxAttempts int
+
+func (w maxAttempts) apply(s *transactionSettings) {
+	if w > 0 {
+		s.attempts = int(w)
+	}
+}
+
+// ReadOnly is a TransactionOption that marks the transaction as read-only.
+var ReadOnly TransactionOption
+
+func init() {
+	ReadOnly = readOnly{}
+}
+
+type readOnly struct{}
+
+func (readOnly) apply(s *transactionSettings) {
+	s.readOnly = true
+}
+
+// Transaction represents a set of datastore operations to be committed atomically.
+//
+// Operations are enqueued by calling the Put and Delete methods on Transaction
+// (or their Multi-equivalents). These operations are only committed when the
+// Commit method is invoked. To ensure consistency, reads must be performed by
+// using Transaction's Get method or by using the Transaction method when
+// building a query.
+//
+// A Transaction must be committed or rolled back exactly once.
+type Transaction struct {
+	id        []byte
+	client    *Client
+	ctx       context.Context
+	mutations []*pb.Mutation      // The mutations to apply.
+	pending   map[int]*PendingKey // Map from mutation index to incomplete keys pending transaction completion.
+} + +// NewTransaction starts a new transaction. +func (c *Client) NewTransaction(ctx context.Context, opts ...TransactionOption) (t *Transaction, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.NewTransaction") + defer func() { trace.EndSpan(ctx, err) }() + + for _, o := range opts { + if _, ok := o.(maxAttempts); ok { + return nil, errors.New("datastore: NewTransaction does not accept MaxAttempts option") + } + } + return c.newTransaction(ctx, newTransactionSettings(opts)) +} + +func (c *Client) newTransaction(ctx context.Context, s *transactionSettings) (*Transaction, error) { + req := &pb.BeginTransactionRequest{ProjectId: c.dataset} + if s.readOnly { + req.TransactionOptions = &pb.TransactionOptions{ + Mode: &pb.TransactionOptions_ReadOnly_{ReadOnly: &pb.TransactionOptions_ReadOnly{}}, + } + } else if s.prevID != nil { + req.TransactionOptions = &pb.TransactionOptions{ + Mode: &pb.TransactionOptions_ReadWrite_{ReadWrite: &pb.TransactionOptions_ReadWrite{ + PreviousTransaction: s.prevID, + }}, + } + } + resp, err := c.client.BeginTransaction(ctx, req) + if err != nil { + return nil, err + } + return &Transaction{ + id: resp.Transaction, + ctx: ctx, + client: c, + mutations: nil, + pending: make(map[int]*PendingKey), + }, nil +} + +// RunInTransaction runs f in a transaction. f is invoked with a Transaction +// that f should use for all the transaction's datastore operations. +// +// f must not call Commit or Rollback on the provided Transaction. +// +// If f returns nil, RunInTransaction commits the transaction, +// returning the Commit and a nil error if it succeeds. If the commit fails due +// to a conflicting transaction, RunInTransaction retries f with a new +// Transaction. It gives up and returns ErrConcurrentTransaction after three +// failed attempts (or as configured with MaxAttempts). +// +// If f returns non-nil, then the transaction will be rolled back and +// RunInTransaction will return the same error. The function f is not retried. +// +// Note that when f returns, the transaction is not committed. Calling code +// must not assume that any of f's changes have been committed until +// RunInTransaction returns nil. +// +// Since f may be called multiple times, f should usually be idempotent – that +// is, it should have the same result when called multiple times. Note that +// Transaction.Get will append when unmarshalling slice fields, so it is not +// necessarily idempotent. +func (c *Client) RunInTransaction(ctx context.Context, f func(tx *Transaction) error, opts ...TransactionOption) (cmt *Commit, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.RunInTransaction") + defer func() { trace.EndSpan(ctx, err) }() + + settings := newTransactionSettings(opts) + for n := 0; n < settings.attempts; n++ { + tx, err := c.newTransaction(ctx, settings) + if err != nil { + return nil, err + } + if err := f(tx); err != nil { + tx.Rollback() + return nil, err + } + if cmt, err := tx.Commit(); err != ErrConcurrentTransaction { + return cmt, err + } + // Pass this transaction's ID to the retry transaction to preserve + // transaction priority. + if !settings.readOnly { + settings.prevID = tx.id + } + } + return nil, ErrConcurrentTransaction +} + +// Commit applies the enqueued operations atomically. 
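//
// An illustrative sketch of resolving a PendingKey through the returned
// *Commit (the Task type is hypothetical; error handling elided):
//
//	tx, _ := client.NewTransaction(ctx)
//	pk, _ := tx.Put(IncompleteKey("Task", nil), &Task{Description: "hello"})
//	commit, err := tx.Commit()
//	if err == nil {
//		key := commit.Key(pk) // the key assigned by the datastore
//		_ = key
//	}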
+func (t *Transaction) Commit() (c *Commit, err error) {
+	t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Transaction.Commit")
+	defer func() { trace.EndSpan(t.ctx, err) }()
+
+	if t.id == nil {
+		return nil, errExpiredTransaction
+	}
+	req := &pb.CommitRequest{
+		ProjectId:           t.client.dataset,
+		TransactionSelector: &pb.CommitRequest_Transaction{Transaction: t.id},
+		Mutations:           t.mutations,
+		Mode:                pb.CommitRequest_TRANSACTIONAL,
+	}
+	t.id = nil
+	resp, err := t.client.client.Commit(t.ctx, req)
+	if err != nil {
+		if grpc.Code(err) == codes.Aborted {
+			return nil, ErrConcurrentTransaction
+		}
+		return nil, err
+	}
+
+	// Allocate the Commit before binding pending keys to it; otherwise
+	// p.commit (and the returned *Commit) would be nil.
+	c = &Commit{}
+	// Copy any newly minted keys into the returned keys.
+	for i, p := range t.pending {
+		if i >= len(resp.MutationResults) || resp.MutationResults[i].Key == nil {
+			return nil, errors.New("datastore: internal error: server returned the wrong mutation results")
+		}
+		key, err := protoToKey(resp.MutationResults[i].Key)
+		if err != nil {
+			return nil, errors.New("datastore: internal error: server returned an invalid key")
+		}
+		p.key = key
+		p.commit = c
+	}
+
+	return c, nil
+}
+
+// Rollback abandons a pending transaction.
+func (t *Transaction) Rollback() (err error) {
+	t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Transaction.Rollback")
+	defer func() { trace.EndSpan(t.ctx, err) }()
+
+	if t.id == nil {
+		return errExpiredTransaction
+	}
+	id := t.id
+	t.id = nil
+	_, err = t.client.client.Rollback(t.ctx, &pb.RollbackRequest{
+		ProjectId:   t.client.dataset,
+		Transaction: id,
+	})
+	return err
+}
+
+// Get is the transaction-specific version of the package function Get.
+// All reads performed during the transaction will come from a single consistent
+// snapshot. Furthermore, if the transaction is set to a serializable isolation
+// level, another transaction cannot concurrently modify the data that is read
+// or modified by this transaction.
+func (t *Transaction) Get(key *Key, dst interface{}) (err error) {
+	t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Transaction.Get")
+	defer func() { trace.EndSpan(t.ctx, err) }()
+
+	opts := &pb.ReadOptions{
+		ConsistencyType: &pb.ReadOptions_Transaction{Transaction: t.id},
+	}
+	err = t.client.get(t.ctx, []*Key{key}, []interface{}{dst}, opts)
+	if me, ok := err.(MultiError); ok {
+		return me[0]
+	}
+	return err
+}
+
+// GetMulti is a batch version of Get.
+func (t *Transaction) GetMulti(keys []*Key, dst interface{}) (err error) {
+	t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Transaction.GetMulti")
+	defer func() { trace.EndSpan(t.ctx, err) }()
+
+	if t.id == nil {
+		return errExpiredTransaction
+	}
+	opts := &pb.ReadOptions{
+		ConsistencyType: &pb.ReadOptions_Transaction{Transaction: t.id},
+	}
+	return t.client.get(t.ctx, keys, dst, opts)
+}
+
+// Put is the transaction-specific version of the package function Put.
+//
+// Put returns a PendingKey which can be resolved into a Key using the
+// return value from a successful Commit. If key is an incomplete key, the
+// returned pending key will resolve to a unique key generated by the
+// datastore.
+func (t *Transaction) Put(key *Key, src interface{}) (*PendingKey, error) {
+	h, err := t.PutMulti([]*Key{key}, []interface{}{src})
+	if err != nil {
+		if me, ok := err.(MultiError); ok {
+			return nil, me[0]
+		}
+		return nil, err
+	}
+	return h[0], nil
+}
+
+// PutMulti is a batch version of Put. One PendingKey is returned for each
+// element of src in the same order.
+// TODO(jba): rewrite in terms of Mutate.
+func (t *Transaction) PutMulti(keys []*Key, src interface{}) (ret []*PendingKey, err error) {
+	t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Transaction.PutMulti")
+	defer func() { trace.EndSpan(t.ctx, err) }()
+
+	if t.id == nil {
+		return nil, errExpiredTransaction
+	}
+	mutations, err := putMutations(keys, src)
+	if err != nil {
+		return nil, err
+	}
+	origin := len(t.mutations)
+	t.mutations = append(t.mutations, mutations...)
+
+	// Prepare the returned handles, pre-populating where possible.
+	ret = make([]*PendingKey, len(keys))
+	for i, key := range keys {
+		p := &PendingKey{}
+		if key.Incomplete() {
+			// This key will be in the final commit result.
+			t.pending[origin+i] = p
+		} else {
+			p.key = key
+		}
+		ret[i] = p
+	}
+
+	return ret, nil
+}
+
+// Delete is the transaction-specific version of the package function Delete.
+// Delete enqueues the deletion of the entity for the given key, to be
+// committed atomically upon calling Commit.
+func (t *Transaction) Delete(key *Key) error {
+	err := t.DeleteMulti([]*Key{key})
+	if me, ok := err.(MultiError); ok {
+		return me[0]
+	}
+	return err
+}
+
+// DeleteMulti is a batch version of Delete.
+// TODO(jba): rewrite in terms of Mutate.
+func (t *Transaction) DeleteMulti(keys []*Key) (err error) {
+	t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Transaction.DeleteMulti")
+	defer func() { trace.EndSpan(t.ctx, err) }()
+
+	if t.id == nil {
+		return errExpiredTransaction
+	}
+	mutations, err := deleteMutations(keys)
+	if err != nil {
+		return err
+	}
+	t.mutations = append(t.mutations, mutations...)
+	return nil
+}
+
+// Mutate adds the mutations to the transaction. They will all be applied atomically
+// upon calling Commit. Mutate returns a PendingKey for each Mutation in the argument
+// list, in the same order. PendingKeys for Delete mutations are always nil.
+//
+// If any of the mutations are invalid, Mutate returns a MultiError with the errors.
+// Mutate returns a MultiError in this case even if there is only one Mutation.
+//
+// For an example, see Client.Mutate.
+func (t *Transaction) Mutate(muts ...*Mutation) ([]*PendingKey, error) {
+	if t.id == nil {
+		return nil, errExpiredTransaction
+	}
+	pmuts, err := mutationProtos(muts)
+	if err != nil {
+		return nil, err
+	}
+	origin := len(t.mutations)
+	t.mutations = append(t.mutations, pmuts...)
+	// Prepare the returned handles, pre-populating where possible.
+	ret := make([]*PendingKey, len(muts))
+	for i, mut := range muts {
+		if mut.isDelete() {
+			continue
+		}
+		p := &PendingKey{}
+		if mut.key.Incomplete() {
+			// This key will be in the final commit result.
+			t.pending[origin+i] = p
+		} else {
+			p.key = mut.key
+		}
+		ret[i] = p
+	}
+	return ret, nil
+}
+
+// Commit represents the result of a committed transaction.
+type Commit struct{}
+
+// Key resolves a pending key handle into a final key.
+func (c *Commit) Key(p *PendingKey) *Key {
+	if p == nil { // if called on a *PendingKey from a Delete mutation
+		return nil
+	}
+	// If p.commit is nil, the PendingKey did not come from an incomplete key,
+	// so p.key is valid.
+	if p.commit != nil && c != p.commit {
+		panic("PendingKey was not created by corresponding transaction")
+	}
+	return p.key
+}
+
+// PendingKey represents the key for a newly-inserted entity. It can be
+// resolved into a Key by calling the Key method of Commit.
+type PendingKey struct { + key *Key + commit *Commit +} diff --git a/vendor/cloud.google.com/go/datastore/transaction_test.go b/vendor/cloud.google.com/go/datastore/transaction_test.go new file mode 100644 index 0000000..1655f5f --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/transaction_test.go @@ -0,0 +1,78 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "testing" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + pb "google.golang.org/genproto/googleapis/datastore/v1" +) + +func TestNewTransaction(t *testing.T) { + var got *pb.BeginTransactionRequest + client := &Client{ + dataset: "project", + client: &fakeDatastoreClient{ + beginTransaction: func(req *pb.BeginTransactionRequest) (*pb.BeginTransactionResponse, error) { + got = req + return &pb.BeginTransactionResponse{ + Transaction: []byte("tid"), + }, nil + }, + }, + } + ctx := context.Background() + for _, test := range []struct { + settings *transactionSettings + want *pb.BeginTransactionRequest + }{ + { + &transactionSettings{}, + &pb.BeginTransactionRequest{ProjectId: "project"}, + }, + { + &transactionSettings{readOnly: true}, + &pb.BeginTransactionRequest{ + ProjectId: "project", + TransactionOptions: &pb.TransactionOptions{ + Mode: &pb.TransactionOptions_ReadOnly_{ReadOnly: &pb.TransactionOptions_ReadOnly{}}, + }, + }, + }, + { + &transactionSettings{prevID: []byte("tid")}, + &pb.BeginTransactionRequest{ + ProjectId: "project", + TransactionOptions: &pb.TransactionOptions{ + Mode: &pb.TransactionOptions_ReadWrite_{ReadWrite: &pb.TransactionOptions_ReadWrite{ + PreviousTransaction: []byte("tid"), + }, + }, + }, + }, + }, + } { + _, err := client.newTransaction(ctx, test.settings) + if err != nil { + t.Fatal(err) + } + if !proto.Equal(got, test.want) { + t.Errorf("%+v:\ngot %+v\nwant %+v", test.settings, got, test.want) + } + } +} diff --git a/vendor/cloud.google.com/go/debugger/apiv2/controller2_client.go b/vendor/cloud.google.com/go/debugger/apiv2/controller2_client.go new file mode 100644 index 0000000..48bf06c --- /dev/null +++ b/vendor/cloud.google.com/go/debugger/apiv2/controller2_client.go @@ -0,0 +1,215 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package debugger + +import ( + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// Controller2CallOptions contains the retry settings for each method of Controller2Client. +type Controller2CallOptions struct { + RegisterDebuggee []gax.CallOption + ListActiveBreakpoints []gax.CallOption + UpdateActiveBreakpoint []gax.CallOption +} + +func defaultController2ClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("clouddebugger.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultController2CallOptions() *Controller2CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &Controller2CallOptions{ + RegisterDebuggee: retry[[2]string{"default", "non_idempotent"}], + ListActiveBreakpoints: retry[[2]string{"default", "idempotent"}], + UpdateActiveBreakpoint: retry[[2]string{"default", "idempotent"}], + } +} + +// Controller2Client is a client for interacting with Stackdriver Debugger API. +type Controller2Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + controller2Client clouddebuggerpb.Controller2Client + + // The call options for this service. + CallOptions *Controller2CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewController2Client creates a new controller2 client. +// +// The Controller service provides the API for orchestrating a collection of +// debugger agents to perform debugging tasks. These agents are each attached +// to a process of an application which may include one or more replicas. +// +// The debugger agents register with the Controller to identify the application +// being debugged, the Debuggee. All agents that register with the same data, +// represent the same Debuggee, and are assigned the same debuggee_id. +// +// The debugger agents call the Controller to retrieve the list of active +// Breakpoints. Agents with the same debuggee_id get the same breakpoints +// list. An agent that can fulfill the breakpoint request updates the +// Controller with the breakpoint result. The controller selects the first +// result received and discards the rest of the results. +// Agents that poll again for active breakpoints will no longer have +// the completed breakpoint in the list and should remove that breakpoint from +// their attached process. +// +// The Controller service does not provide a way to retrieve the results of +// a completed breakpoint. This functionality is available using the Debugger +// service. +func NewController2Client(ctx context.Context, opts ...option.ClientOption) (*Controller2Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultController2ClientOptions(), opts...)...) 
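+	// Options are applied in order, so any caller-supplied opts can override
+	// the defaults above (for example, the endpoint or auth scopes).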
+ if err != nil { + return nil, err + } + c := &Controller2Client{ + conn: conn, + CallOptions: defaultController2CallOptions(), + + controller2Client: clouddebuggerpb.NewController2Client(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Controller2Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Controller2Client) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Controller2Client) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// RegisterDebuggee registers the debuggee with the controller service. +// +// All agents attached to the same application must call this method with +// exactly the same request content to get back the same stable debuggee_id. +// Agents should call this method again whenever google.rpc.Code.NOT_FOUND +// is returned from any controller method. +// +// This protocol allows the controller service to disable debuggees, recover +// from data loss, or change the debuggee_id format. Agents must handle +// debuggee_id value changing upon re-registration. +func (c *Controller2Client) RegisterDebuggee(ctx context.Context, req *clouddebuggerpb.RegisterDebuggeeRequest, opts ...gax.CallOption) (*clouddebuggerpb.RegisterDebuggeeResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.RegisterDebuggee[0:len(c.CallOptions.RegisterDebuggee):len(c.CallOptions.RegisterDebuggee)], opts...) + var resp *clouddebuggerpb.RegisterDebuggeeResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.controller2Client.RegisterDebuggee(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListActiveBreakpoints returns the list of all active breakpoints for the debuggee. +// +// The breakpoint specification (location, condition, and expressions +// fields) is semantically immutable, although the field values may +// change. For example, an agent may update the location line number +// to reflect the actual line where the breakpoint was set, but this +// doesn't change the breakpoint semantics. +// +// This means that an agent does not need to check if a breakpoint has changed +// when it encounters the same breakpoint on a successive call. +// Moreover, an agent should remember the breakpoints that are completed +// until the controller removes them from the active list to avoid +// setting those breakpoints again. +func (c *Controller2Client) ListActiveBreakpoints(ctx context.Context, req *clouddebuggerpb.ListActiveBreakpointsRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListActiveBreakpointsResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListActiveBreakpoints[0:len(c.CallOptions.ListActiveBreakpoints):len(c.CallOptions.ListActiveBreakpoints)], opts...) 
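+	// The full slice expression [0:len:len] caps the capacity of the default
+	// options, so this append copies into a fresh array rather than mutating
+	// the shared CallOptions slice.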
+ var resp *clouddebuggerpb.ListActiveBreakpointsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.controller2Client.ListActiveBreakpoints(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateActiveBreakpoint updates the breakpoint state or mutable fields. +// The entire Breakpoint message must be sent back to the controller service. +// +// Updates to active breakpoint fields are only allowed if the new value +// does not change the breakpoint specification. Updates to the location, +// condition and expressions fields should not alter the breakpoint +// semantics. These may only make changes such as canonicalizing a value +// or snapping the location to the correct line of code. +func (c *Controller2Client) UpdateActiveBreakpoint(ctx context.Context, req *clouddebuggerpb.UpdateActiveBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.UpdateActiveBreakpointResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateActiveBreakpoint[0:len(c.CallOptions.UpdateActiveBreakpoint):len(c.CallOptions.UpdateActiveBreakpoint)], opts...) + var resp *clouddebuggerpb.UpdateActiveBreakpointResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.controller2Client.UpdateActiveBreakpoint(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/debugger/apiv2/controller2_client_example_test.go b/vendor/cloud.google.com/go/debugger/apiv2/controller2_client_example_test.go new file mode 100644 index 0000000..4ea1860 --- /dev/null +++ b/vendor/cloud.google.com/go/debugger/apiv2/controller2_client_example_test.go @@ -0,0 +1,87 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package debugger_test + +import ( + "cloud.google.com/go/debugger/apiv2" + "golang.org/x/net/context" + clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2" +) + +func ExampleNewController2Client() { + ctx := context.Background() + c, err := debugger.NewController2Client(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleController2Client_RegisterDebuggee() { + ctx := context.Background() + c, err := debugger.NewController2Client(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouddebuggerpb.RegisterDebuggeeRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.RegisterDebuggee(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleController2Client_ListActiveBreakpoints() { + ctx := context.Background() + c, err := debugger.NewController2Client(ctx) + if err != nil { + // TODO: Handle error. 
+ } + + req := &clouddebuggerpb.ListActiveBreakpointsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ListActiveBreakpoints(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleController2Client_UpdateActiveBreakpoint() { + ctx := context.Background() + c, err := debugger.NewController2Client(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouddebuggerpb.UpdateActiveBreakpointRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateActiveBreakpoint(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/debugger/apiv2/debugger2_client.go b/vendor/cloud.google.com/go/debugger/apiv2/debugger2_client.go new file mode 100644 index 0000000..86dda36 --- /dev/null +++ b/vendor/cloud.google.com/go/debugger/apiv2/debugger2_client.go @@ -0,0 +1,211 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package debugger + +import ( + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// Debugger2CallOptions contains the retry settings for each method of Debugger2Client. +type Debugger2CallOptions struct { + SetBreakpoint []gax.CallOption + GetBreakpoint []gax.CallOption + DeleteBreakpoint []gax.CallOption + ListBreakpoints []gax.CallOption + ListDebuggees []gax.CallOption +} + +func defaultDebugger2ClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("clouddebugger.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultDebugger2CallOptions() *Debugger2CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &Debugger2CallOptions{ + SetBreakpoint: retry[[2]string{"default", "non_idempotent"}], + GetBreakpoint: retry[[2]string{"default", "idempotent"}], + DeleteBreakpoint: retry[[2]string{"default", "idempotent"}], + ListBreakpoints: retry[[2]string{"default", "idempotent"}], + ListDebuggees: retry[[2]string{"default", "idempotent"}], + } +} + +// Debugger2Client is a client for interacting with Stackdriver Debugger API. +type Debugger2Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + debugger2Client clouddebuggerpb.Debugger2Client + + // The call options for this service. 
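+	// The field is exported so callers can adjust per-method retry settings
+	// after the client has been constructed.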
+ CallOptions *Debugger2CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewDebugger2Client creates a new debugger2 client. +// +// The Debugger service provides the API that allows users to collect run-time +// information from a running application, without stopping or slowing it down +// and without modifying its state. An application may include one or +// more replicated processes performing the same work. +// +// A debugged application is represented using the Debuggee concept. The +// Debugger service provides a way to query for available debuggees, but does +// not provide a way to create one. A debuggee is created using the Controller +// service, usually by running a debugger agent with the application. +// +// The Debugger service enables the client to set one or more Breakpoints on a +// Debuggee and collect the results of the set Breakpoints. +func NewDebugger2Client(ctx context.Context, opts ...option.ClientOption) (*Debugger2Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultDebugger2ClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Debugger2Client{ + conn: conn, + CallOptions: defaultDebugger2CallOptions(), + + debugger2Client: clouddebuggerpb.NewDebugger2Client(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Debugger2Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Debugger2Client) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Debugger2Client) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// SetBreakpoint sets the breakpoint to the debuggee. +func (c *Debugger2Client) SetBreakpoint(ctx context.Context, req *clouddebuggerpb.SetBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.SetBreakpointResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetBreakpoint[0:len(c.CallOptions.SetBreakpoint):len(c.CallOptions.SetBreakpoint)], opts...) + var resp *clouddebuggerpb.SetBreakpointResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.debugger2Client.SetBreakpoint(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetBreakpoint gets breakpoint information. +func (c *Debugger2Client) GetBreakpoint(ctx context.Context, req *clouddebuggerpb.GetBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.GetBreakpointResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetBreakpoint[0:len(c.CallOptions.GetBreakpoint):len(c.CallOptions.GetBreakpoint)], opts...) + var resp *clouddebuggerpb.GetBreakpointResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.debugger2Client.GetBreakpoint(ctx, req, settings.GRPC...) + return err + }, opts...) 
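+	// gax.Invoke runs the closure and retries it according to the retry
+	// settings carried in opts; only the final attempt's error surfaces here.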
+ if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteBreakpoint deletes the breakpoint from the debuggee. +func (c *Debugger2Client) DeleteBreakpoint(ctx context.Context, req *clouddebuggerpb.DeleteBreakpointRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteBreakpoint[0:len(c.CallOptions.DeleteBreakpoint):len(c.CallOptions.DeleteBreakpoint)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.debugger2Client.DeleteBreakpoint(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ListBreakpoints lists all breakpoints for the debuggee. +func (c *Debugger2Client) ListBreakpoints(ctx context.Context, req *clouddebuggerpb.ListBreakpointsRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListBreakpointsResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListBreakpoints[0:len(c.CallOptions.ListBreakpoints):len(c.CallOptions.ListBreakpoints)], opts...) + var resp *clouddebuggerpb.ListBreakpointsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.debugger2Client.ListBreakpoints(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListDebuggees lists all the debuggees that the user has access to. +func (c *Debugger2Client) ListDebuggees(ctx context.Context, req *clouddebuggerpb.ListDebuggeesRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListDebuggeesResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListDebuggees[0:len(c.CallOptions.ListDebuggees):len(c.CallOptions.ListDebuggees)], opts...) + var resp *clouddebuggerpb.ListDebuggeesResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.debugger2Client.ListDebuggees(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/debugger/apiv2/debugger2_client_example_test.go b/vendor/cloud.google.com/go/debugger/apiv2/debugger2_client_example_test.go new file mode 100644 index 0000000..b7f72e9 --- /dev/null +++ b/vendor/cloud.google.com/go/debugger/apiv2/debugger2_client_example_test.go @@ -0,0 +1,121 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package debugger_test + +import ( + "cloud.google.com/go/debugger/apiv2" + "golang.org/x/net/context" + clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2" +) + +func ExampleNewDebugger2Client() { + ctx := context.Background() + c, err := debugger.NewDebugger2Client(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. 
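+	// For instance, a minimal sketch (the project and version strings below
+	// are placeholders, not required values):
+	//
+	//	resp, err := c.ListDebuggees(ctx, &clouddebuggerpb.ListDebuggeesRequest{
+	//		Project:       "my-project",
+	//		ClientVersion: "example/0.1",
+	//	})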
+ _ = c +} + +func ExampleDebugger2Client_SetBreakpoint() { + ctx := context.Background() + c, err := debugger.NewDebugger2Client(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouddebuggerpb.SetBreakpointRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetBreakpoint(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleDebugger2Client_GetBreakpoint() { + ctx := context.Background() + c, err := debugger.NewDebugger2Client(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouddebuggerpb.GetBreakpointRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetBreakpoint(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleDebugger2Client_DeleteBreakpoint() { + ctx := context.Background() + c, err := debugger.NewDebugger2Client(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouddebuggerpb.DeleteBreakpointRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteBreakpoint(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleDebugger2Client_ListBreakpoints() { + ctx := context.Background() + c, err := debugger.NewDebugger2Client(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouddebuggerpb.ListBreakpointsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ListBreakpoints(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleDebugger2Client_ListDebuggees() { + ctx := context.Background() + c, err := debugger.NewDebugger2Client(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouddebuggerpb.ListDebuggeesRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ListDebuggees(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/debugger/apiv2/doc.go b/vendor/cloud.google.com/go/debugger/apiv2/doc.go new file mode 100644 index 0000000..56402af --- /dev/null +++ b/vendor/cloud.google.com/go/debugger/apiv2/doc.go @@ -0,0 +1,50 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package debugger is an auto-generated package for the +// Stackdriver Debugger API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// Examines the call stack and variables of a running application +// without stopping or slowing it down. +// +// Use the client at cloud.google.com/go/cmd/go-cloud-debug-agent in preference to this. 
+package debugger // import "cloud.google.com/go/debugger/apiv2" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud_debugger", + } +} diff --git a/vendor/cloud.google.com/go/debugger/apiv2/mock_test.go b/vendor/cloud.google.com/go/debugger/apiv2/mock_test.go new file mode 100644 index 0000000..5059154 --- /dev/null +++ b/vendor/cloud.google.com/go/debugger/apiv2/mock_test.go @@ -0,0 +1,693 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package debugger + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockDebugger2Server struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + clouddebuggerpb.Debugger2Server + + reqs []proto.Message + + // If set, all calls return this error. 
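+	// Error-path tests set it, and happy-path tests reset it to nil.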
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockDebugger2Server) SetBreakpoint(ctx context.Context, req *clouddebuggerpb.SetBreakpointRequest) (*clouddebuggerpb.SetBreakpointResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouddebuggerpb.SetBreakpointResponse), nil +} + +func (s *mockDebugger2Server) GetBreakpoint(ctx context.Context, req *clouddebuggerpb.GetBreakpointRequest) (*clouddebuggerpb.GetBreakpointResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouddebuggerpb.GetBreakpointResponse), nil +} + +func (s *mockDebugger2Server) DeleteBreakpoint(ctx context.Context, req *clouddebuggerpb.DeleteBreakpointRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockDebugger2Server) ListBreakpoints(ctx context.Context, req *clouddebuggerpb.ListBreakpointsRequest) (*clouddebuggerpb.ListBreakpointsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouddebuggerpb.ListBreakpointsResponse), nil +} + +func (s *mockDebugger2Server) ListDebuggees(ctx context.Context, req *clouddebuggerpb.ListDebuggeesRequest) (*clouddebuggerpb.ListDebuggeesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouddebuggerpb.ListDebuggeesResponse), nil +} + +type mockController2Server struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + clouddebuggerpb.Controller2Server + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockController2Server) RegisterDebuggee(ctx context.Context, req *clouddebuggerpb.RegisterDebuggeeRequest) (*clouddebuggerpb.RegisterDebuggeeResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouddebuggerpb.RegisterDebuggeeResponse), nil +} + +func (s *mockController2Server) ListActiveBreakpoints(ctx context.Context, req *clouddebuggerpb.ListActiveBreakpointsRequest) (*clouddebuggerpb.ListActiveBreakpointsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouddebuggerpb.ListActiveBreakpointsResponse), nil +} + +func (s *mockController2Server) UpdateActiveBreakpoint(ctx context.Context, req *clouddebuggerpb.UpdateActiveBreakpointRequest) (*clouddebuggerpb.UpdateActiveBreakpointResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouddebuggerpb.UpdateActiveBreakpointResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
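+// All tests share the single in-process gRPC server started there, so each
+// test resets the mock servers' err, reqs, and resps fields before use.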
+var clientOpt option.ClientOption + +var ( + mockDebugger2 mockDebugger2Server + mockController2 mockController2Server +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + clouddebuggerpb.RegisterDebugger2Server(serv, &mockDebugger2) + clouddebuggerpb.RegisterController2Server(serv, &mockController2) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestDebugger2SetBreakpoint(t *testing.T) { + var expectedResponse *clouddebuggerpb.SetBreakpointResponse = &clouddebuggerpb.SetBreakpointResponse{} + + mockDebugger2.err = nil + mockDebugger2.reqs = nil + + mockDebugger2.resps = append(mockDebugger2.resps[:0], expectedResponse) + + var debuggeeId string = "debuggeeId-997255898" + var breakpoint *clouddebuggerpb.Breakpoint = &clouddebuggerpb.Breakpoint{} + var clientVersion string = "clientVersion-1506231196" + var request = &clouddebuggerpb.SetBreakpointRequest{ + DebuggeeId: debuggeeId, + Breakpoint: breakpoint, + ClientVersion: clientVersion, + } + + c, err := NewDebugger2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetBreakpoint(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDebugger2.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDebugger2SetBreakpointError(t *testing.T) { + errCode := codes.PermissionDenied + mockDebugger2.err = gstatus.Error(errCode, "test error") + + var debuggeeId string = "debuggeeId-997255898" + var breakpoint *clouddebuggerpb.Breakpoint = &clouddebuggerpb.Breakpoint{} + var clientVersion string = "clientVersion-1506231196" + var request = &clouddebuggerpb.SetBreakpointRequest{ + DebuggeeId: debuggeeId, + Breakpoint: breakpoint, + ClientVersion: clientVersion, + } + + c, err := NewDebugger2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetBreakpoint(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDebugger2GetBreakpoint(t *testing.T) { + var expectedResponse *clouddebuggerpb.GetBreakpointResponse = &clouddebuggerpb.GetBreakpointResponse{} + + mockDebugger2.err = nil + mockDebugger2.reqs = nil + + mockDebugger2.resps = append(mockDebugger2.resps[:0], expectedResponse) + + var debuggeeId string = "debuggeeId-997255898" + var breakpointId string = "breakpointId498424873" + var clientVersion string = "clientVersion-1506231196" + var request = &clouddebuggerpb.GetBreakpointRequest{ + DebuggeeId: debuggeeId, + BreakpointId: breakpointId, + ClientVersion: clientVersion, + } + + c, err := NewDebugger2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetBreakpoint(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDebugger2.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { 
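+		// proto.Equal compares the two messages field by field; pointer
+		// identity is not required.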
+ t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDebugger2GetBreakpointError(t *testing.T) { + errCode := codes.PermissionDenied + mockDebugger2.err = gstatus.Error(errCode, "test error") + + var debuggeeId string = "debuggeeId-997255898" + var breakpointId string = "breakpointId498424873" + var clientVersion string = "clientVersion-1506231196" + var request = &clouddebuggerpb.GetBreakpointRequest{ + DebuggeeId: debuggeeId, + BreakpointId: breakpointId, + ClientVersion: clientVersion, + } + + c, err := NewDebugger2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetBreakpoint(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDebugger2DeleteBreakpoint(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockDebugger2.err = nil + mockDebugger2.reqs = nil + + mockDebugger2.resps = append(mockDebugger2.resps[:0], expectedResponse) + + var debuggeeId string = "debuggeeId-997255898" + var breakpointId string = "breakpointId498424873" + var clientVersion string = "clientVersion-1506231196" + var request = &clouddebuggerpb.DeleteBreakpointRequest{ + DebuggeeId: debuggeeId, + BreakpointId: breakpointId, + ClientVersion: clientVersion, + } + + c, err := NewDebugger2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteBreakpoint(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDebugger2.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDebugger2DeleteBreakpointError(t *testing.T) { + errCode := codes.PermissionDenied + mockDebugger2.err = gstatus.Error(errCode, "test error") + + var debuggeeId string = "debuggeeId-997255898" + var breakpointId string = "breakpointId498424873" + var clientVersion string = "clientVersion-1506231196" + var request = &clouddebuggerpb.DeleteBreakpointRequest{ + DebuggeeId: debuggeeId, + BreakpointId: breakpointId, + ClientVersion: clientVersion, + } + + c, err := NewDebugger2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteBreakpoint(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestDebugger2ListBreakpoints(t *testing.T) { + var nextWaitToken string = "nextWaitToken1006864251" + var expectedResponse = &clouddebuggerpb.ListBreakpointsResponse{ + NextWaitToken: nextWaitToken, + } + + mockDebugger2.err = nil + mockDebugger2.reqs = nil + + mockDebugger2.resps = append(mockDebugger2.resps[:0], expectedResponse) + + var debuggeeId string = "debuggeeId-997255898" + var clientVersion string = "clientVersion-1506231196" + var request = &clouddebuggerpb.ListBreakpointsRequest{ + DebuggeeId: debuggeeId, + ClientVersion: clientVersion, + } + + c, err := NewDebugger2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListBreakpoints(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDebugger2.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := 
expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDebugger2ListBreakpointsError(t *testing.T) { + errCode := codes.PermissionDenied + mockDebugger2.err = gstatus.Error(errCode, "test error") + + var debuggeeId string = "debuggeeId-997255898" + var clientVersion string = "clientVersion-1506231196" + var request = &clouddebuggerpb.ListBreakpointsRequest{ + DebuggeeId: debuggeeId, + ClientVersion: clientVersion, + } + + c, err := NewDebugger2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListBreakpoints(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDebugger2ListDebuggees(t *testing.T) { + var expectedResponse *clouddebuggerpb.ListDebuggeesResponse = &clouddebuggerpb.ListDebuggeesResponse{} + + mockDebugger2.err = nil + mockDebugger2.reqs = nil + + mockDebugger2.resps = append(mockDebugger2.resps[:0], expectedResponse) + + var project string = "project-309310695" + var clientVersion string = "clientVersion-1506231196" + var request = &clouddebuggerpb.ListDebuggeesRequest{ + Project: project, + ClientVersion: clientVersion, + } + + c, err := NewDebugger2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDebuggees(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDebugger2.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDebugger2ListDebuggeesError(t *testing.T) { + errCode := codes.PermissionDenied + mockDebugger2.err = gstatus.Error(errCode, "test error") + + var project string = "project-309310695" + var clientVersion string = "clientVersion-1506231196" + var request = &clouddebuggerpb.ListDebuggeesRequest{ + Project: project, + ClientVersion: clientVersion, + } + + c, err := NewDebugger2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDebuggees(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestController2RegisterDebuggee(t *testing.T) { + var expectedResponse *clouddebuggerpb.RegisterDebuggeeResponse = &clouddebuggerpb.RegisterDebuggeeResponse{} + + mockController2.err = nil + mockController2.reqs = nil + + mockController2.resps = append(mockController2.resps[:0], expectedResponse) + + var debuggee *clouddebuggerpb.Debuggee = &clouddebuggerpb.Debuggee{} + var request = &clouddebuggerpb.RegisterDebuggeeRequest{ + Debuggee: debuggee, + } + + c, err := NewController2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.RegisterDebuggee(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockController2.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func 
TestController2RegisterDebuggeeError(t *testing.T) { + errCode := codes.PermissionDenied + mockController2.err = gstatus.Error(errCode, "test error") + + var debuggee *clouddebuggerpb.Debuggee = &clouddebuggerpb.Debuggee{} + var request = &clouddebuggerpb.RegisterDebuggeeRequest{ + Debuggee: debuggee, + } + + c, err := NewController2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.RegisterDebuggee(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestController2ListActiveBreakpoints(t *testing.T) { + var nextWaitToken string = "nextWaitToken1006864251" + var waitExpired bool = false + var expectedResponse = &clouddebuggerpb.ListActiveBreakpointsResponse{ + NextWaitToken: nextWaitToken, + WaitExpired: waitExpired, + } + + mockController2.err = nil + mockController2.reqs = nil + + mockController2.resps = append(mockController2.resps[:0], expectedResponse) + + var debuggeeId string = "debuggeeId-997255898" + var request = &clouddebuggerpb.ListActiveBreakpointsRequest{ + DebuggeeId: debuggeeId, + } + + c, err := NewController2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListActiveBreakpoints(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockController2.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestController2ListActiveBreakpointsError(t *testing.T) { + errCode := codes.PermissionDenied + mockController2.err = gstatus.Error(errCode, "test error") + + var debuggeeId string = "debuggeeId-997255898" + var request = &clouddebuggerpb.ListActiveBreakpointsRequest{ + DebuggeeId: debuggeeId, + } + + c, err := NewController2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListActiveBreakpoints(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestController2UpdateActiveBreakpoint(t *testing.T) { + var expectedResponse *clouddebuggerpb.UpdateActiveBreakpointResponse = &clouddebuggerpb.UpdateActiveBreakpointResponse{} + + mockController2.err = nil + mockController2.reqs = nil + + mockController2.resps = append(mockController2.resps[:0], expectedResponse) + + var debuggeeId string = "debuggeeId-997255898" + var breakpoint *clouddebuggerpb.Breakpoint = &clouddebuggerpb.Breakpoint{} + var request = &clouddebuggerpb.UpdateActiveBreakpointRequest{ + DebuggeeId: debuggeeId, + Breakpoint: breakpoint, + } + + c, err := NewController2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateActiveBreakpoint(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockController2.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestController2UpdateActiveBreakpointError(t *testing.T) 
{ + errCode := codes.PermissionDenied + mockController2.err = gstatus.Error(errCode, "test error") + + var debuggeeId string = "debuggeeId-997255898" + var breakpoint *clouddebuggerpb.Breakpoint = &clouddebuggerpb.Breakpoint{} + var request = &clouddebuggerpb.UpdateActiveBreakpointRequest{ + DebuggeeId: debuggeeId, + Breakpoint: breakpoint, + } + + c, err := NewController2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateActiveBreakpoint(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/dlp/apiv2/dlp_client.go b/vendor/cloud.google.com/go/dlp/apiv2/dlp_client.go new file mode 100644 index 0000000..3167650 --- /dev/null +++ b/vendor/cloud.google.com/go/dlp/apiv2/dlp_client.go @@ -0,0 +1,812 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package dlp + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. 
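+// There is one entry per RPC on Client; defaultCallOptions populates them.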
+type CallOptions struct { + InspectContent []gax.CallOption + RedactImage []gax.CallOption + DeidentifyContent []gax.CallOption + ReidentifyContent []gax.CallOption + ListInfoTypes []gax.CallOption + CreateInspectTemplate []gax.CallOption + UpdateInspectTemplate []gax.CallOption + GetInspectTemplate []gax.CallOption + ListInspectTemplates []gax.CallOption + DeleteInspectTemplate []gax.CallOption + CreateDeidentifyTemplate []gax.CallOption + UpdateDeidentifyTemplate []gax.CallOption + GetDeidentifyTemplate []gax.CallOption + ListDeidentifyTemplates []gax.CallOption + DeleteDeidentifyTemplate []gax.CallOption + CreateDlpJob []gax.CallOption + ListDlpJobs []gax.CallOption + GetDlpJob []gax.CallOption + DeleteDlpJob []gax.CallOption + CancelDlpJob []gax.CallOption + ListJobTriggers []gax.CallOption + GetJobTrigger []gax.CallOption + DeleteJobTrigger []gax.CallOption + UpdateJobTrigger []gax.CallOption + CreateJobTrigger []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("dlp.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &CallOptions{ + InspectContent: retry[[2]string{"default", "idempotent"}], + RedactImage: retry[[2]string{"default", "idempotent"}], + DeidentifyContent: retry[[2]string{"default", "idempotent"}], + ReidentifyContent: retry[[2]string{"default", "idempotent"}], + ListInfoTypes: retry[[2]string{"default", "idempotent"}], + CreateInspectTemplate: retry[[2]string{"default", "non_idempotent"}], + UpdateInspectTemplate: retry[[2]string{"default", "non_idempotent"}], + GetInspectTemplate: retry[[2]string{"default", "idempotent"}], + ListInspectTemplates: retry[[2]string{"default", "idempotent"}], + DeleteInspectTemplate: retry[[2]string{"default", "idempotent"}], + CreateDeidentifyTemplate: retry[[2]string{"default", "non_idempotent"}], + UpdateDeidentifyTemplate: retry[[2]string{"default", "non_idempotent"}], + GetDeidentifyTemplate: retry[[2]string{"default", "idempotent"}], + ListDeidentifyTemplates: retry[[2]string{"default", "idempotent"}], + DeleteDeidentifyTemplate: retry[[2]string{"default", "idempotent"}], + CreateDlpJob: retry[[2]string{"default", "non_idempotent"}], + ListDlpJobs: retry[[2]string{"default", "idempotent"}], + GetDlpJob: retry[[2]string{"default", "idempotent"}], + DeleteDlpJob: retry[[2]string{"default", "idempotent"}], + CancelDlpJob: retry[[2]string{"default", "non_idempotent"}], + ListJobTriggers: retry[[2]string{"default", "idempotent"}], + GetJobTrigger: retry[[2]string{"default", "idempotent"}], + DeleteJobTrigger: retry[[2]string{"default", "idempotent"}], + UpdateJobTrigger: retry[[2]string{"default", "non_idempotent"}], + CreateJobTrigger: retry[[2]string{"default", "non_idempotent"}], + } +} + +// Client is a client for interacting with Cloud Data Loss Prevention (DLP) API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client dlppb.DlpServiceClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. 
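+	// It is set by setGoogleClientInfo and merged into every outgoing
+	// context by insertMetadata.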
+ xGoogMetadata metadata.MD +} + +// NewClient creates a new dlp service client. +// +// The Cloud Data Loss Prevention (DLP) API is a service that allows clients +// to detect the presence of Personally Identifiable Information (PII) and other +// privacy-sensitive data in user-supplied, unstructured data streams, like text +// blocks or images. +// The service also includes methods for sensitive data redaction and +// scheduling of data scans on Google Cloud Platform based data sets. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: dlppb.NewDlpServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// InspectContent finds potentially sensitive info in content. +// This method has limits on input size, processing time, and output size. +// How-to guide for text (at /dlp/docs/inspecting-text), How-to guide for +// images (at /dlp/docs/inspecting-images) +func (c *Client) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest, opts ...gax.CallOption) (*dlppb.InspectContentResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.InspectContent[0:len(c.CallOptions.InspectContent):len(c.CallOptions.InspectContent)], opts...) + var resp *dlppb.InspectContentResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.InspectContent(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// RedactImage redacts potentially sensitive info from an image. +// This method has limits on input size, processing time, and output size. +// How-to guide (at /dlp/docs/redacting-sensitive-data-images) +func (c *Client) RedactImage(ctx context.Context, req *dlppb.RedactImageRequest, opts ...gax.CallOption) (*dlppb.RedactImageResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.RedactImage[0:len(c.CallOptions.RedactImage):len(c.CallOptions.RedactImage)], opts...) + var resp *dlppb.RedactImageResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.RedactImage(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeidentifyContent de-identifies potentially sensitive info from a ContentItem. +// This method has limits on input size and output size. 
+// How-to guide (at /dlp/docs/deidentify-sensitive-data) +func (c *Client) DeidentifyContent(ctx context.Context, req *dlppb.DeidentifyContentRequest, opts ...gax.CallOption) (*dlppb.DeidentifyContentResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeidentifyContent[0:len(c.CallOptions.DeidentifyContent):len(c.CallOptions.DeidentifyContent)], opts...) + var resp *dlppb.DeidentifyContentResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.DeidentifyContent(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ReidentifyContent re-identifies content that has been de-identified. +func (c *Client) ReidentifyContent(ctx context.Context, req *dlppb.ReidentifyContentRequest, opts ...gax.CallOption) (*dlppb.ReidentifyContentResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ReidentifyContent[0:len(c.CallOptions.ReidentifyContent):len(c.CallOptions.ReidentifyContent)], opts...) + var resp *dlppb.ReidentifyContentResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ReidentifyContent(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListInfoTypes returns a list of the sensitive information types that the DLP API +// supports. For more information, see Listing supported predefined +// infoTypes (at /dlp/docs/listing-infotypes). +func (c *Client) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequest, opts ...gax.CallOption) (*dlppb.ListInfoTypesResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListInfoTypes[0:len(c.CallOptions.ListInfoTypes):len(c.CallOptions.ListInfoTypes)], opts...) + var resp *dlppb.ListInfoTypesResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListInfoTypes(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateInspectTemplate creates an inspect template for re-using frequently used configuration +// for inspecting content, images, and storage. +func (c *Client) CreateInspectTemplate(ctx context.Context, req *dlppb.CreateInspectTemplateRequest, opts ...gax.CallOption) (*dlppb.InspectTemplate, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateInspectTemplate[0:len(c.CallOptions.CreateInspectTemplate):len(c.CallOptions.CreateInspectTemplate)], opts...) + var resp *dlppb.InspectTemplate + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateInspectTemplate(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateInspectTemplate updates the inspect template. +func (c *Client) UpdateInspectTemplate(ctx context.Context, req *dlppb.UpdateInspectTemplateRequest, opts ...gax.CallOption) (*dlppb.InspectTemplate, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateInspectTemplate[0:len(c.CallOptions.UpdateInspectTemplate):len(c.CallOptions.UpdateInspectTemplate)], opts...) 
+ var resp *dlppb.InspectTemplate + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.UpdateInspectTemplate(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetInspectTemplate gets an inspect template. +func (c *Client) GetInspectTemplate(ctx context.Context, req *dlppb.GetInspectTemplateRequest, opts ...gax.CallOption) (*dlppb.InspectTemplate, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetInspectTemplate[0:len(c.CallOptions.GetInspectTemplate):len(c.CallOptions.GetInspectTemplate)], opts...) + var resp *dlppb.InspectTemplate + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetInspectTemplate(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListInspectTemplates lists inspect templates. +func (c *Client) ListInspectTemplates(ctx context.Context, req *dlppb.ListInspectTemplatesRequest, opts ...gax.CallOption) *InspectTemplateIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListInspectTemplates[0:len(c.CallOptions.ListInspectTemplates):len(c.CallOptions.ListInspectTemplates)], opts...) + it := &InspectTemplateIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*dlppb.InspectTemplate, string, error) { + var resp *dlppb.ListInspectTemplatesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListInspectTemplates(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.InspectTemplates, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// DeleteInspectTemplate deletes an inspect template. +func (c *Client) DeleteInspectTemplate(ctx context.Context, req *dlppb.DeleteInspectTemplateRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteInspectTemplate[0:len(c.CallOptions.DeleteInspectTemplate):len(c.CallOptions.DeleteInspectTemplate)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteInspectTemplate(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// CreateDeidentifyTemplate creates a de-identify template for re-using frequently used configuration +// for Deidentifying content, images, and storage. +func (c *Client) CreateDeidentifyTemplate(ctx context.Context, req *dlppb.CreateDeidentifyTemplateRequest, opts ...gax.CallOption) (*dlppb.DeidentifyTemplate, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateDeidentifyTemplate[0:len(c.CallOptions.CreateDeidentifyTemplate):len(c.CallOptions.CreateDeidentifyTemplate)], opts...) 
+ var resp *dlppb.DeidentifyTemplate + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateDeidentifyTemplate(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateDeidentifyTemplate updates the de-identify template. +func (c *Client) UpdateDeidentifyTemplate(ctx context.Context, req *dlppb.UpdateDeidentifyTemplateRequest, opts ...gax.CallOption) (*dlppb.DeidentifyTemplate, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateDeidentifyTemplate[0:len(c.CallOptions.UpdateDeidentifyTemplate):len(c.CallOptions.UpdateDeidentifyTemplate)], opts...) + var resp *dlppb.DeidentifyTemplate + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.UpdateDeidentifyTemplate(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetDeidentifyTemplate gets a de-identify template. +func (c *Client) GetDeidentifyTemplate(ctx context.Context, req *dlppb.GetDeidentifyTemplateRequest, opts ...gax.CallOption) (*dlppb.DeidentifyTemplate, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetDeidentifyTemplate[0:len(c.CallOptions.GetDeidentifyTemplate):len(c.CallOptions.GetDeidentifyTemplate)], opts...) + var resp *dlppb.DeidentifyTemplate + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetDeidentifyTemplate(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListDeidentifyTemplates lists de-identify templates. +func (c *Client) ListDeidentifyTemplates(ctx context.Context, req *dlppb.ListDeidentifyTemplatesRequest, opts ...gax.CallOption) *DeidentifyTemplateIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListDeidentifyTemplates[0:len(c.CallOptions.ListDeidentifyTemplates):len(c.CallOptions.ListDeidentifyTemplates)], opts...) + it := &DeidentifyTemplateIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*dlppb.DeidentifyTemplate, string, error) { + var resp *dlppb.ListDeidentifyTemplatesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListDeidentifyTemplates(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.DeidentifyTemplates, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// DeleteDeidentifyTemplate deletes a de-identify template. 
+func (c *Client) DeleteDeidentifyTemplate(ctx context.Context, req *dlppb.DeleteDeidentifyTemplateRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteDeidentifyTemplate[0:len(c.CallOptions.DeleteDeidentifyTemplate):len(c.CallOptions.DeleteDeidentifyTemplate)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteDeidentifyTemplate(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// CreateDlpJob creates a new job to inspect storage or calculate risk metrics How-to +// guide (at /dlp/docs/compute-risk-analysis). +func (c *Client) CreateDlpJob(ctx context.Context, req *dlppb.CreateDlpJobRequest, opts ...gax.CallOption) (*dlppb.DlpJob, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateDlpJob[0:len(c.CallOptions.CreateDlpJob):len(c.CallOptions.CreateDlpJob)], opts...) + var resp *dlppb.DlpJob + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateDlpJob(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListDlpJobs lists DlpJobs that match the specified filter in the request. +func (c *Client) ListDlpJobs(ctx context.Context, req *dlppb.ListDlpJobsRequest, opts ...gax.CallOption) *DlpJobIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListDlpJobs[0:len(c.CallOptions.ListDlpJobs):len(c.CallOptions.ListDlpJobs)], opts...) + it := &DlpJobIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*dlppb.DlpJob, string, error) { + var resp *dlppb.ListDlpJobsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListDlpJobs(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Jobs, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetDlpJob gets the latest state of a long-running DlpJob. +func (c *Client) GetDlpJob(ctx context.Context, req *dlppb.GetDlpJobRequest, opts ...gax.CallOption) (*dlppb.DlpJob, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetDlpJob[0:len(c.CallOptions.GetDlpJob):len(c.CallOptions.GetDlpJob)], opts...) + var resp *dlppb.DlpJob + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetDlpJob(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteDlpJob deletes a long-running DlpJob. This method indicates that the client is +// no longer interested in the DlpJob result. The job will be cancelled if +// possible. 
+func (c *Client) DeleteDlpJob(ctx context.Context, req *dlppb.DeleteDlpJobRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteDlpJob[0:len(c.CallOptions.DeleteDlpJob):len(c.CallOptions.DeleteDlpJob)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteDlpJob(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// CancelDlpJob starts asynchronous cancellation on a long-running DlpJob. The server +// makes a best effort to cancel the DlpJob, but success is not +// guaranteed. +func (c *Client) CancelDlpJob(ctx context.Context, req *dlppb.CancelDlpJobRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CancelDlpJob[0:len(c.CallOptions.CancelDlpJob):len(c.CallOptions.CancelDlpJob)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.CancelDlpJob(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ListJobTriggers lists job triggers. +func (c *Client) ListJobTriggers(ctx context.Context, req *dlppb.ListJobTriggersRequest, opts ...gax.CallOption) *JobTriggerIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListJobTriggers[0:len(c.CallOptions.ListJobTriggers):len(c.CallOptions.ListJobTriggers)], opts...) + it := &JobTriggerIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*dlppb.JobTrigger, string, error) { + var resp *dlppb.ListJobTriggersResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListJobTriggers(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.JobTriggers, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetJobTrigger gets a job trigger. +func (c *Client) GetJobTrigger(ctx context.Context, req *dlppb.GetJobTriggerRequest, opts ...gax.CallOption) (*dlppb.JobTrigger, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetJobTrigger[0:len(c.CallOptions.GetJobTrigger):len(c.CallOptions.GetJobTrigger)], opts...) + var resp *dlppb.JobTrigger + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetJobTrigger(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteJobTrigger deletes a job trigger. +func (c *Client) DeleteJobTrigger(ctx context.Context, req *dlppb.DeleteJobTriggerRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteJobTrigger[0:len(c.CallOptions.DeleteJobTrigger):len(c.CallOptions.DeleteJobTrigger)], opts...) 
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteJobTrigger(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// UpdateJobTrigger updates a job trigger. +func (c *Client) UpdateJobTrigger(ctx context.Context, req *dlppb.UpdateJobTriggerRequest, opts ...gax.CallOption) (*dlppb.JobTrigger, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateJobTrigger[0:len(c.CallOptions.UpdateJobTrigger):len(c.CallOptions.UpdateJobTrigger)], opts...) + var resp *dlppb.JobTrigger + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.UpdateJobTrigger(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateJobTrigger creates a job trigger to run DLP actions such as scanning storage for +// sensitive information on a set schedule. +func (c *Client) CreateJobTrigger(ctx context.Context, req *dlppb.CreateJobTriggerRequest, opts ...gax.CallOption) (*dlppb.JobTrigger, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateJobTrigger[0:len(c.CallOptions.CreateJobTrigger):len(c.CallOptions.CreateJobTrigger)], opts...) + var resp *dlppb.JobTrigger + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateJobTrigger(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeidentifyTemplateIterator manages a stream of *dlppb.DeidentifyTemplate. +type DeidentifyTemplateIterator struct { + items []*dlppb.DeidentifyTemplate + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*dlppb.DeidentifyTemplate, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *DeidentifyTemplateIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *DeidentifyTemplateIterator) Next() (*dlppb.DeidentifyTemplate, error) { + var item *dlppb.DeidentifyTemplate + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *DeidentifyTemplateIterator) bufLen() int { + return len(it.items) +} + +func (it *DeidentifyTemplateIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// DlpJobIterator manages a stream of *dlppb.DlpJob. +type DlpJobIterator struct { + items []*dlppb.DlpJob + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. 
+ // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*dlppb.DlpJob, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *DlpJobIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *DlpJobIterator) Next() (*dlppb.DlpJob, error) { + var item *dlppb.DlpJob + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *DlpJobIterator) bufLen() int { + return len(it.items) +} + +func (it *DlpJobIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// InspectTemplateIterator manages a stream of *dlppb.InspectTemplate. +type InspectTemplateIterator struct { + items []*dlppb.InspectTemplate + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*dlppb.InspectTemplate, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *InspectTemplateIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *InspectTemplateIterator) Next() (*dlppb.InspectTemplate, error) { + var item *dlppb.InspectTemplate + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *InspectTemplateIterator) bufLen() int { + return len(it.items) +} + +func (it *InspectTemplateIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// JobTriggerIterator manages a stream of *dlppb.JobTrigger. +type JobTriggerIterator struct { + items []*dlppb.JobTrigger + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*dlppb.JobTrigger, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *JobTriggerIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *JobTriggerIterator) Next() (*dlppb.JobTrigger, error) { + var item *dlppb.JobTrigger + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *JobTriggerIterator) bufLen() int { + return len(it.items) +} + +func (it *JobTriggerIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/dlp/apiv2/dlp_client_example_test.go b/vendor/cloud.google.com/go/dlp/apiv2/dlp_client_example_test.go new file mode 100644 index 0000000..9a0f129 --- /dev/null +++ b/vendor/cloud.google.com/go/dlp/apiv2/dlp_client_example_test.go @@ -0,0 +1,498 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package dlp_test + +import ( + "cloud.google.com/go/dlp/apiv2" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_InspectContent() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.InspectContentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.InspectContent(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_RedactImage() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.RedactImageRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.RedactImage(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_DeidentifyContent() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.DeidentifyContentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.DeidentifyContent(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ReidentifyContent() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.ReidentifyContentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ReidentifyContent(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListInfoTypes() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.ListInfoTypesRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ListInfoTypes(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp +} + +func ExampleClient_CreateInspectTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.CreateInspectTemplateRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateInspectTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_UpdateInspectTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.UpdateInspectTemplateRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateInspectTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_GetInspectTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.GetInspectTemplateRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetInspectTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListInspectTemplates() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.ListInspectTemplatesRequest{ + // TODO: Fill request struct fields. + } + it := c.ListInspectTemplates(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_DeleteInspectTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.DeleteInspectTemplateRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteInspectTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_CreateDeidentifyTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.CreateDeidentifyTemplateRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateDeidentifyTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_UpdateDeidentifyTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.UpdateDeidentifyTemplateRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateDeidentifyTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_GetDeidentifyTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.GetDeidentifyTemplateRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetDeidentifyTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListDeidentifyTemplates() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.ListDeidentifyTemplatesRequest{ + // TODO: Fill request struct fields. + } + it := c.ListDeidentifyTemplates(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp + } +} + +func ExampleClient_DeleteDeidentifyTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.DeleteDeidentifyTemplateRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteDeidentifyTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_CreateDlpJob() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.CreateDlpJobRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateDlpJob(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListDlpJobs() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.ListDlpJobsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListDlpJobs(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_GetDlpJob() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.GetDlpJobRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetDlpJob(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_DeleteDlpJob() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.DeleteDlpJobRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteDlpJob(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_CancelDlpJob() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.CancelDlpJobRequest{ + // TODO: Fill request struct fields. + } + err = c.CancelDlpJob(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_ListJobTriggers() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.ListJobTriggersRequest{ + // TODO: Fill request struct fields. + } + it := c.ListJobTriggers(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_GetJobTrigger() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.GetJobTriggerRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetJobTrigger(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_DeleteJobTrigger() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.DeleteJobTriggerRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteJobTrigger(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_UpdateJobTrigger() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.UpdateJobTriggerRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateJobTrigger(ctx, req) + if err != nil { + // TODO: Handle error. 
+ } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_CreateJobTrigger() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.CreateJobTriggerRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateJobTrigger(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/dlp/apiv2/doc.go b/vendor/cloud.google.com/go/dlp/apiv2/doc.go new file mode 100644 index 0000000..943a05e --- /dev/null +++ b/vendor/cloud.google.com/go/dlp/apiv2/doc.go @@ -0,0 +1,47 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package dlp is an auto-generated package for the +// Cloud Data Loss Prevention (DLP) API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// Provides methods for detection of privacy-sensitive fragments in text, +// images, and Google Cloud Platform storage repositories. +package dlp // import "cloud.google.com/go/dlp/apiv2" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + } +} diff --git a/vendor/cloud.google.com/go/dlp/apiv2/mock_test.go b/vendor/cloud.google.com/go/dlp/apiv2/mock_test.go new file mode 100644 index 0000000..cda3a74 --- /dev/null +++ b/vendor/cloud.google.com/go/dlp/apiv2/mock_test.go @@ -0,0 +1,1902 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
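+
+// The tests in this file exercise the generated client against an
+// in-process gRPC server: TestMain below registers mockDlpServer on a
+// local listener and hands the resulting connection to NewClient through
+// option.WithGRPCConn, so the tests need no network access or credentials.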
+ +package dlp + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockDlpServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + dlppb.DlpServiceServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockDlpServer) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest) (*dlppb.InspectContentResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.InspectContentResponse), nil +} + +func (s *mockDlpServer) RedactImage(ctx context.Context, req *dlppb.RedactImageRequest) (*dlppb.RedactImageResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.RedactImageResponse), nil +} + +func (s *mockDlpServer) DeidentifyContent(ctx context.Context, req *dlppb.DeidentifyContentRequest) (*dlppb.DeidentifyContentResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.DeidentifyContentResponse), nil +} + +func (s *mockDlpServer) ReidentifyContent(ctx context.Context, req *dlppb.ReidentifyContentRequest) (*dlppb.ReidentifyContentResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.ReidentifyContentResponse), nil +} + +func (s *mockDlpServer) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequest) (*dlppb.ListInfoTypesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.ListInfoTypesResponse), nil +} + +func (s *mockDlpServer) CreateInspectTemplate(ctx context.Context, req *dlppb.CreateInspectTemplateRequest) (*dlppb.InspectTemplate, error) { + md, _ := 
metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.InspectTemplate), nil +} + +func (s *mockDlpServer) UpdateInspectTemplate(ctx context.Context, req *dlppb.UpdateInspectTemplateRequest) (*dlppb.InspectTemplate, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.InspectTemplate), nil +} + +func (s *mockDlpServer) GetInspectTemplate(ctx context.Context, req *dlppb.GetInspectTemplateRequest) (*dlppb.InspectTemplate, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.InspectTemplate), nil +} + +func (s *mockDlpServer) ListInspectTemplates(ctx context.Context, req *dlppb.ListInspectTemplatesRequest) (*dlppb.ListInspectTemplatesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.ListInspectTemplatesResponse), nil +} + +func (s *mockDlpServer) DeleteInspectTemplate(ctx context.Context, req *dlppb.DeleteInspectTemplateRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockDlpServer) CreateDeidentifyTemplate(ctx context.Context, req *dlppb.CreateDeidentifyTemplateRequest) (*dlppb.DeidentifyTemplate, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.DeidentifyTemplate), nil +} + +func (s *mockDlpServer) UpdateDeidentifyTemplate(ctx context.Context, req *dlppb.UpdateDeidentifyTemplateRequest) (*dlppb.DeidentifyTemplate, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.DeidentifyTemplate), nil +} + +func (s *mockDlpServer) GetDeidentifyTemplate(ctx context.Context, req *dlppb.GetDeidentifyTemplateRequest) (*dlppb.DeidentifyTemplate, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { 
+ return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.DeidentifyTemplate), nil +} + +func (s *mockDlpServer) ListDeidentifyTemplates(ctx context.Context, req *dlppb.ListDeidentifyTemplatesRequest) (*dlppb.ListDeidentifyTemplatesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.ListDeidentifyTemplatesResponse), nil +} + +func (s *mockDlpServer) DeleteDeidentifyTemplate(ctx context.Context, req *dlppb.DeleteDeidentifyTemplateRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockDlpServer) CreateJobTrigger(ctx context.Context, req *dlppb.CreateJobTriggerRequest) (*dlppb.JobTrigger, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.JobTrigger), nil +} + +func (s *mockDlpServer) UpdateJobTrigger(ctx context.Context, req *dlppb.UpdateJobTriggerRequest) (*dlppb.JobTrigger, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.JobTrigger), nil +} + +func (s *mockDlpServer) GetJobTrigger(ctx context.Context, req *dlppb.GetJobTriggerRequest) (*dlppb.JobTrigger, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.JobTrigger), nil +} + +func (s *mockDlpServer) ListJobTriggers(ctx context.Context, req *dlppb.ListJobTriggersRequest) (*dlppb.ListJobTriggersResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.ListJobTriggersResponse), nil +} + +func (s *mockDlpServer) DeleteJobTrigger(ctx context.Context, req *dlppb.DeleteJobTriggerRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + 
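+// Every mockDlpServer method above and below follows the same shape:
+// verify that the x-goog-api-client header set by the client reached the
+// server, record the request so a test can compare it with what it sent,
+// then return either the configured error or the canned response.
+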
+func (s *mockDlpServer) CreateDlpJob(ctx context.Context, req *dlppb.CreateDlpJobRequest) (*dlppb.DlpJob, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.DlpJob), nil +} + +func (s *mockDlpServer) ListDlpJobs(ctx context.Context, req *dlppb.ListDlpJobsRequest) (*dlppb.ListDlpJobsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.ListDlpJobsResponse), nil +} + +func (s *mockDlpServer) GetDlpJob(ctx context.Context, req *dlppb.GetDlpJobRequest) (*dlppb.DlpJob, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.DlpJob), nil +} + +func (s *mockDlpServer) DeleteDlpJob(ctx context.Context, req *dlppb.DeleteDlpJobRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockDlpServer) CancelDlpJob(ctx context.Context, req *dlppb.CancelDlpJobRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
+var clientOpt option.ClientOption + +var ( + mockDlp mockDlpServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + dlppb.RegisterDlpServiceServer(serv, &mockDlp) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestDlpServiceInspectContent(t *testing.T) { + var expectedResponse *dlppb.InspectContentResponse = &dlppb.InspectContentResponse{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.InspectContentRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.InspectContent(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceInspectContentError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.InspectContentRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.InspectContent(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceRedactImage(t *testing.T) { + var redactedImage []byte = []byte("28") + var extractedText string = "extractedText998260012" + var expectedResponse = &dlppb.RedactImageResponse{ + RedactedImage: redactedImage, + ExtractedText: extractedText, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.RedactImageRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.RedactImage(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceRedactImageError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.RedactImageRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.RedactImage(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != 
errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceDeidentifyContent(t *testing.T) { + var expectedResponse *dlppb.DeidentifyContentResponse = &dlppb.DeidentifyContentResponse{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.DeidentifyContentRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.DeidentifyContent(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceDeidentifyContentError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.DeidentifyContentRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.DeidentifyContent(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceReidentifyContent(t *testing.T) { + var expectedResponse *dlppb.ReidentifyContentResponse = &dlppb.ReidentifyContentResponse{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.ReidentifyContentRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ReidentifyContent(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceReidentifyContentError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.ReidentifyContentRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ReidentifyContent(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceListInfoTypes(t *testing.T) { + var expectedResponse *dlppb.ListInfoTypesResponse = &dlppb.ListInfoTypesResponse{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var request *dlppb.ListInfoTypesRequest = &dlppb.ListInfoTypesRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err 
!= nil { + t.Fatal(err) + } + + resp, err := c.ListInfoTypes(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceListInfoTypesError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var request *dlppb.ListInfoTypesRequest = &dlppb.ListInfoTypesRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInfoTypes(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceCreateInspectTemplate(t *testing.T) { + var name string = "name3373707" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &dlppb.InspectTemplate{ + Name: name, + DisplayName: displayName, + Description: description, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.CreateInspectTemplateRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateInspectTemplate(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceCreateInspectTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.CreateInspectTemplateRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateInspectTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceUpdateInspectTemplate(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &dlppb.InspectTemplate{ + Name: name2, + DisplayName: displayName, + Description: description, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("organizations/%s/inspectTemplates/%s", "[ORGANIZATION]", "[INSPECT_TEMPLATE]") + var request = &dlppb.UpdateInspectTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateInspectTemplate(context.Background(), request) + + if err != 
nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceUpdateInspectTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("organizations/%s/inspectTemplates/%s", "[ORGANIZATION]", "[INSPECT_TEMPLATE]") + var request = &dlppb.UpdateInspectTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateInspectTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceGetInspectTemplate(t *testing.T) { + var name string = "name3373707" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &dlppb.InspectTemplate{ + Name: name, + DisplayName: displayName, + Description: description, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var request *dlppb.GetInspectTemplateRequest = &dlppb.GetInspectTemplateRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetInspectTemplate(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceGetInspectTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var request *dlppb.GetInspectTemplateRequest = &dlppb.GetInspectTemplateRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetInspectTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceListInspectTemplates(t *testing.T) { + var nextPageToken string = "" + var inspectTemplatesElement *dlppb.InspectTemplate = &dlppb.InspectTemplate{} + var inspectTemplates = []*dlppb.InspectTemplate{inspectTemplatesElement} + var expectedResponse = &dlppb.ListInspectTemplatesResponse{ + NextPageToken: nextPageToken, + InspectTemplates: inspectTemplates, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.ListInspectTemplatesRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInspectTemplates(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong 
request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.InspectTemplates[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceListInspectTemplatesError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.ListInspectTemplatesRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInspectTemplates(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceDeleteInspectTemplate(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("organizations/%s/inspectTemplates/%s", "[ORGANIZATION]", "[INSPECT_TEMPLATE]") + var request = &dlppb.DeleteInspectTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteInspectTemplate(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDlpServiceDeleteInspectTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("organizations/%s/inspectTemplates/%s", "[ORGANIZATION]", "[INSPECT_TEMPLATE]") + var request = &dlppb.DeleteInspectTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteInspectTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestDlpServiceCreateDeidentifyTemplate(t *testing.T) { + var name string = "name3373707" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &dlppb.DeidentifyTemplate{ + Name: name, + DisplayName: displayName, + Description: description, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.CreateDeidentifyTemplateRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateDeidentifyTemplate(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong 
response %q, want %q)", got, want) + } +} + +func TestDlpServiceCreateDeidentifyTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.CreateDeidentifyTemplateRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateDeidentifyTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceUpdateDeidentifyTemplate(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &dlppb.DeidentifyTemplate{ + Name: name2, + DisplayName: displayName, + Description: description, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("organizations/%s/deidentifyTemplates/%s", "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]") + var request = &dlppb.UpdateDeidentifyTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateDeidentifyTemplate(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceUpdateDeidentifyTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("organizations/%s/deidentifyTemplates/%s", "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]") + var request = &dlppb.UpdateDeidentifyTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateDeidentifyTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceGetDeidentifyTemplate(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &dlppb.DeidentifyTemplate{ + Name: name2, + DisplayName: displayName, + Description: description, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("organizations/%s/deidentifyTemplates/%s", "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]") + var request = &dlppb.GetDeidentifyTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDeidentifyTemplate(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + 
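+ // proto.Equal compares the protobuf messages field by field, so any drift
+ // between the request the client sent and the one the mock recorded fails here.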
t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceGetDeidentifyTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("organizations/%s/deidentifyTemplates/%s", "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]") + var request = &dlppb.GetDeidentifyTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDeidentifyTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceListDeidentifyTemplates(t *testing.T) { + var nextPageToken string = "" + var deidentifyTemplatesElement *dlppb.DeidentifyTemplate = &dlppb.DeidentifyTemplate{} + var deidentifyTemplates = []*dlppb.DeidentifyTemplate{deidentifyTemplatesElement} + var expectedResponse = &dlppb.ListDeidentifyTemplatesResponse{ + NextPageToken: nextPageToken, + DeidentifyTemplates: deidentifyTemplates, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.ListDeidentifyTemplatesRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDeidentifyTemplates(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.DeidentifyTemplates[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceListDeidentifyTemplatesError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.ListDeidentifyTemplatesRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDeidentifyTemplates(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceDeleteDeidentifyTemplate(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("organizations/%s/deidentifyTemplates/%s", "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]") + var request = &dlppb.DeleteDeidentifyTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = 
c.DeleteDeidentifyTemplate(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDlpServiceDeleteDeidentifyTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("organizations/%s/deidentifyTemplates/%s", "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]") + var request = &dlppb.DeleteDeidentifyTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteDeidentifyTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestDlpServiceCreateDlpJob(t *testing.T) { + var name string = "name3373707" + var jobTriggerName string = "jobTriggerName1819490804" + var expectedResponse = &dlppb.DlpJob{ + Name: name, + JobTriggerName: jobTriggerName, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.CreateDlpJobRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateDlpJob(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceCreateDlpJobError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.CreateDlpJobRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateDlpJob(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceListDlpJobs(t *testing.T) { + var nextPageToken string = "" + var jobsElement *dlppb.DlpJob = &dlppb.DlpJob{} + var jobs = []*dlppb.DlpJob{jobsElement} + var expectedResponse = &dlppb.ListDlpJobsResponse{ + NextPageToken: nextPageToken, + Jobs: jobs, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.ListDlpJobsRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDlpJobs(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Jobs[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { 
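+ // Next() yields a single element (here a *dlppb.DlpJob), not the wrapped
+ // list response, so the comparison is chosen by dynamic type: proto.Equal
+ // for protobuf messages, plain == for anything else.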
+ case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceListDlpJobsError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.ListDlpJobsRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDlpJobs(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceGetDlpJob(t *testing.T) { + var name2 string = "name2-1052831874" + var jobTriggerName string = "jobTriggerName1819490804" + var expectedResponse = &dlppb.DlpJob{ + Name: name2, + JobTriggerName: jobTriggerName, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/dlpJobs/%s", "[PROJECT]", "[DLP_JOB]") + var request = &dlppb.GetDlpJobRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDlpJob(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceGetDlpJobError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/dlpJobs/%s", "[PROJECT]", "[DLP_JOB]") + var request = &dlppb.GetDlpJobRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDlpJob(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceDeleteDlpJob(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/dlpJobs/%s", "[PROJECT]", "[DLP_JOB]") + var request = &dlppb.DeleteDlpJobRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteDlpJob(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDlpServiceDeleteDlpJobError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/dlpJobs/%s", "[PROJECT]", "[DLP_JOB]") + var request = &dlppb.DeleteDlpJobRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil 
{ + t.Fatal(err) + } + + err = c.DeleteDlpJob(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestDlpServiceCancelDlpJob(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/dlpJobs/%s", "[PROJECT]", "[DLP_JOB]") + var request = &dlppb.CancelDlpJobRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.CancelDlpJob(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDlpServiceCancelDlpJobError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/dlpJobs/%s", "[PROJECT]", "[DLP_JOB]") + var request = &dlppb.CancelDlpJobRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.CancelDlpJob(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestDlpServiceListJobTriggers(t *testing.T) { + var nextPageToken string = "" + var jobTriggersElement *dlppb.JobTrigger = &dlppb.JobTrigger{} + var jobTriggers = []*dlppb.JobTrigger{jobTriggersElement} + var expectedResponse = &dlppb.ListJobTriggersResponse{ + NextPageToken: nextPageToken, + JobTriggers: jobTriggers, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.ListJobTriggersRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListJobTriggers(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.JobTriggers[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceListJobTriggersError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.ListJobTriggersRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListJobTriggers(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} 
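+// Every mock-backed test in this file follows the same choreography: reset
+// the mock, queue one canned response, invoke the RPC, then assert on the
+// recorded request and the returned message with proto.Equal. A hypothetical
+// helper (a sketch, not part of the generated file) would capture the setup:
+//
+//	func resetMock(resp proto.Message) {
+//		mockDlp.err = nil
+//		mockDlp.reqs = nil
+//		mockDlp.resps = append(mockDlp.resps[:0], resp)
+//	}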
+func TestDlpServiceGetJobTrigger(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &dlppb.JobTrigger{ + Name: name2, + DisplayName: displayName, + Description: description, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/jobTriggers/%s", "[PROJECT]", "[JOB_TRIGGER]") + var request = &dlppb.GetJobTriggerRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetJobTrigger(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceGetJobTriggerError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/jobTriggers/%s", "[PROJECT]", "[JOB_TRIGGER]") + var request = &dlppb.GetJobTriggerRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetJobTrigger(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceDeleteJobTrigger(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var name string = "name3373707" + var request = &dlppb.DeleteJobTriggerRequest{ + Name: name, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteJobTrigger(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDlpServiceDeleteJobTriggerError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var name string = "name3373707" + var request = &dlppb.DeleteJobTriggerRequest{ + Name: name, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteJobTrigger(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestDlpServiceUpdateJobTrigger(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &dlppb.JobTrigger{ + Name: name2, + DisplayName: displayName, + Description: description, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/jobTriggers/%s", "[PROJECT]", "[JOB_TRIGGER]") + var request = 
&dlppb.UpdateJobTriggerRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateJobTrigger(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceUpdateJobTriggerError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/jobTriggers/%s", "[PROJECT]", "[JOB_TRIGGER]") + var request = &dlppb.UpdateJobTriggerRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateJobTrigger(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceCreateJobTrigger(t *testing.T) { + var name string = "name3373707" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &dlppb.JobTrigger{ + Name: name, + DisplayName: displayName, + Description: description, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.CreateJobTriggerRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateJobTrigger(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceCreateJobTriggerError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.CreateJobTriggerRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateJobTrigger(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/dlp/apiv2beta1/InspectContent_smoke_test.go b/vendor/cloud.google.com/go/dlp/apiv2beta1/InspectContent_smoke_test.go new file mode 100644 index 0000000..3c66669 --- /dev/null +++ b/vendor/cloud.google.com/go/dlp/apiv2beta1/InspectContent_smoke_test.go @@ -0,0 +1,79 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package dlp + +import ( + dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1" +) + +import ( + "fmt" + "strconv" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var _ = fmt.Sprintf +var _ = iterator.Done +var _ = strconv.FormatUint +var _ = time.Now + +func TestDlpServiceSmoke(t *testing.T) { + if testing.Short() { + t.Skip("skipping smoke test in short mode") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) + if ts == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + + projectId := testutil.ProjID() + _ = projectId + + c, err := NewClient(ctx, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + + var minLikelihood dlppb.Likelihood = dlppb.Likelihood_POSSIBLE + var inspectConfig = &dlppb.InspectConfig{ + MinLikelihood: minLikelihood, + } + var type_ string = "text/plain" + var value string = "my phone number is 215-512-1212" + var itemsElement = &dlppb.ContentItem{ + Type: type_, + DataItem: &dlppb.ContentItem_Value{ + Value: value, + }, + } + var items = []*dlppb.ContentItem{itemsElement} + var request = &dlppb.InspectContentRequest{ + InspectConfig: inspectConfig, + Items: items, + } + + if _, err := c.InspectContent(ctx, request); err != nil { + t.Error(err) + } +} diff --git a/vendor/cloud.google.com/go/dlp/apiv2beta1/dlp_client.go b/vendor/cloud.google.com/go/dlp/apiv2beta1/dlp_client.go new file mode 100644 index 0000000..f61f214 --- /dev/null +++ b/vendor/cloud.google.com/go/dlp/apiv2beta1/dlp_client.go @@ -0,0 +1,429 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package dlp + +import ( + "time" + + "cloud.google.com/go/internal/version" + "cloud.google.com/go/longrunning" + lroauto "cloud.google.com/go/longrunning/autogen" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. 
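+// The defaults built by defaultCallOptions below retry only on
+// codes.DeadlineExceeded and codes.Unavailable, backing off exponentially
+// from 100ms up to 60s with a 1.3 multiplier; non-idempotent methods such
+// as InspectContent get no retry options at all.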
+type CallOptions struct { + InspectContent []gax.CallOption + RedactContent []gax.CallOption + DeidentifyContent []gax.CallOption + AnalyzeDataSourceRisk []gax.CallOption + CreateInspectOperation []gax.CallOption + ListInspectFindings []gax.CallOption + ListInfoTypes []gax.CallOption + ListRootCategories []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("dlp.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &CallOptions{ + InspectContent: retry[[2]string{"default", "non_idempotent"}], + RedactContent: retry[[2]string{"default", "non_idempotent"}], + DeidentifyContent: retry[[2]string{"default", "idempotent"}], + AnalyzeDataSourceRisk: retry[[2]string{"default", "idempotent"}], + CreateInspectOperation: retry[[2]string{"default", "non_idempotent"}], + ListInspectFindings: retry[[2]string{"default", "idempotent"}], + ListInfoTypes: retry[[2]string{"default", "idempotent"}], + ListRootCategories: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with DLP API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client dlppb.DlpServiceClient + + // LROClient is used internally to handle longrunning operations. + // It is exposed so that its CallOptions can be modified if required. + // Users should not Close this client. + LROClient *lroauto.OperationsClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new dlp service client. +// +// The DLP API is a service that allows clients +// to detect the presence of Personally Identifiable Information (PII) and other +// privacy-sensitive data in user-supplied, unstructured data streams, like text +// blocks or images. +// The service also includes methods for sensitive data redaction and +// scheduling of data scans on Google Cloud Platform based data sets. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: dlppb.NewDlpServiceClient(conn), + } + c.setGoogleClientInfo() + + c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn)) + if err != nil { + // This error "should not happen", since we are just reusing old connection + // and never actually need to dial. + // If this does happen, we could leak conn. However, we cannot close conn: + // If the user invoked the function with option.WithGRPCConn, + // we would close a connection that's still in use. + // TODO(pongad): investigate error conditions. + return nil, err + } + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. 
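+// Note that this also tears down the connection shared with LROClient,
+// which NewClient created with option.WithGRPCConn on the same conn.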
+func (c *Client) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// InspectContent finds potentially sensitive info in a list of strings. +// This method has limits on input size, processing time, and output size. +func (c *Client) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest, opts ...gax.CallOption) (*dlppb.InspectContentResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.InspectContent[0:len(c.CallOptions.InspectContent):len(c.CallOptions.InspectContent)], opts...) + var resp *dlppb.InspectContentResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.InspectContent(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// RedactContent redacts potentially sensitive info from a list of strings. +// This method has limits on input size, processing time, and output size. +func (c *Client) RedactContent(ctx context.Context, req *dlppb.RedactContentRequest, opts ...gax.CallOption) (*dlppb.RedactContentResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.RedactContent[0:len(c.CallOptions.RedactContent):len(c.CallOptions.RedactContent)], opts...) + var resp *dlppb.RedactContentResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.RedactContent(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeidentifyContent de-identifies potentially sensitive info from a list of strings. +// This method has limits on input size and output size. +func (c *Client) DeidentifyContent(ctx context.Context, req *dlppb.DeidentifyContentRequest, opts ...gax.CallOption) (*dlppb.DeidentifyContentResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeidentifyContent[0:len(c.CallOptions.DeidentifyContent):len(c.CallOptions.DeidentifyContent)], opts...) + var resp *dlppb.DeidentifyContentResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.DeidentifyContent(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AnalyzeDataSourceRisk schedules a job to compute risk analysis metrics over content in a Google +// Cloud Platform repository. +func (c *Client) AnalyzeDataSourceRisk(ctx context.Context, req *dlppb.AnalyzeDataSourceRiskRequest, opts ...gax.CallOption) (*AnalyzeDataSourceRiskOperation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.AnalyzeDataSourceRisk[0:len(c.CallOptions.AnalyzeDataSourceRisk):len(c.CallOptions.AnalyzeDataSourceRisk)], opts...) 
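+ // The three-index slice above pins capacity to length, forcing append to
+ // copy into a fresh backing array instead of growing into (and mutating)
+ // the shared defaults stored in c.CallOptions.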
+ var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AnalyzeDataSourceRisk(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return &AnalyzeDataSourceRiskOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, resp), + }, nil +} + +// CreateInspectOperation schedules a job scanning content in a Google Cloud Platform data +// repository. +func (c *Client) CreateInspectOperation(ctx context.Context, req *dlppb.CreateInspectOperationRequest, opts ...gax.CallOption) (*CreateInspectOperationHandle, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateInspectOperation[0:len(c.CallOptions.CreateInspectOperation):len(c.CallOptions.CreateInspectOperation)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateInspectOperation(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return &CreateInspectOperationHandle{ + lro: longrunning.InternalNewOperation(c.LROClient, resp), + }, nil +} + +// ListInspectFindings returns list of results for given inspect operation result set id. +func (c *Client) ListInspectFindings(ctx context.Context, req *dlppb.ListInspectFindingsRequest, opts ...gax.CallOption) (*dlppb.ListInspectFindingsResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListInspectFindings[0:len(c.CallOptions.ListInspectFindings):len(c.CallOptions.ListInspectFindings)], opts...) + var resp *dlppb.ListInspectFindingsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListInspectFindings(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListInfoTypes returns sensitive information types for given category. +func (c *Client) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequest, opts ...gax.CallOption) (*dlppb.ListInfoTypesResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListInfoTypes[0:len(c.CallOptions.ListInfoTypes):len(c.CallOptions.ListInfoTypes)], opts...) + var resp *dlppb.ListInfoTypesResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListInfoTypes(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListRootCategories returns the list of root categories of sensitive information. +func (c *Client) ListRootCategories(ctx context.Context, req *dlppb.ListRootCategoriesRequest, opts ...gax.CallOption) (*dlppb.ListRootCategoriesResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListRootCategories[0:len(c.CallOptions.ListRootCategories):len(c.CallOptions.ListRootCategories)], opts...) + var resp *dlppb.ListRootCategoriesResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListRootCategories(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AnalyzeDataSourceRiskOperation manages a long-running operation from AnalyzeDataSourceRisk. 
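+// A typical flow, mirroring the example tests later in this patch: start
+// the job, then block until the service reports completion:
+//
+//	op, err := c.AnalyzeDataSourceRisk(ctx, req)
+//	if err != nil {
+//		// handle error
+//	}
+//	resp, err := op.Wait(ctx)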
+type AnalyzeDataSourceRiskOperation struct { + lro *longrunning.Operation +} + +// AnalyzeDataSourceRiskOperation returns a new AnalyzeDataSourceRiskOperation from a given name. +// The name must be that of a previously created AnalyzeDataSourceRiskOperation, possibly from a different process. +func (c *Client) AnalyzeDataSourceRiskOperation(name string) *AnalyzeDataSourceRiskOperation { + return &AnalyzeDataSourceRiskOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning the response and any errors encountered. +// +// See documentation of Poll for error-handling information. +func (op *AnalyzeDataSourceRiskOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*dlppb.RiskAnalysisOperationResult, error) { + var resp dlppb.RiskAnalysisOperationResult + if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil { + return nil, err + } + return &resp, nil +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true, and the response of the operation is returned. +// If Poll succeeds and the operation has not completed, the returned response and error are both nil. +func (op *AnalyzeDataSourceRiskOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*dlppb.RiskAnalysisOperationResult, error) { + var resp dlppb.RiskAnalysisOperationResult + if err := op.lro.Poll(ctx, &resp, opts...); err != nil { + return nil, err + } + if !op.Done() { + return nil, nil + } + return &resp, nil +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. +func (op *AnalyzeDataSourceRiskOperation) Metadata() (*dlppb.RiskAnalysisOperationMetadata, error) { + var meta dlppb.RiskAnalysisOperationMetadata + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *AnalyzeDataSourceRiskOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *AnalyzeDataSourceRiskOperation) Name() string { + return op.lro.Name() +} + +// CreateInspectOperationHandle manages a long-running operation from CreateInspectOperation. +type CreateInspectOperationHandle struct { + lro *longrunning.Operation +} + +// CreateInspectOperationHandle returns a new CreateInspectOperationHandle from a given name. +// The name must be that of a previously created CreateInspectOperationHandle, possibly from a different process. 
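+// A sketch of resuming from another process (savedName stands in for an
+// operation name persisted earlier):
+//
+//	h := c.CreateInspectOperationHandle(savedName)
+//	result, err := h.Wait(ctx)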
+func (c *Client) CreateInspectOperationHandle(name string) *CreateInspectOperationHandle { + return &CreateInspectOperationHandle{ + lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning the response and any errors encountered. +// +// See documentation of Poll for error-handling information. +func (op *CreateInspectOperationHandle) Wait(ctx context.Context, opts ...gax.CallOption) (*dlppb.InspectOperationResult, error) { + var resp dlppb.InspectOperationResult + if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil { + return nil, err + } + return &resp, nil +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true, and the response of the operation is returned. +// If Poll succeeds and the operation has not completed, the returned response and error are both nil. +func (op *CreateInspectOperationHandle) Poll(ctx context.Context, opts ...gax.CallOption) (*dlppb.InspectOperationResult, error) { + var resp dlppb.InspectOperationResult + if err := op.lro.Poll(ctx, &resp, opts...); err != nil { + return nil, err + } + if !op.Done() { + return nil, nil + } + return &resp, nil +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. +func (op *CreateInspectOperationHandle) Metadata() (*dlppb.InspectOperationMetadata, error) { + var meta dlppb.InspectOperationMetadata + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *CreateInspectOperationHandle) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *CreateInspectOperationHandle) Name() string { + return op.lro.Name() +} diff --git a/vendor/cloud.google.com/go/dlp/apiv2beta1/dlp_client_example_test.go b/vendor/cloud.google.com/go/dlp/apiv2beta1/dlp_client_example_test.go new file mode 100644 index 0000000..d33fbd3 --- /dev/null +++ b/vendor/cloud.google.com/go/dlp/apiv2beta1/dlp_client_example_test.go @@ -0,0 +1,187 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
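+// The examples below all share one shape per RPC: build a client, fill in a
+// request literal, call the method, and leave TODO markers where real field
+// values and error handling belong.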
+ +// AUTO-GENERATED CODE. DO NOT EDIT. + +package dlp_test + +import ( + "cloud.google.com/go/dlp/apiv2beta1" + "golang.org/x/net/context" + dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_InspectContent() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.InspectContentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.InspectContent(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_RedactContent() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.RedactContentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.RedactContent(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_DeidentifyContent() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.DeidentifyContentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.DeidentifyContent(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_AnalyzeDataSourceRisk() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.AnalyzeDataSourceRiskRequest{ + // TODO: Fill request struct fields. + } + op, err := c.AnalyzeDataSourceRisk(ctx, req) + if err != nil { + // TODO: Handle error. + } + + resp, err := op.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_CreateInspectOperation() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.CreateInspectOperationRequest{ + // TODO: Fill request struct fields. + } + op, err := c.CreateInspectOperation(ctx, req) + if err != nil { + // TODO: Handle error. + } + + resp, err := op.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListInspectFindings() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.ListInspectFindingsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ListInspectFindings(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListInfoTypes() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.ListInfoTypesRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ListInfoTypes(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListRootCategories() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.ListRootCategoriesRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ListRootCategories(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp +} diff --git a/vendor/cloud.google.com/go/dlp/apiv2beta1/doc.go b/vendor/cloud.google.com/go/dlp/apiv2beta1/doc.go new file mode 100644 index 0000000..28a2d82 --- /dev/null +++ b/vendor/cloud.google.com/go/dlp/apiv2beta1/doc.go @@ -0,0 +1,48 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package dlp is an auto-generated package for the +// DLP API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// The Google Data Loss Prevention API provides methods for detection of +// privacy-sensitive fragments in text, images, and Google Cloud Platform +// storage repositories. +package dlp // import "cloud.google.com/go/dlp/apiv2beta1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + } +} diff --git a/vendor/cloud.google.com/go/dlp/apiv2beta1/mock_test.go b/vendor/cloud.google.com/go/dlp/apiv2beta1/mock_test.go new file mode 100644 index 0000000..1adb03b --- /dev/null +++ b/vendor/cloud.google.com/go/dlp/apiv2beta1/mock_test.go @@ -0,0 +1,844 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package dlp + +import ( + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockDlpServer struct { + // Embed for forward compatibility. 
+ // Tests will keep working if more methods are added + // in the future. + dlppb.DlpServiceServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockDlpServer) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest) (*dlppb.InspectContentResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.InspectContentResponse), nil +} + +func (s *mockDlpServer) RedactContent(ctx context.Context, req *dlppb.RedactContentRequest) (*dlppb.RedactContentResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.RedactContentResponse), nil +} + +func (s *mockDlpServer) DeidentifyContent(ctx context.Context, req *dlppb.DeidentifyContentRequest) (*dlppb.DeidentifyContentResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.DeidentifyContentResponse), nil +} + +func (s *mockDlpServer) CreateInspectOperation(ctx context.Context, req *dlppb.CreateInspectOperationRequest) (*longrunningpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +func (s *mockDlpServer) AnalyzeDataSourceRisk(ctx context.Context, req *dlppb.AnalyzeDataSourceRiskRequest) (*longrunningpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +func (s *mockDlpServer) ListInspectFindings(ctx context.Context, req *dlppb.ListInspectFindingsRequest) (*dlppb.ListInspectFindingsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.ListInspectFindingsResponse), nil +} + +func (s *mockDlpServer) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequest) (*dlppb.ListInfoTypesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + 
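+ // Every mock method repeats this header check: the generated client
+ // must send an x-goog-api-client value containing a "gl-go/" token.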
s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.ListInfoTypesResponse), nil +} + +func (s *mockDlpServer) ListRootCategories(ctx context.Context, req *dlppb.ListRootCategoriesRequest) (*dlppb.ListRootCategoriesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.ListRootCategoriesResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. +var clientOpt option.ClientOption + +var ( + mockDlp mockDlpServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + dlppb.RegisterDlpServiceServer(serv, &mockDlp) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestDlpServiceInspectContent(t *testing.T) { + var expectedResponse *dlppb.InspectContentResponse = &dlppb.InspectContentResponse{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var name string = "EMAIL_ADDRESS" + var infoTypesElement = &dlppb.InfoType{ + Name: name, + } + var infoTypes = []*dlppb.InfoType{infoTypesElement} + var inspectConfig = &dlppb.InspectConfig{ + InfoTypes: infoTypes, + } + var type_ string = "text/plain" + var value string = "My email is example@example.com." + var itemsElement = &dlppb.ContentItem{ + Type: type_, + DataItem: &dlppb.ContentItem_Value{ + Value: value, + }, + } + var items = []*dlppb.ContentItem{itemsElement} + var request = &dlppb.InspectContentRequest{ + InspectConfig: inspectConfig, + Items: items, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.InspectContent(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceInspectContentError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var name string = "EMAIL_ADDRESS" + var infoTypesElement = &dlppb.InfoType{ + Name: name, + } + var infoTypes = []*dlppb.InfoType{infoTypesElement} + var inspectConfig = &dlppb.InspectConfig{ + InfoTypes: infoTypes, + } + var type_ string = "text/plain" + var value string = "My email is example@example.com." 
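+ // The error-path test rebuilds the success-path request verbatim; only
+ // mockDlp.err changes, so any failure must surface from the stubbed RPC.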
+ var itemsElement = &dlppb.ContentItem{ + Type: type_, + DataItem: &dlppb.ContentItem_Value{ + Value: value, + }, + } + var items = []*dlppb.ContentItem{itemsElement} + var request = &dlppb.InspectContentRequest{ + InspectConfig: inspectConfig, + Items: items, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.InspectContent(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceRedactContent(t *testing.T) { + var expectedResponse *dlppb.RedactContentResponse = &dlppb.RedactContentResponse{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var name string = "EMAIL_ADDRESS" + var infoTypesElement = &dlppb.InfoType{ + Name: name, + } + var infoTypes = []*dlppb.InfoType{infoTypesElement} + var inspectConfig = &dlppb.InspectConfig{ + InfoTypes: infoTypes, + } + var type_ string = "text/plain" + var value string = "My email is example@example.com." + var itemsElement = &dlppb.ContentItem{ + Type: type_, + DataItem: &dlppb.ContentItem_Value{ + Value: value, + }, + } + var items = []*dlppb.ContentItem{itemsElement} + var name2 string = "EMAIL_ADDRESS" + var infoType = &dlppb.InfoType{ + Name: name2, + } + var replaceWith string = "REDACTED" + var replaceConfigsElement = &dlppb.RedactContentRequest_ReplaceConfig{ + InfoType: infoType, + ReplaceWith: replaceWith, + } + var replaceConfigs = []*dlppb.RedactContentRequest_ReplaceConfig{replaceConfigsElement} + var request = &dlppb.RedactContentRequest{ + InspectConfig: inspectConfig, + Items: items, + ReplaceConfigs: replaceConfigs, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.RedactContent(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceRedactContentError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var name string = "EMAIL_ADDRESS" + var infoTypesElement = &dlppb.InfoType{ + Name: name, + } + var infoTypes = []*dlppb.InfoType{infoTypesElement} + var inspectConfig = &dlppb.InspectConfig{ + InfoTypes: infoTypes, + } + var type_ string = "text/plain" + var value string = "My email is example@example.com." 
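+ // The ReplaceConfigs assembled below pair each InfoType with the literal
+ // replacement text ("REDACTED") expected for its matches.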
+ var itemsElement = &dlppb.ContentItem{ + Type: type_, + DataItem: &dlppb.ContentItem_Value{ + Value: value, + }, + } + var items = []*dlppb.ContentItem{itemsElement} + var name2 string = "EMAIL_ADDRESS" + var infoType = &dlppb.InfoType{ + Name: name2, + } + var replaceWith string = "REDACTED" + var replaceConfigsElement = &dlppb.RedactContentRequest_ReplaceConfig{ + InfoType: infoType, + ReplaceWith: replaceWith, + } + var replaceConfigs = []*dlppb.RedactContentRequest_ReplaceConfig{replaceConfigsElement} + var request = &dlppb.RedactContentRequest{ + InspectConfig: inspectConfig, + Items: items, + ReplaceConfigs: replaceConfigs, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.RedactContent(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceDeidentifyContent(t *testing.T) { + var expectedResponse *dlppb.DeidentifyContentResponse = &dlppb.DeidentifyContentResponse{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var deidentifyConfig *dlppb.DeidentifyConfig = &dlppb.DeidentifyConfig{} + var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{} + var items []*dlppb.ContentItem = nil + var request = &dlppb.DeidentifyContentRequest{ + DeidentifyConfig: deidentifyConfig, + InspectConfig: inspectConfig, + Items: items, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.DeidentifyContent(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceDeidentifyContentError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var deidentifyConfig *dlppb.DeidentifyConfig = &dlppb.DeidentifyConfig{} + var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{} + var items []*dlppb.ContentItem = nil + var request = &dlppb.DeidentifyContentRequest{ + DeidentifyConfig: deidentifyConfig, + InspectConfig: inspectConfig, + Items: items, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.DeidentifyContent(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceAnalyzeDataSourceRisk(t *testing.T) { + var expectedResponse *dlppb.RiskAnalysisOperationResult = &dlppb.RiskAnalysisOperationResult{} + + mockDlp.err = nil + mockDlp.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockDlp.resps = append(mockDlp.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var privacyMetric *dlppb.PrivacyMetric = &dlppb.PrivacyMetric{} + var sourceTable *dlppb.BigQueryTable = &dlppb.BigQueryTable{} + var request = &dlppb.AnalyzeDataSourceRiskRequest{ + 
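+ // The Operation queued above is already Done, with the expected result
+ // packed via ptypes.MarshalAny, so respLRO.Wait below returns at once.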
PrivacyMetric: privacyMetric, + SourceTable: sourceTable, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.AnalyzeDataSourceRisk(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceAnalyzeDataSourceRiskError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = nil + mockDlp.resps = append(mockDlp.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var privacyMetric *dlppb.PrivacyMetric = &dlppb.PrivacyMetric{} + var sourceTable *dlppb.BigQueryTable = &dlppb.BigQueryTable{} + var request = &dlppb.AnalyzeDataSourceRiskRequest{ + PrivacyMetric: privacyMetric, + SourceTable: sourceTable, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.AnalyzeDataSourceRisk(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceCreateInspectOperation(t *testing.T) { + var name2 string = "name2-1052831874" + var expectedResponse = &dlppb.InspectOperationResult{ + Name: name2, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockDlp.resps = append(mockDlp.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var name string = "EMAIL_ADDRESS" + var infoTypesElement = &dlppb.InfoType{ + Name: name, + } + var infoTypes = []*dlppb.InfoType{infoTypesElement} + var inspectConfig = &dlppb.InspectConfig{ + InfoTypes: infoTypes, + } + var url string = "gs://example_bucket/example_file.png" + var fileSet = &dlppb.CloudStorageOptions_FileSet{ + Url: url, + } + var cloudStorageOptions = &dlppb.CloudStorageOptions{ + FileSet: fileSet, + } + var storageConfig = &dlppb.StorageConfig{ + Type: &dlppb.StorageConfig_CloudStorageOptions{ + CloudStorageOptions: cloudStorageOptions, + }, + } + var outputConfig *dlppb.OutputStorageConfig = &dlppb.OutputStorageConfig{} + var request = &dlppb.CreateInspectOperationRequest{ + InspectConfig: inspectConfig, + StorageConfig: storageConfig, + OutputConfig: outputConfig, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.CreateInspectOperation(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func 
TestDlpServiceCreateInspectOperationError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = nil + mockDlp.resps = append(mockDlp.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var name string = "EMAIL_ADDRESS" + var infoTypesElement = &dlppb.InfoType{ + Name: name, + } + var infoTypes = []*dlppb.InfoType{infoTypesElement} + var inspectConfig = &dlppb.InspectConfig{ + InfoTypes: infoTypes, + } + var url string = "gs://example_bucket/example_file.png" + var fileSet = &dlppb.CloudStorageOptions_FileSet{ + Url: url, + } + var cloudStorageOptions = &dlppb.CloudStorageOptions{ + FileSet: fileSet, + } + var storageConfig = &dlppb.StorageConfig{ + Type: &dlppb.StorageConfig_CloudStorageOptions{ + CloudStorageOptions: cloudStorageOptions, + }, + } + var outputConfig *dlppb.OutputStorageConfig = &dlppb.OutputStorageConfig{} + var request = &dlppb.CreateInspectOperationRequest{ + InspectConfig: inspectConfig, + StorageConfig: storageConfig, + OutputConfig: outputConfig, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.CreateInspectOperation(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceListInspectFindings(t *testing.T) { + var nextPageToken string = "nextPageToken-1530815211" + var expectedResponse = &dlppb.ListInspectFindingsResponse{ + NextPageToken: nextPageToken, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("inspect/results/%s", "[RESULT]") + var request = &dlppb.ListInspectFindingsRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInspectFindings(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceListInspectFindingsError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("inspect/results/%s", "[RESULT]") + var request = &dlppb.ListInspectFindingsRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInspectFindings(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceListInfoTypes(t *testing.T) { + var expectedResponse *dlppb.ListInfoTypesResponse = &dlppb.ListInfoTypesResponse{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var category string = "PII" + var languageCode string = 
"en" + var request = &dlppb.ListInfoTypesRequest{ + Category: category, + LanguageCode: languageCode, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInfoTypes(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceListInfoTypesError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var category string = "PII" + var languageCode string = "en" + var request = &dlppb.ListInfoTypesRequest{ + Category: category, + LanguageCode: languageCode, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInfoTypes(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceListRootCategories(t *testing.T) { + var expectedResponse *dlppb.ListRootCategoriesResponse = &dlppb.ListRootCategoriesResponse{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var languageCode string = "en" + var request = &dlppb.ListRootCategoriesRequest{ + LanguageCode: languageCode, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListRootCategories(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceListRootCategoriesError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var languageCode string = "en" + var request = &dlppb.ListRootCategoriesRequest{ + LanguageCode: languageCode, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListRootCategories(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/dlp/apiv2beta1/path_funcs.go b/vendor/cloud.google.com/go/dlp/apiv2beta1/path_funcs.go new file mode 100644 index 0000000..2450034 --- /dev/null +++ b/vendor/cloud.google.com/go/dlp/apiv2beta1/path_funcs.go @@ -0,0 +1,27 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package dlp + +// ResultPath returns the path for the result resource. +// +// Deprecated: Use +// fmt.Sprintf("inspect/results/%s", result) +// instead. +func ResultPath(result string) string { + return "" + + "inspect/results/" + + result + + "" +} diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/ReportErrorEvent_smoke_test.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/ReportErrorEvent_smoke_test.go new file mode 100644 index 0000000..4aadfbd --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/ReportErrorEvent_smoke_test.go @@ -0,0 +1,88 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package errorreporting + +import ( + clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" +) + +import ( + "fmt" + "strconv" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var _ = fmt.Sprintf +var _ = iterator.Done +var _ = strconv.FormatUint +var _ = time.Now + +func TestReportErrorsServiceSmoke(t *testing.T) { + if testing.Short() { + t.Skip("skipping smoke test in short mode") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) + if ts == nil { + t.Skip("Integration tests skipped. 
See CONTRIBUTING.md for details") + } + + projectId := testutil.ProjID() + _ = projectId + + c, err := NewReportErrorsClient(ctx, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + + var formattedProjectName string = fmt.Sprintf("projects/%s", projectId) + var message string = "[MESSAGE]" + var service string = "[SERVICE]" + var serviceContext = &clouderrorreportingpb.ServiceContext{ + Service: service, + } + var filePath string = "path/to/file.lang" + var lineNumber int32 = 42 + var functionName string = "meaningOfLife" + var reportLocation = &clouderrorreportingpb.SourceLocation{ + FilePath: filePath, + LineNumber: lineNumber, + FunctionName: functionName, + } + var context_ = &clouderrorreportingpb.ErrorContext{ + ReportLocation: reportLocation, + } + var event = &clouderrorreportingpb.ReportedErrorEvent{ + Message: message, + ServiceContext: serviceContext, + Context: context_, + } + var request = &clouderrorreportingpb.ReportErrorEventRequest{ + ProjectName: formattedProjectName, + Event: event, + } + + if _, err := c.ReportErrorEvent(ctx, request); err != nil { + t.Error(err) + } +} diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/doc.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/doc.go new file mode 100644 index 0000000..b78e1e3 --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/doc.go @@ -0,0 +1,50 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package errorreporting is an auto-generated package for the +// Stackdriver Error Reporting API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// Stackdriver Error Reporting groups and counts similar errors from cloud +// services. The Stackdriver Error Reporting API provides a way to report new +// errors and read access to error groups and their associated errors. +// +// Use the client at cloud.google.com/go/errorreporting in preference to this. +package errorreporting // import "cloud.google.com/go/errorreporting/apiv1beta1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. 
+func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + } +} diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client.go new file mode 100644 index 0000000..e915fe5 --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client.go @@ -0,0 +1,151 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package errorreporting + +import ( + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// ErrorGroupCallOptions contains the retry settings for each method of ErrorGroupClient. +type ErrorGroupCallOptions struct { + GetGroup []gax.CallOption + UpdateGroup []gax.CallOption +} + +func defaultErrorGroupClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("clouderrorreporting.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultErrorGroupCallOptions() *ErrorGroupCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &ErrorGroupCallOptions{ + GetGroup: retry[[2]string{"default", "idempotent"}], + UpdateGroup: retry[[2]string{"default", "idempotent"}], + } +} + +// ErrorGroupClient is a client for interacting with Stackdriver Error Reporting API. +type ErrorGroupClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + errorGroupClient clouderrorreportingpb.ErrorGroupServiceClient + + // The call options for this service. + CallOptions *ErrorGroupCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewErrorGroupClient creates a new error group service client. +// +// Service for retrieving and updating individual error groups. +func NewErrorGroupClient(ctx context.Context, opts ...option.ClientOption) (*ErrorGroupClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultErrorGroupClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &ErrorGroupClient{ + conn: conn, + CallOptions: defaultErrorGroupCallOptions(), + + errorGroupClient: clouderrorreportingpb.NewErrorGroupServiceClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. 
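+// (The returned *grpc.ClientConn can seed sibling clients through
+// option.WithGRPCConn, the same option the mock tests below use to point
+// clients at an in-process server.)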
+func (c *ErrorGroupClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ErrorGroupClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *ErrorGroupClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// GetGroup get the specified group. +func (c *ErrorGroupClient) GetGroup(ctx context.Context, req *clouderrorreportingpb.GetGroupRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ErrorGroup, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetGroup[0:len(c.CallOptions.GetGroup):len(c.CallOptions.GetGroup)], opts...) + var resp *clouderrorreportingpb.ErrorGroup + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.errorGroupClient.GetGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateGroup replace the data for the specified group. +// Fails if the group does not exist. +func (c *ErrorGroupClient) UpdateGroup(ctx context.Context, req *clouderrorreportingpb.UpdateGroupRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ErrorGroup, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateGroup[0:len(c.CallOptions.UpdateGroup):len(c.CallOptions.UpdateGroup)], opts...) + var resp *clouderrorreportingpb.ErrorGroup + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.errorGroupClient.UpdateGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client_example_test.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client_example_test.go new file mode 100644 index 0000000..b846a1e --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client_example_test.go @@ -0,0 +1,69 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package errorreporting_test + +import ( + "cloud.google.com/go/errorreporting/apiv1beta1" + "golang.org/x/net/context" + clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" +) + +func ExampleNewErrorGroupClient() { + ctx := context.Background() + c, err := errorreporting.NewErrorGroupClient(ctx) + if err != nil { + // TODO: Handle error. 
+ } + // TODO: Use client. + _ = c +} + +func ExampleErrorGroupClient_GetGroup() { + ctx := context.Background() + c, err := errorreporting.NewErrorGroupClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouderrorreportingpb.GetGroupRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetGroup(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleErrorGroupClient_UpdateGroup() { + ctx := context.Background() + c, err := errorreporting.NewErrorGroupClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouderrorreportingpb.UpdateGroupRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateGroup(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client.go new file mode 100644 index 0000000..0d70b00 --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client.go @@ -0,0 +1,293 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package errorreporting + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// ErrorStatsCallOptions contains the retry settings for each method of ErrorStatsClient. +type ErrorStatsCallOptions struct { + ListGroupStats []gax.CallOption + ListEvents []gax.CallOption + DeleteEvents []gax.CallOption +} + +func defaultErrorStatsClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("clouderrorreporting.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultErrorStatsCallOptions() *ErrorStatsCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &ErrorStatsCallOptions{ + ListGroupStats: retry[[2]string{"default", "idempotent"}], + ListEvents: retry[[2]string{"default", "idempotent"}], + DeleteEvents: retry[[2]string{"default", "idempotent"}], + } +} + +// ErrorStatsClient is a client for interacting with Stackdriver Error Reporting API. +type ErrorStatsClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. 
+ errorStatsClient clouderrorreportingpb.ErrorStatsServiceClient + + // The call options for this service. + CallOptions *ErrorStatsCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewErrorStatsClient creates a new error stats service client. +// +// An API for retrieving and managing error statistics as well as data for +// individual events. +func NewErrorStatsClient(ctx context.Context, opts ...option.ClientOption) (*ErrorStatsClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultErrorStatsClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &ErrorStatsClient{ + conn: conn, + CallOptions: defaultErrorStatsCallOptions(), + + errorStatsClient: clouderrorreportingpb.NewErrorStatsServiceClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *ErrorStatsClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ErrorStatsClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *ErrorStatsClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListGroupStats lists the specified groups. +func (c *ErrorStatsClient) ListGroupStats(ctx context.Context, req *clouderrorreportingpb.ListGroupStatsRequest, opts ...gax.CallOption) *ErrorGroupStatsIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListGroupStats[0:len(c.CallOptions.ListGroupStats):len(c.CallOptions.ListGroupStats)], opts...) + it := &ErrorGroupStatsIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*clouderrorreportingpb.ErrorGroupStats, string, error) { + var resp *clouderrorreportingpb.ListGroupStatsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.errorStatsClient.ListGroupStats(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.ErrorGroupStats, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// ListEvents lists the specified events. +func (c *ErrorStatsClient) ListEvents(ctx context.Context, req *clouderrorreportingpb.ListEventsRequest, opts ...gax.CallOption) *ErrorEventIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListEvents[0:len(c.CallOptions.ListEvents):len(c.CallOptions.ListEvents)], opts...) 
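+ // The full-slice expression above pins cap == len, so appending the
+ // caller's opts copies instead of mutating the shared defaults.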
+ it := &ErrorEventIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*clouderrorreportingpb.ErrorEvent, string, error) { + var resp *clouderrorreportingpb.ListEventsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.errorStatsClient.ListEvents(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.ErrorEvents, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// DeleteEvents deletes all error events of a given project. +func (c *ErrorStatsClient) DeleteEvents(ctx context.Context, req *clouderrorreportingpb.DeleteEventsRequest, opts ...gax.CallOption) (*clouderrorreportingpb.DeleteEventsResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteEvents[0:len(c.CallOptions.DeleteEvents):len(c.CallOptions.DeleteEvents)], opts...) + var resp *clouderrorreportingpb.DeleteEventsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.errorStatsClient.DeleteEvents(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ErrorEventIterator manages a stream of *clouderrorreportingpb.ErrorEvent. +type ErrorEventIterator struct { + items []*clouderrorreportingpb.ErrorEvent + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*clouderrorreportingpb.ErrorEvent, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ErrorEventIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *ErrorEventIterator) Next() (*clouderrorreportingpb.ErrorEvent, error) { + var item *clouderrorreportingpb.ErrorEvent + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ErrorEventIterator) bufLen() int { + return len(it.items) +} + +func (it *ErrorEventIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// ErrorGroupStatsIterator manages a stream of *clouderrorreportingpb.ErrorGroupStats. +type ErrorGroupStatsIterator struct { + items []*clouderrorreportingpb.ErrorGroupStats + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. 
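+ // (It feeds the fetch closure that iterator.NewPageInfo installs above.)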
+ // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*clouderrorreportingpb.ErrorGroupStats, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ErrorGroupStatsIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *ErrorGroupStatsIterator) Next() (*clouderrorreportingpb.ErrorGroupStats, error) { + var item *clouderrorreportingpb.ErrorGroupStats + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ErrorGroupStatsIterator) bufLen() int { + return len(it.items) +} + +func (it *ErrorGroupStatsIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client_example_test.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client_example_test.go new file mode 100644 index 0000000..6f1858c --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client_example_test.go @@ -0,0 +1,100 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package errorreporting_test + +import ( + "cloud.google.com/go/errorreporting/apiv1beta1" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" +) + +func ExampleNewErrorStatsClient() { + ctx := context.Background() + c, err := errorreporting.NewErrorStatsClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleErrorStatsClient_ListGroupStats() { + ctx := context.Background() + c, err := errorreporting.NewErrorStatsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouderrorreportingpb.ListGroupStatsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListGroupStats(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleErrorStatsClient_ListEvents() { + ctx := context.Background() + c, err := errorreporting.NewErrorStatsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouderrorreportingpb.ListEventsRequest{ + // TODO: Fill request struct fields. 
+ } + it := c.ListEvents(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleErrorStatsClient_DeleteEvents() { + ctx := context.Background() + c, err := errorreporting.NewErrorStatsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouderrorreportingpb.DeleteEventsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.DeleteEvents(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/mock_test.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/mock_test.go new file mode 100644 index 0000000..870dc22 --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/mock_test.go @@ -0,0 +1,587 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package errorreporting + +import ( + clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockErrorGroupServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + clouderrorreportingpb.ErrorGroupServiceServer + + reqs []proto.Message + + // If set, all calls return this error. 
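+ // Each test either leaves it nil (success path) or sets a gRPC status
+ // error (failure path) before issuing its call.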
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockErrorGroupServer) GetGroup(ctx context.Context, req *clouderrorreportingpb.GetGroupRequest) (*clouderrorreportingpb.ErrorGroup, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouderrorreportingpb.ErrorGroup), nil +} + +func (s *mockErrorGroupServer) UpdateGroup(ctx context.Context, req *clouderrorreportingpb.UpdateGroupRequest) (*clouderrorreportingpb.ErrorGroup, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouderrorreportingpb.ErrorGroup), nil +} + +type mockErrorStatsServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + clouderrorreportingpb.ErrorStatsServiceServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockErrorStatsServer) ListGroupStats(ctx context.Context, req *clouderrorreportingpb.ListGroupStatsRequest) (*clouderrorreportingpb.ListGroupStatsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouderrorreportingpb.ListGroupStatsResponse), nil +} + +func (s *mockErrorStatsServer) ListEvents(ctx context.Context, req *clouderrorreportingpb.ListEventsRequest) (*clouderrorreportingpb.ListEventsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouderrorreportingpb.ListEventsResponse), nil +} + +func (s *mockErrorStatsServer) DeleteEvents(ctx context.Context, req *clouderrorreportingpb.DeleteEventsRequest) (*clouderrorreportingpb.DeleteEventsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouderrorreportingpb.DeleteEventsResponse), nil +} + +type mockReportErrorsServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + clouderrorreportingpb.ReportErrorsServiceServer + + reqs []proto.Message + + // If set, all calls return this error. 
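+ // (Unlike the live smoke test earlier in this patch, the unit tests
+ // drive this mock over an in-process gRPC connection set up in TestMain.)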
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockReportErrorsServer) ReportErrorEvent(ctx context.Context, req *clouderrorreportingpb.ReportErrorEventRequest) (*clouderrorreportingpb.ReportErrorEventResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouderrorreportingpb.ReportErrorEventResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. +var clientOpt option.ClientOption + +var ( + mockErrorGroup mockErrorGroupServer + mockErrorStats mockErrorStatsServer + mockReportErrors mockReportErrorsServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + clouderrorreportingpb.RegisterErrorGroupServiceServer(serv, &mockErrorGroup) + clouderrorreportingpb.RegisterErrorStatsServiceServer(serv, &mockErrorStats) + clouderrorreportingpb.RegisterReportErrorsServiceServer(serv, &mockReportErrors) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestErrorGroupServiceGetGroup(t *testing.T) { + var name string = "name3373707" + var groupId string = "groupId506361563" + var expectedResponse = &clouderrorreportingpb.ErrorGroup{ + Name: name, + GroupId: groupId, + } + + mockErrorGroup.err = nil + mockErrorGroup.reqs = nil + + mockErrorGroup.resps = append(mockErrorGroup.resps[:0], expectedResponse) + + var formattedGroupName string = fmt.Sprintf("projects/%s/groups/%s", "[PROJECT]", "[GROUP]") + var request = &clouderrorreportingpb.GetGroupRequest{ + GroupName: formattedGroupName, + } + + c, err := NewErrorGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetGroup(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockErrorGroup.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestErrorGroupServiceGetGroupError(t *testing.T) { + errCode := codes.PermissionDenied + mockErrorGroup.err = gstatus.Error(errCode, "test error") + + var formattedGroupName string = fmt.Sprintf("projects/%s/groups/%s", "[PROJECT]", "[GROUP]") + var request = &clouderrorreportingpb.GetGroupRequest{ + GroupName: formattedGroupName, + } + + c, err := NewErrorGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetGroup(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestErrorGroupServiceUpdateGroup(t *testing.T) { + var name string = "name3373707" + var groupId string = "groupId506361563" + var expectedResponse = &clouderrorreportingpb.ErrorGroup{ + Name: name, + GroupId: groupId, + } + + mockErrorGroup.err = nil + mockErrorGroup.reqs = nil + + 
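+ // append(resps[:0], ...) reuses the backing array to queue exactly one
+ // canned response for the UpdateGroup call below.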
mockErrorGroup.resps = append(mockErrorGroup.resps[:0], expectedResponse) + + var group *clouderrorreportingpb.ErrorGroup = &clouderrorreportingpb.ErrorGroup{} + var request = &clouderrorreportingpb.UpdateGroupRequest{ + Group: group, + } + + c, err := NewErrorGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateGroup(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockErrorGroup.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestErrorGroupServiceUpdateGroupError(t *testing.T) { + errCode := codes.PermissionDenied + mockErrorGroup.err = gstatus.Error(errCode, "test error") + + var group *clouderrorreportingpb.ErrorGroup = &clouderrorreportingpb.ErrorGroup{} + var request = &clouderrorreportingpb.UpdateGroupRequest{ + Group: group, + } + + c, err := NewErrorGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateGroup(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestErrorStatsServiceListGroupStats(t *testing.T) { + var nextPageToken string = "" + var errorGroupStatsElement *clouderrorreportingpb.ErrorGroupStats = &clouderrorreportingpb.ErrorGroupStats{} + var errorGroupStats = []*clouderrorreportingpb.ErrorGroupStats{errorGroupStatsElement} + var expectedResponse = &clouderrorreportingpb.ListGroupStatsResponse{ + NextPageToken: nextPageToken, + ErrorGroupStats: errorGroupStats, + } + + mockErrorStats.err = nil + mockErrorStats.reqs = nil + + mockErrorStats.resps = append(mockErrorStats.resps[:0], expectedResponse) + + var formattedProjectName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var timeRange *clouderrorreportingpb.QueryTimeRange = &clouderrorreportingpb.QueryTimeRange{} + var request = &clouderrorreportingpb.ListGroupStatsRequest{ + ProjectName: formattedProjectName, + TimeRange: timeRange, + } + + c, err := NewErrorStatsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListGroupStats(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockErrorStats.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.ErrorGroupStats[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestErrorStatsServiceListGroupStatsError(t *testing.T) { + errCode := codes.PermissionDenied + mockErrorStats.err = gstatus.Error(errCode, "test error") + + var formattedProjectName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var timeRange *clouderrorreportingpb.QueryTimeRange = &clouderrorreportingpb.QueryTimeRange{} + var request = &clouderrorreportingpb.ListGroupStatsRequest{ + ProjectName: formattedProjectName, + TimeRange: timeRange, + } + + c, err := NewErrorStatsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := 
c.ListGroupStats(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestErrorStatsServiceListEvents(t *testing.T) { + var nextPageToken string = "" + var errorEventsElement *clouderrorreportingpb.ErrorEvent = &clouderrorreportingpb.ErrorEvent{} + var errorEvents = []*clouderrorreportingpb.ErrorEvent{errorEventsElement} + var expectedResponse = &clouderrorreportingpb.ListEventsResponse{ + NextPageToken: nextPageToken, + ErrorEvents: errorEvents, + } + + mockErrorStats.err = nil + mockErrorStats.reqs = nil + + mockErrorStats.resps = append(mockErrorStats.resps[:0], expectedResponse) + + var formattedProjectName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var groupId string = "groupId506361563" + var request = &clouderrorreportingpb.ListEventsRequest{ + ProjectName: formattedProjectName, + GroupId: groupId, + } + + c, err := NewErrorStatsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListEvents(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockErrorStats.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.ErrorEvents[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestErrorStatsServiceListEventsError(t *testing.T) { + errCode := codes.PermissionDenied + mockErrorStats.err = gstatus.Error(errCode, "test error") + + var formattedProjectName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var groupId string = "groupId506361563" + var request = &clouderrorreportingpb.ListEventsRequest{ + ProjectName: formattedProjectName, + GroupId: groupId, + } + + c, err := NewErrorStatsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListEvents(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestErrorStatsServiceDeleteEvents(t *testing.T) { + var expectedResponse *clouderrorreportingpb.DeleteEventsResponse = &clouderrorreportingpb.DeleteEventsResponse{} + + mockErrorStats.err = nil + mockErrorStats.reqs = nil + + mockErrorStats.resps = append(mockErrorStats.resps[:0], expectedResponse) + + var formattedProjectName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &clouderrorreportingpb.DeleteEventsRequest{ + ProjectName: formattedProjectName, + } + + c, err := NewErrorStatsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.DeleteEvents(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockErrorStats.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestErrorStatsServiceDeleteEventsError(t *testing.T) { + errCode := codes.PermissionDenied + mockErrorStats.err = 
gstatus.Error(errCode, "test error") + + var formattedProjectName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &clouderrorreportingpb.DeleteEventsRequest{ + ProjectName: formattedProjectName, + } + + c, err := NewErrorStatsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.DeleteEvents(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestReportErrorsServiceReportErrorEvent(t *testing.T) { + var expectedResponse *clouderrorreportingpb.ReportErrorEventResponse = &clouderrorreportingpb.ReportErrorEventResponse{} + + mockReportErrors.err = nil + mockReportErrors.reqs = nil + + mockReportErrors.resps = append(mockReportErrors.resps[:0], expectedResponse) + + var formattedProjectName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var event *clouderrorreportingpb.ReportedErrorEvent = &clouderrorreportingpb.ReportedErrorEvent{} + var request = &clouderrorreportingpb.ReportErrorEventRequest{ + ProjectName: formattedProjectName, + Event: event, + } + + c, err := NewReportErrorsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ReportErrorEvent(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockReportErrors.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestReportErrorsServiceReportErrorEventError(t *testing.T) { + errCode := codes.PermissionDenied + mockReportErrors.err = gstatus.Error(errCode, "test error") + + var formattedProjectName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var event *clouderrorreportingpb.ReportedErrorEvent = &clouderrorreportingpb.ReportedErrorEvent{} + var request = &clouderrorreportingpb.ReportErrorEventRequest{ + ProjectName: formattedProjectName, + Event: event, + } + + c, err := NewReportErrorsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ReportErrorEvent(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/path_funcs.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/path_funcs.go new file mode 100644 index 0000000..5ca5e92 --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/path_funcs.go @@ -0,0 +1,51 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errorreporting + +// ResultPath returns the path for the result resource. 
+// +// Deprecated: Use +// fmt.Sprintf("inspect/results/%s", result) +// instead. +func ResultPath(result string) string { + return "" + + "inspect/results/" + + result + + "" +} + +// ErrorStatsProjectPath returns the path for the project resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s", project) +// instead. +func ErrorStatsProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// ReportErrorsProjectPath returns the path for the project resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s", project) +// instead. +func ReportErrorsProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client.go new file mode 100644 index 0000000..0d3440a --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client.go @@ -0,0 +1,122 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package errorreporting + +import ( + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +// ReportErrorsCallOptions contains the retry settings for each method of ReportErrorsClient. +type ReportErrorsCallOptions struct { + ReportErrorEvent []gax.CallOption +} + +func defaultReportErrorsClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("clouderrorreporting.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultReportErrorsCallOptions() *ReportErrorsCallOptions { + retry := map[[2]string][]gax.CallOption{} + return &ReportErrorsCallOptions{ + ReportErrorEvent: retry[[2]string{"default", "non_idempotent"}], + } +} + +// ReportErrorsClient is a client for interacting with Stackdriver Error Reporting API. +type ReportErrorsClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + reportErrorsClient clouderrorreportingpb.ReportErrorsServiceClient + + // The call options for this service. + CallOptions *ReportErrorsCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewReportErrorsClient creates a new report errors service client. +// +// An API for reporting error events. +func NewReportErrorsClient(ctx context.Context, opts ...option.ClientOption) (*ReportErrorsClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultReportErrorsClientOptions(), opts...)...) 
+ if err != nil { + return nil, err + } + c := &ReportErrorsClient{ + conn: conn, + CallOptions: defaultReportErrorsCallOptions(), + + reportErrorsClient: clouderrorreportingpb.NewReportErrorsServiceClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *ReportErrorsClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ReportErrorsClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *ReportErrorsClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ReportErrorEvent report an individual error event. +// +// This endpoint accepts either an OAuth token, +// or an +// API key +// for authentication. To use an API key, append it to the URL as the value of +// a key parameter. For example:
+// POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456
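+//
+// Illustrative call sketch (not from the generated source; the project name
+// and event value below are placeholders, mirroring the request shape used by
+// this package's mock tests):
+//
+//	req := &clouderrorreportingpb.ReportErrorEventRequest{
+//		ProjectName: "projects/my-project",
+//		Event:       &clouderrorreportingpb.ReportedErrorEvent{},
+//	}
+//	resp, err := c.ReportErrorEvent(ctx, req)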
+func (c *ReportErrorsClient) ReportErrorEvent(ctx context.Context, req *clouderrorreportingpb.ReportErrorEventRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ReportErrorEventResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ReportErrorEvent[0:len(c.CallOptions.ReportErrorEvent):len(c.CallOptions.ReportErrorEvent)], opts...) + var resp *clouderrorreportingpb.ReportErrorEventResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.reportErrorsClient.ReportErrorEvent(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client_example_test.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client_example_test.go new file mode 100644 index 0000000..999ad8c --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client_example_test.go @@ -0,0 +1,51 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package errorreporting_test + +import ( + "cloud.google.com/go/errorreporting/apiv1beta1" + "golang.org/x/net/context" + clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" +) + +func ExampleNewReportErrorsClient() { + ctx := context.Background() + c, err := errorreporting.NewReportErrorsClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleReportErrorsClient_ReportErrorEvent() { + ctx := context.Background() + c, err := errorreporting.NewReportErrorsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouderrorreportingpb.ReportErrorEventRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ReportErrorEvent(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/errorreporting/errors.go b/vendor/cloud.google.com/go/errorreporting/errors.go new file mode 100644 index 0000000..744057c --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/errors.go @@ -0,0 +1,230 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package errorreporting is a Google Stackdriver Error Reporting library. 
+// +// This package is still experimental and subject to change. +// +// See https://cloud.google.com/error-reporting/ for more information. +package errorreporting // import "cloud.google.com/go/errorreporting" + +import ( + "bytes" + "fmt" + "log" + "net/http" + "runtime" + "time" + + api "cloud.google.com/go/errorreporting/apiv1beta1" + "cloud.google.com/go/internal/version" + "github.com/golang/protobuf/ptypes" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/support/bundler" + erpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" +) + +const ( + userAgent = `gcloud-golang-errorreporting/20160701` +) + +// Config is additional configuration for Client. +type Config struct { + // ServiceName identifies the running program and is included in the error reports. + // Optional. + ServiceName string + + // ServiceVersion identifies the version of the running program and is + // included in the error reports. + // Optional. + ServiceVersion string + + // OnError is the function to call if any background + // tasks errored. By default, errors are logged. + OnError func(err error) +} + +// Entry holds information about the reported error. +type Entry struct { + Error error + Req *http.Request // if error is associated with a request. + Stack []byte // if user does not provide a stack trace, runtime.Stack will be called +} + +// Client represents a Google Cloud Error Reporting client. +type Client struct { + projectID string + apiClient client + serviceContext erpb.ServiceContext + bundler *bundler.Bundler + + onErrorFn func(err error) +} + +var newClient = func(ctx context.Context, opts ...option.ClientOption) (client, error) { + client, err := api.NewReportErrorsClient(ctx, opts...) + if err != nil { + return nil, err + } + client.SetGoogleClientInfo("gccl", version.Repo) + return client, nil +} + +// NewClient returns a new error reporting client. Generally you will want +// to create a client on program initialization and use it through the lifetime +// of the process. +func NewClient(ctx context.Context, projectID string, cfg Config, opts ...option.ClientOption) (*Client, error) { + if cfg.ServiceName == "" { + cfg.ServiceName = "goapp" + } + c, err := newClient(ctx, opts...) + if err != nil { + return nil, fmt.Errorf("creating client: %v", err) + } + + client := &Client{ + apiClient: c, + projectID: "projects/" + projectID, + serviceContext: erpb.ServiceContext{ + Service: cfg.ServiceName, + Version: cfg.ServiceVersion, + }, + } + bundler := bundler.NewBundler((*erpb.ReportErrorEventRequest)(nil), func(bundle interface{}) { + reqs := bundle.([]*erpb.ReportErrorEventRequest) + for _, req := range reqs { + _, err = client.apiClient.ReportErrorEvent(ctx, req) + if err != nil { + client.onError(fmt.Errorf("failed to upload: %v", err)) + } + } + }) + // TODO(jbd): Optimize bundler limits. + bundler.DelayThreshold = 2 * time.Second + bundler.BundleCountThreshold = 100 + bundler.BundleByteThreshold = 1000 + bundler.BundleByteLimit = 1000 + bundler.BufferedByteLimit = 10000 + client.bundler = bundler + return client, nil +} + +func (c *Client) onError(err error) { + if c.onErrorFn != nil { + c.onErrorFn(err) + return + } + log.Println(err) +} + +// Close closes any resources held by the client. +// Close should be called when the client is no longer needed. +// It need not be called at program exit. 
+func (c *Client) Close() error { + return c.apiClient.Close() +} + +// Report writes an error report. It doesn't block. Errors in +// writing the error report can be handled via Client.OnError. +func (c *Client) Report(e Entry) { + var stack string + if e.Stack != nil { + stack = string(e.Stack) + } + req := c.makeReportErrorEventRequest(e.Req, e.Error.Error(), stack) + c.bundler.Add(req, 1) +} + +// ReportSync writes an error report. It blocks until the entry is written. +func (c *Client) ReportSync(ctx context.Context, e Entry) error { + var stack string + if e.Stack != nil { + stack = string(e.Stack) + } + req := c.makeReportErrorEventRequest(e.Req, e.Error.Error(), stack) + _, err := c.apiClient.ReportErrorEvent(ctx, req) + return err +} + +// Flush blocks until all currently buffered error reports are sent. +// +// If any errors occurred since the last call to Flush, or the +// creation of the client if this is the first call, then Flush report the +// error via the (*Client).OnError handler. +func (c *Client) Flush() { + c.bundler.Flush() +} + +func (c *Client) makeReportErrorEventRequest(r *http.Request, msg string, stack string) *erpb.ReportErrorEventRequest { + if stack == "" { + // limit the stack trace to 16k. + var buf [16 * 1024]byte + stack = chopStack(buf[0:runtime.Stack(buf[:], false)]) + } + message := msg + "\n" + stack + + var errorContext *erpb.ErrorContext + if r != nil { + errorContext = &erpb.ErrorContext{ + HttpRequest: &erpb.HttpRequestContext{ + Method: r.Method, + Url: r.Host + r.RequestURI, + UserAgent: r.UserAgent(), + Referrer: r.Referer(), + RemoteIp: r.RemoteAddr, + }, + } + } + return &erpb.ReportErrorEventRequest{ + ProjectName: c.projectID, + Event: &erpb.ReportedErrorEvent{ + EventTime: ptypes.TimestampNow(), + ServiceContext: &c.serviceContext, + Message: message, + Context: errorContext, + }, + } +} + +// chopStack trims a stack trace so that the function which panics or calls +// Report is first. +func chopStack(s []byte) string { + f := []byte("cloud.google.com/go/errorreporting.(*Client).Report") + + lfFirst := bytes.IndexByte(s, '\n') + if lfFirst == -1 { + return string(s) + } + stack := s[lfFirst:] + panicLine := bytes.Index(stack, f) + if panicLine == -1 { + return string(s) + } + stack = stack[panicLine+1:] + for i := 0; i < 2; i++ { + nextLine := bytes.IndexByte(stack, '\n') + if nextLine == -1 { + return string(s) + } + stack = stack[nextLine+1:] + } + return string(s[:lfFirst+1]) + string(stack) +} + +type client interface { + ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest, opts ...gax.CallOption) (*erpb.ReportErrorEventResponse, error) + Close() error +} diff --git a/vendor/cloud.google.com/go/errorreporting/errors_test.go b/vendor/cloud.google.com/go/errorreporting/errors_test.go new file mode 100644 index 0000000..7673e76 --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/errors_test.go @@ -0,0 +1,113 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package errorreporting + +import ( + "errors" + "strings" + "testing" + + "cloud.google.com/go/internal/testutil" + + gax "github.com/googleapis/gax-go" + + "golang.org/x/net/context" + "google.golang.org/api/option" + erpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" +) + +type fakeReportErrorsClient struct { + req *erpb.ReportErrorEventRequest + fail bool + doneCh chan struct{} +} + +func (c *fakeReportErrorsClient) ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest, _ ...gax.CallOption) (*erpb.ReportErrorEventResponse, error) { + defer func() { + close(c.doneCh) + }() + if c.fail { + return nil, errors.New("request failed") + } + c.req = req + return &erpb.ReportErrorEventResponse{}, nil +} + +func (c *fakeReportErrorsClient) Close() error { + return nil +} + +func newFakeReportErrorsClient() *fakeReportErrorsClient { + c := &fakeReportErrorsClient{} + c.doneCh = make(chan struct{}) + return c +} + +func newTestClient(c *fakeReportErrorsClient) *Client { + newClient = func(ctx context.Context, opts ...option.ClientOption) (client, error) { + return c, nil + } + t, err := NewClient(context.Background(), testutil.ProjID(), Config{ + ServiceName: "myservice", + ServiceVersion: "v1.0", + }) + if err != nil { + panic(err) + } + return t +} + +func commonChecks(t *testing.T, req *erpb.ReportErrorEventRequest, fn string) { + if req.Event.ServiceContext.Service != "myservice" { + t.Errorf("error report didn't contain service name") + } + if req.Event.ServiceContext.Version != "v1.0" { + t.Errorf("error report didn't contain version name") + } + if !strings.Contains(req.Event.Message, "error") { + t.Errorf("error report didn't contain message") + } + if !strings.Contains(req.Event.Message, fn) { + t.Errorf("error report didn't contain stack trace") + } +} + +func TestReport(t *testing.T) { + fc := newFakeReportErrorsClient() + c := newTestClient(fc) + c.Report(Entry{Error: errors.New("error")}) + + <-fc.doneCh + r := fc.req + if r == nil { + t.Fatalf("got no error report, expected one") + } + commonChecks(t, r, "errorreporting.TestReport") +} +func TestReportSync(t *testing.T) { + ctx := context.Background() + fc := newFakeReportErrorsClient() + c := newTestClient(fc) + if err := c.ReportSync(ctx, Entry{Error: errors.New("error")}); err != nil { + t.Fatalf("cannot upload errors: %v", err) + } + + <-fc.doneCh + r := fc.req + if r == nil { + t.Fatalf("got no error report, expected one") + } + commonChecks(t, r, "errorreporting.TestReport") +} diff --git a/vendor/cloud.google.com/go/errorreporting/example_test.go b/vendor/cloud.google.com/go/errorreporting/example_test.go new file mode 100644 index 0000000..0744558 --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/example_test.go @@ -0,0 +1,49 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package errorreporting_test + +import ( + "errors" + "log" + + "cloud.google.com/go/errorreporting" + "golang.org/x/net/context" +) + +func Example() { + // Create the client. + ctx := context.Background() + ec, err := errorreporting.NewClient(ctx, "my-gcp-project", errorreporting.Config{ + ServiceName: "myservice", + ServiceVersion: "v1.0", + }) + defer func() { + if err := ec.Close(); err != nil { + log.Printf("failed to report errors to Stackdriver: %v", err) + } + }() + + // Report an error. + err = doSomething() + if err != nil { + ec.Report(errorreporting.Entry{ + Error: err, + }) + } +} + +func doSomething() error { + return errors.New("something went wrong") +} diff --git a/vendor/cloud.google.com/go/errorreporting/stack_test.go b/vendor/cloud.google.com/go/errorreporting/stack_test.go new file mode 100644 index 0000000..92ece0b --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/stack_test.go @@ -0,0 +1,56 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errorreporting + +import "testing" + +func TestChopStack(t *testing.T) { + for _, test := range []struct { + name string + in []byte + expected string + }{ + { + name: "Report", + in: []byte(` goroutine 39 [running]: +runtime/debug.Stack() + /gopath/runtime/debug/stack.go:24 +0x79 +cloud.google.com/go/errorreporting.(*Client).logInternal() + /gopath/cloud.google.com/go/errorreporting/errors.go:259 +0x18b +cloud.google.com/go/errorreporting.(*Client).Report() + /gopath/cloud.google.com/go/errorreporting/errors.go:248 +0x4ed +cloud.google.com/go/errorreporting.TestReport() + /gopath/cloud.google.com/go/errorreporting/errors_test.go:137 +0x2a1 +testing.tRunner() + /gopath/testing/testing.go:610 +0x81 +created by testing.(*T).Run + /gopath/testing/testing.go:646 +0x2ec +`), + expected: ` goroutine 39 [running]: +cloud.google.com/go/errorreporting.TestReport() + /gopath/cloud.google.com/go/errorreporting/errors_test.go:137 +0x2a1 +testing.tRunner() + /gopath/testing/testing.go:610 +0x81 +created by testing.(*T).Run + /gopath/testing/testing.go:646 +0x2ec +`, + }, + } { + out := chopStack(test.in) + if out != test.expected { + t.Errorf("case %q: chopStack(%q): got %q want %q", test.name, test.in, out, test.expected) + } + } +} diff --git a/vendor/cloud.google.com/go/firestore/Makefile b/vendor/cloud.google.com/go/firestore/Makefile new file mode 100644 index 0000000..b1f9ff7 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/Makefile @@ -0,0 +1,13 @@ +# Copy textproto files in this directory from the source of truth. 
+ +SRC=$(GOPATH)/src/github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore + +.PHONY: refresh-tests + +refresh-tests: + -rm genproto/*.pb.go + cp $(SRC)/genproto/*.pb.go genproto + -rm testdata/*.textproto + cp $(SRC)/testdata/*.textproto testdata + openssl dgst -sha1 $(SRC)/testdata/test-suite.binproto > testdata/VERSION + diff --git a/vendor/cloud.google.com/go/firestore/apiv1beta1/doc.go b/vendor/cloud.google.com/go/firestore/apiv1beta1/doc.go new file mode 100644 index 0000000..3494e35 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/apiv1beta1/doc.go @@ -0,0 +1,48 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package firestore is an auto-generated package for the +// Google Cloud Firestore API. +// +// NOTE: This package is in beta. It is not stable, and may be subject to changes. +// +// +// Use the client at cloud.google.com/go/firestore in preference to this. +package firestore // import "cloud.google.com/go/firestore/apiv1beta1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/datastore", + } +} diff --git a/vendor/cloud.google.com/go/firestore/apiv1beta1/firestore_client.go b/vendor/cloud.google.com/go/firestore/apiv1beta1/firestore_client.go new file mode 100644 index 0000000..c19300f --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/apiv1beta1/firestore_client.go @@ -0,0 +1,497 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package firestore + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + firestorepb "google.golang.org/genproto/googleapis/firestore/v1beta1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. +type CallOptions struct { + GetDocument []gax.CallOption + ListDocuments []gax.CallOption + CreateDocument []gax.CallOption + UpdateDocument []gax.CallOption + DeleteDocument []gax.CallOption + BatchGetDocuments []gax.CallOption + BeginTransaction []gax.CallOption + Commit []gax.CallOption + Rollback []gax.CallOption + RunQuery []gax.CallOption + Write []gax.CallOption + Listen []gax.CallOption + ListCollectionIds []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("firestore.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + {"streaming", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &CallOptions{ + GetDocument: retry[[2]string{"default", "idempotent"}], + ListDocuments: retry[[2]string{"default", "idempotent"}], + CreateDocument: retry[[2]string{"default", "non_idempotent"}], + UpdateDocument: retry[[2]string{"default", "non_idempotent"}], + DeleteDocument: retry[[2]string{"default", "idempotent"}], + BatchGetDocuments: retry[[2]string{"streaming", "idempotent"}], + BeginTransaction: retry[[2]string{"default", "idempotent"}], + Commit: retry[[2]string{"default", "non_idempotent"}], + Rollback: retry[[2]string{"default", "idempotent"}], + RunQuery: retry[[2]string{"default", "idempotent"}], + Write: retry[[2]string{"streaming", "non_idempotent"}], + Listen: retry[[2]string{"streaming", "idempotent"}], + ListCollectionIds: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with Google Cloud Firestore API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client firestorepb.FirestoreClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new firestore client. +// +// The Cloud Firestore service. +// +// This service exposes several types of comparable timestamps: +// +// create_time - The time at which a document was created. Changes only +// when a document is deleted, then re-created. Increases in a strict +// monotonic fashion. +// +// update_time - The time at which a document was last updated. Changes +// every time a document is modified. Does not change when a write results +// in no modifications. Increases in a strict monotonic fashion. 
+// +// read_time - The time at which a particular state was observed. Used +// to denote a consistent snapshot of the database or the time at which a +// Document was observed to not exist. +// +// commit_time - The time at which the writes in a transaction were +// committed. Any read with an equal or greater read_time is guaranteed +// to see the effects of the transaction. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: firestorepb.NewFirestoreClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// GetDocument gets a single document. +func (c *Client) GetDocument(ctx context.Context, req *firestorepb.GetDocumentRequest, opts ...gax.CallOption) (*firestorepb.Document, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetDocument[0:len(c.CallOptions.GetDocument):len(c.CallOptions.GetDocument)], opts...) + var resp *firestorepb.Document + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetDocument(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListDocuments lists documents. +func (c *Client) ListDocuments(ctx context.Context, req *firestorepb.ListDocumentsRequest, opts ...gax.CallOption) *DocumentIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListDocuments[0:len(c.CallOptions.ListDocuments):len(c.CallOptions.ListDocuments)], opts...) + it := &DocumentIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*firestorepb.Document, string, error) { + var resp *firestorepb.ListDocumentsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListDocuments(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Documents, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// CreateDocument creates a new document. 
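+//
+// A hypothetical call might look like this (the parent path and collection ID
+// are illustrative placeholders, not values from this file):
+//
+//	doc, err := c.CreateDocument(ctx, &firestorepb.CreateDocumentRequest{
+//		Parent:       "projects/my-project/databases/(default)/documents",
+//		CollectionId: "cities",
+//		Document:     &firestorepb.Document{},
+//	})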
+func (c *Client) CreateDocument(ctx context.Context, req *firestorepb.CreateDocumentRequest, opts ...gax.CallOption) (*firestorepb.Document, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateDocument[0:len(c.CallOptions.CreateDocument):len(c.CallOptions.CreateDocument)], opts...) + var resp *firestorepb.Document + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateDocument(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateDocument updates or inserts a document. +func (c *Client) UpdateDocument(ctx context.Context, req *firestorepb.UpdateDocumentRequest, opts ...gax.CallOption) (*firestorepb.Document, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateDocument[0:len(c.CallOptions.UpdateDocument):len(c.CallOptions.UpdateDocument)], opts...) + var resp *firestorepb.Document + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.UpdateDocument(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteDocument deletes a document. +func (c *Client) DeleteDocument(ctx context.Context, req *firestorepb.DeleteDocumentRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteDocument[0:len(c.CallOptions.DeleteDocument):len(c.CallOptions.DeleteDocument)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteDocument(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// BatchGetDocuments gets multiple documents. +// +// Documents returned by this method are not guaranteed to be returned in the +// same order that they were requested. +func (c *Client) BatchGetDocuments(ctx context.Context, req *firestorepb.BatchGetDocumentsRequest, opts ...gax.CallOption) (firestorepb.Firestore_BatchGetDocumentsClient, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.BatchGetDocuments[0:len(c.CallOptions.BatchGetDocuments):len(c.CallOptions.BatchGetDocuments)], opts...) + var resp firestorepb.Firestore_BatchGetDocumentsClient + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.BatchGetDocuments(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// BeginTransaction starts a new transaction. +func (c *Client) BeginTransaction(ctx context.Context, req *firestorepb.BeginTransactionRequest, opts ...gax.CallOption) (*firestorepb.BeginTransactionResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.BeginTransaction[0:len(c.CallOptions.BeginTransaction):len(c.CallOptions.BeginTransaction)], opts...) + var resp *firestorepb.BeginTransactionResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.BeginTransaction(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// Commit commits a transaction, while optionally updating documents. 
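+//
+// Sketch of a transactional flow, assuming txn was returned by an earlier
+// BeginTransaction call and writes were built by the caller (both names are
+// hypothetical):
+//
+//	resp, err := c.Commit(ctx, &firestorepb.CommitRequest{
+//		Database:    "projects/my-project/databases/(default)",
+//		Writes:      writes,
+//		Transaction: txn.Transaction,
+//	})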
+func (c *Client) Commit(ctx context.Context, req *firestorepb.CommitRequest, opts ...gax.CallOption) (*firestorepb.CommitResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.Commit[0:len(c.CallOptions.Commit):len(c.CallOptions.Commit)], opts...) + var resp *firestorepb.CommitResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.Commit(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// Rollback rolls back a transaction. +func (c *Client) Rollback(ctx context.Context, req *firestorepb.RollbackRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.Rollback[0:len(c.CallOptions.Rollback):len(c.CallOptions.Rollback)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.Rollback(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// RunQuery runs a query. +func (c *Client) RunQuery(ctx context.Context, req *firestorepb.RunQueryRequest, opts ...gax.CallOption) (firestorepb.Firestore_RunQueryClient, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.RunQuery[0:len(c.CallOptions.RunQuery):len(c.CallOptions.RunQuery)], opts...) + var resp firestorepb.Firestore_RunQueryClient + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.RunQuery(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// Write streams batches of document updates and deletes, in order. +func (c *Client) Write(ctx context.Context, opts ...gax.CallOption) (firestorepb.Firestore_WriteClient, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.Write[0:len(c.CallOptions.Write):len(c.CallOptions.Write)], opts...) + var resp firestorepb.Firestore_WriteClient + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.Write(ctx, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// Listen listens to changes. +func (c *Client) Listen(ctx context.Context, opts ...gax.CallOption) (firestorepb.Firestore_ListenClient, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.Listen[0:len(c.CallOptions.Listen):len(c.CallOptions.Listen)], opts...) + var resp firestorepb.Firestore_ListenClient + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.Listen(ctx, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListCollectionIds lists all the collection IDs underneath a document. +func (c *Client) ListCollectionIds(ctx context.Context, req *firestorepb.ListCollectionIdsRequest, opts ...gax.CallOption) *StringIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListCollectionIds[0:len(c.CallOptions.ListCollectionIds):len(c.CallOptions.ListCollectionIds)], opts...) 
+ it := &StringIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) { + var resp *firestorepb.ListCollectionIdsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListCollectionIds(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.CollectionIds, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// DocumentIterator manages a stream of *firestorepb.Document. +type DocumentIterator struct { + items []*firestorepb.Document + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*firestorepb.Document, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *DocumentIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *DocumentIterator) Next() (*firestorepb.Document, error) { + var item *firestorepb.Document + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *DocumentIterator) bufLen() int { + return len(it.items) +} + +func (it *DocumentIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// StringIterator manages a stream of string. +type StringIterator struct { + items []string + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []string, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *StringIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
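+//
+// Typical drain loop (a sketch mirroring the generated example tests in this
+// change):
+//
+//	for {
+//		id, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			// TODO: Handle error.
+//		}
+//		_ = id
+//	}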
+func (it *StringIterator) Next() (string, error) { + var item string + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *StringIterator) bufLen() int { + return len(it.items) +} + +func (it *StringIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/firestore/apiv1beta1/firestore_client_example_test.go b/vendor/cloud.google.com/go/firestore/apiv1beta1/firestore_client_example_test.go new file mode 100644 index 0000000..85b7db7 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/apiv1beta1/firestore_client_example_test.go @@ -0,0 +1,328 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package firestore_test + +import ( + "io" + + "cloud.google.com/go/firestore/apiv1beta1" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + firestorepb "google.golang.org/genproto/googleapis/firestore/v1beta1" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := firestore.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_GetDocument() { + ctx := context.Background() + c, err := firestore.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &firestorepb.GetDocumentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetDocument(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListDocuments() { + ctx := context.Background() + c, err := firestore.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &firestorepb.ListDocumentsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListDocuments(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_CreateDocument() { + ctx := context.Background() + c, err := firestore.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &firestorepb.CreateDocumentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateDocument(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_UpdateDocument() { + ctx := context.Background() + c, err := firestore.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &firestorepb.UpdateDocumentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateDocument(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_DeleteDocument() { + ctx := context.Background() + c, err := firestore.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &firestorepb.DeleteDocumentRequest{ + // TODO: Fill request struct fields. 
+ } + err = c.DeleteDocument(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_BatchGetDocuments() { + ctx := context.Background() + c, err := firestore.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &firestorepb.BatchGetDocumentsRequest{ + // TODO: Fill request struct fields. + } + stream, err := c.BatchGetDocuments(ctx, req) + if err != nil { + // TODO: Handle error. + } + for { + resp, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + // TODO: handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_BeginTransaction() { + ctx := context.Background() + c, err := firestore.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &firestorepb.BeginTransactionRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.BeginTransaction(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_Commit() { + ctx := context.Background() + c, err := firestore.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &firestorepb.CommitRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.Commit(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_Rollback() { + ctx := context.Background() + c, err := firestore.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &firestorepb.RollbackRequest{ + // TODO: Fill request struct fields. + } + err = c.Rollback(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_RunQuery() { + ctx := context.Background() + c, err := firestore.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &firestorepb.RunQueryRequest{ + // TODO: Fill request struct fields. + } + stream, err := c.RunQuery(ctx, req) + if err != nil { + // TODO: Handle error. + } + for { + resp, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + // TODO: handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_Write() { + ctx := context.Background() + c, err := firestore.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + stream, err := c.Write(ctx) + if err != nil { + // TODO: Handle error. + } + go func() { + reqs := []*firestorepb.WriteRequest{ + // TODO: Create requests. + } + for _, req := range reqs { + if err := stream.Send(req); err != nil { + // TODO: Handle error. + } + } + stream.CloseSend() + }() + for { + resp, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + // TODO: handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_Listen() { + ctx := context.Background() + c, err := firestore.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + stream, err := c.Listen(ctx) + if err != nil { + // TODO: Handle error. + } + go func() { + reqs := []*firestorepb.ListenRequest{ + // TODO: Create requests. + } + for _, req := range reqs { + if err := stream.Send(req); err != nil { + // TODO: Handle error. + } + } + stream.CloseSend() + }() + for { + resp, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + // TODO: handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_ListCollectionIds() { + ctx := context.Background() + c, err := firestore.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &firestorepb.ListCollectionIdsRequest{ + // TODO: Fill request struct fields. 
+ } + it := c.ListCollectionIds(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} diff --git a/vendor/cloud.google.com/go/firestore/apiv1beta1/mock_test.go b/vendor/cloud.google.com/go/firestore/apiv1beta1/mock_test.go new file mode 100644 index 0000000..71391ba --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/apiv1beta1/mock_test.go @@ -0,0 +1,1153 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package firestore + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + firestorepb "google.golang.org/genproto/googleapis/firestore/v1beta1" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockFirestoreServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + firestorepb.FirestoreServer + + reqs []proto.Message + + // If set, all calls return this error. 
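+	// Tests assign a gRPC status error here to exercise failure paths.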
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockFirestoreServer) GetDocument(ctx context.Context, req *firestorepb.GetDocumentRequest) (*firestorepb.Document, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*firestorepb.Document), nil +} + +func (s *mockFirestoreServer) ListDocuments(ctx context.Context, req *firestorepb.ListDocumentsRequest) (*firestorepb.ListDocumentsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*firestorepb.ListDocumentsResponse), nil +} + +func (s *mockFirestoreServer) CreateDocument(ctx context.Context, req *firestorepb.CreateDocumentRequest) (*firestorepb.Document, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*firestorepb.Document), nil +} + +func (s *mockFirestoreServer) UpdateDocument(ctx context.Context, req *firestorepb.UpdateDocumentRequest) (*firestorepb.Document, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*firestorepb.Document), nil +} + +func (s *mockFirestoreServer) DeleteDocument(ctx context.Context, req *firestorepb.DeleteDocumentRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockFirestoreServer) BatchGetDocuments(req *firestorepb.BatchGetDocumentsRequest, stream firestorepb.Firestore_BatchGetDocumentsServer) error { + md, _ := metadata.FromIncomingContext(stream.Context()) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return s.err + } + for _, v := range s.resps { + if err := stream.Send(v.(*firestorepb.BatchGetDocumentsResponse)); err != nil { + return err + } + } + return nil +} + +func (s *mockFirestoreServer) BeginTransaction(ctx context.Context, req *firestorepb.BeginTransactionRequest) (*firestorepb.BeginTransactionResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return 
s.resps[0].(*firestorepb.BeginTransactionResponse), nil +} + +func (s *mockFirestoreServer) Commit(ctx context.Context, req *firestorepb.CommitRequest) (*firestorepb.CommitResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*firestorepb.CommitResponse), nil +} + +func (s *mockFirestoreServer) Rollback(ctx context.Context, req *firestorepb.RollbackRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockFirestoreServer) RunQuery(req *firestorepb.RunQueryRequest, stream firestorepb.Firestore_RunQueryServer) error { + md, _ := metadata.FromIncomingContext(stream.Context()) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return s.err + } + for _, v := range s.resps { + if err := stream.Send(v.(*firestorepb.RunQueryResponse)); err != nil { + return err + } + } + return nil +} + +func (s *mockFirestoreServer) Write(stream firestorepb.Firestore_WriteServer) error { + md, _ := metadata.FromIncomingContext(stream.Context()) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + for { + if req, err := stream.Recv(); err == io.EOF { + break + } else if err != nil { + return err + } else { + s.reqs = append(s.reqs, req) + } + } + if s.err != nil { + return s.err + } + for _, v := range s.resps { + if err := stream.Send(v.(*firestorepb.WriteResponse)); err != nil { + return err + } + } + return nil +} + +func (s *mockFirestoreServer) Listen(stream firestorepb.Firestore_ListenServer) error { + md, _ := metadata.FromIncomingContext(stream.Context()) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + for { + if req, err := stream.Recv(); err == io.EOF { + break + } else if err != nil { + return err + } else { + s.reqs = append(s.reqs, req) + } + } + if s.err != nil { + return s.err + } + for _, v := range s.resps { + if err := stream.Send(v.(*firestorepb.ListenResponse)); err != nil { + return err + } + } + return nil +} + +func (s *mockFirestoreServer) ListCollectionIds(ctx context.Context, req *firestorepb.ListCollectionIdsRequest) (*firestorepb.ListCollectionIdsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*firestorepb.ListCollectionIdsResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
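+// Tests pass clientOpt to NewClient so that all RPCs are routed to the
+// in-process mock server rather than the real Firestore service.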
+var clientOpt option.ClientOption + +var ( + mockFirestore mockFirestoreServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + firestorepb.RegisterFirestoreServer(serv, &mockFirestore) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestFirestoreGetDocument(t *testing.T) { + var name2 string = "name2-1052831874" + var expectedResponse = &firestorepb.Document{ + Name: name2, + } + + mockFirestore.err = nil + mockFirestore.reqs = nil + + mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/databases/%s/documents/%s/%s", "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]") + var request = &firestorepb.GetDocumentRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDocument(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockFirestore.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestFirestoreGetDocumentError(t *testing.T) { + errCode := codes.PermissionDenied + mockFirestore.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/databases/%s/documents/%s/%s", "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]") + var request = &firestorepb.GetDocumentRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDocument(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestFirestoreListDocuments(t *testing.T) { + var nextPageToken string = "" + var documentsElement *firestorepb.Document = &firestorepb.Document{} + var documents = []*firestorepb.Document{documentsElement} + var expectedResponse = &firestorepb.ListDocumentsResponse{ + NextPageToken: nextPageToken, + Documents: documents, + } + + mockFirestore.err = nil + mockFirestore.reqs = nil + + mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s/databases/%s/documents/%s/%s", "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]") + var collectionId string = "collectionId-821242276" + var request = &firestorepb.ListDocumentsRequest{ + Parent: formattedParent, + CollectionId: collectionId, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDocuments(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockFirestore.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Documents[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if 
!ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestFirestoreListDocumentsError(t *testing.T) { + errCode := codes.PermissionDenied + mockFirestore.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s/databases/%s/documents/%s/%s", "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]") + var collectionId string = "collectionId-821242276" + var request = &firestorepb.ListDocumentsRequest{ + Parent: formattedParent, + CollectionId: collectionId, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDocuments(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestFirestoreCreateDocument(t *testing.T) { + var name string = "name3373707" + var expectedResponse = &firestorepb.Document{ + Name: name, + } + + mockFirestore.err = nil + mockFirestore.reqs = nil + + mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s/databases/%s/documents/%s/%s", "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]") + var collectionId string = "collectionId-821242276" + var documentId string = "documentId506676927" + var document *firestorepb.Document = &firestorepb.Document{} + var request = &firestorepb.CreateDocumentRequest{ + Parent: formattedParent, + CollectionId: collectionId, + DocumentId: documentId, + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateDocument(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockFirestore.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestFirestoreCreateDocumentError(t *testing.T) { + errCode := codes.PermissionDenied + mockFirestore.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s/databases/%s/documents/%s/%s", "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]") + var collectionId string = "collectionId-821242276" + var documentId string = "documentId506676927" + var document *firestorepb.Document = &firestorepb.Document{} + var request = &firestorepb.CreateDocumentRequest{ + Parent: formattedParent, + CollectionId: collectionId, + DocumentId: documentId, + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateDocument(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestFirestoreUpdateDocument(t *testing.T) { + var name string = "name3373707" + var expectedResponse = &firestorepb.Document{ + Name: name, + } + + mockFirestore.err = nil + mockFirestore.reqs = nil + + mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse) + + var document *firestorepb.Document = &firestorepb.Document{} + var updateMask *firestorepb.DocumentMask = &firestorepb.DocumentMask{} + var request = 
&firestorepb.UpdateDocumentRequest{ + Document: document, + UpdateMask: updateMask, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateDocument(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockFirestore.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestFirestoreUpdateDocumentError(t *testing.T) { + errCode := codes.PermissionDenied + mockFirestore.err = gstatus.Error(errCode, "test error") + + var document *firestorepb.Document = &firestorepb.Document{} + var updateMask *firestorepb.DocumentMask = &firestorepb.DocumentMask{} + var request = &firestorepb.UpdateDocumentRequest{ + Document: document, + UpdateMask: updateMask, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateDocument(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestFirestoreDeleteDocument(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockFirestore.err = nil + mockFirestore.reqs = nil + + mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/databases/%s/documents/%s/%s", "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]") + var request = &firestorepb.DeleteDocumentRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteDocument(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockFirestore.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestFirestoreDeleteDocumentError(t *testing.T) { + errCode := codes.PermissionDenied + mockFirestore.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/databases/%s/documents/%s/%s", "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]") + var request = &firestorepb.DeleteDocumentRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteDocument(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestFirestoreBatchGetDocuments(t *testing.T) { + var missing string = "missing1069449574" + var transaction []byte = []byte("-34") + var expectedResponse = &firestorepb.BatchGetDocumentsResponse{ + Result: &firestorepb.BatchGetDocumentsResponse_Missing{ + Missing: missing, + }, + Transaction: transaction, + } + + mockFirestore.err = nil + mockFirestore.reqs = nil + + mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse) + + var formattedDatabase string = fmt.Sprintf("projects/%s/databases/%s", "[PROJECT]", "[DATABASE]") + var documents []string = nil + var request = &firestorepb.BatchGetDocumentsRequest{ + Database: formattedDatabase, + Documents: documents, + } + + c, err := 
NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.BatchGetDocuments(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockFirestore.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestFirestoreBatchGetDocumentsError(t *testing.T) { + errCode := codes.PermissionDenied + mockFirestore.err = gstatus.Error(errCode, "test error") + + var formattedDatabase string = fmt.Sprintf("projects/%s/databases/%s", "[PROJECT]", "[DATABASE]") + var documents []string = nil + var request = &firestorepb.BatchGetDocumentsRequest{ + Database: formattedDatabase, + Documents: documents, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.BatchGetDocuments(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestFirestoreBeginTransaction(t *testing.T) { + var transaction []byte = []byte("-34") + var expectedResponse = &firestorepb.BeginTransactionResponse{ + Transaction: transaction, + } + + mockFirestore.err = nil + mockFirestore.reqs = nil + + mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse) + + var formattedDatabase string = fmt.Sprintf("projects/%s/databases/%s", "[PROJECT]", "[DATABASE]") + var request = &firestorepb.BeginTransactionRequest{ + Database: formattedDatabase, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.BeginTransaction(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockFirestore.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestFirestoreBeginTransactionError(t *testing.T) { + errCode := codes.PermissionDenied + mockFirestore.err = gstatus.Error(errCode, "test error") + + var formattedDatabase string = fmt.Sprintf("projects/%s/databases/%s", "[PROJECT]", "[DATABASE]") + var request = &firestorepb.BeginTransactionRequest{ + Database: formattedDatabase, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.BeginTransaction(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestFirestoreCommit(t *testing.T) { + var expectedResponse *firestorepb.CommitResponse = &firestorepb.CommitResponse{} + + mockFirestore.err = nil + mockFirestore.reqs = nil + + mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse) + + var formattedDatabase string = fmt.Sprintf("projects/%s/databases/%s", "[PROJECT]", "[DATABASE]") + var writes []*firestorepb.Write = nil + var request = &firestorepb.CommitRequest{ + Database: formattedDatabase, + Writes: writes, + } + 
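+	// Dial the in-process mock server registered in TestMain.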
+ c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Commit(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockFirestore.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestFirestoreCommitError(t *testing.T) { + errCode := codes.PermissionDenied + mockFirestore.err = gstatus.Error(errCode, "test error") + + var formattedDatabase string = fmt.Sprintf("projects/%s/databases/%s", "[PROJECT]", "[DATABASE]") + var writes []*firestorepb.Write = nil + var request = &firestorepb.CommitRequest{ + Database: formattedDatabase, + Writes: writes, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Commit(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestFirestoreRollback(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockFirestore.err = nil + mockFirestore.reqs = nil + + mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse) + + var formattedDatabase string = fmt.Sprintf("projects/%s/databases/%s", "[PROJECT]", "[DATABASE]") + var transaction []byte = []byte("-34") + var request = &firestorepb.RollbackRequest{ + Database: formattedDatabase, + Transaction: transaction, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.Rollback(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockFirestore.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestFirestoreRollbackError(t *testing.T) { + errCode := codes.PermissionDenied + mockFirestore.err = gstatus.Error(errCode, "test error") + + var formattedDatabase string = fmt.Sprintf("projects/%s/databases/%s", "[PROJECT]", "[DATABASE]") + var transaction []byte = []byte("-34") + var request = &firestorepb.RollbackRequest{ + Database: formattedDatabase, + Transaction: transaction, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.Rollback(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestFirestoreRunQuery(t *testing.T) { + var transaction []byte = []byte("-34") + var skippedResults int32 = 880286183 + var expectedResponse = &firestorepb.RunQueryResponse{ + Transaction: transaction, + SkippedResults: skippedResults, + } + + mockFirestore.err = nil + mockFirestore.reqs = nil + + mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s/databases/%s/documents/%s/%s", "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]") + var request = &firestorepb.RunQueryRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.RunQuery(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err 
:= stream.Recv() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockFirestore.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestFirestoreRunQueryError(t *testing.T) { + errCode := codes.PermissionDenied + mockFirestore.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s/databases/%s/documents/%s/%s", "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]") + var request = &firestorepb.RunQueryRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.RunQuery(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestFirestoreWrite(t *testing.T) { + var streamId string = "streamId-315624902" + var streamToken []byte = []byte("122") + var expectedResponse = &firestorepb.WriteResponse{ + StreamId: streamId, + StreamToken: streamToken, + } + + mockFirestore.err = nil + mockFirestore.reqs = nil + + mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse) + + var formattedDatabase string = fmt.Sprintf("projects/%s/databases/%s", "[PROJECT]", "[DATABASE]") + var request = &firestorepb.WriteRequest{ + Database: formattedDatabase, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.Write(context.Background()) + if err != nil { + t.Fatal(err) + } + if err := stream.Send(request); err != nil { + t.Fatal(err) + } + if err := stream.CloseSend(); err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockFirestore.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestFirestoreWriteError(t *testing.T) { + errCode := codes.PermissionDenied + mockFirestore.err = gstatus.Error(errCode, "test error") + + var formattedDatabase string = fmt.Sprintf("projects/%s/databases/%s", "[PROJECT]", "[DATABASE]") + var request = &firestorepb.WriteRequest{ + Database: formattedDatabase, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.Write(context.Background()) + if err != nil { + t.Fatal(err) + } + if err := stream.Send(request); err != nil { + t.Fatal(err) + } + if err := stream.CloseSend(); err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestFirestoreListen(t *testing.T) { + var expectedResponse *firestorepb.ListenResponse = &firestorepb.ListenResponse{} + + mockFirestore.err = nil + mockFirestore.reqs = nil + + mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse) + + var formattedDatabase string = fmt.Sprintf("projects/%s/databases/%s", "[PROJECT]", "[DATABASE]") + var request 
= &firestorepb.ListenRequest{ + Database: formattedDatabase, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.Listen(context.Background()) + if err != nil { + t.Fatal(err) + } + if err := stream.Send(request); err != nil { + t.Fatal(err) + } + if err := stream.CloseSend(); err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockFirestore.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestFirestoreListenError(t *testing.T) { + errCode := codes.PermissionDenied + mockFirestore.err = gstatus.Error(errCode, "test error") + + var formattedDatabase string = fmt.Sprintf("projects/%s/databases/%s", "[PROJECT]", "[DATABASE]") + var request = &firestorepb.ListenRequest{ + Database: formattedDatabase, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.Listen(context.Background()) + if err != nil { + t.Fatal(err) + } + if err := stream.Send(request); err != nil { + t.Fatal(err) + } + if err := stream.CloseSend(); err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestFirestoreListCollectionIds(t *testing.T) { + var nextPageToken string = "" + var collectionIdsElement string = "collectionIdsElement1368994900" + var collectionIds = []string{collectionIdsElement} + var expectedResponse = &firestorepb.ListCollectionIdsResponse{ + NextPageToken: nextPageToken, + CollectionIds: collectionIds, + } + + mockFirestore.err = nil + mockFirestore.reqs = nil + + mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s/databases/%s/documents/%s/%s", "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]") + var request = &firestorepb.ListCollectionIdsRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListCollectionIds(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockFirestore.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.CollectionIds[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestFirestoreListCollectionIdsError(t *testing.T) { + errCode := codes.PermissionDenied + mockFirestore.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s/databases/%s/documents/%s/%s", "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]") + var request = &firestorepb.ListCollectionIdsRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListCollectionIds(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + 
t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/firestore/apiv1beta1/path_funcs.go b/vendor/cloud.google.com/go/firestore/apiv1beta1/path_funcs.go new file mode 100644 index 0000000..7f4b0ad --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/apiv1beta1/path_funcs.go @@ -0,0 +1,78 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +// DatabaseRootPath returns the path for the database root resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/databases/%s", project, database) +// instead. +func DatabaseRootPath(project, database string) string { + return "" + + "projects/" + + project + + "/databases/" + + database + + "" +} + +// DocumentRootPath returns the path for the document root resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/databases/%s/documents", project, database) +// instead. +func DocumentRootPath(project, database string) string { + return "" + + "projects/" + + project + + "/databases/" + + database + + "/documents" + + "" +} + +// DocumentPathPath returns the path for the document path resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/databases/%s/documents/%s", project, database, documentPath) +// instead. +func DocumentPathPath(project, database, documentPath string) string { + return "" + + "projects/" + + project + + "/databases/" + + database + + "/documents/" + + documentPath + + "" +} + +// AnyPathPath returns the path for the any path resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/databases/%s/documents/%s/%s", project, database, document, anyPath) +// instead. +func AnyPathPath(project, database, document, anyPath string) string { + return "" + + "projects/" + + project + + "/databases/" + + database + + "/documents/" + + document + + "/" + + anyPath + + "" +} diff --git a/vendor/cloud.google.com/go/firestore/client.go b/vendor/cloud.google.com/go/firestore/client.go new file mode 100644 index 0000000..ba3a9bb --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/client.go @@ -0,0 +1,283 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package firestore + +import ( + "errors" + "fmt" + "io" + "strings" + "time" + + "google.golang.org/api/iterator" + + vkit "cloud.google.com/go/firestore/apiv1beta1" + + "cloud.google.com/go/internal/version" + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + + "github.com/golang/protobuf/ptypes" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// resourcePrefixHeader is the name of the metadata header used to indicate +// the resource being operated on. +const resourcePrefixHeader = "google-cloud-resource-prefix" + +// A Client provides access to the Firestore service. +type Client struct { + c *vkit.Client + projectID string + databaseID string // A client is tied to a single database. +} + +// NewClient creates a new Firestore client that uses the given project. +func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { + vc, err := vkit.NewClient(ctx, opts...) + if err != nil { + return nil, err + } + vc.SetGoogleClientInfo("gccl", version.Repo) + c := &Client{ + c: vc, + projectID: projectID, + databaseID: "(default)", // always "(default)", for now + } + return c, nil + +} + +// Close closes any resources held by the client. +// +// Close need not be called at program exit. +func (c *Client) Close() error { + return c.c.Close() +} + +func (c *Client) path() string { + return fmt.Sprintf("projects/%s/databases/%s", c.projectID, c.databaseID) +} + +func withResourceHeader(ctx context.Context, resource string) context.Context { + md, _ := metadata.FromOutgoingContext(ctx) + md = md.Copy() + md[resourcePrefixHeader] = []string{resource} + return metadata.NewOutgoingContext(ctx, md) +} + +// Collection creates a reference to a collection with the given path. +// A path is a sequence of IDs separated by slashes. +// +// Collection returns nil if path contains an even number of IDs or any ID is empty. +func (c *Client) Collection(path string) *CollectionRef { + coll, _ := c.idsToRef(strings.Split(path, "/"), c.path()) + return coll +} + +// Doc creates a reference to a document with the given path. +// A path is a sequence of IDs separated by slashes. +// +// Doc returns nil if path contains an odd number of IDs or any ID is empty. +func (c *Client) Doc(path string) *DocumentRef { + _, doc := c.idsToRef(strings.Split(path, "/"), c.path()) + return doc +} + +func (c *Client) idsToRef(IDs []string, dbPath string) (*CollectionRef, *DocumentRef) { + if len(IDs) == 0 { + return nil, nil + } + for _, id := range IDs { + if id == "" { + return nil, nil + } + } + coll := newTopLevelCollRef(c, dbPath, IDs[0]) + i := 1 + for i < len(IDs) { + doc := newDocRef(coll, IDs[i]) + i++ + if i == len(IDs) { + return nil, doc + } + coll = newCollRefWithParent(c, doc, IDs[i]) + i++ + } + return coll, nil +} + +// GetAll retrieves multiple documents with a single call. The DocumentSnapshots are +// returned in the order of the given DocumentRefs. +// +// If a document is not present, the corresponding DocumentSnapshot's Exists method will return false. 
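+//
+// A minimal usage sketch (the client variable, collection, and document IDs
+// here are hypothetical):
+//
+//	snaps, err := client.GetAll(ctx, []*DocumentRef{
+//		client.Doc("States/NewYork"),
+//		client.Doc("States/Ohio"),
+//	})
+//	if err != nil {
+//		// TODO: Handle error.
+//	}
+//	for _, snap := range snaps {
+//		if !snap.Exists() {
+//			// TODO: Handle missing document.
+//		}
+//	}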
+func (c *Client) GetAll(ctx context.Context, docRefs []*DocumentRef) ([]*DocumentSnapshot, error) {
+	if err := checkTransaction(ctx); err != nil {
+		return nil, err
+	}
+	return c.getAll(ctx, docRefs, nil)
+}
+
+func (c *Client) getAll(ctx context.Context, docRefs []*DocumentRef, tid []byte) ([]*DocumentSnapshot, error) {
+	var docNames []string
+	docIndex := map[string]int{} // doc name to position in docRefs
+	for i, dr := range docRefs {
+		if dr == nil {
+			return nil, errNilDocRef
+		}
+		docNames = append(docNames, dr.Path)
+		docIndex[dr.Path] = i
+	}
+	req := &pb.BatchGetDocumentsRequest{
+		Database:  c.path(),
+		Documents: docNames,
+	}
+	if tid != nil {
+		req.ConsistencySelector = &pb.BatchGetDocumentsRequest_Transaction{tid}
+	}
+	streamClient, err := c.c.BatchGetDocuments(withResourceHeader(ctx, req.Database), req)
+	if err != nil {
+		return nil, err
+	}
+
+	// Read and remember all results from the stream.
+	var resps []*pb.BatchGetDocumentsResponse
+	for {
+		resp, err := streamClient.Recv()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return nil, err
+		}
+		resps = append(resps, resp)
+	}
+
+	// Results may arrive out of order. Put each at the right index.
+	docs := make([]*DocumentSnapshot, len(docNames))
+	for _, resp := range resps {
+		var (
+			i   int
+			doc *pb.Document
+			err error
+		)
+		switch r := resp.Result.(type) {
+		case *pb.BatchGetDocumentsResponse_Found:
+			i = docIndex[r.Found.Name]
+			doc = r.Found
+		case *pb.BatchGetDocumentsResponse_Missing:
+			i = docIndex[r.Missing]
+			doc = nil
+		default:
+			return nil, errors.New("firestore: unknown BatchGetDocumentsResponse result type")
+		}
+		if docs[i] != nil {
+			return nil, fmt.Errorf("firestore: %q seen twice", docRefs[i].Path)
+		}
+		docs[i], err = newDocumentSnapshot(docRefs[i], doc, c, resp.ReadTime)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return docs, nil
+}
+
+// Collections returns an iterator over the top-level collections.
+func (c *Client) Collections(ctx context.Context) *CollectionIterator {
+	it := &CollectionIterator{
+		err:    checkTransaction(ctx),
+		client: c,
+		it: c.c.ListCollectionIds(
+			withResourceHeader(ctx, c.path()),
+			&pb.ListCollectionIdsRequest{Parent: c.path()}),
+	}
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
+		it.fetch,
+		func() int { return len(it.items) },
+		func() interface{} { b := it.items; it.items = nil; return b })
+	return it
+}
+
+// Batch returns a WriteBatch.
+func (c *Client) Batch() *WriteBatch {
+	return &WriteBatch{c: c}
+}
+
+// commit calls the Commit RPC outside of a transaction.
+func (c *Client) commit(ctx context.Context, ws []*pb.Write) ([]*WriteResult, error) {
+	if err := checkTransaction(ctx); err != nil {
+		return nil, err
+	}
+	req := &pb.CommitRequest{
+		Database: c.path(),
+		Writes:   ws,
+	}
+	res, err := c.c.Commit(withResourceHeader(ctx, req.Database), req)
+	if err != nil {
+		return nil, err
+	}
+	if len(res.WriteResults) == 0 {
+		return nil, errors.New("firestore: missing WriteResult")
+	}
+	var wrs []*WriteResult
+	for _, pwr := range res.WriteResults {
+		wr, err := writeResultFromProto(pwr)
+		if err != nil {
+			return nil, err
+		}
+		wrs = append(wrs, wr)
+	}
+	return wrs, nil
+}
+
+func (c *Client) commit1(ctx context.Context, ws []*pb.Write) (*WriteResult, error) {
+	wrs, err := c.commit(ctx, ws)
+	if err != nil {
+		return nil, err
+	}
+	return wrs[0], nil
+}
+
+// A WriteResult is returned by methods that write documents.
+type WriteResult struct {
+	// The time at which the document was updated, or created if it did not
+	// previously exist.
Writes that do not actually change the document do + // not change the update time. + UpdateTime time.Time +} + +func writeResultFromProto(wr *pb.WriteResult) (*WriteResult, error) { + t, err := ptypes.Timestamp(wr.UpdateTime) + if err != nil { + t = time.Time{} + // TODO(jba): Follow up if Delete is supposed to return a nil timestamp. + } + return &WriteResult{UpdateTime: t}, nil +} + +func sleep(ctx context.Context, dur time.Duration) error { + switch err := gax.Sleep(ctx, dur); err { + case context.Canceled: + return status.Error(codes.Canceled, "context canceled") + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, "context deadline exceeded") + default: + return err + } +} diff --git a/vendor/cloud.google.com/go/firestore/client_test.go b/vendor/cloud.google.com/go/firestore/client_test.go new file mode 100644 index 0000000..1ce3131 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/client_test.go @@ -0,0 +1,212 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +import ( + "testing" + + tspb "github.com/golang/protobuf/ptypes/timestamp" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var testClient = &Client{ + projectID: "projectID", + databaseID: "(default)", +} + +func TestClientCollectionAndDoc(t *testing.T) { + coll1 := testClient.Collection("X") + db := "projects/projectID/databases/(default)" + wantc1 := &CollectionRef{ + c: testClient, + parentPath: db, + Parent: nil, + ID: "X", + Path: "projects/projectID/databases/(default)/documents/X", + Query: Query{c: testClient, collectionID: "X", parentPath: db}, + } + if !testEqual(coll1, wantc1) { + t.Fatalf("got\n%+v\nwant\n%+v", coll1, wantc1) + } + doc1 := testClient.Doc("X/a") + wantd1 := &DocumentRef{ + Parent: coll1, + ID: "a", + Path: "projects/projectID/databases/(default)/documents/X/a", + } + + if !testEqual(doc1, wantd1) { + t.Fatalf("got %+v, want %+v", doc1, wantd1) + } + coll2 := testClient.Collection("X/a/Y") + parentPath := "projects/projectID/databases/(default)/documents/X/a" + wantc2 := &CollectionRef{ + c: testClient, + parentPath: parentPath, + Parent: doc1, + ID: "Y", + Path: "projects/projectID/databases/(default)/documents/X/a/Y", + Query: Query{c: testClient, collectionID: "Y", parentPath: parentPath}, + } + if !testEqual(coll2, wantc2) { + t.Fatalf("\ngot %+v\nwant %+v", coll2, wantc2) + } + doc2 := testClient.Doc("X/a/Y/b") + wantd2 := &DocumentRef{ + Parent: coll2, + ID: "b", + Path: "projects/projectID/databases/(default)/documents/X/a/Y/b", + } + if !testEqual(doc2, wantd2) { + t.Fatalf("got %+v, want %+v", doc2, wantd2) + } +} + +func TestClientCollDocErrors(t *testing.T) { + for _, badColl := range []string{"", "/", "/a/", "/a/b", "a/b/", "a//b"} { + coll := testClient.Collection(badColl) + if coll != nil { + t.Errorf("coll path %q: got %+v, want nil", badColl, coll) + } + 
} + for _, badDoc := range []string{"", "a", "/", "/a", "a/", "a/b/c", "a//b/c"} { + doc := testClient.Doc(badDoc) + if doc != nil { + t.Errorf("doc path %q: got %+v, want nil", badDoc, doc) + } + } +} + +func TestGetAll(t *testing.T) { + c, srv := newMock(t) + defer c.Close() + const dbPath = "projects/projectID/databases/(default)" + req := &pb.BatchGetDocumentsRequest{ + Database: dbPath, + Documents: []string{ + dbPath + "/documents/C/a", + dbPath + "/documents/C/b", + dbPath + "/documents/C/c", + }, + } + testGetAll(t, c, srv, dbPath, func(drs []*DocumentRef) ([]*DocumentSnapshot, error) { + return c.GetAll(context.Background(), drs) + }, req) +} + +func testGetAll(t *testing.T, c *Client, srv *mockServer, dbPath string, getAll func([]*DocumentRef) ([]*DocumentSnapshot, error), req *pb.BatchGetDocumentsRequest) { + wantPBDocs := []*pb.Document{ + { + Name: dbPath + "/documents/C/a", + CreateTime: aTimestamp, + UpdateTime: aTimestamp, + Fields: map[string]*pb.Value{"f": intval(2)}, + }, + nil, + { + Name: dbPath + "/documents/C/c", + CreateTime: aTimestamp, + UpdateTime: aTimestamp, + Fields: map[string]*pb.Value{"f": intval(1)}, + }, + } + wantReadTimes := []*tspb.Timestamp{aTimestamp, aTimestamp2, aTimestamp3} + srv.addRPC(req, + []interface{}{ + // deliberately put these out of order + &pb.BatchGetDocumentsResponse{ + Result: &pb.BatchGetDocumentsResponse_Found{wantPBDocs[2]}, + ReadTime: aTimestamp3, + }, + &pb.BatchGetDocumentsResponse{ + Result: &pb.BatchGetDocumentsResponse_Found{wantPBDocs[0]}, + ReadTime: aTimestamp, + }, + &pb.BatchGetDocumentsResponse{ + Result: &pb.BatchGetDocumentsResponse_Missing{dbPath + "/documents/C/b"}, + ReadTime: aTimestamp2, + }, + }, + ) + coll := c.Collection("C") + var docRefs []*DocumentRef + for _, name := range []string{"a", "b", "c"} { + docRefs = append(docRefs, coll.Doc(name)) + } + docs, err := getAll(docRefs) + if err != nil { + t.Fatal(err) + } + if got, want := len(docs), len(wantPBDocs); got != want { + t.Errorf("got %d docs, wanted %d", got, want) + } + for i, got := range docs { + want, err := newDocumentSnapshot(docRefs[i], wantPBDocs[i], c, wantReadTimes[i]) + if err != nil { + t.Fatal(err) + } + if diff := testDiff(got, want); diff != "" { + t.Errorf("#%d: got=--, want==++\n%s", i, diff) + } + } +} + +func TestGetAllErrors(t *testing.T) { + ctx := context.Background() + const ( + dbPath = "projects/projectID/databases/(default)" + docPath = dbPath + "/documents/C/a" + ) + c, srv := newMock(t) + if _, err := c.GetAll(ctx, []*DocumentRef{nil}); err != errNilDocRef { + t.Errorf("got %v, want errNilDocRef", err) + } + + // Internal server error. + srv.addRPC( + &pb.BatchGetDocumentsRequest{ + Database: dbPath, + Documents: []string{docPath}, + }, + []interface{}{status.Errorf(codes.Internal, "")}, + ) + _, err := c.GetAll(ctx, []*DocumentRef{c.Doc("C/a")}) + codeEq(t, "GetAll #1", codes.Internal, err) + + // Doc appears as both found and missing (server bug). 
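+	// GetAll is expected to fail when the stream reports the same document
+	// more than once.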
+	srv.reset()
+	srv.addRPC(
+		&pb.BatchGetDocumentsRequest{
+			Database:  dbPath,
+			Documents: []string{docPath},
+		},
+		[]interface{}{
+			&pb.BatchGetDocumentsResponse{
+				Result:   &pb.BatchGetDocumentsResponse_Found{&pb.Document{Name: docPath}},
+				ReadTime: aTimestamp,
+			},
+			&pb.BatchGetDocumentsResponse{
+				Result:   &pb.BatchGetDocumentsResponse_Missing{docPath},
+				ReadTime: aTimestamp,
+			},
+		},
+	)
+	if _, err := c.GetAll(ctx, []*DocumentRef{c.Doc("C/a")}); err == nil {
+		t.Error("got nil, want error")
+	}
+}
diff --git a/vendor/cloud.google.com/go/firestore/collref.go b/vendor/cloud.google.com/go/firestore/collref.go
new file mode 100644
index 0000000..8459c66
--- /dev/null
+++ b/vendor/cloud.google.com/go/firestore/collref.go
@@ -0,0 +1,114 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package firestore
+
+import (
+	"math/rand"
+	"os"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+)
+
+// A CollectionRef is a reference to a Firestore collection.
+type CollectionRef struct {
+	c *Client
+
+	// Typically Parent.Path, or c.path if Parent is nil.
+	// May be different if this CollectionRef was created from a stored reference
+	// to a different project/DB.
+	parentPath string
+
+	// Parent is the document of which this collection is a part. It is
+	// nil for top-level collections.
+	Parent *DocumentRef
+
+	// The full resource path of the collection: "projects/P/databases/D/documents..."
+	Path string
+
+	// ID is the collection identifier.
+	ID string
+
+	// Use the methods of Query on a CollectionRef to create and run queries.
+	Query
+}
+
+func newTopLevelCollRef(c *Client, dbPath, id string) *CollectionRef {
+	return &CollectionRef{
+		c:          c,
+		ID:         id,
+		parentPath: dbPath,
+		Path:       dbPath + "/documents/" + id,
+		Query:      Query{c: c, collectionID: id, parentPath: dbPath},
+	}
+}
+
+func newCollRefWithParent(c *Client, parent *DocumentRef, id string) *CollectionRef {
+	return &CollectionRef{
+		c:          c,
+		Parent:     parent,
+		ID:         id,
+		parentPath: parent.Path,
+		Path:       parent.Path + "/" + id,
+		Query:      Query{c: c, collectionID: id, parentPath: parent.Path},
+	}
+}
+
+// Doc returns a DocumentRef that refers to the document in the collection with the
+// given identifier.
+func (c *CollectionRef) Doc(id string) *DocumentRef {
+	if c == nil {
+		return nil
+	}
+	return newDocRef(c, id)
+}
+
+// NewDoc returns a DocumentRef with a uniquely generated ID.
+func (c *CollectionRef) NewDoc() *DocumentRef {
+	return c.Doc(uniqueID())
+}
+
+// Add generates a DocumentRef with a unique ID. It then creates the document
+// with the given data, which can be a map[string]interface{}, a struct or a
+// pointer to a struct.
+//
+// Add returns an error in the unlikely event that a document with the same ID
+// already exists.
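+//
+// A short sketch (the collection name and field values are hypothetical):
+//
+//	ref, wr, err := client.Collection("Users").Add(ctx, map[string]interface{}{
+//		"name":  "Alice",
+//		"score": 42,
+//	})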
+func (c *CollectionRef) Add(ctx context.Context, data interface{}) (*DocumentRef, *WriteResult, error) { + d := c.NewDoc() + wr, err := d.Create(ctx, data) + if err != nil { + return nil, nil, err + } + return d, wr, nil +} + +const alphanum = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" + +var ( + rngMu sync.Mutex + rng = rand.New(rand.NewSource(time.Now().UnixNano() ^ int64(os.Getpid()))) +) + +func uniqueID() string { + var b [20]byte + rngMu.Lock() + for i := 0; i < len(b); i++ { + b[i] = alphanum[rng.Intn(len(alphanum))] + } + rngMu.Unlock() + return string(b[:]) +} diff --git a/vendor/cloud.google.com/go/firestore/collref_test.go b/vendor/cloud.google.com/go/firestore/collref_test.go new file mode 100644 index 0000000..3d82be5 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/collref_test.go @@ -0,0 +1,97 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +import ( + "testing" + + "github.com/golang/protobuf/proto" + + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + + "golang.org/x/net/context" +) + +func TestDoc(t *testing.T) { + coll := testClient.Collection("C") + got := coll.Doc("d") + want := &DocumentRef{ + Parent: coll, + ID: "d", + Path: "projects/projectID/databases/(default)/documents/C/d", + } + if !testEqual(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } +} + +func TestNewDoc(t *testing.T) { + c := &Client{} + coll := c.Collection("C") + got := coll.NewDoc() + if got.Parent != coll { + t.Errorf("got %v, want %v", got.Parent, coll) + } + if len(got.ID) != 20 { + t.Errorf("got %d-char ID, wanted 20", len(got.ID)) + } + + got2 := coll.NewDoc() + if got.ID == got2.ID { + t.Error("got same ID") + } +} + +func TestAdd(t *testing.T) { + ctx := context.Background() + c, srv := newMock(t) + wantReq := commitRequestForSet() + w := wantReq.Writes[0] + w.CurrentDocument = &pb.Precondition{ + ConditionType: &pb.Precondition_Exists{false}, + } + srv.addRPCAdjust(wantReq, commitResponseForSet, func(gotReq proto.Message) { + // We can't know the doc ID before Add is called, so we take it from + // the request. + w.Operation.(*pb.Write_Update).Update.Name = gotReq.(*pb.CommitRequest).Writes[0].Operation.(*pb.Write_Update).Update.Name + }) + _, wr, err := c.Collection("C").Add(ctx, testData) + if err != nil { + t.Fatal(err) + } + if !testEqual(wr, writeResultForSet) { + t.Errorf("got %v, want %v", wr, writeResultForSet) + } +} + +func TestNilErrors(t *testing.T) { + ctx := context.Background() + c, _ := newMock(t) + // Test that a nil CollectionRef results in a nil DocumentRef and errors + // where possible. + coll := c.Collection("a/b") // nil because "a/b" denotes a doc. 
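+	// (Collection returns nil for any path with an even number of components.)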
+ if coll != nil { + t.Fatal("collection not nil") + } + if got := coll.Doc("d"); got != nil { + t.Fatalf("got %v, want nil", got) + } + if got := coll.NewDoc(); got != nil { + t.Fatalf("got %v, want nil", got) + } + if _, _, err := coll.Add(ctx, testData); err != errNilDocRef { + t.Fatalf("got <%v>, want <%v>", err, errNilDocRef) + } +} diff --git a/vendor/cloud.google.com/go/firestore/cross_language_test.go b/vendor/cloud.google.com/go/firestore/cross_language_test.go new file mode 100644 index 0000000..476345b --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/cross_language_test.go @@ -0,0 +1,357 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// A runner for the cross-language tests. + +package firestore + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "math" + "path" + "path/filepath" + "strings" + "testing" + + pb "cloud.google.com/go/firestore/genproto" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + ts "github.com/golang/protobuf/ptypes/timestamp" + "golang.org/x/net/context" + fspb "google.golang.org/genproto/googleapis/firestore/v1beta1" +) + +func TestCrossLanguageTests(t *testing.T) { + const dir = "testdata" + fis, err := ioutil.ReadDir(dir) + if err != nil { + t.Fatal(err) + } + n := 0 + for _, fi := range fis { + if strings.HasSuffix(fi.Name(), ".textproto") { + runTestFromFile(t, filepath.Join(dir, fi.Name())) + n++ + } + } + t.Logf("ran %d cross-language tests", n) +} + +func runTestFromFile(t *testing.T, filename string) { + bytes, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatalf("%s: %v", filename, err) + } + var test pb.Test + if err := proto.UnmarshalText(string(bytes), &test); err != nil { + t.Fatalf("unmarshalling %s: %v", filename, err) + } + msg := fmt.Sprintf("%s (file %s)", test.Description, filepath.Base(filename)) + runTest(t, msg, &test) +} + +func runTest(t *testing.T, msg string, test *pb.Test) { + check := func(gotErr error, wantErr bool) bool { + if wantErr && gotErr == nil { + t.Errorf("%s: got nil, want error", msg) + return false + } else if !wantErr && gotErr != nil { + t.Errorf("%s: %v", msg, gotErr) + return false + } + return true + } + + ctx := context.Background() + c, srv := newMock(t) + switch tt := test.Test.(type) { + case *pb.Test_Get: + req := &fspb.BatchGetDocumentsRequest{ + Database: c.path(), + Documents: []string{tt.Get.DocRefPath}, + } + srv.addRPC(req, []interface{}{ + &fspb.BatchGetDocumentsResponse{ + Result: &fspb.BatchGetDocumentsResponse_Found{&fspb.Document{ + Name: tt.Get.DocRefPath, + CreateTime: &ts.Timestamp{}, + UpdateTime: &ts.Timestamp{}, + }}, + ReadTime: &ts.Timestamp{}, + }, + }) + ref := docRefFromPath(tt.Get.DocRefPath, c) + _, err := ref.Get(ctx) + if err != nil { + t.Errorf("%s: %v", msg, err) + return + } + // Checking response would just be testing the function converting a Document + // proto to a DocumentSnapshot, hence uninteresting. 
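+
+ // The write cases below follow a common pattern: install the expected
+ // request and a canned commit response on the mock server, call the
+ // corresponding client method, and check the resulting error against the
+ // test's expectation.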
+ + case *pb.Test_Create: + srv.addRPC(tt.Create.Request, commitResponseForSet) + ref := docRefFromPath(tt.Create.DocRefPath, c) + data, err := convertData(tt.Create.JsonData) + if err != nil { + t.Errorf("%s: %v", msg, err) + return + } + _, err = ref.Create(ctx, data) + check(err, tt.Create.IsError) + + case *pb.Test_Set: + srv.addRPC(tt.Set.Request, commitResponseForSet) + ref := docRefFromPath(tt.Set.DocRefPath, c) + data, err := convertData(tt.Set.JsonData) + if err != nil { + t.Errorf("%s: %v", msg, err) + return + } + var opts []SetOption + if tt.Set.Option != nil { + opts = []SetOption{convertSetOption(tt.Set.Option)} + } + _, err = ref.Set(ctx, data, opts...) + check(err, tt.Set.IsError) + + case *pb.Test_Update: + // Ignore Update test because we only support UpdatePaths. + // Not to worry, every Update test has a corresponding UpdatePaths test. + + case *pb.Test_UpdatePaths: + srv.addRPC(tt.UpdatePaths.Request, commitResponseForSet) + ref := docRefFromPath(tt.UpdatePaths.DocRefPath, c) + preconds := convertPrecondition(t, tt.UpdatePaths.Precondition) + paths := convertFieldPaths(tt.UpdatePaths.FieldPaths) + var ups []Update + for i, path := range paths { + val, err := convertJSONValue(tt.UpdatePaths.JsonValues[i]) + if err != nil { + t.Fatalf("%s: %v", msg, err) + } + ups = append(ups, Update{ + FieldPath: path, + Value: val, + }) + } + _, err := ref.Update(ctx, ups, preconds...) + check(err, tt.UpdatePaths.IsError) + + case *pb.Test_Delete: + srv.addRPC(tt.Delete.Request, commitResponseForSet) + ref := docRefFromPath(tt.Delete.DocRefPath, c) + preconds := convertPrecondition(t, tt.Delete.Precondition) + _, err := ref.Delete(ctx, preconds...) + check(err, tt.Delete.IsError) + + case *pb.Test_Query: + q := convertQuery(t, tt.Query) + got, err := q.toProto() + if check(err, tt.Query.IsError) && err == nil { + if want := tt.Query.Query; !proto.Equal(got, want) { + t.Errorf("%s\ngot: %s\nwant: %s", msg, proto.MarshalTextString(got), proto.MarshalTextString(want)) + } + } + + default: + t.Fatalf("unknown test type %T", tt) + } +} + +func docRefFromPath(p string, c *Client) *DocumentRef { + return &DocumentRef{ + Path: p, + ID: path.Base(p), + Parent: &CollectionRef{c: c}, + } +} + +func convertJSONValue(jv string) (interface{}, error) { + var val interface{} + if err := json.Unmarshal([]byte(jv), &val); err != nil { + return nil, err + } + return convertTestValue(val), nil +} + +func convertData(jsonData string) (map[string]interface{}, error) { + var m map[string]interface{} + if err := json.Unmarshal([]byte(jsonData), &m); err != nil { + return nil, err + } + return convertTestMap(m), nil +} + +func convertTestMap(m map[string]interface{}) map[string]interface{} { + for k, v := range m { + m[k] = convertTestValue(v) + } + return m +} + +func convertTestValue(v interface{}) interface{} { + switch v := v.(type) { + case string: + switch v { + case "ServerTimestamp": + return ServerTimestamp + case "Delete": + return Delete + case "NaN": + return math.NaN() + default: + return v + } + case float64: + if v == float64(int(v)) { + return int(v) + } + return v + case []interface{}: + for i, e := range v { + v[i] = convertTestValue(e) + } + return v + case map[string]interface{}: + return convertTestMap(v) + default: + return v + } +} + +func convertSetOption(opt *pb.SetOption) SetOption { + if opt.All { + return MergeAll + } + return Merge(convertFieldPaths(opt.Fields)...) 
+} + +func convertFieldPaths(fps []*pb.FieldPath) []FieldPath { + var res []FieldPath + for _, fp := range fps { + res = append(res, fp.Field) + } + return res +} + +func convertPrecondition(t *testing.T, fp *fspb.Precondition) []Precondition { + if fp == nil { + return nil + } + var pc Precondition + switch fp := fp.ConditionType.(type) { + case *fspb.Precondition_Exists: + pc = exists(fp.Exists) + case *fspb.Precondition_UpdateTime: + tm, err := ptypes.Timestamp(fp.UpdateTime) + if err != nil { + t.Fatal(err) + } + pc = LastUpdateTime(tm) + default: + t.Fatalf("unknown precondition type %T", fp) + } + return []Precondition{pc} +} + +func convertQuery(t *testing.T, qt *pb.QueryTest) Query { + parts := strings.Split(qt.CollPath, "/") + q := Query{ + parentPath: strings.Join(parts[:len(parts)-2], "/"), + collectionID: parts[len(parts)-1], + } + for _, c := range qt.Clauses { + switch c := c.Clause.(type) { + case *pb.Clause_Select: + q = q.SelectPaths(convertFieldPaths(c.Select.Fields)...) + case *pb.Clause_OrderBy: + var dir Direction + switch c.OrderBy.Direction { + case "asc": + dir = Asc + case "desc": + dir = Desc + default: + t.Fatalf("bad direction: %q", c.OrderBy.Direction) + } + q = q.OrderByPath(FieldPath(c.OrderBy.Path.Field), dir) + case *pb.Clause_Where: + val, err := convertJSONValue(c.Where.JsonValue) + if err != nil { + t.Fatal(err) + } + q = q.WherePath(FieldPath(c.Where.Path.Field), c.Where.Op, val) + case *pb.Clause_Offset: + q = q.Offset(int(c.Offset)) + case *pb.Clause_Limit: + q = q.Limit(int(c.Limit)) + case *pb.Clause_StartAt: + q = q.StartAt(convertCursor(t, c.StartAt)...) + case *pb.Clause_StartAfter: + q = q.StartAfter(convertCursor(t, c.StartAfter)...) + case *pb.Clause_EndAt: + q = q.EndAt(convertCursor(t, c.EndAt)...) + case *pb.Clause_EndBefore: + q = q.EndBefore(convertCursor(t, c.EndBefore)...) + default: + t.Fatalf("bad clause type %T", c) + } + } + return q +} + +// Returns args to a cursor method (StartAt, etc.). +func convertCursor(t *testing.T, c *pb.Cursor) []interface{} { + if c.DocSnapshot != nil { + ds, err := convertDocSnapshot(c.DocSnapshot) + if err != nil { + t.Fatal(err) + } + return []interface{}{ds} + } + var vals []interface{} + for _, jv := range c.JsonValues { + v, err := convertJSONValue(jv) + if err != nil { + t.Fatal(err) + } + vals = append(vals, v) + } + return vals +} + +func convertDocSnapshot(ds *pb.DocSnapshot) (*DocumentSnapshot, error) { + data, err := convertData(ds.JsonData) + if err != nil { + return nil, err + } + doc, transformPaths, err := toProtoDocument(data) + if err != nil { + return nil, err + } + if len(transformPaths) > 0 { + return nil, errors.New("saw transform paths in DocSnapshot") + } + return &DocumentSnapshot{ + Ref: &DocumentRef{ + Path: ds.Path, + Parent: &CollectionRef{Path: path.Dir(ds.Path)}, + }, + proto: doc, + }, nil +} diff --git a/vendor/cloud.google.com/go/firestore/doc.go b/vendor/cloud.google.com/go/firestore/doc.go new file mode 100644 index 0000000..b862f2e --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/doc.go @@ -0,0 +1,218 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// DO NOT EDIT doc.go. Modify internal/doc.template, then run make -C internal. + +/* +Package firestore provides a client for reading and writing to a Cloud Firestore +database. + +See https://cloud.google.com/firestore/docs for an introduction +to Cloud Firestore and additional help on using the Firestore API. + +Note: you can't use both Cloud Firestore and Cloud Datastore in the same +project. + +Creating a Client + +To start working with this package, create a client with a project ID: + + ctx := context.Background() + client, err := firestore.NewClient(ctx, "projectID") + if err != nil { + // TODO: Handle error. + } + +CollectionRefs and DocumentRefs + +In Firestore, documents are sets of key-value pairs, and collections are groups of +documents. A Firestore database consists of a hierarchy of alternating collections +and documents, referred to by slash-separated paths like +"States/California/Cities/SanFrancisco". + +This client is built around references to collections and documents. CollectionRefs +and DocumentRefs are lightweight values that refer to the corresponding database +entities. Creating a ref does not involve any network traffic. + + states := client.Collection("States") + ny := states.Doc("NewYork") + // Or, in a single call: + ny = client.Doc("States/NewYork") + +Reading + +Use DocumentRef.Get to read a document. The result is a DocumentSnapshot. +Call its Data method to obtain the entire document contents as a map. + + docsnap, err := ny.Get(ctx) + if err != nil { + // TODO: Handle error. + } + dataMap := docsnap.Data() + fmt.Println(dataMap) + +You can also obtain a single field with DataAt, or extract the data into a struct +with DataTo. With the type definition + + type State struct { + Capital string `firestore:"capital"` + Population float64 `firestore:"pop"` // in millions + } + +we can extract the document's data into a value of type State: + + var nyData State + if err := docsnap.DataTo(&nyData); err != nil { + // TODO: Handle error. + } + +Note that this client supports struct tags beginning with "firestore:" that work like +the tags of the encoding/json package, letting you rename fields, ignore them, or +omit their values when empty. + +To retrieve multiple documents from their references in a single call, use +Client.GetAll. + + docsnaps, err := client.GetAll(ctx, []*firestore.DocumentRef{ + states.Doc("Wisconsin"), states.Doc("Ohio"), + }) + if err != nil { + // TODO: Handle error. + } + for _, ds := range docsnaps { + _ = ds // TODO: Use ds. + } + + +Writing + +For writing individual documents, use the methods on DocumentReference. +Create creates a new document. + + wr, err := ny.Create(ctx, State{ + Capital: "Albany", + Population: 19.8, + }) + if err != nil { + // TODO: Handle error. + } + fmt.Println(wr) + +The first return value is a WriteResult, which contains the time +at which the document was updated. + +Create fails if the document exists. Another method, Set, either replaces an existing +document or creates a new one. 
+
+ ca := states.Doc("California")
+ _, err = ca.Set(ctx, State{
+ Capital: "Sacramento",
+ Population: 39.14,
+ })
+
+To update some fields of an existing document, use Update. It takes a list of
+paths to update and their corresponding values.
+
+ _, err = ca.Update(ctx, []firestore.Update{{Path: "capital", Value: "Sacramento"}})
+
+Use DocumentRef.Delete to delete a document.
+
+ _, err = ny.Delete(ctx)
+
+Preconditions
+
+You can condition Deletes or Updates on when a document was last changed. Specify
+these preconditions as an option to a Delete or Update method. The check and the
+write happen atomically with a single RPC.
+
+ docsnap, err = ca.Get(ctx)
+ if err != nil {
+ // TODO: Handle error.
+ }
+ _, err = ca.Update(ctx,
+ []firestore.Update{{Path: "capital", Value: "Sacramento"}},
+ firestore.LastUpdateTime(docsnap.UpdateTime))
+
+Here we update a doc only if it hasn't changed since we read it.
+You could also do this with a transaction.
+
+To perform multiple writes at once, use a WriteBatch. Its methods chain
+for convenience.
+
+WriteBatch.Commit sends the collected writes to the server, where they happen
+atomically.
+
+ writeResults, err := client.Batch().
+ Create(ny, State{Capital: "Albany"}).
+ Update(ca, []firestore.Update{{Path: "capital", Value: "Sacramento"}}).
+ Delete(client.Doc("States/WestDakota")).
+ Commit(ctx)
+
+Queries
+
+You can select a subset of the documents in a collection. Begin with the collection, and
+build up a query using Select, Where and other methods of Query.
+
+ q := states.Where("pop", ">", 10).OrderBy("pop", firestore.Desc)
+
+Call the Query's Documents method to get an iterator, and use it like
+the other Google Cloud Client iterators.
+
+ iter := q.Documents(ctx)
+ defer iter.Stop()
+ for {
+ doc, err := iter.Next()
+ if err == iterator.Done {
+ break
+ }
+ if err != nil {
+ // TODO: Handle error.
+ }
+ fmt.Println(doc.Data())
+ }
+
+To get all the documents in a collection, you can use the collection itself
+as a query.
+
+ iter = client.Collection("States").Documents(ctx)
+
+Transactions
+
+Use a transaction to execute reads and writes atomically. All reads must happen
+before any writes. Transaction creation, commit, rollback and retry are handled for
+you by the Client.RunTransaction method; just provide a function and use the
+read and write methods of the Transaction passed to it.
+
+ ny := client.Doc("States/NewYork")
+ err := client.RunTransaction(ctx, func(ctx context.Context, tx *firestore.Transaction) error {
+ doc, err := tx.Get(ny) // tx.Get, NOT ny.Get!
+ if err != nil {
+ return err
+ }
+ pop, err := doc.DataAt("pop")
+ if err != nil {
+ return err
+ }
+ return tx.Update(ny, []firestore.Update{{Path: "pop", Value: pop.(float64) + 0.2}})
+ })
+ if err != nil {
+ // TODO: Handle error.
+ }
+
+Authentication
+
+See examples of authorization and authentication at
+https://godoc.org/cloud.google.com/go#pkg-examples.
+*/
+package firestore
diff --git a/vendor/cloud.google.com/go/firestore/docref.go b/vendor/cloud.google.com/go/firestore/docref.go
new file mode 100644
index 0000000..488c92f
--- /dev/null
+++ b/vendor/cloud.google.com/go/firestore/docref.go
@@ -0,0 +1,611 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package firestore
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+
+ "golang.org/x/net/context"
+ "google.golang.org/api/iterator"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+
+ vkit "cloud.google.com/go/firestore/apiv1beta1"
+ pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
+)
+
+var errNilDocRef = errors.New("firestore: nil DocumentRef")
+
+// A DocumentRef is a reference to a Firestore document.
+type DocumentRef struct {
+ // The CollectionRef that this document is a part of. Never nil.
+ Parent *CollectionRef
+
+ // The full resource path of the document: "projects/P/databases/D/documents..."
+ Path string
+
+ // The ID of the document: the last component of the resource path.
+ ID string
+}
+
+func newDocRef(parent *CollectionRef, id string) *DocumentRef {
+ return &DocumentRef{
+ Parent: parent,
+ ID: id,
+ Path: parent.Path + "/" + id,
+ }
+}
+
+// Collection returns a reference to a sub-collection of this document.
+func (d *DocumentRef) Collection(id string) *CollectionRef {
+ return newCollRefWithParent(d.Parent.c, d, id)
+}
+
+// Get retrieves the document. If the document does not exist, Get returns a NotFound error, which
+// can be checked with
+// grpc.Code(err) == codes.NotFound
+// In that case, Get returns a non-nil DocumentSnapshot whose Exists method returns false and whose
+// ReadTime is the time of the failed read operation.
+func (d *DocumentRef) Get(ctx context.Context) (*DocumentSnapshot, error) {
+ if err := checkTransaction(ctx); err != nil {
+ return nil, err
+ }
+ if d == nil {
+ return nil, errNilDocRef
+ }
+ docsnaps, err := d.Parent.c.getAll(ctx, []*DocumentRef{d}, nil)
+ if err != nil {
+ return nil, err
+ }
+ ds := docsnaps[0]
+ if !ds.Exists() {
+ return ds, status.Errorf(codes.NotFound, "%q not found", d.Path)
+ }
+ return ds, nil
+}
+
+// Create creates the document with the given data.
+// It returns an error if a document with the same ID already exists.
+//
+// The data argument can be a map with string keys, a struct, or a pointer to a
+// struct. The map keys or exported struct fields become the fields of the firestore
+// document.
+// The values of data are converted to Firestore values as follows:
+//
+// - bool converts to Bool.
+// - string converts to String.
+// - int, int8, int16, int32 and int64 convert to Integer.
+// - uint8, uint16 and uint32 convert to Integer. uint64 is disallowed,
+// because it can represent values that cannot be represented in an int64, which
+// is the underlying type of an Integer.
+// - float32 and float64 convert to Double.
+// - []byte converts to Bytes.
+// - time.Time converts to Timestamp.
+// - latlng.LatLng converts to GeoPoint. latlng is the package
+// "google.golang.org/genproto/googleapis/type/latlng".
+// - Slices convert to Array.
+// - Maps and structs convert to Map.
+// - nils of any type convert to Null.
+//
+// Pointers and interface{} are also permitted, and their elements are processed
+// recursively.
+//
+// Struct fields can have tags like those used by the encoding/json package.
+// Tags begin with "firestore:" and are followed by "-", meaning "ignore this field," or
+// an alternative name for the field. Following the name, these comma-separated
+// options may be provided:
+//
+// - omitempty: Do not encode this field if it is empty. A value is empty
+// if it is a zero value, or an array, slice or map of length zero.
+// - serverTimestamp: The field must be of type time.Time. When writing, if
+// the field has the zero value, the server will populate the stored document with
+// the time that the request is processed.
+func (d *DocumentRef) Create(ctx context.Context, data interface{}) (*WriteResult, error) {
+ ws, err := d.newCreateWrites(data)
+ if err != nil {
+ return nil, err
+ }
+ return d.Parent.c.commit1(ctx, ws)
+}
+
+func (d *DocumentRef) newCreateWrites(data interface{}) ([]*pb.Write, error) {
+ if d == nil {
+ return nil, errNilDocRef
+ }
+ doc, serverTimestampPaths, err := toProtoDocument(data)
+ if err != nil {
+ return nil, err
+ }
+ doc.Name = d.Path
+ pc, err := exists(false).preconditionProto()
+ if err != nil {
+ return nil, err
+ }
+ return d.newUpdateWithTransform(doc, nil, pc, serverTimestampPaths, false), nil
+}
+
+// Set creates or overwrites the document with the given data. See DocumentRef.Create
+// for the acceptable values of data. Without options, Set overwrites the document
+// completely. Specify one of the Merge options to preserve an existing document's
+// fields.
+func (d *DocumentRef) Set(ctx context.Context, data interface{}, opts ...SetOption) (*WriteResult, error) {
+ ws, err := d.newSetWrites(data, opts)
+ if err != nil {
+ return nil, err
+ }
+ return d.Parent.c.commit1(ctx, ws)
+}
+
+func (d *DocumentRef) newSetWrites(data interface{}, opts []SetOption) ([]*pb.Write, error) {
+ if d == nil {
+ return nil, errNilDocRef
+ }
+ if data == nil {
+ return nil, errors.New("firestore: nil document contents")
+ }
+ if len(opts) == 0 { // Set without merge
+ doc, serverTimestampPaths, err := toProtoDocument(data)
+ if err != nil {
+ return nil, err
+ }
+ doc.Name = d.Path
+ return d.newUpdateWithTransform(doc, nil, nil, serverTimestampPaths, true), nil
+ }
+ // Set with merge.
+ // This is just like Update, except for the existence precondition.
+ // So we turn data into a list of (FieldPath, interface{}) pairs (fpv's), as we do
+ // for Update.
+ fieldPaths, allPaths, err := processSetOptions(opts)
+ if err != nil {
+ return nil, err
+ }
+ var fpvs []fpv
+ v := reflect.ValueOf(data)
+ if allPaths {
+ // Set with MergeAll. Collect all the leaves of the map.
+ if v.Kind() != reflect.Map {
+ return nil, errors.New("firestore: MergeAll can only be specified with map data")
+ }
+ fpvsFromData(v, nil, &fpvs)
+ } else {
+ // Set with merge paths. Collect only the values at the given paths.
+ for _, fp := range fieldPaths {
+ val, err := getAtPath(v, fp)
+ if err != nil {
+ return nil, err
+ }
+ fpvs = append(fpvs, fpv{fp, val})
+ }
+ }
+ return d.fpvsToWrites(fpvs, nil)
+}
+
+// fpvsFromData converts v into a list of (FieldPath, value) pairs.
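+// For example, the map {"a": 1, "b": {"c": 2}} yields the pairs
+// ({"a"}, 1) and ({"b", "c"}, 2).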
+func fpvsFromData(v reflect.Value, prefix FieldPath, fpvs *[]fpv) { + switch v.Kind() { + case reflect.Map: + for _, k := range v.MapKeys() { + fpvsFromData(v.MapIndex(k), prefix.with(k.String()), fpvs) + } + case reflect.Interface: + fpvsFromData(v.Elem(), prefix, fpvs) + + default: + var val interface{} + if v.IsValid() { + val = v.Interface() + } + *fpvs = append(*fpvs, fpv{prefix, val}) + } +} + +// removePathsIf creates a new slice of FieldPaths that contains +// exactly those elements of fps for which pred returns false. +func removePathsIf(fps []FieldPath, pred func(FieldPath) bool) []FieldPath { + var result []FieldPath + for _, fp := range fps { + if !pred(fp) { + result = append(result, fp) + } + } + return result +} + +// Delete deletes the document. If the document doesn't exist, it does nothing +// and returns no error. +func (d *DocumentRef) Delete(ctx context.Context, preconds ...Precondition) (*WriteResult, error) { + ws, err := d.newDeleteWrites(preconds) + if err != nil { + return nil, err + } + return d.Parent.c.commit1(ctx, ws) +} + +func (d *DocumentRef) newDeleteWrites(preconds []Precondition) ([]*pb.Write, error) { + if d == nil { + return nil, errNilDocRef + } + pc, err := processPreconditionsForDelete(preconds) + if err != nil { + return nil, err + } + return []*pb.Write{{ + Operation: &pb.Write_Delete{d.Path}, + CurrentDocument: pc, + }}, nil +} + +func (d *DocumentRef) newUpdatePathWrites(updates []Update, preconds []Precondition) ([]*pb.Write, error) { + if len(updates) == 0 { + return nil, errors.New("firestore: no paths to update") + } + var fpvs []fpv + for _, u := range updates { + v, err := u.process() + if err != nil { + return nil, err + } + fpvs = append(fpvs, v) + } + pc, err := processPreconditionsForUpdate(preconds) + if err != nil { + return nil, err + } + return d.fpvsToWrites(fpvs, pc) +} + +func (d *DocumentRef) fpvsToWrites(fpvs []fpv, pc *pb.Precondition) ([]*pb.Write, error) { + // Make sure there are no duplications or prefixes among the field paths. + var fps []FieldPath + for _, fpv := range fpvs { + fps = append(fps, fpv.fieldPath) + } + if err := checkNoDupOrPrefix(fps); err != nil { + return nil, err + } + + // Process each fpv. + var updatePaths, transformPaths []FieldPath + doc := &pb.Document{ + Name: d.Path, + Fields: map[string]*pb.Value{}, + } + for _, fpv := range fpvs { + switch fpv.value { + case Delete: + // Send the field path without a corresponding value. + updatePaths = append(updatePaths, fpv.fieldPath) + + case ServerTimestamp: + // Use the path in a transform operation. + transformPaths = append(transformPaths, fpv.fieldPath) + + default: + updatePaths = append(updatePaths, fpv.fieldPath) + // Convert the value to a proto and put it into the document. + v := reflect.ValueOf(fpv.value) + pv, sawServerTimestamp, err := toProtoValue(v) + if err != nil { + return nil, err + } + setAtPath(doc.Fields, fpv.fieldPath, pv) + // Also accumulate any serverTimestamp values within the value. + if sawServerTimestamp { + stps, err := extractTransformPaths(v, nil) + if err != nil { + return nil, err + } + for _, p := range stps { + transformPaths = append(transformPaths, fpv.fieldPath.concat(p)) + } + } + } + } + return d.newUpdateWithTransform(doc, updatePaths, pc, transformPaths, false), nil +} + +var requestTimeTransform = &pb.DocumentTransform_FieldTransform_SetToServerValue{ + pb.DocumentTransform_FieldTransform_REQUEST_TIME, +} + +// newUpdateWithTransform constructs operations for a commit. 
+// Most generally, it returns an update operation followed by a transform.
+//
+// If there are no serverTimestampPaths, the transform is omitted.
+//
+// If doc.Fields is empty, there are no updatePaths, and there is no precondition,
+// the update is omitted, unless updateOnEmpty is true.
+func (d *DocumentRef) newUpdateWithTransform(doc *pb.Document, updatePaths []FieldPath, pc *pb.Precondition, serverTimestampPaths []FieldPath, updateOnEmpty bool) []*pb.Write {
+ // Remove server timestamp fields from updatePaths. Those fields were removed
+ // from the document by toProtoDocument, so they should not be in the update
+ // mask.
+ // Note: this is technically O(n^2), but it is unlikely that there is
+ // more than one server timestamp path.
+ updatePaths = removePathsIf(updatePaths, func(fp FieldPath) bool {
+ return fp.in(serverTimestampPaths)
+ })
+ var ws []*pb.Write
+ if updateOnEmpty || len(doc.Fields) > 0 ||
+ len(updatePaths) > 0 || (pc != nil && len(serverTimestampPaths) == 0) {
+ var mask *pb.DocumentMask
+ if len(updatePaths) > 0 {
+ sfps := toServiceFieldPaths(updatePaths)
+ sort.Strings(sfps) // TODO(jba): make tests pass without this
+ mask = &pb.DocumentMask{FieldPaths: sfps}
+ }
+ w := &pb.Write{
+ Operation: &pb.Write_Update{doc},
+ UpdateMask: mask,
+ CurrentDocument: pc,
+ }
+ ws = append(ws, w)
+ pc = nil // If the precondition is in the write, we don't need it in the transform.
+ }
+ if len(serverTimestampPaths) > 0 || pc != nil {
+ ws = append(ws, d.newTransform(serverTimestampPaths, pc))
+ }
+ return ws
+}
+
+func (d *DocumentRef) newTransform(serverTimestampFieldPaths []FieldPath, pc *pb.Precondition) *pb.Write {
+ sort.Sort(byPath(serverTimestampFieldPaths)) // TODO(jba): make tests pass without this
+ var fts []*pb.DocumentTransform_FieldTransform
+ for _, p := range serverTimestampFieldPaths {
+ fts = append(fts, &pb.DocumentTransform_FieldTransform{
+ FieldPath: p.toServiceFieldPath(),
+ TransformType: requestTimeTransform,
+ })
+ }
+ return &pb.Write{
+ Operation: &pb.Write_Transform{
+ &pb.DocumentTransform{
+ Document: d.Path,
+ FieldTransforms: fts,
+ // TODO(jba): should the transform have the same preconditions as the write?
+ },
+ },
+ CurrentDocument: pc,
+ }
+}
+
+type sentinel int
+
+const (
+ // Delete is used as a value in a call to Update or Set with merge to indicate
+ // that the corresponding key should be deleted.
+ Delete sentinel = iota
+
+ // ServerTimestamp is used as a value in a call to Update to indicate that the
+ // key's value should be set to the time at which the server processed
+ // the request.
+ ServerTimestamp
+)
+
+func (s sentinel) String() string {
+ switch s {
+ case Delete:
+ return "Delete"
+ case ServerTimestamp:
+ return "ServerTimestamp"
+ default:
+ return "<?sentinel?>"
+ }
+}
+
+func isStructOrStructPtr(x interface{}) bool {
+ v := reflect.ValueOf(x)
+ if v.Kind() == reflect.Struct {
+ return true
+ }
+ if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct {
+ return true
+ }
+ return false
+}
+
+// An Update describes an update to a value referred to by a path.
+// An Update should have either a non-empty Path or a non-empty FieldPath,
+// but not both.
+//
+// See DocumentRef.Create for acceptable values.
+// To delete a field, specify firestore.Delete as the value.
+type Update struct {
+ Path string // Will be split on dots, and must not contain any of "~*/[]".
+ FieldPath FieldPath
+ Value interface{}
+}
+
+// An fpv is a pair of validated FieldPath and value.
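+// For example, Update{Path: "c.d", Value: 7} processes to the pair
+// ({"c", "d"}, 7).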
+type fpv struct {
+ fieldPath FieldPath
+ value interface{}
+}
+
+func (u *Update) process() (fpv, error) {
+ if (u.Path != "") == (u.FieldPath != nil) {
+ return fpv{}, fmt.Errorf("firestore: update %+v should have exactly one of Path or FieldPath", u)
+ }
+ fp := u.FieldPath
+ var err error
+ if fp == nil {
+ fp, err = parseDotSeparatedString(u.Path)
+ if err != nil {
+ return fpv{}, err
+ }
+ }
+ if err := fp.validate(); err != nil {
+ return fpv{}, err
+ }
+ return fpv{fp, u.Value}, nil
+}
+
+// Update updates the document. The values at the given
+// field paths are replaced, but other fields of the stored document are untouched.
+func (d *DocumentRef) Update(ctx context.Context, updates []Update, preconds ...Precondition) (*WriteResult, error) {
+ ws, err := d.newUpdatePathWrites(updates, preconds)
+ if err != nil {
+ return nil, err
+ }
+ return d.Parent.c.commit1(ctx, ws)
+}
+
+// Collections returns an iterator over the immediate sub-collections of the document.
+func (d *DocumentRef) Collections(ctx context.Context) *CollectionIterator {
+ client := d.Parent.c
+ it := &CollectionIterator{
+ err: checkTransaction(ctx),
+ client: client,
+ parent: d,
+ it: client.c.ListCollectionIds(
+ withResourceHeader(ctx, client.path()),
+ &pb.ListCollectionIdsRequest{Parent: d.Path}),
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(
+ it.fetch,
+ func() int { return len(it.items) },
+ func() interface{} { b := it.items; it.items = nil; return b })
+ return it
+}
+
+// CollectionIterator is an iterator over sub-collections of a document.
+type CollectionIterator struct {
+ client *Client
+ parent *DocumentRef
+ it *vkit.StringIterator
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+ items []*CollectionRef
+ err error
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *CollectionIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
+
+// Next returns the next result. Its second return value is iterator.Done if there
+// are no more results. Once Next returns Done, all subsequent calls will return
+// Done.
+func (it *CollectionIterator) Next() (*CollectionRef, error) {
+ if err := it.nextFunc(); err != nil {
+ return nil, err
+ }
+ item := it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *CollectionIterator) fetch(pageSize int, pageToken string) (string, error) {
+ if it.err != nil {
+ return "", it.err
+ }
+ return iterFetch(pageSize, pageToken, it.it.PageInfo(), func() error {
+ id, err := it.it.Next()
+ if err != nil {
+ return err
+ }
+ var cr *CollectionRef
+ if it.parent == nil {
+ cr = newTopLevelCollRef(it.client, it.client.path(), id)
+ } else {
+ cr = newCollRefWithParent(it.client, it.parent, id)
+ }
+ it.items = append(it.items, cr)
+ return nil
+ })
+}
+
+// GetAll returns all the collections remaining from the iterator.
+func (it *CollectionIterator) GetAll() ([]*CollectionRef, error) {
+ var crs []*CollectionRef
+ for {
+ cr, err := it.Next()
+ if err == iterator.Done {
+ break
+ }
+ if err != nil {
+ return nil, err
+ }
+ crs = append(crs, cr)
+ }
+ return crs, nil
+}
+
+// Common fetch code for iterators that are backed by vkit iterators.
+// TODO(jba): dedup with same function in logging/logadmin.
+func iterFetch(pageSize int, pageToken string, pi *iterator.PageInfo, next func() error) (string, error) {
+ pi.MaxSize = pageSize
+ pi.Token = pageToken
+ // Get one item, which will fill the buffer.
+ if err := next(); err != nil { + return "", err + } + // Collect the rest of the buffer. + for pi.Remaining() > 0 { + if err := next(); err != nil { + return "", err + } + } + return pi.Token, nil +} + +// Snapshots returns an iterator over snapshots of the document. Each time the document +// changes or is added or deleted, a new snapshot will be generated. +func (d *DocumentRef) Snapshots(ctx context.Context) *DocumentSnapshotIterator { + return &DocumentSnapshotIterator{ + docref: d, + ws: newWatchStreamForDocument(ctx, d), + } +} + +// DocumentSnapshotIterator is an iterator over snapshots of a document. +// Call Next on the iterator to get a snapshot of the document each time it changes. +// Call Stop on the iterator when done. +// +// For an example, see DocumentRef.Snapshots. +type DocumentSnapshotIterator struct { + docref *DocumentRef + ws *watchStream +} + +// Next blocks until the document changes, then returns the DocumentSnapshot for +// the current state of the document. If the document has been deleted, Next +// returns a DocumentSnapshot whose Exists method returns false. +// +// Next never returns iterator.Done unless it is called after Stop. +func (it *DocumentSnapshotIterator) Next() (*DocumentSnapshot, error) { + btree, _, readTime, err := it.ws.nextSnapshot() + if err != nil { + if err == io.EOF { + err = iterator.Done + } + // watchStream's error is sticky, so SnapshotIterator does not need to remember it. + return nil, err + } + if btree.Len() == 0 { // document deleted + return &DocumentSnapshot{Ref: it.docref, ReadTime: readTime}, nil + } + snap, _ := btree.At(0) + return snap.(*DocumentSnapshot), nil +} + +// Stop stops receiving snapshots. +// You should always call Stop when you are done with an iterator, to free up resources. +// It is not safe to call Stop concurrently with Next. +func (it *DocumentSnapshotIterator) Stop() { + it.ws.stop() +} diff --git a/vendor/cloud.google.com/go/firestore/docref_test.go b/vendor/cloud.google.com/go/firestore/docref_test.go new file mode 100644 index 0000000..92d31fd --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/docref_test.go @@ -0,0 +1,312 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package firestore + +import ( + "reflect" + "sort" + "testing" + "time" + + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + + "golang.org/x/net/context" + "google.golang.org/genproto/googleapis/type/latlng" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + writeResultForSet = &WriteResult{UpdateTime: aTime} + commitResponseForSet = &pb.CommitResponse{ + WriteResults: []*pb.WriteResult{{UpdateTime: aTimestamp}}, + } +) + +func TestDocGet(t *testing.T) { + ctx := context.Background() + c, srv := newMock(t) + path := "projects/projectID/databases/(default)/documents/C/a" + pdoc := &pb.Document{ + Name: path, + CreateTime: aTimestamp, + UpdateTime: aTimestamp, + Fields: map[string]*pb.Value{"f": intval(1)}, + } + srv.addRPC(&pb.BatchGetDocumentsRequest{ + Database: c.path(), + Documents: []string{path}, + }, []interface{}{ + &pb.BatchGetDocumentsResponse{ + Result: &pb.BatchGetDocumentsResponse_Found{pdoc}, + ReadTime: aTimestamp2, + }, + }) + ref := c.Collection("C").Doc("a") + gotDoc, err := ref.Get(ctx) + if err != nil { + t.Fatal(err) + } + wantDoc := &DocumentSnapshot{ + Ref: ref, + CreateTime: aTime, + UpdateTime: aTime, + ReadTime: aTime2, + proto: pdoc, + c: c, + } + if !testEqual(gotDoc, wantDoc) { + t.Fatalf("\ngot %+v\nwant %+v", gotDoc, wantDoc) + } + + path2 := "projects/projectID/databases/(default)/documents/C/b" + srv.addRPC( + &pb.BatchGetDocumentsRequest{ + Database: c.path(), + Documents: []string{path2}, + }, []interface{}{ + &pb.BatchGetDocumentsResponse{ + Result: &pb.BatchGetDocumentsResponse_Missing{path2}, + ReadTime: aTimestamp3, + }, + }) + _, err = c.Collection("C").Doc("b").Get(ctx) + if grpc.Code(err) != codes.NotFound { + t.Errorf("got %v, want NotFound", err) + } +} + +func TestDocSet(t *testing.T) { + // Most tests for Set are in the cross-language tests. + ctx := context.Background() + c, srv := newMock(t) + + doc := c.Collection("C").Doc("d") + // Merge with a struct and FieldPaths. + srv.addRPC(&pb.CommitRequest{ + Database: "projects/projectID/databases/(default)", + Writes: []*pb.Write{ + { + Operation: &pb.Write_Update{ + Update: &pb.Document{ + Name: "projects/projectID/databases/(default)/documents/C/d", + Fields: map[string]*pb.Value{ + "*": mapval(map[string]*pb.Value{ + "~": boolval(true), + }), + }, + }, + }, + UpdateMask: &pb.DocumentMask{FieldPaths: []string{"`*`.`~`"}}, + }, + }, + }, commitResponseForSet) + data := struct { + A map[string]bool `firestore:"*"` + }{A: map[string]bool{"~": true}} + wr, err := doc.Set(ctx, data, Merge([]string{"*", "~"})) + if err != nil { + t.Fatal(err) + } + if !testEqual(wr, writeResultForSet) { + t.Errorf("got %v, want %v", wr, writeResultForSet) + } + + // MergeAll cannot be used with structs. + _, err = doc.Set(ctx, data, MergeAll) + if err == nil { + t.Errorf("got nil, want error") + } +} + +func TestDocCreate(t *testing.T) { + // Verify creation with structs. In particular, make sure zero values + // are handled well. + // Other tests for Create are handled by the cross-language tests. 
+ ctx := context.Background() + c, srv := newMock(t) + + type create struct { + Time time.Time + Bytes []byte + Geo *latlng.LatLng + } + srv.addRPC( + &pb.CommitRequest{ + Database: "projects/projectID/databases/(default)", + Writes: []*pb.Write{ + { + Operation: &pb.Write_Update{ + Update: &pb.Document{ + Name: "projects/projectID/databases/(default)/documents/C/d", + Fields: map[string]*pb.Value{ + "Time": tsval(time.Time{}), + "Bytes": bytesval(nil), + "Geo": nullValue, + }, + }, + }, + CurrentDocument: &pb.Precondition{ + ConditionType: &pb.Precondition_Exists{false}, + }, + }, + }, + }, + commitResponseForSet, + ) + _, err := c.Collection("C").Doc("d").Create(ctx, &create{}) + if err != nil { + t.Fatal(err) + } +} + +func TestDocDelete(t *testing.T) { + ctx := context.Background() + c, srv := newMock(t) + srv.addRPC( + &pb.CommitRequest{ + Database: "projects/projectID/databases/(default)", + Writes: []*pb.Write{ + {Operation: &pb.Write_Delete{"projects/projectID/databases/(default)/documents/C/d"}}, + }, + }, + &pb.CommitResponse{ + WriteResults: []*pb.WriteResult{{}}, + }) + wr, err := c.Collection("C").Doc("d").Delete(ctx) + if err != nil { + t.Fatal(err) + } + if !testEqual(wr, &WriteResult{}) { + t.Errorf("got %+v, want %+v", wr, writeResultForSet) + } +} + +var ( + testData = map[string]interface{}{"a": 1} + testFields = map[string]*pb.Value{"a": intval(1)} +) + +// Update is tested by the cross-language tests. + +func TestFPVsFromData(t *testing.T) { + type S struct{ X int } + + for _, test := range []struct { + in interface{} + want []fpv + }{ + { + in: nil, + want: []fpv{{nil, nil}}, + }, + { + in: map[string]interface{}{"a": nil}, + want: []fpv{{[]string{"a"}, nil}}, + }, + { + in: map[string]interface{}{"a": 1}, + want: []fpv{{[]string{"a"}, 1}}, + }, + { + in: map[string]interface{}{ + "a": 1, + "b": map[string]interface{}{"c": 2}, + }, + want: []fpv{{[]string{"a"}, 1}, {[]string{"b", "c"}, 2}}, + }, + { + in: map[string]interface{}{"s": &S{X: 3}}, + want: []fpv{{[]string{"s"}, &S{X: 3}}}, + }, + } { + var got []fpv + fpvsFromData(reflect.ValueOf(test.in), nil, &got) + sort.Sort(byFieldPath(got)) + if !testEqual(got, test.want) { + t.Errorf("%+v: got %v, want %v", test.in, got, test.want) + } + } +} + +type byFieldPath []fpv + +func (b byFieldPath) Len() int { return len(b) } +func (b byFieldPath) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byFieldPath) Less(i, j int) bool { return b[i].fieldPath.less(b[j].fieldPath) } + +func commitRequestForSet() *pb.CommitRequest { + return &pb.CommitRequest{ + Database: "projects/projectID/databases/(default)", + Writes: []*pb.Write{ + { + Operation: &pb.Write_Update{ + Update: &pb.Document{ + Name: "projects/projectID/databases/(default)/documents/C/d", + Fields: testFields, + }, + }, + }, + }, + } +} + +func TestUpdateProcess(t *testing.T) { + for _, test := range []struct { + in Update + want fpv + wantErr bool + }{ + { + in: Update{Path: "a", Value: 1}, + want: fpv{fieldPath: []string{"a"}, value: 1}, + }, + { + in: Update{Path: "c.d", Value: Delete}, + want: fpv{fieldPath: []string{"c", "d"}, value: Delete}, + }, + { + in: Update{FieldPath: []string{"*", "~"}, Value: ServerTimestamp}, + want: fpv{fieldPath: []string{"*", "~"}, value: ServerTimestamp}, + }, + { + in: Update{Path: "*"}, + wantErr: true, // bad rune in path + }, + { + in: Update{Path: "a", FieldPath: []string{"b"}}, + wantErr: true, // both Path and FieldPath + }, + { + in: Update{Value: 1}, + wantErr: true, // neither Path nor FieldPath + }, + { + in: 
+ Update{FieldPath: []string{"", "a"}},
+ wantErr: true, // empty FieldPath component
+ },
+ } {
+ got, err := test.in.process()
+ if test.wantErr {
+ if err == nil {
+ t.Errorf("%+v: got nil, want error", test.in)
+ }
+ } else if err != nil {
+ t.Errorf("%+v: got error %v, want nil", test.in, err)
+ } else if !testEqual(got, test.want) {
+ t.Errorf("%+v: got %+v, want %+v", test.in, got, test.want)
+ }
+ }
+}
diff --git a/vendor/cloud.google.com/go/firestore/document.go b/vendor/cloud.google.com/go/firestore/document.go
new file mode 100644
index 0000000..348327b
--- /dev/null
+++ b/vendor/cloud.google.com/go/firestore/document.go
@@ -0,0 +1,301 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package firestore
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "time"
+
+ pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+
+ "github.com/golang/protobuf/ptypes"
+ tspb "github.com/golang/protobuf/ptypes/timestamp"
+)
+
+// A DocumentSnapshot contains document data and metadata.
+type DocumentSnapshot struct {
+ // The DocumentRef for this document.
+ Ref *DocumentRef
+
+ // Read-only. The time at which the document was created.
+ // Increases monotonically when a document is deleted then
+ // recreated. It can also be compared to values from other documents and
+ // the read time of a query.
+ CreateTime time.Time
+
+ // Read-only. The time at which the document was last changed. This value
+ // is initially set to CreateTime, then increases monotonically with each
+ // change to the document. It can also be compared to values from other
+ // documents and the read time of a query.
+ UpdateTime time.Time
+
+ // Read-only. The time at which the document was read.
+ ReadTime time.Time
+
+ c *Client
+ proto *pb.Document
+}
+
+// Exists reports whether the DocumentSnapshot represents an existing document.
+// Even if Exists returns false, the Ref and ReadTime fields of the DocumentSnapshot
+// are valid.
+func (d *DocumentSnapshot) Exists() bool {
+ return d.proto != nil
+}
+
+// Data returns the DocumentSnapshot's fields as a map.
+// It is equivalent to
+// var m map[string]interface{}
+// d.DataTo(&m)
+// except that it returns nil if the document does not exist.
+func (d *DocumentSnapshot) Data() map[string]interface{} {
+ if !d.Exists() {
+ return nil
+ }
+ m, err := createMapFromValueMap(d.proto.Fields, d.c)
+ // Any error here is a bug in the client.
+ if err != nil {
+ panic(fmt.Sprintf("firestore: %v", err))
+ }
+ return m
+}
+
+// DataTo uses the document's fields to populate p, which can be a pointer to a
+// map[string]interface{} or a pointer to a struct.
+//
+// Firestore field values are converted to Go values as follows:
+// - Null converts to nil.
+// - Bool converts to bool.
+// - String converts to string.
+// - Integer converts to int64. When setting a struct field, any signed or unsigned
+// integer type is permitted except uint64.
+// Overflow is detected and results in an error.
+// - Double converts to float64. When setting a struct field, float32 is permitted.
+// Overflow is detected and results in an error.
+// - Bytes is converted to []byte.
+// - Timestamp converts to time.Time.
+// - GeoPoint converts to latlng.LatLng, where latlng is the package
+// "google.golang.org/genproto/googleapis/type/latlng".
+// - Arrays convert to []interface{}. When setting a struct field, the field
+// may be a slice or array of any type and is populated recursively.
+// Slices are resized to the incoming value's size, while arrays that are too
+// long have excess elements filled with zero values. If the array is too short,
+// excess incoming values will be dropped.
+// - Maps convert to map[string]interface{}. When setting a struct field,
+// maps of key type string and any value type are permitted, and are populated
+// recursively.
+// - References are converted to DocumentRefs.
+//
+// Field names given by struct field tags are observed, as described in
+// DocumentRef.Create.
+//
+// If the document does not exist, DataTo returns a NotFound error.
+func (d *DocumentSnapshot) DataTo(p interface{}) error {
+ if !d.Exists() {
+ return status.Errorf(codes.NotFound, "document %s does not exist", d.Ref.Path)
+ }
+ return setFromProtoValue(p, &pb.Value{&pb.Value_MapValue{&pb.MapValue{d.proto.Fields}}}, d.c)
+}
+
+// DataAt returns the data value denoted by path.
+//
+// The path argument can be a single field or a dot-separated sequence of
+// fields, and must not contain any of the runes "~*/[]". Use DataAtPath instead for
+// such a path.
+//
+// See DocumentSnapshot.DataTo for how Firestore values are converted to Go values.
+//
+// If the document does not exist, DataAt returns a NotFound error.
+func (d *DocumentSnapshot) DataAt(path string) (interface{}, error) {
+ if !d.Exists() {
+ return nil, status.Errorf(codes.NotFound, "document %s does not exist", d.Ref.Path)
+ }
+ fp, err := parseDotSeparatedString(path)
+ if err != nil {
+ return nil, err
+ }
+ return d.DataAtPath(fp)
+}
+
+// DataAtPath returns the data value denoted by the FieldPath fp.
+// If the document does not exist, DataAtPath returns a NotFound error.
+func (d *DocumentSnapshot) DataAtPath(fp FieldPath) (interface{}, error) {
+ if !d.Exists() {
+ return nil, status.Errorf(codes.NotFound, "document %s does not exist", d.Ref.Path)
+ }
+ v, err := valueAtPath(fp, d.proto.Fields)
+ if err != nil {
+ return nil, err
+ }
+ return createFromProtoValue(v, d.c)
+}
+
+// valueAtPath returns the value of m referred to by fp.
+func valueAtPath(fp FieldPath, m map[string]*pb.Value) (*pb.Value, error) {
+ for _, k := range fp[:len(fp)-1] {
+ v := m[k]
+ if v == nil {
+ return nil, fmt.Errorf("firestore: no field %q", k)
+ }
+ mv := v.GetMapValue()
+ if mv == nil {
+ return nil, fmt.Errorf("firestore: value for field %q is not a map", k)
+ }
+ m = mv.Fields
+ }
+ k := fp[len(fp)-1]
+ v := m[k]
+ if v == nil {
+ return nil, fmt.Errorf("firestore: no field %q", k)
+ }
+ return v, nil
+}
+
+// toProtoDocument converts a Go value to a Document proto.
+// Valid values are: map[string]T, struct, or pointer to a valid value.
+// It also returns a list of field paths for DocumentTransform (server timestamp).
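+// For example, map[string]interface{}{"a": 1} becomes a Document whose Fields
+// map has key "a" with the Integer value 1, and no transform paths.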
+func toProtoDocument(x interface{}) (*pb.Document, []FieldPath, error) {
+ if x == nil {
+ return nil, nil, errors.New("firestore: nil document contents")
+ }
+ v := reflect.ValueOf(x)
+ pv, sawTransform, err := toProtoValue(v)
+ if err != nil {
+ return nil, nil, err
+ }
+ var fieldPaths []FieldPath
+ if sawTransform {
+ fieldPaths, err = extractTransformPaths(v, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+ var fields map[string]*pb.Value
+ if pv != nil {
+ m := pv.GetMapValue()
+ if m == nil {
+ return nil, nil, fmt.Errorf("firestore: cannot convert value of type %T into a map", x)
+ }
+ fields = m.Fields
+ }
+ return &pb.Document{Fields: fields}, fieldPaths, nil
+}
+
+func extractTransformPaths(v reflect.Value, prefix FieldPath) ([]FieldPath, error) {
+ switch v.Kind() {
+ case reflect.Map:
+ return extractTransformPathsFromMap(v, prefix)
+ case reflect.Struct:
+ return extractTransformPathsFromStruct(v, prefix)
+ case reflect.Ptr:
+ if v.IsNil() {
+ return nil, nil
+ }
+ return extractTransformPaths(v.Elem(), prefix)
+ case reflect.Interface:
+ if v.NumMethod() == 0 { // empty interface: recurse on its contents
+ return extractTransformPaths(v.Elem(), prefix)
+ }
+ return nil, nil
+ default:
+ return nil, nil
+ }
+}
+
+func extractTransformPathsFromMap(v reflect.Value, prefix FieldPath) ([]FieldPath, error) {
+ var paths []FieldPath
+ for _, k := range v.MapKeys() {
+ sk := k.Interface().(string) // assume keys are strings; checked in toProtoValue
+ path := prefix.with(sk)
+ mi := v.MapIndex(k)
+ if mi.Interface() == ServerTimestamp {
+ paths = append(paths, path)
+ } else {
+ ps, err := extractTransformPaths(mi, path)
+ if err != nil {
+ return nil, err
+ }
+ paths = append(paths, ps...)
+ }
+ }
+ return paths, nil
+}
+
+func extractTransformPathsFromStruct(v reflect.Value, prefix FieldPath) ([]FieldPath, error) {
+ var paths []FieldPath
+ fields, err := fieldCache.Fields(v.Type())
+ if err != nil {
+ return nil, err
+ }
+ for _, f := range fields {
+ fv := v.FieldByIndex(f.Index)
+ path := prefix.with(f.Name)
+ opts := f.ParsedTag.(tagOptions)
+ if opts.serverTimestamp {
+ var isZero bool
+ switch f.Type {
+ case typeOfGoTime:
+ isZero = fv.Interface().(time.Time).IsZero()
+ case reflect.PtrTo(typeOfGoTime):
+ isZero = fv.IsNil() || fv.Elem().Interface().(time.Time).IsZero()
+ default:
+ return nil, fmt.Errorf("firestore: field %s of struct %s with serverTimestamp tag must be of type time.Time or *time.Time",
+ f.Name, v.Type())
+ }
+ if isZero {
+ paths = append(paths, path)
+ }
+ } else {
+ ps, err := extractTransformPaths(fv, path)
+ if err != nil {
+ return nil, err
+ }
+ paths = append(paths, ps...)
+ } + } + return paths, nil +} + +func newDocumentSnapshot(ref *DocumentRef, proto *pb.Document, c *Client, readTime *tspb.Timestamp) (*DocumentSnapshot, error) { + d := &DocumentSnapshot{ + Ref: ref, + c: c, + proto: proto, + } + if proto != nil { + ts, err := ptypes.Timestamp(proto.CreateTime) + if err != nil { + return nil, err + } + d.CreateTime = ts + ts, err = ptypes.Timestamp(proto.UpdateTime) + if err != nil { + return nil, err + } + d.UpdateTime = ts + } + if readTime != nil { + ts, err := ptypes.Timestamp(readTime) + if err != nil { + return nil, err + } + d.ReadTime = ts + } + return d, nil +} diff --git a/vendor/cloud.google.com/go/firestore/document_test.go b/vendor/cloud.google.com/go/firestore/document_test.go new file mode 100644 index 0000000..982b41a --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/document_test.go @@ -0,0 +1,239 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +import ( + "reflect" + "sort" + "testing" + "time" + + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + + tspb "github.com/golang/protobuf/ptypes/timestamp" +) + +func TestToProtoDocument(t *testing.T) { + type s struct{ I int } + + for _, test := range []struct { + in interface{} + want *pb.Document + wantErr bool + }{ + {nil, nil, true}, + {[]int{1}, nil, true}, + {map[string]int{"a": 1}, + &pb.Document{Fields: map[string]*pb.Value{"a": intval(1)}}, + false}, + {s{2}, &pb.Document{Fields: map[string]*pb.Value{"I": intval(2)}}, false}, + {&s{3}, &pb.Document{Fields: map[string]*pb.Value{"I": intval(3)}}, false}, + } { + got, _, gotErr := toProtoDocument(test.in) + if (gotErr != nil) != test.wantErr { + t.Errorf("%v: got error %v, want %t", test.in, gotErr, test.wantErr) + } + if gotErr != nil { + continue + } + if !testEqual(got, test.want) { + t.Errorf("%v: got %v, want %v", test.in, got, test.want) + } + } +} + +func TestNewDocumentSnapshot(t *testing.T) { + c := &Client{ + projectID: "projID", + databaseID: "(database)", + } + docRef := c.Doc("C/a") + in := &pb.Document{ + CreateTime: &tspb.Timestamp{Seconds: 10}, + UpdateTime: &tspb.Timestamp{Seconds: 20}, + Fields: map[string]*pb.Value{"a": intval(1)}, + } + want := &DocumentSnapshot{ + Ref: docRef, + CreateTime: time.Unix(10, 0).UTC(), + UpdateTime: time.Unix(20, 0).UTC(), + ReadTime: aTime, + proto: in, + c: c, + } + got, err := newDocumentSnapshot(docRef, in, c, aTimestamp) + if err != nil { + t.Fatal(err) + } + if !testEqual(got, want) { + t.Errorf("got %+v\nwant %+v", got, want) + } +} + +func TestData(t *testing.T) { + doc := &DocumentSnapshot{ + proto: &pb.Document{ + Fields: map[string]*pb.Value{"a": intval(1), "b": strval("x")}, + }, + } + got := doc.Data() + want := map[string]interface{}{"a": int64(1), "b": "x"} + if !testEqual(got, want) { + t.Errorf("got %#v\nwant %#v", got, want) + } + var got2 map[string]interface{} + if err := doc.DataTo(&got2); err != nil { + t.Fatal(err) + } + if !testEqual(got2, want) { + 
t.Errorf("got %#v\nwant %#v", got2, want) + } + + type s struct { + A int + B string + } + var got3 s + if err := doc.DataTo(&got3); err != nil { + t.Fatal(err) + } + want2 := s{A: 1, B: "x"} + if !testEqual(got3, want2) { + t.Errorf("got %#v\nwant %#v", got3, want2) + } +} + +var testDoc = &DocumentSnapshot{ + proto: &pb.Document{ + Fields: map[string]*pb.Value{ + "a": intval(1), + "b": mapval(map[string]*pb.Value{ + "`": intval(2), + "~": mapval(map[string]*pb.Value{ + "x": intval(3), + }), + }), + }, + }, +} + +func TestDataAt(t *testing.T) { + for _, test := range []struct { + fieldPath string + want interface{} + }{ + {"a", int64(1)}, + {"b.`", int64(2)}, + } { + got, err := testDoc.DataAt(test.fieldPath) + if err != nil { + t.Errorf("%q: %v", test.fieldPath, err) + continue + } + if !testEqual(got, test.want) { + t.Errorf("%q: got %v, want %v", test.fieldPath, got, test.want) + } + } + + for _, bad := range []string{ + "c.~.x", // bad field path + "a.b", // "a" isn't a map + "z.b", // bad non-final key + "b.z", // bad final key + } { + _, err := testDoc.DataAt(bad) + if err == nil { + t.Errorf("%q: got nil, want error", bad) + } + } +} + +func TestDataAtPath(t *testing.T) { + for _, test := range []struct { + fieldPath FieldPath + want interface{} + }{ + {[]string{"a"}, int64(1)}, + {[]string{"b", "`"}, int64(2)}, + {[]string{"b", "~"}, map[string]interface{}{"x": int64(3)}}, + {[]string{"b", "~", "x"}, int64(3)}, + } { + got, err := testDoc.DataAtPath(test.fieldPath) + if err != nil { + t.Errorf("%v: %v", test.fieldPath, err) + continue + } + if !testEqual(got, test.want) { + t.Errorf("%v: got %v, want %v", test.fieldPath, got, test.want) + } + } + + for _, bad := range []FieldPath{ + []string{"c", "", "x"}, // bad field path + []string{"a", "b"}, // "a" isn't a map + []string{"z", "~"}, // bad non-final key + []string{"b", "z"}, // bad final key + } { + _, err := testDoc.DataAtPath(bad) + if err == nil { + t.Errorf("%v: got nil, want error", bad) + } + } +} + +func TestExtractTransformPaths(t *testing.T) { + type S struct { + A time.Time `firestore:",serverTimestamp"` + B time.Time `firestore:",serverTimestamp"` + C *time.Time `firestore:",serverTimestamp"` + D *time.Time `firestore:"d.d,serverTimestamp"` + E *time.Time `firestore:",serverTimestamp"` + F time.Time + G int + } + + m := map[string]interface{}{ + "x": 1, + "y": &S{ + // A is a zero time: included + B: aTime, // not a zero time: excluded + // C is nil: included + D: &time.Time{}, // pointer to a zero time: included + E: &aTime, // pointer to a non-zero time: excluded + // F is a zero time, but does not have the right tag: excluded + G: 15, // not a time.Time + }, + "z": map[string]interface{}{"w": ServerTimestamp}, + } + got, err := extractTransformPaths(reflect.ValueOf(m), nil) + if err != nil { + t.Fatal(err) + } + sort.Sort(byPath(got)) + want := []FieldPath{{"y", "A"}, {"y", "C"}, {"y", "d.d"}, {"z", "w"}} + if !testEqual(got, want) { + t.Errorf("got %#v, want %#v", got, want) + } +} + +func TestExtractTransformPathsErrors(t *testing.T) { + type S struct { + A int `firestore:",serverTimestamp"` + } + _, err := extractTransformPaths(reflect.ValueOf(S{}), nil) + if err == nil { + t.Error("got nil, want error") + } +} diff --git a/vendor/cloud.google.com/go/firestore/examples_test.go b/vendor/cloud.google.com/go/firestore/examples_test.go new file mode 100644 index 0000000..aacdc93 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/examples_test.go @@ -0,0 +1,528 @@ +// Copyright 2017 Google Inc. 
All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// TODO(jba): add Output comments to examples when feasible. + +package firestore_test + +import ( + "fmt" + + "cloud.google.com/go/firestore" + "golang.org/x/net/context" + + "google.golang.org/api/iterator" +) + +func ExampleNewClient() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() // Close client when done. + _ = client // TODO: Use client. +} + +func ExampleClient_Collection() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + coll1 := client.Collection("States") + coll2 := client.Collection("States/NewYork/Cities") + fmt.Println(coll1, coll2) +} + +func ExampleClient_Doc() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + doc1 := client.Doc("States/NewYork") + doc2 := client.Doc("States/NewYork/Cities/Albany") + fmt.Println(doc1, doc2) +} + +func ExampleClient_GetAll() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + docs, err := client.GetAll(ctx, []*firestore.DocumentRef{ + client.Doc("States/NorthCarolina"), + client.Doc("States/SouthCarolina"), + client.Doc("States/WestCarolina"), + client.Doc("States/EastCarolina"), + }) + if err != nil { + // TODO: Handle error. + } + // docs is a slice with four DocumentSnapshots, but the last two are + // nil because there is no West or East Carolina. + fmt.Println(docs) +} + +func ExampleClient_Batch() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + b := client.Batch() + _ = b // TODO: Use batch. +} + +func ExampleWriteBatch_Commit() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + type State struct { + Capital string `firestore:"capital"` + Population float64 `firestore:"pop"` // in millions + } + + ny := client.Doc("States/NewYork") + ca := client.Doc("States/California") + + writeResults, err := client.Batch(). + Create(ny, State{Capital: "Albany", Population: 19.8}). + Set(ca, State{Capital: "Sacramento", Population: 39.14}). + Delete(client.Doc("States/WestDakota")). + Commit(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Println(writeResults) +} + +func ExampleCollectionRef_Add() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. 
+ } + defer client.Close() + + doc, wr, err := client.Collection("Users").Add(ctx, map[string]interface{}{ + "name": "Alice", + "email": "aj@example.com", + }) + if err != nil { + // TODO: Handle error. + } + fmt.Println(doc, wr) +} + +func ExampleCollectionRef_Doc() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + fl := client.Collection("States").Doc("Florida") + ta := client.Collection("States").Doc("Florida/Cities/Tampa") + + fmt.Println(fl, ta) +} + +func ExampleCollectionRef_NewDoc() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + doc := client.Collection("Users").NewDoc() + + fmt.Println(doc) +} + +func ExampleDocumentRef_Collection() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + mi := client.Collection("States").Doc("Michigan") + cities := mi.Collection("Cities") + + fmt.Println(cities) +} + +func ExampleDocumentRef_Create_map() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + wr, err := client.Doc("States/Colorado").Create(ctx, map[string]interface{}{ + "capital": "Denver", + "pop": 5.5, + }) + if err != nil { + // TODO: Handle error. + } + fmt.Println(wr.UpdateTime) +} + +func ExampleDocumentRef_Create_struct() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + type State struct { + Capital string `firestore:"capital"` + Population float64 `firestore:"pop"` // in millions + } + + wr, err := client.Doc("States/Colorado").Create(ctx, State{ + Capital: "Denver", + Population: 5.5, + }) + if err != nil { + // TODO: Handle error. + } + fmt.Println(wr.UpdateTime) +} + +func ExampleDocumentRef_Set() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + // Overwrite the document with the given data. Any other fields currently + // in the document will be removed. + wr, err := client.Doc("States/Alabama").Set(ctx, map[string]interface{}{ + "capital": "Montgomery", + "pop": 4.9, + }) + if err != nil { + // TODO: Handle error. + } + fmt.Println(wr.UpdateTime) +} + +func ExampleDocumentRef_Set_merge() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + // Overwrite only the fields in the map; preserve all others. + _, err = client.Doc("States/Alabama").Set(ctx, map[string]interface{}{ + "pop": 5.2, + }, firestore.MergeAll) + if err != nil { + // TODO: Handle error. + } + + type State struct { + Capital string `firestore:"capital"` + Population float64 `firestore:"pop"` // in millions + } + + // To do a merging Set with struct data, specify the exact fields to overwrite. + // MergeAll is disallowed here, because it would probably be a mistake: the "capital" + // field would be overwritten with the empty string. + _, err = client.Doc("States/Alabama").Set(ctx, State{Population: 5.2}, firestore.Merge([]string{"pop"})) + if err != nil { + // TODO: Handle error. 
+ } +} + +func ExampleDocumentRef_Update() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + tenn := client.Doc("States/Tennessee") + wr, err := tenn.Update(ctx, []firestore.Update{ + {Path: "pop", Value: 6.6}, + {FieldPath: []string{".", "*", "/"}, Value: "odd"}, + }) + if err != nil { + // TODO: Handle error. + } + fmt.Println(wr.UpdateTime) +} + +func ExampleDocumentRef_Delete() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + // Oops, Ontario is a Canadian province... + if _, err = client.Doc("States/Ontario").Delete(ctx); err != nil { + // TODO: Handle error. + } +} + +func ExampleDocumentRef_Get() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + docsnap, err := client.Doc("States/Ohio").Get(ctx) + if err != nil { + // TODO: Handle error. + } + _ = docsnap // TODO: Use DocumentSnapshot. +} + +func ExampleDocumentRef_Snapshots() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + iter := client.Doc("States/Idaho").Snapshots(ctx) + defer iter.Stop() + for { + docsnap, err := iter.Next() + if err != nil { + // TODO: Handle error. + } + _ = docsnap // TODO: Use DocumentSnapshot. + } +} + +func ExampleDocumentSnapshot_Data() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + docsnap, err := client.Doc("States/Ohio").Get(ctx) + if err != nil { + // TODO: Handle error. + } + ohioMap := docsnap.Data() + fmt.Println(ohioMap["capital"]) +} + +func ExampleDocumentSnapshot_DataAt() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + docsnap, err := client.Doc("States/Ohio").Get(ctx) + if err != nil { + // TODO: Handle error. + } + cap, err := docsnap.DataAt("capital") + if err != nil { + // TODO: Handle error. + } + fmt.Println(cap) +} + +func ExampleDocumentSnapshot_DataAtPath() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + docsnap, err := client.Doc("States/Ohio").Get(ctx) + if err != nil { + // TODO: Handle error. + } + pop, err := docsnap.DataAtPath([]string{"capital", "population"}) + if err != nil { + // TODO: Handle error. + } + fmt.Println(pop) +} + +func ExampleDocumentSnapshot_DataTo() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + docsnap, err := client.Doc("States/Ohio").Get(ctx) + if err != nil { + // TODO: Handle error. + } + + type State struct { + Capital string `firestore:"capital"` + Population float64 `firestore:"pop"` // in millions + } + + var s State + if err := docsnap.DataTo(&s); err != nil { + // TODO: Handle error. + } + fmt.Println(s) +} + +func ExampleQuery_Documents() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + q := client.Collection("States").Select("pop"). 
+ Where("pop", ">", 10). + OrderBy("pop", firestore.Desc). + Limit(10) + iter1 := q.Documents(ctx) + _ = iter1 // TODO: Use iter1. + + // You can call Documents directly on a CollectionRef as well. + iter2 := client.Collection("States").Documents(ctx) + _ = iter2 // TODO: Use iter2. +} + +// This example is just like the one above, but illustrates +// how to use the XXXPath methods of Query for field paths +// that can't be expressed as a dot-separated string. +func ExampleQuery_Documents_path_methods() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + q := client.Collection("Unusual").SelectPaths([]string{"*"}, []string{"[~]"}). + WherePath([]string{"/"}, ">", 10). + OrderByPath([]string{"/"}, firestore.Desc). + Limit(10) + iter1 := q.Documents(ctx) + _ = iter1 // TODO: Use iter1. + + // You can call Documents directly on a CollectionRef as well. + iter2 := client.Collection("States").Documents(ctx) + _ = iter2 // TODO: Use iter2. +} + +func ExampleDocumentIterator_Next() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + q := client.Collection("States"). + Where("pop", ">", 10). + OrderBy("pop", firestore.Desc) + iter := q.Documents(ctx) + defer iter.Stop() + for { + doc, err := iter.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(doc.Data()) + } +} + +func ExampleDocumentIterator_GetAll() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + q := client.Collection("States"). + Where("pop", ">", 10). + OrderBy("pop", firestore.Desc). + Limit(10) // a good idea with GetAll, to avoid filling memory + docs, err := q.Documents(ctx).GetAll() + if err != nil { + // TODO: Handle error. + } + for _, doc := range docs { + fmt.Println(doc.Data()) + } +} + +func ExampleClient_RunTransaction() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + nm := client.Doc("States/NewMexico") + err = client.RunTransaction(ctx, func(ctx context.Context, tx *firestore.Transaction) error { + doc, err := tx.Get(nm) // tx.Get, NOT nm.Get! + if err != nil { + return err + } + pop, err := doc.DataAt("pop") + if err != nil { + return err + } + return tx.Update(nm, []firestore.Update{{Path: "pop", Value: pop.(float64) + 0.2}}) + }) + if err != nil { + // TODO: Handle error. + } +} diff --git a/vendor/cloud.google.com/go/firestore/fieldpath.go b/vendor/cloud.google.com/go/firestore/fieldpath.go new file mode 100644 index 0000000..5e24f49 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/fieldpath.go @@ -0,0 +1,275 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package firestore
+
+import (
+    "bytes"
+    "errors"
+    "fmt"
+    "reflect"
+    "regexp"
+    "sort"
+    "strings"
+
+    "cloud.google.com/go/internal/atomiccache"
+    "cloud.google.com/go/internal/fields"
+    pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
+)
+
+// A FieldPath is a non-empty sequence of non-empty fields that reference a value.
+//
+// A FieldPath value should only be necessary if one of the field names contains
+// one of the runes ".~*/[]". Most methods accept a simpler form of field path
+// as a string in which the individual fields are separated by dots.
+// For example,
+//    []string{"a", "b"}
+// is equivalent to the string form
+//    "a.b"
+// but
+//    []string{"*"}
+// has no equivalent string form.
+type FieldPath []string
+
+// parseDotSeparatedString constructs a FieldPath from a string that separates
+// path components with dots. Other than splitting at dots and checking for invalid
+// characters, it ignores everything else about the string,
+// including attempts to quote field path components. So "a.`b.c`.d" is parsed into
+// four parts, "a", "`b", "c`" and "d".
+func parseDotSeparatedString(s string) (FieldPath, error) {
+    const invalidRunes = "~*/[]"
+    if strings.ContainsAny(s, invalidRunes) {
+        return nil, fmt.Errorf("firestore: %q contains an invalid rune (one of %s)", s, invalidRunes)
+    }
+    fp := FieldPath(strings.Split(s, "."))
+    if err := fp.validate(); err != nil {
+        return nil, err
+    }
+    return fp, nil
+}
+
+func (fp1 FieldPath) equal(fp2 FieldPath) bool {
+    if len(fp1) != len(fp2) {
+        return false
+    }
+    for i, c1 := range fp1 {
+        if c1 != fp2[i] {
+            return false
+        }
+    }
+    return true
+}
+
+func (fp1 FieldPath) prefixOf(fp2 FieldPath) bool {
+    return len(fp1) <= len(fp2) && fp1.equal(fp2[:len(fp1)])
+}
+
+// Lexicographic ordering.
+func (fp1 FieldPath) less(fp2 FieldPath) bool {
+    for i := range fp1 {
+        switch {
+        case i >= len(fp2):
+            return false
+        case fp1[i] < fp2[i]:
+            return true
+        case fp1[i] > fp2[i]:
+            return false
+        }
+    }
+    // fp1 and fp2 are equal up to len(fp1).
+    return len(fp1) < len(fp2)
+}
+
+// validate checks the validity of fp and returns an error if it is invalid.
+func (fp FieldPath) validate() error {
+    if len(fp) == 0 {
+        return errors.New("firestore: empty field path")
+    }
+    for _, c := range fp {
+        if len(c) == 0 {
+            return errors.New("firestore: empty component in field path")
+        }
+    }
+    return nil
+}
+
+// with creates a new FieldPath consisting of fp followed by k.
+func (fp FieldPath) with(k string) FieldPath {
+    r := make(FieldPath, len(fp), len(fp)+1)
+    copy(r, fp)
+    return append(r, k)
+}
+
+// concat creates a new FieldPath consisting of fp1 followed by fp2.
+func (fp1 FieldPath) concat(fp2 FieldPath) FieldPath {
+    r := make(FieldPath, len(fp1)+len(fp2))
+    copy(r, fp1)
+    copy(r[len(fp1):], fp2)
+    return r
+}
+
+// in reports whether fp is equal to one of the fps.
+func (fp FieldPath) in(fps []FieldPath) bool {
+    for _, e := range fps {
+        if fp.equal(e) {
+            return true
+        }
+    }
+    return false
+}
+
+// checkNoDupOrPrefix checks whether any FieldPath is a prefix of (or equal to)
+// another.
+// It modifies the order of FieldPaths in its argument (via sorting).
+func checkNoDupOrPrefix(fps []FieldPath) error {
+    // Sort fps lexicographically.
+    sort.Sort(byPath(fps))
+    // Check adjacent pairs for prefix.
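+    // After the sort, any path that is a prefix of another path in fps is
+    // also a prefix of its immediate successor, so scanning adjacent pairs
+    // is enough to catch every duplicate or prefix violation.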
+    for i := 1; i < len(fps); i++ {
+        if fps[i-1].prefixOf(fps[i]) {
+            return fmt.Errorf("field path %v cannot be used in the same update as %v", fps[i-1], fps[i])
+        }
+    }
+    return nil
+}
+
+type byPath []FieldPath
+
+func (b byPath) Len() int           { return len(b) }
+func (b byPath) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
+func (b byPath) Less(i, j int) bool { return b[i].less(b[j]) }
+
+// setAtPath sets val at the location in m specified by fp, creating sub-maps as
+// needed. m must not be nil. fp is assumed to be valid.
+func setAtPath(m map[string]*pb.Value, fp FieldPath, val *pb.Value) {
+    if val == nil {
+        return
+    }
+    if len(fp) == 1 {
+        m[fp[0]] = val
+    } else {
+        v, ok := m[fp[0]]
+        if !ok {
+            v = &pb.Value{&pb.Value_MapValue{&pb.MapValue{map[string]*pb.Value{}}}}
+            m[fp[0]] = v
+        }
+        // The type assertion below cannot fail, because setAtPath is only called
+        // with either an empty map or one filled by setAtPath itself, and the
+        // set of FieldPaths it is called with has been checked to make sure that
+        // no path is the prefix of any other.
+        setAtPath(v.GetMapValue().Fields, fp[1:], val)
+    }
+}
+
+// getAtPath gets the value in data referred to by fp. The data argument can
+// be a map or a struct.
+// Compare with valueAtPath, which does the same thing for a document.
+func getAtPath(v reflect.Value, fp FieldPath) (interface{}, error) {
+    var err error
+    for _, k := range fp {
+        v, err = getAtField(v, k)
+        if err != nil {
+            return nil, err
+        }
+    }
+    return v.Interface(), nil
+}
+
+// getAtField returns the equivalent of v[k], if v is a map, or v.k if v is a struct.
+func getAtField(v reflect.Value, k string) (reflect.Value, error) {
+    switch v.Kind() {
+    case reflect.Map:
+        if r := v.MapIndex(reflect.ValueOf(k)); r.IsValid() {
+            return r, nil
+        }
+
+    case reflect.Struct:
+        fm, err := fieldMap(v.Type())
+        if err != nil {
+            return reflect.Value{}, err
+        }
+        if f, ok := fm[k]; ok {
+            return v.FieldByIndex(f.Index), nil
+        }
+
+    case reflect.Interface:
+        return getAtField(v.Elem(), k)
+
+    case reflect.Ptr:
+        return getAtField(v.Elem(), k)
+    }
+    return reflect.Value{}, fmt.Errorf("firestore: no field %q for value %#v", k, v)
+}
+
+// fieldMapCache holds maps from Firestore field name to struct field,
+// keyed by struct type.
+// TODO(jba): replace with sync.Map for Go 1.9.
+var fieldMapCache atomiccache.Cache
+
+func fieldMap(t reflect.Type) (map[string]fields.Field, error) {
+    x := fieldMapCache.Get(t, func() interface{} {
+        fieldList, err := fieldCache.Fields(t)
+        if err != nil {
+            return err
+        }
+        m := map[string]fields.Field{}
+        for _, f := range fieldList {
+            m[f.Name] = f
+        }
+        return m
+    })
+    if err, ok := x.(error); ok {
+        return nil, err
+    }
+    return x.(map[string]fields.Field), nil
+}
+
+// toServiceFieldPath converts fp to the form required by the Firestore service.
+// It assumes fp has been validated.
+func (fp FieldPath) toServiceFieldPath() string {
+    cs := make([]string, len(fp))
+    for i, c := range fp {
+        cs[i] = toServiceFieldPathComponent(c)
+    }
+    return strings.Join(cs, ".")
+}
+
+func toServiceFieldPaths(fps []FieldPath) []string {
+    var sfps []string
+    for _, fp := range fps {
+        sfps = append(sfps, fp.toServiceFieldPath())
+    }
+    return sfps
+}
+
+// Google SQL syntax for an unquoted field.
+var unquotedFieldRegexp = regexp.MustCompile("^[A-Za-z_][A-Za-z_0-9]*$")
+
+// toServiceFieldPathComponent returns a string that represents key and is a valid
+// field path component.
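+// For example, "clam_chowder23" is already a valid unquoted component and is
+// returned unchanged, while "23skidoo" becomes "`23skidoo`" and "bak`tik"
+// becomes "`bak\`tik`": anything not matching the unquoted syntax is wrapped
+// in backquotes, with embedded backquotes and backslashes escaped.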
+func toServiceFieldPathComponent(key string) string { + if unquotedFieldRegexp.MatchString(key) { + return key + } + var buf bytes.Buffer + buf.WriteRune('`') + for _, r := range key { + if r == '`' || r == '\\' { + buf.WriteRune('\\') + } + buf.WriteRune(r) + } + buf.WriteRune('`') + return buf.String() +} diff --git a/vendor/cloud.google.com/go/firestore/fieldpath_test.go b/vendor/cloud.google.com/go/firestore/fieldpath_test.go new file mode 100644 index 0000000..7c15a00 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/fieldpath_test.go @@ -0,0 +1,232 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +import ( + "reflect" + "strings" + "testing" + + "cloud.google.com/go/internal/testutil" +) + +func TestFieldPathValidate(t *testing.T) { + for _, in := range [][]string{nil, []string{}, []string{"a", "", "b"}} { + if err := FieldPath(in).validate(); err == nil { + t.Errorf("%v: want error, got nil", in) + } + } +} + +func TestFieldPathLess(t *testing.T) { + for _, test := range []struct { + in1, in2 string + want bool + }{ + {"a b", "a b", false}, + {"a", "b", true}, + {"b", "a", false}, + {"a", "a b", true}, + {"a b", "a", false}, + {"a b c", "a b d", true}, + {"a b d", "a b c", false}, + } { + fp1 := FieldPath(strings.Fields(test.in1)) + fp2 := FieldPath(strings.Fields(test.in2)) + got := fp1.less(fp2) + if got != test.want { + t.Errorf("%q.less(%q): got %t, want %t", test.in1, test.in2, got, test.want) + } + } +} + +func TestCheckForPrefix(t *testing.T) { + for _, test := range []struct { + in []string // field paths as space-separated strings + wantErr bool + }{ + {in: []string{"a", "b", "c"}, wantErr: false}, + {in: []string{"a b", "b", "c d"}, wantErr: false}, + {in: []string{"a b", "a c", "a d"}, wantErr: false}, + {in: []string{"a b", "b", "b d"}, wantErr: true}, + {in: []string{"a b", "b", "b d"}, wantErr: true}, + {in: []string{"b c d", "c d", "b c"}, wantErr: true}, + } { + var fps []FieldPath + for _, s := range test.in { + fps = append(fps, strings.Fields(s)) + } + err := checkNoDupOrPrefix(fps) + if got, want := (err != nil), test.wantErr; got != want { + t.Errorf("%#v: got '%v', want %t", test.in, err, want) + } + } +} + +func TestGetAtPath(t *testing.T) { + type S struct { + X int + Y int `firestore:"y"` + M map[string]interface{} + Next *S + } + + const fail = "ERROR" // value for expected error + + for _, test := range []struct { + val interface{} + fp FieldPath + want interface{} + }{ + { + val: map[string]int(nil), + fp: nil, + want: map[string]int(nil), + }, + { + val: 1, + fp: nil, + want: 1, + }, + { + val: 1, + fp: []string{"a"}, + want: fail, + }, + { + val: map[string]int{"a": 2}, + fp: []string{"a"}, + want: 2, + }, + { + val: map[string]int{"a": 2}, + fp: []string{"b"}, + want: fail, + }, + { + val: map[string]interface{}{"a": map[string]int{"b": 3}}, + fp: []string{"a", "b"}, + want: 3, + }, + { + val: map[string]interface{}{"a": map[string]int{"b": 3}}, + fp: 
[]string{"a", "b", "c"}, + want: fail, + }, + { + val: S{X: 1, Y: 2}, + fp: nil, + want: S{X: 1, Y: 2}, + }, + { + val: S{X: 1, Y: 2}, + fp: []string{"X"}, + want: 1, + }, + { + val: S{X: 1, Y: 2}, + fp: []string{"Y"}, + want: fail, // because Y is tagged with name "y" + }, + { + val: S{X: 1, Y: 2}, + fp: []string{"y"}, + want: 2, + }, + { + val: &S{X: 1}, + fp: []string{"X"}, + want: 1, + }, + { + val: &S{X: 1, Next: nil}, + fp: []string{"Next"}, + want: (*S)(nil), + }, + { + val: &S{X: 1, Next: nil}, + fp: []string{"Next", "Next"}, + want: fail, + }, + { + val: map[string]S{"a": S{X: 1, Y: 2}}, + fp: []string{"a", "y"}, + want: 2, + }, + { + val: map[string]S{"a": S{X: 1, Y: 2}}, + fp: []string{"a", "z"}, + want: fail, + }, + { + val: map[string]*S{ + "a": &S{ + M: map[string]interface{}{ + "b": S{ + Next: &S{ + X: 17, + }, + }, + }, + }, + }, + fp: []string{"a", "M", "b", "Next", "X"}, + want: 17, + }, + } { + got, err := getAtPath(reflect.ValueOf(test.val), test.fp) + if err != nil && test.want != fail { + t.Errorf("%+v: got error <%v>, want nil", test, err) + } + if err == nil && !testutil.Equal(got, test.want) { + t.Errorf("%+v: got %v, want %v, want nil", test, got, test.want) + } + } +} + +func TestToServiceFieldPath(t *testing.T) { + for _, test := range []struct { + in FieldPath + want string + }{ + {[]string{"a"}, "a"}, + {[]string{"a", "b"}, "a.b"}, + {[]string{"a.", "[b*", "c2"}, "`a.`.`[b*`.c2"}, + {[]string{"`a", `b\`}, "`\\`a`.`b\\\\`"}, + } { + got := test.in.toServiceFieldPath() + if got != test.want { + t.Errorf("%v: got %s, want %s", test.in, got, test.want) + } + } +} + +func TestToServiceFieldPathComponent(t *testing.T) { + for _, test := range []struct { + in, want string + }{ + {"", "``"}, + {"clam_chowder23", "clam_chowder23"}, + {"23skidoo", "`23skidoo`"}, + {"bak`tik", "`bak\\`tik`"}, + {"a\\b", "`a\\\\b`"}, + {"dots.are.confusing", "`dots.are.confusing`"}, + } { + got := toServiceFieldPathComponent(test.in) + if got != test.want { + t.Errorf("%q: got %q, want %q", test.in, got, test.want) + } + } +} diff --git a/vendor/cloud.google.com/go/firestore/from_value.go b/vendor/cloud.google.com/go/firestore/from_value.go new file mode 100644 index 0000000..e3d79bf --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/from_value.go @@ -0,0 +1,431 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +import ( + "errors" + "fmt" + "reflect" + "strings" + + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + + "github.com/golang/protobuf/ptypes" +) + +func setFromProtoValue(x interface{}, vproto *pb.Value, c *Client) error { + v := reflect.ValueOf(x) + if v.Kind() != reflect.Ptr || v.IsNil() { + return errors.New("firestore: nil or not a pointer") + } + return setReflectFromProtoValue(v.Elem(), vproto, c) +} + +// setReflectFromProtoValue sets v from a Firestore Value. +// v must be a settable value. 
+func setReflectFromProtoValue(v reflect.Value, vproto *pb.Value, c *Client) error { + typeErr := func() error { + return fmt.Errorf("firestore: cannot set type %s to %s", v.Type(), typeString(vproto)) + } + + val := vproto.ValueType + // A Null value sets anything nullable to nil, and has no effect + // on anything else. + if _, ok := val.(*pb.Value_NullValue); ok { + switch v.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + v.Set(reflect.Zero(v.Type())) + } + return nil + } + + // Handle special types first. + switch v.Type() { + case typeOfByteSlice: + x, ok := val.(*pb.Value_BytesValue) + if !ok { + return typeErr() + } + v.SetBytes(x.BytesValue) + return nil + + case typeOfGoTime: + x, ok := val.(*pb.Value_TimestampValue) + if !ok { + return typeErr() + } + t, err := ptypes.Timestamp(x.TimestampValue) + if err != nil { + return err + } + v.Set(reflect.ValueOf(t)) + return nil + + case typeOfProtoTimestamp: + x, ok := val.(*pb.Value_TimestampValue) + if !ok { + return typeErr() + } + v.Set(reflect.ValueOf(x.TimestampValue)) + return nil + + case typeOfLatLng: + x, ok := val.(*pb.Value_GeoPointValue) + if !ok { + return typeErr() + } + v.Set(reflect.ValueOf(x.GeoPointValue)) + return nil + + case typeOfDocumentRef: + x, ok := val.(*pb.Value_ReferenceValue) + if !ok { + return typeErr() + } + dr, err := pathToDoc(x.ReferenceValue, c) + if err != nil { + return err + } + v.Set(reflect.ValueOf(dr)) + return nil + } + + switch v.Kind() { + case reflect.Bool: + x, ok := val.(*pb.Value_BooleanValue) + if !ok { + return typeErr() + } + v.SetBool(x.BooleanValue) + + case reflect.String: + x, ok := val.(*pb.Value_StringValue) + if !ok { + return typeErr() + } + v.SetString(x.StringValue) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + var i int64 + switch x := val.(type) { + case *pb.Value_IntegerValue: + i = x.IntegerValue + case *pb.Value_DoubleValue: + f := x.DoubleValue + i = int64(f) + if float64(i) != f { + return fmt.Errorf("firestore: float %f does not fit into %s", f, v.Type()) + } + default: + return typeErr() + } + if v.OverflowInt(i) { + return overflowErr(v, i) + } + v.SetInt(i) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32: + var u uint64 + switch x := val.(type) { + case *pb.Value_IntegerValue: + u = uint64(x.IntegerValue) + case *pb.Value_DoubleValue: + f := x.DoubleValue + u = uint64(f) + if float64(u) != f { + return fmt.Errorf("firestore: float %f does not fit into %s", f, v.Type()) + } + default: + return typeErr() + } + if v.OverflowUint(u) { + return overflowErr(v, u) + } + v.SetUint(u) + + case reflect.Float32, reflect.Float64: + var f float64 + switch x := val.(type) { + case *pb.Value_DoubleValue: + f = x.DoubleValue + case *pb.Value_IntegerValue: + f = float64(x.IntegerValue) + if int64(f) != x.IntegerValue { + return overflowErr(v, x.IntegerValue) + } + default: + return typeErr() + } + if v.OverflowFloat(f) { + return overflowErr(v, f) + } + v.SetFloat(f) + + case reflect.Slice: + x, ok := val.(*pb.Value_ArrayValue) + if !ok { + return typeErr() + } + vals := x.ArrayValue.Values + vlen := v.Len() + xlen := len(vals) + // Make a slice of the right size, avoiding allocation if possible. 
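+ // A destination that is too short is replaced by a newly allocated slice;
+ // one that is too long is truncated in place so its backing array is reused.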
+ switch { + case vlen < xlen: + v.Set(reflect.MakeSlice(v.Type(), xlen, xlen)) + case vlen > xlen: + v.SetLen(xlen) + } + return populateRepeated(v, vals, xlen, c) + + case reflect.Array: + x, ok := val.(*pb.Value_ArrayValue) + if !ok { + return typeErr() + } + vals := x.ArrayValue.Values + xlen := len(vals) + vlen := v.Len() + minlen := vlen + // Set extra elements to their zero value. + if vlen > xlen { + z := reflect.Zero(v.Type().Elem()) + for i := xlen; i < vlen; i++ { + v.Index(i).Set(z) + } + minlen = xlen + } + return populateRepeated(v, vals, minlen, c) + + case reflect.Map: + x, ok := val.(*pb.Value_MapValue) + if !ok { + return typeErr() + } + return populateMap(v, x.MapValue.Fields, c) + + case reflect.Ptr: + // If the pointer is nil, set it to a zero value. + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + return setReflectFromProtoValue(v.Elem(), vproto, c) + + case reflect.Struct: + x, ok := val.(*pb.Value_MapValue) + if !ok { + return typeErr() + } + return populateStruct(v, x.MapValue.Fields, c) + + case reflect.Interface: + if v.NumMethod() == 0 { // empty interface + // If v holds a pointer, set the pointer. + if !v.IsNil() && v.Elem().Kind() == reflect.Ptr { + return setReflectFromProtoValue(v.Elem(), vproto, c) + } + // Otherwise, create a fresh value. + x, err := createFromProtoValue(vproto, c) + if err != nil { + return err + } + v.Set(reflect.ValueOf(x)) + return nil + } + // Any other kind of interface is an error. + fallthrough + + default: + return fmt.Errorf("firestore: cannot set type %s", v.Type()) + } + return nil +} + +// populateRepeated sets the first n elements of vr, which must be a slice or +// array, to the corresponding elements of vals. +func populateRepeated(vr reflect.Value, vals []*pb.Value, n int, c *Client) error { + for i := 0; i < n; i++ { + if err := setReflectFromProtoValue(vr.Index(i), vals[i], c); err != nil { + return err + } + } + return nil +} + +// populateMap sets the elements of vm, which must be a map, from the +// corresponding elements of pm. +// +// Since a map value is not settable, this function always creates a new +// element for each corresponding map key. Existing values of vm are +// overwritten. This happens even if the map value is something like a pointer +// to a struct, where we could in theory populate the existing struct value +// instead of discarding it. This behavior matches encoding/json. +func populateMap(vm reflect.Value, pm map[string]*pb.Value, c *Client) error { + t := vm.Type() + if t.Key().Kind() != reflect.String { + return errors.New("firestore: map key type is not string") + } + if vm.IsNil() { + vm.Set(reflect.MakeMap(t)) + } + et := t.Elem() + for k, vproto := range pm { + el := reflect.New(et).Elem() + if err := setReflectFromProtoValue(el, vproto, c); err != nil { + return err + } + vm.SetMapIndex(reflect.ValueOf(k), el) + } + return nil +} + +// createMapFromValueMap creates a fresh map and populates it with pm. +func createMapFromValueMap(pm map[string]*pb.Value, c *Client) (map[string]interface{}, error) { + m := map[string]interface{}{} + for k, pv := range pm { + v, err := createFromProtoValue(pv, c) + if err != nil { + return nil, err + } + m[k] = v + } + return m, nil +} + +// populateStruct sets the fields of vs, which must be a struct, from +// the matching elements of pm. 
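+// Keys of pm that do not correspond to any struct field are silently
+// ignored, as encoding/json does for unknown object keys.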
+func populateStruct(vs reflect.Value, pm map[string]*pb.Value, c *Client) error { + fields, err := fieldCache.Fields(vs.Type()) + if err != nil { + return err + } + for k, vproto := range pm { + f := fields.Match(k) + if f == nil { + continue + } + if err := setReflectFromProtoValue(vs.FieldByIndex(f.Index), vproto, c); err != nil { + return fmt.Errorf("%s.%s: %v", vs.Type(), f.Name, err) + } + } + return nil +} + +func createFromProtoValue(vproto *pb.Value, c *Client) (interface{}, error) { + switch v := vproto.ValueType.(type) { + case *pb.Value_NullValue: + return nil, nil + case *pb.Value_BooleanValue: + return v.BooleanValue, nil + case *pb.Value_IntegerValue: + return v.IntegerValue, nil + case *pb.Value_DoubleValue: + return v.DoubleValue, nil + case *pb.Value_TimestampValue: + return ptypes.Timestamp(v.TimestampValue) + case *pb.Value_StringValue: + return v.StringValue, nil + case *pb.Value_BytesValue: + return v.BytesValue, nil + case *pb.Value_ReferenceValue: + return pathToDoc(v.ReferenceValue, c) + case *pb.Value_GeoPointValue: + return v.GeoPointValue, nil + + case *pb.Value_ArrayValue: + vals := v.ArrayValue.Values + ret := make([]interface{}, len(vals)) + for i, v := range vals { + r, err := createFromProtoValue(v, c) + if err != nil { + return nil, err + } + ret[i] = r + } + return ret, nil + + case *pb.Value_MapValue: + fields := v.MapValue.Fields + ret := make(map[string]interface{}, len(fields)) + for k, v := range fields { + r, err := createFromProtoValue(v, c) + if err != nil { + return nil, err + } + ret[k] = r + } + return ret, nil + + default: + return nil, fmt.Errorf("firestore: unknown value type %T", v) + } +} + +// Convert a document path to a DocumentRef. +func pathToDoc(docPath string, c *Client) (*DocumentRef, error) { + projID, dbID, docIDs, err := parseDocumentPath(docPath) + if err != nil { + return nil, err + } + parentResourceName := fmt.Sprintf("projects/%s/databases/%s", projID, dbID) + _, doc := c.idsToRef(docIDs, parentResourceName) + return doc, nil +} + +// A document path should be of the form "projects/P/databases/D/documents/coll1/doc1/coll2/doc2/...". 
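+// A path with an odd number of components after "documents" names a
+// collection rather than a document and is rejected.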
+func parseDocumentPath(path string) (projectID, databaseID string, docPath []string, err error) { + parts := strings.Split(path, "/") + if len(parts) < 6 || parts[0] != "projects" || parts[2] != "databases" || parts[4] != "documents" { + return "", "", nil, fmt.Errorf("firestore: malformed document path %q", path) + } + docp := parts[5:] + if len(docp)%2 != 0 { + return "", "", nil, fmt.Errorf("firestore: path %q refers to collection, not document", path) + } + return parts[1], parts[3], docp, nil +} + +func typeString(vproto *pb.Value) string { + switch vproto.ValueType.(type) { + case *pb.Value_NullValue: + return "null" + case *pb.Value_BooleanValue: + return "bool" + case *pb.Value_IntegerValue: + return "int" + case *pb.Value_DoubleValue: + return "float" + case *pb.Value_TimestampValue: + return "timestamp" + case *pb.Value_StringValue: + return "string" + case *pb.Value_BytesValue: + return "bytes" + case *pb.Value_ReferenceValue: + return "reference" + case *pb.Value_GeoPointValue: + return "GeoPoint" + case *pb.Value_MapValue: + return "map" + case *pb.Value_ArrayValue: + return "array" + default: + return "" + } +} + +func overflowErr(v reflect.Value, x interface{}) error { + return fmt.Errorf("firestore: value %v overflows type %s", x, v.Type()) +} diff --git a/vendor/cloud.google.com/go/firestore/from_value_test.go b/vendor/cloud.google.com/go/firestore/from_value_test.go new file mode 100644 index 0000000..1327fb4 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/from_value_test.go @@ -0,0 +1,550 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package firestore + +import ( + "encoding/json" + "fmt" + "io" + "math" + "reflect" + "strings" + "testing" + "time" + + ts "github.com/golang/protobuf/ptypes/timestamp" + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + + "google.golang.org/genproto/googleapis/type/latlng" +) + +var ( + tm = time.Date(2016, 12, 25, 0, 0, 0, 123456789, time.UTC) + ll = &latlng.LatLng{Latitude: 20, Longitude: 30} + ptm = &ts.Timestamp{12345, 67890} +) + +func TestCreateFromProtoValue(t *testing.T) { + for _, test := range []struct { + in *pb.Value + want interface{} + }{ + {in: nullValue, want: nil}, + {in: boolval(true), want: true}, + {in: intval(3), want: int64(3)}, + {in: floatval(1.5), want: 1.5}, + {in: strval("str"), want: "str"}, + {in: tsval(tm), want: tm}, + { + in: bytesval([]byte{1, 2}), + want: []byte{1, 2}, + }, + { + in: &pb.Value{&pb.Value_GeoPointValue{ll}}, + want: ll, + }, + { + in: arrayval(intval(1), intval(2)), + want: []interface{}{int64(1), int64(2)}, + }, + { + in: arrayval(), + want: []interface{}{}, + }, + { + in: mapval(map[string]*pb.Value{"a": intval(1), "b": intval(2)}), + want: map[string]interface{}{"a": int64(1), "b": int64(2)}, + }, + { + in: mapval(map[string]*pb.Value{}), + want: map[string]interface{}{}, + }, + { + in: refval("projects/P/databases/D/documents/c/d"), + want: &DocumentRef{ + ID: "d", + Parent: &CollectionRef{ + ID: "c", + parentPath: "projects/P/databases/D", + Path: "projects/P/databases/D/documents/c", + Query: Query{collectionID: "c", parentPath: "projects/P/databases/D"}, + }, + Path: "projects/P/databases/D/documents/c/d", + }, + }, + } { + got, err := createFromProtoValue(test.in, nil) + if err != nil { + t.Errorf("%v: %v", test.in, err) + continue + } + if !testEqual(got, test.want) { + t.Errorf("%+v:\ngot\n%#v\nwant\n%#v", test.in, got, test.want) + } + } +} + +func TestSetFromProtoValue(t *testing.T) { + testSetFromProtoValue(t, "json", jsonTester{}) + testSetFromProtoValue(t, "firestore", protoTester{}) +} + +func testSetFromProtoValue(t *testing.T, prefix string, r tester) { + pi := newfloat(7) + s := []float64{7, 8} + ar1 := [1]float64{7} + ar2 := [2]float64{7, 8} + ar3 := [3]float64{7, 8, 9} + mf := map[string]float64{"a": 7} + + type T struct { + I **float64 + J float64 + } + + one := newfloat(1) + six := newfloat(6) + st := []*T{&T{I: &six}, nil, &T{I: &six, J: 7}} + vs := interface{}(T{J: 1}) + vm := interface{}(map[string]float64{"i": 1}) + var ( + i int + i8 int8 + i16 int16 + i32 int32 + i64 int64 + u8 uint8 + u16 uint16 + u32 uint32 + b bool + ll *latlng.LatLng + mi map[string]interface{} + ms map[string]T + ) + + for i, test := range []struct { + in interface{} + val interface{} + want interface{} + }{ + {&pi, r.Null(), (*float64)(nil)}, + {pi, r.Float(1), 1.0}, + {&s, r.Null(), ([]float64)(nil)}, + {&s, r.Array(r.Float(1), r.Float(2)), []float64{1, 2}}, + {&ar1, r.Array(r.Float(1), r.Float(2)), [1]float64{1}}, + {&ar2, r.Array(r.Float(1), r.Float(2)), [2]float64{1, 2}}, + {&ar3, r.Array(r.Float(1), r.Float(2)), [3]float64{1, 2, 0}}, + {&mf, r.Null(), (map[string]float64)(nil)}, + {&mf, r.Map("a", r.Float(1), "b", r.Float(2)), map[string]float64{"a": 1, "b": 2}}, + {&st, r.Array( + r.Null(), // overwrites st[0] with nil + r.Map("i", r.Float(1)), // sets st[1] to a new struct + r.Map("i", r.Float(2)), // modifies st[2] + ), + []*T{nil, &T{I: &one}, &T{I: &six, J: 7}}}, + {&mi, r.Map("a", r.Float(1), "b", r.Float(2)), map[string]interface{}{"a": 1.0, "b": 2.0}}, + {&ms, r.Map("a", r.Map("j", r.Float(1))), 
map[string]T{"a": T{J: 1}}}, + {&vs, r.Map("i", r.Float(2)), map[string]interface{}{"i": 2.0}}, + {&vm, r.Map("i", r.Float(2)), map[string]interface{}{"i": 2.0}}, + {&ll, r.Null(), (*latlng.LatLng)(nil)}, + {&i, r.Int(1), int(1)}, + {&i8, r.Int(1), int8(1)}, + {&i16, r.Int(1), int16(1)}, + {&i32, r.Int(1), int32(1)}, + {&i64, r.Int(1), int64(1)}, + {&u8, r.Int(1), uint8(1)}, + {&u16, r.Int(1), uint16(1)}, + {&u32, r.Int(1), uint32(1)}, + {&b, r.Bool(true), true}, + {&i, r.Float(1), int(1)}, // can put a float with no fractional part into an int + {pi, r.Int(1), float64(1)}, // can put an int into a float + } { + if err := r.Set(test.in, test.val); err != nil { + t.Errorf("%s: #%d: got error %v", prefix, i, err) + continue + } + got := reflect.ValueOf(test.in).Elem().Interface() + if !testEqual(got, test.want) { + t.Errorf("%s: #%d, %v:\ngot\n%+v (%T)\nwant\n%+v (%T)", + prefix, i, test.val, got, got, test.want, test.want) + } + } +} + +func TestSetFromProtoValueNoJSON(t *testing.T) { + // Test code paths that we cannot compare to JSON. + var ( + bs []byte + tmi time.Time + lli *latlng.LatLng + tmp *ts.Timestamp + ) + bytes := []byte{1, 2, 3} + + for i, test := range []struct { + in interface{} + val *pb.Value + want interface{} + }{ + {&bs, bytesval(bytes), bytes}, + {&tmi, tsval(tm), tm}, + {&tmp, &pb.Value{&pb.Value_TimestampValue{ptm}}, ptm}, + {&lli, geoval(ll), ll}, + } { + if err := setFromProtoValue(test.in, test.val, &Client{}); err != nil { + t.Errorf("#%d: got error %v", i, err) + continue + } + got := reflect.ValueOf(test.in).Elem().Interface() + if !testEqual(got, test.want) { + t.Errorf("#%d, %v:\ngot\n%+v (%T)\nwant\n%+v (%T)", + i, test.val, got, got, test.want, test.want) + } + } +} + +func TestSetFromProtoValueErrors(t *testing.T) { + c := &Client{} + ival := intval(3) + for i, test := range []struct { + in interface{} + val *pb.Value + }{ + {3, ival}, // not a pointer + {new(int8), intval(128)}, // int overflow + {new(uint8), intval(256)}, // uint overflow + {new(float32), floatval(2 * math.MaxFloat32)}, // float overflow + {new(uint), ival}, // cannot set type + {new(uint64), ival}, // cannot set type + {new(io.Reader), ival}, // cannot set type + {new(map[int]int), + mapval(map[string]*pb.Value{"x": ival})}, // map key type is not string + // the rest are all type mismatches + {new(bool), ival}, + {new(*latlng.LatLng), ival}, + {new(time.Time), ival}, + {new(string), ival}, + {new([]byte), ival}, + {new([]int), ival}, + {new([1]int), ival}, + {new(map[string]int), ival}, + {new(*bool), ival}, + {new(struct{}), ival}, + {new(int), floatval(2.5)}, // float must be integral + {new(uint16), intval(-1)}, // uint cannot be negative + {new(int16), floatval(math.MaxFloat32)}, // doesn't fit + {new(uint16), floatval(math.MaxFloat32)}, // doesn't fit + {new(float32), + &pb.Value{&pb.Value_IntegerValue{math.MaxInt64}}}, // overflow + } { + err := setFromProtoValue(test.in, test.val, c) + if err == nil { + t.Errorf("#%d: %v, %v: got nil, want error", i, test.in, test.val) + } + } +} + +func TestSetFromProtoValuePointers(t *testing.T) { + // Verify that pointers are set, instead of being replaced. + // Confirm that the behavior matches encoding/json. + testSetPointer(t, "json", jsonTester{}) + testSetPointer(t, "firestore", protoTester{&Client{}}) +} + +func testSetPointer(t *testing.T, prefix string, r tester) { + // If an interface{} holds a pointer, the pointer is set. 
+ + set := func(x, val interface{}) { + if err := r.Set(x, val); err != nil { + t.Fatalf("%s: set(%v, %v): %v", prefix, x, val, err) + } + } + p := new(float64) + var st struct { + I interface{} + } + + // A pointer in a slice of interface{} is set. + s := []interface{}{p} + set(&s, r.Array(r.Float(1))) + if s[0] != p { + t.Errorf("%s: pointers not identical", prefix) + } + if *p != 1 { + t.Errorf("%s: got %f, want 1", prefix, *p) + } + // Setting a null will set the pointer to nil. + set(&s, r.Array(r.Null())) + if got := s[0]; got != nil { + t.Errorf("%s: got %v, want null", prefix, got) + } + + // It doesn't matter how deep the pointers nest. + p = new(float64) + p2 := &p + p3 := &p2 + s = []interface{}{p3} + set(&s, r.Array(r.Float(1))) + if s[0] != p3 { + t.Errorf("%s: pointers not identical", prefix) + } + if *p != 1 { + t.Errorf("%s: got %f, want 1", prefix, *p) + } + + // A pointer in an interface{} field is set. + p = new(float64) + st.I = p + set(&st, r.Map("i", r.Float(1))) + if st.I != p { + t.Errorf("%s: pointers not identical", prefix) + } + if *p != 1 { + t.Errorf("%s: got %f, want 1", prefix, *p) + } + // Setting a null will set the pointer to nil. + set(&st, r.Map("i", r.Null())) + if got := st.I; got != nil { + t.Errorf("%s: got %v, want null", prefix, got) + } + + // A pointer to a slice (instead of to float64) is set. + psi := &[]float64{7, 8, 9} + st.I = psi + set(&st, r.Map("i", r.Array(r.Float(1)))) + if st.I != psi { + t.Errorf("%s: pointers not identical", prefix) + } + // The slice itself should be truncated and filled, not replaced. + if got, want := cap(*psi), 3; got != want { + t.Errorf("cap: got %d, want %d", got, want) + } + if want := &[]float64{1}; !testEqual(st.I, want) { + t.Errorf("got %+v, want %+v", st.I, want) + } + + // A pointer to a map is set. + pmf := &map[string]float64{"a": 7, "b": 8} + st.I = pmf + set(&st, r.Map("i", r.Map("a", r.Float(1)))) + if st.I != pmf { + t.Errorf("%s: pointers not identical", prefix) + } + if want := map[string]float64{"a": 1, "b": 8}; !testEqual(*pmf, want) { + t.Errorf("%s: got %+v, want %+v", prefix, *pmf, want) + } + + // Maps are different: since the map values aren't addressable, they + // are always discarded, even if the map element type is not interface{}. + + // A map's values are discarded if the value type is a pointer type. + p = new(float64) + m := map[string]*float64{"i": p} + set(&m, r.Map("i", r.Float(1))) + if m["i"] == p { + t.Errorf("%s: pointers are identical", prefix) + } + if got, want := *m["i"], 1.0; got != want { + t.Errorf("%s: got %v, want %v", prefix, got, want) + } + // A map's values are discarded if the value type is interface{}. + p = new(float64) + m2 := map[string]interface{}{"i": p} + set(&m2, r.Map("i", r.Float(1))) + if m2["i"] == p { + t.Errorf("%s: pointers are identical", prefix) + } + if got, want := m2["i"].(float64), 1.0; got != want { + t.Errorf("%s: got %f, want %f", prefix, got, want) + } +} + +// An interface for setting and building values, to facilitate comparing firestore deserialization +// with encoding/json. 
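+// protoTester builds *pb.Value inputs decoded by setFromProtoValue, and
+// jsonTester builds equivalent JSON text decoded by json.Unmarshal, so one
+// test table can exercise and compare both decoding paths.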
+type tester interface { + Set(x, val interface{}) error + Null() interface{} + Int(int) interface{} + Float(float64) interface{} + Bool(bool) interface{} + Array(...interface{}) interface{} + Map(keysvals ...interface{}) interface{} +} + +type protoTester struct { + c *Client +} + +func (p protoTester) Set(x, val interface{}) error { return setFromProtoValue(x, val.(*pb.Value), p.c) } +func (protoTester) Null() interface{} { return nullValue } +func (protoTester) Int(i int) interface{} { return intval(i) } +func (protoTester) Float(f float64) interface{} { return floatval(f) } +func (protoTester) Bool(b bool) interface{} { return boolval(b) } + +func (protoTester) Array(els ...interface{}) interface{} { + var s []*pb.Value + for _, el := range els { + s = append(s, el.(*pb.Value)) + } + return arrayval(s...) +} + +func (protoTester) Map(keysvals ...interface{}) interface{} { + m := map[string]*pb.Value{} + for i := 0; i < len(keysvals); i += 2 { + m[keysvals[i].(string)] = keysvals[i+1].(*pb.Value) + } + return mapval(m) +} + +type jsonTester struct{} + +func (jsonTester) Set(x, val interface{}) error { return json.Unmarshal([]byte(val.(string)), x) } +func (jsonTester) Null() interface{} { return "null" } +func (jsonTester) Int(i int) interface{} { return fmt.Sprint(i) } +func (jsonTester) Float(f float64) interface{} { return fmt.Sprint(f) } + +func (jsonTester) Bool(b bool) interface{} { + if b { + return "true" + } + return "false" +} + +func (jsonTester) Array(els ...interface{}) interface{} { + var s []string + for _, el := range els { + s = append(s, el.(string)) + } + return "[" + strings.Join(s, ", ") + "]" +} + +func (jsonTester) Map(keysvals ...interface{}) interface{} { + var s []string + for i := 0; i < len(keysvals); i += 2 { + s = append(s, fmt.Sprintf("%q: %v", keysvals[i], keysvals[i+1])) + } + return "{" + strings.Join(s, ", ") + "}" +} + +func newfloat(f float64) *float64 { + p := new(float64) + *p = f + return p +} + +func TestParseDocumentPath(t *testing.T) { + for _, test := range []struct { + in string + pid, dbid string + dpath []string + }{ + {"projects/foo-bar/databases/db2/documents/c1/d1", + "foo-bar", "db2", []string{"c1", "d1"}}, + {"projects/P/databases/D/documents/c1/d1/c2/d2", + "P", "D", []string{"c1", "d1", "c2", "d2"}}, + } { + gotPid, gotDbid, gotDpath, err := parseDocumentPath(test.in) + if err != nil { + t.Fatal(err) + } + if got, want := gotPid, test.pid; got != want { + t.Errorf("project ID: got %q, want %q", got, want) + } + if got, want := gotDbid, test.dbid; got != want { + t.Errorf("db ID: got %q, want %q", got, want) + } + if got, want := gotDpath, test.dpath; !testEqual(got, want) { + t.Errorf("doc path: got %q, want %q", got, want) + } + } +} + +func TestParseDocumentPathErrors(t *testing.T) { + for _, badPath := range []string{ + "projects/P/databases/D/documents/c", // collection path + "/projects/P/databases/D/documents/c/d", // initial slash + "projects/P/databases/D/c/d", // missing "documents" + "project/P/database/D/document/c/d", + } { + // Every prefix of a bad path is also bad. 
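+ // Truncation only removes or shortens trailing components, so for these
+ // inputs no string prefix can form a well-formed document path.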
+ for i := 0; i <= len(badPath); i++ { + in := badPath[:i] + _, _, _, err := parseDocumentPath(in) + if err == nil { + t.Errorf("%q: got nil, want error", in) + } + } + } +} + +func TestPathToDoc(t *testing.T) { + c := &Client{} + path := "projects/P/databases/D/documents/c1/d1/c2/d2" + got, err := pathToDoc(path, c) + if err != nil { + t.Fatal(err) + } + want := &DocumentRef{ + ID: "d2", + Path: "projects/P/databases/D/documents/c1/d1/c2/d2", + Parent: &CollectionRef{ + ID: "c2", + parentPath: "projects/P/databases/D/documents/c1/d1", + Path: "projects/P/databases/D/documents/c1/d1/c2", + c: c, + Query: Query{c: c, collectionID: "c2", parentPath: "projects/P/databases/D/documents/c1/d1"}, + Parent: &DocumentRef{ + ID: "d1", + Path: "projects/P/databases/D/documents/c1/d1", + Parent: &CollectionRef{ + ID: "c1", + c: c, + parentPath: "projects/P/databases/D", + Path: "projects/P/databases/D/documents/c1", + Parent: nil, + Query: Query{c: c, collectionID: "c1", parentPath: "projects/P/databases/D"}, + }, + }, + }, + } + if !testEqual(got, want) { + t.Errorf("\ngot %+v\nwant %+v", got, want) + } +} + +func TestTypeString(t *testing.T) { + for _, test := range []struct { + in *pb.Value + want string + }{ + {nullValue, "null"}, + {intval(1), "int"}, + {floatval(1), "float"}, + {boolval(true), "bool"}, + {strval(""), "string"}, + {tsval(tm), "timestamp"}, + {geoval(ll), "GeoPoint"}, + {bytesval(nil), "bytes"}, + {refval(""), "reference"}, + {arrayval(nil), "array"}, + {mapval(nil), "map"}, + } { + got := typeString(test.in) + if got != test.want { + t.Errorf("%+v: got %q, want %q", test.in, got, test.want) + } + } +} diff --git a/vendor/cloud.google.com/go/firestore/genproto/test.pb.go b/vendor/cloud.google.com/go/firestore/genproto/test.pb.go new file mode 100644 index 0000000..fe6973d --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/genproto/test.pb.go @@ -0,0 +1,1243 @@ +// Code generated by protoc-gen-go. +// source: test.proto +// DO NOT EDIT! + +/* +Package tests is a generated protocol buffer package. + +It is generated from these files: + test.proto + +It has these top-level messages: + TestSuite + Test + GetTest + CreateTest + SetTest + UpdateTest + UpdatePathsTest + DeleteTest + SetOption + QueryTest + Clause + Select + Where + OrderBy + Cursor + DocSnapshot + FieldPath +*/ +package tests + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_firestore_v1beta14 "google.golang.org/genproto/googleapis/firestore/v1beta1" +import google_firestore_v1beta1 "google.golang.org/genproto/googleapis/firestore/v1beta1" +import google_firestore_v1beta12 "google.golang.org/genproto/googleapis/firestore/v1beta1" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A collection of tests. 
+type TestSuite struct { + Tests []*Test `protobuf:"bytes,1,rep,name=tests" json:"tests,omitempty"` +} + +func (m *TestSuite) Reset() { *m = TestSuite{} } +func (m *TestSuite) String() string { return proto.CompactTextString(m) } +func (*TestSuite) ProtoMessage() {} +func (*TestSuite) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *TestSuite) GetTests() []*Test { + if m != nil { + return m.Tests + } + return nil +} + +// A Test describes a single client method call and its expected result. +type Test struct { + Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + // Types that are valid to be assigned to Test: + // *Test_Get + // *Test_Create + // *Test_Set + // *Test_Update + // *Test_UpdatePaths + // *Test_Delete + // *Test_Query + Test isTest_Test `protobuf_oneof:"test"` +} + +func (m *Test) Reset() { *m = Test{} } +func (m *Test) String() string { return proto.CompactTextString(m) } +func (*Test) ProtoMessage() {} +func (*Test) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +type isTest_Test interface { + isTest_Test() +} + +type Test_Get struct { + Get *GetTest `protobuf:"bytes,2,opt,name=get,oneof"` +} +type Test_Create struct { + Create *CreateTest `protobuf:"bytes,3,opt,name=create,oneof"` +} +type Test_Set struct { + Set *SetTest `protobuf:"bytes,4,opt,name=set,oneof"` +} +type Test_Update struct { + Update *UpdateTest `protobuf:"bytes,5,opt,name=update,oneof"` +} +type Test_UpdatePaths struct { + UpdatePaths *UpdatePathsTest `protobuf:"bytes,6,opt,name=update_paths,json=updatePaths,oneof"` +} +type Test_Delete struct { + Delete *DeleteTest `protobuf:"bytes,7,opt,name=delete,oneof"` +} +type Test_Query struct { + Query *QueryTest `protobuf:"bytes,8,opt,name=query,oneof"` +} + +func (*Test_Get) isTest_Test() {} +func (*Test_Create) isTest_Test() {} +func (*Test_Set) isTest_Test() {} +func (*Test_Update) isTest_Test() {} +func (*Test_UpdatePaths) isTest_Test() {} +func (*Test_Delete) isTest_Test() {} +func (*Test_Query) isTest_Test() {} + +func (m *Test) GetTest() isTest_Test { + if m != nil { + return m.Test + } + return nil +} + +func (m *Test) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Test) GetGet() *GetTest { + if x, ok := m.GetTest().(*Test_Get); ok { + return x.Get + } + return nil +} + +func (m *Test) GetCreate() *CreateTest { + if x, ok := m.GetTest().(*Test_Create); ok { + return x.Create + } + return nil +} + +func (m *Test) GetSet() *SetTest { + if x, ok := m.GetTest().(*Test_Set); ok { + return x.Set + } + return nil +} + +func (m *Test) GetUpdate() *UpdateTest { + if x, ok := m.GetTest().(*Test_Update); ok { + return x.Update + } + return nil +} + +func (m *Test) GetUpdatePaths() *UpdatePathsTest { + if x, ok := m.GetTest().(*Test_UpdatePaths); ok { + return x.UpdatePaths + } + return nil +} + +func (m *Test) GetDelete() *DeleteTest { + if x, ok := m.GetTest().(*Test_Delete); ok { + return x.Delete + } + return nil +} + +func (m *Test) GetQuery() *QueryTest { + if x, ok := m.GetTest().(*Test_Query); ok { + return x.Query + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Test) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Test_OneofMarshaler, _Test_OneofUnmarshaler, _Test_OneofSizer, []interface{}{ + (*Test_Get)(nil), + (*Test_Create)(nil), + (*Test_Set)(nil), + (*Test_Update)(nil), + (*Test_UpdatePaths)(nil), + (*Test_Delete)(nil), + (*Test_Query)(nil), + } +} + +func _Test_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Test) + // test + switch x := m.Test.(type) { + case *Test_Get: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Get); err != nil { + return err + } + case *Test_Create: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Create); err != nil { + return err + } + case *Test_Set: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Set); err != nil { + return err + } + case *Test_Update: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Update); err != nil { + return err + } + case *Test_UpdatePaths: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.UpdatePaths); err != nil { + return err + } + case *Test_Delete: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Delete); err != nil { + return err + } + case *Test_Query: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Query); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Test.Test has unexpected type %T", x) + } + return nil +} + +func _Test_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Test) + switch tag { + case 2: // test.get + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GetTest) + err := b.DecodeMessage(msg) + m.Test = &Test_Get{msg} + return true, err + case 3: // test.create + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CreateTest) + err := b.DecodeMessage(msg) + m.Test = &Test_Create{msg} + return true, err + case 4: // test.set + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SetTest) + err := b.DecodeMessage(msg) + m.Test = &Test_Set{msg} + return true, err + case 5: // test.update + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(UpdateTest) + err := b.DecodeMessage(msg) + m.Test = &Test_Update{msg} + return true, err + case 6: // test.update_paths + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(UpdatePathsTest) + err := b.DecodeMessage(msg) + m.Test = &Test_UpdatePaths{msg} + return true, err + case 7: // test.delete + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DeleteTest) + err := b.DecodeMessage(msg) + m.Test = &Test_Delete{msg} + return true, err + case 8: // test.query + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(QueryTest) + err := b.DecodeMessage(msg) + m.Test = &Test_Query{msg} + return true, err + default: + return false, nil + } +} + +func _Test_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Test) + // test + switch x := m.Test.(type) { + case *Test_Get: + s := proto.Size(x.Get) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Test_Create: + s := proto.Size(x.Create) + n += proto.SizeVarint(3<<3 | 
proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Test_Set: + s := proto.Size(x.Set) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Test_Update: + s := proto.Size(x.Update) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Test_UpdatePaths: + s := proto.Size(x.UpdatePaths) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Test_Delete: + s := proto.Size(x.Delete) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Test_Query: + s := proto.Size(x.Query) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Call to the DocumentRef.Get method. +type GetTest struct { + // The path of the doc, e.g. "projects/projectID/databases/(default)/documents/C/d" + DocRefPath string `protobuf:"bytes,1,opt,name=doc_ref_path,json=docRefPath" json:"doc_ref_path,omitempty"` + // The request that the call should send to the Firestore service. + Request *google_firestore_v1beta14.GetDocumentRequest `protobuf:"bytes,2,opt,name=request" json:"request,omitempty"` +} + +func (m *GetTest) Reset() { *m = GetTest{} } +func (m *GetTest) String() string { return proto.CompactTextString(m) } +func (*GetTest) ProtoMessage() {} +func (*GetTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *GetTest) GetDocRefPath() string { + if m != nil { + return m.DocRefPath + } + return "" +} + +func (m *GetTest) GetRequest() *google_firestore_v1beta14.GetDocumentRequest { + if m != nil { + return m.Request + } + return nil +} + +// Call to DocumentRef.Create. +type CreateTest struct { + // The path of the doc, e.g. "projects/projectID/databases/(default)/documents/C/d" + DocRefPath string `protobuf:"bytes,1,opt,name=doc_ref_path,json=docRefPath" json:"doc_ref_path,omitempty"` + // The data passed to Create, as JSON. The strings "Delete" and "ServerTimestamp" + // denote the two special sentinel values. Values that could be interpreted as integers + // (i.e. digit strings) should be treated as integers. + JsonData string `protobuf:"bytes,2,opt,name=json_data,json=jsonData" json:"json_data,omitempty"` + // The request that the call should generate. + Request *google_firestore_v1beta14.CommitRequest `protobuf:"bytes,3,opt,name=request" json:"request,omitempty"` + // If true, the call should result in an error without generating a request. + // If this is true, request should not be set. + IsError bool `protobuf:"varint,4,opt,name=is_error,json=isError" json:"is_error,omitempty"` +} + +func (m *CreateTest) Reset() { *m = CreateTest{} } +func (m *CreateTest) String() string { return proto.CompactTextString(m) } +func (*CreateTest) ProtoMessage() {} +func (*CreateTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *CreateTest) GetDocRefPath() string { + if m != nil { + return m.DocRefPath + } + return "" +} + +func (m *CreateTest) GetJsonData() string { + if m != nil { + return m.JsonData + } + return "" +} + +func (m *CreateTest) GetRequest() *google_firestore_v1beta14.CommitRequest { + if m != nil { + return m.Request + } + return nil +} + +func (m *CreateTest) GetIsError() bool { + if m != nil { + return m.IsError + } + return false +} + +// A call to DocumentRef.Set. 
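+// A nil Option denotes a plain Set; a non-nil Option presumably selects
+// merge behavior (see SetOption below).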
+type SetTest struct { + DocRefPath string `protobuf:"bytes,1,opt,name=doc_ref_path,json=docRefPath" json:"doc_ref_path,omitempty"` + Option *SetOption `protobuf:"bytes,2,opt,name=option" json:"option,omitempty"` + JsonData string `protobuf:"bytes,3,opt,name=json_data,json=jsonData" json:"json_data,omitempty"` + Request *google_firestore_v1beta14.CommitRequest `protobuf:"bytes,4,opt,name=request" json:"request,omitempty"` + IsError bool `protobuf:"varint,5,opt,name=is_error,json=isError" json:"is_error,omitempty"` +} + +func (m *SetTest) Reset() { *m = SetTest{} } +func (m *SetTest) String() string { return proto.CompactTextString(m) } +func (*SetTest) ProtoMessage() {} +func (*SetTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *SetTest) GetDocRefPath() string { + if m != nil { + return m.DocRefPath + } + return "" +} + +func (m *SetTest) GetOption() *SetOption { + if m != nil { + return m.Option + } + return nil +} + +func (m *SetTest) GetJsonData() string { + if m != nil { + return m.JsonData + } + return "" +} + +func (m *SetTest) GetRequest() *google_firestore_v1beta14.CommitRequest { + if m != nil { + return m.Request + } + return nil +} + +func (m *SetTest) GetIsError() bool { + if m != nil { + return m.IsError + } + return false +} + +// A call to the form of DocumentRef.Update that represents the data as a map +// or dictionary. +type UpdateTest struct { + DocRefPath string `protobuf:"bytes,1,opt,name=doc_ref_path,json=docRefPath" json:"doc_ref_path,omitempty"` + Precondition *google_firestore_v1beta1.Precondition `protobuf:"bytes,2,opt,name=precondition" json:"precondition,omitempty"` + JsonData string `protobuf:"bytes,3,opt,name=json_data,json=jsonData" json:"json_data,omitempty"` + Request *google_firestore_v1beta14.CommitRequest `protobuf:"bytes,4,opt,name=request" json:"request,omitempty"` + IsError bool `protobuf:"varint,5,opt,name=is_error,json=isError" json:"is_error,omitempty"` +} + +func (m *UpdateTest) Reset() { *m = UpdateTest{} } +func (m *UpdateTest) String() string { return proto.CompactTextString(m) } +func (*UpdateTest) ProtoMessage() {} +func (*UpdateTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *UpdateTest) GetDocRefPath() string { + if m != nil { + return m.DocRefPath + } + return "" +} + +func (m *UpdateTest) GetPrecondition() *google_firestore_v1beta1.Precondition { + if m != nil { + return m.Precondition + } + return nil +} + +func (m *UpdateTest) GetJsonData() string { + if m != nil { + return m.JsonData + } + return "" +} + +func (m *UpdateTest) GetRequest() *google_firestore_v1beta14.CommitRequest { + if m != nil { + return m.Request + } + return nil +} + +func (m *UpdateTest) GetIsError() bool { + if m != nil { + return m.IsError + } + return false +} + +// A call to the form of DocumentRef.Update that represents the data as a list +// of field paths and their values. 
+type UpdatePathsTest struct {
+	DocRefPath string `protobuf:"bytes,1,opt,name=doc_ref_path,json=docRefPath" json:"doc_ref_path,omitempty"`
+	Precondition *google_firestore_v1beta1.Precondition `protobuf:"bytes,2,opt,name=precondition" json:"precondition,omitempty"`
+	// parallel sequences: field_paths[i] corresponds to json_values[i]
+	FieldPaths []*FieldPath `protobuf:"bytes,3,rep,name=field_paths,json=fieldPaths" json:"field_paths,omitempty"`
+	JsonValues []string `protobuf:"bytes,4,rep,name=json_values,json=jsonValues" json:"json_values,omitempty"`
+	Request *google_firestore_v1beta14.CommitRequest `protobuf:"bytes,5,opt,name=request" json:"request,omitempty"`
+	IsError bool `protobuf:"varint,6,opt,name=is_error,json=isError" json:"is_error,omitempty"`
+}
+
+func (m *UpdatePathsTest) Reset() { *m = UpdatePathsTest{} }
+func (m *UpdatePathsTest) String() string { return proto.CompactTextString(m) }
+func (*UpdatePathsTest) ProtoMessage() {}
+func (*UpdatePathsTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+
+func (m *UpdatePathsTest) GetDocRefPath() string {
+	if m != nil {
+		return m.DocRefPath
+	}
+	return ""
+}
+
+func (m *UpdatePathsTest) GetPrecondition() *google_firestore_v1beta1.Precondition {
+	if m != nil {
+		return m.Precondition
+	}
+	return nil
+}
+
+func (m *UpdatePathsTest) GetFieldPaths() []*FieldPath {
+	if m != nil {
+		return m.FieldPaths
+	}
+	return nil
+}
+
+func (m *UpdatePathsTest) GetJsonValues() []string {
+	if m != nil {
+		return m.JsonValues
+	}
+	return nil
+}
+
+func (m *UpdatePathsTest) GetRequest() *google_firestore_v1beta14.CommitRequest {
+	if m != nil {
+		return m.Request
+	}
+	return nil
+}
+
+func (m *UpdatePathsTest) GetIsError() bool {
+	if m != nil {
+		return m.IsError
+	}
+	return false
+}
+
+// A call to DocumentRef.Delete.
+type DeleteTest struct {
+	DocRefPath string `protobuf:"bytes,1,opt,name=doc_ref_path,json=docRefPath" json:"doc_ref_path,omitempty"`
+	Precondition *google_firestore_v1beta1.Precondition `protobuf:"bytes,2,opt,name=precondition" json:"precondition,omitempty"`
+	Request *google_firestore_v1beta14.CommitRequest `protobuf:"bytes,3,opt,name=request" json:"request,omitempty"`
+	IsError bool `protobuf:"varint,4,opt,name=is_error,json=isError" json:"is_error,omitempty"`
+}
+
+func (m *DeleteTest) Reset() { *m = DeleteTest{} }
+func (m *DeleteTest) String() string { return proto.CompactTextString(m) }
+func (*DeleteTest) ProtoMessage() {}
+func (*DeleteTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+
+func (m *DeleteTest) GetDocRefPath() string {
+	if m != nil {
+		return m.DocRefPath
+	}
+	return ""
+}
+
+func (m *DeleteTest) GetPrecondition() *google_firestore_v1beta1.Precondition {
+	if m != nil {
+		return m.Precondition
+	}
+	return nil
+}
+
+func (m *DeleteTest) GetRequest() *google_firestore_v1beta14.CommitRequest {
+	if m != nil {
+		return m.Request
+	}
+	return nil
+}
+
+func (m *DeleteTest) GetIsError() bool {
+	if m != nil {
+		return m.IsError
+	}
+	return false
+}
+
+// An option to the DocumentRef.Set call.
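+// All and Fields appear to be alternatives: All corresponds to the client's
+// MergeAll option, Fields to Merge on the listed field paths.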
+type SetOption struct { + All bool `protobuf:"varint,1,opt,name=all" json:"all,omitempty"` + Fields []*FieldPath `protobuf:"bytes,2,rep,name=fields" json:"fields,omitempty"` +} + +func (m *SetOption) Reset() { *m = SetOption{} } +func (m *SetOption) String() string { return proto.CompactTextString(m) } +func (*SetOption) ProtoMessage() {} +func (*SetOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *SetOption) GetAll() bool { + if m != nil { + return m.All + } + return false +} + +func (m *SetOption) GetFields() []*FieldPath { + if m != nil { + return m.Fields + } + return nil +} + +type QueryTest struct { + CollPath string `protobuf:"bytes,1,opt,name=coll_path,json=collPath" json:"coll_path,omitempty"` + Clauses []*Clause `protobuf:"bytes,2,rep,name=clauses" json:"clauses,omitempty"` + Query *google_firestore_v1beta12.StructuredQuery `protobuf:"bytes,3,opt,name=query" json:"query,omitempty"` + IsError bool `protobuf:"varint,4,opt,name=is_error,json=isError" json:"is_error,omitempty"` +} + +func (m *QueryTest) Reset() { *m = QueryTest{} } +func (m *QueryTest) String() string { return proto.CompactTextString(m) } +func (*QueryTest) ProtoMessage() {} +func (*QueryTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *QueryTest) GetCollPath() string { + if m != nil { + return m.CollPath + } + return "" +} + +func (m *QueryTest) GetClauses() []*Clause { + if m != nil { + return m.Clauses + } + return nil +} + +func (m *QueryTest) GetQuery() *google_firestore_v1beta12.StructuredQuery { + if m != nil { + return m.Query + } + return nil +} + +func (m *QueryTest) GetIsError() bool { + if m != nil { + return m.IsError + } + return false +} + +type Clause struct { + // Types that are valid to be assigned to Clause: + // *Clause_Select + // *Clause_Where + // *Clause_OrderBy + // *Clause_Offset + // *Clause_Limit + // *Clause_StartAt + // *Clause_StartAfter + // *Clause_EndAt + // *Clause_EndBefore + Clause isClause_Clause `protobuf_oneof:"clause"` +} + +func (m *Clause) Reset() { *m = Clause{} } +func (m *Clause) String() string { return proto.CompactTextString(m) } +func (*Clause) ProtoMessage() {} +func (*Clause) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +type isClause_Clause interface { + isClause_Clause() +} + +type Clause_Select struct { + Select *Select `protobuf:"bytes,1,opt,name=select,oneof"` +} +type Clause_Where struct { + Where *Where `protobuf:"bytes,2,opt,name=where,oneof"` +} +type Clause_OrderBy struct { + OrderBy *OrderBy `protobuf:"bytes,3,opt,name=order_by,json=orderBy,oneof"` +} +type Clause_Offset struct { + Offset int32 `protobuf:"varint,4,opt,name=offset,oneof"` +} +type Clause_Limit struct { + Limit int32 `protobuf:"varint,5,opt,name=limit,oneof"` +} +type Clause_StartAt struct { + StartAt *Cursor `protobuf:"bytes,6,opt,name=start_at,json=startAt,oneof"` +} +type Clause_StartAfter struct { + StartAfter *Cursor `protobuf:"bytes,7,opt,name=start_after,json=startAfter,oneof"` +} +type Clause_EndAt struct { + EndAt *Cursor `protobuf:"bytes,8,opt,name=end_at,json=endAt,oneof"` +} +type Clause_EndBefore struct { + EndBefore *Cursor `protobuf:"bytes,9,opt,name=end_before,json=endBefore,oneof"` +} + +func (*Clause_Select) isClause_Clause() {} +func (*Clause_Where) isClause_Clause() {} +func (*Clause_OrderBy) isClause_Clause() {} +func (*Clause_Offset) isClause_Clause() {} +func (*Clause_Limit) isClause_Clause() {} +func (*Clause_StartAt) isClause_Clause() {} +func (*Clause_StartAfter) 
isClause_Clause() {} +func (*Clause_EndAt) isClause_Clause() {} +func (*Clause_EndBefore) isClause_Clause() {} + +func (m *Clause) GetClause() isClause_Clause { + if m != nil { + return m.Clause + } + return nil +} + +func (m *Clause) GetSelect() *Select { + if x, ok := m.GetClause().(*Clause_Select); ok { + return x.Select + } + return nil +} + +func (m *Clause) GetWhere() *Where { + if x, ok := m.GetClause().(*Clause_Where); ok { + return x.Where + } + return nil +} + +func (m *Clause) GetOrderBy() *OrderBy { + if x, ok := m.GetClause().(*Clause_OrderBy); ok { + return x.OrderBy + } + return nil +} + +func (m *Clause) GetOffset() int32 { + if x, ok := m.GetClause().(*Clause_Offset); ok { + return x.Offset + } + return 0 +} + +func (m *Clause) GetLimit() int32 { + if x, ok := m.GetClause().(*Clause_Limit); ok { + return x.Limit + } + return 0 +} + +func (m *Clause) GetStartAt() *Cursor { + if x, ok := m.GetClause().(*Clause_StartAt); ok { + return x.StartAt + } + return nil +} + +func (m *Clause) GetStartAfter() *Cursor { + if x, ok := m.GetClause().(*Clause_StartAfter); ok { + return x.StartAfter + } + return nil +} + +func (m *Clause) GetEndAt() *Cursor { + if x, ok := m.GetClause().(*Clause_EndAt); ok { + return x.EndAt + } + return nil +} + +func (m *Clause) GetEndBefore() *Cursor { + if x, ok := m.GetClause().(*Clause_EndBefore); ok { + return x.EndBefore + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Clause) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Clause_OneofMarshaler, _Clause_OneofUnmarshaler, _Clause_OneofSizer, []interface{}{ + (*Clause_Select)(nil), + (*Clause_Where)(nil), + (*Clause_OrderBy)(nil), + (*Clause_Offset)(nil), + (*Clause_Limit)(nil), + (*Clause_StartAt)(nil), + (*Clause_StartAfter)(nil), + (*Clause_EndAt)(nil), + (*Clause_EndBefore)(nil), + } +} + +func _Clause_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Clause) + // clause + switch x := m.Clause.(type) { + case *Clause_Select: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Select); err != nil { + return err + } + case *Clause_Where: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Where); err != nil { + return err + } + case *Clause_OrderBy: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.OrderBy); err != nil { + return err + } + case *Clause_Offset: + b.EncodeVarint(4<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Offset)) + case *Clause_Limit: + b.EncodeVarint(5<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Limit)) + case *Clause_StartAt: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StartAt); err != nil { + return err + } + case *Clause_StartAfter: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StartAfter); err != nil { + return err + } + case *Clause_EndAt: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.EndAt); err != nil { + return err + } + case *Clause_EndBefore: + b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.EndBefore); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Clause.Clause has unexpected type %T", x) + } + return nil +} + +func _Clause_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Clause) + switch tag { + case 1: // 
clause.select + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Select) + err := b.DecodeMessage(msg) + m.Clause = &Clause_Select{msg} + return true, err + case 2: // clause.where + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Where) + err := b.DecodeMessage(msg) + m.Clause = &Clause_Where{msg} + return true, err + case 3: // clause.order_by + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(OrderBy) + err := b.DecodeMessage(msg) + m.Clause = &Clause_OrderBy{msg} + return true, err + case 4: // clause.offset + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Clause = &Clause_Offset{int32(x)} + return true, err + case 5: // clause.limit + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Clause = &Clause_Limit{int32(x)} + return true, err + case 6: // clause.start_at + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Cursor) + err := b.DecodeMessage(msg) + m.Clause = &Clause_StartAt{msg} + return true, err + case 7: // clause.start_after + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Cursor) + err := b.DecodeMessage(msg) + m.Clause = &Clause_StartAfter{msg} + return true, err + case 8: // clause.end_at + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Cursor) + err := b.DecodeMessage(msg) + m.Clause = &Clause_EndAt{msg} + return true, err + case 9: // clause.end_before + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Cursor) + err := b.DecodeMessage(msg) + m.Clause = &Clause_EndBefore{msg} + return true, err + default: + return false, nil + } +} + +func _Clause_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Clause) + // clause + switch x := m.Clause.(type) { + case *Clause_Select: + s := proto.Size(x.Select) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Clause_Where: + s := proto.Size(x.Where) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Clause_OrderBy: + s := proto.Size(x.OrderBy) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Clause_Offset: + n += proto.SizeVarint(4<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Offset)) + case *Clause_Limit: + n += proto.SizeVarint(5<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Limit)) + case *Clause_StartAt: + s := proto.Size(x.StartAt) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Clause_StartAfter: + s := proto.Size(x.StartAfter) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Clause_EndAt: + s := proto.Size(x.EndAt) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Clause_EndBefore: + s := proto.Size(x.EndBefore) + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Select struct { + Fields []*FieldPath `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty"` +} + +func (m *Select) Reset() { *m = Select{} } +func (m *Select) String() string { return 
proto.CompactTextString(m) } +func (*Select) ProtoMessage() {} +func (*Select) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *Select) GetFields() []*FieldPath { + if m != nil { + return m.Fields + } + return nil +} + +type Where struct { + Path *FieldPath `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + Op string `protobuf:"bytes,2,opt,name=op" json:"op,omitempty"` + JsonValue string `protobuf:"bytes,3,opt,name=json_value,json=jsonValue" json:"json_value,omitempty"` +} + +func (m *Where) Reset() { *m = Where{} } +func (m *Where) String() string { return proto.CompactTextString(m) } +func (*Where) ProtoMessage() {} +func (*Where) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *Where) GetPath() *FieldPath { + if m != nil { + return m.Path + } + return nil +} + +func (m *Where) GetOp() string { + if m != nil { + return m.Op + } + return "" +} + +func (m *Where) GetJsonValue() string { + if m != nil { + return m.JsonValue + } + return "" +} + +type OrderBy struct { + Path *FieldPath `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + Direction string `protobuf:"bytes,2,opt,name=direction" json:"direction,omitempty"` +} + +func (m *OrderBy) Reset() { *m = OrderBy{} } +func (m *OrderBy) String() string { return proto.CompactTextString(m) } +func (*OrderBy) ProtoMessage() {} +func (*OrderBy) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *OrderBy) GetPath() *FieldPath { + if m != nil { + return m.Path + } + return nil +} + +func (m *OrderBy) GetDirection() string { + if m != nil { + return m.Direction + } + return "" +} + +type Cursor struct { + // one of: + DocSnapshot *DocSnapshot `protobuf:"bytes,1,opt,name=doc_snapshot,json=docSnapshot" json:"doc_snapshot,omitempty"` + JsonValues []string `protobuf:"bytes,2,rep,name=json_values,json=jsonValues" json:"json_values,omitempty"` +} + +func (m *Cursor) Reset() { *m = Cursor{} } +func (m *Cursor) String() string { return proto.CompactTextString(m) } +func (*Cursor) ProtoMessage() {} +func (*Cursor) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *Cursor) GetDocSnapshot() *DocSnapshot { + if m != nil { + return m.DocSnapshot + } + return nil +} + +func (m *Cursor) GetJsonValues() []string { + if m != nil { + return m.JsonValues + } + return nil +} + +type DocSnapshot struct { + Path string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + JsonData string `protobuf:"bytes,2,opt,name=json_data,json=jsonData" json:"json_data,omitempty"` +} + +func (m *DocSnapshot) Reset() { *m = DocSnapshot{} } +func (m *DocSnapshot) String() string { return proto.CompactTextString(m) } +func (*DocSnapshot) ProtoMessage() {} +func (*DocSnapshot) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *DocSnapshot) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *DocSnapshot) GetJsonData() string { + if m != nil { + return m.JsonData + } + return "" +} + +type FieldPath struct { + Field []string `protobuf:"bytes,1,rep,name=field" json:"field,omitempty"` +} + +func (m *FieldPath) Reset() { *m = FieldPath{} } +func (m *FieldPath) String() string { return proto.CompactTextString(m) } +func (*FieldPath) ProtoMessage() {} +func (*FieldPath) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *FieldPath) GetField() []string { + if m != nil { + return m.Field + } + return nil +} + +func init() { + proto.RegisterType((*TestSuite)(nil), 
"tests.TestSuite") + proto.RegisterType((*Test)(nil), "tests.Test") + proto.RegisterType((*GetTest)(nil), "tests.GetTest") + proto.RegisterType((*CreateTest)(nil), "tests.CreateTest") + proto.RegisterType((*SetTest)(nil), "tests.SetTest") + proto.RegisterType((*UpdateTest)(nil), "tests.UpdateTest") + proto.RegisterType((*UpdatePathsTest)(nil), "tests.UpdatePathsTest") + proto.RegisterType((*DeleteTest)(nil), "tests.DeleteTest") + proto.RegisterType((*SetOption)(nil), "tests.SetOption") + proto.RegisterType((*QueryTest)(nil), "tests.QueryTest") + proto.RegisterType((*Clause)(nil), "tests.Clause") + proto.RegisterType((*Select)(nil), "tests.Select") + proto.RegisterType((*Where)(nil), "tests.Where") + proto.RegisterType((*OrderBy)(nil), "tests.OrderBy") + proto.RegisterType((*Cursor)(nil), "tests.Cursor") + proto.RegisterType((*DocSnapshot)(nil), "tests.DocSnapshot") + proto.RegisterType((*FieldPath)(nil), "tests.FieldPath") +} + +func init() { proto.RegisterFile("test.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 994 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xc4, 0x56, 0x5f, 0x6f, 0xdc, 0x44, + 0x10, 0xaf, 0x7d, 0x67, 0x9f, 0x3d, 0x0e, 0xa5, 0xac, 0x50, 0x65, 0x0a, 0x88, 0xab, 0x15, 0x92, + 0x83, 0xa2, 0x2b, 0x09, 0xe2, 0x09, 0x09, 0x94, 0x4b, 0x48, 0x2a, 0xa4, 0xaa, 0xc1, 0x57, 0xe0, + 0x05, 0xe9, 0x70, 0xec, 0x71, 0x62, 0xe4, 0xf3, 0x5e, 0x77, 0xd7, 0x45, 0xfd, 0x48, 0x20, 0xf1, + 0xc0, 0x37, 0xe1, 0x91, 0x4f, 0xc0, 0x37, 0x80, 0x67, 0xb4, 0x7f, 0x7c, 0xb6, 0xd3, 0x5c, 0xc9, + 0x43, 0x29, 0x6f, 0xbb, 0x33, 0xbf, 0x99, 0x9d, 0xf9, 0xcd, 0xec, 0xec, 0x02, 0x08, 0xe4, 0x62, + 0xba, 0x62, 0x54, 0x50, 0xe2, 0xc8, 0x35, 0xbf, 0x33, 0x39, 0xa7, 0xf4, 0xbc, 0xc4, 0xfb, 0x79, + 0xc1, 0x90, 0x0b, 0xca, 0xf0, 0xfe, 0xd3, 0xbd, 0x33, 0x14, 0xc9, 0x5e, 0x2b, 0xd1, 0x06, 0x77, + 0xde, 0xdf, 0x88, 0x4c, 0xe9, 0x72, 0x49, 0x2b, 0x03, 0xdb, 0xde, 0x08, 0x7b, 0x52, 0x23, 0x7b, + 0xa6, 0x51, 0xd1, 0x14, 0xfc, 0xc7, 0xc8, 0xc5, 0xbc, 0x2e, 0x04, 0x92, 0xbb, 0xa0, 0x83, 0x09, + 0xad, 0xf1, 0x60, 0x12, 0xec, 0x07, 0x53, 0xb5, 0x9b, 0x4a, 0x40, 0xac, 0x35, 0xd1, 0x9f, 0x36, + 0x0c, 0xe5, 0x9e, 0x8c, 0x21, 0xc8, 0x90, 0xa7, 0xac, 0x58, 0x89, 0x82, 0x56, 0xa1, 0x35, 0xb6, + 0x26, 0x7e, 0xdc, 0x15, 0x91, 0x08, 0x06, 0xe7, 0x28, 0x42, 0x7b, 0x6c, 0x4d, 0x82, 0xfd, 0x9b, + 0xc6, 0xd7, 0x09, 0x0a, 0x69, 0xfe, 0xe0, 0x46, 0x2c, 0x95, 0xe4, 0x1e, 0xb8, 0x29, 0xc3, 0x44, + 0x60, 0x38, 0x50, 0xb0, 0x37, 0x0c, 0xec, 0x50, 0x09, 0x0d, 0xd2, 0x40, 0xa4, 0x43, 0x8e, 0x22, + 0x1c, 0xf6, 0x1c, 0xce, 0x5b, 0x87, 0x5c, 0x3b, 0xac, 0x57, 0x99, 0x74, 0xe8, 0xf4, 0x1c, 0x7e, + 0xa3, 0x84, 0x8d, 0x43, 0x0d, 0x21, 0x9f, 0xc1, 0x96, 0x5e, 0x2d, 0x56, 0x89, 0xb8, 0xe0, 0xa1, + 0xab, 0x4c, 0x6e, 0xf7, 0x4c, 0x4e, 0xa5, 0xc6, 0xd8, 0x05, 0x75, 0x2b, 0x92, 0x27, 0x65, 0x58, + 0xa2, 0xc0, 0x70, 0xd4, 0x3b, 0xe9, 0x48, 0x09, 0x9b, 0x93, 0x34, 0x84, 0x4c, 0xc0, 0x51, 0xac, + 0x87, 0x9e, 0xc2, 0xde, 0x32, 0xd8, 0xaf, 0xa5, 0xcc, 0x40, 0x35, 0x60, 0xe6, 0xc2, 0x50, 0xea, + 0x22, 0x0e, 0x23, 0xc3, 0x15, 0x19, 0xc3, 0x56, 0x46, 0xd3, 0x05, 0xc3, 0x5c, 0xc5, 0x69, 0xb8, + 0x86, 0x8c, 0xa6, 0x31, 0xe6, 0x32, 0x18, 0x72, 0x0c, 0x23, 0x86, 0x4f, 0x6a, 0xe4, 0x0d, 0xdd, + 0x1f, 0x4d, 0x75, 0xf5, 0xa7, 0x6d, 0xf3, 0x98, 0xea, 0xcb, 0x0a, 0x1c, 0xd1, 0xb4, 0x5e, 0x62, + 0x25, 0x62, 0x6d, 0x13, 0x37, 0xc6, 0xd1, 0xcf, 0x16, 0x40, 0x4b, 0xfd, 0x35, 0x0e, 0x7e, 0x1b, + 0xfc, 0x1f, 0x39, 0xad, 0x16, 0x59, 0x22, 0x12, 0x75, 0xb4, 0x1f, 0x7b, 0x52, 0x70, 0x94, 0x88, + 0x84, 0x1c, 
0xb4, 0x51, 0xe9, 0xea, 0xee, 0x6e, 0x8e, 0xea, 0x90, 0x2e, 0x97, 0xc5, 0x73, 0x01, + 0x91, 0xb7, 0xc0, 0x2b, 0xf8, 0x02, 0x19, 0xa3, 0x4c, 0xd5, 0xdd, 0x8b, 0x47, 0x05, 0xff, 0x52, + 0x6e, 0xa3, 0xdf, 0x2d, 0x18, 0xcd, 0xaf, 0xcd, 0xd0, 0x04, 0x5c, 0xaa, 0x3b, 0xd5, 0xee, 0x55, + 0x60, 0x8e, 0xe2, 0x91, 0x92, 0xc7, 0x46, 0xdf, 0x4f, 0x69, 0xb0, 0x39, 0xa5, 0xe1, 0x4b, 0x48, + 0xc9, 0xe9, 0xa7, 0xf4, 0x97, 0x05, 0xd0, 0x36, 0xea, 0x35, 0xb2, 0xfa, 0x0a, 0xb6, 0x56, 0x0c, + 0x53, 0x5a, 0x65, 0x45, 0x27, 0xb7, 0x9d, 0xcd, 0x31, 0x9d, 0x76, 0xd0, 0x71, 0xcf, 0xf6, 0xff, + 0xcc, 0xfb, 0x37, 0x1b, 0x5e, 0xbf, 0x74, 0xdb, 0x5e, 0x71, 0xf2, 0x7b, 0x10, 0xe4, 0x05, 0x96, + 0x99, 0x19, 0x04, 0x03, 0x35, 0xff, 0x9a, 0x1e, 0x39, 0x96, 0x1a, 0x79, 0x64, 0x0c, 0x79, 0xb3, + 0xe4, 0xe4, 0x3d, 0x08, 0x14, 0x5f, 0x4f, 0x93, 0xb2, 0x46, 0x1e, 0x0e, 0xc7, 0x03, 0x19, 0x9f, + 0x14, 0x7d, 0xab, 0x24, 0x5d, 0xce, 0x9c, 0x97, 0xc0, 0x99, 0xdb, 0xe7, 0xec, 0x0f, 0x0b, 0xa0, + 0x1d, 0x35, 0xaf, 0x98, 0xae, 0xff, 0xf6, 0x66, 0x9f, 0x80, 0xbf, 0xbe, 0x96, 0xe4, 0x16, 0x0c, + 0x92, 0xb2, 0x54, 0xf9, 0x78, 0xb1, 0x5c, 0xca, 0xab, 0xac, 0xca, 0xc0, 0x43, 0x7b, 0x43, 0x99, + 0x8c, 0x3e, 0xfa, 0xd5, 0x02, 0x7f, 0x3d, 0x62, 0x65, 0x83, 0xa7, 0xb4, 0x2c, 0xbb, 0xfc, 0x78, + 0x52, 0xa0, 0xd8, 0xd9, 0x85, 0x51, 0x5a, 0x26, 0x35, 0xc7, 0xc6, 0xeb, 0x6b, 0xcd, 0x4b, 0xa4, + 0xa4, 0x71, 0xa3, 0x25, 0x5f, 0x34, 0x93, 0x5c, 0x27, 0xfe, 0xc1, 0xe6, 0xc4, 0xe7, 0x82, 0xd5, + 0xa9, 0xa8, 0x19, 0x66, 0x2a, 0x06, 0x33, 0xe0, 0x5f, 0x94, 0xf8, 0xdf, 0x36, 0xb8, 0xfa, 0x3c, + 0xb2, 0x0b, 0x2e, 0xc7, 0x12, 0x53, 0xa1, 0x22, 0x6d, 0xc3, 0x99, 0x2b, 0xa1, 0x7c, 0x59, 0xb4, + 0x9a, 0x6c, 0x83, 0xf3, 0xd3, 0x05, 0x32, 0x34, 0xf5, 0xdc, 0x32, 0xb8, 0xef, 0xa4, 0x4c, 0xbe, + 0x2a, 0x4a, 0x49, 0xee, 0x81, 0x47, 0x59, 0x86, 0x6c, 0x71, 0xd6, 0x04, 0xde, 0xbc, 0x9f, 0x8f, + 0xa4, 0x78, 0xf6, 0xec, 0xc1, 0x8d, 0x78, 0x44, 0xf5, 0x92, 0x84, 0xe0, 0xd2, 0x3c, 0x6f, 0x9e, + 0x5a, 0x47, 0x1e, 0xa6, 0xf7, 0xe4, 0x36, 0x38, 0x65, 0xb1, 0x2c, 0x74, 0x43, 0x4b, 0x85, 0xde, + 0x92, 0x0f, 0xc1, 0xe3, 0x22, 0x61, 0x62, 0x91, 0x08, 0xf3, 0x88, 0xae, 0xe9, 0xab, 0x19, 0xa7, + 0x4c, 0x7a, 0x57, 0x80, 0x03, 0x41, 0x3e, 0x86, 0xc0, 0x60, 0x73, 0x81, 0xcc, 0x3c, 0x9e, 0xcf, + 0xc1, 0x41, 0xc3, 0x25, 0x84, 0xec, 0x80, 0x8b, 0x55, 0x26, 0x7d, 0x7b, 0x57, 0x83, 0x1d, 0xac, + 0xb2, 0x03, 0x41, 0xa6, 0x00, 0x12, 0x77, 0x86, 0x39, 0x65, 0x18, 0xfa, 0x57, 0x63, 0x7d, 0xac, + 0xb2, 0x99, 0x42, 0xcc, 0x3c, 0x70, 0x75, 0x55, 0xa3, 0x7d, 0x70, 0x35, 0xb1, 0x9d, 0xe6, 0xb2, + 0xfe, 0xa5, 0xb9, 0xbe, 0x07, 0x47, 0x91, 0x4c, 0xb6, 0x61, 0xb8, 0x6e, 0xa9, 0xab, 0x0c, 0x94, + 0x96, 0xdc, 0x04, 0x9b, 0xae, 0xcc, 0x13, 0x69, 0xd3, 0x15, 0x79, 0x17, 0xa0, 0x1d, 0x1f, 0x66, + 0xde, 0xfa, 0xeb, 0xe9, 0x11, 0x3d, 0x84, 0x91, 0xa9, 0xcc, 0x35, 0xfd, 0xbf, 0x03, 0x7e, 0x56, + 0x30, 0x4c, 0xd7, 0x77, 0xdb, 0x8f, 0x5b, 0x41, 0xf4, 0x03, 0xb8, 0x9a, 0x01, 0xf2, 0xa9, 0x1e, + 0x14, 0xbc, 0x4a, 0x56, 0xfc, 0x82, 0x36, 0xed, 0x45, 0x9a, 0xcf, 0x0b, 0x4d, 0xe7, 0x46, 0x13, + 0x07, 0x59, 0xbb, 0xb9, 0x3c, 0xed, 0xec, 0xcb, 0xd3, 0x2e, 0xfa, 0x1c, 0x82, 0x8e, 0x31, 0x21, + 0x9d, 0xa0, 0x7d, 0x13, 0xe2, 0x8b, 0x3e, 0x0b, 0xd1, 0x5d, 0xf0, 0xd7, 0x29, 0x91, 0x37, 0xc1, + 0x51, 0x2c, 0xab, 0x22, 0xf8, 0xb1, 0xde, 0xcc, 0x1e, 0xc2, 0x4e, 0x4a, 0x97, 0xcd, 0x85, 0x4b, + 0x4b, 0x5a, 0x67, 0x9d, 0x6b, 0x97, 0xd2, 0x2a, 0xa7, 0x6c, 0x99, 0x54, 0x29, 0xfe, 0x62, 0x47, + 0x27, 0x1a, 0x74, 0xa8, 0x40, 0xc7, 0x6b, 0xd0, 0x63, 0x95, 0xe5, 0xa9, 0xfc, 0xfa, 0x9e, 0xb9, + 0xea, 0x07, 0xfc, 0xc9, 0x3f, 0x01, 
0x00, 0x00, 0xff, 0xff, 0x8d, 0x96, 0x46, 0xb3, 0x8d, 0x0b, + 0x00, 0x00, +} diff --git a/vendor/cloud.google.com/go/firestore/integration_test.go b/vendor/cloud.google.com/go/firestore/integration_test.go new file mode 100644 index 0000000..a575fcf --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/integration_test.go @@ -0,0 +1,1133 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +import ( + "errors" + "flag" + "fmt" + "log" + "math" + "os" + "path/filepath" + "runtime" + "sort" + "testing" + "time" + + "cloud.google.com/go/internal/pretty" + "cloud.google.com/go/internal/testutil" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/genproto/googleapis/type/latlng" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +func TestMain(m *testing.M) { + initIntegrationTest() + status := m.Run() + cleanupIntegrationTest() + os.Exit(status) +} + +const ( + envProjID = "GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID" + envPrivateKey = "GCLOUD_TESTS_GOLANG_FIRESTORE_KEY" +) + +var ( + iClient *Client + iColl *CollectionRef + collectionIDs = testutil.NewUIDSpace("go-integration-test") +) + +func initIntegrationTest() { + flag.Parse() // needed for testing.Short() + if testing.Short() { + return + } + ctx := context.Background() + testProjectID := os.Getenv(envProjID) + if testProjectID == "" { + log.Println("Integration tests skipped. See CONTRIBUTING.md for details") + return + } + ts := testutil.TokenSourceEnv(ctx, envPrivateKey, + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/datastore") + if ts == nil { + log.Fatal("The project key must be set. See CONTRIBUTING.md for details") + } + ti := &testInterceptor{dbPath: "projects/" + testProjectID + "/databases/(default)"} + c, err := NewClient(ctx, testProjectID, + option.WithTokenSource(ts), + option.WithGRPCDialOption(grpc.WithUnaryInterceptor(ti.interceptUnary)), + option.WithGRPCDialOption(grpc.WithStreamInterceptor(ti.interceptStream)), + ) + if err != nil { + log.Fatalf("NewClient: %v", err) + } + iClient = c + iColl = c.Collection(collectionIDs.New()) + refDoc := iColl.NewDoc() + integrationTestMap["ref"] = refDoc + wantIntegrationTestMap["ref"] = refDoc + integrationTestStruct.Ref = refDoc +} + +type testInterceptor struct { + dbPath string +} + +func (ti *testInterceptor) interceptUnary(ctx context.Context, method string, req, res interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + ti.checkMetadata(ctx, method) + return invoker(ctx, method, req, res, cc, opts...) 
+} + +func (ti *testInterceptor) interceptStream(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + ti.checkMetadata(ctx, method) + return streamer(ctx, desc, cc, method, opts...) +} + +func (ti *testInterceptor) checkMetadata(ctx context.Context, method string) { + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { + log.Fatalf("method %s: bad metadata", method) + } + for _, h := range []string{"google-cloud-resource-prefix", "x-goog-api-client"} { + v, ok := md[h] + if !ok { + log.Fatalf("method %s, header %s missing", method, h) + } + if len(v) != 1 { + log.Fatalf("method %s, header %s: bad value %v", method, h, v) + } + } + v := md["google-cloud-resource-prefix"][0] + if v != ti.dbPath { + log.Fatalf("method %s: bad resource prefix header: %q", method, v) + } +} + +func cleanupIntegrationTest() { + if iClient == nil { + return + } + // TODO(jba): delete everything in integrationColl. + iClient.Close() +} + +// integrationClient should be called by integration tests to get a valid client. It will never +// return nil. If integrationClient returns, an integration test can proceed without +// further checks. +func integrationClient(t *testing.T) *Client { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + if iClient == nil { + t.SkipNow() // log message printed in initIntegrationTest + } + return iClient +} + +func integrationColl(t *testing.T) *CollectionRef { + _ = integrationClient(t) + return iColl +} + +type integrationTestStructType struct { + Int int + Str string + Bool bool + Float float32 + Null interface{} + Bytes []byte + Time time.Time + Geo, NilGeo *latlng.LatLng + Ref *DocumentRef +} + +var ( + integrationTime = time.Date(2017, 3, 20, 1, 2, 3, 456789, time.UTC) + // Firestore times are accurate only to microseconds. + wantIntegrationTime = time.Date(2017, 3, 20, 1, 2, 3, 456000, time.UTC) + + integrationGeo = &latlng.LatLng{Latitude: 30, Longitude: 70} + + // Use this when writing a doc. + integrationTestMap = map[string]interface{}{ + "int": 1, + "str": "two", + "bool": true, + "float": 3.14, + "null": nil, + "bytes": []byte("bytes"), + "*": map[string]interface{}{"`": 4}, + "time": integrationTime, + "geo": integrationGeo, + "ref": nil, // populated by initIntegrationTest + } + + // The returned data is slightly different. + wantIntegrationTestMap = map[string]interface{}{ + "int": int64(1), + "str": "two", + "bool": true, + "float": 3.14, + "null": nil, + "bytes": []byte("bytes"), + "*": map[string]interface{}{"`": int64(4)}, + "time": wantIntegrationTime, + "geo": integrationGeo, + "ref": nil, // populated by initIntegrationTest + } + + integrationTestStruct = integrationTestStructType{ + Int: 1, + Str: "two", + Bool: true, + Float: 3.14, + Null: nil, + Bytes: []byte("bytes"), + Time: integrationTime, + Geo: integrationGeo, + NilGeo: nil, + Ref: nil, // populated by initIntegrationTest + } +) + +func TestIntegration_Create(t *testing.T) { + ctx := context.Background() + doc := integrationColl(t).NewDoc() + start := time.Now() + h := testHelper{t} + wr := h.mustCreate(doc, integrationTestMap) + end := time.Now() + checkTimeBetween(t, wr.UpdateTime, start, end) + _, err := doc.Create(ctx, integrationTestMap) + codeEq(t, "Create on a present doc", codes.AlreadyExists, err) + // OK to create an empty document. 
+	_, err = integrationColl(t).NewDoc().Create(ctx, map[string]interface{}{})
+	codeEq(t, "Create empty doc", codes.OK, err)
+}
+
+func TestIntegration_Get(t *testing.T) {
+	ctx := context.Background()
+	doc := integrationColl(t).NewDoc()
+	h := testHelper{t}
+	h.mustCreate(doc, integrationTestMap)
+	ds := h.mustGet(doc)
+	if ds.CreateTime != ds.UpdateTime {
+		t.Errorf("create time %s != update time %s", ds.CreateTime, ds.UpdateTime)
+	}
+	got := ds.Data()
+	if want := wantIntegrationTestMap; !testEqual(got, want) {
+		t.Errorf("got\n%v\nwant\n%v", pretty.Value(got), pretty.Value(want))
+	}
+
+	doc = integrationColl(t).NewDoc()
+	empty := map[string]interface{}{}
+	h.mustCreate(doc, empty)
+	ds = h.mustGet(doc)
+	if ds.CreateTime != ds.UpdateTime {
+		t.Errorf("create time %s != update time %s", ds.CreateTime, ds.UpdateTime)
+	}
+	if got, want := ds.Data(), empty; !testEqual(got, want) {
+		t.Errorf("got\n%v\nwant\n%v", pretty.Value(got), pretty.Value(want))
+	}
+
+	ds, err := integrationColl(t).NewDoc().Get(ctx)
+	codeEq(t, "Get on a missing doc", codes.NotFound, err)
+	if ds == nil || ds.Exists() {
+		t.Fatal("got nil or existing doc snapshot, want !ds.Exists")
+	}
+	if ds.ReadTime.IsZero() {
+		t.Error("got zero read time")
+	}
+}
+
+func TestIntegration_GetAll(t *testing.T) {
+	type getAll struct{ N int }
+
+	h := testHelper{t}
+	coll := integrationColl(t)
+	ctx := context.Background()
+	var docRefs []*DocumentRef
+	for i := 0; i < 5; i++ {
+		doc := coll.NewDoc()
+		docRefs = append(docRefs, doc)
+		if i != 3 {
+			h.mustCreate(doc, getAll{N: i})
+		}
+	}
+	docSnapshots, err := iClient.GetAll(ctx, docRefs)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got, want := len(docSnapshots), len(docRefs); got != want {
+		t.Fatalf("got %d snapshots, want %d", got, want)
+	}
+	for i, ds := range docSnapshots {
+		if i == 3 {
+			if ds == nil || ds.Exists() {
+				t.Fatal("got nil or existing doc snapshot, want !ds.Exists")
+			}
+			err := ds.DataTo(nil)
+			codeEq(t, "DataTo on a missing doc", codes.NotFound, err)
+		} else {
+			var got getAll
+			if err := ds.DataTo(&got); err != nil {
+				t.Fatal(err)
+			}
+			want := getAll{N: i}
+			if got != want {
+				t.Errorf("%d: got %+v, want %+v", i, got, want)
+			}
+		}
+		if ds.ReadTime.IsZero() {
+			t.Errorf("%d: got zero read time", i)
+		}
+	}
+}
+
+func TestIntegration_Add(t *testing.T) {
+	start := time.Now()
+	_, wr, err := integrationColl(t).Add(context.Background(), integrationTestMap)
+	if err != nil {
+		t.Fatal(err)
+	}
+	end := time.Now()
+	checkTimeBetween(t, wr.UpdateTime, start, end)
+}
+
+func TestIntegration_Set(t *testing.T) {
+	coll := integrationColl(t)
+	ctx := context.Background()
+	h := testHelper{t}
+
+	// Set should be able to create a new doc.
+	doc := coll.NewDoc()
+	wr1, err := doc.Set(ctx, integrationTestMap)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Calling Set on the doc completely replaces the contents.
+	// The update time should increase.
+	newData := map[string]interface{}{
+		"str": "change",
+		"x":   "1",
+	}
+	wr2, err := doc.Set(ctx, newData)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !wr1.UpdateTime.Before(wr2.UpdateTime) {
+		t.Errorf("update time did not increase: old=%s, new=%s", wr1.UpdateTime, wr2.UpdateTime)
+	}
+	ds := h.mustGet(doc)
+	if got := ds.Data(); !testEqual(got, newData) {
+		t.Errorf("got %v, want %v", got, newData)
+	}
+
+	newData = map[string]interface{}{
+		"str": "1",
+		"x":   "2",
+		"y":   "3",
+	}
+	// SetOptions:
+	// Only fields mentioned in the Merge option will be changed.
+	// In this case, "str" will not be changed to "1".
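+	// Each []string argument to Merge is one field path, spelled out as its
+	// components, so the call below merges the top-level fields "x" and "y".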
+ wr3, err := doc.Set(ctx, newData, Merge([]string{"x"}, []string{"y"})) + if err != nil { + t.Fatal(err) + } + ds = h.mustGet(doc) + want := map[string]interface{}{ + "str": "change", + "x": "2", + "y": "3", + } + if got := ds.Data(); !testEqual(got, want) { + t.Errorf("got %v, want %v", got, want) + } + if !wr2.UpdateTime.Before(wr3.UpdateTime) { + t.Errorf("update time did not increase: old=%s, new=%s", wr2.UpdateTime, wr3.UpdateTime) + } + + // Another way to change only x and y is to pass a map with only + // those keys, and use MergeAll. + wr4, err := doc.Set(ctx, map[string]interface{}{"x": "4", "y": "5"}, MergeAll) + if err != nil { + t.Fatal(err) + } + ds = h.mustGet(doc) + want = map[string]interface{}{ + "str": "change", + "x": "4", + "y": "5", + } + if got := ds.Data(); !testEqual(got, want) { + t.Errorf("got %v, want %v", got, want) + } + if !wr3.UpdateTime.Before(wr4.UpdateTime) { + t.Errorf("update time did not increase: old=%s, new=%s", wr3.UpdateTime, wr4.UpdateTime) + } +} + +func TestIntegration_Delete(t *testing.T) { + ctx := context.Background() + doc := integrationColl(t).NewDoc() + h := testHelper{t} + h.mustCreate(doc, integrationTestMap) + wr := h.mustDelete(doc) + // Confirm that doc doesn't exist. + if _, err := doc.Get(ctx); grpc.Code(err) != codes.NotFound { + t.Fatalf("got error <%v>, want NotFound", err) + } + + er := func(_ *WriteResult, err error) error { return err } + + codeEq(t, "Delete on a missing doc", codes.OK, + er(doc.Delete(ctx))) + // TODO(jba): confirm that the server should return InvalidArgument instead of + // FailedPrecondition. + wr = h.mustCreate(doc, integrationTestMap) + codeEq(t, "Delete with wrong LastUpdateTime", codes.FailedPrecondition, + er(doc.Delete(ctx, LastUpdateTime(wr.UpdateTime.Add(-time.Millisecond))))) + codeEq(t, "Delete with right LastUpdateTime", codes.OK, + er(doc.Delete(ctx, LastUpdateTime(wr.UpdateTime)))) +} + +func TestIntegration_Update(t *testing.T) { + ctx := context.Background() + doc := integrationColl(t).NewDoc() + h := testHelper{t} + + h.mustCreate(doc, integrationTestMap) + fpus := []Update{ + {Path: "bool", Value: false}, + {Path: "time", Value: 17}, + {FieldPath: []string{"*", "`"}, Value: 18}, + {Path: "null", Value: Delete}, + {Path: "noSuchField", Value: Delete}, // deleting a non-existent field is a no-op + } + wr := h.mustUpdate(doc, fpus) + ds := h.mustGet(doc) + got := ds.Data() + want := copyMap(wantIntegrationTestMap) + want["bool"] = false + want["time"] = int64(17) + want["*"] = map[string]interface{}{"`": int64(18)} + delete(want, "null") + if !testEqual(got, want) { + t.Errorf("got\n%#v\nwant\n%#v", got, want) + } + + er := func(_ *WriteResult, err error) error { return err } + + codeEq(t, "Update on missing doc", codes.NotFound, + er(integrationColl(t).NewDoc().Update(ctx, fpus))) + codeEq(t, "Update with wrong LastUpdateTime", codes.FailedPrecondition, + er(doc.Update(ctx, fpus, LastUpdateTime(wr.UpdateTime.Add(-time.Millisecond))))) + codeEq(t, "Update with right LastUpdateTime", codes.OK, + er(doc.Update(ctx, fpus, LastUpdateTime(wr.UpdateTime)))) +} + +func TestIntegration_Collections(t *testing.T) { + ctx := context.Background() + c := integrationClient(t) + h := testHelper{t} + got, err := c.Collections(ctx).GetAll() + if err != nil { + t.Fatal(err) + } + // There should be at least one collection. 
+	if len(got) == 0 {
+		t.Error("got 0 top-level collections, want at least one")
+	}
+
+	doc := integrationColl(t).NewDoc()
+	got, err = doc.Collections(ctx).GetAll()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(got) != 0 {
+		t.Errorf("got %d collections, want 0", len(got))
+	}
+	var want []*CollectionRef
+	for i := 0; i < 3; i++ {
+		id := collectionIDs.New()
+		cr := doc.Collection(id)
+		want = append(want, cr)
+		h.mustCreate(cr.NewDoc(), integrationTestMap)
+	}
+	got, err = doc.Collections(ctx).GetAll()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !testEqual(got, want) {
+		t.Errorf("got\n%#v\nwant\n%#v", got, want)
+	}
+}
+
+func TestIntegration_ServerTimestamp(t *testing.T) {
+	type S struct {
+		A int
+		B time.Time
+		C time.Time `firestore:"C.C,serverTimestamp"`
+		D map[string]interface{}
+		E time.Time `firestore:",omitempty,serverTimestamp"`
+	}
+	data := S{
+		A: 1,
+		B: aTime,
+		// C is unset, so will get the server timestamp.
+		D: map[string]interface{}{"x": ServerTimestamp},
+		// E is unset, so will get the server timestamp.
+	}
+	h := testHelper{t}
+	doc := integrationColl(t).NewDoc()
+	// Bound times of the RPC, with some slack for clock skew.
+	start := time.Now()
+	h.mustCreate(doc, data)
+	end := time.Now()
+	ds := h.mustGet(doc)
+	var got S
+	if err := ds.DataTo(&got); err != nil {
+		t.Fatal(err)
+	}
+	if !testEqual(got.B, aTime) {
+		t.Errorf("B: got %s, want %s", got.B, aTime)
+	}
+	checkTimeBetween(t, got.C, start, end)
+	if g, w := got.D["x"], got.C; !testEqual(g, w) {
+		t.Errorf(`D["x"] = %s, want equal to C (%s)`, g, w)
+	}
+	if g, w := got.E, got.C; !testEqual(g, w) {
+		t.Errorf(`E = %s, want equal to C (%s)`, g, w)
+	}
+}
+
+func TestIntegration_MergeServerTimestamp(t *testing.T) {
+	ctx := context.Background()
+	doc := integrationColl(t).NewDoc()
+	h := testHelper{t}
+
+	// Create a doc with an ordinary field "a" and a ServerTimestamp field "b".
+	_, err := doc.Set(ctx, map[string]interface{}{
+		"a": 1,
+		"b": ServerTimestamp})
+	if err != nil {
+		t.Fatal(err)
+	}
+	docSnap := h.mustGet(doc)
+	data1 := docSnap.Data()
+	// Merge with a document with a different value of "a". However,
+	// specify only "b" in the list of merge fields.
+	_, err = doc.Set(ctx,
+		map[string]interface{}{"a": 2, "b": ServerTimestamp},
+		Merge([]string{"b"}))
+	if err != nil {
+		t.Fatal(err)
+	}
+	// The result should leave "a" unchanged, while "b" is updated.
+	docSnap = h.mustGet(doc)
+	data2 := docSnap.Data()
+	if got, want := data2["a"], data1["a"]; got != want {
+		t.Errorf("got %v, want %v", got, want)
+	}
+	t1 := data1["b"].(time.Time)
+	t2 := data2["b"].(time.Time)
+	if !t1.Before(t2) {
+		t.Errorf("got t1=%s, t2=%s; want t1 before t2", t1, t2)
+	}
+}
+
+func TestIntegration_MergeNestedServerTimestamp(t *testing.T) {
+	ctx := context.Background()
+	doc := integrationColl(t).NewDoc()
+	h := testHelper{t}
+
+	// Create a doc with an ordinary field "a", a ServerTimestamp field "b",
+	// and a second ServerTimestamp field "c.d".
+	_, err := doc.Set(ctx, map[string]interface{}{
+		"a": 1,
+		"b": ServerTimestamp,
+		"c": map[string]interface{}{"d": ServerTimestamp},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	data1 := h.mustGet(doc).Data()
+	// Merge with a document with a different value of "a". However,
+	// specify only "c.d" in the list of merge fields.
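+	// Note that []string{"c", "d"} below is the single dotted path c.d
+	// (one path with two components), not the two top-level fields c and d.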
+ _, err = doc.Set(ctx, + map[string]interface{}{ + "a": 2, + "b": ServerTimestamp, + "c": map[string]interface{}{"d": ServerTimestamp}, + }, + Merge([]string{"c", "d"})) + if err != nil { + t.Fatal(err) + } + // The result should leave "a" and "b" unchanged, while "c.d" is updated. + data2 := h.mustGet(doc).Data() + if got, want := data2["a"], data1["a"]; got != want { + t.Errorf("a: got %v, want %v", got, want) + } + want := data1["b"].(time.Time) + got := data2["b"].(time.Time) + if !got.Equal(want) { + t.Errorf("b: got %s, want %s", got, want) + } + t1 := data1["c"].(map[string]interface{})["d"].(time.Time) + t2 := data2["c"].(map[string]interface{})["d"].(time.Time) + if !t1.Before(t2) { + t.Errorf("got t1=%s, t2=%s; want t1 before t2", t1, t2) + } +} + +func TestIntegration_WriteBatch(t *testing.T) { + ctx := context.Background() + b := integrationClient(t).Batch() + h := testHelper{t} + doc1 := iColl.NewDoc() + doc2 := iColl.NewDoc() + b.Create(doc1, integrationTestMap) + b.Set(doc2, integrationTestMap) + b.Update(doc1, []Update{{Path: "bool", Value: false}}) + b.Update(doc1, []Update{{Path: "str", Value: Delete}}) + + wrs, err := b.Commit(ctx) + if err != nil { + t.Fatal(err) + } + if got, want := len(wrs), 4; got != want { + t.Fatalf("got %d WriteResults, want %d", got, want) + } + got1 := h.mustGet(doc1).Data() + want := copyMap(wantIntegrationTestMap) + want["bool"] = false + delete(want, "str") + if !testEqual(got1, want) { + t.Errorf("got\n%#v\nwant\n%#v", got1, want) + } + got2 := h.mustGet(doc2).Data() + if !testEqual(got2, wantIntegrationTestMap) { + t.Errorf("got\n%#v\nwant\n%#v", got2, wantIntegrationTestMap) + } + // TODO(jba): test two updates to the same document when it is supported. + // TODO(jba): test verify when it is supported. +} + +func TestIntegration_Query(t *testing.T) { + ctx := context.Background() + coll := integrationColl(t) + h := testHelper{t} + var docs []*DocumentRef + var wants []map[string]interface{} + for i := 0; i < 3; i++ { + doc := coll.NewDoc() + docs = append(docs, doc) + // To support running this test in parallel with the others, use a field name + // that we don't use anywhere else. + h.mustCreate(doc, map[string]interface{}{"q": i, "x": 1}) + wants = append(wants, map[string]interface{}{"q": int64(i)}) + } + q := coll.Select("q").OrderBy("q", Asc) + for i, test := range []struct { + q Query + want []map[string]interface{} + }{ + {q, wants}, + {q.Where("q", ">", 1), wants[2:]}, + {q.WherePath([]string{"q"}, ">", 1), wants[2:]}, + {q.Offset(1).Limit(1), wants[1:2]}, + {q.StartAt(1), wants[1:]}, + {q.StartAfter(1), wants[2:]}, + {q.EndAt(1), wants[:2]}, + {q.EndBefore(1), wants[:1]}, + } { + gotDocs, err := test.q.Documents(ctx).GetAll() + if err != nil { + t.Errorf("#%d: %+v: %v", i, test.q, err) + continue + } + if len(gotDocs) != len(test.want) { + t.Errorf("#%d: %+v: got %d docs, want %d", i, test.q, len(gotDocs), len(test.want)) + continue + } + for j, g := range gotDocs { + if got, want := g.Data(), test.want[j]; !testEqual(got, want) { + t.Errorf("#%d: %+v, #%d: got\n%+v\nwant\n%+v", i, test.q, j, got, want) + } + } + } + _, err := coll.Select("q").Where("x", "==", 1).OrderBy("q", Asc).Documents(ctx).GetAll() + codeEq(t, "Where and OrderBy on different fields without an index", codes.FailedPrecondition, err) + + // Using the collection itself as the query should return the full documents. 
+ allDocs, err := coll.Documents(ctx).GetAll() + if err != nil { + t.Fatal(err) + } + seen := map[int64]bool{} // "q" values we see + for _, d := range allDocs { + data := d.Data() + q, ok := data["q"] + if !ok { + // A document from another test. + continue + } + if seen[q.(int64)] { + t.Errorf("%v: duplicate doc", data) + } + seen[q.(int64)] = true + if data["x"] != int64(1) { + t.Errorf("%v: wrong or missing 'x'", data) + } + if len(data) != 2 { + t.Errorf("%v: want two keys", data) + } + } + if got, want := len(seen), len(wants); got != want { + t.Errorf("got %d docs with 'q', want %d", len(seen), len(wants)) + } +} + +// Test unary filters. +func TestIntegration_QueryUnary(t *testing.T) { + ctx := context.Background() + coll := integrationColl(t) + h := testHelper{t} + h.mustCreate(coll.NewDoc(), map[string]interface{}{"x": 2, "q": "a"}) + h.mustCreate(coll.NewDoc(), map[string]interface{}{"x": 2, "q": nil}) + h.mustCreate(coll.NewDoc(), map[string]interface{}{"x": 2, "q": math.NaN()}) + wantNull := map[string]interface{}{"q": nil} + wantNaN := map[string]interface{}{"q": math.NaN()} + + base := coll.Select("q").Where("x", "==", 2) + for _, test := range []struct { + q Query + want map[string]interface{} + }{ + {base.Where("q", "==", nil), wantNull}, + {base.Where("q", "==", math.NaN()), wantNaN}, + } { + got, err := test.q.Documents(ctx).GetAll() + if err != nil { + t.Fatal(err) + } + if len(got) != 1 { + t.Errorf("got %d responses, want 1", len(got)) + continue + } + if g, w := got[0].Data(), test.want; !testEqual(g, w) { + t.Errorf("%v: got %v, want %v", test.q, g, w) + } + } +} + +// Test the special DocumentID field in queries. +func TestIntegration_QueryName(t *testing.T) { + ctx := context.Background() + h := testHelper{t} + + checkIDs := func(q Query, wantIDs []string) { + gots, err := q.Documents(ctx).GetAll() + if err != nil { + t.Fatal(err) + } + if len(gots) != len(wantIDs) { + t.Fatalf("got %d, want %d", len(gots), len(wantIDs)) + } + for i, g := range gots { + if got, want := g.Ref.ID, wantIDs[i]; got != want { + t.Errorf("#%d: got %s, want %s", i, got, want) + } + } + } + + coll := integrationColl(t) + var wantIDs []string + for i := 0; i < 3; i++ { + doc := coll.NewDoc() + h.mustCreate(doc, map[string]interface{}{"nm": 1}) + wantIDs = append(wantIDs, doc.ID) + } + sort.Strings(wantIDs) + q := coll.Where("nm", "==", 1).OrderBy(DocumentID, Asc) + checkIDs(q, wantIDs) + + // Empty Select. + q = coll.Select().Where("nm", "==", 1).OrderBy(DocumentID, Asc) + checkIDs(q, wantIDs) + + // Test cursors with __name__. 
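+	// Since the query orders by DocumentID, the cursor values passed to
+	// StartAt and EndAt are document IDs.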
+	checkIDs(q.StartAt(wantIDs[1]), wantIDs[1:])
+	checkIDs(q.EndAt(wantIDs[1]), wantIDs[:2])
+}
+
+func TestIntegration_QueryNested(t *testing.T) {
+	ctx := context.Background()
+	h := testHelper{t}
+	coll1 := integrationColl(t)
+	doc1 := coll1.NewDoc()
+	coll2 := doc1.Collection(collectionIDs.New())
+	doc2 := coll2.NewDoc()
+	wantData := map[string]interface{}{"x": int64(1)}
+	h.mustCreate(doc2, wantData)
+	q := coll2.Select("x")
+	got, err := q.Documents(ctx).GetAll()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(got) != 1 {
+		t.Fatalf("got %d docs, want 1", len(got))
+	}
+	if gotData := got[0].Data(); !testEqual(gotData, wantData) {
+		t.Errorf("got\n%+v\nwant\n%+v", gotData, wantData)
+	}
+}
+
+func TestIntegration_RunTransaction(t *testing.T) {
+	ctx := context.Background()
+	h := testHelper{t}
+
+	type Player struct {
+		Name  string
+		Score int
+		Star  bool `firestore:"*"`
+	}
+
+	pat := Player{Name: "Pat", Score: 3, Star: false}
+	client := integrationClient(t)
+	patDoc := iColl.Doc("pat")
+	var anError error
+	incPat := func(_ context.Context, tx *Transaction) error {
+		doc, err := tx.Get(patDoc)
+		if err != nil {
+			return err
+		}
+		score, err := doc.DataAt("Score")
+		if err != nil {
+			return err
+		}
+		// Since the Star field is called "*", we must use DataAtPath to get it.
+		star, err := doc.DataAtPath([]string{"*"})
+		if err != nil {
+			return err
+		}
+		err = tx.Update(patDoc, []Update{{Path: "Score", Value: int(score.(int64) + 7)}})
+		if err != nil {
+			return err
+		}
+		// Since the Star field is called "*", we must use FieldPath, not Path,
+		// in the Update (Path would try to parse "*" as a dot-separated path).
+		err = tx.Update(patDoc,
+			[]Update{{FieldPath: []string{"*"}, Value: !star.(bool)}})
+		if err != nil {
+			return err
+		}
+		return anError
+	}
+
+	h.mustCreate(patDoc, pat)
+	err := client.RunTransaction(ctx, incPat)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ds := h.mustGet(patDoc)
+	var got Player
+	if err := ds.DataTo(&got); err != nil {
+		t.Fatal(err)
+	}
+	want := Player{Name: "Pat", Score: 10, Star: true}
+	if got != want {
+		t.Errorf("got %+v, want %+v", got, want)
+	}
+
+	// Function returns error, so transaction is rolled back and no writes happen.
+	anError = errors.New("bad")
+	err = client.RunTransaction(ctx, incPat)
+	if err != anError {
+		t.Fatalf("got %v, want %v", err, anError)
+	}
+	// Re-read the document; the rolled-back transaction must not have changed it.
+	ds = h.mustGet(patDoc)
+	if err := ds.DataTo(&got); err != nil {
+		t.Fatal(err)
+	}
+	// want is same as before.
+ if got != want { + t.Errorf("got %+v, want %+v", got, want) + } +} + +func TestIntegration_TransactionGetAll(t *testing.T) { + ctx := context.Background() + h := testHelper{t} + type Player struct { + Name string + Score int + } + lee := Player{Name: "Lee", Score: 3} + sam := Player{Name: "Sam", Score: 1} + client := integrationClient(t) + leeDoc := iColl.Doc("lee") + samDoc := iColl.Doc("sam") + h.mustCreate(leeDoc, lee) + h.mustCreate(samDoc, sam) + + err := client.RunTransaction(ctx, func(_ context.Context, tx *Transaction) error { + docs, err := tx.GetAll([]*DocumentRef{samDoc, leeDoc}) + if err != nil { + return err + } + for i, want := range []Player{sam, lee} { + var got Player + if err := docs[i].DataTo(&got); err != nil { + return err + } + if !testutil.Equal(got, want) { + return fmt.Errorf("got %+v, want %+v", got, want) + } + } + return nil + }) + if err != nil { + t.Fatal(err) + } +} + +func TestIntegration_WatchDocument(t *testing.T) { + coll := integrationColl(t) + ctx := context.Background() + h := testHelper{t} + doc := coll.NewDoc() + it := doc.Snapshots(ctx) + defer it.Stop() + + next := func() *DocumentSnapshot { + snap, err := it.Next() + if err != nil { + t.Fatal(err) + } + return snap + } + + snap := next() + if snap.Exists() { + t.Fatal("snapshot exists; it should not") + } + want := map[string]interface{}{"a": int64(1), "b": "two"} + h.mustCreate(doc, want) + snap = next() + if got := snap.Data(); !testutil.Equal(got, want) { + t.Fatalf("got %v, want %v", got, want) + } + + h.mustUpdate(doc, []Update{{Path: "a", Value: int64(2)}}) + want["a"] = int64(2) + snap = next() + if got := snap.Data(); !testutil.Equal(got, want) { + t.Fatalf("got %v, want %v", got, want) + } + + h.mustDelete(doc) + snap = next() + if snap.Exists() { + t.Fatal("snapshot exists; it should not") + } + + h.mustCreate(doc, want) + snap = next() + if got := snap.Data(); !testutil.Equal(got, want) { + t.Fatalf("got %v, want %v", got, want) + } +} + +type imap map[string]interface{} + +func TestIntegration_WatchQuery(t *testing.T) { + ctx := context.Background() + coll := integrationColl(t) + h := testHelper{t} + + q := coll.Where("e", ">", 1).OrderBy("e", Asc) + it := q.Snapshots(ctx) + defer it.Stop() + + next := func() ([]*DocumentSnapshot, []DocumentChange) { + diter, err := it.Next() + if err != nil { + t.Fatal(err) + } + if it.ReadTime.IsZero() { + t.Fatal("zero time") + } + ds, err := diter.GetAll() + if err != nil { + t.Fatal(err) + } + if it.Size != len(ds) { + t.Fatalf("Size=%d but we have %d docs", it.Size, len(ds)) + } + return ds, it.Changes + } + + copts := append([]cmp.Option{cmpopts.IgnoreFields(DocumentSnapshot{}, "ReadTime")}, cmpOpts...) + check := func(msg string, wantd []*DocumentSnapshot, wantc []DocumentChange) { + gotd, gotc := next() + if diff := testutil.Diff(gotd, wantd, copts...); diff != "" { + t.Errorf("%s: %s", msg, diff) + } + if diff := testutil.Diff(gotc, wantc, copts...); diff != "" { + t.Errorf("%s: %s", msg, diff) + } + } + + check("initial", nil, nil) + doc1 := coll.NewDoc() + h.mustCreate(doc1, imap{"e": int64(2), "b": "two"}) + wds := h.mustGet(doc1) + check("one", + []*DocumentSnapshot{wds}, + []DocumentChange{{Kind: DocumentAdded, Doc: wds, OldIndex: -1, NewIndex: 0}}) + + // Add a doc that does not match. We won't see a snapshot for this. + doc2 := coll.NewDoc() + h.mustCreate(doc2, imap{"e": int64(1)}) + + // Update the first doc. We should see the change. We won't see doc2. 
+	h.mustUpdate(doc1, []Update{{Path: "e", Value: int64(3)}})
+	wds = h.mustGet(doc1)
+	check("update",
+		[]*DocumentSnapshot{wds},
+		[]DocumentChange{{Kind: DocumentModified, Doc: wds, OldIndex: 0, NewIndex: 0}})
+
+	// Now update doc so that it is not in the query. We should see a snapshot with no docs.
+	h.mustUpdate(doc1, []Update{{Path: "e", Value: int64(0)}})
+	check("update2", nil, []DocumentChange{{Kind: DocumentRemoved, Doc: wds, OldIndex: 0, NewIndex: -1}})
+
+	// Add two docs out of order. We should see them in order.
+	doc3 := coll.NewDoc()
+	doc4 := coll.NewDoc()
+	want3 := imap{"e": int64(5)}
+	want4 := imap{"e": int64(4)}
+	h.mustCreate(doc3, want3)
+	h.mustCreate(doc4, want4)
+	wds4 := h.mustGet(doc4)
+	wds3 := h.mustGet(doc3)
+	check("two#1",
+		[]*DocumentSnapshot{wds3},
+		[]DocumentChange{{Kind: DocumentAdded, Doc: wds3, OldIndex: -1, NewIndex: 0}})
+	check("two#2",
+		[]*DocumentSnapshot{wds4, wds3},
+		[]DocumentChange{{Kind: DocumentAdded, Doc: wds4, OldIndex: -1, NewIndex: 0}})
+	// Delete a doc.
+	h.mustDelete(doc4)
+	check("after del", []*DocumentSnapshot{wds3}, []DocumentChange{{Kind: DocumentRemoved, Doc: wds4, OldIndex: 0, NewIndex: -1}})
+}
+
+func TestIntegration_WatchQueryCancel(t *testing.T) {
+	ctx := context.Background()
+	coll := integrationColl(t)
+
+	q := coll.Where("e", ">", 1).OrderBy("e", Asc)
+	ctx, cancel := context.WithCancel(ctx)
+	it := q.Snapshots(ctx)
+	defer it.Stop()
+
+	// First call opens the stream.
+	_, err := it.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	cancel()
+	_, err = it.Next()
+	codeEq(t, "after cancel", codes.Canceled, err)
+}
+
+func codeEq(t *testing.T, msg string, code codes.Code, err error) {
+	if grpc.Code(err) != code {
+		t.Fatalf("%s:\ngot <%v>\nwant code %s", msg, err, code)
+	}
+}
+
+// loc returns the file:line of the test helper's caller, so failure messages
+// point at the test rather than at the helper.
+func loc() string {
+	_, file, line, ok := runtime.Caller(2)
+	if !ok {
+		return "???"
+	}
+	return fmt.Sprintf("%s:%d", filepath.Base(file), line)
+}
+
+func copyMap(m map[string]interface{}) map[string]interface{} {
+	c := map[string]interface{}{}
+	for k, v := range m {
+		c[k] = v
+	}
+	return c
+}
+
+func checkTimeBetween(t *testing.T, got, low, high time.Time) {
+	// Allow slack for clock skew.
+	const slack = 4 * time.Second
+	low = low.Add(-slack)
+	high = high.Add(slack)
+	if got.Before(low) || got.After(high) {
+		t.Fatalf("got %s, not in [%s, %s]", got, low, high)
+	}
+}
+
+type testHelper struct {
+	t *testing.T
+}
+
+func (h testHelper) mustCreate(doc *DocumentRef, data interface{}) *WriteResult {
+	wr, err := doc.Create(context.Background(), data)
+	if err != nil {
+		h.t.Fatalf("%s: creating: %v", loc(), err)
+	}
+	return wr
+}
+
+func (h testHelper) mustUpdate(doc *DocumentRef, updates []Update) *WriteResult {
+	wr, err := doc.Update(context.Background(), updates)
+	if err != nil {
+		h.t.Fatalf("%s: updating: %v", loc(), err)
+	}
+	return wr
+}
+
+func (h testHelper) mustGet(doc *DocumentRef) *DocumentSnapshot {
+	d, err := doc.Get(context.Background())
+	if err != nil {
+		h.t.Fatalf("%s: getting: %v", loc(), err)
+	}
+	return d
+}
+
+func (h testHelper) mustDelete(doc *DocumentRef) *WriteResult {
+	wr, err := doc.Delete(context.Background())
+	if err != nil {
+		h.t.Fatalf("%s: deleting: %v", loc(), err)
+	}
+	return wr
+}
diff --git a/vendor/cloud.google.com/go/firestore/internal/Makefile b/vendor/cloud.google.com/go/firestore/internal/Makefile
new file mode 100644
index 0000000..6769cb3
--- /dev/null
+++ b/vendor/cloud.google.com/go/firestore/internal/Makefile
@@ -0,0 +1,16 @@
+# Build doc.go from template and snippets.
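+#
+# Running "make" in this directory regenerates ../doc.go. The "build"
+# prerequisite compiles doc-snippets.go first, so every snippet is known to
+# compile before it is spliced into the package documentation.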
+
+SHELL=/bin/bash
+
+../doc.go: build doc-snippets.go doc.template snipdoc.awk
+	@tmp=$$(mktemp) && \
+	awk -f snipdoc.awk doc-snippets.go doc.template > $$tmp && \
+	chmod +w $@ && \
+	mv $$tmp $@ && \
+	chmod -w $@
+	@echo "wrote $@"
+
+.PHONY: build
+
+build:
+	go build doc-snippets.go
diff --git a/vendor/cloud.google.com/go/firestore/internal/doc-snippets.go b/vendor/cloud.google.com/go/firestore/internal/doc-snippets.go
new file mode 100644
index 0000000..4657e4b
--- /dev/null
+++ b/vendor/cloud.google.com/go/firestore/internal/doc-snippets.go
@@ -0,0 +1,161 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+	"fmt"
+
+	firestore "cloud.google.com/go/firestore"
+
+	"golang.org/x/net/context"
+	"google.golang.org/api/iterator"
+)
+
+const ELLIPSIS = 0
+
+//[ structDef
+type State struct {
+	Capital    string  `firestore:"capital"`
+	Population float64 `firestore:"pop"` // in millions
+}
+
+//]
+
+func f1() {
+	//[ NewClient
+	ctx := context.Background()
+	client, err := firestore.NewClient(ctx, "projectID")
+	if err != nil {
+		// TODO: Handle error.
+	}
+	//]
+	//[ refs
+	states := client.Collection("States")
+	ny := states.Doc("NewYork")
+	// Or, in a single call:
+	ny = client.Doc("States/NewYork")
+	//]
+	//[ docref.Get
+	docsnap, err := ny.Get(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	dataMap := docsnap.Data()
+	fmt.Println(dataMap)
+	//]
+	//[ DataTo
+	var nyData State
+	if err := docsnap.DataTo(&nyData); err != nil {
+		// TODO: Handle error.
+	}
+	//]
+	//[ GetAll
+	docsnaps, err := client.GetAll(ctx, []*firestore.DocumentRef{
+		states.Doc("Wisconsin"), states.Doc("Ohio"),
+	})
+	if err != nil {
+		// TODO: Handle error.
+	}
+	for _, ds := range docsnaps {
+		_ = ds // TODO: Use ds.
+	}
+	//]
+	//[ docref.Create
+	wr, err := ny.Create(ctx, State{
+		Capital:    "Albany",
+		Population: 19.8,
+	})
+	if err != nil {
+		// TODO: Handle error.
+	}
+	fmt.Println(wr)
+	//]
+	//[ docref.Set
+	ca := states.Doc("California")
+	_, err = ca.Set(ctx, State{
+		Capital:    "Sacramento",
+		Population: 39.14,
+	})
+	//]
+
+	//[ docref.Update
+	_, err = ca.Update(ctx, []firestore.Update{{Path: "capital", Value: "Sacramento"}})
+	//]
+
+	//[ docref.Delete
+	_, err = ny.Delete(ctx)
+	//]
+
+	//[ LUT-precond
+	docsnap, err = ca.Get(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	_, err = ca.Update(ctx,
+		[]firestore.Update{{Path: "capital", Value: "Sacramento"}},
+		firestore.LastUpdateTime(docsnap.UpdateTime))
+	//]
+
+	//[ WriteBatch
+	writeResults, err := client.Batch().
+		Create(ny, State{Capital: "Albany"}).
+		Update(ca, []firestore.Update{{Path: "capital", Value: "Sacramento"}}).
+		Delete(client.Doc("States/WestDakota")).
+		Commit(ctx)
+	//]
+	_ = writeResults
+
+	//[ Query
+	q := states.Where("pop", ">", 10).OrderBy("pop", firestore.Desc)
+	//]
+	//[ Documents
+	iter := q.Documents(ctx)
+	for {
+		doc, err := iter.Next()
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			// TODO: Handle error.
+ } + fmt.Println(doc.Data()) + } + //] + + //[ CollQuery + iter = client.Collection("States").Documents(ctx) + //] +} + +func txn() { + var ctx context.Context + var client *firestore.Client + //[ Transaction + ny := client.Doc("States/NewYork") + err := client.RunTransaction(ctx, func(ctx context.Context, tx *firestore.Transaction) error { + doc, err := tx.Get(ny) // tx.Get, NOT ny.Get! + if err != nil { + return err + } + pop, err := doc.DataAt("pop") + if err != nil { + return err + } + return tx.Update(ny, []firestore.Update{{Path: "pop", Value: pop.(float64) + 0.2}}) + }) + if err != nil { + // TODO: Handle error. + } + //] +} diff --git a/vendor/cloud.google.com/go/firestore/internal/doc.template b/vendor/cloud.google.com/go/firestore/internal/doc.template new file mode 100644 index 0000000..f5e196d --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/internal/doc.template @@ -0,0 +1,145 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// DO NOT EDIT doc.go. Modify internal/doc.template, then run make -C internal. + +/* +Package firestore provides a client for reading and writing to a Cloud Firestore +database. + +See https://cloud.google.com/firestore/docs for an introduction +to Cloud Firestore and additional help on using the Firestore API. + +Note: you can't use both Cloud Firestore and Cloud Datastore in the same +project. + +Creating a Client + +To start working with this package, create a client with a project ID: + +[NewClient] + +CollectionRefs and DocumentRefs + +In Firestore, documents are sets of key-value pairs, and collections are groups of +documents. A Firestore database consists of a hierarchy of alternating collections +and documents, referred to by slash-separated paths like +"States/California/Cities/SanFrancisco". + +This client is built around references to collections and documents. CollectionRefs +and DocumentRefs are lightweight values that refer to the corresponding database +entities. Creating a ref does not involve any network traffic. + +[refs] + +Reading + +Use DocumentRef.Get to read a document. The result is a DocumentSnapshot. +Call its Data method to obtain the entire document contents as a map. + +[docref.Get] + +You can also obtain a single field with DataAt, or extract the data into a struct +with DataTo. With the type definition + +[structDef] + +we can extract the document's data into a value of type State: + +[DataTo] + +Note that this client supports struct tags beginning with "firestore:" that work like +the tags of the encoding/json package, letting you rename fields, ignore them, or +omit their values when empty. + +To retrieve multiple documents from their references in a single call, use +Client.GetAll. + +[GetAll] + +Writing + +For writing individual documents, use the methods on DocumentReference. +Create creates a new document. + +[docref.Create] + +The first return value is a WriteResult, which contains the time +at which the document was updated. 
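+
+For example, with wr as the WriteResult from the Create call above:
+
+	fmt.Println("created at", wr.UpdateTime)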
+
+Create fails if the document exists. Another method, Set, either replaces an existing
+document or creates a new one.
+
+[docref.Set]
+
+To update some fields of an existing document, use Update. It takes a list of
+paths to update and their corresponding values.
+
+[docref.Update]
+
+Use DocumentRef.Delete to delete a document.
+
+[docref.Delete]
+
+Preconditions
+
+You can condition Deletes or Updates on when a document was last changed. Specify
+these preconditions as an option to a Delete or Update method. The check and the
+write happen atomically with a single RPC.
+
+[LUT-precond]
+
+Here we update a doc only if it hasn't changed since we read it.
+You could also do this with a transaction.
+
+To perform multiple writes at once, use a WriteBatch. Its methods chain
+for convenience.
+
+WriteBatch.Commit sends the collected writes to the server, where they happen
+atomically.
+
+[WriteBatch]
+
+Queries
+
+You can build queries that select documents from a collection. Begin with the
+collection, and build up a query using Select, Where and other methods of Query.
+
+[Query]
+
+Call the Query's Documents method to get an iterator, and use it like
+the other Google Cloud Client iterators.
+
+[Documents]
+
+To get all the documents in a collection, you can use the collection itself
+as a query.
+
+[CollQuery]
+
+Transactions
+
+Use a transaction to execute reads and writes atomically. All reads must happen
+before any writes. Transaction creation, commit, rollback and retry are handled for
+you by the Client.RunTransaction method; just provide a function and use the
+read and write methods of the Transaction passed to it.
+
+[Transaction]
+
+Authentication
+
+See examples of authorization and authentication at
+https://godoc.org/cloud.google.com/go#pkg-examples.
+*/
+package firestore
diff --git a/vendor/cloud.google.com/go/firestore/internal/snipdoc.awk b/vendor/cloud.google.com/go/firestore/internal/snipdoc.awk
new file mode 100644
index 0000000..ebb7e21
--- /dev/null
+++ b/vendor/cloud.google.com/go/firestore/internal/snipdoc.awk
@@ -0,0 +1,116 @@
+# Copyright 2017 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# snipdoc merges code snippets from Go source files into a template to
+# produce another go file (typically doc.go).
+#
+# Call with one or more .go files and a template file.
+#
+#   awk -f snipdoc.awk foo.go bar.go doc.template
+#
+# In the Go files, start a snippet with
+#   //[ NAME
+# and end it with
+#   //]
+#
+# In the template, write
+#   [NAME]
+# on a line by itself to insert the snippet NAME on that line.
+#
+# The following transformations are made to the Go code:
+# - Trailing blank lines are removed.
+# - `ELLIPSIS` and `_ = ELLIPSIS` are replaced by `...`
+
+
+/^[ \t]*\/\/\[/ { # start snippet in Go file
+	if (inGo()) {
+		if ($2 == "") {
+			die("missing snippet name")
+		}
+		curSnip = $2
+		next
+	}
+}
+
+/^[ \t]*\/\/]/ { # end snippet in Go file
+	if (inGo()) {
+		if (curSnip != "") {
+			# Remove all trailing newlines.
+ gsub(/[\t\n]+$/, "", snips[curSnip]) + curSnip = "" + next + } else { + die("//] without corresponding //[") + } + } +} + +ENDFILE { + if (curSnip != "") { + die("unclosed snippet: " curSnip) + } +} + +/^\[.*\]$/ { # Snippet marker in template file. + if (inTemplate()) { + name = substr($1, 2, length($1)-2) + if (snips[name] == "") { + die("no snippet named " name) + } + printf("%s\n", snips[name]) + afterSnip = 1 + next + } +} + +# Matches every line. +{ + if (curSnip != "") { + # If the first line in the snip has no indent, add the indent. + if (snips[curSnip] == "") { + if (index($0, "\t") == 1) { + extraIndent = "" + } else { + extraIndent = "\t" + } + } + + line = $0 + # Replace ELLIPSIS. + gsub(/_ = ELLIPSIS/, "...", line) + gsub(/ELLIPSIS/, "...", line) + + snips[curSnip] = snips[curSnip] extraIndent line "\n" + } else if (inTemplate()) { + afterSnip = 0 + # Copy to output. + print + } +} + + + +function inTemplate() { + return match(FILENAME, /\.template$/) +} + +function inGo() { + return match(FILENAME, /\.go$/) +} + + +function die(msg) { + printf("%s:%d: %s\n", FILENAME, FNR, msg) > "/dev/stderr" + exit 1 +} diff --git a/vendor/cloud.google.com/go/firestore/mock_test.go b/vendor/cloud.google.com/go/firestore/mock_test.go new file mode 100644 index 0000000..7c50147 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/mock_test.go @@ -0,0 +1,207 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +// A simple mock server. + +import ( + "fmt" + "strings" + + "cloud.google.com/go/internal/testutil" + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/empty" + "golang.org/x/net/context" +) + +type mockServer struct { + pb.FirestoreServer + + Addr string + + reqItems []reqItem + resps []interface{} +} + +type reqItem struct { + wantReq proto.Message + adjust func(gotReq proto.Message) +} + +func newMockServer() (*mockServer, error) { + srv, err := testutil.NewServer() + if err != nil { + return nil, err + } + mock := &mockServer{Addr: srv.Addr} + pb.RegisterFirestoreServer(srv.Gsrv, mock) + srv.Start() + return mock, nil +} + +// addRPC adds a (request, response) pair to the server's list of expected +// interactions. The server will compare the incoming request with wantReq +// using proto.Equal. The response can be a message or an error. +// +// For the Listen RPC, resp should be a []interface{}, where each element +// is either ListenResponse or an error. +// +// Passing nil for wantReq disables the request check. +func (s *mockServer) addRPC(wantReq proto.Message, resp interface{}) { + s.addRPCAdjust(wantReq, resp, nil) +} + +// addRPCAdjust is like addRPC, but accepts a function that can be used +// to tweak the requests before comparison, for example to adjust for +// randomness. 
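+//
+// For example, a test that cannot predict a server-chosen value might clear
+// the corresponding field before comparison (a hypothetical sketch; the field
+// shown is the v1beta1 CommitRequest transaction ID):
+//
+//	srv.addRPCAdjust(wantReq, resp, func(got proto.Message) {
+//		got.(*pb.CommitRequest).Transaction = nil
+//	})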
+func (s *mockServer) addRPCAdjust(wantReq proto.Message, resp interface{}, adjust func(proto.Message)) {
+	s.reqItems = append(s.reqItems, reqItem{wantReq, adjust})
+	s.resps = append(s.resps, resp)
+}
+
+// popRPC compares the request with the next expected (request, response) pair.
+// It returns the response, or an error if the request doesn't match what was
+// expected. It panics if there are no expected RPCs left.
+func (s *mockServer) popRPC(gotReq proto.Message) (interface{}, error) {
+	if len(s.reqItems) == 0 {
+		panic("out of RPCs")
+	}
+	ri := s.reqItems[0]
+	s.reqItems = s.reqItems[1:]
+	if ri.wantReq != nil {
+		if ri.adjust != nil {
+			ri.adjust(gotReq)
+		}
+		if !proto.Equal(gotReq, ri.wantReq) {
+			return nil, fmt.Errorf("mockServer: bad request\ngot: %T\n%s\nwant: %T\n%s",
+				gotReq, proto.MarshalTextString(gotReq),
+				ri.wantReq, proto.MarshalTextString(ri.wantReq))
+		}
+	}
+	resp := s.resps[0]
+	s.resps = s.resps[1:]
+	if err, ok := resp.(error); ok {
+		return nil, err
+	}
+	return resp, nil
+}
+
+func (s *mockServer) reset() {
+	s.reqItems = nil
+	s.resps = nil
+}
+
+func (s *mockServer) GetDocument(_ context.Context, req *pb.GetDocumentRequest) (*pb.Document, error) {
+	res, err := s.popRPC(req)
+	if err != nil {
+		return nil, err
+	}
+	return res.(*pb.Document), nil
+}
+
+func (s *mockServer) Commit(_ context.Context, req *pb.CommitRequest) (*pb.CommitResponse, error) {
+	res, err := s.popRPC(req)
+	if err != nil {
+		return nil, err
+	}
+	return res.(*pb.CommitResponse), nil
+}
+
+func (s *mockServer) BatchGetDocuments(req *pb.BatchGetDocumentsRequest, bs pb.Firestore_BatchGetDocumentsServer) error {
+	res, err := s.popRPC(req)
+	if err != nil {
+		return err
+	}
+	responses := res.([]interface{})
+	for _, res := range responses {
+		switch res := res.(type) {
+		case *pb.BatchGetDocumentsResponse:
+			if err := bs.Send(res); err != nil {
+				return err
+			}
+		case error:
+			return res
+		default:
+			panic(fmt.Sprintf("bad response type in BatchGetDocuments: %+v", res))
+		}
+	}
+	return nil
+}
+
+func (s *mockServer) RunQuery(req *pb.RunQueryRequest, qs pb.Firestore_RunQueryServer) error {
+	res, err := s.popRPC(req)
+	if err != nil {
+		return err
+	}
+	responses := res.([]interface{})
+	for _, res := range responses {
+		switch res := res.(type) {
+		case *pb.RunQueryResponse:
+			if err := qs.Send(res); err != nil {
+				return err
+			}
+		case error:
+			return res
+		default:
+			panic(fmt.Sprintf("bad response type in RunQuery: %+v", res))
+		}
+	}
+	return nil
+}
+
+func (s *mockServer) BeginTransaction(_ context.Context, req *pb.BeginTransactionRequest) (*pb.BeginTransactionResponse, error) {
+	res, err := s.popRPC(req)
+	if err != nil {
+		return nil, err
+	}
+	return res.(*pb.BeginTransactionResponse), nil
+}
+
+func (s *mockServer) Rollback(_ context.Context, req *pb.RollbackRequest) (*empty.Empty, error) {
+	res, err := s.popRPC(req)
+	if err != nil {
+		return nil, err
+	}
+	return res.(*empty.Empty), nil
+}
+
+func (s *mockServer) Listen(stream pb.Firestore_ListenServer) error {
+	req, err := stream.Recv()
+	if err != nil {
+		return err
+	}
+	responses, err := s.popRPC(req)
+	if err != nil {
+		if status.Code(err) == codes.Unknown && strings.Contains(err.Error(), "mockServer") {
+			// The stream will retry on Unknown, but we don't want that to happen if
+			// the error comes from us.
+ panic(err) + } + return err + } + for _, res := range responses.([]interface{}) { + if err, ok := res.(error); ok { + return err + } + if err := stream.Send(res.(*pb.ListenResponse)); err != nil { + return err + } + } + return nil +} diff --git a/vendor/cloud.google.com/go/firestore/options.go b/vendor/cloud.google.com/go/firestore/options.go new file mode 100644 index 0000000..874253b --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/options.go @@ -0,0 +1,177 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +import ( + "errors" + "fmt" + "time" + + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + + "github.com/golang/protobuf/ptypes" +) + +// A Precondition modifies a Firestore update or delete operation. +type Precondition interface { + // Returns the corresponding Precondition proto. + preconditionProto() (*pb.Precondition, error) +} + +// Exists is a Precondition that checks for the existence of a resource before +// writing to it. If the check fails, the write does not occur. +var Exists Precondition + +func init() { + // Initialize here so godoc doesn't show the internal value. + Exists = exists(true) +} + +type exists bool + +func (e exists) preconditionProto() (*pb.Precondition, error) { + return &pb.Precondition{ + ConditionType: &pb.Precondition_Exists{bool(e)}, + }, nil +} + +func (e exists) String() string { + if e { + return "Exists" + } else { + return "DoesNotExist" + } +} + +// LastUpdateTime returns a Precondition that checks that a resource must exist and +// must have last been updated at the given time. If the check fails, the write +// does not occur. +func LastUpdateTime(t time.Time) Precondition { return lastUpdateTime(t) } + +type lastUpdateTime time.Time + +func (u lastUpdateTime) preconditionProto() (*pb.Precondition, error) { + ts, err := ptypes.TimestampProto(time.Time(u)) + if err != nil { + return nil, err + } + return &pb.Precondition{ + ConditionType: &pb.Precondition_UpdateTime{ts}, + }, nil +} + +func (u lastUpdateTime) String() string { return fmt.Sprintf("LastUpdateTime(%s)", time.Time(u)) } + +func processPreconditionsForDelete(preconds []Precondition) (*pb.Precondition, error) { + // At most one option permitted. + switch len(preconds) { + case 0: + return nil, nil + case 1: + return preconds[0].preconditionProto() + default: + return nil, fmt.Errorf("firestore: conflicting preconditions: %+v", preconds) + } +} + +func processPreconditionsForUpdate(preconds []Precondition) (*pb.Precondition, error) { + // At most one option permitted, and it cannot be Exists. + switch len(preconds) { + case 0: + // If the user doesn't provide any options, default to Exists(true). 
+		return exists(true).preconditionProto()
+	case 1:
+		if _, ok := preconds[0].(exists); ok {
+			return nil, errors.New("firestore: cannot use Exists precondition with Update")
+		}
+		return preconds[0].preconditionProto()
+	default:
+		return nil, fmt.Errorf("firestore: conflicting preconditions: %+v", preconds)
+	}
+}
+
+func processPreconditionsForVerify(preconds []Precondition) (*pb.Precondition, error) {
+	// At most one option permitted.
+	switch len(preconds) {
+	case 0:
+		return nil, nil
+	case 1:
+		return preconds[0].preconditionProto()
+	default:
+		return nil, fmt.Errorf("firestore: conflicting preconditions: %+v", preconds)
+	}
+}
+
+// A SetOption modifies a Firestore set operation.
+type SetOption interface {
+	fieldPaths() (fps []FieldPath, all bool, err error)
+}
+
+// MergeAll is a SetOption that causes all the fields given in the data argument
+// to Set to be overwritten. Other fields on the existing document are left
+// untouched. It is not supported for struct data.
+var MergeAll SetOption = merge{all: true}
+
+// Merge returns a SetOption that causes only the given field paths to be
+// overwritten. Other fields on the existing document will be untouched. It is an
+// error if a provided field path does not refer to a value in the data passed to
+// Set.
+func Merge(fps ...FieldPath) SetOption {
+	for _, fp := range fps {
+		if err := fp.validate(); err != nil {
+			return merge{err: err}
+		}
+	}
+	return merge{paths: fps}
+}
+
+type merge struct {
+	all   bool
+	paths []FieldPath
+	err   error
+}
+
+func (m merge) String() string {
+	if m.err != nil {
+		return fmt.Sprintf("<Merge error: %v>", m.err)
+	}
+	if m.all {
+		return "MergeAll"
+	}
+	return fmt.Sprintf("Merge(%+v)", m.paths)
+}
+
+func (m merge) fieldPaths() (fps []FieldPath, all bool, err error) {
+	if m.err != nil {
+		return nil, false, m.err
+	}
+	if err := checkNoDupOrPrefix(m.paths); err != nil {
+		return nil, false, err
+	}
+	if m.all {
+		return nil, true, nil
+	}
+	return m.paths, false, nil
+}
+
+func processSetOptions(opts []SetOption) (fps []FieldPath, all bool, err error) {
+	switch len(opts) {
+	case 0:
+		return nil, false, nil
+	case 1:
+		return opts[0].fieldPaths()
+	default:
+		return nil, false, fmt.Errorf("conflicting options: %+v", opts)
+	}
+}
diff --git a/vendor/cloud.google.com/go/firestore/options_test.go b/vendor/cloud.google.com/go/firestore/options_test.go
new file mode 100644
index 0000000..211557b
--- /dev/null
+++ b/vendor/cloud.google.com/go/firestore/options_test.go
@@ -0,0 +1,151 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package firestore + +import ( + "testing" + + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" +) + +func TestProcessPreconditionsForVerify(t *testing.T) { + for _, test := range []struct { + in []Precondition + want *pb.Precondition + wantErr bool + }{ + { + in: nil, + want: nil, + }, + { + in: []Precondition{}, + want: nil, + }, + { + in: []Precondition{Exists}, + want: &pb.Precondition{&pb.Precondition_Exists{true}}, + }, + { + in: []Precondition{LastUpdateTime(aTime)}, + want: &pb.Precondition{&pb.Precondition_UpdateTime{aTimestamp}}, + }, + { + in: []Precondition{Exists, LastUpdateTime(aTime)}, + wantErr: true, + }, + { + in: []Precondition{Exists, Exists}, + wantErr: true, + }, + } { + got, err := processPreconditionsForVerify(test.in) + switch { + case test.wantErr && err == nil: + t.Errorf("%v: got nil, want error", test.in) + case !test.wantErr && err != nil: + t.Errorf("%v: got <%v>, want no error", test.in, err) + case !test.wantErr && err == nil && !testEqual(got, test.want): + t.Errorf("%v: got %+v, want %v", test.in, got, test.want) + } + } +} + +func TestProcessPreconditionsForDelete(t *testing.T) { + for _, test := range []struct { + in []Precondition + want *pb.Precondition + wantErr bool + }{ + { + in: nil, + want: nil, + }, + { + in: []Precondition{}, + want: nil, + }, + { + in: []Precondition{Exists}, + want: &pb.Precondition{&pb.Precondition_Exists{true}}, + }, + { + in: []Precondition{LastUpdateTime(aTime)}, + want: &pb.Precondition{&pb.Precondition_UpdateTime{aTimestamp}}, + }, + { + in: []Precondition{Exists, LastUpdateTime(aTime)}, + wantErr: true, + }, + { + in: []Precondition{Exists, Exists}, + wantErr: true, + }, + } { + got, err := processPreconditionsForDelete(test.in) + switch { + case test.wantErr && err == nil: + t.Errorf("%v: got nil, want error", test.in) + case !test.wantErr && err != nil: + t.Errorf("%v: got <%v>, want no error", test.in, err) + case !test.wantErr && err == nil && !testEqual(got, test.want): + t.Errorf("%v: got %+v, want %v", test.in, got, test.want) + } + } +} + +func TestProcessPreconditionsForUpdate(t *testing.T) { + for _, test := range []struct { + in []Precondition + want *pb.Precondition + wantErr bool + }{ + { + in: nil, + want: &pb.Precondition{&pb.Precondition_Exists{true}}, + }, + { + in: []Precondition{}, + want: &pb.Precondition{&pb.Precondition_Exists{true}}, + }, + + { + in: []Precondition{Exists}, + wantErr: true, + }, + { + in: []Precondition{LastUpdateTime(aTime)}, + want: &pb.Precondition{&pb.Precondition_UpdateTime{aTimestamp}}, + }, + { + in: []Precondition{Exists, LastUpdateTime(aTime)}, + wantErr: true, + }, + { + in: []Precondition{Exists, Exists}, + wantErr: true, + }, + } { + got, err := processPreconditionsForUpdate(test.in) + switch { + case test.wantErr && err == nil: + t.Errorf("%v: got nil, want error", test.in) + case !test.wantErr && err != nil: + t.Errorf("%v: got <%v>, want no error", test.in, err) + case !test.wantErr && err == nil && !testEqual(got, test.want): + t.Errorf("%v: got %+v, want %v", test.in, got, test.want) + } + } +} diff --git a/vendor/cloud.google.com/go/firestore/order.go b/vendor/cloud.google.com/go/firestore/order.go new file mode 100644 index 0000000..9c9716b --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/order.go @@ -0,0 +1,216 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +import ( + "bytes" + "fmt" + "math" + "sort" + "strings" + + tspb "github.com/golang/protobuf/ptypes/timestamp" + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" +) + +// Returns a negative number, zero, or a positive number depending on whether a is +// less than, equal to, or greater than b according to Firestore's ordering of +// values. +func compareValues(a, b *pb.Value) int { + ta := typeOrder(a) + tb := typeOrder(b) + if ta != tb { + return compareInt64s(int64(ta), int64(tb)) + } + switch a := a.ValueType.(type) { + case *pb.Value_NullValue: + return 0 // nulls are equal + + case *pb.Value_BooleanValue: + av := a.BooleanValue + bv := b.GetBooleanValue() + switch { + case av && !bv: + return 1 + case bv && !av: + return -1 + default: + return 0 + } + + case *pb.Value_IntegerValue: + return compareNumbers(float64(a.IntegerValue), toFloat(b)) + + case *pb.Value_DoubleValue: + return compareNumbers(a.DoubleValue, toFloat(b)) + + case *pb.Value_TimestampValue: + return compareTimestamps(a.TimestampValue, b.GetTimestampValue()) + + case *pb.Value_StringValue: + return strings.Compare(a.StringValue, b.GetStringValue()) + + case *pb.Value_BytesValue: + return bytes.Compare(a.BytesValue, b.GetBytesValue()) + + case *pb.Value_ReferenceValue: + return compareReferences(a.ReferenceValue, b.GetReferenceValue()) + + case *pb.Value_GeoPointValue: + ag := a.GeoPointValue + bg := b.GetGeoPointValue() + if ag.Latitude != bg.Latitude { + return compareFloat64s(ag.Latitude, bg.Latitude) + } + return compareFloat64s(ag.Longitude, bg.Longitude) + + case *pb.Value_ArrayValue: + return compareArrays(a.ArrayValue.Values, b.GetArrayValue().Values) + + case *pb.Value_MapValue: + return compareMaps(a.MapValue.Fields, b.GetMapValue().Fields) + + default: + panic(fmt.Sprintf("bad value type: %v", a)) + } +} + +// Treats NaN as less than any non-NaN. +func compareNumbers(a, b float64) int { + switch { + case math.IsNaN(a): + if math.IsNaN(b) { + return 0 + } + return -1 + case math.IsNaN(b): + return 1 + default: + return compareFloat64s(a, b) + } +} + +// Return v as a float64, assuming it's an Integer or Double. +func toFloat(v *pb.Value) float64 { + if x, ok := v.ValueType.(*pb.Value_IntegerValue); ok { + return float64(x.IntegerValue) + } + return v.GetDoubleValue() +} + +func compareTimestamps(a, b *tspb.Timestamp) int { + if c := compareInt64s(a.Seconds, b.Seconds); c != 0 { + return c + } + return compareInt64s(int64(a.Nanos), int64(b.Nanos)) +} + +func compareReferences(a, b string) int { + // Compare path components lexicographically. 
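+	// A reference that is a prefix of another sorts first, e.g. "c1/doc2"
+	// before "c1/doc2/c2/doc1": compareSequences puts the shorter sequence
+	// first when all shared components are equal.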
+ pa := strings.Split(a, "/") + pb := strings.Split(b, "/") + return compareSequences(len(pa), len(pb), func(i int) int { + return strings.Compare(pa[i], pb[i]) + }) +} + +func compareArrays(a, b []*pb.Value) int { + return compareSequences(len(a), len(b), func(i int) int { + return compareValues(a[i], b[i]) + }) +} + +func compareMaps(a, b map[string]*pb.Value) int { + sortedKeys := func(m map[string]*pb.Value) []string { + var ks []string + for k := range m { + ks = append(ks, k) + } + sort.Strings(ks) + return ks + } + + aks := sortedKeys(a) + bks := sortedKeys(b) + return compareSequences(len(aks), len(bks), func(i int) int { + if c := strings.Compare(aks[i], bks[i]); c != 0 { + return c + } + k := aks[i] + return compareValues(a[k], b[k]) + }) +} + +func compareSequences(len1, len2 int, compare func(int) int) int { + for i := 0; i < len1 && i < len2; i++ { + if c := compare(i); c != 0 { + return c + } + } + return compareInt64s(int64(len1), int64(len2)) +} + +func compareFloat64s(a, b float64) int { + switch { + case a < b: + return -1 + case a > b: + return 1 + default: + return 0 + } +} + +func compareInt64s(a, b int64) int { + switch { + case a < b: + return -1 + case a > b: + return 1 + default: + return 0 + } +} + +// Return an integer corresponding to the type of value stored in v, such that +// comparing the resulting integers gives the Firestore ordering for types. +func typeOrder(v *pb.Value) int { + switch v.ValueType.(type) { + case *pb.Value_NullValue: + return 0 + case *pb.Value_BooleanValue: + return 1 + case *pb.Value_IntegerValue: + return 2 + case *pb.Value_DoubleValue: + return 2 + case *pb.Value_TimestampValue: + return 3 + case *pb.Value_StringValue: + return 4 + case *pb.Value_BytesValue: + return 5 + case *pb.Value_ReferenceValue: + return 6 + case *pb.Value_GeoPointValue: + return 7 + case *pb.Value_ArrayValue: + return 8 + case *pb.Value_MapValue: + return 9 + default: + panic(fmt.Sprintf("bad value type: %v", v)) + } +} diff --git a/vendor/cloud.google.com/go/firestore/order_test.go b/vendor/cloud.google.com/go/firestore/order_test.go new file mode 100644 index 0000000..0702374 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/order_test.go @@ -0,0 +1,118 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +import ( + "math" + "testing" + "time" + + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + "google.golang.org/genproto/googleapis/type/latlng" +) + +func TestCompareValues(t *testing.T) { + // Ordered list of values. 
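+	// Every entry must sort strictly before all later entries; the loops
+	// below check compareValues in both directions for each pair.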
+	vals := []*pb.Value{
+		nullValue,
+		boolval(false),
+		boolval(true),
+		floatval(math.NaN()),
+		floatval(math.Inf(-1)),
+		floatval(-math.MaxFloat64),
+		int64val(math.MinInt64),
+		floatval(-1.1),
+		intval(-1),
+		intval(0),
+		floatval(math.SmallestNonzeroFloat64),
+		intval(1),
+		floatval(1.1),
+		intval(2),
+		int64val(math.MaxInt64),
+		floatval(math.MaxFloat64),
+		floatval(math.Inf(1)),
+		tsval(time.Date(2016, 5, 20, 10, 20, 0, 0, time.UTC)),
+		tsval(time.Date(2016, 10, 21, 15, 32, 0, 0, time.UTC)),
+		strval(""),
+		strval("\u0000\ud7ff\ue000\uffff"),
+		strval("(╯°□°)╯︵ ┻━┻"),
+		strval("a"),
+		strval("abc def"),
+		strval("e\u0301b"),
+		strval("æ"),
+		strval("\u00e9a"),
+		bytesval([]byte{}),
+		bytesval([]byte{0}),
+		bytesval([]byte{0, 1, 2, 3, 4}),
+		bytesval([]byte{0, 1, 2, 4, 3}),
+		bytesval([]byte{255}),
+		refval("projects/p1/databases/d1/documents/c1/doc1"),
+		refval("projects/p1/databases/d1/documents/c1/doc2"),
+		refval("projects/p1/databases/d1/documents/c1/doc2/c2/doc1"),
+		refval("projects/p1/databases/d1/documents/c1/doc2/c2/doc2"),
+		refval("projects/p1/databases/d1/documents/c10/doc1"),
+		refval("projects/p1/databases/dkkkkklkjnjkkk1/documents/c2/doc1"),
+		refval("projects/p2/databases/d2/documents/c1/doc1"),
+		refval("projects/p2/databases/d2/documents/c1-/doc1"),
+		geopoint(-90, -180),
+		geopoint(-90, 0),
+		geopoint(-90, 180),
+		geopoint(0, -180),
+		geopoint(0, 0),
+		geopoint(0, 180),
+		geopoint(1, -180),
+		geopoint(1, 0),
+		geopoint(1, 180),
+		geopoint(90, -180),
+		geopoint(90, 0),
+		geopoint(90, 180),
+		arrayval(),
+		arrayval(strval("bar")),
+		arrayval(strval("foo")),
+		arrayval(strval("foo"), intval(1)),
+		arrayval(strval("foo"), intval(2)),
+		arrayval(strval("foo"), strval("0")),
+		mapval(map[string]*pb.Value{"bar": intval(0)}),
+		mapval(map[string]*pb.Value{"bar": intval(0), "foo": intval(1)}),
+		mapval(map[string]*pb.Value{"foo": intval(1)}),
+		mapval(map[string]*pb.Value{"foo": intval(2)}),
+		mapval(map[string]*pb.Value{"foo": strval("0")}),
+	}
+
+	for i, v1 := range vals {
+		if got := compareValues(v1, v1); got != 0 {
+			t.Errorf("compare(%v, %v) == %d, want 0", v1, v1, got)
+		}
+		for _, v2 := range vals[i+1:] {
+			if got := compareValues(v1, v2); got != -1 {
+				t.Errorf("compare(%v, %v) == %d, want -1", v1, v2, got)
+			}
+			if got := compareValues(v2, v1); got != 1 {
+				t.Errorf("compare(%v, %v) == %d, want 1", v1, v2, got)
+			}
+		}
+	}
+
+	// Integers and Doubles order the same.
+	n1 := intval(17)
+	n2 := floatval(17)
+	if got := compareValues(n1, n2); got != 0 {
+		t.Errorf("compare(%v, %v) == %d, want 0", n1, n2, got)
+	}
+}
+
+func geopoint(lat, lng float64) *pb.Value {
+	return geoval(&latlng.LatLng{Latitude: lat, Longitude: lng})
+}
diff --git a/vendor/cloud.google.com/go/firestore/query.go b/vendor/cloud.google.com/go/firestore/query.go
new file mode 100644
index 0000000..5ef6f4a
--- /dev/null
+++ b/vendor/cloud.google.com/go/firestore/query.go
@@ -0,0 +1,757 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package firestore
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"time"
+
+	"golang.org/x/net/context"
+
+	pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
+
+	"cloud.google.com/go/internal/btree"
+	"github.com/golang/protobuf/ptypes/wrappers"
+	"google.golang.org/api/iterator"
+)
+
+// Query represents a Firestore query.
+//
+// Query values are immutable. Each Query method creates
+// a new Query; it does not modify the old.
+type Query struct {
+	c            *Client
+	parentPath   string // path of the collection's parent
+	collectionID string
+	selection    []FieldPath
+	filters      []filter
+	orders       []order
+	offset       int32
+	limit        *wrappers.Int32Value
+	startVals, endVals     []interface{}
+	startDoc, endDoc       *DocumentSnapshot
+	startBefore, endBefore bool
+	err          error
+}
+
+func (q *Query) collectionPath() string {
+	return q.parentPath + "/documents/" + q.collectionID
+}
+
+// DocumentID is the special field name representing the ID of a document
+// in queries.
+const DocumentID = "__name__"
+
+// Select returns a new Query that specifies the paths
+// to return from the result documents.
+// Each path argument can be a single field or a dot-separated sequence of
+// fields, and must not contain any of the runes "˜*/[]".
+func (q Query) Select(paths ...string) Query {
+	var fps []FieldPath
+	for _, s := range paths {
+		fp, err := parseDotSeparatedString(s)
+		if err != nil {
+			q.err = err
+			return q
+		}
+		fps = append(fps, fp)
+	}
+	return q.SelectPaths(fps...)
+}
+
+// SelectPaths returns a new Query that specifies the field paths
+// to return from the result documents.
+func (q Query) SelectPaths(fieldPaths ...FieldPath) Query {
+	if len(fieldPaths) == 0 {
+		q.selection = []FieldPath{{DocumentID}}
+	} else {
+		q.selection = fieldPaths
+	}
+	return q
+}
+
+// Where returns a new Query that filters the set of results.
+// A Query can have multiple filters.
+// The path argument can be a single field or a dot-separated sequence of
+// fields, and must not contain any of the runes "˜*/[]".
+// The op argument must be one of "==", "<", "<=", ">" or ">=".
+func (q Query) Where(path, op string, value interface{}) Query {
+	fp, err := parseDotSeparatedString(path)
+	if err != nil {
+		q.err = err
+		return q
+	}
+	q.filters = append(append([]filter(nil), q.filters...), filter{fp, op, value})
+	return q
+}
+
+// WherePath returns a new Query that filters the set of results.
+// A Query can have multiple filters.
+// The op argument must be one of "==", "<", "<=", ">" or ">=".
+func (q Query) WherePath(fp FieldPath, op string, value interface{}) Query {
+	q.filters = append(append([]filter(nil), q.filters...), filter{fp, op, value})
+	return q
+}
+
+// Direction is the sort direction for result ordering.
+type Direction int32
+
+const (
+	// Asc sorts results from smallest to largest.
+	Asc Direction = Direction(pb.StructuredQuery_ASCENDING)
+
+	// Desc sorts results from largest to smallest.
+	Desc Direction = Direction(pb.StructuredQuery_DESCENDING)
+)
+
+// OrderBy returns a new Query that specifies the order in which results are
+// returned. A Query can have multiple OrderBy/OrderByPath specifications. OrderBy
+// appends the specification to the list of existing ones.
+//
+// The path argument can be a single field or a dot-separated sequence of
+// fields, and must not contain any of the runes "˜*/[]".
+// +// To order by document name, use the special field path DocumentID. +func (q Query) OrderBy(path string, dir Direction) Query { + fp, err := parseDotSeparatedString(path) + if err != nil { + q.err = err + return q + } + q.orders = append(q.copyOrders(), order{fp, dir}) + return q +} + +// OrderByPath returns a new Query that specifies the order in which results are +// returned. A Query can have multiple OrderBy/OrderByPath specifications. +// OrderByPath appends the specification to the list of existing ones. +func (q Query) OrderByPath(fp FieldPath, dir Direction) Query { + q.orders = append(q.copyOrders(), order{fp, dir}) + return q +} + +func (q *Query) copyOrders() []order { + return append([]order(nil), q.orders...) +} + +// Offset returns a new Query that specifies the number of initial results to skip. +// It must not be negative. +func (q Query) Offset(n int) Query { + q.offset = trunc32(n) + return q +} + +// Limit returns a new Query that specifies the maximum number of results to return. +// It must not be negative. +func (q Query) Limit(n int) Query { + q.limit = &wrappers.Int32Value{trunc32(n)} + return q +} + +// StartAt returns a new Query that specifies that results should start at +// the document with the given field values. +// +// If StartAt is called with a single DocumentSnapshot, its field values are used. +// The DocumentSnapshot must have all the fields mentioned in the OrderBy clauses. +// +// Otherwise, StartAt should be called with one field value for each OrderBy clause, +// in the order that they appear. For example, in +// q.OrderBy("X", Asc).OrderBy("Y", Desc).StartAt(1, 2) +// results will begin at the first document where X = 1 and Y = 2. +// +// If an OrderBy call uses the special DocumentID field path, the corresponding value +// should be the document ID relative to the query's collection. For example, to +// start at the document "NewYork" in the "States" collection, write +// +// client.Collection("States").OrderBy(DocumentID, firestore.Asc).StartAt("NewYork") +// +// Calling StartAt overrides a previous call to StartAt or StartAfter. +func (q Query) StartAt(docSnapshotOrFieldValues ...interface{}) Query { + q.startBefore = true + q.startVals, q.startDoc, q.err = q.processCursorArg("StartAt", docSnapshotOrFieldValues) + return q +} + +// StartAfter returns a new Query that specifies that results should start just after +// the document with the given field values. See Query.StartAt for more information. +// +// Calling StartAfter overrides a previous call to StartAt or StartAfter. +func (q Query) StartAfter(docSnapshotOrFieldValues ...interface{}) Query { + q.startBefore = false + q.startVals, q.startDoc, q.err = q.processCursorArg("StartAfter", docSnapshotOrFieldValues) + return q +} + +// EndAt returns a new Query that specifies that results should end at the +// document with the given field values. See Query.StartAt for more information. +// +// Calling EndAt overrides a previous call to EndAt or EndBefore. +func (q Query) EndAt(docSnapshotOrFieldValues ...interface{}) Query { + q.endBefore = false + q.endVals, q.endDoc, q.err = q.processCursorArg("EndAt", docSnapshotOrFieldValues) + return q +} + +// EndBefore returns a new Query that specifies that results should end just before +// the document with the given field values. See Query.StartAt for more information. +// +// Calling EndBefore overrides a previous call to EndAt or EndBefore. 
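+//
+// For example, with q.OrderBy("X", Asc), q.EndBefore(7) returns results up to,
+// but not including, the first document where X = 7.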
+func (q Query) EndBefore(docSnapshotOrFieldValues ...interface{}) Query {
+	q.endBefore = true
+	q.endVals, q.endDoc, q.err = q.processCursorArg("EndBefore", docSnapshotOrFieldValues)
+	return q
+}
+
+func (q *Query) processCursorArg(name string, docSnapshotOrFieldValues []interface{}) ([]interface{}, *DocumentSnapshot, error) {
+	for _, e := range docSnapshotOrFieldValues {
+		if ds, ok := e.(*DocumentSnapshot); ok {
+			if len(docSnapshotOrFieldValues) == 1 {
+				return nil, ds, nil
+			}
+			return nil, nil, fmt.Errorf("firestore: a document snapshot must be the only argument to %s", name)
+		}
+	}
+	return docSnapshotOrFieldValues, nil, nil
+}
+
+func (q Query) query() *Query { return &q }
+
+func (q Query) toProto() (*pb.StructuredQuery, error) {
+	if q.err != nil {
+		return nil, q.err
+	}
+	if q.collectionID == "" {
+		return nil, errors.New("firestore: query created without CollectionRef")
+	}
+	p := &pb.StructuredQuery{
+		From:   []*pb.StructuredQuery_CollectionSelector{{CollectionId: q.collectionID}},
+		Offset: q.offset,
+		Limit:  q.limit,
+	}
+	if len(q.selection) > 0 {
+		p.Select = &pb.StructuredQuery_Projection{}
+		for _, fp := range q.selection {
+			if err := fp.validate(); err != nil {
+				return nil, err
+			}
+			p.Select.Fields = append(p.Select.Fields, fref(fp))
+		}
+	}
+	// If there is only one filter, use it directly. Otherwise, construct
+	// a CompositeFilter.
+	if len(q.filters) == 1 {
+		pf, err := q.filters[0].toProto()
+		if err != nil {
+			return nil, err
+		}
+		p.Where = pf
+	} else if len(q.filters) > 1 {
+		cf := &pb.StructuredQuery_CompositeFilter{
+			Op: pb.StructuredQuery_CompositeFilter_AND,
+		}
+		p.Where = &pb.StructuredQuery_Filter{
+			FilterType: &pb.StructuredQuery_Filter_CompositeFilter{cf},
+		}
+		for _, f := range q.filters {
+			pf, err := f.toProto()
+			if err != nil {
+				return nil, err
+			}
+			cf.Filters = append(cf.Filters, pf)
+		}
+	}
+	orders := q.orders
+	if q.startDoc != nil || q.endDoc != nil {
+		orders = q.adjustOrders()
+	}
+	for _, ord := range orders {
+		po, err := ord.toProto()
+		if err != nil {
+			return nil, err
+		}
+		p.OrderBy = append(p.OrderBy, po)
+	}
+
+	cursor, err := q.toCursor(q.startVals, q.startDoc, q.startBefore, orders)
+	if err != nil {
+		return nil, err
+	}
+	p.StartAt = cursor
+	cursor, err = q.toCursor(q.endVals, q.endDoc, q.endBefore, orders)
+	if err != nil {
+		return nil, err
+	}
+	p.EndAt = cursor
+	return p, nil
+}
+
+// If there is a start/end that uses a DocumentSnapshot, we may need to adjust the OrderBy
+// clauses that the user provided: we add OrderBy(__name__) if it isn't already present, and
+// we make sure we don't invalidate the original query by adding an OrderBy for inequality filters.
+func (q *Query) adjustOrders() []order {
+	// If the user is already ordering by document ID, don't change anything.
+	for _, ord := range q.orders {
+		if ord.isDocumentID() {
+			return q.orders
+		}
+	}
+	// If there are OrderBy clauses, append an OrderBy(DocumentID), using the direction of the last OrderBy clause.
+	if len(q.orders) > 0 {
+		return append(q.copyOrders(), order{
+			fieldPath: FieldPath{DocumentID},
+			dir:       q.orders[len(q.orders)-1].dir,
+		})
+	}
+	// If there are no OrderBy clauses but there is an inequality, add an OrderBy clause
+	// for the field of the first inequality.
+	var orders []order
+	for _, f := range q.filters {
+		if f.op != "==" {
+			orders = []order{{fieldPath: f.fieldPath, dir: Asc}}
+			break
+		}
+	}
+	// Add an ascending OrderBy(DocumentID).
+	return append(orders, order{fieldPath: FieldPath{DocumentID}, dir: Asc})
+}
+
+func (q *Query) toCursor(fieldValues []interface{}, ds *DocumentSnapshot, before bool, orders []order) (*pb.Cursor, error) {
+	var vals []*pb.Value
+	var err error
+	if ds != nil {
+		vals, err = q.docSnapshotToCursorValues(ds, orders)
+	} else if len(fieldValues) != 0 {
+		vals, err = q.fieldValuesToCursorValues(fieldValues)
+	} else {
+		return nil, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+	return &pb.Cursor{Values: vals, Before: before}, nil
+}
+
+// fieldValuesToCursorValues converts the cursor's field values to protos.
+func (q *Query) fieldValuesToCursorValues(fieldValues []interface{}) ([]*pb.Value, error) {
+	if len(fieldValues) != len(q.orders) {
+		return nil, errors.New("firestore: number of field values in StartAt/StartAfter/EndAt/EndBefore does not match number of OrderBy fields")
+	}
+	vals := make([]*pb.Value, len(fieldValues))
+	var err error
+	for i, ord := range q.orders {
+		fval := fieldValues[i]
+		if ord.isDocumentID() {
+			// TODO(jba): support DocumentRefs as well as strings.
+			// TODO(jba): error if document ref does not belong to the right collection.
+			docID, ok := fval.(string)
+			if !ok {
+				return nil, fmt.Errorf("firestore: expected doc ID for DocumentID field, got %T", fval)
+			}
+			vals[i] = &pb.Value{&pb.Value_ReferenceValue{q.collectionPath() + "/" + docID}}
+		} else {
+			var sawTransform bool
+			vals[i], sawTransform, err = toProtoValue(reflect.ValueOf(fval))
+			if err != nil {
+				return nil, err
+			}
+			if sawTransform {
+				return nil, errors.New("firestore: ServerTimestamp disallowed in query value")
+			}
+		}
+	}
+	return vals, nil
+}
+
+func (q *Query) docSnapshotToCursorValues(ds *DocumentSnapshot, orders []order) ([]*pb.Value, error) {
+	// TODO(jba): error if doc snap does not belong to the right collection.
+	vals := make([]*pb.Value, len(orders))
+	for i, ord := range orders {
+		if ord.isDocumentID() {
+			dp, qp := ds.Ref.Parent.Path, q.collectionPath()
+			if dp != qp {
+				return nil, fmt.Errorf("firestore: document snapshot for %s passed to query on %s", dp, qp)
+			}
+			vals[i] = &pb.Value{&pb.Value_ReferenceValue{ds.Ref.Path}}
+		} else {
+			val, err := valueAtPath(ord.fieldPath, ds.proto.Fields)
+			if err != nil {
+				return nil, err
+			}
+			vals[i] = val
+		}
+	}
+	return vals, nil
+}
+
+// Returns a function that compares DocumentSnapshots according to q's ordering.
+func (q Query) compareFunc() func(d1, d2 *DocumentSnapshot) (int, error) {
+	// Add implicit sorting by name, using the last specified direction.
+ lastDir := Asc + if len(q.orders) > 0 { + lastDir = q.orders[len(q.orders)-1].dir + } + orders := append(q.copyOrders(), order{[]string{DocumentID}, lastDir}) + return func(d1, d2 *DocumentSnapshot) (int, error) { + for _, ord := range orders { + var cmp int + if len(ord.fieldPath) == 1 && ord.fieldPath[0] == DocumentID { + cmp = compareReferences(d1.Ref.Path, d2.Ref.Path) + } else { + v1, err := valueAtPath(ord.fieldPath, d1.proto.Fields) + if err != nil { + return 0, err + } + v2, err := valueAtPath(ord.fieldPath, d2.proto.Fields) + if err != nil { + return 0, err + } + cmp = compareValues(v1, v2) + } + if cmp != 0 { + if ord.dir == Desc { + cmp = -cmp + } + return cmp, nil + } + } + return 0, nil + } +} + +type filter struct { + fieldPath FieldPath + op string + value interface{} +} + +func (f filter) toProto() (*pb.StructuredQuery_Filter, error) { + if err := f.fieldPath.validate(); err != nil { + return nil, err + } + if uop, ok := unaryOpFor(f.value); ok { + if f.op != "==" { + return nil, fmt.Errorf("firestore: must use '==' when comparing %v", f.value) + } + return &pb.StructuredQuery_Filter{ + FilterType: &pb.StructuredQuery_Filter_UnaryFilter{ + UnaryFilter: &pb.StructuredQuery_UnaryFilter{ + OperandType: &pb.StructuredQuery_UnaryFilter_Field{ + Field: fref(f.fieldPath), + }, + Op: uop, + }, + }, + }, nil + } + var op pb.StructuredQuery_FieldFilter_Operator + switch f.op { + case "<": + op = pb.StructuredQuery_FieldFilter_LESS_THAN + case "<=": + op = pb.StructuredQuery_FieldFilter_LESS_THAN_OR_EQUAL + case ">": + op = pb.StructuredQuery_FieldFilter_GREATER_THAN + case ">=": + op = pb.StructuredQuery_FieldFilter_GREATER_THAN_OR_EQUAL + case "==": + op = pb.StructuredQuery_FieldFilter_EQUAL + default: + return nil, fmt.Errorf("firestore: invalid operator %q", f.op) + } + val, sawTransform, err := toProtoValue(reflect.ValueOf(f.value)) + if err != nil { + return nil, err + } + if sawTransform { + return nil, errors.New("firestore: ServerTimestamp disallowed in query value") + } + return &pb.StructuredQuery_Filter{ + FilterType: &pb.StructuredQuery_Filter_FieldFilter{ + FieldFilter: &pb.StructuredQuery_FieldFilter{ + Field: fref(f.fieldPath), + Op: op, + Value: val, + }, + }, + }, nil +} + +func unaryOpFor(value interface{}) (pb.StructuredQuery_UnaryFilter_Operator, bool) { + switch { + case value == nil: + return pb.StructuredQuery_UnaryFilter_IS_NULL, true + case isNaN(value): + return pb.StructuredQuery_UnaryFilter_IS_NAN, true + default: + return pb.StructuredQuery_UnaryFilter_OPERATOR_UNSPECIFIED, false + } +} + +func isNaN(x interface{}) bool { + switch x := x.(type) { + case float32: + return math.IsNaN(float64(x)) + case float64: + return math.IsNaN(x) + default: + return false + } +} + +type order struct { + fieldPath FieldPath + dir Direction +} + +func (r order) isDocumentID() bool { + return len(r.fieldPath) == 1 && r.fieldPath[0] == DocumentID +} + +func (r order) toProto() (*pb.StructuredQuery_Order, error) { + if err := r.fieldPath.validate(); err != nil { + return nil, err + } + return &pb.StructuredQuery_Order{ + Field: fref(r.fieldPath), + Direction: pb.StructuredQuery_Direction(r.dir), + }, nil +} + +func fref(fp FieldPath) *pb.StructuredQuery_FieldReference { + return &pb.StructuredQuery_FieldReference{fp.toServiceFieldPath()} +} + +func trunc32(i int) int32 { + if i > math.MaxInt32 { + i = math.MaxInt32 + } + return int32(i) +} + +// Documents returns an iterator over the query's resulting documents. 
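+//
+// A minimal usage sketch (assumes an existing *Client named client and a
+// context ctx; "cities" and "pop" are illustrative names):
+//
+//	iter := client.Collection("cities").Where("pop", ">", 100000).Documents(ctx)
+//	defer iter.Stop()
+//	for {
+//		doc, err := iter.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			// TODO: handle error.
+//			break
+//		}
+//		fmt.Println(doc.Data())
+//	}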
+func (q Query) Documents(ctx context.Context) *DocumentIterator { + return &DocumentIterator{ + iter: newQueryDocumentIterator(withResourceHeader(ctx, q.c.path()), &q, nil), + err: checkTransaction(ctx), + } +} + +// DocumentIterator is an iterator over documents returned by a query. +type DocumentIterator struct { + iter docIterator + err error +} + +// Unexported interface so we can have two different kinds of DocumentIterator: one +// for straight queries, and one for query snapshots. We do it this way instead of +// making DocumentIterator an interface because in the client libraries, iterators are +// always concrete types, and the fact that this one has two different implementations +// is an internal detail. +type docIterator interface { + next() (*DocumentSnapshot, error) + stop() +} + +// Next returns the next result. Its second return value is iterator.Done if there +// are no more results. Once Next returns Done, all subsequent calls will return +// Done. +func (it *DocumentIterator) Next() (*DocumentSnapshot, error) { + if it.err != nil { + return nil, it.err + } + ds, err := it.iter.next() + if err != nil { + it.err = err + } + return ds, err +} + +// Stop stops the iterator, freeing its resources. +// Always call Stop when you are done with an iterator. +// It is not safe to call Stop concurrently with Next. +func (it *DocumentIterator) Stop() { + if it.iter != nil { // possible in error cases + it.iter.stop() + } + if it.err == nil { + it.err = iterator.Done + } +} + +// GetAll returns all the documents remaining from the iterator. +// It is not necessary to call Stop on the iterator after calling GetAll. +func (it *DocumentIterator) GetAll() ([]*DocumentSnapshot, error) { + defer it.Stop() + var docs []*DocumentSnapshot + for { + doc, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, err + } + docs = append(docs, doc) + } + return docs, nil +} + +type queryDocumentIterator struct { + ctx context.Context + cancel func() + q *Query + tid []byte // transaction ID, if any + streamClient pb.Firestore_RunQueryClient +} + +func newQueryDocumentIterator(ctx context.Context, q *Query, tid []byte) *queryDocumentIterator { + ctx, cancel := context.WithCancel(ctx) + return &queryDocumentIterator{ + ctx: ctx, + cancel: cancel, + q: q, + tid: tid, + } +} + +func (it *queryDocumentIterator) next() (*DocumentSnapshot, error) { + client := it.q.c + if it.streamClient == nil { + sq, err := it.q.toProto() + if err != nil { + return nil, err + } + req := &pb.RunQueryRequest{ + Parent: it.q.parentPath, + QueryType: &pb.RunQueryRequest_StructuredQuery{sq}, + } + if it.tid != nil { + req.ConsistencySelector = &pb.RunQueryRequest_Transaction{it.tid} + } + it.streamClient, err = client.c.RunQuery(it.ctx, req) + if err != nil { + return nil, err + } + } + var res *pb.RunQueryResponse + var err error + for { + res, err = it.streamClient.Recv() + if err == io.EOF { + return nil, iterator.Done + } + if err != nil { + return nil, err + } + if res.Document != nil { + break + } + // No document => partial progress; keep receiving. + } + docRef, err := pathToDoc(res.Document.Name, client) + if err != nil { + return nil, err + } + doc, err := newDocumentSnapshot(docRef, res.Document, client, res.ReadTime) + if err != nil { + return nil, err + } + return doc, nil +} + +func (it *queryDocumentIterator) stop() { + it.cancel() +} + +// Snapshots returns an iterator over snapshots of the query. Each time the query +// results change, a new snapshot will be generated. 
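+//
+// A minimal usage sketch (error handling abbreviated):
+//
+//	snapIter := q.Snapshots(ctx)
+//	defer snapIter.Stop()
+//	for {
+//		docIter, err := snapIter.Next()
+//		if err != nil {
+//			break // e.g. iterator.Done after Stop
+//		}
+//		docs, err := docIter.GetAll()
+//		if err != nil {
+//			break
+//		}
+//		fmt.Println("results changed; snapshot has", len(docs), "documents")
+//	}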
+func (q Query) Snapshots(ctx context.Context) *QuerySnapshotIterator { + ws, err := newWatchStreamForQuery(ctx, q) + if err != nil { + return &QuerySnapshotIterator{err: err} + } + return &QuerySnapshotIterator{ + Query: q, + ws: ws, + } +} + +// QuerySnapshotIterator is an iterator over snapshots of a query. +// Call Next on the iterator to get a snapshot of the query's results each time they change. +// Call Stop on the iterator when done. +// +// For an example, see Query.Snapshots. +type QuerySnapshotIterator struct { + // The Query used to construct this iterator. + Query Query + + // The time at which the most recent snapshot was obtained from Firestore. + ReadTime time.Time + + // The number of results in the most recent snapshot. + Size int + + // The changes since the previous snapshot. + Changes []DocumentChange + + ws *watchStream + err error +} + +// Next blocks until the query's results change, then returns a DocumentIterator for +// the current results. +// +// Next never returns iterator.Done unless it is called after Stop. +func (it *QuerySnapshotIterator) Next() (*DocumentIterator, error) { + if it.err != nil { + return nil, it.err + } + btree, changes, readTime, err := it.ws.nextSnapshot() + if err != nil { + if err == io.EOF { + err = iterator.Done + } + it.err = err + return nil, it.err + } + it.Changes = changes + it.ReadTime = readTime + it.Size = btree.Len() + return &DocumentIterator{ + iter: (*btreeDocumentIterator)(btree.BeforeIndex(0)), + }, nil +} + +// Stop stops receiving snapshots. +// You should always call Stop when you are done with an iterator, to free up resources. +// It is not safe to call Stop concurrently with Next. +func (it *QuerySnapshotIterator) Stop() { + it.ws.stop() +} + +type btreeDocumentIterator btree.Iterator + +func (it *btreeDocumentIterator) next() (*DocumentSnapshot, error) { + if !(*btree.Iterator)(it).Next() { + return nil, iterator.Done + } + return it.Key.(*DocumentSnapshot), nil +} + +func (*btreeDocumentIterator) stop() {} diff --git a/vendor/cloud.google.com/go/firestore/query_test.go b/vendor/cloud.google.com/go/firestore/query_test.go new file mode 100644 index 0000000..7f373ed --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/query_test.go @@ -0,0 +1,717 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package firestore + +import ( + "math" + "sort" + "testing" + + "golang.org/x/net/context" + + "cloud.google.com/go/internal/pretty" + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + + tspb "github.com/golang/protobuf/ptypes/timestamp" + "github.com/golang/protobuf/ptypes/wrappers" +) + +func TestFilterToProto(t *testing.T) { + for _, test := range []struct { + in filter + want *pb.StructuredQuery_Filter + }{ + { + filter{[]string{"a"}, ">", 1}, + &pb.StructuredQuery_Filter{FilterType: &pb.StructuredQuery_Filter_FieldFilter{ + FieldFilter: &pb.StructuredQuery_FieldFilter{ + Field: &pb.StructuredQuery_FieldReference{FieldPath: "a"}, + Op: pb.StructuredQuery_FieldFilter_GREATER_THAN, + Value: intval(1), + }, + }}, + }, + { + filter{[]string{"a"}, "==", nil}, + &pb.StructuredQuery_Filter{FilterType: &pb.StructuredQuery_Filter_UnaryFilter{ + UnaryFilter: &pb.StructuredQuery_UnaryFilter{ + OperandType: &pb.StructuredQuery_UnaryFilter_Field{ + Field: &pb.StructuredQuery_FieldReference{FieldPath: "a"}, + }, + Op: pb.StructuredQuery_UnaryFilter_IS_NULL, + }, + }}, + }, + { + filter{[]string{"a"}, "==", math.NaN()}, + &pb.StructuredQuery_Filter{FilterType: &pb.StructuredQuery_Filter_UnaryFilter{ + UnaryFilter: &pb.StructuredQuery_UnaryFilter{ + OperandType: &pb.StructuredQuery_UnaryFilter_Field{ + Field: &pb.StructuredQuery_FieldReference{FieldPath: "a"}, + }, + Op: pb.StructuredQuery_UnaryFilter_IS_NAN, + }, + }}, + }, + } { + got, err := test.in.toProto() + if err != nil { + t.Fatal(err) + } + if !testEqual(got, test.want) { + t.Errorf("%+v:\ngot\n%v\nwant\n%v", test.in, pretty.Value(got), pretty.Value(test.want)) + } + } +} + +func TestQueryToProto(t *testing.T) { + filtr := func(path []string, op string, val interface{}) *pb.StructuredQuery_Filter { + f, err := filter{path, op, val}.toProto() + if err != nil { + t.Fatal(err) + } + return f + } + + c := &Client{projectID: "P", databaseID: "DB"} + coll := c.Collection("C") + q := coll.Query + type S struct { + A int `firestore:"a"` + } + docsnap := &DocumentSnapshot{ + Ref: coll.Doc("D"), + proto: &pb.Document{ + Fields: map[string]*pb.Value{"a": intval(7), "b": intval(8)}, + }, + } + for _, test := range []struct { + desc string + in Query + want *pb.StructuredQuery + }{ + { + desc: "q.Select()", + in: q.Select(), + want: &pb.StructuredQuery{ + Select: &pb.StructuredQuery_Projection{ + Fields: []*pb.StructuredQuery_FieldReference{fref1("__name__")}, + }, + }, + }, + { + desc: `q.Select("a", "b")`, + in: q.Select("a", "b"), + want: &pb.StructuredQuery{ + Select: &pb.StructuredQuery_Projection{ + Fields: []*pb.StructuredQuery_FieldReference{fref1("a"), fref1("b")}, + }, + }, + }, + { + desc: `q.Select("a", "b").Select("c")`, + in: q.Select("a", "b").Select("c"), // last wins + want: &pb.StructuredQuery{ + Select: &pb.StructuredQuery_Projection{ + Fields: []*pb.StructuredQuery_FieldReference{fref1("c")}, + }, + }, + }, + { + desc: `q.SelectPaths([]string{"*"}, []string{"/"})`, + in: q.SelectPaths([]string{"*"}, []string{"/"}), + want: &pb.StructuredQuery{ + Select: &pb.StructuredQuery_Projection{ + Fields: []*pb.StructuredQuery_FieldReference{fref1("*"), fref1("/")}, + }, + }, + }, + { + desc: `q.Where("a", ">", 5)`, + in: q.Where("a", ">", 5), + want: &pb.StructuredQuery{Where: filtr([]string{"a"}, ">", 5)}, + }, + { + desc: `q.Where("a", "==", NaN)`, + in: q.Where("a", "==", float32(math.NaN())), + want: &pb.StructuredQuery{Where: filtr([]string{"a"}, "==", math.NaN())}, + }, + { + desc: `q.Where("a", ">", 5).Where("b", "<", 
"foo")`, + in: q.Where("a", ">", 5).Where("b", "<", "foo"), + want: &pb.StructuredQuery{ + Where: &pb.StructuredQuery_Filter{ + &pb.StructuredQuery_Filter_CompositeFilter{ + &pb.StructuredQuery_CompositeFilter{ + Op: pb.StructuredQuery_CompositeFilter_AND, + Filters: []*pb.StructuredQuery_Filter{ + filtr([]string{"a"}, ">", 5), filtr([]string{"b"}, "<", "foo"), + }, + }, + }, + }, + }, + }, + { + desc: ` q.WherePath([]string{"/", "*"}, ">", 5)`, + in: q.WherePath([]string{"/", "*"}, ">", 5), + want: &pb.StructuredQuery{Where: filtr([]string{"/", "*"}, ">", 5)}, + }, + { + desc: `q.OrderBy("b", Asc).OrderBy("a", Desc).OrderByPath([]string{"~"}, Asc)`, + in: q.OrderBy("b", Asc).OrderBy("a", Desc).OrderByPath([]string{"~"}, Asc), + want: &pb.StructuredQuery{ + OrderBy: []*pb.StructuredQuery_Order{ + {fref1("b"), pb.StructuredQuery_ASCENDING}, + {fref1("a"), pb.StructuredQuery_DESCENDING}, + {fref1("~"), pb.StructuredQuery_ASCENDING}, + }, + }, + }, + { + desc: `q.Offset(2).Limit(3)`, + in: q.Offset(2).Limit(3), + want: &pb.StructuredQuery{ + Offset: 2, + Limit: &wrappers.Int32Value{3}, + }, + }, + { + desc: `q.Offset(2).Limit(3).Limit(4).Offset(5)`, + in: q.Offset(2).Limit(3).Limit(4).Offset(5), // last wins + want: &pb.StructuredQuery{ + Offset: 5, + Limit: &wrappers.Int32Value{4}, + }, + }, + { + desc: `q.OrderBy("a", Asc).StartAt(7).EndBefore(9)`, + in: q.OrderBy("a", Asc).StartAt(7).EndBefore(9), + want: &pb.StructuredQuery{ + OrderBy: []*pb.StructuredQuery_Order{ + {fref1("a"), pb.StructuredQuery_ASCENDING}, + }, + StartAt: &pb.Cursor{ + Values: []*pb.Value{intval(7)}, + Before: true, + }, + EndAt: &pb.Cursor{ + Values: []*pb.Value{intval(9)}, + Before: true, + }, + }, + }, + { + desc: `q.OrderBy("a", Asc).StartAt(7).EndAt(9)`, + in: q.OrderBy("a", Asc).StartAt(7).EndAt(9), + want: &pb.StructuredQuery{ + OrderBy: []*pb.StructuredQuery_Order{ + {fref1("a"), pb.StructuredQuery_ASCENDING}, + }, + StartAt: &pb.Cursor{ + Values: []*pb.Value{intval(7)}, + Before: true, + }, + EndAt: &pb.Cursor{ + Values: []*pb.Value{intval(9)}, + Before: false, + }, + }, + }, + { + desc: `q.OrderBy("a", Asc).StartAfter(7).EndAt(9)`, + in: q.OrderBy("a", Asc).StartAfter(7).EndAt(9), + want: &pb.StructuredQuery{ + OrderBy: []*pb.StructuredQuery_Order{ + {fref1("a"), pb.StructuredQuery_ASCENDING}, + }, + StartAt: &pb.Cursor{ + Values: []*pb.Value{intval(7)}, + Before: false, + }, + EndAt: &pb.Cursor{ + Values: []*pb.Value{intval(9)}, + Before: false, + }, + }, + }, + { + desc: `q.OrderBy(DocumentID, Asc).StartAfter("foo").EndBefore("bar")`, + in: q.OrderBy(DocumentID, Asc).StartAfter("foo").EndBefore("bar"), + want: &pb.StructuredQuery{ + OrderBy: []*pb.StructuredQuery_Order{ + {fref1("__name__"), pb.StructuredQuery_ASCENDING}, + }, + StartAt: &pb.Cursor{ + Values: []*pb.Value{refval(coll.parentPath + "/documents/C/foo")}, + Before: false, + }, + EndAt: &pb.Cursor{ + Values: []*pb.Value{refval(coll.parentPath + "/documents/C/bar")}, + Before: true, + }, + }, + }, + { + desc: `q.OrderBy("a", Asc).OrderBy("b", Desc).StartAfter(7, 8).EndAt(9, 10)`, + in: q.OrderBy("a", Asc).OrderBy("b", Desc).StartAfter(7, 8).EndAt(9, 10), + want: &pb.StructuredQuery{ + OrderBy: []*pb.StructuredQuery_Order{ + {fref1("a"), pb.StructuredQuery_ASCENDING}, + {fref1("b"), pb.StructuredQuery_DESCENDING}, + }, + StartAt: &pb.Cursor{ + Values: []*pb.Value{intval(7), intval(8)}, + Before: false, + }, + EndAt: &pb.Cursor{ + Values: []*pb.Value{intval(9), intval(10)}, + Before: false, + }, + }, + }, + { + // last of StartAt/After wins, same 
for End + desc: `q.OrderBy("a", Asc).StartAfter(1).StartAt(2).EndAt(3).EndBefore(4)`, + in: q.OrderBy("a", Asc). + StartAfter(1).StartAt(2). + EndAt(3).EndBefore(4), + want: &pb.StructuredQuery{ + OrderBy: []*pb.StructuredQuery_Order{ + {fref1("a"), pb.StructuredQuery_ASCENDING}, + }, + StartAt: &pb.Cursor{ + Values: []*pb.Value{intval(2)}, + Before: true, + }, + EndAt: &pb.Cursor{ + Values: []*pb.Value{intval(4)}, + Before: true, + }, + }, + }, + // Start/End with DocumentSnapshot + // These tests are from the "Document Snapshot Cursors" doc. + { + desc: `q.StartAt(docsnap)`, + in: q.StartAt(docsnap), + want: &pb.StructuredQuery{ + OrderBy: []*pb.StructuredQuery_Order{ + {fref1("__name__"), pb.StructuredQuery_ASCENDING}, + }, + StartAt: &pb.Cursor{ + Values: []*pb.Value{refval(coll.parentPath + "/documents/C/D")}, + Before: true, + }, + }, + }, + { + desc: `q.OrderBy("a", Asc).StartAt(docsnap)`, + in: q.OrderBy("a", Asc).StartAt(docsnap), + want: &pb.StructuredQuery{ + OrderBy: []*pb.StructuredQuery_Order{ + {fref1("a"), pb.StructuredQuery_ASCENDING}, + {fref1("__name__"), pb.StructuredQuery_ASCENDING}, + }, + StartAt: &pb.Cursor{ + Values: []*pb.Value{intval(7), refval(coll.parentPath + "/documents/C/D")}, + Before: true, + }, + }, + }, + + { + desc: `q.OrderBy("a", Desc).StartAt(docsnap)`, + in: q.OrderBy("a", Desc).StartAt(docsnap), + want: &pb.StructuredQuery{ + OrderBy: []*pb.StructuredQuery_Order{ + {fref1("a"), pb.StructuredQuery_DESCENDING}, + {fref1("__name__"), pb.StructuredQuery_DESCENDING}, + }, + StartAt: &pb.Cursor{ + Values: []*pb.Value{intval(7), refval(coll.parentPath + "/documents/C/D")}, + Before: true, + }, + }, + }, + { + desc: `q.OrderBy("a", Desc).OrderBy("b", Asc).StartAt(docsnap)`, + in: q.OrderBy("a", Desc).OrderBy("b", Asc).StartAt(docsnap), + want: &pb.StructuredQuery{ + OrderBy: []*pb.StructuredQuery_Order{ + {fref1("a"), pb.StructuredQuery_DESCENDING}, + {fref1("b"), pb.StructuredQuery_ASCENDING}, + {fref1("__name__"), pb.StructuredQuery_ASCENDING}, + }, + StartAt: &pb.Cursor{ + Values: []*pb.Value{intval(7), intval(8), refval(coll.parentPath + "/documents/C/D")}, + Before: true, + }, + }, + }, + { + desc: `q.Where("a", "==", 3).StartAt(docsnap)`, + in: q.Where("a", "==", 3).StartAt(docsnap), + want: &pb.StructuredQuery{ + Where: filtr([]string{"a"}, "==", 3), + OrderBy: []*pb.StructuredQuery_Order{ + {fref1("__name__"), pb.StructuredQuery_ASCENDING}, + }, + StartAt: &pb.Cursor{ + Values: []*pb.Value{refval(coll.parentPath + "/documents/C/D")}, + Before: true, + }, + }, + }, + { + desc: `q.Where("a", "<", 3).StartAt(docsnap)`, + in: q.Where("a", "<", 3).StartAt(docsnap), + want: &pb.StructuredQuery{ + Where: filtr([]string{"a"}, "<", 3), + OrderBy: []*pb.StructuredQuery_Order{ + {fref1("a"), pb.StructuredQuery_ASCENDING}, + {fref1("__name__"), pb.StructuredQuery_ASCENDING}, + }, + StartAt: &pb.Cursor{ + Values: []*pb.Value{intval(7), refval(coll.parentPath + "/documents/C/D")}, + Before: true, + }, + }, + }, + { + desc: `q.Where("b", "==", 1).Where("a", "<", 3).StartAt(docsnap)`, + in: q.Where("b", "==", 1).Where("a", "<", 3).StartAt(docsnap), + want: &pb.StructuredQuery{ + Where: &pb.StructuredQuery_Filter{ + &pb.StructuredQuery_Filter_CompositeFilter{ + &pb.StructuredQuery_CompositeFilter{ + Op: pb.StructuredQuery_CompositeFilter_AND, + Filters: []*pb.StructuredQuery_Filter{ + filtr([]string{"b"}, "==", 1), + filtr([]string{"a"}, "<", 3), + }, + }, + }, + }, + OrderBy: []*pb.StructuredQuery_Order{ + {fref1("a"), pb.StructuredQuery_ASCENDING}, + 
{fref1("__name__"), pb.StructuredQuery_ASCENDING}, + }, + StartAt: &pb.Cursor{ + Values: []*pb.Value{intval(7), refval(coll.parentPath + "/documents/C/D")}, + Before: true, + }, + }, + }, + } { + got, err := test.in.toProto() + if err != nil { + t.Errorf("%s: %v", test.desc, err) + continue + } + test.want.From = []*pb.StructuredQuery_CollectionSelector{{CollectionId: "C"}} + if !testEqual(got, test.want) { + t.Errorf("%s:\ngot\n%v\nwant\n%v", test.desc, pretty.Value(got), pretty.Value(test.want)) + } + } +} + +func fref1(s string) *pb.StructuredQuery_FieldReference { + return fref([]string{s}) +} + +func TestQueryToProtoErrors(t *testing.T) { + st := map[string]interface{}{"a": ServerTimestamp} + del := map[string]interface{}{"a": Delete} + c := &Client{projectID: "P", databaseID: "DB"} + coll := c.Collection("C") + docsnap := &DocumentSnapshot{ + Ref: coll.Doc("D"), + proto: &pb.Document{ + Fields: map[string]*pb.Value{"a": intval(7)}, + }, + } + q := coll.Query + for _, query := range []Query{ + Query{}, // no collection ID + q.Where("x", "!=", 1), // invalid operator + q.Where("~", ">", 1), // invalid path + q.WherePath([]string{"*", ""}, ">", 1), // invalid path + q.StartAt(1), // no OrderBy + q.StartAt(2).OrderBy("x", Asc).OrderBy("y", Desc), // wrong # OrderBy + q.Select("*"), // invalid path + q.SelectPaths([]string{"/", "", "~"}), // invalid path + q.OrderBy("[", Asc), // invalid path + q.OrderByPath([]string{""}, Desc), // invalid path + q.Where("x", "==", st), // ServerTimestamp in filter + q.OrderBy("a", Asc).StartAt(st), // ServerTimestamp in Start + q.OrderBy("a", Asc).EndAt(st), // ServerTimestamp in End + q.Where("x", "==", del), // Delete in filter + q.OrderBy("a", Asc).StartAt(del), // Delete in Start + q.OrderBy("a", Asc).EndAt(del), // Delete in End + q.OrderBy(DocumentID, Asc).StartAt(7), // wrong type for __name__ + q.OrderBy(DocumentID, Asc).EndAt(7), // wrong type for __name__ + q.OrderBy("b", Asc).StartAt(docsnap), // doc snapshot does not have order-by field + q.StartAt(docsnap).EndAt("x"), // mixed doc snapshot and fields + q.StartAfter("x").EndBefore(docsnap), // mixed doc snapshot and fields + } { + _, err := query.toProto() + if err == nil { + t.Errorf("%+v: got nil, want error", query) + } + } +} + +func TestQueryMethodsDoNotModifyReceiver(t *testing.T) { + var empty Query + + q := Query{} + _ = q.Select("a", "b") + if !testEqual(q, empty) { + t.Errorf("got %+v, want empty", q) + } + + q = Query{} + q1 := q.Where("a", ">", 3) + if !testEqual(q, empty) { + t.Errorf("got %+v, want empty", q) + } + // Extra check because Where appends to a slice. + q1before := q.Where("a", ">", 3) // same as q1 + _ = q1.Where("b", "<", "foo") + if !testEqual(q1, q1before) { + t.Errorf("got %+v, want %+v", q1, q1before) + } + + q = Query{} + q1 = q.OrderBy("a", Asc) + if !testEqual(q, empty) { + t.Errorf("got %+v, want empty", q) + } + // Extra check because Where appends to a slice. 
+ q1before = q.OrderBy("a", Asc) // same as q1 + _ = q1.OrderBy("b", Desc) + if !testEqual(q1, q1before) { + t.Errorf("got %+v, want %+v", q1, q1before) + } + + q = Query{} + _ = q.Offset(5) + if !testEqual(q, empty) { + t.Errorf("got %+v, want empty", q) + } + + q = Query{} + _ = q.Limit(5) + if !testEqual(q, empty) { + t.Errorf("got %+v, want empty", q) + } + + q = Query{} + _ = q.StartAt(5) + if !testEqual(q, empty) { + t.Errorf("got %+v, want empty", q) + } + + q = Query{} + _ = q.StartAfter(5) + if !testEqual(q, empty) { + t.Errorf("got %+v, want empty", q) + } + + q = Query{} + _ = q.EndAt(5) + if !testEqual(q, empty) { + t.Errorf("got %+v, want empty", q) + } + + q = Query{} + _ = q.EndBefore(5) + if !testEqual(q, empty) { + t.Errorf("got %+v, want empty", q) + } +} + +func TestQueryFromCollectionRef(t *testing.T) { + c := &Client{} + coll := c.Collection("C") + got := coll.Select("x").Offset(8) + want := Query{ + c: c, + parentPath: c.path(), + collectionID: "C", + selection: []FieldPath{{"x"}}, + offset: 8, + } + if !testEqual(got, want) { + t.Fatalf("got %+v, want %+v", got, want) + } +} + +func TestQueryGetAll(t *testing.T) { + // This implicitly tests DocumentIterator as well. + const dbPath = "projects/projectID/databases/(default)" + ctx := context.Background() + c, srv := newMock(t) + docNames := []string{"C/a", "C/b"} + wantPBDocs := []*pb.Document{ + { + Name: dbPath + "/documents/" + docNames[0], + CreateTime: aTimestamp, + UpdateTime: aTimestamp, + Fields: map[string]*pb.Value{"f": intval(2)}, + }, + { + Name: dbPath + "/documents/" + docNames[1], + CreateTime: aTimestamp2, + UpdateTime: aTimestamp3, + Fields: map[string]*pb.Value{"f": intval(1)}, + }, + } + wantReadTimes := []*tspb.Timestamp{aTimestamp, aTimestamp2} + srv.addRPC(nil, []interface{}{ + &pb.RunQueryResponse{Document: wantPBDocs[0], ReadTime: aTimestamp}, + &pb.RunQueryResponse{Document: wantPBDocs[1], ReadTime: aTimestamp2}, + }) + gotDocs, err := c.Collection("C").Documents(ctx).GetAll() + if err != nil { + t.Fatal(err) + } + if got, want := len(gotDocs), len(wantPBDocs); got != want { + t.Errorf("got %d docs, wanted %d", got, want) + } + for i, got := range gotDocs { + want, err := newDocumentSnapshot(c.Doc(docNames[i]), wantPBDocs[i], c, wantReadTimes[i]) + if err != nil { + t.Fatal(err) + } + if !testEqual(got, want) { + // avoid writing a cycle + got.c = nil + want.c = nil + t.Errorf("#%d: got %+v, want %+v", i, pretty.Value(got), pretty.Value(want)) + } + } +} + +func TestQueryCompareFunc(t *testing.T) { + mv := func(fields ...interface{}) map[string]*pb.Value { + m := map[string]*pb.Value{} + for i := 0; i < len(fields); i += 2 { + m[fields[i].(string)] = fields[i+1].(*pb.Value) + } + return m + } + snap := func(ref *DocumentRef, fields map[string]*pb.Value) *DocumentSnapshot { + return &DocumentSnapshot{Ref: ref, proto: &pb.Document{Fields: fields}} + } + + c := &Client{} + coll := c.Collection("C") + doc1 := coll.Doc("doc1") + doc2 := coll.Doc("doc2") + doc3 := coll.Doc("doc3") + doc4 := coll.Doc("doc4") + for _, test := range []struct { + q Query + in []*DocumentSnapshot + want []*DocumentSnapshot + }{ + { + q: coll.OrderBy("foo", Asc), + in: []*DocumentSnapshot{ + snap(doc3, mv("foo", intval(2))), + snap(doc4, mv("foo", intval(1))), + snap(doc2, mv("foo", intval(2))), + }, + want: []*DocumentSnapshot{ + snap(doc4, mv("foo", intval(1))), + snap(doc2, mv("foo", intval(2))), + snap(doc3, mv("foo", intval(2))), + }, + }, + { + q: coll.OrderBy("foo", Desc), + in: []*DocumentSnapshot{ + snap(doc3, 
mv("foo", intval(2))), + snap(doc4, mv("foo", intval(1))), + snap(doc2, mv("foo", intval(2))), + }, + want: []*DocumentSnapshot{ + snap(doc3, mv("foo", intval(2))), + snap(doc2, mv("foo", intval(2))), + snap(doc4, mv("foo", intval(1))), + }, + }, + { + q: coll.OrderBy("foo.bar", Asc), + in: []*DocumentSnapshot{ + snap(doc1, mv("foo", mapval(mv("bar", intval(1))))), + snap(doc2, mv("foo", mapval(mv("bar", intval(2))))), + snap(doc3, mv("foo", mapval(mv("bar", intval(2))))), + }, + want: []*DocumentSnapshot{ + snap(doc1, mv("foo", mapval(mv("bar", intval(1))))), + snap(doc2, mv("foo", mapval(mv("bar", intval(2))))), + snap(doc3, mv("foo", mapval(mv("bar", intval(2))))), + }, + }, + { + q: coll.OrderBy("foo.bar", Desc), + in: []*DocumentSnapshot{ + snap(doc1, mv("foo", mapval(mv("bar", intval(1))))), + snap(doc2, mv("foo", mapval(mv("bar", intval(2))))), + snap(doc3, mv("foo", mapval(mv("bar", intval(2))))), + }, + want: []*DocumentSnapshot{ + snap(doc3, mv("foo", mapval(mv("bar", intval(2))))), + snap(doc2, mv("foo", mapval(mv("bar", intval(2))))), + snap(doc1, mv("foo", mapval(mv("bar", intval(1))))), + }, + }, + } { + got := append([]*DocumentSnapshot(nil), test.in...) + sort.Sort(byQuery{test.q.compareFunc(), got}) + if diff := testDiff(got, test.want); diff != "" { + t.Errorf("%+v: %s", test.q, diff) + } + } + + // Want error on missing field. + q := coll.OrderBy("bar", Asc) + if q.err != nil { + t.Fatalf("bad query: %v", q.err) + } + cf := q.compareFunc() + s := snap(doc1, mv("foo", intval(1))) + if _, err := cf(s, s); err == nil { + t.Error("got nil, want error") + } +} + +type byQuery struct { + compare func(d1, d2 *DocumentSnapshot) (int, error) + docs []*DocumentSnapshot +} + +func (b byQuery) Len() int { return len(b.docs) } +func (b byQuery) Swap(i, j int) { b.docs[i], b.docs[j] = b.docs[j], b.docs[i] } +func (b byQuery) Less(i, j int) bool { + c, err := b.compare(b.docs[i], b.docs[j]) + if err != nil { + panic(err) + } + return c < 0 +} diff --git a/vendor/cloud.google.com/go/firestore/testdata/VERSION b/vendor/cloud.google.com/go/firestore/testdata/VERSION new file mode 100644 index 0000000..e25a9ae --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/VERSION @@ -0,0 +1 @@ +SHA1(/usr/local/google/home/jba/go/src/github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/testdata/test-suite.binproto)= 1502b0250a2ecd854b80509e3e456e46ade89ea7 diff --git a/vendor/cloud.google.com/go/firestore/testdata/create-basic.textproto b/vendor/cloud.google.com/go/firestore/testdata/create-basic.textproto new file mode 100644 index 0000000..433ffda --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/create-basic.textproto @@ -0,0 +1,27 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A simple call, resulting in a single update operation. 
+ +description: "create: basic" +create: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + current_document: < + exists: false + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/create-complex.textproto b/vendor/cloud.google.com/go/firestore/testdata/create-complex.textproto new file mode 100644 index 0000000..00a994e --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/create-complex.textproto @@ -0,0 +1,61 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A call to a write method with complicated input data. + +description: "create: complex" +create: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": [1, 2.5], \"b\": {\"c\": [\"three\", {\"d\": true}]}}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + array_value: < + values: < + integer_value: 1 + > + values: < + double_value: 2.5 + > + > + > + > + fields: < + key: "b" + value: < + map_value: < + fields: < + key: "c" + value: < + array_value: < + values: < + string_value: "three" + > + values: < + map_value: < + fields: < + key: "d" + value: < + boolean_value: true + > + > + > + > + > + > + > + > + > + > + > + current_document: < + exists: false + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/create-del-noarray-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/create-del-noarray-nested.textproto new file mode 100644 index 0000000..60694e1 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/create-del-noarray-nested.textproto @@ -0,0 +1,13 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Delete sentinel must be the value of a field. Deletes are implemented by +# turning the path to the Delete sentinel into a FieldPath, and FieldPaths do not +# support array indexing. + +description: "create: Delete cannot be anywhere inside an array value" +create: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": [1, {\"b\": \"Delete\"}]}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/create-del-noarray.textproto b/vendor/cloud.google.com/go/firestore/testdata/create-del-noarray.textproto new file mode 100644 index 0000000..5731be1 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/create-del-noarray.textproto @@ -0,0 +1,13 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Delete sentinel must be the value of a field. Deletes are implemented by +# turning the path to the Delete sentinel into a FieldPath, and FieldPaths do not +# support array indexing. 
+ +description: "create: Delete cannot be in an array value" +create: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": [1, 2, \"Delete\"]}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/create-empty.textproto b/vendor/cloud.google.com/go/firestore/testdata/create-empty.textproto new file mode 100644 index 0000000..2b6fec7 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/create-empty.textproto @@ -0,0 +1,20 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + + +description: "create: creating or setting an empty map" +create: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + > + current_document: < + exists: false + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/create-nodel.textproto b/vendor/cloud.google.com/go/firestore/testdata/create-nodel.textproto new file mode 100644 index 0000000..c878814 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/create-nodel.textproto @@ -0,0 +1,11 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Delete sentinel cannot be used in Create, or in Set without a Merge option. + +description: "create: Delete cannot appear in data" +create: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1, \"b\": \"Delete\"}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/create-nosplit.textproto b/vendor/cloud.google.com/go/firestore/testdata/create-nosplit.textproto new file mode 100644 index 0000000..e9e1ee2 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/create-nosplit.textproto @@ -0,0 +1,40 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Create and Set treat their map keys literally. They do not split on dots. + +description: "create: don\342\200\231t split on dots" +create: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{ \"a.b\": { \"c.d\": 1 }, \"e\": 2 }" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a.b" + value: < + map_value: < + fields: < + key: "c.d" + value: < + integer_value: 1 + > + > + > + > + > + fields: < + key: "e" + value: < + integer_value: 2 + > + > + > + current_document: < + exists: false + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/create-special-chars.textproto b/vendor/cloud.google.com/go/firestore/testdata/create-special-chars.textproto new file mode 100644 index 0000000..3a7acd3 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/create-special-chars.textproto @@ -0,0 +1,41 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Create and Set treat their map keys literally. 
They do not escape special +# characters. + +description: "create: non-alpha characters in map keys" +create: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{ \"*\": { \".\": 1 }, \"~\": 2 }" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "*" + value: < + map_value: < + fields: < + key: "." + value: < + integer_value: 1 + > + > + > + > + > + fields: < + key: "~" + value: < + integer_value: 2 + > + > + > + current_document: < + exists: false + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/create-st-alone.textproto b/vendor/cloud.google.com/go/firestore/testdata/create-st-alone.textproto new file mode 100644 index 0000000..9803a67 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/create-st-alone.textproto @@ -0,0 +1,26 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If the only values in the input are ServerTimestamps, then no update operation +# should be produced. + +description: "create: ServerTimestamp alone" +create: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": \"ServerTimestamp\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "a" + set_to_server_value: REQUEST_TIME + > + > + current_document: < + exists: false + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/create-st-multi.textproto b/vendor/cloud.google.com/go/firestore/testdata/create-st-multi.textproto new file mode 100644 index 0000000..cb3db48 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/create-st-multi.textproto @@ -0,0 +1,41 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A document can have more than one ServerTimestamp field. Since all the +# ServerTimestamp fields are removed, the only field in the update is "a". + +description: "create: multiple ServerTimestamp fields" +create: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\", \"c\": {\"d\": \"ServerTimestamp\"}}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + current_document: < + exists: false + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "b" + set_to_server_value: REQUEST_TIME + > + field_transforms: < + field_path: "c.d" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/create-st-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/create-st-nested.textproto new file mode 100644 index 0000000..6bc03e8 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/create-st-nested.textproto @@ -0,0 +1,38 @@ +# DO NOT MODIFY. 
This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A ServerTimestamp value can occur at any depth. In this case, the transform +# applies to the field path "b.c". Since "c" is removed from the update, "b" +# becomes empty, so it is also removed from the update. + +description: "create: nested ServerTimestamp field" +create: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1, \"b\": {\"c\": \"ServerTimestamp\"}}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + current_document: < + exists: false + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "b.c" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/create-st-noarray-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/create-st-noarray-nested.textproto new file mode 100644 index 0000000..0cec0ae --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/create-st-noarray-nested.textproto @@ -0,0 +1,12 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# There cannot be an array value anywhere on the path from the document root to +# the ServerTimestamp sentinel. Firestore transforms don't support array indexing. + +description: "create: ServerTimestamp cannot be anywhere inside an array value" +create: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": [1, {\"b\": \"ServerTimestamp\"}]}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/create-st-noarray.textproto b/vendor/cloud.google.com/go/firestore/testdata/create-st-noarray.textproto new file mode 100644 index 0000000..56d91c2 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/create-st-noarray.textproto @@ -0,0 +1,12 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The ServerTimestamp sentinel must be the value of a field. Firestore transforms +# don't support array indexing. + +description: "create: ServerTimestamp cannot be in an array value" +create: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": [1, 2, \"ServerTimestamp\"]}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/create-st.textproto b/vendor/cloud.google.com/go/firestore/testdata/create-st.textproto new file mode 100644 index 0000000..ddfc6a1 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/create-st.textproto @@ -0,0 +1,39 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A key with the special ServerTimestamp sentinel is removed from the data in the +# update operation. Instead it appears in a separate Transform operation. Note +# that in these tests, the string "ServerTimestamp" should be replaced with the +# special ServerTimestamp value. 
+ +description: "create: ServerTimestamp with data" +create: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + current_document: < + exists: false + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "b" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/delete-exists-precond.textproto b/vendor/cloud.google.com/go/firestore/testdata/delete-exists-precond.textproto new file mode 100644 index 0000000..c9cf2dd --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/delete-exists-precond.textproto @@ -0,0 +1,21 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Delete supports an exists precondition. + +description: "delete: delete with exists precondition" +delete: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + precondition: < + exists: true + > + request: < + database: "projects/projectID/databases/(default)" + writes: < + delete: "projects/projectID/databases/(default)/documents/C/d" + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/delete-no-precond.textproto b/vendor/cloud.google.com/go/firestore/testdata/delete-no-precond.textproto new file mode 100644 index 0000000..a396cdb --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/delete-no-precond.textproto @@ -0,0 +1,15 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# An ordinary Delete call. + +description: "delete: delete without precondition" +delete: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + request: < + database: "projects/projectID/databases/(default)" + writes: < + delete: "projects/projectID/databases/(default)/documents/C/d" + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/delete-time-precond.textproto b/vendor/cloud.google.com/go/firestore/testdata/delete-time-precond.textproto new file mode 100644 index 0000000..5798f5f --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/delete-time-precond.textproto @@ -0,0 +1,25 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Delete supports a last-update-time precondition. 
+ +description: "delete: delete with last-update-time precondition" +delete: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + precondition: < + update_time: < + seconds: 42 + > + > + request: < + database: "projects/projectID/databases/(default)" + writes: < + delete: "projects/projectID/databases/(default)/documents/C/d" + current_document: < + update_time: < + seconds: 42 + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/get-basic.textproto b/vendor/cloud.google.com/go/firestore/testdata/get-basic.textproto new file mode 100644 index 0000000..2a44816 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/get-basic.textproto @@ -0,0 +1,12 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A call to DocumentRef.Get. + +description: "get: get a document" +get: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + request: < + name: "projects/projectID/databases/(default)/documents/C/d" + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-bad-NaN.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-bad-NaN.textproto new file mode 100644 index 0000000..6806dd0 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-bad-NaN.textproto @@ -0,0 +1,19 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# You can only compare NaN for equality. + +description: "query: where clause with non-== comparison with NaN" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + where: < + path: < + field: "a" + > + op: "<" + json_value: "\"NaN\"" + > + > + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-bad-null.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-bad-null.textproto new file mode 100644 index 0000000..7fdfb3f --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-bad-null.textproto @@ -0,0 +1,19 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# You can only compare Null for equality. + +description: "query: where clause with non-== comparison with Null" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + where: < + path: < + field: "a" + > + op: ">" + json_value: "null" + > + > + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-order.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-order.textproto new file mode 100644 index 0000000..bab8601 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-order.textproto @@ -0,0 +1,68 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# When a document snapshot is used, the client appends a __name__ order-by clause +# with the direction of the last order-by clause. 
+ +description: "query: cursor methods with a document snapshot, existing orderBy" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + order_by: < + path: < + field: "a" + > + direction: "asc" + > + > + clauses: < + order_by: < + path: < + field: "b" + > + direction: "desc" + > + > + clauses: < + start_after: < + doc_snapshot: < + path: "projects/projectID/databases/(default)/documents/C/D" + json_data: "{\"a\": 7, \"b\": 8}" + > + > + > + query: < + from: < + collection_id: "C" + > + order_by: < + field: < + field_path: "a" + > + direction: ASCENDING + > + order_by: < + field: < + field_path: "b" + > + direction: DESCENDING + > + order_by: < + field: < + field_path: "__name__" + > + direction: DESCENDING + > + start_at: < + values: < + integer_value: 7 + > + values: < + integer_value: 8 + > + values: < + reference_value: "projects/projectID/databases/(default)/documents/C/D" + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-orderby-name.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-orderby-name.textproto new file mode 100644 index 0000000..d0ce3df --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-orderby-name.textproto @@ -0,0 +1,76 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If there is an existing orderBy clause on __name__, no changes are made to the +# list of orderBy clauses. + +description: "query: cursor method, doc snapshot, existing orderBy __name__" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + order_by: < + path: < + field: "a" + > + direction: "desc" + > + > + clauses: < + order_by: < + path: < + field: "__name__" + > + direction: "asc" + > + > + clauses: < + start_at: < + doc_snapshot: < + path: "projects/projectID/databases/(default)/documents/C/D" + json_data: "{\"a\": 7, \"b\": 8}" + > + > + > + clauses: < + end_at: < + doc_snapshot: < + path: "projects/projectID/databases/(default)/documents/C/D" + json_data: "{\"a\": 7, \"b\": 8}" + > + > + > + query: < + from: < + collection_id: "C" + > + order_by: < + field: < + field_path: "a" + > + direction: DESCENDING + > + order_by: < + field: < + field_path: "__name__" + > + direction: ASCENDING + > + start_at: < + values: < + integer_value: 7 + > + values: < + reference_value: "projects/projectID/databases/(default)/documents/C/D" + > + before: true + > + end_at: < + values: < + integer_value: 7 + > + values: < + reference_value: "projects/projectID/databases/(default)/documents/C/D" + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-where-eq.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-where-eq.textproto new file mode 100644 index 0000000..8b1e217 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-where-eq.textproto @@ -0,0 +1,53 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A Where clause using equality doesn't change the implicit orderBy clauses. 
+ +description: "query: cursor methods with a document snapshot and an equality where clause" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + where: < + path: < + field: "a" + > + op: "==" + json_value: "3" + > + > + clauses: < + end_at: < + doc_snapshot: < + path: "projects/projectID/databases/(default)/documents/C/D" + json_data: "{\"a\": 7, \"b\": 8}" + > + > + > + query: < + from: < + collection_id: "C" + > + where: < + field_filter: < + field: < + field_path: "a" + > + op: EQUAL + value: < + integer_value: 3 + > + > + > + order_by: < + field: < + field_path: "__name__" + > + direction: ASCENDING + > + end_at: < + values: < + reference_value: "projects/projectID/databases/(default)/documents/C/D" + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-where-neq-orderby.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-where-neq-orderby.textproto new file mode 100644 index 0000000..a69edfc --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-where-neq-orderby.textproto @@ -0,0 +1,72 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If there is an OrderBy clause, the inequality Where clause does not result in a +# new OrderBy clause. We still add a __name__ OrderBy clause + +description: "query: cursor method, doc snapshot, inequality where clause, and existing orderBy clause" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + order_by: < + path: < + field: "a" + > + direction: "desc" + > + > + clauses: < + where: < + path: < + field: "a" + > + op: "<" + json_value: "4" + > + > + clauses: < + start_at: < + doc_snapshot: < + path: "projects/projectID/databases/(default)/documents/C/D" + json_data: "{\"a\": 7, \"b\": 8}" + > + > + > + query: < + from: < + collection_id: "C" + > + where: < + field_filter: < + field: < + field_path: "a" + > + op: LESS_THAN + value: < + integer_value: 4 + > + > + > + order_by: < + field: < + field_path: "a" + > + direction: DESCENDING + > + order_by: < + field: < + field_path: "__name__" + > + direction: DESCENDING + > + start_at: < + values: < + integer_value: 7 + > + values: < + reference_value: "projects/projectID/databases/(default)/documents/C/D" + > + before: true + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-where-neq.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-where-neq.textproto new file mode 100644 index 0000000..871dd0b --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-where-neq.textproto @@ -0,0 +1,64 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A Where clause with an inequality results in an OrderBy clause on that clause's +# path, if there are no other OrderBy clauses. 
+ +description: "query: cursor method with a document snapshot and an inequality where clause" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + where: < + path: < + field: "a" + > + op: "<=" + json_value: "3" + > + > + clauses: < + end_before: < + doc_snapshot: < + path: "projects/projectID/databases/(default)/documents/C/D" + json_data: "{\"a\": 7, \"b\": 8}" + > + > + > + query: < + from: < + collection_id: "C" + > + where: < + field_filter: < + field: < + field_path: "a" + > + op: LESS_THAN_OR_EQUAL + value: < + integer_value: 3 + > + > + > + order_by: < + field: < + field_path: "a" + > + direction: ASCENDING + > + order_by: < + field: < + field_path: "__name__" + > + direction: ASCENDING + > + end_at: < + values: < + integer_value: 7 + > + values: < + reference_value: "projects/projectID/databases/(default)/documents/C/D" + > + before: true + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap.textproto new file mode 100644 index 0000000..184bffc --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap.textproto @@ -0,0 +1,34 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# When a document snapshot is used, the client appends a __name__ order-by clause. + +description: "query: cursor methods with a document snapshot" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + start_at: < + doc_snapshot: < + path: "projects/projectID/databases/(default)/documents/C/D" + json_data: "{\"a\": 7, \"b\": 8}" + > + > + > + query: < + from: < + collection_id: "C" + > + order_by: < + field: < + field_path: "__name__" + > + direction: ASCENDING + > + start_at: < + values: < + reference_value: "projects/projectID/databases/(default)/documents/C/D" + > + before: true + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-cursor-no-order.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-no-order.textproto new file mode 100644 index 0000000..fb999dd --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-no-order.textproto @@ -0,0 +1,16 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If a cursor method with a list of values is provided, there must be at least as +# many explicit orderBy clauses as values. + +description: "query: cursor method without orderBy" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + start_at: < + json_values: "2" + > + > + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-1a.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-1a.textproto new file mode 100644 index 0000000..bb08ab7 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-1a.textproto @@ -0,0 +1,50 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Cursor methods take the same number of values as there are OrderBy clauses. 
+ +description: "query: StartAt/EndBefore with values" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + order_by: < + path: < + field: "a" + > + direction: "asc" + > + > + clauses: < + start_at: < + json_values: "7" + > + > + clauses: < + end_before: < + json_values: "9" + > + > + query: < + from: < + collection_id: "C" + > + order_by: < + field: < + field_path: "a" + > + direction: ASCENDING + > + start_at: < + values: < + integer_value: 7 + > + before: true + > + end_at: < + values: < + integer_value: 9 + > + before: true + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-1b.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-1b.textproto new file mode 100644 index 0000000..41e69e9 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-1b.textproto @@ -0,0 +1,48 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Cursor methods take the same number of values as there are OrderBy clauses. + +description: "query: StartAfter/EndAt with values" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + order_by: < + path: < + field: "a" + > + direction: "asc" + > + > + clauses: < + start_after: < + json_values: "7" + > + > + clauses: < + end_at: < + json_values: "9" + > + > + query: < + from: < + collection_id: "C" + > + order_by: < + field: < + field_path: "a" + > + direction: ASCENDING + > + start_at: < + values: < + integer_value: 7 + > + > + end_at: < + values: < + integer_value: 9 + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-2.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-2.textproto new file mode 100644 index 0000000..8e37ad0 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-2.textproto @@ -0,0 +1,71 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Cursor methods take the same number of values as there are OrderBy clauses. + +description: "query: Start/End with two values" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + order_by: < + path: < + field: "a" + > + direction: "asc" + > + > + clauses: < + order_by: < + path: < + field: "b" + > + direction: "desc" + > + > + clauses: < + start_at: < + json_values: "7" + json_values: "8" + > + > + clauses: < + end_at: < + json_values: "9" + json_values: "10" + > + > + query: < + from: < + collection_id: "C" + > + order_by: < + field: < + field_path: "a" + > + direction: ASCENDING + > + order_by: < + field: < + field_path: "b" + > + direction: DESCENDING + > + start_at: < + values: < + integer_value: 7 + > + values: < + integer_value: 8 + > + before: true + > + end_at: < + values: < + integer_value: 9 + > + values: < + integer_value: 10 + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-docid.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-docid.textproto new file mode 100644 index 0000000..91af348 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-docid.textproto @@ -0,0 +1,50 @@ +# DO NOT MODIFY. 
This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Cursor values corresponding to a __name__ field take the document path relative +# to the query's collection. + +description: "query: cursor methods with __name__" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + order_by: < + path: < + field: "__name__" + > + direction: "asc" + > + > + clauses: < + start_after: < + json_values: "\"D1\"" + > + > + clauses: < + end_before: < + json_values: "\"D2\"" + > + > + query: < + from: < + collection_id: "C" + > + order_by: < + field: < + field_path: "__name__" + > + direction: ASCENDING + > + start_at: < + values: < + reference_value: "projects/projectID/databases/(default)/documents/C/D1" + > + > + end_at: < + values: < + reference_value: "projects/projectID/databases/(default)/documents/C/D2" + > + before: true + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-last-wins.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-last-wins.textproto new file mode 100644 index 0000000..9e8fbb1 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-last-wins.textproto @@ -0,0 +1,60 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# When multiple Start* or End* calls occur, the values of the last one are used. + +description: "query: cursor methods, last one wins" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + order_by: < + path: < + field: "a" + > + direction: "asc" + > + > + clauses: < + start_after: < + json_values: "1" + > + > + clauses: < + start_at: < + json_values: "2" + > + > + clauses: < + end_at: < + json_values: "3" + > + > + clauses: < + end_before: < + json_values: "4" + > + > + query: < + from: < + collection_id: "C" + > + order_by: < + field: < + field_path: "a" + > + direction: ASCENDING + > + start_at: < + values: < + integer_value: 2 + > + before: true + > + end_at: < + values: < + integer_value: 4 + > + before: true + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-del-cursor.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-del-cursor.textproto new file mode 100644 index 0000000..c9d4adb --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-del-cursor.textproto @@ -0,0 +1,23 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Sentinel values are not permitted in queries. + +description: "query: Delete in cursor method" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + order_by: < + path: < + field: "a" + > + direction: "asc" + > + > + clauses: < + end_before: < + json_values: "\"Delete\"" + > + > + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-del-where.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-del-where.textproto new file mode 100644 index 0000000..8e92529 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-del-where.textproto @@ -0,0 +1,19 @@ +# DO NOT MODIFY. 
This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Sentinel values are not permitted in queries. + +description: "query: Delete in Where" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + where: < + path: < + field: "a" + > + op: "==" + json_value: "\"Delete\"" + > + > + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-invalid-operator.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-invalid-operator.textproto new file mode 100644 index 0000000..e580c64 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-invalid-operator.textproto @@ -0,0 +1,19 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The != operator is not supported. + +description: "query: invalid operator in Where clause" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + where: < + path: < + field: "a" + > + op: "!=" + json_value: "4" + > + > + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-invalid-path-order.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-invalid-path-order.textproto new file mode 100644 index 0000000..e0a7205 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-invalid-path-order.textproto @@ -0,0 +1,19 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The path has an empty component. + +description: "query: invalid path in OrderBy clause" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + order_by: < + path: < + field: "*" + field: "" + > + direction: "asc" + > + > + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-invalid-path-select.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-invalid-path-select.textproto new file mode 100644 index 0000000..944f984 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-invalid-path-select.textproto @@ -0,0 +1,18 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The path has an empty component. + +description: "query: invalid path in Select clause" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + select: < + fields: < + field: "*" + field: "" + > + > + > + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-invalid-path-where.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-invalid-path-where.textproto new file mode 100644 index 0000000..527923b --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-invalid-path-where.textproto @@ -0,0 +1,20 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The path has an empty component.
+ +description: "query: invalid path in Where clause" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + where: < + path: < + field: "*" + field: "" + > + op: "==" + json_value: "4" + > + > + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-offset-limit-last-wins.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-offset-limit-last-wins.textproto new file mode 100644 index 0000000..dc301f4 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-offset-limit-last-wins.textproto @@ -0,0 +1,30 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# With multiple Offset or Limit clauses, the last one wins. + +description: "query: multiple Offset and Limit clauses" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + offset: 2 + > + clauses: < + limit: 3 + > + clauses: < + limit: 4 + > + clauses: < + offset: 5 + > + query: < + from: < + collection_id: "C" + > + offset: 5 + limit: < + value: 4 + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-offset-limit.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-offset-limit.textproto new file mode 100644 index 0000000..136d9d4 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-offset-limit.textproto @@ -0,0 +1,24 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Offset and Limit clauses. + +description: "query: Offset and Limit clauses" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + offset: 2 + > + clauses: < + limit: 3 + > + query: < + from: < + collection_id: "C" + > + offset: 2 + limit: < + value: 3 + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-order.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-order.textproto new file mode 100644 index 0000000..7ed4c4e --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-order.textproto @@ -0,0 +1,42 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Multiple OrderBy clauses combine. + +description: "query: basic OrderBy clauses" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + order_by: < + path: < + field: "b" + > + direction: "asc" + > + > + clauses: < + order_by: < + path: < + field: "a" + > + direction: "desc" + > + > + query: < + from: < + collection_id: "C" + > + order_by: < + field: < + field_path: "b" + > + direction: ASCENDING + > + order_by: < + field: < + field_path: "a" + > + direction: DESCENDING + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-select-empty.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-select-empty.textproto new file mode 100644 index 0000000..def8b55 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-select-empty.textproto @@ -0,0 +1,23 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# An empty Select clause selects just the document ID. 
+ +description: "query: empty Select clause" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + select: < + > + > + query: < + select: < + fields: < + field_path: "__name__" + > + > + from: < + collection_id: "C" + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-select-last-wins.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-select-last-wins.textproto new file mode 100644 index 0000000..bd78d09 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-select-last-wins.textproto @@ -0,0 +1,36 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The last Select clause is the only one used. + +description: "query: two Select clauses" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + select: < + fields: < + field: "a" + > + fields: < + field: "b" + > + > + > + clauses: < + select: < + fields: < + field: "c" + > + > + > + query: < + select: < + fields: < + field_path: "c" + > + > + from: < + collection_id: "C" + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-select.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-select.textproto new file mode 100644 index 0000000..15e1124 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-select.textproto @@ -0,0 +1,32 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# An ordinary Select clause. + +description: "query: Select clause with some fields" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + select: < + fields: < + field: "a" + > + fields: < + field: "b" + > + > + > + query: < + select: < + fields: < + field_path: "a" + > + fields: < + field_path: "b" + > + > + from: < + collection_id: "C" + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-st-cursor.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-st-cursor.textproto new file mode 100644 index 0000000..66885d0 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-st-cursor.textproto @@ -0,0 +1,23 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Sentinel values are not permitted in queries. + +description: "query: ServerTimestamp in cursor method" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + order_by: < + path: < + field: "a" + > + direction: "asc" + > + > + clauses: < + end_before: < + json_values: "\"ServerTimestamp\"" + > + > + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-st-where.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-st-where.textproto new file mode 100644 index 0000000..05da28d --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-st-where.textproto @@ -0,0 +1,19 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Sentinel values are not permitted in queries. 
+ +description: "query: ServerTimestamp in Where" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + where: < + path: < + field: "a" + > + op: "==" + json_value: "\"ServerTimestamp\"" + > + > + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-where-2.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-where-2.textproto new file mode 100644 index 0000000..1034463 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-where-2.textproto @@ -0,0 +1,59 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Multiple Where clauses are combined into a composite filter. + +description: "query: two Where clauses" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + where: < + path: < + field: "a" + > + op: ">=" + json_value: "5" + > + > + clauses: < + where: < + path: < + field: "b" + > + op: "<" + json_value: "\"foo\"" + > + > + query: < + from: < + collection_id: "C" + > + where: < + composite_filter: < + op: AND + filters: < + field_filter: < + field: < + field_path: "a" + > + op: GREATER_THAN_OR_EQUAL + value: < + integer_value: 5 + > + > + > + filters: < + field_filter: < + field: < + field_path: "b" + > + op: LESS_THAN + value: < + string_value: "foo" + > + > + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-where-NaN.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-where-NaN.textproto new file mode 100644 index 0000000..4a97ca7 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-where-NaN.textproto @@ -0,0 +1,31 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A Where clause that tests for equality with NaN results in a unary filter. + +description: "query: a Where clause comparing to NaN" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + where: < + path: < + field: "a" + > + op: "==" + json_value: "\"NaN\"" + > + > + query: < + from: < + collection_id: "C" + > + where: < + unary_filter: < + op: IS_NAN + field: < + field_path: "a" + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-where-null.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-where-null.textproto new file mode 100644 index 0000000..1869c60 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-where-null.textproto @@ -0,0 +1,31 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A Where clause that tests for equality with null results in a unary filter. 
+ +description: "query: a Where clause comparing to null" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + where: < + path: < + field: "a" + > + op: "==" + json_value: "null" + > + > + query: < + from: < + collection_id: "C" + > + where: < + unary_filter: < + op: IS_NULL + field: < + field_path: "a" + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-where.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-where.textproto new file mode 100644 index 0000000..045c2be --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-where.textproto @@ -0,0 +1,34 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A simple Where clause. + +description: "query: Where clause" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + where: < + path: < + field: "a" + > + op: ">" + json_value: "5" + > + > + query: < + from: < + collection_id: "C" + > + where: < + field_filter: < + field: < + field_path: "a" + > + op: GREATER_THAN + value: < + integer_value: 5 + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-wrong-collection.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-wrong-collection.textproto new file mode 100644 index 0000000..ad6f353 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-wrong-collection.textproto @@ -0,0 +1,19 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If a document snapshot is passed to a Start*/End* method, it must be in the same +# collection as the query. + +description: "query: doc snapshot with wrong collection in cursor method" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + end_before: < + doc_snapshot: < + path: "projects/projectID/databases/(default)/documents/C2/D" + json_data: "{\"a\": 7, \"b\": 8}" + > + > + > + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-basic.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-basic.textproto new file mode 100644 index 0000000..e9b292e --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-basic.textproto @@ -0,0 +1,24 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A simple call, resulting in a single update operation. + +description: "set: basic" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-complex.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-complex.textproto new file mode 100644 index 0000000..6ec1950 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-complex.textproto @@ -0,0 +1,58 @@ +# DO NOT MODIFY. 
This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A call to a write method with complicated input data. + +description: "set: complex" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": [1, 2.5], \"b\": {\"c\": [\"three\", {\"d\": true}]}}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + array_value: < + values: < + integer_value: 1 + > + values: < + double_value: 2.5 + > + > + > + > + fields: < + key: "b" + value: < + map_value: < + fields: < + key: "c" + value: < + array_value: < + values: < + string_value: "three" + > + values: < + map_value: < + fields: < + key: "d" + value: < + boolean_value: true + > + > + > + > + > + > + > + > + > + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-del-merge-alone.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-del-merge-alone.textproto new file mode 100644 index 0000000..811ab8d --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-del-merge-alone.textproto @@ -0,0 +1,28 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A Delete sentinel can appear with a merge option. If the delete paths are the +# only ones to be merged, then no document is sent, just an update mask. + +description: "set-merge: Delete with merge" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + fields: < + field: "b" + field: "c" + > + > + json_data: "{\"a\": 1, \"b\": {\"c\": \"Delete\"}}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + > + update_mask: < + field_paths: "b.c" + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-del-merge.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-del-merge.textproto new file mode 100644 index 0000000..b8d8631 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-del-merge.textproto @@ -0,0 +1,37 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A Delete sentinel can appear with a merge option. + +description: "set-merge: Delete with merge" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + fields: < + field: "a" + > + fields: < + field: "b" + field: "c" + > + > + json_data: "{\"a\": 1, \"b\": {\"c\": \"Delete\"}}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + field_paths: "b.c" + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-del-mergeall.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-del-mergeall.textproto new file mode 100644 index 0000000..af1e845 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-del-mergeall.textproto @@ -0,0 +1,31 @@ +# DO NOT MODIFY. 
This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A Delete sentinel can appear with a mergeAll option. + +description: "set: Delete with MergeAll" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + all: true + > + json_data: "{\"a\": 1, \"b\": {\"c\": \"Delete\"}}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + field_paths: "b.c" + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-del-noarray-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-del-noarray-nested.textproto new file mode 100644 index 0000000..bbf6a3d --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-del-noarray-nested.textproto @@ -0,0 +1,13 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Delete sentinel must be the value of a field. Deletes are implemented by +# turning the path to the Delete sentinel into a FieldPath, and FieldPaths do not +# support array indexing. + +description: "set: Delete cannot be anywhere inside an array value" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": [1, {\"b\": \"Delete\"}]}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-del-noarray.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-del-noarray.textproto new file mode 100644 index 0000000..07fc649 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-del-noarray.textproto @@ -0,0 +1,13 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Delete sentinel must be the value of a field. Deletes are implemented by +# turning the path to the Delete sentinel into a FieldPath, and FieldPaths do not +# support array indexing. + +description: "set: Delete cannot be in an array value" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": [1, 2, \"Delete\"]}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-del-nomerge.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-del-nomerge.textproto new file mode 100644 index 0000000..cb6ef4f --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-del-nomerge.textproto @@ -0,0 +1,17 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The client signals an error if the Delete sentinel is in the input data, but not +# selected by a merge option, because this is most likely a programming bug. 
+ +description: "set-merge: Delete cannot appear in an unmerged field" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + fields: < + field: "a" + > + > + json_data: "{\"a\": 1, \"b\": \"Delete\"}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-del-nonleaf.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-del-nonleaf.textproto new file mode 100644 index 0000000..54f22d9 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-del-nonleaf.textproto @@ -0,0 +1,19 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If a Delete is part of the value at a merge path, then the user is confused: +# their merge path says "replace this entire value" but their Delete says "delete +# this part of the value". This should be an error, just as if they specified +# Delete in a Set with no merge. + +description: "set-merge: Delete cannot appear as part of a merge path" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + fields: < + field: "h" + > + > + json_data: "{\"h\": {\"g\": \"Delete\"}}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-del-wo-merge.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-del-wo-merge.textproto new file mode 100644 index 0000000..2919662 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-del-wo-merge.textproto @@ -0,0 +1,12 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Without a merge option, Set replaces the document with the input data. A Delete +# sentinel in the data makes no sense in this case. + +description: "set: Delete cannot appear unless a merge option is specified" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1, \"b\": \"Delete\"}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-empty.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-empty.textproto new file mode 100644 index 0000000..c2b73d3 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-empty.textproto @@ -0,0 +1,17 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + + +description: "set: creating or setting an empty map" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-merge-fp.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-merge-fp.textproto new file mode 100644 index 0000000..68690f6 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-merge-fp.textproto @@ -0,0 +1,40 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A merge with fields that use special characters. 
+ +description: "set-merge: Merge with FieldPaths" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + fields: < + field: "*" + field: "~" + > + > + json_data: "{\"*\": {\"~\": true}}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "*" + value: < + map_value: < + fields: < + key: "~" + value: < + boolean_value: true + > + > + > + > + > + > + update_mask: < + field_paths: "`*`.`~`" + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-merge-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-merge-nested.textproto new file mode 100644 index 0000000..0d12828 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-merge-nested.textproto @@ -0,0 +1,41 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A merge option where the field is not at top level. Only fields mentioned in the +# option are present in the update operation. + +description: "set-merge: Merge with a nested field" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + fields: < + field: "h" + field: "g" + > + > + json_data: "{\"h\": {\"g\": 4, \"f\": 5}}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "h" + value: < + map_value: < + fields: < + key: "g" + value: < + integer_value: 4 + > + > + > + > + > + > + update_mask: < + field_paths: "h.g" + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-merge-nonleaf.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-merge-nonleaf.textproto new file mode 100644 index 0000000..ca41cb0 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-merge-nonleaf.textproto @@ -0,0 +1,46 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If a field path is in a merge option, the value at that path replaces the stored +# value. That is true even if the value is complex. + +description: "set-merge: Merge field is not a leaf" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + fields: < + field: "h" + > + > + json_data: "{\"h\": {\"f\": 5, \"g\": 6}, \"e\": 7}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "h" + value: < + map_value: < + fields: < + key: "f" + value: < + integer_value: 5 + > + > + fields: < + key: "g" + value: < + integer_value: 6 + > + > + > + > + > + > + update_mask: < + field_paths: "h" + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-merge-prefix.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-merge-prefix.textproto new file mode 100644 index 0000000..1e2c2c5 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-merge-prefix.textproto @@ -0,0 +1,21 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. 
+ +# The prefix would make the other path meaningless, so this is probably a +# programming error. + +description: "set-merge: One merge path cannot be the prefix of another" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + fields: < + field: "a" + > + fields: < + field: "a" + field: "b" + > + > + json_data: "{\"a\": {\"b\": 1}}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-merge-present.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-merge-present.textproto new file mode 100644 index 0000000..f6665de --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-merge-present.textproto @@ -0,0 +1,20 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The client signals an error if a merge option mentions a path that is not in the +# input data. + +description: "set-merge: Merge fields must all be present in data" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + fields: < + field: "b" + > + fields: < + field: "a" + > + > + json_data: "{\"a\": 1}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-merge.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-merge.textproto new file mode 100644 index 0000000..2791252 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-merge.textproto @@ -0,0 +1,32 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Fields in the input data but not in a merge option are pruned. + +description: "set-merge: Merge with a field" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + fields: < + field: "a" + > + > + json_data: "{\"a\": 1, \"b\": 2}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-mergeall-empty.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-mergeall-empty.textproto new file mode 100644 index 0000000..1c6615b --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-mergeall-empty.textproto @@ -0,0 +1,15 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# It makes no sense to specify MergeAll and provide no data, so we disallow it on +# the client. + +description: "set: MergeAll cannot be specified with empty data." +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + all: true + > + json_data: "{}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-mergeall-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-mergeall-nested.textproto new file mode 100644 index 0000000..1fbc697 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-mergeall-nested.textproto @@ -0,0 +1,45 @@ +# DO NOT MODIFY. 
This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# MergeAll with nested fields results in an update mask that includes entries for +# all the leaf fields. + +description: "set: MergeAll with nested fields" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + all: true + > + json_data: "{\"h\": { \"g\": 3, \"f\": 4 }}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "h" + value: < + map_value: < + fields: < + key: "f" + value: < + integer_value: 4 + > + > + fields: < + key: "g" + value: < + integer_value: 3 + > + > + > + > + > + > + update_mask: < + field_paths: "h.f" + field_paths: "h.g" + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-mergeall.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-mergeall.textproto new file mode 100644 index 0000000..cb2ebc5 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-mergeall.textproto @@ -0,0 +1,37 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The MergeAll option with a simple piece of data. + +description: "set: MergeAll" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + all: true + > + json_data: "{\"a\": 1, \"b\": 2}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + fields: < + key: "b" + value: < + integer_value: 2 + > + > + > + update_mask: < + field_paths: "a" + field_paths: "b" + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-nodel.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-nodel.textproto new file mode 100644 index 0000000..0fb887d --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-nodel.textproto @@ -0,0 +1,11 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Delete sentinel cannot be used in Create, or in Set without a Merge option. + +description: "set: Delete cannot appear in data" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1, \"b\": \"Delete\"}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-nosplit.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-nosplit.textproto new file mode 100644 index 0000000..0ff3fad --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-nosplit.textproto @@ -0,0 +1,37 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Create and Set treat their map keys literally. They do not split on dots. 
+ +description: "set: don\342\200\231t split on dots" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{ \"a.b\": { \"c.d\": 1 }, \"e\": 2 }" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a.b" + value: < + map_value: < + fields: < + key: "c.d" + value: < + integer_value: 1 + > + > + > + > + > + fields: < + key: "e" + value: < + integer_value: 2 + > + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-special-chars.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-special-chars.textproto new file mode 100644 index 0000000..f4122c9 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-special-chars.textproto @@ -0,0 +1,38 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Create and Set treat their map keys literally. They do not escape special +# characters. + +description: "set: non-alpha characters in map keys" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{ \"*\": { \".\": 1 }, \"~\": 2 }" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "*" + value: < + map_value: < + fields: < + key: "." + value: < + integer_value: 1 + > + > + > + > + > + fields: < + key: "~" + value: < + integer_value: 2 + > + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-st-alone-mergeall.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-st-alone-mergeall.textproto new file mode 100644 index 0000000..16ce4cf --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-st-alone-mergeall.textproto @@ -0,0 +1,26 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If the only values in the input are ServerTimestamps, then no update operation +# should be produced. + +description: "set: ServerTimestamp alone with MergeAll" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + all: true + > + json_data: "{\"a\": \"ServerTimestamp\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "a" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-st-alone.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-st-alone.textproto new file mode 100644 index 0000000..6ce46d7 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-st-alone.textproto @@ -0,0 +1,28 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If the only values in the input are ServerTimestamps, then an update operation +# with an empty map should be produced. 
+ +description: "set: ServerTimestamp alone" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": \"ServerTimestamp\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "a" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-st-merge-both.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-st-merge-both.textproto new file mode 100644 index 0000000..5cc7bbc --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-st-merge-both.textproto @@ -0,0 +1,45 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Just as when no merge option is specified, ServerTimestamp sentinel values are +# removed from the data in the update operation and become transforms. + +description: "set-merge: ServerTimestamp with Merge of both fields" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + fields: < + field: "a" + > + fields: < + field: "b" + > + > + json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "b" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-st-merge-nonleaf-alone.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-st-merge-nonleaf-alone.textproto new file mode 100644 index 0000000..f513b6c --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-st-merge-nonleaf-alone.textproto @@ -0,0 +1,37 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If a field path is in a merge option, the value at that path replaces the stored +# value. If the value has only ServerTimestamps, they become transforms and we +# clear the value by including the field path in the update mask. 
+ +description: "set-merge: non-leaf merge field with ServerTimestamp alone" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + fields: < + field: "h" + > + > + json_data: "{\"h\": {\"g\": \"ServerTimestamp\"}, \"e\": 7}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + > + update_mask: < + field_paths: "h" + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "h.g" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-st-merge-nonleaf.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-st-merge-nonleaf.textproto new file mode 100644 index 0000000..e53e7e2 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-st-merge-nonleaf.textproto @@ -0,0 +1,49 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If a field path is in a merge option, the value at that path replaces the stored +# value, and ServerTimestamps inside that value become transforms as usual. + +description: "set-merge: non-leaf merge field with ServerTimestamp" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + fields: < + field: "h" + > + > + json_data: "{\"h\": {\"f\": 5, \"g\": \"ServerTimestamp\"}, \"e\": 7}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "h" + value: < + map_value: < + fields: < + key: "f" + value: < + integer_value: 5 + > + > + > + > + > + > + update_mask: < + field_paths: "h" + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "h.g" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-st-merge-nowrite.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-st-merge-nowrite.textproto new file mode 100644 index 0000000..3222230 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-st-merge-nowrite.textproto @@ -0,0 +1,28 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If all the fields in the merge option have ServerTimestamp values, then no +# update operation is produced, only a transform. 
+ +description: "set-merge: If no ordinary values in Merge, no write" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + fields: < + field: "b" + > + > + json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "b" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-st-mergeall.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-st-mergeall.textproto new file mode 100644 index 0000000..b8c53a5 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-st-mergeall.textproto @@ -0,0 +1,40 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Just as when no merge option is specified, ServerTimestamp sentinel values are +# removed from the data in the update operation and become transforms. + +description: "set: ServerTimestamp with MergeAll" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + all: true + > + json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "b" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-st-multi.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-st-multi.textproto new file mode 100644 index 0000000..375ec18 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-st-multi.textproto @@ -0,0 +1,38 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A document can have more than one ServerTimestamp field. Since all the +# ServerTimestamp fields are removed, the only field in the update is "a". + +description: "set: multiple ServerTimestamp fields" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\", \"c\": {\"d\": \"ServerTimestamp\"}}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "b" + set_to_server_value: REQUEST_TIME + > + field_transforms: < + field_path: "c.d" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-st-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-st-nested.textproto new file mode 100644 index 0000000..abfd2e8 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-st-nested.textproto @@ -0,0 +1,35 @@ +# DO NOT MODIFY. 
This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A ServerTimestamp value can occur at any depth. In this case, the transform +# applies to the field path "b.c". Since "c" is removed from the update, "b" +# becomes empty, so it is also removed from the update. + +description: "set: nested ServerTimestamp field" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1, \"b\": {\"c\": \"ServerTimestamp\"}}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "b.c" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-st-noarray-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-st-noarray-nested.textproto new file mode 100644 index 0000000..241d791 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-st-noarray-nested.textproto @@ -0,0 +1,12 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# There cannot be an array value anywhere on the path from the document root to +# the ServerTimestamp sentinel. Firestore transforms don't support array indexing. + +description: "set: ServerTimestamp cannot be anywhere inside an array value" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": [1, {\"b\": \"ServerTimestamp\"}]}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-st-noarray.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-st-noarray.textproto new file mode 100644 index 0000000..591fb03 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-st-noarray.textproto @@ -0,0 +1,12 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The ServerTimestamp sentinel must be the value of a field. Firestore transforms +# don't support array indexing. + +description: "set: ServerTimestamp cannot be in an array value" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": [1, 2, \"ServerTimestamp\"]}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-st-nomerge.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-st-nomerge.textproto new file mode 100644 index 0000000..20c0ae1 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-st-nomerge.textproto @@ -0,0 +1,33 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If the ServerTimestamp value is not mentioned in a merge option, then it is +# pruned from the data but does not result in a transform. 
+ +description: "set-merge: If is ServerTimestamp not in Merge, no transform" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + fields: < + field: "a" + > + > + json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-st.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-st.textproto new file mode 100644 index 0000000..8bceddc --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-st.textproto @@ -0,0 +1,36 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A key with the special ServerTimestamp sentinel is removed from the data in the +# update operation. Instead it appears in a separate Transform operation. Note +# that in these tests, the string "ServerTimestamp" should be replaced with the +# special ServerTimestamp value. + +description: "set: ServerTimestamp with data" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "b" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-badchar.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-badchar.textproto new file mode 100644 index 0000000..656ff53 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-badchar.textproto @@ -0,0 +1,12 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The keys of the data given to Update are interpreted, unlike those of Create and +# Set. They cannot contain special characters. + +description: "update: invalid character" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a~b\": 1}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-basic.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-basic.textproto new file mode 100644 index 0000000..9da316f --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-basic.textproto @@ -0,0 +1,30 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A simple call, resulting in a single update operation. 
+ +description: "update: basic" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-complex.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-complex.textproto new file mode 100644 index 0000000..1a6d9ef --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-complex.textproto @@ -0,0 +1,65 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A call to a write method with complicated input data. + +description: "update: complex" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": [1, 2.5], \"b\": {\"c\": [\"three\", {\"d\": true}]}}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + array_value: < + values: < + integer_value: 1 + > + values: < + double_value: 2.5 + > + > + > + > + fields: < + key: "b" + value: < + map_value: < + fields: < + key: "c" + value: < + array_value: < + values: < + string_value: "three" + > + values: < + map_value: < + fields: < + key: "d" + value: < + boolean_value: true + > + > + > + > + > + > + > + > + > + > + > + update_mask: < + field_paths: "a" + field_paths: "b" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-del-alone.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-del-alone.textproto new file mode 100644 index 0000000..8f55823 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-del-alone.textproto @@ -0,0 +1,25 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If the input data consists solely of Deletes, then the update operation has no +# map, just an update mask. + +description: "update: Delete alone" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": \"Delete\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + > + update_mask: < + field_paths: "a" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-del-dot.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-del-dot.textproto new file mode 100644 index 0000000..c0ebdf6 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-del-dot.textproto @@ -0,0 +1,46 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# After expanding top-level dotted fields, fields with Delete values are pruned +# from the output data, but appear in the update mask. 
+ +description: "update: Delete with a dotted field" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1, \"b.c\": \"Delete\", \"b.d\": 2}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + fields: < + key: "b" + value: < + map_value: < + fields: < + key: "d" + value: < + integer_value: 2 + > + > + > + > + > + > + update_mask: < + field_paths: "a" + field_paths: "b.c" + field_paths: "b.d" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-del-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-del-nested.textproto new file mode 100644 index 0000000..ed10269 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-del-nested.textproto @@ -0,0 +1,11 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Delete sentinel must be the value of a top-level key. + +description: "update: Delete cannot be nested" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": {\"b\": \"Delete\"}}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-del-noarray-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-del-noarray-nested.textproto new file mode 100644 index 0000000..a2eec49 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-del-noarray-nested.textproto @@ -0,0 +1,13 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Delete sentinel must be the value of a field. Deletes are implemented by +# turning the path to the Delete sentinel into a FieldPath, and FieldPaths do not +# support array indexing. + +description: "update: Delete cannot be anywhere inside an array value" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": [1, {\"b\": \"Delete\"}]}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-del-noarray.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-del-noarray.textproto new file mode 100644 index 0000000..a7eea87 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-del-noarray.textproto @@ -0,0 +1,13 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Delete sentinel must be the value of a field. Deletes are implemented by +# turning the path to the Delete sentinel into a FieldPath, and FieldPaths do not +# support array indexing. 
+ +description: "update: Delete cannot be in an array value" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": [1, 2, \"Delete\"]}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-del.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-del.textproto new file mode 100644 index 0000000..ec443e6 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-del.textproto @@ -0,0 +1,32 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If a field's value is the Delete sentinel, then it doesn't appear in the update +# data, but does in the mask. + +description: "update: Delete" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1, \"b\": \"Delete\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + field_paths: "b" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-exists-precond.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-exists-precond.textproto new file mode 100644 index 0000000..3c6fef4 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-exists-precond.textproto @@ -0,0 +1,14 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Update method does not support an explicit exists precondition. + +description: "update: Exists precondition is invalid" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + precondition: < + exists: true + > + json_data: "{\"a\": 1}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-fp-empty-component.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-fp-empty-component.textproto new file mode 100644 index 0000000..c3bceff --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-fp-empty-component.textproto @@ -0,0 +1,11 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Empty fields are not allowed. + +description: "update: empty field path component" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a..b\": 1}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-no-paths.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-no-paths.textproto new file mode 100644 index 0000000..b524b74 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-no-paths.textproto @@ -0,0 +1,11 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# It is a client-side error to call Update with empty data. 
+ +description: "update: no paths" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-basic.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-basic.textproto new file mode 100644 index 0000000..515f29d --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-basic.textproto @@ -0,0 +1,33 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A simple call, resulting in a single update operation. + +description: "update-paths: basic" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + json_values: "1" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-complex.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-complex.textproto new file mode 100644 index 0000000..38a8322 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-complex.textproto @@ -0,0 +1,72 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A call to a write method with complicated input data. + +description: "update-paths: complex" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + field_paths: < + field: "b" + > + json_values: "[1, 2.5]" + json_values: "{\"c\": [\"three\", {\"d\": true}]}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + array_value: < + values: < + integer_value: 1 + > + values: < + double_value: 2.5 + > + > + > + > + fields: < + key: "b" + value: < + map_value: < + fields: < + key: "c" + value: < + array_value: < + values: < + string_value: "three" + > + values: < + map_value: < + fields: < + key: "d" + value: < + boolean_value: true + > + > + > + > + > + > + > + > + > + > + > + update_mask: < + field_paths: "a" + field_paths: "b" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-del-alone.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-del-alone.textproto new file mode 100644 index 0000000..5dbb787 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-del-alone.textproto @@ -0,0 +1,28 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If the input data consists solely of Deletes, then the update operation has no +# map, just an update mask. 
+ +description: "update-paths: Delete alone" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + json_values: "\"Delete\"" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + > + update_mask: < + field_paths: "a" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-del-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-del-nested.textproto new file mode 100644 index 0000000..bdf65fb --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-del-nested.textproto @@ -0,0 +1,14 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Delete sentinel must be the value of a top-level key. + +description: "update-paths: Delete cannot be nested" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + json_values: "{\"b\": \"Delete\"}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-del-noarray-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-del-noarray-nested.textproto new file mode 100644 index 0000000..d3da15d --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-del-noarray-nested.textproto @@ -0,0 +1,16 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Delete sentinel must be the value of a field. Deletes are implemented by +# turning the path to the Delete sentinel into a FieldPath, and FieldPaths do not +# support array indexing. + +description: "update-paths: Delete cannot be anywhere inside an array value" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + json_values: "[1, {\"b\": \"Delete\"}]" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-del-noarray.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-del-noarray.textproto new file mode 100644 index 0000000..9ebdd09 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-del-noarray.textproto @@ -0,0 +1,16 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Delete sentinel must be the value of a field. Deletes are implemented by +# turning the path to the Delete sentinel into a FieldPath, and FieldPaths do not +# support array indexing. + +description: "update-paths: Delete cannot be in an array value" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + json_values: "[1, 2, \"Delete\"]" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-del.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-del.textproto new file mode 100644 index 0000000..5197a78 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-del.textproto @@ -0,0 +1,39 @@ +# DO NOT MODIFY. 
This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If a field's value is the Delete sentinel, then it doesn't appear in the update +# data, but does in the mask. + +description: "update-paths: Delete" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + field_paths: < + field: "b" + > + json_values: "1" + json_values: "\"Delete\"" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + field_paths: "b" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-exists-precond.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-exists-precond.textproto new file mode 100644 index 0000000..084e077 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-exists-precond.textproto @@ -0,0 +1,17 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Update method does not support an explicit exists precondition. + +description: "update-paths: Exists precondition is invalid" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + precondition: < + exists: true + > + field_paths: < + field: "a" + > + json_values: "1" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-del.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-del.textproto new file mode 100644 index 0000000..5c92aeb --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-del.textproto @@ -0,0 +1,47 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If one nested field is deleted, and another isn't, preserve the second. + +description: "update-paths: field paths with delete" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "foo" + field: "bar" + > + field_paths: < + field: "foo" + field: "delete" + > + json_values: "1" + json_values: "\"Delete\"" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "foo" + value: < + map_value: < + fields: < + key: "bar" + value: < + integer_value: 1 + > + > + > + > + > + > + update_mask: < + field_paths: "foo.bar" + field_paths: "foo.delete" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-dup.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-dup.textproto new file mode 100644 index 0000000..fedbd3a --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-dup.textproto @@ -0,0 +1,22 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The same field cannot occur more than once. 
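A sketch of the duplicate-path error the next fixture encodes:

package example

import (
	"context"

	"cloud.google.com/go/firestore"
)

// Repeating a field path in one call is a client-side error.
func updateDuplicatePaths(ctx context.Context, client *firestore.Client) error {
	_, err := client.Collection("C").Doc("d").Update(ctx, []firestore.Update{
		{Path: "a", Value: 1},
		{Path: "b", Value: 2},
		{Path: "a", Value: 3}, // duplicates the first path
	})
	return err // expected to be non-nil
}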
+ +description: "update-paths: duplicate field path" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + field_paths: < + field: "b" + > + field_paths: < + field: "a" + > + json_values: "1" + json_values: "2" + json_values: "3" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-empty-component.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-empty-component.textproto new file mode 100644 index 0000000..7a5df25 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-empty-component.textproto @@ -0,0 +1,15 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Empty fields are not allowed. + +description: "update-paths: empty field path component" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "*" + field: "" + > + json_values: "1" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-empty.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-empty.textproto new file mode 100644 index 0000000..311e309 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-empty.textproto @@ -0,0 +1,13 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A FieldPath of length zero is invalid. + +description: "update-paths: empty field path" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + > + json_values: "1" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-multi.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-multi.textproto new file mode 100644 index 0000000..9ba41e3 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-multi.textproto @@ -0,0 +1,42 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The UpdatePaths or equivalent method takes a list of FieldPaths. Each FieldPath +# is a sequence of uninterpreted path components. + +description: "update-paths: multiple-element field path" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + field: "b" + > + json_values: "1" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + map_value: < + fields: < + key: "b" + value: < + integer_value: 1 + > + > + > + > + > + > + update_mask: < + field_paths: "a.b" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-nosplit.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-nosplit.textproto new file mode 100644 index 0000000..5164952 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-nosplit.textproto @@ -0,0 +1,48 @@ +# DO NOT MODIFY. 
This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# FieldPath components are not split on dots. + +description: "update-paths: FieldPath elements are not split on dots" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a.b" + field: "f.g" + > + json_values: "{\"n.o\": 7}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a.b" + value: < + map_value: < + fields: < + key: "f.g" + value: < + map_value: < + fields: < + key: "n.o" + value: < + integer_value: 7 + > + > + > + > + > + > + > + > + > + update_mask: < + field_paths: "`a.b`.`f.g`" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-no-paths.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-no-paths.textproto new file mode 100644 index 0000000..d9939dc --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-no-paths.textproto @@ -0,0 +1,10 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# It is a client-side error to call Update with empty data. + +description: "update-paths: no paths" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-prefix-1.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-prefix-1.textproto new file mode 100644 index 0000000..1710b91 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-prefix-1.textproto @@ -0,0 +1,19 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# In the input data, one field cannot be a prefix of another. + +description: "update-paths: prefix #1" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + field: "b" + > + field_paths: < + field: "a" + > + json_values: "1" + json_values: "2" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-prefix-2.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-prefix-2.textproto new file mode 100644 index 0000000..be78ab5 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-prefix-2.textproto @@ -0,0 +1,19 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# In the input data, one field cannot be a prefix of another. 
+ +description: "update-paths: prefix #2" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + field_paths: < + field: "a" + field: "b" + > + json_values: "1" + json_values: "2" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-prefix-3.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-prefix-3.textproto new file mode 100644 index 0000000..b8a84c9 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-prefix-3.textproto @@ -0,0 +1,20 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# In the input data, one field cannot be a prefix of another, even if the values +# could in principle be combined. + +description: "update-paths: prefix #3" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + field_paths: < + field: "a" + field: "d" + > + json_values: "{\"b\": 1}" + json_values: "2" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-special-chars.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-special-chars.textproto new file mode 100644 index 0000000..51cb33b --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-special-chars.textproto @@ -0,0 +1,53 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# FieldPaths can contain special characters. + +description: "update-paths: special characters" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "*" + field: "~" + > + field_paths: < + field: "*" + field: "`" + > + json_values: "1" + json_values: "2" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "*" + value: < + map_value: < + fields: < + key: "`" + value: < + integer_value: 2 + > + > + fields: < + key: "~" + value: < + integer_value: 1 + > + > + > + > + > + > + update_mask: < + field_paths: "`*`.`\\``" + field_paths: "`*`.`~`" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-st-alone.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-st-alone.textproto new file mode 100644 index 0000000..abc44f5 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-st-alone.textproto @@ -0,0 +1,29 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If the only values in the input are ServerTimestamps, then no update operation +# should be produced. 
+ +description: "update-paths: ServerTimestamp alone" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + json_values: "\"ServerTimestamp\"" + request: < + database: "projects/projectID/databases/(default)" + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "a" + set_to_server_value: REQUEST_TIME + > + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-st-multi.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-st-multi.textproto new file mode 100644 index 0000000..b0b7df1 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-st-multi.textproto @@ -0,0 +1,56 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A document can have more than one ServerTimestamp field. Since all the +# ServerTimestamp fields are removed, the only field in the update is "a". + +description: "update-paths: multiple ServerTimestamp fields" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + field_paths: < + field: "b" + > + field_paths: < + field: "c" + > + json_values: "1" + json_values: "\"ServerTimestamp\"" + json_values: "{\"d\": \"ServerTimestamp\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + field_paths: "c" + > + current_document: < + exists: true + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "b" + set_to_server_value: REQUEST_TIME + > + field_transforms: < + field_path: "c.d" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-st-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-st-nested.textproto new file mode 100644 index 0000000..3077368 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-st-nested.textproto @@ -0,0 +1,49 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A ServerTimestamp value can occur at any depth. In this case, the transform +# applies to the field path "b.c". Since "c" is removed from the update, "b" +# becomes empty, so it is also removed from the update. 
+ +description: "update-paths: nested ServerTimestamp field" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + field_paths: < + field: "b" + > + json_values: "1" + json_values: "{\"c\": \"ServerTimestamp\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + field_paths: "b" + > + current_document: < + exists: true + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "b.c" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-st-noarray-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-st-noarray-nested.textproto new file mode 100644 index 0000000..2c2cb89 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-st-noarray-nested.textproto @@ -0,0 +1,15 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# There cannot be an array value anywhere on the path from the document root to +# the ServerTimestamp sentinel. Firestore transforms don't support array indexing. + +description: "update-paths: ServerTimestamp cannot be anywhere inside an array value" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + json_values: "[1, {\"b\": \"ServerTimestamp\"}]" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-st-noarray.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-st-noarray.textproto new file mode 100644 index 0000000..a2baa66 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-st-noarray.textproto @@ -0,0 +1,15 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The ServerTimestamp sentinel must be the value of a field. Firestore transforms +# don't support array indexing. + +description: "update-paths: ServerTimestamp cannot be in an array value" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + json_values: "[1, 2, \"ServerTimestamp\"]" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-st.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-st.textproto new file mode 100644 index 0000000..40634c1 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-st.textproto @@ -0,0 +1,49 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A key with the special ServerTimestamp sentinel is removed from the data in the +# update operation. Instead it appears in a separate Transform operation. Note +# that in these tests, the string "ServerTimestamp" should be replaced with the +# special ServerTimestamp value. 
+ +description: "update-paths: ServerTimestamp with data" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + field_paths: < + field: "b" + > + json_values: "1" + json_values: "\"ServerTimestamp\"" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + > + current_document: < + exists: true + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "b" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-uptime.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-uptime.textproto new file mode 100644 index 0000000..7a15874 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-uptime.textproto @@ -0,0 +1,40 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Update call supports a last-update-time precondition. + +description: "update-paths: last-update-time precondition" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + precondition: < + update_time: < + seconds: 42 + > + > + field_paths: < + field: "a" + > + json_values: "1" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + > + current_document: < + update_time: < + seconds: 42 + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-prefix-1.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-prefix-1.textproto new file mode 100644 index 0000000..e5c895e --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-prefix-1.textproto @@ -0,0 +1,11 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# In the input data, one field cannot be a prefix of another. + +description: "update: prefix #1" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a.b\": 1, \"a\": 2}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-prefix-2.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-prefix-2.textproto new file mode 100644 index 0000000..4870176 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-prefix-2.textproto @@ -0,0 +1,11 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# In the input data, one field cannot be a prefix of another. 
+ +description: "update: prefix #2" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1, \"a.b\": 2}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-prefix-3.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-prefix-3.textproto new file mode 100644 index 0000000..0c03b0d --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-prefix-3.textproto @@ -0,0 +1,12 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# In the input data, one field cannot be a prefix of another, even if the values +# could in principle be combined. + +description: "update: prefix #3" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": {\"b\": 1}, \"a.d\": 2}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-quoting.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-quoting.textproto new file mode 100644 index 0000000..20e530a --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-quoting.textproto @@ -0,0 +1,45 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# In a field path, any component beginning with a non-letter or underscore is +# quoted. + +description: "update: non-letter starting chars are quoted, except underscore" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"_0.1.+2\": 1}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "_0" + value: < + map_value: < + fields: < + key: "1" + value: < + map_value: < + fields: < + key: "+2" + value: < + integer_value: 1 + > + > + > + > + > + > + > + > + > + update_mask: < + field_paths: "_0.`1`.`+2`" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-split-top-level.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-split-top-level.textproto new file mode 100644 index 0000000..d1b0ca0 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-split-top-level.textproto @@ -0,0 +1,45 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Update method splits only top-level keys at dots. Keys at other levels are +# taken literally. 
+ +description: "update: Split on dots for top-level keys only" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"h.g\": {\"j.k\": 6}}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "h" + value: < + map_value: < + fields: < + key: "g" + value: < + map_value: < + fields: < + key: "j.k" + value: < + integer_value: 6 + > + > + > + > + > + > + > + > + > + update_mask: < + field_paths: "h.g" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-split.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-split.textproto new file mode 100644 index 0000000..b96fd6a --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-split.textproto @@ -0,0 +1,44 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Update method splits top-level keys at dots. + +description: "update: split on dots" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a.b.c\": 1}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + map_value: < + fields: < + key: "b" + value: < + map_value: < + fields: < + key: "c" + value: < + integer_value: 1 + > + > + > + > + > + > + > + > + > + update_mask: < + field_paths: "a.b.c" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-st-alone.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-st-alone.textproto new file mode 100644 index 0000000..0d5ab6e --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-st-alone.textproto @@ -0,0 +1,26 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If the only values in the input are ServerTimestamps, then no update operation +# should be produced. + +description: "update: ServerTimestamp alone" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": \"ServerTimestamp\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "a" + set_to_server_value: REQUEST_TIME + > + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-st-dot.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-st-dot.textproto new file mode 100644 index 0000000..19d4d18 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-st-dot.textproto @@ -0,0 +1,27 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Like other uses of ServerTimestamp, the data is pruned and the field does not +# appear in the update mask, because it is in the transform. In this case An +# update operation is produced just to hold the precondition. 
+ +description: "update: ServerTimestamp with dotted field" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a.b.c\": \"ServerTimestamp\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "a.b.c" + set_to_server_value: REQUEST_TIME + > + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-st-multi.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-st-multi.textproto new file mode 100644 index 0000000..0434cb5 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-st-multi.textproto @@ -0,0 +1,49 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A document can have more than one ServerTimestamp field. Since all the +# ServerTimestamp fields are removed, the only field in the update is "a". + +# b is not in the mask because it will be set in the transform. c must be in the +# mask: it should be replaced entirely. The transform will set c.d to the +# timestamp, but the update will delete the rest of c. + +description: "update: multiple ServerTimestamp fields" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\", \"c\": {\"d\": \"ServerTimestamp\"}}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + field_paths: "c" + > + current_document: < + exists: true + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "b" + set_to_server_value: REQUEST_TIME + > + field_transforms: < + field_path: "c.d" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-st-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-st-nested.textproto new file mode 100644 index 0000000..f79d9c6 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-st-nested.textproto @@ -0,0 +1,42 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A ServerTimestamp value can occur at any depth. In this case, the transform +# applies to the field path "b.c". Since "c" is removed from the update, "b" +# becomes empty, so it is also removed from the update. 
+ +description: "update: nested ServerTimestamp field" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1, \"b\": {\"c\": \"ServerTimestamp\"}}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + field_paths: "b" + > + current_document: < + exists: true + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "b.c" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-st-noarray-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-st-noarray-nested.textproto new file mode 100644 index 0000000..2939dd6 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-st-noarray-nested.textproto @@ -0,0 +1,12 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# There cannot be an array value anywhere on the path from the document root to +# the ServerTimestamp sentinel. Firestore transforms don't support array indexing. + +description: "update: ServerTimestamp cannot be anywhere inside an array value" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": [1, {\"b\": \"ServerTimestamp\"}]}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-st-noarray.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-st-noarray.textproto new file mode 100644 index 0000000..f3879cd --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-st-noarray.textproto @@ -0,0 +1,12 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The ServerTimestamp sentinel must be the value of a field. Firestore transforms +# don't support array indexing. + +description: "update: ServerTimestamp cannot be in an array value" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": [1, 2, \"ServerTimestamp\"]}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-st.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-st.textproto new file mode 100644 index 0000000..12045a9 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-st.textproto @@ -0,0 +1,42 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A key with the special ServerTimestamp sentinel is removed from the data in the +# update operation. Instead it appears in a separate Transform operation. Note +# that in these tests, the string "ServerTimestamp" should be replaced with the +# special ServerTimestamp value. 
+ +description: "update: ServerTimestamp with data" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + > + current_document: < + exists: true + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "b" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-uptime.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-uptime.textproto new file mode 100644 index 0000000..66119ac --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-uptime.textproto @@ -0,0 +1,37 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Update call supports a last-update-time precondition. + +description: "update: last-update-time precondition" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + precondition: < + update_time: < + seconds: 42 + > + > + json_data: "{\"a\": 1}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + > + current_document: < + update_time: < + seconds: 42 + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/to_value.go b/vendor/cloud.google.com/go/firestore/to_value.go new file mode 100644 index 0000000..1c97ecf --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/to_value.go @@ -0,0 +1,278 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +import ( + "errors" + "fmt" + "reflect" + "time" + + "cloud.google.com/go/internal/fields" + "github.com/golang/protobuf/ptypes" + ts "github.com/golang/protobuf/ptypes/timestamp" + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + "google.golang.org/genproto/googleapis/type/latlng" +) + +var nullValue = &pb.Value{&pb.Value_NullValue{}} + +var ( + typeOfByteSlice = reflect.TypeOf([]byte{}) + typeOfGoTime = reflect.TypeOf(time.Time{}) + typeOfLatLng = reflect.TypeOf((*latlng.LatLng)(nil)) + typeOfDocumentRef = reflect.TypeOf((*DocumentRef)(nil)) + typeOfProtoTimestamp = reflect.TypeOf((*ts.Timestamp)(nil)) +) + +// toProtoValue converts a Go value to a Firestore Value protobuf. +// Some corner cases: +// - All nils (nil interface, nil slice, nil map, nil pointer) are converted to +// a NullValue (not a nil *pb.Value). toProtoValue never returns (nil, false, nil). 
+// It returns (nil, true, nil) if everything in the value is ServerTimestamp. +// - An error is returned for uintptr, uint and uint64, because Firestore uses +// an int64 to represent integral values, and those types can't be properly +// represented in an int64. +// - An error is returned for the special Delete value. +func toProtoValue(v reflect.Value) (pbv *pb.Value, sawServerTimestamp bool, err error) { + if !v.IsValid() { + return nullValue, false, nil + } + vi := v.Interface() + if vi == Delete { + return nil, false, errors.New("firestore: cannot use Delete in value") + } + if vi == ServerTimestamp { + return nil, false, errors.New("firestore: must use ServerTimestamp as a map value") + } + switch x := vi.(type) { + case []byte: + return &pb.Value{&pb.Value_BytesValue{x}}, false, nil + case time.Time: + ts, err := ptypes.TimestampProto(x) + if err != nil { + return nil, false, err + } + return &pb.Value{&pb.Value_TimestampValue{ts}}, false, nil + case *ts.Timestamp: + if x == nil { + // gRPC doesn't like nil oneofs. Use NullValue. + return nullValue, false, nil + } + return &pb.Value{&pb.Value_TimestampValue{x}}, false, nil + case *latlng.LatLng: + if x == nil { + // gRPC doesn't like nil oneofs. Use NullValue. + return nullValue, false, nil + } + return &pb.Value{&pb.Value_GeoPointValue{x}}, false, nil + case *DocumentRef: + if x == nil { + // gRPC doesn't like nil oneofs. Use NullValue. + return nullValue, false, nil + } + return &pb.Value{&pb.Value_ReferenceValue{x.Path}}, false, nil + // Do not add bool, string, int, etc. to this switch; leave them in the + // reflect-based switch below. Moving them here would drop support for + // types whose underlying types are those primitives. + // E.g. Given "type mybool bool", an ordinary type switch on bool will + // not catch a mybool, but the reflect.Kind of a mybool is reflect.Bool. + } + switch v.Kind() { + case reflect.Bool: + return &pb.Value{&pb.Value_BooleanValue{v.Bool()}}, false, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return &pb.Value{&pb.Value_IntegerValue{v.Int()}}, false, nil + case reflect.Uint8, reflect.Uint16, reflect.Uint32: + return &pb.Value{&pb.Value_IntegerValue{int64(v.Uint())}}, false, nil + case reflect.Float32, reflect.Float64: + return &pb.Value{&pb.Value_DoubleValue{v.Float()}}, false, nil + case reflect.String: + return &pb.Value{&pb.Value_StringValue{v.String()}}, false, nil + case reflect.Slice: + return sliceToProtoValue(v) + case reflect.Map: + return mapToProtoValue(v) + case reflect.Struct: + return structToProtoValue(v) + case reflect.Ptr: + if v.IsNil() { + return nullValue, false, nil + } + return toProtoValue(v.Elem()) + case reflect.Interface: + if v.NumMethod() == 0 { // empty interface: recurse on its contents + return toProtoValue(v.Elem()) + } + fallthrough // any other interface value is an error + + default: + return nil, false, fmt.Errorf("firestore: cannot convert type %s to value", v.Type()) + } +} + +func sliceToProtoValue(v reflect.Value) (*pb.Value, bool, error) { + // A nil slice is converted to a null value. 
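+	// (This is consistent with nil maps and nil pointers, which also become NullValue.)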
+ if v.IsNil() { + return nullValue, false, nil + } + vals := make([]*pb.Value, v.Len()) + for i := 0; i < v.Len(); i++ { + val, sawServerTimestamp, err := toProtoValue(v.Index(i)) + if err != nil { + return nil, false, err + } + if sawServerTimestamp { + return nil, false, errors.New("firestore: ServerTimestamp cannot occur in an array") + } + vals[i] = val + } + return &pb.Value{&pb.Value_ArrayValue{&pb.ArrayValue{vals}}}, false, nil +} + +func mapToProtoValue(v reflect.Value) (*pb.Value, bool, error) { + if v.Type().Key().Kind() != reflect.String { + return nil, false, errors.New("firestore: map key type must be string") + } + // A nil map is converted to a null value. + if v.IsNil() { + return nullValue, false, nil + } + m := map[string]*pb.Value{} + sawServerTimestamp := false + for _, k := range v.MapKeys() { + mi := v.MapIndex(k) + if mi.Interface() == ServerTimestamp { + sawServerTimestamp = true + continue + } + val, sst, err := toProtoValue(mi) + if err != nil { + return nil, false, err + } + if sst { + sawServerTimestamp = true + } + if val == nil { // value was a map with all ServerTimestamp values + continue + } + m[k.String()] = val + } + var pv *pb.Value + if len(m) == 0 && sawServerTimestamp { + // The entire map consisted of ServerTimestamp values. + pv = nil + } else { + pv = &pb.Value{&pb.Value_MapValue{&pb.MapValue{m}}} + } + return pv, sawServerTimestamp, nil +} + +func structToProtoValue(v reflect.Value) (*pb.Value, bool, error) { + m := map[string]*pb.Value{} + fields, err := fieldCache.Fields(v.Type()) + if err != nil { + return nil, false, err + } + sawServerTimestamp := false + for _, f := range fields { + fv := v.FieldByIndex(f.Index) + opts := f.ParsedTag.(tagOptions) + if opts.serverTimestamp { + // TODO(jba): should we return a non-zero time? + sawServerTimestamp = true + continue + } + if opts.omitEmpty && isEmptyValue(fv) { + continue + } + val, sst, err := toProtoValue(fv) + if err != nil { + return nil, false, err + } + if sst { + sawServerTimestamp = true + } + if val == nil { // value was a map with all ServerTimestamp values + continue + } + m[f.Name] = val + } + var pv *pb.Value + if len(m) == 0 && sawServerTimestamp { + // The entire struct consisted of ServerTimestamp or omitempty values. + pv = nil + } else { + pv = &pb.Value{&pb.Value_MapValue{&pb.MapValue{m}}} + } + return pv, sawServerTimestamp, nil +} + +type tagOptions struct { + omitEmpty bool // do not marshal value if empty + serverTimestamp bool // set time.Time to server timestamp on write +} + +// parseTag interprets firestore struct field tags. +func parseTag(t reflect.StructTag) (name string, keep bool, other interface{}, err error) { + name, keep, opts, err := fields.ParseStandardTag("firestore", t) + if err != nil { + return "", false, nil, fmt.Errorf("firestore: %v", err) + } + tagOpts := tagOptions{} + for _, opt := range opts { + switch opt { + case "omitempty": + tagOpts.omitEmpty = true + case "serverTimestamp": + tagOpts.serverTimestamp = true + default: + return "", false, nil, fmt.Errorf("firestore: unknown tag option: %q", opt) + } + } + return name, keep, tagOpts, nil +} + +// isLeafType determines whether or not a type is a 'leaf type' +// and should not be recursed into, but considered one field. +func isLeafType(t reflect.Type) bool { + return t == typeOfGoTime || t == typeOfLatLng || t == typeOfProtoTimestamp +} + +var fieldCache = fields.NewCache(parseTag, nil, isLeafType) + +// isEmptyValue is taken from the encoding/json package in the +// standard library. 
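+// It reports whether v holds the zero value for its kind, which is what the
+// omitempty tag option checks before dropping a field.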
+// TODO(jba): move to the fields package +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + if v.Type() == typeOfGoTime { + return v.Interface().(time.Time).IsZero() + } + return false +} diff --git a/vendor/cloud.google.com/go/firestore/to_value_test.go b/vendor/cloud.google.com/go/firestore/to_value_test.go new file mode 100644 index 0000000..eac26a3 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/to_value_test.go @@ -0,0 +1,276 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +import ( + "fmt" + "reflect" + "testing" + "time" + + ts "github.com/golang/protobuf/ptypes/timestamp" + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + + "google.golang.org/genproto/googleapis/type/latlng" +) + +type testStruct1 struct { + B bool + I int + U uint32 + F float64 + S string + Y []byte + T time.Time + Ts *ts.Timestamp + G *latlng.LatLng + L []int + M map[string]int + P *int +} + +var ( + p = new(int) + + testVal1 = testStruct1{ + B: true, + I: 1, + U: 2, + F: 3.0, + S: "four", + Y: []byte{5}, + T: tm, + Ts: ptm, + G: ll, + L: []int{6}, + M: map[string]int{"a": 7}, + P: p, + } + + mapVal1 = mapval(map[string]*pb.Value{ + "B": boolval(true), + "I": intval(1), + "U": intval(2), + "F": floatval(3), + "S": &pb.Value{&pb.Value_StringValue{"four"}}, + "Y": bytesval([]byte{5}), + "T": tsval(tm), + "Ts": &pb.Value{&pb.Value_TimestampValue{ptm}}, + "G": geoval(ll), + "L": arrayval(intval(6)), + "M": mapval(map[string]*pb.Value{"a": intval(7)}), + "P": intval(8), + }) +) + +func TestToProtoValue(t *testing.T) { + *p = 8 + for _, test := range []struct { + in interface{} + want *pb.Value + }{ + {nil, nullValue}, + {[]int(nil), nullValue}, + {map[string]int(nil), nullValue}, + {(*testStruct1)(nil), nullValue}, + {(*ts.Timestamp)(nil), nullValue}, + {(*latlng.LatLng)(nil), nullValue}, + {(*DocumentRef)(nil), nullValue}, + {true, boolval(true)}, + {3, intval(3)}, + {uint32(3), intval(3)}, + {1.5, floatval(1.5)}, + {"str", strval("str")}, + {[]byte{1, 2}, bytesval([]byte{1, 2})}, + {tm, tsval(tm)}, + {ptm, &pb.Value{&pb.Value_TimestampValue{ptm}}}, + {ll, geoval(ll)}, + {[]int{1, 2}, arrayval(intval(1), intval(2))}, + {&[]int{1, 2}, arrayval(intval(1), intval(2))}, + {[]int{}, arrayval()}, + {map[string]int{"a": 1, "b": 2}, + mapval(map[string]*pb.Value{"a": intval(1), "b": intval(2)})}, + {map[string]int{}, mapval(map[string]*pb.Value{})}, + {p, intval(8)}, + {&p, intval(8)}, + {map[string]interface{}{"a": 1, "p": p, "s": 
"str"}, + mapval(map[string]*pb.Value{"a": intval(1), "p": intval(8), "s": strval("str")})}, + {map[string]fmt.Stringer{"a": tm}, + mapval(map[string]*pb.Value{"a": tsval(tm)})}, + {testVal1, mapVal1}, + { + &DocumentRef{ + ID: "d", + Path: "projects/P/databases/D/documents/c/d", + Parent: &CollectionRef{ + ID: "c", + parentPath: "projects/P/databases/D", + Path: "projects/P/databases/D/documents/c", + Query: Query{collectionID: "c", parentPath: "projects/P/databases/D"}, + }, + }, + refval("projects/P/databases/D/documents/c/d"), + }, + // ServerTimestamps are removed, possibly leaving nil. + {map[string]interface{}{"a": ServerTimestamp}, nil}, + { + map[string]interface{}{ + "a": map[string]interface{}{ + "b": map[string]interface{}{ + "c": ServerTimestamp, + }, + }, + }, + nil, + }, + { + map[string]interface{}{ + "a": map[string]interface{}{ + "b": map[string]interface{}{ + "c": ServerTimestamp, + "d": ServerTimestamp, + }, + }, + }, + nil, + }, + { + map[string]interface{}{ + "a": map[string]interface{}{ + "b": map[string]interface{}{ + "c": ServerTimestamp, + "d": ServerTimestamp, + "e": 1, + }, + }, + }, + mapval(map[string]*pb.Value{ + "a": mapval(map[string]*pb.Value{ + "b": mapval(map[string]*pb.Value{"e": intval(1)}), + }), + }), + }, + } { + got, _, err := toProtoValue(reflect.ValueOf(test.in)) + if err != nil { + t.Errorf("%v (%T): %v", test.in, test.in, err) + continue + } + if !testEqual(got, test.want) { + t.Errorf("%+v (%T):\ngot\n%+v\nwant\n%+v", test.in, test.in, got, test.want) + } + } +} + +type stringy struct{} + +func (stringy) String() string { return "stringy" } + +func TestToProtoValueErrors(t *testing.T) { + for _, in := range []interface{}{ + uint64(0), // a bad fit for int64 + map[int]bool{}, // map key type is not string + make(chan int), // can't handle type + map[string]fmt.Stringer{"a": stringy{}}, // only empty interfaces + ServerTimestamp, // ServerTimestamp can only be a field value + []interface{}{ServerTimestamp}, + map[string]interface{}{"a": []interface{}{ServerTimestamp}}, + map[string]interface{}{"a": []interface{}{ + map[string]interface{}{"b": ServerTimestamp}, + }}, + Delete, // Delete should never appear + []interface{}{Delete}, + map[string]interface{}{"a": Delete}, + map[string]interface{}{"a": []interface{}{Delete}}, + } { + _, _, err := toProtoValue(reflect.ValueOf(in)) + if err == nil { + t.Errorf("%v: got nil, want error", in) + } + } +} + +type testStruct2 struct { + Ignore int `firestore:"-"` + Rename int `firestore:"a"` + OmitEmpty int `firestore:",omitempty"` + OmitEmptyTime time.Time `firestore:",omitempty"` +} + +func TestToProtoValueTags(t *testing.T) { + in := &testStruct2{ + Ignore: 1, + Rename: 2, + OmitEmpty: 3, + OmitEmptyTime: aTime, + } + got, _, err := toProtoValue(reflect.ValueOf(in)) + if err != nil { + t.Fatal(err) + } + want := mapval(map[string]*pb.Value{ + "a": intval(2), + "OmitEmpty": intval(3), + "OmitEmptyTime": tsval(aTime), + }) + if !testEqual(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } + + got, _, err = toProtoValue(reflect.ValueOf(testStruct2{})) + if err != nil { + t.Fatal(err) + } + want = mapval(map[string]*pb.Value{"a": intval(0)}) + if !testEqual(got, want) { + t.Errorf("got\n%+v\nwant\n%+v", got, want) + } +} + +func TestToProtoValueEmbedded(t *testing.T) { + // Embedded time.Time, LatLng, or Timestamp should behave like non-embedded. 
+	type embed struct {
+		time.Time
+		*latlng.LatLng
+		*ts.Timestamp
+	}
+
+	got, _, err := toProtoValue(reflect.ValueOf(embed{tm, ll, ptm}))
+	if err != nil {
+		t.Fatal(err)
+	}
+	want := mapval(map[string]*pb.Value{
+		"Time":      tsval(tm),
+		"LatLng":    geoval(ll),
+		"Timestamp": &pb.Value{&pb.Value_TimestampValue{ptm}},
+	})
+	if !testEqual(got, want) {
+		t.Errorf("got %+v, want %+v", got, want)
+	}
+}
+
+func TestIsEmpty(t *testing.T) {
+	for _, e := range []interface{}{int(0), float32(0), false, "", []int{}, []int(nil), (*int)(nil)} {
+		if !isEmptyValue(reflect.ValueOf(e)) {
+			t.Errorf("%v (%T): want true, got false", e, e)
+		}
+	}
+	i := 3
+	for _, n := range []interface{}{int(1), float32(1), true, "x", []int{1}, &i} {
+		if isEmptyValue(reflect.ValueOf(n)) {
+			t.Errorf("%v (%T): want false, got true", n, n)
+		}
+	}
+}
diff --git a/vendor/cloud.google.com/go/firestore/transaction.go b/vendor/cloud.google.com/go/firestore/transaction.go
new file mode 100644
index 0000000..ec08ad3
--- /dev/null
+++ b/vendor/cloud.google.com/go/firestore/transaction.go
@@ -0,0 +1,276 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package firestore
+
+import (
+	"errors"
+
+	pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
+
+	gax "github.com/googleapis/gax-go"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// Transaction represents a Firestore transaction.
+type Transaction struct {
+	c              *Client
+	ctx            context.Context
+	id             []byte
+	writes         []*pb.Write
+	maxAttempts    int
+	readOnly       bool
+	readAfterWrite bool
+}
+
+// A TransactionOption is an option passed to Client.RunTransaction.
+type TransactionOption interface {
+	config(t *Transaction)
+}
+
+// MaxAttempts is a TransactionOption that configures the maximum number of times to
+// try a transaction. It defaults to DefaultTransactionMaxAttempts.
+func MaxAttempts(n int) maxAttempts { return maxAttempts(n) }
+
+type maxAttempts int
+
+func (m maxAttempts) config(t *Transaction) { t.maxAttempts = int(m) }
+
+// DefaultTransactionMaxAttempts is the default number of times to attempt a transaction.
+const DefaultTransactionMaxAttempts = 5
+
+// ReadOnly is a TransactionOption that makes the transaction read-only. Read-only
+// transactions cannot issue write operations, but are more efficient.
+var ReadOnly = ro{}
+
+type ro struct{}
+
+func (ro) config(t *Transaction) { t.readOnly = true }
+
+var (
+	// ErrConcurrentTransaction is returned when a transaction is rolled back due
+	// to a conflict with a concurrent transaction.
+	ErrConcurrentTransaction = errors.New("firestore: concurrent transaction")
+
+	// Defined here for testing.
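+	// They are returned unwrapped so tests can compare them with ==.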
+ errReadAfterWrite = errors.New("firestore: read after write in transaction") + errWriteReadOnly = errors.New("firestore: write in read-only transaction") + errNonTransactionalOp = errors.New("firestore: non-transactional operation inside a transaction") + errNestedTransaction = errors.New("firestore: nested transaction") +) + +type transactionInProgressKey struct{} + +func checkTransaction(ctx context.Context) error { + if ctx.Value(transactionInProgressKey{}) != nil { + return errNonTransactionalOp + } + return nil +} + +// RunTransaction runs f in a transaction. f should use the transaction it is given +// for all Firestore operations. For any operation requiring a context, f should use +// the context it is passed, not the first argument to RunTransaction. +// +// f must not call Commit or Rollback on the provided Transaction. +// +// If f returns nil, RunTransaction commits the transaction. If the commit fails due +// to a conflicting transaction, RunTransaction retries f. It gives up and returns +// ErrConcurrentTransaction after a number of attempts that can be configured with +// the MaxAttempts option. If the commit succeeds, RunTransaction returns a nil error. +// +// If f returns non-nil, then the transaction will be rolled back and +// this method will return the same error. The function f is not retried. +// +// Note that when f returns, the transaction is not committed. Calling code +// must not assume that any of f's changes have been committed until +// RunTransaction returns nil. +// +// Since f may be called more than once, f should usually be idempotent – that is, it +// should have the same result when called multiple times. +func (c *Client) RunTransaction(ctx context.Context, f func(context.Context, *Transaction) error, opts ...TransactionOption) error { + if ctx.Value(transactionInProgressKey{}) != nil { + return errNestedTransaction + } + db := c.path() + t := &Transaction{ + c: c, + ctx: withResourceHeader(ctx, db), + maxAttempts: DefaultTransactionMaxAttempts, + } + for _, opt := range opts { + opt.config(t) + } + var txOpts *pb.TransactionOptions + if t.readOnly { + txOpts = &pb.TransactionOptions{ + Mode: &pb.TransactionOptions_ReadOnly_{&pb.TransactionOptions_ReadOnly{}}, + } + } + var backoff gax.Backoff + // TODO(jba): use other than the standard backoff parameters? + // TODO(jba): get backoff time from gRPC trailer metadata? See extractRetryDelay in https://code.googlesource.com/gocloud/+/master/spanner/retry.go. + var err error + for i := 0; i < t.maxAttempts; i++ { + var res *pb.BeginTransactionResponse + res, err = t.c.c.BeginTransaction(t.ctx, &pb.BeginTransactionRequest{ + Database: db, + Options: txOpts, + }) + if err != nil { + return err + } + t.id = res.Transaction + err = f(context.WithValue(ctx, transactionInProgressKey{}, 1), t) + // Read after write can only be checked client-side, so we make sure to check + // even if the user does not. + if err == nil && t.readAfterWrite { + err = errReadAfterWrite + } + if err != nil { + t.rollback() + // Prefer f's returned error to rollback error. + return err + } + _, err = t.c.c.Commit(t.ctx, &pb.CommitRequest{ + Database: t.c.path(), + Writes: t.writes, + Transaction: t.id, + }) + // If a read-write transaction returns Aborted, retry. + // On success or other failures, return here. + if t.readOnly || grpc.Code(err) != codes.Aborted { + // According to the Firestore team, we should not roll back here + // if err != nil. But spanner does. 
+			// See https://code.googlesource.com/gocloud/+/master/spanner/transaction.go#740.
+			return err
+		}
+
+		if txOpts == nil {
+			// txOpts can only be nil if this is the first retry of a read-write transaction.
+			// (It is only set here and in the body of "if t.readOnly" above.)
+			// Mention the transaction ID in BeginTransaction so the service
+			// knows it is a retry.
+			txOpts = &pb.TransactionOptions{
+				Mode: &pb.TransactionOptions_ReadWrite_{
+					&pb.TransactionOptions_ReadWrite{RetryTransaction: t.id},
+				},
+			}
+		}
+		// Use exponential backoff to avoid contention with other running
+		// transactions.
+		if cerr := sleep(ctx, backoff.Pause()); cerr != nil {
+			err = cerr
+			break
+		}
+	}
+	// If we run out of retries, return the last error we saw (which should
+	// be the Aborted from Commit, or a context error).
+	if err != nil {
+		t.rollback()
+	}
+	return err
+}
+
+func (t *Transaction) rollback() {
+	_ = t.c.c.Rollback(t.ctx, &pb.RollbackRequest{
+		Database:    t.c.path(),
+		Transaction: t.id,
+	})
+	// Ignore the rollback error.
+	// TODO(jba): Log it?
+	// Note: Rollback is idempotent so it will be retried by the gapic layer.
+}
+
+// Get gets the document in the context of the transaction. The transaction holds a
+// pessimistic lock on the returned document.
+func (t *Transaction) Get(dr *DocumentRef) (*DocumentSnapshot, error) {
+	docsnaps, err := t.GetAll([]*DocumentRef{dr})
+	if err != nil {
+		return nil, err
+	}
+	ds := docsnaps[0]
+	if !ds.Exists() {
+		return ds, status.Errorf(codes.NotFound, "%q not found", dr.Path)
+	}
+	return ds, nil
+}
+
+// GetAll retrieves multiple documents with a single call. The DocumentSnapshots are
+// returned in the order of the given DocumentRefs. If a document is not present, the
+// corresponding DocumentSnapshot's Exists method will return false. The transaction
+// holds a pessimistic lock on all of the returned documents.
+func (t *Transaction) GetAll(drs []*DocumentRef) ([]*DocumentSnapshot, error) {
+	if len(t.writes) > 0 {
+		t.readAfterWrite = true
+		return nil, errReadAfterWrite
+	}
+	return t.c.getAll(t.ctx, drs, t.id)
+}
+
+// A Queryer is a Query or a CollectionRef. CollectionRefs act as queries whose
+// results are all the documents in the collection.
+type Queryer interface {
+	query() *Query
+}
+
+// Documents returns a DocumentIterator based on the given Query or CollectionRef. The
+// results will be in the context of the transaction.
+func (t *Transaction) Documents(q Queryer) *DocumentIterator {
+	if len(t.writes) > 0 {
+		t.readAfterWrite = true
+		return &DocumentIterator{err: errReadAfterWrite}
+	}
+	return &DocumentIterator{
+		iter: newQueryDocumentIterator(t.ctx, q.query(), t.id),
+	}
+}
+
+// Create adds a Create operation to the Transaction.
+// See DocumentRef.Create for details.
+func (t *Transaction) Create(dr *DocumentRef, data interface{}) error {
+	return t.addWrites(dr.newCreateWrites(data))
+}
+
+// Set adds a Set operation to the Transaction.
+// See DocumentRef.Set for details.
+func (t *Transaction) Set(dr *DocumentRef, data interface{}, opts ...SetOption) error {
+	return t.addWrites(dr.newSetWrites(data, opts))
+}
+
+// Delete adds a Delete operation to the Transaction.
+// See DocumentRef.Delete for details.
+func (t *Transaction) Delete(dr *DocumentRef, opts ...Precondition) error {
+	return t.addWrites(dr.newDeleteWrites(opts))
+}
+
+// Update adds a new Update operation to the Transaction.
+// See DocumentRef.Update for details.
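+// Like the other write methods, Update only buffers the write locally; nothing is
+// sent to the server until RunTransaction commits. A sketch of typical use
+// (ctx, client and ref are assumed):
+//
+//	err := client.RunTransaction(ctx, func(ctx context.Context, tx *firestore.Transaction) error {
+//		return tx.Update(ref, []firestore.Update{{Path: "count", Value: 7}})
+//	})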
+func (t *Transaction) Update(dr *DocumentRef, data []Update, opts ...Precondition) error { + return t.addWrites(dr.newUpdatePathWrites(data, opts)) +} + +func (t *Transaction) addWrites(ws []*pb.Write, err error) error { + if t.readOnly { + return errWriteReadOnly + } + if err != nil { + return err + } + t.writes = append(t.writes, ws...) + return nil +} diff --git a/vendor/cloud.google.com/go/firestore/transaction_test.go b/vendor/cloud.google.com/go/firestore/transaction_test.go new file mode 100644 index 0000000..014bc7d --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/transaction_test.go @@ -0,0 +1,389 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +import ( + "testing" + + "golang.org/x/net/context" + "google.golang.org/grpc/status" + + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + + "github.com/golang/protobuf/ptypes/empty" + "google.golang.org/api/iterator" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +func TestRunTransaction(t *testing.T) { + ctx := context.Background() + const db = "projects/projectID/databases/(default)" + tid := []byte{1} + c, srv := newMock(t) + beginReq := &pb.BeginTransactionRequest{Database: db} + beginRes := &pb.BeginTransactionResponse{Transaction: tid} + commitReq := &pb.CommitRequest{Database: db, Transaction: tid} + // Empty transaction. + srv.addRPC(beginReq, beginRes) + srv.addRPC(commitReq, &pb.CommitResponse{CommitTime: aTimestamp}) + err := c.RunTransaction(ctx, func(context.Context, *Transaction) error { return nil }) + if err != nil { + t.Fatal(err) + } + + // Transaction with read and write. 
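+	// The function reads C/a, increments its "count" field, and writes it back;
+	// the mock verifies that both the read and the commit carry the transaction ID.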
+ srv.reset() + srv.addRPC(beginReq, beginRes) + aDoc := &pb.Document{ + Name: db + "/documents/C/a", + CreateTime: aTimestamp, + UpdateTime: aTimestamp2, + Fields: map[string]*pb.Value{"count": intval(1)}, + } + srv.addRPC( + &pb.BatchGetDocumentsRequest{ + Database: c.path(), + Documents: []string{db + "/documents/C/a"}, + ConsistencySelector: &pb.BatchGetDocumentsRequest_Transaction{tid}, + }, []interface{}{ + &pb.BatchGetDocumentsResponse{ + Result: &pb.BatchGetDocumentsResponse_Found{aDoc}, + ReadTime: aTimestamp2, + }, + }) + aDoc2 := &pb.Document{ + Name: aDoc.Name, + Fields: map[string]*pb.Value{"count": intval(2)}, + } + srv.addRPC( + &pb.CommitRequest{ + Database: db, + Transaction: tid, + Writes: []*pb.Write{{ + Operation: &pb.Write_Update{aDoc2}, + UpdateMask: &pb.DocumentMask{FieldPaths: []string{"count"}}, + CurrentDocument: &pb.Precondition{ + ConditionType: &pb.Precondition_Exists{true}, + }, + }}, + }, + &pb.CommitResponse{CommitTime: aTimestamp3}, + ) + err = c.RunTransaction(ctx, func(_ context.Context, tx *Transaction) error { + docref := c.Collection("C").Doc("a") + doc, err := tx.Get(docref) + if err != nil { + return err + } + count, err := doc.DataAt("count") + if err != nil { + return err + } + tx.Update(docref, []Update{{Path: "count", Value: count.(int64) + 1}}) + return nil + }) + if err != nil { + t.Fatal(err) + } + + // Query + srv.reset() + srv.addRPC(beginReq, beginRes) + srv.addRPC( + &pb.RunQueryRequest{ + Parent: db, + QueryType: &pb.RunQueryRequest_StructuredQuery{ + &pb.StructuredQuery{ + From: []*pb.StructuredQuery_CollectionSelector{{CollectionId: "C"}}, + }, + }, + ConsistencySelector: &pb.RunQueryRequest_Transaction{tid}, + }, + []interface{}{}, + ) + srv.addRPC(commitReq, &pb.CommitResponse{CommitTime: aTimestamp3}) + err = c.RunTransaction(ctx, func(_ context.Context, tx *Transaction) error { + it := tx.Documents(c.Collection("C")) + defer it.Stop() + _, err := it.Next() + if err != iterator.Done { + return err + } + return nil + }) + if err != nil { + t.Fatal(err) + } + + // Retry entire transaction. + srv.reset() + srv.addRPC(beginReq, beginRes) + srv.addRPC(commitReq, status.Errorf(codes.Aborted, "")) + srv.addRPC( + &pb.BeginTransactionRequest{ + Database: db, + Options: &pb.TransactionOptions{ + Mode: &pb.TransactionOptions_ReadWrite_{ + &pb.TransactionOptions_ReadWrite{tid}, + }, + }, + }, + beginRes, + ) + srv.addRPC(commitReq, &pb.CommitResponse{CommitTime: aTimestamp}) + err = c.RunTransaction(ctx, func(_ context.Context, tx *Transaction) error { return nil }) + if err != nil { + t.Fatal(err) + } +} + +func TestTransactionErrors(t *testing.T) { + ctx := context.Background() + const db = "projects/projectID/databases/(default)" + c, srv := newMock(t) + var ( + tid = []byte{1} + internalErr = status.Errorf(codes.Internal, "so sad") + beginReq = &pb.BeginTransactionRequest{ + Database: db, + } + beginRes = &pb.BeginTransactionResponse{Transaction: tid} + getReq = &pb.BatchGetDocumentsRequest{ + Database: c.path(), + Documents: []string{db + "/documents/C/a"}, + ConsistencySelector: &pb.BatchGetDocumentsRequest_Transaction{tid}, + } + rollbackReq = &pb.RollbackRequest{Database: db, Transaction: tid} + commitReq = &pb.CommitRequest{Database: db, Transaction: tid} + ) + + // BeginTransaction has a permanent error. 
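+	// (RunTransaction retries only on Aborted, so Internal is returned immediately.)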
+ srv.addRPC(beginReq, internalErr) + err := c.RunTransaction(ctx, func(context.Context, *Transaction) error { return nil }) + if grpc.Code(err) != codes.Internal { + t.Errorf("got <%v>, want Internal", err) + } + + // Get has a permanent error. + get := func(_ context.Context, tx *Transaction) error { + _, err := tx.Get(c.Doc("C/a")) + return err + } + srv.reset() + srv.addRPC(beginReq, beginRes) + srv.addRPC(getReq, internalErr) + srv.addRPC(rollbackReq, &empty.Empty{}) + err = c.RunTransaction(ctx, get) + if grpc.Code(err) != codes.Internal { + t.Errorf("got <%v>, want Internal", err) + } + + // Get has a permanent error, but the rollback fails. We still + // return Get's error. + srv.reset() + srv.addRPC(beginReq, beginRes) + srv.addRPC(getReq, internalErr) + srv.addRPC(rollbackReq, status.Errorf(codes.FailedPrecondition, "")) + err = c.RunTransaction(ctx, get) + if grpc.Code(err) != codes.Internal { + t.Errorf("got <%v>, want Internal", err) + } + + // Commit has a permanent error. + srv.reset() + srv.addRPC(beginReq, beginRes) + srv.addRPC(getReq, []interface{}{ + &pb.BatchGetDocumentsResponse{ + Result: &pb.BatchGetDocumentsResponse_Found{&pb.Document{ + Name: "projects/projectID/databases/(default)/documents/C/a", + CreateTime: aTimestamp, + UpdateTime: aTimestamp2, + }}, + ReadTime: aTimestamp2, + }, + }) + srv.addRPC(commitReq, internalErr) + err = c.RunTransaction(ctx, get) + if grpc.Code(err) != codes.Internal { + t.Errorf("got <%v>, want Internal", err) + } + + // Read after write. + srv.reset() + srv.addRPC(beginReq, beginRes) + srv.addRPC(rollbackReq, &empty.Empty{}) + err = c.RunTransaction(ctx, func(_ context.Context, tx *Transaction) error { + tx.Delete(c.Doc("C/a")) + if _, err := tx.Get(c.Doc("C/a")); err != nil { + return err + } + return nil + }) + if err != errReadAfterWrite { + t.Errorf("got <%v>, want <%v>", err, errReadAfterWrite) + } + + // Read after write, with query. + srv.reset() + srv.addRPC(beginReq, beginRes) + srv.addRPC(rollbackReq, &empty.Empty{}) + err = c.RunTransaction(ctx, func(_ context.Context, tx *Transaction) error { + tx.Delete(c.Doc("C/a")) + it := tx.Documents(c.Collection("C").Select("x")) + defer it.Stop() + if _, err := it.Next(); err != iterator.Done { + return err + } + return nil + }) + if err != errReadAfterWrite { + t.Errorf("got <%v>, want <%v>", err, errReadAfterWrite) + } + + // Read after write fails even if the user ignores the read's error. + srv.reset() + srv.addRPC(beginReq, beginRes) + srv.addRPC(rollbackReq, &empty.Empty{}) + err = c.RunTransaction(ctx, func(_ context.Context, tx *Transaction) error { + tx.Delete(c.Doc("C/a")) + tx.Get(c.Doc("C/a")) + return nil + }) + if err != errReadAfterWrite { + t.Errorf("got <%v>, want <%v>", err, errReadAfterWrite) + } + + // Write in read-only transaction. + srv.reset() + srv.addRPC( + &pb.BeginTransactionRequest{ + Database: db, + Options: &pb.TransactionOptions{ + Mode: &pb.TransactionOptions_ReadOnly_{&pb.TransactionOptions_ReadOnly{}}, + }, + }, + beginRes, + ) + srv.addRPC(rollbackReq, &empty.Empty{}) + err = c.RunTransaction(ctx, func(_ context.Context, tx *Transaction) error { + return tx.Delete(c.Doc("C/a")) + }, ReadOnly) + if err != errWriteReadOnly { + t.Errorf("got <%v>, want <%v>", err, errWriteReadOnly) + } + + // Too many retries. 
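+	// Both Commit attempts return Aborted; after MaxAttempts(2) the transaction is
+	// rolled back and the last Aborted error is returned.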
+ srv.reset() + srv.addRPC(beginReq, beginRes) + srv.addRPC(commitReq, status.Errorf(codes.Aborted, "")) + srv.addRPC( + &pb.BeginTransactionRequest{ + Database: db, + Options: &pb.TransactionOptions{ + Mode: &pb.TransactionOptions_ReadWrite_{ + &pb.TransactionOptions_ReadWrite{tid}, + }, + }, + }, + beginRes, + ) + srv.addRPC(commitReq, status.Errorf(codes.Aborted, "")) + srv.addRPC(rollbackReq, &empty.Empty{}) + err = c.RunTransaction(ctx, func(context.Context, *Transaction) error { return nil }, + MaxAttempts(2)) + if grpc.Code(err) != codes.Aborted { + t.Errorf("got <%v>, want Aborted", err) + } + + // Nested transaction. + srv.reset() + srv.addRPC(beginReq, beginRes) + srv.addRPC(rollbackReq, &empty.Empty{}) + err = c.RunTransaction(ctx, func(ctx context.Context, tx *Transaction) error { + return c.RunTransaction(ctx, func(context.Context, *Transaction) error { return nil }) + }) + if got, want := err, errNestedTransaction; got != want { + t.Errorf("got <%v>, want <%v>", got, want) + } + + // Non-transactional operation. + dr := c.Doc("C/d") + + for i, op := range []func(ctx context.Context) error{ + func(ctx context.Context) error { _, err := c.GetAll(ctx, []*DocumentRef{dr}); return err }, + func(ctx context.Context) error { _, _, err := c.Collection("C").Add(ctx, testData); return err }, + func(ctx context.Context) error { _, err := dr.Get(ctx); return err }, + func(ctx context.Context) error { _, err := dr.Create(ctx, testData); return err }, + func(ctx context.Context) error { _, err := dr.Set(ctx, testData); return err }, + func(ctx context.Context) error { _, err := dr.Delete(ctx); return err }, + func(ctx context.Context) error { + _, err := dr.Update(ctx, []Update{{FieldPath: []string{"*"}, Value: 1}}) + return err + }, + func(ctx context.Context) error { it := c.Collections(ctx); _, err := it.Next(); return err }, + func(ctx context.Context) error { it := dr.Collections(ctx); _, err := it.Next(); return err }, + func(ctx context.Context) error { + _, err := c.Batch().Set(dr, testData).Commit(ctx) + return err + }, + func(ctx context.Context) error { + it := c.Collection("C").Documents(ctx) + defer it.Stop() + _, err := it.Next() + return err + }, + } { + srv.reset() + srv.addRPC(beginReq, beginRes) + srv.addRPC(rollbackReq, &empty.Empty{}) + err = c.RunTransaction(ctx, func(ctx context.Context, _ *Transaction) error { + return op(ctx) + }) + if got, want := err, errNonTransactionalOp; got != want { + t.Errorf("#%d: got <%v>, want <%v>", i, got, want) + } + } +} + +func TestTransactionGetAll(t *testing.T) { + c, srv := newMock(t) + defer c.Close() + const dbPath = "projects/projectID/databases/(default)" + tid := []byte{1} + beginReq := &pb.BeginTransactionRequest{Database: dbPath} + beginRes := &pb.BeginTransactionResponse{Transaction: tid} + srv.addRPC(beginReq, beginRes) + req := &pb.BatchGetDocumentsRequest{ + Database: dbPath, + Documents: []string{ + dbPath + "/documents/C/a", + dbPath + "/documents/C/b", + dbPath + "/documents/C/c", + }, + ConsistencySelector: &pb.BatchGetDocumentsRequest_Transaction{tid}, + } + err := c.RunTransaction(context.Background(), func(_ context.Context, tx *Transaction) error { + testGetAll(t, c, srv, dbPath, + func(drs []*DocumentRef) ([]*DocumentSnapshot, error) { return tx.GetAll(drs) }, + req) + commitReq := &pb.CommitRequest{Database: dbPath, Transaction: tid} + srv.addRPC(commitReq, &pb.CommitResponse{CommitTime: aTimestamp}) + return nil + }) + if err != nil { + t.Fatal(err) + } +} diff --git 
a/vendor/cloud.google.com/go/firestore/util_test.go b/vendor/cloud.google.com/go/firestore/util_test.go new file mode 100644 index 0000000..0478d1b --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/util_test.go @@ -0,0 +1,150 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +import ( + "fmt" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + + "golang.org/x/net/context" + + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + + "github.com/golang/protobuf/ptypes" + tspb "github.com/golang/protobuf/ptypes/timestamp" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/api/option" + "google.golang.org/genproto/googleapis/type/latlng" + "google.golang.org/grpc" +) + +var ( + aTime = time.Date(2017, 1, 26, 0, 0, 0, 0, time.UTC) + aTime2 = time.Date(2017, 2, 5, 0, 0, 0, 0, time.UTC) + aTime3 = time.Date(2017, 3, 20, 0, 0, 0, 0, time.UTC) + aTimestamp = mustTimestampProto(aTime) + aTimestamp2 = mustTimestampProto(aTime2) + aTimestamp3 = mustTimestampProto(aTime3) +) + +func mustTimestampProto(t time.Time) *tspb.Timestamp { + ts, err := ptypes.TimestampProto(t) + if err != nil { + panic(err) + } + return ts +} + +var cmpOpts = []cmp.Option{ + cmp.AllowUnexported(DocumentRef{}, CollectionRef{}, DocumentSnapshot{}, + Query{}, filter{}, order{}, fpv{}), + cmpopts.IgnoreTypes(Client{}, &Client{}), +} + +// testEqual implements equality for Firestore tests. +func testEqual(a, b interface{}) bool { + return testutil.Equal(a, b, cmpOpts...) +} + +func testDiff(a, b interface{}) string { + return testutil.Diff(a, b, cmpOpts...) 
+} + +func TestTestEqual(t *testing.T) { + for _, test := range []struct { + a, b interface{} + want bool + }{ + {nil, nil, true}, + {([]int)(nil), nil, false}, + {nil, ([]int)(nil), false}, + {([]int)(nil), ([]int)(nil), true}, + } { + if got := testEqual(test.a, test.b); got != test.want { + t.Errorf("testEqual(%#v, %#v) == %t, want %t", test.a, test.b, got, test.want) + } + } +} + +func newMock(t *testing.T) (*Client, *mockServer) { + srv, err := newMockServer() + if err != nil { + t.Fatal(err) + } + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure(), grpc.WithBlock()) + if err != nil { + t.Fatal(err) + } + client, err := NewClient(context.Background(), "projectID", option.WithGRPCConn(conn)) + if err != nil { + t.Fatal(err) + } + return client, srv +} + +func intval(i int) *pb.Value { + return int64val(int64(i)) +} + +func int64val(i int64) *pb.Value { + return &pb.Value{&pb.Value_IntegerValue{i}} +} + +func boolval(b bool) *pb.Value { + return &pb.Value{&pb.Value_BooleanValue{b}} +} + +func floatval(f float64) *pb.Value { + return &pb.Value{&pb.Value_DoubleValue{f}} +} + +func strval(s string) *pb.Value { + return &pb.Value{&pb.Value_StringValue{s}} +} + +func bytesval(b []byte) *pb.Value { + return &pb.Value{&pb.Value_BytesValue{b}} +} + +func tsval(t time.Time) *pb.Value { + ts, err := ptypes.TimestampProto(t) + if err != nil { + panic(fmt.Sprintf("bad time %s in test: %v", t, err)) + } + return &pb.Value{&pb.Value_TimestampValue{ts}} +} + +func geoval(ll *latlng.LatLng) *pb.Value { + return &pb.Value{&pb.Value_GeoPointValue{ll}} +} + +func arrayval(s ...*pb.Value) *pb.Value { + if s == nil { + s = []*pb.Value{} + } + return &pb.Value{&pb.Value_ArrayValue{&pb.ArrayValue{s}}} +} + +func mapval(m map[string]*pb.Value) *pb.Value { + return &pb.Value{&pb.Value_MapValue{&pb.MapValue{m}}} +} + +func refval(path string) *pb.Value { + return &pb.Value{&pb.Value_ReferenceValue{path}} +} diff --git a/vendor/cloud.google.com/go/firestore/watch.go b/vendor/cloud.google.com/go/firestore/watch.go new file mode 100644 index 0000000..8eb301b --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/watch.go @@ -0,0 +1,515 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +import ( + "errors" + "fmt" + "io" + "log" + "sort" + "time" + + "cloud.google.com/go/internal/btree" + "github.com/golang/protobuf/ptypes" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// LogWatchStreams controls whether watch stream status changes are logged. +// This feature is EXPERIMENTAL and may disappear at any time. +var LogWatchStreams bool = false + +// DocumentChangeKind describes the kind of change to a document between +// query snapshots. 
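+// It is one of DocumentAdded, DocumentRemoved or DocumentModified.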
+type DocumentChangeKind int + +const ( + DocumentAdded DocumentChangeKind = iota + DocumentRemoved + DocumentModified +) + +// A DocumentChange describes the change to a document from one query snapshot to the next. +type DocumentChange struct { + Kind DocumentChangeKind + Doc *DocumentSnapshot + // The zero-based index of the document in the sequence of query results prior to this change, + // or -1 if the document was not present. + OldIndex int + // The zero-based index of the document in the sequence of query results after this change, + // or -1 if the document is no longer present. + NewIndex int +} + +// Implementation of realtime updates (a.k.a. watch). +// This code is closely based on the Node.js implementation, +// https://github.com/googleapis/nodejs-firestore/blob/master/src/watch.js. + +// The sole target ID for all streams from this client. +const watchTargetID int32 = 'g' + 'o' + +var defaultBackoff = gax.Backoff{ + // Values from https://github.com/googleapis/nodejs-firestore/blob/master/src/backoff.js. + Initial: 1 * time.Second, + Max: 60 * time.Second, + Multiplier: 1.5, +} + +// not goroutine-safe +type watchStream struct { + ctx context.Context + c *Client + lc pb.Firestore_ListenClient // the gRPC stream + target *pb.Target // document or query being watched + backoff gax.Backoff // for stream retries + err error // sticky permanent error + readTime time.Time // time of most recent snapshot + current bool // saw CURRENT, but not RESET; precondition for a snapshot + hasReturned bool // have we returned a snapshot yet? + compare func(a, b *DocumentSnapshot) (int, error) // compare documents according to query + + // An ordered tree where DocumentSnapshots are the keys. + docTree *btree.BTree + // Map of document name to DocumentSnapshot for the last returned snapshot. + docMap map[string]*DocumentSnapshot + // Map of document name to DocumentSnapshot for accumulated changes for the current snapshot. + // A nil value means the document was removed. + changeMap map[string]*DocumentSnapshot +} + +func newWatchStreamForDocument(ctx context.Context, dr *DocumentRef) *watchStream { + // A single document is always equal to itself. 
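+	// The stream watches exactly one document, so ordering never matters.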
+ compare := func(_, _ *DocumentSnapshot) (int, error) { return 0, nil } + return newWatchStream(ctx, dr.Parent.c, compare, &pb.Target{ + TargetType: &pb.Target_Documents{ + Documents: &pb.Target_DocumentsTarget{[]string{dr.Path}}, + }, + TargetId: watchTargetID, + }) +} + +func newWatchStreamForQuery(ctx context.Context, q Query) (*watchStream, error) { + qp, err := q.toProto() + if err != nil { + return nil, err + } + target := &pb.Target{ + TargetType: &pb.Target_Query{ + Query: &pb.Target_QueryTarget{ + Parent: q.parentPath, + QueryType: &pb.Target_QueryTarget_StructuredQuery{qp}, + }, + }, + TargetId: watchTargetID, + } + return newWatchStream(ctx, q.c, q.compareFunc(), target), nil +} + +const btreeDegree = 4 + +func newWatchStream(ctx context.Context, c *Client, compare func(_, _ *DocumentSnapshot) (int, error), target *pb.Target) *watchStream { + w := &watchStream{ + ctx: ctx, + c: c, + compare: compare, + target: target, + backoff: defaultBackoff, + docMap: map[string]*DocumentSnapshot{}, + changeMap: map[string]*DocumentSnapshot{}, + } + w.docTree = btree.New(btreeDegree, func(a, b interface{}) bool { + return w.less(a.(*DocumentSnapshot), b.(*DocumentSnapshot)) + }) + return w +} + +func (s *watchStream) less(a, b *DocumentSnapshot) bool { + c, err := s.compare(a, b) + if err != nil { + s.err = err + return false + } + return c < 0 +} + +// Once nextSnapshot returns an error, it will always return the same error. +func (s *watchStream) nextSnapshot() (*btree.BTree, []DocumentChange, time.Time, error) { + if s.err != nil { + return nil, nil, time.Time{}, s.err + } + var changes []DocumentChange + for { + // Process messages until we are in a consistent state. + for !s.handleNextMessage() { + } + if s.err != nil { + _ = s.close() // ignore error + return nil, nil, time.Time{}, s.err + } + var newDocTree *btree.BTree + newDocTree, changes = s.computeSnapshot(s.docTree, s.docMap, s.changeMap, s.readTime) + if s.err != nil { + return nil, nil, time.Time{}, s.err + } + // Only return a snapshot if something has changed, or this is the first snapshot. + if !s.hasReturned || newDocTree != s.docTree { + s.docTree = newDocTree + break + } + } + s.changeMap = map[string]*DocumentSnapshot{} + s.hasReturned = true + return s.docTree, changes, s.readTime, nil +} + +// Read a message from the stream and handle it. Return true when +// we're in a consistent state, or there is a permanent error. +func (s *watchStream) handleNextMessage() bool { + res, err := s.recv() + if err != nil { + s.err = err + // Errors returned by recv are permanent. 
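+		// (recv retries transient codes internally; see isPermanentWatchError.)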
+ return true + } + switch r := res.ResponseType.(type) { + case *pb.ListenResponse_TargetChange: + return s.handleTargetChange(r.TargetChange) + + case *pb.ListenResponse_DocumentChange: + name := r.DocumentChange.Document.Name + s.logf("DocumentChange %q", name) + if hasWatchTargetID(r.DocumentChange.TargetIds) { // document changed + ref, err := pathToDoc(name, s.c) + if err == nil { + s.changeMap[name], err = newDocumentSnapshot(ref, r.DocumentChange.Document, s.c, nil) + } + if err != nil { + s.err = err + return true + } + } else if hasWatchTargetID(r.DocumentChange.RemovedTargetIds) { // document removed + s.changeMap[name] = nil + } + + case *pb.ListenResponse_DocumentDelete: + s.logf("Delete %q", r.DocumentDelete.Document) + s.changeMap[r.DocumentDelete.Document] = nil + + case *pb.ListenResponse_DocumentRemove: + s.logf("Remove %q", r.DocumentRemove.Document) + s.changeMap[r.DocumentRemove.Document] = nil + + case *pb.ListenResponse_Filter: + s.logf("Filter %d", r.Filter.Count) + if int(r.Filter.Count) != s.currentSize() { + s.resetDocs() // Remove all the current results. + // The filter didn't match; close the stream so it will be re-opened on the next + // call to nextSnapshot. + _ = s.close() // ignore error + s.lc = nil + } + + default: + s.err = fmt.Errorf("unknown response type %T", r) + return true + } + return false +} + +// Return true iff in a consistent state, or there is a permanent error. +func (s *watchStream) handleTargetChange(tc *pb.TargetChange) bool { + switch tc.TargetChangeType { + case pb.TargetChange_NO_CHANGE: + s.logf("TargetNoChange %d %v", len(tc.TargetIds), tc.ReadTime) + if len(tc.TargetIds) == 0 && tc.ReadTime != nil && s.current { + // Everything is up-to-date, so we are ready to return a snapshot. + rt, err := ptypes.Timestamp(tc.ReadTime) + if err != nil { + s.err = err + return true + } + s.readTime = rt + s.target.ResumeType = &pb.Target_ResumeToken{tc.ResumeToken} + return true + } + + case pb.TargetChange_ADD: + s.logf("TargetAdd") + if tc.TargetIds[0] != watchTargetID { + s.err = errors.New("unexpected target ID sent by server") + return true + } + + case pb.TargetChange_REMOVE: + s.logf("TargetRemove") + // We should never see a remove. + if tc.Cause != nil { + s.err = status.Error(codes.Code(tc.Cause.Code), tc.Cause.Message) + } else { + s.err = status.Error(codes.Internal, "firestore: client saw REMOVE") + } + return true + + // The targets reflect all changes committed before the targets were added + // to the stream. + case pb.TargetChange_CURRENT: + s.logf("TargetCurrent") + s.current = true + + // The targets have been reset, and a new initial state for the targets will be + // returned in subsequent changes. Whatever changes have happened so far no + // longer matter. + case pb.TargetChange_RESET: + s.logf("TargetReset") + s.resetDocs() + + default: + s.err = fmt.Errorf("firestore: unknown TargetChange type %s", tc.TargetChangeType) + return true + } + // If we see a resume token and our watch ID is affected, we assume the stream + // is now healthy, so we reset our backoff time to the minimum. + if tc.ResumeToken != nil && (len(tc.TargetIds) == 0 || hasWatchTargetID(tc.TargetIds)) { + s.backoff = defaultBackoff + } + return false // not in a consistent state, keep receiving +} + +func (s *watchStream) resetDocs() { + s.target.ResumeType = nil // clear resume token + s.current = false + s.changeMap = map[string]*DocumentSnapshot{} + // Mark each document as deleted. 
If documents are not deleted, they
+	// will be sent again by the server.
+	it := s.docTree.BeforeIndex(0)
+	for it.Next() {
+		s.changeMap[it.Key.(*DocumentSnapshot).Ref.Path] = nil
+	}
+}
+
+func (s *watchStream) currentSize() int {
+	_, adds, deletes := extractChanges(s.docMap, s.changeMap)
+	return len(s.docMap) + len(adds) - len(deletes)
+}
+
+// Return the changes that have occurred since the last snapshot.
+func extractChanges(docMap, changeMap map[string]*DocumentSnapshot) (updates, adds []*DocumentSnapshot, deletes []string) {
+	for name, doc := range changeMap {
+		switch {
+		case doc == nil:
+			if _, ok := docMap[name]; ok {
+				deletes = append(deletes, name)
+			}
+		case docMap[name] != nil:
+			updates = append(updates, doc)
+		default:
+			adds = append(adds, doc)
+		}
+	}
+	return updates, adds, deletes
+}
+
+// For development only.
+// TODO(jba): remove.
+func assert(b bool) {
+	if !b {
+		panic("assertion failed")
+	}
+}
+
+// Applies the mutations in changeMap to both the document tree and the
+// document lookup map. Modifies docMap in place and returns a new docTree.
+// If there were no changes, returns docTree unmodified.
+func (s *watchStream) computeSnapshot(docTree *btree.BTree, docMap, changeMap map[string]*DocumentSnapshot, readTime time.Time) (*btree.BTree, []DocumentChange) {
+	var changes []DocumentChange
+	updatedTree := docTree
+	assert(docTree.Len() == len(docMap))
+	updates, adds, deletes := extractChanges(docMap, changeMap)
+	if len(adds) > 0 || len(deletes) > 0 {
+		updatedTree = docTree.Clone()
+	}
+	// Process the sorted changes in the order that is expected by our clients
+	// (removals, additions, and then modifications). We also need to sort the
+	// individual changes to ensure that oldIndex/newIndex keep incrementing.
+	deldocs := make([]*DocumentSnapshot, len(deletes))
+	for i, d := range deletes {
+		deldocs[i] = docMap[d]
+	}
+	sort.Sort(byLess{deldocs, s.less})
+	for _, oldDoc := range deldocs {
+		assert(oldDoc != nil)
+		delete(docMap, oldDoc.Ref.Path)
+		_, oldi := updatedTree.GetWithIndex(oldDoc)
+		// TODO(jba): have btree.Delete return old index
+		_, found := updatedTree.Delete(oldDoc)
+		assert(found)
+		changes = append(changes, DocumentChange{
+			Kind:     DocumentRemoved,
+			Doc:      oldDoc,
+			OldIndex: oldi,
+			NewIndex: -1,
+		})
+	}
+	sort.Sort(byLess{adds, s.less})
+	for _, newDoc := range adds {
+		name := newDoc.Ref.Path
+		assert(docMap[name] == nil)
+		newDoc.ReadTime = readTime
+		docMap[name] = newDoc
+		updatedTree.Set(newDoc, nil)
+		// TODO(jba): change btree so Set returns index as second value.
+ _, newi := updatedTree.GetWithIndex(newDoc) + changes = append(changes, DocumentChange{ + Kind: DocumentAdded, + Doc: newDoc, + OldIndex: -1, + NewIndex: newi, + }) + } + sort.Sort(byLess{updates, s.less}) + for _, newDoc := range updates { + name := newDoc.Ref.Path + oldDoc := docMap[name] + assert(oldDoc != nil) + if newDoc.UpdateTime.Equal(oldDoc.UpdateTime) { + continue + } + if updatedTree == docTree { + updatedTree = docTree.Clone() + } + newDoc.ReadTime = readTime + docMap[name] = newDoc + _, oldi := updatedTree.GetWithIndex(oldDoc) + updatedTree.Delete(oldDoc) + updatedTree.Set(newDoc, nil) + _, newi := updatedTree.GetWithIndex(newDoc) + changes = append(changes, DocumentChange{ + Kind: DocumentModified, + Doc: newDoc, + OldIndex: oldi, + NewIndex: newi, + }) + } + assert(updatedTree.Len() == len(docMap)) + return updatedTree, changes +} + +type byLess struct { + s []*DocumentSnapshot + less func(a, b *DocumentSnapshot) bool +} + +func (b byLess) Len() int { return len(b.s) } +func (b byLess) Swap(i, j int) { b.s[i], b.s[j] = b.s[j], b.s[i] } +func (b byLess) Less(i, j int) bool { return b.less(b.s[i], b.s[j]) } + +func hasWatchTargetID(ids []int32) bool { + for _, id := range ids { + if id == watchTargetID { + return true + } + } + return false +} + +func (s *watchStream) logf(format string, args ...interface{}) { + if LogWatchStreams { + log.Printf(format, args...) + } +} + +// Close the stream. From this point on, calls to nextSnapshot will return +// io.EOF, or the error from CloseSend. +func (s *watchStream) stop() { + err := s.close() + if s.err != nil { // don't change existing error + return + } + if err != nil { + s.err = err + } + s.err = io.EOF // normal shutdown +} + +func (s *watchStream) close() error { + if s.lc == nil { + return nil + } + return s.lc.CloseSend() +} + +// recv receives the next message from the stream. It also handles opening the stream +// initially, and reopening it on non-permanent errors. +// recv doesn't have to be goroutine-safe. +func (s *watchStream) recv() (*pb.ListenResponse, error) { + var err error + for { + if s.lc == nil { + s.lc, err = s.open() + if err != nil { + // Do not retry if open fails. + return nil, err + } + } + res, err := s.lc.Recv() + if err == nil || isPermanentWatchError(err) { + return res, err + } + // Non-permanent error. Sleep and retry. + s.changeMap = map[string]*DocumentSnapshot{} // clear changeMap + dur := s.backoff.Pause() + // If we're out of quota, wait a long time before retrying. + if status.Code(err) == codes.ResourceExhausted { + dur = s.backoff.Max + } + if err := sleep(s.ctx, dur); err != nil { + return nil, err + } + s.lc = nil + } +} + +func (s *watchStream) open() (pb.Firestore_ListenClient, error) { + dbPath := s.c.path() + lc, err := s.c.c.Listen(withResourceHeader(s.ctx, dbPath)) + if err == nil { + err = lc.Send(&pb.ListenRequest{ + Database: dbPath, + TargetChange: &pb.ListenRequest_AddTarget{AddTarget: s.target}, + }) + } + if err != nil { + return nil, err + } + return lc, nil +} + +func isPermanentWatchError(err error) bool { + if err == io.EOF { + // Retry on normal end-of-stream. 
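+		// recv reopens the stream when this returns false.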
+ return false + } + switch status.Code(err) { + case codes.Unknown, codes.DeadlineExceeded, codes.ResourceExhausted, + codes.Internal, codes.Unavailable, codes.Unauthenticated: + return false + default: + return true + } +} diff --git a/vendor/cloud.google.com/go/firestore/watch_test.go b/vendor/cloud.google.com/go/firestore/watch_test.go new file mode 100644 index 0000000..844887b --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/watch_test.go @@ -0,0 +1,326 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +import ( + "sort" + "testing" + "time" + + "cloud.google.com/go/internal/btree" + "github.com/golang/protobuf/proto" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestWatchRecv(t *testing.T) { + ctx := context.Background() + c, srv := newMock(t) + db := defaultBackoff + defaultBackoff = gax.Backoff{Initial: 1, Max: 1, Multiplier: 1} + defer func() { defaultBackoff = db }() + + ws := newWatchStream(ctx, c, nil, &pb.Target{}) + request := &pb.ListenRequest{ + Database: "projects/projectID/databases/(default)", + TargetChange: &pb.ListenRequest_AddTarget{&pb.Target{}}, + } + response := &pb.ListenResponse{ResponseType: &pb.ListenResponse_DocumentChange{&pb.DocumentChange{}}} + // Stream should retry on non-permanent errors, returning only the responses. + srv.addRPC(request, []interface{}{response, status.Error(codes.Unknown, "")}) + srv.addRPC(request, []interface{}{response}) // stream will return io.EOF + srv.addRPC(request, []interface{}{response, status.Error(codes.DeadlineExceeded, "")}) + srv.addRPC(request, []interface{}{status.Error(codes.ResourceExhausted, "")}) + srv.addRPC(request, []interface{}{status.Error(codes.Internal, "")}) + srv.addRPC(request, []interface{}{status.Error(codes.Unavailable, "")}) + srv.addRPC(request, []interface{}{status.Error(codes.Unauthenticated, "")}) + srv.addRPC(request, []interface{}{response}) + for i := 0; i < 4; i++ { + res, err := ws.recv() + if err != nil { + t.Fatal(err) + } + if !proto.Equal(res, response) { + t.Fatalf("got %v, want %v", res, response) + } + } + + // Stream should not retry on a permanent error. 
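+	// AlreadyExists is not in isPermanentWatchError's retryable set, so recv
+	// surfaces it to the caller.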
+ srv.addRPC(request, []interface{}{status.Error(codes.AlreadyExists, "")}) + _, err := ws.recv() + if got, want := status.Code(err), codes.AlreadyExists; got != want { + t.Fatalf("got %s, want %s", got, want) + } +} + +func TestComputeSnapshot(t *testing.T) { + c := &Client{ + projectID: "projID", + databaseID: "(database)", + } + ws := newWatchStream(context.Background(), c, nil, &pb.Target{}) + tm := time.Now() + i := 0 + doc := func(path, value string) *DocumentSnapshot { + i++ + return &DocumentSnapshot{ + Ref: c.Doc(path), + proto: &pb.Document{Fields: map[string]*pb.Value{"foo": strval(value)}}, + UpdateTime: tm.Add(time.Duration(i) * time.Second), // need unique time for updates + } + } + val := func(d *DocumentSnapshot) string { return d.proto.Fields["foo"].GetStringValue() } + less := func(a, b *DocumentSnapshot) bool { return val(a) < val(b) } + + type dmap map[string]*DocumentSnapshot + + ds1 := doc("C/d1", "a") + ds2 := doc("C/d2", "b") + ds2c := doc("C/d2", "c") + docTree := btree.New(4, func(a, b interface{}) bool { return less(a.(*DocumentSnapshot), b.(*DocumentSnapshot)) }) + var gotChanges []DocumentChange + docMap := dmap{} + // The following test cases are not independent; each builds on the output of the previous. + for _, test := range []struct { + desc string + changeMap dmap + wantDocs []*DocumentSnapshot + wantChanges []DocumentChange + }{ + { + "no changes", + nil, + nil, + nil, + }, + { + "add a doc", + dmap{ds1.Ref.Path: ds1}, + []*DocumentSnapshot{ds1}, + []DocumentChange{{Kind: DocumentAdded, Doc: ds1, OldIndex: -1, NewIndex: 0}}, + }, + { + "add, remove", + dmap{ds1.Ref.Path: nil, ds2.Ref.Path: ds2}, + []*DocumentSnapshot{ds2}, + []DocumentChange{ + {Kind: DocumentRemoved, Doc: ds1, OldIndex: 0, NewIndex: -1}, + {Kind: DocumentAdded, Doc: ds2, OldIndex: -1, NewIndex: 0}, + }, + }, + { + "add back, modify", + dmap{ds1.Ref.Path: ds1, ds2c.Ref.Path: ds2c}, + []*DocumentSnapshot{ds1, ds2c}, + []DocumentChange{ + {Kind: DocumentAdded, Doc: ds1, OldIndex: -1, NewIndex: 0}, + {Kind: DocumentModified, Doc: ds2c, OldIndex: 1, NewIndex: 1}, + }, + }, + } { + docTree, gotChanges = ws.computeSnapshot(docTree, docMap, test.changeMap, time.Time{}) + gotDocs := treeDocs(docTree) + if diff := testDiff(gotDocs, test.wantDocs); diff != "" { + t.Fatalf("%s: %s", test.desc, diff) + } + mgot := mapDocs(docMap, less) + if diff := testDiff(gotDocs, mgot); diff != "" { + t.Fatalf("%s: docTree and docMap disagree: %s", test.desc, diff) + } + if diff := testDiff(gotChanges, test.wantChanges); diff != "" { + t.Fatalf("%s: %s", test.desc, diff) + } + } + + // Verify that if there are no changes, the returned docTree is identical to the first arg. + // docTree already has ds2c. + got, _ := ws.computeSnapshot(docTree, docMap, dmap{ds2c.Ref.Path: ds2c}, time.Time{}) + if got != docTree { + t.Error("returned docTree != arg docTree") + } +} + +func treeDocs(bt *btree.BTree) []*DocumentSnapshot { + var ds []*DocumentSnapshot + it := bt.BeforeIndex(0) + for it.Next() { + ds = append(ds, it.Key.(*DocumentSnapshot)) + } + return ds +} + +func mapDocs(m map[string]*DocumentSnapshot, less func(a, b *DocumentSnapshot) bool) []*DocumentSnapshot { + var ds []*DocumentSnapshot + for _, d := range m { + ds = append(ds, d) + } + sort.Sort(byLess{ds, less}) + return ds +} + +func TestWatchStream(t *testing.T) { + // Preliminary, very basic tests. Will expand and turn into cross-language tests + // later. 
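The index conventions that TestComputeSnapshot checks above (OldIndex == -1 for an add, NewIndex == -1 for a remove, both set for a modification, with indices taken from the sorted snapshot) can be seen in miniature over a plain sorted slice. A toy sketch; indexOf is a hypothetical helper, and the real code gets the same answers from the btree:

package main

import (
	"fmt"
	"sort"
)

// indexOf reports the position of k in the sorted slice s, or -1 if absent.
func indexOf(s []string, k string) int {
	i := sort.SearchStrings(s, k)
	if i < len(s) && s[i] == k {
		return i
	}
	return -1
}

func main() {
	docs := []string{"a", "c"} // current snapshot, kept sorted

	// Removal: record the old index before deleting; NewIndex is -1.
	oldi := indexOf(docs, "c")
	docs = append(docs[:oldi], docs[oldi+1:]...)
	fmt.Printf("removed c: old=%d new=%d\n", oldi, -1)

	// Addition: OldIndex is -1; record the new index after inserting.
	docs = append(docs, "b")
	sort.Strings(docs)
	fmt.Printf("added b: old=%d new=%d\n", -1, indexOf(docs, "b"))
}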
+ ctx := context.Background() + c, srv := newMock(t) + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + baseTime := time.Now() + readTime := baseTime.Add(5 * time.Second) + readTimestamp := mustTimestampProto(readTime) + doc := func(path string, value int, tm time.Time) *DocumentSnapshot { + ref := c.Doc(path) + ts := mustTimestampProto(tm) + return &DocumentSnapshot{ + Ref: ref, + proto: &pb.Document{ + Name: ref.Path, + Fields: map[string]*pb.Value{"foo": intval(value)}, + CreateTime: ts, + UpdateTime: ts, + }, + CreateTime: tm, + UpdateTime: tm, + ReadTime: readTime, + } + } + change := func(ds *DocumentSnapshot) *pb.ListenResponse { + return &pb.ListenResponse{ResponseType: &pb.ListenResponse_DocumentChange{&pb.DocumentChange{ + Document: ds.proto, + TargetIds: []int32{watchTargetID}, + }}} + } + + del := func(ds *DocumentSnapshot) *pb.ListenResponse { + return &pb.ListenResponse{ResponseType: &pb.ListenResponse_DocumentDelete{&pb.DocumentDelete{ + Document: ds.Ref.Path, + }}} + } + + q := Query{c: c, collectionID: "x"} + current := &pb.ListenResponse{ResponseType: &pb.ListenResponse_TargetChange{&pb.TargetChange{ + TargetChangeType: pb.TargetChange_CURRENT, + }}} + noChange := &pb.ListenResponse{ResponseType: &pb.ListenResponse_TargetChange{&pb.TargetChange{ + TargetChangeType: pb.TargetChange_NO_CHANGE, + ReadTime: readTimestamp, + }}} + doc1 := doc("C/d1", 1, baseTime) + doc1a := doc("C/d1", 2, baseTime.Add(time.Second)) + doc2 := doc("C/d2", 3, baseTime) + for _, test := range []struct { + desc string + responses []interface{} + want []*DocumentSnapshot + }{ + { + "no changes: empty btree", + []interface{}{current, noChange}, + nil, + }, + { + "add a doc", + []interface{}{change(doc1), current, noChange}, + []*DocumentSnapshot{doc1}, + }, + { + "add a doc, then remove it", + []interface{}{change(doc1), del(doc1), current, noChange}, + []*DocumentSnapshot(nil), + }, + { + "add a doc, then add another one", + []interface{}{change(doc1), change(doc2), current, noChange}, + []*DocumentSnapshot{doc1, doc2}, + }, + { + "add a doc, then change it", + []interface{}{change(doc1), change(doc1a), current, noChange}, + []*DocumentSnapshot{doc1a}, + }, + } { + ws, err := newWatchStreamForQuery(ctx, q) + if err != nil { + t.Fatal(err) + } + request := &pb.ListenRequest{ + Database: "projects/projectID/databases/(default)", + TargetChange: &pb.ListenRequest_AddTarget{ws.target}, + } + srv.addRPC(request, test.responses) + tree, _, _, err := ws.nextSnapshot() + if err != nil { + t.Fatalf("%s: %v", test.desc, err) + } + got := treeDocs(tree) + if diff := testDiff(got, test.want); diff != "" { + t.Errorf("%s: %s", test.desc, diff) + } + } +} + +func TestWatchCancel(t *testing.T) { + // Canceling the context of a watch should result in a codes.Canceled error from the next + // call to the iterator's Next method. + ctx := context.Background() + c, srv := newMock(t) + q := Query{c: c, collectionID: "x"} + + // Cancel before open. 
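The expectation throughout TestWatchCancel is that a canceled context surfaces as a gRPC status with code Canceled rather than as a bare context.Canceled. One plausible translation, sketched with a hypothetical toGRPCStatus helper (the real conversion lives inside the library, not in this exact function):

package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// toGRPCStatus converts context errors into gRPC status errors so callers
// can uniformly inspect status.Code(err).
func toGRPCStatus(err error) error {
	switch err {
	case context.Canceled:
		return status.Error(codes.Canceled, err.Error())
	case context.DeadlineExceeded:
		return status.Error(codes.DeadlineExceeded, err.Error())
	default:
		return err
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	err := toGRPCStatus(ctx.Err())
	fmt.Println(status.Code(err) == codes.Canceled) // true
}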
+ ctx2, cancel := context.WithCancel(ctx) + ws, err := newWatchStreamForQuery(ctx2, q) + if err != nil { + t.Fatal(err) + } + cancel() + _, _, _, err = ws.nextSnapshot() + codeEq(t, "cancel before open", codes.Canceled, err) + + request := &pb.ListenRequest{ + Database: "projects/projectID/databases/(default)", + TargetChange: &pb.ListenRequest_AddTarget{ws.target}, + } + current := &pb.ListenResponse{ResponseType: &pb.ListenResponse_TargetChange{&pb.TargetChange{ + TargetChangeType: pb.TargetChange_CURRENT, + }}} + noChange := &pb.ListenResponse{ResponseType: &pb.ListenResponse_TargetChange{&pb.TargetChange{ + TargetChangeType: pb.TargetChange_NO_CHANGE, + ReadTime: aTimestamp, + }}} + + // Cancel from gax.Sleep. We should still see a gRPC error with codes.Canceled, not a + // context.Canceled error. + ctx2, cancel = context.WithCancel(ctx) + ws, err = newWatchStreamForQuery(ctx2, q) + if err != nil { + t.Fatal(err) + } + srv.addRPC(request, []interface{}{current, noChange}) + _, _, _, _ = ws.nextSnapshot() + cancel() + // Because of how the mock works, the following results in an EOF on the stream, which + // is a non-permanent error that causes a retry. That retry ends up in gax.Sleep, which + // finds that the context is done and returns ctx.Err(), which is context.Canceled. + // Verify that we transform that context.Canceled into a gRPC Status with code Canceled. + _, _, _, err = ws.nextSnapshot() + codeEq(t, "cancel from gax.Sleep", codes.Canceled, err) + + // TODO(jba): Test that we get codes.Canceled when canceling an RPC. + // We had a test for this in a21236af, but it was flaky for unclear reasons. +} diff --git a/vendor/cloud.google.com/go/firestore/writebatch.go b/vendor/cloud.google.com/go/firestore/writebatch.go new file mode 100644 index 0000000..46b2a59 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/writebatch.go @@ -0,0 +1,82 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +import ( + "errors" + + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + + "golang.org/x/net/context" +) + +// A WriteBatch holds multiple database updates. Build a batch with the Create, Set, +// Update and Delete methods, then run it with the Commit method. Errors in Create, +// Set, Update or Delete are recorded instead of being returned immediately. The +// first such error is returned by Commit. +type WriteBatch struct { + c *Client + err error + writes []*pb.Write +} + +func (b *WriteBatch) add(ws []*pb.Write, err error) *WriteBatch { + if b.err != nil { + return b + } + if err != nil { + b.err = err + return b + } + b.writes = append(b.writes, ws...) + return b +} + +// Create adds a Create operation to the batch. +// See DocumentRef.Create for details. +func (b *WriteBatch) Create(dr *DocumentRef, data interface{}) *WriteBatch { + return b.add(dr.newCreateWrites(data)) +} + +// Set adds a Set operation to the batch. +// See DocumentRef.Set for details. 
+func (b *WriteBatch) Set(dr *DocumentRef, data interface{}, opts ...SetOption) *WriteBatch {
+	return b.add(dr.newSetWrites(data, opts))
+}
+
+// Delete adds a Delete operation to the batch.
+// See DocumentRef.Delete for details.
+func (b *WriteBatch) Delete(dr *DocumentRef, opts ...Precondition) *WriteBatch {
+	return b.add(dr.newDeleteWrites(opts))
+}
+
+// Update adds an Update operation to the batch.
+// See DocumentRef.Update for details.
+func (b *WriteBatch) Update(dr *DocumentRef, data []Update, opts ...Precondition) *WriteBatch {
+	return b.add(dr.newUpdatePathWrites(data, opts))
+}
+
+// Commit applies all the writes in the batch to the database atomically. Commit
+// returns an error if there are no writes in the batch, if any errors occurred in
+// constructing the writes, or if the Commit operation fails.
+func (b *WriteBatch) Commit(ctx context.Context) ([]*WriteResult, error) {
+	if b.err != nil {
+		return nil, b.err
+	}
+	if len(b.writes) == 0 {
+		return nil, errors.New("firestore: cannot commit empty WriteBatch")
+	}
+	return b.c.commit(ctx, b.writes)
+}
diff --git a/vendor/cloud.google.com/go/firestore/writebatch_test.go b/vendor/cloud.google.com/go/firestore/writebatch_test.go
new file mode 100644
index 0000000..db38d03
--- /dev/null
+++ b/vendor/cloud.google.com/go/firestore/writebatch_test.go
@@ -0,0 +1,119 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package firestore
+
+import (
+	"testing"
+
+	pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
+
+	"golang.org/x/net/context"
+)
+
+func TestWriteBatch(t *testing.T) {
+	type update struct{ A int }
+
+	c, srv := newMock(t)
+	docPrefix := c.Collection("C").Path + "/"
+	srv.addRPC(
+		&pb.CommitRequest{
+			Database: c.path(),
+			Writes: []*pb.Write{
+				{ // Create
+					Operation: &pb.Write_Update{
+						Update: &pb.Document{
+							Name:   docPrefix + "a",
+							Fields: testFields,
+						},
+					},
+					CurrentDocument: &pb.Precondition{
+						ConditionType: &pb.Precondition_Exists{false},
+					},
+				},
+				{ // Set
+					Operation: &pb.Write_Update{
+						Update: &pb.Document{
+							Name:   docPrefix + "b",
+							Fields: testFields,
+						},
+					},
+				},
+				{ // Delete
+					Operation: &pb.Write_Delete{
+						Delete: docPrefix + "c",
+					},
+				},
+				{ // Update
+					Operation: &pb.Write_Update{
+						Update: &pb.Document{
+							Name:   docPrefix + "f",
+							Fields: map[string]*pb.Value{"*": intval(3)},
+						},
+					},
+					UpdateMask: &pb.DocumentMask{[]string{"`*`"}},
+					CurrentDocument: &pb.Precondition{
+						ConditionType: &pb.Precondition_Exists{true},
+					},
+				},
+			},
+		},
+		&pb.CommitResponse{
+			WriteResults: []*pb.WriteResult{
+				{UpdateTime: aTimestamp},
+				{UpdateTime: aTimestamp2},
+				{UpdateTime: aTimestamp3},
+			},
+		},
+	)
+	gotWRs, err := c.Batch().
+		Create(c.Doc("C/a"), testData).
+		Set(c.Doc("C/b"), testData).
+		Delete(c.Doc("C/c")).
+		Update(c.Doc("C/f"), []Update{{FieldPath: []string{"*"}, Value: 3}}).
+ Commit(context.Background()) + if err != nil { + t.Fatal(err) + } + wantWRs := []*WriteResult{{aTime}, {aTime2}, {aTime3}} + if !testEqual(gotWRs, wantWRs) { + t.Errorf("got %+v\nwant %+v", gotWRs, wantWRs) + } +} + +func TestWriteBatchErrors(t *testing.T) { + ctx := context.Background() + c, _ := newMock(t) + for _, test := range []struct { + desc string + batch *WriteBatch + }{ + { + "empty batch", + c.Batch(), + }, + { + "bad doc reference", + c.Batch().Create(c.Doc("a"), testData), + }, + { + "bad data", + c.Batch().Create(c.Doc("a/b"), 3), + }, + } { + if _, err := test.batch.Commit(ctx); err == nil { + t.Errorf("%s: got nil, want error", test.desc) + } + } +} diff --git a/vendor/cloud.google.com/go/iam/admin/apiv1/doc.go b/vendor/cloud.google.com/go/iam/admin/apiv1/doc.go new file mode 100644 index 0000000..e366fed --- /dev/null +++ b/vendor/cloud.google.com/go/iam/admin/apiv1/doc.go @@ -0,0 +1,43 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package admin is an experimental, auto-generated package for the +// Google Identity and Access Management (IAM) API. +// +// Manages identity and access control for Google Cloud Platform resources, +// including the creation of service accounts, which you can use to +// authenticate to Google and make API calls. +package admin // import "cloud.google.com/go/iam/admin/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertXGoog(ctx context.Context, val []string) context.Context { + md, _ := metadata.FromOutgoingContext(ctx) + md = md.Copy() + md["x-goog-api-client"] = val + return metadata.NewOutgoingContext(ctx, md) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/iam", + } +} diff --git a/vendor/cloud.google.com/go/iam/admin/apiv1/iam_client.go b/vendor/cloud.google.com/go/iam/admin/apiv1/iam_client.go new file mode 100644 index 0000000..32ca067 --- /dev/null +++ b/vendor/cloud.google.com/go/iam/admin/apiv1/iam_client.go @@ -0,0 +1,478 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
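As a usage note for the WriteBatch API added in writebatch.go above: a typical call site chains the write methods and lets Commit surface the first recorded error. A sketch of such a caller; the project ID and document paths are illustrative only:

package main

import (
	"log"

	"cloud.google.com/go/firestore"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	// "my-project" is a placeholder project ID.
	client, err := firestore.NewClient(ctx, "my-project")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Errors from Create/Set/Delete are recorded inside the batch, so the
	// calls chain freely; Commit returns the first recorded error, if any.
	wrs, err := client.Batch().
		Create(client.Doc("States/NewYork"), map[string]interface{}{"pop": 19.8}).
		Set(client.Doc("States/Ohio"), map[string]interface{}{"pop": 11.7}).
		Delete(client.Doc("States/Utopia")).
		Commit(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("committed %d writes", len(wrs))
}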
+ +package admin + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + adminpb "google.golang.org/genproto/googleapis/iam/admin/v1" + iampb "google.golang.org/genproto/googleapis/iam/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +// IamCallOptions contains the retry settings for each method of IamClient. +type IamCallOptions struct { + ListServiceAccounts []gax.CallOption + GetServiceAccount []gax.CallOption + CreateServiceAccount []gax.CallOption + UpdateServiceAccount []gax.CallOption + DeleteServiceAccount []gax.CallOption + ListServiceAccountKeys []gax.CallOption + GetServiceAccountKey []gax.CallOption + CreateServiceAccountKey []gax.CallOption + DeleteServiceAccountKey []gax.CallOption + SignBlob []gax.CallOption + GetIamPolicy []gax.CallOption + SetIamPolicy []gax.CallOption + TestIamPermissions []gax.CallOption + QueryGrantableRoles []gax.CallOption +} + +func defaultIamClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("iam.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultIamCallOptions() *IamCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &IamCallOptions{ + ListServiceAccounts: retry[[2]string{"default", "idempotent"}], + GetServiceAccount: retry[[2]string{"default", "idempotent"}], + CreateServiceAccount: retry[[2]string{"default", "non_idempotent"}], + UpdateServiceAccount: retry[[2]string{"default", "idempotent"}], + DeleteServiceAccount: retry[[2]string{"default", "idempotent"}], + ListServiceAccountKeys: retry[[2]string{"default", "idempotent"}], + GetServiceAccountKey: retry[[2]string{"default", "idempotent"}], + CreateServiceAccountKey: retry[[2]string{"default", "non_idempotent"}], + DeleteServiceAccountKey: retry[[2]string{"default", "idempotent"}], + SignBlob: retry[[2]string{"default", "non_idempotent"}], + GetIamPolicy: retry[[2]string{"default", "non_idempotent"}], + SetIamPolicy: retry[[2]string{"default", "non_idempotent"}], + TestIamPermissions: retry[[2]string{"default", "non_idempotent"}], + QueryGrantableRoles: retry[[2]string{"default", "non_idempotent"}], + } +} + +// IamClient is a client for interacting with Google Identity and Access Management (IAM) API. +type IamClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + iamClient adminpb.IAMClient + + // The call options for this service. + CallOptions *IamCallOptions + + // The metadata to be sent with each request. + xGoogHeader []string +} + +// NewIamClient creates a new iam client. +// +// Creates and manages service account objects. +// +// Service account is an account that belongs to your project instead +// of to an individual end user. It is used to authenticate calls +// to a Google API. +// +// To create a service account, specify the project_id and account_id +// for the account. The account_id is unique within the project, and used +// to generate the service account email address and a stable +// unique_id. 
+// +// All other methods can identify accounts using the format +// projects/{project}/serviceAccounts/{account}. +// Using - as a wildcard for the project will infer the project from +// the account. The account value can be the email address or the +// unique_id of the service account. +func NewIamClient(ctx context.Context, opts ...option.ClientOption) (*IamClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultIamClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &IamClient{ + conn: conn, + CallOptions: defaultIamCallOptions(), + + iamClient: adminpb.NewIAMClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *IamClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *IamClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *IamClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeader = []string{gax.XGoogHeader(kv...)} +} + +// IamProjectPath returns the path for the project resource. +func IamProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// IamServiceAccountPath returns the path for the service account resource. +func IamServiceAccountPath(project, serviceAccount string) string { + return "" + + "projects/" + + project + + "/serviceAccounts/" + + serviceAccount + + "" +} + +// IamKeyPath returns the path for the key resource. +func IamKeyPath(project, serviceAccount, key string) string { + return "" + + "projects/" + + project + + "/serviceAccounts/" + + serviceAccount + + "/keys/" + + key + + "" +} + +// ListServiceAccounts lists [ServiceAccounts][google.iam.admin.v1.ServiceAccount] for a project. +func (c *IamClient) ListServiceAccounts(ctx context.Context, req *adminpb.ListServiceAccountsRequest, opts ...gax.CallOption) *ServiceAccountIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.ListServiceAccounts[0:len(c.CallOptions.ListServiceAccounts):len(c.CallOptions.ListServiceAccounts)], opts...) + it := &ServiceAccountIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*adminpb.ServiceAccount, string, error) { + var resp *adminpb.ListServiceAccountsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamClient.ListServiceAccounts(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Accounts, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) 
+ return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetServiceAccount gets a [ServiceAccount][google.iam.admin.v1.ServiceAccount]. +func (c *IamClient) GetServiceAccount(ctx context.Context, req *adminpb.GetServiceAccountRequest, opts ...gax.CallOption) (*adminpb.ServiceAccount, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.GetServiceAccount[0:len(c.CallOptions.GetServiceAccount):len(c.CallOptions.GetServiceAccount)], opts...) + var resp *adminpb.ServiceAccount + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamClient.GetServiceAccount(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateServiceAccount creates a [ServiceAccount][google.iam.admin.v1.ServiceAccount] +// and returns it. +func (c *IamClient) CreateServiceAccount(ctx context.Context, req *adminpb.CreateServiceAccountRequest, opts ...gax.CallOption) (*adminpb.ServiceAccount, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.CreateServiceAccount[0:len(c.CallOptions.CreateServiceAccount):len(c.CallOptions.CreateServiceAccount)], opts...) + var resp *adminpb.ServiceAccount + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamClient.CreateServiceAccount(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateServiceAccount updates a [ServiceAccount][google.iam.admin.v1.ServiceAccount]. +// +// Currently, only the following fields are updatable: +// display_name . +// The etag is mandatory. +func (c *IamClient) UpdateServiceAccount(ctx context.Context, req *adminpb.ServiceAccount, opts ...gax.CallOption) (*adminpb.ServiceAccount, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.UpdateServiceAccount[0:len(c.CallOptions.UpdateServiceAccount):len(c.CallOptions.UpdateServiceAccount)], opts...) + var resp *adminpb.ServiceAccount + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamClient.UpdateServiceAccount(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteServiceAccount deletes a [ServiceAccount][google.iam.admin.v1.ServiceAccount]. +func (c *IamClient) DeleteServiceAccount(ctx context.Context, req *adminpb.DeleteServiceAccountRequest, opts ...gax.CallOption) error { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.DeleteServiceAccount[0:len(c.CallOptions.DeleteServiceAccount):len(c.CallOptions.DeleteServiceAccount)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.iamClient.DeleteServiceAccount(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ListServiceAccountKeys lists [ServiceAccountKeys][google.iam.admin.v1.ServiceAccountKey]. +func (c *IamClient) ListServiceAccountKeys(ctx context.Context, req *adminpb.ListServiceAccountKeysRequest, opts ...gax.CallOption) (*adminpb.ListServiceAccountKeysResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.ListServiceAccountKeys[0:len(c.CallOptions.ListServiceAccountKeys):len(c.CallOptions.ListServiceAccountKeys)], opts...) 
+ var resp *adminpb.ListServiceAccountKeysResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamClient.ListServiceAccountKeys(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetServiceAccountKey gets the [ServiceAccountKey][google.iam.admin.v1.ServiceAccountKey] +// by key id. +func (c *IamClient) GetServiceAccountKey(ctx context.Context, req *adminpb.GetServiceAccountKeyRequest, opts ...gax.CallOption) (*adminpb.ServiceAccountKey, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.GetServiceAccountKey[0:len(c.CallOptions.GetServiceAccountKey):len(c.CallOptions.GetServiceAccountKey)], opts...) + var resp *adminpb.ServiceAccountKey + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamClient.GetServiceAccountKey(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateServiceAccountKey creates a [ServiceAccountKey][google.iam.admin.v1.ServiceAccountKey] +// and returns it. +func (c *IamClient) CreateServiceAccountKey(ctx context.Context, req *adminpb.CreateServiceAccountKeyRequest, opts ...gax.CallOption) (*adminpb.ServiceAccountKey, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.CreateServiceAccountKey[0:len(c.CallOptions.CreateServiceAccountKey):len(c.CallOptions.CreateServiceAccountKey)], opts...) + var resp *adminpb.ServiceAccountKey + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamClient.CreateServiceAccountKey(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteServiceAccountKey deletes a [ServiceAccountKey][google.iam.admin.v1.ServiceAccountKey]. +func (c *IamClient) DeleteServiceAccountKey(ctx context.Context, req *adminpb.DeleteServiceAccountKeyRequest, opts ...gax.CallOption) error { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.DeleteServiceAccountKey[0:len(c.CallOptions.DeleteServiceAccountKey):len(c.CallOptions.DeleteServiceAccountKey)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.iamClient.DeleteServiceAccountKey(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// SignBlob signs a blob using a service account's system-managed private key. +func (c *IamClient) SignBlob(ctx context.Context, req *adminpb.SignBlobRequest, opts ...gax.CallOption) (*adminpb.SignBlobResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.SignBlob[0:len(c.CallOptions.SignBlob):len(c.CallOptions.SignBlob)], opts...) + var resp *adminpb.SignBlobResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamClient.SignBlob(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// getIamPolicy returns the IAM access control policy for a +// [ServiceAccount][google.iam.admin.v1.ServiceAccount]. 
+func (c *IamClient) getIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.GetIamPolicy[0:len(c.CallOptions.GetIamPolicy):len(c.CallOptions.GetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamClient.GetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// setIamPolicy sets the IAM access control policy for a +// [ServiceAccount][google.iam.admin.v1.ServiceAccount]. +func (c *IamClient) setIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.SetIamPolicy[0:len(c.CallOptions.SetIamPolicy):len(c.CallOptions.SetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamClient.SetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// TestIamPermissions tests the specified permissions against the IAM access control policy +// for a [ServiceAccount][google.iam.admin.v1.ServiceAccount]. +func (c *IamClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.TestIamPermissions[0:len(c.CallOptions.TestIamPermissions):len(c.CallOptions.TestIamPermissions)], opts...) + var resp *iampb.TestIamPermissionsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamClient.TestIamPermissions(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// QueryGrantableRoles queries roles that can be granted on a particular resource. +// A role is grantable if it can be used as the role in a binding for a policy +// for that resource. +func (c *IamClient) QueryGrantableRoles(ctx context.Context, req *adminpb.QueryGrantableRolesRequest, opts ...gax.CallOption) (*adminpb.QueryGrantableRolesResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.QueryGrantableRoles[0:len(c.CallOptions.QueryGrantableRoles):len(c.CallOptions.QueryGrantableRoles)], opts...) + var resp *adminpb.QueryGrantableRolesResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamClient.QueryGrantableRoles(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ServiceAccountIterator manages a stream of *adminpb.ServiceAccount. +type ServiceAccountIterator struct { + items []*adminpb.ServiceAccount + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. 
+ InternalFetch func(pageSize int, pageToken string) (results []*adminpb.ServiceAccount, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ServiceAccountIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *ServiceAccountIterator) Next() (*adminpb.ServiceAccount, error) { + var item *adminpb.ServiceAccount + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ServiceAccountIterator) bufLen() int { + return len(it.items) +} + +func (it *ServiceAccountIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/iam/admin/apiv1/iam_client_example_test.go b/vendor/cloud.google.com/go/iam/admin/apiv1/iam_client_example_test.go new file mode 100644 index 0000000..901fcb9 --- /dev/null +++ b/vendor/cloud.google.com/go/iam/admin/apiv1/iam_client_example_test.go @@ -0,0 +1,253 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package admin_test + +import ( + "cloud.google.com/go/iam/admin/apiv1" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + adminpb "google.golang.org/genproto/googleapis/iam/admin/v1" + iampb "google.golang.org/genproto/googleapis/iam/v1" +) + +func ExampleNewIamClient() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleIamClient_ListServiceAccounts() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.ListServiceAccountsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListServiceAccounts(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleIamClient_GetServiceAccount() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.GetServiceAccountRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetServiceAccount(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleIamClient_CreateServiceAccount() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.CreateServiceAccountRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateServiceAccount(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp +} + +func ExampleIamClient_UpdateServiceAccount() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.ServiceAccount{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateServiceAccount(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleIamClient_DeleteServiceAccount() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.DeleteServiceAccountRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteServiceAccount(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleIamClient_ListServiceAccountKeys() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.ListServiceAccountKeysRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ListServiceAccountKeys(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleIamClient_GetServiceAccountKey() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.GetServiceAccountKeyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetServiceAccountKey(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleIamClient_CreateServiceAccountKey() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.CreateServiceAccountKeyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateServiceAccountKey(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleIamClient_DeleteServiceAccountKey() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.DeleteServiceAccountKeyRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteServiceAccountKey(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleIamClient_SignBlob() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.SignBlobRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SignBlob(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleIamClient_TestIamPermissions() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &iampb.TestIamPermissionsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.TestIamPermissions(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleIamClient_QueryGrantableRoles() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.QueryGrantableRolesRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.QueryGrantableRoles(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp +} diff --git a/vendor/cloud.google.com/go/iam/admin/apiv1/mock_test.go b/vendor/cloud.google.com/go/iam/admin/apiv1/mock_test.go new file mode 100644 index 0000000..43ff01b --- /dev/null +++ b/vendor/cloud.google.com/go/iam/admin/apiv1/mock_test.go @@ -0,0 +1,1143 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package admin + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + adminpb "google.golang.org/genproto/googleapis/iam/admin/v1" + iampb "google.golang.org/genproto/googleapis/iam/v1" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockIamServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + adminpb.IAMServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockIamServer) ListServiceAccounts(ctx context.Context, req *adminpb.ListServiceAccountsRequest) (*adminpb.ListServiceAccountsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*adminpb.ListServiceAccountsResponse), nil +} + +func (s *mockIamServer) GetServiceAccount(ctx context.Context, req *adminpb.GetServiceAccountRequest) (*adminpb.ServiceAccount, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*adminpb.ServiceAccount), nil +} + +func (s *mockIamServer) CreateServiceAccount(ctx context.Context, req *adminpb.CreateServiceAccountRequest) (*adminpb.ServiceAccount, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*adminpb.ServiceAccount), nil +} + +func (s *mockIamServer) UpdateServiceAccount(ctx context.Context, req *adminpb.ServiceAccount) (*adminpb.ServiceAccount, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*adminpb.ServiceAccount), nil +} + +func (s *mockIamServer) DeleteServiceAccount(ctx context.Context, req *adminpb.DeleteServiceAccountRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockIamServer) ListServiceAccountKeys(ctx context.Context, req *adminpb.ListServiceAccountKeysRequest) (*adminpb.ListServiceAccountKeysResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*adminpb.ListServiceAccountKeysResponse), nil +} + +func (s *mockIamServer) GetServiceAccountKey(ctx context.Context, req *adminpb.GetServiceAccountKeyRequest) (*adminpb.ServiceAccountKey, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*adminpb.ServiceAccountKey), nil +} + +func (s *mockIamServer) 
CreateServiceAccountKey(ctx context.Context, req *adminpb.CreateServiceAccountKeyRequest) (*adminpb.ServiceAccountKey, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*adminpb.ServiceAccountKey), nil +} + +func (s *mockIamServer) DeleteServiceAccountKey(ctx context.Context, req *adminpb.DeleteServiceAccountKeyRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockIamServer) SignBlob(ctx context.Context, req *adminpb.SignBlobRequest) (*adminpb.SignBlobResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*adminpb.SignBlobResponse), nil +} + +func (s *mockIamServer) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.Policy), nil +} + +func (s *mockIamServer) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.Policy), nil +} + +func (s *mockIamServer) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.TestIamPermissionsResponse), nil +} + +func (s *mockIamServer) QueryGrantableRoles(ctx context.Context, req *adminpb.QueryGrantableRolesRequest) (*adminpb.QueryGrantableRolesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*adminpb.QueryGrantableRolesResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
+var clientOpt option.ClientOption + +var ( + mockIam mockIamServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + adminpb.RegisterIAMServer(serv, &mockIam) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestIamListServiceAccounts(t *testing.T) { + var nextPageToken string = "" + var accountsElement *adminpb.ServiceAccount = &adminpb.ServiceAccount{} + var accounts = []*adminpb.ServiceAccount{accountsElement} + var expectedResponse = &adminpb.ListServiceAccountsResponse{ + NextPageToken: nextPageToken, + Accounts: accounts, + } + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedName string = IamProjectPath("[PROJECT]") + var request = &adminpb.ListServiceAccountsRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListServiceAccounts(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Accounts[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamListServiceAccountsError(t *testing.T) { + errCode := codes.PermissionDenied + mockIam.err = gstatus.Error(errCode, "test error") + + var formattedName string = IamProjectPath("[PROJECT]") + var request = &adminpb.ListServiceAccountsRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListServiceAccounts(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamGetServiceAccount(t *testing.T) { + var name2 string = "name2-1052831874" + var projectId string = "projectId-1969970175" + var uniqueId string = "uniqueId-538310583" + var email string = "email96619420" + var displayName string = "displayName1615086568" + var etag []byte = []byte("21") + var oauth2ClientId string = "oauth2ClientId-1833466037" + var expectedResponse = &adminpb.ServiceAccount{ + Name: name2, + ProjectId: projectId, + UniqueId: uniqueId, + Email: email, + DisplayName: displayName, + Etag: etag, + Oauth2ClientId: oauth2ClientId, + } + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var request = &adminpb.GetServiceAccountRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetServiceAccount(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if 
want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamGetServiceAccountError(t *testing.T) { + errCode := codes.PermissionDenied + mockIam.err = gstatus.Error(errCode, "test error") + + var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var request = &adminpb.GetServiceAccountRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetServiceAccount(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamCreateServiceAccount(t *testing.T) { + var name2 string = "name2-1052831874" + var projectId string = "projectId-1969970175" + var uniqueId string = "uniqueId-538310583" + var email string = "email96619420" + var displayName string = "displayName1615086568" + var etag []byte = []byte("21") + var oauth2ClientId string = "oauth2ClientId-1833466037" + var expectedResponse = &adminpb.ServiceAccount{ + Name: name2, + ProjectId: projectId, + UniqueId: uniqueId, + Email: email, + DisplayName: displayName, + Etag: etag, + Oauth2ClientId: oauth2ClientId, + } + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedName string = IamProjectPath("[PROJECT]") + var accountId string = "accountId-803333011" + var request = &adminpb.CreateServiceAccountRequest{ + Name: formattedName, + AccountId: accountId, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateServiceAccount(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamCreateServiceAccountError(t *testing.T) { + errCode := codes.PermissionDenied + mockIam.err = gstatus.Error(errCode, "test error") + + var formattedName string = IamProjectPath("[PROJECT]") + var accountId string = "accountId-803333011" + var request = &adminpb.CreateServiceAccountRequest{ + Name: formattedName, + AccountId: accountId, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateServiceAccount(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamUpdateServiceAccount(t *testing.T) { + var name string = "name3373707" + var projectId string = "projectId-1969970175" + var uniqueId string = "uniqueId-538310583" + var email string = "email96619420" + var displayName string = "displayName1615086568" + var etag2 []byte = []byte("-120") + var oauth2ClientId string = "oauth2ClientId-1833466037" + var expectedResponse = &adminpb.ServiceAccount{ + Name: name, + ProjectId: projectId, + UniqueId: uniqueId, + Email: email, + DisplayName: displayName, + Etag: etag2, + Oauth2ClientId: oauth2ClientId, + } + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], 
expectedResponse) + + var etag []byte = []byte("21") + var request = &adminpb.ServiceAccount{ + Etag: etag, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateServiceAccount(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamUpdateServiceAccountError(t *testing.T) { + errCode := codes.PermissionDenied + mockIam.err = gstatus.Error(errCode, "test error") + + var etag []byte = []byte("21") + var request = &adminpb.ServiceAccount{ + Etag: etag, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateServiceAccount(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamDeleteServiceAccount(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var request = &adminpb.DeleteServiceAccountRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteServiceAccount(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestIamDeleteServiceAccountError(t *testing.T) { + errCode := codes.PermissionDenied + mockIam.err = gstatus.Error(errCode, "test error") + + var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var request = &adminpb.DeleteServiceAccountRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteServiceAccount(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestIamListServiceAccountKeys(t *testing.T) { + var expectedResponse *adminpb.ListServiceAccountKeysResponse = &adminpb.ListServiceAccountKeysResponse{} + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var request = &adminpb.ListServiceAccountKeysRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListServiceAccountKeys(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamListServiceAccountKeysError(t *testing.T) { + 
errCode := codes.PermissionDenied + mockIam.err = gstatus.Error(errCode, "test error") + + var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var request = &adminpb.ListServiceAccountKeysRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListServiceAccountKeys(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamGetServiceAccountKey(t *testing.T) { + var name2 string = "name2-1052831874" + var privateKeyData []byte = []byte("-58") + var publicKeyData []byte = []byte("-96") + var expectedResponse = &adminpb.ServiceAccountKey{ + Name: name2, + PrivateKeyData: privateKeyData, + PublicKeyData: publicKeyData, + } + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedName string = IamKeyPath("[PROJECT]", "[SERVICE_ACCOUNT]", "[KEY]") + var request = &adminpb.GetServiceAccountKeyRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetServiceAccountKey(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamGetServiceAccountKeyError(t *testing.T) { + errCode := codes.PermissionDenied + mockIam.err = gstatus.Error(errCode, "test error") + + var formattedName string = IamKeyPath("[PROJECT]", "[SERVICE_ACCOUNT]", "[KEY]") + var request = &adminpb.GetServiceAccountKeyRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetServiceAccountKey(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamCreateServiceAccountKey(t *testing.T) { + var name2 string = "name2-1052831874" + var privateKeyData []byte = []byte("-58") + var publicKeyData []byte = []byte("-96") + var expectedResponse = &adminpb.ServiceAccountKey{ + Name: name2, + PrivateKeyData: privateKeyData, + PublicKeyData: publicKeyData, + } + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var request = &adminpb.CreateServiceAccountKeyRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateServiceAccountKey(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamCreateServiceAccountKeyError(t *testing.T) { + errCode := codes.PermissionDenied + 
mockIam.err = gstatus.Error(errCode, "test error") + + var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var request = &adminpb.CreateServiceAccountKeyRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateServiceAccountKey(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamDeleteServiceAccountKey(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedName string = IamKeyPath("[PROJECT]", "[SERVICE_ACCOUNT]", "[KEY]") + var request = &adminpb.DeleteServiceAccountKeyRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteServiceAccountKey(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestIamDeleteServiceAccountKeyError(t *testing.T) { + errCode := codes.PermissionDenied + mockIam.err = gstatus.Error(errCode, "test error") + + var formattedName string = IamKeyPath("[PROJECT]", "[SERVICE_ACCOUNT]", "[KEY]") + var request = &adminpb.DeleteServiceAccountKeyRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteServiceAccountKey(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestIamSignBlob(t *testing.T) { + var keyId string = "keyId-1134673157" + var signature []byte = []byte("-72") + var expectedResponse = &adminpb.SignBlobResponse{ + KeyId: keyId, + Signature: signature, + } + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var bytesToSign []byte = []byte("45") + var request = &adminpb.SignBlobRequest{ + Name: formattedName, + BytesToSign: bytesToSign, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SignBlob(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamSignBlobError(t *testing.T) { + errCode := codes.PermissionDenied + mockIam.err = gstatus.Error(errCode, "test error") + + var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var bytesToSign []byte = []byte("45") + var request = &adminpb.SignBlobRequest{ + Name: formattedName, + BytesToSign: bytesToSign, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SignBlob(context.Background(), request) + + if st, ok := 
gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamGetIamPolicy(t *testing.T) { + var version int32 = 351608024 + var etag []byte = []byte("21") + var expectedResponse = &iampb.Policy{ + Version: version, + Etag: etag, + } + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var request = &iampb.GetIamPolicyRequest{ + Resource: formattedResource, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.getIamPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamGetIamPolicyError(t *testing.T) { + errCode := codes.PermissionDenied + mockIam.err = gstatus.Error(errCode, "test error") + + var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var request = &iampb.GetIamPolicyRequest{ + Resource: formattedResource, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.getIamPolicy(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamSetIamPolicy(t *testing.T) { + var version int32 = 351608024 + var etag []byte = []byte("21") + var expectedResponse = &iampb.Policy{ + Version: version, + Etag: etag, + } + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var policy *iampb.Policy = &iampb.Policy{} + var request = &iampb.SetIamPolicyRequest{ + Resource: formattedResource, + Policy: policy, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.setIamPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamSetIamPolicyError(t *testing.T) { + errCode := codes.PermissionDenied + mockIam.err = gstatus.Error(errCode, "test error") + + var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var policy *iampb.Policy = &iampb.Policy{} + var request = &iampb.SetIamPolicyRequest{ + Resource: formattedResource, + Policy: policy, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.setIamPolicy(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func 
TestIamTestIamPermissions(t *testing.T) { + var expectedResponse *iampb.TestIamPermissionsResponse = &iampb.TestIamPermissionsResponse{} + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var permissions []string = nil + var request = &iampb.TestIamPermissionsRequest{ + Resource: formattedResource, + Permissions: permissions, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.TestIamPermissions(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamTestIamPermissionsError(t *testing.T) { + errCode := codes.PermissionDenied + mockIam.err = gstatus.Error(errCode, "test error") + + var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var permissions []string = nil + var request = &iampb.TestIamPermissionsRequest{ + Resource: formattedResource, + Permissions: permissions, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.TestIamPermissions(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamQueryGrantableRoles(t *testing.T) { + var expectedResponse *adminpb.QueryGrantableRolesResponse = &adminpb.QueryGrantableRolesResponse{} + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var fullResourceName string = "fullResourceName1300993644" + var request = &adminpb.QueryGrantableRolesRequest{ + FullResourceName: fullResourceName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.QueryGrantableRoles(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamQueryGrantableRolesError(t *testing.T) { + errCode := codes.PermissionDenied + mockIam.err = gstatus.Error(errCode, "test error") + + var fullResourceName string = "fullResourceName1300993644" + var request = &adminpb.QueryGrantableRolesRequest{ + FullResourceName: fullResourceName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.QueryGrantableRoles(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/iam/admin/apiv1/policy_methods.go b/vendor/cloud.google.com/go/iam/admin/apiv1/policy_methods.go new file mode 100644 index 0000000..a3ff72e --- /dev/null +++ b/vendor/cloud.google.com/go/iam/admin/apiv1/policy_methods.go @@ -0,0 +1,52 @@ +// 
Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This is handwritten code. These methods are implemented by hand so they can use +// the iam.Policy type. + +package admin + +import ( + "cloud.google.com/go/iam" + "golang.org/x/net/context" + iampb "google.golang.org/genproto/googleapis/iam/v1" +) + +// GetIamPolicy returns the IAM access control policy for a ServiceAccount. +func (c *IamClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest) (*iam.Policy, error) { + policy, err := c.getIamPolicy(ctx, req) + if err != nil { + return nil, err + } + return &iam.Policy{InternalProto: policy}, nil +} + +// SetIamPolicyRequest is the request type for the SetIamPolicy method. +type SetIamPolicyRequest struct { + Resource string + Policy *iam.Policy +} + +// SetIamPolicy sets the IAM access control policy for a ServiceAccount. +func (c *IamClient) SetIamPolicy(ctx context.Context, req *SetIamPolicyRequest) (*iam.Policy, error) { + preq := &iampb.SetIamPolicyRequest{ + Resource: req.Resource, + Policy: req.Policy.InternalProto, + } + policy, err := c.setIamPolicy(ctx, preq) + if err != nil { + return nil, err + } + return &iam.Policy{InternalProto: policy}, nil +} diff --git a/vendor/cloud.google.com/go/iam/iam.go b/vendor/cloud.google.com/go/iam/iam.go new file mode 100644 index 0000000..37720aa --- /dev/null +++ b/vendor/cloud.google.com/go/iam/iam.go @@ -0,0 +1,284 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package iam supports the resource-specific operations of Google Cloud +// IAM (Identity and Access Management) for the Google Cloud Libraries. +// See https://cloud.google.com/iam for more about IAM. +// +// Users of the Google Cloud Libraries will typically not use this package +// directly. Instead they will begin with some resource that supports IAM, like +// a pubsub topic, and call its IAM method to get a Handle for that resource. +package iam + +import ( + "time" + + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/iam/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +// client abstracts the IAMPolicy API to allow multiple implementations. 
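+// For example, a test can swap an in-memory implementation in for the gRPC-backed one defined below. A minimal sketch (hypothetical fake, for illustration; not part of this package): +// +//	type fakePolicyClient struct{ p *pb.Policy } +// +//	func (f *fakePolicyClient) Get(ctx context.Context, resource string) (*pb.Policy, error) { return f.p, nil } +//	func (f *fakePolicyClient) Set(ctx context.Context, resource string, p *pb.Policy) error { f.p = p; return nil } +//	func (f *fakePolicyClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) { return perms, nil } +// +//	h := InternalNewHandleClient(&fakePolicyClient{p: &pb.Policy{}}, "projects/p/topics/t")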
+type client interface { + Get(ctx context.Context, resource string) (*pb.Policy, error) + Set(ctx context.Context, resource string, p *pb.Policy) error + Test(ctx context.Context, resource string, perms []string) ([]string, error) +} + +// grpcClient implements client for the standard gRPC-based IAMPolicy service. +type grpcClient struct { + c pb.IAMPolicyClient +} + +var withRetry = gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60 * time.Second, + Multiplier: 1.3, + }) +}) + +func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) { + var proto *pb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { + var err error + proto, err = g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource}) + return err + }, withRetry) + if err != nil { + return nil, err + } + return proto, nil +} + +func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error { + return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { + _, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{ + Resource: resource, + Policy: p, + }) + return err + }, withRetry) +} + +func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) { + var res *pb.TestIamPermissionsResponse + err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { + var err error + res, err = g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{ + Resource: resource, + Permissions: perms, + }) + return err + }, withRetry) + if err != nil { + return nil, err + } + return res.Permissions, nil +} + +// A Handle provides IAM operations for a resource. +type Handle struct { + c client + resource string +} + +// InternalNewHandle is for use by the Google Cloud Libraries only. +// +// InternalNewHandle returns a Handle for resource. +// The conn parameter refers to a server that must support the IAMPolicy service. +func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle { + return InternalNewHandleClient(&grpcClient{c: pb.NewIAMPolicyClient(conn)}, resource) +} + +// InternalNewHandleClient is for use by the Google Cloud Libraries only. +// +// InternalNewHandleClient returns a Handle for resource using the given +// client implementation. +func InternalNewHandleClient(c client, resource string) *Handle { + return &Handle{ + c: c, + resource: resource, + } +} + +// Policy retrieves the IAM policy for the resource. +func (h *Handle) Policy(ctx context.Context) (*Policy, error) { + proto, err := h.c.Get(ctx, h.resource) + if err != nil { + return nil, err + } + return &Policy{InternalProto: proto}, nil +} + +// SetPolicy replaces the resource's current policy with the supplied Policy. +// +// If policy was created from a prior call to Get, then the modification will +// only succeed if the policy has not changed since the Get. +func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error { + return h.c.Set(ctx, h.resource, policy.InternalProto) +} + +// TestPermissions returns the subset of permissions that the caller has on the resource. +func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) { + return h.c.Test(ctx, h.resource, permissions) +} + +// A RoleName is a name representing a collection of permissions. +type RoleName string + +// Common role names. 
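+// A typical read-modify-write with these roles, through a Handle h obtained from a resource's IAM method (sketch; error handling elided): +// +//	policy, _ := h.Policy(ctx) +//	policy.Add("user:alice@example.com", Editor) +//	_ = h.SetPolicy(ctx, policy)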
+const ( + Owner RoleName = "roles/owner" + Editor RoleName = "roles/editor" + Viewer RoleName = "roles/viewer" +) + +const ( + // AllUsers is a special member that denotes all users, even unauthenticated ones. + AllUsers = "allUsers" + + // AllAuthenticatedUsers is a special member that denotes all authenticated users. + AllAuthenticatedUsers = "allAuthenticatedUsers" +) + +// A Policy is a list of Bindings representing roles +// granted to members. +// +// The zero Policy is a valid policy with no bindings. +type Policy struct { + // TODO(jba): when type aliases are available, put Policy into an internal package + // and provide an exported alias here. + + // This field is exported for use by the Google Cloud Libraries only. + // It may become unexported in a future release. + InternalProto *pb.Policy +} + +// Members returns the list of members with the supplied role. +// The return value should not be modified. Use Add and Remove +// to modify the members of a role. +func (p *Policy) Members(r RoleName) []string { + b := p.binding(r) + if b == nil { + return nil + } + return b.Members +} + +// HasRole reports whether member has role r. +func (p *Policy) HasRole(member string, r RoleName) bool { + return memberIndex(member, p.binding(r)) >= 0 +} + +// Add adds member member to role r if it is not already present. +// A new binding is created if there is no binding for the role. +func (p *Policy) Add(member string, r RoleName) { + b := p.binding(r) + if b == nil { + if p.InternalProto == nil { + p.InternalProto = &pb.Policy{} + } + p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{ + Role: string(r), + Members: []string{member}, + }) + return + } + if memberIndex(member, b) < 0 { + b.Members = append(b.Members, member) + return + } +} + +// Remove removes member from role r if it is present. +func (p *Policy) Remove(member string, r RoleName) { + bi := p.bindingIndex(r) + if bi < 0 { + return + } + bindings := p.InternalProto.Bindings + b := bindings[bi] + mi := memberIndex(member, b) + if mi < 0 { + return + } + // Order doesn't matter for bindings or members, so to remove, move the last item + // into the removed spot and shrink the slice. + if len(b.Members) == 1 { + // Remove binding. + last := len(bindings) - 1 + bindings[bi] = bindings[last] + bindings[last] = nil + p.InternalProto.Bindings = bindings[:last] + return + } + // Remove member. + // TODO(jba): worry about multiple copies of m? + last := len(b.Members) - 1 + b.Members[mi] = b.Members[last] + b.Members[last] = "" + b.Members = b.Members[:last] +} + +// Roles returns the names of all the roles that appear in the Policy. +func (p *Policy) Roles() []RoleName { + if p.InternalProto == nil { + return nil + } + var rns []RoleName + for _, b := range p.InternalProto.Bindings { + rns = append(rns, RoleName(b.Role)) + } + return rns +} + +// binding returns the Binding for the supplied role, or nil if there isn't one. +func (p *Policy) binding(r RoleName) *pb.Binding { + i := p.bindingIndex(r) + if i < 0 { + return nil + } + return p.InternalProto.Bindings[i] +} + +func (p *Policy) bindingIndex(r RoleName) int { + if p.InternalProto == nil { + return -1 + } + for i, b := range p.InternalProto.Bindings { + if b.Role == string(r) { + return i + } + } + return -1 +} + +// memberIndex returns the index of m in b's Members, or -1 if not found.
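+// For example (a sketch of the contract, not additional API): +// +//	b := &pb.Binding{Members: []string{"user:a", "user:b"}} +//	memberIndex("user:b", b) // 1 +//	memberIndex("user:c", b) // -1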
+func memberIndex(m string, b *pb.Binding) int { + if b == nil { + return -1 + } + for i, mm := range b.Members { + if mm == m { + return i + } + } + return -1 +} diff --git a/vendor/cloud.google.com/go/iam/iam_test.go b/vendor/cloud.google.com/go/iam/iam_test.go new file mode 100644 index 0000000..f29caa8 --- /dev/null +++ b/vendor/cloud.google.com/go/iam/iam_test.go @@ -0,0 +1,87 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package iam + +import ( + "fmt" + "sort" + "testing" + + "cloud.google.com/go/internal/testutil" +) + +func TestPolicy(t *testing.T) { + p := &Policy{} + + add := func(member string, role RoleName) { + p.Add(member, role) + } + remove := func(member string, role RoleName) { + p.Remove(member, role) + } + + if msg, ok := checkMembers(p, Owner, nil); !ok { + t.Fatal(msg) + } + add("m1", Owner) + if msg, ok := checkMembers(p, Owner, []string{"m1"}); !ok { + t.Fatal(msg) + } + add("m2", Owner) + if msg, ok := checkMembers(p, Owner, []string{"m1", "m2"}); !ok { + t.Fatal(msg) + } + add("m1", Owner) // duplicate adds ignored + if msg, ok := checkMembers(p, Owner, []string{"m1", "m2"}); !ok { + t.Fatal(msg) + } + // No other roles populated yet. + if msg, ok := checkMembers(p, Viewer, nil); !ok { + t.Fatal(msg) + } + remove("m1", Owner) + if msg, ok := checkMembers(p, Owner, []string{"m2"}); !ok { + t.Fatal(msg) + } + if msg, ok := checkMembers(p, Viewer, nil); !ok { + t.Fatal(msg) + } + remove("m3", Owner) // OK to remove non-existent member. + if msg, ok := checkMembers(p, Owner, []string{"m2"}); !ok { + t.Fatal(msg) + } + remove("m2", Owner) + if msg, ok := checkMembers(p, Owner, nil); !ok { + t.Fatal(msg) + } + if got, want := p.Roles(), []RoleName(nil); !testutil.Equal(got, want) { + t.Fatalf("roles: got %v, want %v", got, want) + } +} + +func checkMembers(p *Policy, role RoleName, wantMembers []string) (string, bool) { + gotMembers := p.Members(role) + sort.Strings(gotMembers) + sort.Strings(wantMembers) + if !testutil.Equal(gotMembers, wantMembers) { + return fmt.Sprintf("got %v, want %v", gotMembers, wantMembers), false + } + for _, m := range wantMembers { + if !p.HasRole(m, role) { + return fmt.Sprintf("member %q should have role %s but does not", m, role), false + } + } + return "", true +} diff --git a/vendor/cloud.google.com/go/import_test.go b/vendor/cloud.google.com/go/import_test.go new file mode 100644 index 0000000..839fae4 --- /dev/null +++ b/vendor/cloud.google.com/go/import_test.go @@ -0,0 +1,61 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cloud + +import ( + "go/parser" + "go/token" + "os" + "path/filepath" + "strconv" + "testing" +) + +func TestContextImport(t *testing.T) { + t.Parallel() + + whiteList := map[string]bool{ + "storage/go17.go": true, + } + + err := filepath.Walk(".", func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + if filepath.Ext(path) != ".go" || whiteList[path] { + return nil + } + + fset := token.NewFileSet() + file, err := parser.ParseFile(fset, path, nil, parser.ImportsOnly) + if err != nil { + return err + } + + for _, imp := range file.Imports { + impPath, err := strconv.Unquote(imp.Path.Value) + if err != nil { + return err + } + if impPath == "context" { + t.Errorf(`file %q import "context", want "golang.org/x/net/context"`, path) + } + } + return nil + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/cloud.google.com/go/internal/annotate.go b/vendor/cloud.google.com/go/internal/annotate.go new file mode 100644 index 0000000..797809a --- /dev/null +++ b/vendor/cloud.google.com/go/internal/annotate.go @@ -0,0 +1,54 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "fmt" + + "google.golang.org/api/googleapi" + "google.golang.org/grpc/status" +) + +// Annotate prepends msg to the error message in err, attempting +// to preserve other information in err, like an error code. +// +// Annotate panics if err is nil. +// +// Annotate knows about these error types: +// - "google.golang.org/grpc/status".Status +// - "google.golang.org/api/googleapi".Error +// If the error is not one of these types, Annotate behaves +// like +// fmt.Errorf("%s: %v", msg, err) +func Annotate(err error, msg string) error { + if err == nil { + panic("Annotate called with nil") + } + if s, ok := status.FromError(err); ok { + p := s.Proto() + p.Message = msg + ": " + p.Message + return status.ErrorProto(p) + } + if g, ok := err.(*googleapi.Error); ok { + g.Message = msg + ": " + g.Message + return g + } + return fmt.Errorf("%s: %v", msg, err) +} + +// Annotatef uses format and args to format a string, then calls Annotate. +func Annotatef(err error, format string, args ...interface{}) error { + return Annotate(err, fmt.Sprintf(format, args...)) +} diff --git a/vendor/cloud.google.com/go/internal/annotate_test.go b/vendor/cloud.google.com/go/internal/annotate_test.go new file mode 100644 index 0000000..36cea5c --- /dev/null +++ b/vendor/cloud.google.com/go/internal/annotate_test.go @@ -0,0 +1,65 @@ +// Copyright 2017 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "errors" + "testing" + + "google.golang.org/api/googleapi" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const wantMessage = "prefix: msg" + +func TestAnnotateGRPC(t *testing.T) { + // grpc Status error + err := status.Error(codes.NotFound, "msg") + err = Annotate(err, "prefix") + got, ok := status.FromError(err) + if !ok { + t.Fatalf("got %T, wanted a status", got) + } + if g, w := got.Code(), codes.NotFound; g != w { + t.Errorf("got code %v, want %v", g, w) + } + if g, w := got.Message(), wantMessage; g != w { + t.Errorf("got message %q, want %q", g, w) + } +} + +func TestAnnotateGoogleapi(t *testing.T) { + // googleapi error + var err error = &googleapi.Error{Code: 403, Message: "msg"} + err = Annotate(err, "prefix") + got2, ok := err.(*googleapi.Error) + if !ok { + t.Fatalf("got %T, wanted a googleapi.Error", got2) + } + if g, w := got2.Code, 403; g != w { + t.Errorf("got code %d, want %d", g, w) + } + if g, w := got2.Message, wantMessage; g != w { + t.Errorf("got message %q, want %q", g, w) + } +} + +func TestAnnotateUnknownError(t *testing.T) { + err := Annotate(errors.New("msg"), "prefix") + if g, w := err.Error(), wantMessage; g != w { + t.Errorf("got message %q, want %q", g, w) + } +} diff --git a/vendor/cloud.google.com/go/internal/atomiccache/atomiccache.go b/vendor/cloud.google.com/go/internal/atomiccache/atomiccache.go new file mode 100644 index 0000000..2bea8a1 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/atomiccache/atomiccache.go @@ -0,0 +1,58 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package atomiccache provides a map-based cache that supports very fast +// reads. +package atomiccache + +import ( + "sync" + "sync/atomic" +) + +type mapType map[interface{}]interface{} + +// Cache is a map-based cache that supports fast reads via use of atomics. +// Writes are slow, requiring a copy of the entire cache. +// The zero Cache is an empty cache, ready for use. +type Cache struct { + val atomic.Value // mapType + mu sync.Mutex // used only by writers +} + +// Get returns the value of the cache at key. If there is no value, +// getter is called to provide one, and the cache is updated. +// The getter function may be called concurrently. It should be pure, +// returning the same value for every call. 
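+// A minimal usage sketch, where expensiveLookup stands in for whatever pure computation the caller wants cached: +// +//	var c Cache +//	v := c.Get("key", func() interface{} { return expensiveLookup("key") })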
+func (c *Cache) Get(key interface{}, getter func() interface{}) interface{} { + mp, _ := c.val.Load().(mapType) + if v, ok := mp[key]; ok { + return v + } + + // Compute value without lock. + // Might duplicate effort but won't hold other computations back. + newV := getter() + + c.mu.Lock() + mp, _ = c.val.Load().(mapType) + newM := make(mapType, len(mp)+1) + for k, v := range mp { + newM[k] = v + } + newM[key] = newV + c.val.Store(newM) + c.mu.Unlock() + return newV +} diff --git a/vendor/cloud.google.com/go/internal/atomiccache/atomiccache_test.go b/vendor/cloud.google.com/go/internal/atomiccache/atomiccache_test.go new file mode 100644 index 0000000..33105b3 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/atomiccache/atomiccache_test.go @@ -0,0 +1,46 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package atomiccache + +import ( + "fmt" + "testing" +) + +func TestGet(t *testing.T) { + var c Cache + called := false + get := func(k interface{}) interface{} { + return c.Get(k, func() interface{} { + called = true + return fmt.Sprintf("v%d", k) + }) + } + got := get(1) + if want := "v1"; got != want { + t.Errorf("got %v, want %v", got, want) + } + if !called { + t.Error("getter not called, expected a call") + } + called = false + got = get(1) + if want := "v1"; got != want { + t.Errorf("got %v, want %v", got, want) + } + if called { + t.Error("getter unexpectedly called") + } +} diff --git a/vendor/cloud.google.com/go/internal/btree/README.md b/vendor/cloud.google.com/go/internal/btree/README.md new file mode 100644 index 0000000..601ff54 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/btree/README.md @@ -0,0 +1,11 @@ +This package is a fork of github.com/jba/btree at commit +d4edd57f39b8425fc2c631047ff4dc6024d82a4f, which itself was a fork of +github.com/google/btree at 316fb6d3f031ae8f4d457c6c5186b9e3ded70435. + +This directory makes the following modifications: + +- Updated copyright notice. +- removed LICENSE (it is the same as the repo-wide license, Apache 2.0) +- Removed examples_test.go and .travis.yml. +- Added this file. + diff --git a/vendor/cloud.google.com/go/internal/btree/benchmarks_test.go b/vendor/cloud.google.com/go/internal/btree/benchmarks_test.go new file mode 100644 index 0000000..850eb58 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/btree/benchmarks_test.go @@ -0,0 +1,268 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.7 + +package btree + +import ( + "fmt" + "sort" + "testing" +) + +const benchmarkTreeSize = 10000 + +var degrees = []int{2, 8, 32, 64} + +func BenchmarkInsert(b *testing.B) { + insertP := perm(benchmarkTreeSize) + for _, d := range degrees { + b.Run(fmt.Sprintf("degree=%d", d), func(b *testing.B) { + i := 0 + for i < b.N { + tr := New(d, less) + for _, m := range insertP { + tr.Set(m.Key, m.Value) + i++ + if i >= b.N { + return + } + } + } + }) + } +} + +func BenchmarkDeleteInsert(b *testing.B) { + insertP := perm(benchmarkTreeSize) + for _, d := range degrees { + b.Run(fmt.Sprintf("degree=%d", d), func(b *testing.B) { + tr := New(d, less) + for _, m := range insertP { + tr.Set(m.Key, m.Value) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + m := insertP[i%benchmarkTreeSize] + tr.Delete(m.Key) + tr.Set(m.Key, m.Value) + } + }) + } +} + +func BenchmarkDeleteInsertCloneOnce(b *testing.B) { + insertP := perm(benchmarkTreeSize) + for _, d := range degrees { + b.Run(fmt.Sprintf("degree=%d", d), func(b *testing.B) { + tr := New(d, less) + for _, m := range insertP { + tr.Set(m.Key, m.Value) + } + tr = tr.Clone() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m := insertP[i%benchmarkTreeSize] + tr.Delete(m.Key) + tr.Set(m.Key, m.Value) + } + }) + } +} + +func BenchmarkDeleteInsertCloneEachTime(b *testing.B) { + insertP := perm(benchmarkTreeSize) + for _, d := range degrees { + b.Run(fmt.Sprintf("degree=%d", d), func(b *testing.B) { + tr := New(d, less) + for _, m := range insertP { + tr.Set(m.Key, m.Value) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + tr = tr.Clone() + m := insertP[i%benchmarkTreeSize] + tr.Delete(m.Key) + tr.Set(m.Key, m.Value) + } + }) + } +} + +func BenchmarkDelete(b *testing.B) { + insertP := perm(benchmarkTreeSize) + removeP := perm(benchmarkTreeSize) + for _, d := range degrees { + b.Run(fmt.Sprintf("degree=%d", d), func(b *testing.B) { + i := 0 + for i < b.N { + b.StopTimer() + tr := New(d, less) + for _, v := range insertP { + tr.Set(v.Key, v.Value) + } + b.StartTimer() + for _, m := range removeP { + tr.Delete(m.Key) + i++ + if i >= b.N { + return + } + } + if tr.Len() > 0 { + panic(tr.Len()) + } + } + }) + } +} + +func BenchmarkGet(b *testing.B) { + insertP := perm(benchmarkTreeSize) + getP := perm(benchmarkTreeSize) + for _, d := range degrees { + b.Run(fmt.Sprintf("degree=%d", d), func(b *testing.B) { + i := 0 + for i < b.N { + b.StopTimer() + tr := New(d, less) + for _, v := range insertP { + tr.Set(v.Key, v.Value) + } + b.StartTimer() + for _, m := range getP { + tr.Get(m.Key) + i++ + if i >= b.N { + return + } + } + } + }) + } +} + +func BenchmarkGetWithIndex(b *testing.B) { + insertP := perm(benchmarkTreeSize) + getP := perm(benchmarkTreeSize) + for _, d := range degrees { + b.Run(fmt.Sprintf("degree=%d", d), func(b *testing.B) { + i := 0 + for i < b.N { + b.StopTimer() + tr := New(d, less) + for _, v := range insertP { + tr.Set(v.Key, v.Value) + } + b.StartTimer() + for _, m := range getP { + tr.GetWithIndex(m.Key) + i++ + if i >= b.N { + return + } + } + } + }) + } +} + +func BenchmarkGetCloneEachTime(b *testing.B) { + insertP := perm(benchmarkTreeSize) + getP := perm(benchmarkTreeSize) + for _, d := range degrees { + b.Run(fmt.Sprintf("degree=%d", d), func(b *testing.B) { + i := 0 + for i < b.N { + b.StopTimer() + tr := New(d, less) + for _, m := range insertP { + tr.Set(m.Key, m.Value) + } + b.StartTimer() + for _, m := range 
getP { + tr = tr.Clone() + tr.Get(m.Key) + i++ + if i >= b.N { + return + } + } + } + }) + } +} + +func BenchmarkFind(b *testing.B) { + for _, d := range degrees { + var items []item + for i := 0; i < 2*d; i++ { + items = append(items, item{i, i}) + } + b.Run(fmt.Sprintf("size=%d", len(items)), func(b *testing.B) { + for _, alg := range []struct { + name string + fun func(Key, []item) (int, bool) + }{ + {"binary", findBinary}, + {"linear", findLinear}, + } { + b.Run(alg.name, func(b *testing.B) { + for i := 0; i < b.N; i++ { + for j := 0; j < len(items); j++ { + alg.fun(items[j].key, items) + } + } + }) + } + }) + } +} + +func findBinary(k Key, s []item) (int, bool) { + i := sort.Search(len(s), func(i int) bool { return less(k, s[i].key) }) + // i is the smallest index of s for which key.Less(s[i].Key), or len(s). + if i > 0 && !less(s[i-1], k) { + return i - 1, true + } + return i, false +} + +func findLinear(k Key, s []item) (int, bool) { + var i int + for i = 0; i < len(s); i++ { + if less(k, s[i].key) { + break + } + } + if i > 0 && !less(s[i-1].key, k) { + return i - 1, true + } + return i, false +} + +type byInts []item + +func (a byInts) Len() int { + return len(a) +} + +func (a byInts) Less(i, j int) bool { + return a[i].key.(int) < a[j].key.(int) +} + +func (a byInts) Swap(i, j int) { + a[i], a[j] = a[j], a[i] +} diff --git a/vendor/cloud.google.com/go/internal/btree/btree.go b/vendor/cloud.google.com/go/internal/btree/btree.go new file mode 100644 index 0000000..7dfd78e --- /dev/null +++ b/vendor/cloud.google.com/go/internal/btree/btree.go @@ -0,0 +1,1018 @@ +// Copyright 2014 Google Inc. +// Modified 2018 by Jonathan Amsterdam (jbamsterdam@gmail.com) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package btree implements in-memory B-Trees of arbitrary degree. +// +// This implementation is based on google/btree (http://github.com/google/btree), and +// much of the code is taken from there. But the API has been changed significantly, +// particularly around iteration, and support for indexing by position has been +// added. +// +// btree implements an in-memory B-Tree for use as an ordered data structure. +// It is not meant for persistent storage solutions. +// +// It has a flatter structure than an equivalent red-black or other binary tree, +// which in some cases yields better memory usage and/or performance. +// See some discussion on the matter here: +// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html +// Note, though, that this project is in no way related to the C++ B-Tree +// implementation written about there. +// +// Within this tree, each node contains a slice of items and a (possibly nil) +// slice of children. 
For basic numeric values or raw structs, this can cause +// efficiency differences when compared to equivalent C++ template code that +// stores values in arrays within the node: +// * Due to the overhead of storing values as interfaces (each +// value needs to be stored as the value itself, then 2 words for the +// interface pointing to that value and its type), resulting in higher +// memory use. +// * Since interfaces can point to values anywhere in memory, values are +// most likely not stored in contiguous blocks, resulting in a higher +// number of cache misses. +// These issues don't tend to matter, though, when working with strings or other +// heap-allocated structures, since C++-equivalent structures also must store +// pointers and also distribute their values across the heap. +package btree + +import ( + "fmt" + "sort" + "sync" +) + +// Key represents a key into the tree. +type Key interface{} + +type Value interface{} + +// item is a key-value pair. +type item struct { + key Key + value Value +} + +type lessFunc func(interface{}, interface{}) bool + +// New creates a new B-Tree with the given degree and comparison function. +// +// New(2, less), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). +// +// The less function tests whether the current item is less than the given argument. +// It must provide a strict weak ordering. +// If !less(a, b) && !less(b, a), we treat this to mean a == b (i.e. the tree +// can hold only one of a or b). +func New(degree int, less func(interface{}, interface{}) bool) *BTree { + if degree <= 1 { + panic("bad degree") + } + return &BTree{ + degree: degree, + less: less, + cow: ©OnWriteContext{}, + } +} + +// items stores items in a node. +type items []item + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *items) insertAt(index int, m item) { + *s = append(*s, item{}) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = m +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *items) removeAt(index int) item { + m := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + (*s)[len(*s)-1] = item{} + *s = (*s)[:len(*s)-1] + return m +} + +// pop removes and returns the last element in the list. +func (s *items) pop() item { + index := len(*s) - 1 + out := (*s)[index] + (*s)[index] = item{} + *s = (*s)[:index] + return out +} + +var nilItems = make(items, 16) + +// truncate truncates this instance at index so that it contains only the +// first index items. index must be less than or equal to length. +func (s *items) truncate(index int) { + var toClear items + *s, toClear = (*s)[:index], (*s)[index:] + for len(toClear) > 0 { + toClear = toClear[copy(toClear, nilItems):] + } +} + +// find returns the index where an item with key should be inserted into this +// list. 'found' is true if the item already exists in the list at the given +// index. +func (s items) find(k Key, less lessFunc) (index int, found bool) { + i := sort.Search(len(s), func(i int) bool { return less(k, s[i].key) }) + // i is the smallest index of s for which k.Less(s[i].Key), or len(s). + if i > 0 && !less(s[i-1].key, k) { + return i - 1, true + } + return i, false +} + +// children stores child nodes in a node. +type children []*node + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. 
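+// For example (sketch): inserting n at index 1 of children [a b c] yields [a n b c].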
+func (s *children) insertAt(index int, n *node) { + *s = append(*s, nil) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = n +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *children) removeAt(index int) *node { + n := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + (*s)[len(*s)-1] = nil + *s = (*s)[:len(*s)-1] + return n +} + +// pop removes and returns the last element in the list. +func (s *children) pop() (out *node) { + index := len(*s) - 1 + out = (*s)[index] + (*s)[index] = nil + *s = (*s)[:index] + return +} + +var nilChildren = make(children, 16) + +// truncate truncates this instance at index so that it contains only the +// first index children. index must be less than or equal to length. +func (s *children) truncate(index int) { + var toClear children + *s, toClear = (*s)[:index], (*s)[index:] + for len(toClear) > 0 { + toClear = toClear[copy(toClear, nilChildren):] + } +} + +// node is an internal node in a tree. +// +// It must at all times maintain the invariant that either +// * len(children) == 0, len(items) unconstrained +// * len(children) == len(items) + 1 +type node struct { + items items + children children + size int // number of items in the subtree: len(items) + sum over i of children[i].size + cow *copyOnWriteContext +} + +func (n *node) computeSize() int { + sz := len(n.items) + for _, c := range n.children { + sz += c.size + } + return sz +} + +func (n *node) checkSize() { + sz := n.computeSize() + if n.size != sz { + panic(fmt.Sprintf("n.size = %d, computed size = %d", n.size, sz)) + } +} + +func (n *node) mutableFor(cow *copyOnWriteContext) *node { + if n.cow == cow { + return n + } + out := cow.newNode() + if cap(out.items) >= len(n.items) { + out.items = out.items[:len(n.items)] + } else { + out.items = make(items, len(n.items), cap(n.items)) + } + copy(out.items, n.items) + // Copy children + if cap(out.children) >= len(n.children) { + out.children = out.children[:len(n.children)] + } else { + out.children = make(children, len(n.children), cap(n.children)) + } + copy(out.children, n.children) + out.size = n.size + return out +} + +func (n *node) mutableChild(i int) *node { + c := n.children[i].mutableFor(n.cow) + n.children[i] = c + return c +} + +// split splits the given node at the given index. The current node shrinks, +// and this function returns the item that existed at that index and a new node +// containing all items/children after it. +func (n *node) split(i int) (item, *node) { + item := n.items[i] + next := n.cow.newNode() + next.items = append(next.items, n.items[i+1:]...) + n.items.truncate(i) + if len(n.children) > 0 { + next.children = append(next.children, n.children[i+1:]...) + n.children.truncate(i + 1) + } + n.size = n.computeSize() + next.size = next.computeSize() + return item, next +} + +// maybeSplitChild checks if a child should be split, and if so splits it. +// Returns whether or not a split occurred. +func (n *node) maybeSplitChild(i, maxItems int) bool { + if len(n.children[i].items) < maxItems { + return false + } + first := n.mutableChild(i) + item, second := first.split(maxItems / 2) + n.items.insertAt(i, item) + n.children.insertAt(i+1, second) + // The size of n doesn't change. + return true +} + +// insert inserts an item into the subtree rooted at this node, making sure +// no nodes in the subtree exceed maxItems items. Should an equivalent item +// be found/replaced by insert, its value will be returned.
+// +// If computeIndex is true, the third return value is the index of the value with respect to n. +func (n *node) insert(m item, maxItems int, less lessFunc, computeIndex bool) (old Value, present bool, idx int) { + i, found := n.items.find(m.key, less) + if found { + out := n.items[i] + n.items[i] = m + if computeIndex { + idx = n.itemIndex(i) + } + return out.value, true, idx + } + if len(n.children) == 0 { + n.items.insertAt(i, m) + n.size++ + return old, false, i + } + if n.maybeSplitChild(i, maxItems) { + inTree := n.items[i] + switch { + case less(m.key, inTree.key): + // no change, we want first split node + case less(inTree.key, m.key): + i++ // we want second split node + default: + out := n.items[i] + n.items[i] = m + if computeIndex { + idx = n.itemIndex(i) + } + return out.value, true, idx + } + } + old, present, idx = n.mutableChild(i).insert(m, maxItems, less, computeIndex) + if !present { + n.size++ + } + if computeIndex { + idx += n.partialSize(i) + } + return old, present, idx +} + +// get finds the given key in the subtree and returns the corresponding item, along with a boolean reporting +// whether it was found. +// If computeIndex is true, it also returns the index of the key relative to the node's subtree. +func (n *node) get(k Key, computeIndex bool, less lessFunc) (item, bool, int) { + i, found := n.items.find(k, less) + if found { + return n.items[i], true, n.itemIndex(i) + } + if len(n.children) > 0 { + m, found, idx := n.children[i].get(k, computeIndex, less) + if computeIndex && found { + idx += n.partialSize(i) + } + return m, found, idx + } + return item{}, false, -1 +} + +// itemIndex returns the index w.r.t. n of the ith item in n. +func (n *node) itemIndex(i int) int { + if len(n.children) == 0 { + return i + } + // Get the size of the node up to but not including the child to the right of + // item i. Subtract 1 because the index is 0-based. + return n.partialSize(i+1) - 1 +} + +// Returns the size of the non-leaf node up to but not including child i. +func (n *node) partialSize(i int) int { + var sz int + for j, c := range n.children { + if j == i { + break + } + sz += c.size + 1 + } + return sz +} + +// cursorStackForKey returns a stack of cursors for the key, along with whether the key was found and the index. +func (n *node) cursorStackForKey(k Key, cs cursorStack, less lessFunc) (cursorStack, bool, int) { + i, found := n.items.find(k, less) + cs.push(cursor{n, i}) + idx := i + if found { + if len(n.children) > 0 { + idx = n.partialSize(i+1) - 1 + } + return cs, true, idx + } + if len(n.children) > 0 { + cs, found, idx := n.children[i].cursorStackForKey(k, cs, less) + return cs, found, idx + n.partialSize(i) + } + return cs, false, idx +} + +// at returns the item at the i'th position in the subtree rooted at n. +// It assumes i is in range. +func (n *node) at(i int) item { + if len(n.children) == 0 { + return n.items[i] + } + for j, c := range n.children { + if i < c.size { + return c.at(i) + } + i -= c.size + if i == 0 { + return n.items[j] + } + i-- + } + panic("impossible") +} + +// cursorStackForIndex returns a stack of cursors for the index. +// It assumes i is in range. 
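+// In a leaf the stack is the single cursor {n, i}; at an interior node each step records which child was taken, with i reduced by the sizes of the subtrees and items skipped, so the finished stack names a path from the root down to the i'th item.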
+func (n *node) cursorStackForIndex(i int, cs cursorStack) cursorStack { + if len(n.children) == 0 { + return cs.push(cursor{n, i}) + } + for j, c := range n.children { + if i < c.size { + return c.cursorStackForIndex(i, cs.push(cursor{n, j})) + } + i -= c.size + if i == 0 { + return cs.push(cursor{n, j}) + } + i-- + } + panic("impossible") +} + +// toRemove details what item to remove in a node.remove call. +type toRemove int + +const ( + removeItem toRemove = iota // removes the given item + removeMin // removes smallest item in the subtree + removeMax // removes largest item in the subtree +) + +// remove removes an item from the subtree rooted at this node. +func (n *node) remove(key Key, minItems int, typ toRemove, less lessFunc) (item, bool) { + var i int + var found bool + switch typ { + case removeMax: + if len(n.children) == 0 { + n.size-- + return n.items.pop(), true + + } + i = len(n.items) + case removeMin: + if len(n.children) == 0 { + n.size-- + return n.items.removeAt(0), true + } + i = 0 + case removeItem: + i, found = n.items.find(key, less) + if len(n.children) == 0 { + if found { + n.size-- + return n.items.removeAt(i), true + } + return item{}, false + } + default: + panic("invalid type") + } + // If we get to here, we have children. + if len(n.children[i].items) <= minItems { + return n.growChildAndRemove(i, key, minItems, typ, less) + } + child := n.mutableChild(i) + // Either we had enough items to begin with, or we've done some + // merging/stealing, because we've got enough now and we're ready to return + // stuff. + if found { + // The item exists at index 'i', and the child we've selected can give us a + // predecessor, since if we've gotten here it's got > minItems items in it. + out := n.items[i] + // We use our special-case 'remove' call with typ=maxItem to pull the + // predecessor of item i (the rightmost leaf of our immediate left child) + // and set it into where we pulled the item from. + n.items[i], _ = child.remove(nil, minItems, removeMax, less) + n.size-- + return out, true + } + // Final recursive call. Once we're here, we know that the item isn't in this + // node and that the child is big enough to remove from. + m, removed := child.remove(key, minItems, typ, less) + if removed { + n.size-- + } + return m, removed +} + +// growChildAndRemove grows child 'i' to make sure it's possible to remove an +// item from it while keeping it at minItems, then calls remove to actually +// remove it. +// +// Most documentation says we have to do two sets of special casing: +// 1) item is in this node +// 2) item is in child +// In both cases, we need to handle the two subcases: +// A) node has enough values that it can spare one +// B) node doesn't have enough values +// For the latter, we have to check: +// a) left sibling has node to spare +// b) right sibling has node to spare +// c) we must merge +// To simplify our code here, we handle cases #1 and #2 the same: +// If a node doesn't have enough items, we make sure it does (using a,b,c). +// We then simply redo our remove call, and the second time (regardless of +// whether we're in case 1 or 2), we'll have enough items and can guarantee +// that we hit case A. 
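+// As a concrete instance (sketch): with degree 2 (a 2-3-4 tree), minItems() is 1 and maxItems() is 3, so a child down to a single item must steal from a sibling or merge with one before the recursive remove proceeds.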
+func (n *node) growChildAndRemove(i int, key Key, minItems int, typ toRemove, less lessFunc) (item, bool) {
+	if i > 0 && len(n.children[i-1].items) > minItems {
+		// Steal from left child.
+		child := n.mutableChild(i)
+		stealFrom := n.mutableChild(i - 1)
+		stolenItem := stealFrom.items.pop()
+		stealFrom.size--
+		child.items.insertAt(0, n.items[i-1])
+		child.size++
+		n.items[i-1] = stolenItem
+		if len(stealFrom.children) > 0 {
+			c := stealFrom.children.pop()
+			stealFrom.size -= c.size
+			child.children.insertAt(0, c)
+			child.size += c.size
+		}
+	} else if i < len(n.items) && len(n.children[i+1].items) > minItems {
+		// Steal from right child.
+		child := n.mutableChild(i)
+		stealFrom := n.mutableChild(i + 1)
+		stolenItem := stealFrom.items.removeAt(0)
+		stealFrom.size--
+		child.items = append(child.items, n.items[i])
+		child.size++
+		n.items[i] = stolenItem
+		if len(stealFrom.children) > 0 {
+			c := stealFrom.children.removeAt(0)
+			stealFrom.size -= c.size
+			child.children = append(child.children, c)
+			child.size += c.size
+		}
+	} else {
+		if i >= len(n.items) {
+			i--
+		}
+		child := n.mutableChild(i)
+		// Merge with right child.
+		mergeItem := n.items.removeAt(i)
+		mergeChild := n.children.removeAt(i + 1)
+		child.items = append(child.items, mergeItem)
+		child.items = append(child.items, mergeChild.items...)
+		child.children = append(child.children, mergeChild.children...)
+		child.size = child.computeSize()
+		n.cow.freeNode(mergeChild)
+	}
+	return n.remove(key, minItems, typ, less)
+}
+
+// BTree is an implementation of a B-Tree.
+//
+// BTree stores item instances in an ordered structure, allowing easy insertion,
+// removal, and iteration.
+//
+// Write operations are not safe for concurrent mutation by multiple
+// goroutines, but Read operations are.
+type BTree struct {
+	degree int
+	less   lessFunc
+	root   *node
+	cow    *copyOnWriteContext
+}
+
+// copyOnWriteContext pointers determine node ownership. A tree with a cow
+// context equivalent to a node's cow context is allowed to modify that node.
+// A tree whose write context does not match a node's is not allowed to modify
+// it, and must create a new, writable copy (i.e., it's a Clone).
+//
+// When doing any write operation, we maintain the invariant that the current
+// node's context is equal to the context of the tree that requested the write.
+// We do this by, before we descend into any node, creating a copy with the
+// correct context if the contexts don't match.
+//
+// Since the node we're currently visiting on any write has the requesting
+// tree's context, that node is modifiable in place. Children of that node may
+// not share context, but before we descend into them, we'll make a mutable
+// copy.
+type copyOnWriteContext struct{ byte } // non-empty, because empty structs may have same addr
+
+// Clone clones the btree, lazily. Clone should not be called concurrently,
+// but the original tree (t) and the new tree (t2) can be used concurrently
+// once the Clone call completes.
+//
+// The internal tree structure of t is marked read-only and shared between t and
+// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes
+// whenever one of t's original nodes would have been modified. Read operations
+// should see no performance degradation. Write operations for both t and t2
+// will initially experience minor slow-downs caused by additional allocs and
+// copies due to the aforementioned copy-on-write logic, but should converge to
+// the performance characteristics of the original tree.
+func (t *BTree) Clone() *BTree {
+	// Create two entirely new copy-on-write contexts.
+	// This operation effectively creates three trees:
+	//   the original, shared nodes (old t.cow)
+	//   the new t.cow nodes
+	//   the new out.cow nodes
+	cow1, cow2 := *t.cow, *t.cow
+	out := *t
+	t.cow = &cow1
+	out.cow = &cow2
+	return &out
+}
+
+// maxItems returns the max number of items to allow per node.
+func (t *BTree) maxItems() int {
+	return t.degree*2 - 1
+}
+
+// minItems returns the min number of items to allow per node (ignored for the
+// root node).
+func (t *BTree) minItems() int {
+	return t.degree - 1
+}
+
+var nodePool = sync.Pool{New: func() interface{} { return new(node) }}
+
+func (c *copyOnWriteContext) newNode() *node {
+	n := nodePool.Get().(*node)
+	n.cow = c
+	return n
+}
+
+func (c *copyOnWriteContext) freeNode(n *node) {
+	if n.cow == c {
+		// clear to allow GC
+		n.items.truncate(0)
+		n.children.truncate(0)
+		n.cow = nil
+		nodePool.Put(n)
+	}
+}
+
+// Set sets the given key to the given value in the tree. If the key is present in
+// the tree, its value is changed and the old value is returned along with a second
+// return value of true. If the key is not in the tree, it is added, and the second
+// return value is false.
+func (t *BTree) Set(k Key, v Value) (old Value, present bool) {
+	old, present, _ = t.set(k, v, false)
+	return old, present
+}
+
+// SetWithIndex behaves like Set, but also returns the index of the key with
+// respect to the tree's ordering.
+func (t *BTree) SetWithIndex(k Key, v Value) (old Value, present bool, index int) {
+	return t.set(k, v, true)
+}
+
+func (t *BTree) set(k Key, v Value, computeIndex bool) (old Value, present bool, idx int) {
+	if t.root == nil {
+		t.root = t.cow.newNode()
+		t.root.items = append(t.root.items, item{k, v})
+		t.root.size = 1
+		return old, false, 0
+	}
+	t.root = t.root.mutableFor(t.cow)
+	if len(t.root.items) >= t.maxItems() {
+		sz := t.root.size
+		item2, second := t.root.split(t.maxItems() / 2)
+		oldroot := t.root
+		t.root = t.cow.newNode()
+		t.root.items = append(t.root.items, item2)
+		t.root.children = append(t.root.children, oldroot, second)
+		t.root.size = sz
+	}
+	return t.root.insert(item{k, v}, t.maxItems(), t.less, computeIndex)
+}
+
+// Delete removes the item with the given key, returning its value. The second return value
+// reports whether the key was found.
+func (t *BTree) Delete(k Key) (Value, bool) {
+	m, removed := t.deleteItem(k, removeItem)
+	return m.value, removed
+}
+
+// DeleteMin removes the smallest item in the tree and returns its key and value.
+// If the tree is empty, it returns zero values.
+func (t *BTree) DeleteMin() (Key, Value) {
+	item, _ := t.deleteItem(nil, removeMin)
+	return item.key, item.value
+}
+
+// DeleteMax removes the largest item in the tree and returns its key and value.
+// If the tree is empty, it returns zero values.
+func (t *BTree) DeleteMax() (Key, Value) {
+	item, _ := t.deleteItem(nil, removeMax)
+	return item.key, item.value
+}
+
+func (t *BTree) deleteItem(key Key, typ toRemove) (item, bool) {
+	if t.root == nil || len(t.root.items) == 0 {
+		return item{}, false
+	}
+	t.root = t.root.mutableFor(t.cow)
+	out, removed := t.root.remove(key, t.minItems(), typ, t.less)
+	if len(t.root.items) == 0 && len(t.root.children) > 0 {
+		oldroot := t.root
+		t.root = t.root.children[0]
+		t.cow.freeNode(oldroot)
+	}
+	return out, removed
+}
+
+// Get returns the value for the given key in the tree, or the zero value if the
+// key is not in the tree.
+//
+// To distinguish a zero value from a key that is not present, use GetWithIndex.
+func (t *BTree) Get(k Key) Value { + var z Value + if t.root == nil { + return z + } + item, ok, _ := t.root.get(k, false, t.less) + if !ok { + return z + } + return item.value +} + +// GetWithIndex returns the value and index for the given key in the tree, or the +// zero value and -1 if the key is not in the tree. +func (t *BTree) GetWithIndex(k Key) (Value, int) { + var z Value + if t.root == nil { + return z, -1 + } + item, _, index := t.root.get(k, true, t.less) + return item.value, index +} + +// At returns the key and value at index i. The minimum item has index 0. +// If i is outside the range [0, t.Len()), At panics. +func (t *BTree) At(i int) (Key, Value) { + if i < 0 || i >= t.Len() { + panic("btree: index out of range") + } + item := t.root.at(i) + return item.key, item.value +} + +// Has reports whether the given key is in the tree. +func (t *BTree) Has(k Key) bool { + if t.root == nil { + return false + } + _, ok, _ := t.root.get(k, false, t.less) + return ok +} + +// Min returns the smallest key in the tree and its value. If the tree is empty, it +// returns zero values. +func (t *BTree) Min() (Key, Value) { + var k Key + var v Value + if t.root == nil { + return k, v + } + n := t.root + for len(n.children) > 0 { + n = n.children[0] + } + if len(n.items) == 0 { + return k, v + } + return n.items[0].key, n.items[0].value +} + +// Max returns the largest key in the tree and its value. If the tree is empty, both +// return values are zero values. +func (t *BTree) Max() (Key, Value) { + var k Key + var v Value + if t.root == nil { + return k, v + } + n := t.root + for len(n.children) > 0 { + n = n.children[len(n.children)-1] + } + if len(n.items) == 0 { + return k, v + } + m := n.items[len(n.items)-1] + return m.key, m.value +} + +// Len returns the number of items currently in the tree. +func (t *BTree) Len() int { + if t.root == nil { + return 0 + } + return t.root.size +} + +// Before returns an iterator positioned just before k. After the first call to Next, +// the Iterator will be at k, or at the key just greater than k if k is not in the tree. +// Subsequent calls to Next will traverse the tree's items in ascending order. +func (t *BTree) Before(k Key) *Iterator { + if t.root == nil { + return &Iterator{} + } + var cs cursorStack + cs, found, idx := t.root.cursorStackForKey(k, cs, t.less) + // If we found the key, the cursor stack is pointing to it. Since that is + // the first element we want, don't advance the iterator on the initial call to Next. + // If we haven't found the key, then the top of the cursor stack is either pointing at the + // item just after k, in which case we do not want to move the iterator; or the index + // is past the end of the items slice, in which case we do. + var stay bool + top := cs[len(cs)-1] + if found { + stay = true + } else if top.index < len(top.node.items) { + stay = true + } else { + idx-- + } + return &Iterator{ + cursors: cs, + stay: stay, + descending: false, + Index: idx, + } +} + +// After returns an iterator positioned just after k. After the first call to Next, +// the Iterator will be at k, or at the key just less than k if k is not in the tree. +// Subsequent calls to Next will traverse the tree's items in descending order. +func (t *BTree) After(k Key) *Iterator { + if t.root == nil { + return &Iterator{} + } + var cs cursorStack + cs, found, idx := t.root.cursorStackForKey(k, cs, t.less) + // If we found the key, the cursor stack is pointing to it. 
Since that is
+	// the first element we want, don't advance the iterator on the initial call to Next.
+	// If we haven't found the key, the cursor stack is pointing just after the first item,
+	// so we do want to advance.
+	return &Iterator{
+		cursors:    cs,
+		stay:       found,
+		descending: true,
+		Index:      idx,
+	}
+}
+
+// BeforeIndex returns an iterator positioned just before the item with the given index.
+// The iterator will traverse the tree's items in ascending order.
+// If i is not in the range [0, t.Len()], BeforeIndex panics.
+// Note that it is not an error to provide an index of t.Len().
+func (t *BTree) BeforeIndex(i int) *Iterator {
+	return t.indexIterator(i, false)
+}
+
+// AfterIndex returns an iterator positioned just after the item with the given index.
+// The iterator will traverse the tree's items in descending order.
+// If i is not in the range [0, t.Len()], AfterIndex panics.
+// Note that it is not an error to provide an index of t.Len().
+func (t *BTree) AfterIndex(i int) *Iterator {
+	return t.indexIterator(i, true)
+}
+
+func (t *BTree) indexIterator(i int, descending bool) *Iterator {
+	if i < 0 || i > t.Len() {
+		panic("btree: index out of range")
+	}
+	if i == t.Len() {
+		return &Iterator{}
+	}
+	var cs cursorStack
+	return &Iterator{
+		cursors:    t.root.cursorStackForIndex(i, cs),
+		stay:       true,
+		descending: descending,
+		Index:      i,
+	}
+}
+
+// An Iterator supports traversing the items in the tree.
+type Iterator struct {
+	Key   Key
+	Value Value
+	// Index is the position of the item in the tree viewed as a sequence.
+	// The minimum item has index zero.
+	Index int
+
+	cursors    cursorStack // stack of nodes with indices; last element is the top
+	stay       bool        // don't do anything on the first call to Next.
+	descending bool        // traverse the items in descending order
+}
+
+// Next advances the Iterator to the next item in the tree. If Next returns true,
+// the Iterator's Key, Value and Index fields refer to the next item. If Next returns
+// false, there are no more items and the values of Key, Value and Index are undefined.
+//
+// If the tree is modified during iteration, the behavior is undefined.
+func (it *Iterator) Next() bool {
+	var more bool
+	switch {
+	case len(it.cursors) == 0:
+		more = false
+	case it.stay:
+		it.stay = false
+		more = true
+	case it.descending:
+		more = it.dec()
+	default:
+		more = it.inc()
+	}
+	if !more {
+		return false
+	}
+	top := it.cursors[len(it.cursors)-1]
+	item := top.node.items[top.index]
+	it.Key = item.key
+	it.Value = item.value
+	return true
+}
+
+// When inc returns true, the top cursor on the stack refers to the new current item.
+func (it *Iterator) inc() bool {
+	// Useful invariants for understanding this function:
+	// - Leaf nodes have zero children, and zero or more items.
+	// - Nonleaf nodes have one more child than items, and children[i] < items[i] < children[i+1].
+	// - The current item in the iterator is top.node.items[top.index].
+
+	it.Index++
+	// If we are at a non-leaf node, the current item is items[i], so
+	// now we want to continue with children[i+1], which must exist
+	// by the node invariant. We want the minimum item in that child's subtree.
+	top := it.cursors.incTop(1)
+	for len(top.node.children) > 0 {
+		top = cursor{top.node.children[top.index], 0}
+		it.cursors.push(top)
+	}
+	// Here, we are at a leaf node. top.index points to
+	// the new current item, if it's within the items slice.
+	for top.index >= len(top.node.items) {
+		// We've gone through everything in this node. Pop it off the stack.
+		it.cursors.pop()
+		// If the stack is now empty, we're past the last item in the tree.
+		if it.cursors.empty() {
+			return false
+		}
+		top = it.cursors.top()
+		// The new top's index points to a child, which we've just finished
+		// exploring. The next item is the one at the same index in the items slice.
+	}
+	// Here, the top cursor on the stack points to the new current item.
+	return true
+}
+
+func (it *Iterator) dec() bool {
+	// See the invariants for inc, above.
+	it.Index--
+	top := it.cursors.top()
+	// If we are at a non-leaf node, the current item is items[i], so
+	// now we want to continue with children[i]. We want the maximum item in that child's subtree.
+	for len(top.node.children) > 0 {
+		c := top.node.children[top.index]
+		top = cursor{c, len(c.items)}
+		it.cursors.push(top)
+	}
+	top = it.cursors.incTop(-1)
+	// Here, we are at a leaf node. top.index points to
+	// the new current item, if it's within the items slice.
+	for top.index < 0 {
+		// We've gone through everything in this node. Pop it off the stack.
+		it.cursors.pop()
+		// If the stack is now empty, we're before the first item in the tree.
+		if it.cursors.empty() {
+			return false
+		}
+		// The new top's index points to a child, which we've just finished
+		// exploring. That child is to the right of the item we want to advance to,
+		// so decrement the index.
+		top = it.cursors.incTop(-1)
+	}
+	return true
+}
+
+// A cursor is effectively a pointer into a node. A stack of cursors identifies an item in the tree,
+// and makes it possible to move to the next or previous item efficiently.
+//
+// If the cursor is on the top of the stack, its index points into the node's items slice, selecting
+// the current item. Otherwise, the index points into the children slice and identifies the child
+// that is next in the stack.
+type cursor struct {
+	node  *node
+	index int
+}
+
+// A cursorStack is a stack of cursors, representing a path of nodes from the root of the tree.
+type cursorStack []cursor

+func (s *cursorStack) push(c cursor) cursorStack {
+	*s = append(*s, c)
+	return *s
+}
+
+func (s *cursorStack) pop() cursor {
+	last := len(*s) - 1
+	t := (*s)[last]
+	*s = (*s)[:last]
+	return t
+}
+
+func (s *cursorStack) top() cursor {
+	return (*s)[len(*s)-1]
+}
+
+func (s *cursorStack) empty() bool {
+	return len(*s) == 0
+}
+
+// incTop increments top's index by n and returns it.
+func (s *cursorStack) incTop(n int) cursor {
+	(*s)[len(*s)-1].index += n // Don't call top: modify the original, not a copy.
+	return s.top()
+}
diff --git a/vendor/cloud.google.com/go/internal/btree/btree_test.go b/vendor/cloud.google.com/go/internal/btree/btree_test.go
new file mode 100644
index 0000000..0a12104
--- /dev/null
+++ b/vendor/cloud.google.com/go/internal/btree/btree_test.go
@@ -0,0 +1,422 @@
+// Copyright 2014 Google Inc.
+// Modified 2018 by Jonathan Amsterdam (jbamsterdam@gmail.com)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
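+// A minimal usage sketch of the API defined in btree.go, in the style of the
+// tests below (New's signature is taken from those tests; note the package is
+// internal, so only code within cloud.google.com/go can import it):
+//
+//	tr := New(4, func(a, b interface{}) bool { return a.(int) < b.(int) })
+//	for _, k := range []int{5, 1, 3} {
+//		tr.Set(k, k*10) // ascending order is maintained regardless of insertion order
+//	}
+//	v, idx := tr.GetWithIndex(3) // v == 30; idx == 1, the key's 0-based rank
+//	for it := tr.Before(1); it.Next(); {
+//		fmt.Println(it.Index, it.Key, it.Value) // 0 1 10; 1 3 30; 2 5 50
+//	}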
+
+package btree
+
+import (
+	"flag"
+	"fmt"
+	"math/rand"
+	"os"
+	"sort"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/google/go-cmp/cmp"
+)
+
+func init() {
+	seed := time.Now().Unix()
+	fmt.Println(seed)
+	rand.Seed(seed)
+}
+
+type itemWithIndex struct {
+	Key   Key
+	Value Value
+	Index int
+}
+
+// perm returns a random permutation of n Int items in the range [0, n).
+func perm(n int) []itemWithIndex {
+	var out []itemWithIndex
+	for _, v := range rand.Perm(n) {
+		out = append(out, itemWithIndex{v, v, v})
+	}
+	return out
+}
+
+// rang returns an ordered list of Int items in the range [0, n).
+func rang(n int) []itemWithIndex {
+	var out []itemWithIndex
+	for i := 0; i < n; i++ {
+		out = append(out, itemWithIndex{i, i, i})
+	}
+	return out
+}
+
+// all extracts all items from an iterator.
+func all(it *Iterator) []itemWithIndex {
+	var out []itemWithIndex
+	for it.Next() {
+		out = append(out, itemWithIndex{it.Key, it.Value, it.Index})
+	}
+	return out
+}
+
+// rangrev returns a reversed ordered list of Int items in the range [0, n).
+func rangrev(n int) []itemWithIndex {
+	var out []itemWithIndex
+	for i := n - 1; i >= 0; i-- {
+		out = append(out, itemWithIndex{i, i, i})
+	}
+	return out
+}
+
+func reverse(s []itemWithIndex) {
+	for i := 0; i < len(s)/2; i++ {
+		s[i], s[len(s)-i-1] = s[len(s)-i-1], s[i]
+	}
+}
+
+var btreeDegree = flag.Int("degree", 32, "B-Tree degree")
+
+func TestBTree(t *testing.T) {
+	tr := New(*btreeDegree, less)
+	const treeSize = 10000
+	for i := 0; i < 10; i++ {
+		if min, _ := tr.Min(); min != nil {
+			t.Fatalf("empty min, got %+v", min)
+		}
+		if max, _ := tr.Max(); max != nil {
+			t.Fatalf("empty max, got %+v", max)
+		}
+		for _, m := range perm(treeSize) {
+			if _, ok := tr.Set(m.Key, m.Value); ok {
+				t.Fatal("set found item", m)
+			}
+		}
+		for _, m := range perm(treeSize) {
+			_, ok, idx := tr.SetWithIndex(m.Key, m.Value)
+			if !ok {
+				t.Fatal("set didn't find item", m)
+			}
+			if idx != m.Index {
+				t.Fatalf("got index %d, want %d", idx, m.Index)
+			}
+		}
+		mink, minv := tr.Min()
+		if want := 0; mink != want || minv != want {
+			t.Fatalf("min: want %+v, got %+v, %+v", want, mink, minv)
+		}
+		maxk, maxv := tr.Max()
+		if want := treeSize - 1; maxk != want || maxv != want {
+			t.Fatalf("max: want %+v, got %+v, %+v", want, maxk, maxv)
+		}
+		got := all(tr.BeforeIndex(0))
+		want := rang(treeSize)
+		if !cmp.Equal(got, want) {
+			t.Fatalf("mismatch:\n got: %v\nwant: %v", got, want)
+		}
+
+		for _, m := range perm(treeSize) {
+			if _, removed := tr.Delete(m.Key); !removed {
+				t.Fatalf("didn't find %v", m)
+			}
+		}
+		if got = all(tr.BeforeIndex(0)); len(got) > 0 {
+			t.Fatalf("some left!: %v", got)
+		}
+	}
+}
+
+func TestAt(t *testing.T) {
+	tr := New(*btreeDegree, less)
+	for _, m := range perm(100) {
+		tr.Set(m.Key, m.Value)
+	}
+	for i := 0; i < tr.Len(); i++ {
+		gotk, gotv := tr.At(i)
+		if want := i; gotk != want || gotv != want {
+			t.Fatalf("At(%d) = (%v, %v), want (%v, %v)", i, gotk, gotv, want, want)
+		}
+	}
+}
+
+func TestGetWithIndex(t *testing.T) {
+	tr := New(*btreeDegree, less)
+	for _, m := range perm(100) {
+		tr.Set(m.Key, m.Value)
+	}
+	for i := 0; i < tr.Len(); i++ {
+		gotv, goti := tr.GetWithIndex(i)
+		wantv, wanti := i, i
+		if gotv != wantv || goti != wanti {
+			t.Errorf("GetWithIndex(%d) = (%v, %v), want (%v, %v)",
+				i, gotv, goti, wantv, wanti)
+		}
+	}
+	_, got := tr.GetWithIndex(100)
+	if want := -1; got != want {
+		t.Errorf("got %d, want %d", got, want)
+	}
+}
+
+func TestSetWithIndex(t *testing.T) {
+	tr := New(4, less) // use a small degree to cover more
cases + var contents []int + for _, m := range perm(100) { + _, _, idx := tr.SetWithIndex(m.Key, m.Value) + contents = append(contents, m.Index) + sort.Ints(contents) + want := -1 + for i, c := range contents { + if c == m.Index { + want = i + break + } + } + if idx != want { + t.Fatalf("got %d, want %d", idx, want) + } + } +} + +func TestDeleteMin(t *testing.T) { + tr := New(3, less) + for _, m := range perm(100) { + tr.Set(m.Key, m.Value) + } + var got []itemWithIndex + for i := 0; tr.Len() > 0; i++ { + k, v := tr.DeleteMin() + got = append(got, itemWithIndex{k, v, i}) + } + if want := rang(100); !cmp.Equal(got, want) { + t.Fatalf("got: %v\nwant: %v", got, want) + } +} + +func TestDeleteMax(t *testing.T) { + tr := New(3, less) + for _, m := range perm(100) { + tr.Set(m.Key, m.Value) + } + var got []itemWithIndex + for tr.Len() > 0 { + k, v := tr.DeleteMax() + got = append(got, itemWithIndex{k, v, tr.Len()}) + } + reverse(got) + if want := rang(100); !cmp.Equal(got, want) { + t.Fatalf("got: %v\nwant: %v", got, want) + } +} + +func TestIterator(t *testing.T) { + const size = 10 + + tr := New(2, less) + // Empty tree. + for i, it := range []*Iterator{ + tr.BeforeIndex(0), + tr.Before(3), + tr.After(3), + } { + if got, want := it.Next(), false; got != want { + t.Errorf("empty, #%d: got %t, want %t", i, got, want) + } + } + + // Root with zero children. + tr.Set(1, nil) + tr.Delete(1) + if !(tr.root != nil && len(tr.root.children) == 0 && len(tr.root.items) == 0) { + t.Fatal("wrong shape tree") + } + for i, it := range []*Iterator{ + tr.BeforeIndex(0), + tr.Before(3), + tr.After(3), + } { + if got, want := it.Next(), false; got != want { + t.Errorf("zero root, #%d: got %t, want %t", i, got, want) + } + } + + // Tree with size elements. + p := perm(size) + for _, v := range p { + tr.Set(v.Key, v.Value) + } + + it := tr.BeforeIndex(0) + got := all(it) + want := rang(size) + if !cmp.Equal(got, want) { + t.Fatalf("got %+v\nwant %+v\n", got, want) + } + + for i, w := range want { + it := tr.Before(w.Key) + got = all(it) + wn := want[w.Key.(int):] + if !cmp.Equal(got, wn) { + t.Fatalf("got %+v\nwant %+v\n", got, wn) + } + + it = tr.BeforeIndex(i) + got = all(it) + if !cmp.Equal(got, wn) { + t.Fatalf("got %+v\nwant %+v\n", got, wn) + } + + it = tr.After(w.Key) + got = all(it) + wn = append([]itemWithIndex(nil), want[:w.Key.(int)+1]...) + reverse(wn) + if !cmp.Equal(got, wn) { + t.Fatalf("got %+v\nwant %+v\n", got, wn) + } + + it = tr.AfterIndex(i) + got = all(it) + if !cmp.Equal(got, wn) { + t.Fatalf("got %+v\nwant %+v\n", got, wn) + } + } + + // Non-existent keys. + tr = New(2, less) + for _, v := range p { + tr.Set(v.Key.(int)*2, v.Value) + } + // tr has only even keys: 0, 2, 4, ... Iterate from odd keys. + for i := -1; i <= size+1; i += 2 { + it := tr.Before(i) + got := all(it) + var want []itemWithIndex + for j := (i + 1) / 2; j < size; j++ { + want = append(want, itemWithIndex{j * 2, j, j}) + } + if !cmp.Equal(got, want) { + tr.print(os.Stdout) + t.Fatalf("%d: got %+v\nwant %+v\n", i, got, want) + } + + it = tr.After(i) + got = all(it) + want = nil + for j := (i - 1) / 2; j >= 0; j-- { + want = append(want, itemWithIndex{j * 2, j, j}) + } + if !cmp.Equal(got, want) { + t.Fatalf("%d: got %+v\nwant %+v\n", i, got, want) + } + } +} + +func TestMixed(t *testing.T) { + // Test random, mixed insertions and deletions. 
+	const maxSize = 1000
+	tr := New(3, less)
+	has := map[int]bool{}
+	for i := 0; i < 10000; i++ {
+		r := rand.Intn(maxSize)
+		if r >= tr.Len() {
+			old, ok := tr.Set(r, r)
+			if has[r] != ok {
+				t.Fatalf("%d: has=%t, ok=%t", r, has[r], ok)
+			}
+			if ok && old.(int) != r {
+				t.Fatalf("%d: bad old", r)
+			}
+			has[r] = true
+			if got, want := tr.Get(r), r; got != want {
+				t.Fatalf("Get(%d) = %d, want %d", r, got, want)
+			}
+		} else {
+			// Exploit random map iteration order.
+			var d int
+			for d = range has {
+				break
+			}
+			old, removed := tr.Delete(d)
+			if !removed {
+				t.Fatalf("%d not found", d)
+			}
+			if old.(int) != d {
+				t.Fatalf("%d: bad old", d)
+			}
+			delete(has, d)
+		}
+	}
+}
+
+const cloneTestSize = 10000
+
+func cloneTest(t *testing.T, b *BTree, start int, p []itemWithIndex, wg *sync.WaitGroup, treec chan<- *BTree) {
+	treec <- b
+	for i := start; i < cloneTestSize; i++ {
+		b.Set(p[i].Key, p[i].Value)
+		if i%(cloneTestSize/5) == 0 {
+			wg.Add(1)
+			go cloneTest(t, b.Clone(), i+1, p, wg, treec)
+		}
+	}
+	wg.Done()
+}
+
+func TestCloneConcurrentOperations(t *testing.T) {
+	b := New(*btreeDegree, less)
+	treec := make(chan *BTree)
+	p := perm(cloneTestSize)
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go cloneTest(t, b, 0, p, &wg, treec)
+	var trees []*BTree
+	donec := make(chan struct{})
+	go func() {
+		for t := range treec {
+			trees = append(trees, t)
+		}
+		close(donec)
+	}()
+	wg.Wait()
+	close(treec)
+	<-donec
+	want := rang(cloneTestSize)
+	for i, tree := range trees {
+		if !cmp.Equal(want, all(tree.BeforeIndex(0))) {
+			t.Errorf("tree %v mismatch", i)
+		}
+	}
+	toRemove := rang(cloneTestSize)[cloneTestSize/2:]
+	for i := 0; i < len(trees)/2; i++ {
+		tree := trees[i]
+		wg.Add(1)
+		go func() {
+			for _, m := range toRemove {
+				tree.Delete(m.Key)
+			}
+			wg.Done()
+		}()
+	}
+	wg.Wait()
+	for i, tree := range trees {
+		var wantpart []itemWithIndex
+		if i < len(trees)/2 {
+			wantpart = want[:cloneTestSize/2]
+		} else {
+			wantpart = want
+		}
+		if got := all(tree.BeforeIndex(0)); !cmp.Equal(wantpart, got) {
+			t.Errorf("tree %v mismatch, want %v got %v", i, len(wantpart), len(got))
+		}
+	}
+}
+
+func less(a, b interface{}) bool { return a.(int) < b.(int) }
diff --git a/vendor/cloud.google.com/go/internal/btree/debug.go b/vendor/cloud.google.com/go/internal/btree/debug.go
new file mode 100644
index 0000000..b983cd0
--- /dev/null
+++ b/vendor/cloud.google.com/go/internal/btree/debug.go
@@ -0,0 +1,37 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
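+// The copy-on-write contract that TestCloneConcurrentOperations exercises
+// above, shown in miniature (a sketch of the semantics documented on Clone,
+// assuming the same package scope as the tests):
+//
+//	tr := New(4, func(a, b interface{}) bool { return a.(int) < b.(int) })
+//	tr.Set(1, "one")
+//	snap := tr.Clone()                // cheap: all nodes are shared with tr
+//	tr.Set(2, "two")                  // copy-on-write: tr copies only what it touches
+//	fmt.Println(tr.Len(), snap.Len()) // 2 1; snap still sees the old contents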
+ +package btree + +import ( + "fmt" + "io" + "strings" +) + +func (t *BTree) print(w io.Writer) { + t.root.print(w, 0) +} + +func (n *node) print(w io.Writer, level int) { + indent := strings.Repeat(" ", level) + if n == nil { + fmt.Fprintf(w, "%s\n", indent) + return + } + fmt.Fprintf(w, "%s%v\n", indent, n.items) + for _, c := range n.children { + c.print(w, level+1) + } +} diff --git a/vendor/cloud.google.com/go/internal/fields/fields.go b/vendor/cloud.google.com/go/internal/fields/fields.go new file mode 100644 index 0000000..882820f --- /dev/null +++ b/vendor/cloud.google.com/go/internal/fields/fields.go @@ -0,0 +1,468 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package fields provides a view of the fields of a struct that follows the Go +// rules, amended to consider tags and case insensitivity. +// +// Usage +// +// First define a function that interprets tags: +// +// func parseTag(st reflect.StructTag) (name string, keep bool, other interface{}, err error) { ... } +// +// The function's return values describe whether to ignore the field +// completely or provide an alternate name, as well as other data from the +// parse that is stored to avoid re-parsing. +// +// Then define a function to validate the type: +// +// func validate(t reflect.Type) error { ... } +// +// Then, if necessary, define a function to specify leaf types - types +// which should be considered one field and not be recursed into: +// +// func isLeafType(t reflect.Type) bool { ... } +// +// eg: +// +// func isLeafType(t reflect.Type) bool { +// return t == reflect.TypeOf(time.Time{}) +// } +// +// Next, construct a Cache, passing your functions. As its name suggests, a +// Cache remembers validation and field information for a type, so subsequent +// calls with the same type are very fast. +// +// cache := fields.NewCache(parseTag, validate, isLeafType) +// +// To get the fields of a struct type as determined by the above rules, call +// the Fields method: +// +// fields, err := cache.Fields(reflect.TypeOf(MyStruct{})) +// +// The return value can be treated as a slice of Fields. +// +// Given a string, such as a key or column name obtained during unmarshalling, +// call Match on the list of fields to find a field whose name is the best +// match: +// +// field := fields.Match(name) +// +// Match looks for an exact match first, then falls back to a case-insensitive +// comparison. +package fields + +import ( + "bytes" + "errors" + "reflect" + "sort" + "strings" + + "cloud.google.com/go/internal/atomiccache" +) + +// A Field records information about a struct field. +type Field struct { + Name string // effective field name + NameFromTag bool // did Name come from a tag? 
+	Type      reflect.Type // field type
+	Index     []int        // index sequence, for reflect.Value.FieldByIndex
+	ParsedTag interface{}  // third return value of the parseTag function
+
+	nameBytes []byte
+	equalFold func(s, t []byte) bool
+}
+
+type ParseTagFunc func(reflect.StructTag) (name string, keep bool, other interface{}, err error)
+
+type ValidateFunc func(reflect.Type) error
+
+type LeafTypesFunc func(reflect.Type) bool
+
+// A Cache records information about the fields of struct types.
+//
+// A Cache is safe for use by multiple goroutines.
+type Cache struct {
+	parseTag  ParseTagFunc
+	validate  ValidateFunc
+	leafTypes LeafTypesFunc
+	cache     atomiccache.Cache // from reflect.Type to cacheValue
+}
+
+// NewCache constructs a Cache.
+//
+// Its first argument should be a function that accepts
+// a struct tag and returns four values: an alternative name for the field
+// extracted from the tag, a boolean saying whether to keep the field or ignore
+// it, additional data that is stored with the field information to avoid
+// having to parse the tag again, and an error.
+//
+// Its second argument should be a function that accepts a reflect.Type and
+// returns an error if the struct type is invalid in any way. For example, it
+// may check that all of the struct field tags are valid, or that all fields
+// are of an appropriate type.
+//
+// Its third argument should be a function that reports whether a type should
+// be treated as a leaf: considered a single field and not recursed into.
+func NewCache(parseTag ParseTagFunc, validate ValidateFunc, leafTypes LeafTypesFunc) *Cache {
+	if parseTag == nil {
+		parseTag = func(reflect.StructTag) (string, bool, interface{}, error) {
+			return "", true, nil, nil
+		}
+	}
+	if validate == nil {
+		validate = func(reflect.Type) error {
+			return nil
+		}
+	}
+	if leafTypes == nil {
+		leafTypes = func(reflect.Type) bool {
+			return false
+		}
+	}
+
+	return &Cache{
+		parseTag:  parseTag,
+		validate:  validate,
+		leafTypes: leafTypes,
+	}
+}
+
+// A fieldScan represents an item on the fieldByNameFunc scan work list.
+type fieldScan struct {
+	typ   reflect.Type
+	index []int
+}
+
+// Fields returns all the exported fields of t, which must be a struct type. It
+// follows the standard Go rules for embedded fields, modified by the presence
+// of tags. The result is sorted lexicographically by index.
+//
+// These rules apply in the absence of tags:
+// Anonymous struct fields are treated as if their inner exported fields were
+// fields in the outer struct (embedding). The result includes all fields that
+// aren't shadowed by fields at higher level of embedding. If more than one
+// field with the same name exists at the same level of embedding, it is
+// excluded. An anonymous field that is not of struct type is treated as having
+// its type as its name.
+//
+// Tags modify these rules as follows:
+// A field's tag is used as its name.
+// An anonymous struct field with a name given in its tag is treated as
+// a field having that name, rather than an embedded struct (the struct's
+// fields will not be returned).
+// If more than one field with the same name exists at the same level of embedding,
+// but exactly one of them is tagged, then the tagged field is reported and the others
+// are ignored.
+func (c *Cache) Fields(t reflect.Type) (List, error) {
+	if t.Kind() != reflect.Struct {
+		panic("fields: Fields of non-struct type")
+	}
+	return c.cachedTypeFields(t)
+}
+
+// A List is a list of Fields.
+type List []Field
+
+// Match returns the field in the list whose name best matches the supplied
+// name, or nil if no field does. If there is a field with the exact name, it
+// is returned.
Otherwise the first field (sorted by index) whose name matches +// case-insensitively is returned. +func (l List) Match(name string) *Field { + return l.MatchBytes([]byte(name)) +} + +// MatchBytes is identical to Match, except that the argument is a byte slice. +func (l List) MatchBytes(name []byte) *Field { + var f *Field + for i := range l { + ff := &l[i] + if bytes.Equal(ff.nameBytes, name) { + return ff + } + if f == nil && ff.equalFold(ff.nameBytes, name) { + f = ff + } + } + return f +} + +type cacheValue struct { + fields List + err error +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +// This code has been copied and modified from +// https://go.googlesource.com/go/+/go1.7.3/src/encoding/json/encode.go. +func (c *Cache) cachedTypeFields(t reflect.Type) (List, error) { + cv := c.cache.Get(t, func() interface{} { + if err := c.validate(t); err != nil { + return cacheValue{nil, err} + } + f, err := c.typeFields(t) + return cacheValue{List(f), err} + }).(cacheValue) + return cv.fields, cv.err +} + +func (c *Cache) typeFields(t reflect.Type) ([]Field, error) { + fields, err := c.listFields(t) + if err != nil { + return nil, err + } + sort.Sort(byName(fields)) + // Delete all fields that are hidden by the Go rules for embedded fields. + + // The fields are sorted in primary order of name, secondary order of field + // index length. So the first field with a given name is the dominant one. + var out []Field + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.Name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.Name != name { + break + } + } + // Find the dominant field, if any, out of all fields that have the same name. + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + sort.Sort(byIndex(out)) + return out, nil +} + +func (c *Cache) listFields(t reflect.Type) ([]Field, error) { + // This uses the same condition that the Go language does: there must be a unique instance + // of the match at a given depth level. If there are multiple instances of a match at the + // same depth, they annihilate each other and inhibit any possible match at a lower level. + // The algorithm is breadth first search, one depth level at a time. + + // The current and next slices are work queues: + // current lists the fields to visit on this depth level, + // and next lists the fields on the next lower level. + current := []fieldScan{} + next := []fieldScan{{typ: t}} + + // nextCount records the number of times an embedded type has been + // encountered and considered for queueing in the 'next' slice. + // We only queue the first one, but we increment the count on each. + // If a struct type T can be reached more than once at a given depth level, + // then it annihilates itself and need not be considered at all when we + // process that next depth level. + var nextCount map[reflect.Type]int + + // visited records the structs that have been considered already. + // Embedded pointer fields can create cycles in the graph of + // reachable embedded types; visited avoids following those cycles. + // It also avoids duplicated effort: if we didn't find the field in an + // embedded type T at level 2, we won't find it in one at level 4 either. + visited := map[reflect.Type]bool{} + + var fields []Field // Fields found. 
+ + for len(next) > 0 { + current, next = next, current[:0] + count := nextCount + nextCount = nil + + // Process all the fields at this depth, now listed in 'current'. + // The loop queues embedded fields found in 'next', for processing during the next + // iteration. The multiplicity of the 'current' field counts is recorded + // in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'. + for _, scan := range current { + t := scan.typ + if visited[t] { + // We've looked through this type before, at a higher level. + // That higher level would shadow the lower level we're now at, + // so this one can't be useful to us. Ignore it. + continue + } + visited[t] = true + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + + exported := (f.PkgPath == "") + + // If a named field is unexported, ignore it. An anonymous + // unexported field is processed, because it may contain + // exported fields, which are visible. + if !exported && !f.Anonymous { + continue + } + + // Examine the tag. + tagName, keep, other, err := c.parseTag(f.Tag) + if err != nil { + return nil, err + } + if !keep { + continue + } + if c.leafTypes(f.Type) { + fields = append(fields, newField(f, tagName, other, scan.index, i)) + continue + } + + var ntyp reflect.Type + if f.Anonymous { + // Anonymous field of type T or *T. + ntyp = f.Type + if ntyp.Kind() == reflect.Ptr { + ntyp = ntyp.Elem() + } + } + + // Record fields with a tag name, non-anonymous fields, or + // anonymous non-struct fields. + if tagName != "" || ntyp == nil || ntyp.Kind() != reflect.Struct { + if !exported { + continue + } + fields = append(fields, newField(f, tagName, other, scan.index, i)) + if count[t] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Queue embedded struct fields for processing with next level, + // but only if the embedded types haven't already been queued. + if nextCount[ntyp] > 0 { + nextCount[ntyp] = 2 // exact multiple doesn't matter + continue + } + if nextCount == nil { + nextCount = map[reflect.Type]int{} + } + nextCount[ntyp] = 1 + if count[t] > 1 { + nextCount[ntyp] = 2 // exact multiple doesn't matter + } + var index []int + index = append(index, scan.index...) + index = append(index, i) + next = append(next, fieldScan{ntyp, index}) + } + } + } + return fields, nil +} + +func newField(f reflect.StructField, tagName string, other interface{}, index []int, i int) Field { + name := tagName + if name == "" { + name = f.Name + } + sf := Field{ + Name: name, + NameFromTag: tagName != "", + Type: f.Type, + ParsedTag: other, + nameBytes: []byte(name), + } + sf.equalFold = foldFunc(sf.nameBytes) + sf.Index = append(sf.Index, index...) + sf.Index = append(sf.Index, i) + return sf +} + +// byName sorts fields using the following criteria, in order: +// 1. name +// 2. embedding depth +// 3. tag presence (preferring a tagged field) +// 4. index sequence. +type byName []Field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].Name != x[j].Name { + return x[i].Name < x[j].Name + } + if len(x[i].Index) != len(x[j].Index) { + return len(x[i].Index) < len(x[j].Index) + } + if x[i].NameFromTag != x[j].NameFromTag { + return x[i].NameFromTag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. 
+type byIndex []Field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+	xi := x[i].Index
+	xj := x[j].Index
+	ln := len(xi)
+	if l := len(xj); l < ln {
+		ln = l
+	}
+	for k := 0; k < ln; k++ {
+		if xi[k] != xj[k] {
+			return xi[k] < xj[k]
+		}
+	}
+	return len(xi) < len(xj)
+}
+
+// dominantField looks through the fields, all of which are known to have the
+// same name, to find the single field that dominates the others using Go's
+// embedding rules, modified by the presence of tags. If there are multiple
+// top-level fields, the boolean will be false: this condition is an error in
+// Go and we skip all the fields.
+func dominantField(fs []Field) (Field, bool) {
+	// The fields are sorted in increasing index-length order, then by presence of tag.
+	// That means that the first field is the dominant one. We need only check
+	// for error cases: two fields at top level, either both tagged or neither tagged.
+	if len(fs) > 1 && len(fs[0].Index) == len(fs[1].Index) && fs[0].NameFromTag == fs[1].NameFromTag {
+		return Field{}, false
+	}
+	return fs[0], true
+}
+
+// ParseStandardTag extracts the sub-tag named by key, then parses it using the
+// de facto standard format introduced in encoding/json:
+//   "-" means "ignore this field". It must occur by itself. (ParseStandardTag returns an error
+//   in this case, whereas encoding/json accepts the "-" even if it is not alone.)
+//   "<name>" provides an alternative name for the field
+//   ",opt1,opt2,..." specifies options after the name.
+// The options are returned as a []string.
+func ParseStandardTag(key string, t reflect.StructTag) (name string, keep bool, options []string, err error) {
+	s := t.Get(key)
+	parts := strings.Split(s, ",")
+	if parts[0] == "-" {
+		if len(parts) > 1 {
+			return "", false, nil, errors.New(`"-" field tag with options`)
+		}
+		return "", false, nil, nil
+	}
+	if len(parts) > 1 {
+		options = parts[1:]
+	}
+	return parts[0], true, options, nil
+}
diff --git a/vendor/cloud.google.com/go/internal/fields/fields_test.go b/vendor/cloud.google.com/go/internal/fields/fields_test.go
new file mode 100644
index 0000000..925765d
--- /dev/null
+++ b/vendor/cloud.google.com/go/internal/fields/fields_test.go
@@ -0,0 +1,563 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
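+// A sketch of typical use of this package, with a hypothetical "db" tag key
+// and Row type (the package is internal, so only code within
+// cloud.google.com/go can import it):
+//
+//	type Row struct {
+//		Name    string `db:"name"`
+//		Age     int    `db:"age,omitempty"`
+//		Ignored string `db:"-"`
+//	}
+//
+//	parse := func(t reflect.StructTag) (string, bool, interface{}, error) {
+//		return fields.ParseStandardTag("db", t)
+//	}
+//	cache := fields.NewCache(parse, nil, nil)
+//	list, err := cache.Fields(reflect.TypeOf(Row{}))
+//	if err != nil {
+//		// handle the error
+//	}
+//	f := list.Match("AGE")     // exact match first, then case-insensitive: finds "age"
+//	opts := f.ParsedTag        // the tag options, here []string{"omitempty"}
+//	_, _ = f.Index, opts       // f.Index works with reflect.Value.FieldByIndex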
+ +package fields + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + + "cloud.google.com/go/internal/testutil" +) + +type embed1 struct { + Em1 int + Dup int // annihilates with embed2.Dup + Shadow int + embed3 +} + +type embed2 struct { + Dup int + embed3 + embed4 +} + +type embed3 struct { + Em3 int // annihilated because embed3 is in both embed1 and embed2 + embed5 +} + +type embed4 struct { + Em4 int + Dup int // annihilation of Dup in embed1, embed2 hides this Dup + *embed1 // ignored because it occurs at a higher level +} + +type embed5 struct { + x int +} + +type Anonymous int + +type S1 struct { + Exported int + unexported int + Shadow int // shadows S1.Shadow + embed1 + *embed2 + Anonymous +} + +type Time struct { + time.Time +} + +var intType = reflect.TypeOf(int(0)) + +func field(name string, tval interface{}, index ...int) *Field { + return &Field{ + Name: name, + Type: reflect.TypeOf(tval), + Index: index, + ParsedTag: []string(nil), + } +} + +func tfield(name string, tval interface{}, index ...int) *Field { + return &Field{ + Name: name, + Type: reflect.TypeOf(tval), + Index: index, + NameFromTag: true, + ParsedTag: []string(nil), + } +} + +func TestFieldsNoTags(t *testing.T) { + c := NewCache(nil, nil, nil) + got, err := c.Fields(reflect.TypeOf(S1{})) + if err != nil { + t.Fatal(err) + } + want := []*Field{ + field("Exported", int(0), 0), + field("Shadow", int(0), 2), + field("Em1", int(0), 3, 0), + field("Em4", int(0), 4, 2, 0), + field("Anonymous", Anonymous(0), 5), + } + for _, f := range want { + f.ParsedTag = nil + } + if msg, ok := compareFields(got, want); !ok { + t.Error(msg) + } +} + +func TestAgainstJSONEncodingNoTags(t *testing.T) { + // Demonstrates that this package produces the same set of fields as encoding/json. + s1 := S1{ + Exported: 1, + unexported: 2, + Shadow: 3, + embed1: embed1{ + Em1: 4, + Dup: 5, + Shadow: 6, + embed3: embed3{ + Em3: 7, + embed5: embed5{x: 8}, + }, + }, + embed2: &embed2{ + Dup: 9, + embed3: embed3{ + Em3: 10, + embed5: embed5{x: 11}, + }, + embed4: embed4{ + Em4: 12, + Dup: 13, + embed1: &embed1{Em1: 14}, + }, + }, + Anonymous: Anonymous(15), + } + var want S1 + want.embed2 = &embed2{} // need this because reflection won't create it + jsonRoundTrip(t, s1, &want) + var got S1 + got.embed2 = &embed2{} + fields, err := NewCache(nil, nil, nil).Fields(reflect.TypeOf(got)) + if err != nil { + t.Fatal(err) + } + setFields(fields, &got, s1) + if !testutil.Equal(got, want, + cmp.AllowUnexported(S1{}, embed1{}, embed2{}, embed3{}, embed4{}, embed5{})) { + t.Errorf("got\n%+v\nwant\n%+v", got, want) + } +} + +// Tests use of LeafTypes parameter to NewCache +func TestAgainstJSONEncodingEmbeddedTime(t *testing.T) { + timeLeafFn := func(t reflect.Type) bool { + return t == reflect.TypeOf(time.Time{}) + } + // Demonstrates that this package can produce the same set of + // fields as encoding/json for a struct with an embedded time.Time. 
+ now := time.Now().UTC() + myt := Time{ + now, + } + var want Time + jsonRoundTrip(t, myt, &want) + var got Time + fields, err := NewCache(nil, nil, timeLeafFn).Fields(reflect.TypeOf(got)) + if err != nil { + t.Fatal(err) + } + setFields(fields, &got, myt) + if !testutil.Equal(got, want) { + t.Errorf("got\n%+v\nwant\n%+v", got, want) + } +} + +type S2 struct { + NoTag int + XXX int `json:"tag"` // tag name takes precedence + Anonymous `json:"anon"` // anonymous non-structs also get their name from the tag + unexported int `json:"tag"` + Embed `json:"em"` // embedded structs with tags become fields + Tag int + YYY int `json:"Tag"` // tag takes precedence over untagged field of the same name + Empty int `json:""` // empty tag is noop + tEmbed1 + tEmbed2 +} + +type Embed struct { + Em int +} + +type tEmbed1 struct { + Dup int + X int `json:"Dup2"` +} + +type tEmbed2 struct { + Y int `json:"Dup"` // takes precedence over tEmbed1.Dup because it is tagged + Z int `json:"Dup2"` // same name as tEmbed1.X and both tagged, so ignored +} + +func jsonTagParser(t reflect.StructTag) (name string, keep bool, other interface{}, err error) { + return ParseStandardTag("json", t) +} + +func validateFunc(t reflect.Type) (err error) { + if t.Kind() != reflect.Struct { + return errors.New("non-struct type used") + } + + for i := 0; i < t.NumField(); i++ { + if t.Field(i).Type.Kind() == reflect.Slice { + return fmt.Errorf("slice field found at field %s on struct %s", t.Field(i).Name, t.Name()) + } + } + + return nil +} + +func TestFieldsWithTags(t *testing.T) { + got, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S2{})) + if err != nil { + t.Fatal(err) + } + want := []*Field{ + field("NoTag", int(0), 0), + tfield("tag", int(0), 1), + tfield("anon", Anonymous(0), 2), + tfield("em", Embed{}, 4), + tfield("Tag", int(0), 6), + field("Empty", int(0), 7), + tfield("Dup", int(0), 8, 0), + } + if msg, ok := compareFields(got, want); !ok { + t.Error(msg) + } +} + +func TestAgainstJSONEncodingWithTags(t *testing.T) { + // Demonstrates that this package produces the same set of fields as encoding/json. + s2 := S2{ + NoTag: 1, + XXX: 2, + Anonymous: 3, + Embed: Embed{ + Em: 4, + }, + tEmbed1: tEmbed1{ + Dup: 5, + X: 6, + }, + tEmbed2: tEmbed2{ + Y: 7, + Z: 8, + }, + } + var want S2 + jsonRoundTrip(t, s2, &want) + var got S2 + fields, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(got)) + if err != nil { + t.Fatal(err) + } + setFields(fields, &got, s2) + if !testutil.Equal(got, want, cmp.AllowUnexported(S2{})) { + t.Errorf("got\n%+v\nwant\n%+v", got, want) + } +} + +func TestUnexportedAnonymousNonStruct(t *testing.T) { + // An unexported anonymous non-struct field should not be recorded. + // This is currently a bug in encoding/json. + // https://github.com/golang/go/issues/18009 + type ( + u int + v int + S struct { + u + v `json:"x"` + int + } + ) + + got, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S{})) + if err != nil { + t.Fatal(err) + } + if len(got) != 0 { + t.Errorf("got %d fields, want 0", len(got)) + } +} + +func TestUnexportedAnonymousStruct(t *testing.T) { + // An unexported anonymous struct with a tag is ignored. + // This is currently a bug in encoding/json. 
+ // https://github.com/golang/go/issues/18009 + type ( + s1 struct{ X int } + S2 struct { + s1 `json:"Y"` + } + ) + got, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S2{})) + if err != nil { + t.Fatal(err) + } + if len(got) != 0 { + t.Errorf("got %d fields, want 0", len(got)) + } +} + +func TestDominantField(t *testing.T) { + // With fields sorted by index length and then by tag presence, + // the dominant field is always the first. Make sure all error + // cases are caught. + for _, test := range []struct { + fields []Field + wantOK bool + }{ + // A single field is OK. + {[]Field{{Index: []int{0}}}, true}, + {[]Field{{Index: []int{0}, NameFromTag: true}}, true}, + // A single field at top level is OK. + {[]Field{{Index: []int{0}}, {Index: []int{1, 0}}}, true}, + {[]Field{{Index: []int{0}}, {Index: []int{1, 0}, NameFromTag: true}}, true}, + {[]Field{{Index: []int{0}, NameFromTag: true}, {Index: []int{1, 0}, NameFromTag: true}}, true}, + // A single tagged field is OK. + {[]Field{{Index: []int{0}, NameFromTag: true}, {Index: []int{1}}}, true}, + // Two untagged fields at the same level is an error. + {[]Field{{Index: []int{0}}, {Index: []int{1}}}, false}, + // Two tagged fields at the same level is an error. + {[]Field{{Index: []int{0}, NameFromTag: true}, {Index: []int{1}, NameFromTag: true}}, false}, + } { + _, gotOK := dominantField(test.fields) + if gotOK != test.wantOK { + t.Errorf("%v: got %t, want %t", test.fields, gotOK, test.wantOK) + } + } +} + +func TestIgnore(t *testing.T) { + type S struct { + X int `json:"-"` + } + got, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S{})) + if err != nil { + t.Fatal(err) + } + if len(got) != 0 { + t.Errorf("got %d fields, want 0", len(got)) + } +} + +func TestParsedTag(t *testing.T) { + type S struct { + X int `json:"name,omitempty"` + } + got, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S{})) + if err != nil { + t.Fatal(err) + } + want := []*Field{ + {Name: "name", NameFromTag: true, Type: intType, + Index: []int{0}, ParsedTag: []string{"omitempty"}}, + } + if msg, ok := compareFields(got, want); !ok { + t.Error(msg) + } +} + +func TestValidateFunc(t *testing.T) { + type MyInvalidStruct struct { + A string + B []int + } + + _, err := NewCache(nil, validateFunc, nil).Fields(reflect.TypeOf(MyInvalidStruct{})) + if err == nil { + t.Fatal("expected error, got nil") + } + + type MyValidStruct struct { + A string + B int + } + _, err = NewCache(nil, validateFunc, nil).Fields(reflect.TypeOf(MyValidStruct{})) + if err != nil { + t.Fatalf("expected nil, got error: %s\n", err) + } +} + +func compareFields(got []Field, want []*Field) (msg string, ok bool) { + if len(got) != len(want) { + return fmt.Sprintf("got %d fields, want %d", len(got), len(want)), false + } + for i, g := range got { + w := *want[i] + if !fieldsEqual(&g, &w) { + return fmt.Sprintf("got\n%+v\nwant\n%+v", g, w), false + } + } + return "", true +} + +// Need this because Field contains a function, which cannot be compared even +// by testutil.Equal. +func fieldsEqual(f1, f2 *Field) bool { + if f1 == nil || f2 == nil { + return f1 == f2 + } + return f1.Name == f2.Name && + f1.NameFromTag == f2.NameFromTag && + f1.Type == f2.Type && + testutil.Equal(f1.ParsedTag, f2.ParsedTag) +} + +// Set the fields of dst from those of src. +// dst must be a pointer to a struct value. +// src must be a struct value. 
+func setFields(fields []Field, dst, src interface{}) { + vsrc := reflect.ValueOf(src) + vdst := reflect.ValueOf(dst).Elem() + for _, f := range fields { + fdst := vdst.FieldByIndex(f.Index) + fsrc := vsrc.FieldByIndex(f.Index) + fdst.Set(fsrc) + } +} + +func jsonRoundTrip(t *testing.T, in, out interface{}) { + bytes, err := json.Marshal(in) + if err != nil { + t.Fatal(err) + } + if err := json.Unmarshal(bytes, out); err != nil { + t.Fatal(err) + } +} + +type S3 struct { + S4 + Abc int + AbC int + Tag int + X int `json:"Tag"` + unexported int +} + +type S4 struct { + ABc int + Y int `json:"Abc"` // ignored because of top-level Abc +} + +func TestMatchingField(t *testing.T) { + fields, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S3{})) + if err != nil { + t.Fatal(err) + } + for _, test := range []struct { + name string + want *Field + }{ + // Exact match wins. + {"Abc", field("Abc", int(0), 1)}, + {"AbC", field("AbC", int(0), 2)}, + {"ABc", field("ABc", int(0), 0, 0)}, + // If there are multiple matches but no exact match or tag, + // the first field wins, lexicographically by index. + // Here, "ABc" is at a deeper embedding level, but since S4 appears + // first in S3, its index precedes the other fields of S3. + {"abc", field("ABc", int(0), 0, 0)}, + // Tag name takes precedence over untagged field of the same name. + {"Tag", tfield("Tag", int(0), 4)}, + // Unexported fields disappear. + {"unexported", nil}, + // Untagged embedded structs disappear. + {"S4", nil}, + } { + if got := fields.Match(test.name); !fieldsEqual(got, test.want) { + t.Errorf("match %q:\ngot %+v\nwant %+v", test.name, got, test.want) + } + } +} + +func TestAgainstJSONMatchingField(t *testing.T) { + s3 := S3{ + S4: S4{ABc: 1, Y: 2}, + Abc: 3, + AbC: 4, + Tag: 5, + X: 6, + unexported: 7, + } + var want S3 + jsonRoundTrip(t, s3, &want) + v := reflect.ValueOf(want) + fields, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S3{})) + if err != nil { + t.Fatal(err) + } + for _, test := range []struct { + name string + got int + }{ + {"Abc", 3}, + {"AbC", 4}, + {"ABc", 1}, + {"abc", 1}, + {"Tag", 6}, + } { + f := fields.Match(test.name) + if f == nil { + t.Fatalf("%s: no match", test.name) + } + w := v.FieldByIndex(f.Index).Interface() + if test.got != w { + t.Errorf("%s: got %d, want %d", test.name, test.got, w) + } + } +} + +func TestTagErrors(t *testing.T) { + called := false + c := NewCache(func(t reflect.StructTag) (string, bool, interface{}, error) { + called = true + s := t.Get("f") + if s == "bad" { + return "", false, nil, errors.New("error") + } + return s, true, nil, nil + }, nil, nil) + + type T struct { + X int `f:"ok"` + Y int `f:"bad"` + } + + _, err := c.Fields(reflect.TypeOf(T{})) + if !called { + t.Fatal("tag parser not called") + } + if err == nil { + t.Error("want error, got nil") + } + // Second time, we should cache the error. + called = false + _, err = c.Fields(reflect.TypeOf(T{})) + if called { + t.Fatal("tag parser called on second time") + } + if err == nil { + t.Error("want error, got nil") + } +} diff --git a/vendor/cloud.google.com/go/internal/fields/fold.go b/vendor/cloud.google.com/go/internal/fields/fold.go new file mode 100644 index 0000000..10a6818 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/fields/fold.go @@ -0,0 +1,156 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fields
+
+// This file was copied from https://go.googlesource.com/go/+/go1.7.3/src/encoding/json/fold.go.
+// Only the license and package were changed.
+
+import (
+	"bytes"
+	"unicode/utf8"
+)
+
+const (
+	caseMask     = ^byte(0x20) // Mask to ignore case in ASCII.
+	kelvin       = '\u212a'
+	smallLongEss = '\u017f'
+)
+
+// foldFunc returns one of four different case folding equivalence
+// functions, from most general (and slow) to fastest:
+//
+// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
+// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
+// 3) asciiEqualFold, no special, but includes non-letters (including _)
+// 4) simpleLetterEqualFold, no specials, no non-letters.
+//
+// The letters S and K are special because they map to 3 runes, not just 2:
+// * S maps to s and to U+017F 'ſ' Latin small letter long s
+// * k maps to K and to U+212A 'K' Kelvin sign
+// See https://play.golang.org/p/tTxjOc0OGo
+//
+// The returned function is specialized for matching against s and
+// should only be given s. It's not curried for performance reasons.
+func foldFunc(s []byte) func(s, t []byte) bool {
+	nonLetter := false
+	special := false // special letter
+	for _, b := range s {
+		if b >= utf8.RuneSelf {
+			return bytes.EqualFold
+		}
+		upper := b & caseMask
+		if upper < 'A' || upper > 'Z' {
+			nonLetter = true
+		} else if upper == 'K' || upper == 'S' {
+			// See above for why these letters are special.
+			special = true
+		}
+	}
+	if special {
+		return equalFoldRight
+	}
+	if nonLetter {
+		return asciiEqualFold
+	}
+	return simpleLetterEqualFold
+}
+
+// equalFoldRight is a specialization of bytes.EqualFold when s is
+// known to be all ASCII (including punctuation), but contains an 's',
+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
+// See comments on foldFunc.
+func equalFoldRight(s, t []byte) bool {
+	for _, sb := range s {
+		if len(t) == 0 {
+			return false
+		}
+		tb := t[0]
+		if tb < utf8.RuneSelf {
+			if sb != tb {
+				sbUpper := sb & caseMask
+				if 'A' <= sbUpper && sbUpper <= 'Z' {
+					if sbUpper != tb&caseMask {
+						return false
+					}
+				} else {
+					return false
+				}
+			}
+			t = t[1:]
+			continue
+		}
+		// sb is ASCII and t is not. t must be either kelvin
+		// sign or long s; sb must be s, S, k, or K.
+		tr, size := utf8.DecodeRune(t)
+		switch sb {
+		case 's', 'S':
+			if tr != smallLongEss {
+				return false
+			}
+		case 'k', 'K':
+			if tr != kelvin {
+				return false
+			}
+		default:
+			return false
+		}
+		t = t[size:]
+
+	}
+	if len(t) > 0 {
+		return false
+	}
+	return true
+}
+
+// asciiEqualFold is a specialization of bytes.EqualFold for use when
+// s is all ASCII (but may contain non-letters) and contains no
+// special-folding letters.
+// See comments on foldFunc.
+func asciiEqualFold(s, t []byte) bool {
+	if len(s) != len(t) {
+		return false
+	}
+	for i, sb := range s {
+		tb := t[i]
+		if sb == tb {
+			continue
+		}
+		if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
+			if sb&caseMask != tb&caseMask {
+				return false
+			}
+		} else {
+			return false
+		}
+	}
+	return true
+}
+
+// simpleLetterEqualFold is a specialization of bytes.EqualFold for
+// use when s is all ASCII letters (no underscores, etc) and also
+// doesn't contain 'k', 'K', 's', or 'S'.
+// See comments on foldFunc.
+func simpleLetterEqualFold(s, t []byte) bool {
+	if len(s) != len(t) {
+		return false
+	}
+	for i, b := range s {
+		if b&caseMask != t[i]&caseMask {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/cloud.google.com/go/internal/fields/fold_test.go b/vendor/cloud.google.com/go/internal/fields/fold_test.go
new file mode 100644
index 0000000..eadded1
--- /dev/null
+++ b/vendor/cloud.google.com/go/internal/fields/fold_test.go
@@ -0,0 +1,129 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fields
+
+// This file was copied from https://go.googlesource.com/go/+/go1.7.3/src/encoding/json/fold_test.go.
+// Only the license and package were changed.
+
+import (
+	"bytes"
+	"strings"
+	"testing"
+	"unicode/utf8"
+)
+
+var foldTests = []struct {
+	fn   func(s, t []byte) bool
+	s, t string
+	want bool
+}{
+	{equalFoldRight, "", "", true},
+	{equalFoldRight, "a", "a", true},
+	{equalFoldRight, "", "a", false},
+	{equalFoldRight, "a", "", false},
+	{equalFoldRight, "a", "A", true},
+	{equalFoldRight, "AB", "ab", true},
+	{equalFoldRight, "AB", "ac", false},
+	{equalFoldRight, "sbkKc", "ſbKKc", true},
+	{equalFoldRight, "SbKkc", "ſbKKc", true},
+	{equalFoldRight, "SbKkc", "ſbKK", false},
+	{equalFoldRight, "e", "é", false},
+	{equalFoldRight, "s", "S", true},
+
+	{simpleLetterEqualFold, "", "", true},
+	{simpleLetterEqualFold, "abc", "abc", true},
+	{simpleLetterEqualFold, "abc", "ABC", true},
+	{simpleLetterEqualFold, "abc", "ABCD", false},
+	{simpleLetterEqualFold, "abc", "xxx", false},
+
+	{asciiEqualFold, "a_B", "A_b", true},
+	{asciiEqualFold, "aa@", "aa`", false}, // verify 0x40 and 0x60 aren't case-equivalent
+}
+
+func TestFold(t *testing.T) {
+	for i, tt := range foldTests {
+		if got := tt.fn([]byte(tt.s), []byte(tt.t)); got != tt.want {
+			t.Errorf("%d. 
%q, %q = %v; want %v", i, tt.s, tt.t, got, tt.want) + } + truth := strings.EqualFold(tt.s, tt.t) + if truth != tt.want { + t.Errorf("strings.EqualFold doesn't agree with case %d", i) + } + } +} + +func TestFoldAgainstUnicode(t *testing.T) { + const bufSize = 5 + buf1 := make([]byte, 0, bufSize) + buf2 := make([]byte, 0, bufSize) + var runes []rune + for i := 0x20; i <= 0x7f; i++ { + runes = append(runes, rune(i)) + } + runes = append(runes, kelvin, smallLongEss) + + funcs := []struct { + name string + fold func(s, t []byte) bool + letter bool // must be ASCII letter + simple bool // must be simple ASCII letter (not 'S' or 'K') + }{ + { + name: "equalFoldRight", + fold: equalFoldRight, + }, + { + name: "asciiEqualFold", + fold: asciiEqualFold, + simple: true, + }, + { + name: "simpleLetterEqualFold", + fold: simpleLetterEqualFold, + simple: true, + letter: true, + }, + } + + for _, ff := range funcs { + for _, r := range runes { + if r >= utf8.RuneSelf { + continue + } + if ff.letter && !isASCIILetter(byte(r)) { + continue + } + if ff.simple && (r == 's' || r == 'S' || r == 'k' || r == 'K') { + continue + } + for _, r2 := range runes { + buf1 := append(buf1[:0], 'x') + buf2 := append(buf2[:0], 'x') + buf1 = buf1[:1+utf8.EncodeRune(buf1[1:bufSize], r)] + buf2 = buf2[:1+utf8.EncodeRune(buf2[1:bufSize], r2)] + buf1 = append(buf1, 'x') + buf2 = append(buf2, 'x') + want := bytes.EqualFold(buf1, buf2) + if got := ff.fold(buf1, buf2); got != want { + t.Errorf("%s(%q, %q) = %v; want %v", ff.name, buf1, buf2, got, want) + } + } + } + } +} + +func isASCIILetter(b byte) bool { + return ('A' <= b && b <= 'Z') || ('a' <= b && b <= 'z') +} diff --git a/vendor/cloud.google.com/go/internal/kokoro/build.sh b/vendor/cloud.google.com/go/internal/kokoro/build.sh new file mode 100755 index 0000000..869be0d --- /dev/null +++ b/vendor/cloud.google.com/go/internal/kokoro/build.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +# Fail on any error +set -eo pipefail + +# Display commands being run +set -x + +# cd to project dir on Kokoro instance +cd git/gocloud + +go version + +# Set $GOPATH +export GOPATH="$HOME/go" +GOCLOUD_HOME=$GOPATH/src/cloud.google.com/go +mkdir -p $GOCLOUD_HOME + +# Move code into $GOPATH and get dependencies +cp -R ./* $GOCLOUD_HOME +cd $GOCLOUD_HOME +go get -v ./... + +# # Don't run integration tests until we can protect against code from +# # untrusted forks reading and storing our service account key. +# cd internal/kokoro +# # Don't print out encryption keys, etc +# set +x +# key=$(cat $KOKORO_ARTIFACTS_DIR/keystore/*_encrypted_ba2d6f7723ed_key) +# iv=$(cat $KOKORO_ARTIFACTS_DIR/keystore/*_encrypted_ba2d6f7723ed_iv) +# pass=$(cat $KOKORO_ARTIFACTS_DIR/keystore/*_encrypted_ba2d6f7723ed_pass) + +# openssl aes-256-cbc -K $key -iv $iv -pass pass:$pass -in kokoro-key.json.enc -out key.json -d +# set -x + +# export GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json" +# export GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" +# cd $GOCLOUD_HOME + +# Run tests and tee output to log file, to be pushed to GCS as artifact. +go test -race -v -short ./... 2>&1 | tee $KOKORO_ARTIFACTS_DIR/$KOKORO_GERRIT_CHANGE_NUMBER.txt + +# Make sure README.md is up to date. 
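+# (The "diff" target exits nonzero when README.md is stale relative to
+# internal/readme/snippets.go; see internal/readme/Makefile.)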
+make -C internal/readme test diff
diff --git a/vendor/cloud.google.com/go/internal/kokoro/kokoro-key.json.enc b/vendor/cloud.google.com/go/internal/kokoro/kokoro-key.json.enc
new file mode 100644
index 0000000000000000000000000000000000000000..b23885469fcf2d3e8fa42629fc8af94274761689
GIT binary patch
literal 2448
[2448 bytes of base85-encoded binary patch data omitted]

diff --git a/vendor/cloud.google.com/go/internal/pretty/pretty.go b/vendor/cloud.google.com/go/internal/pretty/pretty.go
new file mode 100644
--- /dev/null
+++ b/vendor/cloud.google.com/go/internal/pretty/pretty.go
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pretty implements a simple pretty-printer. It is intended for
+// debugging the output of tests.
+//
+// It follows pointers and produces multi-line output for complex values like
+// slices, maps and structs.
+package pretty
+
+import (
+	"fmt"
+	"io"
+	"reflect"
+	"sort"
+	"strings"
+	"time"
+)
+
+// Indent is the string output at each level of indentation.
+var Indent = "    "
+
+// Value returns a value that will print prettily when used as an
+// argument for the %v or %s format specifiers.
+// With no flags, struct fields and map entries with default values are
+// omitted. With the '+' flag, all values are displayed.
+func Value(v interface{}) val { return val{v: v} }
+
+// val wraps a value for pretty-printing.
+type val struct{ v interface{} }
+
+// Format implements the fmt.Formatter interface.
+func (v val) Format(s fmt.State, c rune) {
+	if c == 'v' || c == 's' {
+		fprint(s, reflect.ValueOf(v.v), state{defaults: s.Flag('+')})
+	} else {
+		fmt.Fprintf(s, "%%!%c(pretty.Val)", c)
+	}
+}
+
+type state struct {
+	level    int
+	prefix   string
+	suffix   string
+	defaults bool
+}
+
+const maxLevel = 100
+
+var typeOfTime = reflect.TypeOf(time.Time{})
+
+func fprint(w io.Writer, v reflect.Value, s state) {
+	if s.level > maxLevel {
+		fmt.Fprintln(w, "pretty: max nested depth exceeded")
+		return
+	}
+	indent := strings.Repeat(Indent, s.level)
+	fmt.Fprintf(w, "%s%s", indent, s.prefix)
+	if isNil(v) {
+		fmt.Fprintf(w, "nil%s", s.suffix)
+		return
+	}
+	if v.Type().Kind() == reflect.Interface {
+		v = v.Elem()
+	}
+	if v.Type() == typeOfTime {
+		fmt.Fprintf(w, "%s%s", v.Interface(), s.suffix)
+		return
+	}
+	for v.Type().Kind() == reflect.Ptr {
+		fmt.Fprintf(w, "&")
+		v = v.Elem()
+	}
+	switch v.Type().Kind() {
+	default:
+		fmt.Fprintf(w, "%s%s", short(v), s.suffix)
+
+	case reflect.Array:
+		fmt.Fprintf(w, "%s{\n", v.Type())
+		for i := 0; i < v.Len(); i++ {
+			fprint(w, v.Index(i), state{
+				level:    s.level + 1,
+				prefix:   "",
+				suffix:   ",",
+				defaults: s.defaults,
+			})
+			fmt.Fprintln(w)
+		}
+		fmt.Fprintf(w, "%s}", indent)
+
+	case reflect.Slice:
+		fmt.Fprintf(w, "%s{", v.Type())
+		if v.Len() > 0 {
+			fmt.Fprintln(w)
+			for i := 0; i < v.Len(); i++ {
+				fprint(w, v.Index(i), state{
+					level:    s.level + 1,
+					prefix:   "",
+					suffix:   ",",
+					defaults: s.defaults,
+				})
+
fmt.Fprintln(w) + } + } + fmt.Fprintf(w, "%s}%s", indent, s.suffix) + + case reflect.Map: + fmt.Fprintf(w, "%s{", v.Type()) + if v.Len() > 0 { + fmt.Fprintln(w) + keys := v.MapKeys() + maybeSort(keys, v.Type().Key()) + for _, key := range keys { + val := v.MapIndex(key) + if s.defaults || !isDefault(val) { + fprint(w, val, state{ + level: s.level + 1, + prefix: short(key) + ": ", + suffix: ",", + defaults: s.defaults, + }) + fmt.Fprintln(w) + } + } + } + fmt.Fprintf(w, "%s}%s", indent, s.suffix) + + case reflect.Struct: + t := v.Type() + fmt.Fprintf(w, "%s{\n", t) + for i := 0; i < t.NumField(); i++ { + f := v.Field(i) + if s.defaults || !isDefault(f) { + fprint(w, f, state{ + level: s.level + 1, + prefix: t.Field(i).Name + ": ", + suffix: ",", + defaults: s.defaults, + }) + fmt.Fprintln(w) + } + } + fmt.Fprintf(w, "%s}%s", indent, s.suffix) + } +} + +func isNil(v reflect.Value) bool { + if !v.IsValid() { + return true + } + switch v.Type().Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + default: + return false + } +} + +func isDefault(v reflect.Value) bool { + if !v.IsValid() { + return true + } + t := v.Type() + switch t.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + default: + if !v.CanInterface() { + return false + } + return t.Comparable() && v.Interface() == reflect.Zero(t).Interface() + } +} + +// short returns a short, one-line string for v. +func short(v reflect.Value) string { + if !v.IsValid() { + return "nil" + } + if v.Type().Kind() == reflect.String { + return fmt.Sprintf("%q", v) + } + return fmt.Sprintf("%v", v) +} + +func indent(w io.Writer, level int) { + for i := 0; i < level; i++ { + io.WriteString(w, Indent) // ignore errors + } +} + +func maybeSort(vs []reflect.Value, t reflect.Type) { + if less := lessFunc(t); less != nil { + sort.Sort(&sorter{vs, less}) + } +} + +// lessFunc returns a function that implements the "<" operator +// for the given type, or nil if the type doesn't support "<" . 
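+// For example, string- and numeric-keyed maps get their keys sorted by
+// maybeSort above, while a map with struct keys falls through to the nil
+// default and prints its entries in unspecified order.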
+func lessFunc(t reflect.Type) func(v1, v2 interface{}) bool { + switch t.Kind() { + case reflect.String: + return func(v1, v2 interface{}) bool { return v1.(string) < v2.(string) } + case reflect.Int: + return func(v1, v2 interface{}) bool { return v1.(int) < v2.(int) } + case reflect.Int8: + return func(v1, v2 interface{}) bool { return v1.(int8) < v2.(int8) } + case reflect.Int16: + return func(v1, v2 interface{}) bool { return v1.(int16) < v2.(int16) } + case reflect.Int32: + return func(v1, v2 interface{}) bool { return v1.(int32) < v2.(int32) } + case reflect.Int64: + return func(v1, v2 interface{}) bool { return v1.(int64) < v2.(int64) } + case reflect.Uint: + return func(v1, v2 interface{}) bool { return v1.(uint) < v2.(uint) } + case reflect.Uint8: + return func(v1, v2 interface{}) bool { return v1.(uint8) < v2.(uint8) } + case reflect.Uint16: + return func(v1, v2 interface{}) bool { return v1.(uint16) < v2.(uint16) } + case reflect.Uint32: + return func(v1, v2 interface{}) bool { return v1.(uint32) < v2.(uint32) } + case reflect.Uint64: + return func(v1, v2 interface{}) bool { return v1.(uint64) < v2.(uint64) } + case reflect.Float32: + return func(v1, v2 interface{}) bool { return v1.(float32) < v2.(float32) } + case reflect.Float64: + return func(v1, v2 interface{}) bool { return v1.(float64) < v2.(float64) } + default: + return nil + } +} + +type sorter struct { + vs []reflect.Value + less func(v1, v2 interface{}) bool +} + +func (s *sorter) Len() int { return len(s.vs) } +func (s *sorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s *sorter) Less(i, j int) bool { return s.less(s.vs[i].Interface(), s.vs[j].Interface()) } diff --git a/vendor/cloud.google.com/go/internal/pretty/pretty_test.go b/vendor/cloud.google.com/go/internal/pretty/pretty_test.go new file mode 100644 index 0000000..6e2ff8b --- /dev/null +++ b/vendor/cloud.google.com/go/internal/pretty/pretty_test.go @@ -0,0 +1,105 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
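+
+// Note for readers of this vendored test file: as TestSprint and
+// TestWithDefaults below exercise, fmt's %v verb omits zero-valued struct
+// fields and map entries, while %+v includes them.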
+ +package pretty + +import ( + "fmt" + "strings" + "testing" +) + +type S struct { + X int + Y bool + z *string +} + +func TestSprint(t *testing.T) { + Indent = "~" + i := 17 + + for _, test := range []struct { + value interface{} + want string + }{ + // primitives and pointer + {nil, "nil"}, + {3, "3"}, + {9.8, "9.8"}, + {true, "true"}, + {"foo", `"foo"`}, + {&i, "&17"}, + // array and slice + {[3]int{1, 2, 3}, "[3]int{\n~1,\n~2,\n~3,\n}"}, + {[]int{1, 2, 3}, "[]int{\n~1,\n~2,\n~3,\n}"}, + {[]int{}, "[]int{}"}, + {[]string{"foo"}, "[]string{\n~\"foo\",\n}"}, + // map + {map[int]bool{}, "map[int]bool{}"}, + {map[int]bool{1: true, 2: false, 3: true}, + "map[int]bool{\n~1: true,\n~3: true,\n}"}, + // struct + {S{}, "pretty.S{\n}"}, + {S{3, true, ptr("foo")}, + "pretty.S{\n~X: 3,\n~Y: true,\n~z: &\"foo\",\n}"}, + // interface + {[]interface{}{&i}, "[]interface {}{\n~&17,\n}"}, + // nesting + {[]S{{1, false, ptr("a")}, {2, true, ptr("b")}}, + `[]pretty.S{ +~pretty.S{ +~~X: 1, +~~z: &"a", +~}, +~pretty.S{ +~~X: 2, +~~Y: true, +~~z: &"b", +~}, +}`}, + } { + got := fmt.Sprintf("%v", Value(test.value)) + if got != test.want { + t.Errorf("%v: got:\n%q\nwant:\n%q", test.value, got, test.want) + } + } +} + +func TestWithDefaults(t *testing.T) { + Indent = "~" + for _, test := range []struct { + value interface{} + want string + }{ + {map[int]bool{1: true, 2: false, 3: true}, + "map[int]bool{\n~1: true,\n~2: false,\n~3: true,\n}"}, + {S{}, "pretty.S{\n~X: 0,\n~Y: false,\n~z: nil,\n}"}, + } { + got := fmt.Sprintf("%+v", Value(test.value)) + if got != test.want { + t.Errorf("%v: got:\n%q\nwant:\n%q", test.value, got, test.want) + } + } +} + +func TestBadVerb(t *testing.T) { + got := fmt.Sprintf("%d", Value(8)) + want := "%!d(" + if !strings.HasPrefix(got, want) { + t.Errorf("got %q, want prefix %q", got, want) + } +} + +func ptr(s string) *string { return &s } diff --git a/vendor/cloud.google.com/go/internal/protostruct/protostruct.go b/vendor/cloud.google.com/go/internal/protostruct/protostruct.go new file mode 100644 index 0000000..5c6f326 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/protostruct/protostruct.go @@ -0,0 +1,56 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package protostruct supports operations on the protocol buffer Struct message. +package protostruct + +import ( + pb "github.com/golang/protobuf/ptypes/struct" +) + +// DecodeToMap converts a pb.Struct to a map from strings to Go types. +// DecodeToMap panics if s is invalid. 
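+// Note that proto "number" values carry no int/float distinction, so a
+// NumberValue always decodes to a Go float64 (e.g. 2.7 in the test below).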
+func DecodeToMap(s *pb.Struct) map[string]interface{} { + if s == nil { + return nil + } + m := map[string]interface{}{} + for k, v := range s.Fields { + m[k] = decodeValue(v) + } + return m +} + +func decodeValue(v *pb.Value) interface{} { + switch k := v.Kind.(type) { + case *pb.Value_NullValue: + return nil + case *pb.Value_NumberValue: + return k.NumberValue + case *pb.Value_StringValue: + return k.StringValue + case *pb.Value_BoolValue: + return k.BoolValue + case *pb.Value_StructValue: + return DecodeToMap(k.StructValue) + case *pb.Value_ListValue: + s := make([]interface{}, len(k.ListValue.Values)) + for i, e := range k.ListValue.Values { + s[i] = decodeValue(e) + } + return s + default: + panic("protostruct: unknown kind") + } +} diff --git a/vendor/cloud.google.com/go/internal/protostruct/protostruct_test.go b/vendor/cloud.google.com/go/internal/protostruct/protostruct_test.go new file mode 100644 index 0000000..224cf20 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/protostruct/protostruct_test.go @@ -0,0 +1,58 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package protostruct supports operations on the protocol buffer Struct message. +package protostruct + +import ( + "testing" + + "cloud.google.com/go/internal/testutil" + pb "github.com/golang/protobuf/ptypes/struct" +) + +func TestDecodeToMap(t *testing.T) { + if got := DecodeToMap(nil); !testutil.Equal(got, map[string]interface{}(nil)) { + t.Errorf("DecodeToMap(nil) = %v, want nil", got) + } + nullv := &pb.Value{&pb.Value_NullValue{}} + stringv := &pb.Value{&pb.Value_StringValue{"x"}} + boolv := &pb.Value{&pb.Value_BoolValue{true}} + numberv := &pb.Value{&pb.Value_NumberValue{2.7}} + in := &pb.Struct{Fields: map[string]*pb.Value{ + "n": nullv, + "s": stringv, + "b": boolv, + "f": numberv, + "l": &pb.Value{&pb.Value_ListValue{&pb.ListValue{ + []*pb.Value{nullv, stringv, boolv, numberv}, + }}}, + "S": &pb.Value{&pb.Value_StructValue{&pb.Struct{Fields: map[string]*pb.Value{ + "n1": nullv, + "b1": boolv, + }}}}, + }} + want := map[string]interface{}{ + "n": nil, + "s": "x", + "b": true, + "f": 2.7, + "l": []interface{}{nil, "x", true, 2.7}, + "S": map[string]interface{}{"n1": nil, "b1": true}, + } + got := DecodeToMap(in) + if diff := testutil.Diff(got, want); diff != "" { + t.Error(diff) + } +} diff --git a/vendor/cloud.google.com/go/internal/readme/Makefile b/vendor/cloud.google.com/go/internal/readme/Makefile new file mode 100644 index 0000000..1c1f4e1 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/readme/Makefile @@ -0,0 +1,48 @@ +# Rebuild the README.md file at repo root by inserting code samples +# from compilable go files. 
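+#
+# Typical use (illustrative): "make readme" rewrites README.md in place;
+# "make diff" exits nonzero if README.md is out of date with snippets.go.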
+
+SHELL=/bin/bash
+
+GOCLOUD_HOME=$(GOPATH)/src/cloud.google.com/go
+README=$(GOCLOUD_HOME)/README.md
+
+.PHONY: readme test test-good test-bad-go test-bad-md
+
+readme:
+	@tmp=$$(mktemp); \
+	awk -f snipmd.awk snippets.go $(README) > $$tmp; \
+	mv $$tmp $(README)
+
+diff:
+	diff $(README) <(awk -f snipmd.awk snippets.go $(README))
+
+test: test-good test-bad-go test-bad-md
+	@echo PASS
+
+test-good:
+	@echo testdata/good.md
+	@cd testdata >& /dev/null; \
+	diff -u want.md <(awk -f ../snipmd.awk snips.go good.md)
+	@echo "testdata/want.md (round trip)"
+	@cd testdata >& /dev/null; \
+	diff -u want.md <(awk -f ../snipmd.awk snips.go want.md)
+
+test-bad-go:
+	@for f in testdata/bad-*.go; do \
+	  echo $$f; \
+	  if awk -f snipmd.awk $$f >& /dev/null; then \
+	    echo "$$f succeeded, want failure"; \
+	    exit 1; \
+	  fi; \
+	done
+
+test-bad-md:
+	@for f in testdata/bad-*.md; do \
+	  echo $$f; \
+	  if awk -f snipmd.awk testdata/snips.go $$f >& /dev/null; then \
+	    echo "$$f succeeded, want failure"; \
+	    exit 1; \
+	  fi; \
+	done
+
+
diff --git a/vendor/cloud.google.com/go/internal/readme/snipmd.awk b/vendor/cloud.google.com/go/internal/readme/snipmd.awk
new file mode 100644
index 0000000..3a52383
--- /dev/null
+++ b/vendor/cloud.google.com/go/internal/readme/snipmd.awk
@@ -0,0 +1,123 @@
+# Copyright 2017 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# snipmd inserts code snippets from Go source files into a markdown file.
+#
+# Call with one or more .go files and a .md file:
+#
+#    awk -f snipmd.awk foo.go bar.go template.md
+#
+# In the Go files, start a snippet with
+#    //[ NAME
+# and end it with
+#    //]
+#
+# In the markdown, write
+#    [snip]:# (NAME)
+# to insert the snippet NAME just below that line.
+# If there is already a code block after the [snip]:# line, it will be
+# replaced, so a previous output can be used as input.
+#
+# The following transformations are made to the Go code:
+# - The first tab of each line is removed.
+# - Trailing blank lines are removed.
+# - `ELLIPSIS` and `_ = ELLIPSIS` are replaced by `...`
+
+
+/^[ \t]*\/\/\[/ { # start snippet in Go file
+	if (inGo()) {
+		if ($2 == "") {
+			die("missing snippet name")
+		}
+		curSnip = $2
+		next
+	}
+}
+
+/^[ \t]*\/\/]/ { # end snippet in Go file
+	if (inGo()) {
+		if (curSnip != "") {
+			# Remove all but one trailing newline.
+			gsub(/\n+$/, "\n", snips[curSnip])
+			curSnip = ""
+			next
+		} else {
+			die("//] without corresponding //[")
+		}
+	}
+}
+
+ENDFILE {
+	if (curSnip != "") {
+		die("unclosed snippet: " curSnip)
+	}
+}
+
+# Skip code blocks in the input that immediately follow [snip]:# lines,
+# because we just inserted the snippet. Supports round-tripping.
+/^```go$/,/^```$/ {
+	if (inMarkdown() && afterSnip) {
+		next
+	}
+}
+
+# Matches every line.
+{
+	if (curSnip != "") {
+		line = $0
+		# Remove initial tab, if any.
+		if (line ~ /^\t/) {
+			line = substr(line, 2)
+		}
+		# Replace ELLIPSIS.
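+		# ("_ = ELLIPSIS" must be rewritten before bare ELLIPSIS; otherwise such
+		# lines would render as "_ = ..." in the generated markdown.)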
+ gsub(/_ = ELLIPSIS/, "...", line) + gsub(/ELLIPSIS/, "...", line) + + snips[curSnip] = snips[curSnip] line "\n" + } else if (inMarkdown()) { + afterSnip = 0 + # Copy .md to output. + print + } +} + +$1 ~ /\[snip\]:#/ { # Snippet marker in .md file. + if (inMarkdown()) { + # We expect '[snip]:#' to be followed by '(NAME)' + if ($2 !~ /\(.*\)/) { + die("bad snip spec: " $0) + } + name = substr($2, 2, length($2)-2) + if (snips[name] == "") { + die("no snippet named " name) + } + printf("```go\n%s```\n", snips[name]) + afterSnip = 1 + } +} + + +function inMarkdown() { + return match(FILENAME, /\.md$/) +} + +function inGo() { + return match(FILENAME, /\.go$/) +} + + +function die(msg) { + printf("%s:%d: %s\n", FILENAME, FNR, msg) > "/dev/stderr" + exit 1 +} diff --git a/vendor/cloud.google.com/go/internal/readme/snippets.go b/vendor/cloud.google.com/go/internal/readme/snippets.go new file mode 100644 index 0000000..049710d --- /dev/null +++ b/vendor/cloud.google.com/go/internal/readme/snippets.go @@ -0,0 +1,241 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file holds samples that are embedded into README.md. + +// This file has to compile, but need not execute. +// If it fails to compile, fix it, then run `make` to regenerate README.md. + +package readme + +import ( + "fmt" + "io/ioutil" + "log" + "time" + + "cloud.google.com/go/bigquery" + "cloud.google.com/go/datastore" + "cloud.google.com/go/logging" + "cloud.google.com/go/pubsub" + "cloud.google.com/go/spanner" + "cloud.google.com/go/storage" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var ctx context.Context + +const END = 0 + +func auth() { + //[ auth + client, err := storage.NewClient(ctx) + //] + _ = client + _ = err +} + +func auth2() { + //[ auth-JSON + client, err := storage.NewClient(ctx, option.WithServiceAccountFile("path/to/keyfile.json")) + //] + _ = client + _ = err +} + +func auth3() { + var ELLIPSIS oauth2.TokenSource + //[ auth-ts + tokenSource := ELLIPSIS + client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource)) + //] + _ = client + _ = err +} + +func datastoreSnippets() { + //[ datastore-1 + client, err := datastore.NewClient(ctx, "my-project-id") + if err != nil { + log.Fatal(err) + } + //] + + //[ datastore-2 + type Post struct { + Title string + Body string `datastore:",noindex"` + PublishedAt time.Time + } + keys := []*datastore.Key{ + datastore.NameKey("Post", "post1", nil), + datastore.NameKey("Post", "post2", nil), + } + posts := []*Post{ + {Title: "Post 1", Body: "...", PublishedAt: time.Now()}, + {Title: "Post 2", Body: "...", PublishedAt: time.Now()}, + } + if _, err := client.PutMulti(ctx, keys, posts); err != nil { + log.Fatal(err) + } + //] +} + +func storageSnippets() { + //[ storage-1 + client, err := storage.NewClient(ctx) + if err != nil { + log.Fatal(err) + } + //] + + //[ storage-2 + // Read the object1 from bucket. 
+ rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx) + if err != nil { + log.Fatal(err) + } + defer rc.Close() + body, err := ioutil.ReadAll(rc) + if err != nil { + log.Fatal(err) + } + //] + _ = body +} + +func pubsubSnippets() { + //[ pubsub-1 + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + log.Fatal(err) + } + //] + + const ELLIPSIS = 0 + + //[ pubsub-2 + // Publish "hello world" on topic1. + topic := client.Topic("topic1") + res := topic.Publish(ctx, &pubsub.Message{ + Data: []byte("hello world"), + }) + // The publish happens asynchronously. + // Later, you can get the result from res: + _ = ELLIPSIS + msgID, err := res.Get(ctx) + if err != nil { + log.Fatal(err) + } + + // Use a callback to receive messages via subscription1. + sub := client.Subscription("subscription1") + err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) { + fmt.Println(m.Data) + m.Ack() // Acknowledge that we've consumed the message. + }) + if err != nil { + log.Println(err) + } + //] + _ = msgID +} + +func bqSnippets() { + //[ bq-1 + c, err := bigquery.NewClient(ctx, "my-project-ID") + if err != nil { + // TODO: Handle error. + } + //] + + //[ bq-2 + // Construct a query. + q := c.Query(` + SELECT year, SUM(number) + FROM [bigquery-public-data:usa_names.usa_1910_2013] + WHERE name = "William" + GROUP BY year + ORDER BY year +`) + // Execute the query. + it, err := q.Read(ctx) + if err != nil { + // TODO: Handle error. + } + // Iterate through the results. + for { + var values []bigquery.Value + err := it.Next(&values) + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(values) + } + //] +} + +func loggingSnippets() { + //[ logging-1 + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + //] + //[ logging-2 + logger := client.Logger("my-log") + logger.Log(logging.Entry{Payload: "something happened!"}) + //] + + //[ logging-3 + err = client.Close() + if err != nil { + // TODO: Handle error. + } + //] +} + +func spannerSnippets() { + //[ spanner-1 + client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D") + if err != nil { + log.Fatal(err) + } + //] + + //[ spanner-2 + // Simple Reads And Writes + _, err = client.Apply(ctx, []*spanner.Mutation{ + spanner.Insert("Users", + []string{"name", "email"}, + []interface{}{"alice", "a@example.com"})}) + if err != nil { + log.Fatal(err) + } + row, err := client.Single().ReadRow(ctx, "Users", + spanner.Key{"alice"}, []string{"email"}) + if err != nil { + log.Fatal(err) + } + //] + _ = row +} diff --git a/vendor/cloud.google.com/go/internal/readme/testdata/bad-no-name.go b/vendor/cloud.google.com/go/internal/readme/testdata/bad-no-name.go new file mode 100644 index 0000000..5b96a50 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/readme/testdata/bad-no-name.go @@ -0,0 +1,23 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package readme + +import "fmt" + +func f() { + //[ + fmt.Println() + //] +} diff --git a/vendor/cloud.google.com/go/internal/readme/testdata/bad-no-open.go b/vendor/cloud.google.com/go/internal/readme/testdata/bad-no-open.go new file mode 100644 index 0000000..d69fbcb --- /dev/null +++ b/vendor/cloud.google.com/go/internal/readme/testdata/bad-no-open.go @@ -0,0 +1,19 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package readme + +func f() { + //] +} diff --git a/vendor/cloud.google.com/go/internal/readme/testdata/bad-nosnip.md b/vendor/cloud.google.com/go/internal/readme/testdata/bad-nosnip.md new file mode 100644 index 0000000..e3a1a3c --- /dev/null +++ b/vendor/cloud.google.com/go/internal/readme/testdata/bad-nosnip.md @@ -0,0 +1,2 @@ +[snip]:# (unknown) + diff --git a/vendor/cloud.google.com/go/internal/readme/testdata/bad-spec.md b/vendor/cloud.google.com/go/internal/readme/testdata/bad-spec.md new file mode 100644 index 0000000..2f46ad8 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/readme/testdata/bad-spec.md @@ -0,0 +1 @@ +[snip]:# missing-parens diff --git a/vendor/cloud.google.com/go/internal/readme/testdata/bad-unclosed.go b/vendor/cloud.google.com/go/internal/readme/testdata/bad-unclosed.go new file mode 100644 index 0000000..486ce42 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/readme/testdata/bad-unclosed.go @@ -0,0 +1,21 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package readme + +// unclosed snippet + +func f() { + //[ X +} diff --git a/vendor/cloud.google.com/go/internal/readme/testdata/good.md b/vendor/cloud.google.com/go/internal/readme/testdata/good.md new file mode 100644 index 0000000..c300afe --- /dev/null +++ b/vendor/cloud.google.com/go/internal/readme/testdata/good.md @@ -0,0 +1,18 @@ +This template is for testing snipmd.awk. + +Put the first snippet here. + +[snip]:# (first) + +And now the second. +[snip]:# (second) + +A top-level snippet. + +[snip]:# (top-level) + +```go +// A code block that is not included. +``` + +And we're done. 
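(Illustrative aside, not part of the upstream patch: the test-good Makefile target above amounts to running

    awk -f snipmd.awk testdata/snips.go testdata/good.md

from internal/readme and diffing the output against testdata/want.md; feeding want.md back through the same command must reproduce it unchanged, which is the round-trip property snipmd.awk's header comment promises.)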
diff --git a/vendor/cloud.google.com/go/internal/readme/testdata/snips.go b/vendor/cloud.google.com/go/internal/readme/testdata/snips.go new file mode 100644 index 0000000..cddb5f9 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/readme/testdata/snips.go @@ -0,0 +1,39 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package readme + +import ( + "errors" + "fmt" +) + +func f() { + ELLIPSIS := 3 + //[ first + fmt.Println("hello") + x := ELLIPSIS + //] + + //[ second + if x > 2 { + _ = ELLIPSIS + } + //] +} + +//[ top-level +var ErrBad = errors.New("bad") + +//] diff --git a/vendor/cloud.google.com/go/internal/readme/testdata/want.md b/vendor/cloud.google.com/go/internal/readme/testdata/want.md new file mode 100644 index 0000000..176bd06 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/readme/testdata/want.md @@ -0,0 +1,30 @@ +This template is for testing snipmd.awk. + +Put the first snippet here. + +[snip]:# (first) +```go +fmt.Println("hello") +x := ... +``` + +And now the second. +[snip]:# (second) +```go +if x > 2 { + ... +} +``` + +A top-level snippet. + +[snip]:# (top-level) +```go +var ErrBad = errors.New("bad") +``` + +```go +// A code block that is not included. +``` + +And we're done. diff --git a/vendor/cloud.google.com/go/internal/retry.go b/vendor/cloud.google.com/go/internal/retry.go new file mode 100644 index 0000000..e1f9aaa --- /dev/null +++ b/vendor/cloud.google.com/go/internal/retry.go @@ -0,0 +1,55 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "time" + + gax "github.com/googleapis/gax-go" + + "golang.org/x/net/context" +) + +// Retry calls the supplied function f repeatedly according to the provided +// backoff parameters. It returns when one of the following occurs: +// When f's first return value is true, Retry immediately returns with f's second +// return value. +// When the provided context is done, Retry returns with an error that +// includes both ctx.Error() and the last error returned by f. +func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error { + return retry(ctx, bo, f, gax.Sleep) +} + +func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error), + sleep func(context.Context, time.Duration) error) error { + var lastErr error + for { + stop, err := f() + if stop { + return err + } + // Remember the last "real" error from f. 
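+		// (context.Canceled and context.DeadlineExceeded are excluded so that a
+		// bare context error does not displace a more informative error from f;
+		// the context error is still surfaced via sleep below.)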
+ if err != nil && err != context.Canceled && err != context.DeadlineExceeded { + lastErr = err + } + p := bo.Pause() + if cerr := sleep(ctx, p); cerr != nil { + if lastErr != nil { + return Annotatef(lastErr, "retry failed with %v; last error", cerr) + } + return cerr + } + } +} diff --git a/vendor/cloud.google.com/go/internal/retry_test.go b/vendor/cloud.google.com/go/internal/retry_test.go new file mode 100644 index 0000000..534c0fe --- /dev/null +++ b/vendor/cloud.google.com/go/internal/retry_test.go @@ -0,0 +1,89 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "errors" + "fmt" + "testing" + "time" + + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestRetry(t *testing.T) { + ctx := context.Background() + // Without a context deadline, retry will run until the function + // says not to retry any more. + n := 0 + endRetry := errors.New("end retry") + err := retry(ctx, gax.Backoff{}, + func() (bool, error) { + n++ + if n < 10 { + return false, nil + } + return true, endRetry + }, + func(context.Context, time.Duration) error { return nil }) + if got, want := err, endRetry; got != want { + t.Errorf("got %v, want %v", err, endRetry) + } + if n != 10 { + t.Errorf("n: got %d, want %d", n, 10) + } + + // If the context has a deadline, sleep will return an error + // and end the function. + n = 0 + err = retry(ctx, gax.Backoff{}, + func() (bool, error) { return false, nil }, + func(context.Context, time.Duration) error { + n++ + if n < 10 { + return nil + } + return context.DeadlineExceeded + }) + if err == nil { + t.Error("got nil, want error") + } +} + +func TestRetryPreserveError(t *testing.T) { + // Retry tries to preserve the type and other information from + // the last error returned by the function. + err := retry(context.Background(), gax.Backoff{}, + func() (bool, error) { + return false, status.Error(codes.NotFound, "not found") + }, + func(context.Context, time.Duration) error { + return context.DeadlineExceeded + }) + got, ok := status.FromError(err) + if !ok { + t.Fatalf("got %T, wanted a status", got) + } + if g, w := got.Code(), codes.NotFound; g != w { + t.Errorf("got code %v, want %v", g, w) + } + wantMessage := fmt.Sprintf("retry failed with %v; last error: not found", context.DeadlineExceeded) + if g, w := got.Message(), wantMessage; g != w { + t.Errorf("got message %q, want %q", g, w) + } +} diff --git a/vendor/cloud.google.com/go/internal/snipdoc/README.md b/vendor/cloud.google.com/go/internal/snipdoc/README.md new file mode 100644 index 0000000..5d28429 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/snipdoc/README.md @@ -0,0 +1,29 @@ +# Snipdoc + +Snipdoc is a simple tool for maintaining package documentation that contains +code samples. + +1. Create a subdirectory of your package to hold the following files. "internal" + is a good name. + +2. 
Write a template file (for example, "doc.template") with the text of your package documentation. The file +should look exactly like you want your doc.go file to look, except for code +snippets. +Instead of embedding a code snippet, write a line consisting solely of + + [NAME] + + for your choice of NAME. + +3. Write a snippets file (for example, "doc-snippets.go") as a valid Go source + file. Begin each snippet you'd like to appear in your package docs with + `//[ NAME` and end it with `//]`. + +4. Construct your doc.go file with the command + ``` + awk -f snipdoc.awk doc-snippets.go doc.template + ``` + The file "sample-makefile" in this directory verifies that the + snippets file compiles and safely constructs a doc.go file. + + diff --git a/vendor/cloud.google.com/go/internal/snipdoc/sample-makefile b/vendor/cloud.google.com/go/internal/snipdoc/sample-makefile new file mode 100644 index 0000000..6769cb3 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/snipdoc/sample-makefile @@ -0,0 +1,16 @@ +# Build doc.go from template and snippets. + +SHELL=/bin/bash + +../doc.go: build doc-snippets.go doc.template snipdoc.awk + @tmp=$$(mktemp) && \ + awk -f snipdoc.awk doc-snippets.go doc.template > $$tmp && \ + chmod +w $@ && \ + mv $$tmp $@ && \ + chmod -w $@ + @echo "wrote $@" + +.PHONY: build + +build: + go build doc-snippets.go diff --git a/vendor/cloud.google.com/go/internal/snipdoc/snipdoc.awk b/vendor/cloud.google.com/go/internal/snipdoc/snipdoc.awk new file mode 100644 index 0000000..ebb7e21 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/snipdoc/snipdoc.awk @@ -0,0 +1,116 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# snipdoc merges code snippets from Go source files into a template to +# produce another go file (typically doc.go). +# +# Call with one or more .go files and a template file. +# +# awk -f snipmd.awk foo.go bar.go doc.template +# +# In the Go files, start a snippet with +# //[ NAME +# and end it with +# //] +# +# In the template, write +# [NAME] +# on a line by itself to insert the snippet NAME on that line. +# +# The following transformations are made to the Go code: +# - Trailing blank lines are removed. +# - `ELLIPSIS` and `_ = ELLIPSIS` are replaced by `...` + + +/^[ \t]*\/\/\[/ { # start snippet in Go file + if (inGo()) { + if ($2 == "") { + die("missing snippet name") + } + curSnip = $2 + next + } +} + +/^[ \t]*\/\/]/ { # end snippet in Go file + if (inGo()) { + if (curSnip != "") { + # Remove all trailing newlines. + gsub(/[\t\n]+$/, "", snips[curSnip]) + curSnip = "" + next + } else { + die("//] without corresponding //[") + } + } +} + +ENDFILE { + if (curSnip != "") { + die("unclosed snippet: " curSnip) + } +} + +/^\[.*\]$/ { # Snippet marker in template file. + if (inTemplate()) { + name = substr($1, 2, length($1)-2) + if (snips[name] == "") { + die("no snippet named " name) + } + printf("%s\n", snips[name]) + afterSnip = 1 + next + } +} + +# Matches every line. 
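+# (Unlike snipmd.awk, which strips one leading tab, the block below may add
+# indentation: if a snippet's first line is not tab-indented, every line of
+# that snippet gets one extra tab.)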
+{ + if (curSnip != "") { + # If the first line in the snip has no indent, add the indent. + if (snips[curSnip] == "") { + if (index($0, "\t") == 1) { + extraIndent = "" + } else { + extraIndent = "\t" + } + } + + line = $0 + # Replace ELLIPSIS. + gsub(/_ = ELLIPSIS/, "...", line) + gsub(/ELLIPSIS/, "...", line) + + snips[curSnip] = snips[curSnip] extraIndent line "\n" + } else if (inTemplate()) { + afterSnip = 0 + # Copy to output. + print + } +} + + + +function inTemplate() { + return match(FILENAME, /\.template$/) +} + +function inGo() { + return match(FILENAME, /\.go$/) +} + + +function die(msg) { + printf("%s:%d: %s\n", FILENAME, FNR, msg) > "/dev/stderr" + exit 1 +} diff --git a/vendor/cloud.google.com/go/internal/testutil/cmp.go b/vendor/cloud.google.com/go/internal/testutil/cmp.go new file mode 100644 index 0000000..9dcef3d --- /dev/null +++ b/vendor/cloud.google.com/go/internal/testutil/cmp.go @@ -0,0 +1,53 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testutil + +import ( + "math" + + "github.com/golang/protobuf/proto" + "github.com/google/go-cmp/cmp" +) + +var ( + alwaysEqual = cmp.Comparer(func(_, _ interface{}) bool { return true }) + + defaultCmpOptions = []cmp.Option{ + // Use proto.Equal for protobufs + cmp.Comparer(proto.Equal), + // NaNs compare equal + cmp.FilterValues(func(x, y float64) bool { + return math.IsNaN(x) && math.IsNaN(y) + }, alwaysEqual), + cmp.FilterValues(func(x, y float32) bool { + return math.IsNaN(float64(x)) && math.IsNaN(float64(y)) + }, alwaysEqual), + } +) + +// Equal tests two values for equality. +func Equal(x, y interface{}, opts ...cmp.Option) bool { + // Put default options at the end. Order doesn't matter. + opts = append(opts[:len(opts):len(opts)], defaultCmpOptions...) + return cmp.Equal(x, y, opts...) +} + +// Diff reports the differences between two values. +// Diff(x, y) == "" iff Equal(x, y). +func Diff(x, y interface{}, opts ...cmp.Option) string { + // Put default options at the end. Order doesn't matter. + opts = append(opts[:len(opts):len(opts)], defaultCmpOptions...) + return cmp.Diff(x, y, opts...) +} diff --git a/vendor/cloud.google.com/go/internal/testutil/context.go b/vendor/cloud.google.com/go/internal/testutil/context.go new file mode 100644 index 0000000..4dd89bb --- /dev/null +++ b/vendor/cloud.google.com/go/internal/testutil/context.go @@ -0,0 +1,95 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package testutil contains helper functions for writing tests. +package testutil + +import ( + "fmt" + "io/ioutil" + "log" + "os" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "golang.org/x/oauth2/jwt" +) + +const ( + envProjID = "GCLOUD_TESTS_GOLANG_PROJECT_ID" + envPrivateKey = "GCLOUD_TESTS_GOLANG_KEY" +) + +// ProjID returns the project ID to use in integration tests, or the empty +// string if none is configured. +func ProjID() string { + return os.Getenv(envProjID) +} + +// TokenSource returns the OAuth2 token source to use in integration tests, +// or nil if none is configured. It uses the standard environment variable +// for tests in this repo. +func TokenSource(ctx context.Context, scopes ...string) oauth2.TokenSource { + return TokenSourceEnv(ctx, envPrivateKey, scopes...) +} + +// TokenSourceEnv returns the OAuth2 token source to use in integration tests. or nil +// if none is configured. It tries to get credentials from the filename in the +// environment variable envVar. If the environment variable is unset, TokenSourceEnv +// will try to find 'Application Default Credentials'. Else, TokenSourceEnv will +// return nil. TokenSourceEnv will log.Fatal if the token source is specified but +// missing or invalid. +func TokenSourceEnv(ctx context.Context, envVar string, scopes ...string) oauth2.TokenSource { + key := os.Getenv(envVar) + if key == "" { // Try for application default credentials. + ts, err := google.DefaultTokenSource(ctx, scopes...) + if err != nil { + log.Println("No 'Application Default Credentials' found.") + return nil + } + return ts + } + conf, err := jwtConfigFromFile(key, scopes) + if err != nil { + log.Fatal(err) + } + return conf.TokenSource(ctx) +} + +// JWTConfig reads the JSON private key file whose name is in the default +// environment variable, and returns the jwt.Config it contains. It ignores +// scopes. +// If the environment variable is empty, it returns (nil, nil). +func JWTConfig() (*jwt.Config, error) { + return jwtConfigFromFile(os.Getenv(envPrivateKey), nil) +} + +// jwtConfigFromFile reads the given JSON private key file, and returns the +// jwt.Config it contains. +// If the filename is empty, it returns (nil, nil). +func jwtConfigFromFile(filename string, scopes []string) (*jwt.Config, error) { + if filename == "" { + return nil, nil + } + jsonKey, err := ioutil.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("Cannot read the JSON key file, err: %v", err) + } + conf, err := google.JWTConfigFromJSON(jsonKey, scopes...) + if err != nil { + return nil, fmt.Errorf("google.JWTConfigFromJSON: %v", err) + } + return conf, nil +} diff --git a/vendor/cloud.google.com/go/internal/testutil/go18.go b/vendor/cloud.google.com/go/internal/testutil/go18.go new file mode 100644 index 0000000..e7933dc --- /dev/null +++ b/vendor/cloud.google.com/go/internal/testutil/go18.go @@ -0,0 +1,64 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +package testutil + +import ( + "log" + "time" + + "go.opencensus.io/plugin/ocgrpc" + "go.opencensus.io/stats/view" + "go.opencensus.io/trace" +) + +type TestExporter struct { + Spans []*trace.SpanData + Stats chan *view.Data +} + +func NewTestExporter() *TestExporter { + te := &TestExporter{Stats: make(chan *view.Data)} + + view.RegisterExporter(te) + view.SetReportingPeriod(time.Millisecond) + if err := view.Register(ocgrpc.ClientRequestCountView); err != nil { + log.Fatal(err) + } + + trace.RegisterExporter(te) + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + + return te +} + +func (te *TestExporter) ExportSpan(s *trace.SpanData) { + te.Spans = append(te.Spans, s) +} + +func (te *TestExporter) ExportView(vd *view.Data) { + if len(vd.Rows) > 0 { + select { + case te.Stats <- vd: + default: + } + } +} + +func (te *TestExporter) Unregister() { + view.UnregisterExporter(te) + trace.UnregisterExporter(te) +} diff --git a/vendor/cloud.google.com/go/internal/testutil/server.go b/vendor/cloud.google.com/go/internal/testutil/server.go new file mode 100644 index 0000000..66300fa --- /dev/null +++ b/vendor/cloud.google.com/go/internal/testutil/server.go @@ -0,0 +1,105 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testutil + +import ( + "net" + "strconv" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// A Server is an in-process gRPC server, listening on a system-chosen port on +// the local loopback interface. Servers are for testing only and are not +// intended to be used in production code. +// +// To create a server, make a new Server, register your handlers, then call +// Start: +// +// srv, err := NewServer() +// ... +// mypb.RegisterMyServiceServer(srv.Gsrv, &myHandler) +// .... +// srv.Start() +// +// Clients should connect to the server with no security: +// +// conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) +// ... +type Server struct { + Addr string + l net.Listener + Gsrv *grpc.Server +} + +// NewServer creates a new Server. The Server will be listening for gRPC connections +// at the address named by the Addr field, without TLS. +func NewServer(opts ...grpc.ServerOption) (*Server, error) { + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, err + } + s := &Server{ + Addr: l.Addr().String(), + l: l, + Gsrv: grpc.NewServer(opts...), + } + return s, nil +} + +// Start causes the server to start accepting incoming connections. +// Call Start after registering handlers. +func (s *Server) Start() { + go s.Gsrv.Serve(s.l) +} + +// Close shuts down the server. +func (s *Server) Close() { + s.Gsrv.Stop() + s.l.Close() +} + +// PageBounds converts an incoming page size and token from an RPC request into +// slice bounds and the outgoing next-page token. 
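+//
+// For instance, with 10 items, PageBounds(4, "", 10) yields (0, 4, "4", nil)
+// and PageBounds(4, "8", 10) yields (8, 10, "", nil); see TestPageBounds below.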
+// +// PageBounds assumes that the complete, unpaginated list of items exists as a +// single slice. In addition to the page size and token, PageBounds needs the +// length of that slice. +// +// PageBounds's first two return values should be used to construct a sub-slice of +// the complete, unpaginated slice. E.g. if the complete slice is s, then +// s[from:to] is the desired page. Its third return value should be set as the +// NextPageToken field of the RPC response. +func PageBounds(pageSize int, pageToken string, length int) (from, to int, nextPageToken string, err error) { + from, to = 0, length + if pageToken != "" { + from, err = strconv.Atoi(pageToken) + if err != nil { + return 0, 0, "", status.Errorf(codes.InvalidArgument, "bad page token: %v", err) + } + if from >= length { + return length, length, "", nil + } + } + if pageSize > 0 && from+pageSize < length { + to = from + pageSize + nextPageToken = strconv.Itoa(to) + } + return from, to, nextPageToken, nil +} diff --git a/vendor/cloud.google.com/go/internal/testutil/server_test.go b/vendor/cloud.google.com/go/internal/testutil/server_test.go new file mode 100644 index 0000000..72b5b5b --- /dev/null +++ b/vendor/cloud.google.com/go/internal/testutil/server_test.go @@ -0,0 +1,79 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testutil + +import ( + "testing" + + grpc "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +func TestNewServer(t *testing.T) { + srv, err := NewServer() + if err != nil { + t.Fatal(err) + } + srv.Start() + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) + if err != nil { + t.Fatal(err) + } + conn.Close() + srv.Close() +} + +func TestPageBounds(t *testing.T) { + const length = 10 + for _, test := range []struct { + size int + tok string + wantFrom int + wantTo int + wantTok string + }{ + {5, "", + 0, 5, "5"}, + {11, "", + 0, 10, ""}, + {5, "2", + 2, 7, "7"}, + {5, "8", + 8, 10, ""}, + {11, "8", + 8, 10, ""}, + {1, "11", + 10, 10, ""}, + } { + gotFrom, gotTo, gotTok, err := PageBounds(test.size, test.tok, length) + if err != nil { + t.Fatal(err) + } + if got, want := gotFrom, test.wantFrom; got != want { + t.Errorf("%+v: from: got %d, want %d", test, got, want) + } + if got, want := gotTo, test.wantTo; got != want { + t.Errorf("%+v: to: got %d, want %d", test, got, want) + } + if got, want := gotTok, test.wantTok; got != want { + t.Errorf("%+v: got %q, want %q", test, got, want) + } + } + + _, _, _, err := PageBounds(4, "xyz", 5) + if grpc.Code(err) != codes.InvalidArgument { + t.Errorf("want invalid argument, got <%v>", err) + } +} diff --git a/vendor/cloud.google.com/go/internal/testutil/unique.go b/vendor/cloud.google.com/go/internal/testutil/unique.go new file mode 100644 index 0000000..ff0de4a --- /dev/null +++ b/vendor/cloud.google.com/go/internal/testutil/unique.go @@ -0,0 +1,101 @@ +// Copyright 2017 Google Inc. All Rights Reserved. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file supports generating unique IDs so that multiple test executions
+// don't interfere with each other, and cleaning up old entities that may
+// remain if tests exit early.
+
+package testutil
+
+import (
+	"fmt"
+	"regexp"
+	"strconv"
+	"sync"
+	"time"
+)
+
+var startTime = time.Now().UTC()
+
+// A UIDSpace manages a set of unique IDs distinguished by a prefix.
+type UIDSpace struct {
+	Prefix string
+	Sep    rune
+	re     *regexp.Regexp
+	mu     sync.Mutex
+	count  int
+}
+
+func NewUIDSpace(prefix string) *UIDSpace {
+	return NewUIDSpaceSep(prefix, '-')
+}
+
+func NewUIDSpaceSep(prefix string, sep rune) *UIDSpace {
+	re := fmt.Sprintf(`^%s%[2]c(\d{4})(\d{2})(\d{2})%[2]c(\d+)%[2]c\d+$`,
+		regexp.QuoteMeta(prefix), sep)
+	return &UIDSpace{
+		Prefix: prefix,
+		Sep:    sep,
+		re:     regexp.MustCompile(re),
+	}
+}
+
+// New generates a new unique ID. The ID consists of the UIDSpace's prefix, a
+// timestamp, and a counter value. All unique IDs generated in the same test
+// execution will have the same timestamp. For example, with prefix "u" and
+// the default separator, an ID looks like "u-20170106-21-0000".
+//
+// Aside from the characters in the prefix, IDs contain only letters, numbers
+// and sep.
+func (s *UIDSpace) New() string { return s.newID(startTime) }
+
+func (s *UIDSpace) newID(t time.Time) string {
+	s.mu.Lock()
+	c := s.count
+	s.count++
+	s.mu.Unlock()
+	// Write the time as a date followed by nanoseconds from midnight of that date.
+	// That makes it easier to see the approximate time of the ID when it is displayed.
+	y, m, d := t.Date()
+	ns := t.Sub(time.Date(y, m, d, 0, 0, 0, 0, time.UTC))
+	// Zero-pad the counter for lexical sort order for IDs with the same timestamp.
+	return fmt.Sprintf("%s%c%04d%02d%02d%c%d%c%04d",
+		s.Prefix, s.Sep, y, m, d, s.Sep, ns, s.Sep, c)
+}
+
+// Timestamp extracts the timestamp of uid, which must have been generated by
+// s. The second return value is true on success, false if there was a problem.
+func (s *UIDSpace) Timestamp(uid string) (time.Time, bool) {
+	subs := s.re.FindStringSubmatch(uid)
+	if subs == nil {
+		return time.Time{}, false
+	}
+	y, err1 := strconv.Atoi(subs[1])
+	m, err2 := strconv.Atoi(subs[2])
+	d, err3 := strconv.Atoi(subs[3])
+	ns, err4 := strconv.Atoi(subs[4])
+	if err1 != nil || err2 != nil || err3 != nil || err4 != nil {
+		return time.Time{}, false
+	}
+	return time.Date(y, time.Month(m), d, 0, 0, 0, ns, time.UTC), true
+}
+
+// Older reports whether uid was created by s and has a timestamp older than
+// the current time by at least d.
+func (s *UIDSpace) Older(uid string, d time.Duration) bool {
+	ts, ok := s.Timestamp(uid)
+	if !ok {
+		return false
+	}
+	return time.Since(ts) > d
+}
diff --git a/vendor/cloud.google.com/go/internal/testutil/unique_test.go b/vendor/cloud.google.com/go/internal/testutil/unique_test.go
new file mode 100644
index 0000000..5a39833
--- /dev/null
+++ b/vendor/cloud.google.com/go/internal/testutil/unique_test.go
@@ -0,0 +1,69 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testutil
+
+import (
+	"testing"
+	"time"
+)
+
+func TestNew(t *testing.T) {
+	s := NewUIDSpace("prefix")
+	tm := time.Date(2017, 1, 6, 0, 0, 0, 21, time.UTC)
+	got := s.newID(tm)
+	want := "prefix-20170106-21-0000"
+	if got != want {
+		t.Errorf("got %q, want %q", got, want)
+	}
+
+	s2 := NewUIDSpaceSep("prefix2", '_')
+	got = s2.newID(tm)
+	want = "prefix2_20170106_21_0000"
+	if got != want {
+		t.Errorf("got %q, want %q", got, want)
+	}
+}
+
+func TestTimestamp(t *testing.T) {
+	s := NewUIDSpace("unique-ID")
+	uid := s.New()
+	got, ok := s.Timestamp(uid)
+	if !ok {
+		t.Fatal("got ok = false, want true")
+	}
+	if !startTime.Equal(got) {
+		t.Errorf("got %s, want %s", got, startTime)
+	}
+
+	got, ok = s.Timestamp("unique-ID-20160308-123-8")
+	if !ok {
+		t.Fatal("got false, want true")
+	}
+	if want := time.Date(2016, 3, 8, 0, 0, 0, 123, time.UTC); !want.Equal(got) {
+		t.Errorf("got %s, want %s", got, want)
+	}
+	if _, ok = s.Timestamp("invalid-time-1234"); ok {
+		t.Error("got true, want false")
+	}
+}
+
+func TestOlder(t *testing.T) {
+	s := NewUIDSpace("uid")
+	// A non-matching ID returns false.
+	id2 := NewUIDSpace("different-prefix").New()
+	if got, want := s.Older(id2, time.Second), false; got != want {
+		t.Errorf("got %t, want %t", got, want)
+	}
+}
diff --git a/vendor/cloud.google.com/go/internal/trace/go18.go b/vendor/cloud.google.com/go/internal/trace/go18.go
new file mode 100644
index 0000000..1da4126
--- /dev/null
+++ b/vendor/cloud.google.com/go/internal/trace/go18.go
@@ -0,0 +1,83 @@
+// Copyright 2018 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+package trace
+
+import (
+	"go.opencensus.io/trace"
+	"golang.org/x/net/context"
+	"google.golang.org/api/googleapi"
+	"google.golang.org/genproto/googleapis/rpc/code"
+	"google.golang.org/grpc/status"
+)
+
+func StartSpan(ctx context.Context, name string) context.Context {
+	ctx, _ = trace.StartSpan(ctx, name)
+	return ctx
+}
+
+func EndSpan(ctx context.Context, err error) {
+	span := trace.FromContext(ctx)
+	if err != nil {
+		span.SetStatus(toStatus(err))
+	}
+	span.End()
+}
+
+// toStatus interrogates an error and converts it to an appropriate
+// OpenCensus status.
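+//
+// For example (derived from the mapping below): a *googleapi.Error carrying
+// HTTP 404 becomes code.Code_NOT_FOUND, a gRPC status error keeps its own
+// code, and any other error is reported as code.Code_UNKNOWN with the text
+// of err.Error().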
+func toStatus(err error) trace.Status { + if err2, ok := err.(*googleapi.Error); ok { + return trace.Status{Code: httpStatusCodeToOCCode(err2.Code), Message: err2.Message} + } else if s, ok := status.FromError(err); ok { + return trace.Status{Code: int32(s.Code()), Message: s.Message()} + } else { + return trace.Status{Code: int32(code.Code_UNKNOWN), Message: err.Error()} + } +} + +// TODO (deklerk): switch to using OpenCensus function when it becomes available. +// Reference: https://github.com/googleapis/googleapis/blob/26b634d2724ac5dd30ae0b0cbfb01f07f2e4050e/google/rpc/code.proto +func httpStatusCodeToOCCode(httpStatusCode int) int32 { + switch httpStatusCode { + case 200: + return int32(code.Code_OK) + case 499: + return int32(code.Code_CANCELLED) + case 500: + return int32(code.Code_UNKNOWN) // Could also be Code_INTERNAL, Code_DATA_LOSS + case 400: + return int32(code.Code_INVALID_ARGUMENT) // Could also be Code_OUT_OF_RANGE + case 504: + return int32(code.Code_DEADLINE_EXCEEDED) + case 404: + return int32(code.Code_NOT_FOUND) + case 409: + return int32(code.Code_ALREADY_EXISTS) // Could also be Code_ABORTED + case 403: + return int32(code.Code_PERMISSION_DENIED) + case 401: + return int32(code.Code_UNAUTHENTICATED) + case 429: + return int32(code.Code_RESOURCE_EXHAUSTED) + case 501: + return int32(code.Code_UNIMPLEMENTED) + case 503: + return int32(code.Code_UNAVAILABLE) + default: + return int32(code.Code_UNKNOWN) + } +} diff --git a/vendor/cloud.google.com/go/internal/trace/go18_test.go b/vendor/cloud.google.com/go/internal/trace/go18_test.go new file mode 100644 index 0000000..3f9f030 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/trace/go18_test.go @@ -0,0 +1,55 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build go1.8 + +package trace + +import ( + "errors" + "net/http" + "testing" + + "cloud.google.com/go/internal/testutil" + octrace "go.opencensus.io/trace" + "google.golang.org/api/googleapi" + "google.golang.org/genproto/googleapis/rpc/code" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestToStatus(t *testing.T) { + for _, testcase := range []struct { + input error + want octrace.Status + }{ + { + errors.New("some random error"), + octrace.Status{Code: int32(code.Code_UNKNOWN), Message: "some random error"}, + }, + { + &googleapi.Error{Code: http.StatusConflict, Message: "some specific googleapi http error"}, + octrace.Status{Code: int32(code.Code_ALREADY_EXISTS), Message: "some specific googleapi http error"}, + }, + { + status.Error(codes.DataLoss, "some specific grpc error"), + octrace.Status{Code: int32(code.Code_DATA_LOSS), Message: "some specific grpc error"}, + }, + } { + got := toStatus(testcase.input) + if r := testutil.Diff(got, testcase.want); r != "" { + t.Errorf("got -, want +:\n%s", r) + } + } +} diff --git a/vendor/cloud.google.com/go/internal/trace/not_go18.go b/vendor/cloud.google.com/go/internal/trace/not_go18.go new file mode 100644 index 0000000..50c1657 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/trace/not_go18.go @@ -0,0 +1,30 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.8 + +package trace + +import ( + "golang.org/x/net/context" +) + +// OpenCensus only supports go 1.8 and higher. + +func StartSpan(ctx context.Context, _ string) context.Context { + return ctx +} + +func EndSpan(context.Context, error) { +} diff --git a/vendor/cloud.google.com/go/internal/tracecontext/tracecontext.go b/vendor/cloud.google.com/go/internal/tracecontext/tracecontext.go new file mode 100644 index 0000000..bfc77ba --- /dev/null +++ b/vendor/cloud.google.com/go/internal/tracecontext/tracecontext.go @@ -0,0 +1,83 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tracecontext provides encoders and decoders for Stackdriver Trace contexts. +package tracecontext + +import "encoding/binary" + +const ( + versionID = 0 + traceIDField = 0 + spanIDField = 1 + optsField = 2 + + traceIDLen = 16 + spanIDLen = 8 + optsLen = 1 + + // Len represents the length of trace context. + Len = 1 + 1 + traceIDLen + 1 + spanIDLen + 1 + optsLen +) + +// Encode encodes trace ID, span ID and options into dst. 
The number of bytes +// written will be returned. If len(dst) isn't big enough to fit the trace context, +// a negative number is returned. +func Encode(dst []byte, traceID []byte, spanID uint64, opts byte) (n int) { + if len(dst) < Len { + return -1 + } + var offset = 0 + putByte := func(b byte) { dst[offset] = b; offset++ } + putUint64 := func(u uint64) { binary.LittleEndian.PutUint64(dst[offset:], u); offset += 8 } + + putByte(versionID) + putByte(traceIDField) + for _, b := range traceID { + putByte(b) + } + putByte(spanIDField) + putUint64(spanID) + putByte(optsField) + putByte(opts) + + return offset +} + +// Decode decodes the src into a trace ID, span ID and options. If src doesn't +// contain a valid trace context, ok = false is returned. +func Decode(src []byte) (traceID []byte, spanID uint64, opts byte, ok bool) { + if len(src) < Len { + return traceID, spanID, 0, false + } + var offset = 0 + readByte := func() byte { b := src[offset]; offset++; return b } + readUint64 := func() uint64 { v := binary.LittleEndian.Uint64(src[offset:]); offset += 8; return v } + + if readByte() != versionID { + return traceID, spanID, 0, false + } + for offset < len(src) { + switch readByte() { + case traceIDField: + traceID = src[offset : offset+traceIDLen] + offset += traceIDLen + case spanIDField: + spanID = readUint64() + case optsField: + opts = readByte() + } + } + return traceID, spanID, opts, true +} diff --git a/vendor/cloud.google.com/go/internal/tracecontext/tracecontext_test.go b/vendor/cloud.google.com/go/internal/tracecontext/tracecontext_test.go new file mode 100644 index 0000000..95d3b88 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/tracecontext/tracecontext_test.go @@ -0,0 +1,136 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
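+
+// A round-trip sketch of the wire format these tests exercise (the sizes
+// come from the constants in tracecontext.go; the variable names are
+// illustrative):
+//
+//   buf := make([]byte, Len)                // Len = 29 bytes
+//   n := Encode(buf, traceID, spanID, opts) // n == Len, or -1 if dst is too short
+//   tid, sid, o, ok := Decode(buf)          // ok reports a well-formed context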
+ +package tracecontext + +import ( + "testing" + + "cloud.google.com/go/internal/testutil" +) + +var validData = []byte{0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 1, 97, 98, 99, 100, 101, 102, 103, 104, 2, 1} + +func TestDecode(t *testing.T) { + tests := []struct { + name string + data []byte + wantTraceID []byte + wantSpanID uint64 + wantOpts byte + wantOk bool + }{ + { + name: "nil data", + data: nil, + wantTraceID: nil, + wantSpanID: 0, + wantOpts: 0, + wantOk: false, + }, + { + name: "short data", + data: []byte{0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77}, + wantTraceID: nil, + wantSpanID: 0, + wantOpts: 0, + wantOk: false, + }, + { + name: "wrong field number", + data: []byte{0, 1, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77}, + wantTraceID: nil, + wantSpanID: 0, + wantOpts: 0, + wantOk: false, + }, + { + name: "valid data", + data: validData, + wantTraceID: []byte{64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79}, + wantSpanID: 0x6867666564636261, + wantOpts: 1, + wantOk: true, + }, + } + for _, tt := range tests { + gotTraceID, gotSpanID, gotOpts, gotOk := Decode(tt.data) + if !testutil.Equal(gotTraceID, tt.wantTraceID) { + t.Errorf("%s: Decode() gotTraceID = %v, want %v", tt.name, gotTraceID, tt.wantTraceID) + } + if gotSpanID != tt.wantSpanID { + t.Errorf("%s: Decode() gotSpanID = %v, want %v", tt.name, gotSpanID, tt.wantSpanID) + } + if gotOpts != tt.wantOpts { + t.Errorf("%s: Decode() gotOpts = %v, want %v", tt.name, gotOpts, tt.wantOpts) + } + if gotOk != tt.wantOk { + t.Errorf("%s: Decode() gotOk = %v, want %v", tt.name, gotOk, tt.wantOk) + } + } +} + +func TestEncode(t *testing.T) { + tests := []struct { + name string + dst []byte + traceID []byte + spanID uint64 + opts byte + wantN int + wantData []byte + }{ + { + name: "short data", + dst: make([]byte, 0), + traceID: []byte("00112233445566"), + spanID: 0x6867666564636261, + opts: 1, + wantN: -1, + wantData: make([]byte, 0), + }, + { + name: "valid data", + dst: make([]byte, Len), + traceID: []byte{64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79}, + spanID: 0x6867666564636261, + opts: 1, + wantN: Len, + wantData: validData, + }, + } + for _, tt := range tests { + gotN := Encode(tt.dst, tt.traceID, tt.spanID, tt.opts) + if gotN != tt.wantN { + t.Errorf("%s: n = %v, want %v", tt.name, gotN, tt.wantN) + } + if gotData := tt.dst; !testutil.Equal(gotData, tt.wantData) { + t.Errorf("%s: dst = %v, want %v", tt.name, gotData, tt.wantData) + } + } +} + +func BenchmarkDecode(b *testing.B) { + for i := 0; i < b.N; i++ { + Decode(validData) + } +} + +func BenchmarkEncode(b *testing.B) { + for i := 0; i < b.N; i++ { + traceID := make([]byte, 16) + var opts byte + Encode(validData, traceID, 0, opts) + } +} diff --git a/vendor/cloud.google.com/go/internal/version/update_version.sh b/vendor/cloud.google.com/go/internal/version/update_version.sh new file mode 100755 index 0000000..fecf1f0 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/version/update_version.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +today=$(date +%Y%m%d) + +sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'$today'"/' $GOFILE + diff --git a/vendor/cloud.google.com/go/internal/version/version.go b/vendor/cloud.google.com/go/internal/version/version.go new file mode 100644 index 0000000..f5c23a5 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/version/version.go @@ -0,0 +1,71 @@ +// Copyright 2016 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate ./update_version.sh + +// Package version contains version information for Google Cloud Client +// Libraries for Go, as reported in request headers. +package version + +import ( + "runtime" + "strings" + "unicode" +) + +// Repo is the current version of the client libraries in this +// repo. It should be a date in YYYYMMDD format. +const Repo = "20180226" + +// Go returns the Go runtime version. The returned string +// has no whitespace. +func Go() string { + return goVersion +} + +var goVersion = goVer(runtime.Version()) + +const develPrefix = "devel +" + +func goVer(s string) string { + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + s += "-" + prerelease + } + return s + } + return "" +} + +func notSemverRune(r rune) bool { + return strings.IndexRune("0123456789.", r) < 0 +} diff --git a/vendor/cloud.google.com/go/internal/version/version_test.go b/vendor/cloud.google.com/go/internal/version/version_test.go new file mode 100644 index 0000000..7e032f0 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/version/version_test.go @@ -0,0 +1,35 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import "testing" + +func TestGoVer(t *testing.T) { + for _, tst := range []struct { + in, want string + }{ + {"go1.8", "1.8.0"}, + {"go1.7.3", "1.7.3"}, + {"go1.8.typealias", "1.8.0-typealias"}, + {"go1.8beta1", "1.8.0-beta1"}, + {"go1.8rc2", "1.8.0-rc2"}, + {"devel +824f981dd4b7 Tue Apr 29 21:41:54 2014 -0400", "824f981dd4b7"}, + {"foo bar zipzap", ""}, + } { + if got := goVer(tst.in); got != tst.want { + t.Errorf("goVer(%q) = %q, want %q", tst.in, got, tst.want) + } + } +} diff --git a/vendor/cloud.google.com/go/issue_template.md b/vendor/cloud.google.com/go/issue_template.md new file mode 100644 index 0000000..e2ccef3 --- /dev/null +++ b/vendor/cloud.google.com/go/issue_template.md @@ -0,0 +1,17 @@ +(delete this for feature requests) + +## Client + +e.g. PubSub + +## Describe Your Environment + +e.g. Alpine Docker on GKE + +## Expected Behavior + +e.g. 
Messages arrive really fast.
+
+## Actual Behavior
+
+e.g. Messages arrive really slowly.
\ No newline at end of file
diff --git a/vendor/cloud.google.com/go/keys.tar.enc b/vendor/cloud.google.com/go/keys.tar.enc
new file mode 100644
index 0000000000000000000000000000000000000000..c54408c93a2b7d36467ceced9e697a7e6be66db5
GIT binary patch
literal 10256
[10256 bytes of base85-encoded binary data omitted]
diff --git a/vendor/cloud.google.com/go/language/apiv1/AnalyzeSentiment_smoke_test.go b/vendor/cloud.google.com/go/language/apiv1/AnalyzeSentiment_smoke_test.go
new file mode 100644
index 0000000..1233913
--- /dev/null
+++ b/vendor/cloud.google.com/go/language/apiv1/AnalyzeSentiment_smoke_test.go
@@ -0,0 +1,73 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package language
+
+import (
+	languagepb "google.golang.org/genproto/googleapis/cloud/language/v1"
+)
+
+import (
+	"fmt"
+	"strconv"
+	"testing"
+	"time"
+
+	"cloud.google.com/go/internal/testutil"
+	"golang.org/x/net/context"
+	"google.golang.org/api/iterator"
+	"google.golang.org/api/option"
+)
+
+var _ = fmt.Sprintf
+var _ = iterator.Done
+var _ = strconv.FormatUint
+var _ = time.Now
+
+func TestLanguageServiceSmoke(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping smoke test in short mode")
+	}
+	ctx := context.Background()
+	ts := testutil.TokenSource(ctx, DefaultAuthScopes()...)
+	if ts == nil {
+		t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
+	}
+
+	projectId := testutil.ProjID()
+	_ = projectId
+
+	c, err := NewClient(ctx, option.WithTokenSource(ts))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var content string = "Hello, world!"
+ var type_ languagepb.Document_Type = languagepb.Document_PLAIN_TEXT + var document = &languagepb.Document{ + Source: &languagepb.Document_Content{ + Content: content, + }, + Type: type_, + } + var request = &languagepb.AnalyzeSentimentRequest{ + Document: document, + } + + if _, err := c.AnalyzeSentiment(ctx, request); err != nil { + t.Error(err) + } +} diff --git a/vendor/cloud.google.com/go/language/apiv1/doc.go b/vendor/cloud.google.com/go/language/apiv1/doc.go new file mode 100644 index 0000000..7685ff3 --- /dev/null +++ b/vendor/cloud.google.com/go/language/apiv1/doc.go @@ -0,0 +1,47 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package language is an auto-generated package for the +// Google Cloud Natural Language API. + +// +// Google Cloud Natural Language API provides natural language understanding +// technologies to developers. Examples include sentiment analysis, entity +// recognition, and text annotations. +package language // import "cloud.google.com/go/language/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + } +} diff --git a/vendor/cloud.google.com/go/language/apiv1/language_client.go b/vendor/cloud.google.com/go/language/apiv1/language_client.go new file mode 100644 index 0000000..2244d70 --- /dev/null +++ b/vendor/cloud.google.com/go/language/apiv1/language_client.go @@ -0,0 +1,229 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
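+
+// A summary of the retry policy configured in defaultCallOptions below (a
+// reading of the generated code, not additional behavior): every method
+// retries only codes.DeadlineExceeded and codes.Unavailable, backing off
+// from 100ms toward a 60s cap with multiplier 1.3.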
+ +package language + +import ( + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + languagepb "google.golang.org/genproto/googleapis/cloud/language/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. +type CallOptions struct { + AnalyzeSentiment []gax.CallOption + AnalyzeEntities []gax.CallOption + AnalyzeEntitySentiment []gax.CallOption + AnalyzeSyntax []gax.CallOption + ClassifyText []gax.CallOption + AnnotateText []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("language.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &CallOptions{ + AnalyzeSentiment: retry[[2]string{"default", "idempotent"}], + AnalyzeEntities: retry[[2]string{"default", "idempotent"}], + AnalyzeEntitySentiment: retry[[2]string{"default", "idempotent"}], + AnalyzeSyntax: retry[[2]string{"default", "idempotent"}], + ClassifyText: retry[[2]string{"default", "idempotent"}], + AnnotateText: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with Google Cloud Natural Language API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client languagepb.LanguageServiceClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new language service client. +// +// Provides text analysis operations such as sentiment analysis and entity +// recognition. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: languagepb.NewLanguageServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// AnalyzeSentiment analyzes the sentiment of the provided text. 
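+//
+// Note on the option handling below (the same pattern is used by every
+// method in this file): the three-index slice expression
+// s[0:len(s):len(s)] caps the capacity of the copied CallOptions slice, so
+// appending the caller's opts allocates a fresh backing array instead of
+// overwriting the shared defaults.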
+func (c *Client) AnalyzeSentiment(ctx context.Context, req *languagepb.AnalyzeSentimentRequest, opts ...gax.CallOption) (*languagepb.AnalyzeSentimentResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.AnalyzeSentiment[0:len(c.CallOptions.AnalyzeSentiment):len(c.CallOptions.AnalyzeSentiment)], opts...) + var resp *languagepb.AnalyzeSentimentResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AnalyzeSentiment(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AnalyzeEntities finds named entities (currently proper names and common nouns) in the text +// along with entity types, salience, mentions for each entity, and +// other properties. +func (c *Client) AnalyzeEntities(ctx context.Context, req *languagepb.AnalyzeEntitiesRequest, opts ...gax.CallOption) (*languagepb.AnalyzeEntitiesResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.AnalyzeEntities[0:len(c.CallOptions.AnalyzeEntities):len(c.CallOptions.AnalyzeEntities)], opts...) + var resp *languagepb.AnalyzeEntitiesResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AnalyzeEntities(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AnalyzeEntitySentiment finds entities, similar to [AnalyzeEntities][google.cloud.language.v1.LanguageService.AnalyzeEntities] in the text and analyzes +// sentiment associated with each entity and its mentions. +func (c *Client) AnalyzeEntitySentiment(ctx context.Context, req *languagepb.AnalyzeEntitySentimentRequest, opts ...gax.CallOption) (*languagepb.AnalyzeEntitySentimentResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.AnalyzeEntitySentiment[0:len(c.CallOptions.AnalyzeEntitySentiment):len(c.CallOptions.AnalyzeEntitySentiment)], opts...) + var resp *languagepb.AnalyzeEntitySentimentResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AnalyzeEntitySentiment(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AnalyzeSyntax analyzes the syntax of the text and provides sentence boundaries and +// tokenization along with part of speech tags, dependency trees, and other +// properties. +func (c *Client) AnalyzeSyntax(ctx context.Context, req *languagepb.AnalyzeSyntaxRequest, opts ...gax.CallOption) (*languagepb.AnalyzeSyntaxResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.AnalyzeSyntax[0:len(c.CallOptions.AnalyzeSyntax):len(c.CallOptions.AnalyzeSyntax)], opts...) + var resp *languagepb.AnalyzeSyntaxResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AnalyzeSyntax(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ClassifyText classifies a document into categories. 
+func (c *Client) ClassifyText(ctx context.Context, req *languagepb.ClassifyTextRequest, opts ...gax.CallOption) (*languagepb.ClassifyTextResponse, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.ClassifyText[0:len(c.CallOptions.ClassifyText):len(c.CallOptions.ClassifyText)], opts...)
+	var resp *languagepb.ClassifyTextResponse
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.ClassifyText(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// AnnotateText is a convenience method that provides all the features that
+// analyzeSentiment, analyzeEntities, and analyzeSyntax provide in one call.
+func (c *Client) AnnotateText(ctx context.Context, req *languagepb.AnnotateTextRequest, opts ...gax.CallOption) (*languagepb.AnnotateTextResponse, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.AnnotateText[0:len(c.CallOptions.AnnotateText):len(c.CallOptions.AnnotateText)], opts...)
+	var resp *languagepb.AnnotateTextResponse
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.AnnotateText(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
diff --git a/vendor/cloud.google.com/go/language/apiv1/language_client_example_test.go b/vendor/cloud.google.com/go/language/apiv1/language_client_example_test.go
new file mode 100644
index 0000000..893d203
--- /dev/null
+++ b/vendor/cloud.google.com/go/language/apiv1/language_client_example_test.go
@@ -0,0 +1,141 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package language_test
+
+import (
+	"cloud.google.com/go/language/apiv1"
+	"golang.org/x/net/context"
+	languagepb "google.golang.org/genproto/googleapis/cloud/language/v1"
+)
+
+func ExampleNewClient() {
+	ctx := context.Background()
+	c, err := language.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use client.
+	_ = c
+}
+
+func ExampleClient_AnalyzeSentiment() {
+	ctx := context.Background()
+	c, err := language.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &languagepb.AnalyzeSentimentRequest{
+		// TODO: Fill request struct fields.
+	}
+	resp, err := c.AnalyzeSentiment(ctx, req)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use resp.
+	_ = resp
+}
+
+func ExampleClient_AnalyzeEntities() {
+	ctx := context.Background()
+	c, err := language.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &languagepb.AnalyzeEntitiesRequest{
+		// TODO: Fill request struct fields.
+	}
+	resp, err := c.AnalyzeEntities(ctx, req)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use resp.
+ _ = resp +} + +func ExampleClient_AnalyzeEntitySentiment() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &languagepb.AnalyzeEntitySentimentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.AnalyzeEntitySentiment(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_AnalyzeSyntax() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &languagepb.AnalyzeSyntaxRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.AnalyzeSyntax(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ClassifyText() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &languagepb.ClassifyTextRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ClassifyText(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_AnnotateText() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &languagepb.AnnotateTextRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.AnnotateText(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/language/apiv1/mock_test.go b/vendor/cloud.google.com/go/language/apiv1/mock_test.go new file mode 100644 index 0000000..c046858 --- /dev/null +++ b/vendor/cloud.google.com/go/language/apiv1/mock_test.go @@ -0,0 +1,518 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package language + +import ( + languagepb "google.golang.org/genproto/googleapis/cloud/language/v1" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockLanguageServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + languagepb.LanguageServiceServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockLanguageServer) AnalyzeSentiment(ctx context.Context, req *languagepb.AnalyzeSentimentRequest) (*languagepb.AnalyzeSentimentResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*languagepb.AnalyzeSentimentResponse), nil +} + +func (s *mockLanguageServer) AnalyzeEntities(ctx context.Context, req *languagepb.AnalyzeEntitiesRequest) (*languagepb.AnalyzeEntitiesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*languagepb.AnalyzeEntitiesResponse), nil +} + +func (s *mockLanguageServer) AnalyzeEntitySentiment(ctx context.Context, req *languagepb.AnalyzeEntitySentimentRequest) (*languagepb.AnalyzeEntitySentimentResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*languagepb.AnalyzeEntitySentimentResponse), nil +} + +func (s *mockLanguageServer) AnalyzeSyntax(ctx context.Context, req *languagepb.AnalyzeSyntaxRequest) (*languagepb.AnalyzeSyntaxResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*languagepb.AnalyzeSyntaxResponse), nil +} + +func (s *mockLanguageServer) ClassifyText(ctx context.Context, req *languagepb.ClassifyTextRequest) (*languagepb.ClassifyTextResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*languagepb.ClassifyTextResponse), nil +} + +func (s *mockLanguageServer) AnnotateText(ctx context.Context, req *languagepb.AnnotateTextRequest) (*languagepb.AnnotateTextResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*languagepb.AnnotateTextResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
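+//
+// Each test below follows the same sketch: queue the expected response on
+// mockLanguage, dial the in-process server with
+// NewClient(context.Background(), clientOpt), issue a single RPC, then
+// assert on mockLanguage.reqs[0] and the returned proto.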
+var clientOpt option.ClientOption
+
+var (
+	mockLanguage mockLanguageServer
+)
+
+func TestMain(m *testing.M) {
+	flag.Parse()
+
+	serv := grpc.NewServer()
+	languagepb.RegisterLanguageServiceServer(serv, &mockLanguage)
+
+	lis, err := net.Listen("tcp", "localhost:0")
+	if err != nil {
+		log.Fatal(err)
+	}
+	go serv.Serve(lis)
+
+	conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
+	if err != nil {
+		log.Fatal(err)
+	}
+	clientOpt = option.WithGRPCConn(conn)
+
+	os.Exit(m.Run())
+}
+
+func TestLanguageServiceAnalyzeSentiment(t *testing.T) {
+	var language string = "language-1613589672"
+	var expectedResponse = &languagepb.AnalyzeSentimentResponse{
+		Language: language,
+	}
+
+	mockLanguage.err = nil
+	mockLanguage.reqs = nil
+
+	mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse)
+
+	var document *languagepb.Document = &languagepb.Document{}
+	var request = &languagepb.AnalyzeSentimentRequest{
+		Document: document,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.AnalyzeSentiment(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q", got, want)
+	}
+}
+
+func TestLanguageServiceAnalyzeSentimentError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockLanguage.err = gstatus.Error(errCode, "test error")
+
+	var document *languagepb.Document = &languagepb.Document{}
+	var request = &languagepb.AnalyzeSentimentRequest{
+		Document: document,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.AnalyzeSentiment(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestLanguageServiceAnalyzeEntities(t *testing.T) {
+	var language string = "language-1613589672"
+	var expectedResponse = &languagepb.AnalyzeEntitiesResponse{
+		Language: language,
+	}
+
+	mockLanguage.err = nil
+	mockLanguage.reqs = nil
+
+	mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse)
+
+	var document *languagepb.Document = &languagepb.Document{}
+	var request = &languagepb.AnalyzeEntitiesRequest{
+		Document: document,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.AnalyzeEntities(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q", got, want)
+	}
+}
+
+func TestLanguageServiceAnalyzeEntitiesError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockLanguage.err = gstatus.Error(errCode, "test error")
+
+	var document *languagepb.Document = &languagepb.Document{}
+	var request = &languagepb.AnalyzeEntitiesRequest{
+		Document: document,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.AnalyzeEntities(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestLanguageServiceAnalyzeEntitySentiment(t *testing.T) {
+	var language string = "language-1613589672"
+	var expectedResponse = &languagepb.AnalyzeEntitySentimentResponse{
+		Language: language,
+	}
+
+	mockLanguage.err = nil
+	mockLanguage.reqs = nil
+
+	mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse)
+
+	var document *languagepb.Document = &languagepb.Document{}
+	var request = &languagepb.AnalyzeEntitySentimentRequest{
+		Document: document,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.AnalyzeEntitySentiment(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q", got, want)
+	}
+}
+
+func TestLanguageServiceAnalyzeEntitySentimentError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockLanguage.err = gstatus.Error(errCode, "test error")
+
+	var document *languagepb.Document = &languagepb.Document{}
+	var request = &languagepb.AnalyzeEntitySentimentRequest{
+		Document: document,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.AnalyzeEntitySentiment(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestLanguageServiceAnalyzeSyntax(t *testing.T) {
+	var language string = "language-1613589672"
+	var expectedResponse = &languagepb.AnalyzeSyntaxResponse{
+		Language: language,
+	}
+
+	mockLanguage.err = nil
+	mockLanguage.reqs = nil
+
+	mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse)
+
+	var document *languagepb.Document = &languagepb.Document{}
+	var request = &languagepb.AnalyzeSyntaxRequest{
+		Document: document,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.AnalyzeSyntax(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q", got, want)
+	}
+}
+
+func TestLanguageServiceAnalyzeSyntaxError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockLanguage.err = gstatus.Error(errCode, "test error")
+
+	var document *languagepb.Document = &languagepb.Document{}
+	var request = &languagepb.AnalyzeSyntaxRequest{
+		Document: document,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.AnalyzeSyntax(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestLanguageServiceClassifyText(t *testing.T) {
+	var expectedResponse *languagepb.ClassifyTextResponse = &languagepb.ClassifyTextResponse{}
+
+	mockLanguage.err = nil
+	mockLanguage.reqs = nil
+
+	mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse)
+
+	var document *languagepb.Document = &languagepb.Document{}
+	var request = &languagepb.ClassifyTextRequest{
+		Document: document,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.ClassifyText(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q", got, want)
+	}
+}
+
+func TestLanguageServiceClassifyTextError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockLanguage.err = gstatus.Error(errCode, "test error")
+
+	var document *languagepb.Document = &languagepb.Document{}
+	var request = &languagepb.ClassifyTextRequest{
+		Document: document,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.ClassifyText(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestLanguageServiceAnnotateText(t *testing.T) {
+	var language string = "language-1613589672"
+	var expectedResponse = &languagepb.AnnotateTextResponse{
+		Language: language,
+	}
+
+	mockLanguage.err = nil
+	mockLanguage.reqs = nil
+
+	mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse)
+
+	var document *languagepb.Document = &languagepb.Document{}
+	var features *languagepb.AnnotateTextRequest_Features = &languagepb.AnnotateTextRequest_Features{}
+	var request = &languagepb.AnnotateTextRequest{
+		Document: document,
+		Features: features,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.AnnotateText(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q", got, want)
+	}
+}
+
+func TestLanguageServiceAnnotateTextError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockLanguage.err = gstatus.Error(errCode, "test error")
+
+	var document *languagepb.Document = &languagepb.Document{}
+	var features *languagepb.AnnotateTextRequest_Features = &languagepb.AnnotateTextRequest_Features{}
+	var request = &languagepb.AnnotateTextRequest{
+		Document: document,
+		Features: features,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.AnnotateText(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
diff --git a/vendor/cloud.google.com/go/language/apiv1beta2/AnalyzeSentiment_smoke_test.go b/vendor/cloud.google.com/go/language/apiv1beta2/AnalyzeSentiment_smoke_test.go
new file mode 100644
index 0000000..7c2b1bd
--- /dev/null
+++ b/vendor/cloud.google.com/go/language/apiv1beta2/AnalyzeSentiment_smoke_test.go
@@ -0,0 +1,73 @@
+// Copyright 2018 Google LLC
+//
+// Licensed
under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package language + +import ( + languagepb "google.golang.org/genproto/googleapis/cloud/language/v1beta2" +) + +import ( + "fmt" + "strconv" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var _ = fmt.Sprintf +var _ = iterator.Done +var _ = strconv.FormatUint +var _ = time.Now + +func TestLanguageServiceSmoke(t *testing.T) { + if testing.Short() { + t.Skip("skipping smoke test in short mode") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) + if ts == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + + projectId := testutil.ProjID() + _ = projectId + + c, err := NewClient(ctx, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + + var content string = "Hello, world!" + var type_ languagepb.Document_Type = languagepb.Document_PLAIN_TEXT + var document = &languagepb.Document{ + Source: &languagepb.Document_Content{ + Content: content, + }, + Type: type_, + } + var request = &languagepb.AnalyzeSentimentRequest{ + Document: document, + } + + if _, err := c.AnalyzeSentiment(ctx, request); err != nil { + t.Error(err) + } +} diff --git a/vendor/cloud.google.com/go/language/apiv1beta2/doc.go b/vendor/cloud.google.com/go/language/apiv1beta2/doc.go new file mode 100644 index 0000000..b6802e7 --- /dev/null +++ b/vendor/cloud.google.com/go/language/apiv1beta2/doc.go @@ -0,0 +1,48 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package language is an auto-generated package for the +// Google Cloud Natural Language API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// Google Cloud Natural Language API provides natural language understanding +// technologies to developers. Examples include sentiment analysis, entity +// recognition, and text annotations. +package language // import "cloud.google.com/go/language/apiv1beta2" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) 
+ } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + } +} diff --git a/vendor/cloud.google.com/go/language/apiv1beta2/language_client.go b/vendor/cloud.google.com/go/language/apiv1beta2/language_client.go new file mode 100644 index 0000000..9859f48 --- /dev/null +++ b/vendor/cloud.google.com/go/language/apiv1beta2/language_client.go @@ -0,0 +1,229 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package language + +import ( + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + languagepb "google.golang.org/genproto/googleapis/cloud/language/v1beta2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. +type CallOptions struct { + AnalyzeSentiment []gax.CallOption + AnalyzeEntities []gax.CallOption + AnalyzeEntitySentiment []gax.CallOption + AnalyzeSyntax []gax.CallOption + ClassifyText []gax.CallOption + AnnotateText []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("language.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &CallOptions{ + AnalyzeSentiment: retry[[2]string{"default", "idempotent"}], + AnalyzeEntities: retry[[2]string{"default", "idempotent"}], + AnalyzeEntitySentiment: retry[[2]string{"default", "idempotent"}], + AnalyzeSyntax: retry[[2]string{"default", "idempotent"}], + ClassifyText: retry[[2]string{"default", "idempotent"}], + AnnotateText: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with Google Cloud Natural Language API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client languagepb.LanguageServiceClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new language service client. +// +// Provides text analysis operations such as sentiment analysis and entity +// recognition. 
+func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: languagepb.NewLanguageServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// AnalyzeSentiment analyzes the sentiment of the provided text. +func (c *Client) AnalyzeSentiment(ctx context.Context, req *languagepb.AnalyzeSentimentRequest, opts ...gax.CallOption) (*languagepb.AnalyzeSentimentResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.AnalyzeSentiment[0:len(c.CallOptions.AnalyzeSentiment):len(c.CallOptions.AnalyzeSentiment)], opts...) + var resp *languagepb.AnalyzeSentimentResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AnalyzeSentiment(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AnalyzeEntities finds named entities (currently proper names and common nouns) in the text +// along with entity types, salience, mentions for each entity, and +// other properties. +func (c *Client) AnalyzeEntities(ctx context.Context, req *languagepb.AnalyzeEntitiesRequest, opts ...gax.CallOption) (*languagepb.AnalyzeEntitiesResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.AnalyzeEntities[0:len(c.CallOptions.AnalyzeEntities):len(c.CallOptions.AnalyzeEntities)], opts...) + var resp *languagepb.AnalyzeEntitiesResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AnalyzeEntities(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AnalyzeEntitySentiment finds entities, similar to [AnalyzeEntities][google.cloud.language.v1beta2.LanguageService.AnalyzeEntities] in the text and analyzes +// sentiment associated with each entity and its mentions. +func (c *Client) AnalyzeEntitySentiment(ctx context.Context, req *languagepb.AnalyzeEntitySentimentRequest, opts ...gax.CallOption) (*languagepb.AnalyzeEntitySentimentResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.AnalyzeEntitySentiment[0:len(c.CallOptions.AnalyzeEntitySentiment):len(c.CallOptions.AnalyzeEntitySentiment)], opts...) + var resp *languagepb.AnalyzeEntitySentimentResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AnalyzeEntitySentiment(ctx, req, settings.GRPC...) 
+ return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AnalyzeSyntax analyzes the syntax of the text and provides sentence boundaries and +// tokenization along with part of speech tags, dependency trees, and other +// properties. +func (c *Client) AnalyzeSyntax(ctx context.Context, req *languagepb.AnalyzeSyntaxRequest, opts ...gax.CallOption) (*languagepb.AnalyzeSyntaxResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.AnalyzeSyntax[0:len(c.CallOptions.AnalyzeSyntax):len(c.CallOptions.AnalyzeSyntax)], opts...) + var resp *languagepb.AnalyzeSyntaxResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AnalyzeSyntax(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ClassifyText classifies a document into categories. +func (c *Client) ClassifyText(ctx context.Context, req *languagepb.ClassifyTextRequest, opts ...gax.CallOption) (*languagepb.ClassifyTextResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ClassifyText[0:len(c.CallOptions.ClassifyText):len(c.CallOptions.ClassifyText)], opts...) + var resp *languagepb.ClassifyTextResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ClassifyText(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AnnotateText a convenience method that provides all syntax, sentiment, entity, and +// classification features in one call. +func (c *Client) AnnotateText(ctx context.Context, req *languagepb.AnnotateTextRequest, opts ...gax.CallOption) (*languagepb.AnnotateTextResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.AnnotateText[0:len(c.CallOptions.AnnotateText):len(c.CallOptions.AnnotateText)], opts...) + var resp *languagepb.AnnotateTextResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AnnotateText(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/language/apiv1beta2/language_client_example_test.go b/vendor/cloud.google.com/go/language/apiv1beta2/language_client_example_test.go new file mode 100644 index 0000000..d427900 --- /dev/null +++ b/vendor/cloud.google.com/go/language/apiv1beta2/language_client_example_test.go @@ -0,0 +1,141 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
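+
+// The examples below exercise each method of the v1beta2 Client. As a
+// minimal sketch (assuming application default credentials are available;
+// the response handling is illustrative only), a plain-text sentiment
+// request can be assembled the same way the smoke test above builds it:
+//
+//	ctx := context.Background()
+//	c, err := language.NewClient(ctx)
+//	if err != nil {
+//		// TODO: Handle error.
+//	}
+//	defer c.Close()
+//	resp, err := c.AnalyzeSentiment(ctx, &languagepb.AnalyzeSentimentRequest{
+//		Document: &languagepb.Document{
+//			Source: &languagepb.Document_Content{Content: "Hello, world!"},
+//			Type:   languagepb.Document_PLAIN_TEXT,
+//		},
+//	})
+//	if err != nil {
+//		// TODO: Handle error.
+//	}
+//	_ = resp.DocumentSentiment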
+ +package language_test + +import ( + "cloud.google.com/go/language/apiv1beta2" + "golang.org/x/net/context" + languagepb "google.golang.org/genproto/googleapis/cloud/language/v1beta2" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_AnalyzeSentiment() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &languagepb.AnalyzeSentimentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.AnalyzeSentiment(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_AnalyzeEntities() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &languagepb.AnalyzeEntitiesRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.AnalyzeEntities(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_AnalyzeEntitySentiment() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &languagepb.AnalyzeEntitySentimentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.AnalyzeEntitySentiment(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_AnalyzeSyntax() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &languagepb.AnalyzeSyntaxRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.AnalyzeSyntax(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ClassifyText() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &languagepb.ClassifyTextRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ClassifyText(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_AnnotateText() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &languagepb.AnnotateTextRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.AnnotateText(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/language/apiv1beta2/mock_test.go b/vendor/cloud.google.com/go/language/apiv1beta2/mock_test.go new file mode 100644 index 0000000..c6974ae --- /dev/null +++ b/vendor/cloud.google.com/go/language/apiv1beta2/mock_test.go @@ -0,0 +1,518 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
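+
+// As in the v1 mock tests earlier in this patch, the tests in this file
+// run against an in-process gRPC server rather than the live API: a
+// mockLanguageServer is registered on a grpc.Server listening on
+// localhost:0, and the client under test is pointed at it through
+// option.WithGRPCConn, so each test can stub responses via
+// mockLanguage.resps and errors via mockLanguage.err. The wiring, as
+// TestMain below performs it:
+//
+//	serv := grpc.NewServer()
+//	languagepb.RegisterLanguageServiceServer(serv, &mockLanguage)
+//	lis, err := net.Listen("tcp", "localhost:0")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	go serv.Serve(lis)
+//	conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	clientOpt = option.WithGRPCConn(conn)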
+ +package language + +import ( + languagepb "google.golang.org/genproto/googleapis/cloud/language/v1beta2" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockLanguageServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + languagepb.LanguageServiceServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockLanguageServer) AnalyzeSentiment(ctx context.Context, req *languagepb.AnalyzeSentimentRequest) (*languagepb.AnalyzeSentimentResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*languagepb.AnalyzeSentimentResponse), nil +} + +func (s *mockLanguageServer) AnalyzeEntities(ctx context.Context, req *languagepb.AnalyzeEntitiesRequest) (*languagepb.AnalyzeEntitiesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*languagepb.AnalyzeEntitiesResponse), nil +} + +func (s *mockLanguageServer) AnalyzeEntitySentiment(ctx context.Context, req *languagepb.AnalyzeEntitySentimentRequest) (*languagepb.AnalyzeEntitySentimentResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*languagepb.AnalyzeEntitySentimentResponse), nil +} + +func (s *mockLanguageServer) AnalyzeSyntax(ctx context.Context, req *languagepb.AnalyzeSyntaxRequest) (*languagepb.AnalyzeSyntaxResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*languagepb.AnalyzeSyntaxResponse), nil +} + +func (s *mockLanguageServer) ClassifyText(ctx context.Context, req *languagepb.ClassifyTextRequest) (*languagepb.ClassifyTextResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*languagepb.ClassifyTextResponse), nil +} + +func (s *mockLanguageServer) AnnotateText(ctx 
context.Context, req *languagepb.AnnotateTextRequest) (*languagepb.AnnotateTextResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*languagepb.AnnotateTextResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. +var clientOpt option.ClientOption + +var ( + mockLanguage mockLanguageServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + languagepb.RegisterLanguageServiceServer(serv, &mockLanguage) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestLanguageServiceAnalyzeSentiment(t *testing.T) { + var language string = "language-1613589672" + var expectedResponse = &languagepb.AnalyzeSentimentResponse{ + Language: language, + } + + mockLanguage.err = nil + mockLanguage.reqs = nil + + mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse) + + var document *languagepb.Document = &languagepb.Document{} + var request = &languagepb.AnalyzeSentimentRequest{ + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeSentiment(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestLanguageServiceAnalyzeSentimentError(t *testing.T) { + errCode := codes.PermissionDenied + mockLanguage.err = gstatus.Error(errCode, "test error") + + var document *languagepb.Document = &languagepb.Document{} + var request = &languagepb.AnalyzeSentimentRequest{ + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeSentiment(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestLanguageServiceAnalyzeEntities(t *testing.T) { + var language string = "language-1613589672" + var expectedResponse = &languagepb.AnalyzeEntitiesResponse{ + Language: language, + } + + mockLanguage.err = nil + mockLanguage.reqs = nil + + mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse) + + var document *languagepb.Document = &languagepb.Document{} + var request = &languagepb.AnalyzeEntitiesRequest{ + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeEntities(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response 
%q, want %q)", got, want) + } +} + +func TestLanguageServiceAnalyzeEntitiesError(t *testing.T) { + errCode := codes.PermissionDenied + mockLanguage.err = gstatus.Error(errCode, "test error") + + var document *languagepb.Document = &languagepb.Document{} + var request = &languagepb.AnalyzeEntitiesRequest{ + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeEntities(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestLanguageServiceAnalyzeEntitySentiment(t *testing.T) { + var language string = "language-1613589672" + var expectedResponse = &languagepb.AnalyzeEntitySentimentResponse{ + Language: language, + } + + mockLanguage.err = nil + mockLanguage.reqs = nil + + mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse) + + var document *languagepb.Document = &languagepb.Document{} + var request = &languagepb.AnalyzeEntitySentimentRequest{ + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeEntitySentiment(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestLanguageServiceAnalyzeEntitySentimentError(t *testing.T) { + errCode := codes.PermissionDenied + mockLanguage.err = gstatus.Error(errCode, "test error") + + var document *languagepb.Document = &languagepb.Document{} + var request = &languagepb.AnalyzeEntitySentimentRequest{ + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeEntitySentiment(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestLanguageServiceAnalyzeSyntax(t *testing.T) { + var language string = "language-1613589672" + var expectedResponse = &languagepb.AnalyzeSyntaxResponse{ + Language: language, + } + + mockLanguage.err = nil + mockLanguage.reqs = nil + + mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse) + + var document *languagepb.Document = &languagepb.Document{} + var request = &languagepb.AnalyzeSyntaxRequest{ + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeSyntax(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestLanguageServiceAnalyzeSyntaxError(t *testing.T) { + errCode := codes.PermissionDenied + mockLanguage.err = gstatus.Error(errCode, "test error") + + var document *languagepb.Document = &languagepb.Document{} + var request = &languagepb.AnalyzeSyntaxRequest{ + Document: document, + } 
+ + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeSyntax(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestLanguageServiceClassifyText(t *testing.T) { + var expectedResponse *languagepb.ClassifyTextResponse = &languagepb.ClassifyTextResponse{} + + mockLanguage.err = nil + mockLanguage.reqs = nil + + mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse) + + var document *languagepb.Document = &languagepb.Document{} + var request = &languagepb.ClassifyTextRequest{ + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ClassifyText(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestLanguageServiceClassifyTextError(t *testing.T) { + errCode := codes.PermissionDenied + mockLanguage.err = gstatus.Error(errCode, "test error") + + var document *languagepb.Document = &languagepb.Document{} + var request = &languagepb.ClassifyTextRequest{ + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ClassifyText(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestLanguageServiceAnnotateText(t *testing.T) { + var language string = "language-1613589672" + var expectedResponse = &languagepb.AnnotateTextResponse{ + Language: language, + } + + mockLanguage.err = nil + mockLanguage.reqs = nil + + mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse) + + var document *languagepb.Document = &languagepb.Document{} + var features *languagepb.AnnotateTextRequest_Features = &languagepb.AnnotateTextRequest_Features{} + var request = &languagepb.AnnotateTextRequest{ + Document: document, + Features: features, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnnotateText(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestLanguageServiceAnnotateTextError(t *testing.T) { + errCode := codes.PermissionDenied + mockLanguage.err = gstatus.Error(errCode, "test error") + + var document *languagepb.Document = &languagepb.Document{} + var features *languagepb.AnnotateTextRequest_Features = &languagepb.AnnotateTextRequest_Features{} + var request = &languagepb.AnnotateTextRequest{ + Document: document, + Features: features, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnnotateText(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok 
{ + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/license_test.go b/vendor/cloud.google.com/go/license_test.go new file mode 100644 index 0000000..f93e9e0 --- /dev/null +++ b/vendor/cloud.google.com/go/license_test.go @@ -0,0 +1,71 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cloud + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" +) + +var sentinels = []string{ + "Copyright", + "Google", + `Licensed under the Apache License, Version 2.0 (the "License");`, +} + +func TestLicense(t *testing.T) { + t.Parallel() + err := filepath.Walk(".", func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + if ext := filepath.Ext(path); ext != ".go" && ext != ".proto" { + return nil + } + if strings.HasSuffix(path, ".pb.go") { + // .pb.go files are generated from the proto files. + // .proto files must have license headers. + return nil + } + if path == "bigtable/cmd/cbt/cbtdoc.go" { + // Automatically generated. + return nil + } + + src, err := ioutil.ReadFile(path) + if err != nil { + return nil + } + src = src[:140] // Ensure all of the sentinel values are at the top of the file. + + // Find license + for _, sentinel := range sentinels { + if !bytes.Contains(src, []byte(sentinel)) { + t.Errorf("%v: license header not present. want %q", path, sentinel) + return nil + } + } + + return nil + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/README.md b/vendor/cloud.google.com/go/logging/apiv2/README.md new file mode 100644 index 0000000..d2d9a17 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/README.md @@ -0,0 +1,11 @@ +Auto-generated logging v2 clients +================================= + +This package includes auto-generated clients for the logging v2 API. + +Use the handwritten logging client (in the parent directory, +cloud.google.com/go/logging) in preference to this. + +This code is EXPERIMENTAL and subject to CHANGE AT ANY TIME. + + diff --git a/vendor/cloud.google.com/go/logging/apiv2/WriteLogEntries_smoke_test.go b/vendor/cloud.google.com/go/logging/apiv2/WriteLogEntries_smoke_test.go new file mode 100644 index 0000000..08d675a --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/WriteLogEntries_smoke_test.go @@ -0,0 +1,68 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package logging + +import ( + loggingpb "google.golang.org/genproto/googleapis/logging/v2" +) + +import ( + "fmt" + "strconv" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var _ = fmt.Sprintf +var _ = iterator.Done +var _ = strconv.FormatUint +var _ = time.Now + +func TestLoggingServiceV2Smoke(t *testing.T) { + if testing.Short() { + t.Skip("skipping smoke test in short mode") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) + if ts == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + + projectId := testutil.ProjID() + _ = projectId + + c, err := NewClient(ctx, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + + var entries []*loggingpb.LogEntry = nil + var formattedLogName string = fmt.Sprintf("projects/%s/logs/%s", projectId, "test-"+strconv.FormatInt(time.Now().UnixNano(), 10)+"") + var request = &loggingpb.WriteLogEntriesRequest{ + Entries: entries, + LogName: formattedLogName, + } + + if _, err := c.WriteLogEntries(ctx, request); err != nil { + t.Error(err) + } +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/config_client.go b/vendor/cloud.google.com/go/logging/apiv2/config_client.go new file mode 100644 index 0000000..bad7a3a --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/config_client.go @@ -0,0 +1,421 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package logging + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + loggingpb "google.golang.org/genproto/googleapis/logging/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// ConfigCallOptions contains the retry settings for each method of ConfigClient. 
+type ConfigCallOptions struct { + ListSinks []gax.CallOption + GetSink []gax.CallOption + CreateSink []gax.CallOption + UpdateSink []gax.CallOption + DeleteSink []gax.CallOption + ListExclusions []gax.CallOption + GetExclusion []gax.CallOption + CreateExclusion []gax.CallOption + UpdateExclusion []gax.CallOption + DeleteExclusion []gax.CallOption +} + +func defaultConfigClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("logging.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultConfigCallOptions() *ConfigCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Internal, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + }), + }, + } + return &ConfigCallOptions{ + ListSinks: retry[[2]string{"default", "idempotent"}], + GetSink: retry[[2]string{"default", "idempotent"}], + CreateSink: retry[[2]string{"default", "non_idempotent"}], + UpdateSink: retry[[2]string{"default", "non_idempotent"}], + DeleteSink: retry[[2]string{"default", "idempotent"}], + ListExclusions: retry[[2]string{"default", "idempotent"}], + GetExclusion: retry[[2]string{"default", "idempotent"}], + CreateExclusion: retry[[2]string{"default", "non_idempotent"}], + UpdateExclusion: retry[[2]string{"default", "non_idempotent"}], + DeleteExclusion: retry[[2]string{"default", "idempotent"}], + } +} + +// ConfigClient is a client for interacting with Stackdriver Logging API. +type ConfigClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + configClient loggingpb.ConfigServiceV2Client + + // The call options for this service. + CallOptions *ConfigCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewConfigClient creates a new config service v2 client. +// +// Service for configuring sinks used to export log entries outside of +// Stackdriver Logging. +func NewConfigClient(ctx context.Context, opts ...option.ClientOption) (*ConfigClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultConfigClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &ConfigClient{ + conn: conn, + CallOptions: defaultConfigCallOptions(), + + configClient: loggingpb.NewConfigServiceV2Client(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *ConfigClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ConfigClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *ConfigClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListSinks lists sinks. 
+func (c *ConfigClient) ListSinks(ctx context.Context, req *loggingpb.ListSinksRequest, opts ...gax.CallOption) *LogSinkIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListSinks[0:len(c.CallOptions.ListSinks):len(c.CallOptions.ListSinks)], opts...) + it := &LogSinkIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogSink, string, error) { + var resp *loggingpb.ListSinksResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.configClient.ListSinks(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Sinks, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetSink gets a sink. +func (c *ConfigClient) GetSink(ctx context.Context, req *loggingpb.GetSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetSink[0:len(c.CallOptions.GetSink):len(c.CallOptions.GetSink)], opts...) + var resp *loggingpb.LogSink + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.configClient.GetSink(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateSink creates a sink that exports specified log entries to a destination. The +// export of newly-ingested log entries begins immediately, unless the sink's +// writer_identity is not permitted to write to the destination. A sink can +// export log entries only from the resource owning the sink. +func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateSink[0:len(c.CallOptions.CreateSink):len(c.CallOptions.CreateSink)], opts...) + var resp *loggingpb.LogSink + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.configClient.CreateSink(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateSink updates a sink. This method replaces the following fields in the existing +// sink with values from the new sink: destination, and filter. +// The updated sink might also have a new writer_identity; see the +// unique_writer_identity field. +func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateSink[0:len(c.CallOptions.UpdateSink):len(c.CallOptions.UpdateSink)], opts...) + var resp *loggingpb.LogSink + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.configClient.UpdateSink(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteSink deletes a sink. If the sink has a unique writer_identity, then that +// service account is also deleted. +func (c *ConfigClient) DeleteSink(ctx context.Context, req *loggingpb.DeleteSinkRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteSink[0:len(c.CallOptions.DeleteSink):len(c.CallOptions.DeleteSink)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.configClient.DeleteSink(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ListExclusions lists all the exclusions in a parent resource. +func (c *ConfigClient) ListExclusions(ctx context.Context, req *loggingpb.ListExclusionsRequest, opts ...gax.CallOption) *LogExclusionIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListExclusions[0:len(c.CallOptions.ListExclusions):len(c.CallOptions.ListExclusions)], opts...) + it := &LogExclusionIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogExclusion, string, error) { + var resp *loggingpb.ListExclusionsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.configClient.ListExclusions(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Exclusions, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetExclusion gets the description of an exclusion. +func (c *ConfigClient) GetExclusion(ctx context.Context, req *loggingpb.GetExclusionRequest, opts ...gax.CallOption) (*loggingpb.LogExclusion, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetExclusion[0:len(c.CallOptions.GetExclusion):len(c.CallOptions.GetExclusion)], opts...) + var resp *loggingpb.LogExclusion + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.configClient.GetExclusion(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateExclusion creates a new exclusion in a specified parent resource. +// Only log entries belonging to that resource can be excluded. +// You can have up to 10 exclusions in a resource. +func (c *ConfigClient) CreateExclusion(ctx context.Context, req *loggingpb.CreateExclusionRequest, opts ...gax.CallOption) (*loggingpb.LogExclusion, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateExclusion[0:len(c.CallOptions.CreateExclusion):len(c.CallOptions.CreateExclusion)], opts...) + var resp *loggingpb.LogExclusion + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.configClient.CreateExclusion(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateExclusion changes one or more properties of an existing exclusion. +func (c *ConfigClient) UpdateExclusion(ctx context.Context, req *loggingpb.UpdateExclusionRequest, opts ...gax.CallOption) (*loggingpb.LogExclusion, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateExclusion[0:len(c.CallOptions.UpdateExclusion):len(c.CallOptions.UpdateExclusion)], opts...) + var resp *loggingpb.LogExclusion + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.configClient.UpdateExclusion(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteExclusion deletes an exclusion. +func (c *ConfigClient) DeleteExclusion(ctx context.Context, req *loggingpb.DeleteExclusionRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteExclusion[0:len(c.CallOptions.DeleteExclusion):len(c.CallOptions.DeleteExclusion)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.configClient.DeleteExclusion(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// LogExclusionIterator manages a stream of *loggingpb.LogExclusion. +type LogExclusionIterator struct { + items []*loggingpb.LogExclusion + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*loggingpb.LogExclusion, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *LogExclusionIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *LogExclusionIterator) Next() (*loggingpb.LogExclusion, error) { + var item *loggingpb.LogExclusion + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *LogExclusionIterator) bufLen() int { + return len(it.items) +} + +func (it *LogExclusionIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// LogSinkIterator manages a stream of *loggingpb.LogSink. +type LogSinkIterator struct { + items []*loggingpb.LogSink + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*loggingpb.LogSink, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. 
+func (it *LogSinkIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *LogSinkIterator) Next() (*loggingpb.LogSink, error) { + var item *loggingpb.LogSink + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *LogSinkIterator) bufLen() int { + return len(it.items) +} + +func (it *LogSinkIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/config_client_example_test.go b/vendor/cloud.google.com/go/logging/apiv2/config_client_example_test.go new file mode 100644 index 0000000..a7a36dc --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/config_client_example_test.go @@ -0,0 +1,222 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package logging_test + +import ( + "cloud.google.com/go/logging/apiv2" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + loggingpb "google.golang.org/genproto/googleapis/logging/v2" +) + +func ExampleNewConfigClient() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleConfigClient_ListSinks() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.ListSinksRequest{ + // TODO: Fill request struct fields. + } + it := c.ListSinks(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleConfigClient_GetSink() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.GetSinkRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetSink(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleConfigClient_CreateSink() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.CreateSinkRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateSink(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleConfigClient_UpdateSink() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.UpdateSinkRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateSink(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp +} + +func ExampleConfigClient_DeleteSink() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.DeleteSinkRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteSink(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleConfigClient_ListExclusions() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.ListExclusionsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListExclusions(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleConfigClient_GetExclusion() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.GetExclusionRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetExclusion(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleConfigClient_CreateExclusion() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.CreateExclusionRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateExclusion(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleConfigClient_UpdateExclusion() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.UpdateExclusionRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateExclusion(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleConfigClient_DeleteExclusion() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.DeleteExclusionRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteExclusion(ctx, req) + if err != nil { + // TODO: Handle error. + } +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/doc.go b/vendor/cloud.google.com/go/logging/apiv2/doc.go new file mode 100644 index 0000000..b8087b0 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/doc.go @@ -0,0 +1,52 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package logging is an auto-generated package for the +// Stackdriver Logging API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// Writes log entries and manages your Stackdriver Logging configuration. +// +// Use the client at cloud.google.com/go/logging in preference to this. 
+package logging // import "cloud.google.com/go/logging/apiv2" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read", + "https://www.googleapis.com/auth/logging.write", + } +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/logging_client.go b/vendor/cloud.google.com/go/logging/apiv2/logging_client.go new file mode 100644 index 0000000..90dae0a --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/logging_client.go @@ -0,0 +1,413 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package logging + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + loggingpb "google.golang.org/genproto/googleapis/logging/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. 
+type CallOptions struct { + DeleteLog []gax.CallOption + WriteLogEntries []gax.CallOption + ListLogEntries []gax.CallOption + ListMonitoredResourceDescriptors []gax.CallOption + ListLogs []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("logging.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Internal, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + }), + }, + {"list", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Internal, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + }), + }, + } + return &CallOptions{ + DeleteLog: retry[[2]string{"default", "idempotent"}], + WriteLogEntries: retry[[2]string{"default", "non_idempotent"}], + ListLogEntries: retry[[2]string{"list", "idempotent"}], + ListMonitoredResourceDescriptors: retry[[2]string{"default", "idempotent"}], + ListLogs: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with Stackdriver Logging API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client loggingpb.LoggingServiceV2Client + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new logging service v2 client. +// +// Service for ingesting and querying logs. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: loggingpb.NewLoggingServiceV2Client(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// DeleteLog deletes all the log entries in a log. +// The log reappears if it receives new entries. +// Log entries written shortly before the delete operation might not be +// deleted. +func (c *Client) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteLog[0:len(c.CallOptions.DeleteLog):len(c.CallOptions.DeleteLog)], opts...) 
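+	// The three-index slice expression above caps the slice's capacity at its length, so appending the caller's opts allocates a fresh backing array instead of mutating the client's shared default CallOptions.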
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		_, err = c.client.DeleteLog(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	return err
+}
+
+// WriteLogEntries writes log entries to Stackdriver Logging. This API method
+// is the only way to send log entries to Stackdriver Logging. This method is
+// used, directly or indirectly, by the Stackdriver Logging agent (fluentd)
+// and all logging libraries configured to use Stackdriver Logging.
+func (c *Client) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEntriesRequest, opts ...gax.CallOption) (*loggingpb.WriteLogEntriesResponse, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.WriteLogEntries[0:len(c.CallOptions.WriteLogEntries):len(c.CallOptions.WriteLogEntries)], opts...)
+	var resp *loggingpb.WriteLogEntriesResponse
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.WriteLogEntries(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// ListLogEntries lists log entries. Use this method to retrieve log entries from
+// Stackdriver Logging. For ways to export log entries, see
+// Exporting Logs (at /logging/docs/export).
+func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntriesRequest, opts ...gax.CallOption) *LogEntryIterator {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.ListLogEntries[0:len(c.CallOptions.ListLogEntries):len(c.CallOptions.ListLogEntries)], opts...)
+	it := &LogEntryIterator{}
+	it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogEntry, string, error) {
+		var resp *loggingpb.ListLogEntriesResponse
+		req.PageToken = pageToken
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+			var err error
+			resp, err = c.client.ListLogEntries(ctx, req, settings.GRPC...)
+			return err
+		}, opts...)
+		if err != nil {
+			return nil, "", err
+		}
+		return resp.Entries, resp.NextPageToken, nil
+	}
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, items...)
+		return nextPageToken, nil
+	}
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	return it
+}
+
+// ListMonitoredResourceDescriptors lists the descriptors for monitored resource types used by Stackdriver
+// Logging.
+func (c *Client) ListMonitoredResourceDescriptors(ctx context.Context, req *loggingpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.ListMonitoredResourceDescriptors[0:len(c.CallOptions.ListMonitoredResourceDescriptors):len(c.CallOptions.ListMonitoredResourceDescriptors)], opts...)
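+	// The iterator is lazy: no RPC is issued here. InternalFetch below retrieves one page per call as the caller advances with Next.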
+ it := &MonitoredResourceDescriptorIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) { + var resp *loggingpb.ListMonitoredResourceDescriptorsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListMonitoredResourceDescriptors(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.ResourceDescriptors, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// ListLogs lists the logs in projects, organizations, folders, or billing accounts. +// Only logs that have entries are listed. +func (c *Client) ListLogs(ctx context.Context, req *loggingpb.ListLogsRequest, opts ...gax.CallOption) *StringIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListLogs[0:len(c.CallOptions.ListLogs):len(c.CallOptions.ListLogs)], opts...) + it := &StringIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) { + var resp *loggingpb.ListLogsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListLogs(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.LogNames, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// LogEntryIterator manages a stream of *loggingpb.LogEntry. +type LogEntryIterator struct { + items []*loggingpb.LogEntry + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*loggingpb.LogEntry, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *LogEntryIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *LogEntryIterator) Next() (*loggingpb.LogEntry, error) { + var item *loggingpb.LogEntry + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *LogEntryIterator) bufLen() int { + return len(it.items) +} + +func (it *LogEntryIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// MonitoredResourceDescriptorIterator manages a stream of *monitoredrespb.MonitoredResourceDescriptor. +type MonitoredResourceDescriptorIterator struct { + items []*monitoredrespb.MonitoredResourceDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResourceDescriptor, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MonitoredResourceDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *MonitoredResourceDescriptorIterator) Next() (*monitoredrespb.MonitoredResourceDescriptor, error) { + var item *monitoredrespb.MonitoredResourceDescriptor + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MonitoredResourceDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *MonitoredResourceDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// StringIterator manages a stream of string. +type StringIterator struct { + items []string + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []string, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *StringIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *StringIterator) Next() (string, error) { + var item string + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *StringIterator) bufLen() int { + return len(it.items) +} + +func (it *StringIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/logging_client_example_test.go b/vendor/cloud.google.com/go/logging/apiv2/logging_client_example_test.go new file mode 100644 index 0000000..232736d --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/logging_client_example_test.go @@ -0,0 +1,140 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package logging_test + +import ( + "cloud.google.com/go/logging/apiv2" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + loggingpb "google.golang.org/genproto/googleapis/logging/v2" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := logging.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_DeleteLog() { + ctx := context.Background() + c, err := logging.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.DeleteLogRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteLog(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_WriteLogEntries() { + ctx := context.Background() + c, err := logging.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.WriteLogEntriesRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.WriteLogEntries(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListLogEntries() { + ctx := context.Background() + c, err := logging.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.ListLogEntriesRequest{ + // TODO: Fill request struct fields. + } + it := c.ListLogEntries(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_ListMonitoredResourceDescriptors() { + ctx := context.Background() + c, err := logging.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.ListMonitoredResourceDescriptorsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListMonitoredResourceDescriptors(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_ListLogs() { + ctx := context.Background() + c, err := logging.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.ListLogsRequest{ + // TODO: Fill request struct fields. 
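+		// For example, Parent: "projects/my-project" (an illustrative ID); organizations, folders, and billing accounts are accepted as well.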
+ } + it := c.ListLogs(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go b/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go new file mode 100644 index 0000000..e2f9661 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go @@ -0,0 +1,264 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package logging + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + loggingpb "google.golang.org/genproto/googleapis/logging/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// MetricsCallOptions contains the retry settings for each method of MetricsClient. +type MetricsCallOptions struct { + ListLogMetrics []gax.CallOption + GetLogMetric []gax.CallOption + CreateLogMetric []gax.CallOption + UpdateLogMetric []gax.CallOption + DeleteLogMetric []gax.CallOption +} + +func defaultMetricsClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("logging.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultMetricsCallOptions() *MetricsCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Internal, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + }), + }, + } + return &MetricsCallOptions{ + ListLogMetrics: retry[[2]string{"default", "idempotent"}], + GetLogMetric: retry[[2]string{"default", "idempotent"}], + CreateLogMetric: retry[[2]string{"default", "non_idempotent"}], + UpdateLogMetric: retry[[2]string{"default", "non_idempotent"}], + DeleteLogMetric: retry[[2]string{"default", "idempotent"}], + } +} + +// MetricsClient is a client for interacting with Stackdriver Logging API. +type MetricsClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + metricsClient loggingpb.MetricsServiceV2Client + + // The call options for this service. + CallOptions *MetricsCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewMetricsClient creates a new metrics service v2 client. +// +// Service for configuring logs-based metrics. +func NewMetricsClient(ctx context.Context, opts ...option.ClientOption) (*MetricsClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultMetricsClientOptions(), opts...)...) 
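+	// transport.DialGRPC resolves credentials and dials the endpoint from the merged client options; failures here typically indicate auth or connectivity problems.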
+ if err != nil { + return nil, err + } + c := &MetricsClient{ + conn: conn, + CallOptions: defaultMetricsCallOptions(), + + metricsClient: loggingpb.NewMetricsServiceV2Client(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *MetricsClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *MetricsClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *MetricsClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListLogMetrics lists logs-based metrics. +func (c *MetricsClient) ListLogMetrics(ctx context.Context, req *loggingpb.ListLogMetricsRequest, opts ...gax.CallOption) *LogMetricIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListLogMetrics[0:len(c.CallOptions.ListLogMetrics):len(c.CallOptions.ListLogMetrics)], opts...) + it := &LogMetricIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogMetric, string, error) { + var resp *loggingpb.ListLogMetricsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricsClient.ListLogMetrics(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Metrics, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetLogMetric gets a logs-based metric. +func (c *MetricsClient) GetLogMetric(ctx context.Context, req *loggingpb.GetLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetLogMetric[0:len(c.CallOptions.GetLogMetric):len(c.CallOptions.GetLogMetric)], opts...) + var resp *loggingpb.LogMetric + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricsClient.GetLogMetric(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateLogMetric creates a logs-based metric. +func (c *MetricsClient) CreateLogMetric(ctx context.Context, req *loggingpb.CreateLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateLogMetric[0:len(c.CallOptions.CreateLogMetric):len(c.CallOptions.CreateLogMetric)], opts...) 
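+	// resp is captured by the closure below so the response from the final (possibly retried) attempt is what gets returned.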
+ var resp *loggingpb.LogMetric + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricsClient.CreateLogMetric(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateLogMetric creates or updates a logs-based metric. +func (c *MetricsClient) UpdateLogMetric(ctx context.Context, req *loggingpb.UpdateLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateLogMetric[0:len(c.CallOptions.UpdateLogMetric):len(c.CallOptions.UpdateLogMetric)], opts...) + var resp *loggingpb.LogMetric + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricsClient.UpdateLogMetric(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteLogMetric deletes a logs-based metric. +func (c *MetricsClient) DeleteLogMetric(ctx context.Context, req *loggingpb.DeleteLogMetricRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteLogMetric[0:len(c.CallOptions.DeleteLogMetric):len(c.CallOptions.DeleteLogMetric)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.metricsClient.DeleteLogMetric(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// LogMetricIterator manages a stream of *loggingpb.LogMetric. +type LogMetricIterator struct { + items []*loggingpb.LogMetric + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*loggingpb.LogMetric, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *LogMetricIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *LogMetricIterator) Next() (*loggingpb.LogMetric, error) { + var item *loggingpb.LogMetric + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *LogMetricIterator) bufLen() int { + return len(it.items) +} + +func (it *LogMetricIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/metrics_client_example_test.go b/vendor/cloud.google.com/go/logging/apiv2/metrics_client_example_test.go new file mode 100644 index 0000000..d69e1e9 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/metrics_client_example_test.go @@ -0,0 +1,128 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package logging_test + +import ( + "cloud.google.com/go/logging/apiv2" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + loggingpb "google.golang.org/genproto/googleapis/logging/v2" +) + +func ExampleNewMetricsClient() { + ctx := context.Background() + c, err := logging.NewMetricsClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleMetricsClient_ListLogMetrics() { + ctx := context.Background() + c, err := logging.NewMetricsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.ListLogMetricsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListLogMetrics(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleMetricsClient_GetLogMetric() { + ctx := context.Background() + c, err := logging.NewMetricsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.GetLogMetricRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetLogMetric(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleMetricsClient_CreateLogMetric() { + ctx := context.Background() + c, err := logging.NewMetricsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.CreateLogMetricRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateLogMetric(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleMetricsClient_UpdateLogMetric() { + ctx := context.Background() + c, err := logging.NewMetricsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.UpdateLogMetricRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateLogMetric(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleMetricsClient_DeleteLogMetric() { + ctx := context.Background() + c, err := logging.NewMetricsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.DeleteLogMetricRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteLogMetric(ctx, req) + if err != nil { + // TODO: Handle error. + } +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/mock_test.go b/vendor/cloud.google.com/go/logging/apiv2/mock_test.go new file mode 100644 index 0000000..765cc94 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/mock_test.go @@ -0,0 +1,1677 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package logging
+
+import (
+	emptypb "github.com/golang/protobuf/ptypes/empty"
+	monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+	loggingpb "google.golang.org/genproto/googleapis/logging/v2"
+	field_maskpb "google.golang.org/genproto/protobuf/field_mask"
+)
+
+import (
+	"flag"
+	"fmt"
+	"io"
+	"log"
+	"net"
+	"os"
+	"strings"
+	"testing"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+	"golang.org/x/net/context"
+	"google.golang.org/api/option"
+	status "google.golang.org/genproto/googleapis/rpc/status"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	gstatus "google.golang.org/grpc/status"
+)
+
+var _ = io.EOF
+var _ = ptypes.MarshalAny
+var _ status.Status
+
+type mockLoggingServer struct {
+	// Embed for forward compatibility.
+	// Tests will keep working if more methods are added
+	// in the future.
+	loggingpb.LoggingServiceV2Server
+
+	reqs []proto.Message
+
+	// If set, all calls return this error.
+	err error
+
+	// responses to return if err == nil
+	resps []proto.Message
+}
+
+func (s *mockLoggingServer) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest) (*emptypb.Empty, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*emptypb.Empty), nil
+}
+
+func (s *mockLoggingServer) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEntriesRequest) (*loggingpb.WriteLogEntriesResponse, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*loggingpb.WriteLogEntriesResponse), nil
+}
+
+func (s *mockLoggingServer) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntriesRequest) (*loggingpb.ListLogEntriesResponse, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*loggingpb.ListLogEntriesResponse), nil
+}
+
+func (s *mockLoggingServer) ListMonitoredResourceDescriptors(ctx context.Context, req *loggingpb.ListMonitoredResourceDescriptorsRequest) (*loggingpb.ListMonitoredResourceDescriptorsResponse, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*loggingpb.ListMonitoredResourceDescriptorsResponse), nil
+}
+
+func (s *mockLoggingServer) ListLogs(ctx context.Context, req *loggingpb.ListLogsRequest) (*loggingpb.ListLogsResponse, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*loggingpb.ListLogsResponse), nil
+}
+
+type mockConfigServer struct {
+	// Embed for forward compatibility.
+	// Tests will keep working if more methods are added
+	// in the future.
+	loggingpb.ConfigServiceV2Server
+
+	reqs []proto.Message
+
+	// If set, all calls return this error.
+	err error
+
+	// responses to return if err == nil
+	resps []proto.Message
+}
+
+func (s *mockConfigServer) ListSinks(ctx context.Context, req *loggingpb.ListSinksRequest) (*loggingpb.ListSinksResponse, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*loggingpb.ListSinksResponse), nil
+}
+
+func (s *mockConfigServer) GetSink(ctx context.Context, req *loggingpb.GetSinkRequest) (*loggingpb.LogSink, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*loggingpb.LogSink), nil
+}
+
+func (s *mockConfigServer) CreateSink(ctx context.Context, req *loggingpb.CreateSinkRequest) (*loggingpb.LogSink, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*loggingpb.LogSink), nil
+}
+
+func (s *mockConfigServer) UpdateSink(ctx context.Context, req *loggingpb.UpdateSinkRequest) (*loggingpb.LogSink, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*loggingpb.LogSink), nil
+}
+
+func (s *mockConfigServer) DeleteSink(ctx context.Context, req *loggingpb.DeleteSinkRequest) (*emptypb.Empty, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*emptypb.Empty), nil
+}
+
+func (s *mockConfigServer) ListExclusions(ctx context.Context, req *loggingpb.ListExclusionsRequest) (*loggingpb.ListExclusionsResponse, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*loggingpb.ListExclusionsResponse), nil
+}
+
+func (s *mockConfigServer) GetExclusion(ctx context.Context, req *loggingpb.GetExclusionRequest) (*loggingpb.LogExclusion, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*loggingpb.LogExclusion), nil
+}
+
+func (s *mockConfigServer) CreateExclusion(ctx context.Context, req *loggingpb.CreateExclusionRequest) (*loggingpb.LogExclusion, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*loggingpb.LogExclusion), nil
+}
+
+func (s *mockConfigServer) UpdateExclusion(ctx context.Context, req *loggingpb.UpdateExclusionRequest) (*loggingpb.LogExclusion, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*loggingpb.LogExclusion), nil
+}
+
+func (s *mockConfigServer) DeleteExclusion(ctx context.Context, req *loggingpb.DeleteExclusionRequest) (*emptypb.Empty, error) {
+	md, _ := metadata.FromIncomingContext(ctx)
+	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
+		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
+	}
+	s.reqs = append(s.reqs, req)
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.resps[0].(*emptypb.Empty), nil
+}
+
+type mockMetricsServer struct {
+	// Embed for forward compatibility.
+	// Tests will keep working if more methods are added
+	// in the future.
+	loggingpb.MetricsServiceV2Server
+
+	reqs []proto.Message
+
+	// If set, all calls return this error.
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockMetricsServer) ListLogMetrics(ctx context.Context, req *loggingpb.ListLogMetricsRequest) (*loggingpb.ListLogMetricsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.ListLogMetricsResponse), nil +} + +func (s *mockMetricsServer) GetLogMetric(ctx context.Context, req *loggingpb.GetLogMetricRequest) (*loggingpb.LogMetric, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.LogMetric), nil +} + +func (s *mockMetricsServer) CreateLogMetric(ctx context.Context, req *loggingpb.CreateLogMetricRequest) (*loggingpb.LogMetric, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.LogMetric), nil +} + +func (s *mockMetricsServer) UpdateLogMetric(ctx context.Context, req *loggingpb.UpdateLogMetricRequest) (*loggingpb.LogMetric, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.LogMetric), nil +} + +func (s *mockMetricsServer) DeleteLogMetric(ctx context.Context, req *loggingpb.DeleteLogMetricRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
+var clientOpt option.ClientOption
+
+var (
+	mockLogging mockLoggingServer
+	mockConfig  mockConfigServer
+	mockMetrics mockMetricsServer
+)
+
+func TestMain(m *testing.M) {
+	flag.Parse()
+
+	serv := grpc.NewServer()
+	loggingpb.RegisterLoggingServiceV2Server(serv, &mockLogging)
+	loggingpb.RegisterConfigServiceV2Server(serv, &mockConfig)
+	loggingpb.RegisterMetricsServiceV2Server(serv, &mockMetrics)
+
+	lis, err := net.Listen("tcp", "localhost:0")
+	if err != nil {
+		log.Fatal(err)
+	}
+	go serv.Serve(lis)
+
+	conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
+	if err != nil {
+		log.Fatal(err)
+	}
+	clientOpt = option.WithGRPCConn(conn)
+
+	os.Exit(m.Run())
+}
+
+func TestLoggingServiceV2DeleteLog(t *testing.T) {
+	var expectedResponse *emptypb.Empty = &emptypb.Empty{}
+
+	mockLogging.err = nil
+	mockLogging.reqs = nil
+
+	mockLogging.resps = append(mockLogging.resps[:0], expectedResponse)
+
+	var formattedLogName string = fmt.Sprintf("projects/%s/logs/%s", "[PROJECT]", "[LOG]")
+	var request = &loggingpb.DeleteLogRequest{
+		LogName: formattedLogName,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = c.DeleteLog(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockLogging.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+}
+
+func TestLoggingServiceV2DeleteLogError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockLogging.err = gstatus.Error(errCode, "test error")
+
+	var formattedLogName string = fmt.Sprintf("projects/%s/logs/%s", "[PROJECT]", "[LOG]")
+	var request = &loggingpb.DeleteLogRequest{
+		LogName: formattedLogName,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = c.DeleteLog(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+}
+func TestLoggingServiceV2WriteLogEntries(t *testing.T) {
+	var expectedResponse *loggingpb.WriteLogEntriesResponse = &loggingpb.WriteLogEntriesResponse{}
+
+	mockLogging.err = nil
+	mockLogging.reqs = nil
+
+	mockLogging.resps = append(mockLogging.resps[:0], expectedResponse)
+
+	var entries []*loggingpb.LogEntry = nil
+	var request = &loggingpb.WriteLogEntriesRequest{
+		Entries: entries,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.WriteLogEntries(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockLogging.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q", got, want)
+	}
+}
+
+func TestLoggingServiceV2WriteLogEntriesError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockLogging.err = gstatus.Error(errCode, "test error")
+
+	var entries []*loggingpb.LogEntry = nil
+	var request = &loggingpb.WriteLogEntriesRequest{
+		Entries: entries,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.WriteLogEntries(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestLoggingServiceV2ListLogEntries(t *testing.T) {
+	var nextPageToken string = ""
+	var entriesElement *loggingpb.LogEntry = &loggingpb.LogEntry{}
+	var entries = []*loggingpb.LogEntry{entriesElement}
+	var expectedResponse = &loggingpb.ListLogEntriesResponse{
+		NextPageToken: nextPageToken,
+		Entries:       entries,
+	}
+
+	mockLogging.err = nil
+	mockLogging.reqs = nil
+
+	mockLogging.resps = append(mockLogging.resps[:0], expectedResponse)
+
+	var resourceNames []string = nil
+	var request = &loggingpb.ListLogEntriesRequest{
+		ResourceNames: resourceNames,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.ListLogEntries(context.Background(), request).Next()
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockLogging.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	want := (interface{})(expectedResponse.Entries[0])
+	got := (interface{})(resp)
+	var ok bool
+
+	switch want := (want).(type) {
+	case proto.Message:
+		ok = proto.Equal(want, got.(proto.Message))
+	default:
+		ok = want == got
+	}
+	if !ok {
+		t.Errorf("wrong response %q, want %q", got, want)
+	}
+}
+
+func TestLoggingServiceV2ListLogEntriesError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockLogging.err = gstatus.Error(errCode, "test error")
+
+	var resourceNames []string = nil
+	var request = &loggingpb.ListLogEntriesRequest{
+		ResourceNames: resourceNames,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.ListLogEntries(context.Background(), request).Next()
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestLoggingServiceV2ListMonitoredResourceDescriptors(t *testing.T) {
+	var nextPageToken string = ""
+	var resourceDescriptorsElement *monitoredrespb.MonitoredResourceDescriptor = &monitoredrespb.MonitoredResourceDescriptor{}
+	var resourceDescriptors = []*monitoredrespb.MonitoredResourceDescriptor{resourceDescriptorsElement}
+	var expectedResponse = &loggingpb.ListMonitoredResourceDescriptorsResponse{
+		NextPageToken:       nextPageToken,
+		ResourceDescriptors: resourceDescriptors,
+	}
+
+	mockLogging.err = nil
+	mockLogging.reqs = nil
+
+	mockLogging.resps = append(mockLogging.resps[:0], expectedResponse)
+
+	var request *loggingpb.ListMonitoredResourceDescriptorsRequest = &loggingpb.ListMonitoredResourceDescriptorsRequest{}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.ListMonitoredResourceDescriptors(context.Background(), request).Next()
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockLogging.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	want := (interface{})(expectedResponse.ResourceDescriptors[0])
+	got := (interface{})(resp)
+	var ok bool
+
+	switch want := (want).(type) {
+	case proto.Message:
+		ok = proto.Equal(want, got.(proto.Message))
+	default:
+		ok = want == got
+	}
+	if !ok {
+		t.Errorf("wrong response %q, want %q", got, want)
+	}
+}
+
+func TestLoggingServiceV2ListMonitoredResourceDescriptorsError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockLogging.err = gstatus.Error(errCode, "test error")
+
+	var request *loggingpb.ListMonitoredResourceDescriptorsRequest = &loggingpb.ListMonitoredResourceDescriptorsRequest{}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.ListMonitoredResourceDescriptors(context.Background(), request).Next()
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestLoggingServiceV2ListLogs(t *testing.T) {
+	var nextPageToken string = ""
+	var logNamesElement string = "logNamesElement-1079688374"
+	var logNames = []string{logNamesElement}
+	var expectedResponse = &loggingpb.ListLogsResponse{
+		NextPageToken: nextPageToken,
+		LogNames:      logNames,
+	}
+
+	mockLogging.err = nil
+	mockLogging.reqs = nil
+
+	mockLogging.resps = append(mockLogging.resps[:0], expectedResponse)
+
+	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
+	var request = &loggingpb.ListLogsRequest{
+		Parent: formattedParent,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.ListLogs(context.Background(), request).Next()
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockLogging.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	want := (interface{})(expectedResponse.LogNames[0])
+	got := (interface{})(resp)
+	var ok bool
+
+	switch want := (want).(type) {
+	case proto.Message:
+		ok = proto.Equal(want, got.(proto.Message))
+	default:
+		ok = want == got
+	}
+	if !ok {
+		t.Errorf("wrong response %q, want %q", got, want)
+	}
+}
+
+func TestLoggingServiceV2ListLogsError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockLogging.err = gstatus.Error(errCode, "test error")
+
+	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
+	var request = &loggingpb.ListLogsRequest{
+		Parent: formattedParent,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.ListLogs(context.Background(), request).Next()
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestConfigServiceV2ListSinks(t *testing.T) {
+	var nextPageToken string = ""
+	var sinksElement *loggingpb.LogSink = &loggingpb.LogSink{}
+	var sinks = []*loggingpb.LogSink{sinksElement}
+	var expectedResponse = &loggingpb.ListSinksResponse{
+		NextPageToken: nextPageToken,
+		Sinks:         sinks,
+	}
+
+	mockConfig.err = nil
+	mockConfig.reqs = nil
+
+	mockConfig.resps = append(mockConfig.resps[:0], expectedResponse)
+
+	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
+	var request = &loggingpb.ListSinksRequest{
+		Parent: formattedParent,
+	}
+
+	c, err := NewConfigClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.ListSinks(context.Background(), request).Next()
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	want := (interface{})(expectedResponse.Sinks[0])
+	got := (interface{})(resp)
+	var ok bool
+
+	switch want := (want).(type) {
+	case proto.Message:
+		ok = proto.Equal(want, got.(proto.Message))
+	default:
+		ok = want == got
+	}
+	if !ok {
+		t.Errorf("wrong response %q, want %q", got, want)
+	}
+}
+
+func TestConfigServiceV2ListSinksError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockConfig.err = gstatus.Error(errCode, "test error")
+
+	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
+	var request = &loggingpb.ListSinksRequest{
+		Parent: formattedParent,
+	}
+
+	c, err := NewConfigClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.ListSinks(context.Background(), request).Next()
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestConfigServiceV2GetSink(t *testing.T) {
+	var name string = "name3373707"
+	var destination string = "destination-1429847026"
+	var filter string = "filter-1274492040"
+	var writerIdentity string = "writerIdentity775638794"
+	var includeChildren bool = true
+	var expectedResponse = &loggingpb.LogSink{
+		Name:            name,
+		Destination:     destination,
+		Filter:          filter,
+		WriterIdentity:  writerIdentity,
+		IncludeChildren: includeChildren,
+	}
+
+	mockConfig.err = nil
+	mockConfig.reqs = nil
+
+	mockConfig.resps = append(mockConfig.resps[:0], expectedResponse)
+
+	var formattedSinkName string = fmt.Sprintf("projects/%s/sinks/%s", "[PROJECT]", "[SINK]")
+	var request = &loggingpb.GetSinkRequest{
+		SinkName: formattedSinkName,
+	}
+
+	c, err := NewConfigClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.GetSink(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q", got, want)
+	}
+}
+
+func TestConfigServiceV2GetSinkError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockConfig.err = gstatus.Error(errCode, "test error")
+
+	var formattedSinkName string = fmt.Sprintf("projects/%s/sinks/%s", "[PROJECT]", "[SINK]")
+	var request = &loggingpb.GetSinkRequest{
+		SinkName: formattedSinkName,
+	}
+
+	c, err := NewConfigClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.GetSink(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestConfigServiceV2CreateSink(t *testing.T) {
+	var name string = "name3373707"
+	var destination string = "destination-1429847026"
+	var filter string = "filter-1274492040"
+	var writerIdentity string = "writerIdentity775638794"
+	var includeChildren bool = true
+	var expectedResponse = &loggingpb.LogSink{
+		Name:            name,
+		Destination:     destination,
+		Filter:          filter,
+		WriterIdentity:  writerIdentity,
+		IncludeChildren: includeChildren,
+	}
+
+	mockConfig.err = nil
+	mockConfig.reqs = nil
+
+	mockConfig.resps = append(mockConfig.resps[:0], expectedResponse)
+
+	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
+	var sink *loggingpb.LogSink = &loggingpb.LogSink{}
+	var request = &loggingpb.CreateSinkRequest{
+		Parent: formattedParent,
+		Sink:   sink,
+	}
+
+	c, err := NewConfigClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.CreateSink(context.Background(), request)
+ + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestConfigServiceV2CreateSinkError(t *testing.T) { + errCode := codes.PermissionDenied + mockConfig.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var sink *loggingpb.LogSink = &loggingpb.LogSink{} + var request = &loggingpb.CreateSinkRequest{ + Parent: formattedParent, + Sink: sink, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateSink(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestConfigServiceV2UpdateSink(t *testing.T) { + var name string = "name3373707" + var destination string = "destination-1429847026" + var filter string = "filter-1274492040" + var writerIdentity string = "writerIdentity775638794" + var includeChildren bool = true + var expectedResponse = &loggingpb.LogSink{ + Name: name, + Destination: destination, + Filter: filter, + WriterIdentity: writerIdentity, + IncludeChildren: includeChildren, + } + + mockConfig.err = nil + mockConfig.reqs = nil + + mockConfig.resps = append(mockConfig.resps[:0], expectedResponse) + + var formattedSinkName string = fmt.Sprintf("projects/%s/sinks/%s", "[PROJECT]", "[SINK]") + var sink *loggingpb.LogSink = &loggingpb.LogSink{} + var request = &loggingpb.UpdateSinkRequest{ + SinkName: formattedSinkName, + Sink: sink, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateSink(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestConfigServiceV2UpdateSinkError(t *testing.T) { + errCode := codes.PermissionDenied + mockConfig.err = gstatus.Error(errCode, "test error") + + var formattedSinkName string = fmt.Sprintf("projects/%s/sinks/%s", "[PROJECT]", "[SINK]") + var sink *loggingpb.LogSink = &loggingpb.LogSink{} + var request = &loggingpb.UpdateSinkRequest{ + SinkName: formattedSinkName, + Sink: sink, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateSink(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestConfigServiceV2DeleteSink(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockConfig.err = nil + mockConfig.reqs = nil + + mockConfig.resps = append(mockConfig.resps[:0], expectedResponse) + + var formattedSinkName string = fmt.Sprintf("projects/%s/sinks/%s", "[PROJECT]", "[SINK]") + var request = &loggingpb.DeleteSinkRequest{ + SinkName: formattedSinkName, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err 
!= nil { + t.Fatal(err) + } + + err = c.DeleteSink(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestConfigServiceV2DeleteSinkError(t *testing.T) { + errCode := codes.PermissionDenied + mockConfig.err = gstatus.Error(errCode, "test error") + + var formattedSinkName string = fmt.Sprintf("projects/%s/sinks/%s", "[PROJECT]", "[SINK]") + var request = &loggingpb.DeleteSinkRequest{ + SinkName: formattedSinkName, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteSink(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestConfigServiceV2ListExclusions(t *testing.T) { + var nextPageToken string = "" + var exclusionsElement *loggingpb.LogExclusion = &loggingpb.LogExclusion{} + var exclusions = []*loggingpb.LogExclusion{exclusionsElement} + var expectedResponse = &loggingpb.ListExclusionsResponse{ + NextPageToken: nextPageToken, + Exclusions: exclusions, + } + + mockConfig.err = nil + mockConfig.reqs = nil + + mockConfig.resps = append(mockConfig.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &loggingpb.ListExclusionsRequest{ + Parent: formattedParent, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListExclusions(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Exclusions[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestConfigServiceV2ListExclusionsError(t *testing.T) { + errCode := codes.PermissionDenied + mockConfig.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &loggingpb.ListExclusionsRequest{ + Parent: formattedParent, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListExclusions(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestConfigServiceV2GetExclusion(t *testing.T) { + var name2 string = "name2-1052831874" + var description string = "description-1724546052" + var filter string = "filter-1274492040" + var disabled bool = true + var expectedResponse = &loggingpb.LogExclusion{ + Name: name2, + Description: description, + Filter: filter, + Disabled: disabled, + } + + mockConfig.err = nil + mockConfig.reqs = nil + + mockConfig.resps = append(mockConfig.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/exclusions/%s", "[PROJECT]", "[EXCLUSION]") + var request = &loggingpb.GetExclusionRequest{ + Name: formattedName, + } + 
+ c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetExclusion(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestConfigServiceV2GetExclusionError(t *testing.T) { + errCode := codes.PermissionDenied + mockConfig.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/exclusions/%s", "[PROJECT]", "[EXCLUSION]") + var request = &loggingpb.GetExclusionRequest{ + Name: formattedName, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetExclusion(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestConfigServiceV2CreateExclusion(t *testing.T) { + var name string = "name3373707" + var description string = "description-1724546052" + var filter string = "filter-1274492040" + var disabled bool = true + var expectedResponse = &loggingpb.LogExclusion{ + Name: name, + Description: description, + Filter: filter, + Disabled: disabled, + } + + mockConfig.err = nil + mockConfig.reqs = nil + + mockConfig.resps = append(mockConfig.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var exclusion *loggingpb.LogExclusion = &loggingpb.LogExclusion{} + var request = &loggingpb.CreateExclusionRequest{ + Parent: formattedParent, + Exclusion: exclusion, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateExclusion(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestConfigServiceV2CreateExclusionError(t *testing.T) { + errCode := codes.PermissionDenied + mockConfig.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var exclusion *loggingpb.LogExclusion = &loggingpb.LogExclusion{} + var request = &loggingpb.CreateExclusionRequest{ + Parent: formattedParent, + Exclusion: exclusion, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateExclusion(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestConfigServiceV2UpdateExclusion(t *testing.T) { + var name2 string = "name2-1052831874" + var description string = "description-1724546052" + var filter string = "filter-1274492040" + var disabled bool = true + var expectedResponse = &loggingpb.LogExclusion{ + Name: name2, + Description: description, + Filter: filter, + Disabled: disabled, + } + + mockConfig.err = nil + mockConfig.reqs = nil + + mockConfig.resps = 
append(mockConfig.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/exclusions/%s", "[PROJECT]", "[EXCLUSION]") + var exclusion *loggingpb.LogExclusion = &loggingpb.LogExclusion{} + var updateMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{} + var request = &loggingpb.UpdateExclusionRequest{ + Name: formattedName, + Exclusion: exclusion, + UpdateMask: updateMask, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateExclusion(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestConfigServiceV2UpdateExclusionError(t *testing.T) { + errCode := codes.PermissionDenied + mockConfig.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/exclusions/%s", "[PROJECT]", "[EXCLUSION]") + var exclusion *loggingpb.LogExclusion = &loggingpb.LogExclusion{} + var updateMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{} + var request = &loggingpb.UpdateExclusionRequest{ + Name: formattedName, + Exclusion: exclusion, + UpdateMask: updateMask, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateExclusion(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestConfigServiceV2DeleteExclusion(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockConfig.err = nil + mockConfig.reqs = nil + + mockConfig.resps = append(mockConfig.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/exclusions/%s", "[PROJECT]", "[EXCLUSION]") + var request = &loggingpb.DeleteExclusionRequest{ + Name: formattedName, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteExclusion(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestConfigServiceV2DeleteExclusionError(t *testing.T) { + errCode := codes.PermissionDenied + mockConfig.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/exclusions/%s", "[PROJECT]", "[EXCLUSION]") + var request = &loggingpb.DeleteExclusionRequest{ + Name: formattedName, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteExclusion(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestMetricsServiceV2ListLogMetrics(t *testing.T) { + var nextPageToken string = "" + var metricsElement *loggingpb.LogMetric = &loggingpb.LogMetric{} + var metrics = []*loggingpb.LogMetric{metricsElement} + var expectedResponse = &loggingpb.ListLogMetricsResponse{ + NextPageToken: nextPageToken, + Metrics: metrics, + } + + 
mockMetrics.err = nil + mockMetrics.reqs = nil + + mockMetrics.resps = append(mockMetrics.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &loggingpb.ListLogMetricsRequest{ + Parent: formattedParent, + } + + c, err := NewMetricsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListLogMetrics(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetrics.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Metrics[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestMetricsServiceV2ListLogMetricsError(t *testing.T) { + errCode := codes.PermissionDenied + mockMetrics.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &loggingpb.ListLogMetricsRequest{ + Parent: formattedParent, + } + + c, err := NewMetricsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListLogMetrics(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricsServiceV2GetLogMetric(t *testing.T) { + var name string = "name3373707" + var description string = "description-1724546052" + var filter string = "filter-1274492040" + var valueExtractor string = "valueExtractor2047672534" + var expectedResponse = &loggingpb.LogMetric{ + Name: name, + Description: description, + Filter: filter, + ValueExtractor: valueExtractor, + } + + mockMetrics.err = nil + mockMetrics.reqs = nil + + mockMetrics.resps = append(mockMetrics.resps[:0], expectedResponse) + + var formattedMetricName string = fmt.Sprintf("projects/%s/metrics/%s", "[PROJECT]", "[METRIC]") + var request = &loggingpb.GetLogMetricRequest{ + MetricName: formattedMetricName, + } + + c, err := NewMetricsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetLogMetric(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetrics.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestMetricsServiceV2GetLogMetricError(t *testing.T) { + errCode := codes.PermissionDenied + mockMetrics.err = gstatus.Error(errCode, "test error") + + var formattedMetricName string = fmt.Sprintf("projects/%s/metrics/%s", "[PROJECT]", "[METRIC]") + var request = &loggingpb.GetLogMetricRequest{ + MetricName: formattedMetricName, + } + + c, err := NewMetricsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetLogMetric(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricsServiceV2CreateLogMetric(t *testing.T) 
{ + var name string = "name3373707" + var description string = "description-1724546052" + var filter string = "filter-1274492040" + var valueExtractor string = "valueExtractor2047672534" + var expectedResponse = &loggingpb.LogMetric{ + Name: name, + Description: description, + Filter: filter, + ValueExtractor: valueExtractor, + } + + mockMetrics.err = nil + mockMetrics.reqs = nil + + mockMetrics.resps = append(mockMetrics.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var metric *loggingpb.LogMetric = &loggingpb.LogMetric{} + var request = &loggingpb.CreateLogMetricRequest{ + Parent: formattedParent, + Metric: metric, + } + + c, err := NewMetricsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateLogMetric(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetrics.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestMetricsServiceV2CreateLogMetricError(t *testing.T) { + errCode := codes.PermissionDenied + mockMetrics.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var metric *loggingpb.LogMetric = &loggingpb.LogMetric{} + var request = &loggingpb.CreateLogMetricRequest{ + Parent: formattedParent, + Metric: metric, + } + + c, err := NewMetricsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateLogMetric(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricsServiceV2UpdateLogMetric(t *testing.T) { + var name string = "name3373707" + var description string = "description-1724546052" + var filter string = "filter-1274492040" + var valueExtractor string = "valueExtractor2047672534" + var expectedResponse = &loggingpb.LogMetric{ + Name: name, + Description: description, + Filter: filter, + ValueExtractor: valueExtractor, + } + + mockMetrics.err = nil + mockMetrics.reqs = nil + + mockMetrics.resps = append(mockMetrics.resps[:0], expectedResponse) + + var formattedMetricName string = fmt.Sprintf("projects/%s/metrics/%s", "[PROJECT]", "[METRIC]") + var metric *loggingpb.LogMetric = &loggingpb.LogMetric{} + var request = &loggingpb.UpdateLogMetricRequest{ + MetricName: formattedMetricName, + Metric: metric, + } + + c, err := NewMetricsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateLogMetric(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetrics.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestMetricsServiceV2UpdateLogMetricError(t *testing.T) { + errCode := codes.PermissionDenied + mockMetrics.err = gstatus.Error(errCode, "test error") + + var formattedMetricName string = fmt.Sprintf("projects/%s/metrics/%s", "[PROJECT]", "[METRIC]") + var metric *loggingpb.LogMetric = &loggingpb.LogMetric{} + var request = &loggingpb.UpdateLogMetricRequest{ + MetricName: 
formattedMetricName, + Metric: metric, + } + + c, err := NewMetricsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateLogMetric(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricsServiceV2DeleteLogMetric(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockMetrics.err = nil + mockMetrics.reqs = nil + + mockMetrics.resps = append(mockMetrics.resps[:0], expectedResponse) + + var formattedMetricName string = fmt.Sprintf("projects/%s/metrics/%s", "[PROJECT]", "[METRIC]") + var request = &loggingpb.DeleteLogMetricRequest{ + MetricName: formattedMetricName, + } + + c, err := NewMetricsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteLogMetric(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetrics.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestMetricsServiceV2DeleteLogMetricError(t *testing.T) { + errCode := codes.PermissionDenied + mockMetrics.err = gstatus.Error(errCode, "test error") + + var formattedMetricName string = fmt.Sprintf("projects/%s/metrics/%s", "[PROJECT]", "[METRIC]") + var request = &loggingpb.DeleteLogMetricRequest{ + MetricName: formattedMetricName, + } + + c, err := NewMetricsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteLogMetric(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/path_funcs.go b/vendor/cloud.google.com/go/logging/apiv2/path_funcs.go new file mode 100644 index 0000000..37bbe9d --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/path_funcs.go @@ -0,0 +1,107 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logging + +// ConfigProjectPath returns the path for the project resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s", project) +// instead. +func ConfigProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// ConfigSinkPath returns the path for the sink resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/sinks/%s", project, sink) +// instead. +func ConfigSinkPath(project, sink string) string { + return "" + + "projects/" + + project + + "/sinks/" + + sink + + "" +} + +// ConfigExclusionPath returns the path for the exclusion resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/exclusions/%s", project, exclusion) +// instead. 
+func ConfigExclusionPath(project, exclusion string) string { + return "" + + "projects/" + + project + + "/exclusions/" + + exclusion + + "" +} + +// ProjectPath returns the path for the project resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s", project) +// instead. +func ProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// LogPath returns the path for the log resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/logs/%s", project, log) +// instead. +func LogPath(project, log string) string { + return "" + + "projects/" + + project + + "/logs/" + + log + + "" +} + +// MetricsProjectPath returns the path for the project resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s", project) +// instead. +func MetricsProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// MetricsMetricPath returns the path for the metric resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/metrics/%s", project, metric) +// instead. +func MetricsMetricPath(project, metric string) string { + return "" + + "projects/" + + project + + "/metrics/" + + metric + + "" +} diff --git a/vendor/cloud.google.com/go/logging/doc.go b/vendor/cloud.google.com/go/logging/doc.go new file mode 100644 index 0000000..d5f425e --- /dev/null +++ b/vendor/cloud.google.com/go/logging/doc.go @@ -0,0 +1,117 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package logging contains a Stackdriver Logging client suitable for writing logs. +For reading logs, and working with sinks, metrics and monitored resources, +see package cloud.google.com/go/logging/logadmin. + +This client uses Logging API v2. +See https://cloud.google.com/logging/docs/api/v2/ for an introduction to the API. + + +Note: This package is in beta. Some backwards-incompatible changes may occur. + + +Creating a Client + +Use a Client to interact with the Stackdriver Logging API. + + // Create a Client + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + + +Basic Usage + +For most use cases, you'll want to add log entries to a buffer to be periodically +flushed (automatically and asynchronously) to the Stackdriver Logging service. + + // Initialize a logger + lg := client.Logger("my-log") + + // Add entry to log buffer + lg.Log(logging.Entry{Payload: "something happened!"}) + + +Closing your Client + +You should call Client.Close before your program exits to flush any buffered log entries to the Stackdriver Logging service. + + // Close the client when finished. + err = client.Close() + if err != nil { + // TODO: Handle error. + } + + +Synchronous Logging + +For critical errors, you may want to send your log entries immediately. +LogSync is slow and will block until the log entry has been sent, so it is +not recommended for normal use. + + lg.LogSync(ctx, logging.Entry{Payload: "ALERT! 
Something critical happened!"}) + + +Payloads + +An entry payload can be a string, as in the examples above. It can also be any value +that can be marshaled to a JSON object, like a map[string]interface{} or a struct: + + type MyEntry struct { + Name string + Count int + } + lg.Log(logging.Entry{Payload: MyEntry{Name: "Bob", Count: 3}}) + +If you have a []byte of JSON, wrap it in json.RawMessage: + + j := []byte(`{"Name": "Bob", "Count": 3}`) + lg.Log(logging.Entry{Payload: json.RawMessage(j)}) + + +The Standard Logger Interface + +You may want to use a standard log.Logger in your program. + + // stdlg implements log.Logger + stdlg := lg.StandardLogger(logging.Info) + stdlg.Println("some info") + + +Log Levels + +An Entry may have one of a number of severity levels associated with it. + + logging.Entry{ + Payload: "something terrible happened!", + Severity: logging.Critical, + } + + +Viewing Logs + +You can view Stackdriver logs for projects at +https://console.cloud.google.com/logs/viewer. Use the dropdown at the top left. When +running from a Google Cloud Platform VM, select "GCE VM Instance". Otherwise, select +"Google Project" and then the project ID. Logs for organizations, folders and billing +accounts can be viewed on the command line with the "gcloud logging read" command. + + +*/ +package logging // import "cloud.google.com/go/logging" diff --git a/vendor/cloud.google.com/go/logging/examples_test.go b/vendor/cloud.google.com/go/logging/examples_test.go new file mode 100644 index 0000000..dcee76d --- /dev/null +++ b/vendor/cloud.google.com/go/logging/examples_test.go @@ -0,0 +1,166 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logging_test + +import ( + "encoding/json" + "fmt" + "os" + + "cloud.google.com/go/logging" + "golang.org/x/net/context" +) + +func ExampleNewClient() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + // Use client to manage logs, metrics and sinks. + // Close the client when finished. + if err := client.Close(); err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_Ping() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + if err := client.Ping(ctx); err != nil { + // TODO: Handle error. + } +} + +// Although Logger.Flush and Client.Close both return errors, they don't tell you +// whether the errors were frequent or significant. For most programs, it doesn't +// matter if there were a few errors while writing logs, although if those few errors +// indicated a bug in your program, you might want to know about them. The best way +// to handle errors is by setting the OnError function. If it runs quickly, it will +// see every error generated during logging.
+func ExampleNewClient_errorFunc() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + // Print all errors to stdout, and count them. Multiple calls to the OnError + // function never happen concurrently, so there is no need for locking nErrs, + // provided you don't read it until after the logging client is closed. + var nErrs int + client.OnError = func(e error) { + fmt.Fprintf(os.Stdout, "logging: %v", e) + nErrs++ + } + // Use client to manage logs, metrics and sinks. + // Close the client when finished. + if err := client.Close(); err != nil { + // TODO: Handle error. + } + fmt.Printf("saw %d errors\n", nErrs) +} + +func ExampleClient_Logger() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + lg := client.Logger("my-log") + _ = lg // TODO: use the Logger. +} + +func ExampleLogger_LogSync() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + lg := client.Logger("my-log") + err = lg.LogSync(ctx, logging.Entry{Payload: "red alert"}) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleLogger_Log() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + lg := client.Logger("my-log") + lg.Log(logging.Entry{Payload: "something happened"}) +} + +// An Entry payload can be anything that marshals to a +// JSON object, like a struct. +func ExampleLogger_Log_struct() { + type MyEntry struct { + Name string + Count int + } + + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + lg := client.Logger("my-log") + lg.Log(logging.Entry{Payload: MyEntry{Name: "Bob", Count: 3}}) +} + +// To log a JSON value, wrap it in json.RawMessage. +func ExampleLogger_Log_json() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + lg := client.Logger("my-log") + j := []byte(`{"Name": "Bob", "Count": 3}`) + lg.Log(logging.Entry{Payload: json.RawMessage(j)}) +} + +func ExampleLogger_Flush() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + lg := client.Logger("my-log") + lg.Log(logging.Entry{Payload: "something happened"}) + lg.Flush() +} + +func ExampleLogger_StandardLogger() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + lg := client.Logger("my-log") + slg := lg.StandardLogger(logging.Info) + slg.Println("an informative message") +} + +func ExampleParseSeverity() { + sev := logging.ParseSeverity("ALERT") + fmt.Println(sev) + // Output: Alert +} diff --git a/vendor/cloud.google.com/go/logging/internal/common.go b/vendor/cloud.google.com/go/logging/internal/common.go new file mode 100644 index 0000000..38cfbb5 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/internal/common.go @@ -0,0 +1,39 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "fmt" + "strings" +) + +const ( + ProdAddr = "logging.googleapis.com:443" + Version = "0.2.0" +) + +func LogPath(parent, logID string) string { + logID = strings.Replace(logID, "/", "%2F", -1) + return fmt.Sprintf("%s/logs/%s", parent, logID) +} + +func LogIDFromPath(parent, path string) string { + start := len(parent) + len("/logs/") + if len(path) < start { + return "" + } + logID := path[start:] + return strings.Replace(logID, "%2F", "/", -1) +} diff --git a/vendor/cloud.google.com/go/logging/internal/testing/equal.go b/vendor/cloud.google.com/go/logging/internal/testing/equal.go new file mode 100644 index 0000000..c95d199 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/internal/testing/equal.go @@ -0,0 +1,42 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "fmt" + + "github.com/golang/protobuf/proto" +) + +// Compare two payloads, assuming they are both proto.Messages +// or both strings. +func PayloadEqual(a, b interface{}) bool { + if a == nil && b == nil { + return true + } + if a == nil || b == nil { + return false + } + switch a := a.(type) { + case proto.Message: + return proto.Equal(a, b.(proto.Message)) + case string: + return a == b.(string) + default: + panic(fmt.Sprintf("payloadEqual: unexpected type %T", a)) + } +} diff --git a/vendor/cloud.google.com/go/logging/internal/testing/fake.go b/vendor/cloud.google.com/go/logging/internal/testing/fake.go new file mode 100644 index 0000000..c8fdc94 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/internal/testing/fake.go @@ -0,0 +1,395 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package testing provides support for testing the logging client. 
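+// +// A minimal wiring sketch (an illustration only; grpc, option, and logadmin are the usual packages a test would use to dial this fake, not exports of this package): +// +// addr, err := NewServer() +// if err != nil { /* TODO: Handle error. */ } +// conn, err := grpc.Dial(addr, grpc.WithInsecure()) +// if err != nil { /* TODO: Handle error. */ } +// client, err := logadmin.NewClient(ctx, "PROJECT_ID", option.WithGRPCConn(conn)) +// +// "PROJECT_ID" matches the only project ID this fake's WriteLogEntries accepts.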
+package testing + +import ( + "errors" + "fmt" + "regexp" + "sort" + "strings" + "sync" + "time" + + emptypb "github.com/golang/protobuf/ptypes/empty" + tspb "github.com/golang/protobuf/ptypes/timestamp" + + "cloud.google.com/go/internal/testutil" + context "golang.org/x/net/context" + lpb "google.golang.org/genproto/googleapis/api/label" + mrpb "google.golang.org/genproto/googleapis/api/monitoredres" + logpb "google.golang.org/genproto/googleapis/logging/v2" +) + +type loggingHandler struct { + logpb.LoggingServiceV2Server + + mu sync.Mutex + logs map[string][]*logpb.LogEntry // indexed by log name +} + +type configHandler struct { + logpb.ConfigServiceV2Server + + mu sync.Mutex + sinks map[string]*logpb.LogSink // indexed by (full) sink name +} + +type metricHandler struct { + logpb.MetricsServiceV2Server + + mu sync.Mutex + metrics map[string]*logpb.LogMetric // indexed by (full) metric name +} + +// NewServer creates a new in-memory fake server implementing the logging service. +// It returns the address of the server. +func NewServer() (string, error) { + srv, err := testutil.NewServer() + if err != nil { + return "", err + } + logpb.RegisterLoggingServiceV2Server(srv.Gsrv, &loggingHandler{ + logs: make(map[string][]*logpb.LogEntry), + }) + logpb.RegisterConfigServiceV2Server(srv.Gsrv, &configHandler{ + sinks: make(map[string]*logpb.LogSink), + }) + logpb.RegisterMetricsServiceV2Server(srv.Gsrv, &metricHandler{ + metrics: make(map[string]*logpb.LogMetric), + }) + srv.Start() + return srv.Addr, nil +} + +// DeleteLog deletes a log and all its log entries. The log will reappear if it +// receives new entries. +func (h *loggingHandler) DeleteLog(_ context.Context, req *logpb.DeleteLogRequest) (*emptypb.Empty, error) { + // TODO(jba): return NotFound if log isn't there? + h.mu.Lock() + defer h.mu.Unlock() + delete(h.logs, req.LogName) + return &emptypb.Empty{}, nil +} + +// The only IDs that WriteLogEntries will accept. +// Important for testing Ping. +const ( + validProjectID = "PROJECT_ID" + validOrgID = "433637338589" +) + +// WriteLogEntries writes log entries to Stackdriver Logging. All log entries in +// Stackdriver Logging are written by this method. +func (h *loggingHandler) WriteLogEntries(_ context.Context, req *logpb.WriteLogEntriesRequest) (*logpb.WriteLogEntriesResponse, error) { + if !strings.HasPrefix(req.LogName, "projects/"+validProjectID+"/") && !strings.HasPrefix(req.LogName, "organizations/"+validOrgID+"/") { + return nil, fmt.Errorf("bad LogName: %q", req.LogName) + } + // TODO(jba): support insertId? + h.mu.Lock() + defer h.mu.Unlock() + for _, e := range req.Entries { + // Assign timestamp if missing. + if e.Timestamp == nil { + e.Timestamp = &tspb.Timestamp{Seconds: time.Now().Unix(), Nanos: 0} + } + // Fill from common fields in request. + if e.LogName == "" { + e.LogName = req.LogName + } + if e.Resource == nil { + // TODO(jba): use a global one if nil? + e.Resource = req.Resource + } + for k, v := range req.Labels { + if _, ok := e.Labels[k]; !ok { + // Initialize the map lazily; assigning into a nil Labels map would panic. + if e.Labels == nil { + e.Labels = map[string]string{} + } + e.Labels[k] = v + } + } + + // Store by log name. + h.logs[e.LogName] = append(h.logs[e.LogName], e) + } + return &logpb.WriteLogEntriesResponse{}, nil +} + +// ListLogEntries lists log entries. Use this method to retrieve log entries +// from Stackdriver Logging. +// +// This fake implementation ignores project IDs. It does not support full filtering, only +// expressions of the form "logName = NAME".
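+// For example, this fake accepts a filter like (the log ID "testlog" is illustrative): +// +// logName = "projects/PROJECT_ID/logs/testlog" +//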
+func (h *loggingHandler) ListLogEntries(_ context.Context, req *logpb.ListLogEntriesRequest) (*logpb.ListLogEntriesResponse, error) { + h.mu.Lock() + defer h.mu.Unlock() + entries, err := h.filterEntries(req.Filter) + if err != nil { + return nil, err + } + if err = sortEntries(entries, req.OrderBy); err != nil { + return nil, err + } + + from, to, nextPageToken, err := testutil.PageBounds(int(req.PageSize), req.PageToken, len(entries)) + if err != nil { + return nil, err + } + return &logpb.ListLogEntriesResponse{ + Entries: entries[from:to], + NextPageToken: nextPageToken, + }, nil +} + +func (h *loggingHandler) filterEntries(filter string) ([]*logpb.LogEntry, error) { + logName, err := parseFilter(filter) + if err != nil { + return nil, err + } + if logName != "" { + return h.logs[logName], nil + } + var entries []*logpb.LogEntry + for _, es := range h.logs { + entries = append(entries, es...) + } + return entries, nil +} + +var filterRegexp = regexp.MustCompile(`^logName\s*=\s*"?([-_/.%\w]+)"?$`) + +// returns the log name, or "" for the empty filter +func parseFilter(filter string) (string, error) { + if filter == "" { + return "", nil + } + subs := filterRegexp.FindStringSubmatch(filter) + if subs == nil { + return "", invalidArgument("bad filter") + } + return subs[1], nil // cannot panic by construction of regexp +} + +func sortEntries(entries []*logpb.LogEntry, orderBy string) error { + switch orderBy { + case "", "timestamp asc": + sort.Sort(byTimestamp(entries)) + return nil + + case "timestamp desc": + sort.Sort(sort.Reverse(byTimestamp(entries))) + return nil + + default: + return invalidArgument("bad order_by") + } +} + +type byTimestamp []*logpb.LogEntry + +func (s byTimestamp) Len() int { return len(s) } +func (s byTimestamp) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byTimestamp) Less(i, j int) bool { + c := compareTimestamps(s[i].Timestamp, s[j].Timestamp) + switch { + case c < 0: + return true + case c > 0: + return false + default: + return s[i].InsertId < s[j].InsertId + } +} + +func compareTimestamps(ts1, ts2 *tspb.Timestamp) int64 { + if ts1.Seconds != ts2.Seconds { + return ts1.Seconds - ts2.Seconds + } + return int64(ts1.Nanos - ts2.Nanos) +} + +// Lists monitored resource descriptors that are used by Stackdriver Logging. +func (h *loggingHandler) ListMonitoredResourceDescriptors(context.Context, *logpb.ListMonitoredResourceDescriptorsRequest) (*logpb.ListMonitoredResourceDescriptorsResponse, error) { + return &logpb.ListMonitoredResourceDescriptorsResponse{ + ResourceDescriptors: []*mrpb.MonitoredResourceDescriptor{ + { + Type: "global", + DisplayName: "Global", + Description: "... a log is not associated with any specific resource.", + Labels: []*lpb.LabelDescriptor{ + {Key: "project_id", Description: "The identifier of the GCP project..."}, + }, + }, + }, + }, nil +} + +// Lists logs. +func (h *loggingHandler) ListLogs(_ context.Context, req *logpb.ListLogsRequest) (*logpb.ListLogsResponse, error) { + // Return fixed, fake response. + logNames := []string{"a", "b", "c"} + from, to, npt, err := testutil.PageBounds(int(req.PageSize), req.PageToken, len(logNames)) + if err != nil { + return nil, err + } + var lns []string + for _, ln := range logNames[from:to] { + lns = append(lns, req.Parent+"/logs/"+ln) + } + return &logpb.ListLogsResponse{ + LogNames: lns, + NextPageToken: npt, + }, nil +} + +// Gets a sink. 
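+// Requesting a sink that does not exist returns a plain error rather than a gRPC NotFound status; see the TODO below.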
+func (h *configHandler) GetSink(_ context.Context, req *logpb.GetSinkRequest) (*logpb.LogSink, error) { + h.mu.Lock() + defer h.mu.Unlock() + if s, ok := h.sinks[req.SinkName]; ok { + return s, nil + } + // TODO(jba): use error codes + return nil, fmt.Errorf("sink %q not found", req.SinkName) +} + +// Creates a sink. +func (h *configHandler) CreateSink(_ context.Context, req *logpb.CreateSinkRequest) (*logpb.LogSink, error) { + h.mu.Lock() + defer h.mu.Unlock() + fullName := fmt.Sprintf("%s/sinks/%s", req.Parent, req.Sink.Name) + if _, ok := h.sinks[fullName]; ok { + return nil, fmt.Errorf("sink with name %q already exists", fullName) + } + h.sinks[fullName] = req.Sink + return req.Sink, nil +} + +// Creates or updates a sink. +func (h *configHandler) UpdateSink(_ context.Context, req *logpb.UpdateSinkRequest) (*logpb.LogSink, error) { + h.mu.Lock() + defer h.mu.Unlock() + // Update of a non-existent sink will create it. + h.sinks[req.SinkName] = req.Sink + return req.Sink, nil +} + +// Deletes a sink. +func (h *configHandler) DeleteSink(_ context.Context, req *logpb.DeleteSinkRequest) (*emptypb.Empty, error) { + h.mu.Lock() + defer h.mu.Unlock() + delete(h.sinks, req.SinkName) + return &emptypb.Empty{}, nil +} + +// Lists sinks. This fake implementation ignores the Parent field of +// ListSinksRequest. All sinks are listed, regardless of their project. +func (h *configHandler) ListSinks(_ context.Context, req *logpb.ListSinksRequest) (*logpb.ListSinksResponse, error) { + h.mu.Lock() + var sinks []*logpb.LogSink + for _, s := range h.sinks { + sinks = append(sinks, s) + } + h.mu.Unlock() // safe because no *logpb.LogSink is ever modified + // Since map iteration varies, sort the sinks. + sort.Sort(sinksByName(sinks)) + from, to, nextPageToken, err := testutil.PageBounds(int(req.PageSize), req.PageToken, len(sinks)) + if err != nil { + return nil, err + } + return &logpb.ListSinksResponse{ + Sinks: sinks[from:to], + NextPageToken: nextPageToken, + }, nil +} + +type sinksByName []*logpb.LogSink + +func (s sinksByName) Len() int { return len(s) } +func (s sinksByName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s sinksByName) Less(i, j int) bool { return s[i].Name < s[j].Name } + +// Gets a metric. +func (h *metricHandler) GetLogMetric(_ context.Context, req *logpb.GetLogMetricRequest) (*logpb.LogMetric, error) { + h.mu.Lock() + defer h.mu.Unlock() + if s, ok := h.metrics[req.MetricName]; ok { + return s, nil + } + // TODO(jba): use error codes + return nil, fmt.Errorf("metric %q not found", req.MetricName) +} + +// Creates a metric. +func (h *metricHandler) CreateLogMetric(_ context.Context, req *logpb.CreateLogMetricRequest) (*logpb.LogMetric, error) { + h.mu.Lock() + defer h.mu.Unlock() + fullName := fmt.Sprintf("%s/metrics/%s", req.Parent, req.Metric.Name) + if _, ok := h.metrics[fullName]; ok { + return nil, fmt.Errorf("metric with name %q already exists", fullName) + } + h.metrics[fullName] = req.Metric + return req.Metric, nil +} + +// Creates or updates a metric. +func (h *metricHandler) UpdateLogMetric(_ context.Context, req *logpb.UpdateLogMetricRequest) (*logpb.LogMetric, error) { + h.mu.Lock() + defer h.mu.Unlock() + // Update of a non-existent metric will create it. + h.metrics[req.MetricName] = req.Metric + return req.Metric, nil +} + +// Deletes a metric. 
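+// Deleting a metric that does not exist is a no-op, matching DeleteSink above.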
+func (h *metricHandler) DeleteLogMetric(_ context.Context, req *logpb.DeleteLogMetricRequest) (*emptypb.Empty, error) { + h.mu.Lock() + defer h.mu.Unlock() + delete(h.metrics, req.MetricName) + return &emptypb.Empty{}, nil +} + +// Lists metrics. This fake implementation ignores the Parent field of +// ListMetricsRequest. All metrics are listed, regardless of their project. +func (h *metricHandler) ListLogMetrics(_ context.Context, req *logpb.ListLogMetricsRequest) (*logpb.ListLogMetricsResponse, error) { + h.mu.Lock() + var metrics []*logpb.LogMetric + for _, s := range h.metrics { + metrics = append(metrics, s) + } + h.mu.Unlock() // safe because no *logpb.LogMetric is ever modified + // Since map iteration varies, sort the metrics. + sort.Sort(metricsByName(metrics)) + from, to, nextPageToken, err := testutil.PageBounds(int(req.PageSize), req.PageToken, len(metrics)) + if err != nil { + return nil, err + } + return &logpb.ListLogMetricsResponse{ + Metrics: metrics[from:to], + NextPageToken: nextPageToken, + }, nil +} + +type metricsByName []*logpb.LogMetric + +func (s metricsByName) Len() int { return len(s) } +func (s metricsByName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s metricsByName) Less(i, j int) bool { return s[i].Name < s[j].Name } + +func invalidArgument(msg string) error { + // TODO(jba): status codes + return errors.New(msg) +} diff --git a/vendor/cloud.google.com/go/logging/internal/testing/fake_test.go b/vendor/cloud.google.com/go/logging/internal/testing/fake_test.go new file mode 100644 index 0000000..cd6e5c2 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/internal/testing/fake_test.go @@ -0,0 +1,122 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file contains only basic checks. The fake is effectively tested by the +// logging client unit tests. + +package testing + +import ( + "testing" + "time" + + "github.com/golang/protobuf/proto" + tspb "github.com/golang/protobuf/ptypes/timestamp" + logpb "google.golang.org/genproto/googleapis/logging/v2" + grpc "google.golang.org/grpc" +) + +func TestNewServer(t *testing.T) { + // Confirm that we can create and use a working gRPC server. + addr, err := NewServer() + if err != nil { + t.Fatal(err) + } + conn, err := grpc.Dial(addr, grpc.WithInsecure()) + if err != nil { + t.Fatal(err) + } + // Avoid "connection is closing; please retry" message from gRPC. 
+ time.Sleep(300 * time.Millisecond) + conn.Close() +} + +func TestParseFilter(t *testing.T) { + for _, test := range []struct { + filter string + want string + wantErr bool + }{ + {"", "", false}, + {"logName = syslog", "syslog", false}, + {"logname = syslog", "", true}, + {"logName = 'syslog'", "", true}, + {"logName == syslog", "", true}, + } { + got, err := parseFilter(test.filter) + if err != nil { + if !test.wantErr { + t.Errorf("%q: got %v, want no error", test.filter, err) + } + continue + } + if test.wantErr { + t.Errorf("%q: got no error, want one", test.filter) + continue + } + if got != test.want { + t.Errorf("%q: got %q, want %q", test.filter, got, test.want) + } + } +} + +func TestSortEntries(t *testing.T) { + entries := []*logpb.LogEntry{ + /* 0 */ {Timestamp: &tspb.Timestamp{Seconds: 30}}, + /* 1 */ {Timestamp: &tspb.Timestamp{Seconds: 10}}, + /* 2 */ {Timestamp: &tspb.Timestamp{Seconds: 20}, InsertId: "b"}, + /* 3 */ {Timestamp: &tspb.Timestamp{Seconds: 20}, InsertId: "a"}, + /* 4 */ {Timestamp: &tspb.Timestamp{Seconds: 20}, InsertId: "c"}, + } + for _, test := range []struct { + orderBy string + want []int // slice of index into entries; nil == error + }{ + {"", []int{1, 3, 2, 4, 0}}, + {"timestamp asc", []int{1, 3, 2, 4, 0}}, + {"timestamp desc", []int{0, 4, 2, 3, 1}}, + {"something else", nil}, + } { + got := make([]*logpb.LogEntry, len(entries)) + copy(got, entries) + err := sortEntries(got, test.orderBy) + if err != nil { + if test.want != nil { + t.Errorf("%q: got %v, want nil error", test.orderBy, err) + } + continue + } + want := make([]*logpb.LogEntry, len(entries)) + for i, j := range test.want { + want[i] = entries[j] + } + if !logEntriesEqual(got, want) { + t.Errorf("%q: got %v, want %v", test.orderBy, got, want) + } + } +} + +func logEntriesEqual(a, b []*logpb.LogEntry) bool { + if len(a) != len(b) { + return false + } + for i, aa := range a { + if !proto.Equal(aa, b[i]) { + return false + } + } + return true +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/example_entry_iterator_test.go b/vendor/cloud.google.com/go/logging/logadmin/example_entry_iterator_test.go new file mode 100644 index 0000000..39e6f57 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/example_entry_iterator_test.go @@ -0,0 +1,66 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logadmin_test + +import ( + "fmt" + "time" + + "cloud.google.com/go/logging/logadmin" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func ExampleClient_Entries() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + it := client.Entries(ctx, logadmin.Filter(`logName = "projects/my-project/logs/my-log"`)) + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func ExampleFilter_timestamp() { + // This example demonstrates how to list the last 24 hours of log entries. 
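+ // Filters compare timestamps as RFC 3339 strings, so the cutoff time is
+ // formatted accordingly before being spliced into the filter.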
+ ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + oneDayAgo := time.Now().Add(-24 * time.Hour) + t := oneDayAgo.Format(time.RFC3339) // Logging API wants timestamps in RFC 3339 format. + it := client.Entries(ctx, logadmin.Filter(fmt.Sprintf(`timestamp > "%s"`, t))) + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func ExampleEntryIterator_Next() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + it := client.Entries(ctx) + for { + entry, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(entry) + } +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/example_metric_iterator_test.go b/vendor/cloud.google.com/go/logging/logadmin/example_metric_iterator_test.go new file mode 100644 index 0000000..2e876e9 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/example_metric_iterator_test.go @@ -0,0 +1,52 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logadmin_test + +import ( + "fmt" + + "cloud.google.com/go/logging/logadmin" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func ExampleClient_Metrics() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + it := client.Metrics(ctx) + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func ExampleMetricIterator_Next() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + it := client.Metrics(ctx) + for { + metric, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(metric) + } +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/example_paging_test.go b/vendor/cloud.google.com/go/logging/logadmin/example_paging_test.go new file mode 100644 index 0000000..036eeeb --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/example_paging_test.go @@ -0,0 +1,92 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package logadmin_test + +import ( + "bytes" + "flag" + "fmt" + "html/template" + "log" + "net/http" + + "cloud.google.com/go/logging" + "cloud.google.com/go/logging/logadmin" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +var ( + client *logadmin.Client + projectID = flag.String("project-id", "", "ID of the project to use") +) + +func ExampleClient_Entries_pagination() { + // This example demonstrates how to iterate through items a page at a time + // even if each successive page is fetched by a different process. It is a + // complete web server that displays pages of log entries. To run it as a + // standalone program, rename both the package and this function to "main". + ctx := context.Background() + flag.Parse() + if *projectID == "" { + log.Fatal("-project-id missing") + } + var err error + client, err = logadmin.NewClient(ctx, *projectID) + if err != nil { + log.Fatalf("creating logging client: %v", err) + } + + http.HandleFunc("/entries", handleEntries) + log.Print("listening on 8080") + log.Fatal(http.ListenAndServe(":8080", nil)) +} + +var pageTemplate = template.Must(template.New("").Parse(` +
+<table>
+ {{range .Entries}}
+ <tr><td>{{.}}</td></tr>
+ {{end}}
+</table>
+{{if .Next}}
+ <a href="/entries?pageToken={{.Next}}">Next Page</a>
+{{end}}
+`))
+
+func handleEntries(w http.ResponseWriter, r *http.Request) {
+ ctx := context.Background()
+ filter := fmt.Sprintf(`logName = "projects/%s/logs/testlog"`, *projectID)
+ it := client.Entries(ctx, logadmin.Filter(filter))
+ var entries []*logging.Entry
+ nextTok, err := iterator.NewPager(it, 5, r.URL.Query().Get("pageToken")).NextPage(&entries)
+ if err != nil {
+ http.Error(w, fmt.Sprintf("problem getting the next page: %v", err), http.StatusInternalServerError)
+ return
+ }
+ data := struct {
+ Entries []*logging.Entry
+ Next string
+ }{
+ entries,
+ nextTok,
+ }
+ var buf bytes.Buffer
+ if err := pageTemplate.Execute(&buf, data); err != nil {
+ http.Error(w, fmt.Sprintf("problem executing page template: %v", err), http.StatusInternalServerError)
+ }
+ if _, err := buf.WriteTo(w); err != nil {
+ log.Printf("writing response: %v", err)
+ }
+} diff --git a/vendor/cloud.google.com/go/logging/logadmin/example_resource_iterator_test.go b/vendor/cloud.google.com/go/logging/logadmin/example_resource_iterator_test.go new file mode 100644 index 0000000..fe67e23 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/example_resource_iterator_test.go @@ -0,0 +1,52 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +
+package logadmin_test
+
+import (
+ "fmt"
+
+ "cloud.google.com/go/logging/logadmin"
+ "golang.org/x/net/context"
+ "google.golang.org/api/iterator"
+)
+
+func ExampleClient_ResourceDescriptors() {
+ ctx := context.Background()
+ client, err := logadmin.NewClient(ctx, "my-project")
+ if err != nil {
+ // TODO: Handle error.
+ }
+ it := client.ResourceDescriptors(ctx)
+ _ = it // TODO: iterate using Next or iterator.Pager.
+}
+
+func ExampleResourceDescriptorIterator_Next() {
+ ctx := context.Background()
+ client, err := logadmin.NewClient(ctx, "my-project")
+ if err != nil {
+ // TODO: Handle error.
+ }
+ it := client.ResourceDescriptors(ctx)
+ for {
+ rdesc, err := it.Next()
+ if err == iterator.Done {
+ break
+ }
+ if err != nil {
+ // TODO: Handle error.
+ }
+ fmt.Println(rdesc)
+ }
+} diff --git a/vendor/cloud.google.com/go/logging/logadmin/example_sink_iterator_test.go b/vendor/cloud.google.com/go/logging/logadmin/example_sink_iterator_test.go new file mode 100644 index 0000000..918fd9f --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/example_sink_iterator_test.go @@ -0,0 +1,52 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package logadmin_test + +import ( + "fmt" + + "cloud.google.com/go/logging/logadmin" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func ExampleClient_Sinks() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + it := client.Sinks(ctx) + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func ExampleSinkIterator_Next() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + it := client.Sinks(ctx) + for { + sink, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(sink) + } +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/examples_test.go b/vendor/cloud.google.com/go/logging/logadmin/examples_test.go new file mode 100644 index 0000000..0926dd5 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/examples_test.go @@ -0,0 +1,161 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logadmin_test + +import ( + "fmt" + + "cloud.google.com/go/logging/logadmin" + "golang.org/x/net/context" +) + +func ExampleNewClient() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + // Use client to manage logs, metrics and sinks. + // Close the client when finished. + if err := client.Close(); err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_DeleteLog() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + err = client.DeleteLog(ctx, "my-log") + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_CreateMetric() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + err = client.CreateMetric(ctx, &logadmin.Metric{ + ID: "severe-errors", + Description: "entries at ERROR or higher severities", + Filter: "severity >= ERROR", + }) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_DeleteMetric() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + if err := client.DeleteMetric(ctx, "severe-errors"); err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_Metric() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + m, err := client.Metric(ctx, "severe-errors") + if err != nil { + // TODO: Handle error. 
+ } + fmt.Println(m) +} + +func ExampleClient_UpdateMetric() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + err = client.UpdateMetric(ctx, &logadmin.Metric{ + ID: "severe-errors", + Description: "entries at high severities", + Filter: "severity > ERROR", + }) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_CreateSink() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + sink, err := client.CreateSink(ctx, &logadmin.Sink{ + ID: "severe-errors-to-gcs", + Destination: "storage.googleapis.com/my-bucket", + Filter: "severity >= ERROR", + }) + if err != nil { + // TODO: Handle error. + } + fmt.Println(sink) +} + +func ExampleClient_DeleteSink() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + if err := client.DeleteSink(ctx, "severe-errors-to-gcs"); err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_Sink() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + s, err := client.Sink(ctx, "severe-errors-to-gcs") + if err != nil { + // TODO: Handle error. + } + fmt.Println(s) +} + +func ExampleClient_UpdateSink() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + sink, err := client.UpdateSink(ctx, &logadmin.Sink{ + ID: "severe-errors-to-gcs", + Destination: "storage.googleapis.com/my-other-bucket", + Filter: "severity >= ERROR", + }) + if err != nil { + // TODO: Handle error. + } + fmt.Println(sink) +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/logadmin.go b/vendor/cloud.google.com/go/logging/logadmin/logadmin.go new file mode 100644 index 0000000..bb97b70 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/logadmin.go @@ -0,0 +1,406 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// These features are missing now, but will likely be added: +// - There is no way to specify CallOptions. + +// Package logadmin contains a Stackdriver Logging client that can be used +// for reading logs and working with sinks, metrics and monitored resources. +// For a client that can write logs, see package cloud.google.com/go/logging. +// +// The client uses Logging API v2. +// See https://cloud.google.com/logging/docs/api/v2/ for an introduction to the API. +// +// Note: This package is in beta. Some backwards-incompatible changes may occur. 
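+//
+// NewClient authenticates with AdminScope by default, which suffices for
+// every method in this package; the read-only methods also accept ReadScope
+// (see the per-method comments below).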
+package logadmin // import "cloud.google.com/go/logging/logadmin" + +import ( + "fmt" + "math" + "net/http" + "net/url" + "strings" + "time" + + "cloud.google.com/go/internal/version" + "cloud.google.com/go/logging" + vkit "cloud.google.com/go/logging/apiv2" + "cloud.google.com/go/logging/internal" + "github.com/golang/protobuf/ptypes" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + logtypepb "google.golang.org/genproto/googleapis/logging/type" + logpb "google.golang.org/genproto/googleapis/logging/v2" + "google.golang.org/grpc/codes" + + // Import the following so EntryIterator can unmarshal log protos. + _ "google.golang.org/genproto/googleapis/appengine/logging/v1" + _ "google.golang.org/genproto/googleapis/cloud/audit" +) + +// Client is a Logging client. A Client is associated with a single Cloud project. +type Client struct { + lClient *vkit.Client // logging client + sClient *vkit.ConfigClient // sink client + mClient *vkit.MetricsClient // metric client + parent string + closed bool +} + +// NewClient returns a new logging client associated with the provided project ID. +// +// By default NewClient uses AdminScope. To use a different scope, call +// NewClient using a WithScopes option (see https://godoc.org/google.golang.org/api/option#WithScopes). +func NewClient(ctx context.Context, parent string, opts ...option.ClientOption) (*Client, error) { + if !strings.ContainsRune(parent, '/') { + parent = "projects/" + parent + } + opts = append([]option.ClientOption{ + option.WithEndpoint(internal.ProdAddr), + option.WithScopes(logging.AdminScope), + }, opts...) + lc, err := vkit.NewClient(ctx, opts...) + if err != nil { + return nil, err + } + // TODO(jba): pass along any client options that should be provided to all clients. + sc, err := vkit.NewConfigClient(ctx, option.WithGRPCConn(lc.Connection())) + if err != nil { + return nil, err + } + mc, err := vkit.NewMetricsClient(ctx, option.WithGRPCConn(lc.Connection())) + if err != nil { + return nil, err + } + // Retry some non-idempotent methods on INTERNAL, because it happens sometimes + // and in all observed cases the operation did not complete. + retryerOnInternal := func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Internal, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + } + mc.CallOptions.CreateLogMetric = []gax.CallOption{gax.WithRetry(retryerOnInternal)} + mc.CallOptions.UpdateLogMetric = []gax.CallOption{gax.WithRetry(retryerOnInternal)} + + lc.SetGoogleClientInfo("gccl", version.Repo) + sc.SetGoogleClientInfo("gccl", version.Repo) + mc.SetGoogleClientInfo("gccl", version.Repo) + client := &Client{ + lClient: lc, + sClient: sc, + mClient: mc, + parent: parent, + } + return client, nil +} + +// Close closes the client. +func (c *Client) Close() error { + if c.closed { + return nil + } + // Return only the first error. Since all clients share an underlying connection, + // Closes after the first always report a "connection is closing" error. + err := c.lClient.Close() + _ = c.sClient.Close() + _ = c.mClient.Close() + c.closed = true + return err +} + +// DeleteLog deletes a log and all its log entries. The log will reappear if it receives new entries. +// logID identifies the log within the project. An example log ID is "syslog". Requires AdminScope. 
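+//
+// A minimal sketch of a call ("syslog" as above):
+//
+//  if err := client.DeleteLog(ctx, "syslog"); err != nil {
+//      // TODO: Handle error.
+//  }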
+func (c *Client) DeleteLog(ctx context.Context, logID string) error { + return c.lClient.DeleteLog(ctx, &logpb.DeleteLogRequest{ + LogName: internal.LogPath(c.parent, logID), + }) +} + +func toHTTPRequest(p *logtypepb.HttpRequest) (*logging.HTTPRequest, error) { + if p == nil { + return nil, nil + } + u, err := url.Parse(p.RequestUrl) + if err != nil { + return nil, err + } + var dur time.Duration + if p.Latency != nil { + dur, err = ptypes.Duration(p.Latency) + if err != nil { + return nil, err + } + } + hr := &http.Request{ + Method: p.RequestMethod, + URL: u, + Header: map[string][]string{}, + } + if p.UserAgent != "" { + hr.Header.Set("User-Agent", p.UserAgent) + } + if p.Referer != "" { + hr.Header.Set("Referer", p.Referer) + } + return &logging.HTTPRequest{ + Request: hr, + RequestSize: p.RequestSize, + Status: int(p.Status), + ResponseSize: p.ResponseSize, + Latency: dur, + RemoteIP: p.RemoteIp, + CacheHit: p.CacheHit, + CacheValidatedWithOriginServer: p.CacheValidatedWithOriginServer, + }, nil +} + +// An EntriesOption is an option for listing log entries. +type EntriesOption interface { + set(*logpb.ListLogEntriesRequest) +} + +// ProjectIDs sets the project IDs or project numbers from which to retrieve +// log entries. Examples of a project ID: "my-project-1A", "1234567890". +func ProjectIDs(pids []string) EntriesOption { return projectIDs(pids) } + +type projectIDs []string + +func (p projectIDs) set(r *logpb.ListLogEntriesRequest) { + r.ResourceNames = make([]string, len(p)) + for i, v := range p { + r.ResourceNames[i] = fmt.Sprintf("projects/%s", v) + } +} + +// ResourceNames sets the resource names from which to retrieve +// log entries. Examples: "projects/my-project-1A", "organizations/my-org". +func ResourceNames(rns []string) EntriesOption { return resourceNames(rns) } + +type resourceNames []string + +func (rn resourceNames) set(r *logpb.ListLogEntriesRequest) { + r.ResourceNames = append([]string(nil), rn...) +} + +// Filter sets an advanced logs filter for listing log entries (see +// https://cloud.google.com/logging/docs/view/advanced_filters). The filter is +// compared against all log entries in the projects specified by ProjectIDs. +// Only entries that match the filter are retrieved. An empty filter (the +// default) matches all log entries. +// +// In the filter string, log names must be written in their full form, as +// "projects/PROJECT-ID/logs/LOG-ID". Forward slashes in LOG-ID must be +// replaced by %2F before calling Filter. +// +// Timestamps in the filter string must be written in RFC 3339 format. See the +// timestamp example. +func Filter(f string) EntriesOption { return filter(f) } + +type filter string + +func (f filter) set(r *logpb.ListLogEntriesRequest) { r.Filter = string(f) } + +// NewestFirst causes log entries to be listed from most recent (newest) to +// least recent (oldest). By default, they are listed from oldest to newest. +func NewestFirst() EntriesOption { return newestFirst{} } + +type newestFirst struct{} + +func (newestFirst) set(r *logpb.ListLogEntriesRequest) { r.OrderBy = "timestamp desc" } + +// Entries returns an EntryIterator for iterating over log entries. By default, +// the log entries will be restricted to those from the project passed to +// NewClient. This may be overridden by passing a ProjectIDs option. Requires ReadScope or AdminScope. 
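+//
+// A typical listing loop, sketched after the package examples:
+//
+//  it := client.Entries(ctx, NewestFirst())
+//  for {
+//      entry, err := it.Next()
+//      if err == iterator.Done {
+//          break
+//      }
+//      if err != nil {
+//          // TODO: Handle error.
+//      }
+//      fmt.Println(entry)
+//  }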
+func (c *Client) Entries(ctx context.Context, opts ...EntriesOption) *EntryIterator { + it := &EntryIterator{ + it: c.lClient.ListLogEntries(ctx, listLogEntriesRequest(c.parent, opts)), + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + return it +} + +func listLogEntriesRequest(parent string, opts []EntriesOption) *logpb.ListLogEntriesRequest { + req := &logpb.ListLogEntriesRequest{ + ResourceNames: []string{parent}, + } + for _, opt := range opts { + opt.set(req) + } + return req +} + +// An EntryIterator iterates over log entries. +type EntryIterator struct { + it *vkit.LogEntryIterator + pageInfo *iterator.PageInfo + nextFunc func() error + items []*logging.Entry +} + +// PageInfo supports pagination. See https://godoc.org/google.golang.org/api/iterator package for details. +func (it *EntryIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +// Next returns the next result. Its second return value is iterator.Done +// (https://godoc.org/google.golang.org/api/iterator) if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *EntryIterator) Next() (*logging.Entry, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + item := it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *EntryIterator) fetch(pageSize int, pageToken string) (string, error) { + return iterFetch(pageSize, pageToken, it.it.PageInfo(), func() error { + item, err := it.it.Next() + if err != nil { + return err + } + e, err := fromLogEntry(item) + if err != nil { + return err + } + it.items = append(it.items, e) + return nil + }) +} + +func trunc32(i int) int32 { + if i > math.MaxInt32 { + i = math.MaxInt32 + } + return int32(i) +} + +var slashUnescaper = strings.NewReplacer("%2F", "/", "%2f", "/") + +func fromLogEntry(le *logpb.LogEntry) (*logging.Entry, error) { + time, err := ptypes.Timestamp(le.Timestamp) + if err != nil { + return nil, err + } + var payload interface{} + switch x := le.Payload.(type) { + case *logpb.LogEntry_TextPayload: + payload = x.TextPayload + + case *logpb.LogEntry_ProtoPayload: + var d ptypes.DynamicAny + if err := ptypes.UnmarshalAny(x.ProtoPayload, &d); err != nil { + return nil, fmt.Errorf("logging: unmarshalling proto payload: %v", err) + } + payload = d.Message + + case *logpb.LogEntry_JsonPayload: + // Leave this as a Struct. + // TODO(jba): convert to map[string]interface{}? + payload = x.JsonPayload + + default: + return nil, fmt.Errorf("logging: unknown payload type: %T", le.Payload) + } + hr, err := toHTTPRequest(le.HttpRequest) + if err != nil { + return nil, err + } + return &logging.Entry{ + Timestamp: time, + Severity: logging.Severity(le.Severity), + Payload: payload, + Labels: le.Labels, + InsertID: le.InsertId, + HTTPRequest: hr, + Operation: le.Operation, + LogName: slashUnescaper.Replace(le.LogName), + Resource: le.Resource, + Trace: le.Trace, + }, nil +} + +// Logs lists the logs owned by the parent resource of the client. +func (c *Client) Logs(ctx context.Context) *LogIterator { + it := &LogIterator{ + parentResource: c.parent, + it: c.lClient.ListLogs(ctx, &logpb.ListLogsRequest{Parent: c.parent}), + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + return it +} + +// A LogIterator iterates over logs. 
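+// Each item is a log ID such as "syslog", derived from the full log
+// resource name via internal.LogIDFromPath.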
+type LogIterator struct { + parentResource string + it *vkit.StringIterator + pageInfo *iterator.PageInfo + nextFunc func() error + items []string +} + +// PageInfo supports pagination. See https://godoc.org/google.golang.org/api/iterator package for details. +func (it *LogIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +// Next returns the next result. Its second return value is iterator.Done +// (https://godoc.org/google.golang.org/api/iterator) if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *LogIterator) Next() (string, error) { + if err := it.nextFunc(); err != nil { + return "", err + } + item := it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *LogIterator) fetch(pageSize int, pageToken string) (string, error) { + return iterFetch(pageSize, pageToken, it.it.PageInfo(), func() error { + logPath, err := it.it.Next() + if err != nil { + return err + } + logID := internal.LogIDFromPath(it.parentResource, logPath) + it.items = append(it.items, logID) + return nil + }) +} + +// Common fetch code for iterators that are backed by vkit iterators. +func iterFetch(pageSize int, pageToken string, pi *iterator.PageInfo, next func() error) (string, error) { + pi.MaxSize = pageSize + pi.Token = pageToken + // Get one item, which will fill the buffer. + if err := next(); err != nil { + return "", err + } + // Collect the rest of the buffer. + for pi.Remaining() > 0 { + if err := next(); err != nil { + return "", err + } + } + return pi.Token, nil +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/logadmin_test.go b/vendor/cloud.google.com/go/logging/logadmin/logadmin_test.go new file mode 100644 index 0000000..7cdf449 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/logadmin_test.go @@ -0,0 +1,267 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// TODO(jba): test that OnError is getting called appropriately. + +package logadmin + +import ( + "flag" + "log" + "net/http" + "net/url" + "os" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "cloud.google.com/go/logging" + ltesting "cloud.google.com/go/logging/internal/testing" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + durpb "github.com/golang/protobuf/ptypes/duration" + structpb "github.com/golang/protobuf/ptypes/struct" + "github.com/google/go-cmp/cmp/cmpopts" + "golang.org/x/net/context" + "google.golang.org/api/option" + mrpb "google.golang.org/genproto/googleapis/api/monitoredres" + audit "google.golang.org/genproto/googleapis/cloud/audit" + logtypepb "google.golang.org/genproto/googleapis/logging/type" + logpb "google.golang.org/genproto/googleapis/logging/v2" + "google.golang.org/grpc" +) + +var ( + client *Client + testProjectID string +) + +var ( + // If true, this test is using the production service, not a fake. 
+ integrationTest bool
+
+ newClient func(ctx context.Context, projectID string) *Client
+)
+
+func TestMain(m *testing.M) {
+ flag.Parse() // needed for testing.Short()
+ ctx := context.Background()
+ testProjectID = testutil.ProjID()
+ if testProjectID == "" || testing.Short() {
+ integrationTest = false
+ if testProjectID != "" {
+ log.Print("Integration tests skipped in short mode (using fake instead)")
+ }
+ testProjectID = "PROJECT_ID"
+ addr, err := ltesting.NewServer()
+ if err != nil {
+ log.Fatalf("creating fake server: %v", err)
+ }
+ newClient = func(ctx context.Context, projectID string) *Client {
+ conn, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithBlock())
+ if err != nil {
+ log.Fatalf("dialing %q: %v", addr, err)
+ }
+ c, err := NewClient(ctx, projectID, option.WithGRPCConn(conn))
+ if err != nil {
+ log.Fatalf("creating client for fake at %q: %v", addr, err)
+ }
+ return c
+ }
+ } else {
+ integrationTest = true
+ ts := testutil.TokenSource(ctx, logging.AdminScope)
+ if ts == nil {
+ log.Fatal("The project key must be set. See CONTRIBUTING.md for details")
+ }
+ log.Printf("running integration tests with project %s", testProjectID)
+ newClient = func(ctx context.Context, projectID string) *Client {
+ c, err := NewClient(ctx, projectID, option.WithTokenSource(ts),
+ option.WithGRPCDialOption(grpc.WithBlock()))
+ if err != nil {
+ log.Fatalf("creating prod client: %v", err)
+ }
+ return c
+ }
+ }
+ client = newClient(ctx, testProjectID)
+ initMetrics(ctx)
+ cleanup := initSinks(ctx)
+ exit := m.Run()
+ cleanup()
+ client.Close()
+ os.Exit(exit)
+}
+
+// EntryIterator and DeleteLog are tested in the logging package.
+
+func TestClientClose(t *testing.T) {
+ c := newClient(context.Background(), testProjectID)
+ if err := c.Close(); err != nil {
+ t.Errorf("got %v, want nil", err)
+ }
+}
+
+func TestFromLogEntry(t *testing.T) {
+ now := time.Now()
+ res := &mrpb.MonitoredResource{Type: "global"}
+ ts, err := ptypes.TimestampProto(now)
+ if err != nil {
+ t.Fatal(err)
+ }
+ logEntry := logpb.LogEntry{
+ LogName: "projects/PROJECT_ID/logs/LOG_ID",
+ Resource: res,
+ Payload: &logpb.LogEntry_TextPayload{TextPayload: "hello"},
+ Timestamp: ts,
+ Severity: logtypepb.LogSeverity_INFO,
+ InsertId: "123",
+ HttpRequest: &logtypepb.HttpRequest{
+ RequestMethod: "GET",
+ RequestUrl: "http://example.com/path?q=1",
+ RequestSize: 100,
+ Status: 200,
+ ResponseSize: 25,
+ Latency: &durpb.Duration{Seconds: 100},
+ UserAgent: "user-agent",
+ RemoteIp: "127.0.0.1",
+ Referer: "referer",
+ CacheHit: true,
+ CacheValidatedWithOriginServer: true,
+ },
+ Labels: map[string]string{
+ "a": "1",
+ "b": "two",
+ "c": "true",
+ },
+ }
+ u, err := url.Parse("http://example.com/path?q=1")
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := &logging.Entry{
+ LogName: "projects/PROJECT_ID/logs/LOG_ID",
+ Resource: res,
+ Timestamp: now.In(time.UTC),
+ Severity: logging.Info,
+ Payload: "hello",
+ Labels: map[string]string{
+ "a": "1",
+ "b": "two",
+ "c": "true",
+ },
+ InsertID: "123",
+ HTTPRequest: &logging.HTTPRequest{
+ Request: &http.Request{
+ Method: "GET",
+ URL: u,
+ Header: map[string][]string{
+ "User-Agent": []string{"user-agent"},
+ "Referer": []string{"referer"},
+ },
+ },
+ RequestSize: 100,
+ Status: 200,
+ ResponseSize: 25,
+ Latency: 100 * time.Second,
+ RemoteIP: "127.0.0.1",
+ CacheHit: true,
+ CacheValidatedWithOriginServer: true,
+ },
+ }
+ got, err := fromLogEntry(&logEntry)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if diff := testutil.Diff(got, want,
cmpopts.IgnoreUnexported(http.Request{})); diff != "" { + t.Errorf("FullEntry:\n%s", diff) + } + + // Proto payload. + alog := &audit.AuditLog{ + ServiceName: "svc", + MethodName: "method", + ResourceName: "shelves/S/books/B", + } + any, err := ptypes.MarshalAny(alog) + if err != nil { + t.Fatal(err) + } + logEntry = logpb.LogEntry{ + LogName: "projects/PROJECT_ID/logs/LOG_ID", + Resource: res, + Timestamp: ts, + Payload: &logpb.LogEntry_ProtoPayload{ProtoPayload: any}, + } + got, err = fromLogEntry(&logEntry) + if err != nil { + t.Fatal(err) + } + if !ltesting.PayloadEqual(got.Payload, alog) { + t.Errorf("got %+v, want %+v", got.Payload, alog) + } + + // JSON payload. + jstruct := &structpb.Struct{Fields: map[string]*structpb.Value{ + "f": &structpb.Value{Kind: &structpb.Value_NumberValue{NumberValue: 3.1}}, + }} + logEntry = logpb.LogEntry{ + LogName: "projects/PROJECT_ID/logs/LOG_ID", + Resource: res, + Timestamp: ts, + Payload: &logpb.LogEntry_JsonPayload{JsonPayload: jstruct}, + } + got, err = fromLogEntry(&logEntry) + if err != nil { + t.Fatal(err) + } + if !ltesting.PayloadEqual(got.Payload, jstruct) { + t.Errorf("got %+v, want %+v", got.Payload, jstruct) + } +} + +func TestListLogEntriesRequest(t *testing.T) { + for _, test := range []struct { + opts []EntriesOption + resourceNames []string + filter string + orderBy string + }{ + // Default is client's project ID, empty filter and orderBy. + {nil, []string{"projects/PROJECT_ID"}, "", ""}, + {[]EntriesOption{NewestFirst(), Filter("f")}, + []string{"projects/PROJECT_ID"}, "f", "timestamp desc"}, + {[]EntriesOption{ProjectIDs([]string{"foo"})}, + []string{"projects/foo"}, "", ""}, + {[]EntriesOption{ResourceNames([]string{"folders/F", "organizations/O"})}, + []string{"folders/F", "organizations/O"}, "", ""}, + {[]EntriesOption{NewestFirst(), Filter("f"), ProjectIDs([]string{"foo"})}, + []string{"projects/foo"}, "f", "timestamp desc"}, + {[]EntriesOption{NewestFirst(), Filter("f"), ProjectIDs([]string{"foo"})}, + []string{"projects/foo"}, "f", "timestamp desc"}, + // If there are repeats, last one wins. + {[]EntriesOption{NewestFirst(), Filter("no"), ProjectIDs([]string{"foo"}), Filter("f")}, + []string{"projects/foo"}, "f", "timestamp desc"}, + } { + got := listLogEntriesRequest("projects/PROJECT_ID", test.opts) + want := &logpb.ListLogEntriesRequest{ + ResourceNames: test.resourceNames, + Filter: test.filter, + OrderBy: test.orderBy, + } + if !proto.Equal(got, want) { + t.Errorf("%v:\ngot %v\nwant %v", test.opts, got, want) + } + } +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/metrics.go b/vendor/cloud.google.com/go/logging/logadmin/metrics.go new file mode 100644 index 0000000..1824ed3 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/metrics.go @@ -0,0 +1,154 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package logadmin + +import ( + "fmt" + + vkit "cloud.google.com/go/logging/apiv2" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + logpb "google.golang.org/genproto/googleapis/logging/v2" +) + +// Metric describes a logs-based metric. The value of the metric is the +// number of log entries that match a logs filter. +// +// Metrics are a feature of Stackdriver Monitoring. +// See https://cloud.google.com/monitoring/api/v3/metrics for more about them. +type Metric struct { + // ID is a client-assigned metric identifier. Example: + // "severe_errors". Metric identifiers are limited to 1000 + // characters and can include only the following characters: A-Z, + // a-z, 0-9, and the special characters _-.,+!*',()%/\. The + // forward-slash character (/) denotes a hierarchy of name pieces, + // and it cannot be the first character of the name. + ID string + + // Description describes this metric. It is used in documentation. + Description string + + // Filter is an advanced logs filter (see + // https://cloud.google.com/logging/docs/view/advanced_filters). + // Example: "logName:syslog AND severity>=ERROR". + Filter string +} + +// CreateMetric creates a logs-based metric. +func (c *Client) CreateMetric(ctx context.Context, m *Metric) error { + _, err := c.mClient.CreateLogMetric(ctx, &logpb.CreateLogMetricRequest{ + Parent: c.parent, + Metric: toLogMetric(m), + }) + return err +} + +// DeleteMetric deletes a log-based metric. +// The provided metric ID is the metric identifier. For example, "severe_errors". +func (c *Client) DeleteMetric(ctx context.Context, metricID string) error { + return c.mClient.DeleteLogMetric(ctx, &logpb.DeleteLogMetricRequest{ + MetricName: c.metricPath(metricID), + }) +} + +// Metric gets a logs-based metric. +// The provided metric ID is the metric identifier. For example, "severe_errors". +// Requires ReadScope or AdminScope. +func (c *Client) Metric(ctx context.Context, metricID string) (*Metric, error) { + lm, err := c.mClient.GetLogMetric(ctx, &logpb.GetLogMetricRequest{ + MetricName: c.metricPath(metricID), + }) + if err != nil { + return nil, err + } + return fromLogMetric(lm), nil +} + +// UpdateMetric creates a logs-based metric if it does not exist, or updates an +// existing one. +func (c *Client) UpdateMetric(ctx context.Context, m *Metric) error { + _, err := c.mClient.UpdateLogMetric(ctx, &logpb.UpdateLogMetricRequest{ + MetricName: c.metricPath(m.ID), + Metric: toLogMetric(m), + }) + return err +} + +func (c *Client) metricPath(metricID string) string { + return fmt.Sprintf("%s/metrics/%s", c.parent, metricID) +} + +// Metrics returns a MetricIterator for iterating over all Metrics in the Client's project. +// Requires ReadScope or AdminScope. +func (c *Client) Metrics(ctx context.Context) *MetricIterator { + it := &MetricIterator{ + it: c.mClient.ListLogMetrics(ctx, &logpb.ListLogMetricsRequest{Parent: c.parent}), + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + return it +} + +// A MetricIterator iterates over Metrics. +type MetricIterator struct { + it *vkit.LogMetricIterator + pageInfo *iterator.PageInfo + nextFunc func() error + items []*Metric +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MetricIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +// Next returns the next result. 
Its second return value is Done if there are +// no more results. Once Next returns Done, all subsequent calls will return +// Done. +func (it *MetricIterator) Next() (*Metric, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + item := it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MetricIterator) fetch(pageSize int, pageToken string) (string, error) { + return iterFetch(pageSize, pageToken, it.it.PageInfo(), func() error { + item, err := it.it.Next() + if err != nil { + return err + } + it.items = append(it.items, fromLogMetric(item)) + return nil + }) +} + +func toLogMetric(m *Metric) *logpb.LogMetric { + return &logpb.LogMetric{ + Name: m.ID, + Description: m.Description, + Filter: m.Filter, + } +} + +func fromLogMetric(lm *logpb.LogMetric) *Metric { + return &Metric{ + ID: lm.Name, + Description: lm.Description, + Filter: lm.Filter, + } +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/metrics_test.go b/vendor/cloud.google.com/go/logging/logadmin/metrics_test.go new file mode 100644 index 0000000..3a70358 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/metrics_test.go @@ -0,0 +1,154 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logadmin + +import ( + "log" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +var metricIDs = testutil.NewUIDSpace("GO-CLIENT-TEST-METRIC") + +// Initializes the tests before they run. +func initMetrics(ctx context.Context) { + // Clean up from aborted tests. + it := client.Metrics(ctx) +loop: + for { + m, err := it.Next() + switch err { + case nil: + if metricIDs.Older(m.ID, 24*time.Hour) { + client.DeleteMetric(ctx, m.ID) + } + case iterator.Done: + break loop + default: + log.Printf("cleanupMetrics: %v", err) + return + } + } +} + +func TestCreateDeleteMetric(t *testing.T) { + ctx := context.Background() + metric := &Metric{ + ID: metricIDs.New(), + Description: "DESC", + Filter: "FILTER", + } + if err := client.CreateMetric(ctx, metric); err != nil { + t.Fatal(err) + } + defer client.DeleteMetric(ctx, metric.ID) + + got, err := client.Metric(ctx, metric.ID) + if err != nil { + t.Fatal(err) + } + if want := metric; !testutil.Equal(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } + + if err := client.DeleteMetric(ctx, metric.ID); err != nil { + t.Fatal(err) + } + + if _, err := client.Metric(ctx, metric.ID); err == nil { + t.Fatal("got no error, expected one") + } +} + +func TestUpdateMetric(t *testing.T) { + ctx := context.Background() + metric := &Metric{ + ID: metricIDs.New(), + Description: "DESC", + Filter: "FILTER", + } + + // Updating a non-existent metric creates a new one. 
+ if err := client.UpdateMetric(ctx, metric); err != nil { + t.Fatal(err) + } + defer client.DeleteMetric(ctx, metric.ID) + got, err := client.Metric(ctx, metric.ID) + if err != nil { + t.Fatal(err) + } + if want := metric; !testutil.Equal(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } + + // Updating an existing metric changes it. + metric.Description = "CHANGED" + if err := client.UpdateMetric(ctx, metric); err != nil { + t.Fatal(err) + } + got, err = client.Metric(ctx, metric.ID) + if err != nil { + t.Fatal(err) + } + if want := metric; !testutil.Equal(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } +} + +func TestListMetrics(t *testing.T) { + ctx := context.Background() + + var metrics []*Metric + want := map[string]*Metric{} + for i := 0; i < 10; i++ { + m := &Metric{ + ID: metricIDs.New(), + Description: "DESC", + Filter: "FILTER", + } + metrics = append(metrics, m) + want[m.ID] = m + } + for _, m := range metrics { + if err := client.CreateMetric(ctx, m); err != nil { + t.Fatalf("Create(%q): %v", m.ID, err) + } + defer client.DeleteMetric(ctx, m.ID) + } + + got := map[string]*Metric{} + it := client.Metrics(ctx) + for { + m, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + t.Fatal(err) + } + // If tests run simultaneously, we may have more metrics than we + // created. So only check for our own. + if _, ok := want[m.ID]; ok { + got[m.ID] = m + } + } + if !testutil.Equal(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/resources.go b/vendor/cloud.google.com/go/logging/logadmin/resources.go new file mode 100644 index 0000000..79e8fdb --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/resources.go @@ -0,0 +1,74 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logadmin + +import ( + vkit "cloud.google.com/go/logging/apiv2" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + mrpb "google.golang.org/genproto/googleapis/api/monitoredres" + logpb "google.golang.org/genproto/googleapis/logging/v2" +) + +// ResourceDescriptors returns a ResourceDescriptorIterator +// for iterating over MonitoredResourceDescriptors. Requires ReadScope or AdminScope. +// See https://cloud.google.com/logging/docs/api/v2/#monitored-resources for an explanation of +// monitored resources. +// See https://cloud.google.com/logging/docs/api/v2/resource-list for a list of monitored resources. +func (c *Client) ResourceDescriptors(ctx context.Context) *ResourceDescriptorIterator { + it := &ResourceDescriptorIterator{ + it: c.lClient.ListMonitoredResourceDescriptors(ctx, + &logpb.ListMonitoredResourceDescriptorsRequest{}), + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + return it +} + +// ResourceDescriptorIterator is an iterator over MonitoredResourceDescriptors. 
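+// Descriptors are defined by the service; they cannot be created or edited
+// through this API, so listing is the only operation offered here.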
+type ResourceDescriptorIterator struct {
+ it *vkit.MonitoredResourceDescriptorIterator
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+ items []*mrpb.MonitoredResourceDescriptor
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *ResourceDescriptorIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
+
+// Next returns the next result. Its second return value is Done if there are
+// no more results. Once Next returns Done, all subsequent calls will return
+// Done.
+func (it *ResourceDescriptorIterator) Next() (*mrpb.MonitoredResourceDescriptor, error) {
+ if err := it.nextFunc(); err != nil {
+ return nil, err
+ }
+ item := it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *ResourceDescriptorIterator) fetch(pageSize int, pageToken string) (string, error) {
+ return iterFetch(pageSize, pageToken, it.it.PageInfo(), func() error {
+ item, err := it.it.Next()
+ if err != nil {
+ return err
+ }
+ it.items = append(it.items, item)
+ return nil
+ })
+} diff --git a/vendor/cloud.google.com/go/logging/logadmin/resources_test.go b/vendor/cloud.google.com/go/logging/logadmin/resources_test.go new file mode 100644 index 0000000..067d3d7 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/resources_test.go @@ -0,0 +1,46 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +
+package logadmin
+
+import (
+ "testing"
+
+ "golang.org/x/net/context"
+ "google.golang.org/api/iterator"
+)
+
+func TestMonitoredResourceDescriptors(t *testing.T) {
+ // We can't create MonitoredResourceDescriptors, and there is no guarantee
+ // about what the service will return. So we just check that the result is
+ // non-empty.
+ it := client.ResourceDescriptors(context.Background())
+ n := 0
+loop:
+ for {
+ _, err := it.Next()
+ switch err {
+ case nil:
+ n++
+ case iterator.Done:
+ break loop
+ default:
+ t.Fatal(err)
+ }
+ }
+ if n == 0 {
+ t.Fatal("Next: got no MonitoredResourceDescriptors, expected at least one")
+ }
+ // TODO(jba) test pagination.
+} diff --git a/vendor/cloud.google.com/go/logging/logadmin/sinks.go b/vendor/cloud.google.com/go/logging/logadmin/sinks.go new file mode 100644 index 0000000..5659dbd --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/sinks.go @@ -0,0 +1,168 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package logadmin + +import ( + "fmt" + + vkit "cloud.google.com/go/logging/apiv2" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + logpb "google.golang.org/genproto/googleapis/logging/v2" +) + +// Sink describes a sink used to export log entries outside Stackdriver +// Logging. Incoming log entries matching a filter are exported to a +// destination (a Cloud Storage bucket, BigQuery dataset or Cloud Pub/Sub +// topic). +// +// For more information, see https://cloud.google.com/logging/docs/export/using_exported_logs. +// (The Sinks in this package are what the documentation refers to as "project sinks".) +type Sink struct { + // ID is a client-assigned sink identifier. Example: + // "my-severe-errors-to-pubsub". + // Sink identifiers are limited to 1000 characters + // and can include only the following characters: A-Z, a-z, + // 0-9, and the special characters "_-.". + ID string + + // Destination is the export destination. See + // https://cloud.google.com/logging/docs/api/tasks/exporting-logs. + // Examples: "storage.googleapis.com/a-bucket", + // "bigquery.googleapis.com/projects/a-project-id/datasets/a-dataset". + Destination string + + // Filter optionally specifies an advanced logs filter (see + // https://cloud.google.com/logging/docs/view/advanced_filters) that + // defines the log entries to be exported. Example: "logName:syslog AND + // severity>=ERROR". If omitted, all entries are returned. + Filter string +} + +// CreateSink creates a Sink. It returns an error if the Sink already exists. +// Requires AdminScope. +func (c *Client) CreateSink(ctx context.Context, sink *Sink) (*Sink, error) { + ls, err := c.sClient.CreateSink(ctx, &logpb.CreateSinkRequest{ + Parent: c.parent, + Sink: toLogSink(sink), + }) + if err != nil { + fmt.Printf("Sink: %+v\n", toLogSink(sink)) + return nil, err + } + return fromLogSink(ls), nil +} + +// DeleteSink deletes a sink. The provided sinkID is the sink's identifier, such as +// "my-severe-errors-to-pubsub". +// Requires AdminScope. +func (c *Client) DeleteSink(ctx context.Context, sinkID string) error { + return c.sClient.DeleteSink(ctx, &logpb.DeleteSinkRequest{ + SinkName: c.sinkPath(sinkID), + }) +} + +// Sink gets a sink. The provided sinkID is the sink's identifier, such as +// "my-severe-errors-to-pubsub". +// Requires ReadScope or AdminScope. +func (c *Client) Sink(ctx context.Context, sinkID string) (*Sink, error) { + ls, err := c.sClient.GetSink(ctx, &logpb.GetSinkRequest{ + SinkName: c.sinkPath(sinkID), + }) + if err != nil { + return nil, err + } + return fromLogSink(ls), nil +} + +// UpdateSink updates an existing Sink. Requires AdminScope. +func (c *Client) UpdateSink(ctx context.Context, sink *Sink) (*Sink, error) { + ls, err := c.sClient.UpdateSink(ctx, &logpb.UpdateSinkRequest{ + SinkName: c.sinkPath(sink.ID), + Sink: toLogSink(sink), + }) + if err != nil { + return nil, err + } + return fromLogSink(ls), err +} + +func (c *Client) sinkPath(sinkID string) string { + return fmt.Sprintf("%s/sinks/%s", c.parent, sinkID) +} + +// Sinks returns a SinkIterator for iterating over all Sinks in the Client's project. +// Requires ReadScope or AdminScope. 
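+//
+// Iteration follows the same pattern as Entries and Metrics; a sketch:
+//
+//  it := client.Sinks(ctx)
+//  for {
+//      sink, err := it.Next()
+//      if err == iterator.Done {
+//          break
+//      }
+//      if err != nil {
+//          // TODO: Handle error.
+//      }
+//      fmt.Println(sink)
+//  }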
+func (c *Client) Sinks(ctx context.Context) *SinkIterator { + it := &SinkIterator{ + it: c.sClient.ListSinks(ctx, &logpb.ListSinksRequest{Parent: c.parent}), + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + return it +} + +// A SinkIterator iterates over Sinks. +type SinkIterator struct { + it *vkit.LogSinkIterator + pageInfo *iterator.PageInfo + nextFunc func() error + items []*Sink +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *SinkIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +// Next returns the next result. Its second return value is Done if there are +// no more results. Once Next returns Done, all subsequent calls will return +// Done. +func (it *SinkIterator) Next() (*Sink, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + item := it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *SinkIterator) fetch(pageSize int, pageToken string) (string, error) { + return iterFetch(pageSize, pageToken, it.it.PageInfo(), func() error { + item, err := it.it.Next() + if err != nil { + return err + } + it.items = append(it.items, fromLogSink(item)) + return nil + }) +} + +func toLogSink(s *Sink) *logpb.LogSink { + return &logpb.LogSink{ + Name: s.ID, + Destination: s.Destination, + Filter: s.Filter, + OutputVersionFormat: logpb.LogSink_V2, + } +} + +func fromLogSink(ls *logpb.LogSink) *Sink { + return &Sink{ + ID: ls.Name, + Destination: ls.Destination, + Filter: ls.Filter, + } +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/sinks_test.go b/vendor/cloud.google.com/go/logging/logadmin/sinks_test.go new file mode 100644 index 0000000..20f4b7c --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/sinks_test.go @@ -0,0 +1,227 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// TODO(jba): document in CONTRIBUTING.md that service account must be given "Logs Configuration Writer" IAM role for sink tests to pass. +// TODO(jba): [cont] (1) From top left menu, go to IAM & Admin. (2) In Roles dropdown for acct, select Logging > Logs Configuration Writer. (3) Save. +// TODO(jba): Also, cloud-logs@google.com must have Owner permission on the GCS bucket named for the test project. + +package logadmin + +import ( + "log" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "cloud.google.com/go/storage" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var sinkIDs = testutil.NewUIDSpace("GO-CLIENT-TEST-SINK") + +const testFilter = "" + +var testSinkDestination string + +// Called just before TestMain calls m.Run. +// Returns a cleanup function to be called after the tests finish. +func initSinks(ctx context.Context) func() { + // Create a unique GCS bucket so concurrent tests don't interfere with each other. 
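+ // The name embeds the project ID and a time-ordered UID, so stale buckets
+ // left by aborted runs can be recognized by age and deleted below.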
+	bucketIDs := testutil.NewUIDSpace(testProjectID + "-log-sink")
+	testBucket := bucketIDs.New()
+	testSinkDestination = "storage.googleapis.com/" + testBucket
+	var storageClient *storage.Client
+	if integrationTest {
+		// Create a unique bucket as a sink destination, and grant the cloud
+		// logging account owner rights.
+		ts := testutil.TokenSource(ctx, storage.ScopeFullControl)
+		var err error
+		storageClient, err = storage.NewClient(ctx, option.WithTokenSource(ts))
+		if err != nil {
+			log.Fatalf("new storage client: %v", err)
+		}
+		bucket := storageClient.Bucket(testBucket)
+		if err := bucket.Create(ctx, testProjectID, nil); err != nil {
+			log.Fatalf("creating storage bucket %q: %v", testBucket, err)
+		}
+		if err := bucket.ACL().Set(ctx, "group-cloud-logs@google.com", storage.RoleOwner); err != nil {
+			log.Fatalf("setting owner role: %v", err)
+		}
+	}
+	// Clean up from aborted tests.
+	it := client.Sinks(ctx)
+	for {
+		s, err := it.Next()
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			log.Printf("listing sinks: %v", err)
+			break
+		}
+		if sinkIDs.Older(s.ID, 24*time.Hour) {
+			client.DeleteSink(ctx, s.ID) // ignore error
+		}
+	}
+	if integrationTest {
+		for _, bn := range bucketNames(ctx, storageClient) {
+			if bucketIDs.Older(bn, 24*time.Hour) {
+				storageClient.Bucket(bn).Delete(ctx) // ignore error
+			}
+		}
+		return func() {
+			if err := storageClient.Bucket(testBucket).Delete(ctx); err != nil {
+				log.Printf("deleting %q: %v", testBucket, err)
+			}
+			storageClient.Close()
+		}
+	}
+	return func() {}
+}
+
+// Collect the names of all buckets for the test project.
+func bucketNames(ctx context.Context, client *storage.Client) []string {
+	var names []string
+	it := client.Buckets(ctx, testProjectID)
+loop:
+	for {
+		b, err := it.Next()
+		switch err {
+		case nil:
+			names = append(names, b.Name)
+		case iterator.Done:
+			break loop
+		default:
+			log.Printf("listing buckets: %v", err)
+			break loop
+		}
+	}
+	return names
+}
+
+func TestCreateDeleteSink(t *testing.T) {
+	ctx := context.Background()
+	sink := &Sink{
+		ID:          sinkIDs.New(),
+		Destination: testSinkDestination,
+		Filter:      testFilter,
+	}
+	got, err := client.CreateSink(ctx, sink)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer client.DeleteSink(ctx, sink.ID)
+	if want := sink; !testutil.Equal(got, want) {
+		t.Errorf("got %+v, want %+v", got, want)
+	}
+	got, err = client.Sink(ctx, sink.ID)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if want := sink; !testutil.Equal(got, want) {
+		t.Errorf("got %+v, want %+v", got, want)
+	}
+
+	if err := client.DeleteSink(ctx, sink.ID); err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err := client.Sink(ctx, sink.ID); err == nil {
+		t.Fatal("got no error, expected one")
+	}
+}
+
+func TestUpdateSink(t *testing.T) {
+	ctx := context.Background()
+	sink := &Sink{
+		ID:          sinkIDs.New(),
+		Destination: testSinkDestination,
+		Filter:      testFilter,
+	}
+
+	if _, err := client.CreateSink(ctx, sink); err != nil {
+		t.Fatal(err)
+	}
+	got, err := client.UpdateSink(ctx, sink)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer client.DeleteSink(ctx, sink.ID)
+	if want := sink; !testutil.Equal(got, want) {
+		t.Errorf("got %+v, want %+v", got, want)
+	}
+	got, err = client.Sink(ctx, sink.ID)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if want := sink; !testutil.Equal(got, want) {
+		t.Errorf("got %+v, want %+v", got, want)
+	}
+
+	// Updating an existing sink changes it.
+ sink.Filter = "" + if _, err := client.UpdateSink(ctx, sink); err != nil { + t.Fatal(err) + } + got, err = client.Sink(ctx, sink.ID) + if err != nil { + t.Fatal(err) + } + if want := sink; !testutil.Equal(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } +} + +func TestListSinks(t *testing.T) { + ctx := context.Background() + var sinks []*Sink + want := map[string]*Sink{} + for i := 0; i < 4; i++ { + s := &Sink{ + ID: sinkIDs.New(), + Destination: testSinkDestination, + Filter: testFilter, + } + sinks = append(sinks, s) + want[s.ID] = s + } + for _, s := range sinks { + if _, err := client.CreateSink(ctx, s); err != nil { + t.Fatalf("Create(%q): %v", s.ID, err) + } + defer client.DeleteSink(ctx, s.ID) + } + + got := map[string]*Sink{} + it := client.Sinks(ctx) + for { + s, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + t.Fatal(err) + } + // If tests run simultaneously, we may have more sinks than we + // created. So only check for our own. + if _, ok := want[s.ID]; ok { + got[s.ID] = s + } + } + if !testutil.Equal(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } +} diff --git a/vendor/cloud.google.com/go/logging/logging.go b/vendor/cloud.google.com/go/logging/logging.go new file mode 100644 index 0000000..b341f61 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logging.go @@ -0,0 +1,814 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// API/gRPC features intentionally missing from this client: +// - You cannot have the server pick the time of the entry. This client +// always sends a time. +// - There is no way to provide a protocol buffer payload. +// - No support for the "partial success" feature when writing log entries. + +// TODO(jba): test whether forward-slash characters in the log ID must be URL-encoded. +// These features are missing now, but will likely be added: +// - There is no way to specify CallOptions. + +package logging + +import ( + "encoding/json" + "errors" + "fmt" + "log" + "math" + "net/http" + "strconv" + "strings" + "sync" + "time" + + "cloud.google.com/go/compute/metadata" + "cloud.google.com/go/internal/version" + vkit "cloud.google.com/go/logging/apiv2" + "cloud.google.com/go/logging/internal" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + structpb "github.com/golang/protobuf/ptypes/struct" + tspb "github.com/golang/protobuf/ptypes/timestamp" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/support/bundler" + mrpb "google.golang.org/genproto/googleapis/api/monitoredres" + logtypepb "google.golang.org/genproto/googleapis/logging/type" + logpb "google.golang.org/genproto/googleapis/logging/v2" +) + +const ( + // Scope for reading from the logging service. + ReadScope = "https://www.googleapis.com/auth/logging.read" + + // Scope for writing to the logging service. 
+ WriteScope = "https://www.googleapis.com/auth/logging.write" + + // Scope for administrative actions on the logging service. + AdminScope = "https://www.googleapis.com/auth/logging.admin" +) + +const ( + // defaultErrorCapacity is the capacity of the channel used to deliver + // errors to the OnError function. + defaultErrorCapacity = 10 + + // DefaultDelayThreshold is the default value for the DelayThreshold LoggerOption. + DefaultDelayThreshold = time.Second + + // DefaultEntryCountThreshold is the default value for the EntryCountThreshold LoggerOption. + DefaultEntryCountThreshold = 1000 + + // DefaultEntryByteThreshold is the default value for the EntryByteThreshold LoggerOption. + DefaultEntryByteThreshold = 1 << 20 // 1MiB + + // DefaultBufferedByteLimit is the default value for the BufferedByteLimit LoggerOption. + DefaultBufferedByteLimit = 1 << 30 // 1GiB + + // defaultWriteTimeout is the timeout for the underlying write API calls. As + // write API calls are not idempotent, they are not retried on timeout. This + // timeout is to allow clients to degrade gracefully if underlying logging + // service is temporarily impaired for some reason. + defaultWriteTimeout = 10 * time.Minute +) + +// For testing: +var now = time.Now + +// ErrOverflow signals that the number of buffered entries for a Logger +// exceeds its BufferLimit. +var ErrOverflow = bundler.ErrOverflow + +// ErrOversizedEntry signals that an entry's size exceeds the maximum number of +// bytes that will be sent in a single call to the logging service. +var ErrOversizedEntry = bundler.ErrOversizedItem + +// Client is a Logging client. A Client is associated with a single Cloud project. +type Client struct { + client *vkit.Client // client for the logging service + parent string // e.g. "projects/proj-id" + errc chan error // should be buffered to minimize dropped errors + donec chan struct{} // closed on Client.Close to close Logger bundlers + loggers sync.WaitGroup // so we can wait for loggers to close + closed bool + + mu sync.Mutex + nErrs int // number of errors we saw + lastErr error // last error we saw + + // OnError is called when an error occurs in a call to Log or Flush. The + // error may be due to an invalid Entry, an overflow because BufferLimit + // was reached (in which case the error will be ErrOverflow) or an error + // communicating with the logging service. OnError is called with errors + // from all Loggers. It is never called concurrently. OnError is expected + // to return quickly; if errors occur while OnError is running, some may + // not be reported. The default behavior is to call log.Printf. + // + // This field should be set only once, before any method of Client is called. + OnError func(err error) +} + +// NewClient returns a new logging client associated with the provided parent. +// A parent can take any of the following forms: +// projects/PROJECT_ID +// folders/FOLDER_ID +// billingAccounts/ACCOUNT_ID +// organizations/ORG_ID +// for backwards compatibility, a string with no '/' is also allowed and is interpreted +// as a project ID. +// +// By default NewClient uses WriteScope. To use a different scope, call +// NewClient using a WithScopes option (see https://godoc.org/google.golang.org/api/option#WithScopes). 
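+//
+// A minimal construction sketch ("my-project-id" is a placeholder):
+//
+//	ctx := context.Background()
+//	client, err := logging.NewClient(ctx, "my-project-id")
+//	if err != nil {
+//		// TODO: handle error.
+//	}
+//	defer client.Close()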
+func NewClient(ctx context.Context, parent string, opts ...option.ClientOption) (*Client, error) { + if !strings.ContainsRune(parent, '/') { + parent = "projects/" + parent + } + opts = append([]option.ClientOption{ + option.WithEndpoint(internal.ProdAddr), + option.WithScopes(WriteScope), + }, opts...) + c, err := vkit.NewClient(ctx, opts...) + if err != nil { + return nil, err + } + c.SetGoogleClientInfo("gccl", version.Repo) + client := &Client{ + client: c, + parent: parent, + errc: make(chan error, defaultErrorCapacity), // create a small buffer for errors + donec: make(chan struct{}), + OnError: func(e error) { log.Printf("logging client: %v", e) }, + } + // Call the user's function synchronously, to make life easier for them. + go func() { + for err := range client.errc { + // This reference to OnError is memory-safe if the user sets OnError before + // calling any client methods. The reference happens before the first read from + // client.errc, which happens before the first write to client.errc, which + // happens before any call, which happens before the user sets OnError. + if fn := client.OnError; fn != nil { + fn(err) + } else { + log.Printf("logging (parent %q): %v", parent, err) + } + } + }() + return client, nil +} + +var unixZeroTimestamp *tspb.Timestamp + +func init() { + var err error + unixZeroTimestamp, err = ptypes.TimestampProto(time.Unix(0, 0)) + if err != nil { + panic(err) + } +} + +// Ping reports whether the client's connection to the logging service and the +// authentication configuration are valid. To accomplish this, Ping writes a +// log entry "ping" to a log named "ping". +func (c *Client) Ping(ctx context.Context) error { + ent := &logpb.LogEntry{ + Payload: &logpb.LogEntry_TextPayload{TextPayload: "ping"}, + Timestamp: unixZeroTimestamp, // Identical timestamps and insert IDs are both + InsertId: "ping", // necessary for the service to dedup these entries. + } + _, err := c.client.WriteLogEntries(ctx, &logpb.WriteLogEntriesRequest{ + LogName: internal.LogPath(c.parent, "ping"), + Resource: monitoredResource(c.parent), + Entries: []*logpb.LogEntry{ent}, + }) + return err +} + +// error puts the error on the client's error channel +// without blocking, and records summary error info. +func (c *Client) error(err error) { + select { + case c.errc <- err: + default: + } + c.mu.Lock() + c.lastErr = err + c.nErrs++ + c.mu.Unlock() +} + +func (c *Client) extractErrorInfo() error { + var err error + c.mu.Lock() + if c.lastErr != nil { + err = fmt.Errorf("saw %d errors; last: %v", c.nErrs, c.lastErr) + c.nErrs = 0 + c.lastErr = nil + } + c.mu.Unlock() + return err +} + +// A Logger is used to write log messages to a single log. It can be configured +// with a log ID, common monitored resource, and a set of common labels. +type Logger struct { + client *Client + logName string // "projects/{projectID}/logs/{logID}" + stdLoggers map[Severity]*log.Logger + bundler *bundler.Bundler + + // Options + commonResource *mrpb.MonitoredResource + commonLabels map[string]string + writeTimeout time.Duration +} + +// A LoggerOption is a configuration option for a Logger. +type LoggerOption interface { + set(*Logger) +} + +// CommonResource sets the monitored resource associated with all log entries +// written from a Logger. If not provided, the resource is automatically +// detected based on the running environment. This value can be overridden +// per-entry by setting an Entry's Resource field. 
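+//
+// For example, to attribute all entries to one GCE instance rather than the
+// detected resource (a sketch; the label values are placeholders):
+//
+//	lg := client.Logger("my-log", CommonResource(&mrpb.MonitoredResource{
+//		Type: "gce_instance",
+//		Labels: map[string]string{
+//			"project_id":  "my-project-id",
+//			"instance_id": "1234567890",
+//			"zone":        "us-central1-a",
+//		},
+//	}))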
+func CommonResource(r *mrpb.MonitoredResource) LoggerOption { return commonResource{r} } + +type commonResource struct{ *mrpb.MonitoredResource } + +func (r commonResource) set(l *Logger) { l.commonResource = r.MonitoredResource } + +var detectedResource struct { + pb *mrpb.MonitoredResource + once sync.Once +} + +func detectResource() *mrpb.MonitoredResource { + detectedResource.once.Do(func() { + if !metadata.OnGCE() { + return + } + projectID, err := metadata.ProjectID() + if err != nil { + return + } + id, err := metadata.InstanceID() + if err != nil { + return + } + zone, err := metadata.Zone() + if err != nil { + return + } + detectedResource.pb = &mrpb.MonitoredResource{ + Type: "gce_instance", + Labels: map[string]string{ + "project_id": projectID, + "instance_id": id, + "zone": zone, + }, + } + }) + return detectedResource.pb +} + +var resourceInfo = map[string]struct{ rtype, label string }{ + "organizations": {"organization", "organization_id"}, + "folders": {"folder", "folder_id"}, + "projects": {"project", "project_id"}, + "billingAccounts": {"billing_account", "account_id"}, +} + +func monitoredResource(parent string) *mrpb.MonitoredResource { + parts := strings.SplitN(parent, "/", 2) + if len(parts) != 2 { + return globalResource(parent) + } + info, ok := resourceInfo[parts[0]] + if !ok { + return globalResource(parts[1]) + } + return &mrpb.MonitoredResource{ + Type: info.rtype, + Labels: map[string]string{info.label: parts[1]}, + } +} + +func globalResource(projectID string) *mrpb.MonitoredResource { + return &mrpb.MonitoredResource{ + Type: "global", + Labels: map[string]string{ + "project_id": projectID, + }, + } +} + +// CommonLabels are labels that apply to all log entries written from a Logger, +// so that you don't have to repeat them in each log entry's Labels field. If +// any of the log entries contains a (key, value) with the same key that is in +// CommonLabels, then the entry's (key, value) overrides the one in +// CommonLabels. +func CommonLabels(m map[string]string) LoggerOption { return commonLabels(m) } + +type commonLabels map[string]string + +func (c commonLabels) set(l *Logger) { l.commonLabels = c } + +// ConcurrentWriteLimit determines how many goroutines will send log entries to the +// underlying service. The default is 1. Set ConcurrentWriteLimit to a higher value to +// increase throughput. +func ConcurrentWriteLimit(n int) LoggerOption { return concurrentWriteLimit(n) } + +type concurrentWriteLimit int + +func (c concurrentWriteLimit) set(l *Logger) { l.bundler.HandlerLimit = int(c) } + +// DelayThreshold is the maximum amount of time that an entry should remain +// buffered in memory before a call to the logging service is triggered. Larger +// values of DelayThreshold will generally result in fewer calls to the logging +// service, while increasing the risk that log entries will be lost if the +// process crashes. +// The default is DefaultDelayThreshold. +func DelayThreshold(d time.Duration) LoggerOption { return delayThreshold(d) } + +type delayThreshold time.Duration + +func (d delayThreshold) set(l *Logger) { l.bundler.DelayThreshold = time.Duration(d) } + +// EntryCountThreshold is the maximum number of entries that will be buffered +// in memory before a call to the logging service is triggered. Larger values +// will generally result in fewer calls to the logging service, while +// increasing both memory consumption and the risk that log entries will be +// lost if the process crashes. +// The default is DefaultEntryCountThreshold. 
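+//
+// A sketch of trading delivery latency for fewer RPCs (the values are
+// illustrative only):
+//
+//	lg := client.Logger("my-log",
+//		DelayThreshold(5*time.Second),
+//		EntryCountThreshold(500),
+//	)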
+func EntryCountThreshold(n int) LoggerOption { return entryCountThreshold(n) } + +type entryCountThreshold int + +func (e entryCountThreshold) set(l *Logger) { l.bundler.BundleCountThreshold = int(e) } + +// EntryByteThreshold is the maximum number of bytes of entries that will be +// buffered in memory before a call to the logging service is triggered. See +// EntryCountThreshold for a discussion of the tradeoffs involved in setting +// this option. +// The default is DefaultEntryByteThreshold. +func EntryByteThreshold(n int) LoggerOption { return entryByteThreshold(n) } + +type entryByteThreshold int + +func (e entryByteThreshold) set(l *Logger) { l.bundler.BundleByteThreshold = int(e) } + +// EntryByteLimit is the maximum number of bytes of entries that will be sent +// in a single call to the logging service. ErrOversizedEntry is returned if an +// entry exceeds EntryByteLimit. This option limits the size of a single RPC +// payload, to account for network or service issues with large RPCs. If +// EntryByteLimit is smaller than EntryByteThreshold, the latter has no effect. +// The default is zero, meaning there is no limit. +func EntryByteLimit(n int) LoggerOption { return entryByteLimit(n) } + +type entryByteLimit int + +func (e entryByteLimit) set(l *Logger) { l.bundler.BundleByteLimit = int(e) } + +// BufferedByteLimit is the maximum number of bytes that the Logger will keep +// in memory before returning ErrOverflow. This option limits the total memory +// consumption of the Logger (but note that each Logger has its own, separate +// limit). It is possible to reach BufferedByteLimit even if it is larger than +// EntryByteThreshold or EntryByteLimit, because calls triggered by the latter +// two options may be enqueued (and hence occupying memory) while new log +// entries are being added. +// The default is DefaultBufferedByteLimit. +func BufferedByteLimit(n int) LoggerOption { return bufferedByteLimit(n) } + +type bufferedByteLimit int + +func (b bufferedByteLimit) set(l *Logger) { l.bundler.BufferedByteLimit = int(b) } + +// Logger returns a Logger that will write entries with the given log ID, such as +// "syslog". A log ID must be less than 512 characters long and can only +// include the following characters: upper and lower case alphanumeric +// characters: [A-Za-z0-9]; and punctuation characters: forward-slash, +// underscore, hyphen, and period. +func (c *Client) Logger(logID string, opts ...LoggerOption) *Logger { + r := detectResource() + if r == nil { + r = monitoredResource(c.parent) + } + l := &Logger{ + client: c, + logName: internal.LogPath(c.parent, logID), + commonResource: r, + } + l.bundler = bundler.NewBundler(&logpb.LogEntry{}, func(entries interface{}) { + l.writeLogEntries(entries.([]*logpb.LogEntry)) + }) + l.bundler.DelayThreshold = DefaultDelayThreshold + l.bundler.BundleCountThreshold = DefaultEntryCountThreshold + l.bundler.BundleByteThreshold = DefaultEntryByteThreshold + l.bundler.BufferedByteLimit = DefaultBufferedByteLimit + for _, opt := range opts { + opt.set(l) + } + l.stdLoggers = map[Severity]*log.Logger{} + for s := range severityName { + l.stdLoggers[s] = log.New(severityWriter{l, s}, "", 0) + } + + c.loggers.Add(1) + // Start a goroutine that cleans up the bundler, its channel + // and the writer goroutines when the client is closed. 
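+	// (Note: bundler.Flush blocks until all buffered entries for this Logger
+	// have been processed, so Client.Close, which waits on c.loggers, does
+	// not return while writes are still being issued.)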
+ go func() { + defer c.loggers.Done() + <-c.donec + l.bundler.Flush() + }() + return l +} + +type severityWriter struct { + l *Logger + s Severity +} + +func (w severityWriter) Write(p []byte) (n int, err error) { + w.l.Log(Entry{ + Severity: w.s, + Payload: string(p), + }) + return len(p), nil +} + +// Close waits for all opened loggers to be flushed and closes the client. +func (c *Client) Close() error { + if c.closed { + return nil + } + close(c.donec) // close Logger bundlers + c.loggers.Wait() // wait for all bundlers to flush and close + // Now there can be no more errors. + close(c.errc) // terminate error goroutine + // Prefer errors arising from logging to the error returned from Close. + err := c.extractErrorInfo() + err2 := c.client.Close() + if err == nil { + err = err2 + } + c.closed = true + return err +} + +// Severity is the severity of the event described in a log entry. These +// guideline severity levels are ordered, with numerically smaller levels +// treated as less severe than numerically larger levels. +type Severity int + +const ( + // Default means the log entry has no assigned severity level. + Default = Severity(logtypepb.LogSeverity_DEFAULT) + // Debug means debug or trace information. + Debug = Severity(logtypepb.LogSeverity_DEBUG) + // Info means routine information, such as ongoing status or performance. + Info = Severity(logtypepb.LogSeverity_INFO) + // Notice means normal but significant events, such as start up, shut down, or configuration. + Notice = Severity(logtypepb.LogSeverity_NOTICE) + // Warning means events that might cause problems. + Warning = Severity(logtypepb.LogSeverity_WARNING) + // Error means events that are likely to cause problems. + Error = Severity(logtypepb.LogSeverity_ERROR) + // Critical means events that cause more severe problems or brief outages. + Critical = Severity(logtypepb.LogSeverity_CRITICAL) + // Alert means a person must take an action immediately. + Alert = Severity(logtypepb.LogSeverity_ALERT) + // Emergency means one or more systems are unusable. + Emergency = Severity(logtypepb.LogSeverity_EMERGENCY) +) + +var severityName = map[Severity]string{ + Default: "Default", + Debug: "Debug", + Info: "Info", + Notice: "Notice", + Warning: "Warning", + Error: "Error", + Critical: "Critical", + Alert: "Alert", + Emergency: "Emergency", +} + +// String converts a severity level to a string. +func (v Severity) String() string { + // same as proto.EnumName + s, ok := severityName[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// ParseSeverity returns the Severity whose name equals s, ignoring case. It +// returns Default if no Severity matches. +func ParseSeverity(s string) Severity { + sl := strings.ToLower(s) + for sev, name := range severityName { + if strings.ToLower(name) == sl { + return sev + } + } + return Default +} + +// Entry is a log entry. +// See https://cloud.google.com/logging/docs/view/logs_index for more about entries. +type Entry struct { + // Timestamp is the time of the entry. If zero, the current time is used. + Timestamp time.Time + + // Severity is the entry's severity level. + // The zero value is Default. + Severity Severity + + // Payload must be either a string, or something that marshals via the + // encoding/json package to a JSON object (and not any other type of JSON value). + Payload interface{} + + // Labels optionally specifies key/value labels for the log entry. + // The Logger.Log method takes ownership of this map. See Logger.CommonLabels + // for more about labels. 
+ Labels map[string]string + + // InsertID is a unique ID for the log entry. If you provide this field, + // the logging service considers other log entries in the same log with the + // same ID as duplicates which can be removed. If omitted, the logging + // service will generate a unique ID for this log entry. Note that because + // this client retries RPCs automatically, it is possible (though unlikely) + // that an Entry without an InsertID will be written more than once. + InsertID string + + // HTTPRequest optionally specifies metadata about the HTTP request + // associated with this log entry, if applicable. It is optional. + HTTPRequest *HTTPRequest + + // Operation optionally provides information about an operation associated + // with the log entry, if applicable. + Operation *logpb.LogEntryOperation + + // LogName is the full log name, in the form + // "projects/{ProjectID}/logs/{LogID}". It is set by the client when + // reading entries. It is an error to set it when writing entries. + LogName string + + // Resource is the monitored resource associated with the entry. + Resource *mrpb.MonitoredResource + + // Trace is the resource name of the trace associated with the log entry, + // if any. If it contains a relative resource name, the name is assumed to + // be relative to //tracing.googleapis.com. + Trace string +} + +// HTTPRequest contains an http.Request as well as additional +// information about the request and its response. +type HTTPRequest struct { + // Request is the http.Request passed to the handler. + Request *http.Request + + // RequestSize is the size of the HTTP request message in bytes, including + // the request headers and the request body. + RequestSize int64 + + // Status is the response code indicating the status of the response. + // Examples: 200, 404. + Status int + + // ResponseSize is the size of the HTTP response message sent back to the client, in bytes, + // including the response headers and the response body. + ResponseSize int64 + + // Latency is the request processing latency on the server, from the time the request was + // received until the response was sent. + Latency time.Duration + + // LocalIP is the IP address (IPv4 or IPv6) of the origin server that the request + // was sent to. + LocalIP string + + // RemoteIP is the IP address (IPv4 or IPv6) of the client that issued the + // HTTP request. Examples: "192.168.1.1", "FE80::0202:B3FF:FE1E:8329". + RemoteIP string + + // CacheHit reports whether an entity was served from cache (with or without + // validation). + CacheHit bool + + // CacheValidatedWithOriginServer reports whether the response was + // validated with the origin server before being served from cache. This + // field is only meaningful if CacheHit is true. + CacheValidatedWithOriginServer bool +} + +func fromHTTPRequest(r *HTTPRequest) *logtypepb.HttpRequest { + if r == nil { + return nil + } + if r.Request == nil { + panic("HTTPRequest must have a non-nil Request") + } + u := *r.Request.URL + u.Fragment = "" + pb := &logtypepb.HttpRequest{ + RequestMethod: r.Request.Method, + RequestUrl: u.String(), + RequestSize: r.RequestSize, + Status: int32(r.Status), + ResponseSize: r.ResponseSize, + UserAgent: r.Request.UserAgent(), + ServerIp: r.LocalIP, + RemoteIp: r.RemoteIP, // TODO(jba): attempt to parse http.Request.RemoteAddr? 
+ Referer: r.Request.Referer(), + CacheHit: r.CacheHit, + CacheValidatedWithOriginServer: r.CacheValidatedWithOriginServer, + } + if r.Latency != 0 { + pb.Latency = ptypes.DurationProto(r.Latency) + } + return pb +} + +// toProtoStruct converts v, which must marshal into a JSON object, +// into a Google Struct proto. +func toProtoStruct(v interface{}) (*structpb.Struct, error) { + // Fast path: if v is already a *structpb.Struct, nothing to do. + if s, ok := v.(*structpb.Struct); ok { + return s, nil + } + // v is a Go value that supports JSON marshalling. We want a Struct + // protobuf. Some day we may have a more direct way to get there, but right + // now the only way is to marshal the Go value to JSON, unmarshal into a + // map, and then build the Struct proto from the map. + var jb []byte + var err error + if raw, ok := v.(json.RawMessage); ok { // needed for Go 1.7 and below + jb = []byte(raw) + } else { + jb, err = json.Marshal(v) + if err != nil { + return nil, fmt.Errorf("logging: json.Marshal: %v", err) + } + } + var m map[string]interface{} + err = json.Unmarshal(jb, &m) + if err != nil { + return nil, fmt.Errorf("logging: json.Unmarshal: %v", err) + } + return jsonMapToProtoStruct(m), nil +} + +func jsonMapToProtoStruct(m map[string]interface{}) *structpb.Struct { + fields := map[string]*structpb.Value{} + for k, v := range m { + fields[k] = jsonValueToStructValue(v) + } + return &structpb.Struct{Fields: fields} +} + +func jsonValueToStructValue(v interface{}) *structpb.Value { + switch x := v.(type) { + case bool: + return &structpb.Value{Kind: &structpb.Value_BoolValue{BoolValue: x}} + case float64: + return &structpb.Value{Kind: &structpb.Value_NumberValue{NumberValue: x}} + case string: + return &structpb.Value{Kind: &structpb.Value_StringValue{StringValue: x}} + case nil: + return &structpb.Value{Kind: &structpb.Value_NullValue{}} + case map[string]interface{}: + return &structpb.Value{Kind: &structpb.Value_StructValue{StructValue: jsonMapToProtoStruct(x)}} + case []interface{}: + var vals []*structpb.Value + for _, e := range x { + vals = append(vals, jsonValueToStructValue(e)) + } + return &structpb.Value{Kind: &structpb.Value_ListValue{ListValue: &structpb.ListValue{Values: vals}}} + default: + panic(fmt.Sprintf("bad type %T for JSON value", v)) + } +} + +// LogSync logs the Entry synchronously without any buffering. Because LogSync is slow +// and will block, it is intended primarily for debugging or critical errors. +// Prefer Log for most uses. +// TODO(jba): come up with a better name (LogNow?) or eliminate. +func (l *Logger) LogSync(ctx context.Context, e Entry) error { + ent, err := toLogEntry(e) + if err != nil { + return err + } + _, err = l.client.client.WriteLogEntries(ctx, &logpb.WriteLogEntriesRequest{ + LogName: l.logName, + Resource: l.commonResource, + Labels: l.commonLabels, + Entries: []*logpb.LogEntry{ent}, + }) + return err +} + +// Log buffers the Entry for output to the logging service. It never blocks. +func (l *Logger) Log(e Entry) { + ent, err := toLogEntry(e) + if err != nil { + l.client.error(err) + return + } + if err := l.bundler.Add(ent, proto.Size(ent)); err != nil { + l.client.error(err) + } +} + +// Flush blocks until all currently buffered log entries are sent. +// +// If any errors occurred since the last call to Flush from any Logger, or the +// creation of the client if this is the first call, then Flush returns a non-nil +// error with summary information about the errors. This information is unlikely to +// be actionable. 
For more accurate error reporting, set Client.OnError.
+func (l *Logger) Flush() error {
+	l.bundler.Flush()
+	return l.client.extractErrorInfo()
+}
+
+func (l *Logger) writeLogEntries(entries []*logpb.LogEntry) {
+	req := &logpb.WriteLogEntriesRequest{
+		LogName:  l.logName,
+		Resource: l.commonResource,
+		Labels:   l.commonLabels,
+		Entries:  entries,
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), defaultWriteTimeout)
+	defer cancel()
+	_, err := l.client.client.WriteLogEntries(ctx, req)
+	if err != nil {
+		l.client.error(err)
+	}
+}
+
+// StandardLogger returns a *log.Logger for the provided severity.
+//
+// This method is cheap. A single log.Logger is pre-allocated for each
+// severity level in each Logger. Callers may mutate the returned log.Logger
+// (for example by calling SetFlags or SetPrefix).
+func (l *Logger) StandardLogger(s Severity) *log.Logger { return l.stdLoggers[s] }
+
+func trunc32(i int) int32 {
+	if i > math.MaxInt32 {
+		i = math.MaxInt32
+	}
+	return int32(i)
+}
+
+func toLogEntry(e Entry) (*logpb.LogEntry, error) {
+	if e.LogName != "" {
+		return nil, errors.New("logging: Entry.LogName should not be set when writing")
+	}
+	t := e.Timestamp
+	if t.IsZero() {
+		t = now()
+	}
+	ts, err := ptypes.TimestampProto(t)
+	if err != nil {
+		return nil, err
+	}
+	ent := &logpb.LogEntry{
+		Timestamp:   ts,
+		Severity:    logtypepb.LogSeverity(e.Severity),
+		InsertId:    e.InsertID,
+		HttpRequest: fromHTTPRequest(e.HTTPRequest),
+		Operation:   e.Operation,
+		Labels:      e.Labels,
+		Trace:       e.Trace,
+		Resource:    e.Resource,
+	}
+	switch p := e.Payload.(type) {
+	case string:
+		ent.Payload = &logpb.LogEntry_TextPayload{TextPayload: p}
+	default:
+		s, err := toProtoStruct(p)
+		if err != nil {
+			return nil, err
+		}
+		ent.Payload = &logpb.LogEntry_JsonPayload{JsonPayload: s}
+	}
+	return ent, nil
+}
diff --git a/vendor/cloud.google.com/go/logging/logging_test.go b/vendor/cloud.google.com/go/logging/logging_test.go
new file mode 100644
index 0000000..66964a2
--- /dev/null
+++ b/vendor/cloud.google.com/go/logging/logging_test.go
@@ -0,0 +1,630 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// TODO(jba): test that OnError is getting called appropriately.
+ +package logging_test + +import ( + "flag" + "fmt" + "log" + "math/rand" + "os" + "strings" + "sync" + "testing" + "time" + + gax "github.com/googleapis/gax-go" + + cinternal "cloud.google.com/go/internal" + "cloud.google.com/go/internal/testutil" + "cloud.google.com/go/logging" + ltesting "cloud.google.com/go/logging/internal/testing" + "cloud.google.com/go/logging/logadmin" + "golang.org/x/net/context" + "golang.org/x/oauth2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + mrpb "google.golang.org/genproto/googleapis/api/monitoredres" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const testLogIDPrefix = "GO-LOGGING-CLIENT/TEST-LOG" + +var uids = testutil.NewUIDSpace(testLogIDPrefix) + +var ( + client *logging.Client + aclient *logadmin.Client + testProjectID string + testLogID string + testFilter string + errorc chan error + ctx context.Context + + // Adjust the fields of a FullEntry received from the production service + // before comparing it with the expected result. We can't correctly + // compare certain fields, like times or server-generated IDs. + clean func(*logging.Entry) + + // Create a new client with the given project ID. + newClients func(ctx context.Context, projectID string) (*logging.Client, *logadmin.Client) +) + +func testNow() time.Time { + return time.Unix(1000, 0) +} + +// If true, this test is using the production service, not a fake. +var integrationTest bool + +func TestMain(m *testing.M) { + flag.Parse() // needed for testing.Short() + ctx = context.Background() + testProjectID = testutil.ProjID() + errorc = make(chan error, 100) + if testProjectID == "" || testing.Short() { + integrationTest = false + if testProjectID != "" { + log.Print("Integration tests skipped in short mode (using fake instead)") + } + testProjectID = "PROJECT_ID" + clean = func(e *logging.Entry) { + // Remove the insert ID for consistency with the integration test. + e.InsertID = "" + } + + addr, err := ltesting.NewServer() + if err != nil { + log.Fatalf("creating fake server: %v", err) + } + logging.SetNow(testNow) + + newClients = func(ctx context.Context, parent string) (*logging.Client, *logadmin.Client) { + conn, err := grpc.Dial(addr, grpc.WithInsecure()) + if err != nil { + log.Fatalf("dialing %q: %v", addr, err) + } + c, err := logging.NewClient(ctx, parent, option.WithGRPCConn(conn)) + if err != nil { + log.Fatalf("creating client for fake at %q: %v", addr, err) + } + ac, err := logadmin.NewClient(ctx, parent, option.WithGRPCConn(conn)) + if err != nil { + log.Fatalf("creating client for fake at %q: %v", addr, err) + } + return c, ac + } + + } else { + integrationTest = true + clean = func(e *logging.Entry) { + // We cannot compare timestamps, so set them to the test time. + // Also, remove the insert ID added by the service. + e.Timestamp = testNow().UTC() + e.InsertID = "" + } + ts := testutil.TokenSource(ctx, logging.AdminScope) + if ts == nil { + log.Fatal("The project key must be set. 
See CONTRIBUTING.md for details") + } + log.Printf("running integration tests with project %s", testProjectID) + newClients = func(ctx context.Context, parent string) (*logging.Client, *logadmin.Client) { + c, err := logging.NewClient(ctx, parent, option.WithTokenSource(ts)) + if err != nil { + log.Fatalf("creating prod client: %v", err) + } + ac, err := logadmin.NewClient(ctx, parent, option.WithTokenSource(ts)) + if err != nil { + log.Fatalf("creating prod client: %v", err) + } + return c, ac + } + + } + client, aclient = newClients(ctx, testProjectID) + client.OnError = func(e error) { errorc <- e } + + exit := m.Run() + os.Exit(exit) +} + +func initLogs(ctx context.Context) { + testLogID = uids.New() + testFilter = fmt.Sprintf(`logName = "projects/%s/logs/%s"`, testProjectID, + strings.Replace(testLogID, "/", "%2F", -1)) +} + +// Testing of Logger.Log is done in logadmin_test.go, TestEntries. + +func TestLogSync(t *testing.T) { + initLogs(ctx) // Generate new testLogID + ctx := context.Background() + lg := client.Logger(testLogID) + err := lg.LogSync(ctx, logging.Entry{Payload: "hello"}) + if err != nil { + t.Fatal(err) + } + err = lg.LogSync(ctx, logging.Entry{Payload: "goodbye"}) + if err != nil { + t.Fatal(err) + } + // Allow overriding the MonitoredResource. + err = lg.LogSync(ctx, logging.Entry{Payload: "mr", Resource: &mrpb.MonitoredResource{Type: "global"}}) + if err != nil { + t.Fatal(err) + } + + want := []*logging.Entry{ + entryForTesting("hello"), + entryForTesting("goodbye"), + entryForTesting("mr"), + } + var got []*logging.Entry + ok := waitFor(func() bool { + got, err = allTestLogEntries(ctx) + if err != nil { + t.Log("fetching log entries: ", err) + return false + } + return len(got) == len(want) + }) + if !ok { + t.Fatalf("timed out; got: %d, want: %d\n", len(got), len(want)) + } + if msg, ok := compareEntries(got, want); !ok { + t.Error(msg) + } +} + +func TestLogAndEntries(t *testing.T) { + initLogs(ctx) // Generate new testLogID + ctx := context.Background() + payloads := []string{"p1", "p2", "p3", "p4", "p5"} + lg := client.Logger(testLogID) + for _, p := range payloads { + // Use the insert ID to guarantee iteration order. + lg.Log(logging.Entry{Payload: p, InsertID: p}) + } + lg.Flush() + var want []*logging.Entry + for _, p := range payloads { + want = append(want, entryForTesting(p)) + } + var got []*logging.Entry + ok := waitFor(func() bool { + var err error + got, err = allTestLogEntries(ctx) + if err != nil { + t.Log("fetching log entries: ", err) + return false + } + return len(got) == len(want) + }) + if !ok { + t.Fatalf("timed out; got: %d, want: %d\n", len(got), len(want)) + } + if msg, ok := compareEntries(got, want); !ok { + t.Error(msg) + } +} + +// compareEntries compares most fields list of Entries against expected. 
compareEntries does not compare: +// - HTTPRequest +// - Operation +// - Resource +func compareEntries(got, want []*logging.Entry) (string, bool) { + if len(got) != len(want) { + return fmt.Sprintf("got %d entries, want %d", len(got), len(want)), false + } + for i := range got { + if !compareEntry(got[i], want[i]) { + return fmt.Sprintf("#%d:\ngot %+v\nwant %+v", i, got[i], want[i]), false + } + } + return "", true +} + +func compareEntry(got, want *logging.Entry) bool { + if got.Timestamp.Unix() != want.Timestamp.Unix() { + return false + } + + if got.Severity != want.Severity { + return false + } + + if !ltesting.PayloadEqual(got.Payload, want.Payload) { + return false + } + if !testutil.Equal(got.Labels, want.Labels) { + return false + } + + if got.InsertID != want.InsertID { + return false + } + + if got.LogName != want.LogName { + return false + } + + return true +} + +func entryForTesting(payload interface{}) *logging.Entry { + return &logging.Entry{ + Timestamp: testNow().UTC(), + Payload: payload, + LogName: "projects/" + testProjectID + "/logs/" + testLogID, + Resource: &mrpb.MonitoredResource{Type: "global", Labels: map[string]string{"project_id": testProjectID}}, + } +} + +func countLogEntries(ctx context.Context, filter string) int { + it := aclient.Entries(ctx, logadmin.Filter(filter)) + n := 0 + for { + _, err := it.Next() + if err == iterator.Done { + return n + } + if err != nil { + log.Fatalf("counting log entries: %v", err) + } + n++ + } +} + +func allTestLogEntries(ctx context.Context) ([]*logging.Entry, error) { + return allEntries(ctx, aclient, testFilter) +} + +func allEntries(ctx context.Context, aclient *logadmin.Client, filter string) ([]*logging.Entry, error) { + var es []*logging.Entry + it := aclient.Entries(ctx, logadmin.Filter(filter)) + for { + e, err := cleanNext(it) + switch err { + case nil: + es = append(es, e) + case iterator.Done: + return es, nil + default: + return nil, err + } + } +} + +func cleanNext(it *logadmin.EntryIterator) (*logging.Entry, error) { + e, err := it.Next() + if err != nil { + return nil, err + } + clean(e) + return e, nil +} + +func TestStandardLogger(t *testing.T) { + initLogs(ctx) // Generate new testLogID + ctx := context.Background() + lg := client.Logger(testLogID) + slg := lg.StandardLogger(logging.Info) + + if slg != lg.StandardLogger(logging.Info) { + t.Error("There should be only one standard logger at each severity.") + } + if slg == lg.StandardLogger(logging.Debug) { + t.Error("There should be a different standard logger for each severity.") + } + + slg.Print("info") + lg.Flush() + var got []*logging.Entry + ok := waitFor(func() bool { + var err error + got, err = allTestLogEntries(ctx) + if err != nil { + t.Log("fetching log entries: ", err) + return false + } + return len(got) == 1 + }) + if !ok { + t.Fatalf("timed out; got: %d, want: %d\n", len(got), 1) + } + if len(got) != 1 { + t.Fatalf("expected non-nil request with one entry; got:\n%+v", got) + } + if got, want := got[0].Payload.(string), "info\n"; got != want { + t.Errorf("payload: got %q, want %q", got, want) + } + if got, want := logging.Severity(got[0].Severity), logging.Info; got != want { + t.Errorf("severity: got %s, want %s", got, want) + } +} + +func TestSeverity(t *testing.T) { + if got, want := logging.Info.String(), "Info"; got != want { + t.Errorf("got %q, want %q", got, want) + } + if got, want := logging.Severity(-99).String(), "-99"; got != want { + t.Errorf("got %q, want %q", got, want) + } +} + +func TestParseSeverity(t *testing.T) { + for _, 
test := range []struct { + in string + want logging.Severity + }{ + {"", logging.Default}, + {"whatever", logging.Default}, + {"Default", logging.Default}, + {"ERROR", logging.Error}, + {"Error", logging.Error}, + {"error", logging.Error}, + } { + got := logging.ParseSeverity(test.in) + if got != test.want { + t.Errorf("%q: got %s, want %s\n", test.in, got, test.want) + } + } +} + +func TestErrors(t *testing.T) { + initLogs(ctx) // Generate new testLogID + // Drain errors already seen. +loop: + for { + select { + case <-errorc: + default: + break loop + } + } + // Try to log something that can't be JSON-marshalled. + lg := client.Logger(testLogID) + lg.Log(logging.Entry{Payload: func() {}}) + // Expect an error from Flush. + err := lg.Flush() + if err == nil { + t.Fatal("expected error, got nil") + } +} + +type badTokenSource struct{} + +func (badTokenSource) Token() (*oauth2.Token, error) { + return &oauth2.Token{}, nil +} + +func TestPing(t *testing.T) { + // Ping twice, in case the service's InsertID logic messes with the error code. + ctx := context.Background() + // The global client should be valid. + if err := client.Ping(ctx); err != nil { + t.Errorf("project %s: got %v, expected nil", testProjectID, err) + } + if err := client.Ping(ctx); err != nil { + t.Errorf("project %s, #2: got %v, expected nil", testProjectID, err) + } + // nonexistent project + c, a := newClients(ctx, testProjectID+"-BAD") + defer c.Close() + defer a.Close() + if err := c.Ping(ctx); err == nil { + t.Errorf("nonexistent project: want error pinging logging api, got nil") + } + if err := c.Ping(ctx); err == nil { + t.Errorf("nonexistent project, #2: want error pinging logging api, got nil") + } + + // Bad creds. We cannot test this with the fake, since it doesn't do auth. + if integrationTest { + c, err := logging.NewClient(ctx, testProjectID, option.WithTokenSource(badTokenSource{})) + if err != nil { + t.Fatal(err) + } + if err := c.Ping(ctx); err == nil { + t.Errorf("bad creds: want error pinging logging api, got nil") + } + if err := c.Ping(ctx); err == nil { + t.Errorf("bad creds, #2: want error pinging logging api, got nil") + } + if err := c.Close(); err != nil { + t.Fatalf("error closing client: %v", err) + } + } +} + +func TestLogsAndDelete(t *testing.T) { + // This function tests both the Logs and DeleteLog methods. We only try to + // delete those logs that we can observe and that were generated by this + // test. This may not include the logs generated from the current test run, + // because the logging service is only eventually consistent. It's + // therefore possible that on some runs, this test will do nothing. + ctx := context.Background() + it := aclient.Logs(ctx) + nDeleted := 0 + for { + logID, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + t.Fatal(err) + } + if strings.HasPrefix(logID, testLogIDPrefix) { + if err := aclient.DeleteLog(ctx, logID); err != nil { + // Ignore NotFound. Sometimes, amazingly, DeleteLog cannot find + // a log that is returned by Logs. 
+ if status.Code(err) != codes.NotFound { + t.Fatalf("deleting %q: %v", logID, err) + } + } else { + nDeleted++ + } + } + } + t.Logf("deleted %d logs", nDeleted) +} + +func TestNonProjectParent(t *testing.T) { + ctx := context.Background() + initLogs(ctx) + const orgID = "433637338589" // org ID for google.com + parent := "organizations/" + orgID + c, a := newClients(ctx, parent) + defer c.Close() + defer a.Close() + lg := c.Logger(testLogID) + err := lg.LogSync(ctx, logging.Entry{Payload: "hello"}) + if integrationTest { + // We don't have permission to log to the organization. + if got, want := status.Code(err), codes.PermissionDenied; got != want { + t.Errorf("got code %s, want %s", got, want) + } + return + } + // Continue test against fake. + if err != nil { + t.Fatal(err) + } + want := []*logging.Entry{{ + Timestamp: testNow().UTC(), + Payload: "hello", + LogName: parent + "/logs/" + testLogID, + Resource: &mrpb.MonitoredResource{ + Type: "organization", + Labels: map[string]string{"organization_id": orgID}, + }, + }} + var got []*logging.Entry + ok := waitFor(func() bool { + got, err = allEntries(ctx, a, fmt.Sprintf(`logName = "%s/logs/%s"`, parent, + strings.Replace(testLogID, "/", "%2F", -1))) + if err != nil { + t.Log("fetching log entries: ", err) + return false + } + return len(got) == len(want) + }) + if !ok { + t.Fatalf("timed out; got: %d, want: %d\n", len(got), len(want)) + } + if msg, ok := compareEntries(got, want); !ok { + t.Error(msg) + } +} + +// waitFor calls f repeatedly with exponential backoff, blocking until it returns true. +// It returns false after a while (if it times out). +func waitFor(f func() bool) bool { + // TODO(shadams): Find a better way to deflake these tests. + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + err := cinternal.Retry(ctx, + gax.Backoff{Initial: time.Second, Multiplier: 2}, + func() (bool, error) { return f(), nil }) + return err == nil +} + +// Interleave a lot of Log and Flush calls, to induce race conditions. +// Run this test with: +// go test -run LogFlushRace -race -count 100 +func TestLogFlushRace(t *testing.T) { + initLogs(ctx) // Generate new testLogID + lg := client.Logger(testLogID, + logging.ConcurrentWriteLimit(5), // up to 5 concurrent log writes + logging.EntryCountThreshold(100)) // small bundle size to increase interleaving + var wgf, wgl sync.WaitGroup + donec := make(chan struct{}) + for i := 0; i < 10; i++ { + wgl.Add(1) + go func() { + defer wgl.Done() + for j := 0; j < 1e4; j++ { + lg.Log(logging.Entry{Payload: "the payload"}) + } + }() + } + for i := 0; i < 5; i++ { + wgf.Add(1) + go func() { + defer wgf.Done() + for { + select { + case <-donec: + return + case <-time.After(time.Duration(rand.Intn(5)) * time.Millisecond): + lg.Flush() + } + } + }() + } + wgl.Wait() + close(donec) + wgf.Wait() +} + +// Test the throughput of concurrent writers. +// TODO(jba): when 1.8 is out, use sub-benchmarks. 
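+//
+// A sketch of the sub-benchmark form this could take (hypothetical until the
+// TODO above is resolved):
+//
+//	func BenchmarkConcurrentWrites(b *testing.B) {
+//		for _, c := range []int{1, 2, 4, 8, 16, 32} {
+//			b.Run(fmt.Sprint(c), func(b *testing.B) { benchmarkConcurrentWrites(b, c) })
+//		}
+//	}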
+func BenchmarkConcurrentWrites1(b *testing.B) { + benchmarkConcurrentWrites(b, 1) +} + +func BenchmarkConcurrentWrites2(b *testing.B) { + benchmarkConcurrentWrites(b, 2) +} + +func BenchmarkConcurrentWrites4(b *testing.B) { + benchmarkConcurrentWrites(b, 4) +} + +func BenchmarkConcurrentWrites8(b *testing.B) { + benchmarkConcurrentWrites(b, 8) +} + +func BenchmarkConcurrentWrites16(b *testing.B) { + benchmarkConcurrentWrites(b, 16) +} + +func BenchmarkConcurrentWrites32(b *testing.B) { + benchmarkConcurrentWrites(b, 32) +} + +func benchmarkConcurrentWrites(b *testing.B, c int) { + if !integrationTest { + b.Skip("only makes sense when running against production service") + } + b.StopTimer() + lg := client.Logger(testLogID, logging.ConcurrentWriteLimit(c), logging.EntryCountThreshold(1000)) + const ( + nEntries = 1e5 + payload = "the quick brown fox jumps over the lazy dog" + ) + b.SetBytes(int64(nEntries * len(payload))) + b.StartTimer() + for i := 0; i < b.N; i++ { + for j := 0; j < nEntries; j++ { + lg.Log(logging.Entry{Payload: payload}) + } + lg.Flush() + } +} diff --git a/vendor/cloud.google.com/go/logging/logging_unexported_test.go b/vendor/cloud.google.com/go/logging/logging_unexported_test.go new file mode 100644 index 0000000..4fa42cb --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logging_unexported_test.go @@ -0,0 +1,333 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Tests that require access to unexported names of the logging package. 
+ +package logging + +import ( + "encoding/json" + "net/http" + "net/url" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + + "github.com/golang/protobuf/proto" + durpb "github.com/golang/protobuf/ptypes/duration" + structpb "github.com/golang/protobuf/ptypes/struct" + "google.golang.org/api/support/bundler" + mrpb "google.golang.org/genproto/googleapis/api/monitoredres" + logtypepb "google.golang.org/genproto/googleapis/logging/type" +) + +func TestLoggerCreation(t *testing.T) { + const logID = "testing" + c := &Client{parent: "projects/PROJECT_ID"} + customResource := &mrpb.MonitoredResource{ + Type: "global", + Labels: map[string]string{ + "project_id": "ANOTHER_PROJECT", + }, + } + defaultBundler := &bundler.Bundler{ + DelayThreshold: DefaultDelayThreshold, + BundleCountThreshold: DefaultEntryCountThreshold, + BundleByteThreshold: DefaultEntryByteThreshold, + BundleByteLimit: 0, + BufferedByteLimit: DefaultBufferedByteLimit, + } + for _, test := range []struct { + options []LoggerOption + wantLogger *Logger + defaultResource bool + wantBundler *bundler.Bundler + }{ + { + options: nil, + wantLogger: &Logger{}, + defaultResource: true, + wantBundler: defaultBundler, + }, + { + options: []LoggerOption{ + CommonResource(nil), + CommonLabels(map[string]string{"a": "1"}), + }, + wantLogger: &Logger{ + commonResource: nil, + commonLabels: map[string]string{"a": "1"}, + }, + wantBundler: defaultBundler, + }, + { + options: []LoggerOption{CommonResource(customResource)}, + wantLogger: &Logger{commonResource: customResource}, + wantBundler: defaultBundler, + }, + { + options: []LoggerOption{ + DelayThreshold(time.Minute), + EntryCountThreshold(99), + EntryByteThreshold(17), + EntryByteLimit(18), + BufferedByteLimit(19), + }, + wantLogger: &Logger{}, + defaultResource: true, + wantBundler: &bundler.Bundler{ + DelayThreshold: time.Minute, + BundleCountThreshold: 99, + BundleByteThreshold: 17, + BundleByteLimit: 18, + BufferedByteLimit: 19, + }, + }, + } { + gotLogger := c.Logger(logID, test.options...) 
+		if got, want := gotLogger.commonResource, test.wantLogger.commonResource; !test.defaultResource && !proto.Equal(got, want) {
+			t.Errorf("%v: resource: got %v, want %v", test.options, got, want)
+		}
+		if got, want := gotLogger.commonLabels, test.wantLogger.commonLabels; !testutil.Equal(got, want) {
+			t.Errorf("%v: commonLabels: got %v, want %v", test.options, got, want)
+		}
+		if got, want := gotLogger.bundler.DelayThreshold, test.wantBundler.DelayThreshold; got != want {
+			t.Errorf("%v: DelayThreshold: got %v, want %v", test.options, got, want)
+		}
+		if got, want := gotLogger.bundler.BundleCountThreshold, test.wantBundler.BundleCountThreshold; got != want {
+			t.Errorf("%v: BundleCountThreshold: got %v, want %v", test.options, got, want)
+		}
+		if got, want := gotLogger.bundler.BundleByteThreshold, test.wantBundler.BundleByteThreshold; got != want {
+			t.Errorf("%v: BundleByteThreshold: got %v, want %v", test.options, got, want)
+		}
+		if got, want := gotLogger.bundler.BundleByteLimit, test.wantBundler.BundleByteLimit; got != want {
+			t.Errorf("%v: BundleByteLimit: got %v, want %v", test.options, got, want)
+		}
+		if got, want := gotLogger.bundler.BufferedByteLimit, test.wantBundler.BufferedByteLimit; got != want {
+			t.Errorf("%v: BufferedByteLimit: got %v, want %v", test.options, got, want)
+		}
+	}
+}
+
+func TestToProtoStruct(t *testing.T) {
+	v := struct {
+		Foo string                 `json:"foo"`
+		Bar int                    `json:"bar,omitempty"`
+		Baz []float64              `json:"baz"`
+		Moo map[string]interface{} `json:"moo"`
+	}{
+		Foo: "foovalue",
+		Baz: []float64{1.1},
+		Moo: map[string]interface{}{
+			"a": 1,
+			"b": "two",
+			"c": true,
+		},
+	}
+
+	got, err := toProtoStruct(v)
+	if err != nil {
+		t.Fatal(err)
+	}
+	want := &structpb.Struct{
+		Fields: map[string]*structpb.Value{
+			"foo": {Kind: &structpb.Value_StringValue{StringValue: v.Foo}},
+			"baz": {Kind: &structpb.Value_ListValue{ListValue: &structpb.ListValue{Values: []*structpb.Value{
+				{Kind: &structpb.Value_NumberValue{NumberValue: 1.1}},
+			}}}},
+			"moo": {Kind: &structpb.Value_StructValue{
+				StructValue: &structpb.Struct{
+					Fields: map[string]*structpb.Value{
+						"a": {Kind: &structpb.Value_NumberValue{NumberValue: 1}},
+						"b": {Kind: &structpb.Value_StringValue{StringValue: "two"}},
+						"c": {Kind: &structpb.Value_BoolValue{BoolValue: true}},
+					},
+				},
+			}},
+		},
+	}
+	if !proto.Equal(got, want) {
+		t.Errorf("got %+v\nwant %+v", got, want)
+	}
+
+	// Non-structs should fail to convert. Range over the values, not the
+	// indices, so each case actually reaches toProtoStruct.
+	for _, v := range []interface{}{3, "foo", []int{1, 2, 3}} {
+		_, err := toProtoStruct(v)
+		if err == nil {
+			t.Errorf("%v: got nil, want error", v)
+		}
+	}
+
+	// Test fast path.
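+	// (A value that is already a *structpb.Struct should come back unchanged
+	// and pointer-identical, which the got != want comparison below verifies.)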
+ got, err = toProtoStruct(want) + if err != nil { + t.Fatal(err) + } + if got != want { + t.Error("got and want should be identical, but are not") + } +} + +func TestToLogEntryPayload(t *testing.T) { + for _, test := range []struct { + in interface{} + wantText string + wantStruct *structpb.Struct + }{ + { + in: "string", + wantText: "string", + }, + { + in: map[string]interface{}{"a": 1, "b": true}, + wantStruct: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "a": {Kind: &structpb.Value_NumberValue{NumberValue: 1}}, + "b": {Kind: &structpb.Value_BoolValue{BoolValue: true}}, + }, + }, + }, + { + in: json.RawMessage([]byte(`{"a": 1, "b": true}`)), + wantStruct: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "a": {Kind: &structpb.Value_NumberValue{NumberValue: 1}}, + "b": {Kind: &structpb.Value_BoolValue{BoolValue: true}}, + }, + }, + }, + } { + e, err := toLogEntry(Entry{Payload: test.in}) + if err != nil { + t.Fatalf("%+v: %v", test.in, err) + } + if test.wantStruct != nil { + got := e.GetJsonPayload() + if !proto.Equal(got, test.wantStruct) { + t.Errorf("%+v: got %s, want %s", test.in, got, test.wantStruct) + } + } else { + got := e.GetTextPayload() + if got != test.wantText { + t.Errorf("%+v: got %s, want %s", test.in, got, test.wantText) + } + } + } +} + +func TestFromHTTPRequest(t *testing.T) { + const testURL = "http:://example.com/path?q=1" + u, err := url.Parse(testURL) + if err != nil { + t.Fatal(err) + } + req := &HTTPRequest{ + Request: &http.Request{ + Method: "GET", + URL: u, + Header: map[string][]string{ + "User-Agent": []string{"user-agent"}, + "Referer": []string{"referer"}, + }, + }, + RequestSize: 100, + Status: 200, + ResponseSize: 25, + Latency: 100 * time.Second, + LocalIP: "127.0.0.1", + RemoteIP: "10.0.1.1", + CacheHit: true, + CacheValidatedWithOriginServer: true, + } + got := fromHTTPRequest(req) + want := &logtypepb.HttpRequest{ + RequestMethod: "GET", + RequestUrl: testURL, + RequestSize: 100, + Status: 200, + ResponseSize: 25, + Latency: &durpb.Duration{Seconds: 100}, + UserAgent: "user-agent", + ServerIp: "127.0.0.1", + RemoteIp: "10.0.1.1", + Referer: "referer", + CacheHit: true, + CacheValidatedWithOriginServer: true, + } + if !proto.Equal(got, want) { + t.Errorf("got %+v\nwant %+v", got, want) + } +} + +func TestMonitoredResource(t *testing.T) { + for _, test := range []struct { + parent string + want *mrpb.MonitoredResource + }{ + { + "projects/P", + &mrpb.MonitoredResource{ + Type: "project", + Labels: map[string]string{"project_id": "P"}, + }, + }, + + { + "folders/F", + &mrpb.MonitoredResource{ + Type: "folder", + Labels: map[string]string{"folder_id": "F"}, + }, + }, + { + "billingAccounts/B", + &mrpb.MonitoredResource{ + Type: "billing_account", + Labels: map[string]string{"account_id": "B"}, + }, + }, + { + "organizations/123", + &mrpb.MonitoredResource{ + Type: "organization", + Labels: map[string]string{"organization_id": "123"}, + }, + }, + { + "unknown/X", + &mrpb.MonitoredResource{ + Type: "global", + Labels: map[string]string{"project_id": "X"}, + }, + }, + { + "whatever", + &mrpb.MonitoredResource{ + Type: "global", + Labels: map[string]string{"project_id": "whatever"}, + }, + }, + } { + got := monitoredResource(test.parent) + if !testutil.Equal(got, test.want) { + t.Errorf("%q: got %+v, want %+v", test.parent, got, test.want) + } + } +} + +// Used by the tests in logging_test. 
+func SetNow(f func() time.Time) {
+ now = f
+}
diff --git a/vendor/cloud.google.com/go/longrunning/autogen/doc.go b/vendor/cloud.google.com/go/longrunning/autogen/doc.go
new file mode 100644
index 0000000..edb98b0
--- /dev/null
+++ b/vendor/cloud.google.com/go/longrunning/autogen/doc.go
@@ -0,0 +1,45 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+// Package longrunning is an auto-generated package for the
+// Google Long Running Operations API.
+//
+// NOTE: This package is in alpha. It is not stable, and is likely to change.
+//
+//
+// Use the client at cloud.google.com/go/longrunning in preference to this.
+package longrunning // import "cloud.google.com/go/longrunning/autogen"
+
+import (
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/metadata"
+)
+
+func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
+ out, _ := metadata.FromOutgoingContext(ctx)
+ out = out.Copy()
+ for _, md := range mds {
+ for k, v := range md {
+ out[k] = append(out[k], v...)
+ }
+ }
+ return metadata.NewOutgoingContext(ctx, out)
+}
+
+// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
+func DefaultAuthScopes() []string {
+ return []string{}
+}
diff --git a/vendor/cloud.google.com/go/longrunning/autogen/from_conn.go b/vendor/cloud.google.com/go/longrunning/autogen/from_conn.go
new file mode 100644
index 0000000..07fe43b
--- /dev/null
+++ b/vendor/cloud.google.com/go/longrunning/autogen/from_conn.go
@@ -0,0 +1,34 @@
+// Copyright 2017, Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package longrunning
+
+import (
+ longrunningpb "google.golang.org/genproto/googleapis/longrunning"
+ "google.golang.org/grpc"
+)
+
+// InternalFromConn is for use by the Google Cloud Libraries only.
+//
+// InternalFromConn creates an OperationsClient from an existing connection.
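+//
+// A minimal usage sketch, assuming conn is an already-dialed *grpc.ClientConn
+// (the operation name here is illustrative):
+//
+//   c := longrunning.InternalFromConn(conn)
+//   op, err := c.GetOperation(ctx, &longrunningpb.GetOperationRequest{Name: "operations/123"})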
+func InternalFromConn(conn *grpc.ClientConn) *OperationsClient { + c := &OperationsClient{ + conn: conn, + CallOptions: defaultOperationsCallOptions(), + + operationsClient: longrunningpb.NewOperationsClient(conn), + } + c.SetGoogleClientInfo() + return c +} diff --git a/vendor/cloud.google.com/go/longrunning/autogen/mock_test.go b/vendor/cloud.google.com/go/longrunning/autogen/mock_test.go new file mode 100644 index 0000000..4be9a3f --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/autogen/mock_test.go @@ -0,0 +1,381 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package longrunning + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockOperationsServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + longrunningpb.OperationsServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockOperationsServer) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest) (*longrunningpb.ListOperationsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.ListOperationsResponse), nil +} + +func (s *mockOperationsServer) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest) (*longrunningpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +func (s *mockOperationsServer) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockOperationsServer) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
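+//
+// For example, each test below constructs its client as:
+//
+//   c, err := NewOperationsClient(context.Background(), clientOpt)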
+var clientOpt option.ClientOption
+
+var (
+ mockOperations mockOperationsServer
+)
+
+func TestMain(m *testing.M) {
+ flag.Parse()
+
+ serv := grpc.NewServer()
+ longrunningpb.RegisterOperationsServer(serv, &mockOperations)
+
+ lis, err := net.Listen("tcp", "localhost:0")
+ if err != nil {
+ log.Fatal(err)
+ }
+ go serv.Serve(lis)
+
+ conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
+ if err != nil {
+ log.Fatal(err)
+ }
+ clientOpt = option.WithGRPCConn(conn)
+
+ os.Exit(m.Run())
+}
+
+func TestOperationsGetOperation(t *testing.T) {
+ var name2 string = "name2-1052831874"
+ var done bool = true
+ var expectedResponse = &longrunningpb.Operation{
+ Name: name2,
+ Done: done,
+ }
+
+ mockOperations.err = nil
+ mockOperations.reqs = nil
+
+ mockOperations.resps = append(mockOperations.resps[:0], expectedResponse)
+
+ var name string = "name3373707"
+ var request = &longrunningpb.GetOperationRequest{
+ Name: name,
+ }
+
+ c, err := NewOperationsClient(context.Background(), clientOpt)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err := c.GetOperation(context.Background(), request)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if want, got := request, mockOperations.reqs[0]; !proto.Equal(want, got) {
+ t.Errorf("wrong request %q, want %q", got, want)
+ }
+
+ if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+ t.Errorf("wrong response %q, want %q", got, want)
+ }
+}
+
+func TestOperationsGetOperationError(t *testing.T) {
+ errCode := codes.PermissionDenied
+ mockOperations.err = gstatus.Error(errCode, "test error")
+
+ var name string = "name3373707"
+ var request = &longrunningpb.GetOperationRequest{
+ Name: name,
+ }
+
+ c, err := NewOperationsClient(context.Background(), clientOpt)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err := c.GetOperation(context.Background(), request)
+
+ if st, ok := gstatus.FromError(err); !ok {
+ t.Errorf("got error %v, expected grpc error", err)
+ } else if c := st.Code(); c != errCode {
+ t.Errorf("got error code %q, want %q", c, errCode)
+ }
+ _ = resp
+}
+func TestOperationsListOperations(t *testing.T) {
+ var nextPageToken string = ""
+ var operationsElement *longrunningpb.Operation = &longrunningpb.Operation{}
+ var operations = []*longrunningpb.Operation{operationsElement}
+ var expectedResponse = &longrunningpb.ListOperationsResponse{
+ NextPageToken: nextPageToken,
+ Operations: operations,
+ }
+
+ mockOperations.err = nil
+ mockOperations.reqs = nil
+
+ mockOperations.resps = append(mockOperations.resps[:0], expectedResponse)
+
+ var name string = "name3373707"
+ var filter string = "filter-1274492040"
+ var request = &longrunningpb.ListOperationsRequest{
+ Name: name,
+ Filter: filter,
+ }
+
+ c, err := NewOperationsClient(context.Background(), clientOpt)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err := c.ListOperations(context.Background(), request).Next()
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if want, got := request, mockOperations.reqs[0]; !proto.Equal(want, got) {
+ t.Errorf("wrong request %q, want %q", got, want)
+ }
+
+ want := (interface{})(expectedResponse.Operations[0])
+ got := (interface{})(resp)
+ var ok bool
+
+ switch want := (want).(type) {
+ case proto.Message:
+ ok = proto.Equal(want, got.(proto.Message))
+ default:
+ ok = want == got
+ }
+ if !ok {
+ t.Errorf("wrong response %q, want %q", got, want)
+ }
+}
+
+func TestOperationsListOperationsError(t *testing.T) {
+ errCode := codes.PermissionDenied
+ mockOperations.err = gstatus.Error(errCode, "test error")
+
+ var name string =
"name3373707" + var filter string = "filter-1274492040" + var request = &longrunningpb.ListOperationsRequest{ + Name: name, + Filter: filter, + } + + c, err := NewOperationsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListOperations(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestOperationsCancelOperation(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockOperations.err = nil + mockOperations.reqs = nil + + mockOperations.resps = append(mockOperations.resps[:0], expectedResponse) + + var name string = "name3373707" + var request = &longrunningpb.CancelOperationRequest{ + Name: name, + } + + c, err := NewOperationsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.CancelOperation(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockOperations.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestOperationsCancelOperationError(t *testing.T) { + errCode := codes.PermissionDenied + mockOperations.err = gstatus.Error(errCode, "test error") + + var name string = "name3373707" + var request = &longrunningpb.CancelOperationRequest{ + Name: name, + } + + c, err := NewOperationsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.CancelOperation(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestOperationsDeleteOperation(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockOperations.err = nil + mockOperations.reqs = nil + + mockOperations.resps = append(mockOperations.resps[:0], expectedResponse) + + var name string = "name3373707" + var request = &longrunningpb.DeleteOperationRequest{ + Name: name, + } + + c, err := NewOperationsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteOperation(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockOperations.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestOperationsDeleteOperationError(t *testing.T) { + errCode := codes.PermissionDenied + mockOperations.err = gstatus.Error(errCode, "test error") + + var name string = "name3373707" + var request = &longrunningpb.DeleteOperationRequest{ + Name: name, + } + + c, err := NewOperationsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteOperation(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} diff --git a/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go b/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go new file mode 100644 index 0000000..97b33a8 --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go @@ -0,0 +1,267 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache 
License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package longrunning
+
+import (
+ "math"
+ "time"
+
+ "cloud.google.com/go/internal/version"
+ gax "github.com/googleapis/gax-go"
+ "golang.org/x/net/context"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/transport"
+ longrunningpb "google.golang.org/genproto/googleapis/longrunning"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+)
+
+// OperationsCallOptions contains the retry settings for each method of OperationsClient.
+type OperationsCallOptions struct {
+ GetOperation []gax.CallOption
+ ListOperations []gax.CallOption
+ CancelOperation []gax.CallOption
+ DeleteOperation []gax.CallOption
+}
+
+func defaultOperationsClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ option.WithEndpoint("longrunning.googleapis.com:443"),
+ option.WithScopes(DefaultAuthScopes()...),
+ }
+}
+
+func defaultOperationsCallOptions() *OperationsCallOptions {
+ retry := map[[2]string][]gax.CallOption{
+ {"default", "idempotent"}: {
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.DeadlineExceeded,
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60000 * time.Millisecond,
+ Multiplier: 1.3,
+ })
+ }),
+ },
+ }
+ return &OperationsCallOptions{
+ GetOperation: retry[[2]string{"default", "idempotent"}],
+ ListOperations: retry[[2]string{"default", "idempotent"}],
+ CancelOperation: retry[[2]string{"default", "idempotent"}],
+ DeleteOperation: retry[[2]string{"default", "idempotent"}],
+ }
+}
+
+// OperationsClient is a client for interacting with the Google Long Running Operations API.
+type OperationsClient struct {
+ // The connection to the service.
+ conn *grpc.ClientConn
+
+ // The gRPC API client.
+ operationsClient longrunningpb.OperationsClient
+
+ // The call options for this service.
+ CallOptions *OperationsCallOptions
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogMetadata metadata.MD
+}
+
+// NewOperationsClient creates a new operations client.
+//
+// Manages long-running operations with an API service.
+//
+// When an API method normally takes a long time to complete, it can be designed
+// to return [Operation][google.longrunning.Operation] to the client, and the client can use this
+// interface to receive the real response asynchronously by polling the
+// operation resource, or pass the operation resource to another API (such as
+// Google Cloud Pub/Sub API) to receive the response. Any API service that
+// returns long-running operations should implement the Operations interface
+// so developers can have a consistent client experience.
+func NewOperationsClient(ctx context.Context, opts ...option.ClientOption) (*OperationsClient, error) {
+ conn, err := transport.DialGRPC(ctx, append(defaultOperationsClientOptions(), opts...)...)
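+ // The defaults are placed first so that caller-supplied options can
+ // override the endpoint and scopes above.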
+ if err != nil { + return nil, err + } + c := &OperationsClient{ + conn: conn, + CallOptions: defaultOperationsCallOptions(), + + operationsClient: longrunningpb.NewOperationsClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *OperationsClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *OperationsClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *OperationsClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// GetOperation gets the latest state of a long-running operation. Clients can use this +// method to poll the operation result at intervals as recommended by the API +// service. +func (c *OperationsClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetOperation[0:len(c.CallOptions.GetOperation):len(c.CallOptions.GetOperation)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.operationsClient.GetOperation(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListOperations lists operations that match the specified filter in the request. If the +// server doesn't support this method, it returns UNIMPLEMENTED. +// +// NOTE: the name binding below allows API services to override the binding +// to use different resource name schemes, such as users/*/operations. +func (c *OperationsClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListOperations[0:len(c.CallOptions.ListOperations):len(c.CallOptions.ListOperations)], opts...) + it := &OperationIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) { + var resp *longrunningpb.ListOperationsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.operationsClient.ListOperations(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Operations, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// CancelOperation starts asynchronous cancellation on a long-running operation. 
The server +// makes a best effort to cancel the operation, but success is not +// guaranteed. If the server doesn't support this method, it returns +// google.rpc.Code.UNIMPLEMENTED. Clients can use +// [Operations.GetOperation][google.longrunning.Operations.GetOperation] or +// other methods to check whether the cancellation succeeded or whether the +// operation completed despite cancellation. On successful cancellation, +// the operation is not deleted; instead, it becomes an operation with +// an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, +// corresponding to Code.CANCELLED. +func (c *OperationsClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CancelOperation[0:len(c.CallOptions.CancelOperation):len(c.CallOptions.CancelOperation)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.operationsClient.CancelOperation(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// DeleteOperation deletes a long-running operation. This method indicates that the client is +// no longer interested in the operation result. It does not cancel the +// operation. If the server doesn't support this method, it returns +// google.rpc.Code.UNIMPLEMENTED. +func (c *OperationsClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteOperation[0:len(c.CallOptions.DeleteOperation):len(c.CallOptions.DeleteOperation)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.operationsClient.DeleteOperation(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// OperationIterator manages a stream of *longrunningpb.Operation. +type OperationIterator struct { + items []*longrunningpb.Operation + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*longrunningpb.Operation, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *OperationIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
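+//
+// A typical consumption loop, mirroring the package examples (sketch):
+//
+//   for {
+//       op, err := it.Next()
+//       if err == iterator.Done {
+//           break
+//       }
+//       if err != nil {
+//           // TODO: Handle error.
+//       }
+//       // TODO: Use op.
+//   }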
+func (it *OperationIterator) Next() (*longrunningpb.Operation, error) { + var item *longrunningpb.Operation + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *OperationIterator) bufLen() int { + return len(it.items) +} + +func (it *OperationIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/longrunning/autogen/operations_client_example_test.go b/vendor/cloud.google.com/go/longrunning/autogen/operations_client_example_test.go new file mode 100644 index 0000000..7bde78b --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/autogen/operations_client_example_test.go @@ -0,0 +1,108 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package longrunning_test + +import ( + "cloud.google.com/go/longrunning/autogen" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" +) + +func ExampleNewOperationsClient() { + ctx := context.Background() + c, err := longrunning.NewOperationsClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleOperationsClient_GetOperation() { + ctx := context.Background() + c, err := longrunning.NewOperationsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &longrunningpb.GetOperationRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetOperation(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleOperationsClient_ListOperations() { + ctx := context.Background() + c, err := longrunning.NewOperationsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &longrunningpb.ListOperationsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListOperations(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleOperationsClient_CancelOperation() { + ctx := context.Background() + c, err := longrunning.NewOperationsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &longrunningpb.CancelOperationRequest{ + // TODO: Fill request struct fields. + } + err = c.CancelOperation(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleOperationsClient_DeleteOperation() { + ctx := context.Background() + c, err := longrunning.NewOperationsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &longrunningpb.DeleteOperationRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteOperation(ctx, req) + if err != nil { + // TODO: Handle error. 
+ } +} diff --git a/vendor/cloud.google.com/go/longrunning/example_test.go b/vendor/cloud.google.com/go/longrunning/example_test.go new file mode 100644 index 0000000..c7b52ac --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/example_test.go @@ -0,0 +1,116 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package longrunning + +import ( + "fmt" + "time" + + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/duration" + "github.com/golang/protobuf/ptypes/timestamp" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/longrunning" +) + +func bestMomentInHistory() (*Operation, error) { + t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", "2009-11-10 23:00:00 +0000 UTC") + if err != nil { + return nil, err + } + resp, err := ptypes.TimestampProto(t) + if err != nil { + return nil, err + } + respAny, err := ptypes.MarshalAny(resp) + if err != nil { + return nil, err + } + metaAny, err := ptypes.MarshalAny(ptypes.DurationProto(1 * time.Hour)) + return &Operation{ + proto: &pb.Operation{ + Name: "best-moment", + Done: true, + Metadata: metaAny, + Result: &pb.Operation_Response{ + Response: respAny, + }, + }, + }, err +} + +func ExampleOperation_Wait() { + // Complex computation, might take a long time. + op, err := bestMomentInHistory() + if err != nil { + // TODO: Handle err. + } + var ts timestamp.Timestamp + err = op.Wait(context.TODO(), &ts) + if err != nil && !op.Done() { + fmt.Println("failed to fetch operation status", err) + } else if err != nil && op.Done() { + fmt.Println("operation completed with error", err) + } else { + fmt.Println(ptypes.TimestampString(&ts)) + } + // Output: + // 2009-11-10T23:00:00Z +} + +func ExampleOperation_Metadata() { + op, err := bestMomentInHistory() + if err != nil { + // TODO: Handle err. + } + + // The operation might contain metadata. + // In this example, the metadata contains the estimated length of time + // the operation might take to complete. + var meta duration.Duration + if err := op.Metadata(&meta); err != nil { + // TODO: Handle err. + } + d, err := ptypes.Duration(&meta) + if err == ErrNoMetadata { + fmt.Println("no metadata") + } else if err != nil { + // TODO: Handle err. + } else { + fmt.Println(d) + } + // Output: + // 1h0m0s +} + +func ExampleOperation_Cancel() { + op, err := bestMomentInHistory() + if err != nil { + // TODO: Handle err. + } + if err := op.Cancel(context.Background()); err != nil { + // TODO: Handle err. + } +} + +func ExampleOperation_Delete() { + op, err := bestMomentInHistory() + if err != nil { + // TODO: Handle err. + } + if err := op.Delete(context.Background()); err != nil { + // TODO: Handle err. + } +} diff --git a/vendor/cloud.google.com/go/longrunning/longrunning.go b/vendor/cloud.google.com/go/longrunning/longrunning.go new file mode 100644 index 0000000..20d3b75 --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/longrunning.go @@ -0,0 +1,181 @@ +// Copyright 2016 Google Inc. 
All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package longrunning supports Long Running Operations for the Google Cloud Libraries.
+// See google.golang.org/genproto/googleapis/longrunning for its service definition.
+//
+// Users of the Google Cloud Libraries will typically not use this package directly.
+// Instead they will call functions returning Operations and call their methods.
+//
+// This package is still experimental and subject to change.
+package longrunning // import "cloud.google.com/go/longrunning"
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/ptypes"
+ "github.com/googleapis/gax-go"
+ "google.golang.org/grpc/status"
+
+ "golang.org/x/net/context"
+
+ autogen "cloud.google.com/go/longrunning/autogen"
+ pb "google.golang.org/genproto/googleapis/longrunning"
+ "google.golang.org/grpc/codes"
+)
+
+// ErrNoMetadata is the error returned by Metadata if the operation contains no metadata.
+var ErrNoMetadata = errors.New("operation contains no metadata")
+
+// Operation represents the result of an API call that may not be ready yet.
+type Operation struct {
+ c operationsClient
+ proto *pb.Operation
+}
+
+type operationsClient interface {
+ GetOperation(context.Context, *pb.GetOperationRequest, ...gax.CallOption) (*pb.Operation, error)
+ CancelOperation(context.Context, *pb.CancelOperationRequest, ...gax.CallOption) error
+ DeleteOperation(context.Context, *pb.DeleteOperationRequest, ...gax.CallOption) error
+}
+
+// InternalNewOperation is for use by the Google Cloud Libraries only.
+//
+// InternalNewOperation returns a long-running operation, abstracting the raw pb.Operation.
+// The inner client must be connected to the server from which proto was received.
+func InternalNewOperation(inner *autogen.OperationsClient, proto *pb.Operation) *Operation {
+ return &Operation{
+ c: inner,
+ proto: proto,
+ }
+}
+
+// Name returns the name of the long-running operation.
+// The name is assigned by the server and is unique within the service
+// from which the operation is created.
+func (op *Operation) Name() string {
+ return op.proto.Name
+}
+
+// Done reports whether the long-running operation has completed.
+func (op *Operation) Done() bool {
+ return op.proto.Done
+}
+
+// Metadata unmarshals op's metadata into meta.
+// If op does not contain any metadata, Metadata returns ErrNoMetadata and meta is unmodified.
+func (op *Operation) Metadata(meta proto.Message) error {
+ if m := op.proto.Metadata; m != nil {
+ return ptypes.UnmarshalAny(m, meta)
+ }
+ return ErrNoMetadata
+}
+
+// Poll fetches the latest state of a long-running operation.
+//
+// If Poll fails, the error is returned and op is unmodified.
+// If Poll succeeds and the operation has completed with failure,
+// the error is returned and op.Done will return true.
+// If Poll succeeds and the operation has completed successfully,
+// op.Done will return true; if resp != nil, the response of the operation
+// is stored in resp.
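+//
+// A hand-rolled polling loop might look like the sketch below; most callers
+// should prefer Wait, which adds backoff between polls:
+//
+//   for !op.Done() {
+//       if err := op.Poll(ctx, nil); err != nil {
+//           // TODO: Handle error.
+//       }
+//       time.Sleep(time.Second)
+//   }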
+func (op *Operation) Poll(ctx context.Context, resp proto.Message, opts ...gax.CallOption) error { + if !op.Done() { + p, err := op.c.GetOperation(ctx, &pb.GetOperationRequest{Name: op.Name()}, opts...) + if err != nil { + return err + } + op.proto = p + } + if !op.Done() { + return nil + } + + switch r := op.proto.Result.(type) { + case *pb.Operation_Error: + // TODO (pongad): r.Details may contain further information + return status.Errorf(codes.Code(r.Error.Code), "%s", r.Error.Message) + case *pb.Operation_Response: + if resp == nil { + return nil + } + return ptypes.UnmarshalAny(r.Response, resp) + default: + return fmt.Errorf("unsupported result type %[1]T: %[1]v", r) + } +} + +// DefaultWaitInterval is the polling interval used by Operation.Wait. +const DefaultWaitInterval = 60 * time.Second + +// Wait is equivalent to WaitWithInterval using DefaultWaitInterval. +func (op *Operation) Wait(ctx context.Context, resp proto.Message, opts ...gax.CallOption) error { + return op.WaitWithInterval(ctx, resp, DefaultWaitInterval, opts...) +} + +// WaitWithInterval blocks until the operation is completed. +// If resp != nil, Wait stores the response in resp. +// WaitWithInterval polls every interval, except initially +// when it polls using exponential backoff. +// +// See documentation of Poll for error-handling information. +func (op *Operation) WaitWithInterval(ctx context.Context, resp proto.Message, interval time.Duration, opts ...gax.CallOption) error { + bo := gax.Backoff{ + Initial: 1 * time.Second, + Max: interval, + } + if bo.Max < bo.Initial { + bo.Max = bo.Initial + } + return op.wait(ctx, resp, &bo, gax.Sleep, opts...) +} + +type sleeper func(context.Context, time.Duration) error + +// wait implements Wait, taking exponentialBackoff and sleeper arguments for testing. +func (op *Operation) wait(ctx context.Context, resp proto.Message, bo *gax.Backoff, sl sleeper, opts ...gax.CallOption) error { + for { + if err := op.Poll(ctx, resp, opts...); err != nil { + return err + } + if op.Done() { + return nil + } + if err := sl(ctx, bo.Pause()); err != nil { + return err + } + } +} + +// Cancel starts asynchronous cancellation on a long-running operation. The server +// makes a best effort to cancel the operation, but success is not +// guaranteed. If the server doesn't support this method, it returns +// grpc.Code(error) == codes.Unimplemented. Clients can use +// Poll or other methods to check whether the cancellation succeeded or whether the +// operation completed despite cancellation. On successful cancellation, +// the operation is not deleted; instead, op.Poll returns an error +// with code Canceled. +func (op *Operation) Cancel(ctx context.Context, opts ...gax.CallOption) error { + return op.c.CancelOperation(ctx, &pb.CancelOperationRequest{Name: op.Name()}, opts...) +} + +// Delete deletes a long-running operation. This method indicates that the client is +// no longer interested in the operation result. It does not cancel the +// operation. If the server doesn't support this method, grpc.Code(error) == codes.Unimplemented. +func (op *Operation) Delete(ctx context.Context, opts ...gax.CallOption) error { + return op.c.DeleteOperation(ctx, &pb.DeleteOperationRequest{Name: op.Name()}, opts...) 
+}
diff --git a/vendor/cloud.google.com/go/longrunning/longrunning_test.go b/vendor/cloud.google.com/go/longrunning/longrunning_test.go
new file mode 100644
index 0000000..3116c3d
--- /dev/null
+++ b/vendor/cloud.google.com/go/longrunning/longrunning_test.go
@@ -0,0 +1,215 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.

+// Package longrunning supports Long Running Operations for the Google Cloud Libraries.
+//
+// This package is still experimental and subject to change.
+package longrunning
+
+import (
+ "errors"
+ "testing"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/ptypes"
+ "github.com/golang/protobuf/ptypes/duration"
+ gax "github.com/googleapis/gax-go"
+
+ "golang.org/x/net/context"
+
+ pb "google.golang.org/genproto/googleapis/longrunning"
+ status "google.golang.org/genproto/googleapis/rpc/status"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+)
+
+type getterService struct {
+ operationsClient
+
+ // clock represents the fake current time of the service.
+ // It is the running sum of the durations we have slept.
+ clock time.Duration
+
+ // getTimes records the times at which GetOperation is called.
+ getTimes []time.Duration
+
+ // results are the fake results that GetOperation should return.
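+ // For instance, seeding results with a pending operation followed by
+ // one with Done set makes the second GetOperation call report
+ // completion (sketch):
+ //
+ //   results: []*pb.Operation{{Name: "op"}, {Name: "op", Done: true}}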
+ results []*pb.Operation +} + +func (s *getterService) GetOperation(context.Context, *pb.GetOperationRequest, ...gax.CallOption) (*pb.Operation, error) { + i := len(s.getTimes) + s.getTimes = append(s.getTimes, s.clock) + if i >= len(s.results) { + return nil, errors.New("unexpected call") + } + return s.results[i], nil +} + +func (s *getterService) sleeper() sleeper { + return func(_ context.Context, d time.Duration) error { + s.clock += d + return nil + } +} + +func TestWait(t *testing.T) { + responseDur := ptypes.DurationProto(42 * time.Second) + responseAny, err := ptypes.MarshalAny(responseDur) + if err != nil { + t.Fatal(err) + } + + s := &getterService{ + results: []*pb.Operation{ + {Name: "foo"}, + {Name: "foo"}, + {Name: "foo"}, + {Name: "foo"}, + {Name: "foo"}, + { + Name: "foo", + Done: true, + Result: &pb.Operation_Response{ + Response: responseAny, + }, + }, + }, + } + op := &Operation{ + c: s, + proto: &pb.Operation{Name: "foo"}, + } + if op.Done() { + t.Fatal("operation should not have completed yet") + } + + var resp duration.Duration + bo := gax.Backoff{ + Initial: 1 * time.Second, + Max: 3 * time.Second, + } + if err := op.wait(context.Background(), &resp, &bo, s.sleeper()); err != nil { + t.Fatal(err) + } + if !proto.Equal(&resp, responseDur) { + t.Errorf("response, got %v, want %v", resp, responseDur) + } + if !op.Done() { + t.Errorf("operation should have completed") + } + + maxWait := []time.Duration{ + 1 * time.Second, + 2 * time.Second, + 3 * time.Second, + 3 * time.Second, + 3 * time.Second, + } + for i := 0; i < len(s.getTimes)-1; i++ { + w := s.getTimes[i+1] - s.getTimes[i] + if mw := maxWait[i]; w > mw { + t.Errorf("backoff, waited %s, max %s", w, mw) + } + } +} + +func TestPollRequestError(t *testing.T) { + const opName = "foo" + + // All calls error. 
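+ // (A getterService with no seeded results returns "unexpected call"
+ // from every GetOperation.)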
+ s := &getterService{}
+ op := &Operation{
+ c: s,
+ proto: &pb.Operation{Name: opName},
+ }
+ if err := op.Poll(context.Background(), nil); err == nil {
+ t.Fatalf("Poll should error")
+ }
+ if n := op.Name(); n != opName {
+ t.Errorf("operation name, got %q, want %q", n, opName)
+ }
+ if op.Done() {
+ t.Errorf("operation should not have completed; we failed to fetch state")
+ }
+}
+
+func TestPollErrorResult(t *testing.T) {
+ const (
+ errCode = codes.NotFound
+ errMsg = "my error"
+ )
+ op := &Operation{
+ proto: &pb.Operation{
+ Name: "foo",
+ Done: true,
+ Result: &pb.Operation_Error{
+ Error: &status.Status{
+ Code: int32(errCode),
+ Message: errMsg,
+ },
+ },
+ },
+ }
+ err := op.Poll(context.Background(), nil)
+ if got := grpc.Code(err); got != errCode {
+ t.Errorf("error code, want %s, got %s", errCode, got)
+ }
+ if got := grpc.ErrorDesc(err); got != errMsg {
+ t.Errorf("error message, want %s, got %s", errMsg, got)
+ }
+ if !op.Done() {
+ t.Errorf("operation should have completed")
+ }
+}
+
+type errService struct {
+ operationsClient
+ errCancel, errDelete error
+}
+
+func (s *errService) CancelOperation(context.Context, *pb.CancelOperationRequest, ...gax.CallOption) error {
+ return s.errCancel
+}
+
+func (s *errService) DeleteOperation(context.Context, *pb.DeleteOperationRequest, ...gax.CallOption) error {
+ return s.errDelete
+}
+
+func TestCancelReturnsError(t *testing.T) {
+ s := &errService{
+ errCancel: errors.New("cancel error"),
+ }
+ op := &Operation{
+ c: s,
+ proto: &pb.Operation{Name: "foo"},
+ }
+ if got, want := op.Cancel(context.Background()), s.errCancel; got != want {
+ t.Errorf("cancel, got error %s, want %s", got, want)
+ }
+}
+
+func TestDeleteReturnsError(t *testing.T) {
+ s := &errService{
+ errDelete: errors.New("delete error"),
+ }
+ op := &Operation{
+ c: s,
+ proto: &pb.Operation{Name: "foo"},
+ }
+ if got, want := op.Delete(context.Background()), s.errDelete; got != want {
+ t.Errorf("delete, got error %s, want %s", got, want)
+ }
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/ListMonitoredResourceDescriptors_smoke_test.go b/vendor/cloud.google.com/go/monitoring/apiv3/ListMonitoredResourceDescriptors_smoke_test.go
new file mode 100644
index 0000000..e0cda4a
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/ListMonitoredResourceDescriptors_smoke_test.go
@@ -0,0 +1,67 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+ +package monitoring + +import ( + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" +) + +import ( + "fmt" + "strconv" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var _ = fmt.Sprintf +var _ = iterator.Done +var _ = strconv.FormatUint +var _ = time.Now + +func TestMetricServiceSmoke(t *testing.T) { + if testing.Short() { + t.Skip("skipping smoke test in short mode") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) + if ts == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + + projectId := testutil.ProjID() + _ = projectId + + c, err := NewMetricClient(ctx, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + + var formattedName string = fmt.Sprintf("projects/%s", projectId) + var request = &monitoringpb.ListMonitoredResourceDescriptorsRequest{ + Name: formattedName, + } + + iter := c.ListMonitoredResourceDescriptors(ctx, request) + if _, err := iter.Next(); err != nil && err != iterator.Done { + t.Error(err) + } +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client.go new file mode 100644 index 0000000..7cb10a5 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client.go @@ -0,0 +1,274 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package monitoring + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// AlertPolicyCallOptions contains the retry settings for each method of AlertPolicyClient. 
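+//
+// The retry behavior of an individual method can be replaced after the client
+// is constructed, e.g. (a sketch, assuming a constructed client c):
+//
+//   c.CallOptions.GetAlertPolicy = []gax.CallOption{
+//       gax.WithRetry(func() gax.Retryer {
+//           return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
+//               Initial: 50 * time.Millisecond,
+//               Max: 5 * time.Second,
+//               Multiplier: 2,
+//           })
+//       }),
+//   }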
+type AlertPolicyCallOptions struct { + ListAlertPolicies []gax.CallOption + GetAlertPolicy []gax.CallOption + CreateAlertPolicy []gax.CallOption + DeleteAlertPolicy []gax.CallOption + UpdateAlertPolicy []gax.CallOption +} + +func defaultAlertPolicyClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("monitoring.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultAlertPolicyCallOptions() *AlertPolicyCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &AlertPolicyCallOptions{ + ListAlertPolicies: retry[[2]string{"default", "idempotent"}], + GetAlertPolicy: retry[[2]string{"default", "idempotent"}], + CreateAlertPolicy: retry[[2]string{"default", "non_idempotent"}], + DeleteAlertPolicy: retry[[2]string{"default", "idempotent"}], + UpdateAlertPolicy: retry[[2]string{"default", "non_idempotent"}], + } +} + +// AlertPolicyClient is a client for interacting with Stackdriver Monitoring API. +type AlertPolicyClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + alertPolicyClient monitoringpb.AlertPolicyServiceClient + + // The call options for this service. + CallOptions *AlertPolicyCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewAlertPolicyClient creates a new alert policy service client. +// +// The AlertPolicyService API is used to manage (list, create, delete, +// edit) alert policies in Stackdriver Monitoring. An alerting policy is +// a description of the conditions under which some aspect of your +// system is considered to be "unhealthy" and the ways to notify +// people or services about this state. In addition to using this API, alert +// policies can also be managed through +// Stackdriver Monitoring (at https://cloud.google.com/monitoring/docs/), +// which can be reached by clicking the "Monitoring" tab in +// Cloud Console (at https://console.cloud.google.com/). +func NewAlertPolicyClient(ctx context.Context, opts ...option.ClientOption) (*AlertPolicyClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultAlertPolicyClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &AlertPolicyClient{ + conn: conn, + CallOptions: defaultAlertPolicyCallOptions(), + + alertPolicyClient: monitoringpb.NewAlertPolicyServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *AlertPolicyClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *AlertPolicyClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *AlertPolicyClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) 
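+ // kv holds alternating key/value pairs that gax.XGoogHeader joins into
+ // the single x-goog-api-client header value.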
+ kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListAlertPolicies lists the existing alerting policies for the project. +func (c *AlertPolicyClient) ListAlertPolicies(ctx context.Context, req *monitoringpb.ListAlertPoliciesRequest, opts ...gax.CallOption) *AlertPolicyIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListAlertPolicies[0:len(c.CallOptions.ListAlertPolicies):len(c.CallOptions.ListAlertPolicies)], opts...) + it := &AlertPolicyIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.AlertPolicy, string, error) { + var resp *monitoringpb.ListAlertPoliciesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.ListAlertPolicies(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.AlertPolicies, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetAlertPolicy gets a single alerting policy. +func (c *AlertPolicyClient) GetAlertPolicy(ctx context.Context, req *monitoringpb.GetAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetAlertPolicy[0:len(c.CallOptions.GetAlertPolicy):len(c.CallOptions.GetAlertPolicy)], opts...) + var resp *monitoringpb.AlertPolicy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.GetAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateAlertPolicy creates a new alerting policy. +func (c *AlertPolicyClient) CreateAlertPolicy(ctx context.Context, req *monitoringpb.CreateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateAlertPolicy[0:len(c.CallOptions.CreateAlertPolicy):len(c.CallOptions.CreateAlertPolicy)], opts...) + var resp *monitoringpb.AlertPolicy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.CreateAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteAlertPolicy deletes an alerting policy. +func (c *AlertPolicyClient) DeleteAlertPolicy(ctx context.Context, req *monitoringpb.DeleteAlertPolicyRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteAlertPolicy[0:len(c.CallOptions.DeleteAlertPolicy):len(c.CallOptions.DeleteAlertPolicy)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.alertPolicyClient.DeleteAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ return err +} + +// UpdateAlertPolicy updates an alerting policy. You can either replace the entire policy with +// a new one or replace only certain fields in the current alerting policy by +// specifying the fields to be updated via updateMask. Returns the +// updated alerting policy. +func (c *AlertPolicyClient) UpdateAlertPolicy(ctx context.Context, req *monitoringpb.UpdateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateAlertPolicy[0:len(c.CallOptions.UpdateAlertPolicy):len(c.CallOptions.UpdateAlertPolicy)], opts...) + var resp *monitoringpb.AlertPolicy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.UpdateAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AlertPolicyIterator manages a stream of *monitoringpb.AlertPolicy. +type AlertPolicyIterator struct { + items []*monitoringpb.AlertPolicy + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.AlertPolicy, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *AlertPolicyIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *AlertPolicyIterator) Next() (*monitoringpb.AlertPolicy, error) { + var item *monitoringpb.AlertPolicy + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *AlertPolicyIterator) bufLen() int { + return len(it.items) +} + +func (it *AlertPolicyIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client_example_test.go b/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client_example_test.go new file mode 100644 index 0000000..4ddc33a --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client_example_test.go @@ -0,0 +1,128 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package monitoring_test + +import ( + "cloud.google.com/go/monitoring/apiv3" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" +) + +func ExampleNewAlertPolicyClient() { + ctx := context.Background() + c, err := monitoring.NewAlertPolicyClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleAlertPolicyClient_ListAlertPolicies() { + ctx := context.Background() + c, err := monitoring.NewAlertPolicyClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.ListAlertPoliciesRequest{ + // TODO: Fill request struct fields. + } + it := c.ListAlertPolicies(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleAlertPolicyClient_GetAlertPolicy() { + ctx := context.Background() + c, err := monitoring.NewAlertPolicyClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.GetAlertPolicyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetAlertPolicy(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleAlertPolicyClient_CreateAlertPolicy() { + ctx := context.Background() + c, err := monitoring.NewAlertPolicyClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.CreateAlertPolicyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateAlertPolicy(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleAlertPolicyClient_DeleteAlertPolicy() { + ctx := context.Background() + c, err := monitoring.NewAlertPolicyClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.DeleteAlertPolicyRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteAlertPolicy(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleAlertPolicyClient_UpdateAlertPolicy() { + ctx := context.Background() + c, err := monitoring.NewAlertPolicyClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.UpdateAlertPolicyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateAlertPolicy(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/doc.go b/vendor/cloud.google.com/go/monitoring/apiv3/doc.go new file mode 100644 index 0000000..dee265c --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/doc.go @@ -0,0 +1,51 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package monitoring is an auto-generated package for the +// Stackdriver Monitoring API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. 
+// +// Manages your Stackdriver Monitoring data and configurations. Most projects +// must be associated with a Stackdriver account, with a few exceptions as +// noted on the individual method pages. +package monitoring // import "cloud.google.com/go/monitoring/apiv3" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + "https://www.googleapis.com/auth/monitoring.write", + } +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go new file mode 100644 index 0000000..01e6e3a --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go @@ -0,0 +1,355 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package monitoring + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// GroupCallOptions contains the retry settings for each method of GroupClient. 
+type GroupCallOptions struct { + ListGroups []gax.CallOption + GetGroup []gax.CallOption + CreateGroup []gax.CallOption + UpdateGroup []gax.CallOption + DeleteGroup []gax.CallOption + ListGroupMembers []gax.CallOption +} + +func defaultGroupClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("monitoring.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultGroupCallOptions() *GroupCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &GroupCallOptions{ + ListGroups: retry[[2]string{"default", "idempotent"}], + GetGroup: retry[[2]string{"default", "idempotent"}], + CreateGroup: retry[[2]string{"default", "non_idempotent"}], + UpdateGroup: retry[[2]string{"default", "idempotent"}], + DeleteGroup: retry[[2]string{"default", "idempotent"}], + ListGroupMembers: retry[[2]string{"default", "idempotent"}], + } +} + +// GroupClient is a client for interacting with Stackdriver Monitoring API. +type GroupClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + groupClient monitoringpb.GroupServiceClient + + // The call options for this service. + CallOptions *GroupCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewGroupClient creates a new group service client. +// +// The Group API lets you inspect and manage your +// groups (at google.monitoring.v3.Group). +// +// A group is a named filter that is used to identify +// a collection of monitored resources. Groups are typically used to +// mirror the physical and/or logical topology of the environment. +// Because group membership is computed dynamically, monitored +// resources that are started in the future are automatically placed +// in matching groups. By using a group to name monitored resources in, +// for example, an alert policy, the target of that alert policy is +// updated automatically as monitored resources are added and removed +// from the infrastructure. +func NewGroupClient(ctx context.Context, opts ...option.ClientOption) (*GroupClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultGroupClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &GroupClient{ + conn: conn, + CallOptions: defaultGroupCallOptions(), + + groupClient: monitoringpb.NewGroupServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *GroupClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *GroupClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *GroupClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) 
+ kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListGroups lists the existing groups. +func (c *GroupClient) ListGroups(ctx context.Context, req *monitoringpb.ListGroupsRequest, opts ...gax.CallOption) *GroupIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListGroups[0:len(c.CallOptions.ListGroups):len(c.CallOptions.ListGroups)], opts...) + it := &GroupIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Group, string, error) { + var resp *monitoringpb.ListGroupsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.ListGroups(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Group, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetGroup gets a single group. +func (c *GroupClient) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetGroup[0:len(c.CallOptions.GetGroup):len(c.CallOptions.GetGroup)], opts...) + var resp *monitoringpb.Group + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.GetGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateGroup creates a new group. +func (c *GroupClient) CreateGroup(ctx context.Context, req *monitoringpb.CreateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateGroup[0:len(c.CallOptions.CreateGroup):len(c.CallOptions.CreateGroup)], opts...) + var resp *monitoringpb.Group + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.CreateGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateGroup updates an existing group. +// You can change any group attributes except name. +func (c *GroupClient) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateGroup[0:len(c.CallOptions.UpdateGroup):len(c.CallOptions.UpdateGroup)], opts...) + var resp *monitoringpb.Group + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.UpdateGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteGroup deletes an existing group. 
+func (c *GroupClient) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteGroupRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteGroup[0:len(c.CallOptions.DeleteGroup):len(c.CallOptions.DeleteGroup)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.groupClient.DeleteGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ListGroupMembers lists the monitored resources that are members of a group. +func (c *GroupClient) ListGroupMembers(ctx context.Context, req *monitoringpb.ListGroupMembersRequest, opts ...gax.CallOption) *MonitoredResourceIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListGroupMembers[0:len(c.CallOptions.ListGroupMembers):len(c.CallOptions.ListGroupMembers)], opts...) + it := &MonitoredResourceIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResource, string, error) { + var resp *monitoringpb.ListGroupMembersResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.ListGroupMembers(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Members, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GroupIterator manages a stream of *monitoringpb.Group. +type GroupIterator struct { + items []*monitoringpb.Group + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Group, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *GroupIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *GroupIterator) Next() (*monitoringpb.Group, error) { + var item *monitoringpb.Group + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *GroupIterator) bufLen() int { + return len(it.items) +} + +func (it *GroupIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// MonitoredResourceIterator manages a stream of *monitoredrespb.MonitoredResource. 
+type MonitoredResourceIterator struct { + items []*monitoredrespb.MonitoredResource + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResource, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MonitoredResourceIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *MonitoredResourceIterator) Next() (*monitoredrespb.MonitoredResource, error) { + var item *monitoredrespb.MonitoredResource + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MonitoredResourceIterator) bufLen() int { + return len(it.items) +} + +func (it *MonitoredResourceIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/group_client_example_test.go b/vendor/cloud.google.com/go/monitoring/apiv3/group_client_example_test.go new file mode 100644 index 0000000..eaf8eee --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/group_client_example_test.go @@ -0,0 +1,152 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package monitoring_test + +import ( + "cloud.google.com/go/monitoring/apiv3" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" +) + +func ExampleNewGroupClient() { + ctx := context.Background() + c, err := monitoring.NewGroupClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleGroupClient_ListGroups() { + ctx := context.Background() + c, err := monitoring.NewGroupClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.ListGroupsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListGroups(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleGroupClient_GetGroup() { + ctx := context.Background() + c, err := monitoring.NewGroupClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.GetGroupRequest{ + // TODO: Fill request struct fields. 
+ } + resp, err := c.GetGroup(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleGroupClient_CreateGroup() { + ctx := context.Background() + c, err := monitoring.NewGroupClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.CreateGroupRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateGroup(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleGroupClient_UpdateGroup() { + ctx := context.Background() + c, err := monitoring.NewGroupClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.UpdateGroupRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateGroup(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleGroupClient_DeleteGroup() { + ctx := context.Background() + c, err := monitoring.NewGroupClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.DeleteGroupRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteGroup(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleGroupClient_ListGroupMembers() { + ctx := context.Background() + c, err := monitoring.NewGroupClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.ListGroupMembersRequest{ + // TODO: Fill request struct fields. + } + it := c.ListGroupMembers(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go new file mode 100644 index 0000000..318963e --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go @@ -0,0 +1,444 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package monitoring + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + metricpb "google.golang.org/genproto/googleapis/api/metric" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// MetricCallOptions contains the retry settings for each method of MetricClient. 
+type MetricCallOptions struct { + ListMonitoredResourceDescriptors []gax.CallOption + GetMonitoredResourceDescriptor []gax.CallOption + ListMetricDescriptors []gax.CallOption + GetMetricDescriptor []gax.CallOption + CreateMetricDescriptor []gax.CallOption + DeleteMetricDescriptor []gax.CallOption + ListTimeSeries []gax.CallOption + CreateTimeSeries []gax.CallOption +} + +func defaultMetricClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("monitoring.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultMetricCallOptions() *MetricCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &MetricCallOptions{ + ListMonitoredResourceDescriptors: retry[[2]string{"default", "idempotent"}], + GetMonitoredResourceDescriptor: retry[[2]string{"default", "idempotent"}], + ListMetricDescriptors: retry[[2]string{"default", "idempotent"}], + GetMetricDescriptor: retry[[2]string{"default", "idempotent"}], + CreateMetricDescriptor: retry[[2]string{"default", "non_idempotent"}], + DeleteMetricDescriptor: retry[[2]string{"default", "idempotent"}], + ListTimeSeries: retry[[2]string{"default", "idempotent"}], + CreateTimeSeries: retry[[2]string{"default", "non_idempotent"}], + } +} + +// MetricClient is a client for interacting with Stackdriver Monitoring API. +type MetricClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + metricClient monitoringpb.MetricServiceClient + + // The call options for this service. + CallOptions *MetricCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewMetricClient creates a new metric service client. +// +// Manages metric descriptors, monitored resource descriptors, and +// time series data. +func NewMetricClient(ctx context.Context, opts ...option.ClientOption) (*MetricClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultMetricClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &MetricClient{ + conn: conn, + CallOptions: defaultMetricCallOptions(), + + metricClient: monitoringpb.NewMetricServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *MetricClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *MetricClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *MetricClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListMonitoredResourceDescriptors lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account. 
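+// Results are returned through an iterator; page size and tokens can be adjusted via its PageInfo.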
+func (c *MetricClient) ListMonitoredResourceDescriptors(ctx context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListMonitoredResourceDescriptors[0:len(c.CallOptions.ListMonitoredResourceDescriptors):len(c.CallOptions.ListMonitoredResourceDescriptors)], opts...) + it := &MonitoredResourceDescriptorIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) { + var resp *monitoringpb.ListMonitoredResourceDescriptorsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.ListMonitoredResourceDescriptors(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.ResourceDescriptors, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetMonitoredResourceDescriptor gets a single monitored resource descriptor. This method does not require a Stackdriver account. +func (c *MetricClient) GetMonitoredResourceDescriptor(ctx context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest, opts ...gax.CallOption) (*monitoredrespb.MonitoredResourceDescriptor, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetMonitoredResourceDescriptor[0:len(c.CallOptions.GetMonitoredResourceDescriptor):len(c.CallOptions.GetMonitoredResourceDescriptor)], opts...) + var resp *monitoredrespb.MonitoredResourceDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.GetMonitoredResourceDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListMetricDescriptors lists metric descriptors that match a filter. This method does not require a Stackdriver account. +func (c *MetricClient) ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest, opts ...gax.CallOption) *MetricDescriptorIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListMetricDescriptors[0:len(c.CallOptions.ListMetricDescriptors):len(c.CallOptions.ListMetricDescriptors)], opts...) + it := &MetricDescriptorIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*metricpb.MetricDescriptor, string, error) { + var resp *monitoringpb.ListMetricDescriptorsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.ListMetricDescriptors(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, "", err + } + return resp.MetricDescriptors, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetMetricDescriptor gets a single metric descriptor. This method does not require a Stackdriver account. +func (c *MetricClient) GetMetricDescriptor(ctx context.Context, req *monitoringpb.GetMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetMetricDescriptor[0:len(c.CallOptions.GetMetricDescriptor):len(c.CallOptions.GetMetricDescriptor)], opts...) + var resp *metricpb.MetricDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.GetMetricDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateMetricDescriptor creates a new metric descriptor. +// User-created metric descriptors define +// custom metrics (at /monitoring/custom-metrics). +func (c *MetricClient) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateMetricDescriptor[0:len(c.CallOptions.CreateMetricDescriptor):len(c.CallOptions.CreateMetricDescriptor)], opts...) + var resp *metricpb.MetricDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.CreateMetricDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteMetricDescriptor deletes a metric descriptor. Only user-created +// custom metrics (at /monitoring/custom-metrics) can be deleted. +func (c *MetricClient) DeleteMetricDescriptor(ctx context.Context, req *monitoringpb.DeleteMetricDescriptorRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteMetricDescriptor[0:len(c.CallOptions.DeleteMetricDescriptor):len(c.CallOptions.DeleteMetricDescriptor)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.metricClient.DeleteMetricDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ListTimeSeries lists time series that match a filter. This method does not require a Stackdriver account. +func (c *MetricClient) ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListTimeSeries[0:len(c.CallOptions.ListTimeSeries):len(c.CallOptions.ListTimeSeries)], opts...) 
+ it := &TimeSeriesIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.TimeSeries, string, error) { + var resp *monitoringpb.ListTimeSeriesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.ListTimeSeries(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.TimeSeries, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// CreateTimeSeries creates or adds data to one or more time series. +// The response is empty if all time series in the request were written. +// If any time series could not be written, a corresponding failure message is +// included in the error response. +func (c *MetricClient) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateTimeSeries[0:len(c.CallOptions.CreateTimeSeries):len(c.CallOptions.CreateTimeSeries)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.metricClient.CreateTimeSeries(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// MetricDescriptorIterator manages a stream of *metricpb.MetricDescriptor. +type MetricDescriptorIterator struct { + items []*metricpb.MetricDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*metricpb.MetricDescriptor, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MetricDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *MetricDescriptorIterator) Next() (*metricpb.MetricDescriptor, error) { + var item *metricpb.MetricDescriptor + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MetricDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *MetricDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// MonitoredResourceDescriptorIterator manages a stream of *monitoredrespb.MonitoredResourceDescriptor. 
+type MonitoredResourceDescriptorIterator struct { + items []*monitoredrespb.MonitoredResourceDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResourceDescriptor, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MonitoredResourceDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *MonitoredResourceDescriptorIterator) Next() (*monitoredrespb.MonitoredResourceDescriptor, error) { + var item *monitoredrespb.MonitoredResourceDescriptor + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MonitoredResourceDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *MonitoredResourceDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// TimeSeriesIterator manages a stream of *monitoringpb.TimeSeries. +type TimeSeriesIterator struct { + items []*monitoringpb.TimeSeries + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.TimeSeries, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *TimeSeriesIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *TimeSeriesIterator) Next() (*monitoringpb.TimeSeries, error) { + var item *monitoringpb.TimeSeries + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *TimeSeriesIterator) bufLen() int { + return len(it.items) +} + +func (it *TimeSeriesIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/metric_client_example_test.go b/vendor/cloud.google.com/go/monitoring/apiv3/metric_client_example_test.go new file mode 100644 index 0000000..29de109 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/metric_client_example_test.go @@ -0,0 +1,192 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package monitoring_test + +import ( + "cloud.google.com/go/monitoring/apiv3" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" +) + +func ExampleNewMetricClient() { + ctx := context.Background() + c, err := monitoring.NewMetricClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleMetricClient_ListMonitoredResourceDescriptors() { + ctx := context.Background() + c, err := monitoring.NewMetricClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.ListMonitoredResourceDescriptorsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListMonitoredResourceDescriptors(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleMetricClient_GetMonitoredResourceDescriptor() { + ctx := context.Background() + c, err := monitoring.NewMetricClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.GetMonitoredResourceDescriptorRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetMonitoredResourceDescriptor(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleMetricClient_ListMetricDescriptors() { + ctx := context.Background() + c, err := monitoring.NewMetricClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.ListMetricDescriptorsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListMetricDescriptors(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleMetricClient_GetMetricDescriptor() { + ctx := context.Background() + c, err := monitoring.NewMetricClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.GetMetricDescriptorRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetMetricDescriptor(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleMetricClient_CreateMetricDescriptor() { + ctx := context.Background() + c, err := monitoring.NewMetricClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.CreateMetricDescriptorRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateMetricDescriptor(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleMetricClient_DeleteMetricDescriptor() { + ctx := context.Background() + c, err := monitoring.NewMetricClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.DeleteMetricDescriptorRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteMetricDescriptor(ctx, req) + if err != nil { + // TODO: Handle error. 
+ } +} + +func ExampleMetricClient_ListTimeSeries() { + ctx := context.Background() + c, err := monitoring.NewMetricClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.ListTimeSeriesRequest{ + // TODO: Fill request struct fields. + } + it := c.ListTimeSeries(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleMetricClient_CreateTimeSeries() { + ctx := context.Background() + c, err := monitoring.NewMetricClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.CreateTimeSeriesRequest{ + // TODO: Fill request struct fields. + } + err = c.CreateTimeSeries(ctx, req) + if err != nil { + // TODO: Handle error. + } +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/mock_test.go b/vendor/cloud.google.com/go/monitoring/apiv3/mock_test.go new file mode 100644 index 0000000..be38e7f --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/mock_test.go @@ -0,0 +1,2636 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package monitoring + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + metricpb "google.golang.org/genproto/googleapis/api/metric" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockAlertPolicyServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + monitoringpb.AlertPolicyServiceServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockAlertPolicyServer) ListAlertPolicies(ctx context.Context, req *monitoringpb.ListAlertPoliciesRequest) (*monitoringpb.ListAlertPoliciesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.ListAlertPoliciesResponse), nil +} + +func (s *mockAlertPolicyServer) GetAlertPolicy(ctx context.Context, req *monitoringpb.GetAlertPolicyRequest) (*monitoringpb.AlertPolicy, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.AlertPolicy), nil +} + +func (s *mockAlertPolicyServer) CreateAlertPolicy(ctx context.Context, req *monitoringpb.CreateAlertPolicyRequest) (*monitoringpb.AlertPolicy, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.AlertPolicy), nil +} + +func (s *mockAlertPolicyServer) DeleteAlertPolicy(ctx context.Context, req *monitoringpb.DeleteAlertPolicyRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockAlertPolicyServer) UpdateAlertPolicy(ctx context.Context, req *monitoringpb.UpdateAlertPolicyRequest) (*monitoringpb.AlertPolicy, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.AlertPolicy), nil +} + +type mockGroupServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + monitoringpb.GroupServiceServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockGroupServer) ListGroups(ctx context.Context, req *monitoringpb.ListGroupsRequest) (*monitoringpb.ListGroupsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.ListGroupsResponse), nil +} + +func (s *mockGroupServer) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRequest) (*monitoringpb.Group, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.Group), nil +} + +func (s *mockGroupServer) CreateGroup(ctx context.Context, req *monitoringpb.CreateGroupRequest) (*monitoringpb.Group, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.Group), nil +} + +func (s *mockGroupServer) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest) (*monitoringpb.Group, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.Group), nil +} + +func (s *mockGroupServer) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteGroupRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockGroupServer) ListGroupMembers(ctx context.Context, req *monitoringpb.ListGroupMembersRequest) (*monitoringpb.ListGroupMembersResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.ListGroupMembersResponse), nil +} + +type mockMetricServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + monitoringpb.MetricServiceServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockMetricServer) ListMonitoredResourceDescriptors(ctx context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest) (*monitoringpb.ListMonitoredResourceDescriptorsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.ListMonitoredResourceDescriptorsResponse), nil +} + +func (s *mockMetricServer) GetMonitoredResourceDescriptor(ctx context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest) (*monitoredrespb.MonitoredResourceDescriptor, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoredrespb.MonitoredResourceDescriptor), nil +} + +func (s *mockMetricServer) ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest) (*monitoringpb.ListMetricDescriptorsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.ListMetricDescriptorsResponse), nil +} + +func (s *mockMetricServer) GetMetricDescriptor(ctx context.Context, req *monitoringpb.GetMetricDescriptorRequest) (*metricpb.MetricDescriptor, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*metricpb.MetricDescriptor), nil +} + +func (s *mockMetricServer) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest) (*metricpb.MetricDescriptor, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*metricpb.MetricDescriptor), nil +} + +func (s *mockMetricServer) DeleteMetricDescriptor(ctx context.Context, req *monitoringpb.DeleteMetricDescriptorRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockMetricServer) ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest) (*monitoringpb.ListTimeSeriesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, 
fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.ListTimeSeriesResponse), nil +} + +func (s *mockMetricServer) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +type mockNotificationChannelServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + monitoringpb.NotificationChannelServiceServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockNotificationChannelServer) ListNotificationChannelDescriptors(ctx context.Context, req *monitoringpb.ListNotificationChannelDescriptorsRequest) (*monitoringpb.ListNotificationChannelDescriptorsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.ListNotificationChannelDescriptorsResponse), nil +} + +func (s *mockNotificationChannelServer) GetNotificationChannelDescriptor(ctx context.Context, req *monitoringpb.GetNotificationChannelDescriptorRequest) (*monitoringpb.NotificationChannelDescriptor, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.NotificationChannelDescriptor), nil +} + +func (s *mockNotificationChannelServer) ListNotificationChannels(ctx context.Context, req *monitoringpb.ListNotificationChannelsRequest) (*monitoringpb.ListNotificationChannelsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.ListNotificationChannelsResponse), nil +} + +func (s *mockNotificationChannelServer) GetNotificationChannel(ctx context.Context, req *monitoringpb.GetNotificationChannelRequest) (*monitoringpb.NotificationChannel, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.NotificationChannel), nil +} + +func (s *mockNotificationChannelServer) CreateNotificationChannel(ctx context.Context, req *monitoringpb.CreateNotificationChannelRequest) (*monitoringpb.NotificationChannel, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := 
md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.NotificationChannel), nil +} + +func (s *mockNotificationChannelServer) UpdateNotificationChannel(ctx context.Context, req *monitoringpb.UpdateNotificationChannelRequest) (*monitoringpb.NotificationChannel, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.NotificationChannel), nil +} + +func (s *mockNotificationChannelServer) DeleteNotificationChannel(ctx context.Context, req *monitoringpb.DeleteNotificationChannelRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +type mockUptimeCheckServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + monitoringpb.UptimeCheckServiceServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockUptimeCheckServer) ListUptimeCheckConfigs(ctx context.Context, req *monitoringpb.ListUptimeCheckConfigsRequest) (*monitoringpb.ListUptimeCheckConfigsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.ListUptimeCheckConfigsResponse), nil +} + +func (s *mockUptimeCheckServer) GetUptimeCheckConfig(ctx context.Context, req *monitoringpb.GetUptimeCheckConfigRequest) (*monitoringpb.UptimeCheckConfig, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.UptimeCheckConfig), nil +} + +func (s *mockUptimeCheckServer) CreateUptimeCheckConfig(ctx context.Context, req *monitoringpb.CreateUptimeCheckConfigRequest) (*monitoringpb.UptimeCheckConfig, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.UptimeCheckConfig), nil +} + +func (s *mockUptimeCheckServer) UpdateUptimeCheckConfig(ctx context.Context, req *monitoringpb.UpdateUptimeCheckConfigRequest) (*monitoringpb.UptimeCheckConfig, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || 
!strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.UptimeCheckConfig), nil +} + +func (s *mockUptimeCheckServer) DeleteUptimeCheckConfig(ctx context.Context, req *monitoringpb.DeleteUptimeCheckConfigRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockUptimeCheckServer) ListUptimeCheckIps(ctx context.Context, req *monitoringpb.ListUptimeCheckIpsRequest) (*monitoringpb.ListUptimeCheckIpsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.ListUptimeCheckIpsResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. +var clientOpt option.ClientOption + +var ( + mockAlertPolicy mockAlertPolicyServer + mockGroup mockGroupServer + mockMetric mockMetricServer + mockNotificationChannel mockNotificationChannelServer + mockUptimeCheck mockUptimeCheckServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + monitoringpb.RegisterAlertPolicyServiceServer(serv, &mockAlertPolicy) + monitoringpb.RegisterGroupServiceServer(serv, &mockGroup) + monitoringpb.RegisterMetricServiceServer(serv, &mockMetric) + monitoringpb.RegisterNotificationChannelServiceServer(serv, &mockNotificationChannel) + monitoringpb.RegisterUptimeCheckServiceServer(serv, &mockUptimeCheck) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestAlertPolicyServiceListAlertPolicies(t *testing.T) { + var nextPageToken string = "" + var alertPoliciesElement *monitoringpb.AlertPolicy = &monitoringpb.AlertPolicy{} + var alertPolicies = []*monitoringpb.AlertPolicy{alertPoliciesElement} + var expectedResponse = &monitoringpb.ListAlertPoliciesResponse{ + NextPageToken: nextPageToken, + AlertPolicies: alertPolicies, + } + + mockAlertPolicy.err = nil + mockAlertPolicy.reqs = nil + + mockAlertPolicy.resps = append(mockAlertPolicy.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &monitoringpb.ListAlertPoliciesRequest{ + Name: formattedName, + } + + c, err := NewAlertPolicyClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListAlertPolicies(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockAlertPolicy.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.AlertPolicies[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = 
proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestAlertPolicyServiceListAlertPoliciesError(t *testing.T) { + errCode := codes.PermissionDenied + mockAlertPolicy.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &monitoringpb.ListAlertPoliciesRequest{ + Name: formattedName, + } + + c, err := NewAlertPolicyClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListAlertPolicies(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestAlertPolicyServiceGetAlertPolicy(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var expectedResponse = &monitoringpb.AlertPolicy{ + Name: name2, + DisplayName: displayName, + } + + mockAlertPolicy.err = nil + mockAlertPolicy.reqs = nil + + mockAlertPolicy.resps = append(mockAlertPolicy.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/alertPolicies/%s", "[PROJECT]", "[ALERT_POLICY]") + var request = &monitoringpb.GetAlertPolicyRequest{ + Name: formattedName, + } + + c, err := NewAlertPolicyClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetAlertPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockAlertPolicy.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestAlertPolicyServiceGetAlertPolicyError(t *testing.T) { + errCode := codes.PermissionDenied + mockAlertPolicy.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/alertPolicies/%s", "[PROJECT]", "[ALERT_POLICY]") + var request = &monitoringpb.GetAlertPolicyRequest{ + Name: formattedName, + } + + c, err := NewAlertPolicyClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetAlertPolicy(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestAlertPolicyServiceCreateAlertPolicy(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var expectedResponse = &monitoringpb.AlertPolicy{ + Name: name2, + DisplayName: displayName, + } + + mockAlertPolicy.err = nil + mockAlertPolicy.reqs = nil + + mockAlertPolicy.resps = append(mockAlertPolicy.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var alertPolicy *monitoringpb.AlertPolicy = &monitoringpb.AlertPolicy{} + var request = &monitoringpb.CreateAlertPolicyRequest{ + Name: formattedName, + AlertPolicy: alertPolicy, + } + + c, err := NewAlertPolicyClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateAlertPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockAlertPolicy.reqs[0]; 
!proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestAlertPolicyServiceCreateAlertPolicyError(t *testing.T) { + errCode := codes.PermissionDenied + mockAlertPolicy.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var alertPolicy *monitoringpb.AlertPolicy = &monitoringpb.AlertPolicy{} + var request = &monitoringpb.CreateAlertPolicyRequest{ + Name: formattedName, + AlertPolicy: alertPolicy, + } + + c, err := NewAlertPolicyClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateAlertPolicy(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestAlertPolicyServiceDeleteAlertPolicy(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockAlertPolicy.err = nil + mockAlertPolicy.reqs = nil + + mockAlertPolicy.resps = append(mockAlertPolicy.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/alertPolicies/%s", "[PROJECT]", "[ALERT_POLICY]") + var request = &monitoringpb.DeleteAlertPolicyRequest{ + Name: formattedName, + } + + c, err := NewAlertPolicyClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteAlertPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockAlertPolicy.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestAlertPolicyServiceDeleteAlertPolicyError(t *testing.T) { + errCode := codes.PermissionDenied + mockAlertPolicy.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/alertPolicies/%s", "[PROJECT]", "[ALERT_POLICY]") + var request = &monitoringpb.DeleteAlertPolicyRequest{ + Name: formattedName, + } + + c, err := NewAlertPolicyClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteAlertPolicy(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestAlertPolicyServiceUpdateAlertPolicy(t *testing.T) { + var name string = "name3373707" + var displayName string = "displayName1615086568" + var expectedResponse = &monitoringpb.AlertPolicy{ + Name: name, + DisplayName: displayName, + } + + mockAlertPolicy.err = nil + mockAlertPolicy.reqs = nil + + mockAlertPolicy.resps = append(mockAlertPolicy.resps[:0], expectedResponse) + + var alertPolicy *monitoringpb.AlertPolicy = &monitoringpb.AlertPolicy{} + var request = &monitoringpb.UpdateAlertPolicyRequest{ + AlertPolicy: alertPolicy, + } + + c, err := NewAlertPolicyClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateAlertPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockAlertPolicy.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) 
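+ // Editor's note on the failure message above: the stray ")" inside the
+ // "want %q)" format string is reproduced verbatim from the GAPIC
+ // generator's output and appears in every generated test in this file;
+ // it only affects the wording of the error message, not the
+ // proto.Equal comparison itself.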
+ } +} + +func TestAlertPolicyServiceUpdateAlertPolicyError(t *testing.T) { + errCode := codes.PermissionDenied + mockAlertPolicy.err = gstatus.Error(errCode, "test error") + + var alertPolicy *monitoringpb.AlertPolicy = &monitoringpb.AlertPolicy{} + var request = &monitoringpb.UpdateAlertPolicyRequest{ + AlertPolicy: alertPolicy, + } + + c, err := NewAlertPolicyClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateAlertPolicy(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestGroupServiceListGroups(t *testing.T) { + var nextPageToken string = "" + var groupElement *monitoringpb.Group = &monitoringpb.Group{} + var group = []*monitoringpb.Group{groupElement} + var expectedResponse = &monitoringpb.ListGroupsResponse{ + NextPageToken: nextPageToken, + Group: group, + } + + mockGroup.err = nil + mockGroup.reqs = nil + + mockGroup.resps = append(mockGroup.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &monitoringpb.ListGroupsRequest{ + Name: formattedName, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListGroups(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockGroup.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Group[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestGroupServiceListGroupsError(t *testing.T) { + errCode := codes.PermissionDenied + mockGroup.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &monitoringpb.ListGroupsRequest{ + Name: formattedName, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListGroups(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestGroupServiceGetGroup(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var parentName string = "parentName1015022848" + var filter string = "filter-1274492040" + var isCluster bool = false + var expectedResponse = &monitoringpb.Group{ + Name: name2, + DisplayName: displayName, + ParentName: parentName, + Filter: filter, + IsCluster: isCluster, + } + + mockGroup.err = nil + mockGroup.reqs = nil + + mockGroup.resps = append(mockGroup.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/groups/%s", "[PROJECT]", "[GROUP]") + var request = &monitoringpb.GetGroupRequest{ + Name: formattedName, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetGroup(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockGroup.reqs[0]; 
!proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestGroupServiceGetGroupError(t *testing.T) { + errCode := codes.PermissionDenied + mockGroup.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/groups/%s", "[PROJECT]", "[GROUP]") + var request = &monitoringpb.GetGroupRequest{ + Name: formattedName, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetGroup(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestGroupServiceCreateGroup(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var parentName string = "parentName1015022848" + var filter string = "filter-1274492040" + var isCluster bool = false + var expectedResponse = &monitoringpb.Group{ + Name: name2, + DisplayName: displayName, + ParentName: parentName, + Filter: filter, + IsCluster: isCluster, + } + + mockGroup.err = nil + mockGroup.reqs = nil + + mockGroup.resps = append(mockGroup.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var group *monitoringpb.Group = &monitoringpb.Group{} + var request = &monitoringpb.CreateGroupRequest{ + Name: formattedName, + Group: group, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateGroup(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockGroup.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestGroupServiceCreateGroupError(t *testing.T) { + errCode := codes.PermissionDenied + mockGroup.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var group *monitoringpb.Group = &monitoringpb.Group{} + var request = &monitoringpb.CreateGroupRequest{ + Name: formattedName, + Group: group, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateGroup(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestGroupServiceUpdateGroup(t *testing.T) { + var name string = "name3373707" + var displayName string = "displayName1615086568" + var parentName string = "parentName1015022848" + var filter string = "filter-1274492040" + var isCluster bool = false + var expectedResponse = &monitoringpb.Group{ + Name: name, + DisplayName: displayName, + ParentName: parentName, + Filter: filter, + IsCluster: isCluster, + } + + mockGroup.err = nil + mockGroup.reqs = nil + + mockGroup.resps = append(mockGroup.resps[:0], expectedResponse) + + var group *monitoringpb.Group = &monitoringpb.Group{} + var request = &monitoringpb.UpdateGroupRequest{ + Group: group, + } + + c, err := 
NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateGroup(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockGroup.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestGroupServiceUpdateGroupError(t *testing.T) { + errCode := codes.PermissionDenied + mockGroup.err = gstatus.Error(errCode, "test error") + + var group *monitoringpb.Group = &monitoringpb.Group{} + var request = &monitoringpb.UpdateGroupRequest{ + Group: group, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateGroup(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestGroupServiceDeleteGroup(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockGroup.err = nil + mockGroup.reqs = nil + + mockGroup.resps = append(mockGroup.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/groups/%s", "[PROJECT]", "[GROUP]") + var request = &monitoringpb.DeleteGroupRequest{ + Name: formattedName, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteGroup(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockGroup.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestGroupServiceDeleteGroupError(t *testing.T) { + errCode := codes.PermissionDenied + mockGroup.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/groups/%s", "[PROJECT]", "[GROUP]") + var request = &monitoringpb.DeleteGroupRequest{ + Name: formattedName, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteGroup(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestGroupServiceListGroupMembers(t *testing.T) { + var nextPageToken string = "" + var totalSize int32 = 705419236 + var membersElement *monitoredrespb.MonitoredResource = &monitoredrespb.MonitoredResource{} + var members = []*monitoredrespb.MonitoredResource{membersElement} + var expectedResponse = &monitoringpb.ListGroupMembersResponse{ + NextPageToken: nextPageToken, + TotalSize: totalSize, + Members: members, + } + + mockGroup.err = nil + mockGroup.reqs = nil + + mockGroup.resps = append(mockGroup.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/groups/%s", "[PROJECT]", "[GROUP]") + var request = &monitoringpb.ListGroupMembersRequest{ + Name: formattedName, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListGroupMembers(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockGroup.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want 
:= (interface{})(expectedResponse.Members[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestGroupServiceListGroupMembersError(t *testing.T) { + errCode := codes.PermissionDenied + mockGroup.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/groups/%s", "[PROJECT]", "[GROUP]") + var request = &monitoringpb.ListGroupMembersRequest{ + Name: formattedName, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListGroupMembers(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricServiceListMonitoredResourceDescriptors(t *testing.T) { + var nextPageToken string = "" + var resourceDescriptorsElement *monitoredrespb.MonitoredResourceDescriptor = &monitoredrespb.MonitoredResourceDescriptor{} + var resourceDescriptors = []*monitoredrespb.MonitoredResourceDescriptor{resourceDescriptorsElement} + var expectedResponse = &monitoringpb.ListMonitoredResourceDescriptorsResponse{ + NextPageToken: nextPageToken, + ResourceDescriptors: resourceDescriptors, + } + + mockMetric.err = nil + mockMetric.reqs = nil + + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &monitoringpb.ListMonitoredResourceDescriptorsRequest{ + Name: formattedName, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListMonitoredResourceDescriptors(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.ResourceDescriptors[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestMetricServiceListMonitoredResourceDescriptorsError(t *testing.T) { + errCode := codes.PermissionDenied + mockMetric.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &monitoringpb.ListMonitoredResourceDescriptorsRequest{ + Name: formattedName, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListMonitoredResourceDescriptors(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricServiceGetMonitoredResourceDescriptor(t *testing.T) { + var name2 string = "name2-1052831874" + var type_ string = "type3575610" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &monitoredrespb.MonitoredResourceDescriptor{ + Name: name2, + Type: 
type_, + DisplayName: displayName, + Description: description, + } + + mockMetric.err = nil + mockMetric.reqs = nil + + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/monitoredResourceDescriptors/%s", "[PROJECT]", "[MONITORED_RESOURCE_DESCRIPTOR]") + var request = &monitoringpb.GetMonitoredResourceDescriptorRequest{ + Name: formattedName, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetMonitoredResourceDescriptor(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestMetricServiceGetMonitoredResourceDescriptorError(t *testing.T) { + errCode := codes.PermissionDenied + mockMetric.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/monitoredResourceDescriptors/%s", "[PROJECT]", "[MONITORED_RESOURCE_DESCRIPTOR]") + var request = &monitoringpb.GetMonitoredResourceDescriptorRequest{ + Name: formattedName, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetMonitoredResourceDescriptor(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricServiceListMetricDescriptors(t *testing.T) { + var nextPageToken string = "" + var metricDescriptorsElement *metricpb.MetricDescriptor = &metricpb.MetricDescriptor{} + var metricDescriptors = []*metricpb.MetricDescriptor{metricDescriptorsElement} + var expectedResponse = &monitoringpb.ListMetricDescriptorsResponse{ + NextPageToken: nextPageToken, + MetricDescriptors: metricDescriptors, + } + + mockMetric.err = nil + mockMetric.reqs = nil + + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &monitoringpb.ListMetricDescriptorsRequest{ + Name: formattedName, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListMetricDescriptors(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.MetricDescriptors[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestMetricServiceListMetricDescriptorsError(t *testing.T) { + errCode := codes.PermissionDenied + mockMetric.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &monitoringpb.ListMetricDescriptorsRequest{ + Name: formattedName, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListMetricDescriptors(context.Background(), request).Next() + + if 
st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricServiceGetMetricDescriptor(t *testing.T) { + var name2 string = "name2-1052831874" + var type_ string = "type3575610" + var unit string = "unit3594628" + var description string = "description-1724546052" + var displayName string = "displayName1615086568" + var expectedResponse = &metricpb.MetricDescriptor{ + Name: name2, + Type: type_, + Unit: unit, + Description: description, + DisplayName: displayName, + } + + mockMetric.err = nil + mockMetric.reqs = nil + + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/metricDescriptors/%s", "[PROJECT]", "[METRIC_DESCRIPTOR]") + var request = &monitoringpb.GetMetricDescriptorRequest{ + Name: formattedName, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetMetricDescriptor(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestMetricServiceGetMetricDescriptorError(t *testing.T) { + errCode := codes.PermissionDenied + mockMetric.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/metricDescriptors/%s", "[PROJECT]", "[METRIC_DESCRIPTOR]") + var request = &monitoringpb.GetMetricDescriptorRequest{ + Name: formattedName, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetMetricDescriptor(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricServiceCreateMetricDescriptor(t *testing.T) { + var name2 string = "name2-1052831874" + var type_ string = "type3575610" + var unit string = "unit3594628" + var description string = "description-1724546052" + var displayName string = "displayName1615086568" + var expectedResponse = &metricpb.MetricDescriptor{ + Name: name2, + Type: type_, + Unit: unit, + Description: description, + DisplayName: displayName, + } + + mockMetric.err = nil + mockMetric.reqs = nil + + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var metricDescriptor *metricpb.MetricDescriptor = &metricpb.MetricDescriptor{} + var request = &monitoringpb.CreateMetricDescriptorRequest{ + Name: formattedName, + MetricDescriptor: metricDescriptor, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateMetricDescriptor(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestMetricServiceCreateMetricDescriptorError(t *testing.T) { + errCode := 
codes.PermissionDenied + mockMetric.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var metricDescriptor *metricpb.MetricDescriptor = &metricpb.MetricDescriptor{} + var request = &monitoringpb.CreateMetricDescriptorRequest{ + Name: formattedName, + MetricDescriptor: metricDescriptor, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateMetricDescriptor(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricServiceDeleteMetricDescriptor(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockMetric.err = nil + mockMetric.reqs = nil + + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/metricDescriptors/%s", "[PROJECT]", "[METRIC_DESCRIPTOR]") + var request = &monitoringpb.DeleteMetricDescriptorRequest{ + Name: formattedName, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteMetricDescriptor(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestMetricServiceDeleteMetricDescriptorError(t *testing.T) { + errCode := codes.PermissionDenied + mockMetric.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/metricDescriptors/%s", "[PROJECT]", "[METRIC_DESCRIPTOR]") + var request = &monitoringpb.DeleteMetricDescriptorRequest{ + Name: formattedName, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteMetricDescriptor(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestMetricServiceListTimeSeries(t *testing.T) { + var nextPageToken string = "" + var timeSeriesElement *monitoringpb.TimeSeries = &monitoringpb.TimeSeries{} + var timeSeries = []*monitoringpb.TimeSeries{timeSeriesElement} + var expectedResponse = &monitoringpb.ListTimeSeriesResponse{ + NextPageToken: nextPageToken, + TimeSeries: timeSeries, + } + + mockMetric.err = nil + mockMetric.reqs = nil + + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var filter string = "filter-1274492040" + var interval *monitoringpb.TimeInterval = &monitoringpb.TimeInterval{} + var view monitoringpb.ListTimeSeriesRequest_TimeSeriesView = monitoringpb.ListTimeSeriesRequest_FULL + var request = &monitoringpb.ListTimeSeriesRequest{ + Name: formattedName, + Filter: filter, + Interval: interval, + View: view, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTimeSeries(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.TimeSeries[0]) + 
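+ // The want/got pair is widened to interface{} so the type switch below
+ // can handle both shapes the generator emits for page elements: proto
+ // messages, which must be compared with proto.Equal (== is unreliable
+ // for messages), and plain scalar elements, which are compared with ==.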
got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestMetricServiceListTimeSeriesError(t *testing.T) { + errCode := codes.PermissionDenied + mockMetric.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var filter string = "filter-1274492040" + var interval *monitoringpb.TimeInterval = &monitoringpb.TimeInterval{} + var view monitoringpb.ListTimeSeriesRequest_TimeSeriesView = monitoringpb.ListTimeSeriesRequest_FULL + var request = &monitoringpb.ListTimeSeriesRequest{ + Name: formattedName, + Filter: filter, + Interval: interval, + View: view, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTimeSeries(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricServiceCreateTimeSeries(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockMetric.err = nil + mockMetric.reqs = nil + + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var timeSeries []*monitoringpb.TimeSeries = nil + var request = &monitoringpb.CreateTimeSeriesRequest{ + Name: formattedName, + TimeSeries: timeSeries, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.CreateTimeSeries(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestMetricServiceCreateTimeSeriesError(t *testing.T) { + errCode := codes.PermissionDenied + mockMetric.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var timeSeries []*monitoringpb.TimeSeries = nil + var request = &monitoringpb.CreateTimeSeriesRequest{ + Name: formattedName, + TimeSeries: timeSeries, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.CreateTimeSeries(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestNotificationChannelServiceListNotificationChannelDescriptors(t *testing.T) { + var nextPageToken string = "" + var channelDescriptorsElement *monitoringpb.NotificationChannelDescriptor = &monitoringpb.NotificationChannelDescriptor{} + var channelDescriptors = []*monitoringpb.NotificationChannelDescriptor{channelDescriptorsElement} + var expectedResponse = &monitoringpb.ListNotificationChannelDescriptorsResponse{ + NextPageToken: nextPageToken, + ChannelDescriptors: channelDescriptors, + } + + mockNotificationChannel.err = nil + mockNotificationChannel.reqs = nil + + mockNotificationChannel.resps = append(mockNotificationChannel.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = 
&monitoringpb.ListNotificationChannelDescriptorsRequest{ + Name: formattedName, + } + + c, err := NewNotificationChannelClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListNotificationChannelDescriptors(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockNotificationChannel.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.ChannelDescriptors[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestNotificationChannelServiceListNotificationChannelDescriptorsError(t *testing.T) { + errCode := codes.PermissionDenied + mockNotificationChannel.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &monitoringpb.ListNotificationChannelDescriptorsRequest{ + Name: formattedName, + } + + c, err := NewNotificationChannelClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListNotificationChannelDescriptors(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestNotificationChannelServiceGetNotificationChannelDescriptor(t *testing.T) { + var name2 string = "name2-1052831874" + var type_ string = "type3575610" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &monitoringpb.NotificationChannelDescriptor{ + Name: name2, + Type: type_, + DisplayName: displayName, + Description: description, + } + + mockNotificationChannel.err = nil + mockNotificationChannel.reqs = nil + + mockNotificationChannel.resps = append(mockNotificationChannel.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/notificationChannelDescriptors/%s", "[PROJECT]", "[CHANNEL_DESCRIPTOR]") + var request = &monitoringpb.GetNotificationChannelDescriptorRequest{ + Name: formattedName, + } + + c, err := NewNotificationChannelClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetNotificationChannelDescriptor(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockNotificationChannel.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestNotificationChannelServiceGetNotificationChannelDescriptorError(t *testing.T) { + errCode := codes.PermissionDenied + mockNotificationChannel.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/notificationChannelDescriptors/%s", "[PROJECT]", "[CHANNEL_DESCRIPTOR]") + var request = &monitoringpb.GetNotificationChannelDescriptorRequest{ + Name: formattedName, + } + + c, err := NewNotificationChannelClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetNotificationChannelDescriptor(context.Background(), request) + + if st, ok := 
gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestNotificationChannelServiceListNotificationChannels(t *testing.T) { + var nextPageToken string = "" + var notificationChannelsElement *monitoringpb.NotificationChannel = &monitoringpb.NotificationChannel{} + var notificationChannels = []*monitoringpb.NotificationChannel{notificationChannelsElement} + var expectedResponse = &monitoringpb.ListNotificationChannelsResponse{ + NextPageToken: nextPageToken, + NotificationChannels: notificationChannels, + } + + mockNotificationChannel.err = nil + mockNotificationChannel.reqs = nil + + mockNotificationChannel.resps = append(mockNotificationChannel.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &monitoringpb.ListNotificationChannelsRequest{ + Name: formattedName, + } + + c, err := NewNotificationChannelClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListNotificationChannels(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockNotificationChannel.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.NotificationChannels[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestNotificationChannelServiceListNotificationChannelsError(t *testing.T) { + errCode := codes.PermissionDenied + mockNotificationChannel.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &monitoringpb.ListNotificationChannelsRequest{ + Name: formattedName, + } + + c, err := NewNotificationChannelClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListNotificationChannels(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestNotificationChannelServiceGetNotificationChannel(t *testing.T) { + var type_ string = "type3575610" + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &monitoringpb.NotificationChannel{ + Type: type_, + Name: name2, + DisplayName: displayName, + Description: description, + } + + mockNotificationChannel.err = nil + mockNotificationChannel.reqs = nil + + mockNotificationChannel.resps = append(mockNotificationChannel.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/notificationChannels/%s", "[PROJECT]", "[NOTIFICATION_CHANNEL]") + var request = &monitoringpb.GetNotificationChannelRequest{ + Name: formattedName, + } + + c, err := NewNotificationChannelClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetNotificationChannel(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockNotificationChannel.reqs[0]; !proto.Equal(want, got) { + 
t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestNotificationChannelServiceGetNotificationChannelError(t *testing.T) { + errCode := codes.PermissionDenied + mockNotificationChannel.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/notificationChannels/%s", "[PROJECT]", "[NOTIFICATION_CHANNEL]") + var request = &monitoringpb.GetNotificationChannelRequest{ + Name: formattedName, + } + + c, err := NewNotificationChannelClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetNotificationChannel(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestNotificationChannelServiceCreateNotificationChannel(t *testing.T) { + var type_ string = "type3575610" + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &monitoringpb.NotificationChannel{ + Type: type_, + Name: name2, + DisplayName: displayName, + Description: description, + } + + mockNotificationChannel.err = nil + mockNotificationChannel.reqs = nil + + mockNotificationChannel.resps = append(mockNotificationChannel.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var notificationChannel *monitoringpb.NotificationChannel = &monitoringpb.NotificationChannel{} + var request = &monitoringpb.CreateNotificationChannelRequest{ + Name: formattedName, + NotificationChannel: notificationChannel, + } + + c, err := NewNotificationChannelClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateNotificationChannel(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockNotificationChannel.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestNotificationChannelServiceCreateNotificationChannelError(t *testing.T) { + errCode := codes.PermissionDenied + mockNotificationChannel.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var notificationChannel *monitoringpb.NotificationChannel = &monitoringpb.NotificationChannel{} + var request = &monitoringpb.CreateNotificationChannelRequest{ + Name: formattedName, + NotificationChannel: notificationChannel, + } + + c, err := NewNotificationChannelClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateNotificationChannel(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestNotificationChannelServiceUpdateNotificationChannel(t *testing.T) { + var type_ string = "type3575610" + var name string = "name3373707" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = 
&monitoringpb.NotificationChannel{ + Type: type_, + Name: name, + DisplayName: displayName, + Description: description, + } + + mockNotificationChannel.err = nil + mockNotificationChannel.reqs = nil + + mockNotificationChannel.resps = append(mockNotificationChannel.resps[:0], expectedResponse) + + var notificationChannel *monitoringpb.NotificationChannel = &monitoringpb.NotificationChannel{} + var request = &monitoringpb.UpdateNotificationChannelRequest{ + NotificationChannel: notificationChannel, + } + + c, err := NewNotificationChannelClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateNotificationChannel(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockNotificationChannel.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestNotificationChannelServiceUpdateNotificationChannelError(t *testing.T) { + errCode := codes.PermissionDenied + mockNotificationChannel.err = gstatus.Error(errCode, "test error") + + var notificationChannel *monitoringpb.NotificationChannel = &monitoringpb.NotificationChannel{} + var request = &monitoringpb.UpdateNotificationChannelRequest{ + NotificationChannel: notificationChannel, + } + + c, err := NewNotificationChannelClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateNotificationChannel(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestNotificationChannelServiceDeleteNotificationChannel(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockNotificationChannel.err = nil + mockNotificationChannel.reqs = nil + + mockNotificationChannel.resps = append(mockNotificationChannel.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/notificationChannels/%s", "[PROJECT]", "[NOTIFICATION_CHANNEL]") + var request = &monitoringpb.DeleteNotificationChannelRequest{ + Name: formattedName, + } + + c, err := NewNotificationChannelClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteNotificationChannel(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockNotificationChannel.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestNotificationChannelServiceDeleteNotificationChannelError(t *testing.T) { + errCode := codes.PermissionDenied + mockNotificationChannel.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/notificationChannels/%s", "[PROJECT]", "[NOTIFICATION_CHANNEL]") + var request = &monitoringpb.DeleteNotificationChannelRequest{ + Name: formattedName, + } + + c, err := NewNotificationChannelClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteNotificationChannel(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestUptimeCheckServiceListUptimeCheckConfigs(t *testing.T) { 
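+ // Each generated test in this file follows the same shape: reset the
+ // shared mock's err/reqs fields, seed resps with one canned response,
+ // call the client method over the in-process gRPC connection that
+ // TestMain wired up via clientOpt, then assert that the mock captured
+ // exactly the request that was sent and that the returned value equals
+ // the canned response (the *Error variants instead set mock.err and
+ // assert the gRPC status code round-trips).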
+ var nextPageToken string = "" + var uptimeCheckConfigsElement *monitoringpb.UptimeCheckConfig = &monitoringpb.UptimeCheckConfig{} + var uptimeCheckConfigs = []*monitoringpb.UptimeCheckConfig{uptimeCheckConfigsElement} + var expectedResponse = &monitoringpb.ListUptimeCheckConfigsResponse{ + NextPageToken: nextPageToken, + UptimeCheckConfigs: uptimeCheckConfigs, + } + + mockUptimeCheck.err = nil + mockUptimeCheck.reqs = nil + + mockUptimeCheck.resps = append(mockUptimeCheck.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &monitoringpb.ListUptimeCheckConfigsRequest{ + Parent: formattedParent, + } + + c, err := NewUptimeCheckClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListUptimeCheckConfigs(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockUptimeCheck.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.UptimeCheckConfigs[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestUptimeCheckServiceListUptimeCheckConfigsError(t *testing.T) { + errCode := codes.PermissionDenied + mockUptimeCheck.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &monitoringpb.ListUptimeCheckConfigsRequest{ + Parent: formattedParent, + } + + c, err := NewUptimeCheckClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListUptimeCheckConfigs(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestUptimeCheckServiceGetUptimeCheckConfig(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var expectedResponse = &monitoringpb.UptimeCheckConfig{ + Name: name2, + DisplayName: displayName, + } + + mockUptimeCheck.err = nil + mockUptimeCheck.reqs = nil + + mockUptimeCheck.resps = append(mockUptimeCheck.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/uptimeCheckConfigs/%s", "[PROJECT]", "[UPTIME_CHECK_CONFIG]") + var request = &monitoringpb.GetUptimeCheckConfigRequest{ + Name: formattedName, + } + + c, err := NewUptimeCheckClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetUptimeCheckConfig(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockUptimeCheck.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestUptimeCheckServiceGetUptimeCheckConfigError(t *testing.T) { + errCode := codes.PermissionDenied + mockUptimeCheck.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/uptimeCheckConfigs/%s", "[PROJECT]", "[UPTIME_CHECK_CONFIG]") + var request = &monitoringpb.GetUptimeCheckConfigRequest{ + Name: formattedName, + } + + c, err := 
NewUptimeCheckClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetUptimeCheckConfig(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestUptimeCheckServiceCreateUptimeCheckConfig(t *testing.T) { + var name string = "name3373707" + var displayName string = "displayName1615086568" + var expectedResponse = &monitoringpb.UptimeCheckConfig{ + Name: name, + DisplayName: displayName, + } + + mockUptimeCheck.err = nil + mockUptimeCheck.reqs = nil + + mockUptimeCheck.resps = append(mockUptimeCheck.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var uptimeCheckConfig *monitoringpb.UptimeCheckConfig = &monitoringpb.UptimeCheckConfig{} + var request = &monitoringpb.CreateUptimeCheckConfigRequest{ + Parent: formattedParent, + UptimeCheckConfig: uptimeCheckConfig, + } + + c, err := NewUptimeCheckClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateUptimeCheckConfig(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockUptimeCheck.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestUptimeCheckServiceCreateUptimeCheckConfigError(t *testing.T) { + errCode := codes.PermissionDenied + mockUptimeCheck.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var uptimeCheckConfig *monitoringpb.UptimeCheckConfig = &monitoringpb.UptimeCheckConfig{} + var request = &monitoringpb.CreateUptimeCheckConfigRequest{ + Parent: formattedParent, + UptimeCheckConfig: uptimeCheckConfig, + } + + c, err := NewUptimeCheckClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateUptimeCheckConfig(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestUptimeCheckServiceUpdateUptimeCheckConfig(t *testing.T) { + var name string = "name3373707" + var displayName string = "displayName1615086568" + var expectedResponse = &monitoringpb.UptimeCheckConfig{ + Name: name, + DisplayName: displayName, + } + + mockUptimeCheck.err = nil + mockUptimeCheck.reqs = nil + + mockUptimeCheck.resps = append(mockUptimeCheck.resps[:0], expectedResponse) + + var uptimeCheckConfig *monitoringpb.UptimeCheckConfig = &monitoringpb.UptimeCheckConfig{} + var request = &monitoringpb.UpdateUptimeCheckConfigRequest{ + UptimeCheckConfig: uptimeCheckConfig, + } + + c, err := NewUptimeCheckClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateUptimeCheckConfig(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockUptimeCheck.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func 
TestUptimeCheckServiceUpdateUptimeCheckConfigError(t *testing.T) { + errCode := codes.PermissionDenied + mockUptimeCheck.err = gstatus.Error(errCode, "test error") + + var uptimeCheckConfig *monitoringpb.UptimeCheckConfig = &monitoringpb.UptimeCheckConfig{} + var request = &monitoringpb.UpdateUptimeCheckConfigRequest{ + UptimeCheckConfig: uptimeCheckConfig, + } + + c, err := NewUptimeCheckClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateUptimeCheckConfig(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestUptimeCheckServiceDeleteUptimeCheckConfig(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockUptimeCheck.err = nil + mockUptimeCheck.reqs = nil + + mockUptimeCheck.resps = append(mockUptimeCheck.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/uptimeCheckConfigs/%s", "[PROJECT]", "[UPTIME_CHECK_CONFIG]") + var request = &monitoringpb.DeleteUptimeCheckConfigRequest{ + Name: formattedName, + } + + c, err := NewUptimeCheckClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteUptimeCheckConfig(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockUptimeCheck.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestUptimeCheckServiceDeleteUptimeCheckConfigError(t *testing.T) { + errCode := codes.PermissionDenied + mockUptimeCheck.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/uptimeCheckConfigs/%s", "[PROJECT]", "[UPTIME_CHECK_CONFIG]") + var request = &monitoringpb.DeleteUptimeCheckConfigRequest{ + Name: formattedName, + } + + c, err := NewUptimeCheckClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteUptimeCheckConfig(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestUptimeCheckServiceListUptimeCheckIps(t *testing.T) { + var nextPageToken string = "" + var uptimeCheckIpsElement *monitoringpb.UptimeCheckIp = &monitoringpb.UptimeCheckIp{} + var uptimeCheckIps = []*monitoringpb.UptimeCheckIp{uptimeCheckIpsElement} + var expectedResponse = &monitoringpb.ListUptimeCheckIpsResponse{ + NextPageToken: nextPageToken, + UptimeCheckIps: uptimeCheckIps, + } + + mockUptimeCheck.err = nil + mockUptimeCheck.reqs = nil + + mockUptimeCheck.resps = append(mockUptimeCheck.resps[:0], expectedResponse) + + var request *monitoringpb.ListUptimeCheckIpsRequest = &monitoringpb.ListUptimeCheckIpsRequest{} + + c, err := NewUptimeCheckClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListUptimeCheckIps(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockUptimeCheck.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.UptimeCheckIps[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want 
== got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestUptimeCheckServiceListUptimeCheckIpsError(t *testing.T) { + errCode := codes.PermissionDenied + mockUptimeCheck.err = gstatus.Error(errCode, "test error") + + var request *monitoringpb.ListUptimeCheckIpsRequest = &monitoringpb.ListUptimeCheckIpsRequest{} + + c, err := NewUptimeCheckClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListUptimeCheckIps(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client.go new file mode 100644 index 0000000..54fdbc6 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client.go @@ -0,0 +1,369 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package monitoring + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// NotificationChannelCallOptions contains the retry settings for each method of NotificationChannelClient. 
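+// Each field holds the call options applied to the correspondingly named RPC;
+// defaultNotificationChannelCallOptions fills them in with the retry policy
+// defined below.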
+type NotificationChannelCallOptions struct { + ListNotificationChannelDescriptors []gax.CallOption + GetNotificationChannelDescriptor []gax.CallOption + ListNotificationChannels []gax.CallOption + GetNotificationChannel []gax.CallOption + CreateNotificationChannel []gax.CallOption + UpdateNotificationChannel []gax.CallOption + DeleteNotificationChannel []gax.CallOption +} + +func defaultNotificationChannelClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("monitoring.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultNotificationChannelCallOptions() *NotificationChannelCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &NotificationChannelCallOptions{ + ListNotificationChannelDescriptors: retry[[2]string{"default", "idempotent"}], + GetNotificationChannelDescriptor: retry[[2]string{"default", "idempotent"}], + ListNotificationChannels: retry[[2]string{"default", "idempotent"}], + GetNotificationChannel: retry[[2]string{"default", "idempotent"}], + CreateNotificationChannel: retry[[2]string{"default", "non_idempotent"}], + UpdateNotificationChannel: retry[[2]string{"default", "non_idempotent"}], + DeleteNotificationChannel: retry[[2]string{"default", "idempotent"}], + } +} + +// NotificationChannelClient is a client for interacting with Stackdriver Monitoring API. +type NotificationChannelClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + notificationChannelClient monitoringpb.NotificationChannelServiceClient + + // The call options for this service. + CallOptions *NotificationChannelCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewNotificationChannelClient creates a new notification channel service client. +// +// The Notification Channel API provides access to configuration that +// controls how messages related to incidents are sent. +func NewNotificationChannelClient(ctx context.Context, opts ...option.ClientOption) (*NotificationChannelClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultNotificationChannelClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &NotificationChannelClient{ + conn: conn, + CallOptions: defaultNotificationChannelCallOptions(), + + notificationChannelClient: monitoringpb.NewNotificationChannelServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *NotificationChannelClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *NotificationChannelClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *NotificationChannelClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) 
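+	// keyval carries optional caller-supplied name/version pairs; they are
+	// placed after the Go version and before the library versions appended below.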
+ kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListNotificationChannelDescriptors lists the descriptors for supported channel types. The use of descriptors +// makes it possible for new channel types to be dynamically added. +func (c *NotificationChannelClient) ListNotificationChannelDescriptors(ctx context.Context, req *monitoringpb.ListNotificationChannelDescriptorsRequest, opts ...gax.CallOption) *NotificationChannelDescriptorIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListNotificationChannelDescriptors[0:len(c.CallOptions.ListNotificationChannelDescriptors):len(c.CallOptions.ListNotificationChannelDescriptors)], opts...) + it := &NotificationChannelDescriptorIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.NotificationChannelDescriptor, string, error) { + var resp *monitoringpb.ListNotificationChannelDescriptorsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.ListNotificationChannelDescriptors(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.ChannelDescriptors, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetNotificationChannelDescriptor gets a single channel descriptor. The descriptor indicates which fields +// are expected / permitted for a notification channel of the given type. +func (c *NotificationChannelClient) GetNotificationChannelDescriptor(ctx context.Context, req *monitoringpb.GetNotificationChannelDescriptorRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannelDescriptor, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetNotificationChannelDescriptor[0:len(c.CallOptions.GetNotificationChannelDescriptor):len(c.CallOptions.GetNotificationChannelDescriptor)], opts...) + var resp *monitoringpb.NotificationChannelDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.GetNotificationChannelDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListNotificationChannels lists the notification channels that have been created for the project. +func (c *NotificationChannelClient) ListNotificationChannels(ctx context.Context, req *monitoringpb.ListNotificationChannelsRequest, opts ...gax.CallOption) *NotificationChannelIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListNotificationChannels[0:len(c.CallOptions.ListNotificationChannels):len(c.CallOptions.ListNotificationChannels)], opts...) 
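+	// The three-index slice caps capacity at the defaults' length, so the append
+	// above copies into a fresh backing array rather than mutating the shared
+	// CallOptions slice.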
+ it := &NotificationChannelIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.NotificationChannel, string, error) { + var resp *monitoringpb.ListNotificationChannelsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.ListNotificationChannels(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.NotificationChannels, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetNotificationChannel gets a single notification channel. The channel includes the relevant +// configuration details with which the channel was created. However, the +// response may truncate or omit passwords, API keys, or other private key +// matter and thus the response may not be 100% identical to the information +// that was supplied in the call to the create method. +func (c *NotificationChannelClient) GetNotificationChannel(ctx context.Context, req *monitoringpb.GetNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetNotificationChannel[0:len(c.CallOptions.GetNotificationChannel):len(c.CallOptions.GetNotificationChannel)], opts...) + var resp *monitoringpb.NotificationChannel + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.GetNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateNotificationChannel creates a new notification channel, representing a single notification +// endpoint such as an email address, SMS number, or pagerduty service. +func (c *NotificationChannelClient) CreateNotificationChannel(ctx context.Context, req *monitoringpb.CreateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateNotificationChannel[0:len(c.CallOptions.CreateNotificationChannel):len(c.CallOptions.CreateNotificationChannel)], opts...) + var resp *monitoringpb.NotificationChannel + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.CreateNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateNotificationChannel updates a notification channel. Fields not specified in the field mask +// remain unchanged. 
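+//
+// A partial update is expressed through the request's UpdateMask. A minimal
+// sketch, assuming the FieldMask type from genproto's field_mask package:
+//
+//	req := &monitoringpb.UpdateNotificationChannelRequest{
+//		NotificationChannel: channel,
+//		UpdateMask:          &field_mask.FieldMask{Paths: []string{"display_name"}},
+//	}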
+func (c *NotificationChannelClient) UpdateNotificationChannel(ctx context.Context, req *monitoringpb.UpdateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateNotificationChannel[0:len(c.CallOptions.UpdateNotificationChannel):len(c.CallOptions.UpdateNotificationChannel)], opts...) + var resp *monitoringpb.NotificationChannel + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.UpdateNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteNotificationChannel deletes a notification channel. +func (c *NotificationChannelClient) DeleteNotificationChannel(ctx context.Context, req *monitoringpb.DeleteNotificationChannelRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteNotificationChannel[0:len(c.CallOptions.DeleteNotificationChannel):len(c.CallOptions.DeleteNotificationChannel)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.notificationChannelClient.DeleteNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// NotificationChannelDescriptorIterator manages a stream of *monitoringpb.NotificationChannelDescriptor. +type NotificationChannelDescriptorIterator struct { + items []*monitoringpb.NotificationChannelDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.NotificationChannelDescriptor, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *NotificationChannelDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *NotificationChannelDescriptorIterator) Next() (*monitoringpb.NotificationChannelDescriptor, error) { + var item *monitoringpb.NotificationChannelDescriptor + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *NotificationChannelDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *NotificationChannelDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// NotificationChannelIterator manages a stream of *monitoringpb.NotificationChannel. +type NotificationChannelIterator struct { + items []*monitoringpb.NotificationChannel + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. 
+ // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.NotificationChannel, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *NotificationChannelIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *NotificationChannelIterator) Next() (*monitoringpb.NotificationChannel, error) { + var item *monitoringpb.NotificationChannel + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *NotificationChannelIterator) bufLen() int { + return len(it.items) +} + +func (it *NotificationChannelIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client_example_test.go b/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client_example_test.go new file mode 100644 index 0000000..eab1179 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client_example_test.go @@ -0,0 +1,170 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package monitoring_test + +import ( + "cloud.google.com/go/monitoring/apiv3" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" +) + +func ExampleNewNotificationChannelClient() { + ctx := context.Background() + c, err := monitoring.NewNotificationChannelClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleNotificationChannelClient_ListNotificationChannelDescriptors() { + ctx := context.Background() + c, err := monitoring.NewNotificationChannelClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.ListNotificationChannelDescriptorsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListNotificationChannelDescriptors(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleNotificationChannelClient_GetNotificationChannelDescriptor() { + ctx := context.Background() + c, err := monitoring.NewNotificationChannelClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.GetNotificationChannelDescriptorRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetNotificationChannelDescriptor(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp +} + +func ExampleNotificationChannelClient_ListNotificationChannels() { + ctx := context.Background() + c, err := monitoring.NewNotificationChannelClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.ListNotificationChannelsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListNotificationChannels(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleNotificationChannelClient_GetNotificationChannel() { + ctx := context.Background() + c, err := monitoring.NewNotificationChannelClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.GetNotificationChannelRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetNotificationChannel(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleNotificationChannelClient_CreateNotificationChannel() { + ctx := context.Background() + c, err := monitoring.NewNotificationChannelClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.CreateNotificationChannelRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateNotificationChannel(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleNotificationChannelClient_UpdateNotificationChannel() { + ctx := context.Background() + c, err := monitoring.NewNotificationChannelClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.UpdateNotificationChannelRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateNotificationChannel(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleNotificationChannelClient_DeleteNotificationChannel() { + ctx := context.Background() + c, err := monitoring.NewNotificationChannelClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.DeleteNotificationChannelRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteNotificationChannel(ctx, req) + if err != nil { + // TODO: Handle error. + } +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/path_funcs.go b/vendor/cloud.google.com/go/monitoring/apiv3/path_funcs.go new file mode 100644 index 0000000..b2b514b --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/path_funcs.go @@ -0,0 +1,107 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monitoring + +// GroupProjectPath returns the path for the project resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s", project) +// instead. +func GroupProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// GroupGroupPath returns the path for the group resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/groups/%s", project, group) +// instead. 
+func GroupGroupPath(project, group string) string { + return "" + + "projects/" + + project + + "/groups/" + + group + + "" +} + +// MetricProjectPath returns the path for the project resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s", project) +// instead. +func MetricProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// MetricMetricDescriptorPath returns the path for the metric descriptor resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/metricDescriptors/%s", project, metricDescriptor) +// instead. +func MetricMetricDescriptorPath(project, metricDescriptor string) string { + return "" + + "projects/" + + project + + "/metricDescriptors/" + + metricDescriptor + + "" +} + +// MetricMonitoredResourceDescriptorPath returns the path for the monitored resource descriptor resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/monitoredResourceDescriptors/%s", project, monitoredResourceDescriptor) +// instead. +func MetricMonitoredResourceDescriptorPath(project, monitoredResourceDescriptor string) string { + return "" + + "projects/" + + project + + "/monitoredResourceDescriptors/" + + monitoredResourceDescriptor + + "" +} + +// UptimeCheckProjectPath returns the path for the project resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s", project) +// instead. +func UptimeCheckProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// UptimeCheckUptimeCheckConfigPath returns the path for the uptime check config resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/uptimeCheckConfigs/%s", project, uptimeCheckConfig) +// instead. +func UptimeCheckUptimeCheckConfigPath(project, uptimeCheckConfig string) string { + return "" + + "projects/" + + project + + "/uptimeCheckConfigs/" + + uptimeCheckConfig + + "" +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/uptime_check_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/uptime_check_client.go new file mode 100644 index 0000000..96a1f12 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/uptime_check_client.go @@ -0,0 +1,355 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package monitoring + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// UptimeCheckCallOptions contains the retry settings for each method of UptimeCheckClient. 
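+// The defaults can be overridden per call by passing extra gax.CallOption
+// values to a method, or per client by replacing these slices after
+// NewUptimeCheckClient returns.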
+type UptimeCheckCallOptions struct { + ListUptimeCheckConfigs []gax.CallOption + GetUptimeCheckConfig []gax.CallOption + CreateUptimeCheckConfig []gax.CallOption + UpdateUptimeCheckConfig []gax.CallOption + DeleteUptimeCheckConfig []gax.CallOption + ListUptimeCheckIps []gax.CallOption +} + +func defaultUptimeCheckClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("monitoring.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultUptimeCheckCallOptions() *UptimeCheckCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &UptimeCheckCallOptions{ + ListUptimeCheckConfigs: retry[[2]string{"default", "idempotent"}], + GetUptimeCheckConfig: retry[[2]string{"default", "idempotent"}], + CreateUptimeCheckConfig: retry[[2]string{"default", "non_idempotent"}], + UpdateUptimeCheckConfig: retry[[2]string{"default", "non_idempotent"}], + DeleteUptimeCheckConfig: retry[[2]string{"default", "idempotent"}], + ListUptimeCheckIps: retry[[2]string{"default", "idempotent"}], + } +} + +// UptimeCheckClient is a client for interacting with Stackdriver Monitoring API. +type UptimeCheckClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + uptimeCheckClient monitoringpb.UptimeCheckServiceClient + + // The call options for this service. + CallOptions *UptimeCheckCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewUptimeCheckClient creates a new uptime check service client. +// +// The UptimeCheckService API is used to manage (list, create, delete, edit) +// uptime check configurations in the Stackdriver Monitoring product. An uptime +// check is a piece of configuration that determines which resources and +// services to monitor for availability. These configurations can also be +// configured interactively by navigating to the [Cloud Console] +// (http://console.cloud.google.com), selecting the appropriate project, +// clicking on "Monitoring" on the left-hand side to navigate to Stackdriver, +// and then clicking on "Uptime". +func NewUptimeCheckClient(ctx context.Context, opts ...option.ClientOption) (*UptimeCheckClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultUptimeCheckClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &UptimeCheckClient{ + conn: conn, + CallOptions: defaultUptimeCheckCallOptions(), + + uptimeCheckClient: monitoringpb.NewUptimeCheckServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *UptimeCheckClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *UptimeCheckClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *UptimeCheckClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) 
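+	// The gapic, gax and gRPC versions appended below let the header identify
+	// every layer of the client stack.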
+ kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListUptimeCheckConfigs lists the existing valid uptime check configurations for the project, +// leaving out any invalid configurations. +func (c *UptimeCheckClient) ListUptimeCheckConfigs(ctx context.Context, req *monitoringpb.ListUptimeCheckConfigsRequest, opts ...gax.CallOption) *UptimeCheckConfigIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListUptimeCheckConfigs[0:len(c.CallOptions.ListUptimeCheckConfigs):len(c.CallOptions.ListUptimeCheckConfigs)], opts...) + it := &UptimeCheckConfigIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.UptimeCheckConfig, string, error) { + var resp *monitoringpb.ListUptimeCheckConfigsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.ListUptimeCheckConfigs(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.UptimeCheckConfigs, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetUptimeCheckConfig gets a single uptime check configuration. +func (c *UptimeCheckClient) GetUptimeCheckConfig(ctx context.Context, req *monitoringpb.GetUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetUptimeCheckConfig[0:len(c.CallOptions.GetUptimeCheckConfig):len(c.CallOptions.GetUptimeCheckConfig)], opts...) + var resp *monitoringpb.UptimeCheckConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.GetUptimeCheckConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateUptimeCheckConfig creates a new uptime check configuration. +func (c *UptimeCheckClient) CreateUptimeCheckConfig(ctx context.Context, req *monitoringpb.CreateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateUptimeCheckConfig[0:len(c.CallOptions.CreateUptimeCheckConfig):len(c.CallOptions.CreateUptimeCheckConfig)], opts...) + var resp *monitoringpb.UptimeCheckConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.CreateUptimeCheckConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateUptimeCheckConfig updates an uptime check configuration. You can either replace the entire +// configuration with a new one or replace only certain fields in the current +// configuration by specifying the fields to be updated via "updateMask". +// Returns the updated configuration. 
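+//
+// For example, to replace only the display name (a sketch, assuming genproto's
+// field_mask package):
+//
+//	req.UpdateMask = &field_mask.FieldMask{Paths: []string{"display_name"}}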
+func (c *UptimeCheckClient) UpdateUptimeCheckConfig(ctx context.Context, req *monitoringpb.UpdateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateUptimeCheckConfig[0:len(c.CallOptions.UpdateUptimeCheckConfig):len(c.CallOptions.UpdateUptimeCheckConfig)], opts...) + var resp *monitoringpb.UptimeCheckConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.UpdateUptimeCheckConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteUptimeCheckConfig deletes an uptime check configuration. Note that this method will fail +// if the uptime check configuration is referenced by an alert policy or +// other dependent configs that would be rendered invalid by the deletion. +func (c *UptimeCheckClient) DeleteUptimeCheckConfig(ctx context.Context, req *monitoringpb.DeleteUptimeCheckConfigRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteUptimeCheckConfig[0:len(c.CallOptions.DeleteUptimeCheckConfig):len(c.CallOptions.DeleteUptimeCheckConfig)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.uptimeCheckClient.DeleteUptimeCheckConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ListUptimeCheckIps returns the list of IPs that checkers run from +func (c *UptimeCheckClient) ListUptimeCheckIps(ctx context.Context, req *monitoringpb.ListUptimeCheckIpsRequest, opts ...gax.CallOption) *UptimeCheckIpIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListUptimeCheckIps[0:len(c.CallOptions.ListUptimeCheckIps):len(c.CallOptions.ListUptimeCheckIps)], opts...) + it := &UptimeCheckIpIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.UptimeCheckIp, string, error) { + var resp *monitoringpb.ListUptimeCheckIpsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.ListUptimeCheckIps(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.UptimeCheckIps, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// UptimeCheckConfigIterator manages a stream of *monitoringpb.UptimeCheckConfig. +type UptimeCheckConfigIterator struct { + items []*monitoringpb.UptimeCheckConfig + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. 
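+	// Most callers should iterate with Next instead of calling InternalFetch
+	// directly.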
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.UptimeCheckConfig, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *UptimeCheckConfigIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *UptimeCheckConfigIterator) Next() (*monitoringpb.UptimeCheckConfig, error) { + var item *monitoringpb.UptimeCheckConfig + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *UptimeCheckConfigIterator) bufLen() int { + return len(it.items) +} + +func (it *UptimeCheckConfigIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// UptimeCheckIpIterator manages a stream of *monitoringpb.UptimeCheckIp. +type UptimeCheckIpIterator struct { + items []*monitoringpb.UptimeCheckIp + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.UptimeCheckIp, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *UptimeCheckIpIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *UptimeCheckIpIterator) Next() (*monitoringpb.UptimeCheckIp, error) { + var item *monitoringpb.UptimeCheckIp + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *UptimeCheckIpIterator) bufLen() int { + return len(it.items) +} + +func (it *UptimeCheckIpIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/uptime_check_client_example_test.go b/vendor/cloud.google.com/go/monitoring/apiv3/uptime_check_client_example_test.go new file mode 100644 index 0000000..49ee0d0 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/uptime_check_client_example_test.go @@ -0,0 +1,152 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package monitoring_test + +import ( + "cloud.google.com/go/monitoring/apiv3" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" +) + +func ExampleNewUptimeCheckClient() { + ctx := context.Background() + c, err := monitoring.NewUptimeCheckClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleUptimeCheckClient_ListUptimeCheckConfigs() { + ctx := context.Background() + c, err := monitoring.NewUptimeCheckClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.ListUptimeCheckConfigsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListUptimeCheckConfigs(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleUptimeCheckClient_GetUptimeCheckConfig() { + ctx := context.Background() + c, err := monitoring.NewUptimeCheckClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.GetUptimeCheckConfigRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetUptimeCheckConfig(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleUptimeCheckClient_CreateUptimeCheckConfig() { + ctx := context.Background() + c, err := monitoring.NewUptimeCheckClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.CreateUptimeCheckConfigRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateUptimeCheckConfig(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleUptimeCheckClient_UpdateUptimeCheckConfig() { + ctx := context.Background() + c, err := monitoring.NewUptimeCheckClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.UpdateUptimeCheckConfigRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateUptimeCheckConfig(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleUptimeCheckClient_DeleteUptimeCheckConfig() { + ctx := context.Background() + c, err := monitoring.NewUptimeCheckClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.DeleteUptimeCheckConfigRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteUptimeCheckConfig(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleUptimeCheckClient_ListUptimeCheckIps() { + ctx := context.Background() + c, err := monitoring.NewUptimeCheckClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.ListUptimeCheckIpsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListUptimeCheckIps(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} diff --git a/vendor/cloud.google.com/go/old-news.md b/vendor/cloud.google.com/go/old-news.md new file mode 100644 index 0000000..a0bd83b --- /dev/null +++ b/vendor/cloud.google.com/go/old-news.md @@ -0,0 +1,596 @@ +_October 30, 2017_ + +*v0.16.0* + +- Other bigquery changes: + - `JobIterator.Next` returns `*Job`; removed `JobInfo` (BREAKING CHANGE). + - UseStandardSQL is deprecated; set UseLegacySQL to true if you need + Legacy SQL. + - Uploader.Put will generate a random insert ID if you do not provide one. 
+ - Support time partitioning for load jobs. + - Support dry-run queries. + - A `Job` remembers its last retrieved status. + - Support retrieving job configuration. + - Support labels for jobs and tables. + - Support dataset access lists. + - Improve support for external data sources, including data from Bigtable and + Google Sheets, and tables with external data. + - Support updating a table's view configuration. + - Fix uploading civil times with nanoseconds. + +- storage: + - Support PubSub notifications. + - Support Requester Pays buckets. + +- profiler: Support goroutine and mutex profile types. + + +_October 3, 2017_ + +*v0.15.0* + +- firestore: beta release. See the + [announcement](https://firebase.googleblog.com/2017/10/introducing-cloud-firestore.html). + +- errorreporting: The existing package has been redesigned. + +- errors: This package has been removed. Use errorreporting. + + +_September 28, 2017_ + +*v0.14.0* + +- bigquery BREAKING CHANGES: + - Standard SQL is the default for queries and views. + - `Table.Create` takes `TableMetadata` as a second argument, instead of + options. + - `Dataset.Create` takes `DatasetMetadata` as a second argument. + - `DatasetMetadata` field `ID` renamed to `FullID` + - `TableMetadata` field `ID` renamed to `FullID` + +- Other bigquery changes: + - The client will append a random suffix to a provided job ID if you set + `AddJobIDSuffix` to true in a job config. + - Listing jobs is supported. + - Better retry logic. + +- vision, language, speech: clients are now stable + +- monitoring: client is now beta + +- profiler: + - Rename InstanceName to Instance, ZoneName to Zone + - Auto-detect service name and version on AppEngine. + +_September 8, 2017_ + +*v0.13.0* + +- bigquery: UseLegacySQL options for CreateTable and QueryConfig. Use these + options to continue using Legacy SQL after the client switches its default + to Standard SQL. + +- bigquery: Support for updating dataset labels. + +- bigquery: Set DatasetIterator.ProjectID to list datasets in a project other + than the client's. DatasetsInProject is no longer needed and is deprecated. + +- bigtable: Fail ListInstances when any zones fail. + +- spanner: support decoding of slices of basic types (e.g. []string, []int64, + etc.) + +- logging/logadmin: UpdateSink no longer creates a sink if it is missing + (actually a change to the underlying service, not the client) + +- profiler: Service and ServiceVersion replace Target in Config. + +_August 22, 2017_ + +*v0.12.0* + +- pubsub: Subscription.Receive now uses streaming pull. + +- pubsub: add Client.TopicInProject to access topics in a different project + than the client. + +- errors: renamed errorreporting. The errors package will be removed shortly. + +- datastore: improved retry behavior. + +- bigquery: support updates to dataset metadata, with etags. + +- bigquery: add etag support to Table.Update (BREAKING: etag argument added). + +- bigquery: generate all job IDs on the client. + +- storage: support bucket lifecycle configurations. + + +_July 31, 2017_ + +*v0.11.0* + +- Clients for spanner, pubsub and video are now in beta. + +- New client for DLP. + +- spanner: performance and testing improvements. + +- storage: requester-pays buckets are supported. + +- storage, profiler, bigtable, bigquery: bug fixes and other minor improvements. + +- pubsub: bug fixes and other minor improvements + +_June 17, 2017_ + + +*v0.10.0* + +- pubsub: Subscription.ModifyPushConfig replaced with Subscription.Update. 
+ +- pubsub: Subscription.Receive now runs concurrently for higher throughput. + +- vision: cloud.google.com/go/vision is deprecated. Use +cloud.google.com/go/vision/apiv1 instead. + +- translation: now stable. + +- trace: several changes to the surface. See the link below. + +[Code changes required from v0.9.0.](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/MIGRATION.md) + + +_March 17, 2017_ + +Breaking Pubsub changes. +* Publish is now asynchronous +([announcement](https://groups.google.com/d/topic/google-api-go-announce/aaqRDIQ3rvU/discussion)). +* Subscription.Pull replaced by Subscription.Receive, which takes a callback ([announcement](https://groups.google.com/d/topic/google-api-go-announce/8pt6oetAdKc/discussion)). +* Message.Done replaced with Message.Ack and Message.Nack. + +_February 14, 2017_ + +Release of a client library for Spanner. See +the +[blog post](https://cloudplatform.googleblog.com/2017/02/introducing-Cloud-Spanner-a-global-database-service-for-mission-critical-applications.html). + +Note that although the Spanner service is beta, the Go client library is alpha. + +_December 12, 2016_ + +Beta release of BigQuery, DataStore, Logging and Storage. See the +[blog post](https://cloudplatform.googleblog.com/2016/12/announcing-new-google-cloud-client.html). + +Also, BigQuery now supports structs. Read a row directly into a struct with +`RowIterator.Next`, and upload a row directly from a struct with `Uploader.Put`. +You can also use field tags. See the [package documentation][cloud-bigquery-ref] +for details. + +_December 5, 2016_ + +More changes to BigQuery: + +* The `ValueList` type was removed. It is no longer necessary. Instead of + ```go + var v ValueList + ... it.Next(&v) .. + ``` + use + + ```go + var v []Value + ... it.Next(&v) ... + ``` + +* Previously, repeatedly calling `RowIterator.Next` on the same `[]Value` or + `ValueList` would append to the slice. Now each call resets the size to zero first. + +* Schema inference will infer the SQL type BYTES for a struct field of + type []byte. Previously it inferred STRING. + +* The types `uint`, `uint64` and `uintptr` are no longer supported in schema + inference. BigQuery's integer type is INT64, and those types may hold values + that are not correctly represented in a 64-bit signed integer. + +* The SQL types DATE, TIME and DATETIME are now supported. They correspond to + the `Date`, `Time` and `DateTime` types in the new `cloud.google.com/go/civil` + package. + +_November 17, 2016_ + +Change to BigQuery: values from INTEGER columns will now be returned as int64, +not int. This will avoid errors arising from large values on 32-bit systems. + +_November 8, 2016_ + +New datastore feature: datastore now encodes your nested Go structs as Entity values, +instead of a flattened list of the embedded struct's fields. +This means that you may now have twice-nested slices, eg. +```go +type State struct { + Cities []struct{ + Populations []int + } +} +``` + +See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/79jtrdeuJAg) for +more details. + +_November 8, 2016_ + +Breaking changes to datastore: contexts no longer hold namespaces; instead you +must set a key's namespace explicitly. Also, key functions have been changed +and renamed. + +* The WithNamespace function has been removed. To specify a namespace in a Query, use the Query.Namespace method: + ```go + q := datastore.NewQuery("Kind").Namespace("ns") + ``` + +* All the fields of Key are exported. 
+  That means you can construct any Key with a struct literal:
+  ```go
+  k := &Key{Kind: "Kind", ID: 37, Namespace: "ns"}
+  ```
+
+* As a result of the above, the Key methods Kind, ID, Name, Parent, SetParent and Namespace have been removed.
+
+* `NewIncompleteKey` has been removed, replaced by `IncompleteKey`. Replace
+  ```go
+  NewIncompleteKey(ctx, kind, parent)
+  ```
+  with
+  ```go
+  IncompleteKey(kind, parent)
+  ```
+  and if you do use namespaces, make sure you set the namespace on the returned key.
+
+* `NewKey` has been removed, replaced by `NameKey` and `IDKey`. Replace
+  ```go
+  NewKey(ctx, kind, name, 0, parent)
+  NewKey(ctx, kind, "", id, parent)
+  ```
+  with
+  ```go
+  NameKey(kind, name, parent)
+  IDKey(kind, id, parent)
+  ```
+  and if you do use namespaces, make sure you set the namespace on the returned key.
+
+* The `Done` variable has been removed. Replace `datastore.Done` with `iterator.Done`, from the package `google.golang.org/api/iterator`.
+
+* The `Client.Close` method will have a return type of error. It will return the result of closing the underlying gRPC connection.
+
+See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/hqXtM_4Ix-0) for
+more details.
+
+_October 27, 2016_
+
+Breaking change to bigquery: `NewGCSReference` is now a function,
+not a method on `Client`.
+
+New bigquery feature: `Table.LoaderFrom` now accepts a `ReaderSource`, enabling
+loading data into a table from a file or any `io.Reader`.
+
+_October 21, 2016_
+
+Breaking change to pubsub: removed `pubsub.Done`.
+
+Use `iterator.Done` instead, where `iterator` is the package
+`google.golang.org/api/iterator`.
+
+_October 19, 2016_
+
+Breaking changes to cloud.google.com/go/bigquery:
+
+* Client.Table and Client.OpenTable have been removed.
+  Replace
+  ```go
+  client.OpenTable("project", "dataset", "table")
+  ```
+  with
+  ```go
+  client.DatasetInProject("project", "dataset").Table("table")
+  ```
+
+* Client.CreateTable has been removed.
+  Replace
+  ```go
+  client.CreateTable(ctx, "project", "dataset", "table")
+  ```
+  with
+  ```go
+  client.DatasetInProject("project", "dataset").Table("table").Create(ctx)
+  ```
+
+* Dataset.ListTables has been replaced with Dataset.Tables.
+  Replace
+  ```go
+  tables, err := ds.ListTables(ctx)
+  ```
+  with
+  ```go
+  it := ds.Tables(ctx)
+  for {
+      table, err := it.Next()
+      if err == iterator.Done {
+          break
+      }
+      if err != nil {
+          // TODO: Handle error.
+      }
+      // TODO: use table.
+  }
+  ```
+
+* Client.Read has been replaced with Job.Read, Table.Read and Query.Read.
+  Replace
+  ```go
+  it, err := client.Read(ctx, job)
+  ```
+  with
+  ```go
+  it, err := job.Read(ctx)
+  ```
+  and similarly for reading from tables or queries.
+
+* The iterator returned from the Read methods is now named RowIterator. Its
+  behavior is closer to the other iterators in these libraries. It no longer
+  supports the Schema method; see the next item.
+  Replace
+  ```go
+  for it.Next(ctx) {
+      var vals ValueList
+      if err := it.Get(&vals); err != nil {
+          // TODO: Handle error.
+      }
+      // TODO: use vals.
+  }
+  if err := it.Err(); err != nil {
+      // TODO: Handle error.
+  }
+  ```
+  with
+  ```go
+  for {
+      var vals ValueList
+      err := it.Next(&vals)
+      if err == iterator.Done {
+          break
+      }
+      if err != nil {
+          // TODO: Handle error.
+      }
+      // TODO: use vals.
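+      // Note: since the December 5, 2016 change described above, each call
+      // to Next first resets the slice it is given to length zero.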
+  }
+  ```
+  Instead of the `RecordsPerRequest(n)` option, write
+  ```go
+  it.PageInfo().MaxSize = n
+  ```
+  Instead of the `StartIndex(i)` option, write
+  ```go
+  it.StartIndex = i
+  ```
+
+* ValueLoader.Load now takes a Schema in addition to a slice of Values.
+  Replace
+  ```go
+  func (vl *myValueLoader) Load(v []bigquery.Value)
+  ```
+  with
+  ```go
+  func (vl *myValueLoader) Load(v []bigquery.Value, s bigquery.Schema)
+  ```
+
+* Table.Patch is replaced by Table.Update.
+  Replace
+  ```go
+  p := table.Patch()
+  p.Description("new description")
+  metadata, err := p.Apply(ctx)
+  ```
+  with
+  ```go
+  metadata, err := table.Update(ctx, bigquery.TableMetadataToUpdate{
+      Description: "new description",
+  })
+  ```
+
+* Client.Copy is replaced by separate methods for each of its four functions.
+  All options have been replaced by struct fields.
+
+  * To load data from Google Cloud Storage into a table, use Table.LoaderFrom.
+
+    Replace
+    ```go
+    client.Copy(ctx, table, gcsRef)
+    ```
+    with
+    ```go
+    table.LoaderFrom(gcsRef).Run(ctx)
+    ```
+    Instead of passing options to Copy, set fields on the Loader:
+    ```go
+    loader := table.LoaderFrom(gcsRef)
+    loader.WriteDisposition = bigquery.WriteTruncate
+    ```
+
+  * To extract data from a table into Google Cloud Storage, use
+    Table.ExtractorTo. Set fields on the returned Extractor instead of
+    passing options.
+
+    Replace
+    ```go
+    client.Copy(ctx, gcsRef, table)
+    ```
+    with
+    ```go
+    table.ExtractorTo(gcsRef).Run(ctx)
+    ```
+
+  * To copy data into a table from one or more other tables, use
+    Table.CopierFrom. Set fields on the returned Copier instead of passing options.
+
+    Replace
+    ```go
+    client.Copy(ctx, dstTable, srcTable)
+    ```
+    with
+    ```go
+    dstTable.CopierFrom(srcTable).Run(ctx)
+    ```
+
+  * To start a query job, create a Query and call its Run method. Set fields
+    on the query instead of passing options.
+
+    Replace
+    ```go
+    client.Copy(ctx, table, query)
+    ```
+    with
+    ```go
+    query.Run(ctx)
+    ```
+
+* Table.NewUploader has been renamed to Table.Uploader. Instead of options,
+  configure an Uploader by setting its fields.
+  Replace
+  ```go
+  u := table.NewUploader(bigquery.UploadIgnoreUnknownValues())
+  ```
+  with
+  ```go
+  u := table.Uploader()
+  u.IgnoreUnknownValues = true
+  ```
+
+_October 10, 2016_
+
+Breaking changes to cloud.google.com/go/storage:
+
+* AdminClient replaced by methods on Client.
+  Replace
+  ```go
+  adminClient.CreateBucket(ctx, bucketName, attrs)
+  ```
+  with
+  ```go
+  client.Bucket(bucketName).Create(ctx, projectID, attrs)
+  ```
+
+* BucketHandle.List replaced by BucketHandle.Objects.
+  Replace
+  ```go
+  for query != nil {
+      objs, err := bucket.List(ctx, query)
+      if err != nil { ... }
+      query = objs.Next
+      for _, obj := range objs.Results {
+          fmt.Println(obj)
+      }
+  }
+  ```
+  with
+  ```go
+  iter := bucket.Objects(ctx, query)
+  for {
+      obj, err := iter.Next()
+      if err == iterator.Done {
+          break
+      }
+      if err != nil { ... }
+      fmt.Println(obj)
+  }
+  ```
+  (The `iterator` package is at `google.golang.org/api/iterator`.)
+
+  Replace `Query.Cursor` with `ObjectIterator.PageInfo().Token`.
+
+  Replace `Query.MaxResults` with `ObjectIterator.PageInfo().MaxSize`.
+
+* ObjectHandle.CopyTo replaced by ObjectHandle.CopierFrom.
+  Replace
+  ```go
+  attrs, err := src.CopyTo(ctx, dst, nil)
+  ```
+  with
+  ```go
+  attrs, err := dst.CopierFrom(src).Run(ctx)
+  ```
+
+  Replace
+  ```go
+  attrs, err := src.CopyTo(ctx, dst, &storage.ObjectAttrs{ContentType: "text/html"})
+  ```
+  with
+  ```go
+  c := dst.CopierFrom(src)
+  c.ContentType = "text/html"
+  attrs, err := c.Run(ctx)
+  ```
+
+* ObjectHandle.ComposeFrom replaced by ObjectHandle.ComposerFrom.
+  Replace
+  ```go
+  attrs, err := dst.ComposeFrom(ctx, []*storage.ObjectHandle{src1, src2}, nil)
+  ```
+  with
+  ```go
+  attrs, err := dst.ComposerFrom(src1, src2).Run(ctx)
+  ```
+
+* ObjectHandle.Update's ObjectAttrs argument replaced by ObjectAttrsToUpdate.
+  Replace
+  ```go
+  attrs, err := obj.Update(ctx, &storage.ObjectAttrs{ContentType: "text/html"})
+  ```
+  with
+  ```go
+  attrs, err := obj.Update(ctx, storage.ObjectAttrsToUpdate{ContentType: "text/html"})
+  ```
+
+* ObjectHandle.WithConditions replaced by ObjectHandle.If.
+  Replace
+  ```go
+  obj.WithConditions(storage.Generation(gen), storage.IfMetaGenerationMatch(mgen))
+  ```
+  with
+  ```go
+  obj.Generation(gen).If(storage.Conditions{MetagenerationMatch: mgen})
+  ```
+
+  Replace
+  ```go
+  obj.WithConditions(storage.IfGenerationMatch(0))
+  ```
+  with
+  ```go
+  obj.If(storage.Conditions{DoesNotExist: true})
+  ```
+
+* `storage.Done` replaced by `iterator.Done` (from package `google.golang.org/api/iterator`).
+
+_October 6, 2016_
+
+Package preview/logging deleted. Use logging instead.
+
+_September 27, 2016_
+
+Logging client replaced with preview version (see below).
+
+_September 8, 2016_
+
+* New clients for some of Google's Machine Learning APIs: Vision, Speech, and
+Natural Language.
+
+* Preview version of a new [Stackdriver Logging][cloud-logging] client in
+[`cloud.google.com/go/preview/logging`](https://godoc.org/cloud.google.com/go/preview/logging).
+This client uses gRPC as its transport layer, and supports log reading, sinks
+and metrics. It will replace the current client at `cloud.google.com/go/logging` shortly.
+
diff --git a/vendor/cloud.google.com/go/oslogin/apiv1beta/doc.go b/vendor/cloud.google.com/go/oslogin/apiv1beta/doc.go
new file mode 100644
index 0000000..7d98972
--- /dev/null
+++ b/vendor/cloud.google.com/go/oslogin/apiv1beta/doc.go
@@ -0,0 +1,49 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+// Package oslogin is an auto-generated package for the
+// Google Cloud OS Login API.
+//
+//   NOTE: This package is in alpha. It is not stable, and is likely to change.
+//
+// Manages OS login configuration for Google account users.
+package oslogin // import "cloud.google.com/go/oslogin/apiv1beta"
+
+import (
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/metadata"
+)
+
+func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
+	out, _ := metadata.FromOutgoingContext(ctx)
+	out = out.Copy()
+	for _, md := range mds {
+		for k, v := range md {
+			out[k] = append(out[k], v...)
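+			// Append rather than assign, so values already present in the
+			// outgoing metadata under the same key are preserved.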
+ } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly", + } +} diff --git a/vendor/cloud.google.com/go/oslogin/apiv1beta/mock_test.go b/vendor/cloud.google.com/go/oslogin/apiv1beta/mock_test.go new file mode 100644 index 0000000..4239356 --- /dev/null +++ b/vendor/cloud.google.com/go/oslogin/apiv1beta/mock_test.go @@ -0,0 +1,520 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package oslogin + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + commonpb "google.golang.org/genproto/googleapis/cloud/oslogin/common" + osloginpb "google.golang.org/genproto/googleapis/cloud/oslogin/v1beta" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockOsLoginServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + osloginpb.OsLoginServiceServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockOsLoginServer) DeletePosixAccount(ctx context.Context, req *osloginpb.DeletePosixAccountRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockOsLoginServer) DeleteSshPublicKey(ctx context.Context, req *osloginpb.DeleteSshPublicKeyRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockOsLoginServer) GetLoginProfile(ctx context.Context, req *osloginpb.GetLoginProfileRequest) (*osloginpb.LoginProfile, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*osloginpb.LoginProfile), nil +} + +func (s *mockOsLoginServer) GetSshPublicKey(ctx context.Context, req *osloginpb.GetSshPublicKeyRequest) (*commonpb.SshPublicKey, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*commonpb.SshPublicKey), nil +} + +func (s *mockOsLoginServer) ImportSshPublicKey(ctx context.Context, req *osloginpb.ImportSshPublicKeyRequest) (*osloginpb.ImportSshPublicKeyResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*osloginpb.ImportSshPublicKeyResponse), nil +} + +func (s *mockOsLoginServer) UpdateSshPublicKey(ctx context.Context, req *osloginpb.UpdateSshPublicKeyRequest) (*commonpb.SshPublicKey, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*commonpb.SshPublicKey), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
+var clientOpt option.ClientOption + +var ( + mockOsLogin mockOsLoginServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + osloginpb.RegisterOsLoginServiceServer(serv, &mockOsLogin) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestOsLoginServiceDeletePosixAccount(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockOsLogin.err = nil + mockOsLogin.reqs = nil + + mockOsLogin.resps = append(mockOsLogin.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("users/%s/projects/%s", "[USER]", "[PROJECT]") + var request = &osloginpb.DeletePosixAccountRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeletePosixAccount(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockOsLogin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestOsLoginServiceDeletePosixAccountError(t *testing.T) { + errCode := codes.PermissionDenied + mockOsLogin.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("users/%s/projects/%s", "[USER]", "[PROJECT]") + var request = &osloginpb.DeletePosixAccountRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeletePosixAccount(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestOsLoginServiceDeleteSshPublicKey(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockOsLogin.err = nil + mockOsLogin.reqs = nil + + mockOsLogin.resps = append(mockOsLogin.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("users/%s/sshPublicKeys/%s", "[USER]", "[FINGERPRINT]") + var request = &osloginpb.DeleteSshPublicKeyRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteSshPublicKey(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockOsLogin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestOsLoginServiceDeleteSshPublicKeyError(t *testing.T) { + errCode := codes.PermissionDenied + mockOsLogin.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("users/%s/sshPublicKeys/%s", "[USER]", "[FINGERPRINT]") + var request = &osloginpb.DeleteSshPublicKeyRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteSshPublicKey(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestOsLoginServiceGetLoginProfile(t *testing.T) { + var name2 string = "name2-1052831874" + var suspended bool = false + var expectedResponse = &osloginpb.LoginProfile{ + 
Name: name2, + Suspended: suspended, + } + + mockOsLogin.err = nil + mockOsLogin.reqs = nil + + mockOsLogin.resps = append(mockOsLogin.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("users/%s", "[USER]") + var request = &osloginpb.GetLoginProfileRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetLoginProfile(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockOsLogin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestOsLoginServiceGetLoginProfileError(t *testing.T) { + errCode := codes.PermissionDenied + mockOsLogin.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("users/%s", "[USER]") + var request = &osloginpb.GetLoginProfileRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetLoginProfile(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestOsLoginServiceGetSshPublicKey(t *testing.T) { + var key string = "key106079" + var expirationTimeUsec int64 = 2058878882 + var fingerprint string = "fingerprint-1375934236" + var expectedResponse = &commonpb.SshPublicKey{ + Key: key, + ExpirationTimeUsec: expirationTimeUsec, + Fingerprint: fingerprint, + } + + mockOsLogin.err = nil + mockOsLogin.reqs = nil + + mockOsLogin.resps = append(mockOsLogin.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("users/%s/sshPublicKeys/%s", "[USER]", "[FINGERPRINT]") + var request = &osloginpb.GetSshPublicKeyRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetSshPublicKey(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockOsLogin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestOsLoginServiceGetSshPublicKeyError(t *testing.T) { + errCode := codes.PermissionDenied + mockOsLogin.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("users/%s/sshPublicKeys/%s", "[USER]", "[FINGERPRINT]") + var request = &osloginpb.GetSshPublicKeyRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetSshPublicKey(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestOsLoginServiceImportSshPublicKey(t *testing.T) { + var expectedResponse *osloginpb.ImportSshPublicKeyResponse = &osloginpb.ImportSshPublicKeyResponse{} + + mockOsLogin.err = nil + mockOsLogin.reqs = nil + + mockOsLogin.resps = append(mockOsLogin.resps[:0], expectedResponse) + + var formattedParent string = 
fmt.Sprintf("users/%s", "[USER]") + var sshPublicKey *commonpb.SshPublicKey = &commonpb.SshPublicKey{} + var request = &osloginpb.ImportSshPublicKeyRequest{ + Parent: formattedParent, + SshPublicKey: sshPublicKey, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ImportSshPublicKey(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockOsLogin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestOsLoginServiceImportSshPublicKeyError(t *testing.T) { + errCode := codes.PermissionDenied + mockOsLogin.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("users/%s", "[USER]") + var sshPublicKey *commonpb.SshPublicKey = &commonpb.SshPublicKey{} + var request = &osloginpb.ImportSshPublicKeyRequest{ + Parent: formattedParent, + SshPublicKey: sshPublicKey, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ImportSshPublicKey(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestOsLoginServiceUpdateSshPublicKey(t *testing.T) { + var key string = "key106079" + var expirationTimeUsec int64 = 2058878882 + var fingerprint string = "fingerprint-1375934236" + var expectedResponse = &commonpb.SshPublicKey{ + Key: key, + ExpirationTimeUsec: expirationTimeUsec, + Fingerprint: fingerprint, + } + + mockOsLogin.err = nil + mockOsLogin.reqs = nil + + mockOsLogin.resps = append(mockOsLogin.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("users/%s/sshPublicKeys/%s", "[USER]", "[FINGERPRINT]") + var sshPublicKey *commonpb.SshPublicKey = &commonpb.SshPublicKey{} + var request = &osloginpb.UpdateSshPublicKeyRequest{ + Name: formattedName, + SshPublicKey: sshPublicKey, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateSshPublicKey(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockOsLogin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestOsLoginServiceUpdateSshPublicKeyError(t *testing.T) { + errCode := codes.PermissionDenied + mockOsLogin.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("users/%s/sshPublicKeys/%s", "[USER]", "[FINGERPRINT]") + var sshPublicKey *commonpb.SshPublicKey = &commonpb.SshPublicKey{} + var request = &osloginpb.UpdateSshPublicKeyRequest{ + Name: formattedName, + SshPublicKey: sshPublicKey, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateSshPublicKey(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/oslogin/apiv1beta/os_login_client.go 
b/vendor/cloud.google.com/go/oslogin/apiv1beta/os_login_client.go new file mode 100644 index 0000000..b27e0e8 --- /dev/null +++ b/vendor/cloud.google.com/go/oslogin/apiv1beta/os_login_client.go @@ -0,0 +1,222 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package oslogin + +import ( + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + commonpb "google.golang.org/genproto/googleapis/cloud/oslogin/common" + osloginpb "google.golang.org/genproto/googleapis/cloud/oslogin/v1beta" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. +type CallOptions struct { + DeletePosixAccount []gax.CallOption + DeleteSshPublicKey []gax.CallOption + GetLoginProfile []gax.CallOption + GetSshPublicKey []gax.CallOption + ImportSshPublicKey []gax.CallOption + UpdateSshPublicKey []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("oslogin.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &CallOptions{ + DeletePosixAccount: retry[[2]string{"default", "idempotent"}], + DeleteSshPublicKey: retry[[2]string{"default", "idempotent"}], + GetLoginProfile: retry[[2]string{"default", "idempotent"}], + GetSshPublicKey: retry[[2]string{"default", "idempotent"}], + ImportSshPublicKey: retry[[2]string{"default", "idempotent"}], + UpdateSshPublicKey: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with Google Cloud OS Login API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client osloginpb.OsLoginServiceClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new os login service client. +// +// Cloud OS Login API +// +// The Cloud OS Login API allows you to manage users and their associated SSH +// public keys for logging into virtual machines on Google Cloud Platform. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) 
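+	// Caller-supplied options are appended after the defaults, so they can
+	// override the default endpoint and scopes above.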
+ if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: osloginpb.NewOsLoginServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// DeletePosixAccount deletes a POSIX account. +func (c *Client) DeletePosixAccount(ctx context.Context, req *osloginpb.DeletePosixAccountRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeletePosixAccount[0:len(c.CallOptions.DeletePosixAccount):len(c.CallOptions.DeletePosixAccount)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeletePosixAccount(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// DeleteSshPublicKey deletes an SSH public key. +func (c *Client) DeleteSshPublicKey(ctx context.Context, req *osloginpb.DeleteSshPublicKeyRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteSshPublicKey[0:len(c.CallOptions.DeleteSshPublicKey):len(c.CallOptions.DeleteSshPublicKey)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteSshPublicKey(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// GetLoginProfile retrieves the profile information used for logging in to a virtual machine +// on Google Compute Engine. +func (c *Client) GetLoginProfile(ctx context.Context, req *osloginpb.GetLoginProfileRequest, opts ...gax.CallOption) (*osloginpb.LoginProfile, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetLoginProfile[0:len(c.CallOptions.GetLoginProfile):len(c.CallOptions.GetLoginProfile)], opts...) + var resp *osloginpb.LoginProfile + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetLoginProfile(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetSshPublicKey retrieves an SSH public key. +func (c *Client) GetSshPublicKey(ctx context.Context, req *osloginpb.GetSshPublicKeyRequest, opts ...gax.CallOption) (*commonpb.SshPublicKey, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetSshPublicKey[0:len(c.CallOptions.GetSshPublicKey):len(c.CallOptions.GetSshPublicKey)], opts...) + var resp *commonpb.SshPublicKey + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetSshPublicKey(ctx, req, settings.GRPC...) + return err + }, opts...) 
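+	// gax.Invoke applies the retry settings configured in CallOptions; by
+	// default this retries on DeadlineExceeded and Unavailable.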
+ if err != nil { + return nil, err + } + return resp, nil +} + +// ImportSshPublicKey adds an SSH public key and returns the profile information. Default POSIX +// account information is set when no username and UID exist as part of the +// login profile. +func (c *Client) ImportSshPublicKey(ctx context.Context, req *osloginpb.ImportSshPublicKeyRequest, opts ...gax.CallOption) (*osloginpb.ImportSshPublicKeyResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ImportSshPublicKey[0:len(c.CallOptions.ImportSshPublicKey):len(c.CallOptions.ImportSshPublicKey)], opts...) + var resp *osloginpb.ImportSshPublicKeyResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ImportSshPublicKey(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateSshPublicKey updates an SSH public key and returns the profile information. This method +// supports patch semantics. +func (c *Client) UpdateSshPublicKey(ctx context.Context, req *osloginpb.UpdateSshPublicKeyRequest, opts ...gax.CallOption) (*commonpb.SshPublicKey, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateSshPublicKey[0:len(c.CallOptions.UpdateSshPublicKey):len(c.CallOptions.UpdateSshPublicKey)], opts...) + var resp *commonpb.SshPublicKey + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.UpdateSshPublicKey(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/oslogin/apiv1beta/os_login_client_example_test.go b/vendor/cloud.google.com/go/oslogin/apiv1beta/os_login_client_example_test.go new file mode 100644 index 0000000..991ee60 --- /dev/null +++ b/vendor/cloud.google.com/go/oslogin/apiv1beta/os_login_client_example_test.go @@ -0,0 +1,137 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package oslogin_test + +import ( + "cloud.google.com/go/oslogin/apiv1beta" + "golang.org/x/net/context" + osloginpb "google.golang.org/genproto/googleapis/cloud/oslogin/v1beta" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := oslogin.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_DeletePosixAccount() { + ctx := context.Background() + c, err := oslogin.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &osloginpb.DeletePosixAccountRequest{ + // TODO: Fill request struct fields. + } + err = c.DeletePosixAccount(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_DeleteSshPublicKey() { + ctx := context.Background() + c, err := oslogin.NewClient(ctx) + if err != nil { + // TODO: Handle error. 
+ } + + req := &osloginpb.DeleteSshPublicKeyRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteSshPublicKey(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_GetLoginProfile() { + ctx := context.Background() + c, err := oslogin.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &osloginpb.GetLoginProfileRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetLoginProfile(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_GetSshPublicKey() { + ctx := context.Background() + c, err := oslogin.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &osloginpb.GetSshPublicKeyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetSshPublicKey(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ImportSshPublicKey() { + ctx := context.Background() + c, err := oslogin.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &osloginpb.ImportSshPublicKeyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ImportSshPublicKey(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_UpdateSshPublicKey() { + ctx := context.Background() + c, err := oslogin.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &osloginpb.UpdateSshPublicKeyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateSshPublicKey(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/dlp_client.go b/vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/dlp_client.go new file mode 100644 index 0000000..4c3282d --- /dev/null +++ b/vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/dlp_client.go @@ -0,0 +1,681 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package dlp + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. 
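+// Each field holds the gax.CallOption slice applied to calls of the method
+// with the same name; defaultCallOptions supplies the defaults.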
+type CallOptions struct { + InspectContent []gax.CallOption + RedactImage []gax.CallOption + DeidentifyContent []gax.CallOption + ReidentifyContent []gax.CallOption + InspectDataSource []gax.CallOption + AnalyzeDataSourceRisk []gax.CallOption + ListInfoTypes []gax.CallOption + CreateInspectTemplate []gax.CallOption + UpdateInspectTemplate []gax.CallOption + GetInspectTemplate []gax.CallOption + ListInspectTemplates []gax.CallOption + DeleteInspectTemplate []gax.CallOption + CreateDeidentifyTemplate []gax.CallOption + UpdateDeidentifyTemplate []gax.CallOption + GetDeidentifyTemplate []gax.CallOption + ListDeidentifyTemplates []gax.CallOption + DeleteDeidentifyTemplate []gax.CallOption + ListDlpJobs []gax.CallOption + GetDlpJob []gax.CallOption + DeleteDlpJob []gax.CallOption + CancelDlpJob []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("dlp.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &CallOptions{ + InspectContent: retry[[2]string{"default", "idempotent"}], + RedactImage: retry[[2]string{"default", "idempotent"}], + DeidentifyContent: retry[[2]string{"default", "idempotent"}], + ReidentifyContent: retry[[2]string{"default", "idempotent"}], + InspectDataSource: retry[[2]string{"default", "non_idempotent"}], + AnalyzeDataSourceRisk: retry[[2]string{"default", "non_idempotent"}], + ListInfoTypes: retry[[2]string{"default", "idempotent"}], + CreateInspectTemplate: retry[[2]string{"default", "non_idempotent"}], + UpdateInspectTemplate: retry[[2]string{"default", "non_idempotent"}], + GetInspectTemplate: retry[[2]string{"default", "idempotent"}], + ListInspectTemplates: retry[[2]string{"default", "idempotent"}], + DeleteInspectTemplate: retry[[2]string{"default", "idempotent"}], + CreateDeidentifyTemplate: retry[[2]string{"default", "non_idempotent"}], + UpdateDeidentifyTemplate: retry[[2]string{"default", "non_idempotent"}], + GetDeidentifyTemplate: retry[[2]string{"default", "idempotent"}], + ListDeidentifyTemplates: retry[[2]string{"default", "idempotent"}], + DeleteDeidentifyTemplate: retry[[2]string{"default", "idempotent"}], + ListDlpJobs: retry[[2]string{"default", "idempotent"}], + GetDlpJob: retry[[2]string{"default", "idempotent"}], + DeleteDlpJob: retry[[2]string{"default", "idempotent"}], + CancelDlpJob: retry[[2]string{"default", "non_idempotent"}], + } +} + +// Client is a client for interacting with DLP API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client dlppb.DlpServiceClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new dlp service client. +// +// The DLP API is a service that allows clients +// to detect the presence of Personally Identifiable Information (PII) and other +// privacy-sensitive data in user-supplied, unstructured data streams, like text +// blocks or images. 
+// The service also includes methods for sensitive data redaction and +// scheduling of data scans on Google Cloud Platform based data sets. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: dlppb.NewDlpServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// InspectContent finds potentially sensitive info in content. +// This method has limits on input size, processing time, and output size. +// How-to guide for text (at /dlp/docs/inspecting-text), How-to guide for +// images (at /dlp/docs/inspecting-images) +func (c *Client) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest, opts ...gax.CallOption) (*dlppb.InspectContentResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.InspectContent[0:len(c.CallOptions.InspectContent):len(c.CallOptions.InspectContent)], opts...) + var resp *dlppb.InspectContentResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.InspectContent(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// RedactImage redacts potentially sensitive info from an image. +// This method has limits on input size, processing time, and output size. +// How-to guide (at /dlp/docs/redacting-sensitive-data-images) +func (c *Client) RedactImage(ctx context.Context, req *dlppb.RedactImageRequest, opts ...gax.CallOption) (*dlppb.RedactImageResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.RedactImage[0:len(c.CallOptions.RedactImage):len(c.CallOptions.RedactImage)], opts...) + var resp *dlppb.RedactImageResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.RedactImage(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeidentifyContent de-identifies potentially sensitive info from a ContentItem. +// This method has limits on input size and output size. +// How-to guide (at /dlp/docs/deidentify-sensitive-data) +func (c *Client) DeidentifyContent(ctx context.Context, req *dlppb.DeidentifyContentRequest, opts ...gax.CallOption) (*dlppb.DeidentifyContentResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeidentifyContent[0:len(c.CallOptions.DeidentifyContent):len(c.CallOptions.DeidentifyContent)], opts...) 
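+	// The full slice expression caps capacity at length, so the append
+	// above copies rather than mutating the shared CallOptions defaults.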
+ var resp *dlppb.DeidentifyContentResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.DeidentifyContent(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ReidentifyContent re-identify content that has been de-identified. +func (c *Client) ReidentifyContent(ctx context.Context, req *dlppb.ReidentifyContentRequest, opts ...gax.CallOption) (*dlppb.ReidentifyContentResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ReidentifyContent[0:len(c.CallOptions.ReidentifyContent):len(c.CallOptions.ReidentifyContent)], opts...) + var resp *dlppb.ReidentifyContentResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ReidentifyContent(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// InspectDataSource schedules a job scanning content in a Google Cloud Platform data +// repository. How-to guide (at /dlp/docs/inspecting-storage) +func (c *Client) InspectDataSource(ctx context.Context, req *dlppb.InspectDataSourceRequest, opts ...gax.CallOption) (*dlppb.DlpJob, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.InspectDataSource[0:len(c.CallOptions.InspectDataSource):len(c.CallOptions.InspectDataSource)], opts...) + var resp *dlppb.DlpJob + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.InspectDataSource(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AnalyzeDataSourceRisk schedules a job to compute risk analysis metrics over content in a Google +// Cloud Platform repository. [How-to guide}(/dlp/docs/compute-risk-analysis) +func (c *Client) AnalyzeDataSourceRisk(ctx context.Context, req *dlppb.AnalyzeDataSourceRiskRequest, opts ...gax.CallOption) (*dlppb.DlpJob, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.AnalyzeDataSourceRisk[0:len(c.CallOptions.AnalyzeDataSourceRisk):len(c.CallOptions.AnalyzeDataSourceRisk)], opts...) + var resp *dlppb.DlpJob + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AnalyzeDataSourceRisk(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListInfoTypes returns sensitive information types DLP supports. +func (c *Client) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequest, opts ...gax.CallOption) (*dlppb.ListInfoTypesResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListInfoTypes[0:len(c.CallOptions.ListInfoTypes):len(c.CallOptions.ListInfoTypes)], opts...) + var resp *dlppb.ListInfoTypesResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListInfoTypes(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateInspectTemplate creates an inspect template for re-using frequently used configuration +// for inspecting content, images, and storage. 
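+// The created template can then be referenced by name from later inspection
+// requests instead of repeating the full inspection configuration.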
+func (c *Client) CreateInspectTemplate(ctx context.Context, req *dlppb.CreateInspectTemplateRequest, opts ...gax.CallOption) (*dlppb.InspectTemplate, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateInspectTemplate[0:len(c.CallOptions.CreateInspectTemplate):len(c.CallOptions.CreateInspectTemplate)], opts...) + var resp *dlppb.InspectTemplate + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateInspectTemplate(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateInspectTemplate updates the inspect template. +func (c *Client) UpdateInspectTemplate(ctx context.Context, req *dlppb.UpdateInspectTemplateRequest, opts ...gax.CallOption) (*dlppb.InspectTemplate, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateInspectTemplate[0:len(c.CallOptions.UpdateInspectTemplate):len(c.CallOptions.UpdateInspectTemplate)], opts...) + var resp *dlppb.InspectTemplate + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.UpdateInspectTemplate(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetInspectTemplate gets an inspect template. +func (c *Client) GetInspectTemplate(ctx context.Context, req *dlppb.GetInspectTemplateRequest, opts ...gax.CallOption) (*dlppb.InspectTemplate, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetInspectTemplate[0:len(c.CallOptions.GetInspectTemplate):len(c.CallOptions.GetInspectTemplate)], opts...) + var resp *dlppb.InspectTemplate + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetInspectTemplate(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListInspectTemplates lists inspect templates. +func (c *Client) ListInspectTemplates(ctx context.Context, req *dlppb.ListInspectTemplatesRequest, opts ...gax.CallOption) *InspectTemplateIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListInspectTemplates[0:len(c.CallOptions.ListInspectTemplates):len(c.CallOptions.ListInspectTemplates)], opts...) + it := &InspectTemplateIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*dlppb.InspectTemplate, string, error) { + var resp *dlppb.ListInspectTemplatesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListInspectTemplates(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.InspectTemplates, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// DeleteInspectTemplate deletes inspect templates. 
+func (c *Client) DeleteInspectTemplate(ctx context.Context, req *dlppb.DeleteInspectTemplateRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteInspectTemplate[0:len(c.CallOptions.DeleteInspectTemplate):len(c.CallOptions.DeleteInspectTemplate)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteInspectTemplate(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// CreateDeidentifyTemplate creates an Deidentify template for re-using frequently used configuration +// for Deidentifying content, images, and storage. +func (c *Client) CreateDeidentifyTemplate(ctx context.Context, req *dlppb.CreateDeidentifyTemplateRequest, opts ...gax.CallOption) (*dlppb.DeidentifyTemplate, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateDeidentifyTemplate[0:len(c.CallOptions.CreateDeidentifyTemplate):len(c.CallOptions.CreateDeidentifyTemplate)], opts...) + var resp *dlppb.DeidentifyTemplate + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateDeidentifyTemplate(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateDeidentifyTemplate updates the inspect template. +func (c *Client) UpdateDeidentifyTemplate(ctx context.Context, req *dlppb.UpdateDeidentifyTemplateRequest, opts ...gax.CallOption) (*dlppb.DeidentifyTemplate, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateDeidentifyTemplate[0:len(c.CallOptions.UpdateDeidentifyTemplate):len(c.CallOptions.UpdateDeidentifyTemplate)], opts...) + var resp *dlppb.DeidentifyTemplate + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.UpdateDeidentifyTemplate(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetDeidentifyTemplate gets an inspect template. +func (c *Client) GetDeidentifyTemplate(ctx context.Context, req *dlppb.GetDeidentifyTemplateRequest, opts ...gax.CallOption) (*dlppb.DeidentifyTemplate, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetDeidentifyTemplate[0:len(c.CallOptions.GetDeidentifyTemplate):len(c.CallOptions.GetDeidentifyTemplate)], opts...) + var resp *dlppb.DeidentifyTemplate + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetDeidentifyTemplate(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListDeidentifyTemplates lists inspect templates. +func (c *Client) ListDeidentifyTemplates(ctx context.Context, req *dlppb.ListDeidentifyTemplatesRequest, opts ...gax.CallOption) *DeidentifyTemplateIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListDeidentifyTemplates[0:len(c.CallOptions.ListDeidentifyTemplates):len(c.CallOptions.ListDeidentifyTemplates)], opts...) 
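+	// Pages are fetched lazily: each InternalFetch call below issues one
+	// ListDeidentifyTemplates RPC as the iterator is advanced.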
+ it := &DeidentifyTemplateIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*dlppb.DeidentifyTemplate, string, error) { + var resp *dlppb.ListDeidentifyTemplatesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListDeidentifyTemplates(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.DeidentifyTemplates, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// DeleteDeidentifyTemplate deletes inspect templates. +func (c *Client) DeleteDeidentifyTemplate(ctx context.Context, req *dlppb.DeleteDeidentifyTemplateRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteDeidentifyTemplate[0:len(c.CallOptions.DeleteDeidentifyTemplate):len(c.CallOptions.DeleteDeidentifyTemplate)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteDeidentifyTemplate(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ListDlpJobs lists DlpJobs that match the specified filter in the request. +func (c *Client) ListDlpJobs(ctx context.Context, req *dlppb.ListDlpJobsRequest, opts ...gax.CallOption) *DlpJobIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListDlpJobs[0:len(c.CallOptions.ListDlpJobs):len(c.CallOptions.ListDlpJobs)], opts...) + it := &DlpJobIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*dlppb.DlpJob, string, error) { + var resp *dlppb.ListDlpJobsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListDlpJobs(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Jobs, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetDlpJob gets the latest state of a long-running DlpJob. +func (c *Client) GetDlpJob(ctx context.Context, req *dlppb.GetDlpJobRequest, opts ...gax.CallOption) (*dlppb.DlpJob, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetDlpJob[0:len(c.CallOptions.GetDlpJob):len(c.CallOptions.GetDlpJob)], opts...) + var resp *dlppb.DlpJob + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetDlpJob(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteDlpJob deletes a long-running DlpJob. This method indicates that the client is +// no longer interested in the DlpJob result. The job will be cancelled if +// possible. +func (c *Client) DeleteDlpJob(ctx context.Context, req *dlppb.DeleteDlpJobRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteDlpJob[0:len(c.CallOptions.DeleteDlpJob):len(c.CallOptions.DeleteDlpJob)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteDlpJob(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// CancelDlpJob starts asynchronous cancellation on a long-running DlpJob. The server +// makes a best effort to cancel the DlpJob, but success is not +// guaranteed. +func (c *Client) CancelDlpJob(ctx context.Context, req *dlppb.CancelDlpJobRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CancelDlpJob[0:len(c.CallOptions.CancelDlpJob):len(c.CallOptions.CancelDlpJob)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.CancelDlpJob(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// DeidentifyTemplateIterator manages a stream of *dlppb.DeidentifyTemplate. +type DeidentifyTemplateIterator struct { + items []*dlppb.DeidentifyTemplate + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*dlppb.DeidentifyTemplate, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *DeidentifyTemplateIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *DeidentifyTemplateIterator) Next() (*dlppb.DeidentifyTemplate, error) { + var item *dlppb.DeidentifyTemplate + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *DeidentifyTemplateIterator) bufLen() int { + return len(it.items) +} + +func (it *DeidentifyTemplateIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// DlpJobIterator manages a stream of *dlppb.DlpJob. +type DlpJobIterator struct { + items []*dlppb.DlpJob + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*dlppb.DlpJob, nextPageToken string, err error) +} + +// PageInfo supports pagination. 
See the google.golang.org/api/iterator package for details. +func (it *DlpJobIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *DlpJobIterator) Next() (*dlppb.DlpJob, error) { + var item *dlppb.DlpJob + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *DlpJobIterator) bufLen() int { + return len(it.items) +} + +func (it *DlpJobIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// InspectTemplateIterator manages a stream of *dlppb.InspectTemplate. +type InspectTemplateIterator struct { + items []*dlppb.InspectTemplate + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*dlppb.InspectTemplate, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *InspectTemplateIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *InspectTemplateIterator) Next() (*dlppb.InspectTemplate, error) { + var item *dlppb.InspectTemplate + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *InspectTemplateIterator) bufLen() int { + return len(it.items) +} + +func (it *InspectTemplateIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/dlp_client_example_test.go b/vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/dlp_client_example_test.go new file mode 100644 index 0000000..17527d7 --- /dev/null +++ b/vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/dlp_client_example_test.go @@ -0,0 +1,422 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package dlp_test + +import ( + "cloud.google.com/go/privacy/dlp/apiv2beta2" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta2" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. 
+ _ = c +} + +func ExampleClient_InspectContent() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.InspectContentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.InspectContent(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_RedactImage() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.RedactImageRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.RedactImage(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_DeidentifyContent() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.DeidentifyContentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.DeidentifyContent(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ReidentifyContent() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.ReidentifyContentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ReidentifyContent(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_InspectDataSource() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.InspectDataSourceRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.InspectDataSource(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_AnalyzeDataSourceRisk() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.AnalyzeDataSourceRiskRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.AnalyzeDataSourceRisk(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListInfoTypes() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.ListInfoTypesRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ListInfoTypes(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_CreateInspectTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.CreateInspectTemplateRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateInspectTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_UpdateInspectTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.UpdateInspectTemplateRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateInspectTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_GetInspectTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.GetInspectTemplateRequest{ + // TODO: Fill request struct fields. 
+ } + resp, err := c.GetInspectTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListInspectTemplates() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.ListInspectTemplatesRequest{ + // TODO: Fill request struct fields. + } + it := c.ListInspectTemplates(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_DeleteInspectTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.DeleteInspectTemplateRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteInspectTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_CreateDeidentifyTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.CreateDeidentifyTemplateRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateDeidentifyTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_UpdateDeidentifyTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.UpdateDeidentifyTemplateRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateDeidentifyTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_GetDeidentifyTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.GetDeidentifyTemplateRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetDeidentifyTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListDeidentifyTemplates() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.ListDeidentifyTemplatesRequest{ + // TODO: Fill request struct fields. + } + it := c.ListDeidentifyTemplates(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_DeleteDeidentifyTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.DeleteDeidentifyTemplateRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteDeidentifyTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_ListDlpJobs() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.ListDlpJobsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListDlpJobs(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_GetDlpJob() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.GetDlpJobRequest{ + // TODO: Fill request struct fields. 
+ } + resp, err := c.GetDlpJob(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_DeleteDlpJob() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.DeleteDlpJobRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteDlpJob(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_CancelDlpJob() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.CancelDlpJobRequest{ + // TODO: Fill request struct fields. + } + err = c.CancelDlpJob(ctx, req) + if err != nil { + // TODO: Handle error. + } +} diff --git a/vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/doc.go b/vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/doc.go new file mode 100644 index 0000000..43eb41e --- /dev/null +++ b/vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/doc.go @@ -0,0 +1,48 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package dlp is an auto-generated package for the +// DLP API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// The Google Data Loss Prevention API provides methods for detection of +// privacy-sensitive fragments in text, images, and Google Cloud Platform +// storage repositories. +package dlp // import "cloud.google.com/go/privacy/dlp/apiv2beta2" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + } +} diff --git a/vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/mock_test.go b/vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/mock_test.go new file mode 100644 index 0000000..6451737 --- /dev/null +++ b/vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/mock_test.go @@ -0,0 +1,1596 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. 
DO NOT EDIT. + +package dlp + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta2" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockDlpServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + dlppb.DlpServiceServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockDlpServer) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest) (*dlppb.InspectContentResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.InspectContentResponse), nil +} + +func (s *mockDlpServer) RedactImage(ctx context.Context, req *dlppb.RedactImageRequest) (*dlppb.RedactImageResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.RedactImageResponse), nil +} + +func (s *mockDlpServer) DeidentifyContent(ctx context.Context, req *dlppb.DeidentifyContentRequest) (*dlppb.DeidentifyContentResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.DeidentifyContentResponse), nil +} + +func (s *mockDlpServer) ReidentifyContent(ctx context.Context, req *dlppb.ReidentifyContentRequest) (*dlppb.ReidentifyContentResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.ReidentifyContentResponse), nil +} + +func (s *mockDlpServer) InspectDataSource(ctx context.Context, req *dlppb.InspectDataSourceRequest) (*dlppb.DlpJob, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.DlpJob), nil +} + +func (s *mockDlpServer) AnalyzeDataSourceRisk(ctx context.Context, req *dlppb.AnalyzeDataSourceRiskRequest) (*dlppb.DlpJob, error) { + md, _ := 
metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.DlpJob), nil +} + +func (s *mockDlpServer) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequest) (*dlppb.ListInfoTypesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.ListInfoTypesResponse), nil +} + +func (s *mockDlpServer) CreateInspectTemplate(ctx context.Context, req *dlppb.CreateInspectTemplateRequest) (*dlppb.InspectTemplate, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.InspectTemplate), nil +} + +func (s *mockDlpServer) UpdateInspectTemplate(ctx context.Context, req *dlppb.UpdateInspectTemplateRequest) (*dlppb.InspectTemplate, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.InspectTemplate), nil +} + +func (s *mockDlpServer) GetInspectTemplate(ctx context.Context, req *dlppb.GetInspectTemplateRequest) (*dlppb.InspectTemplate, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.InspectTemplate), nil +} + +func (s *mockDlpServer) ListInspectTemplates(ctx context.Context, req *dlppb.ListInspectTemplatesRequest) (*dlppb.ListInspectTemplatesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.ListInspectTemplatesResponse), nil +} + +func (s *mockDlpServer) DeleteInspectTemplate(ctx context.Context, req *dlppb.DeleteInspectTemplateRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockDlpServer) CreateDeidentifyTemplate(ctx context.Context, req *dlppb.CreateDeidentifyTemplateRequest) (*dlppb.DeidentifyTemplate, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, 
fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.DeidentifyTemplate), nil +} + +func (s *mockDlpServer) UpdateDeidentifyTemplate(ctx context.Context, req *dlppb.UpdateDeidentifyTemplateRequest) (*dlppb.DeidentifyTemplate, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.DeidentifyTemplate), nil +} + +func (s *mockDlpServer) GetDeidentifyTemplate(ctx context.Context, req *dlppb.GetDeidentifyTemplateRequest) (*dlppb.DeidentifyTemplate, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.DeidentifyTemplate), nil +} + +func (s *mockDlpServer) ListDeidentifyTemplates(ctx context.Context, req *dlppb.ListDeidentifyTemplatesRequest) (*dlppb.ListDeidentifyTemplatesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.ListDeidentifyTemplatesResponse), nil +} + +func (s *mockDlpServer) DeleteDeidentifyTemplate(ctx context.Context, req *dlppb.DeleteDeidentifyTemplateRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockDlpServer) ListDlpJobs(ctx context.Context, req *dlppb.ListDlpJobsRequest) (*dlppb.ListDlpJobsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.ListDlpJobsResponse), nil +} + +func (s *mockDlpServer) GetDlpJob(ctx context.Context, req *dlppb.GetDlpJobRequest) (*dlppb.DlpJob, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.DlpJob), nil +} + +func (s *mockDlpServer) DeleteDlpJob(ctx context.Context, req *dlppb.DeleteDlpJobRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} 
+ +func (s *mockDlpServer) CancelDlpJob(ctx context.Context, req *dlppb.CancelDlpJobRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. +var clientOpt option.ClientOption + +var ( + mockDlp mockDlpServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + dlppb.RegisterDlpServiceServer(serv, &mockDlp) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestDlpServiceInspectContent(t *testing.T) { + var expectedResponse *dlppb.InspectContentResponse = &dlppb.InspectContentResponse{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.InspectContentRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.InspectContent(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceInspectContentError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.InspectContentRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.InspectContent(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceRedactImage(t *testing.T) { + var redactedImage []byte = []byte("28") + var extractedText string = "extractedText998260012" + var expectedResponse = &dlppb.RedactImageResponse{ + RedactedImage: redactedImage, + ExtractedText: extractedText, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.RedactImageRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.RedactImage(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func 
TestDlpServiceRedactImageError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.RedactImageRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.RedactImage(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceDeidentifyContent(t *testing.T) { + var expectedResponse *dlppb.DeidentifyContentResponse = &dlppb.DeidentifyContentResponse{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.DeidentifyContentRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.DeidentifyContent(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceDeidentifyContentError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.DeidentifyContentRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.DeidentifyContent(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceReidentifyContent(t *testing.T) { + var expectedResponse *dlppb.ReidentifyContentResponse = &dlppb.ReidentifyContentResponse{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.ReidentifyContentRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ReidentifyContent(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceReidentifyContentError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.ReidentifyContentRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ReidentifyContent(context.Background(), request) + + if st, ok := 
gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceInspectDataSource(t *testing.T) { + var name string = "name3373707" + var expectedResponse = &dlppb.DlpJob{ + Name: name, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.InspectDataSourceRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.InspectDataSource(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceInspectDataSourceError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.InspectDataSourceRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.InspectDataSource(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceAnalyzeDataSourceRisk(t *testing.T) { + var name string = "name3373707" + var expectedResponse = &dlppb.DlpJob{ + Name: name, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.AnalyzeDataSourceRiskRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeDataSourceRisk(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceAnalyzeDataSourceRiskError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.AnalyzeDataSourceRiskRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeDataSourceRisk(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceListInfoTypes(t *testing.T) { + var expectedResponse *dlppb.ListInfoTypesResponse = &dlppb.ListInfoTypesResponse{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + 
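// Appending to resps[:0] resets the shared mock's response queue in place, so each test consumes only the response it queued. +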
var request *dlppb.ListInfoTypesRequest = &dlppb.ListInfoTypesRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInfoTypes(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceListInfoTypesError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var request *dlppb.ListInfoTypesRequest = &dlppb.ListInfoTypesRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInfoTypes(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceCreateInspectTemplate(t *testing.T) { + var name string = "name3373707" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &dlppb.InspectTemplate{ + Name: name, + DisplayName: displayName, + Description: description, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.CreateInspectTemplateRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateInspectTemplate(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceCreateInspectTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.CreateInspectTemplateRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateInspectTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceUpdateInspectTemplate(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &dlppb.InspectTemplate{ + Name: name2, + DisplayName: displayName, + Description: description, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("organizations/%s/inspectTemplates/%s", "[ORGANIZATION]", "[INSPECT_TEMPLATE]") + var request = &dlppb.UpdateInspectTemplateRequest{ + Name: formattedName, + } + + c, err := 
NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateInspectTemplate(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceUpdateInspectTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("organizations/%s/inspectTemplates/%s", "[ORGANIZATION]", "[INSPECT_TEMPLATE]") + var request = &dlppb.UpdateInspectTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateInspectTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceGetInspectTemplate(t *testing.T) { + var name string = "name3373707" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &dlppb.InspectTemplate{ + Name: name, + DisplayName: displayName, + Description: description, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var request *dlppb.GetInspectTemplateRequest = &dlppb.GetInspectTemplateRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetInspectTemplate(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceGetInspectTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var request *dlppb.GetInspectTemplateRequest = &dlppb.GetInspectTemplateRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetInspectTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceListInspectTemplates(t *testing.T) { + var nextPageToken string = "" + var inspectTemplatesElement *dlppb.InspectTemplate = &dlppb.InspectTemplate{} + var inspectTemplates = []*dlppb.InspectTemplate{inspectTemplatesElement} + var expectedResponse = &dlppb.ListInspectTemplatesResponse{ + NextPageToken: nextPageToken, + InspectTemplates: inspectTemplates, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.ListInspectTemplatesRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := 
c.ListInspectTemplates(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.InspectTemplates[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceListInspectTemplatesError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.ListInspectTemplatesRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInspectTemplates(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceDeleteInspectTemplate(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("organizations/%s/inspectTemplates/%s", "[ORGANIZATION]", "[INSPECT_TEMPLATE]") + var request = &dlppb.DeleteInspectTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteInspectTemplate(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDlpServiceDeleteInspectTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("organizations/%s/inspectTemplates/%s", "[ORGANIZATION]", "[INSPECT_TEMPLATE]") + var request = &dlppb.DeleteInspectTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteInspectTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestDlpServiceCreateDeidentifyTemplate(t *testing.T) { + var name string = "name3373707" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &dlppb.DeidentifyTemplate{ + Name: name, + DisplayName: displayName, + Description: description, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.CreateDeidentifyTemplateRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateDeidentifyTemplate(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := 
request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceCreateDeidentifyTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.CreateDeidentifyTemplateRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateDeidentifyTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceUpdateDeidentifyTemplate(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &dlppb.DeidentifyTemplate{ + Name: name2, + DisplayName: displayName, + Description: description, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("organizations/%s/deidentifyTemplates/%s", "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]") + var request = &dlppb.UpdateDeidentifyTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateDeidentifyTemplate(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceUpdateDeidentifyTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("organizations/%s/deidentifyTemplates/%s", "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]") + var request = &dlppb.UpdateDeidentifyTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateDeidentifyTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceGetDeidentifyTemplate(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &dlppb.DeidentifyTemplate{ + Name: name2, + DisplayName: displayName, + Description: description, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("organizations/%s/deidentifyTemplates/%s", "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]") + var request = &dlppb.GetDeidentifyTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + 
+ resp, err := c.GetDeidentifyTemplate(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceGetDeidentifyTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("organizations/%s/deidentifyTemplates/%s", "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]") + var request = &dlppb.GetDeidentifyTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDeidentifyTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceListDeidentifyTemplates(t *testing.T) { + var nextPageToken string = "" + var deidentifyTemplatesElement *dlppb.DeidentifyTemplate = &dlppb.DeidentifyTemplate{} + var deidentifyTemplates = []*dlppb.DeidentifyTemplate{deidentifyTemplatesElement} + var expectedResponse = &dlppb.ListDeidentifyTemplatesResponse{ + NextPageToken: nextPageToken, + DeidentifyTemplates: deidentifyTemplates, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.ListDeidentifyTemplatesRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDeidentifyTemplates(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.DeidentifyTemplates[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceListDeidentifyTemplatesError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.ListDeidentifyTemplatesRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDeidentifyTemplates(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceDeleteDeidentifyTemplate(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("organizations/%s/deidentifyTemplates/%s", "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]") + var request = 
&dlppb.DeleteDeidentifyTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteDeidentifyTemplate(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDlpServiceDeleteDeidentifyTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("organizations/%s/deidentifyTemplates/%s", "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]") + var request = &dlppb.DeleteDeidentifyTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteDeidentifyTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestDlpServiceListDlpJobs(t *testing.T) { + var nextPageToken string = "" + var jobsElement *dlppb.DlpJob = &dlppb.DlpJob{} + var jobs = []*dlppb.DlpJob{jobsElement} + var expectedResponse = &dlppb.ListDlpJobsResponse{ + NextPageToken: nextPageToken, + Jobs: jobs, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.ListDlpJobsRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDlpJobs(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Jobs[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceListDlpJobsError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.ListDlpJobsRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDlpJobs(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceGetDlpJob(t *testing.T) { + var name2 string = "name2-1052831874" + var expectedResponse = &dlppb.DlpJob{ + Name: name2, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/dlpJobs/%s", "[PROJECT]", "[DLP_JOB]") + var request = &dlppb.GetDlpJobRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDlpJob(context.Background(), request) + + if err != nil { + 
t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceGetDlpJobError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/dlpJobs/%s", "[PROJECT]", "[DLP_JOB]") + var request = &dlppb.GetDlpJobRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDlpJob(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceDeleteDlpJob(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/dlpJobs/%s", "[PROJECT]", "[DLP_JOB]") + var request = &dlppb.DeleteDlpJobRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteDlpJob(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDlpServiceDeleteDlpJobError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/dlpJobs/%s", "[PROJECT]", "[DLP_JOB]") + var request = &dlppb.DeleteDlpJobRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteDlpJob(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestDlpServiceCancelDlpJob(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/dlpJobs/%s", "[PROJECT]", "[DLP_JOB]") + var request = &dlppb.CancelDlpJobRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.CancelDlpJob(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDlpServiceCancelDlpJobError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/dlpJobs/%s", "[PROJECT]", "[DLP_JOB]") + var request = &dlppb.CancelDlpJobRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.CancelDlpJob(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected 
grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} diff --git a/vendor/cloud.google.com/go/profiler/busybench/busybench.go b/vendor/cloud.google.com/go/profiler/busybench/busybench.go new file mode 100644 index 0000000..44d25b1 --- /dev/null +++ b/vendor/cloud.google.com/go/profiler/busybench/busybench.go @@ -0,0 +1,101 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "bytes" + "cloud.google.com/go/profiler" + "compress/gzip" + "flag" + "log" + "math/rand" + "sync" + "time" +) + +var ( + service = flag.String("service", "", "service name") + mutexProfiling = flag.Bool("mutex_profiling", false, "enable mutex profiling") + duration = flag.Int("duration", 600, "duration of the benchmark in seconds") + apiAddr = flag.String("api_address", "", "API address of the profiler (e.g. 'cloudprofiler.googleapis.com:443')") +) + +// busywork continuously generates 1MiB of random data and compresses it +// throwing away the result. +func busywork(mu *sync.Mutex) { + ticker := time.NewTicker(time.Duration(*duration) * time.Second) + defer ticker.Stop() + for { + select { + case <-ticker.C: + return + default: + mu.Lock() + busyworkOnce() + mu.Unlock() + } + } +} + +func busyworkOnce() { + data := make([]byte, 1024*1024) + rand.Read(data) + + var b bytes.Buffer + gz := gzip.NewWriter(&b) + if _, err := gz.Write(data); err != nil { + log.Printf("Failed to write to gzip stream: %v", err) + return + } + if err := gz.Flush(); err != nil { + log.Printf("Failed to flush to gzip stream: %v", err) + return + } + if err := gz.Close(); err != nil { + log.Printf("Failed to close gzip stream: %v", err) + } + // Throw away the result. +} + +func main() { + flag.Parse() + + if *service == "" { + log.Print("Service name must be configured using --service flag.") + } else if err := profiler.Start( + profiler.Config{ + Service: *service, + MutexProfiling: *mutexProfiling, + DebugLogging: true, + APIAddr: *apiAddr, + }); err != nil { + log.Printf("Failed to start the profiler: %v", err) + } else { + mu := new(sync.Mutex) + var wg sync.WaitGroup + wg.Add(5) + for i := 0; i < 5; i++ { + go func() { + defer wg.Done() + busywork(mu) + }() + } + wg.Wait() + } + + log.Printf("busybench finished profiling.") + // Do not exit, since the pod in the GKE test is set to always restart. + select {} +} diff --git a/vendor/cloud.google.com/go/profiler/integration-test.sh b/vendor/cloud.google.com/go/profiler/integration-test.sh new file mode 100644 index 0000000..15772d4 --- /dev/null +++ b/vendor/cloud.google.com/go/profiler/integration-test.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +# Fail on any error. +set -eo pipefail + +# Display commands being run. 
+set -x + +cd git/gocloud +COMMIT=$(git rev-parse HEAD) + +# Set $GOPATH +export GOPATH="$HOME/go" +GOCLOUD_HOME=$GOPATH/src/cloud.google.com/go +mkdir -p $GOCLOUD_HOME + +# Move code into $GOPATH and get dependencies +cp -R ./* $GOCLOUD_HOME +cd $GOCLOUD_HOME/internal/kokoro +# Don't print out encryption keys, etc +set +x +key=$(cat "$KOKORO_ARTIFACTS_DIR/keystore/72523_encrypted_ba2d6f7723ed_key") +iv=$(cat "$KOKORO_ARTIFACTS_DIR/keystore/72523_encrypted_ba2d6f7723ed_iv") +pass=$(cat "$KOKORO_ARTIFACTS_DIR/keystore/72523_encrypted_ba2d6f7723ed_pass") + +openssl aes-256-cbc -K $key -iv $iv -pass pass:$pass -in kokoro-key.json.enc -out key.json -d +set -x + +export GOOGLE_APPLICATION_CREDENTIALS="$(pwd)/key.json" +export GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" +export GCLOUD_TESTS_GOLANG_ZONE="us-west1-a" +export GCLOUD_TESTS_GOLANG_BUCKET="dulcet-port-762-go-cloud-profiler-test" + +cd $GOCLOUD_HOME/profiler +go get -t -tags=integration . +go test -timeout=60m -parallel=5 -tags=integration -run TestAgentIntegration -commit="$COMMIT" diff --git a/vendor/cloud.google.com/go/profiler/integration_test.go b/vendor/cloud.google.com/go/profiler/integration_test.go new file mode 100644 index 0000000..870010d --- /dev/null +++ b/vendor/cloud.google.com/go/profiler/integration_test.go @@ -0,0 +1,266 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build integration,go1.7 + +package profiler + +import ( + "bytes" + "flag" + "fmt" + "os" + "testing" + "text/template" + "time" + + "cloud.google.com/go/profiler/proftest" + "golang.org/x/net/context" + "golang.org/x/oauth2/google" + compute "google.golang.org/api/compute/v1" +) + +var ( + commit = flag.String("commit", "", "git commit to test") + runID = time.Now().Unix() +) + +const ( + cloudScope = "https://www.googleapis.com/auth/cloud-platform" + benchFinishString = "busybench finished profiling" +) + +const startupTemplate = ` +#! /bin/bash + +( +# Shut down the VM in 5 minutes after this script exits +# to stop accounting the VM for billing and cores quota. +trap "sleep 300 && poweroff" EXIT + +# Fail on any error. +set -eo pipefail + +# Display commands being run. +set -x + +# Install git +apt-get update >/dev/null +apt-get -y -q install git >/dev/null + +# Install desired Go version +mkdir -p /tmp/bin +curl -sL -o /tmp/bin/gimme https://raw.githubusercontent.com/travis-ci/gimme/master/gimme +chmod +x /tmp/bin/gimme +export PATH=$PATH:/tmp/bin + +eval "$(gimme {{.GoVersion}})" + +# Set $GOPATH +export GOPATH="$HOME/go" + +export GOCLOUD_HOME=$GOPATH/src/cloud.google.com/go +mkdir -p $GOCLOUD_HOME + +# Install agent +git clone https://code.googlesource.com/gocloud $GOCLOUD_HOME >/dev/null + +cd $GOCLOUD_HOME/profiler/busybench +git reset --hard {{.Commit}} +go get -v >/dev/null + +# Run benchmark with agent +go run busybench.go --service="{{.Service}}" --mutex_profiling="{{.MutexProfiling}}" + +# Write output to serial port 2 with timestamp. 
+) 2>&1 | while read line; do echo "$(date): ${line}"; done >/dev/ttyS1 +` + +const dockerfileFmt = `FROM golang +RUN git clone https://code.googlesource.com/gocloud /go/src/cloud.google.com/go \ + && cd /go/src/cloud.google.com/go/profiler/busybench && git reset --hard %s \ + && go get -v && go install -v +CMD ["busybench", "--service", "%s"] + ` + +type goGCETestCase struct { + proftest.InstanceConfig + name string + goVersion string + mutexProfiling bool + wantProfileTypes []string +} + +func (tc *goGCETestCase) initializeStartupScript(template *template.Template) error { + var buf bytes.Buffer + err := template.Execute(&buf, + struct { + Service string + GoVersion string + Commit string + MutexProfiling bool + }{ + Service: tc.name, + GoVersion: tc.goVersion, + Commit: *commit, + MutexProfiling: tc.mutexProfiling, + }) + if err != nil { + return fmt.Errorf("failed to render startup script for %s: %v", tc.name, err) + } + tc.StartupScript = buf.String() + return nil +} + +func TestAgentIntegration(t *testing.T) { + projectID := os.Getenv("GCLOUD_TESTS_GOLANG_PROJECT_ID") + if projectID == "" { + t.Fatalf("Getenv(GCLOUD_TESTS_GOLANG_PROJECT_ID) got empty string") + } + + zone := os.Getenv("GCLOUD_TESTS_GOLANG_ZONE") + if zone == "" { + t.Fatalf("Getenv(GCLOUD_TESTS_GOLANG_ZONE) got empty string") + } + + if *commit == "" { + t.Fatal("commit flag is not set") + } + + ctx := context.Background() + + client, err := google.DefaultClient(ctx, cloudScope) + if err != nil { + t.Fatalf("failed to get default client: %v", err) + } + + computeService, err := compute.New(client) + if err != nil { + t.Fatalf("failed to initialize compute service: %v", err) + } + + template, err := template.New("startupScript").Parse(startupTemplate) + if err != nil { + t.Fatalf("failed to parse startup script template: %v", err) + } + + tr := proftest.TestRunner{ + Client: client, + } + + gceTr := proftest.GCETestRunner{ + TestRunner: tr, + ComputeService: computeService, + } + + testcases := []goGCETestCase{ + { + InstanceConfig: proftest.InstanceConfig{ + ProjectID: projectID, + Zone: zone, + Name: fmt.Sprintf("profiler-test-go110-%d", runID), + MachineType: "n1-standard-1", + }, + name: fmt.Sprintf("profiler-test-go110-%d-gce", runID), + wantProfileTypes: []string{"CPU", "HEAP", "THREADS", "CONTENTION"}, + goVersion: "1.10", + mutexProfiling: true, + }, + { + InstanceConfig: proftest.InstanceConfig{ + ProjectID: projectID, + Zone: zone, + Name: fmt.Sprintf("profiler-test-go19-%d", runID), + MachineType: "n1-standard-1", + }, + name: fmt.Sprintf("profiler-test-go19-%d-gce", runID), + wantProfileTypes: []string{"CPU", "HEAP", "THREADS", "CONTENTION"}, + goVersion: "1.9", + mutexProfiling: true, + }, + { + InstanceConfig: proftest.InstanceConfig{ + ProjectID: projectID, + Zone: zone, + Name: fmt.Sprintf("profiler-test-go18-%d", runID), + MachineType: "n1-standard-1", + }, + name: fmt.Sprintf("profiler-test-go18-%d-gce", runID), + wantProfileTypes: []string{"CPU", "HEAP", "THREADS", "CONTENTION"}, + goVersion: "1.8", + mutexProfiling: true, + }, + { + InstanceConfig: proftest.InstanceConfig{ + ProjectID: projectID, + Zone: zone, + Name: fmt.Sprintf("profiler-test-go17-%d", runID), + MachineType: "n1-standard-1", + }, + name: fmt.Sprintf("profiler-test-go17-%d-gce", runID), + wantProfileTypes: []string{"CPU", "HEAP", "THREADS"}, + goVersion: "1.7", + }, + { + InstanceConfig: proftest.InstanceConfig{ + ProjectID: projectID, + Zone: zone, + Name: fmt.Sprintf("profiler-test-go16-%d", runID), + MachineType: 
"n1-standard-1", + }, + name: fmt.Sprintf("profiler-test-go16-%d-gce", runID), + wantProfileTypes: []string{"CPU", "HEAP", "THREADS"}, + goVersion: "1.6", + }, + } + + for _, tc := range testcases { + tc := tc // capture range variable + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + if err := tc.initializeStartupScript(template); err != nil { + t.Fatalf("failed to initialize startup script") + } + + if err := gceTr.StartInstance(ctx, &tc.InstanceConfig); err != nil { + t.Fatal(err) + } + defer func() { + if gceTr.DeleteInstance(ctx, &tc.InstanceConfig); err != nil { + t.Fatal(err) + } + }() + + timeoutCtx, cancel := context.WithTimeout(ctx, time.Minute*25) + defer cancel() + if err := gceTr.PollForSerialOutput(timeoutCtx, &tc.InstanceConfig, benchFinishString); err != nil { + t.Fatal(err) + } + + timeNow := time.Now() + endTime := timeNow.Format(time.RFC3339) + startTime := timeNow.Add(-1 * time.Hour).Format(time.RFC3339) + for _, pType := range tc.wantProfileTypes { + pr, err := tr.QueryProfiles(tc.ProjectID, tc.name, startTime, endTime, pType) + if err != nil { + t.Errorf("QueryProfiles(%s, %s, %s, %s, %s) got error: %v", tc.ProjectID, tc.name, startTime, endTime, pType, err) + continue + } + if err := pr.HasFunction("busywork"); err != nil { + t.Error(err) + } + } + }) + } +} diff --git a/vendor/cloud.google.com/go/profiler/mocks/mock_profiler_client.go b/vendor/cloud.google.com/go/profiler/mocks/mock_profiler_client.go new file mode 100644 index 0000000..13e9e77 --- /dev/null +++ b/vendor/cloud.google.com/go/profiler/mocks/mock_profiler_client.go @@ -0,0 +1,78 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Automatically generated by MockGen. DO NOT EDIT! +// Source: google.golang.org/genproto/googleapis/devtools/cloudprofiler/v2 (interfaces: ProfilerServiceClient) + +package mocks + +import ( + gomock "github.com/golang/mock/gomock" + context "golang.org/x/net/context" + v2 "google.golang.org/genproto/googleapis/devtools/cloudprofiler/v2" + grpc "google.golang.org/grpc" +) + +// Mock of ProfilerServiceClient interface +type MockProfilerServiceClient struct { + ctrl *gomock.Controller + recorder *_MockProfilerServiceClientRecorder +} + +// Recorder for MockProfilerServiceClient (not exported) +type _MockProfilerServiceClientRecorder struct { + mock *MockProfilerServiceClient +} + +func NewMockProfilerServiceClient(ctrl *gomock.Controller) *MockProfilerServiceClient { + mock := &MockProfilerServiceClient{ctrl: ctrl} + mock.recorder = &_MockProfilerServiceClientRecorder{mock} + return mock +} + +func (_m *MockProfilerServiceClient) EXPECT() *_MockProfilerServiceClientRecorder { + return _m.recorder +} + +func (_m *MockProfilerServiceClient) CreateProfile(_param0 context.Context, _param1 *v2.CreateProfileRequest, _param2 ...grpc.CallOption) (*v2.Profile, error) { + _s := []interface{}{_param0, _param1} + for _, _x := range _param2 { + _s = append(_s, _x) + } + ret := _m.ctrl.Call(_m, "CreateProfile", _s...) 
+ ret0, _ := ret[0].(*v2.Profile) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockProfilerServiceClientRecorder) CreateProfile(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + _s := append([]interface{}{arg0, arg1}, arg2...) + return _mr.mock.ctrl.RecordCall(_mr.mock, "CreateProfile", _s...) +} + +func (_m *MockProfilerServiceClient) UpdateProfile(_param0 context.Context, _param1 *v2.UpdateProfileRequest, _param2 ...grpc.CallOption) (*v2.Profile, error) { + _s := []interface{}{_param0, _param1} + for _, _x := range _param2 { + _s = append(_s, _x) + } + ret := _m.ctrl.Call(_m, "UpdateProfile", _s...) + ret0, _ := ret[0].(*v2.Profile) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockProfilerServiceClientRecorder) UpdateProfile(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + _s := append([]interface{}{arg0, arg1}, arg2...) + return _mr.mock.ctrl.RecordCall(_mr.mock, "UpdateProfile", _s...) +} diff --git a/vendor/cloud.google.com/go/profiler/mutex.go b/vendor/cloud.google.com/go/profiler/mutex.go new file mode 100644 index 0000000..84e92de --- /dev/null +++ b/vendor/cloud.google.com/go/profiler/mutex.go @@ -0,0 +1,25 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +package profiler + +import "runtime" + +func enableMutexProfiling() bool { + // One percent of mutex contention events are profiled. + runtime.SetMutexProfileFraction(100) + return true +} diff --git a/vendor/cloud.google.com/go/profiler/mutex_go17.go b/vendor/cloud.google.com/go/profiler/mutex_go17.go new file mode 100644 index 0000000..4c7a7c0 --- /dev/null +++ b/vendor/cloud.google.com/go/profiler/mutex_go17.go @@ -0,0 +1,21 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.8 + +package profiler + +func enableMutexProfiling() bool { + return false +} diff --git a/vendor/cloud.google.com/go/profiler/profiler.go b/vendor/cloud.google.com/go/profiler/profiler.go new file mode 100644 index 0000000..3317f28 --- /dev/null +++ b/vendor/cloud.google.com/go/profiler/profiler.go @@ -0,0 +1,506 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package profiler is a client for the Stackdriver Profiler service. +// +// This package is still experimental and subject to change. +// +// Usage example: +// +// import "cloud.google.com/go/profiler" +// ... +// if err := profiler.Start(profiler.Config{Service: "my-service"}); err != nil { +// // TODO: Handle error. +// } +// +// Calling Start will start a goroutine to collect profiles and upload to +// the profiler server, at the rhythm specified by the server. +// +// The caller must provide the service string in the config, and may provide +// other information as well. See Config for details. +// +// Profiler has CPU, heap and goroutine profiling enabled by default. Mutex +// profiling can be enabled in the config. Note that goroutine and mutex +// profiles are shown as "threads" and "contention" profiles in the profiler +// UI. +package profiler + +import ( + "bytes" + "errors" + "fmt" + "log" + "os" + "runtime" + "runtime/pprof" + "sync" + "time" + + gcemd "cloud.google.com/go/compute/metadata" + "cloud.google.com/go/internal/version" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "github.com/google/pprof/profile" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + gtransport "google.golang.org/api/transport/grpc" + pb "google.golang.org/genproto/googleapis/devtools/cloudprofiler/v2" + edpb "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + grpcmd "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +var ( + config Config + startOnce sync.Once + mutexEnabled bool + // The functions below are stubbed to be overrideable for testing. + getProjectID = gcemd.ProjectID + getInstanceName = gcemd.InstanceName + getZone = gcemd.Zone + startCPUProfile = pprof.StartCPUProfile + stopCPUProfile = pprof.StopCPUProfile + writeHeapProfile = pprof.WriteHeapProfile + sleep = gax.Sleep + dialGRPC = gtransport.Dial + onGCE = gcemd.OnGCE +) + +const ( + apiAddress = "cloudprofiler.googleapis.com:443" + xGoogAPIMetadata = "x-goog-api-client" + zoneNameLabel = "zone" + versionLabel = "version" + instanceLabel = "instance" + scope = "https://www.googleapis.com/auth/monitoring.write" + + initialBackoff = time.Second + // Ensure the agent will recover within 1 hour. + maxBackoff = time.Hour + backoffMultiplier = 1.3 // Backoff envelope increases by this factor on each retry. + retryInfoMetadata = "google.rpc.retryinfo-bin" +) + +// Config is the profiler configuration. +type Config struct { + // Service (or deprecated Target) must be provided to start the profiler. + // It specifies the name of the service under which the profiled data + // will be recorded and exposed at the Profiler UI for the project. + // You can specify an arbitrary string, but see Deployment.target at + // https://github.com/googleapis/googleapis/blob/master/google/devtools/cloudprofiler/v2/profiler.proto + // for restrictions. 
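+ // A lowercase name such as "my-service" is acceptable; uppercase + // letters and spaces do not satisfy the documented pattern.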
+ // NOTE: The string should be the same across different replicas of + // your service so that the globally constant profiling rate is + // maintained. Do not put things like PID or unique pod ID in the name. + Service string + + // ServiceVersion is an optional field specifying the version of the + // service. It can be an arbitrary string. Profiler profiles + // once per minute for each version of each service in each zone. + // ServiceVersion defaults to an empty string. + ServiceVersion string + + // DebugLogging enables detailed debug logging from profiler. It + // defaults to false. + DebugLogging bool + + // MutexProfiling enables mutex profiling. It defaults to false. + // Note that mutex profiling is not supported by Go versions older + // than Go 1.8. + MutexProfiling bool + + // When true, collecting the heap profiles is disabled. + NoHeapProfiling bool + + // When true, collecting the goroutine profiles is disabled. + NoGoroutineProfiling bool + + // ProjectID is the Cloud Console project ID to use instead of + // the one read from the VM metadata server. + // + // Set this if you are running the agent in your local environment + // or anywhere else outside of Google Cloud Platform. + ProjectID string + + // APIAddr is the gRPC endpoint to use to connect to the profiler + // agent API. Defaults to the production environment, overridable + // for testing. + APIAddr string + + // Target is deprecated, use Service instead. + Target string + + instance string + zone string +} + +// startError represents the error that occurred during the +// initialization and starting of the agent. +var startError error + +// Start starts a goroutine to collect and upload profiles. The +// caller must provide the service string in the config. See +// Config for details. Start should only be called once. Any +// additional calls will be ignored. +func Start(cfg Config, options ...option.ClientOption) error { + startOnce.Do(func() { + startError = start(cfg, options...) + }) + return startError +} + +func start(cfg Config, options ...option.ClientOption) error { + if err := initializeConfig(cfg); err != nil { + debugLog("failed to initialize config: %v", err) + return err + } + if config.MutexProfiling { + if mutexEnabled = enableMutexProfiling(); !mutexEnabled { + return fmt.Errorf("mutex profiling is not supported by %s, requires Go 1.8 or later", runtime.Version()) + } + } + + ctx := context.Background() + + opts := []option.ClientOption{ + option.WithEndpoint(config.APIAddr), + option.WithScopes(scope), + } + opts = append(opts, options...) + + conn, err := dialGRPC(ctx, opts...) + if err != nil { + debugLog("failed to dial GRPC: %v", err) + return err + } + + a := initializeAgent(pb.NewProfilerServiceClient(conn)) + go pollProfilerService(withXGoogHeader(ctx), a) + return nil +} + +func debugLog(format string, e ...interface{}) { + if config.DebugLogging { + log.Printf(format, e...) + } +} + +// agent polls the profiler server for instructions on behalf of a task, +// and collects and uploads profiles as requested. +type agent struct { + client pb.ProfilerServiceClient + deployment *pb.Deployment + profileLabels map[string]string + profileTypes []pb.ProfileType +} + +// abortedBackoffDuration retrieves the retry duration from gRPC trailing +// metadata, which is set by the profiler server.
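+// The duration travels as a serialized google.rpc.RetryInfo message under the +// "google.rpc.retryinfo-bin" trailer key; createTrailers in profiler_test.go +// below builds such a trailer the same way, roughly: +// +//	b, _ := proto.Marshal(&edpb.RetryInfo{RetryDelay: ptypes.DurationProto(dur)}) +//	md := grpcmd.New(map[string]string{retryInfoMetadata: string(b)})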
+func abortedBackoffDuration(md grpcmd.MD) (time.Duration, error) { + elem := md[retryInfoMetadata] + if len(elem) <= 0 { + return 0, errors.New("no retry info") + } + + var retryInfo edpb.RetryInfo + if err := proto.Unmarshal([]byte(elem[0]), &retryInfo); err != nil { + return 0, err + } else if time, err := ptypes.Duration(retryInfo.RetryDelay); err != nil { + return 0, err + } else { + if time < 0 { + return 0, errors.New("negative retry duration") + } + return time, nil + } +} + +type retryer struct { + backoff gax.Backoff + md grpcmd.MD +} + +func (r *retryer) Retry(err error) (time.Duration, bool) { + st, _ := status.FromError(err) + if st != nil && st.Code() == codes.Aborted { + dur, err := abortedBackoffDuration(r.md) + if err == nil { + return dur, true + } + debugLog("failed to get backoff duration: %v", err) + } + return r.backoff.Pause(), true +} + +// createProfile talks to the profiler server to create profile. In +// case of error, the goroutine will sleep and retry. Sleep duration may +// be specified by the server. Otherwise it will be an exponentially +// increasing value, bounded by maxBackoff. +func (a *agent) createProfile(ctx context.Context) *pb.Profile { + req := pb.CreateProfileRequest{ + Deployment: a.deployment, + ProfileType: a.profileTypes, + } + + var p *pb.Profile + md := grpcmd.New(map[string]string{}) + + gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + p, err = a.client.CreateProfile(ctx, &req, grpc.Trailer(&md)) + if err != nil { + debugLog("failed to create a profile, will retry: %v", err) + } + return err + }, gax.WithRetry(func() gax.Retryer { + return &retryer{ + backoff: gax.Backoff{ + Initial: initialBackoff, + Max: maxBackoff, + Multiplier: backoffMultiplier, + }, + md: md, + } + })) + + debugLog("successfully created profile %v", p.GetProfileType()) + return p +} + +func (a *agent) profileAndUpload(ctx context.Context, p *pb.Profile) { + var prof bytes.Buffer + pt := p.GetProfileType() + + switch pt { + case pb.ProfileType_CPU: + duration, err := ptypes.Duration(p.Duration) + if err != nil { + debugLog("failed to get profile duration: %v", err) + return + } + if err := startCPUProfile(&prof); err != nil { + debugLog("failed to start CPU profile: %v", err) + return + } + sleep(ctx, duration) + stopCPUProfile() + case pb.ProfileType_HEAP: + if err := writeHeapProfile(&prof); err != nil { + debugLog("failed to write heap profile: %v", err) + return + } + case pb.ProfileType_THREADS: + if err := pprof.Lookup("goroutine").WriteTo(&prof, 0); err != nil { + debugLog("failed to create goroutine profile: %v", err) + return + } + case pb.ProfileType_CONTENTION: + duration, err := ptypes.Duration(p.Duration) + if err != nil { + debugLog("failed to get profile duration: %v", err) + return + } + if err := deltaMutexProfile(ctx, duration, &prof); err != nil { + debugLog("failed to create mutex profile: %v", err) + return + } + default: + debugLog("unexpected profile type: %v", pt) + return + } + + // Starting Go 1.9 the profiles are symbolized by runtime/pprof. + // TODO(jianqiaoli): Remove the symbolization code when we decide to + // stop supporting Go 1.8. + if !shouldAssumeSymbolized && pt != pb.ProfileType_CONTENTION { + if err := parseAndSymbolize(&prof); err != nil { + debugLog("failed to symbolize profile: %v", err) + } + } + + p.ProfileBytes = prof.Bytes() + p.Labels = a.profileLabels + req := pb.UpdateProfileRequest{Profile: p} + + // Upload profile, discard profile in case of error. 
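+ // A failed upload is logged but not retried; pollProfilerService simply + // asks the server for a new profile on its next iteration, so an error + // here only costs this one profile.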
+ debugLog("start uploading profile") + if _, err := a.client.UpdateProfile(ctx, &req); err != nil { + debugLog("failed to upload profile: %v", err) + } +} + +// deltaMutexProfile writes mutex profile changes over a time period specified +// with 'duration' to 'prof'. +func deltaMutexProfile(ctx context.Context, duration time.Duration, prof *bytes.Buffer) error { + if !mutexEnabled { + return errors.New("mutex profiling is not enabled") + } + p0, err := mutexProfile() + if err != nil { + return err + } + sleep(ctx, duration) + p, err := mutexProfile() + if err != nil { + return err + } + + // TODO(jianqiaoli): Remove this check when github.com/google/pprof/issues/242 + // is fixed. + if len(p0.Mapping) > 0 { + p0.Scale(-1) + p, err = profile.Merge([]*profile.Profile{p0, p}) + if err != nil { + return err + } + } + + // The mutex profile is not symbolized by runtime.pprof until + // golang.org/issue/21474 is fixed in go1.10. + symbolize(p) + return p.Write(prof) +} + +func mutexProfile() (*profile.Profile, error) { + p := pprof.Lookup("mutex") + if p == nil { + return nil, errors.New("mutex profiling is not supported") + } + var buf bytes.Buffer + if err := p.WriteTo(&buf, 0); err != nil { + return nil, err + } + return profile.Parse(&buf) +} + +// withXGoogHeader sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func withXGoogHeader(ctx context.Context, keyval ...string) context.Context { + kv := append([]string{"gl-go", version.Go(), "gccl", version.Repo}, keyval...) + kv = append(kv, "gax", gax.Version, "grpc", grpc.Version) + + md, _ := grpcmd.FromOutgoingContext(ctx) + md = md.Copy() + md[xGoogAPIMetadata] = []string{gax.XGoogHeader(kv...)} + return grpcmd.NewOutgoingContext(ctx, md) +} + +func initializeAgent(c pb.ProfilerServiceClient) *agent { + labels := map[string]string{} + if config.zone != "" { + labels[zoneNameLabel] = config.zone + } + if config.ServiceVersion != "" { + labels[versionLabel] = config.ServiceVersion + } + d := &pb.Deployment{ + ProjectId: config.ProjectID, + Target: config.Target, + Labels: labels, + } + + profileLabels := map[string]string{} + + if config.instance != "" { + profileLabels[instanceLabel] = config.instance + } + + profileTypes := []pb.ProfileType{pb.ProfileType_CPU} + if !config.NoHeapProfiling { + profileTypes = append(profileTypes, pb.ProfileType_HEAP) + } + if !config.NoGoroutineProfiling { + profileTypes = append(profileTypes, pb.ProfileType_THREADS) + } + if mutexEnabled { + profileTypes = append(profileTypes, pb.ProfileType_CONTENTION) + } + + return &agent{ + client: c, + deployment: d, + profileLabels: profileLabels, + profileTypes: profileTypes, + } +} + +func initializeConfig(cfg Config) error { + config = cfg + + switch { + case config.Service != "": + config.Target = config.Service + case config.Target == "": + config.Target = os.Getenv("GAE_SERVICE") + } + + if config.Target == "" { + return errors.New("service name must be specified in the configuration") + } + + if config.ServiceVersion == "" { + config.ServiceVersion = os.Getenv("GAE_VERSION") + } + + if projectID := os.Getenv("GOOGLE_CLOUD_PROJECT"); config.ProjectID == "" && projectID != "" { + // Cloud Shell and App Engine set this environment variable to the project + // ID, so use it if present. In case of App Engine the project ID is also + // available from the GCE metadata server, but by using the environment + // variable saves one request to the metadata server. 
The environment + // project ID is only used if no project ID is provided in the + // configuration. + config.ProjectID = projectID + } + if onGCE() { + var err error + if config.ProjectID == "" { + if config.ProjectID, err = getProjectID(); err != nil { + return fmt.Errorf("failed to get the project ID from Compute Engine: %v", err) + } + } + + if config.zone, err = getZone(); err != nil { + return fmt.Errorf("failed to get zone from Compute Engine: %v", err) + } + + if config.instance, err = getInstanceName(); err != nil { + return fmt.Errorf("failed to get instance from Compute Engine: %v", err) + } + + } else { + if config.ProjectID == "" { + return fmt.Errorf("project ID must be specified in the configuration if running outside of GCP") + } + } + + if config.APIAddr == "" { + config.APIAddr = apiAddress + } + return nil +} + +// pollProfilerService starts an endless loop to poll the profiler +// server for instructions, and collects and uploads profiles as +// requested. +func pollProfilerService(ctx context.Context, a *agent) { + debugLog("profiler has started") + for { + p := a.createProfile(ctx) + a.profileAndUpload(ctx, p) + } +} diff --git a/vendor/cloud.google.com/go/profiler/profiler_example_test.go b/vendor/cloud.google.com/go/profiler/profiler_example_test.go new file mode 100644 index 0000000..e961ee6 --- /dev/null +++ b/vendor/cloud.google.com/go/profiler/profiler_example_test.go @@ -0,0 +1,25 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profiler_test + +import ( + "cloud.google.com/go/profiler" +) + +func ExampleStart() { + if err := profiler.Start(profiler.Config{Service: "my-service", ServiceVersion: "v1"}); err != nil { + //TODO: Handle error. + } +} diff --git a/vendor/cloud.google.com/go/profiler/profiler_test.go b/vendor/cloud.google.com/go/profiler/profiler_test.go new file mode 100644 index 0000000..3d30c39 --- /dev/null +++ b/vendor/cloud.google.com/go/profiler/profiler_test.go @@ -0,0 +1,876 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package profiler + +import ( + "bytes" + "compress/gzip" + "errors" + "fmt" + "io" + "log" + "math/rand" + "os" + "runtime" + "strings" + "sync" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "cloud.google.com/go/profiler/mocks" + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "github.com/google/pprof/profile" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + gtransport "google.golang.org/api/transport/grpc" + pb "google.golang.org/genproto/googleapis/devtools/cloudprofiler/v2" + edpb "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/codes" + grpcmd "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +const ( + testProjectID = "test-project-ID" + testInstance = "test-instance" + testZone = "test-zone" + testTarget = "test-target" + testService = "test-service" + testSvcVersion = "test-service-version" + testProfileDuration = time.Second * 10 + testServerTimeout = time.Second * 15 +) + +func createTestDeployment() *pb.Deployment { + labels := map[string]string{ + zoneNameLabel: testZone, + versionLabel: testSvcVersion, + } + return &pb.Deployment{ + ProjectId: testProjectID, + Target: testService, + Labels: labels, + } +} + +func createTestAgent(psc pb.ProfilerServiceClient) *agent { + return &agent{ + client: psc, + deployment: createTestDeployment(), + profileLabels: map[string]string{instanceLabel: testInstance}, + profileTypes: []pb.ProfileType{pb.ProfileType_CPU, pb.ProfileType_HEAP, pb.ProfileType_THREADS}, + } +} + +func createTrailers(dur time.Duration) map[string]string { + b, _ := proto.Marshal(&edpb.RetryInfo{ + RetryDelay: ptypes.DurationProto(dur), + }) + return map[string]string{ + retryInfoMetadata: string(b), + } +} + +func TestCreateProfile(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mpc := mocks.NewMockProfilerServiceClient(ctrl) + a := createTestAgent(mpc) + p := &pb.Profile{Name: "test_profile"} + wantRequest := pb.CreateProfileRequest{ + Deployment: a.deployment, + ProfileType: a.profileTypes, + } + + mpc.EXPECT().CreateProfile(ctx, gomock.Eq(&wantRequest), gomock.Any()).Times(1).Return(p, nil) + + gotP := a.createProfile(ctx) + + if !testutil.Equal(gotP, p) { + t.Errorf("CreateProfile() got wrong profile, got %v, want %v", gotP, p) + } +} + +func TestProfileAndUpload(t *testing.T) { + oldStartCPUProfile, oldStopCPUProfile, oldWriteHeapProfile, oldSleep := startCPUProfile, stopCPUProfile, writeHeapProfile, sleep + defer func() { + startCPUProfile, stopCPUProfile, writeHeapProfile, sleep = oldStartCPUProfile, oldStopCPUProfile, oldWriteHeapProfile, oldSleep + }() + + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + errFunc := func(io.Writer) error { return errors.New("") } + testDuration := time.Second * 5 + tests := []struct { + profileType pb.ProfileType + duration *time.Duration + startCPUProfileFunc func(io.Writer) error + writeHeapProfileFunc func(io.Writer) error + wantBytes []byte + }{ + { + profileType: pb.ProfileType_CPU, + duration: &testDuration, + startCPUProfileFunc: func(w io.Writer) error { + w.Write([]byte{1}) + return nil + }, + writeHeapProfileFunc: errFunc, + wantBytes: []byte{1}, + }, + { + profileType: pb.ProfileType_CPU, + startCPUProfileFunc: errFunc, + writeHeapProfileFunc: errFunc, + }, + { + profileType: pb.ProfileType_CPU, + duration: &testDuration, + startCPUProfileFunc: func(w io.Writer) error 
{ + w.Write([]byte{2}) + return nil + }, + writeHeapProfileFunc: func(w io.Writer) error { + w.Write([]byte{3}) + return nil + }, + wantBytes: []byte{2}, + }, + { + profileType: pb.ProfileType_HEAP, + startCPUProfileFunc: errFunc, + writeHeapProfileFunc: func(w io.Writer) error { + w.Write([]byte{4}) + return nil + }, + wantBytes: []byte{4}, + }, + { + profileType: pb.ProfileType_HEAP, + startCPUProfileFunc: errFunc, + writeHeapProfileFunc: errFunc, + }, + { + profileType: pb.ProfileType_HEAP, + startCPUProfileFunc: func(w io.Writer) error { + w.Write([]byte{5}) + return nil + }, + writeHeapProfileFunc: func(w io.Writer) error { + w.Write([]byte{6}) + return nil + }, + wantBytes: []byte{6}, + }, + { + profileType: pb.ProfileType_PROFILE_TYPE_UNSPECIFIED, + startCPUProfileFunc: func(w io.Writer) error { + w.Write([]byte{7}) + return nil + }, + writeHeapProfileFunc: func(w io.Writer) error { + w.Write([]byte{8}) + return nil + }, + }, + } + + for _, tt := range tests { + mpc := mocks.NewMockProfilerServiceClient(ctrl) + a := createTestAgent(mpc) + startCPUProfile = tt.startCPUProfileFunc + stopCPUProfile = func() {} + writeHeapProfile = tt.writeHeapProfileFunc + var gotSleep *time.Duration + sleep = func(ctx context.Context, d time.Duration) error { + gotSleep = &d + return nil + } + p := &pb.Profile{ProfileType: tt.profileType} + if tt.duration != nil { + p.Duration = ptypes.DurationProto(*tt.duration) + } + if tt.wantBytes != nil { + wantProfile := &pb.Profile{ + ProfileType: p.ProfileType, + Duration: p.Duration, + ProfileBytes: tt.wantBytes, + Labels: a.profileLabels, + } + wantRequest := pb.UpdateProfileRequest{ + Profile: wantProfile, + } + mpc.EXPECT().UpdateProfile(ctx, gomock.Eq(&wantRequest)).Times(1) + } else { + mpc.EXPECT().UpdateProfile(gomock.Any(), gomock.Any()).MaxTimes(0) + } + + a.profileAndUpload(ctx, p) + + if tt.duration == nil { + if gotSleep != nil { + t.Errorf("profileAndUpload(%v) slept for: %v, want no sleep", p, gotSleep) + } + } else { + if gotSleep == nil { + t.Errorf("profileAndUpload(%v) didn't sleep, want sleep for: %v", p, tt.duration) + } else if *gotSleep != *tt.duration { + t.Errorf("profileAndUpload(%v) slept for wrong duration, got: %v, want: %v", p, gotSleep, tt.duration) + } + } + } +} + +func TestRetry(t *testing.T) { + normalDuration := time.Second * 3 + negativeDuration := time.Second * -3 + + tests := []struct { + trailers map[string]string + wantPause *time.Duration + }{ + { + createTrailers(normalDuration), + &normalDuration, + }, + { + createTrailers(negativeDuration), + nil, + }, + { + map[string]string{retryInfoMetadata: "wrong format"}, + nil, + }, + { + map[string]string{}, + nil, + }, + } + + for _, tt := range tests { + md := grpcmd.New(tt.trailers) + r := &retryer{ + backoff: gax.Backoff{ + Initial: initialBackoff, + Max: maxBackoff, + Multiplier: backoffMultiplier, + }, + md: md, + } + + pause, shouldRetry := r.Retry(status.Error(codes.Aborted, "")) + + if !shouldRetry { + t.Error("retryer.Retry() returned shouldRetry false, want true") + } + + if tt.wantPause != nil { + if pause != *tt.wantPause { + t.Errorf("retryer.Retry() returned wrong pause, got: %v, want: %v", pause, tt.wantPause) + } + } else { + if pause > initialBackoff { + t.Errorf("retryer.Retry() returned wrong pause, got: %v, want: < %v", pause, initialBackoff) + } + } + } + + md := grpcmd.New(map[string]string{}) + + r := &retryer{ + backoff: gax.Backoff{ + Initial: initialBackoff, + Max: maxBackoff, + Multiplier: backoffMultiplier, + }, + md: md, + } + for i := 0; i < 
100; i++ { + pause, shouldRetry := r.Retry(errors.New("")) + if !shouldRetry { + t.Errorf("retryer.Retry() called %v times, returned shouldRetry false, want true", i) + } + if pause > maxBackoff { + t.Errorf("retryer.Retry() called %v times, returned wrong pause, got: %v, want: < %v", i, pause, maxBackoff) + } + } +} + +func TestWithXGoogHeader(t *testing.T) { + ctx := withXGoogHeader(context.Background()) + md, _ := grpcmd.FromOutgoingContext(ctx) + + if xg := md[xGoogAPIMetadata]; len(xg) == 0 { + t.Errorf("withXGoogHeader() sets empty xGoogHeader") + } else { + if !strings.Contains(xg[0], "gl-go/") { + t.Errorf("withXGoogHeader() got: %v, want gl-go key", xg[0]) + } + if !strings.Contains(xg[0], "gccl/") { + t.Errorf("withXGoogHeader() got: %v, want gccl key", xg[0]) + } + if !strings.Contains(xg[0], "gax/") { + t.Errorf("withXGoogHeader() got: %v, want gax key", xg[0]) + } + if !strings.Contains(xg[0], "grpc/") { + t.Errorf("withXGoogHeader() got: %v, want grpc key", xg[0]) + } + } +} + +func TestInitializeAgent(t *testing.T) { + oldConfig, oldMutexEnabled := config, mutexEnabled + defer func() { + config, mutexEnabled = oldConfig, oldMutexEnabled + }() + + for _, tt := range []struct { + config Config + enableMutex bool + wantProfileTypes []pb.ProfileType + wantDeploymentLabels map[string]string + wantProfileLabels map[string]string + }{ + { + config: Config{ServiceVersion: testSvcVersion, zone: testZone}, + wantProfileTypes: []pb.ProfileType{pb.ProfileType_CPU, pb.ProfileType_HEAP, pb.ProfileType_THREADS}, + wantDeploymentLabels: map[string]string{zoneNameLabel: testZone, versionLabel: testSvcVersion}, + wantProfileLabels: map[string]string{}, + }, + { + config: Config{zone: testZone}, + wantProfileTypes: []pb.ProfileType{pb.ProfileType_CPU, pb.ProfileType_HEAP, pb.ProfileType_THREADS}, + wantDeploymentLabels: map[string]string{zoneNameLabel: testZone}, + wantProfileLabels: map[string]string{}, + }, + { + config: Config{ServiceVersion: testSvcVersion}, + wantProfileTypes: []pb.ProfileType{pb.ProfileType_CPU, pb.ProfileType_HEAP, pb.ProfileType_THREADS}, + wantDeploymentLabels: map[string]string{versionLabel: testSvcVersion}, + wantProfileLabels: map[string]string{}, + }, + { + config: Config{instance: testInstance}, + wantProfileTypes: []pb.ProfileType{pb.ProfileType_CPU, pb.ProfileType_HEAP, pb.ProfileType_THREADS}, + wantDeploymentLabels: map[string]string{}, + wantProfileLabels: map[string]string{instanceLabel: testInstance}, + }, + { + config: Config{instance: testInstance}, + enableMutex: true, + wantProfileTypes: []pb.ProfileType{pb.ProfileType_CPU, pb.ProfileType_HEAP, pb.ProfileType_THREADS, pb.ProfileType_CONTENTION}, + wantDeploymentLabels: map[string]string{}, + wantProfileLabels: map[string]string{instanceLabel: testInstance}, + }, + { + config: Config{NoHeapProfiling: true}, + wantProfileTypes: []pb.ProfileType{pb.ProfileType_CPU, pb.ProfileType_THREADS}, + wantDeploymentLabels: map[string]string{}, + wantProfileLabels: map[string]string{}, + }, + { + config: Config{NoHeapProfiling: true, NoGoroutineProfiling: true}, + wantProfileTypes: []pb.ProfileType{pb.ProfileType_CPU}, + wantDeploymentLabels: map[string]string{}, + wantProfileLabels: map[string]string{}, + }, + } { + + config = tt.config + config.ProjectID = testProjectID + config.Target = testTarget + mutexEnabled = tt.enableMutex + a := initializeAgent(nil) + + wantDeployment := &pb.Deployment{ + ProjectId: testProjectID, + Target: testTarget, + Labels: tt.wantDeploymentLabels, + } + if 
!testutil.Equal(a.deployment, wantDeployment) { + t.Errorf("initializeAgent() got deployment: %v, want %v", a.deployment, wantDeployment) + } + if !testutil.Equal(a.profileLabels, tt.wantProfileLabels) { + t.Errorf("initializeAgent() got profile labels: %v, want %v", a.profileLabels, tt.wantProfileLabels) + } + if !testutil.Equal(a.profileTypes, tt.wantProfileTypes) { + t.Errorf("initializeAgent() got profile types: %v, want %v", a.profileTypes, tt.wantProfileTypes) + } + } +} + +func TestInitializeConfig(t *testing.T) { + oldConfig, oldService, oldVersion, oldEnvProjectID, oldGetProjectID, oldGetInstanceName, oldGetZone, oldOnGCE := config, os.Getenv("GAE_SERVICE"), os.Getenv("GAE_VERSION"), os.Getenv("GOOGLE_CLOUD_PROJECT"), getProjectID, getInstanceName, getZone, onGCE + defer func() { + config, getProjectID, getInstanceName, getZone, onGCE = oldConfig, oldGetProjectID, oldGetInstanceName, oldGetZone, oldOnGCE + if err := os.Setenv("GAE_SERVICE", oldService); err != nil { + t.Fatal(err) + } + if err := os.Setenv("GAE_VERSION", oldVersion); err != nil { + t.Fatal(err) + } + if err := os.Setenv("GOOGLE_CLOUD_PROJECT", oldEnvProjectID); err != nil { + t.Fatal(err) + } + }() + const ( + testGAEService = "test-gae-service" + testGAEVersion = "test-gae-version" + testGCEProjectID = "test-gce-project-id" + testEnvProjectID = "test-env-project-id" + ) + for _, tt := range []struct { + desc string + config Config + wantConfig Config + wantErrorString string + onGAE bool + onGCE bool + envProjectID bool + }{ + { + "accepts service name", + Config{Service: testService}, + Config{Target: testService, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance}, + "", + false, + true, + false, + }, + { + "accepts target name", + Config{Target: testTarget}, + Config{Target: testTarget, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance}, + "", + false, + true, + false, + }, + { + "env project overrides GCE project", + Config{Service: testService}, + Config{Target: testService, ProjectID: testEnvProjectID, zone: testZone, instance: testInstance}, + "", + false, + true, + true, + }, + { + "requires service name", + Config{}, + Config{}, + "service name must be specified in the configuration", + false, + true, + false, + }, + { + "accepts service name from config and service version from GAE", + Config{Service: testService}, + Config{Target: testService, ServiceVersion: testGAEVersion, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance}, + "", + true, + true, + false, + }, + { + "accepts target name from config and service version from GAE", + Config{Target: testTarget}, + Config{Target: testTarget, ServiceVersion: testGAEVersion, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance}, + "", + true, + true, + false, + }, + { + "reads both service name and version from GAE env vars", + Config{}, + Config{Target: testGAEService, ServiceVersion: testGAEVersion, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance}, + "", + true, + true, + false, + }, + { + "accepts service version from config", + Config{Service: testService, ServiceVersion: testSvcVersion}, + Config{Target: testService, ServiceVersion: testSvcVersion, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance}, + "", + false, + true, + false, + }, + { + "configured version has priority over GAE-provided version", + Config{Service: testService, ServiceVersion: testSvcVersion}, + Config{Target: testService, ServiceVersion: testSvcVersion, ProjectID: 
testGCEProjectID, zone: testZone, instance: testInstance}, + "", + true, + true, + false, + }, + { + "configured project ID has priority over metadata-provided project ID", + Config{Service: testService, ProjectID: testProjectID}, + Config{Target: testService, ProjectID: testProjectID, zone: testZone, instance: testInstance}, + "", + false, + true, + false, + }, + { + "configured project ID has priority over environment project ID", + Config{Service: testService, ProjectID: testProjectID}, + Config{Target: testService, ProjectID: testProjectID}, + "", + false, + false, + true, + }, + { + "requires project ID if not on GCE", + Config{Service: testService}, + Config{Target: testService}, + "project ID must be specified in the configuration if running outside of GCP", + false, + false, + false, + }, + } { + t.Logf("Running test: %s", tt.desc) + envService, envVersion := "", "" + if tt.onGAE { + envService, envVersion = testGAEService, testGAEVersion + } + if err := os.Setenv("GAE_SERVICE", envService); err != nil { + t.Fatal(err) + } + if err := os.Setenv("GAE_VERSION", envVersion); err != nil { + t.Fatal(err) + } + if tt.onGCE { + onGCE = func() bool { return true } + getProjectID = func() (string, error) { return testGCEProjectID, nil } + getZone = func() (string, error) { return testZone, nil } + getInstanceName = func() (string, error) { return testInstance, nil } + } else { + onGCE = func() bool { return false } + getProjectID = func() (string, error) { return "", fmt.Errorf("test get project id error") } + getZone = func() (string, error) { return "", fmt.Errorf("test get zone error") } + getInstanceName = func() (string, error) { return "", fmt.Errorf("test get instance error") } + } + envProjectID := "" + if tt.envProjectID { + envProjectID = testEnvProjectID + } + if err := os.Setenv("GOOGLE_CLOUD_PROJECT", envProjectID); err != nil { + t.Fatal(err) + } + + errorString := "" + if err := initializeConfig(tt.config); err != nil { + errorString = err.Error() + } + + if !strings.Contains(errorString, tt.wantErrorString) { + t.Errorf("initializeConfig(%v) got error: %v, want contain %v", tt.config, errorString, tt.wantErrorString) + } + if tt.wantErrorString == "" { + tt.wantConfig.APIAddr = apiAddress + } + tt.wantConfig.Service = tt.config.Service + if config != tt.wantConfig { + t.Errorf("initializeConfig(%v) got: %v, want %v", tt.config, config, tt.wantConfig) + } + } + + for _, tt := range []struct { + wantErrorString string + getProjectIDError bool + getZoneError bool + getInstanceError bool + }{ + { + wantErrorString: "failed to get the project ID from Compute Engine:", + getProjectIDError: true, + }, + { + wantErrorString: "failed to get zone from Compute Engine:", + getZoneError: true, + }, + { + wantErrorString: "failed to get instance from Compute Engine:", + getInstanceError: true, + }, + } { + onGCE = func() bool { return true } + if tt.getProjectIDError { + getProjectID = func() (string, error) { return "", fmt.Errorf("test get project ID error") } + } else { + getProjectID = func() (string, error) { return testGCEProjectID, nil } + } + + if tt.getZoneError { + getZone = func() (string, error) { return "", fmt.Errorf("test get zone error") } + } else { + getZone = func() (string, error) { return testZone, nil } + } + + if tt.getInstanceError { + getInstanceName = func() (string, error) { return "", fmt.Errorf("test get instance error") } + } else { + getInstanceName = func() (string, error) { return testInstance, nil } + } + errorString := "" + if err := 
initializeConfig(Config{Service: testService}); err != nil { + errorString = err.Error() + } + + if !strings.Contains(errorString, tt.wantErrorString) { + t.Errorf("initializeConfig() got error: %v, want contain %v", errorString, tt.wantErrorString) + } + } +} + +type fakeProfilerServer struct { + pb.ProfilerServiceServer + count int + gotProfiles map[string][]byte + done chan bool +} + +func (fs *fakeProfilerServer) CreateProfile(ctx context.Context, in *pb.CreateProfileRequest) (*pb.Profile, error) { + fs.count++ + switch fs.count { + case 1: + return &pb.Profile{Name: "testCPU", ProfileType: pb.ProfileType_CPU, Duration: ptypes.DurationProto(testProfileDuration)}, nil + case 2: + return &pb.Profile{Name: "testHeap", ProfileType: pb.ProfileType_HEAP}, nil + default: + select {} + } +} + +func (fs *fakeProfilerServer) UpdateProfile(ctx context.Context, in *pb.UpdateProfileRequest) (*pb.Profile, error) { + switch in.Profile.ProfileType { + case pb.ProfileType_CPU: + fs.gotProfiles["CPU"] = in.Profile.ProfileBytes + case pb.ProfileType_HEAP: + fs.gotProfiles["HEAP"] = in.Profile.ProfileBytes + fs.done <- true + } + + return in.Profile, nil +} + +func profileeLoop(quit chan bool) { + for { + select { + case <-quit: + return + default: + profileeWork() + } + } +} + +func profileeWork() { + data := make([]byte, 1024*1024) + rand.Read(data) + + var b bytes.Buffer + gz := gzip.NewWriter(&b) + if _, err := gz.Write(data); err != nil { + log.Println("failed to write to gzip stream", err) + return + } + if err := gz.Flush(); err != nil { + log.Println("failed to flush to gzip stream", err) + return + } + if err := gz.Close(); err != nil { + log.Println("failed to close gzip stream", err) + } +} + +func validateProfile(rawData []byte, wantFunctionName string) error { + p, err := profile.ParseData(rawData) + if err != nil { + return fmt.Errorf("ParseData failed: %v", err) + } + + if len(p.Sample) == 0 { + return fmt.Errorf("profile contains zero samples: %v", p) + } + + if len(p.Location) == 0 { + return fmt.Errorf("profile contains zero locations: %v", p) + } + + if len(p.Function) == 0 { + return fmt.Errorf("profile contains zero functions: %v", p) + } + + for _, l := range p.Location { + if len(l.Line) > 0 && l.Line[0].Function != nil && strings.Contains(l.Line[0].Function.Name, wantFunctionName) { + return nil + } + } + return fmt.Errorf("wanted function name %s not found in the profile", wantFunctionName) +} + +func TestDeltaMutexProfile(t *testing.T) { + oldMutexEnabled, oldMaxProcs := mutexEnabled, runtime.GOMAXPROCS(10) + defer func() { + mutexEnabled = oldMutexEnabled + runtime.GOMAXPROCS(oldMaxProcs) + }() + if mutexEnabled = enableMutexProfiling(); !mutexEnabled { + t.Skip("Go too old - mutex profiling not supported.") + } + + hog(time.Second, mutexHog) + go func() { + hog(2*time.Second, backgroundHog) + }() + + var prof bytes.Buffer + if err := deltaMutexProfile(context.Background(), time.Second, &prof); err != nil { + t.Fatalf("deltaMutexProfile() got error: %v", err) + } + p, err := profile.Parse(&prof) + if err != nil { + t.Fatalf("profile.Parse() got error: %v", err) + } + + if s := sum(p, "mutexHog"); s != 0 { + t.Errorf("mutexHog found in the delta mutex profile (sum=%d):\n%s", s, p) + } + if s := sum(p, "backgroundHog"); s <= 0 { + t.Errorf("backgroundHog not in the delta mutex profile (sum=%d):\n%s", s, p) + } +} + +// sum returns the sum of all mutex counts from the samples whose +// stacks include the specified function name. 
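+// Value[0] of a mutex-profile sample is the contention count; the delta +// test above asserts on these counts for mutexHog and backgroundHog.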
+func sum(p *profile.Profile, fname string) int64 { + locIDs := map[*profile.Location]bool{} + for _, loc := range p.Location { + for _, l := range loc.Line { + if strings.Contains(l.Function.Name, fname) { + locIDs[loc] = true + break + } + } + } + var s int64 + for _, sample := range p.Sample { + for _, loc := range sample.Location { + if locIDs[loc] { + s += sample.Value[0] + break + } + } + } + return s +} + +func mutexHog(mu1, mu2 *sync.Mutex, start time.Time, dt time.Duration) { + for time.Since(start) < dt { + mu1.Lock() + runtime.Gosched() + mu2.Lock() + mu1.Unlock() + mu2.Unlock() + } +} + +// backgroundHog is identical to mutexHog. We keep them separate +// in order to distinguish them with function names in the stack trace. +func backgroundHog(mu1, mu2 *sync.Mutex, start time.Time, dt time.Duration) { + for time.Since(start) < dt { + mu1.Lock() + runtime.Gosched() + mu2.Lock() + mu1.Unlock() + mu2.Unlock() + } +} + +func hog(dt time.Duration, hogger func(mu1, mu2 *sync.Mutex, start time.Time, dt time.Duration)) { + start := time.Now() + mu1 := new(sync.Mutex) + mu2 := new(sync.Mutex) + var wg sync.WaitGroup + wg.Add(10) + for i := 0; i < 10; i++ { + go func() { + defer wg.Done() + hogger(mu1, mu2, start, dt) + }() + } + wg.Wait() +} + +func TestAgentWithServer(t *testing.T) { + oldDialGRPC, oldConfig := dialGRPC, config + defer func() { + dialGRPC, config = oldDialGRPC, oldConfig + }() + + srv, err := testutil.NewServer() + if err != nil { + t.Fatalf("testutil.NewServer(): %v", err) + } + fakeServer := &fakeProfilerServer{gotProfiles: map[string][]byte{}, done: make(chan bool)} + pb.RegisterProfilerServiceServer(srv.Gsrv, fakeServer) + + srv.Start() + + dialGRPC = gtransport.DialInsecure + if err := Start(Config{ + Target: testTarget, + ProjectID: testProjectID, + APIAddr: srv.Addr, + instance: testInstance, + zone: testZone, + }); err != nil { + t.Fatalf("Start(): %v", err) + } + + quitProfilee := make(chan bool) + go profileeLoop(quitProfilee) + + select { + case <-fakeServer.done: + case <-time.After(testServerTimeout): + t.Errorf("got timeout after %v, want fake server done", testServerTimeout) + } + quitProfilee <- true + + for _, pType := range []string{"CPU", "HEAP"} { + if profile, ok := fakeServer.gotProfiles[pType]; !ok { + t.Errorf("fakeServer.gotProfiles[%s] got no profile, want profile", pType) + } else if err := validateProfile(profile, "profilee"); err != nil { + t.Errorf("validateProfile(%s) got error: %v", pType, err) + } + } +} diff --git a/vendor/cloud.google.com/go/profiler/proftest/proftest.go b/vendor/cloud.google.com/go/profiler/proftest/proftest.go new file mode 100644 index 0000000..21ff0de --- /dev/null +++ b/vendor/cloud.google.com/go/profiler/proftest/proftest.go @@ -0,0 +1,501 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Package proftest contains test helpers for profiler agent integration tests. +// This package is experimental. 
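The helpers in this file compose into a fairly fixed lifecycle. As a rough sketch of a GCE-based agent test built on them (assumptions: placeholder project, zone, names, and startup script; an OAuth2-authorized *http.Client and compute.Service supplied by the caller; serial port 2 is assumed to map to /dev/ttyS1 inside the VM):

```go
// Hypothetical GCE test flow using the proftest helpers below; none of the
// placeholder values are part of the vendored code.
package gcetest

import (
	"fmt"
	"log"
	"net/http"
	"time"

	"cloud.google.com/go/profiler/proftest"
	"golang.org/x/net/context"
	compute "google.golang.org/api/compute/v1"
)

func runGCETest(ctx context.Context, client *http.Client, cs *compute.Service) error {
	tr := proftest.GCETestRunner{
		TestRunner:     proftest.TestRunner{Client: client},
		ComputeService: cs,
	}
	inst := &proftest.InstanceConfig{
		ProjectID:   "my-project",    // placeholder
		Zone:        "us-central1-a", // placeholder
		Name:        fmt.Sprintf("profiler-test-%d", time.Now().Unix()),
		MachineType: "n1-standard-1",
		// The script is expected to run the profiled workload and write a
		// marker to serial port 2 (assumed /dev/ttyS1) when it is done.
		StartupScript: "#! /bin/bash\necho benchmark finished > /dev/ttyS1",
	}
	if err := tr.StartInstance(ctx, inst); err != nil {
		return err
	}
	defer func() {
		if err := tr.DeleteInstance(ctx, inst); err != nil {
			log.Printf("cleanup: %v", err)
		}
	}()

	// PollForSerialOutput reads serial port 2 until the marker appears.
	pollCtx, cancel := context.WithTimeout(ctx, 20*time.Minute)
	defer cancel()
	return tr.PollForSerialOutput(pollCtx, inst, "benchmark finished")
}
```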
+
+// golang.org/x/build/kubernetes/dialer.go imports the "context" package
+// (rather than "golang.org/x/net/context"), which does not exist in Go 1.6
+// or earlier.
+// +build go1.7
+
+package proftest
+
+import (
+	"archive/zip"
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"strings"
+	"time"
+
+	"cloud.google.com/go/storage"
+	"golang.org/x/build/kubernetes"
+	k8sapi "golang.org/x/build/kubernetes/api"
+	"golang.org/x/build/kubernetes/gke"
+	"golang.org/x/net/context"
+	cloudbuild "google.golang.org/api/cloudbuild/v1"
+	compute "google.golang.org/api/compute/v1"
+	container "google.golang.org/api/container/v1"
+	"google.golang.org/api/googleapi"
+)
+
+const (
+	monitorWriteScope = "https://www.googleapis.com/auth/monitoring.write"
+	storageReadScope  = "https://www.googleapis.com/auth/devstorage.read_only"
+)
+
+// TestRunner has common elements used for testing profiling agents on a range
+// of environments.
+type TestRunner struct {
+	Client *http.Client
+}
+
+// GCETestRunner supports testing a profiling agent on GCE.
+type GCETestRunner struct {
+	TestRunner
+	ComputeService *compute.Service
+}
+
+// GKETestRunner supports testing a profiling agent on GKE.
+type GKETestRunner struct {
+	TestRunner
+	ContainerService *container.Service
+	StorageClient    *storage.Client
+	Dockerfile       string
+}
+
+// ProfileResponse contains the response produced when querying the profile
+// server.
+type ProfileResponse struct {
+	Profile     ProfileData   `json:"profile"`
+	NumProfiles int32         `json:"numProfiles"`
+	Deployments []interface{} `json:"deployments"`
+}
+
+// ProfileData has data of a single profile.
+type ProfileData struct {
+	Samples           []int32       `json:"samples"`
+	SampleMetrics     interface{}   `json:"sampleMetrics"`
+	DefaultMetricType string        `json:"defaultMetricType"`
+	TreeNodes         interface{}   `json:"treeNodes"`
+	Functions         functionArray `json:"functions"`
+	SourceFiles       interface{}   `json:"sourceFiles"`
+}
+
+type functionArray struct {
+	Name       []string `json:"name"`
+	Sourcefile []int32  `json:"sourceFile"`
+}
+
+// InstanceConfig is the configuration for starting a single GCE instance for
+// a profiling agent test case.
+type InstanceConfig struct {
+	ProjectID     string
+	Zone          string
+	Name          string
+	StartupScript string
+	MachineType   string
+}
+
+// ClusterConfig is the configuration for starting a single GKE cluster for a
+// profiling agent test case.
+type ClusterConfig struct {
+	ProjectID       string
+	Zone            string
+	ClusterName     string
+	PodName         string
+	ImageSourceName string
+	ImageName       string
+	Bucket          string
+	Dockerfile      string
+}
+
+// HasFunction returns nil if the function is present in the profile, or, if
+// it is not present, an error with more detail on why it was not found.
+func (pr *ProfileResponse) HasFunction(functionName string) error {
+	if pr.NumProfiles == 0 {
+		return fmt.Errorf("failed to find function name %s in profile: profile response contains zero profiles: %v", functionName, pr)
+	}
+	if len(pr.Deployments) == 0 {
+		return fmt.Errorf("failed to find function name %s in profile: profile response contains zero deployments: %v", functionName, pr)
+	}
+	if len(pr.Profile.Functions.Name) == 0 {
+		return fmt.Errorf("failed to find function name %s in profile: profile does not have function data", functionName)
+	}
+
+	for _, name := range pr.Profile.Functions.Name {
+		if strings.Contains(name, functionName) {
+			return nil
+		}
+	}
+	return fmt.Errorf("failed to find function name %s in profile", functionName)
+}
+
+// StartInstance starts a GCE instance with the name, zone, and project ID
+// specified by inst, and runs the startup script specified in inst.
+func (tr *GCETestRunner) StartInstance(ctx context.Context, inst *InstanceConfig) error {
+	img, err := tr.ComputeService.Images.GetFromFamily("debian-cloud", "debian-9").Context(ctx).Do()
+	if err != nil {
+		return err
+	}
+
+	_, err = tr.ComputeService.Instances.Insert(inst.ProjectID, inst.Zone, &compute.Instance{
+		MachineType: fmt.Sprintf("zones/%s/machineTypes/%s", inst.Zone, inst.MachineType),
+		Name:        inst.Name,
+		Disks: []*compute.AttachedDisk{{
+			AutoDelete: true, // delete the disk when the VM is deleted.
+			Boot:       true,
+			Type:       "PERSISTENT",
+			Mode:       "READ_WRITE",
+			InitializeParams: &compute.AttachedDiskInitializeParams{
+				SourceImage: img.SelfLink,
+				DiskType:    fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/zones/%s/diskTypes/pd-standard", inst.ProjectID, inst.Zone),
+			},
+		}},
+		NetworkInterfaces: []*compute.NetworkInterface{{
+			Network: fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/global/networks/default", inst.ProjectID),
+			AccessConfigs: []*compute.AccessConfig{{
+				Name: "External NAT",
+			}},
+		}},
+		Metadata: &compute.Metadata{
+			Items: []*compute.MetadataItems{{
+				Key:   "startup-script",
+				Value: googleapi.String(inst.StartupScript),
+			}},
+		},
+		ServiceAccounts: []*compute.ServiceAccount{{
+			Email: "default",
+			Scopes: []string{
+				monitorWriteScope,
+			},
+		}},
+	}).Do()
+
+	return err
+}
+
+// DeleteInstance deletes the instance with the project ID, name, and zone
+// specified by inst.
+func (tr *GCETestRunner) DeleteInstance(ctx context.Context, inst *InstanceConfig) error {
+	if _, err := tr.ComputeService.Instances.Delete(inst.ProjectID, inst.Zone, inst.Name).Context(ctx).Do(); err != nil {
+		return fmt.Errorf("Instances.Delete(%s) got error: %v", inst.Name, err)
+	}
+	return nil
+}
+
+// PollForSerialOutput polls serial port 2 of the GCE instance specified by
+// inst and returns when finishString appears in the serial output of the
+// instance, or when the context times out.
+func (tr *GCETestRunner) PollForSerialOutput(ctx context.Context, inst *InstanceConfig, finishString string) error {
+	var output string
+	defer func() {
+		log.Printf("Serial port output for %s:\n%s", inst.Name, output)
+	}()
+
+	for {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-time.After(20 * time.Second):
+			resp, err := tr.ComputeService.Instances.GetSerialPortOutput(inst.ProjectID, inst.Zone, inst.Name).Port(2).Context(ctx).Do()
+			if err != nil {
+				// Transient failure.
+ log.Printf("Transient error getting serial port output from instance %s (will retry): %v", inst.Name, err) + continue + } + + if output = resp.Contents; strings.Contains(output, finishString) { + return nil + } + } + } +} + +// QueryProfiles retrieves profiles of a specific type, from a specific time +// range, associated with a particular service and project. +func (tr *TestRunner) QueryProfiles(projectID, service, startTime, endTime, profileType string) (ProfileResponse, error) { + queryURL := fmt.Sprintf("https://cloudprofiler.googleapis.com/v2/projects/%s/profiles:query", projectID) + const queryJSONFmt = `{"endTime": "%s", "profileType": "%s","startTime": "%s", "target": "%s"}` + + queryRequest := fmt.Sprintf(queryJSONFmt, endTime, profileType, startTime, service) + + resp, err := tr.Client.Post(queryURL, "application/json", strings.NewReader(queryRequest)) + if err != nil { + return ProfileResponse{}, fmt.Errorf("failed to query API: %v", err) + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return ProfileResponse{}, fmt.Errorf("failed to read response body: %v", err) + } + + var pr ProfileResponse + if err := json.Unmarshal(body, &pr); err != nil { + return ProfileResponse{}, err + } + + return pr, nil +} + +// createAndPublishDockerImage creates a docker image from source code in a GCS +// bucket and pushes the image to Google Container Registry. +func (tr *GKETestRunner) createAndPublishDockerImage(ctx context.Context, projectID, sourceBucket, sourceObject, ImageName string) error { + cloudbuildService, err := cloudbuild.New(tr.Client) + + build := &cloudbuild.Build{ + Source: &cloudbuild.Source{ + StorageSource: &cloudbuild.StorageSource{ + Bucket: sourceBucket, + Object: sourceObject, + }, + }, + Steps: []*cloudbuild.BuildStep{ + { + Name: "gcr.io/cloud-builders/docker", + Args: []string{"build", "-t", ImageName, "."}, + }, + }, + Images: []string{ImageName}, + } + + op, err := cloudbuildService.Projects.Builds.Create(projectID, build).Context(ctx).Do() + if err != nil { + return fmt.Errorf("failed to create image: %v", err) + } + opID := op.Name + + // Wait for creating image. + for { + select { + case <-ctx.Done(): + return fmt.Errorf("timed out waiting creating image") + + case <-time.After(10 * time.Second): + op, err := cloudbuildService.Operations.Get(opID).Context(ctx).Do() + if err != nil { + log.Printf("Transient error getting operation (will retry): %v", err) + break + } + if op.Done == true { + log.Printf("Published image %s to Google Container Registry.", ImageName) + return nil + } + } + } +} + +type imageResponse struct { + Manifest map[string]interface{} `json:"manifest"` + Name string `json:"name"` + Tags []string `json:"tags"` +} + +// deleteDockerImage deletes a docker image from Google Container Registry. 
+
+// deleteDockerImage deletes a docker image from Google Container Registry.
+func (tr *GKETestRunner) deleteDockerImage(ctx context.Context, imageName string) []error {
+	queryImageURL := fmt.Sprintf("https://gcr.io/v2/%s/tags/list", imageName)
+	resp, err := tr.Client.Get(queryImageURL)
+	if err != nil {
+		return []error{fmt.Errorf("failed to list tags: %v", err)}
+	}
+	defer resp.Body.Close()
+
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return []error{err}
+	}
+	var ir imageResponse
+	if err := json.Unmarshal(body, &ir); err != nil {
+		return []error{err}
+	}
+
+	const deleteImageURLFmt = "https://gcr.io/v2/%s/manifests/%s"
+	var errs []error
+	for _, tag := range ir.Tags {
+		if err := deleteDockerImageResource(tr.Client, fmt.Sprintf(deleteImageURLFmt, imageName, tag)); err != nil {
+			errs = append(errs, fmt.Errorf("failed to delete tag %s: %v", tag, err))
+		}
+	}
+
+	for manifest := range ir.Manifest {
+		if err := deleteDockerImageResource(tr.Client, fmt.Sprintf(deleteImageURLFmt, imageName, manifest)); err != nil {
+			errs = append(errs, fmt.Errorf("failed to delete manifest %s: %v", manifest, err))
+		}
+	}
+	return errs
+}
+
+func deleteDockerImageResource(client *http.Client, url string) error {
+	req, err := http.NewRequest("DELETE", url, nil)
+	if err != nil {
+		return fmt.Errorf("failed to create request: %v", err)
+	}
+	resp, err := client.Do(req)
+	if err != nil {
+		return fmt.Errorf("failed to delete resource: %v", err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusAccepted {
+		return fmt.Errorf("failed to delete resource: status code = %d", resp.StatusCode)
+	}
+	return nil
+}
+
+func (tr *GKETestRunner) createCluster(ctx context.Context, client *http.Client, projectID, zone, clusterName string) error {
+	request := &container.CreateClusterRequest{Cluster: &container.Cluster{
+		Name:             clusterName,
+		InitialNodeCount: 3,
+		NodeConfig: &container.NodeConfig{
+			OauthScopes: []string{
+				storageReadScope,
+			},
+		},
+	}}
+	op, err := tr.ContainerService.Projects.Zones.Clusters.Create(projectID, zone, request).Context(ctx).Do()
+	if err != nil {
+		return fmt.Errorf("failed to create cluster %s: %v", clusterName, err)
+	}
+	opID := op.Name
+
+	// Wait for the cluster creation to finish.
+	for {
+		select {
+		case <-ctx.Done():
+			return fmt.Errorf("timed out waiting for cluster creation")
+
+		case <-time.After(10 * time.Second):
+			op, err := tr.ContainerService.Projects.Zones.Operations.Get(projectID, zone, opID).Context(ctx).Do()
+			if err != nil {
+				log.Printf("Transient error getting operation (will retry): %v", err)
+				break
+			}
+			if op.Status == "DONE" {
+				log.Printf("Created cluster %s.", clusterName)
+				return nil
+			}
+			if op.Status == "ABORTING" {
+				return fmt.Errorf("create cluster operation was aborted")
+			}
+		}
+	}
+}
+
+func (tr *GKETestRunner) deployContainer(ctx context.Context, kubernetesClient *kubernetes.Client, podName, imageName string) error {
+	pod := &k8sapi.Pod{
+		ObjectMeta: k8sapi.ObjectMeta{
+			Name: podName,
+		},
+		Spec: k8sapi.PodSpec{
+			Containers: []k8sapi.Container{
+				{
+					Name:  "profiler-test",
+					Image: fmt.Sprintf("gcr.io/%s:latest", imageName),
+				},
+			},
+		},
+	}
+	if _, err := kubernetesClient.RunLongLivedPod(ctx, pod); err != nil {
+		return fmt.Errorf("failed to run pod %s: %v", podName, err)
+	}
+	return nil
+}
+
+// PollPodLog polls the log of the kubernetes client and returns when
+// finishString appears in the log, or when the context times out.
+func (tr *GKETestRunner) PollPodLog(ctx context.Context, kubernetesClient *kubernetes.Client, podName, finishString string) error {
+	var output string
+	defer func() {
+		log.Printf("Log for pod %s:\n%s", podName, output)
+	}()
+
+	for {
+		select {
+		case <-ctx.Done():
+			return fmt.Errorf("timed out waiting for profiling to finish on the container")
+
+		case <-time.After(20 * time.Second):
+			var err error
+			output, err = kubernetesClient.PodLog(ctx, podName)
+			if err != nil {
+				// Transient failure.
+				log.Printf("Transient error getting log (will retry): %v", err)
+				continue
+			}
+			if strings.Contains(output, finishString) {
+				return nil
+			}
+		}
+	}
+}
+
+// DeleteClusterAndImage deletes the cluster and the images used to create it.
+func (tr *GKETestRunner) DeleteClusterAndImage(ctx context.Context, cfg *ClusterConfig) []error {
+	var errs []error
+	if err := tr.StorageClient.Bucket(cfg.Bucket).Object(cfg.ImageSourceName).Delete(ctx); err != nil {
+		errs = append(errs, fmt.Errorf("failed to delete image source object: %v", err))
+	}
+	for _, err := range tr.deleteDockerImage(ctx, cfg.ImageName) {
+		errs = append(errs, fmt.Errorf("failed to delete docker image: %v", err))
+	}
+	if _, err := tr.ContainerService.Projects.Zones.Clusters.Delete(cfg.ProjectID, cfg.Zone, cfg.ClusterName).Context(ctx).Do(); err != nil {
+		errs = append(errs, fmt.Errorf("failed to delete cluster %s: %v", cfg.ClusterName, err))
+	}
+
+	return errs
+}
+
+// StartAndDeployCluster builds and publishes the image needed for the test,
+// then deploys it to the cluster specified in cfg.
+func (tr *GKETestRunner) StartAndDeployCluster(ctx context.Context, cfg *ClusterConfig) error {
+	if err := tr.uploadImageSource(ctx, cfg.Bucket, cfg.ImageSourceName, cfg.Dockerfile); err != nil {
+		return fmt.Errorf("failed to upload image source: %v", err)
+	}
+
+	createImageCtx, cancel := context.WithTimeout(ctx, 5*time.Minute)
+	defer cancel()
+	if err := tr.createAndPublishDockerImage(createImageCtx, cfg.ProjectID, cfg.Bucket, cfg.ImageSourceName, fmt.Sprintf("gcr.io/%s", cfg.ImageName)); err != nil {
+		return fmt.Errorf("failed to create and publish docker image %s: %v", cfg.ImageName, err)
+	}
+
+	kubernetesClient, err := gke.NewClient(ctx, cfg.ClusterName, gke.OptZone(cfg.Zone), gke.OptProject(cfg.ProjectID))
+	if err != nil {
+		return fmt.Errorf("failed to create new GKE client: %v", err)
+	}
+
+	deployContainerCtx, cancel := context.WithTimeout(ctx, 5*time.Minute)
+	defer cancel()
+	if err := tr.deployContainer(deployContainerCtx, kubernetesClient, cfg.PodName, cfg.ImageName); err != nil {
+		return fmt.Errorf("failed to deploy image %q to pod %q: %v", cfg.ImageName, cfg.PodName, err)
+	}
+	return nil
+}
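A sketch of a GKE-based agent test using the helpers above; all names are placeholders, the runner is assumed to be constructed with an authorized client, container service, and storage client, and the cluster is assumed to already exist, since StartAndDeployCluster does not create one:

```go
// Hypothetical GKE test flow; not part of the vendored code.
package gketest

import (
	"log"
	"time"

	"cloud.google.com/go/profiler/proftest"
	"golang.org/x/build/kubernetes/gke"
	"golang.org/x/net/context"
)

func runGKETest(ctx context.Context, tr *proftest.GKETestRunner) error {
	cfg := &proftest.ClusterConfig{
		ProjectID:       "my-project",              // placeholder
		Zone:            "us-central1-a",           // placeholder
		ClusterName:     "profiler-test-cluster",   // assumed pre-existing
		PodName:         "profiler-test-pod",
		ImageSourceName: "profiler-test-src.zip",
		ImageName:       "my-project/profiler-test",
		Bucket:          "my-bucket",
		Dockerfile:      tr.Dockerfile,
	}
	defer func() {
		for _, err := range tr.DeleteClusterAndImage(ctx, cfg) {
			log.Printf("cleanup: %v", err)
		}
	}()

	if err := tr.StartAndDeployCluster(ctx, cfg); err != nil {
		return err
	}

	// PollPodLog needs its own kubernetes client for the same cluster.
	kc, err := gke.NewClient(ctx, cfg.ClusterName, gke.OptZone(cfg.Zone), gke.OptProject(cfg.ProjectID))
	if err != nil {
		return err
	}
	pollCtx, cancel := context.WithTimeout(ctx, 20*time.Minute)
	defer cancel()
	return tr.PollPodLog(pollCtx, kc, cfg.PodName, "benchmark finished") // placeholder marker
}
```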
+
+// uploadImageSource uploads the source code for building the docker image
+// to GCS.
+func (tr *GKETestRunner) uploadImageSource(ctx context.Context, bucket, objectName, dockerfile string) error {
+	zipBuf := new(bytes.Buffer)
+	z := zip.NewWriter(zipBuf)
+	f, err := z.Create("Dockerfile")
+	if err != nil {
+		return err
+	}
+
+	if _, err := f.Write([]byte(dockerfile)); err != nil {
+		return err
+	}
+
+	if err := z.Close(); err != nil {
+		return err
+	}
+	wc := tr.StorageClient.Bucket(bucket).Object(objectName).NewWriter(ctx)
+	wc.ContentType = "application/zip"
+	wc.ACL = []storage.ACLRule{{Entity: storage.AllUsers, Role: storage.RoleReader}}
+	if _, err := wc.Write(zipBuf.Bytes()); err != nil {
+		return err
+	}
+	return wc.Close()
+}
diff --git a/vendor/cloud.google.com/go/profiler/symbolizer.go b/vendor/cloud.google.com/go/profiler/symbolizer.go
new file mode 100644
index 0000000..15e0b45
--- /dev/null
+++ b/vendor/cloud.google.com/go/profiler/symbolizer.go
@@ -0,0 +1,143 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package profiler
+
+import (
+	"bytes"
+	"regexp"
+	"runtime"
+	"strings"
+
+	"github.com/google/pprof/profile"
+)
+
+var shouldAssumeSymbolized = isSymbolizedGoVersion(runtime.Version())
+
+type function interface {
+	Name() string
+	FileLine(pc uintptr) (string, int)
+}
+
+// funcForPC is a wrapper for runtime.FuncForPC. Defined as var for testing.
+var funcForPC = func(pc uintptr) function {
+	if f := runtime.FuncForPC(pc); f != nil {
+		return f
+	}
+	return nil
+}
+
+// parseAndSymbolize parses a profile from a buffer, symbolizes it
+// if it's not yet symbolized, and writes the profile back as a
+// gzip-compressed marshaled protobuf.
+func parseAndSymbolize(data *bytes.Buffer) error {
+	p, err := profile.ParseData(data.Bytes())
+	if err != nil {
+		return err
+	}
+
+	// Do nothing if the profile is already symbolized.
+	if symbolized(p) {
+		return nil
+	}
+	// Clear the profile functions to avoid creating duplicates.
+	p.Function = nil
+	symbolize(p)
+	data.Reset()
+	return p.Write(data)
+}
+
+// isSymbolizedGoVersion returns true if the given Go version is 1.9 or
+// higher. Starting with Go 1.9, profiles are symbolized by the
+// runtime/pprof package.
+func isSymbolizedGoVersion(goVersion string) bool {
+	r, err := regexp.Compile(`go(1\.9|1\.[1-9][0-9]|[2-9]).*`)
+	if err == nil && r.MatchString(goVersion) {
+		return true
+	}
+	return false
+}
+
+// symbolized checks if all locations have symbolized function
+// information.
+func symbolized(p *profile.Profile) bool {
+	for _, l := range p.Location {
+		if len(l.Line) == 0 || l.Line[0].Function == nil {
+			return false
+		}
+	}
+	return true
+}
+
+func symbolize(p *profile.Profile) {
+	fns := profileFunctionMap{}
+	for _, l := range p.Location {
+		pc := uintptr(l.Address)
+		f := funcForPC(pc)
+		if f == nil {
+			continue
+		}
+		file, lineno := f.FileLine(pc)
+		l.Line = []profile.Line{
+			{
+				Function: fns.findOrAddFunction(f.Name(), file, p),
+				Line:     int64(lineno),
+			},
+		}
+	}
+	// Trim runtime functions. Always hide runtime.goexit. Other runtime
+	// functions are only hidden for heap profiles when they appear at the beginning.
+	isHeapProfile := p.PeriodType != nil && p.PeriodType.Type == "space"
+	for _, s := range p.Sample {
+		show := !isHeapProfile
+		var i int
+		for _, l := range s.Location {
+			if len(l.Line) > 0 && l.Line[0].Function != nil {
+				name := l.Line[0].Function.Name
+				if name == "runtime.goexit" || !show && strings.HasPrefix(name, "runtime.") {
+					continue
+				}
+			}
+			show = true
+			s.Location[i] = l
+			i++
+		}
+		// If all locations of a sample are trimmed, keep the root location.
+		if i == 0 && len(s.Location) > 0 {
+			s.Location[0] = s.Location[len(s.Location)-1]
+			i = 1
+		}
+		s.Location = s.Location[:i]
+	}
+}
+
+type profileFunctionMap map[profile.Function]*profile.Function
+
+func (fns profileFunctionMap) findOrAddFunction(name, filename string, p *profile.Profile) *profile.Function {
+	f := profile.Function{
+		Name:       name,
+		SystemName: name,
+		Filename:   filename,
+	}
+	if fp := fns[f]; fp != nil {
+		return fp
+	}
+	fp := new(profile.Function)
+	fns[f] = fp
+
+	*fp = f
+	fp.ID = uint64(len(p.Function) + 1)
+	p.Function = append(p.Function, fp)
+	return fp
+}
diff --git a/vendor/cloud.google.com/go/profiler/symbolizer_test.go b/vendor/cloud.google.com/go/profiler/symbolizer_test.go
new file mode 100644
index 0000000..779f7f3
--- /dev/null
+++ b/vendor/cloud.google.com/go/profiler/symbolizer_test.go
@@ -0,0 +1,229 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package profiler
+
+import (
+	"bytes"
+	"testing"
+
+	"cloud.google.com/go/internal/testutil"
+	"github.com/google/go-cmp/cmp/cmpopts"
+	"github.com/google/pprof/profile"
+)
+
+type fakeFunc struct {
+	name   string
+	file   string
+	lineno int
+}
+
+func (f *fakeFunc) Name() string {
+	return f.name
+}
+func (f *fakeFunc) FileLine(_ uintptr) (string, int) {
+	return f.file, f.lineno
+}
+
+var cmpOpt = cmpopts.IgnoreUnexported(profile.Profile{}, profile.Function{},
+	profile.Line{}, profile.Location{}, profile.Sample{}, profile.ValueType{})
+
+// TestRuntimeFunctionTrimming tests if symbolize trims runtime functions as intended.
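As context for the tests that follow, the call pattern around parseAndSymbolize can be sketched as a hypothetical helper written as if it lived in the profiler package (it would additionally need the "runtime/pprof" and "time" imports; time.Sleep stands in for the profiled workload):

```go
// collectSymbolizedCPUProfile is a hypothetical helper, not part of the
// vendored file, illustrating the intended use of parseAndSymbolize.
func collectSymbolizedCPUProfile(d time.Duration) (*bytes.Buffer, error) {
	var buf bytes.Buffer
	if err := pprof.StartCPUProfile(&buf); err != nil {
		return nil, err
	}
	time.Sleep(d) // stand-in for the profiled workload
	pprof.StopCPUProfile()

	// On Go 1.9 and later the runtime already writes symbolized profiles,
	// so this amounts to a validation parse; on older releases it fills in
	// function and line records via funcForPC and rewrites the buffer.
	if err := parseAndSymbolize(&buf); err != nil {
		return nil, err
	}
	return &buf, nil // gzip-compressed, symbolized profile proto
}
```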
+func TestRuntimeFunctionTrimming(t *testing.T) { + fakeFuncMap := map[uintptr]*fakeFunc{ + 0x10: &fakeFunc{"runtime.goexit", "runtime.go", 10}, + 0x20: &fakeFunc{"runtime.other", "runtime.go", 20}, + 0x30: &fakeFunc{"foo", "foo.go", 30}, + 0x40: &fakeFunc{"bar", "bar.go", 40}, + } + backupFuncForPC := funcForPC + funcForPC = func(pc uintptr) function { + return fakeFuncMap[pc] + } + defer func() { + funcForPC = backupFuncForPC + }() + testLoc := []*profile.Location{ + {ID: 1, Address: 0x10}, + {ID: 2, Address: 0x20}, + {ID: 3, Address: 0x30}, + {ID: 4, Address: 0x40}, + } + testProfile := &profile.Profile{ + Sample: []*profile.Sample{ + {Location: []*profile.Location{testLoc[0], testLoc[1], testLoc[3], testLoc[2]}}, + {Location: []*profile.Location{testLoc[1], testLoc[3], testLoc[2]}}, + {Location: []*profile.Location{testLoc[3], testLoc[2], testLoc[1]}}, + {Location: []*profile.Location{testLoc[3], testLoc[2], testLoc[0]}}, + {Location: []*profile.Location{testLoc[0], testLoc[1], testLoc[3], testLoc[0]}}, + {Location: []*profile.Location{testLoc[1], testLoc[0]}}, + }, + Location: testLoc, + } + testProfiles := make([]*profile.Profile, 2) + testProfiles[0] = testProfile.Copy() + testProfiles[1] = testProfile.Copy() + // Test case for CPU profile. + testProfiles[0].PeriodType = &profile.ValueType{Type: "cpu", Unit: "nanoseconds"} + // Test case for heap profile. + testProfiles[1].PeriodType = &profile.ValueType{Type: "space", Unit: "bytes"} + wantFunc := []*profile.Function{ + {ID: 1, Name: "runtime.goexit", SystemName: "runtime.goexit", Filename: "runtime.go"}, + {ID: 2, Name: "runtime.other", SystemName: "runtime.other", Filename: "runtime.go"}, + {ID: 3, Name: "foo", SystemName: "foo", Filename: "foo.go"}, + {ID: 4, Name: "bar", SystemName: "bar", Filename: "bar.go"}, + } + wantLoc := []*profile.Location{ + {ID: 1, Address: 0x10, Line: []profile.Line{{Function: wantFunc[0], Line: 10}}}, + {ID: 2, Address: 0x20, Line: []profile.Line{{Function: wantFunc[1], Line: 20}}}, + {ID: 3, Address: 0x30, Line: []profile.Line{{Function: wantFunc[2], Line: 30}}}, + {ID: 4, Address: 0x40, Line: []profile.Line{{Function: wantFunc[3], Line: 40}}}, + } + wantProfiles := []*profile.Profile{ + { + PeriodType: &profile.ValueType{Type: "cpu", Unit: "nanoseconds"}, + Sample: []*profile.Sample{ + {Location: []*profile.Location{wantLoc[1], wantLoc[3], wantLoc[2]}}, + {Location: []*profile.Location{wantLoc[1], wantLoc[3], wantLoc[2]}}, + {Location: []*profile.Location{wantLoc[3], wantLoc[2], wantLoc[1]}}, + {Location: []*profile.Location{wantLoc[3], wantLoc[2]}}, + {Location: []*profile.Location{wantLoc[1], wantLoc[3]}}, + {Location: []*profile.Location{wantLoc[1]}}, + }, + Location: wantLoc, + Function: wantFunc, + }, + { + PeriodType: &profile.ValueType{Type: "space", Unit: "bytes"}, + Sample: []*profile.Sample{ + {Location: []*profile.Location{wantLoc[3], wantLoc[2]}}, + {Location: []*profile.Location{wantLoc[3], wantLoc[2]}}, + {Location: []*profile.Location{wantLoc[3], wantLoc[2], wantLoc[1]}}, + {Location: []*profile.Location{wantLoc[3], wantLoc[2]}}, + {Location: []*profile.Location{wantLoc[3]}}, + {Location: []*profile.Location{wantLoc[0]}}, + }, + Location: wantLoc, + Function: wantFunc, + }, + } + for i := 0; i < 2; i++ { + symbolize(testProfiles[i]) + if !testutil.Equal(testProfiles[i], wantProfiles[i], cmpOpt) { + t.Errorf("incorrect trimming (testcase = %d): got {%v}, want {%v}", i, testProfiles[i], wantProfiles[i]) + } + } +} + +// TestParseAndSymbolize tests if parseAndSymbolize parses and 
symbolizes +// profiles as intended. +func TestParseAndSymbolize(t *testing.T) { + fakeFuncMap := map[uintptr]*fakeFunc{ + 0x10: &fakeFunc{"foo", "foo.go", 10}, + 0x20: &fakeFunc{"bar", "bar.go", 20}, + } + backupFuncForPC := funcForPC + funcForPC = func(pc uintptr) function { + return fakeFuncMap[pc] + } + defer func() { + funcForPC = backupFuncForPC + }() + + testLoc := []*profile.Location{ + {ID: 1, Address: 0x10}, + {ID: 2, Address: 0x20}, + } + testProfile := &profile.Profile{ + SampleType: []*profile.ValueType{ + &profile.ValueType{Type: "cpu", Unit: "nanoseconds"}, + }, + PeriodType: &profile.ValueType{Type: "cpu", Unit: "nanoseconds"}, + Sample: []*profile.Sample{ + {Location: []*profile.Location{testLoc[0], testLoc[1]}, Value: []int64{1}}, + {Location: []*profile.Location{testLoc[1]}, Value: []int64{1}}, + }, + Location: testLoc, + } + testProfiles := make([]*profile.Profile, 2) + testProfiles[0] = testProfile.Copy() + testProfiles[1] = testProfile.Copy() + + wantFunc := []*profile.Function{ + {ID: 1, Name: "foo", SystemName: "foo", Filename: "foo.go"}, + {ID: 2, Name: "bar", SystemName: "bar", Filename: "bar.go"}, + } + wantLoc := []*profile.Location{ + {ID: 1, Address: 0x10, Line: []profile.Line{{Function: wantFunc[0], Line: 10}}}, + {ID: 2, Address: 0x20, Line: []profile.Line{{Function: wantFunc[1], Line: 20}}}, + } + wantProfile := &profile.Profile{ + SampleType: []*profile.ValueType{ + &profile.ValueType{Type: "cpu", Unit: "nanoseconds"}, + }, + PeriodType: &profile.ValueType{Type: "cpu", Unit: "nanoseconds"}, + Sample: []*profile.Sample{ + {Location: []*profile.Location{wantLoc[0], wantLoc[1]}, Value: []int64{1}}, + {Location: []*profile.Location{wantLoc[1]}, Value: []int64{1}}, + }, + Location: wantLoc, + Function: wantFunc, + } + + // Profile already symbolized. 
+ testProfiles[1].Location = []*profile.Location{ + {ID: 1, Address: 0x10, Line: []profile.Line{{Function: wantFunc[0], Line: 10}}}, + {ID: 2, Address: 0x20, Line: []profile.Line{{Function: wantFunc[1], Line: 20}}}, + } + testProfiles[1].Function = []*profile.Function{ + {ID: 1, Name: "foo", SystemName: "foo", Filename: "foo.go"}, + {ID: 2, Name: "bar", SystemName: "bar", Filename: "bar.go"}, + } + for i := 0; i < 2; i++ { + var prof bytes.Buffer + testProfiles[i].Write(&prof) + + parseAndSymbolize(&prof) + gotProfile, err := profile.ParseData(prof.Bytes()) + if err != nil { + t.Errorf("parsing symbolized profile (testcase = %d) got err: %v, want no error", i, err) + } + if !testutil.Equal(gotProfile, wantProfile, cmpOpt) { + t.Errorf("incorrect symbolization (testcase = %d): got {%v}, want {%v}", i, gotProfile, wantProfile) + } + } +} + +func TestIsSymbolizedGoVersion(t *testing.T) { + for _, tc := range []struct { + input string + want bool + }{ + {"go1.9beta2", true}, + {"go1.9", true}, + {"go1.9.1", true}, + {"go1.10", true}, + {"go1.10.1", true}, + {"go2.0", true}, + {"go3.1", true}, + {"go1.8", false}, + {"go1.8.1", false}, + {"go1.7", false}, + {"devel ", false}, + } { + if got := isSymbolizedGoVersion(tc.input); got != tc.want { + t.Errorf("isSymbolizedGoVersion(%v) got %v, want %v", tc.input, got, tc.want) + } + } +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/ListTopics_smoke_test.go b/vendor/cloud.google.com/go/pubsub/apiv1/ListTopics_smoke_test.go new file mode 100644 index 0000000..bece095 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/ListTopics_smoke_test.go @@ -0,0 +1,67 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package pubsub + +import ( + pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +import ( + "fmt" + "strconv" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var _ = fmt.Sprintf +var _ = iterator.Done +var _ = strconv.FormatUint +var _ = time.Now + +func TestPublisherSmoke(t *testing.T) { + if testing.Short() { + t.Skip("skipping smoke test in short mode") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) + if ts == nil { + t.Skip("Integration tests skipped. 
See CONTRIBUTING.md for details") + } + + projectId := testutil.ProjID() + _ = projectId + + c, err := NewPublisherClient(ctx, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + + var formattedProject string = fmt.Sprintf("projects/%s", projectId) + var request = &pubsubpb.ListTopicsRequest{ + Project: formattedProject, + } + + iter := c.ListTopics(ctx, request) + if _, err := iter.Next(); err != nil && err != iterator.Done { + t.Error(err) + } +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/README.md b/vendor/cloud.google.com/go/pubsub/apiv1/README.md new file mode 100644 index 0000000..b5967ab --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/README.md @@ -0,0 +1,9 @@ +Auto-generated pubsub v1 clients +================================= + +This package includes auto-generated clients for the pubsub v1 API. + +Use the handwritten client (in the parent directory, +cloud.google.com/go/pubsub) in preference to this. + +This code is EXPERIMENTAL and subject to CHANGE AT ANY TIME. diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/doc.go b/vendor/cloud.google.com/go/pubsub/apiv1/doc.go new file mode 100644 index 0000000..ccdce27 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/doc.go @@ -0,0 +1,50 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package pubsub is an auto-generated package for the +// Google Cloud Pub/Sub API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// Provides reliable, many-to-many, asynchronous messaging between +// applications. +// +// Use the client at cloud.google.com/go/pubsub in preference to this. +package pubsub // import "cloud.google.com/go/pubsub/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub", + } +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/mock_test.go b/vendor/cloud.google.com/go/pubsub/apiv1/mock_test.go new file mode 100644 index 0000000..6eaad6d --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/mock_test.go @@ -0,0 +1,1878 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package pubsub + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + timestamppb "github.com/golang/protobuf/ptypes/timestamp" + iampb "google.golang.org/genproto/googleapis/iam/v1" + pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" + field_maskpb "google.golang.org/genproto/protobuf/field_mask" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockPublisherServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + pubsubpb.PublisherServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockPublisherServer) CreateTopic(ctx context.Context, req *pubsubpb.Topic) (*pubsubpb.Topic, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.Topic), nil +} + +func (s *mockPublisherServer) UpdateTopic(ctx context.Context, req *pubsubpb.UpdateTopicRequest) (*pubsubpb.Topic, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.Topic), nil +} + +func (s *mockPublisherServer) Publish(ctx context.Context, req *pubsubpb.PublishRequest) (*pubsubpb.PublishResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.PublishResponse), nil +} + +func (s *mockPublisherServer) GetTopic(ctx context.Context, req *pubsubpb.GetTopicRequest) (*pubsubpb.Topic, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.Topic), nil +} + +func (s *mockPublisherServer) ListTopics(ctx context.Context, req *pubsubpb.ListTopicsRequest) 
(*pubsubpb.ListTopicsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.ListTopicsResponse), nil +} + +func (s *mockPublisherServer) ListTopicSubscriptions(ctx context.Context, req *pubsubpb.ListTopicSubscriptionsRequest) (*pubsubpb.ListTopicSubscriptionsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.ListTopicSubscriptionsResponse), nil +} + +func (s *mockPublisherServer) DeleteTopic(ctx context.Context, req *pubsubpb.DeleteTopicRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +type mockIamPolicyServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + iampb.IAMPolicyServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockIamPolicyServer) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.Policy), nil +} + +func (s *mockIamPolicyServer) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.Policy), nil +} + +func (s *mockIamPolicyServer) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.TestIamPermissionsResponse), nil +} + +type mockSubscriberServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + pubsubpb.SubscriberServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockSubscriberServer) CreateSubscription(ctx context.Context, req *pubsubpb.Subscription) (*pubsubpb.Subscription, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.Subscription), nil +} + +func (s *mockSubscriberServer) GetSubscription(ctx context.Context, req *pubsubpb.GetSubscriptionRequest) (*pubsubpb.Subscription, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.Subscription), nil +} + +func (s *mockSubscriberServer) UpdateSubscription(ctx context.Context, req *pubsubpb.UpdateSubscriptionRequest) (*pubsubpb.Subscription, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.Subscription), nil +} + +func (s *mockSubscriberServer) ListSubscriptions(ctx context.Context, req *pubsubpb.ListSubscriptionsRequest) (*pubsubpb.ListSubscriptionsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.ListSubscriptionsResponse), nil +} + +func (s *mockSubscriberServer) DeleteSubscription(ctx context.Context, req *pubsubpb.DeleteSubscriptionRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockSubscriberServer) ModifyAckDeadline(ctx context.Context, req *pubsubpb.ModifyAckDeadlineRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockSubscriberServer) Acknowledge(ctx context.Context, req *pubsubpb.AcknowledgeRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockSubscriberServer) Pull(ctx context.Context, req *pubsubpb.PullRequest) 
(*pubsubpb.PullResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.PullResponse), nil +} + +func (s *mockSubscriberServer) StreamingPull(stream pubsubpb.Subscriber_StreamingPullServer) error { + md, _ := metadata.FromIncomingContext(stream.Context()) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + for { + if req, err := stream.Recv(); err == io.EOF { + break + } else if err != nil { + return err + } else { + s.reqs = append(s.reqs, req) + } + } + if s.err != nil { + return s.err + } + for _, v := range s.resps { + if err := stream.Send(v.(*pubsubpb.StreamingPullResponse)); err != nil { + return err + } + } + return nil +} + +func (s *mockSubscriberServer) ModifyPushConfig(ctx context.Context, req *pubsubpb.ModifyPushConfigRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockSubscriberServer) ListSnapshots(ctx context.Context, req *pubsubpb.ListSnapshotsRequest) (*pubsubpb.ListSnapshotsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.ListSnapshotsResponse), nil +} + +func (s *mockSubscriberServer) CreateSnapshot(ctx context.Context, req *pubsubpb.CreateSnapshotRequest) (*pubsubpb.Snapshot, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.Snapshot), nil +} + +func (s *mockSubscriberServer) UpdateSnapshot(ctx context.Context, req *pubsubpb.UpdateSnapshotRequest) (*pubsubpb.Snapshot, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.Snapshot), nil +} + +func (s *mockSubscriberServer) DeleteSnapshot(ctx context.Context, req *pubsubpb.DeleteSnapshotRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockSubscriberServer) Seek(ctx context.Context, req *pubsubpb.SeekRequest) (*pubsubpb.SeekResponse, error) { + md, _ := 
metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.SeekResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. +var clientOpt option.ClientOption + +var ( + mockPublisher mockPublisherServer + mockIamPolicy mockIamPolicyServer + mockSubscriber mockSubscriberServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + pubsubpb.RegisterPublisherServer(serv, &mockPublisher) + iampb.RegisterIAMPolicyServer(serv, &mockIamPolicy) + pubsubpb.RegisterSubscriberServer(serv, &mockSubscriber) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestPublisherCreateTopic(t *testing.T) { + var name2 string = "name2-1052831874" + var expectedResponse = &pubsubpb.Topic{ + Name: name2, + } + + mockPublisher.err = nil + mockPublisher.reqs = nil + + mockPublisher.resps = append(mockPublisher.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/topics/%s", "[PROJECT]", "[TOPIC]") + var request = &pubsubpb.Topic{ + Name: formattedName, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateTopic(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockPublisher.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestPublisherCreateTopicError(t *testing.T) { + errCode := codes.PermissionDenied + mockPublisher.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/topics/%s", "[PROJECT]", "[TOPIC]") + var request = &pubsubpb.Topic{ + Name: formattedName, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateTopic(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestPublisherUpdateTopic(t *testing.T) { + var name string = "name3373707" + var expectedResponse = &pubsubpb.Topic{ + Name: name, + } + + mockPublisher.err = nil + mockPublisher.reqs = nil + + mockPublisher.resps = append(mockPublisher.resps[:0], expectedResponse) + + var topic *pubsubpb.Topic = &pubsubpb.Topic{} + var updateMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{} + var request = &pubsubpb.UpdateTopicRequest{ + Topic: topic, + UpdateMask: updateMask, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateTopic(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockPublisher.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := 
expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestPublisherUpdateTopicError(t *testing.T) { + errCode := codes.PermissionDenied + mockPublisher.err = gstatus.Error(errCode, "test error") + + var topic *pubsubpb.Topic = &pubsubpb.Topic{} + var updateMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{} + var request = &pubsubpb.UpdateTopicRequest{ + Topic: topic, + UpdateMask: updateMask, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateTopic(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestPublisherPublish(t *testing.T) { + var messageIdsElement string = "messageIdsElement-744837059" + var messageIds = []string{messageIdsElement} + var expectedResponse = &pubsubpb.PublishResponse{ + MessageIds: messageIds, + } + + mockPublisher.err = nil + mockPublisher.reqs = nil + + mockPublisher.resps = append(mockPublisher.resps[:0], expectedResponse) + + var formattedTopic string = fmt.Sprintf("projects/%s/topics/%s", "[PROJECT]", "[TOPIC]") + var data []byte = []byte("-86") + var messagesElement = &pubsubpb.PubsubMessage{ + Data: data, + } + var messages = []*pubsubpb.PubsubMessage{messagesElement} + var request = &pubsubpb.PublishRequest{ + Topic: formattedTopic, + Messages: messages, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Publish(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockPublisher.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestPublisherPublishError(t *testing.T) { + errCode := codes.PermissionDenied + mockPublisher.err = gstatus.Error(errCode, "test error") + + var formattedTopic string = fmt.Sprintf("projects/%s/topics/%s", "[PROJECT]", "[TOPIC]") + var data []byte = []byte("-86") + var messagesElement = &pubsubpb.PubsubMessage{ + Data: data, + } + var messages = []*pubsubpb.PubsubMessage{messagesElement} + var request = &pubsubpb.PublishRequest{ + Topic: formattedTopic, + Messages: messages, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Publish(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestPublisherGetTopic(t *testing.T) { + var name string = "name3373707" + var expectedResponse = &pubsubpb.Topic{ + Name: name, + } + + mockPublisher.err = nil + mockPublisher.reqs = nil + + mockPublisher.resps = append(mockPublisher.resps[:0], expectedResponse) + + var formattedTopic string = fmt.Sprintf("projects/%s/topics/%s", "[PROJECT]", "[TOPIC]") + var request = &pubsubpb.GetTopicRequest{ + Topic: formattedTopic, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetTopic(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := 
request, mockPublisher.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestPublisherGetTopicError(t *testing.T) { + errCode := codes.PermissionDenied + mockPublisher.err = gstatus.Error(errCode, "test error") + + var formattedTopic string = fmt.Sprintf("projects/%s/topics/%s", "[PROJECT]", "[TOPIC]") + var request = &pubsubpb.GetTopicRequest{ + Topic: formattedTopic, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetTopic(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestPublisherListTopics(t *testing.T) { + var nextPageToken string = "" + var topicsElement *pubsubpb.Topic = &pubsubpb.Topic{} + var topics = []*pubsubpb.Topic{topicsElement} + var expectedResponse = &pubsubpb.ListTopicsResponse{ + NextPageToken: nextPageToken, + Topics: topics, + } + + mockPublisher.err = nil + mockPublisher.reqs = nil + + mockPublisher.resps = append(mockPublisher.resps[:0], expectedResponse) + + var formattedProject string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &pubsubpb.ListTopicsRequest{ + Project: formattedProject, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTopics(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockPublisher.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Topics[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestPublisherListTopicsError(t *testing.T) { + errCode := codes.PermissionDenied + mockPublisher.err = gstatus.Error(errCode, "test error") + + var formattedProject string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &pubsubpb.ListTopicsRequest{ + Project: formattedProject, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTopics(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestPublisherListTopicSubscriptions(t *testing.T) { + var nextPageToken string = "" + var subscriptionsElement string = "subscriptionsElement1698708147" + var subscriptions = []string{subscriptionsElement} + var expectedResponse = &pubsubpb.ListTopicSubscriptionsResponse{ + NextPageToken: nextPageToken, + Subscriptions: subscriptions, + } + + mockPublisher.err = nil + mockPublisher.reqs = nil + + mockPublisher.resps = append(mockPublisher.resps[:0], expectedResponse) + + var formattedTopic string = fmt.Sprintf("projects/%s/topics/%s", "[PROJECT]", "[TOPIC]") + var request = &pubsubpb.ListTopicSubscriptionsRequest{ + Topic: formattedTopic, + } + + c, err := NewPublisherClient(context.Background(), 
clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTopicSubscriptions(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockPublisher.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Subscriptions[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestPublisherListTopicSubscriptionsError(t *testing.T) { + errCode := codes.PermissionDenied + mockPublisher.err = gstatus.Error(errCode, "test error") + + var formattedTopic string = fmt.Sprintf("projects/%s/topics/%s", "[PROJECT]", "[TOPIC]") + var request = &pubsubpb.ListTopicSubscriptionsRequest{ + Topic: formattedTopic, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTopicSubscriptions(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestPublisherDeleteTopic(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockPublisher.err = nil + mockPublisher.reqs = nil + + mockPublisher.resps = append(mockPublisher.resps[:0], expectedResponse) + + var formattedTopic string = fmt.Sprintf("projects/%s/topics/%s", "[PROJECT]", "[TOPIC]") + var request = &pubsubpb.DeleteTopicRequest{ + Topic: formattedTopic, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteTopic(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockPublisher.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestPublisherDeleteTopicError(t *testing.T) { + errCode := codes.PermissionDenied + mockPublisher.err = gstatus.Error(errCode, "test error") + + var formattedTopic string = fmt.Sprintf("projects/%s/topics/%s", "[PROJECT]", "[TOPIC]") + var request = &pubsubpb.DeleteTopicRequest{ + Topic: formattedTopic, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteTopic(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestSubscriberCreateSubscription(t *testing.T) { + var name2 string = "name2-1052831874" + var topic2 string = "topic2-1139259102" + var ackDeadlineSeconds int32 = 2135351438 + var retainAckedMessages bool = false + var expectedResponse = &pubsubpb.Subscription{ + Name: name2, + Topic: topic2, + AckDeadlineSeconds: ackDeadlineSeconds, + RetainAckedMessages: retainAckedMessages, + } + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/subscriptions/%s", "[PROJECT]", "[SUBSCRIPTION]") + var formattedTopic string = fmt.Sprintf("projects/%s/topics/%s", "[PROJECT]", "[TOPIC]") + var request = &pubsubpb.Subscription{ + Name: formattedName, + 
Topic: formattedTopic, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateSubscription(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSubscriberCreateSubscriptionError(t *testing.T) { + errCode := codes.PermissionDenied + mockSubscriber.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/subscriptions/%s", "[PROJECT]", "[SUBSCRIPTION]") + var formattedTopic string = fmt.Sprintf("projects/%s/topics/%s", "[PROJECT]", "[TOPIC]") + var request = &pubsubpb.Subscription{ + Name: formattedName, + Topic: formattedTopic, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateSubscription(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSubscriberGetSubscription(t *testing.T) { + var name string = "name3373707" + var topic string = "topic110546223" + var ackDeadlineSeconds int32 = 2135351438 + var retainAckedMessages bool = false + var expectedResponse = &pubsubpb.Subscription{ + Name: name, + Topic: topic, + AckDeadlineSeconds: ackDeadlineSeconds, + RetainAckedMessages: retainAckedMessages, + } + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedSubscription string = fmt.Sprintf("projects/%s/subscriptions/%s", "[PROJECT]", "[SUBSCRIPTION]") + var request = &pubsubpb.GetSubscriptionRequest{ + Subscription: formattedSubscription, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetSubscription(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSubscriberGetSubscriptionError(t *testing.T) { + errCode := codes.PermissionDenied + mockSubscriber.err = gstatus.Error(errCode, "test error") + + var formattedSubscription string = fmt.Sprintf("projects/%s/subscriptions/%s", "[PROJECT]", "[SUBSCRIPTION]") + var request = &pubsubpb.GetSubscriptionRequest{ + Subscription: formattedSubscription, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetSubscription(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSubscriberUpdateSubscription(t *testing.T) { + var name string = "name3373707" + var topic string = "topic110546223" + var ackDeadlineSeconds2 int32 = 921632575 + var retainAckedMessages bool = false + var expectedResponse = &pubsubpb.Subscription{ + Name: name, + 
Topic: topic, + AckDeadlineSeconds: ackDeadlineSeconds2, + RetainAckedMessages: retainAckedMessages, + } + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var ackDeadlineSeconds int32 = 42 + var subscription = &pubsubpb.Subscription{ + AckDeadlineSeconds: ackDeadlineSeconds, + } + var pathsElement string = "ack_deadline_seconds" + var paths = []string{pathsElement} + var updateMask = &field_maskpb.FieldMask{ + Paths: paths, + } + var request = &pubsubpb.UpdateSubscriptionRequest{ + Subscription: subscription, + UpdateMask: updateMask, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateSubscription(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSubscriberUpdateSubscriptionError(t *testing.T) { + errCode := codes.PermissionDenied + mockSubscriber.err = gstatus.Error(errCode, "test error") + + var ackDeadlineSeconds int32 = 42 + var subscription = &pubsubpb.Subscription{ + AckDeadlineSeconds: ackDeadlineSeconds, + } + var pathsElement string = "ack_deadline_seconds" + var paths = []string{pathsElement} + var updateMask = &field_maskpb.FieldMask{ + Paths: paths, + } + var request = &pubsubpb.UpdateSubscriptionRequest{ + Subscription: subscription, + UpdateMask: updateMask, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateSubscription(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSubscriberListSubscriptions(t *testing.T) { + var nextPageToken string = "" + var subscriptionsElement *pubsubpb.Subscription = &pubsubpb.Subscription{} + var subscriptions = []*pubsubpb.Subscription{subscriptionsElement} + var expectedResponse = &pubsubpb.ListSubscriptionsResponse{ + NextPageToken: nextPageToken, + Subscriptions: subscriptions, + } + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedProject string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &pubsubpb.ListSubscriptionsRequest{ + Project: formattedProject, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListSubscriptions(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Subscriptions[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSubscriberListSubscriptionsError(t *testing.T) { + errCode := codes.PermissionDenied + mockSubscriber.err = gstatus.Error(errCode, "test error") + + var formattedProject 
string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &pubsubpb.ListSubscriptionsRequest{ + Project: formattedProject, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListSubscriptions(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSubscriberDeleteSubscription(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedSubscription string = fmt.Sprintf("projects/%s/subscriptions/%s", "[PROJECT]", "[SUBSCRIPTION]") + var request = &pubsubpb.DeleteSubscriptionRequest{ + Subscription: formattedSubscription, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteSubscription(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestSubscriberDeleteSubscriptionError(t *testing.T) { + errCode := codes.PermissionDenied + mockSubscriber.err = gstatus.Error(errCode, "test error") + + var formattedSubscription string = fmt.Sprintf("projects/%s/subscriptions/%s", "[PROJECT]", "[SUBSCRIPTION]") + var request = &pubsubpb.DeleteSubscriptionRequest{ + Subscription: formattedSubscription, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteSubscription(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestSubscriberModifyAckDeadline(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedSubscription string = fmt.Sprintf("projects/%s/subscriptions/%s", "[PROJECT]", "[SUBSCRIPTION]") + var ackIds []string = nil + var ackDeadlineSeconds int32 = 2135351438 + var request = &pubsubpb.ModifyAckDeadlineRequest{ + Subscription: formattedSubscription, + AckIds: ackIds, + AckDeadlineSeconds: ackDeadlineSeconds, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.ModifyAckDeadline(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestSubscriberModifyAckDeadlineError(t *testing.T) { + errCode := codes.PermissionDenied + mockSubscriber.err = gstatus.Error(errCode, "test error") + + var formattedSubscription string = fmt.Sprintf("projects/%s/subscriptions/%s", "[PROJECT]", "[SUBSCRIPTION]") + var ackIds []string = nil + var ackDeadlineSeconds int32 = 2135351438 + var request = &pubsubpb.ModifyAckDeadlineRequest{ + Subscription: formattedSubscription, + AckIds: ackIds, + AckDeadlineSeconds: ackDeadlineSeconds, + } + + c, err := NewSubscriberClient(context.Background(), 
clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.ModifyAckDeadline(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestSubscriberAcknowledge(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedSubscription string = fmt.Sprintf("projects/%s/subscriptions/%s", "[PROJECT]", "[SUBSCRIPTION]") + var ackIds []string = nil + var request = &pubsubpb.AcknowledgeRequest{ + Subscription: formattedSubscription, + AckIds: ackIds, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.Acknowledge(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestSubscriberAcknowledgeError(t *testing.T) { + errCode := codes.PermissionDenied + mockSubscriber.err = gstatus.Error(errCode, "test error") + + var formattedSubscription string = fmt.Sprintf("projects/%s/subscriptions/%s", "[PROJECT]", "[SUBSCRIPTION]") + var ackIds []string = nil + var request = &pubsubpb.AcknowledgeRequest{ + Subscription: formattedSubscription, + AckIds: ackIds, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.Acknowledge(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestSubscriberPull(t *testing.T) { + var expectedResponse *pubsubpb.PullResponse = &pubsubpb.PullResponse{} + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedSubscription string = fmt.Sprintf("projects/%s/subscriptions/%s", "[PROJECT]", "[SUBSCRIPTION]") + var maxMessages int32 = 496131527 + var request = &pubsubpb.PullRequest{ + Subscription: formattedSubscription, + MaxMessages: maxMessages, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Pull(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSubscriberPullError(t *testing.T) { + errCode := codes.PermissionDenied + mockSubscriber.err = gstatus.Error(errCode, "test error") + + var formattedSubscription string = fmt.Sprintf("projects/%s/subscriptions/%s", "[PROJECT]", "[SUBSCRIPTION]") + var maxMessages int32 = 496131527 + var request = &pubsubpb.PullRequest{ + Subscription: formattedSubscription, + MaxMessages: maxMessages, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Pull(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := 
st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSubscriberStreamingPull(t *testing.T) { + var receivedMessagesElement *pubsubpb.ReceivedMessage = &pubsubpb.ReceivedMessage{} + var receivedMessages = []*pubsubpb.ReceivedMessage{receivedMessagesElement} + var expectedResponse = &pubsubpb.StreamingPullResponse{ + ReceivedMessages: receivedMessages, + } + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedSubscription string = fmt.Sprintf("projects/%s/subscriptions/%s", "[PROJECT]", "[SUBSCRIPTION]") + var streamAckDeadlineSeconds int32 = 1875467245 + var request = &pubsubpb.StreamingPullRequest{ + Subscription: formattedSubscription, + StreamAckDeadlineSeconds: streamAckDeadlineSeconds, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.StreamingPull(context.Background()) + if err != nil { + t.Fatal(err) + } + if err := stream.Send(request); err != nil { + t.Fatal(err) + } + if err := stream.CloseSend(); err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSubscriberStreamingPullError(t *testing.T) { + errCode := codes.PermissionDenied + mockSubscriber.err = gstatus.Error(errCode, "test error") + + var formattedSubscription string = fmt.Sprintf("projects/%s/subscriptions/%s", "[PROJECT]", "[SUBSCRIPTION]") + var streamAckDeadlineSeconds int32 = 1875467245 + var request = &pubsubpb.StreamingPullRequest{ + Subscription: formattedSubscription, + StreamAckDeadlineSeconds: streamAckDeadlineSeconds, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.StreamingPull(context.Background()) + if err != nil { + t.Fatal(err) + } + if err := stream.Send(request); err != nil { + t.Fatal(err) + } + if err := stream.CloseSend(); err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSubscriberModifyPushConfig(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedSubscription string = fmt.Sprintf("projects/%s/subscriptions/%s", "[PROJECT]", "[SUBSCRIPTION]") + var pushConfig *pubsubpb.PushConfig = &pubsubpb.PushConfig{} + var request = &pubsubpb.ModifyPushConfigRequest{ + Subscription: formattedSubscription, + PushConfig: pushConfig, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.ModifyPushConfig(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestSubscriberModifyPushConfigError(t *testing.T) { + errCode := codes.PermissionDenied + mockSubscriber.err = 
gstatus.Error(errCode, "test error") + + var formattedSubscription string = fmt.Sprintf("projects/%s/subscriptions/%s", "[PROJECT]", "[SUBSCRIPTION]") + var pushConfig *pubsubpb.PushConfig = &pubsubpb.PushConfig{} + var request = &pubsubpb.ModifyPushConfigRequest{ + Subscription: formattedSubscription, + PushConfig: pushConfig, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.ModifyPushConfig(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestSubscriberListSnapshots(t *testing.T) { + var nextPageToken string = "" + var snapshotsElement *pubsubpb.Snapshot = &pubsubpb.Snapshot{} + var snapshots = []*pubsubpb.Snapshot{snapshotsElement} + var expectedResponse = &pubsubpb.ListSnapshotsResponse{ + NextPageToken: nextPageToken, + Snapshots: snapshots, + } + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedProject string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &pubsubpb.ListSnapshotsRequest{ + Project: formattedProject, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListSnapshots(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Snapshots[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSubscriberListSnapshotsError(t *testing.T) { + errCode := codes.PermissionDenied + mockSubscriber.err = gstatus.Error(errCode, "test error") + + var formattedProject string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &pubsubpb.ListSnapshotsRequest{ + Project: formattedProject, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListSnapshots(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSubscriberCreateSnapshot(t *testing.T) { + var name2 string = "name2-1052831874" + var topic string = "topic110546223" + var expectedResponse = &pubsubpb.Snapshot{ + Name: name2, + Topic: topic, + } + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/snapshots/%s", "[PROJECT]", "[SNAPSHOT]") + var formattedSubscription string = fmt.Sprintf("projects/%s/subscriptions/%s", "[PROJECT]", "[SUBSCRIPTION]") + var request = &pubsubpb.CreateSnapshotRequest{ + Name: formattedName, + Subscription: formattedSubscription, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateSnapshot(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if 
want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSubscriberCreateSnapshotError(t *testing.T) { + errCode := codes.PermissionDenied + mockSubscriber.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/snapshots/%s", "[PROJECT]", "[SNAPSHOT]") + var formattedSubscription string = fmt.Sprintf("projects/%s/subscriptions/%s", "[PROJECT]", "[SUBSCRIPTION]") + var request = &pubsubpb.CreateSnapshotRequest{ + Name: formattedName, + Subscription: formattedSubscription, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateSnapshot(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSubscriberUpdateSnapshot(t *testing.T) { + var name string = "name3373707" + var topic string = "topic110546223" + var expectedResponse = &pubsubpb.Snapshot{ + Name: name, + Topic: topic, + } + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var seconds int64 = 123456 + var expireTime = &timestamppb.Timestamp{ + Seconds: seconds, + } + var snapshot = &pubsubpb.Snapshot{ + ExpireTime: expireTime, + } + var pathsElement string = "expire_time" + var paths = []string{pathsElement} + var updateMask = &field_maskpb.FieldMask{ + Paths: paths, + } + var request = &pubsubpb.UpdateSnapshotRequest{ + Snapshot: snapshot, + UpdateMask: updateMask, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateSnapshot(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSubscriberUpdateSnapshotError(t *testing.T) { + errCode := codes.PermissionDenied + mockSubscriber.err = gstatus.Error(errCode, "test error") + + var seconds int64 = 123456 + var expireTime = &timestamppb.Timestamp{ + Seconds: seconds, + } + var snapshot = &pubsubpb.Snapshot{ + ExpireTime: expireTime, + } + var pathsElement string = "expire_time" + var paths = []string{pathsElement} + var updateMask = &field_maskpb.FieldMask{ + Paths: paths, + } + var request = &pubsubpb.UpdateSnapshotRequest{ + Snapshot: snapshot, + UpdateMask: updateMask, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateSnapshot(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSubscriberDeleteSnapshot(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedSnapshot string =
fmt.Sprintf("projects/%s/snapshots/%s", "[PROJECT]", "[SNAPSHOT]") + var request = &pubsubpb.DeleteSnapshotRequest{ + Snapshot: formattedSnapshot, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteSnapshot(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestSubscriberDeleteSnapshotError(t *testing.T) { + errCode := codes.PermissionDenied + mockSubscriber.err = gstatus.Error(errCode, "test error") + + var formattedSnapshot string = fmt.Sprintf("projects/%s/snapshots/%s", "[PROJECT]", "[SNAPSHOT]") + var request = &pubsubpb.DeleteSnapshotRequest{ + Snapshot: formattedSnapshot, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteSnapshot(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestSubscriberSeek(t *testing.T) { + var expectedResponse *pubsubpb.SeekResponse = &pubsubpb.SeekResponse{} + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedSubscription string = fmt.Sprintf("projects/%s/subscriptions/%s", "[PROJECT]", "[SUBSCRIPTION]") + var request = &pubsubpb.SeekRequest{ + Subscription: formattedSubscription, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Seek(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSubscriberSeekError(t *testing.T) { + errCode := codes.PermissionDenied + mockSubscriber.err = gstatus.Error(errCode, "test error") + + var formattedSubscription string = fmt.Sprintf("projects/%s/subscriptions/%s", "[PROJECT]", "[SUBSCRIPTION]") + var request = &pubsubpb.SeekRequest{ + Subscription: formattedSubscription, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Seek(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/path_funcs.go b/vendor/cloud.google.com/go/pubsub/apiv1/path_funcs.go new file mode 100644 index 0000000..b9ab484 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/path_funcs.go @@ -0,0 +1,95 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +// PublisherProjectPath returns the path for the project resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s", project) +// instead. +func PublisherProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// PublisherTopicPath returns the path for the topic resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/topics/%s", project, topic) +// instead. +func PublisherTopicPath(project, topic string) string { + return "" + + "projects/" + + project + + "/topics/" + + topic + + "" +} + +// SubscriberProjectPath returns the path for the project resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s", project) +// instead. +func SubscriberProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// SubscriberSnapshotPath returns the path for the snapshot resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/snapshots/%s", project, snapshot) +// instead. +func SubscriberSnapshotPath(project, snapshot string) string { + return "" + + "projects/" + + project + + "/snapshots/" + + snapshot + + "" +} + +// SubscriberSubscriptionPath returns the path for the subscription resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/subscriptions/%s", project, subscription) +// instead. +func SubscriberSubscriptionPath(project, subscription string) string { + return "" + + "projects/" + + project + + "/subscriptions/" + + subscription + + "" +} + +// SubscriberTopicPath returns the path for the topic resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/topics/%s", project, topic) +// instead. +func SubscriberTopicPath(project, topic string) string { + return "" + + "projects/" + + project + + "/topics/" + + topic + + "" +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go new file mode 100644 index 0000000..6e9b1fa --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go @@ -0,0 +1,398 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
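+
+// Note: the deprecated path helpers in path_funcs.go above only concatenate
+// strings, so the fmt.Sprintf forms named in their Deprecated notices are
+// drop-in replacements. A minimal sketch (project and topic names are
+// hypothetical):
+//
+//	a := PublisherTopicPath("my-proj", "my-topic")
+//	b := fmt.Sprintf("projects/%s/topics/%s", "my-proj", "my-topic")
+//	// a == b: both yield "projects/my-proj/topics/my-topic"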
+ +package pubsub + +import ( + "math" + "time" + + "cloud.google.com/go/iam" + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// PublisherCallOptions contains the retry settings for each method of PublisherClient. +type PublisherCallOptions struct { + CreateTopic []gax.CallOption + UpdateTopic []gax.CallOption + Publish []gax.CallOption + GetTopic []gax.CallOption + ListTopics []gax.CallOption + ListTopicSubscriptions []gax.CallOption + DeleteTopic []gax.CallOption +} + +func defaultPublisherClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("pubsub.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultPublisherCallOptions() *PublisherCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + {"messaging", "one_plus_delivery"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Aborted, + codes.Canceled, + codes.DeadlineExceeded, + codes.Internal, + codes.ResourceExhausted, + codes.Unavailable, + codes.Unknown, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &PublisherCallOptions{ + CreateTopic: retry[[2]string{"default", "idempotent"}], + UpdateTopic: retry[[2]string{"default", "idempotent"}], + Publish: retry[[2]string{"messaging", "one_plus_delivery"}], + GetTopic: retry[[2]string{"default", "idempotent"}], + ListTopics: retry[[2]string{"default", "idempotent"}], + ListTopicSubscriptions: retry[[2]string{"default", "idempotent"}], + DeleteTopic: retry[[2]string{"default", "idempotent"}], + } +} + +// PublisherClient is a client for interacting with Google Cloud Pub/Sub API. +type PublisherClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + publisherClient pubsubpb.PublisherClient + + // The call options for this service. + CallOptions *PublisherCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewPublisherClient creates a new publisher client. +// +// The service that an application uses to manipulate topics, and to send +// messages to a topic. +func NewPublisherClient(ctx context.Context, opts ...option.ClientOption) (*PublisherClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultPublisherClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &PublisherClient{ + conn: conn, + CallOptions: defaultPublisherCallOptions(), + + publisherClient: pubsubpb.NewPublisherClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *PublisherClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. 
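+// A typical lifecycle, matching the generated examples later in this patch
+// (sketch; assumes a background context):
+//
+//	c, err := pubsub.NewPublisherClient(ctx)
+//	if err != nil {
+//		// TODO: Handle error.
+//	}
+//	defer c.Close()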
+func (c *PublisherClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *PublisherClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +func (c *PublisherClient) SubscriptionIAM(subscription *pubsubpb.Subscription) *iam.Handle { + return iam.InternalNewHandle(c.Connection(), subscription.Name) +} + +func (c *PublisherClient) TopicIAM(topic *pubsubpb.Topic) *iam.Handle { + return iam.InternalNewHandle(c.Connection(), topic.Name) +} + +// CreateTopic creates the given topic with the given name. +func (c *PublisherClient) CreateTopic(ctx context.Context, req *pubsubpb.Topic, opts ...gax.CallOption) (*pubsubpb.Topic, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateTopic[0:len(c.CallOptions.CreateTopic):len(c.CallOptions.CreateTopic)], opts...) + var resp *pubsubpb.Topic + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.publisherClient.CreateTopic(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateTopic updates an existing topic. Note that certain properties of a topic are not +// modifiable. Options settings follow the style guide: +// NOTE: The style guide requires body: "topic" instead of body: "*". +// Keeping the latter for internal consistency in V1, however it should be +// corrected in V2. See +// https://cloud.google.com/apis/design/standard_methods#update for details. +func (c *PublisherClient) UpdateTopic(ctx context.Context, req *pubsubpb.UpdateTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateTopic[0:len(c.CallOptions.UpdateTopic):len(c.CallOptions.UpdateTopic)], opts...) + var resp *pubsubpb.Topic + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.publisherClient.UpdateTopic(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// Publish adds one or more messages to the topic. Returns NOT_FOUND if the topic +// does not exist. The message payload must not be empty; it must contain +// either a non-empty data field, or at least one attribute. +func (c *PublisherClient) Publish(ctx context.Context, req *pubsubpb.PublishRequest, opts ...gax.CallOption) (*pubsubpb.PublishResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.Publish[0:len(c.CallOptions.Publish):len(c.CallOptions.Publish)], opts...) + var resp *pubsubpb.PublishResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.publisherClient.Publish(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetTopic gets the configuration of a topic. 
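+//
+// A minimal call sketch (the topic name is hypothetical):
+//
+//	topic, err := c.GetTopic(ctx, &pubsubpb.GetTopicRequest{
+//		Topic: "projects/my-proj/topics/my-topic",
+//	})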
+func (c *PublisherClient) GetTopic(ctx context.Context, req *pubsubpb.GetTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetTopic[0:len(c.CallOptions.GetTopic):len(c.CallOptions.GetTopic)], opts...) + var resp *pubsubpb.Topic + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.publisherClient.GetTopic(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListTopics lists matching topics. +func (c *PublisherClient) ListTopics(ctx context.Context, req *pubsubpb.ListTopicsRequest, opts ...gax.CallOption) *TopicIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListTopics[0:len(c.CallOptions.ListTopics):len(c.CallOptions.ListTopics)], opts...) + it := &TopicIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Topic, string, error) { + var resp *pubsubpb.ListTopicsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.publisherClient.ListTopics(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Topics, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// ListTopicSubscriptions lists the name of the subscriptions for this topic. +func (c *PublisherClient) ListTopicSubscriptions(ctx context.Context, req *pubsubpb.ListTopicSubscriptionsRequest, opts ...gax.CallOption) *StringIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListTopicSubscriptions[0:len(c.CallOptions.ListTopicSubscriptions):len(c.CallOptions.ListTopicSubscriptions)], opts...) + it := &StringIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) { + var resp *pubsubpb.ListTopicSubscriptionsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.publisherClient.ListTopicSubscriptions(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Subscriptions, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// DeleteTopic deletes the topic with the given name. Returns NOT_FOUND if the topic +// does not exist. After a topic is deleted, a new topic may be created with +// the same name; this is an entirely new topic with none of the old +// configuration or subscriptions. 
Existing subscriptions to this topic are +// not deleted, but their topic field is set to _deleted-topic_. +func (c *PublisherClient) DeleteTopic(ctx context.Context, req *pubsubpb.DeleteTopicRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteTopic[0:len(c.CallOptions.DeleteTopic):len(c.CallOptions.DeleteTopic)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.publisherClient.DeleteTopic(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// StringIterator manages a stream of string. +type StringIterator struct { + items []string + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []string, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *StringIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *StringIterator) Next() (string, error) { + var item string + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *StringIterator) bufLen() int { + return len(it.items) +} + +func (it *StringIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// TopicIterator manages a stream of *pubsubpb.Topic. +type TopicIterator struct { + items []*pubsubpb.Topic + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Topic, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *TopicIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
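+//
+// The usual consumption loop, mirroring the generated examples below:
+//
+//	it := c.ListTopics(ctx, req)
+//	for {
+//		topic, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			// TODO: Handle error.
+//		}
+//		_ = topic
+//	}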
+func (it *TopicIterator) Next() (*pubsubpb.Topic, error) { + var item *pubsubpb.Topic + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *TopicIterator) bufLen() int { + return len(it.items) +} + +func (it *TopicIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client_example_test.go b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client_example_test.go new file mode 100644 index 0000000..0f60bca --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client_example_test.go @@ -0,0 +1,204 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package pubsub_test + +import ( + "cloud.google.com/go/pubsub/apiv1" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +func ExampleNewPublisherClient() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExamplePublisherClient_SubscriptionIAM() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + subscription := &pubsubpb.Subscription{} + h := c.SubscriptionIAM(subscription) + policy, err := h.Policy(ctx) + if err != nil { + // TODO: Handle error. + } + //TODO: Use the IAM policy + _ = policy +} + +func ExamplePublisherClient_TopicIAM() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + topic := &pubsubpb.Topic{} + h := c.TopicIAM(topic) + policy, err := h.Policy(ctx) + if err != nil { + // TODO: Handle error. + } + //TODO: Use the IAM policy + _ = policy +} + +func ExamplePublisherClient_CreateTopic() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.Topic{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateTopic(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExamplePublisherClient_UpdateTopic() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.UpdateTopicRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateTopic(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExamplePublisherClient_Publish() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.PublishRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.Publish(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp +} + +func ExamplePublisherClient_GetTopic() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.GetTopicRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetTopic(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExamplePublisherClient_ListTopics() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.ListTopicsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListTopics(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExamplePublisherClient_ListTopicSubscriptions() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.ListTopicSubscriptionsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListTopicSubscriptions(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExamplePublisherClient_DeleteTopic() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.DeleteTopicRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteTopic(ctx, req) + if err != nil { + // TODO: Handle error. + } +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/pubsub_pull_example_test.go b/vendor/cloud.google.com/go/pubsub/apiv1/pubsub_pull_example_test.go new file mode 100644 index 0000000..df13c3d --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/pubsub_pull_example_test.go @@ -0,0 +1,106 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub_test + +import ( + "fmt" + "log" + "time" + + "cloud.google.com/go/pubsub/apiv1" + "golang.org/x/net/context" + pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +func ExampleSubscriberClient_Pull_lengthyClientProcessing() { + projectID := "some-project" + subscriptionID := "some-subscription" + + ctx := context.Background() + client, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + log.Fatal(err) + } + defer client.Close() + + sub := fmt.Sprintf("projects/%s/subscriptions/%s", projectID, subscriptionID) + // Be sure to tune the MaxMessages parameter per your project's needs, and accordingly + // adjust the ack behavior below to batch acknowledgements. + req := pubsubpb.PullRequest{ + Subscription: sub, + MaxMessages: 1, + } + + fmt.Println("Listening..") + + for { + res, err := client.Pull(ctx, &req) + if err != nil { + log.Fatal(err) + } + + // client.Pull returns an empty list if there are no messages available in the + // backlog. 
We should skip processing steps when that happens. + if len(res.ReceivedMessages) == 0 { + continue + } + + var recvdAckIDs []string + for _, m := range res.ReceivedMessages { + recvdAckIDs = append(recvdAckIDs, m.AckId) + } + + var done = make(chan struct{}) + var delay = 0 * time.Second // Tick immediately upon reception + var ackDeadline = 10 * time.Second + + // Continuously notify the server that processing is still happening on this batch. + go func() { + for { + select { + case <-ctx.Done(): + return + case <-done: + return + case <-time.After(delay): + err := client.ModifyAckDeadline(ctx, &pubsubpb.ModifyAckDeadlineRequest{ + Subscription: sub, + AckIds: recvdAckIDs, + AckDeadlineSeconds: int32(ackDeadline.Seconds()), + }) + if err != nil { + log.Fatal(err) + } + delay = ackDeadline - 5*time.Second // 5 seconds grace period. + } + } + }() + + for _, m := range res.ReceivedMessages { + // Process the message here, possibly in a goroutine. + log.Printf("Got message: %s", string(m.Message.Data)) + + err := client.Acknowledge(ctx, &pubsubpb.AcknowledgeRequest{ + Subscription: sub, + AckIds: []string{m.AckId}, + }) + if err != nil { + log.Fatal(err) + } + } + + close(done) + } +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go new file mode 100644 index 0000000..a2266a3 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go @@ -0,0 +1,593 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package pubsub + +import ( + "math" + "time" + + "cloud.google.com/go/iam" + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// SubscriberCallOptions contains the retry settings for each method of SubscriberClient. 
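+//
+// These defaults are applied first on every call; gax.CallOptions passed to
+// an individual method are appended after them (note the
+// opts = append(c.CallOptions..., opts...) pattern in each method below), so
+// a per-call option takes precedence when it configures the same setting.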
+type SubscriberCallOptions struct { + CreateSubscription []gax.CallOption + GetSubscription []gax.CallOption + UpdateSubscription []gax.CallOption + ListSubscriptions []gax.CallOption + DeleteSubscription []gax.CallOption + ModifyAckDeadline []gax.CallOption + Acknowledge []gax.CallOption + Pull []gax.CallOption + StreamingPull []gax.CallOption + ModifyPushConfig []gax.CallOption + ListSnapshots []gax.CallOption + CreateSnapshot []gax.CallOption + UpdateSnapshot []gax.CallOption + DeleteSnapshot []gax.CallOption + Seek []gax.CallOption +} + +func defaultSubscriberClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("pubsub.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultSubscriberCallOptions() *SubscriberCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + {"messaging", "pull"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Canceled, + codes.DeadlineExceeded, + codes.Internal, + codes.ResourceExhausted, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + {"streaming_messaging", "pull"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Canceled, + codes.DeadlineExceeded, + codes.Internal, + codes.ResourceExhausted, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &SubscriberCallOptions{ + CreateSubscription: retry[[2]string{"default", "idempotent"}], + GetSubscription: retry[[2]string{"default", "idempotent"}], + UpdateSubscription: retry[[2]string{"default", "idempotent"}], + ListSubscriptions: retry[[2]string{"default", "idempotent"}], + DeleteSubscription: retry[[2]string{"default", "idempotent"}], + ModifyAckDeadline: retry[[2]string{"default", "non_idempotent"}], + Acknowledge: retry[[2]string{"messaging", "non_idempotent"}], + Pull: retry[[2]string{"messaging", "pull"}], + StreamingPull: retry[[2]string{"streaming_messaging", "pull"}], + ModifyPushConfig: retry[[2]string{"default", "non_idempotent"}], + ListSnapshots: retry[[2]string{"default", "idempotent"}], + CreateSnapshot: retry[[2]string{"default", "idempotent"}], + UpdateSnapshot: retry[[2]string{"default", "idempotent"}], + DeleteSnapshot: retry[[2]string{"default", "idempotent"}], + Seek: retry[[2]string{"default", "non_idempotent"}], + } +} + +// SubscriberClient is a client for interacting with Google Cloud Pub/Sub API. +type SubscriberClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + subscriberClient pubsubpb.SubscriberClient + + // The call options for this service. + CallOptions *SubscriberCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewSubscriberClient creates a new subscriber client. +// +// The service that an application uses to manipulate subscriptions and to +// consume messages from a subscription via the Pull method. 
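+//
+// A minimal construction sketch (this assumes default credentials are
+// available in the environment; see the package examples for details):
+//
+//   ctx := context.Background()
+//   c, err := pubsub.NewSubscriberClient(ctx)
+//   if err != nil {
+//     // TODO: Handle error.
+//   }
+//   defer c.Close()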
+func NewSubscriberClient(ctx context.Context, opts ...option.ClientOption) (*SubscriberClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultSubscriberClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &SubscriberClient{ + conn: conn, + CallOptions: defaultSubscriberCallOptions(), + + subscriberClient: pubsubpb.NewSubscriberClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *SubscriberClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *SubscriberClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *SubscriberClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +func (c *SubscriberClient) SubscriptionIAM(subscription *pubsubpb.Subscription) *iam.Handle { + return iam.InternalNewHandle(c.Connection(), subscription.Name) +} + +func (c *SubscriberClient) TopicIAM(topic *pubsubpb.Topic) *iam.Handle { + return iam.InternalNewHandle(c.Connection(), topic.Name) +} + +// CreateSubscription creates a subscription to a given topic. +// If the subscription already exists, returns ALREADY_EXISTS. +// If the corresponding topic doesn't exist, returns NOT_FOUND. +// +// If the name is not provided in the request, the server will assign a random +// name for this subscription on the same project as the topic, conforming +// to the +// resource name format (at https://cloud.google.com/pubsub/docs/overview#names). +// The generated name is populated in the returned Subscription object. +// Note that for REST API requests, you must specify a name in the request. +func (c *SubscriberClient) CreateSubscription(ctx context.Context, req *pubsubpb.Subscription, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateSubscription[0:len(c.CallOptions.CreateSubscription):len(c.CallOptions.CreateSubscription)], opts...) + var resp *pubsubpb.Subscription + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.CreateSubscription(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetSubscription gets the configuration details of a subscription. +func (c *SubscriberClient) GetSubscription(ctx context.Context, req *pubsubpb.GetSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetSubscription[0:len(c.CallOptions.GetSubscription):len(c.CallOptions.GetSubscription)], opts...) + var resp *pubsubpb.Subscription + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.GetSubscription(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateSubscription updates an existing subscription. Note that certain properties of a +// subscription, such as its topic, are not modifiable. +// NOTE: The style guide requires body: "subscription" instead of body: "*". +// Keeping the latter for internal consistency in V1, however it should be +// corrected in V2. See +// https://cloud.google.com/apis/design/standard_methods#update for details. +func (c *SubscriberClient) UpdateSubscription(ctx context.Context, req *pubsubpb.UpdateSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateSubscription[0:len(c.CallOptions.UpdateSubscription):len(c.CallOptions.UpdateSubscription)], opts...) + var resp *pubsubpb.Subscription + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.UpdateSubscription(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListSubscriptions lists matching subscriptions. +func (c *SubscriberClient) ListSubscriptions(ctx context.Context, req *pubsubpb.ListSubscriptionsRequest, opts ...gax.CallOption) *SubscriptionIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListSubscriptions[0:len(c.CallOptions.ListSubscriptions):len(c.CallOptions.ListSubscriptions)], opts...) + it := &SubscriptionIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Subscription, string, error) { + var resp *pubsubpb.ListSubscriptionsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.ListSubscriptions(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Subscriptions, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// DeleteSubscription deletes an existing subscription. All messages retained in the subscription +// are immediately dropped. Calls to Pull after deletion will return +// NOT_FOUND. After a subscription is deleted, a new one may be created with +// the same name, but the new one has no association with the old +// subscription or its topic unless the same topic is specified. +func (c *SubscriberClient) DeleteSubscription(ctx context.Context, req *pubsubpb.DeleteSubscriptionRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteSubscription[0:len(c.CallOptions.DeleteSubscription):len(c.CallOptions.DeleteSubscription)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.subscriberClient.DeleteSubscription(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ModifyAckDeadline modifies the ack deadline for a specific message. 
This method is useful +// to indicate that more time is needed to process a message by the +// subscriber, or to make the message available for redelivery if the +// processing was interrupted. Note that this does not modify the +// subscription-level ackDeadlineSeconds used for subsequent messages. +func (c *SubscriberClient) ModifyAckDeadline(ctx context.Context, req *pubsubpb.ModifyAckDeadlineRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ModifyAckDeadline[0:len(c.CallOptions.ModifyAckDeadline):len(c.CallOptions.ModifyAckDeadline)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.subscriberClient.ModifyAckDeadline(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// Acknowledge acknowledges the messages associated with the ack_ids in the +// AcknowledgeRequest. The Pub/Sub system can remove the relevant messages +// from the subscription. +// +// Acknowledging a message whose ack deadline has expired may succeed, +// but such a message may be redelivered later. Acknowledging a message more +// than once will not result in an error. +func (c *SubscriberClient) Acknowledge(ctx context.Context, req *pubsubpb.AcknowledgeRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.Acknowledge[0:len(c.CallOptions.Acknowledge):len(c.CallOptions.Acknowledge)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.subscriberClient.Acknowledge(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// Pull pulls messages from the server. Returns an empty list if there are no +// messages available in the backlog. The server may return UNAVAILABLE if +// there are too many concurrent pull requests pending for the given +// subscription. +func (c *SubscriberClient) Pull(ctx context.Context, req *pubsubpb.PullRequest, opts ...gax.CallOption) (*pubsubpb.PullResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.Pull[0:len(c.CallOptions.Pull):len(c.CallOptions.Pull)], opts...) + var resp *pubsubpb.PullResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.Pull(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// StreamingPull (EXPERIMENTAL) StreamingPull is an experimental feature. This RPC will +// respond with UNIMPLEMENTED errors unless you have been invited to test +// this feature. Contact cloud-pubsub@google.com with any questions. +// +// Establishes a stream with the server, which sends messages down to the +// client. The client streams acknowledgements and ack deadline modifications +// back to the server. The server will close the stream and return the status +// on any error. The server may close the stream with status OK to reassign +// server-side resources, in which case, the client should re-establish the +// stream. UNAVAILABLE may also be returned in the case of a transient error +// (e.g., a server restart). These should also be retried by the client. Flow +// control can be achieved by configuring the underlying RPC channel. 
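+//
+// A rough sketch of driving the stream (ExampleSubscriberClient_StreamingPull
+// shows the full version): send requests from one goroutine and receive until
+// io.EOF:
+//
+//   stream, err := c.StreamingPull(ctx)
+//   // handle err
+//   go func() {
+//     stream.Send(&pubsubpb.StreamingPullRequest{ /* ... */ })
+//     stream.CloseSend()
+//   }()
+//   for {
+//     resp, err := stream.Recv()
+//     if err == io.EOF {
+//       break
+//     }
+//     // handle err; use resp
+//   }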
+func (c *SubscriberClient) StreamingPull(ctx context.Context, opts ...gax.CallOption) (pubsubpb.Subscriber_StreamingPullClient, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.StreamingPull[0:len(c.CallOptions.StreamingPull):len(c.CallOptions.StreamingPull)], opts...) + var resp pubsubpb.Subscriber_StreamingPullClient + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.StreamingPull(ctx, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ModifyPushConfig modifies the PushConfig for a specified subscription. +// +// This may be used to change a push subscription to a pull one (signified by +// an empty PushConfig) or vice versa, or change the endpoint URL and other +// attributes of a push subscription. Messages will accumulate for delivery +// continuously through the call regardless of changes to the PushConfig. +func (c *SubscriberClient) ModifyPushConfig(ctx context.Context, req *pubsubpb.ModifyPushConfigRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ModifyPushConfig[0:len(c.CallOptions.ModifyPushConfig):len(c.CallOptions.ModifyPushConfig)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.subscriberClient.ModifyPushConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ListSnapshots lists the existing snapshots. +func (c *SubscriberClient) ListSnapshots(ctx context.Context, req *pubsubpb.ListSnapshotsRequest, opts ...gax.CallOption) *SnapshotIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListSnapshots[0:len(c.CallOptions.ListSnapshots):len(c.CallOptions.ListSnapshots)], opts...) + it := &SnapshotIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Snapshot, string, error) { + var resp *pubsubpb.ListSnapshotsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.ListSnapshots(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Snapshots, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// CreateSnapshot creates a snapshot from the requested subscription. +// If the snapshot already exists, returns ALREADY_EXISTS. +// If the requested subscription doesn't exist, returns NOT_FOUND. +// +// If the name is not provided in the request, the server will assign a random +// name for this snapshot on the same project as the subscription, conforming +// to the +// resource name format (at https://cloud.google.com/pubsub/docs/overview#names). +// The generated name is populated in the returned Snapshot object. +// Note that for REST API requests, you must specify a name in the request. 
+func (c *SubscriberClient) CreateSnapshot(ctx context.Context, req *pubsubpb.CreateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateSnapshot[0:len(c.CallOptions.CreateSnapshot):len(c.CallOptions.CreateSnapshot)], opts...) + var resp *pubsubpb.Snapshot + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.CreateSnapshot(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateSnapshot updates an existing snapshot. Note that certain properties of a snapshot +// are not modifiable. +// NOTE: The style guide requires body: "snapshot" instead of body: "*". +// Keeping the latter for internal consistency in V1, however it should be +// corrected in V2. See +// https://cloud.google.com/apis/design/standard_methods#update for details. +func (c *SubscriberClient) UpdateSnapshot(ctx context.Context, req *pubsubpb.UpdateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateSnapshot[0:len(c.CallOptions.UpdateSnapshot):len(c.CallOptions.UpdateSnapshot)], opts...) + var resp *pubsubpb.Snapshot + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.UpdateSnapshot(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteSnapshot removes an existing snapshot. All messages retained in the snapshot +// are immediately dropped. After a snapshot is deleted, a new one may be +// created with the same name, but the new one has no association with the old +// snapshot or its subscription, unless the same subscription is specified. +func (c *SubscriberClient) DeleteSnapshot(ctx context.Context, req *pubsubpb.DeleteSnapshotRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteSnapshot[0:len(c.CallOptions.DeleteSnapshot):len(c.CallOptions.DeleteSnapshot)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.subscriberClient.DeleteSnapshot(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// Seek seeks an existing subscription to a point in time or to a given snapshot, +// whichever is provided in the request. +func (c *SubscriberClient) Seek(ctx context.Context, req *pubsubpb.SeekRequest, opts ...gax.CallOption) (*pubsubpb.SeekResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.Seek[0:len(c.CallOptions.Seek):len(c.CallOptions.Seek)], opts...) + var resp *pubsubpb.SeekResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.Seek(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SnapshotIterator manages a stream of *pubsubpb.Snapshot. +type SnapshotIterator struct { + items []*pubsubpb.Snapshot + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. 
+ // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Snapshot, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *SnapshotIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *SnapshotIterator) Next() (*pubsubpb.Snapshot, error) { + var item *pubsubpb.Snapshot + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *SnapshotIterator) bufLen() int { + return len(it.items) +} + +func (it *SnapshotIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// SubscriptionIterator manages a stream of *pubsubpb.Subscription. +type SubscriptionIterator struct { + items []*pubsubpb.Subscription + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Subscription, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *SubscriptionIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *SubscriptionIterator) Next() (*pubsubpb.Subscription, error) { + var item *pubsubpb.Subscription + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *SubscriptionIterator) bufLen() int { + return len(it.items) +} + +func (it *SubscriptionIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client_example_test.go b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client_example_test.go new file mode 100644 index 0000000..581f2bf --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client_example_test.go @@ -0,0 +1,358 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package pubsub_test + +import ( + "io" + + "cloud.google.com/go/pubsub/apiv1" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +func ExampleNewSubscriberClient() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleSubscriberClient_SubscriptionIAM() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + subscription := &pubsubpb.Subscription{} + h := c.SubscriptionIAM(subscription) + policy, err := h.Policy(ctx) + if err != nil { + // TODO: Handle error. + } + //TODO: Use the IAM policy + _ = policy +} + +func ExampleSubscriberClient_TopicIAM() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + topic := &pubsubpb.Topic{} + h := c.TopicIAM(topic) + policy, err := h.Policy(ctx) + if err != nil { + // TODO: Handle error. + } + //TODO: Use the IAM policy + _ = policy +} + +func ExampleSubscriberClient_CreateSubscription() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.Subscription{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateSubscription(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleSubscriberClient_GetSubscription() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.GetSubscriptionRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetSubscription(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleSubscriberClient_UpdateSubscription() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.UpdateSubscriptionRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateSubscription(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleSubscriberClient_ListSubscriptions() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.ListSubscriptionsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListSubscriptions(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleSubscriberClient_DeleteSubscription() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.DeleteSubscriptionRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteSubscription(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleSubscriberClient_ModifyAckDeadline() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.ModifyAckDeadlineRequest{ + // TODO: Fill request struct fields. + } + err = c.ModifyAckDeadline(ctx, req) + if err != nil { + // TODO: Handle error. 
+ } +} + +func ExampleSubscriberClient_Acknowledge() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.AcknowledgeRequest{ + // TODO: Fill request struct fields. + } + err = c.Acknowledge(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleSubscriberClient_Pull() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.PullRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.Pull(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleSubscriberClient_StreamingPull() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + stream, err := c.StreamingPull(ctx) + if err != nil { + // TODO: Handle error. + } + go func() { + reqs := []*pubsubpb.StreamingPullRequest{ + // TODO: Create requests. + } + for _, req := range reqs { + if err := stream.Send(req); err != nil { + // TODO: Handle error. + } + } + stream.CloseSend() + }() + for { + resp, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + // TODO: handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleSubscriberClient_ModifyPushConfig() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.ModifyPushConfigRequest{ + // TODO: Fill request struct fields. + } + err = c.ModifyPushConfig(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleSubscriberClient_ListSnapshots() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.ListSnapshotsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListSnapshots(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleSubscriberClient_CreateSnapshot() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.CreateSnapshotRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateSnapshot(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleSubscriberClient_UpdateSnapshot() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.UpdateSnapshotRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateSnapshot(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleSubscriberClient_DeleteSnapshot() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.DeleteSnapshotRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteSnapshot(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleSubscriberClient_Seek() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.SeekRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.Seek(ctx, req) + if err != nil { + // TODO: Handle error. 
+ }
+ // TODO: Use resp.
+ _ = resp
+}
diff --git a/vendor/cloud.google.com/go/pubsub/doc.go b/vendor/cloud.google.com/go/pubsub/doc.go
new file mode 100644
index 0000000..7995065
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/doc.go
@@ -0,0 +1,126 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package pubsub provides an easy way to publish and receive Google Cloud Pub/Sub
+messages, hiding the details of the underlying server RPCs. Google Cloud
+Pub/Sub is a many-to-many, asynchronous messaging system that decouples senders
+and receivers.
+
+Note: This package is in beta. Some backwards-incompatible changes may occur.
+
+More information about Google Cloud Pub/Sub is available at
+https://cloud.google.com/pubsub/docs
+
+Publishing
+
+Google Cloud Pub/Sub messages are published to topics. Topics may be created
+using the pubsub package like so:
+
+ topic, err := pubsubClient.CreateTopic(context.Background(), "topic-name")
+
+Messages may then be published to a topic:
+
+ res := topic.Publish(ctx, &pubsub.Message{Data: []byte("payload")})
+
+Publish queues the message for publishing and returns immediately. When enough
+messages have accumulated, or enough time has elapsed, the batch of messages is
+sent to the Pub/Sub service.
+
+Publish returns a PublishResult, which behaves like a future: its Get method
+blocks until the message has been sent to the service.
+
+The first time you call Publish on a topic, goroutines are started in the
+background. To clean up these goroutines, call Stop:
+
+ topic.Stop()
+
+Receiving
+
+To receive messages published to a topic, clients create subscriptions
+to the topic. There may be more than one subscription per topic; each message
+that is published to the topic will be delivered to all of its subscriptions.
+
+Subscriptions may be created like so:
+
+ sub, err := pubsubClient.CreateSubscription(context.Background(), "sub-name",
+ pubsub.SubscriptionConfig{Topic: topic})
+
+Messages are then consumed from a subscription via callback.
+
+ err := sub.Receive(context.Background(), func(ctx context.Context, m *Message) {
+ log.Printf("Got message: %s", m.Data)
+ m.Ack()
+ })
+ if err != nil {
+ // Handle error.
+ }
+
+The callback is invoked concurrently by multiple goroutines, maximizing
+throughput. To terminate a call to Receive, cancel its context.
+
+Once client code has processed the message, it must call Message.Ack, otherwise
+the message will eventually be redelivered. As an optimization, if the client
+cannot or doesn't want to process the message, it can call Message.Nack to
+speed redelivery. For more information and configuration options, see
+"Deadlines" below.
+
+Note: It is possible for Messages to be redelivered, even if Message.Ack has
+been called. Client code must be robust to multiple deliveries of messages.
+
+Deadlines
+
+The default pubsub deadlines are suitable for most use cases, but may be
+overridden.
This section describes the tradeoffs that should be considered
+when overriding the defaults.
+
+Behind the scenes, each message returned by the Pub/Sub server has an
+associated lease, known as an "ACK deadline".
+Unless a message is acknowledged within the ACK deadline, or the client requests that
+the ACK deadline be extended, the message will become eligible for redelivery.
+As a convenience, the pubsub package will automatically extend deadlines until
+either:
+ * Message.Ack or Message.Nack is called, or
+ * the "MaxExtension" period elapses from the time the message is fetched from the server.
+
+The initial ACK deadline given to each message defaults to 10 seconds, but may
+be overridden during subscription creation. Selecting an ACK deadline is a
+tradeoff between message redelivery latency and RPC volume. If the pubsub
+package fails to acknowledge or extend a message (e.g. due to unexpected
+termination of the process), a shorter ACK deadline will generally result in
+faster message redelivery by the Pub/Sub system. However, a short ACK deadline
+may also increase the number of deadline extension RPCs that the pubsub package
+sends to the server.
+
+The default max extension period is DefaultReceiveSettings.MaxExtension, and can
+be overridden by setting Subscription.ReceiveSettings.MaxExtension. Selecting a
+max extension period is a tradeoff between the speed at which client code must
+process messages, and the redelivery delay if messages fail to be acknowledged
+(e.g. because client code neglects to do so). Using a large MaxExtension
+increases the available time for client code to process messages. However, if
+the client code neglects to call Message.Ack/Nack, a large MaxExtension will
+increase the delay before the message is redelivered.
+
+Authentication
+
+See examples of authorization and authentication at
+https://godoc.org/cloud.google.com/go#pkg-examples.
+
+Slow Message Processing
+
+For use cases where message processing exceeds 30 minutes, we recommend using
+the base client in a pull model, since long-lived streams are periodically killed
+by firewalls. See the example at https://godoc.org/cloud.google.com/go/pubsub/apiv1#example-SubscriberClient-Pull-LengthyClientProcessing
+*/
+package pubsub // import "cloud.google.com/go/pubsub"
diff --git a/vendor/cloud.google.com/go/pubsub/endtoend_test.go b/vendor/cloud.google.com/go/pubsub/endtoend_test.go
new file mode 100644
index 0000000..6fe0d21
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/endtoend_test.go
@@ -0,0 +1,234 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package pubsub + +import ( + "bytes" + "fmt" + "log" + "math/rand" + "os" + "sync" + "testing" + "time" + + "golang.org/x/net/context" + + "cloud.google.com/go/internal/testutil" + "google.golang.org/api/option" +) + +const ( + timeout = time.Minute * 10 + ackDeadline = time.Second * 10 + nMessages = 1e4 + acceptableDupPercentage = .05 + numAcceptableDups = int(nMessages * acceptableDupPercentage / 100) +) + +// Buffer log messages to debug failures. +var logBuf bytes.Buffer + +// TestEndToEnd pumps many messages into a topic and tests that they are all +// delivered to each subscription for the topic. It also tests that messages +// are not unexpectedly redelivered. +func TestEndToEnd(t *testing.T) { + t.Parallel() + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + log.SetOutput(&logBuf) + ctx := context.Background() + ts := testutil.TokenSource(ctx, ScopePubSub, ScopeCloudPlatform) + if ts == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + + now := time.Now() + topicName := fmt.Sprintf("endtoend-%d", now.UnixNano()) + subPrefix := fmt.Sprintf("endtoend-%d", now.UnixNano()) + + client, err := NewClient(ctx, testutil.ProjID(), option.WithTokenSource(ts)) + if err != nil { + t.Fatalf("Creating client error: %v", err) + } + + var topic *Topic + if topic, err = client.CreateTopic(ctx, topicName); err != nil { + t.Fatalf("CreateTopic error: %v", err) + } + defer topic.Delete(ctx) + + // Two subscriptions to the same topic. + var subs [2]*Subscription + for i := 0; i < len(subs); i++ { + subs[i], err = client.CreateSubscription(ctx, fmt.Sprintf("%s-%d", subPrefix, i), SubscriptionConfig{ + Topic: topic, + AckDeadline: ackDeadline, + }) + if err != nil { + t.Fatalf("CreateSub error: %v", err) + } + defer subs[i].Delete(ctx) + } + + err = publish(ctx, topic, nMessages) + topic.Stop() + if err != nil { + t.Fatalf("publish: %v", err) + } + + // recv provides an indication that messages are still arriving. + recv := make(chan struct{}) + // We have two subscriptions to our topic. + // Each subscription will get a copy of each published message. + var wg sync.WaitGroup + cctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + consumers := []*consumer{ + {counts: make(map[string]int), recv: recv, durations: []time.Duration{time.Hour}}, + {counts: make(map[string]int), recv: recv, + durations: []time.Duration{ackDeadline, ackDeadline, ackDeadline / 2, ackDeadline / 2, time.Hour}}, + } + for i, con := range consumers { + con := con + sub := subs[i] + wg.Add(1) + go func() { + defer wg.Done() + con.consume(t, cctx, sub) + }() + } + // Wait for a while after the last message before declaring quiescence. + // We wait a multiple of the ack deadline, for two reasons: + // 1. To detect if messages are redelivered after having their ack + // deadline extended. + // 2. To wait for redelivery of messages that were en route when a Receive + // is canceled. This can take considerably longer than the ack deadline. + quiescenceDur := ackDeadline * 6 + quiescenceTimer := time.NewTimer(quiescenceDur) + +loop: + for { + select { + case <-recv: + // Reset timer so we wait quiescenceDur after the last message. + // See https://godoc.org/time#Timer.Reset for why the Stop + // and channel drain are necessary. 
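+ // In short: Reset may only be called on a timer that is stopped
+ // or expired and whose channel has been drained, so when Stop
+ // reports that the timer already fired, the pending value is
+ // received before Reset is called.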
+ if !quiescenceTimer.Stop() {
+ <-quiescenceTimer.C
+ }
+ quiescenceTimer.Reset(quiescenceDur)
+
+ case <-quiescenceTimer.C:
+ cancel()
+ log.Println("quiesced")
+ break loop
+
+ case <-cctx.Done():
+ t.Fatal("timed out")
+ }
+ }
+ wg.Wait()
+ ok := true
+ for i, con := range consumers {
+ var numDups int
+ var zeroes int
+ for _, v := range con.counts {
+ if v == 0 {
+ zeroes++
+ }
+ numDups += v - 1
+ }
+
+ if zeroes > 0 {
+ t.Errorf("Consumer %d: %d messages never arrived", i, zeroes)
+ ok = false
+ } else if numDups > numAcceptableDups {
+ t.Errorf("Consumer %d: Willing to accept %d dups (%f%% duplicated of %d messages), but got %d", i, numAcceptableDups, acceptableDupPercentage, int(nMessages), numDups)
+ ok = false
+ }
+ }
+ if !ok {
+ logBuf.WriteTo(os.Stdout)
+ }
+}
+
+// publish publishes n messages to topic and waits until the service has
+// acknowledged them all.
+func publish(ctx context.Context, topic *Topic, n int) error {
+ var rs []*PublishResult
+ for i := 0; i < n; i++ {
+ m := &Message{Data: []byte(fmt.Sprintf("msg %d", i))}
+ rs = append(rs, topic.Publish(ctx, m))
+ }
+ for _, r := range rs {
+ if _, err := r.Get(ctx); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// consumer consumes messages according to its configuration.
+type consumer struct {
+ durations []time.Duration
+
+ // A value is sent to recv each time a message is processed.
+ recv chan struct{}
+
+ mu sync.Mutex
+ counts map[string]int
+ total int
+}
+
+// consume calls sub.Receive once per entry in c.durations, recording what it
+// receives in c.counts. It returns early if Receive fails or ctx is done.
+func (c *consumer) consume(t *testing.T, ctx context.Context, sub *Subscription) {
+ for _, dur := range c.durations {
+ ctx2, cancel := context.WithTimeout(ctx, dur)
+ defer cancel()
+ id := sub.name[len(sub.name)-2:]
+ log.Printf("%s: start receive", id)
+ prev := c.total
+ err := sub.Receive(ctx2, c.process)
+ log.Printf("%s: end receive; read %d", id, c.total-prev)
+ if err != nil {
+ t.Errorf("error from Receive: %v", err)
+ return
+ }
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ }
+ }
+}
+
+// process handles a message and records it in c.counts.
+func (c *consumer) process(_ context.Context, m *Message) {
+ c.mu.Lock()
+ c.counts[m.ID]++
+ c.total++
+ c.mu.Unlock()
+ c.recv <- struct{}{}
+ // Simulate time taken to process m, while continuing to process more messages.
+ // Some messages will need to have their ack deadline extended due to this delay.
+ delay := rand.Intn(int(ackDeadline * 3))
+ time.AfterFunc(time.Duration(delay), m.Ack)
+}
diff --git a/vendor/cloud.google.com/go/pubsub/example_subscription_iterator_test.go b/vendor/cloud.google.com/go/pubsub/example_subscription_iterator_test.go
new file mode 100644
index 0000000..da74b1b
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/example_subscription_iterator_test.go
@@ -0,0 +1,54 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub_test + +import ( + "fmt" + + "cloud.google.com/go/pubsub" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func ExampleClient_Subscriptions() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + // List all subscriptions of the project. + it := client.Subscriptions(ctx) + _ = it // TODO: iterate using Next. +} + +func ExampleSubscriptionIterator_Next() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + // List all subscriptions of the project. + it := client.Subscriptions(ctx) + for { + sub, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(sub) + } +} diff --git a/vendor/cloud.google.com/go/pubsub/example_test.go b/vendor/cloud.google.com/go/pubsub/example_test.go new file mode 100644 index 0000000..0bd8c8c --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/example_test.go @@ -0,0 +1,369 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub_test + +import ( + "fmt" + "time" + + "cloud.google.com/go/pubsub" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func ExampleNewClient() { + ctx := context.Background() + _, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + // See the other examples to learn how to use the Client. +} + +func ExampleClient_CreateTopic() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + // Create a new topic with the given name. + topic, err := client.CreateTopic(ctx, "topicName") + if err != nil { + // TODO: Handle error. + } + + _ = topic // TODO: use the topic. +} + +// Use TopicInProject to refer to a topic that is not in the client's project, such +// as a public topic. +func ExampleClient_TopicInProject() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + topic := client.TopicInProject("topicName", "another-project-id") + _ = topic // TODO: use the topic. +} + +func ExampleClient_CreateSubscription() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + // Create a new topic with the given name. + topic, err := client.CreateTopic(ctx, "topicName") + if err != nil { + // TODO: Handle error. + } + + // Create a new subscription to the previously created topic + // with the given name. + sub, err := client.CreateSubscription(ctx, "subName", pubsub.SubscriptionConfig{ + Topic: topic, + AckDeadline: 10 * time.Second, + }) + if err != nil { + // TODO: Handle error. + } + + _ = sub // TODO: use the subscription. 
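+ // (A common next step is sub.Receive; see ExampleSubscription_Receive below.)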
+}
+
+func ExampleTopic_Delete() {
+ ctx := context.Background()
+ client, err := pubsub.NewClient(ctx, "project-id")
+ if err != nil {
+ // TODO: Handle error.
+ }
+
+ topic := client.Topic("topicName")
+ if err := topic.Delete(ctx); err != nil {
+ // TODO: Handle error.
+ }
+}
+
+func ExampleTopic_Exists() {
+ ctx := context.Background()
+ client, err := pubsub.NewClient(ctx, "project-id")
+ if err != nil {
+ // TODO: Handle error.
+ }
+
+ topic := client.Topic("topicName")
+ ok, err := topic.Exists(ctx)
+ if err != nil {
+ // TODO: Handle error.
+ }
+ if !ok {
+ // Topic doesn't exist.
+ }
+}
+
+func ExampleTopic_Publish() {
+ ctx := context.Background()
+ client, err := pubsub.NewClient(ctx, "project-id")
+ if err != nil {
+ // TODO: Handle error.
+ }
+
+ topic := client.Topic("topicName")
+ defer topic.Stop()
+ var results []*pubsub.PublishResult
+ r := topic.Publish(ctx, &pubsub.Message{
+ Data: []byte("hello world"),
+ })
+ results = append(results, r)
+ // Do other work ...
+ for _, r := range results {
+ id, err := r.Get(ctx)
+ if err != nil {
+ // TODO: Handle error.
+ }
+ fmt.Printf("Published a message with a message ID: %s\n", id)
+ }
+}
+
+func ExampleTopic_Subscriptions() {
+ ctx := context.Background()
+ client, err := pubsub.NewClient(ctx, "project-id")
+ if err != nil {
+ // TODO: Handle error.
+ }
+ topic := client.Topic("topic-name")
+ // List all subscriptions of the topic (maybe of multiple projects).
+ for subs := topic.Subscriptions(ctx); ; {
+ sub, err := subs.Next()
+ if err == iterator.Done {
+ break
+ }
+ if err != nil {
+ // TODO: Handle error.
+ }
+ _ = sub // TODO: use the subscription.
+ }
+}
+
+func ExampleSubscription_Delete() {
+ ctx := context.Background()
+ client, err := pubsub.NewClient(ctx, "project-id")
+ if err != nil {
+ // TODO: Handle error.
+ }
+
+ sub := client.Subscription("subName")
+ if err := sub.Delete(ctx); err != nil {
+ // TODO: Handle error.
+ }
+}
+
+func ExampleSubscription_Exists() {
+ ctx := context.Background()
+ client, err := pubsub.NewClient(ctx, "project-id")
+ if err != nil {
+ // TODO: Handle error.
+ }
+
+ sub := client.Subscription("subName")
+ ok, err := sub.Exists(ctx)
+ if err != nil {
+ // TODO: Handle error.
+ }
+ if !ok {
+ // Subscription doesn't exist.
+ }
+}
+
+func ExampleSubscription_Config() {
+ ctx := context.Background()
+ client, err := pubsub.NewClient(ctx, "project-id")
+ if err != nil {
+ // TODO: Handle error.
+ }
+ sub := client.Subscription("subName")
+ config, err := sub.Config(ctx)
+ if err != nil {
+ // TODO: Handle error.
+ }
+ fmt.Println(config)
+}
+
+func ExampleSubscription_Receive() {
+ ctx := context.Background()
+ client, err := pubsub.NewClient(ctx, "project-id")
+ if err != nil {
+ // TODO: Handle error.
+ }
+ sub := client.Subscription("subName")
+ err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
+ // TODO: Handle message.
+ // NOTE: May be called concurrently; synchronize access to shared memory.
+ m.Ack()
+ })
+ if err != context.Canceled {
+ // TODO: Handle error.
+ }
+}
+
+// This example shows how to configure keepalive so that unacknowledged messages
+// expire quickly, allowing other subscribers to take them.
+func ExampleSubscription_Receive_maxExtension() {
+ ctx := context.Background()
+ client, err := pubsub.NewClient(ctx, "project-id")
+ if err != nil {
+ // TODO: Handle error.
+ }
+ sub := client.Subscription("subName")
+ // This program is expected to process and acknowledge messages in 30 seconds.
If + // not, the Pub/Sub API will assume the message is not acknowledged. + sub.ReceiveSettings.MaxExtension = 30 * time.Second + err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) { + // TODO: Handle message. + m.Ack() + }) + if err != context.Canceled { + // TODO: Handle error. + } +} + +// This example shows how to throttle Subscription.Receive, which aims for high +// throughput by default. By limiting the number of messages and/or bytes being +// processed at once, you can bound your program's resource consumption. +func ExampleSubscription_Receive_maxOutstanding() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + sub := client.Subscription("subName") + sub.ReceiveSettings.MaxOutstandingMessages = 5 + sub.ReceiveSettings.MaxOutstandingBytes = 10e6 + err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) { + // TODO: Handle message. + m.Ack() + }) + if err != context.Canceled { + // TODO: Handle error. + } +} + +func ExampleSubscription_Update() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + sub := client.Subscription("subName") + subConfig, err := sub.Update(ctx, pubsub.SubscriptionConfigToUpdate{ + PushConfig: &pubsub.PushConfig{Endpoint: "https://example.com/push"}, + }) + if err != nil { + // TODO: Handle error. + } + _ = subConfig // TODO: Use SubscriptionConfig. +} + +func ExampleSubscription_CreateSnapshot() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + sub := client.Subscription("subName") + snapConfig, err := sub.CreateSnapshot(ctx, "snapshotName") + if err != nil { + // TODO: Handle error. + } + _ = snapConfig // TODO: Use SnapshotConfig. +} + +func ExampleSubscription_SeekToSnapshot() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + sub := client.Subscription("subName") + snap := client.Snapshot("snapshotName") + if err := sub.SeekToSnapshot(ctx, snap); err != nil { + // TODO: Handle error. + } +} + +func ExampleSubscription_SeekToTime() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + sub := client.Subscription("subName") + if err := sub.SeekToTime(ctx, time.Now().Add(-time.Hour)); err != nil { + // TODO: Handle error. + } +} + +func ExampleSnapshot_Delete() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + snap := client.Snapshot("snapshotName") + if err := snap.Delete(ctx); err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_Snapshots() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + // List all snapshots for the project. + iter := client.Snapshots(ctx) + _ = iter // TODO: iterate using Next. +} + +func ExampleSnapshotConfigIterator_Next() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + // List all snapshots for the project. + iter := client.Snapshots(ctx) + for { + snapConfig, err := iter.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + _ = snapConfig // TODO: use the SnapshotConfig. 
+ } +} + +// TODO(jba): write an example for PublishResult.Ready +// TODO(jba): write an example for Subscription.IAM +// TODO(jba): write an example for Topic.IAM +// TODO(jba): write an example for Topic.Stop diff --git a/vendor/cloud.google.com/go/pubsub/example_topic_iterator_test.go b/vendor/cloud.google.com/go/pubsub/example_topic_iterator_test.go new file mode 100644 index 0000000..0c227ed --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/example_topic_iterator_test.go @@ -0,0 +1,53 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub_test + +import ( + "fmt" + + "cloud.google.com/go/pubsub" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func ExampleClient_Topics() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it := client.Topics(ctx) + _ = it // TODO: iterate using Next. +} + +func ExampleTopicIterator_Next() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + // List all topics. + it := client.Topics(ctx) + for { + t, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(t) + } +} diff --git a/vendor/cloud.google.com/go/pubsub/fake_test.go b/vendor/cloud.google.com/go/pubsub/fake_test.go new file mode 100644 index 0000000..330d334 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/fake_test.go @@ -0,0 +1,322 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +// This file provides a fake/mock in-memory pubsub server. 
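+//
+// A sketch of how a test might use it (assuming the fields and helpers
+// defined below; the project ID is arbitrary): dial the fake's address and
+// hand the connection to the real client:
+//
+//	srv, _ := newFakeServer()
+//	conn, _ := grpc.Dial(srv.Addr, grpc.WithInsecure())
+//	client, _ := NewClient(context.Background(), "some-project", option.WithGRPCConn(conn))
+//
+// after which all publisher and subscriber RPCs are served from memory.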
+ +import ( + "io" + "sort" + "strings" + "sync" + "time" + + "cloud.google.com/go/internal/testutil" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + durpb "github.com/golang/protobuf/ptypes/duration" + emptypb "github.com/golang/protobuf/ptypes/empty" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type fakeServer struct { + pb.PublisherServer + pb.SubscriberServer + + Addr string + + mu sync.Mutex + Acked map[string]bool // acked message IDs + Deadlines map[string]int32 // deadlines by message ID + pullResponses []*pullResponse + wg sync.WaitGroup + subs map[string]*pb.Subscription + topics map[string]*pb.Topic +} + +type pullResponse struct { + msgs []*pb.ReceivedMessage + err error +} + +func newFakeServer() (*fakeServer, error) { + srv, err := testutil.NewServer() + if err != nil { + return nil, err + } + fake := &fakeServer{ + Addr: srv.Addr, + Acked: map[string]bool{}, + Deadlines: map[string]int32{}, + subs: map[string]*pb.Subscription{}, + topics: map[string]*pb.Topic{}, + } + pb.RegisterPublisherServer(srv.Gsrv, fake) + pb.RegisterSubscriberServer(srv.Gsrv, fake) + srv.Start() + return fake, nil +} + +// Each call to addStreamingPullMessages results in one StreamingPullResponse. +func (s *fakeServer) addStreamingPullMessages(msgs []*pb.ReceivedMessage) { + s.pullResponses = append(s.pullResponses, &pullResponse{msgs, nil}) +} + +func (s *fakeServer) addStreamingPullError(err error) { + s.pullResponses = append(s.pullResponses, &pullResponse{nil, err}) +} + +func (s *fakeServer) wait() { + s.wg.Wait() +} + +func (s *fakeServer) StreamingPull(stream pb.Subscriber_StreamingPullServer) error { + s.wg.Add(1) + defer s.wg.Done() + errc := make(chan error, 1) + s.wg.Add(1) + go func() { + defer s.wg.Done() + for { + req, err := stream.Recv() + if err != nil { + errc <- err + return + } + s.mu.Lock() + for _, id := range req.AckIds { + s.Acked[id] = true + } + for i, id := range req.ModifyDeadlineAckIds { + s.Deadlines[id] = req.ModifyDeadlineSeconds[i] + } + s.mu.Unlock() + } + }() + // Send responses. + for { + s.mu.Lock() + if len(s.pullResponses) == 0 { + s.mu.Unlock() + // Nothing to send, so wait for the client to shut down the stream. + err := <-errc // a real error, or at least EOF + if err == io.EOF { + return nil + } + return err + } + pr := s.pullResponses[0] + s.pullResponses = s.pullResponses[1:] + s.mu.Unlock() + if pr.err != nil { + // Add a slight delay to ensure the server receives any + // messages en route from the client before shutting down the stream. + // This reduces flakiness of tests involving retry. + time.Sleep(200 * time.Millisecond) + } + if pr.err == io.EOF { + return nil + } + if pr.err != nil { + return pr.err + } + // Return any error from Recv. 
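+ // (The default case below makes this check non-blocking: if the receive
+ // goroutine has not reported an error yet, fall through and send.)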
+ select { + case err := <-errc: + return err + default: + } + res := &pb.StreamingPullResponse{ReceivedMessages: pr.msgs} + if err := stream.Send(res); err != nil { + return err + } + } +} + +const ( + minMessageRetentionDuration = 10 * time.Minute + maxMessageRetentionDuration = 168 * time.Hour +) + +var defaultMessageRetentionDuration = ptypes.DurationProto(maxMessageRetentionDuration) + +func checkMRD(pmrd *durpb.Duration) error { + mrd, err := ptypes.Duration(pmrd) + if err != nil || mrd < minMessageRetentionDuration || mrd > maxMessageRetentionDuration { + return status.Errorf(codes.InvalidArgument, "bad message_retention_duration %+v", pmrd) + } + return nil +} + +func checkAckDeadline(ads int32) error { + if ads < 10 || ads > 600 { + // PubSub service returns Unknown. + return status.Errorf(codes.Unknown, "bad ack_deadline_seconds: %d", ads) + } + return nil +} + +func (s *fakeServer) CreateSubscription(ctx context.Context, sub *pb.Subscription) (*pb.Subscription, error) { + if s.subs[sub.Name] != nil { + return nil, status.Errorf(codes.AlreadyExists, "subscription %q", sub.Name) + } + sub2 := proto.Clone(sub).(*pb.Subscription) + if err := checkAckDeadline(sub.AckDeadlineSeconds); err != nil { + return nil, err + } + if sub.MessageRetentionDuration == nil { + sub2.MessageRetentionDuration = defaultMessageRetentionDuration + } + if err := checkMRD(sub2.MessageRetentionDuration); err != nil { + return nil, err + } + if sub.PushConfig == nil { + sub2.PushConfig = &pb.PushConfig{} + } + s.subs[sub.Name] = sub2 + return sub2, nil +} + +func (s *fakeServer) GetSubscription(ctx context.Context, req *pb.GetSubscriptionRequest) (*pb.Subscription, error) { + if sub := s.subs[req.Subscription]; sub != nil { + return sub, nil + } + return nil, status.Errorf(codes.NotFound, "subscription %q", req.Subscription) +} + +func (s *fakeServer) UpdateSubscription(ctx context.Context, req *pb.UpdateSubscriptionRequest) (*pb.Subscription, error) { + sub := s.subs[req.Subscription.Name] + if sub == nil { + return nil, status.Errorf(codes.NotFound, "subscription %q", req.Subscription.Name) + } + for _, path := range req.UpdateMask.Paths { + switch path { + case "push_config": + sub.PushConfig = req.Subscription.PushConfig + + case "ack_deadline_seconds": + a := req.Subscription.AckDeadlineSeconds + if err := checkAckDeadline(a); err != nil { + return nil, err + } + sub.AckDeadlineSeconds = a + + case "retain_acked_messages": + sub.RetainAckedMessages = req.Subscription.RetainAckedMessages + + case "message_retention_duration": + if err := checkMRD(req.Subscription.MessageRetentionDuration); err != nil { + return nil, err + } + sub.MessageRetentionDuration = req.Subscription.MessageRetentionDuration + + // TODO(jba): labels + default: + return nil, status.Errorf(codes.InvalidArgument, "unknown field name %q", path) + } + } + return sub, nil +} + +func (s *fakeServer) DeleteSubscription(_ context.Context, req *pb.DeleteSubscriptionRequest) (*emptypb.Empty, error) { + if s.subs[req.Subscription] == nil { + return nil, status.Errorf(codes.NotFound, "subscription %q", req.Subscription) + } + delete(s.subs, req.Subscription) + return &emptypb.Empty{}, nil +} + +func (s *fakeServer) CreateTopic(_ context.Context, t *pb.Topic) (*pb.Topic, error) { + if s.topics[t.Name] != nil { + return nil, status.Errorf(codes.AlreadyExists, "topic %q", t.Name) + } + t2 := proto.Clone(t).(*pb.Topic) + s.topics[t.Name] = t2 + return t2, nil +} + +func (s *fakeServer) GetTopic(_ context.Context, req *pb.GetTopicRequest) 
(*pb.Topic, error) { + if t := s.topics[req.Topic]; t != nil { + return t, nil + } + return nil, status.Errorf(codes.NotFound, "topic %q", req.Topic) +} + +func (s *fakeServer) DeleteTopic(_ context.Context, req *pb.DeleteTopicRequest) (*emptypb.Empty, error) { + if s.topics[req.Topic] == nil { + return nil, status.Errorf(codes.NotFound, "topic %q", req.Topic) + } + delete(s.topics, req.Topic) + return &emptypb.Empty{}, nil +} + +func (s *fakeServer) ListTopics(_ context.Context, req *pb.ListTopicsRequest) (*pb.ListTopicsResponse, error) { + var names []string + for n := range s.topics { + if strings.HasPrefix(n, req.Project) { + names = append(names, n) + } + } + sort.Strings(names) + from, to, nextToken, err := testutil.PageBounds(int(req.PageSize), req.PageToken, len(names)) + if err != nil { + return nil, err + } + res := &pb.ListTopicsResponse{NextPageToken: nextToken} + for i := from; i < to; i++ { + res.Topics = append(res.Topics, s.topics[names[i]]) + } + return res, nil +} + +func (s *fakeServer) ListSubscriptions(_ context.Context, req *pb.ListSubscriptionsRequest) (*pb.ListSubscriptionsResponse, error) { + var names []string + for _, sub := range s.subs { + if strings.HasPrefix(sub.Name, req.Project) { + names = append(names, sub.Name) + } + } + sort.Strings(names) + from, to, nextToken, err := testutil.PageBounds(int(req.PageSize), req.PageToken, len(names)) + if err != nil { + return nil, err + } + res := &pb.ListSubscriptionsResponse{NextPageToken: nextToken} + for i := from; i < to; i++ { + res.Subscriptions = append(res.Subscriptions, s.subs[names[i]]) + } + return res, nil +} + +func (s *fakeServer) ListTopicSubscriptions(_ context.Context, req *pb.ListTopicSubscriptionsRequest) (*pb.ListTopicSubscriptionsResponse, error) { + var names []string + for _, sub := range s.subs { + if sub.Topic == req.Topic { + names = append(names, sub.Name) + } + } + sort.Strings(names) + from, to, nextToken, err := testutil.PageBounds(int(req.PageSize), req.PageToken, len(names)) + if err != nil { + return nil, err + } + return &pb.ListTopicSubscriptionsResponse{ + Subscriptions: names[from:to], + NextPageToken: nextToken, + }, nil +} diff --git a/vendor/cloud.google.com/go/pubsub/flow_controller.go b/vendor/cloud.google.com/go/pubsub/flow_controller.go new file mode 100644 index 0000000..0fd7bd6 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/flow_controller.go @@ -0,0 +1,106 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "golang.org/x/net/context" + "golang.org/x/sync/semaphore" +) + +// flowController implements flow control for Subscription.Receive. +type flowController struct { + maxSize int // max total size of messages + semCount, semSize *semaphore.Weighted // enforces max number and size of messages +} + +// newFlowController creates a new flowController that ensures no more than +// maxCount messages or maxSize bytes are outstanding at once. 
If maxCount or +// maxSize is < 1, then an unlimited number of messages or bytes is permitted, +// respectively. +func newFlowController(maxCount, maxSize int) *flowController { + fc := &flowController{ + maxSize: maxSize, + semCount: nil, + semSize: nil, + } + if maxCount > 0 { + fc.semCount = semaphore.NewWeighted(int64(maxCount)) + } + if maxSize > 0 { + fc.semSize = semaphore.NewWeighted(int64(maxSize)) + } + return fc +} + +// acquire blocks until one message of size bytes can proceed or ctx is done. +// It returns nil in the first case, or ctx.Err() in the second. +// +// acquire allows large messages to proceed by treating a size greater than maxSize +// as if it were equal to maxSize. +func (f *flowController) acquire(ctx context.Context, size int) error { + if f.semCount != nil { + if err := f.semCount.Acquire(ctx, 1); err != nil { + return err + } + } + if f.semSize != nil { + if err := f.semSize.Acquire(ctx, f.bound(size)); err != nil { + if f.semCount != nil { + f.semCount.Release(1) + } + return err + } + } + return nil +} + +// tryAcquire returns false if acquire would block. Otherwise, it behaves like +// acquire and returns true. +// +// tryAcquire allows large messages to proceed by treating a size greater than +// maxSize as if it were equal to maxSize. +func (f *flowController) tryAcquire(size int) bool { + if f.semCount != nil { + if !f.semCount.TryAcquire(1) { + return false + } + } + if f.semSize != nil { + if !f.semSize.TryAcquire(f.bound(size)) { + if f.semCount != nil { + f.semCount.Release(1) + } + return false + } + } + return true +} + +// release notes that one message of size bytes is no longer outstanding. +func (f *flowController) release(size int) { + if f.semCount != nil { + f.semCount.Release(1) + } + if f.semSize != nil { + f.semSize.Release(f.bound(size)) + } +} + +func (f *flowController) bound(size int) int64 { + if size > f.maxSize { + return int64(f.maxSize) + } + return int64(size) +} diff --git a/vendor/cloud.google.com/go/pubsub/flow_controller_test.go b/vendor/cloud.google.com/go/pubsub/flow_controller_test.go new file mode 100644 index 0000000..a0f0260 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/flow_controller_test.go @@ -0,0 +1,236 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "errors" + "fmt" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/context" + "golang.org/x/sync/errgroup" +) + +func TestFlowControllerCancel(t *testing.T) { + // Test canceling a flow controller's context. + t.Parallel() + fc := newFlowController(3, 10) + if err := fc.acquire(context.Background(), 5); err != nil { + t.Fatal(err) + } + // Experiment: a context that times out should always return an error. 
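+ // (5 of the controller's 10 bytes are already held, so acquiring 6 more
+ // must block until the 5ms deadline expires.)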
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Millisecond) + defer cancel() + if err := fc.acquire(ctx, 6); err != context.DeadlineExceeded { + t.Fatalf("got %v, expected DeadlineExceeded", err) + } + // Control: a context that is not done should always return nil. + go func() { + time.Sleep(5 * time.Millisecond) + fc.release(5) + }() + if err := fc.acquire(context.Background(), 6); err != nil { + t.Errorf("got %v, expected nil", err) + } +} + +func TestFlowControllerLargeRequest(t *testing.T) { + // Large requests succeed, consuming the entire allotment. + t.Parallel() + fc := newFlowController(3, 10) + err := fc.acquire(context.Background(), 11) + if err != nil { + t.Fatal(err) + } +} + +func TestFlowControllerNoStarve(t *testing.T) { + // A large request won't starve, because the flowController is + // (best-effort) FIFO. + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + fc := newFlowController(10, 10) + first := make(chan int) + for i := 0; i < 20; i++ { + go func() { + for { + if err := fc.acquire(ctx, 1); err != nil { + if err != context.Canceled { + t.Error(err) + } + return + } + select { + case first <- 1: + default: + } + fc.release(1) + } + }() + } + <-first // Wait until the flowController's state is non-zero. + if err := fc.acquire(ctx, 11); err != nil { + t.Errorf("got %v, want nil", err) + } +} + +func TestFlowControllerSaturation(t *testing.T) { + t.Parallel() + const ( + maxCount = 6 + maxSize = 10 + ) + for _, test := range []struct { + acquireSize int + wantCount, wantSize int64 + }{ + { + // Many small acquires cause the flow controller to reach its max count. + acquireSize: 1, + wantCount: 6, + wantSize: 6, + }, + { + // Five acquires of size 2 will cause the flow controller to reach its max size, + // but not its max count. + acquireSize: 2, + wantCount: 5, + wantSize: 10, + }, + { + // If the requests are the right size (relatively prime to maxSize), + // the flow controller will not saturate on size. (In this case, not on count either.) + acquireSize: 3, + wantCount: 3, + wantSize: 9, + }, + } { + fc := newFlowController(maxCount, maxSize) + // Atomically track flow controller state. + var curCount, curSize int64 + success := errors.New("") + // Time out if wantSize or wantCount is never reached. + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + g, ctx := errgroup.WithContext(ctx) + for i := 0; i < 10; i++ { + g.Go(func() error { + var hitCount, hitSize bool + // Run at least until we hit the expected values, and at least + // for enough iterations to exceed them if the flow controller + // is broken. + for i := 0; i < 100 || !hitCount || !hitSize; i++ { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + if err := fc.acquire(ctx, test.acquireSize); err != nil { + return err + } + c := atomic.AddInt64(&curCount, 1) + if c > test.wantCount { + return fmt.Errorf("count %d exceeds want %d", c, test.wantCount) + } + if c == test.wantCount { + hitCount = true + } + s := atomic.AddInt64(&curSize, int64(test.acquireSize)) + if s > test.wantSize { + return fmt.Errorf("size %d exceeds want %d", s, test.wantSize) + } + if s == test.wantSize { + hitSize = true + } + time.Sleep(5 * time.Millisecond) // Let other goroutines make progress. 
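+ // Decrement the trackers before releasing capacity, so that a goroutine
+ // that acquires immediately after the release never observes a count or
+ // size above the expected maximum.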
+ if atomic.AddInt64(&curCount, -1) < 0 {
+ return errors.New("negative count")
+ }
+ if atomic.AddInt64(&curSize, -int64(test.acquireSize)) < 0 {
+ return errors.New("negative size")
+ }
+ fc.release(test.acquireSize)
+ }
+ return success
+ })
+ }
+ if err := g.Wait(); err != success {
+ t.Errorf("%+v: %v", test, err)
+ continue
+ }
+ }
+}
+
+func TestFlowControllerTryAcquire(t *testing.T) {
+ fc := newFlowController(3, 10)
+
+ // Successfully tryAcquire 4 bytes.
+ if !fc.tryAcquire(4) {
+ t.Error("got false, wanted true")
+ }
+
+ // Fail to tryAcquire 7 bytes.
+ if fc.tryAcquire(7) {
+ t.Error("got true, wanted false")
+ }
+
+ // Successfully tryAcquire 6 bytes.
+ if !fc.tryAcquire(6) {
+ t.Error("got false, wanted true")
+ }
+}
+
+func TestFlowControllerUnboundedCount(t *testing.T) {
+ ctx := context.Background()
+ fc := newFlowController(0, 10)
+
+ // Successfully acquire 4 bytes.
+ if err := fc.acquire(ctx, 4); err != nil {
+ t.Errorf("got %v, wanted no error", err)
+ }
+
+ // Successfully tryAcquire 4 bytes.
+ if !fc.tryAcquire(4) {
+ t.Error("got false, wanted true")
+ }
+
+ // Fail to tryAcquire 3 bytes.
+ if fc.tryAcquire(3) {
+ t.Error("got true, wanted false")
+ }
+}
+
+func TestFlowControllerUnboundedBytes(t *testing.T) {
+ ctx := context.Background()
+ fc := newFlowController(2, 0)
+
+ // Successfully acquire 4GB.
+ if err := fc.acquire(ctx, 4e9); err != nil {
+ t.Errorf("got %v, wanted no error", err)
+ }
+
+ // Successfully tryAcquire 4GB.
+ if !fc.tryAcquire(4e9) {
+ t.Error("got false, wanted true")
+ }
+
+ // Fail to tryAcquire a third message.
+ if fc.tryAcquire(3) {
+ t.Error("got true, wanted false")
+ }
+}
diff --git a/vendor/cloud.google.com/go/pubsub/go18.go b/vendor/cloud.google.com/go/pubsub/go18.go
new file mode 100644
index 0000000..7a72294
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/go18.go
@@ -0,0 +1,150 @@
+// Copyright 2018 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+package pubsub
+
+import (
+ "log"
+ "sync"
+
+ "go.opencensus.io/plugin/ocgrpc"
+ "go.opencensus.io/stats"
+ "go.opencensus.io/stats/view"
+ "go.opencensus.io/tag"
+ "golang.org/x/net/context"
+ "google.golang.org/api/option"
+ "google.golang.org/grpc"
+)
+
+func openCensusOptions() []option.ClientOption {
+ return []option.ClientOption{
+ option.WithGRPCDialOption(grpc.WithStatsHandler(&ocgrpc.ClientHandler{})),
+ }
+}
+
+var subscriptionKey tag.Key
+
+func init() {
+ var err error
+ if subscriptionKey, err = tag.NewKey("subscription"); err != nil {
+ log.Fatal("cannot create 'subscription' key")
+ }
+}
+
+const statsPrefix = "cloud.google.com/go/pubsub/"
+
+var (
+ // PullCount is a measure of the number of messages pulled.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ PullCount = stats.Int64(statsPrefix+"pull_count", "Number of PubSub messages pulled", stats.UnitNone)
+
+ // AckCount is a measure of the number of messages acked.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ AckCount = stats.Int64(statsPrefix+"ack_count", "Number of PubSub messages acked", stats.UnitNone)
+
+ // NackCount is a measure of the number of messages nacked.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ NackCount = stats.Int64(statsPrefix+"nack_count", "Number of PubSub messages nacked", stats.UnitNone)
+
+ // ModAckCount is a measure of the number of messages whose ack-deadline was modified.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ ModAckCount = stats.Int64(statsPrefix+"mod_ack_count", "Number of ack-deadlines modified", stats.UnitNone)
+
+ // StreamOpenCount is a measure of the number of times a streaming-pull stream was opened.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ StreamOpenCount = stats.Int64(statsPrefix+"stream_open_count", "Number of calls opening a new streaming pull", stats.UnitNone)
+
+ // StreamRetryCount is a measure of the number of times a streaming-pull operation was retried.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ StreamRetryCount = stats.Int64(statsPrefix+"stream_retry_count", "Number of retries of a stream send or receive", stats.UnitNone)
+
+ // StreamRequestCount is a measure of the number of requests sent on a streaming-pull stream.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ StreamRequestCount = stats.Int64(statsPrefix+"stream_request_count", "Number of gRPC StreamingPull request messages sent", stats.UnitNone)
+
+ // StreamResponseCount is a measure of the number of responses received on a streaming-pull stream.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ StreamResponseCount = stats.Int64(statsPrefix+"stream_response_count", "Number of gRPC StreamingPull response messages received", stats.UnitNone)
+
+ // PullCountView is a cumulative sum of PullCount.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ PullCountView *view.View
+
+ // AckCountView is a cumulative sum of AckCount.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ AckCountView *view.View
+
+ // NackCountView is a cumulative sum of NackCount.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ NackCountView *view.View
+
+ // ModAckCountView is a cumulative sum of ModAckCount.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ ModAckCountView *view.View
+
+ // StreamOpenCountView is a cumulative sum of StreamOpenCount.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ StreamOpenCountView *view.View
+
+ // StreamRetryCountView is a cumulative sum of StreamRetryCount.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ StreamRetryCountView *view.View
+
+ // StreamRequestCountView is a cumulative sum of StreamRequestCount.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ StreamRequestCountView *view.View
+
+ // StreamResponseCountView is a cumulative sum of StreamResponseCount.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ StreamResponseCountView *view.View +) + +func init() { + PullCountView = countView(PullCount) + AckCountView = countView(AckCount) + NackCountView = countView(NackCount) + ModAckCountView = countView(ModAckCount) + StreamOpenCountView = countView(StreamOpenCount) + StreamRetryCountView = countView(StreamRetryCount) + StreamRequestCountView = countView(StreamRequestCount) + StreamResponseCountView = countView(StreamResponseCount) +} + +func countView(m *stats.Int64Measure) *view.View { + return &view.View{ + Name: m.Name(), + Description: m.Description(), + TagKeys: []tag.Key{subscriptionKey}, + Measure: m, + Aggregation: view.Sum(), + } +} + +var logOnce sync.Once + +func withSubscriptionKey(ctx context.Context, subName string) context.Context { + ctx, err := tag.New(ctx, tag.Upsert(subscriptionKey, subName)) + if err != nil { + logOnce.Do(func() { + log.Printf("pubsub: error creating tag map: %v", err) + }) + } + return ctx +} + +func recordStat(ctx context.Context, m *stats.Int64Measure, n int64) { + stats.Record(ctx, m.M(n)) +} diff --git a/vendor/cloud.google.com/go/pubsub/integration_test.go b/vendor/cloud.google.com/go/pubsub/integration_test.go new file mode 100644 index 0000000..7b1a924 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/integration_test.go @@ -0,0 +1,448 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "fmt" + "testing" + "time" + + gax "github.com/googleapis/gax-go" + + "golang.org/x/net/context" + + "cloud.google.com/go/iam" + "cloud.google.com/go/internal" + "cloud.google.com/go/internal/testutil" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + topicIDs = testutil.NewUIDSpace("topic") + subIDs = testutil.NewUIDSpace("sub") +) + +// messageData is used to hold the contents of a message so that it can be compared against the contents +// of another message without regard to irrelevant fields. +type messageData struct { + ID string + Data []byte + Attributes map[string]string +} + +func extractMessageData(m *Message) *messageData { + return &messageData{ + ID: m.ID, + Data: m.Data, + Attributes: m.Attributes, + } +} + +func integrationTestClient(t *testing.T, ctx context.Context) *Client { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + projID := testutil.ProjID() + if projID == "" { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + ts := testutil.TokenSource(ctx, ScopePubSub, ScopeCloudPlatform) + if ts == nil { + t.Skip("Integration tests skipped. 
See CONTRIBUTING.md for details") + } + client, err := NewClient(ctx, projID, option.WithTokenSource(ts)) + if err != nil { + t.Fatalf("Creating client error: %v", err) + } + return client +} + +func TestAll(t *testing.T) { + t.Parallel() + ctx := context.Background() + client := integrationTestClient(t, ctx) + defer client.Close() + + topic, err := client.CreateTopic(ctx, topicIDs.New()) + if err != nil { + t.Errorf("CreateTopic error: %v", err) + } + defer topic.Stop() + + var sub *Subscription + if sub, err = client.CreateSubscription(ctx, subIDs.New(), SubscriptionConfig{Topic: topic}); err != nil { + t.Errorf("CreateSub error: %v", err) + } + + exists, err := topic.Exists(ctx) + if err != nil { + t.Fatalf("TopicExists error: %v", err) + } + if !exists { + t.Errorf("topic %v should exist, but it doesn't", topic) + } + + exists, err = sub.Exists(ctx) + if err != nil { + t.Fatalf("SubExists error: %v", err) + } + if !exists { + t.Errorf("subscription %s should exist, but it doesn't", sub.ID()) + } + + var msgs []*Message + for i := 0; i < 10; i++ { + text := fmt.Sprintf("a message with an index %d", i) + attrs := make(map[string]string) + attrs["foo"] = "bar" + msgs = append(msgs, &Message{ + Data: []byte(text), + Attributes: attrs, + }) + } + + // Publish the messages. + type pubResult struct { + m *Message + r *PublishResult + } + var rs []pubResult + for _, m := range msgs { + r := topic.Publish(ctx, m) + rs = append(rs, pubResult{m, r}) + } + want := make(map[string]*messageData) + for _, res := range rs { + id, err := res.r.Get(ctx) + if err != nil { + t.Fatal(err) + } + md := extractMessageData(res.m) + md.ID = id + want[md.ID] = md + } + + // Use a timeout to ensure that Pull does not block indefinitely if there are unexpectedly few messages available. 
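+ // (The cancel func returned by WithTimeout is discarded here; the context
+ // is simply allowed to expire.)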
+ timeoutCtx, _ := context.WithTimeout(ctx, time.Minute)
+ gotMsgs, err := pullN(timeoutCtx, sub, len(want), func(ctx context.Context, m *Message) {
+ m.Ack()
+ })
+ if err != nil {
+ t.Fatalf("Pull: %v", err)
+ }
+ got := make(map[string]*messageData)
+ for _, m := range gotMsgs {
+ md := extractMessageData(m)
+ got[md.ID] = md
+ }
+ if !testutil.Equal(got, want) {
+ t.Errorf("messages: got: %v ; want: %v", got, want)
+ }
+
+ if msg, ok := testIAM(ctx, topic.IAM(), "pubsub.topics.get"); !ok {
+ t.Errorf("topic IAM: %s", msg)
+ }
+ if msg, ok := testIAM(ctx, sub.IAM(), "pubsub.subscriptions.get"); !ok {
+ t.Errorf("sub IAM: %s", msg)
+ }
+
+ snap, err := sub.CreateSnapshot(ctx, "")
+ if err != nil {
+ t.Fatalf("CreateSnapshot error: %v", err)
+ }
+
+ timeoutCtx, _ = context.WithTimeout(ctx, time.Minute)
+ err = internal.Retry(timeoutCtx, gax.Backoff{}, func() (bool, error) {
+ snapIt := client.Snapshots(timeoutCtx)
+ for {
+ s, err := snapIt.Next()
+ if err == nil && s.name == snap.name {
+ return true, nil
+ }
+ if err == iterator.Done {
+ return false, fmt.Errorf("cannot find snapshot: %q", snap.name)
+ }
+ if err != nil {
+ return false, err
+ }
+ }
+ })
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = internal.Retry(timeoutCtx, gax.Backoff{}, func() (bool, error) {
+ err := sub.SeekToSnapshot(timeoutCtx, snap.Snapshot)
+ return err == nil, err
+ })
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = internal.Retry(timeoutCtx, gax.Backoff{}, func() (bool, error) {
+ err := sub.SeekToTime(timeoutCtx, time.Now())
+ return err == nil, err
+ })
+ if err != nil {
+ t.Error(err)
+ }
+
+ err = internal.Retry(timeoutCtx, gax.Backoff{}, func() (bool, error) {
+ snapHandle := client.Snapshot(snap.ID())
+ err := snapHandle.Delete(timeoutCtx)
+ return err == nil, err
+ })
+ if err != nil {
+ t.Error(err)
+ }
+
+ if err := sub.Delete(ctx); err != nil {
+ t.Errorf("DeleteSub error: %v", err)
+ }
+
+ if err := topic.Delete(ctx); err != nil {
+ t.Errorf("DeleteTopic error: %v", err)
+ }
+}
+
+// IAM tests.
+// NOTE: for these to succeed, the test runner identity must have the Pub/Sub Admin or Owner roles.
+// To set, visit https://console.developers.google.com, select "IAM & Admin" from the top-left
+// menu, choose the account, click the Roles dropdown, and select "Pub/Sub > Pub/Sub Admin".
+// TODO(jba): move this to a testing package within cloud.google.com/iam, so we can re-use it.
+func testIAM(ctx context.Context, h *iam.Handle, permission string) (msg string, ok bool) {
+ // Attempting to add a non-existent identity (e.g. "alice@example.com") causes the service
+ // to return an internal error, so use a real identity.
+ const member = "domain:google.com"
+
+ var policy *iam.Policy
+ var err error
+
+ if policy, err = h.Policy(ctx); err != nil {
+ return fmt.Sprintf("Policy: %v", err), false
+ }
+ // The resource is new, so the policy should be empty.
+ if got := policy.Roles(); len(got) > 0 {
+ return fmt.Sprintf("initially: got roles %v, want none", got), false
+ }
+ // Add a member, set the policy, then check that the member is present.
+ policy.Add(member, iam.Viewer) + if err := h.SetPolicy(ctx, policy); err != nil { + return fmt.Sprintf("SetPolicy: %v", err), false + } + if policy, err = h.Policy(ctx); err != nil { + return fmt.Sprintf("Policy: %v", err), false + } + if got, want := policy.Members(iam.Viewer), []string{member}; !testutil.Equal(got, want) { + return fmt.Sprintf("after Add: got %v, want %v", got, want), false + } + // Now remove that member, set the policy, and check that it's empty again. + policy.Remove(member, iam.Viewer) + if err := h.SetPolicy(ctx, policy); err != nil { + return fmt.Sprintf("SetPolicy: %v", err), false + } + if policy, err = h.Policy(ctx); err != nil { + return fmt.Sprintf("Policy: %v", err), false + } + if got := policy.Roles(); len(got) > 0 { + return fmt.Sprintf("after Remove: got roles %v, want none", got), false + } + // Call TestPermissions. + // Because this user is an admin, it has all the permissions on the + // resource type. Note: the service fails if we ask for inapplicable + // permissions (e.g. a subscription permission on a topic, or a topic + // create permission on a topic rather than its parent). + wantPerms := []string{permission} + gotPerms, err := h.TestPermissions(ctx, wantPerms) + if err != nil { + return fmt.Sprintf("TestPermissions: %v", err), false + } + if !testutil.Equal(gotPerms, wantPerms) { + return fmt.Sprintf("TestPermissions: got %v, want %v", gotPerms, wantPerms), false + } + return "", true +} + +func TestSubscriptionUpdate(t *testing.T) { + t.Parallel() + ctx := context.Background() + client := integrationTestClient(t, ctx) + defer client.Close() + + topic, err := client.CreateTopic(ctx, topicIDs.New()) + if err != nil { + t.Fatalf("CreateTopic error: %v", err) + } + defer topic.Stop() + defer topic.Delete(ctx) + + var sub *Subscription + if sub, err = client.CreateSubscription(ctx, subIDs.New(), SubscriptionConfig{Topic: topic}); err != nil { + t.Fatalf("CreateSub error: %v", err) + } + defer sub.Delete(ctx) + + got, err := sub.Config(ctx) + if err != nil { + t.Fatal(err) + } + want := SubscriptionConfig{ + Topic: topic, + AckDeadline: 10 * time.Second, + RetainAckedMessages: false, + RetentionDuration: defaultRetentionDuration, + } + if !testutil.Equal(got, want) { + t.Fatalf("\ngot %+v\nwant %+v", got, want) + } + // Add a PushConfig and change other fields. + projID := testutil.ProjID() + pc := PushConfig{ + Endpoint: "https://" + projID + ".appspot.com/_ah/push-handlers/push", + Attributes: map[string]string{"x-goog-version": "v1"}, + } + got, err = sub.Update(ctx, SubscriptionConfigToUpdate{ + PushConfig: &pc, + AckDeadline: 2 * time.Minute, + RetainAckedMessages: true, + RetentionDuration: 2 * time.Hour, + }) + if err != nil { + t.Fatal(err) + } + want = SubscriptionConfig{ + Topic: topic, + PushConfig: pc, + AckDeadline: 2 * time.Minute, + RetainAckedMessages: true, + RetentionDuration: 2 * time.Hour, + } + if !testutil.Equal(got, want) { + t.Fatalf("\ngot %+v\nwant %+v", got, want) + } + // Remove the PushConfig, turning the subscription back into pull mode. + // Change AckDeadline, but nothing else. + pc = PushConfig{} + got, err = sub.Update(ctx, SubscriptionConfigToUpdate{ + PushConfig: &pc, + AckDeadline: 30 * time.Second, + }) + if err != nil { + t.Fatal(err) + } + want.PushConfig = pc + want.AckDeadline = 30 * time.Second + // service issue: PushConfig attributes are not removed. + // TODO(jba): remove when issue resolved. 
+ want.PushConfig.Attributes = map[string]string{"x-goog-version": "v1"} + if !testutil.Equal(got, want) { + t.Fatalf("\ngot %+v\nwant %+v", got, want) + } + // If nothing changes, our client returns an error. + _, err = sub.Update(ctx, SubscriptionConfigToUpdate{}) + if err == nil { + t.Fatal("got nil, wanted error") + } +} + +func TestPublicTopic(t *testing.T) { + t.Parallel() + ctx := context.Background() + client := integrationTestClient(t, ctx) + defer client.Close() + + sub, err := client.CreateSubscription(ctx, subIDs.New(), SubscriptionConfig{ + Topic: client.TopicInProject("taxirides-realtime", "pubsub-public-data"), + }) + if err != nil { + t.Fatal(err) + } + defer sub.Delete(ctx) + // Confirm that Receive works. It doesn't matter if we actually get any + // messages. + ctxt, cancel := context.WithTimeout(ctx, 5*time.Second) + err = sub.Receive(ctxt, func(_ context.Context, msg *Message) { + msg.Ack() + cancel() + }) + if err != nil { + t.Fatal(err) + } +} + +func TestIntegration_Errors(t *testing.T) { + // Test various edge conditions. + t.Parallel() + ctx := context.Background() + client := integrationTestClient(t, ctx) + defer client.Close() + + topic, err := client.CreateTopic(ctx, topicIDs.New()) + if err != nil { + t.Fatalf("CreateTopic error: %v", err) + } + defer topic.Stop() + defer topic.Delete(ctx) + + // Out-of-range retention duration. + sub, err := client.CreateSubscription(ctx, subIDs.New(), SubscriptionConfig{ + Topic: topic, + RetentionDuration: 1 * time.Second, + }) + if want := codes.InvalidArgument; grpc.Code(err) != want { + t.Errorf("got <%v>, want %s", err, want) + } + if err == nil { + sub.Delete(ctx) + } + + // Ack deadline less than minimum. + sub, err = client.CreateSubscription(ctx, subIDs.New(), SubscriptionConfig{ + Topic: topic, + AckDeadline: 5 * time.Second, + }) + if want := codes.Unknown; grpc.Code(err) != want { + t.Errorf("got <%v>, want %s", err, want) + } + if err == nil { + sub.Delete(ctx) + } + + // Updating a non-existent subscription. + sub = client.Subscription(subIDs.New()) + _, err = sub.Update(ctx, SubscriptionConfigToUpdate{AckDeadline: 20 * time.Second}) + if want := codes.NotFound; grpc.Code(err) != want { + t.Errorf("got <%v>, want %s", err, want) + } + // Deleting a non-existent subscription. + err = sub.Delete(ctx) + if want := codes.NotFound; grpc.Code(err) != want { + t.Errorf("got <%v>, want %s", err, want) + } + + // Updating out-of-range retention duration. + sub, err = client.CreateSubscription(ctx, subIDs.New(), SubscriptionConfig{Topic: topic}) + if err != nil { + t.Fatal(err) + } + defer sub.Delete(ctx) + _, err = sub.Update(ctx, SubscriptionConfigToUpdate{RetentionDuration: 1000 * time.Hour}) + if want := codes.InvalidArgument; grpc.Code(err) != want { + t.Errorf("got <%v>, want %s", err, want) + } +} diff --git a/vendor/cloud.google.com/go/pubsub/internal/distribution/distribution.go b/vendor/cloud.google.com/go/pubsub/internal/distribution/distribution.go new file mode 100644 index 0000000..c13fd63 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/internal/distribution/distribution.go @@ -0,0 +1,70 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package distribution + +import ( + "log" + "math" + "sort" + "sync/atomic" +) + +// D is a distribution. Methods of D can be called concurrently by multiple +// goroutines. +type D struct { + buckets []uint64 +} + +// New creates a new distribution capable of holding values from 0 to n-1. +func New(n int) *D { + return &D{ + buckets: make([]uint64, n), + } +} + +// Record records value v to the distribution. +// To help with distributions with long tails, if v is larger than the maximum value, +// Record records the maximum value instead. +// If v is negative, Record panics. +func (d *D) Record(v int) { + if v < 0 { + log.Panicf("Record: value out of range: %d", v) + } else if v >= len(d.buckets) { + v = len(d.buckets) - 1 + } + atomic.AddUint64(&d.buckets[v], 1) +} + +// Percentile computes the p-th percentile of the distribution where +// p is between 0 and 1. +func (d *D) Percentile(p float64) int { + // NOTE: This implementation uses the nearest-rank method. + // https://en.wikipedia.org/wiki/Percentile#The_nearest-rank_method + + if p < 0 || p > 1 { + log.Panicf("Percentile: percentile out of range: %f", p) + } + + bucketSums := make([]uint64, len(d.buckets)) + var sum uint64 + for i := range bucketSums { + sum += atomic.LoadUint64(&d.buckets[i]) + bucketSums[i] = sum + } + + total := bucketSums[len(bucketSums)-1] + target := uint64(math.Ceil(float64(total) * p)) + return sort.Search(len(bucketSums), func(i int) bool { return bucketSums[i] >= target }) +} diff --git a/vendor/cloud.google.com/go/pubsub/internal/distribution/distribution_test.go b/vendor/cloud.google.com/go/pubsub/internal/distribution/distribution_test.go new file mode 100644 index 0000000..552dd90 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/internal/distribution/distribution_test.go @@ -0,0 +1,94 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
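+
+// A worked example of the nearest-rank method, matching the first case in
+// TestDistribution below: with recorded values {15, 20, 35, 40, 50} and
+// p = 0.3, ceil(0.3*5) = 2, so Percentile(0.3) returns the 2nd-smallest
+// recorded value, 20.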
+ +package distribution + +import ( + "sync" + "testing" +) + +func TestDistribution(t *testing.T) { + // These tests come from examples in https://en.wikipedia.org/wiki/Percentile#The_nearest-rank_method + tests := []struct { + // values in distribution + vals []int + + // percentiles and expected percentile values + pp []float64 + vv []int + }{ + { + vals: []int{15, 20, 35, 40, 50}, + pp: []float64{0.05, 0.3, 0.4, 0.5, 1}, + vv: []int{15, 20, 20, 35, 50}, + }, + { + vals: []int{3, 6, 7, 8, 8, 10, 13, 15, 16, 20}, + pp: []float64{0.25, 0.5, 0.75, 1}, + vv: []int{7, 8, 15, 20}, + }, + { + vals: []int{3, 6, 7, 8, 8, 9, 10, 13, 15, 16, 20}, + pp: []float64{0.25, 0.5, 0.75, 1}, + vv: []int{7, 9, 15, 20}, + }, + } + + maxVal := 0 + for _, tst := range tests { + for _, v := range tst.vals { + if maxVal < v { + maxVal = v + } + } + } + + for _, tst := range tests { + d := New(maxVal + 1) + for _, v := range tst.vals { + d.Record(v) + } + for i, p := range tst.pp { + got, want := d.Percentile(p), tst.vv[i] + if got != want { + t.Errorf("d=%v, d.Percentile(%f)=%d, want %d", d, p, got, want) + } + } + } +} + +func TestRace(t *testing.T) { + const N int = 1e3 + const parallel = 2 + + d := New(N) + + var wg sync.WaitGroup + wg.Add(parallel) + for i := 0; i < parallel; i++ { + go func() { + for i := 0; i < N; i++ { + d.Record(i) + } + wg.Done() + }() + } + + for i := 0; i < N; i++ { + if p := d.Percentile(0.5); p > N { + t.Fatalf("d.Percentile(0.5)=%d, expected to be at most %d", p, N) + } + } +} diff --git a/vendor/cloud.google.com/go/pubsub/iterator.go b/vendor/cloud.google.com/go/pubsub/iterator.go new file mode 100644 index 0000000..78934e2 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/iterator.go @@ -0,0 +1,294 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "sync" + "time" + + vkit "cloud.google.com/go/pubsub/apiv1" + "cloud.google.com/go/pubsub/internal/distribution" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +// newMessageIterator starts a new streamingMessageIterator. Stop must be called on the messageIterator +// when it is no longer needed. +// subName is the full name of the subscription to pull messages from. +// ctx is the context to use for acking messages and extending message deadlines. 
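+// po carries the pull options, notably the subscription's ack deadline and the
+// maximum deadline extension to apply to outstanding messages.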
+func newMessageIterator(ctx context.Context, subc *vkit.SubscriberClient, subName string, po *pullOptions) *streamingMessageIterator { + ps := newPullStream(ctx, subc, subName, int32(po.ackDeadline.Seconds())) + return newStreamingMessageIterator(ctx, ps, po) +} + +type streamingMessageIterator struct { + ctx context.Context + po *pullOptions + ps *pullStream + kaTicker *time.Ticker // keep-alive (deadline extensions) + ackTicker *time.Ticker // message acks + nackTicker *time.Ticker // message nacks (more frequent than acks) + failed chan struct{} // closed on stream error + stopped chan struct{} // closed when Stop is called + drained chan struct{} // closed when stopped && no more pending messages + wg sync.WaitGroup + + mu sync.Mutex + ackTimeDist *distribution.D + keepAliveDeadlines map[string]time.Time + pendingReq *pb.StreamingPullRequest + pendingModAcks map[string]int32 // ack IDs whose ack deadline is to be modified + err error // error from stream failure +} + +func newStreamingMessageIterator(ctx context.Context, ps *pullStream, po *pullOptions) *streamingMessageIterator { + // TODO: make kaTicker frequency more configurable. (ackDeadline - 5s) is a + // reasonable default for now, because the minimum ack period is 10s. This + // gives us 5s grace. + keepAlivePeriod := po.ackDeadline - 5*time.Second + kaTicker := time.NewTicker(keepAlivePeriod) + + // Ack promptly so users don't lose work if client crashes. + ackTicker := time.NewTicker(100 * time.Millisecond) + nackTicker := time.NewTicker(100 * time.Millisecond) + it := &streamingMessageIterator{ + ctx: ctx, + ps: ps, + po: po, + kaTicker: kaTicker, + ackTicker: ackTicker, + nackTicker: nackTicker, + failed: make(chan struct{}), + stopped: make(chan struct{}), + drained: make(chan struct{}), + ackTimeDist: distribution.New(int(maxAckDeadline/time.Second) + 1), + keepAliveDeadlines: map[string]time.Time{}, + pendingReq: &pb.StreamingPullRequest{}, + pendingModAcks: map[string]int32{}, + } + it.wg.Add(1) + go it.sender() + return it +} + +// Subscription.receive will call stop on its messageIterator when finished with it. +// Stop will block until Done has been called on all Messages that have been +// returned by Next, or until the context with which the messageIterator was created +// is cancelled or exceeds its deadline. +func (it *streamingMessageIterator) stop() { + it.mu.Lock() + select { + case <-it.stopped: + default: + close(it.stopped) + } + it.checkDrained() + it.mu.Unlock() + it.wg.Wait() +} + +// checkDrained closes the drained channel if the iterator has been stopped and all +// pending messages have either been n/acked or expired. +// +// Called with the lock held. +func (it *streamingMessageIterator) checkDrained() { + select { + case <-it.drained: + return + default: + } + select { + case <-it.stopped: + if len(it.keepAliveDeadlines) == 0 { + close(it.drained) + } + default: + } +} + +// Called when a message is acked/nacked. +func (it *streamingMessageIterator) done(ackID string, ack bool, receiveTime time.Time) { + it.ackTimeDist.Record(int(time.Since(receiveTime) / time.Second)) + it.mu.Lock() + defer it.mu.Unlock() + delete(it.keepAliveDeadlines, ackID) + if ack { + it.pendingReq.AckIds = append(it.pendingReq.AckIds, ackID) + } else { + it.pendingModAcks[ackID] = 0 // Nack indicated by modifying the deadline to zero. + } + it.checkDrained() +} + +// fail is called when a stream method returns a permanent error. 
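+// Only the first error is recorded, and the failed channel is closed exactly
+// once; later calls are no-ops.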
+func (it *streamingMessageIterator) fail(err error) { + it.mu.Lock() + if it.err == nil { + it.err = err + close(it.failed) + } + it.mu.Unlock() +} + +// receive makes a call to the stream's Recv method and returns +// its messages. +func (it *streamingMessageIterator) receive() ([]*Message, error) { + // Stop retrieving messages if the context is done, the stream + // failed, or the iterator's Stop method was called. + select { + case <-it.ctx.Done(): + return nil, it.ctx.Err() + default: + } + it.mu.Lock() + err := it.err + it.mu.Unlock() + if err != nil { + return nil, err + } + // Receive messages from stream. This may block indefinitely. + res, err := it.ps.Recv() + // The pullStream handles retries, so any error here is fatal. + if err != nil { + it.fail(err) + return nil, err + } + msgs, err := convertMessages(res.ReceivedMessages) + if err != nil { + it.fail(err) + return nil, err + } + + // We received some messages. Remember them so we can keep them alive. Also, + // arrange for a receipt mod-ack (which will occur at the next firing of + // nackTicker). + maxExt := time.Now().Add(it.po.maxExtension) + deadline := trunc32(int64(it.po.ackDeadline.Seconds())) + it.mu.Lock() + now := time.Now() + for _, m := range msgs { + m.receiveTime = now + m.doneFunc = it.done + it.keepAliveDeadlines[m.ackID] = maxExt + // The receipt mod-ack uses the subscription's configured ack deadline. Don't + // change the mod-ack if one is already pending. This is possible if there + // are retries. + if _, ok := it.pendingModAcks[m.ackID]; !ok { + it.pendingModAcks[m.ackID] = deadline + } + } + it.mu.Unlock() + return msgs, nil +} + +// sender runs in a goroutine and handles all sends to the stream. +func (it *streamingMessageIterator) sender() { + defer it.wg.Done() + defer it.kaTicker.Stop() + defer it.ackTicker.Stop() + defer it.nackTicker.Stop() + defer it.ps.CloseSend() + + done := false + for !done { + send := false + select { + case <-it.ctx.Done(): + // Context canceled or timed out: stop immediately, without + // another RPC. + return + + case <-it.failed: + // Stream failed: nothing to do, so stop immediately. + return + + case <-it.drained: + // All outstanding messages have been marked done: + // nothing left to do except send the final request. + it.mu.Lock() + send = (len(it.pendingReq.AckIds) > 0 || len(it.pendingModAcks) > 0) + done = true + + case <-it.kaTicker.C: + it.mu.Lock() + it.handleKeepAlives() + send = (len(it.pendingModAcks) > 0) + + case <-it.nackTicker.C: + it.mu.Lock() + send = (len(it.pendingModAcks) > 0) + + case <-it.ackTicker.C: + it.mu.Lock() + send = (len(it.pendingReq.AckIds) > 0) + } + // Lock is held here. + if send { + req := it.pendingReq + it.pendingReq = &pb.StreamingPullRequest{} + modAcks := it.pendingModAcks + it.pendingModAcks = map[string]int32{} + it.mu.Unlock() + for id, s := range modAcks { + req.ModifyDeadlineAckIds = append(req.ModifyDeadlineAckIds, id) + req.ModifyDeadlineSeconds = append(req.ModifyDeadlineSeconds, s) + } + err := it.send(req) + if err != nil { + // The streamingPuller handles retries, so any error here + // is fatal to the iterator. 
+ it.fail(err) + return + } + } else { + it.mu.Unlock() + } + } +} + +func (it *streamingMessageIterator) send(req *pb.StreamingPullRequest) error { + // Note: len(modAckIDs) == len(modSecs) + var rest *pb.StreamingPullRequest + for len(req.AckIds) > 0 || len(req.ModifyDeadlineAckIds) > 0 { + req, rest = splitRequest(req, maxPayload) + if err := it.ps.Send(req); err != nil { + return err + } + req = rest + } + return nil +} + +// handleKeepAlives modifies the pending request to include deadline extensions +// for live messages. It also purges expired messages. +// +// Called with the lock held. +func (it *streamingMessageIterator) handleKeepAlives() { + now := time.Now() + dl := trunc32(int64(it.po.ackDeadline.Seconds())) + for id, expiry := range it.keepAliveDeadlines { + if expiry.Before(now) { + // This delete will not result in skipping any map items, as implied by + // the spec at https://golang.org/ref/spec#For_statements, "For + // statements with range clause", note 3, and stated explicitly at + // https://groups.google.com/forum/#!msg/golang-nuts/UciASUb03Js/pzSq5iVFAQAJ. + delete(it.keepAliveDeadlines, id) + } else { + // This will not overwrite a nack, because nacking removes the ID from keepAliveDeadlines. + it.pendingModAcks[id] = dl + } + } + it.checkDrained() +} diff --git a/vendor/cloud.google.com/go/pubsub/loadtest/benchmark_test.go b/vendor/cloud.google.com/go/pubsub/loadtest/benchmark_test.go new file mode 100644 index 0000000..ff695ea --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/loadtest/benchmark_test.go @@ -0,0 +1,176 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package loadtest + +// Performance benchmarks for pubsub. +// Run with +// go test -bench . 
-cpu 1 + +import ( + "log" + "sync" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/grpc" + + "cloud.google.com/go/internal/testutil" + "cloud.google.com/go/pubsub" + gtransport "google.golang.org/api/transport/grpc" + pb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +// These constants are designed to match the "throughput" test in +// https://github.com/GoogleCloudPlatform/pubsub/blob/master/load-test-framework/run.py +// and +// https://github.com/GoogleCloudPlatform/pubsub/blob/master/load-test-framework/src/main/java/com/google/pubsub/clients/experimental/CPSPublisherTask.java + +const ( + nMessages = 1e5 + messageSize = 10000 // size of msg data in bytes + batchSize = 10 + batchDuration = 50 * time.Millisecond + serverDelay = 200 * time.Millisecond + maxOutstandingPublishes = 1600 // max_outstanding_messages in run.py +) + +func BenchmarkPublishThroughput(b *testing.B) { + b.SetBytes(nMessages * messageSize) + client := perfClient(serverDelay, 1, b) + + lts := &PubServer{ID: "xxx"} + lts.init(client, "t", messageSize, batchSize, batchDuration) + b.ResetTimer() + for i := 0; i < b.N; i++ { + runOnce(lts) + } +} + +func runOnce(lts *PubServer) { + nRequests := int64(nMessages / batchSize) + var nPublished int64 + var wg sync.WaitGroup + // The Java loadtest framework is rate-limited to 1 billion Execute calls a + // second (each Execute call corresponding to a publishBatch call here), + // but we can ignore this because of the following. + // The framework runs 10,000 threads, each calling Execute in a loop, but + // we can ignore this too. + // The framework caps the number of outstanding calls to Execute at + // maxOutstandingPublishes. That is what we simulate here. + for i := 0; i < maxOutstandingPublishes; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for atomic.AddInt64(&nRequests, -1) >= 0 { + latencies, err := lts.publishBatch() + if err != nil { + log.Fatalf("publishBatch: %v", err) + } + atomic.AddInt64(&nPublished, int64(len(latencies))) + } + }() + } + wg.Wait() + sent := atomic.LoadInt64(&nPublished) + if sent != nMessages { + log.Fatalf("sent %d messages, expected %d", sent, int(nMessages)) + } +} + +func perfClient(pubDelay time.Duration, nConns int, f interface { + Fatal(...interface{}) +}) *pubsub.Client { + ctx := context.Background() + srv, err := newPerfServer(pubDelay) + if err != nil { + f.Fatal(err) + } + conn, err := gtransport.DialInsecure(ctx, + option.WithEndpoint(srv.Addr), + option.WithGRPCConnectionPool(nConns), + + // TODO(grpc/grpc-go#1388) using connection pool without WithBlock + // can cause RPCs to fail randomly. We can delete this after the issue is fixed. 
+ option.WithGRPCDialOption(grpc.WithBlock())) + if err != nil { + f.Fatal(err) + } + client, err := pubsub.NewClient(ctx, "projectID", option.WithGRPCConn(conn)) + if err != nil { + f.Fatal(err) + } + return client +} + +type perfServer struct { + pb.PublisherServer + pb.SubscriberServer + + Addr string + pubDelay time.Duration + + mu sync.Mutex + activePubs int + maxActivePubs int +} + +func newPerfServer(pubDelay time.Duration) (*perfServer, error) { + srv, err := testutil.NewServer(grpc.MaxMsgSize(pubsub.MaxPublishRequestBytes)) + if err != nil { + return nil, err + } + perf := &perfServer{Addr: srv.Addr, pubDelay: pubDelay} + pb.RegisterPublisherServer(srv.Gsrv, perf) + pb.RegisterSubscriberServer(srv.Gsrv, perf) + srv.Start() + return perf, nil +} + +var doLog = false + +func (p *perfServer) incActivePubs(n int) (int, bool) { + p.mu.Lock() + defer p.mu.Unlock() + p.activePubs += n + newMax := false + if p.activePubs > p.maxActivePubs { + p.maxActivePubs = p.activePubs + newMax = true + } + return p.activePubs, newMax +} + +func (p *perfServer) Publish(ctx context.Context, req *pb.PublishRequest) (*pb.PublishResponse, error) { + a, newMax := p.incActivePubs(1) + defer p.incActivePubs(-1) + if newMax && doLog { + log.Printf("max %d active publish calls", a) + } + if doLog { + log.Printf("%p -> Publish %d", p, len(req.Messages)) + } + res := &pb.PublishResponse{MessageIds: make([]string, len(req.Messages))} + for i := range res.MessageIds { + res.MessageIds[i] = "x" + } + time.Sleep(p.pubDelay) + if doLog { + log.Printf("%p <- Publish %d", p, len(req.Messages)) + } + return res, nil +} diff --git a/vendor/cloud.google.com/go/pubsub/loadtest/cmd/loadtest.go b/vendor/cloud.google.com/go/pubsub/loadtest/cmd/loadtest.go new file mode 100644 index 0000000..fa75eb3 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/loadtest/cmd/loadtest.go @@ -0,0 +1,54 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
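+
+// Command loadtest runs a single load test worker implementing the gRPC
+// protocol in cloud.google.com/go/pubsub/loadtest/pb. A sketch of typical
+// invocations, assuming the binary has been built as "loadtest":
+//
+//	loadtest -r pub -worker_port 6000   # publisher worker
+//	loadtest -r sub -worker_port 6001   # subscriber worker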
+ +package main + +import ( + "flag" + "fmt" + "log" + "net" + "strconv" + + "math/rand" + + "cloud.google.com/go/pubsub/loadtest" + pb "cloud.google.com/go/pubsub/loadtest/pb" + "google.golang.org/grpc" +) + +func main() { + port := flag.Uint("worker_port", 6000, "port to bind worker to") + role := flag.String("r", "", "role: pub/sub") + flag.Parse() + + var lts pb.LoadtestWorkerServer + switch *role { + case "pub": + lts = &loadtest.PubServer{ID: strconv.Itoa(rand.Int())} + case "sub": + lts = &loadtest.SubServer{} + default: + log.Fatalf("unknown role: %q", *role) + } + + serv := grpc.NewServer() + pb.RegisterLoadtestWorkerServer(serv, lts) + + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) + if err != nil { + log.Fatalf("failed to listen: %v", err) + } + serv.Serve(lis) +} diff --git a/vendor/cloud.google.com/go/pubsub/loadtest/loadtest.go b/vendor/cloud.google.com/go/pubsub/loadtest/loadtest.go new file mode 100644 index 0000000..19a7a60 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/loadtest/loadtest.go @@ -0,0 +1,215 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package loadtest implements load testing for pubsub, +// following the interface defined in https://github.com/GoogleCloudPlatform/pubsub/tree/master/load-test-framework/ . +// +// This package is experimental. 
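+//
+// A worker implements the generated pb.LoadtestWorkerServer interface:
+// Start configures the worker from a StartRequest, and each subsequent
+// Execute call either publishes one batch of messages (PubServer) or
+// reports the messages received since the previous call (SubServer).
+// See cmd/loadtest for a minimal worker binary built on these types.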
+package loadtest + +import ( + "bytes" + "errors" + "log" + "runtime" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/context" + "golang.org/x/time/rate" + + "github.com/golang/protobuf/ptypes" + + "cloud.google.com/go/pubsub" + pb "cloud.google.com/go/pubsub/loadtest/pb" +) + +type pubServerConfig struct { + topic *pubsub.Topic + msgData []byte + batchSize int32 +} + +type PubServer struct { + ID string + + cfg atomic.Value + seqNum int32 +} + +func (l *PubServer) Start(ctx context.Context, req *pb.StartRequest) (*pb.StartResponse, error) { + log.Println("received start") + c, err := pubsub.NewClient(ctx, req.Project) + if err != nil { + return nil, err + } + dur, err := ptypes.Duration(req.PublishBatchDuration) + if err != nil { + return nil, err + } + l.init(c, req.Topic, req.MessageSize, req.PublishBatchSize, dur) + log.Println("started") + return &pb.StartResponse{}, nil +} + +func (l *PubServer) init(c *pubsub.Client, topicName string, msgSize, batchSize int32, batchDur time.Duration) { + topic := c.Topic(topicName) + topic.PublishSettings = pubsub.PublishSettings{ + DelayThreshold: batchDur, + CountThreshold: 950, + ByteThreshold: 9500000, + } + + l.cfg.Store(pubServerConfig{ + topic: topic, + msgData: bytes.Repeat([]byte{'A'}, int(msgSize)), + batchSize: batchSize, + }) +} + +func (l *PubServer) Execute(ctx context.Context, _ *pb.ExecuteRequest) (*pb.ExecuteResponse, error) { + latencies, err := l.publishBatch() + if err != nil { + log.Printf("error: %v", err) + return nil, err + } + return &pb.ExecuteResponse{Latencies: latencies}, nil +} + +func (l *PubServer) publishBatch() ([]int64, error) { + var cfg pubServerConfig + if c, ok := l.cfg.Load().(pubServerConfig); ok { + cfg = c + } else { + return nil, errors.New("config not loaded") + } + + start := time.Now() + latencies := make([]int64, cfg.batchSize) + startStr := strconv.FormatInt(start.UnixNano()/1e6, 10) + seqNum := atomic.AddInt32(&l.seqNum, cfg.batchSize) - cfg.batchSize + + rs := make([]*pubsub.PublishResult, cfg.batchSize) + for i := int32(0); i < cfg.batchSize; i++ { + rs[i] = cfg.topic.Publish(context.TODO(), &pubsub.Message{ + Data: cfg.msgData, + Attributes: map[string]string{ + "sendTime": startStr, + "clientId": l.ID, + "sequenceNumber": strconv.Itoa(int(seqNum + i)), + }, + }) + } + for i, r := range rs { + _, err := r.Get(context.Background()) + if err != nil { + return nil, err + } + // TODO(jba,pongad): fix latencies + // Later values will be skewed by earlier ones, since we wait for the + // results in order. (On the other hand, it may not matter much, since + // messages are added to bundles in order and bundles get sent more or + // less in order.) If we want more accurate values, we can either start + // a goroutine for each result (similar to the original code using a + // callback), or call reflect.Select with the Ready channels of the + // results. + latencies[i] = time.Since(start).Nanoseconds() / 1e6 + } + return latencies, nil +} + +type SubServer struct { + lim *rate.Limiter + + mu sync.Mutex + idents []*pb.MessageIdentifier + latencies []int64 +} + +func (s *SubServer) Start(ctx context.Context, req *pb.StartRequest) (*pb.StartResponse, error) { + log.Println("received start") + s.lim = rate.NewLimiter(rate.Every(time.Second), 1) + + c, err := pubsub.NewClient(ctx, req.Project) + if err != nil { + return nil, err + } + + // Load test API doesn't define any way to stop right now. 
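+	// Receive therefore runs for the lifetime of the process; it returns
+	// only on an unrecoverable error, which log.Fatal turns into an exit.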
+ go func() { + sub := c.Subscription(req.GetPubsubOptions().Subscription) + sub.ReceiveSettings.NumGoroutines = 10 * runtime.GOMAXPROCS(0) + err := sub.Receive(context.Background(), s.callback) + log.Fatal(err) + }() + + log.Println("started") + return &pb.StartResponse{}, nil +} + +func (s *SubServer) callback(_ context.Context, m *pubsub.Message) { + id, err := strconv.ParseInt(m.Attributes["clientId"], 10, 64) + if err != nil { + log.Println(err) + m.Nack() + return + } + + seqNum, err := strconv.ParseInt(m.Attributes["sequenceNumber"], 10, 32) + if err != nil { + log.Println(err) + m.Nack() + return + } + + sendTimeMillis, err := strconv.ParseInt(m.Attributes["sendTime"], 10, 64) + if err != nil { + log.Println(err) + m.Nack() + return + } + + latency := time.Now().UnixNano()/1e6 - sendTimeMillis + ident := &pb.MessageIdentifier{ + PublisherClientId: id, + SequenceNumber: int32(seqNum), + } + + s.mu.Lock() + s.idents = append(s.idents, ident) + s.latencies = append(s.latencies, latency) + s.mu.Unlock() + m.Ack() +} + +func (s *SubServer) Execute(ctx context.Context, _ *pb.ExecuteRequest) (*pb.ExecuteResponse, error) { + // Throttle so the load tester doesn't spam us and consume all our CPU. + if err := s.lim.Wait(ctx); err != nil { + return nil, err + } + + s.mu.Lock() + idents := s.idents + s.idents = nil + latencies := s.latencies + s.latencies = nil + s.mu.Unlock() + + return &pb.ExecuteResponse{ + Latencies: latencies, + ReceivedMessages: idents, + }, nil +} diff --git a/vendor/cloud.google.com/go/pubsub/loadtest/pb/loadtest.pb.go b/vendor/cloud.google.com/go/pubsub/loadtest/pb/loadtest.pb.go new file mode 100644 index 0000000..12ea8f0 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/loadtest/pb/loadtest.pb.go @@ -0,0 +1,792 @@ +// Code generated by protoc-gen-go. +// source: loadtest.proto +// DO NOT EDIT! + +/* +Package google_pubsub_loadtest is a generated protocol buffer package. + +It is generated from these files: + loadtest.proto + +It has these top-level messages: + StartRequest + StartResponse + PubsubOptions + KafkaOptions + MessageIdentifier + CheckRequest + CheckResponse + ExecuteRequest + ExecuteResponse +*/ +package google_pubsub_loadtest + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/duration" +import google_protobuf1 "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type StartRequest struct { + // The GCP project. This must be set even for Kafka, as we use it to export metrics. + Project string `protobuf:"bytes,1,opt,name=project" json:"project,omitempty"` + // The Pub/Sub or Kafka topic name. + Topic string `protobuf:"bytes,2,opt,name=topic" json:"topic,omitempty"` + // The number of requests that can be made, each second, per client. 
+ RequestRate int32 `protobuf:"varint,3,opt,name=request_rate,json=requestRate" json:"request_rate,omitempty"` + // The size of each user message to publish + MessageSize int32 `protobuf:"varint,4,opt,name=message_size,json=messageSize" json:"message_size,omitempty"` + // The maximum outstanding requests, per client. + MaxOutstandingRequests int32 `protobuf:"varint,5,opt,name=max_outstanding_requests,json=maxOutstandingRequests" json:"max_outstanding_requests,omitempty"` + // The time at which the load test should start. If this is less than the current time, we start immediately. + StartTime *google_protobuf1.Timestamp `protobuf:"bytes,6,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // The burn-in duration, before which results should not be reported. + BurnInDuration *google_protobuf.Duration `protobuf:"bytes,12,opt,name=burn_in_duration,json=burnInDuration" json:"burn_in_duration,omitempty"` + // The number of user messages of size message_size to publish together. + PublishBatchSize int32 `protobuf:"varint,11,opt,name=publish_batch_size,json=publishBatchSize" json:"publish_batch_size,omitempty"` + // The max duration for coalescing a batch of published messages. + PublishBatchDuration *google_protobuf.Duration `protobuf:"bytes,13,opt,name=publish_batch_duration,json=publishBatchDuration" json:"publish_batch_duration,omitempty"` + // Types that are valid to be assigned to StopConditions: + // *StartRequest_TestDuration + // *StartRequest_NumberOfMessages + StopConditions isStartRequest_StopConditions `protobuf_oneof:"stop_conditions"` + // Types that are valid to be assigned to Options: + // *StartRequest_PubsubOptions + // *StartRequest_KafkaOptions + Options isStartRequest_Options `protobuf_oneof:"options"` +} + +func (m *StartRequest) Reset() { *m = StartRequest{} } +func (m *StartRequest) String() string { return proto.CompactTextString(m) } +func (*StartRequest) ProtoMessage() {} +func (*StartRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isStartRequest_StopConditions interface { + isStartRequest_StopConditions() +} +type isStartRequest_Options interface { + isStartRequest_Options() +} + +type StartRequest_TestDuration struct { + TestDuration *google_protobuf.Duration `protobuf:"bytes,7,opt,name=test_duration,json=testDuration,oneof"` +} +type StartRequest_NumberOfMessages struct { + NumberOfMessages int32 `protobuf:"varint,8,opt,name=number_of_messages,json=numberOfMessages,oneof"` +} +type StartRequest_PubsubOptions struct { + PubsubOptions *PubsubOptions `protobuf:"bytes,9,opt,name=pubsub_options,json=pubsubOptions,oneof"` +} +type StartRequest_KafkaOptions struct { + KafkaOptions *KafkaOptions `protobuf:"bytes,10,opt,name=kafka_options,json=kafkaOptions,oneof"` +} + +func (*StartRequest_TestDuration) isStartRequest_StopConditions() {} +func (*StartRequest_NumberOfMessages) isStartRequest_StopConditions() {} +func (*StartRequest_PubsubOptions) isStartRequest_Options() {} +func (*StartRequest_KafkaOptions) isStartRequest_Options() {} + +func (m *StartRequest) GetStopConditions() isStartRequest_StopConditions { + if m != nil { + return m.StopConditions + } + return nil +} +func (m *StartRequest) GetOptions() isStartRequest_Options { + if m != nil { + return m.Options + } + return nil +} + +func (m *StartRequest) GetProject() string { + if m != nil { + return m.Project + } + return "" +} + +func (m *StartRequest) GetTopic() string { + if m != nil { + return m.Topic + } + return "" +} + +func (m *StartRequest) GetRequestRate() 
int32 { + if m != nil { + return m.RequestRate + } + return 0 +} + +func (m *StartRequest) GetMessageSize() int32 { + if m != nil { + return m.MessageSize + } + return 0 +} + +func (m *StartRequest) GetMaxOutstandingRequests() int32 { + if m != nil { + return m.MaxOutstandingRequests + } + return 0 +} + +func (m *StartRequest) GetStartTime() *google_protobuf1.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *StartRequest) GetBurnInDuration() *google_protobuf.Duration { + if m != nil { + return m.BurnInDuration + } + return nil +} + +func (m *StartRequest) GetPublishBatchSize() int32 { + if m != nil { + return m.PublishBatchSize + } + return 0 +} + +func (m *StartRequest) GetPublishBatchDuration() *google_protobuf.Duration { + if m != nil { + return m.PublishBatchDuration + } + return nil +} + +func (m *StartRequest) GetTestDuration() *google_protobuf.Duration { + if x, ok := m.GetStopConditions().(*StartRequest_TestDuration); ok { + return x.TestDuration + } + return nil +} + +func (m *StartRequest) GetNumberOfMessages() int32 { + if x, ok := m.GetStopConditions().(*StartRequest_NumberOfMessages); ok { + return x.NumberOfMessages + } + return 0 +} + +func (m *StartRequest) GetPubsubOptions() *PubsubOptions { + if x, ok := m.GetOptions().(*StartRequest_PubsubOptions); ok { + return x.PubsubOptions + } + return nil +} + +func (m *StartRequest) GetKafkaOptions() *KafkaOptions { + if x, ok := m.GetOptions().(*StartRequest_KafkaOptions); ok { + return x.KafkaOptions + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*StartRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _StartRequest_OneofMarshaler, _StartRequest_OneofUnmarshaler, _StartRequest_OneofSizer, []interface{}{ + (*StartRequest_TestDuration)(nil), + (*StartRequest_NumberOfMessages)(nil), + (*StartRequest_PubsubOptions)(nil), + (*StartRequest_KafkaOptions)(nil), + } +} + +func _StartRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*StartRequest) + // stop_conditions + switch x := m.StopConditions.(type) { + case *StartRequest_TestDuration: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TestDuration); err != nil { + return err + } + case *StartRequest_NumberOfMessages: + b.EncodeVarint(8<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.NumberOfMessages)) + case nil: + default: + return fmt.Errorf("StartRequest.StopConditions has unexpected type %T", x) + } + // options + switch x := m.Options.(type) { + case *StartRequest_PubsubOptions: + b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PubsubOptions); err != nil { + return err + } + case *StartRequest_KafkaOptions: + b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.KafkaOptions); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("StartRequest.Options has unexpected type %T", x) + } + return nil +} + +func _StartRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*StartRequest) + switch tag { + case 7: // stop_conditions.test_duration + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf.Duration) + err := b.DecodeMessage(msg) + m.StopConditions = &StartRequest_TestDuration{msg} + return true, err + case 8: // 
stop_conditions.number_of_messages + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.StopConditions = &StartRequest_NumberOfMessages{int32(x)} + return true, err + case 9: // options.pubsub_options + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PubsubOptions) + err := b.DecodeMessage(msg) + m.Options = &StartRequest_PubsubOptions{msg} + return true, err + case 10: // options.kafka_options + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(KafkaOptions) + err := b.DecodeMessage(msg) + m.Options = &StartRequest_KafkaOptions{msg} + return true, err + default: + return false, nil + } +} + +func _StartRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*StartRequest) + // stop_conditions + switch x := m.StopConditions.(type) { + case *StartRequest_TestDuration: + s := proto.Size(x.TestDuration) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StartRequest_NumberOfMessages: + n += proto.SizeVarint(8<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.NumberOfMessages)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + // options + switch x := m.Options.(type) { + case *StartRequest_PubsubOptions: + s := proto.Size(x.PubsubOptions) + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StartRequest_KafkaOptions: + s := proto.Size(x.KafkaOptions) + n += proto.SizeVarint(10<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type StartResponse struct { +} + +func (m *StartResponse) Reset() { *m = StartResponse{} } +func (m *StartResponse) String() string { return proto.CompactTextString(m) } +func (*StartResponse) ProtoMessage() {} +func (*StartResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +type PubsubOptions struct { + // The Cloud Pub/Sub subscription name + Subscription string `protobuf:"bytes,1,opt,name=subscription" json:"subscription,omitempty"` + // The maximum number of messages to pull which each request. + MaxMessagesPerPull int32 `protobuf:"varint,2,opt,name=max_messages_per_pull,json=maxMessagesPerPull" json:"max_messages_per_pull,omitempty"` +} + +func (m *PubsubOptions) Reset() { *m = PubsubOptions{} } +func (m *PubsubOptions) String() string { return proto.CompactTextString(m) } +func (*PubsubOptions) ProtoMessage() {} +func (*PubsubOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *PubsubOptions) GetSubscription() string { + if m != nil { + return m.Subscription + } + return "" +} + +func (m *PubsubOptions) GetMaxMessagesPerPull() int32 { + if m != nil { + return m.MaxMessagesPerPull + } + return 0 +} + +type KafkaOptions struct { + // The network address of the Kafka broker. + Broker string `protobuf:"bytes,1,opt,name=broker" json:"broker,omitempty"` + // The length of time to poll for. 
+ PollDuration *google_protobuf.Duration `protobuf:"bytes,2,opt,name=poll_duration,json=pollDuration" json:"poll_duration,omitempty"` +} + +func (m *KafkaOptions) Reset() { *m = KafkaOptions{} } +func (m *KafkaOptions) String() string { return proto.CompactTextString(m) } +func (*KafkaOptions) ProtoMessage() {} +func (*KafkaOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *KafkaOptions) GetBroker() string { + if m != nil { + return m.Broker + } + return "" +} + +func (m *KafkaOptions) GetPollDuration() *google_protobuf.Duration { + if m != nil { + return m.PollDuration + } + return nil +} + +type MessageIdentifier struct { + // The unique id of the client that published the message. + PublisherClientId int64 `protobuf:"varint,1,opt,name=publisher_client_id,json=publisherClientId" json:"publisher_client_id,omitempty"` + // Sequence number of the published message with the given publish_client_id. + SequenceNumber int32 `protobuf:"varint,2,opt,name=sequence_number,json=sequenceNumber" json:"sequence_number,omitempty"` +} + +func (m *MessageIdentifier) Reset() { *m = MessageIdentifier{} } +func (m *MessageIdentifier) String() string { return proto.CompactTextString(m) } +func (*MessageIdentifier) ProtoMessage() {} +func (*MessageIdentifier) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *MessageIdentifier) GetPublisherClientId() int64 { + if m != nil { + return m.PublisherClientId + } + return 0 +} + +func (m *MessageIdentifier) GetSequenceNumber() int32 { + if m != nil { + return m.SequenceNumber + } + return 0 +} + +type CheckRequest struct { + // Duplicate messages that should not be reported for throughput and latency. + Duplicates []*MessageIdentifier `protobuf:"bytes,1,rep,name=duplicates" json:"duplicates,omitempty"` +} + +func (m *CheckRequest) Reset() { *m = CheckRequest{} } +func (m *CheckRequest) String() string { return proto.CompactTextString(m) } +func (*CheckRequest) ProtoMessage() {} +func (*CheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *CheckRequest) GetDuplicates() []*MessageIdentifier { + if m != nil { + return m.Duplicates + } + return nil +} + +type CheckResponse struct { + // Histogram of latencies, each one a delta from the previous CheckResponse sent. + BucketValues []int64 `protobuf:"varint,1,rep,packed,name=bucket_values,json=bucketValues" json:"bucket_values,omitempty"` + // The duration from the start of the loadtest to its completion or now if is_finished is false. + RunningDuration *google_protobuf.Duration `protobuf:"bytes,2,opt,name=running_duration,json=runningDuration" json:"running_duration,omitempty"` + // True if the load test has finished running. 
+ IsFinished bool `protobuf:"varint,3,opt,name=is_finished,json=isFinished" json:"is_finished,omitempty"` + // MessageIdentifiers of all received messages since the last Check + ReceivedMessages []*MessageIdentifier `protobuf:"bytes,4,rep,name=received_messages,json=receivedMessages" json:"received_messages,omitempty"` +} + +func (m *CheckResponse) Reset() { *m = CheckResponse{} } +func (m *CheckResponse) String() string { return proto.CompactTextString(m) } +func (*CheckResponse) ProtoMessage() {} +func (*CheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *CheckResponse) GetBucketValues() []int64 { + if m != nil { + return m.BucketValues + } + return nil +} + +func (m *CheckResponse) GetRunningDuration() *google_protobuf.Duration { + if m != nil { + return m.RunningDuration + } + return nil +} + +func (m *CheckResponse) GetIsFinished() bool { + if m != nil { + return m.IsFinished + } + return false +} + +func (m *CheckResponse) GetReceivedMessages() []*MessageIdentifier { + if m != nil { + return m.ReceivedMessages + } + return nil +} + +type ExecuteRequest struct { +} + +func (m *ExecuteRequest) Reset() { *m = ExecuteRequest{} } +func (m *ExecuteRequest) String() string { return proto.CompactTextString(m) } +func (*ExecuteRequest) ProtoMessage() {} +func (*ExecuteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +type ExecuteResponse struct { + // Latencies of the completed operations + Latencies []int64 `protobuf:"varint,1,rep,packed,name=latencies" json:"latencies,omitempty"` + // MessageIdentifiers of all received messages since the last Execute + ReceivedMessages []*MessageIdentifier `protobuf:"bytes,2,rep,name=received_messages,json=receivedMessages" json:"received_messages,omitempty"` +} + +func (m *ExecuteResponse) Reset() { *m = ExecuteResponse{} } +func (m *ExecuteResponse) String() string { return proto.CompactTextString(m) } +func (*ExecuteResponse) ProtoMessage() {} +func (*ExecuteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *ExecuteResponse) GetLatencies() []int64 { + if m != nil { + return m.Latencies + } + return nil +} + +func (m *ExecuteResponse) GetReceivedMessages() []*MessageIdentifier { + if m != nil { + return m.ReceivedMessages + } + return nil +} + +func init() { + proto.RegisterType((*StartRequest)(nil), "google.pubsub.loadtest.StartRequest") + proto.RegisterType((*StartResponse)(nil), "google.pubsub.loadtest.StartResponse") + proto.RegisterType((*PubsubOptions)(nil), "google.pubsub.loadtest.PubsubOptions") + proto.RegisterType((*KafkaOptions)(nil), "google.pubsub.loadtest.KafkaOptions") + proto.RegisterType((*MessageIdentifier)(nil), "google.pubsub.loadtest.MessageIdentifier") + proto.RegisterType((*CheckRequest)(nil), "google.pubsub.loadtest.CheckRequest") + proto.RegisterType((*CheckResponse)(nil), "google.pubsub.loadtest.CheckResponse") + proto.RegisterType((*ExecuteRequest)(nil), "google.pubsub.loadtest.ExecuteRequest") + proto.RegisterType((*ExecuteResponse)(nil), "google.pubsub.loadtest.ExecuteResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for Loadtest service + +type LoadtestClient interface { + // Starts a load test + Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) + // Checks the status of a load test + Check(ctx context.Context, in *CheckRequest, opts ...grpc.CallOption) (*CheckResponse, error) +} + +type loadtestClient struct { + cc *grpc.ClientConn +} + +func NewLoadtestClient(cc *grpc.ClientConn) LoadtestClient { + return &loadtestClient{cc} +} + +func (c *loadtestClient) Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) { + out := new(StartResponse) + err := grpc.Invoke(ctx, "/google.pubsub.loadtest.Loadtest/Start", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *loadtestClient) Check(ctx context.Context, in *CheckRequest, opts ...grpc.CallOption) (*CheckResponse, error) { + out := new(CheckResponse) + err := grpc.Invoke(ctx, "/google.pubsub.loadtest.Loadtest/Check", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Loadtest service + +type LoadtestServer interface { + // Starts a load test + Start(context.Context, *StartRequest) (*StartResponse, error) + // Checks the status of a load test + Check(context.Context, *CheckRequest) (*CheckResponse, error) +} + +func RegisterLoadtestServer(s *grpc.Server, srv LoadtestServer) { + s.RegisterService(&_Loadtest_serviceDesc, srv) +} + +func _Loadtest_Start_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LoadtestServer).Start(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.loadtest.Loadtest/Start", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LoadtestServer).Start(ctx, req.(*StartRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Loadtest_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LoadtestServer).Check(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.loadtest.Loadtest/Check", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LoadtestServer).Check(ctx, req.(*CheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Loadtest_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.pubsub.loadtest.Loadtest", + HandlerType: (*LoadtestServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Start", + Handler: _Loadtest_Start_Handler, + }, + { + MethodName: "Check", + Handler: _Loadtest_Check_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "loadtest.proto", +} + +// Client API for LoadtestWorker service + +type LoadtestWorkerClient interface { + // Starts a worker + Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) + // Executes a command on the worker, returning the latencies of the operations. Since some + // commands consist of multiple operations (i.e. 
pulls contain many received messages with + // different end to end latencies) a single command can have multiple latencies returned. + Execute(ctx context.Context, in *ExecuteRequest, opts ...grpc.CallOption) (*ExecuteResponse, error) +} + +type loadtestWorkerClient struct { + cc *grpc.ClientConn +} + +func NewLoadtestWorkerClient(cc *grpc.ClientConn) LoadtestWorkerClient { + return &loadtestWorkerClient{cc} +} + +func (c *loadtestWorkerClient) Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) { + out := new(StartResponse) + err := grpc.Invoke(ctx, "/google.pubsub.loadtest.LoadtestWorker/Start", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *loadtestWorkerClient) Execute(ctx context.Context, in *ExecuteRequest, opts ...grpc.CallOption) (*ExecuteResponse, error) { + out := new(ExecuteResponse) + err := grpc.Invoke(ctx, "/google.pubsub.loadtest.LoadtestWorker/Execute", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for LoadtestWorker service + +type LoadtestWorkerServer interface { + // Starts a worker + Start(context.Context, *StartRequest) (*StartResponse, error) + // Executes a command on the worker, returning the latencies of the operations. Since some + // commands consist of multiple operations (i.e. pulls contain many received messages with + // different end to end latencies) a single command can have multiple latencies returned. + Execute(context.Context, *ExecuteRequest) (*ExecuteResponse, error) +} + +func RegisterLoadtestWorkerServer(s *grpc.Server, srv LoadtestWorkerServer) { + s.RegisterService(&_LoadtestWorker_serviceDesc, srv) +} + +func _LoadtestWorker_Start_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LoadtestWorkerServer).Start(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.loadtest.LoadtestWorker/Start", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LoadtestWorkerServer).Start(ctx, req.(*StartRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LoadtestWorker_Execute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExecuteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LoadtestWorkerServer).Execute(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.loadtest.LoadtestWorker/Execute", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LoadtestWorkerServer).Execute(ctx, req.(*ExecuteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _LoadtestWorker_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.pubsub.loadtest.LoadtestWorker", + HandlerType: (*LoadtestWorkerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Start", + Handler: _LoadtestWorker_Start_Handler, + }, + { + MethodName: "Execute", + Handler: _LoadtestWorker_Execute_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "loadtest.proto", +} + +func init() { proto.RegisterFile("loadtest.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 847 bytes of 
a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xdd, 0x6e, 0xdc, 0x44, + 0x14, 0xae, 0x93, 0x6e, 0x92, 0x3d, 0x6b, 0xef, 0x6e, 0x86, 0x12, 0x99, 0x15, 0xd0, 0x60, 0x28, + 0x0d, 0x12, 0x72, 0x45, 0xb8, 0x81, 0x1b, 0x84, 0x92, 0x82, 0x12, 0x15, 0x9a, 0xc8, 0x8d, 0x8a, + 0xe0, 0x66, 0x34, 0xb6, 0x67, 0x93, 0x61, 0xed, 0x19, 0x33, 0x3f, 0x55, 0xd4, 0x17, 0xe0, 0x8d, + 0x78, 0x00, 0x1e, 0x87, 0x5b, 0x5e, 0x00, 0xcd, 0x78, 0xbc, 0x3f, 0x6d, 0x57, 0x0b, 0x42, 0xbd, + 0x3c, 0xdf, 0xf9, 0xce, 0x37, 0xe7, 0xd7, 0x86, 0x61, 0x25, 0x48, 0xa9, 0xa9, 0xd2, 0x69, 0x23, + 0x85, 0x16, 0xe8, 0xe0, 0x5a, 0x88, 0xeb, 0x8a, 0xa6, 0x8d, 0xc9, 0x95, 0xc9, 0xd3, 0xce, 0x3b, + 0xf9, 0xb0, 0xc5, 0x1f, 0x39, 0x56, 0x6e, 0xa6, 0x8f, 0x4a, 0x23, 0x89, 0x66, 0x82, 0xb7, 0x71, + 0x93, 0xfb, 0xaf, 0xfa, 0x35, 0xab, 0xa9, 0xd2, 0xa4, 0x6e, 0x5a, 0x42, 0xf2, 0x57, 0x0f, 0xc2, + 0x67, 0x9a, 0x48, 0x9d, 0xd1, 0xdf, 0x0c, 0x55, 0x1a, 0xc5, 0xb0, 0xdb, 0x48, 0xf1, 0x2b, 0x2d, + 0x74, 0x1c, 0x1c, 0x06, 0x47, 0xfd, 0xac, 0x33, 0xd1, 0x3d, 0xe8, 0x69, 0xd1, 0xb0, 0x22, 0xde, + 0x72, 0x78, 0x6b, 0xa0, 0x8f, 0x20, 0x94, 0x6d, 0x28, 0x96, 0x44, 0xd3, 0x78, 0xfb, 0x30, 0x38, + 0xea, 0x65, 0x03, 0x8f, 0x65, 0x44, 0x53, 0x4b, 0xa9, 0xa9, 0x52, 0xe4, 0x9a, 0x62, 0xc5, 0x5e, + 0xd2, 0xf8, 0x6e, 0x4b, 0xf1, 0xd8, 0x33, 0xf6, 0x92, 0xa2, 0xaf, 0x20, 0xae, 0xc9, 0x2d, 0x16, + 0x46, 0x2b, 0x4d, 0x78, 0xc9, 0xf8, 0x35, 0xf6, 0x0a, 0x2a, 0xee, 0x39, 0xfa, 0x41, 0x4d, 0x6e, + 0x2f, 0x16, 0x6e, 0x9f, 0xae, 0x42, 0x5f, 0x03, 0x28, 0x9b, 0x3f, 0xb6, 0x95, 0xc5, 0x3b, 0x87, + 0xc1, 0xd1, 0xe0, 0x78, 0x92, 0x76, 0xed, 0xf2, 0x65, 0xa7, 0x57, 0x5d, 0xd9, 0x59, 0xdf, 0xb1, + 0xad, 0x8d, 0x4e, 0x61, 0x9c, 0x1b, 0xc9, 0x31, 0xe3, 0xb8, 0x6b, 0x5b, 0x1c, 0x3a, 0x81, 0xf7, + 0x5e, 0x13, 0x78, 0xec, 0x09, 0xd9, 0xd0, 0x86, 0x9c, 0xf3, 0xce, 0x46, 0x9f, 0x03, 0x6a, 0x4c, + 0x5e, 0x31, 0x75, 0x83, 0x73, 0xa2, 0x8b, 0x9b, 0xb6, 0xc4, 0x81, 0xcb, 0x79, 0xec, 0x3d, 0x27, + 0xd6, 0xe1, 0xea, 0xbc, 0x80, 0x83, 0x55, 0xf6, 0xfc, 0xe1, 0x68, 0xd3, 0xc3, 0xf7, 0x96, 0xc5, + 0xe6, 0xcf, 0x7f, 0x0b, 0x91, 0x5d, 0x84, 0x85, 0xce, 0xee, 0x06, 0x9d, 0xb3, 0x3b, 0x59, 0x68, + 0x23, 0xe6, 0x0a, 0x29, 0x20, 0x6e, 0xea, 0x9c, 0x4a, 0x2c, 0xa6, 0xd8, 0xcf, 0x44, 0xc5, 0x7b, + 0xb6, 0x80, 0xb3, 0x3b, 0xd9, 0xb8, 0xf5, 0x5d, 0x4c, 0x7f, 0xf4, 0x1e, 0xf4, 0x14, 0x86, 0xed, + 0x16, 0x62, 0xd1, 0x58, 0x01, 0x15, 0xf7, 0xdd, 0x93, 0x0f, 0xd2, 0x37, 0xef, 0x68, 0x7a, 0xe9, + 0xec, 0x8b, 0x96, 0x7c, 0x16, 0x64, 0x51, 0xb3, 0x0c, 0xa0, 0x27, 0x10, 0xcd, 0xc8, 0x74, 0x46, + 0xe6, 0x72, 0xe0, 0xe4, 0x3e, 0x59, 0x27, 0xf7, 0xc4, 0x92, 0x17, 0x6a, 0xe1, 0x6c, 0xc9, 0x3e, + 0xd9, 0x87, 0x91, 0xd2, 0xa2, 0xc1, 0x85, 0xe0, 0x25, 0x6b, 0xa1, 0x3e, 0xec, 0x7a, 0xe5, 0x64, + 0x04, 0x91, 0xdf, 0x75, 0xd5, 0x08, 0xae, 0x68, 0x32, 0x85, 0x68, 0x25, 0x3b, 0x94, 0x40, 0xa8, + 0x4c, 0xae, 0x0a, 0xc9, 0x1c, 0xe0, 0x4f, 0x60, 0x05, 0x43, 0x5f, 0xc0, 0xbb, 0x76, 0x57, 0xbb, + 0x56, 0xe1, 0x86, 0x4a, 0xdc, 0x98, 0xaa, 0x72, 0x77, 0xd1, 0xcb, 0x50, 0x4d, 0x6e, 0xbb, 0x66, + 0x5d, 0x52, 0x79, 0x69, 0xaa, 0x2a, 0x99, 0x42, 0xb8, 0x9c, 0x36, 0x3a, 0x80, 0x9d, 0x5c, 0x8a, + 0x19, 0x95, 0xfe, 0x01, 0x6f, 0xa1, 0x6f, 0x20, 0x6a, 0x44, 0x55, 0x2d, 0xa6, 0xb9, 0xb5, 0x69, + 0x2b, 0x42, 0xcb, 0xef, 0xac, 0xa4, 0x82, 0x7d, 0xff, 0xf4, 0x79, 0x49, 0xb9, 0x66, 0x53, 0x46, + 0x25, 0x4a, 0xe1, 0x1d, 0xbf, 0x3a, 0x54, 0xe2, 0xa2, 0x62, 0x94, 0x6b, 0xcc, 0x4a, 0xf7, 0xf2, + 0x76, 0xb6, 0x3f, 0x77, 0x9d, 0x3a, 0xcf, 0x79, 0x89, 0x1e, 0xc2, 0x48, 0xd9, 0xeb, 0xe2, 
0x05, + 0xc5, 0xed, 0xf4, 0x7d, 0x65, 0xc3, 0x0e, 0x7e, 0xea, 0xd0, 0xe4, 0x67, 0x08, 0x4f, 0x6f, 0x68, + 0x31, 0xeb, 0x3e, 0x1d, 0xe7, 0x00, 0xa5, 0x69, 0x2a, 0x56, 0x10, 0x4d, 0x55, 0x1c, 0x1c, 0x6e, + 0x1f, 0x0d, 0x8e, 0x3f, 0x5b, 0x37, 0xc6, 0xd7, 0xf2, 0xcc, 0x96, 0x82, 0x93, 0xbf, 0x03, 0x88, + 0xbc, 0x76, 0x3b, 0x2a, 0xf4, 0x31, 0x44, 0xb9, 0x29, 0x66, 0x54, 0xe3, 0x17, 0xa4, 0x32, 0x5e, + 0x7f, 0x3b, 0x0b, 0x5b, 0xf0, 0xb9, 0xc3, 0xd0, 0x63, 0x18, 0x4b, 0xc3, 0xb9, 0xfd, 0x7c, 0xfc, + 0xfb, 0x16, 0x8e, 0x7c, 0xc8, 0xfc, 0x22, 0xee, 0xc3, 0x80, 0x29, 0x3c, 0x65, 0xdc, 0xf6, 0xa5, + 0x74, 0x5f, 0xb4, 0xbd, 0x0c, 0x98, 0xfa, 0xde, 0x23, 0xe8, 0x39, 0xec, 0x4b, 0x5a, 0x50, 0xf6, + 0x82, 0x96, 0x8b, 0x8b, 0xb9, 0xfb, 0x5f, 0xeb, 0x1d, 0x77, 0x1a, 0xdd, 0xb6, 0x24, 0x63, 0x18, + 0x7e, 0x77, 0x4b, 0x0b, 0xa3, 0xa9, 0x6f, 0x69, 0xf2, 0x7b, 0x00, 0xa3, 0x39, 0xe4, 0x3b, 0xf1, + 0x3e, 0xf4, 0x2b, 0xa2, 0x29, 0x2f, 0xd8, 0xbc, 0x0b, 0x0b, 0xe0, 0xcd, 0xb9, 0x6d, 0xfd, 0xef, + 0xdc, 0x8e, 0xff, 0x08, 0x60, 0xef, 0x07, 0x1f, 0x80, 0xae, 0xa0, 0xe7, 0x0e, 0x09, 0xad, 0xbd, + 0xd2, 0xe5, 0x7f, 0xca, 0xe4, 0xc1, 0x06, 0x96, 0x2f, 0xec, 0x0a, 0x7a, 0x6e, 0xe6, 0xeb, 0x55, + 0x97, 0xd7, 0x6d, 0xbd, 0xea, 0xca, 0xe2, 0x1c, 0xff, 0x19, 0xc0, 0xb0, 0x4b, 0xfc, 0x27, 0x21, + 0xed, 0x99, 0xbd, 0x9d, 0xf4, 0x7f, 0x81, 0x5d, 0x3f, 0x2a, 0xf4, 0xe9, 0xba, 0x88, 0xd5, 0xf1, + 0x4e, 0x1e, 0x6e, 0xe4, 0xb5, 0xda, 0x27, 0x29, 0x7c, 0x50, 0x88, 0xfa, 0x15, 0xf6, 0xb4, 0x62, + 0x45, 0x5a, 0x88, 0xba, 0x16, 0xfc, 0x24, 0xea, 0x4a, 0xbc, 0x74, 0xfb, 0xbd, 0xe3, 0xd6, 0xfc, + 0xcb, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xc4, 0xfc, 0xdc, 0x27, 0x48, 0x08, 0x00, 0x00, +} diff --git a/vendor/cloud.google.com/go/pubsub/message.go b/vendor/cloud.google.com/go/pubsub/message.go new file mode 100644 index 0000000..ac2cecc --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/message.go @@ -0,0 +1,100 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "time" + + "github.com/golang/protobuf/ptypes" + pb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +// Message represents a Pub/Sub message. +type Message struct { + // ID identifies this message. + // This ID is assigned by the server and is populated for Messages obtained from a subscription. + // This field is read-only. + ID string + + // Data is the actual data in the message. + Data []byte + + // Attributes represents the key-value pairs the current message + // is labelled with. + Attributes map[string]string + + // ackID is the identifier to acknowledge this message. + ackID string + + // The time at which the message was published. + // This is populated by the server for Messages obtained from a subscription. + // This field is read-only. + PublishTime time.Time + + // receiveTime is the time the message was received by the client. + receiveTime time.Time + + // size is the approximate size of the message's data and attributes. 
+ size int + + calledDone bool + + // The done method of the iterator that created this Message. + doneFunc func(string, bool, time.Time) +} + +func toMessage(resp *pb.ReceivedMessage) (*Message, error) { + if resp.Message == nil { + return &Message{ackID: resp.AckId}, nil + } + + pubTime, err := ptypes.Timestamp(resp.Message.PublishTime) + if err != nil { + return nil, err + } + return &Message{ + ackID: resp.AckId, + Data: resp.Message.Data, + Attributes: resp.Message.Attributes, + ID: resp.Message.MessageId, + PublishTime: pubTime, + }, nil +} + +// Ack indicates successful processing of a Message passed to the Subscriber.Receive callback. +// It should not be called on any other Message value. +// If message acknowledgement fails, the Message will be redelivered. +// Client code must call Ack or Nack when finished for each received Message. +// Calls to Ack or Nack have no effect after the first call. +func (m *Message) Ack() { + m.done(true) +} + +// Nack indicates that the client will not or cannot process a Message passed to the Subscriber.Receive callback. +// It should not be called on any other Message value. +// Nack will result in the Message being redelivered more quickly than if it were allowed to expire. +// Client code must call Ack or Nack when finished for each received Message. +// Calls to Ack or Nack have no effect after the first call. +func (m *Message) Nack() { + m.done(false) +} + +func (m *Message) done(ack bool) { + if m.calledDone { + return + } + m.calledDone = true + m.doneFunc(m.ackID, ack, m.receiveTime) +} diff --git a/vendor/cloud.google.com/go/pubsub/not_go18.go b/vendor/cloud.google.com/go/pubsub/not_go18.go new file mode 100644 index 0000000..09fd4bf --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/not_go18.go @@ -0,0 +1,54 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.8 + +package pubsub + +import ( + "golang.org/x/net/context" + "google.golang.org/api/option" +) + +// OpenCensus only supports go 1.8 and higher. + +func openCensusOptions() []option.ClientOption { return nil } + +func withSubscriptionKey(ctx context.Context, _ string) context.Context { + return ctx +} + +type dummy struct{} + +var ( + // Not supported below Go 1.8. + PullCount dummy + // Not supported below Go 1.8. + AckCount dummy + // Not supported below Go 1.8. + NackCount dummy + // Not supported below Go 1.8. + ModAckCount dummy + // Not supported below Go 1.8. + StreamOpenCount dummy + // Not supported below Go 1.8. + StreamRetryCount dummy + // Not supported below Go 1.8. + StreamRequestCount dummy + // Not supported below Go 1.8. 
+ StreamResponseCount dummy +) + +func recordStat(context.Context, dummy, int64) { +} diff --git a/vendor/cloud.google.com/go/pubsub/pstest/examples_test.go b/vendor/cloud.google.com/go/pubsub/pstest/examples_test.go new file mode 100644 index 0000000..e103eac --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/pstest/examples_test.go @@ -0,0 +1,41 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pstest_test + +import ( + "cloud.google.com/go/pubsub" + "cloud.google.com/go/pubsub/pstest" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +func ExampleNewServer() { + ctx := context.Background() + // Start a fake server running locally. + srv := pstest.NewServer() + // Connect to the server without using TLS. + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) + if err != nil { + // TODO: Handle error. + } + // Use the connection when creating a pubsub client. + client, err := pubsub.NewClient(ctx, "project", option.WithGRPCConn(conn)) + if err != nil { + // TODO: Handle error. + } + defer client.Close() + _ = client // TODO: Use the client. +} diff --git a/vendor/cloud.google.com/go/pubsub/pstest/fake.go b/vendor/cloud.google.com/go/pubsub/pstest/fake.go new file mode 100644 index 0000000..d20dddc --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/pstest/fake.go @@ -0,0 +1,771 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package pstest provides a fake Cloud PubSub service for testing. It implements a +// simplified form of the service, suitable for unit tests. It may behave +// differently from the actual service in ways in which the service is +// non-deterministic or unspecified: timing, delivery order, etc. +// +// This package is EXPERIMENTAL and is subject to change without notice. +// +// See the example for usage. +package pstest + +import ( + "fmt" + "io" + "path" + "sort" + "strings" + "sync" + "sync/atomic" + "time" + + "cloud.google.com/go/internal/testutil" + "github.com/golang/protobuf/ptypes" + durpb "github.com/golang/protobuf/ptypes/duration" + emptypb "github.com/golang/protobuf/ptypes/empty" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// For testing. 
Note that even though changes to the now variable are atomic, a call +// to the stored function can race with a change to that function. This could be a +// problem if tests are run in parallel, or even if concurrent parts of the same test +// change the value of the variable. +var now atomic.Value + +func init() { + now.Store(time.Now) +} + +func timeNow() time.Time { + return now.Load().(func() time.Time)() +} + +type Server struct { + Addr string // The address that the server is listening on. + gServer gServer +} + +type gServer struct { + pb.PublisherServer + pb.SubscriberServer + + mu sync.Mutex + topics map[string]*topic + subs map[string]*subscription + msgs []*Message // all messages ever published + msgsByID map[string]*Message + wg sync.WaitGroup + nextID int + streamTimeout time.Duration +} + +// NewServer creates a new fake server running in the current process. +func NewServer() *Server { + srv, err := testutil.NewServer() + if err != nil { + panic(fmt.Sprintf("pstest.NewServer: %v", err)) + } + s := &Server{ + Addr: srv.Addr, + gServer: gServer{ + topics: map[string]*topic{}, + subs: map[string]*subscription{}, + msgsByID: map[string]*Message{}, + }, + } + pb.RegisterPublisherServer(srv.Gsrv, &s.gServer) + pb.RegisterSubscriberServer(srv.Gsrv, &s.gServer) + srv.Start() + return s +} + +// Publish behaves as if the Publish RPC was called with a message with the given +// data and attrs. It returns the ID of the message. +// The topic will be created if it doesn't exist. +// +// Publish panics if there is an error, which is appropriate for testing. +func (s *Server) Publish(topic string, data []byte, attrs map[string]string) string { + const topicPattern = "projects/*/topics/*" + ok, err := path.Match(topicPattern, topic) + if err != nil { + panic(err) + } + if !ok { + panic(fmt.Sprintf("topic name must be of the form %q", topicPattern)) + } + _, _ = s.gServer.CreateTopic(nil, &pb.Topic{Name: topic}) + req := &pb.PublishRequest{ + Topic: topic, + Messages: []*pb.PubsubMessage{{Data: data, Attributes: attrs}}, + } + res, err := s.gServer.Publish(nil, req) + if err != nil { + panic(fmt.Sprintf("pstest.Server.Publish: %v", err)) + } + return res.MessageIds[0] +} + +// SetStreamTimeout sets the amount of time a stream will be active before it shuts +// itself down. This mimics the real service's behavior of closing streams after 30 +// minutes. If SetStreamTimeout is never called or is passed zero, streams never shut +// down. +func (s *Server) SetStreamTimeout(d time.Duration) { + s.gServer.mu.Lock() + defer s.gServer.mu.Unlock() + s.gServer.streamTimeout = d +} + +// A Message is a message that was published to the server. +type Message struct { + ID string + Data []byte + Attributes map[string]string + PublishTime time.Time + Deliveries int // number of times delivery of the message was attempted + Acks int // number of acks received from clients + + // protected by server mutex + deliveries int + acks int +} + +// Messages returns information about all messages ever published. +func (s *Server) Messages() []*Message { + s.gServer.mu.Lock() + defer s.gServer.mu.Unlock() + + var msgs []*Message + for _, m := range s.gServer.msgs { + m.Deliveries = m.deliveries + m.Acks = m.acks + msgs = append(msgs, m) + } + return msgs +} + +// Message returns the message with the given ID, or nil if no message +// with that ID was published. 
+func (s *Server) Message(id string) *Message { + s.gServer.mu.Lock() + defer s.gServer.mu.Unlock() + + m := s.gServer.msgsByID[id] + if m != nil { + m.Deliveries = m.deliveries + m.Acks = m.acks + } + return m +} + +// Wait blocks until all server activity has completed. +func (s *Server) Wait() { + s.gServer.wg.Wait() +} + +func (s *gServer) CreateTopic(_ context.Context, t *pb.Topic) (*pb.Topic, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if s.topics[t.Name] != nil { + return nil, status.Errorf(codes.AlreadyExists, "topic %q", t.Name) + } + top := newTopic(t) + s.topics[t.Name] = top + return top.proto, nil +} + +func (s *gServer) GetTopic(_ context.Context, req *pb.GetTopicRequest) (*pb.Topic, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if t := s.topics[req.Topic]; t != nil { + return t.proto, nil + } + return nil, status.Errorf(codes.NotFound, "topic %q", req.Topic) +} + +func (s *gServer) UpdateTopic(_ context.Context, req *pb.UpdateTopicRequest) (*pb.Topic, error) { + return nil, status.Errorf(codes.Unimplemented, "unimplemented") +} + +func (s *gServer) ListTopics(_ context.Context, req *pb.ListTopicsRequest) (*pb.ListTopicsResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + + var names []string + for n := range s.topics { + if strings.HasPrefix(n, req.Project) { + names = append(names, n) + } + } + sort.Strings(names) + from, to, nextToken, err := testutil.PageBounds(int(req.PageSize), req.PageToken, len(names)) + if err != nil { + return nil, err + } + res := &pb.ListTopicsResponse{NextPageToken: nextToken} + for i := from; i < to; i++ { + res.Topics = append(res.Topics, s.topics[names[i]].proto) + } + return res, nil +} + +func (s *gServer) ListTopicSubscriptions(_ context.Context, req *pb.ListTopicSubscriptionsRequest) (*pb.ListTopicSubscriptionsResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + + var names []string + for name, sub := range s.subs { + if sub.topic.proto.Name == req.Topic { + names = append(names, name) + } + } + sort.Strings(names) + from, to, nextToken, err := testutil.PageBounds(int(req.PageSize), req.PageToken, len(names)) + if err != nil { + return nil, err + } + return &pb.ListTopicSubscriptionsResponse{ + Subscriptions: names[from:to], + NextPageToken: nextToken, + }, nil +} + +func (s *gServer) DeleteTopic(_ context.Context, req *pb.DeleteTopicRequest) (*emptypb.Empty, error) { + s.mu.Lock() + defer s.mu.Unlock() + + t := s.topics[req.Topic] + if t == nil { + return nil, status.Errorf(codes.NotFound, "topic %q", req.Topic) + } + t.stop() + delete(s.topics, req.Topic) + return &emptypb.Empty{}, nil +} + +func (s *gServer) CreateSubscription(_ context.Context, ps *pb.Subscription) (*pb.Subscription, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if ps.Name == "" { + return nil, status.Errorf(codes.InvalidArgument, "missing name") + } + if s.subs[ps.Name] != nil { + return nil, status.Errorf(codes.AlreadyExists, "subscription %q", ps.Name) + } + if ps.Topic == "" { + return nil, status.Errorf(codes.InvalidArgument, "missing topic") + } + top := s.topics[ps.Topic] + if top == nil { + return nil, status.Errorf(codes.NotFound, "topic %q", ps.Topic) + } + if err := checkAckDeadline(ps.AckDeadlineSeconds); err != nil { + return nil, err + } + if ps.MessageRetentionDuration == nil { + ps.MessageRetentionDuration = defaultMessageRetentionDuration + } + if err := checkMRD(ps.MessageRetentionDuration); err != nil { + return nil, err + } + if ps.PushConfig == nil { + ps.PushConfig = &pb.PushConfig{} + } + + sub := newSubscription(top, &s.mu, 
ps) + top.subs[ps.Name] = sub + s.subs[ps.Name] = sub + sub.start(&s.wg) + return ps, nil +} + +// Can be set for testing. +var minAckDeadlineSecs int32 = 10 + +func checkAckDeadline(ads int32) error { + if ads < minAckDeadlineSecs || ads > 600 { + // PubSub service returns Unknown. + return status.Errorf(codes.Unknown, "bad ack_deadline_seconds: %d", ads) + } + return nil +} + +const ( + minMessageRetentionDuration = 10 * time.Minute + maxMessageRetentionDuration = 168 * time.Hour +) + +var defaultMessageRetentionDuration = ptypes.DurationProto(maxMessageRetentionDuration) + +func checkMRD(pmrd *durpb.Duration) error { + mrd, err := ptypes.Duration(pmrd) + if err != nil || mrd < minMessageRetentionDuration || mrd > maxMessageRetentionDuration { + return status.Errorf(codes.InvalidArgument, "bad message_retention_duration %+v", pmrd) + } + return nil +} + +func (s *gServer) GetSubscription(_ context.Context, req *pb.GetSubscriptionRequest) (*pb.Subscription, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if sub := s.subs[req.Subscription]; sub != nil { + return sub.proto, nil + } + return nil, status.Errorf(codes.NotFound, "subscription %q", req.Subscription) +} + +func (s *gServer) UpdateSubscription(_ context.Context, req *pb.UpdateSubscriptionRequest) (*pb.Subscription, error) { + s.mu.Lock() + defer s.mu.Unlock() + + sub := s.subs[req.Subscription.Name] + if sub == nil { + return nil, status.Errorf(codes.NotFound, "subscription %q", req.Subscription.Name) + } + + for _, path := range req.UpdateMask.Paths { + switch path { + case "push_config": + sub.proto.PushConfig = req.Subscription.PushConfig + + case "ack_deadline_seconds": + a := req.Subscription.AckDeadlineSeconds + if err := checkAckDeadline(a); err != nil { + return nil, err + } + sub.proto.AckDeadlineSeconds = a + + case "retain_acked_messages": + sub.proto.RetainAckedMessages = req.Subscription.RetainAckedMessages + + case "message_retention_duration": + if err := checkMRD(req.Subscription.MessageRetentionDuration); err != nil { + return nil, err + } + sub.proto.MessageRetentionDuration = req.Subscription.MessageRetentionDuration + + // TODO(jba): labels + default: + return nil, status.Errorf(codes.InvalidArgument, "unknown field name %q", path) + } + } + return sub.proto, nil +} + +func (s *gServer) ListSubscriptions(_ context.Context, req *pb.ListSubscriptionsRequest) (*pb.ListSubscriptionsResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + + var names []string + for name := range s.subs { + if strings.HasPrefix(name, req.Project) { + names = append(names, name) + } + } + sort.Strings(names) + from, to, nextToken, err := testutil.PageBounds(int(req.PageSize), req.PageToken, len(names)) + if err != nil { + return nil, err + } + res := &pb.ListSubscriptionsResponse{NextPageToken: nextToken} + for i := from; i < to; i++ { + res.Subscriptions = append(res.Subscriptions, s.subs[names[i]].proto) + } + return res, nil +} + +func (s *gServer) DeleteSubscription(_ context.Context, req *pb.DeleteSubscriptionRequest) (*emptypb.Empty, error) { + s.mu.Lock() + defer s.mu.Unlock() + + sub := s.subs[req.Subscription] + if sub == nil { + return nil, status.Errorf(codes.NotFound, "subscription %q", req.Subscription) + } + sub.stop() + delete(s.subs, req.Subscription) + sub.topic.deleteSub(sub) + return &emptypb.Empty{}, nil +} + +func (s *gServer) Publish(_ context.Context, req *pb.PublishRequest) (*pb.PublishResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if req.Topic == "" { + return nil, 
status.Errorf(codes.InvalidArgument, "missing topic") + } + top := s.topics[req.Topic] + if top == nil { + return nil, status.Errorf(codes.NotFound, "topic %q", req.Topic) + } + var ids []string + for _, pm := range req.Messages { + id := fmt.Sprintf("m%d", s.nextID) + s.nextID++ + pm.MessageId = id + pubTime := timeNow() + tsPubTime, err := ptypes.TimestampProto(pubTime) + if err != nil { + return nil, status.Errorf(codes.Internal, err.Error()) + } + pm.PublishTime = tsPubTime + m := &Message{ + ID: id, + Data: pm.Data, + Attributes: pm.Attributes, + PublishTime: pubTime, + } + top.publish(pm, m) + ids = append(ids, id) + s.msgs = append(s.msgs, m) + s.msgsByID[id] = m + } + return &pb.PublishResponse{MessageIds: ids}, nil +} + +type topic struct { + proto *pb.Topic + subs map[string]*subscription +} + +func newTopic(pt *pb.Topic) *topic { + return &topic{ + proto: pt, + subs: map[string]*subscription{}, + } +} + +func (t *topic) stop() { + for _, sub := range t.subs { + sub.proto.Topic = "_deleted-topic_" + sub.stop() + } +} + +func (t *topic) deleteSub(sub *subscription) { + delete(t.subs, sub.proto.Name) +} + +func (t *topic) publish(pm *pb.PubsubMessage, m *Message) { + for _, s := range t.subs { + s.msgs[pm.MessageId] = &message{ + publishTime: m.PublishTime, + proto: &pb.ReceivedMessage{ + AckId: pm.MessageId, + Message: pm, + }, + deliveries: &m.deliveries, + acks: &m.acks, + streamIndex: -1, + } + } +} + +type subscription struct { + topic *topic + mu *sync.Mutex + proto *pb.Subscription + ackTimeout time.Duration + msgs map[string]*message // unacked messages by message ID + streams []*stream + done chan struct{} +} + +func newSubscription(t *topic, mu *sync.Mutex, ps *pb.Subscription) *subscription { + at := time.Duration(ps.AckDeadlineSeconds) * time.Second + if at == 0 { + at = 10 * time.Second + } + return &subscription{ + topic: t, + mu: mu, + proto: ps, + ackTimeout: at, + msgs: map[string]*message{}, + done: make(chan struct{}), + } +} + +func (s *subscription) start(wg *sync.WaitGroup) { + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case <-s.done: + return + case <-time.After(10 * time.Millisecond): + s.deliver() + } + } + }() +} + +func (s *subscription) stop() { + close(s.done) +} + +func (s *gServer) StreamingPull(sps pb.Subscriber_StreamingPullServer) error { + // Receive initial message configuring the pull. + req, err := sps.Recv() + if err != nil { + return err + } + if req.Subscription == "" { + return status.Errorf(codes.InvalidArgument, "missing subscription") + } + s.mu.Lock() + sub := s.subs[req.Subscription] + s.mu.Unlock() + if sub == nil { + return status.Errorf(codes.NotFound, "subscription %s", req.Subscription) + } + // Create a new stream to handle the pull. + st := sub.newStream(sps, s.streamTimeout) + err = st.pull(&s.wg) + sub.deleteStream(st) + return err +} + +var retentionDuration = 10 * time.Minute + +func (s *subscription) deliver() { + s.mu.Lock() + defer s.mu.Unlock() + + tNow := timeNow() + for id, m := range s.msgs { + // Mark a message as re-deliverable if its ack deadline has expired. + if m.outstanding() && tNow.After(m.ackDeadline) { + m.makeAvailable() + } + pubTime, err := ptypes.Timestamp(m.proto.Message.PublishTime) + if err != nil { + panic(err) + } + // Remove messages that have been undelivered for a long time. + if !m.outstanding() && tNow.Sub(pubTime) > retentionDuration { + delete(s.msgs, id) + } + } + // Try to deliver each remaining message. 
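+	// Delivery round-robins across the subscription's streams: a message that
+	// has never been delivered starts at curIndex, while a redelivery resumes
+	// from the stream index recorded on the message (see deliverMessage below).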
+ curIndex := 0 + for _, m := range s.msgs { + if m.outstanding() { + continue + } + // If the message was never delivered before, start with the stream at + // curIndex. If it was delivered before, start with the stream after the one + // that owned it. + if m.streamIndex < 0 { + delIndex, ok := s.deliverMessage(m, curIndex, tNow) + if !ok { + break + } + curIndex = delIndex + 1 + m.streamIndex = curIndex + } else { + delIndex, ok := s.deliverMessage(m, m.streamIndex, tNow) + if !ok { + break + } + m.streamIndex = delIndex + } + } +} + +// deliverMessage attempts to deliver m to the stream at index i. If it can't, it +// tries streams i+1, i+2, ..., wrapping around. It returns the index of the stream +// it delivered the message to, or 0, false if it didn't deliver the message because +// there are no active streams. +func (s *subscription) deliverMessage(m *message, i int, tNow time.Time) (int, bool) { + for len(s.streams) > 0 { + if i >= len(s.streams) { + i = 0 + } + st := s.streams[i] + select { + case <-st.done: + s.streams = deleteStreamAt(s.streams, i) + + case st.msgc <- m.proto: + (*m.deliveries)++ + m.ackDeadline = tNow.Add(st.ackTimeout) + return i, true + } + } + return 0, false +} + +func (s *subscription) newStream(gs pb.Subscriber_StreamingPullServer, timeout time.Duration) *stream { + st := &stream{ + sub: s, + done: make(chan struct{}), + msgc: make(chan *pb.ReceivedMessage), + gstream: gs, + ackTimeout: s.ackTimeout, + timeout: timeout, + } + s.mu.Lock() + s.streams = append(s.streams, st) + s.mu.Unlock() + return st +} + +func (s *subscription) deleteStream(st *stream) { + s.mu.Lock() + defer s.mu.Unlock() + var i int + for i = 0; i < len(s.streams); i++ { + if s.streams[i] == st { + break + } + } + if i < len(s.streams) { + s.streams = deleteStreamAt(s.streams, i) + } +} +func deleteStreamAt(s []*stream, i int) []*stream { + // Preserve order for round-robin delivery. + return append(s[:i], s[i+1:]...) +} + +type message struct { + proto *pb.ReceivedMessage + publishTime time.Time + ackDeadline time.Time + deliveries *int + acks *int + streamIndex int // index of stream that currently owns msg, for round-robin delivery +} + +// A message is outstanding if it is owned by some stream. +func (m *message) outstanding() bool { + return !m.ackDeadline.IsZero() +} + +func (m *message) makeAvailable() { + m.ackDeadline = time.Time{} +} + +type stream struct { + sub *subscription + done chan struct{} // closed when the stream is finished + msgc chan *pb.ReceivedMessage + gstream pb.Subscriber_StreamingPullServer + ackTimeout time.Duration + timeout time.Duration +} + +// pull manages the StreamingPull interaction for the life of the stream. +func (st *stream) pull(wg *sync.WaitGroup) error { + errc := make(chan error, 2) + wg.Add(2) + go func() { + defer wg.Done() + errc <- st.sendLoop() + }() + go func() { + defer wg.Done() + errc <- st.recvLoop() + }() + var tchan <-chan time.Time + if st.timeout > 0 { + tchan = time.After(st.timeout) + } + // Wait until one of the goroutines returns an error, or we time out. 
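+	// If no timeout is configured, tchan is nil and receiving from it blocks
+	// forever, so only an error (or EOF) from sendLoop or recvLoop can end the select.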
+ var err error + select { + case err = <-errc: + if err == io.EOF { + err = nil + } + case <-tchan: + } + close(st.done) // stop the other goroutine + return err +} + +func (st *stream) sendLoop() error { + for { + select { + case <-st.done: + return nil + case rm := <-st.msgc: + res := &pb.StreamingPullResponse{ReceivedMessages: []*pb.ReceivedMessage{rm}} + if err := st.gstream.Send(res); err != nil { + return err + } + } + } +} + +func (st *stream) recvLoop() error { + for { + req, err := st.gstream.Recv() + if err != nil { + return err + } + st.sub.handleStreamingPullRequest(st, req) + } +} + +func (s *subscription) handleStreamingPullRequest(st *stream, req *pb.StreamingPullRequest) { + // Lock the entire server. + s.mu.Lock() + defer s.mu.Unlock() + + for _, ackID := range req.AckIds { + s.ack(ackID) + } + for i, id := range req.ModifyDeadlineAckIds { + s.modifyAckDeadline(id, secsToDur(req.ModifyDeadlineSeconds[i])) + } + if req.StreamAckDeadlineSeconds > 0 { + st.ackTimeout = secsToDur(req.StreamAckDeadlineSeconds) + } +} + +func (s *subscription) ack(id string) { + m := s.msgs[id] + if m != nil { + (*m.acks)++ + delete(s.msgs, id) + } +} + +func (s *subscription) modifyAckDeadline(id string, d time.Duration) { + m := s.msgs[id] + if m == nil { // already acked: ignore. + return + } + if d == 0 { // nack + m.makeAvailable() + } else { // extend the deadline by d + m.ackDeadline = timeNow().Add(d) + } +} + +func secsToDur(secs int32) time.Duration { + return time.Duration(secs) * time.Second +} diff --git a/vendor/cloud.google.com/go/pubsub/pstest/fake_test.go b/vendor/cloud.google.com/go/pubsub/pstest/fake_test.go new file mode 100644 index 0000000..5b4a78e --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/pstest/fake_test.go @@ -0,0 +1,434 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pstest + +import ( + "fmt" + "io" + "testing" + "time" + + "github.com/golang/protobuf/ptypes" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc" +) + +func TestTopics(t *testing.T) { + pclient, _, server := newFake(t) + ctx := context.Background() + var topics []*pb.Topic + for i := 1; i < 3; i++ { + topics = append(topics, mustCreateTopic(t, pclient, &pb.Topic{ + Name: fmt.Sprintf("projects/P/topics/T%d", i), + Labels: map[string]string{"num": fmt.Sprintf("%d", i)}, + })) + } + if got, want := len(server.gServer.topics), len(topics); got != want { + t.Fatalf("got %d topics, want %d", got, want) + } + for _, top := range topics { + got, err := pclient.GetTopic(ctx, &pb.GetTopicRequest{Topic: top.Name}) + if err != nil { + t.Fatal(err) + } + if !testutil.Equal(got, top) { + t.Errorf("\ngot %+v\nwant %+v", got, top) + } + } + + res, err := pclient.ListTopics(ctx, &pb.ListTopicsRequest{Project: "projects/P"}) + if err != nil { + t.Fatal(err) + } + if got, want := res.Topics, topics; !testutil.Equal(got, want) { + t.Errorf("\ngot %+v\nwant %+v", got, want) + } + + for _, top := range topics { + if _, err := pclient.DeleteTopic(ctx, &pb.DeleteTopicRequest{Topic: top.Name}); err != nil { + t.Fatal(err) + } + } + if got, want := len(server.gServer.topics), 0; got != want { + t.Fatalf("got %d topics, want %d", got, want) + } +} + +func TestSubscriptions(t *testing.T) { + pclient, sclient, server := newFake(t) + ctx := context.Background() + topic := mustCreateTopic(t, pclient, &pb.Topic{Name: "projects/P/topics/T"}) + var subs []*pb.Subscription + for i := 0; i < 3; i++ { + subs = append(subs, mustCreateSubscription(t, sclient, &pb.Subscription{ + Name: fmt.Sprintf("projects/P/subscriptions/S%d", i), + Topic: topic.Name, + AckDeadlineSeconds: int32(10 * (i + 1)), + })) + } + + if got, want := len(server.gServer.subs), len(subs); got != want { + t.Fatalf("got %d subscriptions, want %d", got, want) + } + for _, s := range subs { + got, err := sclient.GetSubscription(ctx, &pb.GetSubscriptionRequest{Subscription: s.Name}) + if err != nil { + t.Fatal(err) + } + if !testutil.Equal(got, s) { + t.Errorf("\ngot %+v\nwant %+v", got, s) + } + } + + res, err := sclient.ListSubscriptions(ctx, &pb.ListSubscriptionsRequest{Project: "projects/P"}) + if err != nil { + t.Fatal(err) + } + if got, want := res.Subscriptions, subs; !testutil.Equal(got, want) { + t.Errorf("\ngot %+v\nwant %+v", got, want) + } + + res2, err := pclient.ListTopicSubscriptions(ctx, &pb.ListTopicSubscriptionsRequest{Topic: topic.Name}) + if err != nil { + t.Fatal(err) + } + if got, want := len(res2.Subscriptions), len(subs); got != want { + t.Fatalf("got %d subs, want %d", got, want) + } + for i, got := range res2.Subscriptions { + want := subs[i].Name + if !testutil.Equal(got, want) { + t.Errorf("\ngot %+v\nwant %+v", got, want) + } + } + + for _, s := range subs { + if _, err := sclient.DeleteSubscription(ctx, &pb.DeleteSubscriptionRequest{Subscription: s.Name}); err != nil { + t.Fatal(err) + } + } + if got, want := len(server.gServer.subs), 0; got != want { + t.Fatalf("got %d subscriptions, want %d", got, want) + } +} + +func TestPublish(t *testing.T) { + s := NewServer() + var ids []string + for i := 0; i < 3; i++ { + ids = append(ids, s.Publish("projects/p/topics/t", []byte("hello"), nil)) + } + s.Wait() + ms := s.Messages() + if got, want := len(ms), len(ids); got != want { + t.Errorf("got %d messages, want %d", got, 
want)
+	}
+	for i, id := range ids {
+		if got, want := ms[i].ID, id; got != want {
+			t.Errorf("got %s, want %s", got, want)
+		}
+	}
+
+	m := s.Message(ids[1])
+	if m == nil {
+		t.Error("got nil, want a message")
+	}
+}
+
+// Note: this sets the fake's "now" time, so it is sensitive to concurrent changes to "now".
+func publish(t *testing.T, pclient pb.PublisherClient, topic *pb.Topic, messages []*pb.PubsubMessage) map[string]*pb.PubsubMessage {
+	pubTime := time.Now()
+	now.Store(func() time.Time { return pubTime })
+	defer func() { now.Store(time.Now) }()
+
+	res, err := pclient.Publish(context.Background(), &pb.PublishRequest{
+		Topic:    topic.Name,
+		Messages: messages,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	tsPubTime, err := ptypes.TimestampProto(pubTime)
+	if err != nil {
+		t.Fatal(err)
+	}
+	want := map[string]*pb.PubsubMessage{}
+	for i, id := range res.MessageIds {
+		want[id] = &pb.PubsubMessage{
+			Data:        messages[i].Data,
+			Attributes:  messages[i].Attributes,
+			MessageId:   id,
+			PublishTime: tsPubTime,
+		}
+	}
+	return want
+}
+
+func TestStreamingPull(t *testing.T) {
+	// A simple test of streaming pull.
+	pclient, sclient, _ := newFake(t)
+	top := mustCreateTopic(t, pclient, &pb.Topic{Name: "projects/P/topics/T"})
+	sub := mustCreateSubscription(t, sclient, &pb.Subscription{
+		Name:               "projects/P/subscriptions/S",
+		Topic:              top.Name,
+		AckDeadlineSeconds: 10,
+	})
+
+	want := publish(t, pclient, top, []*pb.PubsubMessage{
+		{Data: []byte("d1")},
+		{Data: []byte("d2")},
+		{Data: []byte("d3")},
+	})
+	got := pullN(t, len(want), sclient, sub)
+	if diff := testutil.Diff(got, want); diff != "" {
+		t.Error(diff)
+	}
+}
+
+func TestAck(t *testing.T) {
+	// Ack each message as it arrives. Make sure we don't see dups.
+	minAckDeadlineSecs = 1
+	pclient, sclient, _ := newFake(t)
+	top := mustCreateTopic(t, pclient, &pb.Topic{Name: "projects/P/topics/T"})
+	sub := mustCreateSubscription(t, sclient, &pb.Subscription{
+		Name:               "projects/P/subscriptions/S",
+		Topic:              top.Name,
+		AckDeadlineSeconds: 1,
+	})
+
+	_ = publish(t, pclient, top, []*pb.PubsubMessage{
+		{Data: []byte("d1")},
+		{Data: []byte("d2")},
+		{Data: []byte("d3")},
+	})
+
+	got := map[string]bool{}
+	spc := mustStartPull(t, sclient, sub)
+	time.AfterFunc(time.Duration(3*minAckDeadlineSecs)*time.Second, func() {
+		if err := spc.CloseSend(); err != nil {
+			t.Errorf("CloseSend: %v", err)
+		}
+	})
+
+	for {
+		res, err := spc.Recv()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			t.Fatal(err)
+		}
+		req := &pb.StreamingPullRequest{}
+		for _, m := range res.ReceivedMessages {
+			if got[m.Message.MessageId] {
+				t.Fatal("duplicate message")
+			}
+			got[m.Message.MessageId] = true
+			req.AckIds = append(req.AckIds, m.AckId)
+		}
+		if err := spc.Send(req); err != nil {
+			t.Fatal(err)
+		}
+	}
+}
+
+func TestAckDeadline(t *testing.T) {
+	// Messages should be resent after they expire.
+	pclient, sclient, _ := newFake(t)
+	minAckDeadlineSecs = 2
+	top := mustCreateTopic(t, pclient, &pb.Topic{Name: "projects/P/topics/T"})
+	sub := mustCreateSubscription(t, sclient, &pb.Subscription{
+		Name:               "projects/P/subscriptions/S",
+		Topic:              top.Name,
+		AckDeadlineSeconds: minAckDeadlineSecs,
+	})
+
+	_ = publish(t, pclient, top, []*pb.PubsubMessage{
+		{Data: []byte("d1")},
+		{Data: []byte("d2")},
+		{Data: []byte("d3")},
+	})
+
+	got := map[string]int{}
+	spc := mustStartPull(t, sclient, sub)
+	// In 5 seconds the ack deadline will expire twice, so we should see each message
+	// exactly three times.
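+	// (With the 2-second deadline, expirations at roughly t=2s and t=4s
+	// trigger the two redeliveries.)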
+ time.AfterFunc(5*time.Second, func() { + if err := spc.CloseSend(); err != nil { + t.Errorf("CloseSend: %v", err) + } + }) + for { + res, err := spc.Recv() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + for _, m := range res.ReceivedMessages { + got[m.Message.MessageId]++ + } + } + for id, n := range got { + if n != 3 { + t.Errorf("message %s: saw %d times, want 3", id, n) + } + } +} + +func TestMultiSubs(t *testing.T) { + // Each subscription gets every message. + pclient, sclient, _ := newFake(t) + top := mustCreateTopic(t, pclient, &pb.Topic{Name: "projects/P/topics/T"}) + sub1 := mustCreateSubscription(t, sclient, &pb.Subscription{ + Name: "projects/P/subscriptions/S1", + Topic: top.Name, + AckDeadlineSeconds: 10, + }) + sub2 := mustCreateSubscription(t, sclient, &pb.Subscription{ + Name: "projects/P/subscriptions/S2", + Topic: top.Name, + AckDeadlineSeconds: 10, + }) + + want := publish(t, pclient, top, []*pb.PubsubMessage{ + {Data: []byte("d1")}, + {Data: []byte("d2")}, + {Data: []byte("d3")}, + }) + got1 := pullN(t, len(want), sclient, sub1) + got2 := pullN(t, len(want), sclient, sub2) + if diff := testutil.Diff(got1, want); diff != "" { + t.Error(diff) + } + if diff := testutil.Diff(got2, want); diff != "" { + t.Error(diff) + } +} + +func TestMultiStreams(t *testing.T) { + // Messages are handed out to the streams of a subscription in round-robin order. + pclient, sclient, _ := newFake(t) + top := mustCreateTopic(t, pclient, &pb.Topic{Name: "projects/P/topics/T"}) + sub := mustCreateSubscription(t, sclient, &pb.Subscription{ + Name: "projects/P/subscriptions/S", + Topic: top.Name, + AckDeadlineSeconds: 10, + }) + want := publish(t, pclient, top, []*pb.PubsubMessage{ + {Data: []byte("d1")}, + {Data: []byte("d2")}, + {Data: []byte("d3")}, + {Data: []byte("d4")}, + }) + streams := []pb.Subscriber_StreamingPullClient{ + mustStartPull(t, sclient, sub), + mustStartPull(t, sclient, sub), + } + got := map[string]*pb.PubsubMessage{} + for i := 0; i < 2; i++ { + for _, st := range streams { + res, err := st.Recv() + if err != nil { + t.Fatal(err) + } + m := res.ReceivedMessages[0] + got[m.Message.MessageId] = m.Message + } + } + if diff := testutil.Diff(got, want); diff != "" { + t.Error(diff) + } +} + +func TestStreamingPullTimeout(t *testing.T) { + pclient, sclient, srv := newFake(t) + timeout := 200 * time.Millisecond + srv.SetStreamTimeout(timeout) + top := mustCreateTopic(t, pclient, &pb.Topic{Name: "projects/P/topics/T"}) + sub := mustCreateSubscription(t, sclient, &pb.Subscription{ + Name: "projects/P/subscriptions/S", + Topic: top.Name, + AckDeadlineSeconds: 10, + }) + stream := mustStartPull(t, sclient, sub) + time.Sleep(2 * timeout) + _, err := stream.Recv() + if err != io.EOF { + t.Errorf("got %v, want io.EOF", err) + } +} + +func mustStartPull(t *testing.T, sc pb.SubscriberClient, sub *pb.Subscription) pb.Subscriber_StreamingPullClient { + spc, err := sc.StreamingPull(context.Background()) + if err != nil { + t.Fatal(err) + } + if err := spc.Send(&pb.StreamingPullRequest{Subscription: sub.Name}); err != nil { + t.Fatal(err) + } + return spc +} + +func pullN(t *testing.T, n int, sc pb.SubscriberClient, sub *pb.Subscription) map[string]*pb.PubsubMessage { + spc := mustStartPull(t, sc, sub) + got := map[string]*pb.PubsubMessage{} + for i := 0; i < n; i++ { + res, err := spc.Recv() + if err != nil { + t.Fatal(err) + } + for _, m := range res.ReceivedMessages { + got[m.Message.MessageId] = m.Message + } + } + if err := spc.CloseSend(); err != nil { + 
t.Fatal(err) + } + res, err := spc.Recv() + if err != io.EOF { + t.Fatalf("Recv returned <%v> instead of EOF; res = %v", err, res) + } + return got +} + +func mustCreateTopic(t *testing.T, pc pb.PublisherClient, topic *pb.Topic) *pb.Topic { + top, err := pc.CreateTopic(context.Background(), topic) + if err != nil { + t.Fatal(err) + } + return top +} + +func mustCreateSubscription(t *testing.T, sc pb.SubscriberClient, sub *pb.Subscription) *pb.Subscription { + sub, err := sc.CreateSubscription(context.Background(), sub) + if err != nil { + t.Fatal(err) + } + return sub +} + +func newFake(t *testing.T) (pb.PublisherClient, pb.SubscriberClient, *Server) { + srv := NewServer() + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) + if err != nil { + t.Fatal(err) + } + return pb.NewPublisherClient(conn), pb.NewSubscriberClient(conn), srv +} diff --git a/vendor/cloud.google.com/go/pubsub/pstest_test.go b/vendor/cloud.google.com/go/pubsub/pstest_test.go new file mode 100644 index 0000000..5c61930 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/pstest_test.go @@ -0,0 +1,76 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub_test + +import ( + "strconv" + "sync" + "testing" + + "golang.org/x/net/context" + + "cloud.google.com/go/pubsub" + "cloud.google.com/go/pubsub/pstest" + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +func TestPSTest(t *testing.T) { + ctx := context.Background() + srv := pstest.NewServer() + + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) + if err != nil { + panic(err) + } + + client, err := pubsub.NewClient(ctx, "some-project", option.WithGRPCConn(conn)) + if err != nil { + panic(err) + } + defer client.Close() + + topic, err := client.CreateTopic(ctx, "test-topic") + if err != nil { + panic(err) + } + + sub, err := client.CreateSubscription(ctx, "sub-name", pubsub.SubscriptionConfig{Topic: topic}) + if err != nil { + panic(err) + } + + go func() { + for i := 0; i < 10; i++ { + srv.Publish("projects/some-project/topics/test-topic", []byte(strconv.Itoa(i)), nil) + } + }() + + ctx, cancel := context.WithCancel(ctx) + var mu sync.Mutex + count := 0 + err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) { + mu.Lock() + count++ + if count >= 10 { + cancel() + } + mu.Unlock() + m.Ack() + }) + if err != nil { + panic(err) + } +} diff --git a/vendor/cloud.google.com/go/pubsub/pubsub.go b/vendor/cloud.google.com/go/pubsub/pubsub.go new file mode 100644 index 0000000..8475186 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/pubsub.go @@ -0,0 +1,113 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub // import "cloud.google.com/go/pubsub" + +import ( + "fmt" + "os" + "runtime" + "time" + + "cloud.google.com/go/internal/version" + vkit "cloud.google.com/go/pubsub/apiv1" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" +) + +const ( + // ScopePubSub grants permissions to view and manage Pub/Sub + // topics and subscriptions. + ScopePubSub = "https://www.googleapis.com/auth/pubsub" + + // ScopeCloudPlatform grants permissions to view and manage your data + // across Google Cloud Platform services. + ScopeCloudPlatform = "https://www.googleapis.com/auth/cloud-platform" +) + +const ( + prodAddr = "https://pubsub.googleapis.com/" + minAckDeadline = 10 * time.Second + maxAckDeadline = 10 * time.Minute +) + +// Client is a Google Pub/Sub client scoped to a single project. +// +// Clients should be reused rather than being created as needed. +// A Client may be shared by multiple goroutines. +type Client struct { + projectID string + pubc *vkit.PublisherClient + subc *vkit.SubscriberClient +} + +// NewClient creates a new PubSub client. +func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (c *Client, err error) { + var o []option.ClientOption + // Environment variables for gcloud emulator: + // https://cloud.google.com/sdk/gcloud/reference/beta/emulators/pubsub/ + if addr := os.Getenv("PUBSUB_EMULATOR_HOST"); addr != "" { + conn, err := grpc.Dial(addr, grpc.WithInsecure()) + if err != nil { + return nil, fmt.Errorf("grpc.Dial: %v", err) + } + o = []option.ClientOption{option.WithGRPCConn(conn)} + } else { + o = []option.ClientOption{ + // Create multiple connections to increase throughput. + option.WithGRPCConnectionPool(runtime.GOMAXPROCS(0)), + option.WithGRPCDialOption(grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: 5 * time.Minute, + })), + } + o = append(o, openCensusOptions()...) + } + o = append(o, opts...) + pubc, err := vkit.NewPublisherClient(ctx, o...) + if err != nil { + return nil, fmt.Errorf("pubsub: %v", err) + } + subc, err := vkit.NewSubscriberClient(ctx, option.WithGRPCConn(pubc.Connection())) + if err != nil { + // Should never happen, since we are passing in the connection. + // If it does, we cannot close, because the user may have passed in their + // own connection originally. + return nil, fmt.Errorf("pubsub: %v", err) + } + pubc.SetGoogleClientInfo("gccl", version.Repo) + subc.SetGoogleClientInfo("gccl", version.Repo) + return &Client{ + projectID: projectID, + pubc: pubc, + subc: subc, + }, nil +} + +// Close releases any resources held by the client, +// such as memory and goroutines. +// +// If the client is available for the lifetime of the program, then Close need not be +// called at exit. +func (c *Client) Close() error { + // Return the first error, because the first call closes the connection. 
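+	// (The subscriber client was created from the publisher client's
+	// connection in NewClient, so closing pubc closes the transport they share.)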
+ err := c.pubc.Close() + _ = c.subc.Close() + return err +} + +func (c *Client) fullyQualifiedProjectName() string { + return fmt.Sprintf("projects/%s", c.projectID) +} diff --git a/vendor/cloud.google.com/go/pubsub/pullstream.go b/vendor/cloud.google.com/go/pubsub/pullstream.go new file mode 100644 index 0000000..d9318a1 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/pullstream.go @@ -0,0 +1,167 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "io" + "sync" + + vkit "cloud.google.com/go/pubsub/apiv1" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc" +) + +// A pullStream supports the methods of a StreamingPullClient, but re-opens +// the stream on a retryable error. +type pullStream struct { + ctx context.Context + open func() (pb.Subscriber_StreamingPullClient, error) + + mu sync.Mutex + spc *pb.Subscriber_StreamingPullClient + err error // permanent error +} + +func newPullStream(ctx context.Context, subc *vkit.SubscriberClient, subName string, ackDeadlineSecs int32) *pullStream { + ctx = withSubscriptionKey(ctx, subName) + return &pullStream{ + ctx: ctx, + open: func() (pb.Subscriber_StreamingPullClient, error) { + spc, err := subc.StreamingPull(ctx, gax.WithGRPCOptions(grpc.MaxCallRecvMsgSize(maxSendRecvBytes))) + if err == nil { + recordStat(ctx, StreamRequestCount, 1) + err = spc.Send(&pb.StreamingPullRequest{ + Subscription: subName, + StreamAckDeadlineSeconds: ackDeadlineSecs, + }) + } + if err != nil { + return nil, err + } + return spc, nil + }, + } +} + +// get returns either a valid *StreamingPullClient (SPC), or a permanent error. +// If the argument is nil, this is the first call for an RPC, and the current +// SPC will be returned (or a new one will be opened). Otherwise, this call is a +// request to re-open the stream because of a retryable error, and the argument +// is a pointer to the SPC that returned the error. +func (s *pullStream) get(spc *pb.Subscriber_StreamingPullClient) (*pb.Subscriber_StreamingPullClient, error) { + s.mu.Lock() + defer s.mu.Unlock() + // A stored error is permanent. + if s.err != nil { + return nil, s.err + } + // If the context is done, so are we. + select { + case <-s.ctx.Done(): + s.err = s.ctx.Err() + return nil, s.err + default: + } + // TODO(jba): We can use the following instead of the above after we drop support for 1.8: + // s.err = s.ctx.Err() + // if s.err != nil { + // return nil, s.err + // } + + // If the current and argument SPCs differ, return the current one. This subsumes two cases: + // 1. We have an SPC and the caller is getting the stream for the first time. + // 2. The caller wants to retry, but they have an older SPC; we've already retried. + if spc != s.spc { + return s.spc, nil + } + // Either this is the very first call on this stream (s.spc == nil), or we have a valid + // retry request. Either way, open a new stream. 
+ // The lock is held here for a long time, but it doesn't matter because no callers could get + // anything done anyway. + s.spc = new(pb.Subscriber_StreamingPullClient) + recordStat(s.ctx, StreamOpenCount, 1) + *s.spc, s.err = s.open() // Setting s.err means any error from open is permanent. Reconsider. + return s.spc, s.err +} + +func (s *pullStream) call(f func(pb.Subscriber_StreamingPullClient) error) error { + var ( + spc *pb.Subscriber_StreamingPullClient + err error + bo gax.Backoff + ) + for { + spc, err = s.get(spc) + if err != nil { + // Preserve the existing behavior of not retrying on open. Is that a bug? + // (If we do decide to retry, don't retry after we're closed.) + return err + } + err = f(*spc) + if err != nil { + if isRetryable(err) { + recordStat(s.ctx, StreamRetryCount, 1) + gax.Sleep(s.ctx, bo.Pause()) + continue + } + s.mu.Lock() + s.err = err + s.mu.Unlock() + } + return err + } +} + +func (s *pullStream) Send(req *pb.StreamingPullRequest) error { + return s.call(func(spc pb.Subscriber_StreamingPullClient) error { + recordStat(s.ctx, AckCount, int64(len(req.AckIds))) + zeroes := 0 + for _, mds := range req.ModifyDeadlineSeconds { + if mds == 0 { + zeroes++ + } + } + recordStat(s.ctx, NackCount, int64(zeroes)) + recordStat(s.ctx, ModAckCount, int64(len(req.ModifyDeadlineSeconds)-zeroes)) + recordStat(s.ctx, StreamRequestCount, 1) + return spc.Send(req) + }) +} + +func (s *pullStream) Recv() (*pb.StreamingPullResponse, error) { + var res *pb.StreamingPullResponse + err := s.call(func(spc pb.Subscriber_StreamingPullClient) error { + var err error + recordStat(s.ctx, StreamResponseCount, 1) + res, err = spc.Recv() + if err == nil { + recordStat(s.ctx, PullCount, int64(len(res.ReceivedMessages))) + } + return err + }) + return res, err +} + +func (s *pullStream) CloseSend() error { + err := s.call(func(spc pb.Subscriber_StreamingPullClient) error { + return spc.CloseSend() + }) + s.mu.Lock() + s.err = io.EOF // should not be retried + s.mu.Unlock() + return err +} diff --git a/vendor/cloud.google.com/go/pubsub/service.go b/vendor/cloud.google.com/go/pubsub/service.go new file mode 100644 index 0000000..c63e4d9 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/service.go @@ -0,0 +1,120 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "fmt" + "math" + "strings" + + pb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// maxPayload is the maximum number of bytes to devote to actual ids in +// acknowledgement or modifyAckDeadline requests. A serialized +// AcknowledgeRequest proto has a small constant overhead, plus the size of the +// subscription name, plus 3 bytes per ID (a tag byte and two size bytes). A +// ModifyAckDeadlineRequest has an additional few bytes for the deadline. We +// don't know the subscription name here, so we just assume the size exclusive +// of ids is 100 bytes. 
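+// For example, under this accounting a request acking 100 IDs of 16 bytes
+// each is estimated at 100 + 100*(3+16) = 2000 bytes.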
+//
+// With gRPC there is no way for the client to know the server's max message size (it is
+// configurable on the server). We know from experience that it
+// is 512K.
+const (
+	maxPayload       = 512 * 1024
+	reqFixedOverhead = 100
+	overheadPerID    = 3
+	maxSendRecvBytes = 20 * 1024 * 1024 // 20M
+)
+
+func convertMessages(rms []*pb.ReceivedMessage) ([]*Message, error) {
+	msgs := make([]*Message, 0, len(rms))
+	for i, m := range rms {
+		msg, err := toMessage(m)
+		if err != nil {
+			return nil, fmt.Errorf("pubsub: cannot decode the retrieved message at index: %d, message: %+v", i, m)
+		}
+		msgs = append(msgs, msg)
+	}
+	return msgs, nil
+}
+
+func trunc32(i int64) int32 {
+	if i > math.MaxInt32 {
+		i = math.MaxInt32
+	}
+	return int32(i)
+}
+
+// Logic from https://github.com/GoogleCloudPlatform/google-cloud-java/blob/master/google-cloud-pubsub/src/main/java/com/google/cloud/pubsub/v1/StatusUtil.java.
+func isRetryable(err error) bool {
+	s, ok := status.FromError(err)
+	if !ok { // includes io.EOF, normal stream close, which causes us to reopen
+		return true
+	}
+	switch s.Code() {
+	case codes.DeadlineExceeded, codes.Internal, codes.Canceled, codes.ResourceExhausted:
+		return true
+	case codes.Unavailable:
+		return !strings.Contains(s.Message(), "Server shutdownNow invoked")
+	default:
+		return false
+	}
+}
+
+// Split req into a prefix that is smaller than maxSize, and a remainder.
+func splitRequest(req *pb.StreamingPullRequest, maxSize int) (prefix, remainder *pb.StreamingPullRequest) {
+	const int32Bytes = 4
+
+	// Copy all fields before splitting the variable-sized ones.
+	remainder = &pb.StreamingPullRequest{}
+	*remainder = *req
+	// Split message so it isn't too big.
+	size := reqFixedOverhead
+	i := 0
+	for size < maxSize && (i < len(req.AckIds) || i < len(req.ModifyDeadlineAckIds)) {
+		if i < len(req.AckIds) {
+			size += overheadPerID + len(req.AckIds[i])
+		}
+		if i < len(req.ModifyDeadlineAckIds) {
+			size += overheadPerID + len(req.ModifyDeadlineAckIds[i]) + int32Bytes
+		}
+		i++
+	}
+
+	min := func(a, b int) int {
+		if a < b {
+			return a
+		}
+		return b
+	}
+
+	j := i
+	if size > maxSize {
+		j--
+	}
+	k := min(j, len(req.AckIds))
+	remainder.AckIds = req.AckIds[k:]
+	req.AckIds = req.AckIds[:k]
+	k = min(j, len(req.ModifyDeadlineAckIds))
+	remainder.ModifyDeadlineAckIds = req.ModifyDeadlineAckIds[k:]
+	remainder.ModifyDeadlineSeconds = req.ModifyDeadlineSeconds[k:]
+	req.ModifyDeadlineAckIds = req.ModifyDeadlineAckIds[:k]
+	req.ModifyDeadlineSeconds = req.ModifyDeadlineSeconds[:k]
+	return req, remainder
+}
diff --git a/vendor/cloud.google.com/go/pubsub/service_test.go b/vendor/cloud.google.com/go/pubsub/service_test.go
new file mode 100644
index 0000000..1139d0f
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/service_test.go
@@ -0,0 +1,69 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+	"testing"
+
+	"cloud.google.com/go/internal/testutil"
+
+	pb "google.golang.org/genproto/googleapis/pubsub/v1"
+)
+
+func TestSplitRequest(t *testing.T) {
+	split := func(a []string, i int) ([]string, []string) {
+		if len(a) < i {
+			return a, nil
+		}
+		return a[:i], a[i:]
+	}
+	ackIDs := []string{"aaaa", "bbbb", "cccc", "dddd", "eeee"}
+	modDeadlines := []int32{1, 2, 3, 4, 5}
+	for i, test := range []struct {
+		ackIDs     []string
+		modAckIDs  []string
+		splitIndex int
+	}{
+		{ackIDs, ackIDs, 2},
+		{nil, ackIDs, 3},
+		{ackIDs, nil, 5},
+		{nil, ackIDs[:1], 1},
+	} {
+		req := &pb.StreamingPullRequest{
+			AckIds:                test.ackIDs,
+			ModifyDeadlineAckIds:  test.modAckIDs,
+			ModifyDeadlineSeconds: modDeadlines[:len(test.modAckIDs)],
+		}
+		a1, a2 := split(test.ackIDs, test.splitIndex)
+		m1, m2 := split(test.modAckIDs, test.splitIndex)
+		want1 := &pb.StreamingPullRequest{
+			AckIds:                a1,
+			ModifyDeadlineAckIds:  m1,
+			ModifyDeadlineSeconds: modDeadlines[:len(m1)],
+		}
+		want2 := &pb.StreamingPullRequest{
+			AckIds:                a2,
+			ModifyDeadlineAckIds:  m2,
+			ModifyDeadlineSeconds: modDeadlines[len(m1) : len(m1)+len(m2)],
+		}
+		got1, got2 := splitRequest(req, reqFixedOverhead+40)
+		if !testutil.Equal(got1, want1) {
+			t.Errorf("#%d: first:\ngot %+v\nwant %+v", i, got1, want1)
+		}
+		if !testutil.Equal(got2, want2) {
+			t.Errorf("#%d: second:\ngot %+v\nwant %+v", i, got2, want2)
+		}
+	}
+}
diff --git a/vendor/cloud.google.com/go/pubsub/snapshot.go b/vendor/cloud.google.com/go/pubsub/snapshot.go
new file mode 100644
index 0000000..7140e96
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/snapshot.go
@@ -0,0 +1,160 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/golang/protobuf/ptypes"
+	"golang.org/x/net/context"
+	pb "google.golang.org/genproto/googleapis/pubsub/v1"
+)
+
+// Snapshot is a reference to a PubSub snapshot.
+type Snapshot struct {
+	c *Client
+
+	// The fully qualified identifier for the snapshot, in the format "projects/<projid>/snapshots/<snap>"
+	name string
+}
+
+// ID returns the unique identifier of the snapshot within its project.
+func (s *Snapshot) ID() string {
+	slash := strings.LastIndex(s.name, "/")
+	if slash == -1 {
+		// name is not a fully-qualified name.
+		panic("bad snapshot name")
+	}
+	return s.name[slash+1:]
+}
+
+// SnapshotConfig contains the details of a Snapshot.
+type SnapshotConfig struct {
+	*Snapshot
+	Topic      *Topic
+	Expiration time.Time
+}
+
+// Snapshot creates a reference to a snapshot.
+func (c *Client) Snapshot(id string) *Snapshot {
+	return &Snapshot{
+		c:    c,
+		name: fmt.Sprintf("projects/%s/snapshots/%s", c.projectID, id),
+	}
+}
+
+// Snapshots returns an iterator which returns snapshots for this project.
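+// Call the iterator's Next method repeatedly; it returns iterator.Done when
+// there are no more snapshots.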
+func (c *Client) Snapshots(ctx context.Context) *SnapshotConfigIterator { + it := c.subc.ListSnapshots(ctx, &pb.ListSnapshotsRequest{ + Project: c.fullyQualifiedProjectName(), + }) + next := func() (*SnapshotConfig, error) { + snap, err := it.Next() + if err != nil { + return nil, err + } + return toSnapshotConfig(snap, c) + } + return &SnapshotConfigIterator{next: next} +} + +// SnapshotConfigIterator is an iterator that returns a series of snapshots. +type SnapshotConfigIterator struct { + next func() (*SnapshotConfig, error) +} + +// Next returns the next SnapshotConfig. Its second return value is iterator.Done if there are no more results. +// Once Next returns iterator.Done, all subsequent calls will return iterator.Done. +func (snaps *SnapshotConfigIterator) Next() (*SnapshotConfig, error) { + return snaps.next() +} + +// Delete deletes a snapshot. +func (snap *Snapshot) Delete(ctx context.Context) error { + return snap.c.subc.DeleteSnapshot(ctx, &pb.DeleteSnapshotRequest{Snapshot: snap.name}) +} + +// SeekToTime seeks the subscription to a point in time. +// +// Messages retained in the subscription that were published before this +// time are marked as acknowledged, and messages retained in the +// subscription that were published after this time are marked as +// unacknowledged. Note that this operation affects only those messages +// retained in the subscription (configured by SnapshotConfig). For example, +// if `time` corresponds to a point before the message retention +// window (or to a point before the system's notion of the subscription +// creation time), only retained messages will be marked as unacknowledged, +// and already-expunged messages will not be restored. +func (s *Subscription) SeekToTime(ctx context.Context, t time.Time) error { + ts, err := ptypes.TimestampProto(t) + if err != nil { + return err + } + _, err = s.c.subc.Seek(ctx, &pb.SeekRequest{ + Subscription: s.name, + Target: &pb.SeekRequest_Time{ts}, + }) + return err +} + +// CreateSnapshot creates a new snapshot from this subscription. +// The snapshot will be for the topic this subscription is subscribed to. +// If the name is empty string, a unique name is assigned. +// +// The created snapshot is guaranteed to retain: +// (a) The existing backlog on the subscription. More precisely, this is +// defined as the messages in the subscription's backlog that are +// unacknowledged when Snapshot returns without error. +// (b) Any messages published to the subscription's topic following +// Snapshot returning without error. +func (s *Subscription) CreateSnapshot(ctx context.Context, name string) (*SnapshotConfig, error) { + if name != "" { + name = fmt.Sprintf("projects/%s/snapshots/%s", strings.Split(s.name, "/")[1], name) + } + snap, err := s.c.subc.CreateSnapshot(ctx, &pb.CreateSnapshotRequest{ + Name: name, + Subscription: s.name, + }) + if err != nil { + return nil, err + } + return toSnapshotConfig(snap, s.c) +} + +// SeekToSnapshot seeks the subscription to a snapshot. +// +// The snapshot need not be created from this subscription, +// but it must be for the topic this subscription is subscribed to. 
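+//
+// Seeking to a snapshot restores the subscription's backlog to the
+// acknowledgment state captured when the snapshot was created.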
+func (s *Subscription) SeekToSnapshot(ctx context.Context, snap *Snapshot) error { + _, err := s.c.subc.Seek(ctx, &pb.SeekRequest{ + Subscription: s.name, + Target: &pb.SeekRequest_Snapshot{snap.name}, + }) + return err +} + +func toSnapshotConfig(snap *pb.Snapshot, c *Client) (*SnapshotConfig, error) { + exp, err := ptypes.Timestamp(snap.ExpireTime) + if err != nil { + return nil, err + } + return &SnapshotConfig{ + Snapshot: &Snapshot{c: c, name: snap.Name}, + Topic: newTopic(c, snap.Topic), + Expiration: exp, + }, nil +} diff --git a/vendor/cloud.google.com/go/pubsub/streaming_pull_test.go b/vendor/cloud.google.com/go/pubsub/streaming_pull_test.go new file mode 100644 index 0000000..b7b119c --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/streaming_pull_test.go @@ -0,0 +1,325 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +// TODO(jba): test keepalive +// TODO(jba): test that expired messages are not kept alive +// TODO(jba): test that when all messages expire, Stop returns. + +import ( + "io" + "strconv" + "sync" + "sync/atomic" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "google.golang.org/grpc/status" + + tspb "github.com/golang/protobuf/ptypes/timestamp" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "golang.org/x/net/context" + "google.golang.org/api/option" + pb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + timestamp = &tspb.Timestamp{} + testMessages = []*pb.ReceivedMessage{ + {AckId: "0", Message: &pb.PubsubMessage{Data: []byte{1}, PublishTime: timestamp}}, + {AckId: "1", Message: &pb.PubsubMessage{Data: []byte{2}, PublishTime: timestamp}}, + {AckId: "2", Message: &pb.PubsubMessage{Data: []byte{3}, PublishTime: timestamp}}, + } +) + +func TestStreamingPullBasic(t *testing.T) { + client, server := newFake(t) + server.addStreamingPullMessages(testMessages) + testStreamingPullIteration(t, client, server, testMessages) +} + +func TestStreamingPullMultipleFetches(t *testing.T) { + client, server := newFake(t) + server.addStreamingPullMessages(testMessages[:1]) + server.addStreamingPullMessages(testMessages[1:]) + testStreamingPullIteration(t, client, server, testMessages) +} + +func newTestSubscription(t *testing.T, client *Client, name string) *Subscription { + topic := client.Topic("t") + sub, err := client.CreateSubscription(context.Background(), name, + SubscriptionConfig{Topic: topic}) + if err != nil { + t.Fatalf("CreateSubscription: %v", err) + } + return sub +} + +func testStreamingPullIteration(t *testing.T, client *Client, server *fakeServer, msgs []*pb.ReceivedMessage) { + sub := newTestSubscription(t, client, "s") + gotMsgs, err := pullN(context.Background(), sub, len(msgs), func(_ context.Context, m *Message) { + id, err := strconv.Atoi(m.ackID) + if err != nil { + panic(err) + } + // ack evens, nack odds + if id%2 == 0 { + m.Ack() + } else { + m.Nack() + 
} + }) + if err != nil { + t.Fatalf("Pull: %v", err) + } + gotMap := map[string]*Message{} + for _, m := range gotMsgs { + gotMap[m.ackID] = m + } + for i, msg := range msgs { + want, err := toMessage(msg) + if err != nil { + t.Fatal(err) + } + want.calledDone = true + got := gotMap[want.ackID] + if got == nil { + t.Errorf("%d: no message for ackID %q", i, want.ackID) + continue + } + if !testutil.Equal(got, want, cmp.AllowUnexported(Message{}), cmpopts.IgnoreTypes(time.Time{}, func(string, bool, time.Time) {})) { + t.Errorf("%d: got\n%#v\nwant\n%#v", i, got, want) + } + } + server.wait() + for i := 0; i < len(msgs); i++ { + id := msgs[i].AckId + if i%2 == 0 { + if !server.Acked[id] { + t.Errorf("msg %q should have been acked but wasn't", id) + } + } else { + if dl, ok := server.Deadlines[id]; !ok || dl != 0 { + t.Errorf("msg %q should have been nacked but wasn't", id) + } + } + } +} + +func TestStreamingPullError(t *testing.T) { + // If an RPC to the service returns a non-retryable error, Pull should + // return after all callbacks return, without waiting for messages to be + // acked. + client, server := newFake(t) + server.addStreamingPullMessages(testMessages[:1]) + server.addStreamingPullError(status.Errorf(codes.Unknown, "")) + sub := newTestSubscription(t, client, "s") + // Use only one goroutine, since the fake server is configured to + // return only one error. + sub.ReceiveSettings.NumGoroutines = 1 + callbackDone := make(chan struct{}) + ctx, _ := context.WithTimeout(context.Background(), time.Second) + err := sub.Receive(ctx, func(ctx context.Context, m *Message) { + defer close(callbackDone) + select { + case <-ctx.Done(): + return + } + }) + select { + case <-callbackDone: + default: + t.Fatal("Receive returned but callback was not done") + } + if want := codes.Unknown; grpc.Code(err) != want { + t.Fatalf("got <%v>, want code %v", err, want) + } +} + +func TestStreamingPullCancel(t *testing.T) { + // If Receive's context is canceled, it should return after all callbacks + // return and all messages have been acked. + client, server := newFake(t) + server.addStreamingPullMessages(testMessages) + sub := newTestSubscription(t, client, "s") + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + var n int32 + err := sub.Receive(ctx, func(ctx2 context.Context, m *Message) { + atomic.AddInt32(&n, 1) + defer atomic.AddInt32(&n, -1) + cancel() + m.Ack() + }) + if got := atomic.LoadInt32(&n); got != 0 { + t.Errorf("Receive returned with %d callbacks still running", got) + } + if err != nil { + t.Fatalf("Receive got <%v>, want nil", err) + } +} + +func TestStreamingPullRetry(t *testing.T) { + // Check that we retry on io.EOF or Unavailable. + client, server := newFake(t) + server.addStreamingPullMessages(testMessages[:1]) + server.addStreamingPullError(io.EOF) + server.addStreamingPullError(io.EOF) + server.addStreamingPullMessages(testMessages[1:2]) + server.addStreamingPullError(status.Errorf(codes.Unavailable, "")) + server.addStreamingPullError(status.Errorf(codes.Unavailable, "")) + server.addStreamingPullMessages(testMessages[2:]) + + testStreamingPullIteration(t, client, server, testMessages) +} + +func TestStreamingPullOneActive(t *testing.T) { + // Only one call to Pull can be active at a time. 
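+	// The nested Receive call below must return errReceiveInProgress while
+	// the outer Receive is still active.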
+ client, srv := newFake(t) + srv.addStreamingPullMessages(testMessages[:1]) + sub := newTestSubscription(t, client, "s") + ctx, cancel := context.WithCancel(context.Background()) + err := sub.Receive(ctx, func(ctx context.Context, m *Message) { + m.Ack() + err := sub.Receive(ctx, func(context.Context, *Message) {}) + if err != errReceiveInProgress { + t.Errorf("got <%v>, want <%v>", err, errReceiveInProgress) + } + cancel() + }) + if err != nil { + t.Fatalf("got <%v>, want nil", err) + } +} + +func TestStreamingPullConcurrent(t *testing.T) { + newMsg := func(i int) *pb.ReceivedMessage { + return &pb.ReceivedMessage{ + AckId: strconv.Itoa(i), + Message: &pb.PubsubMessage{Data: []byte{byte(i)}, PublishTime: timestamp}, + } + } + + // Multiple goroutines should be able to read from the same iterator. + client, server := newFake(t) + // Add a lot of messages, a few at a time, to make sure both threads get a chance. + nMessages := 100 + for i := 0; i < nMessages; i += 2 { + server.addStreamingPullMessages([]*pb.ReceivedMessage{newMsg(i), newMsg(i + 1)}) + } + sub := newTestSubscription(t, client, "s") + ctx, _ := context.WithTimeout(context.Background(), time.Second) + gotMsgs, err := pullN(ctx, sub, nMessages, func(ctx context.Context, m *Message) { + m.Ack() + }) + if err != nil { + t.Fatalf("Receive: %v", err) + } + seen := map[string]bool{} + for _, gm := range gotMsgs { + if seen[gm.ackID] { + t.Fatalf("duplicate ID %q", gm.ackID) + } + seen[gm.ackID] = true + } + if len(seen) != nMessages { + t.Fatalf("got %d messages, want %d", len(seen), nMessages) + } +} + +func TestStreamingPullFlowControl(t *testing.T) { + // Callback invocations should not occur if flow control limits are exceeded. + client, server := newFake(t) + server.addStreamingPullMessages(testMessages) + sub := newTestSubscription(t, client, "s") + sub.ReceiveSettings.MaxOutstandingMessages = 2 + ctx, cancel := context.WithCancel(context.Background()) + activec := make(chan int) + waitc := make(chan int) + errc := make(chan error) + go func() { + errc <- sub.Receive(ctx, func(_ context.Context, m *Message) { + activec <- 1 + <-waitc + m.Ack() + }) + }() + // Here, two callbacks are active. Receive should be blocked in the flow + // control acquire method on the third message. + <-activec + <-activec + select { + case <-activec: + t.Fatal("third callback in progress") + case <-time.After(100 * time.Millisecond): + } + cancel() + // Receive still has not returned, because both callbacks are still blocked on waitc. + select { + case err := <-errc: + t.Fatalf("Receive returned early with error %v", err) + case <-time.After(100 * time.Millisecond): + } + // Let both callbacks proceed. + waitc <- 1 + waitc <- 1 + // The third callback will never run, because acquire returned a non-nil + // error, causing Receive to return. So now Receive should end. + if err := <-errc; err != nil { + t.Fatalf("got %v from Receive, want nil", err) + } +} + +func newFake(t *testing.T) (*Client, *fakeServer) { + srv, err := newFakeServer() + if err != nil { + t.Fatal(err) + } + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) + if err != nil { + t.Fatal(err) + } + client, err := NewClient(context.Background(), "projectID", option.WithGRPCConn(conn)) + if err != nil { + t.Fatal(err) + } + return client, srv +} + +// pullN calls sub.Receive until at least n messages are received. 
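+// It cancels the Receive context once n messages have been observed and
+// returns the messages collected so far.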
+func pullN(ctx context.Context, sub *Subscription, n int, f func(context.Context, *Message)) ([]*Message, error) {
+	var (
+		mu   sync.Mutex
+		msgs []*Message
+	)
+	cctx, cancel := context.WithCancel(ctx)
+	err := sub.Receive(cctx, func(ctx context.Context, m *Message) {
+		mu.Lock()
+		msgs = append(msgs, m)
+		nSeen := len(msgs)
+		mu.Unlock()
+		f(ctx, m)
+		if nSeen >= n {
+			cancel()
+		}
+	})
+	if err != nil {
+		return nil, err
+	}
+	return msgs, nil
+}
diff --git a/vendor/cloud.google.com/go/pubsub/subscription.go b/vendor/cloud.google.com/go/pubsub/subscription.go
new file mode 100644
index 0000000..93054d5
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/subscription.go
@@ -0,0 +1,522 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+	"sync"
+	"time"
+
+	"cloud.google.com/go/iam"
+	"cloud.google.com/go/internal/optional"
+	"github.com/golang/protobuf/ptypes"
+	durpb "github.com/golang/protobuf/ptypes/duration"
+	"golang.org/x/net/context"
+	"golang.org/x/sync/errgroup"
+	pb "google.golang.org/genproto/googleapis/pubsub/v1"
+	fmpb "google.golang.org/genproto/protobuf/field_mask"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+)
+
+// Subscription is a reference to a PubSub subscription.
+type Subscription struct {
+	c *Client
+
+	// The fully qualified identifier for the subscription, in the format "projects/<projid>/subscriptions/<name>"
+	name string
+
+	// Settings for pulling messages. Configure these before calling Receive.
+	ReceiveSettings ReceiveSettings
+
+	mu            sync.Mutex
+	receiveActive bool
+}
+
+// Subscription creates a reference to a subscription.
+func (c *Client) Subscription(id string) *Subscription {
+	return c.SubscriptionInProject(id, c.projectID)
+}
+
+// SubscriptionInProject creates a reference to a subscription in a given project.
+func (c *Client) SubscriptionInProject(id, projectID string) *Subscription {
+	return &Subscription{
+		c:    c,
+		name: fmt.Sprintf("projects/%s/subscriptions/%s", projectID, id),
+	}
+}
+
+// String returns the globally unique printable name of the subscription.
+func (s *Subscription) String() string {
+	return s.name
+}
+
+// ID returns the unique identifier of the subscription within its project.
+func (s *Subscription) ID() string {
+	slash := strings.LastIndex(s.name, "/")
+	if slash == -1 {
+		// name is not a fully-qualified name.
+		panic("bad subscription name")
+	}
+	return s.name[slash+1:]
+}
+
+// Subscriptions returns an iterator which returns all of the subscriptions for the client's project.
+func (c *Client) Subscriptions(ctx context.Context) *SubscriptionIterator {
+	it := c.subc.ListSubscriptions(ctx, &pb.ListSubscriptionsRequest{
+		Project: c.fullyQualifiedProjectName(),
+	})
+	return &SubscriptionIterator{
+		c: c,
+		next: func() (string, error) {
+			sub, err := it.Next()
+			if err != nil {
+				return "", err
+			}
+			return sub.Name, nil
+		},
+	}
+}
+
+// SubscriptionIterator is an iterator that returns a series of subscriptions.
+type SubscriptionIterator struct {
+	c    *Client
+	next func() (string, error)
+}
+
+// Next returns the next subscription. If there are no more subscriptions, iterator.Done will be returned.
+func (subs *SubscriptionIterator) Next() (*Subscription, error) {
+	subName, err := subs.next()
+	if err != nil {
+		return nil, err
+	}
+	return &Subscription{c: subs.c, name: subName}, nil
+}
+
+// PushConfig contains configuration for subscriptions that operate in push mode.
+type PushConfig struct {
+	// A URL locating the endpoint to which messages should be pushed.
+	Endpoint string
+
+	// Endpoint configuration attributes. See https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions#pushconfig for more details.
+	Attributes map[string]string
+}
+
+func (pc *PushConfig) toProto() *pb.PushConfig {
+	return &pb.PushConfig{
+		Attributes:   pc.Attributes,
+		PushEndpoint: pc.Endpoint,
+	}
+}
+
+// SubscriptionConfig contains the configuration of a subscription.
+type SubscriptionConfig struct {
+	Topic      *Topic
+	PushConfig PushConfig
+
+	// The default maximum time after a subscriber receives a message before
+	// the subscriber should acknowledge the message. Note: messages which are
+	// obtained via Subscription.Receive need not be acknowledged within this
+	// deadline, as the deadline will be automatically extended.
+	AckDeadline time.Duration
+
+	// Whether to retain acknowledged messages. If true, acknowledged messages
+	// will not be expunged until they fall out of the RetentionDuration window.
+	RetainAckedMessages bool
+
+	// How long to retain messages in backlog, from the time of publish. If
+	// RetainAckedMessages is true, this duration affects the retention of
+	// acknowledged messages, otherwise only unacknowledged messages are retained.
+	// Defaults to 7 days. Cannot be longer than 7 days or shorter than 10 minutes.
+ RetentionDuration time.Duration +} + +func (cfg *SubscriptionConfig) toProto(name string) *pb.Subscription { + var pbPushConfig *pb.PushConfig + if cfg.PushConfig.Endpoint != "" || len(cfg.PushConfig.Attributes) != 0 { + pbPushConfig = &pb.PushConfig{ + Attributes: cfg.PushConfig.Attributes, + PushEndpoint: cfg.PushConfig.Endpoint, + } + } + var retentionDuration *durpb.Duration + if cfg.RetentionDuration != 0 { + retentionDuration = ptypes.DurationProto(cfg.RetentionDuration) + } + return &pb.Subscription{ + Name: name, + Topic: cfg.Topic.name, + PushConfig: pbPushConfig, + AckDeadlineSeconds: trunc32(int64(cfg.AckDeadline.Seconds())), + RetainAckedMessages: cfg.RetainAckedMessages, + MessageRetentionDuration: retentionDuration, + } +} + +func protoToSubscriptionConfig(pbSub *pb.Subscription, c *Client) (SubscriptionConfig, error) { + rd := time.Hour * 24 * 7 + var err error + if pbSub.MessageRetentionDuration != nil { + rd, err = ptypes.Duration(pbSub.MessageRetentionDuration) + if err != nil { + return SubscriptionConfig{}, err + } + } + return SubscriptionConfig{ + Topic: newTopic(c, pbSub.Topic), + AckDeadline: time.Second * time.Duration(pbSub.AckDeadlineSeconds), + PushConfig: PushConfig{ + Endpoint: pbSub.PushConfig.PushEndpoint, + Attributes: pbSub.PushConfig.Attributes, + }, + RetainAckedMessages: pbSub.RetainAckedMessages, + RetentionDuration: rd, + }, nil +} + +// ReceiveSettings configure the Receive method. +// A zero ReceiveSettings will result in values equivalent to DefaultReceiveSettings. +type ReceiveSettings struct { + // MaxExtension is the maximum period for which the Subscription should + // automatically extend the ack deadline for each message. + // + // The Subscription will automatically extend the ack deadline of all + // fetched Messages for the duration specified. Automatic deadline + // extension may be disabled by specifying a duration less than 0. + // + // Connections may be terminated if they last longer than 30m, which + // effectively makes that the ceiling for this value. For longer message + // processing, see the example at https://godoc.org/cloud.google.com/go/pubsub/apiv1#example_SubscriberClient_Pull_lengthyClientProcessing + MaxExtension time.Duration + + // MaxOutstandingMessages is the maximum number of unprocessed messages + // (unacknowledged but not yet expired). If MaxOutstandingMessages is 0, it + // will be treated as if it were DefaultReceiveSettings.MaxOutstandingMessages. + // If the value is negative, then there will be no limit on the number of + // unprocessed messages. + MaxOutstandingMessages int + + // MaxOutstandingBytes is the maximum size of unprocessed messages + // (unacknowledged but not yet expired). If MaxOutstandingBytes is 0, it will + // be treated as if it were DefaultReceiveSettings.MaxOutstandingBytes. If + // the value is negative, then there will be no limit on the number of bytes + // for unprocessed messages. + MaxOutstandingBytes int + + // NumGoroutines is the number of goroutines Receive will spawn to pull + // messages concurrently. If NumGoroutines is less than 1, it will be treated + // as if it were DefaultReceiveSettings.NumGoroutines. + // + // NumGoroutines does not limit the number of messages that can be processed + // concurrently. Even with one goroutine, many messages might be processed at + // once, because that goroutine may continually receive messages and invoke the + // function passed to Receive on them. 
To limit the number of messages being + // processed concurrently, set MaxOutstandingMessages. + NumGoroutines int +} + +// DefaultReceiveSettings holds the default values for ReceiveSettings. +var DefaultReceiveSettings = ReceiveSettings{ + MaxExtension: 10 * time.Minute, + MaxOutstandingMessages: 1000, + MaxOutstandingBytes: 1e9, // 1G + NumGoroutines: 1, +} + +// Delete deletes the subscription. +func (s *Subscription) Delete(ctx context.Context) error { + return s.c.subc.DeleteSubscription(ctx, &pb.DeleteSubscriptionRequest{Subscription: s.name}) +} + +// Exists reports whether the subscription exists on the server. +func (s *Subscription) Exists(ctx context.Context) (bool, error) { + _, err := s.c.subc.GetSubscription(ctx, &pb.GetSubscriptionRequest{Subscription: s.name}) + if err == nil { + return true, nil + } + if grpc.Code(err) == codes.NotFound { + return false, nil + } + return false, err +} + +// Config fetches the current configuration for the subscription. +func (s *Subscription) Config(ctx context.Context) (SubscriptionConfig, error) { + pbSub, err := s.c.subc.GetSubscription(ctx, &pb.GetSubscriptionRequest{Subscription: s.name}) + if err != nil { + return SubscriptionConfig{}, err + } + cfg, err := protoToSubscriptionConfig(pbSub, s.c) + if err != nil { + return SubscriptionConfig{}, err + } + return cfg, nil +} + +// SubscriptionConfigToUpdate describes how to update a subscription. +type SubscriptionConfigToUpdate struct { + // If non-nil, the push config is changed. + PushConfig *PushConfig + + // If non-zero, the ack deadline is changed. + AckDeadline time.Duration + + // If set, RetainAckedMessages is changed. + RetainAckedMessages optional.Bool + + // If non-zero, RetentionDuration is changed. + RetentionDuration time.Duration +} + +// Update changes an existing subscription according to the fields set in cfg. +// It returns the new SubscriptionConfig. +// +// Update returns an error if no fields were modified. +func (s *Subscription) Update(ctx context.Context, cfg SubscriptionConfigToUpdate) (SubscriptionConfig, error) { + req := s.updateRequest(&cfg) + if len(req.UpdateMask.Paths) == 0 { + return SubscriptionConfig{}, errors.New("pubsub: UpdateSubscription call with nothing to update") + } + rpsub, err := s.c.subc.UpdateSubscription(ctx, req) + if err != nil { + return SubscriptionConfig{}, err + } + return protoToSubscriptionConfig(rpsub, s.c) +} + +func (s *Subscription) updateRequest(cfg *SubscriptionConfigToUpdate) *pb.UpdateSubscriptionRequest { + psub := &pb.Subscription{Name: s.name} + var paths []string + if cfg.PushConfig != nil { + psub.PushConfig = cfg.PushConfig.toProto() + paths = append(paths, "push_config") + } + if cfg.AckDeadline != 0 { + psub.AckDeadlineSeconds = trunc32(int64(cfg.AckDeadline.Seconds())) + paths = append(paths, "ack_deadline_seconds") + } + if cfg.RetainAckedMessages != nil { + psub.RetainAckedMessages = optional.ToBool(cfg.RetainAckedMessages) + paths = append(paths, "retain_acked_messages") + } + if cfg.RetentionDuration != 0 { + psub.MessageRetentionDuration = ptypes.DurationProto(cfg.RetentionDuration) + paths = append(paths, "message_retention_duration") + } + return &pb.UpdateSubscriptionRequest{ + Subscription: psub, + UpdateMask: &fmpb.FieldMask{Paths: paths}, + } +} + +func (s *Subscription) IAM() *iam.Handle { + return iam.InternalNewHandle(s.c.subc.Connection(), s.name) +} + +// CreateSubscription creates a new subscription on a topic. +// +// id is the name of the subscription to create. 
It must start with a letter, +// and contain only letters ([A-Za-z]), numbers ([0-9]), dashes (-), +// underscores (_), periods (.), tildes (~), plus (+) or percent signs (%). It +// must be between 3 and 255 characters in length, and must not start with +// "goog". +// +// cfg.Topic is the topic from which the subscription should receive messages. It +// need not belong to the same project as the subscription. This field is required. +// +// cfg.AckDeadline is the maximum time after a subscriber receives a message before +// the subscriber should acknowledge the message. It must be between 10 and 600 +// seconds (inclusive), and is rounded down to the nearest second. If the +// provided ackDeadline is 0, then the default value of 10 seconds is used. +// Note: messages which are obtained via Subscription.Receive need not be +// acknowledged within this deadline, as the deadline will be automatically +// extended. +// +// cfg.PushConfig may be set to configure this subscription for push delivery. +// +// If the subscription already exists an error will be returned. +func (c *Client) CreateSubscription(ctx context.Context, id string, cfg SubscriptionConfig) (*Subscription, error) { + if cfg.Topic == nil { + return nil, errors.New("pubsub: require non-nil Topic") + } + if cfg.AckDeadline == 0 { + cfg.AckDeadline = 10 * time.Second + } + if d := cfg.AckDeadline; d < 10*time.Second || d > 600*time.Second { + return nil, fmt.Errorf("ack deadline must be between 10 and 600 seconds; got: %v", d) + } + + sub := c.Subscription(id) + _, err := c.subc.CreateSubscription(ctx, cfg.toProto(sub.name)) + if err != nil { + return nil, err + } + return sub, nil +} + +var errReceiveInProgress = errors.New("pubsub: Receive already in progress for this subscription") + +// Receive calls f with the outstanding messages from the subscription. +// It blocks until ctx is done, or the service returns a non-retryable error. +// +// The standard way to terminate a Receive is to cancel its context: +// +// cctx, cancel := context.WithCancel(ctx) +// err := sub.Receive(cctx, callback) +// // Call cancel from callback, or another goroutine. +// +// If the service returns a non-retryable error, Receive returns that error after +// all of the outstanding calls to f have returned. If ctx is done, Receive +// returns nil after all of the outstanding calls to f have returned and +// all messages have been acknowledged or have expired. +// +// Receive calls f concurrently from multiple goroutines. It is encouraged to +// process messages synchronously in f, even if that processing is relatively +// time-consuming; Receive will spawn new goroutines for incoming messages, +// limited by MaxOutstandingMessages and MaxOutstandingBytes in ReceiveSettings. +// +// The context passed to f will be canceled when ctx is Done or there is a +// fatal service error. +// +// Receive will automatically extend the ack deadline of all fetched Messages for the +// period specified by s.ReceiveSettings.MaxExtension. +// +// Each Subscription may have only one invocation of Receive active at a time. 
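+//
+// A fuller sketch, with an illustrative stopping condition (shouldStop is a
+// placeholder for application logic, not part of this API):
+//
+//	cctx, cancel := context.WithCancel(ctx)
+//	err := sub.Receive(cctx, func(ctx context.Context, m *Message) {
+//		// TODO: process m.
+//		m.Ack()
+//		if shouldStop() {
+//			cancel()
+//		}
+//	})
+//	if err != nil {
+//		// TODO: Handle error.
+//	}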
+func (s *Subscription) Receive(ctx context.Context, f func(context.Context, *Message)) error { + s.mu.Lock() + if s.receiveActive { + s.mu.Unlock() + return errReceiveInProgress + } + s.receiveActive = true + s.mu.Unlock() + defer func() { s.mu.Lock(); s.receiveActive = false; s.mu.Unlock() }() + + config, err := s.Config(ctx) + if err != nil { + if grpc.Code(err) == codes.Canceled { + return nil + } + return err + } + maxCount := s.ReceiveSettings.MaxOutstandingMessages + if maxCount == 0 { + maxCount = DefaultReceiveSettings.MaxOutstandingMessages + } + maxBytes := s.ReceiveSettings.MaxOutstandingBytes + if maxBytes == 0 { + maxBytes = DefaultReceiveSettings.MaxOutstandingBytes + } + maxExt := s.ReceiveSettings.MaxExtension + if maxExt == 0 { + maxExt = DefaultReceiveSettings.MaxExtension + } else if maxExt < 0 { + // If MaxExtension is negative, disable automatic extension. + maxExt = 0 + } + numGoroutines := s.ReceiveSettings.NumGoroutines + if numGoroutines < 1 { + numGoroutines = DefaultReceiveSettings.NumGoroutines + } + // TODO(jba): add tests that verify that ReceiveSettings are correctly processed. + po := &pullOptions{ + maxExtension: maxExt, + maxPrefetch: trunc32(int64(maxCount)), + ackDeadline: config.AckDeadline, + } + fc := newFlowController(maxCount, maxBytes) + + // Wait for all goroutines started by Receive to return, so instead of an + // obscure goroutine leak we have an obvious blocked call to Receive. + group, gctx := errgroup.WithContext(ctx) + for i := 0; i < numGoroutines; i++ { + group.Go(func() error { + return s.receive(gctx, po, fc, f) + }) + } + return group.Wait() +} + +func (s *Subscription) receive(ctx context.Context, po *pullOptions, fc *flowController, f func(context.Context, *Message)) error { + // Cancel a sub-context when we return, to kick the context-aware callbacks + // and the goroutine below. + ctx2, cancel := context.WithCancel(ctx) + // Call stop when Receive's context is done. + // Stop will block until all outstanding messages have been acknowledged + // or there was a fatal service error. + // The iterator does not use the context passed to Receive. If it did, canceling + // that context would immediately stop the iterator without waiting for unacked + // messages. + iter := newMessageIterator(context.Background(), s.c.subc, s.name, po) + + // We cannot use errgroup from Receive here. Receive might already be calling group.Wait, + // and group.Wait cannot be called concurrently with group.Go. We give each receive() its + // own WaitGroup instead. + // Since wg.Add is only called from the main goroutine, wg.Wait is guaranteed + // to be called after all Adds. + var wg sync.WaitGroup + wg.Add(1) + go func() { + <-ctx2.Done() + iter.stop() + wg.Done() + }() + defer wg.Wait() + + defer cancel() + for { + msgs, err := iter.receive() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + for i, msg := range msgs { + msg := msg + // TODO(jba): call acquire closer to when the message is allocated. + if err := fc.acquire(ctx, len(msg.Data)); err != nil { + // TODO(jba): test that these "orphaned" messages are nacked immediately when ctx is done. + for _, m := range msgs[i:] { + m.Nack() + } + return nil + } + old := msg.doneFunc + msgLen := len(msg.Data) + msg.doneFunc = func(ackID string, ack bool, receiveTime time.Time) { + defer fc.release(msgLen) + old(ackID, ack, receiveTime) + } + wg.Add(1) + go func() { + defer wg.Done() + f(ctx2, msg) + }() + } + } +} + +// TODO(jba): remove when we delete messageIterator. 
+type pullOptions struct { + maxExtension time.Duration + maxPrefetch int32 + // ackDeadline is the default ack deadline for the subscription. Not + // configurable. + ackDeadline time.Duration +} diff --git a/vendor/cloud.google.com/go/pubsub/subscription_test.go b/vendor/cloud.google.com/go/pubsub/subscription_test.go new file mode 100644 index 0000000..207841a --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/subscription_test.go @@ -0,0 +1,180 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "fmt" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + + "golang.org/x/net/context" + + "google.golang.org/api/iterator" +) + +// All returns the remaining subscriptions from this iterator. +func slurpSubs(it *SubscriptionIterator) ([]*Subscription, error) { + var subs []*Subscription + for { + switch sub, err := it.Next(); err { + case nil: + subs = append(subs, sub) + case iterator.Done: + return subs, nil + default: + return nil, err + } + } +} + +func TestSubscriptionID(t *testing.T) { + const id = "id" + c := &Client{projectID: "projid"} + s := c.Subscription(id) + if got, want := s.ID(), id; got != want { + t.Errorf("Subscription.ID() = %q; want %q", got, want) + } +} + +func TestListProjectSubscriptions(t *testing.T) { + ctx := context.Background() + c, _ := newFake(t) + topic := mustCreateTopic(t, c, "t") + var want []string + for i := 1; i <= 2; i++ { + id := fmt.Sprintf("s%d", i) + want = append(want, id) + _, err := c.CreateSubscription(ctx, id, SubscriptionConfig{Topic: topic}) + if err != nil { + t.Fatal(err) + } + } + subs, err := slurpSubs(c.Subscriptions(ctx)) + if err != nil { + t.Fatal(err) + } + + got := getSubIDs(subs) + if !testutil.Equal(got, want) { + t.Errorf("got %v, want %v", got, want) + } +} + +func getSubIDs(subs []*Subscription) []string { + var names []string + for _, sub := range subs { + names = append(names, sub.ID()) + } + return names +} + +func TestListTopicSubscriptions(t *testing.T) { + ctx := context.Background() + c, _ := newFake(t) + topics := []*Topic{ + mustCreateTopic(t, c, "t0"), + mustCreateTopic(t, c, "t1"), + } + wants := make([][]string, 2) + for i := 0; i < 5; i++ { + id := fmt.Sprintf("s%d", i) + sub, err := c.CreateSubscription(ctx, id, SubscriptionConfig{Topic: topics[i%2]}) + if err != nil { + t.Fatal(err) + } + wants[i%2] = append(wants[i%2], sub.ID()) + } + + for i, topic := range topics { + subs, err := slurpSubs(topic.Subscriptions(ctx)) + if err != nil { + t.Fatal(err) + } + got := getSubIDs(subs) + if !testutil.Equal(got, wants[i]) { + t.Errorf("#%d: got %v, want %v", i, got, wants[i]) + } + } +} + +const defaultRetentionDuration = 168 * time.Hour + +func TestUpdateSubscription(t *testing.T) { + ctx := context.Background() + client, _ := newFake(t) + defer client.Close() + + topic := client.Topic("t") + sub, err := client.CreateSubscription(ctx, "s", SubscriptionConfig{Topic: topic}) + if err != nil { + t.Fatal(err) + } + 
cfg, err := sub.Config(ctx) + if err != nil { + t.Fatal(err) + } + want := SubscriptionConfig{ + Topic: topic, + AckDeadline: 10 * time.Second, + RetainAckedMessages: false, + RetentionDuration: defaultRetentionDuration, + } + if !testutil.Equal(cfg, want) { + t.Fatalf("\ngot %+v\nwant %+v", cfg, want) + } + + got, err := sub.Update(ctx, SubscriptionConfigToUpdate{ + AckDeadline: 20 * time.Second, + RetainAckedMessages: true, + }) + if err != nil { + t.Fatal(err) + } + want = SubscriptionConfig{ + Topic: topic, + AckDeadline: 20 * time.Second, + RetainAckedMessages: true, + RetentionDuration: defaultRetentionDuration, + } + if !testutil.Equal(got, want) { + t.Fatalf("\ngot %+v\nwant %+v", got, want) + } + + got, err = sub.Update(ctx, SubscriptionConfigToUpdate{RetentionDuration: 2 * time.Hour}) + if err != nil { + t.Fatal(err) + } + want.RetentionDuration = 2 * time.Hour + if !testutil.Equal(got, want) { + t.Fatalf("\ngot %+v\nwant %+v", got, want) + } + + _, err = sub.Update(ctx, SubscriptionConfigToUpdate{}) + if err == nil { + t.Fatal("got nil, want error") + } +} + +func (t1 *Topic) Equal(t2 *Topic) bool { + if t1 == nil && t2 == nil { + return true + } + if t1 == nil || t2 == nil { + return false + } + return t1.c == t2.c && t1.name == t2.name +} diff --git a/vendor/cloud.google.com/go/pubsub/timeout_test.go b/vendor/cloud.google.com/go/pubsub/timeout_test.go new file mode 100644 index 0000000..6191bbc --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/timeout_test.go @@ -0,0 +1,88 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "log" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/context" + + "cloud.google.com/go/pubsub/pstest" + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +// Using the fake PubSub server in the pstest package, verify that streaming +// pull resumes if the server stream times out. 
+func TestStreamTimeout(t *testing.T) {
+	log.SetFlags(log.Lmicroseconds)
+	ctx := context.Background()
+	srv := pstest.NewServer()
+	srv.SetStreamTimeout(2 * time.Second)
+	conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	client, err := NewClient(ctx, "P", option.WithGRPCConn(conn))
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer client.Close()
+	topic, err := client.CreateTopic(ctx, "T")
+	if err != nil {
+		t.Fatal(err)
+	}
+	sub, err := client.CreateSubscription(ctx, "sub", SubscriptionConfig{Topic: topic, AckDeadline: 10 * time.Second})
+	if err != nil {
+		t.Fatal(err)
+	}
+	const nPublish = 8
+	rctx, cancel := context.WithTimeout(ctx, 30*time.Second)
+	defer cancel()
+	errc := make(chan error)
+	var nSeen int64
+	go func() {
+		errc <- sub.Receive(rctx, func(ctx context.Context, m *Message) {
+			m.Ack()
+			n := atomic.AddInt64(&nSeen, 1)
+			if n >= nPublish {
+				cancel()
+			}
+		})
+	}()
+
+	for i := 0; i < nPublish; i++ {
+		pr := topic.Publish(ctx, &Message{Data: []byte("msg")})
+		_, err := pr.Get(ctx)
+		if err != nil {
+			t.Fatal(err)
+		}
+		time.Sleep(250 * time.Millisecond)
+	}
+
+	err = <-errc
+	if err := sub.Delete(ctx); err != nil {
+		t.Fatal(err)
+	}
+	n := atomic.LoadInt64(&nSeen)
+	t.Logf("Receive returned %v after seeing %d messages\n", err, n)
+	if n < nPublish {
+		t.Errorf("got %d messages, want %d", n, nPublish)
+	}
+}
diff --git a/vendor/cloud.google.com/go/pubsub/topic.go b/vendor/cloud.google.com/go/pubsub/topic.go
new file mode 100644
index 0000000..388d451
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/topic.go
@@ -0,0 +1,397 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+	"errors"
+	"fmt"
+	"runtime"
+	"strings"
+	"sync"
+	"time"
+
+	"cloud.google.com/go/iam"
+	"github.com/golang/protobuf/proto"
+	gax "github.com/googleapis/gax-go"
+	"golang.org/x/net/context"
+	"google.golang.org/api/support/bundler"
+	pb "google.golang.org/genproto/googleapis/pubsub/v1"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+)
+
+const (
+	// The maximum number of messages that can be in a single publish request, as
+	// determined by the PubSub service.
+	MaxPublishRequestCount = 1000
+
+	// The maximum size of a single publish request in bytes, as determined by the PubSub service.
+	MaxPublishRequestBytes = 1e7
+
+	maxInt = int(^uint(0) >> 1)
+)
+
+// ErrOversizedMessage indicates that a message's size exceeds MaxPublishRequestBytes.
+var ErrOversizedMessage = bundler.ErrOversizedItem
+
+// Topic is a reference to a PubSub topic.
+//
+// The methods of Topic are safe for use by multiple goroutines.
+type Topic struct {
+	c *Client
+	// The fully qualified identifier for the topic, in the format
+	// "projects/<projid>/topics/<name>".
+	name string
+
+	// Settings for publishing messages. All changes must be made before the
+	// first call to Publish. The default is DefaultPublishSettings.
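+	// For example, setting PublishSettings.DelayThreshold to 100ms before the
+	// first Publish trades a little latency for larger batches (illustrative;
+	// see PublishSettings for all options).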
+ PublishSettings PublishSettings + + mu sync.RWMutex + stopped bool + bundler *bundler.Bundler + + wg sync.WaitGroup + + // Channel for message bundles to be published. Close to indicate that Stop was called. + bundlec chan []*bundledMessage +} + +// PublishSettings control the bundling of published messages. +type PublishSettings struct { + + // Publish a non-empty batch after this delay has passed. + DelayThreshold time.Duration + + // Publish a batch when it has this many messages. The maximum is + // MaxPublishRequestCount. + CountThreshold int + + // Publish a batch when its size in bytes reaches this value. + ByteThreshold int + + // The number of goroutines that invoke the Publish RPC concurrently. + // Defaults to a multiple of GOMAXPROCS. + NumGoroutines int + + // The maximum time that the client will attempt to publish a bundle of messages. + Timeout time.Duration +} + +// DefaultPublishSettings holds the default values for topics' PublishSettings. +var DefaultPublishSettings = PublishSettings{ + DelayThreshold: 1 * time.Millisecond, + CountThreshold: 100, + ByteThreshold: 1e6, + Timeout: 60 * time.Second, +} + +// CreateTopic creates a new topic. +// The specified topic ID must start with a letter, and contain only letters +// ([A-Za-z]), numbers ([0-9]), dashes (-), underscores (_), periods (.), +// tildes (~), plus (+) or percent signs (%). It must be between 3 and 255 +// characters in length, and must not start with "goog". +// If the topic already exists an error will be returned. +func (c *Client) CreateTopic(ctx context.Context, id string) (*Topic, error) { + t := c.Topic(id) + _, err := c.pubc.CreateTopic(ctx, &pb.Topic{Name: t.name}) + if err != nil { + return nil, err + } + return t, nil +} + +// Topic creates a reference to a topic in the client's project. +// +// If a Topic's Publish method is called, it has background goroutines +// associated with it. Clean them up by calling Topic.Stop. +// +// Avoid creating many Topic instances if you use them to publish. +func (c *Client) Topic(id string) *Topic { + return c.TopicInProject(id, c.projectID) +} + +// TopicInProject creates a reference to a topic in the given project. +// +// If a Topic's Publish method is called, it has background goroutines +// associated with it. Clean them up by calling Topic.Stop. +// +// Avoid creating many Topic instances if you use them to publish. +func (c *Client) TopicInProject(id, projectID string) *Topic { + return newTopic(c, fmt.Sprintf("projects/%s/topics/%s", projectID, id)) +} + +func newTopic(c *Client, name string) *Topic { + // bundlec is unbuffered. A buffer would occupy memory not + // accounted for by the bundler, so BufferedByteLimit would be a lie: + // the actual memory consumed would be higher. + return &Topic{ + c: c, + name: name, + PublishSettings: DefaultPublishSettings, + bundlec: make(chan []*bundledMessage), + } +} + +// Topics returns an iterator which returns all of the topics for the client's project. +func (c *Client) Topics(ctx context.Context) *TopicIterator { + it := c.pubc.ListTopics(ctx, &pb.ListTopicsRequest{Project: c.fullyQualifiedProjectName()}) + return &TopicIterator{ + c: c, + next: func() (string, error) { + topic, err := it.Next() + if err != nil { + return "", err + } + return topic.Name, nil + }, + } +} + +// TopicIterator is an iterator that returns a series of topics. +type TopicIterator struct { + c *Client + next func() (string, error) +} + +// Next returns the next topic. If there are no more topics, iterator.Done will be returned. 
+func (tps *TopicIterator) Next() (*Topic, error) {
+	topicName, err := tps.next()
+	if err != nil {
+		return nil, err
+	}
+	return newTopic(tps.c, topicName), nil
+}
+
+// ID returns the unique identifier of the topic within its project.
+func (t *Topic) ID() string {
+	slash := strings.LastIndex(t.name, "/")
+	if slash == -1 {
+		// name is not a fully-qualified name.
+		panic("bad topic name")
+	}
+	return t.name[slash+1:]
+}
+
+// String returns the printable globally unique name for the topic.
+func (t *Topic) String() string {
+	return t.name
+}
+
+// Delete deletes the topic.
+func (t *Topic) Delete(ctx context.Context) error {
+	return t.c.pubc.DeleteTopic(ctx, &pb.DeleteTopicRequest{Topic: t.name})
+}
+
+// Exists reports whether the topic exists on the server.
+func (t *Topic) Exists(ctx context.Context) (bool, error) {
+	if t.name == "_deleted-topic_" {
+		return false, nil
+	}
+	_, err := t.c.pubc.GetTopic(ctx, &pb.GetTopicRequest{Topic: t.name})
+	if err == nil {
+		return true, nil
+	}
+	if grpc.Code(err) == codes.NotFound {
+		return false, nil
+	}
+	return false, err
+}
+
+func (t *Topic) IAM() *iam.Handle {
+	return iam.InternalNewHandle(t.c.pubc.Connection(), t.name)
+}
+
+// Subscriptions returns an iterator which returns the subscriptions for this topic.
+//
+// Some of the returned subscriptions may belong to a project other than t.
+func (t *Topic) Subscriptions(ctx context.Context) *SubscriptionIterator {
+	it := t.c.pubc.ListTopicSubscriptions(ctx, &pb.ListTopicSubscriptionsRequest{
+		Topic: t.name,
+	})
+	return &SubscriptionIterator{
+		c:    t.c,
+		next: it.Next,
+	}
+}
+
+var errTopicStopped = errors.New("pubsub: Stop has been called for this topic")
+
+// Publish publishes msg to the topic asynchronously. Messages are batched and
+// sent according to the topic's PublishSettings. Publish never blocks.
+//
+// Publish returns a non-nil PublishResult which will be ready when the
+// message has been sent (or has failed to be sent) to the server.
+//
+// Publish creates goroutines for batching and sending messages. These goroutines
+// need to be stopped by calling t.Stop(). Once stopped, future calls to Publish
+// will immediately return a PublishResult with an error.
+func (t *Topic) Publish(ctx context.Context, msg *Message) *PublishResult {
+	// TODO(jba): if this turns out to take significant time, try to approximate it.
+	// Or, convert the messages to protos in Publish, instead of in the service.
+	msg.size = proto.Size(&pb.PubsubMessage{
+		Data:       msg.Data,
+		Attributes: msg.Attributes,
+	})
+	r := &PublishResult{ready: make(chan struct{})}
+	t.initBundler()
+	t.mu.RLock()
+	defer t.mu.RUnlock()
+	// TODO(aboulhosn) [from bcmills] consider changing the semantics of bundler to perform this logic so we don't have to do it here
+	if t.stopped {
+		r.set("", errTopicStopped)
+		return r
+	}
+
+	// TODO(jba) [from bcmills] consider using a shared channel per bundle
+	// (requires Bundler API changes; would reduce allocations)
+	// The call to Add should never return an error because the bundler's
+	// BufferedByteLimit is set to maxInt; we do not perform any flow
+	// control in the client.
+	err := t.bundler.Add(&bundledMessage{msg, r}, msg.size)
+	if err != nil {
+		r.set("", err)
+	}
+	return r
+}
+
+// Stop sends all remaining published messages and stops goroutines created for
+// handling publishing. It returns once all outstanding messages have been sent
+// or have failed to be sent.
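+//
+// A typical publish-and-flush sketch (error handling elided; the payload is
+// illustrative):
+//
+//	res := topic.Publish(ctx, &Message{Data: []byte("payload")})
+//	topic.Stop() // flush remaining messages and stop goroutines
+//	id, err := res.Get(ctx)
+//	if err != nil {
+//		// TODO: Handle error.
+//	}
+//	_ = id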
+func (t *Topic) Stop() { + t.mu.Lock() + noop := t.stopped || t.bundler == nil + t.stopped = true + t.mu.Unlock() + if noop { + return + } + t.bundler.Flush() + // At this point, all pending bundles have been published and the bundler's + // goroutines have exited, so it is OK for this goroutine to close bundlec. + close(t.bundlec) + t.wg.Wait() +} + +// A PublishResult holds the result from a call to Publish. +type PublishResult struct { + ready chan struct{} + serverID string + err error +} + +// Ready returns a channel that is closed when the result is ready. +// When the Ready channel is closed, Get is guaranteed not to block. +func (r *PublishResult) Ready() <-chan struct{} { return r.ready } + +// Get returns the server-generated message ID and/or error result of a Publish call. +// Get blocks until the Publish call completes or the context is done. +func (r *PublishResult) Get(ctx context.Context) (serverID string, err error) { + // If the result is already ready, return it even if the context is done. + select { + case <-r.Ready(): + return r.serverID, r.err + default: + } + select { + case <-ctx.Done(): + return "", ctx.Err() + case <-r.Ready(): + return r.serverID, r.err + } +} + +func (r *PublishResult) set(sid string, err error) { + r.serverID = sid + r.err = err + close(r.ready) +} + +type bundledMessage struct { + msg *Message + res *PublishResult +} + +func (t *Topic) initBundler() { + t.mu.RLock() + noop := t.stopped || t.bundler != nil + t.mu.RUnlock() + if noop { + return + } + t.mu.Lock() + defer t.mu.Unlock() + // Must re-check, since we released the lock. + if t.stopped || t.bundler != nil { + return + } + + // TODO(jba): use a context detached from the one passed to NewClient. + ctx := context.TODO() + // Unless overridden, run several goroutines per CPU to call the Publish RPC. 
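+	// (The generous multiplier below reflects that these goroutines spend most
+	// of their time blocked on the Publish RPC, not using the CPU.)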
+ n := t.PublishSettings.NumGoroutines + if n <= 0 { + n = 25 * runtime.GOMAXPROCS(0) + } + timeout := t.PublishSettings.Timeout + t.wg.Add(n) + for i := 0; i < n; i++ { + go func() { + defer t.wg.Done() + for b := range t.bundlec { + bctx := ctx + cancel := func() {} + if timeout != 0 { + bctx, cancel = context.WithTimeout(ctx, timeout) + } + t.publishMessageBundle(bctx, b) + cancel() + } + }() + } + t.bundler = bundler.NewBundler(&bundledMessage{}, func(items interface{}) { + t.bundlec <- items.([]*bundledMessage) + + }) + t.bundler.DelayThreshold = t.PublishSettings.DelayThreshold + t.bundler.BundleCountThreshold = t.PublishSettings.CountThreshold + if t.bundler.BundleCountThreshold > MaxPublishRequestCount { + t.bundler.BundleCountThreshold = MaxPublishRequestCount + } + t.bundler.BundleByteThreshold = t.PublishSettings.ByteThreshold + t.bundler.BufferedByteLimit = maxInt + t.bundler.BundleByteLimit = MaxPublishRequestBytes +} + +func (t *Topic) publishMessageBundle(ctx context.Context, bms []*bundledMessage) { + pbMsgs := make([]*pb.PubsubMessage, len(bms)) + for i, bm := range bms { + pbMsgs[i] = &pb.PubsubMessage{ + Data: bm.msg.Data, + Attributes: bm.msg.Attributes, + } + bm.msg = nil // release bm.msg for GC + } + res, err := t.c.pubc.Publish(ctx, &pb.PublishRequest{ + Topic: t.name, + Messages: pbMsgs, + }, gax.WithGRPCOptions(grpc.MaxCallSendMsgSize(maxSendRecvBytes))) + for i, bm := range bms { + if err != nil { + bm.res.set("", err) + } else { + bm.res.set(res.MessageIds[i], nil) + } + } +} diff --git a/vendor/cloud.google.com/go/pubsub/topic_test.go b/vendor/cloud.google.com/go/pubsub/topic_test.go new file mode 100644 index 0000000..0a6bc2f --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/topic_test.go @@ -0,0 +1,148 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "fmt" + "net" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "google.golang.org/grpc/status" + + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +func checkTopicListing(t *testing.T, c *Client, want []string) { + topics, err := slurpTopics(c.Topics(context.Background())) + if err != nil { + t.Fatalf("error listing topics: %v", err) + } + var got []string + for _, topic := range topics { + got = append(got, topic.ID()) + } + if !testutil.Equal(got, want) { + t.Errorf("topic list: got: %v, want: %v", got, want) + } +} + +// All returns the remaining topics from this iterator. 
+func slurpTopics(it *TopicIterator) ([]*Topic, error) {
+	var topics []*Topic
+	for {
+		switch topic, err := it.Next(); err {
+		case nil:
+			topics = append(topics, topic)
+		case iterator.Done:
+			return topics, nil
+		default:
+			return nil, err
+		}
+	}
+}
+
+func TestTopicID(t *testing.T) {
+	const id = "id"
+	c, _ := newFake(t)
+	s := c.Topic(id)
+	if got, want := s.ID(), id; got != want {
+		t.Errorf("Topic.ID() = %q; want %q", got, want)
+	}
+}
+
+func TestListTopics(t *testing.T) {
+	c, _ := newFake(t)
+	var ids []string
+	for i := 1; i <= 4; i++ {
+		id := fmt.Sprintf("t%d", i)
+		ids = append(ids, id)
+		mustCreateTopic(t, c, id)
+	}
+	checkTopicListing(t, c, ids)
+}
+
+func TestListCompletelyEmptyTopics(t *testing.T) {
+	c, _ := newFake(t)
+	checkTopicListing(t, c, nil)
+}
+
+func TestStopPublishOrder(t *testing.T) {
+	// Check that Stop doesn't panic if called before Publish.
+	// Also that Publish after Stop returns the right error.
+	ctx := context.Background()
+	c := &Client{projectID: "projid"}
+	topic := c.Topic("t")
+	topic.Stop()
+	r := topic.Publish(ctx, &Message{})
+	_, err := r.Get(ctx)
+	if err != errTopicStopped {
+		t.Errorf("got %v, want errTopicStopped", err)
+	}
+}
+
+func TestPublishTimeout(t *testing.T) {
+	ctx := context.Background()
+	serv := grpc.NewServer()
+	pubsubpb.RegisterPublisherServer(serv, &alwaysFailPublish{})
+	lis, err := net.Listen("tcp", "localhost:0")
+	if err != nil {
+		t.Fatal(err)
+	}
+	go serv.Serve(lis)
+	conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
+	if err != nil {
+		t.Fatal(err)
+	}
+	c, err := NewClient(ctx, "projectID", option.WithGRPCConn(conn))
+	if err != nil {
+		t.Fatal(err)
+	}
+	topic := c.Topic("t")
+	topic.PublishSettings.Timeout = 3 * time.Second
+	r := topic.Publish(ctx, &Message{})
+	defer topic.Stop()
+	select {
+	case <-r.Ready():
+		_, err = r.Get(ctx)
+		if err != context.DeadlineExceeded {
+			t.Fatalf("got %v, want context.DeadlineExceeded", err)
+		}
+	case <-time.After(2 * topic.PublishSettings.Timeout):
+		t.Fatal("timed out")
+	}
+}
+
+type alwaysFailPublish struct {
+	pubsubpb.PublisherServer
+}
+
+func (s *alwaysFailPublish) Publish(ctx context.Context, req *pubsubpb.PublishRequest) (*pubsubpb.PublishResponse, error) {
+	return nil, status.Errorf(codes.Unavailable, "try again")
+}
+
+func mustCreateTopic(t *testing.T, c *Client, id string) *Topic {
+	topic, err := c.CreateTopic(context.Background(), id)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return topic
+}
diff --git a/vendor/cloud.google.com/go/regen-gapic.sh b/vendor/cloud.google.com/go/regen-gapic.sh
new file mode 100755
index 0000000..9b345c9
--- /dev/null
+++ b/vendor/cloud.google.com/go/regen-gapic.sh
@@ -0,0 +1,64 @@
+#!/bin/bash
+
+# This script generates all GAPIC clients in this repo.
+# One-time setup:
+#   cd path/to/googleapis   # https://github.com/googleapis/googleapis
+#   virtualenv env
+#   . env/bin/activate
+#   pip install googleapis-artman
+#   deactivate
+#
+# Regenerate:
+#   cd path/to/googleapis
+#   . env/bin/activate
+#   $GOPATH/src/cloud.google.com/go/regen-gapic.sh
+#   deactivate
+#
+# Being in the googleapis directory is important;
+# that's where we find YAML files and where artman puts the "artman-genfiles" directory.
+#
+# NOTE: This script does not generate the "raw" gRPC client found in google.golang.org/genproto.
+# To do that, use the regen.sh script in the genproto repo instead.
+ +set -ex + +APIS=( +google/cloud/bigquery/datatransfer/artman_bigquerydatatransfer.yaml +google/cloud/dataproc/artman_dataproc_v1.yaml +google/cloud/language/artman_language_v1.yaml +google/cloud/language/artman_language_v1beta2.yaml +google/cloud/oslogin/artman_oslogin_v1beta.yaml +google/cloud/speech/artman_speech_v1.yaml +google/cloud/speech/artman_speech_v1beta1.yaml +google/cloud/videointelligence/artman_videointelligence_v1beta1.yaml +google/cloud/videointelligence/artman_videointelligence_v1beta2.yaml +google/cloud/vision/artman_vision_v1.yaml +google/cloud/vision/artman_vision_v1p1beta1.yaml +google/container/artman_container.yaml +google/devtools/artman_clouddebugger.yaml +google/devtools/clouderrorreporting/artman_errorreporting.yaml +google/devtools/cloudtrace/artman_cloudtrace_v1.yaml +google/devtools/cloudtrace/artman_cloudtrace_v2.yaml +google/firestore/artman_firestore.yaml +google/logging/artman_logging.yaml +google/longrunning/artman_longrunning.yaml +google/monitoring/artman_monitoring.yaml +google/privacy/dlp/artman_dlp_v2beta1.yaml +google/privacy/dlp/artman_dlp_v2.yaml +google/pubsub/artman_pubsub.yaml +google/spanner/admin/database/artman_spanner_admin_database.yaml +google/spanner/admin/instance/artman_spanner_admin_instance.yaml +google/spanner/artman_spanner.yaml +) + +for api in "${APIS[@]}"; do + rm -rf artman-genfiles/* + artman --config "$api" generate go_gapic + cp -r artman-genfiles/gapi-*/cloud.google.com/go/* $GOPATH/src/cloud.google.com/go/ +done + +go list cloud.google.com/go/... | grep apiv | xargs go test + +go test -short cloud.google.com/go/... + +echo "googleapis version: $(git rev-parse HEAD)" diff --git a/vendor/cloud.google.com/go/rpcreplay/Makefile b/vendor/cloud.google.com/go/rpcreplay/Makefile new file mode 100644 index 0000000..f41293f --- /dev/null +++ b/vendor/cloud.google.com/go/rpcreplay/Makefile @@ -0,0 +1,32 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Makefile for building Go files from protos. + +# Change these to match your environment. +PROTOC=$(HOME)/bin/protoc +PROTOC_GO_PLUGIN_DIR=$(GOPATH)/bin +PROTOBUF_REPO=$(HOME)/git-repos/protobuf + +gen-protos: sync-protobuf + for d in proto/*; do \ + PATH=$(PATH):$(PROTOC_GO_PLUGIN_DIR) \ + $(PROTOC) --go_out=plugins=grpc:$$d \ + -I $$d -I $(PROTOBUF_REPO)/src $$d/*.proto; \ + done + + +sync-protobuf: + cd $(PROTOBUF_REPO); git pull + diff --git a/vendor/cloud.google.com/go/rpcreplay/doc.go b/vendor/cloud.google.com/go/rpcreplay/doc.go new file mode 100644 index 0000000..a38afc6 --- /dev/null +++ b/vendor/cloud.google.com/go/rpcreplay/doc.go @@ -0,0 +1,108 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package rpcreplay supports the capture and replay of gRPC calls. Its main goal is +to improve testing. Once you capture the calls of a test that runs against a real +service, you have an "automatic mock" that can be replayed against the same test, +yielding a unit test that is fast and flake-free. + +This package is EXPERIMENTAL and subject to change without notice. + + +Recording + +To record a sequence of gRPC calls to a file, create a Recorder and pass its +DialOptions to grpc.Dial: + + rec, err := rpcreplay.NewRecorder("service.replay", nil) + if err != nil { ... } + defer func() { + if err := rec.Close(); err != nil { ... } + }() + conn, err := grpc.Dial(serverAddress, rec.DialOptions()...) + +It is essential to close the Recorder when the interaction is finished. + +There is also a NewRecorderWriter function for capturing to an arbitrary +io.Writer. + + +Replaying + +Replaying a captured file looks almost identical: create a Replayer and use +its DialOptions. (Since we're reading the file and not writing it, we don't +have to be as careful about the error returned from Close). + + rep, err := rpcreplay.NewReplayer("service.replay") + if err != nil { ... } + defer rep.Close() + conn, err := grpc.Dial(serverAddress, rep.DialOptions()...) + + +Initial State + +A test might use random or time-sensitive values, for instance to create unique +resources for isolation from other tests. The test therefore has initial values, such +as the current time, or a random seed, that differ from run to run. You must record +this initial state and re-establish it on replay. + +To record the initial state, serialize it into a []byte and pass it as the second +argument to NewRecorder: + + timeNow := time.Now() + b, err := timeNow.MarshalBinary() + if err != nil { ... } + rec, err := rpcreplay.NewRecorder("service.replay", b) + +On replay, get the bytes from Replayer.Initial: + + rep, err := rpcreplay.NewReplayer("service.replay") + if err != nil { ... } + defer rep.Close() + err = timeNow.UnmarshalBinary(rep.Initial()) + if err != nil { ... } + + +Nondeterminism + +A nondeterministic program may invoke RPCs in a different order each time +it is run. The order in which RPCs are called during recording may differ +from the order during replay. + +The replayer matches incoming to recorded requests by method name and request +contents, so nondeterminism is only a concern for identical requests that result +in different responses. A nondeterministic program whose behavior differs +depending on the order of such RPCs probably has a race condition: since both the +recorded sequence of RPCs and the sequence during replay are valid orderings, the +program should behave the same under both. + + +Other Replayer Differences + +Besides the differences in replay mentioned above, other differences may cause issues +for some programs. We list them here. + +The Replayer delivers a response to an RPC immediately, without waiting for other +incoming RPCs. This can violate causality. 
For example, in a Pub/Sub program where +one goroutine publishes and another subscribes, during replay the Subscribe call may +finish before the Publish call begins. + +For streaming RPCs, the Replayer delivers the result of Send and Recv calls in +the order they were recorded. No attempt is made to match message contents. + +At present, this package does not record or replay stream headers and trailers, or +the result of the CloseSend method. +*/ +package rpcreplay // import "cloud.google.com/go/rpcreplay" diff --git a/vendor/cloud.google.com/go/rpcreplay/example_test.go b/vendor/cloud.google.com/go/rpcreplay/example_test.go new file mode 100644 index 0000000..747ba50 --- /dev/null +++ b/vendor/cloud.google.com/go/rpcreplay/example_test.go @@ -0,0 +1,47 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rpcreplay_test + +var serverAddress string + +// func Example_NewRecorder() { +// rec, err := rpcreplay.NewRecorder("service.replay", nil) +// if err != nil { +// // TODO: Handle error. +// } +// defer func() { +// if err := rec.Close(); err != nil { +// // TODO: Handle error. +// } +// }() +// conn, err := grpc.Dial(serverAddress, rec.DialOptions()...) +// if err != nil { +// // TODO: Handle error. +// } +// _ = conn // TODO: use connection +// } + +// func Example_NewReplayer() { +// rep, err := rpcreplay.NewReplayer("service.replay") +// if err != nil { +// // TODO: Handle error. +// } +// defer rep.Close() +// conn, err := grpc.Dial(serverAddress, rep.DialOptions()...) +// if err != nil { +// // TODO: Handle error. +// } +// _ = conn // TODO: use connection +// } diff --git a/vendor/cloud.google.com/go/rpcreplay/fake_test.go b/vendor/cloud.google.com/go/rpcreplay/fake_test.go new file mode 100644 index 0000000..6953e6b --- /dev/null +++ b/vendor/cloud.google.com/go/rpcreplay/fake_test.go @@ -0,0 +1,122 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rpcreplay + +import ( + "io" + "log" + "net" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + pb "cloud.google.com/go/rpcreplay/proto/intstore" +) + +// intStoreServer is an in-memory implementation of IntStore. 
+type intStoreServer struct { + pb.IntStoreServer + + Addr string + l net.Listener + gsrv *grpc.Server + + items map[string]int32 +} + +func newIntStoreServer() *intStoreServer { + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + log.Fatal(err) + } + s := &intStoreServer{ + Addr: l.Addr().String(), + l: l, + gsrv: grpc.NewServer(), + } + pb.RegisterIntStoreServer(s.gsrv, s) + go s.gsrv.Serve(s.l) + return s +} + +func (s *intStoreServer) stop() { + s.gsrv.Stop() + s.l.Close() +} + +func (s *intStoreServer) Set(_ context.Context, item *pb.Item) (*pb.SetResponse, error) { + old := s.setItem(item) + return &pb.SetResponse{PrevValue: old}, nil +} + +func (s *intStoreServer) setItem(item *pb.Item) int32 { + if s.items == nil { + s.items = map[string]int32{} + } + old := s.items[item.Name] + s.items[item.Name] = item.Value + return old +} + +func (s *intStoreServer) Get(_ context.Context, req *pb.GetRequest) (*pb.Item, error) { + val, ok := s.items[req.Name] + if !ok { + return nil, status.Errorf(codes.NotFound, "%q", req.Name) + } + return &pb.Item{Name: req.Name, Value: val}, nil +} + +func (s *intStoreServer) ListItems(_ *pb.ListItemsRequest, ss pb.IntStore_ListItemsServer) error { + for name, val := range s.items { + if err := ss.Send(&pb.Item{Name: name, Value: val}); err != nil { + return err + } + } + return nil +} + +func (s *intStoreServer) SetStream(ss pb.IntStore_SetStreamServer) error { + n := 0 + for { + item, err := ss.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + s.setItem(item) + n++ + } + return ss.SendAndClose(&pb.Summary{Count: int32(n)}) +} + +func (s *intStoreServer) StreamChat(ss pb.IntStore_StreamChatServer) error { + for { + item, err := ss.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := ss.Send(item); err != nil { + return err + } + } + return nil +} diff --git a/vendor/cloud.google.com/go/rpcreplay/proto/intstore/intstore.pb.go b/vendor/cloud.google.com/go/rpcreplay/proto/intstore/intstore.pb.go new file mode 100644 index 0000000..657ac2f --- /dev/null +++ b/vendor/cloud.google.com/go/rpcreplay/proto/intstore/intstore.pb.go @@ -0,0 +1,454 @@ +// Code generated by protoc-gen-go. +// source: intstore.proto +// DO NOT EDIT! + +/* +Package intstore is a generated protocol buffer package. + +It is generated from these files: + intstore.proto + +It has these top-level messages: + Item + SetResponse + GetRequest + Summary + ListItemsRequest +*/ +package intstore + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Item struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value int32 `protobuf:"varint,2,opt,name=value" json:"value,omitempty"` +} + +func (m *Item) Reset() { *m = Item{} } +func (m *Item) String() string { return proto.CompactTextString(m) } +func (*Item) ProtoMessage() {} +func (*Item) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Item) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Item) GetValue() int32 { + if m != nil { + return m.Value + } + return 0 +} + +type SetResponse struct { + PrevValue int32 `protobuf:"varint,1,opt,name=prev_value,json=prevValue" json:"prev_value,omitempty"` +} + +func (m *SetResponse) Reset() { *m = SetResponse{} } +func (m *SetResponse) String() string { return proto.CompactTextString(m) } +func (*SetResponse) ProtoMessage() {} +func (*SetResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *SetResponse) GetPrevValue() int32 { + if m != nil { + return m.PrevValue + } + return 0 +} + +type GetRequest struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetRequest) Reset() { *m = GetRequest{} } +func (m *GetRequest) String() string { return proto.CompactTextString(m) } +func (*GetRequest) ProtoMessage() {} +func (*GetRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *GetRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type Summary struct { + Count int32 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"` +} + +func (m *Summary) Reset() { *m = Summary{} } +func (m *Summary) String() string { return proto.CompactTextString(m) } +func (*Summary) ProtoMessage() {} +func (*Summary) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *Summary) GetCount() int32 { + if m != nil { + return m.Count + } + return 0 +} + +type ListItemsRequest struct { +} + +func (m *ListItemsRequest) Reset() { *m = ListItemsRequest{} } +func (m *ListItemsRequest) String() string { return proto.CompactTextString(m) } +func (*ListItemsRequest) ProtoMessage() {} +func (*ListItemsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func init() { + proto.RegisterType((*Item)(nil), "intstore.Item") + proto.RegisterType((*SetResponse)(nil), "intstore.SetResponse") + proto.RegisterType((*GetRequest)(nil), "intstore.GetRequest") + proto.RegisterType((*Summary)(nil), "intstore.Summary") + proto.RegisterType((*ListItemsRequest)(nil), "intstore.ListItemsRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for IntStore service + +type IntStoreClient interface { + Set(ctx context.Context, in *Item, opts ...grpc.CallOption) (*SetResponse, error) + Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*Item, error) + // A server-to-client streaming RPC. + ListItems(ctx context.Context, in *ListItemsRequest, opts ...grpc.CallOption) (IntStore_ListItemsClient, error) + // A client-to-server streaming RPC. + SetStream(ctx context.Context, opts ...grpc.CallOption) (IntStore_SetStreamClient, error) + // A Bidirectional streaming RPC. 
+ StreamChat(ctx context.Context, opts ...grpc.CallOption) (IntStore_StreamChatClient, error) +} + +type intStoreClient struct { + cc *grpc.ClientConn +} + +func NewIntStoreClient(cc *grpc.ClientConn) IntStoreClient { + return &intStoreClient{cc} +} + +func (c *intStoreClient) Set(ctx context.Context, in *Item, opts ...grpc.CallOption) (*SetResponse, error) { + out := new(SetResponse) + err := grpc.Invoke(ctx, "/intstore.IntStore/Set", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *intStoreClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*Item, error) { + out := new(Item) + err := grpc.Invoke(ctx, "/intstore.IntStore/Get", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *intStoreClient) ListItems(ctx context.Context, in *ListItemsRequest, opts ...grpc.CallOption) (IntStore_ListItemsClient, error) { + stream, err := grpc.NewClientStream(ctx, &_IntStore_serviceDesc.Streams[0], c.cc, "/intstore.IntStore/ListItems", opts...) + if err != nil { + return nil, err + } + x := &intStoreListItemsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type IntStore_ListItemsClient interface { + Recv() (*Item, error) + grpc.ClientStream +} + +type intStoreListItemsClient struct { + grpc.ClientStream +} + +func (x *intStoreListItemsClient) Recv() (*Item, error) { + m := new(Item) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *intStoreClient) SetStream(ctx context.Context, opts ...grpc.CallOption) (IntStore_SetStreamClient, error) { + stream, err := grpc.NewClientStream(ctx, &_IntStore_serviceDesc.Streams[1], c.cc, "/intstore.IntStore/SetStream", opts...) + if err != nil { + return nil, err + } + x := &intStoreSetStreamClient{stream} + return x, nil +} + +type IntStore_SetStreamClient interface { + Send(*Item) error + CloseAndRecv() (*Summary, error) + grpc.ClientStream +} + +type intStoreSetStreamClient struct { + grpc.ClientStream +} + +func (x *intStoreSetStreamClient) Send(m *Item) error { + return x.ClientStream.SendMsg(m) +} + +func (x *intStoreSetStreamClient) CloseAndRecv() (*Summary, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(Summary) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *intStoreClient) StreamChat(ctx context.Context, opts ...grpc.CallOption) (IntStore_StreamChatClient, error) { + stream, err := grpc.NewClientStream(ctx, &_IntStore_serviceDesc.Streams[2], c.cc, "/intstore.IntStore/StreamChat", opts...) + if err != nil { + return nil, err + } + x := &intStoreStreamChatClient{stream} + return x, nil +} + +type IntStore_StreamChatClient interface { + Send(*Item) error + Recv() (*Item, error) + grpc.ClientStream +} + +type intStoreStreamChatClient struct { + grpc.ClientStream +} + +func (x *intStoreStreamChatClient) Send(m *Item) error { + return x.ClientStream.SendMsg(m) +} + +func (x *intStoreStreamChatClient) Recv() (*Item, error) { + m := new(Item) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for IntStore service + +type IntStoreServer interface { + Set(context.Context, *Item) (*SetResponse, error) + Get(context.Context, *GetRequest) (*Item, error) + // A server-to-client streaming RPC. 
+ ListItems(*ListItemsRequest, IntStore_ListItemsServer) error + // A client-to-server streaming RPC. + SetStream(IntStore_SetStreamServer) error + // A Bidirectional streaming RPC. + StreamChat(IntStore_StreamChatServer) error +} + +func RegisterIntStoreServer(s *grpc.Server, srv IntStoreServer) { + s.RegisterService(&_IntStore_serviceDesc, srv) +} + +func _IntStore_Set_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Item) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IntStoreServer).Set(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/intstore.IntStore/Set", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IntStoreServer).Set(ctx, req.(*Item)) + } + return interceptor(ctx, in, info, handler) +} + +func _IntStore_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IntStoreServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/intstore.IntStore/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IntStoreServer).Get(ctx, req.(*GetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IntStore_ListItems_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ListItemsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(IntStoreServer).ListItems(m, &intStoreListItemsServer{stream}) +} + +type IntStore_ListItemsServer interface { + Send(*Item) error + grpc.ServerStream +} + +type intStoreListItemsServer struct { + grpc.ServerStream +} + +func (x *intStoreListItemsServer) Send(m *Item) error { + return x.ServerStream.SendMsg(m) +} + +func _IntStore_SetStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(IntStoreServer).SetStream(&intStoreSetStreamServer{stream}) +} + +type IntStore_SetStreamServer interface { + SendAndClose(*Summary) error + Recv() (*Item, error) + grpc.ServerStream +} + +type intStoreSetStreamServer struct { + grpc.ServerStream +} + +func (x *intStoreSetStreamServer) SendAndClose(m *Summary) error { + return x.ServerStream.SendMsg(m) +} + +func (x *intStoreSetStreamServer) Recv() (*Item, error) { + m := new(Item) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _IntStore_StreamChat_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(IntStoreServer).StreamChat(&intStoreStreamChatServer{stream}) +} + +type IntStore_StreamChatServer interface { + Send(*Item) error + Recv() (*Item, error) + grpc.ServerStream +} + +type intStoreStreamChatServer struct { + grpc.ServerStream +} + +func (x *intStoreStreamChatServer) Send(m *Item) error { + return x.ServerStream.SendMsg(m) +} + +func (x *intStoreStreamChatServer) Recv() (*Item, error) { + m := new(Item) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _IntStore_serviceDesc = grpc.ServiceDesc{ + ServiceName: "intstore.IntStore", + HandlerType: (*IntStoreServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Set", + Handler: _IntStore_Set_Handler, + }, + { + MethodName: "Get", + Handler: 
_IntStore_Get_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ListItems", + Handler: _IntStore_ListItems_Handler, + ServerStreams: true, + }, + { + StreamName: "SetStream", + Handler: _IntStore_SetStream_Handler, + ClientStreams: true, + }, + { + StreamName: "StreamChat", + Handler: _IntStore_StreamChat_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "intstore.proto", +} + +func init() { proto.RegisterFile("intstore.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 273 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x91, 0x4f, 0x4b, 0xc3, 0x40, + 0x10, 0xc5, 0xb3, 0xfd, 0xa3, 0xcd, 0x08, 0x45, 0x87, 0x0a, 0x25, 0x20, 0x86, 0x3d, 0xe5, 0xa0, + 0x21, 0xd4, 0xa3, 0x47, 0x0f, 0xa5, 0xe0, 0x29, 0x0b, 0x5e, 0x25, 0xca, 0x80, 0x05, 0xb3, 0x1b, + 0x77, 0x27, 0x05, 0xbf, 0x84, 0x9f, 0x59, 0x36, 0x5b, 0x9b, 0xd2, 0x78, 0xdb, 0xb7, 0xf3, 0x66, + 0xde, 0x6f, 0x76, 0x61, 0xbe, 0xd5, 0xec, 0xd8, 0x58, 0xca, 0x1b, 0x6b, 0xd8, 0xe0, 0xec, 0x4f, + 0xcb, 0x02, 0x26, 0x1b, 0xa6, 0x1a, 0x11, 0x26, 0xba, 0xaa, 0x69, 0x29, 0x52, 0x91, 0xc5, 0x65, + 0x77, 0xc6, 0x05, 0x4c, 0x77, 0xd5, 0x67, 0x4b, 0xcb, 0x51, 0x2a, 0xb2, 0x69, 0x19, 0x84, 0xbc, + 0x83, 0x0b, 0x45, 0x5c, 0x92, 0x6b, 0x8c, 0x76, 0x84, 0x37, 0x00, 0x8d, 0xa5, 0xdd, 0x6b, 0x70, + 0x8a, 0xce, 0x19, 0xfb, 0x9b, 0x97, 0xce, 0x9d, 0x02, 0xac, 0xbd, 0xfb, 0xab, 0x25, 0xc7, 0xff, + 0xa5, 0xc8, 0x5b, 0x38, 0x57, 0x6d, 0x5d, 0x57, 0xf6, 0xdb, 0x07, 0xbe, 0x9b, 0x56, 0xf3, 0x7e, + 0x4c, 0x10, 0x12, 0xe1, 0xf2, 0x79, 0xeb, 0xd8, 0x63, 0xba, 0xfd, 0xa0, 0xd5, 0xcf, 0x08, 0x66, + 0x1b, 0xcd, 0xca, 0xef, 0x80, 0x39, 0x8c, 0x15, 0x31, 0xce, 0xf3, 0xc3, 0x96, 0xde, 0x9b, 0x5c, + 0xf7, 0xfa, 0x08, 0x58, 0x46, 0x78, 0x0f, 0xe3, 0x35, 0x31, 0x2e, 0xfa, 0x7a, 0x8f, 0x98, 0x9c, + 0x4c, 0x91, 0x11, 0x3e, 0x42, 0x7c, 0xc8, 0xc7, 0xa4, 0x2f, 0x9f, 0x42, 0x0d, 0x5b, 0x0b, 0x81, + 0x2b, 0x88, 0x15, 0xb1, 0x62, 0x4b, 0x55, 0x3d, 0x20, 0xbc, 0x3a, 0x22, 0x0c, 0x4f, 0x20, 0xa3, + 0xcc, 0xf7, 0x40, 0x68, 0x78, 0xfa, 0xa8, 0x86, 0x6b, 0x0d, 0x52, 0x32, 0x51, 0x88, 0xb7, 0xb3, + 0xee, 0x63, 0x1f, 0x7e, 0x03, 0x00, 0x00, 0xff, 0xff, 0x22, 0x28, 0xa0, 0x49, 0xea, 0x01, 0x00, + 0x00, +} diff --git a/vendor/cloud.google.com/go/rpcreplay/proto/intstore/intstore.proto b/vendor/cloud.google.com/go/rpcreplay/proto/intstore/intstore.proto new file mode 100644 index 0000000..9d987cb --- /dev/null +++ b/vendor/cloud.google.com/go/rpcreplay/proto/intstore/intstore.proto @@ -0,0 +1,54 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// IntStore is a service for testing the rpcreplay package. +// It is a simple key-value store for integers. + +syntax = "proto3"; + +package intstore; + +service IntStore { + rpc Set(Item) returns (SetResponse) {} + + rpc Get(GetRequest) returns (Item) {} + + // A server-to-client streaming RPC. 
+ rpc ListItems(ListItemsRequest) returns (stream Item) {} + + // A client-to-server streaming RPC. + rpc SetStream(stream Item) returns (Summary) {} + + // A Bidirectional streaming RPC. + rpc StreamChat(stream Item) returns (stream Item) {} +} + +message Item { + string name = 1; + int32 value = 2; +} + +message SetResponse { + int32 prev_value = 1; +} + +message GetRequest { + string name = 1; +} + +message Summary { + int32 count = 1; +} + +message ListItemsRequest {} diff --git a/vendor/cloud.google.com/go/rpcreplay/proto/rpcreplay/rpcreplay.pb.go b/vendor/cloud.google.com/go/rpcreplay/proto/rpcreplay/rpcreplay.pb.go new file mode 100644 index 0000000..8e76a39 --- /dev/null +++ b/vendor/cloud.google.com/go/rpcreplay/proto/rpcreplay/rpcreplay.pb.go @@ -0,0 +1,170 @@ +// Code generated by protoc-gen-go. +// source: rpcreplay.proto +// DO NOT EDIT! + +/* +Package rpcreplay is a generated protocol buffer package. + +It is generated from these files: + rpcreplay.proto + +It has these top-level messages: + Entry +*/ +package rpcreplay + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Entry_Kind int32 + +const ( + Entry_TYPE_UNSPECIFIED Entry_Kind = 0 + // A unary request. + // method: the full name of the method + // message: the request proto + // is_error: false + // ref_index: 0 + Entry_REQUEST Entry_Kind = 1 + // A unary response. + // method: the full name of the method + // message: + // if is_error: a google.rpc.Status proto + // else: the response proto + // ref_index: index in the sequence of Entries of matching request (1-based) + Entry_RESPONSE Entry_Kind = 2 + // A method that creates a stream. + // method: the full name of the method + // message: + // if is_error: a google.rpc.Status proto + // else: nil + // ref_index: 0 + Entry_CREATE_STREAM Entry_Kind = 3 + // A call to Send on the client returned by a stream-creating method. + // method: unset + // message: the proto being sent + // is_error: false + // ref_index: index of matching CREATE_STREAM entry (1-based) + Entry_SEND Entry_Kind = 4 + // A call to Recv on the client returned by a stream-creating method. + // method: unset + // message: + // if is_error: a google.rpc.Status proto, or nil on EOF + // else: the received message + // ref_index: index of matching CREATE_STREAM entry + Entry_RECV Entry_Kind = 5 +) + +var Entry_Kind_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "REQUEST", + 2: "RESPONSE", + 3: "CREATE_STREAM", + 4: "SEND", + 5: "RECV", +} +var Entry_Kind_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "REQUEST": 1, + "RESPONSE": 2, + "CREATE_STREAM": 3, + "SEND": 4, + "RECV": 5, +} + +func (x Entry_Kind) String() string { + return proto.EnumName(Entry_Kind_name, int32(x)) +} +func (Entry_Kind) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } + +// An Entry represents a single RPC activity, typically a request or response. 
+type Entry struct { + Kind Entry_Kind `protobuf:"varint,1,opt,name=kind,enum=rpcreplay.Entry_Kind" json:"kind,omitempty"` + Method string `protobuf:"bytes,2,opt,name=method" json:"method,omitempty"` + Message *google_protobuf.Any `protobuf:"bytes,3,opt,name=message" json:"message,omitempty"` + IsError bool `protobuf:"varint,4,opt,name=is_error,json=isError" json:"is_error,omitempty"` + RefIndex int32 `protobuf:"varint,5,opt,name=ref_index,json=refIndex" json:"ref_index,omitempty"` +} + +func (m *Entry) Reset() { *m = Entry{} } +func (m *Entry) String() string { return proto.CompactTextString(m) } +func (*Entry) ProtoMessage() {} +func (*Entry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Entry) GetKind() Entry_Kind { + if m != nil { + return m.Kind + } + return Entry_TYPE_UNSPECIFIED +} + +func (m *Entry) GetMethod() string { + if m != nil { + return m.Method + } + return "" +} + +func (m *Entry) GetMessage() *google_protobuf.Any { + if m != nil { + return m.Message + } + return nil +} + +func (m *Entry) GetIsError() bool { + if m != nil { + return m.IsError + } + return false +} + +func (m *Entry) GetRefIndex() int32 { + if m != nil { + return m.RefIndex + } + return 0 +} + +func init() { + proto.RegisterType((*Entry)(nil), "rpcreplay.Entry") + proto.RegisterEnum("rpcreplay.Entry_Kind", Entry_Kind_name, Entry_Kind_value) +} + +func init() { proto.RegisterFile("rpcreplay.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 289 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x44, 0x8e, 0xdf, 0x4e, 0xc2, 0x30, + 0x14, 0xc6, 0x2d, 0x6c, 0x30, 0x0e, 0xfe, 0xa9, 0x0d, 0x9a, 0xa1, 0x37, 0x0b, 0x57, 0xf3, 0xa6, + 0x24, 0xf8, 0x04, 0x04, 0x8e, 0x09, 0x31, 0x22, 0xb6, 0xc3, 0xc4, 0x1b, 0x17, 0x70, 0x05, 0x17, + 0xa1, 0x25, 0xdd, 0x4c, 0xdc, 0x6b, 0xf8, 0xc4, 0x66, 0x13, 0xf4, 0xae, 0xbf, 0x7e, 0xbf, 0x9c, + 0xef, 0x83, 0x33, 0xbb, 0x7b, 0xb3, 0x6a, 0xb7, 0x59, 0x14, 0x7c, 0x67, 0x4d, 0x6e, 0x58, 0xeb, + 0xef, 0xe3, 0xaa, 0xbb, 0x36, 0x66, 0xbd, 0x51, 0xfd, 0x2a, 0x58, 0x7e, 0xae, 0xfa, 0x0b, 0xbd, + 0xb7, 0x7a, 0xdf, 0x35, 0x70, 0x51, 0xe7, 0xb6, 0x60, 0x37, 0xe0, 0x7c, 0xa4, 0x3a, 0xf1, 0x49, + 0x40, 0xc2, 0xd3, 0xc1, 0x05, 0xff, 0xbf, 0x57, 0xe5, 0xfc, 0x3e, 0xd5, 0x89, 0xa8, 0x14, 0x76, + 0x09, 0x8d, 0xad, 0xca, 0xdf, 0x4d, 0xe2, 0xd7, 0x02, 0x12, 0xb6, 0xc4, 0x9e, 0x18, 0x87, 0xe6, + 0x56, 0x65, 0xd9, 0x62, 0xad, 0xfc, 0x7a, 0x40, 0xc2, 0xf6, 0xa0, 0xc3, 0x7f, 0x9b, 0xf9, 0xa1, + 0x99, 0x0f, 0x75, 0x21, 0x0e, 0x12, 0xeb, 0x82, 0x97, 0x66, 0xb1, 0xb2, 0xd6, 0x58, 0xdf, 0x09, + 0x48, 0xe8, 0x89, 0x66, 0x9a, 0x61, 0x89, 0xec, 0x1a, 0x5a, 0x56, 0xad, 0xe2, 0x54, 0x27, 0xea, + 0xcb, 0x77, 0x03, 0x12, 0xba, 0xc2, 0xb3, 0x6a, 0x35, 0x29, 0xb9, 0xf7, 0x0a, 0x4e, 0xb9, 0x86, + 0x75, 0x80, 0x46, 0x2f, 0x33, 0x8c, 0xe7, 0x53, 0x39, 0xc3, 0xd1, 0xe4, 0x6e, 0x82, 0x63, 0x7a, + 0xc4, 0xda, 0xd0, 0x14, 0xf8, 0x34, 0x47, 0x19, 0x51, 0xc2, 0x8e, 0xc1, 0x13, 0x28, 0x67, 0x8f, + 0x53, 0x89, 0xb4, 0xc6, 0xce, 0xe1, 0x64, 0x24, 0x70, 0x18, 0x61, 0x2c, 0x23, 0x81, 0xc3, 0x07, + 0x5a, 0x67, 0x1e, 0x38, 0x12, 0xa7, 0x63, 0xea, 0x94, 0x2f, 0x81, 0xa3, 0x67, 0xea, 0x2e, 0x1b, + 0xd5, 0xdc, 0xdb, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe7, 0x9b, 0x9d, 0x4f, 0x54, 0x01, 0x00, + 0x00, +} diff --git a/vendor/cloud.google.com/go/rpcreplay/proto/rpcreplay/rpcreplay.proto b/vendor/cloud.google.com/go/rpcreplay/proto/rpcreplay/rpcreplay.proto new file mode 100644 index 0000000..8475f33 --- /dev/null +++ 
b/vendor/cloud.google.com/go/rpcreplay/proto/rpcreplay/rpcreplay.proto @@ -0,0 +1,71 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package rpcreplay; + +import "google/protobuf/any.proto"; + +// An Entry represents a single RPC activity, typically a request or response. +message Entry { + enum Kind { + TYPE_UNSPECIFIED = 0; + + // A unary request. + // method: the full name of the method + // message: the request proto + // is_error: false + // ref_index: 0 + REQUEST = 1; + + // A unary response. + // method: the full name of the method + // message: + // if is_error: a google.rpc.Status proto + // else: the response proto + // ref_index: index in the sequence of Entries of matching request (1-based) + RESPONSE = 2; + + // A method that creates a stream. + // method: the full name of the method + // message: + // if is_error: a google.rpc.Status proto + // else: nil + // ref_index: 0 + CREATE_STREAM = 3; + + // A call to Send on the client returned by a stream-creating method. + // method: unset + // message: the proto being sent + // is_error: false + // ref_index: index of matching CREATE_STREAM entry (1-based) + SEND = 4; // message sent on stream + + // A call to Recv on the client returned by a stream-creating method. + // method: unset + // message: + // if is_error: a google.rpc.Status proto, or nil on EOF + // else: the received message + // ref_index: index of matching CREATE_STREAM entry + RECV = 5; // message received from stream + } + + Kind kind = 1; + string method = 2; // method name + google.protobuf.Any message = 3; // request, response or error status + bool is_error = 4; // was response an error? + int32 ref_index = 5; // for RESPONSE, index of matching request; + // for SEND/RECV, index of CREATE_STREAM +} diff --git a/vendor/cloud.google.com/go/rpcreplay/rpcreplay.go b/vendor/cloud.google.com/go/rpcreplay/rpcreplay.go new file mode 100644 index 0000000..eccc0a6 --- /dev/null +++ b/vendor/cloud.google.com/go/rpcreplay/rpcreplay.go @@ -0,0 +1,689 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
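+
+// A minimal usage sketch (not part of the package API; addr and the elided
+// error handling are placeholders): record RPCs against a live service once,
+// then replay them from the same file in later runs.
+//
+//	rec, err := NewRecorder("service.replay", nil)
+//	if err != nil { /* handle err */ }
+//	defer rec.Close()
+//	conn, err := grpc.Dial(addr,
+//		append([]grpc.DialOption{grpc.WithInsecure()}, rec.DialOptions()...)...)
+//
+// And to replay:
+//
+//	rep, err := NewReplayer("service.replay")
+//	if err != nil { /* handle err */ }
+//	defer rep.Close()
+//	conn, err := grpc.Dial(addr, rep.DialOptions()...)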
+ +package rpcreplay + +import ( + "bufio" + "encoding/binary" + "errors" + "fmt" + "io" + "log" + "os" + "sync" + + "golang.org/x/net/context" + + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + + pb "cloud.google.com/go/rpcreplay/proto/rpcreplay" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/any" + spb "google.golang.org/genproto/googleapis/rpc/status" +) + +// A Recorder records RPCs for later playback. +type Recorder struct { + mu sync.Mutex + w *bufio.Writer + f *os.File + next int + err error +} + +// NewRecorder creates a recorder that writes to filename. The file will +// also store the initial bytes for retrieval during replay. +// +// You must call Close on the Recorder to ensure that all data is written. +func NewRecorder(filename string, initial []byte) (*Recorder, error) { + f, err := os.Create(filename) + if err != nil { + return nil, err + } + rec, err := NewRecorderWriter(f, initial) + if err != nil { + _ = f.Close() + return nil, err + } + rec.f = f + return rec, nil +} + +// NewRecorderWriter creates a recorder that writes to w. The initial +// bytes will also be written to w for retrieval during replay. +// +// You must call Close on the Recorder to ensure that all data is written. +func NewRecorderWriter(w io.Writer, initial []byte) (*Recorder, error) { + bw := bufio.NewWriter(w) + if err := writeHeader(bw, initial); err != nil { + return nil, err + } + return &Recorder{w: bw, next: 1}, nil +} + +// DialOptions returns the options that must be passed to grpc.Dial +// to enable recording. +func (r *Recorder) DialOptions() []grpc.DialOption { + return []grpc.DialOption{ + grpc.WithUnaryInterceptor(r.interceptUnary), + grpc.WithStreamInterceptor(r.interceptStream), + } +} + +// Close saves any unwritten information. +func (r *Recorder) Close() error { + r.mu.Lock() + defer r.mu.Unlock() + if r.err != nil { + return r.err + } + err := r.w.Flush() + if r.f != nil { + if err2 := r.f.Close(); err == nil { + err = err2 + } + } + return err +} + +// Intercepts all unary (non-stream) RPCs. +func (r *Recorder) interceptUnary(ctx context.Context, method string, req, res interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + ereq := &entry{ + kind: pb.Entry_REQUEST, + method: method, + msg: message{msg: req.(proto.Message)}, + } + + refIndex, err := r.writeEntry(ereq) + if err != nil { + return err + } + ierr := invoker(ctx, method, req, res, cc, opts...) + eres := &entry{ + kind: pb.Entry_RESPONSE, + refIndex: refIndex, + } + // If the error is not a gRPC status, then something more + // serious is wrong. More significantly, we have no way + // of serializing an arbitrary error. So just return it + // without recording the response. 
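+	// (A nil error is a valid gRPC status as far as status.FromError is
+	// concerned, so successful responses pass this check and are recorded.)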
+	if _, ok := status.FromError(ierr); !ok {
+		r.mu.Lock()
+		r.err = fmt.Errorf("saw non-status error in %s response: %v (%T)", method, ierr, ierr)
+		r.mu.Unlock()
+		return ierr
+	}
+	eres.msg.set(res, ierr)
+	if _, err := r.writeEntry(eres); err != nil {
+		return err
+	}
+	return ierr
+}
+
+func (r *Recorder) writeEntry(e *entry) (int, error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.err != nil {
+		return 0, r.err
+	}
+	err := writeEntry(r.w, e)
+	if err != nil {
+		r.err = err
+		return 0, err
+	}
+	n := r.next
+	r.next++
+	return n, nil
+}
+
+func (r *Recorder) interceptStream(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+	cstream, serr := streamer(ctx, desc, cc, method, opts...)
+	e := &entry{
+		kind:   pb.Entry_CREATE_STREAM,
+		method: method,
+	}
+	e.msg.set(nil, serr)
+	refIndex, err := r.writeEntry(e)
+	if err != nil {
+		return nil, err
+	}
+	return &recClientStream{
+		ctx:      ctx,
+		rec:      r,
+		cstream:  cstream,
+		refIndex: refIndex,
+	}, serr
+}
+
+// A recClientStream implements the grpc.ClientStream interface.
+// It behaves exactly like the default ClientStream, but also
+// records all messages sent and received.
+type recClientStream struct {
+	ctx      context.Context
+	rec      *Recorder
+	cstream  grpc.ClientStream
+	refIndex int
+}
+
+func (rcs *recClientStream) Context() context.Context { return rcs.ctx }
+
+func (rcs *recClientStream) SendMsg(m interface{}) error {
+	serr := rcs.cstream.SendMsg(m)
+	e := &entry{
+		kind:     pb.Entry_SEND,
+		refIndex: rcs.refIndex,
+	}
+	e.msg.set(m, serr)
+	if _, err := rcs.rec.writeEntry(e); err != nil {
+		return err
+	}
+	return serr
+}
+
+func (rcs *recClientStream) RecvMsg(m interface{}) error {
+	serr := rcs.cstream.RecvMsg(m)
+	e := &entry{
+		kind:     pb.Entry_RECV,
+		refIndex: rcs.refIndex,
+	}
+	e.msg.set(m, serr)
+	if _, err := rcs.rec.writeEntry(e); err != nil {
+		return err
+	}
+	return serr
+}
+
+func (rcs *recClientStream) Header() (metadata.MD, error) {
+	// TODO(jba): record.
+	return rcs.cstream.Header()
+}
+
+func (rcs *recClientStream) Trailer() metadata.MD {
+	// TODO(jba): record.
+	return rcs.cstream.Trailer()
+}
+
+func (rcs *recClientStream) CloseSend() error {
+	// TODO(jba): record.
+	return rcs.cstream.CloseSend()
+}
+
+// A Replayer replays a set of RPCs saved by a Recorder.
+type Replayer struct {
+	initial []byte                                // initial state
+	log     func(format string, v ...interface{}) // for debugging
+
+	mu      sync.Mutex
+	calls   []*call
+	streams []*stream
+}
+
+// A call represents a unary RPC, with a request and response (or error).
+type call struct {
+	method   string
+	request  proto.Message
+	response message
+}
+
+// A stream represents a gRPC stream, with an initial create-stream call, followed by
+// zero or more sends and/or receives.
+type stream struct {
+	method      string
+	createIndex int
+	createErr   error // error from create call
+	sends       []message
+	recvs       []message
+}
+
+// NewReplayer creates a Replayer that reads from filename.
+func NewReplayer(filename string) (*Replayer, error) {
+	f, err := os.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	return NewReplayerReader(f)
+}
+
+// NewReplayerReader creates a Replayer that reads from r.
+func NewReplayerReader(r io.Reader) (*Replayer, error) {
+	rep := &Replayer{
+		log: func(string, ...interface{}) {},
+	}
+	if err := rep.read(r); err != nil {
+		return nil, err
+	}
+	return rep, nil
+}
+
+// read reads the stream of recorded entries.
+// It matches requests with responses, with each pair grouped
+// into a call struct.
+func (rep *Replayer) read(r io.Reader) error {
+	r = bufio.NewReader(r)
+	bytes, err := readHeader(r)
+	if err != nil {
+		return err
+	}
+	rep.initial = bytes
+
+	callsByIndex := map[int]*call{}
+	streamsByIndex := map[int]*stream{}
+	for i := 1; ; i++ {
+		e, err := readEntry(r)
+		if err != nil {
+			return err
+		}
+		if e == nil {
+			break
+		}
+		switch e.kind {
+		case pb.Entry_REQUEST:
+			callsByIndex[i] = &call{
+				method:  e.method,
+				request: e.msg.msg,
+			}
+
+		case pb.Entry_RESPONSE:
+			call := callsByIndex[e.refIndex]
+			if call == nil {
+				return fmt.Errorf("replayer: no request for response #%d", i)
+			}
+			delete(callsByIndex, e.refIndex)
+			call.response = e.msg
+			rep.calls = append(rep.calls, call)
+
+		case pb.Entry_CREATE_STREAM:
+			s := &stream{method: e.method, createIndex: i}
+			s.createErr = e.msg.err
+			streamsByIndex[i] = s
+			rep.streams = append(rep.streams, s)
+
+		case pb.Entry_SEND:
+			s := streamsByIndex[e.refIndex]
+			if s == nil {
+				return fmt.Errorf("replayer: no stream for send #%d", i)
+			}
+			s.sends = append(s.sends, e.msg)
+
+		case pb.Entry_RECV:
+			s := streamsByIndex[e.refIndex]
+			if s == nil {
+				return fmt.Errorf("replayer: no stream for recv #%d", i)
+			}
+			s.recvs = append(s.recvs, e.msg)
+
+		default:
+			return fmt.Errorf("replayer: unknown kind %s", e.kind)
+		}
+	}
+	if len(callsByIndex) > 0 {
+		return fmt.Errorf("replayer: %d unmatched requests", len(callsByIndex))
+	}
+	return nil
+}
+
+// DialOptions returns the options that must be passed to grpc.Dial
+// to enable replaying.
+func (r *Replayer) DialOptions() []grpc.DialOption {
+	return []grpc.DialOption{
+		// On replay, we make no RPCs, which means the connection may be closed
+		// before the normally async Dial completes. Making the Dial synchronous
+		// fixes that.
+		grpc.WithBlock(),
+		grpc.WithUnaryInterceptor(r.interceptUnary),
+		grpc.WithStreamInterceptor(r.interceptStream),
+	}
+}
+
+// Initial returns the initial state saved by the Recorder.
+func (r *Replayer) Initial() []byte { return r.initial }
+
+// SetLogFunc sets a function to be used for debug logging. The function
+// should be safe to call from multiple goroutines.
+func (r *Replayer) SetLogFunc(f func(format string, v ...interface{})) {
+	r.log = f
+}
+
+// Close closes the Replayer.
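+// It always returns nil: NewReplayer reads and closes the underlying file
+// before returning, so there is nothing left to release.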
+func (r *Replayer) Close() error { + return nil +} + +func (r *Replayer) interceptUnary(_ context.Context, method string, req, res interface{}, _ *grpc.ClientConn, _ grpc.UnaryInvoker, _ ...grpc.CallOption) error { + mreq := req.(proto.Message) + r.log("request %s (%s)", method, req) + call := r.extractCall(method, mreq) + if call == nil { + return fmt.Errorf("replayer: request not found: %s", mreq) + } + r.log("returning %v", call.response) + if call.response.err != nil { + return call.response.err + } + proto.Merge(res.(proto.Message), call.response.msg) // copy msg into res + return nil +} + +func (r *Replayer) interceptStream(ctx context.Context, _ *grpc.StreamDesc, _ *grpc.ClientConn, method string, _ grpc.Streamer, _ ...grpc.CallOption) (grpc.ClientStream, error) { + r.log("create-stream %s", method) + str := r.extractStream(method) + if str == nil { + return nil, fmt.Errorf("replayer: stream not found for method %s", method) + } + if str.createErr != nil { + return nil, str.createErr + } + return &repClientStream{ctx: ctx, str: str}, nil +} + +type repClientStream struct { + ctx context.Context + str *stream +} + +func (rcs *repClientStream) Context() context.Context { return rcs.ctx } + +func (rcs *repClientStream) SendMsg(m interface{}) error { + if len(rcs.str.sends) == 0 { + return fmt.Errorf("replayer: no more sends for stream %s, created at index %d", + rcs.str.method, rcs.str.createIndex) + } + // TODO(jba): Do not assume that the sends happen in the same order on replay. + msg := rcs.str.sends[0] + rcs.str.sends = rcs.str.sends[1:] + return msg.err +} + +func (rcs *repClientStream) RecvMsg(m interface{}) error { + if len(rcs.str.recvs) == 0 { + return fmt.Errorf("replayer: no more receives for stream %s, created at index %d", + rcs.str.method, rcs.str.createIndex) + } + msg := rcs.str.recvs[0] + rcs.str.recvs = rcs.str.recvs[1:] + if msg.err != nil { + return msg.err + } + proto.Merge(m.(proto.Message), msg.msg) // copy msg into m + return nil +} + +func (rcs *repClientStream) Header() (metadata.MD, error) { + log.Printf("replay: stream metadata not supported") + return nil, nil +} + +func (rcs *repClientStream) Trailer() metadata.MD { + log.Printf("replay: stream metadata not supported") + return nil +} + +func (rcs *repClientStream) CloseSend() error { + return nil +} + +// extractCall finds the first call in the list with the same method +// and request. It returns nil if it can't find such a call. +func (r *Replayer) extractCall(method string, req proto.Message) *call { + r.mu.Lock() + defer r.mu.Unlock() + for i, call := range r.calls { + if call == nil { + continue + } + if method == call.method && proto.Equal(req, call.request) { + r.calls[i] = nil // nil out this call so we don't reuse it + return call + } + } + return nil +} + +func (r *Replayer) extractStream(method string) *stream { + r.mu.Lock() + defer r.mu.Unlock() + for i, stream := range r.streams { + if stream == nil { + continue + } + if method == stream.method { + r.streams[i] = nil + return stream + } + } + return nil +} + +// Fprint reads the entries from filename and writes them to w in human-readable form. +// It is intended for debugging. +func Fprint(w io.Writer, filename string) error { + f, err := os.Open(filename) + if err != nil { + return err + } + defer f.Close() + return FprintReader(w, f) +} + +// FprintReader reads the entries from r and writes them to w in human-readable form. +// It is intended for debugging. 
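+// Each entry is printed as a header line followed by the message in proto
+// text format (or the error), for example:
+//
+//	#1: kind: REQUEST, method: /intstore.IntStore/Set, ref index: 0, message:
+//	name: "a"
+//	value: 1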
+func FprintReader(w io.Writer, r io.Reader) error { + initial, err := readHeader(r) + if err != nil { + return err + } + fmt.Fprintf(w, "initial state: %q\n", string(initial)) + for i := 1; ; i++ { + e, err := readEntry(r) + if err != nil { + return err + } + if e == nil { + return nil + } + + s := "message" + if e.msg.err != nil { + s = "error" + } + fmt.Fprintf(w, "#%d: kind: %s, method: %s, ref index: %d, %s:\n", + i, e.kind, e.method, e.refIndex, s) + if e.msg.err == nil { + if err := proto.MarshalText(w, e.msg.msg); err != nil { + return err + } + } else { + fmt.Fprintf(w, "%v\n", e.msg.err) + } + } +} + +// An entry holds one gRPC action (request, response, etc.). +type entry struct { + kind pb.Entry_Kind + method string + msg message + refIndex int // index of corresponding request or create-stream +} + +func (e1 *entry) equal(e2 *entry) bool { + if e1 == nil && e2 == nil { + return true + } + if e1 == nil || e2 == nil { + return false + } + return e1.kind == e2.kind && + e1.method == e2.method && + proto.Equal(e1.msg.msg, e2.msg.msg) && + errEqual(e1.msg.err, e2.msg.err) && + e1.refIndex == e2.refIndex +} + +func errEqual(e1, e2 error) bool { + if e1 == e2 { + return true + } + s1, ok1 := status.FromError(e1) + s2, ok2 := status.FromError(e2) + if !ok1 || !ok2 { + return false + } + return proto.Equal(s1.Proto(), s2.Proto()) +} + +// message holds either a single proto.Message or an error. +type message struct { + msg proto.Message + err error +} + +func (m *message) set(msg interface{}, err error) { + m.err = err + if err != io.EOF && msg != nil { + m.msg = msg.(proto.Message) + } +} + +// File format: +// header +// sequence of Entry protos +// +// Header format: +// magic string +// a record containing the bytes of the initial state + +const magic = "RPCReplay" + +func writeHeader(w io.Writer, initial []byte) error { + if _, err := io.WriteString(w, magic); err != nil { + return err + } + return writeRecord(w, initial) +} + +func readHeader(r io.Reader) ([]byte, error) { + var buf [len(magic)]byte + if _, err := io.ReadFull(r, buf[:]); err != nil { + if err == io.EOF { + err = errors.New("rpcreplay: empty replay file") + } + return nil, err + } + if string(buf[:]) != magic { + return nil, errors.New("rpcreplay: not a replay file (does not begin with magic string)") + } + bytes, err := readRecord(r) + if err == io.EOF { + err = errors.New("rpcreplay: missing initial state") + } + return bytes, err +} + +func writeEntry(w io.Writer, e *entry) error { + var m proto.Message + if e.msg.err != nil && e.msg.err != io.EOF { + s, ok := status.FromError(e.msg.err) + if !ok { + return fmt.Errorf("rpcreplay: error %v is not a Status", e.msg.err) + } + m = s.Proto() + } else { + m = e.msg.msg + } + var a *any.Any + var err error + if m != nil { + a, err = ptypes.MarshalAny(m) + if err != nil { + return err + } + } + pe := &pb.Entry{ + Kind: e.kind, + Method: e.method, + Message: a, + IsError: e.msg.err != nil, + RefIndex: int32(e.refIndex), + } + bytes, err := proto.Marshal(pe) + if err != nil { + return err + } + return writeRecord(w, bytes) +} + +func readEntry(r io.Reader) (*entry, error) { + buf, err := readRecord(r) + if err == io.EOF { + return nil, nil + } + if err != nil { + return nil, err + } + var pe pb.Entry + if err := proto.Unmarshal(buf, &pe); err != nil { + return nil, err + } + var msg message + if pe.Message != nil { + var any ptypes.DynamicAny + if err := ptypes.UnmarshalAny(pe.Message, &any); err != nil { + return nil, err + } + if pe.IsError { + msg.err = 
status.ErrorProto(any.Message.(*spb.Status)) + } else { + msg.msg = any.Message + } + } else if pe.IsError { + msg.err = io.EOF + } else if pe.Kind != pb.Entry_CREATE_STREAM { + return nil, errors.New("rpcreplay: entry with nil message and false is_error") + } + return &entry{ + kind: pe.Kind, + method: pe.Method, + msg: msg, + refIndex: int(pe.RefIndex), + }, nil +} + +// A record consists of an unsigned 32-bit little-endian length L followed by L +// bytes. + +func writeRecord(w io.Writer, data []byte) error { + if err := binary.Write(w, binary.LittleEndian, uint32(len(data))); err != nil { + return err + } + _, err := w.Write(data) + return err +} + +func readRecord(r io.Reader) ([]byte, error) { + var size uint32 + if err := binary.Read(r, binary.LittleEndian, &size); err != nil { + return nil, err + } + buf := make([]byte, size) + if _, err := io.ReadFull(r, buf); err != nil { + return nil, err + } + return buf, nil +} diff --git a/vendor/cloud.google.com/go/rpcreplay/rpcreplay_test.go b/vendor/cloud.google.com/go/rpcreplay/rpcreplay_test.go new file mode 100644 index 0000000..6656871 --- /dev/null +++ b/vendor/cloud.google.com/go/rpcreplay/rpcreplay_test.go @@ -0,0 +1,362 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
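+
+// A worked example of the record framing tested below: a record is a
+// little-endian uint32 length followed by that many bytes, so
+// writeRecord(w, []byte{1, 2, 3}) emits 03 00 00 00 01 02 03, and a replay
+// file begins with the magic string "RPCReplay" followed by one record
+// holding the initial state bytes.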
+ +package rpcreplay + +import ( + "bytes" + "io" + "testing" + + "cloud.google.com/go/internal/testutil" + ipb "cloud.google.com/go/rpcreplay/proto/intstore" + rpb "cloud.google.com/go/rpcreplay/proto/rpcreplay" + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestRecordIO(t *testing.T) { + buf := &bytes.Buffer{} + want := []byte{1, 2, 3} + if err := writeRecord(buf, want); err != nil { + t.Fatal(err) + } + got, err := readRecord(buf) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(got, want) { + t.Errorf("got %v, want %v", got, want) + } +} + +func TestHeaderIO(t *testing.T) { + buf := &bytes.Buffer{} + want := []byte{1, 2, 3} + if err := writeHeader(buf, want); err != nil { + t.Fatal(err) + } + got, err := readHeader(buf) + if err != nil { + t.Fatal(err) + } + if !testutil.Equal(got, want) { + t.Errorf("got %v, want %v", got, want) + } + + // readHeader errors + for _, contents := range []string{"", "badmagic", "gRPCReplay"} { + if _, err := readHeader(bytes.NewBufferString(contents)); err == nil { + t.Errorf("%q: got nil, want error", contents) + } + } +} + +func TestEntryIO(t *testing.T) { + for i, want := range []*entry{ + { + kind: rpb.Entry_REQUEST, + method: "method", + msg: message{msg: &rpb.Entry{}}, + refIndex: 7, + }, + { + kind: rpb.Entry_RESPONSE, + method: "method", + msg: message{err: status.Error(codes.NotFound, "not found")}, + refIndex: 8, + }, + { + kind: rpb.Entry_RECV, + method: "method", + msg: message{err: io.EOF}, + refIndex: 3, + }, + } { + buf := &bytes.Buffer{} + if err := writeEntry(buf, want); err != nil { + t.Fatal(err) + } + got, err := readEntry(buf) + if err != nil { + t.Fatal(err) + } + if !got.equal(want) { + t.Errorf("#%d: got %v, want %v", i, got, want) + } + } +} + +var initialState = []byte{1, 2, 3} + +func TestRecord(t *testing.T) { + srv := newIntStoreServer() + defer srv.stop() + buf := record(t, srv) + + gotIstate, err := readHeader(buf) + if err != nil { + t.Fatal(err) + } + if !testutil.Equal(gotIstate, initialState) { + t.Fatalf("got %v, want %v", gotIstate, initialState) + } + item := &ipb.Item{Name: "a", Value: 1} + wantEntries := []*entry{ + // Set + { + kind: rpb.Entry_REQUEST, + method: "/intstore.IntStore/Set", + msg: message{msg: item}, + }, + { + kind: rpb.Entry_RESPONSE, + msg: message{msg: &ipb.SetResponse{PrevValue: 0}}, + refIndex: 1, + }, + // Get + { + kind: rpb.Entry_REQUEST, + method: "/intstore.IntStore/Get", + msg: message{msg: &ipb.GetRequest{Name: "a"}}, + }, + { + kind: rpb.Entry_RESPONSE, + msg: message{msg: item}, + refIndex: 3, + }, + { + kind: rpb.Entry_REQUEST, + method: "/intstore.IntStore/Get", + msg: message{msg: &ipb.GetRequest{Name: "x"}}, + }, + { + kind: rpb.Entry_RESPONSE, + msg: message{err: status.Error(codes.NotFound, `"x"`)}, + refIndex: 5, + }, + // ListItems + { // entry #7 + kind: rpb.Entry_CREATE_STREAM, + method: "/intstore.IntStore/ListItems", + }, + { + kind: rpb.Entry_SEND, + msg: message{msg: &ipb.ListItemsRequest{}}, + refIndex: 7, + }, + { + kind: rpb.Entry_RECV, + msg: message{msg: item}, + refIndex: 7, + }, + { + kind: rpb.Entry_RECV, + msg: message{err: io.EOF}, + refIndex: 7, + }, + // SetStream + { // entry #11 + kind: rpb.Entry_CREATE_STREAM, + method: "/intstore.IntStore/SetStream", + }, + { + kind: rpb.Entry_SEND, + msg: message{msg: &ipb.Item{Name: "b", Value: 2}}, + refIndex: 11, + }, + { + kind: rpb.Entry_SEND, + msg: message{msg: &ipb.Item{Name: "c", 
Value: 3}}, + refIndex: 11, + }, + { + kind: rpb.Entry_RECV, + msg: message{msg: &ipb.Summary{Count: 2}}, + refIndex: 11, + }, + + // StreamChat + { // entry #15 + kind: rpb.Entry_CREATE_STREAM, + method: "/intstore.IntStore/StreamChat", + }, + { + kind: rpb.Entry_SEND, + msg: message{msg: &ipb.Item{Name: "d", Value: 4}}, + refIndex: 15, + }, + { + kind: rpb.Entry_RECV, + msg: message{msg: &ipb.Item{Name: "d", Value: 4}}, + refIndex: 15, + }, + { + kind: rpb.Entry_SEND, + msg: message{msg: &ipb.Item{Name: "e", Value: 5}}, + refIndex: 15, + }, + { + kind: rpb.Entry_RECV, + msg: message{msg: &ipb.Item{Name: "e", Value: 5}}, + refIndex: 15, + }, + { + kind: rpb.Entry_RECV, + msg: message{err: io.EOF}, + refIndex: 15, + }, + } + for i, w := range wantEntries { + g, err := readEntry(buf) + if err != nil { + t.Fatalf("#%d: %v", i+1, err) + } + if !g.equal(w) { + t.Errorf("#%d:\ngot %+v\nwant %+v", i+1, g, w) + } + } + g, err := readEntry(buf) + if err != nil { + t.Fatal(err) + } + if g != nil { + t.Errorf("\ngot %+v\nwant nil", g) + } +} + +func TestReplay(t *testing.T) { + srv := newIntStoreServer() + defer srv.stop() + + buf := record(t, srv) + rep, err := NewReplayerReader(buf) + if err != nil { + t.Fatal(err) + } + if got, want := rep.Initial(), initialState; !testutil.Equal(got, want) { + t.Fatalf("got %v, want %v", got, want) + } + // Replay the test. + testService(t, srv.Addr, rep.DialOptions()) +} + +func record(t *testing.T, srv *intStoreServer) *bytes.Buffer { + buf := &bytes.Buffer{} + rec, err := NewRecorderWriter(buf, initialState) + if err != nil { + t.Fatal(err) + } + testService(t, srv.Addr, rec.DialOptions()) + if err := rec.Close(); err != nil { + t.Fatal(err) + } + return buf +} + +func testService(t *testing.T, addr string, opts []grpc.DialOption) { + conn, err := grpc.Dial(addr, + append([]grpc.DialOption{grpc.WithInsecure()}, opts...)...) 
+ if err != nil { + t.Fatal(err) + } + defer conn.Close() + client := ipb.NewIntStoreClient(conn) + ctx := context.Background() + item := &ipb.Item{Name: "a", Value: 1} + res, err := client.Set(ctx, item) + if err != nil { + t.Fatal(err) + } + if res.PrevValue != 0 { + t.Errorf("got %d, want 0", res.PrevValue) + } + got, err := client.Get(ctx, &ipb.GetRequest{Name: "a"}) + if err != nil { + t.Fatal(err) + } + if !proto.Equal(got, item) { + t.Errorf("got %v, want %v", got, item) + } + _, err = client.Get(ctx, &ipb.GetRequest{Name: "x"}) + if err == nil { + t.Fatal("got nil, want error") + } + if _, ok := status.FromError(err); !ok { + t.Errorf("got error type %T, want a grpc/status.Status", err) + } + + wantItems := []*ipb.Item{item} + lic, err := client.ListItems(ctx, &ipb.ListItemsRequest{}) + if err != nil { + t.Fatal(err) + } + for i := 0; ; i++ { + item, err := lic.Recv() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + if i >= len(wantItems) || !proto.Equal(item, wantItems[i]) { + t.Fatalf("%d: bad item", i) + } + } + + ssc, err := client.SetStream(ctx) + if err != nil { + t.Fatal(err) + } + + must := func(err error) { + if err != nil { + t.Fatal(err) + } + } + + for i, name := range []string{"b", "c"} { + must(ssc.Send(&ipb.Item{Name: name, Value: int32(i + 2)})) + } + summary, err := ssc.CloseAndRecv() + if err != nil { + t.Fatal(err) + } + if got, want := summary.Count, int32(2); got != want { + t.Fatalf("got %d, want %d", got, want) + } + + chatc, err := client.StreamChat(ctx) + if err != nil { + t.Fatal(err) + } + for i, name := range []string{"d", "e"} { + item := &ipb.Item{Name: name, Value: int32(i + 4)} + must(chatc.Send(item)) + got, err := chatc.Recv() + if err != nil { + t.Fatal(err) + } + if !proto.Equal(got, item) { + t.Errorf("got %v, want %v", got, item) + } + } + must(chatc.CloseSend()) + if _, err := chatc.Recv(); err != io.EOF { + t.Fatalf("got %v, want EOF", err) + } +} diff --git a/vendor/cloud.google.com/go/run-tests.sh b/vendor/cloud.google.com/go/run-tests.sh new file mode 100755 index 0000000..f47ff50 --- /dev/null +++ b/vendor/cloud.google.com/go/run-tests.sh @@ -0,0 +1,88 @@ +#!/bin/bash + +# Selectively run tests for this repo, based on what has changed +# in a commit. Runs short tests for the whole repo, and full tests +# for changed directories. + +set -e + +prefix=cloud.google.com/go + +dryrun=false +if [[ $1 == "-n" ]]; then + dryrun=true + shift +fi + +if [[ $1 == "" ]]; then + echo >&2 "usage: $0 [-n] COMMIT" + exit 1 +fi + +# Files or directories that cause all tests to run if modified. +declare -A run_all +run_all=([.travis.yml]=1 [run-tests.sh]=1) + +function run { + if $dryrun; then + echo $* + else + (set -x; $*) + fi +} + + +# Find all the packages that have changed in this commit. +declare -A changed_packages + +for f in $(git diff-tree --no-commit-id --name-only -r $1); do + if [[ ${run_all[$f]} == 1 ]]; then + # This change requires a full test. Do it and exit. + run go test -race -v $prefix/... + exit + fi + # Map, e.g., "spanner/client.go" to "$prefix/spanner". + d=$(dirname $f) + if [[ $d == "." ]]; then + pkg=$prefix + else + pkg=$prefix/$d + fi + changed_packages[$pkg]=1 +done + +echo "changed packages: ${!changed_packages[*]}" + + +# Reports whether its argument, a package name, depends (recursively) +# on a changed package. +function depends_on_changed_package { + # According to go list, a package does not depend on itself, so + # we test that separately. 
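+  # (A return status of 0 means "yes" in bash, so callers can use this
+  # function directly as an if condition.)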
+  if [[ ${changed_packages[$1]} == 1 ]]; then
+    return 0
+  fi
+  for dep in $(go list -f '{{range .Deps}}{{.}} {{end}}' $1); do
+    if [[ ${changed_packages[$dep]} == 1 ]]; then
+      return 0
+    fi
+  done
+  return 1
+}
+
+# Collect the packages into two separate lists. (It is faster to go test a
+# list of packages than to go test each one individually.)
+
+shorts=
+fulls=
+for pkg in $(go list $prefix/...); do      # for each package in the repo
+  if depends_on_changed_package $pkg; then #   if it depends on a changed package
+    fulls="$fulls $pkg"                    #     run the full test
+  else                                     #   otherwise
+    shorts="$shorts $pkg"                  #     run the short test
+  fi
+done
+run go test -race -v -short $shorts
+if [[ $fulls != "" ]]; then
+  run go test -race -v $fulls
+fi
diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client.go
new file mode 100644
index 0000000..cb260a3
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client.go
@@ -0,0 +1,516 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package database
+
+import (
+	"math"
+	"time"
+
+	"cloud.google.com/go/internal/version"
+	"cloud.google.com/go/longrunning"
+	lroauto "cloud.google.com/go/longrunning/autogen"
+	gax "github.com/googleapis/gax-go"
+	"golang.org/x/net/context"
+	"google.golang.org/api/iterator"
+	"google.golang.org/api/option"
+	"google.golang.org/api/transport"
+	iampb "google.golang.org/genproto/googleapis/iam/v1"
+	longrunningpb "google.golang.org/genproto/googleapis/longrunning"
+	databasepb "google.golang.org/genproto/googleapis/spanner/admin/database/v1"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+)
+
+// DatabaseAdminCallOptions contains the retry settings for each method of DatabaseAdminClient.
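+// As configured below, the idempotent methods retry on DeadlineExceeded and
+// Unavailable with exponential backoff (1s initial delay, 32s cap, 1.3
+// multiplier); the non-idempotent methods get no retry options.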
+type DatabaseAdminCallOptions struct {
+	ListDatabases      []gax.CallOption
+	CreateDatabase     []gax.CallOption
+	GetDatabase        []gax.CallOption
+	UpdateDatabaseDdl  []gax.CallOption
+	DropDatabase       []gax.CallOption
+	GetDatabaseDdl     []gax.CallOption
+	SetIamPolicy       []gax.CallOption
+	GetIamPolicy       []gax.CallOption
+	TestIamPermissions []gax.CallOption
+}
+
+func defaultDatabaseAdminClientOptions() []option.ClientOption {
+	return []option.ClientOption{
+		option.WithEndpoint("spanner.googleapis.com:443"),
+		option.WithScopes(DefaultAuthScopes()...),
+	}
+}
+
+func defaultDatabaseAdminCallOptions() *DatabaseAdminCallOptions {
+	retry := map[[2]string][]gax.CallOption{
+		{"default", "idempotent"}: {
+			gax.WithRetry(func() gax.Retryer {
+				return gax.OnCodes([]codes.Code{
+					codes.DeadlineExceeded,
+					codes.Unavailable,
+				}, gax.Backoff{
+					Initial:    1000 * time.Millisecond,
+					Max:        32000 * time.Millisecond,
+					Multiplier: 1.3,
+				})
+			}),
+		},
+	}
+	return &DatabaseAdminCallOptions{
+		ListDatabases:      retry[[2]string{"default", "idempotent"}],
+		CreateDatabase:     retry[[2]string{"default", "non_idempotent"}],
+		GetDatabase:        retry[[2]string{"default", "idempotent"}],
+		UpdateDatabaseDdl:  retry[[2]string{"default", "idempotent"}],
+		DropDatabase:       retry[[2]string{"default", "idempotent"}],
+		GetDatabaseDdl:     retry[[2]string{"default", "idempotent"}],
+		SetIamPolicy:       retry[[2]string{"default", "non_idempotent"}],
+		GetIamPolicy:       retry[[2]string{"default", "idempotent"}],
+		TestIamPermissions: retry[[2]string{"default", "non_idempotent"}],
+	}
+}
+
+// DatabaseAdminClient is a client for interacting with Cloud Spanner Database Admin API.
+type DatabaseAdminClient struct {
+	// The connection to the service.
+	conn *grpc.ClientConn
+
+	// The gRPC API client.
+	databaseAdminClient databasepb.DatabaseAdminClient
+
+	// LROClient is used internally to handle longrunning operations.
+	// It is exposed so that its CallOptions can be modified if required.
+	// Users should not Close this client.
+	LROClient *lroauto.OperationsClient
+
+	// The call options for this service.
+	CallOptions *DatabaseAdminCallOptions
+
+	// The x-goog-* metadata to be sent with each request.
+	xGoogMetadata metadata.MD
+}
+
+// NewDatabaseAdminClient creates a new database admin client.
+//
+// Cloud Spanner Database Admin API
+//
+// The Cloud Spanner Database Admin API can be used to create, drop, and
+// list databases. It also enables updating the schema of pre-existing
+// databases.
+func NewDatabaseAdminClient(ctx context.Context, opts ...option.ClientOption) (*DatabaseAdminClient, error) {
+	conn, err := transport.DialGRPC(ctx, append(defaultDatabaseAdminClientOptions(), opts...)...)
+	if err != nil {
+		return nil, err
+	}
+	c := &DatabaseAdminClient{
+		conn:        conn,
+		CallOptions: defaultDatabaseAdminCallOptions(),
+
+		databaseAdminClient: databasepb.NewDatabaseAdminClient(conn),
+	}
+	c.setGoogleClientInfo()
+
+	c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
+	if err != nil {
+		// This error "should not happen", since we are just reusing an old
+		// connection and never actually need to dial.
+		// If this does happen, we could leak conn. However, we cannot close conn:
+		// If the user invoked the function with option.WithGRPCConn,
+		// we would close a connection that's still in use.
+		// TODO(pongad): investigate error conditions.
+		return nil, err
+	}
+	return c, nil
+}
+
+// Connection returns the client's connection to the API service.
+func (c *DatabaseAdminClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *DatabaseAdminClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *DatabaseAdminClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListDatabases lists Cloud Spanner databases. +func (c *DatabaseAdminClient) ListDatabases(ctx context.Context, req *databasepb.ListDatabasesRequest, opts ...gax.CallOption) *DatabaseIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListDatabases[0:len(c.CallOptions.ListDatabases):len(c.CallOptions.ListDatabases)], opts...) + it := &DatabaseIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*databasepb.Database, string, error) { + var resp *databasepb.ListDatabasesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.databaseAdminClient.ListDatabases(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Databases, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// CreateDatabase creates a new Cloud Spanner database and starts to prepare it for serving. +// The returned [long-running operation][google.longrunning.Operation] will +// have a name of the format /operations/ and +// can be used to track preparation of the database. The +// [metadata][google.longrunning.Operation.metadata] field type is +// [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The +// [response][google.longrunning.Operation.response] field type is +// [Database][google.spanner.admin.database.v1.Database], if successful. +func (c *DatabaseAdminClient) CreateDatabase(ctx context.Context, req *databasepb.CreateDatabaseRequest, opts ...gax.CallOption) (*CreateDatabaseOperation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateDatabase[0:len(c.CallOptions.CreateDatabase):len(c.CallOptions.CreateDatabase)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.databaseAdminClient.CreateDatabase(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return &CreateDatabaseOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, resp), + }, nil +} + +// GetDatabase gets the state of a Cloud Spanner database. 
+func (c *DatabaseAdminClient) GetDatabase(ctx context.Context, req *databasepb.GetDatabaseRequest, opts ...gax.CallOption) (*databasepb.Database, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.GetDatabase[0:len(c.CallOptions.GetDatabase):len(c.CallOptions.GetDatabase)], opts...)
+	var resp *databasepb.Database
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.databaseAdminClient.GetDatabase(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// UpdateDatabaseDdl updates the schema of a Cloud Spanner database by
+// creating/altering/dropping tables, columns, indexes, etc. The returned
+// [long-running operation][google.longrunning.Operation] will have a name of
+// the format /operations/ and can be used to
+// track execution of the schema change(s). The
+// [metadata][google.longrunning.Operation.metadata] field type is
+// [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The operation has no response.
+func (c *DatabaseAdminClient) UpdateDatabaseDdl(ctx context.Context, req *databasepb.UpdateDatabaseDdlRequest, opts ...gax.CallOption) (*UpdateDatabaseDdlOperation, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.UpdateDatabaseDdl[0:len(c.CallOptions.UpdateDatabaseDdl):len(c.CallOptions.UpdateDatabaseDdl)], opts...)
+	var resp *longrunningpb.Operation
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.databaseAdminClient.UpdateDatabaseDdl(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return &UpdateDatabaseDdlOperation{
+		lro: longrunning.InternalNewOperation(c.LROClient, resp),
+	}, nil
+}
+
+// DropDatabase drops (aka deletes) a Cloud Spanner database.
+func (c *DatabaseAdminClient) DropDatabase(ctx context.Context, req *databasepb.DropDatabaseRequest, opts ...gax.CallOption) error {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.DropDatabase[0:len(c.CallOptions.DropDatabase):len(c.CallOptions.DropDatabase)], opts...)
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		_, err = c.databaseAdminClient.DropDatabase(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	return err
+}
+
+// GetDatabaseDdl returns the schema of a Cloud Spanner database as a list of formatted
+// DDL statements. This method does not show pending schema updates; those may
+// be queried using the [Operations][google.longrunning.Operations] API.
+func (c *DatabaseAdminClient) GetDatabaseDdl(ctx context.Context, req *databasepb.GetDatabaseDdlRequest, opts ...gax.CallOption) (*databasepb.GetDatabaseDdlResponse, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.GetDatabaseDdl[0:len(c.CallOptions.GetDatabaseDdl):len(c.CallOptions.GetDatabaseDdl)], opts...)
+	var resp *databasepb.GetDatabaseDdlResponse
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.databaseAdminClient.GetDatabaseDdl(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// SetIamPolicy sets the access control policy on a database resource. Replaces any
+// existing policy.
+// +// Authorization requires spanner.databases.setIamPolicy permission on +// [resource][google.iam.v1.SetIamPolicyRequest.resource]. +func (c *DatabaseAdminClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetIamPolicy[0:len(c.CallOptions.SetIamPolicy):len(c.CallOptions.SetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.databaseAdminClient.SetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetIamPolicy gets the access control policy for a database resource. Returns an empty +// policy if a database exists but does not have a policy set. +// +// Authorization requires spanner.databases.getIamPolicy permission on +// [resource][google.iam.v1.GetIamPolicyRequest.resource]. +func (c *DatabaseAdminClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetIamPolicy[0:len(c.CallOptions.GetIamPolicy):len(c.CallOptions.GetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.databaseAdminClient.GetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// TestIamPermissions returns permissions that the caller has on the specified database resource. +// +// Attempting this RPC on a non-existent Cloud Spanner database will result in +// a NOT_FOUND error if the user has spanner.databases.list permission on +// the containing Cloud Spanner instance. Otherwise returns an empty set of +// permissions. +func (c *DatabaseAdminClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.TestIamPermissions[0:len(c.CallOptions.TestIamPermissions):len(c.CallOptions.TestIamPermissions)], opts...) + var resp *iampb.TestIamPermissionsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.databaseAdminClient.TestIamPermissions(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DatabaseIterator manages a stream of *databasepb.Database. +type DatabaseIterator struct { + items []*databasepb.Database + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*databasepb.Database, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *DatabaseIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. 
Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *DatabaseIterator) Next() (*databasepb.Database, error) { + var item *databasepb.Database + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *DatabaseIterator) bufLen() int { + return len(it.items) +} + +func (it *DatabaseIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// CreateDatabaseOperation manages a long-running operation from CreateDatabase. +type CreateDatabaseOperation struct { + lro *longrunning.Operation +} + +// CreateDatabaseOperation returns a new CreateDatabaseOperation from a given name. +// The name must be that of a previously created CreateDatabaseOperation, possibly from a different process. +func (c *DatabaseAdminClient) CreateDatabaseOperation(name string) *CreateDatabaseOperation { + return &CreateDatabaseOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning the response and any errors encountered. +// +// See documentation of Poll for error-handling information. +func (op *CreateDatabaseOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*databasepb.Database, error) { + var resp databasepb.Database + if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil { + return nil, err + } + return &resp, nil +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true, and the response of the operation is returned. +// If Poll succeeds and the operation has not completed, the returned response and error are both nil. +func (op *CreateDatabaseOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*databasepb.Database, error) { + var resp databasepb.Database + if err := op.lro.Poll(ctx, &resp, opts...); err != nil { + return nil, err + } + if !op.Done() { + return nil, nil + } + return &resp, nil +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. +func (op *CreateDatabaseOperation) Metadata() (*databasepb.CreateDatabaseMetadata, error) { + var meta databasepb.CreateDatabaseMetadata + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *CreateDatabaseOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. 
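+//
+// The name can be persisted and later used to resume the operation from
+// another process via CreateDatabaseOperation. A sketch (savedName is
+// assumed to have been stored earlier):
+//
+//    op := c.CreateDatabaseOperation(savedName)
+//    db, err := op.Wait(ctx)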
+func (op *CreateDatabaseOperation) Name() string { + return op.lro.Name() +} + +// UpdateDatabaseDdlOperation manages a long-running operation from UpdateDatabaseDdl. +type UpdateDatabaseDdlOperation struct { + lro *longrunning.Operation +} + +// UpdateDatabaseDdlOperation returns a new UpdateDatabaseDdlOperation from a given name. +// The name must be that of a previously created UpdateDatabaseDdlOperation, possibly from a different process. +func (c *DatabaseAdminClient) UpdateDatabaseDdlOperation(name string) *UpdateDatabaseDdlOperation { + return &UpdateDatabaseDdlOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning any error encountered. +// +// See documentation of Poll for error-handling information. +func (op *UpdateDatabaseDdlOperation) Wait(ctx context.Context, opts ...gax.CallOption) error { + return op.lro.WaitWithInterval(ctx, nil, 45000*time.Millisecond, opts...) +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, op.Done will return true. +func (op *UpdateDatabaseDdlOperation) Poll(ctx context.Context, opts ...gax.CallOption) error { + return op.lro.Poll(ctx, nil, opts...) +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. +func (op *UpdateDatabaseDdlOperation) Metadata() (*databasepb.UpdateDatabaseDdlMetadata, error) { + var meta databasepb.UpdateDatabaseDdlMetadata + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *UpdateDatabaseDdlOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *UpdateDatabaseDdlOperation) Name() string { + return op.lro.Name() +} diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client_example_test.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client_example_test.go new file mode 100644 index 0000000..be6dc82 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client_example_test.go @@ -0,0 +1,207 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package database_test + +import ( + "cloud.google.com/go/spanner/admin/database/apiv1" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + iampb "google.golang.org/genproto/googleapis/iam/v1" + databasepb "google.golang.org/genproto/googleapis/spanner/admin/database/v1" +) + +func ExampleNewDatabaseAdminClient() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleDatabaseAdminClient_ListDatabases() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &databasepb.ListDatabasesRequest{ + // TODO: Fill request struct fields. + } + it := c.ListDatabases(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleDatabaseAdminClient_CreateDatabase() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &databasepb.CreateDatabaseRequest{ + // TODO: Fill request struct fields. + } + op, err := c.CreateDatabase(ctx, req) + if err != nil { + // TODO: Handle error. + } + + resp, err := op.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleDatabaseAdminClient_GetDatabase() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &databasepb.GetDatabaseRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetDatabase(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleDatabaseAdminClient_UpdateDatabaseDdl() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &databasepb.UpdateDatabaseDdlRequest{ + // TODO: Fill request struct fields. + } + op, err := c.UpdateDatabaseDdl(ctx, req) + if err != nil { + // TODO: Handle error. + } + + err = op.Wait(ctx) + // TODO: Handle error. +} + +func ExampleDatabaseAdminClient_DropDatabase() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &databasepb.DropDatabaseRequest{ + // TODO: Fill request struct fields. + } + err = c.DropDatabase(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleDatabaseAdminClient_GetDatabaseDdl() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &databasepb.GetDatabaseDdlRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetDatabaseDdl(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleDatabaseAdminClient_SetIamPolicy() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &iampb.SetIamPolicyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetIamPolicy(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
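+ // resp is the policy as written by the service; for instance, its Etag
+ // could seed a later read-modify-write cycle (hypothetical usage).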
+ _ = resp +} + +func ExampleDatabaseAdminClient_GetIamPolicy() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &iampb.GetIamPolicyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetIamPolicy(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleDatabaseAdminClient_TestIamPermissions() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &iampb.TestIamPermissionsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.TestIamPermissions(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/doc.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/doc.go new file mode 100644 index 0000000..5706f70 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/doc.go @@ -0,0 +1,46 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package database is an auto-generated package for the +// Cloud Spanner Database Admin API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +package database // import "cloud.google.com/go/spanner/admin/database/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.admin", + } +} diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/mock_test.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/mock_test.go new file mode 100644 index 0000000..5c08210 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/mock_test.go @@ -0,0 +1,798 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. 
DO NOT EDIT. + +package database + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + iampb "google.golang.org/genproto/googleapis/iam/v1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + databasepb "google.golang.org/genproto/googleapis/spanner/admin/database/v1" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockDatabaseAdminServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + databasepb.DatabaseAdminServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockDatabaseAdminServer) ListDatabases(ctx context.Context, req *databasepb.ListDatabasesRequest) (*databasepb.ListDatabasesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*databasepb.ListDatabasesResponse), nil +} + +func (s *mockDatabaseAdminServer) CreateDatabase(ctx context.Context, req *databasepb.CreateDatabaseRequest) (*longrunningpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +func (s *mockDatabaseAdminServer) GetDatabase(ctx context.Context, req *databasepb.GetDatabaseRequest) (*databasepb.Database, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*databasepb.Database), nil +} + +func (s *mockDatabaseAdminServer) UpdateDatabaseDdl(ctx context.Context, req *databasepb.UpdateDatabaseDdlRequest) (*longrunningpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +func (s *mockDatabaseAdminServer) DropDatabase(ctx context.Context, req *databasepb.DropDatabaseRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return 
s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockDatabaseAdminServer) GetDatabaseDdl(ctx context.Context, req *databasepb.GetDatabaseDdlRequest) (*databasepb.GetDatabaseDdlResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*databasepb.GetDatabaseDdlResponse), nil +} + +func (s *mockDatabaseAdminServer) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.Policy), nil +} + +func (s *mockDatabaseAdminServer) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.Policy), nil +} + +func (s *mockDatabaseAdminServer) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.TestIamPermissionsResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
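+//
+// The pattern: TestMain registers mockDatabaseAdmin on an in-process gRPC
+// server bound to a localhost socket, dials it with grpc.WithInsecure, and
+// exposes the resulting connection through clientOpt (option.WithGRPCConn),
+// so each test drives the real client stack against canned responses.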
+var clientOpt option.ClientOption + +var ( + mockDatabaseAdmin mockDatabaseAdminServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + databasepb.RegisterDatabaseAdminServer(serv, &mockDatabaseAdmin) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestDatabaseAdminListDatabases(t *testing.T) { + var nextPageToken string = "" + var databasesElement *databasepb.Database = &databasepb.Database{} + var databases = []*databasepb.Database{databasesElement} + var expectedResponse = &databasepb.ListDatabasesResponse{ + NextPageToken: nextPageToken, + Databases: databases, + } + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s/instances/%s", "[PROJECT]", "[INSTANCE]") + var request = &databasepb.ListDatabasesRequest{ + Parent: formattedParent, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDatabases(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Databases[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDatabaseAdminListDatabasesError(t *testing.T) { + errCode := codes.PermissionDenied + mockDatabaseAdmin.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s/instances/%s", "[PROJECT]", "[INSTANCE]") + var request = &databasepb.ListDatabasesRequest{ + Parent: formattedParent, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDatabases(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDatabaseAdminCreateDatabase(t *testing.T) { + var name string = "name3373707" + var expectedResponse = &databasepb.Database{ + Name: name, + } + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var formattedParent string = fmt.Sprintf("projects/%s/instances/%s", "[PROJECT]", "[INSTANCE]") + var createStatement string = "createStatement552974828" + var request = &databasepb.CreateDatabaseRequest{ + Parent: formattedParent, + CreateStatement: createStatement, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.CreateDatabase(context.Background(), request) + if err != nil { + 
t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDatabaseAdminCreateDatabaseError(t *testing.T) { + errCode := codes.PermissionDenied + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var formattedParent string = fmt.Sprintf("projects/%s/instances/%s", "[PROJECT]", "[INSTANCE]") + var createStatement string = "createStatement552974828" + var request = &databasepb.CreateDatabaseRequest{ + Parent: formattedParent, + CreateStatement: createStatement, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.CreateDatabase(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDatabaseAdminGetDatabase(t *testing.T) { + var name2 string = "name2-1052831874" + var expectedResponse = &databasepb.Database{ + Name: name2, + } + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/instances/%s/databases/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &databasepb.GetDatabaseRequest{ + Name: formattedName, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDatabase(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDatabaseAdminGetDatabaseError(t *testing.T) { + errCode := codes.PermissionDenied + mockDatabaseAdmin.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/instances/%s/databases/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &databasepb.GetDatabaseRequest{ + Name: formattedName, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDatabase(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDatabaseAdminUpdateDatabaseDdl(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockDatabaseAdmin.resps = 
append(mockDatabaseAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var formattedDatabase string = fmt.Sprintf("projects/%s/instances/%s/databases/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]") + var statements []string = nil + var request = &databasepb.UpdateDatabaseDdlRequest{ + Database: formattedDatabase, + Statements: statements, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.UpdateDatabaseDdl(context.Background(), request) + if err != nil { + t.Fatal(err) + } + err = respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDatabaseAdminUpdateDatabaseDdlError(t *testing.T) { + errCode := codes.PermissionDenied + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var formattedDatabase string = fmt.Sprintf("projects/%s/instances/%s/databases/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]") + var statements []string = nil + var request = &databasepb.UpdateDatabaseDdlRequest{ + Database: formattedDatabase, + Statements: statements, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.UpdateDatabaseDdl(context.Background(), request) + if err != nil { + t.Fatal(err) + } + err = respLRO.Wait(context.Background()) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestDatabaseAdminDropDatabase(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) + + var formattedDatabase string = fmt.Sprintf("projects/%s/instances/%s/databases/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &databasepb.DropDatabaseRequest{ + Database: formattedDatabase, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DropDatabase(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDatabaseAdminDropDatabaseError(t *testing.T) { + errCode := codes.PermissionDenied + mockDatabaseAdmin.err = gstatus.Error(errCode, "test error") + + var formattedDatabase string = fmt.Sprintf("projects/%s/instances/%s/databases/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &databasepb.DropDatabaseRequest{ + Database: formattedDatabase, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DropDatabase(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, 
errCode) + } +} +func TestDatabaseAdminGetDatabaseDdl(t *testing.T) { + var expectedResponse *databasepb.GetDatabaseDdlResponse = &databasepb.GetDatabaseDdlResponse{} + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) + + var formattedDatabase string = fmt.Sprintf("projects/%s/instances/%s/databases/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &databasepb.GetDatabaseDdlRequest{ + Database: formattedDatabase, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDatabaseDdl(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDatabaseAdminGetDatabaseDdlError(t *testing.T) { + errCode := codes.PermissionDenied + mockDatabaseAdmin.err = gstatus.Error(errCode, "test error") + + var formattedDatabase string = fmt.Sprintf("projects/%s/instances/%s/databases/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &databasepb.GetDatabaseDdlRequest{ + Database: formattedDatabase, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDatabaseDdl(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDatabaseAdminSetIamPolicy(t *testing.T) { + var version int32 = 351608024 + var etag []byte = []byte("21") + var expectedResponse = &iampb.Policy{ + Version: version, + Etag: etag, + } + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) + + var formattedResource string = fmt.Sprintf("projects/%s/instances/%s/databases/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]") + var policy *iampb.Policy = &iampb.Policy{} + var request = &iampb.SetIamPolicyRequest{ + Resource: formattedResource, + Policy: policy, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetIamPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDatabaseAdminSetIamPolicyError(t *testing.T) { + errCode := codes.PermissionDenied + mockDatabaseAdmin.err = gstatus.Error(errCode, "test error") + + var formattedResource string = fmt.Sprintf("projects/%s/instances/%s/databases/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]") + var policy *iampb.Policy = &iampb.Policy{} + var request = &iampb.SetIamPolicyRequest{ + Resource: formattedResource, + Policy: policy, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetIamPolicy(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", 
err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDatabaseAdminGetIamPolicy(t *testing.T) { + var version int32 = 351608024 + var etag []byte = []byte("21") + var expectedResponse = &iampb.Policy{ + Version: version, + Etag: etag, + } + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) + + var formattedResource string = fmt.Sprintf("projects/%s/instances/%s/databases/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &iampb.GetIamPolicyRequest{ + Resource: formattedResource, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetIamPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDatabaseAdminGetIamPolicyError(t *testing.T) { + errCode := codes.PermissionDenied + mockDatabaseAdmin.err = gstatus.Error(errCode, "test error") + + var formattedResource string = fmt.Sprintf("projects/%s/instances/%s/databases/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &iampb.GetIamPolicyRequest{ + Resource: formattedResource, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetIamPolicy(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDatabaseAdminTestIamPermissions(t *testing.T) { + var expectedResponse *iampb.TestIamPermissionsResponse = &iampb.TestIamPermissionsResponse{} + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) + + var formattedResource string = fmt.Sprintf("projects/%s/instances/%s/databases/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]") + var permissions []string = nil + var request = &iampb.TestIamPermissionsRequest{ + Resource: formattedResource, + Permissions: permissions, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.TestIamPermissions(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDatabaseAdminTestIamPermissionsError(t *testing.T) { + errCode := codes.PermissionDenied + mockDatabaseAdmin.err = gstatus.Error(errCode, "test error") + + var formattedResource string = fmt.Sprintf("projects/%s/instances/%s/databases/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]") + var permissions []string = nil + var request = &iampb.TestIamPermissionsRequest{ + Resource: formattedResource, + Permissions: permissions, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := 
c.TestIamPermissions(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/path_funcs.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/path_funcs.go new file mode 100644 index 0000000..5490f57 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/path_funcs.go @@ -0,0 +1,45 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package database + +// DatabaseAdminInstancePath returns the path for the instance resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/instances/%s", project, instance) +// instead. +func DatabaseAdminInstancePath(project, instance string) string { + return "" + + "projects/" + + project + + "/instances/" + + instance + + "" +} + +// DatabaseAdminDatabasePath returns the path for the database resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/instances/%s/databases/%s", project, instance, database) +// instead. +func DatabaseAdminDatabasePath(project, instance, database string) string { + return "" + + "projects/" + + project + + "/instances/" + + instance + + "/databases/" + + database + + "" +} diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/doc.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/doc.go new file mode 100644 index 0000000..574f289 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/doc.go @@ -0,0 +1,46 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package instance is an auto-generated package for the +// Cloud Spanner Instance Admin API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +package instance // import "cloud.google.com/go/spanner/admin/instance/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. 
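+//
+// Callers can also request a narrower scope explicitly at construction; a
+// sketch (assuming google.golang.org/api/option):
+//
+//    c, err := instance.NewInstanceAdminClient(ctx,
+//        option.WithScopes("https://www.googleapis.com/auth/spanner.admin"))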
+func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.admin", + } +} diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client.go new file mode 100644 index 0000000..660428b --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client.go @@ -0,0 +1,700 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package instance + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + "cloud.google.com/go/longrunning" + lroauto "cloud.google.com/go/longrunning/autogen" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + iampb "google.golang.org/genproto/googleapis/iam/v1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + instancepb "google.golang.org/genproto/googleapis/spanner/admin/instance/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// InstanceAdminCallOptions contains the retry settings for each method of InstanceAdminClient. 
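+//
+// Per-call overrides are also possible: gax.CallOptions passed to a method
+// are appended after these defaults, so later options win. A sketch:
+//
+//    resp, err := c.GetInstance(ctx, req,
+//        gax.WithRetry(func() gax.Retryer {
+//            return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
+//                Initial: 100 * time.Millisecond,
+//                Max:     5 * time.Second,
+//            })
+//        }))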
+type InstanceAdminCallOptions struct { + ListInstanceConfigs []gax.CallOption + GetInstanceConfig []gax.CallOption + ListInstances []gax.CallOption + GetInstance []gax.CallOption + CreateInstance []gax.CallOption + UpdateInstance []gax.CallOption + DeleteInstance []gax.CallOption + SetIamPolicy []gax.CallOption + GetIamPolicy []gax.CallOption + TestIamPermissions []gax.CallOption +} + +func defaultInstanceAdminClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("spanner.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultInstanceAdminCallOptions() *InstanceAdminCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 32000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &InstanceAdminCallOptions{ + ListInstanceConfigs: retry[[2]string{"default", "idempotent"}], + GetInstanceConfig: retry[[2]string{"default", "idempotent"}], + ListInstances: retry[[2]string{"default", "idempotent"}], + GetInstance: retry[[2]string{"default", "idempotent"}], + CreateInstance: retry[[2]string{"default", "non_idempotent"}], + UpdateInstance: retry[[2]string{"default", "non_idempotent"}], + DeleteInstance: retry[[2]string{"default", "idempotent"}], + SetIamPolicy: retry[[2]string{"default", "non_idempotent"}], + GetIamPolicy: retry[[2]string{"default", "idempotent"}], + TestIamPermissions: retry[[2]string{"default", "non_idempotent"}], + } +} + +// InstanceAdminClient is a client for interacting with Cloud Spanner Instance Admin API. +type InstanceAdminClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + instanceAdminClient instancepb.InstanceAdminClient + + // LROClient is used internally to handle longrunning operations. + // It is exposed so that its CallOptions can be modified if required. + // Users should not Close this client. + LROClient *lroauto.OperationsClient + + // The call options for this service. + CallOptions *InstanceAdminCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewInstanceAdminClient creates a new instance admin client. +// +// Cloud Spanner Instance Admin API +// +// The Cloud Spanner Instance Admin API can be used to create, delete, +// modify and list instances. Instances are dedicated Cloud Spanner serving +// and storage resources to be used by Cloud Spanner databases. +// +// Each instance has a "configuration", which dictates where the +// serving resources for the Cloud Spanner instance are located (e.g., +// US-central, Europe). Configurations are created by Google based on +// resource availability. +// +// Cloud Spanner billing is based on the instances that exist and their +// sizes. After an instance exists, there are no additional +// per-database or per-operation charges for use of the instance +// (though there may be additional network bandwidth charges). +// Instances offer isolation: problems with databases in one instance +// will not affect other instances. However, within an instance +// databases can affect each other. For example, if one database in an +// instance receives a lot of requests and consumes most of the +// instance resources, fewer resources are available for other +// databases in that instance, and their performance may suffer. 
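+//
+// A minimal construction sketch (assuming Application Default Credentials
+// are available in the environment):
+//
+//    ctx := context.Background()
+//    c, err := instance.NewInstanceAdminClient(ctx)
+//    if err != nil {
+//        // handle error
+//    }
+//    defer c.Close()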
+func NewInstanceAdminClient(ctx context.Context, opts ...option.ClientOption) (*InstanceAdminClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultInstanceAdminClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &InstanceAdminClient{ + conn: conn, + CallOptions: defaultInstanceAdminCallOptions(), + + instanceAdminClient: instancepb.NewInstanceAdminClient(conn), + } + c.setGoogleClientInfo() + + c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn)) + if err != nil { + // This error "should not happen", since we are just reusing old connection + // and never actually need to dial. + // If this does happen, we could leak conn. However, we cannot close conn: + // If the user invoked the function with option.WithGRPCConn, + // we would close a connection that's still in use. + // TODO(pongad): investigate error conditions. + return nil, err + } + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *InstanceAdminClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *InstanceAdminClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *InstanceAdminClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListInstanceConfigs lists the supported instance configurations for a given project. +func (c *InstanceAdminClient) ListInstanceConfigs(ctx context.Context, req *instancepb.ListInstanceConfigsRequest, opts ...gax.CallOption) *InstanceConfigIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListInstanceConfigs[0:len(c.CallOptions.ListInstanceConfigs):len(c.CallOptions.ListInstanceConfigs)], opts...) + it := &InstanceConfigIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*instancepb.InstanceConfig, string, error) { + var resp *instancepb.ListInstanceConfigsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.instanceAdminClient.ListInstanceConfigs(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.InstanceConfigs, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetInstanceConfig gets information about a particular instance configuration. 
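+//
+// Config names are usually discovered via ListInstanceConfigs above; a
+// sketch of fetching one directly (project and config IDs are placeholders):
+//
+//    cfg, err := c.GetInstanceConfig(ctx, &instancepb.GetInstanceConfigRequest{
+//        Name: "projects/my-project/instanceConfigs/regional-us-central1",
+//    })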
+func (c *InstanceAdminClient) GetInstanceConfig(ctx context.Context, req *instancepb.GetInstanceConfigRequest, opts ...gax.CallOption) (*instancepb.InstanceConfig, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetInstanceConfig[0:len(c.CallOptions.GetInstanceConfig):len(c.CallOptions.GetInstanceConfig)], opts...) + var resp *instancepb.InstanceConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.instanceAdminClient.GetInstanceConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListInstances lists all instances in the given project. +func (c *InstanceAdminClient) ListInstances(ctx context.Context, req *instancepb.ListInstancesRequest, opts ...gax.CallOption) *InstanceIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListInstances[0:len(c.CallOptions.ListInstances):len(c.CallOptions.ListInstances)], opts...) + it := &InstanceIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*instancepb.Instance, string, error) { + var resp *instancepb.ListInstancesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.instanceAdminClient.ListInstances(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Instances, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetInstance gets information about a particular instance. +func (c *InstanceAdminClient) GetInstance(ctx context.Context, req *instancepb.GetInstanceRequest, opts ...gax.CallOption) (*instancepb.Instance, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetInstance[0:len(c.CallOptions.GetInstance):len(c.CallOptions.GetInstance)], opts...) + var resp *instancepb.Instance + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.instanceAdminClient.GetInstance(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateInstance creates an instance and begins preparing it to begin serving. The +// returned [long-running operation][google.longrunning.Operation] +// can be used to track the progress of preparing the new +// instance. The instance name is assigned by the caller. If the +// named instance already exists, CreateInstance returns +// ALREADY_EXISTS. +// +// Immediately upon completion of this request: +// +// The instance is readable via the API, with all requested attributes +// but no allocated resources. Its state is CREATING. +// +// Until completion of the returned operation: +// +// Cancelling the operation renders the instance immediately unreadable +// via the API. +// +// The instance can be deleted. +// +// All other attempts to modify the instance are rejected. 
+//
+// Upon completion of the returned operation:
+//
+// Billing for all successfully-allocated resources begins (some types
+// may have lower than the requested levels).
+//
+// Databases can be created in the instance.
+//
+// The instance's allocated resource levels are readable via the API.
+//
+// The instance's state becomes READY.
+//
+// The returned [long-running operation][google.longrunning.Operation] will
+// have a name of the format <instance_name>/operations/<operation_id> and
+// can be used to track creation of the instance. The
+// [metadata][google.longrunning.Operation.metadata] field type is
+// [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata].
+// The [response][google.longrunning.Operation.response] field type is
+// [Instance][google.spanner.admin.instance.v1.Instance], if successful.
+func (c *InstanceAdminClient) CreateInstance(ctx context.Context, req *instancepb.CreateInstanceRequest, opts ...gax.CallOption) (*CreateInstanceOperation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.CreateInstance[0:len(c.CallOptions.CreateInstance):len(c.CallOptions.CreateInstance)], opts...)
+ var resp *longrunningpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.instanceAdminClient.CreateInstance(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return &CreateInstanceOperation{
+ lro: longrunning.InternalNewOperation(c.LROClient, resp),
+ }, nil
+}
+
+// UpdateInstance updates an instance, and begins allocating or releasing resources
+// as requested. The returned [long-running
+// operation][google.longrunning.Operation] can be used to track the
+// progress of updating the instance. If the named instance does not
+// exist, returns NOT_FOUND.
+//
+// Immediately upon completion of this request:
+//
+// For resource types for which a decrease in the instance's allocation
+// has been requested, billing is based on the newly-requested level.
+//
+// Until completion of the returned operation:
+//
+// Cancelling the operation sets its metadata's
+// [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time], and begins
+// restoring resources to their pre-request values. The operation
+// is guaranteed to succeed at undoing all resource changes,
+// after which point it terminates with a CANCELLED status.
+//
+// All other attempts to modify the instance are rejected.
+//
+// Reading the instance via the API continues to give the pre-request
+// resource levels.
+//
+// Upon completion of the returned operation:
+//
+// Billing begins for all successfully-allocated resources (some types
+// may have lower than the requested levels).
+//
+// All newly-reserved resources are available for serving the instance's
+// tables.
+//
+// The instance's new resource levels are readable via the API.
+//
+// The returned [long-running operation][google.longrunning.Operation] will
+// have a name of the format <instance_name>/operations/<operation_id> and
+// can be used to track the instance modification. The
+// [metadata][google.longrunning.Operation.metadata] field type is
+// [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata].
+// The [response][google.longrunning.Operation.response] field type is
+// [Instance][google.spanner.admin.instance.v1.Instance], if successful.
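+//
+// A sketch of a node-count change (assuming the genproto field_mask package
+// is imported; instName and the mask path are placeholders):
+//
+//    op, err := c.UpdateInstance(ctx, &instancepb.UpdateInstanceRequest{
+//        Instance:  &instancepb.Instance{Name: instName, NodeCount: 3},
+//        FieldMask: &field_mask.FieldMask{Paths: []string{"node_count"}},
+//    })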
+// +// Authorization requires spanner.instances.update permission on +// resource [name][google.spanner.admin.instance.v1.Instance.name]. +func (c *InstanceAdminClient) UpdateInstance(ctx context.Context, req *instancepb.UpdateInstanceRequest, opts ...gax.CallOption) (*UpdateInstanceOperation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateInstance[0:len(c.CallOptions.UpdateInstance):len(c.CallOptions.UpdateInstance)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.instanceAdminClient.UpdateInstance(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return &UpdateInstanceOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, resp), + }, nil +} + +// DeleteInstance deletes an instance. +// +// Immediately upon completion of the request: +// +// Billing ceases for all of the instance's reserved resources. +// +// Soon afterward: +// +// The instance and all of its databases immediately and +// irrevocably disappear from the API. All data in the databases +// is permanently deleted. +func (c *InstanceAdminClient) DeleteInstance(ctx context.Context, req *instancepb.DeleteInstanceRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteInstance[0:len(c.CallOptions.DeleteInstance):len(c.CallOptions.DeleteInstance)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.instanceAdminClient.DeleteInstance(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// SetIamPolicy sets the access control policy on an instance resource. Replaces any +// existing policy. +// +// Authorization requires spanner.instances.setIamPolicy on +// [resource][google.iam.v1.SetIamPolicyRequest.resource]. +func (c *InstanceAdminClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetIamPolicy[0:len(c.CallOptions.SetIamPolicy):len(c.CallOptions.SetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.instanceAdminClient.SetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetIamPolicy gets the access control policy for an instance resource. Returns an empty +// policy if an instance exists but does not have a policy set. +// +// Authorization requires spanner.instances.getIamPolicy on +// [resource][google.iam.v1.GetIamPolicyRequest.resource]. +func (c *InstanceAdminClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetIamPolicy[0:len(c.CallOptions.GetIamPolicy):len(c.CallOptions.GetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.instanceAdminClient.GetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +// TestIamPermissions returns permissions that the caller has on the specified instance resource. +// +// Attempting this RPC on a non-existent Cloud Spanner instance resource will +// result in a NOT_FOUND error if the user has spanner.instances.list +// permission on the containing Google Cloud Project. Otherwise returns an +// empty set of permissions. +func (c *InstanceAdminClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.TestIamPermissions[0:len(c.CallOptions.TestIamPermissions):len(c.CallOptions.TestIamPermissions)], opts...) + var resp *iampb.TestIamPermissionsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.instanceAdminClient.TestIamPermissions(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// InstanceConfigIterator manages a stream of *instancepb.InstanceConfig. +type InstanceConfigIterator struct { + items []*instancepb.InstanceConfig + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*instancepb.InstanceConfig, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *InstanceConfigIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *InstanceConfigIterator) Next() (*instancepb.InstanceConfig, error) { + var item *instancepb.InstanceConfig + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *InstanceConfigIterator) bufLen() int { + return len(it.items) +} + +func (it *InstanceConfigIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// InstanceIterator manages a stream of *instancepb.Instance. +type InstanceIterator struct { + items []*instancepb.Instance + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*instancepb.Instance, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *InstanceIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. 
Once Next returns Done, all subsequent calls will return Done. +func (it *InstanceIterator) Next() (*instancepb.Instance, error) { + var item *instancepb.Instance + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *InstanceIterator) bufLen() int { + return len(it.items) +} + +func (it *InstanceIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// CreateInstanceOperation manages a long-running operation from CreateInstance. +type CreateInstanceOperation struct { + lro *longrunning.Operation +} + +// CreateInstanceOperation returns a new CreateInstanceOperation from a given name. +// The name must be that of a previously created CreateInstanceOperation, possibly from a different process. +func (c *InstanceAdminClient) CreateInstanceOperation(name string) *CreateInstanceOperation { + return &CreateInstanceOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning the response and any errors encountered. +// +// See documentation of Poll for error-handling information. +func (op *CreateInstanceOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*instancepb.Instance, error) { + var resp instancepb.Instance + if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil { + return nil, err + } + return &resp, nil +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true, and the response of the operation is returned. +// If Poll succeeds and the operation has not completed, the returned response and error are both nil. +func (op *CreateInstanceOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*instancepb.Instance, error) { + var resp instancepb.Instance + if err := op.lro.Poll(ctx, &resp, opts...); err != nil { + return nil, err + } + if !op.Done() { + return nil, nil + } + return &resp, nil +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. +func (op *CreateInstanceOperation) Metadata() (*instancepb.CreateInstanceMetadata, error) { + var meta instancepb.CreateInstanceMetadata + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *CreateInstanceOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *CreateInstanceOperation) Name() string { + return op.lro.Name() +} + +// UpdateInstanceOperation manages a long-running operation from UpdateInstance. 
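+//
+// A handle can also be reconstructed from an operation name in another
+// process and polled until done (a sketch; "op-name" and the 30-second
+// interval are placeholders):
+//
+//	op := c.UpdateInstanceOperation("op-name")
+//	for {
+//		resp, err := op.Poll(ctx)
+//		if err != nil {
+//			// TODO: Handle error.
+//		}
+//		if op.Done() {
+//			_ = resp // the final Instance, if the operation succeeded
+//			break
+//		}
+//		time.Sleep(30 * time.Second)
+//	}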
+type UpdateInstanceOperation struct { + lro *longrunning.Operation +} + +// UpdateInstanceOperation returns a new UpdateInstanceOperation from a given name. +// The name must be that of a previously created UpdateInstanceOperation, possibly from a different process. +func (c *InstanceAdminClient) UpdateInstanceOperation(name string) *UpdateInstanceOperation { + return &UpdateInstanceOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning the response and any errors encountered. +// +// See documentation of Poll for error-handling information. +func (op *UpdateInstanceOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*instancepb.Instance, error) { + var resp instancepb.Instance + if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil { + return nil, err + } + return &resp, nil +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true, and the response of the operation is returned. +// If Poll succeeds and the operation has not completed, the returned response and error are both nil. +func (op *UpdateInstanceOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*instancepb.Instance, error) { + var resp instancepb.Instance + if err := op.lro.Poll(ctx, &resp, opts...); err != nil { + return nil, err + } + if !op.Done() { + return nil, nil + } + return &resp, nil +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. +func (op *UpdateInstanceOperation) Metadata() (*instancepb.UpdateInstanceMetadata, error) { + var meta instancepb.UpdateInstanceMetadata + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *UpdateInstanceOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *UpdateInstanceOperation) Name() string { + return op.lro.Name() +} diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client_example_test.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client_example_test.go new file mode 100644 index 0000000..894fcae --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client_example_test.go @@ -0,0 +1,235 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package instance_test + +import ( + "cloud.google.com/go/spanner/admin/instance/apiv1" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + iampb "google.golang.org/genproto/googleapis/iam/v1" + instancepb "google.golang.org/genproto/googleapis/spanner/admin/instance/v1" +) + +func ExampleNewInstanceAdminClient() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleInstanceAdminClient_ListInstanceConfigs() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &instancepb.ListInstanceConfigsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListInstanceConfigs(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleInstanceAdminClient_GetInstanceConfig() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &instancepb.GetInstanceConfigRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetInstanceConfig(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleInstanceAdminClient_ListInstances() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &instancepb.ListInstancesRequest{ + // TODO: Fill request struct fields. + } + it := c.ListInstances(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleInstanceAdminClient_GetInstance() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &instancepb.GetInstanceRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetInstance(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleInstanceAdminClient_CreateInstance() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &instancepb.CreateInstanceRequest{ + // TODO: Fill request struct fields. + } + op, err := c.CreateInstance(ctx, req) + if err != nil { + // TODO: Handle error. + } + + resp, err := op.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleInstanceAdminClient_UpdateInstance() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &instancepb.UpdateInstanceRequest{ + // TODO: Fill request struct fields. + } + op, err := c.UpdateInstance(ctx, req) + if err != nil { + // TODO: Handle error. 
+ } + + resp, err := op.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleInstanceAdminClient_DeleteInstance() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &instancepb.DeleteInstanceRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteInstance(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleInstanceAdminClient_SetIamPolicy() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &iampb.SetIamPolicyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetIamPolicy(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleInstanceAdminClient_GetIamPolicy() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &iampb.GetIamPolicyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetIamPolicy(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleInstanceAdminClient_TestIamPermissions() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &iampb.TestIamPermissionsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.TestIamPermissions(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/mock_test.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/mock_test.go new file mode 100644 index 0000000..7b0e1ad --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/mock_test.go @@ -0,0 +1,917 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package instance + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + iampb "google.golang.org/genproto/googleapis/iam/v1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + instancepb "google.golang.org/genproto/googleapis/spanner/admin/instance/v1" + field_maskpb "google.golang.org/genproto/protobuf/field_mask" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockInstanceAdminServer struct { + // Embed for forward compatibility. 
+ // Tests will keep working if more methods are added + // in the future. + instancepb.InstanceAdminServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockInstanceAdminServer) ListInstanceConfigs(ctx context.Context, req *instancepb.ListInstanceConfigsRequest) (*instancepb.ListInstanceConfigsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*instancepb.ListInstanceConfigsResponse), nil +} + +func (s *mockInstanceAdminServer) GetInstanceConfig(ctx context.Context, req *instancepb.GetInstanceConfigRequest) (*instancepb.InstanceConfig, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*instancepb.InstanceConfig), nil +} + +func (s *mockInstanceAdminServer) ListInstances(ctx context.Context, req *instancepb.ListInstancesRequest) (*instancepb.ListInstancesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*instancepb.ListInstancesResponse), nil +} + +func (s *mockInstanceAdminServer) GetInstance(ctx context.Context, req *instancepb.GetInstanceRequest) (*instancepb.Instance, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*instancepb.Instance), nil +} + +func (s *mockInstanceAdminServer) CreateInstance(ctx context.Context, req *instancepb.CreateInstanceRequest) (*longrunningpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +func (s *mockInstanceAdminServer) UpdateInstance(ctx context.Context, req *instancepb.UpdateInstanceRequest) (*longrunningpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +func (s *mockInstanceAdminServer) DeleteInstance(ctx context.Context, req *instancepb.DeleteInstanceRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, 
fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockInstanceAdminServer) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.Policy), nil +} + +func (s *mockInstanceAdminServer) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.Policy), nil +} + +func (s *mockInstanceAdminServer) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.TestIamPermissionsResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. +var clientOpt option.ClientOption + +var ( + mockInstanceAdmin mockInstanceAdminServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + instancepb.RegisterInstanceAdminServer(serv, &mockInstanceAdmin) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestInstanceAdminListInstanceConfigs(t *testing.T) { + var nextPageToken string = "" + var instanceConfigsElement *instancepb.InstanceConfig = &instancepb.InstanceConfig{} + var instanceConfigs = []*instancepb.InstanceConfig{instanceConfigsElement} + var expectedResponse = &instancepb.ListInstanceConfigsResponse{ + NextPageToken: nextPageToken, + InstanceConfigs: instanceConfigs, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &instancepb.ListInstanceConfigsRequest{ + Parent: formattedParent, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInstanceConfigs(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.InstanceConfigs[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + 
if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminListInstanceConfigsError(t *testing.T) { + errCode := codes.PermissionDenied + mockInstanceAdmin.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &instancepb.ListInstanceConfigsRequest{ + Parent: formattedParent, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInstanceConfigs(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminGetInstanceConfig(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var expectedResponse = &instancepb.InstanceConfig{ + Name: name2, + DisplayName: displayName, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/instanceConfigs/%s", "[PROJECT]", "[INSTANCE_CONFIG]") + var request = &instancepb.GetInstanceConfigRequest{ + Name: formattedName, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetInstanceConfig(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminGetInstanceConfigError(t *testing.T) { + errCode := codes.PermissionDenied + mockInstanceAdmin.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/instanceConfigs/%s", "[PROJECT]", "[INSTANCE_CONFIG]") + var request = &instancepb.GetInstanceConfigRequest{ + Name: formattedName, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetInstanceConfig(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminListInstances(t *testing.T) { + var nextPageToken string = "" + var instancesElement *instancepb.Instance = &instancepb.Instance{} + var instances = []*instancepb.Instance{instancesElement} + var expectedResponse = &instancepb.ListInstancesResponse{ + NextPageToken: nextPageToken, + Instances: instances, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &instancepb.ListInstancesRequest{ + Parent: formattedParent, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInstances(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong 
request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Instances[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminListInstancesError(t *testing.T) { + errCode := codes.PermissionDenied + mockInstanceAdmin.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &instancepb.ListInstancesRequest{ + Parent: formattedParent, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInstances(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminGetInstance(t *testing.T) { + var name2 string = "name2-1052831874" + var config string = "config-1354792126" + var displayName string = "displayName1615086568" + var nodeCount int32 = 1539922066 + var expectedResponse = &instancepb.Instance{ + Name: name2, + Config: config, + DisplayName: displayName, + NodeCount: nodeCount, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/instances/%s", "[PROJECT]", "[INSTANCE]") + var request = &instancepb.GetInstanceRequest{ + Name: formattedName, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetInstance(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminGetInstanceError(t *testing.T) { + errCode := codes.PermissionDenied + mockInstanceAdmin.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/instances/%s", "[PROJECT]", "[INSTANCE]") + var request = &instancepb.GetInstanceRequest{ + Name: formattedName, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetInstance(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminCreateInstance(t *testing.T) { + var name string = "name3373707" + var config string = "config-1354792126" + var displayName string = "displayName1615086568" + var nodeCount int32 = 1539922066 + var expectedResponse = &instancepb.Instance{ + Name: name, + Config: config, + DisplayName: displayName, + NodeCount: nodeCount, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + 
Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var instanceId string = "instanceId-2101995259" + var instance *instancepb.Instance = &instancepb.Instance{} + var request = &instancepb.CreateInstanceRequest{ + Parent: formattedParent, + InstanceId: instanceId, + Instance: instance, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.CreateInstance(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminCreateInstanceError(t *testing.T) { + errCode := codes.PermissionDenied + mockInstanceAdmin.err = nil + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var instanceId string = "instanceId-2101995259" + var instance *instancepb.Instance = &instancepb.Instance{} + var request = &instancepb.CreateInstanceRequest{ + Parent: formattedParent, + InstanceId: instanceId, + Instance: instance, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.CreateInstance(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminUpdateInstance(t *testing.T) { + var name string = "name3373707" + var config string = "config-1354792126" + var displayName string = "displayName1615086568" + var nodeCount int32 = 1539922066 + var expectedResponse = &instancepb.Instance{ + Name: name, + Config: config, + DisplayName: displayName, + NodeCount: nodeCount, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var instance *instancepb.Instance = &instancepb.Instance{} + var fieldMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{} + var request = &instancepb.UpdateInstanceRequest{ + Instance: instance, + FieldMask: fieldMask, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.UpdateInstance(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; 
!proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminUpdateInstanceError(t *testing.T) { + errCode := codes.PermissionDenied + mockInstanceAdmin.err = nil + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var instance *instancepb.Instance = &instancepb.Instance{} + var fieldMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{} + var request = &instancepb.UpdateInstanceRequest{ + Instance: instance, + FieldMask: fieldMask, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.UpdateInstance(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminDeleteInstance(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/instances/%s", "[PROJECT]", "[INSTANCE]") + var request = &instancepb.DeleteInstanceRequest{ + Name: formattedName, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteInstance(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestInstanceAdminDeleteInstanceError(t *testing.T) { + errCode := codes.PermissionDenied + mockInstanceAdmin.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/instances/%s", "[PROJECT]", "[INSTANCE]") + var request = &instancepb.DeleteInstanceRequest{ + Name: formattedName, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteInstance(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestInstanceAdminSetIamPolicy(t *testing.T) { + var version int32 = 351608024 + var etag []byte = []byte("21") + var expectedResponse = &iampb.Policy{ + Version: version, + Etag: etag, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedResource string = fmt.Sprintf("projects/%s/instances/%s", "[PROJECT]", "[INSTANCE]") + var policy *iampb.Policy = &iampb.Policy{} + var request = &iampb.SetIamPolicyRequest{ + Resource: formattedResource, + Policy: policy, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetIamPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + 
t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminSetIamPolicyError(t *testing.T) { + errCode := codes.PermissionDenied + mockInstanceAdmin.err = gstatus.Error(errCode, "test error") + + var formattedResource string = fmt.Sprintf("projects/%s/instances/%s", "[PROJECT]", "[INSTANCE]") + var policy *iampb.Policy = &iampb.Policy{} + var request = &iampb.SetIamPolicyRequest{ + Resource: formattedResource, + Policy: policy, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetIamPolicy(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminGetIamPolicy(t *testing.T) { + var version int32 = 351608024 + var etag []byte = []byte("21") + var expectedResponse = &iampb.Policy{ + Version: version, + Etag: etag, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedResource string = fmt.Sprintf("projects/%s/instances/%s", "[PROJECT]", "[INSTANCE]") + var request = &iampb.GetIamPolicyRequest{ + Resource: formattedResource, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetIamPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminGetIamPolicyError(t *testing.T) { + errCode := codes.PermissionDenied + mockInstanceAdmin.err = gstatus.Error(errCode, "test error") + + var formattedResource string = fmt.Sprintf("projects/%s/instances/%s", "[PROJECT]", "[INSTANCE]") + var request = &iampb.GetIamPolicyRequest{ + Resource: formattedResource, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetIamPolicy(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminTestIamPermissions(t *testing.T) { + var expectedResponse *iampb.TestIamPermissionsResponse = &iampb.TestIamPermissionsResponse{} + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedResource string = fmt.Sprintf("projects/%s/instances/%s", "[PROJECT]", "[INSTANCE]") + var permissions []string = nil + var request = &iampb.TestIamPermissionsRequest{ + Resource: formattedResource, + Permissions: permissions, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.TestIamPermissions(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong 
request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminTestIamPermissionsError(t *testing.T) { + errCode := codes.PermissionDenied + mockInstanceAdmin.err = gstatus.Error(errCode, "test error") + + var formattedResource string = fmt.Sprintf("projects/%s/instances/%s", "[PROJECT]", "[INSTANCE]") + var permissions []string = nil + var request = &iampb.TestIamPermissionsRequest{ + Resource: formattedResource, + Permissions: permissions, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.TestIamPermissions(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/path_funcs.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/path_funcs.go new file mode 100644 index 0000000..a225580 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/path_funcs.go @@ -0,0 +1,55 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package instance + +// InstanceAdminProjectPath returns the path for the project resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s", project) +// instead. +func InstanceAdminProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// InstanceAdminInstanceConfigPath returns the path for the instance config resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/instanceConfigs/%s", project, instanceConfig) +// instead. +func InstanceAdminInstanceConfigPath(project, instanceConfig string) string { + return "" + + "projects/" + + project + + "/instanceConfigs/" + + instanceConfig + + "" +} + +// InstanceAdminInstancePath returns the path for the instance resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/instances/%s", project, instance) +// instead. +func InstanceAdminInstancePath(project, instance string) string { + return "" + + "projects/" + + project + + "/instances/" + + instance + + "" +} diff --git a/vendor/cloud.google.com/go/spanner/apiv1/doc.go b/vendor/cloud.google.com/go/spanner/apiv1/doc.go new file mode 100644 index 0000000..61525dd --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/apiv1/doc.go @@ -0,0 +1,50 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package spanner is an auto-generated package for the +// Cloud Spanner API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// Cloud Spanner is a managed, mission-critical, globally consistent and +// scalable relational database service. +// +// Use the client at cloud.google.com/go/spanner in preference to this. +package spanner // import "cloud.google.com/go/spanner/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.data", + } +} diff --git a/vendor/cloud.google.com/go/spanner/apiv1/mock_test.go b/vendor/cloud.google.com/go/spanner/apiv1/mock_test.go new file mode 100644 index 0000000..3315384 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/apiv1/mock_test.go @@ -0,0 +1,1085 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package spanner + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + spannerpb "google.golang.org/genproto/googleapis/spanner/v1" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockSpannerServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + spannerpb.SpannerServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockSpannerServer) CreateSession(ctx context.Context, req *spannerpb.CreateSessionRequest) (*spannerpb.Session, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*spannerpb.Session), nil +} + +func (s *mockSpannerServer) GetSession(ctx context.Context, req *spannerpb.GetSessionRequest) (*spannerpb.Session, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*spannerpb.Session), nil +} + +func (s *mockSpannerServer) ListSessions(ctx context.Context, req *spannerpb.ListSessionsRequest) (*spannerpb.ListSessionsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*spannerpb.ListSessionsResponse), nil +} + +func (s *mockSpannerServer) DeleteSession(ctx context.Context, req *spannerpb.DeleteSessionRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockSpannerServer) ExecuteSql(ctx context.Context, req *spannerpb.ExecuteSqlRequest) (*spannerpb.ResultSet, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*spannerpb.ResultSet), nil +} + +func (s *mockSpannerServer) ExecuteStreamingSql(req *spannerpb.ExecuteSqlRequest, stream spannerpb.Spanner_ExecuteStreamingSqlServer) error { + md, _ := metadata.FromIncomingContext(stream.Context()) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return s.err + } + for _, v := range s.resps { + if err := stream.Send(v.(*spannerpb.PartialResultSet)); err != nil { + return err + } + } + return nil +} + +func (s *mockSpannerServer) Read(ctx context.Context, req *spannerpb.ReadRequest) (*spannerpb.ResultSet, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*spannerpb.ResultSet), nil +} + +func (s *mockSpannerServer) StreamingRead(req *spannerpb.ReadRequest, stream 
spannerpb.Spanner_StreamingReadServer) error { + md, _ := metadata.FromIncomingContext(stream.Context()) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return s.err + } + for _, v := range s.resps { + if err := stream.Send(v.(*spannerpb.PartialResultSet)); err != nil { + return err + } + } + return nil +} + +func (s *mockSpannerServer) BeginTransaction(ctx context.Context, req *spannerpb.BeginTransactionRequest) (*spannerpb.Transaction, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*spannerpb.Transaction), nil +} + +func (s *mockSpannerServer) Commit(ctx context.Context, req *spannerpb.CommitRequest) (*spannerpb.CommitResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*spannerpb.CommitResponse), nil +} + +func (s *mockSpannerServer) Rollback(ctx context.Context, req *spannerpb.RollbackRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockSpannerServer) PartitionQuery(ctx context.Context, req *spannerpb.PartitionQueryRequest) (*spannerpb.PartitionResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*spannerpb.PartitionResponse), nil +} + +func (s *mockSpannerServer) PartitionRead(ctx context.Context, req *spannerpb.PartitionReadRequest) (*spannerpb.PartitionResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*spannerpb.PartitionResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
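+//
+// TestMain below registers mockSpanner with an in-process gRPC server
+// listening on an ephemeral localhost port, dials it with
+// grpc.WithInsecure, and wraps the resulting connection in
+// option.WithGRPCConn so every test exercises the real client stack
+// against the mock.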
+var clientOpt option.ClientOption + +var ( + mockSpanner mockSpannerServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + spannerpb.RegisterSpannerServer(serv, &mockSpanner) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestSpannerCreateSession(t *testing.T) { + var name string = "name3373707" + var expectedResponse = &spannerpb.Session{ + Name: name, + } + + mockSpanner.err = nil + mockSpanner.reqs = nil + + mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse) + + var formattedDatabase string = fmt.Sprintf("projects/%s/instances/%s/databases/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &spannerpb.CreateSessionRequest{ + Database: formattedDatabase, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateSession(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpannerCreateSessionError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpanner.err = gstatus.Error(errCode, "test error") + + var formattedDatabase string = fmt.Sprintf("projects/%s/instances/%s/databases/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &spannerpb.CreateSessionRequest{ + Database: formattedDatabase, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateSession(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSpannerGetSession(t *testing.T) { + var name2 string = "name2-1052831874" + var expectedResponse = &spannerpb.Session{ + Name: name2, + } + + mockSpanner.err = nil + mockSpanner.reqs = nil + + mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/instances/%s/databases/%s/sessions/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var request = &spannerpb.GetSessionRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetSession(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpannerGetSessionError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpanner.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/instances/%s/databases/%s/sessions/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var request = &spannerpb.GetSessionRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { 
+ t.Fatal(err) + } + + resp, err := c.GetSession(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSpannerListSessions(t *testing.T) { + var nextPageToken string = "" + var sessionsElement *spannerpb.Session = &spannerpb.Session{} + var sessions = []*spannerpb.Session{sessionsElement} + var expectedResponse = &spannerpb.ListSessionsResponse{ + NextPageToken: nextPageToken, + Sessions: sessions, + } + + mockSpanner.err = nil + mockSpanner.reqs = nil + + mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse) + + var formattedDatabase string = fmt.Sprintf("projects/%s/instances/%s/databases/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &spannerpb.ListSessionsRequest{ + Database: formattedDatabase, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListSessions(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Sessions[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpannerListSessionsError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpanner.err = gstatus.Error(errCode, "test error") + + var formattedDatabase string = fmt.Sprintf("projects/%s/instances/%s/databases/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &spannerpb.ListSessionsRequest{ + Database: formattedDatabase, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListSessions(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSpannerDeleteSession(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockSpanner.err = nil + mockSpanner.reqs = nil + + mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/instances/%s/databases/%s/sessions/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var request = &spannerpb.DeleteSessionRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteSession(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestSpannerDeleteSessionError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpanner.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/instances/%s/databases/%s/sessions/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var request = &spannerpb.DeleteSessionRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + 
t.Fatal(err) + } + + err = c.DeleteSession(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestSpannerExecuteSql(t *testing.T) { + var expectedResponse *spannerpb.ResultSet = &spannerpb.ResultSet{} + + mockSpanner.err = nil + mockSpanner.reqs = nil + + mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse) + + var formattedSession string = fmt.Sprintf("projects/%s/instances/%s/databases/%s/sessions/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var sql string = "sql114126" + var request = &spannerpb.ExecuteSqlRequest{ + Session: formattedSession, + Sql: sql, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ExecuteSql(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpannerExecuteSqlError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpanner.err = gstatus.Error(errCode, "test error") + + var formattedSession string = fmt.Sprintf("projects/%s/instances/%s/databases/%s/sessions/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var sql string = "sql114126" + var request = &spannerpb.ExecuteSqlRequest{ + Session: formattedSession, + Sql: sql, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ExecuteSql(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSpannerExecuteStreamingSql(t *testing.T) { + var chunkedValue bool = true + var resumeToken []byte = []byte("103") + var expectedResponse = &spannerpb.PartialResultSet{ + ChunkedValue: chunkedValue, + ResumeToken: resumeToken, + } + + mockSpanner.err = nil + mockSpanner.reqs = nil + + mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse) + + var formattedSession string = fmt.Sprintf("projects/%s/instances/%s/databases/%s/sessions/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var sql string = "sql114126" + var request = &spannerpb.ExecuteSqlRequest{ + Session: formattedSession, + Sql: sql, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.ExecuteStreamingSql(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpannerExecuteStreamingSqlError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpanner.err = gstatus.Error(errCode, "test error") + + var formattedSession string = fmt.Sprintf("projects/%s/instances/%s/databases/%s/sessions/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var sql string = "sql114126" + var request = 
&spannerpb.ExecuteSqlRequest{ + Session: formattedSession, + Sql: sql, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.ExecuteStreamingSql(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSpannerRead(t *testing.T) { + var expectedResponse *spannerpb.ResultSet = &spannerpb.ResultSet{} + + mockSpanner.err = nil + mockSpanner.reqs = nil + + mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse) + + var formattedSession string = fmt.Sprintf("projects/%s/instances/%s/databases/%s/sessions/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var table string = "table110115790" + var columns []string = nil + var keySet *spannerpb.KeySet = &spannerpb.KeySet{} + var request = &spannerpb.ReadRequest{ + Session: formattedSession, + Table: table, + Columns: columns, + KeySet: keySet, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Read(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpannerReadError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpanner.err = gstatus.Error(errCode, "test error") + + var formattedSession string = fmt.Sprintf("projects/%s/instances/%s/databases/%s/sessions/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var table string = "table110115790" + var columns []string = nil + var keySet *spannerpb.KeySet = &spannerpb.KeySet{} + var request = &spannerpb.ReadRequest{ + Session: formattedSession, + Table: table, + Columns: columns, + KeySet: keySet, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Read(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSpannerStreamingRead(t *testing.T) { + var chunkedValue bool = true + var resumeToken []byte = []byte("103") + var expectedResponse = &spannerpb.PartialResultSet{ + ChunkedValue: chunkedValue, + ResumeToken: resumeToken, + } + + mockSpanner.err = nil + mockSpanner.reqs = nil + + mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse) + + var formattedSession string = fmt.Sprintf("projects/%s/instances/%s/databases/%s/sessions/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var table string = "table110115790" + var columns []string = nil + var keySet *spannerpb.KeySet = &spannerpb.KeySet{} + var request = &spannerpb.ReadRequest{ + Session: formattedSession, + Table: table, + Columns: columns, + KeySet: keySet, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.StreamingRead(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, 
mockSpanner.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpannerStreamingReadError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpanner.err = gstatus.Error(errCode, "test error") + + var formattedSession string = fmt.Sprintf("projects/%s/instances/%s/databases/%s/sessions/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var table string = "table110115790" + var columns []string = nil + var keySet *spannerpb.KeySet = &spannerpb.KeySet{} + var request = &spannerpb.ReadRequest{ + Session: formattedSession, + Table: table, + Columns: columns, + KeySet: keySet, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.StreamingRead(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSpannerBeginTransaction(t *testing.T) { + var id []byte = []byte("27") + var expectedResponse = &spannerpb.Transaction{ + Id: id, + } + + mockSpanner.err = nil + mockSpanner.reqs = nil + + mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse) + + var formattedSession string = fmt.Sprintf("projects/%s/instances/%s/databases/%s/sessions/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var options *spannerpb.TransactionOptions = &spannerpb.TransactionOptions{} + var request = &spannerpb.BeginTransactionRequest{ + Session: formattedSession, + Options: options, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.BeginTransaction(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpannerBeginTransactionError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpanner.err = gstatus.Error(errCode, "test error") + + var formattedSession string = fmt.Sprintf("projects/%s/instances/%s/databases/%s/sessions/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var options *spannerpb.TransactionOptions = &spannerpb.TransactionOptions{} + var request = &spannerpb.BeginTransactionRequest{ + Session: formattedSession, + Options: options, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.BeginTransaction(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSpannerCommit(t *testing.T) { + var expectedResponse *spannerpb.CommitResponse = &spannerpb.CommitResponse{} + + mockSpanner.err = nil + mockSpanner.reqs = nil + + mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse) + + var formattedSession string = fmt.Sprintf("projects/%s/instances/%s/databases/%s/sessions/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var mutations 
[]*spannerpb.Mutation = nil + var request = &spannerpb.CommitRequest{ + Session: formattedSession, + Mutations: mutations, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Commit(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpannerCommitError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpanner.err = gstatus.Error(errCode, "test error") + + var formattedSession string = fmt.Sprintf("projects/%s/instances/%s/databases/%s/sessions/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var mutations []*spannerpb.Mutation = nil + var request = &spannerpb.CommitRequest{ + Session: formattedSession, + Mutations: mutations, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Commit(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSpannerRollback(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockSpanner.err = nil + mockSpanner.reqs = nil + + mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse) + + var formattedSession string = fmt.Sprintf("projects/%s/instances/%s/databases/%s/sessions/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var transactionId []byte = []byte("28") + var request = &spannerpb.RollbackRequest{ + Session: formattedSession, + TransactionId: transactionId, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.Rollback(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestSpannerRollbackError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpanner.err = gstatus.Error(errCode, "test error") + + var formattedSession string = fmt.Sprintf("projects/%s/instances/%s/databases/%s/sessions/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var transactionId []byte = []byte("28") + var request = &spannerpb.RollbackRequest{ + Session: formattedSession, + TransactionId: transactionId, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.Rollback(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestSpannerPartitionQuery(t *testing.T) { + var expectedResponse *spannerpb.PartitionResponse = &spannerpb.PartitionResponse{} + + mockSpanner.err = nil + mockSpanner.reqs = nil + + mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse) + + var formattedSession string = fmt.Sprintf("projects/%s/instances/%s/databases/%s/sessions/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var sql string = "sql114126" + var request = &spannerpb.PartitionQueryRequest{ + Session: formattedSession, + Sql: sql, + 
} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.PartitionQuery(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpannerPartitionQueryError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpanner.err = gstatus.Error(errCode, "test error") + + var formattedSession string = fmt.Sprintf("projects/%s/instances/%s/databases/%s/sessions/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var sql string = "sql114126" + var request = &spannerpb.PartitionQueryRequest{ + Session: formattedSession, + Sql: sql, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.PartitionQuery(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSpannerPartitionRead(t *testing.T) { + var expectedResponse *spannerpb.PartitionResponse = &spannerpb.PartitionResponse{} + + mockSpanner.err = nil + mockSpanner.reqs = nil + + mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse) + + var formattedSession string = fmt.Sprintf("projects/%s/instances/%s/databases/%s/sessions/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var table string = "table110115790" + var keySet *spannerpb.KeySet = &spannerpb.KeySet{} + var request = &spannerpb.PartitionReadRequest{ + Session: formattedSession, + Table: table, + KeySet: keySet, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.PartitionRead(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpannerPartitionReadError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpanner.err = gstatus.Error(errCode, "test error") + + var formattedSession string = fmt.Sprintf("projects/%s/instances/%s/databases/%s/sessions/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var table string = "table110115790" + var keySet *spannerpb.KeySet = &spannerpb.KeySet{} + var request = &spannerpb.PartitionReadRequest{ + Session: formattedSession, + Table: table, + KeySet: keySet, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.PartitionRead(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/spanner/apiv1/path_funcs.go b/vendor/cloud.google.com/go/spanner/apiv1/path_funcs.go new file mode 100644 index 0000000..19f30ad --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/apiv1/path_funcs.go @@ -0,0 +1,49 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 
2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spanner + +// DatabasePath returns the path for the database resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/instances/%s/databases/%s", project, instance, database) +// instead. +func DatabasePath(project, instance, database string) string { + return "" + + "projects/" + + project + + "/instances/" + + instance + + "/databases/" + + database + + "" +} + +// SessionPath returns the path for the session resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/instances/%s/databases/%s/sessions/%s", project, instance, database, session) +// instead. +func SessionPath(project, instance, database, session string) string { + return "" + + "projects/" + + project + + "/instances/" + + instance + + "/databases/" + + database + + "/sessions/" + + session + + "" +} diff --git a/vendor/cloud.google.com/go/spanner/apiv1/spanner_client.go b/vendor/cloud.google.com/go/spanner/apiv1/spanner_client.go new file mode 100644 index 0000000..0ba50d3 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/apiv1/spanner_client.go @@ -0,0 +1,498 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package spanner + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + spannerpb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. 
+type CallOptions struct { + CreateSession []gax.CallOption + GetSession []gax.CallOption + ListSessions []gax.CallOption + DeleteSession []gax.CallOption + ExecuteSql []gax.CallOption + ExecuteStreamingSql []gax.CallOption + Read []gax.CallOption + StreamingRead []gax.CallOption + BeginTransaction []gax.CallOption + Commit []gax.CallOption + Rollback []gax.CallOption + PartitionQuery []gax.CallOption + PartitionRead []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("spanner.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 32000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + {"long_running", "long_running"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 32000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &CallOptions{ + CreateSession: retry[[2]string{"default", "idempotent"}], + GetSession: retry[[2]string{"default", "idempotent"}], + ListSessions: retry[[2]string{"default", "idempotent"}], + DeleteSession: retry[[2]string{"default", "idempotent"}], + ExecuteSql: retry[[2]string{"default", "idempotent"}], + ExecuteStreamingSql: retry[[2]string{"default", "non_idempotent"}], + Read: retry[[2]string{"default", "idempotent"}], + StreamingRead: retry[[2]string{"default", "non_idempotent"}], + BeginTransaction: retry[[2]string{"default", "idempotent"}], + Commit: retry[[2]string{"long_running", "long_running"}], + Rollback: retry[[2]string{"default", "idempotent"}], + PartitionQuery: retry[[2]string{"default", "idempotent"}], + PartitionRead: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with Cloud Spanner API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client spannerpb.SpannerClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new spanner client. +// +// Cloud Spanner API +// +// The Cloud Spanner API can be used to manage sessions and execute +// transactions on data stored in Cloud Spanner databases. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: spannerpb.NewSpannerClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. 
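+//
+// A minimal sketch, assuming a constructed *Client c; the name/version
+// pair is an illustrative placeholder:
+//
+//	c.SetGoogleClientInfo("my-wrapper", "v0.1.0")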
+func (c *Client) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// CreateSession creates a new session. A session can be used to perform +// transactions that read and/or modify data in a Cloud Spanner database. +// Sessions are meant to be reused for many consecutive +// transactions. +// +// Sessions can only execute one transaction at a time. To execute +// multiple concurrent read-write/write-only transactions, create +// multiple sessions. Note that standalone reads and queries use a +// transaction internally, and count toward the one transaction +// limit. +// +// Cloud Spanner limits the number of sessions that can exist at any given +// time; thus, it is a good idea to delete idle and/or unneeded sessions. +// Aside from explicit deletes, Cloud Spanner can delete sessions for which no +// operations are sent for more than an hour. If a session is deleted, +// requests to it return NOT_FOUND. +// +// Idle sessions can be kept alive by sending a trivial SQL query +// periodically, e.g., "SELECT 1". +func (c *Client) CreateSession(ctx context.Context, req *spannerpb.CreateSessionRequest, opts ...gax.CallOption) (*spannerpb.Session, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateSession[0:len(c.CallOptions.CreateSession):len(c.CallOptions.CreateSession)], opts...) + var resp *spannerpb.Session + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateSession(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetSession gets a session. Returns NOT_FOUND if the session does not exist. +// This is mainly useful for determining whether a session is still +// alive. +func (c *Client) GetSession(ctx context.Context, req *spannerpb.GetSessionRequest, opts ...gax.CallOption) (*spannerpb.Session, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetSession[0:len(c.CallOptions.GetSession):len(c.CallOptions.GetSession)], opts...) + var resp *spannerpb.Session + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetSession(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListSessions lists all sessions in a given database. +func (c *Client) ListSessions(ctx context.Context, req *spannerpb.ListSessionsRequest, opts ...gax.CallOption) *SessionIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListSessions[0:len(c.CallOptions.ListSessions):len(c.CallOptions.ListSessions)], opts...) + it := &SessionIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*spannerpb.Session, string, error) { + var resp *spannerpb.ListSessionsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListSessions(ctx, req, settings.GRPC...) + return err + }, opts...) 
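+		// gax.Invoke has completed one page-sized ListSessions call; on
+		// success the page's Sessions and NextPageToken feed the iterator
+		// plumbing below.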
+ if err != nil { + return nil, "", err + } + return resp.Sessions, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// DeleteSession ends a session, releasing server resources associated with it. +func (c *Client) DeleteSession(ctx context.Context, req *spannerpb.DeleteSessionRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteSession[0:len(c.CallOptions.DeleteSession):len(c.CallOptions.DeleteSession)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteSession(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ExecuteSql executes an SQL query, returning all rows in a single reply. This +// method cannot be used to return a result set larger than 10 MiB; +// if the query yields more data than that, the query fails with +// a FAILED_PRECONDITION error. +// +// Queries inside read-write transactions might return ABORTED. If +// this occurs, the application should restart the transaction from +// the beginning. See [Transaction][google.spanner.v1.Transaction] for more details. +// +// Larger result sets can be fetched in streaming fashion by calling +// [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] instead. +func (c *Client) ExecuteSql(ctx context.Context, req *spannerpb.ExecuteSqlRequest, opts ...gax.CallOption) (*spannerpb.ResultSet, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ExecuteSql[0:len(c.CallOptions.ExecuteSql):len(c.CallOptions.ExecuteSql)], opts...) + var resp *spannerpb.ResultSet + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ExecuteSql(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ExecuteStreamingSql like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the result +// set as a stream. Unlike [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there +// is no limit on the size of the returned result set. However, no +// individual row in the result set can exceed 100 MiB, and no +// column value can exceed 10 MiB. +func (c *Client) ExecuteStreamingSql(ctx context.Context, req *spannerpb.ExecuteSqlRequest, opts ...gax.CallOption) (spannerpb.Spanner_ExecuteStreamingSqlClient, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ExecuteStreamingSql[0:len(c.CallOptions.ExecuteStreamingSql):len(c.CallOptions.ExecuteStreamingSql)], opts...) + var resp spannerpb.Spanner_ExecuteStreamingSqlClient + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ExecuteStreamingSql(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// Read reads rows from the database using key lookups and scans, as a +// simple key/value style alternative to +// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. 
This method cannot be used to +// return a result set larger than 10 MiB; if the read matches more +// data than that, the read fails with a FAILED_PRECONDITION +// error. +// +// Reads inside read-write transactions might return ABORTED. If +// this occurs, the application should restart the transaction from +// the beginning. See [Transaction][google.spanner.v1.Transaction] for more details. +// +// Larger result sets can be yielded in streaming fashion by calling +// [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead. +func (c *Client) Read(ctx context.Context, req *spannerpb.ReadRequest, opts ...gax.CallOption) (*spannerpb.ResultSet, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.Read[0:len(c.CallOptions.Read):len(c.CallOptions.Read)], opts...) + var resp *spannerpb.ResultSet + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.Read(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// StreamingRead like [Read][google.spanner.v1.Spanner.Read], except returns the result set as a +// stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no limit on the +// size of the returned result set. However, no individual row in +// the result set can exceed 100 MiB, and no column value can exceed +// 10 MiB. +func (c *Client) StreamingRead(ctx context.Context, req *spannerpb.ReadRequest, opts ...gax.CallOption) (spannerpb.Spanner_StreamingReadClient, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.StreamingRead[0:len(c.CallOptions.StreamingRead):len(c.CallOptions.StreamingRead)], opts...) + var resp spannerpb.Spanner_StreamingReadClient + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.StreamingRead(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// BeginTransaction begins a new transaction. This step can often be skipped: +// [Read][google.spanner.v1.Spanner.Read], [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and +// [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a +// side-effect. +func (c *Client) BeginTransaction(ctx context.Context, req *spannerpb.BeginTransactionRequest, opts ...gax.CallOption) (*spannerpb.Transaction, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.BeginTransaction[0:len(c.CallOptions.BeginTransaction):len(c.CallOptions.BeginTransaction)], opts...) + var resp *spannerpb.Transaction + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.BeginTransaction(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// Commit commits a transaction. The request includes the mutations to be +// applied to rows in the database. +// +// Commit might return an ABORTED error. This can occur at any time; +// commonly, the cause is conflicts with concurrent +// transactions. However, it can also happen for a variety of other +// reasons. If Commit returns ABORTED, the caller should re-attempt +// the transaction from the beginning, re-using the same session. 
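+//
+// A minimal retry sketch, assuming a constructed *Client c and that the
+// transaction's work is re-applied before each attempt (status is
+// google.golang.org/grpc/status):
+//
+//	for {
+//		// ... re-run the transaction body and rebuild req ...
+//		resp, err := c.Commit(ctx, req)
+//		if st, ok := status.FromError(err); ok && st.Code() == codes.Aborted {
+//			continue // start over on the same session
+//		}
+//		// resp/err now hold the final outcome.
+//		break
+//	}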
+func (c *Client) Commit(ctx context.Context, req *spannerpb.CommitRequest, opts ...gax.CallOption) (*spannerpb.CommitResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.Commit[0:len(c.CallOptions.Commit):len(c.CallOptions.Commit)], opts...) + var resp *spannerpb.CommitResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.Commit(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// Rollback rolls back a transaction, releasing any locks it holds. It is a good +// idea to call this for any transaction that includes one or more +// [Read][google.spanner.v1.Spanner.Read] or [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and +// ultimately decides not to commit. +// +// Rollback returns OK if it successfully aborts the transaction, the +// transaction was already aborted, or the transaction is not +// found. Rollback never returns ABORTED. +func (c *Client) Rollback(ctx context.Context, req *spannerpb.RollbackRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.Rollback[0:len(c.CallOptions.Rollback):len(c.CallOptions.Rollback)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.Rollback(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// PartitionQuery creates a set of partition tokens that can be used to execute a query +// operation in parallel. Each of the returned partition tokens can be used +// by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to specify a subset +// of the query result to read. The same session and read-only transaction +// must be used by the PartitionQueryRequest used to create the +// partition tokens and the ExecuteSqlRequests that use the partition tokens. +// Partition tokens become invalid when the session used to create them +// is deleted or begins a new transaction. +func (c *Client) PartitionQuery(ctx context.Context, req *spannerpb.PartitionQueryRequest, opts ...gax.CallOption) (*spannerpb.PartitionResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.PartitionQuery[0:len(c.CallOptions.PartitionQuery):len(c.CallOptions.PartitionQuery)], opts...) + var resp *spannerpb.PartitionResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.PartitionQuery(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// PartitionRead creates a set of partition tokens that can be used to execute a read +// operation in parallel. Each of the returned partition tokens can be used +// by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a subset of the read +// result to read. The same session and read-only transaction must be used by +// the PartitionReadRequest used to create the partition tokens and the +// ReadRequests that use the partition tokens. +// Partition tokens become invalid when the session used to create them +// is deleted or begins a new transaction. 
+func (c *Client) PartitionRead(ctx context.Context, req *spannerpb.PartitionReadRequest, opts ...gax.CallOption) (*spannerpb.PartitionResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.PartitionRead[0:len(c.CallOptions.PartitionRead):len(c.CallOptions.PartitionRead)], opts...) + var resp *spannerpb.PartitionResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.PartitionRead(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SessionIterator manages a stream of *spannerpb.Session. +type SessionIterator struct { + items []*spannerpb.Session + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*spannerpb.Session, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *SessionIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *SessionIterator) Next() (*spannerpb.Session, error) { + var item *spannerpb.Session + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *SessionIterator) bufLen() int { + return len(it.items) +} + +func (it *SessionIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/spanner/apiv1/spanner_client_example_test.go b/vendor/cloud.google.com/go/spanner/apiv1/spanner_client_example_test.go new file mode 100644 index 0000000..6806af9 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/apiv1/spanner_client_example_test.go @@ -0,0 +1,290 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package spanner_test + +import ( + "io" + + "cloud.google.com/go/spanner/apiv1" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + spannerpb "google.golang.org/genproto/googleapis/spanner/v1" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := spanner.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_CreateSession() { + ctx := context.Background() + c, err := spanner.NewClient(ctx) + if err != nil { + // TODO: Handle error. 
+ } + + req := &spannerpb.CreateSessionRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateSession(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_GetSession() { + ctx := context.Background() + c, err := spanner.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &spannerpb.GetSessionRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetSession(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListSessions() { + ctx := context.Background() + c, err := spanner.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &spannerpb.ListSessionsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListSessions(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_DeleteSession() { + ctx := context.Background() + c, err := spanner.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &spannerpb.DeleteSessionRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteSession(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_ExecuteSql() { + ctx := context.Background() + c, err := spanner.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &spannerpb.ExecuteSqlRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ExecuteSql(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ExecuteStreamingSql() { + ctx := context.Background() + c, err := spanner.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &spannerpb.ExecuteSqlRequest{ + // TODO: Fill request struct fields. + } + stream, err := c.ExecuteStreamingSql(ctx, req) + if err != nil { + // TODO: Handle error. + } + for { + resp, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + // TODO: handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_Read() { + ctx := context.Background() + c, err := spanner.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &spannerpb.ReadRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.Read(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_StreamingRead() { + ctx := context.Background() + c, err := spanner.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &spannerpb.ReadRequest{ + // TODO: Fill request struct fields. + } + stream, err := c.StreamingRead(ctx, req) + if err != nil { + // TODO: Handle error. + } + for { + resp, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + // TODO: handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_BeginTransaction() { + ctx := context.Background() + c, err := spanner.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &spannerpb.BeginTransactionRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.BeginTransaction(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_Commit() { + ctx := context.Background() + c, err := spanner.NewClient(ctx) + if err != nil { + // TODO: Handle error. 
+ } + + req := &spannerpb.CommitRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.Commit(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_Rollback() { + ctx := context.Background() + c, err := spanner.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &spannerpb.RollbackRequest{ + // TODO: Fill request struct fields. + } + err = c.Rollback(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_PartitionQuery() { + ctx := context.Background() + c, err := spanner.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &spannerpb.PartitionQueryRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.PartitionQuery(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_PartitionRead() { + ctx := context.Background() + c, err := spanner.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &spannerpb.PartitionReadRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.PartitionRead(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/spanner/appengine.go b/vendor/cloud.google.com/go/spanner/appengine.go new file mode 100644 index 0000000..c8526f1 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/appengine.go @@ -0,0 +1,20 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build appengine + +package spanner + +// numChannels is the default value for NumChannels of client +const numChannels = 1 diff --git a/vendor/cloud.google.com/go/spanner/backoff.go b/vendor/cloud.google.com/go/spanner/backoff.go new file mode 100644 index 0000000..d387238 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/backoff.go @@ -0,0 +1,58 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "math/rand" + "time" +) + +const ( + // minBackoff is the minimum backoff used by default. + minBackoff = 1 * time.Second + // maxBackoff is the maximum backoff used by default. + maxBackoff = 32 * time.Second + // jitter is the jitter factor. + jitter = 0.4 + // rate is the rate of exponential increase in the backoff. 
+ rate = 1.3 +) + +var defaultBackoff = exponentialBackoff{minBackoff, maxBackoff} + +type exponentialBackoff struct { + min, max time.Duration +} + +// delay calculates the delay that should happen at n-th +// exponential backoff in a series. +func (b exponentialBackoff) delay(retries int) time.Duration { + min, max := float64(b.min), float64(b.max) + delay := min + for delay < max && retries > 0 { + delay *= rate + retries-- + } + if delay > max { + delay = max + } + delay -= delay * jitter * rand.Float64() + if delay < min { + delay = min + } + return time.Duration(delay) +} diff --git a/vendor/cloud.google.com/go/spanner/backoff_test.go b/vendor/cloud.google.com/go/spanner/backoff_test.go new file mode 100644 index 0000000..7a0314e --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/backoff_test.go @@ -0,0 +1,62 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "math" + "time" + + "testing" +) + +// Test if exponential backoff helper can produce correct series of +// retry delays. +func TestBackoff(t *testing.T) { + b := exponentialBackoff{minBackoff, maxBackoff} + tests := []struct { + retries int + min time.Duration + max time.Duration + }{ + { + retries: 0, + min: minBackoff, + max: minBackoff, + }, + { + retries: 1, + min: minBackoff, + max: time.Duration(rate * float64(minBackoff)), + }, + { + retries: 3, + min: time.Duration(math.Pow(rate, 3) * (1 - jitter) * float64(minBackoff)), + max: time.Duration(math.Pow(rate, 3) * float64(minBackoff)), + }, + { + retries: 1000, + min: time.Duration((1 - jitter) * float64(maxBackoff)), + max: maxBackoff, + }, + } + for _, test := range tests { + got := b.delay(test.retries) + if float64(got) < float64(test.min) || float64(got) > float64(test.max) { + t.Errorf("delay(%v) = %v, want in range [%v, %v]", test.retries, got, test.min, test.max) + } + } +} diff --git a/vendor/cloud.google.com/go/spanner/batch.go b/vendor/cloud.google.com/go/spanner/batch.go new file mode 100644 index 0000000..9c25f8e --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/batch.go @@ -0,0 +1,345 @@ +/* +Copyright 2018 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spanner + +import ( + "bytes" + "encoding/gob" + "log" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// BatchReadOnlyTransaction is a ReadOnlyTransaction that allows for exporting +// arbitrarily large amounts of data from Cloud Spanner databases. +// BatchReadOnlyTransaction partitions a read/query request. Read/query request +// can then be executed independently over each partition while observing the +// same snapshot of the database. BatchReadOnlyTransaction can also be shared +// across multiple clients by passing around the BatchReadOnlyTransactionID and +// then recreating the transaction using Client.BatchReadOnlyTransactionFromID. +// +// Note: if a client is used only to run partitions, you can +// create it using a ClientConfig with both MinOpened and MaxIdle set to +// zero to avoid creating unnecessary sessions. You can also avoid excess +// gRPC channels by setting ClientConfig.NumChannels to the number of +// concurrently active BatchReadOnlyTransactions you expect to have. +type BatchReadOnlyTransaction struct { + ReadOnlyTransaction + ID BatchReadOnlyTransactionID +} + +// BatchReadOnlyTransactionID is a unique identifier for a +// BatchReadOnlyTransaction. It can be used to re-create a +// BatchReadOnlyTransaction on a different machine or process by calling +// Client.BatchReadOnlyTransactionFromID. +type BatchReadOnlyTransactionID struct { + // unique ID for the transaction. + tid transactionID + // sid is the id of the Cloud Spanner session used for this transaction. + sid string + // rts is the read timestamp of this transaction. + rts time.Time +} + +// Partition defines a segment of data to be read in a batch read or query. A +// partition can be serialized and processed across several different machines +// or processes. +type Partition struct { + pt []byte + qreq *sppb.ExecuteSqlRequest + rreq *sppb.ReadRequest +} + +// PartitionOptions specifies options for a PartitionQueryRequest and +// PartitionReadRequest. See +// https://godoc.org/google.golang.org/genproto/googleapis/spanner/v1#PartitionOptions +// for more details. +type PartitionOptions struct { + // The desired data size for each partition generated. + PartitionBytes int64 + // The desired maximum number of partitions to return. + MaxPartitions int64 +} + +// toProto converts a spanner.PartitionOptions into a sppb.PartitionOptions +func (opt PartitionOptions) toProto() *sppb.PartitionOptions { + return &sppb.PartitionOptions{ + PartitionSizeBytes: opt.PartitionBytes, + MaxPartitions: opt.MaxPartitions, + } +} + +// PartitionRead returns a list of Partitions that can be used to read rows from +// the database. These partitions can be executed across multiple processes, +// even across different machines. The partition size and count hints can be +// configured using PartitionOptions. +func (t *BatchReadOnlyTransaction) PartitionRead(ctx context.Context, table string, keys KeySet, columns []string, opt PartitionOptions) ([]*Partition, error) { + return t.PartitionReadUsingIndex(ctx, table, "", keys, columns, opt) +} + +// PartitionReadUsingIndex returns a list of Partitions that can be used to read +// rows from the database using an index. 
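+//
+// A minimal sketch, assuming an open BatchReadOnlyTransaction txn; the
+// table and index names are placeholders, and AllKeys is this package's
+// match-everything KeySet:
+//
+//	ps, err := txn.PartitionReadUsingIndex(ctx, "Singers", "SingerByName",
+//		AllKeys(), []string{"SingerId", "FirstName"}, PartitionOptions{})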
+func (t *BatchReadOnlyTransaction) PartitionReadUsingIndex(ctx context.Context, table, index string, keys KeySet, columns []string, opt PartitionOptions) ([]*Partition, error) {
+	sh, ts, err := t.acquire(ctx)
+	if err != nil {
+		return nil, err
+	}
+	sid, client := sh.getID(), sh.getClient()
+	var (
+		kset       *sppb.KeySet
+		resp       *sppb.PartitionResponse
+		partitions []*Partition
+	)
+	kset, err = keys.keySetProto()
+	if err != nil {
+		return nil, err
+	}
+	// request Partitions
+	resp, err = client.PartitionRead(ctx, &sppb.PartitionReadRequest{
+		Session:          sid,
+		Transaction:      ts,
+		Table:            table,
+		Index:            index,
+		Columns:          columns,
+		KeySet:           kset,
+		PartitionOptions: opt.toProto(),
+	})
+	if err != nil {
+		return nil, err
+	}
+	// prepare ReadRequest
+	req := &sppb.ReadRequest{
+		Session:     sid,
+		Transaction: ts,
+		Table:       table,
+		Index:       index,
+		Columns:     columns,
+		KeySet:      kset,
+	}
+	// generate Partitions
+	for _, p := range resp.GetPartitions() {
+		partitions = append(partitions, &Partition{
+			pt:   p.PartitionToken,
+			rreq: req,
+		})
+	}
+	return partitions, nil
+}
+
+// PartitionQuery returns a list of Partitions that can be used to execute a query against the database.
+func (t *BatchReadOnlyTransaction) PartitionQuery(ctx context.Context, statement Statement, opt PartitionOptions) ([]*Partition, error) {
+	sh, ts, err := t.acquire(ctx)
+	if err != nil {
+		return nil, err
+	}
+	sid, client := sh.getID(), sh.getClient()
+	var (
+		resp       *sppb.PartitionResponse
+		partitions []*Partition
+	)
+	// request Partitions
+	req := &sppb.PartitionQueryRequest{
+		Session:          sid,
+		Transaction:      ts,
+		Sql:              statement.SQL,
+		PartitionOptions: opt.toProto(),
+	}
+	if err := statement.bindParams(req); err != nil {
+		return nil, err
+	}
+	resp, err = client.PartitionQuery(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	// prepare ExecuteSqlRequest
+	r := &sppb.ExecuteSqlRequest{
+		Session:     sid,
+		Transaction: ts,
+		Sql:         statement.SQL,
+	}
+	if err := statement.bindParams(r); err != nil {
+		return nil, err
+	}
+	// generate Partitions
+	for _, p := range resp.GetPartitions() {
+		partitions = append(partitions, &Partition{
+			pt:   p.PartitionToken,
+			qreq: r,
+		})
+	}
+	return partitions, nil
+}
+
+// release implements txReadEnv.release; it is a noop.
+func (t *BatchReadOnlyTransaction) release(err error) {
+}
+
+// setTimestamp implements txReadEnv.setTimestamp; it is a noop. The read
+// timestamp is already set during transaction initialization, which avoids
+// contended writes to it from concurrently executing partitions.
+func (t *BatchReadOnlyTransaction) setTimestamp(ts time.Time) {
+}
+
+// Close marks the txn as closed.
+func (t *BatchReadOnlyTransaction) Close() {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	t.state = txClosed
+}
+
+// Cleanup cleans up all the resources used by this transaction and makes
+// it unusable. Once this method is invoked, the transaction is no longer
+// usable anywhere, including other clients/processes with which this
+// transaction was shared.
+//
+// Calling Cleanup is optional, but recommended. If Cleanup is not called, the
+// transaction's resources will be freed when the session expires on the backend and
+// is deleted. For more information about recycled sessions, see
+// https://cloud.google.com/spanner/docs/sessions.
+func (t *BatchReadOnlyTransaction) Cleanup(ctx context.Context) {
+	t.Close()
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	sh := t.sh
+	if sh == nil {
+		return
+	}
+	t.sh = nil
+	sid, client := sh.getID(), sh.getClient()
+	err := runRetryable(ctx, func(ctx context.Context) error {
+		_, e := client.DeleteSession(ctx, &sppb.DeleteSessionRequest{Name: sid})
+		return e
+	})
+	if err != nil {
+		log.Printf("Failed to delete session %v. Error: %v", sid, err)
+	}
+}
+
+// Execute runs a single Partition obtained from PartitionRead or PartitionQuery.
+func (t *BatchReadOnlyTransaction) Execute(ctx context.Context, p *Partition) *RowIterator {
+	var (
+		sh  *sessionHandle
+		err error
+		rpc func(ct context.Context, resumeToken []byte) (streamingReceiver, error)
+	)
+	if sh, _, err = t.acquire(ctx); err != nil {
+		return &RowIterator{err: err}
+	}
+	client := sh.getClient()
+	if client == nil {
+		// Might happen if transaction is closed in the middle of an API call.
+		return &RowIterator{err: errSessionClosed(sh)}
+	}
+	// read or query partition
+	if p.rreq != nil {
+		p.rreq.PartitionToken = p.pt
+		rpc = func(ctx context.Context, resumeToken []byte) (streamingReceiver, error) {
+			p.rreq.ResumeToken = resumeToken
+			return client.StreamingRead(ctx, p.rreq)
+		}
+	} else {
+		p.qreq.PartitionToken = p.pt
+		rpc = func(ctx context.Context, resumeToken []byte) (streamingReceiver, error) {
+			p.qreq.ResumeToken = resumeToken
+			return client.ExecuteStreamingSql(ctx, p.qreq)
+		}
+	}
+	return stream(
+		contextWithOutgoingMetadata(ctx, sh.getMetadata()),
+		rpc,
+		t.setTimestamp,
+		t.release)
+}
+
+// MarshalBinary implements BinaryMarshaler.
+func (tid BatchReadOnlyTransactionID) MarshalBinary() (data []byte, err error) {
+	var buf bytes.Buffer
+	enc := gob.NewEncoder(&buf)
+	if err := enc.Encode(tid.tid); err != nil {
+		return nil, err
+	}
+	if err := enc.Encode(tid.sid); err != nil {
+		return nil, err
+	}
+	if err := enc.Encode(tid.rts); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+// UnmarshalBinary implements BinaryUnmarshaler.
+func (tid *BatchReadOnlyTransactionID) UnmarshalBinary(data []byte) error {
+	dec := gob.NewDecoder(bytes.NewReader(data))
+	if err := dec.Decode(&tid.tid); err != nil {
+		return err
+	}
+	if err := dec.Decode(&tid.sid); err != nil {
+		return err
+	}
+	return dec.Decode(&tid.rts)
+}
+
+// MarshalBinary implements BinaryMarshaler.
+func (p Partition) MarshalBinary() (data []byte, err error) {
+	var buf bytes.Buffer
+	enc := gob.NewEncoder(&buf)
+	if err := enc.Encode(p.pt); err != nil {
+		return nil, err
+	}
+	var isReadPartition bool
+	var req proto.Message
+	if p.rreq != nil {
+		isReadPartition = true
+		req = p.rreq
+	} else {
+		isReadPartition = false
+		req = p.qreq
+	}
+	if err := enc.Encode(isReadPartition); err != nil {
+		return nil, err
+	}
+	if data, err = proto.Marshal(req); err != nil {
+		return nil, err
+	}
+	if err := enc.Encode(data); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+// UnmarshalBinary implements BinaryUnmarshaler.
+func (p *Partition) UnmarshalBinary(data []byte) error {
+	var (
+		isReadPartition bool
+		d               []byte
+		err             error
+	)
+	dec := gob.NewDecoder(bytes.NewReader(data))
+	if err := dec.Decode(&p.pt); err != nil {
+		return err
+	}
+	if err := dec.Decode(&isReadPartition); err != nil {
+		return err
+	}
+	if err := dec.Decode(&d); err != nil {
+		return err
+	}
+	if isReadPartition {
+		p.rreq = &sppb.ReadRequest{}
+		err = proto.Unmarshal(d, p.rreq)
+	} else {
+		p.qreq = &sppb.ExecuteSqlRequest{}
+		err = proto.Unmarshal(d, p.qreq)
+	}
+	return err
+}
diff --git a/vendor/cloud.google.com/go/spanner/batch_test.go b/vendor/cloud.google.com/go/spanner/batch_test.go
new file mode 100644
index 0000000..e30c140
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/batch_test.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spanner
+
+import (
+	"testing"
+	"time"
+
+	sppb "google.golang.org/genproto/googleapis/spanner/v1"
+)
+
+func TestPartitionRoundTrip(t *testing.T) {
+	t.Parallel()
+	for i, want := range []Partition{
+		{rreq: &sppb.ReadRequest{Table: "t"}},
+		{qreq: &sppb.ExecuteSqlRequest{Sql: "sql"}},
+	} {
+		got := serdesPartition(t, i, &want)
+		if !testEqual(got, want) {
+			t.Errorf("got: %#v\nwant:%#v", got, want)
+		}
+	}
+}
+
+func TestBROTIDRoundTrip(t *testing.T) {
+	t.Parallel()
+	tm := time.Now()
+	want := BatchReadOnlyTransactionID{
+		tid: []byte("tid"),
+		sid: "sid",
+		rts: tm,
+	}
+	data, err := want.MarshalBinary()
+	if err != nil {
+		t.Fatal(err)
+	}
+	var got BatchReadOnlyTransactionID
+	if err := got.UnmarshalBinary(data); err != nil {
+		t.Fatal(err)
+	}
+	if !testEqual(got, want) {
+		t.Errorf("got: %#v\nwant:%#v", got, want)
+	}
+}
+
+// serdesPartition is a helper that serializes a Partition and then
+// deserializes it again.
+func serdesPartition(t *testing.T, i int, p1 *Partition) (p2 Partition) {
+	var (
+		data []byte
+		err  error
+	)
+	if data, err = p1.MarshalBinary(); err != nil {
+		t.Fatalf("#%d: encoding failed %v", i, err)
+	}
+	if err = p2.UnmarshalBinary(data); err != nil {
+		t.Fatalf("#%d: decoding failed %v", i, err)
+	}
+	return p2
+}
diff --git a/vendor/cloud.google.com/go/spanner/client.go b/vendor/cloud.google.com/go/spanner/client.go
new file mode 100644
index 0000000..eaf16fa
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/client.go
@@ -0,0 +1,442 @@
+/*
+Copyright 2017 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spanner
+
+import (
+	"fmt"
+	"log"
+	"regexp"
+	"sync/atomic"
+	"time"
+
+	"cloud.google.com/go/internal/version"
+	"golang.org/x/net/context"
+	"google.golang.org/api/option"
+	gtransport "google.golang.org/api/transport/grpc"
+	sppb "google.golang.org/genproto/googleapis/spanner/v1"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+)
+
+const (
+	endpoint = "spanner.googleapis.com:443"
+
+	// resourcePrefixHeader is the name of the metadata header used to indicate
+	// the resource being operated on.
+	resourcePrefixHeader = "google-cloud-resource-prefix"
+	// xGoogHeaderKey is the name of the metadata header used to indicate client
+	// information.
+	xGoogHeaderKey = "x-goog-api-client"
+)
+
+const (
+	// Scope is the scope for Cloud Spanner Data API.
+	Scope = "https://www.googleapis.com/auth/spanner.data"
+
+	// AdminScope is the scope for Cloud Spanner Admin APIs.
+	AdminScope = "https://www.googleapis.com/auth/spanner.admin"
+)
+
+var (
+	validDBPattern = regexp.MustCompile("^projects/[^/]+/instances/[^/]+/databases/[^/]+$")
+	xGoogHeaderVal = fmt.Sprintf("gl-go/%s gccl/%s grpc/%s", version.Go(), version.Repo, grpc.Version)
+)
+
+func validDatabaseName(db string) error {
+	if matched := validDBPattern.MatchString(db); !matched {
+		return fmt.Errorf("database name %q should conform to pattern %q",
+			db, validDBPattern.String())
+	}
+	return nil
+}
+
+// Client is a client for reading and writing data to a Cloud Spanner database. A
+// client is safe to use concurrently, except for its Close method.
+type Client struct {
+	// rr must be accessed through atomic operations.
+	rr       uint32
+	conns    []*grpc.ClientConn
+	clients  []sppb.SpannerClient
+	database string
+	// Metadata to be sent with each request.
+	md           metadata.MD
+	idleSessions *sessionPool
+}
+
+// ClientConfig contains the configuration options for a Client.
+type ClientConfig struct {
+	// NumChannels is the number of gRPC channels.
+	// If zero, a reasonable default is used based on the execution environment.
+	NumChannels int
+	co          []option.ClientOption
+	// SessionPoolConfig is the configuration for the session pool.
+	SessionPoolConfig
+}
+
+// errDial returns the error for a failed dial to Cloud Spanner.
+func errDial(ci int, err error) error {
+	e := toSpannerError(err).(*Error)
+	e.decorate(fmt.Sprintf("dialing fails for channel[%v]", ci))
+	return e
+}
+
+func contextWithOutgoingMetadata(ctx context.Context, md metadata.MD) context.Context {
+	existing, ok := metadata.FromOutgoingContext(ctx)
+	if ok {
+		md = metadata.Join(existing, md)
+	}
+	return metadata.NewOutgoingContext(ctx, md)
+}
+
+// NewClient creates a client to a database. A valid database name has the
+// form projects/PROJECT_ID/instances/INSTANCE_ID/databases/DATABASE_ID. It uses a default
+// configuration.
+func NewClient(ctx context.Context, database string, opts ...option.ClientOption) (*Client, error) {
+	return NewClientWithConfig(ctx, database, ClientConfig{}, opts...)
+}
+
+// NewClientWithConfig creates a client to a database. A valid database name has the
+// form projects/PROJECT_ID/instances/INSTANCE_ID/databases/DATABASE_ID.
+func NewClientWithConfig(ctx context.Context, database string, config ClientConfig, opts ...option.ClientOption) (c *Client, err error) {
+	ctx = traceStartSpan(ctx, "cloud.google.com/go/spanner.NewClient")
+	defer func() { traceEndSpan(ctx, err) }()
+
+	// Validate database path.
+ if err := validDatabaseName(database); err != nil { + return nil, err + } + c = &Client{ + database: database, + md: metadata.Pairs( + resourcePrefixHeader, database, + xGoogHeaderKey, xGoogHeaderVal), + } + allOpts := []option.ClientOption{ + option.WithEndpoint(endpoint), + option.WithScopes(Scope), + option.WithGRPCDialOption( + grpc.WithDefaultCallOptions( + grpc.MaxCallSendMsgSize(100<<20), + grpc.MaxCallRecvMsgSize(100<<20), + ), + ), + } + allOpts = append(allOpts, opts...) + // Prepare gRPC channels. + if config.NumChannels == 0 { + config.NumChannels = numChannels + } + // Default MaxOpened sessions + if config.MaxOpened == 0 { + config.MaxOpened = uint64(config.NumChannels * 100) + } + if config.MaxBurst == 0 { + config.MaxBurst = 10 + } + for i := 0; i < config.NumChannels; i++ { + conn, err := gtransport.Dial(ctx, allOpts...) + if err != nil { + return nil, errDial(i, err) + } + c.conns = append(c.conns, conn) + c.clients = append(c.clients, sppb.NewSpannerClient(conn)) + } + // Prepare session pool. + config.SessionPoolConfig.getRPCClient = func() (sppb.SpannerClient, error) { + // TODO: support more loadbalancing options. + return c.rrNext(), nil + } + sp, err := newSessionPool(database, config.SessionPoolConfig, c.md) + if err != nil { + c.Close() + return nil, err + } + c.idleSessions = sp + return c, nil +} + +// rrNext returns the next available Cloud Spanner RPC client in a round-robin manner. +func (c *Client) rrNext() sppb.SpannerClient { + return c.clients[atomic.AddUint32(&c.rr, 1)%uint32(len(c.clients))] +} + +// Close closes the client. +func (c *Client) Close() { + if c.idleSessions != nil { + c.idleSessions.close() + } + for _, conn := range c.conns { + conn.Close() + } +} + +// Single provides a read-only snapshot transaction optimized for the case +// where only a single read or query is needed. This is more efficient than +// using ReadOnlyTransaction() for a single read or query. +// +// Single will use a strong TimestampBound by default. Use +// ReadOnlyTransaction.WithTimestampBound to specify a different +// TimestampBound. A non-strong bound can be used to reduce latency, or +// "time-travel" to prior versions of the database, see the documentation of +// TimestampBound for details. +func (c *Client) Single() *ReadOnlyTransaction { + t := &ReadOnlyTransaction{singleUse: true, sp: c.idleSessions} + t.txReadOnly.txReadEnv = t + return t +} + +// ReadOnlyTransaction returns a ReadOnlyTransaction that can be used for +// multiple reads from the database. You must call Close() when the +// ReadOnlyTransaction is no longer needed to release resources on the server. +// +// ReadOnlyTransaction will use a strong TimestampBound by default. Use +// ReadOnlyTransaction.WithTimestampBound to specify a different +// TimestampBound. A non-strong bound can be used to reduce latency, or +// "time-travel" to prior versions of the database, see the documentation of +// TimestampBound for details. +func (c *Client) ReadOnlyTransaction() *ReadOnlyTransaction { + t := &ReadOnlyTransaction{ + singleUse: false, + sp: c.idleSessions, + txReadyOrClosed: make(chan struct{}), + } + t.txReadOnly.txReadEnv = t + return t +} + +// BatchReadOnlyTransaction returns a BatchReadOnlyTransaction that can be used +// for partitioned reads or queries from a snapshot of the database. This is +// useful in batch processing pipelines where one wants to divide the work of +// reading from the database across multiple machines. 
+//
+// Note: This transaction does not use the underlying session pool but creates a
+// new session each time, and the session is reused across clients.
+//
+// You should call Close() once the txn is no longer needed on the local
+// client, and call Cleanup() when the txn is finished for all clients, to free
+// the session.
+func (c *Client) BatchReadOnlyTransaction(ctx context.Context, tb TimestampBound) (*BatchReadOnlyTransaction, error) {
+	var (
+		tx  transactionID
+		rts time.Time
+		s   *session
+		sh  *sessionHandle
+		err error
+	)
+	defer func() {
+		if err != nil && sh != nil {
+			e := runRetryable(ctx, func(ctx context.Context) error {
+				_, e := s.client.DeleteSession(ctx, &sppb.DeleteSessionRequest{Name: s.getID()})
+				return e
+			})
+			if e != nil {
+				log.Printf("Failed to delete session %v. Error: %v", s.getID(), e)
+			}
+		}
+	}()
+	// create session
+	sc := c.rrNext()
+	err = runRetryable(ctx, func(ctx context.Context) error {
+		sid, e := sc.CreateSession(ctx, &sppb.CreateSessionRequest{Database: c.database})
+		if e != nil {
+			return e
+		}
+		// If no error, construct the new session.
+		s = &session{valid: true, client: sc, id: sid.Name, createTime: time.Now(), md: c.md}
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	sh = &sessionHandle{session: s}
+	// begin transaction
+	err = runRetryable(contextWithOutgoingMetadata(ctx, sh.getMetadata()), func(ctx context.Context) error {
+		res, e := sh.getClient().BeginTransaction(ctx, &sppb.BeginTransactionRequest{
+			Session: sh.getID(),
+			Options: &sppb.TransactionOptions{
+				Mode: &sppb.TransactionOptions_ReadOnly_{
+					ReadOnly: buildTransactionOptionsReadOnly(tb, true),
+				},
+			},
+		})
+		if e != nil {
+			return e
+		}
+		tx = res.Id
+		if res.ReadTimestamp != nil {
+			rts = time.Unix(res.ReadTimestamp.Seconds, int64(res.ReadTimestamp.Nanos))
+		}
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	t := &BatchReadOnlyTransaction{
+		ReadOnlyTransaction: ReadOnlyTransaction{
+			tx:              tx,
+			txReadyOrClosed: make(chan struct{}),
+			state:           txActive,
+			sh:              sh,
+			rts:             rts,
+		},
+		ID: BatchReadOnlyTransactionID{
+			tid: tx,
+			sid: sh.getID(),
+			rts: rts,
+		},
+	}
+	t.txReadOnly.txReadEnv = t
+	return t, nil
+}
+
+// BatchReadOnlyTransactionFromID reconstructs a BatchReadOnlyTransaction from
+// a BatchReadOnlyTransactionID.
+func (c *Client) BatchReadOnlyTransactionFromID(tid BatchReadOnlyTransactionID) *BatchReadOnlyTransaction {
+	sc := c.rrNext()
+	s := &session{valid: true, client: sc, id: tid.sid, createTime: time.Now(), md: c.md}
+	sh := &sessionHandle{session: s}
+
+	t := &BatchReadOnlyTransaction{
+		ReadOnlyTransaction: ReadOnlyTransaction{
+			tx:              tid.tid,
+			txReadyOrClosed: make(chan struct{}),
+			state:           txActive,
+			sh:              sh,
+			rts:             tid.rts,
+		},
+		ID: tid,
+	}
+	t.txReadOnly.txReadEnv = t
+	return t
+}
+
+type transactionInProgressKey struct{}
+
+func checkNestedTxn(ctx context.Context) error {
+	if ctx.Value(transactionInProgressKey{}) != nil {
+		return spannerErrorf(codes.FailedPrecondition, "Cloud Spanner does not support nested transactions")
+	}
+	return nil
+}
+
+// ReadWriteTransaction executes a read-write transaction, with retries as
+// necessary.
+//
+// The function f will be called one or more times. It must not maintain
+// any state between calls.
+//
+// If the transaction cannot be committed or if f returns an IsAborted error,
+// ReadWriteTransaction will call f again. It will continue to call f until the
+// transaction can be committed or the Context times out or is cancelled. If f
+// returns an error other than IsAborted, ReadWriteTransaction will abort the
+// transaction and return the error.
+//
+// To limit the number of retries, set a deadline on the Context rather than
+// using a fixed limit on the number of attempts. ReadWriteTransaction will
+// retry as needed until that deadline is met.
+func (c *Client) ReadWriteTransaction(ctx context.Context, f func(context.Context, *ReadWriteTransaction) error) (commitTimestamp time.Time, err error) {
+	ctx = traceStartSpan(ctx, "cloud.google.com/go/spanner.ReadWriteTransaction")
+	defer func() { traceEndSpan(ctx, err) }()
+	if err := checkNestedTxn(ctx); err != nil {
+		return time.Time{}, err
+	}
+	var (
+		ts time.Time
+		sh *sessionHandle
+	)
+	err = runRetryableNoWrap(ctx, func(ctx context.Context) error {
+		var (
+			err error
+			t   *ReadWriteTransaction
+		)
+		if sh == nil || sh.getID() == "" || sh.getClient() == nil {
+			// Session handle hasn't been allocated or has been destroyed.
+			sh, err = c.idleSessions.takeWriteSession(ctx)
+			if err != nil {
+				// If session retrieval fails, just fail the transaction.
+				return err
+			}
+			t = &ReadWriteTransaction{
+				sh: sh,
+				tx: sh.getTransactionID(),
+			}
+		} else {
+			t = &ReadWriteTransaction{
+				sh: sh,
+			}
+		}
+		t.txReadOnly.txReadEnv = t
+		tracePrintf(ctx, map[string]interface{}{"transactionID": string(sh.getTransactionID())},
+			"Starting transaction attempt")
+		if err = t.begin(ctx); err != nil {
+			// Mask error from begin operation as retryable error.
+			return errRetry(err)
+		}
+		ts, err = t.runInTransaction(ctx, f)
+		return err
+	})
+	if sh != nil {
+		sh.recycle()
+	}
+	return ts, err
+}
+
+// applyOption controls the behavior of Client.Apply.
+type applyOption struct {
+	// If atLeastOnce == true, Client.Apply will execute the mutations on Cloud Spanner at least once.
+	atLeastOnce bool
+}
+
+// An ApplyOption is an optional argument to Apply.
+type ApplyOption func(*applyOption)
+
+// ApplyAtLeastOnce returns an ApplyOption that removes replay protection.
+//
+// With this option, Apply may attempt to apply mutations more than once; if
+// the mutations are not idempotent, this may lead to a failure being reported
+// when the mutation was applied more than once. For example, an insert may
+// fail with ALREADY_EXISTS even though the row did not exist before Apply was
+// called. For this reason, most users of the library will prefer not to use
+// this option. However, ApplyAtLeastOnce requires only a single RPC, whereas
+// Apply's default replay protection may require an additional RPC. So this
+// option may be appropriate for latency-sensitive and/or high-throughput blind
+// writing.
+func ApplyAtLeastOnce() ApplyOption {
+	return func(ao *applyOption) {
+		ao.atLeastOnce = true
+	}
+}
+
+// Apply applies a list of mutations atomically to the database.
+func (c *Client) Apply(ctx context.Context, ms []*Mutation, opts ...ApplyOption) (commitTimestamp time.Time, err error) {
+	ao := &applyOption{}
+	for _, opt := range opts {
+		opt(ao)
+	}
+	if !ao.atLeastOnce {
+		return c.ReadWriteTransaction(ctx, func(ctx context.Context, t *ReadWriteTransaction) error {
+			return t.BufferWrite(ms)
+		})
+	}
+
+	ctx = traceStartSpan(ctx, "cloud.google.com/go/spanner.Apply")
+	defer func() { traceEndSpan(ctx, err) }()
+	t := &writeOnlyTransaction{c.idleSessions}
+	return t.applyAtLeastOnce(ctx, ms...)
+}
diff --git a/vendor/cloud.google.com/go/spanner/client_test.go b/vendor/cloud.google.com/go/spanner/client_test.go
new file mode 100644
index 0000000..951d958
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/client_test.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2017 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spanner
+
+import (
+	"strings"
+	"testing"
+)
+
+// Test validDatabaseName()
+func TestValidDatabaseName(t *testing.T) {
+	validDbURI := "projects/spanner-cloud-test/instances/foo/databases/foodb"
+	invalidDbUris := []string{
+		// Completely wrong DB URI.
+		"foobarDB",
+		// Project ID contains "/".
+		"projects/spanner-cloud/test/instances/foo/databases/foodb",
+		// No instance ID.
+		"projects/spanner-cloud-test/instances//databases/foodb",
+	}
+	if err := validDatabaseName(validDbURI); err != nil {
+		t.Errorf("validDatabaseName(%q) = %v, want nil", validDbURI, err)
+	}
+	for _, d := range invalidDbUris {
+		if err, wantErr := validDatabaseName(d), "should conform to pattern"; !strings.Contains(err.Error(), wantErr) {
+			t.Errorf("validDatabaseName(%q) = %q, want error pattern %q", d, err, wantErr)
+		}
+	}
+}
+
+func TestReadOnlyTransactionClose(t *testing.T) {
+	// Closing a ReadOnlyTransaction shouldn't panic.
+	c := &Client{}
+	tx := c.ReadOnlyTransaction()
+	tx.Close()
+}
diff --git a/vendor/cloud.google.com/go/spanner/doc.go b/vendor/cloud.google.com/go/spanner/doc.go
new file mode 100644
index 0000000..97a3c9a
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/doc.go
@@ -0,0 +1,316 @@
+/*
+Copyright 2017 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package spanner provides a client for reading and writing to Cloud Spanner
+databases. See the packages under admin for clients that operate on databases
+and instances.
+
+Note: This package is in beta. Some backwards-incompatible changes may occur.
+
+See https://cloud.google.com/spanner/docs/getting-started/go/ for an introduction
+to Cloud Spanner and additional help on using this API.
+
+Creating a Client
+
+To start working with this package, create a client that refers to the database
+of interest:
+
+	ctx := context.Background()
+	client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D")
+	if err != nil {
+		// TODO: Handle error.
+	}
+	defer client.Close()
+
+Remember to close the client after use to free up the sessions in the session
+pool.
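As a sketch of how the pool-related knobs fit together (this example is illustrative and not part of the vendored doc.go; MinOpened and MaxIdle are the SessionPoolConfig fields referenced in batch.go above, NumChannels is the ClientConfig field from client.go, and the database path is hypothetical):

```go
package main

import (
	"golang.org/x/net/context"

	"cloud.google.com/go/spanner"
)

func main() {
	ctx := context.Background()
	// Hypothetical database path, for illustration only.
	const db = "projects/my-project/instances/my-instance/databases/my-db"
	// Keep a floor of warm sessions for latency-sensitive reads, and bound
	// how many unused sessions the pool retains.
	client, err := spanner.NewClientWithConfig(ctx, db, spanner.ClientConfig{
		NumChannels: 4, // per client.go above, MaxOpened defaults to NumChannels*100
		SessionPoolConfig: spanner.SessionPoolConfig{
			MinOpened: 10,
			MaxIdle:   5,
		},
	})
	if err != nil {
		// TODO: Handle error.
	}
	defer client.Close()
	_ = client // TODO: Use client.
}
```

Conversely, a client used only to execute pre-made partitions can set MinOpened and MaxIdle to zero, as the batch.go comment above suggests, since BatchReadOnlyTransaction manages its own session.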
+ + +Simple Reads and Writes + +Two Client methods, Apply and Single, work well for simple reads and writes. As +a quick introduction, here we write a new row to the database and read it back: + + _, err := client.Apply(ctx, []*spanner.Mutation{ + spanner.Insert("Users", + []string{"name", "email"}, + []interface{}{"alice", "a@example.com"})}) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Users", + spanner.Key{"alice"}, []string{"email"}) + if err != nil { + // TODO: Handle error. + } + +All the methods used above are discussed in more detail below. + + +Keys + +Every Cloud Spanner row has a unique key, composed of one or more columns. +Construct keys with a literal of type Key: + + key1 := spanner.Key{"alice"} + + +KeyRanges + +The keys of a Cloud Spanner table are ordered. You can specify ranges of keys +using the KeyRange type: + + kr1 := spanner.KeyRange{Start: key1, End: key2} + +By default, a KeyRange includes its start key but not its end key. Use +the Kind field to specify other boundary conditions: + + // include both keys + kr2 := spanner.KeyRange{Start: key1, End: key2, Kind: spanner.ClosedClosed} + + +KeySets + +A KeySet represents a set of keys. A single Key or KeyRange can act as a KeySet. Use +the KeySets function to build the union of several KeySets: + + ks1 := spanner.KeySets(key1, key2, kr1, kr2) + +AllKeys returns a KeySet that refers to all the keys in a table: + + ks2 := spanner.AllKeys() + + +Transactions + +All Cloud Spanner reads and writes occur inside transactions. There are two +types of transactions, read-only and read-write. Read-only transactions cannot +change the database, do not acquire locks, and may access either the current +database state or states in the past. Read-write transactions can read the +database before writing to it, and always apply to the most recent database +state. + + +Single Reads + +The simplest and fastest transaction is a ReadOnlyTransaction that supports a +single read operation. Use Client.Single to create such a transaction. You can +chain the call to Single with a call to a Read method. + +When you only want one row whose key you know, use ReadRow. Provide the table +name, key, and the columns you want to read: + + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"balance"}) + +Read multiple rows with the Read method. It takes a table name, KeySet, and list +of columns: + + iter := client.Single().Read(ctx, "Accounts", keyset1, columns) + +Read returns a RowIterator. You can call the Do method on the iterator and pass +a callback: + + err := iter.Do(func(row *Row) error { + // TODO: use row + return nil + }) + +RowIterator also follows the standard pattern for the Google +Cloud Client Libraries: + + defer iter.Stop() + for { + row, err := iter.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: use row + } + +Always call Stop when you finish using an iterator this way, whether or not you +iterate to the end. (Failing to call Stop could lead you to exhaust the +database's session quota.) + +To read rows with an index, use ReadUsingIndex. + +Statements + +The most general form of reading uses SQL statements. 
Construct a Statement +with NewStatement, setting any parameters using the Statement's Params map: + + stmt := spanner.NewStatement("SELECT First, Last FROM SINGERS WHERE Last >= @start") + stmt.Params["start"] = "Dylan" + +You can also construct a Statement directly with a struct literal, providing +your own map of parameters. + +Use the Query method to run the statement and obtain an iterator: + + iter := client.Single().Query(ctx, stmt) + + +Rows + +Once you have a Row, via an iterator or a call to ReadRow, you can extract +column values in several ways. Pass in a pointer to a Go variable of the +appropriate type when you extract a value. + +You can extract by column position or name: + + err := row.Column(0, &name) + err = row.ColumnByName("balance", &balance) + +You can extract all the columns at once: + + err = row.Columns(&name, &balance) + +Or you can define a Go struct that corresponds to your columns, and extract +into that: + + var s struct { Name string; Balance int64 } + err = row.ToStruct(&s) + + +For Cloud Spanner columns that may contain NULL, use one of the NullXXX types, +like NullString: + + var ns spanner.NullString + if err := row.Column(0, &ns); err != nil { + // TODO: Handle error. + } + if ns.Valid { + fmt.Println(ns.StringVal) + } else { + fmt.Println("column is NULL") + } + + +Multiple Reads + +To perform more than one read in a transaction, use ReadOnlyTransaction: + + txn := client.ReadOnlyTransaction() + defer txn.Close() + iter := txn.Query(ctx, stmt1) + // ... + iter = txn.Query(ctx, stmt2) + // ... + +You must call Close when you are done with the transaction. + + +Timestamps and Timestamp Bounds + +Cloud Spanner read-only transactions conceptually perform all their reads at a +single moment in time, called the transaction's read timestamp. Once a read has +started, you can call ReadOnlyTransaction's Timestamp method to obtain the read +timestamp. + +By default, a transaction will pick the most recent time (a time where all +previously committed transactions are visible) for its reads. This provides the +freshest data, but may involve some delay. You can often get a quicker response +if you are willing to tolerate "stale" data. You can control the read timestamp +selected by a transaction by calling the WithTimestampBound method on the +transaction before using it. For example, to perform a query on data that is at +most one minute stale, use + + client.Single(). + WithTimestampBound(spanner.MaxStaleness(1*time.Minute)). + Query(ctx, stmt) + +See the documentation of TimestampBound for more details. + + +Mutations + +To write values to a Cloud Spanner database, construct a Mutation. The spanner +package has functions for inserting, updating and deleting rows. Except for the +Delete methods, which take a Key or KeyRange, each mutation-building function +comes in three varieties. 
+ +One takes lists of columns and values along with the table name: + + m1 := spanner.Insert("Users", + []string{"name", "email"}, + []interface{}{"alice", "a@example.com"}) + +One takes a map from column names to values: + + m2 := spanner.InsertMap("Users", map[string]interface{}{ + "name": "alice", + "email": "a@example.com", + }) + +And the third accepts a struct value, and determines the columns from the +struct field names: + + type User struct { Name, Email string } + u := User{Name: "alice", Email: "a@example.com"} + m3, err := spanner.InsertStruct("Users", u) + + +Writes + +To apply a list of mutations to the database, use Apply: + + _, err := client.Apply(ctx, []*spanner.Mutation{m1, m2, m3}) + +If you need to read before writing in a single transaction, use a +ReadWriteTransaction. ReadWriteTransactions may abort and need to be retried. +You pass in a function to ReadWriteTransaction, and the client will handle the +retries automatically. Use the transaction's BufferWrite method to buffer +mutations, which will all be executed at the end of the transaction: + + _, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, txn *spanner.ReadWriteTransaction) error { + var balance int64 + row, err := txn.ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"balance"}) + if err != nil { + // This function will be called again if this is an IsAborted error. + return err + } + if err := row.Column(0, &balance); err != nil { + return err + } + + if balance <= 10 { + return errors.New("insufficient funds in account") + } + balance -= 10 + m := spanner.Update("Accounts", []string{"user", "balance"}, []interface{}{"alice", balance}) + txn.BufferWrite([]*spanner.Mutation{m}) + + // The buffered mutation will be committed. If the commit + // fails with an IsAborted error, this function will be called + // again. + return nil + }) + +Tracing + +This client has been instrumented to use OpenCensus tracing (http://opencensus.io). +To enable tracing, see "Enabling Tracing for a Program" at +https://godoc.org/go.opencensus.io/trace. OpenCensus tracing requires Go 1.8 or higher. + +Authentication + +See examples of authorization and authentication at +https://godoc.org/cloud.google.com/go#pkg-examples. +*/ +package spanner // import "cloud.google.com/go/spanner" diff --git a/vendor/cloud.google.com/go/spanner/errors.go b/vendor/cloud.google.com/go/spanner/errors.go new file mode 100644 index 0000000..9b53bde --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/errors.go @@ -0,0 +1,115 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "fmt" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// Error is the structured error returned by Cloud Spanner client. +type Error struct { + // Code is the canonical error code for describing the nature of a + // particular error. + Code codes.Code + // Desc explains more details of the error. 
+	Desc string
+	// trailers are the trailers returned in the response, if any.
+	trailers metadata.MD
+}
+
+// Error implements the error interface.
+func (e *Error) Error() string {
+	if e == nil {
+		return "spanner: OK"
+	}
+	return fmt.Sprintf("spanner: code = %q, desc = %q", e.Code, e.Desc)
+}
+
+// decorate decorates an existing spanner.Error with more information.
+func (e *Error) decorate(info string) {
+	e.Desc = fmt.Sprintf("%v, %v", info, e.Desc)
+}
+
+// spannerErrorf generates a *spanner.Error with the given error code and
+// description.
+func spannerErrorf(ec codes.Code, format string, args ...interface{}) error {
+	return &Error{
+		Code: ec,
+		Desc: fmt.Sprintf(format, args...),
+	}
+}
+
+// toSpannerError converts a general Go error to *spanner.Error.
+func toSpannerError(err error) error {
+	return toSpannerErrorWithMetadata(err, nil)
+}
+
+// toSpannerErrorWithMetadata converts a general Go error and gRPC trailers to
+// *spanner.Error.
+// Note: this modifies the original error if trailers aren't nil.
+func toSpannerErrorWithMetadata(err error, trailers metadata.MD) error {
+	if err == nil {
+		return nil
+	}
+	if se, ok := err.(*Error); ok {
+		if trailers != nil {
+			se.trailers = metadata.Join(se.trailers, trailers)
+		}
+		return se
+	}
+	switch {
+	case err == context.DeadlineExceeded:
+		return &Error{codes.DeadlineExceeded, err.Error(), trailers}
+	case err == context.Canceled:
+		return &Error{codes.Canceled, err.Error(), trailers}
+	case grpc.Code(err) == codes.Unknown:
+		return &Error{codes.Unknown, err.Error(), trailers}
+	default:
+		return &Error{grpc.Code(err), grpc.ErrorDesc(err), trailers}
+	}
+}
+
+// ErrCode extracts the canonical error code from a Go error.
+func ErrCode(err error) codes.Code {
+	se, ok := toSpannerError(err).(*Error)
+	if !ok {
+		return codes.Unknown
+	}
+	return se.Code
+}
+
+// ErrDesc extracts the Cloud Spanner error description from a Go error.
+func ErrDesc(err error) string {
+	se, ok := toSpannerError(err).(*Error)
+	if !ok {
+		return err.Error()
+	}
+	return se.Desc
+}
+
+// errTrailers extracts the gRPC trailers from a Go error, if present.
+func errTrailers(err error) metadata.MD {
+	se, ok := err.(*Error)
+	if !ok {
+		return nil
+	}
+	return se.trailers
+}
diff --git a/vendor/cloud.google.com/go/spanner/errors_test.go b/vendor/cloud.google.com/go/spanner/errors_test.go
new file mode 100644
index 0000000..1385ef8
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/errors_test.go
@@ -0,0 +1,44 @@
+/*
+Copyright 2017 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spanner
+
+import (
+	"errors"
+	"testing"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+func TestToSpannerError(t *testing.T) {
+	for _, test := range []struct {
+		err      error
+		wantCode codes.Code
+	}{
+		{errors.New("wha?"), codes.Unknown},
+		{context.Canceled, codes.Canceled},
+		{context.DeadlineExceeded, codes.DeadlineExceeded},
+		{status.Errorf(codes.ResourceExhausted, "so tired"), codes.ResourceExhausted},
+		{spannerErrorf(codes.InvalidArgument, "bad"), codes.InvalidArgument},
+	} {
+		err := toSpannerError(test.err)
+		if got, want := err.(*Error).Code, test.wantCode; got != want {
+			t.Errorf("%v: got %s, want %s", test.err, got, want)
+		}
+	}
+}
diff --git a/vendor/cloud.google.com/go/spanner/examples_test.go b/vendor/cloud.google.com/go/spanner/examples_test.go
new file mode 100644
index 0000000..ce9f8f5
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/examples_test.go
@@ -0,0 +1,640 @@
+/*
+Copyright 2017 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spanner_test
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"cloud.google.com/go/spanner"
+	"golang.org/x/net/context"
+	"google.golang.org/api/iterator"
+	sppb "google.golang.org/genproto/googleapis/spanner/v1"
+)
+
+func ExampleNewClient() {
+	ctx := context.Background()
+	const myDB = "projects/my-project/instances/my-instance/databases/my-db"
+	client, err := spanner.NewClient(ctx, myDB)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	_ = client // TODO: Use client.
+}
+
+const myDB = "projects/my-project/instances/my-instance/databases/my-db"
+
+func ExampleNewClientWithConfig() {
+	ctx := context.Background()
+	const myDB = "projects/my-project/instances/my-instance/databases/my-db"
+	client, err := spanner.NewClientWithConfig(ctx, myDB, spanner.ClientConfig{
+		NumChannels: 10,
+	})
+	if err != nil {
+		// TODO: Handle error.
+	}
+	_ = client     // TODO: Use client.
+	client.Close() // Close client when done.
+}
+
+func ExampleClient_Single() {
+	ctx := context.Background()
+	client, err := spanner.NewClient(ctx, myDB)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	iter := client.Single().Query(ctx, spanner.NewStatement("SELECT FirstName FROM Singers"))
+	_ = iter // TODO: iterate using Next or Do.
+}
+
+func ExampleClient_ReadOnlyTransaction() {
+	ctx := context.Background()
+	client, err := spanner.NewClient(ctx, myDB)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	t := client.ReadOnlyTransaction()
+	defer t.Close()
+	// TODO: Read with t using Read, ReadRow, ReadUsingIndex, or Query.
+}
+
+func ExampleClient_ReadWriteTransaction() {
+	ctx := context.Background()
+	client, err := spanner.NewClient(ctx, myDB)
+	if err != nil {
+		// TODO: Handle error.
+ } + _, err = client.ReadWriteTransaction(ctx, func(ctx context.Context, txn *spanner.ReadWriteTransaction) error { + var balance int64 + row, err := txn.ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"balance"}) + if err != nil { + // This function will be called again if this is an + // IsAborted error. + return err + } + if err := row.Column(0, &balance); err != nil { + return err + } + + if balance <= 10 { + return errors.New("insufficient funds in account") + } + balance -= 10 + m := spanner.Update("Accounts", []string{"user", "balance"}, []interface{}{"alice", balance}) + return txn.BufferWrite([]*spanner.Mutation{m}) + // The buffered mutation will be committed. If the commit + // fails with an IsAborted error, this function will be called + // again. + }) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleUpdate() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + _, err = client.ReadWriteTransaction(ctx, func(ctx context.Context, txn *spanner.ReadWriteTransaction) error { + row, err := txn.ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"balance"}) + if err != nil { + return err + } + var balance int64 + if err := row.Column(0, &balance); err != nil { + return err + } + return txn.BufferWrite([]*spanner.Mutation{ + spanner.Update("Accounts", []string{"user", "balance"}, []interface{}{"alice", balance + 10}), + }) + }) + if err != nil { + // TODO: Handle error. + } +} + +// This example is the same as the one for Update, except for the use of UpdateMap. +func ExampleUpdateMap() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + _, err = client.ReadWriteTransaction(ctx, func(ctx context.Context, txn *spanner.ReadWriteTransaction) error { + row, err := txn.ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"balance"}) + if err != nil { + return err + } + var balance int64 + if err := row.Column(0, &balance); err != nil { + return err + } + return txn.BufferWrite([]*spanner.Mutation{ + spanner.UpdateMap("Accounts", map[string]interface{}{ + "user": "alice", + "balance": balance + 10, + }), + }) + }) + if err != nil { + // TODO: Handle error. + } +} + +// This example is the same as the one for Update, except for the use of UpdateStruct. +func ExampleUpdateStruct() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + type account struct { + User string `spanner:"user"` + Balance int64 `spanner:"balance"` + } + _, err = client.ReadWriteTransaction(ctx, func(ctx context.Context, txn *spanner.ReadWriteTransaction) error { + row, err := txn.ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"balance"}) + if err != nil { + return err + } + var balance int64 + if err := row.Column(0, &balance); err != nil { + return err + } + m, err := spanner.UpdateStruct("Accounts", account{ + User: "alice", + Balance: balance + 10, + }) + if err != nil { + return err + } + return txn.BufferWrite([]*spanner.Mutation{m}) + }) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_Apply() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + m := spanner.Update("Users", []string{"name", "email"}, []interface{}{"alice", "a@example.com"}) + _, err = client.Apply(ctx, []*spanner.Mutation{m}) + if err != nil { + // TODO: Handle error. 
+ } +} + +func ExampleInsert() { + m := spanner.Insert("Users", []string{"name", "email"}, []interface{}{"alice", "a@example.com"}) + _ = m // TODO: use with Client.Apply or in a ReadWriteTransaction. +} + +func ExampleInsertMap() { + m := spanner.InsertMap("Users", map[string]interface{}{ + "name": "alice", + "email": "a@example.com", + }) + _ = m // TODO: use with Client.Apply or in a ReadWriteTransaction. +} + +func ExampleInsertStruct() { + type User struct { + Name, Email string + } + u := User{Name: "alice", Email: "a@example.com"} + m, err := spanner.InsertStruct("Users", u) + if err != nil { + // TODO: Handle error. + } + _ = m // TODO: use with Client.Apply or in a ReadWriteTransaction. +} + +func ExampleDelete() { + m := spanner.Delete("Users", spanner.Key{"alice"}) + _ = m // TODO: use with Client.Apply or in a ReadWriteTransaction. +} + +func ExampleDelete_keyRange() { + m := spanner.Delete("Users", spanner.KeyRange{ + Start: spanner.Key{"alice"}, + End: spanner.Key{"bob"}, + Kind: spanner.ClosedClosed, + }) + _ = m // TODO: use with Client.Apply or in a ReadWriteTransaction. +} + +func ExampleRowIterator_Next() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + iter := client.Single().Query(ctx, spanner.NewStatement("SELECT FirstName FROM Singers")) + defer iter.Stop() + for { + row, err := iter.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + var firstName string + if err := row.Column(0, &firstName); err != nil { + // TODO: Handle error. + } + fmt.Println(firstName) + } +} + +func ExampleRowIterator_Do() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + iter := client.Single().Query(ctx, spanner.NewStatement("SELECT FirstName FROM Singers")) + err = iter.Do(func(r *spanner.Row) error { + var firstName string + if err := r.Column(0, &firstName); err != nil { + return err + } + fmt.Println(firstName) + return nil + }) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleRow_Size() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) + if err != nil { + // TODO: Handle error. + } + fmt.Println(row.Size()) // size is 2 +} + +func ExampleRow_ColumnName() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) + if err != nil { + // TODO: Handle error. + } + fmt.Println(row.ColumnName(1)) // prints "balance" +} + +func ExampleRow_ColumnIndex() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) + if err != nil { + // TODO: Handle error. + } + index, err := row.ColumnIndex("balance") + if err != nil { + // TODO: Handle error. + } + fmt.Println(index) +} + +func ExampleRow_ColumnNames() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. 
+ } + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) + if err != nil { + // TODO: Handle error. + } + fmt.Println(row.ColumnNames()) +} + +func ExampleRow_ColumnByName() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) + if err != nil { + // TODO: Handle error. + } + var balance int64 + if err := row.ColumnByName("balance", &balance); err != nil { + // TODO: Handle error. + } + fmt.Println(balance) +} + +func ExampleRow_Columns() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) + if err != nil { + // TODO: Handle error. + } + var name string + var balance int64 + if err := row.Columns(&name, &balance); err != nil { + // TODO: Handle error. + } + fmt.Println(name, balance) +} + +func ExampleRow_ToStruct() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) + if err != nil { + // TODO: Handle error. + } + + type Account struct { + Name string + Balance int64 + } + + var acct Account + if err := row.ToStruct(&acct); err != nil { + // TODO: Handle error. + } + fmt.Println(acct) +} + +func ExampleReadOnlyTransaction_Read() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + iter := client.Single().Read(ctx, "Users", + spanner.KeySets(spanner.Key{"alice"}, spanner.Key{"bob"}), + []string{"name", "email"}) + _ = iter // TODO: iterate using Next or Do. +} + +func ExampleReadOnlyTransaction_ReadUsingIndex() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + iter := client.Single().ReadUsingIndex(ctx, "Users", + "UsersByEmail", + spanner.KeySets(spanner.Key{"a@example.com"}, spanner.Key{"b@example.com"}), + []string{"name", "email"}) + _ = iter // TODO: iterate using Next or Do. +} + +func ExampleReadOnlyTransaction_ReadWithOptions() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + // Use an index, and limit to 100 rows at most. + iter := client.Single().ReadWithOptions(ctx, "Users", + spanner.KeySets(spanner.Key{"a@example.com"}, spanner.Key{"b@example.com"}), + []string{"name", "email"}, &spanner.ReadOptions{ + Index: "UsersByEmail", + Limit: 100, + }) + _ = iter // TODO: iterate using Next or Do. +} + +func ExampleReadOnlyTransaction_ReadRow() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Users", spanner.Key{"alice"}, + []string{"name", "email"}) + if err != nil { + // TODO: Handle error. + } + _ = row // TODO: use row +} + +func ExampleReadOnlyTransaction_Query() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + iter := client.Single().Query(ctx, spanner.NewStatement("SELECT FirstName FROM Singers")) + _ = iter // TODO: iterate using Next or Do. 
+} + +func ExampleNewStatement() { + stmt := spanner.NewStatement("SELECT FirstName, LastName FROM SINGERS WHERE LastName >= @start") + stmt.Params["start"] = "Dylan" + // TODO: Use stmt in Query. +} + +func ExampleNewStatement_structLiteral() { + stmt := spanner.Statement{ + SQL: "SELECT FirstName, LastName FROM SINGERS WHERE LastName >= @start", + Params: map[string]interface{}{"start": "Dylan"}, + } + _ = stmt // TODO: Use stmt in Query. +} + +func ExampleReadOnlyTransaction_Timestamp() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + txn := client.Single() + row, err := txn.ReadRow(ctx, "Users", spanner.Key{"alice"}, + []string{"name", "email"}) + if err != nil { + // TODO: Handle error. + } + readTimestamp, err := txn.Timestamp() + if err != nil { + // TODO: Handle error. + } + fmt.Println("read happened at", readTimestamp) + _ = row // TODO: use row +} + +func ExampleReadOnlyTransaction_WithTimestampBound() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + txn := client.Single().WithTimestampBound(spanner.MaxStaleness(30 * time.Second)) + row, err := txn.ReadRow(ctx, "Users", spanner.Key{"alice"}, []string{"name", "email"}) + if err != nil { + // TODO: Handle error. + } + _ = row // TODO: use row + readTimestamp, err := txn.Timestamp() + if err != nil { + // TODO: Handle error. + } + fmt.Println("read happened at", readTimestamp) +} + +func ExampleGenericColumnValue_Decode() { + // In real applications, rows can be retrieved by methods like client.Single().ReadRow(). + row, err := spanner.NewRow([]string{"intCol", "strCol"}, []interface{}{42, "my-text"}) + if err != nil { + // TODO: Handle error. + } + for i := 0; i < row.Size(); i++ { + var col spanner.GenericColumnValue + if err := row.Column(i, &col); err != nil { + // TODO: Handle error. + } + switch col.Type.Code { + case sppb.TypeCode_INT64: + var v int64 + if err := col.Decode(&v); err != nil { + // TODO: Handle error. + } + fmt.Println("int", v) + case sppb.TypeCode_STRING: + var v string + if err := col.Decode(&v); err != nil { + // TODO: Handle error. + } + fmt.Println("string", v) + } + } + // Output: + // int 42 + // string my-text +} + +func ExampleClient_BatchReadOnlyTransaction() { + ctx := context.Background() + var ( + client *spanner.Client + txn *spanner.BatchReadOnlyTransaction + err error + ) + if client, err = spanner.NewClient(ctx, myDB); err != nil { + // TODO: Handle error. + } + defer client.Close() + if txn, err = client.BatchReadOnlyTransaction(ctx, spanner.StrongRead()); err != nil { + // TODO: Handle error. + } + defer txn.Close() + + // Singer represents the elements in a row from the Singers table. + type Singer struct { + SingerID int64 + FirstName string + LastName string + SingerInfo []byte + } + stmt := spanner.Statement{SQL: "SELECT * FROM Singers;"} + partitions, err := txn.PartitionQuery(ctx, stmt, spanner.PartitionOptions{}) + if err != nil { + // TODO: Handle error. + } + // Note: here we use multiple goroutines, but you should use separate processes/machines. + wg := sync.WaitGroup{} + for i, p := range partitions { + wg.Add(1) + go func(i int, p *spanner.Partition) { + defer wg.Done() + iter := txn.Execute(ctx, p) + defer iter.Stop() + for { + row, err := iter.Next() + if err == iterator.Done { + break + } else if err != nil { + // TODO: Handle error. + } + var s Singer + if err := row.ToStruct(&s); err != nil { + // TODO: Handle error. 
+ } + _ = s // TODO: Process the row. + } + }(i, p) + } + wg.Wait() +} + +func ExampleCommitTimestamp() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + + type account struct { + User string + Creation spanner.NullTime // time.Time can also be used if column is NOT NULL + } + + a := account{User: "Joe", Creation: spanner.NullTime{spanner.CommitTimestamp, true}} + m, err := spanner.InsertStruct("Accounts", a) + if err != nil { + // TODO: Handle error. + } + _, err = client.Apply(ctx, []*spanner.Mutation{m}, spanner.ApplyAtLeastOnce()) + if err != nil { + // TODO: Handle error. + } + + if r, e := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"Joe"}, []string{"User", "Creation"}); e != nil { + // TODO: Handle error. + } else { + var got account + if err := r.ToStruct(&got); err != nil { + // TODO: Handle error. + } + _ = got // TODO: Process row. + } +} diff --git a/vendor/cloud.google.com/go/spanner/go18.go b/vendor/cloud.google.com/go/spanner/go18.go new file mode 100644 index 0000000..81176a0 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/go18.go @@ -0,0 +1,59 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +package spanner + +import ( + "fmt" + + "go.opencensus.io/trace" + "golang.org/x/net/context" +) + +func traceStartSpan(ctx context.Context, name string) context.Context { + ctx, _ = trace.StartSpan(ctx, name) + return ctx +} + +func traceEndSpan(ctx context.Context, err error) { + span := trace.FromContext(ctx) + if err != nil { + // TODO(jba): Add error code to the status. + span.SetStatus(trace.Status{Message: err.Error()}) + } + span.End() +} + +func tracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) { + var attrs []trace.Attribute + for k, v := range attrMap { + var a trace.Attribute + switch v := v.(type) { + case string: + a = trace.StringAttribute(k, v) + case bool: + a = trace.BoolAttribute(k, v) + case int: + a = trace.Int64Attribute(k, int64(v)) + case int64: + a = trace.Int64Attribute(k, v) + default: + a = trace.StringAttribute(k, fmt.Sprintf("%#v", v)) + } + attrs = append(attrs, a) + } + trace.FromContext(ctx).Annotatef(attrs, format, args...) +} diff --git a/vendor/cloud.google.com/go/spanner/internal/testutil/mockclient.go b/vendor/cloud.google.com/go/spanner/internal/testutil/mockclient.go new file mode 100644 index 0000000..f137b48 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/internal/testutil/mockclient.go @@ -0,0 +1,383 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testutil + +import ( + "errors" + "fmt" + "sync" + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/status" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/empty" + proto3 "github.com/golang/protobuf/ptypes/struct" + pbt "github.com/golang/protobuf/ptypes/timestamp" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +// Action is a mocked RPC activity that MockCloudSpannerClient will take. +type Action struct { + Method string + Err error +} + +// MockCloudSpannerClient is a mock implementation of sppb.SpannerClient. +type MockCloudSpannerClient struct { + sppb.SpannerClient + + mu sync.Mutex + t *testing.T + // Live sessions on the client. + sessions map[string]bool + // Expected set of actions that will be executed by the client. + actions []Action + // Session ping history. + pings []string + // Injected error, will be returned by all APIs. + injErr map[string]error + // Client will not fail on any request. + nice bool + // Client will stall on any requests. + freezed chan struct{} +} + +// NewMockCloudSpannerClient creates new MockCloudSpannerClient instance. +func NewMockCloudSpannerClient(t *testing.T, acts ...Action) *MockCloudSpannerClient { + mc := &MockCloudSpannerClient{t: t, sessions: map[string]bool{}, injErr: map[string]error{}} + mc.SetActions(acts...) + // Produce a closed channel, so the default action of ready is to not block. + mc.Freeze() + mc.Unfreeze() + return mc +} + +// MakeNice makes this a nice mock which will not fail on any request. +func (m *MockCloudSpannerClient) MakeNice() { + m.mu.Lock() + defer m.mu.Unlock() + m.nice = true +} + +// MakeStrict makes this a strict mock which will fail on any unexpected request. +func (m *MockCloudSpannerClient) MakeStrict() { + m.mu.Lock() + defer m.mu.Unlock() + m.nice = false +} + +// InjectError injects a global error that will be returned by all calls to method +// regardless of the actions array. +func (m *MockCloudSpannerClient) InjectError(method string, err error) { + m.mu.Lock() + defer m.mu.Unlock() + m.injErr[method] = err +} + +// SetActions sets the new set of expected actions to MockCloudSpannerClient. +func (m *MockCloudSpannerClient) SetActions(acts ...Action) { + m.mu.Lock() + defer m.mu.Unlock() + m.actions = nil + for _, act := range acts { + m.actions = append(m.actions, act) + } +} + +// DumpPings dumps the ping history. +func (m *MockCloudSpannerClient) DumpPings() []string { + m.mu.Lock() + defer m.mu.Unlock() + return append([]string(nil), m.pings...) +} + +// DumpSessions dumps the internal session table. +func (m *MockCloudSpannerClient) DumpSessions() map[string]bool { + m.mu.Lock() + defer m.mu.Unlock() + st := map[string]bool{} + for s, v := range m.sessions { + st[s] = v + } + return st +} + +// CreateSession is a placeholder for SpannerClient.CreateSession. 
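+// It accepts only the database "mockdb", generates a unique session name,
+// and records the session in the client's in-memory session table.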
+func (m *MockCloudSpannerClient) CreateSession(c context.Context, r *sppb.CreateSessionRequest, opts ...grpc.CallOption) (*sppb.Session, error) {
+	m.ready()
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	if err := m.injErr["CreateSession"]; err != nil {
+		return nil, err
+	}
+	s := &sppb.Session{}
+	if r.Database != "mockdb" {
+		// Reject other databases.
+		return s, status.Errorf(codes.NotFound, "database not found: %v", r.Database)
+	}
+	// Generate & record session name.
+	s.Name = fmt.Sprintf("mockdb-%v", time.Now().UnixNano())
+	m.sessions[s.Name] = true
+	return s, nil
+}
+
+// GetSession is a placeholder for SpannerClient.GetSession.
+func (m *MockCloudSpannerClient) GetSession(c context.Context, r *sppb.GetSessionRequest, opts ...grpc.CallOption) (*sppb.Session, error) {
+	m.ready()
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	if err := m.injErr["GetSession"]; err != nil {
+		return nil, err
+	}
+	m.pings = append(m.pings, r.Name)
+	if _, ok := m.sessions[r.Name]; !ok {
+		return nil, status.Errorf(codes.NotFound, "Session not found: %v", r.Name)
+	}
+	return &sppb.Session{Name: r.Name}, nil
+}
+
+// DeleteSession is a placeholder for SpannerClient.DeleteSession.
+func (m *MockCloudSpannerClient) DeleteSession(c context.Context, r *sppb.DeleteSessionRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ready()
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	if err := m.injErr["DeleteSession"]; err != nil {
+		return nil, err
+	}
+	if _, ok := m.sessions[r.Name]; !ok {
+		// Session not found.
+		return &empty.Empty{}, status.Errorf(codes.NotFound, "Session not found: %v", r.Name)
+	}
+	// Delete session from in-memory table.
+	delete(m.sessions, r.Name)
+	return &empty.Empty{}, nil
+}
+
+// ExecuteStreamingSql is a mock implementation of SpannerClient.ExecuteStreamingSql.
+func (m *MockCloudSpannerClient) ExecuteStreamingSql(c context.Context, r *sppb.ExecuteSqlRequest, opts ...grpc.CallOption) (sppb.Spanner_ExecuteStreamingSqlClient, error) {
+	m.ready()
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	act, err := m.expectAction("ExecuteStreamingSql")
+	if err != nil {
+		return nil, err
+	}
+	wantReq := &sppb.ExecuteSqlRequest{
+		Session: "mocksession",
+		Transaction: &sppb.TransactionSelector{
+			Selector: &sppb.TransactionSelector_SingleUse{
+				SingleUse: &sppb.TransactionOptions{
+					Mode: &sppb.TransactionOptions_ReadOnly_{
+						ReadOnly: &sppb.TransactionOptions_ReadOnly{
+							TimestampBound: &sppb.TransactionOptions_ReadOnly_Strong{
+								Strong: true,
+							},
+							ReturnReadTimestamp: false,
+						},
+					},
+				},
+			},
+		},
+		Sql: "mockquery",
+		Params: &proto3.Struct{
+			Fields: map[string]*proto3.Value{"var1": &proto3.Value{Kind: &proto3.Value_StringValue{StringValue: "abc"}}},
+		},
+		ParamTypes: map[string]*sppb.Type{"var1": &sppb.Type{Code: sppb.TypeCode_STRING}},
+	}
+	if !proto.Equal(r, wantReq) {
+		return nil, fmt.Errorf("got query request: %v, want: %v", r, wantReq)
+	}
+	if act.Err != nil {
+		return nil, act.Err
+	}
+	return nil, errors.New("query never succeeds on mock client")
+}
+
+// StreamingRead is a placeholder for SpannerClient.StreamingRead.
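+// It checks the incoming ReadRequest against a fixed expected read of
+// table "t_mock" and, like ExecuteStreamingSql, never returns a usable
+// stream: reads are expected to fail on the mock client.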
+func (m *MockCloudSpannerClient) StreamingRead(c context.Context, r *sppb.ReadRequest, opts ...grpc.CallOption) (sppb.Spanner_StreamingReadClient, error) {
+	m.ready()
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	act, err := m.expectAction("StreamingRead", "StreamingIndexRead")
+	if err != nil {
+		return nil, err
+	}
+	wantReq := &sppb.ReadRequest{
+		Session: "mocksession",
+		Transaction: &sppb.TransactionSelector{
+			Selector: &sppb.TransactionSelector_SingleUse{
+				SingleUse: &sppb.TransactionOptions{
+					Mode: &sppb.TransactionOptions_ReadOnly_{
+						ReadOnly: &sppb.TransactionOptions_ReadOnly{
+							TimestampBound: &sppb.TransactionOptions_ReadOnly_Strong{
+								Strong: true,
+							},
+							ReturnReadTimestamp: false,
+						},
+					},
+				},
+			},
+		},
+		Table:   "t_mock",
+		Columns: []string{"col1", "col2"},
+		KeySet: &sppb.KeySet{
+			Keys: []*proto3.ListValue{
+				&proto3.ListValue{
+					Values: []*proto3.Value{
+						&proto3.Value{Kind: &proto3.Value_StringValue{StringValue: "foo"}},
+					},
+				},
+			},
+			Ranges: []*sppb.KeyRange{},
+			All:    false,
+		},
+	}
+	if act.Method == "StreamingIndexRead" {
+		wantReq.Index = "idx1"
+	}
+	if !proto.Equal(r, wantReq) {
+		return nil, fmt.Errorf("got query request: %v, want: %v", r, wantReq)
+	}
+	if act.Err != nil {
+		return nil, act.Err
+	}
+	return nil, errors.New("read never succeeds on mock client")
+}
+
+// BeginTransaction is a placeholder for SpannerClient.BeginTransaction.
+func (m *MockCloudSpannerClient) BeginTransaction(c context.Context, r *sppb.BeginTransactionRequest, opts ...grpc.CallOption) (*sppb.Transaction, error) {
+	m.ready()
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	if !m.nice {
+		act, err := m.expectAction("BeginTransaction")
+		if err != nil {
+			return nil, err
+		}
+		if act.Err != nil {
+			return nil, act.Err
+		}
+	}
+	resp := &sppb.Transaction{Id: []byte("transaction-1")}
+	if _, ok := r.Options.Mode.(*sppb.TransactionOptions_ReadOnly_); ok {
+		resp.ReadTimestamp = &pbt.Timestamp{Seconds: 3, Nanos: 4}
+	}
+	return resp, nil
+}
+
+// Commit is a placeholder for SpannerClient.Commit.
+func (m *MockCloudSpannerClient) Commit(c context.Context, r *sppb.CommitRequest, opts ...grpc.CallOption) (*sppb.CommitResponse, error) {
+	m.ready()
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	if !m.nice {
+		act, err := m.expectAction("Commit")
+		if err != nil {
+			return nil, err
+		}
+		if act.Err != nil {
+			return nil, act.Err
+		}
+	}
+	return &sppb.CommitResponse{CommitTimestamp: &pbt.Timestamp{Seconds: 1, Nanos: 2}}, nil
+}
+
+// Rollback is a placeholder for SpannerClient.Rollback.
+func (m *MockCloudSpannerClient) Rollback(c context.Context, r *sppb.RollbackRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ready()
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	if !m.nice {
+		act, err := m.expectAction("Rollback")
+		if err != nil {
+			return nil, err
+		}
+		if act.Err != nil {
+			return nil, act.Err
+		}
+	}
+	return nil, nil
+}
+
+// PartitionQuery is a placeholder for SpannerClient.PartitionQuery.
+func (m *MockCloudSpannerClient) PartitionQuery(ctx context.Context, r *sppb.PartitionQueryRequest, opts ...grpc.CallOption) (*sppb.PartitionResponse, error) {
+	m.ready()
+	return nil, errors.New("Unimplemented")
+}
+
+// PartitionRead is a placeholder for SpannerClient.PartitionRead.
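+// Like PartitionQuery, it is not implemented and always returns an error.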
+func (m *MockCloudSpannerClient) PartitionRead(ctx context.Context, r *sppb.PartitionReadRequest, opts ...grpc.CallOption) (*sppb.PartitionResponse, error) {
+	m.ready()
+	return nil, errors.New("Unimplemented")
+}
+
+func (m *MockCloudSpannerClient) expectAction(methods ...string) (Action, error) {
+	for _, me := range methods {
+		if err := m.injErr[me]; err != nil {
+			return Action{}, err
+		}
+	}
+	if len(m.actions) == 0 {
+		m.t.Fatalf("unexpected %v executed", methods)
+	}
+	act := m.actions[0]
+	m.actions = m.actions[1:]
+	for _, me := range methods {
+		if me == act.Method {
+			return act, nil
+		}
+	}
+	m.t.Fatalf("unexpected call of one of %v, want method %s", methods, act.Method)
+	return Action{}, nil
+}
+
+// Freeze stalls all requests.
+func (m *MockCloudSpannerClient) Freeze() {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	m.freezed = make(chan struct{})
+}
+
+// Unfreeze restores processing requests.
+func (m *MockCloudSpannerClient) Unfreeze() {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	close(m.freezed)
+}
+
+// CheckActionsConsumed checks that all actions have been consumed.
+func (m *MockCloudSpannerClient) CheckActionsConsumed() {
+	if len(m.actions) != 0 {
+		m.t.Fatalf("unconsumed mock client actions: %v", m.actions)
+	}
+}
+
+// ready checks conditions before executing requests.
+// TODO: add checks for injected errors and actions.
+func (m *MockCloudSpannerClient) ready() {
+	m.mu.Lock()
+	freezed := m.freezed
+	m.mu.Unlock()
+	// Check whether the client should be frozen.
+	<-freezed
+}
diff --git a/vendor/cloud.google.com/go/spanner/internal/testutil/mockserver.go b/vendor/cloud.google.com/go/spanner/internal/testutil/mockserver.go
new file mode 100644
index 0000000..aace72f
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/internal/testutil/mockserver.go
@@ -0,0 +1,243 @@
+/*
+Copyright 2017 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testutil
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+	"net"
+	"sync"
+	"testing"
+	"time"
+
+	"golang.org/x/net/context"
+
+	"github.com/golang/protobuf/ptypes/empty"
+	proto3 "github.com/golang/protobuf/ptypes/struct"
+	pbt "github.com/golang/protobuf/ptypes/timestamp"
+
+	sppb "google.golang.org/genproto/googleapis/spanner/v1"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+var (
+	// KvMeta is the Metadata for the mocked KV table.
+	KvMeta = sppb.ResultSetMetadata{
+		RowType: &sppb.StructType{
+			Fields: []*sppb.StructType_Field{
+				{
+					Name: "Key",
+					Type: &sppb.Type{Code: sppb.TypeCode_STRING},
+				},
+				{
+					Name: "Value",
+					Type: &sppb.Type{Code: sppb.TypeCode_STRING},
+				},
+			},
+		},
+	}
+)
+
+// MockCtlMsg encapsulates a PartialResultSet or error that might be sent to
+// the client.
+type MockCtlMsg struct {
+	// If ResumeToken == true, the mock server will generate a row with a
+	// resume token.
+	ResumeToken bool
+	// If Err != nil, the mock server will return the error in the RPC response.
+	Err error
+}
+
+// MockCloudSpanner is a mock implementation of the SpannerServer interface.
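+// A typical test starts the server, then dials its address and points a
+// Cloud Spanner client at the resulting connection (a sketch; the dial
+// options are the caller's choice):
+//
+//	mcs := NewMockCloudSpanner(t, time.Now())
+//	mcs.Serve()
+//	defer mcs.Stop()
+//	conn, err := grpc.Dial(mcs.Addr(), grpc.WithInsecure())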
+// TODO: make MockCloudSpanner a full-fledged Cloud Spanner implementation.
+type MockCloudSpanner struct {
+	sppb.SpannerServer
+
+	s      *grpc.Server
+	t      *testing.T
+	addr   string
+	msgs   chan MockCtlMsg
+	readTs time.Time
+	next   int
+
+	mu          sync.Mutex
+	nextSession int
+	sessions    map[string]*sppb.Session
+}
+
+// Addr returns the listening address of the mock server.
+func (m *MockCloudSpanner) Addr() string {
+	return m.addr
+}
+
+// AddMsg generates a new mocked row which can be received by the client.
+func (m *MockCloudSpanner) AddMsg(err error, resumeToken bool) {
+	msg := MockCtlMsg{
+		ResumeToken: resumeToken,
+		Err:         err,
+	}
+	if err == io.EOF {
+		close(m.msgs)
+	} else {
+		m.msgs <- msg
+	}
+}
+
+// Done signals an end to a mocked stream.
+func (m *MockCloudSpanner) Done() {
+	close(m.msgs)
+}
+
+// CreateSession is a placeholder for SpannerServer.CreateSession.
+func (m *MockCloudSpanner) CreateSession(c context.Context, r *sppb.CreateSessionRequest) (*sppb.Session, error) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	name := fmt.Sprintf("session-%d", m.nextSession)
+	m.nextSession++
+	s := &sppb.Session{Name: name}
+	m.sessions[name] = s
+	return s, nil
+}
+
+// GetSession is a placeholder for SpannerServer.GetSession.
+func (m *MockCloudSpanner) GetSession(c context.Context, r *sppb.GetSessionRequest) (*sppb.Session, error) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	if s, ok := m.sessions[r.Name]; ok {
+		return s, nil
+	}
+	return nil, status.Errorf(codes.NotFound, "not found")
+}
+
+// DeleteSession is a placeholder for SpannerServer.DeleteSession.
+func (m *MockCloudSpanner) DeleteSession(c context.Context, r *sppb.DeleteSessionRequest) (*empty.Empty, error) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	delete(m.sessions, r.Name)
+	return &empty.Empty{}, nil
+}
+
+// EncodeResumeToken returns a mock resume token encoding for a uint64 integer.
+func EncodeResumeToken(t uint64) []byte {
+	rt := make([]byte, 16)
+	binary.PutUvarint(rt, t)
+	return rt
+}
+
+// DecodeResumeToken decodes a mock resume token into a uint64 integer.
+func DecodeResumeToken(t []byte) (uint64, error) {
+	s, n := binary.Uvarint(t)
+	if n <= 0 {
+		return 0, fmt.Errorf("invalid resume token: %v", t)
+	}
+	return s, nil
+}
+
+// ExecuteStreamingSql is a mock implementation of SpannerServer.ExecuteStreamingSql.
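+// It recognizes exactly two queries: a read from t_unavailable, which fails
+// with codes.Unavailable, and the canonical t_mock query, which replays the
+// rows queued via AddMsg and honors any resume token in the request.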
+func (m *MockCloudSpanner) ExecuteStreamingSql(r *sppb.ExecuteSqlRequest, s sppb.Spanner_ExecuteStreamingSqlServer) error { + switch r.Sql { + case "SELECT * from t_unavailable": + return status.Errorf(codes.Unavailable, "mock table unavailable") + case "SELECT t.key key, t.value value FROM t_mock t": + if r.ResumeToken != nil { + s, err := DecodeResumeToken(r.ResumeToken) + if err != nil { + return err + } + m.next = int(s) + 1 + } + for { + msg, more := <-m.msgs + if !more { + break + } + if msg.Err == nil { + var rt []byte + if msg.ResumeToken { + rt = EncodeResumeToken(uint64(m.next)) + } + meta := KvMeta + meta.Transaction = &sppb.Transaction{ + ReadTimestamp: &pbt.Timestamp{ + Seconds: m.readTs.Unix(), + Nanos: int32(m.readTs.Nanosecond()), + }, + } + err := s.Send(&sppb.PartialResultSet{ + Metadata: &meta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: fmt.Sprintf("foo-%02d", m.next)}}, + {Kind: &proto3.Value_StringValue{StringValue: fmt.Sprintf("bar-%02d", m.next)}}, + }, + ResumeToken: rt, + }) + m.next = m.next + 1 + if err != nil { + return err + } + continue + } + return msg.Err + } + return nil + default: + return fmt.Errorf("unsupported SQL: %v", r.Sql) + } +} + +// StreamingRead is a placeholder for SpannerServer.StreamingRead. +func (m *MockCloudSpanner) StreamingRead(r *sppb.ReadRequest, s sppb.Spanner_StreamingReadServer) error { + return s.Send(&sppb.PartialResultSet{}) +} + +// Serve runs a MockCloudSpanner listening on a random localhost address. +func (m *MockCloudSpanner) Serve() { + m.s = grpc.NewServer() + if m.addr == "" { + m.addr = "localhost:0" + } + lis, err := net.Listen("tcp", m.addr) + if err != nil { + m.t.Fatalf("Failed to listen: %v", err) + } + _, port, err := net.SplitHostPort(lis.Addr().String()) + if err != nil { + m.t.Fatalf("Failed to parse listener address: %v", err) + } + sppb.RegisterSpannerServer(m.s, m) + m.addr = "localhost:" + port + go m.s.Serve(lis) +} + +// Stop terminates MockCloudSpanner and closes the serving port. +func (m *MockCloudSpanner) Stop() { + m.s.Stop() +} + +// NewMockCloudSpanner creates a new MockCloudSpanner instance. +func NewMockCloudSpanner(t *testing.T, ts time.Time) *MockCloudSpanner { + mcs := &MockCloudSpanner{ + t: t, + msgs: make(chan MockCtlMsg, 1000), + readTs: ts, + sessions: map[string]*sppb.Session{}, + } + return mcs +} diff --git a/vendor/cloud.google.com/go/spanner/key.go b/vendor/cloud.google.com/go/spanner/key.go new file mode 100644 index 0000000..7bb1ef5 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/key.go @@ -0,0 +1,398 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "bytes" + "fmt" + "time" + + "google.golang.org/grpc/codes" + + "cloud.google.com/go/civil" + proto3 "github.com/golang/protobuf/ptypes/struct" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// A Key can be either a Cloud Spanner row's primary key or a secondary index key. 
+// It is essentially an interface{} array, which represents a set of Cloud Spanner
+// columns. A Key can be used as:
+//
+// - A primary key which uniquely identifies a Cloud Spanner row.
+// - A secondary index key which maps to a set of Cloud Spanner rows indexed under it.
+// - An endpoint of primary key/secondary index ranges; see the KeyRange type.
+//
+// Rows that are identified by the Key type are outputs of read operations or targets of
+// delete operations in a mutation. Note that for the Insert/Update/InsertOrUpdate/Replace
+// mutation types, although they don't require an explicit primary key, the column list
+// provided must contain enough columns to comprise a primary key.
+//
+// Keys are easy to construct. For example, suppose you have a table with a
+// primary key of username and product ID. To make a key for this table:
+//
+//	key := spanner.Key{"john", 16}
+//
+// See the description of the Row and Mutation types for how Go types are
+// mapped to Cloud Spanner types. For convenience, the Key type supports a wide range
+// of Go types:
+// - int, int8, int16, int32, int64, and NullInt64 are mapped to Cloud Spanner's INT64 type.
+// - uint8, uint16 and uint32 are also mapped to Cloud Spanner's INT64 type.
+// - float32, float64, NullFloat64 are mapped to Cloud Spanner's FLOAT64 type.
+// - bool and NullBool are mapped to Cloud Spanner's BOOL type.
+// - []byte is mapped to Cloud Spanner's BYTES type.
+// - string and NullString are mapped to Cloud Spanner's STRING type.
+// - time.Time and NullTime are mapped to Cloud Spanner's TIMESTAMP type.
+// - civil.Date and NullDate are mapped to Cloud Spanner's DATE type.
+type Key []interface{}
+
+// errInvdKeyPartType returns an error for an unsupported key part type.
+func errInvdKeyPartType(part interface{}) error {
+	return spannerErrorf(codes.InvalidArgument, "key part has unsupported type %T", part)
+}
+
+// keyPartValue converts a part of the Key (which is a valid Cloud Spanner type)
+// into a proto3.Value. Used for encoding the Key type into protobuf.
+func keyPartValue(part interface{}) (pb *proto3.Value, err error) {
+	switch v := part.(type) {
+	case int:
+		pb, _, err = encodeValue(int64(v))
+	case int8:
+		pb, _, err = encodeValue(int64(v))
+	case int16:
+		pb, _, err = encodeValue(int64(v))
+	case int32:
+		pb, _, err = encodeValue(int64(v))
+	case uint8:
+		pb, _, err = encodeValue(int64(v))
+	case uint16:
+		pb, _, err = encodeValue(int64(v))
+	case uint32:
+		pb, _, err = encodeValue(int64(v))
+	case float32:
+		pb, _, err = encodeValue(float64(v))
+	case int64, float64, NullInt64, NullFloat64, bool, NullBool, []byte, string, NullString, time.Time, civil.Date, NullTime, NullDate:
+		pb, _, err = encodeValue(v)
+	default:
+		return nil, errInvdKeyPartType(v)
+	}
+	return pb, err
+}
+
+// proto converts a spanner.Key into a proto3.ListValue.
+func (key Key) proto() (*proto3.ListValue, error) {
+	lv := &proto3.ListValue{}
+	lv.Values = make([]*proto3.Value, 0, len(key))
+	for _, part := range key {
+		v, err := keyPartValue(part)
+		if err != nil {
+			return nil, err
+		}
+		lv.Values = append(lv.Values, v)
+	}
+	return lv, nil
+}
+
+// keySetProto lets a single Key act as a KeySet.
+func (key Key) keySetProto() (*sppb.KeySet, error) {
+	kp, err := key.proto()
+	if err != nil {
+		return nil, err
+	}
+	return &sppb.KeySet{Keys: []*proto3.ListValue{kp}}, nil
+}
+
+// String implements fmt.Stringer for Key. For string, []byte and NullString, it
+// prints the uninterpreted bytes of their contents, leaving the caller with the
+// opportunity to escape the output.
+func (key Key) String() string {
+	b := &bytes.Buffer{}
+	fmt.Fprint(b, "(")
+	for i, part := range []interface{}(key) {
+		if i != 0 {
+			fmt.Fprint(b, ",")
+		}
+		switch v := part.(type) {
+		case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, float32, float64, bool:
+			// Use %v to print numeric types and bool.
+			fmt.Fprintf(b, "%v", v)
+		case string:
+			fmt.Fprintf(b, "%q", v)
+		case []byte:
+			if v != nil {
+				fmt.Fprintf(b, "%q", v)
+			} else {
+				fmt.Fprint(b, "")
+			}
+		case NullInt64, NullFloat64, NullBool, NullString, NullTime, NullDate:
+			// The above types implement fmt.Stringer.
+			fmt.Fprintf(b, "%s", v)
+		case civil.Date:
+			fmt.Fprintf(b, "%q", v)
+		case time.Time:
+			fmt.Fprintf(b, "%q", v.Format(time.RFC3339Nano))
+		default:
+			fmt.Fprintf(b, "%v", v)
+		}
+	}
+	fmt.Fprint(b, ")")
+	return b.String()
+}
+
+// AsPrefix returns a KeyRange for all keys where k is the prefix.
+func (key Key) AsPrefix() KeyRange {
+	return KeyRange{
+		Start: key,
+		End:   key,
+		Kind:  ClosedClosed,
+	}
+}
+
+// KeyRangeKind describes the kind of interval represented by a KeyRange:
+// whether it is open or closed on the left and right.
+type KeyRangeKind int
+
+const (
+	// ClosedOpen is closed on the left and open on the right: the Start
+	// key is included, the End key is excluded.
+	ClosedOpen KeyRangeKind = iota
+
+	// ClosedClosed is closed on the left and the right: both keys are included.
+	ClosedClosed
+
+	// OpenClosed is open on the left and closed on the right: the Start
+	// key is excluded, the End key is included.
+	OpenClosed
+
+	// OpenOpen is open on the left and the right: neither key is included.
+	OpenOpen
+)
+
+// A KeyRange represents a range of rows in a table or index.
+//
+// A range has a Start key and an End key. The Kind field indicates whether
+// the Start and End keys are included in the range.
+//
+// For example, consider the following table definition:
+//
+//	CREATE TABLE UserEvents (
+//		UserName STRING(MAX),
+//		EventDate STRING(10),
+//	) PRIMARY KEY(UserName, EventDate);
+//
+// The following keys name rows in this table:
+//
+//	spanner.Key{"Bob", "2014-09-23"}
+//	spanner.Key{"Alfred", "2015-06-12"}
+//
+// Since the UserEvents table's PRIMARY KEY clause names two columns, each
+// UserEvents key has two elements; the first is the UserName, and the second
+// is the EventDate.
+//
+// Key ranges with multiple components are interpreted lexicographically by
+// component using the table or index key's declared sort order. For example,
+// the following range returns all events for user "Bob" that occurred in the
+// year 2015:
+//
+//	spanner.KeyRange{
+//		Start: spanner.Key{"Bob", "2015-01-01"},
+//		End:   spanner.Key{"Bob", "2015-12-31"},
+//		Kind:  ClosedClosed,
+//	}
+//
+// Start and end keys can omit trailing key components. This affects the
+// inclusion and exclusion of rows that exactly match the provided key
+// components: if the range is closed on the left (ClosedOpen or ClosedClosed),
+// rows that exactly match the provided components of the Start key are
+// included; if it is open on the left, they are excluded. The End key and
+// right boundary behave in the same fashion.
+//
+// For example, the following range includes all events for "Bob" that occurred
+// during and after the year 2000:
+//
+//	spanner.KeyRange{
+//		Start: spanner.Key{"Bob", "2000-01-01"},
+//		End:   spanner.Key{"Bob"},
+//		Kind:  ClosedClosed,
+//	}
+//
+// The next example retrieves all events for "Bob":
+//
+//	spanner.Key{"Bob"}.AsPrefix()
+//
+// To retrieve events before the year 2000:
+//
+//	spanner.KeyRange{
+//		Start: spanner.Key{"Bob"},
+//		End:   spanner.Key{"Bob", "2000-01-01"},
+//		Kind:  ClosedOpen,
+//	}
+//
+// Although we specified a Kind for this KeyRange, we didn't need to, because
+// the default is ClosedOpen. In later examples we'll omit Kind if it is
+// ClosedOpen.
+//
+// The following range includes all rows in a table or under an
+// index:
+//
+//	spanner.AllKeys()
+//
+// This range returns all users whose UserName begins with any
+// character from A to C:
+//
+//	spanner.KeyRange{
+//		Start: spanner.Key{"A"},
+//		End:   spanner.Key{"D"},
+//	}
+//
+// This range returns all users whose UserName begins with B:
+//
+//	spanner.KeyRange{
+//		Start: spanner.Key{"B"},
+//		End:   spanner.Key{"C"},
+//	}
+//
+// Key ranges honor column sort order. For example, suppose a table is defined
+// as follows:
+//
+//	CREATE TABLE DescendingSortedTable (
+//		Key INT64,
+//		...
+//	) PRIMARY KEY(Key DESC);
+//
+// The following range retrieves all rows with key values between 1 and 100
+// inclusive:
+//
+//	spanner.KeyRange{
+//		Start: spanner.Key{100},
+//		End:   spanner.Key{1},
+//		Kind:  ClosedClosed,
+//	}
+//
+// Note that 100 is passed as the start, and 1 is passed as the end, because
+// Key is a descending column in the schema.
+type KeyRange struct {
+	// Start specifies the left boundary of the key range; End specifies
+	// the right boundary of the key range.
+	Start, End Key
+
+	// Kind describes whether the boundaries of the key range include
+	// their keys.
+	Kind KeyRangeKind
+}
+
+// String implements fmt.Stringer for the KeyRange type.
+func (r KeyRange) String() string {
+	var left, right string
+	switch r.Kind {
+	case ClosedClosed:
+		left, right = "[", "]"
+	case ClosedOpen:
+		left, right = "[", ")"
+	case OpenClosed:
+		left, right = "(", "]"
+	case OpenOpen:
+		left, right = "(", ")"
+	default:
+		left, right = "?", "?"
+	}
+	return fmt.Sprintf("%s%s,%s%s", left, r.Start, r.End, right)
+}
+
+// proto converts KeyRange into sppb.KeyRange.
+func (r KeyRange) proto() (*sppb.KeyRange, error) {
+	var err error
+	var start, end *proto3.ListValue
+	pb := &sppb.KeyRange{}
+	if start, err = r.Start.proto(); err != nil {
+		return nil, err
+	}
+	if end, err = r.End.proto(); err != nil {
+		return nil, err
+	}
+	if r.Kind == ClosedClosed || r.Kind == ClosedOpen {
+		pb.StartKeyType = &sppb.KeyRange_StartClosed{StartClosed: start}
+	} else {
+		pb.StartKeyType = &sppb.KeyRange_StartOpen{StartOpen: start}
+	}
+	if r.Kind == ClosedClosed || r.Kind == OpenClosed {
+		pb.EndKeyType = &sppb.KeyRange_EndClosed{EndClosed: end}
+	} else {
+		pb.EndKeyType = &sppb.KeyRange_EndOpen{EndOpen: end}
+	}
+	return pb, nil
+}
+
+// keySetProto lets a KeyRange act as a KeySet.
+func (r KeyRange) keySetProto() (*sppb.KeySet, error) {
+	rp, err := r.proto()
+	if err != nil {
+		return nil, err
+	}
+	return &sppb.KeySet{Ranges: []*sppb.KeyRange{rp}}, nil
+}
+
+// A KeySet defines a collection of Cloud Spanner keys and/or key ranges. All the
+// keys are expected to be in the same table or index. The keys need not be sorted in
+// any particular way.
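+//
+// For example, a KeySet covering two specific users plus every key under a
+// third prefix can be built with the KeySets helper described below (a sketch):
+//
+//	ks := spanner.KeySets(
+//		spanner.Key{"alice"},
+//		spanner.Key{"bob"},
+//		spanner.Key{"c"}.AsPrefix(),
+//	)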
+//
+// An individual Key can act as a KeySet, as can a KeyRange. Use the KeySets function
+// to create a KeySet consisting of multiple Keys and KeyRanges. To obtain an empty
+// KeySet, call KeySets with no arguments.
+//
+// If the same key is specified multiple times in the set (for example if two
+// ranges, two keys, or a key and a range overlap), the Cloud Spanner backend behaves
+// as if the key were only specified once.
+type KeySet interface {
+	keySetProto() (*sppb.KeySet, error)
+}
+
+// AllKeys returns a KeySet that represents all Keys of a table or an index.
+func AllKeys() KeySet {
+	return all{}
+}
+
+type all struct{}
+
+func (all) keySetProto() (*sppb.KeySet, error) {
+	return &sppb.KeySet{All: true}, nil
+}
+
+// KeySets returns the union of the KeySets. If any of the KeySets is AllKeys, then
+// the resulting KeySet will be equivalent to AllKeys.
+func KeySets(keySets ...KeySet) KeySet {
+	u := make(union, len(keySets))
+	copy(u, keySets)
+	return u
+}
+
+type union []KeySet
+
+func (u union) keySetProto() (*sppb.KeySet, error) {
+	upb := &sppb.KeySet{}
+	for _, ks := range u {
+		pb, err := ks.keySetProto()
+		if err != nil {
+			return nil, err
+		}
+		if pb.All {
+			return pb, nil
+		}
+		upb.Keys = append(upb.Keys, pb.Keys...)
+		upb.Ranges = append(upb.Ranges, pb.Ranges...)
+	}
+	return upb, nil
+}
diff --git a/vendor/cloud.google.com/go/spanner/key_test.go b/vendor/cloud.google.com/go/spanner/key_test.go
new file mode 100644
index 0000000..e9f623c
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/key_test.go
@@ -0,0 +1,372 @@
+/*
+Copyright 2017 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spanner
+
+import (
+	"testing"
+	"time"
+
+	"cloud.google.com/go/civil"
+	proto3 "github.com/golang/protobuf/ptypes/struct"
+	sppb "google.golang.org/genproto/googleapis/spanner/v1"
+)
+
+// Test Key.String() and Key.proto().
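+// Each case checks both the human-readable String form and the proto
+// encoding sent on the wire.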
+func TestKey(t *testing.T) { + tm, _ := time.Parse(time.RFC3339Nano, "2016-11-15T15:04:05.999999999Z") + dt, _ := civil.ParseDate("2016-11-15") + for _, test := range []struct { + k Key + wantProto *proto3.ListValue + wantStr string + }{ + { + k: Key{int(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{int8(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{int16(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{int32(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{int64(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{uint8(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{uint16(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{uint32(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{true}, + wantProto: listValueProto(boolProto(true)), + wantStr: "(true)", + }, + { + k: Key{float32(1.5)}, + wantProto: listValueProto(floatProto(1.5)), + wantStr: "(1.5)", + }, + { + k: Key{float64(1.5)}, + wantProto: listValueProto(floatProto(1.5)), + wantStr: "(1.5)", + }, + { + k: Key{"value"}, + wantProto: listValueProto(stringProto("value")), + wantStr: `("value")`, + }, + { + k: Key{[]byte(nil)}, + wantProto: listValueProto(nullProto()), + wantStr: "()", + }, + { + k: Key{[]byte{}}, + wantProto: listValueProto(stringProto("")), + wantStr: `("")`, + }, + { + k: Key{tm}, + wantProto: listValueProto(stringProto("2016-11-15T15:04:05.999999999Z")), + wantStr: `("2016-11-15T15:04:05.999999999Z")`, + }, + {k: Key{dt}, + wantProto: listValueProto(stringProto("2016-11-15")), + wantStr: `("2016-11-15")`, + }, + { + k: Key{[]byte("value")}, + wantProto: listValueProto(bytesProto([]byte("value"))), + wantStr: `("value")`, + }, + { + k: Key{NullInt64{1, true}}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{NullInt64{2, false}}, + wantProto: listValueProto(nullProto()), + wantStr: "()", + }, + { + k: Key{NullFloat64{1.5, true}}, + wantProto: listValueProto(floatProto(1.5)), + wantStr: "(1.5)", + }, + { + k: Key{NullFloat64{2.0, false}}, + wantProto: listValueProto(nullProto()), + wantStr: "()", + }, + { + k: Key{NullBool{true, true}}, + wantProto: listValueProto(boolProto(true)), + wantStr: "(true)", + }, + { + k: Key{NullBool{true, false}}, + wantProto: listValueProto(nullProto()), + wantStr: "()", + }, + { + k: Key{NullString{"value", true}}, + wantProto: listValueProto(stringProto("value")), + wantStr: `("value")`, + }, + { + k: Key{NullString{"value", false}}, + wantProto: listValueProto(nullProto()), + wantStr: "()", + }, + { + k: Key{NullTime{tm, true}}, + wantProto: listValueProto(timeProto(tm)), + wantStr: `("2016-11-15T15:04:05.999999999Z")`, + }, + + { + k: Key{NullTime{time.Now(), false}}, + wantProto: listValueProto(nullProto()), + wantStr: "()", + }, + { + k: Key{NullDate{dt, true}}, + wantProto: listValueProto(dateProto(dt)), + wantStr: `("2016-11-15")`, + }, + { + k: Key{NullDate{civil.Date{}, false}}, + wantProto: listValueProto(nullProto()), + wantStr: "()", + }, + { + k: Key{int(1), NullString{"value", false}, "value", 1.5, true}, + wantProto: listValueProto(stringProto("1"), nullProto(), stringProto("value"), floatProto(1.5), boolProto(true)), + wantStr: `(1,,"value",1.5,true)`, + }, + } { + if got := test.k.String(); got != test.wantStr { + 
t.Errorf("%v.String() = %v, want %v", test.k, got, test.wantStr) + } + gotProto, err := test.k.proto() + if err != nil { + t.Errorf("%v.proto() returns error %v; want nil error", test.k, err) + } + if !testEqual(gotProto, test.wantProto) { + t.Errorf("%v.proto() = \n%v\nwant:\n%v", test.k, gotProto, test.wantProto) + } + } +} + +// Test KeyRange.String() and KeyRange.proto(). +func TestKeyRange(t *testing.T) { + for _, test := range []struct { + kr KeyRange + wantProto *sppb.KeyRange + wantStr string + }{ + { + kr: KeyRange{Key{"A"}, Key{"D"}, OpenOpen}, + wantProto: &sppb.KeyRange{ + StartKeyType: &sppb.KeyRange_StartOpen{StartOpen: listValueProto(stringProto("A"))}, + EndKeyType: &sppb.KeyRange_EndOpen{EndOpen: listValueProto(stringProto("D"))}, + }, + wantStr: `(("A"),("D"))`, + }, + { + kr: KeyRange{Key{1}, Key{10}, OpenClosed}, + wantProto: &sppb.KeyRange{ + StartKeyType: &sppb.KeyRange_StartOpen{StartOpen: listValueProto(stringProto("1"))}, + EndKeyType: &sppb.KeyRange_EndClosed{EndClosed: listValueProto(stringProto("10"))}, + }, + wantStr: "((1),(10)]", + }, + { + kr: KeyRange{Key{1.5, 2.1, 0.2}, Key{1.9, 0.7}, ClosedOpen}, + wantProto: &sppb.KeyRange{ + StartKeyType: &sppb.KeyRange_StartClosed{StartClosed: listValueProto(floatProto(1.5), floatProto(2.1), floatProto(0.2))}, + EndKeyType: &sppb.KeyRange_EndOpen{EndOpen: listValueProto(floatProto(1.9), floatProto(0.7))}, + }, + wantStr: "[(1.5,2.1,0.2),(1.9,0.7))", + }, + { + kr: KeyRange{Key{NullInt64{1, true}}, Key{10}, ClosedClosed}, + wantProto: &sppb.KeyRange{ + StartKeyType: &sppb.KeyRange_StartClosed{StartClosed: listValueProto(stringProto("1"))}, + EndKeyType: &sppb.KeyRange_EndClosed{EndClosed: listValueProto(stringProto("10"))}, + }, + wantStr: "[(1),(10)]", + }, + } { + if got := test.kr.String(); got != test.wantStr { + t.Errorf("%v.String() = %v, want %v", test.kr, got, test.wantStr) + } + gotProto, err := test.kr.proto() + if err != nil { + t.Errorf("%v.proto() returns error %v; want nil error", test.kr, err) + } + if !testEqual(gotProto, test.wantProto) { + t.Errorf("%v.proto() = \n%v\nwant:\n%v", test.kr, gotProto.String(), test.wantProto.String()) + } + } +} + +func TestPrefixRange(t *testing.T) { + got := Key{1}.AsPrefix() + want := KeyRange{Start: Key{1}, End: Key{1}, Kind: ClosedClosed} + if !testEqual(got, want) { + t.Errorf("got %v, want %v", got, want) + } +} + +func TestKeySets(t *testing.T) { + int1 := intProto(1) + int2 := intProto(2) + int3 := intProto(3) + int4 := intProto(4) + for i, test := range []struct { + ks KeySet + wantProto *sppb.KeySet + }{ + { + KeySets(), + &sppb.KeySet{}, + }, + { + Key{4}, + &sppb.KeySet{ + Keys: []*proto3.ListValue{listValueProto(int4)}, + }, + }, + { + AllKeys(), + &sppb.KeySet{All: true}, + }, + { + KeySets(Key{1, 2}, Key{3, 4}), + &sppb.KeySet{ + Keys: []*proto3.ListValue{ + listValueProto(int1, int2), + listValueProto(int3, int4), + }, + }, + }, + { + KeyRange{Key{1}, Key{2}, ClosedOpen}, + &sppb.KeySet{Ranges: []*sppb.KeyRange{ + &sppb.KeyRange{ + StartKeyType: &sppb.KeyRange_StartClosed{StartClosed: listValueProto(int1)}, + EndKeyType: &sppb.KeyRange_EndOpen{EndOpen: listValueProto(int2)}, + }, + }}, + }, + { + Key{2}.AsPrefix(), + &sppb.KeySet{Ranges: []*sppb.KeyRange{ + &sppb.KeyRange{ + StartKeyType: &sppb.KeyRange_StartClosed{StartClosed: listValueProto(int2)}, + EndKeyType: &sppb.KeyRange_EndClosed{EndClosed: listValueProto(int2)}, + }, + }}, + }, + { + KeySets( + KeyRange{Key{1}, Key{2}, ClosedClosed}, + KeyRange{Key{3}, Key{4}, OpenClosed}, + ), + &sppb.KeySet{ + 
Ranges: []*sppb.KeyRange{ + &sppb.KeyRange{ + StartKeyType: &sppb.KeyRange_StartClosed{StartClosed: listValueProto(int1)}, + EndKeyType: &sppb.KeyRange_EndClosed{EndClosed: listValueProto(int2)}, + }, + &sppb.KeyRange{ + StartKeyType: &sppb.KeyRange_StartOpen{StartOpen: listValueProto(int3)}, + EndKeyType: &sppb.KeyRange_EndClosed{EndClosed: listValueProto(int4)}, + }, + }, + }, + }, + { + KeySets( + Key{1}, + KeyRange{Key{2}, Key{3}, ClosedClosed}, + KeyRange{Key{4}, Key{5}, OpenClosed}, + KeySets(), + Key{6}), + &sppb.KeySet{ + Keys: []*proto3.ListValue{ + listValueProto(int1), + listValueProto(intProto(6)), + }, + Ranges: []*sppb.KeyRange{ + &sppb.KeyRange{ + StartKeyType: &sppb.KeyRange_StartClosed{StartClosed: listValueProto(int2)}, + EndKeyType: &sppb.KeyRange_EndClosed{EndClosed: listValueProto(int3)}, + }, + &sppb.KeyRange{ + StartKeyType: &sppb.KeyRange_StartOpen{StartOpen: listValueProto(int4)}, + EndKeyType: &sppb.KeyRange_EndClosed{EndClosed: listValueProto(intProto(5))}, + }, + }, + }, + }, + { + KeySets( + Key{1}, + KeyRange{Key{2}, Key{3}, ClosedClosed}, + AllKeys(), + KeyRange{Key{4}, Key{5}, OpenClosed}, + Key{6}), + &sppb.KeySet{All: true}, + }, + } { + gotProto, err := test.ks.keySetProto() + if err != nil { + t.Errorf("#%d: %v.proto() returns error %v; want nil error", i, test.ks, err) + } + if !testEqual(gotProto, test.wantProto) { + t.Errorf("#%d: %v.proto() = \n%v\nwant:\n%v", i, test.ks, gotProto.String(), test.wantProto.String()) + } + } +} diff --git a/vendor/cloud.google.com/go/spanner/mutation.go b/vendor/cloud.google.com/go/spanner/mutation.go new file mode 100644 index 0000000..5801c29 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/mutation.go @@ -0,0 +1,431 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "reflect" + + proto3 "github.com/golang/protobuf/ptypes/struct" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc/codes" +) + +// op is the mutation operation. +type op int + +const ( + // opDelete removes a row from a table. Succeeds whether or not the + // key was present. + opDelete op = iota + // opInsert inserts a row into a table. If the row already exists, the + // write or transaction fails. + opInsert + // opInsertOrUpdate inserts a row into a table. If the row already + // exists, it updates it instead. Any column values not explicitly + // written are preserved. + opInsertOrUpdate + // opReplace inserts a row into a table, deleting any existing row. + // Unlike InsertOrUpdate, this means any values not explicitly written + // become NULL. + opReplace + // opUpdate updates a row in a table. If the row does not already + // exist, the write or transaction fails. + opUpdate +) + +// A Mutation describes a modification to one or more Cloud Spanner rows. The +// mutation represents an insert, update, delete, etc on a table. +// +// Many mutations can be applied in a single atomic commit. 
For purposes of
+// constraint checking (such as foreign key constraints), the operations can be
+// viewed as applying in the same order as the mutations are provided (so that, e.g.,
+// a row and its logical "child" can be inserted in the same commit).
+//
+// The Apply function applies a series of mutations. For example,
+//
+//	m := spanner.Insert("User",
+//		[]string{"user_id", "profile"},
+//		[]interface{}{UserID, profile})
+//	_, err := client.Apply(ctx, []*spanner.Mutation{m})
+//
+// inserts a new row into the User table. The primary key
+// for the new row is UserID (presuming that "user_id" has been declared as the
+// primary key of the "User" table).
+//
+// To apply a series of mutations as part of an atomic read-modify-write operation,
+// use ReadWriteTransaction.
+//
+// Updating a row
+//
+// Changing the values of columns in an existing row is very similar to
+// inserting a new row:
+//
+//	m := spanner.Update("User",
+//		[]string{"user_id", "profile"},
+//		[]interface{}{UserID, profile})
+//	_, err := client.Apply(ctx, []*spanner.Mutation{m})
+//
+// Deleting a row
+//
+// To delete a row, use spanner.Delete:
+//
+//	m := spanner.Delete("User", spanner.Key{UserId})
+//	_, err := client.Apply(ctx, []*spanner.Mutation{m})
+//
+// spanner.Delete accepts a KeySet, so you can also pass in a KeyRange, or use the
+// spanner.KeySets function to build any combination of Keys and KeyRanges.
+//
+// Note that deleting a row in a table may also delete rows from other tables
+// if cascading deletes are specified in those tables' schemas. Delete does
+// nothing if the named row does not exist (does not yield an error).
+//
+// Deleting a field
+//
+// To delete/clear a field within a row, use spanner.Update with the value nil:
+//
+//	m := spanner.Update("User",
+//		[]string{"user_id", "profile"},
+//		[]interface{}{UserID, nil})
+//	_, err := client.Apply(ctx, []*spanner.Mutation{m})
+//
+// The valid Go types and their corresponding Cloud Spanner types that can be
+// used in the Insert/Update/InsertOrUpdate functions are:
+//
+//	string, NullString - STRING
+//	[]string, []NullString - STRING ARRAY
+//	[]byte - BYTES
+//	[][]byte - BYTES ARRAY
+//	int, int64, NullInt64 - INT64
+//	[]int, []int64, []NullInt64 - INT64 ARRAY
+//	bool, NullBool - BOOL
+//	[]bool, []NullBool - BOOL ARRAY
+//	float64, NullFloat64 - FLOAT64
+//	[]float64, []NullFloat64 - FLOAT64 ARRAY
+//	time.Time, NullTime - TIMESTAMP
+//	[]time.Time, []NullTime - TIMESTAMP ARRAY
+//	Date, NullDate - DATE
+//	[]Date, []NullDate - DATE ARRAY
+//
+// To compare two Mutations for testing purposes, use reflect.DeepEqual.
+type Mutation struct {
+	// op is the operation type of the mutation.
+	// See documentation for spanner.op for more details.
+	op op
+	// table is the name of the target table to be modified.
+	table string
+	// keySet is a set of primary keys that names the rows
+	// in a delete operation.
+	keySet KeySet
+	// columns names the set of columns that are going to be
+	// modified by Insert, InsertOrUpdate, Replace or Update
+	// operations.
+	columns []string
+	// values specifies the new values for the target columns
+	// named by columns.
+	values []interface{}
+}
+
+// mapToMutationParams converts a Go map into mutation parameters.
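+// Because Go randomizes map iteration order, the resulting column order is
+// unspecified; callers that care about ordering (such as the tests in this
+// package) sort the columns before comparing.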
+func mapToMutationParams(in map[string]interface{}) ([]string, []interface{}) { + cols := []string{} + vals := []interface{}{} + for k, v := range in { + cols = append(cols, k) + vals = append(vals, v) + } + return cols, vals +} + +// errNotStruct returns error for not getting a go struct type. +func errNotStruct(in interface{}) error { + return spannerErrorf(codes.InvalidArgument, "%T is not a go struct type", in) +} + +// structToMutationParams converts Go struct into mutation parameters. +// If the input is not a valid Go struct type, structToMutationParams +// returns error. +func structToMutationParams(in interface{}) ([]string, []interface{}, error) { + if in == nil { + return nil, nil, errNotStruct(in) + } + v := reflect.ValueOf(in) + t := v.Type() + if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { + // t is a pointer to a struct. + if v.IsNil() { + // Return empty results. + return nil, nil, nil + } + // Get the struct value that in points to. + v = v.Elem() + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return nil, nil, errNotStruct(in) + } + fields, err := fieldCache.Fields(t) + if err != nil { + return nil, nil, toSpannerError(err) + } + var cols []string + var vals []interface{} + for _, f := range fields { + cols = append(cols, f.Name) + vals = append(vals, v.FieldByIndex(f.Index).Interface()) + } + return cols, vals, nil +} + +// Insert returns a Mutation to insert a row into a table. If the row already +// exists, the write or transaction fails. +func Insert(table string, cols []string, vals []interface{}) *Mutation { + return &Mutation{ + op: opInsert, + table: table, + columns: cols, + values: vals, + } +} + +// InsertMap returns a Mutation to insert a row into a table, specified by +// a map of column name to value. If the row already exists, the write or +// transaction fails. +func InsertMap(table string, in map[string]interface{}) *Mutation { + cols, vals := mapToMutationParams(in) + return Insert(table, cols, vals) +} + +// InsertStruct returns a Mutation to insert a row into a table, specified by +// a Go struct. If the row already exists, the write or transaction fails. +// +// The in argument must be a struct or a pointer to a struct. Its exported +// fields specify the column names and values. Use a field tag like "spanner:name" +// to provide an alternative column name, or use "spanner:-" to ignore the field. +func InsertStruct(table string, in interface{}) (*Mutation, error) { + cols, vals, err := structToMutationParams(in) + if err != nil { + return nil, err + } + return Insert(table, cols, vals), nil +} + +// Update returns a Mutation to update a row in a table. If the row does not +// already exist, the write or transaction fails. +func Update(table string, cols []string, vals []interface{}) *Mutation { + return &Mutation{ + op: opUpdate, + table: table, + columns: cols, + values: vals, + } +} + +// UpdateMap returns a Mutation to update a row in a table, specified by +// a map of column to value. If the row does not already exist, the write or +// transaction fails. +func UpdateMap(table string, in map[string]interface{}) *Mutation { + cols, vals := mapToMutationParams(in) + return Update(table, cols, vals) +} + +// UpdateStruct returns a Mutation to update a row in a table, specified by a Go +// struct. If the row does not already exist, the write or transaction fails. 
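+//
+// As with InsertStruct, the in argument must be a struct or a pointer to a
+// struct; its exported fields specify the column names and values.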
+func UpdateStruct(table string, in interface{}) (*Mutation, error) { + cols, vals, err := structToMutationParams(in) + if err != nil { + return nil, err + } + return Update(table, cols, vals), nil +} + +// InsertOrUpdate returns a Mutation to insert a row into a table. If the row +// already exists, it updates it instead. Any column values not explicitly +// written are preserved. +// +// For a similar example, See Update. +func InsertOrUpdate(table string, cols []string, vals []interface{}) *Mutation { + return &Mutation{ + op: opInsertOrUpdate, + table: table, + columns: cols, + values: vals, + } +} + +// InsertOrUpdateMap returns a Mutation to insert a row into a table, +// specified by a map of column to value. If the row already exists, it +// updates it instead. Any column values not explicitly written are preserved. +// +// For a similar example, See UpdateMap. +func InsertOrUpdateMap(table string, in map[string]interface{}) *Mutation { + cols, vals := mapToMutationParams(in) + return InsertOrUpdate(table, cols, vals) +} + +// InsertOrUpdateStruct returns a Mutation to insert a row into a table, +// specified by a Go struct. If the row already exists, it updates it instead. +// Any column values not explicitly written are preserved. +// +// The in argument must be a struct or a pointer to a struct. Its exported +// fields specify the column names and values. Use a field tag like "spanner:name" +// to provide an alternative column name, or use "spanner:-" to ignore the field. +// +// For a similar example, See UpdateStruct. +func InsertOrUpdateStruct(table string, in interface{}) (*Mutation, error) { + cols, vals, err := structToMutationParams(in) + if err != nil { + return nil, err + } + return InsertOrUpdate(table, cols, vals), nil +} + +// Replace returns a Mutation to insert a row into a table, deleting any +// existing row. Unlike InsertOrUpdate, this means any values not explicitly +// written become NULL. +// +// For a similar example, See Update. +func Replace(table string, cols []string, vals []interface{}) *Mutation { + return &Mutation{ + op: opReplace, + table: table, + columns: cols, + values: vals, + } +} + +// ReplaceMap returns a Mutation to insert a row into a table, deleting any +// existing row. Unlike InsertOrUpdateMap, this means any values not explicitly +// written become NULL. The row is specified by a map of column to value. +// +// For a similar example, See UpdateMap. +func ReplaceMap(table string, in map[string]interface{}) *Mutation { + cols, vals := mapToMutationParams(in) + return Replace(table, cols, vals) +} + +// ReplaceStruct returns a Mutation to insert a row into a table, deleting any +// existing row. Unlike InsertOrUpdateMap, this means any values not explicitly +// written become NULL. The row is specified by a Go struct. +// +// The in argument must be a struct or a pointer to a struct. Its exported +// fields specify the column names and values. Use a field tag like "spanner:name" +// to provide an alternative column name, or use "spanner:-" to ignore the field. +// +// For a similar example, See UpdateStruct. +func ReplaceStruct(table string, in interface{}) (*Mutation, error) { + cols, vals, err := structToMutationParams(in) + if err != nil { + return nil, err + } + return Replace(table, cols, vals), nil +} + +// Delete removes the rows described by the KeySet from the table. It succeeds +// whether or not the keys were present. 
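+//
+// For example (a sketch mirroring the package examples above):
+//
+//	m := spanner.Delete("User", spanner.KeySets(spanner.Key{"alice"}, spanner.Key{"bob"}))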
+func Delete(table string, ks KeySet) *Mutation { + return &Mutation{ + op: opDelete, + table: table, + keySet: ks, + } +} + +// prepareWrite generates sppb.Mutation_Write from table name, column names +// and new column values. +func prepareWrite(table string, columns []string, vals []interface{}) (*sppb.Mutation_Write, error) { + v, err := encodeValueArray(vals) + if err != nil { + return nil, err + } + return &sppb.Mutation_Write{ + Table: table, + Columns: columns, + Values: []*proto3.ListValue{v}, + }, nil +} + +// errInvdMutationOp returns error for unrecognized mutation operation. +func errInvdMutationOp(m Mutation) error { + return spannerErrorf(codes.InvalidArgument, "Unknown op type: %d", m.op) +} + +// proto converts spanner.Mutation to sppb.Mutation, in preparation to send +// RPCs. +func (m Mutation) proto() (*sppb.Mutation, error) { + var pb *sppb.Mutation + switch m.op { + case opDelete: + var kp *sppb.KeySet + if m.keySet != nil { + var err error + kp, err = m.keySet.keySetProto() + if err != nil { + return nil, err + } + } + pb = &sppb.Mutation{ + Operation: &sppb.Mutation_Delete_{ + Delete: &sppb.Mutation_Delete{ + Table: m.table, + KeySet: kp, + }, + }, + } + case opInsert: + w, err := prepareWrite(m.table, m.columns, m.values) + if err != nil { + return nil, err + } + pb = &sppb.Mutation{Operation: &sppb.Mutation_Insert{Insert: w}} + case opInsertOrUpdate: + w, err := prepareWrite(m.table, m.columns, m.values) + if err != nil { + return nil, err + } + pb = &sppb.Mutation{Operation: &sppb.Mutation_InsertOrUpdate{InsertOrUpdate: w}} + case opReplace: + w, err := prepareWrite(m.table, m.columns, m.values) + if err != nil { + return nil, err + } + pb = &sppb.Mutation{Operation: &sppb.Mutation_Replace{Replace: w}} + case opUpdate: + w, err := prepareWrite(m.table, m.columns, m.values) + if err != nil { + return nil, err + } + pb = &sppb.Mutation{Operation: &sppb.Mutation_Update{Update: w}} + default: + return nil, errInvdMutationOp(m) + } + return pb, nil +} + +// mutationsProto turns a spanner.Mutation array into a sppb.Mutation array, +// it is convenient for sending batch mutations to Cloud Spanner. +func mutationsProto(ms []*Mutation) ([]*sppb.Mutation, error) { + l := make([]*sppb.Mutation, 0, len(ms)) + for _, m := range ms { + pb, err := m.proto() + if err != nil { + return nil, err + } + l = append(l, pb) + } + return l, nil +} diff --git a/vendor/cloud.google.com/go/spanner/mutation_test.go b/vendor/cloud.google.com/go/spanner/mutation_test.go new file mode 100644 index 0000000..d57d4dd --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/mutation_test.go @@ -0,0 +1,571 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "sort" + "strings" + "testing" + + proto3 "github.com/golang/protobuf/ptypes/struct" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// keysetProto returns protobuf encoding of valid spanner.KeySet. 
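+// It fails the test immediately if the conversion fails.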
+func keysetProto(t *testing.T, ks KeySet) *sppb.KeySet { + k, err := ks.keySetProto() + if err != nil { + t.Fatalf("cannot convert keyset %v to protobuf: %v", ks, err) + } + return k +} + +// Test encoding from spanner.Mutation to protobuf. +func TestMutationToProto(t *testing.T) { + for i, test := range []struct { + m *Mutation + want *sppb.Mutation + }{ + // Delete Mutation + { + &Mutation{opDelete, "t_foo", Key{"foo"}, nil, nil}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Delete_{ + Delete: &sppb.Mutation_Delete{ + Table: "t_foo", + KeySet: keysetProto(t, Key{"foo"}), + }, + }, + }, + }, + // Insert Mutation + { + &Mutation{opInsert, "t_foo", KeySets(), []string{"col1", "col2"}, []interface{}{int64(1), int64(2)}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Insert{ + Insert: &sppb.Mutation_Write{ + Table: "t_foo", + Columns: []string{"col1", "col2"}, + Values: []*proto3.ListValue{ + &proto3.ListValue{ + Values: []*proto3.Value{intProto(1), intProto(2)}, + }, + }, + }, + }, + }, + }, + // InsertOrUpdate Mutation + { + &Mutation{opInsertOrUpdate, "t_foo", KeySets(), []string{"col1", "col2"}, []interface{}{1.0, 2.0}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_InsertOrUpdate{ + InsertOrUpdate: &sppb.Mutation_Write{ + Table: "t_foo", + Columns: []string{"col1", "col2"}, + Values: []*proto3.ListValue{ + &proto3.ListValue{ + Values: []*proto3.Value{floatProto(1.0), floatProto(2.0)}, + }, + }, + }, + }, + }, + }, + // Replace Mutation + { + &Mutation{opReplace, "t_foo", KeySets(), []string{"col1", "col2"}, []interface{}{"one", 2.0}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Replace{ + Replace: &sppb.Mutation_Write{ + Table: "t_foo", + Columns: []string{"col1", "col2"}, + Values: []*proto3.ListValue{ + &proto3.ListValue{ + Values: []*proto3.Value{stringProto("one"), floatProto(2.0)}, + }, + }, + }, + }, + }, + }, + // Update Mutation + { + &Mutation{opUpdate, "t_foo", KeySets(), []string{"col1", "col2"}, []interface{}{"one", []byte(nil)}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Update{ + Update: &sppb.Mutation_Write{ + Table: "t_foo", + Columns: []string{"col1", "col2"}, + Values: []*proto3.ListValue{ + &proto3.ListValue{ + Values: []*proto3.Value{stringProto("one"), nullProto()}, + }, + }, + }, + }, + }, + }, + } { + if got, err := test.m.proto(); err != nil || !testEqual(got, test.want) { + t.Errorf("%d: (%#v).proto() = (%v, %v), want (%v, nil)", i, test.m, got, err, test.want) + } + } +} + +// mutationColumnSorter implements sort.Interface for sorting column-value pairs in a Mutation by column names. +type mutationColumnSorter struct { + Mutation +} + +// newMutationColumnSorter creates new instance of mutationColumnSorter by duplicating the input Mutation so that +// sorting won't change the input Mutation. +func newMutationColumnSorter(m *Mutation) *mutationColumnSorter { + return &mutationColumnSorter{ + Mutation{ + m.op, + m.table, + m.keySet, + append([]string(nil), m.columns...), + append([]interface{}(nil), m.values...), + }, + } +} + +// Len implements sort.Interface.Len. +func (ms *mutationColumnSorter) Len() int { + return len(ms.columns) +} + +// Swap implements sort.Interface.Swap. +func (ms *mutationColumnSorter) Swap(i, j int) { + ms.columns[i], ms.columns[j] = ms.columns[j], ms.columns[i] + ms.values[i], ms.values[j] = ms.values[j], ms.values[i] +} + +// Less implements sort.Interface.Less. 
+func (ms *mutationColumnSorter) Less(i, j int) bool { + return strings.Compare(ms.columns[i], ms.columns[j]) < 0 +} + +// mutationEqual returns true if two mutations in question are equal +// to each other. +func mutationEqual(t *testing.T, m1, m2 Mutation) bool { + // Two mutations are considered to be equal even if their column values have different + // orders. + ms1 := newMutationColumnSorter(&m1) + ms2 := newMutationColumnSorter(&m2) + sort.Sort(ms1) + sort.Sort(ms2) + return testEqual(ms1, ms2) +} + +// Test helper functions which help to generate spanner.Mutation. +func TestMutationHelpers(t *testing.T) { + for _, test := range []struct { + m string + got *Mutation + want *Mutation + }{ + { + "Insert", + Insert("t_foo", []string{"col1", "col2"}, []interface{}{int64(1), int64(2)}), + &Mutation{opInsert, "t_foo", nil, []string{"col1", "col2"}, []interface{}{int64(1), int64(2)}}, + }, + { + "InsertMap", + InsertMap("t_foo", map[string]interface{}{"col1": int64(1), "col2": int64(2)}), + &Mutation{opInsert, "t_foo", nil, []string{"col1", "col2"}, []interface{}{int64(1), int64(2)}}, + }, + { + "InsertStruct", + func() *Mutation { + m, err := InsertStruct( + "t_foo", + struct { + notCol bool + Col1 int64 `spanner:"col1"` + Col2 int64 `spanner:"col2"` + }{false, int64(1), int64(2)}, + ) + if err != nil { + t.Errorf("cannot convert struct into mutation: %v", err) + } + return m + }(), + &Mutation{opInsert, "t_foo", nil, []string{"col1", "col2"}, []interface{}{int64(1), int64(2)}}, + }, + { + "Update", + Update("t_foo", []string{"col1", "col2"}, []interface{}{"one", []byte(nil)}), + &Mutation{opUpdate, "t_foo", nil, []string{"col1", "col2"}, []interface{}{"one", []byte(nil)}}, + }, + { + "UpdateMap", + UpdateMap("t_foo", map[string]interface{}{"col1": "one", "col2": []byte(nil)}), + &Mutation{opUpdate, "t_foo", nil, []string{"col1", "col2"}, []interface{}{"one", []byte(nil)}}, + }, + { + "UpdateStruct", + func() *Mutation { + m, err := UpdateStruct( + "t_foo", + struct { + Col1 string `spanner:"col1"` + notCol int + Col2 []byte `spanner:"col2"` + }{"one", 1, nil}, + ) + if err != nil { + t.Errorf("cannot convert struct into mutation: %v", err) + } + return m + }(), + &Mutation{opUpdate, "t_foo", nil, []string{"col1", "col2"}, []interface{}{"one", []byte(nil)}}, + }, + { + "InsertOrUpdate", + InsertOrUpdate("t_foo", []string{"col1", "col2"}, []interface{}{1.0, 2.0}), + &Mutation{opInsertOrUpdate, "t_foo", nil, []string{"col1", "col2"}, []interface{}{1.0, 2.0}}, + }, + { + "InsertOrUpdateMap", + InsertOrUpdateMap("t_foo", map[string]interface{}{"col1": 1.0, "col2": 2.0}), + &Mutation{opInsertOrUpdate, "t_foo", nil, []string{"col1", "col2"}, []interface{}{1.0, 2.0}}, + }, + { + "InsertOrUpdateStruct", + func() *Mutation { + m, err := InsertOrUpdateStruct( + "t_foo", + struct { + Col1 float64 `spanner:"col1"` + Col2 float64 `spanner:"col2"` + notCol float64 + }{1.0, 2.0, 3.0}, + ) + if err != nil { + t.Errorf("cannot convert struct into mutation: %v", err) + } + return m + }(), + &Mutation{opInsertOrUpdate, "t_foo", nil, []string{"col1", "col2"}, []interface{}{1.0, 2.0}}, + }, + { + "Replace", + Replace("t_foo", []string{"col1", "col2"}, []interface{}{"one", 2.0}), + &Mutation{opReplace, "t_foo", nil, []string{"col1", "col2"}, []interface{}{"one", 2.0}}, + }, + { + "ReplaceMap", + ReplaceMap("t_foo", map[string]interface{}{"col1": "one", "col2": 2.0}), + &Mutation{opReplace, "t_foo", nil, []string{"col1", "col2"}, []interface{}{"one", 2.0}}, + }, + { + "ReplaceStruct", + func() *Mutation { + m, 
err := ReplaceStruct( + "t_foo", + struct { + Col1 string `spanner:"col1"` + Col2 float64 `spanner:"col2"` + notCol string + }{"one", 2.0, "foo"}, + ) + if err != nil { + t.Errorf("cannot convert struct into mutation: %v", err) + } + return m + }(), + &Mutation{opReplace, "t_foo", nil, []string{"col1", "col2"}, []interface{}{"one", 2.0}}, + }, + { + "Delete", + Delete("t_foo", Key{"foo"}), + &Mutation{opDelete, "t_foo", Key{"foo"}, nil, nil}, + }, + { + "DeleteRange", + Delete("t_foo", KeyRange{Key{"bar"}, Key{"foo"}, ClosedClosed}), + &Mutation{opDelete, "t_foo", KeyRange{Key{"bar"}, Key{"foo"}, ClosedClosed}, nil, nil}, + }, + } { + if !mutationEqual(t, *test.got, *test.want) { + t.Errorf("%v: got Mutation %v, want %v", test.m, test.got, test.want) + } + } +} + +// Test encoding non-struct types by using *Struct helpers. +func TestBadStructs(t *testing.T) { + val := "i_am_not_a_struct" + wantErr := errNotStruct(val) + if _, gotErr := InsertStruct("t_test", val); !testEqual(gotErr, wantErr) { + t.Errorf("InsertStruct(%q) returns error %v, want %v", val, gotErr, wantErr) + } + if _, gotErr := InsertOrUpdateStruct("t_test", val); !testEqual(gotErr, wantErr) { + t.Errorf("InsertOrUpdateStruct(%q) returns error %v, want %v", val, gotErr, wantErr) + } + if _, gotErr := UpdateStruct("t_test", val); !testEqual(gotErr, wantErr) { + t.Errorf("UpdateStruct(%q) returns error %v, want %v", val, gotErr, wantErr) + } + if _, gotErr := ReplaceStruct("t_test", val); !testEqual(gotErr, wantErr) { + t.Errorf("ReplaceStruct(%q) returns error %v, want %v", val, gotErr, wantErr) + } +} + +func TestStructToMutationParams(t *testing.T) { + // Tests cases not covered elsewhere. + type S struct{ F interface{} } + + for _, test := range []struct { + in interface{} + wantCols []string + wantVals []interface{} + wantErr error + }{ + {nil, nil, nil, errNotStruct(nil)}, + {3, nil, nil, errNotStruct(3)}, + {(*S)(nil), nil, nil, nil}, + {&S{F: 1}, []string{"F"}, []interface{}{1}, nil}, + {&S{F: CommitTimestamp}, []string{"F"}, []interface{}{CommitTimestamp}, nil}, + } { + gotCols, gotVals, gotErr := structToMutationParams(test.in) + if !testEqual(gotCols, test.wantCols) { + t.Errorf("%#v: got cols %v, want %v", test.in, gotCols, test.wantCols) + } + if !testEqual(gotVals, test.wantVals) { + t.Errorf("%#v: got vals %v, want %v", test.in, gotVals, test.wantVals) + } + if !testEqual(gotErr, test.wantErr) { + t.Errorf("%#v: got err %v, want %v", test.in, gotErr, test.wantErr) + } + } +} + +// Test encoding Mutation into proto. 
+func TestEncodeMutation(t *testing.T) { + for _, test := range []struct { + name string + mutation Mutation + wantProto *sppb.Mutation + wantErr error + }{ + { + "OpDelete", + Mutation{opDelete, "t_test", Key{1}, nil, nil}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Delete_{ + Delete: &sppb.Mutation_Delete{ + Table: "t_test", + KeySet: &sppb.KeySet{ + Keys: []*proto3.ListValue{listValueProto(intProto(1))}, + }, + }, + }, + }, + nil, + }, + { + "OpDelete - Key error", + Mutation{opDelete, "t_test", Key{struct{}{}}, nil, nil}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Delete_{ + Delete: &sppb.Mutation_Delete{ + Table: "t_test", + KeySet: &sppb.KeySet{}, + }, + }, + }, + errInvdKeyPartType(struct{}{}), + }, + { + "OpInsert", + Mutation{opInsert, "t_test", nil, []string{"key", "val"}, []interface{}{"foo", 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Insert{ + Insert: &sppb.Mutation_Write{ + Table: "t_test", + Columns: []string{"key", "val"}, + Values: []*proto3.ListValue{listValueProto(stringProto("foo"), intProto(1))}, + }, + }, + }, + nil, + }, + { + "OpInsert - Value Type Error", + Mutation{opInsert, "t_test", nil, []string{"key", "val"}, []interface{}{struct{}{}, 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Insert{ + Insert: &sppb.Mutation_Write{}, + }, + }, + errEncoderUnsupportedType(struct{}{}), + }, + { + "OpInsertOrUpdate", + Mutation{opInsertOrUpdate, "t_test", nil, []string{"key", "val"}, []interface{}{"foo", 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_InsertOrUpdate{ + InsertOrUpdate: &sppb.Mutation_Write{ + Table: "t_test", + Columns: []string{"key", "val"}, + Values: []*proto3.ListValue{listValueProto(stringProto("foo"), intProto(1))}, + }, + }, + }, + nil, + }, + { + "OpInsertOrUpdate - Value Type Error", + Mutation{opInsertOrUpdate, "t_test", nil, []string{"key", "val"}, []interface{}{struct{}{}, 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_InsertOrUpdate{ + InsertOrUpdate: &sppb.Mutation_Write{}, + }, + }, + errEncoderUnsupportedType(struct{}{}), + }, + { + "OpReplace", + Mutation{opReplace, "t_test", nil, []string{"key", "val"}, []interface{}{"foo", 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Replace{ + Replace: &sppb.Mutation_Write{ + Table: "t_test", + Columns: []string{"key", "val"}, + Values: []*proto3.ListValue{listValueProto(stringProto("foo"), intProto(1))}, + }, + }, + }, + nil, + }, + { + "OpReplace - Value Type Error", + Mutation{opReplace, "t_test", nil, []string{"key", "val"}, []interface{}{struct{}{}, 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Replace{ + Replace: &sppb.Mutation_Write{}, + }, + }, + errEncoderUnsupportedType(struct{}{}), + }, + { + "OpUpdate", + Mutation{opUpdate, "t_test", nil, []string{"key", "val"}, []interface{}{"foo", 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Update{ + Update: &sppb.Mutation_Write{ + Table: "t_test", + Columns: []string{"key", "val"}, + Values: []*proto3.ListValue{listValueProto(stringProto("foo"), intProto(1))}, + }, + }, + }, + nil, + }, + { + "OpUpdate - Value Type Error", + Mutation{opUpdate, "t_test", nil, []string{"key", "val"}, []interface{}{struct{}{}, 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Update{ + Update: &sppb.Mutation_Write{}, + }, + }, + errEncoderUnsupportedType(struct{}{}), + }, + { + "OpKnown - Unknown Mutation Operation Code", + Mutation{op(100), "t_test", nil, nil, nil}, + &sppb.Mutation{}, + errInvdMutationOp(Mutation{op(100), "t_test", nil, nil, nil}), + }, + } { + gotProto, gotErr := test.mutation.proto() + if gotErr != nil { + if 
!testEqual(gotErr, test.wantErr) { + t.Errorf("%s: %v.proto() returns error %v, want %v", test.name, test.mutation, gotErr, test.wantErr) + } + continue + } + if !testEqual(gotProto, test.wantProto) { + t.Errorf("%s: %v.proto() = (%v, nil), want (%v, nil)", test.name, test.mutation, gotProto, test.wantProto) + } + } +} + +// Test Encoding an array of mutations. +func TestEncodeMutationArray(t *testing.T) { + for _, test := range []struct { + name string + ms []*Mutation + want []*sppb.Mutation + wantErr error + }{ + { + "Multiple Mutations", + []*Mutation{ + &Mutation{opDelete, "t_test", Key{"bar"}, nil, nil}, + &Mutation{opInsertOrUpdate, "t_test", nil, []string{"key", "val"}, []interface{}{"foo", 1}}, + }, + []*sppb.Mutation{ + &sppb.Mutation{ + Operation: &sppb.Mutation_Delete_{ + Delete: &sppb.Mutation_Delete{ + Table: "t_test", + KeySet: &sppb.KeySet{ + Keys: []*proto3.ListValue{listValueProto(stringProto("bar"))}, + }, + }, + }, + }, + &sppb.Mutation{ + Operation: &sppb.Mutation_InsertOrUpdate{ + InsertOrUpdate: &sppb.Mutation_Write{ + Table: "t_test", + Columns: []string{"key", "val"}, + Values: []*proto3.ListValue{listValueProto(stringProto("foo"), intProto(1))}, + }, + }, + }, + }, + nil, + }, + { + "Multiple Mutations - Bad Mutation", + []*Mutation{ + &Mutation{opDelete, "t_test", Key{"bar"}, nil, nil}, + &Mutation{opInsertOrUpdate, "t_test", nil, []string{"key", "val"}, []interface{}{"foo", struct{}{}}}, + }, + []*sppb.Mutation{}, + errEncoderUnsupportedType(struct{}{}), + }, + } { + gotProto, gotErr := mutationsProto(test.ms) + if gotErr != nil { + if !testEqual(gotErr, test.wantErr) { + t.Errorf("%v: mutationsProto(%v) returns error %v, want %v", test.name, test.ms, gotErr, test.wantErr) + } + continue + } + if !testEqual(gotProto, test.want) { + t.Errorf("%v: mutationsProto(%v) = (%v, nil), want (%v, nil)", test.name, test.ms, gotProto, test.want) + } + } +} diff --git a/vendor/cloud.google.com/go/spanner/not_appengine.go b/vendor/cloud.google.com/go/spanner/not_appengine.go new file mode 100644 index 0000000..2ef265d --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/not_appengine.go @@ -0,0 +1,20 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !appengine + +package spanner + +// numChannels is the default value for NumChannels of client +const numChannels = 4 diff --git a/vendor/cloud.google.com/go/spanner/not_go18.go b/vendor/cloud.google.com/go/spanner/not_go18.go new file mode 100644 index 0000000..c9e62a1 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/not_go18.go @@ -0,0 +1,31 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.8 + +package spanner + +import "golang.org/x/net/context" + +// OpenCensus only supports go 1.8 and higher. + +func traceStartSpan(ctx context.Context, _ string) context.Context { + return ctx +} + +func traceEndSpan(context.Context, error) { +} + +func tracePrintf(context.Context, map[string]interface{}, string, ...interface{}) { +} diff --git a/vendor/cloud.google.com/go/spanner/oc_test.go b/vendor/cloud.google.com/go/spanner/oc_test.go new file mode 100644 index 0000000..8ff0b3e --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/oc_test.go @@ -0,0 +1,54 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +package spanner + +import ( + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + stestutil "cloud.google.com/go/spanner/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +// Check that stats are being exported. +func TestOCStats(t *testing.T) { + te := testutil.NewTestExporter() + defer te.Unregister() + + ms := stestutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + ctx := context.Background() + c, err := NewClient(ctx, "projects/P/instances/I/databases/D", + option.WithEndpoint(ms.Addr()), + option.WithGRPCDialOption(grpc.WithInsecure()), + option.WithoutAuthentication()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + c.Single().ReadRow(ctx, "Users", Key{"alice"}, []string{"email"}) + // Wait until we see data from the view. + select { + case <-te.Stats: + case <-time.After(1 * time.Second): + t.Fatal("no stats were exported before timeout") + } +} diff --git a/vendor/cloud.google.com/go/spanner/protoutils.go b/vendor/cloud.google.com/go/spanner/protoutils.go new file mode 100644 index 0000000..a6fcdd7 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/protoutils.go @@ -0,0 +1,113 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spanner + +import ( + "encoding/base64" + "strconv" + "time" + + "cloud.google.com/go/civil" + proto3 "github.com/golang/protobuf/ptypes/struct" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// Helpers to generate protobuf values and Cloud Spanner types. + +func stringProto(s string) *proto3.Value { + return &proto3.Value{Kind: stringKind(s)} +} + +func stringKind(s string) *proto3.Value_StringValue { + return &proto3.Value_StringValue{StringValue: s} +} + +func stringType() *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_STRING} +} + +func boolProto(b bool) *proto3.Value { + return &proto3.Value{Kind: &proto3.Value_BoolValue{BoolValue: b}} +} + +func boolType() *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_BOOL} +} + +func intProto(n int64) *proto3.Value { + return &proto3.Value{Kind: &proto3.Value_StringValue{StringValue: strconv.FormatInt(n, 10)}} +} + +func intType() *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_INT64} +} + +func floatProto(n float64) *proto3.Value { + return &proto3.Value{Kind: &proto3.Value_NumberValue{NumberValue: n}} +} + +func floatType() *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_FLOAT64} +} + +func bytesProto(b []byte) *proto3.Value { + return &proto3.Value{Kind: &proto3.Value_StringValue{StringValue: base64.StdEncoding.EncodeToString(b)}} +} + +func bytesType() *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_BYTES} +} + +func timeProto(t time.Time) *proto3.Value { + return stringProto(t.UTC().Format(time.RFC3339Nano)) +} + +func timeType() *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_TIMESTAMP} +} + +func dateProto(d civil.Date) *proto3.Value { + return stringProto(d.String()) +} + +func dateType() *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_DATE} +} + +func listProto(p ...*proto3.Value) *proto3.Value { + return &proto3.Value{Kind: &proto3.Value_ListValue{ListValue: &proto3.ListValue{Values: p}}} +} + +func listValueProto(p ...*proto3.Value) *proto3.ListValue { + return &proto3.ListValue{Values: p} +} + +func listType(t *sppb.Type) *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_ARRAY, ArrayElementType: t} +} + +func mkField(n string, t *sppb.Type) *sppb.StructType_Field { + return &sppb.StructType_Field{Name: n, Type: t} +} + +func structType(fields ...*sppb.StructType_Field) *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_STRUCT, StructType: &sppb.StructType{Fields: fields}} +} + +func nullProto() *proto3.Value { + return &proto3.Value{Kind: &proto3.Value_NullValue{NullValue: proto3.NullValue_NULL_VALUE}} +} diff --git a/vendor/cloud.google.com/go/spanner/read.go b/vendor/cloud.google.com/go/spanner/read.go new file mode 100644 index 0000000..645c84d --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/read.go @@ -0,0 +1,704 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package spanner
+
+import (
+	"bytes"
+	"io"
+	"log"
+	"sync/atomic"
+	"time"
+
+	"cloud.google.com/go/internal/protostruct"
+	proto "github.com/golang/protobuf/proto"
+	proto3 "github.com/golang/protobuf/ptypes/struct"
+	"golang.org/x/net/context"
+	"google.golang.org/api/iterator"
+	sppb "google.golang.org/genproto/googleapis/spanner/v1"
+	"google.golang.org/grpc/codes"
+)
+
+// streamingReceiver is the interface for receiving data from a client-side
+// stream.
+type streamingReceiver interface {
+	Recv() (*sppb.PartialResultSet, error)
+}
+
+// errEarlyReadEnd returns the error for a read that finishes while the gRPC
+// stream is still active.
+func errEarlyReadEnd() error {
+	return spannerErrorf(codes.FailedPrecondition, "read completed with active stream")
+}
+
+// stream is the internal fault-tolerant method for streaming data from
+// Cloud Spanner.
+func stream(ctx context.Context, rpc func(ct context.Context, resumeToken []byte) (streamingReceiver, error), setTimestamp func(time.Time), release func(error)) *RowIterator {
+	ctx, cancel := context.WithCancel(ctx)
+	ctx = traceStartSpan(ctx, "cloud.google.com/go/spanner.RowIterator")
+	return &RowIterator{
+		streamd:      newResumableStreamDecoder(ctx, rpc),
+		rowd:         &partialResultSetDecoder{},
+		setTimestamp: setTimestamp,
+		release:      release,
+		cancel:       cancel,
+	}
+}
+
+// RowIterator is an iterator over Rows.
+type RowIterator struct {
+	// The plan for the query. Available after RowIterator.Next returns iterator.Done
+	// if QueryWithStats was called.
+	QueryPlan *sppb.QueryPlan
+
+	// Execution statistics for the query. Available after RowIterator.Next returns iterator.Done
+	// if QueryWithStats was called.
+	QueryStats map[string]interface{}
+
+	streamd      *resumableStreamDecoder
+	rowd         *partialResultSetDecoder
+	setTimestamp func(time.Time)
+	release      func(error)
+	cancel       func()
+	err          error
+	rows         []*Row
+}
+
+// Next returns the next result. Its second return value is iterator.Done if
+// there are no more results. Once Next returns Done, all subsequent calls
+// will return Done.
+func (r *RowIterator) Next() (*Row, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	for len(r.rows) == 0 && r.streamd.next() {
+		prs := r.streamd.get()
+		if prs.Stats != nil {
+			r.QueryPlan = prs.Stats.QueryPlan
+			r.QueryStats = protostruct.DecodeToMap(prs.Stats.QueryStats)
+		}
+		r.rows, r.err = r.rowd.add(prs)
+		if r.err != nil {
+			return nil, r.err
+		}
+		if !r.rowd.ts.IsZero() && r.setTimestamp != nil {
+			r.setTimestamp(r.rowd.ts)
+			r.setTimestamp = nil
+		}
+	}
+	if len(r.rows) > 0 {
+		row := r.rows[0]
+		r.rows = r.rows[1:]
+		return row, nil
+	}
+	if err := r.streamd.lastErr(); err != nil {
+		r.err = toSpannerError(err)
+	} else if !r.rowd.done() {
+		r.err = errEarlyReadEnd()
+	} else {
+		r.err = iterator.Done
+	}
+	return nil, r.err
+}
+
+// Do calls the provided function once in sequence for each row in the iteration. If the
+// function returns a non-nil error, Do immediately returns that error.
+//
+// If there are no rows in the iterator, Do will return nil without calling the
+// provided function.
+//
+// Do always calls Stop on the iterator.
+func (r *RowIterator) Do(f func(r *Row) error) error {
+	defer r.Stop()
+	for {
+		row, err := r.Next()
+		switch err {
+		case iterator.Done:
+			return nil
+		case nil:
+			if err = f(row); err != nil {
+				return err
+			}
+		default:
+			return err
+		}
+	}
+}
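+
+// Editor's note: a hedged usage sketch of the iterator above (client, ctx,
+// and the "Users" table are hypothetical). Next/Stop is the manual loop; Do
+// wraps the same protocol and calls Stop for you:
+//
+//	iter := client.Single().Read(ctx, "Users", AllKeys(), []string{"email"})
+//	defer iter.Stop()
+//	for {
+//		row, err := iter.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		_ = row // process the row
+//	}
+//
+//	// Or, equivalently, with Do (which calls Stop for you):
+//	err := iter.Do(func(r *Row) error { return nil })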
+
+// Stop terminates the iteration. It should be called after every iteration.
+func (r *RowIterator) Stop() {
+	if r.streamd != nil {
+		defer traceEndSpan(r.streamd.ctx, r.err)
+	}
+	if r.cancel != nil {
+		r.cancel()
+	}
+	if r.release != nil {
+		r.release(r.err)
+		if r.err == nil {
+			r.err = spannerErrorf(codes.FailedPrecondition, "Next called after Stop")
+		}
+		r.release = nil
+	}
+}
+
+// partialResultQueue implements a simple FIFO queue. The zero value is a
+// valid queue.
+type partialResultQueue struct {
+	q     []*sppb.PartialResultSet
+	first int
+	last  int
+	n     int // number of elements in queue
+}
+
+// empty reports whether the partialResultQueue is empty.
+func (q *partialResultQueue) empty() bool {
+	return q.n == 0
+}
+
+// errEmptyQueue returns the error for dequeuing an empty queue.
+func errEmptyQueue() error {
+	return spannerErrorf(codes.OutOfRange, "empty partialResultQueue")
+}
+
+// peekLast returns the last item in partialResultQueue; if the queue
+// is empty, it returns an error.
+func (q *partialResultQueue) peekLast() (*sppb.PartialResultSet, error) {
+	if q.empty() {
+		return nil, errEmptyQueue()
+	}
+	return q.q[(q.last+cap(q.q)-1)%cap(q.q)], nil
+}
+
+// push adds an item to the tail of partialResultQueue.
+func (q *partialResultQueue) push(r *sppb.PartialResultSet) {
+	if q.q == nil {
+		q.q = make([]*sppb.PartialResultSet, 8 /* arbitrary */)
+	}
+	if q.n == cap(q.q) {
+		buf := make([]*sppb.PartialResultSet, cap(q.q)*2)
+		for i := 0; i < q.n; i++ {
+			buf[i] = q.q[(q.first+i)%cap(q.q)]
+		}
+		q.q = buf
+		q.first = 0
+		q.last = q.n
+	}
+	q.q[q.last] = r
+	q.last = (q.last + 1) % cap(q.q)
+	q.n++
+}
+
+// pop removes an item from the head of partialResultQueue and returns
+// it.
+func (q *partialResultQueue) pop() *sppb.PartialResultSet {
+	if q.n == 0 {
+		return nil
+	}
+	r := q.q[q.first]
+	q.q[q.first] = nil
+	q.first = (q.first + 1) % cap(q.q)
+	q.n--
+	return r
+}
+
+// clear empties partialResultQueue.
+func (q *partialResultQueue) clear() {
+	*q = partialResultQueue{}
+}
+
+// dump retrieves all items from partialResultQueue and returns them in a
+// slice. It is used only in tests.
+func (q *partialResultQueue) dump() []*sppb.PartialResultSet {
+	var dq []*sppb.PartialResultSet
+	for i := q.first; len(dq) < q.n; i = (i + 1) % cap(q.q) {
+		dq = append(dq, q.q[i])
+	}
+	return dq
+}
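+
+// Editor's note: a minimal sketch of the queue's FIFO/ring behaviour above
+// (illustrative only; the queue is internal and not part of the public API):
+//
+//	var q partialResultQueue
+//	for i := 0; i < 10; i++ { // grows past the initial capacity of 8
+//		q.push(&sppb.PartialResultSet{})
+//	}
+//	for !q.empty() {
+//		_ = q.pop() // items come back in insertion order
+//	}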
+
+// resumableStreamDecoderState encodes resumableStreamDecoder's status.
+// See also the comments for resumableStreamDecoder.next.
+type resumableStreamDecoderState int
+
+const (
+	unConnected         resumableStreamDecoderState = iota // 0
+	queueingRetryable                                      // 1
+	queueingUnretryable                                    // 2
+	aborted                                                // 3
+	finished                                               // 4
+)
+
+// resumableStreamDecoder provides a resumable interface for receiving
+// sppb.PartialResultSet(s) from a given query wrapped by
+// resumableStreamDecoder.rpc().
+type resumableStreamDecoder struct {
+	// state is the current status of resumableStreamDecoder; see also
+	// the comments for resumableStreamDecoder.next.
+	state resumableStreamDecoderState
+	// stateWitness, when non-nil, is called to observe state changes.
+	// It is used for testing.
+	stateWitness func(resumableStreamDecoderState)
+	// ctx is the caller's context, used to cancel or time out next().
+	ctx context.Context
+	// rpc is a factory of streamingReceiver, which might resume
+	// a previous stream from the point encoded in restartToken.
+	// rpc is always a wrapper of a Cloud Spanner query which is
+	// resumable.
+	rpc func(ctx context.Context, restartToken []byte) (streamingReceiver, error)
+	// stream is the current RPC streaming receiver.
+	stream streamingReceiver
+	// q buffers received yet undecoded partial results.
+	q partialResultQueue
+	// bytesBetweenResumeTokens is the proxy of the byte size of PartialResultSets being queued
+	// between two resume tokens. Once bytesBetweenResumeTokens is greater than
+	// maxBytesBetweenResumeTokens, resumableStreamDecoder goes into queueingUnretryable state.
+	bytesBetweenResumeTokens int32
+	// maxBytesBetweenResumeTokens is the max number of bytes that can be buffered
+	// between two resume tokens. It is always copied from the global maxBytesBetweenResumeTokens
+	// atomically.
+	maxBytesBetweenResumeTokens int32
+	// np is the next sppb.PartialResultSet ready to be returned
+	// to the caller of resumableStreamDecoder.get().
+	np *sppb.PartialResultSet
+	// resumeToken stores the resume token that resumableStreamDecoder has
+	// last revealed to the caller.
+	resumeToken []byte
+	// retryCount is the number of retries that have been carried out so far.
+	retryCount int
+	// err is the last error resumableStreamDecoder has encountered so far.
+	err error
+	// backoff to compute delays between retries.
+	backoff exponentialBackoff
+}
+
+// newResumableStreamDecoder creates a new resumableStreamDecoder instance.
+// Parameter rpc should be a function that creates a new stream
+// beginning at the restartToken if non-nil.
+func newResumableStreamDecoder(ctx context.Context, rpc func(ct context.Context, restartToken []byte) (streamingReceiver, error)) *resumableStreamDecoder {
+	return &resumableStreamDecoder{
+		ctx: ctx,
+		rpc: rpc,
+		maxBytesBetweenResumeTokens: atomic.LoadInt32(&maxBytesBetweenResumeTokens),
+		backoff: defaultBackoff,
+	}
+}
+
+// changeState fulfills state transition for resumableStreamDecoder.
+func (d *resumableStreamDecoder) changeState(target resumableStreamDecoderState) {
+	if d.state == queueingRetryable && d.state != target {
+		// Reset bytesBetweenResumeTokens because it is only meaningful/changed under
+		// queueingRetryable state.
+		d.bytesBetweenResumeTokens = 0
+	}
+	d.state = target
+	if d.stateWitness != nil {
+		d.stateWitness(target)
+	}
+}
+
+// isNewResumeToken reports whether the observed resume token is different
+// from the one returned from the server last time.
+func (d *resumableStreamDecoder) isNewResumeToken(rt []byte) bool {
+	if rt == nil {
+		return false
+	}
+	return !bytes.Equal(rt, d.resumeToken)
+}
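+
+// Editor's note: a hedged sketch of the rpc factory contract, mirroring the
+// pattern used in read_test.go (client here stands for an sppb.SpannerClient;
+// the SQL text is a placeholder). The decoder re-invokes rpc with the last
+// observed resume token, so the factory must thread that token into the
+// request it builds:
+//
+//	rpc := func(ct context.Context, resumeToken []byte) (streamingReceiver, error) {
+//		return client.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{
+//			Sql:         "SELECT t.key key, t.value value FROM t_mock t",
+//			ResumeToken: resumeToken, // nil on the first attempt
+//		})
+//	}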
+
+// next advances to the next available partial result set. If there is an
+// error or no more results, it returns false; call lastErr to determine
+// whether an error was encountered. The following table describes the state
+// machine that next() implements. Note that state transitions can only be
+// triggered by RPC activity.
+/*
+State machine of resumableStreamDecoder:
+
+	unConnected:
+		rpc() succeeds                        -> queueingRetryable
+		rpc() fails (retryable)               -> unConnected (after backoff)
+		rpc() fails (unretryable) / ctx done  -> aborted
+	queueingRetryable:
+		recv() succeeds                       -> queueingRetryable
+		recv() returns EOF                    -> finished
+		recv() fails (retryable, ctx valid)   -> unConnected (after backoff)
+		recv() fails (unretryable) / ctx done -> aborted
+		queue size exceeds threshold          -> queueingUnretryable
+	queueingUnretryable:
+		recv() succeeds                       -> queueingUnretryable
+		recv() returns EOF                    -> finished
+		recv() fails / ctx done               -> aborted
+		pop() returns a resume token          -> queueingRetryable
+*/
+var (
+	// maxBytesBetweenResumeTokens is the maximum amount of bytes that resumableStreamDecoder
+	// in queueingRetryable state can use to queue PartialResultSets before getting
+	// into queueingUnretryable state.
+	maxBytesBetweenResumeTokens = int32(128 * 1024 * 1024)
+)
+
+func (d *resumableStreamDecoder) next() bool {
+	for {
+		select {
+		case <-d.ctx.Done():
+			// Check the context here so that, even if gRPC fails to
+			// do so, resumableStreamDecoder can still break the loop
+			// as expected.
+			d.err = errContextCanceled(d.ctx, d.err)
+			d.changeState(aborted)
+		default:
+		}
+		switch d.state {
+		case unConnected:
+			// If no gRPC stream is available, try to initiate one.
+			if d.stream, d.err = d.rpc(d.ctx, d.resumeToken); d.err != nil {
+				if isRetryable(d.err) {
+					d.doBackOff()
+					// Be explicit about the state transition, although the
+					// state doesn't actually change. State transitions
+					// are triggered only by RPC activity, regardless of
+					// whether there is an actual state change or not.
+					d.changeState(unConnected)
+					continue
+				}
+				d.changeState(aborted)
+				continue
+			}
+			d.resetBackOff()
+			d.changeState(queueingRetryable)
+			continue
+		case queueingRetryable:
+			fallthrough
+		case queueingUnretryable:
+			// Look at the last item in the receiving queue.
+			last, err := d.q.peekLast()
+			if err != nil {
+				// peekLast returns an error only when the receiving queue
+				// is empty; in that case, try to receive from the stream.
+				d.tryRecv()
+				continue
+			}
+			if d.isNewResumeToken(last.ResumeToken) {
+				// Got a new resume token, so return buffered sppb.PartialResultSets to the caller.
+				d.np = d.q.pop()
+				if d.q.empty() {
+					d.bytesBetweenResumeTokens = 0
+					// The new resume token was just popped out of the queue; record it.
+					d.resumeToken = d.np.ResumeToken
+					d.changeState(queueingRetryable)
+				}
+				return true
+			}
+			if d.bytesBetweenResumeTokens >= d.maxBytesBetweenResumeTokens && d.state == queueingRetryable {
+				d.changeState(queueingUnretryable)
+				continue
+			}
+			if d.state == queueingUnretryable {
+				// When no resume token has been observed, only yield
+				// sppb.PartialResultSets to the caller in the
+				// queueingUnretryable state.
+				d.np = d.q.pop()
+				return true
+			}
+			// Need to receive more from the gRPC stream until a new resume
+			// token is observed.
+			d.tryRecv()
+			continue
+		case aborted:
+			// Discard all pending items because none of them
+			// should be yielded to the caller.
+			d.q.clear()
+			return false
+		case finished:
+			// If the query has finished, check whether there are still buffered messages.
+			if d.q.empty() {
+				// No buffered PartialResultSet.
+				return false
+			}
+			// Although the query has finished, there are still buffered PartialResultSets.
+			d.np = d.q.pop()
+			return true
+		default:
+			log.Printf("Unexpected resumableStreamDecoder.state: %v", d.state)
+			return false
+		}
+	}
+}
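+
+// Editor's note: a hedged sketch of observing the state machine above via the
+// stateWitness test hook; this mirrors how read_test.go drives the decoder
+// (rpc is the factory sketched earlier):
+//
+//	d := newResumableStreamDecoder(ctx, rpc)
+//	d.stateWitness = func(s resumableStreamDecoderState) {
+//		log.Printf("decoder state -> %v", s)
+//	}
+//	for d.next() {
+//		_ = d.get() // consume each PartialResultSet as it becomes available
+//	}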
+
+// tryRecv attempts to receive a PartialResultSet from the gRPC stream.
+func (d *resumableStreamDecoder) tryRecv() {
+	var res *sppb.PartialResultSet
+	if res, d.err = d.stream.Recv(); d.err != nil {
+		if d.err == io.EOF {
+			d.err = nil
+			d.changeState(finished)
+			return
+		}
+		if isRetryable(d.err) && d.state == queueingRetryable {
+			d.err = nil
+			// Discard all queue items (none have resume tokens).
+			d.q.clear()
+			d.stream = nil
+			d.changeState(unConnected)
+			d.doBackOff()
+			return
+		}
+		d.changeState(aborted)
+		return
+	}
+	d.q.push(res)
+	if d.state == queueingRetryable && !d.isNewResumeToken(res.ResumeToken) {
+		// Adjust d.bytesBetweenResumeTokens.
+		d.bytesBetweenResumeTokens += int32(proto.Size(res))
+	}
+	d.resetBackOff()
+	d.changeState(d.state)
+}
+
+// resetBackOff clears the internal retry counter of
+// resumableStreamDecoder so that the next exponential
+// backoff will start from a fresh state.
+func (d *resumableStreamDecoder) resetBackOff() {
+	d.retryCount = 0
+}
+
+// doBackOff does an exponential backoff sleep.
+func (d *resumableStreamDecoder) doBackOff() {
+	delay := d.backoff.delay(d.retryCount)
+	tracePrintf(d.ctx, nil, "Backing off stream read for %s", delay)
+	ticker := time.NewTicker(delay)
+	defer ticker.Stop()
+	d.retryCount++
+	select {
+	case <-d.ctx.Done():
+	case <-ticker.C:
+	}
+}
+
+// get returns the most recent PartialResultSet generated by a call to next.
+func (d *resumableStreamDecoder) get() *sppb.PartialResultSet {
+	return d.np
+}
+
+// lastErr returns the last non-EOF error encountered.
+func (d *resumableStreamDecoder) lastErr() error {
+	return d.err
+}
+
+// partialResultSetDecoder assembles PartialResultSet(s) into Cloud Spanner
+// Rows.
+type partialResultSetDecoder struct {
+	row     Row
+	tx      *sppb.Transaction
+	chunked bool      // if true, next value should be merged with last values entry.
+	ts      time.Time // read timestamp
+}
+
+// yield checks whether we have a complete row, and if so returns it. A row is
+// not complete if it doesn't have enough columns, or if this is a chunked
+// response and there are no further values to process.
+func (p *partialResultSetDecoder) yield(chunked, last bool) *Row {
+	if len(p.row.vals) == len(p.row.fields) && (!chunked || !last) {
+		// When partialResultSetDecoder has received enough column
+		// values, there are two cases in which a new Row should be
+		// yielded:
+		//   1. The incoming PartialResultSet is not chunked;
+		//   2. The incoming PartialResultSet is chunked, but the
+		//      proto3.Value being merged is not the last one in
+		//      the PartialResultSet.
+		//
+		// Use a fresh Row to simplify clients that want to use yielded results
+		// after the next row is retrieved. Note that fields is never changed
+		// so it doesn't need to be copied.
+		fresh := Row{
+			fields: p.row.fields,
+			vals:   make([]*proto3.Value, len(p.row.vals)),
+		}
+		copy(fresh.vals, p.row.vals)
+		p.row.vals = p.row.vals[:0] // empty and reuse slice
+		return &fresh
+	}
+	return nil
+}
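+
+// Editor's note: a hedged sketch of the chunked-value reassembly performed by
+// add below. The values mirror the result_set.proto example exercised in
+// read_test.go; meta stands for some two-column *sppb.ResultSetMetadata, and
+// stringProto comes from protoutils.go:
+//
+//	var p partialResultSetDecoder
+//	// First set: ChunkedValue means the trailing "W" is incomplete.
+//	p.add(&sppb.PartialResultSet{
+//		Metadata:     meta,
+//		Values:       []*proto3.Value{stringProto("Hello"), stringProto("W")},
+//		ChunkedValue: true,
+//	})
+//	// Second set completes it and yields one row: ["Hello", "World"].
+//	rows, err := p.add(&sppb.PartialResultSet{
+//		Values: []*proto3.Value{stringProto("orld")},
+//	})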
+
+// errChunkedEmptyRow returns the error for a chunked PartialResultSet that
+// arrives when there is no buffered row value for it to complete.
+func errChunkedEmptyRow() error {
+	return spannerErrorf(codes.FailedPrecondition, "got invalid chunked PartialResultSet with empty Row")
+}
+
+// add tries to merge a new PartialResultSet into the buffered Row. It returns
+// any rows that have been completed as a result.
+func (p *partialResultSetDecoder) add(r *sppb.PartialResultSet) ([]*Row, error) {
+	var rows []*Row
+	if r.Metadata != nil {
+		// Metadata should only be returned in the first result.
+		if p.row.fields == nil {
+			p.row.fields = r.Metadata.RowType.Fields
+		}
+		if p.tx == nil && r.Metadata.Transaction != nil {
+			p.tx = r.Metadata.Transaction
+			if p.tx.ReadTimestamp != nil {
+				p.ts = time.Unix(p.tx.ReadTimestamp.Seconds, int64(p.tx.ReadTimestamp.Nanos))
+			}
+		}
+	}
+	if len(r.Values) == 0 {
+		return nil, nil
+	}
+	if p.chunked {
+		p.chunked = false
+		// Try to merge the first value in r.Values into the
+		// uncompleted row.
+		last := len(p.row.vals) - 1
+		if last < 0 { // sanity check
+			return nil, errChunkedEmptyRow()
+		}
+		var err error
+		// If p is chunked, then we should always try to merge p.last with r.first.
+		if p.row.vals[last], err = p.merge(p.row.vals[last], r.Values[0]); err != nil {
+			return nil, err
+		}
+		r.Values = r.Values[1:]
+		// Merge is done; try to yield a complete Row.
+		if row := p.yield(r.ChunkedValue, len(r.Values) == 0); row != nil {
+			rows = append(rows, row)
+		}
+	}
+	for i, v := range r.Values {
+		// The remaining values in r can be appended to p directly.
+		p.row.vals = append(p.row.vals, v)
+		// Again, check whether a complete Row can be yielded because of
+		// the newly added value.
+		if row := p.yield(r.ChunkedValue, i == len(r.Values)-1); row != nil {
+			rows = append(rows, row)
+		}
+	}
+	if r.ChunkedValue {
+		// After dealing with all values in r: if r is chunked, then p
+		// must also be chunked.
+		p.chunked = true
+	}
+	return rows, nil
+}
+
+// isMergeable reports whether a protobuf Value can potentially be merged with
+// other protobuf Values.
+func (p *partialResultSetDecoder) isMergeable(a *proto3.Value) bool {
+	switch a.Kind.(type) {
+	case *proto3.Value_StringValue:
+		return true
+	case *proto3.Value_ListValue:
+		return true
+	default:
+		return false
+	}
+}
+
+// errIncompatibleMergeTypes returns the error for incompatible protobuf types
+// that cannot be merged by partialResultSetDecoder.
+func errIncompatibleMergeTypes(a, b *proto3.Value) error {
+	return spannerErrorf(codes.FailedPrecondition, "incompatible type in chunked PartialResultSet. expected (%T), got (%T)", a.Kind, b.Kind)
+}
+
+// errUnsupportedMergeType returns the error for a protobuf type that cannot
+// be merged with other protobufs.
+func errUnsupportedMergeType(a *proto3.Value) error {
+	return spannerErrorf(codes.FailedPrecondition, "unsupported type merge (%T)", a.Kind)
+}
+
+// merge tries to combine two protobuf Values if possible.
+func (p *partialResultSetDecoder) merge(a, b *proto3.Value) (*proto3.Value, error) {
+	var err error
+	typeErr := errIncompatibleMergeTypes(a, b)
+	switch t := a.Kind.(type) {
+	case *proto3.Value_StringValue:
+		s, ok := b.Kind.(*proto3.Value_StringValue)
+		if !ok {
+			return nil, typeErr
+		}
+		return &proto3.Value{
+			Kind: &proto3.Value_StringValue{StringValue: t.StringValue + s.StringValue},
+		}, nil
+	case *proto3.Value_ListValue:
+		l, ok := b.Kind.(*proto3.Value_ListValue)
+		if !ok {
+			return nil, typeErr
+		}
+		if l.ListValue == nil || len(l.ListValue.Values) <= 0 {
+			// b is an empty list, just return a.
+			return a, nil
+		}
+		if t.ListValue == nil || len(t.ListValue.Values) <= 0 {
+			// a is an empty list, just return b.
+			return b, nil
+		}
+		if la := len(t.ListValue.Values) - 1; p.isMergeable(t.ListValue.Values[la]) {
+			// When the last item in a is of type String, List or
+			// Struct (encoded as a List by Cloud Spanner), try to
+			// merge the last item in a with the first item in b.
+			t.ListValue.Values[la], err = p.merge(t.ListValue.Values[la], l.ListValue.Values[0])
+			if err != nil {
+				return nil, err
+			}
+			l.ListValue.Values = l.ListValue.Values[1:]
+		}
+		return &proto3.Value{
+			Kind: &proto3.Value_ListValue{
+				ListValue: &proto3.ListValue{
+					Values: append(t.ListValue.Values, l.ListValue.Values...),
+				},
+			},
+		}, nil
+	default:
+		return nil, errUnsupportedMergeType(a)
+	}
+}
+
+// done reports whether partialResultSetDecoder has processed all buffered
+// values.
+func (p *partialResultSetDecoder) done() bool {
+	// There is no explicit end-of-stream marker, but ending partway
+	// through a row, or with the last column still awaiting completion,
+	// is clearly wrong.
+	return len(p.row.vals) == 0 && !p.chunked
+}
diff --git a/vendor/cloud.google.com/go/spanner/read_test.go b/vendor/cloud.google.com/go/spanner/read_test.go
new file mode 100644
index 0000000..1a2e693
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/read_test.go
@@ -0,0 +1,1733 @@
+/*
+Copyright 2017 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spanner
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"sync/atomic"
+	"testing"
+	"time"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/status"
+
+	"github.com/golang/protobuf/proto"
+	proto3 "github.com/golang/protobuf/ptypes/struct"
+
+	"cloud.google.com/go/spanner/internal/testutil"
+	"google.golang.org/api/iterator"
+	sppb "google.golang.org/genproto/googleapis/spanner/v1"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+)
+
+var (
+	// Mocked transaction timestamp.
+	trxTs = time.Unix(1, 2)
+	// Metadata for the mocked KV table; its rows are returned by SingleUse transactions.
+	kvMeta = func() *sppb.ResultSetMetadata {
+		meta := testutil.KvMeta
+		meta.Transaction = &sppb.Transaction{
+			ReadTimestamp: timestampProto(trxTs),
+		}
+		return &meta
+	}()
+	// Metadata for the mocked ListKV table, which uses List for its key and value.
+	// Its rows are returned by snapshot read-only transactions, as indicated in the transaction metadata.
+ kvListMeta = &sppb.ResultSetMetadata{ + RowType: &sppb.StructType{ + Fields: []*sppb.StructType_Field{ + { + Name: "Key", + Type: &sppb.Type{ + Code: sppb.TypeCode_ARRAY, + ArrayElementType: &sppb.Type{ + Code: sppb.TypeCode_STRING, + }, + }, + }, + { + Name: "Value", + Type: &sppb.Type{ + Code: sppb.TypeCode_ARRAY, + ArrayElementType: &sppb.Type{ + Code: sppb.TypeCode_STRING, + }, + }, + }, + }, + }, + Transaction: &sppb.Transaction{ + Id: transactionID{5, 6, 7, 8, 9}, + ReadTimestamp: timestampProto(trxTs), + }, + } + // Metadata for mocked schema of a query result set, which has two struct + // columns named "Col1" and "Col2", the struct's schema is like the + // following: + // + // STRUCT { + // INT + // LIST + // } + // + // Its rows are returned in readwrite transaction, as indicated in the transaction metadata. + kvObjectMeta = &sppb.ResultSetMetadata{ + RowType: &sppb.StructType{ + Fields: []*sppb.StructType_Field{ + { + Name: "Col1", + Type: &sppb.Type{ + Code: sppb.TypeCode_STRUCT, + StructType: &sppb.StructType{ + Fields: []*sppb.StructType_Field{ + { + Name: "foo-f1", + Type: &sppb.Type{ + Code: sppb.TypeCode_INT64, + }, + }, + { + Name: "foo-f2", + Type: &sppb.Type{ + Code: sppb.TypeCode_ARRAY, + ArrayElementType: &sppb.Type{ + Code: sppb.TypeCode_STRING, + }, + }, + }, + }, + }, + }, + }, + { + Name: "Col2", + Type: &sppb.Type{ + Code: sppb.TypeCode_STRUCT, + StructType: &sppb.StructType{ + Fields: []*sppb.StructType_Field{ + { + Name: "bar-f1", + Type: &sppb.Type{ + Code: sppb.TypeCode_INT64, + }, + }, + { + Name: "bar-f2", + Type: &sppb.Type{ + Code: sppb.TypeCode_ARRAY, + ArrayElementType: &sppb.Type{ + Code: sppb.TypeCode_STRING, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Transaction: &sppb.Transaction{ + Id: transactionID{1, 2, 3, 4, 5}, + }, + } +) + +// String implements fmt.stringer. +func (r *Row) String() string { + return fmt.Sprintf("{fields: %s, val: %s}", r.fields, r.vals) +} + +func describeRows(l []*Row) string { + // generate a nice test failure description + var s = "[" + for i, r := range l { + if i != 0 { + s += ",\n " + } + s += fmt.Sprint(r) + } + s += "]" + return s +} + +// Helper for generating proto3 Value_ListValue instances, making +// test code shorter and readable. +func genProtoListValue(v ...string) *proto3.Value_ListValue { + r := &proto3.Value_ListValue{ + ListValue: &proto3.ListValue{ + Values: []*proto3.Value{}, + }, + } + for _, e := range v { + r.ListValue.Values = append( + r.ListValue.Values, + &proto3.Value{ + Kind: &proto3.Value_StringValue{StringValue: e}, + }, + ) + } + return r +} + +// Test Row generation logics of partialResultSetDecoder. +func TestPartialResultSetDecoder(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + var tests = []struct { + input []*sppb.PartialResultSet + wantF []*Row + wantTxID transactionID + wantTs time.Time + wantD bool + }{ + { + // Empty input. + wantD: true, + }, + // String merging examples. + { + // Single KV result. + input: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + {Kind: &proto3.Value_StringValue{StringValue: "bar"}}, + }, + }, + }, + wantF: []*Row{ + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + {Kind: &proto3.Value_StringValue{StringValue: "bar"}}, + }, + }, + }, + wantTs: trxTs, + wantD: true, + }, + { + // Incomplete partial result. 
+ input: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + }, + }, + }, + wantTs: trxTs, + wantD: false, + }, + { + // Complete splitted result. + input: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + }, + }, + { + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "bar"}}, + }, + }, + }, + wantF: []*Row{ + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + {Kind: &proto3.Value_StringValue{StringValue: "bar"}}, + }, + }, + }, + wantTs: trxTs, + wantD: true, + }, + { + // Multi-row example with splitted row in the middle. + input: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + {Kind: &proto3.Value_StringValue{StringValue: "bar"}}, + {Kind: &proto3.Value_StringValue{StringValue: "A"}}, + }, + }, + { + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "1"}}, + {Kind: &proto3.Value_StringValue{StringValue: "B"}}, + {Kind: &proto3.Value_StringValue{StringValue: "2"}}, + }, + }, + }, + wantF: []*Row{ + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + {Kind: &proto3.Value_StringValue{StringValue: "bar"}}, + }, + }, + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "A"}}, + {Kind: &proto3.Value_StringValue{StringValue: "1"}}, + }, + }, + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "B"}}, + {Kind: &proto3.Value_StringValue{StringValue: "2"}}, + }, + }, + }, + wantTs: trxTs, + wantD: true, + }, + { + // Merging example in result_set.proto. + input: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "Hello"}}, + {Kind: &proto3.Value_StringValue{StringValue: "W"}}, + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "orl"}}, + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "d"}}, + }, + }, + }, + wantF: []*Row{ + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "Hello"}}, + {Kind: &proto3.Value_StringValue{StringValue: "World"}}, + }, + }, + }, + wantTs: trxTs, + wantD: true, + }, + { + // More complex example showing completing a merge and + // starting a new merge in the same partialResultSet. 
+ input: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "Hello"}}, + {Kind: &proto3.Value_StringValue{StringValue: "W"}}, // start split in value + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "orld"}}, // complete value + {Kind: &proto3.Value_StringValue{StringValue: "i"}}, // start split in key + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "s"}}, // complete key + {Kind: &proto3.Value_StringValue{StringValue: "not"}}, + {Kind: &proto3.Value_StringValue{StringValue: "a"}}, + {Kind: &proto3.Value_StringValue{StringValue: "qu"}}, // split in value + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "estion"}}, // complete value + }, + }, + }, + wantF: []*Row{ + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "Hello"}}, + {Kind: &proto3.Value_StringValue{StringValue: "World"}}, + }, + }, + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "is"}}, + {Kind: &proto3.Value_StringValue{StringValue: "not"}}, + }, + }, + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "a"}}, + {Kind: &proto3.Value_StringValue{StringValue: "question"}}, + }, + }, + }, + wantTs: trxTs, + wantD: true, + }, + // List merging examples. + { + // Non-splitting Lists. + input: []*sppb.PartialResultSet{ + { + Metadata: kvListMeta, + Values: []*proto3.Value{ + { + Kind: genProtoListValue("foo-1", "foo-2"), + }, + }, + }, + { + Values: []*proto3.Value{ + { + Kind: genProtoListValue("bar-1", "bar-2"), + }, + }, + }, + }, + wantF: []*Row{ + { + fields: kvListMeta.RowType.Fields, + vals: []*proto3.Value{ + { + Kind: genProtoListValue("foo-1", "foo-2"), + }, + { + Kind: genProtoListValue("bar-1", "bar-2"), + }, + }, + }, + }, + wantTxID: transactionID{5, 6, 7, 8, 9}, + wantTs: trxTs, + wantD: true, + }, + { + // Simple List merge case: splitted string element. + input: []*sppb.PartialResultSet{ + { + Metadata: kvListMeta, + Values: []*proto3.Value{ + { + Kind: genProtoListValue("foo-1", "foo-"), + }, + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + { + Kind: genProtoListValue("2"), + }, + }, + }, + { + Values: []*proto3.Value{ + { + Kind: genProtoListValue("bar-1", "bar-2"), + }, + }, + }, + }, + wantF: []*Row{ + { + fields: kvListMeta.RowType.Fields, + vals: []*proto3.Value{ + { + Kind: genProtoListValue("foo-1", "foo-2"), + }, + { + Kind: genProtoListValue("bar-1", "bar-2"), + }, + }, + }, + }, + wantTxID: transactionID{5, 6, 7, 8, 9}, + wantTs: trxTs, + wantD: true, + }, + { + // Struct merging is also implemented by List merging. Note that + // Cloud Spanner uses proto.ListValue to encode Structs as well. 
+ input: []*sppb.PartialResultSet{ + { + Metadata: kvObjectMeta, + Values: []*proto3.Value{ + { + Kind: &proto3.Value_ListValue{ + ListValue: &proto3.ListValue{ + Values: []*proto3.Value{ + {Kind: &proto3.Value_NumberValue{NumberValue: 23}}, + {Kind: genProtoListValue("foo-1", "fo")}, + }, + }, + }, + }, + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + { + Kind: &proto3.Value_ListValue{ + ListValue: &proto3.ListValue{ + Values: []*proto3.Value{ + {Kind: genProtoListValue("o-2", "f")}, + }, + }, + }, + }, + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + { + Kind: &proto3.Value_ListValue{ + ListValue: &proto3.ListValue{ + Values: []*proto3.Value{ + {Kind: genProtoListValue("oo-3")}, + }, + }, + }, + }, + { + Kind: &proto3.Value_ListValue{ + ListValue: &proto3.ListValue{ + Values: []*proto3.Value{ + {Kind: &proto3.Value_NumberValue{NumberValue: 45}}, + {Kind: genProtoListValue("bar-1")}, + }, + }, + }, + }, + }, + }, + }, + wantF: []*Row{ + { + fields: kvObjectMeta.RowType.Fields, + vals: []*proto3.Value{ + { + Kind: &proto3.Value_ListValue{ + ListValue: &proto3.ListValue{ + Values: []*proto3.Value{ + {Kind: &proto3.Value_NumberValue{NumberValue: 23}}, + {Kind: genProtoListValue("foo-1", "foo-2", "foo-3")}, + }, + }, + }, + }, + { + Kind: &proto3.Value_ListValue{ + ListValue: &proto3.ListValue{ + Values: []*proto3.Value{ + {Kind: &proto3.Value_NumberValue{NumberValue: 45}}, + {Kind: genProtoListValue("bar-1")}, + }, + }, + }, + }, + }, + }, + }, + wantTxID: transactionID{1, 2, 3, 4, 5}, + wantD: true, + }, + } + +nextTest: + for i, test := range tests { + var rows []*Row + p := &partialResultSetDecoder{} + for j, v := range test.input { + rs, err := p.add(v) + if err != nil { + t.Errorf("test %d.%d: partialResultSetDecoder.add(%v) = %v; want nil", i, j, v, err) + continue nextTest + } + rows = append(rows, rs...) + } + if !testEqual(p.ts, test.wantTs) { + t.Errorf("got transaction(%v), want %v", p.ts, test.wantTs) + } + if !testEqual(rows, test.wantF) { + t.Errorf("test %d: rows=\n%v\n; want\n%v\n; p.row:\n%v\n", i, describeRows(rows), describeRows(test.wantF), p.row) + } + if got := p.done(); got != test.wantD { + t.Errorf("test %d: partialResultSetDecoder.done() = %v", i, got) + } + } +} + +const ( + maxBuffers = 16 // max number of PartialResultSets that will be buffered in tests. +) + +// setMaxBytesBetweenResumeTokens sets the global maxBytesBetweenResumeTokens to a smaller +// value more suitable for tests. It returns a function which should be called to restore +// the maxBytesBetweenResumeTokens to its old value +func setMaxBytesBetweenResumeTokens() func() { + o := atomic.LoadInt32(&maxBytesBetweenResumeTokens) + atomic.StoreInt32(&maxBytesBetweenResumeTokens, int32(maxBuffers*proto.Size(&sppb.PartialResultSet{ + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(0)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(0)}}, + }, + }))) + return func() { + atomic.StoreInt32(&maxBytesBetweenResumeTokens, o) + } +} + +// keyStr generates key string for kvMeta schema. +func keyStr(i int) string { + return fmt.Sprintf("foo-%02d", i) +} + +// valStr generates value string for kvMeta schema. +func valStr(i int) string { + return fmt.Sprintf("bar-%02d", i) +} + +// Test state transitions of resumableStreamDecoder where state machine +// ends up to a non-blocking state(resumableStreamDecoder.Next returns +// on non-blocking state). 
+func TestRsdNonblockingStates(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + tests := []struct { + name string + msgs []testutil.MockCtlMsg + rpc func(ct context.Context, resumeToken []byte) (streamingReceiver, error) + sql string + // Expected values + want []*sppb.PartialResultSet // PartialResultSets that should be returned to caller + queue []*sppb.PartialResultSet // PartialResultSets that should be buffered + resumeToken []byte // Resume token that is maintained by resumableStreamDecoder + stateHistory []resumableStreamDecoderState // State transition history of resumableStreamDecoder + wantErr error + }{ + { + // unConnected->queueingRetryable->finished + name: "unConnected->queueingRetryable->finished", + msgs: []testutil.MockCtlMsg{ + {}, + {}, + {Err: io.EOF, ResumeToken: false}, + }, + sql: "SELECT t.key key, t.value value FROM t_mock t", + want: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(0)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(0)}}, + }, + }, + }, + queue: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(1)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(1)}}, + }, + }, + }, + stateHistory: []resumableStreamDecoderState{ + queueingRetryable, // do RPC + queueingRetryable, // got foo-00 + queueingRetryable, // got foo-01 + finished, // got EOF + }, + }, + { + // unConnected->queueingRetryable->aborted + name: "unConnected->queueingRetryable->aborted", + msgs: []testutil.MockCtlMsg{ + {}, + {Err: nil, ResumeToken: true}, + {}, + {Err: errors.New("I quit"), ResumeToken: false}, + }, + sql: "SELECT t.key key, t.value value FROM t_mock t", + want: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(0)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(0)}}, + }, + }, + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(1)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(1)}}, + }, + ResumeToken: testutil.EncodeResumeToken(1), + }, + }, + stateHistory: []resumableStreamDecoderState{ + queueingRetryable, // do RPC + queueingRetryable, // got foo-00 + queueingRetryable, // got foo-01 + queueingRetryable, // foo-01, resume token + queueingRetryable, // got foo-02 + aborted, // got error + }, + wantErr: status.Errorf(codes.Unknown, "I quit"), + }, + { + // unConnected->queueingRetryable->queueingUnretryable->queueingUnretryable + name: "unConnected->queueingRetryable->queueingUnretryable->queueingUnretryable", + msgs: func() (m []testutil.MockCtlMsg) { + for i := 0; i < maxBuffers+1; i++ { + m = append(m, testutil.MockCtlMsg{}) + } + return m + }(), + sql: "SELECT t.key key, t.value value FROM t_mock t", + want: func() (s []*sppb.PartialResultSet) { + for i := 0; i < maxBuffers+1; i++ { + s = append(s, &sppb.PartialResultSet{ + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(i)}}, + }, + }) + } + return s + }(), + stateHistory: func() (s []resumableStreamDecoderState) { + s = append(s, queueingRetryable) // RPC + for i := 0; i < maxBuffers; i++ { + s = append(s, queueingRetryable) // the internal queue of resumableStreamDecoder fills up + } + // the first item fills up the queue and triggers 
state transition; + // the second item is received under queueingUnretryable state. + s = append(s, queueingUnretryable) + s = append(s, queueingUnretryable) + return s + }(), + }, + { + // unConnected->queueingRetryable->queueingUnretryable->aborted + name: "unConnected->queueingRetryable->queueingUnretryable->aborted", + msgs: func() (m []testutil.MockCtlMsg) { + for i := 0; i < maxBuffers; i++ { + m = append(m, testutil.MockCtlMsg{}) + } + m = append(m, testutil.MockCtlMsg{Err: errors.New("Just Abort It"), ResumeToken: false}) + return m + }(), + sql: "SELECT t.key key, t.value value FROM t_mock t", + want: func() (s []*sppb.PartialResultSet) { + for i := 0; i < maxBuffers; i++ { + s = append(s, &sppb.PartialResultSet{ + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(i)}}, + }, + }) + } + return s + }(), + stateHistory: func() (s []resumableStreamDecoderState) { + s = append(s, queueingRetryable) // RPC + for i := 0; i < maxBuffers; i++ { + s = append(s, queueingRetryable) // internal queue of resumableStreamDecoder fills up + } + s = append(s, queueingUnretryable) // the last row triggers state change + s = append(s, aborted) // Error happens + return s + }(), + wantErr: status.Errorf(codes.Unknown, "Just Abort It"), + }, + } +nextTest: + for _, test := range tests { + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + mc := sppb.NewSpannerClient(dialMock(t, ms)) + if test.rpc == nil { + test.rpc = func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: test.sql, + ResumeToken: resumeToken, + }) + } + } + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + r := newResumableStreamDecoder( + ctx, + test.rpc, + ) + st := []resumableStreamDecoderState{} + var lastErr error + // Once the expected number of state transitions are observed, + // send a signal by setting stateDone = true. + stateDone := false + // Set stateWitness to listen to state changes. + hl := len(test.stateHistory) // To avoid data race on test. + r.stateWitness = func(rs resumableStreamDecoderState) { + if !stateDone { + // Record state transitions. + st = append(st, rs) + if len(st) == hl { + lastErr = r.lastErr() + stateDone = true + } + } + } + // Let mock server stream given messages to resumableStreamDecoder. + for _, m := range test.msgs { + ms.AddMsg(m.Err, m.ResumeToken) + } + var rs []*sppb.PartialResultSet + for { + select { + case <-ctx.Done(): + t.Errorf("context cancelled or timeout during test") + continue nextTest + default: + } + if stateDone { + // Check if resumableStreamDecoder carried out expected + // state transitions. + if !testEqual(st, test.stateHistory) { + t.Errorf("%v: observed state transitions: \n%v\n, want \n%v\n", + test.name, st, test.stateHistory) + } + // Check if resumableStreamDecoder returns expected array of + // PartialResultSets. + if !testEqual(rs, test.want) { + t.Errorf("%v: received PartialResultSets: \n%v\n, want \n%v\n", test.name, rs, test.want) + } + // Verify that resumableStreamDecoder's internal buffering is also correct. + var q []*sppb.PartialResultSet + for { + item := r.q.pop() + if item == nil { + break + } + q = append(q, item) + } + if !testEqual(q, test.queue) { + t.Errorf("%v: PartialResultSets still queued: \n%v\n, want \n%v\n", test.name, q, test.queue) + } + // Verify resume token. 
+ if test.resumeToken != nil && !testEqual(r.resumeToken, test.resumeToken) { + t.Errorf("%v: Resume token is %v, want %v\n", test.name, r.resumeToken, test.resumeToken) + } + // Verify error message. + if !testEqual(lastErr, test.wantErr) { + t.Errorf("%v: got error %v, want %v", test.name, lastErr, test.wantErr) + } + // Proceed to next test + continue nextTest + } + // Receive next decoded item. + if r.next() { + rs = append(rs, r.get()) + } + } + } +} + +// Test state transitions of resumableStreamDecoder where state machine +// ends up to a blocking state(resumableStreamDecoder.Next blocks +// on blocking state). +func TestRsdBlockingStates(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + tests := []struct { + name string + msgs []testutil.MockCtlMsg + rpc func(ct context.Context, resumeToken []byte) (streamingReceiver, error) + sql string + // Expected values + want []*sppb.PartialResultSet // PartialResultSets that should be returned to caller + queue []*sppb.PartialResultSet // PartialResultSets that should be buffered + resumeToken []byte // Resume token that is maintained by resumableStreamDecoder + stateHistory []resumableStreamDecoderState // State transition history of resumableStreamDecoder + wantErr error + }{ + { + // unConnected -> unConnected + name: "unConnected -> unConnected", + rpc: func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + return nil, status.Errorf(codes.Unavailable, "trust me: server is unavailable") + }, + sql: "SELECT * from t_whatever", + stateHistory: []resumableStreamDecoderState{unConnected, unConnected, unConnected}, + wantErr: status.Errorf(codes.Unavailable, "trust me: server is unavailable"), + }, + { + // unConnected -> queueingRetryable + name: "unConnected -> queueingRetryable", + sql: "SELECT t.key key, t.value value FROM t_mock t", + stateHistory: []resumableStreamDecoderState{queueingRetryable}, + }, + { + // unConnected->queueingRetryable->queueingRetryable + name: "unConnected->queueingRetryable->queueingRetryable", + msgs: []testutil.MockCtlMsg{ + {}, + {Err: nil, ResumeToken: true}, + {Err: nil, ResumeToken: true}, + {}, + }, + sql: "SELECT t.key key, t.value value FROM t_mock t", + want: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(0)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(0)}}, + }, + }, + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(1)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(1)}}, + }, + ResumeToken: testutil.EncodeResumeToken(1), + }, + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(2)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(2)}}, + }, + ResumeToken: testutil.EncodeResumeToken(2), + }, + }, + queue: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(3)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(3)}}, + }, + }, + }, + resumeToken: testutil.EncodeResumeToken(2), + stateHistory: []resumableStreamDecoderState{ + queueingRetryable, // do RPC + queueingRetryable, // got foo-00 + queueingRetryable, // got foo-01 + queueingRetryable, // foo-01, resume token + queueingRetryable, // got foo-02 + queueingRetryable, // foo-02, resume token + queueingRetryable, // got foo-03 + }, + }, + { + // 
unConnected->queueingRetryable->queueingUnretryable->queueingRetryable->queueingRetryable
+			name: "unConnected->queueingRetryable->queueingUnretryable->queueingRetryable->queueingRetryable",
+			msgs: func() (m []testutil.MockCtlMsg) {
+				for i := 0; i < maxBuffers+1; i++ {
+					m = append(m, testutil.MockCtlMsg{})
+				}
+				m = append(m, testutil.MockCtlMsg{Err: nil, ResumeToken: true})
+				m = append(m, testutil.MockCtlMsg{})
+				return m
+			}(),
+			sql: "SELECT t.key key, t.value value FROM t_mock t",
+			want: func() (s []*sppb.PartialResultSet) {
+				for i := 0; i < maxBuffers+2; i++ {
+					s = append(s, &sppb.PartialResultSet{
+						Metadata: kvMeta,
+						Values: []*proto3.Value{
+							{Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}},
+							{Kind: &proto3.Value_StringValue{StringValue: valStr(i)}},
+						},
+					})
+				}
+				s[maxBuffers+1].ResumeToken = testutil.EncodeResumeToken(maxBuffers + 1)
+				return s
+			}(),
+			resumeToken: testutil.EncodeResumeToken(maxBuffers + 1),
+			queue: []*sppb.PartialResultSet{
+				{
+					Metadata: kvMeta,
+					Values: []*proto3.Value{
+						{Kind: &proto3.Value_StringValue{StringValue: keyStr(maxBuffers + 2)}},
+						{Kind: &proto3.Value_StringValue{StringValue: valStr(maxBuffers + 2)}},
+					},
+				},
+			},
+			stateHistory: func() (s []resumableStreamDecoderState) {
+				s = append(s, queueingRetryable) // RPC
+				for i := 0; i < maxBuffers; i++ {
+					s = append(s, queueingRetryable) // internal queue of resumableStreamDecoder fills up
+				}
+				for i := maxBuffers - 1; i < maxBuffers+1; i++ {
+					// the first item fills up the queue and triggers state change;
+					// the second item is received under queueingUnretryable state.
+					s = append(s, queueingUnretryable)
+				}
+				s = append(s, queueingUnretryable) // got (maxBuffers+1)th row under Unretryable state
+				s = append(s, queueingRetryable)   // (maxBuffers+1)th row has resume token
+				s = append(s, queueingRetryable)   // (maxBuffers+2)th row has no resume token
+				return s
+			}(),
+		},
+		{
+			// unConnected->queueingRetryable->queueingUnretryable->finished
+			name: "unConnected->queueingRetryable->queueingUnretryable->finished",
+			msgs: func() (m []testutil.MockCtlMsg) {
+				for i := 0; i < maxBuffers; i++ {
+					m = append(m, testutil.MockCtlMsg{})
+				}
+				m = append(m, testutil.MockCtlMsg{Err: io.EOF, ResumeToken: false})
+				return m
+			}(),
+			sql: "SELECT t.key key, t.value value FROM t_mock t",
+			want: func() (s []*sppb.PartialResultSet) {
+				for i := 0; i < maxBuffers; i++ {
+					s = append(s, &sppb.PartialResultSet{
+						Metadata: kvMeta,
+						Values: []*proto3.Value{
+							{Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}},
+							{Kind: &proto3.Value_StringValue{StringValue: valStr(i)}},
+						},
+					})
+				}
+				return s
+			}(),
+			stateHistory: func() (s []resumableStreamDecoderState) {
+				s = append(s, queueingRetryable) // RPC
+				for i := 0; i < maxBuffers; i++ {
+					s = append(s, queueingRetryable) // internal queue of resumableStreamDecoder fills up
+				}
+				s = append(s, queueingUnretryable) // last row triggers state change
+				s = append(s, finished)            // query finishes
+				return s
+			}(),
+		},
+	}
+	for _, test := range tests {
+		ms := testutil.NewMockCloudSpanner(t, trxTs)
+		ms.Serve()
+		cc := dialMock(t, ms)
+		mc := sppb.NewSpannerClient(cc)
+		if test.rpc == nil {
+			// Avoid using test.sql directly in the closure because the for loop reuses the test variable.
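+			// (Classic range-loop capture: test is one variable rewritten on
+			// every iteration, so a closure that read test.sql lazily would
+			// observe whichever case the loop reached last. Copying the value
+			// into a per-iteration local, as on the next line, pins it down.)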
+ sql := test.sql + test.rpc = func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: sql, + ResumeToken: resumeToken, + }) + } + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + r := newResumableStreamDecoder( + ctx, + test.rpc, + ) + // Override backoff to make the test run faster. + r.backoff = exponentialBackoff{1 * time.Nanosecond, 1 * time.Nanosecond} + // st is the set of observed state transitions. + st := []resumableStreamDecoderState{} + // q is the content of the decoder's partial result queue when expected number of state transitions are done. + q := []*sppb.PartialResultSet{} + var lastErr error + // Once the expected number of state transitions are observed, + // send a signal to channel stateDone. + stateDone := make(chan int) + // Set stateWitness to listen to state changes. + hl := len(test.stateHistory) // To avoid data race on test. + r.stateWitness = func(rs resumableStreamDecoderState) { + select { + case <-stateDone: + // Noop after expected number of state transitions + default: + // Record state transitions. + st = append(st, rs) + if len(st) == hl { + lastErr = r.lastErr() + q = r.q.dump() + close(stateDone) + } + } + } + // Let mock server stream given messages to resumableStreamDecoder. + for _, m := range test.msgs { + ms.AddMsg(m.Err, m.ResumeToken) + } + var rs []*sppb.PartialResultSet + go func() { + for { + if !r.next() { + // Note that r.Next also exits on context cancel/timeout. + return + } + rs = append(rs, r.get()) + } + }() + // Verify that resumableStreamDecoder reaches expected state. + select { + case <-stateDone: // Note that at this point, receiver is still blocking on r.next(). + // Check if resumableStreamDecoder carried out expected + // state transitions. + if !testEqual(st, test.stateHistory) { + t.Errorf("%v: observed state transitions: \n%v\n, want \n%v\n", + test.name, st, test.stateHistory) + } + // Check if resumableStreamDecoder returns expected array of + // PartialResultSets. + if !testEqual(rs, test.want) { + t.Errorf("%v: received PartialResultSets: \n%v\n, want \n%v\n", test.name, rs, test.want) + } + // Verify that resumableStreamDecoder's internal buffering is also correct. + if !testEqual(q, test.queue) { + t.Errorf("%v: PartialResultSets still queued: \n%v\n, want \n%v\n", test.name, q, test.queue) + } + // Verify resume token. + if test.resumeToken != nil && !testEqual(r.resumeToken, test.resumeToken) { + t.Errorf("%v: Resume token is %v, want %v\n", test.name, r.resumeToken, test.resumeToken) + } + // Verify error message. + if !testEqual(lastErr, test.wantErr) { + t.Errorf("%v: got error %v, want %v", test.name, lastErr, test.wantErr) + } + case <-time.After(1 * time.Second): + t.Errorf("%v: Timeout in waiting for state change", test.name) + } + ms.Stop() + cc.Close() + } +} + +// sReceiver signals every receiving attempt through a channel, +// used by TestResumeToken to determine if the receiving of a certain +// PartialResultSet will be attempted next. +type sReceiver struct { + c chan int + rpcReceiver sppb.Spanner_ExecuteStreamingSqlClient +} + +// Recv() implements streamingReceiver.Recv for sReceiver. +func (sr *sReceiver) Recv() (*sppb.PartialResultSet, error) { + sr.c <- 1 + return sr.rpcReceiver.Recv() +} + +// waitn waits for nth receiving attempt from now on, until +// the signal for nth Recv() attempts is received or timeout. 
+// Note that because of the way stream() works, the signal for the
+// nth Recv() means that the previous n - 1 PartialResultSets
+// have already been returned to the caller or queued, if no error happened.
+func (sr *sReceiver) waitn(n int) error {
+	for i := 0; i < n; i++ {
+		select {
+		case <-sr.c:
+		case <-time.After(10 * time.Second):
+			return fmt.Errorf("timeout in waiting for %v-th Recv()", i+1)
+		}
+	}
+	return nil
+}
+
+// Test the handling of resumableStreamDecoder.bytesBetweenResumeTokens.
+func TestQueueBytes(t *testing.T) {
+	restore := setMaxBytesBetweenResumeTokens()
+	defer restore()
+	ms := testutil.NewMockCloudSpanner(t, trxTs)
+	ms.Serve()
+	defer ms.Stop()
+	cc := dialMock(t, ms)
+	defer cc.Close()
+	mc := sppb.NewSpannerClient(cc)
+	sr := &sReceiver{
+		c: make(chan int, 1000), // will never block in this test
+	}
+	wantQueueBytes := 0
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+	r := newResumableStreamDecoder(
+		ctx,
+		func(ct context.Context, resumeToken []byte) (streamingReceiver, error) {
+			r, err := mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{
+				Sql:         "SELECT t.key key, t.value value FROM t_mock t",
+				ResumeToken: resumeToken,
+			})
+			sr.rpcReceiver = r
+			return sr, err
+		},
+	)
+	go func() {
+		for r.next() {
+		}
+	}()
+	// Let server send maxBuffers / 2 rows.
+	for i := 0; i < maxBuffers/2; i++ {
+		wantQueueBytes += proto.Size(&sppb.PartialResultSet{
+			Metadata: kvMeta,
+			Values: []*proto3.Value{
+				{Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}},
+				{Kind: &proto3.Value_StringValue{StringValue: valStr(i)}},
+			},
+		})
+		ms.AddMsg(nil, false)
+	}
+	if err := sr.waitn(maxBuffers/2 + 1); err != nil {
+		t.Fatalf("failed to wait for the first %v recv() calls: %v", maxBuffers/2+1, err)
+	}
+	if int32(wantQueueBytes) != r.bytesBetweenResumeTokens {
+		t.Errorf("r.bytesBetweenResumeTokens = %v, want %v", r.bytesBetweenResumeTokens, wantQueueBytes)
+	}
+	// Now send a resume token to drain the queue.
+	ms.AddMsg(nil, true)
+	// Wait for all rows to be processed.
+	if err := sr.waitn(1); err != nil {
+		t.Fatalf("failed to wait for rows to be processed: %v", err)
+	}
+	if r.bytesBetweenResumeTokens != 0 {
+		t.Errorf("r.bytesBetweenResumeTokens = %v, want 0", r.bytesBetweenResumeTokens)
+	}
+	// Let server send maxBuffers - 1 rows.
+	wantQueueBytes = 0
+	for i := 0; i < maxBuffers-1; i++ {
+		wantQueueBytes += proto.Size(&sppb.PartialResultSet{
+			Metadata: kvMeta,
+			Values: []*proto3.Value{
+				{Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}},
+				{Kind: &proto3.Value_StringValue{StringValue: valStr(i)}},
+			},
+		})
+		ms.AddMsg(nil, false)
+	}
+	if err := sr.waitn(maxBuffers - 1); err != nil {
+		t.Fatalf("failed to wait for %v rows to be processed: %v", maxBuffers-1, err)
+	}
+	if int32(wantQueueBytes) != r.bytesBetweenResumeTokens {
+		t.Errorf("r.bytesBetweenResumeTokens = %v, want %v", r.bytesBetweenResumeTokens, wantQueueBytes)
+	}
+	// Trigger a state transition: queueingRetryable -> queueingUnretryable.
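+	// (The queue already holds maxBuffers-1 rows with no resume token in
+	// sight, so one more row pushes bytesBetweenResumeTokens up to the
+	// reduced threshold installed by setMaxBytesBetweenResumeTokens; the
+	// decoder then stops counting toward a resume point, which is why the
+	// counter is expected to read 0 below.)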
+ ms.AddMsg(nil, false) + if err := sr.waitn(1); err != nil { + t.Fatalf("failed to wait for state transition: %v", err) + } + if r.bytesBetweenResumeTokens != 0 { + t.Errorf("r.bytesBetweenResumeTokens = %v, want 0", r.bytesBetweenResumeTokens) + } +} + +// Verify that client can deal with resume token correctly +func TestResumeToken(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + defer ms.Stop() + cc := dialMock(t, ms) + defer cc.Close() + mc := sppb.NewSpannerClient(cc) + sr := &sReceiver{ + c: make(chan int, 1000), // will never block in this test + } + rows := []*Row{} + done := make(chan error) + streaming := func() { + // Establish a stream to mock cloud spanner server. + iter := stream(context.Background(), + func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + r, err := mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: "SELECT t.key key, t.value value FROM t_mock t", + ResumeToken: resumeToken, + }) + sr.rpcReceiver = r + return sr, err + }, + nil, + func(error) {}) + defer iter.Stop() + var err error + for { + var row *Row + row, err = iter.Next() + if err == iterator.Done { + err = nil + break + } + if err != nil { + break + } + rows = append(rows, row) + } + done <- err + } + go streaming() + // Server streaming row 0 - 2, only row 1 has resume token. + // Client will receive row 0 - 2, so it will try receiving for + // 4 times (the last recv will block), and only row 0 - 1 will + // be yielded. + for i := 0; i < 3; i++ { + if i == 1 { + ms.AddMsg(nil, true) + } else { + ms.AddMsg(nil, false) + } + } + // Wait for 4 receive attempts, as explained above. + if err := sr.waitn(4); err != nil { + t.Fatalf("failed to wait for row 0 - 2: %v", err) + } + want := []*Row{ + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(0)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(0)}}, + }, + }, + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(1)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(1)}}, + }, + }, + } + if !testEqual(rows, want) { + t.Errorf("received rows: \n%v\n; but want\n%v\n", rows, want) + } + // Inject resumable failure. + ms.AddMsg( + status.Errorf(codes.Unavailable, "mock server unavailable"), + false, + ) + // Test if client detects the resumable failure and retries. + if err := sr.waitn(1); err != nil { + t.Fatalf("failed to wait for client to retry: %v", err) + } + // Client has resumed the query, now server resend row 2. + ms.AddMsg(nil, true) + if err := sr.waitn(1); err != nil { + t.Fatalf("failed to wait for resending row 2: %v", err) + } + // Now client should have received row 0 - 2. + want = append(want, &Row{ + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(2)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(2)}}, + }, + }) + if !testEqual(rows, want) { + t.Errorf("received rows: \n%v\n, want\n%v\n", rows, want) + } + // Sending 3rd - (maxBuffers+1)th rows without resume tokens, client should buffer them. + for i := 3; i < maxBuffers+2; i++ { + ms.AddMsg(nil, false) + } + if err := sr.waitn(maxBuffers - 1); err != nil { + t.Fatalf("failed to wait for row 3-%v: %v", maxBuffers+1, err) + } + // Received rows should be unchanged. 
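+	// (Rows 3 through maxBuffers+1 carried no resume token, so the decoder
+	// buffers them instead of yielding; the application still sees only
+	// rows 0-2 at this point.)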
+ if !testEqual(rows, want) { + t.Errorf("receive rows: \n%v\n, want\n%v\n", rows, want) + } + // Send (maxBuffers+2)th row to trigger state change of resumableStreamDecoder: + // queueingRetryable -> queueingUnretryable + ms.AddMsg(nil, false) + if err := sr.waitn(1); err != nil { + t.Fatalf("failed to wait for row %v: %v", maxBuffers+2, err) + } + // Client should yield row 3rd - (maxBuffers+2)th to application. Therefore, application should + // see row 0 - (maxBuffers+2)th so far. + for i := 3; i < maxBuffers+3; i++ { + want = append(want, &Row{ + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(i)}}, + }, + }) + } + if !testEqual(rows, want) { + t.Errorf("received rows: \n%v\n; want\n%v\n", rows, want) + } + // Inject resumable error, but since resumableStreamDecoder is already at queueingUnretryable + // state, query will just fail. + ms.AddMsg( + status.Errorf(codes.Unavailable, "mock server wants some sleep"), + false, + ) + var gotErr error + select { + case gotErr = <-done: + case <-time.After(10 * time.Second): + t.Fatalf("timeout in waiting for failed query to return.") + } + if wantErr := toSpannerError(status.Errorf(codes.Unavailable, "mock server wants some sleep")); !testEqual(gotErr, wantErr) { + t.Fatalf("stream() returns error: %v, but want error: %v", gotErr, wantErr) + } + + // Reconnect to mock Cloud Spanner. + rows = []*Row{} + go streaming() + // Let server send two rows without resume token. + for i := maxBuffers + 3; i < maxBuffers+5; i++ { + ms.AddMsg(nil, false) + } + if err := sr.waitn(3); err != nil { + t.Fatalf("failed to wait for row %v - %v: %v", maxBuffers+3, maxBuffers+5, err) + } + if len(rows) > 0 { + t.Errorf("client received some rows unexpectedly: %v, want nothing", rows) + } + // Let server end the query. + ms.AddMsg(io.EOF, false) + select { + case gotErr = <-done: + case <-time.After(10 * time.Second): + t.Fatalf("timeout in waiting for failed query to return") + } + if gotErr != nil { + t.Fatalf("stream() returns unexpected error: %v, but want no error", gotErr) + } + // Verify if a normal server side EOF flushes all queued rows. + want = []*Row{ + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(maxBuffers + 3)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(maxBuffers + 3)}}, + }, + }, + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(maxBuffers + 4)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(maxBuffers + 4)}}, + }, + }, + } + if !testEqual(rows, want) { + t.Errorf("received rows: \n%v\n; but want\n%v\n", rows, want) + } +} + +// Verify that streaming query get retried upon real gRPC server transport failures. +func TestGrpcReconnect(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + defer ms.Stop() + cc := dialMock(t, ms) + defer cc.Close() + mc := sppb.NewSpannerClient(cc) + retry := make(chan int) + row := make(chan int) + var err error + go func() { + r := 0 + // Establish a stream to mock cloud spanner server. + iter := stream(context.Background(), + func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + if r > 0 { + // This RPC attempt is a retry, signal it. 
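+					// (r counts invocations of this dial function: the first
+					// call is the initial attempt, so any r > 0 means the
+					// stream was re-established after the forced ms.Stop().)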
+ retry <- r + } + r++ + return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: "SELECT t.key key, t.value value FROM t_mock t", + ResumeToken: resumeToken, + }) + + }, + nil, + func(error) {}) + defer iter.Stop() + for { + _, err = iter.Next() + if err == iterator.Done { + err = nil + break + } + if err != nil { + break + } + row <- 0 + } + }() + // Add a message and wait for the receipt. + ms.AddMsg(nil, true) + select { + case <-row: + case <-time.After(10 * time.Second): + t.Fatalf("expect stream to be established within 10 seconds, but it didn't") + } + // Error injection: force server to close all connections. + ms.Stop() + // Test to see if client respond to the real RPC failure correctly by + // retrying RPC. + select { + case r, ok := <-retry: + if ok && r == 1 { + break + } + t.Errorf("retry count = %v, want 1", r) + case <-time.After(10 * time.Second): + t.Errorf("client library failed to respond after 10 seconds, aborting") + return + } +} + +// Test cancel/timeout for client operations. +func TestCancelTimeout(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + defer ms.Stop() + cc := dialMock(t, ms) + defer cc.Close() + mc := sppb.NewSpannerClient(cc) + done := make(chan int) + go func() { + for { + ms.AddMsg(nil, true) + } + }() + // Test cancelling query. + ctx, cancel := context.WithCancel(context.Background()) + var err error + go func() { + // Establish a stream to mock cloud spanner server. + iter := stream(ctx, + func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: "SELECT t.key key, t.value value FROM t_mock t", + ResumeToken: resumeToken, + }) + }, + nil, + func(error) {}) + defer iter.Stop() + for { + _, err = iter.Next() + if err == iterator.Done { + break + } + if err != nil { + done <- 0 + break + } + } + }() + cancel() + select { + case <-done: + if ErrCode(err) != codes.Canceled { + t.Errorf("streaming query is canceled and returns error %v, want error code %v", err, codes.Canceled) + } + case <-time.After(1 * time.Second): + t.Errorf("query doesn't exit timely after being cancelled") + } + // Test query timeout. + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second) + go func() { + // Establish a stream to mock cloud spanner server. 
+ iter := stream(ctx, + func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: "SELECT t.key key, t.value value FROM t_mock t", + ResumeToken: resumeToken, + }) + }, + nil, + func(error) {}) + defer iter.Stop() + for { + _, err = iter.Next() + if err == iterator.Done { + err = nil + break + } + if err != nil { + break + } + } + done <- 0 + }() + select { + case <-done: + if wantErr := codes.DeadlineExceeded; ErrCode(err) != wantErr { + t.Errorf("streaming query timeout returns error %v, want error code %v", err, wantErr) + } + case <-time.After(2 * time.Second): + t.Errorf("query doesn't timeout as expected") + } +} + +func TestRowIteratorDo(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + defer ms.Stop() + cc := dialMock(t, ms) + defer cc.Close() + mc := sppb.NewSpannerClient(cc) + + for i := 0; i < 3; i++ { + ms.AddMsg(nil, false) + } + ms.AddMsg(io.EOF, true) + nRows := 0 + iter := stream(context.Background(), + func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: "SELECT t.key key, t.value value FROM t_mock t", + ResumeToken: resumeToken, + }) + }, + nil, + func(error) {}) + err := iter.Do(func(r *Row) error { nRows++; return nil }) + if err != nil { + t.Errorf("Using Do: %v", err) + } + if nRows != 3 { + t.Errorf("got %d rows, want 3", nRows) + } +} + +func TestRowIteratorDoWithError(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + defer ms.Stop() + cc := dialMock(t, ms) + defer cc.Close() + mc := sppb.NewSpannerClient(cc) + + for i := 0; i < 3; i++ { + ms.AddMsg(nil, false) + } + ms.AddMsg(io.EOF, true) + iter := stream(context.Background(), + func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: "SELECT t.key key, t.value value FROM t_mock t", + ResumeToken: resumeToken, + }) + }, + nil, + func(error) {}) + injected := errors.New("Failed iterator") + err := iter.Do(func(r *Row) error { return injected }) + if err != injected { + t.Errorf("got <%v>, want <%v>", err, injected) + } +} + +func TestIteratorStopEarly(t *testing.T) { + ctx := context.Background() + restore := setMaxBytesBetweenResumeTokens() + defer restore() + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + defer ms.Stop() + cc := dialMock(t, ms) + defer cc.Close() + mc := sppb.NewSpannerClient(cc) + + ms.AddMsg(nil, false) + ms.AddMsg(nil, false) + ms.AddMsg(io.EOF, true) + + iter := stream(ctx, + func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: "SELECT t.key key, t.value value FROM t_mock t", + ResumeToken: resumeToken, + }) + }, + nil, + func(error) {}) + _, err := iter.Next() + if err != nil { + t.Fatalf("before Stop: %v", err) + } + iter.Stop() + // Stop sets r.err to the FailedPrecondition error "Next called after Stop". + // Override that here so this test can observe the Canceled error from the stream. 
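+	// (This reaches into unexported state and is only possible because the
+	// test lives in package spanner; application code cannot, and should
+	// not, clear a RowIterator's error.)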
+ iter.err = nil + iter.Next() + if ErrCode(iter.streamd.lastErr()) != codes.Canceled { + t.Errorf("after Stop: got %v, wanted Canceled", err) + } +} + +func TestIteratorWithError(t *testing.T) { + injected := errors.New("Failed iterator") + iter := RowIterator{err: injected} + defer iter.Stop() + if _, err := iter.Next(); err != injected { + t.Fatalf("Expected error: %v, got %v", injected, err) + } +} + +func dialMock(t *testing.T, ms *testutil.MockCloudSpanner) *grpc.ClientConn { + cc, err := grpc.Dial(ms.Addr(), grpc.WithInsecure(), grpc.WithBlock()) + if err != nil { + t.Fatalf("Dial(%q) = %v", ms.Addr(), err) + } + return cc +} diff --git a/vendor/cloud.google.com/go/spanner/retry.go b/vendor/cloud.google.com/go/spanner/retry.go new file mode 100644 index 0000000..288c985 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/retry.go @@ -0,0 +1,198 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "fmt" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + edpb "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +const ( + retryInfoKey = "google.rpc.retryinfo-bin" +) + +// errRetry returns an unavailable error under error namespace EsOther. It is a +// generic retryable error that is used to mask and recover unretryable errors +// in a retry loop. +func errRetry(err error) error { + if se, ok := err.(*Error); ok { + return &Error{codes.Unavailable, fmt.Sprintf("generic Cloud Spanner retryable error: { %v }", se.Error()), se.trailers} + } + return spannerErrorf(codes.Unavailable, "generic Cloud Spanner retryable error: { %v }", err.Error()) +} + +// isErrorClosing reports whether the error is generated by gRPC layer talking to a closed server. +func isErrorClosing(err error) bool { + if err == nil { + return false + } + if ErrCode(err) == codes.Internal && strings.Contains(ErrDesc(err), "transport is closing") { + // Handle the case when connection is closed unexpectedly. + // TODO: once gRPC is able to categorize + // this as retryable error, we should stop parsing the + // error message here. + return true + } + return false +} + +// isErrorRST reports whether the error is generated by gRPC client receiving a RST frame from server. +func isErrorRST(err error) bool { + if err == nil { + return false + } + if ErrCode(err) == codes.Internal && strings.Contains(ErrDesc(err), "stream terminated by RST_STREAM") { + // TODO: once gRPC is able to categorize this error as "go away" or "retryable", + // we should stop parsing the error message. + return true + } + return false +} + +// isErrorUnexpectedEOF returns true if error is generated by gRPC layer +// receiving io.EOF unexpectedly. 
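+//
+// For example, TestRetry in retry_test.go feeds it an error built as
+//
+//	status.Errorf(codes.Unknown, "unexpected EOF")
+//
+// which matches both the Unknown code and the message substring checked below.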
+func isErrorUnexpectedEOF(err error) bool { + if err == nil { + return false + } + if ErrCode(err) == codes.Unknown && strings.Contains(ErrDesc(err), "unexpected EOF") { + // Unexpected EOF is an transport layer issue that + // could be recovered by retries. The most likely + // scenario is a flaky RecvMsg() call due to network + // issues. + // TODO: once gRPC is able to categorize + // this as retryable error, we should stop parsing the + // error message here. + return true + } + return false +} + +// isErrorUnavailable returns true if the error is about server being unavailable. +func isErrorUnavailable(err error) bool { + if err == nil { + return false + } + if ErrCode(err) == codes.Unavailable { + return true + } + return false +} + +// isRetryable returns true if the Cloud Spanner error being checked is a retryable error. +func isRetryable(err error) bool { + if isErrorClosing(err) { + return true + } + if isErrorUnexpectedEOF(err) { + return true + } + if isErrorRST(err) { + return true + } + if isErrorUnavailable(err) { + return true + } + return false +} + +// errContextCanceled returns *spanner.Error for canceled context. +func errContextCanceled(ctx context.Context, lastErr error) error { + if ctx.Err() == context.DeadlineExceeded { + return spannerErrorf(codes.DeadlineExceeded, "%v, lastErr is <%v>", ctx.Err(), lastErr) + } + return spannerErrorf(codes.Canceled, "%v, lastErr is <%v>", ctx.Err(), lastErr) +} + +// extractRetryDelay extracts retry backoff if present. +func extractRetryDelay(err error) (time.Duration, bool) { + trailers := errTrailers(err) + if trailers == nil { + return 0, false + } + elem, ok := trailers[retryInfoKey] + if !ok || len(elem) <= 0 { + return 0, false + } + _, b, err := metadata.DecodeKeyValue(retryInfoKey, elem[0]) + if err != nil { + return 0, false + } + var retryInfo edpb.RetryInfo + if proto.Unmarshal([]byte(b), &retryInfo) != nil { + return 0, false + } + delay, err := ptypes.Duration(retryInfo.RetryDelay) + if err != nil { + return 0, false + } + return delay, true +} + +// runRetryable keeps attempting to run f until one of the following happens: +// 1) f returns nil error or an unretryable error; +// 2) context is cancelled or timeout. +// TODO: consider using https://github.com/googleapis/gax-go once it +// becomes available internally. +func runRetryable(ctx context.Context, f func(context.Context) error) error { + return toSpannerError(runRetryableNoWrap(ctx, f)) +} + +// Like runRetryable, but doesn't wrap the returned error in a spanner.Error. +func runRetryableNoWrap(ctx context.Context, f func(context.Context) error) error { + var funcErr error + retryCount := 0 + for { + select { + case <-ctx.Done(): + // Do context check here so that even f() failed to do + // so (for example, gRPC implementation bug), the loop + // can still have a chance to exit as expected. + return errContextCanceled(ctx, funcErr) + default: + } + funcErr = f(ctx) + if funcErr == nil { + return nil + } + if isRetryable(funcErr) { + // Error is retryable, do exponential backoff and continue. + b, ok := extractRetryDelay(funcErr) + if !ok { + b = defaultBackoff.delay(retryCount) + } + tracePrintf(ctx, nil, "Backing off for %s, then retrying", b) + select { + case <-ctx.Done(): + return errContextCanceled(ctx, funcErr) + case <-time.After(b): + } + retryCount++ + continue + } + // Error isn't retryable / no error, return immediately. 
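+		// (funcErr is necessarily non-nil here: the nil case already
+		// returned immediately after f(ctx) above.)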
+ return funcErr + } +} diff --git a/vendor/cloud.google.com/go/spanner/retry_test.go b/vendor/cloud.google.com/go/spanner/retry_test.go new file mode 100644 index 0000000..b15eb7f --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/retry_test.go @@ -0,0 +1,107 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "errors" + "fmt" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + edpb "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Test if runRetryable loop deals with various errors correctly. +func TestRetry(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + responses := []error{ + status.Errorf(codes.Internal, "transport is closing"), + status.Errorf(codes.Unknown, "unexpected EOF"), + status.Errorf(codes.Internal, "stream terminated by RST_STREAM with error code: 2"), + status.Errorf(codes.Unavailable, "service is currently unavailable"), + errRetry(fmt.Errorf("just retry it")), + } + err := runRetryable(context.Background(), func(ct context.Context) error { + var r error + if len(responses) > 0 { + r = responses[0] + responses = responses[1:] + } + return r + }) + if err != nil { + t.Errorf("runRetryable should be able to survive all retryable errors, but it returns %v", err) + } + // Unretryable errors + injErr := errors.New("this is unretryable") + err = runRetryable(context.Background(), func(ct context.Context) error { + return injErr + }) + if wantErr := toSpannerError(injErr); !testEqual(err, wantErr) { + t.Errorf("runRetryable returns error %v, want %v", err, wantErr) + } + // Timeout + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + retryErr := errRetry(fmt.Errorf("still retrying")) + err = runRetryable(ctx, func(ct context.Context) error { + // Expect to trigger timeout in retryable runner after 10 executions. + <-time.After(100 * time.Millisecond) + // Let retryable runner to retry so that timeout will eventually happen. 
+		return retryErr
+	})
+	// Check error code and error message.
+	if wantErrCode, wantErr := codes.DeadlineExceeded, errContextCanceled(ctx, retryErr); ErrCode(err) != wantErrCode || !testEqual(err, wantErr) {
+		t.Errorf("<error code, error> =\n<%v, %v>, want:\n<%v, %v>", ErrCode(err), err, wantErrCode, wantErr)
+	}
+	// Cancellation
+	ctx, cancel = context.WithCancel(context.Background())
+	retries := 3
+	retryErr = errRetry(fmt.Errorf("retry before cancel"))
+	err = runRetryable(ctx, func(ct context.Context) error {
+		retries--
+		if retries == 0 {
+			cancel()
+		}
+		return retryErr
+	})
+	// Check error code, error message and retry count.
+	if wantErrCode, wantErr := codes.Canceled, errContextCanceled(ctx, retryErr); ErrCode(err) != wantErrCode || !testEqual(err, wantErr) || retries != 0 {
+		t.Errorf("<error code, error, retries> =\n<%v, %v, %v>, want:\n<%v, %v, %v>", ErrCode(err), err, retries, wantErrCode, wantErr, 0)
+	}
+}
+
+func TestRetryInfo(t *testing.T) {
+	b, _ := proto.Marshal(&edpb.RetryInfo{
+		RetryDelay: ptypes.DurationProto(time.Second),
+	})
+	trailers := map[string]string{
+		retryInfoKey: string(b),
+	}
+	gotDelay, ok := extractRetryDelay(errRetry(toSpannerErrorWithMetadata(status.Errorf(codes.Aborted, ""), metadata.New(trailers))))
+	if !ok || !testEqual(time.Second, gotDelay) {
+		t.Errorf("<ok, delay> = <%t, %v>, want <true, %v>", ok, gotDelay, time.Second)
+	}
+}
diff --git a/vendor/cloud.google.com/go/spanner/row.go b/vendor/cloud.google.com/go/spanner/row.go
new file mode 100644
index 0000000..e59226f
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/row.go
@@ -0,0 +1,305 @@
+/*
+Copyright 2017 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spanner
+
+import (
+	"fmt"
+	"reflect"
+
+	proto3 "github.com/golang/protobuf/ptypes/struct"
+
+	sppb "google.golang.org/genproto/googleapis/spanner/v1"
+	"google.golang.org/grpc/codes"
+)
+
+// A Row is a view of a row of data returned by a Cloud Spanner read.
+// It consists of a number of columns; the number depends on the columns
+// used to construct the read.
+//
+// The column values can be accessed by index. For instance, if the read specified
+// []string{"photo_id", "caption"}, then each row will contain two
+// columns: "photo_id" with index 0, and "caption" with index 1.
+//
+// Column values are decoded by using one of the Column, ColumnByName, or
+// Columns methods. The valid values passed to these methods depend on the
+// column type. For example:
+//
+//	var photoID int64
+//	err := row.Column(0, &photoID) // Decode column 0 as an integer.
+//
+//	var caption string
+//	err := row.Column(1, &caption) // Decode column 1 as a string.
+//
+//	// Decode all the columns.
+// err := row.Columns(&photoID, &caption) +// +// Supported types and their corresponding Cloud Spanner column type(s) are: +// +// *string(not NULL), *NullString - STRING +// *[]string, *[]NullString - STRING ARRAY +// *[]byte - BYTES +// *[][]byte - BYTES ARRAY +// *int64(not NULL), *NullInt64 - INT64 +// *[]int64, *[]NullInt64 - INT64 ARRAY +// *bool(not NULL), *NullBool - BOOL +// *[]bool, *[]NullBool - BOOL ARRAY +// *float64(not NULL), *NullFloat64 - FLOAT64 +// *[]float64, *[]NullFloat64 - FLOAT64 ARRAY +// *time.Time(not NULL), *NullTime - TIMESTAMP +// *[]time.Time, *[]NullTime - TIMESTAMP ARRAY +// *Date(not NULL), *NullDate - DATE +// *[]civil.Date, *[]NullDate - DATE ARRAY +// *[]*some_go_struct, *[]NullRow - STRUCT ARRAY +// *GenericColumnValue - any Cloud Spanner type +// +// For TIMESTAMP columns, the returned time.Time object will be in UTC. +// +// To fetch an array of BYTES, pass a *[][]byte. To fetch an array of (sub)rows, pass +// a *[]spanner.NullRow or a *[]*some_go_struct where some_go_struct holds all +// information of the subrow, see spanner.Row.ToStruct for the mapping between a +// Cloud Spanner row and a Go struct. To fetch an array of other types, pass a +// *[]spanner.NullXXX type of the appropriate type. Use GenericColumnValue when you +// don't know in advance what column type to expect. +// +// Row decodes the row contents lazily; as a result, each call to a getter has +// a chance of returning an error. +// +// A column value may be NULL if the corresponding value is not present in +// Cloud Spanner. The spanner.NullXXX types (spanner.NullInt64 et al.) allow fetching +// values that may be null. A NULL BYTES can be fetched into a *[]byte as nil. +// It is an error to fetch a NULL value into any other type. +type Row struct { + fields []*sppb.StructType_Field + vals []*proto3.Value // keep decoded for now +} + +// errNamesValuesMismatch returns error for when columnNames count is not equal +// to columnValues count. +func errNamesValuesMismatch(columnNames []string, columnValues []interface{}) error { + return spannerErrorf(codes.FailedPrecondition, + "different number of names(%v) and values(%v)", len(columnNames), len(columnValues)) +} + +// NewRow returns a Row containing the supplied data. This can be useful for +// mocking Cloud Spanner Read and Query responses for unit testing. +func NewRow(columnNames []string, columnValues []interface{}) (*Row, error) { + if len(columnValues) != len(columnNames) { + return nil, errNamesValuesMismatch(columnNames, columnValues) + } + r := Row{ + fields: make([]*sppb.StructType_Field, len(columnValues)), + vals: make([]*proto3.Value, len(columnValues)), + } + for i := range columnValues { + val, typ, err := encodeValue(columnValues[i]) + if err != nil { + return nil, err + } + r.fields[i] = &sppb.StructType_Field{ + Name: columnNames[i], + Type: typ, + } + r.vals[i] = val + } + return &r, nil +} + +// Size is the number of columns in the row. +func (r *Row) Size() int { + return len(r.fields) +} + +// ColumnName returns the name of column i, or empty string for invalid column. +func (r *Row) ColumnName(i int) string { + if i < 0 || i >= len(r.fields) { + return "" + } + return r.fields[i].Name +} + +// ColumnIndex returns the index of the column with the given name. The +// comparison is case-sensitive. 
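+//
+// A minimal sketch, reusing the photo_id/caption columns from the Row
+// documentation above:
+//
+//	idx, err := row.ColumnIndex("caption") // idx == 1 if the lookup succeeds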
+func (r *Row) ColumnIndex(name string) (int, error) { + found := false + var index int + if len(r.vals) != len(r.fields) { + return 0, errFieldsMismatchVals(r) + } + for i, f := range r.fields { + if f == nil { + return 0, errNilColType(i) + } + if name == f.Name { + if found { + return 0, errDupColName(name) + } + found = true + index = i + } + } + if !found { + return 0, errColNotFound(name) + } + return index, nil +} + +// ColumnNames returns all column names of the row. +func (r *Row) ColumnNames() []string { + var n []string + for _, c := range r.fields { + n = append(n, c.Name) + } + return n +} + +// errColIdxOutOfRange returns error for requested column index is out of the +// range of the target Row's columns. +func errColIdxOutOfRange(i int, r *Row) error { + return spannerErrorf(codes.OutOfRange, "column index %d out of range [0,%d)", i, len(r.vals)) +} + +// errDecodeColumn returns error for not being able to decode a indexed column. +func errDecodeColumn(i int, err error) error { + if err == nil { + return nil + } + se, ok := toSpannerError(err).(*Error) + if !ok { + return spannerErrorf(codes.InvalidArgument, "failed to decode column %v, error = <%v>", i, err) + } + se.decorate(fmt.Sprintf("failed to decode column %v", i)) + return se +} + +// errFieldsMismatchVals returns error for field count isn't equal to value count in a Row. +func errFieldsMismatchVals(r *Row) error { + return spannerErrorf(codes.FailedPrecondition, "row has different number of fields(%v) and values(%v)", + len(r.fields), len(r.vals)) +} + +// errNilColType returns error for column type for column i being nil in the row. +func errNilColType(i int) error { + return spannerErrorf(codes.FailedPrecondition, "column(%v)'s type is nil", i) +} + +// Column fetches the value from the ith column, decoding it into ptr. +// See the Row documentation for the list of acceptable argument types. +// see Client.ReadWriteTransaction for an example. +func (r *Row) Column(i int, ptr interface{}) error { + if len(r.vals) != len(r.fields) { + return errFieldsMismatchVals(r) + } + if i < 0 || i >= len(r.fields) { + return errColIdxOutOfRange(i, r) + } + if r.fields[i] == nil { + return errNilColType(i) + } + if err := decodeValue(r.vals[i], r.fields[i].Type, ptr); err != nil { + return errDecodeColumn(i, err) + } + return nil +} + +// errDupColName returns error for duplicated column name in the same row. +func errDupColName(n string) error { + return spannerErrorf(codes.FailedPrecondition, "ambiguous column name %q", n) +} + +// errColNotFound returns error for not being able to find a named column. +func errColNotFound(n string) error { + return spannerErrorf(codes.NotFound, "column %q not found", n) +} + +// ColumnByName fetches the value from the named column, decoding it into ptr. +// See the Row documentation for the list of acceptable argument types. +func (r *Row) ColumnByName(name string, ptr interface{}) error { + index, err := r.ColumnIndex(name) + if err != nil { + return err + } + return r.Column(index, ptr) +} + +// errNumOfColValue returns error for providing wrong number of values to Columns. +func errNumOfColValue(n int, r *Row) error { + return spannerErrorf(codes.InvalidArgument, + "Columns(): number of arguments (%d) does not match row size (%d)", n, len(r.vals)) +} + +// Columns fetches all the columns in the row at once. +// +// The value of the kth column will be decoded into the kth argument to Columns. See +// Row for the list of acceptable argument types. 
The number of arguments must be +// equal to the number of columns. Pass nil to specify that a column should be +// ignored. +func (r *Row) Columns(ptrs ...interface{}) error { + if len(ptrs) != len(r.vals) { + return errNumOfColValue(len(ptrs), r) + } + if len(r.vals) != len(r.fields) { + return errFieldsMismatchVals(r) + } + for i, p := range ptrs { + if p == nil { + continue + } + if err := r.Column(i, p); err != nil { + return err + } + } + return nil +} + +// errToStructArgType returns error for p not having the correct data type(pointer to Go struct) to +// be the argument of Row.ToStruct. +func errToStructArgType(p interface{}) error { + return spannerErrorf(codes.InvalidArgument, "ToStruct(): type %T is not a valid pointer to Go struct", p) +} + +// ToStruct fetches the columns in a row into the fields of a struct. +// The rules for mapping a row's columns into a struct's exported fields +// are as the following: +// +// 1. If a field has a `spanner: "column_name"` tag, then decode column +// 'column_name' into the field. A special case is the `spanner: "-"` +// tag, which instructs ToStruct to ignore the field during decoding. +// +// 2. Otherwise, if the name of a field matches the name of a column (ignoring case), +// decode the column into the field. +// +// The fields of the destination struct can be of any type that is acceptable +// to spanner.Row.Column. +// +// Slice and pointer fields will be set to nil if the source column is NULL, and a +// non-nil value if the column is not NULL. To decode NULL values of other types, use +// one of the spanner.NullXXX types as the type of the destination field. +func (r *Row) ToStruct(p interface{}) error { + // Check if p is a pointer to a struct + if t := reflect.TypeOf(p); t == nil || t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { + return errToStructArgType(p) + } + if len(r.vals) != len(r.fields) { + return errFieldsMismatchVals(r) + } + // Call decodeStruct directly to decode the row as a typed proto.ListValue. + return decodeStruct( + &sppb.StructType{Fields: r.fields}, + &proto3.ListValue{Values: r.vals}, + p, + ) +} diff --git a/vendor/cloud.google.com/go/spanner/row_test.go b/vendor/cloud.google.com/go/spanner/row_test.go new file mode 100644 index 0000000..3e3dbc1 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/row_test.go @@ -0,0 +1,1811 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "encoding/base64" + "reflect" + "strconv" + "strings" + "testing" + "time" + + "cloud.google.com/go/civil" + proto "github.com/golang/protobuf/proto" + proto3 "github.com/golang/protobuf/ptypes/struct" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +var ( + tm = time.Date(2016, 11, 15, 0, 0, 0, 0, time.UTC) + dt, _ = civil.ParseDate("2016-11-15") + // row contains a column for each unique Cloud Spanner type. 
+ row = Row{ + []*sppb.StructType_Field{ + // STRING / STRING ARRAY + {"STRING", stringType()}, + {"NULL_STRING", stringType()}, + {"STRING_ARRAY", listType(stringType())}, + {"NULL_STRING_ARRAY", listType(stringType())}, + // BYTES / BYTES ARRAY + {"BYTES", bytesType()}, + {"NULL_BYTES", bytesType()}, + {"BYTES_ARRAY", listType(bytesType())}, + {"NULL_BYTES_ARRAY", listType(bytesType())}, + // INT64 / INT64 ARRAY + {"INT64", intType()}, + {"NULL_INT64", intType()}, + {"INT64_ARRAY", listType(intType())}, + {"NULL_INT64_ARRAY", listType(intType())}, + // BOOL / BOOL ARRAY + {"BOOL", boolType()}, + {"NULL_BOOL", boolType()}, + {"BOOL_ARRAY", listType(boolType())}, + {"NULL_BOOL_ARRAY", listType(boolType())}, + // FLOAT64 / FLOAT64 ARRAY + {"FLOAT64", floatType()}, + {"NULL_FLOAT64", floatType()}, + {"FLOAT64_ARRAY", listType(floatType())}, + {"NULL_FLOAT64_ARRAY", listType(floatType())}, + // TIMESTAMP / TIMESTAMP ARRAY + {"TIMESTAMP", timeType()}, + {"NULL_TIMESTAMP", timeType()}, + {"TIMESTAMP_ARRAY", listType(timeType())}, + {"NULL_TIMESTAMP_ARRAY", listType(timeType())}, + // DATE / DATE ARRAY + {"DATE", dateType()}, + {"NULL_DATE", dateType()}, + {"DATE_ARRAY", listType(dateType())}, + {"NULL_DATE_ARRAY", listType(dateType())}, + + // STRUCT ARRAY + { + "STRUCT_ARRAY", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + { + "NULL_STRUCT_ARRAY", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{ + // STRING / STRING ARRAY + stringProto("value"), + nullProto(), + listProto(stringProto("value1"), nullProto(), stringProto("value3")), + nullProto(), + // BYTES / BYTES ARRAY + bytesProto([]byte("value")), + nullProto(), + listProto(bytesProto([]byte("value1")), nullProto(), bytesProto([]byte("value3"))), + nullProto(), + // INT64 / INT64 ARRAY + intProto(17), + nullProto(), + listProto(intProto(1), intProto(2), nullProto()), + nullProto(), + // BOOL / BOOL ARRAY + boolProto(true), + nullProto(), + listProto(nullProto(), boolProto(true), boolProto(false)), + nullProto(), + // FLOAT64 / FLOAT64 ARRAY + floatProto(1.7), + nullProto(), + listProto(nullProto(), nullProto(), floatProto(1.7)), + nullProto(), + // TIMESTAMP / TIMESTAMP ARRAY + timeProto(tm), + nullProto(), + listProto(nullProto(), timeProto(tm)), + nullProto(), + // DATE / DATE ARRAY + dateProto(dt), + nullProto(), + listProto(nullProto(), dateProto(dt)), + nullProto(), + // STRUCT ARRAY + listProto( + nullProto(), + listProto(intProto(3), floatProto(33.3), stringProto("three")), + nullProto(), + ), + nullProto(), + }, + } +) + +// Test helpers for getting column values. +func TestColumnValues(t *testing.T) { + vals := []interface{}{} + wantVals := []interface{}{} + // Test getting column values. 
+ for i, wants := range [][]interface{}{ + // STRING / STRING ARRAY + {"value", NullString{"value", true}}, + {NullString{}}, + {[]NullString{{"value1", true}, {}, {"value3", true}}}, + {[]NullString(nil)}, + // BYTES / BYTES ARRAY + {[]byte("value")}, + {[]byte(nil)}, + {[][]byte{[]byte("value1"), nil, []byte("value3")}}, + {[][]byte(nil)}, + // INT64 / INT64 ARRAY + {int64(17), NullInt64{17, true}}, + {NullInt64{}}, + {[]NullInt64{{1, true}, {2, true}, {}}}, + {[]NullInt64(nil)}, + // BOOL / BOOL ARRAY + {true, NullBool{true, true}}, + {NullBool{}}, + {[]NullBool{{}, {true, true}, {false, true}}}, + {[]NullBool(nil)}, + // FLOAT64 / FLOAT64 ARRAY + {1.7, NullFloat64{1.7, true}}, + {NullFloat64{}}, + {[]NullFloat64{{}, {}, {1.7, true}}}, + {[]NullFloat64(nil)}, + // TIMESTAMP / TIMESTAMP ARRAY + {tm, NullTime{tm, true}}, + {NullTime{}}, + {[]NullTime{{}, {tm, true}}}, + {[]NullTime(nil)}, + // DATE / DATE ARRAY + {dt, NullDate{dt, true}}, + {NullDate{}}, + {[]NullDate{{}, {dt, true}}}, + {[]NullDate(nil)}, + // STRUCT ARRAY + { + []*struct { + Col1 NullInt64 + Col2 NullFloat64 + Col3 string + }{ + nil, + &struct { + Col1 NullInt64 + Col2 NullFloat64 + Col3 string + }{ + NullInt64{3, true}, + NullFloat64{33.3, true}, + "three", + }, + nil, + }, + []NullRow{ + {}, + { + Row: Row{ + fields: []*sppb.StructType_Field{ + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + }, + vals: []*proto3.Value{ + intProto(3), + floatProto(33.3), + stringProto("three"), + }, + }, + Valid: true, + }, + {}, + }, + }, + { + []*struct { + Col1 NullInt64 + Col2 NullFloat64 + Col3 string + }(nil), + []NullRow(nil), + }, + } { + for j, want := range wants { + // Prepare Value vector to test Row.Columns. + if j == 0 { + vals = append(vals, reflect.New(reflect.TypeOf(want)).Interface()) + wantVals = append(wantVals, want) + } + // Column + gotp := reflect.New(reflect.TypeOf(want)) + err := row.Column(i, gotp.Interface()) + if err != nil { + t.Errorf("\t row.Column(%v, %T) returns error: %v, want nil", i, gotp.Interface(), err) + } + if got := reflect.Indirect(gotp).Interface(); !testEqual(got, want) { + t.Errorf("\t row.Column(%v, %T) retrives %v, want %v", i, gotp.Interface(), got, want) + } + // ColumnByName + gotp = reflect.New(reflect.TypeOf(want)) + err = row.ColumnByName(row.fields[i].Name, gotp.Interface()) + if err != nil { + t.Errorf("\t row.ColumnByName(%v, %T) returns error: %v, want nil", row.fields[i].Name, gotp.Interface(), err) + } + if got := reflect.Indirect(gotp).Interface(); !testEqual(got, want) { + t.Errorf("\t row.ColumnByName(%v, %T) retrives %v, want %v", row.fields[i].Name, gotp.Interface(), got, want) + } + } + } + // Test Row.Columns. + if err := row.Columns(vals...); err != nil { + t.Errorf("row.Columns() returns error: %v, want nil", err) + } + for i, want := range wantVals { + if got := reflect.Indirect(reflect.ValueOf(vals[i])).Interface(); !testEqual(got, want) { + t.Errorf("\t got %v(%T) for column[%v], want %v(%T)", got, got, row.fields[i].Name, want, want) + } + } +} + +// Test decoding into nil destination. 
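These fixtures feed every public decoding entry point on Row. Before the nil-destination tests below, here is a caller-side sketch of those entry points; it assumes only the public API of this package plus google.golang.org/api/iterator, and the "Accounts" table with its "Name" and "Balance" columns is illustrative, not taken from this file.

// Sketch: typical use of Row.Column, Row.ColumnByName, and Row.ToStruct.
// Assumes imports: context, cloud.google.com/go/spanner, google.golang.org/api/iterator.
func readAccounts(ctx context.Context, client *spanner.Client) error {
    iter := client.Single().Read(ctx, "Accounts", spanner.AllKeys(), []string{"Name", "Balance"})
    defer iter.Stop()
    for {
        row, err := iter.Next()
        if err == iterator.Done {
            return nil
        }
        if err != nil {
            return err
        }
        var name string
        if err := row.Column(0, &name); err != nil { // decode by index
            return err
        }
        var balance spanner.NullInt64 // NullInt64 tolerates NULL; a plain int64 would not
        if err := row.ColumnByName("Balance", &balance); err != nil { // decode by name
            return err
        }
        var rec struct {
            Name    string            `spanner:"Name"`
            Balance spanner.NullInt64 `spanner:"Balance"`
        }
        if err := row.ToStruct(&rec); err != nil { // decode the whole row into a tagged struct
            return err
        }
    }
}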
+func TestNilDst(t *testing.T) { + for i, test := range []struct { + r *Row + dst interface{} + wantErr error + structDst interface{} + wantToStructErr error + }{ + { + &Row{ + []*sppb.StructType_Field{ + {"Col0", stringType()}, + }, + []*proto3.Value{stringProto("value")}, + }, + nil, + errDecodeColumn(0, errNilDst(nil)), + nil, + errToStructArgType(nil), + }, + { + &Row{ + []*sppb.StructType_Field{ + {"Col0", stringType()}, + }, + []*proto3.Value{stringProto("value")}, + }, + (*string)(nil), + errDecodeColumn(0, errNilDst((*string)(nil))), + (*struct{ STRING string })(nil), + errNilDst((*struct{ STRING string })(nil)), + }, + { + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + ), + ), + }, + }, + []*proto3.Value{listProto( + listProto(intProto(3), floatProto(33.3)), + )}, + }, + (*[]*struct { + Col1 int + Col2 float64 + })(nil), + errDecodeColumn(0, errNilDst((*[]*struct { + Col1 int + Col2 float64 + })(nil))), + (*struct { + StructArray []*struct { + Col1 int + Col2 float64 + } `spanner:"STRUCT_ARRAY"` + })(nil), + errNilDst((*struct { + StructArray []*struct { + Col1 int + Col2 float64 + } `spanner:"STRUCT_ARRAY"` + })(nil)), + }, + } { + if gotErr := test.r.Column(0, test.dst); !testEqual(gotErr, test.wantErr) { + t.Errorf("%v: test.r.Column() returns error %v, want %v", i, gotErr, test.wantErr) + } + if gotErr := test.r.ColumnByName("Col0", test.dst); !testEqual(gotErr, test.wantErr) { + t.Errorf("%v: test.r.ColumnByName() returns error %v, want %v", i, gotErr, test.wantErr) + } + // Row.Columns(T) should return nil on T == nil, otherwise, it should return test.wantErr. + wantColumnsErr := test.wantErr + if test.dst == nil { + wantColumnsErr = nil + } + if gotErr := test.r.Columns(test.dst); !testEqual(gotErr, wantColumnsErr) { + t.Errorf("%v: test.r.Columns() returns error %v, want %v", i, gotErr, wantColumnsErr) + } + if gotErr := test.r.ToStruct(test.structDst); !testEqual(gotErr, test.wantToStructErr) { + t.Errorf("%v: test.r.ToStruct() returns error %v, want %v", i, gotErr, test.wantToStructErr) + } + } +} + +// Test decoding NULL columns using Go types that don't support NULL. +func TestNullTypeErr(t *testing.T) { + var tm time.Time + ntoi := func(n string) int { + for i, f := range row.fields { + if f.Name == n { + return i + } + } + t.Errorf("cannot find column name %q in row", n) + return 0 + } + for _, test := range []struct { + colName string + dst interface{} + }{ + { + "NULL_STRING", + proto.String(""), + }, + { + "NULL_INT64", + proto.Int64(0), + }, + { + "NULL_BOOL", + proto.Bool(false), + }, + { + "NULL_FLOAT64", + proto.Float64(0.0), + }, + { + "NULL_TIMESTAMP", + &tm, + }, + { + "NULL_DATE", + &dt, + }, + } { + wantErr := errDecodeColumn(ntoi(test.colName), errDstNotForNull(test.dst)) + if gotErr := row.ColumnByName(test.colName, test.dst); !testEqual(gotErr, wantErr) { + t.Errorf("row.ColumnByName(%v) returns error %v, want %v", test.colName, gotErr, wantErr) + } + } +} + +// Test using wrong destination type in column decoders. +func TestColumnTypeErr(t *testing.T) { + // badDst cannot hold any of the column values. + badDst := &struct{}{} + for i, f := range row.fields { // For each of the columns, try to decode it into badDst. 
+ tc := f.Type.Code + var etc sppb.TypeCode + if strings.Contains(f.Name, "ARRAY") { + etc = f.Type.ArrayElementType.Code + } + wantErr := errDecodeColumn(i, errTypeMismatch(tc, etc, badDst)) + if gotErr := row.Column(i, badDst); !testEqual(gotErr, wantErr) { + t.Errorf("Column(%v): decoding into destination with wrong type %T returns error %v, want %v", + i, badDst, gotErr, wantErr) + } + if gotErr := row.ColumnByName(f.Name, badDst); !testEqual(gotErr, wantErr) { + t.Errorf("ColumnByName(%v): decoding into destination with wrong type %T returns error %v, want %v", + f.Name, badDst, gotErr, wantErr) + } + } + wantErr := errDecodeColumn(1, errTypeMismatch(sppb.TypeCode_STRING, sppb.TypeCode_TYPE_CODE_UNSPECIFIED, badDst)) + // badDst is used to receive column 1. + vals := []interface{}{nil, badDst} // Row.Column() is expected to fail at column 1. + // Skip decoding the rest columns by providing nils as the destinations. + for i := 2; i < len(row.fields); i++ { + vals = append(vals, nil) + } + if gotErr := row.Columns(vals...); !testEqual(gotErr, wantErr) { + t.Errorf("Columns(): decoding column 1 with wrong type %T returns error %v, want %v", + badDst, gotErr, wantErr) + } +} + +// Test the handling of invalid column decoding requests which cannot be mapped to correct column(s). +func TestInvalidColumnRequest(t *testing.T) { + for _, test := range []struct { + desc string + f func() error + wantErr error + }{ + { + "Request column index is out of range", + func() error { + return row.Column(10000, &struct{}{}) + }, + errColIdxOutOfRange(10000, &row), + }, + { + "Cannot find the named column", + func() error { + return row.ColumnByName("string", &struct{}{}) + }, + errColNotFound("string"), + }, + { + "Not enough arguments to call row.Columns()", + func() error { + return row.Columns(nil, nil) + }, + errNumOfColValue(2, &row), + }, + { + "Call ColumnByName on row with duplicated column names", + func() error { + var s string + r := &Row{ + []*sppb.StructType_Field{ + {"Val", stringType()}, + {"Val", stringType()}, + }, + []*proto3.Value{stringProto("value1"), stringProto("value2")}, + } + return r.ColumnByName("Val", &s) + }, + errDupColName("Val"), + }, + { + "Call ToStruct on row with duplicated column names", + func() error { + s := &struct { + Val string + }{} + r := &Row{ + []*sppb.StructType_Field{ + {"Val", stringType()}, + {"Val", stringType()}, + }, + []*proto3.Value{stringProto("value1"), stringProto("value2")}, + } + return r.ToStruct(s) + }, + errDupSpannerField("Val", &sppb.StructType{ + Fields: []*sppb.StructType_Field{ + {"Val", stringType()}, + {"Val", stringType()}, + }, + }), + }, + { + "Call ToStruct on a row with unnamed field", + func() error { + s := &struct { + Val string + }{} + r := &Row{ + []*sppb.StructType_Field{ + {"", stringType()}, + }, + []*proto3.Value{stringProto("value1")}, + } + return r.ToStruct(s) + }, + errUnnamedField(&sppb.StructType{Fields: []*sppb.StructType_Field{{"", stringType()}}}, 0), + }, + } { + if gotErr := test.f(); !testEqual(gotErr, test.wantErr) { + t.Errorf("%v: test.f() returns error %v, want %v", test.desc, gotErr, test.wantErr) + } + } +} + +// Test decoding the row with row.ToStruct into an invalid destination. 
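The misuse cases above reach callers as *spanner.Error values created by spannerErrorf, so client code can branch on the gRPC status code instead of matching message text. Before the ToStruct tests below, a small caller-side sketch assuming the package's exported ErrCode helper; the mapping of codes to failures is inferred from the error constructors exercised above.

// Sketch: classify Row decoding failures by gRPC code rather than message text.
// Assumes imports: cloud.google.com/go/spanner, google.golang.org/grpc/codes.
func isDecodeMisuse(err error) bool {
    switch spanner.ErrCode(err) {
    case codes.InvalidArgument, codes.NotFound:
        // e.g. a type mismatch, a wrong argument count, or an unknown column name.
        return true
    default:
        return false
    }
}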
+func TestToStructInvalidDst(t *testing.T) { + for _, test := range []struct { + desc string + dst interface{} + wantErr error + }{ + { + "Decode row as STRUCT into int32", + proto.Int(1), + errToStructArgType(proto.Int(1)), + }, + { + "Decode row as STRUCT to nil Go struct", + (*struct{})(nil), + errNilDst((*struct{})(nil)), + }, + { + "Decode row as STRUCT to Go struct with duplicated fields for the PK column", + &struct { + PK1 string `spanner:"STRING"` + PK2 string `spanner:"STRING"` + }{}, + errNoOrDupGoField(&struct { + PK1 string `spanner:"STRING"` + PK2 string `spanner:"STRING"` + }{}, "STRING"), + }, + { + "Decode row as STRUCT to Go struct with no field for the PK column", + &struct { + PK1 string `spanner:"_STRING"` + }{}, + errNoOrDupGoField(&struct { + PK1 string `spanner:"_STRING"` + }{}, "STRING"), + }, + { + "Decode row as STRUCT to Go struct with wrong type for the PK column", + &struct { + PK1 int64 `spanner:"STRING"` + }{}, + errDecodeStructField(&sppb.StructType{Fields: row.fields}, "STRING", + errTypeMismatch(sppb.TypeCode_STRING, sppb.TypeCode_TYPE_CODE_UNSPECIFIED, proto.Int64(0))), + }, + } { + if gotErr := row.ToStruct(test.dst); !testEqual(gotErr, test.wantErr) { + t.Errorf("%v: decoding:\ngot %v\nwant %v", test.desc, gotErr, test.wantErr) + } + } +} + +// Test decoding a broken row. +func TestBrokenRow(t *testing.T) { + for i, test := range []struct { + row *Row + dst interface{} + wantErr error + }{ + { + // A row with no field. + &Row{ + []*sppb.StructType_Field{}, + []*proto3.Value{stringProto("value")}, + }, + &NullString{"value", true}, + errFieldsMismatchVals(&Row{ + []*sppb.StructType_Field{}, + []*proto3.Value{stringProto("value")}, + }), + }, + { + // A row with nil field. + &Row{ + []*sppb.StructType_Field{nil}, + []*proto3.Value{stringProto("value")}, + }, + &NullString{"value", true}, + errNilColType(0), + }, + { + // Field is not nil, but its type is nil. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + nil, + }, + }, + []*proto3.Value{listProto(stringProto("value1"), stringProto("value2"))}, + }, + &[]NullString{}, + errDecodeColumn(0, errNilSpannerType()), + }, + { + // Field is not nil, field type is not nil, but it is an array and its array element type is nil. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + &sppb.Type{ + Code: sppb.TypeCode_ARRAY, + }, + }, + }, + []*proto3.Value{listProto(stringProto("value1"), stringProto("value2"))}, + }, + &[]NullString{}, + errDecodeColumn(0, errNilArrElemType(&sppb.Type{Code: sppb.TypeCode_ARRAY})), + }, + { + // Field specifies valid type, value is nil. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + intType(), + }, + }, + []*proto3.Value{nil}, + }, + &NullInt64{1, true}, + errDecodeColumn(0, errNilSrc()), + }, + { + // Field specifies INT64 type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + intType(), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_StringValue)(nil)}}, + }, + &NullInt64{1, true}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_StringValue)(nil)}, "String")), + }, + { + // Field specifies INT64 type, but value is for Number type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + intType(), + }, + }, + []*proto3.Value{floatProto(1.0)}, + }, + &NullInt64{1, true}, + errDecodeColumn(0, errSrcVal(floatProto(1.0), "String")), + }, + { + // Field specifies INT64 type, but value is wrongly encoded. 
+ &Row{ + []*sppb.StructType_Field{ + { + "Col0", + intType(), + }, + }, + []*proto3.Value{stringProto("&1")}, + }, + proto.Int64(0), + errDecodeColumn(0, errBadEncoding(stringProto("&1"), func() error { + _, err := strconv.ParseInt("&1", 10, 64) + return err + }())), + }, + { + // Field specifies INT64 type, but value is wrongly encoded. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + intType(), + }, + }, + []*proto3.Value{stringProto("&1")}, + }, + &NullInt64{}, + errDecodeColumn(0, errBadEncoding(stringProto("&1"), func() error { + _, err := strconv.ParseInt("&1", 10, 64) + return err + }())), + }, + { + // Field specifies STRING type, but value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + stringType(), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_StringValue)(nil)}}, + }, + &NullString{"value", true}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_StringValue)(nil)}, "String")), + }, + { + // Field specifies STRING type, but value is for ARRAY type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + stringType(), + }, + }, + []*proto3.Value{listProto(stringProto("value"))}, + }, + &NullString{"value", true}, + errDecodeColumn(0, errSrcVal(listProto(stringProto("value")), "String")), + }, + { + // Field specifies FLOAT64 type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + floatType(), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_NumberValue)(nil)}}, + }, + &NullFloat64{1.0, true}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_NumberValue)(nil)}, "Number")), + }, + { + // Field specifies FLOAT64 type, but value is for BOOL type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + floatType(), + }, + }, + []*proto3.Value{boolProto(true)}, + }, + &NullFloat64{1.0, true}, + errDecodeColumn(0, errSrcVal(boolProto(true), "Number")), + }, + { + // Field specifies FLOAT64 type, but value is wrongly encoded. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + floatType(), + }, + }, + []*proto3.Value{stringProto("nan")}, + }, + &NullFloat64{}, + errDecodeColumn(0, errUnexpectedNumStr("nan")), + }, + { + // Field specifies FLOAT64 type, but value is wrongly encoded. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + floatType(), + }, + }, + []*proto3.Value{stringProto("nan")}, + }, + proto.Float64(0), + errDecodeColumn(0, errUnexpectedNumStr("nan")), + }, + { + // Field specifies BYTES type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + bytesType(), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_StringValue)(nil)}}, + }, + &[]byte{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_StringValue)(nil)}, "String")), + }, + { + // Field specifies BYTES type, but value is for BOOL type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + bytesType(), + }, + }, + []*proto3.Value{boolProto(false)}, + }, + &[]byte{}, + errDecodeColumn(0, errSrcVal(boolProto(false), "String")), + }, + { + // Field specifies BYTES type, but value is wrongly encoded. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + bytesType(), + }, + }, + []*proto3.Value{stringProto("&&")}, + }, + &[]byte{}, + errDecodeColumn(0, errBadEncoding(stringProto("&&"), func() error { + _, err := base64.StdEncoding.DecodeString("&&") + return err + }())), + }, + { + // Field specifies BOOL type, value is having a nil Kind. 
+ &Row{ + []*sppb.StructType_Field{ + { + "Col0", + boolType(), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_BoolValue)(nil)}}, + }, + &NullBool{false, true}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_BoolValue)(nil)}, "Bool")), + }, + { + // Field specifies BOOL type, but value is for STRING type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + boolType(), + }, + }, + []*proto3.Value{stringProto("false")}, + }, + &NullBool{false, true}, + errDecodeColumn(0, errSrcVal(stringProto("false"), "Bool")), + }, + { + // Field specifies TIMESTAMP type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + timeType(), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_StringValue)(nil)}}, + }, + &NullTime{time.Now(), true}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_StringValue)(nil)}, "String")), + }, + { + // Field specifies TIMESTAMP type, but value is for BOOL type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + timeType(), + }, + }, + []*proto3.Value{boolProto(false)}, + }, + &NullTime{time.Now(), true}, + errDecodeColumn(0, errSrcVal(boolProto(false), "String")), + }, + { + // Field specifies TIMESTAMP type, but value is invalid timestamp. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + timeType(), + }, + }, + []*proto3.Value{stringProto("junk")}, + }, + &NullTime{time.Now(), true}, + errDecodeColumn(0, errBadEncoding(stringProto("junk"), func() error { + _, err := time.Parse(time.RFC3339Nano, "junk") + return err + }())), + }, + { + // Field specifies DATE type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + dateType(), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_StringValue)(nil)}}, + }, + &NullDate{civil.Date{}, true}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_StringValue)(nil)}, "String")), + }, + { + // Field specifies DATE type, but value is for BOOL type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + dateType(), + }, + }, + []*proto3.Value{boolProto(false)}, + }, + &NullDate{civil.Date{}, true}, + errDecodeColumn(0, errSrcVal(boolProto(false), "String")), + }, + { + // Field specifies DATE type, but value is invalid timestamp. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + dateType(), + }, + }, + []*proto3.Value{stringProto("junk")}, + }, + &NullDate{civil.Date{}, true}, + errDecodeColumn(0, errBadEncoding(stringProto("junk"), func() error { + _, err := civil.ParseDate("junk") + return err + }())), + }, + + { + // Field specifies ARRAY type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(intType()), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[]NullInt64{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(intType()), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]NullInt64{}, + errDecodeColumn(0, errNilListValue("INT64")), + }, + { + // Field specifies ARRAY type, but value is for BYTES type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(intType()), + }, + }, + []*proto3.Value{bytesProto([]byte("value"))}, + }, + &[]NullInt64{}, + errDecodeColumn(0, errSrcVal(bytesProto([]byte("value")), "List")), + }, + { + // Field specifies ARRAY type, but value is for ARRAY type. 
+ &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(intType()), + }, + }, + []*proto3.Value{listProto(boolProto(true))}, + }, + &[]NullInt64{}, + errDecodeColumn(0, errDecodeArrayElement(0, boolProto(true), + "INT64", errSrcVal(boolProto(true), "String"))), + }, + { + // Field specifies ARRAY type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(stringType()), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[]NullString{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(stringType()), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]NullString{}, + errDecodeColumn(0, errNilListValue("STRING")), + }, + { + // Field specifies ARRAY type, but value is for BOOL type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(stringType()), + }, + }, + []*proto3.Value{boolProto(true)}, + }, + &[]NullString{}, + errDecodeColumn(0, errSrcVal(boolProto(true), "List")), + }, + { + // Field specifies ARRAY type, but value is for ARRAY type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(stringType()), + }, + }, + []*proto3.Value{listProto(boolProto(true))}, + }, + &[]NullString{}, + errDecodeColumn(0, errDecodeArrayElement(0, boolProto(true), + "STRING", errSrcVal(boolProto(true), "String"))), + }, + { + // Field specifies ARRAY type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(floatType()), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[]NullFloat64{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(floatType()), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]NullFloat64{}, + errDecodeColumn(0, errNilListValue("FLOAT64")), + }, + { + // Field specifies ARRAY type, but value is for STRING type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(floatType()), + }, + }, + []*proto3.Value{stringProto("value")}, + }, + &[]NullFloat64{}, + errDecodeColumn(0, errSrcVal(stringProto("value"), "List")), + }, + { + // Field specifies ARRAY type, but value is for ARRAY type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(floatType()), + }, + }, + []*proto3.Value{listProto(boolProto(true))}, + }, + &[]NullFloat64{}, + errDecodeColumn(0, errDecodeArrayElement(0, boolProto(true), + "FLOAT64", errSrcVal(boolProto(true), "Number"))), + }, + { + // Field specifies ARRAY type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(bytesType()), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[][]byte{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(bytesType()), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[][]byte{}, + errDecodeColumn(0, errNilListValue("BYTES")), + }, + { + // Field specifies ARRAY type, but value is for FLOAT64 type. 
+ &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(bytesType()), + }, + }, + []*proto3.Value{floatProto(1.0)}, + }, + &[][]byte{}, + errDecodeColumn(0, errSrcVal(floatProto(1.0), "List")), + }, + { + // Field specifies ARRAY type, but value is for ARRAY type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(bytesType()), + }, + }, + []*proto3.Value{listProto(floatProto(1.0))}, + }, + &[][]byte{}, + errDecodeColumn(0, errDecodeArrayElement(0, floatProto(1.0), + "BYTES", errSrcVal(floatProto(1.0), "String"))), + }, + { + // Field specifies ARRAY type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(boolType()), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[]NullBool{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(boolType()), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]NullBool{}, + errDecodeColumn(0, errNilListValue("BOOL")), + }, + { + // Field specifies ARRAY type, but value is for FLOAT64 type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(boolType()), + }, + }, + []*proto3.Value{floatProto(1.0)}, + }, + &[]NullBool{}, + errDecodeColumn(0, errSrcVal(floatProto(1.0), "List")), + }, + { + // Field specifies ARRAY type, but value is for ARRAY type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(boolType()), + }, + }, + []*proto3.Value{listProto(floatProto(1.0))}, + }, + &[]NullBool{}, + errDecodeColumn(0, errDecodeArrayElement(0, floatProto(1.0), + "BOOL", errSrcVal(floatProto(1.0), "Bool"))), + }, + { + // Field specifies ARRAY type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(timeType()), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[]NullTime{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(timeType()), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]NullTime{}, + errDecodeColumn(0, errNilListValue("TIMESTAMP")), + }, + { + // Field specifies ARRAY type, but value is for FLOAT64 type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(timeType()), + }, + }, + []*proto3.Value{floatProto(1.0)}, + }, + &[]NullTime{}, + errDecodeColumn(0, errSrcVal(floatProto(1.0), "List")), + }, + { + // Field specifies ARRAY type, but value is for ARRAY type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(timeType()), + }, + }, + []*proto3.Value{listProto(floatProto(1.0))}, + }, + &[]NullTime{}, + errDecodeColumn(0, errDecodeArrayElement(0, floatProto(1.0), + "TIMESTAMP", errSrcVal(floatProto(1.0), "String"))), + }, + { + // Field specifies ARRAY type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(dateType()), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[]NullDate{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. 
+ &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(dateType()), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]NullDate{}, + errDecodeColumn(0, errNilListValue("DATE")), + }, + { + // Field specifies ARRAY type, but value is for FLOAT64 type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(dateType()), + }, + }, + []*proto3.Value{floatProto(1.0)}, + }, + &[]NullDate{}, + errDecodeColumn(0, errSrcVal(floatProto(1.0), "List")), + }, + { + // Field specifies ARRAY type, but value is for ARRAY type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(dateType()), + }, + }, + []*proto3.Value{listProto(floatProto(1.0))}, + }, + &[]NullDate{}, + errDecodeColumn(0, errDecodeArrayElement(0, floatProto(1.0), + "DATE", errSrcVal(floatProto(1.0), "String"))), + }, + { + // Field specifies ARRAY type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[]*struct { + Col1 int64 + Col2 float64 + Col3 string + }{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]*struct { + Col1 int64 + Col2 float64 + Col3 string + }{}, + errDecodeColumn(0, errNilListValue("STRUCT")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]NullRow{}, + errDecodeColumn(0, errNilListValue("STRUCT")), + }, + { + // Field specifies ARRAY type, value is for BYTES type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{bytesProto([]byte("value"))}, + }, + &[]*struct { + Col1 int64 + Col2 float64 + Col3 string + }{}, + errDecodeColumn(0, errSrcVal(bytesProto([]byte("value")), "List")), + }, + { + // Field specifies ARRAY type, value is for BYTES type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{listProto(bytesProto([]byte("value")))}, + }, + &[]NullRow{}, + errDecodeColumn(0, errNotStructElement(0, bytesProto([]byte("value")))), + }, + { + // Field specifies ARRAY type, value is for ARRAY type. 
+ &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{listProto(bytesProto([]byte("value")))}, + }, + &[]*struct { + Col1 int64 + Col2 float64 + Col3 string + }{}, + errDecodeColumn(0, errDecodeArrayElement(0, bytesProto([]byte("value")), + "STRUCT", errSrcVal(bytesProto([]byte("value")), "List"))), + }, + { + // Field specifies ARRAY, but is having nil StructType. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + &sppb.Type{Code: sppb.TypeCode_STRUCT}, + ), + }, + }, + []*proto3.Value{listProto(listProto(intProto(1), floatProto(2.0), stringProto("3")))}, + }, + &[]*struct { + Col1 int64 + Col2 float64 + Col3 string + }{}, + errDecodeColumn(0, errDecodeArrayElement(0, listProto(intProto(1), floatProto(2.0), stringProto("3")), + "STRUCT", errNilSpannerStructType())), + }, + { + // Field specifies ARRAY, but the second struct value is for BOOL type instead of FLOAT64. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{listProto(listProto(intProto(1), boolProto(true), stringProto("3")))}, + }, + &[]*struct { + Col1 int64 + Col2 float64 + Col3 string + }{}, + errDecodeColumn( + 0, + errDecodeArrayElement( + 0, listProto(intProto(1), boolProto(true), stringProto("3")), "STRUCT", + errDecodeStructField( + &sppb.StructType{ + Fields: []*sppb.StructType_Field{ + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + }, + }, + "Col2", + errSrcVal(boolProto(true), "Number"), + ), + ), + ), + }, + } { + if gotErr := test.row.Column(0, test.dst); !testEqual(gotErr, test.wantErr) { + t.Errorf("%v: test.row.Column(0) got error %v, want %v", i, gotErr, test.wantErr) + } + if gotErr := test.row.ColumnByName("Col0", test.dst); !testEqual(gotErr, test.wantErr) { + t.Errorf("%v: test.row.ColumnByName(%q) got error %v, want %v", i, "Col0", gotErr, test.wantErr) + } + if gotErr := test.row.Columns(test.dst); !testEqual(gotErr, test.wantErr) { + t.Errorf("%v: test.row.Columns(%T) got error %v, want %v", i, test.dst, gotErr, test.wantErr) + } + } +} + +// Test Row.ToStruct(). 
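Before the TestToStruct body below, a compact restatement of the tag rules documented on Row.ToStruct in row.go; every column name here is hypothetical.

// Sketch of the ToStruct mapping rules (caller-side, hypothetical columns).
type account struct {
    ID     int64              `spanner:"AccountId"` // rule 1: an explicit tag selects the column
    Name   string             // rule 2: matched to a column named "Name" case-insensitively
    Secret string             `spanner:"-"` // never decoded
    Note   spanner.NullString `spanner:"Note"` // NullXXX types absorb NULL columns
}
// var a account
// err := row.ToStruct(&a)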
+func TestToStruct(t *testing.T) { + s := []struct { + // STRING / STRING ARRAY + PrimaryKey string `spanner:"STRING"` + NullString NullString `spanner:"NULL_STRING"` + StringArray []NullString `spanner:"STRING_ARRAY"` + NullStringArray []NullString `spanner:"NULL_STRING_ARRAY"` + // BYTES / BYTES ARRAY + Bytes []byte `spanner:"BYTES"` + NullBytes []byte `spanner:"NULL_BYTES"` + BytesArray [][]byte `spanner:"BYTES_ARRAY"` + NullBytesArray [][]byte `spanner:"NULL_BYTES_ARRAY"` + // INT64 / INT64 ARRAY + Int64 int64 `spanner:"INT64"` + NullInt64 NullInt64 `spanner:"NULL_INT64"` + Int64Array []NullInt64 `spanner:"INT64_ARRAY"` + NullInt64Array []NullInt64 `spanner:"NULL_INT64_ARRAY"` + // BOOL / BOOL ARRAY + Bool bool `spanner:"BOOL"` + NullBool NullBool `spanner:"NULL_BOOL"` + BoolArray []NullBool `spanner:"BOOL_ARRAY"` + NullBoolArray []NullBool `spanner:"NULL_BOOL_ARRAY"` + // FLOAT64 / FLOAT64 ARRAY + Float64 float64 `spanner:"FLOAT64"` + NullFloat64 NullFloat64 `spanner:"NULL_FLOAT64"` + Float64Array []NullFloat64 `spanner:"FLOAT64_ARRAY"` + NullFloat64Array []NullFloat64 `spanner:"NULL_FLOAT64_ARRAY"` + // TIMESTAMP / TIMESTAMP ARRAY + Timestamp time.Time `spanner:"TIMESTAMP"` + NullTimestamp NullTime `spanner:"NULL_TIMESTAMP"` + TimestampArray []NullTime `spanner:"TIMESTAMP_ARRAY"` + NullTimestampArray []NullTime `spanner:"NULL_TIMESTAMP_ARRAY"` + // DATE / DATE ARRAY + Date civil.Date `spanner:"DATE"` + NullDate NullDate `spanner:"NULL_DATE"` + DateArray []NullDate `spanner:"DATE_ARRAY"` + NullDateArray []NullDate `spanner:"NULL_DATE_ARRAY"` + + // STRUCT ARRAY + StructArray []*struct { + Col1 int64 + Col2 float64 + Col3 string + } `spanner:"STRUCT_ARRAY"` + NullStructArray []*struct { + Col1 int64 + Col2 float64 + Col3 string + } `spanner:"NULL_STRUCT_ARRAY"` + }{ + {}, // got + { + // STRING / STRING ARRAY + "value", + NullString{}, + []NullString{{"value1", true}, {}, {"value3", true}}, + []NullString(nil), + // BYTES / BYTES ARRAY + []byte("value"), + []byte(nil), + [][]byte{[]byte("value1"), nil, []byte("value3")}, + [][]byte(nil), + // INT64 / INT64 ARRAY + int64(17), + NullInt64{}, + []NullInt64{{int64(1), true}, {int64(2), true}, {}}, + []NullInt64(nil), + // BOOL / BOOL ARRAY + true, + NullBool{}, + []NullBool{{}, {true, true}, {false, true}}, + []NullBool(nil), + // FLOAT64 / FLOAT64 ARRAY + 1.7, + NullFloat64{}, + []NullFloat64{{}, {}, {1.7, true}}, + []NullFloat64(nil), + // TIMESTAMP / TIMESTAMP ARRAY + tm, + NullTime{}, + []NullTime{{}, {tm, true}}, + []NullTime(nil), + // DATE / DATE ARRAY + dt, + NullDate{}, + []NullDate{{}, {dt, true}}, + []NullDate(nil), + // STRUCT ARRAY + []*struct { + Col1 int64 + Col2 float64 + Col3 string + }{ + nil, + &struct { + Col1 int64 + Col2 float64 + Col3 string + }{3, 33.3, "three"}, + nil, + }, + []*struct { + Col1 int64 + Col2 float64 + Col3 string + }(nil), + }, // want + } + err := row.ToStruct(&s[0]) + if err != nil { + t.Errorf("row.ToStruct() returns error: %v, want nil", err) + } else if !testEqual(s[0], s[1]) { + t.Errorf("row.ToStruct() fetches struct %v, want %v", s[0], s[1]) + } +} + +func TestToStructEmbedded(t *testing.T) { + type ( + S1 struct{ F1 string } + S2 struct { + S1 + F2 string + } + ) + r := Row{ + []*sppb.StructType_Field{ + {"F1", stringType()}, + {"F2", stringType()}, + }, + []*proto3.Value{ + stringProto("v1"), + stringProto("v2"), + }, + } + var got S2 + if err := r.ToStruct(&got); err != nil { + t.Fatal(err) + } + want := S2{S1: S1{F1: "v1"}, F2: "v2"} + if !testEqual(got, want) { + t.Errorf("got %+v, want 
%+v", got, want) + } +} + +// Test helpers for getting column names. +func TestColumnNameAndIndex(t *testing.T) { + // Test Row.Size(). + if rs := row.Size(); rs != len(row.fields) { + t.Errorf("row.Size() returns %v, want %v", rs, len(row.fields)) + } + // Test Row.Size() on empty Row. + if rs := (&Row{}).Size(); rs != 0 { + t.Errorf("empty_row.Size() returns %v, want %v", rs, 0) + } + // Test Row.ColumnName() + for i, col := range row.fields { + if cn := row.ColumnName(i); cn != col.Name { + t.Errorf("row.ColumnName(%v) returns %q, want %q", i, cn, col.Name) + } + goti, err := row.ColumnIndex(col.Name) + if err != nil { + t.Errorf("ColumnIndex(%q) error %v", col.Name, err) + continue + } + if goti != i { + t.Errorf("ColumnIndex(%q) = %d, want %d", col.Name, goti, i) + } + } + // Test Row.ColumnName on empty Row. + if cn := (&Row{}).ColumnName(0); cn != "" { + t.Errorf("empty_row.ColumnName(%v) returns %q, want %q", 0, cn, "") + } + // Test Row.ColumnIndex on empty Row. + if _, err := (&Row{}).ColumnIndex(""); err == nil { + t.Error("empty_row.ColumnIndex returns nil, want error") + } +} + +func TestNewRow(t *testing.T) { + for _, test := range []struct { + names []string + values []interface{} + want *Row + wantErr error + }{ + { + want: &Row{fields: []*sppb.StructType_Field{}, vals: []*proto3.Value{}}, + }, + { + names: []string{}, + values: []interface{}{}, + want: &Row{fields: []*sppb.StructType_Field{}, vals: []*proto3.Value{}}, + }, + { + names: []string{"a", "b"}, + values: []interface{}{}, + want: nil, + wantErr: errNamesValuesMismatch([]string{"a", "b"}, []interface{}{}), + }, + { + names: []string{"a", "b", "c"}, + values: []interface{}{5, "abc", GenericColumnValue{listType(intType()), listProto(intProto(91), nullProto(), intProto(87))}}, + want: &Row{ + []*sppb.StructType_Field{ + {"a", intType()}, + {"b", stringType()}, + {"c", listType(intType())}, + }, + []*proto3.Value{ + intProto(5), + stringProto("abc"), + listProto(intProto(91), nullProto(), intProto(87)), + }, + }, + }, + } { + got, err := NewRow(test.names, test.values) + if !testEqual(err, test.wantErr) { + t.Errorf("NewRow(%v,%v).err = %s, want %s", test.names, test.values, err, test.wantErr) + continue + } + if !testEqual(got, test.want) { + t.Errorf("NewRow(%v,%v) = %s, want %s", test.names, test.values, got, test.want) + continue + } + } +} + +func BenchmarkColumn(b *testing.B) { + var s string + for i := 0; i < b.N; i++ { + if err := row.Column(0, &s); err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/cloud.google.com/go/spanner/session.go b/vendor/cloud.google.com/go/spanner/session.go new file mode 100644 index 0000000..216e139 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/session.go @@ -0,0 +1,1075 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spanner + +import ( + "container/heap" + "container/list" + "fmt" + "log" + "math/rand" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// sessionHandle is an interface for transactions to access Cloud Spanner sessions safely. It is generated by sessionPool.take(). +type sessionHandle struct { + // mu guarantees that the inner session object is returned / destroyed only once. + mu sync.Mutex + // session is a pointer to a session object. Transactions never need to access it directly. + session *session +} + +// recycle gives the inner session object back to its home session pool. It is safe to call recycle multiple times but only the first one would take effect. +func (sh *sessionHandle) recycle() { + sh.mu.Lock() + defer sh.mu.Unlock() + if sh.session == nil { + // sessionHandle has already been recycled. + return + } + sh.session.recycle() + sh.session = nil +} + +// getID gets the Cloud Spanner session ID from the internal session object. getID returns empty string if the sessionHandle is nil or the inner session +// object has been released by recycle / destroy. +func (sh *sessionHandle) getID() string { + sh.mu.Lock() + defer sh.mu.Unlock() + if sh.session == nil { + // sessionHandle has already been recycled/destroyed. + return "" + } + return sh.session.getID() +} + +// getClient gets the Cloud Spanner RPC client associated with the session ID in sessionHandle. +func (sh *sessionHandle) getClient() sppb.SpannerClient { + sh.mu.Lock() + defer sh.mu.Unlock() + if sh.session == nil { + return nil + } + return sh.session.client +} + +// getMetadata returns the metadata associated with the session in sessionHandle. +func (sh *sessionHandle) getMetadata() metadata.MD { + sh.mu.Lock() + defer sh.mu.Unlock() + if sh.session == nil { + return nil + } + return sh.session.md +} + +// getTransactionID returns the transaction id in the session if available. +func (sh *sessionHandle) getTransactionID() transactionID { + sh.mu.Lock() + defer sh.mu.Unlock() + if sh.session == nil { + return nil + } + return sh.session.tx +} + +// destroy destroys the inner session object. It is safe to call destroy multiple times and only the first call would attempt to +// destroy the inner session object. +func (sh *sessionHandle) destroy() { + sh.mu.Lock() + s := sh.session + sh.session = nil + sh.mu.Unlock() + if s == nil { + // sessionHandle has already been destroyed. + return + } + s.destroy(false) +} + +// session wraps a Cloud Spanner session ID through which transactions are created and executed. +type session struct { + // client is the RPC channel to Cloud Spanner. It is set only once during session's creation. + client sppb.SpannerClient + // id is the unique id of the session in Cloud Spanner. It is set only once during session's creation. + id string + // pool is the session's home session pool where it was created. It is set only once during session's creation. + pool *sessionPool + // createTime is the timestamp of the session's creation. It is set only once during session's creation. + createTime time.Time + + // mu protects the following fields from concurrent access: both healthcheck workers and transactions can modify them. + mu sync.Mutex + // valid marks the validity of a session. + valid bool + // hcIndex is the index of the session inside the global healthcheck queue. If hcIndex < 0, session has been unregistered from the queue. 
+ hcIndex int
+ // idleList is the linked list node that links the session into its home session pool's idle list. If idleList == nil, the
+ // session is not in the idle list.
+ idleList *list.Element
+ // nextCheck is the timestamp of the next scheduled healthcheck of the session. It is maintained by the global health checker.
+ nextCheck time.Time
+ // checkingHealth is true if the session is currently being processed by the health checker. Must be modified under the health checker lock.
+ checkingHealth bool
+ // md is the Metadata to be sent with each request.
+ md metadata.MD
+ // tx contains the transaction id if the session has been prepared for write.
+ tx transactionID
+}
+
+// isValid returns true if the session is still valid for use.
+func (s *session) isValid() bool {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.valid
+}
+
+// isWritePrepared returns true if the session is prepared for write.
+func (s *session) isWritePrepared() bool {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.tx != nil
+}
+
+// String implements fmt.Stringer for session.
+func (s *session) String() string {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return fmt.Sprintf("<id=%v, hcIdx=%v, idleList=%p, valid=%v, create=%v, nextcheck=%v>",
+ s.id, s.hcIndex, s.idleList, s.valid, s.createTime, s.nextCheck)
+}
+
+// ping verifies if the session is still alive in Cloud Spanner.
+func (s *session) ping() error {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ defer cancel()
+ return runRetryable(ctx, func(ctx context.Context) error {
+ _, err := s.client.GetSession(contextWithOutgoingMetadata(ctx, s.pool.md), &sppb.GetSessionRequest{Name: s.getID()}) // s.getID is safe even when s is invalid.
+ return err
+ })
+}
+
+// setHcIndex atomically sets the session's index in the healthcheck queue and returns the old index.
+func (s *session) setHcIndex(i int) int {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ oi := s.hcIndex
+ s.hcIndex = i
+ return oi
+}
+
+// setIdleList atomically sets the session's idle list link and returns the old link.
+func (s *session) setIdleList(le *list.Element) *list.Element {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ old := s.idleList
+ s.idleList = le
+ return old
+}
+
+// invalidate marks a session as invalid and returns the old validity.
+func (s *session) invalidate() bool {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ ov := s.valid
+ s.valid = false
+ return ov
+}
+
+// setNextCheck sets the timestamp for the next healthcheck on the session.
+func (s *session) setNextCheck(t time.Time) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.nextCheck = t
+}
+
+// setTransactionID sets the transaction id in the session.
+func (s *session) setTransactionID(tx transactionID) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.tx = tx
+}
+
+// getID returns the session ID which uniquely identifies the session in Cloud Spanner.
+func (s *session) getID() string {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.id
+}
+
+// getHcIndex returns the session's index into the global healthcheck priority queue.
+func (s *session) getHcIndex() int {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.hcIndex
+}
+
+// getIdleList returns the session's link in its home session pool's idle list.
+func (s *session) getIdleList() *list.Element {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.idleList
+}
+
+// getNextCheck returns the timestamp for the next healthcheck on the session.
+func (s *session) getNextCheck() time.Time {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.nextCheck
+}
+
+// recycle returns the session to its home session pool.
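The handle indirection above ensures the underlying session can be returned or destroyed at most once, however recycle and destroy interleave. Before recycle itself, a package-internal sketch of the lifecycle transactions are expected to follow (take is defined later in this file):

// Sketch: how transaction code is expected to borrow a session from the pool.
func withSession(ctx context.Context, p *sessionPool) error {
    sh, err := p.take(ctx) // may reuse an idle session or create a new one
    if err != nil {
        return err
    }
    defer sh.recycle() // idempotent: only the first call returns the session
    // Issue RPCs through the session-bound client, naming the session explicitly.
    _, err = sh.getClient().GetSession(ctx, &sppb.GetSessionRequest{Name: sh.getID()})
    return err
}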
+func (s *session) recycle() {
+ s.setTransactionID(nil)
+ if !s.pool.recycle(s) {
+ // s is rejected by its home session pool because it expired and the session pool currently has enough open sessions.
+ s.destroy(false)
+ }
+}
+
+// destroy removes the session from its home session pool, healthcheck queue and Cloud Spanner service.
+func (s *session) destroy(isExpire bool) bool {
+ // Remove s from session pool.
+ if !s.pool.remove(s, isExpire) {
+ return false
+ }
+ // Unregister s from healthcheck queue.
+ s.pool.hc.unregister(s)
+ // Remove s from Cloud Spanner service.
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ // Ignore the error returned by runRetryable because even if we fail to explicitly destroy the session,
+ // it will be eventually garbage collected by Cloud Spanner.
+ err := runRetryable(ctx, func(ctx context.Context) error {
+ _, e := s.client.DeleteSession(ctx, &sppb.DeleteSessionRequest{Name: s.getID()})
+ return e
+ })
+ if err != nil {
+ log.Printf("Failed to delete session %v. Error: %v", s.getID(), err)
+ }
+ return true
+}
+
+// prepareForWrite prepares the session for write if it is not already in that state.
+func (s *session) prepareForWrite(ctx context.Context) error {
+ if s.isWritePrepared() {
+ return nil
+ }
+ tx, err := beginTransaction(ctx, s.getID(), s.client)
+ if err != nil {
+ return err
+ }
+ s.setTransactionID(tx)
+ return nil
+}
+
+// SessionPoolConfig stores configurations of a session pool.
+type SessionPoolConfig struct {
+ // getRPCClient is the caller-supplied method for getting a gRPC client to Cloud Spanner; it lets the session pool take advantage of client pooling.
+ getRPCClient func() (sppb.SpannerClient, error)
+ // MaxOpened is the maximum number of opened sessions allowed by the
+ // session pool. Defaults to NumChannels * 100.
+ MaxOpened uint64
+ // MinOpened is the minimum number of opened sessions that the session pool
+ // tries to maintain. The session pool won't continue to expire sessions if the number
+ // of opened connections drops below MinOpened. However, if a session is found
+ // to be broken, it will still be evicted from the session pool; therefore it is
+ // possible that the number of opened sessions drops below MinOpened.
+ MinOpened uint64
+ // MaxIdle is the maximum number of idle sessions the pool is allowed to keep. Defaults to 0.
+ MaxIdle uint64
+ // MaxBurst is the maximum number of concurrent session creation requests. Defaults to 10.
+ MaxBurst uint64
+ // WriteSessions is the fraction of sessions we try to keep prepared for write.
+ WriteSessions float64
+ // HealthCheckWorkers is the number of workers used by the health checker for this pool.
+ HealthCheckWorkers int
+ // HealthCheckInterval is how often the health checker pings a session. Defaults to 5 min.
+ HealthCheckInterval time.Duration
+ // healthCheckSampleInterval is how often the health checker samples live sessions (for use in maintaining session pool size). Defaults to 1 min.
+ healthCheckSampleInterval time.Duration
+}
+
+// errNoRPCGetter returns the error for a SessionPoolConfig missing its getRPCClient method.
+func errNoRPCGetter() error {
+ return spannerErrorf(codes.InvalidArgument, "require SessionPoolConfig.getRPCClient != nil, got nil")
+}
+
+// errMinOpenedGTMaxOpened returns the error for SessionPoolConfig.MaxOpened < SessionPoolConfig.MinOpened when SessionPoolConfig.MaxOpened is set.
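validate, defined just below, enforces exactly the two invariants these error constructors describe: a getRPCClient getter must be present, and MaxOpened, whenever it is non-zero, must be at least MinOpened. A package-internal sketch of a config that passes; the numbers are illustrative:

// Sketch: a SessionPoolConfig that satisfies validate().
func exampleConfig(rpc func() (sppb.SpannerClient, error)) error {
    cfg := SessionPoolConfig{
        getRPCClient:  rpc, // required: nil here yields errNoRPCGetter()
        MinOpened:     10,
        MaxOpened:     100, // must be >= MinOpened whenever it is non-zero
        MaxIdle:       25,
        WriteSessions: 0.2, // aim to keep ~20% of sessions pre-prepared for write
    }
    return cfg.validate()
}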
+func errMinOpenedGTMaxOpened(spc *SessionPoolConfig) error {
+ return spannerErrorf(codes.InvalidArgument,
+ "require SessionPoolConfig.MaxOpened >= SessionPoolConfig.MinOpened, got %v and %v", spc.MaxOpened, spc.MinOpened)
+}
+
+// validate verifies that the SessionPoolConfig is good for use.
+func (spc *SessionPoolConfig) validate() error {
+ if spc.getRPCClient == nil {
+ return errNoRPCGetter()
+ }
+ if spc.MinOpened > spc.MaxOpened && spc.MaxOpened > 0 {
+ return errMinOpenedGTMaxOpened(spc)
+ }
+ return nil
+}
+
+// sessionPool creates and caches Cloud Spanner sessions.
+type sessionPool struct {
+ // mu protects sessionPool from concurrent access.
+ mu sync.Mutex
+ // valid marks the validity of the session pool.
+ valid bool
+ // db is the database name that all sessions in the pool are associated with.
+ db string
+ // idleList caches idle session IDs. Session IDs in this list can be allocated for use.
+ idleList list.List
+ // idleWriteList caches idle sessions which have been prepared for write.
+ idleWriteList list.List
+ // mayGetSession is for broadcasting that session retrieval/creation may proceed.
+ mayGetSession chan struct{}
+ // numOpened is the total number of open sessions from the session pool.
+ numOpened uint64
+ // createReqs is the number of ongoing session creation requests.
+ createReqs uint64
+ // prepareReqs is the number of ongoing session preparation requests.
+ prepareReqs uint64
+ // SessionPoolConfig is the configuration of the session pool.
+ SessionPoolConfig
+ // md is the Metadata to be sent with each request.
+ md metadata.MD
+ // hc is the health checker.
+ hc *healthChecker
+}
+
+// newSessionPool creates a new session pool.
+func newSessionPool(db string, config SessionPoolConfig, md metadata.MD) (*sessionPool, error) {
+ if err := config.validate(); err != nil {
+ return nil, err
+ }
+ pool := &sessionPool{
+ db: db,
+ valid: true,
+ mayGetSession: make(chan struct{}),
+ SessionPoolConfig: config,
+ md: md,
+ }
+ if config.HealthCheckWorkers == 0 {
+ // With 10 workers and assuming average latency of 5 ms for BeginTransaction, we will be able to
+ // prepare 2000 tx/sec in advance. If the rate of takeWriteSession is more than that, it will
+ // degrade to doing BeginTransaction inline.
+ // TODO: consider resizing the worker pool dynamically according to the load.
+ config.HealthCheckWorkers = 10
+ }
+ if config.HealthCheckInterval == 0 {
+ config.HealthCheckInterval = 5 * time.Minute
+ }
+ if config.healthCheckSampleInterval == 0 {
+ config.healthCheckSampleInterval = time.Minute
+ }
+ // On a GCE VM within the same region, a healthcheck ping takes on average 10ms to finish. Given a 5-minute interval and
+ // 10 healthcheck workers, a healthChecker can effectively maintain 100 checks_per_worker/sec * 10 workers * 300 seconds = 300K sessions.
+ pool.hc = newHealthChecker(config.HealthCheckInterval, config.HealthCheckWorkers, config.healthCheckSampleInterval, pool)
+ close(pool.hc.ready)
+ return pool, nil
+}
+
+// isValid checks if the session pool is still valid.
+func (p *sessionPool) isValid() bool {
+ if p == nil {
+ return false
+ }
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ return p.valid
+}
+
+// close marks the session pool as closed.
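The capacity comment in newSessionPool above compresses a small calculation. Spelled out, using the defaults hard-coded in that function:

// Worked numbers behind the 300K-sessions claim in newSessionPool.
const (
    pingsPerWorkerPerSec = 100    // one ~10ms GetSession ping at a time per worker
    defaultWorkers       = 10     // default HealthCheckWorkers
    intervalSeconds      = 5 * 60 // default HealthCheckInterval of 5 minutes
    // Every session must be pinged once per interval, so the ceiling is:
    maintainableSessions = pingsPerWorkerPerSec * defaultWorkers * intervalSeconds // 300000
)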
+func (p *sessionPool) close() { + if p == nil { + return + } + p.mu.Lock() + if !p.valid { + p.mu.Unlock() + return + } + p.valid = false + p.mu.Unlock() + p.hc.close() + // destroy all the sessions + p.hc.mu.Lock() + allSessions := make([]*session, len(p.hc.queue.sessions)) + copy(allSessions, p.hc.queue.sessions) + p.hc.mu.Unlock() + for _, s := range allSessions { + s.destroy(false) + } +} + +// errInvalidSessionPool returns error for using an invalid session pool. +func errInvalidSessionPool() error { + return spannerErrorf(codes.InvalidArgument, "invalid session pool") +} + +// errGetSessionTimeout returns error for context timeout during sessionPool.take(). +func errGetSessionTimeout() error { + return spannerErrorf(codes.Canceled, "timeout / context canceled during getting session") +} + +// shouldPrepareWrite returns true if we should prepare more sessions for write. +func (p *sessionPool) shouldPrepareWrite() bool { + return float64(p.numOpened)*p.WriteSessions > float64(p.idleWriteList.Len()+int(p.prepareReqs)) +} + +func (p *sessionPool) createSession(ctx context.Context) (*session, error) { + tracePrintf(ctx, nil, "Creating a new session") + doneCreate := func(done bool) { + p.mu.Lock() + if !done { + // Session creation failed, give budget back. + p.numOpened-- + } + p.createReqs-- + // Notify other waiters blocking on session creation. + close(p.mayGetSession) + p.mayGetSession = make(chan struct{}) + p.mu.Unlock() + } + sc, err := p.getRPCClient() + if err != nil { + doneCreate(false) + return nil, err + } + var s *session + err = runRetryable(ctx, func(ctx context.Context) error { + sid, e := sc.CreateSession(ctx, &sppb.CreateSessionRequest{Database: p.db}) + if e != nil { + return e + } + // If no error, construct the new session. + s = &session{valid: true, client: sc, id: sid.Name, pool: p, createTime: time.Now(), md: p.md} + p.hc.register(s) + return nil + }) + if err != nil { + doneCreate(false) + // Should return error directly because of the previous retries on CreateSession RPC. + return nil, err + } + doneCreate(true) + return s, nil +} + +func (p *sessionPool) isHealthy(s *session) bool { + if s.getNextCheck().Add(2 * p.hc.getInterval()).Before(time.Now()) { + // TODO: figure out if we need to schedule a new healthcheck worker here. + if err := s.ping(); shouldDropSession(err) { + // The session is already bad, continue to fetch/create a new one. + s.destroy(false) + return false + } + p.hc.scheduledHC(s) + } + return true +} + +// take returns a cached session if there are available ones; if there isn't any, it tries to allocate a new one. +// Session returned by take should be used for read operations. +func (p *sessionPool) take(ctx context.Context) (*sessionHandle, error) { + tracePrintf(ctx, nil, "Acquiring a read-only session") + ctx = contextWithOutgoingMetadata(ctx, p.md) + for { + var ( + s *session + err error + ) + + p.mu.Lock() + if !p.valid { + p.mu.Unlock() + return nil, errInvalidSessionPool() + } + if p.idleList.Len() > 0 { + // Idle sessions are available, get one from the top of the idle list. 
+ s = p.idleList.Remove(p.idleList.Front()).(*session) + tracePrintf(ctx, map[string]interface{}{"sessionID": s.getID()}, + "Acquired read-only session") + } else if p.idleWriteList.Len() > 0 { + s = p.idleWriteList.Remove(p.idleWriteList.Front()).(*session) + tracePrintf(ctx, map[string]interface{}{"sessionID": s.getID()}, + "Acquired read-write session") + } + if s != nil { + s.setIdleList(nil) + p.mu.Unlock() + // From here, session is no longer in idle list, so healthcheck workers won't destroy it. + // If healthcheck workers failed to schedule healthcheck for the session timely, do the check here. + // Because session check is still much cheaper than session creation, they should be reused as much as possible. + if !p.isHealthy(s) { + continue + } + return &sessionHandle{session: s}, nil + } + // Idle list is empty, block if session pool has reached max session creation concurrency or max number of open sessions. + if (p.MaxOpened > 0 && p.numOpened >= p.MaxOpened) || (p.MaxBurst > 0 && p.createReqs >= p.MaxBurst) { + mayGetSession := p.mayGetSession + p.mu.Unlock() + tracePrintf(ctx, nil, "Waiting for read-only session to become available") + select { + case <-ctx.Done(): + tracePrintf(ctx, nil, "Context done waiting for session") + return nil, errGetSessionTimeout() + case <-mayGetSession: + } + continue + } + // Take budget before the actual session creation. + p.numOpened++ + p.createReqs++ + p.mu.Unlock() + if s, err = p.createSession(ctx); err != nil { + tracePrintf(ctx, nil, "Error creating session: %v", err) + return nil, toSpannerError(err) + } + tracePrintf(ctx, map[string]interface{}{"sessionID": s.getID()}, + "Created session") + return &sessionHandle{session: s}, nil + } +} + +// takeWriteSession returns a write prepared cached session if there are available ones; if there isn't any, it tries to allocate a new one. +// Session returned should be used for read write transactions. +func (p *sessionPool) takeWriteSession(ctx context.Context) (*sessionHandle, error) { + tracePrintf(ctx, nil, "Acquiring a read-write session") + ctx = contextWithOutgoingMetadata(ctx, p.md) + for { + var ( + s *session + err error + ) + + p.mu.Lock() + if !p.valid { + p.mu.Unlock() + return nil, errInvalidSessionPool() + } + if p.idleWriteList.Len() > 0 { + // Idle sessions are available, get one from the top of the idle list. + s = p.idleWriteList.Remove(p.idleWriteList.Front()).(*session) + tracePrintf(ctx, map[string]interface{}{"sessionID": s.getID()}, "Acquired read-write session") + } else if p.idleList.Len() > 0 { + s = p.idleList.Remove(p.idleList.Front()).(*session) + tracePrintf(ctx, map[string]interface{}{"sessionID": s.getID()}, "Acquired read-only session") + } + if s != nil { + s.setIdleList(nil) + p.mu.Unlock() + // From here, session is no longer in idle list, so healthcheck workers won't destroy it. + // If healthcheck workers failed to schedule healthcheck for the session timely, do the check here. + // Because session check is still much cheaper than session creation, they should be reused as much as possible. + if !p.isHealthy(s) { + continue + } + } else { + // Idle list is empty, block if session pool has reached max session creation concurrency or max number of open sessions. 
+			if (p.MaxOpened > 0 && p.numOpened >= p.MaxOpened) || (p.MaxBurst > 0 && p.createReqs >= p.MaxBurst) {
+				mayGetSession := p.mayGetSession
+				p.mu.Unlock()
+				tracePrintf(ctx, nil, "Waiting for read-write session to become available")
+				select {
+				case <-ctx.Done():
+					tracePrintf(ctx, nil, "Context done waiting for session")
+					return nil, errGetSessionTimeout()
+				case <-mayGetSession:
+				}
+				continue
+			}
+
+			// Take budget before the actual session creation.
+			p.numOpened++
+			p.createReqs++
+			p.mu.Unlock()
+			if s, err = p.createSession(ctx); err != nil {
+				tracePrintf(ctx, nil, "Error creating session: %v", err)
+				return nil, toSpannerError(err)
+			}
+			tracePrintf(ctx, map[string]interface{}{"sessionID": s.getID()},
+				"Created session")
+		}
+		if !s.isWritePrepared() {
+			if err = s.prepareForWrite(ctx); err != nil {
+				s.recycle()
+				tracePrintf(ctx, map[string]interface{}{"sessionID": s.getID()},
+					"Error preparing session for write")
+				return nil, toSpannerError(err)
+			}
+		}
+		return &sessionHandle{session: s}, nil
+	}
+}
+
+// recycle puts session s back into the session pool's idle list. It returns true if the session pool successfully recycles session s.
+func (p *sessionPool) recycle(s *session) bool {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if !s.isValid() || !p.valid {
+		// Reject the session if session is invalid or pool itself is invalid.
+		return false
+	}
+	// Put session at the back of the list to round-robin for load balancing across channels.
+	if s.isWritePrepared() {
+		s.setIdleList(p.idleWriteList.PushBack(s))
+	} else {
+		s.setIdleList(p.idleList.PushBack(s))
+	}
+	// Broadcast that a session has been returned to idle list.
+	close(p.mayGetSession)
+	p.mayGetSession = make(chan struct{})
+	return true
+}
+
+// remove atomically removes session s from the session pool and invalidates s.
+// If isExpire == true, the removal is triggered by session expiration and in such cases, only idle sessions can be removed.
+func (p *sessionPool) remove(s *session, isExpire bool) bool {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if isExpire && (p.numOpened <= p.MinOpened || s.getIdleList() == nil) {
+		// Don't expire session if the session is not in idle list (in use), or if number of open sessions is going below p.MinOpened.
+		return false
+	}
+	ol := s.setIdleList(nil)
+	// If the session is in the idle list, remove it.
+	if ol != nil {
+		// Remove from whichever list it is in.
+		p.idleList.Remove(ol)
+		p.idleWriteList.Remove(ol)
+	}
+	if s.invalidate() {
+		// Decrease the number of opened sessions.
+		p.numOpened--
+		// Broadcast that a session has been destroyed.
+		close(p.mayGetSession)
+		p.mayGetSession = make(chan struct{})
+		return true
+	}
+	return false
+}
+
+// hcHeap implements heap.Interface. It is used to create the priority queue for session healthchecks.
+type hcHeap struct {
+	sessions []*session
+}
+
+// Len implements heap.Interface.Len.
+func (h hcHeap) Len() int {
+	return len(h.sessions)
+}
+
+// Less implements heap.Interface.Less.
+func (h hcHeap) Less(i, j int) bool {
+	return h.sessions[i].getNextCheck().Before(h.sessions[j].getNextCheck())
+}
+
+// Swap implements heap.Interface.Swap.
+func (h hcHeap) Swap(i, j int) {
+	h.sessions[i], h.sessions[j] = h.sessions[j], h.sessions[i]
+	h.sessions[i].setHcIndex(i)
+	h.sessions[j].setHcIndex(j)
+}
+
+// Push implements heap.Interface.Push.
+func (h *hcHeap) Push(s interface{}) {
+	ns := s.(*session)
+	ns.setHcIndex(len(h.sessions))
+	h.sessions = append(h.sessions, ns)
+}
+
+// Pop implements heap.Interface.Pop.
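+// heap.Pop has already swapped the session with the earliest nextCheck to the end of the
+// slice, so this method only detaches that last element and resets its healthcheck index.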
+func (h *hcHeap) Pop() interface{} {
+	old := h.sessions
+	n := len(old)
+	s := old[n-1]
+	h.sessions = old[:n-1]
+	s.setHcIndex(-1)
+	return s
+}
+
+// healthChecker performs periodic healthchecks on registered sessions.
+type healthChecker struct {
+	// mu protects concurrent access to queue.
+	mu sync.Mutex
+	// queue is the priority queue for session healthchecks. Sessions with lower nextCheck rank higher in the queue.
+	queue hcHeap
+	// interval is the average interval between two healthchecks on a session.
+	interval time.Duration
+	// workers is the number of concurrent healthcheck workers.
+	workers int
+	// waitWorkers waits for all healthcheck workers to exit.
+	waitWorkers sync.WaitGroup
+	// pool is the underlying session pool.
+	pool *sessionPool
+	// sampleInterval is the interval of sampling by the maintainer.
+	sampleInterval time.Duration
+	// ready is used to signal that the maintainer can start running.
+	ready chan struct{}
+	// done is used to signal that the health checker should be closed.
+	done chan struct{}
+	// once is used for closing channel done only once.
+	once sync.Once
+}
+
+// newHealthChecker initializes a new instance of healthChecker.
+func newHealthChecker(interval time.Duration, workers int, sampleInterval time.Duration, pool *sessionPool) *healthChecker {
+	if workers <= 0 {
+		workers = 1
+	}
+	hc := &healthChecker{
+		interval:       interval,
+		workers:        workers,
+		pool:           pool,
+		sampleInterval: sampleInterval,
+		ready:          make(chan struct{}),
+		done:           make(chan struct{}),
+	}
+	hc.waitWorkers.Add(1)
+	go hc.maintainer()
+	for i := 1; i <= hc.workers; i++ {
+		hc.waitWorkers.Add(1)
+		go hc.worker(i)
+	}
+	return hc
+}
+
+// close closes the healthChecker and waits for all healthcheck workers to exit.
+func (hc *healthChecker) close() {
+	hc.once.Do(func() { close(hc.done) })
+	hc.waitWorkers.Wait()
+}
+
+// isClosing checks if a healthChecker is already closing.
+func (hc *healthChecker) isClosing() bool {
+	select {
+	case <-hc.done:
+		return true
+	default:
+		return false
+	}
+}
+
+// getInterval gets the healthcheck interval.
+func (hc *healthChecker) getInterval() time.Duration {
+	hc.mu.Lock()
+	defer hc.mu.Unlock()
+	return hc.interval
+}
+
+// scheduledHCLocked schedules the next healthcheck on session s with the assumption that hc.mu is being held.
+func (hc *healthChecker) scheduledHCLocked(s *session) {
+	// The next healthcheck will be scheduled after a random delay in [interval*0.5, interval*1.5).
+	nsFromNow := rand.Int63n(int64(hc.interval)) + int64(hc.interval)/2
+	s.setNextCheck(time.Now().Add(time.Duration(nsFromNow)))
+	if hi := s.getHcIndex(); hi != -1 {
+		// Session is still being tracked by healthcheck workers.
+		heap.Fix(&hc.queue, hi)
+	}
+}
+
+// scheduledHC schedules the next healthcheck on session s. It is safe to call concurrently.
+func (hc *healthChecker) scheduledHC(s *session) {
+	hc.mu.Lock()
+	defer hc.mu.Unlock()
+	hc.scheduledHCLocked(s)
+}
+
+// register registers a session with healthChecker for periodic healthchecks.
+func (hc *healthChecker) register(s *session) {
+	hc.mu.Lock()
+	defer hc.mu.Unlock()
+	hc.scheduledHCLocked(s)
+	heap.Push(&hc.queue, s)
+}
+
+// unregister unregisters a session from the healthcheck queue.
+func (hc *healthChecker) unregister(s *session) {
+	hc.mu.Lock()
+	defer hc.mu.Unlock()
+	oi := s.setHcIndex(-1)
+	if oi >= 0 {
+		heap.Remove(&hc.queue, oi)
+	}
+}
+
+// markDone marks that the health check for a session has been performed.
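+// Clearing checkingHealth makes the session eligible to be picked up again by
+// getNextForPing in the worker loop.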
+func (hc *healthChecker) markDone(s *session) {
+	hc.mu.Lock()
+	defer hc.mu.Unlock()
+	s.checkingHealth = false
+}
+
+// healthCheck checks the health of the session and pings it if needed.
+func (hc *healthChecker) healthCheck(s *session) {
+	defer hc.markDone(s)
+	if !s.pool.isValid() {
+		// Session pool is closed, perform a garbage collection.
+		s.destroy(false)
+		return
+	}
+	if err := s.ping(); shouldDropSession(err) {
+		// Ping failed, destroy the session.
+		s.destroy(false)
+	}
+}
+
+// worker performs the healthcheck on sessions in healthChecker's priority queue.
+func (hc *healthChecker) worker(i int) {
+	// Returns a session which we should ping to keep it alive.
+	getNextForPing := func() *session {
+		hc.pool.mu.Lock()
+		defer hc.pool.mu.Unlock()
+		hc.mu.Lock()
+		defer hc.mu.Unlock()
+		if hc.queue.Len() <= 0 {
+			// Queue is empty.
+			return nil
+		}
+		s := hc.queue.sessions[0]
+		if s.getNextCheck().After(time.Now()) && hc.pool.valid {
+			// All sessions have been checked recently.
+			return nil
+		}
+		hc.scheduledHCLocked(s)
+		if !s.checkingHealth {
+			s.checkingHealth = true
+			return s
+		}
+		return nil
+	}
+
+	// Returns a session which we should prepare for write.
+	getNextForTx := func() *session {
+		hc.pool.mu.Lock()
+		defer hc.pool.mu.Unlock()
+		if hc.pool.shouldPrepareWrite() {
+			if hc.pool.idleList.Len() > 0 && hc.pool.valid {
+				hc.mu.Lock()
+				defer hc.mu.Unlock()
+				if hc.pool.idleList.Front().Value.(*session).checkingHealth {
+					return nil
+				}
+				session := hc.pool.idleList.Remove(hc.pool.idleList.Front()).(*session)
+				session.checkingHealth = true
+				hc.pool.prepareReqs++
+				return session
+			}
+		}
+		return nil
+	}
+
+	for {
+		if hc.isClosing() {
+			// Exit when the pool has been closed and all sessions have been destroyed
+			// or when the health checker has been closed.
+			hc.waitWorkers.Done()
+			return
+		}
+		ws := getNextForTx()
+		if ws != nil {
+			ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+			err := ws.prepareForWrite(contextWithOutgoingMetadata(ctx, hc.pool.md))
+			cancel()
+			if err != nil {
+				// Skip handling the prepare error; the session can be prepared in the next cycle.
+				log.Printf("Failed to prepare session, error: %v", toSpannerError(err))
+			}
+			hc.pool.recycle(ws)
+			hc.pool.mu.Lock()
+			hc.pool.prepareReqs--
+			hc.pool.mu.Unlock()
+			hc.markDone(ws)
+		}
+		rs := getNextForPing()
+		if rs == nil {
+			if ws == nil {
+				// No work to be done, so sleep to avoid burning CPU.
+				pause := int64(100 * time.Millisecond)
+				if pause > int64(hc.interval) {
+					pause = int64(hc.interval)
+				}
+				select {
+				case <-time.After(time.Duration(rand.Int63n(pause) + pause/2)):
+					break
+				case <-hc.done:
+					break
+				}
+
+			}
+			continue
+		}
+		hc.healthCheck(rs)
+	}
+}
+
+// maintainer tracks maxSessionsInUse over a window of windowSize * sampleInterval.
+// Based on this number, it replenishes or shrinks the pool so that the number of
+// open sessions stays close to maxSessionsInUse plus MaxIdle, bounded below by MinOpened.
+func (hc *healthChecker) maintainer() {
+	// Wait so that pool is ready.
+	<-hc.ready
+
+	var (
+		windowSize uint64 = 10
+		iteration  uint64
+		timeout    <-chan time.Time
+	)
+
+	// replenishPool is run if numOpened is less than sessionsToKeep; it times out after sampleInterval.
+	replenishPool := func(sessionsToKeep uint64) {
+		ctx, cancel := context.WithTimeout(context.Background(), hc.sampleInterval)
+		defer cancel()
+		for {
+			select {
+			case <-timeout:
+				return
+			default:
+				break
+			}
+
+			p := hc.pool
+			p.mu.Lock()
+			// Take budget before the actual session creation.
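+			// Stop replenishing once the pool has reached the target size again.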
+			if sessionsToKeep <= p.numOpened {
+				p.mu.Unlock()
+				break
+			}
+			p.numOpened++
+			p.createReqs++
+			shouldPrepareWrite := p.shouldPrepareWrite()
+			p.mu.Unlock()
+			var (
+				s   *session
+				err error
+			)
+			if s, err = p.createSession(ctx); err != nil {
+				log.Printf("Failed to create session, error: %v", toSpannerError(err))
+				continue
+			}
+			if shouldPrepareWrite {
+				if err = s.prepareForWrite(ctx); err != nil {
+					p.recycle(s)
+					log.Printf("Failed to prepare session, error: %v", toSpannerError(err))
+					continue
+				}
+			}
+			p.recycle(s)
+		}
+	}
+
+	// shrinkPool scales down the session pool.
+	shrinkPool := func(sessionsToKeep uint64) {
+		for {
+			select {
+			case <-timeout:
+				return
+			default:
+				break
+			}
+
+			p := hc.pool
+			p.mu.Lock()
+
+			if sessionsToKeep >= p.numOpened {
+				p.mu.Unlock()
+				break
+			}
+
+			var s *session
+			if p.idleList.Len() > 0 {
+				s = p.idleList.Front().Value.(*session)
+			} else if p.idleWriteList.Len() > 0 {
+				s = p.idleWriteList.Front().Value.(*session)
+			}
+			p.mu.Unlock()
+			if s != nil {
+				// Destroy the session as expired.
+				s.destroy(true)
+			} else {
+				break
+			}
+		}
+	}
+
+	// maxSessionsInUse is the maximum number of sessions in use concurrently seen during the
+	// current sampling window; it is carried across iterations and reset every windowSize iterations.
+	var maxSessionsInUse uint64
+
+	for {
+		if hc.isClosing() {
+			hc.waitWorkers.Done()
+			return
+		}
+
+		// Update metrics.
+		hc.pool.mu.Lock()
+		currSessionsInUse := hc.pool.numOpened - uint64(hc.pool.idleList.Len()) - uint64(hc.pool.idleWriteList.Len())
+		currSessionsOpened := hc.pool.numOpened
+		hc.pool.mu.Unlock()
+
+		hc.mu.Lock()
+		if iteration%windowSize == 0 || maxSessionsInUse < currSessionsInUse {
+			maxSessionsInUse = currSessionsInUse
+		}
+		sessionsToKeep := maxUint64(hc.pool.MinOpened,
+			minUint64(currSessionsOpened, hc.pool.MaxIdle+maxSessionsInUse))
+		hc.mu.Unlock()
+
+		timeout = time.After(hc.sampleInterval)
+		// Replenish or shrink the pool if needed.
+		// Note: we don't need to worry about pending create session requests; we only need to sample the current sessions in use.
+		// The routines will not try to create extra sessions or delete sessions that are still being created.
+		if sessionsToKeep > currSessionsOpened {
+			replenishPool(sessionsToKeep)
+		} else {
+			shrinkPool(sessionsToKeep)
+		}
+
+		select {
+		case <-timeout:
+			break
+		case <-hc.done:
+			break
+		}
+		iteration++
+	}
+}
+
+// shouldDropSession returns true if a particular error leads to the removal of a session.
+func shouldDropSession(err error) bool {
+	if err == nil {
+		return false
+	}
+	// If Cloud Spanner can no longer locate the session (for example, if the session is garbage collected), then the caller
+	// should not try to return the session back into the session pool.
+	// TODO: once gRPC can return auxiliary error information, stop parsing the error message.
+	if ErrCode(err) == codes.NotFound && strings.Contains(ErrDesc(err), "Session not found:") {
+		return true
+	}
+	return false
+}
diff --git a/vendor/cloud.google.com/go/spanner/session_test.go b/vendor/cloud.google.com/go/spanner/session_test.go
new file mode 100644
index 0000000..c5bc8c9
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/session_test.go
@@ -0,0 +1,857 @@
+/*
+Copyright 2017 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spanner
+
+import (
+	"bytes"
+	"container/heap"
+	"math/rand"
+	"sync"
+	"testing"
+	"time"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/status"
+
+	"cloud.google.com/go/spanner/internal/testutil"
+	sppb "google.golang.org/genproto/googleapis/spanner/v1"
+	"google.golang.org/grpc/codes"
+)
+
+// setup prepares the test environment for regular session pool tests.
+func setup(t *testing.T, spc SessionPoolConfig) (sp *sessionPool, sc *testutil.MockCloudSpannerClient, cancel func()) {
+	sc = testutil.NewMockCloudSpannerClient(t)
+	spc.getRPCClient = func() (sppb.SpannerClient, error) {
+		return sc, nil
+	}
+	if spc.HealthCheckInterval == 0 {
+		spc.HealthCheckInterval = 50 * time.Millisecond
+	}
+	if spc.healthCheckSampleInterval == 0 {
+		spc.healthCheckSampleInterval = 10 * time.Millisecond
+	}
+	sp, err := newSessionPool("mockdb", spc, nil)
+	if err != nil {
+		t.Fatalf("cannot create session pool: %v", err)
+	}
+	cancel = func() {
+		sp.close()
+	}
+	return
+}
+
+// TestSessionCreation tests session creation during sessionPool.Take().
+func TestSessionCreation(t *testing.T) {
+	t.Parallel()
+	sp, sc, cancel := setup(t, SessionPoolConfig{})
+	defer cancel()
+	// Take three sessions from the session pool; this should trigger the session pool to create three new sessions.
+	shs := make([]*sessionHandle, 3)
+	// gotDs holds the unique sessions taken from the session pool.
+	gotDs := map[string]bool{}
+	for i := 0; i < len(shs); i++ {
+		var err error
+		shs[i], err = sp.take(context.Background())
+		if err != nil {
+			t.Errorf("failed to get session(%v): %v", i, err)
+		}
+		gotDs[shs[i].getID()] = true
+	}
+	if len(gotDs) != len(shs) {
+		t.Errorf("session pool created %v sessions, want %v", len(gotDs), len(shs))
+	}
+	if wantDs := sc.DumpSessions(); !testEqual(gotDs, wantDs) {
+		t.Errorf("session pool creates sessions %v, want %v", gotDs, wantDs)
+	}
+	// Verify that created sessions are recorded correctly in the session pool.
+	sp.mu.Lock()
+	if int(sp.numOpened) != len(shs) {
+		t.Errorf("session pool reports %v open sessions, want %v", sp.numOpened, len(shs))
+	}
+	if sp.createReqs != 0 {
+		t.Errorf("session pool reports %v session create requests, want 0", int(sp.createReqs))
+	}
+	sp.mu.Unlock()
+	// Verify that created sessions are tracked correctly by the healthcheck queue.
+	hc := sp.hc
+	hc.mu.Lock()
+	if hc.queue.Len() != len(shs) {
+		t.Errorf("healthcheck queue length = %v, want %v", hc.queue.Len(), len(shs))
+	}
+	for _, s := range hc.queue.sessions {
+		if !gotDs[s.getID()] {
+			t.Errorf("session %v is in the healthcheck queue, but it was not created by the session pool", s.getID())
+		}
+	}
+	hc.mu.Unlock()
+}
+
+// TestTakeFromIdleList tests taking sessions from the session pool's idle list.
+func TestTakeFromIdleList(t *testing.T) {
+	t.Parallel()
+	sp, sc, cancel := setup(t, SessionPoolConfig{MaxIdle: 10}) // make sure maintainer keeps the idle sessions
+	defer cancel()
+	// Take ten sessions from the session pool and recycle them.
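+	// The idle list starts out empty, so each take() below creates a fresh session on the mock client.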
+	shs := make([]*sessionHandle, 10)
+	for i := 0; i < len(shs); i++ {
+		var err error
+		shs[i], err = sp.take(context.Background())
+		if err != nil {
+			t.Errorf("failed to get session(%v): %v", i, err)
+		}
+	}
+	// Make sure it's sampled once before recycling, otherwise it will be cleaned up.
+	<-time.After(sp.SessionPoolConfig.healthCheckSampleInterval)
+	for i := 0; i < len(shs); i++ {
+		shs[i].recycle()
+	}
+	// Further session requests from the session pool won't cause mockclient to create more sessions.
+	wantSessions := sc.DumpSessions()
+	// Take ten sessions from the session pool again, this time all sessions should come from the idle list.
+	gotSessions := map[string]bool{}
+	for i := 0; i < len(shs); i++ {
+		sh, err := sp.take(context.Background())
+		if err != nil {
+			t.Errorf("cannot take session from session pool: %v", err)
+		}
+		gotSessions[sh.getID()] = true
+	}
+	if len(gotSessions) != 10 {
+		t.Errorf("got %v unique sessions, want 10", len(gotSessions))
+	}
+	if !testEqual(gotSessions, wantSessions) {
+		t.Errorf("got sessions: %v, want %v", gotSessions, wantSessions)
+	}
+}
+
+// TestTakeWriteSessionFromIdleList tests taking write sessions from the session pool's idle list.
+func TestTakeWriteSessionFromIdleList(t *testing.T) {
+	t.Parallel()
+	sp, sc, cancel := setup(t, SessionPoolConfig{MaxIdle: 20}) // make sure maintainer keeps the idle sessions
+	defer cancel()
+
+	acts := make([]testutil.Action, 20)
+	for i := 0; i < len(acts); i++ {
+		acts[i] = testutil.Action{"BeginTransaction", nil}
+	}
+	sc.SetActions(acts...)
+	// Take ten sessions from the session pool and recycle them.
+	shs := make([]*sessionHandle, 10)
+	for i := 0; i < len(shs); i++ {
+		var err error
+		shs[i], err = sp.takeWriteSession(context.Background())
+		if err != nil {
+			t.Errorf("failed to get session(%v): %v", i, err)
+		}
+	}
+	// Make sure it's sampled once before recycling, otherwise it will be cleaned up.
+	<-time.After(sp.SessionPoolConfig.healthCheckSampleInterval)
+	for i := 0; i < len(shs); i++ {
+		shs[i].recycle()
+	}
+	// Further session requests from the session pool won't cause mockclient to create more sessions.
+	wantSessions := sc.DumpSessions()
+	// Take ten sessions from the session pool again, this time all sessions should come from the idle list.
+	gotSessions := map[string]bool{}
+	for i := 0; i < len(shs); i++ {
+		sh, err := sp.takeWriteSession(context.Background())
+		if err != nil {
+			t.Errorf("cannot take session from session pool: %v", err)
+		}
+		gotSessions[sh.getID()] = true
+	}
+	if len(gotSessions) != 10 {
+		t.Errorf("got %v unique sessions, want 10", len(gotSessions))
+	}
+	if !testEqual(gotSessions, wantSessions) {
+		t.Errorf("got sessions: %v, want %v", gotSessions, wantSessions)
+	}
+}
+
+// TestTakeFromIdleListChecked tests taking sessions from the session pool's idle list, but with an extra ping check.
+func TestTakeFromIdleListChecked(t *testing.T) {
+	t.Parallel()
+	if testing.Short() {
+		t.SkipNow()
+	}
+	sp, sc, cancel := setup(t, SessionPoolConfig{MaxIdle: 1}) // make sure maintainer keeps the idle sessions
+	defer cancel()
+	// Stop healthcheck workers to simulate slow pings.
+	sp.hc.close()
+	// Create a session and recycle it.
+	sh, err := sp.take(context.Background())
+	if err != nil {
+		t.Errorf("failed to get session: %v", err)
+	}
+	// Make sure it's sampled once before recycling, otherwise it will be cleaned up.
+	<-time.After(sp.SessionPoolConfig.healthCheckSampleInterval)
+	wantSid := sh.getID()
+	sh.recycle()
+	<-time.After(time.Second)
+	// Two back-to-back session requests, both of them should return the same session created before and
+	// none of them should trigger a session ping.
+	for i := 0; i < 2; i++ {
+		// Take the session from the idle list and recycle it.
+		sh, err = sp.take(context.Background())
+		if err != nil {
+			t.Errorf("%v - failed to get session: %v", i, err)
+		}
+		if gotSid := sh.getID(); gotSid != wantSid {
+			t.Errorf("%v - got session id: %v, want %v", i, gotSid, wantSid)
+		}
+		// The two back-to-back session requests shouldn't trigger any session pings because sessionPool.Take
+		// reschedules the next healthcheck.
+		if got, want := sc.DumpPings(), ([]string{wantSid}); !testEqual(got, want) {
+			t.Errorf("%v - got ping session requests: %v, want %v", i, got, want)
+		}
+		sh.recycle()
+	}
+	// Inject session error to mockclient, and take the session from the session pool; the old session should be destroyed and
+	// the session pool will create a new session.
+	sc.InjectError("GetSession", status.Errorf(codes.NotFound, "Session not found:"))
+	// Delay to trigger sessionPool.Take to ping the session.
+	<-time.After(time.Second)
+	sh, err = sp.take(context.Background())
+	if err != nil {
+		t.Errorf("failed to get session: %v", err)
+	}
+	ds := sc.DumpSessions()
+	if len(ds) != 1 {
+		t.Errorf("dumped sessions from mockclient: %v, want %v", ds, sh.getID())
+	}
+	if sh.getID() == wantSid {
+		t.Errorf("sessionPool.Take still returns the same session %v, want it to create a new one", wantSid)
+	}
+}
+
+// TestTakeFromIdleWriteListChecked tests taking write-prepared sessions from the session pool's idle list, but with an extra ping check.
+func TestTakeFromIdleWriteListChecked(t *testing.T) {
+	t.Parallel()
+	if testing.Short() {
+		t.SkipNow()
+	}
+	sp, sc, cancel := setup(t, SessionPoolConfig{MaxIdle: 1}) // make sure maintainer keeps the idle sessions
+	defer cancel()
+	sc.MakeNice()
+	// Stop healthcheck workers to simulate slow pings.
+	sp.hc.close()
+	// Create a session and recycle it.
+	sh, err := sp.takeWriteSession(context.Background())
+	if err != nil {
+		t.Errorf("failed to get session: %v", err)
+	}
+	wantSid := sh.getID()
+	// Make sure it's sampled once before recycling, otherwise it will be cleaned up.
+	<-time.After(sp.SessionPoolConfig.healthCheckSampleInterval)
+	sh.recycle()
+	<-time.After(time.Second)
+	// Two back-to-back session requests, both of them should return the same session created before and
+	// none of them should trigger a session ping.
+	for i := 0; i < 2; i++ {
+		// Take the session from the idle list and recycle it.
+		sh, err = sp.takeWriteSession(context.Background())
+		if err != nil {
+			t.Errorf("%v - failed to get session: %v", i, err)
+		}
+		if gotSid := sh.getID(); gotSid != wantSid {
+			t.Errorf("%v - got session id: %v, want %v", i, gotSid, wantSid)
+		}
+		// The two back-to-back session requests shouldn't trigger any session pings because sessionPool.Take
+		// reschedules the next healthcheck.
+		if got, want := sc.DumpPings(), ([]string{wantSid}); !testEqual(got, want) {
+			t.Errorf("%v - got ping session requests: %v, want %v", i, got, want)
+		}
+		sh.recycle()
+	}
+	// Inject session error to mockclient, and take the session from the session pool; the old session should be destroyed and
+	// the session pool will create a new session.
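+	// shouldDropSession treats NotFound errors whose message contains "Session not found:"
+	// as permanently broken sessions, so the pool has to replace the old session.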
+ sc.InjectError("GetSession", status.Errorf(codes.NotFound, "Session not found:")) + // Delay to trigger sessionPool.Take to ping the session. + <-time.After(time.Second) + sh, err = sp.takeWriteSession(context.Background()) + if err != nil { + t.Errorf("failed to get session: %v", err) + } + ds := sc.DumpSessions() + if len(ds) != 1 { + t.Errorf("dumped sessions from mockclient: %v, want %v", ds, sh.getID()) + } + if sh.getID() == wantSid { + t.Errorf("sessionPool.Take still returns the same session %v, want it to create a new one", wantSid) + } +} + +// TestMaxOpenedSessions tests max open sessions constraint. +func TestMaxOpenedSessions(t *testing.T) { + t.Parallel() + if testing.Short() { + t.SkipNow() + } + sp, _, cancel := setup(t, SessionPoolConfig{MaxOpened: 1}) + defer cancel() + sh1, err := sp.take(context.Background()) + if err != nil { + t.Errorf("cannot take session from session pool: %v", err) + } + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + // Session request will timeout due to the max open sessions constraint. + sh2, gotErr := sp.take(ctx) + if wantErr := errGetSessionTimeout(); !testEqual(gotErr, wantErr) { + t.Errorf("the second session retrival returns error %v, want %v", gotErr, wantErr) + } + go func() { + <-time.After(time.Second) + // destroy the first session to allow the next session request to proceed. + sh1.destroy() + }() + // Now session request can be processed because the first session will be destroyed. + sh2, err = sp.take(context.Background()) + if err != nil { + t.Errorf("after the first session is destroyed, session retrival still returns error %v, want nil", err) + } + if !sh2.session.isValid() || sh2.getID() == "" { + t.Errorf("got invalid session: %v", sh2.session) + } +} + +// TestMinOpenedSessions tests min open session constraint. +func TestMinOpenedSessions(t *testing.T) { + sp, _, cancel := setup(t, SessionPoolConfig{MinOpened: 1}) + defer cancel() + // Take ten sessions from session pool and recycle them. + var ss []*session + var shs []*sessionHandle + for i := 0; i < 10; i++ { + sh, err := sp.take(context.Background()) + if err != nil { + t.Errorf("failed to get session(%v): %v", i, err) + } + ss = append(ss, sh.session) + shs = append(shs, sh) + sh.recycle() + } + for _, sh := range shs { + sh.recycle() + } + // Simulate session expiration. + for _, s := range ss { + s.destroy(true) + } + sp.mu.Lock() + defer sp.mu.Unlock() + // There should be still one session left in idle list due to the min open sessions constraint. + if sp.idleList.Len() != 1 { + t.Errorf("got %v sessions in idle list, want 1 %d", sp.idleList.Len(), sp.numOpened) + } +} + +// TestMaxBurst tests max burst constraint. +func TestMaxBurst(t *testing.T) { + t.Parallel() + if testing.Short() { + t.SkipNow() + } + sp, sc, cancel := setup(t, SessionPoolConfig{MaxBurst: 1}) + defer cancel() + // Will cause session creation RPC to be retried forever. + sc.InjectError("CreateSession", status.Errorf(codes.Unavailable, "try later")) + // This session request will never finish until the injected error is cleared. + go sp.take(context.Background()) + // Poll for the execution of the first session request. + for { + sp.mu.Lock() + cr := sp.createReqs + sp.mu.Unlock() + if cr == 0 { + <-time.After(time.Second) + continue + } + // The first session request is being executed. 
+		break
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	defer cancel()
+	sh, gotErr := sp.take(ctx)
+	// Since MaxBurst == 1, the second session request should block.
+	if wantErr := errGetSessionTimeout(); !testEqual(gotErr, wantErr) {
+		t.Errorf("session retrieval returns error %v, want %v", gotErr, wantErr)
+	}
+	// Let the first session request succeed.
+	sc.InjectError("CreateSession", nil)
+	// Now a new session request can proceed because the first session request will eventually succeed.
+	sh, err := sp.take(context.Background())
+	if err != nil {
+		t.Errorf("session retrieval returns error %v, want nil", err)
+	}
+	if !sh.session.isValid() || sh.getID() == "" {
+		t.Errorf("got invalid session: %v", sh.session)
+	}
+}
+
+// TestSessionRecycle tests recycling sessions.
+func TestSessionRecycle(t *testing.T) {
+	t.Parallel()
+	if testing.Short() {
+		t.SkipNow()
+	}
+	sp, _, cancel := setup(t, SessionPoolConfig{MinOpened: 1, MaxIdle: 2})
+	// Set MaxIdle to ensure shs[0] is not destroyed from scale down.
+	defer cancel()
+
+	// Test that a session is correctly recycled and reused.
+	for i := 0; i < 20; i++ {
+		s, err := sp.take(context.Background())
+		if err != nil {
+			t.Errorf("cannot get the session %v: %v", i, err)
+		}
+		s.recycle()
+	}
+	if sp.numOpened != 1 {
+		t.Errorf("Expect session pool size %d, got %d", 1, sp.numOpened)
+	}
+}
+
+// TestSessionDestroy tests destroying sessions.
+func TestSessionDestroy(t *testing.T) {
+	t.Parallel()
+	sp, _, cancel := setup(t, SessionPoolConfig{MinOpened: 1})
+	defer cancel()
+	<-time.After(10 * time.Millisecond) // maintainer will create one session; we wait for it to be created to avoid flakiness in the test
+	sh, err := sp.take(context.Background())
+	if err != nil {
+		t.Errorf("cannot get session from session pool: %v", err)
+	}
+	s := sh.session
+	sh.recycle()
+	if d := s.destroy(true); d || !s.isValid() {
+		// Session should remain alive because of the min open sessions constraint.
+		t.Errorf("session %v invalid, want it to stay alive. (destroy in expiration mode, success: %v)", s, d)
+	}
+	if d := s.destroy(false); !d || s.isValid() {
+		// Session should be destroyed.
+		t.Errorf("failed to destroy session %v. (destroy in default mode, success: %v)", s, d)
+	}
+}
+
+// TestHcHeap tests heap operations on top of hcHeap.
+func TestHcHeap(t *testing.T) {
+	in := []*session{
+		&session{nextCheck: time.Unix(10, 0)},
+		&session{nextCheck: time.Unix(0, 5)},
+		&session{nextCheck: time.Unix(1, 8)},
+		&session{nextCheck: time.Unix(11, 7)},
+		&session{nextCheck: time.Unix(6, 3)},
+	}
+	want := []*session{
+		&session{nextCheck: time.Unix(1, 8), hcIndex: 0},
+		&session{nextCheck: time.Unix(6, 3), hcIndex: 1},
+		&session{nextCheck: time.Unix(8, 2), hcIndex: 2},
+		&session{nextCheck: time.Unix(10, 0), hcIndex: 3},
+		&session{nextCheck: time.Unix(11, 7), hcIndex: 4},
+	}
+	hh := hcHeap{}
+	for _, s := range in {
+		heap.Push(&hh, s)
+	}
+	// Change the top of the heap and do an adjustment.
+	hh.sessions[0].nextCheck = time.Unix(8, 2)
+	heap.Fix(&hh, 0)
+	for idx := 0; hh.Len() > 0; idx++ {
+		got := heap.Pop(&hh).(*session)
+		want[idx].hcIndex = -1
+		if !testEqual(got, want[idx]) {
+			t.Errorf("%v: heap.Pop returns %v, want %v", idx, got, want[idx])
+		}
+	}
+}
+
+// TestHealthCheckScheduler tests if healthcheck workers can schedule and perform healthchecks properly.
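+// With the 50ms HealthCheckInterval configured in setup, each session should receive
+// roughly 20 pings during the one-second window below.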
+func TestHealthCheckScheduler(t *testing.T) {
+	t.Parallel()
+	if testing.Short() {
+		t.SkipNow()
+	}
+	sp, sc, cancel := setup(t, SessionPoolConfig{})
+	defer cancel()
+	// Create 50 sessions.
+	ss := []string{}
+	for i := 0; i < 50; i++ {
+		sh, err := sp.take(context.Background())
+		if err != nil {
+			t.Errorf("cannot get session from session pool: %v", err)
+		}
+		ss = append(ss, sh.getID())
+	}
+	// Sleep for 1s, allowing healthcheck workers to perform some session pings.
+	<-time.After(time.Second)
+	dp := sc.DumpPings()
+	gotPings := map[string]int64{}
+	for _, p := range dp {
+		gotPings[p]++
+	}
+	for _, s := range ss {
+		// The average ping interval is 50ms.
+		want := int64(time.Second) / int64(50*time.Millisecond)
+		if got := gotPings[s]; got < want/2 || got > want+want/2 {
+			t.Errorf("got %v healthchecks on session %v, want it between (%v, %v)", got, s, want/2, want+want/2)
+		}
+	}
+}
+
+// TestWriteSessionsPrepared tests that a fraction of sessions are prepared for write by the health checker.
+func TestWriteSessionsPrepared(t *testing.T) {
+	if testing.Short() {
+		t.SkipNow()
+	}
+	sp, sc, cancel := setup(t, SessionPoolConfig{WriteSessions: 0.5, MaxIdle: 20})
+	sc.MakeNice()
+	defer cancel()
+	shs := make([]*sessionHandle, 10)
+	var err error
+	for i := 0; i < 10; i++ {
+		shs[i], err = sp.take(context.Background())
+		if err != nil {
+			t.Errorf("cannot get session from session pool: %v", err)
+		}
+	}
+	// Now there are 10 sessions in the pool. Release them.
+	for _, sh := range shs {
+		sh.recycle()
+	}
+	// Sleep for 1s, allowing healthcheck workers to invoke begin transaction.
+	<-time.After(time.Second)
+	wshs := make([]*sessionHandle, 5)
+	for i := 0; i < 5; i++ {
+		wshs[i], err = sp.takeWriteSession(context.Background())
+		if err != nil {
+			t.Errorf("cannot get session from session pool: %v", err)
+		}
+		if wshs[i].getTransactionID() == nil {
+			t.Errorf("got nil transaction id from session pool")
+		}
+	}
+	for _, sh := range wshs {
+		sh.recycle()
+	}
+	<-time.After(time.Second)
+	// Now force creation of 10 more sessions.
+	shs = make([]*sessionHandle, 20)
+	for i := 0; i < 20; i++ {
+		shs[i], err = sp.take(context.Background())
+		if err != nil {
+			t.Errorf("cannot get session from session pool: %v", err)
+		}
+	}
+	// Now there are 20 sessions in the pool. Release them.
+	for _, sh := range shs {
+		sh.recycle()
+	}
+	<-time.After(time.Second)
+	if sp.idleWriteList.Len() != 10 {
+		t.Errorf("Expect 10 write-prepared sessions, got: %d", sp.idleWriteList.Len())
+	}
+}
+
+// TestTakeFromWriteQueue tests that sessionPool.take() returns write-prepared sessions as well.
+func TestTakeFromWriteQueue(t *testing.T) {
+	t.Parallel()
+	if testing.Short() {
+		t.SkipNow()
+	}
+	sp, sc, cancel := setup(t, SessionPoolConfig{MaxOpened: 1, WriteSessions: 1.0, MaxIdle: 1})
+	sc.MakeNice()
+	defer cancel()
+	sh, err := sp.take(context.Background())
+	if err != nil {
+		t.Errorf("cannot get session from session pool: %v", err)
+	}
+	sh.recycle()
+	<-time.After(time.Second)
+	// The session should now be in the write queue but take should also return it.
+	if sp.idleWriteList.Len() == 0 {
+		t.Errorf("write queue unexpectedly empty")
+	}
+	if sp.idleList.Len() != 0 {
+		t.Errorf("read queue not empty")
+	}
+	sh, err = sp.take(context.Background())
+	if err != nil {
+		t.Errorf("cannot get session from session pool: %v", err)
+	}
+	sh.recycle()
+}
+
+// TestSessionHealthCheck tests healthchecking cases.
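+// It covers session pinging, broken session detection and garbage collection on pool close.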
+func TestSessionHealthCheck(t *testing.T) {
+	t.Parallel()
+	if testing.Short() {
+		t.SkipNow()
+	}
+	sp, sc, cancel := setup(t, SessionPoolConfig{})
+	defer cancel()
+	// Test pinging sessions.
+	sh, err := sp.take(context.Background())
+	if err != nil {
+		t.Errorf("cannot get session from session pool: %v", err)
+	}
+	<-time.After(time.Second)
+	pings := sc.DumpPings()
+	if len(pings) == 0 || pings[0] != sh.getID() {
+		t.Errorf("healthchecker didn't send any ping to session %v", sh.getID())
+	}
+	// Test broken session detection.
+	sh, err = sp.take(context.Background())
+	if err != nil {
+		t.Errorf("cannot get session from session pool: %v", err)
+	}
+	sc.InjectError("GetSession", status.Errorf(codes.NotFound, "Session not found:"))
+	// Wait for healthcheck workers to find the broken session and tear it down.
+	<-time.After(1 * time.Second)
+	s := sh.session
+	if sh.session.isValid() {
+		t.Errorf("session(%v) is still alive, want it to be dropped by healthcheck workers", s)
+	}
+	sc.InjectError("GetSession", nil)
+	// Test garbage collection.
+	sh, err = sp.take(context.Background())
+	if err != nil {
+		t.Errorf("cannot get session from session pool: %v", err)
+	}
+	sp.close()
+	if sh.session.isValid() {
+		t.Errorf("session(%v) is still alive, want it to be garbage collected", s)
+	}
+}
+
+// TestStressSessionPool does a stress test on the session pool through the following concurrent operations:
+// 1) Test worker gets a session from the pool.
+// 2) Test worker turns a session back into the pool.
+// 3) Test worker destroys a session got from the pool.
+// 4) Healthcheck destroys a broken session (because a worker has already destroyed it).
+// 5) Test worker closes the session pool.
+//
+// During the test, the session pool maintainer maintains the number of sessions,
+// and it is expected that all sessions that are taken from the session pool remain valid.
+// When all test workers and healthcheck workers exit, mockclient, session pool
+// and healthchecker should be in a consistent state.
+func TestStressSessionPool(t *testing.T) {
+	t.Parallel()
+	// Use concurrent workers to test different session pools built from different configurations.
+	if testing.Short() {
+		t.SkipNow()
+	}
+	for ti, cfg := range []SessionPoolConfig{
+		SessionPoolConfig{},
+		SessionPoolConfig{MinOpened: 10, MaxOpened: 100},
+		SessionPoolConfig{MaxBurst: 50},
+		SessionPoolConfig{MinOpened: 10, MaxOpened: 200, MaxBurst: 5},
+		SessionPoolConfig{MinOpened: 10, MaxOpened: 200, MaxBurst: 5, WriteSessions: 0.2},
+	} {
+		var wg sync.WaitGroup
+		// Create a more aggressive session healthchecker to increase test concurrency.
+		cfg.HealthCheckInterval = 50 * time.Millisecond
+		cfg.healthCheckSampleInterval = 10 * time.Millisecond
+		cfg.HealthCheckWorkers = 50
+		sc := testutil.NewMockCloudSpannerClient(t)
+		sc.MakeNice()
+		cfg.getRPCClient = func() (sppb.SpannerClient, error) {
+			return sc, nil
+		}
+		sp, _ := newSessionPool("mockdb", cfg, nil)
+		for i := 0; i < 100; i++ {
+			wg.Add(1)
+			// Schedule a test worker.
+			go func(idx int, pool *sessionPool, client sppb.SpannerClient) {
+				defer wg.Done()
+				// Test worker iterates 1K times and tries different session / session pool operations.
+				for j := 0; j < 1000; j++ {
+					if idx%10 == 0 && j >= 900 {
+						// Close the pool in a selected set of workers during the middle of the test.
+						pool.close()
+					}
+					// Take a write session ~20% of the time.
+					takeWrite := rand.Intn(5) == 4
+					var (
+						sh     *sessionHandle
+						gotErr error
+					)
+					if takeWrite {
+						sh, gotErr = pool.takeWriteSession(context.Background())
+					} else {
+						sh, gotErr = pool.take(context.Background())
+					}
+					if gotErr != nil {
+						if pool.isValid() {
+							t.Errorf("%v.%v: pool.take returns error when pool is still valid: %v", ti, idx, gotErr)
+						}
+						if wantErr := errInvalidSessionPool(); !testEqual(gotErr, wantErr) {
+							t.Errorf("%v.%v: got error when pool is closed: %v, want %v", ti, idx, gotErr, wantErr)
+						}
+						continue
+					}
+					// Verify that the session is valid when the session pool is valid. Note that if the session pool is invalid after sh is taken,
+					// then sh might be invalidated by healthcheck workers.
+					if (sh.getID() == "" || sh.session == nil || !sh.session.isValid()) && pool.isValid() {
+						t.Errorf("%v.%v.%v: pool.take returns invalid session %v", ti, idx, takeWrite, sh.session)
+					}
+					if takeWrite && sh.getTransactionID() == nil {
+						t.Errorf("%v.%v: pool.takeWriteSession returns session %v without transaction", ti, idx, sh.session)
+					}
+					if rand.Intn(100) < idx {
+						// Random sleep before destroying/recycling the session, to give healthcheck worker a chance to step in.
+						<-time.After(time.Duration(rand.Int63n(int64(cfg.HealthCheckInterval))))
+					}
+					if rand.Intn(100) < idx {
+						// Destroy the session.
+						sh.destroy()
+						continue
+					}
+					// Recycle the session.
+					sh.recycle()
+				}
+			}(i, sp, sc)
+		}
+		wg.Wait()
+		sp.hc.close()
+		// Here the states of the healthchecker, session pool and mockclient are stable.
+		idleSessions := map[string]bool{}
+		hcSessions := map[string]bool{}
+		mockSessions := sc.DumpSessions()
+		// Dump the session pool's idle list.
+		for sl := sp.idleList.Front(); sl != nil; sl = sl.Next() {
+			s := sl.Value.(*session)
+			if idleSessions[s.getID()] {
+				t.Errorf("%v: found duplicated session in idle list: %v", ti, s.getID())
+			}
+			idleSessions[s.getID()] = true
+		}
+		for sl := sp.idleWriteList.Front(); sl != nil; sl = sl.Next() {
+			s := sl.Value.(*session)
+			if idleSessions[s.getID()] {
+				t.Errorf("%v: found duplicated session in idle write list: %v", ti, s.getID())
+			}
+			idleSessions[s.getID()] = true
+		}
+		sp.mu.Lock()
+		if int(sp.numOpened) != len(idleSessions) {
+			t.Errorf("%v: number of opened sessions (%v) != number of idle sessions (%v)", ti, sp.numOpened, len(idleSessions))
+		}
+		if sp.createReqs != 0 {
+			t.Errorf("%v: number of pending session creations = %v, want 0", ti, sp.createReqs)
+		}
+		// Dump the healthcheck queue.
+		for _, s := range sp.hc.queue.sessions {
+			if hcSessions[s.getID()] {
+				t.Errorf("%v: found duplicated session in healthcheck queue: %v", ti, s.getID())
+			}
+			hcSessions[s.getID()] = true
+		}
+		sp.mu.Unlock()
+
+		// Verify that idleSessions == hcSessions == mockSessions.
+		if !testEqual(idleSessions, hcSessions) {
+			t.Errorf("%v: sessions in idle list (%v) != sessions in healthcheck queue (%v)", ti, idleSessions, hcSessions)
+		}
+		if !testEqual(hcSessions, mockSessions) {
+			t.Errorf("%v: sessions in healthcheck queue (%v) != sessions in mockclient (%v)", ti, hcSessions, mockSessions)
+		}
+		sp.close()
+		mockSessions = sc.DumpSessions()
+		if len(mockSessions) != 0 {
+			t.Errorf("Found live sessions: %v", mockSessions)
+		}
+	}
+}
+
+// TestMaintainer checks that the session pool maintainer maintains the number of sessions in the following cases:
+// 1. On initialization of the session pool, replenish the session pool to meet MinOpened or MaxIdle.
+// 2. On increased session usage, provision extra MaxIdle sessions.
+// 3. After the surge passes, scale down the session pool accordingly.
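+// The timing assertions in this test rely on the aggressive intervals configured by setup
+// (a 10ms healthCheckSampleInterval and a 50ms HealthCheckInterval).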
+func TestMaintainer(t *testing.T) {
+	t.Parallel()
+	if testing.Short() {
+		t.SkipNow()
+	}
+	var (
+		minOpened uint64 = 5
+		maxIdle   uint64 = 4
+	)
+	sp, _, cancel := setup(t, SessionPoolConfig{MinOpened: minOpened, MaxIdle: maxIdle})
+	sampleInterval := sp.SessionPoolConfig.healthCheckSampleInterval
+	hcInterval := sp.SessionPoolConfig.HealthCheckInterval
+	defer cancel()
+
+	<-time.After(sampleInterval * 1)
+	sp.mu.Lock()
+	if sp.numOpened != 5 {
+		t.Errorf("Replenish. Expect %d open, got %d", sp.MinOpened, sp.numOpened)
+	}
+	sp.mu.Unlock()
+
+	// To save test time, we are not creating many sessions, because the time to create sessions
+	// will have an impact on the decision on sessionsToKeep. We also interleave the take and
+	// recycle steps with the maintainer's sampling intervals.
+	shs := make([]*sessionHandle, 10)
+	for i := 0; i < len(shs); i++ {
+		var err error
+		shs[i], err = sp.take(context.Background())
+		if err != nil {
+			t.Errorf("cannot get session from session pool: %v", err)
+		}
+	}
+	sp.mu.Lock()
+	if sp.numOpened != 10 {
+		t.Errorf("Scale out from normal use. Expect %d open, got %d", 10, sp.numOpened)
+	}
+	sp.mu.Unlock()
+
+	<-time.After(sampleInterval)
+	for _, sh := range shs[:7] {
+		sh.recycle()
+	}
+
+	<-time.After(sampleInterval * 2)
+	sp.mu.Lock()
+	if sp.numOpened != 7 {
+		t.Errorf("Keep extra MaxIdle sessions. Expect %d open, got %d", 7, sp.numOpened)
+	}
+	sp.mu.Unlock()
+
+	for _, sh := range shs[7:] {
+		sh.recycle()
+	}
+	<-time.After(sampleInterval*10 + hcInterval)
+	sp.mu.Lock()
+	if sp.numOpened != minOpened {
+		t.Errorf("Scale down. Expect %d open, got %d", minOpened, sp.numOpened)
+	}
+	sp.mu.Unlock()
+}
+
+func (s1 *session) Equal(s2 *session) bool {
+	return s1.client == s2.client &&
+		s1.id == s2.id &&
+		s1.pool == s2.pool &&
+		s1.createTime == s2.createTime &&
+		s1.valid == s2.valid &&
+		s1.hcIndex == s2.hcIndex &&
+		s1.idleList == s2.idleList &&
+		s1.nextCheck.Equal(s2.nextCheck) &&
+		s1.checkingHealth == s2.checkingHealth &&
+		testEqual(s1.md, s2.md) &&
+		bytes.Equal(s1.tx, s2.tx)
+}
diff --git a/vendor/cloud.google.com/go/spanner/spanner_test.go b/vendor/cloud.google.com/go/spanner/spanner_test.go
new file mode 100644
index 0000000..d8dbec1
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/spanner_test.go
@@ -0,0 +1,1879 @@
+/*
+Copyright 2017 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spanner
+
+import (
+	"errors"
+	"flag"
+	"fmt"
+	"log"
+	"math"
+	"os"
+	"reflect"
+	"strings"
+	"sync"
+	"testing"
+	"time"
+
+	"cloud.google.com/go/civil"
+	"cloud.google.com/go/internal/testutil"
+	database "cloud.google.com/go/spanner/admin/database/apiv1"
+	"golang.org/x/net/context"
+	"google.golang.org/api/iterator"
+	"google.golang.org/api/option"
+	"google.golang.org/grpc/codes"
+
+	adminpb "google.golang.org/genproto/googleapis/spanner/admin/database/v1"
+)
+
+var (
+	// testProjectID specifies the project used for testing.
+	// It can be changed by setting environment variable GCLOUD_TESTS_GOLANG_PROJECT_ID.
+	testProjectID = testutil.ProjID()
+	// testInstanceID specifies the Cloud Spanner instance used for testing.
+	testInstanceID = "go-integration-test"
+
+	// admin is a spanner.DatabaseAdminClient.
+	admin *database.DatabaseAdminClient
+)
+
+var (
+	singerDBStatements = []string{
+		`CREATE TABLE Singers (
+			SingerId	INT64 NOT NULL,
+			FirstName	STRING(1024),
+			LastName	STRING(1024),
+			SingerInfo	BYTES(MAX)
+		) PRIMARY KEY (SingerId)`,
+		`CREATE INDEX SingerByName ON Singers(FirstName, LastName)`,
+		`CREATE TABLE Accounts (
+			AccountId	INT64 NOT NULL,
+			Nickname	STRING(100),
+			Balance		INT64 NOT NULL,
+		) PRIMARY KEY (AccountId)`,
+		`CREATE INDEX AccountByNickname ON Accounts(Nickname) STORING (Balance)`,
+		`CREATE TABLE Types (
+			RowID		INT64 NOT NULL,
+			String		STRING(MAX),
+			StringArray	ARRAY<STRING(MAX)>,
+			Bytes		BYTES(MAX),
+			BytesArray	ARRAY<BYTES(MAX)>,
+			Int64a		INT64,
+			Int64Array	ARRAY<INT64>,
+			Bool		BOOL,
+			BoolArray	ARRAY<BOOL>,
+			Float64		FLOAT64,
+			Float64Array	ARRAY<FLOAT64>,
+			Date		DATE,
+			DateArray	ARRAY<DATE>,
+			Timestamp	TIMESTAMP,
+			TimestampArray	ARRAY<TIMESTAMP>,
+		) PRIMARY KEY (RowID)`,
+	}
+
+	readDBStatements = []string{
+		`CREATE TABLE TestTable (
+			Key		STRING(MAX) NOT NULL,
+			StringValue	STRING(MAX)
+		) PRIMARY KEY (Key)`,
+		`CREATE INDEX TestTableByValue ON TestTable(StringValue)`,
+		`CREATE INDEX TestTableByValueDesc ON TestTable(StringValue DESC)`,
+	}
+
+	simpleDBStatements = []string{
+		`CREATE TABLE test (
+			a	STRING(1024),
+			b	STRING(1024),
+		) PRIMARY KEY (a)`,
+	}
+	simpleDBTableColumns = []string{"a", "b"}
+
+	ctsDBStatements = []string{
+		`CREATE TABLE TestTable (
+			Key	STRING(MAX) NOT NULL,
+			Ts	TIMESTAMP OPTIONS (allow_commit_timestamp = true),
+		) PRIMARY KEY (Key)`,
+	}
+)
+
+const (
+	str1 = "alice"
+	str2 = "a@example.com"
+)
+
+type testTableRow struct{ Key, StringValue string }
+
+func TestMain(m *testing.M) {
+	initIntegrationTest()
+	os.Exit(m.Run())
+}
+
+func initIntegrationTest() {
+	flag.Parse() // needed for testing.Short()
+	if testing.Short() {
+		return
+	}
+	if testProjectID == "" {
+		log.Print("Integration tests skipped: GCLOUD_TESTS_GOLANG_PROJECT_ID is missing")
+		return
+	}
+	ctx := context.Background()
+	ts := testutil.TokenSource(ctx, AdminScope, Scope)
+	if ts == nil {
+		log.Printf("Integration test skipped: cannot get service account credential from environment variable %v", "GCLOUD_TESTS_GOLANG_KEY")
+		return
+	}
+	var err error
+	// Create Admin client and Data client.
+	admin, err = database.NewDatabaseAdminClient(ctx, option.WithTokenSource(ts), option.WithEndpoint(endpoint))
+	if err != nil {
+		log.Fatalf("cannot create admin client: %v", err)
+	}
+}
+
+var (
+	mu    sync.Mutex
+	count int
+	now   = time.Now()
+)
+
+// prepare initializes Cloud Spanner testing DB and clients.
+func prepare(ctx context.Context, t *testing.T, statements []string) (client *Client, dbPath string, tearDown func()) {
+	if admin == nil {
+		t.Skip("Integration tests skipped")
+	}
+	// Construct a unique test DB name.
+	mu.Lock()
+	dbName := fmt.Sprintf("gotest_%d_%d", now.UnixNano(), count)
+	count++
+	mu.Unlock()
+
+	dbPath = fmt.Sprintf("projects/%v/instances/%v/databases/%v", testProjectID, testInstanceID, dbName)
+	// Create database and tables.
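+	// CreateDatabase returns a long-running operation; op.Wait below blocks until the
+	// database is actually ready before the data client is created.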
+	op, err := admin.CreateDatabase(ctx, &adminpb.CreateDatabaseRequest{
+		Parent:          fmt.Sprintf("projects/%v/instances/%v", testProjectID, testInstanceID),
+		CreateStatement: "CREATE DATABASE " + dbName,
+		ExtraStatements: statements,
+	})
+	if err != nil {
+		t.Fatalf("cannot create testing DB %v: %v", dbPath, err)
+	}
+	if _, err := op.Wait(ctx); err != nil {
+		t.Fatalf("cannot create testing DB %v: %v", dbPath, err)
+	}
+	client, err = NewClientWithConfig(ctx, dbPath, ClientConfig{
+		SessionPoolConfig: SessionPoolConfig{WriteSessions: 0.2},
+	}, option.WithTokenSource(testutil.TokenSource(ctx, Scope)), option.WithEndpoint(endpoint))
+	if err != nil {
+		t.Fatalf("cannot create data client on DB %v: %v", dbPath, err)
+	}
+	return client, dbPath, func() {
+		client.Close()
+		if err := admin.DropDatabase(ctx, &adminpb.DropDatabaseRequest{Database: dbPath}); err != nil {
+			t.Logf("failed to drop database %s (error %v), might need a manual removal",
+				dbPath, err)
+		}
+	}
+}
+
+// Test SingleUse transaction.
+func TestSingleUse(t *testing.T) {
+	t.Parallel()
+	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
+	defer cancel()
+	// Set up testing environment.
+	client, _, tearDown := prepare(ctx, t, singerDBStatements)
+	defer tearDown()
+
+	writes := []struct {
+		row []interface{}
+		ts  time.Time
+	}{
+		{row: []interface{}{1, "Marc", "Foo"}},
+		{row: []interface{}{2, "Tars", "Bar"}},
+		{row: []interface{}{3, "Alpha", "Beta"}},
+		{row: []interface{}{4, "Last", "End"}},
+	}
+	// Try to write four rows through the Apply API.
+	for i, w := range writes {
+		var err error
+		m := InsertOrUpdate("Singers",
+			[]string{"SingerId", "FirstName", "LastName"},
+			w.row)
+		if writes[i].ts, err = client.Apply(ctx, []*Mutation{m}, ApplyAtLeastOnce()); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	// For testing timestamp bound staleness.
+	<-time.After(time.Second)
+
+	// Test reading rows with different timestamp bounds.
+	for i, test := range []struct {
+		want    [][]interface{}
+		tb      TimestampBound
+		checkTs func(time.Time) error
+	}{
+		{
+			// strong
+			[][]interface{}{{int64(1), "Marc", "Foo"}, {int64(3), "Alpha", "Beta"}, {int64(4), "Last", "End"}},
+			StrongRead(),
+			func(ts time.Time) error {
+				// writes[3] is the last write; all subsequent strong reads should have a timestamp no earlier than that.
+				if ts.Before(writes[3].ts) {
+					return fmt.Errorf("read got timestamp %v, want it to be no earlier than %v", ts, writes[3].ts)
+				}
+				return nil
+			},
+		},
+		{
+			// min_read_timestamp
+			[][]interface{}{{int64(1), "Marc", "Foo"}, {int64(3), "Alpha", "Beta"}, {int64(4), "Last", "End"}},
+			MinReadTimestamp(writes[3].ts),
+			func(ts time.Time) error {
+				if ts.Before(writes[3].ts) {
+					return fmt.Errorf("read got timestamp %v, want it to be no earlier than %v", ts, writes[3].ts)
+				}
+				return nil
+			},
+		},
+		{
+			// max_staleness
+			[][]interface{}{{int64(1), "Marc", "Foo"}, {int64(3), "Alpha", "Beta"}, {int64(4), "Last", "End"}},
+			MaxStaleness(time.Second),
+			func(ts time.Time) error {
+				if ts.Before(writes[3].ts) {
+					return fmt.Errorf("read got timestamp %v, want it to be no earlier than %v", ts, writes[3].ts)
+				}
+				return nil
+			},
+		},
+		{
+			// read_timestamp
+			[][]interface{}{{int64(1), "Marc", "Foo"}, {int64(3), "Alpha", "Beta"}},
+			ReadTimestamp(writes[2].ts),
+			func(ts time.Time) error {
+				if ts != writes[2].ts {
+					return fmt.Errorf("read got timestamp %v, want %v", ts, writes[2].ts)
+				}
+				return nil
+			},
+		},
+		{
+			// exact_staleness
+			nil,
+			// Specify a staleness large enough that the read timestamp falls before this test's first write.
+			ExactStaleness(11 * time.Second),
+			func(ts time.Time) error {
+				if ts.After(writes[0].ts) {
+					return fmt.Errorf("read got timestamp %v, want it to be no later than %v", ts, writes[0].ts)
+				}
+				return nil
+			},
+		},
+	} {
+		// SingleUse.Query
+		su := client.Single().WithTimestampBound(test.tb)
+		got, err := readAll(su.Query(
+			ctx,
+			Statement{
+				"SELECT SingerId, FirstName, LastName FROM Singers WHERE SingerId IN (@id1, @id3, @id4)",
+				map[string]interface{}{"id1": int64(1), "id3": int64(3), "id4": int64(4)},
+			}))
+		if err != nil {
+			t.Errorf("%d: SingleUse.Query returns error %v, want nil", i, err)
+		}
+		if !testEqual(got, test.want) {
+			t.Errorf("%d: got unexpected result from SingleUse.Query: %v, want %v", i, got, test.want)
+		}
+		rts, err := su.Timestamp()
+		if err != nil {
+			t.Errorf("%d: SingleUse.Query doesn't return a timestamp, error: %v", i, err)
+		}
+		if err := test.checkTs(rts); err != nil {
+			t.Errorf("%d: SingleUse.Query doesn't return expected timestamp: %v", i, err)
+		}
+		// SingleUse.Read
+		su = client.Single().WithTimestampBound(test.tb)
+		got, err = readAll(su.Read(ctx, "Singers", KeySets(Key{1}, Key{3}, Key{4}), []string{"SingerId", "FirstName", "LastName"}))
+		if err != nil {
+			t.Errorf("%d: SingleUse.Read returns error %v, want nil", i, err)
+		}
+		if !testEqual(got, test.want) {
+			t.Errorf("%d: got unexpected result from SingleUse.Read: %v, want %v", i, got, test.want)
+		}
+		rts, err = su.Timestamp()
+		if err != nil {
+			t.Errorf("%d: SingleUse.Read doesn't return a timestamp, error: %v", i, err)
+		}
+		if err := test.checkTs(rts); err != nil {
+			t.Errorf("%d: SingleUse.Read doesn't return expected timestamp: %v", i, err)
+		}
+		// SingleUse.ReadRow
+		got = nil
+		for _, k := range []Key{Key{1}, Key{3}, Key{4}} {
+			su = client.Single().WithTimestampBound(test.tb)
+			r, err := su.ReadRow(ctx, "Singers", k, []string{"SingerId", "FirstName", "LastName"})
+			if err != nil {
+				continue
+			}
+			v, err := rowToValues(r)
+			if err != nil {
+				continue
+			}
+			got = append(got, v)
+			rts, err = su.Timestamp()
+			if err != nil {
+				t.Errorf("%d: SingleUse.ReadRow(%v) doesn't return a timestamp, error: %v", i, k, err)
+			}
+			if err := test.checkTs(rts); err != nil {
+				t.Errorf("%d: SingleUse.ReadRow(%v) doesn't return expected timestamp: %v", i, k,
+					err)
+			}
+		}
+		if !testEqual(got, test.want) {
+			t.Errorf("%d: got unexpected results from SingleUse.ReadRow: %v, want %v", i, got, test.want)
+		}
+		// SingleUse.ReadUsingIndex
+		su = client.Single().WithTimestampBound(test.tb)
+		got, err = readAll(su.ReadUsingIndex(ctx, "Singers", "SingerByName", KeySets(Key{"Marc", "Foo"}, Key{"Alpha", "Beta"}, Key{"Last", "End"}), []string{"SingerId", "FirstName", "LastName"}))
+		if err != nil {
+			t.Errorf("%d: SingleUse.ReadUsingIndex returns error %v, want nil", i, err)
+		}
+		// The results from ReadUsingIndex are sorted by the index rather than the primary key.
+		if len(got) != len(test.want) {
+			t.Errorf("%d: got unexpected result from SingleUse.ReadUsingIndex: %v, want %v", i, got, test.want)
+		}
+		for j, g := range got {
+			if j > 0 {
+				prev := got[j-1][1].(string) + got[j-1][2].(string)
+				curr := got[j][1].(string) + got[j][2].(string)
+				if strings.Compare(prev, curr) > 0 {
+					t.Errorf("%d: SingleUse.ReadUsingIndex fails to order rows by index keys, %v should be after %v", i, got[j-1], got[j])
+				}
+			}
+			found := false
+			for _, w := range test.want {
+				if testEqual(g, w) {
+					found = true
+				}
+			}
+			if !found {
+				t.Errorf("%d: got unexpected result from SingleUse.ReadUsingIndex: %v, want %v", i, got, test.want)
+				break
+			}
+		}
+		rts, err = su.Timestamp()
+		if err != nil {
+			t.Errorf("%d: SingleUse.ReadUsingIndex doesn't return a timestamp, error: %v", i, err)
+		}
+		if err := test.checkTs(rts); err != nil {
+			t.Errorf("%d: SingleUse.ReadUsingIndex doesn't return expected timestamp: %v", i, err)
+		}
+	}
+
+	// Reading with limit.
+	su := client.Single()
+	const limit = 1
+	gotRows, err := readAll(su.ReadWithOptions(ctx, "Singers", KeySets(Key{1}, Key{3}, Key{4}),
+		[]string{"SingerId", "FirstName", "LastName"}, &ReadOptions{Limit: limit}))
+	if err != nil {
+		t.Errorf("SingleUse.ReadWithOptions returns error %v, want nil", err)
+	}
+	if got, want := len(gotRows), limit; got != want {
+		t.Errorf("got %d, want %d", got, want)
+	}
+
+}
+
+// Test ReadOnlyTransaction. The test suite is mostly like SingleUse, except it
+// also tests for a single timestamp across multiple reads.
+func TestReadOnlyTransaction(t *testing.T) {
+	t.Parallel()
+	ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second)
+	defer cancel()
+	// Set up testing environment.
+	client, _, tearDown := prepare(ctx, t, singerDBStatements)
+	defer tearDown()
+
+	writes := []struct {
+		row []interface{}
+		ts  time.Time
+	}{
+		{row: []interface{}{1, "Marc", "Foo"}},
+		{row: []interface{}{2, "Tars", "Bar"}},
+		{row: []interface{}{3, "Alpha", "Beta"}},
+		{row: []interface{}{4, "Last", "End"}},
+	}
+	// Try to write four rows through the Apply API.
+	for i, w := range writes {
+		var err error
+		m := InsertOrUpdate("Singers",
+			[]string{"SingerId", "FirstName", "LastName"},
+			w.row)
+		if writes[i].ts, err = client.Apply(ctx, []*Mutation{m}, ApplyAtLeastOnce()); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	// For testing timestamp bound staleness.
+	<-time.After(time.Second)
+
+	// Test reading rows with different timestamp bounds.
+	for i, test := range []struct {
+		want    [][]interface{}
+		tb      TimestampBound
+		checkTs func(time.Time) error
+	}{
+		// Note: min_read_timestamp and max_staleness are not supported by ReadOnlyTransaction. See
+		// the API documentation for more details.
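+		// Unlike SingleUse, all reads within one ReadOnlyTransaction must observe the same
+		// read timestamp; the roTs comparisons below verify this.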
+ {
+ // strong
+ [][]interface{}{{int64(1), "Marc", "Foo"}, {int64(3), "Alpha", "Beta"}, {int64(4), "Last", "End"}},
+ StrongRead(),
+ func(ts time.Time) error {
+ if ts.Before(writes[3].ts) {
+ return fmt.Errorf("read got timestamp %v, want it to be no earlier than %v", ts, writes[3].ts)
+ }
+ return nil
+ },
+ },
+ {
+ // read_timestamp
+ [][]interface{}{{int64(1), "Marc", "Foo"}, {int64(3), "Alpha", "Beta"}},
+ ReadTimestamp(writes[2].ts),
+ func(ts time.Time) error {
+ if ts != writes[2].ts {
+ return fmt.Errorf("read got timestamp %v, want %v", ts, writes[2].ts)
+ }
+ return nil
+ },
+ },
+ {
+ // exact_staleness
+ nil,
+ // Specify a staleness large enough that the chosen read timestamp
+ // should predate all of this test's writes, which happen within its
+ // first few seconds.
+ ExactStaleness(11 * time.Second),
+ func(ts time.Time) error {
+ if ts.After(writes[0].ts) {
+ return fmt.Errorf("read got timestamp %v, want it to be no later than %v", ts, writes[0].ts)
+ }
+ return nil
+ },
+ },
+ } {
+ // ReadOnlyTransaction.Query
+ ro := client.ReadOnlyTransaction().WithTimestampBound(test.tb)
+ got, err := readAll(ro.Query(
+ ctx,
+ Statement{
+ "SELECT SingerId, FirstName, LastName FROM Singers WHERE SingerId IN (@id1, @id3, @id4)",
+ map[string]interface{}{"id1": int64(1), "id3": int64(3), "id4": int64(4)},
+ }))
+ if err != nil {
+ t.Errorf("%d: ReadOnlyTransaction.Query returns error %v, want nil", i, err)
+ }
+ if !testEqual(got, test.want) {
+ t.Errorf("%d: got unexpected result from ReadOnlyTransaction.Query: %v, want %v", i, got, test.want)
+ }
+ rts, err := ro.Timestamp()
+ if err != nil {
+ t.Errorf("%d: ReadOnlyTransaction.Query doesn't return a timestamp, error: %v", i, err)
+ }
+ if err := test.checkTs(rts); err != nil {
+ t.Errorf("%d: ReadOnlyTransaction.Query doesn't return expected timestamp: %v", i, err)
+ }
+ roTs := rts
+ // ReadOnlyTransaction.Read
+ got, err = readAll(ro.Read(ctx, "Singers", KeySets(Key{1}, Key{3}, Key{4}), []string{"SingerId", "FirstName", "LastName"}))
+ if err != nil {
+ t.Errorf("%d: ReadOnlyTransaction.Read returns error %v, want nil", i, err)
+ }
+ if !testEqual(got, test.want) {
+ t.Errorf("%d: got unexpected result from ReadOnlyTransaction.Read: %v, want %v", i, got, test.want)
+ }
+ rts, err = ro.Timestamp()
+ if err != nil {
+ t.Errorf("%d: ReadOnlyTransaction.Read doesn't return a timestamp, error: %v", i, err)
+ }
+ if err := test.checkTs(rts); err != nil {
+ t.Errorf("%d: ReadOnlyTransaction.Read doesn't return expected timestamp: %v", i, err)
+ }
+ if roTs != rts {
+ t.Errorf("%d: got two read timestamps: %v, %v, want ReadOnlyTransaction to always return the same read timestamp", i, roTs, rts)
+ }
+ // ReadOnlyTransaction.ReadRow
+ got = nil
+ for _, k := range []Key{Key{1}, Key{3}, Key{4}} {
+ r, err := ro.ReadRow(ctx, "Singers", k, []string{"SingerId", "FirstName", "LastName"})
+ if err != nil {
+ continue
+ }
+ v, err := rowToValues(r)
+ if err != nil {
+ continue
+ }
+ got = append(got, v)
+ rts, err = ro.Timestamp()
+ if err != nil {
+ t.Errorf("%d: ReadOnlyTransaction.ReadRow(%v) doesn't return a timestamp, error: %v", i, k, err)
+ }
+ if err := test.checkTs(rts); err != nil {
+ t.Errorf("%d: ReadOnlyTransaction.ReadRow(%v) doesn't return expected timestamp: %v", i, k, err)
+ }
+ if roTs != rts {
+ t.Errorf("%d: got two read timestamps: %v, %v, want ReadOnlyTransaction to always return the same read timestamp", i, roTs, rts)
+ }
+ }
+ if !testEqual(got, test.want) {
+ t.Errorf("%d: got unexpected results from ReadOnlyTransaction.ReadRow: %v, want %v", i, got, test.want)
+ }
+ // ReadOnlyTransaction.ReadUsingIndex
+ got, err = readAll(ro.ReadUsingIndex(ctx, "Singers", "SingerByName", KeySets(Key{"Marc", "Foo"}, Key{"Alpha", "Beta"}, Key{"Last", "End"}), []string{"SingerId", "FirstName", "LastName"}))
+ if err != nil {
+ t.Errorf("%d: ReadOnlyTransaction.ReadUsingIndex returns error %v, want nil", i, err)
+ }
+ // The results from ReadUsingIndex are sorted by the index key rather than the primary key.
+ if len(got) != len(test.want) {
+ t.Errorf("%d: got unexpected result from ReadOnlyTransaction.ReadUsingIndex: %v, want %v", i, got, test.want)
+ }
+ for j, g := range got {
+ if j > 0 {
+ prev := got[j-1][1].(string) + got[j-1][2].(string)
+ curr := got[j][1].(string) + got[j][2].(string)
+ if strings.Compare(prev, curr) > 0 {
+ t.Errorf("%d: ReadOnlyTransaction.ReadUsingIndex fails to order rows by index keys, %v should be after %v", i, got[j-1], got[j])
+ }
+ }
+ found := false
+ for _, w := range test.want {
+ if testEqual(g, w) {
+ found = true
+ }
+ }
+ if !found {
+ t.Errorf("%d: got unexpected result from ReadOnlyTransaction.ReadUsingIndex: %v, want %v", i, got, test.want)
+ break
+ }
+ }
+ rts, err = ro.Timestamp()
+ if err != nil {
+ t.Errorf("%d: ReadOnlyTransaction.ReadUsingIndex doesn't return a timestamp, error: %v", i, err)
+ }
+ if err := test.checkTs(rts); err != nil {
+ t.Errorf("%d: ReadOnlyTransaction.ReadUsingIndex doesn't return expected timestamp: %v", i, err)
+ }
+ if roTs != rts {
+ t.Errorf("%d: got two read timestamps: %v, %v, want ReadOnlyTransaction to always return the same read timestamp", i, roTs, rts)
+ }
+ ro.Close()
+ }
+}
+
+// Test ReadOnlyTransaction with different timestamp bounds while an update
+// happens concurrently.
+func TestUpdateDuringRead(t *testing.T) {
+ t.Parallel()
+ ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second)
+ defer cancel()
+ client, _, tearDown := prepare(ctx, t, singerDBStatements)
+ defer tearDown()
+
+ for i, tb := range []TimestampBound{
+ StrongRead(),
+ ReadTimestamp(time.Now().Add(-time.Minute * 30)), // version GC is 1 hour
+ ExactStaleness(time.Minute * 30),
+ } {
+ ro := client.ReadOnlyTransaction().WithTimestampBound(tb)
+ _, err := ro.ReadRow(ctx, "Singers", Key{i}, []string{"SingerId"})
+ if ErrCode(err) != codes.NotFound {
+ t.Errorf("%d: ReadOnlyTransaction.ReadRow before write returns error: %v, want NotFound", i, err)
+ }
+
+ m := InsertOrUpdate("Singers", []string{"SingerId"}, []interface{}{i})
+ if _, err := client.Apply(ctx, []*Mutation{m}, ApplyAtLeastOnce()); err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = ro.ReadRow(ctx, "Singers", Key{i}, []string{"SingerId"})
+ if ErrCode(err) != codes.NotFound {
+ t.Errorf("%d: ReadOnlyTransaction.ReadRow after write returns error: %v, want NotFound", i, err)
+ }
+ }
+}
+
+// Test ReadWriteTransaction.
+func TestReadWriteTransaction(t *testing.T) {
+ t.Parallel()
+ // Give a longer deadline because of transaction backoffs.
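+ // The test spawns 20 concurrent transfers, each moving one unit from
+ // account 1 to account 2 while account 1 stays positive; conflicting
+ // transactions abort and are retried transparently by
+ // ReadWriteTransaction, which is what forces the long deadline.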
+ ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel() + client, _, tearDown := prepare(ctx, t, singerDBStatements) + defer tearDown() + + // Set up two accounts + accounts := []*Mutation{ + Insert("Accounts", []string{"AccountId", "Nickname", "Balance"}, []interface{}{int64(1), "Foo", int64(50)}), + Insert("Accounts", []string{"AccountId", "Nickname", "Balance"}, []interface{}{int64(2), "Bar", int64(1)}), + } + if _, err := client.Apply(ctx, accounts, ApplyAtLeastOnce()); err != nil { + t.Fatal(err) + } + wg := sync.WaitGroup{} + + readBalance := func(iter *RowIterator) (int64, error) { + defer iter.Stop() + var bal int64 + for { + row, err := iter.Next() + if err == iterator.Done { + return bal, nil + } + if err != nil { + return 0, err + } + if err := row.Column(0, &bal); err != nil { + return 0, err + } + } + } + + for i := 0; i < 20; i++ { + wg.Add(1) + go func(iter int) { + defer wg.Done() + _, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error { + // Query Foo's balance and Bar's balance. + bf, e := readBalance(tx.Query(ctx, + Statement{"SELECT Balance FROM Accounts WHERE AccountId = @id", map[string]interface{}{"id": int64(1)}})) + if e != nil { + return e + } + bb, e := readBalance(tx.Read(ctx, "Accounts", KeySets(Key{int64(2)}), []string{"Balance"})) + if e != nil { + return e + } + if bf <= 0 { + return nil + } + bf-- + bb++ + tx.BufferWrite([]*Mutation{ + Update("Accounts", []string{"AccountId", "Balance"}, []interface{}{int64(1), bf}), + Update("Accounts", []string{"AccountId", "Balance"}, []interface{}{int64(2), bb}), + }) + return nil + }) + if err != nil { + t.Fatalf("%d: failed to execute transaction: %v", iter, err) + } + }(i) + } + // Because of context timeout, all goroutines will eventually return. + wg.Wait() + _, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error { + var bf, bb int64 + r, e := tx.ReadRow(ctx, "Accounts", Key{int64(1)}, []string{"Balance"}) + if e != nil { + return e + } + if ce := r.Column(0, &bf); ce != nil { + return ce + } + bb, e = readBalance(tx.ReadUsingIndex(ctx, "Accounts", "AccountByNickname", KeySets(Key{"Bar"}), []string{"Balance"})) + if e != nil { + return e + } + if bf != 30 || bb != 21 { + t.Errorf("Foo's balance is now %v and Bar's balance is now %v, want %v and %v", bf, bb, 30, 21) + } + return nil + }) + if err != nil { + t.Errorf("failed to check balances: %v", err) + } +} + +const ( + testTable = "TestTable" + testTableIndex = "TestTableByValue" +) + +var testTableColumns = []string{"Key", "StringValue"} + +func TestReads(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second) + defer cancel() + // Set up testing environment. + client, _, tearDown := prepare(ctx, t, readDBStatements) + defer tearDown() + + // Includes k0..k14. Strings sort lexically, eg "k1" < "k10" < "k2". + var ms []*Mutation + for i := 0; i < 15; i++ { + ms = append(ms, InsertOrUpdate(testTable, + testTableColumns, + []interface{}{fmt.Sprintf("k%d", i), fmt.Sprintf("v%d", i)})) + } + // Don't use ApplyAtLeastOnce, so we can test the other code path. + if _, err := client.Apply(ctx, ms); err != nil { + t.Fatal(err) + } + + // Empty read. 
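+ // The table holds k0..k14, and string keys sort lexically, so "k99"
+ // (which sorts after "k9", the largest key present) starts a range that
+ // matches nothing.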
+ rows, err := readAllTestTable(client.Single().Read(ctx, testTable, + KeyRange{Start: Key{"k99"}, End: Key{"z"}}, testTableColumns)) + if err != nil { + t.Fatal(err) + } + if got, want := len(rows), 0; got != want { + t.Errorf("got %d, want %d", got, want) + } + + // Index empty read. + rows, err = readAllTestTable(client.Single().ReadUsingIndex(ctx, testTable, testTableIndex, + KeyRange{Start: Key{"v99"}, End: Key{"z"}}, testTableColumns)) + if err != nil { + t.Fatal(err) + } + if got, want := len(rows), 0; got != want { + t.Errorf("got %d, want %d", got, want) + } + + // Point read. + row, err := client.Single().ReadRow(ctx, testTable, Key{"k1"}, testTableColumns) + if err != nil { + t.Fatal(err) + } + var got testTableRow + if err := row.ToStruct(&got); err != nil { + t.Fatal(err) + } + if want := (testTableRow{"k1", "v1"}); got != want { + t.Errorf("got %v, want %v", got, want) + } + + // Point read not found. + _, err = client.Single().ReadRow(ctx, testTable, Key{"k999"}, testTableColumns) + if ErrCode(err) != codes.NotFound { + t.Fatalf("got %v, want NotFound", err) + } + + // No index point read not found, because Go does not have ReadRowUsingIndex. + + rangeReads(ctx, t, client) + indexRangeReads(ctx, t, client) +} + +func rangeReads(ctx context.Context, t *testing.T, client *Client) { + checkRange := func(ks KeySet, wantNums ...int) { + if msg, ok := compareRows(client.Single().Read(ctx, testTable, ks, testTableColumns), wantNums); !ok { + t.Errorf("key set %+v: %s", ks, msg) + } + } + + checkRange(Key{"k1"}, 1) + checkRange(KeyRange{Key{"k3"}, Key{"k5"}, ClosedOpen}, 3, 4) + checkRange(KeyRange{Key{"k3"}, Key{"k5"}, ClosedClosed}, 3, 4, 5) + checkRange(KeyRange{Key{"k3"}, Key{"k5"}, OpenClosed}, 4, 5) + checkRange(KeyRange{Key{"k3"}, Key{"k5"}, OpenOpen}, 4) + + // Partial key specification. + checkRange(KeyRange{Key{"k7"}, Key{}, ClosedClosed}, 7, 8, 9) + checkRange(KeyRange{Key{"k7"}, Key{}, OpenClosed}, 8, 9) + checkRange(KeyRange{Key{}, Key{"k11"}, ClosedOpen}, 0, 1, 10) + checkRange(KeyRange{Key{}, Key{"k11"}, ClosedClosed}, 0, 1, 10, 11) + + // The following produce empty ranges. + // TODO(jba): Consider a multi-part key to illustrate partial key behavior. + // checkRange(KeyRange{Key{"k7"}, Key{}, ClosedOpen}) + // checkRange(KeyRange{Key{"k7"}, Key{}, OpenOpen}) + // checkRange(KeyRange{Key{}, Key{"k11"}, OpenOpen}) + // checkRange(KeyRange{Key{}, Key{"k11"}, OpenClosed}) + + // Prefix is component-wise, not string prefix. + checkRange(Key{"k1"}.AsPrefix(), 1) + checkRange(KeyRange{Key{"k1"}, Key{"k2"}, ClosedOpen}, 1, 10, 11, 12, 13, 14) + + checkRange(AllKeys(), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14) +} + +func indexRangeReads(ctx context.Context, t *testing.T, client *Client) { + checkRange := func(ks KeySet, wantNums ...int) { + if msg, ok := compareRows(client.Single().ReadUsingIndex(ctx, testTable, testTableIndex, ks, testTableColumns), + wantNums); !ok { + t.Errorf("key set %+v: %s", ks, msg) + } + } + + checkRange(Key{"v1"}, 1) + checkRange(KeyRange{Key{"v3"}, Key{"v5"}, ClosedOpen}, 3, 4) + checkRange(KeyRange{Key{"v3"}, Key{"v5"}, ClosedClosed}, 3, 4, 5) + checkRange(KeyRange{Key{"v3"}, Key{"v5"}, OpenClosed}, 4, 5) + checkRange(KeyRange{Key{"v3"}, Key{"v5"}, OpenOpen}, 4) + + // // Partial key specification. 
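+ // As in rangeReads above, an empty Key{} start means the range begins at
+ // the start of the index and an empty Key{} end means it runs to the end,
+ // so the closed-closed range from "v7" onward picks up the lexically-last
+ // values v7..v9.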
+ checkRange(KeyRange{Key{"v7"}, Key{}, ClosedClosed}, 7, 8, 9) + checkRange(KeyRange{Key{"v7"}, Key{}, OpenClosed}, 8, 9) + checkRange(KeyRange{Key{}, Key{"v11"}, ClosedOpen}, 0, 1, 10) + checkRange(KeyRange{Key{}, Key{"v11"}, ClosedClosed}, 0, 1, 10, 11) + + // // The following produce empty ranges. + // checkRange(KeyRange{Key{"v7"}, Key{}, ClosedOpen}) + // checkRange(KeyRange{Key{"v7"}, Key{}, OpenOpen}) + // checkRange(KeyRange{Key{}, Key{"v11"}, OpenOpen}) + // checkRange(KeyRange{Key{}, Key{"v11"}, OpenClosed}) + + // // Prefix is component-wise, not string prefix. + checkRange(Key{"v1"}.AsPrefix(), 1) + checkRange(KeyRange{Key{"v1"}, Key{"v2"}, ClosedOpen}, 1, 10, 11, 12, 13, 14) + checkRange(AllKeys(), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14) + + // Read from an index with DESC ordering. + wantNums := []int{14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0} + if msg, ok := compareRows(client.Single().ReadUsingIndex(ctx, testTable, "TestTableByValueDesc", AllKeys(), testTableColumns), + wantNums); !ok { + t.Errorf("desc: %s", msg) + } +} + +func compareRows(iter *RowIterator, wantNums []int) (string, bool) { + rows, err := readAllTestTable(iter) + if err != nil { + return err.Error(), false + } + want := map[string]string{} + for _, n := range wantNums { + want[fmt.Sprintf("k%d", n)] = fmt.Sprintf("v%d", n) + } + got := map[string]string{} + for _, r := range rows { + got[r.Key] = r.StringValue + } + if !testEqual(got, want) { + return fmt.Sprintf("got %v, want %v", got, want), false + } + return "", true +} + +func TestEarlyTimestamp(t *testing.T) { + t.Parallel() + // Test that we can get the timestamp from a read-only transaction as + // soon as we have read at least one row. + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + // Set up testing environment. + client, _, tearDown := prepare(ctx, t, readDBStatements) + defer tearDown() + + var ms []*Mutation + for i := 0; i < 3; i++ { + ms = append(ms, InsertOrUpdate(testTable, + testTableColumns, + []interface{}{fmt.Sprintf("k%d", i), fmt.Sprintf("v%d", i)})) + } + if _, err := client.Apply(ctx, ms, ApplyAtLeastOnce()); err != nil { + t.Fatal(err) + } + + txn := client.Single() + iter := txn.Read(ctx, testTable, AllKeys(), testTableColumns) + defer iter.Stop() + // In single-use transaction, we should get an error before reading anything. + if _, err := txn.Timestamp(); err == nil { + t.Error("wanted error, got nil") + } + // After reading one row, the timestamp should be available. + _, err := iter.Next() + if err != nil { + t.Fatal(err) + } + if _, err := txn.Timestamp(); err != nil { + t.Errorf("got %v, want nil", err) + } + + txn = client.ReadOnlyTransaction() + defer txn.Close() + iter = txn.Read(ctx, testTable, AllKeys(), testTableColumns) + defer iter.Stop() + // In an ordinary read-only transaction, the timestamp should be + // available immediately. + if _, err := txn.Timestamp(); err != nil { + t.Errorf("got %v, want nil", err) + } +} + +func TestNestedTransaction(t *testing.T) { + t.Parallel() + // You cannot use a transaction from inside a read-write transaction. 
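+ // Each nested attempt below (a second ReadWriteTransaction, a single-use
+ // read, and a ReadOnlyTransaction read) should fail fast with
+ // FailedPrecondition rather than succeed or deadlock.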
+ ctx := context.Background() + client, _, tearDown := prepare(ctx, t, singerDBStatements) + defer tearDown() + client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error { + _, err := client.ReadWriteTransaction(ctx, + func(context.Context, *ReadWriteTransaction) error { return nil }) + if ErrCode(err) != codes.FailedPrecondition { + t.Fatalf("got %v, want FailedPrecondition", err) + } + _, err = client.Single().ReadRow(ctx, "Singers", Key{1}, []string{"SingerId"}) + if ErrCode(err) != codes.FailedPrecondition { + t.Fatalf("got %v, want FailedPrecondition", err) + } + rot := client.ReadOnlyTransaction() + defer rot.Close() + _, err = rot.ReadRow(ctx, "Singers", Key{1}, []string{"SingerId"}) + if ErrCode(err) != codes.FailedPrecondition { + t.Fatalf("got %v, want FailedPrecondition", err) + } + return nil + }) +} + +// Test client recovery on database recreation. +func TestDbRemovalRecovery(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second) + defer cancel() + client, dbPath, tearDown := prepare(ctx, t, singerDBStatements) + defer tearDown() + + // Drop the testing database. + if err := admin.DropDatabase(ctx, &adminpb.DropDatabaseRequest{dbPath}); err != nil { + t.Fatalf("failed to drop testing database %v: %v", dbPath, err) + } + + // Now, send the query. + iter := client.Single().Query(ctx, Statement{SQL: "SELECT SingerId FROM Singers"}) + defer iter.Stop() + if _, err := iter.Next(); err == nil { + t.Errorf("client sends query to removed database successfully, want it to fail") + } + + // Recreate database and table. + dbName := dbPath[strings.LastIndex(dbPath, "/")+1:] + op, err := admin.CreateDatabase(ctx, &adminpb.CreateDatabaseRequest{ + Parent: fmt.Sprintf("projects/%v/instances/%v", testProjectID, testInstanceID), + CreateStatement: "CREATE DATABASE " + dbName, + ExtraStatements: []string{ + `CREATE TABLE Singers ( + SingerId INT64 NOT NULL, + FirstName STRING(1024), + LastName STRING(1024), + SingerInfo BYTES(MAX) + ) PRIMARY KEY (SingerId)`, + }, + }) + if err != nil { + t.Fatalf("cannot recreate testing DB %v: %v", dbPath, err) + } + if _, err := op.Wait(ctx); err != nil { + t.Fatalf("cannot recreate testing DB %v: %v", dbPath, err) + } + + // Now, send the query again. + iter = client.Single().Query(ctx, Statement{SQL: "SELECT SingerId FROM Singers"}) + defer iter.Stop() + _, err = iter.Next() + if err != nil && err != iterator.Done { + t.Errorf("failed to send query to database %v: %v", dbPath, err) + } +} + +// Test encoding/decoding non-struct Cloud Spanner types. 
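+// Each case writes val into a column of the matching Spanner type and reads
+// it back, expecting want (or val itself when want is nil); together the
+// cases exercise the NullXxx wrappers and nil/empty/NULL-element arrays.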
+func TestBasicTypes(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second) + defer cancel() + client, _, tearDown := prepare(ctx, t, singerDBStatements) + defer tearDown() + t1, _ := time.Parse(time.RFC3339Nano, "2016-11-15T15:04:05.999999999Z") + // Boundaries + t2, _ := time.Parse(time.RFC3339Nano, "0001-01-01T00:00:00.000000000Z") + t3, _ := time.Parse(time.RFC3339Nano, "9999-12-31T23:59:59.999999999Z") + d1, _ := civil.ParseDate("2016-11-15") + // Boundaries + d2, _ := civil.ParseDate("0001-01-01") + d3, _ := civil.ParseDate("9999-12-31") + + tests := []struct { + col string + val interface{} + want interface{} + }{ + {col: "String", val: ""}, + {col: "String", val: "", want: NullString{"", true}}, + {col: "String", val: "foo"}, + {col: "String", val: "foo", want: NullString{"foo", true}}, + {col: "String", val: NullString{"bar", true}, want: "bar"}, + {col: "String", val: NullString{"bar", false}, want: NullString{"", false}}, + {col: "StringArray", val: []string(nil), want: []NullString(nil)}, + {col: "StringArray", val: []string{}, want: []NullString{}}, + {col: "StringArray", val: []string{"foo", "bar"}, want: []NullString{{"foo", true}, {"bar", true}}}, + {col: "StringArray", val: []NullString(nil)}, + {col: "StringArray", val: []NullString{}}, + {col: "StringArray", val: []NullString{{"foo", true}, {}}}, + {col: "Bytes", val: []byte{}}, + {col: "Bytes", val: []byte{1, 2, 3}}, + {col: "Bytes", val: []byte(nil)}, + {col: "BytesArray", val: [][]byte(nil)}, + {col: "BytesArray", val: [][]byte{}}, + {col: "BytesArray", val: [][]byte{[]byte{1}, []byte{2, 3}}}, + {col: "Int64a", val: 0, want: int64(0)}, + {col: "Int64a", val: -1, want: int64(-1)}, + {col: "Int64a", val: 2, want: int64(2)}, + {col: "Int64a", val: int64(3)}, + {col: "Int64a", val: 4, want: NullInt64{4, true}}, + {col: "Int64a", val: NullInt64{5, true}, want: int64(5)}, + {col: "Int64a", val: NullInt64{6, true}, want: int64(6)}, + {col: "Int64a", val: NullInt64{7, false}, want: NullInt64{0, false}}, + {col: "Int64Array", val: []int(nil), want: []NullInt64(nil)}, + {col: "Int64Array", val: []int{}, want: []NullInt64{}}, + {col: "Int64Array", val: []int{1, 2}, want: []NullInt64{{1, true}, {2, true}}}, + {col: "Int64Array", val: []int64(nil), want: []NullInt64(nil)}, + {col: "Int64Array", val: []int64{}, want: []NullInt64{}}, + {col: "Int64Array", val: []int64{1, 2}, want: []NullInt64{{1, true}, {2, true}}}, + {col: "Int64Array", val: []NullInt64(nil)}, + {col: "Int64Array", val: []NullInt64{}}, + {col: "Int64Array", val: []NullInt64{{1, true}, {}}}, + {col: "Bool", val: false}, + {col: "Bool", val: true}, + {col: "Bool", val: false, want: NullBool{false, true}}, + {col: "Bool", val: true, want: NullBool{true, true}}, + {col: "Bool", val: NullBool{true, true}}, + {col: "Bool", val: NullBool{false, false}}, + {col: "BoolArray", val: []bool(nil), want: []NullBool(nil)}, + {col: "BoolArray", val: []bool{}, want: []NullBool{}}, + {col: "BoolArray", val: []bool{true, false}, want: []NullBool{{true, true}, {false, true}}}, + {col: "BoolArray", val: []NullBool(nil)}, + {col: "BoolArray", val: []NullBool{}}, + {col: "BoolArray", val: []NullBool{{false, true}, {true, true}, {}}}, + {col: "Float64", val: 0.0}, + {col: "Float64", val: 3.14}, + {col: "Float64", val: math.NaN()}, + {col: "Float64", val: math.Inf(1)}, + {col: "Float64", val: math.Inf(-1)}, + {col: "Float64", val: 2.78, want: NullFloat64{2.78, true}}, + {col: "Float64", val: NullFloat64{2.71, true}, want: 2.71}, + {col: 
"Float64", val: NullFloat64{1.41, true}, want: NullFloat64{1.41, true}}, + {col: "Float64", val: NullFloat64{0, false}}, + {col: "Float64Array", val: []float64(nil), want: []NullFloat64(nil)}, + {col: "Float64Array", val: []float64{}, want: []NullFloat64{}}, + {col: "Float64Array", val: []float64{2.72, 3.14, math.Inf(1)}, want: []NullFloat64{{2.72, true}, {3.14, true}, {math.Inf(1), true}}}, + {col: "Float64Array", val: []NullFloat64(nil)}, + {col: "Float64Array", val: []NullFloat64{}}, + {col: "Float64Array", val: []NullFloat64{{2.72, true}, {math.Inf(1), true}, {}}}, + {col: "Date", val: d1}, + {col: "Date", val: d1, want: NullDate{d1, true}}, + {col: "Date", val: NullDate{d1, true}}, + {col: "Date", val: NullDate{d1, true}, want: d1}, + {col: "Date", val: NullDate{civil.Date{}, false}}, + {col: "DateArray", val: []civil.Date(nil), want: []NullDate(nil)}, + {col: "DateArray", val: []civil.Date{}, want: []NullDate{}}, + {col: "DateArray", val: []civil.Date{d1, d2, d3}, want: []NullDate{{d1, true}, {d2, true}, {d3, true}}}, + {col: "Timestamp", val: t1}, + {col: "Timestamp", val: t1, want: NullTime{t1, true}}, + {col: "Timestamp", val: NullTime{t1, true}}, + {col: "Timestamp", val: NullTime{t1, true}, want: t1}, + {col: "Timestamp", val: NullTime{}}, + {col: "TimestampArray", val: []time.Time(nil), want: []NullTime(nil)}, + {col: "TimestampArray", val: []time.Time{}, want: []NullTime{}}, + {col: "TimestampArray", val: []time.Time{t1, t2, t3}, want: []NullTime{{t1, true}, {t2, true}, {t3, true}}}, + } + + // Write rows into table first. + var muts []*Mutation + for i, test := range tests { + muts = append(muts, InsertOrUpdate("Types", []string{"RowID", test.col}, []interface{}{i, test.val})) + } + if _, err := client.Apply(ctx, muts, ApplyAtLeastOnce()); err != nil { + t.Fatal(err) + } + + for i, test := range tests { + row, err := client.Single().ReadRow(ctx, "Types", []interface{}{i}, []string{test.col}) + if err != nil { + t.Fatalf("Unable to fetch row %v: %v", i, err) + } + // Create new instance of type of test.want. + want := test.want + if want == nil { + want = test.val + } + gotp := reflect.New(reflect.TypeOf(want)) + if err := row.Column(0, gotp.Interface()); err != nil { + t.Errorf("%d: col:%v val:%#v, %v", i, test.col, test.val, err) + continue + } + got := reflect.Indirect(gotp).Interface() + + // One of the test cases is checking NaN handling. Given + // NaN!=NaN, we can't use reflect to test for it. + if isNaN(got) && isNaN(want) { + continue + } + + // Check non-NaN cases. + if !testEqual(got, want) { + t.Errorf("%d: col:%v val:%#v, got %#v, want %#v", i, test.col, test.val, got, want) + continue + } + } +} + +// Test decoding Cloud Spanner STRUCT type. +func TestStructTypes(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second) + defer cancel() + client, _, tearDown := prepare(ctx, t, singerDBStatements) + defer tearDown() + + tests := []struct { + q Statement + want func(r *Row) error + }{ + { + q: Statement{SQL: `SELECT ARRAY(SELECT STRUCT(1, 2))`}, + want: func(r *Row) error { + // Test STRUCT ARRAY decoding to []NullRow. 
+ var rows []NullRow + if err := r.Column(0, &rows); err != nil { + return err + } + if len(rows) != 1 { + return fmt.Errorf("len(rows) = %d; want 1", len(rows)) + } + if !rows[0].Valid { + return fmt.Errorf("rows[0] is NULL") + } + var i, j int64 + if err := rows[0].Row.Columns(&i, &j); err != nil { + return err + } + if i != 1 || j != 2 { + return fmt.Errorf("got (%d,%d), want (1,2)", i, j) + } + return nil + }, + }, + { + q: Statement{SQL: `SELECT ARRAY(SELECT STRUCT(1 as foo, 2 as bar)) as col1`}, + want: func(r *Row) error { + // Test Row.ToStruct. + s := struct { + Col1 []*struct { + Foo int64 `spanner:"foo"` + Bar int64 `spanner:"bar"` + } `spanner:"col1"` + }{} + if err := r.ToStruct(&s); err != nil { + return err + } + want := struct { + Col1 []*struct { + Foo int64 `spanner:"foo"` + Bar int64 `spanner:"bar"` + } `spanner:"col1"` + }{ + Col1: []*struct { + Foo int64 `spanner:"foo"` + Bar int64 `spanner:"bar"` + }{ + { + Foo: 1, + Bar: 2, + }, + }, + } + if !testEqual(want, s) { + return fmt.Errorf("unexpected decoding result: %v, want %v", s, want) + } + return nil + }, + }, + } + for i, test := range tests { + iter := client.Single().Query(ctx, test.q) + defer iter.Stop() + row, err := iter.Next() + if err != nil { + t.Errorf("%d: %v", i, err) + continue + } + if err := test.want(row); err != nil { + t.Errorf("%d: %v", i, err) + continue + } + } +} + +// Test queries of the form "SELECT expr". +func TestQueryExpressions(t *testing.T) { + t.Parallel() + ctx := context.Background() + client, _, tearDown := prepare(ctx, t, nil) + defer tearDown() + + newRow := func(vals []interface{}) *Row { + row, err := NewRow(make([]string, len(vals)), vals) + if err != nil { + t.Fatal(err) + } + return row + } + + tests := []struct { + expr string + want interface{} + }{ + {"1", int64(1)}, + {"[1, 2, 3]", []NullInt64{{1, true}, {2, true}, {3, true}}}, + {"[1, NULL, 3]", []NullInt64{{1, true}, {0, false}, {3, true}}}, + {"IEEE_DIVIDE(1, 0)", math.Inf(1)}, + {"IEEE_DIVIDE(-1, 0)", math.Inf(-1)}, + {"IEEE_DIVIDE(0, 0)", math.NaN()}, + // TODO(jba): add IEEE_DIVIDE(0, 0) to the following array when we have a better equality predicate. + {"[IEEE_DIVIDE(1, 0), IEEE_DIVIDE(-1, 0)]", []NullFloat64{{math.Inf(1), true}, {math.Inf(-1), true}}}, + {"ARRAY(SELECT AS STRUCT * FROM (SELECT 'a', 1) WHERE 0 = 1)", []NullRow{}}, + {"ARRAY(SELECT STRUCT(1, 2))", []NullRow{{Row: *newRow([]interface{}{1, 2}), Valid: true}}}, + } + for _, test := range tests { + iter := client.Single().Query(ctx, Statement{SQL: "SELECT " + test.expr}) + defer iter.Stop() + row, err := iter.Next() + if err != nil { + t.Errorf("%q: %v", test.expr, err) + continue + } + // Create new instance of type of test.want. + gotp := reflect.New(reflect.TypeOf(test.want)) + if err := row.Column(0, gotp.Interface()); err != nil { + t.Errorf("%q: Column returned error %v", test.expr, err) + continue + } + got := reflect.Indirect(gotp).Interface() + // TODO(jba): remove isNaN special case when we have a better equality predicate. 
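+ // NaN != NaN under Go's ==, so an equality-based comparison would always
+ // fail for the IEEE_DIVIDE(0, 0) case; compare NaN-ness explicitly.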
+ if isNaN(got) && isNaN(test.want) { + continue + } + if !testEqual(got, test.want) { + t.Errorf("%q\n got %#v\nwant %#v", test.expr, got, test.want) + } + } +} + +func TestQueryStats(t *testing.T) { + t.Parallel() + ctx := context.Background() + client, _, tearDown := prepare(ctx, t, singerDBStatements) + defer tearDown() + + accounts := []*Mutation{ + Insert("Accounts", []string{"AccountId", "Nickname", "Balance"}, []interface{}{int64(1), "Foo", int64(50)}), + Insert("Accounts", []string{"AccountId", "Nickname", "Balance"}, []interface{}{int64(2), "Bar", int64(1)}), + } + if _, err := client.Apply(ctx, accounts, ApplyAtLeastOnce()); err != nil { + t.Fatal(err) + } + const sql = "SELECT Balance FROM Accounts" + + qp, err := client.Single().AnalyzeQuery(ctx, Statement{sql, nil}) + if err != nil { + t.Fatal(err) + } + if len(qp.PlanNodes) == 0 { + t.Error("got zero plan nodes, expected at least one") + } + + iter := client.Single().QueryWithStats(ctx, Statement{sql, nil}) + defer iter.Stop() + for { + _, err := iter.Next() + if err == iterator.Done { + break + } + if err != nil { + t.Fatal(err) + } + } + if iter.QueryPlan == nil { + t.Error("got nil QueryPlan, expected one") + } + if iter.QueryStats == nil { + t.Error("got nil QueryStats, expected some") + } +} + +func isNaN(x interface{}) bool { + f, ok := x.(float64) + if !ok { + return false + } + return math.IsNaN(f) +} + +func TestInvalidDatabase(t *testing.T) { + t.Parallel() + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + if testProjectID == "" { + t.Skip("Integration tests skipped: GCLOUD_TESTS_GOLANG_PROJECT_ID is missing") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, Scope) + if ts == nil { + t.Skip("Integration test skipped: cannot get service account credential from environment variable GCLOUD_TESTS_GOLANG_KEY") + } + db := fmt.Sprintf("projects/%v/instances/%v/databases/invalid", testProjectID, testInstanceID) + c, err := NewClient(ctx, db, option.WithTokenSource(ts)) + // Client creation should succeed even if the database is invalid. + if err != nil { + t.Fatal(err) + } + _, err = c.Single().ReadRow(ctx, "TestTable", Key{1}, []string{"col1"}) + if msg, ok := matchError(err, codes.NotFound, ""); !ok { + t.Fatal(msg) + } +} + +func TestReadErrors(t *testing.T) { + t.Parallel() + ctx := context.Background() + client, _, tearDown := prepare(ctx, t, readDBStatements) + defer tearDown() + + // Read over invalid table fails + _, err := client.Single().ReadRow(ctx, "badTable", Key{1}, []string{"StringValue"}) + if msg, ok := matchError(err, codes.NotFound, "badTable"); !ok { + t.Error(msg) + } + // Read over invalid column fails + _, err = client.Single().ReadRow(ctx, "TestTable", Key{1}, []string{"badcol"}) + if msg, ok := matchError(err, codes.NotFound, "badcol"); !ok { + t.Error(msg) + } + + // Invalid query fails + iter := client.Single().Query(ctx, Statement{SQL: "SELECT Apples AND Oranges"}) + defer iter.Stop() + _, err = iter.Next() + if msg, ok := matchError(err, codes.InvalidArgument, "unrecognized name"); !ok { + t.Error(msg) + } + + // Read should fail on cancellation. + cctx, cancel := context.WithCancel(ctx) + cancel() + _, err = client.Single().ReadRow(cctx, "TestTable", Key{1}, []string{"StringValue"}) + if msg, ok := matchError(err, codes.Canceled, ""); !ok { + t.Error(msg) + } + // Read should fail if deadline exceeded. 
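+ // A nanosecond timeout expires essentially immediately; waiting on
+ // dctx.Done() below makes the expiry deterministic before the read is
+ // issued.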
+ dctx, _ := context.WithTimeout(ctx, time.Nanosecond)
+ <-dctx.Done()
+ _, err = client.Single().ReadRow(dctx, "TestTable", Key{1}, []string{"StringValue"})
+ if msg, ok := matchError(err, codes.DeadlineExceeded, ""); !ok {
+ t.Error(msg)
+ }
+}
+
+func matchError(got error, wantCode codes.Code, wantMsgPart string) (string, bool) {
+ if ErrCode(got) != wantCode || !strings.Contains(strings.ToLower(ErrDesc(got)), strings.ToLower(wantMsgPart)) {
+ return fmt.Sprintf("got error <%v>, want code = %q with message containing %q", got, wantCode, wantMsgPart), false
+ }
+ return "", true
+}
+
+func rowToValues(r *Row) ([]interface{}, error) {
+ var x int64
+ var y, z string
+ if err := r.Column(0, &x); err != nil {
+ return nil, err
+ }
+ if err := r.Column(1, &y); err != nil {
+ return nil, err
+ }
+ if err := r.Column(2, &z); err != nil {
+ return nil, err
+ }
+ return []interface{}{x, y, z}, nil
+}
+
+func readAll(iter *RowIterator) ([][]interface{}, error) {
+ defer iter.Stop()
+ var vals [][]interface{}
+ for {
+ row, err := iter.Next()
+ if err == iterator.Done {
+ return vals, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+ v, err := rowToValues(row)
+ if err != nil {
+ return nil, err
+ }
+ vals = append(vals, v)
+ }
+}
+
+func readAllTestTable(iter *RowIterator) ([]testTableRow, error) {
+ defer iter.Stop()
+ var vals []testTableRow
+ for {
+ row, err := iter.Next()
+ if err == iterator.Done {
+ return vals, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+ var ttr testTableRow
+ if err := row.ToStruct(&ttr); err != nil {
+ return nil, err
+ }
+ vals = append(vals, ttr)
+ }
+}
+
+// Test TransactionRunner. Test that transactions are aborted and retried as
+// expected.
+func TestTransactionRunner(t *testing.T) {
+ t.Parallel()
+ ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+ defer cancel()
+ client, _, tearDown := prepare(ctx, t, singerDBStatements)
+ defer tearDown()
+
+ // Test 1: User error should abort the transaction.
+ _, _ = client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error {
+ tx.BufferWrite([]*Mutation{
+ Insert("Accounts", []string{"AccountId", "Nickname", "Balance"}, []interface{}{int64(1), "Foo", int64(50)})})
+ return errors.New("user error")
+ })
+ // Empty read.
+ rows, err := readAllTestTable(client.Single().Read(ctx, "Accounts", Key{1}, []string{"AccountId", "Nickname", "Balance"}))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got, want := len(rows), 0; got != want {
+ t.Errorf("Empty read, got %d, want %d.", got, want)
+ }
+
+ // Test 2: Expect abort and retry.
+ // We run two ReadWriteTransactions concurrently: txn1 commits a write to a
+ // column txn2 has read, so txn2's subsequent read aborts and txn2 is
+ // retried.
+
+ // Set up two accounts
+ accounts := []*Mutation{
+ Insert("Accounts", []string{"AccountId", "Balance"}, []interface{}{int64(1), int64(0)}),
+ Insert("Accounts", []string{"AccountId", "Balance"}, []interface{}{int64(2), int64(1)}),
+ }
+ if _, err := client.Apply(ctx, accounts, ApplyAtLeastOnce()); err != nil {
+ t.Fatal(err)
+ }
+
+ var (
+ cTxn1Start = make(chan struct{})
+ cTxn1Commit = make(chan struct{})
+ cTxn2Start = make(chan struct{})
+ wg sync.WaitGroup
+ )
+
+ // readBalance reads an account balance and, unless an abort is expected,
+ // reports any read error.
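+ // The anonymous interface below lets readBalance accept any value with a
+ // matching ReadRow method: *ReadWriteTransaction inside the transactions,
+ // and the single-use transaction from client.Single() in the final check.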
+ readBalance := func(tx interface { + ReadRow(ctx context.Context, table string, key Key, columns []string) (*Row, error) + }, key int64, expectAbort bool) (int64, error) { + var b int64 + r, e := tx.ReadRow(ctx, "Accounts", Key{int64(key)}, []string{"Balance"}) + if e != nil { + if expectAbort && !isAbortErr(e) { + t.Errorf("ReadRow got %v, want Abort error.", e) + } + return b, e + } + if ce := r.Column(0, &b); ce != nil { + return b, ce + } + return b, nil + } + + wg.Add(2) + // Txn 1 + go func() { + defer wg.Done() + var once sync.Once + _, e := client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error { + b, e := readBalance(tx, 1, false) + if e != nil { + return e + } + // txn 1 can abort, in that case we skip closing the channel on retry. + once.Do(func() { close(cTxn1Start) }) + tx.BufferWrite([]*Mutation{ + Update("Accounts", []string{"AccountId", "Balance"}, []interface{}{int64(1), int64(b + 1)})}) + // Wait for second transaction. + <-cTxn2Start + return nil + }) + close(cTxn1Commit) + if e != nil { + t.Errorf("Transaction 1 commit, got %v, want nil.", e) + } + }() + // Txn 2 + go func() { + // Wait until txn 1 starts. + <-cTxn1Start + defer wg.Done() + var ( + once sync.Once + b1 int64 + b2 int64 + e error + ) + _, e = client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error { + if b1, e = readBalance(tx, 1, false); e != nil { + return e + } + // Skip closing channel on retry. + once.Do(func() { close(cTxn2Start) }) + // Wait until txn 1 successfully commits. + <-cTxn1Commit + // Txn1 has committed and written a balance to the account. + // Now this transaction (txn2) reads and re-writes the balance. + // The first time through, it will abort because it overlaps with txn1. + // Then it will retry after txn1 commits, and succeed. + if b2, e = readBalance(tx, 2, true); e != nil { + return e + } + tx.BufferWrite([]*Mutation{ + Update("Accounts", []string{"AccountId", "Balance"}, []interface{}{int64(2), int64(b1 + b2)})}) + return nil + }) + if e != nil { + t.Errorf("Transaction 2 commit, got %v, want nil.", e) + } + }() + wg.Wait() + // Check that both transactions' effects are visible. + for i := int64(1); i <= int64(2); i++ { + if b, e := readBalance(client.Single(), i, false); e != nil { + t.Fatalf("ReadBalance for key %d error %v.", i, e) + } else if b != i { + t.Errorf("Balance for key %d, got %d, want %d.", i, b, i) + } + } +} + +// createClient creates Cloud Spanner data client. +func createClient(ctx context.Context, dbPath string) (client *Client, err error) { + client, err = NewClientWithConfig(ctx, dbPath, ClientConfig{ + SessionPoolConfig: SessionPoolConfig{WriteSessions: 0.2}, + }, option.WithTokenSource(testutil.TokenSource(ctx, Scope)), option.WithEndpoint(endpoint)) + if err != nil { + return nil, fmt.Errorf("cannot create data client on DB %v: %v", dbPath, err) + } + return client, nil +} + +// populate prepares the database with some data. +func populate(ctx context.Context, client *Client) error { + // Populate data + var err error + m := InsertMap("test", map[string]interface{}{ + "a": str1, + "b": str2, + }) + _, err = client.Apply(ctx, []*Mutation{m}) + return err +} + +// Test PartitionQuery of BatchReadOnlyTransaction, create partitions then +// serialize and deserialize both transaction and partition to be used in +// execution on another client, and compare results. +func TestBatchQuery(t *testing.T) { + t.Parallel() + // Set up testing environment. 
+ var ( + client2 *Client + err error + ) + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + client, dbPath, tearDown := prepare(ctx, t, simpleDBStatements) + defer tearDown() + if err = populate(ctx, client); err != nil { + t.Fatal(err) + } + if client2, err = createClient(ctx, dbPath); err != nil { + t.Fatal(err) + } + defer client2.Close() + + // PartitionQuery + var ( + txn *BatchReadOnlyTransaction + partitions []*Partition + stmt = Statement{SQL: "SELECT * FROM test;"} + ) + + if txn, err = client.BatchReadOnlyTransaction(ctx, StrongRead()); err != nil { + t.Fatal(err) + } + defer txn.Cleanup(ctx) + if partitions, err = txn.PartitionQuery(ctx, stmt, PartitionOptions{0, 3}); err != nil { + t.Fatal(err) + } + + // Reconstruct BatchReadOnlyTransactionID and execute partitions + var ( + tid2 BatchReadOnlyTransactionID + data []byte + gotResult bool // if we get matching result from two separate txns + ) + if data, err = txn.ID.MarshalBinary(); err != nil { + t.Fatalf("encoding failed %v", err) + } + if err = tid2.UnmarshalBinary(data); err != nil { + t.Fatalf("decoding failed %v", err) + } + txn2 := client2.BatchReadOnlyTransactionFromID(tid2) + + // Execute Partitions and compare results + for i, p := range partitions { + iter := txn.Execute(ctx, p) + defer iter.Stop() + p2 := serdesPartition(t, i, p) + iter2 := txn2.Execute(ctx, &p2) + defer iter2.Stop() + + row1, err1 := iter.Next() + row2, err2 := iter2.Next() + if err1 != err2 { + t.Fatalf("execution failed for different reasons: %v, %v", err1, err2) + continue + } + if !testEqual(row1, row2) { + t.Fatalf("execution returned different values: %v, %v", row1, row2) + continue + } + if row1 == nil { + continue + } + var a, b string + if err = row1.Columns(&a, &b); err != nil { + t.Fatalf("failed to parse row %v", err) + continue + } + if a == str1 && b == str2 { + gotResult = true + } + } + if !gotResult { + t.Fatalf("execution didn't return expected values") + } +} + +// Test PartitionRead of BatchReadOnlyTransaction, similar to TestBatchQuery +func TestBatchRead(t *testing.T) { + t.Parallel() + // Set up testing environment. 
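+ // Same flow as TestBatchQuery above, except the partitions come from
+ // PartitionRead over the whole table rather than PartitionQuery.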
+ var ( + client2 *Client + err error + ) + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + client, dbPath, tearDown := prepare(ctx, t, simpleDBStatements) + defer tearDown() + if err = populate(ctx, client); err != nil { + t.Fatal(err) + } + if client2, err = createClient(ctx, dbPath); err != nil { + t.Fatal(err) + } + defer client2.Close() + + // PartitionRead + var ( + txn *BatchReadOnlyTransaction + partitions []*Partition + ) + + if txn, err = client.BatchReadOnlyTransaction(ctx, StrongRead()); err != nil { + t.Fatal(err) + } + defer txn.Cleanup(ctx) + if partitions, err = txn.PartitionRead(ctx, "test", AllKeys(), simpleDBTableColumns, PartitionOptions{0, 3}); err != nil { + t.Fatal(err) + } + + // Reconstruct BatchReadOnlyTransactionID and execute partitions + var ( + tid2 BatchReadOnlyTransactionID + data []byte + gotResult bool // if we get matching result from two separate txns + ) + if data, err = txn.ID.MarshalBinary(); err != nil { + t.Fatalf("encoding failed %v", err) + } + if err = tid2.UnmarshalBinary(data); err != nil { + t.Fatalf("decoding failed %v", err) + } + txn2 := client2.BatchReadOnlyTransactionFromID(tid2) + + // Execute Partitions and compare results + for i, p := range partitions { + iter := txn.Execute(ctx, p) + defer iter.Stop() + p2 := serdesPartition(t, i, p) + iter2 := txn2.Execute(ctx, &p2) + defer iter2.Stop() + + row1, err1 := iter.Next() + row2, err2 := iter2.Next() + if err1 != err2 { + t.Fatalf("execution failed for different reasons: %v, %v", err1, err2) + continue + } + if !testEqual(row1, row2) { + t.Fatalf("execution returned different values: %v, %v", row1, row2) + continue + } + if row1 == nil { + continue + } + var a, b string + if err = row1.Columns(&a, &b); err != nil { + t.Fatalf("failed to parse row %v", err) + continue + } + if a == str1 && b == str2 { + gotResult = true + } + } + if !gotResult { + t.Fatalf("execution didn't return expected values") + } +} + +// Test normal txReadEnv method on BatchReadOnlyTransaction. +func TestBROTNormal(t *testing.T) { + t.Parallel() + // Set up testing environment and create txn. 
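+ // After PartitionRead, the same BatchReadOnlyTransaction is reused for an
+ // ordinary Query, which should still work through the normal txReadEnv
+ // path.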
+ var ( + txn *BatchReadOnlyTransaction + err error + row *Row + i int64 + ) + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + client, _, tearDown := prepare(ctx, t, simpleDBStatements) + defer tearDown() + + if txn, err = client.BatchReadOnlyTransaction(ctx, StrongRead()); err != nil { + t.Fatal(err) + } + defer txn.Cleanup(ctx) + if _, err := txn.PartitionRead(ctx, "test", AllKeys(), simpleDBTableColumns, PartitionOptions{0, 3}); err != nil { + t.Fatal(err) + } + // Normal query should work with BatchReadOnlyTransaction + stmt2 := Statement{SQL: "SELECT 1"} + iter := txn.Query(ctx, stmt2) + defer iter.Stop() + + row, err = iter.Next() + if err != nil { + t.Errorf("query failed with %v", err) + } + if err = row.Columns(&i); err != nil { + t.Errorf("failed to parse row %v", err) + } +} + +func TestCommitTimestamp(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second) + defer cancel() + client, _, tearDown := prepare(ctx, t, ctsDBStatements) + defer tearDown() + + type testTableRow struct { + Key string + Ts NullTime + } + + var ( + cts1, cts2, ts1, ts2 time.Time + err error + ) + + // Apply mutation in sequence, expect to see commit timestamp in good order, check also the commit timestamp returned + for _, it := range []struct { + k string + t *time.Time + }{ + {"a", &cts1}, + {"b", &cts2}, + } { + tt := testTableRow{Key: it.k, Ts: NullTime{CommitTimestamp, true}} + m, err := InsertStruct("TestTable", tt) + if err != nil { + t.Fatal(err) + } + *it.t, err = client.Apply(ctx, []*Mutation{m}, ApplyAtLeastOnce()) + if err != nil { + t.Fatal(err) + } + } + + txn := client.ReadOnlyTransaction() + for _, it := range []struct { + k string + t *time.Time + }{ + {"a", &ts1}, + {"b", &ts2}, + } { + if r, e := txn.ReadRow(ctx, "TestTable", Key{it.k}, []string{"Ts"}); e != nil { + t.Fatal(err) + } else { + var got testTableRow + if err := r.ToStruct(&got); err != nil { + t.Fatal(err) + } + *it.t = got.Ts.Time + } + } + if !cts1.Equal(ts1) { + t.Errorf("Expect commit timestamp returned and read to match for txn1, got %v and %v.", cts1, ts1) + } + if !cts2.Equal(ts2) { + t.Errorf("Expect commit timestamp returned and read to match for txn2, got %v and %v.", cts2, ts2) + } + + // Try writing a timestamp in the future to commit timestamp, expect error + _, err = client.Apply(ctx, []*Mutation{InsertOrUpdate("TestTable", []string{"Key", "Ts"}, []interface{}{"a", time.Now().Add(time.Hour)})}, ApplyAtLeastOnce()) + if msg, ok := matchError(err, codes.FailedPrecondition, "Cannot write timestamps in the future"); !ok { + t.Error(msg) + } +} diff --git a/vendor/cloud.google.com/go/spanner/statement.go b/vendor/cloud.google.com/go/spanner/statement.go new file mode 100644 index 0000000..a4313ba --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/statement.go @@ -0,0 +1,101 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spanner + +import ( + "errors" + "fmt" + + proto3 "github.com/golang/protobuf/ptypes/struct" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc/codes" +) + +// A Statement is a SQL query with named parameters. +// +// A parameter placeholder consists of '@' followed by the parameter name. +// Parameter names consist of any combination of letters, numbers, and +// underscores. Names may be entirely numeric (e.g., "WHERE m.id = @5"). +// Parameters may appear anywhere that a literal value is expected. The same +// parameter name may be used more than once. It is an error to execute a +// statement with unbound parameters. On the other hand, it is allowable to +// bind parameter names that are not used. +// +// See the documentation of the Row type for how Go types are mapped to Cloud +// Spanner types. +type Statement struct { + SQL string + Params map[string]interface{} +} + +// NewStatement returns a Statement with the given SQL and an empty Params map. +func NewStatement(sql string) Statement { + return Statement{SQL: sql, Params: map[string]interface{}{}} +} + +// errBindParam returns error for not being able to bind parameter to query request. +func errBindParam(k string, v interface{}, err error) error { + if err == nil { + return nil + } + se, ok := toSpannerError(err).(*Error) + if !ok { + return spannerErrorf(codes.InvalidArgument, "failed to bind query parameter(name: %q, value: %v), error = <%v>", k, v, err) + } + se.decorate(fmt.Sprintf("failed to bind query parameter(name: %q, value: %v)", k, v)) + return se +} + +var ( + errNilParam = errors.New("use T(nil), not nil") + errNoType = errors.New("no type information") +) + +// bindParams binds parameters in a Statement to a sppb.ExecuteSqlRequest or sppb.PartitionQueryRequest. +func (s *Statement) bindParams(i interface{}) error { + params := &proto3.Struct{ + Fields: map[string]*proto3.Value{}, + } + paramTypes := map[string]*sppb.Type{} + for k, v := range s.Params { + if v == nil { + return errBindParam(k, v, errNilParam) + } + val, t, err := encodeValue(v) + if err != nil { + return errBindParam(k, v, err) + } + if t == nil { // should not happen, because of nil check above + return errBindParam(k, v, errNoType) + } + params.Fields[k] = val + paramTypes[k] = t + } + + switch r := i.(type) { + default: + return fmt.Errorf("failed to bind query parameter, unexpected request type: %v", r) + case *sppb.ExecuteSqlRequest: + r.Params = params + r.ParamTypes = paramTypes + case *sppb.PartitionQueryRequest: + r.Params = params + r.ParamTypes = paramTypes + } + return nil +} diff --git a/vendor/cloud.google.com/go/spanner/statement_test.go b/vendor/cloud.google.com/go/spanner/statement_test.go new file mode 100644 index 0000000..01c6368 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/statement_test.go @@ -0,0 +1,171 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spanner + +import ( + "math" + "testing" + "time" + + "cloud.google.com/go/civil" + + "github.com/golang/protobuf/proto" + proto3 "github.com/golang/protobuf/ptypes/struct" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// Test Statement.bindParams. +func TestBindParams(t *testing.T) { + // Verify Statement.bindParams generates correct values and types. + st := Statement{ + SQL: "SELECT id from t_foo WHERE col = @var", + Params: map[string]interface{}{"var": nil}, + } + want := &sppb.ExecuteSqlRequest{ + Params: &proto3.Struct{ + Fields: map[string]*proto3.Value{"var": nil}, + }, + ParamTypes: map[string]*sppb.Type{"var": nil}, + } + var ( + t1, _ = time.Parse(time.RFC3339Nano, "2016-11-15T15:04:05.999999999Z") + // Boundaries + t2, _ = time.Parse(time.RFC3339Nano, "0001-01-01T00:00:00.000000000Z") + t3, _ = time.Parse(time.RFC3339Nano, "9999-12-31T23:59:59.999999999Z") + d1, _ = civil.ParseDate("2016-11-15") + // Boundaries + d2, _ = civil.ParseDate("0001-01-01") + d3, _ = civil.ParseDate("9999-12-31") + ) + for i, test := range []struct { + val interface{} + wantField *proto3.Value + wantType *sppb.Type + }{ + // bool + {true, boolProto(true), boolType()}, + {NullBool{true, true}, boolProto(true), boolType()}, + {NullBool{true, false}, nullProto(), boolType()}, + {[]bool(nil), nullProto(), listType(boolType())}, + {[]bool{}, listProto(), listType(boolType())}, + {[]bool{true, false}, listProto(boolProto(true), boolProto(false)), listType(boolType())}, + {[]NullBool(nil), nullProto(), listType(boolType())}, + {[]NullBool{}, listProto(), listType(boolType())}, + {[]NullBool{{true, true}, {}}, listProto(boolProto(true), nullProto()), listType(boolType())}, + // int + {int(1), intProto(1), intType()}, + {[]int(nil), nullProto(), listType(intType())}, + {[]int{}, listProto(), listType(intType())}, + {[]int{1, 2}, listProto(intProto(1), intProto(2)), listType(intType())}, + // int64 + {int64(1), intProto(1), intType()}, + {NullInt64{5, true}, intProto(5), intType()}, + {NullInt64{5, false}, nullProto(), intType()}, + {[]int64(nil), nullProto(), listType(intType())}, + {[]int64{}, listProto(), listType(intType())}, + {[]int64{1, 2}, listProto(intProto(1), intProto(2)), listType(intType())}, + {[]NullInt64(nil), nullProto(), listType(intType())}, + {[]NullInt64{}, listProto(), listType(intType())}, + {[]NullInt64{{1, true}, {}}, listProto(intProto(1), nullProto()), listType(intType())}, + // float64 + {0.0, floatProto(0.0), floatType()}, + {math.Inf(1), floatProto(math.Inf(1)), floatType()}, + {math.Inf(-1), floatProto(math.Inf(-1)), floatType()}, + {math.NaN(), floatProto(math.NaN()), floatType()}, + {NullFloat64{2.71, true}, floatProto(2.71), floatType()}, + {NullFloat64{1.41, false}, nullProto(), floatType()}, + {[]float64(nil), nullProto(), listType(floatType())}, + {[]float64{}, listProto(), listType(floatType())}, + {[]float64{2.72, math.Inf(1)}, listProto(floatProto(2.72), floatProto(math.Inf(1))), listType(floatType())}, + {[]NullFloat64(nil), nullProto(), listType(floatType())}, + {[]NullFloat64{}, listProto(), listType(floatType())}, + {[]NullFloat64{{2.72, true}, {}}, listProto(floatProto(2.72), nullProto()), listType(floatType())}, + // string + {"", stringProto(""), stringType()}, + {"foo", stringProto("foo"), stringType()}, + {NullString{"bar", true}, stringProto("bar"), stringType()}, + {NullString{"bar", false}, nullProto(), stringType()}, + {[]string(nil), nullProto(), listType(stringType())}, + {[]string{}, listProto(), listType(stringType())}, 
+ {[]string{"foo", "bar"}, listProto(stringProto("foo"), stringProto("bar")), listType(stringType())}, + {[]NullString(nil), nullProto(), listType(stringType())}, + {[]NullString{}, listProto(), listType(stringType())}, + {[]NullString{{"foo", true}, {}}, listProto(stringProto("foo"), nullProto()), listType(stringType())}, + // bytes + {[]byte{}, bytesProto([]byte{}), bytesType()}, + {[]byte{1, 2, 3}, bytesProto([]byte{1, 2, 3}), bytesType()}, + {[]byte(nil), nullProto(), bytesType()}, + {[][]byte(nil), nullProto(), listType(bytesType())}, + {[][]byte{}, listProto(), listType(bytesType())}, + {[][]byte{[]byte{1}, []byte(nil)}, listProto(bytesProto([]byte{1}), nullProto()), listType(bytesType())}, + // date + {d1, dateProto(d1), dateType()}, + {NullDate{civil.Date{}, false}, nullProto(), dateType()}, + {[]civil.Date(nil), nullProto(), listType(dateType())}, + {[]civil.Date{}, listProto(), listType(dateType())}, + {[]civil.Date{d1, d2, d3}, listProto(dateProto(d1), dateProto(d2), dateProto(d3)), listType(dateType())}, + {[]NullDate{NullDate{d2, true}, NullDate{}}, listProto(dateProto(d2), nullProto()), listType(dateType())}, + // timestamp + {t1, timeProto(t1), timeType()}, + {NullTime{}, nullProto(), timeType()}, + {[]time.Time(nil), nullProto(), listType(timeType())}, + {[]time.Time{}, listProto(), listType(timeType())}, + {[]time.Time{t1, t2, t3}, listProto(timeProto(t1), timeProto(t2), timeProto(t3)), listType(timeType())}, + {[]NullTime{NullTime{t2, true}, NullTime{}}, listProto(timeProto(t2), nullProto()), listType(timeType())}, + } { + st.Params["var"] = test.val + want.Params.Fields["var"] = test.wantField + want.ParamTypes["var"] = test.wantType + got := &sppb.ExecuteSqlRequest{} + if err := st.bindParams(got); err != nil || !proto.Equal(got, want) { + // handle NaN + if test.wantType.Code == floatType().Code && proto.MarshalTextString(got) == proto.MarshalTextString(want) { + continue + } + t.Errorf("#%d: bind result: \n(%v, %v)\nwant\n(%v, %v)\n", i, got, err, want, nil) + } + } + + // Verify type error reporting. + for _, test := range []struct { + val interface{} + wantErr error + }{ + { + struct{}{}, + errBindParam("var", struct{}{}, errEncoderUnsupportedType(struct{}{})), + }, + { + nil, + errBindParam("var", nil, errNilParam), + }, + } { + st.Params["var"] = test.val + var got sppb.ExecuteSqlRequest + if err := st.bindParams(&got); !testEqual(err, test.wantErr) { + t.Errorf("value %#v:\ngot: %v\nwant: %v", test.val, err, test.wantErr) + } + } +} + +func TestNewStatement(t *testing.T) { + s := NewStatement("query") + if got, want := s.SQL, "query"; got != want { + t.Errorf("got %q, want %q", got, want) + } +} diff --git a/vendor/cloud.google.com/go/spanner/timestampbound.go b/vendor/cloud.google.com/go/spanner/timestampbound.go new file mode 100644 index 0000000..e606e6c --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/timestampbound.go @@ -0,0 +1,240 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spanner + +import ( + "fmt" + "time" + + pbd "github.com/golang/protobuf/ptypes/duration" + pbt "github.com/golang/protobuf/ptypes/timestamp" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// timestampBoundType specifies the timestamp bound mode. +type timestampBoundType int + +const ( + strong timestampBoundType = iota // strong reads + exactStaleness // read with exact staleness + maxStaleness // read with max staleness + minReadTimestamp // read with min freshness + readTimestamp // read data at exact timestamp +) + +// TimestampBound defines how Cloud Spanner will choose a timestamp for a single +// read/query or read-only transaction. +// +// There are three types of timestamp bound: strong, bounded staleness and exact +// staleness. Strong is the default. +// +// If the Cloud Spanner database to be read is geographically distributed, stale +// read-only transactions can execute more quickly than strong or read-write +// transactions, because they are able to execute far from the leader replica. +// +// Each type of timestamp bound is discussed in detail below. A TimestampBound +// can be specified when creating transactions, see the documentation of +// spanner.Client for an example. +// +// Strong reads +// +// Strong reads are guaranteed to see the effects of all transactions that have +// committed before the start of the read. Furthermore, all rows yielded by a +// single read are consistent with each other: if any part of the read +// observes a transaction, all parts of the read see the transaction. +// +// Strong reads are not repeatable: two consecutive strong read-only +// transactions might return inconsistent results if there are concurrent +// writes. If consistency across reads is required, the reads should be +// executed within a transaction or at an exact read timestamp. +// +// Use StrongRead to create a bound of this type. +// +// Exact staleness +// +// An exact staleness timestamp bound executes reads at a user-specified timestamp. +// Reads at a timestamp are guaranteed to see a consistent prefix of the global +// transaction history: they observe modifications done by all transactions with a +// commit timestamp less than or equal to the read timestamp, and observe none of the +// modifications done by transactions with a larger commit timestamp. They will block +// until all conflicting transactions that may be assigned commit timestamps less +// than or equal to the read timestamp have finished. +// +// The timestamp can either be expressed as an absolute Cloud Spanner commit +// timestamp or a staleness relative to the current time. +// +// These modes do not require a "negotiation phase" to pick a timestamp. As a +// result, they execute slightly faster than the equivalent boundedly stale +// concurrency modes. On the other hand, boundedly stale reads usually return +// fresher results. +// +// Use ReadTimestamp and ExactStaleness to create a bound of this type. +// +// Bounded staleness +// +// Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to +// a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within +// the staleness bound that allows execution of the reads at the closest +// available replica without blocking. +// +// All rows yielded are consistent with each other: if any part of the read +// observes a transaction, all parts of the read see the transaction. 
Boundedly
+// stale reads are not repeatable: two stale reads, even if they use the same
+// staleness bound, can execute at different timestamps and thus return
+// inconsistent results.
+//
+// Boundedly stale reads execute in two phases. The first phase negotiates a
+// timestamp among all replicas needed to serve the read. In the second phase,
+// reads are executed at the negotiated timestamp.
+//
+// As a result of this two-phase execution, bounded staleness reads are usually
+// a little slower than comparable exact staleness reads. However, they are
+// typically able to return fresher results, and are more likely to execute at
+// the closest replica.
+//
+// Because the timestamp negotiation requires up-front knowledge of which rows
+// will be read, it can only be used with single-use reads and single-use
+// read-only transactions.
+//
+// Use MinReadTimestamp and MaxStaleness to create a bound of this type.
+//
+// Old read timestamps and garbage collection
+//
+// Cloud Spanner continuously garbage collects deleted and overwritten data in the
+// background to reclaim storage space. This process is known as "version
+// GC". By default, version GC reclaims versions after they are four hours
+// old. Because of this, Cloud Spanner cannot perform reads at read timestamps more
+// than four hours in the past. This restriction also applies to in-progress
+// reads and/or SQL queries whose timestamps become too old while
+// executing. Reads and SQL queries with too-old read timestamps fail with the
+// error ErrorCode.FAILED_PRECONDITION.
+type TimestampBound struct {
+	mode timestampBoundType
+	d    time.Duration
+	t    time.Time
+}
+
+// StrongRead returns a TimestampBound that will perform reads and queries at a
+// timestamp where all previously committed transactions are visible.
+func StrongRead() TimestampBound {
+	return TimestampBound{mode: strong}
+}
+
+// ExactStaleness returns a TimestampBound that will perform reads and queries
+// at an exact staleness.
+func ExactStaleness(d time.Duration) TimestampBound {
+	return TimestampBound{
+		mode: exactStaleness,
+		d:    d,
+	}
+}
+
+// MaxStaleness returns a TimestampBound that will perform reads and queries at
+// a time chosen to be at most "d" stale.
+func MaxStaleness(d time.Duration) TimestampBound {
+	return TimestampBound{
+		mode: maxStaleness,
+		d:    d,
+	}
+}
+
+// MinReadTimestamp returns a TimestampBound that will perform reads
+// and queries at a time chosen to be at least "t".
+func MinReadTimestamp(t time.Time) TimestampBound {
+	return TimestampBound{
+		mode: minReadTimestamp,
+		t:    t,
+	}
+}
+
+// ReadTimestamp returns a TimestampBound that will perform reads and queries at
+// the given time.
+func ReadTimestamp(t time.Time) TimestampBound {
+	return TimestampBound{
+		mode: readTimestamp,
+		t:    t,
+	}
+}
+
+func (tb TimestampBound) String() string {
+	switch tb.mode {
+	case strong:
+		return fmt.Sprintf("(strong)")
+	case exactStaleness:
+		return fmt.Sprintf("(exactStaleness: %s)", tb.d)
+	case maxStaleness:
+		return fmt.Sprintf("(maxStaleness: %s)", tb.d)
+	case minReadTimestamp:
+		return fmt.Sprintf("(minReadTimestamp: %s)", tb.t)
+	case readTimestamp:
+		return fmt.Sprintf("(readTimestamp: %s)", tb.t)
+	default:
+		return fmt.Sprintf("{mode=%v, d=%v, t=%v}", tb.mode, tb.d, tb.t)
+	}
+}
+
+// durationProto takes a time.Duration and converts it into pbd.Duration for
+// calling gRPC APIs.
+func durationProto(d time.Duration) *pbd.Duration { + n := d.Nanoseconds() + return &pbd.Duration{ + Seconds: n / int64(time.Second), + Nanos: int32(n % int64(time.Second)), + } +} + +// timestampProto takes a time.Time and converts it into pbt.Timestamp for calling +// gRPC APIs. +func timestampProto(t time.Time) *pbt.Timestamp { + return &pbt.Timestamp{ + Seconds: t.Unix(), + Nanos: int32(t.Nanosecond()), + } +} + +// buildTransactionOptionsReadOnly converts a spanner.TimestampBound into a sppb.TransactionOptions_ReadOnly +// transaction option, which is then used in transactional reads. +func buildTransactionOptionsReadOnly(tb TimestampBound, returnReadTimestamp bool) *sppb.TransactionOptions_ReadOnly { + pb := &sppb.TransactionOptions_ReadOnly{ + ReturnReadTimestamp: returnReadTimestamp, + } + switch tb.mode { + case strong: + pb.TimestampBound = &sppb.TransactionOptions_ReadOnly_Strong{ + Strong: true, + } + case exactStaleness: + pb.TimestampBound = &sppb.TransactionOptions_ReadOnly_ExactStaleness{ + ExactStaleness: durationProto(tb.d), + } + case maxStaleness: + pb.TimestampBound = &sppb.TransactionOptions_ReadOnly_MaxStaleness{ + MaxStaleness: durationProto(tb.d), + } + case minReadTimestamp: + pb.TimestampBound = &sppb.TransactionOptions_ReadOnly_MinReadTimestamp{ + MinReadTimestamp: timestampProto(tb.t), + } + case readTimestamp: + pb.TimestampBound = &sppb.TransactionOptions_ReadOnly_ReadTimestamp{ + ReadTimestamp: timestampProto(tb.t), + } + default: + panic(fmt.Sprintf("buildTransactionOptionsReadOnly(%v,%v)", tb, returnReadTimestamp)) + } + return pb +} diff --git a/vendor/cloud.google.com/go/spanner/timestampbound_test.go b/vendor/cloud.google.com/go/spanner/timestampbound_test.go new file mode 100644 index 0000000..1a2a692 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/timestampbound_test.go @@ -0,0 +1,207 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "testing" + "time" + + pbd "github.com/golang/protobuf/ptypes/duration" + pbt "github.com/golang/protobuf/ptypes/timestamp" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// Test generating TimestampBound for strong reads. +func TestStrong(t *testing.T) { + got := StrongRead() + want := TimestampBound{mode: strong} + if !testEqual(got, want) { + t.Errorf("Strong() = %v; want %v", got, want) + } +} + +// Test generating TimestampBound for reads with exact staleness. +func TestExactStaleness(t *testing.T) { + got := ExactStaleness(10 * time.Second) + want := TimestampBound{mode: exactStaleness, d: 10 * time.Second} + if !testEqual(got, want) { + t.Errorf("ExactStaleness(10*time.Second) = %v; want %v", got, want) + } +} + +// Test generating TimestampBound for reads with max staleness. 
+func TestMaxStaleness(t *testing.T) {
+	got := MaxStaleness(10 * time.Second)
+	want := TimestampBound{mode: maxStaleness, d: 10 * time.Second}
+	if !testEqual(got, want) {
+		t.Errorf("MaxStaleness(10*time.Second) = %v; want %v", got, want)
+	}
+}
+
+// Test generating TimestampBound for reads with minimum freshness requirement.
+func TestMinReadTimestamp(t *testing.T) {
+	ts := time.Now()
+	got := MinReadTimestamp(ts)
+	want := TimestampBound{mode: minReadTimestamp, t: ts}
+	if !testEqual(got, want) {
+		t.Errorf("MinReadTimestamp(%v) = %v; want %v", ts, got, want)
+	}
+}
+
+// Test generating TimestampBound for reads requesting data at an exact timestamp.
+func TestReadTimestamp(t *testing.T) {
+	ts := time.Now()
+	got := ReadTimestamp(ts)
+	want := TimestampBound{mode: readTimestamp, t: ts}
+	if !testEqual(got, want) {
+		t.Errorf("ReadTimestamp(%v) = %v; want %v", ts, got, want)
+	}
+}
+
+// Test TimestampBound.String.
+func TestTimestampBoundString(t *testing.T) {
+	ts := time.Unix(1136239445, 0).UTC()
+	var tests = []struct {
+		tb   TimestampBound
+		want string
+	}{
+		{
+			tb:   TimestampBound{mode: strong},
+			want: "(strong)",
+		},
+		{
+			tb:   TimestampBound{mode: exactStaleness, d: 10 * time.Second},
+			want: "(exactStaleness: 10s)",
+		},
+		{
+			tb:   TimestampBound{mode: maxStaleness, d: 10 * time.Second},
+			want: "(maxStaleness: 10s)",
+		},
+		{
+			tb:   TimestampBound{mode: minReadTimestamp, t: ts},
+			want: "(minReadTimestamp: 2006-01-02 22:04:05 +0000 UTC)",
+		},
+		{
+			tb:   TimestampBound{mode: readTimestamp, t: ts},
+			want: "(readTimestamp: 2006-01-02 22:04:05 +0000 UTC)",
+		},
+	}
+	for _, test := range tests {
+		got := test.tb.String()
+		if got != test.want {
+			t.Errorf("%#v.String():\ngot %q\nwant %q", test.tb, got, test.want)
+		}
+	}
+}
+
+// Test time.Duration to pbd.Duration conversion.
+func TestDurationProto(t *testing.T) {
+	var tests = []struct {
+		d    time.Duration
+		want pbd.Duration
+	}{
+		{time.Duration(0), pbd.Duration{Seconds: 0, Nanos: 0}},
+		{time.Second, pbd.Duration{Seconds: 1, Nanos: 0}},
+		{time.Millisecond, pbd.Duration{Seconds: 0, Nanos: 1e6}},
+		{15 * time.Nanosecond, pbd.Duration{Seconds: 0, Nanos: 15}},
+		{42 * time.Hour, pbd.Duration{Seconds: 151200}},
+		{-(1*time.Hour + 4*time.Millisecond), pbd.Duration{Seconds: -3600, Nanos: -4e6}},
+	}
+	for _, test := range tests {
+		got := durationProto(test.d)
+		if !testEqual(got, &test.want) {
+			t.Errorf("durationProto(%v) = %v; want %v", test.d, got, test.want)
+		}
+	}
+}
+
+// Test time.Time to pbt.Timestamp conversion.
+func TestTimeProto(t *testing.T) {
+	var tests = []struct {
+		t    time.Time
+		want pbt.Timestamp
+	}{
+		{time.Unix(0, 0), pbt.Timestamp{}},
+		{time.Unix(1136239445, 12345), pbt.Timestamp{Seconds: 1136239445, Nanos: 12345}},
+		{time.Unix(-1000, 12345), pbt.Timestamp{Seconds: -1000, Nanos: 12345}},
+	}
+	for _, test := range tests {
+		got := timestampProto(test.t)
+		if !testEqual(got, &test.want) {
+			t.Errorf("timestampProto(%v) = %v; want %v", test.t, got, test.want)
+		}
+	}
+}
+
+// Test readonly transaction option builder.
+func TestBuildTransactionOptionsReadOnly(t *testing.T) { + ts := time.Unix(1136239445, 12345) + var tests = []struct { + tb TimestampBound + ts bool + want sppb.TransactionOptions_ReadOnly + }{ + { + StrongRead(), false, + sppb.TransactionOptions_ReadOnly{ + TimestampBound: &sppb.TransactionOptions_ReadOnly_Strong{ + Strong: true}, + ReturnReadTimestamp: false, + }, + }, + { + ExactStaleness(10 * time.Second), true, + sppb.TransactionOptions_ReadOnly{ + TimestampBound: &sppb.TransactionOptions_ReadOnly_ExactStaleness{ + ExactStaleness: &pbd.Duration{Seconds: 10}}, + ReturnReadTimestamp: true, + }, + }, + { + MaxStaleness(10 * time.Second), true, + sppb.TransactionOptions_ReadOnly{ + TimestampBound: &sppb.TransactionOptions_ReadOnly_MaxStaleness{ + MaxStaleness: &pbd.Duration{Seconds: 10}}, + ReturnReadTimestamp: true, + }, + }, + + { + MinReadTimestamp(ts), true, + sppb.TransactionOptions_ReadOnly{ + TimestampBound: &sppb.TransactionOptions_ReadOnly_MinReadTimestamp{ + MinReadTimestamp: &pbt.Timestamp{Seconds: 1136239445, Nanos: 12345}}, + ReturnReadTimestamp: true, + }, + }, + { + ReadTimestamp(ts), true, + sppb.TransactionOptions_ReadOnly{ + TimestampBound: &sppb.TransactionOptions_ReadOnly_ReadTimestamp{ + ReadTimestamp: &pbt.Timestamp{Seconds: 1136239445, Nanos: 12345}}, + ReturnReadTimestamp: true, + }, + }, + } + for _, test := range tests { + got := buildTransactionOptionsReadOnly(test.tb, test.ts) + if !testEqual(got, &test.want) { + t.Errorf("buildTransactionOptionsReadOnly(%v,%v) = %v; want %v", test.tb, test.ts, got, test.want) + } + } +} diff --git a/vendor/cloud.google.com/go/spanner/transaction.go b/vendor/cloud.google.com/go/spanner/transaction.go new file mode 100644 index 0000000..c35ba4e --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/transaction.go @@ -0,0 +1,879 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "sync" + "time" + + "golang.org/x/net/context" + + "google.golang.org/api/iterator" + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// transactionID stores a transaction ID which uniquely identifies a transaction in Cloud Spanner. +type transactionID []byte + +// txReadEnv manages a read-transaction environment consisting of a session handle and a transaction selector. +type txReadEnv interface { + // acquire returns a read-transaction environment that can be used to perform a transactional read. + acquire(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) + // sets the transaction's read timestamp + setTimestamp(time.Time) + // release should be called at the end of every transactional read to deal with session recycling. + release(error) +} + +// txReadOnly contains methods for doing transactional reads. +type txReadOnly struct { + // read-transaction environment for performing transactional read operations. 
+	txReadEnv
+}
+
+// errSessionClosed returns error for using a recycled/destroyed session
+func errSessionClosed(sh *sessionHandle) error {
+	return spannerErrorf(codes.FailedPrecondition,
+		"session is already recycled / destroyed: session_id = %q, rpc_client = %v", sh.getID(), sh.getClient())
+}
+
+// Read returns a RowIterator for reading multiple rows from the database.
+func (t *txReadOnly) Read(ctx context.Context, table string, keys KeySet, columns []string) *RowIterator {
+	return t.ReadWithOptions(ctx, table, keys, columns, nil)
+}
+
+// ReadUsingIndex calls ReadWithOptions with ReadOptions{Index: index}.
+func (t *txReadOnly) ReadUsingIndex(ctx context.Context, table, index string, keys KeySet, columns []string) (ri *RowIterator) {
+	return t.ReadWithOptions(ctx, table, keys, columns, &ReadOptions{Index: index})
+}
+
+// ReadOptions provides options for reading rows from a database.
+type ReadOptions struct {
+	// The index to use for reading. If non-empty, you can only read columns that are
+	// part of the index key, part of the primary key, or stored in the index due to
+	// a STORING clause in the index definition.
+	Index string
+
+	// The maximum number of rows to read. A limit value less than 1 means no limit.
+	Limit int
+}
+
+// ReadWithOptions returns a RowIterator for reading multiple rows from the database.
+// Pass a ReadOptions to modify the read operation.
+func (t *txReadOnly) ReadWithOptions(ctx context.Context, table string, keys KeySet, columns []string, opts *ReadOptions) (ri *RowIterator) {
+	ctx = traceStartSpan(ctx, "cloud.google.com/go/spanner.Read")
+	defer func() { traceEndSpan(ctx, ri.err) }()
+	var (
+		sh  *sessionHandle
+		ts  *sppb.TransactionSelector
+		err error
+	)
+	kset, err := keys.keySetProto()
+	if err != nil {
+		return &RowIterator{err: err}
+	}
+	if sh, ts, err = t.acquire(ctx); err != nil {
+		return &RowIterator{err: err}
+	}
+	// Cloud Spanner will return "Session not found" on bad sessions.
+	sid, client := sh.getID(), sh.getClient()
+	if sid == "" || client == nil {
+		// Might happen if the transaction is closed in the middle of an API call.
+		return &RowIterator{err: errSessionClosed(sh)}
+	}
+	index := ""
+	limit := 0
+	if opts != nil {
+		index = opts.Index
+		if opts.Limit > 0 {
+			limit = opts.Limit
+		}
+	}
+	return stream(
+		contextWithOutgoingMetadata(ctx, sh.getMetadata()),
+		func(ctx context.Context, resumeToken []byte) (streamingReceiver, error) {
+			return client.StreamingRead(ctx,
+				&sppb.ReadRequest{
+					Session:     sid,
+					Transaction: ts,
+					Table:       table,
+					Index:       index,
+					Columns:     columns,
+					KeySet:      kset,
+					ResumeToken: resumeToken,
+					Limit:       int64(limit),
+				})
+		},
+		t.setTimestamp,
+		t.release,
+	)
+}
+
+// errRowNotFound returns error for not being able to read the row identified by key.
+func errRowNotFound(table string, key Key) error {
+	return spannerErrorf(codes.NotFound, "row not found(Table: %v, PrimaryKey: %v)", table, key)
+}
+
+// ReadRow reads a single row from the database.
+//
+// If no row is present with the given key, then ReadRow returns an error where
+// spanner.ErrCode(err) is codes.NotFound.
+func (t *txReadOnly) ReadRow(ctx context.Context, table string, key Key, columns []string) (*Row, error) {
+	iter := t.Read(ctx, table, key, columns)
+	defer iter.Stop()
+	row, err := iter.Next()
+	switch err {
+	case iterator.Done:
+		return nil, errRowNotFound(table, key)
+	case nil:
+		return row, nil
+	default:
+		return nil, err
+	}
+}
+
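+// The sketch below is an illustrative, non-authoritative example of the read
+// methods above. It assumes an existing *Client named client and a
+// hypothetical "Accounts" table; error handling is partly elided.
+//
+//	// Single-row lookup; yields a codes.NotFound error if the key is absent.
+//	row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{int64(1)}, []string{"Nickname"})
+//
+//	// Streaming read of many rows; always stop the iterator when done.
+//	iter := client.Single().Read(ctx, "Accounts", spanner.AllKeys(), []string{"Nickname"})
+//	defer iter.Stop()
+//	for {
+//		row, err := iter.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		_ = row // process the row
+//	}
+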
+// Query executes a query against the database. It returns a RowIterator
+// for retrieving the resulting rows.
+//
+// Query returns only row data, without a query plan or execution statistics.
+// Use QueryWithStats to get rows along with the plan and statistics.
+// Use AnalyzeQuery to get just the plan.
+func (t *txReadOnly) Query(ctx context.Context, statement Statement) *RowIterator {
+	return t.query(ctx, statement, sppb.ExecuteSqlRequest_NORMAL)
+}
+
+// QueryWithStats executes a query against the database. It returns a RowIterator
+// for retrieving the resulting rows. The RowIterator will also be populated
+// with a query plan and execution statistics.
+func (t *txReadOnly) QueryWithStats(ctx context.Context, statement Statement) *RowIterator {
+	return t.query(ctx, statement, sppb.ExecuteSqlRequest_PROFILE)
+}
+
+// AnalyzeQuery returns the query plan for statement.
+func (t *txReadOnly) AnalyzeQuery(ctx context.Context, statement Statement) (*sppb.QueryPlan, error) {
+	iter := t.query(ctx, statement, sppb.ExecuteSqlRequest_PLAN)
+	for {
+		_, err := iter.Next()
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			return nil, err
+		}
+	}
+	if iter.QueryPlan == nil {
+		return nil, spannerErrorf(codes.Internal, "query plan unavailable")
+	}
+	return iter.QueryPlan, nil
+}
+
+func (t *txReadOnly) query(ctx context.Context, statement Statement, mode sppb.ExecuteSqlRequest_QueryMode) (ri *RowIterator) {
+	ctx = traceStartSpan(ctx, "cloud.google.com/go/spanner.Query")
+	defer func() { traceEndSpan(ctx, ri.err) }()
+	var (
+		sh  *sessionHandle
+		ts  *sppb.TransactionSelector
+		err error
+	)
+	if sh, ts, err = t.acquire(ctx); err != nil {
+		return &RowIterator{err: err}
+	}
+	// Cloud Spanner will return "Session not found" on bad sessions.
+	sid, client := sh.getID(), sh.getClient()
+	if sid == "" || client == nil {
+		// Might happen if the transaction is closed in the middle of an API call.
+		return &RowIterator{err: errSessionClosed(sh)}
+	}
+	req := &sppb.ExecuteSqlRequest{
+		Session:     sid,
+		Transaction: ts,
+		Sql:         statement.SQL,
+		QueryMode:   mode,
+	}
+	if err := statement.bindParams(req); err != nil {
+		return &RowIterator{err: err}
+	}
+	return stream(
+		contextWithOutgoingMetadata(ctx, sh.getMetadata()),
+		func(ctx context.Context, resumeToken []byte) (streamingReceiver, error) {
+			req.ResumeToken = resumeToken
+			return client.ExecuteStreamingSql(ctx, req)
+		},
+		t.setTimestamp,
+		t.release)
+}
+
+// txState is the status of a transaction.
+type txState int
+
+const (
+	// transaction is new, waiting to be initialized.
+	txNew txState = iota
+	// transaction is being initialized.
+	txInit
+	// transaction is active and can perform read/write.
+	txActive
+	// transaction is closed, cannot be used anymore.
+	txClosed
+)
+
+// errRtsUnavailable returns error for read transaction's read timestamp being unavailable.
+func errRtsUnavailable() error {
+	return spannerErrorf(codes.Internal, "read timestamp is unavailable")
+}
+
+// errTxNotInitialized returns error for using an uninitialized transaction.
+func errTxNotInitialized() error {
+	return spannerErrorf(codes.InvalidArgument, "cannot use an uninitialized transaction")
+}
+
+// errTxClosed returns error for using a closed transaction.
+func errTxClosed() error {
+	return spannerErrorf(codes.InvalidArgument, "cannot use a closed transaction")
+}
+
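+// An illustrative, non-authoritative sketch of the query methods above: it
+// assumes an existing *Client and a hypothetical "Accounts" table with a
+// "Balance" column.
+//
+//	stmt := spanner.Statement{
+//		SQL:    "SELECT COUNT(*) FROM Accounts WHERE Balance > @min",
+//		Params: map[string]interface{}{"min": int64(100)},
+//	}
+//	iter := client.Single().Query(ctx, stmt)
+//	defer iter.Stop()
+//	row, err := iter.Next()
+//	if err == nil {
+//		var count int64
+//		err = row.Columns(&count)
+//	}
+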
+// errUnexpectedTxState returns error for a transaction entering an unexpected state.
+func errUnexpectedTxState(ts txState) error {
+	return spannerErrorf(codes.FailedPrecondition, "unexpected transaction state: %v", ts)
+}
+
+// ReadOnlyTransaction provides a snapshot transaction with guaranteed
+// consistency across reads, but does not allow writes. Read-only
+// transactions can be configured to read at timestamps in the past.
+//
+// Read-only transactions do not take locks. Instead, they work by choosing a
+// Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do
+// not acquire locks, they do not block concurrent read-write transactions.
+//
+// Unlike locking read-write transactions, read-only transactions never
+// abort. They can fail if the chosen read timestamp is garbage collected;
+// however, the default garbage collection policy is generous enough that most
+// applications do not need to worry about this in practice. See the
+// documentation of TimestampBound for more details.
+//
+// A ReadOnlyTransaction consumes resources on the server until Close is
+// called.
+type ReadOnlyTransaction struct {
+	// txReadOnly contains methods for performing transactional reads.
+	txReadOnly
+
+	// singleUse indicates that the transaction can be used for only one read.
+	singleUse bool
+
+	// sp is the session pool for allocating a session to execute the read-only transaction. It is set only once during initialization of the ReadOnlyTransaction.
+	sp *sessionPool
+	// mu protects concurrent access to the internal states of ReadOnlyTransaction.
+	mu sync.Mutex
+	// tx is the transaction ID in Cloud Spanner that uniquely identifies the ReadOnlyTransaction.
+	tx transactionID
+	// txReadyOrClosed is for broadcasting that transaction ID has been returned by Cloud Spanner or that transaction is closed.
+	txReadyOrClosed chan struct{}
+	// state is the current transaction status of the ReadOnly transaction.
+	state txState
+	// sh is the sessionHandle allocated from sp.
+	sh *sessionHandle
+	// rts is the read timestamp returned by transactional reads.
+	rts time.Time
+	// tb is the read staleness bound specification for transactional reads.
+	tb TimestampBound
+}
+
+// errTxInitTimeout returns error for timing out while waiting for a transaction's initialization.
+func errTxInitTimeout() error {
+	return spannerErrorf(codes.Canceled, "timeout/context canceled in waiting for transaction's initialization")
+}
+
+// getTimestampBound returns the read staleness bound specified for the ReadOnlyTransaction.
+func (t *ReadOnlyTransaction) getTimestampBound() TimestampBound {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	return t.tb
+}
+
+// begin starts a snapshot read-only Transaction on Cloud Spanner.
+func (t *ReadOnlyTransaction) begin(ctx context.Context) error {
+	var (
+		locked bool
+		tx     transactionID
+		rts    time.Time
+		sh     *sessionHandle
+		err    error
+	)
+	defer func() {
+		if !locked {
+			t.mu.Lock()
+			// Not necessary, just to make it clear that t.mu is being held when locked == true.
+			locked = true
+		}
+		if t.state != txClosed {
+			// Signal other initialization routines.
+			close(t.txReadyOrClosed)
+			t.txReadyOrClosed = make(chan struct{})
+		}
+		t.mu.Unlock()
+		if err != nil && sh != nil {
+			// Got a valid session handle, but failed to initialize transaction on Cloud Spanner.
+			if shouldDropSession(err) {
+				sh.destroy()
+			}
+			// If sh.destroy was already executed, this becomes a noop.
+			sh.recycle()
+		}
+	}()
+	sh, err = t.sp.take(ctx)
+	if err != nil {
+		return err
+	}
+	err = runRetryable(contextWithOutgoingMetadata(ctx, sh.getMetadata()), func(ctx context.Context) error {
+		res, e := sh.getClient().BeginTransaction(ctx, &sppb.BeginTransactionRequest{
+			Session: sh.getID(),
+			Options: &sppb.TransactionOptions{
+				Mode: &sppb.TransactionOptions_ReadOnly_{
+					ReadOnly: buildTransactionOptionsReadOnly(t.getTimestampBound(), true),
+				},
+			},
+		})
+		if e != nil {
+			return e
+		}
+		tx = res.Id
+		if res.ReadTimestamp != nil {
+			rts = time.Unix(res.ReadTimestamp.Seconds, int64(res.ReadTimestamp.Nanos))
+		}
+		return nil
+	})
+	t.mu.Lock()
+	locked = true            // defer function will be executed with t.mu being held.
+	if t.state == txClosed { // During the execution of t.begin(), t.Close() was invoked.
+		return errSessionClosed(sh)
+	}
+	// If begin() fails, this allows other queries to take over the initialization.
+	t.tx = nil
+	if err == nil {
+		t.tx = tx
+		t.rts = rts
+		t.sh = sh
+		// State transitions to txActive.
+		t.state = txActive
+	}
+	return err
+}
+
+// acquire implements txReadEnv.acquire.
+func (t *ReadOnlyTransaction) acquire(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) {
+	if err := checkNestedTxn(ctx); err != nil {
+		return nil, nil, err
+	}
+	if t.singleUse {
+		return t.acquireSingleUse(ctx)
+	}
+	return t.acquireMultiUse(ctx)
+}
+
+func (t *ReadOnlyTransaction) acquireSingleUse(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	switch t.state {
+	case txClosed:
+		// A closed single-use transaction can never be reused.
+		return nil, nil, errTxClosed()
+	case txNew:
+		t.state = txClosed
+		ts := &sppb.TransactionSelector{
+			Selector: &sppb.TransactionSelector_SingleUse{
+				SingleUse: &sppb.TransactionOptions{
+					Mode: &sppb.TransactionOptions_ReadOnly_{
+						ReadOnly: buildTransactionOptionsReadOnly(t.tb, true),
+					},
+				},
+			},
+		}
+		sh, err := t.sp.take(ctx)
+		if err != nil {
+			return nil, nil, err
+		}
+		// Install session handle into t, which can be used for readonly operations later.
+		t.sh = sh
+		return sh, ts, nil
+	}
+	us := t.state
+	// A single-use transaction should only be in either txNew state or txClosed state.
+	return nil, nil, errUnexpectedTxState(us)
+}
+
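+// To make the single-use/multi-use distinction concrete, here is an
+// illustrative sketch (table and keys are assumptions, not package API): a
+// transaction from client.Single() is consumed by its first read, while one
+// from client.ReadOnlyTransaction() serves many reads at one snapshot
+// timestamp and must be closed explicitly.
+//
+//	// Single use: exactly one read is allowed.
+//	row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{int64(1)}, []string{"Balance"})
+//
+//	// Multi use: several reads observe the same snapshot; Close releases it.
+//	ro := client.ReadOnlyTransaction()
+//	defer ro.Close()
+//	r1, err := ro.ReadRow(ctx, "Accounts", spanner.Key{int64(1)}, []string{"Balance"})
+//	r2, err := ro.ReadRow(ctx, "Accounts", spanner.Key{int64(2)}, []string{"Balance"})
+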
+func (t *ReadOnlyTransaction) acquireMultiUse(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) {
+	for {
+		t.mu.Lock()
+		switch t.state {
+		case txClosed:
+			t.mu.Unlock()
+			return nil, nil, errTxClosed()
+		case txNew:
+			// State transitions to txInit so that no further TimestampBound change is accepted.
+			t.state = txInit
+			t.mu.Unlock()
+			continue
+		case txInit:
+			if t.tx != nil {
+				// Wait for a transaction ID to become ready.
+				txReadyOrClosed := t.txReadyOrClosed
+				t.mu.Unlock()
+				select {
+				case <-txReadyOrClosed:
+					// Need to check transaction state again.
+					continue
+				case <-ctx.Done():
+					// Waiting for initialization timed out; return the error directly.
+					return nil, nil, errTxInitTimeout()
+				}
+			}
+			// Take ownership of initializing the transaction.
+			t.tx = transactionID{}
+			t.mu.Unlock()
+			// Begin a read-only transaction.
+			// TODO: consider adding a transaction option which allows queries to initiate transactions by themselves. Note that this option might not be
+			// always good because the ID of the new transaction won't be ready till the query returns some data or completes.
+			if err := t.begin(ctx); err != nil {
+				return nil, nil, err
+			}
+			// If t.begin() succeeded, t.state should have been changed to txActive, so we can just continue here.
+			continue
+		case txActive:
+			sh := t.sh
+			ts := &sppb.TransactionSelector{
+				Selector: &sppb.TransactionSelector_Id{
+					Id: t.tx,
+				},
+			}
+			t.mu.Unlock()
+			return sh, ts, nil
+		}
+		state := t.state
+		t.mu.Unlock()
+		return nil, nil, errUnexpectedTxState(state)
+	}
+}
+
+func (t *ReadOnlyTransaction) setTimestamp(ts time.Time) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.rts.IsZero() {
+		t.rts = ts
+	}
+}
+
+// release implements txReadEnv.release.
+func (t *ReadOnlyTransaction) release(err error) {
+	t.mu.Lock()
+	sh := t.sh
+	t.mu.Unlock()
+	if sh != nil { // sh could be nil if t.acquire() fails.
+		if shouldDropSession(err) {
+			sh.destroy()
+		}
+		if t.singleUse {
+			// If session handle is already destroyed, this becomes a noop.
+			sh.recycle()
+		}
+	}
+}
+
+// Close closes a ReadOnlyTransaction; the transaction cannot perform any reads after being closed.
+func (t *ReadOnlyTransaction) Close() {
+	if t.singleUse {
+		return
+	}
+	t.mu.Lock()
+	if t.state != txClosed {
+		t.state = txClosed
+		close(t.txReadyOrClosed)
+	}
+	sh := t.sh
+	t.mu.Unlock()
+	if sh == nil {
+		return
+	}
+	// If session handle is already destroyed, this becomes a noop.
+	// If there are still active queries and if the recycled session is reused before they complete, Cloud Spanner will cancel them
+	// on behalf of the new transaction on the session.
+	if sh != nil {
+		sh.recycle()
+	}
+}
+
+// Timestamp returns the timestamp chosen to perform reads and
+// queries in this transaction. The value can only be read after some
+// read or query has either returned some data or completed without
+// returning any data.
+func (t *ReadOnlyTransaction) Timestamp() (time.Time, error) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.rts.IsZero() {
+		return t.rts, errRtsUnavailable()
+	}
+	return t.rts, nil
+}
+
+// WithTimestampBound specifies the TimestampBound to use for read or query.
+// This can only be used before the first read or query is invoked. Note:
+// bounded staleness is not available with general ReadOnlyTransactions; use a
+// single-use ReadOnlyTransaction instead.
+//
+// The returned value is the ReadOnlyTransaction so calls can be chained.
+func (t *ReadOnlyTransaction) WithTimestampBound(tb TimestampBound) *ReadOnlyTransaction {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.state == txNew {
+		// Only allow setting the TimestampBound before the first query.
+		t.tb = tb
+	}
+	return t
+}
+
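+// An illustrative sketch of WithTimestampBound and Timestamp (the query and
+// table are assumptions). Bounded staleness is shown on a single-use
+// transaction, since multi-use read-only transactions do not accept it.
+//
+//	// Exact staleness on a multi-use read-only transaction.
+//	ro := client.ReadOnlyTransaction().WithTimestampBound(spanner.ExactStaleness(10 * time.Second))
+//	defer ro.Close()
+//	iter := ro.Query(ctx, spanner.NewStatement("SELECT 1"))
+//	defer iter.Stop()
+//	_, err := iter.Next() // once data is returned, the read timestamp is known
+//	if ts, terr := ro.Timestamp(); terr == nil {
+//		log.Printf("read at %v", ts)
+//	}
+//
+//	// Bounded staleness must use a single-use transaction.
+//	row, err := client.Single().
+//		WithTimestampBound(spanner.MaxStaleness(30 * time.Second)).
+//		ReadRow(ctx, "Accounts", spanner.Key{int64(1)}, []string{"Balance"})
+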
+// ReadWriteTransaction provides a locking read-write transaction.
+//
+// This type of transaction is the only way to write data into Cloud Spanner;
+// (*Client).Apply and (*Client).ApplyAtLeastOnce use transactions
+// internally. These transactions rely on pessimistic locking and, if
+// necessary, two-phase commit. Locking read-write transactions may abort,
+// requiring the application to retry. However, the interface exposed by
+// (*Client).ReadWriteTransaction eliminates the need for applications to write
+// retry loops explicitly.
+//
+// Locking transactions may be used to atomically read-modify-write data
+// anywhere in a database. This type of transaction is externally consistent.
+//
+// Clients should attempt to minimize the amount of time a transaction is
+// active. Faster transactions commit with higher probability and cause less
+// contention. Cloud Spanner attempts to keep read locks active as long as the
+// transaction continues to do reads. Long periods of inactivity at the client
+// may cause Cloud Spanner to release a transaction's locks and abort it.
+//
+// Reads performed within a transaction acquire locks on the data being
+// read. Writes can only be done at commit time, after all reads have been
+// completed. Conceptually, a read-write transaction consists of zero or more
+// reads or SQL queries followed by a commit.
+//
+// See (*Client).ReadWriteTransaction for an example.
+//
+// Semantics
+//
+// Cloud Spanner can commit the transaction if all read locks it acquired are still
+// valid at commit time, and it is able to acquire write locks for all
+// writes. Cloud Spanner can abort the transaction for any reason. If a commit
+// attempt returns ABORTED, Cloud Spanner guarantees that the transaction has not
+// modified any user data in Cloud Spanner.
+//
+// Unless the transaction commits, Cloud Spanner makes no guarantees about how long
+// the transaction's locks were held for. It is an error to use Cloud Spanner locks
+// for any sort of mutual exclusion other than between Cloud Spanner transactions
+// themselves.
+//
+// Aborted transactions
+//
+// Application code does not need to retry explicitly; RunInTransaction will
+// automatically retry a transaction if an attempt results in an abort. The
+// lock priority of a transaction increases after each prior aborted
+// transaction, meaning that the next attempt has a slightly better chance of
+// success than before.
+//
+// Under some circumstances (e.g., many transactions attempting to modify the
+// same row(s)), a transaction can abort many times in a short period before
+// successfully committing. Thus, it is not a good idea to cap the number of
+// retries a transaction can attempt; instead, it is better to limit the total
+// amount of wall time spent retrying.
+//
+// Idle transactions
+//
+// A transaction is considered idle if it has no outstanding reads or SQL
+// queries and has not started a read or SQL query within the last 10
+// seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold
+// on to locks indefinitely. In that case, the commit will fail with error
+// ABORTED.
+//
+// If this behavior is undesirable, periodically executing a simple SQL query
+// in the transaction (e.g., SELECT 1) prevents the transaction from becoming
+// idle.
+type ReadWriteTransaction struct {
+	// txReadOnly contains methods for performing transactional reads.
+	txReadOnly
+	// sh is the sessionHandle allocated from sp. It is set only once during the initialization of ReadWriteTransaction.
+	sh *sessionHandle
+	// tx is the transaction ID in Cloud Spanner that uniquely identifies the ReadWriteTransaction.
+	// It is set only once in ReadWriteTransaction.begin() during the initialization of ReadWriteTransaction.
+	tx transactionID
+	// mu protects concurrent access to the internal states of ReadWriteTransaction.
+	mu sync.Mutex
+	// state is the current transaction status of the read-write transaction.
+	state txState
+	// wb is the set of buffered mutations waiting to be committed.
+	wb []*Mutation
+}
+
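+// An illustrative read-modify-write sketch (table, columns, and amounts are
+// assumptions). The callback may run several times if the transaction aborts;
+// buffered writes are applied only on the final, successful commit.
+//
+//	_, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, txn *spanner.ReadWriteTransaction) error {
+//		row, err := txn.ReadRow(ctx, "Accounts", spanner.Key{int64(1)}, []string{"Balance"})
+//		if err != nil {
+//			return err
+//		}
+//		var balance int64
+//		if err := row.Columns(&balance); err != nil {
+//			return err
+//		}
+//		// Mutations are buffered locally and sent with the commit request.
+//		return txn.BufferWrite([]*spanner.Mutation{
+//			spanner.Update("Accounts", []string{"AccountId", "Balance"}, []interface{}{int64(1), balance - 10}),
+//		})
+//	})
+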
+// BufferWrite adds a list of mutations to the set of updates that will be
+// applied when the transaction is committed. It does not actually apply the
+// write until the transaction is committed, so the operation does not
+// block. The effects of the write won't be visible to any reads (including
+// reads done in the same transaction) until the transaction commits.
+//
+// See the example for Client.ReadWriteTransaction.
+func (t *ReadWriteTransaction) BufferWrite(ms []*Mutation) error {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.state == txClosed {
+		return errTxClosed()
+	}
+	if t.state != txActive {
+		return errUnexpectedTxState(t.state)
+	}
+	t.wb = append(t.wb, ms...)
+	return nil
+}
+
+// acquire implements txReadEnv.acquire.
+func (t *ReadWriteTransaction) acquire(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) {
+	ts := &sppb.TransactionSelector{
+		Selector: &sppb.TransactionSelector_Id{
+			Id: t.tx,
+		},
+	}
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	switch t.state {
+	case txClosed:
+		return nil, nil, errTxClosed()
+	case txActive:
+		return t.sh, ts, nil
+	}
+	return nil, nil, errUnexpectedTxState(t.state)
+}
+
+// release implements txReadEnv.release.
+func (t *ReadWriteTransaction) release(err error) {
+	t.mu.Lock()
+	sh := t.sh
+	t.mu.Unlock()
+	if sh != nil && shouldDropSession(err) {
+		sh.destroy()
+	}
+}
+
+func beginTransaction(ctx context.Context, sid string, client sppb.SpannerClient) (transactionID, error) {
+	var tx transactionID
+	err := runRetryable(ctx, func(ctx context.Context) error {
+		res, e := client.BeginTransaction(ctx, &sppb.BeginTransactionRequest{
+			Session: sid,
+			Options: &sppb.TransactionOptions{
+				Mode: &sppb.TransactionOptions_ReadWrite_{
+					ReadWrite: &sppb.TransactionOptions_ReadWrite{},
+				},
+			},
+		})
+		if e != nil {
+			return e
+		}
+		tx = res.Id
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	return tx, nil
+}
+
+// begin starts a read-write transaction on Cloud Spanner; it is always called before any of the public APIs.
+func (t *ReadWriteTransaction) begin(ctx context.Context) error {
+	if t.tx != nil {
+		t.state = txActive
+		return nil
+	}
+	tx, err := beginTransaction(contextWithOutgoingMetadata(ctx, t.sh.getMetadata()), t.sh.getID(), t.sh.getClient())
+	if err == nil {
+		t.tx = tx
+		t.state = txActive
+		return nil
+	}
+	if shouldDropSession(err) {
+		t.sh.destroy()
+	}
+	return err
+}
+
+// commit tries to commit a read-write transaction to Cloud Spanner. It also returns the commit timestamp for the transaction.
+func (t *ReadWriteTransaction) commit(ctx context.Context) (time.Time, error) {
+	var ts time.Time
+	t.mu.Lock()
+	t.state = txClosed // No further operations after commit.
+	mPb, err := mutationsProto(t.wb)
+	t.mu.Unlock()
+	if err != nil {
+		return ts, err
+	}
+	// In case the sessionHandle was destroyed but the transaction body failed to report it.
+	sid, client := t.sh.getID(), t.sh.getClient()
+	if sid == "" || client == nil {
+		return ts, errSessionClosed(t.sh)
+	}
+	err = runRetryable(contextWithOutgoingMetadata(ctx, t.sh.getMetadata()), func(ctx context.Context) error {
+		var trailer metadata.MD
+		res, e := client.Commit(ctx, &sppb.CommitRequest{
+			Session: sid,
+			Transaction: &sppb.CommitRequest_TransactionId{
+				TransactionId: t.tx,
+			},
+			Mutations: mPb,
+		}, grpc.Trailer(&trailer))
+		if e != nil {
+			return toSpannerErrorWithMetadata(e, trailer)
+		}
+		if tstamp := res.GetCommitTimestamp(); tstamp != nil {
+			ts = time.Unix(tstamp.Seconds, int64(tstamp.Nanos))
+		}
+		return nil
+	})
+	if shouldDropSession(err) {
+		t.sh.destroy()
+	}
+	return ts, err
+}
+
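+// For callers who only need to write, an illustrative, non-authoritative
+// sketch of the mutation-only path (table and values are assumptions):
+//
+//	ms := []*spanner.Mutation{
+//		spanner.Insert("Accounts",
+//			[]string{"AccountId", "Nickname", "Balance"},
+//			[]interface{}{int64(3), "Baz", int64(25)}),
+//	}
+//	// Apply runs the mutations in a retried read-write transaction.
+//	_, err := client.Apply(ctx, ms)
+//	// Alternatively, ApplyAtLeastOnce is cheaper but may apply the mutations
+//	// more than once if the commit is retried after an ambiguous failure.
+//	_, err = client.Apply(ctx, ms, spanner.ApplyAtLeastOnce())
+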
+// rollback is called when a commit is aborted or the transaction body runs into error.
+func (t *ReadWriteTransaction) rollback(ctx context.Context) {
+	t.mu.Lock()
+	// Forbid further operations on the rolled-back transaction.
+	t.state = txClosed
+	t.mu.Unlock()
+	// In case the sessionHandle was destroyed but the transaction body failed to report it.
+	sid, client := t.sh.getID(), t.sh.getClient()
+	if sid == "" || client == nil {
+		return
+	}
+	err := runRetryable(contextWithOutgoingMetadata(ctx, t.sh.getMetadata()), func(ctx context.Context) error {
+		_, e := client.Rollback(ctx, &sppb.RollbackRequest{
+			Session:       sid,
+			TransactionId: t.tx,
+		})
+		return e
+	})
+	if shouldDropSession(err) {
+		t.sh.destroy()
+	}
+	return
+}
+
+// runInTransaction executes f under a read-write transaction context.
+func (t *ReadWriteTransaction) runInTransaction(ctx context.Context, f func(context.Context, *ReadWriteTransaction) error) (time.Time, error) {
+	var (
+		ts  time.Time
+		err error
+	)
+	if err = f(context.WithValue(ctx, transactionInProgressKey{}, 1), t); err == nil {
+		// Try to commit if the transaction body returns no error.
+		ts, err = t.commit(ctx)
+	}
+	if err != nil {
+		if isAbortErr(err) {
+			// Retry the transaction using the same session on ABORT error.
+			// Cloud Spanner will create the new transaction with the previous one's wound-wait priority.
+			err = errRetry(err)
+			return ts, err
+		}
+		// Not going to commit; according to the API spec, the transaction should be rolled back.
+		t.rollback(ctx)
+		return ts, err
+	}
+	// err == nil, return the commit timestamp.
+	return ts, nil
+}
+
+// writeOnlyTransaction provides the most efficient way of doing write-only transactions. It essentially does blind writes to Cloud Spanner.
+type writeOnlyTransaction struct {
+	// sp is the session pool which writeOnlyTransaction uses to get Cloud Spanner sessions for blind writes.
+	sp *sessionPool
+}
+
+// applyAtLeastOnce commits a list of mutations to Cloud Spanner at least once, unless one of the following happens:
+// 1) The context times out.
+// 2) An unretryable error (e.g. database not found) occurs.
+// 3) There is a malformed Mutation object.
+func (t *writeOnlyTransaction) applyAtLeastOnce(ctx context.Context, ms ...*Mutation) (time.Time, error) {
+	var (
+		ts time.Time
+		sh *sessionHandle
+	)
+	mPb, err := mutationsProto(ms)
+	if err != nil {
+		// Malformed mutation found, just return the error.
+		return ts, err
+	}
+	err = runRetryable(ctx, func(ct context.Context) error {
+		var e error
+		var trailers metadata.MD
+		if sh == nil || sh.getID() == "" || sh.getClient() == nil {
+			// No usable session for doing the commit, take one from the pool.
+			sh, e = t.sp.take(ctx)
+			if e != nil {
+				// sessionPool.take already retries for session creations/retrievals.
+				return e
+			}
+		}
+		res, e := sh.getClient().Commit(contextWithOutgoingMetadata(ctx, sh.getMetadata()), &sppb.CommitRequest{
+			Session: sh.getID(),
+			Transaction: &sppb.CommitRequest_SingleUseTransaction{
+				SingleUseTransaction: &sppb.TransactionOptions{
+					Mode: &sppb.TransactionOptions_ReadWrite_{
+						ReadWrite: &sppb.TransactionOptions_ReadWrite{},
+					},
+				},
+			},
+			Mutations: mPb,
+		}, grpc.Trailer(&trailers))
+		if e != nil {
+			if isAbortErr(e) {
+				// Mask ABORT error as retryable, because aborted transactions are allowed to be retried.
+				return errRetry(toSpannerErrorWithMetadata(e, trailers))
+			}
+			if shouldDropSession(e) {
+				// Discard the bad session.
+				sh.destroy()
+			}
+			return e
+		}
+		if tstamp := res.GetCommitTimestamp(); tstamp != nil {
+			ts = time.Unix(tstamp.Seconds, int64(tstamp.Nanos))
+		}
+		return nil
+	})
+	if sh != nil {
+		sh.recycle()
+	}
+	return ts, err
+}
+
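+// A minimal illustrative sketch of classifying errors with ErrCode, in the
+// same spirit as isAbortErr below (the helper name is an assumption, not
+// package API):
+//
+//	func isRetryableForMyApp(err error) bool {
+//		// Aborted transactions are safe to retry; see "Aborted transactions" above.
+//		return spanner.ErrCode(err) == codes.Aborted
+//	}
+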
+// isAbortErr returns true if the error indicates that a gRPC call was aborted on the server side.
+func isAbortErr(err error) bool {
+	if err == nil {
+		return false
+	}
+	if ErrCode(err) == codes.Aborted {
+		return true
+	}
+	return false
+}
diff --git a/vendor/cloud.google.com/go/spanner/transaction_test.go b/vendor/cloud.google.com/go/spanner/transaction_test.go
new file mode 100644
index 0000000..052e4a9
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/transaction_test.go
@@ -0,0 +1,222 @@
+/*
+Copyright 2017 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spanner
+
+import (
+	"errors"
+	"sync"
+	"testing"
+	"time"
+
+	"cloud.google.com/go/spanner/internal/testutil"
+
+	"golang.org/x/net/context"
+	sppb "google.golang.org/genproto/googleapis/spanner/v1"
+	"google.golang.org/grpc/codes"
+)
+
+var (
+	errAbrt = spannerErrorf(codes.Aborted, "")
+	errUsr  = errors.New("error")
+)
+
+// mockClient sets up a session pool and a Client backed by a mock Cloud Spanner client.
+func mockClient(t *testing.T) (*sessionPool, *testutil.MockCloudSpannerClient, *Client) {
+	var (
+		mc       = testutil.NewMockCloudSpannerClient(t)
+		spc      = SessionPoolConfig{}
+		database = "mockdb"
+	)
+	spc.getRPCClient = func() (sppb.SpannerClient, error) {
+		return mc, nil
+	}
+	sp, err := newSessionPool(database, spc, nil)
+	if err != nil {
+		t.Fatalf("cannot create session pool: %v", err)
+	}
+	return sp, mc, &Client{
+		database:     database,
+		idleSessions: sp,
+	}
+}
+
+// TestReadOnlyAcquire tests acquire for ReadOnlyTransaction.
+func TestReadOnlyAcquire(t *testing.T) {
+	t.Parallel()
+	_, mc, client := mockClient(t)
+	defer client.Close()
+	mc.SetActions(
+		testutil.Action{"BeginTransaction", errUsr},
+		testutil.Action{"BeginTransaction", nil},
+		testutil.Action{"BeginTransaction", nil},
+	)
+
+	// Singleuse should only be used once.
+	txn := client.Single()
+	defer txn.Close()
+	_, _, e := txn.acquire(context.Background())
+	if e != nil {
+		t.Errorf("Acquire for single use, got %v, want nil.", e)
+	}
+	_, _, e = txn.acquire(context.Background())
+	if wantErr := errTxClosed(); !testEqual(e, wantErr) {
+		t.Errorf("Second acquire for single use, got %v, want %v.", e, wantErr)
+	}
+	// Multiuse can recover from acquire failure.
+	txn = client.ReadOnlyTransaction()
+	_, _, e = txn.acquire(context.Background())
+	if wantErr := toSpannerError(errUsr); !testEqual(e, wantErr) {
+		t.Errorf("Acquire for multi use, got %v, want %v.", e, wantErr)
+	}
+	_, _, e = txn.acquire(context.Background())
+	if e != nil {
+		t.Errorf("Acquire for multi use, got %v, want nil.", e)
+	}
+	txn.Close()
+	// Multiuse cannot be used after close.
+	_, _, e = txn.acquire(context.Background())
+	if wantErr := errTxClosed(); !testEqual(e, wantErr) {
+		t.Errorf("Second acquire for multi use, got %v, want %v.", e, wantErr)
+	}
+	// Multiuse can be acquired concurrently.
+ txn = client.ReadOnlyTransaction() + defer txn.Close() + mc.Freeze() + var ( + sh1 *sessionHandle + sh2 *sessionHandle + ts1 *sppb.TransactionSelector + ts2 *sppb.TransactionSelector + wg = sync.WaitGroup{} + ) + acquire := func(sh **sessionHandle, ts **sppb.TransactionSelector) { + defer wg.Done() + var e error + *sh, *ts, e = txn.acquire(context.Background()) + if e != nil { + t.Errorf("Concurrent acquire for multiuse, got %v, expect nil.", e) + } + } + wg.Add(2) + go acquire(&sh1, &ts1) + go acquire(&sh2, &ts2) + <-time.After(100 * time.Millisecond) + mc.Unfreeze() + wg.Wait() + if !testEqual(sh1.session, sh2.session) { + t.Errorf("Expect acquire to get same session handle, got %v and %v.", sh1, sh2) + } + if !testEqual(ts1, ts2) { + t.Errorf("Expect acquire to get same transaction selector, got %v and %v.", ts1, ts2) + } +} + +// TestRetryOnAbort tests transaction retries on abort. +func TestRetryOnAbort(t *testing.T) { + t.Parallel() + _, mc, client := mockClient(t) + defer client.Close() + // commit in writeOnlyTransaction + mc.SetActions( + testutil.Action{"Commit", errAbrt}, // abort on first commit + testutil.Action{"Commit", nil}, + ) + + ms := []*Mutation{ + Insert("Accounts", []string{"AccountId", "Nickname", "Balance"}, []interface{}{int64(1), "Foo", int64(50)}), + Insert("Accounts", []string{"AccountId", "Nickname", "Balance"}, []interface{}{int64(2), "Bar", int64(1)}), + } + if _, e := client.Apply(context.Background(), ms, ApplyAtLeastOnce()); e != nil { + t.Errorf("applyAtLeastOnce retry on abort, got %v, want nil.", e) + } + // begin and commit in ReadWriteTransaction + mc.SetActions( + testutil.Action{"BeginTransaction", nil}, // let takeWriteSession succeed and get a session handle + testutil.Action{"Commit", errAbrt}, // let first commit fail and retry will begin new transaction + testutil.Action{"BeginTransaction", errAbrt}, // this time we can fail the begin attempt + testutil.Action{"BeginTransaction", nil}, + testutil.Action{"Commit", nil}, + ) + + if _, e := client.Apply(context.Background(), ms); e != nil { + t.Errorf("ReadWriteTransaction retry on abort, got %v, want nil.", e) + } +} + +// TestBadSession tests bad session (session not found error). +// TODO: session closed from transaction close +func TestBadSession(t *testing.T) { + t.Parallel() + ctx := context.Background() + sp, mc, client := mockClient(t) + defer client.Close() + var sid string + // Prepare a session, get the session id for use in testing. + if s, e := sp.take(ctx); e != nil { + t.Fatal("Prepare session failed.") + } else { + sid = s.getID() + s.recycle() + } + + wantErr := spannerErrorf(codes.NotFound, "Session not found: %v", sid) + // ReadOnlyTransaction + mc.SetActions( + testutil.Action{"BeginTransaction", wantErr}, + testutil.Action{"BeginTransaction", wantErr}, + testutil.Action{"BeginTransaction", wantErr}, + ) + txn := client.ReadOnlyTransaction() + defer txn.Close() + if _, _, got := txn.acquire(ctx); !testEqual(wantErr, got) { + t.Errorf("Expect acquire to fail, got %v, want %v.", got, wantErr) + } + // The failure should recycle the session, we expect it to be used in following requests. 
+ if got := txn.Query(ctx, NewStatement("SELECT 1")); !testEqual(wantErr, got.err) { + t.Errorf("Expect Query to fail, got %v, want %v.", got.err, wantErr) + } + if got := txn.Read(ctx, "Users", KeySets(Key{"alice"}, Key{"bob"}), []string{"name", "email"}); !testEqual(wantErr, got.err) { + t.Errorf("Expect Read to fail, got %v, want %v.", got.err, wantErr) + } + // writeOnlyTransaction + ms := []*Mutation{ + Insert("Accounts", []string{"AccountId", "Nickname", "Balance"}, []interface{}{int64(1), "Foo", int64(50)}), + Insert("Accounts", []string{"AccountId", "Nickname", "Balance"}, []interface{}{int64(2), "Bar", int64(1)}), + } + mc.SetActions(testutil.Action{"Commit", wantErr}) + if _, got := client.Apply(context.Background(), ms, ApplyAtLeastOnce()); !testEqual(wantErr, got) { + t.Errorf("Expect applyAtLeastOnce to fail, got %v, want %v.", got, wantErr) + } +} + +func TestFunctionErrorReturned(t *testing.T) { + t.Parallel() + _, mc, client := mockClient(t) + defer client.Close() + mc.SetActions( + testutil.Action{"BeginTransaction", nil}, + testutil.Action{"Rollback", nil}, + ) + + want := errors.New("an error") + _, got := client.ReadWriteTransaction(context.Background(), + func(context.Context, *ReadWriteTransaction) error { return want }) + if got != want { + t.Errorf("got <%v>, want <%v>", got, want) + } + mc.CheckActionsConsumed() +} diff --git a/vendor/cloud.google.com/go/spanner/util.go b/vendor/cloud.google.com/go/spanner/util.go new file mode 100644 index 0000000..d35fec2 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/util.go @@ -0,0 +1,33 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +// maxUint64 returns the maximum of two uint64 +func maxUint64(a, b uint64) uint64 { + if a > b { + return a + } + return b +} + +// minUint64 returns the minimum of two uint64 +func minUint64(a, b uint64) uint64 { + if a > b { + return b + } + return a +} diff --git a/vendor/cloud.google.com/go/spanner/util_test.go b/vendor/cloud.google.com/go/spanner/util_test.go new file mode 100644 index 0000000..6a0f9a8 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/util_test.go @@ -0,0 +1,28 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spanner + +import ( + "cloud.google.com/go/internal/testutil" + "github.com/google/go-cmp/cmp" +) + +func testEqual(a, b interface{}) bool { + return testutil.Equal(a, b, + cmp.AllowUnexported(TimestampBound{}, Error{}, Mutation{}, Row{}, + Partition{}, BatchReadOnlyTransactionID{})) +} diff --git a/vendor/cloud.google.com/go/spanner/value.go b/vendor/cloud.google.com/go/spanner/value.go new file mode 100644 index 0000000..9497443 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/value.go @@ -0,0 +1,1442 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "encoding/base64" + "fmt" + "math" + "reflect" + "strconv" + "time" + + "cloud.google.com/go/civil" + "cloud.google.com/go/internal/fields" + proto "github.com/golang/protobuf/proto" + proto3 "github.com/golang/protobuf/ptypes/struct" + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc/codes" +) + +const commitTimestampPlaceholderString = "spanner.commit_timestamp()" + +var ( + // CommitTimestamp is a special value used to tell Cloud Spanner + // to insert the commit timestamp of the transaction into a column. + // It can be used in a Mutation, or directly used in + // InsertStruct or InsertMap. See ExampleCommitTimestamp. + // This is just a placeholder and the actual value stored in this + // variable has no meaning. + CommitTimestamp time.Time = commitTimestamp + commitTimestamp = time.Unix(0, 0).In(time.FixedZone("CommitTimestamp placeholder", 0xDB)) +) + +// NullInt64 represents a Cloud Spanner INT64 that may be NULL. +type NullInt64 struct { + Int64 int64 + Valid bool // Valid is true if Int64 is not NULL. +} + +// String implements Stringer.String for NullInt64 +func (n NullInt64) String() string { + if !n.Valid { + return fmt.Sprintf("%v", "") + } + return fmt.Sprintf("%v", n.Int64) +} + +// NullString represents a Cloud Spanner STRING that may be NULL. +type NullString struct { + StringVal string + Valid bool // Valid is true if StringVal is not NULL. +} + +// String implements Stringer.String for NullString +func (n NullString) String() string { + if !n.Valid { + return fmt.Sprintf("%v", "") + } + return fmt.Sprintf("%q", n.StringVal) +} + +// NullFloat64 represents a Cloud Spanner FLOAT64 that may be NULL. +type NullFloat64 struct { + Float64 float64 + Valid bool // Valid is true if Float64 is not NULL. +} + +// String implements Stringer.String for NullFloat64 +func (n NullFloat64) String() string { + if !n.Valid { + return fmt.Sprintf("%v", "") + } + return fmt.Sprintf("%v", n.Float64) +} + +// NullBool represents a Cloud Spanner BOOL that may be NULL. +type NullBool struct { + Bool bool + Valid bool // Valid is true if Bool is not NULL. +} + +// String implements Stringer.String for NullBool +func (n NullBool) String() string { + if !n.Valid { + return fmt.Sprintf("%v", "") + } + return fmt.Sprintf("%v", n.Bool) +} + +// NullTime represents a Cloud Spanner TIMESTAMP that may be null. 
+type NullTime struct {
+ Time time.Time
+ Valid bool // Valid is true if Time is not NULL.
+}
+
+// String implements Stringer.String for NullTime
+func (n NullTime) String() string {
+ if !n.Valid {
+ return fmt.Sprintf("%s", "")
+ }
+ return fmt.Sprintf("%q", n.Time.Format(time.RFC3339Nano))
+}
+
+// NullDate represents a Cloud Spanner DATE that may be null.
+type NullDate struct {
+ Date civil.Date
+ Valid bool // Valid is true if Date is not NULL.
+}
+
+// String implements Stringer.String for NullDate
+func (n NullDate) String() string {
+ if !n.Valid {
+ return fmt.Sprintf("%s", "")
+ }
+ return fmt.Sprintf("%q", n.Date)
+}
+
+// NullRow represents a Cloud Spanner STRUCT that may be NULL.
+// See also the documentation for Row.
+// Note that NullRow is not a valid Cloud Spanner column Type.
+type NullRow struct {
+ Row Row
+ Valid bool // Valid is true if Row is not NULL.
+}
+
+// GenericColumnValue represents the generic encoded value and type of the
+// column. See google.spanner.v1.ResultSet proto for details. This can be
+// useful for proxying query results when the result types are not known in
+// advance.
+//
+// If you populate a GenericColumnValue from a row using Row.Column or related
+// methods, do not modify the contents of Type and Value.
+type GenericColumnValue struct {
+ Type *sppb.Type
+ Value *proto3.Value
+}
+
+// Decode decodes a GenericColumnValue. The ptr argument should be a pointer
+// to a Go value that can accept v.
+func (v GenericColumnValue) Decode(ptr interface{}) error {
+ return decodeValue(v.Value, v.Type, ptr)
+}
+
+// newGenericColumnValue creates a GenericColumnValue from a Go value that is
+// valid for Cloud Spanner.
+func newGenericColumnValue(v interface{}) (*GenericColumnValue, error) {
+ value, typ, err := encodeValue(v)
+ if err != nil {
+ return nil, err
+ }
+ return &GenericColumnValue{Value: value, Type: typ}, nil
+}
+
+// errTypeMismatch returns error for destination not having a compatible type
+// with source Cloud Spanner type.
+func errTypeMismatch(srcCode, elCode sppb.TypeCode, dst interface{}) error {
+ s := srcCode.String()
+ if srcCode == sppb.TypeCode_ARRAY {
+ s = fmt.Sprintf("%v[%v]", srcCode, elCode)
+ }
+ return spannerErrorf(codes.InvalidArgument, "type %T cannot be used for decoding %s", dst, s)
+}
+
+// errNilSpannerType returns error for nil Cloud Spanner type in decoding.
+func errNilSpannerType() error {
+ return spannerErrorf(codes.FailedPrecondition, "unexpected nil Cloud Spanner data type in decoding")
+}
+
+// errNilSrc returns error for decoding from nil proto value.
+func errNilSrc() error {
+ return spannerErrorf(codes.FailedPrecondition, "unexpected nil Cloud Spanner value in decoding")
+}
+
+// errNilDst returns error for decoding into nil interface{}.
+func errNilDst(dst interface{}) error {
+ return spannerErrorf(codes.InvalidArgument, "cannot decode into nil type %T", dst)
+}
+
+// errNilArrElemType returns error for the input Cloud Spanner data type being an
+// array without a non-nil array element type.
+func errNilArrElemType(t *sppb.Type) error {
+ return spannerErrorf(codes.FailedPrecondition, "array type %v has a nil array element type", t)
+}
+
+// errDstNotForNull returns error for decoding a SQL NULL value into a destination which doesn't
+// support NULL values.
+func errDstNotForNull(dst interface{}) error {
+ return spannerErrorf(codes.InvalidArgument, "destination %T cannot support NULL SQL values", dst)
+}
+
+// errBadEncoding returns error for decoding wrongly encoded types.
+func errBadEncoding(v *proto3.Value, err error) error { + return spannerErrorf(codes.FailedPrecondition, "%v wasn't correctly encoded: <%v>", v, err) +} + +func parseNullTime(v *proto3.Value, p *NullTime, code sppb.TypeCode, isNull bool) error { + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_TIMESTAMP { + return errTypeMismatch(code, sppb.TypeCode_TYPE_CODE_UNSPECIFIED, p) + } + if isNull { + *p = NullTime{} + return nil + } + x, err := getStringValue(v) + if err != nil { + return err + } + y, err := time.Parse(time.RFC3339Nano, x) + if err != nil { + return errBadEncoding(v, err) + } + p.Valid = true + p.Time = y + return nil +} + +// decodeValue decodes a protobuf Value into a pointer to a Go value, as +// specified by sppb.Type. +func decodeValue(v *proto3.Value, t *sppb.Type, ptr interface{}) error { + if v == nil { + return errNilSrc() + } + if t == nil { + return errNilSpannerType() + } + code := t.Code + acode := sppb.TypeCode_TYPE_CODE_UNSPECIFIED + if code == sppb.TypeCode_ARRAY { + if t.ArrayElementType == nil { + return errNilArrElemType(t) + } + acode = t.ArrayElementType.Code + } + _, isNull := v.Kind.(*proto3.Value_NullValue) + + // Do the decoding based on the type of ptr. + switch p := ptr.(type) { + case nil: + return errNilDst(nil) + case *string: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_STRING { + return errTypeMismatch(code, acode, ptr) + } + if isNull { + return errDstNotForNull(ptr) + } + x, err := getStringValue(v) + if err != nil { + return err + } + *p = x + case *NullString: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_STRING { + return errTypeMismatch(code, acode, ptr) + } + if isNull { + *p = NullString{} + break + } + x, err := getStringValue(v) + if err != nil { + return err + } + p.Valid = true + p.StringVal = x + case *[]NullString: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_STRING { + return errTypeMismatch(code, acode, ptr) + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeNullStringArray(x) + if err != nil { + return err + } + *p = y + case *[]string: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_STRING { + return errTypeMismatch(code, acode, ptr) + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeStringArray(x) + if err != nil { + return err + } + *p = y + case *[]byte: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_BYTES { + return errTypeMismatch(code, acode, ptr) + } + if isNull { + *p = nil + break + } + x, err := getStringValue(v) + if err != nil { + return err + } + y, err := base64.StdEncoding.DecodeString(x) + if err != nil { + return errBadEncoding(v, err) + } + *p = y + case *[][]byte: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_BYTES { + return errTypeMismatch(code, acode, ptr) + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeByteArray(x) + if err != nil { + return err + } + *p = y + case *int64: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_INT64 { + return errTypeMismatch(code, acode, ptr) + } + if isNull { + return errDstNotForNull(ptr) + } + x, err := getStringValue(v) + if err != nil { + return err + } + y, err := strconv.ParseInt(x, 10, 64) + if err != nil { + return errBadEncoding(v, err) + } + *p = y + case *NullInt64: + if p == 
nil {
+ return errNilDst(p)
+ }
+ if code != sppb.TypeCode_INT64 {
+ return errTypeMismatch(code, acode, ptr)
+ }
+ if isNull {
+ *p = NullInt64{}
+ break
+ }
+ x, err := getStringValue(v)
+ if err != nil {
+ return err
+ }
+ y, err := strconv.ParseInt(x, 10, 64)
+ if err != nil {
+ return errBadEncoding(v, err)
+ }
+ p.Valid = true
+ p.Int64 = y
+ case *[]NullInt64:
+ if p == nil {
+ return errNilDst(p)
+ }
+ if acode != sppb.TypeCode_INT64 {
+ return errTypeMismatch(code, acode, ptr)
+ }
+ if isNull {
+ *p = nil
+ break
+ }
+ x, err := getListValue(v)
+ if err != nil {
+ return err
+ }
+ y, err := decodeNullInt64Array(x)
+ if err != nil {
+ return err
+ }
+ *p = y
+ case *[]int64:
+ if p == nil {
+ return errNilDst(p)
+ }
+ if acode != sppb.TypeCode_INT64 {
+ return errTypeMismatch(code, acode, ptr)
+ }
+ if isNull {
+ *p = nil
+ break
+ }
+ x, err := getListValue(v)
+ if err != nil {
+ return err
+ }
+ y, err := decodeInt64Array(x)
+ if err != nil {
+ return err
+ }
+ *p = y
+ case *bool:
+ if p == nil {
+ return errNilDst(p)
+ }
+ if code != sppb.TypeCode_BOOL {
+ return errTypeMismatch(code, acode, ptr)
+ }
+ if isNull {
+ return errDstNotForNull(ptr)
+ }
+ x, err := getBoolValue(v)
+ if err != nil {
+ return err
+ }
+ *p = x
+ case *NullBool:
+ if p == nil {
+ return errNilDst(p)
+ }
+ if code != sppb.TypeCode_BOOL {
+ return errTypeMismatch(code, acode, ptr)
+ }
+ if isNull {
+ *p = NullBool{}
+ break
+ }
+ x, err := getBoolValue(v)
+ if err != nil {
+ return err
+ }
+ p.Valid = true
+ p.Bool = x
+ case *[]NullBool:
+ if p == nil {
+ return errNilDst(p)
+ }
+ if acode != sppb.TypeCode_BOOL {
+ return errTypeMismatch(code, acode, ptr)
+ }
+ if isNull {
+ *p = nil
+ break
+ }
+ x, err := getListValue(v)
+ if err != nil {
+ return err
+ }
+ y, err := decodeNullBoolArray(x)
+ if err != nil {
+ return err
+ }
+ *p = y
+ case *[]bool:
+ if p == nil {
+ return errNilDst(p)
+ }
+ if acode != sppb.TypeCode_BOOL {
+ return errTypeMismatch(code, acode, ptr)
+ }
+ if isNull {
+ *p = nil
+ break
+ }
+ x, err := getListValue(v)
+ if err != nil {
+ return err
+ }
+ y, err := decodeBoolArray(x)
+ if err != nil {
+ return err
+ }
+ *p = y
+ case *float64:
+ if p == nil {
+ return errNilDst(p)
+ }
+ if code != sppb.TypeCode_FLOAT64 {
+ return errTypeMismatch(code, acode, ptr)
+ }
+ if isNull {
+ return errDstNotForNull(ptr)
+ }
+ x, err := getFloat64Value(v)
+ if err != nil {
+ return err
+ }
+ *p = x
+ case *NullFloat64:
+ if p == nil {
+ return errNilDst(p)
+ }
+ if code != sppb.TypeCode_FLOAT64 {
+ return errTypeMismatch(code, acode, ptr)
+ }
+ if isNull {
+ *p = NullFloat64{}
+ break
+ }
+ x, err := getFloat64Value(v)
+ if err != nil {
+ return err
+ }
+ p.Valid = true
+ p.Float64 = x
+ case *[]NullFloat64:
+ if p == nil {
+ return errNilDst(p)
+ }
+ if acode != sppb.TypeCode_FLOAT64 {
+ return errTypeMismatch(code, acode, ptr)
+ }
+ if isNull {
+ *p = nil
+ break
+ }
+ x, err := getListValue(v)
+ if err != nil {
+ return err
+ }
+ y, err := decodeNullFloat64Array(x)
+ if err != nil {
+ return err
+ }
+ *p = y
+ case *[]float64:
+ if p == nil {
+ return errNilDst(p)
+ }
+ if acode != sppb.TypeCode_FLOAT64 {
+ return errTypeMismatch(code, acode, ptr)
+ }
+ if isNull {
+ *p = nil
+ break
+ }
+ x, err := getListValue(v)
+ if err != nil {
+ return err
+ }
+ y, err := decodeFloat64Array(x)
+ if err != nil {
+ return err
+ }
+ *p = y
+ case *time.Time:
+ var nt NullTime
+ if isNull {
+ return errDstNotForNull(ptr)
+ }
+ err := parseNullTime(v, &nt, code, isNull)
+ if err != nil {
+ return err
+ }
+ *p
= nt.Time + case *NullTime: + err := parseNullTime(v, p, code, isNull) + if err != nil { + return err + } + case *[]NullTime: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_TIMESTAMP { + return errTypeMismatch(code, acode, ptr) + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeNullTimeArray(x) + if err != nil { + return err + } + *p = y + case *[]time.Time: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_TIMESTAMP { + return errTypeMismatch(code, acode, ptr) + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeTimeArray(x) + if err != nil { + return err + } + *p = y + case *civil.Date: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_DATE { + return errTypeMismatch(code, acode, ptr) + } + if isNull { + return errDstNotForNull(ptr) + } + x, err := getStringValue(v) + if err != nil { + return err + } + y, err := civil.ParseDate(x) + if err != nil { + return errBadEncoding(v, err) + } + *p = y + case *NullDate: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_DATE { + return errTypeMismatch(code, acode, ptr) + } + if isNull { + *p = NullDate{} + break + } + x, err := getStringValue(v) + if err != nil { + return err + } + y, err := civil.ParseDate(x) + if err != nil { + return errBadEncoding(v, err) + } + p.Valid = true + p.Date = y + case *[]NullDate: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_DATE { + return errTypeMismatch(code, acode, ptr) + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeNullDateArray(x) + if err != nil { + return err + } + *p = y + case *[]civil.Date: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_DATE { + return errTypeMismatch(code, acode, ptr) + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeDateArray(x) + if err != nil { + return err + } + *p = y + case *[]NullRow: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_STRUCT { + return errTypeMismatch(code, acode, ptr) + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeRowArray(t.ArrayElementType.StructType, x) + if err != nil { + return err + } + *p = y + case *GenericColumnValue: + *p = GenericColumnValue{Type: t, Value: v} + default: + // Check if the proto encoding is for an array of structs. + if !(code == sppb.TypeCode_ARRAY && acode == sppb.TypeCode_STRUCT) { + return errTypeMismatch(code, acode, ptr) + } + vp := reflect.ValueOf(p) + if !vp.IsValid() { + return errNilDst(p) + } + if !isPtrStructPtrSlice(vp.Type()) { + // The container is not a pointer to a struct pointer slice. + return errTypeMismatch(code, acode, ptr) + } + // Only use reflection for nil detection on slow path. + // Also, IsNil panics on many types, so check it after the type check. + if vp.IsNil() { + return errNilDst(p) + } + if isNull { + // The proto Value is encoding NULL, set the pointer to struct + // slice to nil as well. 
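+ // (reflect.Zero of a slice type is the nil slice, so this both drops
+ // any previously held elements and reports the NULL as a nil slice.)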
+ vp.Elem().Set(reflect.Zero(vp.Elem().Type()))
+ break
+ }
+ x, err := getListValue(v)
+ if err != nil {
+ return err
+ }
+ if err = decodeStructArray(t.ArrayElementType.StructType, x, p); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// errSrcVal returns an error for getting a wrong source protobuf value in decoding.
+func errSrcVal(v *proto3.Value, want string) error {
+ return spannerErrorf(codes.FailedPrecondition, "cannot use %v(Kind: %T) as %s Value",
+ v, v.GetKind(), want)
+}
+
+// getStringValue returns the string value encoded in proto3.Value v whose
+// kind is proto3.Value_StringValue.
+func getStringValue(v *proto3.Value) (string, error) {
+ if x, ok := v.GetKind().(*proto3.Value_StringValue); ok && x != nil {
+ return x.StringValue, nil
+ }
+ return "", errSrcVal(v, "String")
+}
+
+// getBoolValue returns the bool value encoded in proto3.Value v whose
+// kind is proto3.Value_BoolValue.
+func getBoolValue(v *proto3.Value) (bool, error) {
+ if x, ok := v.GetKind().(*proto3.Value_BoolValue); ok && x != nil {
+ return x.BoolValue, nil
+ }
+ return false, errSrcVal(v, "Bool")
+}
+
+// getListValue returns the proto3.ListValue contained in proto3.Value v whose
+// kind is proto3.Value_ListValue.
+func getListValue(v *proto3.Value) (*proto3.ListValue, error) {
+ if x, ok := v.GetKind().(*proto3.Value_ListValue); ok && x != nil {
+ return x.ListValue, nil
+ }
+ return nil, errSrcVal(v, "List")
+}
+
+// errUnexpectedNumStr returns error for the decoder getting an unexpected string for
+// representing special float values.
+func errUnexpectedNumStr(s string) error {
+ return spannerErrorf(codes.FailedPrecondition, "unexpected string value %q for number", s)
+}
+
+// getFloat64Value returns the float64 value encoded in proto3.Value v whose
+// kind is proto3.Value_NumberValue / proto3.Value_StringValue.
+// Cloud Spanner uses string to encode NaN, Infinity and -Infinity.
+func getFloat64Value(v *proto3.Value) (float64, error) {
+ switch x := v.GetKind().(type) {
+ case *proto3.Value_NumberValue:
+ if x == nil {
+ break
+ }
+ return x.NumberValue, nil
+ case *proto3.Value_StringValue:
+ if x == nil {
+ break
+ }
+ switch x.StringValue {
+ case "NaN":
+ return math.NaN(), nil
+ case "Infinity":
+ return math.Inf(1), nil
+ case "-Infinity":
+ return math.Inf(-1), nil
+ default:
+ return 0, errUnexpectedNumStr(x.StringValue)
+ }
+ }
+ return 0, errSrcVal(v, "Number")
+}
+
+// errNilListValue returns error for unexpected nil ListValue in decoding Cloud Spanner ARRAYs.
+func errNilListValue(sqlType string) error {
+ return spannerErrorf(codes.FailedPrecondition, "unexpected nil ListValue in decoding %v array", sqlType)
+}
+
+// errDecodeArrayElement returns error for failure in decoding a single array element.
+func errDecodeArrayElement(i int, v proto.Message, sqlType string, err error) error {
+ se, ok := toSpannerError(err).(*Error)
+ if !ok {
+ return spannerErrorf(codes.Unknown,
+ "cannot decode %v(array element %v) as %v, error = <%v>", v, i, sqlType, err)
+ }
+ se.decorate(fmt.Sprintf("cannot decode %v(array element %v) as %v", v, i, sqlType))
+ return se
+}
+
+// decodeNullStringArray decodes proto3.ListValue pb into a NullString slice.
+func decodeNullStringArray(pb *proto3.ListValue) ([]NullString, error) {
+ if pb == nil {
+ return nil, errNilListValue("STRING")
+ }
+ a := make([]NullString, len(pb.Values))
+ for i, v := range pb.Values {
+ if err := decodeValue(v, stringType(), &a[i]); err != nil {
+ return nil, errDecodeArrayElement(i, v, "STRING", err)
+ }
+ }
+ return a, nil
+}
+
+// decodeStringArray decodes proto3.ListValue pb into a string slice.
+func decodeStringArray(pb *proto3.ListValue) ([]string, error) {
+ if pb == nil {
+ return nil, errNilListValue("STRING")
+ }
+ a := make([]string, len(pb.Values))
+ st := stringType()
+ for i, v := range pb.Values {
+ if err := decodeValue(v, st, &a[i]); err != nil {
+ return nil, errDecodeArrayElement(i, v, "STRING", err)
+ }
+ }
+ return a, nil
+}
+
+// decodeNullInt64Array decodes proto3.ListValue pb into a NullInt64 slice.
+func decodeNullInt64Array(pb *proto3.ListValue) ([]NullInt64, error) {
+ if pb == nil {
+ return nil, errNilListValue("INT64")
+ }
+ a := make([]NullInt64, len(pb.Values))
+ for i, v := range pb.Values {
+ if err := decodeValue(v, intType(), &a[i]); err != nil {
+ return nil, errDecodeArrayElement(i, v, "INT64", err)
+ }
+ }
+ return a, nil
+}
+
+// decodeInt64Array decodes proto3.ListValue pb into an int64 slice.
+func decodeInt64Array(pb *proto3.ListValue) ([]int64, error) {
+ if pb == nil {
+ return nil, errNilListValue("INT64")
+ }
+ a := make([]int64, len(pb.Values))
+ for i, v := range pb.Values {
+ if err := decodeValue(v, intType(), &a[i]); err != nil {
+ return nil, errDecodeArrayElement(i, v, "INT64", err)
+ }
+ }
+ return a, nil
+}
+
+// decodeNullBoolArray decodes proto3.ListValue pb into a NullBool slice.
+func decodeNullBoolArray(pb *proto3.ListValue) ([]NullBool, error) {
+ if pb == nil {
+ return nil, errNilListValue("BOOL")
+ }
+ a := make([]NullBool, len(pb.Values))
+ for i, v := range pb.Values {
+ if err := decodeValue(v, boolType(), &a[i]); err != nil {
+ return nil, errDecodeArrayElement(i, v, "BOOL", err)
+ }
+ }
+ return a, nil
+}
+
+// decodeBoolArray decodes proto3.ListValue pb into a bool slice.
+func decodeBoolArray(pb *proto3.ListValue) ([]bool, error) {
+ if pb == nil {
+ return nil, errNilListValue("BOOL")
+ }
+ a := make([]bool, len(pb.Values))
+ for i, v := range pb.Values {
+ if err := decodeValue(v, boolType(), &a[i]); err != nil {
+ return nil, errDecodeArrayElement(i, v, "BOOL", err)
+ }
+ }
+ return a, nil
+}
+
+// decodeNullFloat64Array decodes proto3.ListValue pb into a NullFloat64 slice.
+func decodeNullFloat64Array(pb *proto3.ListValue) ([]NullFloat64, error) {
+ if pb == nil {
+ return nil, errNilListValue("FLOAT64")
+ }
+ a := make([]NullFloat64, len(pb.Values))
+ for i, v := range pb.Values {
+ if err := decodeValue(v, floatType(), &a[i]); err != nil {
+ return nil, errDecodeArrayElement(i, v, "FLOAT64", err)
+ }
+ }
+ return a, nil
+}
+
+// decodeFloat64Array decodes proto3.ListValue pb into a float64 slice.
+func decodeFloat64Array(pb *proto3.ListValue) ([]float64, error) {
+ if pb == nil {
+ return nil, errNilListValue("FLOAT64")
+ }
+ a := make([]float64, len(pb.Values))
+ for i, v := range pb.Values {
+ if err := decodeValue(v, floatType(), &a[i]); err != nil {
+ return nil, errDecodeArrayElement(i, v, "FLOAT64", err)
+ }
+ }
+ return a, nil
+}
+
+// decodeByteArray decodes proto3.ListValue pb into a slice of byte slices.
+func decodeByteArray(pb *proto3.ListValue) ([][]byte, error) {
+ if pb == nil {
+ return nil, errNilListValue("BYTES")
+ }
+ a := make([][]byte, len(pb.Values))
+ for i, v := range pb.Values {
+ if err := decodeValue(v, bytesType(), &a[i]); err != nil {
+ return nil, errDecodeArrayElement(i, v, "BYTES", err)
+ }
+ }
+ return a, nil
+}
+
+// decodeNullTimeArray decodes proto3.ListValue pb into a NullTime slice.
+func decodeNullTimeArray(pb *proto3.ListValue) ([]NullTime, error) {
+ if pb == nil {
+ return nil, errNilListValue("TIMESTAMP")
+ }
+ a := make([]NullTime, len(pb.Values))
+ for i, v := range pb.Values {
+ if err := decodeValue(v, timeType(), &a[i]); err != nil {
+ return nil, errDecodeArrayElement(i, v, "TIMESTAMP", err)
+ }
+ }
+ return a, nil
+}
+
+// decodeTimeArray decodes proto3.ListValue pb into a time.Time slice.
+func decodeTimeArray(pb *proto3.ListValue) ([]time.Time, error) {
+ if pb == nil {
+ return nil, errNilListValue("TIMESTAMP")
+ }
+ a := make([]time.Time, len(pb.Values))
+ for i, v := range pb.Values {
+ if err := decodeValue(v, timeType(), &a[i]); err != nil {
+ return nil, errDecodeArrayElement(i, v, "TIMESTAMP", err)
+ }
+ }
+ return a, nil
+}
+
+// decodeNullDateArray decodes proto3.ListValue pb into a NullDate slice.
+func decodeNullDateArray(pb *proto3.ListValue) ([]NullDate, error) {
+ if pb == nil {
+ return nil, errNilListValue("DATE")
+ }
+ a := make([]NullDate, len(pb.Values))
+ for i, v := range pb.Values {
+ if err := decodeValue(v, dateType(), &a[i]); err != nil {
+ return nil, errDecodeArrayElement(i, v, "DATE", err)
+ }
+ }
+ return a, nil
+}
+
+// decodeDateArray decodes proto3.ListValue pb into a civil.Date slice.
+func decodeDateArray(pb *proto3.ListValue) ([]civil.Date, error) {
+ if pb == nil {
+ return nil, errNilListValue("DATE")
+ }
+ a := make([]civil.Date, len(pb.Values))
+ for i, v := range pb.Values {
+ if err := decodeValue(v, dateType(), &a[i]); err != nil {
+ return nil, errDecodeArrayElement(i, v, "DATE", err)
+ }
+ }
+ return a, nil
+}
+
+func errNotStructElement(i int, v *proto3.Value) error {
+ return errDecodeArrayElement(i, v, "STRUCT",
+ spannerErrorf(codes.FailedPrecondition, "%v(type: %T) doesn't encode Cloud Spanner STRUCT", v, v))
+}
+
+// decodeRowArray decodes proto3.ListValue pb into a NullRow slice according to
+// the structural information given in sppb.StructType ty.
+func decodeRowArray(ty *sppb.StructType, pb *proto3.ListValue) ([]NullRow, error) {
+ if pb == nil {
+ return nil, errNilListValue("STRUCT")
+ }
+ a := make([]NullRow, len(pb.Values))
+ for i := range pb.Values {
+ switch v := pb.Values[i].GetKind().(type) {
+ case *proto3.Value_ListValue:
+ a[i] = NullRow{
+ Row: Row{
+ fields: ty.Fields,
+ vals: v.ListValue.Values,
+ },
+ Valid: true,
+ }
+ // Null elements not currently supported by the server, see
+ // https://cloud.google.com/spanner/docs/query-syntax#using-structs-with-select
+ case *proto3.Value_NullValue:
+ // no-op, a[i] is NullRow{} already
+ default:
+ return nil, errNotStructElement(i, pb.Values[i])
+ }
+ }
+ return a, nil
+}
+
+// errNilSpannerStructType returns error for unexpected nil Cloud Spanner STRUCT schema type in decoding.
+func errNilSpannerStructType() error {
+ return spannerErrorf(codes.FailedPrecondition, "unexpected nil StructType in decoding Cloud Spanner STRUCT")
+}
+
+// errUnnamedField returns error for decoding a Cloud Spanner STRUCT with an unnamed field into a Go struct.
+func errUnnamedField(ty *sppb.StructType, i int) error {
+ return spannerErrorf(codes.InvalidArgument, "unnamed field %v in Cloud Spanner STRUCT %+v", i, ty)
+}
+
+// errNoOrDupGoField returns error for decoding a Cloud Spanner
+// STRUCT into a Go struct that is either missing a field or has duplicate fields.
+func errNoOrDupGoField(s interface{}, f string) error {
+ return spannerErrorf(codes.InvalidArgument, "Go struct %+v(type %T) has no or duplicate fields for Cloud Spanner STRUCT field %v", s, s, f)
+}
+
+// errDupSpannerField returns error for duplicated Cloud Spanner STRUCT field names found in decoding a Cloud Spanner STRUCT into a Go struct.
+func errDupSpannerField(f string, ty *sppb.StructType) error {
+ return spannerErrorf(codes.InvalidArgument, "duplicated field name %q in Cloud Spanner STRUCT %+v", f, ty)
+}
+
+// errDecodeStructField returns error for failure in decoding a single field of a Cloud Spanner STRUCT.
+func errDecodeStructField(ty *sppb.StructType, f string, err error) error {
+ se, ok := toSpannerError(err).(*Error)
+ if !ok {
+ return spannerErrorf(codes.Unknown,
+ "cannot decode field %v of Cloud Spanner STRUCT %+v, error = <%v>", f, ty, err)
+ }
+ se.decorate(fmt.Sprintf("cannot decode field %v of Cloud Spanner STRUCT %+v", f, ty))
+ return se
+}
+
+// decodeStruct decodes proto3.ListValue pb into the struct referenced by pointer ptr, according to
+// the structural information given in sppb.StructType ty.
+func decodeStruct(ty *sppb.StructType, pb *proto3.ListValue, ptr interface{}) error {
+ if reflect.ValueOf(ptr).IsNil() {
+ return errNilDst(ptr)
+ }
+ if ty == nil {
+ return errNilSpannerStructType()
+ }
+ // t holds the structural information of ptr.
+ t := reflect.TypeOf(ptr).Elem()
+ // v is the actual value that ptr points to.
+ v := reflect.ValueOf(ptr).Elem()
+
+ fields, err := fieldCache.Fields(t)
+ if err != nil {
+ return toSpannerError(err)
+ }
+ seen := map[string]bool{}
+ for i, f := range ty.Fields {
+ if f.Name == "" {
+ return errUnnamedField(ty, i)
+ }
+ sf := fields.Match(f.Name)
+ if sf == nil {
+ return errNoOrDupGoField(ptr, f.Name)
+ }
+ if seen[f.Name] {
+ // We don't allow duplicated field name.
+ return errDupSpannerField(f.Name, ty)
+ }
+ // Try to decode a single field.
+ if err := decodeValue(pb.Values[i], f.Type, v.FieldByIndex(sf.Index).Addr().Interface()); err != nil {
+ return errDecodeStructField(ty, f.Name, err)
+ }
+ // Mark field f.Name as processed.
+ seen[f.Name] = true
+ }
+ return nil
+}
+
+// isPtrStructPtrSlice returns true if t is a pointer to a slice of struct pointers.
+func isPtrStructPtrSlice(t reflect.Type) bool {
+ if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Slice {
+ // t is not a pointer to a slice.
+ return false
+ }
+ if t = t.Elem(); t.Elem().Kind() != reflect.Ptr || t.Elem().Elem().Kind() != reflect.Struct {
+ // the slice that t points to is not a slice of struct pointers.
+ return false
+ }
+ return true
+}
+
+// decodeStructArray decodes proto3.ListValue pb into the struct slice referenced by pointer ptr, according to the
+// structural information given in a sppb.StructType.
+func decodeStructArray(ty *sppb.StructType, pb *proto3.ListValue, ptr interface{}) error {
+ if pb == nil {
+ return errNilListValue("STRUCT")
+ }
+ // Type of the struct pointers stored in the slice that ptr points to.
+ ts := reflect.TypeOf(ptr).Elem().Elem()
+ // The slice that ptr points to might be nil at this point.
+ v := reflect.ValueOf(ptr).Elem()
+ // Allocate empty slice.
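+ // (Allocated with length 0 and capacity len(pb.Values): the decoded
+ // structs are appended one at a time below, which avoids pre-filling
+ // the slice with placeholder nil pointers.)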
+ v.Set(reflect.MakeSlice(v.Type(), 0, len(pb.Values))) + // Decode every struct in pb.Values. + for i, pv := range pb.Values { + // Check if pv is a NULL value. + if _, isNull := pv.Kind.(*proto3.Value_NullValue); isNull { + // Append a nil pointer to the slice. + v.Set(reflect.Append(v, reflect.New(ts).Elem())) + continue + } + // Allocate empty struct. + s := reflect.New(ts.Elem()) + // Get proto3.ListValue l from proto3.Value pv. + l, err := getListValue(pv) + if err != nil { + return errDecodeArrayElement(i, pv, "STRUCT", err) + } + // Decode proto3.ListValue l into struct referenced by s.Interface(). + if err = decodeStruct(ty, l, s.Interface()); err != nil { + return errDecodeArrayElement(i, pv, "STRUCT", err) + } + // Append the decoded struct back into the slice. + v.Set(reflect.Append(v, s)) + } + return nil +} + +// errEncoderUnsupportedType returns error for not being able to encode a value of +// certain type. +func errEncoderUnsupportedType(v interface{}) error { + return spannerErrorf(codes.InvalidArgument, "client doesn't support type %T", v) +} + +// encodeValue encodes a Go native type into a proto3.Value. +func encodeValue(v interface{}) (*proto3.Value, *sppb.Type, error) { + pb := &proto3.Value{ + Kind: &proto3.Value_NullValue{NullValue: proto3.NullValue_NULL_VALUE}, + } + var pt *sppb.Type + var err error + switch v := v.(type) { + case nil: + case string: + pb.Kind = stringKind(v) + pt = stringType() + case NullString: + if v.Valid { + return encodeValue(v.StringVal) + } + pt = stringType() + case []string: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + } + pt = listType(stringType()) + case []NullString: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + } + pt = listType(stringType()) + case []byte: + if v != nil { + pb.Kind = stringKind(base64.StdEncoding.EncodeToString(v)) + } + pt = bytesType() + case [][]byte: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + } + pt = listType(bytesType()) + case int: + pb.Kind = stringKind(strconv.FormatInt(int64(v), 10)) + pt = intType() + case []int: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + } + pt = listType(intType()) + case int64: + pb.Kind = stringKind(strconv.FormatInt(v, 10)) + pt = intType() + case []int64: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + } + pt = listType(intType()) + case NullInt64: + if v.Valid { + return encodeValue(v.Int64) + } + pt = intType() + case []NullInt64: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + } + pt = listType(intType()) + case bool: + pb.Kind = &proto3.Value_BoolValue{BoolValue: v} + pt = boolType() + case []bool: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + } + pt = listType(boolType()) + case NullBool: + if v.Valid { + return encodeValue(v.Bool) + } + pt = boolType() + case []NullBool: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + } + pt = listType(boolType()) + case float64: + 
pb.Kind = &proto3.Value_NumberValue{NumberValue: v} + pt = floatType() + case []float64: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + } + pt = listType(floatType()) + case NullFloat64: + if v.Valid { + return encodeValue(v.Float64) + } + pt = floatType() + case []NullFloat64: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + } + pt = listType(floatType()) + case time.Time: + if v == commitTimestamp { + pb.Kind = stringKind(commitTimestampPlaceholderString) + } else { + pb.Kind = stringKind(v.UTC().Format(time.RFC3339Nano)) + } + pt = timeType() + case []time.Time: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + } + pt = listType(timeType()) + case NullTime: + if v.Valid { + return encodeValue(v.Time) + } + pt = timeType() + case []NullTime: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + } + pt = listType(timeType()) + case civil.Date: + pb.Kind = stringKind(v.String()) + pt = dateType() + case []civil.Date: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + } + pt = listType(dateType()) + case NullDate: + if v.Valid { + return encodeValue(v.Date) + } + pt = dateType() + case []NullDate: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + } + pt = listType(dateType()) + case GenericColumnValue: + // Deep clone to ensure subsequent changes to v before + // transmission don't affect our encoded value. + pb = proto.Clone(v.Value).(*proto3.Value) + pt = proto.Clone(v.Type).(*sppb.Type) + default: + return nil, nil, errEncoderUnsupportedType(v) + } + return pb, pt, nil +} + +// encodeValueArray encodes a Value array into a proto3.ListValue. +func encodeValueArray(vs []interface{}) (*proto3.ListValue, error) { + lv := &proto3.ListValue{} + lv.Values = make([]*proto3.Value, 0, len(vs)) + for _, v := range vs { + pb, _, err := encodeValue(v) + if err != nil { + return nil, err + } + lv.Values = append(lv.Values, pb) + } + return lv, nil +} + +// encodeArray assumes that all values of the array element type encode without error. +func encodeArray(len int, at func(int) interface{}) (*proto3.Value, error) { + vs := make([]*proto3.Value, len) + var err error + for i := 0; i < len; i++ { + vs[i], _, err = encodeValue(at(i)) + if err != nil { + return nil, err + } + } + return listProto(vs...), nil +} + +func spannerTagParser(t reflect.StructTag) (name string, keep bool, other interface{}, err error) { + if s := t.Get("spanner"); s != "" { + if s == "-" { + return "", false, nil, nil + } + return s, true, nil, nil + } + return "", true, nil, nil +} + +var fieldCache = fields.NewCache(spannerTagParser, nil, nil) diff --git a/vendor/cloud.google.com/go/spanner/value_benchmarks_test.go b/vendor/cloud.google.com/go/spanner/value_benchmarks_test.go new file mode 100644 index 0000000..0d95ab9 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/value_benchmarks_test.go @@ -0,0 +1,214 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.7 + +package spanner + +import ( + "reflect" + "strconv" + "testing" + + "cloud.google.com/go/civil" + proto3 "github.com/golang/protobuf/ptypes/struct" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +func BenchmarkEncodeIntArray(b *testing.B) { + for _, s := range []struct { + name string + f func(a []int) (*proto3.Value, *sppb.Type, error) + }{ + {"Orig", encodeIntArrayOrig}, + {"Func", encodeIntArrayFunc}, + {"Reflect", encodeIntArrayReflect}, + } { + b.Run(s.name, func(b *testing.B) { + for _, size := range []int{1, 10, 100, 1000} { + a := make([]int, size) + b.Run(strconv.Itoa(size), func(b *testing.B) { + for i := 0; i < b.N; i++ { + s.f(a) + } + }) + } + }) + } +} + +func encodeIntArrayOrig(a []int) (*proto3.Value, *sppb.Type, error) { + vs := make([]*proto3.Value, len(a)) + var err error + for i := range a { + vs[i], _, err = encodeValue(a[i]) + if err != nil { + return nil, nil, err + } + } + return listProto(vs...), listType(intType()), nil +} + +func encodeIntArrayFunc(a []int) (*proto3.Value, *sppb.Type, error) { + v, err := encodeArray(len(a), func(i int) interface{} { return a[i] }) + if err != nil { + return nil, nil, err + } + return v, listType(intType()), nil +} + +func encodeIntArrayReflect(a []int) (*proto3.Value, *sppb.Type, error) { + v, err := encodeArrayReflect(a) + if err != nil { + return nil, nil, err + } + return v, listType(intType()), nil +} + +func encodeArrayReflect(a interface{}) (*proto3.Value, error) { + va := reflect.ValueOf(a) + len := va.Len() + vs := make([]*proto3.Value, len) + var err error + for i := 0; i < len; i++ { + vs[i], _, err = encodeValue(va.Index(i).Interface()) + if err != nil { + return nil, err + } + } + return listProto(vs...), nil +} + +func BenchmarkDecodeGeneric(b *testing.B) { + v := stringProto("test") + t := stringType() + var g GenericColumnValue + b.ResetTimer() + for i := 0; i < b.N; i++ { + decodeValue(v, t, &g) + } +} + +func BenchmarkDecodeArray(b *testing.B) { + for _, size := range []int{1, 10, 100, 1000} { + vals := make([]*proto3.Value, size) + for i := 0; i < size; i++ { + vals[i] = dateProto(d1) + } + lv := &proto3.ListValue{Values: vals} + b.Run(strconv.Itoa(size), func(b *testing.B) { + for _, s := range []struct { + name string + decode func(*proto3.ListValue) + }{ + {"DateDirect", decodeArray_Date_direct}, + {"DateFunc", decodeArray_Date_func}, + {"DateReflect", decodeArray_Date_reflect}, + {"StringDecodeStringArray", decodeStringArrayWrap}, + {"StringDirect", decodeArray_String_direct}, + {"StringFunc", decodeArray_String_func}, + {"StringReflect", decodeArray_String_reflect}, + } { + b.Run(s.name, func(b *testing.B) { + for i := 0; i < b.N; i++ { + s.decode(lv) + } + }) + } + }) + + } +} + +func decodeArray_Date_direct(pb *proto3.ListValue) { + a := make([]civil.Date, len(pb.Values)) + t := dateType() + for i, v := range pb.Values { + if err := decodeValue(v, t, &a[i]); err != nil { + panic(err) + } + } +} + +func decodeArray_Date_func(pb *proto3.ListValue) { + a := make([]civil.Date, len(pb.Values)) + if err := decodeArray_func(pb, "DATE", dateType(), func(i 
int) interface{} { return &a[i] }); err != nil { + panic(err) + } +} + +func decodeArray_Date_reflect(pb *proto3.ListValue) { + var a []civil.Date + if err := decodeArray_reflect(pb, "DATE", dateType(), &a); err != nil { + panic(err) + } +} + +func decodeStringArrayWrap(pb *proto3.ListValue) { + if _, err := decodeStringArray(pb); err != nil { + panic(err) + } +} + +func decodeArray_String_direct(pb *proto3.ListValue) { + a := make([]string, len(pb.Values)) + t := stringType() + for i, v := range pb.Values { + if err := decodeValue(v, t, &a[i]); err != nil { + panic(err) + } + } +} + +func decodeArray_String_func(pb *proto3.ListValue) { + + a := make([]string, len(pb.Values)) + if err := decodeArray_func(pb, "STRING", stringType(), func(i int) interface{} { return &a[i] }); err != nil { + panic(err) + } +} + +func decodeArray_String_reflect(pb *proto3.ListValue) { + var a []string + if err := decodeArray_reflect(pb, "STRING", stringType(), &a); err != nil { + panic(err) + } +} + +func decodeArray_func(pb *proto3.ListValue, name string, typ *sppb.Type, elptr func(int) interface{}) error { + if pb == nil { + return errNilListValue(name) + } + for i, v := range pb.Values { + if err := decodeValue(v, typ, elptr(i)); err != nil { + return errDecodeArrayElement(i, v, name, err) + } + } + return nil +} + +func decodeArray_reflect(pb *proto3.ListValue, name string, typ *sppb.Type, aptr interface{}) error { + if pb == nil { + return errNilListValue(name) + } + av := reflect.ValueOf(aptr).Elem() + av.Set(reflect.MakeSlice(av.Type(), len(pb.Values), len(pb.Values))) + for i, v := range pb.Values { + if err := decodeValue(v, typ, av.Index(i).Addr().Interface()); err != nil { + av.Set(reflect.Zero(av.Type())) // reset slice to nil + return errDecodeArrayElement(i, v, name, err) + } + } + return nil +} diff --git a/vendor/cloud.google.com/go/spanner/value_test.go b/vendor/cloud.google.com/go/spanner/value_test.go new file mode 100644 index 0000000..2e14f88 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/value_test.go @@ -0,0 +1,522 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "math" + "reflect" + "testing" + "time" + + "cloud.google.com/go/civil" + proto3 "github.com/golang/protobuf/ptypes/struct" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +var ( + t1 = mustParseTime("2016-11-15T15:04:05.999999999Z") + // Boundaries + t2 = mustParseTime("0000-01-01T00:00:00.000000000Z") + t3 = mustParseTime("9999-12-31T23:59:59.999999999Z") + // Local timezone + t4 = time.Now() + d1 = mustParseDate("2016-11-15") + d2 = mustParseDate("1678-01-01") +) + +func mustParseTime(s string) time.Time { + t, err := time.Parse(time.RFC3339Nano, s) + if err != nil { + panic(err) + } + return t +} + +func mustParseDate(s string) civil.Date { + d, err := civil.ParseDate(s) + if err != nil { + panic(err) + } + return d +} + +// Test encoding Values. 
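+// Each table entry pairs a Go value with the proto3.Value and sppb.Type that
+// encodeValue is expected to produce; note, for example, that a nil slice
+// such as []string(nil) still encodes as a NULL proto typed ARRAY<STRING>.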
+func TestEncodeValue(t *testing.T) { + var ( + tString = stringType() + tInt = intType() + tBool = boolType() + tFloat = floatType() + tBytes = bytesType() + tTime = timeType() + tDate = dateType() + ) + for i, test := range []struct { + in interface{} + want *proto3.Value + wantType *sppb.Type + }{ + // STRING / STRING ARRAY + {"abc", stringProto("abc"), tString}, + {NullString{"abc", true}, stringProto("abc"), tString}, + {NullString{"abc", false}, nullProto(), tString}, + {[]string(nil), nullProto(), listType(tString)}, + {[]string{"abc", "bcd"}, listProto(stringProto("abc"), stringProto("bcd")), listType(tString)}, + {[]NullString{{"abcd", true}, {"xyz", false}}, listProto(stringProto("abcd"), nullProto()), listType(tString)}, + // BYTES / BYTES ARRAY + {[]byte("foo"), bytesProto([]byte("foo")), tBytes}, + {[]byte(nil), nullProto(), tBytes}, + {[][]byte{nil, []byte("ab")}, listProto(nullProto(), bytesProto([]byte("ab"))), listType(tBytes)}, + {[][]byte(nil), nullProto(), listType(tBytes)}, + // INT64 / INT64 ARRAY + {7, intProto(7), tInt}, + {[]int(nil), nullProto(), listType(tInt)}, + {[]int{31, 127}, listProto(intProto(31), intProto(127)), listType(tInt)}, + {int64(81), intProto(81), tInt}, + {[]int64(nil), nullProto(), listType(tInt)}, + {[]int64{33, 129}, listProto(intProto(33), intProto(129)), listType(tInt)}, + {NullInt64{11, true}, intProto(11), tInt}, + {NullInt64{11, false}, nullProto(), tInt}, + {[]NullInt64{{35, true}, {131, false}}, listProto(intProto(35), nullProto()), listType(tInt)}, + // BOOL / BOOL ARRAY + {true, boolProto(true), tBool}, + {NullBool{true, true}, boolProto(true), tBool}, + {NullBool{true, false}, nullProto(), tBool}, + {[]bool{true, false}, listProto(boolProto(true), boolProto(false)), listType(tBool)}, + {[]NullBool{{true, true}, {true, false}}, listProto(boolProto(true), nullProto()), listType(tBool)}, + // FLOAT64 / FLOAT64 ARRAY + {3.14, floatProto(3.14), tFloat}, + {NullFloat64{3.1415, true}, floatProto(3.1415), tFloat}, + {NullFloat64{math.Inf(1), true}, floatProto(math.Inf(1)), tFloat}, + {NullFloat64{3.14159, false}, nullProto(), tFloat}, + {[]float64(nil), nullProto(), listType(tFloat)}, + {[]float64{3.141, 0.618, math.Inf(-1)}, listProto(floatProto(3.141), floatProto(0.618), floatProto(math.Inf(-1))), listType(tFloat)}, + {[]NullFloat64{{3.141, true}, {0.618, false}}, listProto(floatProto(3.141), nullProto()), listType(tFloat)}, + // TIMESTAMP / TIMESTAMP ARRAY + {t1, timeProto(t1), tTime}, + {NullTime{t1, true}, timeProto(t1), tTime}, + {NullTime{t1, false}, nullProto(), tTime}, + {[]time.Time(nil), nullProto(), listType(tTime)}, + {[]time.Time{t1, t2, t3, t4}, listProto(timeProto(t1), timeProto(t2), timeProto(t3), timeProto(t4)), listType(tTime)}, + {[]NullTime{{t1, true}, {t1, false}}, listProto(timeProto(t1), nullProto()), listType(tTime)}, + // DATE / DATE ARRAY + {d1, dateProto(d1), tDate}, + {NullDate{d1, true}, dateProto(d1), tDate}, + {NullDate{civil.Date{}, false}, nullProto(), tDate}, + {[]civil.Date(nil), nullProto(), listType(tDate)}, + {[]civil.Date{d1, d2}, listProto(dateProto(d1), dateProto(d2)), listType(tDate)}, + {[]NullDate{{d1, true}, {civil.Date{}, false}}, listProto(dateProto(d1), nullProto()), listType(tDate)}, + // GenericColumnValue + {GenericColumnValue{tString, stringProto("abc")}, stringProto("abc"), tString}, + {GenericColumnValue{tString, nullProto()}, nullProto(), tString}, + // not actually valid (stringProto inside int list), but demonstrates pass-through. 
+ { + GenericColumnValue{ + Type: listType(tInt), + Value: listProto(intProto(5), nullProto(), stringProto("bcd")), + }, + listProto(intProto(5), nullProto(), stringProto("bcd")), + listType(tInt), + }, + // placeholder + {CommitTimestamp, stringProto(commitTimestampPlaceholderString), tTime}, + } { + got, gotType, err := encodeValue(test.in) + if err != nil { + t.Fatalf("#%d: got error during encoding: %v, want nil", i, err) + } + if !testEqual(got, test.want) { + t.Errorf("#%d: got encode result: %v, want %v", i, got, test.want) + } + if !testEqual(gotType, test.wantType) { + t.Errorf("#%d: got encode type: %v, want %v", i, gotType, test.wantType) + } + } +} + +// Test decoding Values. +func TestDecodeValue(t *testing.T) { + for i, test := range []struct { + in *proto3.Value + t *sppb.Type + want interface{} + fail bool + }{ + // STRING + {stringProto("abc"), stringType(), "abc", false}, + {nullProto(), stringType(), "abc", true}, + {stringProto("abc"), stringType(), NullString{"abc", true}, false}, + {nullProto(), stringType(), NullString{}, false}, + // STRING ARRAY with []NullString + { + listProto(stringProto("abc"), nullProto(), stringProto("bcd")), + listType(stringType()), + []NullString{{"abc", true}, {}, {"bcd", true}}, + false, + }, + {nullProto(), listType(stringType()), []NullString(nil), false}, + // STRING ARRAY with []string + { + listProto(stringProto("abc"), stringProto("bcd")), + listType(stringType()), + []string{"abc", "bcd"}, + false, + }, + // BYTES + {bytesProto([]byte("ab")), bytesType(), []byte("ab"), false}, + {nullProto(), bytesType(), []byte(nil), false}, + // BYTES ARRAY + {listProto(bytesProto([]byte("ab")), nullProto()), listType(bytesType()), [][]byte{[]byte("ab"), nil}, false}, + {nullProto(), listType(bytesType()), [][]byte(nil), false}, + //INT64 + {intProto(15), intType(), int64(15), false}, + {nullProto(), intType(), int64(0), true}, + {intProto(15), intType(), NullInt64{15, true}, false}, + {nullProto(), intType(), NullInt64{}, false}, + // INT64 ARRAY with []NullInt64 + {listProto(intProto(91), nullProto(), intProto(87)), listType(intType()), []NullInt64{{91, true}, {}, {87, true}}, false}, + {nullProto(), listType(intType()), []NullInt64(nil), false}, + // INT64 ARRAY with []int64 + {listProto(intProto(91), intProto(87)), listType(intType()), []int64{91, 87}, false}, + // BOOL + {boolProto(true), boolType(), true, false}, + {nullProto(), boolType(), true, true}, + {boolProto(true), boolType(), NullBool{true, true}, false}, + {nullProto(), boolType(), NullBool{}, false}, + // BOOL ARRAY with []NullBool + {listProto(boolProto(true), boolProto(false), nullProto()), listType(boolType()), []NullBool{{true, true}, {false, true}, {}}, false}, + {nullProto(), listType(boolType()), []NullBool(nil), false}, + // BOOL ARRAY with []bool + {listProto(boolProto(true), boolProto(false)), listType(boolType()), []bool{true, false}, false}, + // FLOAT64 + {floatProto(3.14), floatType(), 3.14, false}, + {nullProto(), floatType(), 0.00, true}, + {floatProto(3.14), floatType(), NullFloat64{3.14, true}, false}, + {nullProto(), floatType(), NullFloat64{}, false}, + // FLOAT64 ARRAY with []NullFloat64 + { + listProto(floatProto(math.Inf(1)), floatProto(math.Inf(-1)), nullProto(), floatProto(3.1)), + listType(floatType()), + []NullFloat64{{math.Inf(1), true}, {math.Inf(-1), true}, {}, {3.1, true}}, + false, + }, + {nullProto(), listType(floatType()), []NullFloat64(nil), false}, + // FLOAT64 ARRAY with []float64 + { + listProto(floatProto(math.Inf(1)), 
floatProto(math.Inf(-1)), floatProto(3.1)), + listType(floatType()), + []float64{math.Inf(1), math.Inf(-1), 3.1}, + false, + }, + // TIMESTAMP + {timeProto(t1), timeType(), t1, false}, + {timeProto(t1), timeType(), NullTime{t1, true}, false}, + {nullProto(), timeType(), NullTime{}, false}, + // TIMESTAMP ARRAY with []NullTime + {listProto(timeProto(t1), timeProto(t2), timeProto(t3), nullProto()), listType(timeType()), []NullTime{{t1, true}, {t2, true}, {t3, true}, {}}, false}, + {nullProto(), listType(timeType()), []NullTime(nil), false}, + // TIMESTAMP ARRAY with []time.Time + {listProto(timeProto(t1), timeProto(t2), timeProto(t3)), listType(timeType()), []time.Time{t1, t2, t3}, false}, + // DATE + {dateProto(d1), dateType(), d1, false}, + {dateProto(d1), dateType(), NullDate{d1, true}, false}, + {nullProto(), dateType(), NullDate{}, false}, + // DATE ARRAY with []NullDate + {listProto(dateProto(d1), dateProto(d2), nullProto()), listType(dateType()), []NullDate{{d1, true}, {d2, true}, {}}, false}, + {nullProto(), listType(dateType()), []NullDate(nil), false}, + // DATE ARRAY with []civil.Date + {listProto(dateProto(d1), dateProto(d2)), listType(dateType()), []civil.Date{d1, d2}, false}, + // STRUCT ARRAY + // STRUCT schema is equal to the following Go struct: + // type s struct { + // Col1 NullInt64 + // Col2 []struct { + // SubCol1 float64 + // SubCol2 string + // } + // } + { + in: listProto( + listProto( + intProto(3), + listProto( + listProto(floatProto(3.14), stringProto("this")), + listProto(floatProto(0.57), stringProto("siht")), + ), + ), + listProto( + nullProto(), + nullProto(), + ), + nullProto(), + ), + t: listType( + structType( + mkField("Col1", intType()), + mkField( + "Col2", + listType( + structType( + mkField("SubCol1", floatType()), + mkField("SubCol2", stringType()), + ), + ), + ), + ), + ), + want: []NullRow{ + { + Row: Row{ + fields: []*sppb.StructType_Field{ + mkField("Col1", intType()), + mkField( + "Col2", + listType( + structType( + mkField("SubCol1", floatType()), + mkField("SubCol2", stringType()), + ), + ), + ), + }, + vals: []*proto3.Value{ + intProto(3), + listProto( + listProto(floatProto(3.14), stringProto("this")), + listProto(floatProto(0.57), stringProto("siht")), + ), + }, + }, + Valid: true, + }, + { + Row: Row{ + fields: []*sppb.StructType_Field{ + mkField("Col1", intType()), + mkField( + "Col2", + listType( + structType( + mkField("SubCol1", floatType()), + mkField("SubCol2", stringType()), + ), + ), + ), + }, + vals: []*proto3.Value{ + nullProto(), + nullProto(), + }, + }, + Valid: true, + }, + {}, + }, + fail: false, + }, + { + in: listProto( + listProto( + intProto(3), + listProto( + listProto(floatProto(3.14), stringProto("this")), + listProto(floatProto(0.57), stringProto("siht")), + ), + ), + listProto( + nullProto(), + nullProto(), + ), + nullProto(), + ), + t: listType( + structType( + mkField("Col1", intType()), + mkField( + "Col2", + listType( + structType( + mkField("SubCol1", floatType()), + mkField("SubCol2", stringType()), + ), + ), + ), + ), + ), + want: []*struct { + Col1 NullInt64 + StructCol []*struct { + SubCol1 NullFloat64 + SubCol2 string + } `spanner:"Col2"` + }{ + { + Col1: NullInt64{3, true}, + StructCol: []*struct { + SubCol1 NullFloat64 + SubCol2 string + }{ + { + SubCol1: NullFloat64{3.14, true}, + SubCol2: "this", + }, + { + SubCol1: NullFloat64{0.57, true}, + SubCol2: "siht", + }, + }, + }, + { + Col1: NullInt64{}, + StructCol: []*struct { + SubCol1 NullFloat64 + SubCol2 string + }(nil), + }, + nil, + }, + fail: false, + 
}, + // GenericColumnValue + {stringProto("abc"), stringType(), GenericColumnValue{stringType(), stringProto("abc")}, false}, + {nullProto(), stringType(), GenericColumnValue{stringType(), nullProto()}, false}, + // not actually valid (stringProto inside int list), but demonstrates pass-through. + { + in: listProto(intProto(5), nullProto(), stringProto("bcd")), + t: listType(intType()), + want: GenericColumnValue{ + Type: listType(intType()), + Value: listProto(intProto(5), nullProto(), stringProto("bcd")), + }, + fail: false, + }, + } { + gotp := reflect.New(reflect.TypeOf(test.want)) + if err := decodeValue(test.in, test.t, gotp.Interface()); err != nil { + if !test.fail { + t.Errorf("%d: cannot decode %v(%v): %v", i, test.in, test.t, err) + } + continue + } + if test.fail { + t.Errorf("%d: decoding %v(%v) succeeds unexpectedly, want error", i, test.in, test.t) + continue + } + got := reflect.Indirect(gotp).Interface() + if !testEqual(got, test.want) { + t.Errorf("%d: unexpected decoding result - got %v, want %v", i, got, test.want) + continue + } + } +} + +// Test error cases for decodeValue. +func TestDecodeValueErrors(t *testing.T) { + for i, test := range []struct { + in *proto3.Value + t *sppb.Type + v interface{} + }{ + {nullProto(), stringType(), nil}, + {nullProto(), stringType(), 1}, + } { + err := decodeValue(test.in, test.t, test.v) + if err == nil { + t.Errorf("#%d: want error, got nil", i) + } + } +} + +// Test NaN encoding/decoding. +func TestNaN(t *testing.T) { + // Decode NaN value. + f := 0.0 + nf := NullFloat64{} + // To float64 + if err := decodeValue(floatProto(math.NaN()), floatType(), &f); err != nil { + t.Errorf("decodeValue returns %q for %v, want nil", err, floatProto(math.NaN())) + } + if !math.IsNaN(f) { + t.Errorf("f = %v, want %v", f, math.NaN()) + } + // To NullFloat64 + if err := decodeValue(floatProto(math.NaN()), floatType(), &nf); err != nil { + t.Errorf("decodeValue returns %q for %v, want nil", err, floatProto(math.NaN())) + } + if !math.IsNaN(nf.Float64) || !nf.Valid { + t.Errorf("f = %v, want %v", f, NullFloat64{math.NaN(), true}) + } + // Encode NaN value + // From float64 + v, _, err := encodeValue(math.NaN()) + if err != nil { + t.Errorf("encodeValue returns %q for NaN, want nil", err) + } + x, ok := v.GetKind().(*proto3.Value_NumberValue) + if !ok { + t.Errorf("incorrect type for v.GetKind(): %T, want *proto3.Value_NumberValue", v.GetKind()) + } + if !math.IsNaN(x.NumberValue) { + t.Errorf("x.NumberValue = %v, want %v", x.NumberValue, math.NaN()) + } + // From NullFloat64 + v, _, err = encodeValue(NullFloat64{math.NaN(), true}) + if err != nil { + t.Errorf("encodeValue returns %q for NaN, want nil", err) + } + x, ok = v.GetKind().(*proto3.Value_NumberValue) + if !ok { + t.Errorf("incorrect type for v.GetKind(): %T, want *proto3.Value_NumberValue", v.GetKind()) + } + if !math.IsNaN(x.NumberValue) { + t.Errorf("x.NumberValue = %v, want %v", x.NumberValue, math.NaN()) + } +} + +func TestGenericColumnValue(t *testing.T) { + for _, test := range []struct { + in GenericColumnValue + want interface{} + fail bool + }{ + {GenericColumnValue{stringType(), stringProto("abc")}, "abc", false}, + {GenericColumnValue{stringType(), stringProto("abc")}, 5, true}, + {GenericColumnValue{listType(intType()), listProto(intProto(91), nullProto(), intProto(87))}, []NullInt64{{91, true}, {}, {87, true}}, false}, + {GenericColumnValue{intType(), intProto(42)}, GenericColumnValue{intType(), intProto(42)}, false}, // trippy! 
:-) + } { + gotp := reflect.New(reflect.TypeOf(test.want)) + if err := test.in.Decode(gotp.Interface()); err != nil { + if !test.fail { + t.Errorf("cannot decode %v to %v: %v", test.in, test.want, err) + } + continue + } + if test.fail { + t.Errorf("decoding %v to %v succeeds unexpectedly", test.in, test.want) + } + + // Test we can go backwards as well. + v, err := newGenericColumnValue(test.want) + if err != nil { + t.Errorf("NewGenericColumnValue failed: %v", err) + continue + } + if !testEqual(*v, test.in) { + t.Errorf("unexpected encode result - got %v, want %v", v, test.in) + } + } +} diff --git a/vendor/cloud.google.com/go/speech/apiv1/Recognize_smoke_test.go b/vendor/cloud.google.com/go/speech/apiv1/Recognize_smoke_test.go new file mode 100644 index 0000000..4b95cec --- /dev/null +++ b/vendor/cloud.google.com/go/speech/apiv1/Recognize_smoke_test.go @@ -0,0 +1,80 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package speech + +import ( + speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1" +) + +import ( + "fmt" + "strconv" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var _ = fmt.Sprintf +var _ = iterator.Done +var _ = strconv.FormatUint +var _ = time.Now + +func TestSpeechSmoke(t *testing.T) { + if testing.Short() { + t.Skip("skipping smoke test in short mode") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) + if ts == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + + projectId := testutil.ProjID() + _ = projectId + + c, err := NewClient(ctx, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + + var languageCode string = "en-US" + var sampleRateHertz int32 = 44100 + var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC + var config = &speechpb.RecognitionConfig{ + LanguageCode: languageCode, + SampleRateHertz: sampleRateHertz, + Encoding: encoding, + } + var uri string = "gs://gapic-toolkit/hello.flac" + var audio = &speechpb.RecognitionAudio{ + AudioSource: &speechpb.RecognitionAudio_Uri{ + Uri: uri, + }, + } + var request = &speechpb.RecognizeRequest{ + Config: config, + Audio: audio, + } + + if _, err := c.Recognize(ctx, request); err != nil { + t.Error(err) + } +} diff --git a/vendor/cloud.google.com/go/speech/apiv1/doc.go b/vendor/cloud.google.com/go/speech/apiv1/doc.go new file mode 100644 index 0000000..f6e6d18 --- /dev/null +++ b/vendor/cloud.google.com/go/speech/apiv1/doc.go @@ -0,0 +1,45 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package speech is an auto-generated package for the +// Google Cloud Speech API. + +// +// Google Cloud Speech API. +package speech // import "cloud.google.com/go/speech/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + } +} diff --git a/vendor/cloud.google.com/go/speech/apiv1/mock_test.go b/vendor/cloud.google.com/go/speech/apiv1/mock_test.go new file mode 100644 index 0000000..389aafe --- /dev/null +++ b/vendor/cloud.google.com/go/speech/apiv1/mock_test.go @@ -0,0 +1,405 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package speech + +import ( + speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockSpeechServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + speechpb.SpeechServer + + reqs []proto.Message + + // If set, all calls return this error. 
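+	// (The *Error tests below set it via gstatus.Error to simulate failures.)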
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockSpeechServer) Recognize(ctx context.Context, req *speechpb.RecognizeRequest) (*speechpb.RecognizeResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*speechpb.RecognizeResponse), nil +} + +func (s *mockSpeechServer) LongRunningRecognize(ctx context.Context, req *speechpb.LongRunningRecognizeRequest) (*longrunningpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +func (s *mockSpeechServer) StreamingRecognize(stream speechpb.Speech_StreamingRecognizeServer) error { + md, _ := metadata.FromIncomingContext(stream.Context()) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + for { + if req, err := stream.Recv(); err == io.EOF { + break + } else if err != nil { + return err + } else { + s.reqs = append(s.reqs, req) + } + } + if s.err != nil { + return s.err + } + for _, v := range s.resps { + if err := stream.Send(v.(*speechpb.StreamingRecognizeResponse)); err != nil { + return err + } + } + return nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
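+//
+// The wiring below is the usual in-process mock pattern: TestMain registers
+// mockSpeech with a gRPC server listening on a random localhost port, dials
+// that address insecurely, and exposes the connection as option.WithGRPCConn,
+// so each test's NewClient call talks to the mock rather than the real API.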
+var clientOpt option.ClientOption + +var ( + mockSpeech mockSpeechServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + speechpb.RegisterSpeechServer(serv, &mockSpeech) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestSpeechRecognize(t *testing.T) { + var expectedResponse *speechpb.RecognizeResponse = &speechpb.RecognizeResponse{} + + mockSpeech.err = nil + mockSpeech.reqs = nil + + mockSpeech.resps = append(mockSpeech.resps[:0], expectedResponse) + + var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC + var sampleRateHertz int32 = 44100 + var languageCode string = "en-US" + var config = &speechpb.RecognitionConfig{ + Encoding: encoding, + SampleRateHertz: sampleRateHertz, + LanguageCode: languageCode, + } + var uri string = "gs://bucket_name/file_name.flac" + var audio = &speechpb.RecognitionAudio{ + AudioSource: &speechpb.RecognitionAudio_Uri{ + Uri: uri, + }, + } + var request = &speechpb.RecognizeRequest{ + Config: config, + Audio: audio, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Recognize(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpeech.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpeechRecognizeError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpeech.err = gstatus.Error(errCode, "test error") + + var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC + var sampleRateHertz int32 = 44100 + var languageCode string = "en-US" + var config = &speechpb.RecognitionConfig{ + Encoding: encoding, + SampleRateHertz: sampleRateHertz, + LanguageCode: languageCode, + } + var uri string = "gs://bucket_name/file_name.flac" + var audio = &speechpb.RecognitionAudio{ + AudioSource: &speechpb.RecognitionAudio_Uri{ + Uri: uri, + }, + } + var request = &speechpb.RecognizeRequest{ + Config: config, + Audio: audio, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Recognize(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSpeechLongRunningRecognize(t *testing.T) { + var expectedResponse *speechpb.LongRunningRecognizeResponse = &speechpb.LongRunningRecognizeResponse{} + + mockSpeech.err = nil + mockSpeech.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockSpeech.resps = append(mockSpeech.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC + var sampleRateHertz int32 = 44100 + var languageCode string = "en-US" + var config = &speechpb.RecognitionConfig{ + Encoding: encoding, + SampleRateHertz: sampleRateHertz, + LanguageCode: 
languageCode, + } + var uri string = "gs://bucket_name/file_name.flac" + var audio = &speechpb.RecognitionAudio{ + AudioSource: &speechpb.RecognitionAudio_Uri{ + Uri: uri, + }, + } + var request = &speechpb.LongRunningRecognizeRequest{ + Config: config, + Audio: audio, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.LongRunningRecognize(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpeech.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpeechLongRunningRecognizeError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpeech.err = nil + mockSpeech.resps = append(mockSpeech.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC + var sampleRateHertz int32 = 44100 + var languageCode string = "en-US" + var config = &speechpb.RecognitionConfig{ + Encoding: encoding, + SampleRateHertz: sampleRateHertz, + LanguageCode: languageCode, + } + var uri string = "gs://bucket_name/file_name.flac" + var audio = &speechpb.RecognitionAudio{ + AudioSource: &speechpb.RecognitionAudio_Uri{ + Uri: uri, + }, + } + var request = &speechpb.LongRunningRecognizeRequest{ + Config: config, + Audio: audio, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.LongRunningRecognize(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSpeechStreamingRecognize(t *testing.T) { + var expectedResponse *speechpb.StreamingRecognizeResponse = &speechpb.StreamingRecognizeResponse{} + + mockSpeech.err = nil + mockSpeech.reqs = nil + + mockSpeech.resps = append(mockSpeech.resps[:0], expectedResponse) + + var request *speechpb.StreamingRecognizeRequest = &speechpb.StreamingRecognizeRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.StreamingRecognize(context.Background()) + if err != nil { + t.Fatal(err) + } + if err := stream.Send(request); err != nil { + t.Fatal(err) + } + if err := stream.CloseSend(); err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpeech.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpeechStreamingRecognizeError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpeech.err = gstatus.Error(errCode, "test error") + + var request *speechpb.StreamingRecognizeRequest = &speechpb.StreamingRecognizeRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + 
t.Fatal(err) + } + + stream, err := c.StreamingRecognize(context.Background()) + if err != nil { + t.Fatal(err) + } + if err := stream.Send(request); err != nil { + t.Fatal(err) + } + if err := stream.CloseSend(); err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/speech/apiv1/speech_client.go b/vendor/cloud.google.com/go/speech/apiv1/speech_client.go new file mode 100644 index 0000000..9e071e1 --- /dev/null +++ b/vendor/cloud.google.com/go/speech/apiv1/speech_client.go @@ -0,0 +1,263 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package speech + +import ( + "time" + + "cloud.google.com/go/internal/version" + "cloud.google.com/go/longrunning" + lroauto "cloud.google.com/go/longrunning/autogen" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. +type CallOptions struct { + Recognize []gax.CallOption + LongRunningRecognize []gax.CallOption + StreamingRecognize []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("speech.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &CallOptions{ + Recognize: retry[[2]string{"default", "idempotent"}], + LongRunningRecognize: retry[[2]string{"default", "non_idempotent"}], + StreamingRecognize: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with Google Cloud Speech API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client speechpb.SpeechClient + + // LROClient is used internally to handle longrunning operations. + // It is exposed so that its CallOptions can be modified if required. + // Users should not Close this client. + LROClient *lroauto.OperationsClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new speech client. 
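+// Callers should Close the client when it is no longer needed; a minimal
+// sketch (error handling elided, see the package example tests):
+//
+//	c, err := NewClient(ctx)
+//	defer c.Close()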
+// +// Service that implements Google Cloud Speech API. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: speechpb.NewSpeechClient(conn), + } + c.setGoogleClientInfo() + + c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn)) + if err != nil { + // This error "should not happen", since we are just reusing old connection + // and never actually need to dial. + // If this does happen, we could leak conn. However, we cannot close conn: + // If the user invoked the function with option.WithGRPCConn, + // we would close a connection that's still in use. + // TODO(pongad): investigate error conditions. + return nil, err + } + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// Recognize performs synchronous speech recognition: receive results after all audio +// has been sent and processed. +func (c *Client) Recognize(ctx context.Context, req *speechpb.RecognizeRequest, opts ...gax.CallOption) (*speechpb.RecognizeResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.Recognize[0:len(c.CallOptions.Recognize):len(c.CallOptions.Recognize)], opts...) + var resp *speechpb.RecognizeResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.Recognize(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// LongRunningRecognize performs asynchronous speech recognition: receive results via the +// google.longrunning.Operations interface. Returns either an +// Operation.error or an Operation.response which contains +// a LongRunningRecognizeResponse message. +func (c *Client) LongRunningRecognize(ctx context.Context, req *speechpb.LongRunningRecognizeRequest, opts ...gax.CallOption) (*LongRunningRecognizeOperation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.LongRunningRecognize[0:len(c.CallOptions.LongRunningRecognize):len(c.CallOptions.LongRunningRecognize)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.LongRunningRecognize(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return &LongRunningRecognizeOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, resp), + }, nil +} + +// StreamingRecognize performs bidirectional streaming speech recognition: receive results while +// sending audio. 
This method is only available via the gRPC API (not REST). +func (c *Client) StreamingRecognize(ctx context.Context, opts ...gax.CallOption) (speechpb.Speech_StreamingRecognizeClient, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.StreamingRecognize[0:len(c.CallOptions.StreamingRecognize):len(c.CallOptions.StreamingRecognize)], opts...) + var resp speechpb.Speech_StreamingRecognizeClient + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.StreamingRecognize(ctx, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// LongRunningRecognizeOperation manages a long-running operation from LongRunningRecognize. +type LongRunningRecognizeOperation struct { + lro *longrunning.Operation +} + +// LongRunningRecognizeOperation returns a new LongRunningRecognizeOperation from a given name. +// The name must be that of a previously created LongRunningRecognizeOperation, possibly from a different process. +func (c *Client) LongRunningRecognizeOperation(name string) *LongRunningRecognizeOperation { + return &LongRunningRecognizeOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning the response and any errors encountered. +// +// See documentation of Poll for error-handling information. +func (op *LongRunningRecognizeOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*speechpb.LongRunningRecognizeResponse, error) { + var resp speechpb.LongRunningRecognizeResponse + if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil { + return nil, err + } + return &resp, nil +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true, and the response of the operation is returned. +// If Poll succeeds and the operation has not completed, the returned response and error are both nil. +func (op *LongRunningRecognizeOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*speechpb.LongRunningRecognizeResponse, error) { + var resp speechpb.LongRunningRecognizeResponse + if err := op.lro.Poll(ctx, &resp, opts...); err != nil { + return nil, err + } + if !op.Done() { + return nil, nil + } + return &resp, nil +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. +func (op *LongRunningRecognizeOperation) Metadata() (*speechpb.LongRunningRecognizeMetadata, error) { + var meta speechpb.LongRunningRecognizeMetadata + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. 
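+// Done does not contact the server; it reflects the state observed by the
+// most recent call to Poll or Wait.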
+func (op *LongRunningRecognizeOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *LongRunningRecognizeOperation) Name() string { + return op.lro.Name() +} diff --git a/vendor/cloud.google.com/go/speech/apiv1/speech_client_example_test.go b/vendor/cloud.google.com/go/speech/apiv1/speech_client_example_test.go new file mode 100644 index 0000000..1377f42 --- /dev/null +++ b/vendor/cloud.google.com/go/speech/apiv1/speech_client_example_test.go @@ -0,0 +1,110 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package speech_test + +import ( + "io" + + "cloud.google.com/go/speech/apiv1" + "golang.org/x/net/context" + speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := speech.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_Recognize() { + ctx := context.Background() + c, err := speech.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &speechpb.RecognizeRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.Recognize(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_LongRunningRecognize() { + ctx := context.Background() + c, err := speech.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &speechpb.LongRunningRecognizeRequest{ + // TODO: Fill request struct fields. + } + op, err := c.LongRunningRecognize(ctx, req) + if err != nil { + // TODO: Handle error. + } + + resp, err := op.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_StreamingRecognize() { + ctx := context.Background() + c, err := speech.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + stream, err := c.StreamingRecognize(ctx) + if err != nil { + // TODO: Handle error. + } + go func() { + reqs := []*speechpb.StreamingRecognizeRequest{ + // TODO: Create requests. + } + for _, req := range reqs { + if err := stream.Send(req); err != nil { + // TODO: Handle error. + } + } + stream.CloseSend() + }() + for { + resp, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + // TODO: handle error. + } + // TODO: Use resp. + _ = resp + } +} diff --git a/vendor/cloud.google.com/go/speech/apiv1beta1/SyncRecognize_smoke_test.go b/vendor/cloud.google.com/go/speech/apiv1beta1/SyncRecognize_smoke_test.go new file mode 100644 index 0000000..8277d7f --- /dev/null +++ b/vendor/cloud.google.com/go/speech/apiv1beta1/SyncRecognize_smoke_test.go @@ -0,0 +1,80 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package speech + +import ( + speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1beta1" +) + +import ( + "fmt" + "strconv" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var _ = fmt.Sprintf +var _ = iterator.Done +var _ = strconv.FormatUint +var _ = time.Now + +func TestSpeechSmoke(t *testing.T) { + if testing.Short() { + t.Skip("skipping smoke test in short mode") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) + if ts == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + + projectId := testutil.ProjID() + _ = projectId + + c, err := NewClient(ctx, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + + var languageCode string = "en-US" + var sampleRate int32 = 44100 + var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC + var config = &speechpb.RecognitionConfig{ + LanguageCode: languageCode, + SampleRate: sampleRate, + Encoding: encoding, + } + var uri string = "gs://gapic-toolkit/hello.flac" + var audio = &speechpb.RecognitionAudio{ + AudioSource: &speechpb.RecognitionAudio_Uri{ + Uri: uri, + }, + } + var request = &speechpb.SyncRecognizeRequest{ + Config: config, + Audio: audio, + } + + if _, err := c.SyncRecognize(ctx, request); err != nil { + t.Error(err) + } +} diff --git a/vendor/cloud.google.com/go/speech/apiv1beta1/doc.go b/vendor/cloud.google.com/go/speech/apiv1beta1/doc.go new file mode 100644 index 0000000..f136941 --- /dev/null +++ b/vendor/cloud.google.com/go/speech/apiv1beta1/doc.go @@ -0,0 +1,46 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package speech is an auto-generated package for the +// Google Cloud Speech API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// Google Cloud Speech API. +package speech // import "cloud.google.com/go/speech/apiv1beta1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. 
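+// NewClient requests these scopes by default (via defaultClientOptions);
+// passing option.WithScopes when creating the client overrides them, e.g.
+//
+//	c, err := NewClient(ctx, option.WithScopes(DefaultAuthScopes()...))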
+func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + } +} diff --git a/vendor/cloud.google.com/go/speech/apiv1beta1/mock_test.go b/vendor/cloud.google.com/go/speech/apiv1beta1/mock_test.go new file mode 100644 index 0000000..627e907 --- /dev/null +++ b/vendor/cloud.google.com/go/speech/apiv1beta1/mock_test.go @@ -0,0 +1,400 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package speech + +import ( + speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1beta1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockSpeechServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + speechpb.SpeechServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockSpeechServer) SyncRecognize(ctx context.Context, req *speechpb.SyncRecognizeRequest) (*speechpb.SyncRecognizeResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*speechpb.SyncRecognizeResponse), nil +} + +func (s *mockSpeechServer) AsyncRecognize(ctx context.Context, req *speechpb.AsyncRecognizeRequest) (*longrunningpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +func (s *mockSpeechServer) StreamingRecognize(stream speechpb.Speech_StreamingRecognizeServer) error { + md, _ := metadata.FromIncomingContext(stream.Context()) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + for { + if req, err := stream.Recv(); err == io.EOF { + break + } else if err != nil { + return err + } else { + s.reqs = append(s.reqs, req) + } + } + if s.err != nil { + return s.err + } + for _, v := range s.resps { + if err := stream.Send(v.(*speechpb.StreamingRecognizeResponse)); err != nil { + return err + } + } + return nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
+var clientOpt option.ClientOption + +var ( + mockSpeech mockSpeechServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + speechpb.RegisterSpeechServer(serv, &mockSpeech) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestSpeechSyncRecognize(t *testing.T) { + var expectedResponse *speechpb.SyncRecognizeResponse = &speechpb.SyncRecognizeResponse{} + + mockSpeech.err = nil + mockSpeech.reqs = nil + + mockSpeech.resps = append(mockSpeech.resps[:0], expectedResponse) + + var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC + var sampleRate int32 = 44100 + var config = &speechpb.RecognitionConfig{ + Encoding: encoding, + SampleRate: sampleRate, + } + var uri string = "gs://bucket_name/file_name.flac" + var audio = &speechpb.RecognitionAudio{ + AudioSource: &speechpb.RecognitionAudio_Uri{ + Uri: uri, + }, + } + var request = &speechpb.SyncRecognizeRequest{ + Config: config, + Audio: audio, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SyncRecognize(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpeech.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpeechSyncRecognizeError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpeech.err = gstatus.Error(errCode, "test error") + + var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC + var sampleRate int32 = 44100 + var config = &speechpb.RecognitionConfig{ + Encoding: encoding, + SampleRate: sampleRate, + } + var uri string = "gs://bucket_name/file_name.flac" + var audio = &speechpb.RecognitionAudio{ + AudioSource: &speechpb.RecognitionAudio_Uri{ + Uri: uri, + }, + } + var request = &speechpb.SyncRecognizeRequest{ + Config: config, + Audio: audio, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SyncRecognize(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSpeechAsyncRecognize(t *testing.T) { + var expectedResponse *speechpb.AsyncRecognizeResponse = &speechpb.AsyncRecognizeResponse{} + + mockSpeech.err = nil + mockSpeech.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockSpeech.resps = append(mockSpeech.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC + var sampleRate int32 = 44100 + var config = &speechpb.RecognitionConfig{ + Encoding: encoding, + SampleRate: sampleRate, + } + var uri string = "gs://bucket_name/file_name.flac" + var audio = &speechpb.RecognitionAudio{ + AudioSource: &speechpb.RecognitionAudio_Uri{ + Uri: uri, + }, + } + var request = &speechpb.AsyncRecognizeRequest{ + 
Config: config, + Audio: audio, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.AsyncRecognize(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpeech.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpeechAsyncRecognizeError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpeech.err = nil + mockSpeech.resps = append(mockSpeech.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC + var sampleRate int32 = 44100 + var config = &speechpb.RecognitionConfig{ + Encoding: encoding, + SampleRate: sampleRate, + } + var uri string = "gs://bucket_name/file_name.flac" + var audio = &speechpb.RecognitionAudio{ + AudioSource: &speechpb.RecognitionAudio_Uri{ + Uri: uri, + }, + } + var request = &speechpb.AsyncRecognizeRequest{ + Config: config, + Audio: audio, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.AsyncRecognize(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSpeechStreamingRecognize(t *testing.T) { + var resultIndex int32 = 520358448 + var expectedResponse = &speechpb.StreamingRecognizeResponse{ + ResultIndex: resultIndex, + } + + mockSpeech.err = nil + mockSpeech.reqs = nil + + mockSpeech.resps = append(mockSpeech.resps[:0], expectedResponse) + + var request *speechpb.StreamingRecognizeRequest = &speechpb.StreamingRecognizeRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.StreamingRecognize(context.Background()) + if err != nil { + t.Fatal(err) + } + if err := stream.Send(request); err != nil { + t.Fatal(err) + } + if err := stream.CloseSend(); err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpeech.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpeechStreamingRecognizeError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpeech.err = gstatus.Error(errCode, "test error") + + var request *speechpb.StreamingRecognizeRequest = &speechpb.StreamingRecognizeRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.StreamingRecognize(context.Background()) + if err != nil { + t.Fatal(err) + } + if err := stream.Send(request); err != nil { + t.Fatal(err) + } + if err := stream.CloseSend(); err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if st, ok := 
gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client.go b/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client.go new file mode 100644 index 0000000..02f267f --- /dev/null +++ b/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client.go @@ -0,0 +1,265 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package speech + +import ( + "time" + + "cloud.google.com/go/internal/version" + "cloud.google.com/go/longrunning" + lroauto "cloud.google.com/go/longrunning/autogen" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1beta1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. +type CallOptions struct { + SyncRecognize []gax.CallOption + AsyncRecognize []gax.CallOption + StreamingRecognize []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("speech.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &CallOptions{ + SyncRecognize: retry[[2]string{"default", "idempotent"}], + AsyncRecognize: retry[[2]string{"default", "idempotent"}], + StreamingRecognize: retry[[2]string{"default", "non_idempotent"}], + } +} + +// Client is a client for interacting with Google Cloud Speech API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client speechpb.SpeechClient + + // LROClient is used internally to handle longrunning operations. + // It is exposed so that its CallOptions can be modified if required. + // Users should not Close this client. + LROClient *lroauto.OperationsClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new speech client. +// +// Service that implements Google Cloud Speech API. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) 
+ if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: speechpb.NewSpeechClient(conn), + } + c.setGoogleClientInfo() + + c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn)) + if err != nil { + // This error "should not happen", since we are just reusing old connection + // and never actually need to dial. + // If this does happen, we could leak conn. However, we cannot close conn: + // If the user invoked the function with option.WithGRPCConn, + // we would close a connection that's still in use. + // TODO(pongad): investigate error conditions. + return nil, err + } + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// SyncRecognize performs synchronous speech recognition: receive results after all audio +// has been sent and processed. +func (c *Client) SyncRecognize(ctx context.Context, req *speechpb.SyncRecognizeRequest, opts ...gax.CallOption) (*speechpb.SyncRecognizeResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SyncRecognize[0:len(c.CallOptions.SyncRecognize):len(c.CallOptions.SyncRecognize)], opts...) + var resp *speechpb.SyncRecognizeResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.SyncRecognize(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AsyncRecognize performs asynchronous speech recognition: receive results via the +// [google.longrunning.Operations] +// (/speech/reference/rest/v1beta1/operations#Operation) +// interface. Returns either an +// Operation.error or an Operation.response which contains +// an AsyncRecognizeResponse message. +func (c *Client) AsyncRecognize(ctx context.Context, req *speechpb.AsyncRecognizeRequest, opts ...gax.CallOption) (*AsyncRecognizeOperation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.AsyncRecognize[0:len(c.CallOptions.AsyncRecognize):len(c.CallOptions.AsyncRecognize)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AsyncRecognize(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return &AsyncRecognizeOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, resp), + }, nil +} + +// StreamingRecognize performs bidirectional streaming speech recognition: receive results while +// sending audio. This method is only available via the gRPC API (not REST). 
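+// A minimal send/receive sketch (error handling elided; see the streaming
+// example in speech_client_example_test.go for the full pattern):
+//
+//	stream, _ := c.StreamingRecognize(ctx)
+//	_ = stream.Send(&speechpb.StreamingRecognizeRequest{ /* config, then audio */ })
+//	_ = stream.CloseSend()
+//	resp, _ := stream.Recv()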
+func (c *Client) StreamingRecognize(ctx context.Context, opts ...gax.CallOption) (speechpb.Speech_StreamingRecognizeClient, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.StreamingRecognize[0:len(c.CallOptions.StreamingRecognize):len(c.CallOptions.StreamingRecognize)], opts...) + var resp speechpb.Speech_StreamingRecognizeClient + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.StreamingRecognize(ctx, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AsyncRecognizeOperation manages a long-running operation from AsyncRecognize. +type AsyncRecognizeOperation struct { + lro *longrunning.Operation +} + +// AsyncRecognizeOperation returns a new AsyncRecognizeOperation from a given name. +// The name must be that of a previously created AsyncRecognizeOperation, possibly from a different process. +func (c *Client) AsyncRecognizeOperation(name string) *AsyncRecognizeOperation { + return &AsyncRecognizeOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning the response and any errors encountered. +// +// See documentation of Poll for error-handling information. +func (op *AsyncRecognizeOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*speechpb.AsyncRecognizeResponse, error) { + var resp speechpb.AsyncRecognizeResponse + if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil { + return nil, err + } + return &resp, nil +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true, and the response of the operation is returned. +// If Poll succeeds and the operation has not completed, the returned response and error are both nil. +func (op *AsyncRecognizeOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*speechpb.AsyncRecognizeResponse, error) { + var resp speechpb.AsyncRecognizeResponse + if err := op.lro.Poll(ctx, &resp, opts...); err != nil { + return nil, err + } + if !op.Done() { + return nil, nil + } + return &resp, nil +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. +func (op *AsyncRecognizeOperation) Metadata() (*speechpb.AsyncRecognizeMetadata, error) { + var meta speechpb.AsyncRecognizeMetadata + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *AsyncRecognizeOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. 
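+// The returned name can be persisted and later passed to
+// Client.AsyncRecognizeOperation to resume the operation, possibly from a
+// different process.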
+func (op *AsyncRecognizeOperation) Name() string { + return op.lro.Name() +} diff --git a/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client_example_test.go b/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client_example_test.go new file mode 100644 index 0000000..8627565 --- /dev/null +++ b/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client_example_test.go @@ -0,0 +1,110 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package speech_test + +import ( + "io" + + "cloud.google.com/go/speech/apiv1beta1" + "golang.org/x/net/context" + speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1beta1" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := speech.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_SyncRecognize() { + ctx := context.Background() + c, err := speech.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &speechpb.SyncRecognizeRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SyncRecognize(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_AsyncRecognize() { + ctx := context.Background() + c, err := speech.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &speechpb.AsyncRecognizeRequest{ + // TODO: Fill request struct fields. + } + op, err := c.AsyncRecognize(ctx, req) + if err != nil { + // TODO: Handle error. + } + + resp, err := op.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_StreamingRecognize() { + ctx := context.Background() + c, err := speech.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + stream, err := c.StreamingRecognize(ctx) + if err != nil { + // TODO: Handle error. + } + go func() { + reqs := []*speechpb.StreamingRecognizeRequest{ + // TODO: Create requests. + } + for _, req := range reqs { + if err := stream.Send(req); err != nil { + // TODO: Handle error. + } + } + stream.CloseSend() + }() + for { + resp, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + // TODO: handle error. + } + // TODO: Use resp. + _ = resp + } +} diff --git a/vendor/cloud.google.com/go/storage/acl.go b/vendor/cloud.google.com/go/storage/acl.go new file mode 100644 index 0000000..829aa90 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/acl.go @@ -0,0 +1,245 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+	"net/http"
+	"reflect"
+
+	"cloud.google.com/go/internal/trace"
+	"golang.org/x/net/context"
+	"google.golang.org/api/googleapi"
+	raw "google.golang.org/api/storage/v1"
+)
+
+// ACLRole is the level of access to grant.
+type ACLRole string
+
+const (
+	RoleOwner  ACLRole = "OWNER"
+	RoleReader ACLRole = "READER"
+	RoleWriter ACLRole = "WRITER"
+)
+
+// ACLEntity refers to a user or group.
+// They are sometimes referred to as grantees.
+//
+// It could be in the form of:
+// "user-<userId>", "user-<email>", "group-<groupId>", "group-<email>",
+// "domain-<domain>" and "project-team-<projectId>".
+//
+// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers.
+type ACLEntity string
+
+const (
+	AllUsers              ACLEntity = "allUsers"
+	AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers"
+)
+
+// ACLRule represents a grant for a role to an entity (user, group or team) for a Google Cloud Storage object or bucket.
+type ACLRule struct {
+	Entity ACLEntity
+	Role   ACLRole
+}
+
+// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object.
+type ACLHandle struct {
+	c           *Client
+	bucket      string
+	object      string
+	isDefault   bool
+	userProject string // for requester-pays buckets
+}
+
+// Delete permanently deletes the ACL entry for the given entity.
+func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) (err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Delete")
+	defer func() { trace.EndSpan(ctx, err) }()
+
+	if a.object != "" {
+		return a.objectDelete(ctx, entity)
+	}
+	if a.isDefault {
+		return a.bucketDefaultDelete(ctx, entity)
+	}
+	return a.bucketDelete(ctx, entity)
+}
+
+// Set sets the permission level for the given entity.
+func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) (err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Set")
+	defer func() { trace.EndSpan(ctx, err) }()
+
+	if a.object != "" {
+		return a.objectSet(ctx, entity, role, false)
+	}
+	if a.isDefault {
+		return a.objectSet(ctx, entity, role, true)
+	}
+	return a.bucketSet(ctx, entity, role)
+}
+
+// List retrieves ACL entries.
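+// A minimal sketch of reading the rules for a bucket (assuming an existing
+// *Client named client; the bucket name is hypothetical):
+//
+//	rules, err := client.Bucket("my-bucket").ACL().List(ctx)
+//	for _, rule := range rules {
+//		log.Println(rule.Entity, rule.Role)
+//	}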
+func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.List") + defer func() { trace.EndSpan(ctx, err) }() + + if a.object != "" { + return a.objectList(ctx) + } + if a.isDefault { + return a.bucketDefaultList(ctx) + } + return a.bucketList(ctx) +} + +func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) { + var acls *raw.ObjectAccessControls + var err error + err = runWithRetry(ctx, func() error { + req := a.c.raw.DefaultObjectAccessControls.List(a.bucket) + a.configureCall(req, ctx) + acls, err = req.Do() + return err + }) + if err != nil { + return nil, err + } + return toACLRules(acls.Items), nil +} + +func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error { + return runWithRetry(ctx, func() error { + req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity)) + a.configureCall(req, ctx) + return req.Do() + }) +} + +func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) { + var acls *raw.BucketAccessControls + var err error + err = runWithRetry(ctx, func() error { + req := a.c.raw.BucketAccessControls.List(a.bucket) + a.configureCall(req, ctx) + acls, err = req.Do() + return err + }) + if err != nil { + return nil, err + } + r := make([]ACLRule, len(acls.Items)) + for i, v := range acls.Items { + r[i].Entity = ACLEntity(v.Entity) + r[i].Role = ACLRole(v.Role) + } + return r, nil +} + +func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error { + acl := &raw.BucketAccessControl{ + Bucket: a.bucket, + Entity: string(entity), + Role: string(role), + } + err := runWithRetry(ctx, func() error { + req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl) + a.configureCall(req, ctx) + _, err := req.Do() + return err + }) + if err != nil { + return err + } + return nil +} + +func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error { + err := runWithRetry(ctx, func() error { + req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity)) + a.configureCall(req, ctx) + return req.Do() + }) + if err != nil { + return err + } + return nil +} + +func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) { + var acls *raw.ObjectAccessControls + var err error + err = runWithRetry(ctx, func() error { + req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object) + a.configureCall(req, ctx) + acls, err = req.Do() + return err + }) + if err != nil { + return nil, err + } + return toACLRules(acls.Items), nil +} + +func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error { + type setRequest interface { + Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error) + Header() http.Header + } + + acl := &raw.ObjectAccessControl{ + Bucket: a.bucket, + Entity: string(entity), + Role: string(role), + } + var req setRequest + if isBucketDefault { + req = a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl) + } else { + req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl) + } + a.configureCall(req, ctx) + return runWithRetry(ctx, func() error { + _, err := req.Do() + return err + }) +} + +func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error { + return runWithRetry(ctx, func() error { + req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity)) + a.configureCall(req, ctx) + return req.Do() + }) +} + +func (a 
*ACLHandle) configureCall(call interface {
+	Header() http.Header
+}, ctx context.Context) {
+	vc := reflect.ValueOf(call)
+	vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)})
+	if a.userProject != "" {
+		vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(a.userProject)})
+	}
+	setClientHeader(call.Header())
+}
+
+func toACLRules(items []*raw.ObjectAccessControl) []ACLRule {
+	r := make([]ACLRule, 0, len(items))
+	for _, item := range items {
+		r = append(r, ACLRule{Entity: ACLEntity(item.Entity), Role: ACLRole(item.Role)})
+	}
+	return r
+}
diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go
new file mode 100644
index 0000000..41e5d42
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/bucket.go
@@ -0,0 +1,944 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+	"fmt"
+	"net/http"
+	"reflect"
+	"time"
+
+	"cloud.google.com/go/internal/optional"
+	"cloud.google.com/go/internal/trace"
+	"golang.org/x/net/context"
+	"google.golang.org/api/googleapi"
+	"google.golang.org/api/iterator"
+	raw "google.golang.org/api/storage/v1"
+)
+
+// BucketHandle provides operations on a Google Cloud Storage bucket.
+// Use Client.Bucket to get a handle.
+type BucketHandle struct {
+	c                *Client
+	name             string
+	acl              ACLHandle
+	defaultObjectACL ACLHandle
+	conds            *BucketConditions
+	userProject      string // project for Requester Pays buckets
+}
+
+// Bucket returns a BucketHandle, which provides operations on the named bucket.
+// This call does not perform any network operations.
+//
+// The supplied name must contain only lowercase letters, numbers, dashes,
+// underscores, and dots. The full specification for valid bucket names can be
+// found at:
+//   https://cloud.google.com/storage/docs/bucket-naming
+func (c *Client) Bucket(name string) *BucketHandle {
+	return &BucketHandle{
+		c:    c,
+		name: name,
+		acl: ACLHandle{
+			c:      c,
+			bucket: name,
+		},
+		defaultObjectACL: ACLHandle{
+			c:         c,
+			bucket:    name,
+			isDefault: true,
+		},
+	}
+}
+
+// Create creates the Bucket in the project.
+// If attrs is nil the API defaults will be used.
+func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) (err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create")
+	defer func() { trace.EndSpan(ctx, err) }()
+
+	var bkt *raw.Bucket
+	if attrs != nil {
+		bkt = attrs.toRawBucket()
+	} else {
+		bkt = &raw.Bucket{}
+	}
+	bkt.Name = b.name
+	// If there is lifecycle information but no location, explicitly set
+	// the location. This is a GCS quirk/bug.
+	if bkt.Location == "" && bkt.Lifecycle != nil {
+		bkt.Location = "US"
+	}
+	req := b.c.raw.Buckets.Insert(projectID, bkt)
+	setClientHeader(req.Header())
+	return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do(); return err })
+}
+
+// Delete deletes the Bucket.
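+// Note that Google Cloud Storage only permits deleting a bucket that is
+// empty, so callers typically delete all of the bucket's objects first.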
+func (b *BucketHandle) Delete(ctx context.Context) (err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Delete")
+	defer func() { trace.EndSpan(ctx, err) }()
+
+	req, err := b.newDeleteCall()
+	if err != nil {
+		return err
+	}
+	return runWithRetry(ctx, func() error { return req.Context(ctx).Do() })
+}
+
+func (b *BucketHandle) newDeleteCall() (*raw.BucketsDeleteCall, error) {
+	req := b.c.raw.Buckets.Delete(b.name)
+	setClientHeader(req.Header())
+	if err := applyBucketConds("BucketHandle.Delete", b.conds, req); err != nil {
+		return nil, err
+	}
+	if b.userProject != "" {
+		req.UserProject(b.userProject)
+	}
+	return req, nil
+}
+
+// ACL returns an ACLHandle, which provides access to the bucket's access control list.
+// This controls who can list, create or overwrite the objects in a bucket.
+// This call does not perform any network operations.
+func (b *BucketHandle) ACL() *ACLHandle {
+	return &b.acl
+}
+
+// DefaultObjectACL returns an ACLHandle, which provides access to the bucket's default object ACLs.
+// These ACLs are applied to newly created objects in this bucket that do not have a defined ACL.
+// This call does not perform any network operations.
+func (b *BucketHandle) DefaultObjectACL() *ACLHandle {
+	return &b.defaultObjectACL
+}
+
+// Object returns an ObjectHandle, which provides operations on the named object.
+// This call does not perform any network operations.
+//
+// name must consist entirely of valid UTF-8-encoded runes. The full specification
+// for valid object names can be found at:
+//   https://cloud.google.com/storage/docs/bucket-naming
+func (b *BucketHandle) Object(name string) *ObjectHandle {
+	return &ObjectHandle{
+		c:      b.c,
+		bucket: b.name,
+		object: name,
+		acl: ACLHandle{
+			c:           b.c,
+			bucket:      b.name,
+			object:      name,
+			userProject: b.userProject,
+		},
+		gen:         -1,
+		userProject: b.userProject,
+	}
+}
+
+// Attrs returns the metadata for the bucket.
+func (b *BucketHandle) Attrs(ctx context.Context) (attrs *BucketAttrs, err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Attrs")
+	defer func() { trace.EndSpan(ctx, err) }()
+
+	req, err := b.newGetCall()
+	if err != nil {
+		return nil, err
+	}
+	var resp *raw.Bucket
+	err = runWithRetry(ctx, func() error {
+		resp, err = req.Context(ctx).Do()
+		return err
+	})
+	if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
+		return nil, ErrBucketNotExist
+	}
+	if err != nil {
+		return nil, err
+	}
+	return newBucket(resp)
+}
+
+func (b *BucketHandle) newGetCall() (*raw.BucketsGetCall, error) {
+	req := b.c.raw.Buckets.Get(b.name).Projection("full")
+	setClientHeader(req.Header())
+	if err := applyBucketConds("BucketHandle.Attrs", b.conds, req); err != nil {
+		return nil, err
+	}
+	if b.userProject != "" {
+		req.UserProject(b.userProject)
+	}
+	return req, nil
+}
+
+// Update updates a bucket's attributes.
+func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (attrs *BucketAttrs, err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Update")
+	defer func() { trace.EndSpan(ctx, err) }()
+
+	req, err := b.newPatchCall(&uattrs)
+	if err != nil {
+		return nil, err
+	}
+	// TODO(jba): retry iff metagen is set?
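+	// Note: unlike Attrs and Delete above, this patch request is issued
+	// exactly once rather than wrapped in runWithRetry, presumably because
+	// a Patch is not idempotent unless a metageneration precondition is
+	// set (hence the TODO above).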
+	rb, err := req.Context(ctx).Do()
+	if err != nil {
+		return nil, err
+	}
+	return newBucket(rb)
+}
+
+func (b *BucketHandle) newPatchCall(uattrs *BucketAttrsToUpdate) (*raw.BucketsPatchCall, error) {
+	rb := uattrs.toRawBucket()
+	req := b.c.raw.Buckets.Patch(b.name, rb).Projection("full")
+	setClientHeader(req.Header())
+	if err := applyBucketConds("BucketHandle.Update", b.conds, req); err != nil {
+		return nil, err
+	}
+	if b.userProject != "" {
+		req.UserProject(b.userProject)
+	}
+	return req, nil
+}
+
+// BucketAttrs represents the metadata for a Google Cloud Storage bucket.
+// Read-only fields are ignored by BucketHandle.Create.
+type BucketAttrs struct {
+	// Name is the name of the bucket.
+	// This field is read-only.
+	Name string
+
+	// ACL is the list of access control rules on the bucket.
+	ACL []ACLRule
+
+	// DefaultObjectACL is the list of access controls to
+	// apply to new objects when no object ACL is provided.
+	DefaultObjectACL []ACLRule
+
+	// Location is the location of the bucket. It defaults to "US".
+	Location string
+
+	// MetaGeneration is the metadata generation of the bucket.
+	// This field is read-only.
+	MetaGeneration int64
+
+	// StorageClass is the default storage class of the bucket. This defines
+	// how objects in the bucket are stored and determines the SLA
+	// and the cost of storage. Typical values are "MULTI_REGIONAL",
+	// "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD" and
+	// "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD", which
+	// is equivalent to "MULTI_REGIONAL" or "REGIONAL" depending on
+	// the bucket's location settings.
+	StorageClass string
+
+	// Created is the creation time of the bucket.
+	// This field is read-only.
+	Created time.Time
+
+	// VersioningEnabled reports whether this bucket has versioning enabled.
+	VersioningEnabled bool
+
+	// Labels are the bucket's labels.
+	Labels map[string]string
+
+	// RequesterPays reports whether the bucket is a Requester Pays bucket.
+	// Clients performing operations on Requester Pays buckets must provide
+	// a user project (see BucketHandle.UserProject), which will be billed
+	// for the operations.
+	RequesterPays bool
+
+	// Lifecycle is the lifecycle configuration for objects in the bucket.
+	Lifecycle Lifecycle
+
+	// RetentionPolicy enforces a minimum retention time for all objects
+	// contained in the bucket. A RetentionPolicy of nil implies the bucket
+	// has no minimum data retention.
+	//
+	// This feature is in private alpha release. It is not currently available to
+	// most customers. It might be changed in backwards-incompatible ways and is not
+	// subject to any SLA or deprecation policy.
+	RetentionPolicy *RetentionPolicy
+
+	// CORS is the bucket's Cross-Origin Resource Sharing (CORS) configuration.
+	CORS []CORS
+}
+
+// Lifecycle is the lifecycle configuration for objects in the bucket.
+type Lifecycle struct {
+	Rules []LifecycleRule
+}
+
+// RetentionPolicy enforces a minimum retention time for all objects
+// contained in the bucket.
+//
+// Any attempt to overwrite or delete objects younger than the retention
+// period will result in an error. An unlocked retention policy can be
+// modified or removed from the bucket via the Update method. A
+// locked retention policy cannot be removed or shortened in duration
+// for the lifetime of the bucket.
+//
+// This feature is in private alpha release. It is not currently available to
+// most customers. It might be changed in backwards-incompatible ways and is not
+// subject to any SLA or deprecation policy.
+type RetentionPolicy struct {
+	// RetentionPeriod specifies the duration that objects need to be
+	// retained. Retention duration must be greater than zero and less than
+	// 100 years. Note that enforcement of retention periods less than a day
+	// is not guaranteed. Such periods should only be used for testing
+	// purposes.
+	RetentionPeriod time.Duration
+
+	// EffectiveTime is the time from which the policy was enforced and
+	// effective. This field is read-only.
+	EffectiveTime time.Time
+}
+
+const (
+	// RFC3339 date with only the date segment, used for CreatedBefore in LifecycleRule.
+	rfc3339Date = "2006-01-02"
+
+	// DeleteAction is a lifecycle action that deletes live and/or archived
+	// objects. Takes precedence over SetStorageClass actions.
+	DeleteAction = "Delete"
+
+	// SetStorageClassAction changes the storage class of live and/or archived
+	// objects.
+	SetStorageClassAction = "SetStorageClass"
+)
+
+// LifecycleRule is a lifecycle configuration rule.
+//
+// When all the configured conditions are met by an object in the bucket, the
+// configured action will automatically be taken on that object.
+type LifecycleRule struct {
+	// Action is the action to take when all of the associated conditions are
+	// met.
+	Action LifecycleAction
+
+	// Condition is the set of conditions that must be met for the associated
+	// action to be taken.
+	Condition LifecycleCondition
+}
+
+// LifecycleAction is a lifecycle configuration action.
+type LifecycleAction struct {
+	// Type is the type of action to take on matching objects.
+	//
+	// Acceptable values are "Delete" to delete matching objects and
+	// "SetStorageClass" to set the storage class defined in StorageClass on
+	// matching objects.
+	Type string
+
+	// StorageClass is the storage class to set on matching objects if the Action
+	// is "SetStorageClass".
+	StorageClass string
+}
+
+// Liveness specifies whether the object is live or not.
+type Liveness int
+
+const (
+	// LiveAndArchived includes both live and archived objects.
+	LiveAndArchived Liveness = iota
+	// Live specifies that the object is still live.
+	Live
+	// Archived specifies that the object is archived.
+	Archived
+)
+
+// LifecycleCondition is a set of conditions used to match objects and take an
+// action automatically.
+//
+// All configured conditions must be met for the associated action to be taken.
+type LifecycleCondition struct {
+	// AgeInDays is the age of the object in days.
+	AgeInDays int64
+
+	// CreatedBefore is the time the object was created.
+	//
+	// This condition is satisfied when an object is created before midnight of
+	// the specified date in UTC.
+	CreatedBefore time.Time
+
+	// Liveness specifies the object's liveness. Relevant only for versioned objects.
+	Liveness Liveness
+
+	// MatchesStorageClasses is the condition matching the object's storage
+	// class.
+	//
+	// Values include "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE",
+	// "STANDARD", and "DURABLE_REDUCED_AVAILABILITY".
+	MatchesStorageClasses []string
+
+	// NumNewerVersions is the condition matching objects with a number of newer versions.
+	//
+	// If the value is N, this condition is satisfied when there are at least N
+	// versions (including the live version) newer than this version of the
+	// object.
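+	// For example, if NumNewerVersions is 2, this condition is satisfied for
+	// a given version only once at least two newer versions of the object
+	// exist.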
+	NumNewerVersions int64
+}
+
+func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
+	if b == nil {
+		return nil, nil
+	}
+	rp, err := toRetentionPolicy(b.RetentionPolicy)
+	if err != nil {
+		return nil, err
+	}
+	bucket := &BucketAttrs{
+		Name:              b.Name,
+		Location:          b.Location,
+		MetaGeneration:    b.Metageneration,
+		StorageClass:      b.StorageClass,
+		Created:           convertTime(b.TimeCreated),
+		VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled,
+		Labels:            b.Labels,
+		RequesterPays:     b.Billing != nil && b.Billing.RequesterPays,
+		Lifecycle:         toLifecycle(b.Lifecycle),
+		RetentionPolicy:   rp,
+		CORS:              toCORS(b.Cors),
+	}
+	acl := make([]ACLRule, len(b.Acl))
+	for i, rule := range b.Acl {
+		acl[i] = ACLRule{
+			Entity: ACLEntity(rule.Entity),
+			Role:   ACLRole(rule.Role),
+		}
+	}
+	bucket.ACL = acl
+	objACL := make([]ACLRule, len(b.DefaultObjectAcl))
+	for i, rule := range b.DefaultObjectAcl {
+		objACL[i] = ACLRule{
+			Entity: ACLEntity(rule.Entity),
+			Role:   ACLRole(rule.Role),
+		}
+	}
+	bucket.DefaultObjectACL = objACL
+	return bucket, nil
+}
+
+// toRawBucket copies the editable attributes from b to the raw library's Bucket type.
+func (b *BucketAttrs) toRawBucket() *raw.Bucket {
+	var acl []*raw.BucketAccessControl
+	if len(b.ACL) > 0 {
+		acl = make([]*raw.BucketAccessControl, len(b.ACL))
+		for i, rule := range b.ACL {
+			acl[i] = &raw.BucketAccessControl{
+				Entity: string(rule.Entity),
+				Role:   string(rule.Role),
+			}
+		}
+	}
+	dACL := toRawObjectACL(b.DefaultObjectACL)
+	// Copy label map.
+	var labels map[string]string
+	if len(b.Labels) > 0 {
+		labels = make(map[string]string, len(b.Labels))
+		for k, v := range b.Labels {
+			labels[k] = v
+		}
+	}
+	// Ignore VersioningEnabled if it is false. This is OK because
+	// we only call this method when creating a bucket, and by default
+	// new buckets have versioning off.
+	var v *raw.BucketVersioning
+	if b.VersioningEnabled {
+		v = &raw.BucketVersioning{Enabled: true}
+	}
+	var bb *raw.BucketBilling
+	if b.RequesterPays {
+		bb = &raw.BucketBilling{RequesterPays: true}
+	}
+	return &raw.Bucket{
+		Name:             b.Name,
+		DefaultObjectAcl: dACL,
+		Location:         b.Location,
+		StorageClass:     b.StorageClass,
+		Acl:              acl,
+		Versioning:       v,
+		Labels:           labels,
+		Billing:          bb,
+		Lifecycle:        toRawLifecycle(b.Lifecycle),
+		RetentionPolicy:  b.RetentionPolicy.toRawRetentionPolicy(),
+		Cors:             toRawCORS(b.CORS),
+	}
+}
+
+// CORS is the bucket's Cross-Origin Resource Sharing (CORS) configuration.
+type CORS struct {
+	// MaxAge is the value to return in the Access-Control-Max-Age
+	// header used in preflight responses.
+	MaxAge time.Duration
+
+	// Methods is the list of HTTP methods on which to include CORS response
+	// headers (GET, OPTIONS, POST, etc.). Note: "*" is permitted in the list
+	// of methods, and means "any method".
+	Methods []string
+
+	// Origins is the list of Origins eligible to receive CORS response
+	// headers. Note: "*" is permitted in the list of origins, and means
+	// "any Origin".
+	Origins []string
+
+	// ResponseHeaders is the list of HTTP headers other than the simple
+	// response headers to give permission for the user-agent to share
+	// across domains.
+	ResponseHeaders []string
+}
+
+// BucketAttrsToUpdate contains the attributes of a bucket that can be
+// changed via BucketHandle.Update. Unset (nil) fields are ignored.
+type BucketAttrsToUpdate struct {
+	// VersioningEnabled, if set, updates whether the bucket uses versioning.
+	VersioningEnabled optional.Bool
+
+	// RequesterPays, if set, updates whether the bucket is a Requester Pays bucket.
+	RequesterPays optional.Bool
+
+	// RetentionPolicy, if set, updates the retention policy of the bucket.
Using + // RetentionPolicy.RetentionPeriod = 0 will delete the existing policy. + // + // This feature is in private alpha release. It is not currently available to + // most customers. It might be changed in backwards-incompatible ways and is not + // subject to any SLA or deprecation policy. + RetentionPolicy *RetentionPolicy + + // CORS, if set, replaces the CORS configuration with a new configuration. + // When an empty slice is provided, all CORS policies are removed; when nil + // is provided, the value is ignored in the update. + CORS []CORS + + setLabels map[string]string + deleteLabels map[string]bool +} + +// SetLabel causes a label to be added or modified when ua is used +// in a call to Bucket.Update. +func (ua *BucketAttrsToUpdate) SetLabel(name, value string) { + if ua.setLabels == nil { + ua.setLabels = map[string]string{} + } + ua.setLabels[name] = value +} + +// DeleteLabel causes a label to be deleted when ua is used in a +// call to Bucket.Update. +func (ua *BucketAttrsToUpdate) DeleteLabel(name string) { + if ua.deleteLabels == nil { + ua.deleteLabels = map[string]bool{} + } + ua.deleteLabels[name] = true +} + +func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { + rb := &raw.Bucket{} + if ua.CORS != nil { + rb.Cors = toRawCORS(ua.CORS) + rb.ForceSendFields = append(rb.ForceSendFields, "Cors") + } + if ua.RetentionPolicy != nil { + if ua.RetentionPolicy.RetentionPeriod == 0 { + rb.NullFields = append(rb.NullFields, "RetentionPolicy") + rb.RetentionPolicy = nil + } else { + rb.RetentionPolicy = ua.RetentionPolicy.toRawRetentionPolicy() + } + } + if ua.VersioningEnabled != nil { + rb.Versioning = &raw.BucketVersioning{ + Enabled: optional.ToBool(ua.VersioningEnabled), + ForceSendFields: []string{"Enabled"}, + } + } + if ua.RequesterPays != nil { + rb.Billing = &raw.BucketBilling{ + RequesterPays: optional.ToBool(ua.RequesterPays), + ForceSendFields: []string{"RequesterPays"}, + } + } + if ua.setLabels != nil || ua.deleteLabels != nil { + rb.Labels = map[string]string{} + for k, v := range ua.setLabels { + rb.Labels[k] = v + } + if len(rb.Labels) == 0 && len(ua.deleteLabels) > 0 { + rb.ForceSendFields = append(rb.ForceSendFields, "Labels") + } + for l := range ua.deleteLabels { + rb.NullFields = append(rb.NullFields, "Labels."+l) + } + } + return rb +} + +// If returns a new BucketHandle that applies a set of preconditions. +// Preconditions already set on the BucketHandle are ignored. +// Operations on the new handle will only occur if the preconditions are +// satisfied. The only valid preconditions for buckets are MetagenerationMatch +// and MetagenerationNotMatch. +func (b *BucketHandle) If(conds BucketConditions) *BucketHandle { + b2 := *b + b2.conds = &conds + return &b2 +} + +// BucketConditions constrain bucket methods to act on specific metagenerations. +// +// The zero value is an empty set of constraints. +type BucketConditions struct { + // MetagenerationMatch specifies that the bucket must have the given + // metageneration for the operation to occur. + // If MetagenerationMatch is zero, it has no effect. + MetagenerationMatch int64 + + // MetagenerationNotMatch specifies that the bucket must not have the given + // metageneration for the operation to occur. + // If MetagenerationNotMatch is zero, it has no effect. 
+ MetagenerationNotMatch int64 +} + +func (c *BucketConditions) validate(method string) error { + if *c == (BucketConditions{}) { + return fmt.Errorf("storage: %s: empty conditions", method) + } + if c.MetagenerationMatch != 0 && c.MetagenerationNotMatch != 0 { + return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method) + } + return nil +} + +// UserProject returns a new BucketHandle that passes the project ID as the user +// project for all subsequent calls. Calls with a user project will be billed to that +// project rather than to the bucket's owning project. +// +// A user project is required for all operations on Requester Pays buckets. +func (b *BucketHandle) UserProject(projectID string) *BucketHandle { + b2 := *b + b2.userProject = projectID + b2.acl.userProject = projectID + b2.defaultObjectACL.userProject = projectID + return &b2 +} + +// LockRetentionPolicy locks a bucket's retention policy until a previously-configured +// RetentionPeriod past the EffectiveTime. Note that if RetentionPeriod is set to less +// than a day, the retention policy is treated as a development configuration and locking +// will have no effect. The BucketHandle must have a metageneration condition that +// matches the bucket's metageneration. See BucketHandle.If. +// +// This feature is in private alpha release. It is not currently available to +// most customers. It might be changed in backwards-incompatible ways and is not +// subject to any SLA or deprecation policy. +func (b *BucketHandle) LockRetentionPolicy(ctx context.Context) error { + var metageneration int64 + if b.conds != nil { + metageneration = b.conds.MetagenerationMatch + } + req := b.c.raw.Buckets.LockRetentionPolicy(b.name, metageneration) + _, err := req.Context(ctx).Do() + return err +} + +// applyBucketConds modifies the provided call using the conditions in conds. +// call is something that quacks like a *raw.WhateverCall. 
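+// For example, *raw.BucketsGetCall, *raw.BucketsDeleteCall and
+// *raw.BucketsPatchCall all expose IfMetagenerationMatch and
+// IfMetagenerationNotMatch setters, which setConditionField below looks
+// up by name via reflection.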
+func applyBucketConds(method string, conds *BucketConditions, call interface{}) error { + if conds == nil { + return nil + } + if err := conds.validate(method); err != nil { + return err + } + cval := reflect.ValueOf(call) + switch { + case conds.MetagenerationMatch != 0: + if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method) + } + case conds.MetagenerationNotMatch != 0: + if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method) + } + } + return nil +} + +func (rp *RetentionPolicy) toRawRetentionPolicy() *raw.BucketRetentionPolicy { + if rp == nil { + return nil + } + return &raw.BucketRetentionPolicy{ + RetentionPeriod: int64(rp.RetentionPeriod / time.Second), + } +} + +func toRetentionPolicy(rp *raw.BucketRetentionPolicy) (*RetentionPolicy, error) { + if rp == nil { + return nil, nil + } + t, err := time.Parse(time.RFC3339, rp.EffectiveTime) + if err != nil { + return nil, err + } + return &RetentionPolicy{ + RetentionPeriod: time.Duration(rp.RetentionPeriod) * time.Second, + EffectiveTime: t, + }, nil +} + +func toRawCORS(c []CORS) []*raw.BucketCors { + var out []*raw.BucketCors + for _, v := range c { + out = append(out, &raw.BucketCors{ + MaxAgeSeconds: int64(v.MaxAge / time.Second), + Method: v.Methods, + Origin: v.Origins, + ResponseHeader: v.ResponseHeaders, + }) + } + return out +} + +func toCORS(rc []*raw.BucketCors) []CORS { + var out []CORS + for _, v := range rc { + out = append(out, CORS{ + MaxAge: time.Duration(v.MaxAgeSeconds) * time.Second, + Methods: v.Method, + Origins: v.Origin, + ResponseHeaders: v.ResponseHeader, + }) + } + return out +} + +func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle { + var rl raw.BucketLifecycle + if len(l.Rules) == 0 { + return nil + } + for _, r := range l.Rules { + rr := &raw.BucketLifecycleRule{ + Action: &raw.BucketLifecycleRuleAction{ + Type: r.Action.Type, + StorageClass: r.Action.StorageClass, + }, + Condition: &raw.BucketLifecycleRuleCondition{ + Age: r.Condition.AgeInDays, + MatchesStorageClass: r.Condition.MatchesStorageClasses, + NumNewerVersions: r.Condition.NumNewerVersions, + }, + } + + switch r.Condition.Liveness { + case LiveAndArchived: + rr.Condition.IsLive = nil + case Live: + rr.Condition.IsLive = googleapi.Bool(true) + case Archived: + rr.Condition.IsLive = googleapi.Bool(false) + } + + if !r.Condition.CreatedBefore.IsZero() { + rr.Condition.CreatedBefore = r.Condition.CreatedBefore.Format(rfc3339Date) + } + rl.Rule = append(rl.Rule, rr) + } + return &rl +} + +func toLifecycle(rl *raw.BucketLifecycle) Lifecycle { + var l Lifecycle + if rl == nil { + return l + } + for _, rr := range rl.Rule { + r := LifecycleRule{ + Action: LifecycleAction{ + Type: rr.Action.Type, + StorageClass: rr.Action.StorageClass, + }, + Condition: LifecycleCondition{ + AgeInDays: rr.Condition.Age, + MatchesStorageClasses: rr.Condition.MatchesStorageClass, + NumNewerVersions: rr.Condition.NumNewerVersions, + }, + } + + switch { + case rr.Condition.IsLive == nil: + r.Condition.Liveness = LiveAndArchived + case *rr.Condition.IsLive == true: + r.Condition.Liveness = Live + case *rr.Condition.IsLive == false: + r.Condition.Liveness = Archived + } + + if rr.Condition.CreatedBefore != "" { + r.Condition.CreatedBefore, _ = time.Parse(rfc3339Date, rr.Condition.CreatedBefore) + } + l.Rules = append(l.Rules, r) + } + return l +} + +// 
Objects returns an iterator over the objects in the bucket that match the Query q. +// If q is nil, no filtering is done. +func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator { + it := &ObjectIterator{ + ctx: ctx, + bucket: b, + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + if q != nil { + it.query = *q + } + return it +} + +// An ObjectIterator is an iterator over ObjectAttrs. +type ObjectIterator struct { + ctx context.Context + bucket *BucketHandle + query Query + pageInfo *iterator.PageInfo + nextFunc func() error + items []*ObjectAttrs +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +// Next returns the next result. Its second return value is iterator.Done if +// there are no more results. Once Next returns iterator.Done, all subsequent +// calls will return iterator.Done. +// +// If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will +// have a non-empty Prefix field, and a zero value for all other fields. These +// represent prefixes. +func (it *ObjectIterator) Next() (*ObjectAttrs, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + item := it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) { + req := it.bucket.c.raw.Objects.List(it.bucket.name) + setClientHeader(req.Header()) + req.Projection("full") + req.Delimiter(it.query.Delimiter) + req.Prefix(it.query.Prefix) + req.Versions(it.query.Versions) + req.PageToken(pageToken) + if it.bucket.userProject != "" { + req.UserProject(it.bucket.userProject) + } + if pageSize > 0 { + req.MaxResults(int64(pageSize)) + } + var resp *raw.Objects + var err error + err = runWithRetry(it.ctx, func() error { + resp, err = req.Context(it.ctx).Do() + return err + }) + if err != nil { + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + err = ErrBucketNotExist + } + return "", err + } + for _, item := range resp.Items { + it.items = append(it.items, newObject(item)) + } + for _, prefix := range resp.Prefixes { + it.items = append(it.items, &ObjectAttrs{Prefix: prefix}) + } + return resp.NextPageToken, nil +} + +// TODO(jbd): Add storage.buckets.update. + +// Buckets returns an iterator over the buckets in the project. You may +// optionally set the iterator's Prefix field to restrict the list to buckets +// whose names begin with the prefix. By default, all buckets in the project +// are returned. +func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator { + it := &BucketIterator{ + ctx: ctx, + client: c, + projectID: projectID, + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.buckets) }, + func() interface{} { b := it.buckets; it.buckets = nil; return b }) + return it +} + +// A BucketIterator is an iterator over BucketAttrs. +type BucketIterator struct { + // Prefix restricts the iterator to buckets whose names begin with it. + Prefix string + + ctx context.Context + client *Client + projectID string + buckets []*BucketAttrs + pageInfo *iterator.PageInfo + nextFunc func() error +} + +// Next returns the next result. Its second return value is iterator.Done if +// there are no more results. 
Once Next returns iterator.Done, all subsequent +// calls will return iterator.Done. +func (it *BucketIterator) Next() (*BucketAttrs, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + b := it.buckets[0] + it.buckets = it.buckets[1:] + return b, nil +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +func (it *BucketIterator) fetch(pageSize int, pageToken string) (token string, err error) { + req := it.client.raw.Buckets.List(it.projectID) + setClientHeader(req.Header()) + req.Projection("full") + req.Prefix(it.Prefix) + req.PageToken(pageToken) + if pageSize > 0 { + req.MaxResults(int64(pageSize)) + } + var resp *raw.Buckets + err = runWithRetry(it.ctx, func() error { + resp, err = req.Context(it.ctx).Do() + return err + }) + if err != nil { + return "", err + } + for _, item := range resp.Items { + b, err := newBucket(item) + if err != nil { + return "", err + } + it.buckets = append(it.buckets, b) + } + return resp.NextPageToken, nil +} diff --git a/vendor/cloud.google.com/go/storage/bucket_test.go b/vendor/cloud.google.com/go/storage/bucket_test.go new file mode 100644 index 0000000..b52d59b --- /dev/null +++ b/vendor/cloud.google.com/go/storage/bucket_test.go @@ -0,0 +1,319 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package storage + +import ( + "net/http" + "reflect" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/api/googleapi" + raw "google.golang.org/api/storage/v1" +) + +func TestBucketAttrsToRawBucket(t *testing.T) { + t.Parallel() + attrs := &BucketAttrs{ + Name: "name", + ACL: []ACLRule{{Entity: "bob@example.com", Role: RoleOwner}}, + DefaultObjectACL: []ACLRule{{Entity: AllUsers, Role: RoleReader}}, + Location: "loc", + StorageClass: "class", + RetentionPolicy: &RetentionPolicy{ + RetentionPeriod: 3 * time.Second, + }, + VersioningEnabled: false, + // should be ignored: + MetaGeneration: 39, + Created: time.Now(), + Labels: map[string]string{"label": "value"}, + CORS: []CORS{ + { + MaxAge: time.Hour, + Methods: []string{"GET", "POST"}, + Origins: []string{"*"}, + ResponseHeaders: []string{"FOO"}, + }, + }, + } + got := attrs.toRawBucket() + want := &raw.Bucket{ + Name: "name", + Acl: []*raw.BucketAccessControl{ + {Entity: "bob@example.com", Role: "OWNER"}, + }, + DefaultObjectAcl: []*raw.ObjectAccessControl{ + {Entity: "allUsers", Role: "READER"}, + }, + Location: "loc", + StorageClass: "class", + RetentionPolicy: &raw.BucketRetentionPolicy{ + RetentionPeriod: 3, + }, + Versioning: nil, // ignore VersioningEnabled if false + Labels: map[string]string{"label": "value"}, + Cors: []*raw.BucketCors{ + { + MaxAgeSeconds: 3600, + Method: []string{"GET", "POST"}, + Origin: []string{"*"}, + ResponseHeader: []string{"FOO"}, + }, + }, + } + if msg := testutil.Diff(got, want); msg != "" { + t.Error(msg) + } + + attrs.VersioningEnabled = true + attrs.RequesterPays = true + got = attrs.toRawBucket() + want.Versioning = &raw.BucketVersioning{Enabled: true} + want.Billing = &raw.BucketBilling{RequesterPays: true} + if msg := testutil.Diff(got, want); msg != "" { + t.Error(msg) + } +} + +func TestBucketAttrsToUpdateToRawBucket(t *testing.T) { + t.Parallel() + au := &BucketAttrsToUpdate{ + VersioningEnabled: false, + RequesterPays: false, + } + au.SetLabel("a", "foo") + au.DeleteLabel("b") + au.SetLabel("c", "") + got := au.toRawBucket() + want := &raw.Bucket{ + Versioning: &raw.BucketVersioning{ + Enabled: false, + ForceSendFields: []string{"Enabled"}, + }, + Labels: map[string]string{ + "a": "foo", + "c": "", + }, + Billing: &raw.BucketBilling{ + RequesterPays: false, + ForceSendFields: []string{"RequesterPays"}, + }, + NullFields: []string{"Labels.b"}, + } + if msg := testutil.Diff(got, want); msg != "" { + t.Error(msg) + } + + var au2 BucketAttrsToUpdate + au2.DeleteLabel("b") + got = au2.toRawBucket() + want = &raw.Bucket{ + Labels: map[string]string{}, + ForceSendFields: []string{"Labels"}, + NullFields: []string{"Labels.b"}, + } + + if msg := testutil.Diff(got, want); msg != "" { + t.Error(msg) + } +} + +func TestCallBuilders(t *testing.T) { + rc, err := raw.New(&http.Client{}) + if err != nil { + t.Fatal(err) + } + c := &Client{raw: rc} + const metagen = 17 + + b := c.Bucket("name") + bm := b.If(BucketConditions{MetagenerationMatch: metagen}).UserProject("p") + + equal := func(x, y interface{}) bool { + return testutil.Equal(x, y, + cmp.AllowUnexported( + raw.BucketsGetCall{}, + raw.BucketsDeleteCall{}, + raw.BucketsPatchCall{}, + ), + cmp.FilterPath(func(p cmp.Path) bool { + return p[len(p)-1].Type() == reflect.TypeOf(&raw.Service{}) + }, cmp.Ignore()), + ) + } + + for i, test := range []struct { + callFunc func(*BucketHandle) (interface{}, error) + want interface { + Header() 
http.Header + } + metagenFunc func(interface{}) + }{ + { + func(b *BucketHandle) (interface{}, error) { return b.newGetCall() }, + rc.Buckets.Get("name").Projection("full"), + func(req interface{}) { req.(*raw.BucketsGetCall).IfMetagenerationMatch(metagen).UserProject("p") }, + }, + { + func(b *BucketHandle) (interface{}, error) { return b.newDeleteCall() }, + rc.Buckets.Delete("name"), + func(req interface{}) { req.(*raw.BucketsDeleteCall).IfMetagenerationMatch(metagen).UserProject("p") }, + }, + { + func(b *BucketHandle) (interface{}, error) { + return b.newPatchCall(&BucketAttrsToUpdate{ + VersioningEnabled: false, + RequesterPays: false, + }) + }, + rc.Buckets.Patch("name", &raw.Bucket{ + Versioning: &raw.BucketVersioning{ + Enabled: false, + ForceSendFields: []string{"Enabled"}, + }, + Billing: &raw.BucketBilling{ + RequesterPays: false, + ForceSendFields: []string{"RequesterPays"}, + }, + }).Projection("full"), + func(req interface{}) { req.(*raw.BucketsPatchCall).IfMetagenerationMatch(metagen).UserProject("p") }, + }, + } { + got, err := test.callFunc(b) + if err != nil { + t.Fatal(err) + } + setClientHeader(test.want.Header()) + if !equal(got, test.want) { + t.Errorf("#%d: got %#v, want %#v", i, got, test.want) + } + got, err = test.callFunc(bm) + if err != nil { + t.Fatal(err) + } + test.metagenFunc(test.want) + if !equal(got, test.want) { + t.Errorf("#%d:\ngot %#v\nwant %#v", i, got, test.want) + } + } + + // Error. + bm = b.If(BucketConditions{MetagenerationMatch: 1, MetagenerationNotMatch: 2}) + if _, err := bm.newGetCall(); err == nil { + t.Errorf("got nil, want error") + } + if _, err := bm.newDeleteCall(); err == nil { + t.Errorf("got nil, want error") + } + if _, err := bm.newPatchCall(&BucketAttrsToUpdate{}); err == nil { + t.Errorf("got nil, want error") + } +} + +func TestNewBucket(t *testing.T) { + labels := map[string]string{"a": "b"} + matchClasses := []string{"MULTI_REGIONAL", "REGIONAL", "STANDARD"} + rb := &raw.Bucket{ + Name: "name", + Location: "loc", + Metageneration: 3, + StorageClass: "sc", + TimeCreated: "2017-10-23T04:05:06Z", + Versioning: &raw.BucketVersioning{Enabled: true}, + Labels: labels, + Billing: &raw.BucketBilling{RequesterPays: true}, + Lifecycle: &raw.BucketLifecycle{ + Rule: []*raw.BucketLifecycleRule{{ + Action: &raw.BucketLifecycleRuleAction{ + Type: "SetStorageClass", + StorageClass: "NEARLINE", + }, + Condition: &raw.BucketLifecycleRuleCondition{ + Age: 10, + IsLive: googleapi.Bool(true), + CreatedBefore: "2017-01-02", + MatchesStorageClass: matchClasses, + NumNewerVersions: 3, + }, + }}, + }, + RetentionPolicy: &raw.BucketRetentionPolicy{ + RetentionPeriod: 3, + EffectiveTime: time.Now().Format(time.RFC3339), + }, + Cors: []*raw.BucketCors{ + { + MaxAgeSeconds: 3600, + Method: []string{"GET", "POST"}, + Origin: []string{"*"}, + ResponseHeader: []string{"FOO"}, + }, + }, + Acl: []*raw.BucketAccessControl{ + {Bucket: "name", Role: "READER", Email: "joe@example.com", Entity: "allUsers"}, + }, + } + want := &BucketAttrs{ + Name: "name", + Location: "loc", + MetaGeneration: 3, + StorageClass: "sc", + Created: time.Date(2017, 10, 23, 4, 5, 6, 0, time.UTC), + VersioningEnabled: true, + Labels: labels, + RequesterPays: true, + Lifecycle: Lifecycle{ + Rules: []LifecycleRule{ + { + Action: LifecycleAction{ + Type: SetStorageClassAction, + StorageClass: "NEARLINE", + }, + Condition: LifecycleCondition{ + AgeInDays: 10, + Liveness: Live, + CreatedBefore: time.Date(2017, 1, 2, 0, 0, 0, 0, time.UTC), + MatchesStorageClasses: matchClasses, + 
NumNewerVersions: 3, + }, + }, + }, + }, + RetentionPolicy: &RetentionPolicy{ + RetentionPeriod: 3 * time.Second, + }, + CORS: []CORS{ + { + MaxAge: time.Hour, + Methods: []string{"GET", "POST"}, + Origins: []string{"*"}, + ResponseHeaders: []string{"FOO"}, + }, + }, + ACL: []ACLRule{{Entity: "allUsers", Role: RoleReader}}, + DefaultObjectACL: []ACLRule{}, + } + got, err := newBucket(rb) + if err != nil { + t.Fatal(err) + } + if diff := testutil.Diff(got, want, cmpopts.IgnoreTypes(time.Time{})); diff != "" { + t.Errorf("got=-, want=+:\n%s", diff) + } +} diff --git a/vendor/cloud.google.com/go/storage/copy.go b/vendor/cloud.google.com/go/storage/copy.go new file mode 100644 index 0000000..c9fb027 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/copy.go @@ -0,0 +1,207 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "errors" + "fmt" + + "cloud.google.com/go/internal/trace" + "golang.org/x/net/context" + raw "google.golang.org/api/storage/v1" +) + +// CopierFrom creates a Copier that can copy src to dst. +// You can immediately call Run on the returned Copier, or +// you can configure it first. +// +// For Requester Pays buckets, the user project of dst is billed, unless it is empty, +// in which case the user project of src is billed. +func (dst *ObjectHandle) CopierFrom(src *ObjectHandle) *Copier { + return &Copier{dst: dst, src: src} +} + +// A Copier copies a source object to a destination. +type Copier struct { + // ObjectAttrs are optional attributes to set on the destination object. + // Any attributes must be initialized before any calls on the Copier. Nil + // or zero-valued attributes are ignored. + ObjectAttrs + + // RewriteToken can be set before calling Run to resume a copy + // operation. After Run returns a non-nil error, RewriteToken will + // have been updated to contain the value needed to resume the copy. + RewriteToken string + + // ProgressFunc can be used to monitor the progress of a multi-RPC copy + // operation. If ProgressFunc is not nil and copying requires multiple + // calls to the underlying service (see + // https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite), then + // ProgressFunc will be invoked after each call with the number of bytes of + // content copied so far and the total size in bytes of the source object. + // + // ProgressFunc is intended to make upload progress available to the + // application. For example, the implementation of ProgressFunc may update + // a progress bar in the application's UI, or log the result of + // float64(copiedBytes)/float64(totalBytes). + // + // ProgressFunc should return quickly without blocking. + ProgressFunc func(copiedBytes, totalBytes uint64) + + dst, src *ObjectHandle +} + +// Run performs the copy. 
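+// Run issues the underlying rewrite call in a loop until the service
+// reports the copy as done, invoking ProgressFunc (if set) after each
+// call, so a single Run may perform several RPCs.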
+func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Copier.Run") + defer func() { trace.EndSpan(ctx, err) }() + + if err := c.src.validate(); err != nil { + return nil, err + } + if err := c.dst.validate(); err != nil { + return nil, err + } + // Convert destination attributes to raw form, omitting the bucket. + // If the bucket is included but name or content-type aren't, the service + // returns a 400 with "Required" as the only message. Omitting the bucket + // does not cause any problems. + rawObject := c.ObjectAttrs.toRawObject("") + for { + res, err := c.callRewrite(ctx, rawObject) + if err != nil { + return nil, err + } + if c.ProgressFunc != nil { + c.ProgressFunc(uint64(res.TotalBytesRewritten), uint64(res.ObjectSize)) + } + if res.Done { // Finished successfully. + return newObject(res.Resource), nil + } + } +} + +func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.RewriteResponse, error) { + call := c.dst.c.raw.Objects.Rewrite(c.src.bucket, c.src.object, c.dst.bucket, c.dst.object, rawObj) + + call.Context(ctx).Projection("full") + if c.RewriteToken != "" { + call.RewriteToken(c.RewriteToken) + } + if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil { + return nil, err + } + if c.dst.userProject != "" { + call.UserProject(c.dst.userProject) + } else if c.src.userProject != "" { + call.UserProject(c.src.userProject) + } + if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil { + return nil, err + } + if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil { + return nil, err + } + if err := setEncryptionHeaders(call.Header(), c.src.encryptionKey, true); err != nil { + return nil, err + } + var res *raw.RewriteResponse + var err error + setClientHeader(call.Header()) + err = runWithRetry(ctx, func() error { res, err = call.Do(); return err }) + if err != nil { + return nil, err + } + c.RewriteToken = res.RewriteToken + return res, nil +} + +// ComposerFrom creates a Composer that can compose srcs into dst. +// You can immediately call Run on the returned Composer, or you can +// configure it first. +// +// The encryption key for the destination object will be used to decrypt all +// source objects and encrypt the destination object. It is an error +// to specify an encryption key for any of the source objects. +func (dst *ObjectHandle) ComposerFrom(srcs ...*ObjectHandle) *Composer { + return &Composer{dst: dst, srcs: srcs} +} + +// A Composer composes source objects into a destination object. +// +// For Requester Pays buckets, the user project of dst is billed. +type Composer struct { + // ObjectAttrs are optional attributes to set on the destination object. + // Any attributes must be initialized before any calls on the Composer. Nil + // or zero-valued attributes are ignored. + ObjectAttrs + + dst *ObjectHandle + srcs []*ObjectHandle +} + +// Run performs the compose operation. 
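+// It returns an error if no source objects are given, or if any source
+// object is in a different bucket than the destination.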
+func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Composer.Run") + defer func() { trace.EndSpan(ctx, err) }() + + if err := c.dst.validate(); err != nil { + return nil, err + } + if len(c.srcs) == 0 { + return nil, errors.New("storage: at least one source object must be specified") + } + + req := &raw.ComposeRequest{} + // Compose requires a non-empty Destination, so we always set it, + // even if the caller-provided ObjectAttrs is the zero value. + req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket) + for _, src := range c.srcs { + if err := src.validate(); err != nil { + return nil, err + } + if src.bucket != c.dst.bucket { + return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", c.dst.bucket, src.bucket) + } + if src.encryptionKey != nil { + return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object) + } + srcObj := &raw.ComposeRequestSourceObjects{ + Name: src.object, + } + if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil { + return nil, err + } + req.SourceObjects = append(req.SourceObjects, srcObj) + } + + call := c.dst.c.raw.Objects.Compose(c.dst.bucket, c.dst.object, req).Context(ctx) + if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil { + return nil, err + } + if c.dst.userProject != "" { + call.UserProject(c.dst.userProject) + } + if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + setClientHeader(call.Header()) + err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) + if err != nil { + return nil, err + } + return newObject(obj), nil +} diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go new file mode 100644 index 0000000..9040ac2 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/doc.go @@ -0,0 +1,167 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package storage provides an easy way to work with Google Cloud Storage. +Google Cloud Storage stores data in named objects, which are grouped into buckets. + +More information about Google Cloud Storage is available at +https://cloud.google.com/storage/docs. + +All of the methods of this package use exponential backoff to retry calls +that fail with certain errors, as described in +https://cloud.google.com/storage/docs/exponential-backoff. + + +Creating a Client + +To start working with this package, create a client: + + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + +The client will use your default application credentials. 
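+
+To use explicit credentials instead, you can pass a client option. For
+example, with a service account key file (a hypothetical path; this
+assumes the google.golang.org/api/option package is imported):
+
+	client, err := storage.NewClient(ctx, option.WithCredentialsFile("path/to/keyfile.json"))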
+
+If you only wish to access public data, you can create
+an unauthenticated client with
+
+	client, err := storage.NewClient(ctx, option.WithoutAuthentication())
+
+Buckets
+
+A Google Cloud Storage bucket is a collection of objects. To work with a
+bucket, make a bucket handle:
+
+	bkt := client.Bucket(bucketName)
+
+A handle is a reference to a bucket. You can have a handle even if the
+bucket doesn't exist yet. To create a bucket in Google Cloud Storage,
+call Create on the handle:
+
+	if err := bkt.Create(ctx, projectID, nil); err != nil {
+		// TODO: Handle error.
+	}
+
+Note that although buckets are associated with projects, bucket names are
+global across all projects.
+
+Each bucket has associated metadata, represented in this package by
+BucketAttrs. The third argument to BucketHandle.Create allows you to set
+the initial BucketAttrs of a bucket. To retrieve a bucket's attributes, use
+Attrs:
+
+	attrs, err := bkt.Attrs(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n",
+		attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass)
+
+Objects
+
+An object holds arbitrary data as a sequence of bytes, like a file. You
+refer to objects using a handle, just as with buckets, but unlike buckets
+you don't explicitly create an object. Instead, the first time you write
+to an object it will be created. You can use the standard Go io.Reader
+and io.Writer interfaces to read and write object data:
+
+	obj := bkt.Object("data")
+	// Write something to obj.
+	// w implements io.Writer.
+	w := obj.NewWriter(ctx)
+	// Write some text to obj. This will either create the object or overwrite whatever is there already.
+	if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil {
+		// TODO: Handle error.
+	}
+	// Close, just like writing a file.
+	if err := w.Close(); err != nil {
+		// TODO: Handle error.
+	}
+
+	// Read it back.
+	r, err := obj.NewReader(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	defer r.Close()
+	if _, err := io.Copy(os.Stdout, r); err != nil {
+		// TODO: Handle error.
+	}
+	// Prints "This object contains text."
+
+Objects also have attributes, which you can fetch with Attrs:
+
+	objAttrs, err := obj.Attrs(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	fmt.Printf("object %s has size %d and can be read using %s\n",
+		objAttrs.Name, objAttrs.Size, objAttrs.MediaLink)
+
+ACLs
+
+Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of
+ACLRules, each of which specifies the role of a user, group or project. ACLs
+are suitable for fine-grained control, but you may prefer using IAM to control
+access at the project level (see
+https://cloud.google.com/storage/docs/access-control/iam).
+
+To list the ACLs of a bucket or object, obtain an ACLHandle and call its List method:
+
+	acls, err := obj.ACL().List(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	for _, rule := range acls {
+		fmt.Printf("%s has role %s\n", rule.Entity, rule.Role)
+	}
+
+You can also set and delete ACLs.
+
+Conditions
+
+Every object has a generation and a metageneration. The generation changes
+whenever the content changes, and the metageneration changes whenever the
+metadata changes. Conditions let you check these values before an operation;
+the operation only executes if the conditions match. You can use conditions to
+prevent race conditions in read-modify-write operations.
+
+For example, say you've read an object's metadata into objAttrs.
Now +you want to write to that object, but only if its contents haven't changed +since you read it. Here is how to express that: + + w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx) + // Proceed with writing as above. + +Signed URLs + +You can obtain a URL that lets anyone read or write an object for a limited time. +You don't need to create a client to do this. See the documentation of +SignedURL for details. + + url, err := storage.SignedURL(bucketName, "shared-object", opts) + if err != nil { + // TODO: Handle error. + } + fmt.Println(url) + +Authentication + +See examples of authorization and authentication at +https://godoc.org/cloud.google.com/go#pkg-examples. +*/ +package storage // import "cloud.google.com/go/storage" diff --git a/vendor/cloud.google.com/go/storage/example_test.go b/vendor/cloud.google.com/go/storage/example_test.go new file mode 100644 index 0000000..d4aec8b --- /dev/null +++ b/vendor/cloud.google.com/go/storage/example_test.go @@ -0,0 +1,641 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage_test + +import ( + "fmt" + "hash/crc32" + "io" + "io/ioutil" + "log" + "os" + "time" + + "cloud.google.com/go/storage" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +func ExampleNewClient() { + ctx := context.Background() + // Use Google Application Default Credentials to authorize and authenticate the client. + // More information about Application Default Credentials and how to enable is at + // https://developers.google.com/identity/protocols/application-default-credentials. + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + // Use the client. + + // Close the client when finished. + if err := client.Close(); err != nil { + // TODO: handle error. + } +} + +// This example shows how to create an unauthenticated client, which +// can be used to access public data. +func ExampleNewClient_unauthenticated() { + ctx := context.Background() + client, err := storage.NewClient(ctx, option.WithoutAuthentication()) + if err != nil { + // TODO: handle error. + } + // Use the client. + + // Close the client when finished. + if err := client.Close(); err != nil { + // TODO: handle error. + } +} + +func ExampleBucketHandle_Create() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + if err := client.Bucket("my-bucket").Create(ctx, "my-project", nil); err != nil { + // TODO: handle error. + } +} + +func ExampleBucketHandle_Delete() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + if err := client.Bucket("my-bucket").Delete(ctx); err != nil { + // TODO: handle error. + } +} + +func ExampleBucketHandle_Attrs() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. 
+	}
+	attrs, err := client.Bucket("my-bucket").Attrs(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	fmt.Println(attrs)
+}
+
+func ExampleBucketHandle_Update() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	// Enable versioning in the bucket, regardless of its previous value.
+	attrs, err := client.Bucket("my-bucket").Update(ctx,
+		storage.BucketAttrsToUpdate{VersioningEnabled: true})
+	if err != nil {
+		// TODO: handle error.
+	}
+	fmt.Println(attrs)
+}
+
+// If your update is based on the bucket's previous attributes, match the
+// metageneration number to make sure the bucket hasn't changed since you read it.
+func ExampleBucketHandle_Update_readModifyWrite() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	b := client.Bucket("my-bucket")
+	attrs, err := b.Attrs(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	var au storage.BucketAttrsToUpdate
+	au.SetLabel("lab", attrs.Labels["lab"]+"-more")
+	if attrs.Labels["delete-me"] == "yes" {
+		au.DeleteLabel("delete-me")
+	}
+	attrs, err = b.
+		If(storage.BucketConditions{MetagenerationMatch: attrs.MetaGeneration}).
+		Update(ctx, au)
+	if err != nil {
+		// TODO: handle error.
+	}
+	fmt.Println(attrs)
+}
+
+func ExampleClient_Buckets() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	it := client.Buckets(ctx, "my-project")
+	_ = it // TODO: iterate using Next or iterator.Pager.
+}
+
+func ExampleBucketIterator_Next() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	it := client.Buckets(ctx, "my-project")
+	for {
+		bucketAttrs, err := it.Next()
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			// TODO: Handle error.
+		}
+		fmt.Println(bucketAttrs)
+	}
+}
+
+func ExampleBucketHandle_Objects() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	it := client.Bucket("my-bucket").Objects(ctx, nil)
+	_ = it // TODO: iterate using Next or iterator.Pager.
+}
+
+func ExampleBucketHandle_AddNotification() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	b := client.Bucket("my-bucket")
+	n, err := b.AddNotification(ctx, &storage.Notification{
+		TopicProjectID: "my-project",
+		TopicID:        "my-topic",
+		PayloadFormat:  storage.JSONPayload,
+	})
+	if err != nil {
+		// TODO: handle error.
+	}
+	fmt.Println(n.ID)
+}
+
+func ExampleBucketHandle_LockRetentionPolicy() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	b := client.Bucket("my-bucket")
+	attrs, err := b.Attrs(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	// Note that locking the bucket without first attaching a RetentionPolicy
+	// that's at least 1 day is a no-op.
+	err = b.If(storage.BucketConditions{MetagenerationMatch: attrs.MetaGeneration}).LockRetentionPolicy(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+}
+
+func ExampleBucketHandle_Notifications() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	b := client.Bucket("my-bucket")
+	ns, err := b.Notifications(ctx)
+	if err != nil {
+		// TODO: handle error.
+ } + for id, n := range ns { + fmt.Printf("%s: %+v\n", id, n) + } +} + +var notificationID string + +func ExampleBucketHandle_DeleteNotification() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + b := client.Bucket("my-bucket") + // TODO: Obtain notificationID from BucketHandle.AddNotification + // or BucketHandle.Notifications. + err = b.DeleteNotification(ctx, notificationID) + if err != nil { + // TODO: handle error. + } +} + +func ExampleObjectIterator_Next() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + it := client.Bucket("my-bucket").Objects(ctx, nil) + for { + objAttrs, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(objAttrs) + } +} + +func ExampleSignedURL() { + pkey, err := ioutil.ReadFile("my-private-key.pem") + if err != nil { + // TODO: handle error. + } + url, err := storage.SignedURL("my-bucket", "my-object", &storage.SignedURLOptions{ + GoogleAccessID: "xxx@developer.gserviceaccount.com", + PrivateKey: pkey, + Method: "GET", + Expires: time.Now().Add(48 * time.Hour), + }) + if err != nil { + // TODO: handle error. + } + fmt.Println(url) +} + +func ExampleObjectHandle_Attrs() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + objAttrs, err := client.Bucket("my-bucket").Object("my-object").Attrs(ctx) + if err != nil { + // TODO: handle error. + } + fmt.Println(objAttrs) +} + +func ExampleObjectHandle_Attrs_withConditions() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + obj := client.Bucket("my-bucket").Object("my-object") + // Read the object. + objAttrs1, err := obj.Attrs(ctx) + if err != nil { + // TODO: handle error. + } + // Do something else for a while. + time.Sleep(5 * time.Minute) + // Now read the same contents, even if the object has been written since the last read. + objAttrs2, err := obj.Generation(objAttrs1.Generation).Attrs(ctx) + if err != nil { + // TODO: handle error. + } + fmt.Println(objAttrs1, objAttrs2) +} + +func ExampleObjectHandle_Update() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + // Change only the content type of the object. + objAttrs, err := client.Bucket("my-bucket").Object("my-object").Update(ctx, storage.ObjectAttrsToUpdate{ + ContentType: "text/html", + ContentDisposition: "", // delete ContentDisposition + }) + if err != nil { + // TODO: handle error. + } + fmt.Println(objAttrs) +} + +func ExampleObjectHandle_NewReader() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + rc, err := client.Bucket("my-bucket").Object("my-object").NewReader(ctx) + if err != nil { + // TODO: handle error. + } + slurp, err := ioutil.ReadAll(rc) + rc.Close() + if err != nil { + // TODO: handle error. + } + fmt.Println("file contents:", slurp) +} + +func ExampleObjectHandle_NewRangeReader() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + // Read only the first 64K. + rc, err := client.Bucket("bucketname").Object("filename1").NewRangeReader(ctx, 0, 64*1024) + if err != nil { + // TODO: handle error. + } + slurp, err := ioutil.ReadAll(rc) + rc.Close() + if err != nil { + // TODO: handle error. 
+	}
+	fmt.Println("first 64K of file contents:", slurp)
+}
+
+func ExampleObjectHandle_NewWriter() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	wc := client.Bucket("bucketname").Object("filename1").NewWriter(ctx)
+	_ = wc // TODO: Use the Writer.
+}
+
+func ExampleWriter_Write() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	wc := client.Bucket("bucketname").Object("filename1").NewWriter(ctx)
+	wc.ContentType = "text/plain"
+	wc.ACL = []storage.ACLRule{{Entity: storage.AllUsers, Role: storage.RoleReader}}
+	if _, err := wc.Write([]byte("hello world")); err != nil {
+		// TODO: handle error.
+		// Note that Write may return a nil error even when the upload is
+		// failing, so always check the error from Close as well.
+	}
+	if err := wc.Close(); err != nil {
+		// TODO: handle error.
+	}
+	fmt.Println("updated object:", wc.Attrs())
+}
+
+// To make sure the data you write is uncorrupted, use an MD5 or CRC32c
+// checksum. This example illustrates CRC32c.
+func ExampleWriter_Write_checksum() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	data := []byte("verify me")
+	wc := client.Bucket("bucketname").Object("filename1").NewWriter(ctx)
+	wc.CRC32C = crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli))
+	wc.SendCRC32C = true
+	// Write the same bytes the checksum was computed from; the server
+	// rejects the upload if they do not match.
+	if _, err := wc.Write(data); err != nil {
+		// TODO: handle error.
+		// Note that Write may return a nil error even when the upload is
+		// failing, so always check the error from Close as well.
+	}
+	if err := wc.Close(); err != nil {
+		// TODO: handle error.
+	}
+	fmt.Println("updated object:", wc.Attrs())
+}
+
+func ExampleObjectHandle_Delete() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	// To delete multiple objects in a bucket, list them with an
+	// ObjectIterator, then Delete them.
+
+	// If you are using this package on the App Engine Flex runtime,
+	// you can initialize a bucket client with your app's default bucket name.
+	// See http://godoc.org/google.golang.org/appengine/file#DefaultBucketName.
+	bucket := client.Bucket("my-bucket")
+	it := bucket.Objects(ctx, nil)
+	for {
+		objAttrs, err := it.Next()
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			// TODO: Handle error.
+		}
+		if err := bucket.Object(objAttrs.Name).Delete(ctx); err != nil {
+			// TODO: Handle error.
+		}
+	}
+	fmt.Println("deleted all objects in the bucket")
+}
+
+func ExampleACLHandle_Delete() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	// No longer grant access to the bucket to everyone on the Internet.
+	if err := client.Bucket("my-bucket").ACL().Delete(ctx, storage.AllUsers); err != nil {
+		// TODO: handle error.
+	}
+}
+
+func ExampleACLHandle_Set() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	// Let any authenticated user read my-bucket/my-object.
+	obj := client.Bucket("my-bucket").Object("my-object")
+	if err := obj.ACL().Set(ctx, storage.AllAuthenticatedUsers, storage.RoleReader); err != nil {
+		// TODO: handle error.
+	}
+}
+
+func ExampleACLHandle_List() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	// List the default object ACLs for my-bucket.
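+	// Default object ACLs are applied to objects that are later created in
+	// the bucket without an explicit ACL of their own.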
+ aclRules, err := client.Bucket("my-bucket").DefaultObjectACL().List(ctx) + if err != nil { + // TODO: handle error. + } + fmt.Println(aclRules) +} + +func ExampleCopier_Run() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + src := client.Bucket("bucketname").Object("file1") + dst := client.Bucket("another-bucketname").Object("file2") + + // Copy content and modify metadata. + copier := dst.CopierFrom(src) + copier.ContentType = "text/plain" + attrs, err := copier.Run(ctx) + if err != nil { + // TODO: Handle error, possibly resuming with copier.RewriteToken. + } + fmt.Println(attrs) + + // Just copy content. + attrs, err = dst.CopierFrom(src).Run(ctx) + if err != nil { + // TODO: Handle error. No way to resume. + } + fmt.Println(attrs) +} + +func ExampleCopier_Run_progress() { + // Display progress across multiple rewrite RPCs. + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + src := client.Bucket("bucketname").Object("file1") + dst := client.Bucket("another-bucketname").Object("file2") + + copier := dst.CopierFrom(src) + copier.ProgressFunc = func(copiedBytes, totalBytes uint64) { + log.Printf("copy %.1f%% done", float64(copiedBytes)/float64(totalBytes)*100) + } + if _, err := copier.Run(ctx); err != nil { + // TODO: handle error. + } +} + +var key1, key2 []byte + +func ExampleObjectHandle_CopierFrom_rotateEncryptionKeys() { + // To rotate the encryption key on an object, copy it onto itself. + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + obj := client.Bucket("bucketname").Object("obj") + // Assume obj is encrypted with key1, and we want to change to key2. + _, err = obj.Key(key2).CopierFrom(obj.Key(key1)).Run(ctx) + if err != nil { + // TODO: handle error. + } +} + +func ExampleComposer_Run() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + bkt := client.Bucket("bucketname") + src1 := bkt.Object("o1") + src2 := bkt.Object("o2") + dst := bkt.Object("o3") + // Compose and modify metadata. + c := dst.ComposerFrom(src1, src2) + c.ContentType = "text/plain" + attrs, err := c.Run(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Println(attrs) + // Just compose. + attrs, err = dst.ComposerFrom(src1, src2).Run(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Println(attrs) +} + +var gen int64 + +func ExampleObjectHandle_Generation() { + // Read an object's contents from generation gen, regardless of the + // current generation of the object. + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + obj := client.Bucket("my-bucket").Object("my-object") + rc, err := obj.Generation(gen).NewReader(ctx) + if err != nil { + // TODO: handle error. + } + defer rc.Close() + if _, err := io.Copy(os.Stdout, rc); err != nil { + // TODO: handle error. + } +} + +func ExampleObjectHandle_If() { + // Read from an object only if the current generation is gen. + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + obj := client.Bucket("my-bucket").Object("my-object") + rc, err := obj.If(storage.Conditions{GenerationMatch: gen}).NewReader(ctx) + if err != nil { + // TODO: handle error. + } + defer rc.Close() + if _, err := io.Copy(os.Stdout, rc); err != nil { + // TODO: handle error. 
+ } +} + +var secretKey []byte + +func ExampleObjectHandle_Key() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + obj := client.Bucket("my-bucket").Object("my-object") + // Encrypt the object's contents. + w := obj.Key(secretKey).NewWriter(ctx) + if _, err := w.Write([]byte("top secret")); err != nil { + // TODO: handle error. + } + if err := w.Close(); err != nil { + // TODO: handle error. + } +} diff --git a/vendor/cloud.google.com/go/storage/go110.go b/vendor/cloud.google.com/go/storage/go110.go new file mode 100644 index 0000000..b85e8c3 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/go110.go @@ -0,0 +1,30 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.10 + +package storage + +import "google.golang.org/api/googleapi" + +func shouldRetry(err error) bool { + switch e := err.(type) { + case *googleapi.Error: + // Retry on 429 and 5xx, according to + // https://cloud.google.com/storage/docs/exponential-backoff. + return e.Code == 429 || (e.Code >= 500 && e.Code < 600) + default: + return false + } +} diff --git a/vendor/cloud.google.com/go/storage/go17.go b/vendor/cloud.google.com/go/storage/go17.go new file mode 100644 index 0000000..982db4e --- /dev/null +++ b/vendor/cloud.google.com/go/storage/go17.go @@ -0,0 +1,26 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.7 + +package storage + +import ( + "context" + "net/http" +) + +func withContext(r *http.Request, ctx context.Context) *http.Request { + return r.WithContext(ctx) +} diff --git a/vendor/cloud.google.com/go/storage/iam.go b/vendor/cloud.google.com/go/storage/iam.go new file mode 100644 index 0000000..a87b857 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/iam.go @@ -0,0 +1,129 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package storage + +import ( + "cloud.google.com/go/iam" + "cloud.google.com/go/internal/trace" + "golang.org/x/net/context" + raw "google.golang.org/api/storage/v1" + iampb "google.golang.org/genproto/googleapis/iam/v1" +) + +// IAM provides access to IAM access control for the bucket. +func (b *BucketHandle) IAM() *iam.Handle { + return iam.InternalNewHandleClient(&iamClient{ + raw: b.c.raw, + userProject: b.userProject, + }, b.name) +} + +// iamClient implements the iam.client interface. +type iamClient struct { + raw *raw.Service + userProject string +} + +func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get") + defer func() { trace.EndSpan(ctx, err) }() + + call := c.raw.Buckets.GetIamPolicy(resource) + setClientHeader(call.Header()) + if c.userProject != "" { + call.UserProject(c.userProject) + } + var rp *raw.Policy + err = runWithRetry(ctx, func() error { + rp, err = call.Context(ctx).Do() + return err + }) + if err != nil { + return nil, err + } + return iamFromStoragePolicy(rp), nil +} + +func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Set") + defer func() { trace.EndSpan(ctx, err) }() + + rp := iamToStoragePolicy(p) + call := c.raw.Buckets.SetIamPolicy(resource, rp) + setClientHeader(call.Header()) + if c.userProject != "" { + call.UserProject(c.userProject) + } + return runWithRetry(ctx, func() error { + _, err := call.Context(ctx).Do() + return err + }) +} + +func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (permissions []string, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Test") + defer func() { trace.EndSpan(ctx, err) }() + + call := c.raw.Buckets.TestIamPermissions(resource, perms) + setClientHeader(call.Header()) + if c.userProject != "" { + call.UserProject(c.userProject) + } + var res *raw.TestIamPermissionsResponse + err = runWithRetry(ctx, func() error { + res, err = call.Context(ctx).Do() + return err + }) + if err != nil { + return nil, err + } + return res.Permissions, nil +} + +func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy { + return &raw.Policy{ + Bindings: iamToStorageBindings(ip.Bindings), + Etag: string(ip.Etag), + } +} + +func iamToStorageBindings(ibs []*iampb.Binding) []*raw.PolicyBindings { + var rbs []*raw.PolicyBindings + for _, ib := range ibs { + rbs = append(rbs, &raw.PolicyBindings{ + Role: ib.Role, + Members: ib.Members, + }) + } + return rbs +} + +func iamFromStoragePolicy(rp *raw.Policy) *iampb.Policy { + return &iampb.Policy{ + Bindings: iamFromStorageBindings(rp.Bindings), + Etag: []byte(rp.Etag), + } +} + +func iamFromStorageBindings(rbs []*raw.PolicyBindings) []*iampb.Binding { + var ibs []*iampb.Binding + for _, rb := range rbs { + ibs = append(ibs, &iampb.Binding{ + Role: rb.Role, + Members: rb.Members, + }) + } + return ibs +} diff --git a/vendor/cloud.google.com/go/storage/integration_test.go b/vendor/cloud.google.com/go/storage/integration_test.go new file mode 100644 index 0000000..0b64142 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/integration_test.go @@ -0,0 +1,2285 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "bytes" + "compress/gzip" + "crypto/md5" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "flag" + "fmt" + "hash/crc32" + "io" + "io/ioutil" + "log" + "math/rand" + "net/http" + "os" + "sort" + "strconv" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp/cmpopts" + "golang.org/x/net/context" + + "cloud.google.com/go/iam" + "cloud.google.com/go/internal/testutil" + "google.golang.org/api/googleapi" + "google.golang.org/api/iterator" + itesting "google.golang.org/api/iterator/testing" + "google.golang.org/api/option" +) + +const testPrefix = "go-integration-test" + +var ( + uidSpace = testutil.NewUIDSpace(testPrefix) + bucketName = uidSpace.New() +) + +func TestMain(m *testing.M) { + integrationTest := initIntegrationTest() + exit := m.Run() + if integrationTest { + if err := cleanup(); err != nil { + // No need to be loud if cleanup() fails; we'll get + // any undeleted buckets next time. + log.Printf("Post-test cleanup failed: %v\n", err) + } + } + os.Exit(exit) +} + +// If integration tests will be run, create a unique bucket for them. +func initIntegrationTest() bool { + flag.Parse() // needed for testing.Short() + ctx := context.Background() + if testing.Short() { + return false + } + client := config(ctx) + if client == nil { + return false + } + defer client.Close() + if err := client.Bucket(bucketName).Create(ctx, testutil.ProjID(), nil); err != nil { + log.Fatalf("creating bucket %q: %v", bucketName, err) + } + return true +} + +// testConfig returns the Client used to access GCS. testConfig skips +// the current test if credentials are not available or when being run +// in Short mode. +func testConfig(ctx context.Context, t *testing.T) *Client { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + client := config(ctx) + if client == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + return client +} + +// config is like testConfig, but it doesn't need a *testing.T. +func config(ctx context.Context) *Client { + ts := testutil.TokenSource(ctx, ScopeFullControl) + if ts == nil { + return nil + } + p := testutil.ProjID() + if p == "" { + log.Fatal("The project ID must be set. See CONTRIBUTING.md for details") + } + client, err := NewClient(ctx, option.WithTokenSource(ts)) + if err != nil { + log.Fatalf("NewClient: %v", err) + } + return client +} + +func TestIntegration_BucketMethods(t *testing.T) { + ctx := context.Background() + client := testConfig(ctx, t) + defer client.Close() + + projectID := testutil.ProjID() + newBucketName := uidSpace.New() + b := client.Bucket(newBucketName) + // Test Create and Delete. 
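+	// A nil *BucketAttrs selects the server defaults, which the assertions
+	// below rely on: STANDARD storage class, versioning off, and no labels.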
+	if err := b.Create(ctx, projectID, nil); err != nil {
+		t.Fatalf("Bucket(%v).Create(%v, %v) failed: %v", newBucketName, projectID, nil, err)
+	}
+	attrs, err := b.Attrs(ctx)
+	if err != nil {
+		t.Error(err)
+	} else {
+		if got, want := attrs.MetaGeneration, int64(1); got != want {
+			t.Errorf("got metagen %d, want %d", got, want)
+		}
+		if got, want := attrs.StorageClass, "STANDARD"; got != want {
+			t.Errorf("got storage class %q, want %q", got, want)
+		}
+		if attrs.VersioningEnabled {
+			t.Error("got versioning enabled, wanted it disabled")
+		}
+	}
+	if err := client.Bucket(newBucketName).Delete(ctx); err != nil {
+		t.Errorf("Bucket(%v).Delete failed: %v", newBucketName, err)
+	}
+
+	// Test Create and Delete with attributes.
+	labels := map[string]string{
+		"l1":    "v1",
+		"empty": "",
+	}
+	attrs = &BucketAttrs{
+		StorageClass:      "NEARLINE",
+		VersioningEnabled: true,
+		Labels:            labels,
+		Lifecycle: Lifecycle{
+			Rules: []LifecycleRule{{
+				Action: LifecycleAction{
+					Type:         SetStorageClassAction,
+					StorageClass: "NEARLINE",
+				},
+				Condition: LifecycleCondition{
+					AgeInDays:             10,
+					Liveness:              Archived,
+					CreatedBefore:         time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC),
+					MatchesStorageClasses: []string{"MULTI_REGIONAL", "STANDARD"},
+					NumNewerVersions:      3,
+				},
+			}, {
+				Action: LifecycleAction{
+					Type: DeleteAction,
+				},
+				Condition: LifecycleCondition{
+					AgeInDays:             30,
+					Liveness:              Live,
+					CreatedBefore:         time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC),
+					MatchesStorageClasses: []string{"NEARLINE"},
+					NumNewerVersions:      10,
+				},
+			}},
+		},
+	}
+	if err := client.Bucket(newBucketName).Create(ctx, projectID, attrs); err != nil {
+		t.Fatalf("Bucket(%v).Create(%v, %+v) failed: %v", newBucketName, projectID, attrs, err)
+	}
+	attrs, err = b.Attrs(ctx)
+	if err != nil {
+		t.Error(err)
+	} else {
+		if got, want := attrs.MetaGeneration, int64(1); got != want {
+			t.Errorf("got metagen %d, want %d", got, want)
+		}
+		if got, want := attrs.StorageClass, "NEARLINE"; got != want {
+			t.Errorf("got storage class %q, want %q", got, want)
+		}
+		if !attrs.VersioningEnabled {
+			t.Error("got versioning disabled, wanted it enabled")
+		}
+		if got, want := attrs.Labels, labels; !testutil.Equal(got, want) {
+			t.Errorf("labels: got %v, want %v", got, want)
+		}
+	}
+	if err := client.Bucket(newBucketName).Delete(ctx); err != nil {
+		t.Errorf("Bucket(%v).Delete failed: %v", newBucketName, err)
+	}
+}
+
+func TestIntegration_BucketUpdate(t *testing.T) {
+	ctx := context.Background()
+	client := testConfig(ctx, t)
+	defer client.Close()
+
+	b := client.Bucket(bucketName)
+	attrs, err := b.Attrs(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if attrs.VersioningEnabled {
+		t.Fatal("bucket should not have versioning by default")
+	}
+	if len(attrs.Labels) > 0 {
+		t.Fatal("bucket should not have labels initially")
+	}
+
+	// Updating with an empty BucketAttrsToUpdate should be a no-op.
+	attrs, err = b.Update(ctx, BucketAttrsToUpdate{})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if attrs.VersioningEnabled {
+		t.Fatal("should not have versioning")
+	}
+	if len(attrs.Labels) > 0 {
+		t.Fatal("should not have labels")
+	}
+
+	// Turn on versioning, add some labels.
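+	// SetLabel marks a single label for creation or update; labels not
+	// mentioned in the update are left unchanged.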
+ ua := BucketAttrsToUpdate{VersioningEnabled: true} + ua.SetLabel("l1", "v1") + ua.SetLabel("empty", "") + attrs, err = b.Update(ctx, ua) + if err != nil { + t.Fatal(err) + } + if !attrs.VersioningEnabled { + t.Fatal("should have versioning now") + } + wantLabels := map[string]string{ + "l1": "v1", + "empty": "", + } + if !testutil.Equal(attrs.Labels, wantLabels) { + t.Fatalf("got %v, want %v", attrs.Labels, wantLabels) + } + + // Turn off versioning again; add and remove some more labels. + ua = BucketAttrsToUpdate{VersioningEnabled: false} + ua.SetLabel("l1", "v2") // update + ua.SetLabel("new", "new") // create + ua.DeleteLabel("empty") // delete + ua.DeleteLabel("absent") // delete non-existent + attrs, err = b.Update(ctx, ua) + if err != nil { + t.Fatal(err) + } + if attrs.VersioningEnabled { + t.Fatal("should have versioning off") + } + wantLabels = map[string]string{ + "l1": "v2", + "new": "new", + } + if !testutil.Equal(attrs.Labels, wantLabels) { + t.Fatalf("got %v, want %v", attrs.Labels, wantLabels) + } +} + +func TestIntegration_ConditionalDelete(t *testing.T) { + ctx := context.Background() + client := testConfig(ctx, t) + defer client.Close() + + o := client.Bucket(bucketName).Object("conddel") + + wc := o.NewWriter(ctx) + wc.ContentType = "text/plain" + if _, err := wc.Write([]byte("foo")); err != nil { + t.Fatal(err) + } + if err := wc.Close(); err != nil { + t.Fatal(err) + } + + gen := wc.Attrs().Generation + metaGen := wc.Attrs().Metageneration + + if err := o.Generation(gen - 1).Delete(ctx); err == nil { + t.Fatalf("Unexpected successful delete with Generation") + } + if err := o.If(Conditions{MetagenerationMatch: metaGen + 1}).Delete(ctx); err == nil { + t.Fatalf("Unexpected successful delete with IfMetaGenerationMatch") + } + if err := o.If(Conditions{MetagenerationNotMatch: metaGen}).Delete(ctx); err == nil { + t.Fatalf("Unexpected successful delete with IfMetaGenerationNotMatch") + } + if err := o.Generation(gen).Delete(ctx); err != nil { + t.Fatalf("final delete failed: %v", err) + } +} + +func TestIntegration_Objects(t *testing.T) { + // TODO(jba): Use subtests (Go 1.7). + ctx := context.Background() + client := testConfig(ctx, t) + defer client.Close() + + bkt := client.Bucket(bucketName) + + const defaultType = "text/plain" + + // Populate object names and make a map for their contents. + objects := []string{ + "obj1", + "obj2", + "obj/with/slashes", + } + contents := make(map[string][]byte) + + // Test Writer. + for _, obj := range objects { + c := randomContents() + if err := writeObject(ctx, bkt.Object(obj), defaultType, c); err != nil { + t.Errorf("Write for %v failed with %v", obj, err) + } + contents[obj] = c + } + + testObjectIterator(t, bkt, objects) + + // Test Reader. 
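+	// Read each object back in full and check both its contents and the
+	// metadata (size, content type, cache control) reported by the Reader.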
+ for _, obj := range objects { + rc, err := bkt.Object(obj).NewReader(ctx) + if err != nil { + t.Errorf("Can't create a reader for %v, errored with %v", obj, err) + continue + } + if !rc.checkCRC { + t.Errorf("%v: not checking CRC", obj) + } + slurp, err := ioutil.ReadAll(rc) + if err != nil { + t.Errorf("Can't ReadAll object %v, errored with %v", obj, err) + } + if got, want := slurp, contents[obj]; !bytes.Equal(got, want) { + t.Errorf("Contents (%q) = %q; want %q", obj, got, want) + } + if got, want := rc.Size(), len(contents[obj]); got != int64(want) { + t.Errorf("Size (%q) = %d; want %d", obj, got, want) + } + if got, want := rc.ContentType(), "text/plain"; got != want { + t.Errorf("ContentType (%q) = %q; want %q", obj, got, want) + } + if got, want := rc.CacheControl(), "public, max-age=60"; got != want { + t.Errorf("CacheControl (%q) = %q; want %q", obj, got, want) + } + rc.Close() + + // Check early close. + buf := make([]byte, 1) + rc, err = bkt.Object(obj).NewReader(ctx) + if err != nil { + t.Fatalf("%v: %v", obj, err) + } + _, err = rc.Read(buf) + if err != nil { + t.Fatalf("%v: %v", obj, err) + } + if got, want := buf, contents[obj][:1]; !bytes.Equal(got, want) { + t.Errorf("Contents[0] (%q) = %q; want %q", obj, got, want) + } + if err := rc.Close(); err != nil { + t.Errorf("%v Close: %v", obj, err) + } + } + + obj := objects[0] + objlen := int64(len(contents[obj])) + // Test Range Reader. + for i, r := range []struct { + offset, length, want int64 + }{ + {0, objlen, objlen}, + {0, objlen / 2, objlen / 2}, + {objlen / 2, objlen, objlen / 2}, + {0, 0, 0}, + {objlen / 2, 0, 0}, + {objlen / 2, -1, objlen / 2}, + {0, objlen * 2, objlen}, + } { + rc, err := bkt.Object(obj).NewRangeReader(ctx, r.offset, r.length) + if err != nil { + t.Errorf("%d: Can't create a range reader for %v, errored with %v", i, obj, err) + continue + } + if rc.Size() != objlen { + t.Errorf("%d: Reader has a content-size of %d, want %d", i, rc.Size(), objlen) + } + if rc.Remain() != r.want { + t.Errorf("%d: Reader's available bytes reported as %d, want %d", i, rc.Remain(), r.want) + } + slurp, err := ioutil.ReadAll(rc) + if err != nil { + t.Errorf("%d:Can't ReadAll object %v, errored with %v", i, obj, err) + continue + } + if len(slurp) != int(r.want) { + t.Errorf("%d:RangeReader (%d, %d): Read %d bytes, wanted %d bytes", i, r.offset, r.length, len(slurp), r.want) + continue + } + if got, want := slurp, contents[obj][r.offset:r.offset+r.want]; !bytes.Equal(got, want) { + t.Errorf("RangeReader (%d, %d) = %q; want %q", r.offset, r.length, got, want) + } + rc.Close() + } + + // Test content encoding + const zeroCount = 20 << 20 + w := bkt.Object("gzip-test").NewWriter(ctx) + w.ContentEncoding = "gzip" + gw := gzip.NewWriter(w) + if _, err := io.Copy(gw, io.LimitReader(zeros{}, zeroCount)); err != nil { + t.Fatalf("io.Copy, upload: %v", err) + } + if err := gw.Close(); err != nil { + t.Errorf("gzip.Close(): %v", err) + } + if err := w.Close(); err != nil { + t.Errorf("w.Close(): %v", err) + } + r, err := bkt.Object("gzip-test").NewReader(ctx) + if err != nil { + t.Fatalf("NewReader(gzip-test): %v", err) + } + n, err := io.Copy(ioutil.Discard, r) + if err != nil { + t.Errorf("io.Copy, download: %v", err) + } + if n != zeroCount { + t.Errorf("downloaded bad data: got %d bytes, want %d", n, zeroCount) + } + + // Test NotFound. 
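+	// A missing object is reported with the package-level sentinel
+	// ErrObjectNotExist rather than a *googleapi.Error, so the idiomatic
+	// check is a simple comparison, e.g.:
+	//
+	//	if _, err := bkt.Object("name").Attrs(ctx); err == ErrObjectNotExist {
+	//		// The object does not exist.
+	//	}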
+ _, err = bkt.Object("obj-not-exists").NewReader(ctx) + if err != ErrObjectNotExist { + t.Errorf("Object should not exist, err found to be %v", err) + } + + objName := objects[0] + + // Test NewReader googleapi.Error. + // Since a 429 or 5xx is hard to cause, we trigger a 416. + realLen := len(contents[objName]) + _, err = bkt.Object(objName).NewRangeReader(ctx, int64(realLen*2), 10) + if err, ok := err.(*googleapi.Error); !ok { + t.Error("NewRangeReader did not return a googleapi.Error") + } else { + if err.Code != 416 { + t.Errorf("Code = %d; want %d", err.Code, 416) + } + if len(err.Header) == 0 { + t.Error("Missing googleapi.Error.Header") + } + if len(err.Body) == 0 { + t.Error("Missing googleapi.Error.Body") + } + } + + // Test StatObject. + o, err := bkt.Object(objName).Attrs(ctx) + if err != nil { + t.Error(err) + } + if got, want := o.Name, objName; got != want { + t.Errorf("Name (%v) = %q; want %q", objName, got, want) + } + if got, want := o.ContentType, defaultType; got != want { + t.Errorf("ContentType (%v) = %q; want %q", objName, got, want) + } + created := o.Created + // Check that the object is newer than its containing bucket. + bAttrs, err := bkt.Attrs(ctx) + if err != nil { + t.Error(err) + } + if o.Created.Before(bAttrs.Created) { + t.Errorf("Object %v is older than its containing bucket, %v", o, bAttrs) + } + + // Test object copy. + copyName := "copy-" + objName + copyObj, err := bkt.Object(copyName).CopierFrom(bkt.Object(objName)).Run(ctx) + if err != nil { + t.Errorf("Copier.Run failed with %v", err) + } else if !namesEqual(copyObj, bucketName, copyName) { + t.Errorf("Copy object bucket, name: got %q.%q, want %q.%q", + copyObj.Bucket, copyObj.Name, bucketName, copyName) + } + + // Copying with attributes. + const contentEncoding = "identity" + copier := bkt.Object(copyName).CopierFrom(bkt.Object(objName)) + copier.ContentEncoding = contentEncoding + copyObj, err = copier.Run(ctx) + if err != nil { + t.Errorf("Copier.Run failed with %v", err) + } else { + if !namesEqual(copyObj, bucketName, copyName) { + t.Errorf("Copy object bucket, name: got %q.%q, want %q.%q", + copyObj.Bucket, copyObj.Name, bucketName, copyName) + } + if copyObj.ContentEncoding != contentEncoding { + t.Errorf("Copy ContentEncoding: got %q, want %q", copyObj.ContentEncoding, contentEncoding) + } + } + + // Test UpdateAttrs. + metadata := map[string]string{"key": "value"} + updated, err := bkt.Object(objName).Update(ctx, ObjectAttrsToUpdate{ + ContentType: "text/html", + ContentLanguage: "en", + Metadata: metadata, + ACL: []ACLRule{{Entity: "domain-google.com", Role: RoleReader}}, + }) + if err != nil { + t.Errorf("UpdateAttrs failed with %v", err) + } else { + if got, want := updated.ContentType, "text/html"; got != want { + t.Errorf("updated.ContentType == %q; want %q", got, want) + } + if got, want := updated.ContentLanguage, "en"; got != want { + t.Errorf("updated.ContentLanguage == %q; want %q", updated.ContentLanguage, want) + } + if got, want := updated.Metadata, metadata; !testutil.Equal(got, want) { + t.Errorf("updated.Metadata == %+v; want %+v", updated.Metadata, want) + } + if got, want := updated.Created, created; got != want { + t.Errorf("updated.Created == %q; want %q", got, want) + } + if !updated.Created.Before(updated.Updated) { + t.Errorf("updated.Updated should be newer than update.Created") + } + } + // Delete ContentType and ContentLanguage. 
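+	// In ObjectAttrsToUpdate, setting a string field to "" clears the stored
+	// value on the server; omitting the field leaves it unchanged.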
+ updated, err = bkt.Object(objName).Update(ctx, ObjectAttrsToUpdate{ + ContentType: "", + ContentLanguage: "", + Metadata: map[string]string{}, + }) + if err != nil { + t.Errorf("UpdateAttrs failed with %v", err) + } else { + if got, want := updated.ContentType, ""; got != want { + t.Errorf("updated.ContentType == %q; want %q", got, want) + } + if got, want := updated.ContentLanguage, ""; got != want { + t.Errorf("updated.ContentLanguage == %q; want %q", updated.ContentLanguage, want) + } + if updated.Metadata != nil { + t.Errorf("updated.Metadata == %+v; want nil", updated.Metadata) + } + if got, want := updated.Created, created; got != want { + t.Errorf("updated.Created == %q; want %q", got, want) + } + if !updated.Created.Before(updated.Updated) { + t.Errorf("updated.Updated should be newer than update.Created") + } + } + + // Test checksums. + checksumCases := []struct { + name string + contents [][]byte + size int64 + md5 string + crc32c uint32 + }{ + { + name: "checksum-object", + contents: [][]byte{[]byte("hello"), []byte("world")}, + size: 10, + md5: "fc5e038d38a57032085441e7fe7010b0", + crc32c: 1456190592, + }, + { + name: "zero-object", + contents: [][]byte{}, + size: 0, + md5: "d41d8cd98f00b204e9800998ecf8427e", + crc32c: 0, + }, + } + for _, c := range checksumCases { + wc := bkt.Object(c.name).NewWriter(ctx) + for _, data := range c.contents { + if _, err := wc.Write(data); err != nil { + t.Errorf("Write(%q) failed with %q", data, err) + } + } + if err = wc.Close(); err != nil { + t.Errorf("%q: close failed with %q", c.name, err) + } + obj := wc.Attrs() + if got, want := obj.Size, c.size; got != want { + t.Errorf("Object (%q) Size = %v; want %v", c.name, got, want) + } + if got, want := fmt.Sprintf("%x", obj.MD5), c.md5; got != want { + t.Errorf("Object (%q) MD5 = %q; want %q", c.name, got, want) + } + if got, want := obj.CRC32C, c.crc32c; got != want { + t.Errorf("Object (%q) CRC32C = %v; want %v", c.name, got, want) + } + } + + // Test public ACL. + publicObj := objects[0] + if err = bkt.Object(publicObj).ACL().Set(ctx, AllUsers, RoleReader); err != nil { + t.Errorf("PutACLEntry failed with %v", err) + } + publicClient, err := NewClient(ctx, option.WithHTTPClient(http.DefaultClient)) + if err != nil { + t.Fatal(err) + } + + slurp, err := readObject(ctx, publicClient.Bucket(bucketName).Object(publicObj)) + if err != nil { + t.Errorf("readObject failed with %v", err) + } else if !bytes.Equal(slurp, contents[publicObj]) { + t.Errorf("Public object's content: got %q, want %q", slurp, contents[publicObj]) + } + + // Test writer error handling. + wc := publicClient.Bucket(bucketName).Object(publicObj).NewWriter(ctx) + if _, err := wc.Write([]byte("hello")); err != nil { + t.Errorf("Write unexpectedly failed with %v", err) + } + if err = wc.Close(); err == nil { + t.Error("Close expected an error, found none") + } + + // Test deleting the copy object. + if err := bkt.Object(copyName).Delete(ctx); err != nil { + t.Errorf("Deletion of %v failed with %v", copyName, err) + } + // Deleting it a second time should return ErrObjectNotExist. + if err := bkt.Object(copyName).Delete(ctx); err != ErrObjectNotExist { + t.Errorf("second deletion of %v = %v; want ErrObjectNotExist", copyName, err) + } + _, err = bkt.Object(copyName).Attrs(ctx) + if err != ErrObjectNotExist { + t.Errorf("Copy is expected to be deleted, stat errored with %v", err) + } + + // Test object composition. 
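+	// Compose concatenates the source objects (at most 32 per call) into a
+	// single destination object; the result is checked against the
+	// concatenation of the original contents.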
+ var compSrcs []*ObjectHandle + var wantContents []byte + for _, obj := range objects { + compSrcs = append(compSrcs, bkt.Object(obj)) + wantContents = append(wantContents, contents[obj]...) + } + checkCompose := func(obj *ObjectHandle, wantContentType string) { + rc, err := obj.NewReader(ctx) + if err != nil { + t.Fatalf("NewReader: %v", err) + } + slurp, err = ioutil.ReadAll(rc) + if err != nil { + t.Fatalf("ioutil.ReadAll: %v", err) + } + defer rc.Close() + if !bytes.Equal(slurp, wantContents) { + t.Errorf("Composed object contents\ngot: %q\nwant: %q", slurp, wantContents) + } + if got := rc.ContentType(); got != wantContentType { + t.Errorf("Composed object content-type = %q, want %q", got, wantContentType) + } + } + + // Compose should work even if the user sets no destination attributes. + compDst := bkt.Object("composed1") + c := compDst.ComposerFrom(compSrcs...) + if _, err := c.Run(ctx); err != nil { + t.Fatalf("ComposeFrom error: %v", err) + } + checkCompose(compDst, "application/octet-stream") + + // It should also work if we do. + compDst = bkt.Object("composed2") + c = compDst.ComposerFrom(compSrcs...) + c.ContentType = "text/json" + if _, err := c.Run(ctx); err != nil { + t.Fatalf("ComposeFrom error: %v", err) + } + checkCompose(compDst, "text/json") +} + +func namesEqual(obj *ObjectAttrs, bucketName, objectName string) bool { + return obj.Bucket == bucketName && obj.Name == objectName +} + +func testObjectIterator(t *testing.T, bkt *BucketHandle, objects []string) { + ctx := context.Background() + // Collect the list of items we expect: ObjectAttrs in lexical order by name. + names := make([]string, len(objects)) + copy(names, objects) + sort.Strings(names) + var attrs []*ObjectAttrs + for _, name := range names { + attr, err := bkt.Object(name).Attrs(ctx) + if err != nil { + t.Errorf("Object(%q).Attrs: %v", name, err) + return + } + attrs = append(attrs, attr) + } + msg, ok := itesting.TestIterator(attrs, + func() interface{} { return bkt.Objects(ctx, &Query{Prefix: "obj"}) }, + func(it interface{}) (interface{}, error) { return it.(*ObjectIterator).Next() }) + if !ok { + t.Errorf("ObjectIterator.Next: %s", msg) + } + // TODO(jba): test query.Delimiter != "" +} + +func TestIntegration_SignedURL(t *testing.T) { + // To test SignedURL, we need a real user email and private key. Extract them + // from the JSON key file. 
+ jwtConf, err := testutil.JWTConfig() + if err != nil { + t.Fatal(err) + } + if jwtConf == nil { + t.Skip("JSON key file is not present") + } + + ctx := context.Background() + client := testConfig(ctx, t) + defer client.Close() + + bkt := client.Bucket(bucketName) + obj := "signedURL" + contents := []byte("This is a test of SignedURL.\n") + md5 := "Jyxvgwm9n2MsrGTMPbMeYA==" // base64-encoded MD5 of contents + if err := writeObject(ctx, bkt.Object(obj), "text/plain", contents); err != nil { + t.Fatalf("writing: %v", err) + } + for _, test := range []struct { + desc string + opts SignedURLOptions + headers map[string][]string + fail bool + }{ + { + desc: "basic", + }, + { + desc: "MD5 sent and matches", + opts: SignedURLOptions{MD5: md5}, + headers: map[string][]string{"Content-MD5": {md5}}, + }, + { + desc: "MD5 not sent", + opts: SignedURLOptions{MD5: md5}, + fail: true, + }, + { + desc: "Content-Type sent and matches", + opts: SignedURLOptions{ContentType: "text/plain"}, + headers: map[string][]string{"Content-Type": {"text/plain"}}, + }, + { + desc: "Content-Type sent but does not match", + opts: SignedURLOptions{ContentType: "text/plain"}, + headers: map[string][]string{"Content-Type": {"application/json"}}, + fail: true, + }, + { + desc: "Canonical headers sent and match", + opts: SignedURLOptions{Headers: []string{ + " X-Goog-Foo: Bar baz ", + "X-Goog-Novalue", // ignored: no value + "X-Google-Foo", // ignored: wrong prefix + }}, + headers: map[string][]string{"X-Goog-foo": {"Bar baz "}}, + }, + { + desc: "Canonical headers sent but don't match", + opts: SignedURLOptions{Headers: []string{" X-Goog-Foo: Bar baz"}}, + headers: map[string][]string{"X-Goog-Foo": {"bar baz"}}, + fail: true, + }, + } { + opts := test.opts + opts.GoogleAccessID = jwtConf.Email + opts.PrivateKey = jwtConf.PrivateKey + opts.Method = "GET" + opts.Expires = time.Now().Add(time.Hour) + u, err := SignedURL(bucketName, obj, &opts) + if err != nil { + t.Errorf("%s: SignedURL: %v", test.desc, err) + continue + } + got, err := getURL(u, test.headers) + if err != nil && !test.fail { + t.Errorf("%s: getURL %q: %v", test.desc, u, err) + } else if err == nil && !bytes.Equal(got, contents) { + t.Errorf("%s: got %q, want %q", test.desc, got, contents) + } + } +} + +// Make a GET request to a URL using an unauthenticated client, and return its contents. 
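+// The headers argument is attached to the request verbatim, so tests can
+// exercise the exact header set that a signed URL requires.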
+func getURL(url string, headers map[string][]string) ([]byte, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + req.Header = headers + res, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + bytes, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, err + } + if res.StatusCode != 200 { + return nil, fmt.Errorf("code=%d, body=%s", res.StatusCode, string(bytes)) + } + return bytes, nil +} + +func TestIntegration_ACL(t *testing.T) { + ctx := context.Background() + client := testConfig(ctx, t) + defer client.Close() + + bkt := client.Bucket(bucketName) + + entity := ACLEntity("domain-google.com") + rule := ACLRule{Entity: entity, Role: RoleReader} + if err := bkt.DefaultObjectACL().Set(ctx, entity, RoleReader); err != nil { + t.Errorf("Can't put default ACL rule for the bucket, errored with %v", err) + } + acl, err := bkt.DefaultObjectACL().List(ctx) + if err != nil { + t.Errorf("DefaultObjectACL.List for bucket %q: %v", bucketName, err) + } else if !hasRule(acl, rule) { + t.Errorf("default ACL missing %#v", rule) + } + aclObjects := []string{"acl1", "acl2"} + for _, obj := range aclObjects { + c := randomContents() + if err := writeObject(ctx, bkt.Object(obj), "", c); err != nil { + t.Errorf("Write for %v failed with %v", obj, err) + } + } + name := aclObjects[0] + o := bkt.Object(name) + acl, err = o.ACL().List(ctx) + if err != nil { + t.Errorf("Can't retrieve ACL of %v", name) + } else if !hasRule(acl, rule) { + t.Errorf("object ACL missing %+v", rule) + } + if err := o.ACL().Delete(ctx, entity); err != nil { + t.Errorf("object ACL: could not delete entity %s", entity) + } + // Delete the default ACL rule. We can't move this code earlier in the + // test, because the test depends on the fact that the object ACL inherits + // it. + if err := bkt.DefaultObjectACL().Delete(ctx, entity); err != nil { + t.Errorf("default ACL: could not delete entity %s", entity) + } + + entity2 := ACLEntity("user-jbd@google.com") + rule2 := ACLRule{Entity: entity2, Role: RoleReader} + if err := bkt.ACL().Set(ctx, entity2, RoleReader); err != nil { + t.Errorf("Error while putting bucket ACL rule: %v", err) + } + bACL, err := bkt.ACL().List(ctx) + if err != nil { + t.Errorf("Error while getting the ACL of the bucket: %v", err) + } else if !hasRule(bACL, rule2) { + t.Errorf("bucket ACL missing %+v", rule2) + } + if err := bkt.ACL().Delete(ctx, entity2); err != nil { + t.Errorf("Error while deleting bucket ACL rule: %v", err) + } + +} + +func hasRule(acl []ACLRule, rule ACLRule) bool { + for _, r := range acl { + if r == rule { + return true + } + } + return false +} + +func TestIntegration_ValidObjectNames(t *testing.T) { + ctx := context.Background() + client := testConfig(ctx, t) + defer client.Close() + + bkt := client.Bucket(bucketName) + + validNames := []string{ + "gopher", + "Гоферови", + "a", + strings.Repeat("a", 1024), + } + for _, name := range validNames { + if err := writeObject(ctx, bkt.Object(name), "", []byte("data")); err != nil { + t.Errorf("Object %q write failed: %v. Want success", name, err) + continue + } + defer bkt.Object(name).Delete(ctx) + } + + invalidNames := []string{ + "", // Too short. + strings.Repeat("a", 1025), // Too long. + "new\nlines", + "bad\xffunicode", + } + for _, name := range invalidNames { + // Invalid object names will either cause failure during Write or Close. 
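+		// Either failure point is acceptable; only a write that fully
+		// succeeds is reported as a test error below.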
+		if err := writeObject(ctx, bkt.Object(name), "", []byte("data")); err != nil {
+			continue
+		}
+		defer bkt.Object(name).Delete(ctx)
+		t.Errorf("%q should have failed. Didn't", name)
+	}
+}
+
+func TestIntegration_WriterContentType(t *testing.T) {
+	ctx := context.Background()
+	client := testConfig(ctx, t)
+	defer client.Close()
+
+	obj := client.Bucket(bucketName).Object("content")
+	testCases := []struct {
+		content           string
+		setType, wantType string
+	}{
+		{
+			content:  "It was the best of times, it was the worst of times.",
+			wantType: "text/plain; charset=utf-8",
+		},
+		{
+			content:  "<html><head><title>My first page</title></head></html>",
+			wantType: "text/html; charset=utf-8",
+		},
+		{
+			content:  "<html><head><title>My first page</title></head></html>",
+			setType:  "text/html",
+			wantType: "text/html",
+		},
+		{
+			content:  "<html><head><title>My first page</title></head></html>",
+			setType:  "image/jpeg",
+			wantType: "image/jpeg",
+		},
+	}
+	for i, tt := range testCases {
+		if err := writeObject(ctx, obj, tt.setType, []byte(tt.content)); err != nil {
+			t.Errorf("writing #%d: %v", i, err)
+		}
+		attrs, err := obj.Attrs(ctx)
+		if err != nil {
+			t.Errorf("obj.Attrs: %v", err)
+			continue
+		}
+		if got := attrs.ContentType; got != tt.wantType {
+			t.Errorf("Content-Type = %q; want %q\nContent: %q\nSet Content-Type: %q", got, tt.wantType, tt.content, tt.setType)
+		}
+	}
+}
+
+func TestIntegration_ZeroSizedObject(t *testing.T) {
+	t.Parallel()
+	ctx := context.Background()
+	client := testConfig(ctx, t)
+	defer client.Close()
+
+	obj := client.Bucket(bucketName).Object("zero")
+
+	// Check writing it works as expected.
+	w := obj.NewWriter(ctx)
+	if err := w.Close(); err != nil {
+		t.Fatalf("Writer.Close: %v", err)
+	}
+	defer obj.Delete(ctx)
+
+	// Check we can read it too.
+	body, err := readObject(ctx, obj)
+	if err != nil {
+		t.Fatalf("readObject: %v", err)
+	}
+	if len(body) != 0 {
+		t.Errorf("Body is %v, want empty []byte{}", body)
+	}
+}
+
+func TestIntegration_Encryption(t *testing.T) {
+	// This function tests customer-supplied encryption keys for all operations
+	// involving objects. Bucket and ACL operations aren't tested because they
+	// aren't affected by customer encryption. Neither is deletion.
+	ctx := context.Background()
+	client := testConfig(ctx, t)
+	defer client.Close()
+
+	obj := client.Bucket(bucketName).Object("customer-encryption")
+	key := []byte("my-secret-AES-256-encryption-key")
+	keyHash := sha256.Sum256(key)
+	keyHashB64 := base64.StdEncoding.EncodeToString(keyHash[:])
+	key2 := []byte("My-Secret-AES-256-Encryption-Key")
+	contents := "top secret."
+
+	checkMetadataCall := func(msg string, f func(o *ObjectHandle) (*ObjectAttrs, error)) {
+		// Performing a metadata operation without the key should succeed.
+		attrs, err := f(obj)
+		if err != nil {
+			t.Fatalf("%s: %v", msg, err)
+		}
+		// The key hash should match...
+		if got, want := attrs.CustomerKeySHA256, keyHashB64; got != want {
+			t.Errorf("%s: key hash: got %q, want %q", msg, got, want)
+		}
+		// ...but CRC and MD5 should not be present.
+		if attrs.CRC32C != 0 {
+			t.Errorf("%s: CRC: got %v, want 0", msg, attrs.CRC32C)
+		}
+		if len(attrs.MD5) > 0 {
+			t.Errorf("%s: MD5: got %v, want len == 0", msg, attrs.MD5)
+		}
+
+		// Performing a metadata operation with the key should succeed.
+		attrs, err = f(obj.Key(key))
+		if err != nil {
+			t.Fatalf("%s: %v", msg, err)
+		}
+		// Check the key and content hashes.
+ if got, want := attrs.CustomerKeySHA256, keyHashB64; got != want { + t.Errorf("%s: key hash: got %q, want %q", msg, got, want) + } + if attrs.CRC32C == 0 { + t.Errorf("%s: CRC: got 0, want non-zero", msg) + } + if len(attrs.MD5) == 0 { + t.Errorf("%s: MD5: got len == 0, want len > 0", msg) + } + } + + checkRead := func(msg string, o *ObjectHandle, k []byte, wantContents string) { + // Reading the object without the key should fail. + if _, err := readObject(ctx, o); err == nil { + t.Errorf("%s: reading without key: want error, got nil", msg) + } + // Reading the object with the key should succeed. + got, err := readObject(ctx, o.Key(k)) + if err != nil { + t.Fatalf("%s: %v", msg, err) + } + gotContents := string(got) + // And the contents should match what we wrote. + if gotContents != wantContents { + t.Errorf("%s: contents: got %q, want %q", msg, gotContents, wantContents) + } + } + + checkReadUnencrypted := func(msg string, obj *ObjectHandle, wantContents string) { + got, err := readObject(ctx, obj) + if err != nil { + t.Fatalf("%s: %v", msg, err) + } + gotContents := string(got) + if gotContents != wantContents { + t.Errorf("%s: got %q, want %q", msg, gotContents, wantContents) + } + } + + // Write to obj using our own encryption key, which is a valid 32-byte + // AES-256 key. + w := obj.Key(key).NewWriter(ctx) + w.Write([]byte(contents)) + if err := w.Close(); err != nil { + t.Fatal(err) + } + + checkMetadataCall("Attrs", func(o *ObjectHandle) (*ObjectAttrs, error) { + return o.Attrs(ctx) + }) + + checkMetadataCall("Update", func(o *ObjectHandle) (*ObjectAttrs, error) { + return o.Update(ctx, ObjectAttrsToUpdate{ContentLanguage: "en"}) + }) + + checkRead("first object", obj, key, contents) + + obj2 := client.Bucket(bucketName).Object("customer-encryption-2") + // Copying an object without the key should fail. + if _, err := obj2.CopierFrom(obj).Run(ctx); err == nil { + t.Fatal("want error, got nil") + } + // Copying an object with the key should succeed. + if _, err := obj2.CopierFrom(obj.Key(key)).Run(ctx); err != nil { + t.Fatal(err) + } + // The destination object is not encrypted; we can read it without a key. + checkReadUnencrypted("copy dest", obj2, contents) + + // Providing a key on the destination but not the source should fail, + // since the source is encrypted. + if _, err := obj2.Key(key2).CopierFrom(obj).Run(ctx); err == nil { + t.Fatal("want error, got nil") + } + + // But copying with keys for both source and destination should succeed. + if _, err := obj2.Key(key2).CopierFrom(obj.Key(key)).Run(ctx); err != nil { + t.Fatal(err) + } + // And the destination should be encrypted, meaning we can only read it + // with a key. + checkRead("copy destination", obj2, key2, contents) + + // Change obj2's key to prepare for compose, where all objects must have + // the same key. Also illustrates key rotation: copy an object to itself + // with a different key. + if _, err := obj2.Key(key).CopierFrom(obj2.Key(key2)).Run(ctx); err != nil { + t.Fatal(err) + } + obj3 := client.Bucket(bucketName).Object("customer-encryption-3") + // Composing without keys should fail. + if _, err := obj3.ComposerFrom(obj, obj2).Run(ctx); err == nil { + t.Fatal("want error, got nil") + } + // Keys on the source objects result in an error. + if _, err := obj3.ComposerFrom(obj.Key(key), obj2).Run(ctx); err == nil { + t.Fatal("want error, got nil") + } + // A key on the destination object both decrypts the source objects + // and encrypts the destination. 
+	if _, err := obj3.Key(key).ComposerFrom(obj, obj2).Run(ctx); err != nil {
+		t.Fatalf("got %v, want nil", err)
+	}
+	// Check that the destination is encrypted.
+	checkRead("compose destination", obj3, key, contents+contents)
+
+	// You can't compose one or more unencrypted source objects into an
+	// encrypted destination object.
+	_, err := obj2.CopierFrom(obj2.Key(key)).Run(ctx) // decrypt obj2 by copying it onto itself without a key
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := obj3.Key(key).ComposerFrom(obj2).Run(ctx); err == nil {
+		t.Fatal("got nil, want error")
+	}
+}
+
+func TestIntegration_NonexistentBucket(t *testing.T) {
+	t.Parallel()
+	ctx := context.Background()
+	client := testConfig(ctx, t)
+	defer client.Close()
+
+	bkt := client.Bucket(uidSpace.New())
+	if _, err := bkt.Attrs(ctx); err != ErrBucketNotExist {
+		t.Errorf("Attrs: got %v, want ErrBucketNotExist", err)
+	}
+	it := bkt.Objects(ctx, nil)
+	if _, err := it.Next(); err != ErrBucketNotExist {
+		t.Errorf("Objects: got %v, want ErrBucketNotExist", err)
+	}
+}
+
+func TestIntegration_PerObjectStorageClass(t *testing.T) {
+	const (
+		defaultStorageClass = "STANDARD"
+		newStorageClass     = "MULTI_REGIONAL"
+	)
+	ctx := context.Background()
+	client := testConfig(ctx, t)
+	defer client.Close()
+
+	bkt := client.Bucket(bucketName)
+
+	// The bucket should have the default storage class.
+	battrs, err := bkt.Attrs(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if battrs.StorageClass != defaultStorageClass {
+		t.Fatalf("bucket storage class: got %q, want %q",
+			battrs.StorageClass, defaultStorageClass)
+	}
+	// Write an object; it should start with the bucket's storage class.
+	obj := bkt.Object("posc")
+	if err := writeObject(ctx, obj, "", []byte("foo")); err != nil {
+		t.Fatal(err)
+	}
+	oattrs, err := obj.Attrs(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if oattrs.StorageClass != defaultStorageClass {
+		t.Fatalf("object storage class: got %q, want %q",
+			oattrs.StorageClass, defaultStorageClass)
+	}
+	// Now use Copy to change the storage class.
+	copier := obj.CopierFrom(obj)
+	copier.StorageClass = newStorageClass
+	oattrs2, err := copier.Run(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if oattrs2.StorageClass != newStorageClass {
+		t.Fatalf("new object storage class: got %q, want %q",
+			oattrs2.StorageClass, newStorageClass)
+	}
+
+	// We can also write a new object using a non-default storage class.
+	obj2 := bkt.Object("posc2")
+	w := obj2.NewWriter(ctx)
+	w.StorageClass = newStorageClass
+	if _, err := w.Write([]byte("xxx")); err != nil {
+		t.Fatal(err)
+	}
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+	if w.Attrs().StorageClass != newStorageClass {
+		t.Fatalf("new object storage class: got %q, want %q",
+			w.Attrs().StorageClass, newStorageClass)
+	}
+}
+
+func TestIntegration_BucketInCopyAttrs(t *testing.T) {
+	// Confirm that if bucket is included in the object attributes of a rewrite
+	// call, but object name and content-type aren't, then we get an error. See
+	// the comment in Copier.Run.
+ ctx := context.Background() + client := testConfig(ctx, t) + defer client.Close() + + bkt := client.Bucket(bucketName) + obj := bkt.Object("bucketInCopyAttrs") + if err := writeObject(ctx, obj, "", []byte("foo")); err != nil { + t.Fatal(err) + } + copier := obj.CopierFrom(obj) + rawObject := copier.ObjectAttrs.toRawObject(bucketName) + _, err := copier.callRewrite(ctx, rawObject) + if err == nil { + t.Errorf("got nil, want error") + } +} + +func TestIntegration_NoUnicodeNormalization(t *testing.T) { + t.Parallel() + ctx := context.Background() + client := testConfig(ctx, t) + defer client.Close() + bkt := client.Bucket("storage-library-test-bucket") + + for _, tst := range []struct { + nameQuoted, content string + }{ + {`"Caf\u00e9"`, "Normalization Form C"}, + {`"Cafe\u0301"`, "Normalization Form D"}, + } { + name, err := strconv.Unquote(tst.nameQuoted) + if err != nil { + t.Fatalf("invalid name: %s: %v", tst.nameQuoted, err) + } + got, err := readObject(ctx, bkt.Object(name)) + if err != nil { + t.Fatal(err) + } + if g := string(got); g != tst.content { + t.Errorf("content of %s is %q, want %q", tst.nameQuoted, g, tst.content) + } + } +} + +func TestIntegration_HashesOnUpload(t *testing.T) { + // Check that the user can provide hashes on upload, and that these are checked. + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := testConfig(ctx, t) + if client == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + defer client.Close() + obj := client.Bucket(bucketName).Object("hashesOnUpload-1") + data := []byte("I can't wait to be verified") + + write := func(w *Writer) error { + if _, err := w.Write(data); err != nil { + w.Close() + return err + } + return w.Close() + } + + crc32c := crc32.Checksum(data, crc32cTable) + // The correct CRC should succeed. + w := obj.NewWriter(ctx) + w.CRC32C = crc32c + w.SendCRC32C = true + if err := write(w); err != nil { + t.Fatal(err) + } + + // If we change the CRC, validation should fail. + w = obj.NewWriter(ctx) + w.CRC32C = crc32c + 1 + w.SendCRC32C = true + if err := write(w); err == nil { + t.Fatal("write with bad CRC32c: want error, got nil") + } + + // If we have the wrong CRC but forget to send it, we succeed. + w = obj.NewWriter(ctx) + w.CRC32C = crc32c + 1 + if err := write(w); err != nil { + t.Fatal(err) + } + + // MD5 + md5 := md5.Sum(data) + // The correct MD5 should succeed. + w = obj.NewWriter(ctx) + w.MD5 = md5[:] + if err := write(w); err != nil { + t.Fatal(err) + } + + // If we change the MD5, validation should fail. + w = obj.NewWriter(ctx) + w.MD5 = append([]byte(nil), md5[:]...) + w.MD5[0]++ + if err := write(w); err == nil { + t.Fatal("write with bad MD5: want error, got nil") + } +} + +func TestIntegration_BucketIAM(t *testing.T) { + ctx := context.Background() + client := testConfig(ctx, t) + defer client.Close() + + bkt := client.Bucket(bucketName) + + // This bucket is unique to this test run. So we don't have + // to worry about other runs interfering with our IAM policy + // changes. + + member := "projectViewer:" + testutil.ProjID() + role := iam.RoleName("roles/storage.objectViewer") + // Get the bucket's IAM policy. + policy, err := bkt.IAM().Policy(ctx) + if err != nil { + t.Fatalf("Getting policy: %v", err) + } + // The member should not have the role. + if policy.HasRole(member, role) { + t.Errorf("member %q has role %q", member, role) + } + // Change the policy. 
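+	// The policy is modified locally and then written back in full. SetPolicy
+	// includes the policy's etag, so a concurrent modification results in an
+	// error instead of a silent overwrite.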
+ policy.Add(member, role) + if err := bkt.IAM().SetPolicy(ctx, policy); err != nil { + t.Fatalf("SetPolicy: %v", err) + } + // Confirm that the binding was added. + policy, err = bkt.IAM().Policy(ctx) + if err != nil { + t.Fatalf("Getting policy: %v", err) + } + if !policy.HasRole(member, role) { + t.Errorf("member %q does not have role %q", member, role) + } + + // Check TestPermissions. + // This client should have all these permissions (and more). + perms := []string{"storage.buckets.get", "storage.buckets.delete"} + got, err := bkt.IAM().TestPermissions(ctx, perms) + if err != nil { + t.Fatalf("TestPermissions: %v", err) + } + sort.Strings(perms) + sort.Strings(got) + if !testutil.Equal(got, perms) { + t.Errorf("got %v, want %v", got, perms) + } +} + +func TestIntegration_RequesterPays(t *testing.T) { + // This test needs a second project and user (token source) to test + // all possibilities. Since we need these things for Firestore already, + // we use them here. + // + // There are up to three entities involved in a requester-pays call: + // + // 1. The user making the request. Here, we use + // a. The account used to create the token source used for all our + // integration tests (see testutil.TokenSource). + // b. The account used for the Firestore tests. + // 2. The project that owns the requester-pays bucket. Here, that + // is the test project ID (see testutil.ProjID). + // 3. The project provided as the userProject parameter of the request; + // the project to be billed. This test uses: + // a. The project that owns the requester-pays bucket (same as (2)) + // b. Another project (the Firestore project). + // + // The following must hold for this test to work: + // - (1a) must have resourcemanager.projects.createBillingAssignment permission + // (Owner role) on (2) (the project, not the bucket). + // - (1b) must NOT have that permission on (2). + // - (1b) must have serviceusage.services.use permission (Editor role) on (3b). + // - (1b) must NOT have that permission on (3a). + // - (1a) must NOT have that permission on (3b). + const wantErrorCode = 400 + + ctx := context.Background() + client := testConfig(ctx, t) + defer client.Close() + bucketName2 := uidSpace.New() + b := client.Bucket(bucketName2) + projID := testutil.ProjID() + // Use Firestore project as a project that does not contain the bucket. + otherProjID := os.Getenv(envFirestoreProjID) + if otherProjID == "" { + t.Fatalf("need a second project (env var %s)", envFirestoreProjID) + } + ts := testutil.TokenSourceEnv(ctx, envFirestorePrivateKey, ScopeFullControl) + if ts == nil { + t.Fatalf("need a second account (env var %s)", envFirestorePrivateKey) + } + otherClient, err := NewClient(ctx, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + defer otherClient.Close() + ob := otherClient.Bucket(bucketName2) + user, err := keyFileEmail(os.Getenv("GCLOUD_TESTS_GOLANG_KEY")) + if err != nil { + t.Fatal(err) + } + otherUser, err := keyFileEmail(os.Getenv(envFirestorePrivateKey)) + if err != nil { + t.Fatal(err) + } + + // Create a requester-pays bucket. The bucket is contained in the project projID. + if err := b.Create(ctx, projID, &BucketAttrs{RequesterPays: true}); err != nil { + t.Fatal(err) + } + if err := b.ACL().Set(ctx, ACLEntity("user-"+otherUser), RoleOwner); err != nil { + t.Fatal(err) + } + + // Extract the error code from err if it's a googleapi.Error. 
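+ // (googleapi.Error is the error type returned by the JSON API transport;
+ // its Code field carries the HTTP status, which the checks below compare
+ // against: 400 when a required user project is missing, 403 when the
+ // supplied user project is one the caller may not bill.)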
+ errCode := func(err error) int {
+ if err == nil {
+ return 0
+ }
+ if err, ok := err.(*googleapi.Error); ok {
+ return err.Code
+ }
+ return -1
+ }
+
+ // Call f under various conditions.
+ // Here b and ob refer to the same bucket, but b is bound to client,
+ // while ob is bound to otherClient. The clients differ in their credentials,
+ // i.e. the identity of the user making the RPC: b's user is an Owner on the
+ // bucket's containing project, ob's is not.
+ call := func(msg string, f func(*BucketHandle) error) {
+ // user: an Owner on the containing project
+ // userProject: absent
+ // result: success, by the rule permitting access by owners of the bucket's containing project.
+ if err := f(b); err != nil {
+ t.Errorf("%s: %v, want nil\n"+
+ "confirm that %s is an Owner on %s",
+ msg, err, user, projID)
+ }
+ // user: an Owner on the containing project
+ // userProject: containing project
+ // result: success, by the same rule as above; userProject is unnecessary but allowed.
+ if err := f(b.UserProject(projID)); err != nil {
+ t.Errorf("%s: got %v, want nil", msg, err)
+ }
+ // user: not an Owner on the containing project
+ // userProject: absent
+ // result: failure, by the standard requester-pays rule
+ err := f(ob)
+ if got, want := errCode(err), wantErrorCode; got != want {
+ t.Errorf("%s: got error %s, want code %d\n"+
+ "confirm that %s is NOT an Owner on %s",
+ msg, err, want, otherUser, projID)
+ }
+ // user: not an Owner on the containing project
+ // userProject: not the containing one, but user has Editor role on it
+ // result: success, by the standard requester-pays rule
+ if err := f(ob.UserProject(otherProjID)); err != nil {
+ t.Errorf("%s: got %v, want nil\n"+
+ "confirm that %s is an Editor on %s and that that project has billing enabled",
+ msg, err, otherUser, otherProjID)
+ }
+ // user: not an Owner on the containing project
+ // userProject: the containing one, on which the user does NOT have Editor permission.
+ // result: failure
+ err = f(ob.UserProject("veener-jba"))
+ if got, want := errCode(err), 403; got != want {
+ t.Errorf("%s: got error %s, want code %d\n"+
+ "confirm that %s is NOT an Editor on %s",
+ msg, err, want, otherUser, "veener-jba")
+ }
+ }
+
+ // Getting its attributes requires a user project.
+ var attrs *BucketAttrs
+ call("Bucket attrs", func(b *BucketHandle) error {
+ a, err := b.Attrs(ctx)
+ if a != nil {
+ attrs = a
+ }
+ return err
+ })
+ if attrs != nil {
+ if got, want := attrs.RequesterPays, true; got != want {
+ t.Fatalf("attr.RequesterPays = %t, want %t", got, want)
+ }
+ }
+ // Object operations.
+ call("write object", func(b *BucketHandle) error {
+ return writeObject(ctx, b.Object("foo"), "text/plain", []byte("hello"))
+ })
+ call("read object", func(b *BucketHandle) error {
+ _, err := readObject(ctx, b.Object("foo"))
+ return err
+ })
+ call("object attrs", func(b *BucketHandle) error {
+ _, err := b.Object("foo").Attrs(ctx)
+ return err
+ })
+ call("update object", func(b *BucketHandle) error {
+ _, err := b.Object("foo").Update(ctx, ObjectAttrsToUpdate{ContentLanguage: "en"})
+ return err
+ })
+
+ // ACL operations.
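+ // (ACL entities use the "<scope>-<value>" form: "user-<email>",
+ // "group-<email>", "domain-<domain>" as below, plus the special
+ // "allUsers" and "allAuthenticatedUsers".)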
+ entity := ACLEntity("domain-google.com") + call("bucket acl set", func(b *BucketHandle) error { + return b.ACL().Set(ctx, entity, RoleReader) + }) + call("bucket acl list", func(b *BucketHandle) error { + _, err := b.ACL().List(ctx) + return err + }) + call("bucket acl delete", func(b *BucketHandle) error { + err := b.ACL().Delete(ctx, entity) + if errCode(err) == 404 { + // Since we call the function multiple times, it will + // fail with NotFound for all but the first. + return nil + } + return err + }) + call("default object acl set", func(b *BucketHandle) error { + return b.DefaultObjectACL().Set(ctx, entity, RoleReader) + }) + call("default object acl list", func(b *BucketHandle) error { + _, err := b.DefaultObjectACL().List(ctx) + return err + }) + call("default object acl delete", func(b *BucketHandle) error { + err := b.DefaultObjectACL().Delete(ctx, entity) + if errCode(err) == 404 { + return nil + } + return err + }) + call("object acl set", func(b *BucketHandle) error { + return b.Object("foo").ACL().Set(ctx, entity, RoleReader) + }) + call("object acl list", func(b *BucketHandle) error { + _, err := b.Object("foo").ACL().List(ctx) + return err + }) + call("object acl delete", func(b *BucketHandle) error { + err := b.Object("foo").ACL().Delete(ctx, entity) + if errCode(err) == 404 { + return nil + } + return err + }) + + // Copy and compose. + call("copy", func(b *BucketHandle) error { + _, err := b.Object("copy").CopierFrom(b.Object("foo")).Run(ctx) + return err + }) + call("compose", func(b *BucketHandle) error { + _, err := b.Object("compose").ComposerFrom(b.Object("foo"), b.Object("copy")).Run(ctx) + return err + }) + + // Deletion. + call("delete object", func(b *BucketHandle) error { + err := b.Object("foo").Delete(ctx) + if err == ErrObjectNotExist { + return nil + } + return err + }) + for _, obj := range []string{"copy", "compose"} { + if err := b.UserProject(projID).Object(obj).Delete(ctx); err != nil { + t.Fatalf("could not delete %q: %v", obj, err) + } + } + if err := b.Delete(ctx); err != nil { + t.Fatalf("deleting bucket: %v", err) + } +} + +// TODO(jba): move to testutil, factor out from firestore/integration_test.go. 
+const ( + envFirestoreProjID = "GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID" + envFirestorePrivateKey = "GCLOUD_TESTS_GOLANG_FIRESTORE_KEY" +) + +func keyFileEmail(filename string) (string, error) { + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return "", err + } + var v struct { + ClientEmail string `json:"client_email"` + } + if err := json.Unmarshal(bytes, &v); err != nil { + return "", err + } + return v.ClientEmail, nil +} + +func TestNotifications(t *testing.T) { + ctx := context.Background() + client := testConfig(ctx, t) + defer client.Close() + bkt := client.Bucket(bucketName) + + checkNotifications := func(msg string, want map[string]*Notification) { + got, err := bkt.Notifications(ctx) + if err != nil { + t.Fatal(err) + } + if diff := testutil.Diff(got, want); diff != "" { + t.Errorf("%s: got=-, want=+:\n%s", msg, diff) + } + } + checkNotifications("initial", map[string]*Notification{}) + + nArg := &Notification{ + TopicProjectID: testutil.ProjID(), + TopicID: "go-storage-notification-test", + PayloadFormat: NoPayload, + } + n, err := bkt.AddNotification(ctx, nArg) + if err != nil { + t.Fatal(err) + } + nArg.ID = n.ID + if !testutil.Equal(n, nArg) { + t.Errorf("got %+v, want %+v", n, nArg) + } + checkNotifications("after add", map[string]*Notification{n.ID: n}) + + if err := bkt.DeleteNotification(ctx, n.ID); err != nil { + t.Fatal(err) + } + checkNotifications("after delete", map[string]*Notification{}) +} + +func TestIntegration_Public(t *testing.T) { + // Confirm that an unauthenticated client can access a public bucket. + + // See https://cloud.google.com/storage/docs/public-datasets/landsat + const landsatBucket = "gcp-public-data-landsat" + const landsatPrefix = "LC08/PRE/044/034/LC80440342016259LGN00/" + const landsatObject = landsatPrefix + "LC80440342016259LGN00_MTL.txt" + + // Create an unauthenticated client. + ctx := context.Background() + client, err := NewClient(ctx, option.WithoutAuthentication()) + if err != nil { + t.Fatal(err) + } + defer client.Close() + bkt := client.Bucket(landsatBucket) + obj := bkt.Object(landsatObject) + + // Read a public object. + bytes, err := readObject(ctx, obj) + if err != nil { + t.Fatal(err) + } + if got, want := len(bytes), 7903; got != want { + t.Errorf("len(bytes) = %d, want %d", got, want) + } + + // List objects in a public bucket. + iter := bkt.Objects(ctx, &Query{Prefix: landsatPrefix}) + gotCount := 0 + for { + _, err := iter.Next() + if err == iterator.Done { + break + } + if err != nil { + t.Fatal(err) + } + gotCount++ + } + if wantCount := 13; gotCount != wantCount { + t.Errorf("object count: got %d, want %d", gotCount, wantCount) + } + + errCode := func(err error) int { + if err, ok := err.(*googleapi.Error); !ok { + return -1 + } else { + return err.Code + } + } + + // Reading from or writing to a non-public bucket fails. + c := testConfig(ctx, t) + defer c.Close() + nonPublicObj := client.Bucket(bucketName).Object("noauth") + // Oddly, reading returns 403 but writing returns 401. + _, err = readObject(ctx, nonPublicObj) + if got, want := errCode(err), 403; got != want { + t.Errorf("got code %d; want %d\nerror: %v", got, want, err) + } + err = writeObject(ctx, nonPublicObj, "text/plain", []byte("b")) + if got, want := errCode(err), 401; got != want { + t.Errorf("got code %d; want %d\nerror: %v", got, want, err) + } +} + +func TestIntegration_ReadCRC(t *testing.T) { + // Test that the checksum is handled correctly when reading files. 
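+ // Two flags matter in the cases below: wantCheck reports whether the
+ // Reader decides the CRC can be verified at all, and wantChecked whether
+ // verification actually ran, which additionally requires reading the
+ // entire body of a response with a known content length.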
+ // For gzipped files, see https://github.com/GoogleCloudPlatform/google-cloud-dotnet/issues/1641.
+ if testing.Short() {
+ t.Skip("Integration tests skipped in short mode")
+ }
+ const (
+ // This is an uncompressed file.
+ // See https://cloud.google.com/storage/docs/public-datasets/landsat
+ uncompressedBucket = "gcp-public-data-landsat"
+ uncompressedObject = "LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt"
+
+ gzippedBucket = "storage-library-test-bucket"
+ gzippedObject = "gzipped-text.txt"
+ gzippedContents = "hello world" // uncompressed contents of the file
+ )
+ ctx := context.Background()
+ client, err := NewClient(ctx, option.WithoutAuthentication())
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer client.Close()
+
+ for _, test := range []struct {
+ desc string
+ obj *ObjectHandle
+ offset, length int64
+ readCompressed bool // don't decompress a gzipped file
+
+ wantErr bool
+ wantCheck bool // Should Reader try to check the CRC?
+ wantChecked bool // Did Reader actually check the CRC?
+ }{
+ {
+ desc: "uncompressed, entire file",
+ obj: client.Bucket(uncompressedBucket).Object(uncompressedObject),
+ offset: 0,
+ length: -1,
+ readCompressed: false,
+ wantCheck: true,
+ wantChecked: true,
+ },
+ {
+ desc: "uncompressed, entire file, don't decompress",
+ obj: client.Bucket(uncompressedBucket).Object(uncompressedObject),
+ offset: 0,
+ length: -1,
+ readCompressed: true,
+ wantCheck: true,
+ wantChecked: true,
+ },
+ {
+ desc: "uncompressed, suffix",
+ obj: client.Bucket(uncompressedBucket).Object(uncompressedObject),
+ offset: 1,
+ length: -1,
+ readCompressed: false,
+ wantCheck: false,
+ wantChecked: false,
+ },
+ {
+ desc: "uncompressed, prefix",
+ obj: client.Bucket(uncompressedBucket).Object(uncompressedObject),
+ offset: 0,
+ length: 18,
+ readCompressed: false,
+ wantCheck: false,
+ wantChecked: false,
+ },
+ {
+ // When a gzipped file is unzipped by GCS, we can't verify the checksum
+ // because it was computed against the zipped contents. There is no
+ // header that indicates that a gzipped file is being served unzipped.
+ // But our CRC check only happens if there is a Content-Length header,
+ // and that header is absent for this read.
+ desc: "compressed, entire file, server unzips",
+ obj: client.Bucket(gzippedBucket).Object(gzippedObject),
+ offset: 0,
+ length: -1,
+ readCompressed: false,
+ wantCheck: true,
+ wantChecked: false,
+ },
+ {
+ // When we read a gzipped file in its compressed form, it's like reading
+ // a regular file: the served content and the stored CRC match.
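+ // (ReadCompressed(true) turns into an "Accept-Encoding: gzip" request
+ // header - see NewRangeReader in reader.go - so the server sends the
+ // stored bytes untouched and the CRC can be verified.)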
+ desc: "compressed, entire file, read compressed", + obj: client.Bucket(gzippedBucket).Object(gzippedObject), + offset: 0, + length: -1, + readCompressed: true, + wantCheck: true, + wantChecked: true, + }, + { + desc: "compressed, partial, server unzips", + obj: client.Bucket(gzippedBucket).Object(gzippedObject), + offset: 1, + length: 8, + readCompressed: false, + wantErr: true, // GCS can't serve part of a gzipped object + wantCheck: false, + wantChecked: false, + }, + { + desc: "compressed, partial, read compressed", + obj: client.Bucket(gzippedBucket).Object(gzippedObject), + offset: 1, + length: 8, + readCompressed: true, + wantCheck: false, + wantChecked: false, + }, + } { + obj := test.obj.ReadCompressed(test.readCompressed) + r, err := obj.NewRangeReader(ctx, test.offset, test.length) + if err != nil { + if test.wantErr { + continue + } + t.Fatalf("%s: %v", test.desc, err) + } + if got, want := r.checkCRC, test.wantCheck; got != want { + t.Errorf("%s, checkCRC: got %t, want %t", test.desc, got, want) + } + _, err = ioutil.ReadAll(r) + _ = r.Close() + if err != nil { + t.Fatalf("%s: %v", test.desc, err) + } + if got, want := r.checkedCRC, test.wantChecked; got != want { + t.Errorf("%s, checkedCRC: got %t, want %t", test.desc, got, want) + } + } +} + +func TestIntegration_CancelWrite(t *testing.T) { + // Verify that canceling the writer's context immediately stops uploading an object. + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := testConfig(ctx, t) + defer client.Close() + bkt := client.Bucket(bucketName) + + cctx, cancel := context.WithCancel(ctx) + defer cancel() + obj := bkt.Object("cancel-write") + w := obj.NewWriter(cctx) + w.ChunkSize = googleapi.MinUploadChunkSize + buf := make([]byte, w.ChunkSize) + // Write the first chunk. This is read in its entirety before sending the request + // (see google.golang.org/api/gensupport.PrepareUpload), so we expect it to return + // without error. + _, err := w.Write(buf) + if err != nil { + t.Fatal(err) + } + // Now cancel the context. + cancel() + // The next Write should return context.Canceled. + _, err = w.Write(buf) + if err != context.Canceled { + t.Fatalf("got %v, wanted context.Canceled", err) + } + // The Close should too. 
+ err = w.Close() + if err != context.Canceled { + t.Fatalf("got %v, wanted context.Canceled", err) + } +} + +func TestIntegration_UpdateCORS(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + + ctx := context.Background() + client := testConfig(ctx, t) + defer client.Close() + + initialSettings := []CORS{ + { + MaxAge: time.Hour, + Methods: []string{"POST"}, + Origins: []string{"some-origin.com"}, + ResponseHeaders: []string{"foo-bar"}, + }, + } + + for _, test := range []struct { + input []CORS + want []CORS + }{ + { + input: []CORS{ + { + MaxAge: time.Hour, + Methods: []string{"GET"}, + Origins: []string{"*"}, + ResponseHeaders: []string{"some-header"}, + }, + }, + want: []CORS{ + { + MaxAge: time.Hour, + Methods: []string{"GET"}, + Origins: []string{"*"}, + ResponseHeaders: []string{"some-header"}, + }, + }, + }, + { + input: []CORS{}, + want: nil, + }, + { + input: nil, + want: []CORS{ + { + MaxAge: time.Hour, + Methods: []string{"POST"}, + Origins: []string{"some-origin.com"}, + ResponseHeaders: []string{"foo-bar"}, + }, + }, + }, + } { + bkt := client.Bucket(uidSpace.New()) + defer func(b *BucketHandle) { + err := b.Delete(ctx) + if err != nil { + t.Fatal(err) + } + }(bkt) + err := bkt.Create(ctx, testutil.ProjID(), &BucketAttrs{CORS: initialSettings}) + if err != nil { + t.Fatal(err) + } + + _, err = bkt.Update(ctx, BucketAttrsToUpdate{CORS: test.input}) + if err != nil { + t.Fatal(err) + } + + attrs, err := bkt.Attrs(ctx) + if err != nil { + t.Fatal(err) + } + + if diff := testutil.Diff(attrs.CORS, test.want); diff != "" { + t.Errorf("input: %v\ngot=-, want=+:\n%s", test.input, diff) + } + } +} + +func TestIntegration_UpdateRetentionPolicy(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + + ctx := context.Background() + client := testConfig(ctx, t) + defer client.Close() + + initial := &RetentionPolicy{RetentionPeriod: time.Minute} + + for _, test := range []struct { + input *RetentionPolicy + want *RetentionPolicy + }{ + { // Update + input: &RetentionPolicy{RetentionPeriod: time.Hour}, + want: &RetentionPolicy{RetentionPeriod: time.Hour}, + }, + { // Update even with timestamp (EffectiveTime should be ignored) + input: &RetentionPolicy{RetentionPeriod: time.Hour, EffectiveTime: time.Now()}, + want: &RetentionPolicy{RetentionPeriod: time.Hour}, + }, + { // Remove + input: &RetentionPolicy{}, + want: nil, + }, + { // Remove even with timestamp (EffectiveTime should be ignored) + input: &RetentionPolicy{EffectiveTime: time.Now()}, + want: nil, + }, + { // Ignore + input: nil, + want: initial, + }, + } { + bkt := client.Bucket(uidSpace.New()) + err := bkt.Create(ctx, testutil.ProjID(), &BucketAttrs{RetentionPolicy: initial}) + if err != nil { + t.Fatal(err) + } + + defer func() { + if err := bkt.Delete(ctx); err != nil { + t.Fatal(err) + } + }() + + _, err = bkt.Update(ctx, BucketAttrsToUpdate{RetentionPolicy: test.input}) + if err != nil { + t.Fatal(err) + } + + attrs, err := bkt.Attrs(ctx) + if err != nil { + t.Fatal(err) + } + + if attrs.RetentionPolicy != nil && attrs.RetentionPolicy.EffectiveTime.Unix() == 0 { + // Should be set by the server and parsed by the client + t.Fatal("EffectiveTime should be set, but it was not") + } + if diff := testutil.Diff(attrs.RetentionPolicy, test.want, cmpopts.IgnoreTypes(time.Time{})); diff != "" { + t.Errorf("input: %v\ngot=-, want=+:\n%s", test.input, diff) + } + } +} + +func TestIntegration_DeleteObjectInBucketWithRetentionPolicy(t *testing.T) { + 
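// Objects governed by an unexpired, unlocked retention policy cannot be
+ // deleted; clearing the policy (RetentionPeriod: 0, as exercised below)
+ // lifts that restriction.
+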
if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + + ctx := context.Background() + client := testConfig(ctx, t) + defer client.Close() + + bkt := client.Bucket(uidSpace.New()) + err := bkt.Create(ctx, testutil.ProjID(), &BucketAttrs{RetentionPolicy: &RetentionPolicy{RetentionPeriod: 25 * time.Hour}}) + if err != nil { + t.Fatal(err) + } + + oh := bkt.Object("some-object") + if err = writeObject(ctx, oh, "text/plain", []byte("hello world")); err != nil { + t.Fatal(err) + } + + err = oh.Delete(ctx) + if err == nil { + t.Fatal("expected to err deleting an object in a bucket with retention period, but got nil") + } + + // Remove the retention period + _, err = bkt.Update(ctx, BucketAttrsToUpdate{RetentionPolicy: &RetentionPolicy{RetentionPeriod: 0}}) + if err != nil { + t.Fatal(err) + } + + err = oh.Delete(ctx) + if err != nil { + t.Fatal(err) + } + + if err := bkt.Delete(ctx); err != nil { + t.Fatal(err) + } +} + +func TestIntegration_LockBucket(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + + ctx := context.Background() + client := testConfig(ctx, t) + defer client.Close() + + bkt := client.Bucket(uidSpace.New()) + err := bkt.Create(ctx, testutil.ProjID(), &BucketAttrs{RetentionPolicy: &RetentionPolicy{RetentionPeriod: time.Hour * 25}}) + if err != nil { + t.Fatal(err) + } + + attrs, err := bkt.Attrs(ctx) + if err != nil { + t.Fatal(err) + } + + err = bkt.If(BucketConditions{MetagenerationMatch: attrs.MetaGeneration}).LockRetentionPolicy(ctx) + if err != nil { + t.Fatal("could not lock", err) + } + + _, err = bkt.Update(ctx, BucketAttrsToUpdate{RetentionPolicy: &RetentionPolicy{RetentionPeriod: time.Hour}}) + if err == nil { + t.Fatal("Expected error updating locked bucket, got nil") + } +} + +func TestIntegration_LockBucket_MetagenerationRequired(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + + ctx := context.Background() + client := testConfig(ctx, t) + defer client.Close() + + bkt := client.Bucket(uidSpace.New()) + err := bkt.Create(ctx, testutil.ProjID(), &BucketAttrs{RetentionPolicy: &RetentionPolicy{RetentionPeriod: time.Hour * 25}}) + if err != nil { + t.Fatal(err) + } + + err = bkt.LockRetentionPolicy(ctx) + if err == nil { + t.Fatal("expected error locking bucket without metageneration condition, got nil") + } +} + +func writeObject(ctx context.Context, obj *ObjectHandle, contentType string, contents []byte) error { + w := obj.NewWriter(ctx) + w.ContentType = contentType + w.CacheControl = "public, max-age=60" + if contents != nil { + if _, err := w.Write(contents); err != nil { + _ = w.Close() + return err + } + } + return w.Close() +} + +func readObject(ctx context.Context, obj *ObjectHandle) ([]byte, error) { + r, err := obj.NewReader(ctx) + if err != nil { + return nil, err + } + defer r.Close() + return ioutil.ReadAll(r) +} + +// cleanup deletes the bucket used for testing, as well as old +// testing buckets that weren't cleaned previously. +func cleanup() error { + if testing.Short() { + return nil // Don't clean up in short mode. + } + ctx := context.Background() + client := config(ctx) + if client == nil { + return nil // Don't cleanup if we're not configured correctly. + } + defer client.Close() + if err := killBucket(ctx, client, bucketName); err != nil { + return err + } + + // Delete buckets whose name begins with our test prefix, and which were + // created a while ago. 
(Unfortunately GCS doesn't provide last-modified + // time, which would be a better way to check for staleness.) + const expireAge = 24 * time.Hour + projectID := testutil.ProjID() + it := client.Buckets(ctx, projectID) + it.Prefix = testPrefix + for { + bktAttrs, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return err + } + if time.Since(bktAttrs.Created) > expireAge { + log.Printf("deleting bucket %q, which is more than %s old", bktAttrs.Name, expireAge) + if err := killBucket(ctx, client, bktAttrs.Name); err != nil { + return err + } + } + } + return nil +} + +// killBucket deletes a bucket and all its objects. +func killBucket(ctx context.Context, client *Client, bucketName string) error { + bkt := client.Bucket(bucketName) + // Bucket must be empty to delete. + it := bkt.Objects(ctx, nil) + for { + objAttrs, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return err + } + if err := bkt.Object(objAttrs.Name).Delete(ctx); err != nil { + return fmt.Errorf("deleting %q: %v", bucketName+"/"+objAttrs.Name, err) + } + } + // GCS is eventually consistent, so this delete may fail because the + // replica still sees an object in the bucket. We log the error and expect + // a later test run to delete the bucket. + if err := bkt.Delete(ctx); err != nil { + log.Printf("deleting %q: %v", bucketName, err) + } + return nil +} + +func randomContents() []byte { + h := md5.New() + io.WriteString(h, fmt.Sprintf("hello world%d", rand.Intn(100000))) + return h.Sum(nil) +} + +type zeros struct{} + +func (zeros) Read(p []byte) (int, error) { return len(p), nil } diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go new file mode 100644 index 0000000..46423a8 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/invoke.go @@ -0,0 +1,36 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "cloud.google.com/go/internal" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" +) + +// runWithRetry calls the function until it returns nil or a non-retryable error, or +// the context is done. +func runWithRetry(ctx context.Context, call func() error) error { + return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) { + err = call() + if err == nil { + return true, nil + } + if shouldRetry(err) { + return false, nil + } + return true, err + }) +} diff --git a/vendor/cloud.google.com/go/storage/invoke_test.go b/vendor/cloud.google.com/go/storage/invoke_test.go new file mode 100644 index 0000000..f7a5807 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/invoke_test.go @@ -0,0 +1,56 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "errors" + "testing" + + "golang.org/x/net/context" + "google.golang.org/api/googleapi" +) + +func TestInvoke(t *testing.T) { + t.Parallel() + ctx := context.Background() + // Time-based tests are flaky. We just make sure that invoke eventually + // returns with the right error. + + for _, test := range []struct { + count int // number of times to return retryable error + retryCode int // error code for retryable error + err error // error to return after count returns of retryCode + }{ + {0, 0, nil}, + {0, 0, errors.New("foo")}, + {1, 429, nil}, + {1, 429, errors.New("bar")}, + {2, 518, nil}, + {2, 599, &googleapi.Error{Code: 428}}, + } { + counter := 0 + call := func() error { + counter++ + if counter <= test.count { + return &googleapi.Error{Code: test.retryCode} + } + return test.err + } + got := runWithRetry(ctx, call) + if got != test.err { + t.Errorf("%v: got %v, want %v", test, got, test.err) + } + } +} diff --git a/vendor/cloud.google.com/go/storage/not_go110.go b/vendor/cloud.google.com/go/storage/not_go110.go new file mode 100644 index 0000000..c354e74 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/not_go110.go @@ -0,0 +1,40 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.10 + +package storage + +import ( + "net/url" + "strings" + + "google.golang.org/api/googleapi" +) + +func shouldRetry(err error) bool { + switch e := err.(type) { + case *googleapi.Error: + // Retry on 429 and 5xx, according to + // https://cloud.google.com/storage/docs/exponential-backoff. + return e.Code == 429 || (e.Code >= 500 && e.Code < 600) + case *url.Error: + // Retry on REFUSED_STREAM. + // Unfortunately the error type is unexported, so we resort to string + // matching. + return strings.Contains(e.Error(), "REFUSED_STREAM") + default: + return false + } +} diff --git a/vendor/cloud.google.com/go/storage/not_go17.go b/vendor/cloud.google.com/go/storage/not_go17.go new file mode 100644 index 0000000..1f6f7ae --- /dev/null +++ b/vendor/cloud.google.com/go/storage/not_go17.go @@ -0,0 +1,26 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.7
+
+package storage
+
+import (
+ "net/http"
+)
+
+func withContext(r *http.Request, _ interface{}) *http.Request {
+ // In Go 1.6 and below, ignore the context.
+ return r
+}
diff --git a/vendor/cloud.google.com/go/storage/notifications.go b/vendor/cloud.google.com/go/storage/notifications.go
new file mode 100644
index 0000000..d371a67
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/notifications.go
@@ -0,0 +1,188 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "errors"
+ "fmt"
+ "regexp"
+
+ "cloud.google.com/go/internal/trace"
+ "golang.org/x/net/context"
+ raw "google.golang.org/api/storage/v1"
+)
+
+// A Notification describes how to send Cloud PubSub messages when certain
+// events occur in a bucket.
+type Notification struct {
+ // The ID of the notification.
+ ID string
+
+ // The ID of the topic to which this subscription publishes.
+ TopicID string
+
+ // The ID of the project to which the topic belongs.
+ TopicProjectID string
+
+ // Only send notifications about listed event types. If empty, send notifications
+ // for all event types.
+ // See https://cloud.google.com/storage/docs/pubsub-notifications#events.
+ EventTypes []string
+
+ // If present, only apply this notification configuration to object names that
+ // begin with this prefix.
+ ObjectNamePrefix string
+
+ // An optional list of additional attributes to attach to each Cloud PubSub
+ // message published for this notification subscription.
+ CustomAttributes map[string]string
+
+ // The contents of the message payload.
+ // See https://cloud.google.com/storage/docs/pubsub-notifications#payload.
+ PayloadFormat string
+}
+
+// Values for Notification.PayloadFormat.
+const (
+ // Send no payload with notification messages.
+ NoPayload = "NONE"
+
+ // Send object metadata as JSON with notification messages.
+ JSONPayload = "JSON_API_V1"
+)
+
+// Values for Notification.EventTypes.
+const (
+ // Event that occurs when an object is successfully created.
+ ObjectFinalizeEvent = "OBJECT_FINALIZE"
+
+ // Event that occurs when the metadata of an existing object changes.
+ ObjectMetadataUpdateEvent = "OBJECT_METADATA_UPDATE"
+
+ // Event that occurs when an object is permanently deleted.
+ ObjectDeleteEvent = "OBJECT_DELETE"
+
+ // Event that occurs when the live version of an object becomes an
+ // archived version.
+ ObjectArchiveEvent = "OBJECT_ARCHIVE"
+)
+
+func toNotification(rn *raw.Notification) *Notification {
+ n := &Notification{
+ ID: rn.Id,
+ EventTypes: rn.EventTypes,
+ ObjectNamePrefix: rn.ObjectNamePrefix,
+ CustomAttributes: rn.CustomAttributes,
+ PayloadFormat: rn.PayloadFormat,
+ }
+ n.TopicProjectID, n.TopicID = parseNotificationTopic(rn.Topic)
+ return n
+}
+
+var topicRE = regexp.MustCompile("^//pubsub.googleapis.com/projects/([^/]+)/topics/([^/]+)")
+
+// parseNotificationTopic extracts the project and topic IDs from the full
+// resource name returned by the service. If the name is malformed, it returns
+// "?" for both IDs.
+func parseNotificationTopic(nt string) (projectID, topicID string) {
+ matches := topicRE.FindStringSubmatch(nt)
+ if matches == nil {
+ return "?", "?"
+ }
+ return matches[1], matches[2]
+}
+
+func toRawNotification(n *Notification) *raw.Notification {
+ return &raw.Notification{
+ Id: n.ID,
+ Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s",
+ n.TopicProjectID, n.TopicID),
+ EventTypes: n.EventTypes,
+ ObjectNamePrefix: n.ObjectNamePrefix,
+ CustomAttributes: n.CustomAttributes,
+ PayloadFormat: string(n.PayloadFormat),
+ }
+}
+
+// AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID
+// and PayloadFormat, and must not set its ID. The other fields are all optional. The
+// returned Notification's ID can be used to refer to it.
+func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (ret *Notification, err error) {
+ ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.AddNotification")
+ defer func() { trace.EndSpan(ctx, err) }()
+
+ if n.ID != "" {
+ return nil, errors.New("storage: AddNotification: ID must not be set")
+ }
+ if n.TopicProjectID == "" {
+ return nil, errors.New("storage: AddNotification: missing TopicProjectID")
+ }
+ if n.TopicID == "" {
+ return nil, errors.New("storage: AddNotification: missing TopicID")
+ }
+ call := b.c.raw.Notifications.Insert(b.name, toRawNotification(n))
+ setClientHeader(call.Header())
+ if b.userProject != "" {
+ call.UserProject(b.userProject)
+ }
+ rn, err := call.Context(ctx).Do()
+ if err != nil {
+ return nil, err
+ }
+ return toNotification(rn), nil
+}
+
+// Notifications returns all the Notifications configured for this bucket, as a map
+// indexed by notification ID.
+func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notification, err error) {
+ ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications")
+ defer func() { trace.EndSpan(ctx, err) }()
+
+ call := b.c.raw.Notifications.List(b.name)
+ setClientHeader(call.Header())
+ if b.userProject != "" {
+ call.UserProject(b.userProject)
+ }
+ var res *raw.Notifications
+ err = runWithRetry(ctx, func() error {
+ res, err = call.Context(ctx).Do()
+ return err
+ })
+ if err != nil {
+ return nil, err
+ }
+ return notificationsToMap(res.Items), nil
+}
+
+func notificationsToMap(rns []*raw.Notification) map[string]*Notification {
+ m := map[string]*Notification{}
+ for _, rn := range rns {
+ m[rn.Id] = toNotification(rn)
+ }
+ return m
+}
+
+// DeleteNotification deletes the notification with the given ID.
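+//
+// A sketch of the full round trip (illustrative only; "my-topic" is a
+// placeholder topic name):
+//
+// n, err := bkt.AddNotification(ctx, &Notification{
+// TopicProjectID: projID,
+// TopicID: "my-topic",
+// PayloadFormat: NoPayload,
+// })
+// // ... later ...
+// err = bkt.DeleteNotification(ctx, n.ID)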
+func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification") + defer func() { trace.EndSpan(ctx, err) }() + + call := b.c.raw.Notifications.Delete(b.name, id) + setClientHeader(call.Header()) + if b.userProject != "" { + call.UserProject(b.userProject) + } + return call.Context(ctx).Do() +} diff --git a/vendor/cloud.google.com/go/storage/notifications_test.go b/vendor/cloud.google.com/go/storage/notifications_test.go new file mode 100644 index 0000000..3d40cea --- /dev/null +++ b/vendor/cloud.google.com/go/storage/notifications_test.go @@ -0,0 +1,98 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "testing" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + raw "google.golang.org/api/storage/v1" +) + +func TestParseNotificationTopic(t *testing.T) { + for _, test := range []struct { + in string + wantProjectID string + wantTopicID string + }{ + {"", "?", "?"}, + {"foobar", "?", "?"}, + {"//pubsub.googleapis.com/projects/foo", "?", "?"}, + {"//pubsub.googleapis.com/projects/my-project/topics/my-topic", + "my-project", "my-topic"}, + } { + gotProjectID, gotTopicID := parseNotificationTopic(test.in) + if gotProjectID != test.wantProjectID || gotTopicID != test.wantTopicID { + t.Errorf("%q: got (%q, %q), want (%q, %q)", + test.in, gotProjectID, gotTopicID, test.wantProjectID, test.wantTopicID) + } + } + +} + +func TestConvertNotification(t *testing.T) { + want := &Notification{ + ID: "id", + TopicProjectID: "my-project", + TopicID: "my-topic", + EventTypes: []string{ObjectFinalizeEvent}, + ObjectNamePrefix: "prefix", + CustomAttributes: map[string]string{"a": "b"}, + PayloadFormat: JSONPayload, + } + got := toNotification(toRawNotification(want)) + if diff := testutil.Diff(got, want); diff != "" { + t.Errorf("got=-, want=+:\n%s", diff) + } +} + +func TestNotificationsToMap(t *testing.T) { + got := notificationsToMap(nil) + want := map[string]*Notification{} + if !testutil.Equal(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } + + in := []*raw.Notification{ + {Id: "a", Topic: "//pubsub.googleapis.com/projects/P1/topics/T1"}, + {Id: "b", Topic: "//pubsub.googleapis.com/projects/P2/topics/T2"}, + {Id: "c", Topic: "//pubsub.googleapis.com/projects/P3/topics/T3"}, + } + got = notificationsToMap(in) + want = map[string]*Notification{ + "a": &Notification{ID: "a", TopicProjectID: "P1", TopicID: "T1"}, + "b": &Notification{ID: "b", TopicProjectID: "P2", TopicID: "T2"}, + "c": &Notification{ID: "c", TopicProjectID: "P3", TopicID: "T3"}, + } + if diff := testutil.Diff(got, want); diff != "" { + t.Errorf("got=-, want=+:\n%s", diff) + } +} + +func TestAddNotificationsErrors(t *testing.T) { + c := &Client{} + b := c.Bucket("b") + for _, n := range []*Notification{ + {ID: "foo", TopicProjectID: "p", TopicID: "t"}, // has ID + {TopicProjectID: "p"}, // missing TopicID + 
{TopicID: "t"}, // missing TopicProjectID + } { + _, err := b.AddNotification(context.Background(), n) + if err == nil { + t.Errorf("%+v: got nil, want error", n) + } + } +} diff --git a/vendor/cloud.google.com/go/storage/oc_test.go b/vendor/cloud.google.com/go/storage/oc_test.go new file mode 100644 index 0000000..5bf2f59 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/oc_test.go @@ -0,0 +1,40 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +package storage + +import ( + "testing" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" +) + +func TestIntegration_OCTracing(t *testing.T) { + ctx := context.Background() + client := testConfig(ctx, t) + defer client.Close() + + te := testutil.NewTestExporter() + defer te.Unregister() + + bkt := client.Bucket(bucketName) + bkt.Attrs(ctx) + + if len(te.Spans) == 0 { + t.Fatalf("Expected some spans to be created, but got %d", 0) + } +} diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go new file mode 100644 index 0000000..6af5a94 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/reader.go @@ -0,0 +1,245 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "errors" + "fmt" + "hash/crc32" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + + "cloud.google.com/go/internal/trace" + "golang.org/x/net/context" + "google.golang.org/api/googleapi" +) + +var crc32cTable = crc32.MakeTable(crc32.Castagnoli) + +// NewReader creates a new Reader to read the contents of the +// object. +// ErrObjectNotExist will be returned if the object is not found. +// +// The caller must call Close on the returned Reader when done reading. +func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) { + return o.NewRangeReader(ctx, 0, -1) +} + +// NewRangeReader reads part of an object, reading at most length bytes +// starting at the given offset. If length is negative, the object is read +// until the end. 
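+//
+// A minimal sketch (error handling elided):
+//
+// r, err := obj.NewRangeReader(ctx, 0, 16) // the first 16 bytes
+// defer r.Close()
+// data, err := ioutil.ReadAll(r)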
+func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (r *Reader, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.NewRangeReader") + defer func() { trace.EndSpan(ctx, err) }() + + if err := o.validate(); err != nil { + return nil, err + } + if offset < 0 { + return nil, fmt.Errorf("storage: invalid offset %d < 0", offset) + } + if o.conds != nil { + if err := o.conds.validate("NewRangeReader"); err != nil { + return nil, err + } + } + u := &url.URL{ + Scheme: "https", + Host: "storage.googleapis.com", + Path: fmt.Sprintf("/%s/%s", o.bucket, o.object), + RawQuery: conditionsQuery(o.gen, o.conds), + } + verb := "GET" + if length == 0 { + verb = "HEAD" + } + req, err := http.NewRequest(verb, u.String(), nil) + if err != nil { + return nil, err + } + req = withContext(req, ctx) + if length < 0 && offset > 0 { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) + } else if length > 0 { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)) + } + if o.userProject != "" { + req.Header.Set("X-Goog-User-Project", o.userProject) + } + if o.readCompressed { + req.Header.Set("Accept-Encoding", "gzip") + } + if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil { + return nil, err + } + var res *http.Response + err = runWithRetry(ctx, func() error { + res, err = o.c.hc.Do(req) + if err != nil { + return err + } + if res.StatusCode == http.StatusNotFound { + res.Body.Close() + return ErrObjectNotExist + } + if res.StatusCode < 200 || res.StatusCode > 299 { + body, _ := ioutil.ReadAll(res.Body) + res.Body.Close() + return &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + Body: string(body), + } + } + if offset > 0 && length != 0 && res.StatusCode != http.StatusPartialContent { + res.Body.Close() + return errors.New("storage: partial request not satisfied") + } + return nil + }) + if err != nil { + return nil, err + } + + var size int64 // total size of object, even if a range was requested. + if res.StatusCode == http.StatusPartialContent { + cr := strings.TrimSpace(res.Header.Get("Content-Range")) + if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") { + return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) + } + size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64) + if err != nil { + return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) + } + } else { + size = res.ContentLength + } + + remain := res.ContentLength + body := res.Body + if length == 0 { + remain = 0 + body.Close() + body = emptyBody + } + var ( + checkCRC bool + crc uint32 + ) + // Even if there is a CRC header, we can't compute the hash on partial data. + if remain == size { + crc, checkCRC = parseCRC32c(res) + } + return &Reader{ + body: body, + size: size, + remain: remain, + contentType: res.Header.Get("Content-Type"), + contentEncoding: res.Header.Get("Content-Encoding"), + cacheControl: res.Header.Get("Cache-Control"), + wantCRC: crc, + checkCRC: checkCRC, + }, nil +} + +func parseCRC32c(res *http.Response) (uint32, bool) { + const prefix = "crc32c=" + for _, spec := range res.Header["X-Goog-Hash"] { + if strings.HasPrefix(spec, prefix) { + c, err := decodeUint32(spec[len(prefix):]) + if err == nil { + return c, true + } + } + } + return 0, false +} + +var emptyBody = ioutil.NopCloser(strings.NewReader("")) + +// Reader reads a Cloud Storage object. +// It implements io.Reader. 
+// +// Typically, a Reader computes the CRC of the downloaded content and compares it to +// the stored CRC, returning an error from Read if there is a mismatch. This integrity check +// is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding. +type Reader struct { + body io.ReadCloser + remain, size int64 + contentType string + contentEncoding string + cacheControl string + checkCRC bool // should we check the CRC? + wantCRC uint32 // the CRC32c value the server sent in the header + gotCRC uint32 // running crc + checkedCRC bool // did we check the CRC? (For tests.) +} + +// Close closes the Reader. It must be called when done reading. +func (r *Reader) Close() error { + return r.body.Close() +} + +func (r *Reader) Read(p []byte) (int, error) { + n, err := r.body.Read(p) + if r.remain != -1 { + r.remain -= int64(n) + } + if r.checkCRC { + r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n]) + // Check CRC here. It would be natural to check it in Close, but + // everybody defers Close on the assumption that it doesn't return + // anything worth looking at. + if r.remain == 0 { // Only check if we have Content-Length. + r.checkedCRC = true + if r.gotCRC != r.wantCRC { + return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d", + r.gotCRC, r.wantCRC) + } + } + } + return n, err +} + +// Size returns the size of the object in bytes. +// The returned value is always the same and is not affected by +// calls to Read or Close. +func (r *Reader) Size() int64 { + return r.size +} + +// Remain returns the number of bytes left to read, or -1 if unknown. +func (r *Reader) Remain() int64 { + return r.remain +} + +// ContentType returns the content type of the object. +func (r *Reader) ContentType() string { + return r.contentType +} + +// ContentEncoding returns the content encoding of the object. +func (r *Reader) ContentEncoding() string { + return r.contentEncoding +} + +// CacheControl returns the cache control of the object. +func (r *Reader) CacheControl() string { + return r.cacheControl +} diff --git a/vendor/cloud.google.com/go/storage/reader_test.go b/vendor/cloud.google.com/go/storage/reader_test.go new file mode 100644 index 0000000..de734c6 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/reader_test.go @@ -0,0 +1,112 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package storage + +import ( + "fmt" + "io/ioutil" + "net/http" + "strconv" + "strings" + "testing" + + "golang.org/x/net/context" + "google.golang.org/api/option" +) + +const readData = "0123456789" + +func TestRangeReader(t *testing.T) { + hc, close := newTestServer(handleRangeRead) + defer close() + ctx := context.Background() + c, err := NewClient(ctx, option.WithHTTPClient(hc)) + if err != nil { + t.Fatal(err) + } + obj := c.Bucket("b").Object("o") + for _, test := range []struct { + offset, length int64 + want string + }{ + {0, -1, readData}, + {0, 10, readData}, + {0, 5, readData[:5]}, + {1, 3, readData[1:4]}, + {6, -1, readData[6:]}, + {4, 20, readData[4:]}, + } { + r, err := obj.NewRangeReader(ctx, test.offset, test.length) + if err != nil { + t.Errorf("%d/%d: %v", test.offset, test.length, err) + continue + } + gotb, err := ioutil.ReadAll(r) + if err != nil { + t.Errorf("%d/%d: %v", test.offset, test.length, err) + continue + } + if got := string(gotb); got != test.want { + t.Errorf("%d/%d: got %q, want %q", test.offset, test.length, got, test.want) + } + } +} + +func handleRangeRead(w http.ResponseWriter, r *http.Request) { + rh := strings.TrimSpace(r.Header.Get("Range")) + data := readData + var from, to int + if rh == "" { + from = 0 + to = len(data) + } else { + // assume "bytes=N-" or "bytes=N-M" + var err error + i := strings.IndexRune(rh, '=') + j := strings.IndexRune(rh, '-') + from, err = strconv.Atoi(rh[i+1 : j]) + if err != nil { + w.WriteHeader(500) + return + } + to = len(data) + if j+1 < len(rh) { + to, err = strconv.Atoi(rh[j+1:]) + if err != nil { + w.WriteHeader(500) + return + } + to++ // Range header is inclusive, Go slice is exclusive + } + if from >= len(data) && to != from { + w.WriteHeader(416) + return + } + if from > len(data) { + from = len(data) + } + if to > len(data) { + to = len(data) + } + } + data = data[from:to] + if data != readData { + w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", from, to-1, len(readData))) + w.WriteHeader(http.StatusPartialContent) + } + if _, err := w.Write([]byte(data)); err != nil { + panic(err) + } +} diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go new file mode 100644 index 0000000..c6a2704 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -0,0 +1,1067 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package storage + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" + + "cloud.google.com/go/internal/trace" + "google.golang.org/api/option" + htransport "google.golang.org/api/transport/http" + + "cloud.google.com/go/internal/optional" + "cloud.google.com/go/internal/version" + "golang.org/x/net/context" + "google.golang.org/api/googleapi" + raw "google.golang.org/api/storage/v1" +) + +var ( + ErrBucketNotExist = errors.New("storage: bucket doesn't exist") + ErrObjectNotExist = errors.New("storage: object doesn't exist") +) + +const userAgent = "gcloud-golang-storage/20151204" + +const ( + // ScopeFullControl grants permissions to manage your + // data and permissions in Google Cloud Storage. + ScopeFullControl = raw.DevstorageFullControlScope + + // ScopeReadOnly grants permissions to + // view your data in Google Cloud Storage. + ScopeReadOnly = raw.DevstorageReadOnlyScope + + // ScopeReadWrite grants permissions to manage your + // data in Google Cloud Storage. + ScopeReadWrite = raw.DevstorageReadWriteScope +) + +var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo) + +func setClientHeader(headers http.Header) { + headers.Set("x-goog-api-client", xGoogHeader) +} + +// Client is a client for interacting with Google Cloud Storage. +// +// Clients should be reused instead of created as needed. +// The methods of Client are safe for concurrent use by multiple goroutines. +type Client struct { + hc *http.Client + raw *raw.Service +} + +// NewClient creates a new Google Cloud Storage client. +// The default scope is ScopeFullControl. To use a different scope, like ScopeReadOnly, use option.WithScopes. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + o := []option.ClientOption{ + option.WithScopes(ScopeFullControl), + option.WithUserAgent(userAgent), + } + opts = append(o, opts...) + hc, ep, err := htransport.NewClient(ctx, opts...) + if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + rawService, err := raw.New(hc) + if err != nil { + return nil, fmt.Errorf("storage client: %v", err) + } + if ep != "" { + rawService.BasePath = ep + } + return &Client{ + hc: hc, + raw: rawService, + }, nil +} + +// Close closes the Client. +// +// Close need not be called at program exit. +func (c *Client) Close() error { + // Set fields to nil so that subsequent uses + // will panic. + c.hc = nil + c.raw = nil + return nil +} + +// SignedURLOptions allows you to restrict the access to the signed URL. +type SignedURLOptions struct { + // GoogleAccessID represents the authorizer of the signed URL generation. + // It is typically the Google service account client email address from + // the Google Developers Console in the form of "xxx@developer.gserviceaccount.com". + // Required. + GoogleAccessID string + + // PrivateKey is the Google service account private key. It is obtainable + // from the Google Developers Console. + // At https://console.developers.google.com/project//apiui/credential, + // create a service account client ID or reuse one of your existing service account + // credentials. Click on the "Generate new P12 key" to generate and download + // a new private key. Once you download the P12 file, use the following command + // to convert it into a PEM file. 
+	//
+	//    $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes
+	//
+	// Provide the contents of the PEM file as a byte slice.
+	// Exactly one of PrivateKey or SignBytes must be non-nil.
+	PrivateKey []byte
+
+	// SignBytes is a function for implementing custom signing.
+	// If your application is running on Google App Engine, you can use appengine's internal signing function:
+	//     ctx := appengine.NewContext(request)
+	//     acc, _ := appengine.ServiceAccount(ctx)
+	//     url, err := SignedURL("bucket", "object", &SignedURLOptions{
+	//     	GoogleAccessID: acc,
+	//     	SignBytes: func(b []byte) ([]byte, error) {
+	//     		_, signedBytes, err := appengine.SignBytes(ctx, b)
+	//     		return signedBytes, err
+	//     	},
+	//     	// etc.
+	//     })
+	//
+	// Exactly one of PrivateKey or SignBytes must be non-nil.
+	SignBytes func([]byte) ([]byte, error)
+
+	// Method is the HTTP method to be used with the signed URL.
+	// Signed URLs can be used with GET, HEAD, PUT, and DELETE requests.
+	// Required.
+	Method string
+
+	// Expires is the expiration time on the signed URL. It must be
+	// a datetime in the future.
+	// Required.
+	Expires time.Time
+
+	// ContentType is the content type header the client must provide
+	// to use the generated signed URL.
+	// Optional.
+	ContentType string
+
+	// Headers is a list of extension headers the client must provide
+	// in order to use the generated signed URL.
+	// Optional.
+	Headers []string
+
+	// MD5 is the base64 encoded MD5 checksum of the file.
+	// If provided, the client should provide the exact value on the request
+	// header in order to use the signed URL.
+	// Optional.
+	MD5 string
+}
+
+var (
+	canonicalHeaderRegexp    = regexp.MustCompile(`(?i)^(x-goog-[^:]+):(.*)?$`)
+	excludedCanonicalHeaders = map[string]bool{
+		"x-goog-encryption-key":        true,
+		"x-goog-encryption-key-sha256": true,
+	}
+)
+
+// sanitizeHeaders applies the specifications for canonical extension headers at
+// https://cloud.google.com/storage/docs/access-control/signed-urls#about-canonical-extension-headers.
+func sanitizeHeaders(hdrs []string) []string {
+	headerMap := map[string][]string{}
+	for _, hdr := range hdrs {
+		// No leading or trailing whitespaces.
+		sanitizedHeader := strings.TrimSpace(hdr)
+
+		// Only keep canonical headers, discard any others.
+		headerMatches := canonicalHeaderRegexp.FindStringSubmatch(sanitizedHeader)
+		if len(headerMatches) == 0 {
+			continue
+		}
+
+		header := strings.ToLower(strings.TrimSpace(headerMatches[1]))
+		// Look up the canonicalized (lowercased, trimmed) name so that
+		// excluded headers are caught regardless of the caller's casing.
+		if excludedCanonicalHeaders[header] {
+			// Do not keep any deliberately excluded canonical headers when signing.
+			continue
+		}
+		value := strings.TrimSpace(headerMatches[2])
+		if len(value) > 0 {
+			// Remove duplicate headers by appending the values of duplicates
+			// in their order of appearance.
+			headerMap[header] = append(headerMap[header], value)
+		}
+	}
+
+	var sanitizedHeaders []string
+	for header, values := range headerMap {
+		// There should be no spaces around the colon separating the
+		// header name from the header value or around the values
+		// themselves. The values should be separated by commas.
+		// NOTE: The semantics for headers without a value are not clear.
+		// However from specifications these should be edge-cases
+		// anyway and we should assume that there will be no
+		// canonical headers using empty values. Any such headers
+		// are discarded at the regexp stage above.
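+		// For example (illustrative): the inputs "x-goog-meta-a: 1" and
+		// " X-Goog-Meta-A :2" both canonicalize to the name "x-goog-meta-a"
+		// and are emitted here as the single entry "x-goog-meta-a:1,2".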
+		sanitizedHeaders = append(
+			sanitizedHeaders,
+			fmt.Sprintf("%s:%s", header, strings.Join(values, ",")),
+		)
+	}
+	sort.Strings(sanitizedHeaders)
+	return sanitizedHeaders
+}
+
+// SignedURL returns a URL for the specified object. Signed URLs allow
+// anyone access to a restricted resource for a limited time without needing
+// a Google account or signing in. For more information about signed URLs,
+// see https://cloud.google.com/storage/docs/accesscontrol#Signed-URLs.
+func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) {
+	if opts == nil {
+		return "", errors.New("storage: missing required SignedURLOptions")
+	}
+	if opts.GoogleAccessID == "" {
+		return "", errors.New("storage: missing required GoogleAccessID")
+	}
+	if (opts.PrivateKey == nil) == (opts.SignBytes == nil) {
+		return "", errors.New("storage: exactly one of PrivateKey or SignBytes must be set")
+	}
+	if opts.Method == "" {
+		return "", errors.New("storage: missing required method option")
+	}
+	if opts.Expires.IsZero() {
+		return "", errors.New("storage: missing required expires option")
+	}
+	if opts.MD5 != "" {
+		md5, err := base64.StdEncoding.DecodeString(opts.MD5)
+		if err != nil || len(md5) != 16 {
+			return "", errors.New("storage: invalid MD5 checksum")
+		}
+	}
+	opts.Headers = sanitizeHeaders(opts.Headers)
+
+	signBytes := opts.SignBytes
+	if opts.PrivateKey != nil {
+		key, err := parseKey(opts.PrivateKey)
+		if err != nil {
+			return "", err
+		}
+		signBytes = func(b []byte) ([]byte, error) {
+			sum := sha256.Sum256(b)
+			return rsa.SignPKCS1v15(
+				rand.Reader,
+				key,
+				crypto.SHA256,
+				sum[:],
+			)
+		}
+	}
+
+	u := &url.URL{
+		Path: fmt.Sprintf("/%s/%s", bucket, name),
+	}
+
+	buf := &bytes.Buffer{}
+	fmt.Fprintf(buf, "%s\n", opts.Method)
+	fmt.Fprintf(buf, "%s\n", opts.MD5)
+	fmt.Fprintf(buf, "%s\n", opts.ContentType)
+	fmt.Fprintf(buf, "%d\n", opts.Expires.Unix())
+	if len(opts.Headers) > 0 {
+		fmt.Fprintf(buf, "%s\n", strings.Join(opts.Headers, "\n"))
+	}
+	fmt.Fprintf(buf, "%s", u.String())
+
+	b, err := signBytes(buf.Bytes())
+	if err != nil {
+		return "", err
+	}
+	encoded := base64.StdEncoding.EncodeToString(b)
+	u.Scheme = "https"
+	u.Host = "storage.googleapis.com"
+	q := u.Query()
+	q.Set("GoogleAccessId", opts.GoogleAccessID)
+	q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix()))
+	q.Set("Signature", encoded)
+	u.RawQuery = q.Encode()
+	return u.String(), nil
+}
+
+// ObjectHandle provides operations on an object in a Google Cloud Storage bucket.
+// Use BucketHandle.Object to get a handle.
+type ObjectHandle struct {
+	c              *Client
+	bucket         string
+	object         string
+	acl            ACLHandle
+	gen            int64 // a negative value indicates latest
+	conds          *Conditions
+	encryptionKey  []byte // AES-256 key
+	userProject    string // for requester-pays buckets
+	readCompressed bool   // Accept-Encoding: gzip
+}
+
+// ACL provides access to the object's access control list.
+// This controls who can read and write this object.
+// This call does not perform any network operations.
+func (o *ObjectHandle) ACL() *ACLHandle {
+	return &o.acl
+}
+
+// Generation returns a new ObjectHandle that operates on a specific generation
+// of the object.
+// By default, the handle operates on the latest generation. Not
+// all operations work when given a specific generation; check the API
+// endpoints at https://cloud.google.com/storage/docs/json_api/ for details.
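+// For example (an illustrative sketch):
+//
+//	attrs, err := obj.Generation(gen).Attrs(ctx)
+//
+// reads the metadata of that specific generation rather than the latest one.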
+func (o *ObjectHandle) Generation(gen int64) *ObjectHandle { + o2 := *o + o2.gen = gen + return &o2 +} + +// If returns a new ObjectHandle that applies a set of preconditions. +// Preconditions already set on the ObjectHandle are ignored. +// Operations on the new handle will only occur if the preconditions are +// satisfied. See https://cloud.google.com/storage/docs/generations-preconditions +// for more details. +func (o *ObjectHandle) If(conds Conditions) *ObjectHandle { + o2 := *o + o2.conds = &conds + return &o2 +} + +// Key returns a new ObjectHandle that uses the supplied encryption +// key to encrypt and decrypt the object's contents. +// +// Encryption key must be a 32-byte AES-256 key. +// See https://cloud.google.com/storage/docs/encryption for details. +func (o *ObjectHandle) Key(encryptionKey []byte) *ObjectHandle { + o2 := *o + o2.encryptionKey = encryptionKey + return &o2 +} + +// Attrs returns meta information about the object. +// ErrObjectNotExist will be returned if the object is not found. +func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Attrs") + defer func() { trace.EndSpan(ctx, err) }() + + if err := o.validate(); err != nil { + return nil, err + } + call := o.c.raw.Objects.Get(o.bucket, o.object).Projection("full").Context(ctx) + if err := applyConds("Attrs", o.gen, o.conds, call); err != nil { + return nil, err + } + if o.userProject != "" { + call.UserProject(o.userProject) + } + if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + setClientHeader(call.Header()) + err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + if err != nil { + return nil, err + } + return newObject(obj), nil +} + +// Update updates an object with the provided attributes. +// All zero-value attributes are ignored. +// ErrObjectNotExist will be returned if the object is not found. +func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (oa *ObjectAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Update") + defer func() { trace.EndSpan(ctx, err) }() + + if err := o.validate(); err != nil { + return nil, err + } + var attrs ObjectAttrs + // Lists of fields to send, and set to null, in the JSON. + var forceSendFields, nullFields []string + if uattrs.ContentType != nil { + attrs.ContentType = optional.ToString(uattrs.ContentType) + // For ContentType, sending the empty string is a no-op. + // Instead we send a null. + if attrs.ContentType == "" { + nullFields = append(nullFields, "ContentType") + } else { + forceSendFields = append(forceSendFields, "ContentType") + } + } + if uattrs.ContentLanguage != nil { + attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage) + // For ContentLanguage it's an error to send the empty string. + // Instead we send a null. 
+ if attrs.ContentLanguage == "" { + nullFields = append(nullFields, "ContentLanguage") + } else { + forceSendFields = append(forceSendFields, "ContentLanguage") + } + } + if uattrs.ContentEncoding != nil { + attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding) + forceSendFields = append(forceSendFields, "ContentEncoding") + } + if uattrs.ContentDisposition != nil { + attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition) + forceSendFields = append(forceSendFields, "ContentDisposition") + } + if uattrs.CacheControl != nil { + attrs.CacheControl = optional.ToString(uattrs.CacheControl) + forceSendFields = append(forceSendFields, "CacheControl") + } + if uattrs.Metadata != nil { + attrs.Metadata = uattrs.Metadata + if len(attrs.Metadata) == 0 { + // Sending the empty map is a no-op. We send null instead. + nullFields = append(nullFields, "Metadata") + } else { + forceSendFields = append(forceSendFields, "Metadata") + } + } + if uattrs.ACL != nil { + attrs.ACL = uattrs.ACL + // It's an error to attempt to delete the ACL, so + // we don't append to nullFields here. + forceSendFields = append(forceSendFields, "Acl") + } + rawObj := attrs.toRawObject(o.bucket) + rawObj.ForceSendFields = forceSendFields + rawObj.NullFields = nullFields + call := o.c.raw.Objects.Patch(o.bucket, o.object, rawObj).Projection("full").Context(ctx) + if err := applyConds("Update", o.gen, o.conds, call); err != nil { + return nil, err + } + if o.userProject != "" { + call.UserProject(o.userProject) + } + if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + setClientHeader(call.Header()) + err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + if err != nil { + return nil, err + } + return newObject(obj), nil +} + +// ObjectAttrsToUpdate is used to update the attributes of an object. +// Only fields set to non-nil values will be updated. +// Set a field to its zero value to delete it. +// +// For example, to change ContentType and delete ContentEncoding and +// Metadata, use +// ObjectAttrsToUpdate{ +// ContentType: "text/html", +// ContentEncoding: "", +// Metadata: map[string]string{}, +// } +type ObjectAttrsToUpdate struct { + ContentType optional.String + ContentLanguage optional.String + ContentEncoding optional.String + ContentDisposition optional.String + CacheControl optional.String + Metadata map[string]string // set to map[string]string{} to delete + ACL []ACLRule +} + +// Delete deletes the single specified object. +func (o *ObjectHandle) Delete(ctx context.Context) error { + if err := o.validate(); err != nil { + return err + } + call := o.c.raw.Objects.Delete(o.bucket, o.object).Context(ctx) + if err := applyConds("Delete", o.gen, o.conds, call); err != nil { + return err + } + if o.userProject != "" { + call.UserProject(o.userProject) + } + // Encryption doesn't apply to Delete. + setClientHeader(call.Header()) + err := runWithRetry(ctx, func() error { return call.Do() }) + switch e := err.(type) { + case nil: + return nil + case *googleapi.Error: + if e.Code == http.StatusNotFound { + return ErrObjectNotExist + } + } + return err +} + +// ReadCompressed when true causes the read to happen without decompressing. 
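+// For example (an illustrative sketch), assuming the object was uploaded with
+// Content-Encoding: gzip:
+//
+//	r, err := obj.ReadCompressed(true).NewReader(ctx)
+//
+// yields the stored gzip bytes rather than the decompressed content.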
+func (o *ObjectHandle) ReadCompressed(compressed bool) *ObjectHandle {
+	o2 := *o
+	o2.readCompressed = compressed
+	return &o2
+}
+
+// NewWriter returns a storage Writer that writes to the GCS object
+// associated with this ObjectHandle.
+//
+// A new object will be created; if an object with this name already exists,
+// it will be replaced. The new object will not be available (and any
+// previous object will remain) until Close has been called.
+//
+// Attributes can be set on the object by modifying the returned Writer's
+// ObjectAttrs field before the first call to Write. If no ContentType
+// attribute is specified, the content type will be automatically sniffed
+// using net/http.DetectContentType.
+//
+// It is the caller's responsibility to call Close when writing is done.
+func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer {
+	return &Writer{
+		ctx:         ctx,
+		o:           o,
+		donec:       make(chan struct{}),
+		ObjectAttrs: ObjectAttrs{Name: o.object},
+		ChunkSize:   googleapi.DefaultUploadChunkSize,
+	}
+}
+
+func (o *ObjectHandle) validate() error {
+	if o.bucket == "" {
+		return errors.New("storage: bucket name is empty")
+	}
+	if o.object == "" {
+		return errors.New("storage: object name is empty")
+	}
+	if !utf8.ValidString(o.object) {
+		return fmt.Errorf("storage: object name %q is not valid UTF-8", o.object)
+	}
+	return nil
+}
+
+// parseKey converts the binary contents of a private key file to an
+// *rsa.PrivateKey. It detects whether the private key is in a PEM container or
+// not. If so, it extracts the private key from PEM container before
+// conversion. It only supports PEM containers with no passphrase.
+func parseKey(key []byte) (*rsa.PrivateKey, error) {
+	if block, _ := pem.Decode(key); block != nil {
+		key = block.Bytes
+	}
+	parsedKey, err := x509.ParsePKCS8PrivateKey(key)
+	if err != nil {
+		parsedKey, err = x509.ParsePKCS1PrivateKey(key)
+		if err != nil {
+			return nil, err
+		}
+	}
+	parsed, ok := parsedKey.(*rsa.PrivateKey)
+	if !ok {
+		return nil, errors.New("storage: private key is invalid")
+	}
+	return parsed, nil
+}
+
+func toRawObjectACL(oldACL []ACLRule) []*raw.ObjectAccessControl {
+	var acl []*raw.ObjectAccessControl
+	if len(oldACL) > 0 {
+		acl = make([]*raw.ObjectAccessControl, len(oldACL))
+		for i, rule := range oldACL {
+			acl[i] = &raw.ObjectAccessControl{
+				Entity: string(rule.Entity),
+				Role:   string(rule.Role),
+			}
+		}
+	}
+	return acl
+}
+
+// toRawObject copies the editable attributes from o to the raw library's Object type.
+func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object {
+	acl := toRawObjectACL(o.ACL)
+	return &raw.Object{
+		Bucket:             bucket,
+		Name:               o.Name,
+		ContentType:        o.ContentType,
+		ContentEncoding:    o.ContentEncoding,
+		ContentLanguage:    o.ContentLanguage,
+		CacheControl:       o.CacheControl,
+		ContentDisposition: o.ContentDisposition,
+		StorageClass:       o.StorageClass,
+		Acl:                acl,
+		Metadata:           o.Metadata,
+	}
+}
+
+// ObjectAttrs represents the metadata for a Google Cloud Storage (GCS) object.
+type ObjectAttrs struct {
+	// Bucket is the name of the bucket containing this GCS object.
+	// This field is read-only.
+	Bucket string
+
+	// Name is the name of the object within the bucket.
+	// This field is read-only.
+	Name string
+
+	// ContentType is the MIME type of the object's content.
+	ContentType string
+
+	// ContentLanguage is the content language of the object's content.
+	ContentLanguage string
+
+	// CacheControl is the Cache-Control header to be sent in the response
+	// headers when serving the object data.
+	CacheControl string
+
+	// ACL is the list of access control rules for the object.
+	ACL []ACLRule
+
+	// Owner is the owner of the object. This field is read-only.
+	//
+	// If non-zero, it is in the form of "user-<userId>".
+	Owner string
+
+	// Size is the length of the object's content. This field is read-only.
+	Size int64
+
+	// ContentEncoding is the encoding of the object's content.
+	ContentEncoding string
+
+	// ContentDisposition is the optional Content-Disposition header of the object
+	// sent in the response headers.
+	ContentDisposition string
+
+	// MD5 is the MD5 hash of the object's content. This field is read-only,
+	// except when used from a Writer. If set on a Writer, the uploaded
+	// data is rejected if its MD5 hash does not match this field.
+	MD5 []byte
+
+	// CRC32C is the CRC32 checksum of the object's content using
+	// the Castagnoli93 polynomial. This field is read-only, except when
+	// used from a Writer. If set on a Writer and Writer.SendCRC32C
+	// is true, the uploaded data is rejected if its CRC32c hash does not
+	// match this field.
+	CRC32C uint32
+
+	// MediaLink is a URL to the object's content. This field is read-only.
+	MediaLink string
+
+	// Metadata represents user-provided metadata, in key/value pairs.
+	// It can be nil if no metadata is provided.
+	Metadata map[string]string
+
+	// Generation is the generation number of the object's content.
+	// This field is read-only.
+	Generation int64
+
+	// Metageneration is the version of the metadata for this
+	// object at this generation. This field is used for preconditions
+	// and for detecting changes in metadata. A metageneration number
+	// is only meaningful in the context of a particular generation
+	// of a particular object. This field is read-only.
+	Metageneration int64
+
+	// StorageClass is the storage class of the object.
+	// This value defines how objects in the bucket are stored and
+	// determines the SLA and the cost of storage. Typical values are
+	// "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD"
+	// and "DURABLE_REDUCED_AVAILABILITY".
+	// It defaults to "STANDARD", which is equivalent to "MULTI_REGIONAL"
+	// or "REGIONAL" depending on the bucket's location settings.
+	StorageClass string
+
+	// Created is the time the object was created. This field is read-only.
+	Created time.Time
+
+	// Deleted is the time the object was deleted.
+	// If not deleted, it is the zero value. This field is read-only.
+	Deleted time.Time
+
+	// Updated is the creation or modification time of the object.
+	// For buckets with versioning enabled, changing an object's
+	// metadata does not change this property. This field is read-only.
+	Updated time.Time
+
+	// CustomerKeySHA256 is the base64-encoded SHA-256 hash of the
+	// customer-supplied encryption key for the object. It is empty if there is
+	// no customer-supplied encryption key.
+	// See https://cloud.google.com/storage/docs/encryption for more about
+	// encryption in Google Cloud Storage.
+	CustomerKeySHA256 string
+
+	// Prefix is set only for ObjectAttrs which represent synthetic "directory
+	// entries" when iterating over buckets using Query.Delimiter. See
+	// ObjectIterator.Next. When set, no other fields in ObjectAttrs will be
+	// populated.
+	Prefix string
+}
+
+// convertTime converts a time in RFC3339 format to time.Time.
+// If any error occurs in parsing, the zero-value time.Time is silently returned. +func convertTime(t string) time.Time { + var r time.Time + if t != "" { + r, _ = time.Parse(time.RFC3339, t) + } + return r +} + +func newObject(o *raw.Object) *ObjectAttrs { + if o == nil { + return nil + } + acl := make([]ACLRule, len(o.Acl)) + for i, rule := range o.Acl { + acl[i] = ACLRule{ + Entity: ACLEntity(rule.Entity), + Role: ACLRole(rule.Role), + } + } + owner := "" + if o.Owner != nil { + owner = o.Owner.Entity + } + md5, _ := base64.StdEncoding.DecodeString(o.Md5Hash) + crc32c, _ := decodeUint32(o.Crc32c) + var sha256 string + if o.CustomerEncryption != nil { + sha256 = o.CustomerEncryption.KeySha256 + } + return &ObjectAttrs{ + Bucket: o.Bucket, + Name: o.Name, + ContentType: o.ContentType, + ContentLanguage: o.ContentLanguage, + CacheControl: o.CacheControl, + ACL: acl, + Owner: owner, + ContentEncoding: o.ContentEncoding, + ContentDisposition: o.ContentDisposition, + Size: int64(o.Size), + MD5: md5, + CRC32C: crc32c, + MediaLink: o.MediaLink, + Metadata: o.Metadata, + Generation: o.Generation, + Metageneration: o.Metageneration, + StorageClass: o.StorageClass, + CustomerKeySHA256: sha256, + Created: convertTime(o.TimeCreated), + Deleted: convertTime(o.TimeDeleted), + Updated: convertTime(o.Updated), + } +} + +// Decode a uint32 encoded in Base64 in big-endian byte order. +func decodeUint32(b64 string) (uint32, error) { + d, err := base64.StdEncoding.DecodeString(b64) + if err != nil { + return 0, err + } + if len(d) != 4 { + return 0, fmt.Errorf("storage: %q does not encode a 32-bit value", d) + } + return uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3]), nil +} + +// Encode a uint32 as Base64 in big-endian byte order. +func encodeUint32(u uint32) string { + b := []byte{byte(u >> 24), byte(u >> 16), byte(u >> 8), byte(u)} + return base64.StdEncoding.EncodeToString(b) +} + +// Query represents a query to filter objects from a bucket. +type Query struct { + // Delimiter returns results in a directory-like fashion. + // Results will contain only objects whose names, aside from the + // prefix, do not contain delimiter. Objects whose names, + // aside from the prefix, contain delimiter will have their name, + // truncated after the delimiter, returned in prefixes. + // Duplicate prefixes are omitted. + // Optional. + Delimiter string + + // Prefix is the prefix filter to query objects + // whose names begin with this prefix. + // Optional. + Prefix string + + // Versions indicates whether multiple versions of the same + // object will be included in the results. + Versions bool +} + +// contentTyper implements ContentTyper to enable an +// io.ReadCloser to specify its MIME type. +type contentTyper struct { + io.Reader + t string +} + +func (c *contentTyper) ContentType() string { + return c.t +} + +// Conditions constrain methods to act on specific generations of +// objects. +// +// The zero value is an empty set of constraints. Not all conditions or +// combinations of conditions are applicable to all methods. +// See https://cloud.google.com/storage/docs/generations-preconditions +// for details on how these operate. +type Conditions struct { + // Generation constraints. + // At most one of the following can be set to a non-zero value. + + // GenerationMatch specifies that the object must have the given generation + // for the operation to occur. + // If GenerationMatch is zero, it has no effect. 
+ // Use DoesNotExist to specify that the object does not exist in the bucket. + GenerationMatch int64 + + // GenerationNotMatch specifies that the object must not have the given + // generation for the operation to occur. + // If GenerationNotMatch is zero, it has no effect. + GenerationNotMatch int64 + + // DoesNotExist specifies that the object must not exist in the bucket for + // the operation to occur. + // If DoesNotExist is false, it has no effect. + DoesNotExist bool + + // Metadata generation constraints. + // At most one of the following can be set to a non-zero value. + + // MetagenerationMatch specifies that the object must have the given + // metageneration for the operation to occur. + // If MetagenerationMatch is zero, it has no effect. + MetagenerationMatch int64 + + // MetagenerationNotMatch specifies that the object must not have the given + // metageneration for the operation to occur. + // If MetagenerationNotMatch is zero, it has no effect. + MetagenerationNotMatch int64 +} + +func (c *Conditions) validate(method string) error { + if *c == (Conditions{}) { + return fmt.Errorf("storage: %s: empty conditions", method) + } + if !c.isGenerationValid() { + return fmt.Errorf("storage: %s: multiple conditions specified for generation", method) + } + if !c.isMetagenerationValid() { + return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method) + } + return nil +} + +func (c *Conditions) isGenerationValid() bool { + n := 0 + if c.GenerationMatch != 0 { + n++ + } + if c.GenerationNotMatch != 0 { + n++ + } + if c.DoesNotExist { + n++ + } + return n <= 1 +} + +func (c *Conditions) isMetagenerationValid() bool { + return c.MetagenerationMatch == 0 || c.MetagenerationNotMatch == 0 +} + +// applyConds modifies the provided call using the conditions in conds. +// call is something that quacks like a *raw.WhateverCall. 
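+// For example (an illustrative sketch): applyConds("Attrs", 1234,
+// &Conditions{GenerationMatch: 5678}, call) invokes, via reflection,
+// call.Generation(1234) followed by call.IfGenerationMatch(5678), provided
+// the call type defines those builder methods.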
+func applyConds(method string, gen int64, conds *Conditions, call interface{}) error { + cval := reflect.ValueOf(call) + if gen >= 0 { + if !setConditionField(cval, "Generation", gen) { + return fmt.Errorf("storage: %s: generation not supported", method) + } + } + if conds == nil { + return nil + } + if err := conds.validate(method); err != nil { + return err + } + switch { + case conds.GenerationMatch != 0: + if !setConditionField(cval, "IfGenerationMatch", conds.GenerationMatch) { + return fmt.Errorf("storage: %s: ifGenerationMatch not supported", method) + } + case conds.GenerationNotMatch != 0: + if !setConditionField(cval, "IfGenerationNotMatch", conds.GenerationNotMatch) { + return fmt.Errorf("storage: %s: ifGenerationNotMatch not supported", method) + } + case conds.DoesNotExist: + if !setConditionField(cval, "IfGenerationMatch", int64(0)) { + return fmt.Errorf("storage: %s: DoesNotExist not supported", method) + } + } + switch { + case conds.MetagenerationMatch != 0: + if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method) + } + case conds.MetagenerationNotMatch != 0: + if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method) + } + } + return nil +} + +func applySourceConds(gen int64, conds *Conditions, call *raw.ObjectsRewriteCall) error { + if gen >= 0 { + call.SourceGeneration(gen) + } + if conds == nil { + return nil + } + if err := conds.validate("CopyTo source"); err != nil { + return err + } + switch { + case conds.GenerationMatch != 0: + call.IfSourceGenerationMatch(conds.GenerationMatch) + case conds.GenerationNotMatch != 0: + call.IfSourceGenerationNotMatch(conds.GenerationNotMatch) + case conds.DoesNotExist: + call.IfSourceGenerationMatch(0) + } + switch { + case conds.MetagenerationMatch != 0: + call.IfSourceMetagenerationMatch(conds.MetagenerationMatch) + case conds.MetagenerationNotMatch != 0: + call.IfSourceMetagenerationNotMatch(conds.MetagenerationNotMatch) + } + return nil +} + +// setConditionField sets a field on a *raw.WhateverCall. +// We can't use anonymous interfaces because the return type is +// different, since the field setters are builders. +func setConditionField(call reflect.Value, name string, value interface{}) bool { + m := call.MethodByName(name) + if !m.IsValid() { + return false + } + m.Call([]reflect.Value{reflect.ValueOf(value)}) + return true +} + +// conditionsQuery returns the generation and conditions as a URL query +// string suitable for URL.RawQuery. It assumes that the conditions +// have been validated. +func conditionsQuery(gen int64, conds *Conditions) string { + // URL escapes are elided because integer strings are URL-safe. + var buf []byte + + appendParam := func(s string, n int64) { + if len(buf) > 0 { + buf = append(buf, '&') + } + buf = append(buf, s...) 
+		buf = strconv.AppendInt(buf, n, 10)
+	}
+
+	if gen >= 0 {
+		appendParam("generation=", gen)
+	}
+	if conds == nil {
+		return string(buf)
+	}
+	switch {
+	case conds.GenerationMatch != 0:
+		appendParam("ifGenerationMatch=", conds.GenerationMatch)
+	case conds.GenerationNotMatch != 0:
+		appendParam("ifGenerationNotMatch=", conds.GenerationNotMatch)
+	case conds.DoesNotExist:
+		appendParam("ifGenerationMatch=", 0)
+	}
+	switch {
+	case conds.MetagenerationMatch != 0:
+		appendParam("ifMetagenerationMatch=", conds.MetagenerationMatch)
+	case conds.MetagenerationNotMatch != 0:
+		appendParam("ifMetagenerationNotMatch=", conds.MetagenerationNotMatch)
+	}
+	return string(buf)
+}
+
+// composeSourceObj wraps a *raw.ComposeRequestSourceObjects, but adds the methods
+// that setConditionField searches for by name.
+type composeSourceObj struct {
+	src *raw.ComposeRequestSourceObjects
+}
+
+func (c composeSourceObj) Generation(gen int64) {
+	c.src.Generation = gen
+}
+
+func (c composeSourceObj) IfGenerationMatch(gen int64) {
+	// It's safe to overwrite ObjectPreconditions, since its only field is
+	// IfGenerationMatch.
+	c.src.ObjectPreconditions = &raw.ComposeRequestSourceObjectsObjectPreconditions{
+		IfGenerationMatch: gen,
+	}
+}
+
+func setEncryptionHeaders(headers http.Header, key []byte, copySource bool) error {
+	if key == nil {
+		return nil
+	}
+	// TODO(jbd): Ask the API team to return a more user-friendly error
+	// and avoid doing this check at the client level.
+	if len(key) != 32 {
+		return errors.New("storage: not a 32-byte AES-256 key")
+	}
+	var cs string
+	if copySource {
+		cs = "copy-source-"
+	}
+	headers.Set("x-goog-"+cs+"encryption-algorithm", "AES256")
+	headers.Set("x-goog-"+cs+"encryption-key", base64.StdEncoding.EncodeToString(key))
+	keyHash := sha256.Sum256(key)
+	headers.Set("x-goog-"+cs+"encryption-key-sha256", base64.StdEncoding.EncodeToString(keyHash[:]))
+	return nil
+}
+
+// TODO(jbd): Add storage.objects.watch.
diff --git a/vendor/cloud.google.com/go/storage/storage_test.go b/vendor/cloud.google.com/go/storage/storage_test.go
new file mode 100644
index 0000000..23622a0
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/storage_test.go
@@ -0,0 +1,912 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
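+
+// The SignedURL tests below verify signatures over the canonical string-to-sign
+// assembled by SignedURL. As a sketch (assuming no extension headers), the
+// signed payload has the form:
+//
+//	<HTTP method>\n
+//	<MD5 checksum>\n
+//	<Content-Type>\n
+//	<expiry as Unix seconds>\n
+//	/<bucket>/<object>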
+ +package storage + +import ( + "crypto/tls" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "net/http/httptest" + "net/url" + "regexp" + "strings" + "testing" + "time" + + "cloud.google.com/go/iam" + "cloud.google.com/go/internal/testutil" + + "golang.org/x/net/context" + "google.golang.org/api/googleapi" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + raw "google.golang.org/api/storage/v1" +) + +func TestHeaderSanitization(t *testing.T) { + t.Parallel() + var tests = []struct { + desc string + in []string + want []string + }{ + { + desc: "already sanitized headers should not be modified", + in: []string{"x-goog-header1:true", "x-goog-header2:0"}, + want: []string{"x-goog-header1:true", "x-goog-header2:0"}, + }, + { + desc: "sanitized headers should be sorted", + in: []string{"x-goog-header2:0", "x-goog-header1:true"}, + want: []string{"x-goog-header1:true", "x-goog-header2:0"}, + }, + { + desc: "non-canonical headers should be removed", + in: []string{"x-goog-header1:true", "x-goog-no-value", "non-canonical-header:not-of-use"}, + want: []string{"x-goog-header1:true"}, + }, + { + desc: "excluded canonical headers should be removed", + in: []string{"x-goog-header1:true", "x-goog-encryption-key:my_key", "x-goog-encryption-key-sha256:my_sha256"}, + want: []string{"x-goog-header1:true"}, + }, + { + desc: "dirty headers should be formatted correctly", + in: []string{" x-goog-header1 : \textra-spaces ", "X-Goog-Header2:CamelCaseValue"}, + want: []string{"x-goog-header1:extra-spaces", "x-goog-header2:CamelCaseValue"}, + }, + { + desc: "duplicate headers should be merged", + in: []string{"x-goog-header1:value1", "X-Goog-Header1:value2"}, + want: []string{"x-goog-header1:value1,value2"}, + }, + } + for _, test := range tests { + got := sanitizeHeaders(test.in) + if !testutil.Equal(got, test.want) { + t.Errorf("%s: got %v, want %v", test.desc, got, test.want) + } + } +} + +func TestSignedURL(t *testing.T) { + t.Parallel() + expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00") + url, err := SignedURL("bucket-name", "object-name", &SignedURLOptions{ + GoogleAccessID: "xxx@clientid", + PrivateKey: dummyKey("rsa"), + Method: "GET", + MD5: "ICy5YqxZB1uWSwcVLSNLcA==", + Expires: expires, + ContentType: "application/json", + Headers: []string{"x-goog-header1:true", "x-goog-header2:false"}, + }) + if err != nil { + t.Error(err) + } + want := "https://storage.googleapis.com/bucket-name/object-name?" 
+ + "Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" + + "RfsHlPtbB2JUYjzCgNr2Mi%2BjggdEuL1V7E6N9o6aaqwVLBDuTv3I0%2B9" + + "x94E6rmmr%2FVgnmZigkIUxX%2Blfl7LgKf30uPGLt0mjKGH2p7r9ey1ONJ" + + "%2BhVec23FnTRcSgopglvHPuCMWU2oNJE%2F1y8EwWE27baHrG1RhRHbLVF" + + "bPpLZ9xTRFK20pluIkfHV00JGljB1imqQHXM%2B2XPWqBngLr%2FwqxLN7i" + + "FcUiqR8xQEOHF%2F2e7fbkTHPNq4TazaLZ8X0eZ3eFdJ55A5QmNi8atlN4W" + + "5q7Hvs0jcxElG3yqIbx439A995BkspLiAcA%2Fo4%2BxAwEMkGLICdbvakq" + + "3eEprNCojw%3D%3D" + if url != want { + t.Fatalf("Unexpected signed URL; found %v", url) + } +} + +func TestSignedURL_PEMPrivateKey(t *testing.T) { + t.Parallel() + expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00") + url, err := SignedURL("bucket-name", "object-name", &SignedURLOptions{ + GoogleAccessID: "xxx@clientid", + PrivateKey: dummyKey("pem"), + Method: "GET", + MD5: "ICy5YqxZB1uWSwcVLSNLcA==", + Expires: expires, + ContentType: "application/json", + Headers: []string{"x-goog-header1:true", "x-goog-header2:false"}, + }) + if err != nil { + t.Error(err) + } + want := "https://storage.googleapis.com/bucket-name/object-name?" + + "Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" + + "TiyKD%2FgGb6Kh0kkb2iF%2FfF%2BnTx7L0J4YiZua8AcTmnidutePEGIU5" + + "NULYlrGl6l52gz4zqFb3VFfIRTcPXMdXnnFdMCDhz2QuJBUpsU1Ai9zlyTQ" + + "dkb6ShG03xz9%2BEXWAUQO4GBybJw%2FULASuv37xA00SwLdkqj8YdyS5II" + + "1lro%3D" + if url != want { + t.Fatalf("Unexpected signed URL; found %v", url) + } +} + +func TestSignedURL_SignBytes(t *testing.T) { + t.Parallel() + expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00") + url, err := SignedURL("bucket-name", "object-name", &SignedURLOptions{ + GoogleAccessID: "xxx@clientid", + SignBytes: func(b []byte) ([]byte, error) { + return []byte("signed"), nil + }, + Method: "GET", + MD5: "ICy5YqxZB1uWSwcVLSNLcA==", + Expires: expires, + ContentType: "application/json", + Headers: []string{"x-goog-header1:true", "x-goog-header2:false"}, + }) + if err != nil { + t.Error(err) + } + want := "https://storage.googleapis.com/bucket-name/object-name?" + + "Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" + + "c2lnbmVk" // base64('signed') == 'c2lnbmVk' + if url != want { + t.Fatalf("Unexpected signed URL\ngot: %q\nwant: %q", url, want) + } +} + +func TestSignedURL_URLUnsafeObjectName(t *testing.T) { + t.Parallel() + expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00") + url, err := SignedURL("bucket-name", "object name界", &SignedURLOptions{ + GoogleAccessID: "xxx@clientid", + PrivateKey: dummyKey("pem"), + Method: "GET", + MD5: "ICy5YqxZB1uWSwcVLSNLcA==", + Expires: expires, + ContentType: "application/json", + Headers: []string{"x-goog-header1:true", "x-goog-header2:false"}, + }) + if err != nil { + t.Error(err) + } + want := "https://storage.googleapis.com/bucket-name/object%20name%E7%95%8C?" 
+		"Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=bxVH1%2Bl%2" +
+		"BSxpnj3XuqKz6mOFk6M94Y%2B4w85J6FCmJan%2FNhGSpndP6fAw1uLHlOn%2F8xUaY%2F" +
+		"SfZ5GzcQ%2BbxOL1WA37yIwZ7xgLYlO%2ByAi3GuqMUmHZiNCai28emODXQ8RtWHvgv6dE" +
+		"SQ%2F0KpDMIWW7rYCaUa63UkUyeSQsKhrVqkIA%3D"
+	if url != want {
+		t.Fatalf("Unexpected signed URL; found %v", url)
+	}
+}
+
+func TestSignedURL_MissingOptions(t *testing.T) {
+	t.Parallel()
+	pk := dummyKey("rsa")
+	expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00")
+	var tests = []struct {
+		opts   *SignedURLOptions
+		errMsg string
+	}{
+		{
+			&SignedURLOptions{},
+			"missing required GoogleAccessID",
+		},
+		{
+			&SignedURLOptions{GoogleAccessID: "access_id"},
+			"exactly one of PrivateKey or SignBytes must be set",
+		},
+		{
+			&SignedURLOptions{
+				GoogleAccessID: "access_id",
+				SignBytes:      func(b []byte) ([]byte, error) { return b, nil },
+				PrivateKey:     pk,
+			},
+			"exactly one of PrivateKey or SignBytes must be set",
+		},
+		{
+			&SignedURLOptions{
+				GoogleAccessID: "access_id",
+				PrivateKey:     pk,
+			},
+			"missing required method",
+		},
+		{
+			&SignedURLOptions{
+				GoogleAccessID: "access_id",
+				SignBytes:      func(b []byte) ([]byte, error) { return b, nil },
+			},
+			"missing required method",
+		},
+		{
+			&SignedURLOptions{
+				GoogleAccessID: "access_id",
+				PrivateKey:     pk,
+				Method:         "PUT",
+			},
+			"missing required expires",
+		},
+		{
+			&SignedURLOptions{
+				GoogleAccessID: "access_id",
+				PrivateKey:     pk,
+				Method:         "PUT",
+				Expires:        expires,
+				MD5:            "invalid",
+			},
+			"invalid MD5 checksum",
+		},
+	}
+	for _, test := range tests {
+		_, err := SignedURL("bucket", "name", test.opts)
+		if !strings.Contains(err.Error(), test.errMsg) {
+			t.Errorf("expected err: %v, found: %v", test.errMsg, err)
+		}
+	}
+}
+
+func dummyKey(kind string) []byte {
+	slurp, err := ioutil.ReadFile(fmt.Sprintf("./testdata/dummy_%s", kind))
+	if err != nil {
+		log.Fatal(err)
+	}
+	return slurp
+}
+
+func TestCopyToMissingFields(t *testing.T) {
+	t.Parallel()
+	var tests = []struct {
+		srcBucket, srcName, destBucket, destName string
+		errMsg                                   string
+	}{
+		{
+			"mybucket", "", "mybucket", "destname",
+			"name is empty",
+		},
+		{
+			"mybucket", "srcname", "mybucket", "",
+			"name is empty",
+		},
+		{
+			"", "srcfile", "mybucket", "destname",
+			"name is empty",
+		},
+		{
+			"mybucket", "srcfile", "", "destname",
+			"name is empty",
+		},
+	}
+	ctx := context.Background()
+	client, err := NewClient(ctx, option.WithHTTPClient(&http.Client{Transport: &fakeTransport{}}))
+	if err != nil {
+		panic(err)
+	}
+	for i, test := range tests {
+		src := client.Bucket(test.srcBucket).Object(test.srcName)
+		dst := client.Bucket(test.destBucket).Object(test.destName)
+		_, err := dst.CopierFrom(src).Run(ctx)
+		if !strings.Contains(err.Error(), test.errMsg) {
+			t.Errorf("CopyTo test #%v:\ngot err  %q\nwant err %q", i, err, test.errMsg)
+		}
+	}
+}
+
+func TestObjectNames(t *testing.T) {
+	t.Parallel()
+	// Naming requirements: https://cloud.google.com/storage/docs/bucket-naming
+	const maxLegalLength = 1024
+
+	type testT struct {
+		name, want string
+	}
+	tests := []testT{
+		// Embedded characters important in URLs.
+		{"foo % bar", "foo%20%25%20bar"},
+		{"foo ?
bar", "foo%20%3F%20bar"}, + {"foo / bar", "foo%20/%20bar"}, + {"foo %?/ bar", "foo%20%25%3F/%20bar"}, + + // Non-Roman scripts + {"타코", "%ED%83%80%EC%BD%94"}, + {"世界", "%E4%B8%96%E7%95%8C"}, + + // Longest legal name + {strings.Repeat("a", maxLegalLength), strings.Repeat("a", maxLegalLength)}, + + // Line terminators besides CR and LF: https://en.wikipedia.org/wiki/Newline#Unicode + {"foo \u000b bar", "foo%20%0B%20bar"}, + {"foo \u000c bar", "foo%20%0C%20bar"}, + {"foo \u0085 bar", "foo%20%C2%85%20bar"}, + {"foo \u2028 bar", "foo%20%E2%80%A8%20bar"}, + {"foo \u2029 bar", "foo%20%E2%80%A9%20bar"}, + + // Null byte. + {"foo \u0000 bar", "foo%20%00%20bar"}, + + // Non-control characters that are discouraged, but not forbidden, according to the documentation. + {"foo # bar", "foo%20%23%20bar"}, + {"foo []*? bar", "foo%20%5B%5D%2A%3F%20bar"}, + + // Angstrom symbol singleton and normalized forms: http://unicode.org/reports/tr15/ + {"foo \u212b bar", "foo%20%E2%84%AB%20bar"}, + {"foo \u0041\u030a bar", "foo%20A%CC%8A%20bar"}, + {"foo \u00c5 bar", "foo%20%C3%85%20bar"}, + + // Hangul separating jamo: http://www.unicode.org/versions/Unicode7.0.0/ch18.pdf (Table 18-10) + {"foo \u3131\u314f bar", "foo%20%E3%84%B1%E3%85%8F%20bar"}, + {"foo \u1100\u1161 bar", "foo%20%E1%84%80%E1%85%A1%20bar"}, + {"foo \uac00 bar", "foo%20%EA%B0%80%20bar"}, + } + + // C0 control characters not forbidden by the docs. + var runes []rune + for r := rune(0x01); r <= rune(0x1f); r++ { + if r != '\u000a' && r != '\u000d' { + runes = append(runes, r) + } + } + tests = append(tests, testT{fmt.Sprintf("foo %s bar", string(runes)), "foo%20%01%02%03%04%05%06%07%08%09%0B%0C%0E%0F%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E%1F%20bar"}) + + // C1 control characters, plus DEL. + runes = nil + for r := rune(0x7f); r <= rune(0x9f); r++ { + runes = append(runes, r) + } + tests = append(tests, testT{fmt.Sprintf("foo %s bar", string(runes)), "foo%20%7F%C2%80%C2%81%C2%82%C2%83%C2%84%C2%85%C2%86%C2%87%C2%88%C2%89%C2%8A%C2%8B%C2%8C%C2%8D%C2%8E%C2%8F%C2%90%C2%91%C2%92%C2%93%C2%94%C2%95%C2%96%C2%97%C2%98%C2%99%C2%9A%C2%9B%C2%9C%C2%9D%C2%9E%C2%9F%20bar"}) + + opts := &SignedURLOptions{ + GoogleAccessID: "xxx@clientid", + PrivateKey: dummyKey("rsa"), + Method: "GET", + MD5: "ICy5YqxZB1uWSwcVLSNLcA==", + Expires: time.Date(2002, time.October, 2, 10, 0, 0, 0, time.UTC), + ContentType: "application/json", + Headers: []string{"x-goog-header1", "x-goog-header2"}, + } + + for _, test := range tests { + g, err := SignedURL("bucket-name", test.name, opts) + if err != nil { + t.Errorf("SignedURL(%q) err=%v, want nil", test.name, err) + } + if w := "/bucket-name/" + test.want; !strings.Contains(g, w) { + t.Errorf("SignedURL(%q)=%q, want substring %q", test.name, g, w) + } + } +} + +func TestCondition(t *testing.T) { + t.Parallel() + gotReq := make(chan *http.Request, 1) + hc, close := newTestServer(func(w http.ResponseWriter, r *http.Request) { + io.Copy(ioutil.Discard, r.Body) + gotReq <- r + w.WriteHeader(200) + }) + defer close() + ctx := context.Background() + c, err := NewClient(ctx, option.WithHTTPClient(hc)) + if err != nil { + t.Fatal(err) + } + + obj := c.Bucket("buck").Object("obj") + dst := c.Bucket("dstbuck").Object("dst") + tests := []struct { + fn func() + want string + }{ + { + func() { obj.Generation(1234).NewReader(ctx) }, + "GET /buck/obj?generation=1234", + }, + { + func() { obj.If(Conditions{GenerationMatch: 1234}).NewReader(ctx) }, + "GET /buck/obj?ifGenerationMatch=1234", + }, + { + func() { obj.If(Conditions{GenerationNotMatch: 
1234}).NewReader(ctx) }, + "GET /buck/obj?ifGenerationNotMatch=1234", + }, + { + func() { obj.If(Conditions{MetagenerationMatch: 1234}).NewReader(ctx) }, + "GET /buck/obj?ifMetagenerationMatch=1234", + }, + { + func() { obj.If(Conditions{MetagenerationNotMatch: 1234}).NewReader(ctx) }, + "GET /buck/obj?ifMetagenerationNotMatch=1234", + }, + { + func() { obj.If(Conditions{MetagenerationNotMatch: 1234}).Attrs(ctx) }, + "GET /storage/v1/b/buck/o/obj?alt=json&ifMetagenerationNotMatch=1234&projection=full", + }, + + { + func() { obj.If(Conditions{MetagenerationMatch: 1234}).Update(ctx, ObjectAttrsToUpdate{}) }, + "PATCH /storage/v1/b/buck/o/obj?alt=json&ifMetagenerationMatch=1234&projection=full", + }, + { + func() { obj.Generation(1234).Delete(ctx) }, + "DELETE /storage/v1/b/buck/o/obj?alt=json&generation=1234", + }, + { + func() { + w := obj.If(Conditions{GenerationMatch: 1234}).NewWriter(ctx) + w.ContentType = "text/plain" + w.Close() + }, + "POST /upload/storage/v1/b/buck/o?alt=json&ifGenerationMatch=1234&projection=full&uploadType=multipart", + }, + { + func() { + w := obj.If(Conditions{DoesNotExist: true}).NewWriter(ctx) + w.ContentType = "text/plain" + w.Close() + }, + "POST /upload/storage/v1/b/buck/o?alt=json&ifGenerationMatch=0&projection=full&uploadType=multipart", + }, + { + func() { + dst.If(Conditions{MetagenerationMatch: 5678}).CopierFrom(obj.If(Conditions{GenerationMatch: 1234})).Run(ctx) + }, + "POST /storage/v1/b/buck/o/obj/rewriteTo/b/dstbuck/o/dst?alt=json&ifMetagenerationMatch=5678&ifSourceGenerationMatch=1234&projection=full", + }, + } + + for i, tt := range tests { + tt.fn() + select { + case r := <-gotReq: + got := r.Method + " " + r.RequestURI + if got != tt.want { + t.Errorf("%d. RequestURI = %q; want %q", i, got, tt.want) + } + case <-time.After(5 * time.Second): + t.Fatalf("%d. timeout", i) + } + if err != nil { + t.Fatal(err) + } + } + + // Test an error, too: + err = obj.Generation(1234).NewWriter(ctx).Close() + if err == nil || !strings.Contains(err.Error(), "NewWriter: generation not supported") { + t.Errorf("want error about unsupported generation; got %v", err) + } +} + +func TestConditionErrors(t *testing.T) { + t.Parallel() + for _, conds := range []Conditions{ + {GenerationMatch: 0}, + {DoesNotExist: false}, // same as above, actually + {GenerationMatch: 1, GenerationNotMatch: 2}, + {GenerationNotMatch: 2, DoesNotExist: true}, + {MetagenerationMatch: 1, MetagenerationNotMatch: 2}, + } { + if err := conds.validate(""); err == nil { + t.Errorf("%+v: got nil, want error", conds) + } + } +} + +// Test object compose. 
+func TestObjectCompose(t *testing.T) { + t.Parallel() + gotURL := make(chan string, 1) + gotBody := make(chan []byte, 1) + hc, close := newTestServer(func(w http.ResponseWriter, r *http.Request) { + body, _ := ioutil.ReadAll(r.Body) + gotURL <- r.URL.String() + gotBody <- body + w.Write([]byte("{}")) + }) + defer close() + ctx := context.Background() + c, err := NewClient(ctx, option.WithHTTPClient(hc)) + if err != nil { + t.Fatal(err) + } + + testCases := []struct { + desc string + dst *ObjectHandle + srcs []*ObjectHandle + attrs *ObjectAttrs + wantReq raw.ComposeRequest + wantURL string + wantErr bool + }{ + { + desc: "basic case", + dst: c.Bucket("foo").Object("bar"), + srcs: []*ObjectHandle{ + c.Bucket("foo").Object("baz"), + c.Bucket("foo").Object("quux"), + }, + wantURL: "/storage/v1/b/foo/o/bar/compose?alt=json", + wantReq: raw.ComposeRequest{ + Destination: &raw.Object{Bucket: "foo"}, + SourceObjects: []*raw.ComposeRequestSourceObjects{ + {Name: "baz"}, + {Name: "quux"}, + }, + }, + }, + { + desc: "with object attrs", + dst: c.Bucket("foo").Object("bar"), + srcs: []*ObjectHandle{ + c.Bucket("foo").Object("baz"), + c.Bucket("foo").Object("quux"), + }, + attrs: &ObjectAttrs{ + Name: "not-bar", + ContentType: "application/json", + }, + wantURL: "/storage/v1/b/foo/o/bar/compose?alt=json", + wantReq: raw.ComposeRequest{ + Destination: &raw.Object{ + Bucket: "foo", + Name: "not-bar", + ContentType: "application/json", + }, + SourceObjects: []*raw.ComposeRequestSourceObjects{ + {Name: "baz"}, + {Name: "quux"}, + }, + }, + }, + { + desc: "with conditions", + dst: c.Bucket("foo").Object("bar").If(Conditions{ + GenerationMatch: 12, + MetagenerationMatch: 34, + }), + srcs: []*ObjectHandle{ + c.Bucket("foo").Object("baz").Generation(56), + c.Bucket("foo").Object("quux").If(Conditions{GenerationMatch: 78}), + }, + wantURL: "/storage/v1/b/foo/o/bar/compose?alt=json&ifGenerationMatch=12&ifMetagenerationMatch=34", + wantReq: raw.ComposeRequest{ + Destination: &raw.Object{Bucket: "foo"}, + SourceObjects: []*raw.ComposeRequestSourceObjects{ + { + Name: "baz", + Generation: 56, + }, + { + Name: "quux", + ObjectPreconditions: &raw.ComposeRequestSourceObjectsObjectPreconditions{ + IfGenerationMatch: 78, + }, + }, + }, + }, + }, + { + desc: "no sources", + dst: c.Bucket("foo").Object("bar"), + wantErr: true, + }, + { + desc: "destination, no bucket", + dst: c.Bucket("").Object("bar"), + srcs: []*ObjectHandle{ + c.Bucket("foo").Object("baz"), + }, + wantErr: true, + }, + { + desc: "destination, no object", + dst: c.Bucket("foo").Object(""), + srcs: []*ObjectHandle{ + c.Bucket("foo").Object("baz"), + }, + wantErr: true, + }, + { + desc: "source, different bucket", + dst: c.Bucket("foo").Object("bar"), + srcs: []*ObjectHandle{ + c.Bucket("otherbucket").Object("baz"), + }, + wantErr: true, + }, + { + desc: "source, no object", + dst: c.Bucket("foo").Object("bar"), + srcs: []*ObjectHandle{ + c.Bucket("foo").Object(""), + }, + wantErr: true, + }, + { + desc: "destination, bad condition", + dst: c.Bucket("foo").Object("bar").Generation(12), + srcs: []*ObjectHandle{ + c.Bucket("foo").Object("baz"), + }, + wantErr: true, + }, + { + desc: "source, bad condition", + dst: c.Bucket("foo").Object("bar"), + srcs: []*ObjectHandle{ + c.Bucket("foo").Object("baz").If(Conditions{MetagenerationMatch: 12}), + }, + wantErr: true, + }, + } + + for _, tt := range testCases { + composer := tt.dst.ComposerFrom(tt.srcs...) 
+ if tt.attrs != nil { + composer.ObjectAttrs = *tt.attrs + } + _, err := composer.Run(ctx) + if gotErr := err != nil; gotErr != tt.wantErr { + t.Errorf("%s: got error %v; want err %t", tt.desc, err, tt.wantErr) + continue + } + if tt.wantErr { + continue + } + url, body := <-gotURL, <-gotBody + if url != tt.wantURL { + t.Errorf("%s: request URL\ngot %q\nwant %q", tt.desc, url, tt.wantURL) + } + var req raw.ComposeRequest + if err := json.Unmarshal(body, &req); err != nil { + t.Errorf("%s: json.Unmarshal %v (body %s)", tt.desc, err, body) + } + if !testutil.Equal(req, tt.wantReq) { + // Print to JSON. + wantReq, _ := json.Marshal(tt.wantReq) + t.Errorf("%s: request body\ngot %s\nwant %s", tt.desc, body, wantReq) + } + } +} + +// Test that ObjectIterator's Next and NextPage methods correctly terminate +// if there is nothing to iterate over. +func TestEmptyObjectIterator(t *testing.T) { + t.Parallel() + hClient, close := newTestServer(func(w http.ResponseWriter, r *http.Request) { + io.Copy(ioutil.Discard, r.Body) + fmt.Fprintf(w, "{}") + }) + defer close() + ctx := context.Background() + client, err := NewClient(ctx, option.WithHTTPClient(hClient)) + if err != nil { + t.Fatal(err) + } + it := client.Bucket("b").Objects(ctx, nil) + _, err = it.Next() + if err != iterator.Done { + t.Errorf("got %v, want Done", err) + } +} + +// Test that BucketIterator's Next method correctly terminates if there is +// nothing to iterate over. +func TestEmptyBucketIterator(t *testing.T) { + t.Parallel() + hClient, close := newTestServer(func(w http.ResponseWriter, r *http.Request) { + io.Copy(ioutil.Discard, r.Body) + fmt.Fprintf(w, "{}") + }) + defer close() + ctx := context.Background() + client, err := NewClient(ctx, option.WithHTTPClient(hClient)) + if err != nil { + t.Fatal(err) + } + it := client.Buckets(ctx, "project") + _, err = it.Next() + if err != iterator.Done { + t.Errorf("got %v, want Done", err) + } + +} + +func TestCodecUint32(t *testing.T) { + t.Parallel() + for _, u := range []uint32{0, 1, 256, 0xFFFFFFFF} { + s := encodeUint32(u) + d, err := decodeUint32(s) + if err != nil { + t.Fatal(err) + } + if d != u { + t.Errorf("got %d, want input %d", d, u) + } + } +} + +func TestBucketAttrs(t *testing.T) { + for _, c := range []struct { + attrs BucketAttrs + raw raw.Bucket + }{{ + attrs: BucketAttrs{ + Lifecycle: Lifecycle{ + Rules: []LifecycleRule{{ + Action: LifecycleAction{ + Type: SetStorageClassAction, + StorageClass: "NEARLINE", + }, + Condition: LifecycleCondition{ + AgeInDays: 10, + Liveness: Live, + CreatedBefore: time.Date(2017, 1, 2, 3, 4, 5, 6, time.UTC), + MatchesStorageClasses: []string{"MULTI_REGIONAL", "REGIONAL", "STANDARD"}, + NumNewerVersions: 3, + }, + }, { + Action: LifecycleAction{ + Type: DeleteAction, + }, + Condition: LifecycleCondition{ + AgeInDays: 30, + Liveness: Live, + CreatedBefore: time.Date(2017, 1, 2, 3, 4, 5, 6, time.UTC), + MatchesStorageClasses: []string{"NEARLINE"}, + NumNewerVersions: 10, + }, + }, { + Action: LifecycleAction{ + Type: DeleteAction, + }, + Condition: LifecycleCondition{ + Liveness: Archived, + }, + }}, + }, + }, + raw: raw.Bucket{ + Lifecycle: &raw.BucketLifecycle{ + Rule: []*raw.BucketLifecycleRule{{ + Action: &raw.BucketLifecycleRuleAction{ + Type: SetStorageClassAction, + StorageClass: "NEARLINE", + }, + Condition: &raw.BucketLifecycleRuleCondition{ + Age: 10, + IsLive: googleapi.Bool(true), + CreatedBefore: "2017-01-02", + MatchesStorageClass: []string{"MULTI_REGIONAL", "REGIONAL", "STANDARD"}, + NumNewerVersions: 3, + }, + }, { + Action: 
&raw.BucketLifecycleRuleAction{ + Type: DeleteAction, + }, + Condition: &raw.BucketLifecycleRuleCondition{ + Age: 30, + IsLive: googleapi.Bool(true), + CreatedBefore: "2017-01-02", + MatchesStorageClass: []string{"NEARLINE"}, + NumNewerVersions: 10, + }, + }, { + Action: &raw.BucketLifecycleRuleAction{ + Type: DeleteAction, + }, + Condition: &raw.BucketLifecycleRuleCondition{ + IsLive: googleapi.Bool(false), + }, + }}, + }, + }, + }} { + if got := c.attrs.toRawBucket(); !testutil.Equal(*got, c.raw) { + t.Errorf("toRawBucket: got %v, want %v", *got, c.raw) + } + } +} + +func TestUserProject(t *testing.T) { + // Verify that the userProject query param is sent. + t.Parallel() + ctx := context.Background() + gotURL := make(chan *url.URL, 1) + hClient, close := newTestServer(func(w http.ResponseWriter, r *http.Request) { + io.Copy(ioutil.Discard, r.Body) + gotURL <- r.URL + if strings.Contains(r.URL.String(), "/rewriteTo/") { + res := &raw.RewriteResponse{Done: true} + bytes, err := res.MarshalJSON() + if err != nil { + t.Fatal(err) + } + w.Write(bytes) + } else { + fmt.Fprintf(w, "{}") + } + }) + defer close() + client, err := NewClient(ctx, option.WithHTTPClient(hClient)) + if err != nil { + t.Fatal(err) + } + + re := regexp.MustCompile(`\buserProject=p\b`) + b := client.Bucket("b").UserProject("p") + o := b.Object("o") + + check := func(msg string, f func()) { + f() + select { + case u := <-gotURL: + if !re.MatchString(u.RawQuery) { + t.Errorf("%s: query string %q does not contain userProject", msg, u.RawQuery) + } + case <-time.After(2 * time.Second): + t.Errorf("%s: timed out", msg) + } + } + + check("buckets.delete", func() { b.Delete(ctx) }) + check("buckets.get", func() { b.Attrs(ctx) }) + check("buckets.patch", func() { b.Update(ctx, BucketAttrsToUpdate{}) }) + check("storage.objects.compose", func() { o.ComposerFrom(b.Object("x")).Run(ctx) }) + check("storage.objects.delete", func() { o.Delete(ctx) }) + check("storage.objects.get", func() { o.Attrs(ctx) }) + check("storage.objects.insert", func() { o.NewWriter(ctx).Close() }) + check("storage.objects.list", func() { b.Objects(ctx, nil).Next() }) + check("storage.objects.patch", func() { o.Update(ctx, ObjectAttrsToUpdate{}) }) + check("storage.objects.rewrite", func() { o.CopierFrom(b.Object("x")).Run(ctx) }) + check("storage.objectAccessControls.list", func() { o.ACL().List(ctx) }) + check("storage.objectAccessControls.update", func() { o.ACL().Set(ctx, "", "") }) + check("storage.objectAccessControls.delete", func() { o.ACL().Delete(ctx, "") }) + check("storage.bucketAccessControls.list", func() { b.ACL().List(ctx) }) + check("storage.bucketAccessControls.update", func() { b.ACL().Set(ctx, "", "") }) + check("storage.bucketAccessControls.delete", func() { b.ACL().Delete(ctx, "") }) + check("storage.defaultObjectAccessControls.list", + func() { b.DefaultObjectACL().List(ctx) }) + check("storage.defaultObjectAccessControls.update", + func() { b.DefaultObjectACL().Set(ctx, "", "") }) + check("storage.defaultObjectAccessControls.delete", + func() { b.DefaultObjectACL().Delete(ctx, "") }) + check("buckets.getIamPolicy", func() { b.IAM().Policy(ctx) }) + check("buckets.setIamPolicy", func() { + p := &iam.Policy{} + p.Add("m", iam.Owner) + b.IAM().SetPolicy(ctx, p) + }) + check("buckets.testIamPermissions", func() { b.IAM().TestPermissions(ctx, nil) }) + check("storage.notifications.insert", func() { + b.AddNotification(ctx, &Notification{TopicProjectID: "p", TopicID: "t"}) + }) + check("storage.notifications.delete", func() { 
b.DeleteNotification(ctx, "n") }) + check("storage.notifications.list", func() { b.Notifications(ctx) }) +} + +func newTestServer(handler func(w http.ResponseWriter, r *http.Request)) (*http.Client, func()) { + ts := httptest.NewTLSServer(http.HandlerFunc(handler)) + tlsConf := &tls.Config{InsecureSkipVerify: true} + tr := &http.Transport{ + TLSClientConfig: tlsConf, + DialTLS: func(netw, addr string) (net.Conn, error) { + return tls.Dial("tcp", ts.Listener.Addr().String(), tlsConf) + }, + } + return &http.Client{Transport: tr}, func() { + tr.CloseIdleConnections() + ts.Close() + } +} diff --git a/vendor/cloud.google.com/go/storage/testdata/dummy_pem b/vendor/cloud.google.com/go/storage/testdata/dummy_pem new file mode 100644 index 0000000..3428d44 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/testdata/dummy_pem @@ -0,0 +1,39 @@ +Bag Attributes + friendlyName: privatekey + localKeyID: 54 69 6D 65 20 31 34 31 36 38 35 32 30 30 34 37 37 32 +Key Attributes: +-----BEGIN RSA PRIVATE KEY----- +MIICXQIBAAKBgQCtCWMoJ2Bok2QoGFyU7A6IlGprO9QfUTT0jNrLkIbM5OWNIuDx +64+PEaTS5g5m+2Hz/lmd5jJKanAH4dY9LZzsaYAPq1K17Gcmg1hEisYeKsgOcjYY +kwRkV+natCTsC+tfWmS0voRh0jA1rI1J4MikceoHtgWdEuoHrrptRVpWKwIDAQAB +AoGAKp3uQvx3vSnX+BwP6Um+RpsvHpwMoW3xue1bEdnVqW8SrlERz+NxZw40ZxDs +KSbuuBZD4iTI7BUM5JQVnNm4FQY1YrPlWZLyI73Bj8RKTXrPdJheM/0r7xjiIXbQ +7w4cUSM9rVugnI/rxF2kPIQTGYI+EG/6+P+k6VvgPmC0T/ECQQDUPskiS18WaY+i +Koalbrb3GakaBoHrC1b4ln4CAv7fq7H4WvFvqi/2rxLhHYq31iwxYy8s7J7Sba1+ +5vwJ2TxZAkEA0LVfs3Q2VWZ+cM3bv0aYTalMXg6wT+LoNvk9HnOb0zQYajF3qm4G +ZFdfEqvOkje0zQ4fcihARKyda/VY84UGIwJBAIZa0FvjNmgrnn7bSKzEbxHwrnkJ +EYjGfuGR8mY3mzvfpiM+/oLfSslvfhX+62cALq18yco4ZzlxsFgaxAU//NECQDcS +NN94YcHlGqYPW9W7/gI4EwOaoqFhwV6II71+SfbP/0U+KlJZV+xwNZEKrqZcdqPI +/zkzL8ovNha/laokRrsCQQCyoPHGcBWj+VFbNoyQnX4tghc6rOY7n4pmpgQvU825 +TAM9vnYtSkKK/V56kEDNBO5LwiRsir95IUNclqqMKR1C +-----END RSA PRIVATE KEY----- +Bag Attributes + friendlyName: privatekey + localKeyID: 54 69 6D 65 20 31 34 31 36 38 35 32 30 30 34 37 37 32 +subject=/CN=1079432350659-nvog0vmn9s6pqr3kr4v2avbc7nkhoa11.apps.googleusercontent.com +issuer=/CN=1079432350659-nvog0vmn9s6pqr3kr4v2avbc7nkhoa11.apps.googleusercontent.com +-----BEGIN CERTIFICATE----- +MIICXTCCAcagAwIBAgIIHxTMQUVJRZ0wDQYJKoZIhvcNAQEFBQAwVDFSMFAGA1UE +AxNJMTA3OTQzMjM1MDY1OS1udm9nMHZtbjlzNnBxcjNrcjR2MmF2YmM3bmtob2Ex +MS5hcHBzLmdvb2dsZXVzZXJjb250ZW50LmNvbTAeFw0xNDExMjQxODAwMDRaFw0y +NDExMjExODAwMDRaMFQxUjBQBgNVBAMTSTEwNzk0MzIzNTA2NTktbnZvZzB2bW45 +czZwcXIza3I0djJhdmJjN25raG9hMTEuYXBwcy5nb29nbGV1c2VyY29udGVudC5j +b20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAK0JYygnYGiTZCgYXJTsDoiU +ams71B9RNPSM2suQhszk5Y0i4PHrj48RpNLmDmb7YfP+WZ3mMkpqcAfh1j0tnOxp +gA+rUrXsZyaDWESKxh4qyA5yNhiTBGRX6dq0JOwL619aZLS+hGHSMDWsjUngyKRx +6ge2BZ0S6geuum1FWlYrAgMBAAGjODA2MAwGA1UdEwEB/wQCMAAwDgYDVR0PAQH/ +BAQDAgeAMBYGA1UdJQEB/wQMMAoGCCsGAQUFBwMCMA0GCSqGSIb3DQEBBQUAA4GB +ACVvKkZkomHq3uffOQwdZ4VJYuxrvDGnZu/ExW9WngO2teEsjxABL41TNnRYHN5T +lMC19poFA2tR/DySDLJ2XNs/hSvyQUL6HHCncVdR4Srpie88j48peY1MZSMP51Jv +qagbbP5K5DSEu02/zZaV0kaCvLEN0KAtj/noDuOOnQU2 +-----END CERTIFICATE----- diff --git a/vendor/cloud.google.com/go/storage/testdata/dummy_rsa b/vendor/cloud.google.com/go/storage/testdata/dummy_rsa new file mode 100644 index 0000000..4ce6678 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/testdata/dummy_rsa @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAx4fm7dngEmOULNmAs1IGZ9Apfzh+BkaQ1dzkmbUgpcoghucE +DZRnAGd2aPyB6skGMXUytWQvNYav0WTR00wFtX1ohWTfv68HGXJ8QXCpyoSKSSFY +fuP9X36wBSkSX9J5DVgiuzD5VBdzUISSmapjKm+DcbRALjz6OUIPEWi1Tjl6p5RK 
+1w41qdbmt7E5/kGhKLDuT7+M83g4VWhgIvaAXtnhklDAggilPPa8ZJ1IFe31lNlr
+k4DRk38nc6sEutdf3RL7QoH7FBusI7uXV03DC6dwN1kP4GE7bjJhcRb/7jYt7CQ9
+/E9Exz3c0yAp0yrTg0Fwh+qxfH9dKwN52S7SBwIDAQABAoIBAQCaCs26K07WY5Jt
+3a2Cw3y2gPrIgTCqX6hJs7O5ByEhXZ8nBwsWANBUe4vrGaajQHdLj5OKfsIDrOvn
+2NI1MqflqeAbu/kR32q3tq8/Rl+PPiwUsW3E6Pcf1orGMSNCXxeducF2iySySzh3
+nSIhCG5uwJDWI7a4+9KiieFgK1pt/Iv30q1SQS8IEntTfXYwANQrfKUVMmVF9aIK
+6/WZE2yd5+q3wVVIJ6jsmTzoDCX6QQkkJICIYwCkglmVy5AeTckOVwcXL0jqw5Kf
+5/soZJQwLEyBoQq7Kbpa26QHq+CJONetPP8Ssy8MJJXBT+u/bSseMb3Zsr5cr43e
+DJOhwsThAoGBAPY6rPKl2NT/K7XfRCGm1sbWjUQyDShscwuWJ5+kD0yudnT/ZEJ1
+M3+KS/iOOAoHDdEDi9crRvMl0UfNa8MAcDKHflzxg2jg/QI+fTBjPP5GOX0lkZ9g
+z6VePoVoQw2gpPFVNPPTxKfk27tEzbaffvOLGBEih0Kb7HTINkW8rIlzAoGBAM9y
+1yr+jvfS1cGFtNU+Gotoihw2eMKtIqR03Yn3n0PK1nVCDKqwdUqCypz4+ml6cxRK
+J8+Pfdh7D+ZJd4LEG6Y4QRDLuv5OA700tUoSHxMSNn3q9As4+T3MUyYxWKvTeu3U
+f2NWP9ePU0lV8ttk7YlpVRaPQmc1qwooBA/z/8AdAoGAW9x0HWqmRICWTBnpjyxx
+QGlW9rQ9mHEtUotIaRSJ6K/F3cxSGUEkX1a3FRnp6kPLcckC6NlqdNgNBd6rb2rA
+cPl/uSkZP42Als+9YMoFPU/xrrDPbUhu72EDrj3Bllnyb168jKLa4VBOccUvggxr
+Dm08I1hgYgdN5huzs7y6GeUCgYEAj+AZJSOJ6o1aXS6rfV3mMRve9bQ9yt8jcKXw
+5HhOCEmMtaSKfnOF1Ziih34Sxsb7O2428DiX0mV/YHtBnPsAJidL0SdLWIapBzeg
+KHArByIRkwE6IvJvwpGMdaex1PIGhx5i/3VZL9qiq/ElT05PhIb+UXgoWMabCp84
+OgxDK20CgYAeaFo8BdQ7FmVX2+EEejF+8xSge6WVLtkaon8bqcn6P0O8lLypoOhd
+mJAYH8WU+UAy9pecUnDZj14LAGNVmYcse8HFX71MoshnvCTFEPVo4rZxIAGwMpeJ
+5jgQ3slYLpqrGlcbLgUXBUgzEO684Wk/UV9DFPlHALVqCfXQ9dpJPg==
+-----END RSA PRIVATE KEY-----
diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go
new file mode 100644
index 0000000..21d146f
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/writer.go
@@ -0,0 +1,218 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"io"
+	"sync"
+	"unicode/utf8"
+
+	"golang.org/x/net/context"
+	"google.golang.org/api/googleapi"
+	raw "google.golang.org/api/storage/v1"
+)
+
+// A Writer writes a Cloud Storage object.
+type Writer struct {
+	// ObjectAttrs are optional attributes to set on the object. Any attributes
+	// must be initialized before the first Write call. Nil or zero-valued
+	// attributes are ignored.
+	ObjectAttrs
+
+	// SendCRC32C specifies whether to transmit a CRC32C field. It should be set
+	// to true in addition to setting the Writer's CRC32C field, because zero
+	// is a valid CRC and normally a zero would not be transmitted.
+	// If a CRC32C is sent, and the data written does not match the checksum,
+	// the write will be rejected.
+	SendCRC32C bool
+
+	// ChunkSize controls the maximum number of bytes of the object that the
+	// Writer will attempt to send to the server in a single request. Objects
+	// smaller than the size will be sent in a single request, while larger
+	// objects will be split over multiple requests. The size will be rounded up
+	// to the nearest multiple of 256K. If zero, chunking will be disabled and
+	// the object will be uploaded in a single request.
+	//
+	// ChunkSize will default to a reasonable value. If you perform many concurrent
+	// writes of small objects, you may wish to set ChunkSize to a value that matches
+	// your objects' sizes to avoid consuming large amounts of memory.
+	//
+	// ChunkSize must be set before the first Write call.
+	ChunkSize int
+
+	// ProgressFunc can be used to monitor the progress of a large write
+	// operation. If ProgressFunc is not nil and writing requires multiple
+	// calls to the underlying service (see
+	// https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload),
+	// then ProgressFunc will be invoked after each call with the number of bytes of
+	// content copied so far.
+	//
+	// ProgressFunc should return quickly without blocking.
+	ProgressFunc func(int64)
+
+	ctx context.Context
+	o   *ObjectHandle
+
+	opened bool
+	pw     *io.PipeWriter
+
+	donec chan struct{} // closed after err and obj are set.
+	obj   *ObjectAttrs
+
+	mu  sync.Mutex
+	err error
+}
+
+func (w *Writer) open() error {
+	attrs := w.ObjectAttrs
+	// Check the developer didn't change the object Name (this is unfortunate, but
+	// we don't want to store an object under the wrong name).
+	if attrs.Name != w.o.object {
+		return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object)
+	}
+	if !utf8.ValidString(attrs.Name) {
+		return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name)
+	}
+	pr, pw := io.Pipe()
+	w.pw = pw
+	w.opened = true
+
+	if w.ChunkSize < 0 {
+		return errors.New("storage: Writer.ChunkSize must be non-negative")
+	}
+	mediaOpts := []googleapi.MediaOption{
+		googleapi.ChunkSize(w.ChunkSize),
+	}
+	if c := attrs.ContentType; c != "" {
+		mediaOpts = append(mediaOpts, googleapi.ContentType(c))
+	}
+
+	go func() {
+		defer close(w.donec)
+
+		rawObj := attrs.toRawObject(w.o.bucket)
+		if w.SendCRC32C {
+			rawObj.Crc32c = encodeUint32(attrs.CRC32C)
+		}
+		if w.MD5 != nil {
+			rawObj.Md5Hash = base64.StdEncoding.EncodeToString(w.MD5)
+		}
+		call := w.o.c.raw.Objects.Insert(w.o.bucket, rawObj).
+			Media(pr, mediaOpts...).
+			Projection("full").
+			Context(w.ctx)
+		if w.ProgressFunc != nil {
+			call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) })
+		}
+		if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil {
+			w.mu.Lock()
+			w.err = err
+			w.mu.Unlock()
+			pr.CloseWithError(err)
+			return
+		}
+		var resp *raw.Object
+		err := applyConds("NewWriter", w.o.gen, w.o.conds, call)
+		if err == nil {
+			if w.o.userProject != "" {
+				call.UserProject(w.o.userProject)
+			}
+			setClientHeader(call.Header())
+			// If the chunk size is zero, then no chunking is done on the Reader,
+			// which means we cannot retry: the first call will read the data, and if
+			// it fails, there is no way to re-read.
+			if w.ChunkSize == 0 {
+				resp, err = call.Do()
+			} else {
+				// We will only retry here if the initial POST, which obtains a URI for
+				// the resumable upload, fails with a retryable error. The upload itself
+				// has its own retry logic.
+				err = runWithRetry(w.ctx, func() error {
+					var err2 error
+					resp, err2 = call.Do()
+					return err2
+				})
+			}
+		}
+		if err != nil {
+			w.mu.Lock()
+			w.err = err
+			w.mu.Unlock()
+			pr.CloseWithError(err)
+			return
+		}
+		w.obj = newObject(resp)
+	}()
+	return nil
+}
+
+// Write appends to w. It implements the io.Writer interface.
+// +// Since writes happen asynchronously, Write may return a nil +// error even though the write failed (or will fail). Always +// use the error returned from Writer.Close to determine if +// the upload was successful. +func (w *Writer) Write(p []byte) (n int, err error) { + w.mu.Lock() + werr := w.err + w.mu.Unlock() + if werr != nil { + return 0, werr + } + if !w.opened { + if err := w.open(); err != nil { + return 0, err + } + } + return w.pw.Write(p) +} + +// Close completes the write operation and flushes any buffered data. +// If Close doesn't return an error, metadata about the written object +// can be retrieved by calling Attrs. +func (w *Writer) Close() error { + if !w.opened { + if err := w.open(); err != nil { + return err + } + } + if err := w.pw.Close(); err != nil { + return err + } + <-w.donec + w.mu.Lock() + defer w.mu.Unlock() + return w.err +} + +// CloseWithError aborts the write operation with the provided error. +// CloseWithError always returns nil. +// +// Deprecated: cancel the context passed to NewWriter instead. +func (w *Writer) CloseWithError(err error) error { + if !w.opened { + return nil + } + return w.pw.CloseWithError(err) +} + +// Attrs returns metadata about a successfully-written object. +// It's only valid to call it after Close returns nil. +func (w *Writer) Attrs() *ObjectAttrs { + return w.obj +} diff --git a/vendor/cloud.google.com/go/storage/writer_test.go b/vendor/cloud.google.com/go/storage/writer_test.go new file mode 100644 index 0000000..c1f7380 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/writer_test.go @@ -0,0 +1,174 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
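+
+// A minimal sketch of the Writer flow these tests exercise (bucket and
+// object names are illustrative):
+//
+//	w := client.Bucket("b").Object("o").NewWriter(ctx)
+//	if _, err := w.Write([]byte("hello")); err != nil {
+//		// Write errors may surface asynchronously; see Close.
+//	}
+//	if err := w.Close(); err != nil {
+//		// Close reports whether the upload actually succeeded.
+//	}
+//	_ = w.Attrs() // non-nil only after Close returns nil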
+
+package storage
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"encoding/base64"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"cloud.google.com/go/internal/testutil"
+
+	"golang.org/x/net/context"
+
+	"google.golang.org/api/googleapi"
+	"google.golang.org/api/option"
+)
+
+type fakeTransport struct {
+	gotReq  *http.Request
+	gotBody []byte
+	results []transportResult
+}
+
+type transportResult struct {
+	res *http.Response
+	err error
+}
+
+func (t *fakeTransport) addResult(res *http.Response, err error) {
+	t.results = append(t.results, transportResult{res, err})
+}
+
+func (t *fakeTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	t.gotReq = req
+	t.gotBody = nil
+	if req.Body != nil {
+		bytes, err := ioutil.ReadAll(req.Body)
+		if err != nil {
+			return nil, err
+		}
+		t.gotBody = bytes
+	}
+	if len(t.results) == 0 {
+		return nil, fmt.Errorf("error handling request")
+	}
+	result := t.results[0]
+	t.results = t.results[1:]
+	return result.res, result.err
+}
+
+func TestErrorOnObjectsInsertCall(t *testing.T) {
+	t.Parallel()
+	ctx := context.Background()
+	const contents = "hello world"
+
+	doWrite := func(hc *http.Client) *Writer {
+		client, err := NewClient(ctx, option.WithHTTPClient(hc))
+		if err != nil {
+			t.Fatalf("error when creating client: %v", err)
+		}
+		wc := client.Bucket("bucketname").Object("filename1").NewWriter(ctx)
+		wc.ContentType = "text/plain"
+
+		// We can't check that the Write fails, since it depends on the write to the
+		// underlying fakeTransport failing, which is racy.
+		wc.Write([]byte(contents))
+		return wc
+	}
+
+	wc := doWrite(&http.Client{Transport: &fakeTransport{}})
+	// Close must always return an error, though, since it waits for the transport to
+	// have closed.
+	if err := wc.Close(); err == nil {
+		t.Errorf("expected error on close, got nil")
+	}
+
+	// Retry on 5xx
+	ft := &fakeTransport{}
+	ft.addResult(&http.Response{
+		StatusCode: 503,
+		Body:       ioutil.NopCloser(&bytes.Buffer{}),
+	}, nil)
+	ft.addResult(&http.Response{
+		StatusCode: 200,
+		Body:       ioutil.NopCloser(strings.NewReader("{}")),
+	}, nil)
+	wc = doWrite(&http.Client{Transport: ft})
+	if err := wc.Close(); err != nil {
+		t.Errorf("got %v, want nil", err)
+	}
+	got := string(ft.gotBody)
+	if !strings.Contains(got, contents) {
+		t.Errorf("got body %q, which does not contain %q", got, contents)
+	}
+}
+
+func TestEncryption(t *testing.T) {
+	t.Parallel()
+	ctx := context.Background()
+	ft := &fakeTransport{}
+	hc := &http.Client{Transport: ft}
+	client, err := NewClient(ctx, option.WithHTTPClient(hc))
+	if err != nil {
+		t.Fatalf("error when creating client: %v", err)
+	}
+	obj := client.Bucket("bucketname").Object("filename1")
+	key := []byte("secret-key-that-is-32-bytes-long")
+	wc := obj.Key(key).NewWriter(ctx)
+	// TODO(jba): use something other than fakeTransport, which always returns error.
+ wc.Write([]byte("hello world")) + wc.Close() + if got, want := ft.gotReq.Header.Get("x-goog-encryption-algorithm"), "AES256"; got != want { + t.Errorf("algorithm: got %q, want %q", got, want) + } + gotKey, err := base64.StdEncoding.DecodeString(ft.gotReq.Header.Get("x-goog-encryption-key")) + if err != nil { + t.Fatalf("decoding key: %v", err) + } + if !testutil.Equal(gotKey, key) { + t.Errorf("key: got %v, want %v", gotKey, key) + } + wantHash := sha256.Sum256(key) + gotHash, err := base64.StdEncoding.DecodeString(ft.gotReq.Header.Get("x-goog-encryption-key-sha256")) + if err != nil { + t.Fatalf("decoding hash: %v", err) + } + if !testutil.Equal(gotHash, wantHash[:]) { // wantHash is an array + t.Errorf("hash: got\n%v, want\n%v", gotHash, wantHash) + } +} + +// This test demonstrates the data race on Writer.err that can happen when the +// Writer's context is cancelled. To see the race, comment out the w.mu.Lock/Unlock +// lines in writer.go and run this test with -race. +func TestRaceOnCancel(t *testing.T) { + ctx := context.Background() + ft := &fakeTransport{} + hc := &http.Client{Transport: ft} + client, err := NewClient(ctx, option.WithHTTPClient(hc)) + if err != nil { + t.Fatalf("error when creating client: %v", err) + } + + cctx, cancel := context.WithCancel(ctx) + w := client.Bucket("b").Object("o").NewWriter(cctx) + w.ChunkSize = googleapi.MinUploadChunkSize + buf := make([]byte, w.ChunkSize) + // This Write starts the goroutine in Writer.open. That reads the first chunk in its entirety + // before sending the request (see google.golang.org/api/gensupport.PrepareUpload), + // so to exhibit the race we must provide ChunkSize bytes. The goroutine then makes the RPC (L137). + w.Write(buf) + // Canceling the context causes the call to return context.Canceled, which makes the open goroutine + // write to w.err (L151). + cancel() + // This call to Write concurrently reads w.err (L169). + w.Write([]byte(nil)) +} diff --git a/vendor/cloud.google.com/go/trace/apiv1/ListTraces_smoke_test.go b/vendor/cloud.google.com/go/trace/apiv1/ListTraces_smoke_test.go new file mode 100644 index 0000000..1619ca1 --- /dev/null +++ b/vendor/cloud.google.com/go/trace/apiv1/ListTraces_smoke_test.go @@ -0,0 +1,67 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package trace + +import ( + cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v1" +) + +import ( + "fmt" + "strconv" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var _ = fmt.Sprintf +var _ = iterator.Done +var _ = strconv.FormatUint +var _ = time.Now + +func TestTraceServiceSmoke(t *testing.T) { + if testing.Short() { + t.Skip("skipping smoke test in short mode") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) + if ts == nil { + t.Skip("Integration tests skipped. 
See CONTRIBUTING.md for details") + } + + projectId := testutil.ProjID() + _ = projectId + + c, err := NewClient(ctx, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + + var projectId2 string = projectId + var request = &cloudtracepb.ListTracesRequest{ + ProjectId: projectId2, + } + + iter := c.ListTraces(ctx, request) + if _, err := iter.Next(); err != nil && err != iterator.Done { + t.Error(err) + } +} diff --git a/vendor/cloud.google.com/go/trace/apiv1/doc.go b/vendor/cloud.google.com/go/trace/apiv1/doc.go new file mode 100644 index 0000000..29024af --- /dev/null +++ b/vendor/cloud.google.com/go/trace/apiv1/doc.go @@ -0,0 +1,54 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package trace is an auto-generated package for the +// Stackdriver Trace API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// Sends application trace data to Stackdriver Trace for viewing. Trace data +// is +// collected for all App Engine applications by default. Trace data from +// other +// applications can be provided using this API. +// +// Use the client at cloud.google.com/go/trace in preference to this. +package trace // import "cloud.google.com/go/trace/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/trace.append", + "https://www.googleapis.com/auth/trace.readonly", + } +} diff --git a/vendor/cloud.google.com/go/trace/apiv1/mock_test.go b/vendor/cloud.google.com/go/trace/apiv1/mock_test.go new file mode 100644 index 0000000..631281f --- /dev/null +++ b/vendor/cloud.google.com/go/trace/apiv1/mock_test.go @@ -0,0 +1,321 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
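+
+// For orientation: the mock server below records each incoming request in
+// reqs and replays canned responses from resps. TestMain connects a real
+// Client to it over a local gRPC listener (option.WithGRPCConn), so every
+// test exercises the full client stack, including the x-goog-api-client
+// header check.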
+ +package trace + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v1" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockTraceServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + cloudtracepb.TraceServiceServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockTraceServer) ListTraces(ctx context.Context, req *cloudtracepb.ListTracesRequest) (*cloudtracepb.ListTracesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*cloudtracepb.ListTracesResponse), nil +} + +func (s *mockTraceServer) GetTrace(ctx context.Context, req *cloudtracepb.GetTraceRequest) (*cloudtracepb.Trace, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*cloudtracepb.Trace), nil +} + +func (s *mockTraceServer) PatchTraces(ctx context.Context, req *cloudtracepb.PatchTracesRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
+var clientOpt option.ClientOption + +var ( + mockTrace mockTraceServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + cloudtracepb.RegisterTraceServiceServer(serv, &mockTrace) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestTraceServicePatchTraces(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockTrace.err = nil + mockTrace.reqs = nil + + mockTrace.resps = append(mockTrace.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var traces *cloudtracepb.Traces = &cloudtracepb.Traces{} + var request = &cloudtracepb.PatchTracesRequest{ + ProjectId: projectId, + Traces: traces, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.PatchTraces(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockTrace.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestTraceServicePatchTracesError(t *testing.T) { + errCode := codes.PermissionDenied + mockTrace.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var traces *cloudtracepb.Traces = &cloudtracepb.Traces{} + var request = &cloudtracepb.PatchTracesRequest{ + ProjectId: projectId, + Traces: traces, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.PatchTraces(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestTraceServiceGetTrace(t *testing.T) { + var projectId2 string = "projectId2939242356" + var traceId2 string = "traceId2987826376" + var expectedResponse = &cloudtracepb.Trace{ + ProjectId: projectId2, + TraceId: traceId2, + } + + mockTrace.err = nil + mockTrace.reqs = nil + + mockTrace.resps = append(mockTrace.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var traceId string = "traceId1270300245" + var request = &cloudtracepb.GetTraceRequest{ + ProjectId: projectId, + TraceId: traceId, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetTrace(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockTrace.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestTraceServiceGetTraceError(t *testing.T) { + errCode := codes.PermissionDenied + mockTrace.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var traceId string = "traceId1270300245" + var request = &cloudtracepb.GetTraceRequest{ + ProjectId: projectId, + TraceId: traceId, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetTrace(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c 
!= errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestTraceServiceListTraces(t *testing.T) { + var nextPageToken string = "" + var tracesElement *cloudtracepb.Trace = &cloudtracepb.Trace{} + var traces = []*cloudtracepb.Trace{tracesElement} + var expectedResponse = &cloudtracepb.ListTracesResponse{ + NextPageToken: nextPageToken, + Traces: traces, + } + + mockTrace.err = nil + mockTrace.reqs = nil + + mockTrace.resps = append(mockTrace.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var request = &cloudtracepb.ListTracesRequest{ + ProjectId: projectId, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTraces(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockTrace.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Traces[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestTraceServiceListTracesError(t *testing.T) { + errCode := codes.PermissionDenied + mockTrace.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var request = &cloudtracepb.ListTracesRequest{ + ProjectId: projectId, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTraces(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/trace/apiv1/trace_client.go b/vendor/cloud.google.com/go/trace/apiv1/trace_client.go new file mode 100644 index 0000000..cad9ab2 --- /dev/null +++ b/vendor/cloud.google.com/go/trace/apiv1/trace_client.go @@ -0,0 +1,235 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package trace + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. 
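+// Each field holds the gax options applied to the corresponding RPC;
+// callers can override them per call by passing additional gax.CallOption
+// values to the method.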
+type CallOptions struct { + PatchTraces []gax.CallOption + GetTrace []gax.CallOption + ListTraces []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("cloudtrace.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + }), + }, + } + return &CallOptions{ + PatchTraces: retry[[2]string{"default", "idempotent"}], + GetTrace: retry[[2]string{"default", "idempotent"}], + ListTraces: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with Stackdriver Trace API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client cloudtracepb.TraceServiceClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new trace service client. +// +// This file describes an API for collecting and viewing traces and spans +// within a trace. A Trace is a collection of spans corresponding to a single +// operation or set of operations for an application. A span is an individual +// timed event which forms a node of the trace tree. Spans for a single trace +// may span multiple services. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: cloudtracepb.NewTraceServiceClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// PatchTraces sends new traces to Stackdriver Trace or updates existing traces. If the ID +// of a trace that you send matches that of an existing trace, any fields +// in the existing trace and its spans are overwritten by the provided values, +// and any new fields provided are merged with the existing trace data. If the +// ID does not match, a new trace is created. +func (c *Client) PatchTraces(ctx context.Context, req *cloudtracepb.PatchTracesRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.PatchTraces[0:len(c.CallOptions.PatchTraces):len(c.CallOptions.PatchTraces)], opts...) 
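+	// The full slice expression above caps the slice's capacity, so this
+	// append allocates a fresh backing array instead of writing into the
+	// shared default CallOptions.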
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		_, err = c.client.PatchTraces(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	return err
+}
+
+// GetTrace gets a single trace by its ID.
+func (c *Client) GetTrace(ctx context.Context, req *cloudtracepb.GetTraceRequest, opts ...gax.CallOption) (*cloudtracepb.Trace, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.GetTrace[0:len(c.CallOptions.GetTrace):len(c.CallOptions.GetTrace)], opts...)
+	var resp *cloudtracepb.Trace
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.GetTrace(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// ListTraces returns a list of traces that match the specified filter conditions.
+func (c *Client) ListTraces(ctx context.Context, req *cloudtracepb.ListTracesRequest, opts ...gax.CallOption) *TraceIterator {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.ListTraces[0:len(c.CallOptions.ListTraces):len(c.CallOptions.ListTraces)], opts...)
+	it := &TraceIterator{}
+	it.InternalFetch = func(pageSize int, pageToken string) ([]*cloudtracepb.Trace, string, error) {
+		var resp *cloudtracepb.ListTracesResponse
+		req.PageToken = pageToken
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+			var err error
+			resp, err = c.client.ListTraces(ctx, req, settings.GRPC...)
+			return err
+		}, opts...)
+		if err != nil {
+			return nil, "", err
+		}
+		return resp.Traces, resp.NextPageToken, nil
+	}
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, items...)
+		return nextPageToken, nil
+	}
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	return it
+}
+
+// TraceIterator manages a stream of *cloudtracepb.Trace.
+type TraceIterator struct {
+	items    []*cloudtracepb.Trace
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+
+	// InternalFetch is for use by the Google Cloud Libraries only.
+	// It is not part of the stable interface of this package.
+	//
+	// InternalFetch returns results from a single call to the underlying RPC.
+	// The number of results is no greater than pageSize.
+	// If there are no more results, nextPageToken is empty and err is nil.
+	InternalFetch func(pageSize int, pageToken string) (results []*cloudtracepb.Trace, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *TraceIterator) PageInfo() *iterator.PageInfo {
+	return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
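+//
+// A typical loop, mirroring the package example:
+//
+//	it := c.ListTraces(ctx, req)
+//	for {
+//		tr, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			// TODO: Handle error.
+//		}
+//		_ = tr
+//	}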
+func (it *TraceIterator) Next() (*cloudtracepb.Trace, error) { + var item *cloudtracepb.Trace + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *TraceIterator) bufLen() int { + return len(it.items) +} + +func (it *TraceIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/trace/apiv1/trace_client_example_test.go b/vendor/cloud.google.com/go/trace/apiv1/trace_client_example_test.go new file mode 100644 index 0000000..2fe01b0 --- /dev/null +++ b/vendor/cloud.google.com/go/trace/apiv1/trace_client_example_test.go @@ -0,0 +1,92 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package trace_test + +import ( + "cloud.google.com/go/trace/apiv1" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v1" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := trace.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_PatchTraces() { + ctx := context.Background() + c, err := trace.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &cloudtracepb.PatchTracesRequest{ + // TODO: Fill request struct fields. + } + err = c.PatchTraces(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_GetTrace() { + ctx := context.Background() + c, err := trace.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &cloudtracepb.GetTraceRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetTrace(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListTraces() { + ctx := context.Background() + c, err := trace.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &cloudtracepb.ListTracesRequest{ + // TODO: Fill request struct fields. + } + it := c.ListTraces(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} diff --git a/vendor/cloud.google.com/go/trace/apiv2/BatchWriteSpans_smoke_test.go b/vendor/cloud.google.com/go/trace/apiv2/BatchWriteSpans_smoke_test.go new file mode 100644 index 0000000..0c0c118 --- /dev/null +++ b/vendor/cloud.google.com/go/trace/apiv2/BatchWriteSpans_smoke_test.go @@ -0,0 +1,66 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package trace + +import ( + cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2" +) + +import ( + "fmt" + "strconv" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var _ = fmt.Sprintf +var _ = iterator.Done +var _ = strconv.FormatUint +var _ = time.Now + +func TestTraceServiceSmoke(t *testing.T) { + if testing.Short() { + t.Skip("skipping smoke test in short mode") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) + if ts == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + + projectId := testutil.ProjID() + _ = projectId + + c, err := NewClient(ctx, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + + var formattedName string = fmt.Sprintf("projects/%s", projectId) + var request = &cloudtracepb.BatchWriteSpansRequest{ + Name: formattedName, + } + + if err := c.BatchWriteSpans(ctx, request); err != nil { + t.Error(err) + } +} diff --git a/vendor/cloud.google.com/go/trace/apiv2/doc.go b/vendor/cloud.google.com/go/trace/apiv2/doc.go new file mode 100644 index 0000000..2f7ffdc --- /dev/null +++ b/vendor/cloud.google.com/go/trace/apiv2/doc.go @@ -0,0 +1,51 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package trace is an auto-generated package for the +// Stackdriver Trace API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// Sends application trace data to Stackdriver Trace for viewing. Trace data +// is +// collected for all App Engine applications by default. Trace data from +// other +// applications can be provided using this API. +package trace // import "cloud.google.com/go/trace/apiv2" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. 
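+// (The smoke tests, for example, build their token source with
+// testutil.TokenSource(ctx, DefaultAuthScopes()...).)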
+func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/trace.append", + } +} diff --git a/vendor/cloud.google.com/go/trace/apiv2/mock_test.go b/vendor/cloud.google.com/go/trace/apiv2/mock_test.go new file mode 100644 index 0000000..c7fff5c --- /dev/null +++ b/vendor/cloud.google.com/go/trace/apiv2/mock_test.go @@ -0,0 +1,252 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package trace + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + timestamppb "github.com/golang/protobuf/ptypes/timestamp" + cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockTraceServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + cloudtracepb.TraceServiceServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockTraceServer) BatchWriteSpans(ctx context.Context, req *cloudtracepb.BatchWriteSpansRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockTraceServer) CreateSpan(ctx context.Context, req *cloudtracepb.Span) (*cloudtracepb.Span, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*cloudtracepb.Span), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
+var clientOpt option.ClientOption + +var ( + mockTrace mockTraceServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + cloudtracepb.RegisterTraceServiceServer(serv, &mockTrace) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestTraceServiceBatchWriteSpans(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockTrace.err = nil + mockTrace.reqs = nil + + mockTrace.resps = append(mockTrace.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var spans []*cloudtracepb.Span = nil + var request = &cloudtracepb.BatchWriteSpansRequest{ + Name: formattedName, + Spans: spans, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.BatchWriteSpans(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockTrace.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestTraceServiceBatchWriteSpansError(t *testing.T) { + errCode := codes.PermissionDenied + mockTrace.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var spans []*cloudtracepb.Span = nil + var request = &cloudtracepb.BatchWriteSpansRequest{ + Name: formattedName, + Spans: spans, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.BatchWriteSpans(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestTraceServiceCreateSpan(t *testing.T) { + var name2 string = "name2-1052831874" + var spanId2 string = "spanId2-643891741" + var parentSpanId string = "parentSpanId-1757797477" + var expectedResponse = &cloudtracepb.Span{ + Name: name2, + SpanId: spanId2, + ParentSpanId: parentSpanId, + } + + mockTrace.err = nil + mockTrace.reqs = nil + + mockTrace.resps = append(mockTrace.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/traces/%s/spans/%s", "[PROJECT]", "[TRACE]", "[SPAN]") + var spanId string = "spanId-2011840976" + var displayName *cloudtracepb.TruncatableString = &cloudtracepb.TruncatableString{} + var startTime *timestamppb.Timestamp = ×tamppb.Timestamp{} + var endTime *timestamppb.Timestamp = ×tamppb.Timestamp{} + var request = &cloudtracepb.Span{ + Name: formattedName, + SpanId: spanId, + DisplayName: displayName, + StartTime: startTime, + EndTime: endTime, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateSpan(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockTrace.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestTraceServiceCreateSpanError(t *testing.T) { + errCode := codes.PermissionDenied + mockTrace.err = gstatus.Error(errCode, "test error") + + var formattedName string = 
fmt.Sprintf("projects/%s/traces/%s/spans/%s", "[PROJECT]", "[TRACE]", "[SPAN]") + var spanId string = "spanId-2011840976" + var displayName *cloudtracepb.TruncatableString = &cloudtracepb.TruncatableString{} + var startTime *timestamppb.Timestamp = ×tamppb.Timestamp{} + var endTime *timestamppb.Timestamp = ×tamppb.Timestamp{} + var request = &cloudtracepb.Span{ + Name: formattedName, + SpanId: spanId, + DisplayName: displayName, + StartTime: startTime, + EndTime: endTime, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateSpan(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/trace/apiv2/path_funcs.go b/vendor/cloud.google.com/go/trace/apiv2/path_funcs.go new file mode 100644 index 0000000..80b8d40 --- /dev/null +++ b/vendor/cloud.google.com/go/trace/apiv2/path_funcs.go @@ -0,0 +1,43 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +// ProjectPath returns the path for the project resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s", project) +// instead. +func ProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// SpanPath returns the path for the span resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/traces/%s/spans/%s", project, trace, span) +// instead. +func SpanPath(project, trace, span string) string { + return "" + + "projects/" + + project + + "/traces/" + + trace + + "/spans/" + + span + + "" +} diff --git a/vendor/cloud.google.com/go/trace/apiv2/trace_client.go b/vendor/cloud.google.com/go/trace/apiv2/trace_client.go new file mode 100644 index 0000000..7ff39b1 --- /dev/null +++ b/vendor/cloud.google.com/go/trace/apiv2/trace_client.go @@ -0,0 +1,151 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package trace + +import ( + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. +type CallOptions struct { + BatchWriteSpans []gax.CallOption + CreateSpan []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("cloudtrace.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + }), + }, + } + return &CallOptions{ + BatchWriteSpans: retry[[2]string{"default", "non_idempotent"}], + CreateSpan: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with Stackdriver Trace API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client cloudtracepb.TraceServiceClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new trace service client. +// +// This file describes an API for collecting and viewing traces and spans +// within a trace. A Trace is a collection of spans corresponding to a single +// operation or set of operations for an application. A span is an individual +// timed event which forms a node of the trace tree. A single trace may +// contain span(s) from multiple services. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: cloudtracepb.NewTraceServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// BatchWriteSpans sends new spans to new or existing traces. You cannot update +// existing spans. 
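+//
+// A minimal call, with an illustrative project ID:
+//
+//	err := c.BatchWriteSpans(ctx, &cloudtracepb.BatchWriteSpansRequest{
+//		Name:  "projects/my-project",
+//		Spans: spans, // []*cloudtracepb.Span to record
+//	})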
+func (c *Client) BatchWriteSpans(ctx context.Context, req *cloudtracepb.BatchWriteSpansRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.BatchWriteSpans[0:len(c.CallOptions.BatchWriteSpans):len(c.CallOptions.BatchWriteSpans)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.BatchWriteSpans(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// CreateSpan creates a new span. +func (c *Client) CreateSpan(ctx context.Context, req *cloudtracepb.Span, opts ...gax.CallOption) (*cloudtracepb.Span, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateSpan[0:len(c.CallOptions.CreateSpan):len(c.CallOptions.CreateSpan)], opts...) + var resp *cloudtracepb.Span + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateSpan(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/trace/apiv2/trace_client_example_test.go b/vendor/cloud.google.com/go/trace/apiv2/trace_client_example_test.go new file mode 100644 index 0000000..d070ccf --- /dev/null +++ b/vendor/cloud.google.com/go/trace/apiv2/trace_client_example_test.go @@ -0,0 +1,67 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package trace_test + +import ( + "cloud.google.com/go/trace/apiv2" + "golang.org/x/net/context" + cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := trace.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_BatchWriteSpans() { + ctx := context.Background() + c, err := trace.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &cloudtracepb.BatchWriteSpansRequest{ + // TODO: Fill request struct fields. + } + err = c.BatchWriteSpans(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_CreateSpan() { + ctx := context.Background() + c, err := trace.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &cloudtracepb.Span{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateSpan(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/trace/grpc.go b/vendor/cloud.google.com/go/trace/grpc.go new file mode 100644 index 0000000..e78f4a2 --- /dev/null +++ b/vendor/cloud.google.com/go/trace/grpc.go @@ -0,0 +1,108 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "encoding/hex" + "fmt" + + "cloud.google.com/go/internal/tracecontext" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +const grpcMetadataKey = "grpc-trace-bin" + +// GRPCClientInterceptor returns a grpc.UnaryClientInterceptor that traces all outgoing requests from a gRPC client. +// The calling context should already have a *trace.Span; a child span will be +// created for the outgoing gRPC call. If the calling context doesn't have a span, +// the call will not be traced. If the client is nil, then the interceptor just +// passes through the request. +// +// The functionality in gRPC that this feature relies on is currently experimental. +func (c *Client) GRPCClientInterceptor() grpc.UnaryClientInterceptor { + if c == nil { + return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + return invoker(ctx, method, req, reply, cc, opts...) + } + } + return grpc.UnaryClientInterceptor(c.grpcUnaryInterceptor) +} + +func (c *Client) grpcUnaryInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + // TODO: also intercept streams. + span := FromContext(ctx).NewChild(method) + if span == nil { + span = c.NewSpan(method) + } + defer span.Finish() + + traceContext := make([]byte, tracecontext.Len) + // traceID is a hex-encoded 128-bit value. + // TODO(jbd): Decode trace IDs upon arrival and + // represent trace IDs with 16 bytes internally. + tid, err := hex.DecodeString(span.trace.traceID) + if err != nil { + return invoker(ctx, method, req, reply, cc, opts...) + } + tracecontext.Encode(traceContext, tid, span.span.SpanId, byte(span.trace.globalOptions)) + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { + md = metadata.Pairs(grpcMetadataKey, string(traceContext)) + } else { + md = md.Copy() // metadata is immutable, copy. + md[grpcMetadataKey] = []string{string(traceContext)} + } + ctx = metadata.NewOutgoingContext(ctx, md) + + err = invoker(ctx, method, req, reply, cc, opts...) + if err != nil { + // TODO: standardize gRPC label names? + span.SetLabel("error", err.Error()) + } + return err +} + +// GRPCServerInterceptor returns a grpc.UnaryServerInterceptor that enables the tracing of the incoming +// gRPC calls. Incoming call's context can be used to extract the span on servers that enabled this option: +// +// span := trace.FromContext(ctx) +// +// If the client is nil, then the interceptor just invokes the handler. +// +// The functionality in gRPC that this feature relies on is currently experimental. 
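+//
+// A typical wiring sketch, assuming a *trace.Client named tc (this mirrors
+// the package's own tests):
+//
+//	srv := grpc.NewServer(grpc.UnaryInterceptor(tc.GRPCServerInterceptor()))
+//
+// and, on the client side:
+//
+//	conn, err := grpc.Dial(addr, grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor()))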
+func (c *Client) GRPCServerInterceptor() grpc.UnaryServerInterceptor { + if c == nil { + return func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + return handler(ctx, req) + } + } + return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + md, _ := metadata.FromIncomingContext(ctx) + var traceHeader string + if header, ok := md[grpcMetadataKey]; ok { + traceID, spanID, opts, ok := tracecontext.Decode([]byte(header[0])) + if ok { + // TODO(jbd): Generate a span directly from string(traceID), spanID and opts. + traceHeader = fmt.Sprintf("%x/%d;o=%d", traceID, spanID, opts) + } + } + span := c.SpanFromHeader(info.FullMethod, traceHeader) + defer span.Finish() + ctx = NewContext(ctx, span) + return handler(ctx, req) + } +} diff --git a/vendor/cloud.google.com/go/trace/grpc_test.go b/vendor/cloud.google.com/go/trace/grpc_test.go new file mode 100644 index 0000000..bf4733c --- /dev/null +++ b/vendor/cloud.google.com/go/trace/grpc_test.go @@ -0,0 +1,180 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "io/ioutil" + "log" + "net" + "net/http" + "strings" + "testing" + + pb "cloud.google.com/go/trace/testdata/helloworld" + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +func TestGRPCInterceptors(t *testing.T) { + t.Skip("hangs forever for go < 1.9") + + tc := newTestClient(&noopTransport{}) + + // default sampling with global=1. + parent := tc.SpanFromHeader("parent", "7f27601f17b7a2873739efd18ff83872/123;o=1") + testGRPCInterceptor(t, tc, parent, func(t *testing.T, out, in *Span) { + if in == nil { + t.Fatalf("missing span in the incoming context") + } + if got, want := in.TraceID(), out.TraceID(); got != want { + t.Errorf("incoming call is not tracing the outgoing trace; TraceID = %q; want %q", got, want) + } + if !in.Traced() { + t.Errorf("incoming span is not traced; want traced") + } + }) + + // default sampling with global=0. + parent = tc.SpanFromHeader("parent", "7f27601f17b7a2873739efd18ff83872/123;o=0") + testGRPCInterceptor(t, tc, parent, func(t *testing.T, out, in *Span) { + if in == nil { + t.Fatalf("missing span in the incoming context") + } + if got, want := in.TraceID(), out.TraceID(); got != want { + t.Errorf("incoming call is not tracing the outgoing trace; TraceID = %q; want %q", got, want) + } + if in.Traced() { + t.Errorf("incoming span is traced; want not traced") + } + }) + + // sampling all with global=1. 
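+	// (A fraction of 1.0 samples every request, and 1<<32 is an effectively
+	// unlimited QPS cap, so the rate limiter never rejects a trace here.)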
+ all, _ := NewLimitedSampler(1.0, 1<<32) + tc.SetSamplingPolicy(all) + parent = tc.SpanFromHeader("parent", "7f27601f17b7a2873739efd18ff83872/123;o=1") + testGRPCInterceptor(t, tc, parent, func(t *testing.T, out, in *Span) { + if in == nil { + t.Fatalf("missing span in the incoming context") + } + if got, want := in.TraceID(), out.TraceID(); got != want { + t.Errorf("incoming call is not tracing the outgoing trace; TraceID = %q; want %q", got, want) + } + if !in.Traced() { + t.Errorf("incoming span is not traced; want traced") + } + }) + + // sampling none with global=1. + none, _ := NewLimitedSampler(0, 0) + tc.SetSamplingPolicy(none) + parent = tc.SpanFromHeader("parent", "7f27601f17b7a2873739efd18ff83872/123;o=1") + testGRPCInterceptor(t, tc, parent, func(t *testing.T, out, in *Span) { + if in == nil { + t.Fatalf("missing span in the incoming context") + } + if got, want := in.TraceID(), out.TraceID(); got != want { + t.Errorf("incoming call is not tracing the outgoing trace; TraceID = %q; want %q", got, want) + } + if in.Traced() { + t.Errorf("incoming span is traced; want not traced") + } + }) + + // sampling all with no parent span. + tc.SetSamplingPolicy(all) + testGRPCInterceptor(t, tc, nil, func(t *testing.T, out, in *Span) { + if in == nil { + t.Fatalf("missing span in the incoming context") + } + if in.TraceID() == "" { + t.Errorf("incoming call TraceID is empty") + } + if !in.Traced() { + t.Errorf("incoming span is not traced; want traced") + } + }) + + // sampling none with no parent span. + tc.SetSamplingPolicy(none) + testGRPCInterceptor(t, tc, nil, func(t *testing.T, out, in *Span) { + if in == nil { + t.Fatalf("missing span in the incoming context") + } + if in.TraceID() == "" { + t.Errorf("incoming call TraceID is empty") + } + if in.Traced() { + t.Errorf("incoming span is traced; want not traced") + } + }) +} + +func testGRPCInterceptor(t *testing.T, tc *Client, parent *Span, assert func(t *testing.T, out, in *Span)) { + incomingCh := make(chan *Span, 1) + addrCh := make(chan net.Addr, 1) + go func() { + lis, err := net.Listen("tcp", "") + if err != nil { + t.Fatalf("Failed to listen: %v", err) + } + addrCh <- lis.Addr() + + s := grpc.NewServer(grpc.UnaryInterceptor(tc.GRPCServerInterceptor())) + pb.RegisterGreeterServer(s, &grpcServer{ + fn: func(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { + incomingCh <- FromContext(ctx) + return &pb.HelloReply{}, nil + }, + }) + if err := s.Serve(lis); err != nil { + t.Fatalf("Failed to serve: %v", err) + } + }() + + addr := <-addrCh + conn, err := grpc.Dial(addr.String(), grpc.WithInsecure(), grpc.WithBlock(), grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor())) + if err != nil { + t.Fatalf("Did not connect: %v", err) + } + defer conn.Close() + c := pb.NewGreeterClient(conn) + + outgoingCtx := NewContext(context.Background(), parent) + _, err = c.SayHello(outgoingCtx, &pb.HelloRequest{}) + if err != nil { + log.Fatalf("Could not SayHello: %v", err) + } + + assert(t, parent, <-incomingCh) +} + +type noopTransport struct{} + +func (rt *noopTransport) RoundTrip(req *http.Request) (*http.Response, error) { + resp := &http.Response{ + Status: "200 OK", + StatusCode: 200, + Body: ioutil.NopCloser(strings.NewReader("{}")), + } + return resp, nil +} + +type grpcServer struct { + fn func(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) +} + +func (s *grpcServer) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { + return s.fn(ctx, in) +} diff --git 
a/vendor/cloud.google.com/go/trace/http.go b/vendor/cloud.google.com/go/trace/http.go new file mode 100644 index 0000000..290d139 --- /dev/null +++ b/vendor/cloud.google.com/go/trace/http.go @@ -0,0 +1,107 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.7 + +package trace + +import ( + "net/http" +) + +// Transport is an http.RoundTripper that traces the outgoing requests. +// +// Transport is safe for concurrent usage. +type Transport struct { + // Base is the base http.RoundTripper to be used to do the actual request. + // + // Optional. If nil, http.DefaultTransport is used. + Base http.RoundTripper +} + +// RoundTrip creates a trace.Span and inserts it into the outgoing request's headers. +// The created span can follow a parent span, if a parent is presented in +// the request's context. +func (t Transport) RoundTrip(req *http.Request) (*http.Response, error) { + span := FromContext(req.Context()).NewRemoteChild(req) + resp, err := t.base().RoundTrip(req) + + // TODO(jbd): Is it possible to defer the span.Finish? + // In cases where RoundTrip panics, we still can finish the span. + span.Finish(WithResponse(resp)) + return resp, err +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t Transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + cr.CancelRequest(req) + } +} + +func (t Transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +// HTTPHandler returns a http.Handler from the given handler +// that is aware of the incoming request's span. +// The span can be extracted from the incoming request in handler +// functions from incoming request's context: +// +// span := trace.FromContext(r.Context()) +// +// The span will be auto finished by the handler. +func (c *Client) HTTPHandler(h http.Handler) http.Handler { + if c == nil { + return h + } + return &handler{traceClient: c, handler: h} +} + +type handler struct { + traceClient *Client + handler http.Handler +} + +func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + traceID, parentSpanID, options, optionsOk, ok := traceInfoFromHeader(r.Header.Get(httpHeader)) + if !ok { + traceID = nextTraceID() + } + t := &trace{ + traceID: traceID, + client: h.traceClient, + globalOptions: options, + localOptions: options, + } + span := startNewChildWithRequest(r, t, parentSpanID) + span.span.Kind = spanKindServer + span.rootSpan = true + configureSpanFromPolicy(span, h.traceClient.policy, ok) + defer span.Finish() + + r = r.WithContext(NewContext(r.Context(), span)) + if ok && !optionsOk { + // Inject the trace context back to the response with the sampling options. + // TODO(jbd): Remove when there is a better way to report the client's sampling. 
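+		// (span.trace.localOptions carries the sampling decision made by
+		// configureSpanFromPolicy above, so echoing it lets the caller see
+		// whether the request was actually traced.)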
+ w.Header().Set(httpHeader, spanHeader(traceID, parentSpanID, span.trace.localOptions)) + } + h.handler.ServeHTTP(w, r) +} diff --git a/vendor/cloud.google.com/go/trace/http_test.go b/vendor/cloud.google.com/go/trace/http_test.go new file mode 100644 index 0000000..54c2e17 --- /dev/null +++ b/vendor/cloud.google.com/go/trace/http_test.go @@ -0,0 +1,151 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.7 + +package trace + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +type recorderTransport struct { + ch chan *http.Request +} + +func (rt *recorderTransport) RoundTrip(req *http.Request) (*http.Response, error) { + rt.ch <- req + resp := &http.Response{ + Status: "200 OK", + StatusCode: 200, + Body: ioutil.NopCloser(strings.NewReader("{}")), + } + return resp, nil +} + +func TestNewHTTPClient(t *testing.T) { + rt := &recorderTransport{ + ch: make(chan *http.Request, 1), + } + + tc := newTestClient(&noopTransport{}) + client := &http.Client{ + Transport: &Transport{ + Base: rt, + }, + } + req, _ := http.NewRequest("GET", "http://example.com", nil) + + t.Run("NoTrace", func(t *testing.T) { + _, err := client.Do(req) + if err != nil { + t.Error(err) + } + outgoing := <-rt.ch + if got, want := outgoing.Header.Get(httpHeader), ""; want != got { + t.Errorf("got trace header = %q; want none", got) + } + }) + + t.Run("Trace", func(t *testing.T) { + span := tc.NewSpan("/foo") + + req = req.WithContext(NewContext(req.Context(), span)) + _, err := client.Do(req) + if err != nil { + t.Error(err) + } + outgoing := <-rt.ch + + s := tc.SpanFromHeader("/foo", outgoing.Header.Get(httpHeader)) + if got, want := s.TraceID(), span.TraceID(); got != want { + t.Errorf("trace ID = %q; want %q", got, want) + } + }) +} + +func TestHTTPHandlerNoTrace(t *testing.T) { + tc := newTestClient(&noopTransport{}) + client := &http.Client{ + Transport: &Transport{}, + } + handler := tc.HTTPHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + span := FromContext(r.Context()) + if span == nil { + t.Errorf("span is nil; want non-nil span") + } + })) + + ts := httptest.NewServer(handler) + defer ts.Close() + + req, _ := http.NewRequest("GET", ts.URL, nil) + _, err := client.Do(req) + if err != nil { + t.Fatal(err) + } +} + +func TestHTTPHandler_response(t *testing.T) { + tc := newTestClient(&noopTransport{}) + p, _ := NewLimitedSampler(1, 1<<32) // all + tc.SetSamplingPolicy(p) + handler := tc.HTTPHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) + ts := httptest.NewServer(handler) + defer ts.Close() + + tests := []struct { + name string + traceHeader string + wantTraceHeader string + }{ + { + name: "no global", + traceHeader: "0123456789ABCDEF0123456789ABCDEF/123", + wantTraceHeader: "0123456789ABCDEF0123456789ABCDEF/123;o=1", + }, + { + name: "global=1", + traceHeader: "0123456789ABCDEF0123456789ABCDEF/123;o=1", + wantTraceHeader: "", + }, + { + name: "global=0", 
+ traceHeader: "0123456789ABCDEF0123456789ABCDEF/123;o=0", + wantTraceHeader: "", + }, + { + name: "no trace context", + traceHeader: "", + wantTraceHeader: "", + }, + } + + for _, tt := range tests { + req, _ := http.NewRequest("GET", ts.URL, nil) + req.Header.Set(httpHeader, tt.traceHeader) + + res, err := http.DefaultClient.Do(req) + if err != nil { + t.Errorf("failed to request: %v", err) + } + if got, want := res.Header.Get(httpHeader), tt.wantTraceHeader; got != want { + t.Errorf("%v: response context header = %q; want %q", tt.name, got, want) + } + } +} diff --git a/vendor/cloud.google.com/go/trace/httpexample_test.go b/vendor/cloud.google.com/go/trace/httpexample_test.go new file mode 100644 index 0000000..4c29036 --- /dev/null +++ b/vendor/cloud.google.com/go/trace/httpexample_test.go @@ -0,0 +1,57 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.7 + +package trace_test + +import ( + "log" + "net/http" + + "cloud.google.com/go/trace" +) + +var traceClient *trace.Client + +func ExampleHTTPClient_Do() { + client := http.Client{ + Transport: &trace.Transport{}, + } + span := traceClient.NewSpan("/foo") // traceClient is a *trace.Client + + req, _ := http.NewRequest("GET", "https://metadata/users", nil) + req = req.WithContext(trace.NewContext(req.Context(), span)) + + if _, err := client.Do(req); err != nil { + log.Fatal(err) + } +} + +func ExampleClient_HTTPHandler() { + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + client := http.Client{ + Transport: &trace.Transport{}, + } + + req, _ := http.NewRequest("GET", "https://metadata/users", nil) + req = req.WithContext(r.Context()) + + // The outgoing request will be traced with r's trace ID. + if _, err := client.Do(req); err != nil { + log.Fatal(err) + } + }) + http.Handle("/foo", traceClient.HTTPHandler(handler)) // traceClient is a *trace.Client +} diff --git a/vendor/cloud.google.com/go/trace/sampling.go b/vendor/cloud.google.com/go/trace/sampling.go new file mode 100644 index 0000000..d609290 --- /dev/null +++ b/vendor/cloud.google.com/go/trace/sampling.go @@ -0,0 +1,117 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + crand "crypto/rand" + "encoding/binary" + "fmt" + "math/rand" + "sync" + "time" + + "golang.org/x/time/rate" +) + +type SamplingPolicy interface { + // Sample returns a Decision. 
+	// If Trace is false in the returned Decision, then the Decision should be
+	// the zero value.
+	Sample(p Parameters) Decision
+}
+
+// Parameters contains the values passed to a SamplingPolicy's Sample method.
+type Parameters struct {
+	HasTraceHeader bool // whether the incoming request has a valid X-Cloud-Trace-Context header.
+}
+
+// Decision is the value returned by a call to a SamplingPolicy's Sample method.
+type Decision struct {
+	Trace  bool    // Whether to trace the request.
+	Sample bool    // Whether the trace is included in the random sample.
+	Policy string  // Name of the sampling policy.
+	Weight float64 // Sample weight to be used in statistical calculations.
+}
+
+type sampler struct {
+	fraction float64
+	skipped  float64
+	*rate.Limiter
+	*rand.Rand
+	sync.Mutex
+}
+
+func (s *sampler) Sample(p Parameters) Decision {
+	s.Lock()
+	x := s.Float64()
+	d := s.sample(p, time.Now(), x)
+	s.Unlock()
+	return d
+}
+
+// sample contains the deterministic, time-independent logic of Sample.
+func (s *sampler) sample(p Parameters, now time.Time, x float64) (d Decision) {
+	d.Sample = x < s.fraction
+	d.Trace = p.HasTraceHeader || d.Sample
+	if !d.Trace {
+		// We have no reason to trace this request.
+		return Decision{}
+	}
+	// We test separately that the rate limit is not tiny before calling AllowN,
+	// because of overflow problems in x/time/rate.
+	if s.Limit() < 1e-9 || !s.AllowN(now, 1) {
+		// Rejected by the rate limit.
+		if d.Sample {
+			s.skipped++
+		}
+		return Decision{}
+	}
+	if d.Sample {
+		d.Policy, d.Weight = "default", (1.0+s.skipped)/s.fraction
+		s.skipped = 0.0
+	}
+	return
+}
+
+// NewLimitedSampler returns a sampling policy that randomly samples a given
+// fraction of requests. It also enforces a limit on the number of traces per
+// second. It tries to trace every request with a trace header, but will not
+// exceed the qps limit to do so.
+func NewLimitedSampler(fraction, maxqps float64) (SamplingPolicy, error) {
+	if !(fraction >= 0) {
+		return nil, fmt.Errorf("invalid fraction %f", fraction)
+	}
+	if !(maxqps >= 0) {
+		return nil, fmt.Errorf("invalid maxqps %f", maxqps)
+	}
+	// Set a limit on the number of accumulated "tokens", to limit bursts of
+	// traced requests. Use one more than a second's worth of tokens, or 100,
+	// whichever is smaller.
+	// See https://godoc.org/golang.org/x/time/rate#NewLimiter.
+	maxTokens := 100
+	if maxqps < 99.0 {
+		maxTokens = 1 + int(maxqps)
+	}
+	var seed int64
+	if err := binary.Read(crand.Reader, binary.LittleEndian, &seed); err != nil {
+		seed = time.Now().UnixNano()
+	}
+	s := sampler{
+		fraction: fraction,
+		Limiter:  rate.NewLimiter(rate.Limit(maxqps), maxTokens),
+		Rand:     rand.New(rand.NewSource(seed)),
+	}
+	return &s, nil
+}
diff --git a/vendor/cloud.google.com/go/trace/testdata/helloworld/helloworld.pb.go b/vendor/cloud.google.com/go/trace/testdata/helloworld/helloworld.pb.go
new file mode 100644
index 0000000..5fb40e3
--- /dev/null
+++ b/vendor/cloud.google.com/go/trace/testdata/helloworld/helloworld.pb.go
@@ -0,0 +1,161 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package helloworld is a generated protocol buffer package. + +It is generated from these files: + helloworld.proto + +It has these top-level messages: + HelloRequest + HelloReply +*/ +package helloworld + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The request message containing the user's name. +type HelloRequest struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *HelloRequest) Reset() { *m = HelloRequest{} } +func (m *HelloRequest) String() string { return proto.CompactTextString(m) } +func (*HelloRequest) ProtoMessage() {} +func (*HelloRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// The response message containing the greetings +type HelloReply struct { + Message string `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"` +} + +func (m *HelloReply) Reset() { *m = HelloReply{} } +func (m *HelloReply) String() string { return proto.CompactTextString(m) } +func (*HelloReply) ProtoMessage() {} +func (*HelloReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func init() { + proto.RegisterType((*HelloRequest)(nil), "helloworld.HelloRequest") + proto.RegisterType((*HelloReply)(nil), "helloworld.HelloReply") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Greeter service + +type GreeterClient interface { + // Sends a greeting + SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) +} + +type greeterClient struct { + cc *grpc.ClientConn +} + +func NewGreeterClient(cc *grpc.ClientConn) GreeterClient { + return &greeterClient{cc} +} + +func (c *greeterClient) SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) { + out := new(HelloReply) + err := grpc.Invoke(ctx, "/helloworld.Greeter/SayHello", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Greeter service + +type GreeterServer interface { + // Sends a greeting + SayHello(context.Context, *HelloRequest) (*HelloReply, error) +} + +func RegisterGreeterServer(s *grpc.Server, srv GreeterServer) { + s.RegisterService(&_Greeter_serviceDesc, srv) +} + +func _Greeter_SayHello_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HelloRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GreeterServer).SayHello(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/helloworld.Greeter/SayHello", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GreeterServer).SayHello(ctx, req.(*HelloRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Greeter_serviceDesc = grpc.ServiceDesc{ + ServiceName: "helloworld.Greeter", + HandlerType: (*GreeterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SayHello", + Handler: _Greeter_SayHello_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "helloworld.proto", +} + +func init() { proto.RegisterFile("helloworld.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 174 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0xc8, 0x48, 0xcd, 0xc9, + 0xc9, 0x2f, 0xcf, 0x2f, 0xca, 0x49, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x42, 0x88, + 0x28, 0x29, 0x71, 0xf1, 0x78, 0x80, 0x78, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x42, + 0x5c, 0x2c, 0x79, 0x89, 0xb9, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x60, 0xb6, 0x92, + 0x1a, 0x17, 0x17, 0x54, 0x4d, 0x41, 0x4e, 0xa5, 0x90, 0x04, 0x17, 0x7b, 0x6e, 0x6a, 0x71, 0x71, + 0x62, 0x3a, 0x4c, 0x11, 0x8c, 0x6b, 0xe4, 0xc9, 0xc5, 0xee, 0x5e, 0x94, 0x9a, 0x5a, 0x92, 0x5a, + 0x24, 0x64, 0xc7, 0xc5, 0x11, 0x9c, 0x58, 0x09, 0xd6, 0x25, 0x24, 0xa1, 0x87, 0xe4, 0x02, 0x64, + 0xcb, 0xa4, 0xc4, 0xb0, 0xc8, 0x00, 0xad, 0x50, 0x62, 0x70, 0x32, 0xe0, 0x92, 0xce, 0xcc, 0xd7, + 0x4b, 0x2f, 0x2a, 0x48, 0xd6, 0x4b, 0xad, 0x48, 0xcc, 0x2d, 0xc8, 0x49, 0x2d, 0x46, 0x52, 0xeb, + 0xc4, 0x0f, 0x56, 0x1c, 0x0e, 0x62, 0x07, 0x80, 0xbc, 0x14, 0xc0, 0x98, 0xc4, 0x06, 0xf6, 0x9b, + 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x0f, 0xb7, 0xcd, 0xf2, 0xef, 0x00, 0x00, 0x00, +} diff --git a/vendor/cloud.google.com/go/trace/testdata/helloworld/helloworld.proto b/vendor/cloud.google.com/go/trace/testdata/helloworld/helloworld.proto new file mode 100644 index 0000000..5be9f5b --- /dev/null +++ b/vendor/cloud.google.com/go/trace/testdata/helloworld/helloworld.proto @@ -0,0 +1,37 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+syntax = "proto3";
+
+option java_multiple_files = true;
+option java_package = "io.grpc.examples.helloworld";
+option java_outer_classname = "HelloWorldProto";
+
+package helloworld;
+
+// The greeting service definition.
+service Greeter {
+  // Sends a greeting
+  rpc SayHello (HelloRequest) returns (HelloReply) {}
+}
+
+// The request message containing the user's name.
+message HelloRequest {
+  string name = 1;
+}
+
+// The response message containing the greetings
+message HelloReply {
+  string message = 1;
+}
diff --git a/vendor/cloud.google.com/go/trace/trace.go b/vendor/cloud.google.com/go/trace/trace.go
new file mode 100644
index 0000000..df70202
--- /dev/null
+++ b/vendor/cloud.google.com/go/trace/trace.go
@@ -0,0 +1,845 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This package is OBSOLETE. See https://godoc.org/go.opencensus.io/trace; and use
+// OpenCensus Stackdriver exporter, https://godoc.org/go.opencensus.io/exporter/stackdriver.
+//
+// Package trace is a Google Stackdriver Trace library.
+//
+// This package is still experimental and subject to change.
+// See https://cloud.google.com/trace/api/#data_model for a discussion of traces
+// and spans.
+//
+// To initialize a client that connects to the Stackdriver Trace server, use the
+// NewClient function. Generally you will want to do this on program
+// initialization.
+//
+//	import "cloud.google.com/go/trace"
+//	...
+//	traceClient, err = trace.NewClient(ctx, projectID)
+//
+// Calling SpanFromRequest will create a new trace span for an incoming HTTP
+// request. If the request contains a trace context header, it is used to
+// determine the trace ID. Otherwise, a new trace ID is created.
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		span := traceClient.SpanFromRequest(r)
+//		defer span.Finish()
+//		...
+//	}
+//
+// SpanFromRequest and NewSpan return nil if the *Client is nil, so you can disable
+// tracing by not initializing your *Client variable. All of the exported
+// functions on *Span do nothing when the *Span is nil.
+//
+// If you need to start traces that don't correspond to an incoming HTTP request,
+// you can use NewSpan to create a root-level span.
+//
+//	span := traceClient.NewSpan("span name")
+//	defer span.Finish()
+//
+// Although a trace span object is created for every request, only a subset of
+// traces are uploaded to the server, for efficiency. By default, the requests
+// that are traced are those with the tracing bit set in the options field of
+// the trace context header. Ideally, you should override this behaviour by
+// calling SetSamplingPolicy. NewLimitedSampler returns an implementation of
+// SamplingPolicy which traces requests that have the tracing bit set, and also
+// randomly traces a specified fraction of requests. Additionally, it sets a
+// limit on the number of requests traced per second.
+// The following example traces one in every thousand requests, up to a
+// limit of 5 per second.
+//
+//	p, err := trace.NewLimitedSampler(0.001, 5)
+//	traceClient.SetSamplingPolicy(p)
+//
+// You can create a new span as a child of an existing span with NewChild.
+//
+//	childSpan := span.NewChild(name)
+//	...
+//	childSpan.Finish()
+//
+// When sending an HTTP request to another server, NewRemoteChild will create
+// a span to represent the time the current program waits for the request to
+// complete, and attach a header to the outgoing request so that the trace will
+// be propagated to the destination server.
+//
+//	childSpan := span.NewRemoteChild(&httpRequest)
+//	...
+//	childSpan.Finish()
+//
+// Alternatively, if you have access to the X-Cloud-Trace-Context header value
+// but not the underlying HTTP request (this can happen if you are using a
+// different transport or messaging protocol, such as gRPC), you can use
+// SpanFromHeader instead of SpanFromRequest. In that case, you will need to
+// specify the span name explicitly, since it cannot be constructed from the
+// HTTP request's URL and method.
+//
+//	func handler(r *somepkg.Request) {
+//		span := traceClient.SpanFromHeader("span name", r.TraceContext())
+//		defer span.Finish()
+//		...
+//	}
+//
+// Spans can contain a map from keys to values that have useful information
+// about the span. The elements of this map are called labels. Some labels,
+// whose keys all begin with the string "trace.cloud.google.com/", are set
+// automatically in the following ways:
+//
+// - SpanFromRequest sets some labels to data about the incoming request.
+//
+// - NewRemoteChild sets some labels to data about the outgoing request.
+//
+// - Finish sets a label to a stack trace, if the stack trace option is enabled
+// in the incoming trace header.
+//
+// - The WithResponse option sets some labels to data about a response.
+//
+// You can also set labels using SetLabel. If a label is given a value
+// automatically and by SetLabel, the automatically-set value is used.
+//
+//	span.SetLabel(key, value)
+//
+// The WithResponse option can be used when Finish is called.
+//
+//	childSpan := span.NewRemoteChild(outgoingReq)
+//	resp, err := http.DefaultClient.Do(outgoingReq)
+//	...
+//	childSpan.Finish(trace.WithResponse(resp))
+//
+// When a span created by SpanFromRequest or SpanFromHeader is finished, the
+// finished spans in the corresponding trace -- the span itself and its
+// descendants -- are uploaded to the Stackdriver Trace server using the
+// *Client that created the span. Finish returns immediately, and uploading
+// occurs asynchronously. You can use the FinishWait function instead to wait
+// until uploading has finished.
+//
+//	err := span.FinishWait()
+//
+// Using contexts to pass *trace.Span objects through your program will often
+// be a better approach than passing them around explicitly. This allows trace
+// spans, and other request-scoped or part-of-request-scoped values, to be
+// easily passed through API boundaries. Various Google Cloud libraries will
+// retrieve trace spans from contexts and automatically create child spans for
+// API requests.
+// See https://blog.golang.org/context for more discussion of contexts.
+// A derived context containing a trace span can be created using NewContext.
+//
+//	span := traceClient.SpanFromRequest(r)
+//	ctx = trace.NewContext(ctx, span)
+//
+// The span can be retrieved from a context elsewhere in the program using
+// FromContext.
+// +// func foo(ctx context.Context) { +// span := trace.FromContext(ctx).NewChild("in foo") +// defer span.Finish() +// ... +// } +// +package trace // import "cloud.google.com/go/trace" + +import ( + "crypto/rand" + "encoding/binary" + "encoding/json" + "fmt" + "log" + "net/http" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/context" + api "google.golang.org/api/cloudtrace/v1" + "google.golang.org/api/gensupport" + "google.golang.org/api/option" + "google.golang.org/api/support/bundler" + htransport "google.golang.org/api/transport/http" +) + +const ( + httpHeader = `X-Cloud-Trace-Context` + userAgent = `gcloud-golang-trace/20160501` + cloudPlatformScope = `https://www.googleapis.com/auth/cloud-platform` + spanKindClient = `RPC_CLIENT` + spanKindServer = `RPC_SERVER` + spanKindUnspecified = `SPAN_KIND_UNSPECIFIED` + maxStackFrames = 20 + labelAgent = `trace.cloud.google.com/agent` +) + +// Stackdriver Trace API predefined labels. +const ( + LabelComponent = `trace.cloud.google.com/component` + LabelErrorMessage = `trace.cloud.google.com/error/message` + LabelErrorName = `trace.cloud.google.com/error/name` + LabelHTTPClientCity = `trace.cloud.google.com/http/client_city` + LabelHTTPClientCountry = `trace.cloud.google.com/http/client_country` + LabelHTTPClientProtocol = `trace.cloud.google.com/http/client_protocol` + LabelHTTPClientRegion = `trace.cloud.google.com/http/client_region` + LabelHTTPHost = `trace.cloud.google.com/http/host` + LabelHTTPMethod = `trace.cloud.google.com/http/method` + LabelHTTPRedirectedURL = `trace.cloud.google.com/http/redirected_url` + LabelHTTPRequestSize = `trace.cloud.google.com/http/request/size` + LabelHTTPResponseSize = `trace.cloud.google.com/http/response/size` + LabelHTTPStatusCode = `trace.cloud.google.com/http/status_code` + LabelHTTPURL = `trace.cloud.google.com/http/url` + LabelHTTPUserAgent = `trace.cloud.google.com/http/user_agent` + LabelPID = `trace.cloud.google.com/pid` + LabelSamplingPolicy = `trace.cloud.google.com/sampling_policy` + LabelSamplingWeight = `trace.cloud.google.com/sampling_weight` + LabelStackTrace = `trace.cloud.google.com/stacktrace` + LabelTID = `trace.cloud.google.com/tid` +) + +const ( + // ScopeTraceAppend grants permissions to write trace data for a project. + ScopeTraceAppend = "https://www.googleapis.com/auth/trace.append" + + // ScopeCloudPlatform grants permissions to view and manage your data + // across Google Cloud Platform services. + ScopeCloudPlatform = "https://www.googleapis.com/auth/cloud-platform" +) + +type contextKey struct{} + +type stackLabelValue struct { + Frames []stackFrame `json:"stack_frame"` +} + +type stackFrame struct { + Class string `json:"class_name,omitempty"` + Method string `json:"method_name"` + Filename string `json:"file_name"` + Line int64 `json:"line_number"` +} + +var ( + spanIDCounter uint64 + spanIDIncrement uint64 +) + +func init() { + // Set spanIDCounter and spanIDIncrement to random values. nextSpanID will + // return an arithmetic progression using these values, skipping zero. We set + // the LSB of spanIDIncrement to 1, so that the cycle length is 2^64. + binary.Read(rand.Reader, binary.LittleEndian, &spanIDCounter) + binary.Read(rand.Reader, binary.LittleEndian, &spanIDIncrement) + spanIDIncrement |= 1 + // Attach hook for autogenerated Google API calls. This will automatically + // create trace spans for API calls if there is a trace in the context. 
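+	// (requestHook, defined below, wraps each such call in a child span via
+	// NewRemoteChild and finishes it with WithResponse.)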
+ gensupport.RegisterHook(requestHook) +} + +func requestHook(ctx context.Context, req *http.Request) func(resp *http.Response) { + span := FromContext(ctx) + if span == nil || req == nil { + return nil + } + span = span.NewRemoteChild(req) + return func(resp *http.Response) { + if resp != nil { + span.Finish(WithResponse(resp)) + } else { + span.Finish() + } + } +} + +// nextSpanID returns a new span ID. It will never return zero. +func nextSpanID() uint64 { + var id uint64 + for id == 0 { + id = atomic.AddUint64(&spanIDCounter, spanIDIncrement) + } + return id +} + +// nextTraceID returns a new trace ID. +func nextTraceID() string { + id1 := nextSpanID() + id2 := nextSpanID() + return fmt.Sprintf("%016x%016x", id1, id2) +} + +// Client is a client for uploading traces to the Google Stackdriver Trace service. +// A nil Client will no-op for all of its methods. +type Client struct { + service *api.Service + projectID string + policy SamplingPolicy + bundler *bundler.Bundler +} + +// NewClient creates a new Google Stackdriver Trace client. +func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { + o := []option.ClientOption{ + option.WithScopes(cloudPlatformScope), + option.WithUserAgent(userAgent), + } + o = append(o, opts...) + hc, basePath, err := htransport.NewClient(ctx, o...) + if err != nil { + return nil, fmt.Errorf("creating HTTP client for Google Stackdriver Trace API: %v", err) + } + apiService, err := api.New(hc) + if err != nil { + return nil, fmt.Errorf("creating Google Stackdriver Trace API client: %v", err) + } + if basePath != "" { + // An option set a basepath, so override api.New's default. + apiService.BasePath = basePath + } + c := &Client{ + service: apiService, + projectID: projectID, + } + bundler := bundler.NewBundler((*api.Trace)(nil), func(bundle interface{}) { + traces := bundle.([]*api.Trace) + err := c.upload(traces) + if err != nil { + log.Printf("failed to upload %d traces to the Cloud Trace server: %v", len(traces), err) + } + }) + bundler.DelayThreshold = 2 * time.Second + bundler.BundleCountThreshold = 100 + // We're not measuring bytes here, we're counting traces and spans as one "byte" each. + bundler.BundleByteThreshold = 1000 + bundler.BundleByteLimit = 1000 + bundler.BufferedByteLimit = 10000 + c.bundler = bundler + return c, nil +} + +// SetSamplingPolicy sets the SamplingPolicy that determines how often traces +// are initiated by this client. +func (c *Client) SetSamplingPolicy(p SamplingPolicy) { + if c != nil { + c.policy = p + } +} + +// SpanFromHeader returns a new trace span based on a provided request header +// value or nil iff the client is nil. +// +// The trace information and identifiers will be read from the header value. +// Otherwise, a new trace ID is made and the parent span ID is zero. +// For the exact format of the header value, see +// https://cloud.google.com/trace/docs/support#how_do_i_force_a_request_to_be_traced +// +// The name of the new span is provided as an argument. +// +// If a non-nil sampling policy has been set in the client, it can override +// the options set in the header and choose whether to trace the request. +// +// If the header doesn't have existing tracing information, then a *Span is +// returned anyway, but it will not be uploaded to the server, just as when +// calling SpanFromRequest on an untraced request. 
+//
+// Most users using HTTP should use SpanFromRequest, rather than
+// SpanFromHeader, since it provides additional functionality for HTTP
+// requests. In particular, it will set various pieces of request information
+// as labels on the *Span, which is not available from the header alone.
+func (c *Client) SpanFromHeader(name string, header string) *Span {
+	if c == nil {
+		return nil
+	}
+	traceID, parentSpanID, options, _, ok := traceInfoFromHeader(header)
+	if !ok {
+		traceID = nextTraceID()
+	}
+	t := &trace{
+		traceID:       traceID,
+		client:        c,
+		globalOptions: options,
+		localOptions:  options,
+	}
+	span := startNewChild(name, t, parentSpanID)
+	span.span.Kind = spanKindServer
+	span.rootSpan = true
+	configureSpanFromPolicy(span, c.policy, ok)
+	return span
+}
+
+// SpanFromRequest returns a new trace span for an HTTP request or nil
+// iff the client is nil.
+//
+// If the incoming HTTP request contains a trace context header, the trace ID,
+// parent span ID, and tracing options will be read from that header.
+// Otherwise, a new trace ID is made and the parent span ID is zero.
+//
+// If a non-nil sampling policy has been set in the client, it can override the
+// options set in the header and choose whether to trace the request.
+//
+// If the request is not being traced, then a *Span is returned anyway, but it
+// will not be uploaded to the server -- it is only useful for propagating
+// trace context to child requests and for getting the TraceID. All its
+// methods can still be called -- the Finish, FinishWait, and SetLabel methods
+// do nothing. NewChild does nothing, and returns the same *Span. TraceID
+// works as usual.
+func (c *Client) SpanFromRequest(r *http.Request) *Span {
+	if c == nil {
+		return nil
+	}
+	traceID, parentSpanID, options, _, ok := traceInfoFromHeader(r.Header.Get(httpHeader))
+	if !ok {
+		traceID = nextTraceID()
+	}
+	t := &trace{
+		traceID:       traceID,
+		client:        c,
+		globalOptions: options,
+		localOptions:  options,
+	}
+	span := startNewChildWithRequest(r, t, parentSpanID)
+	span.span.Kind = spanKindServer
+	span.rootSpan = true
+	configureSpanFromPolicy(span, c.policy, ok)
+	return span
+}
+
+// NewSpan returns a new trace span with the given name or nil iff the
+// client is nil.
+//
+// A new trace ID and span ID are generated to trace the span.
+// The returned span must be finished by calling Finish or FinishWait.
+func (c *Client) NewSpan(name string) *Span {
+	if c == nil {
+		return nil
+	}
+	t := &trace{
+		traceID:       nextTraceID(),
+		client:        c,
+		localOptions:  optionTrace,
+		globalOptions: optionTrace,
+	}
+	span := startNewChild(name, t, 0)
+	span.span.Kind = spanKindUnspecified
+	span.rootSpan = true
+	configureSpanFromPolicy(span, c.policy, false)
+	return span
+}
+
+func configureSpanFromPolicy(s *Span, p SamplingPolicy, ok bool) {
+	if p == nil {
+		return
+	}
+	d := p.Sample(Parameters{HasTraceHeader: ok})
+	if d.Trace {
+		// Turn on tracing locally, and in child requests.
+		s.trace.localOptions |= optionTrace
+		s.trace.globalOptions |= optionTrace
+	} else {
+		// Turn off tracing locally.
+		s.trace.localOptions = 0
+		return
+	}
+	if d.Sample {
+		// This trace is in the random sample, so set the labels.
+		s.SetLabel(LabelSamplingPolicy, d.Policy)
+		s.SetLabel(LabelSamplingWeight, fmt.Sprint(d.Weight))
+	}
+}
+
+// NewContext returns a derived context containing the span.
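+//
+// A typical pattern (sketch) is to derive the context once per incoming
+// request:
+//
+//	span := traceClient.SpanFromRequest(r)
+//	ctx := trace.NewContext(r.Context(), span)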
+func NewContext(ctx context.Context, s *Span) context.Context {
+	if s == nil {
+		return ctx
+	}
+	return context.WithValue(ctx, contextKey{}, s)
+}
+
+// FromContext returns the span contained in the context, or nil.
+func FromContext(ctx context.Context) *Span {
+	s, _ := ctx.Value(contextKey{}).(*Span)
+	return s
+}
+
+func traceInfoFromHeader(h string) (traceID string, spanID uint64, options optionFlags, optionsOk bool, ok bool) {
+	// See https://cloud.google.com/trace/docs/faq for the header format.
+	// Return if the header is empty or missing, or if the header is unreasonably
+	// large, to avoid making unnecessary copies of a large string.
+	if h == "" || len(h) > 200 {
+		return "", 0, 0, false, false
+	}
+
+	// Parse the trace id field.
+	slash := strings.Index(h, `/`)
+	if slash == -1 {
+		return "", 0, 0, false, false
+	}
+	traceID, h = h[:slash], h[slash+1:]
+
+	// Parse the span id field.
+	spanstr := h
+	semicolon := strings.Index(h, `;`)
+	if semicolon != -1 {
+		spanstr, h = h[:semicolon], h[semicolon+1:]
+	}
+	spanID, err := strconv.ParseUint(spanstr, 10, 64)
+	if err != nil {
+		return "", 0, 0, false, false
+	}
+
+	// Parse the options field; it is optional.
+	if !strings.HasPrefix(h, "o=") {
+		return traceID, spanID, 0, false, true
+	}
+	o, err := strconv.ParseUint(h[2:], 10, 64)
+	if err != nil {
+		return "", 0, 0, false, false
+	}
+	options = optionFlags(o)
+	return traceID, spanID, options, true, true
+}
+
+type optionFlags uint32
+
+const (
+	optionTrace optionFlags = 1 << iota
+	optionStack
+)
+
+type trace struct {
+	mu            sync.Mutex
+	client        *Client
+	traceID       string
+	globalOptions optionFlags // options that will be passed to any child requests
+	localOptions  optionFlags // options applied in this server
+	spans         []*Span     // finished spans for this trace.
+}
+
+// finish appends s to t.spans. If s is the root span, it uploads the trace to
+// the server.
+func (t *trace) finish(s *Span, wait bool, opts ...FinishOption) error {
+	for _, o := range opts {
+		o.modifySpan(s)
+	}
+	s.end = time.Now()
+	t.mu.Lock()
+	t.spans = append(t.spans, s)
+	spans := t.spans
+	t.mu.Unlock()
+	if s.rootSpan {
+		if wait {
+			return t.client.upload([]*api.Trace{t.constructTrace(spans)})
+		}
+		go func() {
+			tr := t.constructTrace(spans)
+			err := t.client.bundler.Add(tr, 1+len(spans))
+			if err == bundler.ErrOversizedItem {
+				err = t.client.upload([]*api.Trace{tr})
+			}
+			if err != nil {
+				log.Println("error uploading trace:", err)
+			}
+		}()
+	}
+	return nil
+}
+
+func (t *trace) constructTrace(spans []*Span) *api.Trace {
+	apiSpans := make([]*api.TraceSpan, len(spans))
+	for i, sp := range spans {
+		sp.span.StartTime = sp.start.In(time.UTC).Format(time.RFC3339Nano)
+		sp.span.EndTime = sp.end.In(time.UTC).Format(time.RFC3339Nano)
+		if t.localOptions&optionStack != 0 {
+			sp.setStackLabel()
+		}
+		if sp.host != "" {
+			sp.SetLabel(LabelHTTPHost, sp.host)
+		}
+		if sp.url != "" {
+			sp.SetLabel(LabelHTTPURL, sp.url)
+		}
+		if sp.method != "" {
+			sp.SetLabel(LabelHTTPMethod, sp.method)
+		}
+		if sp.statusCode != 0 {
+			sp.SetLabel(LabelHTTPStatusCode, strconv.Itoa(sp.statusCode))
+		}
+		sp.SetLabel(labelAgent, userAgent)
+		apiSpans[i] = &sp.span
+	}
+
+	return &api.Trace{
+		ProjectId: t.client.projectID,
+		TraceId:   t.traceID,
+		Spans:     apiSpans,
+	}
+}
+
+func (c *Client) upload(traces []*api.Trace) error {
+	_, err := c.service.Projects.PatchTraces(c.projectID, &api.Traces{Traces: traces}).Do()
+	return err
+}
+
+// Span contains information about one span of a trace.
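+//
+// Methods on a nil *Span are no-ops, so a span from a disabled (nil) client
+// can be used without nil checks; see the package documentation above.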
+type Span struct { + trace *trace + + spanMu sync.Mutex // guards span.Labels + span api.TraceSpan + + start time.Time + end time.Time + rootSpan bool + stack [maxStackFrames]uintptr + host string + method string + url string + statusCode int +} + +// Traced reports whether the current span is sampled to be traced. +func (s *Span) Traced() bool { + if s == nil { + return false + } + return s.trace.localOptions&optionTrace != 0 +} + +// NewChild creates a new span with the given name as a child of s. +// If s is nil, does nothing and returns nil. +func (s *Span) NewChild(name string) *Span { + if s == nil { + return nil + } + if !s.Traced() { + // TODO(jbd): Document this behavior in godoc here and elsewhere. + return s + } + return startNewChild(name, s.trace, s.span.SpanId) +} + +// NewRemoteChild creates a new span as a child of s. +// +// Some labels in the span are set from the outgoing *http.Request r. +// +// A header is set in r so that the trace context is propagated to the +// destination. The parent span ID in that header is set as follows: +// - If the request is being traced, then the ID of s is used. +// - If the request is not being traced, but there was a trace context header +// in the incoming request for this trace (the request passed to +// SpanFromRequest), the parent span ID in that header is used. +// - Otherwise, the parent span ID is zero. +// The tracing bit in the options is set if tracing is enabled, or if it was +// set in the incoming request. +// +// If s is nil, does nothing and returns nil. +func (s *Span) NewRemoteChild(r *http.Request) *Span { + if s == nil { + return nil + } + if !s.Traced() { + r.Header[httpHeader] = []string{spanHeader(s.trace.traceID, s.span.ParentSpanId, s.trace.globalOptions)} + return s + } + newSpan := startNewChildWithRequest(r, s.trace, s.span.SpanId) + r.Header[httpHeader] = []string{spanHeader(s.trace.traceID, newSpan.span.SpanId, s.trace.globalOptions)} + return newSpan +} + +// Header returns the value of the X-Cloud-Trace-Context header that +// should be used to propagate the span. This is the inverse of +// SpanFromHeader. +// +// Most users should use NewRemoteChild unless they have specific +// propagation needs or want to control the naming of their span. +// Header() does not create a new span. +func (s *Span) Header() string { + if s == nil { + return "" + } + return spanHeader(s.trace.traceID, s.span.SpanId, s.trace.globalOptions) +} + +func startNewChildWithRequest(r *http.Request, trace *trace, parentSpanID uint64) *Span { + name := r.URL.Host + r.URL.Path // drop scheme and query params + newSpan := startNewChild(name, trace, parentSpanID) + if r.Host == "" { + newSpan.host = r.URL.Host + } else { + newSpan.host = r.Host + } + newSpan.method = r.Method + newSpan.url = r.URL.String() + return newSpan +} + +func startNewChild(name string, trace *trace, parentSpanID uint64) *Span { + spanID := nextSpanID() + for spanID == parentSpanID { + spanID = nextSpanID() + } + newSpan := &Span{ + trace: trace, + span: api.TraceSpan{ + Kind: spanKindClient, + Name: name, + ParentSpanId: parentSpanID, + SpanId: spanID, + }, + start: time.Now(), + } + if trace.localOptions&optionStack != 0 { + _ = runtime.Callers(1, newSpan.stack[:]) + } + return newSpan +} + +// TraceID returns the ID of the trace to which s belongs. +func (s *Span) TraceID() string { + if s == nil { + return "" + } + return s.trace.traceID +} + +// SetLabel sets the label for the given key to the given value. 
+// If the value is empty, the label for that key is deleted. +// If a label is given a value automatically and by SetLabel, the +// automatically-set value is used. +// If s is nil, does nothing. +// +// SetLabel shouldn't be called after Finish or FinishWait. +func (s *Span) SetLabel(key, value string) { + if s == nil { + return + } + if !s.Traced() { + return + } + s.spanMu.Lock() + defer s.spanMu.Unlock() + + if value == "" { + if s.span.Labels != nil { + delete(s.span.Labels, key) + } + return + } + if s.span.Labels == nil { + s.span.Labels = make(map[string]string) + } + s.span.Labels[key] = value +} + +type FinishOption interface { + modifySpan(s *Span) +} + +type withResponse struct { + *http.Response +} + +// WithResponse returns an option that can be passed to Finish that indicates +// that some labels for the span should be set using the given *http.Response. +func WithResponse(resp *http.Response) FinishOption { + return withResponse{resp} +} +func (u withResponse) modifySpan(s *Span) { + if u.Response != nil { + s.statusCode = u.StatusCode + } +} + +// Finish declares that the span has finished. +// +// If s is nil, Finish does nothing and returns nil. +// +// If the option trace.WithResponse(resp) is passed, then some labels are set +// for s using information in the given *http.Response. This is useful when the +// span is for an outgoing http request; s will typically have been created by +// NewRemoteChild in this case. +// +// If s is a root span (one created by SpanFromRequest) then s, and all its +// descendant spans that have finished, are uploaded to the Google Stackdriver +// Trace server asynchronously. +func (s *Span) Finish(opts ...FinishOption) { + if s == nil { + return + } + if !s.Traced() { + return + } + s.trace.finish(s, false, opts...) +} + +// FinishWait is like Finish, but if s is a root span, it waits until uploading +// is finished, then returns an error if one occurred. +func (s *Span) FinishWait(opts ...FinishOption) error { + if s == nil { + return nil + } + if !s.Traced() { + return nil + } + return s.trace.finish(s, true, opts...) +} + +func spanHeader(traceID string, spanID uint64, options optionFlags) string { + // See https://cloud.google.com/trace/docs/faq for the header format. + return fmt.Sprintf("%s/%d;o=%d", traceID, spanID, options) +} + +func (s *Span) setStackLabel() { + var stack stackLabelValue + lastSigPanic, inTraceLibrary := false, true + for _, pc := range s.stack { + if pc == 0 { + break + } + if !lastSigPanic { + pc-- + } + fn := runtime.FuncForPC(pc) + file, line := fn.FileLine(pc) + // Name has one of the following forms: + // path/to/package.Foo + // path/to/package.(Type).Foo + // For the first form, we store the whole name in the Method field of the + // stack frame. For the second form, we set the Method field to "Foo" and + // the Class field to "path/to/package.(Type)". 
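+		// For example (illustrative names only):
+		//   "path/to/package.Foo"        -> Method: "path/to/package.Foo"
+		//   "path/to/package.(Type).Foo" -> Class: "path/to/package.(Type)", Method: "Foo"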
+ name := fn.Name() + if inTraceLibrary && !strings.HasPrefix(name, "cloud.google.com/go/trace.") { + inTraceLibrary = false + } + var class string + if i := strings.Index(name, ")."); i != -1 { + class, name = name[:i+1], name[i+2:] + } + frame := stackFrame{ + Class: class, + Method: name, + Filename: file, + Line: int64(line), + } + if inTraceLibrary && len(stack.Frames) == 1 { + stack.Frames[0] = frame + } else { + stack.Frames = append(stack.Frames, frame) + } + lastSigPanic = fn.Name() == "runtime.sigpanic" + } + if label, err := json.Marshal(stack); err == nil { + s.SetLabel(LabelStackTrace, string(label)) + } +} diff --git a/vendor/cloud.google.com/go/trace/trace_test.go b/vendor/cloud.google.com/go/trace/trace_test.go new file mode 100644 index 0000000..805184c --- /dev/null +++ b/vendor/cloud.google.com/go/trace/trace_test.go @@ -0,0 +1,969 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "math/rand" + "net/http" + "regexp" + "strings" + "sync" + "testing" + "time" + + "cloud.google.com/go/datastore" + "cloud.google.com/go/internal/testutil" + "cloud.google.com/go/storage" + "golang.org/x/net/context" + api "google.golang.org/api/cloudtrace/v1" + compute "google.golang.org/api/compute/v1" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + dspb "google.golang.org/genproto/googleapis/datastore/v1" + "google.golang.org/grpc" +) + +const testProjectID = "testproject" + +type fakeRoundTripper struct { + reqc chan *http.Request +} + +func newFakeRoundTripper() *fakeRoundTripper { + return &fakeRoundTripper{reqc: make(chan *http.Request)} +} + +func (rt *fakeRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { + rt.reqc <- r + resp := &http.Response{ + Status: "200 OK", + StatusCode: 200, + Body: ioutil.NopCloser(strings.NewReader("{}")), + } + return resp, nil +} + +func newTestClient(rt http.RoundTripper) *Client { + t, err := NewClient(context.Background(), testProjectID, option.WithHTTPClient(&http.Client{Transport: rt})) + if err != nil { + panic(err) + } + return t +} + +type fakeDatastoreServer struct { + dspb.DatastoreServer + fail bool +} + +func (f *fakeDatastoreServer) Lookup(ctx context.Context, req *dspb.LookupRequest) (*dspb.LookupResponse, error) { + if f.fail { + return nil, errors.New("lookup failed") + } + return &dspb.LookupResponse{}, nil +} + +// makeRequests makes some requests. +// span is the root span. rt is the trace client's http client's transport. +// This is used to retrieve the trace uploaded by the client, if any. If +// expectTrace is true, we expect a trace will be uploaded. If synchronous is +// true, the call to Finish is expected not to return before the client has +// uploaded any traces. 
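+//
+// A typical call, mirroring TestNewSpan below (a sketch):
+//
+//	span := traceClient.NewSpan("/foo")
+//	uploaded := makeRequests(t, span, rt, true, true)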
+func makeRequests(t *testing.T, span *Span, rt *fakeRoundTripper, synchronous bool, expectTrace bool) *http.Request { + ctx := NewContext(context.Background(), span) + tc := newTestClient(&noopTransport{}) + + // An HTTP request. + { + req2, err := http.NewRequest("GET", "http://example.com/bar", nil) + if err != nil { + t.Fatal(err) + } + resp := &http.Response{StatusCode: 200} + s := span.NewRemoteChild(req2) + s.Finish(WithResponse(resp)) + } + + // An autogenerated API call. + { + rt := &fakeRoundTripper{reqc: make(chan *http.Request, 1)} + hc := &http.Client{Transport: rt} + computeClient, err := compute.New(hc) + if err != nil { + t.Fatal(err) + } + _, err = computeClient.Zones.List(testProjectID).Context(ctx).Do() + if err != nil { + t.Fatal(err) + } + } + + // A cloud library call that uses the autogenerated API. + { + rt := &fakeRoundTripper{reqc: make(chan *http.Request, 1)} + hc := &http.Client{Transport: rt} + storageClient, err := storage.NewClient(context.Background(), option.WithHTTPClient(hc)) + if err != nil { + t.Fatal(err) + } + var objAttrsList []*storage.ObjectAttrs + it := storageClient.Bucket("testbucket").Objects(ctx, nil) + for { + objAttrs, err := it.Next() + if err != nil && err != iterator.Done { + t.Fatal(err) + } + if err == iterator.Done { + break + } + objAttrsList = append(objAttrsList, objAttrs) + } + } + + // A cloud library call that uses grpc internally. + for _, fail := range []bool{false, true} { + srv, err := testutil.NewServer() + if err != nil { + t.Fatalf("creating test datastore server: %v", err) + } + dspb.RegisterDatastoreServer(srv.Gsrv, &fakeDatastoreServer{fail: fail}) + srv.Start() + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure(), grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor())) + if err != nil { + t.Fatalf("connecting to test datastore server: %v", err) + } + datastoreClient, err := datastore.NewClient(ctx, testProjectID, option.WithGRPCConn(conn)) + if err != nil { + t.Fatalf("creating datastore client: %v", err) + } + k := datastore.NameKey("Entity", "stringID", nil) + e := new(datastore.Entity) + datastoreClient.Get(ctx, k, e) + } + + done := make(chan struct{}) + go func() { + if synchronous { + err := span.FinishWait() + if err != nil { + t.Errorf("Unexpected error from span.FinishWait: %v", err) + } + } else { + span.Finish() + } + done <- struct{}{} + }() + if !expectTrace { + <-done + select { + case <-rt.reqc: + t.Errorf("Got a trace, expected none.") + case <-time.After(5 * time.Millisecond): + } + return nil + } else if !synchronous { + <-done + return <-rt.reqc + } else { + select { + case <-done: + t.Errorf("Synchronous Finish didn't wait for trace upload.") + return <-rt.reqc + case <-time.After(5 * time.Millisecond): + r := <-rt.reqc + <-done + return r + } + } +} + +func TestHeader(t *testing.T) { + tests := []struct { + header string + wantTraceID string + wantSpanID uint64 + wantOpts optionFlags + wantOK bool + }{ + { + header: "0123456789ABCDEF0123456789ABCDEF/1;o=1", + wantTraceID: "0123456789ABCDEF0123456789ABCDEF", + wantSpanID: 1, + wantOpts: 1, + wantOK: true, + }, + { + header: "0123456789ABCDEF0123456789ABCDEF/1;o=0", + wantTraceID: "0123456789ABCDEF0123456789ABCDEF", + wantSpanID: 1, + wantOpts: 0, + wantOK: true, + }, + { + header: "0123456789ABCDEF0123456789ABCDEF/1", + wantTraceID: "0123456789ABCDEF0123456789ABCDEF", + wantSpanID: 1, + wantOpts: 0, + wantOK: true, + }, + { + header: "", + wantTraceID: "", + wantSpanID: 0, + wantOpts: 0, + wantOK: false, + }, + } + for _, tt := range tests { + 
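+		// Each header follows the "TRACE_ID/SPAN_ID;o=OPTIONS" format
+		// described at https://cloud.google.com/trace/docs/faq; the
+		// ";o=OPTIONS" part may be omitted.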
traceID, parentSpanID, opts, _, ok := traceInfoFromHeader(tt.header)
+		if got, want := traceID, tt.wantTraceID; got != want {
+			t.Errorf("TraceID(%v) = %q; want %q", tt.header, got, want)
+		}
+		if got, want := parentSpanID, tt.wantSpanID; got != want {
+			t.Errorf("SpanID(%v) = %v; want %v", tt.header, got, want)
+		}
+		if got, want := opts, tt.wantOpts; got != want {
+			t.Errorf("Options(%v) = %v; want %v", tt.header, got, want)
+		}
+		if got, want := ok, tt.wantOK; got != want {
+			t.Errorf("Header exists (%v) = %v; want %v", tt.header, got, want)
+		}
+	}
+}
+
+func TestOutgoingReqHeader(t *testing.T) {
+	all, _ := NewLimitedSampler(1, 1<<16) // trace every request
+
+	tests := []struct {
+		desc           string
+		traceHeader    string
+		samplingPolicy SamplingPolicy
+
+		wantHeaderRe *regexp.Regexp
+	}{
+		{
+			desc:           "Parent span without sampling options, client samples all",
+			traceHeader:    "0123456789ABCDEF0123456789ABCDEF/1",
+			samplingPolicy: all,
+			wantHeaderRe:   regexp.MustCompile("0123456789ABCDEF0123456789ABCDEF/\\d+;o=1"),
+		},
+		{
+			desc:           "Parent span without sampling options, without client sampling",
+			traceHeader:    "0123456789ABCDEF0123456789ABCDEF/1",
+			samplingPolicy: nil,
+			wantHeaderRe:   regexp.MustCompile("0123456789ABCDEF0123456789ABCDEF/\\d+;o=0"),
+		},
+		{
+			desc:           "Parent span with o=1, client samples none",
+			traceHeader:    "0123456789ABCDEF0123456789ABCDEF/1;o=1",
+			samplingPolicy: nil,
+			wantHeaderRe:   regexp.MustCompile("0123456789ABCDEF0123456789ABCDEF/\\d+;o=1"),
+		},
+		{
+			desc:           "Parent span with o=0, without client sampling",
+			traceHeader:    "0123456789ABCDEF0123456789ABCDEF/1;o=0",
+			samplingPolicy: nil,
+			wantHeaderRe:   regexp.MustCompile("0123456789ABCDEF0123456789ABCDEF/\\d+;o=0"),
+		},
+	}
+
+	tc := newTestClient(nil)
+	for _, tt := range tests {
+		tc.SetSamplingPolicy(tt.samplingPolicy)
+		span := tc.SpanFromHeader("/foo", tt.traceHeader)
+
+		req, _ := http.NewRequest("GET", "http://localhost", nil)
+		span.NewRemoteChild(req)
+
+		if got, re := req.Header.Get(httpHeader), tt.wantHeaderRe; !re.MatchString(got) {
+			t.Errorf("%v (parent=%q): got header %q; want in format %q", tt.desc, tt.traceHeader, got, re)
+		}
+	}
+}
+
+func TestTrace(t *testing.T) {
+	t.Parallel()
+	testTrace(t, false, true)
+}
+
+func TestTraceWithWait(t *testing.T) {
+	testTrace(t, true, true)
+}
+
+func TestTraceFromHeader(t *testing.T) {
+	t.Parallel()
+	testTrace(t, false, false)
+}
+
+func TestTraceFromHeaderWithWait(t *testing.T) {
+	testTrace(t, true, false)
+}
+
+func TestNewSpan(t *testing.T) {
+	t.Skip("flaky")
+	const traceID = "0123456789ABCDEF0123456789ABCDEF"
+
+	rt := newFakeRoundTripper()
+	traceClient := newTestClient(rt)
+	span := traceClient.NewSpan("/foo")
+	span.trace.traceID = traceID
+
+	uploaded := makeRequests(t, span, rt, true, true)
+
+	if uploaded == nil {
+		t.Fatalf("No trace uploaded, expected one.")
+	}
+
+	expected := api.Traces{
+		Traces: []*api.Trace{
+			{
+				ProjectId: testProjectID,
+				Spans: []*api.TraceSpan{
+					{
+						Kind: "RPC_CLIENT",
+						Labels: map[string]string{
+							"trace.cloud.google.com/http/host":        "example.com",
+							"trace.cloud.google.com/http/method":      "GET",
+							"trace.cloud.google.com/http/status_code": "200",
+							"trace.cloud.google.com/http/url":         "http://example.com/bar",
+						},
+						Name: "example.com/bar",
+					},
+					{
+						Kind: "RPC_CLIENT",
+						Labels: map[string]string{
+							"trace.cloud.google.com/http/host":        "www.googleapis.com",
+							"trace.cloud.google.com/http/method":      "GET",
+							"trace.cloud.google.com/http/status_code": "200",
+							"trace.cloud.google.com/http/url":
"https://www.googleapis.com/compute/v1/projects/testproject/zones", + }, + Name: "www.googleapis.com/compute/v1/projects/testproject/zones", + }, + { + Kind: "RPC_CLIENT", + Labels: map[string]string{ + "trace.cloud.google.com/http/host": "www.googleapis.com", + "trace.cloud.google.com/http/method": "GET", + "trace.cloud.google.com/http/status_code": "200", + "trace.cloud.google.com/http/url": "https://www.googleapis.com/storage/v1/b/testbucket/o", + }, + Name: "www.googleapis.com/storage/v1/b/testbucket/o", + }, + &api.TraceSpan{ + Kind: "RPC_CLIENT", + Labels: nil, + Name: "/google.datastore.v1.Datastore/Lookup", + }, + &api.TraceSpan{ + Kind: "RPC_CLIENT", + Labels: map[string]string{"error": "rpc error: code = Unknown desc = lookup failed"}, + Name: "/google.datastore.v1.Datastore/Lookup", + }, + { + Kind: "SPAN_KIND_UNSPECIFIED", + Labels: map[string]string{}, + Name: "/foo", + }, + }, + TraceId: traceID, + }, + }, + } + + body, err := ioutil.ReadAll(uploaded.Body) + if err != nil { + t.Fatal(err) + } + var patch api.Traces + err = json.Unmarshal(body, &patch) + if err != nil { + t.Fatal(err) + } + + checkTraces(t, patch, expected) + + n := len(patch.Traces[0].Spans) + rootSpan := patch.Traces[0].Spans[n-1] + for i, s := range patch.Traces[0].Spans { + if a, b := s.StartTime, s.EndTime; a > b { + t.Errorf("span %d start time is later than its end time (%q, %q)", i, a, b) + } + if a, b := rootSpan.StartTime, s.StartTime; a > b { + t.Errorf("trace start time is later than span %d start time (%q, %q)", i, a, b) + } + if a, b := s.EndTime, rootSpan.EndTime; a > b { + t.Errorf("span %d end time is later than trace end time (%q, %q)", i, a, b) + } + if i > 1 && i < n-1 { + if a, b := patch.Traces[0].Spans[i-1].EndTime, s.StartTime; a > b { + t.Errorf("span %d end time is later than span %d start time (%q, %q)", i-1, i, a, b) + } + } + } + + if x := rootSpan.ParentSpanId; x != 0 { + t.Errorf("Incorrect ParentSpanId: got %d want %d", x, 0) + } + for i, s := range patch.Traces[0].Spans { + if x, y := rootSpan.SpanId, s.ParentSpanId; i < n-1 && x != y { + t.Errorf("Incorrect ParentSpanId in span %d: got %d want %d", i, y, x) + } + } + for i, s := range patch.Traces[0].Spans { + s.EndTime = "" + labels := &expected.Traces[0].Spans[i].Labels + for key, value := range *labels { + if v, ok := s.Labels[key]; !ok { + t.Errorf("Span %d is missing Label %q:%q", i, key, value) + } else if key == "trace.cloud.google.com/http/url" { + if !strings.HasPrefix(v, value) { + t.Errorf("Span %d Label %q: got value %q want prefix %q", i, key, v, value) + } + } else if v != value { + t.Errorf("Span %d Label %q: got value %q want %q", i, key, v, value) + } + } + for key := range s.Labels { + if _, ok := (*labels)[key]; key != "trace.cloud.google.com/stacktrace" && !ok { + t.Errorf("Span %d: unexpected label %q", i, key) + } + } + *labels = nil + s.Labels = nil + s.ParentSpanId = 0 + if s.SpanId == 0 { + t.Errorf("Incorrect SpanId: got 0 want nonzero") + } + s.SpanId = 0 + s.StartTime = "" + } + if !testutil.Equal(patch, expected) { + got, _ := json.Marshal(patch) + want, _ := json.Marshal(expected) + t.Errorf("PatchTraces request: got %s want %s", got, want) + } +} + +func testTrace(t *testing.T, synchronous bool, fromRequest bool) { + t.Skip("flaky") + const header = `0123456789ABCDEF0123456789ABCDEF/42;o=3` + rt := newFakeRoundTripper() + traceClient := newTestClient(rt) + + span := traceClient.SpanFromHeader("/foo", header) + headerOrReqLabels := map[string]string{} + headerOrReqName := "/foo" + + if fromRequest 
{ + req, err := http.NewRequest("GET", "http://example.com/foo", nil) + if err != nil { + t.Fatal(err) + } + req.Header.Set("X-Cloud-Trace-Context", header) + span = traceClient.SpanFromRequest(req) + headerOrReqLabels = map[string]string{ + "trace.cloud.google.com/http/host": "example.com", + "trace.cloud.google.com/http/method": "GET", + "trace.cloud.google.com/http/url": "http://example.com/foo", + } + headerOrReqName = "example.com/foo" + } + + uploaded := makeRequests(t, span, rt, synchronous, true) + if uploaded == nil { + t.Fatalf("No trace uploaded, expected one.") + } + + expected := api.Traces{ + Traces: []*api.Trace{ + { + ProjectId: testProjectID, + Spans: []*api.TraceSpan{ + { + Kind: "RPC_CLIENT", + Labels: map[string]string{ + "trace.cloud.google.com/http/host": "example.com", + "trace.cloud.google.com/http/method": "GET", + "trace.cloud.google.com/http/status_code": "200", + "trace.cloud.google.com/http/url": "http://example.com/bar", + }, + Name: "example.com/bar", + }, + { + Kind: "RPC_CLIENT", + Labels: map[string]string{ + "trace.cloud.google.com/http/host": "www.googleapis.com", + "trace.cloud.google.com/http/method": "GET", + "trace.cloud.google.com/http/status_code": "200", + "trace.cloud.google.com/http/url": "https://www.googleapis.com/compute/v1/projects/testproject/zones", + }, + Name: "www.googleapis.com/compute/v1/projects/testproject/zones", + }, + { + Kind: "RPC_CLIENT", + Labels: map[string]string{ + "trace.cloud.google.com/http/host": "www.googleapis.com", + "trace.cloud.google.com/http/method": "GET", + "trace.cloud.google.com/http/status_code": "200", + "trace.cloud.google.com/http/url": "https://www.googleapis.com/storage/v1/b/testbucket/o", + }, + Name: "www.googleapis.com/storage/v1/b/testbucket/o", + }, + &api.TraceSpan{ + Kind: "RPC_CLIENT", + Labels: nil, + Name: "/google.datastore.v1.Datastore/Lookup", + }, + &api.TraceSpan{ + Kind: "RPC_CLIENT", + Labels: map[string]string{"error": "rpc error: code = Unknown desc = lookup failed"}, + Name: "/google.datastore.v1.Datastore/Lookup", + }, + { + Kind: "RPC_SERVER", + Labels: headerOrReqLabels, + Name: headerOrReqName, + }, + }, + TraceId: "0123456789ABCDEF0123456789ABCDEF", + }, + }, + } + + body, err := ioutil.ReadAll(uploaded.Body) + if err != nil { + t.Fatal(err) + } + var patch api.Traces + err = json.Unmarshal(body, &patch) + if err != nil { + t.Fatal(err) + } + + checkTraces(t, patch, expected) + + n := len(patch.Traces[0].Spans) + rootSpan := patch.Traces[0].Spans[n-1] + for i, s := range patch.Traces[0].Spans { + if a, b := s.StartTime, s.EndTime; a > b { + t.Errorf("span %d start time is later than its end time (%q, %q)", i, a, b) + } + if a, b := rootSpan.StartTime, s.StartTime; a > b { + t.Errorf("trace start time is later than span %d start time (%q, %q)", i, a, b) + } + if a, b := s.EndTime, rootSpan.EndTime; a > b { + t.Errorf("span %d end time is later than trace end time (%q, %q)", i, a, b) + } + if i > 1 && i < n-1 { + if a, b := patch.Traces[0].Spans[i-1].EndTime, s.StartTime; a > b { + t.Errorf("span %d end time is later than span %d start time (%q, %q)", i-1, i, a, b) + } + } + } + + if x := rootSpan.ParentSpanId; x != 42 { + t.Errorf("Incorrect ParentSpanId: got %d want %d", x, 42) + } + for i, s := range patch.Traces[0].Spans { + if x, y := rootSpan.SpanId, s.ParentSpanId; i < n-1 && x != y { + t.Errorf("Incorrect ParentSpanId in span %d: got %d want %d", i, y, x) + } + } + for i, s := range patch.Traces[0].Spans { + s.EndTime = "" + labels := &expected.Traces[0].Spans[i].Labels + 
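+		// Expected labels are checked individually (URLs by prefix, since
+		// real requests may append query parameters), and the volatile
+		// fields are then zeroed so that the deep comparison below only
+		// covers the stable parts of each span.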
for key, value := range *labels { + if v, ok := s.Labels[key]; !ok { + t.Errorf("Span %d is missing Label %q:%q", i, key, value) + } else if key == "trace.cloud.google.com/http/url" { + if !strings.HasPrefix(v, value) { + t.Errorf("Span %d Label %q: got value %q want prefix %q", i, key, v, value) + } + } else if v != value { + t.Errorf("Span %d Label %q: got value %q want %q", i, key, v, value) + } + } + for key := range s.Labels { + if _, ok := (*labels)[key]; key != "trace.cloud.google.com/stacktrace" && !ok { + t.Errorf("Span %d: unexpected label %q", i, key) + } + } + *labels = nil + s.Labels = nil + s.ParentSpanId = 0 + if s.SpanId == 0 { + t.Errorf("Incorrect SpanId: got 0 want nonzero") + } + s.SpanId = 0 + s.StartTime = "" + } + if !testutil.Equal(patch, expected) { + got, _ := json.Marshal(patch) + want, _ := json.Marshal(expected) + t.Errorf("PatchTraces request: got %s \n\n want %s", got, want) + } +} + +func TestNoTrace(t *testing.T) { + testNoTrace(t, false, true) +} + +func TestNoTraceWithWait(t *testing.T) { + testNoTrace(t, true, true) +} + +func TestNoTraceFromHeader(t *testing.T) { + testNoTrace(t, false, false) +} + +func TestNoTraceFromHeaderWithWait(t *testing.T) { + testNoTrace(t, true, false) +} + +func testNoTrace(t *testing.T, synchronous bool, fromRequest bool) { + for _, header := range []string{ + `0123456789ABCDEF0123456789ABCDEF/42;o=2`, + `0123456789ABCDEF0123456789ABCDEF/42;o=0`, + `0123456789ABCDEF0123456789ABCDEF/42`, + `0123456789ABCDEF0123456789ABCDEF`, + ``, + } { + rt := newFakeRoundTripper() + traceClient := newTestClient(rt) + var span *Span + if fromRequest { + req, err := http.NewRequest("GET", "http://example.com/foo", nil) + if header != "" { + req.Header.Set("X-Cloud-Trace-Context", header) + } + if err != nil { + t.Fatal(err) + } + span = traceClient.SpanFromRequest(req) + } else { + span = traceClient.SpanFromHeader("/foo", header) + } + uploaded := makeRequests(t, span, rt, synchronous, false) + if uploaded != nil { + t.Errorf("Got a trace, expected none.") + } + } +} + +func TestSample(t *testing.T) { + // A deterministic test of the sampler logic. + type testCase struct { + rate float64 + maxqps float64 + want int + } + const delta = 25 * time.Millisecond + for _, test := range []testCase{ + // qps won't matter, so we will sample half of the 79 calls + {0.50, 100, 40}, + // with 1 qps and a burst of 2, we will sample twice in second #1, once in the partial second #2 + {0.50, 1, 3}, + } { + sp, err := NewLimitedSampler(test.rate, test.maxqps) + if err != nil { + t.Fatal(err) + } + s := sp.(*sampler) + sampled := 0 + tm := time.Now() + for i := 0; i < 80; i++ { + if s.sample(Parameters{}, tm, float64(i%2)).Sample { + sampled++ + } + tm = tm.Add(delta) + } + if sampled != test.want { + t.Errorf("rate=%f, maxqps=%f: got %d samples, want %d", test.rate, test.maxqps, sampled, test.want) + } + } +} + +func TestSampling(t *testing.T) { + t.Parallel() + // This scope tests sampling in a larger context, with real time and randomness. + wg := sync.WaitGroup{} + type testCase struct { + rate float64 + maxqps float64 + expectedRange [2]int + } + for _, test := range []testCase{ + {0, 5, [2]int{0, 0}}, + {5, 0, [2]int{0, 0}}, + {0.50, 100, [2]int{20, 60}}, + {0.50, 1, [2]int{3, 4}}, // Windows, with its less precise clock, sometimes gives 4. 
+ } { + wg.Add(1) + go func(test testCase) { + rt := newFakeRoundTripper() + traceClient := newTestClient(rt) + traceClient.bundler.BundleByteLimit = 1 + p, err := NewLimitedSampler(test.rate, test.maxqps) + if err != nil { + t.Fatalf("NewLimitedSampler: %v", err) + } + traceClient.SetSamplingPolicy(p) + ticker := time.NewTicker(25 * time.Millisecond) + sampled := 0 + for i := 0; i < 79; i++ { + req, err := http.NewRequest("GET", "http://example.com/foo", nil) + if err != nil { + t.Fatal(err) + } + span := traceClient.SpanFromRequest(req) + span.Finish() + select { + case <-rt.reqc: + <-ticker.C + sampled++ + case <-ticker.C: + } + } + ticker.Stop() + if test.expectedRange[0] > sampled || sampled > test.expectedRange[1] { + t.Errorf("rate=%f, maxqps=%f: got %d samples want ∈ %v", test.rate, test.maxqps, sampled, test.expectedRange) + } + wg.Done() + }(test) + } + wg.Wait() +} + +func TestBundling(t *testing.T) { + t.Parallel() + rt := newFakeRoundTripper() + traceClient := newTestClient(rt) + traceClient.bundler.DelayThreshold = time.Second / 2 + traceClient.bundler.BundleCountThreshold = 10 + p, err := NewLimitedSampler(1, 99) // sample every request. + if err != nil { + t.Fatalf("NewLimitedSampler: %v", err) + } + traceClient.SetSamplingPolicy(p) + + for i := 0; i < 35; i++ { + go func() { + req, err := http.NewRequest("GET", "http://example.com/foo", nil) + if err != nil { + t.Fatal(err) + } + span := traceClient.SpanFromRequest(req) + span.Finish() + }() + } + + // Read the first three bundles. + <-rt.reqc + <-rt.reqc + <-rt.reqc + + // Test that the fourth bundle isn't sent early. + select { + case <-rt.reqc: + t.Errorf("bundle sent too early") + case <-time.After(time.Second / 4): + <-rt.reqc + } + + // Test that there aren't extra bundles. + select { + case <-rt.reqc: + t.Errorf("too many bundles sent") + case <-time.After(time.Second): + } +} + +func TestWeights(t *testing.T) { + const ( + expectedNumTraced = 10100 + numTracedEpsilon = 100 + expectedTotalWeight = 50000 + totalWeightEpsilon = 5000 + ) + rng := rand.New(rand.NewSource(1)) + const delta = 2 * time.Millisecond + for _, headerRate := range []float64{0.0, 0.5, 1.0} { + // Simulate 10 seconds of requests arriving at 500qps. + // + // The sampling policy tries to sample 25% of them, but has a qps limit of + // 100, so it will not be able to. The returned weight should be higher + // for some sampled requests to compensate. + // + // headerRate is the fraction of incoming requests that have a trace header + // set. The qps limit should not be exceeded, even if headerRate is high. 
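+		// As a rough check on the constants above: 25% of 500 qps would be
+		// 125 qps, so the 100 qps cap binds and about one request in five
+		// is traced, giving numTraced ≈ 50000/5 = 10000. The weights of
+		// the sampled requests should estimate the size of the whole
+		// population, so totalWeight ≈ 50000.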
+ sp, err := NewLimitedSampler(0.25, 100) + if err != nil { + t.Fatal(err) + } + s := sp.(*sampler) + tm := time.Now() + totalWeight := 0.0 + numTraced := 0 + seenLargeWeight := false + for i := 0; i < 50000; i++ { + d := s.sample(Parameters{HasTraceHeader: rng.Float64() < headerRate}, tm, rng.Float64()) + if d.Trace { + numTraced++ + } + if d.Sample { + totalWeight += d.Weight + if x := int(d.Weight) / 4; x <= 0 || x >= 100 || d.Weight != float64(x)*4.0 { + t.Errorf("weight: got %f, want a small positive multiple of 4", d.Weight) + } + if d.Weight > 4 { + seenLargeWeight = true + } + } + tm = tm.Add(delta) + } + if !seenLargeWeight { + t.Errorf("headerRate %f: never saw sample weight higher than 4.", headerRate) + } + if numTraced < expectedNumTraced-numTracedEpsilon || expectedNumTraced+numTracedEpsilon < numTraced { + t.Errorf("headerRate %f: got %d traced requests, want ∈ [%d, %d]", headerRate, numTraced, expectedNumTraced-numTracedEpsilon, expectedNumTraced+numTracedEpsilon) + } + if totalWeight < expectedTotalWeight-totalWeightEpsilon || expectedTotalWeight+totalWeightEpsilon < totalWeight { + t.Errorf("headerRate %f: got total weight %f want ∈ [%d, %d]", headerRate, totalWeight, expectedTotalWeight-totalWeightEpsilon, expectedTotalWeight+totalWeightEpsilon) + } + } +} + +type alwaysTrace struct{} + +func (a alwaysTrace) Sample(p Parameters) Decision { + return Decision{Trace: true} +} + +type neverTrace struct{} + +func (a neverTrace) Sample(p Parameters) Decision { + return Decision{Trace: false} +} + +func TestPropagation(t *testing.T) { + rt := newFakeRoundTripper() + traceClient := newTestClient(rt) + for _, header := range []string{ + `0123456789ABCDEF0123456789ABCDEF/42;o=0`, + `0123456789ABCDEF0123456789ABCDEF/42;o=1`, + `0123456789ABCDEF0123456789ABCDEF/42;o=2`, + `0123456789ABCDEF0123456789ABCDEF/42;o=3`, + `0123456789ABCDEF0123456789ABCDEF/0;o=0`, + `0123456789ABCDEF0123456789ABCDEF/0;o=1`, + `0123456789ABCDEF0123456789ABCDEF/0;o=2`, + `0123456789ABCDEF0123456789ABCDEF/0;o=3`, + ``, + } { + for _, policy := range []SamplingPolicy{ + nil, + alwaysTrace{}, + neverTrace{}, + } { + traceClient.SetSamplingPolicy(policy) + req, err := http.NewRequest("GET", "http://example.com/foo", nil) + if err != nil { + t.Fatal(err) + } + if header != "" { + req.Header.Set("X-Cloud-Trace-Context", header) + } + + span := traceClient.SpanFromRequest(req) + + req2, err := http.NewRequest("GET", "http://example.com/bar", nil) + if err != nil { + t.Fatal(err) + } + req3, err := http.NewRequest("GET", "http://example.com/baz", nil) + if err != nil { + t.Fatal(err) + } + span.NewRemoteChild(req2) + span.NewRemoteChild(req3) + + var ( + t1, t2, t3 string + s1, s2, s3 uint64 + o1, o2, o3 uint64 + ) + fmt.Sscanf(header, "%32s/%d;o=%d", &t1, &s1, &o1) + fmt.Sscanf(req2.Header.Get("X-Cloud-Trace-Context"), "%32s/%d;o=%d", &t2, &s2, &o2) + fmt.Sscanf(req3.Header.Get("X-Cloud-Trace-Context"), "%32s/%d;o=%d", &t3, &s3, &o3) + + if header == "" { + if t2 != t3 { + t.Errorf("expected the same trace ID in child requests, got %q %q", t2, t3) + } + } else { + if t2 != t1 || t3 != t1 { + t.Errorf("trace IDs should be passed to child requests") + } + } + trace := policy == alwaysTrace{} || policy == nil && (o1&1) != 0 + if header == "" { + if trace && (s2 == 0 || s3 == 0) { + t.Errorf("got span IDs %d %d in child requests, want nonzero", s2, s3) + } + if trace && s2 == s3 { + t.Errorf("got span IDs %d %d in child requests, should be different", s2, s3) + } + if !trace && (s2 != 0 || s3 != 0) { + t.Errorf("got 
span IDs %d %d in child requests, want zero", s2, s3)
+				}
+			} else {
+				if trace && (s2 == s1 || s3 == s1 || s2 == s3) {
+					t.Errorf("parent span IDs in input and outputs should be all different, got %d %d %d", s1, s2, s3)
+				}
+				if !trace && (s2 != s1 || s3 != s1) {
+					t.Errorf("parent span ID in input, %d, should have been equal to parent span IDs in output: %d %d", s1, s2, s3)
+				}
+			}
+			expectTraceOption := policy == alwaysTrace{} || (o1&1) != 0
+			if expectTraceOption != ((o2&1) != 0) || expectTraceOption != ((o3&1) != 0) {
+				t.Errorf("tracing flag in child requests should be %t, got options %d %d", expectTraceOption, o2, o3)
+			}
+		}
+	}
+}
+
+func BenchmarkSpanFromHeader(b *testing.B) {
+	const header = `0123456789ABCDEF0123456789ABCDEF/42;o=0`
+	const name = "/foo"
+
+	rt := newFakeRoundTripper()
+	traceClient := newTestClient(rt)
+	for n := 0; n < b.N; n++ {
+		traceClient.SpanFromHeader(name, header)
+	}
+}
+
+func checkTraces(t *testing.T, patch, expected api.Traces) {
+	if len(patch.Traces) != len(expected.Traces) || len(patch.Traces[0].Spans) != len(expected.Traces[0].Spans) {
+		diff := testutil.Diff(patch.Traces, expected.Traces)
+		t.Logf("diff:\n%s", diff)
+		got, _ := json.Marshal(patch)
+		want, _ := json.Marshal(expected)
+		t.Fatalf("PatchTraces request: got %s want %s", got, want)
+	}
+}
diff --git a/vendor/cloud.google.com/go/translate/examples_test.go b/vendor/cloud.google.com/go/translate/examples_test.go
new file mode 100644
index 0000000..8746bd0
--- /dev/null
+++ b/vendor/cloud.google.com/go/translate/examples_test.go
@@ -0,0 +1,81 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package translate_test
+
+import (
+	"fmt"
+
+	"cloud.google.com/go/translate"
+	"golang.org/x/net/context"
+	"golang.org/x/text/language"
+)
+
+func ExampleNewClient() {
+	ctx := context.Background()
+	client, err := translate.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	// Use the client.
+
+	// Close the client when finished.
+	if err := client.Close(); err != nil {
+		// TODO: handle error.
+	}
+}
+
+func ExampleClient_Translate() {
+	ctx := context.Background()
+	client, err := translate.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	translations, err := client.Translate(ctx,
+		[]string{"Le singe est sur la branche"}, language.English,
+		&translate.Options{
+			Source: language.French,
+			Format: translate.Text,
+		})
+	if err != nil {
+		// TODO: handle error.
+	}
+	fmt.Println(translations[0].Text)
+}
+
+func ExampleClient_DetectLanguage() {
+	ctx := context.Background()
+	client, err := translate.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	ds, err := client.DetectLanguage(ctx, []string{"Today is Monday"})
+	if err != nil {
+		// TODO: handle error.
+	}
+	fmt.Println(ds)
+}
+
+func ExampleClient_SupportedLanguages() {
+	ctx := context.Background()
+	client, err := translate.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+ } + langs, err := client.SupportedLanguages(ctx, language.English) + if err != nil { + // TODO: handle error. + } + fmt.Println(langs) +} diff --git a/vendor/cloud.google.com/go/translate/internal/translate/v2/README b/vendor/cloud.google.com/go/translate/internal/translate/v2/README new file mode 100644 index 0000000..a4f22c6 --- /dev/null +++ b/vendor/cloud.google.com/go/translate/internal/translate/v2/README @@ -0,0 +1,12 @@ +translate-nov2016-api.json is a hand-modified version of translate-api.json. +It correctly reflects the API as of 2016-11-15. + +Differences: + +- Change to base URL +- Addition of OAuth scopes + +To generate: + + + diff --git a/vendor/cloud.google.com/go/translate/internal/translate/v2/regen.sh b/vendor/cloud.google.com/go/translate/internal/translate/v2/regen.sh new file mode 100755 index 0000000..3aec1d8 --- /dev/null +++ b/vendor/cloud.google.com/go/translate/internal/translate/v2/regen.sh @@ -0,0 +1,29 @@ +#!/bin/bash -e + + +(cd $GOPATH/src/google.golang.org/api; make generator) + +$GOPATH/bin/google-api-go-generator \ + -api_json_file translate-nov2016-api.json \ + -api_pkg_base cloud.google.com/go/translate/internal \ + -output translate-nov2016-gen.nolicense + +cat - translate-nov2016-gen.nolicense > translate-nov2016-gen.go <" + s + "" + } + tr = translate(htmlify(test.input), test.target, nil) + if got, want := tr.Text, htmlify(test.output); got != want { + t.Errorf("html: got %q, want %q", got, want) + } + // Using the HTML format behaves the same. + tr = translate(htmlify(test.input), test.target, &Options{Format: HTML}) + if got, want := tr.Text, htmlify(test.output); got != want { + t.Errorf("html: got %q, want %q", got, want) + } + } +} + +// This tests the beta "nmt" model. +func TestTranslateModel(t *testing.T) { + ctx := context.Background() + c := initTest(ctx, t) + defer c.Close() + + trs, err := c.Translate(ctx, []string{"Hello"}, language.French, &Options{Model: "nmt"}) + if err != nil { + t.Fatal(err) + } + if len(trs) != 1 { + t.Fatalf("wanted one Translation, got %d", len(trs)) + } + tr := trs[0] + if got, want := tr.Text, "Bonjour"; got != want { + t.Errorf("text: got %q, want %q", got, want) + } + if got, want := tr.Model, "nmt"; got != want { + t.Errorf("model: got %q, want %q", got, want) + } +} + +func TestTranslateMultipleInputs(t *testing.T) { + ctx := context.Background() + c := initTest(ctx, t) + defer c.Close() + + inputs := []string{ + "When you're a Jet, you're a Jet all the way", + "From your first cigarette to your last dying day", + "When you're a Jet if the spit hits the fan", + "You got brothers around, you're a family man", + } + ts, err := c.Translate(ctx, inputs, language.French, nil) + if err != nil { + t.Fatal(err) + } + if got, want := len(ts), len(inputs); got != want { + t.Fatalf("got %d Translations, wanted %d", got, want) + } +} + +func TestTranslateErrors(t *testing.T) { + ctx := context.Background() + c := initTest(ctx, t) + defer c.Close() + + for _, test := range []struct { + ctx context.Context + target language.Tag + inputs []string + opts *Options + }{ + {ctx, language.English, nil, nil}, + {ctx, language.Und, []string{"input"}, nil}, + {ctx, language.English, []string{}, nil}, + {ctx, language.English, []string{"input"}, &Options{Format: "random"}}, + } { + _, err := c.Translate(test.ctx, test.inputs, test.target, test.opts) + if err == nil { + t.Errorf("%+v: got nil, want error", test) + } + } +} + +func TestDetectLanguage(t *testing.T) { + ctx := context.Background() + c := initTest(ctx, t) + defer 
c.Close() + ds, err := c.DetectLanguage(ctx, []string{ + "Today is Monday", + "Aujourd'hui est lundi", + }) + if err != nil { + t.Fatal(err) + } + if len(ds) != 2 { + t.Fatalf("got %d detection lists, want 2", len(ds)) + } + checkDetections(t, ds[0], language.English) + checkDetections(t, ds[1], language.French) +} + +func checkDetections(t *testing.T, ds []Detection, want language.Tag) { + for _, d := range ds { + if d.Language == want { + return + } + } + t.Errorf("%v: missing %s", ds, want) +} + +// A small subset of the supported languages. +var supportedLangs = []Language{ + {Name: "Danish", Tag: language.Danish}, + {Name: "English", Tag: language.English}, + {Name: "French", Tag: language.French}, + {Name: "German", Tag: language.German}, + {Name: "Greek", Tag: language.Greek}, + {Name: "Hindi", Tag: language.Hindi}, + {Name: "Hungarian", Tag: language.Hungarian}, + {Name: "Italian", Tag: language.Italian}, + {Name: "Russian", Tag: language.Russian}, + {Name: "Turkish", Tag: language.Turkish}, +} + +func TestSupportedLanguages(t *testing.T) { + ctx := context.Background() + c := initTest(ctx, t) + defer c.Close() + got, err := c.SupportedLanguages(ctx, language.English) + if err != nil { + t.Fatal(err) + } + want := map[language.Tag]Language{} + for _, sl := range supportedLangs { + want[sl.Tag] = sl + } + for _, g := range got { + w, ok := want[g.Tag] + if !ok { + continue + } + if g != w { + t.Errorf("got %+v, want %+v", g, w) + } + delete(want, g.Tag) + } + if len(want) > 0 { + t.Errorf("missing: %+v", want) + } +} diff --git a/vendor/cloud.google.com/go/videointelligence/apiv1/doc.go b/vendor/cloud.google.com/go/videointelligence/apiv1/doc.go new file mode 100644 index 0000000..15652bf --- /dev/null +++ b/vendor/cloud.google.com/go/videointelligence/apiv1/doc.go @@ -0,0 +1,46 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package videointelligence is an auto-generated package for the +// Cloud Video Intelligence API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to be subject to changes. +// +// Cloud Video Intelligence API. +package videointelligence // import "cloud.google.com/go/videointelligence/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. 
+func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + } +} diff --git a/vendor/cloud.google.com/go/videointelligence/apiv1/mock_test.go b/vendor/cloud.google.com/go/videointelligence/apiv1/mock_test.go new file mode 100644 index 0000000..b18f31e --- /dev/null +++ b/vendor/cloud.google.com/go/videointelligence/apiv1/mock_test.go @@ -0,0 +1,180 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package videointelligence + +import ( + videointelligencepb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockVideoIntelligenceServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + videointelligencepb.VideoIntelligenceServiceServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockVideoIntelligenceServer) AnnotateVideo(ctx context.Context, req *videointelligencepb.AnnotateVideoRequest) (*longrunningpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
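+//
+// Tests dial the in-process mock server through it, for example:
+//
+//	c, err := NewClient(context.Background(), clientOpt)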
+var clientOpt option.ClientOption + +var ( + mockVideoIntelligence mockVideoIntelligenceServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + videointelligencepb.RegisterVideoIntelligenceServiceServer(serv, &mockVideoIntelligence) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestVideoIntelligenceServiceAnnotateVideo(t *testing.T) { + var expectedResponse *videointelligencepb.AnnotateVideoResponse = &videointelligencepb.AnnotateVideoResponse{} + + mockVideoIntelligence.err = nil + mockVideoIntelligence.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockVideoIntelligence.resps = append(mockVideoIntelligence.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var request *videointelligencepb.AnnotateVideoRequest = &videointelligencepb.AnnotateVideoRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.AnnotateVideo(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockVideoIntelligence.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestVideoIntelligenceServiceAnnotateVideoError(t *testing.T) { + errCode := codes.PermissionDenied + mockVideoIntelligence.err = nil + mockVideoIntelligence.resps = append(mockVideoIntelligence.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var request *videointelligencepb.AnnotateVideoRequest = &videointelligencepb.AnnotateVideoRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.AnnotateVideo(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/videointelligence/apiv1/video_intelligence_client.go b/vendor/cloud.google.com/go/videointelligence/apiv1/video_intelligence_client.go new file mode 100644 index 0000000..5393001 --- /dev/null +++ b/vendor/cloud.google.com/go/videointelligence/apiv1/video_intelligence_client.go @@ -0,0 +1,225 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package videointelligence + +import ( + "time" + + "cloud.google.com/go/internal/version" + "cloud.google.com/go/longrunning" + lroauto "cloud.google.com/go/longrunning/autogen" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + videointelligencepb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. +type CallOptions struct { + AnnotateVideo []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("videointelligence.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 120000 * time.Millisecond, + Multiplier: 2.5, + }) + }), + }, + } + return &CallOptions{ + AnnotateVideo: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with Cloud Video Intelligence API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client videointelligencepb.VideoIntelligenceServiceClient + + // LROClient is used internally to handle longrunning operations. + // It is exposed so that its CallOptions can be modified if required. + // Users should not Close this client. + LROClient *lroauto.OperationsClient + + // The call options for this service. + CallOptions *CallOptions + + // The metadata to be sent with each request. + Metadata metadata.MD +} + +// NewClient creates a new video intelligence service client. +// +// Service that implements Google Cloud Video Intelligence API. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: videointelligencepb.NewVideoIntelligenceServiceClient(conn), + } + c.setGoogleClientInfo() + + c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn)) + if err != nil { + // This error "should not happen", since we are just reusing old connection + // and never actually need to dial. + // If this does happen, we could leak conn. However, we cannot close conn: + // If the user invoked the function with option.WithGRPCConn, + // we would close a connection that's still in use. + // TODO(pongad): investigate error conditions. + return nil, err + } + return c, nil +} + +// Connection returns the client's connection to the API service. 
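+//
+// It can be used, for example, to share the connection with another client,
+// as NewClient does for the long-running-operations client (a sketch):
+//
+//	lro, err := lroauto.NewOperationsClient(ctx, option.WithGRPCConn(c.Connection()))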
+func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.Metadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// AnnotateVideo performs asynchronous video annotation. Progress and results can be +// retrieved through the google.longrunning.Operations interface. +// Operation.metadata contains AnnotateVideoProgress (progress). +// Operation.response contains AnnotateVideoResponse (results). +func (c *Client) AnnotateVideo(ctx context.Context, req *videointelligencepb.AnnotateVideoRequest, opts ...gax.CallOption) (*AnnotateVideoOperation, error) { + ctx = insertMetadata(ctx, c.Metadata) + opts = append(c.CallOptions.AnnotateVideo[0:len(c.CallOptions.AnnotateVideo):len(c.CallOptions.AnnotateVideo)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AnnotateVideo(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return &AnnotateVideoOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, resp), + }, nil +} + +// AnnotateVideoOperation manages a long-running operation from AnnotateVideo. +type AnnotateVideoOperation struct { + lro *longrunning.Operation +} + +// AnnotateVideoOperation returns a new AnnotateVideoOperation from a given name. +// The name must be that of a previously created AnnotateVideoOperation, possibly from a different process. +func (c *Client) AnnotateVideoOperation(name string) *AnnotateVideoOperation { + return &AnnotateVideoOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning the response and any errors encountered. +// +// See documentation of Poll for error-handling information. +func (op *AnnotateVideoOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*videointelligencepb.AnnotateVideoResponse, error) { + var resp videointelligencepb.AnnotateVideoResponse + if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil { + return nil, err + } + return &resp, nil +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true, and the response of the operation is returned. +// If Poll succeeds and the operation has not completed, the returned response and error are both nil. 
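+//
+// A caller that polls manually instead of using Wait might loop as follows
+// (a sketch; the interval is arbitrary):
+//
+//	for !op.Done() {
+//		resp, err := op.Poll(ctx)
+//		if err != nil {
+//			// TODO: Handle error.
+//		}
+//		if resp != nil {
+//			// TODO: Use resp.
+//		}
+//		time.Sleep(30 * time.Second)
+//	}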
+func (op *AnnotateVideoOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*videointelligencepb.AnnotateVideoResponse, error) { + var resp videointelligencepb.AnnotateVideoResponse + if err := op.lro.Poll(ctx, &resp, opts...); err != nil { + return nil, err + } + if !op.Done() { + return nil, nil + } + return &resp, nil +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. +func (op *AnnotateVideoOperation) Metadata() (*videointelligencepb.AnnotateVideoProgress, error) { + var meta videointelligencepb.AnnotateVideoProgress + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *AnnotateVideoOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *AnnotateVideoOperation) Name() string { + return op.lro.Name() +} diff --git a/vendor/cloud.google.com/go/videointelligence/apiv1/video_intelligence_client_example_test.go b/vendor/cloud.google.com/go/videointelligence/apiv1/video_intelligence_client_example_test.go new file mode 100644 index 0000000..296ea3d --- /dev/null +++ b/vendor/cloud.google.com/go/videointelligence/apiv1/video_intelligence_client_example_test.go @@ -0,0 +1,56 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package videointelligence_test + +import ( + "cloud.google.com/go/videointelligence/apiv1" + "golang.org/x/net/context" + videointelligencepb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := videointelligence.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_AnnotateVideo() { + ctx := context.Background() + c, err := videointelligence.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &videointelligencepb.AnnotateVideoRequest{ + // TODO: Fill request struct fields. + } + op, err := c.AnnotateVideo(ctx, req) + if err != nil { + // TODO: Handle error. + } + + resp, err := op.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
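+	// For example, resp.AnnotationResults (per the v1 proto) holds one
+	// entry of detected annotations per processed video.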
+ _ = resp +} diff --git a/vendor/cloud.google.com/go/videointelligence/apiv1beta1/doc.go b/vendor/cloud.google.com/go/videointelligence/apiv1beta1/doc.go new file mode 100644 index 0000000..f003315 --- /dev/null +++ b/vendor/cloud.google.com/go/videointelligence/apiv1beta1/doc.go @@ -0,0 +1,46 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package videointelligence is an auto-generated package for the +// Google Cloud Video Intelligence API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// Google Cloud Video Intelligence API. +package videointelligence // import "cloud.google.com/go/videointelligence/apiv1beta1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + } +} diff --git a/vendor/cloud.google.com/go/videointelligence/apiv1beta1/mock_test.go b/vendor/cloud.google.com/go/videointelligence/apiv1beta1/mock_test.go new file mode 100644 index 0000000..bb9f3c2 --- /dev/null +++ b/vendor/cloud.google.com/go/videointelligence/apiv1beta1/mock_test.go @@ -0,0 +1,192 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package videointelligence + +import ( + videointelligencepb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockVideoIntelligenceServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + videointelligencepb.VideoIntelligenceServiceServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockVideoIntelligenceServer) AnnotateVideo(ctx context.Context, req *videointelligencepb.AnnotateVideoRequest) (*longrunningpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. +var clientOpt option.ClientOption + +var ( + mockVideoIntelligence mockVideoIntelligenceServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + videointelligencepb.RegisterVideoIntelligenceServiceServer(serv, &mockVideoIntelligence) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestVideoIntelligenceServiceAnnotateVideo(t *testing.T) { + var expectedResponse *videointelligencepb.AnnotateVideoResponse = &videointelligencepb.AnnotateVideoResponse{} + + mockVideoIntelligence.err = nil + mockVideoIntelligence.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockVideoIntelligence.resps = append(mockVideoIntelligence.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var inputUri string = "gs://demomaker/cat.mp4" + var featuresElement videointelligencepb.Feature = videointelligencepb.Feature_LABEL_DETECTION + var features = []videointelligencepb.Feature{featuresElement} + var request = &videointelligencepb.AnnotateVideoRequest{ + InputUri: inputUri, + Features: features, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.AnnotateVideo(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockVideoIntelligence.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; 
!proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestVideoIntelligenceServiceAnnotateVideoError(t *testing.T) { + errCode := codes.PermissionDenied + mockVideoIntelligence.err = nil + mockVideoIntelligence.resps = append(mockVideoIntelligence.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var inputUri string = "gs://demomaker/cat.mp4" + var featuresElement videointelligencepb.Feature = videointelligencepb.Feature_LABEL_DETECTION + var features = []videointelligencepb.Feature{featuresElement} + var request = &videointelligencepb.AnnotateVideoRequest{ + InputUri: inputUri, + Features: features, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.AnnotateVideo(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/videointelligence/apiv1beta1/video_intelligence_client.go b/vendor/cloud.google.com/go/videointelligence/apiv1beta1/video_intelligence_client.go new file mode 100644 index 0000000..f0920b0 --- /dev/null +++ b/vendor/cloud.google.com/go/videointelligence/apiv1beta1/video_intelligence_client.go @@ -0,0 +1,225 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package videointelligence + +import ( + "time" + + "cloud.google.com/go/internal/version" + "cloud.google.com/go/longrunning" + lroauto "cloud.google.com/go/longrunning/autogen" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + videointelligencepb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. 
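+// Callers may replace these after NewClient returns to tune retries; a
+// sketch using only the gax identifiers already imported by this file
+// (the values are illustrative, not recommendations):
+//
+//	c.CallOptions.AnnotateVideo = []gax.CallOption{
+//		gax.WithRetry(func() gax.Retryer {
+//			return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
+//				Initial:    100 * time.Millisecond,
+//				Max:        30 * time.Second,
+//				Multiplier: 2,
+//			})
+//		}),
+//	}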
+type CallOptions struct { + AnnotateVideo []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("videointelligence.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 120000 * time.Millisecond, + Multiplier: 2.5, + }) + }), + }, + } + return &CallOptions{ + AnnotateVideo: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with Google Cloud Video Intelligence API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client videointelligencepb.VideoIntelligenceServiceClient + + // LROClient is used internally to handle longrunning operations. + // It is exposed so that its CallOptions can be modified if required. + // Users should not Close this client. + LROClient *lroauto.OperationsClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new video intelligence service client. +// +// Service that implements Google Cloud Video Intelligence API. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: videointelligencepb.NewVideoIntelligenceServiceClient(conn), + } + c.setGoogleClientInfo() + + c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn)) + if err != nil { + // This error "should not happen", since we are just reusing old connection + // and never actually need to dial. + // If this does happen, we could leak conn. However, we cannot close conn: + // If the user invoked the function with option.WithGRPCConn, + // we would close a connection that's still in use. + // TODO(pongad): investigate error conditions. + return nil, err + } + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// AnnotateVideo performs asynchronous video annotation. Progress and results can be +// retrieved through the google.longrunning.Operations interface. +// Operation.metadata contains AnnotateVideoProgress (progress). +// Operation.response contains AnnotateVideoResponse (results). 
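+//
+// A minimal call sequence, as a sketch (the input URI is a placeholder):
+//
+//	op, err := c.AnnotateVideo(ctx, &videointelligencepb.AnnotateVideoRequest{
+//		InputUri: "gs://bucket/video.mp4",
+//		Features: []videointelligencepb.Feature{videointelligencepb.Feature_LABEL_DETECTION},
+//	})
+//	if err != nil {
+//		// TODO: Handle error.
+//	}
+//	resp, err := op.Wait(ctx) // blocks, polling roughly every 45s (see Wait below)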
+func (c *Client) AnnotateVideo(ctx context.Context, req *videointelligencepb.AnnotateVideoRequest, opts ...gax.CallOption) (*AnnotateVideoOperation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.AnnotateVideo[0:len(c.CallOptions.AnnotateVideo):len(c.CallOptions.AnnotateVideo)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AnnotateVideo(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return &AnnotateVideoOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, resp), + }, nil +} + +// AnnotateVideoOperation manages a long-running operation from AnnotateVideo. +type AnnotateVideoOperation struct { + lro *longrunning.Operation +} + +// AnnotateVideoOperation returns a new AnnotateVideoOperation from a given name. +// The name must be that of a previously created AnnotateVideoOperation, possibly from a different process. +func (c *Client) AnnotateVideoOperation(name string) *AnnotateVideoOperation { + return &AnnotateVideoOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning the response and any errors encountered. +// +// See documentation of Poll for error-handling information. +func (op *AnnotateVideoOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*videointelligencepb.AnnotateVideoResponse, error) { + var resp videointelligencepb.AnnotateVideoResponse + if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil { + return nil, err + } + return &resp, nil +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true, and the response of the operation is returned. +// If Poll succeeds and the operation has not completed, the returned response and error are both nil. +func (op *AnnotateVideoOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*videointelligencepb.AnnotateVideoResponse, error) { + var resp videointelligencepb.AnnotateVideoResponse + if err := op.lro.Poll(ctx, &resp, opts...); err != nil { + return nil, err + } + if !op.Done() { + return nil, nil + } + return &resp, nil +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. +func (op *AnnotateVideoOperation) Metadata() (*videointelligencepb.AnnotateVideoProgress, error) { + var meta videointelligencepb.AnnotateVideoProgress + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *AnnotateVideoOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. 
+// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *AnnotateVideoOperation) Name() string { + return op.lro.Name() +} diff --git a/vendor/cloud.google.com/go/videointelligence/apiv1beta1/video_intelligence_client_example_test.go b/vendor/cloud.google.com/go/videointelligence/apiv1beta1/video_intelligence_client_example_test.go new file mode 100644 index 0000000..1c693ff --- /dev/null +++ b/vendor/cloud.google.com/go/videointelligence/apiv1beta1/video_intelligence_client_example_test.go @@ -0,0 +1,56 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package videointelligence_test + +import ( + "cloud.google.com/go/videointelligence/apiv1beta1" + "golang.org/x/net/context" + videointelligencepb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta1" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := videointelligence.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_AnnotateVideo() { + ctx := context.Background() + c, err := videointelligence.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &videointelligencepb.AnnotateVideoRequest{ + // TODO: Fill request struct fields. + } + op, err := c.AnnotateVideo(ctx, req) + if err != nil { + // TODO: Handle error. + } + + resp, err := op.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/videointelligence/apiv1beta2/doc.go b/vendor/cloud.google.com/go/videointelligence/apiv1beta2/doc.go new file mode 100644 index 0000000..6545493 --- /dev/null +++ b/vendor/cloud.google.com/go/videointelligence/apiv1beta2/doc.go @@ -0,0 +1,46 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package videointelligence is an auto-generated package for the +// Google Cloud Video Intelligence API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// Google Cloud Video Intelligence API. 
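+//
+// Construction mirrors the other auto-generated clients; a sketch
+// (credentials are resolved from the environment by default):
+//
+//	ctx := context.Background()
+//	c, err := videointelligence.NewClient(ctx)
+//	if err != nil {
+//		// TODO: Handle error.
+//	}
+//	defer c.Close()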
+package videointelligence // import "cloud.google.com/go/videointelligence/apiv1beta2" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + } +} diff --git a/vendor/cloud.google.com/go/videointelligence/apiv1beta2/mock_test.go b/vendor/cloud.google.com/go/videointelligence/apiv1beta2/mock_test.go new file mode 100644 index 0000000..a6494a3 --- /dev/null +++ b/vendor/cloud.google.com/go/videointelligence/apiv1beta2/mock_test.go @@ -0,0 +1,192 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package videointelligence + +import ( + videointelligencepb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta2" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockVideoIntelligenceServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + videointelligencepb.VideoIntelligenceServiceServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockVideoIntelligenceServer) AnnotateVideo(ctx context.Context, req *videointelligencepb.AnnotateVideoRequest) (*longrunningpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
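+//
+// The wiring: TestMain registers the mock server on an ephemeral localhost
+// listener, dials it with grpc.WithInsecure, and passes the resulting
+// connection via option.WithGRPCConn, so NewClient in the tests below talks
+// to the in-process mock rather than the real service.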
+var clientOpt option.ClientOption + +var ( + mockVideoIntelligence mockVideoIntelligenceServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + videointelligencepb.RegisterVideoIntelligenceServiceServer(serv, &mockVideoIntelligence) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestVideoIntelligenceServiceAnnotateVideo(t *testing.T) { + var expectedResponse *videointelligencepb.AnnotateVideoResponse = &videointelligencepb.AnnotateVideoResponse{} + + mockVideoIntelligence.err = nil + mockVideoIntelligence.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockVideoIntelligence.resps = append(mockVideoIntelligence.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var inputUri string = "gs://demomaker/cat.mp4" + var featuresElement videointelligencepb.Feature = videointelligencepb.Feature_LABEL_DETECTION + var features = []videointelligencepb.Feature{featuresElement} + var request = &videointelligencepb.AnnotateVideoRequest{ + InputUri: inputUri, + Features: features, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.AnnotateVideo(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockVideoIntelligence.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestVideoIntelligenceServiceAnnotateVideoError(t *testing.T) { + errCode := codes.PermissionDenied + mockVideoIntelligence.err = nil + mockVideoIntelligence.resps = append(mockVideoIntelligence.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var inputUri string = "gs://demomaker/cat.mp4" + var featuresElement videointelligencepb.Feature = videointelligencepb.Feature_LABEL_DETECTION + var features = []videointelligencepb.Feature{featuresElement} + var request = &videointelligencepb.AnnotateVideoRequest{ + InputUri: inputUri, + Features: features, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.AnnotateVideo(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/videointelligence/apiv1beta2/video_intelligence_client.go b/vendor/cloud.google.com/go/videointelligence/apiv1beta2/video_intelligence_client.go new file mode 100644 index 0000000..f7f6aaa --- /dev/null +++ b/vendor/cloud.google.com/go/videointelligence/apiv1beta2/video_intelligence_client.go @@ -0,0 +1,225 @@ +// Copyright 2018 Google LLC +// +// 
Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package videointelligence + +import ( + "time" + + "cloud.google.com/go/internal/version" + "cloud.google.com/go/longrunning" + lroauto "cloud.google.com/go/longrunning/autogen" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + videointelligencepb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta2" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. +type CallOptions struct { + AnnotateVideo []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("videointelligence.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 120000 * time.Millisecond, + Multiplier: 2.5, + }) + }), + }, + } + return &CallOptions{ + AnnotateVideo: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with Google Cloud Video Intelligence API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client videointelligencepb.VideoIntelligenceServiceClient + + // LROClient is used internally to handle longrunning operations. + // It is exposed so that its CallOptions can be modified if required. + // Users should not Close this client. + LROClient *lroauto.OperationsClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new video intelligence service client. +// +// Service that implements Google Cloud Video Intelligence API. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: videointelligencepb.NewVideoIntelligenceServiceClient(conn), + } + c.setGoogleClientInfo() + + c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn)) + if err != nil { + // This error "should not happen", since we are just reusing old connection + // and never actually need to dial. + // If this does happen, we could leak conn. However, we cannot close conn: + // If the user invoked the function with option.WithGRPCConn, + // we would close a connection that's still in use. 
+ // TODO(pongad): investigate error conditions. + return nil, err + } + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// AnnotateVideo performs asynchronous video annotation. Progress and results can be +// retrieved through the google.longrunning.Operations interface. +// Operation.metadata contains AnnotateVideoProgress (progress). +// Operation.response contains AnnotateVideoResponse (results). +func (c *Client) AnnotateVideo(ctx context.Context, req *videointelligencepb.AnnotateVideoRequest, opts ...gax.CallOption) (*AnnotateVideoOperation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.AnnotateVideo[0:len(c.CallOptions.AnnotateVideo):len(c.CallOptions.AnnotateVideo)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AnnotateVideo(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return &AnnotateVideoOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, resp), + }, nil +} + +// AnnotateVideoOperation manages a long-running operation from AnnotateVideo. +type AnnotateVideoOperation struct { + lro *longrunning.Operation +} + +// AnnotateVideoOperation returns a new AnnotateVideoOperation from a given name. +// The name must be that of a previously created AnnotateVideoOperation, possibly from a different process. +func (c *Client) AnnotateVideoOperation(name string) *AnnotateVideoOperation { + return &AnnotateVideoOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning the response and any errors encountered. +// +// See documentation of Poll for error-handling information. +func (op *AnnotateVideoOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*videointelligencepb.AnnotateVideoResponse, error) { + var resp videointelligencepb.AnnotateVideoResponse + if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil { + return nil, err + } + return &resp, nil +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true, and the response of the operation is returned. +// If Poll succeeds and the operation has not completed, the returned response and error are both nil. 
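+//
+// A typical polling loop, as a sketch (the interval is arbitrary; Wait
+// packages this loop up with a fixed interval):
+//
+//	for {
+//		resp, err := op.Poll(ctx)
+//		if err != nil {
+//			// TODO: Handle RPC failure or operation error.
+//		}
+//		if op.Done() {
+//			_ = resp // non-nil on success
+//			break
+//		}
+//		time.Sleep(10 * time.Second)
+//	}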
+func (op *AnnotateVideoOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*videointelligencepb.AnnotateVideoResponse, error) { + var resp videointelligencepb.AnnotateVideoResponse + if err := op.lro.Poll(ctx, &resp, opts...); err != nil { + return nil, err + } + if !op.Done() { + return nil, nil + } + return &resp, nil +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. +func (op *AnnotateVideoOperation) Metadata() (*videointelligencepb.AnnotateVideoProgress, error) { + var meta videointelligencepb.AnnotateVideoProgress + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *AnnotateVideoOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *AnnotateVideoOperation) Name() string { + return op.lro.Name() +} diff --git a/vendor/cloud.google.com/go/videointelligence/apiv1beta2/video_intelligence_client_example_test.go b/vendor/cloud.google.com/go/videointelligence/apiv1beta2/video_intelligence_client_example_test.go new file mode 100644 index 0000000..6a8024f --- /dev/null +++ b/vendor/cloud.google.com/go/videointelligence/apiv1beta2/video_intelligence_client_example_test.go @@ -0,0 +1,56 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package videointelligence_test + +import ( + "cloud.google.com/go/videointelligence/apiv1beta2" + "golang.org/x/net/context" + videointelligencepb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta2" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := videointelligence.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_AnnotateVideo() { + ctx := context.Background() + c, err := videointelligence.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &videointelligencepb.AnnotateVideoRequest{ + // TODO: Fill request struct fields. + } + op, err := c.AnnotateVideo(ctx, req) + if err != nil { + // TODO: Handle error. + } + + resp, err := op.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp +} diff --git a/vendor/cloud.google.com/go/videointelligence/apiv1beta2/whitelist.go b/vendor/cloud.google.com/go/videointelligence/apiv1beta2/whitelist.go new file mode 100644 index 0000000..d0e057e --- /dev/null +++ b/vendor/cloud.google.com/go/videointelligence/apiv1beta2/whitelist.go @@ -0,0 +1,16 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS API IS CURRENTLY UNDER WHITELIST. +package videointelligence diff --git a/vendor/cloud.google.com/go/vision/apiv1/BatchAnnotateImages_smoke_test.go b/vendor/cloud.google.com/go/vision/apiv1/BatchAnnotateImages_smoke_test.go new file mode 100644 index 0000000..606c711 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/BatchAnnotateImages_smoke_test.go @@ -0,0 +1,82 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package vision + +import ( + visionpb "google.golang.org/genproto/googleapis/cloud/vision/v1" +) + +import ( + "fmt" + "strconv" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var _ = fmt.Sprintf +var _ = iterator.Done +var _ = strconv.FormatUint +var _ = time.Now + +func TestImageAnnotatorSmoke(t *testing.T) { + if testing.Short() { + t.Skip("skipping smoke test in short mode") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) + if ts == nil { + t.Skip("Integration tests skipped. 
See CONTRIBUTING.md for details") + } + + projectId := testutil.ProjID() + _ = projectId + + c, err := NewImageAnnotatorClient(ctx, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + + var gcsImageUri string = "gs://gapic-toolkit/President_Barack_Obama.jpg" + var source = &visionpb.ImageSource{ + GcsImageUri: gcsImageUri, + } + var image = &visionpb.Image{ + Source: source, + } + var type_ visionpb.Feature_Type = visionpb.Feature_FACE_DETECTION + var featuresElement = &visionpb.Feature{ + Type: type_, + } + var features = []*visionpb.Feature{featuresElement} + var requestsElement = &visionpb.AnnotateImageRequest{ + Image: image, + Features: features, + } + var requests = []*visionpb.AnnotateImageRequest{requestsElement} + var request = &visionpb.BatchAnnotateImagesRequest{ + Requests: requests, + } + + if _, err := c.BatchAnnotateImages(ctx, request); err != nil { + t.Error(err) + } +} diff --git a/vendor/cloud.google.com/go/vision/apiv1/README.md b/vendor/cloud.google.com/go/vision/apiv1/README.md new file mode 100644 index 0000000..bcfa08d --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/README.md @@ -0,0 +1,9 @@ +Auto-generated vision v1 clients +================================= + +This package includes auto-generated clients for the vision v1 API. + +Use the handwritten client (in the parent directory, +cloud.google.com/go/vision) in preference to this. + +This code is EXPERIMENTAL and subject to CHANGE AT ANY TIME. diff --git a/vendor/cloud.google.com/go/vision/apiv1/client.go b/vendor/cloud.google.com/go/vision/apiv1/client.go new file mode 100644 index 0000000..1bfdbfa --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/client.go @@ -0,0 +1,151 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vision + +import ( + "github.com/googleapis/gax-go" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/cloud/vision/v1" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// AnnotateImage runs image detection and annotation for a single image. +func (c *ImageAnnotatorClient) AnnotateImage(ctx context.Context, req *pb.AnnotateImageRequest, opts ...gax.CallOption) (*pb.AnnotateImageResponse, error) { + res, err := c.BatchAnnotateImages(ctx, &pb.BatchAnnotateImagesRequest{ + Requests: []*pb.AnnotateImageRequest{req}, + }, opts...) + if err != nil { + return nil, err + } + return res.Responses[0], nil +} + +// Called for a single image and a single feature. +func (c *ImageAnnotatorClient) annotateOne(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, ftype pb.Feature_Type, maxResults int, opts []gax.CallOption) (*pb.AnnotateImageResponse, error) { + res, err := c.AnnotateImage(ctx, &pb.AnnotateImageRequest{ + Image: img, + ImageContext: ictx, + Features: []*pb.Feature{{Type: ftype, MaxResults: int32(maxResults)}}, + }, opts...) 
+ if err != nil { + return nil, err + } + // When there is only one image and one feature, the response's Error field is + // unambiguously about that one detection, so we "promote" it to the error return + // value. + // res.Error is a google.rpc.Status. Convert to a Go error. Use a gRPC + // error because it preserves the code as a separate field. + // TODO(jba): preserve the details field. + if res.Error != nil { + return nil, status.Errorf(codes.Code(res.Error.Code), "%s", res.Error.Message) + } + return res, nil +} + +// DetectFaces performs face detection on the image. +// At most maxResults results are returned. +func (c *ImageAnnotatorClient) DetectFaces(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, maxResults int, opts ...gax.CallOption) ([]*pb.FaceAnnotation, error) { + res, err := c.annotateOne(ctx, img, ictx, pb.Feature_FACE_DETECTION, maxResults, opts) + if err != nil { + return nil, err + } + return res.FaceAnnotations, nil +} + +// DetectLandmarks performs landmark detection on the image. +// At most maxResults results are returned. +func (c *ImageAnnotatorClient) DetectLandmarks(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, maxResults int, opts ...gax.CallOption) ([]*pb.EntityAnnotation, error) { + res, err := c.annotateOne(ctx, img, ictx, pb.Feature_LANDMARK_DETECTION, maxResults, opts) + if err != nil { + return nil, err + } + return res.LandmarkAnnotations, nil +} + +// DetectLogos performs logo detection on the image. +// At most maxResults results are returned. +func (c *ImageAnnotatorClient) DetectLogos(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, maxResults int, opts ...gax.CallOption) ([]*pb.EntityAnnotation, error) { + res, err := c.annotateOne(ctx, img, ictx, pb.Feature_LOGO_DETECTION, maxResults, opts) + if err != nil { + return nil, err + } + return res.LogoAnnotations, nil +} + +// DetectLabels performs label detection on the image. +// At most maxResults results are returned. +func (c *ImageAnnotatorClient) DetectLabels(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, maxResults int, opts ...gax.CallOption) ([]*pb.EntityAnnotation, error) { + res, err := c.annotateOne(ctx, img, ictx, pb.Feature_LABEL_DETECTION, maxResults, opts) + if err != nil { + return nil, err + } + return res.LabelAnnotations, nil +} + +// DetectTexts performs text detection on the image. +// At most maxResults results are returned. +func (c *ImageAnnotatorClient) DetectTexts(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, maxResults int, opts ...gax.CallOption) ([]*pb.EntityAnnotation, error) { + res, err := c.annotateOne(ctx, img, ictx, pb.Feature_TEXT_DETECTION, maxResults, opts) + if err != nil { + return nil, err + } + return res.TextAnnotations, nil +} + +// DetectDocumentText performs full text (OCR) detection on the image. +func (c *ImageAnnotatorClient) DetectDocumentText(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, opts ...gax.CallOption) (*pb.TextAnnotation, error) { + res, err := c.annotateOne(ctx, img, ictx, pb.Feature_DOCUMENT_TEXT_DETECTION, 0, opts) + if err != nil { + return nil, err + } + return res.FullTextAnnotation, nil +} + +// DetectSafeSearch performs safe-search detection on the image. 
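+// It takes no maxResults because the service returns a single annotation;
+// a sketch of typical use (img as built by NewImageFromURI; the fields are
+// Likelihood values):
+//
+//	ann, err := c.DetectSafeSearch(ctx, img, nil)
+//	if err != nil {
+//		// TODO: Handle error.
+//	}
+//	fmt.Println(ann.Adult, ann.Violence)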
+func (c *ImageAnnotatorClient) DetectSafeSearch(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, opts ...gax.CallOption) (*pb.SafeSearchAnnotation, error) { + res, err := c.annotateOne(ctx, img, ictx, pb.Feature_SAFE_SEARCH_DETECTION, 0, opts) + if err != nil { + return nil, err + } + return res.SafeSearchAnnotation, nil +} + +// DetectImageProperties computes properties of the image. +func (c *ImageAnnotatorClient) DetectImageProperties(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, opts ...gax.CallOption) (*pb.ImageProperties, error) { + res, err := c.annotateOne(ctx, img, ictx, pb.Feature_IMAGE_PROPERTIES, 0, opts) + if err != nil { + return nil, err + } + return res.ImagePropertiesAnnotation, nil +} + +// DetectWeb computes a web annotation on the image. +func (c *ImageAnnotatorClient) DetectWeb(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, opts ...gax.CallOption) (*pb.WebDetection, error) { + res, err := c.annotateOne(ctx, img, ictx, pb.Feature_WEB_DETECTION, 0, opts) + if err != nil { + return nil, err + } + return res.WebDetection, nil +} + +// CropHints computes crop hints for the image. +func (c *ImageAnnotatorClient) CropHints(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, opts ...gax.CallOption) (*pb.CropHintsAnnotation, error) { + res, err := c.annotateOne(ctx, img, ictx, pb.Feature_CROP_HINTS, 0, opts) + if err != nil { + return nil, err + } + return res.CropHintsAnnotation, nil +} diff --git a/vendor/cloud.google.com/go/vision/apiv1/client_test.go b/vendor/cloud.google.com/go/vision/apiv1/client_test.go new file mode 100644 index 0000000..d656afe --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/client_test.go @@ -0,0 +1,200 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package vision + +import ( + "fmt" + "reflect" + "testing" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/cloud/vision/v1" + "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var batchResponse = &pb.BatchAnnotateImagesResponse{ + Responses: []*pb.AnnotateImageResponse{{ + FaceAnnotations: []*pb.FaceAnnotation{ + {RollAngle: 1}, {RollAngle: 2}}, + LandmarkAnnotations: []*pb.EntityAnnotation{{Mid: "landmark"}}, + LogoAnnotations: []*pb.EntityAnnotation{{Mid: "logo"}}, + LabelAnnotations: []*pb.EntityAnnotation{{Mid: "label"}}, + TextAnnotations: []*pb.EntityAnnotation{{Mid: "text"}}, + FullTextAnnotation: &pb.TextAnnotation{Text: "full"}, + SafeSearchAnnotation: &pb.SafeSearchAnnotation{Spoof: pb.Likelihood_POSSIBLE}, + ImagePropertiesAnnotation: &pb.ImageProperties{DominantColors: &pb.DominantColorsAnnotation{}}, + CropHintsAnnotation: &pb.CropHintsAnnotation{CropHints: []*pb.CropHint{{Confidence: 0.5}}}, + WebDetection: &pb.WebDetection{WebEntities: []*pb.WebDetection_WebEntity{{EntityId: "web"}}}, + }}, +} + +// Verify that all the "shortcut" methods use the underlying +// BatchAnnotateImages RPC correctly. +func TestClientMethods(t *testing.T) { + ctx := context.Background() + c, err := NewImageAnnotatorClient(ctx, clientOpt) + if err != nil { + t.Fatal(err) + } + + mockImageAnnotator.resps = []proto.Message{batchResponse} + img := &pb.Image{Source: &pb.ImageSource{ImageUri: "http://foo.jpg"}} + ictx := &pb.ImageContext{LanguageHints: []string{"en", "fr"}} + req := &pb.AnnotateImageRequest{ + Image: img, + ImageContext: ictx, + Features: []*pb.Feature{ + {Type: pb.Feature_LABEL_DETECTION, MaxResults: 3}, + {Type: pb.Feature_FACE_DETECTION, MaxResults: 4}, + }, + } + + for i, test := range []struct { + call func() (interface{}, error) + wantFeatures []*pb.Feature + wantRes interface{} + }{ + { + func() (interface{}, error) { return c.AnnotateImage(ctx, req) }, + req.Features, batchResponse.Responses[0], + }, + { + func() (interface{}, error) { return c.DetectFaces(ctx, img, ictx, 2) }, + []*pb.Feature{{Type: pb.Feature_FACE_DETECTION, MaxResults: 2}}, + batchResponse.Responses[0].FaceAnnotations, + }, + { + func() (interface{}, error) { return c.DetectLandmarks(ctx, img, ictx, 2) }, + []*pb.Feature{{Type: pb.Feature_LANDMARK_DETECTION, MaxResults: 2}}, + batchResponse.Responses[0].LandmarkAnnotations, + }, + { + func() (interface{}, error) { return c.DetectLogos(ctx, img, ictx, 2) }, + []*pb.Feature{{Type: pb.Feature_LOGO_DETECTION, MaxResults: 2}}, + batchResponse.Responses[0].LogoAnnotations, + }, + { + func() (interface{}, error) { return c.DetectLabels(ctx, img, ictx, 2) }, + []*pb.Feature{{Type: pb.Feature_LABEL_DETECTION, MaxResults: 2}}, + batchResponse.Responses[0].LabelAnnotations, + }, + { + func() (interface{}, error) { return c.DetectTexts(ctx, img, ictx, 2) }, + []*pb.Feature{{Type: pb.Feature_TEXT_DETECTION, MaxResults: 2}}, + batchResponse.Responses[0].TextAnnotations, + }, + { + func() (interface{}, error) { return c.DetectDocumentText(ctx, img, ictx) }, + []*pb.Feature{{Type: pb.Feature_DOCUMENT_TEXT_DETECTION, MaxResults: 0}}, + batchResponse.Responses[0].FullTextAnnotation, + }, + { + func() (interface{}, error) { return c.DetectSafeSearch(ctx, img, ictx) }, + []*pb.Feature{{Type: pb.Feature_SAFE_SEARCH_DETECTION, MaxResults: 0}}, + batchResponse.Responses[0].SafeSearchAnnotation, + }, + { + func() (interface{}, error) { return 
c.DetectImageProperties(ctx, img, ictx) }, + []*pb.Feature{{Type: pb.Feature_IMAGE_PROPERTIES, MaxResults: 0}}, + batchResponse.Responses[0].ImagePropertiesAnnotation, + }, + { + func() (interface{}, error) { return c.DetectWeb(ctx, img, ictx) }, + []*pb.Feature{{Type: pb.Feature_WEB_DETECTION, MaxResults: 0}}, + batchResponse.Responses[0].WebDetection, + }, + { + func() (interface{}, error) { return c.CropHints(ctx, img, ictx) }, + []*pb.Feature{{Type: pb.Feature_CROP_HINTS, MaxResults: 0}}, + batchResponse.Responses[0].CropHintsAnnotation, + }, + } { + mockImageAnnotator.reqs = nil + res, err := test.call() + if err != nil { + t.Fatal(err) + } + got := mockImageAnnotator.reqs[0] + want := &pb.BatchAnnotateImagesRequest{ + Requests: []*pb.AnnotateImageRequest{{ + Image: img, + ImageContext: ictx, + Features: test.wantFeatures, + }}, + } + if !testEqual(got, want) { + t.Errorf("#%d:\ngot %v\nwant %v", i, got, want) + } + if got, want := res, test.wantRes; !testEqual(got, want) { + t.Errorf("#%d:\ngot %v\nwant %v", i, got, want) + } + } + +} + +func testEqual(a, b interface{}) bool { + if a == nil && b == nil { + return true + } + if a == nil || b == nil { + return false + } + t := reflect.TypeOf(a) + if t != reflect.TypeOf(b) { + return false + } + if am, ok := a.(proto.Message); ok { + return proto.Equal(am, b.(proto.Message)) + } + if t.Kind() != reflect.Slice { + panic(fmt.Sprintf("testEqual can only handle proto.Message and slices, got %s", t)) + } + va := reflect.ValueOf(a) + vb := reflect.ValueOf(b) + if va.Len() != vb.Len() { + return false + } + for i := 0; i < va.Len(); i++ { + if !testEqual(va.Index(i).Interface(), vb.Index(i).Interface()) { + return false + } + } + return true +} + +func TestAnnotateOneError(t *testing.T) { + ctx := context.Background() + c, err := NewImageAnnotatorClient(ctx, clientOpt) + if err != nil { + t.Fatal(err) + } + mockImageAnnotator.resps = []proto.Message{ + &pb.BatchAnnotateImagesResponse{ + Responses: []*pb.AnnotateImageResponse{{ + Error: &status.Status{Code: int32(codes.NotFound), Message: "not found"}, + }}, + }, + } + + _, err = c.annotateOne(ctx, + &pb.Image{Source: &pb.ImageSource{ImageUri: "http://foo.jpg"}}, + nil, pb.Feature_LOGO_DETECTION, 1, nil) + if c := grpc.Code(err); c != codes.NotFound { + t.Errorf("got %v, want NotFound", c) + } +} diff --git a/vendor/cloud.google.com/go/vision/apiv1/doc.go b/vendor/cloud.google.com/go/vision/apiv1/doc.go new file mode 100644 index 0000000..43d7e04 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/doc.go @@ -0,0 +1,49 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package vision is an auto-generated package for the +// Google Cloud Vision API. + +// +// Integrates Google Vision features, including image labeling, face, logo, +// and +// landmark detection, optical character recognition (OCR), and detection of +// explicit content, into applications. 
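+//
+// A minimal sketch of typical use (the bucket and object are placeholders;
+// error handling elided):
+//
+//	c, err := vision.NewImageAnnotatorClient(ctx)
+//	...
+//	labels, err := c.DetectLabels(ctx, vision.NewImageFromURI("gs://bucket/img.png"), nil, 10)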
+package vision // import "cloud.google.com/go/vision/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-vision", + } +} diff --git a/vendor/cloud.google.com/go/vision/apiv1/examples_test.go b/vendor/cloud.google.com/go/vision/apiv1/examples_test.go new file mode 100644 index 0000000..84b3ad8 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/examples_test.go @@ -0,0 +1,92 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vision_test + +import ( + "fmt" + "os" + + vision "cloud.google.com/go/vision/apiv1" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/cloud/vision/v1" +) + +func Example_NewImageFromReader() { + f, err := os.Open("path/to/image.jpg") + if err != nil { + // TODO: handle error. + } + img, err := vision.NewImageFromReader(f) + if err != nil { + // TODO: handle error. + } + fmt.Println(img) +} + +func Example_NewImageFromURI() { + img := vision.NewImageFromURI("gs://my-bucket/my-image.png") + fmt.Println(img) +} + +func ExampleImageAnnotatorClient_AnnotateImage() { + ctx := context.Background() + c, err := vision.NewImageAnnotatorClient(ctx) + if err != nil { + // TODO: Handle error. + } + res, err := c.AnnotateImage(ctx, &pb.AnnotateImageRequest{ + Image: vision.NewImageFromURI("gs://my-bucket/my-image.png"), + Features: []*pb.Feature{ + {Type: pb.Feature_LANDMARK_DETECTION, MaxResults: 5}, + {Type: pb.Feature_LABEL_DETECTION, MaxResults: 3}, + }, + }) + if err != nil { + // TODO: Handle error. + } + // TODO: Use res. + _ = res +} + +func Example_FaceFromLandmarks() { + ctx := context.Background() + c, err := vision.NewImageAnnotatorClient(ctx) + if err != nil { + // TODO: Handle error. + } + resp, err := c.BatchAnnotateImages(ctx, &pb.BatchAnnotateImagesRequest{ + Requests: []*pb.AnnotateImageRequest{ + { + Image: vision.NewImageFromURI("gs://bucket/image.jpg"), + Features: []*pb.Feature{{ + Type: pb.Feature_FACE_DETECTION, + MaxResults: 5, + }}, + }, + }, + }) + if err != nil { + // TODO: Handle error. + } + res := resp.Responses[0] + if res.Error != nil { + // TODO: Handle error. 
+ } + for _, a := range res.FaceAnnotations { + face := vision.FaceFromLandmarks(a.Landmarks) + fmt.Println(face.Nose.Tip) + fmt.Println(face.Eyes.Left.Pupil) + } +} diff --git a/vendor/cloud.google.com/go/vision/apiv1/face.go b/vendor/cloud.google.com/go/vision/apiv1/face.go new file mode 100644 index 0000000..943d49f --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/face.go @@ -0,0 +1,153 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vision + +import ( + "log" + + pb "google.golang.org/genproto/googleapis/cloud/vision/v1" +) + +// FaceLandmarks contains the positions of facial features detected by the service. +type FaceLandmarks struct { + Eyebrows Eyebrows + Eyes Eyes + Ears Ears + Nose Nose + Mouth Mouth + Chin Chin + Forehead *pb.Position +} + +// Eyebrows represents a face's eyebrows. +type Eyebrows struct { + Left, Right Eyebrow +} + +// Eyebrow represents a face's eyebrow. +type Eyebrow struct { + Top, Left, Right *pb.Position +} + +// Eyes represents a face's eyes. +type Eyes struct { + Left, Right Eye +} + +// Eye represents a face's eye. +type Eye struct { + Left, Right, Top, Bottom, Center, Pupil *pb.Position +} + +// Ears represents a face's ears. +type Ears struct { + Left, Right *pb.Position +} + +// Nose represents a face's nose. +type Nose struct { + Left, Right, Top, Bottom, Tip *pb.Position +} + +// Mouth represents a face's mouth. +type Mouth struct { + Left, Center, Right, UpperLip, LowerLip *pb.Position +} + +// Chin represents a face's chin. +type Chin struct { + Left, Center, Right *pb.Position +} + +// FaceFromLandmarks converts the list of face landmarks returned by the service +// to a FaceLandmarks struct. 
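+// Unrecognized landmark types are logged and skipped. A sketch of typical
+// use, given a FaceAnnotation from face detection:
+//
+//	face := FaceFromLandmarks(annotation.Landmarks)
+//	if face.Nose.Tip != nil {
+//		fmt.Println(face.Nose.Tip.X, face.Nose.Tip.Y)
+//	}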
+func FaceFromLandmarks(landmarks []*pb.FaceAnnotation_Landmark) *FaceLandmarks { + face := &FaceLandmarks{} + for _, lm := range landmarks { + switch lm.Type { + case pb.FaceAnnotation_Landmark_LEFT_OF_LEFT_EYEBROW: + face.Eyebrows.Left.Left = lm.Position + case pb.FaceAnnotation_Landmark_RIGHT_OF_LEFT_EYEBROW: + face.Eyebrows.Left.Right = lm.Position + case pb.FaceAnnotation_Landmark_LEFT_OF_RIGHT_EYEBROW: + face.Eyebrows.Right.Left = lm.Position + case pb.FaceAnnotation_Landmark_RIGHT_OF_RIGHT_EYEBROW: + face.Eyebrows.Right.Right = lm.Position + case pb.FaceAnnotation_Landmark_LEFT_EYEBROW_UPPER_MIDPOINT: + face.Eyebrows.Left.Top = lm.Position + case pb.FaceAnnotation_Landmark_RIGHT_EYEBROW_UPPER_MIDPOINT: + face.Eyebrows.Right.Top = lm.Position + case pb.FaceAnnotation_Landmark_MIDPOINT_BETWEEN_EYES: + face.Nose.Top = lm.Position + case pb.FaceAnnotation_Landmark_NOSE_TIP: + face.Nose.Tip = lm.Position + case pb.FaceAnnotation_Landmark_UPPER_LIP: + face.Mouth.UpperLip = lm.Position + case pb.FaceAnnotation_Landmark_LOWER_LIP: + face.Mouth.LowerLip = lm.Position + case pb.FaceAnnotation_Landmark_MOUTH_LEFT: + face.Mouth.Left = lm.Position + case pb.FaceAnnotation_Landmark_MOUTH_RIGHT: + face.Mouth.Right = lm.Position + case pb.FaceAnnotation_Landmark_MOUTH_CENTER: + face.Mouth.Center = lm.Position + case pb.FaceAnnotation_Landmark_NOSE_BOTTOM_RIGHT: + face.Nose.Right = lm.Position + case pb.FaceAnnotation_Landmark_NOSE_BOTTOM_LEFT: + face.Nose.Left = lm.Position + case pb.FaceAnnotation_Landmark_NOSE_BOTTOM_CENTER: + face.Nose.Bottom = lm.Position + case pb.FaceAnnotation_Landmark_LEFT_EYE: + face.Eyes.Left.Center = lm.Position + case pb.FaceAnnotation_Landmark_RIGHT_EYE: + face.Eyes.Right.Center = lm.Position + case pb.FaceAnnotation_Landmark_LEFT_EYE_TOP_BOUNDARY: + face.Eyes.Left.Top = lm.Position + case pb.FaceAnnotation_Landmark_LEFT_EYE_RIGHT_CORNER: + face.Eyes.Left.Right = lm.Position + case pb.FaceAnnotation_Landmark_LEFT_EYE_BOTTOM_BOUNDARY: + face.Eyes.Left.Bottom = lm.Position + case pb.FaceAnnotation_Landmark_LEFT_EYE_LEFT_CORNER: + face.Eyes.Left.Left = lm.Position + case pb.FaceAnnotation_Landmark_RIGHT_EYE_TOP_BOUNDARY: + face.Eyes.Right.Top = lm.Position + case pb.FaceAnnotation_Landmark_RIGHT_EYE_RIGHT_CORNER: + face.Eyes.Right.Right = lm.Position + case pb.FaceAnnotation_Landmark_RIGHT_EYE_BOTTOM_BOUNDARY: + face.Eyes.Right.Bottom = lm.Position + case pb.FaceAnnotation_Landmark_RIGHT_EYE_LEFT_CORNER: + face.Eyes.Right.Left = lm.Position + case pb.FaceAnnotation_Landmark_LEFT_EYE_PUPIL: + face.Eyes.Left.Pupil = lm.Position + case pb.FaceAnnotation_Landmark_RIGHT_EYE_PUPIL: + face.Eyes.Right.Pupil = lm.Position + case pb.FaceAnnotation_Landmark_LEFT_EAR_TRAGION: + face.Ears.Left = lm.Position + case pb.FaceAnnotation_Landmark_RIGHT_EAR_TRAGION: + face.Ears.Right = lm.Position + case pb.FaceAnnotation_Landmark_FOREHEAD_GLABELLA: + face.Forehead = lm.Position + case pb.FaceAnnotation_Landmark_CHIN_GNATHION: + face.Chin.Center = lm.Position + case pb.FaceAnnotation_Landmark_CHIN_LEFT_GONION: + face.Chin.Left = lm.Position + case pb.FaceAnnotation_Landmark_CHIN_RIGHT_GONION: + face.Chin.Right = lm.Position + default: + log.Printf("vision: ignoring unknown face annotation landmark %s", lm.Type) + } + } + return face +} diff --git a/vendor/cloud.google.com/go/vision/apiv1/face_test.go b/vendor/cloud.google.com/go/vision/apiv1/face_test.go new file mode 100644 index 0000000..2ae5e2e --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/face_test.go @@ -0,0 +1,225 @@ +// 
Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vision + +import ( + "testing" + + "cloud.google.com/go/internal/testutil" + + pb "google.golang.org/genproto/googleapis/cloud/vision/v1" +) + +func TestFaceFromLandmarks(t *testing.T) { + landmarks := []*pb.FaceAnnotation_Landmark{ + { + Type: pb.FaceAnnotation_Landmark_LEFT_EYE, + Position: &pb.Position{X: 1192, Y: 575, Z: 0}, + }, + { + Type: pb.FaceAnnotation_Landmark_RIGHT_EYE, + Position: &pb.Position{X: 1479, Y: 571, Z: -9}, + }, + { + Type: pb.FaceAnnotation_Landmark_LEFT_OF_LEFT_EYEBROW, + Position: &pb.Position{X: 1097, Y: 522, Z: 27}, + }, + { + Type: pb.FaceAnnotation_Landmark_RIGHT_OF_LEFT_EYEBROW, + Position: &pb.Position{X: 1266, Y: 521, Z: -61}, + }, + { + Type: pb.FaceAnnotation_Landmark_LEFT_OF_RIGHT_EYEBROW, + Position: &pb.Position{X: 1402, Y: 520, Z: -66}, + }, + { + Type: pb.FaceAnnotation_Landmark_RIGHT_OF_RIGHT_EYEBROW, + Position: &pb.Position{X: 1571, Y: 519, Z: 10}, + }, + { + Type: pb.FaceAnnotation_Landmark_MIDPOINT_BETWEEN_EYES, + Position: &pb.Position{X: 1331, Y: 566, Z: -66}, + }, + { + Type: pb.FaceAnnotation_Landmark_NOSE_TIP, + Position: &pb.Position{X: 1329, Y: 743, Z: -137}, + }, + { + Type: pb.FaceAnnotation_Landmark_UPPER_LIP, + Position: &pb.Position{X: 1330, Y: 836, Z: -66}, + }, + { + Type: pb.FaceAnnotation_Landmark_LOWER_LIP, + Position: &pb.Position{X: 1334, Y: 954, Z: -36}, + }, + { + Type: pb.FaceAnnotation_Landmark_MOUTH_LEFT, + Position: &pb.Position{X: 1186, Y: 867, Z: 27}, + }, + { + Type: pb.FaceAnnotation_Landmark_MOUTH_RIGHT, + Position: &pb.Position{X: 1484, Y: 857, Z: 19}, + }, + { + Type: pb.FaceAnnotation_Landmark_MOUTH_CENTER, + Position: &pb.Position{X: 1332, Y: 894, Z: -41}, + }, + { + Type: pb.FaceAnnotation_Landmark_NOSE_BOTTOM_RIGHT, + Position: &pb.Position{X: 1432, Y: 750, Z: -26}, + }, + { + Type: pb.FaceAnnotation_Landmark_NOSE_BOTTOM_LEFT, + Position: &pb.Position{X: 1236, Y: 755, Z: -20}, + }, + { + Type: pb.FaceAnnotation_Landmark_NOSE_BOTTOM_CENTER, + Position: &pb.Position{X: 1332, Y: 783, Z: -70}, + }, + { + Type: pb.FaceAnnotation_Landmark_LEFT_EYE_TOP_BOUNDARY, + Position: &pb.Position{X: 1193, Y: 561, Z: -20}, + }, + { + Type: pb.FaceAnnotation_Landmark_LEFT_EYE_RIGHT_CORNER, + Position: &pb.Position{X: 1252, Y: 581, Z: -1}, + }, + { + Type: pb.FaceAnnotation_Landmark_LEFT_EYE_BOTTOM_BOUNDARY, + Position: &pb.Position{X: 1190, Y: 593, Z: -1}, + }, + { + Type: pb.FaceAnnotation_Landmark_LEFT_EYE_LEFT_CORNER, + Position: &pb.Position{X: 1133, Y: 584, Z: 28}, + }, + { + Type: pb.FaceAnnotation_Landmark_LEFT_EYE_PUPIL, + Position: &pb.Position{X: 1189, Y: 580, Z: -8}, + }, + { + Type: pb.FaceAnnotation_Landmark_RIGHT_EYE_TOP_BOUNDARY, + Position: &pb.Position{X: 1474, Y: 561, Z: -30}, + }, + { + Type: pb.FaceAnnotation_Landmark_RIGHT_EYE_RIGHT_CORNER, + Position: &pb.Position{X: 1536, Y: 581, Z: 15}, + }, + { + Type: pb.FaceAnnotation_Landmark_RIGHT_EYE_BOTTOM_BOUNDARY, + Position: &pb.Position{X: 1481, 
Y: 590, Z: -11}, + }, + { + Type: pb.FaceAnnotation_Landmark_RIGHT_EYE_LEFT_CORNER, + Position: &pb.Position{X: 1424, Y: 579, Z: -6}, + }, + { + Type: pb.FaceAnnotation_Landmark_RIGHT_EYE_PUPIL, + Position: &pb.Position{X: 1478, Y: 580, Z: -18}, + }, + { + Type: pb.FaceAnnotation_Landmark_LEFT_EYEBROW_UPPER_MIDPOINT, + Position: &pb.Position{X: 1181, Y: 482, Z: -40}, + }, + { + Type: pb.FaceAnnotation_Landmark_RIGHT_EYEBROW_UPPER_MIDPOINT, + Position: &pb.Position{X: 1485, Y: 482, Z: -50}, + }, + { + Type: pb.FaceAnnotation_Landmark_LEFT_EAR_TRAGION, + Position: &pb.Position{X: 1027, Y: 696, Z: 361}, + }, + { + Type: pb.FaceAnnotation_Landmark_RIGHT_EAR_TRAGION, + Position: &pb.Position{X: 1666, Y: 695, Z: 339}, + }, + { + Type: pb.FaceAnnotation_Landmark_FOREHEAD_GLABELLA, + Position: &pb.Position{X: 1332, Y: 514, Z: -75}, + }, + { + Type: pb.FaceAnnotation_Landmark_CHIN_GNATHION, + Position: &pb.Position{X: 1335, Y: 1058, Z: 6}, + }, + { + Type: pb.FaceAnnotation_Landmark_CHIN_LEFT_GONION, + Position: &pb.Position{X: 1055, Y: 882, Z: 257}, + }, + { + Type: pb.FaceAnnotation_Landmark_CHIN_RIGHT_GONION, + Position: &pb.Position{X: 1631, Y: 881, Z: 238}, + }, + } + want := &FaceLandmarks{ + Eyebrows: Eyebrows{ + Left: Eyebrow{ + Top: &pb.Position{X: 1181, Y: 482, Z: -40}, + Left: &pb.Position{X: 1097, Y: 522, Z: 27}, + Right: &pb.Position{X: 1266, Y: 521, Z: -61}, + }, + Right: Eyebrow{ + Top: &pb.Position{X: 1485, Y: 482, Z: -50}, + Left: &pb.Position{X: 1402, Y: 520, Z: -66}, + Right: &pb.Position{X: 1571, Y: 519, Z: 10}, + }, + }, + Eyes: Eyes{ + Left: Eye{ + Left: &pb.Position{X: 1133, Y: 584, Z: 28}, + Right: &pb.Position{X: 1252, Y: 581, Z: -1}, + Top: &pb.Position{X: 1193, Y: 561, Z: -20}, + Bottom: &pb.Position{X: 1190, Y: 593, Z: -1}, + Center: &pb.Position{X: 1192, Y: 575, Z: 0}, + Pupil: &pb.Position{X: 1189, Y: 580, Z: -8}, + }, + Right: Eye{ + Left: &pb.Position{X: 1424, Y: 579, Z: -6}, + Right: &pb.Position{X: 1536, Y: 581, Z: 15}, + Top: &pb.Position{X: 1474, Y: 561, Z: -30}, + Bottom: &pb.Position{X: 1481, Y: 590, Z: -11}, + Center: &pb.Position{X: 1479, Y: 571, Z: -9}, + Pupil: &pb.Position{X: 1478, Y: 580, Z: -18}, + }, + }, + Ears: Ears{ + Left: &pb.Position{X: 1027, Y: 696, Z: 361}, + Right: &pb.Position{X: 1666, Y: 695, Z: 339}, + }, + Nose: Nose{ + Left: &pb.Position{X: 1236, Y: 755, Z: -20}, + Right: &pb.Position{X: 1432, Y: 750, Z: -26}, + Top: &pb.Position{X: 1331, Y: 566, Z: -66}, + Bottom: &pb.Position{X: 1332, Y: 783, Z: -70}, + Tip: &pb.Position{X: 1329, Y: 743, Z: -137}, + }, + Mouth: Mouth{ + Left: &pb.Position{X: 1186, Y: 867, Z: 27}, + Center: &pb.Position{X: 1332, Y: 894, Z: -41}, + Right: &pb.Position{X: 1484, Y: 857, Z: 19}, + UpperLip: &pb.Position{X: 1330, Y: 836, Z: -66}, + LowerLip: &pb.Position{X: 1334, Y: 954, Z: -36}, + }, + Chin: Chin{ + Left: &pb.Position{X: 1055, Y: 882, Z: 257}, + Center: &pb.Position{X: 1335, Y: 1058, Z: 6}, + Right: &pb.Position{X: 1631, Y: 881, Z: 238}, + }, + Forehead: &pb.Position{X: 1332, Y: 514, Z: -75}, + } + + got := FaceFromLandmarks(landmarks) + if diff := testutil.Diff(got, want); diff != "" { + t.Error(diff) + } +} diff --git a/vendor/cloud.google.com/go/vision/apiv1/image.go b/vendor/cloud.google.com/go/vision/apiv1/image.go new file mode 100644 index 0000000..7981366 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/image.go @@ -0,0 +1,37 @@ +// Copyright 2017, Google Inc. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vision + +import ( + "io" + "io/ioutil" + + pb "google.golang.org/genproto/googleapis/cloud/vision/v1" +) + +// NewImageFromReader reads the bytes of an image from r. +func NewImageFromReader(r io.Reader) (*pb.Image, error) { + bytes, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + return &pb.Image{Content: bytes}, nil +} + +// NewImageFromURI returns an image that refers to an object in Google Cloud Storage +// (when the uri is of the form "gs://BUCKET/OBJECT") or at a public URL. +func NewImageFromURI(uri string) *pb.Image { + return &pb.Image{Source: &pb.ImageSource{ImageUri: uri}} +} diff --git a/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client.go b/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client.go new file mode 100644 index 0000000..e3524a8 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client.go @@ -0,0 +1,134 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package vision + +import ( + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + visionpb "google.golang.org/genproto/googleapis/cloud/vision/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// ImageAnnotatorCallOptions contains the retry settings for each method of ImageAnnotatorClient. +type ImageAnnotatorCallOptions struct { + BatchAnnotateImages []gax.CallOption +} + +func defaultImageAnnotatorClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("vision.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultImageAnnotatorCallOptions() *ImageAnnotatorCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &ImageAnnotatorCallOptions{ + BatchAnnotateImages: retry[[2]string{"default", "idempotent"}], + } +} + +// ImageAnnotatorClient is a client for interacting with Google Cloud Vision API. +type ImageAnnotatorClient struct { + // The connection to the service. 
+ conn *grpc.ClientConn + + // The gRPC API client. + imageAnnotatorClient visionpb.ImageAnnotatorClient + + // The call options for this service. + CallOptions *ImageAnnotatorCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewImageAnnotatorClient creates a new image annotator client. +// +// Service that performs Google Cloud Vision API detection tasks over client +// images, such as face, landmark, logo, label, and text detection. The +// ImageAnnotator service returns detected entities from the images. +func NewImageAnnotatorClient(ctx context.Context, opts ...option.ClientOption) (*ImageAnnotatorClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultImageAnnotatorClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &ImageAnnotatorClient{ + conn: conn, + CallOptions: defaultImageAnnotatorCallOptions(), + + imageAnnotatorClient: visionpb.NewImageAnnotatorClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *ImageAnnotatorClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ImageAnnotatorClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *ImageAnnotatorClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// BatchAnnotateImages runs image detection and annotation for a batch of images. +func (c *ImageAnnotatorClient) BatchAnnotateImages(ctx context.Context, req *visionpb.BatchAnnotateImagesRequest, opts ...gax.CallOption) (*visionpb.BatchAnnotateImagesResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.BatchAnnotateImages[0:len(c.CallOptions.BatchAnnotateImages):len(c.CallOptions.BatchAnnotateImages)], opts...) + var resp *visionpb.BatchAnnotateImagesResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.imageAnnotatorClient.BatchAnnotateImages(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client_example_test.go b/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client_example_test.go new file mode 100644 index 0000000..974b385 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client_example_test.go @@ -0,0 +1,51 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package vision_test + +import ( + "cloud.google.com/go/vision/apiv1" + "golang.org/x/net/context" + visionpb "google.golang.org/genproto/googleapis/cloud/vision/v1" +) + +func ExampleNewImageAnnotatorClient() { + ctx := context.Background() + c, err := vision.NewImageAnnotatorClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleImageAnnotatorClient_BatchAnnotateImages() { + ctx := context.Background() + c, err := vision.NewImageAnnotatorClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &visionpb.BatchAnnotateImagesRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.BatchAnnotateImages(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/vision/apiv1/mock_test.go b/vendor/cloud.google.com/go/vision/apiv1/mock_test.go new file mode 100644 index 0000000..247c1be --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/mock_test.go @@ -0,0 +1,159 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package vision + +import ( + visionpb "google.golang.org/genproto/googleapis/cloud/vision/v1" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockImageAnnotatorServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + visionpb.ImageAnnotatorServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockImageAnnotatorServer) BatchAnnotateImages(ctx context.Context, req *visionpb.BatchAnnotateImagesRequest) (*visionpb.BatchAnnotateImagesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*visionpb.BatchAnnotateImagesResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
+var clientOpt option.ClientOption + +var ( + mockImageAnnotator mockImageAnnotatorServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + visionpb.RegisterImageAnnotatorServer(serv, &mockImageAnnotator) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestImageAnnotatorBatchAnnotateImages(t *testing.T) { + var expectedResponse *visionpb.BatchAnnotateImagesResponse = &visionpb.BatchAnnotateImagesResponse{} + + mockImageAnnotator.err = nil + mockImageAnnotator.reqs = nil + + mockImageAnnotator.resps = append(mockImageAnnotator.resps[:0], expectedResponse) + + var requests []*visionpb.AnnotateImageRequest = nil + var request = &visionpb.BatchAnnotateImagesRequest{ + Requests: requests, + } + + c, err := NewImageAnnotatorClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.BatchAnnotateImages(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockImageAnnotator.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q", got, want) + } +} + +func TestImageAnnotatorBatchAnnotateImagesError(t *testing.T) { + errCode := codes.PermissionDenied + mockImageAnnotator.err = gstatus.Error(errCode, "test error") + + var requests []*visionpb.AnnotateImageRequest = nil + var request = &visionpb.BatchAnnotateImagesRequest{ + Requests: requests, + } + + c, err := NewImageAnnotatorClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.BatchAnnotateImages(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/vision/apiv1p1beta1/BatchAnnotateImages_smoke_test.go b/vendor/cloud.google.com/go/vision/apiv1p1beta1/BatchAnnotateImages_smoke_test.go new file mode 100644 index 0000000..50f6376 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1p1beta1/BatchAnnotateImages_smoke_test.go @@ -0,0 +1,82 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT.
+ +package vision + +import ( + visionpb "google.golang.org/genproto/googleapis/cloud/vision/v1p1beta1" +) + +import ( + "fmt" + "strconv" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var _ = fmt.Sprintf +var _ = iterator.Done +var _ = strconv.FormatUint +var _ = time.Now + +func TestImageAnnotatorSmoke(t *testing.T) { + if testing.Short() { + t.Skip("skipping smoke test in short mode") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) + if ts == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + + projectId := testutil.ProjID() + _ = projectId + + c, err := NewImageAnnotatorClient(ctx, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + + var gcsImageUri string = "gs://gapic-toolkit/President_Barack_Obama.jpg" + var source = &visionpb.ImageSource{ + GcsImageUri: gcsImageUri, + } + var image = &visionpb.Image{ + Source: source, + } + var type_ visionpb.Feature_Type = visionpb.Feature_FACE_DETECTION + var featuresElement = &visionpb.Feature{ + Type: type_, + } + var features = []*visionpb.Feature{featuresElement} + var requestsElement = &visionpb.AnnotateImageRequest{ + Image: image, + Features: features, + } + var requests = []*visionpb.AnnotateImageRequest{requestsElement} + var request = &visionpb.BatchAnnotateImagesRequest{ + Requests: requests, + } + + if _, err := c.BatchAnnotateImages(ctx, request); err != nil { + t.Error(err) + } +} diff --git a/vendor/cloud.google.com/go/vision/apiv1p1beta1/doc.go b/vendor/cloud.google.com/go/vision/apiv1p1beta1/doc.go new file mode 100644 index 0000000..f25ee7a --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1p1beta1/doc.go @@ -0,0 +1,50 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package vision is an auto-generated package for the +// Google Cloud Vision API. +// +// NOTE: This package is in beta. It is not stable, and may be subject to changes. +// +// Integrates Google Vision features, including image labeling, face, logo, +// and +// landmark detection, optical character recognition (OCR), and detection of +// explicit content, into applications. +package vision // import "cloud.google.com/go/vision/apiv1p1beta1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. 
+func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-vision", + } +} diff --git a/vendor/cloud.google.com/go/vision/apiv1p1beta1/image_annotator_client.go b/vendor/cloud.google.com/go/vision/apiv1p1beta1/image_annotator_client.go new file mode 100644 index 0000000..25d8972 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1p1beta1/image_annotator_client.go @@ -0,0 +1,134 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package vision + +import ( + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + visionpb "google.golang.org/genproto/googleapis/cloud/vision/v1p1beta1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// ImageAnnotatorCallOptions contains the retry settings for each method of ImageAnnotatorClient. +type ImageAnnotatorCallOptions struct { + BatchAnnotateImages []gax.CallOption +} + +func defaultImageAnnotatorClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("vision.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultImageAnnotatorCallOptions() *ImageAnnotatorCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &ImageAnnotatorCallOptions{ + BatchAnnotateImages: retry[[2]string{"default", "idempotent"}], + } +} + +// ImageAnnotatorClient is a client for interacting with Google Cloud Vision API. +type ImageAnnotatorClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + imageAnnotatorClient visionpb.ImageAnnotatorClient + + // The call options for this service. + CallOptions *ImageAnnotatorCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewImageAnnotatorClient creates a new image annotator client. +// +// Service that performs Google Cloud Vision API detection tasks over client +// images, such as face, landmark, logo, label, and text detection. The +// ImageAnnotator service returns detected entities from the images. +func NewImageAnnotatorClient(ctx context.Context, opts ...option.ClientOption) (*ImageAnnotatorClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultImageAnnotatorClientOptions(), opts...)...) 
+ if err != nil { + return nil, err + } + c := &ImageAnnotatorClient{ + conn: conn, + CallOptions: defaultImageAnnotatorCallOptions(), + + imageAnnotatorClient: visionpb.NewImageAnnotatorClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *ImageAnnotatorClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ImageAnnotatorClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *ImageAnnotatorClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// BatchAnnotateImages runs image detection and annotation for a batch of images. +func (c *ImageAnnotatorClient) BatchAnnotateImages(ctx context.Context, req *visionpb.BatchAnnotateImagesRequest, opts ...gax.CallOption) (*visionpb.BatchAnnotateImagesResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.BatchAnnotateImages[0:len(c.CallOptions.BatchAnnotateImages):len(c.CallOptions.BatchAnnotateImages)], opts...) + var resp *visionpb.BatchAnnotateImagesResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.imageAnnotatorClient.BatchAnnotateImages(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/vision/apiv1p1beta1/image_annotator_client_example_test.go b/vendor/cloud.google.com/go/vision/apiv1p1beta1/image_annotator_client_example_test.go new file mode 100644 index 0000000..ed5aae6 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1p1beta1/image_annotator_client_example_test.go @@ -0,0 +1,51 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package vision_test + +import ( + "cloud.google.com/go/vision/apiv1p1beta1" + "golang.org/x/net/context" + visionpb "google.golang.org/genproto/googleapis/cloud/vision/v1p1beta1" +) + +func ExampleNewImageAnnotatorClient() { + ctx := context.Background() + c, err := vision.NewImageAnnotatorClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleImageAnnotatorClient_BatchAnnotateImages() { + ctx := context.Background() + c, err := vision.NewImageAnnotatorClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &visionpb.BatchAnnotateImagesRequest{ + // TODO: Fill request struct fields.
+ } + resp, err := c.BatchAnnotateImages(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/vision/apiv1p1beta1/mock_test.go b/vendor/cloud.google.com/go/vision/apiv1p1beta1/mock_test.go new file mode 100644 index 0000000..5f330cf --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1p1beta1/mock_test.go @@ -0,0 +1,159 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package vision + +import ( + visionpb "google.golang.org/genproto/googleapis/cloud/vision/v1p1beta1" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockImageAnnotatorServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + visionpb.ImageAnnotatorServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockImageAnnotatorServer) BatchAnnotateImages(ctx context.Context, req *visionpb.BatchAnnotateImagesRequest) (*visionpb.BatchAnnotateImagesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*visionpb.BatchAnnotateImagesResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
+var clientOpt option.ClientOption + +var ( + mockImageAnnotator mockImageAnnotatorServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + visionpb.RegisterImageAnnotatorServer(serv, &mockImageAnnotator) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestImageAnnotatorBatchAnnotateImages(t *testing.T) { + var expectedResponse *visionpb.BatchAnnotateImagesResponse = &visionpb.BatchAnnotateImagesResponse{} + + mockImageAnnotator.err = nil + mockImageAnnotator.reqs = nil + + mockImageAnnotator.resps = append(mockImageAnnotator.resps[:0], expectedResponse) + + var requests []*visionpb.AnnotateImageRequest = nil + var request = &visionpb.BatchAnnotateImagesRequest{ + Requests: requests, + } + + c, err := NewImageAnnotatorClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.BatchAnnotateImages(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockImageAnnotator.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q", got, want) + } +} + +func TestImageAnnotatorBatchAnnotateImagesError(t *testing.T) { + errCode := codes.PermissionDenied + mockImageAnnotator.err = gstatus.Error(errCode, "test error") + + var requests []*visionpb.AnnotateImageRequest = nil + var request = &visionpb.BatchAnnotateImagesRequest{ + Requests: requests, + } + + c, err := NewImageAnnotatorClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.BatchAnnotateImages(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/github.com/elazarl/go-bindata-assetfs/LICENSE b/vendor/github.com/elazarl/go-bindata-assetfs/LICENSE new file mode 100644 index 0000000..5782c72 --- /dev/null +++ b/vendor/github.com/elazarl/go-bindata-assetfs/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2014, Elazar Leibovich +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/elazarl/go-bindata-assetfs/README.md b/vendor/github.com/elazarl/go-bindata-assetfs/README.md new file mode 100644 index 0000000..27ee48f --- /dev/null +++ b/vendor/github.com/elazarl/go-bindata-assetfs/README.md @@ -0,0 +1,46 @@ +# go-bindata-assetfs + +Serve embedded files from [jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata) with `net/http`. + +[GoDoc](http://godoc.org/github.com/elazarl/go-bindata-assetfs) + +### Installation + +Install with + + $ go get github.com/jteeuwen/go-bindata/... + $ go get github.com/elazarl/go-bindata-assetfs/... + +### Creating embedded data + +Usage is identical to [jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata) usage; +instead of running `go-bindata`, run `go-bindata-assetfs`. + +The tool will create a `bindata_assetfs.go` file, which contains the embedded data. + +A typical use case is + + $ go-bindata-assetfs data/... + +### Using assetFS in your code + +The generated file provides an `assetFS()` function that returns an `http.FileSystem` +wrapping the embedded files. What you usually want to do is: + + http.Handle("/", http.FileServer(assetFS())) + +This registers a handler that serves the embedded files over HTTP. + +## Without running the binary tool + +You can always just run the `go-bindata` tool, and then use + + import "github.com/elazarl/go-bindata-assetfs" + ... + http.Handle("/", + http.FileServer( + &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, AssetInfo: AssetInfo, Prefix: "data"})) + +to serve files embedded from the `data` directory.
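To make the wiring concrete, here is a short, self-contained sketch of serving embedded assets through `AssetFS`. In a real project the `Asset` and `AssetDir` accessors are generated by `go-bindata`; the stubs below merely stand in for that generated code, and the embedded file name and port are illustrative, not part of this package:

```go
package main

import (
	"errors"
	"log"
	"net/http"

	assetfs "github.com/elazarl/go-bindata-assetfs"
)

// Stubs standing in for go-bindata's generated accessors, embedding a
// single file, data/index.html. Returning an error whose message contains
// "not found" lets AssetFS map missing paths to a 404 instead of a 500.
func Asset(path string) ([]byte, error) {
	if path == "data/index.html" {
		return []byte("<h1>hello from embedded data</h1>"), nil
	}
	return nil, errors.New("asset not found: " + path)
}

func AssetDir(path string) ([]string, error) {
	if path == "data" {
		return []string{"index.html"}, nil
	}
	return nil, errors.New("dir not found: " + path)
}

func main() {
	// AssetInfo is optional; when it is nil, AssetFS serves files with a
	// default timestamp.
	fs := &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, Prefix: "data"}
	http.Handle("/", http.FileServer(fs))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```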
diff --git a/vendor/github.com/elazarl/go-bindata-assetfs/assetfs.go b/vendor/github.com/elazarl/go-bindata-assetfs/assetfs.go new file mode 100644 index 0000000..04f6d7a --- /dev/null +++ b/vendor/github.com/elazarl/go-bindata-assetfs/assetfs.go @@ -0,0 +1,167 @@ +package assetfs + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + "net/http" + "os" + "path" + "path/filepath" + "strings" + "time" +) + +var ( + defaultFileTimestamp = time.Now() +) + +// FakeFile implements the os.FileInfo interface for a given path and size +type FakeFile struct { + // Path is the path of this file + Path string + // Dir marks whether the path is a directory + Dir bool + // Len is the length of the fake file, zero if it is a directory + Len int64 + // Timestamp is the ModTime of this file + Timestamp time.Time +} + +func (f *FakeFile) Name() string { + _, name := filepath.Split(f.Path) + return name +} + +func (f *FakeFile) Mode() os.FileMode { + mode := os.FileMode(0644) + if f.Dir { + return mode | os.ModeDir + } + return mode +} + +func (f *FakeFile) ModTime() time.Time { + return f.Timestamp +} + +func (f *FakeFile) Size() int64 { + return f.Len +} + +func (f *FakeFile) IsDir() bool { + return f.Mode().IsDir() +} + +func (f *FakeFile) Sys() interface{} { + return nil +} + +// AssetFile implements the http.File interface for a non-directory file with content +type AssetFile struct { + *bytes.Reader + io.Closer + FakeFile +} + +func NewAssetFile(name string, content []byte, timestamp time.Time) *AssetFile { + if timestamp.IsZero() { + timestamp = defaultFileTimestamp + } + return &AssetFile{ + bytes.NewReader(content), + ioutil.NopCloser(nil), + FakeFile{name, false, int64(len(content)), timestamp}} +} + +func (f *AssetFile) Readdir(count int) ([]os.FileInfo, error) { + return nil, errors.New("not a directory") +} + +func (f *AssetFile) Size() int64 { + return f.FakeFile.Size() +} + +func (f *AssetFile) Stat() (os.FileInfo, error) { + return f, nil +} + +// AssetDirectory implements the http.File interface for a directory
+type AssetDirectory struct { + AssetFile + ChildrenRead int + Children []os.FileInfo +} + +func NewAssetDirectory(name string, children []string, fs *AssetFS) *AssetDirectory { + fileinfos := make([]os.FileInfo, 0, len(children)) + for _, child := range children { + _, err := fs.AssetDir(filepath.Join(name, child)) + fileinfos = append(fileinfos, &FakeFile{child, err == nil, 0, time.Time{}}) + } + return &AssetDirectory{ + AssetFile{ + bytes.NewReader(nil), + ioutil.NopCloser(nil), + FakeFile{name, true, 0, time.Time{}}, + }, + 0, + fileinfos} +} + +func (f *AssetDirectory) Readdir(count int) ([]os.FileInfo, error) { + if count <= 0 { + return f.Children, nil + } + if f.ChildrenRead+count > len(f.Children) { + count = len(f.Children) - f.ChildrenRead + } + rv := f.Children[f.ChildrenRead : f.ChildrenRead+count] + f.ChildrenRead += count + return rv, nil +} + +func (f *AssetDirectory) Stat() (os.FileInfo, error) { + return f, nil +} + +// AssetFS implements http.FileSystem, allowing +// embedded files to be served with the net/http package.
+type AssetFS struct { + // Asset should return the content of the file at path, if it exists + Asset func(path string) ([]byte, error) + // AssetDir should return the list of files in the path + AssetDir func(path string) ([]string, error) + // AssetInfo should return the FileInfo of the file at path, if it exists + AssetInfo func(path string) (os.FileInfo, error) + // Prefix is prepended to http requests + Prefix string +} + +func (fs *AssetFS) Open(name string) (http.File, error) { + name = path.Join(fs.Prefix, name) + if len(name) > 0 && name[0] == '/' { + name = name[1:] + } + if b, err := fs.Asset(name); err == nil { + timestamp := defaultFileTimestamp + if fs.AssetInfo != nil { + if info, err := fs.AssetInfo(name); err == nil { + timestamp = info.ModTime() + } + } + return NewAssetFile(name, b, timestamp), nil + } + if children, err := fs.AssetDir(name); err == nil { + return NewAssetDirectory(name, children, fs), nil + } else { + // If the underlying error says "not found", return os.ErrNotExist + // so the server responds with a 404 for missing files rather + // than a 500. + if strings.Contains(err.Error(), "not found") { + return nil, os.ErrNotExist + } + return nil, err + } +} diff --git a/vendor/github.com/elazarl/go-bindata-assetfs/doc.go b/vendor/github.com/elazarl/go-bindata-assetfs/doc.go new file mode 100644 index 0000000..a664249 --- /dev/null +++ b/vendor/github.com/elazarl/go-bindata-assetfs/doc.go @@ -0,0 +1,13 @@ +// Package assetfs allows packages to serve static content embedded +// with the go-bindata tool via the standard net/http package. +// +// See https://github.com/jteeuwen/go-bindata for more information +// about embedding binary data with go-bindata. +// +// Usage example, after running +// $ go-bindata data/... +// use: +// http.Handle("/", +// http.FileServer( +// &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, Prefix: "data"})) +package assetfs diff --git a/vendor/github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs/main.go b/vendor/github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs/main.go new file mode 100644 index 0000000..fdaad5e --- /dev/null +++ b/vendor/github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs/main.go @@ -0,0 +1,100 @@ +package main + +import ( + "bufio" + "bytes" + "flag" + "fmt" + "os" + "os/exec" + "strings" +) + +const bindatafile = "bindata.go" + +func isDebug(args []string) bool { + flagset := flag.NewFlagSet("", flag.ContinueOnError) + debug := flagset.Bool("debug", false, "") + debugArgs := make([]string, 0) + for _, arg := range args { + if strings.HasPrefix(arg, "-debug") { + debugArgs = append(debugArgs, arg) + } + } + flagset.Parse(debugArgs) + if debug == nil { + return false + } + return *debug +} + +func main() { + if _, err := exec.LookPath("go-bindata"); err != nil { + fmt.Println("Cannot find go-bindata executable in path") + fmt.Println("Maybe you need: go get github.com/elazarl/go-bindata-assetfs/...") + os.Exit(1) + } + cmd := exec.Command("go-bindata", os.Args[1:]...)
+ cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + os.Exit(1) + } + in, err := os.Open(bindatafile) + if err != nil { + fmt.Fprintln(os.Stderr, "Cannot read", bindatafile, err) + return + } + out, err := os.Create("bindata_assetfs.go") + if err != nil { + fmt.Fprintln(os.Stderr, "Cannot write 'bindata_assetfs.go'", err) + return + } + debug := isDebug(os.Args[1:]) + r := bufio.NewReader(in) + done := false + for line, isPrefix, err := r.ReadLine(); err == nil; line, isPrefix, err = r.ReadLine() { + if !isPrefix { + line = append(line, '\n') + } + if _, err := out.Write(line); err != nil { + fmt.Fprintln(os.Stderr, "Cannot write to 'bindata_assetfs.go'", err) + return + } + if !done && !isPrefix && bytes.HasPrefix(line, []byte("import (")) { + if debug { + fmt.Fprintln(out, "\t\"net/http\"") + } else { + fmt.Fprintln(out, "\t\"github.com/elazarl/go-bindata-assetfs\"") + } + done = true + } + } + if debug { + fmt.Fprintln(out, ` +func assetFS() http.FileSystem { + for k := range _bintree.Children { + return http.Dir(k) + } + panic("unreachable") +}`) + } else { + fmt.Fprintln(out, ` +func assetFS() *assetfs.AssetFS { + assetInfo := func(path string) (os.FileInfo, error) { + return os.Stat(path) + } + for k := range _bintree.Children { + return &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, AssetInfo: assetInfo, Prefix: k} + } + panic("unreachable") +}`) + } + // Close files BEFORE remove calls (don't use defer). + in.Close() + out.Close() + if err := os.Remove(bindatafile); err != nil { + fmt.Fprintln(os.Stderr, "Cannot remove", bindatafile, err) + } +} diff --git a/vendor/github.com/golang/protobuf/.gitignore b/vendor/github.com/golang/protobuf/.gitignore new file mode 100644 index 0000000..8f5b596 --- /dev/null +++ b/vendor/github.com/golang/protobuf/.gitignore @@ -0,0 +1,16 @@ +.DS_Store +*.[568ao] +*.ao +*.so +*.pyc +._* +.nfs.* +[568a].out +*~ +*.orig +core +_obj +_test +_testmain.go +protoc-gen-go/testdata/multi/*.pb.go +_conformance/_conformance diff --git a/vendor/github.com/golang/protobuf/.travis.yml b/vendor/github.com/golang/protobuf/.travis.yml new file mode 100644 index 0000000..93c6780 --- /dev/null +++ b/vendor/github.com/golang/protobuf/.travis.yml @@ -0,0 +1,18 @@ +sudo: false +language: go +go: +- 1.6.x +- 1.7.x +- 1.8.x +- 1.9.x + +install: + - go get -v -d -t github.com/golang/protobuf/... + - curl -L https://github.com/google/protobuf/releases/download/v3.3.0/protoc-3.3.0-linux-x86_64.zip -o /tmp/protoc.zip + - unzip /tmp/protoc.zip -d $HOME/protoc + +env: + - PATH=$HOME/protoc/bin:$PATH + +script: + - make all test diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS new file mode 100644 index 0000000..15167cd --- /dev/null +++ b/vendor/github.com/golang/protobuf/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS new file mode 100644 index 0000000..1c4577e --- /dev/null +++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. 
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE new file mode 100644 index 0000000..1b1b192 --- /dev/null +++ b/vendor/github.com/golang/protobuf/LICENSE @@ -0,0 +1,31 @@ +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/golang/protobuf/Make.protobuf b/vendor/github.com/golang/protobuf/Make.protobuf new file mode 100644 index 0000000..15071de --- /dev/null +++ b/vendor/github.com/golang/protobuf/Make.protobuf @@ -0,0 +1,40 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Includable Makefile to add a rule for generating .pb.go files from .proto files +# (Google protocol buffer descriptions). +# Typical use if myproto.proto is a file in package mypackage in this directory: +# +# include $(GOROOT)/src/pkg/github.com/golang/protobuf/Make.protobuf + +%.pb.go: %.proto + protoc --go_out=. $< + diff --git a/vendor/github.com/golang/protobuf/Makefile b/vendor/github.com/golang/protobuf/Makefile new file mode 100644 index 0000000..a1421d8 --- /dev/null +++ b/vendor/github.com/golang/protobuf/Makefile @@ -0,0 +1,55 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +all: install + +install: + go install ./proto ./jsonpb ./ptypes + go install ./protoc-gen-go + +test: + go test ./proto ./jsonpb ./ptypes + make -C protoc-gen-go/testdata test + +clean: + go clean ./... + +nuke: + go clean -i ./... 
+ +regenerate: + make -C protoc-gen-go/descriptor regenerate + make -C protoc-gen-go/plugin regenerate + make -C protoc-gen-go/testdata regenerate + make -C proto/testdata regenerate + make -C jsonpb/jsonpb_test_proto regenerate + make -C _conformance regenerate diff --git a/vendor/github.com/golang/protobuf/README.md b/vendor/github.com/golang/protobuf/README.md new file mode 100644 index 0000000..9c4c815 --- /dev/null +++ b/vendor/github.com/golang/protobuf/README.md @@ -0,0 +1,244 @@ +# Go support for Protocol Buffers + +[![Build Status](https://travis-ci.org/golang/protobuf.svg?branch=master)](https://travis-ci.org/golang/protobuf) +[![GoDoc](https://godoc.org/github.com/golang/protobuf?status.svg)](https://godoc.org/github.com/golang/protobuf) + +Google's data interchange format. +Copyright 2010 The Go Authors. +https://github.com/golang/protobuf + +This package and the code it generates require at least Go 1.4. + +This software implements Go bindings for protocol buffers. For +information about protocol buffers themselves, see + https://developers.google.com/protocol-buffers/ + +## Installation ## + +To use this software, you must: +- Install the standard C++ implementation of protocol buffers from + https://developers.google.com/protocol-buffers/ +- Of course, install the Go compiler and tools from + https://golang.org/ + See + https://golang.org/doc/install + for details or, if you are using gccgo, follow the instructions at + https://golang.org/doc/install/gccgo +- Grab the code from the repository and install the proto package. + The simplest way is to run `go get -u github.com/golang/protobuf/protoc-gen-go`. + The compiler plugin, protoc-gen-go, will be installed in $GOBIN, + defaulting to $GOPATH/bin. It must be in your $PATH for the protocol + compiler, protoc, to find it. + +This software has two parts: a 'protocol compiler plugin' that +generates Go source files that, once compiled, can access and manage +protocol buffers; and a library that implements run-time support for +encoding (marshaling), decoding (unmarshaling), and accessing protocol +buffers. + +There is support for gRPC in Go using protocol buffers. +See the note at the bottom of this file for details. + +There are no insertion points in the plugin. + + +## Using protocol buffers with Go ## + +Once the software is installed, there are two steps to using it. +First you must compile the protocol buffer definitions and then import +them, with the support library, into your program. + +To compile the protocol buffer definition, run protoc with the --go_out +parameter set to the directory you want to output the Go code to. + + protoc --go_out=. *.proto + +The generated files will be suffixed .pb.go. See the Test code below +for an example using such a file. + + +The package comment for the proto library contains text describing +the interface provided in Go for protocol buffers. Here is an edited +version. + +========== + +The proto package converts data structures to and from the +wire format of protocol buffers. It works in concert with the +Go source code generated for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + Helpers for getting values are superseded by the + GetFoo methods and their use is deprecated. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed with the enum's type name. Enum types have + a String method, and an Enum method to assist in message construction. + - Nested groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any), followed by the CamelCased name of the + extension field itself. HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value; + a short sketch appears at the end of this file. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Enum types do not get an Enum method. + +Consider file test.proto, containing + +```proto + syntax = "proto2"; + package example; + + enum FOO { X = 17; }; + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + } +``` + +To create and play with a Test object from the example package, + +```go + package main + + import ( + "log" + + "github.com/golang/protobuf/proto" + "path/to/example" + ) + + func main() { + test := &example.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &example.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &example.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // etc. + } +``` + +## Parameters ## + +To pass extra parameters to the plugin, use a comma-separated +parameter list separated from the output directory by a colon: + + + protoc --go_out=plugins=grpc,import_path=mypackage:. *.proto + + +- `import_prefix=xxx` - a prefix that is added onto the beginning of + all imports. Useful for things like generating protos in a + subdirectory, or regenerating vendored protobufs in-place. +- `import_path=foo/bar` - used as the package if no input files + declare `go_package`.
If it contains slashes, everything up to the + rightmost slash is ignored. +- `plugins=plugin1+plugin2` - specifies the list of sub-plugins to + load. The only plugin in this repo is `grpc`. +- `Mfoo/bar.proto=quux/shme` - declares that foo/bar.proto is + associated with Go package quux/shme. This is subject to the + import_prefix parameter. + +## gRPC Support ## + +If a proto file specifies RPC services, protoc-gen-go can be instructed to +generate code compatible with gRPC (http://www.grpc.io/). To do this, pass +the `plugins` parameter to protoc-gen-go; the usual way is to insert it into +the --go_out argument to protoc: + + protoc --go_out=plugins=grpc:. *.proto + +## Compatibility ## + +The library and the generated code are expected to be stable over time. +However, we reserve the right to make breaking changes without notice for the +following reasons: + +- Security. A security issue in the specification or implementation may come to + light whose resolution requires breaking compatibility. We reserve the right + to address such security issues. +- Unspecified behavior. There are some aspects of the Protocol Buffers + specification that are undefined. Programs that depend on such unspecified + behavior may break in future releases. +- Specification errors or changes. If it becomes necessary to address an + inconsistency, incompleteness, or change in the Protocol Buffers + specification, resolving the issue could affect the meaning or legality of + existing programs. We reserve the right to address such issues, including + updating the implementations. +- Bugs. If the library has a bug that violates the specification, a program + that depends on the buggy behavior may break if the bug is fixed. We reserve + the right to fix such bugs. +- Adding methods or fields to generated structs. These may conflict with field + names that already exist in a schema, causing applications to break. When the + code generator encounters a field in the schema that would collide with a + generated field or method name, the code generator will append an underscore + to the generated field or method name. +- Adding, removing, or changing methods or fields in generated structs that + start with `XXX`. These parts of the generated code are exported out of + necessity, but should not be considered part of the public API. +- Adding, removing, or changing unexported symbols in generated code. + +Any breaking changes outside of these will be announced 6 months in advance to +protobuf@googlegroups.com. + +You should, whenever possible, use generated code created by the `protoc-gen-go` +tool built at the same commit as the `proto` package. The `proto` package +declares package-level constants in the form `ProtoPackageIsVersionX`. +Application code and generated code may depend on one of these constants to +ensure that compilation will fail if the available version of the proto library +is too old. Whenever we make a change to the generated code that requires newer +library support, in the same commit we will increment the version number of the +generated code and declare a new package-level constant whose name incorporates +the latest version number. Removing a compatibility constant is considered a +breaking change and would be subject to the announcement policy stated above. + +The `protoc-gen-go/generator` package exposes a plugin interface, +which is used by the gRPC code generation. This interface is not +supported and is subject to incompatible changes without notice. 
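
The oneof wrapper types described earlier have no example in this file, so here is a minimal, hypothetical sketch. It assumes a generated package imported as `example` (as in the example above) containing a message `Communique` with `oneof union { string name = 1; int64 number = 2; }`; none of these names exist in this repository. Setting a oneof case means assigning its generated wrapper struct to the single interface-valued field; reading it back is a type switch, where nil means unset.

```go
package main

import (
	"fmt"

	"path/to/example" // hypothetical generated package, as in the example above
)

func main() {
	// Set a oneof case by assigning its generated wrapper struct.
	// Assigning a different wrapper later would replace this case.
	msg := &example.Communique{
		Union: &example.Communique_Name{Name: "hello"},
	}

	// Read the oneof back with a type switch; nil means no case is set.
	switch u := msg.Union.(type) {
	case *example.Communique_Name:
		fmt.Println("name:", u.Name)
	case *example.Communique_Number:
		fmt.Println("number:", u.Number)
	case nil:
		fmt.Println("union not set")
	}
}
```

Because each case is a distinct wrapper type satisfying a small generated interface, the set of cases is closed at compile time and at most one case can be populated at a time.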
diff --git a/vendor/github.com/golang/protobuf/_conformance/Makefile b/vendor/github.com/golang/protobuf/_conformance/Makefile new file mode 100644 index 0000000..89800e2 --- /dev/null +++ b/vendor/github.com/golang/protobuf/_conformance/Makefile @@ -0,0 +1,33 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2016 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + protoc --go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,Mgoogle/protobuf/struct.proto=github.com/golang/protobuf/ptypes/struct,Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp,Mgoogle/protobuf/wrappers.proto=github.com/golang/protobuf/ptypes/wrappers,Mgoogle/protobuf/field_mask.proto=google.golang.org/genproto/protobuf:. conformance_proto/conformance.proto diff --git a/vendor/github.com/golang/protobuf/_conformance/conformance.go b/vendor/github.com/golang/protobuf/_conformance/conformance.go new file mode 100644 index 0000000..c54212c --- /dev/null +++ b/vendor/github.com/golang/protobuf/_conformance/conformance.go @@ -0,0 +1,161 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// conformance implements the conformance test subprocess protocol as +// documented in conformance.proto. +package main + +import ( + "encoding/binary" + "fmt" + "io" + "os" + + pb "github.com/golang/protobuf/_conformance/conformance_proto" + "github.com/golang/protobuf/jsonpb" + "github.com/golang/protobuf/proto" +) + +func main() { + var sizeBuf [4]byte + inbuf := make([]byte, 0, 4096) + outbuf := proto.NewBuffer(nil) + for { + if _, err := io.ReadFull(os.Stdin, sizeBuf[:]); err == io.EOF { + break + } else if err != nil { + fmt.Fprintln(os.Stderr, "go conformance: read request:", err) + os.Exit(1) + } + size := binary.LittleEndian.Uint32(sizeBuf[:]) + if int(size) > cap(inbuf) { + inbuf = make([]byte, size) + } + inbuf = inbuf[:size] + if _, err := io.ReadFull(os.Stdin, inbuf); err != nil { + fmt.Fprintln(os.Stderr, "go conformance: read request:", err) + os.Exit(1) + } + + req := new(pb.ConformanceRequest) + if err := proto.Unmarshal(inbuf, req); err != nil { + fmt.Fprintln(os.Stderr, "go conformance: parse request:", err) + os.Exit(1) + } + res := handle(req) + + if err := outbuf.Marshal(res); err != nil { + fmt.Fprintln(os.Stderr, "go conformance: marshal response:", err) + os.Exit(1) + } + binary.LittleEndian.PutUint32(sizeBuf[:], uint32(len(outbuf.Bytes()))) + if _, err := os.Stdout.Write(sizeBuf[:]); err != nil { + fmt.Fprintln(os.Stderr, "go conformance: write response:", err) + os.Exit(1) + } + if _, err := os.Stdout.Write(outbuf.Bytes()); err != nil { + fmt.Fprintln(os.Stderr, "go conformance: write response:", err) + os.Exit(1) + } + outbuf.Reset() + } +} + +var jsonMarshaler = jsonpb.Marshaler{ + OrigName: true, +} + +func handle(req *pb.ConformanceRequest) *pb.ConformanceResponse { + var err error + var msg pb.TestAllTypes + switch p := req.Payload.(type) { + case *pb.ConformanceRequest_ProtobufPayload: + err = proto.Unmarshal(p.ProtobufPayload, &msg) + case *pb.ConformanceRequest_JsonPayload: + err = jsonpb.UnmarshalString(p.JsonPayload, &msg) + if err != nil && err.Error() == "unmarshaling Any not supported yet" { + return &pb.ConformanceResponse{ + Result: &pb.ConformanceResponse_Skipped{ + Skipped: err.Error(), + }, + } + } + default: + return &pb.ConformanceResponse{ + Result: &pb.ConformanceResponse_RuntimeError{ + RuntimeError: "unknown request payload type", + }, + } + } + if err != nil { + return &pb.ConformanceResponse{ + Result: &pb.ConformanceResponse_ParseError{ + ParseError: err.Error(), + }, + } + } + switch req.RequestedOutputFormat { + case pb.WireFormat_PROTOBUF: + p, err := proto.Marshal(&msg) + if err != nil { + 
return &pb.ConformanceResponse{ + Result: &pb.ConformanceResponse_SerializeError{ + SerializeError: err.Error(), + }, + } + } + return &pb.ConformanceResponse{ + Result: &pb.ConformanceResponse_ProtobufPayload{ + ProtobufPayload: p, + }, + } + case pb.WireFormat_JSON: + p, err := jsonMarshaler.MarshalToString(&msg) + if err != nil { + return &pb.ConformanceResponse{ + Result: &pb.ConformanceResponse_SerializeError{ + SerializeError: err.Error(), + }, + } + } + return &pb.ConformanceResponse{ + Result: &pb.ConformanceResponse_JsonPayload{ + JsonPayload: p, + }, + } + default: + return &pb.ConformanceResponse{ + Result: &pb.ConformanceResponse_RuntimeError{ + RuntimeError: "unknown output format", + }, + } + } +} diff --git a/vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.pb.go b/vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.pb.go new file mode 100644 index 0000000..ec354ea --- /dev/null +++ b/vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.pb.go @@ -0,0 +1,1885 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: conformance_proto/conformance.proto + +/* +Package conformance is a generated protocol buffer package. + +It is generated from these files: + conformance_proto/conformance.proto + +It has these top-level messages: + ConformanceRequest + ConformanceResponse + TestAllTypes + ForeignMessage +*/ +package conformance + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" +import google_protobuf1 "github.com/golang/protobuf/ptypes/duration" +import google_protobuf2 "google.golang.org/genproto/protobuf" +import google_protobuf3 "github.com/golang/protobuf/ptypes/struct" +import google_protobuf4 "github.com/golang/protobuf/ptypes/timestamp" +import google_protobuf5 "github.com/golang/protobuf/ptypes/wrappers" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type WireFormat int32 + +const ( + WireFormat_UNSPECIFIED WireFormat = 0 + WireFormat_PROTOBUF WireFormat = 1 + WireFormat_JSON WireFormat = 2 +) + +var WireFormat_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "PROTOBUF", + 2: "JSON", +} +var WireFormat_value = map[string]int32{ + "UNSPECIFIED": 0, + "PROTOBUF": 1, + "JSON": 2, +} + +func (x WireFormat) String() string { + return proto.EnumName(WireFormat_name, int32(x)) +} +func (WireFormat) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type ForeignEnum int32 + +const ( + ForeignEnum_FOREIGN_FOO ForeignEnum = 0 + ForeignEnum_FOREIGN_BAR ForeignEnum = 1 + ForeignEnum_FOREIGN_BAZ ForeignEnum = 2 +) + +var ForeignEnum_name = map[int32]string{ + 0: "FOREIGN_FOO", + 1: "FOREIGN_BAR", + 2: "FOREIGN_BAZ", +} +var ForeignEnum_value = map[string]int32{ + "FOREIGN_FOO": 0, + "FOREIGN_BAR": 1, + "FOREIGN_BAZ": 2, +} + +func (x ForeignEnum) String() string { + return proto.EnumName(ForeignEnum_name, int32(x)) +} +func (ForeignEnum) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +type TestAllTypes_NestedEnum int32 + +const ( + TestAllTypes_FOO TestAllTypes_NestedEnum = 0 + TestAllTypes_BAR TestAllTypes_NestedEnum = 1 + TestAllTypes_BAZ TestAllTypes_NestedEnum = 2 + TestAllTypes_NEG TestAllTypes_NestedEnum = -1 +) + +var TestAllTypes_NestedEnum_name = map[int32]string{ + 0: "FOO", + 1: "BAR", + 2: "BAZ", + -1: "NEG", +} + +var TestAllTypes_NestedEnum_value = map[string]int32{ + "FOO": 0, + "BAR": 1, + "BAZ": 2, + "NEG": -1, +} + +func (x TestAllTypes_NestedEnum) String() string { + return proto.EnumName(TestAllTypes_NestedEnum_name, int32(x)) +} +func (TestAllTypes_NestedEnum) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +// Represents a single test case's input. The testee should: +// +// 1. parse this proto (which should always succeed) +// 2. parse the protobuf or JSON payload in "payload" (which may fail) +// 3. if the parse succeeded, serialize the message in the requested format. +type ConformanceRequest struct { + // The payload (whether protobuf or JSON) is always for a TestAllTypes proto + // (see below). + // + // Types that are valid to be assigned to Payload: + // *ConformanceRequest_ProtobufPayload + // *ConformanceRequest_JsonPayload + Payload isConformanceRequest_Payload `protobuf_oneof:"payload"` + // Which format should the testee serialize its message to?
+ RequestedOutputFormat WireFormat `protobuf:"varint,3,opt,name=requested_output_format,json=requestedOutputFormat,enum=conformance.WireFormat" json:"requested_output_format,omitempty"` +} + +func (m *ConformanceRequest) Reset() { *m = ConformanceRequest{} } +func (m *ConformanceRequest) String() string { return proto.CompactTextString(m) } +func (*ConformanceRequest) ProtoMessage() {} +func (*ConformanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isConformanceRequest_Payload interface { + isConformanceRequest_Payload() +} + +type ConformanceRequest_ProtobufPayload struct { + ProtobufPayload []byte `protobuf:"bytes,1,opt,name=protobuf_payload,json=protobufPayload,proto3,oneof"` +} +type ConformanceRequest_JsonPayload struct { + JsonPayload string `protobuf:"bytes,2,opt,name=json_payload,json=jsonPayload,oneof"` +} + +func (*ConformanceRequest_ProtobufPayload) isConformanceRequest_Payload() {} +func (*ConformanceRequest_JsonPayload) isConformanceRequest_Payload() {} + +func (m *ConformanceRequest) GetPayload() isConformanceRequest_Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *ConformanceRequest) GetProtobufPayload() []byte { + if x, ok := m.GetPayload().(*ConformanceRequest_ProtobufPayload); ok { + return x.ProtobufPayload + } + return nil +} + +func (m *ConformanceRequest) GetJsonPayload() string { + if x, ok := m.GetPayload().(*ConformanceRequest_JsonPayload); ok { + return x.JsonPayload + } + return "" +} + +func (m *ConformanceRequest) GetRequestedOutputFormat() WireFormat { + if m != nil { + return m.RequestedOutputFormat + } + return WireFormat_UNSPECIFIED +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ConformanceRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ConformanceRequest_OneofMarshaler, _ConformanceRequest_OneofUnmarshaler, _ConformanceRequest_OneofSizer, []interface{}{ + (*ConformanceRequest_ProtobufPayload)(nil), + (*ConformanceRequest_JsonPayload)(nil), + } +} + +func _ConformanceRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ConformanceRequest) + // payload + switch x := m.Payload.(type) { + case *ConformanceRequest_ProtobufPayload: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeRawBytes(x.ProtobufPayload) + case *ConformanceRequest_JsonPayload: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.JsonPayload) + case nil: + default: + return fmt.Errorf("ConformanceRequest.Payload has unexpected type %T", x) + } + return nil +} + +func _ConformanceRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ConformanceRequest) + switch tag { + case 1: // payload.protobuf_payload + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Payload = &ConformanceRequest_ProtobufPayload{x} + return true, err + case 2: // payload.json_payload + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Payload = &ConformanceRequest_JsonPayload{x} + return true, err + default: + return false, nil + } +} + +func _ConformanceRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ConformanceRequest) + // payload + switch x := m.Payload.(type) { + case *ConformanceRequest_ProtobufPayload: + n += proto.SizeVarint(1<<3 | 
proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ProtobufPayload))) + n += len(x.ProtobufPayload) + case *ConformanceRequest_JsonPayload: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.JsonPayload))) + n += len(x.JsonPayload) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Represents a single test case's output. +type ConformanceResponse struct { + // Types that are valid to be assigned to Result: + // *ConformanceResponse_ParseError + // *ConformanceResponse_SerializeError + // *ConformanceResponse_RuntimeError + // *ConformanceResponse_ProtobufPayload + // *ConformanceResponse_JsonPayload + // *ConformanceResponse_Skipped + Result isConformanceResponse_Result `protobuf_oneof:"result"` +} + +func (m *ConformanceResponse) Reset() { *m = ConformanceResponse{} } +func (m *ConformanceResponse) String() string { return proto.CompactTextString(m) } +func (*ConformanceResponse) ProtoMessage() {} +func (*ConformanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +type isConformanceResponse_Result interface { + isConformanceResponse_Result() +} + +type ConformanceResponse_ParseError struct { + ParseError string `protobuf:"bytes,1,opt,name=parse_error,json=parseError,oneof"` +} +type ConformanceResponse_SerializeError struct { + SerializeError string `protobuf:"bytes,6,opt,name=serialize_error,json=serializeError,oneof"` +} +type ConformanceResponse_RuntimeError struct { + RuntimeError string `protobuf:"bytes,2,opt,name=runtime_error,json=runtimeError,oneof"` +} +type ConformanceResponse_ProtobufPayload struct { + ProtobufPayload []byte `protobuf:"bytes,3,opt,name=protobuf_payload,json=protobufPayload,proto3,oneof"` +} +type ConformanceResponse_JsonPayload struct { + JsonPayload string `protobuf:"bytes,4,opt,name=json_payload,json=jsonPayload,oneof"` +} +type ConformanceResponse_Skipped struct { + Skipped string `protobuf:"bytes,5,opt,name=skipped,oneof"` +} + +func (*ConformanceResponse_ParseError) isConformanceResponse_Result() {} +func (*ConformanceResponse_SerializeError) isConformanceResponse_Result() {} +func (*ConformanceResponse_RuntimeError) isConformanceResponse_Result() {} +func (*ConformanceResponse_ProtobufPayload) isConformanceResponse_Result() {} +func (*ConformanceResponse_JsonPayload) isConformanceResponse_Result() {} +func (*ConformanceResponse_Skipped) isConformanceResponse_Result() {} + +func (m *ConformanceResponse) GetResult() isConformanceResponse_Result { + if m != nil { + return m.Result + } + return nil +} + +func (m *ConformanceResponse) GetParseError() string { + if x, ok := m.GetResult().(*ConformanceResponse_ParseError); ok { + return x.ParseError + } + return "" +} + +func (m *ConformanceResponse) GetSerializeError() string { + if x, ok := m.GetResult().(*ConformanceResponse_SerializeError); ok { + return x.SerializeError + } + return "" +} + +func (m *ConformanceResponse) GetRuntimeError() string { + if x, ok := m.GetResult().(*ConformanceResponse_RuntimeError); ok { + return x.RuntimeError + } + return "" +} + +func (m *ConformanceResponse) GetProtobufPayload() []byte { + if x, ok := m.GetResult().(*ConformanceResponse_ProtobufPayload); ok { + return x.ProtobufPayload + } + return nil +} + +func (m *ConformanceResponse) GetJsonPayload() string { + if x, ok := m.GetResult().(*ConformanceResponse_JsonPayload); ok { + return x.JsonPayload + } + return "" +} + +func (m *ConformanceResponse) GetSkipped() string { + if x, ok := 
m.GetResult().(*ConformanceResponse_Skipped); ok { + return x.Skipped + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ConformanceResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ConformanceResponse_OneofMarshaler, _ConformanceResponse_OneofUnmarshaler, _ConformanceResponse_OneofSizer, []interface{}{ + (*ConformanceResponse_ParseError)(nil), + (*ConformanceResponse_SerializeError)(nil), + (*ConformanceResponse_RuntimeError)(nil), + (*ConformanceResponse_ProtobufPayload)(nil), + (*ConformanceResponse_JsonPayload)(nil), + (*ConformanceResponse_Skipped)(nil), + } +} + +func _ConformanceResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ConformanceResponse) + // result + switch x := m.Result.(type) { + case *ConformanceResponse_ParseError: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeStringBytes(x.ParseError) + case *ConformanceResponse_SerializeError: + b.EncodeVarint(6<<3 | proto.WireBytes) + b.EncodeStringBytes(x.SerializeError) + case *ConformanceResponse_RuntimeError: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.RuntimeError) + case *ConformanceResponse_ProtobufPayload: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeRawBytes(x.ProtobufPayload) + case *ConformanceResponse_JsonPayload: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeStringBytes(x.JsonPayload) + case *ConformanceResponse_Skipped: + b.EncodeVarint(5<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Skipped) + case nil: + default: + return fmt.Errorf("ConformanceResponse.Result has unexpected type %T", x) + } + return nil +} + +func _ConformanceResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ConformanceResponse) + switch tag { + case 1: // result.parse_error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Result = &ConformanceResponse_ParseError{x} + return true, err + case 6: // result.serialize_error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Result = &ConformanceResponse_SerializeError{x} + return true, err + case 2: // result.runtime_error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Result = &ConformanceResponse_RuntimeError{x} + return true, err + case 3: // result.protobuf_payload + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Result = &ConformanceResponse_ProtobufPayload{x} + return true, err + case 4: // result.json_payload + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Result = &ConformanceResponse_JsonPayload{x} + return true, err + case 5: // result.skipped + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Result = &ConformanceResponse_Skipped{x} + return true, err + default: + return false, nil + } +} + +func _ConformanceResponse_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ConformanceResponse) + // result + switch x := m.Result.(type) { + case *ConformanceResponse_ParseError: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ParseError))) + n 
+= len(x.ParseError) + case *ConformanceResponse_SerializeError: + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.SerializeError))) + n += len(x.SerializeError) + case *ConformanceResponse_RuntimeError: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.RuntimeError))) + n += len(x.RuntimeError) + case *ConformanceResponse_ProtobufPayload: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ProtobufPayload))) + n += len(x.ProtobufPayload) + case *ConformanceResponse_JsonPayload: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.JsonPayload))) + n += len(x.JsonPayload) + case *ConformanceResponse_Skipped: + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Skipped))) + n += len(x.Skipped) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// This proto includes every type of field in both singular and repeated +// forms. +type TestAllTypes struct { + // Singular + OptionalInt32 int32 `protobuf:"varint,1,opt,name=optional_int32,json=optionalInt32" json:"optional_int32,omitempty"` + OptionalInt64 int64 `protobuf:"varint,2,opt,name=optional_int64,json=optionalInt64" json:"optional_int64,omitempty"` + OptionalUint32 uint32 `protobuf:"varint,3,opt,name=optional_uint32,json=optionalUint32" json:"optional_uint32,omitempty"` + OptionalUint64 uint64 `protobuf:"varint,4,opt,name=optional_uint64,json=optionalUint64" json:"optional_uint64,omitempty"` + OptionalSint32 int32 `protobuf:"zigzag32,5,opt,name=optional_sint32,json=optionalSint32" json:"optional_sint32,omitempty"` + OptionalSint64 int64 `protobuf:"zigzag64,6,opt,name=optional_sint64,json=optionalSint64" json:"optional_sint64,omitempty"` + OptionalFixed32 uint32 `protobuf:"fixed32,7,opt,name=optional_fixed32,json=optionalFixed32" json:"optional_fixed32,omitempty"` + OptionalFixed64 uint64 `protobuf:"fixed64,8,opt,name=optional_fixed64,json=optionalFixed64" json:"optional_fixed64,omitempty"` + OptionalSfixed32 int32 `protobuf:"fixed32,9,opt,name=optional_sfixed32,json=optionalSfixed32" json:"optional_sfixed32,omitempty"` + OptionalSfixed64 int64 `protobuf:"fixed64,10,opt,name=optional_sfixed64,json=optionalSfixed64" json:"optional_sfixed64,omitempty"` + OptionalFloat float32 `protobuf:"fixed32,11,opt,name=optional_float,json=optionalFloat" json:"optional_float,omitempty"` + OptionalDouble float64 `protobuf:"fixed64,12,opt,name=optional_double,json=optionalDouble" json:"optional_double,omitempty"` + OptionalBool bool `protobuf:"varint,13,opt,name=optional_bool,json=optionalBool" json:"optional_bool,omitempty"` + OptionalString string `protobuf:"bytes,14,opt,name=optional_string,json=optionalString" json:"optional_string,omitempty"` + OptionalBytes []byte `protobuf:"bytes,15,opt,name=optional_bytes,json=optionalBytes,proto3" json:"optional_bytes,omitempty"` + OptionalNestedMessage *TestAllTypes_NestedMessage `protobuf:"bytes,18,opt,name=optional_nested_message,json=optionalNestedMessage" json:"optional_nested_message,omitempty"` + OptionalForeignMessage *ForeignMessage `protobuf:"bytes,19,opt,name=optional_foreign_message,json=optionalForeignMessage" json:"optional_foreign_message,omitempty"` + OptionalNestedEnum TestAllTypes_NestedEnum `protobuf:"varint,21,opt,name=optional_nested_enum,json=optionalNestedEnum,enum=conformance.TestAllTypes_NestedEnum" json:"optional_nested_enum,omitempty"` + OptionalForeignEnum ForeignEnum 
`protobuf:"varint,22,opt,name=optional_foreign_enum,json=optionalForeignEnum,enum=conformance.ForeignEnum" json:"optional_foreign_enum,omitempty"` + OptionalStringPiece string `protobuf:"bytes,24,opt,name=optional_string_piece,json=optionalStringPiece" json:"optional_string_piece,omitempty"` + OptionalCord string `protobuf:"bytes,25,opt,name=optional_cord,json=optionalCord" json:"optional_cord,omitempty"` + RecursiveMessage *TestAllTypes `protobuf:"bytes,27,opt,name=recursive_message,json=recursiveMessage" json:"recursive_message,omitempty"` + // Repeated + RepeatedInt32 []int32 `protobuf:"varint,31,rep,packed,name=repeated_int32,json=repeatedInt32" json:"repeated_int32,omitempty"` + RepeatedInt64 []int64 `protobuf:"varint,32,rep,packed,name=repeated_int64,json=repeatedInt64" json:"repeated_int64,omitempty"` + RepeatedUint32 []uint32 `protobuf:"varint,33,rep,packed,name=repeated_uint32,json=repeatedUint32" json:"repeated_uint32,omitempty"` + RepeatedUint64 []uint64 `protobuf:"varint,34,rep,packed,name=repeated_uint64,json=repeatedUint64" json:"repeated_uint64,omitempty"` + RepeatedSint32 []int32 `protobuf:"zigzag32,35,rep,packed,name=repeated_sint32,json=repeatedSint32" json:"repeated_sint32,omitempty"` + RepeatedSint64 []int64 `protobuf:"zigzag64,36,rep,packed,name=repeated_sint64,json=repeatedSint64" json:"repeated_sint64,omitempty"` + RepeatedFixed32 []uint32 `protobuf:"fixed32,37,rep,packed,name=repeated_fixed32,json=repeatedFixed32" json:"repeated_fixed32,omitempty"` + RepeatedFixed64 []uint64 `protobuf:"fixed64,38,rep,packed,name=repeated_fixed64,json=repeatedFixed64" json:"repeated_fixed64,omitempty"` + RepeatedSfixed32 []int32 `protobuf:"fixed32,39,rep,packed,name=repeated_sfixed32,json=repeatedSfixed32" json:"repeated_sfixed32,omitempty"` + RepeatedSfixed64 []int64 `protobuf:"fixed64,40,rep,packed,name=repeated_sfixed64,json=repeatedSfixed64" json:"repeated_sfixed64,omitempty"` + RepeatedFloat []float32 `protobuf:"fixed32,41,rep,packed,name=repeated_float,json=repeatedFloat" json:"repeated_float,omitempty"` + RepeatedDouble []float64 `protobuf:"fixed64,42,rep,packed,name=repeated_double,json=repeatedDouble" json:"repeated_double,omitempty"` + RepeatedBool []bool `protobuf:"varint,43,rep,packed,name=repeated_bool,json=repeatedBool" json:"repeated_bool,omitempty"` + RepeatedString []string `protobuf:"bytes,44,rep,name=repeated_string,json=repeatedString" json:"repeated_string,omitempty"` + RepeatedBytes [][]byte `protobuf:"bytes,45,rep,name=repeated_bytes,json=repeatedBytes,proto3" json:"repeated_bytes,omitempty"` + RepeatedNestedMessage []*TestAllTypes_NestedMessage `protobuf:"bytes,48,rep,name=repeated_nested_message,json=repeatedNestedMessage" json:"repeated_nested_message,omitempty"` + RepeatedForeignMessage []*ForeignMessage `protobuf:"bytes,49,rep,name=repeated_foreign_message,json=repeatedForeignMessage" json:"repeated_foreign_message,omitempty"` + RepeatedNestedEnum []TestAllTypes_NestedEnum `protobuf:"varint,51,rep,packed,name=repeated_nested_enum,json=repeatedNestedEnum,enum=conformance.TestAllTypes_NestedEnum" json:"repeated_nested_enum,omitempty"` + RepeatedForeignEnum []ForeignEnum `protobuf:"varint,52,rep,packed,name=repeated_foreign_enum,json=repeatedForeignEnum,enum=conformance.ForeignEnum" json:"repeated_foreign_enum,omitempty"` + RepeatedStringPiece []string `protobuf:"bytes,54,rep,name=repeated_string_piece,json=repeatedStringPiece" json:"repeated_string_piece,omitempty"` + RepeatedCord []string `protobuf:"bytes,55,rep,name=repeated_cord,json=repeatedCord" 
json:"repeated_cord,omitempty"` + // Map + MapInt32Int32 map[int32]int32 `protobuf:"bytes,56,rep,name=map_int32_int32,json=mapInt32Int32" json:"map_int32_int32,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + MapInt64Int64 map[int64]int64 `protobuf:"bytes,57,rep,name=map_int64_int64,json=mapInt64Int64" json:"map_int64_int64,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + MapUint32Uint32 map[uint32]uint32 `protobuf:"bytes,58,rep,name=map_uint32_uint32,json=mapUint32Uint32" json:"map_uint32_uint32,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + MapUint64Uint64 map[uint64]uint64 `protobuf:"bytes,59,rep,name=map_uint64_uint64,json=mapUint64Uint64" json:"map_uint64_uint64,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + MapSint32Sint32 map[int32]int32 `protobuf:"bytes,60,rep,name=map_sint32_sint32,json=mapSint32Sint32" json:"map_sint32_sint32,omitempty" protobuf_key:"zigzag32,1,opt,name=key" protobuf_val:"zigzag32,2,opt,name=value"` + MapSint64Sint64 map[int64]int64 `protobuf:"bytes,61,rep,name=map_sint64_sint64,json=mapSint64Sint64" json:"map_sint64_sint64,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"zigzag64,2,opt,name=value"` + MapFixed32Fixed32 map[uint32]uint32 `protobuf:"bytes,62,rep,name=map_fixed32_fixed32,json=mapFixed32Fixed32" json:"map_fixed32_fixed32,omitempty" protobuf_key:"fixed32,1,opt,name=key" protobuf_val:"fixed32,2,opt,name=value"` + MapFixed64Fixed64 map[uint64]uint64 `protobuf:"bytes,63,rep,name=map_fixed64_fixed64,json=mapFixed64Fixed64" json:"map_fixed64_fixed64,omitempty" protobuf_key:"fixed64,1,opt,name=key" protobuf_val:"fixed64,2,opt,name=value"` + MapSfixed32Sfixed32 map[int32]int32 `protobuf:"bytes,64,rep,name=map_sfixed32_sfixed32,json=mapSfixed32Sfixed32" json:"map_sfixed32_sfixed32,omitempty" protobuf_key:"fixed32,1,opt,name=key" protobuf_val:"fixed32,2,opt,name=value"` + MapSfixed64Sfixed64 map[int64]int64 `protobuf:"bytes,65,rep,name=map_sfixed64_sfixed64,json=mapSfixed64Sfixed64" json:"map_sfixed64_sfixed64,omitempty" protobuf_key:"fixed64,1,opt,name=key" protobuf_val:"fixed64,2,opt,name=value"` + MapInt32Float map[int32]float32 `protobuf:"bytes,66,rep,name=map_int32_float,json=mapInt32Float" json:"map_int32_float,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"fixed32,2,opt,name=value"` + MapInt32Double map[int32]float64 `protobuf:"bytes,67,rep,name=map_int32_double,json=mapInt32Double" json:"map_int32_double,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"fixed64,2,opt,name=value"` + MapBoolBool map[bool]bool `protobuf:"bytes,68,rep,name=map_bool_bool,json=mapBoolBool" json:"map_bool_bool,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + MapStringString map[string]string `protobuf:"bytes,69,rep,name=map_string_string,json=mapStringString" json:"map_string_string,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + MapStringBytes map[string][]byte `protobuf:"bytes,70,rep,name=map_string_bytes,json=mapStringBytes" json:"map_string_bytes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"` + MapStringNestedMessage map[string]*TestAllTypes_NestedMessage `protobuf:"bytes,71,rep,name=map_string_nested_message,json=mapStringNestedMessage" json:"map_string_nested_message,omitempty" protobuf_key:"bytes,1,opt,name=key" 
protobuf_val:"bytes,2,opt,name=value"` + MapStringForeignMessage map[string]*ForeignMessage `protobuf:"bytes,72,rep,name=map_string_foreign_message,json=mapStringForeignMessage" json:"map_string_foreign_message,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + MapStringNestedEnum map[string]TestAllTypes_NestedEnum `protobuf:"bytes,73,rep,name=map_string_nested_enum,json=mapStringNestedEnum" json:"map_string_nested_enum,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value,enum=conformance.TestAllTypes_NestedEnum"` + MapStringForeignEnum map[string]ForeignEnum `protobuf:"bytes,74,rep,name=map_string_foreign_enum,json=mapStringForeignEnum" json:"map_string_foreign_enum,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value,enum=conformance.ForeignEnum"` + // Types that are valid to be assigned to OneofField: + // *TestAllTypes_OneofUint32 + // *TestAllTypes_OneofNestedMessage + // *TestAllTypes_OneofString + // *TestAllTypes_OneofBytes + // *TestAllTypes_OneofBool + // *TestAllTypes_OneofUint64 + // *TestAllTypes_OneofFloat + // *TestAllTypes_OneofDouble + // *TestAllTypes_OneofEnum + OneofField isTestAllTypes_OneofField `protobuf_oneof:"oneof_field"` + // Well-known types + OptionalBoolWrapper *google_protobuf5.BoolValue `protobuf:"bytes,201,opt,name=optional_bool_wrapper,json=optionalBoolWrapper" json:"optional_bool_wrapper,omitempty"` + OptionalInt32Wrapper *google_protobuf5.Int32Value `protobuf:"bytes,202,opt,name=optional_int32_wrapper,json=optionalInt32Wrapper" json:"optional_int32_wrapper,omitempty"` + OptionalInt64Wrapper *google_protobuf5.Int64Value `protobuf:"bytes,203,opt,name=optional_int64_wrapper,json=optionalInt64Wrapper" json:"optional_int64_wrapper,omitempty"` + OptionalUint32Wrapper *google_protobuf5.UInt32Value `protobuf:"bytes,204,opt,name=optional_uint32_wrapper,json=optionalUint32Wrapper" json:"optional_uint32_wrapper,omitempty"` + OptionalUint64Wrapper *google_protobuf5.UInt64Value `protobuf:"bytes,205,opt,name=optional_uint64_wrapper,json=optionalUint64Wrapper" json:"optional_uint64_wrapper,omitempty"` + OptionalFloatWrapper *google_protobuf5.FloatValue `protobuf:"bytes,206,opt,name=optional_float_wrapper,json=optionalFloatWrapper" json:"optional_float_wrapper,omitempty"` + OptionalDoubleWrapper *google_protobuf5.DoubleValue `protobuf:"bytes,207,opt,name=optional_double_wrapper,json=optionalDoubleWrapper" json:"optional_double_wrapper,omitempty"` + OptionalStringWrapper *google_protobuf5.StringValue `protobuf:"bytes,208,opt,name=optional_string_wrapper,json=optionalStringWrapper" json:"optional_string_wrapper,omitempty"` + OptionalBytesWrapper *google_protobuf5.BytesValue `protobuf:"bytes,209,opt,name=optional_bytes_wrapper,json=optionalBytesWrapper" json:"optional_bytes_wrapper,omitempty"` + RepeatedBoolWrapper []*google_protobuf5.BoolValue `protobuf:"bytes,211,rep,name=repeated_bool_wrapper,json=repeatedBoolWrapper" json:"repeated_bool_wrapper,omitempty"` + RepeatedInt32Wrapper []*google_protobuf5.Int32Value `protobuf:"bytes,212,rep,name=repeated_int32_wrapper,json=repeatedInt32Wrapper" json:"repeated_int32_wrapper,omitempty"` + RepeatedInt64Wrapper []*google_protobuf5.Int64Value `protobuf:"bytes,213,rep,name=repeated_int64_wrapper,json=repeatedInt64Wrapper" json:"repeated_int64_wrapper,omitempty"` + RepeatedUint32Wrapper []*google_protobuf5.UInt32Value `protobuf:"bytes,214,rep,name=repeated_uint32_wrapper,json=repeatedUint32Wrapper" 
json:"repeated_uint32_wrapper,omitempty"` + RepeatedUint64Wrapper []*google_protobuf5.UInt64Value `protobuf:"bytes,215,rep,name=repeated_uint64_wrapper,json=repeatedUint64Wrapper" json:"repeated_uint64_wrapper,omitempty"` + RepeatedFloatWrapper []*google_protobuf5.FloatValue `protobuf:"bytes,216,rep,name=repeated_float_wrapper,json=repeatedFloatWrapper" json:"repeated_float_wrapper,omitempty"` + RepeatedDoubleWrapper []*google_protobuf5.DoubleValue `protobuf:"bytes,217,rep,name=repeated_double_wrapper,json=repeatedDoubleWrapper" json:"repeated_double_wrapper,omitempty"` + RepeatedStringWrapper []*google_protobuf5.StringValue `protobuf:"bytes,218,rep,name=repeated_string_wrapper,json=repeatedStringWrapper" json:"repeated_string_wrapper,omitempty"` + RepeatedBytesWrapper []*google_protobuf5.BytesValue `protobuf:"bytes,219,rep,name=repeated_bytes_wrapper,json=repeatedBytesWrapper" json:"repeated_bytes_wrapper,omitempty"` + OptionalDuration *google_protobuf1.Duration `protobuf:"bytes,301,opt,name=optional_duration,json=optionalDuration" json:"optional_duration,omitempty"` + OptionalTimestamp *google_protobuf4.Timestamp `protobuf:"bytes,302,opt,name=optional_timestamp,json=optionalTimestamp" json:"optional_timestamp,omitempty"` + OptionalFieldMask *google_protobuf2.FieldMask `protobuf:"bytes,303,opt,name=optional_field_mask,json=optionalFieldMask" json:"optional_field_mask,omitempty"` + OptionalStruct *google_protobuf3.Struct `protobuf:"bytes,304,opt,name=optional_struct,json=optionalStruct" json:"optional_struct,omitempty"` + OptionalAny *google_protobuf.Any `protobuf:"bytes,305,opt,name=optional_any,json=optionalAny" json:"optional_any,omitempty"` + OptionalValue *google_protobuf3.Value `protobuf:"bytes,306,opt,name=optional_value,json=optionalValue" json:"optional_value,omitempty"` + RepeatedDuration []*google_protobuf1.Duration `protobuf:"bytes,311,rep,name=repeated_duration,json=repeatedDuration" json:"repeated_duration,omitempty"` + RepeatedTimestamp []*google_protobuf4.Timestamp `protobuf:"bytes,312,rep,name=repeated_timestamp,json=repeatedTimestamp" json:"repeated_timestamp,omitempty"` + RepeatedFieldmask []*google_protobuf2.FieldMask `protobuf:"bytes,313,rep,name=repeated_fieldmask,json=repeatedFieldmask" json:"repeated_fieldmask,omitempty"` + RepeatedStruct []*google_protobuf3.Struct `protobuf:"bytes,324,rep,name=repeated_struct,json=repeatedStruct" json:"repeated_struct,omitempty"` + RepeatedAny []*google_protobuf.Any `protobuf:"bytes,315,rep,name=repeated_any,json=repeatedAny" json:"repeated_any,omitempty"` + RepeatedValue []*google_protobuf3.Value `protobuf:"bytes,316,rep,name=repeated_value,json=repeatedValue" json:"repeated_value,omitempty"` + // Test field-name-to-JSON-name convention. + // (protobuf says names can be any valid C/C++ identifier.) 
+ Fieldname1 int32 `protobuf:"varint,401,opt,name=fieldname1" json:"fieldname1,omitempty"` + FieldName2 int32 `protobuf:"varint,402,opt,name=field_name2,json=fieldName2" json:"field_name2,omitempty"` + XFieldName3 int32 `protobuf:"varint,403,opt,name=_field_name3,json=FieldName3" json:"_field_name3,omitempty"` + Field_Name4_ int32 `protobuf:"varint,404,opt,name=field__name4_,json=fieldName4" json:"field__name4_,omitempty"` + Field0Name5 int32 `protobuf:"varint,405,opt,name=field0name5" json:"field0name5,omitempty"` + Field_0Name6 int32 `protobuf:"varint,406,opt,name=field_0_name6,json=field0Name6" json:"field_0_name6,omitempty"` + FieldName7 int32 `protobuf:"varint,407,opt,name=fieldName7" json:"fieldName7,omitempty"` + FieldName8 int32 `protobuf:"varint,408,opt,name=FieldName8" json:"FieldName8,omitempty"` + Field_Name9 int32 `protobuf:"varint,409,opt,name=field_Name9,json=fieldName9" json:"field_Name9,omitempty"` + Field_Name10 int32 `protobuf:"varint,410,opt,name=Field_Name10,json=FieldName10" json:"Field_Name10,omitempty"` + FIELD_NAME11 int32 `protobuf:"varint,411,opt,name=FIELD_NAME11,json=FIELDNAME11" json:"FIELD_NAME11,omitempty"` + FIELDName12 int32 `protobuf:"varint,412,opt,name=FIELD_name12,json=FIELDName12" json:"FIELD_name12,omitempty"` + XFieldName13 int32 `protobuf:"varint,413,opt,name=__field_name13,json=FieldName13" json:"__field_name13,omitempty"` + X_FieldName14 int32 `protobuf:"varint,414,opt,name=__Field_name14,json=FieldName14" json:"__Field_name14,omitempty"` + Field_Name15 int32 `protobuf:"varint,415,opt,name=field__name15,json=fieldName15" json:"field__name15,omitempty"` + Field__Name16 int32 `protobuf:"varint,416,opt,name=field__Name16,json=fieldName16" json:"field__Name16,omitempty"` + FieldName17__ int32 `protobuf:"varint,417,opt,name=field_name17__,json=fieldName17" json:"field_name17__,omitempty"` + FieldName18__ int32 `protobuf:"varint,418,opt,name=Field_name18__,json=FieldName18" json:"Field_name18__,omitempty"` +} + +func (m *TestAllTypes) Reset() { *m = TestAllTypes{} } +func (m *TestAllTypes) String() string { return proto.CompactTextString(m) } +func (*TestAllTypes) ProtoMessage() {} +func (*TestAllTypes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type isTestAllTypes_OneofField interface { + isTestAllTypes_OneofField() +} + +type TestAllTypes_OneofUint32 struct { + OneofUint32 uint32 `protobuf:"varint,111,opt,name=oneof_uint32,json=oneofUint32,oneof"` +} +type TestAllTypes_OneofNestedMessage struct { + OneofNestedMessage *TestAllTypes_NestedMessage `protobuf:"bytes,112,opt,name=oneof_nested_message,json=oneofNestedMessage,oneof"` +} +type TestAllTypes_OneofString struct { + OneofString string `protobuf:"bytes,113,opt,name=oneof_string,json=oneofString,oneof"` +} +type TestAllTypes_OneofBytes struct { + OneofBytes []byte `protobuf:"bytes,114,opt,name=oneof_bytes,json=oneofBytes,proto3,oneof"` +} +type TestAllTypes_OneofBool struct { + OneofBool bool `protobuf:"varint,115,opt,name=oneof_bool,json=oneofBool,oneof"` +} +type TestAllTypes_OneofUint64 struct { + OneofUint64 uint64 `protobuf:"varint,116,opt,name=oneof_uint64,json=oneofUint64,oneof"` +} +type TestAllTypes_OneofFloat struct { + OneofFloat float32 `protobuf:"fixed32,117,opt,name=oneof_float,json=oneofFloat,oneof"` +} +type TestAllTypes_OneofDouble struct { + OneofDouble float64 `protobuf:"fixed64,118,opt,name=oneof_double,json=oneofDouble,oneof"` +} +type TestAllTypes_OneofEnum struct { + OneofEnum TestAllTypes_NestedEnum 
`protobuf:"varint,119,opt,name=oneof_enum,json=oneofEnum,enum=conformance.TestAllTypes_NestedEnum,oneof"` +} + +func (*TestAllTypes_OneofUint32) isTestAllTypes_OneofField() {} +func (*TestAllTypes_OneofNestedMessage) isTestAllTypes_OneofField() {} +func (*TestAllTypes_OneofString) isTestAllTypes_OneofField() {} +func (*TestAllTypes_OneofBytes) isTestAllTypes_OneofField() {} +func (*TestAllTypes_OneofBool) isTestAllTypes_OneofField() {} +func (*TestAllTypes_OneofUint64) isTestAllTypes_OneofField() {} +func (*TestAllTypes_OneofFloat) isTestAllTypes_OneofField() {} +func (*TestAllTypes_OneofDouble) isTestAllTypes_OneofField() {} +func (*TestAllTypes_OneofEnum) isTestAllTypes_OneofField() {} + +func (m *TestAllTypes) GetOneofField() isTestAllTypes_OneofField { + if m != nil { + return m.OneofField + } + return nil +} + +func (m *TestAllTypes) GetOptionalInt32() int32 { + if m != nil { + return m.OptionalInt32 + } + return 0 +} + +func (m *TestAllTypes) GetOptionalInt64() int64 { + if m != nil { + return m.OptionalInt64 + } + return 0 +} + +func (m *TestAllTypes) GetOptionalUint32() uint32 { + if m != nil { + return m.OptionalUint32 + } + return 0 +} + +func (m *TestAllTypes) GetOptionalUint64() uint64 { + if m != nil { + return m.OptionalUint64 + } + return 0 +} + +func (m *TestAllTypes) GetOptionalSint32() int32 { + if m != nil { + return m.OptionalSint32 + } + return 0 +} + +func (m *TestAllTypes) GetOptionalSint64() int64 { + if m != nil { + return m.OptionalSint64 + } + return 0 +} + +func (m *TestAllTypes) GetOptionalFixed32() uint32 { + if m != nil { + return m.OptionalFixed32 + } + return 0 +} + +func (m *TestAllTypes) GetOptionalFixed64() uint64 { + if m != nil { + return m.OptionalFixed64 + } + return 0 +} + +func (m *TestAllTypes) GetOptionalSfixed32() int32 { + if m != nil { + return m.OptionalSfixed32 + } + return 0 +} + +func (m *TestAllTypes) GetOptionalSfixed64() int64 { + if m != nil { + return m.OptionalSfixed64 + } + return 0 +} + +func (m *TestAllTypes) GetOptionalFloat() float32 { + if m != nil { + return m.OptionalFloat + } + return 0 +} + +func (m *TestAllTypes) GetOptionalDouble() float64 { + if m != nil { + return m.OptionalDouble + } + return 0 +} + +func (m *TestAllTypes) GetOptionalBool() bool { + if m != nil { + return m.OptionalBool + } + return false +} + +func (m *TestAllTypes) GetOptionalString() string { + if m != nil { + return m.OptionalString + } + return "" +} + +func (m *TestAllTypes) GetOptionalBytes() []byte { + if m != nil { + return m.OptionalBytes + } + return nil +} + +func (m *TestAllTypes) GetOptionalNestedMessage() *TestAllTypes_NestedMessage { + if m != nil { + return m.OptionalNestedMessage + } + return nil +} + +func (m *TestAllTypes) GetOptionalForeignMessage() *ForeignMessage { + if m != nil { + return m.OptionalForeignMessage + } + return nil +} + +func (m *TestAllTypes) GetOptionalNestedEnum() TestAllTypes_NestedEnum { + if m != nil { + return m.OptionalNestedEnum + } + return TestAllTypes_FOO +} + +func (m *TestAllTypes) GetOptionalForeignEnum() ForeignEnum { + if m != nil { + return m.OptionalForeignEnum + } + return ForeignEnum_FOREIGN_FOO +} + +func (m *TestAllTypes) GetOptionalStringPiece() string { + if m != nil { + return m.OptionalStringPiece + } + return "" +} + +func (m *TestAllTypes) GetOptionalCord() string { + if m != nil { + return m.OptionalCord + } + return "" +} + +func (m *TestAllTypes) GetRecursiveMessage() *TestAllTypes { + if m != nil { + return m.RecursiveMessage + } + return nil +} + +func (m *TestAllTypes) 
GetRepeatedInt32() []int32 { + if m != nil { + return m.RepeatedInt32 + } + return nil +} + +func (m *TestAllTypes) GetRepeatedInt64() []int64 { + if m != nil { + return m.RepeatedInt64 + } + return nil +} + +func (m *TestAllTypes) GetRepeatedUint32() []uint32 { + if m != nil { + return m.RepeatedUint32 + } + return nil +} + +func (m *TestAllTypes) GetRepeatedUint64() []uint64 { + if m != nil { + return m.RepeatedUint64 + } + return nil +} + +func (m *TestAllTypes) GetRepeatedSint32() []int32 { + if m != nil { + return m.RepeatedSint32 + } + return nil +} + +func (m *TestAllTypes) GetRepeatedSint64() []int64 { + if m != nil { + return m.RepeatedSint64 + } + return nil +} + +func (m *TestAllTypes) GetRepeatedFixed32() []uint32 { + if m != nil { + return m.RepeatedFixed32 + } + return nil +} + +func (m *TestAllTypes) GetRepeatedFixed64() []uint64 { + if m != nil { + return m.RepeatedFixed64 + } + return nil +} + +func (m *TestAllTypes) GetRepeatedSfixed32() []int32 { + if m != nil { + return m.RepeatedSfixed32 + } + return nil +} + +func (m *TestAllTypes) GetRepeatedSfixed64() []int64 { + if m != nil { + return m.RepeatedSfixed64 + } + return nil +} + +func (m *TestAllTypes) GetRepeatedFloat() []float32 { + if m != nil { + return m.RepeatedFloat + } + return nil +} + +func (m *TestAllTypes) GetRepeatedDouble() []float64 { + if m != nil { + return m.RepeatedDouble + } + return nil +} + +func (m *TestAllTypes) GetRepeatedBool() []bool { + if m != nil { + return m.RepeatedBool + } + return nil +} + +func (m *TestAllTypes) GetRepeatedString() []string { + if m != nil { + return m.RepeatedString + } + return nil +} + +func (m *TestAllTypes) GetRepeatedBytes() [][]byte { + if m != nil { + return m.RepeatedBytes + } + return nil +} + +func (m *TestAllTypes) GetRepeatedNestedMessage() []*TestAllTypes_NestedMessage { + if m != nil { + return m.RepeatedNestedMessage + } + return nil +} + +func (m *TestAllTypes) GetRepeatedForeignMessage() []*ForeignMessage { + if m != nil { + return m.RepeatedForeignMessage + } + return nil +} + +func (m *TestAllTypes) GetRepeatedNestedEnum() []TestAllTypes_NestedEnum { + if m != nil { + return m.RepeatedNestedEnum + } + return nil +} + +func (m *TestAllTypes) GetRepeatedForeignEnum() []ForeignEnum { + if m != nil { + return m.RepeatedForeignEnum + } + return nil +} + +func (m *TestAllTypes) GetRepeatedStringPiece() []string { + if m != nil { + return m.RepeatedStringPiece + } + return nil +} + +func (m *TestAllTypes) GetRepeatedCord() []string { + if m != nil { + return m.RepeatedCord + } + return nil +} + +func (m *TestAllTypes) GetMapInt32Int32() map[int32]int32 { + if m != nil { + return m.MapInt32Int32 + } + return nil +} + +func (m *TestAllTypes) GetMapInt64Int64() map[int64]int64 { + if m != nil { + return m.MapInt64Int64 + } + return nil +} + +func (m *TestAllTypes) GetMapUint32Uint32() map[uint32]uint32 { + if m != nil { + return m.MapUint32Uint32 + } + return nil +} + +func (m *TestAllTypes) GetMapUint64Uint64() map[uint64]uint64 { + if m != nil { + return m.MapUint64Uint64 + } + return nil +} + +func (m *TestAllTypes) GetMapSint32Sint32() map[int32]int32 { + if m != nil { + return m.MapSint32Sint32 + } + return nil +} + +func (m *TestAllTypes) GetMapSint64Sint64() map[int64]int64 { + if m != nil { + return m.MapSint64Sint64 + } + return nil +} + +func (m *TestAllTypes) GetMapFixed32Fixed32() map[uint32]uint32 { + if m != nil { + return m.MapFixed32Fixed32 + } + return nil +} + +func (m *TestAllTypes) GetMapFixed64Fixed64() map[uint64]uint64 { + if m != nil 
{ + return m.MapFixed64Fixed64 + } + return nil +} + +func (m *TestAllTypes) GetMapSfixed32Sfixed32() map[int32]int32 { + if m != nil { + return m.MapSfixed32Sfixed32 + } + return nil +} + +func (m *TestAllTypes) GetMapSfixed64Sfixed64() map[int64]int64 { + if m != nil { + return m.MapSfixed64Sfixed64 + } + return nil +} + +func (m *TestAllTypes) GetMapInt32Float() map[int32]float32 { + if m != nil { + return m.MapInt32Float + } + return nil +} + +func (m *TestAllTypes) GetMapInt32Double() map[int32]float64 { + if m != nil { + return m.MapInt32Double + } + return nil +} + +func (m *TestAllTypes) GetMapBoolBool() map[bool]bool { + if m != nil { + return m.MapBoolBool + } + return nil +} + +func (m *TestAllTypes) GetMapStringString() map[string]string { + if m != nil { + return m.MapStringString + } + return nil +} + +func (m *TestAllTypes) GetMapStringBytes() map[string][]byte { + if m != nil { + return m.MapStringBytes + } + return nil +} + +func (m *TestAllTypes) GetMapStringNestedMessage() map[string]*TestAllTypes_NestedMessage { + if m != nil { + return m.MapStringNestedMessage + } + return nil +} + +func (m *TestAllTypes) GetMapStringForeignMessage() map[string]*ForeignMessage { + if m != nil { + return m.MapStringForeignMessage + } + return nil +} + +func (m *TestAllTypes) GetMapStringNestedEnum() map[string]TestAllTypes_NestedEnum { + if m != nil { + return m.MapStringNestedEnum + } + return nil +} + +func (m *TestAllTypes) GetMapStringForeignEnum() map[string]ForeignEnum { + if m != nil { + return m.MapStringForeignEnum + } + return nil +} + +func (m *TestAllTypes) GetOneofUint32() uint32 { + if x, ok := m.GetOneofField().(*TestAllTypes_OneofUint32); ok { + return x.OneofUint32 + } + return 0 +} + +func (m *TestAllTypes) GetOneofNestedMessage() *TestAllTypes_NestedMessage { + if x, ok := m.GetOneofField().(*TestAllTypes_OneofNestedMessage); ok { + return x.OneofNestedMessage + } + return nil +} + +func (m *TestAllTypes) GetOneofString() string { + if x, ok := m.GetOneofField().(*TestAllTypes_OneofString); ok { + return x.OneofString + } + return "" +} + +func (m *TestAllTypes) GetOneofBytes() []byte { + if x, ok := m.GetOneofField().(*TestAllTypes_OneofBytes); ok { + return x.OneofBytes + } + return nil +} + +func (m *TestAllTypes) GetOneofBool() bool { + if x, ok := m.GetOneofField().(*TestAllTypes_OneofBool); ok { + return x.OneofBool + } + return false +} + +func (m *TestAllTypes) GetOneofUint64() uint64 { + if x, ok := m.GetOneofField().(*TestAllTypes_OneofUint64); ok { + return x.OneofUint64 + } + return 0 +} + +func (m *TestAllTypes) GetOneofFloat() float32 { + if x, ok := m.GetOneofField().(*TestAllTypes_OneofFloat); ok { + return x.OneofFloat + } + return 0 +} + +func (m *TestAllTypes) GetOneofDouble() float64 { + if x, ok := m.GetOneofField().(*TestAllTypes_OneofDouble); ok { + return x.OneofDouble + } + return 0 +} + +func (m *TestAllTypes) GetOneofEnum() TestAllTypes_NestedEnum { + if x, ok := m.GetOneofField().(*TestAllTypes_OneofEnum); ok { + return x.OneofEnum + } + return TestAllTypes_FOO +} + +func (m *TestAllTypes) GetOptionalBoolWrapper() *google_protobuf5.BoolValue { + if m != nil { + return m.OptionalBoolWrapper + } + return nil +} + +func (m *TestAllTypes) GetOptionalInt32Wrapper() *google_protobuf5.Int32Value { + if m != nil { + return m.OptionalInt32Wrapper + } + return nil +} + +func (m *TestAllTypes) GetOptionalInt64Wrapper() *google_protobuf5.Int64Value { + if m != nil { + return m.OptionalInt64Wrapper + } + return nil +} + +func (m *TestAllTypes) 
GetOptionalUint32Wrapper() *google_protobuf5.UInt32Value { + if m != nil { + return m.OptionalUint32Wrapper + } + return nil +} + +func (m *TestAllTypes) GetOptionalUint64Wrapper() *google_protobuf5.UInt64Value { + if m != nil { + return m.OptionalUint64Wrapper + } + return nil +} + +func (m *TestAllTypes) GetOptionalFloatWrapper() *google_protobuf5.FloatValue { + if m != nil { + return m.OptionalFloatWrapper + } + return nil +} + +func (m *TestAllTypes) GetOptionalDoubleWrapper() *google_protobuf5.DoubleValue { + if m != nil { + return m.OptionalDoubleWrapper + } + return nil +} + +func (m *TestAllTypes) GetOptionalStringWrapper() *google_protobuf5.StringValue { + if m != nil { + return m.OptionalStringWrapper + } + return nil +} + +func (m *TestAllTypes) GetOptionalBytesWrapper() *google_protobuf5.BytesValue { + if m != nil { + return m.OptionalBytesWrapper + } + return nil +} + +func (m *TestAllTypes) GetRepeatedBoolWrapper() []*google_protobuf5.BoolValue { + if m != nil { + return m.RepeatedBoolWrapper + } + return nil +} + +func (m *TestAllTypes) GetRepeatedInt32Wrapper() []*google_protobuf5.Int32Value { + if m != nil { + return m.RepeatedInt32Wrapper + } + return nil +} + +func (m *TestAllTypes) GetRepeatedInt64Wrapper() []*google_protobuf5.Int64Value { + if m != nil { + return m.RepeatedInt64Wrapper + } + return nil +} + +func (m *TestAllTypes) GetRepeatedUint32Wrapper() []*google_protobuf5.UInt32Value { + if m != nil { + return m.RepeatedUint32Wrapper + } + return nil +} + +func (m *TestAllTypes) GetRepeatedUint64Wrapper() []*google_protobuf5.UInt64Value { + if m != nil { + return m.RepeatedUint64Wrapper + } + return nil +} + +func (m *TestAllTypes) GetRepeatedFloatWrapper() []*google_protobuf5.FloatValue { + if m != nil { + return m.RepeatedFloatWrapper + } + return nil +} + +func (m *TestAllTypes) GetRepeatedDoubleWrapper() []*google_protobuf5.DoubleValue { + if m != nil { + return m.RepeatedDoubleWrapper + } + return nil +} + +func (m *TestAllTypes) GetRepeatedStringWrapper() []*google_protobuf5.StringValue { + if m != nil { + return m.RepeatedStringWrapper + } + return nil +} + +func (m *TestAllTypes) GetRepeatedBytesWrapper() []*google_protobuf5.BytesValue { + if m != nil { + return m.RepeatedBytesWrapper + } + return nil +} + +func (m *TestAllTypes) GetOptionalDuration() *google_protobuf1.Duration { + if m != nil { + return m.OptionalDuration + } + return nil +} + +func (m *TestAllTypes) GetOptionalTimestamp() *google_protobuf4.Timestamp { + if m != nil { + return m.OptionalTimestamp + } + return nil +} + +func (m *TestAllTypes) GetOptionalFieldMask() *google_protobuf2.FieldMask { + if m != nil { + return m.OptionalFieldMask + } + return nil +} + +func (m *TestAllTypes) GetOptionalStruct() *google_protobuf3.Struct { + if m != nil { + return m.OptionalStruct + } + return nil +} + +func (m *TestAllTypes) GetOptionalAny() *google_protobuf.Any { + if m != nil { + return m.OptionalAny + } + return nil +} + +func (m *TestAllTypes) GetOptionalValue() *google_protobuf3.Value { + if m != nil { + return m.OptionalValue + } + return nil +} + +func (m *TestAllTypes) GetRepeatedDuration() []*google_protobuf1.Duration { + if m != nil { + return m.RepeatedDuration + } + return nil +} + +func (m *TestAllTypes) GetRepeatedTimestamp() []*google_protobuf4.Timestamp { + if m != nil { + return m.RepeatedTimestamp + } + return nil +} + +func (m *TestAllTypes) GetRepeatedFieldmask() []*google_protobuf2.FieldMask { + if m != nil { + return m.RepeatedFieldmask + } + return nil +} + +func (m 
*TestAllTypes) GetRepeatedStruct() []*google_protobuf3.Struct { + if m != nil { + return m.RepeatedStruct + } + return nil +} + +func (m *TestAllTypes) GetRepeatedAny() []*google_protobuf.Any { + if m != nil { + return m.RepeatedAny + } + return nil +} + +func (m *TestAllTypes) GetRepeatedValue() []*google_protobuf3.Value { + if m != nil { + return m.RepeatedValue + } + return nil +} + +func (m *TestAllTypes) GetFieldname1() int32 { + if m != nil { + return m.Fieldname1 + } + return 0 +} + +func (m *TestAllTypes) GetFieldName2() int32 { + if m != nil { + return m.FieldName2 + } + return 0 +} + +func (m *TestAllTypes) GetXFieldName3() int32 { + if m != nil { + return m.XFieldName3 + } + return 0 +} + +func (m *TestAllTypes) GetField_Name4_() int32 { + if m != nil { + return m.Field_Name4_ + } + return 0 +} + +func (m *TestAllTypes) GetField0Name5() int32 { + if m != nil { + return m.Field0Name5 + } + return 0 +} + +func (m *TestAllTypes) GetField_0Name6() int32 { + if m != nil { + return m.Field_0Name6 + } + return 0 +} + +func (m *TestAllTypes) GetFieldName7() int32 { + if m != nil { + return m.FieldName7 + } + return 0 +} + +func (m *TestAllTypes) GetFieldName8() int32 { + if m != nil { + return m.FieldName8 + } + return 0 +} + +func (m *TestAllTypes) GetField_Name9() int32 { + if m != nil { + return m.Field_Name9 + } + return 0 +} + +func (m *TestAllTypes) GetField_Name10() int32 { + if m != nil { + return m.Field_Name10 + } + return 0 +} + +func (m *TestAllTypes) GetFIELD_NAME11() int32 { + if m != nil { + return m.FIELD_NAME11 + } + return 0 +} + +func (m *TestAllTypes) GetFIELDName12() int32 { + if m != nil { + return m.FIELDName12 + } + return 0 +} + +func (m *TestAllTypes) GetXFieldName13() int32 { + if m != nil { + return m.XFieldName13 + } + return 0 +} + +func (m *TestAllTypes) GetX_FieldName14() int32 { + if m != nil { + return m.X_FieldName14 + } + return 0 +} + +func (m *TestAllTypes) GetField_Name15() int32 { + if m != nil { + return m.Field_Name15 + } + return 0 +} + +func (m *TestAllTypes) GetField__Name16() int32 { + if m != nil { + return m.Field__Name16 + } + return 0 +} + +func (m *TestAllTypes) GetFieldName17__() int32 { + if m != nil { + return m.FieldName17__ + } + return 0 +} + +func (m *TestAllTypes) GetFieldName18__() int32 { + if m != nil { + return m.FieldName18__ + } + return 0 +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*TestAllTypes) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _TestAllTypes_OneofMarshaler, _TestAllTypes_OneofUnmarshaler, _TestAllTypes_OneofSizer, []interface{}{ + (*TestAllTypes_OneofUint32)(nil), + (*TestAllTypes_OneofNestedMessage)(nil), + (*TestAllTypes_OneofString)(nil), + (*TestAllTypes_OneofBytes)(nil), + (*TestAllTypes_OneofBool)(nil), + (*TestAllTypes_OneofUint64)(nil), + (*TestAllTypes_OneofFloat)(nil), + (*TestAllTypes_OneofDouble)(nil), + (*TestAllTypes_OneofEnum)(nil), + } +} + +func _TestAllTypes_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*TestAllTypes) + // oneof_field + switch x := m.OneofField.(type) { + case *TestAllTypes_OneofUint32: + b.EncodeVarint(111<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.OneofUint32)) + case *TestAllTypes_OneofNestedMessage: + b.EncodeVarint(112<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.OneofNestedMessage); err != nil { + return err + } + case *TestAllTypes_OneofString: + b.EncodeVarint(113<<3 | proto.WireBytes) + b.EncodeStringBytes(x.OneofString) + case *TestAllTypes_OneofBytes: + b.EncodeVarint(114<<3 | proto.WireBytes) + b.EncodeRawBytes(x.OneofBytes) + case *TestAllTypes_OneofBool: + t := uint64(0) + if x.OneofBool { + t = 1 + } + b.EncodeVarint(115<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *TestAllTypes_OneofUint64: + b.EncodeVarint(116<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.OneofUint64)) + case *TestAllTypes_OneofFloat: + b.EncodeVarint(117<<3 | proto.WireFixed32) + b.EncodeFixed32(uint64(math.Float32bits(x.OneofFloat))) + case *TestAllTypes_OneofDouble: + b.EncodeVarint(118<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.OneofDouble)) + case *TestAllTypes_OneofEnum: + b.EncodeVarint(119<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.OneofEnum)) + case nil: + default: + return fmt.Errorf("TestAllTypes.OneofField has unexpected type %T", x) + } + return nil +} + +func _TestAllTypes_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*TestAllTypes) + switch tag { + case 111: // oneof_field.oneof_uint32 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.OneofField = &TestAllTypes_OneofUint32{uint32(x)} + return true, err + case 112: // oneof_field.oneof_nested_message + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TestAllTypes_NestedMessage) + err := b.DecodeMessage(msg) + m.OneofField = &TestAllTypes_OneofNestedMessage{msg} + return true, err + case 113: // oneof_field.oneof_string + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.OneofField = &TestAllTypes_OneofString{x} + return true, err + case 114: // oneof_field.oneof_bytes + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.OneofField = &TestAllTypes_OneofBytes{x} + return true, err + case 115: // oneof_field.oneof_bool + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.OneofField = &TestAllTypes_OneofBool{x != 0} + return true, err + case 116: // oneof_field.oneof_uint64 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.OneofField = 
&TestAllTypes_OneofUint64{x} + return true, err + case 117: // oneof_field.oneof_float + if wire != proto.WireFixed32 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed32() + m.OneofField = &TestAllTypes_OneofFloat{math.Float32frombits(uint32(x))} + return true, err + case 118: // oneof_field.oneof_double + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.OneofField = &TestAllTypes_OneofDouble{math.Float64frombits(x)} + return true, err + case 119: // oneof_field.oneof_enum + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.OneofField = &TestAllTypes_OneofEnum{TestAllTypes_NestedEnum(x)} + return true, err + default: + return false, nil + } +} + +func _TestAllTypes_OneofSizer(msg proto.Message) (n int) { + m := msg.(*TestAllTypes) + // oneof_field + switch x := m.OneofField.(type) { + case *TestAllTypes_OneofUint32: + n += proto.SizeVarint(111<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.OneofUint32)) + case *TestAllTypes_OneofNestedMessage: + s := proto.Size(x.OneofNestedMessage) + n += proto.SizeVarint(112<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *TestAllTypes_OneofString: + n += proto.SizeVarint(113<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.OneofString))) + n += len(x.OneofString) + case *TestAllTypes_OneofBytes: + n += proto.SizeVarint(114<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.OneofBytes))) + n += len(x.OneofBytes) + case *TestAllTypes_OneofBool: + n += proto.SizeVarint(115<<3 | proto.WireVarint) + n += 1 + case *TestAllTypes_OneofUint64: + n += proto.SizeVarint(116<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.OneofUint64)) + case *TestAllTypes_OneofFloat: + n += proto.SizeVarint(117<<3 | proto.WireFixed32) + n += 4 + case *TestAllTypes_OneofDouble: + n += proto.SizeVarint(118<<3 | proto.WireFixed64) + n += 8 + case *TestAllTypes_OneofEnum: + n += proto.SizeVarint(119<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.OneofEnum)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type TestAllTypes_NestedMessage struct { + A int32 `protobuf:"varint,1,opt,name=a" json:"a,omitempty"` + Corecursive *TestAllTypes `protobuf:"bytes,2,opt,name=corecursive" json:"corecursive,omitempty"` +} + +func (m *TestAllTypes_NestedMessage) Reset() { *m = TestAllTypes_NestedMessage{} } +func (m *TestAllTypes_NestedMessage) String() string { return proto.CompactTextString(m) } +func (*TestAllTypes_NestedMessage) ProtoMessage() {} +func (*TestAllTypes_NestedMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +func (m *TestAllTypes_NestedMessage) GetA() int32 { + if m != nil { + return m.A + } + return 0 +} + +func (m *TestAllTypes_NestedMessage) GetCorecursive() *TestAllTypes { + if m != nil { + return m.Corecursive + } + return nil +} + +type ForeignMessage struct { + C int32 `protobuf:"varint,1,opt,name=c" json:"c,omitempty"` +} + +func (m *ForeignMessage) Reset() { *m = ForeignMessage{} } +func (m *ForeignMessage) String() string { return proto.CompactTextString(m) } +func (*ForeignMessage) ProtoMessage() {} +func (*ForeignMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *ForeignMessage) GetC() int32 { + if m != nil { + return m.C + } + return 0 +} + +func init() { + proto.RegisterType((*ConformanceRequest)(nil), 
"conformance.ConformanceRequest") + proto.RegisterType((*ConformanceResponse)(nil), "conformance.ConformanceResponse") + proto.RegisterType((*TestAllTypes)(nil), "conformance.TestAllTypes") + proto.RegisterType((*TestAllTypes_NestedMessage)(nil), "conformance.TestAllTypes.NestedMessage") + proto.RegisterType((*ForeignMessage)(nil), "conformance.ForeignMessage") + proto.RegisterEnum("conformance.WireFormat", WireFormat_name, WireFormat_value) + proto.RegisterEnum("conformance.ForeignEnum", ForeignEnum_name, ForeignEnum_value) + proto.RegisterEnum("conformance.TestAllTypes_NestedEnum", TestAllTypes_NestedEnum_name, TestAllTypes_NestedEnum_value) +} + +func init() { proto.RegisterFile("conformance_proto/conformance.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 2737 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x5a, 0xd9, 0x72, 0xdb, 0xc8, + 0xd5, 0x16, 0x08, 0x59, 0x4b, 0x93, 0x92, 0xa8, 0xd6, 0xd6, 0x96, 0x5d, 0x63, 0x58, 0xb2, 0x7f, + 0xd3, 0xf6, 0x8c, 0xac, 0x05, 0x86, 0x65, 0xcf, 0x3f, 0x8e, 0x45, 0x9b, 0xb4, 0xe4, 0x8c, 0x25, + 0x17, 0x64, 0x8d, 0xab, 0x9c, 0x0b, 0x06, 0xa6, 0x20, 0x15, 0xc7, 0x24, 0xc1, 0x01, 0x48, 0x4f, + 0x94, 0xcb, 0xbc, 0x41, 0xf6, 0x7d, 0xbd, 0xcf, 0x7a, 0x93, 0xa4, 0x92, 0xab, 0x54, 0x6e, 0xb2, + 0x27, 0x95, 0x3d, 0x79, 0x85, 0xbc, 0x43, 0x52, 0xbd, 0xa2, 0xbb, 0x01, 0x50, 0xf4, 0x54, 0x0d, + 0x25, 0x1e, 0x7c, 0xfd, 0x9d, 0xd3, 0xe7, 0x1c, 0x7c, 0x2d, 0x1c, 0x18, 0x2c, 0xd7, 0x83, 0xf6, + 0x51, 0x10, 0xb6, 0xbc, 0x76, 0xdd, 0xaf, 0x75, 0xc2, 0xa0, 0x1b, 0xdc, 0x90, 0x2c, 0x2b, 0xc4, + 0x02, 0xf3, 0x92, 0x69, 0xf1, 0xec, 0x71, 0x10, 0x1c, 0x37, 0xfd, 0x1b, 0xe4, 0xd2, 0x8b, 0xde, + 0xd1, 0x0d, 0xaf, 0x7d, 0x42, 0x71, 0x8b, 0x6f, 0xe8, 0x97, 0x0e, 0x7b, 0xa1, 0xd7, 0x6d, 0x04, + 0x6d, 0x76, 0xdd, 0xd2, 0xaf, 0x1f, 0x35, 0xfc, 0xe6, 0x61, 0xad, 0xe5, 0x45, 0x2f, 0x19, 0xe2, + 0xbc, 0x8e, 0x88, 0xba, 0x61, 0xaf, 0xde, 0x65, 0x57, 0x2f, 0xe8, 0x57, 0xbb, 0x8d, 0x96, 0x1f, + 0x75, 0xbd, 0x56, 0x27, 0x2b, 0x80, 0x0f, 0x43, 0xaf, 0xd3, 0xf1, 0xc3, 0x88, 0x5e, 0x5f, 0xfa, + 0x85, 0x01, 0xe0, 0xfd, 0x78, 0x2f, 0xae, 0xff, 0x41, 0xcf, 0x8f, 0xba, 0xf0, 0x3a, 0x28, 0xf2, + 0x15, 0xb5, 0x8e, 0x77, 0xd2, 0x0c, 0xbc, 0x43, 0x64, 0x58, 0x46, 0xa9, 0xb0, 0x3d, 0xe4, 0x4e, + 0xf1, 0x2b, 0x4f, 0xe8, 0x05, 0xb8, 0x0c, 0x0a, 0xef, 0x47, 0x41, 0x5b, 0x00, 0x73, 0x96, 0x51, + 0x1a, 0xdf, 0x1e, 0x72, 0xf3, 0xd8, 0xca, 0x41, 0x7b, 0x60, 0x21, 0xa4, 0xe4, 0xfe, 0x61, 0x2d, + 0xe8, 0x75, 0x3b, 0xbd, 0x6e, 0x8d, 0x78, 0xed, 0x22, 0xd3, 0x32, 0x4a, 0x93, 0xeb, 0x0b, 0x2b, + 0x72, 0x9a, 0x9f, 0x35, 0x42, 0xbf, 0x4a, 0x2e, 0xbb, 0x73, 0x62, 0xdd, 0x1e, 0x59, 0x46, 0xcd, + 0xe5, 0x71, 0x30, 0xca, 0x1c, 0x2e, 0x7d, 0x2a, 0x07, 0x66, 0x94, 0x4d, 0x44, 0x9d, 0xa0, 0x1d, + 0xf9, 0xf0, 0x22, 0xc8, 0x77, 0xbc, 0x30, 0xf2, 0x6b, 0x7e, 0x18, 0x06, 0x21, 0xd9, 0x00, 0x8e, + 0x0b, 0x10, 0x63, 0x05, 0xdb, 0xe0, 0x55, 0x30, 0x15, 0xf9, 0x61, 0xc3, 0x6b, 0x36, 0x3e, 0xc9, + 0x61, 0x23, 0x0c, 0x36, 0x29, 0x2e, 0x50, 0xe8, 0x65, 0x30, 0x11, 0xf6, 0xda, 0x38, 0xc1, 0x0c, + 0xc8, 0xf7, 0x59, 0x60, 0x66, 0x0a, 0x4b, 0x4b, 0x9d, 0x39, 0x68, 0xea, 0x86, 0xd3, 0x52, 0xb7, + 0x08, 0x46, 0xa3, 0x97, 0x8d, 0x4e, 0xc7, 0x3f, 0x44, 0x67, 0xd8, 0x75, 0x6e, 0x28, 0x8f, 0x81, + 0x91, 0xd0, 0x8f, 0x7a, 0xcd, 0xee, 0xd2, 0x7f, 0xaa, 0xa0, 0xf0, 0xd4, 0x8f, 0xba, 0x5b, 0xcd, + 0xe6, 0xd3, 0x93, 0x8e, 0x1f, 0xc1, 0xcb, 0x60, 0x32, 0xe8, 0xe0, 0x5e, 0xf3, 0x9a, 0xb5, 0x46, + 0xbb, 0xbb, 0xb1, 0x4e, 0x12, 0x70, 0xc6, 0x9d, 0xe0, 0xd6, 0x1d, 0x6c, 
0xd4, 0x61, 0x8e, 0x4d, + 0xf6, 0x65, 0x2a, 0x30, 0xc7, 0x86, 0x57, 0xc0, 0x94, 0x80, 0xf5, 0x28, 0x1d, 0xde, 0xd5, 0x84, + 0x2b, 0x56, 0x1f, 0x10, 0x6b, 0x02, 0xe8, 0xd8, 0x64, 0x57, 0xc3, 0x2a, 0x50, 0x63, 0x8c, 0x28, + 0x23, 0xde, 0xde, 0x74, 0x0c, 0xdc, 0x4f, 0x32, 0x46, 0x94, 0x11, 0xd7, 0x08, 0xaa, 0x40, 0xc7, + 0x86, 0x57, 0x41, 0x51, 0x00, 0x8f, 0x1a, 0x9f, 0xf0, 0x0f, 0x37, 0xd6, 0xd1, 0xa8, 0x65, 0x94, + 0x46, 0x5d, 0x41, 0x50, 0xa5, 0xe6, 0x24, 0xd4, 0xb1, 0xd1, 0x98, 0x65, 0x94, 0x46, 0x34, 0xa8, + 0x63, 0xc3, 0xeb, 0x60, 0x3a, 0x76, 0xcf, 0x69, 0xc7, 0x2d, 0xa3, 0x34, 0xe5, 0x0a, 0x8e, 0x7d, + 0x66, 0x4f, 0x01, 0x3b, 0x36, 0x02, 0x96, 0x51, 0x2a, 0xea, 0x60, 0xc7, 0x56, 0x52, 0x7f, 0xd4, + 0x0c, 0xbc, 0x2e, 0xca, 0x5b, 0x46, 0x29, 0x17, 0xa7, 0xbe, 0x8a, 0x8d, 0xca, 0xfe, 0x0f, 0x83, + 0xde, 0x8b, 0xa6, 0x8f, 0x0a, 0x96, 0x51, 0x32, 0xe2, 0xfd, 0x3f, 0x20, 0x56, 0xb8, 0x0c, 0xc4, + 0xca, 0xda, 0x8b, 0x20, 0x68, 0xa2, 0x09, 0xcb, 0x28, 0x8d, 0xb9, 0x05, 0x6e, 0x2c, 0x07, 0x41, + 0x53, 0xcd, 0x66, 0x37, 0x6c, 0xb4, 0x8f, 0xd1, 0x24, 0xee, 0x2a, 0x29, 0x9b, 0xc4, 0xaa, 0x44, + 0xf7, 0xe2, 0xa4, 0xeb, 0x47, 0x68, 0x0a, 0xb7, 0x71, 0x1c, 0x5d, 0x19, 0x1b, 0x61, 0x0d, 0x2c, + 0x08, 0x58, 0x9b, 0xde, 0xde, 0x2d, 0x3f, 0x8a, 0xbc, 0x63, 0x1f, 0x41, 0xcb, 0x28, 0xe5, 0xd7, + 0xaf, 0x28, 0x37, 0xb6, 0xdc, 0xa2, 0x2b, 0xbb, 0x04, 0xff, 0x98, 0xc2, 0xdd, 0x39, 0xce, 0xa3, + 0x98, 0xe1, 0x01, 0x40, 0x71, 0x96, 0x82, 0xd0, 0x6f, 0x1c, 0xb7, 0x85, 0x87, 0x19, 0xe2, 0xe1, + 0x9c, 0xe2, 0xa1, 0x4a, 0x31, 0x9c, 0x75, 0x5e, 0x24, 0x53, 0xb1, 0xc3, 0xf7, 0xc0, 0xac, 0x1e, + 0xb7, 0xdf, 0xee, 0xb5, 0xd0, 0x1c, 0x51, 0xa3, 0x4b, 0xa7, 0x05, 0x5d, 0x69, 0xf7, 0x5a, 0x2e, + 0x54, 0x23, 0xc6, 0x36, 0xf8, 0x2e, 0x98, 0x4b, 0x84, 0x4b, 0x88, 0xe7, 0x09, 0x31, 0x4a, 0x8b, + 0x95, 0x90, 0xcd, 0x68, 0x81, 0x12, 0x36, 0x47, 0x62, 0xa3, 0xd5, 0xaa, 0x75, 0x1a, 0x7e, 0xdd, + 0x47, 0x08, 0xd7, 0xac, 0x9c, 0x1b, 0xcb, 0xc5, 0xeb, 0x68, 0xdd, 0x9e, 0xe0, 0xcb, 0xf0, 0x8a, + 0xd4, 0x0a, 0xf5, 0x20, 0x3c, 0x44, 0x67, 0x19, 0xde, 0x88, 0xdb, 0xe1, 0x7e, 0x10, 0x1e, 0xc2, + 0x2a, 0x98, 0x0e, 0xfd, 0x7a, 0x2f, 0x8c, 0x1a, 0xaf, 0x7c, 0x91, 0xd6, 0x73, 0x24, 0xad, 0x67, + 0x33, 0x73, 0xe0, 0x16, 0xc5, 0x1a, 0x9e, 0xce, 0xcb, 0x60, 0x32, 0xf4, 0x3b, 0xbe, 0x87, 0xf3, + 0x48, 0x6f, 0xe6, 0x0b, 0x96, 0x89, 0xd5, 0x86, 0x5b, 0x85, 0xda, 0xc8, 0x30, 0xc7, 0x46, 0x96, + 0x65, 0x62, 0xb5, 0x91, 0x60, 0x54, 0x1b, 0x04, 0x8c, 0xa9, 0xcd, 0x45, 0xcb, 0xc4, 0x6a, 0xc3, + 0xcd, 0xb1, 0xda, 0x28, 0x40, 0xc7, 0x46, 0x4b, 0x96, 0x89, 0xd5, 0x46, 0x06, 0x6a, 0x8c, 0x4c, + 0x6d, 0x96, 0x2d, 0x13, 0xab, 0x0d, 0x37, 0xef, 0x27, 0x19, 0x99, 0xda, 0x5c, 0xb2, 0x4c, 0xac, + 0x36, 0x32, 0x90, 0xaa, 0x8d, 0x00, 0x72, 0x59, 0xb8, 0x6c, 0x99, 0x58, 0x6d, 0xb8, 0x5d, 0x52, + 0x1b, 0x15, 0xea, 0xd8, 0xe8, 0xff, 0x2c, 0x13, 0xab, 0x8d, 0x02, 0xa5, 0x6a, 0x13, 0xbb, 0xe7, + 0xb4, 0x57, 0x2c, 0x13, 0xab, 0x8d, 0x08, 0x40, 0x52, 0x1b, 0x0d, 0xec, 0xd8, 0xa8, 0x64, 0x99, + 0x58, 0x6d, 0x54, 0x30, 0x55, 0x9b, 0x38, 0x08, 0xa2, 0x36, 0x57, 0x2d, 0x13, 0xab, 0x8d, 0x08, + 0x81, 0xab, 0x8d, 0x80, 0x31, 0xb5, 0xb9, 0x66, 0x99, 0x58, 0x6d, 0xb8, 0x39, 0x56, 0x1b, 0x01, + 0x24, 0x6a, 0x73, 0xdd, 0x32, 0xb1, 0xda, 0x70, 0x23, 0x57, 0x9b, 0x38, 0x42, 0xaa, 0x36, 0x6f, + 0x5a, 0x26, 0x56, 0x1b, 0x11, 0x9f, 0x50, 0x9b, 0x98, 0x8d, 0xa8, 0xcd, 0x5b, 0x96, 0x89, 0xd5, + 0x46, 0xd0, 0x71, 0xb5, 0x11, 0x30, 0x4d, 0x6d, 0x56, 0x2d, 0xf3, 0xb5, 0xd4, 0x86, 0xf3, 0x24, + 0xd4, 0x26, 0xce, 0x92, 0xa6, 0x36, 0x6b, 0xc4, 0x43, 0x7f, 0xb5, 0x11, 0xc9, 0x4c, 0xa8, 0x8d, + 
0x1e, 0x37, 0x11, 0x85, 0x0d, 0xcb, 0x1c, 0x5c, 0x6d, 0xd4, 0x88, 0xb9, 0xda, 0x24, 0xc2, 0x25, + 0xc4, 0x36, 0x21, 0xee, 0xa3, 0x36, 0x5a, 0xa0, 0x5c, 0x6d, 0xb4, 0x6a, 0x31, 0xb5, 0x71, 0x70, + 0xcd, 0xa8, 0xda, 0xa8, 0x75, 0x13, 0x6a, 0x23, 0xd6, 0x11, 0xb5, 0xb9, 0xc5, 0xf0, 0x46, 0xdc, + 0x0e, 0x44, 0x6d, 0x9e, 0x82, 0xa9, 0x96, 0xd7, 0xa1, 0x02, 0xc1, 0x64, 0x62, 0x93, 0x24, 0xf5, + 0xcd, 0xec, 0x0c, 0x3c, 0xf6, 0x3a, 0x44, 0x3b, 0xc8, 0x47, 0xa5, 0xdd, 0x0d, 0x4f, 0xdc, 0x89, + 0x96, 0x6c, 0x93, 0x58, 0x1d, 0x9b, 0xa9, 0xca, 0xed, 0xc1, 0x58, 0x1d, 0x9b, 0x7c, 0x28, 0xac, + 0xcc, 0x06, 0x9f, 0x83, 0x69, 0xcc, 0x4a, 0xe5, 0x87, 0xab, 0xd0, 0x1d, 0xc2, 0xbb, 0xd2, 0x97, + 0x97, 0x4a, 0x13, 0xfd, 0xa4, 0xcc, 0x38, 0x3c, 0xd9, 0x2a, 0x73, 0x3b, 0x36, 0x17, 0xae, 0xb7, + 0x07, 0xe4, 0x76, 0x6c, 0xfa, 0xa9, 0x72, 0x73, 0x2b, 0xe7, 0xa6, 0x22, 0xc7, 0xb5, 0xee, 0xff, + 0x07, 0xe0, 0xa6, 0x02, 0xb8, 0xaf, 0xc5, 0x2d, 0x5b, 0x65, 0x6e, 0xc7, 0xe6, 0xf2, 0xf8, 0xce, + 0x80, 0xdc, 0x8e, 0xbd, 0xaf, 0xc5, 0x2d, 0x5b, 0xe1, 0xc7, 0xc1, 0x0c, 0xe6, 0x66, 0xda, 0x26, + 0x24, 0xf5, 0x2e, 0x61, 0x5f, 0xed, 0xcb, 0xce, 0x74, 0x96, 0xfd, 0xa0, 0xfc, 0x38, 0x50, 0xd5, + 0xae, 0x78, 0x70, 0x6c, 0xa1, 0xc4, 0x1f, 0x19, 0xd4, 0x83, 0x63, 0xb3, 0x1f, 0x9a, 0x07, 0x61, + 0x87, 0x47, 0x60, 0x8e, 0xe4, 0x87, 0x6f, 0x42, 0x28, 0xf8, 0x3d, 0xe2, 0x63, 0xbd, 0x7f, 0x8e, + 0x18, 0x98, 0xff, 0xa4, 0x5e, 0x70, 0xc8, 0xfa, 0x15, 0xd5, 0x0f, 0xae, 0x04, 0xdf, 0xcb, 0xd6, + 0xc0, 0x7e, 0x1c, 0x9b, 0xff, 0xd4, 0xfd, 0xc4, 0x57, 0xd4, 0xfb, 0x95, 0x1e, 0x1a, 0xe5, 0x41, + 0xef, 0x57, 0x72, 0x9c, 0x68, 0xf7, 0x2b, 0x3d, 0x62, 0x9e, 0x81, 0x62, 0xcc, 0xca, 0xce, 0x98, + 0xfb, 0x84, 0xf6, 0xad, 0xd3, 0x69, 0xe9, 0xe9, 0x43, 0x79, 0x27, 0x5b, 0x8a, 0x11, 0xee, 0x02, + 0xec, 0x89, 0x9c, 0x46, 0xf4, 0x48, 0x7a, 0x40, 0x58, 0xaf, 0xf5, 0x65, 0xc5, 0xe7, 0x14, 0xfe, + 0x9f, 0x52, 0xe6, 0x5b, 0xb1, 0x45, 0xb4, 0x3b, 0x95, 0x42, 0x76, 0x7e, 0x55, 0x06, 0x69, 0x77, + 0x02, 0xa5, 0x9f, 0x52, 0xbb, 0x4b, 0x56, 0x9e, 0x04, 0xc6, 0x4d, 0x8f, 0xbc, 0xea, 0x00, 0x49, + 0xa0, 0xcb, 0xc9, 0x69, 0x18, 0x27, 0x41, 0x32, 0xc2, 0x0e, 0x38, 0x2b, 0x11, 0x6b, 0x87, 0xe4, + 0x43, 0xe2, 0xe1, 0xe6, 0x00, 0x1e, 0x94, 0x63, 0x91, 0x7a, 0x9a, 0x6f, 0xa5, 0x5e, 0x84, 0x11, + 0x58, 0x94, 0x3c, 0xea, 0xa7, 0xe6, 0x36, 0x71, 0xe9, 0x0c, 0xe0, 0x52, 0x3d, 0x33, 0xa9, 0xcf, + 0x85, 0x56, 0xfa, 0x55, 0x78, 0x0c, 0xe6, 0x93, 0xdb, 0x24, 0x47, 0xdf, 0xce, 0x20, 0xf7, 0x80, + 0xb4, 0x0d, 0x7c, 0xf4, 0x49, 0xf7, 0x80, 0x76, 0x05, 0xbe, 0x0f, 0x16, 0x52, 0x76, 0x47, 0x3c, + 0x3d, 0x22, 0x9e, 0x36, 0x06, 0xdf, 0x5a, 0xec, 0x6a, 0xb6, 0x95, 0x72, 0x09, 0x2e, 0x83, 0x42, + 0xd0, 0xf6, 0x83, 0x23, 0x7e, 0xdc, 0x04, 0xf8, 0x11, 0x7b, 0x7b, 0xc8, 0xcd, 0x13, 0x2b, 0x3b, + 0x3c, 0x3e, 0x06, 0x66, 0x29, 0x48, 0xab, 0x6d, 0xe7, 0xb5, 0x1e, 0xb7, 0xb6, 0x87, 0x5c, 0x48, + 0x68, 0xd4, 0x5a, 0x8a, 0x08, 0x58, 0xb7, 0x7f, 0xc0, 0x27, 0x12, 0xc4, 0xca, 0x7a, 0xf7, 0x22, + 0xa0, 0x5f, 0x59, 0xdb, 0x86, 0x6c, 0xbc, 0x01, 0x88, 0x91, 0x76, 0xe1, 0x05, 0x00, 0x18, 0x04, + 0xdf, 0x87, 0x11, 0x7e, 0x10, 0xdd, 0x1e, 0x72, 0xc7, 0x29, 0x02, 0xdf, 0x5b, 0xca, 0x56, 0x1d, + 0x1b, 0x75, 0x2d, 0xa3, 0x34, 0xac, 0x6c, 0xd5, 0xb1, 0x63, 0x47, 0x54, 0x7b, 0x7a, 0xf8, 0xf1, + 0x58, 0x38, 0xa2, 0x62, 0x22, 0x78, 0x98, 0x90, 0xbc, 0xc2, 0x8f, 0xc6, 0x82, 0x87, 0x09, 0x43, + 0x85, 0x47, 0x43, 0xca, 0xf6, 0xe1, 0xe0, 0x8f, 0x78, 0x22, 0x66, 0x52, 0x9e, 0x3d, 0xe9, 0x69, + 0x8c, 0x88, 0x0c, 0x9b, 0xa6, 0xa1, 0x5f, 0x19, 0x24, 0xf7, 0x8b, 0x2b, 0x74, 0xdc, 0xb6, 0xc2, + 0xe7, 0x3c, 0x2b, 0x78, 
0xab, 0xef, 0x79, 0xcd, 0x9e, 0x1f, 0x3f, 0xa6, 0x61, 0xd3, 0x33, 0xba, + 0x0e, 0xba, 0x60, 0x5e, 0x9d, 0xd1, 0x08, 0xc6, 0x5f, 0x1b, 0xec, 0xd1, 0x56, 0x67, 0x24, 0x7a, + 0x47, 0x29, 0x67, 0x95, 0x49, 0x4e, 0x06, 0xa7, 0x63, 0x0b, 0xce, 0xdf, 0xf4, 0xe1, 0x74, 0xec, + 0x24, 0xa7, 0x63, 0x73, 0xce, 0x03, 0xe9, 0x21, 0xbf, 0xa7, 0x06, 0xfa, 0x5b, 0x4a, 0x7a, 0x3e, + 0x41, 0x7a, 0x20, 0x45, 0x3a, 0xa7, 0x0e, 0x89, 0xb2, 0x68, 0xa5, 0x58, 0x7f, 0xd7, 0x8f, 0x96, + 0x07, 0x3b, 0xa7, 0x8e, 0x94, 0xd2, 0x32, 0x40, 0x1a, 0x47, 0xb0, 0xfe, 0x3e, 0x2b, 0x03, 0xa4, + 0x97, 0xb4, 0x0c, 0x10, 0x5b, 0x5a, 0xa8, 0xb4, 0xd3, 0x04, 0xe9, 0x1f, 0xb2, 0x42, 0xa5, 0xcd, + 0xa7, 0x85, 0x4a, 0x8d, 0x69, 0xb4, 0x4c, 0x61, 0x38, 0xed, 0x1f, 0xb3, 0x68, 0xe9, 0x4d, 0xa8, + 0xd1, 0x52, 0x63, 0x5a, 0x06, 0xc8, 0x3d, 0x2a, 0x58, 0xff, 0x94, 0x95, 0x01, 0x72, 0xdb, 0x6a, + 0x19, 0x20, 0x36, 0xce, 0xb9, 0x27, 0x3d, 0x1c, 0x28, 0xcd, 0xff, 0x67, 0x83, 0xc8, 0x60, 0xdf, + 0xe6, 0x97, 0x1f, 0x0a, 0xa5, 0x20, 0xd5, 0x91, 0x81, 0x60, 0xfc, 0x8b, 0xc1, 0x9e, 0xb4, 0xfa, + 0x35, 0xbf, 0x32, 0x58, 0xc8, 0xe0, 0x94, 0x1a, 0xea, 0xaf, 0x7d, 0x38, 0x45, 0xf3, 0x2b, 0x53, + 0x08, 0xa9, 0x46, 0xda, 0x30, 0x42, 0x90, 0xfe, 0x8d, 0x92, 0x9e, 0xd2, 0xfc, 0xea, 0xcc, 0x22, + 0x8b, 0x56, 0x8a, 0xf5, 0xef, 0xfd, 0x68, 0x45, 0xf3, 0xab, 0x13, 0x8e, 0xb4, 0x0c, 0xa8, 0xcd, + 0xff, 0x8f, 0xac, 0x0c, 0xc8, 0xcd, 0xaf, 0x0c, 0x03, 0xd2, 0x42, 0xd5, 0x9a, 0xff, 0x9f, 0x59, + 0xa1, 0x2a, 0xcd, 0xaf, 0x8e, 0x0e, 0xd2, 0x68, 0xb5, 0xe6, 0xff, 0x57, 0x16, 0xad, 0xd2, 0xfc, + 0xea, 0xb3, 0x68, 0x5a, 0x06, 0xd4, 0xe6, 0xff, 0x77, 0x56, 0x06, 0xe4, 0xe6, 0x57, 0x06, 0x0e, + 0x9c, 0xf3, 0xa1, 0x34, 0xd7, 0xe5, 0xef, 0x70, 0xd0, 0x77, 0x73, 0x6c, 0x4e, 0x96, 0xd8, 0x3b, + 0x43, 0xc4, 0x33, 0x5f, 0x6e, 0x81, 0x8f, 0x80, 0x18, 0x1a, 0xd6, 0xc4, 0xcb, 0x1a, 0xf4, 0xbd, + 0x5c, 0xc6, 0xf9, 0xf1, 0x94, 0x43, 0x5c, 0xe1, 0x5f, 0x98, 0xe0, 0x47, 0xc1, 0x8c, 0x34, 0xc4, + 0xe6, 0x2f, 0x8e, 0xd0, 0xf7, 0xb3, 0xc8, 0xaa, 0x18, 0xf3, 0xd8, 0x8b, 0x5e, 0xc6, 0x64, 0xc2, + 0x04, 0xb7, 0xd4, 0xb9, 0x70, 0xaf, 0xde, 0x45, 0x3f, 0xa0, 0x44, 0x0b, 0x69, 0x45, 0xe8, 0xd5, + 0xbb, 0xca, 0xc4, 0xb8, 0x57, 0xef, 0xc2, 0x4d, 0x20, 0x66, 0x8b, 0x35, 0xaf, 0x7d, 0x82, 0x7e, + 0x48, 0xd7, 0xcf, 0x26, 0xd6, 0x6f, 0xb5, 0x4f, 0xdc, 0x3c, 0x87, 0x6e, 0xb5, 0x4f, 0xe0, 0x5d, + 0x69, 0xd6, 0xfc, 0x0a, 0x97, 0x01, 0xfd, 0x88, 0xae, 0x9d, 0x4f, 0xac, 0xa5, 0x55, 0x12, 0xd3, + 0x4d, 0xf2, 0x15, 0x97, 0x27, 0x6e, 0x50, 0x5e, 0x9e, 0x1f, 0xe7, 0x48, 0xb5, 0xfb, 0x95, 0x47, + 0xf4, 0xa5, 0x54, 0x1e, 0x41, 0x14, 0x97, 0xe7, 0x27, 0xb9, 0x0c, 0x85, 0x93, 0xca, 0xc3, 0x97, + 0xc5, 0xe5, 0x91, 0xb9, 0x48, 0x79, 0x48, 0x75, 0x7e, 0x9a, 0xc5, 0x25, 0x55, 0x27, 0x1e, 0x0a, + 0xb2, 0x55, 0xb8, 0x3a, 0xf2, 0xad, 0x82, 0xab, 0xf3, 0x4b, 0x4a, 0x94, 0x5d, 0x1d, 0xe9, 0xee, + 0x60, 0xd5, 0x11, 0x14, 0xb8, 0x3a, 0x3f, 0xa3, 0xeb, 0x33, 0xaa, 0xc3, 0xa1, 0xac, 0x3a, 0x62, + 0x25, 0xad, 0xce, 0xcf, 0xe9, 0xda, 0xcc, 0xea, 0x70, 0x38, 0xad, 0xce, 0x05, 0x00, 0xc8, 0xfe, + 0xdb, 0x5e, 0xcb, 0x5f, 0x43, 0x9f, 0x36, 0xc9, 0x6b, 0x28, 0xc9, 0x04, 0x2d, 0x90, 0xa7, 0xfd, + 0x8b, 0xbf, 0xae, 0xa3, 0xcf, 0xc8, 0x88, 0x5d, 0x6c, 0x82, 0x17, 0x41, 0xa1, 0x16, 0x43, 0x36, + 0xd0, 0x67, 0x19, 0xa4, 0xca, 0x21, 0x1b, 0x70, 0x09, 0x4c, 0x50, 0x04, 0x81, 0xd8, 0x35, 0xf4, + 0x39, 0x9d, 0x86, 0xfc, 0x3d, 0x49, 0xbe, 0xad, 0x62, 0xc8, 0x4d, 0xf4, 0x79, 0x8a, 0x90, 0x6d, + 0x70, 0x99, 0xd3, 0xac, 0x12, 0x1e, 0x07, 0x7d, 0x41, 0x01, 0x61, 0x1e, 0x47, 0xec, 0x08, 0x7f, + 0xbb, 0x85, 0xbe, 0xa8, 0x3b, 0xba, 0x85, 0x01, 
0x22, 0xb4, 0x4d, 0xf4, 0x25, 0x3d, 0xda, 0xcd, + 0x78, 0xcb, 0xf8, 0xeb, 0x6d, 0xf4, 0x65, 0x9d, 0xe2, 0x36, 0x5c, 0x02, 0x85, 0xaa, 0x40, 0xac, + 0xad, 0xa2, 0xaf, 0xb0, 0x38, 0x04, 0xc9, 0xda, 0x2a, 0xc1, 0xec, 0x54, 0xde, 0x7d, 0x50, 0xdb, + 0xdd, 0x7a, 0x5c, 0x59, 0x5b, 0x43, 0x5f, 0xe5, 0x18, 0x6c, 0xa4, 0xb6, 0x18, 0x43, 0x72, 0xbd, + 0x8e, 0xbe, 0xa6, 0x60, 0x88, 0x0d, 0x5e, 0x02, 0x93, 0x35, 0x29, 0xbf, 0x6b, 0x1b, 0xe8, 0xeb, + 0x09, 0x6f, 0x1b, 0x14, 0x55, 0x8d, 0x51, 0x36, 0xfa, 0x46, 0x02, 0x65, 0xc7, 0x09, 0xa4, 0xa0, + 0x9b, 0xe8, 0x9b, 0x72, 0x02, 0x09, 0x48, 0xca, 0x32, 0xdd, 0x9d, 0x83, 0xbe, 0x95, 0x00, 0x39, + 0xd8, 0x9f, 0x14, 0xd3, 0xad, 0x5a, 0x0d, 0x7d, 0x3b, 0x81, 0xba, 0x85, 0x51, 0x52, 0x4c, 0x9b, + 0xb5, 0x1a, 0xfa, 0x4e, 0x22, 0xaa, 0xcd, 0xc5, 0xe7, 0x60, 0x42, 0x7d, 0xd0, 0x29, 0x00, 0xc3, + 0x63, 0x6f, 0x44, 0x0d, 0x0f, 0xbe, 0x0d, 0xf2, 0xf5, 0x40, 0xbc, 0xd4, 0x40, 0xb9, 0xd3, 0x5e, + 0x80, 0xc8, 0xe8, 0xc5, 0x7b, 0x00, 0x26, 0x87, 0x94, 0xb0, 0x08, 0xcc, 0x97, 0xfe, 0x09, 0x73, + 0x81, 0x7f, 0x85, 0xb3, 0xe0, 0x0c, 0xbd, 0x7d, 0x72, 0xc4, 0x46, 0xbf, 0xdc, 0xc9, 0x6d, 0x1a, + 0x31, 0x83, 0x3c, 0x90, 0x94, 0x19, 0xcc, 0x14, 0x06, 0x53, 0x66, 0x28, 0x83, 0xd9, 0xb4, 0xd1, + 0xa3, 0xcc, 0x31, 0x91, 0xc2, 0x31, 0x91, 0xce, 0xa1, 0x8c, 0x18, 0x65, 0x8e, 0xe1, 0x14, 0x8e, + 0xe1, 0x24, 0x47, 0x62, 0x94, 0x28, 0x73, 0x4c, 0xa7, 0x70, 0x4c, 0xa7, 0x73, 0x28, 0x23, 0x43, + 0x99, 0x03, 0xa6, 0x70, 0x40, 0x99, 0xe3, 0x01, 0x98, 0x4f, 0x1f, 0x0c, 0xca, 0x2c, 0xa3, 0x29, + 0x2c, 0xa3, 0x19, 0x2c, 0xea, 0xf0, 0x4f, 0x66, 0x19, 0x49, 0x61, 0x19, 0x91, 0x59, 0xaa, 0x00, + 0x65, 0x8d, 0xf7, 0x64, 0x9e, 0xa9, 0x14, 0x9e, 0xa9, 0x2c, 0x1e, 0x6d, 0x7c, 0x27, 0xf3, 0x14, + 0x53, 0x78, 0x8a, 0xa9, 0xdd, 0x26, 0x0f, 0xe9, 0x4e, 0xeb, 0xd7, 0x9c, 0xcc, 0xb0, 0x05, 0x66, + 0x52, 0xe6, 0x71, 0xa7, 0x51, 0x18, 0x32, 0xc5, 0x5d, 0x50, 0xd4, 0x87, 0x6f, 0xf2, 0xfa, 0xb1, + 0x94, 0xf5, 0x63, 0x29, 0x4d, 0xa2, 0x0f, 0xda, 0x64, 0x8e, 0xf1, 0x14, 0x8e, 0xf1, 0xe4, 0x36, + 0xf4, 0x89, 0xda, 0x69, 0x14, 0x05, 0x99, 0x22, 0x04, 0xe7, 0xfa, 0x8c, 0xcc, 0x52, 0xa8, 0xde, + 0x91, 0xa9, 0x5e, 0xe3, 0x7d, 0x95, 0xe4, 0xf3, 0x18, 0x9c, 0xef, 0x37, 0x33, 0x4b, 0x71, 0xba, + 0xa6, 0x3a, 0xed, 0xfb, 0x0a, 0x4b, 0x72, 0xd4, 0xa4, 0x0d, 0x97, 0x36, 0x2b, 0x4b, 0x71, 0x72, + 0x47, 0x76, 0x32, 0xe8, 0x4b, 0x2d, 0xc9, 0x9b, 0x07, 0xce, 0x66, 0xce, 0xcb, 0x52, 0xdc, 0xad, + 0xa8, 0xee, 0xb2, 0x5f, 0x75, 0xc5, 0x2e, 0x96, 0x6e, 0x03, 0x20, 0x4d, 0xf6, 0x46, 0x81, 0x59, + 0xdd, 0xdb, 0x2b, 0x0e, 0xe1, 0x5f, 0xca, 0x5b, 0x6e, 0xd1, 0xa0, 0xbf, 0x3c, 0x2f, 0xe6, 0xb0, + 0xbb, 0xdd, 0xca, 0xc3, 0xe2, 0x7f, 0xf9, 0x7f, 0x46, 0x79, 0x42, 0x8c, 0xa2, 0xf0, 0xa9, 0xb2, + 0xf4, 0x06, 0x98, 0xd4, 0x06, 0x92, 0x05, 0x60, 0xd4, 0xf9, 0x81, 0x52, 0xbf, 0x76, 0x13, 0x80, + 0xf8, 0xdf, 0x30, 0xc1, 0x29, 0x90, 0x3f, 0xd8, 0xdd, 0x7f, 0x52, 0xb9, 0xbf, 0x53, 0xdd, 0xa9, + 0x3c, 0x28, 0x0e, 0xc1, 0x02, 0x18, 0x7b, 0xe2, 0xee, 0x3d, 0xdd, 0x2b, 0x1f, 0x54, 0x8b, 0x06, + 0x1c, 0x03, 0xc3, 0x8f, 0xf6, 0xf7, 0x76, 0x8b, 0xb9, 0x6b, 0xf7, 0x40, 0x5e, 0x9e, 0x07, 0x4e, + 0x81, 0x7c, 0x75, 0xcf, 0xad, 0xec, 0x3c, 0xdc, 0xad, 0xd1, 0x48, 0x25, 0x03, 0x8d, 0x58, 0x31, + 0x3c, 0x2f, 0xe6, 0xca, 0x17, 0xc1, 0x85, 0x7a, 0xd0, 0x4a, 0xfc, 0x61, 0x26, 0x25, 0xe7, 0xc5, + 0x08, 0xb1, 0x6e, 0xfc, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x33, 0xc2, 0x0c, 0xb6, 0xeb, 0x26, 0x00, + 0x00, +} diff --git a/vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.proto 
b/vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.proto new file mode 100644 index 0000000..95a8fd1 --- /dev/null +++ b/vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.proto @@ -0,0 +1,285 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; +package conformance; +option java_package = "com.google.protobuf.conformance"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/field_mask.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; + +// This defines the conformance testing protocol. This protocol exists between +// the conformance test suite itself and the code being tested. For each test, +// the suite will send a ConformanceRequest message and expect a +// ConformanceResponse message. +// +// You can run the tests in either of two ways: +// +// 1. in-process (using the interface in conformance_test.h). +// +// 2. as a sub-process communicating over a pipe. Information about how to +// do this is in conformance_test_runner.cc. +// +// Pros/cons of the two approaches: +// +// - running as a sub-process is much simpler for languages other than C/C++. +// +// - running as a sub-process may be trickier in unusual environments like +// iOS apps, where fork/stdin/stdout are not available. + +enum WireFormat { + UNSPECIFIED = 0; + PROTOBUF = 1; + JSON = 2; +} + +// Represents a single test case's input. The testee should: +// +// 1. parse this proto (which should always succeed) +// 2. parse the protobuf or JSON payload in "payload" (which may fail) +// 3. if the parse succeeded, serialize the message in the requested format.
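+//
+// A minimal testee loop for approach (2), sketched in Go. It assumes the
+// runner's 4-byte little-endian length framing on stdin/stdout described in
+// conformance_test_runner.cc; "pb" names the generated Go package for these
+// messages and "handle" is a hypothetical helper, so this is an illustration
+// rather than part of the protocol definition:
+//
+//	for {
+//		var n uint32
+//		if err := binary.Read(os.Stdin, binary.LittleEndian, &n); err == io.EOF {
+//			break // the runner closed the pipe: all tests are done
+//		} else if err != nil {
+//			log.Fatal(err)
+//		}
+//		buf := make([]byte, n)
+//		if _, err := io.ReadFull(os.Stdin, buf); err != nil {
+//			log.Fatal(err)
+//		}
+//		req := &pb.ConformanceRequest{}
+//		if err := proto.Unmarshal(buf, req); err != nil {
+//			log.Fatal(err)
+//		}
+//		resp := handle(req) // parse req.Payload, re-serialize as requested
+//		out, _ := proto.Marshal(resp)
+//		binary.Write(os.Stdout, binary.LittleEndian, uint32(len(out)))
+//		os.Stdout.Write(out)
+//	}
+message ConformanceRequest {
+  // The payload (whether protobuf or JSON) is always for a TestAllTypes proto
+  // (see below).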
+ oneof payload { + bytes protobuf_payload = 1; + string json_payload = 2; + } + + // Which format should the testee serialize its message to? + WireFormat requested_output_format = 3; +} + +// Represents a single test case's output. +message ConformanceResponse { + oneof result { + // This string should be set to indicate parsing failed. The string can + // provide more information about the parse error if it is available. + // + // Setting this string does not necessarily mean the testee failed the + // test. Some of the test cases are intentionally invalid input. + string parse_error = 1; + + // If the input was successfully parsed but errors occurred when + // serializing it to the requested output format, set the error message in + // this field. + string serialize_error = 6; + + // This should be set if some other error occurred. This will always + // indicate that the test failed. The string can provide more information + // about the failure. + string runtime_error = 2; + + // If the input was successfully parsed and the requested output was + // protobuf, serialize it to protobuf and set it in this field. + bytes protobuf_payload = 3; + + // If the input was successfully parsed and the requested output was JSON, + // serialize to JSON and set it in this field. + string json_payload = 4; + + // For when the testee skipped the test, likely because a certain feature + // wasn't supported, like JSON input/output. + string skipped = 5; + } +} + +// This proto includes every type of field in both singular and repeated +// forms. +message TestAllTypes { + message NestedMessage { + int32 a = 1; + TestAllTypes corecursive = 2; + } + + enum NestedEnum { + FOO = 0; + BAR = 1; + BAZ = 2; + NEG = -1; // Intentionally negative. + } + + // Singular + int32 optional_int32 = 1; + int64 optional_int64 = 2; + uint32 optional_uint32 = 3; + uint64 optional_uint64 = 4; + sint32 optional_sint32 = 5; + sint64 optional_sint64 = 6; + fixed32 optional_fixed32 = 7; + fixed64 optional_fixed64 = 8; + sfixed32 optional_sfixed32 = 9; + sfixed64 optional_sfixed64 = 10; + float optional_float = 11; + double optional_double = 12; + bool optional_bool = 13; + string optional_string = 14; + bytes optional_bytes = 15; + + NestedMessage optional_nested_message = 18; + ForeignMessage optional_foreign_message = 19; + + NestedEnum optional_nested_enum = 21; + ForeignEnum optional_foreign_enum = 22; + + string optional_string_piece = 24 [ctype=STRING_PIECE]; + string optional_cord = 25 [ctype=CORD]; + + TestAllTypes recursive_message = 27; + + // Repeated + repeated int32 repeated_int32 = 31; + repeated int64 repeated_int64 = 32; + repeated uint32 repeated_uint32 = 33; + repeated uint64 repeated_uint64 = 34; + repeated sint32 repeated_sint32 = 35; + repeated sint64 repeated_sint64 = 36; + repeated fixed32 repeated_fixed32 = 37; + repeated fixed64 repeated_fixed64 = 38; + repeated sfixed32 repeated_sfixed32 = 39; + repeated sfixed64 repeated_sfixed64 = 40; + repeated float repeated_float = 41; + repeated double repeated_double = 42; + repeated bool repeated_bool = 43; + repeated string repeated_string = 44; + repeated bytes repeated_bytes = 45; + + repeated NestedMessage repeated_nested_message = 48; + repeated ForeignMessage repeated_foreign_message = 49; + + repeated NestedEnum repeated_nested_enum = 51; + repeated ForeignEnum repeated_foreign_enum = 52; + + repeated string repeated_string_piece = 54 [ctype=STRING_PIECE]; + repeated string repeated_cord = 55 [ctype=CORD]; + + // Map + map < int32, int32> map_int32_int32 = 
56; + map < int64, int64> map_int64_int64 = 57; + map < uint32, uint32> map_uint32_uint32 = 58; + map < uint64, uint64> map_uint64_uint64 = 59; + map < sint32, sint32> map_sint32_sint32 = 60; + map < sint64, sint64> map_sint64_sint64 = 61; + map < fixed32, fixed32> map_fixed32_fixed32 = 62; + map < fixed64, fixed64> map_fixed64_fixed64 = 63; + map<sfixed32, sfixed32> map_sfixed32_sfixed32 = 64; + map<sfixed64, sfixed64> map_sfixed64_sfixed64 = 65; + map < int32, float> map_int32_float = 66; + map < int32, double> map_int32_double = 67; + map < bool, bool> map_bool_bool = 68; + map < string, string> map_string_string = 69; + map < string, bytes> map_string_bytes = 70; + map < string, NestedMessage> map_string_nested_message = 71; + map < string, ForeignMessage> map_string_foreign_message = 72; + map < string, NestedEnum> map_string_nested_enum = 73; + map < string, ForeignEnum> map_string_foreign_enum = 74; + + oneof oneof_field { + uint32 oneof_uint32 = 111; + NestedMessage oneof_nested_message = 112; + string oneof_string = 113; + bytes oneof_bytes = 114; + bool oneof_bool = 115; + uint64 oneof_uint64 = 116; + float oneof_float = 117; + double oneof_double = 118; + NestedEnum oneof_enum = 119; + } + + // Well-known types + google.protobuf.BoolValue optional_bool_wrapper = 201; + google.protobuf.Int32Value optional_int32_wrapper = 202; + google.protobuf.Int64Value optional_int64_wrapper = 203; + google.protobuf.UInt32Value optional_uint32_wrapper = 204; + google.protobuf.UInt64Value optional_uint64_wrapper = 205; + google.protobuf.FloatValue optional_float_wrapper = 206; + google.protobuf.DoubleValue optional_double_wrapper = 207; + google.protobuf.StringValue optional_string_wrapper = 208; + google.protobuf.BytesValue optional_bytes_wrapper = 209; + + repeated google.protobuf.BoolValue repeated_bool_wrapper = 211; + repeated google.protobuf.Int32Value repeated_int32_wrapper = 212; + repeated google.protobuf.Int64Value repeated_int64_wrapper = 213; + repeated google.protobuf.UInt32Value repeated_uint32_wrapper = 214; + repeated google.protobuf.UInt64Value repeated_uint64_wrapper = 215; + repeated google.protobuf.FloatValue repeated_float_wrapper = 216; + repeated google.protobuf.DoubleValue repeated_double_wrapper = 217; + repeated google.protobuf.StringValue repeated_string_wrapper = 218; + repeated google.protobuf.BytesValue repeated_bytes_wrapper = 219; + + google.protobuf.Duration optional_duration = 301; + google.protobuf.Timestamp optional_timestamp = 302; + google.protobuf.FieldMask optional_field_mask = 303; + google.protobuf.Struct optional_struct = 304; + google.protobuf.Any optional_any = 305; + google.protobuf.Value optional_value = 306; + + repeated google.protobuf.Duration repeated_duration = 311; + repeated google.protobuf.Timestamp repeated_timestamp = 312; + repeated google.protobuf.FieldMask repeated_fieldmask = 313; + repeated google.protobuf.Struct repeated_struct = 324; + repeated google.protobuf.Any repeated_any = 315; + repeated google.protobuf.Value repeated_value = 316; + + // Test field-name-to-JSON-name convention. + // (protobuf says names can be any valid C/C++ identifier.)
+ int32 fieldname1 = 401; + int32 field_name2 = 402; + int32 _field_name3 = 403; + int32 field__name4_ = 404; + int32 field0name5 = 405; + int32 field_0_name6 = 406; + int32 fieldName7 = 407; + int32 FieldName8 = 408; + int32 field_Name9 = 409; + int32 Field_Name10 = 410; + int32 FIELD_NAME11 = 411; + int32 FIELD_name12 = 412; + int32 __field_name13 = 413; + int32 __Field_name14 = 414; + int32 field__name15 = 415; + int32 field__Name16 = 416; + int32 field_name17__ = 417; + int32 Field_name18__ = 418; +} + +message ForeignMessage { + int32 c = 1; +} + +enum ForeignEnum { + FOREIGN_FOO = 0; + FOREIGN_BAR = 1; + FOREIGN_BAZ = 2; +} diff --git a/vendor/github.com/golang/protobuf/descriptor/descriptor.go b/vendor/github.com/golang/protobuf/descriptor/descriptor.go new file mode 100644 index 0000000..ac7e51b --- /dev/null +++ b/vendor/github.com/golang/protobuf/descriptor/descriptor.go @@ -0,0 +1,93 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package descriptor provides functions for obtaining protocol buffer +// descriptors for generated Go types. +// +// These functions cannot go in package proto because they depend on the +// generated protobuf descriptor messages, which themselves depend on proto. +package descriptor + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + + "github.com/golang/protobuf/proto" + protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" +) + +// extractFile extracts a FileDescriptorProto from a gzip'd buffer. 
+func extractFile(gz []byte) (*protobuf.FileDescriptorProto, error) { + r, err := gzip.NewReader(bytes.NewReader(gz)) + if err != nil { + return nil, fmt.Errorf("failed to open gzip reader: %v", err) + } + defer r.Close() + + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("failed to uncompress descriptor: %v", err) + } + + fd := new(protobuf.FileDescriptorProto) + if err := proto.Unmarshal(b, fd); err != nil { + return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err) + } + + return fd, nil +} + +// Message is a proto.Message with a method to return its descriptor. +// +// Message types generated by the protocol compiler always satisfy +// the Message interface. +type Message interface { + proto.Message + Descriptor() ([]byte, []int) +} + +// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it +// describing the given message. +func ForMessage(msg Message) (fd *protobuf.FileDescriptorProto, md *protobuf.DescriptorProto) { + gz, path := msg.Descriptor() + fd, err := extractFile(gz) + if err != nil { + panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err)) + } + + md = fd.MessageType[path[0]] + for _, i := range path[1:] { + md = md.NestedType[i] + } + return fd, md +} diff --git a/vendor/github.com/golang/protobuf/descriptor/descriptor_test.go b/vendor/github.com/golang/protobuf/descriptor/descriptor_test.go new file mode 100644 index 0000000..27b0729 --- /dev/null +++ b/vendor/github.com/golang/protobuf/descriptor/descriptor_test.go @@ -0,0 +1,32 @@ +package descriptor_test + +import ( + "fmt" + "testing" + + "github.com/golang/protobuf/descriptor" + tpb "github.com/golang/protobuf/proto/testdata" + protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" +) + +func TestMessage(t *testing.T) { + var msg *protobuf.DescriptorProto + fd, md := descriptor.ForMessage(msg) + if pkg, want := fd.GetPackage(), "google.protobuf"; pkg != want { + t.Errorf("descriptor.ForMessage(%T).GetPackage() = %q; want %q", msg, pkg, want) + } + if name, want := md.GetName(), "DescriptorProto"; name != want { + t.Fatalf("descriptor.ForMessage(%T).GetName() = %q; want %q", msg, name, want) + } +} + +func Example_Options() { + var msg *tpb.MyMessageSet + _, md := descriptor.ForMessage(msg) + if md.GetOptions().GetMessageSetWireFormat() { + fmt.Printf("%v uses option message_set_wire_format.\n", md.GetName()) + } + + // Output: + // MyMessageSet uses option message_set_wire_format. +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go new file mode 100644 index 0000000..110ae13 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go @@ -0,0 +1,1083 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON. +It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json. + +This package produces a different output than the standard "encoding/json" package, +which does not operate correctly on protocol buffers. +*/ +package jsonpb + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + + stpb "github.com/golang/protobuf/ptypes/struct" +) + +// Marshaler is a configurable object for converting between +// protocol buffer objects and a JSON representation for them. +type Marshaler struct { + // Whether to render enum values as integers, as opposed to string values. + EnumsAsInts bool + + // Whether to render fields with zero values. + EmitDefaults bool + + // A string to indent each level by. The presence of this field will + // also cause a space to appear between the field separator and + // value, and for newlines to appear between fields and array + // elements. + Indent string + + // Whether to use the original (.proto) name for fields. + OrigName bool + + // A custom URL resolver to use when marshaling Any messages to JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver +} + +// AnyResolver takes a type URL, present in an Any message, and resolves it into +// an instance of the associated message. +type AnyResolver interface { + Resolve(typeUrl string) (proto.Message, error) +} + +func defaultResolveAny(typeUrl string) (proto.Message, error) { + // Only the part of typeUrl after the last slash is relevant. + mname := typeUrl + if slash := strings.LastIndex(mname, "/"); slash >= 0 { + mname = mname[slash+1:] + } + mt := proto.MessageType(mname) + if mt == nil { + return nil, fmt.Errorf("unknown message type %q", mname) + } + return reflect.New(mt.Elem()).Interface().(proto.Message), nil +} + +// JSONPBMarshaler is implemented by protobuf messages that customize the +// way they are marshaled to JSON. Messages that implement this should +// also implement JSONPBUnmarshaler so that the custom format can be +// parsed. +type JSONPBMarshaler interface { + MarshalJSONPB(*Marshaler) ([]byte, error) +}
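+
+// A minimal usage sketch for the Marshaler options above, assuming msg is
+// any generated proto.Message (the names here are illustrative only):
+//
+//	m := &jsonpb.Marshaler{Indent: "  ", OrigName: true, EmitDefaults: true}
+//	js, err := m.MarshalToString(msg)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(js) // indented JSON, original .proto field names, zero values kept
+
+// For illustration, a minimal type satisfying JSONPBMarshaler by emitting a
+// pre-encoded JSON body verbatim. RawJSON is hypothetical, not part of this
+// package; a real message would normally also implement JSONPBUnmarshaler
+// (below) so the custom format round-trips:
+//
+//	type RawJSON struct{ Body []byte }
+//
+//	func (r *RawJSON) Reset()         { *r = RawJSON{} }
+//	func (r *RawJSON) String() string { return string(r.Body) }
+//	func (*RawJSON) ProtoMessage()    {}
+//
+//	// MarshalJSONPB returns the stored bytes, assumed to be valid JSON.
+//	func (r *RawJSON) MarshalJSONPB(*jsonpb.Marshaler) ([]byte, error) {
+//		return r.Body, nil
+//	}
+
+// JSONPBUnmarshaler is implemented by protobuf messages that customize
+// the way they are unmarshaled from JSON. Messages that implement this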
+// should also implement JSONPBMarshaler so that the custom format can be +// produced. +type JSONPBUnmarshaler interface { + UnmarshalJSONPB(*Unmarshaler, []byte) error +} + +// Marshal marshals a protocol buffer into JSON. +func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error { + writer := &errWriter{writer: out} + return m.marshalObject(writer, pb, "", "") +} + +// MarshalToString converts a protocol buffer object to a JSON string. +func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) { + var buf bytes.Buffer + if err := m.Marshal(&buf, pb); err != nil { + return "", err + } + return buf.String(), nil +} + +type int32Slice []int32 + +var nonFinite = map[string]float64{ + `"NaN"`: math.NaN(), + `"Infinity"`: math.Inf(1), + `"-Infinity"`: math.Inf(-1), +} + +// For sorting extension ids to ensure stable output. +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type wkt interface { + XXX_WellKnownType() string +} + +// marshalObject writes a struct to the Writer. +func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error { + if jsm, ok := v.(JSONPBMarshaler); ok { + b, err := jsm.MarshalJSONPB(m) + if err != nil { + return err + } + if typeURL != "" { + // we are marshaling this object to an Any type + var js map[string]*json.RawMessage + if err = json.Unmarshal(b, &js); err != nil { + return fmt.Errorf("type %T produced invalid JSON: %v", v, err) + } + turl, err := json.Marshal(typeURL) + if err != nil { + return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) + } + js["@type"] = (*json.RawMessage)(&turl) + if b, err = json.Marshal(js); err != nil { + return err + } + } + + out.write(string(b)) + return out.err + } + + s := reflect.ValueOf(v).Elem() + + // Handle well-known types. + if wkt, ok := v.(wkt); ok { + switch wkt.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + // "Wrappers use the same representation in JSON + // as the wrapped primitive type, ..." + sprop := proto.GetProperties(s.Type()) + return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent) + case "Any": + // Any is a bit more involved. + return m.marshalAny(out, v, indent) + case "Duration": + // "Generated output always contains 3, 6, or 9 fractional digits, + // depending on required precision." + s, ns := s.Field(0).Int(), s.Field(1).Int() + d := time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond + x := fmt.Sprintf("%.9f", d.Seconds()) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + out.write(`"`) + out.write(x) + out.write(`s"`) + return out.err + case "Struct", "ListValue": + // Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice. + // TODO: pass the correct Properties if needed. + return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent) + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 3, 6 or 9 fractional digits." + s, ns := s.Field(0).Int(), s.Field(1).Int() + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
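+		// (Sketch: formatting at nanosecond precision and trimming "000" at
+		// most twice yields 3, 6, or 9 fractional digits, e.g.
+		// "...05.500000000" -> "...05.500", matching the rule quoted above.)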
+ x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + out.write(`"`) + out.write(x) + out.write(`Z"`) + return out.err + case "Value": + // Value has a single oneof. + kind := s.Field(0) + if kind.IsNil() { + // "absence of any variant indicates an error" + return errors.New("nil Value") + } + // oneof -> *T -> T -> T.F + x := kind.Elem().Elem().Field(0) + // TODO: pass the correct Properties if needed. + return m.marshalValue(out, &proto.Properties{}, x, indent) + } + } + + out.write("{") + if m.Indent != "" { + out.write("\n") + } + + firstField := true + + if typeURL != "" { + if err := m.marshalTypeURL(out, indent, typeURL); err != nil { + return err + } + firstField = false + } + + for i := 0; i < s.NumField(); i++ { + value := s.Field(i) + valueField := s.Type().Field(i) + if strings.HasPrefix(valueField.Name, "XXX_") { + continue + } + + // IsNil will panic on most value kinds. + switch value.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface: + if value.IsNil() { + continue + } + } + + if !m.EmitDefaults { + switch value.Kind() { + case reflect.Bool: + if !value.Bool() { + continue + } + case reflect.Int32, reflect.Int64: + if value.Int() == 0 { + continue + } + case reflect.Uint32, reflect.Uint64: + if value.Uint() == 0 { + continue + } + case reflect.Float32, reflect.Float64: + if value.Float() == 0 { + continue + } + case reflect.String: + if value.Len() == 0 { + continue + } + case reflect.Map, reflect.Ptr, reflect.Slice: + if value.IsNil() { + continue + } + } + } + + // Oneof fields need special handling. + if valueField.Tag.Get("protobuf_oneof") != "" { + // value is an interface containing &T{real_value}. + sv := value.Elem().Elem() // interface -> *T -> T + value = sv.Field(0) + valueField = sv.Type().Field(0) + } + prop := jsonProperties(valueField, m.OrigName) + if !firstField { + m.writeSep(out) + } + if err := m.marshalField(out, prop, value, indent); err != nil { + return err + } + firstField = false + } + + // Handle proto2 extensions. + if ep, ok := v.(proto.Message); ok { + extensions := proto.RegisteredExtensions(v) + // Sort extensions for stable output. + ids := make([]int32, 0, len(extensions)) + for id, desc := range extensions { + if !proto.HasExtension(ep, desc) { + continue + } + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + for _, id := range ids { + desc := extensions[id] + if desc == nil { + // unknown extension + continue + } + ext, extErr := proto.GetExtension(ep, desc) + if extErr != nil { + return extErr + } + value := reflect.ValueOf(ext) + var prop proto.Properties + prop.Parse(desc.Tag) + prop.JSONName = fmt.Sprintf("[%s]", desc.Name) + if !firstField { + m.writeSep(out) + } + if err := m.marshalField(out, &prop, value, indent); err != nil { + return err + } + firstField = false + } + + } + + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err +} + +func (m *Marshaler) writeSep(out *errWriter) { + if m.Indent != "" { + out.write(",\n") + } else { + out.write(",") + } +} + +func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error { + // "If the Any contains a value that has a special JSON mapping, + // it will be converted as follows: {"@type": xxx, "value": yyy}. + // Otherwise, the value will be converted into a JSON object, + // and the "@type" field will be inserted to indicate the actual data type." 
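+ // In the generated Any struct, field 0 is TypeUrl and field 1 is Value
+ // (the packed message bytes); the URL is resolved to a concrete message
+ // below so those bytes can be decoded and re-marshaled as JSON.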
+ v := reflect.ValueOf(any).Elem()
+ turl := v.Field(0).String()
+ val := v.Field(1).Bytes()
+
+ var msg proto.Message
+ var err error
+ if m.AnyResolver != nil {
+ msg, err = m.AnyResolver.Resolve(turl)
+ } else {
+ msg, err = defaultResolveAny(turl)
+ }
+ if err != nil {
+ return err
+ }
+
+ if err := proto.Unmarshal(val, msg); err != nil {
+ return err
+ }
+
+ if _, ok := msg.(wkt); ok {
+ out.write("{")
+ if m.Indent != "" {
+ out.write("\n")
+ }
+ if err := m.marshalTypeURL(out, indent, turl); err != nil {
+ return err
+ }
+ m.writeSep(out)
+ if m.Indent != "" {
+ out.write(indent)
+ out.write(m.Indent)
+ out.write(`"value": `)
+ } else {
+ out.write(`"value":`)
+ }
+ if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil {
+ return err
+ }
+ if m.Indent != "" {
+ out.write("\n")
+ out.write(indent)
+ }
+ out.write("}")
+ return out.err
+ }
+
+ return m.marshalObject(out, msg, indent, turl)
+}
+
+func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error {
+ if m.Indent != "" {
+ out.write(indent)
+ out.write(m.Indent)
+ }
+ out.write(`"@type":`)
+ if m.Indent != "" {
+ out.write(" ")
+ }
+ b, err := json.Marshal(typeURL)
+ if err != nil {
+ return err
+ }
+ out.write(string(b))
+ return out.err
+}
+
+// marshalField writes field description and value to the Writer.
+func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
+ if m.Indent != "" {
+ out.write(indent)
+ out.write(m.Indent)
+ }
+ out.write(`"`)
+ out.write(prop.JSONName)
+ out.write(`":`)
+ if m.Indent != "" {
+ out.write(" ")
+ }
+ if err := m.marshalValue(out, prop, v, indent); err != nil {
+ return err
+ }
+ return nil
+}
+
+// marshalValue writes the value to the Writer.
+func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
+ var err error
+ v = reflect.Indirect(v)
+
+ // Handle nil pointer
+ if v.Kind() == reflect.Invalid {
+ out.write("null")
+ return out.err
+ }
+
+ // Handle repeated elements.
+ if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
+ out.write("[")
+ comma := ""
+ for i := 0; i < v.Len(); i++ {
+ sliceVal := v.Index(i)
+ out.write(comma)
+ if m.Indent != "" {
+ out.write("\n")
+ out.write(indent)
+ out.write(m.Indent)
+ out.write(m.Indent)
+ }
+ if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil {
+ return err
+ }
+ comma = ","
+ }
+ if m.Indent != "" {
+ out.write("\n")
+ out.write(indent)
+ out.write(m.Indent)
+ }
+ out.write("]")
+ return out.err
+ }
+
+ // Handle well-known types.
+ // Most are handled up in marshalObject (because 99% are messages).
+ if wkt, ok := v.Interface().(wkt); ok {
+ switch wkt.XXX_WellKnownType() {
+ case "NullValue":
+ out.write("null")
+ return out.err
+ }
+ }
+
+ // Handle enumerations.
+ if !m.EnumsAsInts && prop.Enum != "" {
+ // Unknown enum values are stringified by the proto library as their
+ // value. Such values should _not_ be quoted or they will be interpreted
+ // as an enum string instead of their value.
+ enumStr := v.Interface().(fmt.Stringer).String()
+ var valStr string
+ if v.Kind() == reflect.Ptr {
+ valStr = strconv.Itoa(int(v.Elem().Int()))
+ } else {
+ valStr = strconv.Itoa(int(v.Int()))
+ }
+ isKnownEnum := enumStr != valStr
+ if isKnownEnum {
+ out.write(`"`)
+ }
+ out.write(enumStr)
+ if isKnownEnum {
+ out.write(`"`)
+ }
+ return out.err
+ }
+
+ // Handle nested messages.
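+ // A struct at this point is an embedded message; recurse through
+ // marshalObject with one more level of indentation and no type URL.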
+ if v.Kind() == reflect.Struct { + return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent, "") + } + + // Handle maps. + // Since Go randomizes map iteration, we sort keys for stable output. + if v.Kind() == reflect.Map { + out.write(`{`) + keys := v.MapKeys() + sort.Sort(mapKeys(keys)) + for i, k := range keys { + if i > 0 { + out.write(`,`) + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + out.write(m.Indent) + } + + b, err := json.Marshal(k.Interface()) + if err != nil { + return err + } + s := string(b) + + // If the JSON is not a string value, encode it again to make it one. + if !strings.HasPrefix(s, `"`) { + b, err := json.Marshal(s) + if err != nil { + return err + } + s = string(b) + } + + out.write(s) + out.write(`:`) + if m.Indent != "" { + out.write(` `) + } + + if err := m.marshalValue(out, prop, v.MapIndex(k), indent+m.Indent); err != nil { + return err + } + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write(`}`) + return out.err + } + + // Handle non-finite floats, e.g. NaN, Infinity and -Infinity. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + f := v.Float() + var sval string + switch { + case math.IsInf(f, 1): + sval = `"Infinity"` + case math.IsInf(f, -1): + sval = `"-Infinity"` + case math.IsNaN(f): + sval = `"NaN"` + } + if sval != "" { + out.write(sval) + return out.err + } + } + + // Default handling defers to the encoding/json library. + b, err := json.Marshal(v.Interface()) + if err != nil { + return err + } + needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64) + if needToQuote { + out.write(`"`) + } + out.write(string(b)) + if needToQuote { + out.write(`"`) + } + return out.err +} + +// Unmarshaler is a configurable object for converting from a JSON +// representation to a protocol buffer object. +type Unmarshaler struct { + // Whether to allow messages to contain unknown fields, as opposed to + // failing to unmarshal. + AllowUnknownFields bool + + // A custom URL resolver to use when unmarshaling Any messages from JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. +func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + inputValue := json.RawMessage{} + if err := dec.Decode(&inputValue); err != nil { + return err + } + return u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any options +// permutations of the related Marshaler. +func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error { + dec := json.NewDecoder(r) + return u.UnmarshalNext(dec, pb) +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. +func UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + return new(Unmarshaler).UnmarshalNext(dec, pb) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. 
This function is lenient and will decode any options +// permutations of the related Marshaler. +func Unmarshal(r io.Reader, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(r, pb) +} + +// UnmarshalString will populate the fields of a protocol buffer based +// on a JSON string. This function is lenient and will decode any options +// permutations of the related Marshaler. +func UnmarshalString(str string, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb) +} + +// unmarshalValue converts/copies a value into the target. +// prop may be nil. +func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error { + targetType := target.Type() + + // Allocate memory for pointer fields. + if targetType.Kind() == reflect.Ptr { + // If input value is "null" and target is a pointer type, then the field should be treated as not set + // UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue. + _, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler) + if string(inputValue) == "null" && targetType != reflect.TypeOf(&stpb.Value{}) && !isJSONPBUnmarshaler { + return nil + } + target.Set(reflect.New(targetType.Elem())) + + return u.unmarshalValue(target.Elem(), inputValue, prop) + } + + if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, []byte(inputValue)) + } + + // Handle well-known types that are not pointers. + if w, ok := target.Addr().Interface().(wkt); ok { + switch w.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + return u.unmarshalValue(target.Field(0), inputValue, prop) + case "Any": + // Use json.RawMessage pointer type instead of value to support pre-1.8 version. 
+ // 1.8 changed RawMessage.MarshalJSON from pointer type to value type, see + // https://github.com/golang/go/issues/14493 + var jsonFields map[string]*json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + val, ok := jsonFields["@type"] + if !ok || val == nil { + return errors.New("Any JSON doesn't have '@type'") + } + + var turl string + if err := json.Unmarshal([]byte(*val), &turl); err != nil { + return fmt.Errorf("can't unmarshal Any's '@type': %q", *val) + } + target.Field(0).SetString(turl) + + var m proto.Message + var err error + if u.AnyResolver != nil { + m, err = u.AnyResolver.Resolve(turl) + } else { + m, err = defaultResolveAny(turl) + } + if err != nil { + return err + } + + if _, ok := m.(wkt); ok { + val, ok := jsonFields["value"] + if !ok { + return errors.New("Any JSON doesn't have 'value'") + } + + if err := u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } else { + delete(jsonFields, "@type") + nestedProto, err := json.Marshal(jsonFields) + if err != nil { + return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) + } + + if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } + + b, err := proto.Marshal(m) + if err != nil { + return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err) + } + target.Field(1).SetBytes(b) + + return nil + case "Duration": + unq, err := strconv.Unquote(string(inputValue)) + if err != nil { + return err + } + + d, err := time.ParseDuration(unq) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + + ns := d.Nanoseconds() + s := ns / 1e9 + ns %= 1e9 + target.Field(0).SetInt(s) + target.Field(1).SetInt(ns) + return nil + case "Timestamp": + unq, err := strconv.Unquote(string(inputValue)) + if err != nil { + return err + } + + t, err := time.Parse(time.RFC3339Nano, unq) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + + target.Field(0).SetInt(t.Unix()) + target.Field(1).SetInt(int64(t.Nanosecond())) + return nil + case "Struct": + var m map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &m); err != nil { + return fmt.Errorf("bad StructValue: %v", err) + } + + target.Field(0).Set(reflect.ValueOf(map[string]*stpb.Value{})) + for k, jv := range m { + pv := &stpb.Value{} + if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil { + return fmt.Errorf("bad value in StructValue for key %q: %v", k, err) + } + target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv)) + } + return nil + case "ListValue": + var s []json.RawMessage + if err := json.Unmarshal(inputValue, &s); err != nil { + return fmt.Errorf("bad ListValue: %v", err) + } + + target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s), len(s)))) + for i, sv := range s { + if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil { + return err + } + } + return nil + case "Value": + ivStr := string(inputValue) + if ivStr == "null" { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_NullValue{})) + } else if v, err := strconv.ParseFloat(ivStr, 0); err == nil { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_NumberValue{v})) + } else if v, err := strconv.Unquote(ivStr); err == nil { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_StringValue{v})) + } else if v, err := strconv.ParseBool(ivStr); 
err == nil { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_BoolValue{v})) + } else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil { + lv := &stpb.ListValue{} + target.Field(0).Set(reflect.ValueOf(&stpb.Value_ListValue{lv})) + return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop) + } else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil { + sv := &stpb.Struct{} + target.Field(0).Set(reflect.ValueOf(&stpb.Value_StructValue{sv})) + return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop) + } else { + return fmt.Errorf("unrecognized type for Value %q", ivStr) + } + return nil + } + } + + // Handle enums, which have an underlying type of int32, + // and may appear as strings. + // The case of an enum appearing as a number is handled + // at the bottom of this function. + if inputValue[0] == '"' && prop != nil && prop.Enum != "" { + vmap := proto.EnumValueMap(prop.Enum) + // Don't need to do unquoting; valid enum names + // are from a limited character set. + s := inputValue[1 : len(inputValue)-1] + n, ok := vmap[string(s)] + if !ok { + return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum) + } + if target.Kind() == reflect.Ptr { // proto2 + target.Set(reflect.New(targetType.Elem())) + target = target.Elem() + } + target.SetInt(int64(n)) + return nil + } + + // Handle nested messages. + if targetType.Kind() == reflect.Struct { + var jsonFields map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + consumeField := func(prop *proto.Properties) (json.RawMessage, bool) { + // Be liberal in what names we accept; both orig_name and camelName are okay. + fieldNames := acceptedJSONFieldNames(prop) + + vOrig, okOrig := jsonFields[fieldNames.orig] + vCamel, okCamel := jsonFields[fieldNames.camel] + if !okOrig && !okCamel { + return nil, false + } + // If, for some reason, both are present in the data, favour the camelName. + var raw json.RawMessage + if okOrig { + raw = vOrig + delete(jsonFields, fieldNames.orig) + } + if okCamel { + raw = vCamel + delete(jsonFields, fieldNames.camel) + } + return raw, true + } + + sprops := proto.GetProperties(targetType) + for i := 0; i < target.NumField(); i++ { + ft := target.Type().Field(i) + if strings.HasPrefix(ft.Name, "XXX_") { + continue + } + + valueForField, ok := consumeField(sprops.Prop[i]) + if !ok { + continue + } + + if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil { + return err + } + } + // Check for any oneof fields. + if len(jsonFields) > 0 { + for _, oop := range sprops.OneofTypes { + raw, ok := consumeField(oop.Prop) + if !ok { + continue + } + nv := reflect.New(oop.Type.Elem()) + target.Field(oop.Field).Set(nv) + if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil { + return err + } + } + } + // Handle proto2 extensions. + if len(jsonFields) > 0 { + if ep, ok := target.Addr().Interface().(proto.Message); ok { + for _, ext := range proto.RegisteredExtensions(ep) { + name := fmt.Sprintf("[%s]", ext.Name) + raw, ok := jsonFields[name] + if !ok { + continue + } + delete(jsonFields, name) + nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem()) + if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil { + return err + } + if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil { + return err + } + } + } + } + if !u.AllowUnknownFields && len(jsonFields) > 0 { + // Pick any field to be the scapegoat. 
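+ // (Go randomizes map iteration order, so which field gets blamed can
+ // differ from run to run.)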
+ var f string + for fname := range jsonFields { + f = fname + break + } + return fmt.Errorf("unknown field %q in %v", f, targetType) + } + return nil + } + + // Handle arrays (which aren't encoded bytes) + if targetType.Kind() == reflect.Slice && targetType.Elem().Kind() != reflect.Uint8 { + var slc []json.RawMessage + if err := json.Unmarshal(inputValue, &slc); err != nil { + return err + } + if slc != nil { + l := len(slc) + target.Set(reflect.MakeSlice(targetType, l, l)) + for i := 0; i < l; i++ { + if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { + return err + } + } + } + return nil + } + + // Handle maps (whose keys are always strings) + if targetType.Kind() == reflect.Map { + var mp map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &mp); err != nil { + return err + } + if mp != nil { + target.Set(reflect.MakeMap(targetType)) + var keyprop, valprop *proto.Properties + if prop != nil { + // These could still be nil if the protobuf metadata is broken somehow. + // TODO: This won't work because the fields are unexported. + // We should probably just reparse them. + //keyprop, valprop = prop.mkeyprop, prop.mvalprop + } + for ks, raw := range mp { + // Unmarshal map key. The core json library already decoded the key into a + // string, so we handle that specially. Other types were quoted post-serialization. + var k reflect.Value + if targetType.Key().Kind() == reflect.String { + k = reflect.ValueOf(ks) + } else { + k = reflect.New(targetType.Key()).Elem() + if err := u.unmarshalValue(k, json.RawMessage(ks), keyprop); err != nil { + return err + } + } + + // Unmarshal map value. + v := reflect.New(targetType.Elem()).Elem() + if err := u.unmarshalValue(v, raw, valprop); err != nil { + return err + } + target.SetMapIndex(k, v) + } + } + return nil + } + + // 64-bit integers can be encoded as strings. In this case we drop + // the quotes and proceed as normal. + isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 + if isNum && strings.HasPrefix(string(inputValue), `"`) { + inputValue = inputValue[1 : len(inputValue)-1] + } + + // Non-finite numbers can be encoded as strings. + isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 + if isFloat { + if num, ok := nonFinite[string(inputValue)]; ok { + target.SetFloat(num) + return nil + } + } + + // Use the encoding/json for parsing other value types. + return json.Unmarshal(inputValue, target.Addr().Interface()) +} + +// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute. +func jsonProperties(f reflect.StructField, origName bool) *proto.Properties { + var prop proto.Properties + prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f) + if origName || prop.JSONName == "" { + prop.JSONName = prop.OrigName + } + return &prop +} + +type fieldNames struct { + orig, camel string +} + +func acceptedJSONFieldNames(prop *proto.Properties) fieldNames { + opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName} + if prop.JSONName != "" { + opts.camel = prop.JSONName + } + return opts +} + +// Writer wrapper inspired by https://blog.golang.org/errors-are-values +type errWriter struct { + writer io.Writer + err error +} + +func (w *errWriter) write(str string) { + if w.err != nil { + return + } + _, w.err = w.writer.Write([]byte(str)) +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. 
+// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. +// +// Numeric keys are sorted in numeric order per +// https://developers.google.com/protocol-buffers/docs/proto#maps. +type mapKeys []reflect.Value + +func (s mapKeys) Len() int { return len(s) } +func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s mapKeys) Less(i, j int) bool { + if k := s[i].Kind(); k == s[j].Kind() { + switch k { + case reflect.Int32, reflect.Int64: + return s[i].Int() < s[j].Int() + case reflect.Uint32, reflect.Uint64: + return s[i].Uint() < s[j].Uint() + } + } + return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go new file mode 100644 index 0000000..2428d05 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go @@ -0,0 +1,896 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package jsonpb + +import ( + "bytes" + "encoding/json" + "io" + "math" + "reflect" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + + pb "github.com/golang/protobuf/jsonpb/jsonpb_test_proto" + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + "github.com/golang/protobuf/ptypes" + anypb "github.com/golang/protobuf/ptypes/any" + durpb "github.com/golang/protobuf/ptypes/duration" + stpb "github.com/golang/protobuf/ptypes/struct" + tspb "github.com/golang/protobuf/ptypes/timestamp" + wpb "github.com/golang/protobuf/ptypes/wrappers" +) + +var ( + marshaler = Marshaler{} + + marshalerAllOptions = Marshaler{ + Indent: " ", + } + + simpleObject = &pb.Simple{ + OInt32: proto.Int32(-32), + OInt64: proto.Int64(-6400000000), + OUint32: proto.Uint32(32), + OUint64: proto.Uint64(6400000000), + OSint32: proto.Int32(-13), + OSint64: proto.Int64(-2600000000), + OFloat: proto.Float32(3.14), + ODouble: proto.Float64(6.02214179e23), + OBool: proto.Bool(true), + OString: proto.String("hello \"there\""), + OBytes: []byte("beep boop"), + } + + simpleObjectJSON = `{` + + `"oBool":true,` + + `"oInt32":-32,` + + `"oInt64":"-6400000000",` + + `"oUint32":32,` + + `"oUint64":"6400000000",` + + `"oSint32":-13,` + + `"oSint64":"-2600000000",` + + `"oFloat":3.14,` + + `"oDouble":6.02214179e+23,` + + `"oString":"hello \"there\"",` + + `"oBytes":"YmVlcCBib29w"` + + `}` + + simpleObjectPrettyJSON = `{ + "oBool": true, + "oInt32": -32, + "oInt64": "-6400000000", + "oUint32": 32, + "oUint64": "6400000000", + "oSint32": -13, + "oSint64": "-2600000000", + "oFloat": 3.14, + "oDouble": 6.02214179e+23, + "oString": "hello \"there\"", + "oBytes": "YmVlcCBib29w" +}` + + repeatsObject = &pb.Repeats{ + RBool: []bool{true, false, true}, + RInt32: []int32{-3, -4, -5}, + RInt64: []int64{-123456789, -987654321}, + RUint32: []uint32{1, 2, 3}, + RUint64: []uint64{6789012345, 3456789012}, + RSint32: []int32{-1, -2, -3}, + RSint64: []int64{-6789012345, -3456789012}, + RFloat: []float32{3.14, 6.28}, + RDouble: []float64{299792458 * 1e20, 6.62606957e-34}, + RString: []string{"happy", "days"}, + RBytes: [][]byte{[]byte("skittles"), []byte("m&m's")}, + } + + repeatsObjectJSON = `{` + + `"rBool":[true,false,true],` + + `"rInt32":[-3,-4,-5],` + + `"rInt64":["-123456789","-987654321"],` + + `"rUint32":[1,2,3],` + + `"rUint64":["6789012345","3456789012"],` + + `"rSint32":[-1,-2,-3],` + + `"rSint64":["-6789012345","-3456789012"],` + + `"rFloat":[3.14,6.28],` + + `"rDouble":[2.99792458e+28,6.62606957e-34],` + + `"rString":["happy","days"],` + + `"rBytes":["c2tpdHRsZXM=","bSZtJ3M="]` + + `}` + + repeatsObjectPrettyJSON = `{ + "rBool": [ + true, + false, + true + ], + "rInt32": [ + -3, + -4, + -5 + ], + "rInt64": [ + "-123456789", + "-987654321" + ], + "rUint32": [ + 1, + 2, + 3 + ], + "rUint64": [ + "6789012345", + "3456789012" + ], + "rSint32": [ + -1, + -2, + -3 + ], + "rSint64": [ + "-6789012345", + "-3456789012" + ], + "rFloat": [ + 3.14, + 6.28 + ], + "rDouble": [ + 2.99792458e+28, + 6.62606957e-34 + ], + "rString": [ + "happy", + "days" + ], + "rBytes": [ + "c2tpdHRsZXM=", + "bSZtJ3M=" + ] +}` + + innerSimple = &pb.Simple{OInt32: proto.Int32(-32)} + innerSimple2 = &pb.Simple{OInt64: proto.Int64(25)} + innerRepeats = &pb.Repeats{RString: []string{"roses", "red"}} + innerRepeats2 = &pb.Repeats{RString: []string{"violets", "blue"}} + complexObject = &pb.Widget{ + Color: pb.Widget_GREEN.Enum(), + RColor: []pb.Widget_Color{pb.Widget_RED, pb.Widget_GREEN, pb.Widget_BLUE}, + Simple: innerSimple, + RSimple: 
[]*pb.Simple{innerSimple, innerSimple2}, + Repeats: innerRepeats, + RRepeats: []*pb.Repeats{innerRepeats, innerRepeats2}, + } + + complexObjectJSON = `{"color":"GREEN",` + + `"rColor":["RED","GREEN","BLUE"],` + + `"simple":{"oInt32":-32},` + + `"rSimple":[{"oInt32":-32},{"oInt64":"25"}],` + + `"repeats":{"rString":["roses","red"]},` + + `"rRepeats":[{"rString":["roses","red"]},{"rString":["violets","blue"]}]` + + `}` + + complexObjectPrettyJSON = `{ + "color": "GREEN", + "rColor": [ + "RED", + "GREEN", + "BLUE" + ], + "simple": { + "oInt32": -32 + }, + "rSimple": [ + { + "oInt32": -32 + }, + { + "oInt64": "25" + } + ], + "repeats": { + "rString": [ + "roses", + "red" + ] + }, + "rRepeats": [ + { + "rString": [ + "roses", + "red" + ] + }, + { + "rString": [ + "violets", + "blue" + ] + } + ] +}` + + colorPrettyJSON = `{ + "color": 2 +}` + + colorListPrettyJSON = `{ + "color": 1000, + "rColor": [ + "RED" + ] +}` + + nummyPrettyJSON = `{ + "nummy": { + "1": 2, + "3": 4 + } +}` + + objjyPrettyJSON = `{ + "objjy": { + "1": { + "dub": 1 + } + } +}` + realNumber = &pb.Real{Value: proto.Float64(3.14159265359)} + realNumberName = "Pi" + complexNumber = &pb.Complex{Imaginary: proto.Float64(0.5772156649)} + realNumberJSON = `{` + + `"value":3.14159265359,` + + `"[jsonpb.Complex.real_extension]":{"imaginary":0.5772156649},` + + `"[jsonpb.name]":"Pi"` + + `}` + + anySimple = &pb.KnownTypes{ + An: &anypb.Any{ + TypeUrl: "something.example.com/jsonpb.Simple", + Value: []byte{ + // &pb.Simple{OBool:true} + 1 << 3, 1, + }, + }, + } + anySimpleJSON = `{"an":{"@type":"something.example.com/jsonpb.Simple","oBool":true}}` + anySimplePrettyJSON = `{ + "an": { + "@type": "something.example.com/jsonpb.Simple", + "oBool": true + } +}` + + anyWellKnown = &pb.KnownTypes{ + An: &anypb.Any{ + TypeUrl: "type.googleapis.com/google.protobuf.Duration", + Value: []byte{ + // &durpb.Duration{Seconds: 1, Nanos: 212000000 } + 1 << 3, 1, // seconds + 2 << 3, 0x80, 0xba, 0x8b, 0x65, // nanos + }, + }, + } + anyWellKnownJSON = `{"an":{"@type":"type.googleapis.com/google.protobuf.Duration","value":"1.212s"}}` + anyWellKnownPrettyJSON = `{ + "an": { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } +}` + + nonFinites = &pb.NonFinites{ + FNan: proto.Float32(float32(math.NaN())), + FPinf: proto.Float32(float32(math.Inf(1))), + FNinf: proto.Float32(float32(math.Inf(-1))), + DNan: proto.Float64(float64(math.NaN())), + DPinf: proto.Float64(float64(math.Inf(1))), + DNinf: proto.Float64(float64(math.Inf(-1))), + } + nonFinitesJSON = `{` + + `"fNan":"NaN",` + + `"fPinf":"Infinity",` + + `"fNinf":"-Infinity",` + + `"dNan":"NaN",` + + `"dPinf":"Infinity",` + + `"dNinf":"-Infinity"` + + `}` +) + +func init() { + if err := proto.SetExtension(realNumber, pb.E_Name, &realNumberName); err != nil { + panic(err) + } + if err := proto.SetExtension(realNumber, pb.E_Complex_RealExtension, complexNumber); err != nil { + panic(err) + } +} + +var marshalingTests = []struct { + desc string + marshaler Marshaler + pb proto.Message + json string +}{ + {"simple flat object", marshaler, simpleObject, simpleObjectJSON}, + {"simple pretty object", marshalerAllOptions, simpleObject, simpleObjectPrettyJSON}, + {"non-finite floats fields object", marshaler, nonFinites, nonFinitesJSON}, + {"repeated fields flat object", marshaler, repeatsObject, repeatsObjectJSON}, + {"repeated fields pretty object", marshalerAllOptions, repeatsObject, repeatsObjectPrettyJSON}, + {"nested message/enum flat object", marshaler, complexObject, 
complexObjectJSON}, + {"nested message/enum pretty object", marshalerAllOptions, complexObject, complexObjectPrettyJSON}, + {"enum-string flat object", Marshaler{}, + &pb.Widget{Color: pb.Widget_BLUE.Enum()}, `{"color":"BLUE"}`}, + {"enum-value pretty object", Marshaler{EnumsAsInts: true, Indent: " "}, + &pb.Widget{Color: pb.Widget_BLUE.Enum()}, colorPrettyJSON}, + {"unknown enum value object", marshalerAllOptions, + &pb.Widget{Color: pb.Widget_Color(1000).Enum(), RColor: []pb.Widget_Color{pb.Widget_RED}}, colorListPrettyJSON}, + {"repeated proto3 enum", Marshaler{}, + &proto3pb.Message{RFunny: []proto3pb.Message_Humour{ + proto3pb.Message_PUNS, + proto3pb.Message_SLAPSTICK, + }}, + `{"rFunny":["PUNS","SLAPSTICK"]}`}, + {"repeated proto3 enum as int", Marshaler{EnumsAsInts: true}, + &proto3pb.Message{RFunny: []proto3pb.Message_Humour{ + proto3pb.Message_PUNS, + proto3pb.Message_SLAPSTICK, + }}, + `{"rFunny":[1,2]}`}, + {"empty value", marshaler, &pb.Simple3{}, `{}`}, + {"empty value emitted", Marshaler{EmitDefaults: true}, &pb.Simple3{}, `{"dub":0}`}, + {"empty repeated emitted", Marshaler{EmitDefaults: true}, &pb.SimpleSlice3{}, `{"slices":[]}`}, + {"empty map emitted", Marshaler{EmitDefaults: true}, &pb.SimpleMap3{}, `{"stringy":{}}`}, + {"nested struct null", Marshaler{EmitDefaults: true}, &pb.SimpleNull3{}, `{"simple":null}`}, + {"map", marshaler, &pb.Mappy{Nummy: map[int64]int32{1: 2, 3: 4}}, `{"nummy":{"1":2,"3":4}}`}, + {"map", marshalerAllOptions, &pb.Mappy{Nummy: map[int64]int32{1: 2, 3: 4}}, nummyPrettyJSON}, + {"map", marshaler, + &pb.Mappy{Strry: map[string]string{`"one"`: "two", "three": "four"}}, + `{"strry":{"\"one\"":"two","three":"four"}}`}, + {"map", marshaler, + &pb.Mappy{Objjy: map[int32]*pb.Simple3{1: {Dub: 1}}}, `{"objjy":{"1":{"dub":1}}}`}, + {"map", marshalerAllOptions, + &pb.Mappy{Objjy: map[int32]*pb.Simple3{1: {Dub: 1}}}, objjyPrettyJSON}, + {"map", marshaler, &pb.Mappy{Buggy: map[int64]string{1234: "yup"}}, + `{"buggy":{"1234":"yup"}}`}, + {"map", marshaler, &pb.Mappy{Booly: map[bool]bool{false: true}}, `{"booly":{"false":true}}`}, + // TODO: This is broken. 
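+ // The disabled case below covers maps with enum-typed values, which
+ // should marshal as enum names; presumably the enum metadata for map
+ // values is not reaching marshalValue.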
+ //{"map", marshaler, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}, `{"enumy":{"XIV":"ROMAN"}`}, + {"map", Marshaler{EnumsAsInts: true}, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}, `{"enumy":{"XIV":2}}`}, + {"map", marshaler, &pb.Mappy{S32Booly: map[int32]bool{1: true, 3: false, 10: true, 12: false}}, `{"s32booly":{"1":true,"3":false,"10":true,"12":false}}`}, + {"map", marshaler, &pb.Mappy{S64Booly: map[int64]bool{1: true, 3: false, 10: true, 12: false}}, `{"s64booly":{"1":true,"3":false,"10":true,"12":false}}`}, + {"map", marshaler, &pb.Mappy{U32Booly: map[uint32]bool{1: true, 3: false, 10: true, 12: false}}, `{"u32booly":{"1":true,"3":false,"10":true,"12":false}}`}, + {"map", marshaler, &pb.Mappy{U64Booly: map[uint64]bool{1: true, 3: false, 10: true, 12: false}}, `{"u64booly":{"1":true,"3":false,"10":true,"12":false}}`}, + {"proto2 map", marshaler, &pb.Maps{MInt64Str: map[int64]string{213: "cat"}}, + `{"mInt64Str":{"213":"cat"}}`}, + {"proto2 map", marshaler, + &pb.Maps{MBoolSimple: map[bool]*pb.Simple{true: {OInt32: proto.Int32(1)}}}, + `{"mBoolSimple":{"true":{"oInt32":1}}}`}, + {"oneof, not set", marshaler, &pb.MsgWithOneof{}, `{}`}, + {"oneof, set", marshaler, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Title{"Grand Poobah"}}, `{"title":"Grand Poobah"}`}, + {"force orig_name", Marshaler{OrigName: true}, &pb.Simple{OInt32: proto.Int32(4)}, + `{"o_int32":4}`}, + {"proto2 extension", marshaler, realNumber, realNumberJSON}, + {"Any with message", marshaler, anySimple, anySimpleJSON}, + {"Any with message and indent", marshalerAllOptions, anySimple, anySimplePrettyJSON}, + {"Any with WKT", marshaler, anyWellKnown, anyWellKnownJSON}, + {"Any with WKT and indent", marshalerAllOptions, anyWellKnown, anyWellKnownPrettyJSON}, + {"Duration", marshaler, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3}}, `{"dur":"3.000s"}`}, + {"Struct", marshaler, &pb.KnownTypes{St: &stpb.Struct{ + Fields: map[string]*stpb.Value{ + "one": {Kind: &stpb.Value_StringValue{"loneliest number"}}, + "two": {Kind: &stpb.Value_NullValue{stpb.NullValue_NULL_VALUE}}, + }, + }}, `{"st":{"one":"loneliest number","two":null}}`}, + {"empty ListValue", marshaler, &pb.KnownTypes{Lv: &stpb.ListValue{}}, `{"lv":[]}`}, + {"basic ListValue", marshaler, &pb.KnownTypes{Lv: &stpb.ListValue{Values: []*stpb.Value{ + {Kind: &stpb.Value_StringValue{"x"}}, + {Kind: &stpb.Value_NullValue{}}, + {Kind: &stpb.Value_NumberValue{3}}, + {Kind: &stpb.Value_BoolValue{true}}, + }}}, `{"lv":["x",null,3,true]}`}, + {"Timestamp", marshaler, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 14e8, Nanos: 21e6}}, `{"ts":"2014-05-13T16:53:20.021Z"}`}, + {"number Value", marshaler, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_NumberValue{1}}}, `{"val":1}`}, + {"null Value", marshaler, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_NullValue{stpb.NullValue_NULL_VALUE}}}, `{"val":null}`}, + {"string number value", marshaler, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_StringValue{"9223372036854775807"}}}, `{"val":"9223372036854775807"}`}, + {"list of lists Value", marshaler, &pb.KnownTypes{Val: &stpb.Value{ + Kind: &stpb.Value_ListValue{&stpb.ListValue{ + Values: []*stpb.Value{ + {Kind: &stpb.Value_StringValue{"x"}}, + {Kind: &stpb.Value_ListValue{&stpb.ListValue{ + Values: []*stpb.Value{ + {Kind: &stpb.Value_ListValue{&stpb.ListValue{ + Values: []*stpb.Value{{Kind: &stpb.Value_StringValue{"y"}}}, + }}}, + {Kind: &stpb.Value_StringValue{"z"}}, + }, + }}}, + }, + }}, + }}, `{"val":["x",[["y"],"z"]]}`}, + + 
{"DoubleValue", marshaler, &pb.KnownTypes{Dbl: &wpb.DoubleValue{Value: 1.2}}, `{"dbl":1.2}`}, + {"FloatValue", marshaler, &pb.KnownTypes{Flt: &wpb.FloatValue{Value: 1.2}}, `{"flt":1.2}`}, + {"Int64Value", marshaler, &pb.KnownTypes{I64: &wpb.Int64Value{Value: -3}}, `{"i64":"-3"}`}, + {"UInt64Value", marshaler, &pb.KnownTypes{U64: &wpb.UInt64Value{Value: 3}}, `{"u64":"3"}`}, + {"Int32Value", marshaler, &pb.KnownTypes{I32: &wpb.Int32Value{Value: -4}}, `{"i32":-4}`}, + {"UInt32Value", marshaler, &pb.KnownTypes{U32: &wpb.UInt32Value{Value: 4}}, `{"u32":4}`}, + {"BoolValue", marshaler, &pb.KnownTypes{Bool: &wpb.BoolValue{Value: true}}, `{"bool":true}`}, + {"StringValue", marshaler, &pb.KnownTypes{Str: &wpb.StringValue{Value: "plush"}}, `{"str":"plush"}`}, + {"BytesValue", marshaler, &pb.KnownTypes{Bytes: &wpb.BytesValue{Value: []byte("wow")}}, `{"bytes":"d293"}`}, +} + +func TestMarshaling(t *testing.T) { + for _, tt := range marshalingTests { + json, err := tt.marshaler.MarshalToString(tt.pb) + if err != nil { + t.Errorf("%s: marshaling error: %v", tt.desc, err) + } else if tt.json != json { + t.Errorf("%s: got [%v] want [%v]", tt.desc, json, tt.json) + } + } +} + +func TestMarshalJSONPBMarshaler(t *testing.T) { + rawJson := `{ "foo": "bar", "baz": [0, 1, 2, 3] }` + msg := dynamicMessage{rawJson: rawJson} + str, err := new(Marshaler).MarshalToString(&msg) + if err != nil { + t.Errorf("an unexpected error occurred when marshalling JSONPBMarshaler: %v", err) + } + if str != rawJson { + t.Errorf("marshalling JSON produced incorrect output: got %s, wanted %s", str, rawJson) + } +} + +func TestMarshalAnyJSONPBMarshaler(t *testing.T) { + msg := dynamicMessage{rawJson: `{ "foo": "bar", "baz": [0, 1, 2, 3] }`} + a, err := ptypes.MarshalAny(&msg) + if err != nil { + t.Errorf("an unexpected error occurred when marshalling to Any: %v", err) + } + str, err := new(Marshaler).MarshalToString(a) + if err != nil { + t.Errorf("an unexpected error occurred when marshalling Any to JSON: %v", err) + } + // after custom marshaling, it's round-tripped through JSON decoding/encoding already, + // so the keys are sorted, whitespace is compacted, and "@type" key has been added + expected := `{"@type":"type.googleapis.com/` + dynamicMessageName + `","baz":[0,1,2,3],"foo":"bar"}` + if str != expected { + t.Errorf("marshalling JSON produced incorrect output: got %s, wanted %s", str, expected) + } +} + +var unmarshalingTests = []struct { + desc string + unmarshaler Unmarshaler + json string + pb proto.Message +}{ + {"simple flat object", Unmarshaler{}, simpleObjectJSON, simpleObject}, + {"simple pretty object", Unmarshaler{}, simpleObjectPrettyJSON, simpleObject}, + {"repeated fields flat object", Unmarshaler{}, repeatsObjectJSON, repeatsObject}, + {"repeated fields pretty object", Unmarshaler{}, repeatsObjectPrettyJSON, repeatsObject}, + {"nested message/enum flat object", Unmarshaler{}, complexObjectJSON, complexObject}, + {"nested message/enum pretty object", Unmarshaler{}, complexObjectPrettyJSON, complexObject}, + {"enum-string object", Unmarshaler{}, `{"color":"BLUE"}`, &pb.Widget{Color: pb.Widget_BLUE.Enum()}}, + {"enum-value object", Unmarshaler{}, "{\n \"color\": 2\n}", &pb.Widget{Color: pb.Widget_BLUE.Enum()}}, + {"unknown field with allowed option", Unmarshaler{AllowUnknownFields: true}, `{"unknown": "foo"}`, new(pb.Simple)}, + {"proto3 enum string", Unmarshaler{}, `{"hilarity":"PUNS"}`, &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, + {"proto3 enum value", Unmarshaler{}, `{"hilarity":1}`, 
&proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, + {"unknown enum value object", + Unmarshaler{}, + "{\n \"color\": 1000,\n \"r_color\": [\n \"RED\"\n ]\n}", + &pb.Widget{Color: pb.Widget_Color(1000).Enum(), RColor: []pb.Widget_Color{pb.Widget_RED}}}, + {"repeated proto3 enum", Unmarshaler{}, `{"rFunny":["PUNS","SLAPSTICK"]}`, + &proto3pb.Message{RFunny: []proto3pb.Message_Humour{ + proto3pb.Message_PUNS, + proto3pb.Message_SLAPSTICK, + }}}, + {"repeated proto3 enum as int", Unmarshaler{}, `{"rFunny":[1,2]}`, + &proto3pb.Message{RFunny: []proto3pb.Message_Humour{ + proto3pb.Message_PUNS, + proto3pb.Message_SLAPSTICK, + }}}, + {"repeated proto3 enum as mix of strings and ints", Unmarshaler{}, `{"rFunny":["PUNS",2]}`, + &proto3pb.Message{RFunny: []proto3pb.Message_Humour{ + proto3pb.Message_PUNS, + proto3pb.Message_SLAPSTICK, + }}}, + {"unquoted int64 object", Unmarshaler{}, `{"oInt64":-314}`, &pb.Simple{OInt64: proto.Int64(-314)}}, + {"unquoted uint64 object", Unmarshaler{}, `{"oUint64":123}`, &pb.Simple{OUint64: proto.Uint64(123)}}, + {"NaN", Unmarshaler{}, `{"oDouble":"NaN"}`, &pb.Simple{ODouble: proto.Float64(math.NaN())}}, + {"Inf", Unmarshaler{}, `{"oFloat":"Infinity"}`, &pb.Simple{OFloat: proto.Float32(float32(math.Inf(1)))}}, + {"-Inf", Unmarshaler{}, `{"oDouble":"-Infinity"}`, &pb.Simple{ODouble: proto.Float64(math.Inf(-1))}}, + {"map", Unmarshaler{}, `{"nummy":{"1":2,"3":4}}`, &pb.Mappy{Nummy: map[int64]int32{1: 2, 3: 4}}}, + {"map", Unmarshaler{}, `{"strry":{"\"one\"":"two","three":"four"}}`, &pb.Mappy{Strry: map[string]string{`"one"`: "two", "three": "four"}}}, + {"map", Unmarshaler{}, `{"objjy":{"1":{"dub":1}}}`, &pb.Mappy{Objjy: map[int32]*pb.Simple3{1: {Dub: 1}}}}, + {"proto2 extension", Unmarshaler{}, realNumberJSON, realNumber}, + {"Any with message", Unmarshaler{}, anySimpleJSON, anySimple}, + {"Any with message and indent", Unmarshaler{}, anySimplePrettyJSON, anySimple}, + {"Any with WKT", Unmarshaler{}, anyWellKnownJSON, anyWellKnown}, + {"Any with WKT and indent", Unmarshaler{}, anyWellKnownPrettyJSON, anyWellKnown}, + // TODO: This is broken. 
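+ // The disabled case below is the unmarshaling counterpart: enum map
+ // values given as string names (see the TODO about nil valprop in
+ // unmarshalValue's map handling).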
+ //{"map", Unmarshaler{}, `{"enumy":{"XIV":"ROMAN"}`, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}}, + {"map", Unmarshaler{}, `{"enumy":{"XIV":2}}`, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}}, + {"oneof", Unmarshaler{}, `{"salary":31000}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Salary{31000}}}, + {"oneof spec name", Unmarshaler{}, `{"Country":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Country{"Australia"}}}, + {"oneof orig_name", Unmarshaler{}, `{"Country":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Country{"Australia"}}}, + {"oneof spec name2", Unmarshaler{}, `{"homeAddress":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_HomeAddress{"Australia"}}}, + {"oneof orig_name2", Unmarshaler{}, `{"home_address":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_HomeAddress{"Australia"}}}, + {"orig_name input", Unmarshaler{}, `{"o_bool":true}`, &pb.Simple{OBool: proto.Bool(true)}}, + {"camelName input", Unmarshaler{}, `{"oBool":true}`, &pb.Simple{OBool: proto.Bool(true)}}, + + {"Duration", Unmarshaler{}, `{"dur":"3.000s"}`, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3}}}, + {"null Duration", Unmarshaler{}, `{"dur":null}`, &pb.KnownTypes{Dur: nil}}, + {"Timestamp", Unmarshaler{}, `{"ts":"2014-05-13T16:53:20.021Z"}`, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 14e8, Nanos: 21e6}}}, + {"PreEpochTimestamp", Unmarshaler{}, `{"ts":"1969-12-31T23:59:58.999999995Z"}`, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: -2, Nanos: 999999995}}}, + {"ZeroTimeTimestamp", Unmarshaler{}, `{"ts":"0001-01-01T00:00:00Z"}`, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: -62135596800, Nanos: 0}}}, + {"null Timestamp", Unmarshaler{}, `{"ts":null}`, &pb.KnownTypes{Ts: nil}}, + {"null Struct", Unmarshaler{}, `{"st": null}`, &pb.KnownTypes{St: nil}}, + {"empty Struct", Unmarshaler{}, `{"st": {}}`, &pb.KnownTypes{St: &stpb.Struct{}}}, + {"basic Struct", Unmarshaler{}, `{"st": {"a": "x", "b": null, "c": 3, "d": true}}`, &pb.KnownTypes{St: &stpb.Struct{Fields: map[string]*stpb.Value{ + "a": {Kind: &stpb.Value_StringValue{"x"}}, + "b": {Kind: &stpb.Value_NullValue{}}, + "c": {Kind: &stpb.Value_NumberValue{3}}, + "d": {Kind: &stpb.Value_BoolValue{true}}, + }}}}, + {"nested Struct", Unmarshaler{}, `{"st": {"a": {"b": 1, "c": [{"d": true}, "f"]}}}`, &pb.KnownTypes{St: &stpb.Struct{Fields: map[string]*stpb.Value{ + "a": {Kind: &stpb.Value_StructValue{&stpb.Struct{Fields: map[string]*stpb.Value{ + "b": {Kind: &stpb.Value_NumberValue{1}}, + "c": {Kind: &stpb.Value_ListValue{&stpb.ListValue{Values: []*stpb.Value{ + {Kind: &stpb.Value_StructValue{&stpb.Struct{Fields: map[string]*stpb.Value{"d": {Kind: &stpb.Value_BoolValue{true}}}}}}, + {Kind: &stpb.Value_StringValue{"f"}}, + }}}}, + }}}}, + }}}}, + {"null ListValue", Unmarshaler{}, `{"lv": null}`, &pb.KnownTypes{Lv: nil}}, + {"empty ListValue", Unmarshaler{}, `{"lv": []}`, &pb.KnownTypes{Lv: &stpb.ListValue{}}}, + {"basic ListValue", Unmarshaler{}, `{"lv": ["x", null, 3, true]}`, &pb.KnownTypes{Lv: &stpb.ListValue{Values: []*stpb.Value{ + {Kind: &stpb.Value_StringValue{"x"}}, + {Kind: &stpb.Value_NullValue{}}, + {Kind: &stpb.Value_NumberValue{3}}, + {Kind: &stpb.Value_BoolValue{true}}, + }}}}, + {"number Value", Unmarshaler{}, `{"val":1}`, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_NumberValue{1}}}}, + {"null Value", Unmarshaler{}, `{"val":null}`, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_NullValue{stpb.NullValue_NULL_VALUE}}}}, + {"bool Value", Unmarshaler{}, `{"val":true}`, 
&pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_BoolValue{true}}}}, + {"string Value", Unmarshaler{}, `{"val":"x"}`, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_StringValue{"x"}}}}, + {"string number value", Unmarshaler{}, `{"val":"9223372036854775807"}`, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_StringValue{"9223372036854775807"}}}}, + {"list of lists Value", Unmarshaler{}, `{"val":["x", [["y"], "z"]]}`, &pb.KnownTypes{Val: &stpb.Value{ + Kind: &stpb.Value_ListValue{&stpb.ListValue{ + Values: []*stpb.Value{ + {Kind: &stpb.Value_StringValue{"x"}}, + {Kind: &stpb.Value_ListValue{&stpb.ListValue{ + Values: []*stpb.Value{ + {Kind: &stpb.Value_ListValue{&stpb.ListValue{ + Values: []*stpb.Value{{Kind: &stpb.Value_StringValue{"y"}}}, + }}}, + {Kind: &stpb.Value_StringValue{"z"}}, + }, + }}}, + }, + }}}}}, + + {"DoubleValue", Unmarshaler{}, `{"dbl":1.2}`, &pb.KnownTypes{Dbl: &wpb.DoubleValue{Value: 1.2}}}, + {"FloatValue", Unmarshaler{}, `{"flt":1.2}`, &pb.KnownTypes{Flt: &wpb.FloatValue{Value: 1.2}}}, + {"Int64Value", Unmarshaler{}, `{"i64":"-3"}`, &pb.KnownTypes{I64: &wpb.Int64Value{Value: -3}}}, + {"UInt64Value", Unmarshaler{}, `{"u64":"3"}`, &pb.KnownTypes{U64: &wpb.UInt64Value{Value: 3}}}, + {"Int32Value", Unmarshaler{}, `{"i32":-4}`, &pb.KnownTypes{I32: &wpb.Int32Value{Value: -4}}}, + {"UInt32Value", Unmarshaler{}, `{"u32":4}`, &pb.KnownTypes{U32: &wpb.UInt32Value{Value: 4}}}, + {"BoolValue", Unmarshaler{}, `{"bool":true}`, &pb.KnownTypes{Bool: &wpb.BoolValue{Value: true}}}, + {"StringValue", Unmarshaler{}, `{"str":"plush"}`, &pb.KnownTypes{Str: &wpb.StringValue{Value: "plush"}}}, + {"BytesValue", Unmarshaler{}, `{"bytes":"d293"}`, &pb.KnownTypes{Bytes: &wpb.BytesValue{Value: []byte("wow")}}}, + + // Ensure that `null` as a value ends up with a nil pointer instead of a [type]Value struct. + {"null DoubleValue", Unmarshaler{}, `{"dbl":null}`, &pb.KnownTypes{Dbl: nil}}, + {"null FloatValue", Unmarshaler{}, `{"flt":null}`, &pb.KnownTypes{Flt: nil}}, + {"null Int64Value", Unmarshaler{}, `{"i64":null}`, &pb.KnownTypes{I64: nil}}, + {"null UInt64Value", Unmarshaler{}, `{"u64":null}`, &pb.KnownTypes{U64: nil}}, + {"null Int32Value", Unmarshaler{}, `{"i32":null}`, &pb.KnownTypes{I32: nil}}, + {"null UInt32Value", Unmarshaler{}, `{"u32":null}`, &pb.KnownTypes{U32: nil}}, + {"null BoolValue", Unmarshaler{}, `{"bool":null}`, &pb.KnownTypes{Bool: nil}}, + {"null StringValue", Unmarshaler{}, `{"str":null}`, &pb.KnownTypes{Str: nil}}, + {"null BytesValue", Unmarshaler{}, `{"bytes":null}`, &pb.KnownTypes{Bytes: nil}}, +} + +func TestUnmarshaling(t *testing.T) { + for _, tt := range unmarshalingTests { + // Make a new instance of the type of our expected object. + p := reflect.New(reflect.TypeOf(tt.pb).Elem()).Interface().(proto.Message) + + err := tt.unmarshaler.Unmarshal(strings.NewReader(tt.json), p) + if err != nil { + t.Errorf("%s: %v", tt.desc, err) + continue + } + + // For easier diffs, compare text strings of the protos. 
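+ // proto.MarshalTextString renders each message in the proto text
+ // format, which reads better in failure messages than raw struct dumps.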
+ exp := proto.MarshalTextString(tt.pb) + act := proto.MarshalTextString(p) + if string(exp) != string(act) { + t.Errorf("%s: got [%s] want [%s]", tt.desc, act, exp) + } + } +} + +func TestUnmarshalNullArray(t *testing.T) { + var repeats pb.Repeats + if err := UnmarshalString(`{"rBool":null}`, &repeats); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(repeats, pb.Repeats{}) { + t.Errorf("got non-nil fields in [%#v]", repeats) + } +} + +func TestUnmarshalNullObject(t *testing.T) { + var maps pb.Maps + if err := UnmarshalString(`{"mInt64Str":null}`, &maps); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(maps, pb.Maps{}) { + t.Errorf("got non-nil fields in [%#v]", maps) + } +} + +func TestUnmarshalNext(t *testing.T) { + // We only need to check against a few, not all of them. + tests := unmarshalingTests[:5] + + // Create a buffer with many concatenated JSON objects. + var b bytes.Buffer + for _, tt := range tests { + b.WriteString(tt.json) + } + + dec := json.NewDecoder(&b) + for _, tt := range tests { + // Make a new instance of the type of our expected object. + p := reflect.New(reflect.TypeOf(tt.pb).Elem()).Interface().(proto.Message) + + err := tt.unmarshaler.UnmarshalNext(dec, p) + if err != nil { + t.Errorf("%s: %v", tt.desc, err) + continue + } + + // For easier diffs, compare text strings of the protos. + exp := proto.MarshalTextString(tt.pb) + act := proto.MarshalTextString(p) + if string(exp) != string(act) { + t.Errorf("%s: got [%s] want [%s]", tt.desc, act, exp) + } + } + + p := &pb.Simple{} + err := new(Unmarshaler).UnmarshalNext(dec, p) + if err != io.EOF { + t.Errorf("eof: got %v, expected io.EOF", err) + } +} + +var unmarshalingShouldError = []struct { + desc string + in string + pb proto.Message +}{ + {"a value", "666", new(pb.Simple)}, + {"gibberish", "{adskja123;l23=-=", new(pb.Simple)}, + {"unknown field", `{"unknown": "foo"}`, new(pb.Simple)}, + {"unknown enum name", `{"hilarity":"DAVE"}`, new(proto3pb.Message)}, +} + +func TestUnmarshalingBadInput(t *testing.T) { + for _, tt := range unmarshalingShouldError { + err := UnmarshalString(tt.in, tt.pb) + if err == nil { + t.Errorf("an error was expected when parsing %q instead of an object", tt.desc) + } + } +} + +type funcResolver func(turl string) (proto.Message, error) + +func (fn funcResolver) Resolve(turl string) (proto.Message, error) { + return fn(turl) +} + +func TestAnyWithCustomResolver(t *testing.T) { + var resolvedTypeUrls []string + resolver := funcResolver(func(turl string) (proto.Message, error) { + resolvedTypeUrls = append(resolvedTypeUrls, turl) + return new(pb.Simple), nil + }) + msg := &pb.Simple{ + OBytes: []byte{1, 2, 3, 4}, + OBool: proto.Bool(true), + OString: proto.String("foobar"), + OInt64: proto.Int64(1020304), + } + msgBytes, err := proto.Marshal(msg) + if err != nil { + t.Errorf("an unexpected error occurred when marshaling message: %v", err) + } + // make an Any with a type URL that won't resolve w/out custom resolver + any := &anypb.Any{ + TypeUrl: "https://foobar.com/some.random.MessageKind", + Value: msgBytes, + } + + m := Marshaler{AnyResolver: resolver} + js, err := m.MarshalToString(any) + if err != nil { + t.Errorf("an unexpected error occurred when marshaling any to JSON: %v", err) + } + if len(resolvedTypeUrls) != 1 { + t.Errorf("custom resolver was not invoked during marshaling") + } else if resolvedTypeUrls[0] != "https://foobar.com/some.random.MessageKind" { + t.Errorf("custom resolver was invoked with wrong URL: got %q, wanted %q", resolvedTypeUrls[0], 
"https://foobar.com/some.random.MessageKind") + } + wanted := `{"@type":"https://foobar.com/some.random.MessageKind","oBool":true,"oInt64":"1020304","oString":"foobar","oBytes":"AQIDBA=="}` + if js != wanted { + t.Errorf("marshalling JSON produced incorrect output: got %s, wanted %s", js, wanted) + } + + u := Unmarshaler{AnyResolver: resolver} + roundTrip := &anypb.Any{} + err = u.Unmarshal(bytes.NewReader([]byte(js)), roundTrip) + if err != nil { + t.Errorf("an unexpected error occurred when unmarshaling any from JSON: %v", err) + } + if len(resolvedTypeUrls) != 2 { + t.Errorf("custom resolver was not invoked during marshaling") + } else if resolvedTypeUrls[1] != "https://foobar.com/some.random.MessageKind" { + t.Errorf("custom resolver was invoked with wrong URL: got %q, wanted %q", resolvedTypeUrls[1], "https://foobar.com/some.random.MessageKind") + } + if !proto.Equal(any, roundTrip) { + t.Errorf("message contents not set correctly after unmarshalling JSON: got %s, wanted %s", roundTrip, any) + } +} + +func TestUnmarshalJSONPBUnmarshaler(t *testing.T) { + rawJson := `{ "foo": "bar", "baz": [0, 1, 2, 3] }` + var msg dynamicMessage + if err := Unmarshal(strings.NewReader(rawJson), &msg); err != nil { + t.Errorf("an unexpected error occurred when parsing into JSONPBUnmarshaler: %v", err) + } + if msg.rawJson != rawJson { + t.Errorf("message contents not set correctly after unmarshalling JSON: got %s, wanted %s", msg.rawJson, rawJson) + } +} + +func TestUnmarshalNullWithJSONPBUnmarshaler(t *testing.T) { + rawJson := `{"stringField":null}` + var ptrFieldMsg ptrFieldMessage + if err := Unmarshal(strings.NewReader(rawJson), &ptrFieldMsg); err != nil { + t.Errorf("unmarshal error: %v", err) + } + + want := ptrFieldMessage{StringField: &stringField{IsSet: true, StringValue: "null"}} + if !proto.Equal(&ptrFieldMsg, &want) { + t.Errorf("unmarshal result StringField: got %v, want %v", ptrFieldMsg, want) + } +} + +func TestUnmarshalAnyJSONPBUnmarshaler(t *testing.T) { + rawJson := `{ "@type": "blah.com/` + dynamicMessageName + `", "foo": "bar", "baz": [0, 1, 2, 3] }` + var got anypb.Any + if err := Unmarshal(strings.NewReader(rawJson), &got); err != nil { + t.Errorf("an unexpected error occurred when parsing into JSONPBUnmarshaler: %v", err) + } + + dm := &dynamicMessage{rawJson: `{"baz":[0,1,2,3],"foo":"bar"}`} + var want anypb.Any + if b, err := proto.Marshal(dm); err != nil { + t.Errorf("an unexpected error occurred when marshaling message: %v", err) + } else { + want.TypeUrl = "blah.com/" + dynamicMessageName + want.Value = b + } + + if !proto.Equal(&got, &want) { + t.Errorf("message contents not set correctly after unmarshalling JSON: got %s, wanted %s", got, want) + } +} + +const ( + dynamicMessageName = "google.protobuf.jsonpb.testing.dynamicMessage" +) + +func init() { + // we register the custom type below so that we can use it in Any types + proto.RegisterType((*dynamicMessage)(nil), dynamicMessageName) +} + +type ptrFieldMessage struct { + StringField *stringField `protobuf:"bytes,1,opt,name=stringField"` +} + +func (m *ptrFieldMessage) Reset() { +} + +func (m *ptrFieldMessage) String() string { + return m.StringField.StringValue +} + +func (m *ptrFieldMessage) ProtoMessage() { +} + +type stringField struct { + IsSet bool `protobuf:"varint,1,opt,name=isSet"` + StringValue string `protobuf:"bytes,2,opt,name=stringValue"` +} + +func (s *stringField) Reset() { +} + +func (s *stringField) String() string { + return s.StringValue +} + +func (s *stringField) ProtoMessage() { +} + +func (s 
*stringField) UnmarshalJSONPB(jum *Unmarshaler, js []byte) error { + s.IsSet = true + s.StringValue = string(js) + return nil +} + +// dynamicMessage implements protobuf.Message but is not a normal generated message type. +// It provides implementations of JSONPBMarshaler and JSONPBUnmarshaler for JSON support. +type dynamicMessage struct { + rawJson string `protobuf:"bytes,1,opt,name=rawJson"` +} + +func (m *dynamicMessage) Reset() { + m.rawJson = "{}" +} + +func (m *dynamicMessage) String() string { + return m.rawJson +} + +func (m *dynamicMessage) ProtoMessage() { +} + +func (m *dynamicMessage) MarshalJSONPB(jm *Marshaler) ([]byte, error) { + return []byte(m.rawJson), nil +} + +func (m *dynamicMessage) UnmarshalJSONPB(jum *Unmarshaler, js []byte) error { + m.rawJson = string(js) + return nil +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/Makefile b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/Makefile new file mode 100644 index 0000000..eeda8ae --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/Makefile @@ -0,0 +1,33 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2015 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + protoc --go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,Mgoogle/protobuf/struct.proto=github.com/golang/protobuf/ptypes/struct,Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp,Mgoogle/protobuf/wrappers.proto=github.com/golang/protobuf/ptypes/wrappers:. *.proto diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go new file mode 100644 index 0000000..ebb180e --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go @@ -0,0 +1,266 @@ +// Code generated by protoc-gen-go. 
DO NOT EDIT. +// source: more_test_objects.proto + +/* +Package jsonpb is a generated protocol buffer package. + +It is generated from these files: + more_test_objects.proto + test_objects.proto + +It has these top-level messages: + Simple3 + SimpleSlice3 + SimpleMap3 + SimpleNull3 + Mappy + Simple + NonFinites + Repeats + Widget + Maps + MsgWithOneof + Real + Complex + KnownTypes +*/ +package jsonpb + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Numeral int32 + +const ( + Numeral_UNKNOWN Numeral = 0 + Numeral_ARABIC Numeral = 1 + Numeral_ROMAN Numeral = 2 +) + +var Numeral_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ARABIC", + 2: "ROMAN", +} +var Numeral_value = map[string]int32{ + "UNKNOWN": 0, + "ARABIC": 1, + "ROMAN": 2, +} + +func (x Numeral) String() string { + return proto.EnumName(Numeral_name, int32(x)) +} +func (Numeral) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type Simple3 struct { + Dub float64 `protobuf:"fixed64,1,opt,name=dub" json:"dub,omitempty"` +} + +func (m *Simple3) Reset() { *m = Simple3{} } +func (m *Simple3) String() string { return proto.CompactTextString(m) } +func (*Simple3) ProtoMessage() {} +func (*Simple3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Simple3) GetDub() float64 { + if m != nil { + return m.Dub + } + return 0 +} + +type SimpleSlice3 struct { + Slices []string `protobuf:"bytes,1,rep,name=slices" json:"slices,omitempty"` +} + +func (m *SimpleSlice3) Reset() { *m = SimpleSlice3{} } +func (m *SimpleSlice3) String() string { return proto.CompactTextString(m) } +func (*SimpleSlice3) ProtoMessage() {} +func (*SimpleSlice3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *SimpleSlice3) GetSlices() []string { + if m != nil { + return m.Slices + } + return nil +} + +type SimpleMap3 struct { + Stringy map[string]string `protobuf:"bytes,1,rep,name=stringy" json:"stringy,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *SimpleMap3) Reset() { *m = SimpleMap3{} } +func (m *SimpleMap3) String() string { return proto.CompactTextString(m) } +func (*SimpleMap3) ProtoMessage() {} +func (*SimpleMap3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *SimpleMap3) GetStringy() map[string]string { + if m != nil { + return m.Stringy + } + return nil +} + +type SimpleNull3 struct { + Simple *Simple3 `protobuf:"bytes,1,opt,name=simple" json:"simple,omitempty"` +} + +func (m *SimpleNull3) Reset() { *m = SimpleNull3{} } +func (m *SimpleNull3) String() string { return proto.CompactTextString(m) } +func (*SimpleNull3) ProtoMessage() {} +func (*SimpleNull3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *SimpleNull3) GetSimple() *Simple3 { + if m != nil { + return m.Simple + } + return nil +} + +type Mappy struct { + Nummy map[int64]int32 `protobuf:"bytes,1,rep,name=nummy" json:"nummy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + 
Strry map[string]string `protobuf:"bytes,2,rep,name=strry" json:"strry,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Objjy map[int32]*Simple3 `protobuf:"bytes,3,rep,name=objjy" json:"objjy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Buggy map[int64]string `protobuf:"bytes,4,rep,name=buggy" json:"buggy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Booly map[bool]bool `protobuf:"bytes,5,rep,name=booly" json:"booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + Enumy map[string]Numeral `protobuf:"bytes,6,rep,name=enumy" json:"enumy,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value,enum=jsonpb.Numeral"` + S32Booly map[int32]bool `protobuf:"bytes,7,rep,name=s32booly" json:"s32booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + S64Booly map[int64]bool `protobuf:"bytes,8,rep,name=s64booly" json:"s64booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + U32Booly map[uint32]bool `protobuf:"bytes,9,rep,name=u32booly" json:"u32booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + U64Booly map[uint64]bool `protobuf:"bytes,10,rep,name=u64booly" json:"u64booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` +} + +func (m *Mappy) Reset() { *m = Mappy{} } +func (m *Mappy) String() string { return proto.CompactTextString(m) } +func (*Mappy) ProtoMessage() {} +func (*Mappy) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *Mappy) GetNummy() map[int64]int32 { + if m != nil { + return m.Nummy + } + return nil +} + +func (m *Mappy) GetStrry() map[string]string { + if m != nil { + return m.Strry + } + return nil +} + +func (m *Mappy) GetObjjy() map[int32]*Simple3 { + if m != nil { + return m.Objjy + } + return nil +} + +func (m *Mappy) GetBuggy() map[int64]string { + if m != nil { + return m.Buggy + } + return nil +} + +func (m *Mappy) GetBooly() map[bool]bool { + if m != nil { + return m.Booly + } + return nil +} + +func (m *Mappy) GetEnumy() map[string]Numeral { + if m != nil { + return m.Enumy + } + return nil +} + +func (m *Mappy) GetS32Booly() map[int32]bool { + if m != nil { + return m.S32Booly + } + return nil +} + +func (m *Mappy) GetS64Booly() map[int64]bool { + if m != nil { + return m.S64Booly + } + return nil +} + +func (m *Mappy) GetU32Booly() map[uint32]bool { + if m != nil { + return m.U32Booly + } + return nil +} + +func (m *Mappy) GetU64Booly() map[uint64]bool { + if m != nil { + return m.U64Booly + } + return nil +} + +func init() { + proto.RegisterType((*Simple3)(nil), "jsonpb.Simple3") + proto.RegisterType((*SimpleSlice3)(nil), "jsonpb.SimpleSlice3") + proto.RegisterType((*SimpleMap3)(nil), "jsonpb.SimpleMap3") + proto.RegisterType((*SimpleNull3)(nil), "jsonpb.SimpleNull3") + proto.RegisterType((*Mappy)(nil), "jsonpb.Mappy") + proto.RegisterEnum("jsonpb.Numeral", Numeral_name, Numeral_value) +} + +func init() { proto.RegisterFile("more_test_objects.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 526 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0xdd, 0x6b, 0xdb, 0x3c, + 0x14, 0x87, 0x5f, 0x27, 0xf5, 0xd7, 0x49, 0xfb, 0x2e, 0x88, 0xb1, 0x99, 0xf4, 0x62, 0xc5, 0xb0, + 0xad, 0x0c, 0xe6, 0x8b, 0x78, 0x74, 0x5d, 0x77, 
0x95, 0x8e, 0x5e, 0x94, 0x11, 0x07, 0x1c, 0xc2, + 0x2e, 0x4b, 0xdc, 0x99, 0x90, 0xcc, 0x5f, 0xd8, 0xd6, 0xc0, 0xd7, 0xfb, 0xbb, 0x07, 0xe3, 0x48, + 0x72, 0x2d, 0x07, 0x85, 0x6c, 0x77, 0x52, 0x7e, 0xcf, 0xe3, 0x73, 0x24, 0x1d, 0x02, 0x2f, 0xd3, + 0xbc, 0x8c, 0x1f, 0xea, 0xb8, 0xaa, 0x1f, 0xf2, 0x68, 0x17, 0x3f, 0xd6, 0x95, 0x57, 0x94, 0x79, + 0x9d, 0x13, 0x63, 0x57, 0xe5, 0x59, 0x11, 0xb9, 0xe7, 0x60, 0x2e, 0xb7, 0x69, 0x91, 0xc4, 0x3e, + 0x19, 0xc3, 0xf0, 0x3b, 0x8d, 0x1c, 0xed, 0x42, 0xbb, 0xd4, 0x42, 0x5c, 0xba, 0x6f, 0xe0, 0x94, + 0x87, 0xcb, 0x64, 0xfb, 0x18, 0xfb, 0xe4, 0x05, 0x18, 0x15, 0xae, 0x2a, 0x47, 0xbb, 0x18, 0x5e, + 0xda, 0xa1, 0xd8, 0xb9, 0xbf, 0x34, 0x00, 0x0e, 0xce, 0xd7, 0x85, 0x4f, 0x3e, 0x81, 0x59, 0xd5, + 0xe5, 0x36, 0xdb, 0x34, 0x8c, 0x1b, 0x4d, 0x5f, 0x79, 0xbc, 0x9a, 0xd7, 0x41, 0xde, 0x92, 0x13, + 0x77, 0x59, 0x5d, 0x36, 0x61, 0xcb, 0x4f, 0x6e, 0xe0, 0x54, 0x0e, 0xb0, 0xa7, 0x1f, 0x71, 0xc3, + 0x7a, 0xb2, 0x43, 0x5c, 0x92, 0xe7, 0xa0, 0xff, 0x5c, 0x27, 0x34, 0x76, 0x06, 0xec, 0x37, 0xbe, + 0xb9, 0x19, 0x5c, 0x6b, 0xee, 0x15, 0x8c, 0xf8, 0xf7, 0x03, 0x9a, 0x24, 0x3e, 0x79, 0x0b, 0x46, + 0xc5, 0xb6, 0xcc, 0x1e, 0x4d, 0x9f, 0xf5, 0x9b, 0xf0, 0x43, 0x11, 0xbb, 0xbf, 0x2d, 0xd0, 0xe7, + 0xeb, 0xa2, 0x68, 0x88, 0x07, 0x7a, 0x46, 0xd3, 0xb4, 0x6d, 0xdb, 0x69, 0x0d, 0x96, 0x7a, 0x01, + 0x46, 0xbc, 0x5f, 0x8e, 0x21, 0x5f, 0xd5, 0x65, 0xd9, 0x38, 0x03, 0x15, 0xbf, 0xc4, 0x48, 0xf0, + 0x0c, 0x43, 0x3e, 0x8f, 0x76, 0xbb, 0xc6, 0x19, 0xaa, 0xf8, 0x05, 0x46, 0x82, 0x67, 0x18, 0xf2, + 0x11, 0xdd, 0x6c, 0x1a, 0xe7, 0x44, 0xc5, 0xdf, 0x62, 0x24, 0x78, 0x86, 0x31, 0x3e, 0xcf, 0x93, + 0xc6, 0xd1, 0x95, 0x3c, 0x46, 0x2d, 0x8f, 0x6b, 0xe4, 0xe3, 0x8c, 0xa6, 0x8d, 0x63, 0xa8, 0xf8, + 0x3b, 0x8c, 0x04, 0xcf, 0x30, 0xf2, 0x11, 0xac, 0xca, 0x9f, 0xf2, 0x12, 0x26, 0x53, 0xce, 0xf7, + 0x8e, 0x2c, 0x52, 0x6e, 0x3d, 0xc1, 0x4c, 0xbc, 0xfa, 0xc0, 0x45, 0x4b, 0x29, 0x8a, 0xb4, 0x15, + 0xc5, 0x16, 0x45, 0xda, 0x56, 0xb4, 0x55, 0xe2, 0xaa, 0x5f, 0x91, 0x4a, 0x15, 0x69, 0x5b, 0x11, + 0x94, 0x62, 0xbf, 0x62, 0x0b, 0x4f, 0xae, 0x01, 0xba, 0x87, 0x96, 0xe7, 0x6f, 0xa8, 0x98, 0x3f, + 0x5d, 0x9a, 0x3f, 0x34, 0xbb, 0x27, 0xff, 0x97, 0xc9, 0x9d, 0xdc, 0x03, 0x74, 0x8f, 0x2f, 0x9b, + 0x3a, 0x37, 0x5f, 0xcb, 0xa6, 0x62, 0x92, 0xfb, 0x4d, 0x74, 0x73, 0x71, 0xac, 0x7d, 0x7b, 0xdf, + 0x7c, 0xba, 0x10, 0xd9, 0xb4, 0x14, 0xa6, 0xb5, 0xd7, 0x7e, 0x37, 0x2b, 0x8a, 0x83, 0xf7, 0xda, + 0xff, 0xbf, 0x6b, 0x3f, 0xa0, 0x69, 0x5c, 0xae, 0x13, 0xf9, 0x53, 0x9f, 0xe1, 0xac, 0x37, 0x43, + 0x8a, 0xcb, 0x38, 0xdc, 0x07, 0xca, 0xf2, 0xab, 0x1e, 0x3b, 0xfe, 0xbe, 0xbc, 0x3a, 0x54, 0xf9, + 0xec, 0x6f, 0xe4, 0x43, 0x95, 0x4f, 0x8e, 0xc8, 0xef, 0xde, 0x83, 0x29, 0x6e, 0x82, 0x8c, 0xc0, + 0x5c, 0x05, 0x5f, 0x83, 0xc5, 0xb7, 0x60, 0xfc, 0x1f, 0x01, 0x30, 0x66, 0xe1, 0xec, 0xf6, 0xfe, + 0xcb, 0x58, 0x23, 0x36, 0xe8, 0xe1, 0x62, 0x3e, 0x0b, 0xc6, 0x83, 0xc8, 0x60, 0x7f, 0xe0, 0xfe, + 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x34, 0xaf, 0xdb, 0x05, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.proto b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.proto new file mode 100644 index 0000000..d254fa5 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.proto @@ -0,0 +1,69 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package jsonpb; + +message Simple3 { + double dub = 1; +} + +message SimpleSlice3 { + repeated string slices = 1; +} + +message SimpleMap3 { + map<string, string> stringy = 1; +} + +message SimpleNull3 { + Simple3 simple = 1; +} + +enum Numeral { + UNKNOWN = 0; + ARABIC = 1; + ROMAN = 2; +} + +message Mappy { + map<int64, int32> nummy = 1; + map<string, string> strry = 2; + map<int32, Simple3> objjy = 3; + map<int64, string> buggy = 4; + map<bool, bool> booly = 5; + map<string, Numeral> enumy = 6; + map<int32, bool> s32booly = 7; + map<int64, bool> s64booly = 8; + map<uint32, bool> u32booly = 9; + map<uint64, bool> u64booly = 10; +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go new file mode 100644 index 0000000..d413d74 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go @@ -0,0 +1,852 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: test_objects.proto + +package jsonpb + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" +import google_protobuf1 "github.com/golang/protobuf/ptypes/duration" +import google_protobuf2 "github.com/golang/protobuf/ptypes/struct" +import google_protobuf3 "github.com/golang/protobuf/ptypes/timestamp" +import google_protobuf4 "github.com/golang/protobuf/ptypes/wrappers" + +// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type Widget_Color int32 + +const ( + Widget_RED Widget_Color = 0 + Widget_GREEN Widget_Color = 1 + Widget_BLUE Widget_Color = 2 +) + +var Widget_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var Widget_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x Widget_Color) Enum() *Widget_Color { + p := new(Widget_Color) + *p = x + return p +} +func (x Widget_Color) String() string { + return proto.EnumName(Widget_Color_name, int32(x)) +} +func (x *Widget_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Widget_Color_value, data, "Widget_Color") + if err != nil { + return err + } + *x = Widget_Color(value) + return nil +} +func (Widget_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{3, 0} } + +// Test message for holding primitive types. +type Simple struct { + OBool *bool `protobuf:"varint,1,opt,name=o_bool,json=oBool" json:"o_bool,omitempty"` + OInt32 *int32 `protobuf:"varint,2,opt,name=o_int32,json=oInt32" json:"o_int32,omitempty"` + OInt64 *int64 `protobuf:"varint,3,opt,name=o_int64,json=oInt64" json:"o_int64,omitempty"` + OUint32 *uint32 `protobuf:"varint,4,opt,name=o_uint32,json=oUint32" json:"o_uint32,omitempty"` + OUint64 *uint64 `protobuf:"varint,5,opt,name=o_uint64,json=oUint64" json:"o_uint64,omitempty"` + OSint32 *int32 `protobuf:"zigzag32,6,opt,name=o_sint32,json=oSint32" json:"o_sint32,omitempty"` + OSint64 *int64 `protobuf:"zigzag64,7,opt,name=o_sint64,json=oSint64" json:"o_sint64,omitempty"` + OFloat *float32 `protobuf:"fixed32,8,opt,name=o_float,json=oFloat" json:"o_float,omitempty"` + ODouble *float64 `protobuf:"fixed64,9,opt,name=o_double,json=oDouble" json:"o_double,omitempty"` + OString *string `protobuf:"bytes,10,opt,name=o_string,json=oString" json:"o_string,omitempty"` + OBytes []byte `protobuf:"bytes,11,opt,name=o_bytes,json=oBytes" json:"o_bytes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Simple) Reset() { *m = Simple{} } +func (m *Simple) String() string { return proto.CompactTextString(m) } +func (*Simple) ProtoMessage() {} +func (*Simple) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *Simple) GetOBool() bool { + if m != nil && m.OBool != nil { + return *m.OBool + } + return false +} + +func (m *Simple) GetOInt32() int32 { + if m != nil && m.OInt32 != nil { + return *m.OInt32 + } + return 0 +} + +func (m *Simple) GetOInt64() int64 { + if m != nil && m.OInt64 != nil { + return *m.OInt64 + } + return 0 +} + +func (m *Simple) GetOUint32() uint32 { + if m != nil && m.OUint32 != nil { + return *m.OUint32 + } + return 0 +} + +func (m *Simple) GetOUint64() uint64 { + if m != nil && m.OUint64 != nil { + return *m.OUint64 + } + return 0 +} + +func (m *Simple) GetOSint32() int32 { + if m != nil && m.OSint32 != nil { + return *m.OSint32 + } + return 0 +} + +func (m *Simple) GetOSint64() int64 { + if m != nil && m.OSint64 != nil { + return *m.OSint64 + } + return 0 +} + +func (m *Simple) GetOFloat() float32 { + if m != nil && m.OFloat != nil { + return *m.OFloat + } + return 0 +} + +func (m *Simple) GetODouble() float64 { + if m != nil && m.ODouble != nil { + return *m.ODouble + } + return 0 +} + +func (m *Simple) GetOString() string { + if m != nil && m.OString != nil { + return *m.OString + } + return "" +} + +func (m *Simple) GetOBytes() []byte { + if m != nil { + return m.OBytes + } + return nil +} + +// Test message for holding special non-finites 
primitives. +type NonFinites struct { + FNan *float32 `protobuf:"fixed32,1,opt,name=f_nan,json=fNan" json:"f_nan,omitempty"` + FPinf *float32 `protobuf:"fixed32,2,opt,name=f_pinf,json=fPinf" json:"f_pinf,omitempty"` + FNinf *float32 `protobuf:"fixed32,3,opt,name=f_ninf,json=fNinf" json:"f_ninf,omitempty"` + DNan *float64 `protobuf:"fixed64,4,opt,name=d_nan,json=dNan" json:"d_nan,omitempty"` + DPinf *float64 `protobuf:"fixed64,5,opt,name=d_pinf,json=dPinf" json:"d_pinf,omitempty"` + DNinf *float64 `protobuf:"fixed64,6,opt,name=d_ninf,json=dNinf" json:"d_ninf,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NonFinites) Reset() { *m = NonFinites{} } +func (m *NonFinites) String() string { return proto.CompactTextString(m) } +func (*NonFinites) ProtoMessage() {} +func (*NonFinites) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +func (m *NonFinites) GetFNan() float32 { + if m != nil && m.FNan != nil { + return *m.FNan + } + return 0 +} + +func (m *NonFinites) GetFPinf() float32 { + if m != nil && m.FPinf != nil { + return *m.FPinf + } + return 0 +} + +func (m *NonFinites) GetFNinf() float32 { + if m != nil && m.FNinf != nil { + return *m.FNinf + } + return 0 +} + +func (m *NonFinites) GetDNan() float64 { + if m != nil && m.DNan != nil { + return *m.DNan + } + return 0 +} + +func (m *NonFinites) GetDPinf() float64 { + if m != nil && m.DPinf != nil { + return *m.DPinf + } + return 0 +} + +func (m *NonFinites) GetDNinf() float64 { + if m != nil && m.DNinf != nil { + return *m.DNinf + } + return 0 +} + +// Test message for holding repeated primitives. +type Repeats struct { + RBool []bool `protobuf:"varint,1,rep,name=r_bool,json=rBool" json:"r_bool,omitempty"` + RInt32 []int32 `protobuf:"varint,2,rep,name=r_int32,json=rInt32" json:"r_int32,omitempty"` + RInt64 []int64 `protobuf:"varint,3,rep,name=r_int64,json=rInt64" json:"r_int64,omitempty"` + RUint32 []uint32 `protobuf:"varint,4,rep,name=r_uint32,json=rUint32" json:"r_uint32,omitempty"` + RUint64 []uint64 `protobuf:"varint,5,rep,name=r_uint64,json=rUint64" json:"r_uint64,omitempty"` + RSint32 []int32 `protobuf:"zigzag32,6,rep,name=r_sint32,json=rSint32" json:"r_sint32,omitempty"` + RSint64 []int64 `protobuf:"zigzag64,7,rep,name=r_sint64,json=rSint64" json:"r_sint64,omitempty"` + RFloat []float32 `protobuf:"fixed32,8,rep,name=r_float,json=rFloat" json:"r_float,omitempty"` + RDouble []float64 `protobuf:"fixed64,9,rep,name=r_double,json=rDouble" json:"r_double,omitempty"` + RString []string `protobuf:"bytes,10,rep,name=r_string,json=rString" json:"r_string,omitempty"` + RBytes [][]byte `protobuf:"bytes,11,rep,name=r_bytes,json=rBytes" json:"r_bytes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Repeats) Reset() { *m = Repeats{} } +func (m *Repeats) String() string { return proto.CompactTextString(m) } +func (*Repeats) ProtoMessage() {} +func (*Repeats) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *Repeats) GetRBool() []bool { + if m != nil { + return m.RBool + } + return nil +} + +func (m *Repeats) GetRInt32() []int32 { + if m != nil { + return m.RInt32 + } + return nil +} + +func (m *Repeats) GetRInt64() []int64 { + if m != nil { + return m.RInt64 + } + return nil +} + +func (m *Repeats) GetRUint32() []uint32 { + if m != nil { + return m.RUint32 + } + return nil +} + +func (m *Repeats) GetRUint64() []uint64 { + if m != nil { + return m.RUint64 + } + return nil +} + +func (m *Repeats) GetRSint32() []int32 { + if m != nil { + return m.RSint32 + } + return nil +} + 
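+// Note: as with all protoc-gen-go output, the getters above and below are
+// written to tolerate a nil receiver (note the `if m != nil` guards), so
+// callers can chain accessors such as widget.GetRepeats().GetRSint64()
+// without intermediate nil checks.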
+func (m *Repeats) GetRSint64() []int64 { + if m != nil { + return m.RSint64 + } + return nil +} + +func (m *Repeats) GetRFloat() []float32 { + if m != nil { + return m.RFloat + } + return nil +} + +func (m *Repeats) GetRDouble() []float64 { + if m != nil { + return m.RDouble + } + return nil +} + +func (m *Repeats) GetRString() []string { + if m != nil { + return m.RString + } + return nil +} + +func (m *Repeats) GetRBytes() [][]byte { + if m != nil { + return m.RBytes + } + return nil +} + +// Test message for holding enums and nested messages. +type Widget struct { + Color *Widget_Color `protobuf:"varint,1,opt,name=color,enum=jsonpb.Widget_Color" json:"color,omitempty"` + RColor []Widget_Color `protobuf:"varint,2,rep,name=r_color,json=rColor,enum=jsonpb.Widget_Color" json:"r_color,omitempty"` + Simple *Simple `protobuf:"bytes,10,opt,name=simple" json:"simple,omitempty"` + RSimple []*Simple `protobuf:"bytes,11,rep,name=r_simple,json=rSimple" json:"r_simple,omitempty"` + Repeats *Repeats `protobuf:"bytes,20,opt,name=repeats" json:"repeats,omitempty"` + RRepeats []*Repeats `protobuf:"bytes,21,rep,name=r_repeats,json=rRepeats" json:"r_repeats,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Widget) Reset() { *m = Widget{} } +func (m *Widget) String() string { return proto.CompactTextString(m) } +func (*Widget) ProtoMessage() {} +func (*Widget) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *Widget) GetColor() Widget_Color { + if m != nil && m.Color != nil { + return *m.Color + } + return Widget_RED +} + +func (m *Widget) GetRColor() []Widget_Color { + if m != nil { + return m.RColor + } + return nil +} + +func (m *Widget) GetSimple() *Simple { + if m != nil { + return m.Simple + } + return nil +} + +func (m *Widget) GetRSimple() []*Simple { + if m != nil { + return m.RSimple + } + return nil +} + +func (m *Widget) GetRepeats() *Repeats { + if m != nil { + return m.Repeats + } + return nil +} + +func (m *Widget) GetRRepeats() []*Repeats { + if m != nil { + return m.RRepeats + } + return nil +} + +type Maps struct { + MInt64Str map[int64]string `protobuf:"bytes,1,rep,name=m_int64_str,json=mInt64Str" json:"m_int64_str,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + MBoolSimple map[bool]*Simple `protobuf:"bytes,2,rep,name=m_bool_simple,json=mBoolSimple" json:"m_bool_simple,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Maps) Reset() { *m = Maps{} } +func (m *Maps) String() string { return proto.CompactTextString(m) } +func (*Maps) ProtoMessage() {} +func (*Maps) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } + +func (m *Maps) GetMInt64Str() map[int64]string { + if m != nil { + return m.MInt64Str + } + return nil +} + +func (m *Maps) GetMBoolSimple() map[bool]*Simple { + if m != nil { + return m.MBoolSimple + } + return nil +} + +type MsgWithOneof struct { + // Types that are valid to be assigned to Union: + // *MsgWithOneof_Title + // *MsgWithOneof_Salary + // *MsgWithOneof_Country + // *MsgWithOneof_HomeAddress + Union isMsgWithOneof_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MsgWithOneof) Reset() { *m = MsgWithOneof{} } +func (m *MsgWithOneof) String() string { return proto.CompactTextString(m) } +func (*MsgWithOneof) ProtoMessage() {} +func (*MsgWithOneof) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } + +type isMsgWithOneof_Union interface 
{ + isMsgWithOneof_Union() +} + +type MsgWithOneof_Title struct { + Title string `protobuf:"bytes,1,opt,name=title,oneof"` +} +type MsgWithOneof_Salary struct { + Salary int64 `protobuf:"varint,2,opt,name=salary,oneof"` +} +type MsgWithOneof_Country struct { + Country string `protobuf:"bytes,3,opt,name=Country,oneof"` +} +type MsgWithOneof_HomeAddress struct { + HomeAddress string `protobuf:"bytes,4,opt,name=home_address,json=homeAddress,oneof"` +} + +func (*MsgWithOneof_Title) isMsgWithOneof_Union() {} +func (*MsgWithOneof_Salary) isMsgWithOneof_Union() {} +func (*MsgWithOneof_Country) isMsgWithOneof_Union() {} +func (*MsgWithOneof_HomeAddress) isMsgWithOneof_Union() {} + +func (m *MsgWithOneof) GetUnion() isMsgWithOneof_Union { + if m != nil { + return m.Union + } + return nil +} + +func (m *MsgWithOneof) GetTitle() string { + if x, ok := m.GetUnion().(*MsgWithOneof_Title); ok { + return x.Title + } + return "" +} + +func (m *MsgWithOneof) GetSalary() int64 { + if x, ok := m.GetUnion().(*MsgWithOneof_Salary); ok { + return x.Salary + } + return 0 +} + +func (m *MsgWithOneof) GetCountry() string { + if x, ok := m.GetUnion().(*MsgWithOneof_Country); ok { + return x.Country + } + return "" +} + +func (m *MsgWithOneof) GetHomeAddress() string { + if x, ok := m.GetUnion().(*MsgWithOneof_HomeAddress); ok { + return x.HomeAddress + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*MsgWithOneof) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _MsgWithOneof_OneofMarshaler, _MsgWithOneof_OneofUnmarshaler, _MsgWithOneof_OneofSizer, []interface{}{ + (*MsgWithOneof_Title)(nil), + (*MsgWithOneof_Salary)(nil), + (*MsgWithOneof_Country)(nil), + (*MsgWithOneof_HomeAddress)(nil), + } +} + +func _MsgWithOneof_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*MsgWithOneof) + // union + switch x := m.Union.(type) { + case *MsgWithOneof_Title: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Title) + case *MsgWithOneof_Salary: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Salary)) + case *MsgWithOneof_Country: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Country) + case *MsgWithOneof_HomeAddress: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeStringBytes(x.HomeAddress) + case nil: + default: + return fmt.Errorf("MsgWithOneof.Union has unexpected type %T", x) + } + return nil +} + +func _MsgWithOneof_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*MsgWithOneof) + switch tag { + case 1: // union.title + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &MsgWithOneof_Title{x} + return true, err + case 2: // union.salary + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &MsgWithOneof_Salary{int64(x)} + return true, err + case 3: // union.Country + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &MsgWithOneof_Country{x} + return true, err + case 4: // union.home_address + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &MsgWithOneof_HomeAddress{x} + return true, err + default: + return false, nil + } +} + 
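+// A short note on the wire math in the oneof marshaler above and the sizer
+// below: each key is encoded as the varint (field_number<<3 | wire_type), so
+// the "title" field's key is 1<<3|proto.WireBytes = 0x0a, followed by a
+// length varint and the string bytes. The sizer mirrors the marshaler
+// exactly: varint size of the key, plus (for bytes fields) varint size of
+// the length, plus the payload itself.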
+func _MsgWithOneof_OneofSizer(msg proto.Message) (n int) { + m := msg.(*MsgWithOneof) + // union + switch x := m.Union.(type) { + case *MsgWithOneof_Title: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Title))) + n += len(x.Title) + case *MsgWithOneof_Salary: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Salary)) + case *MsgWithOneof_Country: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Country))) + n += len(x.Country) + case *MsgWithOneof_HomeAddress: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.HomeAddress))) + n += len(x.HomeAddress) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Real struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Real) Reset() { *m = Real{} } +func (m *Real) String() string { return proto.CompactTextString(m) } +func (*Real) ProtoMessage() {} +func (*Real) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } + +var extRange_Real = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*Real) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_Real +} + +func (m *Real) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Complex struct { + Imaginary *float64 `protobuf:"fixed64,1,opt,name=imaginary" json:"imaginary,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Complex) Reset() { *m = Complex{} } +func (m *Complex) String() string { return proto.CompactTextString(m) } +func (*Complex) ProtoMessage() {} +func (*Complex) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } + +var extRange_Complex = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*Complex) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_Complex +} + +func (m *Complex) GetImaginary() float64 { + if m != nil && m.Imaginary != nil { + return *m.Imaginary + } + return 0 +} + +var E_Complex_RealExtension = &proto.ExtensionDesc{ + ExtendedType: (*Real)(nil), + ExtensionType: (*Complex)(nil), + Field: 123, + Name: "jsonpb.Complex.real_extension", + Tag: "bytes,123,opt,name=real_extension,json=realExtension", + Filename: "test_objects.proto", +} + +type KnownTypes struct { + An *google_protobuf.Any `protobuf:"bytes,14,opt,name=an" json:"an,omitempty"` + Dur *google_protobuf1.Duration `protobuf:"bytes,1,opt,name=dur" json:"dur,omitempty"` + St *google_protobuf2.Struct `protobuf:"bytes,12,opt,name=st" json:"st,omitempty"` + Ts *google_protobuf3.Timestamp `protobuf:"bytes,2,opt,name=ts" json:"ts,omitempty"` + Lv *google_protobuf2.ListValue `protobuf:"bytes,15,opt,name=lv" json:"lv,omitempty"` + Val *google_protobuf2.Value `protobuf:"bytes,16,opt,name=val" json:"val,omitempty"` + Dbl *google_protobuf4.DoubleValue `protobuf:"bytes,3,opt,name=dbl" json:"dbl,omitempty"` + Flt *google_protobuf4.FloatValue `protobuf:"bytes,4,opt,name=flt" json:"flt,omitempty"` + I64 *google_protobuf4.Int64Value `protobuf:"bytes,5,opt,name=i64" json:"i64,omitempty"` + U64 *google_protobuf4.UInt64Value `protobuf:"bytes,6,opt,name=u64" json:"u64,omitempty"` + I32 *google_protobuf4.Int32Value `protobuf:"bytes,7,opt,name=i32" json:"i32,omitempty"` + U32 *google_protobuf4.UInt32Value `protobuf:"bytes,8,opt,name=u32" 
json:"u32,omitempty"` + Bool *google_protobuf4.BoolValue `protobuf:"bytes,9,opt,name=bool" json:"bool,omitempty"` + Str *google_protobuf4.StringValue `protobuf:"bytes,10,opt,name=str" json:"str,omitempty"` + Bytes *google_protobuf4.BytesValue `protobuf:"bytes,11,opt,name=bytes" json:"bytes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *KnownTypes) Reset() { *m = KnownTypes{} } +func (m *KnownTypes) String() string { return proto.CompactTextString(m) } +func (*KnownTypes) ProtoMessage() {} +func (*KnownTypes) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} } + +func (m *KnownTypes) GetAn() *google_protobuf.Any { + if m != nil { + return m.An + } + return nil +} + +func (m *KnownTypes) GetDur() *google_protobuf1.Duration { + if m != nil { + return m.Dur + } + return nil +} + +func (m *KnownTypes) GetSt() *google_protobuf2.Struct { + if m != nil { + return m.St + } + return nil +} + +func (m *KnownTypes) GetTs() *google_protobuf3.Timestamp { + if m != nil { + return m.Ts + } + return nil +} + +func (m *KnownTypes) GetLv() *google_protobuf2.ListValue { + if m != nil { + return m.Lv + } + return nil +} + +func (m *KnownTypes) GetVal() *google_protobuf2.Value { + if m != nil { + return m.Val + } + return nil +} + +func (m *KnownTypes) GetDbl() *google_protobuf4.DoubleValue { + if m != nil { + return m.Dbl + } + return nil +} + +func (m *KnownTypes) GetFlt() *google_protobuf4.FloatValue { + if m != nil { + return m.Flt + } + return nil +} + +func (m *KnownTypes) GetI64() *google_protobuf4.Int64Value { + if m != nil { + return m.I64 + } + return nil +} + +func (m *KnownTypes) GetU64() *google_protobuf4.UInt64Value { + if m != nil { + return m.U64 + } + return nil +} + +func (m *KnownTypes) GetI32() *google_protobuf4.Int32Value { + if m != nil { + return m.I32 + } + return nil +} + +func (m *KnownTypes) GetU32() *google_protobuf4.UInt32Value { + if m != nil { + return m.U32 + } + return nil +} + +func (m *KnownTypes) GetBool() *google_protobuf4.BoolValue { + if m != nil { + return m.Bool + } + return nil +} + +func (m *KnownTypes) GetStr() *google_protobuf4.StringValue { + if m != nil { + return m.Str + } + return nil +} + +func (m *KnownTypes) GetBytes() *google_protobuf4.BytesValue { + if m != nil { + return m.Bytes + } + return nil +} + +var E_Name = &proto.ExtensionDesc{ + ExtendedType: (*Real)(nil), + ExtensionType: (*string)(nil), + Field: 124, + Name: "jsonpb.name", + Tag: "bytes,124,opt,name=name", + Filename: "test_objects.proto", +} + +func init() { + proto.RegisterType((*Simple)(nil), "jsonpb.Simple") + proto.RegisterType((*NonFinites)(nil), "jsonpb.NonFinites") + proto.RegisterType((*Repeats)(nil), "jsonpb.Repeats") + proto.RegisterType((*Widget)(nil), "jsonpb.Widget") + proto.RegisterType((*Maps)(nil), "jsonpb.Maps") + proto.RegisterType((*MsgWithOneof)(nil), "jsonpb.MsgWithOneof") + proto.RegisterType((*Real)(nil), "jsonpb.Real") + proto.RegisterType((*Complex)(nil), "jsonpb.Complex") + proto.RegisterType((*KnownTypes)(nil), "jsonpb.KnownTypes") + proto.RegisterEnum("jsonpb.Widget_Color", Widget_Color_name, Widget_Color_value) + proto.RegisterExtension(E_Complex_RealExtension) + proto.RegisterExtension(E_Name) +} + +func init() { proto.RegisterFile("test_objects.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 1160 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x95, 0x41, 0x73, 0xdb, 0x44, + 0x14, 0xc7, 0x23, 0xc9, 0x92, 0xed, 0x75, 0x92, 0x9a, 0x6d, 0xda, 0x2a, 0x26, 0x80, 
0xc6, 0x94, + 0x22, 0x0a, 0x75, 0x07, 0xc7, 0xe3, 0x61, 0x0a, 0x97, 0xa4, 0x71, 0x29, 0x43, 0x13, 0x98, 0x4d, + 0x43, 0x8f, 0x1e, 0x39, 0x5a, 0xbb, 0x2a, 0xf2, 0xae, 0x67, 0x77, 0x95, 0xd4, 0x03, 0x87, 0x9c, + 0x39, 0x32, 0x7c, 0x05, 0xf8, 0x08, 0x1c, 0xf8, 0x74, 0xcc, 0xdb, 0x95, 0xac, 0xc4, 0x8e, 0x4f, + 0xf1, 0x7b, 0xef, 0xff, 0xfe, 0x59, 0xed, 0x6f, 0x77, 0x1f, 0xc2, 0x8a, 0x4a, 0x35, 0xe4, 0xa3, + 0x77, 0xf4, 0x5c, 0xc9, 0xce, 0x4c, 0x70, 0xc5, 0xb1, 0xf7, 0x4e, 0x72, 0x36, 0x1b, 0xb5, 0x76, + 0x27, 0x9c, 0x4f, 0x52, 0xfa, 0x54, 0x67, 0x47, 0xd9, 0xf8, 0x69, 0xc4, 0xe6, 0x46, 0xd2, 0xfa, + 0x78, 0xb9, 0x14, 0x67, 0x22, 0x52, 0x09, 0x67, 0x79, 0x7d, 0x6f, 0xb9, 0x2e, 0x95, 0xc8, 0xce, + 0x55, 0x5e, 0xfd, 0x64, 0xb9, 0xaa, 0x92, 0x29, 0x95, 0x2a, 0x9a, 0xce, 0xd6, 0xd9, 0x5f, 0x8a, + 0x68, 0x36, 0xa3, 0x22, 0x5f, 0x61, 0xfb, 0x6f, 0x1b, 0x79, 0xa7, 0xc9, 0x74, 0x96, 0x52, 0x7c, + 0x0f, 0x79, 0x7c, 0x38, 0xe2, 0x3c, 0xf5, 0xad, 0xc0, 0x0a, 0x6b, 0xc4, 0xe5, 0x87, 0x9c, 0xa7, + 0xf8, 0x01, 0xaa, 0xf2, 0x61, 0xc2, 0xd4, 0x7e, 0xd7, 0xb7, 0x03, 0x2b, 0x74, 0x89, 0xc7, 0x7f, + 0x80, 0x68, 0x51, 0xe8, 0xf7, 0x7c, 0x27, 0xb0, 0x42, 0xc7, 0x14, 0xfa, 0x3d, 0xbc, 0x8b, 0x6a, + 0x7c, 0x98, 0x99, 0x96, 0x4a, 0x60, 0x85, 0x5b, 0xa4, 0xca, 0xcf, 0x74, 0x58, 0x96, 0xfa, 0x3d, + 0xdf, 0x0d, 0xac, 0xb0, 0x92, 0x97, 0x8a, 0x2e, 0x69, 0xba, 0xbc, 0xc0, 0x0a, 0x3f, 0x20, 0x55, + 0x7e, 0x7a, 0xad, 0x4b, 0x9a, 0xae, 0x6a, 0x60, 0x85, 0x38, 0x2f, 0xf5, 0x7b, 0x66, 0x11, 0xe3, + 0x94, 0x47, 0xca, 0xaf, 0x05, 0x56, 0x68, 0x13, 0x8f, 0xbf, 0x80, 0xc8, 0xf4, 0xc4, 0x3c, 0x1b, + 0xa5, 0xd4, 0xaf, 0x07, 0x56, 0x68, 0x91, 0x2a, 0x3f, 0xd2, 0x61, 0x6e, 0xa7, 0x44, 0xc2, 0x26, + 0x3e, 0x0a, 0xac, 0xb0, 0x0e, 0x76, 0x3a, 0x34, 0x76, 0xa3, 0xb9, 0xa2, 0xd2, 0x6f, 0x04, 0x56, + 0xb8, 0x49, 0x3c, 0x7e, 0x08, 0x51, 0xfb, 0x4f, 0x0b, 0xa1, 0x13, 0xce, 0x5e, 0x24, 0x2c, 0x51, + 0x54, 0xe2, 0xbb, 0xc8, 0x1d, 0x0f, 0x59, 0xc4, 0xf4, 0x56, 0xd9, 0xa4, 0x32, 0x3e, 0x89, 0x18, + 0x6c, 0xe0, 0x78, 0x38, 0x4b, 0xd8, 0x58, 0x6f, 0x94, 0x4d, 0xdc, 0xf1, 0xcf, 0x09, 0x1b, 0x9b, + 0x34, 0x83, 0xb4, 0x93, 0xa7, 0x4f, 0x20, 0x7d, 0x17, 0xb9, 0xb1, 0xb6, 0xa8, 0xe8, 0xd5, 0x55, + 0xe2, 0xdc, 0x22, 0x36, 0x16, 0xae, 0xce, 0xba, 0x71, 0x61, 0x11, 0x1b, 0x0b, 0x2f, 0x4f, 0x83, + 0x45, 0xfb, 0x1f, 0x1b, 0x55, 0x09, 0x9d, 0xd1, 0x48, 0x49, 0x90, 0x88, 0x82, 0x9e, 0x03, 0xf4, + 0x44, 0x41, 0x4f, 0x2c, 0xe8, 0x39, 0x40, 0x4f, 0x2c, 0xe8, 0x89, 0x05, 0x3d, 0x07, 0xe8, 0x89, + 0x05, 0x3d, 0x51, 0xd2, 0x73, 0x80, 0x9e, 0x28, 0xe9, 0x89, 0x92, 0x9e, 0x03, 0xf4, 0x44, 0x49, + 0x4f, 0x94, 0xf4, 0x1c, 0xa0, 0x27, 0x4e, 0xaf, 0x75, 0x2d, 0xe8, 0x39, 0x40, 0x4f, 0x94, 0xf4, + 0xc4, 0x82, 0x9e, 0x03, 0xf4, 0xc4, 0x82, 0x9e, 0x28, 0xe9, 0x39, 0x40, 0x4f, 0x94, 0xf4, 0x44, + 0x49, 0xcf, 0x01, 0x7a, 0xa2, 0xa4, 0x27, 0x16, 0xf4, 0x1c, 0xa0, 0x27, 0x0c, 0xbd, 0x7f, 0x6d, + 0xe4, 0xbd, 0x49, 0xe2, 0x09, 0x55, 0xf8, 0x31, 0x72, 0xcf, 0x79, 0xca, 0x85, 0x26, 0xb7, 0xdd, + 0xdd, 0xe9, 0x98, 0x2b, 0xda, 0x31, 0xe5, 0xce, 0x73, 0xa8, 0x11, 0x23, 0xc1, 0x4f, 0xc0, 0xcf, + 0xa8, 0x61, 0xf3, 0xd6, 0xa9, 0x3d, 0xa1, 0xff, 0xe2, 0x47, 0xc8, 0x93, 0xfa, 0x2a, 0xe9, 0x53, + 0xd5, 0xe8, 0x6e, 0x17, 0x6a, 0x73, 0xc1, 0x48, 0x5e, 0xc5, 0x5f, 0x98, 0x0d, 0xd1, 0x4a, 0x58, + 0xe7, 0xaa, 0x12, 0x36, 0x28, 0x97, 0x56, 0x85, 0x01, 0xec, 0xef, 0x68, 0xcf, 0x3b, 0x85, 0x32, + 0xe7, 0x4e, 0x8a, 0x3a, 0xfe, 0x0a, 0xd5, 0xc5, 0xb0, 0x10, 0xdf, 0xd3, 0xb6, 0x2b, 0xe2, 0x9a, + 0xc8, 0x7f, 0xb5, 0x3f, 0x43, 0xae, 0x59, 0x74, 0x15, 0x39, 0x64, 0x70, 0xd4, 0xdc, 0xc0, 0x75, + 0xe4, 0x7e, 
0x4f, 0x06, 0x83, 0x93, 0xa6, 0x85, 0x6b, 0xa8, 0x72, 0xf8, 0xea, 0x6c, 0xd0, 0xb4, + 0xdb, 0x7f, 0xd9, 0xa8, 0x72, 0x1c, 0xcd, 0x24, 0xfe, 0x16, 0x35, 0xa6, 0xe6, 0xb8, 0xc0, 0xde, + 0xeb, 0x33, 0xd6, 0xe8, 0x7e, 0x58, 0xf8, 0x83, 0xa4, 0x73, 0xac, 0xcf, 0xcf, 0xa9, 0x12, 0x03, + 0xa6, 0xc4, 0x9c, 0xd4, 0xa7, 0x45, 0x8c, 0x0f, 0xd0, 0xd6, 0x54, 0x9f, 0xcd, 0xe2, 0xab, 0x6d, + 0xdd, 0xfe, 0xd1, 0xcd, 0x76, 0x38, 0xaf, 0xe6, 0xb3, 0x8d, 0x41, 0x63, 0x5a, 0x66, 0x5a, 0xdf, + 0xa1, 0xed, 0x9b, 0xfe, 0xb8, 0x89, 0x9c, 0x5f, 0xe9, 0x5c, 0x63, 0x74, 0x08, 0xfc, 0xc4, 0x3b, + 0xc8, 0xbd, 0x88, 0xd2, 0x8c, 0xea, 0xeb, 0x57, 0x27, 0x26, 0x78, 0x66, 0x7f, 0x63, 0xb5, 0x4e, + 0x50, 0x73, 0xd9, 0xfe, 0x7a, 0x7f, 0xcd, 0xf4, 0x3f, 0xbc, 0xde, 0xbf, 0x0a, 0xa5, 0xf4, 0x6b, + 0xff, 0x61, 0xa1, 0xcd, 0x63, 0x39, 0x79, 0x93, 0xa8, 0xb7, 0x3f, 0x31, 0xca, 0xc7, 0xf8, 0x3e, + 0x72, 0x55, 0xa2, 0x52, 0xaa, 0xed, 0xea, 0x2f, 0x37, 0x88, 0x09, 0xb1, 0x8f, 0x3c, 0x19, 0xa5, + 0x91, 0x98, 0x6b, 0x4f, 0xe7, 0xe5, 0x06, 0xc9, 0x63, 0xdc, 0x42, 0xd5, 0xe7, 0x3c, 0x83, 0x95, + 0xe8, 0x67, 0x01, 0x7a, 0x8a, 0x04, 0xfe, 0x14, 0x6d, 0xbe, 0xe5, 0x53, 0x3a, 0x8c, 0xe2, 0x58, + 0x50, 0x29, 0xf5, 0x0b, 0x01, 0x82, 0x06, 0x64, 0x0f, 0x4c, 0xf2, 0xb0, 0x8a, 0xdc, 0x8c, 0x25, + 0x9c, 0xb5, 0x1f, 0xa1, 0x0a, 0xa1, 0x51, 0x5a, 0x7e, 0xbe, 0x65, 0xde, 0x08, 0x1d, 0x3c, 0xae, + 0xd5, 0xe2, 0xe6, 0xd5, 0xd5, 0xd5, 0x95, 0xdd, 0xbe, 0x84, 0xff, 0x08, 0x5f, 0xf2, 0x1e, 0xef, + 0xa1, 0x7a, 0x32, 0x8d, 0x26, 0x09, 0x83, 0x95, 0x19, 0x79, 0x99, 0x28, 0x5b, 0xba, 0x47, 0x68, + 0x5b, 0xd0, 0x28, 0x1d, 0xd2, 0xf7, 0x8a, 0x32, 0x99, 0x70, 0x86, 0x37, 0xcb, 0x23, 0x15, 0xa5, + 0xfe, 0x6f, 0x37, 0xcf, 0x64, 0x6e, 0x4f, 0xb6, 0xa0, 0x69, 0x50, 0xf4, 0xb4, 0xff, 0x73, 0x11, + 0xfa, 0x91, 0xf1, 0x4b, 0xf6, 0x7a, 0x3e, 0xa3, 0x12, 0x3f, 0x44, 0x76, 0xc4, 0xfc, 0x6d, 0xdd, + 0xba, 0xd3, 0x31, 0xf3, 0xa9, 0x53, 0xcc, 0xa7, 0xce, 0x01, 0x9b, 0x13, 0x3b, 0x62, 0xf8, 0x4b, + 0xe4, 0xc4, 0x99, 0xb9, 0xa5, 0x8d, 0xee, 0xee, 0x8a, 0xec, 0x28, 0x9f, 0x92, 0x04, 0x54, 0xf8, + 0x73, 0x64, 0x4b, 0xe5, 0x6f, 0x6a, 0xed, 0x83, 0x15, 0xed, 0xa9, 0x9e, 0x98, 0xc4, 0x96, 0x70, + 0xfb, 0x6d, 0x25, 0x73, 0xbe, 0xad, 0x15, 0xe1, 0xeb, 0x62, 0x78, 0x12, 0x5b, 0x49, 0xd0, 0xa6, + 0x17, 0xfe, 0x9d, 0x35, 0xda, 0x57, 0x89, 0x54, 0xbf, 0xc0, 0x0e, 0x13, 0x3b, 0xbd, 0xc0, 0x21, + 0x72, 0x2e, 0xa2, 0xd4, 0x6f, 0x6a, 0xf1, 0xfd, 0x15, 0xb1, 0x11, 0x82, 0x04, 0x77, 0x90, 0x13, + 0x8f, 0x52, 0xcd, 0xbc, 0xd1, 0xdd, 0x5b, 0xfd, 0x2e, 0xfd, 0xc8, 0xe5, 0xfa, 0x78, 0x94, 0xe2, + 0x27, 0xc8, 0x19, 0xa7, 0x4a, 0x1f, 0x01, 0xb8, 0x70, 0xcb, 0x7a, 0xfd, 0x5c, 0xe6, 0xf2, 0x71, + 0xaa, 0x40, 0x9e, 0xe4, 0xb3, 0xf5, 0x36, 0xb9, 0xbe, 0x42, 0xb9, 0x3c, 0xe9, 0xf7, 0x60, 0x35, + 0x59, 0xbf, 0xa7, 0xa7, 0xca, 0x6d, 0xab, 0x39, 0xbb, 0xae, 0xcf, 0xfa, 0x3d, 0x6d, 0xbf, 0xdf, + 0xd5, 0x43, 0x78, 0x8d, 0xfd, 0x7e, 0xb7, 0xb0, 0xdf, 0xef, 0x6a, 0xfb, 0xfd, 0xae, 0x9e, 0xcc, + 0xeb, 0xec, 0x17, 0xfa, 0x4c, 0xeb, 0x2b, 0x7a, 0x84, 0xd5, 0xd7, 0x6c, 0x3a, 0xdc, 0x61, 0x23, + 0xd7, 0x3a, 0xf0, 0x87, 0xd7, 0x08, 0xad, 0xf1, 0x37, 0x63, 0x21, 0xf7, 0x97, 0x4a, 0xe0, 0xaf, + 0x91, 0x5b, 0x0e, 0xf7, 0xdb, 0x3e, 0x40, 0x8f, 0x0b, 0xd3, 0x60, 0x94, 0xcf, 0x02, 0x54, 0x61, + 0xd1, 0x94, 0x2e, 0x1d, 0xfc, 0xdf, 0xf5, 0x0b, 0xa3, 0x2b, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, + 0xd5, 0x39, 0x32, 0x09, 0xf9, 0x09, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto new file 
mode 100644 index 0000000..0d2fc1f --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto @@ -0,0 +1,147 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; + +package jsonpb; + +// Test message for holding primitive types. +message Simple { + optional bool o_bool = 1; + optional int32 o_int32 = 2; + optional int64 o_int64 = 3; + optional uint32 o_uint32 = 4; + optional uint64 o_uint64 = 5; + optional sint32 o_sint32 = 6; + optional sint64 o_sint64 = 7; + optional float o_float = 8; + optional double o_double = 9; + optional string o_string = 10; + optional bytes o_bytes = 11; +} + +// Test message for holding special non-finites primitives. +message NonFinites { + optional float f_nan = 1; + optional float f_pinf = 2; + optional float f_ninf = 3; + optional double d_nan = 4; + optional double d_pinf = 5; + optional double d_ninf = 6; +} + +// Test message for holding repeated primitives. +message Repeats { + repeated bool r_bool = 1; + repeated int32 r_int32 = 2; + repeated int64 r_int64 = 3; + repeated uint32 r_uint32 = 4; + repeated uint64 r_uint64 = 5; + repeated sint32 r_sint32 = 6; + repeated sint64 r_sint64 = 7; + repeated float r_float = 8; + repeated double r_double = 9; + repeated string r_string = 10; + repeated bytes r_bytes = 11; +} + +// Test message for holding enums and nested messages. 
+message Widget { + enum Color { + RED = 0; + GREEN = 1; + BLUE = 2; + }; + optional Color color = 1; + repeated Color r_color = 2; + + optional Simple simple = 10; + repeated Simple r_simple = 11; + + optional Repeats repeats = 20; + repeated Repeats r_repeats = 21; +} + +message Maps { + map<int64, string> m_int64_str = 1; + map<bool, Simple> m_bool_simple = 2; +} + +message MsgWithOneof { + oneof union { + string title = 1; + int64 salary = 2; + string Country = 3; + string home_address = 4; + } +} + +message Real { + optional double value = 1; + extensions 100 to max; +} + +extend Real { + optional string name = 124; +} + +message Complex { + extend Real { + optional Complex real_extension = 123; + } + optional double imaginary = 1; + extensions 100 to max; +} + +message KnownTypes { + optional google.protobuf.Any an = 14; + optional google.protobuf.Duration dur = 1; + optional google.protobuf.Struct st = 12; + optional google.protobuf.Timestamp ts = 2; + optional google.protobuf.ListValue lv = 15; + optional google.protobuf.Value val = 16; + + optional google.protobuf.DoubleValue dbl = 3; + optional google.protobuf.FloatValue flt = 4; + optional google.protobuf.Int64Value i64 = 5; + optional google.protobuf.UInt64Value u64 = 6; + optional google.protobuf.Int32Value i32 = 7; + optional google.protobuf.UInt32Value u32 = 8; + optional google.protobuf.BoolValue bool = 9; + optional google.protobuf.StringValue str = 10; + optional google.protobuf.BytesValue bytes = 11; +} diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile new file mode 100644 index 0000000..e2e0651 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/Makefile @@ -0,0 +1,43 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
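+# A note on the generate-test-pbs target below: each M<file>=<import path>
+# pair passed to protoc via --go_out maps a .proto import to the Go package
+# that provides it, instead of letting protoc-gen-go derive the import path
+# from the proto package name.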
+ +install: + go install + +test: install generate-test-pbs + go test + + +generate-test-pbs: + make install + make -C testdata + protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto + make diff --git a/vendor/github.com/golang/protobuf/proto/all_test.go b/vendor/github.com/golang/protobuf/proto/all_test.go new file mode 100644 index 0000000..41451a4 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/all_test.go @@ -0,0 +1,2278 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "math" + "math/rand" + "reflect" + "runtime/debug" + "strings" + "testing" + "time" + + . "github.com/golang/protobuf/proto" + . "github.com/golang/protobuf/proto/testdata" +) + +var globalO *Buffer + +func old() *Buffer { + if globalO == nil { + globalO = NewBuffer(nil) + } + globalO.Reset() + return globalO +} + +func equalbytes(b1, b2 []byte, t *testing.T) { + if len(b1) != len(b2) { + t.Errorf("wrong lengths: 2*%d != %d", len(b1), len(b2)) + return + } + for i := 0; i < len(b1); i++ { + if b1[i] != b2[i] { + t.Errorf("bad byte[%d]:%x %x: %s %s", i, b1[i], b2[i], b1, b2) + } + } +} + +func initGoTestField() *GoTestField { + f := new(GoTestField) + f.Label = String("label") + f.Type = String("type") + return f +} + +// These are all structurally equivalent but the tag numbers differ. +// (It's remarkable that required, optional, and repeated all have +// 8 letters.) 
+func initGoTest_RequiredGroup() *GoTest_RequiredGroup { + return &GoTest_RequiredGroup{ + RequiredField: String("required"), + } +} + +func initGoTest_OptionalGroup() *GoTest_OptionalGroup { + return &GoTest_OptionalGroup{ + RequiredField: String("optional"), + } +} + +func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup { + return &GoTest_RepeatedGroup{ + RequiredField: String("repeated"), + } +} + +func initGoTest(setdefaults bool) *GoTest { + pb := new(GoTest) + if setdefaults { + pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted) + pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted) + pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted) + pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted) + pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted) + pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted) + pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted) + pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted) + pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted) + pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted) + pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted + pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted) + pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted) + } + + pb.Kind = GoTest_TIME.Enum() + pb.RequiredField = initGoTestField() + pb.F_BoolRequired = Bool(true) + pb.F_Int32Required = Int32(3) + pb.F_Int64Required = Int64(6) + pb.F_Fixed32Required = Uint32(32) + pb.F_Fixed64Required = Uint64(64) + pb.F_Uint32Required = Uint32(3232) + pb.F_Uint64Required = Uint64(6464) + pb.F_FloatRequired = Float32(3232) + pb.F_DoubleRequired = Float64(6464) + pb.F_StringRequired = String("string") + pb.F_BytesRequired = []byte("bytes") + pb.F_Sint32Required = Int32(-32) + pb.F_Sint64Required = Int64(-64) + pb.Requiredgroup = initGoTest_RequiredGroup() + + return pb +} + +func fail(msg string, b *bytes.Buffer, s string, t *testing.T) { + data := b.Bytes() + ld := len(data) + ls := len(s) / 2 + + fmt.Printf("fail %s ld=%d ls=%d\n", msg, ld, ls) + + // find the interesting spot - n + n := ls + if ld < ls { + n = ld + } + j := 0 + for i := 0; i < n; i++ { + bs := hex(s[j])*16 + hex(s[j+1]) + j += 2 + if data[i] == bs { + continue + } + n = i + break + } + l := n - 10 + if l < 0 { + l = 0 + } + h := n + 10 + + // find the interesting spot - n + fmt.Printf("is[%d]:", l) + for i := l; i < h; i++ { + if i >= ld { + fmt.Printf(" --") + continue + } + fmt.Printf(" %.2x", data[i]) + } + fmt.Printf("\n") + + fmt.Printf("sb[%d]:", l) + for i := l; i < h; i++ { + if i >= ls { + fmt.Printf(" --") + continue + } + bs := hex(s[j])*16 + hex(s[j+1]) + j += 2 + fmt.Printf(" %.2x", bs) + } + fmt.Printf("\n") + + t.Fail() + + // t.Errorf("%s: \ngood: %s\nbad: %x", msg, s, b.Bytes()) + // Print the output in a partially-decoded format; can + // be helpful when updating the test. It produces the output + // that is pasted, with minor edits, into the argument to verify(). 
+ // data := b.Bytes() + // nesting := 0 + // for b.Len() > 0 { + // start := len(data) - b.Len() + // var u uint64 + // u, err := DecodeVarint(b) + // if err != nil { + // fmt.Printf("decode error on varint:", err) + // return + // } + // wire := u & 0x7 + // tag := u >> 3 + // switch wire { + // case WireVarint: + // v, err := DecodeVarint(b) + // if err != nil { + // fmt.Printf("decode error on varint:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", + // data[start:len(data)-b.Len()], tag, wire, v) + // case WireFixed32: + // v, err := DecodeFixed32(b) + // if err != nil { + // fmt.Printf("decode error on fixed32:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", + // data[start:len(data)-b.Len()], tag, wire, v) + // case WireFixed64: + // v, err := DecodeFixed64(b) + // if err != nil { + // fmt.Printf("decode error on fixed64:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", + // data[start:len(data)-b.Len()], tag, wire, v) + // case WireBytes: + // nb, err := DecodeVarint(b) + // if err != nil { + // fmt.Printf("decode error on bytes:", err) + // return + // } + // after_tag := len(data) - b.Len() + // str := make([]byte, nb) + // _, err = b.Read(str) + // if err != nil { + // fmt.Printf("decode error on bytes:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" \"%x\" // field %d, encoding %d (FIELD)\n", + // data[start:after_tag], str, tag, wire) + // case WireStartGroup: + // nesting++ + // fmt.Printf("\t\t\"%x\"\t\t// start group field %d level %d\n", + // data[start:len(data)-b.Len()], tag, nesting) + // case WireEndGroup: + // fmt.Printf("\t\t\"%x\"\t\t// end group field %d level %d\n", + // data[start:len(data)-b.Len()], tag, nesting) + // nesting-- + // default: + // fmt.Printf("unrecognized wire type %d\n", wire) + // return + // } + // } +} + +func hex(c uint8) uint8 { + if '0' <= c && c <= '9' { + return c - '0' + } + if 'a' <= c && c <= 'f' { + return 10 + c - 'a' + } + if 'A' <= c && c <= 'F' { + return 10 + c - 'A' + } + return 0 +} + +func equal(b []byte, s string, t *testing.T) bool { + if 2*len(b) != len(s) { + // fail(fmt.Sprintf("wrong lengths: 2*%d != %d", len(b), len(s)), b, s, t) + fmt.Printf("wrong lengths: 2*%d != %d\n", len(b), len(s)) + return false + } + for i, j := 0, 0; i < len(b); i, j = i+1, j+2 { + x := hex(s[j])*16 + hex(s[j+1]) + if b[i] != x { + // fail(fmt.Sprintf("bad byte[%d]:%x %x", i, b[i], x), b, s, t) + fmt.Printf("bad byte[%d]:%x %x", i, b[i], x) + return false + } + } + return true +} + +func overify(t *testing.T, pb *GoTest, expected string) { + o := old() + err := o.Marshal(pb) + if err != nil { + fmt.Printf("overify marshal-1 err = %v", err) + o.DebugPrint("", o.Bytes()) + t.Fatalf("expected = %s", expected) + } + if !equal(o.Bytes(), expected, t) { + o.DebugPrint("overify neq 1", o.Bytes()) + t.Fatalf("expected = %s", expected) + } + + // Now test Unmarshal by recreating the original buffer. 
+ pbd := new(GoTest)
+ err = o.Unmarshal(pbd)
+ if err != nil {
+ o.DebugPrint("", o.Bytes())
+ t.Fatalf("overify unmarshal err = %v; expected = %s", err, expected)
+ }
+ o.Reset()
+ err = o.Marshal(pbd)
+ if err != nil {
+ o.DebugPrint("", o.Bytes())
+ t.Fatalf("overify marshal-2 err = %v; expected = %s", err, expected)
+ }
+ if !equal(o.Bytes(), expected, t) {
+ o.DebugPrint("overify neq 2", o.Bytes())
+ t.Fatalf("string = %s", expected)
+ }
+}
+
+// Simple tests for numeric encode/decode primitives (varint, etc.)
+func TestNumericPrimitives(t *testing.T) {
+ for i := uint64(0); i < 1e6; i += 111 {
+ o := old()
+ if o.EncodeVarint(i) != nil {
+ t.Fatal("EncodeVarint")
+ }
+ x, e := o.DecodeVarint()
+ if e != nil {
+ t.Fatal("DecodeVarint")
+ }
+ if x != i {
+ t.Fatal("varint decode fail:", i, x)
+ }
+
+ o = old()
+ if o.EncodeFixed32(i) != nil {
+ t.Fatal("encFixed32")
+ }
+ x, e = o.DecodeFixed32()
+ if e != nil {
+ t.Fatal("decFixed32")
+ }
+ if x != i {
+ t.Fatal("fixed32 decode fail:", i, x)
+ }
+
+ o = old()
+ if o.EncodeFixed64(i*1234567) != nil {
+ t.Fatal("encFixed64")
+ }
+ x, e = o.DecodeFixed64()
+ if e != nil {
+ t.Fatal("decFixed64")
+ }
+ if x != i*1234567 {
+ t.Fatal("fixed64 decode fail:", i*1234567, x)
+ }
+
+ o = old()
+ i32 := int32(i - 12345)
+ if o.EncodeZigzag32(uint64(i32)) != nil {
+ t.Fatal("EncodeZigzag32")
+ }
+ x, e = o.DecodeZigzag32()
+ if e != nil {
+ t.Fatal("DecodeZigzag32")
+ }
+ if x != uint64(uint32(i32)) {
+ t.Fatal("zigzag32 decode fail:", i32, x)
+ }
+
+ o = old()
+ i64 := int64(i - 12345)
+ if o.EncodeZigzag64(uint64(i64)) != nil {
+ t.Fatal("EncodeZigzag64")
+ }
+ x, e = o.DecodeZigzag64()
+ if e != nil {
+ t.Fatal("DecodeZigzag64")
+ }
+ if x != uint64(i64) {
+ t.Fatal("zigzag64 decode fail:", i64, x)
+ }
+ }
+}
+
+// fakeMarshaler is a simple struct implementing the Marshaler and Message interfaces.
+type fakeMarshaler struct {
+ b []byte
+ err error
+}
+
+func (f *fakeMarshaler) Marshal() ([]byte, error) { return f.b, f.err }
+func (f *fakeMarshaler) String() string { return fmt.Sprintf("Bytes: %v Error: %v", f.b, f.err) }
+func (f *fakeMarshaler) ProtoMessage() {}
+func (f *fakeMarshaler) Reset() {}
+
+type msgWithFakeMarshaler struct {
+ M *fakeMarshaler `protobuf:"bytes,1,opt,name=fake"`
+}
+
+func (m *msgWithFakeMarshaler) String() string { return CompactTextString(m) }
+func (m *msgWithFakeMarshaler) ProtoMessage() {}
+func (m *msgWithFakeMarshaler) Reset() {}
+
+// Simple tests for proto messages that implement the Marshaler interface.
+func TestMarshalerEncoding(t *testing.T) {
+ tests := []struct {
+ name string
+ m Message
+ want []byte
+ errType reflect.Type
+ }{
+ {
+ name: "Marshaler that fails",
+ m: &fakeMarshaler{
+ err: errors.New("some marshal err"),
+ b: []byte{5, 6, 7},
+ },
+ // Since the Marshal method returned bytes, they should be written to the
+ // buffer. (For efficiency, we assume that Marshal implementations are
+ // always correct w.r.t. RequiredNotSetError and output.)
+ want: []byte{5, 6, 7},
+ errType: reflect.TypeOf(errors.New("some marshal err")),
+ },
+ {
+ name: "Marshaler that fails with RequiredNotSetError",
+ m: &msgWithFakeMarshaler{
+ M: &fakeMarshaler{
+ err: &RequiredNotSetError{},
+ b: []byte{5, 6, 7},
+ },
+ },
+ // Since there's an error that can be continued after,
+ // the buffer should be written.
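+ // "10, 3" below is the key byte 0x0a (field 1, wire type 2) followed by
+ // the length 3 of the fake payload {5, 6, 7}.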
+ want: []byte{ + 10, 3, // for &msgWithFakeMarshaler + 5, 6, 7, // for &fakeMarshaler + }, + errType: reflect.TypeOf(&RequiredNotSetError{}), + }, + { + name: "Marshaler that succeeds", + m: &fakeMarshaler{ + b: []byte{0, 1, 2, 3, 4, 127, 255}, + }, + want: []byte{0, 1, 2, 3, 4, 127, 255}, + }, + } + for _, test := range tests { + b := NewBuffer(nil) + err := b.Marshal(test.m) + if reflect.TypeOf(err) != test.errType { + t.Errorf("%s: got err %T(%v) wanted %T", test.name, err, err, test.errType) + } + if !reflect.DeepEqual(test.want, b.Bytes()) { + t.Errorf("%s: got bytes %v wanted %v", test.name, b.Bytes(), test.want) + } + if size := Size(test.m); size != len(b.Bytes()) { + t.Errorf("%s: Size(_) = %v, but marshaled to %v bytes", test.name, size, len(b.Bytes())) + } + + m, mErr := Marshal(test.m) + if !bytes.Equal(b.Bytes(), m) { + t.Errorf("%s: Marshal returned %v, but (*Buffer).Marshal wrote %v", test.name, m, b.Bytes()) + } + if !reflect.DeepEqual(err, mErr) { + t.Errorf("%s: Marshal err = %q, but (*Buffer).Marshal returned %q", + test.name, fmt.Sprint(mErr), fmt.Sprint(err)) + } + } +} + +// Simple tests for bytes +func TestBytesPrimitives(t *testing.T) { + o := old() + bytes := []byte{'n', 'o', 'w', ' ', 'i', 's', ' ', 't', 'h', 'e', ' ', 't', 'i', 'm', 'e'} + if o.EncodeRawBytes(bytes) != nil { + t.Error("EncodeRawBytes") + } + decb, e := o.DecodeRawBytes(false) + if e != nil { + t.Error("DecodeRawBytes") + } + equalbytes(bytes, decb, t) +} + +// Simple tests for strings +func TestStringPrimitives(t *testing.T) { + o := old() + s := "now is the time" + if o.EncodeStringBytes(s) != nil { + t.Error("enc_string") + } + decs, e := o.DecodeStringBytes() + if e != nil { + t.Error("dec_string") + } + if s != decs { + t.Error("string encode/decode fail:", s, decs) + } +} + +// Do we catch the "required bit not set" case? +func TestRequiredBit(t *testing.T) { + o := old() + pb := new(GoTest) + err := o.Marshal(pb) + if err == nil { + t.Error("did not catch missing required fields") + } else if strings.Index(err.Error(), "Kind") < 0 { + t.Error("wrong error type:", err) + } +} + +// Check that all fields are nil. +// Clearly silly, and a residue from a more interesting test with an earlier, +// different initialization property, but it once caught a compiler bug so +// it lives. 
+func checkInitialized(pb *GoTest, t *testing.T) { + if pb.F_BoolDefaulted != nil { + t.Error("New or Reset did not set boolean:", *pb.F_BoolDefaulted) + } + if pb.F_Int32Defaulted != nil { + t.Error("New or Reset did not set int32:", *pb.F_Int32Defaulted) + } + if pb.F_Int64Defaulted != nil { + t.Error("New or Reset did not set int64:", *pb.F_Int64Defaulted) + } + if pb.F_Fixed32Defaulted != nil { + t.Error("New or Reset did not set fixed32:", *pb.F_Fixed32Defaulted) + } + if pb.F_Fixed64Defaulted != nil { + t.Error("New or Reset did not set fixed64:", *pb.F_Fixed64Defaulted) + } + if pb.F_Uint32Defaulted != nil { + t.Error("New or Reset did not set uint32:", *pb.F_Uint32Defaulted) + } + if pb.F_Uint64Defaulted != nil { + t.Error("New or Reset did not set uint64:", *pb.F_Uint64Defaulted) + } + if pb.F_FloatDefaulted != nil { + t.Error("New or Reset did not set float:", *pb.F_FloatDefaulted) + } + if pb.F_DoubleDefaulted != nil { + t.Error("New or Reset did not set double:", *pb.F_DoubleDefaulted) + } + if pb.F_StringDefaulted != nil { + t.Error("New or Reset did not set string:", *pb.F_StringDefaulted) + } + if pb.F_BytesDefaulted != nil { + t.Error("New or Reset did not set bytes:", string(pb.F_BytesDefaulted)) + } + if pb.F_Sint32Defaulted != nil { + t.Error("New or Reset did not set int32:", *pb.F_Sint32Defaulted) + } + if pb.F_Sint64Defaulted != nil { + t.Error("New or Reset did not set int64:", *pb.F_Sint64Defaulted) + } +} + +// Does Reset() reset? +func TestReset(t *testing.T) { + pb := initGoTest(true) + // muck with some values + pb.F_BoolDefaulted = Bool(false) + pb.F_Int32Defaulted = Int32(237) + pb.F_Int64Defaulted = Int64(12346) + pb.F_Fixed32Defaulted = Uint32(32000) + pb.F_Fixed64Defaulted = Uint64(666) + pb.F_Uint32Defaulted = Uint32(323232) + pb.F_Uint64Defaulted = nil + pb.F_FloatDefaulted = nil + pb.F_DoubleDefaulted = Float64(0) + pb.F_StringDefaulted = String("gotcha") + pb.F_BytesDefaulted = []byte("asdfasdf") + pb.F_Sint32Defaulted = Int32(123) + pb.F_Sint64Defaulted = Int64(789) + pb.Reset() + checkInitialized(pb, t) +} + +// All required fields set, no defaults provided. +func TestEncodeDecode1(t *testing.T) { + pb := initGoTest(false) + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 0x20 + "714000000000000000"+ // field 14, encoding 1, value 0x40 + "78a019"+ // field 15, encoding 0, value 0xca0 = 3232 + "8001c032"+ // field 16, encoding 0, value 0x1940 = 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2, string "string" + "b304"+ // field 70, encoding 3, start group + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // field 70, encoding 4, end group + "aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f") // field 103, encoding 0, 0x7f zigzag64 +} + +// All required fields set, defaults provided. 
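+// As in TestEncodeDecode1, the expected value is a hex string; each inline
+// comment gives the field number, the wire type ("encoding"), and the
+// decoded value for the bytes on that line.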
+func TestEncodeDecode2(t *testing.T) { + pb := initGoTest(true) + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All default fields set to their default value by hand +func TestEncodeDecode3(t *testing.T) { + pb := initGoTest(false) + pb.F_BoolDefaulted = Bool(true) + pb.F_Int32Defaulted = Int32(32) + pb.F_Int64Defaulted = Int64(64) + pb.F_Fixed32Defaulted = Uint32(320) + pb.F_Fixed64Defaulted = Uint64(640) + pb.F_Uint32Defaulted = Uint32(3200) + pb.F_Uint64Defaulted = Uint64(6400) + pb.F_FloatDefaulted = Float32(314159) + pb.F_DoubleDefaulted = Float64(271828) + pb.F_StringDefaulted = String("hello, \"world!\"\n") + pb.F_BytesDefaulted = []byte("Bignose") + pb.F_Sint32Defaulted = Int32(-32) + pb.F_Sint64Defaulted = Int64(-64) + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 
271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All required fields set, defaults provided, all non-defaulted optional fields have values. +func TestEncodeDecode4(t *testing.T) { + pb := initGoTest(true) + pb.Table = String("hello") + pb.Param = Int32(7) + pb.OptionalField = initGoTestField() + pb.F_BoolOptional = Bool(true) + pb.F_Int32Optional = Int32(32) + pb.F_Int64Optional = Int64(64) + pb.F_Fixed32Optional = Uint32(3232) + pb.F_Fixed64Optional = Uint64(6464) + pb.F_Uint32Optional = Uint32(323232) + pb.F_Uint64Optional = Uint64(646464) + pb.F_FloatOptional = Float32(32.) + pb.F_DoubleOptional = Float64(64.) + pb.F_StringOptional = String("hello") + pb.F_BytesOptional = []byte("Bignose") + pb.F_Sint32Optional = Int32(-32) + pb.F_Sint64Optional = Int64(-64) + pb.Optionalgroup = initGoTest_OptionalGroup() + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello" + "1807"+ // field 3, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "f00101"+ // field 30, encoding 0, value 1 + "f80120"+ // field 31, encoding 0, value 32 + "800240"+ // field 32, encoding 0, value 64 + "8d02a00c0000"+ // field 33, encoding 5, value 3232 + "91024019000000000000"+ // field 34, encoding 1, value 6464 + "9802a0dd13"+ // field 35, encoding 0, value 323232 + "a002c0ba27"+ // field 36, encoding 0, value 646464 + "ad0200000042"+ // field 37, encoding 5, value 32.0 + "b1020000000000005040"+ // field 38, encoding 1, value 64.0 + "ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "d305"+ // start group field 90 level 1 + 
"da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional" + "d405"+ // end group field 90 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose" + "f0123f"+ // field 302, encoding 0, value 63 + "f8127f"+ // field 303, encoding 0, value 127 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All required fields set, defaults provided, all repeated fields given two values. +func TestEncodeDecode5(t *testing.T) { + pb := initGoTest(true) + pb.RepeatedField = []*GoTestField{initGoTestField(), initGoTestField()} + pb.F_BoolRepeated = []bool{false, true} + pb.F_Int32Repeated = []int32{32, 33} + pb.F_Int64Repeated = []int64{64, 65} + pb.F_Fixed32Repeated = []uint32{3232, 3333} + pb.F_Fixed64Repeated = []uint64{6464, 6565} + pb.F_Uint32Repeated = []uint32{323232, 333333} + pb.F_Uint64Repeated = []uint64{646464, 656565} + pb.F_FloatRepeated = []float32{32., 33.} + pb.F_DoubleRepeated = []float64{64., 65.} + pb.F_StringRepeated = []string{"hello", "sailor"} + pb.F_BytesRepeated = [][]byte{[]byte("big"), []byte("nose")} + pb.F_Sint32Repeated = []int32{32, -32} + pb.F_Sint64Repeated = []int64{64, -64} + pb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()} + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) + "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "a00100"+ // field 20, encoding 0, value 0 + "a00101"+ // field 20, encoding 0, value 1 + "a80120"+ // field 21, encoding 0, value 32 + "a80121"+ // field 21, encoding 0, value 33 + "b00140"+ // field 22, encoding 0, value 64 + "b00141"+ // field 22, encoding 0, value 65 + "bd01a00c0000"+ // field 23, encoding 5, value 3232 + "bd01050d0000"+ // field 23, encoding 5, value 3333 + "c1014019000000000000"+ // field 24, encoding 1, value 6464 + "c101a519000000000000"+ // field 24, encoding 1, value 6565 + "c801a0dd13"+ // field 25, encoding 0, value 323232 + "c80195ac14"+ // field 25, encoding 0, value 333333 + "d001c0ba27"+ // field 26, encoding 0, value 646464 + "d001b58928"+ // field 26, encoding 0, value 656565 + "dd0100000042"+ // field 27, encoding 5, value 32.0 + "dd0100000442"+ // field 27, encoding 5, value 33.0 + "e1010000000000005040"+ // field 28, encoding 1, value 64.0 + "e1010000000000405040"+ // field 28, encoding 1, value 65.0 + "ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello" + "ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 
42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "8305"+ // start group field 80 level 1 + "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" + "8405"+ // end group field 80 level 1 + "8305"+ // start group field 80 level 1 + "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" + "8405"+ // end group field 80 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "ca0c03"+"626967"+ // field 201, encoding 2, string "big" + "ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose" + "d00c40"+ // field 202, encoding 0, value 32 + "d00c3f"+ // field 202, encoding 0, value -32 + "d80c8001"+ // field 203, encoding 0, value 64 + "d80c7f"+ // field 203, encoding 0, value -64 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All required fields set, all packed repeated fields given two values. +func TestEncodeDecode6(t *testing.T) { + pb := initGoTest(false) + pb.F_BoolRepeatedPacked = []bool{false, true} + pb.F_Int32RepeatedPacked = []int32{32, 33} + pb.F_Int64RepeatedPacked = []int64{64, 65} + pb.F_Fixed32RepeatedPacked = []uint32{3232, 3333} + pb.F_Fixed64RepeatedPacked = []uint64{6464, 6565} + pb.F_Uint32RepeatedPacked = []uint32{323232, 333333} + pb.F_Uint64RepeatedPacked = []uint64{646464, 656565} + pb.F_FloatRepeatedPacked = []float32{32., 33.} + pb.F_DoubleRepeatedPacked = []float64{64., 65.} + pb.F_Sint32RepeatedPacked = []int32{32, -32} + pb.F_Sint64RepeatedPacked = []int64{64, -64} + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1 + "9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33 + "a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65 + "aa0308"+ // field 53, encoding 2, 8 bytes + "a00c0000050d0000"+ // value 3232, value 3333 + "b20310"+ // field 54, encoding 2, 16 bytes + "4019000000000000a519000000000000"+ // value 6464, value 6565 + "ba0306"+ // field 55, encoding 2, 6 bytes + "a0dd1395ac14"+ // value 323232, value 333333 + "c20306"+ // field 56, encoding 2, 6 bytes + "c0ba27b58928"+ // value 646464, value 656565 + "ca0308"+ // field 57, 
encoding 2, 8 bytes
+ "0000004200000442"+ // value 32.0, value 33.0
+ "d20310"+ // field 58, encoding 2, 16 bytes
+ "00000000000050400000000000405040"+ // value 64.0, value 65.0
+ "b304"+ // start group field 70 level 1
+ "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
+ "b404"+ // end group field 70 level 1
+ "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
+ "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
+ "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
+ "b21f02"+ // field 502, encoding 2, 2 bytes
+ "403f"+ // value 32, value -32
+ "ba1f03"+ // field 503, encoding 2, 3 bytes
+ "80017f") // value 64, value -64
+}
+
+// Test that we can encode empty bytes fields.
+func TestEncodeDecodeBytes1(t *testing.T) {
+ pb := initGoTest(false)
+
+ // Create our bytes
+ pb.F_BytesRequired = []byte{}
+ pb.F_BytesRepeated = [][]byte{{}}
+ pb.F_BytesOptional = []byte{}
+
+ d, err := Marshal(pb)
+ if err != nil {
+ t.Error(err)
+ }
+
+ pbd := new(GoTest)
+ if err := Unmarshal(d, pbd); err != nil {
+ t.Error(err)
+ }
+
+ if pbd.F_BytesRequired == nil || len(pbd.F_BytesRequired) != 0 {
+ t.Error("required empty bytes field is incorrect")
+ }
+ if len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil {
+ t.Error("repeated empty bytes field is incorrect")
+ }
+ if pbd.F_BytesOptional == nil || len(pbd.F_BytesOptional) != 0 {
+ t.Error("optional empty bytes field is incorrect")
+ }
+}
+
+// Test that we encode nil-valued fields of a repeated bytes field correctly.
+// Since entries in a repeated field cannot be nil, nil must mean empty value.
+func TestEncodeDecodeBytes2(t *testing.T) {
+ pb := initGoTest(false)
+
+ // Create our bytes
+ pb.F_BytesRepeated = [][]byte{nil}
+
+ d, err := Marshal(pb)
+ if err != nil {
+ t.Error(err)
+ }
+
+ pbd := new(GoTest)
+ if err := Unmarshal(d, pbd); err != nil {
+ t.Error(err)
+ }
+
+ if len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil {
+ t.Error("Unexpected value for repeated bytes field")
+ }
+}
+
+// Check that fields the decoder does not recognize are skipped and preserved
+// in XXX_unrecognized, from which they can be decoded later.
+func TestSkippingUnrecognizedFields(t *testing.T) {
+ o := old()
+ pb := initGoTestField()
+
+ // Marshal it normally.
+ o.Marshal(pb)
+
+ // Now build a GoSkipTest record.
+ skip := &GoSkipTest{
+ SkipInt32: Int32(32),
+ SkipFixed32: Uint32(3232),
+ SkipFixed64: Uint64(6464),
+ SkipString: String("skipper"),
+ Skipgroup: &GoSkipTest_SkipGroup{
+ GroupInt32: Int32(75),
+ GroupString: String("wxyz"),
+ },
+ }
+
+ // Marshal it into the same buffer.
+ o.Marshal(skip)
+
+ pbd := new(GoTestField)
+ o.Unmarshal(pbd)
+
+ // The XXX_unrecognized field should be a marshaling of GoSkipTest.
+ skipd := new(GoSkipTest)
+
+ o.SetBuf(pbd.XXX_unrecognized)
+ o.Unmarshal(skipd)
+
+ if *skipd.SkipInt32 != *skip.SkipInt32 {
+ t.Error("skip int32", skipd.SkipInt32)
+ }
+ if *skipd.SkipFixed32 != *skip.SkipFixed32 {
+ t.Error("skip fixed32", skipd.SkipFixed32)
+ }
+ if *skipd.SkipFixed64 != *skip.SkipFixed64 {
+ t.Error("skip fixed64", skipd.SkipFixed64)
+ }
+ if *skipd.SkipString != *skip.SkipString {
+ t.Error("skip string", *skipd.SkipString)
+ }
+ if *skipd.Skipgroup.GroupInt32 != *skip.Skipgroup.GroupInt32 {
+ t.Error("skip group int32", skipd.Skipgroup.GroupInt32)
+ }
+ if *skipd.Skipgroup.GroupString != *skip.Skipgroup.GroupString {
+ t.Error("skip group string", *skipd.Skipgroup.GroupString)
+ }
+}
+
+// Check that unrecognized fields of a submessage are preserved.
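+// The raw bytes "\x12\x05carbs" below decode as key 0x12 (field 2, wire
+// type 2), length 5, payload "carbs": the FoodGroup value that OldMessage's
+// nested type does not know about.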
+func TestSubmessageUnrecognizedFields(t *testing.T) { + nm := &NewMessage{ + Nested: &NewMessage_Nested{ + Name: String("Nigel"), + FoodGroup: String("carbs"), + }, + } + b, err := Marshal(nm) + if err != nil { + t.Fatalf("Marshal of NewMessage: %v", err) + } + + // Unmarshal into an OldMessage. + om := new(OldMessage) + if err := Unmarshal(b, om); err != nil { + t.Fatalf("Unmarshal to OldMessage: %v", err) + } + exp := &OldMessage{ + Nested: &OldMessage_Nested{ + Name: String("Nigel"), + // normal protocol buffer users should not do this + XXX_unrecognized: []byte("\x12\x05carbs"), + }, + } + if !Equal(om, exp) { + t.Errorf("om = %v, want %v", om, exp) + } + + // Clone the OldMessage. + om = Clone(om).(*OldMessage) + if !Equal(om, exp) { + t.Errorf("Clone(om) = %v, want %v", om, exp) + } + + // Marshal the OldMessage, then unmarshal it into an empty NewMessage. + if b, err = Marshal(om); err != nil { + t.Fatalf("Marshal of OldMessage: %v", err) + } + t.Logf("Marshal(%v) -> %q", om, b) + nm2 := new(NewMessage) + if err := Unmarshal(b, nm2); err != nil { + t.Fatalf("Unmarshal to NewMessage: %v", err) + } + if !Equal(nm, nm2) { + t.Errorf("NewMessage round-trip: %v => %v", nm, nm2) + } +} + +// Check that an int32 field can be upgraded to an int64 field. +func TestNegativeInt32(t *testing.T) { + om := &OldMessage{ + Num: Int32(-1), + } + b, err := Marshal(om) + if err != nil { + t.Fatalf("Marshal of OldMessage: %v", err) + } + + // Check the size. It should be 11 bytes; + // 1 for the field/wire type, and 10 for the negative number. + if len(b) != 11 { + t.Errorf("%v marshaled as %q, wanted 11 bytes", om, b) + } + + // Unmarshal into a NewMessage. + nm := new(NewMessage) + if err := Unmarshal(b, nm); err != nil { + t.Fatalf("Unmarshal to NewMessage: %v", err) + } + want := &NewMessage{ + Num: Int64(-1), + } + if !Equal(nm, want) { + t.Errorf("nm = %v, want %v", nm, want) + } +} + +// Check that we can grow an array (repeated field) to have many elements. +// This test doesn't depend only on our encoding; for variety, it makes sure +// we create, encode, and decode the correct contents explicitly. It's therefore +// a bit messier. +// This test also uses (and hence tests) the Marshal/Unmarshal functions +// instead of the methods. +func TestBigRepeated(t *testing.T) { + pb := initGoTest(true) + + // Create the arrays + const N = 50 // Internally the library starts much smaller. + pb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N) + pb.F_Sint64Repeated = make([]int64, N) + pb.F_Sint32Repeated = make([]int32, N) + pb.F_BytesRepeated = make([][]byte, N) + pb.F_StringRepeated = make([]string, N) + pb.F_DoubleRepeated = make([]float64, N) + pb.F_FloatRepeated = make([]float32, N) + pb.F_Uint64Repeated = make([]uint64, N) + pb.F_Uint32Repeated = make([]uint32, N) + pb.F_Fixed64Repeated = make([]uint64, N) + pb.F_Fixed32Repeated = make([]uint32, N) + pb.F_Int64Repeated = make([]int64, N) + pb.F_Int32Repeated = make([]int32, N) + pb.F_BoolRepeated = make([]bool, N) + pb.RepeatedField = make([]*GoTestField, N) + + // Fill in the arrays with checkable values. 
+ igtf := initGoTestField()
+ igtrg := initGoTest_RepeatedGroup()
+ for i := 0; i < N; i++ {
+ pb.Repeatedgroup[i] = igtrg
+ pb.F_Sint64Repeated[i] = int64(i)
+ pb.F_Sint32Repeated[i] = int32(i)
+ s := fmt.Sprint(i)
+ pb.F_BytesRepeated[i] = []byte(s)
+ pb.F_StringRepeated[i] = s
+ pb.F_DoubleRepeated[i] = float64(i)
+ pb.F_FloatRepeated[i] = float32(i)
+ pb.F_Uint64Repeated[i] = uint64(i)
+ pb.F_Uint32Repeated[i] = uint32(i)
+ pb.F_Fixed64Repeated[i] = uint64(i)
+ pb.F_Fixed32Repeated[i] = uint32(i)
+ pb.F_Int64Repeated[i] = int64(i)
+ pb.F_Int32Repeated[i] = int32(i)
+ pb.F_BoolRepeated[i] = i%2 == 0
+ pb.RepeatedField[i] = igtf
+ }
+
+ // Marshal.
+ buf, err := Marshal(pb)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+
+ // Now test Unmarshal by recreating the original buffer.
+ pbd := new(GoTest)
+ if err := Unmarshal(buf, pbd); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+
+ // Check the checkable values.
+ for i := uint64(0); i < N; i++ {
+ if pbd.Repeatedgroup[i] == nil { // TODO: more checking?
+ t.Error("pbd.Repeatedgroup bad")
+ }
+ var x uint64
+ x = uint64(pbd.F_Sint64Repeated[i])
+ if x != i {
+ t.Error("pbd.F_Sint64Repeated bad", x, i)
+ }
+ x = uint64(pbd.F_Sint32Repeated[i])
+ if x != i {
+ t.Error("pbd.F_Sint32Repeated bad", x, i)
+ }
+ s := fmt.Sprint(i)
+ equalbytes(pbd.F_BytesRepeated[i], []byte(s), t)
+ if pbd.F_StringRepeated[i] != s {
+ t.Error("pbd.F_StringRepeated bad", pbd.F_StringRepeated[i], i)
+ }
+ x = uint64(pbd.F_DoubleRepeated[i])
+ if x != i {
+ t.Error("pbd.F_DoubleRepeated bad", x, i)
+ }
+ x = uint64(pbd.F_FloatRepeated[i])
+ if x != i {
+ t.Error("pbd.F_FloatRepeated bad", x, i)
+ }
+ x = pbd.F_Uint64Repeated[i]
+ if x != i {
+ t.Error("pbd.F_Uint64Repeated bad", x, i)
+ }
+ x = uint64(pbd.F_Uint32Repeated[i])
+ if x != i {
+ t.Error("pbd.F_Uint32Repeated bad", x, i)
+ }
+ x = pbd.F_Fixed64Repeated[i]
+ if x != i {
+ t.Error("pbd.F_Fixed64Repeated bad", x, i)
+ }
+ x = uint64(pbd.F_Fixed32Repeated[i])
+ if x != i {
+ t.Error("pbd.F_Fixed32Repeated bad", x, i)
+ }
+ x = uint64(pbd.F_Int64Repeated[i])
+ if x != i {
+ t.Error("pbd.F_Int64Repeated bad", x, i)
+ }
+ x = uint64(pbd.F_Int32Repeated[i])
+ if x != i {
+ t.Error("pbd.F_Int32Repeated bad", x, i)
+ }
+ if pbd.F_BoolRepeated[i] != (i%2 == 0) {
+ t.Error("pbd.F_BoolRepeated bad", pbd.F_BoolRepeated[i], i)
+ }
+ if pbd.RepeatedField[i] == nil { // TODO: more checking?
+ t.Error("pbd.RepeatedField bad")
+ }
+ }
+}
+
+// Verify we give a useful message when decoding to the wrong structure type.
+func TestTypeMismatch(t *testing.T) {
+ pb1 := initGoTest(true)
+
+ // Marshal
+ o := old()
+ o.Marshal(pb1)
+
+ // Now Unmarshal it to the wrong type.
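+ // GoTest's field 1 is a varint (the encoding starts "0807"), but
+ // GoTestField's field 1 is a string and expects wire type 2, so the
+ // decode must report a bad wiretype.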
+ pb2 := initGoTestField() + err := o.Unmarshal(pb2) + if err == nil { + t.Error("expected error, got no error") + } else if !strings.Contains(err.Error(), "bad wiretype") { + t.Error("expected bad wiretype error, got", err) + } +} + +func encodeDecode(t *testing.T, in, out Message, msg string) { + buf, err := Marshal(in) + if err != nil { + t.Fatalf("failed marshaling %v: %v", msg, err) + } + if err := Unmarshal(buf, out); err != nil { + t.Fatalf("failed unmarshaling %v: %v", msg, err) + } +} + +func TestPackedNonPackedDecoderSwitching(t *testing.T) { + np, p := new(NonPackedTest), new(PackedTest) + + // non-packed -> packed + np.A = []int32{0, 1, 1, 2, 3, 5} + encodeDecode(t, np, p, "non-packed -> packed") + if !reflect.DeepEqual(np.A, p.B) { + t.Errorf("failed non-packed -> packed; np.A=%+v, p.B=%+v", np.A, p.B) + } + + // packed -> non-packed + np.Reset() + p.B = []int32{3, 1, 4, 1, 5, 9} + encodeDecode(t, p, np, "packed -> non-packed") + if !reflect.DeepEqual(p.B, np.A) { + t.Errorf("failed packed -> non-packed; p.B=%+v, np.A=%+v", p.B, np.A) + } +} + +func TestProto1RepeatedGroup(t *testing.T) { + pb := &MessageList{ + Message: []*MessageList_Message{ + { + Name: String("blah"), + Count: Int32(7), + }, + // NOTE: pb.Message[1] is a nil + nil, + }, + } + + o := old() + err := o.Marshal(pb) + if err == nil || !strings.Contains(err.Error(), "repeated field Message has nil") { + t.Fatalf("unexpected or no error when marshaling: %v", err) + } +} + +// Test that enums work. Checks for a bug introduced by making enums +// named types instead of int32: newInt32FromUint64 would crash with +// a type mismatch in reflect.PointTo. +func TestEnum(t *testing.T) { + pb := new(GoEnum) + pb.Foo = FOO_FOO1.Enum() + o := old() + if err := o.Marshal(pb); err != nil { + t.Fatal("error encoding enum:", err) + } + pb1 := new(GoEnum) + if err := o.Unmarshal(pb1); err != nil { + t.Fatal("error decoding enum:", err) + } + if *pb1.Foo != FOO_FOO1 { + t.Error("expected 7 but got ", *pb1.Foo) + } +} + +// Enum types have String methods. Check that enum fields can be printed. +// We don't care what the value actually is, just as long as it doesn't crash. +func TestPrintingNilEnumFields(t *testing.T) { + pb := new(GoEnum) + _ = fmt.Sprintf("%+v", pb) +} + +// Verify that absent required fields cause Marshal/Unmarshal to return errors. +func TestRequiredFieldEnforcement(t *testing.T) { + pb := new(GoTestField) + _, err := Marshal(pb) + if err == nil { + t.Error("marshal: expected error, got nil") + } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Label") { + t.Errorf("marshal: bad error type: %v", err) + } + + // A slightly sneaky, yet valid, proto. It encodes the same required field twice, + // so simply counting the required fields is insufficient. + // field 1, encoding 2, value "hi" + buf := []byte("\x0A\x02hi\x0A\x02hi") + err = Unmarshal(buf, pb) + if err == nil { + t.Error("unmarshal: expected error, got nil") + } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "{Unknown}") { + t.Errorf("unmarshal: bad error type: %v", err) + } +} + +// Verify that absent required fields in groups cause Marshal/Unmarshal to return errors. 
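+// The two-byte input {11, 12} used below is a bare start-group/end-group
+// pair for field 1 (keys 0x0b and 0x0c): an empty group whose required
+// field is missing.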
+func TestRequiredFieldEnforcementGroups(t *testing.T) { + pb := &GoTestRequiredGroupField{Group: &GoTestRequiredGroupField_Group{}} + if _, err := Marshal(pb); err == nil { + t.Error("marshal: expected error, got nil") + } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.Field") { + t.Errorf("marshal: bad error type: %v", err) + } + + buf := []byte{11, 12} + if err := Unmarshal(buf, pb); err == nil { + t.Error("unmarshal: expected error, got nil") + } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.{Unknown}") { + t.Errorf("unmarshal: bad error type: %v", err) + } +} + +func TestTypedNilMarshal(t *testing.T) { + // A typed nil should return ErrNil and not crash. + { + var m *GoEnum + if _, err := Marshal(m); err != ErrNil { + t.Errorf("Marshal(%#v): got %v, want ErrNil", m, err) + } + } + + { + m := &Communique{Union: &Communique_Msg{nil}} + if _, err := Marshal(m); err == nil || err == ErrNil { + t.Errorf("Marshal(%#v): got %v, want errOneofHasNil", m, err) + } + } +} + +// A type that implements the Marshaler interface, but is not nillable. +type nonNillableInt uint64 + +func (nni nonNillableInt) Marshal() ([]byte, error) { + return EncodeVarint(uint64(nni)), nil +} + +type NNIMessage struct { + nni nonNillableInt +} + +func (*NNIMessage) Reset() {} +func (*NNIMessage) String() string { return "" } +func (*NNIMessage) ProtoMessage() {} + +// A type that implements the Marshaler interface and is nillable. +type nillableMessage struct { + x uint64 +} + +func (nm *nillableMessage) Marshal() ([]byte, error) { + return EncodeVarint(nm.x), nil +} + +type NMMessage struct { + nm *nillableMessage +} + +func (*NMMessage) Reset() {} +func (*NMMessage) String() string { return "" } +func (*NMMessage) ProtoMessage() {} + +// Verify a type that uses the Marshaler interface, but has a nil pointer. +func TestNilMarshaler(t *testing.T) { + // Try a struct with a Marshaler field that is nil. + // It should be directly marshable. + nmm := new(NMMessage) + if _, err := Marshal(nmm); err != nil { + t.Error("unexpected error marshaling nmm: ", err) + } + + // Try a struct with a Marshaler field that is not nillable. + nnim := new(NNIMessage) + nnim.nni = 7 + var _ Marshaler = nnim.nni // verify it is truly a Marshaler + if _, err := Marshal(nnim); err != nil { + t.Error("unexpected error marshaling nnim: ", err) + } +} + +func TestAllSetDefaults(t *testing.T) { + // Exercise SetDefaults with all scalar field types. + m := &Defaults{ + // NaN != NaN, so override that here. + F_Nan: Float32(1.7), + } + expected := &Defaults{ + F_Bool: Bool(true), + F_Int32: Int32(32), + F_Int64: Int64(64), + F_Fixed32: Uint32(320), + F_Fixed64: Uint64(640), + F_Uint32: Uint32(3200), + F_Uint64: Uint64(6400), + F_Float: Float32(314159), + F_Double: Float64(271828), + F_String: String(`hello, "world!"` + "\n"), + F_Bytes: []byte("Bignose"), + F_Sint32: Int32(-32), + F_Sint64: Int64(-64), + F_Enum: Defaults_GREEN.Enum(), + F_Pinf: Float32(float32(math.Inf(1))), + F_Ninf: Float32(float32(math.Inf(-1))), + F_Nan: Float32(1.7), + StrZero: String(""), + } + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("SetDefaults failed\n got %v\nwant %v", m, expected) + } +} + +func TestSetDefaultsWithSetField(t *testing.T) { + // Check that a set value is not overridden. 
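+ // F_Int32's declared default is 32 (see TestAllSetDefaults above), so
+ // SetDefaults must leave the explicit 12 in place rather than replace it.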
+ m := &Defaults{ + F_Int32: Int32(12), + } + SetDefaults(m) + if v := m.GetF_Int32(); v != 12 { + t.Errorf("m.FInt32 = %v, want 12", v) + } +} + +func TestSetDefaultsWithSubMessage(t *testing.T) { + m := &OtherMessage{ + Key: Int64(123), + Inner: &InnerMessage{ + Host: String("gopher"), + }, + } + expected := &OtherMessage{ + Key: Int64(123), + Inner: &InnerMessage{ + Host: String("gopher"), + Port: Int32(4000), + }, + } + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("\n got %v\nwant %v", m, expected) + } +} + +func TestSetDefaultsWithRepeatedSubMessage(t *testing.T) { + m := &MyMessage{ + RepInner: []*InnerMessage{{}}, + } + expected := &MyMessage{ + RepInner: []*InnerMessage{{ + Port: Int32(4000), + }}, + } + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("\n got %v\nwant %v", m, expected) + } +} + +func TestSetDefaultWithRepeatedNonMessage(t *testing.T) { + m := &MyMessage{ + Pet: []string{"turtle", "wombat"}, + } + expected := Clone(m) + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("\n got %v\nwant %v", m, expected) + } +} + +func TestMaximumTagNumber(t *testing.T) { + m := &MaxTag{ + LastField: String("natural goat essence"), + } + buf, err := Marshal(m) + if err != nil { + t.Fatalf("proto.Marshal failed: %v", err) + } + m2 := new(MaxTag) + if err := Unmarshal(buf, m2); err != nil { + t.Fatalf("proto.Unmarshal failed: %v", err) + } + if got, want := m2.GetLastField(), *m.LastField; got != want { + t.Errorf("got %q, want %q", got, want) + } +} + +func TestJSON(t *testing.T) { + m := &MyMessage{ + Count: Int32(4), + Pet: []string{"bunny", "kitty"}, + Inner: &InnerMessage{ + Host: String("cauchy"), + }, + Bikeshed: MyMessage_GREEN.Enum(), + } + const expected = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":1}` + + b, err := json.Marshal(m) + if err != nil { + t.Fatalf("json.Marshal failed: %v", err) + } + s := string(b) + if s != expected { + t.Errorf("got %s\nwant %s", s, expected) + } + + received := new(MyMessage) + if err := json.Unmarshal(b, received); err != nil { + t.Fatalf("json.Unmarshal failed: %v", err) + } + if !Equal(received, m) { + t.Fatalf("got %s, want %s", received, m) + } + + // Test unmarshalling of JSON with symbolic enum name. + const old = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":"GREEN"}` + received.Reset() + if err := json.Unmarshal([]byte(old), received); err != nil { + t.Fatalf("json.Unmarshal failed: %v", err) + } + if !Equal(received, m) { + t.Fatalf("got %s, want %s", received, m) + } +} + +func TestBadWireType(t *testing.T) { + b := []byte{7<<3 | 6} // field 7, wire type 6 + pb := new(OtherMessage) + if err := Unmarshal(b, pb); err == nil { + t.Errorf("Unmarshal did not fail") + } else if !strings.Contains(err.Error(), "unknown wire type") { + t.Errorf("wrong error: %v", err) + } +} + +func TestBytesWithInvalidLength(t *testing.T) { + // If a byte sequence has an invalid (negative) length, Unmarshal should not panic. + b := []byte{2<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0} + Unmarshal(b, new(MyMessage)) +} + +func TestLengthOverflow(t *testing.T) { + // Overflowing a length should not panic. + b := []byte{2<<3 | WireBytes, 1, 1, 3<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x01} + Unmarshal(b, new(MyMessage)) +} + +func TestVarintOverflow(t *testing.T) { + // Overflowing a 64-bit length should not be allowed. 
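+ // The length varint below has ten 0x80 continuation bytes plus a final
+ // 0x01: one byte more than the ten-byte maximum for a 64-bit varint.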
+ b := []byte{1<<3 | WireVarint, 0x01, 3<<3 | WireBytes, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01} + if err := Unmarshal(b, new(MyMessage)); err == nil { + t.Fatalf("Overflowed uint64 length without error") + } +} + +func TestUnmarshalFuzz(t *testing.T) { + const N = 1000 + seed := time.Now().UnixNano() + t.Logf("RNG seed is %d", seed) + rng := rand.New(rand.NewSource(seed)) + buf := make([]byte, 20) + for i := 0; i < N; i++ { + for j := range buf { + buf[j] = byte(rng.Intn(256)) + } + fuzzUnmarshal(t, buf) + } +} + +func TestMergeMessages(t *testing.T) { + pb := &MessageList{Message: []*MessageList_Message{{Name: String("x"), Count: Int32(1)}}} + data, err := Marshal(pb) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + pb1 := new(MessageList) + if err := Unmarshal(data, pb1); err != nil { + t.Fatalf("first Unmarshal: %v", err) + } + if err := Unmarshal(data, pb1); err != nil { + t.Fatalf("second Unmarshal: %v", err) + } + if len(pb1.Message) != 1 { + t.Errorf("two Unmarshals produced %d Messages, want 1", len(pb1.Message)) + } + + pb2 := new(MessageList) + if err := UnmarshalMerge(data, pb2); err != nil { + t.Fatalf("first UnmarshalMerge: %v", err) + } + if err := UnmarshalMerge(data, pb2); err != nil { + t.Fatalf("second UnmarshalMerge: %v", err) + } + if len(pb2.Message) != 2 { + t.Errorf("two UnmarshalMerges produced %d Messages, want 2", len(pb2.Message)) + } +} + +func TestExtensionMarshalOrder(t *testing.T) { + m := &MyMessage{Count: Int(123)} + if err := SetExtension(m, E_Ext_More, &Ext{Data: String("alpha")}); err != nil { + t.Fatalf("SetExtension: %v", err) + } + if err := SetExtension(m, E_Ext_Text, String("aleph")); err != nil { + t.Fatalf("SetExtension: %v", err) + } + if err := SetExtension(m, E_Ext_Number, Int32(1)); err != nil { + t.Fatalf("SetExtension: %v", err) + } + + // Serialize m several times, and check we get the same bytes each time. + var orig []byte + for i := 0; i < 100; i++ { + b, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + if i == 0 { + orig = b + continue + } + if !bytes.Equal(b, orig) { + t.Errorf("Bytes differ on attempt #%d", i) + } + } +} + +// Many extensions, because small maps might not iterate differently on each iteration. +var exts = []*ExtensionDesc{ + E_X201, + E_X202, + E_X203, + E_X204, + E_X205, + E_X206, + E_X207, + E_X208, + E_X209, + E_X210, + E_X211, + E_X212, + E_X213, + E_X214, + E_X215, + E_X216, + E_X217, + E_X218, + E_X219, + E_X220, + E_X221, + E_X222, + E_X223, + E_X224, + E_X225, + E_X226, + E_X227, + E_X228, + E_X229, + E_X230, + E_X231, + E_X232, + E_X233, + E_X234, + E_X235, + E_X236, + E_X237, + E_X238, + E_X239, + E_X240, + E_X241, + E_X242, + E_X243, + E_X244, + E_X245, + E_X246, + E_X247, + E_X248, + E_X249, + E_X250, +} + +func TestMessageSetMarshalOrder(t *testing.T) { + m := &MyMessageSet{} + for _, x := range exts { + if err := SetExtension(m, x, &Empty{}); err != nil { + t.Fatalf("SetExtension: %v", err) + } + } + + buf, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + // Serialize m several times, and check we get the same bytes each time. 
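+ // Extensions are stored in a map, so this guards against nondeterministic
+ // map iteration order leaking into the encoded output.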
+ for i := 0; i < 10; i++ { + b1, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + if !bytes.Equal(b1, buf) { + t.Errorf("Bytes differ on re-Marshal #%d", i) + } + + m2 := &MyMessageSet{} + if err := Unmarshal(buf, m2); err != nil { + t.Errorf("Unmarshal: %v", err) + } + b2, err := Marshal(m2) + if err != nil { + t.Errorf("re-Marshal: %v", err) + } + if !bytes.Equal(b2, buf) { + t.Errorf("Bytes differ on round-trip #%d", i) + } + } +} + +func TestUnmarshalMergesMessages(t *testing.T) { + // If a nested message occurs twice in the input, + // the fields should be merged when decoding. + a := &OtherMessage{ + Key: Int64(123), + Inner: &InnerMessage{ + Host: String("polhode"), + Port: Int32(1234), + }, + } + aData, err := Marshal(a) + if err != nil { + t.Fatalf("Marshal(a): %v", err) + } + b := &OtherMessage{ + Weight: Float32(1.2), + Inner: &InnerMessage{ + Host: String("herpolhode"), + Connected: Bool(true), + }, + } + bData, err := Marshal(b) + if err != nil { + t.Fatalf("Marshal(b): %v", err) + } + want := &OtherMessage{ + Key: Int64(123), + Weight: Float32(1.2), + Inner: &InnerMessage{ + Host: String("herpolhode"), + Port: Int32(1234), + Connected: Bool(true), + }, + } + got := new(OtherMessage) + if err := Unmarshal(append(aData, bData...), got); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if !Equal(got, want) { + t.Errorf("\n got %v\nwant %v", got, want) + } +} + +func TestEncodingSizes(t *testing.T) { + tests := []struct { + m Message + n int + }{ + {&Defaults{F_Int32: Int32(math.MaxInt32)}, 6}, + {&Defaults{F_Int32: Int32(math.MinInt32)}, 11}, + {&Defaults{F_Uint32: Uint32(uint32(math.MaxInt32) + 1)}, 6}, + {&Defaults{F_Uint32: Uint32(math.MaxUint32)}, 6}, + } + for _, test := range tests { + b, err := Marshal(test.m) + if err != nil { + t.Errorf("Marshal(%v): %v", test.m, err) + continue + } + if len(b) != test.n { + t.Errorf("Marshal(%v) yielded %d bytes, want %d bytes", test.m, len(b), test.n) + } + } +} + +func TestRequiredNotSetError(t *testing.T) { + pb := initGoTest(false) + pb.RequiredField.Label = nil + pb.F_Int32Required = nil + pb.F_Int64Required = nil + + expected := "0807" + // field 1, encoding 0, value 7 + "2206" + "120474797065" + // field 4, encoding 2 (GoTestField) + "5001" + // field 10, encoding 0, value 1 + "6d20000000" + // field 13, encoding 5, value 0x20 + "714000000000000000" + // field 14, encoding 1, value 0x40 + "78a019" + // field 15, encoding 0, value 0xca0 = 3232 + "8001c032" + // field 16, encoding 0, value 0x1940 = 6464 + "8d0100004a45" + // field 17, encoding 5, value 3232.0 + "9101000000000040b940" + // field 18, encoding 1, value 6464.0 + "9a0106" + "737472696e67" + // field 19, encoding 2, string "string" + "b304" + // field 70, encoding 3, start group + "ba0408" + "7265717569726564" + // field 71, encoding 2, string "required" + "b404" + // field 70, encoding 4, end group + "aa0605" + "6279746573" + // field 101, encoding 2, string "bytes" + "b0063f" + // field 102, encoding 0, 0x3f zigzag32 + "b8067f" // field 103, encoding 0, 0x7f zigzag64 + + o := old() + bytes, err := Marshal(pb) + if _, ok := err.(*RequiredNotSetError); !ok { + fmt.Printf("marshal-1 err = %v, want *RequiredNotSetError", err) + o.DebugPrint("", bytes) + t.Fatalf("expected = %s", expected) + } + if strings.Index(err.Error(), "RequiredField.Label") < 0 { + t.Errorf("marshal-1 wrong err msg: %v", err) + } + if !equal(bytes, expected, t) { + o.DebugPrint("neq 1", bytes) + t.Fatalf("expected = %s", expected) + } + + // Now test Unmarshal by 
recreating the original buffer.
+ pbd := new(GoTest)
+ err = Unmarshal(bytes, pbd)
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ o.DebugPrint("", bytes)
+ t.Fatalf("unmarshal err = %v, want *RequiredNotSetError; expected = %s", err, expected)
+ }
+ if strings.Index(err.Error(), "RequiredField.{Unknown}") < 0 {
+ t.Errorf("unmarshal wrong err msg: %v", err)
+ }
+ bytes, err = Marshal(pbd)
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ o.DebugPrint("", bytes)
+ t.Fatalf("marshal-2 err = %v, want *RequiredNotSetError; expected = %s", err, expected)
+ }
+ if strings.Index(err.Error(), "RequiredField.Label") < 0 {
+ t.Errorf("marshal-2 wrong err msg: %v", err)
+ }
+ if !equal(bytes, expected, t) {
+ o.DebugPrint("neq 2", bytes)
+ t.Fatalf("string = %s", expected)
+ }
+}
+
+func fuzzUnmarshal(t *testing.T, data []byte) {
+ defer func() {
+ if e := recover(); e != nil {
+ t.Errorf("These bytes caused a panic: %+v", data)
+ t.Logf("Stack:\n%s", debug.Stack())
+ t.FailNow()
+ }
+ }()
+
+ pb := new(MyMessage)
+ Unmarshal(data, pb)
+}
+
+func TestMapFieldMarshal(t *testing.T) {
+ m := &MessageWithMap{
+ NameMapping: map[int32]string{
+ 1: "Rob",
+ 4: "Ian",
+ 8: "Dave",
+ },
+ }
+ b, err := Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+
+ // b should be the concatenation of these three byte sequences in some order.
+ parts := []string{
+ "\n\a\b\x01\x12\x03Rob",
+ "\n\a\b\x04\x12\x03Ian",
+ "\n\b\b\x08\x12\x04Dave",
+ }
+ ok := false
+ for i := range parts {
+ for j := range parts {
+ if j == i {
+ continue
+ }
+ for k := range parts {
+ if k == i || k == j {
+ continue
+ }
+ try := parts[i] + parts[j] + parts[k]
+ if bytes.Equal(b, []byte(try)) {
+ ok = true
+ break
+ }
+ }
+ }
+ }
+ if !ok {
+ t.Fatalf("Incorrect Marshal output.\n got %q\nwant %q (or a permutation of that)", b, parts[0]+parts[1]+parts[2])
+ }
+ t.Logf("FYI b: %q", b)
+
+ (new(Buffer)).DebugPrint("Dump of b", b)
+}
+
+func TestMapFieldRoundTrips(t *testing.T) {
+ m := &MessageWithMap{
+ NameMapping: map[int32]string{
+ 1: "Rob",
+ 4: "Ian",
+ 8: "Dave",
+ },
+ MsgMapping: map[int64]*FloatingPoint{
+ 0x7001: &FloatingPoint{F: Float64(2.0)},
+ },
+ ByteMapping: map[bool][]byte{
+ false: []byte("that's not right!"),
+ true: []byte("aye, 'tis true!"),
+ },
+ }
+ b, err := Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ t.Logf("FYI b: %q", b)
+ m2 := new(MessageWithMap)
+ if err := Unmarshal(b, m2); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ for _, pair := range [][2]interface{}{
+ {m.NameMapping, m2.NameMapping},
+ {m.MsgMapping, m2.MsgMapping},
+ {m.ByteMapping, m2.ByteMapping},
+ } {
+ if !reflect.DeepEqual(pair[0], pair[1]) {
+ t.Errorf("Map did not survive a round trip.\ninitial: %v\n final: %v", pair[0], pair[1])
+ }
+ }
+}
+
+func TestMapFieldWithNil(t *testing.T) {
+ m1 := &MessageWithMap{
+ MsgMapping: map[int64]*FloatingPoint{
+ 1: nil,
+ },
+ }
+ b, err := Marshal(m1)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ m2 := new(MessageWithMap)
+ if err := Unmarshal(b, m2); err != nil {
+ t.Fatalf("Unmarshal: %v, got these bytes: %v", err, b)
+ }
+ if v, ok := m2.MsgMapping[1]; !ok {
+ t.Error("msg_mapping[1] not present")
+ } else if v != nil {
+ t.Errorf("msg_mapping[1] not nil: %v", v)
+ }
+}
+
+func TestMapFieldWithNilBytes(t *testing.T) {
+ m1 := &MessageWithMap{
+ ByteMapping: map[bool][]byte{
+ false: []byte{},
+ true: nil,
+ },
+ }
+ n := Size(m1)
+ b, err := Marshal(m1)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if n != len(b) {
+ t.Errorf("Size(m1) = %d; want len(Marshal(m1)) = %d", n, len(b)) + } + m2 := new(MessageWithMap) + if err := Unmarshal(b, m2); err != nil { + t.Fatalf("Unmarshal: %v, got these bytes: %v", err, b) + } + if v, ok := m2.ByteMapping[false]; !ok { + t.Error("byte_mapping[false] not present") + } else if len(v) != 0 { + t.Errorf("byte_mapping[false] not empty: %#v", v) + } + if v, ok := m2.ByteMapping[true]; !ok { + t.Error("byte_mapping[true] not present") + } else if len(v) != 0 { + t.Errorf("byte_mapping[true] not empty: %#v", v) + } +} + +func TestDecodeMapFieldMissingKey(t *testing.T) { + b := []byte{ + 0x0A, 0x03, // message, tag 1 (name_mapping), of length 3 bytes + // no key + 0x12, 0x01, 0x6D, // string value of length 1 byte, value "m" + } + got := &MessageWithMap{} + err := Unmarshal(b, got) + if err != nil { + t.Fatalf("failed to marshal map with missing key: %v", err) + } + want := &MessageWithMap{NameMapping: map[int32]string{0: "m"}} + if !Equal(got, want) { + t.Errorf("Unmarshaled map with no key was not as expected. got: %v, want %v", got, want) + } +} + +func TestDecodeMapFieldMissingValue(t *testing.T) { + b := []byte{ + 0x0A, 0x02, // message, tag 1 (name_mapping), of length 2 bytes + 0x08, 0x01, // varint key, value 1 + // no value + } + got := &MessageWithMap{} + err := Unmarshal(b, got) + if err != nil { + t.Fatalf("failed to marshal map with missing value: %v", err) + } + want := &MessageWithMap{NameMapping: map[int32]string{1: ""}} + if !Equal(got, want) { + t.Errorf("Unmarshaled map with no value was not as expected. got: %v, want %v", got, want) + } +} + +func TestOneof(t *testing.T) { + m := &Communique{} + b, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal of empty message with oneof: %v", err) + } + if len(b) != 0 { + t.Errorf("Marshal of empty message yielded too many bytes: %v", b) + } + + m = &Communique{ + Union: &Communique_Name{"Barry"}, + } + + // Round-trip. + b, err = Marshal(m) + if err != nil { + t.Fatalf("Marshal of message with oneof: %v", err) + } + if len(b) != 7 { // name tag/wire (1) + name len (1) + name (5) + t.Errorf("Incorrect marshal of message with oneof: %v", b) + } + m.Reset() + if err := Unmarshal(b, m); err != nil { + t.Fatalf("Unmarshal of message with oneof: %v", err) + } + if x, ok := m.Union.(*Communique_Name); !ok || x.Name != "Barry" { + t.Errorf("After round trip, Union = %+v", m.Union) + } + if name := m.GetName(); name != "Barry" { + t.Errorf("After round trip, GetName = %q, want %q", name, "Barry") + } + + // Let's try with a message in the oneof. + m.Union = &Communique_Msg{&Strings{StringField: String("deep deep string")}} + b, err = Marshal(m) + if err != nil { + t.Fatalf("Marshal of message with oneof set to message: %v", err) + } + if len(b) != 20 { // msg tag/wire (1) + msg len (1) + msg (1 + 1 + 16) + t.Errorf("Incorrect marshal of message with oneof set to message: %v", b) + } + m.Reset() + if err := Unmarshal(b, m); err != nil { + t.Fatalf("Unmarshal of message with oneof set to message: %v", err) + } + ss, ok := m.Union.(*Communique_Msg) + if !ok || ss.Msg.GetStringField() != "deep deep string" { + t.Errorf("After round trip with oneof set to message, Union = %+v", m.Union) + } +} + +func TestInefficientPackedBool(t *testing.T) { + // https://github.com/golang/protobuf/issues/76 + inp := []byte{ + 0x12, 0x02, // 0x12 = 2<<3|2; 2 bytes + // Usually a bool should take a single byte, + // but it is permitted to be any varint. 
+ 0xb9, 0x30, + } + if err := Unmarshal(inp, new(MoreRepeated)); err != nil { + t.Error(err) + } +} + +// Benchmarks + +func testMsg() *GoTest { + pb := initGoTest(true) + const N = 1000 // Internally the library starts much smaller. + pb.F_Int32Repeated = make([]int32, N) + pb.F_DoubleRepeated = make([]float64, N) + for i := 0; i < N; i++ { + pb.F_Int32Repeated[i] = int32(i) + pb.F_DoubleRepeated[i] = float64(i) + } + return pb +} + +func bytesMsg() *GoTest { + pb := initGoTest(true) + buf := make([]byte, 4000) + for i := range buf { + buf[i] = byte(i) + } + pb.F_BytesDefaulted = buf + return pb +} + +func benchmarkMarshal(b *testing.B, pb Message, marshal func(Message) ([]byte, error)) { + d, _ := marshal(pb) + b.SetBytes(int64(len(d))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + marshal(pb) + } +} + +func benchmarkBufferMarshal(b *testing.B, pb Message) { + p := NewBuffer(nil) + benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { + p.Reset() + err := p.Marshal(pb0) + return p.Bytes(), err + }) +} + +func benchmarkSize(b *testing.B, pb Message) { + benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { + Size(pb) + return nil, nil + }) +} + +func newOf(pb Message) Message { + in := reflect.ValueOf(pb) + if in.IsNil() { + return pb + } + return reflect.New(in.Type().Elem()).Interface().(Message) +} + +func benchmarkUnmarshal(b *testing.B, pb Message, unmarshal func([]byte, Message) error) { + d, _ := Marshal(pb) + b.SetBytes(int64(len(d))) + pbd := newOf(pb) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + unmarshal(d, pbd) + } +} + +func benchmarkBufferUnmarshal(b *testing.B, pb Message) { + p := NewBuffer(nil) + benchmarkUnmarshal(b, pb, func(d []byte, pb0 Message) error { + p.SetBuf(d) + return p.Unmarshal(pb0) + }) +} + +// Benchmark{Marshal,BufferMarshal,Size,Unmarshal,BufferUnmarshal}{,Bytes} + +func BenchmarkMarshal(b *testing.B) { + benchmarkMarshal(b, testMsg(), Marshal) +} + +func BenchmarkBufferMarshal(b *testing.B) { + benchmarkBufferMarshal(b, testMsg()) +} + +func BenchmarkSize(b *testing.B) { + benchmarkSize(b, testMsg()) +} + +func BenchmarkUnmarshal(b *testing.B) { + benchmarkUnmarshal(b, testMsg(), Unmarshal) +} + +func BenchmarkBufferUnmarshal(b *testing.B) { + benchmarkBufferUnmarshal(b, testMsg()) +} + +func BenchmarkMarshalBytes(b *testing.B) { + benchmarkMarshal(b, bytesMsg(), Marshal) +} + +func BenchmarkBufferMarshalBytes(b *testing.B) { + benchmarkBufferMarshal(b, bytesMsg()) +} + +func BenchmarkSizeBytes(b *testing.B) { + benchmarkSize(b, bytesMsg()) +} + +func BenchmarkUnmarshalBytes(b *testing.B) { + benchmarkUnmarshal(b, bytesMsg(), Unmarshal) +} + +func BenchmarkBufferUnmarshalBytes(b *testing.B) { + benchmarkBufferUnmarshal(b, bytesMsg()) +} + +func BenchmarkUnmarshalUnrecognizedFields(b *testing.B) { + b.StopTimer() + pb := initGoTestField() + skip := &GoSkipTest{ + SkipInt32: Int32(32), + SkipFixed32: Uint32(3232), + SkipFixed64: Uint64(6464), + SkipString: String("skipper"), + Skipgroup: &GoSkipTest_SkipGroup{ + GroupInt32: Int32(75), + GroupString: String("wxyz"), + }, + } + + pbd := new(GoTestField) + p := NewBuffer(nil) + p.Marshal(pb) + p.Marshal(skip) + p2 := NewBuffer(nil) + + b.StartTimer() + for i := 0; i < b.N; i++ { + p2.SetBuf(p.Bytes()) + p2.Unmarshal(pbd) + } +} diff --git a/vendor/github.com/golang/protobuf/proto/any_test.go b/vendor/github.com/golang/protobuf/proto/any_test.go new file mode 100644 index 0000000..1a3c22e --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/any_test.go @@ -0,0 +1,300 @@ +// Go 
support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "strings" + "testing" + + "github.com/golang/protobuf/proto" + + pb "github.com/golang/protobuf/proto/proto3_proto" + testpb "github.com/golang/protobuf/proto/testdata" + anypb "github.com/golang/protobuf/ptypes/any" +) + +var ( + expandedMarshaler = proto.TextMarshaler{ExpandAny: true} + expandedCompactMarshaler = proto.TextMarshaler{Compact: true, ExpandAny: true} +) + +// anyEqual reports whether two messages which may be google.protobuf.Any or may +// contain google.protobuf.Any fields are equal. We can't use proto.Equal for +// comparison, because semantically equivalent messages may be marshaled to +// binary in different tag order. Instead, trust that TextMarshaler with +// ExpandAny option works and compare the text marshaling results. +func anyEqual(got, want proto.Message) bool { + // if messages are proto.Equal, no need to marshal. 
+ if proto.Equal(got, want) { + return true + } + g := expandedMarshaler.Text(got) + w := expandedMarshaler.Text(want) + return g == w +} + +type golden struct { + m proto.Message + t, c string +} + +var goldenMessages = makeGolden() + +func makeGolden() []golden { + nested := &pb.Nested{Bunny: "Monty"} + nb, err := proto.Marshal(nested) + if err != nil { + panic(err) + } + m1 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb}, + } + m2 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: "http://[::1]/type.googleapis.com/" + proto.MessageName(nested), Value: nb}, + } + m3 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: `type.googleapis.com/"/` + proto.MessageName(nested), Value: nb}, + } + m4 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: "type.googleapis.com/a/path/" + proto.MessageName(nested), Value: nb}, + } + m5 := &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb} + + any1 := &testpb.MyMessage{Count: proto.Int32(47), Name: proto.String("David")} + proto.SetExtension(any1, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("foo")}) + proto.SetExtension(any1, testpb.E_Ext_Text, proto.String("bar")) + any1b, err := proto.Marshal(any1) + if err != nil { + panic(err) + } + any2 := &testpb.MyMessage{Count: proto.Int32(42), Bikeshed: testpb.MyMessage_GREEN.Enum(), RepBytes: [][]byte{[]byte("roboto")}} + proto.SetExtension(any2, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("baz")}) + any2b, err := proto.Marshal(any2) + if err != nil { + panic(err) + } + m6 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b}, + ManyThings: []*anypb.Any{ + &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any2), Value: any2b}, + &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b}, + }, + } + + const ( + m1Golden = ` +name: "David" +result_count: 47 +anything: < + [type.googleapis.com/proto3_proto.Nested]: < + bunny: "Monty" + > +> +` + m2Golden = ` +name: "David" +result_count: 47 +anything: < + ["http://[::1]/type.googleapis.com/proto3_proto.Nested"]: < + bunny: "Monty" + > +> +` + m3Golden = ` +name: "David" +result_count: 47 +anything: < + ["type.googleapis.com/\"/proto3_proto.Nested"]: < + bunny: "Monty" + > +> +` + m4Golden = ` +name: "David" +result_count: 47 +anything: < + [type.googleapis.com/a/path/proto3_proto.Nested]: < + bunny: "Monty" + > +> +` + m5Golden = ` +[type.googleapis.com/proto3_proto.Nested]: < + bunny: "Monty" +> +` + m6Golden = ` +name: "David" +result_count: 47 +anything: < + [type.googleapis.com/testdata.MyMessage]: < + count: 47 + name: "David" + [testdata.Ext.more]: < + data: "foo" + > + [testdata.Ext.text]: "bar" + > +> +many_things: < + [type.googleapis.com/testdata.MyMessage]: < + count: 42 + bikeshed: GREEN + rep_bytes: "roboto" + [testdata.Ext.more]: < + data: "baz" + > + > +> +many_things: < + [type.googleapis.com/testdata.MyMessage]: < + count: 47 + name: "David" + [testdata.Ext.more]: < + data: "foo" + > + [testdata.Ext.text]: "bar" + > +> +` + ) + return []golden{ + {m1, strings.TrimSpace(m1Golden) + "\n", strings.TrimSpace(compact(m1Golden)) + " "}, + {m2, strings.TrimSpace(m2Golden) + "\n", strings.TrimSpace(compact(m2Golden)) + " "}, + {m3, strings.TrimSpace(m3Golden) + "\n", 
strings.TrimSpace(compact(m3Golden)) + " "}, + {m4, strings.TrimSpace(m4Golden) + "\n", strings.TrimSpace(compact(m4Golden)) + " "}, + {m5, strings.TrimSpace(m5Golden) + "\n", strings.TrimSpace(compact(m5Golden)) + " "}, + {m6, strings.TrimSpace(m6Golden) + "\n", strings.TrimSpace(compact(m6Golden)) + " "}, + } +} + +func TestMarshalGolden(t *testing.T) { + for _, tt := range goldenMessages { + if got, want := expandedMarshaler.Text(tt.m), tt.t; got != want { + t.Errorf("message %v: got:\n%s\nwant:\n%s", tt.m, got, want) + } + if got, want := expandedCompactMarshaler.Text(tt.m), tt.c; got != want { + t.Errorf("message %v: got:\n`%s`\nwant:\n`%s`", tt.m, got, want) + } + } +} + +func TestUnmarshalGolden(t *testing.T) { + for _, tt := range goldenMessages { + want := tt.m + got := proto.Clone(tt.m) + got.Reset() + if err := proto.UnmarshalText(tt.t, got); err != nil { + t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.t, err) + } + if !anyEqual(got, want) { + t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.t, got, want) + } + got.Reset() + if err := proto.UnmarshalText(tt.c, got); err != nil { + t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.c, err) + } + if !anyEqual(got, want) { + t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.c, got, want) + } + } +} + +func TestMarshalUnknownAny(t *testing.T) { + m := &pb.Message{ + Anything: &anypb.Any{ + TypeUrl: "foo", + Value: []byte("bar"), + }, + } + want := `anything: < + type_url: "foo" + value: "bar" +> +` + got := expandedMarshaler.Text(m) + if got != want { + t.Errorf("got\n`%s`\nwant\n`%s`", got, want) + } +} + +func TestAmbiguousAny(t *testing.T) { + pb := &anypb.Any{} + err := proto.UnmarshalText(` + type_url: "ttt/proto3_proto.Nested" + value: "\n\x05Monty" + `, pb) + t.Logf("result: %v (error: %v)", expandedMarshaler.Text(pb), err) + if err != nil { + t.Errorf("failed to parse ambiguous Any message: %v", err) + } +} + +func TestUnmarshalOverwriteAny(t *testing.T) { + pb := &anypb.Any{} + err := proto.UnmarshalText(` + [type.googleapis.com/a/path/proto3_proto.Nested]: < + bunny: "Monty" + > + [type.googleapis.com/a/path/proto3_proto.Nested]: < + bunny: "Rabbit of Caerbannog" + > + `, pb) + want := `line 7: Any message unpacked multiple times, or "type_url" already set` + if err.Error() != want { + t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want) + } +} + +func TestUnmarshalAnyMixAndMatch(t *testing.T) { + pb := &anypb.Any{} + err := proto.UnmarshalText(` + value: "\n\x05Monty" + [type.googleapis.com/a/path/proto3_proto.Nested]: < + bunny: "Rabbit of Caerbannog" + > + `, pb) + want := `line 5: Any message unpacked multiple times, or "value" already set` + if err.Error() != want { + t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want) + } +} diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go new file mode 100644 index 0000000..e392575 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/clone.go @@ -0,0 +1,229 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy and merge. +// TODO: RawMessage. + +package proto + +import ( + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(pb Message) Message { + in := reflect.ValueOf(pb) + if in.IsNil() { + return pb + } + + out := reflect.New(in.Type().Elem()) + // out is empty so a merge is a deep copy. + mergeStruct(out.Elem(), in.Elem()) + return out.Interface().(Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. +func Merge(dst, src Message) { + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + // Explicit test prior to mergeStruct so that mistyped nils will fail + panic("proto: type mismatch") + } + if in.IsNil() { + // Merging nil into non-nil is a quiet no-op + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, ok := extendable(in.Addr().Interface()); ok { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). 
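+// For example, a proto2 *int32 field pointing at zero is still "set" and is
+// merged, while a proto3 int32 field equal to zero is the unset default and
+// is skipped (see the isProto3Zero check below).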
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. + if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. 
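+			// (For proto2 bytes fields, nil means unset while an empty non-nil
+			// slice means set-but-empty, so the copy must stay non-nil.)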
+ out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/vendor/github.com/golang/protobuf/proto/clone_test.go b/vendor/github.com/golang/protobuf/proto/clone_test.go new file mode 100644 index 0000000..f607ff4 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/clone_test.go @@ -0,0 +1,300 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto_test + +import ( + "testing" + + "github.com/golang/protobuf/proto" + + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + pb "github.com/golang/protobuf/proto/testdata" +) + +var cloneTestMessage = &pb.MyMessage{ + Count: proto.Int32(42), + Name: proto.String("Dave"), + Pet: []string{"bunny", "kitty", "horsey"}, + Inner: &pb.InnerMessage{ + Host: proto.String("niles"), + Port: proto.Int32(9099), + Connected: proto.Bool(true), + }, + Others: []*pb.OtherMessage{ + { + Value: []byte("some bytes"), + }, + }, + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + }, + RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, +} + +func init() { + ext := &pb.Ext{ + Data: proto.String("extension"), + } + if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil { + panic("SetExtension: " + err.Error()) + } +} + +func TestClone(t *testing.T) { + m := proto.Clone(cloneTestMessage).(*pb.MyMessage) + if !proto.Equal(m, cloneTestMessage) { + t.Errorf("Clone(%v) = %v", cloneTestMessage, m) + } + + // Verify it was a deep copy. + *m.Inner.Port++ + if proto.Equal(m, cloneTestMessage) { + t.Error("Mutating clone changed the original") + } + // Byte fields and repeated fields should be copied. + if &m.Pet[0] == &cloneTestMessage.Pet[0] { + t.Error("Pet: repeated field not copied") + } + if &m.Others[0] == &cloneTestMessage.Others[0] { + t.Error("Others: repeated field not copied") + } + if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] { + t.Error("Others[0].Value: bytes field not copied") + } + if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] { + t.Error("RepBytes: repeated field not copied") + } + if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] { + t.Error("RepBytes[0]: bytes field not copied") + } +} + +func TestCloneNil(t *testing.T) { + var m *pb.MyMessage + if c := proto.Clone(m); !proto.Equal(m, c) { + t.Errorf("Clone(%v) = %v", m, c) + } +} + +var mergeTests = []struct { + src, dst, want proto.Message +}{ + { + src: &pb.MyMessage{ + Count: proto.Int32(42), + }, + dst: &pb.MyMessage{ + Name: proto.String("Dave"), + }, + want: &pb.MyMessage{ + Count: proto.Int32(42), + Name: proto.String("Dave"), + }, + }, + { + src: &pb.MyMessage{ + Inner: &pb.InnerMessage{ + Host: proto.String("hey"), + Connected: proto.Bool(true), + }, + Pet: []string{"horsey"}, + Others: []*pb.OtherMessage{ + { + Value: []byte("some bytes"), + }, + }, + }, + dst: &pb.MyMessage{ + Inner: &pb.InnerMessage{ + Host: proto.String("niles"), + Port: proto.Int32(9099), + }, + Pet: []string{"bunny", "kitty"}, + Others: []*pb.OtherMessage{ + { + Key: proto.Int64(31415926535), + }, + { + // Explicitly test a src=nil field + Inner: nil, + }, + }, + }, + want: &pb.MyMessage{ + Inner: &pb.InnerMessage{ + Host: proto.String("hey"), + Connected: proto.Bool(true), + Port: proto.Int32(9099), + }, + Pet: []string{"bunny", "kitty", "horsey"}, + Others: []*pb.OtherMessage{ + { + Key: proto.Int64(31415926535), + }, + {}, + { + Value: []byte("some bytes"), + }, + }, + }, + }, + { + src: &pb.MyMessage{ + RepBytes: [][]byte{[]byte("wow")}, + }, + dst: &pb.MyMessage{ + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + }, + RepBytes: [][]byte{[]byte("sham")}, + }, + want: &pb.MyMessage{ + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + }, + RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, + }, + }, + // Check that a scalar bytes field replaces rather than appends. 
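+	// (Repeated bytes fields, such as RepBytes in the previous case, append instead.)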
+ { + src: &pb.OtherMessage{Value: []byte("foo")}, + dst: &pb.OtherMessage{Value: []byte("bar")}, + want: &pb.OtherMessage{Value: []byte("foo")}, + }, + { + src: &pb.MessageWithMap{ + NameMapping: map[int32]string{6: "Nigel"}, + MsgMapping: map[int64]*pb.FloatingPoint{ + 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, + 0x4002: &pb.FloatingPoint{ + F: proto.Float64(2.0), + }, + }, + ByteMapping: map[bool][]byte{true: []byte("wowsa")}, + }, + dst: &pb.MessageWithMap{ + NameMapping: map[int32]string{ + 6: "Bruce", // should be overwritten + 7: "Andrew", + }, + MsgMapping: map[int64]*pb.FloatingPoint{ + 0x4002: &pb.FloatingPoint{ + F: proto.Float64(3.0), + Exact: proto.Bool(true), + }, // the entire message should be overwritten + }, + }, + want: &pb.MessageWithMap{ + NameMapping: map[int32]string{ + 6: "Nigel", + 7: "Andrew", + }, + MsgMapping: map[int64]*pb.FloatingPoint{ + 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, + 0x4002: &pb.FloatingPoint{ + F: proto.Float64(2.0), + }, + }, + ByteMapping: map[bool][]byte{true: []byte("wowsa")}, + }, + }, + // proto3 shouldn't merge zero values, + // in the same way that proto2 shouldn't merge nils. + { + src: &proto3pb.Message{ + Name: "Aaron", + Data: []byte(""), // zero value, but not nil + }, + dst: &proto3pb.Message{ + HeightInCm: 176, + Data: []byte("texas!"), + }, + want: &proto3pb.Message{ + Name: "Aaron", + HeightInCm: 176, + Data: []byte("texas!"), + }, + }, + // Oneof fields should merge by assignment. + { + src: &pb.Communique{ + Union: &pb.Communique_Number{41}, + }, + dst: &pb.Communique{ + Union: &pb.Communique_Name{"Bobby Tables"}, + }, + want: &pb.Communique{ + Union: &pb.Communique_Number{41}, + }, + }, + // Oneof nil is the same as not set. + { + src: &pb.Communique{}, + dst: &pb.Communique{ + Union: &pb.Communique_Name{"Bobby Tables"}, + }, + want: &pb.Communique{ + Union: &pb.Communique_Name{"Bobby Tables"}, + }, + }, + { + src: &proto3pb.Message{ + Terrain: map[string]*proto3pb.Nested{ + "kay_a": &proto3pb.Nested{Cute: true}, // replace + "kay_b": &proto3pb.Nested{Bunny: "rabbit"}, // insert + }, + }, + dst: &proto3pb.Message{ + Terrain: map[string]*proto3pb.Nested{ + "kay_a": &proto3pb.Nested{Bunny: "lost"}, // replaced + "kay_c": &proto3pb.Nested{Bunny: "bunny"}, // keep + }, + }, + want: &proto3pb.Message{ + Terrain: map[string]*proto3pb.Nested{ + "kay_a": &proto3pb.Nested{Cute: true}, + "kay_b": &proto3pb.Nested{Bunny: "rabbit"}, + "kay_c": &proto3pb.Nested{Bunny: "bunny"}, + }, + }, + }, +} + +func TestMerge(t *testing.T) { + for _, m := range mergeTests { + got := proto.Clone(m.dst) + proto.Merge(got, m.src) + if !proto.Equal(got, m.want) { + t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want) + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go new file mode 100644 index 0000000..aa20729 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -0,0 +1,970 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" + "os" + "reflect" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. +var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// The fundamental decoders that interpret bytes on the wire. +// Those that take integer types all return uint64 and are +// therefore of type valueDecoder. + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. 
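+// For example, the bytes 0x96 0x01 decode to (0x96&0x7f) | (0x01<<7) = 22 + 128 = 150.
+// The unrolled fast path below adds whole bytes and then subtracts the
+// continuation bits (0x80 shifted into place) rather than masking each byte,
+// and falls back to decodeVarintSlow near the end of the buffer.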
+func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + // x -= 0x80 << 63 // Always zero. + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// These are not ValueDecoders: they produce an array of bytes or a string. +// bytes, embedded messages + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. 
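+// For example, the payload 0x03 0x66 0x6f 0x6f is a varint length (3)
+// followed by the three content bytes "foo".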
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +// If the protocol buffer has extensions, and the field matches, add it as an extension. +// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. +func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { + oi := o.index + + err := o.skip(t, tag, wire) + if err != nil { + return err + } + + if !unrecField.IsValid() { + return nil + } + + ptr := structPointer_Bytes(base, unrecField) + + // Add the skipped field to struct field + obuf := o.buf + + o.buf = *ptr + o.EncodeVarint(uint64(tag<<3 | wire)) + *ptr = append(o.buf, obuf[oi:o.index]...) + + o.buf = obuf + + return nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +func (o *Buffer) skip(t reflect.Type, tag, wire int) error { + + var u uint64 + var err error + + switch wire { + case WireVarint: + _, err = o.DecodeVarint() + case WireFixed64: + _, err = o.DecodeFixed64() + case WireBytes: + _, err = o.DecodeRawBytes(false) + case WireFixed32: + _, err = o.DecodeFixed32() + case WireStartGroup: + for { + u, err = o.DecodeVarint() + if err != nil { + break + } + fwire := int(u & 0x7) + if fwire == WireEndGroup { + break + } + ftag := int(u >> 3) + err = o.skip(t, ftag, fwire) + if err != nil { + break + } + } + default: + err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) + } + return err +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The method should reset the receiver before +// decoding starts. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. +func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + return UnmarshalMerge(buf, pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + // If the object can unmarshal itself, let it. 
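+	// Note that an Unmarshaler resets the receiver first, per the interface
+	// contract above, so the merge semantics do not apply to such messages.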
+ if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +func (p *Buffer) DecodeGroup(pb Message) error { + typ, base, err := getbase(pb) + if err != nil { + return err + } + return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + typ, base, err := getbase(pb) + if err != nil { + return err + } + + err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) + + if collectStats { + stats.Decode++ + } + + return err +} + +// unmarshalType does the work of unmarshaling a structure. +func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { + var state errorState + required, reqFields := prop.reqCount, uint64(0) + + var err error + for err == nil && o.index < len(o.buf) { + oi := o.index + var u uint64 + u, err = o.DecodeVarint() + if err != nil { + break + } + wire := int(u & 0x7) + if wire == WireEndGroup { + if is_group { + if required > 0 { + // Not enough information to determine the exact field. + // (See below.) + return &RequiredNotSetError{"{Unknown}"} + } + return nil // input is satisfied + } + return fmt.Errorf("proto: %s: wiretype end group for non-group", st) + } + tag := int(u >> 3) + if tag <= 0 { + return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) + } + fieldnum, ok := prop.decoderTags.get(tag) + if !ok { + // Maybe it's an extension? + if prop.extendable { + if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { + if err = o.skip(st, tag, wire); err == nil { + extmap := e.extensionsWrite() + ext := extmap[int32(tag)] // may be missing + ext.enc = append(ext.enc, o.buf[oi:o.index]...) + extmap[int32(tag)] = ext + } + continue + } + } + // Maybe it's a oneof? + if prop.oneofUnmarshaler != nil { + m := structPointer_Interface(base, st).(Message) + // First return value indicates whether tag is a oneof field. + ok, err = prop.oneofUnmarshaler(m, tag, wire, o) + if err == ErrInternalBadWireType { + // Map the error to something more descriptive. + // Do the formatting here to save generated code space. 
+ err = fmt.Errorf("bad wiretype for oneof field in %T", m) + } + if ok { + continue + } + } + err = o.skipAndSave(st, tag, wire, base, prop.unrecField) + continue + } + p := prop.Prop[fieldnum] + + if p.dec == nil { + fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) + continue + } + dec := p.dec + if wire != WireStartGroup && wire != p.WireType { + if wire == WireBytes && p.packedDec != nil { + // a packable field + dec = p.packedDec + } else { + err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) + continue + } + } + decErr := dec(o, p, base) + if decErr != nil && !state.shouldContinue(decErr, p) { + err = decErr + } + if err == nil && p.Required { + // Successfully decoded a required field. + if tag <= 64 { + // use bitmap for fields 1-64 to catch field reuse. + var mask uint64 = 1 << uint64(tag-1) + if reqFields&mask == 0 { + // new required field + reqFields |= mask + required-- + } + } else { + // This is imprecise. It can be fooled by a required field + // with a tag > 64 that is encoded twice; that's very rare. + // A fully correct implementation would require allocating + // a data structure, which we would like to avoid. + required-- + } + } + } + if err == nil { + if is_group { + return io.ErrUnexpectedEOF + } + if state.err != nil { + return state.err + } + if required > 0 { + // Not enough information to determine the exact field. If we use extra + // CPU, we could determine the field only if the missing required field + // has a tag <= 64 and we check reqFields. + return &RequiredNotSetError{"{Unknown}"} + } + } + return err +} + +// Individual type decoders +// For each, +// u is the decoded value, +// v is a pointer to the field (pointer) in the struct + +// Sizes of the pools to allocate inside the Buffer. +// The goal is modest amortization and allocation +// on at least 16-byte boundaries. +const ( + boolPoolSize = 16 + uint32PoolSize = 8 + uint64PoolSize = 4 +) + +// Decode a bool. +func (o *Buffer) dec_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + if len(o.bools) == 0 { + o.bools = make([]bool, boolPoolSize) + } + o.bools[0] = u != 0 + *structPointer_Bool(base, p.field) = &o.bools[0] + o.bools = o.bools[1:] + return nil +} + +func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + *structPointer_BoolVal(base, p.field) = u != 0 + return nil +} + +// Decode an int32. +func (o *Buffer) dec_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) + return nil +} + +func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) + return nil +} + +// Decode an int64. +func (o *Buffer) dec_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64_Set(structPointer_Word64(base, p.field), o, u) + return nil +} + +func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64Val_Set(structPointer_Word64Val(base, p.field), o, u) + return nil +} + +// Decode a string. 
+func (o *Buffer) dec_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_String(base, p.field) = &s + return nil +} + +func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_StringVal(base, p.field) = s + return nil +} + +// Decode a slice of bytes ([]byte). +func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + *structPointer_Bytes(base, p.field) = b + return nil +} + +// Decode a slice of bools ([]bool). +func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + v := structPointer_BoolSlice(base, p.field) + *v = append(*v, u != 0) + return nil +} + +// Decode a slice of bools ([]bool) in packed format. +func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { + v := structPointer_BoolSlice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded bools + fin := o.index + nb + if fin < o.index { + return errOverflow + } + + y := *v + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + y = append(y, u != 0) + } + + *v = y + return nil +} + +// Decode a slice of int32s ([]int32). +func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + structPointer_Word32Slice(base, p.field).Append(uint32(u)) + return nil +} + +// Decode a slice of int32s ([]int32) in packed format. +func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int32s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(uint32(u)) + } + return nil +} + +// Decode a slice of int64s ([]int64). +func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + + structPointer_Word64Slice(base, p.field).Append(u) + return nil +} + +// Decode a slice of int64s ([]int64) in packed format. +func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int64s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(u) + } + return nil +} + +// Decode a slice of strings ([]string). +func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + v := structPointer_StringSlice(base, p.field) + *v = append(*v, s) + return nil +} + +// Decode a slice of slice of bytes ([][]byte). +func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + v := structPointer_BytesSlice(base, p.field) + *v = append(*v, b) + return nil +} + +// Decode a map field. 
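+// On the wire, each map entry is a length-delimited pseudo-message with the
+// key as field 1 and the value as field 2, so this reuses the scalar and
+// message decoders via mkeyprop and mvalprop.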
+func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + oi := o.index // index at the end of this map entry + o.index -= len(raw) // move buffer back to start of map entry + + mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V + if mptr.Elem().IsNil() { + mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) + } + v := mptr.Elem() // map[K]V + + // Prepare addressable doubly-indirect placeholders for the key and value types. + // See enc_new_map for why. + keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K + keybase := toStructPointer(keyptr.Addr()) // **K + + var valbase structPointer + var valptr reflect.Value + switch p.mtype.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valptr = reflect.ValueOf(&dummy) // *[]byte + valbase = toStructPointer(valptr) // *[]byte + case reflect.Ptr: + // message; valptr is **Msg; need to allocate the intermediate pointer + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valptr.Set(reflect.New(valptr.Type().Elem())) + valbase = toStructPointer(valptr) + default: + // everything else + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valbase = toStructPointer(valptr.Addr()) // **V + } + + // Decode. + // This parses a restricted wire format, namely the encoding of a message + // with two fields. See enc_new_map for the format. + for o.index < oi { + // tagcode for key and value properties are always a single byte + // because they have tags 1 and 2. + tagcode := o.buf[o.index] + o.index++ + switch tagcode { + case p.mkeyprop.tagcode[0]: + if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { + return err + } + case p.mvalprop.tagcode[0]: + if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { + return err + } + default: + // TODO: Should we silently skip this instead? + return fmt.Errorf("proto: bad map data tag %d", raw[0]) + } + } + keyelem, valelem := keyptr.Elem(), valptr.Elem() + if !keyelem.IsValid() { + keyelem = reflect.Zero(p.mtype.Key()) + } + if !valelem.IsValid() { + valelem = reflect.Zero(p.mtype.Elem()) + } + + v.SetMapIndex(keyelem, valelem) + return nil +} + +// Decode a group. +func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + return o.unmarshalType(p.stype, p.sprop, true, bas) +} + +// Decode an embedded message. +func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { + raw, e := o.DecodeRawBytes(false) + if e != nil { + return e + } + + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := structPointer_Interface(bas, p.stype) + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, false, bas) + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of embedded messages. 
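+// Embedded messages are length-delimited (wire type 2), whereas groups are
+// bracketed by start- and end-group tags (wire types 3 and 4); both variants
+// funnel into dec_slice_struct below.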
+func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
+	return o.dec_slice_struct(p, false, base)
+}
+
+// Decode a slice of embedded groups.
+func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
+	return o.dec_slice_struct(p, true, base)
+}
+
+// Decode a slice of structs ([]*struct).
+func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
+	v := reflect.New(p.stype)
+	bas := toStructPointer(v)
+	structPointer_StructPointerSlice(base, p.field).Append(bas)
+
+	if is_group {
+		err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
+		return err
+	}
+
+	raw, err := o.DecodeRawBytes(false)
+	if err != nil {
+		return err
+	}
+
+	// If the object can unmarshal itself, let it.
+	if p.isUnmarshaler {
+		iv := v.Interface()
+		return iv.(Unmarshaler).Unmarshal(raw)
+	}
+
+	obuf := o.buf
+	oi := o.index
+	o.buf = raw
+	o.index = 0
+
+	err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
+
+	o.buf = obuf
+	o.index = oi
+
+	return err
+}
diff --git a/vendor/github.com/golang/protobuf/proto/decode_test.go b/vendor/github.com/golang/protobuf/proto/decode_test.go
new file mode 100644
index 0000000..2c4c31d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/decode_test.go
@@ -0,0 +1,258 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build go1.7
+
+package proto_test
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/golang/protobuf/proto"
+	tpb "github.com/golang/protobuf/proto/proto3_proto"
+)
+
+var (
+	bytesBlackhole []byte
+	msgBlackhole   = new(tpb.Message)
+)
+
+// BenchmarkVarint32ArraySmall shows the performance on an array of small int32 fields (1 and
+// 2 bytes long).
+func BenchmarkVarint32ArraySmall(b *testing.B) {
+	for i := uint(1); i <= 10; i++ {
+		dist := genInt32Dist([7]int{0, 3, 1}, 1<<i)
+ ErrTooLarge = errors.New("proto: message encodes to over 2 GB") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// maxMarshalSize is the largest allowed size of an encoded protobuf, +// since C++ and Java use signed int32s for the size. +const maxMarshalSize = 1<<31 - 1 + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + return sizeVarint(x) +} + +func sizeVarint(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +func sizeFixed64(x uint64) int { + return 8 +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +func sizeFixed32(x uint64) int { + return 4 +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63))) +} + +func sizeZigzag64(x uint64) int { + return sizeVarint((x << 1) ^ uint64((int64(x) >> 63))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +func sizeZigzag32(x uint64) int { + return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +func sizeRawBytes(b []byte) int { + return sizeVarint(uint64(len(b))) + + len(b) +} + +// EncodeStringBytes writes an encoded string to the Buffer. 
+// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +func sizeStringBytes(s string) int { + return sizeVarint(uint64(len(s))) + + len(s) +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, returning the data. +func Marshal(pb Message) ([]byte, error) { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + return m.Marshal() + } + p := NewBuffer(nil) + err := p.Marshal(pb) + if p.buf == nil && err == nil { + // Return a non-nil slice on success. + return []byte{}, nil + } + return p.buf, err +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + var state errorState + err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) + } + return err +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, writing the result to the +// Buffer. +func (p *Buffer) Marshal(pb Message) error { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + data, err := m.Marshal() + p.buf = append(p.buf, data...) + return err + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + err = p.enc_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + (stats).Encode++ // Parens are to work around a goimports bug. + } + + if len(p.buf) > maxMarshalSize { + return ErrTooLarge + } + return err +} + +// Size returns the encoded size of a protocol buffer. +func Size(pb Message) (n int) { + // Can the object marshal itself? If so, Size is slow. + // TODO: add Size to Marshaler, or add a Sizer interface. + if m, ok := pb.(Marshaler); ok { + b, _ := m.Marshal() + return len(b) + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return 0 + } + if err == nil { + n = size_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + (stats).Size++ // Parens are to work around a goimports bug. + } + + return +} + +// Individual type encoders. + +// Encode a bool. +func (o *Buffer) enc_bool(p *Properties, base structPointer) error { + v := *structPointer_Bool(base, p.field) + if v == nil { + return ErrNil + } + x := 0 + if *v { + x = 1 + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { + v := *structPointer_BoolVal(base, p.field) + if !v { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, 1) + return nil +} + +func size_bool(p *Properties, base structPointer) int { + v := *structPointer_Bool(base, p.field) + if v == nil { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +func size_proto3_bool(p *Properties, base structPointer) int { + v := *structPointer_BoolVal(base, p.field) + if !v && !p.oneof { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +// Encode an int32. 
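+// A negative value is sign-extended to 64 bits before varint encoding,
+// so int32(-1) occupies the maximal ten bytes on the wire; the zigzag
+// sint32 encoding is the compact alternative for negative numbers.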
+func (o *Buffer) enc_int32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode a uint32. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := word32_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := word32_Get(v) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode an int64. +func (o *Buffer) enc_int64(p *Properties, base structPointer) error { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return ErrNil + } + x := word64_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func size_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return 0 + } + x := word64_Get(v) + n += len(p.tagcode) + n += p.valSize(x) + return +} + +func size_proto3_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(x) + return +} + +// Encode a string. +func (o *Buffer) enc_string(p *Properties, base structPointer) error { + v := *structPointer_String(base, p.field) + if v == nil { + return ErrNil + } + x := *v + o.buf = append(o.buf, p.tagcode...) 
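+	// Tag (field number and wire type) first, then the
+	// length-delimited string payload.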
+ o.EncodeStringBytes(x) + return nil +} + +func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { + v := *structPointer_StringVal(base, p.field) + if v == "" { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(v) + return nil +} + +func size_string(p *Properties, base structPointer) (n int) { + v := *structPointer_String(base, p.field) + if v == nil { + return 0 + } + x := *v + n += len(p.tagcode) + n += sizeStringBytes(x) + return +} + +func size_proto3_string(p *Properties, base structPointer) (n int) { + v := *structPointer_StringVal(base, p.field) + if v == "" && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeStringBytes(v) + return +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} + +// Encode a message struct. +func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { + var state errorState + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return state.err + } + + o.buf = append(o.buf, p.tagcode...) + return o.enc_len_struct(p.sprop, structp, &state) +} + +func size_struct_message(p *Properties, base structPointer) int { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n0 := len(p.tagcode) + n1 := sizeRawBytes(data) + return n0 + n1 + } + + n0 := len(p.tagcode) + n1 := size_struct(p.sprop, structp) + n2 := sizeVarint(uint64(n1)) // size of encoded length + return n0 + n1 + n2 +} + +// Encode a group struct. +func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { + var state errorState + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return ErrNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + err := o.enc_struct(p.sprop, b) + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return state.err +} + +func size_struct_group(p *Properties, base structPointer) (n int) { + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return 0 + } + + n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) + n += size_struct(p.sprop, b) + n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return +} + +// Encode a slice of bools ([]bool). +func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + for _, x := range s { + o.buf = append(o.buf, p.tagcode...) 
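+		// Unpacked repeated encoding: the tag is repeated before every
+		// element, and each bool contributes exactly one payload byte.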
+ v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_bool(p *Properties, base structPointer) int { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + return l * (len(p.tagcode) + 1) // each bool takes exactly one byte +} + +// Encode a slice of bools ([]bool) in packed format. +func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(l)) // each bool takes exactly one byte + for _, x := range s { + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_packed_bool(p *Properties, base structPointer) (n int) { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + n += len(p.tagcode) + n += sizeVarint(uint64(l)) + n += l // each bool takes exactly one byte + return +} + +// Encode a slice of bytes ([]byte). +func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func size_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if s == nil && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +// Encode a slice of int32s ([]int32). +func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of int32s ([]int32) in packed format. +func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(buf, uint64(x)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) 
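+	// Packed layout: a single tag, the payload length in bytes as a
+	// varint, then the element varints back to back; for []int32{1, 2, 3}
+	// the payload is 01 02 03 and the length byte is 03.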
+ return nil +} + +func size_slice_packed_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + bufSize += p.valSize(uint64(x)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of uint32s ([]uint32). +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + x := s.Index(i) + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := s.Index(i) + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of uint32s ([]uint32) in packed format. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, uint64(s.Index(i))) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(uint64(s.Index(i))) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of int64s ([]int64). +func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, s.Index(i)) + } + return nil +} + +func size_slice_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + n += p.valSize(s.Index(i)) + } + return +} + +// Encode a slice of int64s ([]int64) in packed format. +func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, s.Index(i)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(s.Index(i)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of slice of bytes ([][]byte). 
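+// Each element is written as its own length-delimited record with a
+// repeated tag; bytes fields are never packed.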
+func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(ss[i]) + } + return nil +} + +func size_slice_slice_byte(p *Properties, base structPointer) (n int) { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return 0 + } + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeRawBytes(ss[i]) + } + return +} + +// Encode a slice of strings ([]string). +func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(ss[i]) + } + return nil +} + +func size_slice_string(p *Properties, base structPointer) (n int) { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeStringBytes(ss[i]) + } + return +} + +// Encode a slice of message structs ([]*struct). +func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return errRepeatedHasNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + continue + } + + o.buf = append(o.buf, p.tagcode...) + err := o.enc_len_struct(p.sprop, structp, &state) + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + } + return state.err +} + +func size_slice_struct_message(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return // return the size up to this point + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n += sizeRawBytes(data) + continue + } + + n0 := size_struct(p.sprop, structp) + n1 := sizeVarint(uint64(n0)) // size of encoded length + n += n0 + n1 + } + return +} + +// Encode a slice of group structs ([]*struct). 
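+// Groups are a deprecated proto2 feature: instead of a length prefix,
+// each element is bracketed by start-group and end-group tags.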
+func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
+	var state errorState
+	s := structPointer_StructPointerSlice(base, p.field)
+	l := s.Len()
+
+	for i := 0; i < l; i++ {
+		b := s.Index(i)
+		if structPointer_IsNil(b) {
+			return errRepeatedHasNil
+		}
+
+		o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+
+		err := o.enc_struct(p.sprop, b)
+
+		if err != nil && !state.shouldContinue(err, nil) {
+			if err == ErrNil {
+				return errRepeatedHasNil
+			}
+			return err
+		}
+
+		o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+	}
+	return state.err
+}
+
+func size_slice_struct_group(p *Properties, base structPointer) (n int) {
+	s := structPointer_StructPointerSlice(base, p.field)
+	l := s.Len()
+
+	n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
+	n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
+	for i := 0; i < l; i++ {
+		b := s.Index(i)
+		if structPointer_IsNil(b) {
+			return // return size up to this point
+		}
+
+		n += size_struct(p.sprop, b)
+	}
+	return
+}
+
+// Encode an extension map.
+func (o *Buffer) enc_map(p *Properties, base structPointer) error {
+	exts := structPointer_ExtMap(base, p.field)
+	if err := encodeExtensionsMap(*exts); err != nil {
+		return err
+	}
+
+	return o.enc_map_body(*exts)
+}
+
+func (o *Buffer) enc_exts(p *Properties, base structPointer) error {
+	exts := structPointer_Extensions(base, p.field)
+
+	v, mu := exts.extensionsRead()
+	if v == nil {
+		return nil
+	}
+
+	mu.Lock()
+	defer mu.Unlock()
+	if err := encodeExtensionsMap(v); err != nil {
+		return err
+	}
+
+	return o.enc_map_body(v)
+}
+
+func (o *Buffer) enc_map_body(v map[int32]Extension) error {
+	// Fast-path for common cases: zero or one extensions.
+	if len(v) <= 1 {
+		for _, e := range v {
+			o.buf = append(o.buf, e.enc...)
+		}
+		return nil
+	}
+
+	// Sort keys to provide a deterministic encoding.
+	keys := make([]int, 0, len(v))
+	for k := range v {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+
+	for _, k := range keys {
+		o.buf = append(o.buf, v[int32(k)].enc...)
+	}
+	return nil
+}
+
+func size_map(p *Properties, base structPointer) int {
+	v := structPointer_ExtMap(base, p.field)
+	return extensionsMapSize(*v)
+}
+
+func size_exts(p *Properties, base structPointer) int {
+	v := structPointer_Extensions(base, p.field)
+	return extensionsSize(v)
+}
+
+// Encode a map field.
+func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
+	var state errorState // XXX: or do we need to plumb this through?
+
+	/*
+		A map defined as
+			map<key_type, value_type> map_field = N;
+		is encoded in the same way as
+			message MapFieldEntry {
+				key_type key = 1;
+				value_type value = 2;
+			}
+			repeated MapFieldEntry map_field = N;
+	*/
+
+	v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+	if v.Len() == 0 {
+		return nil
+	}
+
+	keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+	enc := func() error {
+		if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
+			return err
+		}
+		if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil {
+			return err
+		}
+		return nil
+	}
+
+	// Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
+	for _, key := range v.MapKeys() {
+		val := v.MapIndex(key)
+
+		keycopy.Set(key)
+		valcopy.Set(val)
+
+		o.buf = append(o.buf, p.tagcode...)
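+		// Each entry is written as if it were an embedded MapFieldEntry
+		// message: the map field's tag, then a length-prefixed key/value pair.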
+ if err := o.enc_len_thing(enc, &state); err != nil { + return err + } + } + return nil +} + +func size_new_map(p *Properties, base structPointer) int { + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + n := 0 + for _, key := range v.MapKeys() { + val := v.MapIndex(key) + keycopy.Set(key) + valcopy.Set(val) + + // Tag codes for key and val are the responsibility of the sub-sizer. + keysize := p.mkeyprop.size(p.mkeyprop, keybase) + valsize := p.mvalprop.size(p.mvalprop, valbase) + entry := keysize + valsize + // Add on tag code and length of map entry itself. + n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry + } + return n +} + +// mapEncodeScratch returns a new reflect.Value matching the map's value type, +// and a structPointer suitable for passing to an encoder or sizer. +func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { + // Prepare addressable doubly-indirect placeholders for the key and value types. + // This is needed because the element-type encoders expect **T, but the map iteration produces T. + + keycopy = reflect.New(mapType.Key()).Elem() // addressable K + keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K + keyptr.Set(keycopy.Addr()) // + keybase = toStructPointer(keyptr.Addr()) // **K + + // Value types are more varied and require special handling. + switch mapType.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte + valbase = toStructPointer(valcopy.Addr()) + case reflect.Ptr: + // message; the generated field type is map[K]*Msg (so V is *Msg), + // so we only need one level of indirection. + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valbase = toStructPointer(valcopy.Addr()) + default: + // everything else + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V + valptr.Set(valcopy.Addr()) // + valbase = toStructPointer(valptr.Addr()) // **V + } + return +} + +// Encode a struct. +func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { + var state errorState + // Encode fields in tag order so that decoders may use optimizations + // that depend on the ordering. + // https://developers.google.com/protocol-buffers/docs/encoding#order + for _, i := range prop.order { + p := prop.Prop[i] + if p.enc != nil { + err := p.enc(o, p, base) + if err != nil { + if err == ErrNil { + if p.Required && state.err == nil { + state.err = &RequiredNotSetError{p.Name} + } + } else if err == errRepeatedHasNil { + // Give more context to nil values in repeated fields. + return errors.New("repeated field " + p.OrigName + " has nil element") + } else if !state.shouldContinue(err, p) { + return err + } + } + if len(o.buf) > maxMarshalSize { + return ErrTooLarge + } + } + } + + // Do oneof fields. + if prop.oneofMarshaler != nil { + m := structPointer_Interface(base, prop.stype).(Message) + if err := prop.oneofMarshaler(m, o); err == ErrNil { + return errOneofHasNil + } else if err != nil { + return err + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + if len(o.buf)+len(v) > maxMarshalSize { + return ErrTooLarge + } + if len(v) > 0 { + o.buf = append(o.buf, v...) 
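+			// Unknown fields captured at decode time are replayed
+			// verbatim, so re-encoding round-trips them.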
+ } + } + + return state.err +} + +func size_struct(prop *StructProperties, base structPointer) (n int) { + for _, i := range prop.order { + p := prop.Prop[i] + if p.size != nil { + n += p.size(p, base) + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + n += len(v) + } + + // Factor in any oneof fields. + if prop.oneofSizer != nil { + m := structPointer_Interface(base, prop.stype).(Message) + n += prop.oneofSizer(m) + } + + return +} + +var zeroes [20]byte // longer than any conceivable sizeVarint + +// Encode a struct, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { + return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) +} + +// Encode something, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { + iLen := len(o.buf) + o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length + iMsg := len(o.buf) + err := enc() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + lMsg := len(o.buf) - iMsg + lLen := sizeVarint(uint64(lMsg)) + switch x := lLen - (iMsg - iLen); { + case x > 0: // actual length is x bytes larger than the space we reserved + // Move msg x bytes right. + o.buf = append(o.buf, zeroes[:x]...) + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + case x < 0: // actual length is x bytes smaller than the space we reserved + // Move msg x bytes left. + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + o.buf = o.buf[:len(o.buf)+x] // x is negative + } + // Encode the length in the reserved space. + o.buf = o.buf[:iLen] + o.EncodeVarint(uint64(lMsg)) + o.buf = o.buf[:len(o.buf)+lMsg] + return state.err +} + +// errorState maintains the first error that occurs and updates that error +// with additional context. +type errorState struct { + err error +} + +// shouldContinue reports whether encoding should continue upon encountering the +// given error. If the error is RequiredNotSetError, shouldContinue returns true +// and, if this is the first appearance of that error, remembers it for future +// reporting. +// +// If prop is not nil, it may update any error with additional context about the +// field with the error. +func (s *errorState) shouldContinue(err error, prop *Properties) bool { + // Ignore unset required fields. + reqNotSet, ok := err.(*RequiredNotSetError) + if !ok { + return false + } + if s.err == nil { + if prop != nil { + err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} + } + s.err = err + } + return true +} diff --git a/vendor/github.com/golang/protobuf/proto/encode_test.go b/vendor/github.com/golang/protobuf/proto/encode_test.go new file mode 100644 index 0000000..a720947 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/encode_test.go @@ -0,0 +1,85 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build go1.7
+
+package proto_test
+
+import (
+	"strconv"
+	"testing"
+
+	"github.com/golang/protobuf/proto"
+	tpb "github.com/golang/protobuf/proto/proto3_proto"
+	"github.com/golang/protobuf/ptypes"
+)
+
+var (
+	blackhole []byte
+)
+
+// BenchmarkAny creates increasingly large arbitrary Any messages. The type is always the
+// same.
+func BenchmarkAny(b *testing.B) {
+	data := make([]byte, 1<<20)
+	quantum := 1 << 10
+	for i := uint(0); i <= 10; i++ {
+		b.Run(strconv.Itoa(quantum<<i), func(b *testing.B) {

+		if o.index >= len(o.buf) {
+			break
+		}
+	}
+	return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+	epb, ok := extendable(pb)
+	if !ok {
+		return nil, errors.New("proto: not an extendable proto")
+	}
+	extensions = make([]interface{}, len(es))
+	for i, e := range es {
+		extensions[i], err = GetExtension(epb, e)
+		if err == ErrMissingExtension {
+			err = nil
+		}
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
+// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
+// just the Field field, which defines the extension's field number.
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
+	epb, ok := extendable(pb)
+	if !ok {
+		return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
+	}
+	registeredExtensions := RegisteredExtensions(pb)
+
+	emap, mu := epb.extensionsRead()
+	if emap == nil {
+		return nil, nil
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	extensions := make([]*ExtensionDesc, 0, len(emap))
+	for extid, e := range emap {
+		desc := e.desc
+		if desc == nil {
+			desc = registeredExtensions[extid]
+			if desc == nil {
+				desc = &ExtensionDesc{Field: extid}
+			}
+		}
+
+		extensions = append(extensions, desc)
+	}
+	return extensions, nil
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
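+// The concrete Go type of value must exactly match the descriptor's
+// ExtensionType, and nil values are rejected up front.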
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + epb, ok := extendable(pb) + if !ok { + return errors.New("proto: not an extendable proto") + } + if err := checkExtensionTypes(epb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return errors.New("proto: bad extension value type") + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// ClearAllExtensions clears all extensions from pb. +func ClearAllExtensions(pb Message) { + epb, ok := extendable(pb) + if !ok { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. +func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/vendor/github.com/golang/protobuf/proto/extensions_test.go b/vendor/github.com/golang/protobuf/proto/extensions_test.go new file mode 100644 index 0000000..b6d9114 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/extensions_test.go @@ -0,0 +1,536 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "testing" + + "github.com/golang/protobuf/proto" + pb "github.com/golang/protobuf/proto/testdata" + "golang.org/x/sync/errgroup" +) + +func TestGetExtensionsWithMissingExtensions(t *testing.T) { + msg := &pb.MyMessage{} + ext1 := &pb.Ext{} + if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { + t.Fatalf("Could not set ext1: %s", err) + } + exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{ + pb.E_Ext_More, + pb.E_Ext_Text, + }) + if err != nil { + t.Fatalf("GetExtensions() failed: %s", err) + } + if exts[0] != ext1 { + t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0]) + } + if exts[1] != nil { + t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1]) + } +} + +func TestExtensionDescsWithMissingExtensions(t *testing.T) { + msg := &pb.MyMessage{Count: proto.Int32(0)} + extdesc1 := pb.E_Ext_More + if descs, err := proto.ExtensionDescs(msg); len(descs) != 0 || err != nil { + t.Errorf("proto.ExtensionDescs: got %d descs, error %v; want 0, nil", len(descs), err) + } + + ext1 := &pb.Ext{} + if err := proto.SetExtension(msg, extdesc1, ext1); err != nil { + t.Fatalf("Could not set ext1: %s", err) + } + extdesc2 := &proto.ExtensionDesc{ + ExtendedType: (*pb.MyMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 123456789, + Name: "a.b", + Tag: "varint,123456789,opt", + } + ext2 := proto.Bool(false) + if err := proto.SetExtension(msg, extdesc2, ext2); err != nil { + t.Fatalf("Could not set ext2: %s", err) + } + + b, err := proto.Marshal(msg) + if err != nil { + t.Fatalf("Could not marshal msg: %v", err) + } + if err := proto.Unmarshal(b, msg); err != nil { + t.Fatalf("Could not unmarshal into msg: %v", err) + } + + descs, err := proto.ExtensionDescs(msg) + if err != nil { + t.Fatalf("proto.ExtensionDescs: got error %v", err) + } + sortExtDescs(descs) + wantDescs := []*proto.ExtensionDesc{extdesc1, &proto.ExtensionDesc{Field: extdesc2.Field}} + if !reflect.DeepEqual(descs, wantDescs) { + t.Errorf("proto.ExtensionDescs(msg) sorted extension ids: got %+v, want %+v", descs, wantDescs) + } +} + +type ExtensionDescSlice []*proto.ExtensionDesc + +func (s ExtensionDescSlice) Len() int { return len(s) } +func (s ExtensionDescSlice) Less(i, j int) bool { return s[i].Field < s[j].Field } +func (s ExtensionDescSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func sortExtDescs(s []*proto.ExtensionDesc) { + sort.Sort(ExtensionDescSlice(s)) +} + +func TestGetExtensionStability(t *testing.T) { + check := func(m *pb.MyMessage) bool { + ext1, err := proto.GetExtension(m, pb.E_Ext_More) + if err != nil { + t.Fatalf("GetExtension() failed: %s", err) + } + ext2, err := proto.GetExtension(m, pb.E_Ext_More) + if err != nil { + t.Fatalf("GetExtension() failed: %s", err) + } + return ext1 == ext2 + } + msg := &pb.MyMessage{Count: proto.Int32(4)} + ext0 := &pb.Ext{} + if err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil { + t.Fatalf("Could not 
set ext1: %s", ext0) + } + if !check(msg) { + t.Errorf("GetExtension() not stable before marshaling") + } + bb, err := proto.Marshal(msg) + if err != nil { + t.Fatalf("Marshal() failed: %s", err) + } + msg1 := &pb.MyMessage{} + err = proto.Unmarshal(bb, msg1) + if err != nil { + t.Fatalf("Unmarshal() failed: %s", err) + } + if !check(msg1) { + t.Errorf("GetExtension() not stable after unmarshaling") + } +} + +func TestGetExtensionDefaults(t *testing.T) { + var setFloat64 float64 = 1 + var setFloat32 float32 = 2 + var setInt32 int32 = 3 + var setInt64 int64 = 4 + var setUint32 uint32 = 5 + var setUint64 uint64 = 6 + var setBool = true + var setBool2 = false + var setString = "Goodnight string" + var setBytes = []byte("Goodnight bytes") + var setEnum = pb.DefaultsMessage_TWO + + type testcase struct { + ext *proto.ExtensionDesc // Extension we are testing. + want interface{} // Expected value of extension, or nil (meaning that GetExtension will fail). + def interface{} // Expected value of extension after ClearExtension(). + } + tests := []testcase{ + {pb.E_NoDefaultDouble, setFloat64, nil}, + {pb.E_NoDefaultFloat, setFloat32, nil}, + {pb.E_NoDefaultInt32, setInt32, nil}, + {pb.E_NoDefaultInt64, setInt64, nil}, + {pb.E_NoDefaultUint32, setUint32, nil}, + {pb.E_NoDefaultUint64, setUint64, nil}, + {pb.E_NoDefaultSint32, setInt32, nil}, + {pb.E_NoDefaultSint64, setInt64, nil}, + {pb.E_NoDefaultFixed32, setUint32, nil}, + {pb.E_NoDefaultFixed64, setUint64, nil}, + {pb.E_NoDefaultSfixed32, setInt32, nil}, + {pb.E_NoDefaultSfixed64, setInt64, nil}, + {pb.E_NoDefaultBool, setBool, nil}, + {pb.E_NoDefaultBool, setBool2, nil}, + {pb.E_NoDefaultString, setString, nil}, + {pb.E_NoDefaultBytes, setBytes, nil}, + {pb.E_NoDefaultEnum, setEnum, nil}, + {pb.E_DefaultDouble, setFloat64, float64(3.1415)}, + {pb.E_DefaultFloat, setFloat32, float32(3.14)}, + {pb.E_DefaultInt32, setInt32, int32(42)}, + {pb.E_DefaultInt64, setInt64, int64(43)}, + {pb.E_DefaultUint32, setUint32, uint32(44)}, + {pb.E_DefaultUint64, setUint64, uint64(45)}, + {pb.E_DefaultSint32, setInt32, int32(46)}, + {pb.E_DefaultSint64, setInt64, int64(47)}, + {pb.E_DefaultFixed32, setUint32, uint32(48)}, + {pb.E_DefaultFixed64, setUint64, uint64(49)}, + {pb.E_DefaultSfixed32, setInt32, int32(50)}, + {pb.E_DefaultSfixed64, setInt64, int64(51)}, + {pb.E_DefaultBool, setBool, true}, + {pb.E_DefaultBool, setBool2, true}, + {pb.E_DefaultString, setString, "Hello, string"}, + {pb.E_DefaultBytes, setBytes, []byte("Hello, bytes")}, + {pb.E_DefaultEnum, setEnum, pb.DefaultsMessage_ONE}, + } + + checkVal := func(test testcase, msg *pb.DefaultsMessage, valWant interface{}) error { + val, err := proto.GetExtension(msg, test.ext) + if err != nil { + if valWant != nil { + return fmt.Errorf("GetExtension(): %s", err) + } + if want := proto.ErrMissingExtension; err != want { + return fmt.Errorf("Unexpected error: got %v, want %v", err, want) + } + return nil + } + + // All proto2 extension values are either a pointer to a value or a slice of values. + ty := reflect.TypeOf(val) + tyWant := reflect.TypeOf(test.ext.ExtensionType) + if got, want := ty, tyWant; got != want { + return fmt.Errorf("unexpected reflect.TypeOf(): got %v want %v", got, want) + } + tye := ty.Elem() + tyeWant := tyWant.Elem() + if got, want := tye, tyeWant; got != want { + return fmt.Errorf("unexpected reflect.TypeOf().Elem(): got %v want %v", got, want) + } + + // Check the name of the type of the value. + // If it is an enum it will be type int32 with the name of the enum. 
+ if got, want := tye.Name(), tye.Name(); got != want { + return fmt.Errorf("unexpected reflect.TypeOf().Elem().Name(): got %v want %v", got, want) + } + + // Check that value is what we expect. + // If we have a pointer in val, get the value it points to. + valExp := val + if ty.Kind() == reflect.Ptr { + valExp = reflect.ValueOf(val).Elem().Interface() + } + if got, want := valExp, valWant; !reflect.DeepEqual(got, want) { + return fmt.Errorf("unexpected reflect.DeepEqual(): got %v want %v", got, want) + } + + return nil + } + + setTo := func(test testcase) interface{} { + setTo := reflect.ValueOf(test.want) + if typ := reflect.TypeOf(test.ext.ExtensionType); typ.Kind() == reflect.Ptr { + setTo = reflect.New(typ).Elem() + setTo.Set(reflect.New(setTo.Type().Elem())) + setTo.Elem().Set(reflect.ValueOf(test.want)) + } + return setTo.Interface() + } + + for _, test := range tests { + msg := &pb.DefaultsMessage{} + name := test.ext.Name + + // Check the initial value. + if err := checkVal(test, msg, test.def); err != nil { + t.Errorf("%s: %v", name, err) + } + + // Set the per-type value and check value. + name = fmt.Sprintf("%s (set to %T %v)", name, test.want, test.want) + if err := proto.SetExtension(msg, test.ext, setTo(test)); err != nil { + t.Errorf("%s: SetExtension(): %v", name, err) + continue + } + if err := checkVal(test, msg, test.want); err != nil { + t.Errorf("%s: %v", name, err) + continue + } + + // Set and check the value. + name += " (cleared)" + proto.ClearExtension(msg, test.ext) + if err := checkVal(test, msg, test.def); err != nil { + t.Errorf("%s: %v", name, err) + } + } +} + +func TestExtensionsRoundTrip(t *testing.T) { + msg := &pb.MyMessage{} + ext1 := &pb.Ext{ + Data: proto.String("hi"), + } + ext2 := &pb.Ext{ + Data: proto.String("there"), + } + exists := proto.HasExtension(msg, pb.E_Ext_More) + if exists { + t.Error("Extension More present unexpectedly") + } + if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { + t.Error(err) + } + if err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil { + t.Error(err) + } + e, err := proto.GetExtension(msg, pb.E_Ext_More) + if err != nil { + t.Error(err) + } + x, ok := e.(*pb.Ext) + if !ok { + t.Errorf("e has type %T, expected testdata.Ext", e) + } else if *x.Data != "there" { + t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x) + } + proto.ClearExtension(msg, pb.E_Ext_More) + if _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension { + t.Errorf("got %v, expected ErrMissingExtension", e) + } + if _, err := proto.GetExtension(msg, pb.E_X215); err == nil { + t.Error("expected bad extension error, got nil") + } + if err := proto.SetExtension(msg, pb.E_X215, 12); err == nil { + t.Error("expected extension err") + } + if err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil { + t.Error("expected some sort of type mismatch error, got nil") + } +} + +func TestNilExtension(t *testing.T) { + msg := &pb.MyMessage{ + Count: proto.Int32(1), + } + if err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String("hello")); err != nil { + t.Fatal(err) + } + if err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil { + t.Error("expected SetExtension to fail due to a nil extension") + } else if want := "proto: SetExtension called with nil value of type *testdata.Ext"; err.Error() != want { + t.Errorf("expected error %v, got %v", want, err) + } + // Note: if the behavior of Marshal is ever changed to ignore nil extensions, update + // this test to 
verify that E_Ext_Text is properly propagated through marshal->unmarshal. +} + +func TestMarshalUnmarshalRepeatedExtension(t *testing.T) { + // Add a repeated extension to the result. + tests := []struct { + name string + ext []*pb.ComplexExtension + }{ + { + "two fields", + []*pb.ComplexExtension{ + {First: proto.Int32(7)}, + {Second: proto.Int32(11)}, + }, + }, + { + "repeated field", + []*pb.ComplexExtension{ + {Third: []int32{1000}}, + {Third: []int32{2000}}, + }, + }, + { + "two fields and repeated field", + []*pb.ComplexExtension{ + {Third: []int32{1000}}, + {First: proto.Int32(9)}, + {Second: proto.Int32(21)}, + {Third: []int32{2000}}, + }, + }, + } + for _, test := range tests { + // Marshal message with a repeated extension. + msg1 := new(pb.OtherMessage) + err := proto.SetExtension(msg1, pb.E_RComplex, test.ext) + if err != nil { + t.Fatalf("[%s] Error setting extension: %v", test.name, err) + } + b, err := proto.Marshal(msg1) + if err != nil { + t.Fatalf("[%s] Error marshaling message: %v", test.name, err) + } + + // Unmarshal and read the merged proto. + msg2 := new(pb.OtherMessage) + err = proto.Unmarshal(b, msg2) + if err != nil { + t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err) + } + e, err := proto.GetExtension(msg2, pb.E_RComplex) + if err != nil { + t.Fatalf("[%s] Error getting extension: %v", test.name, err) + } + ext := e.([]*pb.ComplexExtension) + if ext == nil { + t.Fatalf("[%s] Invalid extension", test.name) + } + if !reflect.DeepEqual(ext, test.ext) { + t.Errorf("[%s] Wrong value for ComplexExtension: got: %v want: %v\n", test.name, ext, test.ext) + } + } +} + +func TestUnmarshalRepeatingNonRepeatedExtension(t *testing.T) { + // We may see multiple instances of the same extension in the wire + // format. For example, the proto compiler may encode custom options in + // this way. Here, we verify that we merge the extensions together. + tests := []struct { + name string + ext []*pb.ComplexExtension + }{ + { + "two fields", + []*pb.ComplexExtension{ + {First: proto.Int32(7)}, + {Second: proto.Int32(11)}, + }, + }, + { + "repeated field", + []*pb.ComplexExtension{ + {Third: []int32{1000}}, + {Third: []int32{2000}}, + }, + }, + { + "two fields and repeated field", + []*pb.ComplexExtension{ + {Third: []int32{1000}}, + {First: proto.Int32(9)}, + {Second: proto.Int32(21)}, + {Third: []int32{2000}}, + }, + }, + } + for _, test := range tests { + var buf bytes.Buffer + var want pb.ComplexExtension + + // Generate a serialized representation of a repeated extension + // by catenating bytes together. + for i, e := range test.ext { + // Merge to create the wanted proto. + proto.Merge(&want, e) + + // serialize the message + msg := new(pb.OtherMessage) + err := proto.SetExtension(msg, pb.E_Complex, e) + if err != nil { + t.Fatalf("[%s] Error setting extension %d: %v", test.name, i, err) + } + b, err := proto.Marshal(msg) + if err != nil { + t.Fatalf("[%s] Error marshaling message %d: %v", test.name, i, err) + } + buf.Write(b) + } + + // Unmarshal and read the merged proto. 
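+		// Decoding the concatenated buffers must produce the same result
+		// as merging the individual extensions with proto.Merge above.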
+ msg2 := new(pb.OtherMessage) + err := proto.Unmarshal(buf.Bytes(), msg2) + if err != nil { + t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err) + } + e, err := proto.GetExtension(msg2, pb.E_Complex) + if err != nil { + t.Fatalf("[%s] Error getting extension: %v", test.name, err) + } + ext := e.(*pb.ComplexExtension) + if ext == nil { + t.Fatalf("[%s] Invalid extension", test.name) + } + if !reflect.DeepEqual(*ext, want) { + t.Errorf("[%s] Wrong value for ComplexExtension: got: %s want: %s\n", test.name, ext, want) + } + } +} + +func TestClearAllExtensions(t *testing.T) { + // unregistered extension + desc := &proto.ExtensionDesc{ + ExtendedType: (*pb.MyMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 101010100, + Name: "emptyextension", + Tag: "varint,0,opt", + } + m := &pb.MyMessage{} + if proto.HasExtension(m, desc) { + t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m)) + } + if err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil { + t.Errorf("proto.SetExtension(m, desc, true): got error %q, want nil", err) + } + if !proto.HasExtension(m, desc) { + t.Errorf("proto.HasExtension(%s): got false, want true", proto.MarshalTextString(m)) + } + proto.ClearAllExtensions(m) + if proto.HasExtension(m, desc) { + t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m)) + } +} + +func TestMarshalRace(t *testing.T) { + // unregistered extension + desc := &proto.ExtensionDesc{ + ExtendedType: (*pb.MyMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 101010100, + Name: "emptyextension", + Tag: "varint,0,opt", + } + + m := &pb.MyMessage{Count: proto.Int32(4)} + if err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil { + t.Errorf("proto.SetExtension(m, desc, true): got error %q, want nil", err) + } + + var g errgroup.Group + for n := 3; n > 0; n-- { + g.Go(func() error { + _, err := proto.Marshal(m) + return err + }) + } + if err := g.Wait(); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go new file mode 100644 index 0000000..1c22550 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -0,0 +1,897 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+  - Names are turned from camel_case to CamelCase for export.
+  - There are no methods on v to set fields; just treat
+	them as structure fields.
+  - There are getters that return a field's value if set,
+	and return the field's default value if unset.
+	The getters work even if the receiver is a nil message.
+  - The zero value for a struct is its correct initialization state.
+	All desired fields must be set before marshaling.
+  - A Reset() method will restore a protobuf struct to its zero state.
+  - Non-repeated fields are pointers to the values; nil means unset.
+	That is, optional or required field int32 f becomes F *int32.
+  - Repeated fields are slices.
+  - Helper functions are available to aid the setting of fields.
+	msg.Foo = proto.String("hello") // set field
+  - Constants are defined to hold the default values of all fields that
+	have them. They have the form Default_StructName_FieldName.
+	Because the getter methods handle defaulted values,
+	direct use of these constants should be rare.
+  - Enums are given type names and maps from names to values.
+	Enum values are prefixed by the enclosing message's name, or by the
+	enum's type name if it is a top-level enum. Enum types have a String
+	method, and an Enum method to assist in message construction.
+  - Nested messages, groups and enums have type names prefixed with the name of
+	the surrounding message type.
+  - Extensions are given descriptor names that start with E_,
+	followed by an underscore-delimited list of the nested messages
+	that contain it (if any) followed by the CamelCased name of the
+	extension field itself. HasExtension, ClearExtension, GetExtension
+	and SetExtension are functions for manipulating extensions.
+  - Oneof field sets are given a single field in their message,
+	with distinguished wrapper types for each possible field value.
+  - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+  - Non-repeated fields of non-message type are values instead of pointers.
+  - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/golang/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m *Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/golang/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, + 
} + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// Stats records allocation details about the protocol buffer encoders +// and decoders. Useful for tuning the library itself. +type Stats struct { + Emalloc uint64 // mallocs in encode + Dmalloc uint64 // mallocs in decode + Encode uint64 // number of encodes + Decode uint64 // number of decodes + Chit uint64 // number of cache hits + Cmiss uint64 // number of cache misses + Size uint64 // number of sizes +} + +// Set to true to enable stats collection. +const collectStats = false + +var stats Stats + +// GetStats returns a copy of the global Stats structure. +func GetStats() Stats { return stats } + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // read point + + // pools of basic types to amortize allocation. + bools []bool + uint32s []uint32 + uint64s []uint64 + + // extra pools, only used with pointer_reflect.go + int32s []int32 + int64s []int64 + float32s []float32 + float64s []float64 +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. +func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. 
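+//
+// Like the other helpers in this section, it is typically used to fill in
+// optional fields inside composite literals, as in the package example:
+//
+//	test := &pb.Test{Label: proto.String("hello"), Type: proto.Int32(17)}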
+func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. 
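+// Each output line reports the byte offset, tag number, and wire type of one
+// field; start-group tags increase the indentation until the matching end tag.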
+func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + index := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = index +} + +// SetDefaults sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a pointer to a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + v = v.Elem() + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. 
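+		// Each case below allocates a fresh value of the field's element
+		// type, copies the declared default into it (or leaves the zero
+		// value when dv is nil), and stores the pointer through fptr (**T).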
+ switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or []*T or map[T]*T + switch f.Kind() { + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. 
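+//
+// For example, the package documentation's "optional int32 type = 2
+// [default=77]" yields a scalarField with kind reflect.Int32 and value
+// int32(77), while a nested message field reports nestedMessage == true
+// and no scalarField at all.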
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. + +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{ + vs: vs, + // default Less function: textual comparison + less: func(a, b reflect.Value) bool { + return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) + }, + } + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; + // numeric keys are sorted numerically. 
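+	// An empty key slice gives no element whose kind could be inspected,
+	// so the textual comparison above stands.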
+ if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. +func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} + +// ProtoPackageIsVersion2 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const ProtoPackageIsVersion2 = true + +// ProtoPackageIsVersion1 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const ProtoPackageIsVersion1 = true diff --git a/vendor/github.com/golang/protobuf/proto/map_test.go b/vendor/github.com/golang/protobuf/proto/map_test.go new file mode 100644 index 0000000..313e879 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/map_test.go @@ -0,0 +1,46 @@ +package proto_test + +import ( + "fmt" + "testing" + + "github.com/golang/protobuf/proto" + ppb "github.com/golang/protobuf/proto/proto3_proto" +) + +func marshalled() []byte { + m := &ppb.IntMaps{} + for i := 0; i < 1000; i++ { + m.Maps = append(m.Maps, &ppb.IntMap{ + Rtt: map[int32]int32{1: 2}, + }) + } + b, err := proto.Marshal(m) + if err != nil { + panic(fmt.Sprintf("Can't marshal %+v: %v", m, err)) + } + return b +} + +func BenchmarkConcurrentMapUnmarshal(b *testing.B) { + in := marshalled() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + var out ppb.IntMaps + if err := proto.Unmarshal(in, &out); err != nil { + b.Errorf("Can't unmarshal ppb.IntMaps: %v", err) + } + } + }) +} + +func BenchmarkSequentialMapUnmarshal(b *testing.B) { + in := marshalled() + b.ResetTimer() + for i := 0; i < b.N; i++ { + var out ppb.IntMaps + if err := proto.Unmarshal(in, &out); err != nil { + b.Errorf("Can't unmarshal ppb.IntMaps: %v", err) + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go new file mode 100644 index 0000000..fd982de --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/message_set.go @@ -0,0 +1,311 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" +) + +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. +var errNoMessageTypeID = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and messageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type messageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. +type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + if ms.find(pb) != nil { + return true + } + return false +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? 
+} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// MarshalMessageSet encodes the extension map represented by m in the message set wire format. +// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSet(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + if err := encodeExtensions(exts); err != nil { + return nil, err + } + m, _ = exts.extensionsRead() + case map[int32]Extension: + if err := encodeExtensionsMap(exts); err != nil { + return nil, err + } + m = exts + default: + return nil, errors.New("proto: not an extension map") + } + + // Sort extension IDs to provide a deterministic encoding. + // See also enc_map in encode.go. + ids := make([]int, 0, len(m)) + for id := range m { + ids = append(ids, int(id)) + } + sort.Ints(ids) + + ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} + for _, id := range ids { + e := m[int32(id)] + // Remove the wire type and field number varint, as well as the length varint. + msg := skipVarint(skipVarint(e.enc)) + + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: Int32(int32(id)), + Message: msg, + }) + } + return Marshal(ms) +} + +// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSet(buf []byte, exts interface{}) error { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m = exts.extensionsWrite() + case map[int32]Extension: + m = exts + default: + return errors.New("proto: not an extension map") + } + + ms := new(messageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} + +// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. +// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. 
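+//
+// The result maps bracketed message names to their JSON encodings, for
+// example (with a hypothetical registered extension type my.pkg.Ext):
+//
+//	{"[my.pkg.Ext]":{...}}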
+func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m, _ = exts.extensionsRead() + case map[int32]Extension: + m = exts + default: + return nil, errors.New("proto: not an extension map") + } + var b bytes.Buffer + b.WriteByte('{') + + // Process the map in key order for deterministic output. + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) // int32Slice defined in text.go + + for i, id := range ids { + ext := m[id] + if i > 0 { + b.WriteByte(',') + } + + msd, ok := messageSetMap[id] + if !ok { + // Unknown type; we can't render it, so skip it. + continue + } + fmt.Fprintf(&b, `"[%s]":`, msd.name) + + x := ext.value + if x == nil { + x = reflect.New(msd.t.Elem()).Interface() + if err := Unmarshal(ext.enc, x.(Message)); err != nil { + return nil, err + } + } + d, err := json.Marshal(x) + if err != nil { + return nil, err + } + b.Write(d) + } + b.WriteByte('}') + return b.Bytes(), nil +} + +// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. +// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { + // Common-case fast path. + if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { + return nil + } + + // This is fairly tricky, and it's not clear that it is needed. + return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") +} + +// A global registry of types that can be used in a MessageSet. + +var messageSetMap = make(map[int32]messageSetDesc) + +type messageSetDesc struct { + t reflect.Type // pointer to struct + name string +} + +// RegisterMessageSetType is called from the generated code. +func RegisterMessageSetType(m Message, fieldNum int32, name string) { + messageSetMap[fieldNum] = messageSetDesc{ + t: reflect.TypeOf(m), + name: name, + } +} diff --git a/vendor/github.com/golang/protobuf/proto/message_set_test.go b/vendor/github.com/golang/protobuf/proto/message_set_test.go new file mode 100644 index 0000000..353a3ea --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/message_set_test.go @@ -0,0 +1,66 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "bytes" + "testing" +) + +func TestUnmarshalMessageSetWithDuplicate(t *testing.T) { + // Check that a repeated message set entry will be concatenated. + in := &messageSet{ + Item: []*_MessageSet_Item{ + {TypeId: Int32(12345), Message: []byte("hoo")}, + {TypeId: Int32(12345), Message: []byte("hah")}, + }, + } + b, err := Marshal(in) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + t.Logf("Marshaled bytes: %q", b) + + var extensions XXX_InternalExtensions + if err := UnmarshalMessageSet(b, &extensions); err != nil { + t.Fatalf("UnmarshalMessageSet: %v", err) + } + ext, ok := extensions.p.extensionMap[12345] + if !ok { + t.Fatalf("Didn't retrieve extension 12345; map is %v", extensions.p.extensionMap) + } + // Skip wire type/field number and length varints. + got := skipVarint(skipVarint(ext.enc)) + if want := []byte("hoohah"); !bytes.Equal(got, want) { + t.Errorf("Combined extension is %q, want %q", got, want) + } +} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go new file mode 100644 index 0000000..fb512e2 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -0,0 +1,484 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +// +build appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "math" + "reflect" +) + +// A structPointer is a pointer to a struct. +type structPointer struct { + v reflect.Value +} + +// toStructPointer returns a structPointer equivalent to the given reflect value. +// The reflect value must itself be a pointer to a struct. +func toStructPointer(v reflect.Value) structPointer { + return structPointer{v} +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p.v.IsNil() +} + +// Interface returns the struct pointer as an interface value. +func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { + return p.v.Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// field returns the given field in the struct as a reflect value. +func structPointer_field(p structPointer, f field) reflect.Value { + // Special case: an extension map entry with a value of type T + // passes a *T to the struct-handling code with a zero field, + // expecting that it will be treated as equivalent to *struct{ X T }, + // which has the same memory layout. We have to handle that case + // specially, because reflect will panic if we call FieldByIndex on a + // non-struct. + if f == nil { + return p.v.Elem() + } + + return p.v.Elem().FieldByIndex(f) +} + +// ifield returns the given field in the struct as an interface value. +func structPointer_ifield(p structPointer, f field) interface{} { + return structPointer_field(p, f).Addr().Interface() +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return structPointer_ifield(p, f).(*[]byte) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return structPointer_ifield(p, f).(*[][]byte) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return structPointer_ifield(p, f).(**bool) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return structPointer_ifield(p, f).(*bool) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return structPointer_ifield(p, f).(*[]bool) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return structPointer_ifield(p, f).(**string) +} + +// StringVal returns the address of a string field in the struct. +func structPointer_StringVal(p structPointer, f field) *string { + return structPointer_ifield(p, f).(*string) +} + +// StringSlice returns the address of a []string field in the struct. 
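+// Like the other accessors in this file, it resolves the field through
+// structPointer_ifield and type-asserts the resulting interface value.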
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+	return structPointer_ifield(p, f).(*[]string)
+}
+
+// Extensions returns the address of an extension map field in the struct.
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
+	return structPointer_ifield(p, f).(*XXX_InternalExtensions)
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+	return structPointer_ifield(p, f).(*map[int32]Extension)
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+	return structPointer_field(p, f).Addr()
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+	structPointer_field(p, f).Set(q.v)
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+	return structPointer{structPointer_field(p, f)}
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
+	return structPointerSlice{structPointer_field(p, f)}
+}
+
+// A structPointerSlice represents the address of a slice of pointers to structs
+// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
+type structPointerSlice struct {
+	v reflect.Value
+}
+
+func (p structPointerSlice) Len() int                  { return p.v.Len() }
+func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
+func (p structPointerSlice) Append(q structPointer) {
+	p.v.Set(reflect.Append(p.v, q.v))
+}
+
+var (
+	int32Type   = reflect.TypeOf(int32(0))
+	uint32Type  = reflect.TypeOf(uint32(0))
+	float32Type = reflect.TypeOf(float32(0))
+	int64Type   = reflect.TypeOf(int64(0))
+	uint64Type  = reflect.TypeOf(uint64(0))
+	float64Type = reflect.TypeOf(float64(0))
+)
+
+// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
+// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
+type word32 struct {
+	v reflect.Value
+}
+
+// IsNil reports whether p is nil.
+func word32_IsNil(p word32) bool {
+	return p.v.IsNil()
+}
+
+// Set sets p to point at a newly allocated word with bits set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+	t := p.v.Type().Elem()
+	switch t {
+	case int32Type:
+		if len(o.int32s) == 0 {
+			o.int32s = make([]int32, uint32PoolSize)
+		}
+		o.int32s[0] = int32(x)
+		p.v.Set(reflect.ValueOf(&o.int32s[0]))
+		o.int32s = o.int32s[1:]
+		return
+	case uint32Type:
+		if len(o.uint32s) == 0 {
+			o.uint32s = make([]uint32, uint32PoolSize)
+		}
+		o.uint32s[0] = x
+		p.v.Set(reflect.ValueOf(&o.uint32s[0]))
+		o.uint32s = o.uint32s[1:]
+		return
+	case float32Type:
+		if len(o.float32s) == 0 {
+			o.float32s = make([]float32, uint32PoolSize)
+		}
+		o.float32s[0] = math.Float32frombits(x)
+		p.v.Set(reflect.ValueOf(&o.float32s[0]))
+		o.float32s = o.float32s[1:]
+		return
+	}
+
+	// must be enum
+	p.v.Set(reflect.New(t))
+	p.v.Elem().SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
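+// Floats are returned as their IEEE-754 bit pattern and enums as their
+// numeric value, mirroring what word32_Set stores.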
+func word32_Get(p word32) uint32 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32{structPointer_field(p, f)} +} + +// A word32Val represents a field of type int32, uint32, float32, or enum. +// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. +type word32Val struct { + v reflect.Value +} + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + switch p.v.Type() { + case int32Type: + p.v.SetInt(int64(x)) + return + case uint32Type: + p.v.SetUint(uint64(x)) + return + case float32Type: + p.v.SetFloat(float64(math.Float32frombits(x))) + return + } + + // must be enum + p.v.SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. +func word32Val_Get(p word32Val) uint32 { + elem := p.v + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val{structPointer_field(p, f)} +} + +// A word32Slice is a slice of 32-bit values. +// That is, v.Type() is []int32, []uint32, []float32, or []enum. +type word32Slice struct { + v reflect.Value +} + +func (p word32Slice) Append(x uint32) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int32: + elem.SetInt(int64(int32(x))) + case reflect.Uint32: + elem.SetUint(uint64(x)) + case reflect.Float32: + elem.SetFloat(float64(math.Float32frombits(x))) + } +} + +func (p word32Slice) Len() int { + return p.v.Len() +} + +func (p word32Slice) Index(i int) uint32 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) word32Slice { + return word32Slice{structPointer_field(p, f)} +} + +// word64 is like word32 but for 64-bit values. 
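+// The same pooling and bit-pattern conventions apply, drawing on the
+// Buffer's int64s, uint64s, and float64s pools.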
+type word64 struct { + v reflect.Value +} + +func word64_Set(p word64, o *Buffer, x uint64) { + t := p.v.Type().Elem() + switch t { + case int64Type: + if len(o.int64s) == 0 { + o.int64s = make([]int64, uint64PoolSize) + } + o.int64s[0] = int64(x) + p.v.Set(reflect.ValueOf(&o.int64s[0])) + o.int64s = o.int64s[1:] + return + case uint64Type: + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + p.v.Set(reflect.ValueOf(&o.uint64s[0])) + o.uint64s = o.uint64s[1:] + return + case float64Type: + if len(o.float64s) == 0 { + o.float64s = make([]float64, uint64PoolSize) + } + o.float64s[0] = math.Float64frombits(x) + p.v.Set(reflect.ValueOf(&o.float64s[0])) + o.float64s = o.float64s[1:] + return + } + panic("unreachable") +} + +func word64_IsNil(p word64) bool { + return p.v.IsNil() +} + +func word64_Get(p word64) uint64 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64{structPointer_field(p, f)} +} + +// word64Val is like word32Val but for 64-bit values. +type word64Val struct { + v reflect.Value +} + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + switch p.v.Type() { + case int64Type: + p.v.SetInt(int64(x)) + return + case uint64Type: + p.v.SetUint(x) + return + case float64Type: + p.v.SetFloat(math.Float64frombits(x)) + return + } + panic("unreachable") +} + +func word64Val_Get(p word64Val) uint64 { + elem := p.v + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val{structPointer_field(p, f)} +} + +type word64Slice struct { + v reflect.Value +} + +func (p word64Slice) Append(x uint64) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int64: + elem.SetInt(int64(int64(x))) + case reflect.Uint64: + elem.SetUint(uint64(x)) + case reflect.Float64: + elem.SetFloat(float64(math.Float64frombits(x))) + } +} + +func (p word64Slice) Len() int { + return p.v.Len() +} + +func (p word64Slice) Index(i int) uint64 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return uint64(elem.Uint()) + case reflect.Float64: + return math.Float64bits(float64(elem.Float())) + } + panic("unreachable") +} + +func structPointer_Word64Slice(p structPointer, f field) word64Slice { + return word64Slice{structPointer_field(p, f)} +} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go new file mode 100644 index 0000000..6b5567d --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,270 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "unsafe" +) + +// NOTE: These type_Foo functions would more idiomatically be methods, +// but Go does not allow methods on pointer types, and we must preserve +// some pointer type for the garbage collector. We use these +// funcs with clunky names as our poor approximation to methods. +// +// An alternative would be +// type structPointer struct { p unsafe.Pointer } +// but that does not registerize as well. + +// A structPointer is a pointer to a struct. +type structPointer unsafe.Pointer + +// toStructPointer returns a structPointer equivalent to the given reflect value. +func toStructPointer(v reflect.Value) structPointer { + return structPointer(unsafe.Pointer(v.Pointer())) +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p == nil +} + +// Interface returns the struct pointer, assumed to have element type t, +// as an interface value. +func structPointer_Interface(p structPointer, t reflect.Type) interface{} { + return reflect.NewAt(t, unsafe.Pointer(p)).Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != ^field(0) +} + +// Bytes returns the address of a []byte field in the struct. 
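+// Like every accessor below, it adds the field's byte offset to the
+// struct's base address and casts the result to the concrete pointer type.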
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+	return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+	return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+	return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+	return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+	return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+	return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+	return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+	return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Extensions returns the address of an extension map field in the struct.
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
+	return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+	return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+	return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+	*(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+	return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
+	return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
+type structPointerSlice []structPointer
+
+func (v *structPointerSlice) Len() int                  { return len(*v) }
+func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
+func (v *structPointerSlice) Append(p structPointer)    { *v = append(*v, p) }
+
+// A word32 is the address of a "pointer to 32-bit value" field.
+type word32 **uint32
+
+// IsNil reports whether *v is nil.
+func word32_IsNil(p word32) bool {
+	return *p == nil
+}
+
+// Set sets *v to point at a newly allocated word set to x.
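+// The word comes from the Buffer's uint32 pool, which is refilled in blocks
+// of uint32PoolSize once exhausted, to amortize allocations.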
+func word32_Set(p word32, o *Buffer, x uint32) { + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + *p = &o.uint32s[0] + o.uint32s = o.uint32s[1:] +} + +// Get gets the value pointed at by *v. +func word32_Get(p word32) uint32 { + return **p +} + +// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Val is the address of a 32-bit value field. +type word32Val *uint32 + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + *p = x +} + +// Get gets the value pointed at by p. +func word32Val_Get(p word32Val) uint32 { + return *p +} + +// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Slice is a slice of 32-bit values. +type word32Slice []uint32 + +func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } +func (v *word32Slice) Len() int { return len(*v) } +func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } + +// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) *word32Slice { + return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// word64 is like word32 but for 64-bit values. +type word64 **uint64 + +func word64_Set(p word64, o *Buffer, x uint64) { + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + *p = &o.uint64s[0] + o.uint64s = o.uint64s[1:] +} + +func word64_IsNil(p word64) bool { + return *p == nil +} + +func word64_Get(p word64) uint64 { + return **p +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Val is like word32Val but for 64-bit values. +type word64Val *uint64 + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + *p = x +} + +func word64Val_Get(p word64Val) uint64 { + return *p +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Slice is like word32Slice but for 64-bit values. +type word64Slice []uint64 + +func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } +func (v *word64Slice) Len() int { return len(*v) } +func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } + +func structPointer_Word64Slice(p structPointer, f field) *word64Slice { + return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go new file mode 100644 index 0000000..ec2289c --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -0,0 +1,872 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +const startSize = 10 // initial slice/string sizes + +// Encoders are defined in encode.go +// An encoder outputs the full representation of a field, including its +// tag and encoder type. +type encoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueEncoder encodes a single integer in a particular encoding. +type valueEncoder func(o *Buffer, x uint64) error + +// Sizers are defined in encode.go +// A sizer returns the encoded size of a field, including its tag and encoder +// type. +type sizer func(prop *Properties, base structPointer) int + +// A valueSizer returns the encoded size of a single integer in a particular +// encoding. +type valueSizer func(x uint64) int + +// Decoders are defined in decode.go +// A decoder creates a value from its wire representation. +// Unrecognized subelements are saved in unrec. +type decoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueDecoder decodes a single integer in a particular encoding. +type valueDecoder func(o *Buffer) (x uint64, err error) + +// A oneofMarshaler does the marshaling for all oneof fields in a message. +type oneofMarshaler func(Message, *Buffer) error + +// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. +type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) + +// A oneofSizer does the sizing for all oneof fields in a message. +type oneofSizer func(Message) int + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. 
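+// Tags at or above the limit, and non-positive tags, fall back to the
+// slowTags map.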
+const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + unrecField field // field id of the XXX_unrecognized []byte field + extendable bool // is this an extendable proto + + oneofMarshaler oneofMarshaler + oneofUnmarshaler oneofUnmarshaler + oneofSizer oneofSizer + stype reflect.Type + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. 
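+// It is populated from the field's protobuf struct tag by Init and
+// Parse below.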
+type Properties struct {
+	Name     string // name of the field, for error messages
+	OrigName string // original name before protocol compiler (always set)
+	JSONName string // name to use for JSON; determined by protoc
+	Wire     string
+	WireType int
+	Tag      int
+	Required bool
+	Optional bool
+	Repeated bool
+	Packed   bool   // relevant for repeated primitives only
+	Enum     string // set for enum types only
+	proto3   bool   // whether this is known to be a proto3 field; set for []byte only
+	oneof    bool   // whether this is a oneof field
+
+	Default    string // default value
+	HasDefault bool   // whether an explicit default was provided
+	def_uint64 uint64
+
+	enc           encoder
+	valEnc        valueEncoder // set for bool and numeric types only
+	field         field
+	tagcode       []byte // encoding of EncodeVarint((Tag<<3)|WireType)
+	tagbuf        [8]byte
+	stype         reflect.Type      // set for struct types only
+	sprop         *StructProperties // set for struct types only
+	isMarshaler   bool
+	isUnmarshaler bool
+
+	mtype    reflect.Type // set for map types only
+	mkeyprop *Properties  // set for map types only
+	mvalprop *Properties  // set for map types only
+
+	size    sizer
+	valSize valueSizer // set for bool and numeric types only
+
+	dec    decoder
+	valDec valueDecoder // set for bool and numeric types only
+
+	// If this is a packable field, this will be the decoder for the packed version of the field.
+	packedDec decoder
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+	s := p.Wire
+	s += ","
+	s += strconv.Itoa(p.Tag)
+	if p.Required {
+		s += ",req"
+	}
+	if p.Optional {
+		s += ",opt"
+	}
+	if p.Repeated {
+		s += ",rep"
+	}
+	if p.Packed {
+		s += ",packed"
+	}
+	s += ",name=" + p.OrigName
+	if p.JSONName != p.OrigName {
+		s += ",json=" + p.JSONName
+	}
+	if p.proto3 {
+		s += ",proto3"
+	}
+	if p.oneof {
+		s += ",oneof"
+	}
+	if len(p.Enum) > 0 {
+		s += ",enum=" + p.Enum
+	}
+	if p.HasDefault {
+		s += ",def=" + p.Default
+	}
+	return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+	// "bytes,49,opt,name=foo,def=hello!"
+	fields := strings.Split(s, ",") // breaks def=, but handled below.
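+	// For example, the tag above parses to Wire="bytes" (WireType=WireBytes),
+	// Tag=49, Optional=true, OrigName="foo", HasDefault=true and
+	// Default="hello!"; setEncAndDec later derives tagcode 0x8a 0x03,
+	// the varint encoding of 49<<3|2.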
+ if len(fields) < 2 { + fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeVarint + p.valDec = (*Buffer).DecodeVarint + p.valSize = sizeVarint + case "fixed32": + p.WireType = WireFixed32 + p.valEnc = (*Buffer).EncodeFixed32 + p.valDec = (*Buffer).DecodeFixed32 + p.valSize = sizeFixed32 + case "fixed64": + p.WireType = WireFixed64 + p.valEnc = (*Buffer).EncodeFixed64 + p.valDec = (*Buffer).DecodeFixed64 + p.valSize = sizeFixed64 + case "zigzag32": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag32 + p.valDec = (*Buffer).DecodeZigzag32 + p.valSize = sizeZigzag32 + case "zigzag64": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag64 + p.valDec = (*Buffer).DecodeZigzag64 + p.valSize = sizeZigzag64 + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + p.JSONName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break + } + } + } +} + +func logNoSliceEnc(t1, t2 reflect.Type) { + fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// Initialize the fields for encoding and decoding. 
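+// The enc/dec/size triples selected here are the concrete (*Buffer)
+// methods and sizer functions defined in encode.go and decode.go.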
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + p.enc = nil + p.dec = nil + p.size = nil + + switch t1 := typ; t1.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) + + // proto3 scalar types + + case reflect.Bool: + p.enc = (*Buffer).enc_proto3_bool + p.dec = (*Buffer).dec_proto3_bool + p.size = size_proto3_bool + case reflect.Int32: + p.enc = (*Buffer).enc_proto3_int32 + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_proto3_uint32 + p.dec = (*Buffer).dec_proto3_int32 // can reuse + p.size = size_proto3_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_proto3_int64 + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + case reflect.String: + p.enc = (*Buffer).enc_proto3_string + p.dec = (*Buffer).dec_proto3_string + p.size = size_proto3_string + + case reflect.Ptr: + switch t2 := t1.Elem(); t2.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) + break + case reflect.Bool: + p.enc = (*Buffer).enc_bool + p.dec = (*Buffer).dec_bool + p.size = size_bool + case reflect.Int32: + p.enc = (*Buffer).enc_int32 + p.dec = (*Buffer).dec_int32 + p.size = size_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_uint32 + p.dec = (*Buffer).dec_int32 // can reuse + p.size = size_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_int64 + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_int32 + p.size = size_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_int64 // can just treat them as bits + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.String: + p.enc = (*Buffer).enc_string + p.dec = (*Buffer).dec_string + p.size = size_string + case reflect.Struct: + p.stype = t1.Elem() + p.isMarshaler = isMarshaler(t1) + p.isUnmarshaler = isUnmarshaler(t1) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_struct_message + p.dec = (*Buffer).dec_struct_message + p.size = size_struct_message + } else { + p.enc = (*Buffer).enc_struct_group + p.dec = (*Buffer).dec_struct_group + p.size = size_struct_group + } + } + + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + default: + logNoSliceEnc(t1, t2) + break + case reflect.Bool: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_bool + p.size = size_slice_packed_bool + } else { + p.enc = (*Buffer).enc_slice_bool + p.size = size_slice_bool + } + p.dec = (*Buffer).dec_slice_bool + p.packedDec = (*Buffer).dec_slice_packed_bool + case reflect.Int32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int32 + p.size = size_slice_packed_int32 + } else { + p.enc = (*Buffer).enc_slice_int32 + p.size = size_slice_int32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Uint32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case 
reflect.Int64, reflect.Uint64: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + case reflect.Uint8: + p.dec = (*Buffer).dec_slice_byte + if p.proto3 { + p.enc = (*Buffer).enc_proto3_slice_byte + p.size = size_proto3_slice_byte + } else { + p.enc = (*Buffer).enc_slice_byte + p.size = size_slice_byte + } + case reflect.Float32, reflect.Float64: + switch t2.Bits() { + case 32: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case 64: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + default: + logNoSliceEnc(t1, t2) + break + } + case reflect.String: + p.enc = (*Buffer).enc_slice_string + p.dec = (*Buffer).dec_slice_string + p.size = size_slice_string + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) + break + case reflect.Struct: + p.stype = t2.Elem() + p.isMarshaler = isMarshaler(t2) + p.isUnmarshaler = isUnmarshaler(t2) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_slice_struct_message + p.dec = (*Buffer).dec_slice_struct_message + p.size = size_slice_struct_message + } else { + p.enc = (*Buffer).enc_slice_struct_group + p.dec = (*Buffer).dec_slice_struct_group + p.size = size_slice_struct_group + } + } + case reflect.Slice: + switch t2.Elem().Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) + break + case reflect.Uint8: + p.enc = (*Buffer).enc_slice_slice_byte + p.dec = (*Buffer).dec_slice_slice_byte + p.size = size_slice_slice_byte + } + } + + case reflect.Map: + p.enc = (*Buffer).enc_new_map + p.dec = (*Buffer).dec_new_map + p.size = size_new_map + + p.mtype = t1 + p.mkeyprop = &Properties{} + p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.mvalprop = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + + // precalculate tag code + wire := p.WireType + if p.Packed { + wire = WireBytes + } + x := uint32(p.Tag)<<3 | uint32(wire) + i := 0 + for i = 0; x > 127; i++ { + p.tagbuf[i] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + p.tagbuf[i] = uint8(x) + p.tagcode = p.tagbuf[0 : i+1] + + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() +) + +// isMarshaler reports whether type t implements Marshaler. 
+func isMarshaler(t reflect.Type) bool { + // We're checking for (likely) pointer-receiver methods + // so if t is not a pointer, something is very wrong. + // The calls above only invoke isMarshaler on pointer types. + if t.Kind() != reflect.Ptr { + panic("proto: misuse of isMarshaler") + } + return t.Implements(marshalerType) +} + +// isUnmarshaler reports whether type t implements Unmarshaler. +func isUnmarshaler(t reflect.Type) bool { + // We're checking for (likely) pointer-receiver methods + // so if t is not a pointer, something is very wrong. + // The calls above only invoke isUnmarshaler on pointer types. + if t.Kind() != reflect.Ptr { + panic("proto: misuse of isUnmarshaler") + } + return t.Implements(unmarshalerType) +} + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if f != nil { + p.field = toField(f) + } + if tag == "" { + return + } + p.Parse(tag) + p.setEncAndDec(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + if collectStats { + stats.Chit++ + } + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + if collectStats { + stats.Chit++ + } + return prop + } + if collectStats { + stats.Cmiss++ + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. + propertiesMap[t] = prop + + // build properties + prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || + reflect.PtrTo(t).Implements(extendableProtoV1Type) + prop.unrecField = invalidField + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + if f.Name == "XXX_InternalExtensions" { // special case + p.enc = (*Buffer).enc_exts + p.dec = nil // not needed + p.size = size_exts + } else if f.Name == "XXX_extensions" { // special case + p.enc = (*Buffer).enc_map + p.dec = nil // not needed + p.size = size_map + } else if f.Name == "XXX_unrecognized" { // special case + prop.unrecField = toField(&f) + } + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + // Oneof fields don't use the traditional protobuf tag. 
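+			// Instead, the protobuf_oneof tag names the containing
+			// oneof; the wrapper struct's single field carries the
+			// usual protobuf tag, parsed into OneofProperties below.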
+ p.OrigName = oneof + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { + fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + var oots []interface{} + prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() + prop.stype = t + + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// Return the Properties object for the x[0]'th field of the structure. +func propByIndex(t reflect.Type, x []int) *Properties { + if len(x) != 1 { + fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) + return nil + } + prop := GetProperties(t) + return prop.Prop[x[0]] +} + +// Get the address and type of a pointer to a struct from an interface. +func getbase(pb Message) (t reflect.Type, b structPointer, err error) { + if pb == nil { + err = ErrNil + return + } + // get the reflect type of the pointer to the struct. + t = reflect.TypeOf(pb) + // get the address of the struct. + value := reflect.ValueOf(pb) + b = toStructPointer(value) + return +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). 
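+// For example, the generated init in proto3.pb.go below calls
+// RegisterType((*Message)(nil), "proto3_proto.Message"), after which
+// MessageType("proto3_proto.Message") reports the *Message pointer type.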
+var ( + protoTypes = make(map[string]reflect.Type) + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypes[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} + +// MessageType returns the message type (pointer to struct) for a named message. +func MessageType(name string) reflect.Type { return protoTypes[name] } + +// A registry of all linked proto files. +var ( + protoFiles = make(map[string][]byte) // file name => fileDescriptor +) + +// RegisterFile is called from generated code and maps from the +// full file name of a .proto file to its compressed FileDescriptorProto. +func RegisterFile(filename string, fileDescriptor []byte) { + protoFiles[filename] = fileDescriptor +} + +// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. +func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go b/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go new file mode 100644 index 0000000..cc4d048 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go @@ -0,0 +1,347 @@ +// Code generated by protoc-gen-go. +// source: proto3_proto/proto3.proto +// DO NOT EDIT! + +/* +Package proto3_proto is a generated protocol buffer package. + +It is generated from these files: + proto3_proto/proto3.proto + +It has these top-level messages: + Message + Nested + MessageWithMap + IntMap + IntMaps +*/ +package proto3_proto + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" +import testdata "github.com/golang/protobuf/proto/testdata" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Message_Humour int32 + +const ( + Message_UNKNOWN Message_Humour = 0 + Message_PUNS Message_Humour = 1 + Message_SLAPSTICK Message_Humour = 2 + Message_BILL_BAILEY Message_Humour = 3 +) + +var Message_Humour_name = map[int32]string{ + 0: "UNKNOWN", + 1: "PUNS", + 2: "SLAPSTICK", + 3: "BILL_BAILEY", +} +var Message_Humour_value = map[string]int32{ + "UNKNOWN": 0, + "PUNS": 1, + "SLAPSTICK": 2, + "BILL_BAILEY": 3, +} + +func (x Message_Humour) String() string { + return proto.EnumName(Message_Humour_name, int32(x)) +} +func (Message_Humour) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } + +type Message struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"` + HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm,json=heightInCm" json:"height_in_cm,omitempty"` + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` + ResultCount int64 `protobuf:"varint,7,opt,name=result_count,json=resultCount" json:"result_count,omitempty"` + TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman,json=trueScotsman" json:"true_scotsman,omitempty"` + Score float32 `protobuf:"fixed32,9,opt,name=score" json:"score,omitempty"` + Key []uint64 `protobuf:"varint,5,rep,packed,name=key" json:"key,omitempty"` + ShortKey []int32 `protobuf:"varint,19,rep,packed,name=short_key,json=shortKey" json:"short_key,omitempty"` + Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"` + RFunny []Message_Humour `protobuf:"varint,16,rep,packed,name=r_funny,json=rFunny,enum=proto3_proto.Message_Humour" json:"r_funny,omitempty"` + Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field,json=proto2Field" json:"proto2_field,omitempty"` + Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value,json=proto2Value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Anything *google_protobuf.Any `protobuf:"bytes,14,opt,name=anything" json:"anything,omitempty"` + ManyThings []*google_protobuf.Any `protobuf:"bytes,15,rep,name=many_things,json=manyThings" json:"many_things,omitempty"` + Submessage *Message `protobuf:"bytes,17,opt,name=submessage" json:"submessage,omitempty"` + Children []*Message `protobuf:"bytes,18,rep,name=children" json:"children,omitempty"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Message) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Message) GetHilarity() Message_Humour { + if m != nil { + return m.Hilarity + } + return Message_UNKNOWN +} + +func (m *Message) GetHeightInCm() uint32 { + if m != nil { + return m.HeightInCm + } + return 0 +} + +func (m *Message) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *Message) GetResultCount() int64 { + if m != nil { + return m.ResultCount + } + return 0 +} + +func (m *Message) GetTrueScotsman() bool { + if m != nil { + return 
m.TrueScotsman + } + return false +} + +func (m *Message) GetScore() float32 { + if m != nil { + return m.Score + } + return 0 +} + +func (m *Message) GetKey() []uint64 { + if m != nil { + return m.Key + } + return nil +} + +func (m *Message) GetShortKey() []int32 { + if m != nil { + return m.ShortKey + } + return nil +} + +func (m *Message) GetNested() *Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *Message) GetRFunny() []Message_Humour { + if m != nil { + return m.RFunny + } + return nil +} + +func (m *Message) GetTerrain() map[string]*Nested { + if m != nil { + return m.Terrain + } + return nil +} + +func (m *Message) GetProto2Field() *testdata.SubDefaults { + if m != nil { + return m.Proto2Field + } + return nil +} + +func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults { + if m != nil { + return m.Proto2Value + } + return nil +} + +func (m *Message) GetAnything() *google_protobuf.Any { + if m != nil { + return m.Anything + } + return nil +} + +func (m *Message) GetManyThings() []*google_protobuf.Any { + if m != nil { + return m.ManyThings + } + return nil +} + +func (m *Message) GetSubmessage() *Message { + if m != nil { + return m.Submessage + } + return nil +} + +func (m *Message) GetChildren() []*Message { + if m != nil { + return m.Children + } + return nil +} + +type Nested struct { + Bunny string `protobuf:"bytes,1,opt,name=bunny" json:"bunny,omitempty"` + Cute bool `protobuf:"varint,2,opt,name=cute" json:"cute,omitempty"` +} + +func (m *Nested) Reset() { *m = Nested{} } +func (m *Nested) String() string { return proto.CompactTextString(m) } +func (*Nested) ProtoMessage() {} +func (*Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Nested) GetBunny() string { + if m != nil { + return m.Bunny + } + return "" +} + +func (m *Nested) GetCute() bool { + if m != nil { + return m.Cute + } + return false +} + +type MessageWithMap struct { + ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping,json=byteMapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } +func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } +func (*MessageWithMap) ProtoMessage() {} +func (*MessageWithMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *MessageWithMap) GetByteMapping() map[bool][]byte { + if m != nil { + return m.ByteMapping + } + return nil +} + +type IntMap struct { + Rtt map[int32]int32 `protobuf:"bytes,1,rep,name=rtt" json:"rtt,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` +} + +func (m *IntMap) Reset() { *m = IntMap{} } +func (m *IntMap) String() string { return proto.CompactTextString(m) } +func (*IntMap) ProtoMessage() {} +func (*IntMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *IntMap) GetRtt() map[int32]int32 { + if m != nil { + return m.Rtt + } + return nil +} + +type IntMaps struct { + Maps []*IntMap `protobuf:"bytes,1,rep,name=maps" json:"maps,omitempty"` +} + +func (m *IntMaps) Reset() { *m = IntMaps{} } +func (m *IntMaps) String() string { return proto.CompactTextString(m) } +func (*IntMaps) ProtoMessage() {} +func (*IntMaps) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *IntMaps) GetMaps() []*IntMap { + if m != nil { + return m.Maps + } + return nil +} + +func init() { + 
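+	// These calls install the message types and enum value maps in the
+	// package-level registries kept in proto/properties.go, keyed by
+	// fully-qualified proto name (e.g. "proto3_proto.Message").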
proto.RegisterType((*Message)(nil), "proto3_proto.Message") + proto.RegisterType((*Nested)(nil), "proto3_proto.Nested") + proto.RegisterType((*MessageWithMap)(nil), "proto3_proto.MessageWithMap") + proto.RegisterType((*IntMap)(nil), "proto3_proto.IntMap") + proto.RegisterType((*IntMaps)(nil), "proto3_proto.IntMaps") + proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value) +} + +func init() { proto.RegisterFile("proto3_proto/proto3.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 733 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x53, 0x6d, 0x6f, 0xf3, 0x34, + 0x14, 0x25, 0x4d, 0x5f, 0xd2, 0x9b, 0x74, 0x0b, 0x5e, 0x91, 0xbc, 0x02, 0x52, 0x28, 0x12, 0x8a, + 0x78, 0x49, 0xa1, 0xd3, 0xd0, 0x84, 0x10, 0x68, 0x1b, 0x9b, 0xa8, 0xd6, 0x95, 0xca, 0xdd, 0x98, + 0xf8, 0x14, 0xa5, 0xad, 0xdb, 0x46, 0x34, 0x4e, 0x49, 0x1c, 0xa4, 0xfc, 0x1d, 0xfe, 0x28, 0x8f, + 0x6c, 0xa7, 0x5d, 0x36, 0x65, 0xcf, 0xf3, 0x29, 0xf6, 0xf1, 0xb9, 0xf7, 0x9c, 0x1c, 0x5f, 0xc3, + 0xe9, 0x2e, 0x89, 0x79, 0x7c, 0xe6, 0xcb, 0xcf, 0x40, 0x6d, 0x3c, 0xf9, 0x41, 0x56, 0xf9, 0xa8, + 0x77, 0xba, 0x8e, 0xe3, 0xf5, 0x96, 0x2a, 0xca, 0x3c, 0x5b, 0x0d, 0x02, 0x96, 0x2b, 0x62, 0xef, + 0x84, 0xd3, 0x94, 0x2f, 0x03, 0x1e, 0x0c, 0xc4, 0x42, 0x81, 0xfd, 0xff, 0x5b, 0xd0, 0xba, 0xa7, + 0x69, 0x1a, 0xac, 0x29, 0x42, 0x50, 0x67, 0x41, 0x44, 0xb1, 0xe6, 0x68, 0x6e, 0x9b, 0xc8, 0x35, + 0xba, 0x00, 0x63, 0x13, 0x6e, 0x83, 0x24, 0xe4, 0x39, 0xae, 0x39, 0x9a, 0x7b, 0x34, 0xfc, 0xcc, + 0x2b, 0x0b, 0x7a, 0x45, 0xb1, 0xf7, 0x7b, 0x16, 0xc5, 0x59, 0x42, 0x0e, 0x6c, 0xe4, 0x80, 0xb5, + 0xa1, 0xe1, 0x7a, 0xc3, 0xfd, 0x90, 0xf9, 0x8b, 0x08, 0xeb, 0x8e, 0xe6, 0x76, 0x08, 0x28, 0x6c, + 0xc4, 0xae, 0x23, 0xa1, 0x27, 0xec, 0xe0, 0xba, 0xa3, 0xb9, 0x16, 0x91, 0x6b, 0xf4, 0x05, 0x58, + 0x09, 0x4d, 0xb3, 0x2d, 0xf7, 0x17, 0x71, 0xc6, 0x38, 0x6e, 0x39, 0x9a, 0xab, 0x13, 0x53, 0x61, + 0xd7, 0x02, 0x42, 0x5f, 0x42, 0x87, 0x27, 0x19, 0xf5, 0xd3, 0x45, 0xcc, 0xd3, 0x28, 0x60, 0xd8, + 0x70, 0x34, 0xd7, 0x20, 0x96, 0x00, 0x67, 0x05, 0x86, 0xba, 0xd0, 0x48, 0x17, 0x71, 0x42, 0x71, + 0xdb, 0xd1, 0xdc, 0x1a, 0x51, 0x1b, 0x64, 0x83, 0xfe, 0x37, 0xcd, 0x71, 0xc3, 0xd1, 0xdd, 0x3a, + 0x11, 0x4b, 0xf4, 0x29, 0xb4, 0xd3, 0x4d, 0x9c, 0x70, 0x5f, 0xe0, 0x27, 0x8e, 0xee, 0x36, 0x88, + 0x21, 0x81, 0x3b, 0x9a, 0xa3, 0x6f, 0xa1, 0xc9, 0x68, 0xca, 0xe9, 0x12, 0x37, 0x1d, 0xcd, 0x35, + 0x87, 0xdd, 0x97, 0xbf, 0x3e, 0x91, 0x67, 0xa4, 0xe0, 0xa0, 0x73, 0x68, 0x25, 0xfe, 0x2a, 0x63, + 0x2c, 0xc7, 0xb6, 0xa3, 0x7f, 0x30, 0xa9, 0x66, 0x72, 0x2b, 0xb8, 0xe8, 0x67, 0x68, 0x71, 0x9a, + 0x24, 0x41, 0xc8, 0x30, 0x38, 0xba, 0x6b, 0x0e, 0xfb, 0xd5, 0x65, 0x0f, 0x8a, 0x74, 0xc3, 0x78, + 0x92, 0x93, 0x7d, 0x09, 0xba, 0x00, 0x75, 0xff, 0x43, 0x7f, 0x15, 0xd2, 0xed, 0x12, 0x9b, 0xd2, + 0xe8, 0x27, 0xde, 0xfe, 0xae, 0xbd, 0x59, 0x36, 0xff, 0x8d, 0xae, 0x82, 0x6c, 0xcb, 0x53, 0x62, + 0x2a, 0xea, 0xad, 0x60, 0xa2, 0xd1, 0xa1, 0xf2, 0xdf, 0x60, 0x9b, 0x51, 0xdc, 0x91, 0xe2, 0x5f, + 0x55, 0x8b, 0x4f, 0x25, 0xf3, 0x4f, 0x41, 0x54, 0x06, 0x8a, 0x56, 0x12, 0x41, 0xdf, 0x83, 0x11, + 0xb0, 0x9c, 0x6f, 0x42, 0xb6, 0xc6, 0x47, 0x45, 0x52, 0x6a, 0x0e, 0xbd, 0xfd, 0x1c, 0x7a, 0x97, + 0x2c, 0x27, 0x07, 0x16, 0x3a, 0x07, 0x33, 0x0a, 0x58, 0xee, 0xcb, 0x5d, 0x8a, 0x8f, 0xa5, 0x76, + 0x75, 0x11, 0x08, 0xe2, 0x83, 0xe4, 0xa1, 0x73, 0x80, 0x34, 0x9b, 0x47, 0xca, 0x14, 0xfe, 0xb8, + 0xf8, 0xd7, 0x2a, 0xc7, 0xa4, 0x44, 0x44, 0x3f, 0x80, 0xb1, 0xd8, 0x84, 0xdb, 0x65, 0x42, 0x19, + 0x46, 0x52, 0xea, 0x8d, 0xa2, 
0x03, 0xad, 0x37, 0x05, 0xab, 0x1c, 0xf8, 0x7e, 0x72, 0xd4, 0xd3, + 0x90, 0x93, 0xf3, 0x35, 0x34, 0x54, 0x70, 0xb5, 0xf7, 0xcc, 0x86, 0xa2, 0xfc, 0x54, 0xbb, 0xd0, + 0x7a, 0x8f, 0x60, 0xbf, 0x4e, 0xb1, 0xa2, 0xeb, 0x37, 0x2f, 0xbb, 0xbe, 0x71, 0x91, 0xcf, 0x6d, + 0xfb, 0xbf, 0x42, 0x53, 0x0d, 0x14, 0x32, 0xa1, 0xf5, 0x38, 0xb9, 0x9b, 0xfc, 0xf1, 0x34, 0xb1, + 0x3f, 0x42, 0x06, 0xd4, 0xa7, 0x8f, 0x93, 0x99, 0xad, 0xa1, 0x0e, 0xb4, 0x67, 0xe3, 0xcb, 0xe9, + 0xec, 0x61, 0x74, 0x7d, 0x67, 0xd7, 0xd0, 0x31, 0x98, 0x57, 0xa3, 0xf1, 0xd8, 0xbf, 0xba, 0x1c, + 0x8d, 0x6f, 0xfe, 0xb2, 0xf5, 0xfe, 0x10, 0x9a, 0xca, 0xac, 0x78, 0x33, 0x73, 0x39, 0xbe, 0xca, + 0x8f, 0xda, 0x88, 0x57, 0xba, 0xc8, 0xb8, 0x32, 0x64, 0x10, 0xb9, 0xee, 0xff, 0xa7, 0xc1, 0x51, + 0x91, 0xd9, 0x53, 0xc8, 0x37, 0xf7, 0xc1, 0x0e, 0x4d, 0xc1, 0x9a, 0xe7, 0x9c, 0xfa, 0x51, 0xb0, + 0xdb, 0x89, 0x39, 0xd0, 0x64, 0xce, 0xdf, 0x55, 0xe6, 0x5c, 0xd4, 0x78, 0x57, 0x39, 0xa7, 0xf7, + 0x8a, 0x5f, 0x4c, 0xd5, 0xfc, 0x19, 0xe9, 0xfd, 0x02, 0xf6, 0x6b, 0x42, 0x39, 0x30, 0x43, 0x05, + 0xd6, 0x2d, 0x07, 0x66, 0x95, 0x93, 0xf9, 0x07, 0x9a, 0x23, 0xc6, 0x85, 0xb7, 0x01, 0xe8, 0x09, + 0xe7, 0x85, 0xa5, 0xcf, 0x5f, 0x5a, 0x52, 0x14, 0x8f, 0x70, 0xae, 0x2c, 0x08, 0x66, 0xef, 0x47, + 0x30, 0xf6, 0x40, 0x59, 0xb2, 0x51, 0x21, 0xd9, 0x28, 0x4b, 0x9e, 0x41, 0x4b, 0xf5, 0x4b, 0x91, + 0x0b, 0xf5, 0x28, 0xd8, 0xa5, 0x85, 0x68, 0xb7, 0x4a, 0x94, 0x48, 0xc6, 0xbc, 0xa9, 0x8e, 0xde, + 0x05, 0x00, 0x00, 0xff, 0xff, 0x75, 0x38, 0xad, 0x84, 0xe4, 0x05, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto b/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto new file mode 100644 index 0000000..2048655 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto @@ -0,0 +1,87 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
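+// The map fields below surface in the generated proto3.pb.go above as
+// Go maps: map<string, Nested> becomes map[string]*Nested and
+// map<bool, bytes> becomes map[bool][]byte.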
+
+syntax = "proto3";
+
+import "google/protobuf/any.proto";
+import "testdata/test.proto";
+
+package proto3_proto;
+
+message Message {
+  enum Humour {
+    UNKNOWN = 0;
+    PUNS = 1;
+    SLAPSTICK = 2;
+    BILL_BAILEY = 3;
+  }
+
+  string name = 1;
+  Humour hilarity = 2;
+  uint32 height_in_cm = 3;
+  bytes data = 4;
+  int64 result_count = 7;
+  bool true_scotsman = 8;
+  float score = 9;
+
+  repeated uint64 key = 5;
+  repeated int32 short_key = 19;
+  Nested nested = 6;
+  repeated Humour r_funny = 16;
+
+  map<string, Nested> terrain = 10;
+  testdata.SubDefaults proto2_field = 11;
+  map<string, testdata.SubDefaults> proto2_value = 13;
+
+  google.protobuf.Any anything = 14;
+  repeated google.protobuf.Any many_things = 15;
+
+  Message submessage = 17;
+  repeated Message children = 18;
+}
+
+message Nested {
+  string bunny = 1;
+  bool cute = 2;
+}
+
+message MessageWithMap {
+  map<bool, bytes> byte_mapping = 1;
+}
+
+message IntMap {
+  map<int32, int32> rtt = 1;
+}
+
+message IntMaps {
+  repeated IntMap maps = 1;
+}
diff --git a/vendor/github.com/golang/protobuf/proto/proto3_test.go b/vendor/github.com/golang/protobuf/proto/proto3_test.go
new file mode 100644
index 0000000..735837f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/proto3_test.go
@@ -0,0 +1,135 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2014 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+	"testing"
+
+	"github.com/golang/protobuf/proto"
+	pb "github.com/golang/protobuf/proto/proto3_proto"
+	tpb "github.com/golang/protobuf/proto/testdata"
+)
+
+func TestProto3ZeroValues(t *testing.T) {
+	tests := []struct {
+		desc string
+		m    proto.Message
+	}{
+		{"zero message", &pb.Message{}},
+		{"empty bytes field", &pb.Message{Data: []byte{}}},
+	}
+	for _, test := range tests {
+		b, err := proto.Marshal(test.m)
+		if err != nil {
+			t.Errorf("%s: proto.Marshal: %v", test.desc, err)
+			continue
+		}
+		if len(b) > 0 {
+			t.Errorf("%s: Encoding is non-empty: %q", test.desc, b)
+		}
+	}
+}
+
+func TestRoundTripProto3(t *testing.T) {
+	m := &pb.Message{
+		Name:         "David",          // (2 | 1<<3): 0x0a 0x05 "David"
+		Hilarity:     pb.Message_PUNS,  // (0 | 2<<3): 0x10 0x01
+		HeightInCm:   178,              // (0 | 3<<3): 0x18 0xb2 0x01
+		Data:         []byte("roboto"), // (2 | 4<<3): 0x22 0x06 "roboto"
+		ResultCount:  47,               // (0 | 7<<3): 0x38 0x2f
+		TrueScotsman: true,             // (0 | 8<<3): 0x40 0x01
+		Score:        8.1,              // (5 | 9<<3): 0x4d <8.1>
+
+		Key: []uint64{1, 0xdeadbeef},
+		Nested: &pb.Nested{
+			Bunny: "Monty",
+		},
+	}
+	t.Logf(" m: %v", m)
+
+	b, err := proto.Marshal(m)
+	if err != nil {
+		t.Fatalf("proto.Marshal: %v", err)
+	}
+	t.Logf(" b: %q", b)
+
+	m2 := new(pb.Message)
+	if err := proto.Unmarshal(b, m2); err != nil {
+		t.Fatalf("proto.Unmarshal: %v", err)
+	}
+	t.Logf("m2: %v", m2)
+
+	if !proto.Equal(m, m2) {
+		t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2)
+	}
+}
+
+func TestGettersForBasicTypesExist(t *testing.T) {
+	var m pb.Message
+	if got := m.GetNested().GetBunny(); got != "" {
+		t.Errorf("m.GetNested().GetBunny() = %q, want empty string", got)
+	}
+	if got := m.GetNested().GetCute(); got {
+		t.Errorf("m.GetNested().GetCute() = %t, want false", got)
+	}
+}
+
+func TestProto3SetDefaults(t *testing.T) {
+	in := &pb.Message{
+		Terrain: map[string]*pb.Nested{
+			"meadow": new(pb.Nested),
+		},
+		Proto2Field: new(tpb.SubDefaults),
+		Proto2Value: map[string]*tpb.SubDefaults{
+			"badlands": new(tpb.SubDefaults),
+		},
+	}
+
+	got := proto.Clone(in).(*pb.Message)
+	proto.SetDefaults(got)
+
+	// There are no defaults in proto3. Everything should be the zero value, but
+	// we need to remember to set defaults for nested proto2 messages.
+	want := &pb.Message{
+		Terrain: map[string]*pb.Nested{
+			"meadow": new(pb.Nested),
+		},
+		Proto2Field: &tpb.SubDefaults{N: proto.Int64(7)},
+		Proto2Value: map[string]*tpb.SubDefaults{
+			"badlands": &tpb.SubDefaults{N: proto.Int64(7)},
+		},
+	}
+
+	if !proto.Equal(got, want) {
+		t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want)
+	}
+}
diff --git a/vendor/github.com/golang/protobuf/proto/size2_test.go b/vendor/github.com/golang/protobuf/proto/size2_test.go
new file mode 100644
index 0000000..a2729c3
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/size2_test.go
@@ -0,0 +1,63 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "testing" +) + +// This is a separate file and package from size_test.go because that one uses +// generated messages and thus may not be in package proto without having a circular +// dependency, whereas this file tests unexported details of size.go. + +func TestVarintSize(t *testing.T) { + // Check the edge cases carefully. + testCases := []struct { + n uint64 + size int + }{ + {0, 1}, + {1, 1}, + {127, 1}, + {128, 2}, + {16383, 2}, + {16384, 3}, + {1<<63 - 1, 9}, + {1 << 63, 10}, + } + for _, tc := range testCases { + size := sizeVarint(tc.n) + if size != tc.size { + t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size) + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/size_test.go b/vendor/github.com/golang/protobuf/proto/size_test.go new file mode 100644 index 0000000..af1034d --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/size_test.go @@ -0,0 +1,164 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "log" + "strings" + "testing" + + . "github.com/golang/protobuf/proto" + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + pb "github.com/golang/protobuf/proto/testdata" +) + +var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)} + +// messageWithExtension2 is in equal_test.go. +var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)} + +func init() { + if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil { + log.Panicf("SetExtension: %v", err) + } + if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil { + log.Panicf("SetExtension: %v", err) + } + + // Force messageWithExtension3 to have the extension encoded. + Marshal(messageWithExtension3) + +} + +var SizeTests = []struct { + desc string + pb Message +}{ + {"empty", &pb.OtherMessage{}}, + // Basic types. + {"bool", &pb.Defaults{F_Bool: Bool(true)}}, + {"int32", &pb.Defaults{F_Int32: Int32(12)}}, + {"negative int32", &pb.Defaults{F_Int32: Int32(-1)}}, + {"small int64", &pb.Defaults{F_Int64: Int64(1)}}, + {"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}}, + {"negative int64", &pb.Defaults{F_Int64: Int64(-1)}}, + {"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}}, + {"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}}, + {"uint32", &pb.Defaults{F_Uint32: Uint32(123)}}, + {"uint64", &pb.Defaults{F_Uint64: Uint64(124)}}, + {"float", &pb.Defaults{F_Float: Float32(12.6)}}, + {"double", &pb.Defaults{F_Double: Float64(13.9)}}, + {"string", &pb.Defaults{F_String: String("niles")}}, + {"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}}, + {"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}}, + {"sint32", &pb.Defaults{F_Sint32: Int32(65)}}, + {"sint64", &pb.Defaults{F_Sint64: Int64(67)}}, + {"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}}, + // Repeated. + {"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}}, + {"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}}, + {"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}}, + {"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}}, + {"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}}, + {"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{ + // Need enough large numbers to verify that the header is counting the number of bytes + // for the field, not the number of elements. + 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, + 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, + }}}, + {"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}}, + {"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}}, + // Nested. 
+ {"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}}, + {"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}}, + // Other things. + {"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}}, + {"extension (unencoded)", messageWithExtension1}, + {"extension (encoded)", messageWithExtension3}, + // proto3 message + {"proto3 empty", &proto3pb.Message{}}, + {"proto3 bool", &proto3pb.Message{TrueScotsman: true}}, + {"proto3 int64", &proto3pb.Message{ResultCount: 1}}, + {"proto3 uint32", &proto3pb.Message{HeightInCm: 123}}, + {"proto3 float", &proto3pb.Message{Score: 12.6}}, + {"proto3 string", &proto3pb.Message{Name: "Snezana"}}, + {"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}}, + {"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}}, + {"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, + {"proto3 map field with empty bytes", &proto3pb.MessageWithMap{ByteMapping: map[bool][]byte{false: []byte{}}}}, + + {"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}}, + {"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}}, + {"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}}, + {"map field with empty bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte{}}}}, + + {"map field with big entry", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat("x", 125)}}}, + {"map field with big key and val", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat("x", 70): strings.Repeat("y", 70)}}}, + {"map field with big numeric key", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: "om nom nom"}}}, + + {"oneof not set", &pb.Oneof{}}, + {"oneof bool", &pb.Oneof{Union: &pb.Oneof_F_Bool{true}}}, + {"oneof zero int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{0}}}, + {"oneof big int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{1 << 20}}}, + {"oneof int64", &pb.Oneof{Union: &pb.Oneof_F_Int64{42}}}, + {"oneof fixed32", &pb.Oneof{Union: &pb.Oneof_F_Fixed32{43}}}, + {"oneof fixed64", &pb.Oneof{Union: &pb.Oneof_F_Fixed64{44}}}, + {"oneof uint32", &pb.Oneof{Union: &pb.Oneof_F_Uint32{45}}}, + {"oneof uint64", &pb.Oneof{Union: &pb.Oneof_F_Uint64{46}}}, + {"oneof float", &pb.Oneof{Union: &pb.Oneof_F_Float{47.1}}}, + {"oneof double", &pb.Oneof{Union: &pb.Oneof_F_Double{48.9}}}, + {"oneof string", &pb.Oneof{Union: &pb.Oneof_F_String{"Rhythmic Fman"}}}, + {"oneof bytes", &pb.Oneof{Union: &pb.Oneof_F_Bytes{[]byte("let go")}}}, + {"oneof sint32", &pb.Oneof{Union: &pb.Oneof_F_Sint32{50}}}, + {"oneof sint64", &pb.Oneof{Union: &pb.Oneof_F_Sint64{51}}}, + {"oneof enum", &pb.Oneof{Union: &pb.Oneof_F_Enum{pb.MyMessage_BLUE}}}, + {"message for oneof", &pb.GoTestField{Label: String("k"), Type: String("v")}}, + {"oneof message", &pb.Oneof{Union: &pb.Oneof_F_Message{&pb.GoTestField{Label: String("k"), Type: String("v")}}}}, + {"oneof group", &pb.Oneof{Union: &pb.Oneof_FGroup{&pb.Oneof_F_Group{X: Int32(52)}}}}, + {"oneof largest tag", &pb.Oneof{Union: &pb.Oneof_F_Largest_Tag{1}}}, + {"multiple oneofs", &pb.Oneof{Union: &pb.Oneof_F_Int32{1}, Tormato: &pb.Oneof_Value{2}}}, +} + +func TestSize(t *testing.T) { + for _, tc := range SizeTests { + size := Size(tc.pb) + b, err := Marshal(tc.pb) + if err != nil { + t.Errorf("%v: Marshal failed: %v", tc.desc, err) + continue + } + if size != len(b) { + t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, 
len(b)) + t.Logf("%v: bytes: %#v", tc.desc, b) + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/testdata/Makefile b/vendor/github.com/golang/protobuf/proto/testdata/Makefile new file mode 100644 index 0000000..fc28862 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/testdata/Makefile @@ -0,0 +1,50 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +include ../../Make.protobuf + +all: regenerate + +regenerate: + rm -f test.pb.go + make test.pb.go + +# The following rules are just aids to development. Not needed for typical testing. + +diff: regenerate + git diff test.pb.go + +restore: + cp test.pb.go.golden test.pb.go + +preserve: + cp test.pb.go test.pb.go.golden diff --git a/vendor/github.com/golang/protobuf/proto/testdata/golden_test.go b/vendor/github.com/golang/protobuf/proto/testdata/golden_test.go new file mode 100644 index 0000000..7172d0e --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/testdata/golden_test.go @@ -0,0 +1,86 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Verify that the compiler output for test.proto is unchanged. + +package testdata + +import ( + "crypto/sha1" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "testing" +) + +// sum returns in string form (for easy comparison) the SHA-1 hash of the named file. +func sum(t *testing.T, name string) string { + data, err := ioutil.ReadFile(name) + if err != nil { + t.Fatal(err) + } + t.Logf("sum(%q): length is %d", name, len(data)) + hash := sha1.New() + _, err = hash.Write(data) + if err != nil { + t.Fatal(err) + } + return fmt.Sprintf("% x", hash.Sum(nil)) +} + +func run(t *testing.T, name string, args ...string) { + cmd := exec.Command(name, args...) + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err := cmd.Run() + if err != nil { + t.Fatal(err) + } +} + +func TestGolden(t *testing.T) { + // Compute the original checksum. + goldenSum := sum(t, "test.pb.go") + // Run the proto compiler. + run(t, "protoc", "--go_out="+os.TempDir(), "test.proto") + newFile := filepath.Join(os.TempDir(), "test.pb.go") + defer os.Remove(newFile) + // Compute the new checksum. + newSum := sum(t, newFile) + // Verify + if newSum != goldenSum { + run(t, "diff", "-u", "test.pb.go", newFile) + t.Fatal("Code generated by protoc-gen-go has changed; update test.pb.go") + } +} diff --git a/vendor/github.com/golang/protobuf/proto/testdata/test.pb.go b/vendor/github.com/golang/protobuf/proto/testdata/test.pb.go new file mode 100644 index 0000000..e980d1a --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/testdata/test.pb.go @@ -0,0 +1,4147 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: test.proto + +/* +Package testdata is a generated protocol buffer package. + +It is generated from these files: + test.proto + +It has these top-level messages: + GoEnum + GoTestField + GoTest + GoTestRequiredGroupField + GoSkipTest + NonPackedTest + PackedTest + MaxTag + OldMessage + NewMessage + InnerMessage + OtherMessage + RequiredInnerMessage + MyMessage + Ext + ComplexExtension + DefaultsMessage + MyMessageSet + Empty + MessageList + Strings + Defaults + SubDefaults + RepeatedEnum + MoreRepeated + GroupOld + GroupNew + FloatingPoint + MessageWithMap + Oneof + Communique +*/ +package testdata + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
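+// (The assertion works because proto.ProtoPackageIsVersion2 is a
+// constant that only exists in sufficiently new versions of the proto
+// runtime, so referencing it here turns a version mismatch into a
+// compile-time error rather than a runtime surprise.)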
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type FOO int32 + +const ( + FOO_FOO1 FOO = 1 +) + +var FOO_name = map[int32]string{ + 1: "FOO1", +} +var FOO_value = map[string]int32{ + "FOO1": 1, +} + +func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p +} +func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) +} +func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO") + if err != nil { + return err + } + *x = FOO(value) + return nil +} +func (FOO) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// An enum, for completeness. +type GoTest_KIND int32 + +const ( + GoTest_VOID GoTest_KIND = 0 + // Basic types + GoTest_BOOL GoTest_KIND = 1 + GoTest_BYTES GoTest_KIND = 2 + GoTest_FINGERPRINT GoTest_KIND = 3 + GoTest_FLOAT GoTest_KIND = 4 + GoTest_INT GoTest_KIND = 5 + GoTest_STRING GoTest_KIND = 6 + GoTest_TIME GoTest_KIND = 7 + // Groupings + GoTest_TUPLE GoTest_KIND = 8 + GoTest_ARRAY GoTest_KIND = 9 + GoTest_MAP GoTest_KIND = 10 + // Table types + GoTest_TABLE GoTest_KIND = 11 + // Functions + GoTest_FUNCTION GoTest_KIND = 12 +) + +var GoTest_KIND_name = map[int32]string{ + 0: "VOID", + 1: "BOOL", + 2: "BYTES", + 3: "FINGERPRINT", + 4: "FLOAT", + 5: "INT", + 6: "STRING", + 7: "TIME", + 8: "TUPLE", + 9: "ARRAY", + 10: "MAP", + 11: "TABLE", + 12: "FUNCTION", +} +var GoTest_KIND_value = map[string]int32{ + "VOID": 0, + "BOOL": 1, + "BYTES": 2, + "FINGERPRINT": 3, + "FLOAT": 4, + "INT": 5, + "STRING": 6, + "TIME": 7, + "TUPLE": 8, + "ARRAY": 9, + "MAP": 10, + "TABLE": 11, + "FUNCTION": 12, +} + +func (x GoTest_KIND) Enum() *GoTest_KIND { + p := new(GoTest_KIND) + *p = x + return p +} +func (x GoTest_KIND) String() string { + return proto.EnumName(GoTest_KIND_name, int32(x)) +} +func (x *GoTest_KIND) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(GoTest_KIND_value, data, "GoTest_KIND") + if err != nil { + return err + } + *x = GoTest_KIND(value) + return nil +} +func (GoTest_KIND) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +type MyMessage_Color int32 + +const ( + MyMessage_RED MyMessage_Color = 0 + MyMessage_GREEN MyMessage_Color = 1 + MyMessage_BLUE MyMessage_Color = 2 +) + +var MyMessage_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var MyMessage_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x MyMessage_Color) Enum() *MyMessage_Color { + p := new(MyMessage_Color) + *p = x + return p +} +func (x MyMessage_Color) String() string { + return proto.EnumName(MyMessage_Color_name, int32(x)) +} +func (x *MyMessage_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MyMessage_Color_value, data, "MyMessage_Color") + if err != nil { + return err + } + *x = MyMessage_Color(value) + return nil +} +func (MyMessage_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{13, 0} } + +type DefaultsMessage_DefaultsEnum int32 + +const ( + DefaultsMessage_ZERO DefaultsMessage_DefaultsEnum = 0 + DefaultsMessage_ONE DefaultsMessage_DefaultsEnum = 1 + DefaultsMessage_TWO DefaultsMessage_DefaultsEnum = 2 +) + +var DefaultsMessage_DefaultsEnum_name = map[int32]string{ + 0: "ZERO", + 1: "ONE", + 2: "TWO", +} +var DefaultsMessage_DefaultsEnum_value = map[string]int32{ + "ZERO": 0, + "ONE": 1, + "TWO": 2, +} + +func (x DefaultsMessage_DefaultsEnum) Enum() *DefaultsMessage_DefaultsEnum { + p := 
new(DefaultsMessage_DefaultsEnum) + *p = x + return p +} +func (x DefaultsMessage_DefaultsEnum) String() string { + return proto.EnumName(DefaultsMessage_DefaultsEnum_name, int32(x)) +} +func (x *DefaultsMessage_DefaultsEnum) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(DefaultsMessage_DefaultsEnum_value, data, "DefaultsMessage_DefaultsEnum") + if err != nil { + return err + } + *x = DefaultsMessage_DefaultsEnum(value) + return nil +} +func (DefaultsMessage_DefaultsEnum) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{16, 0} +} + +type Defaults_Color int32 + +const ( + Defaults_RED Defaults_Color = 0 + Defaults_GREEN Defaults_Color = 1 + Defaults_BLUE Defaults_Color = 2 +) + +var Defaults_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var Defaults_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x Defaults_Color) Enum() *Defaults_Color { + p := new(Defaults_Color) + *p = x + return p +} +func (x Defaults_Color) String() string { + return proto.EnumName(Defaults_Color_name, int32(x)) +} +func (x *Defaults_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Defaults_Color_value, data, "Defaults_Color") + if err != nil { + return err + } + *x = Defaults_Color(value) + return nil +} +func (Defaults_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{21, 0} } + +type RepeatedEnum_Color int32 + +const ( + RepeatedEnum_RED RepeatedEnum_Color = 1 +) + +var RepeatedEnum_Color_name = map[int32]string{ + 1: "RED", +} +var RepeatedEnum_Color_value = map[string]int32{ + "RED": 1, +} + +func (x RepeatedEnum_Color) Enum() *RepeatedEnum_Color { + p := new(RepeatedEnum_Color) + *p = x + return p +} +func (x RepeatedEnum_Color) String() string { + return proto.EnumName(RepeatedEnum_Color_name, int32(x)) +} +func (x *RepeatedEnum_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RepeatedEnum_Color_value, data, "RepeatedEnum_Color") + if err != nil { + return err + } + *x = RepeatedEnum_Color(value) + return nil +} +func (RepeatedEnum_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{23, 0} } + +type GoEnum struct { + Foo *FOO `protobuf:"varint,1,req,name=foo,enum=testdata.FOO" json:"foo,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoEnum) Reset() { *m = GoEnum{} } +func (m *GoEnum) String() string { return proto.CompactTextString(m) } +func (*GoEnum) ProtoMessage() {} +func (*GoEnum) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *GoEnum) GetFoo() FOO { + if m != nil && m.Foo != nil { + return *m.Foo + } + return FOO_FOO1 +} + +type GoTestField struct { + Label *string `protobuf:"bytes,1,req,name=Label" json:"Label,omitempty"` + Type *string `protobuf:"bytes,2,req,name=Type" json:"Type,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTestField) Reset() { *m = GoTestField{} } +func (m *GoTestField) String() string { return proto.CompactTextString(m) } +func (*GoTestField) ProtoMessage() {} +func (*GoTestField) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *GoTestField) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" +} + +func (m *GoTestField) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +type GoTest struct { + // Some typical parameters + Kind *GoTest_KIND `protobuf:"varint,1,req,name=Kind,enum=testdata.GoTest_KIND" 
json:"Kind,omitempty"` + Table *string `protobuf:"bytes,2,opt,name=Table" json:"Table,omitempty"` + Param *int32 `protobuf:"varint,3,opt,name=Param" json:"Param,omitempty"` + // Required, repeated and optional foreign fields. + RequiredField *GoTestField `protobuf:"bytes,4,req,name=RequiredField" json:"RequiredField,omitempty"` + RepeatedField []*GoTestField `protobuf:"bytes,5,rep,name=RepeatedField" json:"RepeatedField,omitempty"` + OptionalField *GoTestField `protobuf:"bytes,6,opt,name=OptionalField" json:"OptionalField,omitempty"` + // Required fields of all basic types + F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required,json=FBoolRequired" json:"F_Bool_required,omitempty"` + F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required,json=FInt32Required" json:"F_Int32_required,omitempty"` + F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required,json=FInt64Required" json:"F_Int64_required,omitempty"` + F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required,json=FFixed32Required" json:"F_Fixed32_required,omitempty"` + F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required,json=FFixed64Required" json:"F_Fixed64_required,omitempty"` + F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required,json=FUint32Required" json:"F_Uint32_required,omitempty"` + F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required,json=FUint64Required" json:"F_Uint64_required,omitempty"` + F_FloatRequired *float32 `protobuf:"fixed32,17,req,name=F_Float_required,json=FFloatRequired" json:"F_Float_required,omitempty"` + F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required,json=FDoubleRequired" json:"F_Double_required,omitempty"` + F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required,json=FStringRequired" json:"F_String_required,omitempty"` + F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required,json=FBytesRequired" json:"F_Bytes_required,omitempty"` + F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required,json=FSint32Required" json:"F_Sint32_required,omitempty"` + F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required,json=FSint64Required" json:"F_Sint64_required,omitempty"` + // Repeated fields of all basic types + F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated,json=FBoolRepeated" json:"F_Bool_repeated,omitempty"` + F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated,json=FInt32Repeated" json:"F_Int32_repeated,omitempty"` + F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated,json=FInt64Repeated" json:"F_Int64_repeated,omitempty"` + F_Fixed32Repeated []uint32 `protobuf:"fixed32,23,rep,name=F_Fixed32_repeated,json=FFixed32Repeated" json:"F_Fixed32_repeated,omitempty"` + F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated,json=FFixed64Repeated" json:"F_Fixed64_repeated,omitempty"` + F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated,json=FUint32Repeated" json:"F_Uint32_repeated,omitempty"` + F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated,json=FUint64Repeated" json:"F_Uint64_repeated,omitempty"` + F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated,json=FFloatRepeated" json:"F_Float_repeated,omitempty"` + F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated,json=FDoubleRepeated" json:"F_Double_repeated,omitempty"` + F_StringRepeated 
[]string `protobuf:"bytes,29,rep,name=F_String_repeated,json=FStringRepeated" json:"F_String_repeated,omitempty"` + F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated,json=FBytesRepeated" json:"F_Bytes_repeated,omitempty"` + F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated,json=FSint32Repeated" json:"F_Sint32_repeated,omitempty"` + F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated,json=FSint64Repeated" json:"F_Sint64_repeated,omitempty"` + // Optional fields of all basic types + F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional,json=FBoolOptional" json:"F_Bool_optional,omitempty"` + F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional,json=FInt32Optional" json:"F_Int32_optional,omitempty"` + F_Int64Optional *int64 `protobuf:"varint,32,opt,name=F_Int64_optional,json=FInt64Optional" json:"F_Int64_optional,omitempty"` + F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional,json=FFixed32Optional" json:"F_Fixed32_optional,omitempty"` + F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional,json=FFixed64Optional" json:"F_Fixed64_optional,omitempty"` + F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional,json=FUint32Optional" json:"F_Uint32_optional,omitempty"` + F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional,json=FUint64Optional" json:"F_Uint64_optional,omitempty"` + F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional,json=FFloatOptional" json:"F_Float_optional,omitempty"` + F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional,json=FDoubleOptional" json:"F_Double_optional,omitempty"` + F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional,json=FStringOptional" json:"F_String_optional,omitempty"` + F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional,json=FBytesOptional" json:"F_Bytes_optional,omitempty"` + F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional,json=FSint32Optional" json:"F_Sint32_optional,omitempty"` + F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional,json=FSint64Optional" json:"F_Sint64_optional,omitempty"` + // Default-valued fields of all basic types + F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,json=FBoolDefaulted,def=1" json:"F_Bool_defaulted,omitempty"` + F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,json=FInt32Defaulted,def=32" json:"F_Int32_defaulted,omitempty"` + F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,json=FInt64Defaulted,def=64" json:"F_Int64_defaulted,omitempty"` + F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,json=FFixed32Defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"` + F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,json=FFixed64Defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"` + F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,json=FUint32Defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"` + F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,json=FUint64Defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"` + F_FloatDefaulted *float32 `protobuf:"fixed32,47,opt,name=F_Float_defaulted,json=FFloatDefaulted,def=314159" json:"F_Float_defaulted,omitempty"` + F_DoubleDefaulted *float64 
`protobuf:"fixed64,48,opt,name=F_Double_defaulted,json=FDoubleDefaulted,def=271828" json:"F_Double_defaulted,omitempty"` + F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,json=FStringDefaulted,def=hello, \"world!\"\n" json:"F_String_defaulted,omitempty"` + F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,json=FBytesDefaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"` + F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,json=FSint32Defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"` + F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,json=FSint64Defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"` + // Packed repeated fields (no string or bytes). + F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed,json=FBoolRepeatedPacked" json:"F_Bool_repeated_packed,omitempty"` + F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed,json=FInt32RepeatedPacked" json:"F_Int32_repeated_packed,omitempty"` + F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed,json=FInt64RepeatedPacked" json:"F_Int64_repeated_packed,omitempty"` + F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed,json=FFixed32RepeatedPacked" json:"F_Fixed32_repeated_packed,omitempty"` + F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed,json=FFixed64RepeatedPacked" json:"F_Fixed64_repeated_packed,omitempty"` + F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed,json=FUint32RepeatedPacked" json:"F_Uint32_repeated_packed,omitempty"` + F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed,json=FUint64RepeatedPacked" json:"F_Uint64_repeated_packed,omitempty"` + F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed,json=FFloatRepeatedPacked" json:"F_Float_repeated_packed,omitempty"` + F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed,json=FDoubleRepeatedPacked" json:"F_Double_repeated_packed,omitempty"` + F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed,json=FSint32RepeatedPacked" json:"F_Sint32_repeated_packed,omitempty"` + F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed,json=FSint64RepeatedPacked" json:"F_Sint64_repeated_packed,omitempty"` + Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup,json=requiredgroup" json:"requiredgroup,omitempty"` + Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup,json=repeatedgroup" json:"repeatedgroup,omitempty"` + Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup,json=optionalgroup" json:"optionalgroup,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest) Reset() { *m = GoTest{} } +func (m *GoTest) String() string { return proto.CompactTextString(m) } +func (*GoTest) ProtoMessage() {} +func (*GoTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +const Default_GoTest_F_BoolDefaulted bool = true +const Default_GoTest_F_Int32Defaulted int32 = 32 +const Default_GoTest_F_Int64Defaulted int64 = 64 +const Default_GoTest_F_Fixed32Defaulted uint32 = 320 +const Default_GoTest_F_Fixed64Defaulted uint64 = 640 +const 
Default_GoTest_F_Uint32Defaulted uint32 = 3200 +const Default_GoTest_F_Uint64Defaulted uint64 = 6400 +const Default_GoTest_F_FloatDefaulted float32 = 314159 +const Default_GoTest_F_DoubleDefaulted float64 = 271828 +const Default_GoTest_F_StringDefaulted string = "hello, \"world!\"\n" + +var Default_GoTest_F_BytesDefaulted []byte = []byte("Bignose") + +const Default_GoTest_F_Sint32Defaulted int32 = -32 +const Default_GoTest_F_Sint64Defaulted int64 = -64 + +func (m *GoTest) GetKind() GoTest_KIND { + if m != nil && m.Kind != nil { + return *m.Kind + } + return GoTest_VOID +} + +func (m *GoTest) GetTable() string { + if m != nil && m.Table != nil { + return *m.Table + } + return "" +} + +func (m *GoTest) GetParam() int32 { + if m != nil && m.Param != nil { + return *m.Param + } + return 0 +} + +func (m *GoTest) GetRequiredField() *GoTestField { + if m != nil { + return m.RequiredField + } + return nil +} + +func (m *GoTest) GetRepeatedField() []*GoTestField { + if m != nil { + return m.RepeatedField + } + return nil +} + +func (m *GoTest) GetOptionalField() *GoTestField { + if m != nil { + return m.OptionalField + } + return nil +} + +func (m *GoTest) GetF_BoolRequired() bool { + if m != nil && m.F_BoolRequired != nil { + return *m.F_BoolRequired + } + return false +} + +func (m *GoTest) GetF_Int32Required() int32 { + if m != nil && m.F_Int32Required != nil { + return *m.F_Int32Required + } + return 0 +} + +func (m *GoTest) GetF_Int64Required() int64 { + if m != nil && m.F_Int64Required != nil { + return *m.F_Int64Required + } + return 0 +} + +func (m *GoTest) GetF_Fixed32Required() uint32 { + if m != nil && m.F_Fixed32Required != nil { + return *m.F_Fixed32Required + } + return 0 +} + +func (m *GoTest) GetF_Fixed64Required() uint64 { + if m != nil && m.F_Fixed64Required != nil { + return *m.F_Fixed64Required + } + return 0 +} + +func (m *GoTest) GetF_Uint32Required() uint32 { + if m != nil && m.F_Uint32Required != nil { + return *m.F_Uint32Required + } + return 0 +} + +func (m *GoTest) GetF_Uint64Required() uint64 { + if m != nil && m.F_Uint64Required != nil { + return *m.F_Uint64Required + } + return 0 +} + +func (m *GoTest) GetF_FloatRequired() float32 { + if m != nil && m.F_FloatRequired != nil { + return *m.F_FloatRequired + } + return 0 +} + +func (m *GoTest) GetF_DoubleRequired() float64 { + if m != nil && m.F_DoubleRequired != nil { + return *m.F_DoubleRequired + } + return 0 +} + +func (m *GoTest) GetF_StringRequired() string { + if m != nil && m.F_StringRequired != nil { + return *m.F_StringRequired + } + return "" +} + +func (m *GoTest) GetF_BytesRequired() []byte { + if m != nil { + return m.F_BytesRequired + } + return nil +} + +func (m *GoTest) GetF_Sint32Required() int32 { + if m != nil && m.F_Sint32Required != nil { + return *m.F_Sint32Required + } + return 0 +} + +func (m *GoTest) GetF_Sint64Required() int64 { + if m != nil && m.F_Sint64Required != nil { + return *m.F_Sint64Required + } + return 0 +} + +func (m *GoTest) GetF_BoolRepeated() []bool { + if m != nil { + return m.F_BoolRepeated + } + return nil +} + +func (m *GoTest) GetF_Int32Repeated() []int32 { + if m != nil { + return m.F_Int32Repeated + } + return nil +} + +func (m *GoTest) GetF_Int64Repeated() []int64 { + if m != nil { + return m.F_Int64Repeated + } + return nil +} + +func (m *GoTest) GetF_Fixed32Repeated() []uint32 { + if m != nil { + return m.F_Fixed32Repeated + } + return nil +} + +func (m *GoTest) GetF_Fixed64Repeated() []uint64 { + if m != nil { + return m.F_Fixed64Repeated + } + return nil +} + +func (m 
*GoTest) GetF_Uint32Repeated() []uint32 { + if m != nil { + return m.F_Uint32Repeated + } + return nil +} + +func (m *GoTest) GetF_Uint64Repeated() []uint64 { + if m != nil { + return m.F_Uint64Repeated + } + return nil +} + +func (m *GoTest) GetF_FloatRepeated() []float32 { + if m != nil { + return m.F_FloatRepeated + } + return nil +} + +func (m *GoTest) GetF_DoubleRepeated() []float64 { + if m != nil { + return m.F_DoubleRepeated + } + return nil +} + +func (m *GoTest) GetF_StringRepeated() []string { + if m != nil { + return m.F_StringRepeated + } + return nil +} + +func (m *GoTest) GetF_BytesRepeated() [][]byte { + if m != nil { + return m.F_BytesRepeated + } + return nil +} + +func (m *GoTest) GetF_Sint32Repeated() []int32 { + if m != nil { + return m.F_Sint32Repeated + } + return nil +} + +func (m *GoTest) GetF_Sint64Repeated() []int64 { + if m != nil { + return m.F_Sint64Repeated + } + return nil +} + +func (m *GoTest) GetF_BoolOptional() bool { + if m != nil && m.F_BoolOptional != nil { + return *m.F_BoolOptional + } + return false +} + +func (m *GoTest) GetF_Int32Optional() int32 { + if m != nil && m.F_Int32Optional != nil { + return *m.F_Int32Optional + } + return 0 +} + +func (m *GoTest) GetF_Int64Optional() int64 { + if m != nil && m.F_Int64Optional != nil { + return *m.F_Int64Optional + } + return 0 +} + +func (m *GoTest) GetF_Fixed32Optional() uint32 { + if m != nil && m.F_Fixed32Optional != nil { + return *m.F_Fixed32Optional + } + return 0 +} + +func (m *GoTest) GetF_Fixed64Optional() uint64 { + if m != nil && m.F_Fixed64Optional != nil { + return *m.F_Fixed64Optional + } + return 0 +} + +func (m *GoTest) GetF_Uint32Optional() uint32 { + if m != nil && m.F_Uint32Optional != nil { + return *m.F_Uint32Optional + } + return 0 +} + +func (m *GoTest) GetF_Uint64Optional() uint64 { + if m != nil && m.F_Uint64Optional != nil { + return *m.F_Uint64Optional + } + return 0 +} + +func (m *GoTest) GetF_FloatOptional() float32 { + if m != nil && m.F_FloatOptional != nil { + return *m.F_FloatOptional + } + return 0 +} + +func (m *GoTest) GetF_DoubleOptional() float64 { + if m != nil && m.F_DoubleOptional != nil { + return *m.F_DoubleOptional + } + return 0 +} + +func (m *GoTest) GetF_StringOptional() string { + if m != nil && m.F_StringOptional != nil { + return *m.F_StringOptional + } + return "" +} + +func (m *GoTest) GetF_BytesOptional() []byte { + if m != nil { + return m.F_BytesOptional + } + return nil +} + +func (m *GoTest) GetF_Sint32Optional() int32 { + if m != nil && m.F_Sint32Optional != nil { + return *m.F_Sint32Optional + } + return 0 +} + +func (m *GoTest) GetF_Sint64Optional() int64 { + if m != nil && m.F_Sint64Optional != nil { + return *m.F_Sint64Optional + } + return 0 +} + +func (m *GoTest) GetF_BoolDefaulted() bool { + if m != nil && m.F_BoolDefaulted != nil { + return *m.F_BoolDefaulted + } + return Default_GoTest_F_BoolDefaulted +} + +func (m *GoTest) GetF_Int32Defaulted() int32 { + if m != nil && m.F_Int32Defaulted != nil { + return *m.F_Int32Defaulted + } + return Default_GoTest_F_Int32Defaulted +} + +func (m *GoTest) GetF_Int64Defaulted() int64 { + if m != nil && m.F_Int64Defaulted != nil { + return *m.F_Int64Defaulted + } + return Default_GoTest_F_Int64Defaulted +} + +func (m *GoTest) GetF_Fixed32Defaulted() uint32 { + if m != nil && m.F_Fixed32Defaulted != nil { + return *m.F_Fixed32Defaulted + } + return Default_GoTest_F_Fixed32Defaulted +} + +func (m *GoTest) GetF_Fixed64Defaulted() uint64 { + if m != nil && m.F_Fixed64Defaulted != nil { + return 
*m.F_Fixed64Defaulted + } + return Default_GoTest_F_Fixed64Defaulted +} + +func (m *GoTest) GetF_Uint32Defaulted() uint32 { + if m != nil && m.F_Uint32Defaulted != nil { + return *m.F_Uint32Defaulted + } + return Default_GoTest_F_Uint32Defaulted +} + +func (m *GoTest) GetF_Uint64Defaulted() uint64 { + if m != nil && m.F_Uint64Defaulted != nil { + return *m.F_Uint64Defaulted + } + return Default_GoTest_F_Uint64Defaulted +} + +func (m *GoTest) GetF_FloatDefaulted() float32 { + if m != nil && m.F_FloatDefaulted != nil { + return *m.F_FloatDefaulted + } + return Default_GoTest_F_FloatDefaulted +} + +func (m *GoTest) GetF_DoubleDefaulted() float64 { + if m != nil && m.F_DoubleDefaulted != nil { + return *m.F_DoubleDefaulted + } + return Default_GoTest_F_DoubleDefaulted +} + +func (m *GoTest) GetF_StringDefaulted() string { + if m != nil && m.F_StringDefaulted != nil { + return *m.F_StringDefaulted + } + return Default_GoTest_F_StringDefaulted +} + +func (m *GoTest) GetF_BytesDefaulted() []byte { + if m != nil && m.F_BytesDefaulted != nil { + return m.F_BytesDefaulted + } + return append([]byte(nil), Default_GoTest_F_BytesDefaulted...) +} + +func (m *GoTest) GetF_Sint32Defaulted() int32 { + if m != nil && m.F_Sint32Defaulted != nil { + return *m.F_Sint32Defaulted + } + return Default_GoTest_F_Sint32Defaulted +} + +func (m *GoTest) GetF_Sint64Defaulted() int64 { + if m != nil && m.F_Sint64Defaulted != nil { + return *m.F_Sint64Defaulted + } + return Default_GoTest_F_Sint64Defaulted +} + +func (m *GoTest) GetF_BoolRepeatedPacked() []bool { + if m != nil { + return m.F_BoolRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Int32RepeatedPacked() []int32 { + if m != nil { + return m.F_Int32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Int64RepeatedPacked() []int64 { + if m != nil { + return m.F_Int64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Fixed32RepeatedPacked() []uint32 { + if m != nil { + return m.F_Fixed32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Fixed64RepeatedPacked() []uint64 { + if m != nil { + return m.F_Fixed64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Uint32RepeatedPacked() []uint32 { + if m != nil { + return m.F_Uint32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Uint64RepeatedPacked() []uint64 { + if m != nil { + return m.F_Uint64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_FloatRepeatedPacked() []float32 { + if m != nil { + return m.F_FloatRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_DoubleRepeatedPacked() []float64 { + if m != nil { + return m.F_DoubleRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sint32RepeatedPacked() []int32 { + if m != nil { + return m.F_Sint32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sint64RepeatedPacked() []int64 { + if m != nil { + return m.F_Sint64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetRequiredgroup() *GoTest_RequiredGroup { + if m != nil { + return m.Requiredgroup + } + return nil +} + +func (m *GoTest) GetRepeatedgroup() []*GoTest_RepeatedGroup { + if m != nil { + return m.Repeatedgroup + } + return nil +} + +func (m *GoTest) GetOptionalgroup() *GoTest_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil +} + +// Required, repeated, and optional groups. 
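+// Editorial note (not part of the upstream file): groups are a legacy
+// proto2 construct. On the wire they are bracketed by start-group and
+// end-group tags instead of being length-delimited like ordinary
+// nested messages, which is why their struct tags say "group" rather
+// than "bytes".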
+type GoTest_RequiredGroup struct { + RequiredField *string `protobuf:"bytes,71,req,name=RequiredField" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} } +func (m *GoTest_RequiredGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_RequiredGroup) ProtoMessage() {} +func (*GoTest_RequiredGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +func (m *GoTest_RequiredGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +type GoTest_RepeatedGroup struct { + RequiredField *string `protobuf:"bytes,81,req,name=RequiredField" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} } +func (m *GoTest_RepeatedGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_RepeatedGroup) ProtoMessage() {} +func (*GoTest_RepeatedGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 1} } + +func (m *GoTest_RepeatedGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +type GoTest_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,91,req,name=RequiredField" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} } +func (m *GoTest_OptionalGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_OptionalGroup) ProtoMessage() {} +func (*GoTest_OptionalGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 2} } + +func (m *GoTest_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +// For testing a group containing a required field. +type GoTestRequiredGroupField struct { + Group *GoTestRequiredGroupField_Group `protobuf:"group,1,req,name=Group,json=group" json:"group,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTestRequiredGroupField) Reset() { *m = GoTestRequiredGroupField{} } +func (m *GoTestRequiredGroupField) String() string { return proto.CompactTextString(m) } +func (*GoTestRequiredGroupField) ProtoMessage() {} +func (*GoTestRequiredGroupField) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *GoTestRequiredGroupField) GetGroup() *GoTestRequiredGroupField_Group { + if m != nil { + return m.Group + } + return nil +} + +type GoTestRequiredGroupField_Group struct { + Field *int32 `protobuf:"varint,2,req,name=Field" json:"Field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTestRequiredGroupField_Group) Reset() { *m = GoTestRequiredGroupField_Group{} } +func (m *GoTestRequiredGroupField_Group) String() string { return proto.CompactTextString(m) } +func (*GoTestRequiredGroupField_Group) ProtoMessage() {} +func (*GoTestRequiredGroupField_Group) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{3, 0} +} + +func (m *GoTestRequiredGroupField_Group) GetField() int32 { + if m != nil && m.Field != nil { + return *m.Field + } + return 0 +} + +// For testing skipping of unrecognized fields. +// Numbers are all big, larger than tag numbers in GoTestField, +// the message used in the corresponding test. 
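+// Editorial sketch (not part of the upstream file): skipping can be
+// exercised by decoding a GoSkipTest into a message that declares none
+// of its tag numbers, e.g. Empty (defined later in this file); the
+// decoder then skips every field and keeps the raw bytes around:
+//
+//	b, _ := proto.Marshal(&GoSkipTest{ /* required skip_* fields set */ })
+//	var e Empty
+//	_ = proto.Unmarshal(b, &e) // all input bytes end up in e.XXX_unrecognized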
+type GoSkipTest struct { + SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32,json=skipInt32" json:"skip_int32,omitempty"` + SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32,json=skipFixed32" json:"skip_fixed32,omitempty"` + SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64,json=skipFixed64" json:"skip_fixed64,omitempty"` + SkipString *string `protobuf:"bytes,14,req,name=skip_string,json=skipString" json:"skip_string,omitempty"` + Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup,json=skipgroup" json:"skipgroup,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoSkipTest) Reset() { *m = GoSkipTest{} } +func (m *GoSkipTest) String() string { return proto.CompactTextString(m) } +func (*GoSkipTest) ProtoMessage() {} +func (*GoSkipTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *GoSkipTest) GetSkipInt32() int32 { + if m != nil && m.SkipInt32 != nil { + return *m.SkipInt32 + } + return 0 +} + +func (m *GoSkipTest) GetSkipFixed32() uint32 { + if m != nil && m.SkipFixed32 != nil { + return *m.SkipFixed32 + } + return 0 +} + +func (m *GoSkipTest) GetSkipFixed64() uint64 { + if m != nil && m.SkipFixed64 != nil { + return *m.SkipFixed64 + } + return 0 +} + +func (m *GoSkipTest) GetSkipString() string { + if m != nil && m.SkipString != nil { + return *m.SkipString + } + return "" +} + +func (m *GoSkipTest) GetSkipgroup() *GoSkipTest_SkipGroup { + if m != nil { + return m.Skipgroup + } + return nil +} + +type GoSkipTest_SkipGroup struct { + GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32,json=groupInt32" json:"group_int32,omitempty"` + GroupString *string `protobuf:"bytes,17,req,name=group_string,json=groupString" json:"group_string,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} } +func (m *GoSkipTest_SkipGroup) String() string { return proto.CompactTextString(m) } +func (*GoSkipTest_SkipGroup) ProtoMessage() {} +func (*GoSkipTest_SkipGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0} } + +func (m *GoSkipTest_SkipGroup) GetGroupInt32() int32 { + if m != nil && m.GroupInt32 != nil { + return *m.GroupInt32 + } + return 0 +} + +func (m *GoSkipTest_SkipGroup) GetGroupString() string { + if m != nil && m.GroupString != nil { + return *m.GroupString + } + return "" +} + +// For testing packed/non-packed decoder switching. +// A serialized instance of one should be deserializable as the other. 
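+// Editorial sketch (not part of the upstream file): the claim above
+// can be checked with a round trip, since the decoder accepts both
+// wire forms for a repeated scalar field:
+//
+//	b, _ := proto.Marshal(&NonPackedTest{A: []int32{1, 2, 3}})
+//	var p PackedTest
+//	_ = proto.Unmarshal(b, &p) // p.B is []int32{1, 2, 3}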
+type NonPackedTest struct { + A []int32 `protobuf:"varint,1,rep,name=a" json:"a,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NonPackedTest) Reset() { *m = NonPackedTest{} } +func (m *NonPackedTest) String() string { return proto.CompactTextString(m) } +func (*NonPackedTest) ProtoMessage() {} +func (*NonPackedTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *NonPackedTest) GetA() []int32 { + if m != nil { + return m.A + } + return nil +} + +type PackedTest struct { + B []int32 `protobuf:"varint,1,rep,packed,name=b" json:"b,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PackedTest) Reset() { *m = PackedTest{} } +func (m *PackedTest) String() string { return proto.CompactTextString(m) } +func (*PackedTest) ProtoMessage() {} +func (*PackedTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *PackedTest) GetB() []int32 { + if m != nil { + return m.B + } + return nil +} + +type MaxTag struct { + // Maximum possible tag number. + LastField *string `protobuf:"bytes,536870911,opt,name=last_field,json=lastField" json:"last_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MaxTag) Reset() { *m = MaxTag{} } +func (m *MaxTag) String() string { return proto.CompactTextString(m) } +func (*MaxTag) ProtoMessage() {} +func (*MaxTag) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *MaxTag) GetLastField() string { + if m != nil && m.LastField != nil { + return *m.LastField + } + return "" +} + +type OldMessage struct { + Nested *OldMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` + Num *int32 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldMessage) Reset() { *m = OldMessage{} } +func (m *OldMessage) String() string { return proto.CompactTextString(m) } +func (*OldMessage) ProtoMessage() {} +func (*OldMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *OldMessage) GetNested() *OldMessage_Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *OldMessage) GetNum() int32 { + if m != nil && m.Num != nil { + return *m.Num + } + return 0 +} + +type OldMessage_Nested struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} } +func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) } +func (*OldMessage_Nested) ProtoMessage() {} +func (*OldMessage_Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8, 0} } + +func (m *OldMessage_Nested) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +// NewMessage is wire compatible with OldMessage; +// imagine it as a future version. +type NewMessage struct { + Nested *NewMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` + // This is an int32 in OldMessage. 
+ Num *int64 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NewMessage) Reset() { *m = NewMessage{} } +func (m *NewMessage) String() string { return proto.CompactTextString(m) } +func (*NewMessage) ProtoMessage() {} +func (*NewMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *NewMessage) GetNested() *NewMessage_Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *NewMessage) GetNum() int64 { + if m != nil && m.Num != nil { + return *m.Num + } + return 0 +} + +type NewMessage_Nested struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + FoodGroup *string `protobuf:"bytes,2,opt,name=food_group,json=foodGroup" json:"food_group,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} } +func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) } +func (*NewMessage_Nested) ProtoMessage() {} +func (*NewMessage_Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 0} } + +func (m *NewMessage_Nested) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *NewMessage_Nested) GetFoodGroup() string { + if m != nil && m.FoodGroup != nil { + return *m.FoodGroup + } + return "" +} + +type InnerMessage struct { + Host *string `protobuf:"bytes,1,req,name=host" json:"host,omitempty"` + Port *int32 `protobuf:"varint,2,opt,name=port,def=4000" json:"port,omitempty"` + Connected *bool `protobuf:"varint,3,opt,name=connected" json:"connected,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *InnerMessage) Reset() { *m = InnerMessage{} } +func (m *InnerMessage) String() string { return proto.CompactTextString(m) } +func (*InnerMessage) ProtoMessage() {} +func (*InnerMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +const Default_InnerMessage_Port int32 = 4000 + +func (m *InnerMessage) GetHost() string { + if m != nil && m.Host != nil { + return *m.Host + } + return "" +} + +func (m *InnerMessage) GetPort() int32 { + if m != nil && m.Port != nil { + return *m.Port + } + return Default_InnerMessage_Port +} + +func (m *InnerMessage) GetConnected() bool { + if m != nil && m.Connected != nil { + return *m.Connected + } + return false +} + +type OtherMessage struct { + Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"` + Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OtherMessage) Reset() { *m = OtherMessage{} } +func (m *OtherMessage) String() string { return proto.CompactTextString(m) } +func (*OtherMessage) ProtoMessage() {} +func (*OtherMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +var extRange_OtherMessage = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*OtherMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OtherMessage +} + +func (m *OtherMessage) GetKey() int64 { + if m != nil && m.Key != nil { + return *m.Key + } + return 0 +} + +func (m *OtherMessage) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *OtherMessage) GetWeight() float32 { + if m != nil && m.Weight != nil { + return *m.Weight 
+ } + return 0 +} + +func (m *OtherMessage) GetInner() *InnerMessage { + if m != nil { + return m.Inner + } + return nil +} + +type RequiredInnerMessage struct { + LeoFinallyWonAnOscar *InnerMessage `protobuf:"bytes,1,req,name=leo_finally_won_an_oscar,json=leoFinallyWonAnOscar" json:"leo_finally_won_an_oscar,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RequiredInnerMessage) Reset() { *m = RequiredInnerMessage{} } +func (m *RequiredInnerMessage) String() string { return proto.CompactTextString(m) } +func (*RequiredInnerMessage) ProtoMessage() {} +func (*RequiredInnerMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *RequiredInnerMessage) GetLeoFinallyWonAnOscar() *InnerMessage { + if m != nil { + return m.LeoFinallyWonAnOscar + } + return nil +} + +type MyMessage struct { + Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"` + Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"` + Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"` + Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"` + Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" json:"others,omitempty"` + WeMustGoDeeper *RequiredInnerMessage `protobuf:"bytes,13,opt,name=we_must_go_deeper,json=weMustGoDeeper" json:"we_must_go_deeper,omitempty"` + RepInner []*InnerMessage `protobuf:"bytes,12,rep,name=rep_inner,json=repInner" json:"rep_inner,omitempty"` + Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=testdata.MyMessage_Color" json:"bikeshed,omitempty"` + Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup,json=somegroup" json:"somegroup,omitempty"` + // This field becomes [][]byte in the generated code. 
+ RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes,json=repBytes" json:"rep_bytes,omitempty"` + Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessage) Reset() { *m = MyMessage{} } +func (m *MyMessage) String() string { return proto.CompactTextString(m) } +func (*MyMessage) ProtoMessage() {} +func (*MyMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +var extRange_MyMessage = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MyMessage +} + +func (m *MyMessage) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *MyMessage) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MyMessage) GetQuote() string { + if m != nil && m.Quote != nil { + return *m.Quote + } + return "" +} + +func (m *MyMessage) GetPet() []string { + if m != nil { + return m.Pet + } + return nil +} + +func (m *MyMessage) GetInner() *InnerMessage { + if m != nil { + return m.Inner + } + return nil +} + +func (m *MyMessage) GetOthers() []*OtherMessage { + if m != nil { + return m.Others + } + return nil +} + +func (m *MyMessage) GetWeMustGoDeeper() *RequiredInnerMessage { + if m != nil { + return m.WeMustGoDeeper + } + return nil +} + +func (m *MyMessage) GetRepInner() []*InnerMessage { + if m != nil { + return m.RepInner + } + return nil +} + +func (m *MyMessage) GetBikeshed() MyMessage_Color { + if m != nil && m.Bikeshed != nil { + return *m.Bikeshed + } + return MyMessage_RED +} + +func (m *MyMessage) GetSomegroup() *MyMessage_SomeGroup { + if m != nil { + return m.Somegroup + } + return nil +} + +func (m *MyMessage) GetRepBytes() [][]byte { + if m != nil { + return m.RepBytes + } + return nil +} + +func (m *MyMessage) GetBigfloat() float64 { + if m != nil && m.Bigfloat != nil { + return *m.Bigfloat + } + return 0 +} + +type MyMessage_SomeGroup struct { + GroupField *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} } +func (m *MyMessage_SomeGroup) String() string { return proto.CompactTextString(m) } +func (*MyMessage_SomeGroup) ProtoMessage() {} +func (*MyMessage_SomeGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13, 0} } + +func (m *MyMessage_SomeGroup) GetGroupField() int32 { + if m != nil && m.GroupField != nil { + return *m.GroupField + } + return 0 +} + +type Ext struct { + Data *string `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Ext) Reset() { *m = Ext{} } +func (m *Ext) String() string { return proto.CompactTextString(m) } +func (*Ext) ProtoMessage() {} +func (*Ext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *Ext) GetData() string { + if m != nil && m.Data != nil { + return *m.Data + } + return "" +} + +var E_Ext_More = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*Ext)(nil), + Field: 103, + Name: "testdata.Ext.more", + Tag: "bytes,103,opt,name=more", + Filename: "test.proto", +} + +var E_Ext_Text = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*string)(nil), + Field: 104, + Name: "testdata.Ext.text", + Tag: "bytes,104,opt,name=text", + 
Filename: "test.proto", +} + +var E_Ext_Number = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 105, + Name: "testdata.Ext.number", + Tag: "varint,105,opt,name=number", + Filename: "test.proto", +} + +type ComplexExtension struct { + First *int32 `protobuf:"varint,1,opt,name=first" json:"first,omitempty"` + Second *int32 `protobuf:"varint,2,opt,name=second" json:"second,omitempty"` + Third []int32 `protobuf:"varint,3,rep,name=third" json:"third,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ComplexExtension) Reset() { *m = ComplexExtension{} } +func (m *ComplexExtension) String() string { return proto.CompactTextString(m) } +func (*ComplexExtension) ProtoMessage() {} +func (*ComplexExtension) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *ComplexExtension) GetFirst() int32 { + if m != nil && m.First != nil { + return *m.First + } + return 0 +} + +func (m *ComplexExtension) GetSecond() int32 { + if m != nil && m.Second != nil { + return *m.Second + } + return 0 +} + +func (m *ComplexExtension) GetThird() []int32 { + if m != nil { + return m.Third + } + return nil +} + +type DefaultsMessage struct { + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DefaultsMessage) Reset() { *m = DefaultsMessage{} } +func (m *DefaultsMessage) String() string { return proto.CompactTextString(m) } +func (*DefaultsMessage) ProtoMessage() {} +func (*DefaultsMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +var extRange_DefaultsMessage = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*DefaultsMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_DefaultsMessage +} + +type MyMessageSet struct { + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessageSet) Reset() { *m = MyMessageSet{} } +func (m *MyMessageSet) String() string { return proto.CompactTextString(m) } +func (*MyMessageSet) ProtoMessage() {} +func (*MyMessageSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *MyMessageSet) Marshal() ([]byte, error) { + return proto.MarshalMessageSet(&m.XXX_InternalExtensions) +} +func (m *MyMessageSet) Unmarshal(buf []byte) error { + return proto.UnmarshalMessageSet(buf, &m.XXX_InternalExtensions) +} +func (m *MyMessageSet) MarshalJSON() ([]byte, error) { + return proto.MarshalMessageSetJSON(&m.XXX_InternalExtensions) +} +func (m *MyMessageSet) UnmarshalJSON(buf []byte) error { + return proto.UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions) +} + +// ensure MyMessageSet satisfies proto.Marshaler and proto.Unmarshaler +var _ proto.Marshaler = (*MyMessageSet)(nil) +var _ proto.Unmarshaler = (*MyMessageSet)(nil) + +var extRange_MyMessageSet = []proto.ExtensionRange{ + {100, 2147483646}, +} + +func (*MyMessageSet) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MyMessageSet +} + +type Empty struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +type MessageList struct { + Message []*MessageList_Message `protobuf:"group,1,rep,name=Message,json=message" json:"message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageList) Reset() { *m = MessageList{} } +func (m *MessageList) String() 
string { return proto.CompactTextString(m) } +func (*MessageList) ProtoMessage() {} +func (*MessageList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *MessageList) GetMessage() []*MessageList_Message { + if m != nil { + return m.Message + } + return nil +} + +type MessageList_Message struct { + Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` + Count *int32 `protobuf:"varint,3,req,name=count" json:"count,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageList_Message) Reset() { *m = MessageList_Message{} } +func (m *MessageList_Message) String() string { return proto.CompactTextString(m) } +func (*MessageList_Message) ProtoMessage() {} +func (*MessageList_Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19, 0} } + +func (m *MessageList_Message) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MessageList_Message) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +type Strings struct { + StringField *string `protobuf:"bytes,1,opt,name=string_field,json=stringField" json:"string_field,omitempty"` + BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field,json=bytesField" json:"bytes_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Strings) Reset() { *m = Strings{} } +func (m *Strings) String() string { return proto.CompactTextString(m) } +func (*Strings) ProtoMessage() {} +func (*Strings) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *Strings) GetStringField() string { + if m != nil && m.StringField != nil { + return *m.StringField + } + return "" +} + +func (m *Strings) GetBytesField() []byte { + if m != nil { + return m.BytesField + } + return nil +} + +type Defaults struct { + // Default-valued fields of all basic types. + // Same as GoTest, but copied here to make testing easier. + F_Bool *bool `protobuf:"varint,1,opt,name=F_Bool,json=FBool,def=1" json:"F_Bool,omitempty"` + F_Int32 *int32 `protobuf:"varint,2,opt,name=F_Int32,json=FInt32,def=32" json:"F_Int32,omitempty"` + F_Int64 *int64 `protobuf:"varint,3,opt,name=F_Int64,json=FInt64,def=64" json:"F_Int64,omitempty"` + F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,json=FFixed32,def=320" json:"F_Fixed32,omitempty"` + F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,json=FFixed64,def=640" json:"F_Fixed64,omitempty"` + F_Uint32 *uint32 `protobuf:"varint,6,opt,name=F_Uint32,json=FUint32,def=3200" json:"F_Uint32,omitempty"` + F_Uint64 *uint64 `protobuf:"varint,7,opt,name=F_Uint64,json=FUint64,def=6400" json:"F_Uint64,omitempty"` + F_Float *float32 `protobuf:"fixed32,8,opt,name=F_Float,json=FFloat,def=314159" json:"F_Float,omitempty"` + F_Double *float64 `protobuf:"fixed64,9,opt,name=F_Double,json=FDouble,def=271828" json:"F_Double,omitempty"` + F_String *string `protobuf:"bytes,10,opt,name=F_String,json=FString,def=hello, \"world!\"\n" json:"F_String,omitempty"` + F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,json=FBytes,def=Bignose" json:"F_Bytes,omitempty"` + F_Sint32 *int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,json=FSint32,def=-32" json:"F_Sint32,omitempty"` + F_Sint64 *int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,json=FSint64,def=-64" json:"F_Sint64,omitempty"` + F_Enum *Defaults_Color `protobuf:"varint,14,opt,name=F_Enum,json=FEnum,enum=testdata.Defaults_Color,def=1" json:"F_Enum,omitempty"` + // More fields with crazy defaults. 
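+ // (The inf, -inf, and nan defaults below exercise the parsing of
+ // special floating-point values; their Go-side values are the
+ // Default_Defaults_F_Pinf/Ninf/Nan vars further down.)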
+ F_Pinf *float32 `protobuf:"fixed32,15,opt,name=F_Pinf,json=FPinf,def=inf" json:"F_Pinf,omitempty"` + F_Ninf *float32 `protobuf:"fixed32,16,opt,name=F_Ninf,json=FNinf,def=-inf" json:"F_Ninf,omitempty"` + F_Nan *float32 `protobuf:"fixed32,17,opt,name=F_Nan,json=FNan,def=nan" json:"F_Nan,omitempty"` + // Sub-message. + Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"` + // Redundant but explicit defaults. + StrZero *string `protobuf:"bytes,19,opt,name=str_zero,json=strZero,def=" json:"str_zero,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Defaults) Reset() { *m = Defaults{} } +func (m *Defaults) String() string { return proto.CompactTextString(m) } +func (*Defaults) ProtoMessage() {} +func (*Defaults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +const Default_Defaults_F_Bool bool = true +const Default_Defaults_F_Int32 int32 = 32 +const Default_Defaults_F_Int64 int64 = 64 +const Default_Defaults_F_Fixed32 uint32 = 320 +const Default_Defaults_F_Fixed64 uint64 = 640 +const Default_Defaults_F_Uint32 uint32 = 3200 +const Default_Defaults_F_Uint64 uint64 = 6400 +const Default_Defaults_F_Float float32 = 314159 +const Default_Defaults_F_Double float64 = 271828 +const Default_Defaults_F_String string = "hello, \"world!\"\n" + +var Default_Defaults_F_Bytes []byte = []byte("Bignose") + +const Default_Defaults_F_Sint32 int32 = -32 +const Default_Defaults_F_Sint64 int64 = -64 +const Default_Defaults_F_Enum Defaults_Color = Defaults_GREEN + +var Default_Defaults_F_Pinf float32 = float32(math.Inf(1)) +var Default_Defaults_F_Ninf float32 = float32(math.Inf(-1)) +var Default_Defaults_F_Nan float32 = float32(math.NaN()) + +func (m *Defaults) GetF_Bool() bool { + if m != nil && m.F_Bool != nil { + return *m.F_Bool + } + return Default_Defaults_F_Bool +} + +func (m *Defaults) GetF_Int32() int32 { + if m != nil && m.F_Int32 != nil { + return *m.F_Int32 + } + return Default_Defaults_F_Int32 +} + +func (m *Defaults) GetF_Int64() int64 { + if m != nil && m.F_Int64 != nil { + return *m.F_Int64 + } + return Default_Defaults_F_Int64 +} + +func (m *Defaults) GetF_Fixed32() uint32 { + if m != nil && m.F_Fixed32 != nil { + return *m.F_Fixed32 + } + return Default_Defaults_F_Fixed32 +} + +func (m *Defaults) GetF_Fixed64() uint64 { + if m != nil && m.F_Fixed64 != nil { + return *m.F_Fixed64 + } + return Default_Defaults_F_Fixed64 +} + +func (m *Defaults) GetF_Uint32() uint32 { + if m != nil && m.F_Uint32 != nil { + return *m.F_Uint32 + } + return Default_Defaults_F_Uint32 +} + +func (m *Defaults) GetF_Uint64() uint64 { + if m != nil && m.F_Uint64 != nil { + return *m.F_Uint64 + } + return Default_Defaults_F_Uint64 +} + +func (m *Defaults) GetF_Float() float32 { + if m != nil && m.F_Float != nil { + return *m.F_Float + } + return Default_Defaults_F_Float +} + +func (m *Defaults) GetF_Double() float64 { + if m != nil && m.F_Double != nil { + return *m.F_Double + } + return Default_Defaults_F_Double +} + +func (m *Defaults) GetF_String() string { + if m != nil && m.F_String != nil { + return *m.F_String + } + return Default_Defaults_F_String +} + +func (m *Defaults) GetF_Bytes() []byte { + if m != nil && m.F_Bytes != nil { + return m.F_Bytes + } + return append([]byte(nil), Default_Defaults_F_Bytes...) 
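+ // (append onto a nil slice above returns a fresh copy, so callers
+ // cannot mutate the shared Default_Defaults_F_Bytes value.)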
+} + +func (m *Defaults) GetF_Sint32() int32 { + if m != nil && m.F_Sint32 != nil { + return *m.F_Sint32 + } + return Default_Defaults_F_Sint32 +} + +func (m *Defaults) GetF_Sint64() int64 { + if m != nil && m.F_Sint64 != nil { + return *m.F_Sint64 + } + return Default_Defaults_F_Sint64 +} + +func (m *Defaults) GetF_Enum() Defaults_Color { + if m != nil && m.F_Enum != nil { + return *m.F_Enum + } + return Default_Defaults_F_Enum +} + +func (m *Defaults) GetF_Pinf() float32 { + if m != nil && m.F_Pinf != nil { + return *m.F_Pinf + } + return Default_Defaults_F_Pinf +} + +func (m *Defaults) GetF_Ninf() float32 { + if m != nil && m.F_Ninf != nil { + return *m.F_Ninf + } + return Default_Defaults_F_Ninf +} + +func (m *Defaults) GetF_Nan() float32 { + if m != nil && m.F_Nan != nil { + return *m.F_Nan + } + return Default_Defaults_F_Nan +} + +func (m *Defaults) GetSub() *SubDefaults { + if m != nil { + return m.Sub + } + return nil +} + +func (m *Defaults) GetStrZero() string { + if m != nil && m.StrZero != nil { + return *m.StrZero + } + return "" +} + +type SubDefaults struct { + N *int64 `protobuf:"varint,1,opt,name=n,def=7" json:"n,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SubDefaults) Reset() { *m = SubDefaults{} } +func (m *SubDefaults) String() string { return proto.CompactTextString(m) } +func (*SubDefaults) ProtoMessage() {} +func (*SubDefaults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } + +const Default_SubDefaults_N int64 = 7 + +func (m *SubDefaults) GetN() int64 { + if m != nil && m.N != nil { + return *m.N + } + return Default_SubDefaults_N +} + +type RepeatedEnum struct { + Color []RepeatedEnum_Color `protobuf:"varint,1,rep,name=color,enum=testdata.RepeatedEnum_Color" json:"color,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} } +func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) } +func (*RepeatedEnum) ProtoMessage() {} +func (*RepeatedEnum) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } + +func (m *RepeatedEnum) GetColor() []RepeatedEnum_Color { + if m != nil { + return m.Color + } + return nil +} + +type MoreRepeated struct { + Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"` + BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed,json=boolsPacked" json:"bools_packed,omitempty"` + Ints []int32 `protobuf:"varint,3,rep,name=ints" json:"ints,omitempty"` + IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed,json=intsPacked" json:"ints_packed,omitempty"` + Int64SPacked []int64 `protobuf:"varint,7,rep,packed,name=int64s_packed,json=int64sPacked" json:"int64s_packed,omitempty"` + Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"` + Fixeds []uint32 `protobuf:"fixed32,6,rep,name=fixeds" json:"fixeds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MoreRepeated) Reset() { *m = MoreRepeated{} } +func (m *MoreRepeated) String() string { return proto.CompactTextString(m) } +func (*MoreRepeated) ProtoMessage() {} +func (*MoreRepeated) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } + +func (m *MoreRepeated) GetBools() []bool { + if m != nil { + return m.Bools + } + return nil +} + +func (m *MoreRepeated) GetBoolsPacked() []bool { + if m != nil { + return m.BoolsPacked + } + return nil +} + +func (m *MoreRepeated) GetInts() []int32 { + if m != nil { + return m.Ints + } + return nil +} + +func (m *MoreRepeated) GetIntsPacked() 
[]int32 { + if m != nil { + return m.IntsPacked + } + return nil +} + +func (m *MoreRepeated) GetInt64SPacked() []int64 { + if m != nil { + return m.Int64SPacked + } + return nil +} + +func (m *MoreRepeated) GetStrings() []string { + if m != nil { + return m.Strings + } + return nil +} + +func (m *MoreRepeated) GetFixeds() []uint32 { + if m != nil { + return m.Fixeds + } + return nil +} + +type GroupOld struct { + G *GroupOld_G `protobuf:"group,101,opt,name=G,json=g" json:"g,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupOld) Reset() { *m = GroupOld{} } +func (m *GroupOld) String() string { return proto.CompactTextString(m) } +func (*GroupOld) ProtoMessage() {} +func (*GroupOld) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } + +func (m *GroupOld) GetG() *GroupOld_G { + if m != nil { + return m.G + } + return nil +} + +type GroupOld_G struct { + X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupOld_G) Reset() { *m = GroupOld_G{} } +func (m *GroupOld_G) String() string { return proto.CompactTextString(m) } +func (*GroupOld_G) ProtoMessage() {} +func (*GroupOld_G) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25, 0} } + +func (m *GroupOld_G) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +type GroupNew struct { + G *GroupNew_G `protobuf:"group,101,opt,name=G,json=g" json:"g,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupNew) Reset() { *m = GroupNew{} } +func (m *GroupNew) String() string { return proto.CompactTextString(m) } +func (*GroupNew) ProtoMessage() {} +func (*GroupNew) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } + +func (m *GroupNew) GetG() *GroupNew_G { + if m != nil { + return m.G + } + return nil +} + +type GroupNew_G struct { + X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` + Y *int32 `protobuf:"varint,3,opt,name=y" json:"y,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupNew_G) Reset() { *m = GroupNew_G{} } +func (m *GroupNew_G) String() string { return proto.CompactTextString(m) } +func (*GroupNew_G) ProtoMessage() {} +func (*GroupNew_G) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26, 0} } + +func (m *GroupNew_G) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +func (m *GroupNew_G) GetY() int32 { + if m != nil && m.Y != nil { + return *m.Y + } + return 0 +} + +type FloatingPoint struct { + F *float64 `protobuf:"fixed64,1,req,name=f" json:"f,omitempty"` + Exact *bool `protobuf:"varint,2,opt,name=exact" json:"exact,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FloatingPoint) Reset() { *m = FloatingPoint{} } +func (m *FloatingPoint) String() string { return proto.CompactTextString(m) } +func (*FloatingPoint) ProtoMessage() {} +func (*FloatingPoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } + +func (m *FloatingPoint) GetF() float64 { + if m != nil && m.F != nil { + return *m.F + } + return 0 +} + +func (m *FloatingPoint) GetExact() bool { + if m != nil && m.Exact != nil { + return *m.Exact + } + return false +} + +type MessageWithMap struct { + NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping,json=nameMapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" 
protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping,json=byteMapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + StrToStr map[string]string `protobuf:"bytes,4,rep,name=str_to_str,json=strToStr" json:"str_to_str,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } +func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } +func (*MessageWithMap) ProtoMessage() {} +func (*MessageWithMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } + +func (m *MessageWithMap) GetNameMapping() map[int32]string { + if m != nil { + return m.NameMapping + } + return nil +} + +func (m *MessageWithMap) GetMsgMapping() map[int64]*FloatingPoint { + if m != nil { + return m.MsgMapping + } + return nil +} + +func (m *MessageWithMap) GetByteMapping() map[bool][]byte { + if m != nil { + return m.ByteMapping + } + return nil +} + +func (m *MessageWithMap) GetStrToStr() map[string]string { + if m != nil { + return m.StrToStr + } + return nil +} + +type Oneof struct { + // Types that are valid to be assigned to Union: + // *Oneof_F_Bool + // *Oneof_F_Int32 + // *Oneof_F_Int64 + // *Oneof_F_Fixed32 + // *Oneof_F_Fixed64 + // *Oneof_F_Uint32 + // *Oneof_F_Uint64 + // *Oneof_F_Float + // *Oneof_F_Double + // *Oneof_F_String + // *Oneof_F_Bytes + // *Oneof_F_Sint32 + // *Oneof_F_Sint64 + // *Oneof_F_Enum + // *Oneof_F_Message + // *Oneof_FGroup + // *Oneof_F_Largest_Tag + Union isOneof_Union `protobuf_oneof:"union"` + // Types that are valid to be assigned to Tormato: + // *Oneof_Value + Tormato isOneof_Tormato `protobuf_oneof:"tormato"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Oneof) Reset() { *m = Oneof{} } +func (m *Oneof) String() string { return proto.CompactTextString(m) } +func (*Oneof) ProtoMessage() {} +func (*Oneof) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +type isOneof_Union interface { + isOneof_Union() +} +type isOneof_Tormato interface { + isOneof_Tormato() +} + +type Oneof_F_Bool struct { + F_Bool bool `protobuf:"varint,1,opt,name=F_Bool,json=FBool,oneof"` +} +type Oneof_F_Int32 struct { + F_Int32 int32 `protobuf:"varint,2,opt,name=F_Int32,json=FInt32,oneof"` +} +type Oneof_F_Int64 struct { + F_Int64 int64 `protobuf:"varint,3,opt,name=F_Int64,json=FInt64,oneof"` +} +type Oneof_F_Fixed32 struct { + F_Fixed32 uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,json=FFixed32,oneof"` +} +type Oneof_F_Fixed64 struct { + F_Fixed64 uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,json=FFixed64,oneof"` +} +type Oneof_F_Uint32 struct { + F_Uint32 uint32 `protobuf:"varint,6,opt,name=F_Uint32,json=FUint32,oneof"` +} +type Oneof_F_Uint64 struct { + F_Uint64 uint64 `protobuf:"varint,7,opt,name=F_Uint64,json=FUint64,oneof"` +} +type Oneof_F_Float struct { + F_Float float32 `protobuf:"fixed32,8,opt,name=F_Float,json=FFloat,oneof"` +} +type Oneof_F_Double struct { + F_Double float64 `protobuf:"fixed64,9,opt,name=F_Double,json=FDouble,oneof"` +} +type Oneof_F_String struct { + F_String string `protobuf:"bytes,10,opt,name=F_String,json=FString,oneof"` +} +type Oneof_F_Bytes struct { + F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,json=FBytes,oneof"` +} +type Oneof_F_Sint32 struct { + F_Sint32 int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,json=FSint32,oneof"` 
+} +type Oneof_F_Sint64 struct { + F_Sint64 int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,json=FSint64,oneof"` +} +type Oneof_F_Enum struct { + F_Enum MyMessage_Color `protobuf:"varint,14,opt,name=F_Enum,json=FEnum,enum=testdata.MyMessage_Color,oneof"` +} +type Oneof_F_Message struct { + F_Message *GoTestField `protobuf:"bytes,15,opt,name=F_Message,json=FMessage,oneof"` +} +type Oneof_FGroup struct { + FGroup *Oneof_F_Group `protobuf:"group,16,opt,name=F_Group,json=fGroup,oneof"` +} +type Oneof_F_Largest_Tag struct { + F_Largest_Tag int32 `protobuf:"varint,536870911,opt,name=F_Largest_Tag,json=FLargestTag,oneof"` +} +type Oneof_Value struct { + Value int32 `protobuf:"varint,100,opt,name=value,oneof"` +} + +func (*Oneof_F_Bool) isOneof_Union() {} +func (*Oneof_F_Int32) isOneof_Union() {} +func (*Oneof_F_Int64) isOneof_Union() {} +func (*Oneof_F_Fixed32) isOneof_Union() {} +func (*Oneof_F_Fixed64) isOneof_Union() {} +func (*Oneof_F_Uint32) isOneof_Union() {} +func (*Oneof_F_Uint64) isOneof_Union() {} +func (*Oneof_F_Float) isOneof_Union() {} +func (*Oneof_F_Double) isOneof_Union() {} +func (*Oneof_F_String) isOneof_Union() {} +func (*Oneof_F_Bytes) isOneof_Union() {} +func (*Oneof_F_Sint32) isOneof_Union() {} +func (*Oneof_F_Sint64) isOneof_Union() {} +func (*Oneof_F_Enum) isOneof_Union() {} +func (*Oneof_F_Message) isOneof_Union() {} +func (*Oneof_FGroup) isOneof_Union() {} +func (*Oneof_F_Largest_Tag) isOneof_Union() {} +func (*Oneof_Value) isOneof_Tormato() {} + +func (m *Oneof) GetUnion() isOneof_Union { + if m != nil { + return m.Union + } + return nil +} +func (m *Oneof) GetTormato() isOneof_Tormato { + if m != nil { + return m.Tormato + } + return nil +} + +func (m *Oneof) GetF_Bool() bool { + if x, ok := m.GetUnion().(*Oneof_F_Bool); ok { + return x.F_Bool + } + return false +} + +func (m *Oneof) GetF_Int32() int32 { + if x, ok := m.GetUnion().(*Oneof_F_Int32); ok { + return x.F_Int32 + } + return 0 +} + +func (m *Oneof) GetF_Int64() int64 { + if x, ok := m.GetUnion().(*Oneof_F_Int64); ok { + return x.F_Int64 + } + return 0 +} + +func (m *Oneof) GetF_Fixed32() uint32 { + if x, ok := m.GetUnion().(*Oneof_F_Fixed32); ok { + return x.F_Fixed32 + } + return 0 +} + +func (m *Oneof) GetF_Fixed64() uint64 { + if x, ok := m.GetUnion().(*Oneof_F_Fixed64); ok { + return x.F_Fixed64 + } + return 0 +} + +func (m *Oneof) GetF_Uint32() uint32 { + if x, ok := m.GetUnion().(*Oneof_F_Uint32); ok { + return x.F_Uint32 + } + return 0 +} + +func (m *Oneof) GetF_Uint64() uint64 { + if x, ok := m.GetUnion().(*Oneof_F_Uint64); ok { + return x.F_Uint64 + } + return 0 +} + +func (m *Oneof) GetF_Float() float32 { + if x, ok := m.GetUnion().(*Oneof_F_Float); ok { + return x.F_Float + } + return 0 +} + +func (m *Oneof) GetF_Double() float64 { + if x, ok := m.GetUnion().(*Oneof_F_Double); ok { + return x.F_Double + } + return 0 +} + +func (m *Oneof) GetF_String() string { + if x, ok := m.GetUnion().(*Oneof_F_String); ok { + return x.F_String + } + return "" +} + +func (m *Oneof) GetF_Bytes() []byte { + if x, ok := m.GetUnion().(*Oneof_F_Bytes); ok { + return x.F_Bytes + } + return nil +} + +func (m *Oneof) GetF_Sint32() int32 { + if x, ok := m.GetUnion().(*Oneof_F_Sint32); ok { + return x.F_Sint32 + } + return 0 +} + +func (m *Oneof) GetF_Sint64() int64 { + if x, ok := m.GetUnion().(*Oneof_F_Sint64); ok { + return x.F_Sint64 + } + return 0 +} + +func (m *Oneof) GetF_Enum() MyMessage_Color { + if x, ok := m.GetUnion().(*Oneof_F_Enum); ok { + return x.F_Enum + } + return MyMessage_RED +} + +func (m *Oneof) 
GetF_Message() *GoTestField { + if x, ok := m.GetUnion().(*Oneof_F_Message); ok { + return x.F_Message + } + return nil +} + +func (m *Oneof) GetFGroup() *Oneof_F_Group { + if x, ok := m.GetUnion().(*Oneof_FGroup); ok { + return x.FGroup + } + return nil +} + +func (m *Oneof) GetF_Largest_Tag() int32 { + if x, ok := m.GetUnion().(*Oneof_F_Largest_Tag); ok { + return x.F_Largest_Tag + } + return 0 +} + +func (m *Oneof) GetValue() int32 { + if x, ok := m.GetTormato().(*Oneof_Value); ok { + return x.Value + } + return 0 +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Oneof) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Oneof_OneofMarshaler, _Oneof_OneofUnmarshaler, _Oneof_OneofSizer, []interface{}{ + (*Oneof_F_Bool)(nil), + (*Oneof_F_Int32)(nil), + (*Oneof_F_Int64)(nil), + (*Oneof_F_Fixed32)(nil), + (*Oneof_F_Fixed64)(nil), + (*Oneof_F_Uint32)(nil), + (*Oneof_F_Uint64)(nil), + (*Oneof_F_Float)(nil), + (*Oneof_F_Double)(nil), + (*Oneof_F_String)(nil), + (*Oneof_F_Bytes)(nil), + (*Oneof_F_Sint32)(nil), + (*Oneof_F_Sint64)(nil), + (*Oneof_F_Enum)(nil), + (*Oneof_F_Message)(nil), + (*Oneof_FGroup)(nil), + (*Oneof_F_Largest_Tag)(nil), + (*Oneof_Value)(nil), + } +} + +func _Oneof_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Oneof) + // union + switch x := m.Union.(type) { + case *Oneof_F_Bool: + t := uint64(0) + if x.F_Bool { + t = 1 + } + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Oneof_F_Int32: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Int32)) + case *Oneof_F_Int64: + b.EncodeVarint(3<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Int64)) + case *Oneof_F_Fixed32: + b.EncodeVarint(4<<3 | proto.WireFixed32) + b.EncodeFixed32(uint64(x.F_Fixed32)) + case *Oneof_F_Fixed64: + b.EncodeVarint(5<<3 | proto.WireFixed64) + b.EncodeFixed64(uint64(x.F_Fixed64)) + case *Oneof_F_Uint32: + b.EncodeVarint(6<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Uint32)) + case *Oneof_F_Uint64: + b.EncodeVarint(7<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Uint64)) + case *Oneof_F_Float: + b.EncodeVarint(8<<3 | proto.WireFixed32) + b.EncodeFixed32(uint64(math.Float32bits(x.F_Float))) + case *Oneof_F_Double: + b.EncodeVarint(9<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.F_Double)) + case *Oneof_F_String: + b.EncodeVarint(10<<3 | proto.WireBytes) + b.EncodeStringBytes(x.F_String) + case *Oneof_F_Bytes: + b.EncodeVarint(11<<3 | proto.WireBytes) + b.EncodeRawBytes(x.F_Bytes) + case *Oneof_F_Sint32: + b.EncodeVarint(12<<3 | proto.WireVarint) + b.EncodeZigzag32(uint64(x.F_Sint32)) + case *Oneof_F_Sint64: + b.EncodeVarint(13<<3 | proto.WireVarint) + b.EncodeZigzag64(uint64(x.F_Sint64)) + case *Oneof_F_Enum: + b.EncodeVarint(14<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Enum)) + case *Oneof_F_Message: + b.EncodeVarint(15<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.F_Message); err != nil { + return err + } + case *Oneof_FGroup: + b.EncodeVarint(16<<3 | proto.WireStartGroup) + if err := b.Marshal(x.FGroup); err != nil { + return err + } + b.EncodeVarint(16<<3 | proto.WireEndGroup) + case *Oneof_F_Largest_Tag: + b.EncodeVarint(536870911<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Largest_Tag)) + case nil: + default: + return fmt.Errorf("Oneof.Union has unexpected type %T", x) + } + // tormato + switch x := m.Tormato.(type) 
{ + case *Oneof_Value: + b.EncodeVarint(100<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Value)) + case nil: + default: + return fmt.Errorf("Oneof.Tormato has unexpected type %T", x) + } + return nil +} + +func _Oneof_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Oneof) + switch tag { + case 1: // union.F_Bool + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Bool{x != 0} + return true, err + case 2: // union.F_Int32 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Int32{int32(x)} + return true, err + case 3: // union.F_Int64 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Int64{int64(x)} + return true, err + case 4: // union.F_Fixed32 + if wire != proto.WireFixed32 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed32() + m.Union = &Oneof_F_Fixed32{uint32(x)} + return true, err + case 5: // union.F_Fixed64 + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Union = &Oneof_F_Fixed64{x} + return true, err + case 6: // union.F_Uint32 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Uint32{uint32(x)} + return true, err + case 7: // union.F_Uint64 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Uint64{x} + return true, err + case 8: // union.F_Float + if wire != proto.WireFixed32 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed32() + m.Union = &Oneof_F_Float{math.Float32frombits(uint32(x))} + return true, err + case 9: // union.F_Double + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Union = &Oneof_F_Double{math.Float64frombits(x)} + return true, err + case 10: // union.F_String + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &Oneof_F_String{x} + return true, err + case 11: // union.F_Bytes + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Union = &Oneof_F_Bytes{x} + return true, err + case 12: // union.F_Sint32 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeZigzag32() + m.Union = &Oneof_F_Sint32{int32(x)} + return true, err + case 13: // union.F_Sint64 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeZigzag64() + m.Union = &Oneof_F_Sint64{int64(x)} + return true, err + case 14: // union.F_Enum + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Enum{MyMessage_Color(x)} + return true, err + case 15: // union.F_Message + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GoTestField) + err := b.DecodeMessage(msg) + m.Union = &Oneof_F_Message{msg} + return true, err + case 16: // union.f_group + if wire != proto.WireStartGroup { + return true, proto.ErrInternalBadWireType + } + msg := new(Oneof_F_Group) + err := b.DecodeGroup(msg) + m.Union = &Oneof_FGroup{msg} + return true, err + case 536870911: // 
union.F_Largest_Tag + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Largest_Tag{int32(x)} + return true, err + case 100: // tormato.value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Tormato = &Oneof_Value{int32(x)} + return true, err + default: + return false, nil + } +} + +func _Oneof_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Oneof) + // union + switch x := m.Union.(type) { + case *Oneof_F_Bool: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += 1 + case *Oneof_F_Int32: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.F_Int32)) + case *Oneof_F_Int64: + n += proto.SizeVarint(3<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.F_Int64)) + case *Oneof_F_Fixed32: + n += proto.SizeVarint(4<<3 | proto.WireFixed32) + n += 4 + case *Oneof_F_Fixed64: + n += proto.SizeVarint(5<<3 | proto.WireFixed64) + n += 8 + case *Oneof_F_Uint32: + n += proto.SizeVarint(6<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.F_Uint32)) + case *Oneof_F_Uint64: + n += proto.SizeVarint(7<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.F_Uint64)) + case *Oneof_F_Float: + n += proto.SizeVarint(8<<3 | proto.WireFixed32) + n += 4 + case *Oneof_F_Double: + n += proto.SizeVarint(9<<3 | proto.WireFixed64) + n += 8 + case *Oneof_F_String: + n += proto.SizeVarint(10<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.F_String))) + n += len(x.F_String) + case *Oneof_F_Bytes: + n += proto.SizeVarint(11<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.F_Bytes))) + n += len(x.F_Bytes) + case *Oneof_F_Sint32: + n += proto.SizeVarint(12<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64((uint32(x.F_Sint32) << 1) ^ uint32((int32(x.F_Sint32) >> 31)))) + case *Oneof_F_Sint64: + n += proto.SizeVarint(13<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(uint64(x.F_Sint64<<1) ^ uint64((int64(x.F_Sint64) >> 63)))) + case *Oneof_F_Enum: + n += proto.SizeVarint(14<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.F_Enum)) + case *Oneof_F_Message: + s := proto.Size(x.F_Message) + n += proto.SizeVarint(15<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Oneof_FGroup: + n += proto.SizeVarint(16<<3 | proto.WireStartGroup) + n += proto.Size(x.FGroup) + n += proto.SizeVarint(16<<3 | proto.WireEndGroup) + case *Oneof_F_Largest_Tag: + n += proto.SizeVarint(536870911<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.F_Largest_Tag)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + // tormato + switch x := m.Tormato.(type) { + case *Oneof_Value: + n += proto.SizeVarint(100<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Value)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Oneof_F_Group struct { + X *int32 `protobuf:"varint,17,opt,name=x" json:"x,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Oneof_F_Group) Reset() { *m = Oneof_F_Group{} } +func (m *Oneof_F_Group) String() string { return proto.CompactTextString(m) } +func (*Oneof_F_Group) ProtoMessage() {} +func (*Oneof_F_Group) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29, 0} } + +func (m *Oneof_F_Group) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +type Communique struct { + MakeMeCry *bool 
`protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"` + // This is a oneof, called "union". + // + // Types that are valid to be assigned to Union: + // *Communique_Number + // *Communique_Name + // *Communique_Data + // *Communique_TempC + // *Communique_Col + // *Communique_Msg + Union isCommunique_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Communique) Reset() { *m = Communique{} } +func (m *Communique) String() string { return proto.CompactTextString(m) } +func (*Communique) ProtoMessage() {} +func (*Communique) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } + +type isCommunique_Union interface { + isCommunique_Union() +} + +type Communique_Number struct { + Number int32 `protobuf:"varint,5,opt,name=number,oneof"` +} +type Communique_Name struct { + Name string `protobuf:"bytes,6,opt,name=name,oneof"` +} +type Communique_Data struct { + Data []byte `protobuf:"bytes,7,opt,name=data,oneof"` +} +type Communique_TempC struct { + TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,json=tempC,oneof"` +} +type Communique_Col struct { + Col MyMessage_Color `protobuf:"varint,9,opt,name=col,enum=testdata.MyMessage_Color,oneof"` +} +type Communique_Msg struct { + Msg *Strings `protobuf:"bytes,10,opt,name=msg,oneof"` +} + +func (*Communique_Number) isCommunique_Union() {} +func (*Communique_Name) isCommunique_Union() {} +func (*Communique_Data) isCommunique_Union() {} +func (*Communique_TempC) isCommunique_Union() {} +func (*Communique_Col) isCommunique_Union() {} +func (*Communique_Msg) isCommunique_Union() {} + +func (m *Communique) GetUnion() isCommunique_Union { + if m != nil { + return m.Union + } + return nil +} + +func (m *Communique) GetMakeMeCry() bool { + if m != nil && m.MakeMeCry != nil { + return *m.MakeMeCry + } + return false +} + +func (m *Communique) GetNumber() int32 { + if x, ok := m.GetUnion().(*Communique_Number); ok { + return x.Number + } + return 0 +} + +func (m *Communique) GetName() string { + if x, ok := m.GetUnion().(*Communique_Name); ok { + return x.Name + } + return "" +} + +func (m *Communique) GetData() []byte { + if x, ok := m.GetUnion().(*Communique_Data); ok { + return x.Data + } + return nil +} + +func (m *Communique) GetTempC() float64 { + if x, ok := m.GetUnion().(*Communique_TempC); ok { + return x.TempC + } + return 0 +} + +func (m *Communique) GetCol() MyMessage_Color { + if x, ok := m.GetUnion().(*Communique_Col); ok { + return x.Col + } + return MyMessage_RED +} + +func (m *Communique) GetMsg() *Strings { + if x, ok := m.GetUnion().(*Communique_Msg); ok { + return x.Msg + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Communique) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Communique_OneofMarshaler, _Communique_OneofUnmarshaler, _Communique_OneofSizer, []interface{}{ + (*Communique_Number)(nil), + (*Communique_Name)(nil), + (*Communique_Data)(nil), + (*Communique_TempC)(nil), + (*Communique_Col)(nil), + (*Communique_Msg)(nil), + } +} + +func _Communique_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Communique) + // union + switch x := m.Union.(type) { + case *Communique_Number: + b.EncodeVarint(5<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Number)) + case *Communique_Name: + b.EncodeVarint(6<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Name) + case *Communique_Data: + b.EncodeVarint(7<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Data) + case *Communique_TempC: + b.EncodeVarint(8<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.TempC)) + case *Communique_Col: + b.EncodeVarint(9<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Col)) + case *Communique_Msg: + b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Msg); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Communique.Union has unexpected type %T", x) + } + return nil +} + +func _Communique_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Communique) + switch tag { + case 5: // union.number + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Number{int32(x)} + return true, err + case 6: // union.name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &Communique_Name{x} + return true, err + case 7: // union.data + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Union = &Communique_Data{x} + return true, err + case 8: // union.temp_c + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Union = &Communique_TempC{math.Float64frombits(x)} + return true, err + case 9: // union.col + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Col{MyMessage_Color(x)} + return true, err + case 10: // union.msg + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Strings) + err := b.DecodeMessage(msg) + m.Union = &Communique_Msg{msg} + return true, err + default: + return false, nil + } +} + +func _Communique_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Communique) + // union + switch x := m.Union.(type) { + case *Communique_Number: + n += proto.SizeVarint(5<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Number)) + case *Communique_Name: + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Name))) + n += len(x.Name) + case *Communique_Data: + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Data))) + n += len(x.Data) + case *Communique_TempC: + n += proto.SizeVarint(8<<3 | proto.WireFixed64) + n += 8 + case *Communique_Col: + n += proto.SizeVarint(9<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Col)) + case *Communique_Msg: + s := proto.Size(x.Msg) + n += proto.SizeVarint(10<<3 | 
proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +var E_Greeting = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: ([]string)(nil), + Field: 106, + Name: "testdata.greeting", + Tag: "bytes,106,rep,name=greeting", + Filename: "test.proto", +} + +var E_Complex = &proto.ExtensionDesc{ + ExtendedType: (*OtherMessage)(nil), + ExtensionType: (*ComplexExtension)(nil), + Field: 200, + Name: "testdata.complex", + Tag: "bytes,200,opt,name=complex", + Filename: "test.proto", +} + +var E_RComplex = &proto.ExtensionDesc{ + ExtendedType: (*OtherMessage)(nil), + ExtensionType: ([]*ComplexExtension)(nil), + Field: 201, + Name: "testdata.r_complex", + Tag: "bytes,201,rep,name=r_complex,json=rComplex", + Filename: "test.proto", +} + +var E_NoDefaultDouble = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float64)(nil), + Field: 101, + Name: "testdata.no_default_double", + Tag: "fixed64,101,opt,name=no_default_double,json=noDefaultDouble", + Filename: "test.proto", +} + +var E_NoDefaultFloat = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float32)(nil), + Field: 102, + Name: "testdata.no_default_float", + Tag: "fixed32,102,opt,name=no_default_float,json=noDefaultFloat", + Filename: "test.proto", +} + +var E_NoDefaultInt32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 103, + Name: "testdata.no_default_int32", + Tag: "varint,103,opt,name=no_default_int32,json=noDefaultInt32", + Filename: "test.proto", +} + +var E_NoDefaultInt64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 104, + Name: "testdata.no_default_int64", + Tag: "varint,104,opt,name=no_default_int64,json=noDefaultInt64", + Filename: "test.proto", +} + +var E_NoDefaultUint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 105, + Name: "testdata.no_default_uint32", + Tag: "varint,105,opt,name=no_default_uint32,json=noDefaultUint32", + Filename: "test.proto", +} + +var E_NoDefaultUint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 106, + Name: "testdata.no_default_uint64", + Tag: "varint,106,opt,name=no_default_uint64,json=noDefaultUint64", + Filename: "test.proto", +} + +var E_NoDefaultSint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 107, + Name: "testdata.no_default_sint32", + Tag: "zigzag32,107,opt,name=no_default_sint32,json=noDefaultSint32", + Filename: "test.proto", +} + +var E_NoDefaultSint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 108, + Name: "testdata.no_default_sint64", + Tag: "zigzag64,108,opt,name=no_default_sint64,json=noDefaultSint64", + Filename: "test.proto", +} + +var E_NoDefaultFixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 109, + Name: "testdata.no_default_fixed32", + Tag: "fixed32,109,opt,name=no_default_fixed32,json=noDefaultFixed32", + Filename: "test.proto", +} + +var E_NoDefaultFixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 110, + Name: "testdata.no_default_fixed64", + Tag: 
"fixed64,110,opt,name=no_default_fixed64,json=noDefaultFixed64", + Filename: "test.proto", +} + +var E_NoDefaultSfixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 111, + Name: "testdata.no_default_sfixed32", + Tag: "fixed32,111,opt,name=no_default_sfixed32,json=noDefaultSfixed32", + Filename: "test.proto", +} + +var E_NoDefaultSfixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 112, + Name: "testdata.no_default_sfixed64", + Tag: "fixed64,112,opt,name=no_default_sfixed64,json=noDefaultSfixed64", + Filename: "test.proto", +} + +var E_NoDefaultBool = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 113, + Name: "testdata.no_default_bool", + Tag: "varint,113,opt,name=no_default_bool,json=noDefaultBool", + Filename: "test.proto", +} + +var E_NoDefaultString = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*string)(nil), + Field: 114, + Name: "testdata.no_default_string", + Tag: "bytes,114,opt,name=no_default_string,json=noDefaultString", + Filename: "test.proto", +} + +var E_NoDefaultBytes = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: ([]byte)(nil), + Field: 115, + Name: "testdata.no_default_bytes", + Tag: "bytes,115,opt,name=no_default_bytes,json=noDefaultBytes", + Filename: "test.proto", +} + +var E_NoDefaultEnum = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), + Field: 116, + Name: "testdata.no_default_enum", + Tag: "varint,116,opt,name=no_default_enum,json=noDefaultEnum,enum=testdata.DefaultsMessage_DefaultsEnum", + Filename: "test.proto", +} + +var E_DefaultDouble = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float64)(nil), + Field: 201, + Name: "testdata.default_double", + Tag: "fixed64,201,opt,name=default_double,json=defaultDouble,def=3.1415", + Filename: "test.proto", +} + +var E_DefaultFloat = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float32)(nil), + Field: 202, + Name: "testdata.default_float", + Tag: "fixed32,202,opt,name=default_float,json=defaultFloat,def=3.14", + Filename: "test.proto", +} + +var E_DefaultInt32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 203, + Name: "testdata.default_int32", + Tag: "varint,203,opt,name=default_int32,json=defaultInt32,def=42", + Filename: "test.proto", +} + +var E_DefaultInt64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 204, + Name: "testdata.default_int64", + Tag: "varint,204,opt,name=default_int64,json=defaultInt64,def=43", + Filename: "test.proto", +} + +var E_DefaultUint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 205, + Name: "testdata.default_uint32", + Tag: "varint,205,opt,name=default_uint32,json=defaultUint32,def=44", + Filename: "test.proto", +} + +var E_DefaultUint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 206, + Name: "testdata.default_uint64", + Tag: "varint,206,opt,name=default_uint64,json=defaultUint64,def=45", + Filename: "test.proto", +} + +var E_DefaultSint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 207, + Name: 
"testdata.default_sint32", + Tag: "zigzag32,207,opt,name=default_sint32,json=defaultSint32,def=46", + Filename: "test.proto", +} + +var E_DefaultSint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 208, + Name: "testdata.default_sint64", + Tag: "zigzag64,208,opt,name=default_sint64,json=defaultSint64,def=47", + Filename: "test.proto", +} + +var E_DefaultFixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 209, + Name: "testdata.default_fixed32", + Tag: "fixed32,209,opt,name=default_fixed32,json=defaultFixed32,def=48", + Filename: "test.proto", +} + +var E_DefaultFixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 210, + Name: "testdata.default_fixed64", + Tag: "fixed64,210,opt,name=default_fixed64,json=defaultFixed64,def=49", + Filename: "test.proto", +} + +var E_DefaultSfixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 211, + Name: "testdata.default_sfixed32", + Tag: "fixed32,211,opt,name=default_sfixed32,json=defaultSfixed32,def=50", + Filename: "test.proto", +} + +var E_DefaultSfixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 212, + Name: "testdata.default_sfixed64", + Tag: "fixed64,212,opt,name=default_sfixed64,json=defaultSfixed64,def=51", + Filename: "test.proto", +} + +var E_DefaultBool = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 213, + Name: "testdata.default_bool", + Tag: "varint,213,opt,name=default_bool,json=defaultBool,def=1", + Filename: "test.proto", +} + +var E_DefaultString = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*string)(nil), + Field: 214, + Name: "testdata.default_string", + Tag: "bytes,214,opt,name=default_string,json=defaultString,def=Hello, string", + Filename: "test.proto", +} + +var E_DefaultBytes = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: ([]byte)(nil), + Field: 215, + Name: "testdata.default_bytes", + Tag: "bytes,215,opt,name=default_bytes,json=defaultBytes,def=Hello, bytes", + Filename: "test.proto", +} + +var E_DefaultEnum = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), + Field: 216, + Name: "testdata.default_enum", + Tag: "varint,216,opt,name=default_enum,json=defaultEnum,enum=testdata.DefaultsMessage_DefaultsEnum,def=1", + Filename: "test.proto", +} + +var E_X201 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 201, + Name: "testdata.x201", + Tag: "bytes,201,opt,name=x201", + Filename: "test.proto", +} + +var E_X202 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 202, + Name: "testdata.x202", + Tag: "bytes,202,opt,name=x202", + Filename: "test.proto", +} + +var E_X203 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 203, + Name: "testdata.x203", + Tag: "bytes,203,opt,name=x203", + Filename: "test.proto", +} + +var E_X204 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 204, + Name: "testdata.x204", + Tag: "bytes,204,opt,name=x204", + Filename: "test.proto", +} + +var E_X205 = &proto.ExtensionDesc{ + ExtendedType: 
(*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 205, + Name: "testdata.x205", + Tag: "bytes,205,opt,name=x205", + Filename: "test.proto", +} + +var E_X206 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 206, + Name: "testdata.x206", + Tag: "bytes,206,opt,name=x206", + Filename: "test.proto", +} + +var E_X207 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 207, + Name: "testdata.x207", + Tag: "bytes,207,opt,name=x207", + Filename: "test.proto", +} + +var E_X208 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 208, + Name: "testdata.x208", + Tag: "bytes,208,opt,name=x208", + Filename: "test.proto", +} + +var E_X209 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 209, + Name: "testdata.x209", + Tag: "bytes,209,opt,name=x209", + Filename: "test.proto", +} + +var E_X210 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 210, + Name: "testdata.x210", + Tag: "bytes,210,opt,name=x210", + Filename: "test.proto", +} + +var E_X211 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 211, + Name: "testdata.x211", + Tag: "bytes,211,opt,name=x211", + Filename: "test.proto", +} + +var E_X212 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 212, + Name: "testdata.x212", + Tag: "bytes,212,opt,name=x212", + Filename: "test.proto", +} + +var E_X213 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 213, + Name: "testdata.x213", + Tag: "bytes,213,opt,name=x213", + Filename: "test.proto", +} + +var E_X214 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 214, + Name: "testdata.x214", + Tag: "bytes,214,opt,name=x214", + Filename: "test.proto", +} + +var E_X215 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 215, + Name: "testdata.x215", + Tag: "bytes,215,opt,name=x215", + Filename: "test.proto", +} + +var E_X216 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 216, + Name: "testdata.x216", + Tag: "bytes,216,opt,name=x216", + Filename: "test.proto", +} + +var E_X217 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 217, + Name: "testdata.x217", + Tag: "bytes,217,opt,name=x217", + Filename: "test.proto", +} + +var E_X218 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 218, + Name: "testdata.x218", + Tag: "bytes,218,opt,name=x218", + Filename: "test.proto", +} + +var E_X219 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 219, + Name: "testdata.x219", + Tag: "bytes,219,opt,name=x219", + Filename: "test.proto", +} + +var E_X220 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 220, + Name: "testdata.x220", + Tag: "bytes,220,opt,name=x220", + Filename: "test.proto", +} + +var E_X221 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 221, + Name: "testdata.x221", + Tag: "bytes,221,opt,name=x221", + Filename: "test.proto", +} + +var E_X222 = &proto.ExtensionDesc{ 
+ ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 222, + Name: "testdata.x222", + Tag: "bytes,222,opt,name=x222", + Filename: "test.proto", +} + +var E_X223 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 223, + Name: "testdata.x223", + Tag: "bytes,223,opt,name=x223", + Filename: "test.proto", +} + +var E_X224 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 224, + Name: "testdata.x224", + Tag: "bytes,224,opt,name=x224", + Filename: "test.proto", +} + +var E_X225 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 225, + Name: "testdata.x225", + Tag: "bytes,225,opt,name=x225", + Filename: "test.proto", +} + +var E_X226 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 226, + Name: "testdata.x226", + Tag: "bytes,226,opt,name=x226", + Filename: "test.proto", +} + +var E_X227 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 227, + Name: "testdata.x227", + Tag: "bytes,227,opt,name=x227", + Filename: "test.proto", +} + +var E_X228 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 228, + Name: "testdata.x228", + Tag: "bytes,228,opt,name=x228", + Filename: "test.proto", +} + +var E_X229 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 229, + Name: "testdata.x229", + Tag: "bytes,229,opt,name=x229", + Filename: "test.proto", +} + +var E_X230 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 230, + Name: "testdata.x230", + Tag: "bytes,230,opt,name=x230", + Filename: "test.proto", +} + +var E_X231 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 231, + Name: "testdata.x231", + Tag: "bytes,231,opt,name=x231", + Filename: "test.proto", +} + +var E_X232 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 232, + Name: "testdata.x232", + Tag: "bytes,232,opt,name=x232", + Filename: "test.proto", +} + +var E_X233 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 233, + Name: "testdata.x233", + Tag: "bytes,233,opt,name=x233", + Filename: "test.proto", +} + +var E_X234 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 234, + Name: "testdata.x234", + Tag: "bytes,234,opt,name=x234", + Filename: "test.proto", +} + +var E_X235 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 235, + Name: "testdata.x235", + Tag: "bytes,235,opt,name=x235", + Filename: "test.proto", +} + +var E_X236 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 236, + Name: "testdata.x236", + Tag: "bytes,236,opt,name=x236", + Filename: "test.proto", +} + +var E_X237 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 237, + Name: "testdata.x237", + Tag: "bytes,237,opt,name=x237", + Filename: "test.proto", +} + +var E_X238 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 238, + Name: "testdata.x238", + Tag: "bytes,238,opt,name=x238", + Filename: "test.proto", +} + +var E_X239 = 
&proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 239, + Name: "testdata.x239", + Tag: "bytes,239,opt,name=x239", + Filename: "test.proto", +} + +var E_X240 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 240, + Name: "testdata.x240", + Tag: "bytes,240,opt,name=x240", + Filename: "test.proto", +} + +var E_X241 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 241, + Name: "testdata.x241", + Tag: "bytes,241,opt,name=x241", + Filename: "test.proto", +} + +var E_X242 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 242, + Name: "testdata.x242", + Tag: "bytes,242,opt,name=x242", + Filename: "test.proto", +} + +var E_X243 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 243, + Name: "testdata.x243", + Tag: "bytes,243,opt,name=x243", + Filename: "test.proto", +} + +var E_X244 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 244, + Name: "testdata.x244", + Tag: "bytes,244,opt,name=x244", + Filename: "test.proto", +} + +var E_X245 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 245, + Name: "testdata.x245", + Tag: "bytes,245,opt,name=x245", + Filename: "test.proto", +} + +var E_X246 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 246, + Name: "testdata.x246", + Tag: "bytes,246,opt,name=x246", + Filename: "test.proto", +} + +var E_X247 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 247, + Name: "testdata.x247", + Tag: "bytes,247,opt,name=x247", + Filename: "test.proto", +} + +var E_X248 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 248, + Name: "testdata.x248", + Tag: "bytes,248,opt,name=x248", + Filename: "test.proto", +} + +var E_X249 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 249, + Name: "testdata.x249", + Tag: "bytes,249,opt,name=x249", + Filename: "test.proto", +} + +var E_X250 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 250, + Name: "testdata.x250", + Tag: "bytes,250,opt,name=x250", + Filename: "test.proto", +} + +func init() { + proto.RegisterType((*GoEnum)(nil), "testdata.GoEnum") + proto.RegisterType((*GoTestField)(nil), "testdata.GoTestField") + proto.RegisterType((*GoTest)(nil), "testdata.GoTest") + proto.RegisterType((*GoTest_RequiredGroup)(nil), "testdata.GoTest.RequiredGroup") + proto.RegisterType((*GoTest_RepeatedGroup)(nil), "testdata.GoTest.RepeatedGroup") + proto.RegisterType((*GoTest_OptionalGroup)(nil), "testdata.GoTest.OptionalGroup") + proto.RegisterType((*GoTestRequiredGroupField)(nil), "testdata.GoTestRequiredGroupField") + proto.RegisterType((*GoTestRequiredGroupField_Group)(nil), "testdata.GoTestRequiredGroupField.Group") + proto.RegisterType((*GoSkipTest)(nil), "testdata.GoSkipTest") + proto.RegisterType((*GoSkipTest_SkipGroup)(nil), "testdata.GoSkipTest.SkipGroup") + proto.RegisterType((*NonPackedTest)(nil), "testdata.NonPackedTest") + proto.RegisterType((*PackedTest)(nil), "testdata.PackedTest") + proto.RegisterType((*MaxTag)(nil), "testdata.MaxTag") + proto.RegisterType((*OldMessage)(nil), "testdata.OldMessage") + 
proto.RegisterType((*OldMessage_Nested)(nil), "testdata.OldMessage.Nested") + proto.RegisterType((*NewMessage)(nil), "testdata.NewMessage") + proto.RegisterType((*NewMessage_Nested)(nil), "testdata.NewMessage.Nested") + proto.RegisterType((*InnerMessage)(nil), "testdata.InnerMessage") + proto.RegisterType((*OtherMessage)(nil), "testdata.OtherMessage") + proto.RegisterType((*RequiredInnerMessage)(nil), "testdata.RequiredInnerMessage") + proto.RegisterType((*MyMessage)(nil), "testdata.MyMessage") + proto.RegisterType((*MyMessage_SomeGroup)(nil), "testdata.MyMessage.SomeGroup") + proto.RegisterType((*Ext)(nil), "testdata.Ext") + proto.RegisterType((*ComplexExtension)(nil), "testdata.ComplexExtension") + proto.RegisterType((*DefaultsMessage)(nil), "testdata.DefaultsMessage") + proto.RegisterType((*MyMessageSet)(nil), "testdata.MyMessageSet") + proto.RegisterType((*Empty)(nil), "testdata.Empty") + proto.RegisterType((*MessageList)(nil), "testdata.MessageList") + proto.RegisterType((*MessageList_Message)(nil), "testdata.MessageList.Message") + proto.RegisterType((*Strings)(nil), "testdata.Strings") + proto.RegisterType((*Defaults)(nil), "testdata.Defaults") + proto.RegisterType((*SubDefaults)(nil), "testdata.SubDefaults") + proto.RegisterType((*RepeatedEnum)(nil), "testdata.RepeatedEnum") + proto.RegisterType((*MoreRepeated)(nil), "testdata.MoreRepeated") + proto.RegisterType((*GroupOld)(nil), "testdata.GroupOld") + proto.RegisterType((*GroupOld_G)(nil), "testdata.GroupOld.G") + proto.RegisterType((*GroupNew)(nil), "testdata.GroupNew") + proto.RegisterType((*GroupNew_G)(nil), "testdata.GroupNew.G") + proto.RegisterType((*FloatingPoint)(nil), "testdata.FloatingPoint") + proto.RegisterType((*MessageWithMap)(nil), "testdata.MessageWithMap") + proto.RegisterType((*Oneof)(nil), "testdata.Oneof") + proto.RegisterType((*Oneof_F_Group)(nil), "testdata.Oneof.F_Group") + proto.RegisterType((*Communique)(nil), "testdata.Communique") + proto.RegisterEnum("testdata.FOO", FOO_name, FOO_value) + proto.RegisterEnum("testdata.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value) + proto.RegisterEnum("testdata.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value) + proto.RegisterEnum("testdata.DefaultsMessage_DefaultsEnum", DefaultsMessage_DefaultsEnum_name, DefaultsMessage_DefaultsEnum_value) + proto.RegisterEnum("testdata.Defaults_Color", Defaults_Color_name, Defaults_Color_value) + proto.RegisterEnum("testdata.RepeatedEnum_Color", RepeatedEnum_Color_name, RepeatedEnum_Color_value) + proto.RegisterExtension(E_Ext_More) + proto.RegisterExtension(E_Ext_Text) + proto.RegisterExtension(E_Ext_Number) + proto.RegisterExtension(E_Greeting) + proto.RegisterExtension(E_Complex) + proto.RegisterExtension(E_RComplex) + proto.RegisterExtension(E_NoDefaultDouble) + proto.RegisterExtension(E_NoDefaultFloat) + proto.RegisterExtension(E_NoDefaultInt32) + proto.RegisterExtension(E_NoDefaultInt64) + proto.RegisterExtension(E_NoDefaultUint32) + proto.RegisterExtension(E_NoDefaultUint64) + proto.RegisterExtension(E_NoDefaultSint32) + proto.RegisterExtension(E_NoDefaultSint64) + proto.RegisterExtension(E_NoDefaultFixed32) + proto.RegisterExtension(E_NoDefaultFixed64) + proto.RegisterExtension(E_NoDefaultSfixed32) + proto.RegisterExtension(E_NoDefaultSfixed64) + proto.RegisterExtension(E_NoDefaultBool) + proto.RegisterExtension(E_NoDefaultString) + proto.RegisterExtension(E_NoDefaultBytes) + proto.RegisterExtension(E_NoDefaultEnum) + proto.RegisterExtension(E_DefaultDouble) + proto.RegisterExtension(E_DefaultFloat) + 
proto.RegisterExtension(E_DefaultInt32) + proto.RegisterExtension(E_DefaultInt64) + proto.RegisterExtension(E_DefaultUint32) + proto.RegisterExtension(E_DefaultUint64) + proto.RegisterExtension(E_DefaultSint32) + proto.RegisterExtension(E_DefaultSint64) + proto.RegisterExtension(E_DefaultFixed32) + proto.RegisterExtension(E_DefaultFixed64) + proto.RegisterExtension(E_DefaultSfixed32) + proto.RegisterExtension(E_DefaultSfixed64) + proto.RegisterExtension(E_DefaultBool) + proto.RegisterExtension(E_DefaultString) + proto.RegisterExtension(E_DefaultBytes) + proto.RegisterExtension(E_DefaultEnum) + proto.RegisterExtension(E_X201) + proto.RegisterExtension(E_X202) + proto.RegisterExtension(E_X203) + proto.RegisterExtension(E_X204) + proto.RegisterExtension(E_X205) + proto.RegisterExtension(E_X206) + proto.RegisterExtension(E_X207) + proto.RegisterExtension(E_X208) + proto.RegisterExtension(E_X209) + proto.RegisterExtension(E_X210) + proto.RegisterExtension(E_X211) + proto.RegisterExtension(E_X212) + proto.RegisterExtension(E_X213) + proto.RegisterExtension(E_X214) + proto.RegisterExtension(E_X215) + proto.RegisterExtension(E_X216) + proto.RegisterExtension(E_X217) + proto.RegisterExtension(E_X218) + proto.RegisterExtension(E_X219) + proto.RegisterExtension(E_X220) + proto.RegisterExtension(E_X221) + proto.RegisterExtension(E_X222) + proto.RegisterExtension(E_X223) + proto.RegisterExtension(E_X224) + proto.RegisterExtension(E_X225) + proto.RegisterExtension(E_X226) + proto.RegisterExtension(E_X227) + proto.RegisterExtension(E_X228) + proto.RegisterExtension(E_X229) + proto.RegisterExtension(E_X230) + proto.RegisterExtension(E_X231) + proto.RegisterExtension(E_X232) + proto.RegisterExtension(E_X233) + proto.RegisterExtension(E_X234) + proto.RegisterExtension(E_X235) + proto.RegisterExtension(E_X236) + proto.RegisterExtension(E_X237) + proto.RegisterExtension(E_X238) + proto.RegisterExtension(E_X239) + proto.RegisterExtension(E_X240) + proto.RegisterExtension(E_X241) + proto.RegisterExtension(E_X242) + proto.RegisterExtension(E_X243) + proto.RegisterExtension(E_X244) + proto.RegisterExtension(E_X245) + proto.RegisterExtension(E_X246) + proto.RegisterExtension(E_X247) + proto.RegisterExtension(E_X248) + proto.RegisterExtension(E_X249) + proto.RegisterExtension(E_X250) +} + +func init() { proto.RegisterFile("test.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 4453 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x5a, 0xc9, 0x77, 0xdb, 0x48, + 0x7a, 0x37, 0xc0, 0xfd, 0x23, 0x25, 0x42, 0x65, 0xb5, 0x9b, 0x96, 0xbc, 0xc0, 0x9c, 0xe9, 0x6e, + 0x7a, 0xd3, 0x48, 0x20, 0x44, 0xdb, 0x74, 0xa7, 0xdf, 0xf3, 0x42, 0xca, 0x7a, 0x63, 0x89, 0x0a, + 0xa4, 0xee, 0x7e, 0xd3, 0x39, 0xf0, 0x51, 0x22, 0x44, 0xb3, 0x4d, 0x02, 0x34, 0x09, 0xc5, 0x52, + 0x72, 0xe9, 0x4b, 0x72, 0xcd, 0x76, 0xc9, 0x35, 0xa7, 0x9c, 0x92, 0xbc, 0x97, 0x7f, 0x22, 0xe9, + 0xee, 0x59, 0x7b, 0xd6, 0xac, 0x93, 0x7d, 0x99, 0xec, 0xdb, 0x4c, 0x92, 0x4b, 0xcf, 0xab, 0xaf, + 0x0a, 0x40, 0x01, 0x24, 0x20, 0xf9, 0x24, 0x56, 0xd5, 0xef, 0xf7, 0xd5, 0xf6, 0xab, 0xef, 0xab, + 0xaf, 0x20, 0x00, 0xc7, 0x9c, 0x38, 0x2b, 0xa3, 0xb1, 0xed, 0xd8, 0x24, 0x4b, 0x7f, 0x77, 0x3b, + 0x4e, 0xa7, 0x7c, 0x1d, 0xd2, 0x1b, 0x76, 0xc3, 0x3a, 0x1a, 0x92, 0xab, 0x90, 0x38, 0xb4, 0xed, + 0x92, 0xa4, 0xca, 0x95, 0x79, 0x6d, 0x6e, 0xc5, 0x45, 0xac, 0x34, 0x5b, 0x2d, 0x83, 0xb6, 0x94, + 0xef, 0x40, 0x7e, 0xc3, 0xde, 0x33, 0x27, 0x4e, 0xb3, 0x6f, 0x0e, 0xba, 0x64, 0x11, 0x52, 0x4f, + 0x3b, 0xfb, 0xe6, 
0x00, 0x19, 0x39, 0x83, 0x15, 0x08, 0x81, 0xe4, 0xde, 0xc9, 0xc8, 0x2c, 0xc9, + 0x58, 0x89, 0xbf, 0xcb, 0xbf, 0x72, 0x85, 0x76, 0x42, 0x99, 0xe4, 0x3a, 0x24, 0xbf, 0xdc, 0xb7, + 0xba, 0xbc, 0x97, 0xd7, 0xfc, 0x5e, 0x58, 0xfb, 0xca, 0x97, 0x37, 0xb7, 0x1f, 0x1b, 0x08, 0xa1, + 0xf6, 0xf7, 0x3a, 0xfb, 0x03, 0x6a, 0x4a, 0xa2, 0xf6, 0xb1, 0x40, 0x6b, 0x77, 0x3a, 0xe3, 0xce, + 0xb0, 0x94, 0x50, 0xa5, 0x4a, 0xca, 0x60, 0x05, 0x72, 0x1f, 0xe6, 0x0c, 0xf3, 0xc5, 0x51, 0x7f, + 0x6c, 0x76, 0x71, 0x70, 0xa5, 0xa4, 0x2a, 0x57, 0xf2, 0xd3, 0xf6, 0xb1, 0xd1, 0x08, 0x62, 0x19, + 0x79, 0x64, 0x76, 0x1c, 0x97, 0x9c, 0x52, 0x13, 0xb1, 0x64, 0x01, 0x4b, 0xc9, 0xad, 0x91, 0xd3, + 0xb7, 0xad, 0xce, 0x80, 0x91, 0xd3, 0xaa, 0x14, 0x43, 0x0e, 0x60, 0xc9, 0x9b, 0x50, 0x6c, 0xb6, + 0x1f, 0xda, 0xf6, 0xa0, 0x3d, 0xe6, 0x23, 0x2a, 0x81, 0x2a, 0x57, 0xb2, 0xc6, 0x5c, 0x93, 0xd6, + 0xba, 0xc3, 0x24, 0x15, 0x50, 0x9a, 0xed, 0x4d, 0xcb, 0xa9, 0x6a, 0x3e, 0x30, 0xaf, 0xca, 0x95, + 0x94, 0x31, 0xdf, 0xc4, 0xea, 0x29, 0x64, 0x4d, 0xf7, 0x91, 0x05, 0x55, 0xae, 0x24, 0x18, 0xb2, + 0xa6, 0x7b, 0xc8, 0x5b, 0x40, 0x9a, 0xed, 0x66, 0xff, 0xd8, 0xec, 0x8a, 0x56, 0xe7, 0x54, 0xb9, + 0x92, 0x31, 0x94, 0x26, 0x6f, 0x98, 0x81, 0x16, 0x2d, 0xcf, 0xab, 0x72, 0x25, 0xed, 0xa2, 0x05, + 0xdb, 0x37, 0x60, 0xa1, 0xd9, 0x7e, 0xb7, 0x1f, 0x1c, 0x70, 0x51, 0x95, 0x2b, 0x73, 0x46, 0xb1, + 0xc9, 0xea, 0xa7, 0xb1, 0xa2, 0x61, 0x45, 0x95, 0x2b, 0x49, 0x8e, 0x15, 0xec, 0xe2, 0xec, 0x9a, + 0x03, 0xbb, 0xe3, 0xf8, 0xd0, 0x05, 0x55, 0xae, 0xc8, 0xc6, 0x7c, 0x13, 0xab, 0x83, 0x56, 0x1f, + 0xdb, 0x47, 0xfb, 0x03, 0xd3, 0x87, 0x12, 0x55, 0xae, 0x48, 0x46, 0xb1, 0xc9, 0xea, 0x83, 0xd8, + 0x5d, 0x67, 0xdc, 0xb7, 0x7a, 0x3e, 0xf6, 0x3c, 0xea, 0xb7, 0xd8, 0x64, 0xf5, 0xc1, 0x11, 0x3c, + 0x3c, 0x71, 0xcc, 0x89, 0x0f, 0x35, 0x55, 0xb9, 0x52, 0x30, 0xe6, 0x9b, 0x58, 0x1d, 0xb2, 0x1a, + 0x5a, 0x83, 0x43, 0x55, 0xae, 0x2c, 0x50, 0xab, 0x33, 0xd6, 0x60, 0x37, 0xb4, 0x06, 0x3d, 0x55, + 0xae, 0x10, 0x8e, 0x15, 0xd6, 0x40, 0xd4, 0x0c, 0x13, 0x62, 0x69, 0x51, 0x4d, 0x08, 0x9a, 0x61, + 0x95, 0x41, 0xcd, 0x70, 0xe0, 0x6b, 0x6a, 0x42, 0xd4, 0x4c, 0x08, 0x89, 0x9d, 0x73, 0xe4, 0x05, + 0x35, 0x21, 0x6a, 0x86, 0x23, 0x43, 0x9a, 0xe1, 0xd8, 0xd7, 0xd5, 0x44, 0x50, 0x33, 0x53, 0x68, + 0xd1, 0x72, 0x49, 0x4d, 0x04, 0x35, 0xc3, 0xd1, 0x41, 0xcd, 0x70, 0xf0, 0x45, 0x35, 0x11, 0xd0, + 0x4c, 0x18, 0x2b, 0x1a, 0x5e, 0x52, 0x13, 0x01, 0xcd, 0x88, 0xb3, 0x73, 0x35, 0xc3, 0xa1, 0xcb, + 0x6a, 0x42, 0xd4, 0x8c, 0x68, 0xd5, 0xd3, 0x0c, 0x87, 0x5e, 0x52, 0x13, 0x01, 0xcd, 0x88, 0x58, + 0x4f, 0x33, 0x1c, 0x7b, 0x59, 0x4d, 0x04, 0x34, 0xc3, 0xb1, 0xd7, 0x45, 0xcd, 0x70, 0xe8, 0xc7, + 0x92, 0x9a, 0x10, 0x45, 0xc3, 0xa1, 0x37, 0x03, 0xa2, 0xe1, 0xd8, 0x4f, 0x28, 0x56, 0x54, 0x4d, + 0x18, 0x2c, 0xae, 0xc2, 0xa7, 0x14, 0x2c, 0xca, 0x86, 0x83, 0x7d, 0xd9, 0xd8, 0xdc, 0x05, 0x95, + 0xae, 0xa8, 0x92, 0x27, 0x1b, 0xd7, 0x2f, 0x89, 0xb2, 0xf1, 0x80, 0x57, 0xd1, 0xd5, 0x72, 0xd9, + 0x4c, 0x21, 0x6b, 0xba, 0x8f, 0x54, 0x55, 0xc9, 0x97, 0x8d, 0x87, 0x0c, 0xc8, 0xc6, 0xc3, 0x5e, + 0x53, 0x25, 0x51, 0x36, 0x33, 0xd0, 0xa2, 0xe5, 0xb2, 0x2a, 0x89, 0xb2, 0xf1, 0xd0, 0xa2, 0x6c, + 0x3c, 0xf0, 0x17, 0x54, 0x49, 0x90, 0xcd, 0x34, 0x56, 0x34, 0xfc, 0x45, 0x55, 0x12, 0x64, 0x13, + 0x9c, 0x1d, 0x93, 0x8d, 0x07, 0x7d, 0x43, 0x95, 0x7c, 0xd9, 0x04, 0xad, 0x72, 0xd9, 0x78, 0xd0, + 0x37, 0x55, 0x49, 0x90, 0x4d, 0x10, 0xcb, 0x65, 0xe3, 0x61, 0xdf, 0xc2, 0xf8, 0xe6, 0xca, 0xc6, + 0xc3, 0x0a, 0xb2, 0xf1, 0xa0, 0xbf, 0x43, 0x63, 0xa1, 0x27, 0x1b, 0x0f, 0x2a, 0xca, 0xc6, 0xc3, + 0xfe, 0x2e, 0xc5, 0xfa, 0xb2, 0x99, 0x06, 
0x8b, 0xab, 0xf0, 0x7b, 0x14, 0xec, 0xcb, 0xc6, 0x03, + 0xaf, 0xe0, 0x20, 0xa8, 0x6c, 0xba, 0xe6, 0x61, 0xe7, 0x68, 0x40, 0x25, 0x56, 0xa1, 0xba, 0xa9, + 0x27, 0x9d, 0xf1, 0x91, 0x49, 0x47, 0x62, 0xdb, 0x83, 0xc7, 0x6e, 0x1b, 0x59, 0xa1, 0xc6, 0x99, + 0x7c, 0x7c, 0xc2, 0x75, 0xaa, 0x9f, 0xba, 0x5c, 0xd5, 0x8c, 0x22, 0xd3, 0xd0, 0x34, 0xbe, 0xa6, + 0x0b, 0xf8, 0x1b, 0x54, 0x45, 0x75, 0xb9, 0xa6, 0x33, 0x7c, 0x4d, 0xf7, 0xf1, 0x55, 0x38, 0xef, + 0x4b, 0xc9, 0x67, 0xdc, 0xa4, 0x5a, 0xaa, 0x27, 0xaa, 0xda, 0xaa, 0xb1, 0xe0, 0x0a, 0x6a, 0x16, + 0x29, 0xd0, 0xcd, 0x2d, 0x2a, 0xa9, 0x7a, 0xa2, 0xa6, 0x7b, 0x24, 0xb1, 0x27, 0x8d, 0xca, 0x90, + 0x0b, 0xcb, 0xe7, 0xdc, 0xa6, 0xca, 0xaa, 0x27, 0xab, 0xda, 0xea, 0xaa, 0xa1, 0x70, 0x7d, 0xcd, + 0xe0, 0x04, 0xfa, 0x59, 0xa1, 0x0a, 0xab, 0x27, 0x6b, 0xba, 0xc7, 0x09, 0xf6, 0xb3, 0xe0, 0x0a, + 0xcd, 0xa7, 0x7c, 0x89, 0x2a, 0xad, 0x9e, 0xae, 0xae, 0xe9, 0x6b, 0xeb, 0xf7, 0x8c, 0x22, 0x53, + 0x9c, 0xcf, 0xd1, 0x69, 0x3f, 0x5c, 0x72, 0x3e, 0x69, 0x95, 0x6a, 0xae, 0x9e, 0xd6, 0xee, 0xac, + 0xdd, 0xd5, 0xee, 0x1a, 0x0a, 0xd7, 0x9e, 0xcf, 0x7a, 0x87, 0xb2, 0xb8, 0xf8, 0x7c, 0xd6, 0x1a, + 0x55, 0x5f, 0x5d, 0x79, 0x66, 0x0e, 0x06, 0xf6, 0x2d, 0xb5, 0xfc, 0xd2, 0x1e, 0x0f, 0xba, 0xd7, + 0xca, 0x60, 0x28, 0x5c, 0x8f, 0x62, 0xaf, 0x0b, 0xae, 0x20, 0x7d, 0xfa, 0xaf, 0xd1, 0x7b, 0x58, + 0xa1, 0x9e, 0x79, 0xd8, 0xef, 0x59, 0xf6, 0xc4, 0x34, 0x8a, 0x4c, 0x9a, 0xa1, 0x35, 0xd9, 0x0d, + 0xaf, 0xe3, 0xaf, 0x53, 0xda, 0x42, 0x3d, 0x71, 0xbb, 0xaa, 0xd1, 0x9e, 0x66, 0xad, 0xe3, 0x6e, + 0x78, 0x1d, 0x7f, 0x83, 0x72, 0x48, 0x3d, 0x71, 0xbb, 0xa6, 0x73, 0x8e, 0xb8, 0x8e, 0x77, 0xe0, + 0x42, 0x28, 0x2e, 0xb6, 0x47, 0x9d, 0x83, 0xe7, 0x66, 0xb7, 0xa4, 0xd1, 0xf0, 0xf8, 0x50, 0x56, + 0x24, 0xe3, 0x7c, 0x20, 0x44, 0xee, 0x60, 0x33, 0xb9, 0x07, 0xaf, 0x87, 0x03, 0xa5, 0xcb, 0xac, + 0xd2, 0x78, 0x89, 0xcc, 0xc5, 0x60, 0xcc, 0x0c, 0x51, 0x05, 0x07, 0xec, 0x52, 0x75, 0x1a, 0x40, + 0x7d, 0xaa, 0xef, 0x89, 0x39, 0xf5, 0x67, 0xe0, 0xe2, 0x74, 0x28, 0x75, 0xc9, 0xeb, 0x34, 0xa2, + 0x22, 0xf9, 0x42, 0x38, 0xaa, 0x4e, 0xd1, 0x67, 0xf4, 0x5d, 0xa3, 0x21, 0x56, 0xa4, 0x4f, 0xf5, + 0x7e, 0x1f, 0x4a, 0x53, 0xc1, 0xd6, 0x65, 0xdf, 0xa1, 0x31, 0x17, 0xd9, 0xaf, 0x85, 0xe2, 0x6e, + 0x98, 0x3c, 0xa3, 0xeb, 0xbb, 0x34, 0x08, 0x0b, 0xe4, 0xa9, 0x9e, 0x71, 0xc9, 0x82, 0xe1, 0xd8, + 0xe5, 0xde, 0xa3, 0x51, 0x99, 0x2f, 0x59, 0x20, 0x32, 0x8b, 0xfd, 0x86, 0xe2, 0xb3, 0xcb, 0xad, + 0xd3, 0x30, 0xcd, 0xfb, 0x0d, 0x86, 0x6a, 0x4e, 0x7e, 0x9b, 0x92, 0x77, 0x67, 0xcf, 0xf8, 0xc7, + 0x09, 0x1a, 0x60, 0x39, 0x7b, 0x77, 0xd6, 0x94, 0x3d, 0xf6, 0x8c, 0x29, 0xff, 0x84, 0xb2, 0x89, + 0xc0, 0x9e, 0x9a, 0xf3, 0x63, 0x98, 0x73, 0x6f, 0x75, 0xbd, 0xb1, 0x7d, 0x34, 0x2a, 0x35, 0x55, + 0xb9, 0x02, 0xda, 0x95, 0xa9, 0xec, 0xc7, 0xbd, 0xe4, 0x6d, 0x50, 0x94, 0x11, 0x24, 0x31, 0x2b, + 0xcc, 0x2e, 0xb3, 0xb2, 0xa3, 0x26, 0x22, 0xac, 0x30, 0x94, 0x67, 0x45, 0x20, 0x51, 0x2b, 0xae, + 0xd3, 0x67, 0x56, 0x3e, 0x50, 0xa5, 0x99, 0x56, 0xdc, 0x10, 0xc0, 0xad, 0x04, 0x48, 0x4b, 0xeb, + 0x7e, 0xbe, 0x85, 0xed, 0xe4, 0x8b, 0xe1, 0x04, 0x6c, 0x03, 0xef, 0xcf, 0xc1, 0x4a, 0x46, 0x13, + 0x06, 0x37, 0x4d, 0xfb, 0xd9, 0x08, 0x5a, 0x60, 0x34, 0xd3, 0xb4, 0x9f, 0x9b, 0x41, 0x2b, 0xff, + 0xa6, 0x04, 0x49, 0x9a, 0x4f, 0x92, 0x2c, 0x24, 0xdf, 0x6b, 0x6d, 0x3e, 0x56, 0xce, 0xd1, 0x5f, + 0x0f, 0x5b, 0xad, 0xa7, 0x8a, 0x44, 0x72, 0x90, 0x7a, 0xf8, 0x95, 0xbd, 0xc6, 0xae, 0x22, 0x93, + 0x22, 0xe4, 0x9b, 0x9b, 0xdb, 0x1b, 0x0d, 0x63, 0xc7, 0xd8, 0xdc, 0xde, 0x53, 0x12, 0xb4, 0xad, + 0xf9, 0xb4, 0xf5, 0x60, 0x4f, 0x49, 0x92, 0x0c, 0x24, 0x68, 0x5d, 
0x8a, 0x00, 0xa4, 0x77, 0xf7, + 0x8c, 0xcd, 0xed, 0x0d, 0x25, 0x4d, 0xad, 0xec, 0x6d, 0x6e, 0x35, 0x94, 0x0c, 0x45, 0xee, 0xbd, + 0xbb, 0xf3, 0xb4, 0xa1, 0x64, 0xe9, 0xcf, 0x07, 0x86, 0xf1, 0xe0, 0x2b, 0x4a, 0x8e, 0x92, 0xb6, + 0x1e, 0xec, 0x28, 0x80, 0xcd, 0x0f, 0x1e, 0x3e, 0x6d, 0x28, 0x79, 0x52, 0x80, 0x6c, 0xf3, 0xdd, + 0xed, 0x47, 0x7b, 0x9b, 0xad, 0x6d, 0xa5, 0x50, 0x3e, 0x81, 0x12, 0x5b, 0xe6, 0xc0, 0x2a, 0xb2, + 0xa4, 0xf0, 0x1d, 0x48, 0xb1, 0x9d, 0x91, 0x50, 0x25, 0x95, 0xf0, 0xce, 0x4c, 0x53, 0x56, 0xd8, + 0x1e, 0x31, 0xda, 0xd2, 0x65, 0x48, 0xb1, 0x55, 0x5a, 0x84, 0x14, 0x5b, 0x1d, 0x19, 0x53, 0x45, + 0x56, 0x28, 0xff, 0x96, 0x0c, 0xb0, 0x61, 0xef, 0x3e, 0xef, 0x8f, 0x30, 0x21, 0xbf, 0x0c, 0x30, + 0x79, 0xde, 0x1f, 0xb5, 0x51, 0xf5, 0x3c, 0xa9, 0xcc, 0xd1, 0x1a, 0xf4, 0x77, 0xe4, 0x1a, 0x14, + 0xb0, 0xf9, 0x90, 0x79, 0x21, 0xcc, 0x25, 0x33, 0x46, 0x9e, 0xd6, 0x71, 0xc7, 0x14, 0x84, 0xd4, + 0x74, 0x4c, 0x21, 0xd3, 0x02, 0xa4, 0xa6, 0x93, 0xab, 0x80, 0xc5, 0xf6, 0x04, 0x23, 0x0a, 0xa6, + 0x8d, 0x39, 0x03, 0xfb, 0x65, 0x31, 0x86, 0xbc, 0x0d, 0xd8, 0x27, 0x9b, 0x77, 0x71, 0xfa, 0x74, + 0xb8, 0xc3, 0x5d, 0xa1, 0x3f, 0xd8, 0x6c, 0x7d, 0xc2, 0x52, 0x0b, 0x72, 0x5e, 0x3d, 0xed, 0x0b, + 0x6b, 0xf9, 0x8c, 0x14, 0x9c, 0x11, 0x60, 0x95, 0x37, 0x25, 0x06, 0xe0, 0xa3, 0x59, 0xc0, 0xd1, + 0x30, 0x12, 0x1b, 0x4e, 0xf9, 0x32, 0xcc, 0x6d, 0xdb, 0x16, 0x3b, 0xbd, 0xb8, 0x4a, 0x05, 0x90, + 0x3a, 0x25, 0x09, 0xb3, 0x27, 0xa9, 0x53, 0xbe, 0x02, 0x20, 0xb4, 0x29, 0x20, 0xed, 0xb3, 0x36, + 0xf4, 0x01, 0xd2, 0x7e, 0xf9, 0x26, 0xa4, 0xb7, 0x3a, 0xc7, 0x7b, 0x9d, 0x1e, 0xb9, 0x06, 0x30, + 0xe8, 0x4c, 0x9c, 0xf6, 0x21, 0xee, 0xc3, 0xe7, 0x9f, 0x7f, 0xfe, 0xb9, 0x84, 0x97, 0xbd, 0x1c, + 0xad, 0x65, 0xfb, 0xf1, 0x02, 0xa0, 0x35, 0xe8, 0x6e, 0x99, 0x93, 0x49, 0xa7, 0x67, 0x92, 0x2a, + 0xa4, 0x2d, 0x73, 0x42, 0xa3, 0x9d, 0x84, 0xef, 0x08, 0xcb, 0xfe, 0x2a, 0xf8, 0xa8, 0x95, 0x6d, + 0x84, 0x18, 0x1c, 0x4a, 0x14, 0x48, 0x58, 0x47, 0x43, 0x7c, 0x27, 0x49, 0x19, 0xf4, 0xe7, 0xd2, + 0x25, 0x48, 0x33, 0x0c, 0x21, 0x90, 0xb4, 0x3a, 0x43, 0xb3, 0xc4, 0xfa, 0xc5, 0xdf, 0xe5, 0x5f, + 0x95, 0x00, 0xb6, 0xcd, 0x97, 0x67, 0xe8, 0xd3, 0x47, 0xc5, 0xf4, 0x99, 0x60, 0x7d, 0xde, 0x8f, + 0xeb, 0x93, 0xea, 0xec, 0xd0, 0xb6, 0xbb, 0x6d, 0xb6, 0xc5, 0xec, 0x49, 0x27, 0x47, 0x6b, 0x70, + 0xd7, 0xca, 0x1f, 0x40, 0x61, 0xd3, 0xb2, 0xcc, 0xb1, 0x3b, 0x26, 0x02, 0xc9, 0x67, 0xf6, 0xc4, + 0xe1, 0x6f, 0x4b, 0xf8, 0x9b, 0x94, 0x20, 0x39, 0xb2, 0xc7, 0x0e, 0x9b, 0x67, 0x3d, 0xa9, 0xaf, + 0xae, 0xae, 0x1a, 0x58, 0x43, 0x2e, 0x41, 0xee, 0xc0, 0xb6, 0x2c, 0xf3, 0x80, 0x4e, 0x22, 0x81, + 0x69, 0x8d, 0x5f, 0x51, 0xfe, 0x65, 0x09, 0x0a, 0x2d, 0xe7, 0x99, 0x6f, 0x5c, 0x81, 0xc4, 0x73, + 0xf3, 0x04, 0x87, 0x97, 0x30, 0xe8, 0x4f, 0x7a, 0x54, 0x7e, 0xbe, 0x33, 0x38, 0x62, 0x6f, 0x4d, + 0x05, 0x83, 0x15, 0xc8, 0x05, 0x48, 0xbf, 0x34, 0xfb, 0xbd, 0x67, 0x0e, 0xda, 0x94, 0x0d, 0x5e, + 0x22, 0xb7, 0x20, 0xd5, 0xa7, 0x83, 0x2d, 0x25, 0x71, 0xbd, 0x2e, 0xf8, 0xeb, 0x25, 0xce, 0xc1, + 0x60, 0xa0, 0x1b, 0xd9, 0x6c, 0x57, 0xf9, 0xe8, 0xa3, 0x8f, 0x3e, 0x92, 0xcb, 0x87, 0xb0, 0xe8, + 0x1e, 0xde, 0xc0, 0x64, 0xb7, 0xa1, 0x34, 0x30, 0xed, 0xf6, 0x61, 0xdf, 0xea, 0x0c, 0x06, 0x27, + 0xed, 0x97, 0xb6, 0xd5, 0xee, 0x58, 0x6d, 0x7b, 0x72, 0xd0, 0x19, 0xe3, 0x02, 0x44, 0x77, 0xb1, + 0x38, 0x30, 0xed, 0x26, 0xa3, 0xbd, 0x6f, 0x5b, 0x0f, 0xac, 0x16, 0xe5, 0x94, 0xff, 0x20, 0x09, + 0xb9, 0xad, 0x13, 0xd7, 0xfa, 0x22, 0xa4, 0x0e, 0xec, 0x23, 0x8b, 0xad, 0x65, 0xca, 0x60, 0x05, + 0x6f, 0x8f, 0x64, 0x61, 0x8f, 0x16, 0x21, 0xf5, 0xe2, 0xc8, 0x76, 0x4c, 0x9c, 0x6e, 0xce, 
0x60, + 0x05, 0xba, 0x5a, 0x23, 0xd3, 0x29, 0x25, 0x31, 0xb9, 0xa5, 0x3f, 0xfd, 0xf9, 0xa7, 0xce, 0x30, + 0x7f, 0xb2, 0x02, 0x69, 0x9b, 0xae, 0xfe, 0xa4, 0x94, 0xc6, 0x77, 0x35, 0x01, 0x2e, 0xee, 0x8a, + 0xc1, 0x51, 0x64, 0x13, 0x16, 0x5e, 0x9a, 0xed, 0xe1, 0xd1, 0xc4, 0x69, 0xf7, 0xec, 0x76, 0xd7, + 0x34, 0x47, 0xe6, 0xb8, 0x34, 0x87, 0x3d, 0x09, 0x3e, 0x61, 0xd6, 0x42, 0x1a, 0xf3, 0x2f, 0xcd, + 0xad, 0xa3, 0x89, 0xb3, 0x61, 0x3f, 0x46, 0x16, 0xa9, 0x42, 0x6e, 0x6c, 0x52, 0x4f, 0x40, 0x07, + 0x5b, 0x08, 0xf7, 0x1e, 0xa0, 0x66, 0xc7, 0xe6, 0x08, 0x2b, 0xc8, 0x3a, 0x64, 0xf7, 0xfb, 0xcf, + 0xcd, 0xc9, 0x33, 0xb3, 0x5b, 0xca, 0xa8, 0x52, 0x65, 0x5e, 0xbb, 0xe8, 0x73, 0xbc, 0x65, 0x5d, + 0x79, 0x64, 0x0f, 0xec, 0xb1, 0xe1, 0x41, 0xc9, 0x7d, 0xc8, 0x4d, 0xec, 0xa1, 0xc9, 0xf4, 0x9d, + 0xc5, 0xa0, 0x7a, 0x79, 0x16, 0x6f, 0xd7, 0x1e, 0x9a, 0xae, 0x07, 0x73, 0xf1, 0x64, 0x99, 0x0d, + 0x74, 0x9f, 0x5e, 0x9d, 0x4b, 0x80, 0x4f, 0x03, 0x74, 0x40, 0x78, 0x95, 0x26, 0x4b, 0x74, 0x40, + 0xbd, 0x43, 0x7a, 0x23, 0x2a, 0xe5, 0x31, 0xaf, 0xf4, 0xca, 0x4b, 0xb7, 0x20, 0xe7, 0x19, 0xf4, + 0x5d, 0x1f, 0x73, 0x37, 0x39, 0xf4, 0x07, 0xcc, 0xf5, 0x31, 0x5f, 0xf3, 0x06, 0xa4, 0x70, 0xd8, + 0x34, 0x42, 0x19, 0x0d, 0x1a, 0x10, 0x73, 0x90, 0xda, 0x30, 0x1a, 0x8d, 0x6d, 0x45, 0xc2, 0xd8, + 0xf8, 0xf4, 0xdd, 0x86, 0x22, 0x0b, 0x8a, 0xfd, 0x6d, 0x09, 0x12, 0x8d, 0x63, 0x54, 0x0b, 0x9d, + 0x86, 0x7b, 0xa2, 0xe9, 0x6f, 0xad, 0x06, 0xc9, 0xa1, 0x3d, 0x36, 0xc9, 0xf9, 0x19, 0xb3, 0x2c, + 0xf5, 0x70, 0xbf, 0x84, 0x57, 0xe4, 0xc6, 0xb1, 0x63, 0x20, 0x5e, 0x7b, 0x0b, 0x92, 0x8e, 0x79, + 0xec, 0xcc, 0xe6, 0x3d, 0x63, 0x1d, 0x50, 0x80, 0x76, 0x13, 0xd2, 0xd6, 0xd1, 0x70, 0xdf, 0x1c, + 0xcf, 0x86, 0xf6, 0x71, 0x7a, 0x1c, 0x52, 0x7e, 0x0f, 0x94, 0x47, 0xf6, 0x70, 0x34, 0x30, 0x8f, + 0x1b, 0xc7, 0x8e, 0x69, 0x4d, 0xfa, 0xb6, 0x45, 0xf5, 0x7c, 0xd8, 0x1f, 0xa3, 0x17, 0xc1, 0xb7, + 0x62, 0x2c, 0xd0, 0x53, 0x3d, 0x31, 0x0f, 0x6c, 0xab, 0xcb, 0x1d, 0x26, 0x2f, 0x51, 0xb4, 0xf3, + 0xac, 0x3f, 0xa6, 0x0e, 0x84, 0xfa, 0x79, 0x56, 0x28, 0x6f, 0x40, 0x91, 0xe7, 0x18, 0x13, 0xde, + 0x71, 0xf9, 0x06, 0x14, 0xdc, 0x2a, 0x7c, 0x38, 0xcf, 0x42, 0xf2, 0x83, 0x86, 0xd1, 0x52, 0xce, + 0xd1, 0x65, 0x6d, 0x6d, 0x37, 0x14, 0x89, 0xfe, 0xd8, 0x7b, 0xbf, 0x15, 0x58, 0xca, 0x4b, 0x50, + 0xf0, 0xc6, 0xbe, 0x6b, 0x3a, 0xd8, 0x42, 0x03, 0x42, 0xa6, 0x2e, 0x67, 0xa5, 0x72, 0x06, 0x52, + 0x8d, 0xe1, 0xc8, 0x39, 0x29, 0xff, 0x22, 0xe4, 0x39, 0xe8, 0x69, 0x7f, 0xe2, 0x90, 0x3b, 0x90, + 0x19, 0xf2, 0xf9, 0x4a, 0x78, 0xdd, 0x13, 0x35, 0xe5, 0xe3, 0xdc, 0xdf, 0x86, 0x8b, 0x5e, 0xaa, + 0x42, 0x46, 0xf0, 0xa5, 0xfc, 0xa8, 0xcb, 0xe2, 0x51, 0x67, 0x4e, 0x21, 0x21, 0x38, 0x85, 0xf2, + 0x16, 0x64, 0x58, 0x04, 0x9c, 0x60, 0x54, 0x67, 0xa9, 0x22, 0x13, 0x13, 0xdb, 0xf9, 0x3c, 0xab, + 0x63, 0x17, 0x95, 0xab, 0x90, 0x47, 0xc1, 0x72, 0x04, 0x73, 0x9d, 0x80, 0x55, 0x4c, 0x6e, 0xbf, + 0x9f, 0x82, 0xac, 0xbb, 0x52, 0x64, 0x19, 0xd2, 0x2c, 0x3f, 0x43, 0x53, 0xee, 0xfb, 0x41, 0x0a, + 0x33, 0x32, 0xb2, 0x0c, 0x19, 0x9e, 0x83, 0x71, 0xef, 0x2e, 0x57, 0x35, 0x23, 0xcd, 0x72, 0x2e, + 0xaf, 0xb1, 0xa6, 0xa3, 0x63, 0x62, 0x2f, 0x03, 0x69, 0x96, 0x55, 0x11, 0x15, 0x72, 0x5e, 0x1e, + 0x85, 0xfe, 0x98, 0x3f, 0x03, 0x64, 0xdd, 0xc4, 0x49, 0x40, 0xd4, 0x74, 0xf4, 0x58, 0x3c, 0xe7, + 0xcf, 0x36, 0xfd, 0xeb, 0x49, 0xd6, 0xcd, 0x86, 0xf0, 0xf9, 0xde, 0x4d, 0xf0, 0x33, 0x3c, 0xff, + 0xf1, 0x01, 0x35, 0x1d, 0x5d, 0x82, 0x9b, 0xcd, 0x67, 0x78, 0x8e, 0x43, 0xae, 0xd2, 0x21, 0x62, + 0xce, 0x82, 0x47, 0xdf, 0x4f, 0xdd, 0xd3, 0x2c, 0x93, 0x21, 0xd7, 0xa8, 0x05, 0x96, 0x98, 0xe0, + 0xb9, 0xf4, 0xf3, 
0xf4, 0x0c, 0xcf, 0x57, 0xc8, 0x4d, 0x0a, 0x61, 0xcb, 0x5f, 0x82, 0x88, 0xa4, + 0x3c, 0xc3, 0x93, 0x72, 0xa2, 0xd2, 0x0e, 0xd1, 0x3d, 0xa0, 0x4b, 0x10, 0x12, 0xf0, 0x34, 0x4b, + 0xc0, 0xc9, 0x15, 0x34, 0xc7, 0x26, 0x55, 0xf0, 0x93, 0xed, 0x0c, 0x4f, 0x70, 0xfc, 0x76, 0xbc, + 0xb2, 0x79, 0x89, 0x75, 0x86, 0xa7, 0x30, 0xa4, 0x46, 0xf7, 0x8b, 0xea, 0xbb, 0x34, 0x8f, 0x4e, + 0xb0, 0xe4, 0x0b, 0xcf, 0xdd, 0x53, 0xe6, 0x03, 0xeb, 0xcc, 0x83, 0x18, 0xa9, 0x26, 0x9e, 0x86, + 0x25, 0xca, 0xdb, 0xe9, 0x5b, 0x87, 0xa5, 0x22, 0xae, 0x44, 0xa2, 0x6f, 0x1d, 0x1a, 0xa9, 0x26, + 0xad, 0x61, 0x1a, 0xd8, 0xa6, 0x6d, 0x0a, 0xb6, 0x25, 0x6f, 0xb3, 0x46, 0x5a, 0x45, 0x4a, 0x90, + 0x6a, 0xb6, 0xb7, 0x3b, 0x56, 0x69, 0x81, 0xf1, 0xac, 0x8e, 0x65, 0x24, 0x9b, 0xdb, 0x1d, 0x8b, + 0xbc, 0x05, 0x89, 0xc9, 0xd1, 0x7e, 0x89, 0x84, 0xbf, 0xac, 0xec, 0x1e, 0xed, 0xbb, 0x43, 0x31, + 0x28, 0x82, 0x2c, 0x43, 0x76, 0xe2, 0x8c, 0xdb, 0xbf, 0x60, 0x8e, 0xed, 0xd2, 0x79, 0x5c, 0xc2, + 0x73, 0x46, 0x66, 0xe2, 0x8c, 0x3f, 0x30, 0xc7, 0xf6, 0x19, 0x9d, 0x5f, 0xf9, 0x0a, 0xe4, 0x05, + 0xbb, 0xa4, 0x08, 0x92, 0xc5, 0x6e, 0x0a, 0x75, 0xe9, 0x8e, 0x21, 0x59, 0xe5, 0x3d, 0x28, 0xb8, + 0x39, 0x0c, 0xce, 0x57, 0xa3, 0x27, 0x69, 0x60, 0x8f, 0xf1, 0x7c, 0xce, 0x6b, 0x97, 0xc4, 0x10, + 0xe5, 0xc3, 0x78, 0xb8, 0x60, 0xd0, 0xb2, 0x12, 0x1a, 0x8a, 0x54, 0xfe, 0xa1, 0x04, 0x85, 0x2d, + 0x7b, 0xec, 0x3f, 0x30, 0x2f, 0x42, 0x6a, 0xdf, 0xb6, 0x07, 0x13, 0x34, 0x9b, 0x35, 0x58, 0x81, + 0xbc, 0x01, 0x05, 0xfc, 0xe1, 0xe6, 0x9e, 0xb2, 0xf7, 0xb4, 0x91, 0xc7, 0x7a, 0x9e, 0x70, 0x12, + 0x48, 0xf6, 0x2d, 0x67, 0xc2, 0x3d, 0x19, 0xfe, 0x26, 0x5f, 0x80, 0x3c, 0xfd, 0xeb, 0x32, 0x93, + 0xde, 0x85, 0x15, 0x68, 0x35, 0x27, 0xbe, 0x05, 0x73, 0xb8, 0xfb, 0x1e, 0x2c, 0xe3, 0x3d, 0x63, + 0x14, 0x58, 0x03, 0x07, 0x96, 0x20, 0xc3, 0x5c, 0xc1, 0x04, 0xbf, 0x96, 0xe5, 0x0c, 0xb7, 0x48, + 0xdd, 0x2b, 0x66, 0x02, 0x2c, 0xdc, 0x67, 0x0c, 0x5e, 0x2a, 0x3f, 0x80, 0x2c, 0x46, 0xa9, 0xd6, + 0xa0, 0x4b, 0xca, 0x20, 0xf5, 0x4a, 0x26, 0xc6, 0xc8, 0x45, 0xe1, 0x9a, 0xcf, 0x9b, 0x57, 0x36, + 0x0c, 0xa9, 0xb7, 0xb4, 0x00, 0xd2, 0x06, 0xbd, 0x77, 0x1f, 0x73, 0x37, 0x2d, 0x1d, 0x97, 0x5b, + 0xdc, 0xc4, 0xb6, 0xf9, 0x32, 0xce, 0xc4, 0xb6, 0xf9, 0x92, 0x99, 0xb8, 0x3a, 0x65, 0x82, 0x96, + 0x4e, 0xf8, 0xa7, 0x43, 0xe9, 0xa4, 0x5c, 0x85, 0x39, 0x3c, 0x9e, 0x7d, 0xab, 0xb7, 0x63, 0xf7, + 0x2d, 0xbc, 0xe7, 0x1f, 0xe2, 0x3d, 0x49, 0x32, 0xa4, 0x43, 0xba, 0x07, 0xe6, 0x71, 0xe7, 0x80, + 0xdd, 0x38, 0xb3, 0x06, 0x2b, 0x94, 0x3f, 0x4b, 0xc2, 0x3c, 0x77, 0xad, 0xef, 0xf7, 0x9d, 0x67, + 0x5b, 0x9d, 0x11, 0x79, 0x0a, 0x05, 0xea, 0x55, 0xdb, 0xc3, 0xce, 0x68, 0x44, 0x8f, 0xaf, 0x84, + 0x57, 0x8d, 0xeb, 0x53, 0xae, 0x9a, 0xe3, 0x57, 0xb6, 0x3b, 0x43, 0x73, 0x8b, 0x61, 0x1b, 0x96, + 0x33, 0x3e, 0x31, 0xf2, 0x96, 0x5f, 0x43, 0x36, 0x21, 0x3f, 0x9c, 0xf4, 0x3c, 0x63, 0x32, 0x1a, + 0xab, 0x44, 0x1a, 0xdb, 0x9a, 0xf4, 0x02, 0xb6, 0x60, 0xe8, 0x55, 0xd0, 0x81, 0x51, 0x7f, 0xec, + 0xd9, 0x4a, 0x9c, 0x32, 0x30, 0xea, 0x3a, 0x82, 0x03, 0xdb, 0xf7, 0x6b, 0xc8, 0x63, 0x00, 0x7a, + 0xbc, 0x1c, 0x9b, 0xa6, 0x4e, 0xa8, 0xa0, 0xbc, 0xf6, 0x66, 0xa4, 0xad, 0x5d, 0x67, 0xbc, 0x67, + 0xef, 0x3a, 0x63, 0x66, 0x88, 0x1e, 0x4c, 0x2c, 0x2e, 0xbd, 0x03, 0x4a, 0x78, 0xfe, 0xe2, 0x8d, + 0x3c, 0x35, 0xe3, 0x46, 0x9e, 0xe3, 0x37, 0xf2, 0xba, 0x7c, 0x57, 0x5a, 0x7a, 0x0f, 0x8a, 0xa1, + 0x29, 0x8b, 0x74, 0xc2, 0xe8, 0xb7, 0x45, 0x7a, 0x5e, 0x7b, 0x5d, 0xf8, 0x9c, 0x2d, 0x6e, 0xb8, + 0x68, 0xf7, 0x1d, 0x50, 0xc2, 0xd3, 0x17, 0x0d, 0x67, 0x63, 0x32, 0x05, 0xe4, 0xdf, 0x87, 0xb9, + 0xc0, 0x94, 0x45, 0x72, 0xee, 0x94, 0x49, 
0x95, 0x7f, 0x29, 0x05, 0xa9, 0x96, 0x65, 0xda, 0x87, + 0xe4, 0xf5, 0x60, 0x9c, 0x7c, 0x72, 0xce, 0x8d, 0x91, 0x17, 0x43, 0x31, 0xf2, 0xc9, 0x39, 0x2f, + 0x42, 0x5e, 0x0c, 0x45, 0x48, 0xb7, 0xa9, 0xa6, 0x93, 0xcb, 0x53, 0xf1, 0xf1, 0xc9, 0x39, 0x21, + 0x38, 0x5e, 0x9e, 0x0a, 0x8e, 0x7e, 0x73, 0x4d, 0xa7, 0x0e, 0x35, 0x18, 0x19, 0x9f, 0x9c, 0xf3, + 0xa3, 0xe2, 0x72, 0x38, 0x2a, 0x7a, 0x8d, 0x35, 0x9d, 0x0d, 0x49, 0x88, 0x88, 0x38, 0x24, 0x16, + 0x0b, 0x97, 0xc3, 0xb1, 0x10, 0x79, 0x3c, 0x0a, 0x2e, 0x87, 0xa3, 0x20, 0x36, 0xf2, 0xa8, 0x77, + 0x31, 0x14, 0xf5, 0xd0, 0x28, 0x0b, 0x77, 0xcb, 0xe1, 0x70, 0xc7, 0x78, 0xc2, 0x48, 0xc5, 0x58, + 0xe7, 0x35, 0xd6, 0x74, 0xa2, 0x85, 0x02, 0x5d, 0xf4, 0x6d, 0x1f, 0xf7, 0x02, 0x9d, 0xbe, 0x4e, + 0x97, 0xcd, 0xbd, 0x88, 0x16, 0x63, 0xbe, 0xf8, 0xe3, 0x6a, 0xba, 0x17, 0x31, 0x0d, 0x32, 0x87, + 0x3c, 0x01, 0x56, 0xd0, 0x73, 0x09, 0xb2, 0xc4, 0xcd, 0x5f, 0x69, 0xb6, 0xd1, 0x83, 0xd1, 0x79, + 0x1d, 0xb2, 0x3b, 0x7d, 0x05, 0xe6, 0x9a, 0xed, 0xa7, 0x9d, 0x71, 0xcf, 0x9c, 0x38, 0xed, 0xbd, + 0x4e, 0xcf, 0x7b, 0x44, 0xa0, 0xfb, 0x9f, 0x6f, 0xf2, 0x96, 0xbd, 0x4e, 0x8f, 0x5c, 0x70, 0xc5, + 0xd5, 0xc5, 0x56, 0x89, 0xcb, 0x6b, 0xe9, 0x75, 0xba, 0x68, 0xcc, 0x18, 0xfa, 0xc2, 0x05, 0xee, + 0x0b, 0x1f, 0x66, 0x20, 0x75, 0x64, 0xf5, 0x6d, 0xeb, 0x61, 0x0e, 0x32, 0x8e, 0x3d, 0x1e, 0x76, + 0x1c, 0xbb, 0xfc, 0x23, 0x09, 0xe0, 0x91, 0x3d, 0x1c, 0x1e, 0x59, 0xfd, 0x17, 0x47, 0x26, 0xb9, + 0x02, 0xf9, 0x61, 0xe7, 0xb9, 0xd9, 0x1e, 0x9a, 0xed, 0x83, 0xb1, 0x7b, 0x0e, 0x72, 0xb4, 0x6a, + 0xcb, 0x7c, 0x34, 0x3e, 0x21, 0x25, 0xf7, 0x8a, 0x8e, 0xda, 0x41, 0x49, 0xf2, 0x2b, 0xfb, 0x22, + 0xbf, 0x74, 0xa6, 0xf9, 0x1e, 0xba, 0xd7, 0x4e, 0x96, 0x47, 0x64, 0xf8, 0xee, 0x61, 0x89, 0x4a, + 0xde, 0x31, 0x87, 0xa3, 0xf6, 0x01, 0x4a, 0x85, 0xca, 0x21, 0x45, 0xcb, 0x8f, 0xc8, 0x6d, 0x48, + 0x1c, 0xd8, 0x03, 0x14, 0xc9, 0x29, 0xfb, 0x42, 0x71, 0xe4, 0x0d, 0x48, 0x0c, 0x27, 0x4c, 0x36, + 0x79, 0x6d, 0x41, 0xb8, 0x27, 0xb0, 0xd0, 0x44, 0x61, 0xc3, 0x49, 0xcf, 0x9b, 0xf7, 0x8d, 0x22, + 0x24, 0x9a, 0xad, 0x16, 0x8d, 0xfd, 0xcd, 0x56, 0x6b, 0x4d, 0x91, 0xea, 0x5f, 0x82, 0x6c, 0x6f, + 0x6c, 0x9a, 0xd4, 0x3d, 0xcc, 0xce, 0x39, 0x3e, 0xc4, 0x58, 0xe7, 0x81, 0xea, 0x5b, 0x90, 0x39, + 0x60, 0x59, 0x07, 0x89, 0x48, 0x6b, 0x4b, 0x7f, 0xc8, 0x1e, 0x55, 0x96, 0xfc, 0xe6, 0x70, 0x9e, + 0x62, 0xb8, 0x36, 0xea, 0x3b, 0x90, 0x1b, 0xb7, 0x4f, 0x33, 0xf8, 0x31, 0x8b, 0x2e, 0x71, 0x06, + 0xb3, 0x63, 0x5e, 0x55, 0x6f, 0xc0, 0x82, 0x65, 0xbb, 0xdf, 0x50, 0xda, 0x5d, 0x76, 0xc6, 0x2e, + 0x4e, 0x5f, 0xe5, 0x5c, 0xe3, 0x26, 0xfb, 0x6e, 0x69, 0xd9, 0xbc, 0x81, 0x9d, 0xca, 0xfa, 0x23, + 0x50, 0x04, 0x33, 0x98, 0x7a, 0xc6, 0x59, 0x39, 0x64, 0x1f, 0x4a, 0x3d, 0x2b, 0x78, 0xee, 0x43, + 0x46, 0xd8, 0xc9, 0x8c, 0x31, 0xd2, 0x63, 0x5f, 0x9d, 0x3d, 0x23, 0xe8, 0xea, 0xa6, 0x8d, 0x50, + 0x5f, 0x13, 0x6d, 0xe4, 0x19, 0xfb, 0x20, 0x2d, 0x1a, 0xa9, 0xe9, 0xa1, 0x55, 0x39, 0x3a, 0x75, + 0x28, 0x7d, 0xf6, 0x3d, 0xd9, 0xb3, 0xc2, 0x1c, 0xe0, 0x0c, 0x33, 0xf1, 0x83, 0xf9, 0x90, 0x7d, + 0x6a, 0x0e, 0x98, 0x99, 0x1a, 0xcd, 0xe4, 0xd4, 0xd1, 0x3c, 0x67, 0xdf, 0x75, 0x3d, 0x33, 0xbb, + 0xb3, 0x46, 0x33, 0x39, 0x75, 0x34, 0x03, 0xf6, 0xc5, 0x37, 0x60, 0xa6, 0xa6, 0xd7, 0x37, 0x80, + 0x88, 0x5b, 0xcd, 0xe3, 0x44, 0x8c, 0x9d, 0x21, 0xfb, 0x8e, 0xef, 0x6f, 0x36, 0xa3, 0xcc, 0x32, + 0x14, 0x3f, 0x20, 0x8b, 0x7d, 0xe2, 0x0f, 0x1a, 0xaa, 0xe9, 0xf5, 0x4d, 0x38, 0x2f, 0x4e, 0xec, + 0x0c, 0x43, 0xb2, 0x55, 0xa9, 0x52, 0x34, 0x16, 0xfc, 0xa9, 0x71, 0xce, 0x4c, 0x53, 0xf1, 0x83, + 0x1a, 0xa9, 0x52, 0x45, 0x99, 0x32, 0x55, 0xd3, 0xeb, 0x0f, 0xa0, 
0x28, 0x98, 0xda, 0xc7, 0x08, + 0x1d, 0x6d, 0xe6, 0x05, 0xfb, 0x5f, 0x0b, 0xcf, 0x0c, 0x8d, 0xe8, 0xe1, 0x1d, 0xe3, 0x31, 0x2e, + 0xda, 0xc8, 0x98, 0xfd, 0xa3, 0x80, 0x3f, 0x16, 0x64, 0x84, 0x8e, 0x04, 0xe6, 0xdf, 0x71, 0x56, + 0x26, 0xec, 0x5f, 0x08, 0xfc, 0xa1, 0x50, 0x42, 0xbd, 0x1f, 0x98, 0x8e, 0x49, 0x83, 0x5c, 0x8c, + 0x0d, 0x07, 0x3d, 0xf2, 0x9b, 0x91, 0x80, 0x15, 0xf1, 0x81, 0x44, 0x98, 0x36, 0x2d, 0xd6, 0x37, + 0x61, 0xfe, 0xec, 0x0e, 0xe9, 0x63, 0x89, 0x65, 0xcb, 0xd5, 0x15, 0x9a, 0x50, 0x1b, 0x73, 0xdd, + 0x80, 0x5f, 0x6a, 0xc0, 0xdc, 0x99, 0x9d, 0xd2, 0x27, 0x12, 0xcb, 0x39, 0xa9, 0x25, 0xa3, 0xd0, + 0x0d, 0x7a, 0xa6, 0xb9, 0x33, 0xbb, 0xa5, 0x4f, 0x25, 0xf6, 0x40, 0xa1, 0x6b, 0x9e, 0x11, 0xd7, + 0x33, 0xcd, 0x9d, 0xd9, 0x2d, 0x7d, 0x95, 0x65, 0x94, 0xb2, 0x5e, 0x15, 0x8d, 0xa0, 0x2f, 0x98, + 0x3f, 0xbb, 0x5b, 0xfa, 0x9a, 0x84, 0x8f, 0x15, 0xb2, 0xae, 0x7b, 0xeb, 0xe2, 0x79, 0xa6, 0xf9, + 0xb3, 0xbb, 0xa5, 0xaf, 0x4b, 0xf8, 0xa4, 0x21, 0xeb, 0xeb, 0x01, 0x33, 0xc1, 0xd1, 0x9c, 0xee, + 0x96, 0xbe, 0x21, 0xe1, 0x2b, 0x83, 0xac, 0xd7, 0x3c, 0x33, 0xbb, 0x53, 0xa3, 0x39, 0xdd, 0x2d, + 0x7d, 0x13, 0x6f, 0xf1, 0x75, 0x59, 0xbf, 0x13, 0x30, 0x83, 0x9e, 0xa9, 0xf8, 0x0a, 0x6e, 0xe9, + 0x5b, 0x12, 0x3e, 0x06, 0xc9, 0xfa, 0x5d, 0xc3, 0xed, 0xdd, 0xf7, 0x4c, 0xc5, 0x57, 0x70, 0x4b, + 0x9f, 0x49, 0xf8, 0x66, 0x24, 0xeb, 0xf7, 0x82, 0x86, 0xd0, 0x33, 0x29, 0xaf, 0xe2, 0x96, 0xbe, + 0x4d, 0x2d, 0x15, 0xeb, 0xf2, 0xfa, 0xaa, 0xe1, 0x0e, 0x40, 0xf0, 0x4c, 0xca, 0xab, 0xb8, 0xa5, + 0xef, 0x50, 0x53, 0x4a, 0x5d, 0x5e, 0x5f, 0x0b, 0x99, 0xaa, 0xe9, 0xf5, 0x47, 0x50, 0x38, 0xab, + 0x5b, 0xfa, 0xae, 0xf8, 0x16, 0x97, 0xef, 0x0a, 0xbe, 0x69, 0x47, 0xd8, 0xb3, 0x53, 0x1d, 0xd3, + 0xf7, 0x30, 0xc7, 0xa9, 0xcf, 0x3d, 0x61, 0xef, 0x55, 0x8c, 0xe0, 0x6f, 0x1f, 0x73, 0x53, 0x5b, + 0xfe, 0xf9, 0x38, 0xd5, 0x47, 0x7d, 0x5f, 0xc2, 0x47, 0xad, 0x02, 0x37, 0x88, 0x78, 0xef, 0xa4, + 0x30, 0x87, 0xf5, 0xa1, 0x3f, 0xcb, 0xd3, 0xbc, 0xd5, 0x0f, 0xa4, 0x57, 0x71, 0x57, 0xf5, 0x44, + 0x6b, 0xbb, 0xe1, 0x2d, 0x06, 0xd6, 0xbc, 0x0d, 0xc9, 0x63, 0x6d, 0x75, 0x4d, 0xbc, 0x92, 0x89, + 0x6f, 0xb9, 0xcc, 0x49, 0xe5, 0xb5, 0xa2, 0xf0, 0xdc, 0x3d, 0x1c, 0x39, 0x27, 0x06, 0xb2, 0x38, + 0x5b, 0x8b, 0x64, 0x7f, 0x12, 0xc3, 0xd6, 0x38, 0xbb, 0x1a, 0xc9, 0xfe, 0x34, 0x86, 0x5d, 0xe5, + 0x6c, 0x3d, 0x92, 0xfd, 0xd5, 0x18, 0xb6, 0xce, 0xd9, 0xeb, 0x91, 0xec, 0xaf, 0xc5, 0xb0, 0xd7, + 0x39, 0xbb, 0x16, 0xc9, 0xfe, 0x7a, 0x0c, 0xbb, 0xc6, 0xd9, 0x77, 0x22, 0xd9, 0xdf, 0x88, 0x61, + 0xdf, 0xe1, 0xec, 0xbb, 0x91, 0xec, 0x6f, 0xc6, 0xb0, 0xef, 0x72, 0xf6, 0xbd, 0x48, 0xf6, 0xb7, + 0x62, 0xd8, 0xf7, 0x18, 0x7b, 0x6d, 0x35, 0x92, 0xfd, 0x59, 0x34, 0x7b, 0x6d, 0x95, 0xb3, 0xa3, + 0xb5, 0xf6, 0xed, 0x18, 0x36, 0xd7, 0xda, 0x5a, 0xb4, 0xd6, 0xbe, 0x13, 0xc3, 0xe6, 0x5a, 0x5b, + 0x8b, 0xd6, 0xda, 0x77, 0x63, 0xd8, 0x5c, 0x6b, 0x6b, 0xd1, 0x5a, 0xfb, 0x5e, 0x0c, 0x9b, 0x6b, + 0x6d, 0x2d, 0x5a, 0x6b, 0xdf, 0x8f, 0x61, 0x73, 0xad, 0xad, 0x45, 0x6b, 0xed, 0x07, 0x31, 0x6c, + 0xae, 0xb5, 0xb5, 0x68, 0xad, 0xfd, 0x51, 0x0c, 0x9b, 0x6b, 0x6d, 0x2d, 0x5a, 0x6b, 0x7f, 0x1c, + 0xc3, 0xe6, 0x5a, 0x5b, 0x8b, 0xd6, 0xda, 0x9f, 0xc4, 0xb0, 0xb9, 0xd6, 0xb4, 0x68, 0xad, 0xfd, + 0x69, 0x34, 0x5b, 0xe3, 0x5a, 0xd3, 0xa2, 0xb5, 0xf6, 0x67, 0x31, 0x6c, 0xae, 0x35, 0x2d, 0x5a, + 0x6b, 0x7f, 0x1e, 0xc3, 0xe6, 0x5a, 0xd3, 0xa2, 0xb5, 0xf6, 0xc3, 0x18, 0x36, 0xd7, 0x9a, 0x16, + 0xad, 0xb5, 0xbf, 0x88, 0x61, 0x73, 0xad, 0x69, 0xd1, 0x5a, 0xfb, 0xcb, 0x18, 0x36, 0xd7, 0x9a, + 0x16, 0xad, 0xb5, 0xbf, 0x8a, 0x61, 0x73, 0xad, 0x69, 0xd1, 0x5a, 0xfb, 0xeb, 0x18, 0x36, 
0xd7, + 0x9a, 0x16, 0xad, 0xb5, 0xbf, 0x89, 0x61, 0x73, 0xad, 0x69, 0xd1, 0x5a, 0xfb, 0xdb, 0x18, 0x36, + 0xd7, 0x5a, 0x35, 0x5a, 0x6b, 0x7f, 0x17, 0xcd, 0xae, 0x72, 0xad, 0x55, 0xa3, 0xb5, 0xf6, 0xf7, + 0x31, 0x6c, 0xae, 0xb5, 0x6a, 0xb4, 0xd6, 0xfe, 0x21, 0x86, 0xcd, 0xb5, 0x56, 0x8d, 0xd6, 0xda, + 0x3f, 0xc6, 0xb0, 0xb9, 0xd6, 0xaa, 0xd1, 0x5a, 0xfb, 0x51, 0x0c, 0x9b, 0x6b, 0xad, 0x1a, 0xad, + 0xb5, 0x7f, 0x8a, 0x61, 0x73, 0xad, 0x55, 0xa3, 0xb5, 0xf6, 0xcf, 0x31, 0x6c, 0xae, 0xb5, 0x6a, + 0xb4, 0xd6, 0xfe, 0x25, 0x86, 0xcd, 0xb5, 0x56, 0x8d, 0xd6, 0xda, 0xbf, 0xc6, 0xb0, 0xb9, 0xd6, + 0xaa, 0xd1, 0x5a, 0xfb, 0xb7, 0x18, 0x36, 0xd7, 0x9a, 0x1e, 0xad, 0xb5, 0x7f, 0x8f, 0x66, 0xeb, + 0x5c, 0x6b, 0x7a, 0xb4, 0xd6, 0xfe, 0x23, 0x86, 0xcd, 0xb5, 0xa6, 0x47, 0x6b, 0xed, 0x3f, 0x63, + 0xd8, 0x5c, 0x6b, 0x7a, 0xb4, 0xd6, 0xfe, 0x2b, 0x86, 0xcd, 0xb5, 0xa6, 0x47, 0x6b, 0xed, 0xbf, + 0x63, 0xd8, 0x5c, 0x6b, 0x7a, 0xb4, 0xd6, 0xfe, 0x27, 0x86, 0xcd, 0xb5, 0xa6, 0x47, 0x6b, 0xed, + 0xc7, 0x31, 0x6c, 0xae, 0x35, 0x3d, 0x5a, 0x6b, 0x3f, 0x89, 0x61, 0x73, 0xad, 0xe9, 0xd1, 0x5a, + 0xfb, 0xdf, 0x18, 0x36, 0xd7, 0x9a, 0x1e, 0xad, 0xb5, 0xff, 0x8b, 0x61, 0x73, 0xad, 0xad, 0x47, + 0x6b, 0xed, 0xff, 0xa3, 0xd9, 0xeb, 0xab, 0x3f, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xaa, 0x00, 0xcd, + 0x32, 0x57, 0x39, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/proto/testdata/test.proto b/vendor/github.com/golang/protobuf/proto/testdata/test.proto new file mode 100644 index 0000000..70e3cfc --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/testdata/test.proto @@ -0,0 +1,548 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// A feature-rich test file for the protocol compiler and libraries. 
+ +syntax = "proto2"; + +package testdata; + +enum FOO { FOO1 = 1; }; + +message GoEnum { + required FOO foo = 1; +} + +message GoTestField { + required string Label = 1; + required string Type = 2; +} + +message GoTest { + // An enum, for completeness. + enum KIND { + VOID = 0; + + // Basic types + BOOL = 1; + BYTES = 2; + FINGERPRINT = 3; + FLOAT = 4; + INT = 5; + STRING = 6; + TIME = 7; + + // Groupings + TUPLE = 8; + ARRAY = 9; + MAP = 10; + + // Table types + TABLE = 11; + + // Functions + FUNCTION = 12; // last tag + }; + + // Some typical parameters + required KIND Kind = 1; + optional string Table = 2; + optional int32 Param = 3; + + // Required, repeated and optional foreign fields. + required GoTestField RequiredField = 4; + repeated GoTestField RepeatedField = 5; + optional GoTestField OptionalField = 6; + + // Required fields of all basic types + required bool F_Bool_required = 10; + required int32 F_Int32_required = 11; + required int64 F_Int64_required = 12; + required fixed32 F_Fixed32_required = 13; + required fixed64 F_Fixed64_required = 14; + required uint32 F_Uint32_required = 15; + required uint64 F_Uint64_required = 16; + required float F_Float_required = 17; + required double F_Double_required = 18; + required string F_String_required = 19; + required bytes F_Bytes_required = 101; + required sint32 F_Sint32_required = 102; + required sint64 F_Sint64_required = 103; + + // Repeated fields of all basic types + repeated bool F_Bool_repeated = 20; + repeated int32 F_Int32_repeated = 21; + repeated int64 F_Int64_repeated = 22; + repeated fixed32 F_Fixed32_repeated = 23; + repeated fixed64 F_Fixed64_repeated = 24; + repeated uint32 F_Uint32_repeated = 25; + repeated uint64 F_Uint64_repeated = 26; + repeated float F_Float_repeated = 27; + repeated double F_Double_repeated = 28; + repeated string F_String_repeated = 29; + repeated bytes F_Bytes_repeated = 201; + repeated sint32 F_Sint32_repeated = 202; + repeated sint64 F_Sint64_repeated = 203; + + // Optional fields of all basic types + optional bool F_Bool_optional = 30; + optional int32 F_Int32_optional = 31; + optional int64 F_Int64_optional = 32; + optional fixed32 F_Fixed32_optional = 33; + optional fixed64 F_Fixed64_optional = 34; + optional uint32 F_Uint32_optional = 35; + optional uint64 F_Uint64_optional = 36; + optional float F_Float_optional = 37; + optional double F_Double_optional = 38; + optional string F_String_optional = 39; + optional bytes F_Bytes_optional = 301; + optional sint32 F_Sint32_optional = 302; + optional sint64 F_Sint64_optional = 303; + + // Default-valued fields of all basic types + optional bool F_Bool_defaulted = 40 [default=true]; + optional int32 F_Int32_defaulted = 41 [default=32]; + optional int64 F_Int64_defaulted = 42 [default=64]; + optional fixed32 F_Fixed32_defaulted = 43 [default=320]; + optional fixed64 F_Fixed64_defaulted = 44 [default=640]; + optional uint32 F_Uint32_defaulted = 45 [default=3200]; + optional uint64 F_Uint64_defaulted = 46 [default=6400]; + optional float F_Float_defaulted = 47 [default=314159.]; + optional double F_Double_defaulted = 48 [default=271828.]; + optional string F_String_defaulted = 49 [default="hello, \"world!\"\n"]; + optional bytes F_Bytes_defaulted = 401 [default="Bignose"]; + optional sint32 F_Sint32_defaulted = 402 [default = -32]; + optional sint64 F_Sint64_defaulted = 403 [default = -64]; + + // Packed repeated fields (no string or bytes). 
+ repeated bool F_Bool_repeated_packed = 50 [packed=true]; + repeated int32 F_Int32_repeated_packed = 51 [packed=true]; + repeated int64 F_Int64_repeated_packed = 52 [packed=true]; + repeated fixed32 F_Fixed32_repeated_packed = 53 [packed=true]; + repeated fixed64 F_Fixed64_repeated_packed = 54 [packed=true]; + repeated uint32 F_Uint32_repeated_packed = 55 [packed=true]; + repeated uint64 F_Uint64_repeated_packed = 56 [packed=true]; + repeated float F_Float_repeated_packed = 57 [packed=true]; + repeated double F_Double_repeated_packed = 58 [packed=true]; + repeated sint32 F_Sint32_repeated_packed = 502 [packed=true]; + repeated sint64 F_Sint64_repeated_packed = 503 [packed=true]; + + // Required, repeated, and optional groups. + required group RequiredGroup = 70 { + required string RequiredField = 71; + }; + + repeated group RepeatedGroup = 80 { + required string RequiredField = 81; + }; + + optional group OptionalGroup = 90 { + required string RequiredField = 91; + }; +} + +// For testing a group containing a required field. +message GoTestRequiredGroupField { + required group Group = 1 { + required int32 Field = 2; + }; +} + +// For testing skipping of unrecognized fields. +// Numbers are all big, larger than tag numbers in GoTestField, +// the message used in the corresponding test. +message GoSkipTest { + required int32 skip_int32 = 11; + required fixed32 skip_fixed32 = 12; + required fixed64 skip_fixed64 = 13; + required string skip_string = 14; + required group SkipGroup = 15 { + required int32 group_int32 = 16; + required string group_string = 17; + } +} + +// For testing packed/non-packed decoder switching. +// A serialized instance of one should be deserializable as the other. +message NonPackedTest { + repeated int32 a = 1; +} + +message PackedTest { + repeated int32 b = 1 [packed=true]; +} + +message MaxTag { + // Maximum possible tag number. + optional string last_field = 536870911; +} + +message OldMessage { + message Nested { + optional string name = 1; + } + optional Nested nested = 1; + + optional int32 num = 2; +} + +// NewMessage is wire compatible with OldMessage; +// imagine it as a future version. +message NewMessage { + message Nested { + optional string name = 1; + optional string food_group = 2; + } + optional Nested nested = 1; + + // This is an int32 in OldMessage. + optional int64 num = 2; +} + +// Smaller tests for ASCII formatting. + +message InnerMessage { + required string host = 1; + optional int32 port = 2 [default=4000]; + optional bool connected = 3; +} + +message OtherMessage { + optional int64 key = 1; + optional bytes value = 2; + optional float weight = 3; + optional InnerMessage inner = 4; + + extensions 100 to max; +} + +message RequiredInnerMessage { + required InnerMessage leo_finally_won_an_oscar = 1; +} + +message MyMessage { + required int32 count = 1; + optional string name = 2; + optional string quote = 3; + repeated string pet = 4; + optional InnerMessage inner = 5; + repeated OtherMessage others = 6; + optional RequiredInnerMessage we_must_go_deeper = 13; + repeated InnerMessage rep_inner = 12; + + enum Color { + RED = 0; + GREEN = 1; + BLUE = 2; + }; + optional Color bikeshed = 7; + + optional group SomeGroup = 8 { + optional int32 group_field = 9; + } + + // This field becomes [][]byte in the generated code. 
+ repeated bytes rep_bytes = 10; + + optional double bigfloat = 11; + + extensions 100 to max; +} + +message Ext { + extend MyMessage { + optional Ext more = 103; + optional string text = 104; + optional int32 number = 105; + } + + optional string data = 1; +} + +extend MyMessage { + repeated string greeting = 106; +} + +message ComplexExtension { + optional int32 first = 1; + optional int32 second = 2; + repeated int32 third = 3; +} + +extend OtherMessage { + optional ComplexExtension complex = 200; + repeated ComplexExtension r_complex = 201; +} + +message DefaultsMessage { + enum DefaultsEnum { + ZERO = 0; + ONE = 1; + TWO = 2; + }; + extensions 100 to max; +} + +extend DefaultsMessage { + optional double no_default_double = 101; + optional float no_default_float = 102; + optional int32 no_default_int32 = 103; + optional int64 no_default_int64 = 104; + optional uint32 no_default_uint32 = 105; + optional uint64 no_default_uint64 = 106; + optional sint32 no_default_sint32 = 107; + optional sint64 no_default_sint64 = 108; + optional fixed32 no_default_fixed32 = 109; + optional fixed64 no_default_fixed64 = 110; + optional sfixed32 no_default_sfixed32 = 111; + optional sfixed64 no_default_sfixed64 = 112; + optional bool no_default_bool = 113; + optional string no_default_string = 114; + optional bytes no_default_bytes = 115; + optional DefaultsMessage.DefaultsEnum no_default_enum = 116; + + optional double default_double = 201 [default = 3.1415]; + optional float default_float = 202 [default = 3.14]; + optional int32 default_int32 = 203 [default = 42]; + optional int64 default_int64 = 204 [default = 43]; + optional uint32 default_uint32 = 205 [default = 44]; + optional uint64 default_uint64 = 206 [default = 45]; + optional sint32 default_sint32 = 207 [default = 46]; + optional sint64 default_sint64 = 208 [default = 47]; + optional fixed32 default_fixed32 = 209 [default = 48]; + optional fixed64 default_fixed64 = 210 [default = 49]; + optional sfixed32 default_sfixed32 = 211 [default = 50]; + optional sfixed64 default_sfixed64 = 212 [default = 51]; + optional bool default_bool = 213 [default = true]; + optional string default_string = 214 [default = "Hello, string"]; + optional bytes default_bytes = 215 [default = "Hello, bytes"]; + optional DefaultsMessage.DefaultsEnum default_enum = 216 [default = ONE]; +} + +message MyMessageSet { + option message_set_wire_format = true; + extensions 100 to max; +} + +message Empty { +} + +extend MyMessageSet { + optional Empty x201 = 201; + optional Empty x202 = 202; + optional Empty x203 = 203; + optional Empty x204 = 204; + optional Empty x205 = 205; + optional Empty x206 = 206; + optional Empty x207 = 207; + optional Empty x208 = 208; + optional Empty x209 = 209; + optional Empty x210 = 210; + optional Empty x211 = 211; + optional Empty x212 = 212; + optional Empty x213 = 213; + optional Empty x214 = 214; + optional Empty x215 = 215; + optional Empty x216 = 216; + optional Empty x217 = 217; + optional Empty x218 = 218; + optional Empty x219 = 219; + optional Empty x220 = 220; + optional Empty x221 = 221; + optional Empty x222 = 222; + optional Empty x223 = 223; + optional Empty x224 = 224; + optional Empty x225 = 225; + optional Empty x226 = 226; + optional Empty x227 = 227; + optional Empty x228 = 228; + optional Empty x229 = 229; + optional Empty x230 = 230; + optional Empty x231 = 231; + optional Empty x232 = 232; + optional Empty x233 = 233; + optional Empty x234 = 234; + optional Empty x235 = 235; + optional Empty x236 = 236; + optional Empty x237 
= 237; + optional Empty x238 = 238; + optional Empty x239 = 239; + optional Empty x240 = 240; + optional Empty x241 = 241; + optional Empty x242 = 242; + optional Empty x243 = 243; + optional Empty x244 = 244; + optional Empty x245 = 245; + optional Empty x246 = 246; + optional Empty x247 = 247; + optional Empty x248 = 248; + optional Empty x249 = 249; + optional Empty x250 = 250; +} + +message MessageList { + repeated group Message = 1 { + required string name = 2; + required int32 count = 3; + } +} + +message Strings { + optional string string_field = 1; + optional bytes bytes_field = 2; +} + +message Defaults { + enum Color { + RED = 0; + GREEN = 1; + BLUE = 2; + } + + // Default-valued fields of all basic types. + // Same as GoTest, but copied here to make testing easier. + optional bool F_Bool = 1 [default=true]; + optional int32 F_Int32 = 2 [default=32]; + optional int64 F_Int64 = 3 [default=64]; + optional fixed32 F_Fixed32 = 4 [default=320]; + optional fixed64 F_Fixed64 = 5 [default=640]; + optional uint32 F_Uint32 = 6 [default=3200]; + optional uint64 F_Uint64 = 7 [default=6400]; + optional float F_Float = 8 [default=314159.]; + optional double F_Double = 9 [default=271828.]; + optional string F_String = 10 [default="hello, \"world!\"\n"]; + optional bytes F_Bytes = 11 [default="Bignose"]; + optional sint32 F_Sint32 = 12 [default=-32]; + optional sint64 F_Sint64 = 13 [default=-64]; + optional Color F_Enum = 14 [default=GREEN]; + + // More fields with crazy defaults. + optional float F_Pinf = 15 [default=inf]; + optional float F_Ninf = 16 [default=-inf]; + optional float F_Nan = 17 [default=nan]; + + // Sub-message. + optional SubDefaults sub = 18; + + // Redundant but explicit defaults. + optional string str_zero = 19 [default=""]; +} + +message SubDefaults { + optional int64 n = 1 [default=7]; +} + +message RepeatedEnum { + enum Color { + RED = 1; + } + repeated Color color = 1; +} + +message MoreRepeated { + repeated bool bools = 1; + repeated bool bools_packed = 2 [packed=true]; + repeated int32 ints = 3; + repeated int32 ints_packed = 4 [packed=true]; + repeated int64 int64s_packed = 7 [packed=true]; + repeated string strings = 5; + repeated fixed32 fixeds = 6; +} + +// GroupOld and GroupNew have the same wire format. +// GroupNew has a new field inside a group. + +message GroupOld { + optional group G = 101 { + optional int32 x = 2; + } +} + +message GroupNew { + optional group G = 101 { + optional int32 x = 2; + optional int32 y = 3; + } +} + +message FloatingPoint { + required double f = 1; + optional bool exact = 2; +} + +message MessageWithMap { + map<int32, string> name_mapping = 1; + map<sint64, FloatingPoint> msg_mapping = 2; + map<bool, bytes> byte_mapping = 3; + map<string, string> str_to_str = 4; +} + +message Oneof { + oneof union { + bool F_Bool = 1; + int32 F_Int32 = 2; + int64 F_Int64 = 3; + fixed32 F_Fixed32 = 4; + fixed64 F_Fixed64 = 5; + uint32 F_Uint32 = 6; + uint64 F_Uint64 = 7; + float F_Float = 8; + double F_Double = 9; + string F_String = 10; + bytes F_Bytes = 11; + sint32 F_Sint32 = 12; + sint64 F_Sint64 = 13; + MyMessage.Color F_Enum = 14; + GoTestField F_Message = 15; + group F_Group = 16 { + optional int32 x = 17; + } + int32 F_Largest_Tag = 536870911; + } + + oneof tormato { + int32 value = 100; + } +} + +message Communique { + optional bool make_me_cry = 1; + + // This is a oneof, called "union".
+ oneof union { + int32 number = 5; + string name = 6; + bytes data = 7; + double temp_c = 8; + MyMessage.Color col = 9; + Strings msg = 10; + } +} diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go new file mode 100644 index 0000000..965876b --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text.go @@ -0,0 +1,854 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. + +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + gtNewline = []byte(">\n") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. 
+ return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Print("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +// raw is the interface satisfied by RawMessage. +type raw interface { + Bytes() []byte +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. 
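+//
+// An illustrative sketch of the expanded form (the type URL and field value
+// here are invented for illustration, not taken from this package): an Any
+// carrying a linked-in testdata.MyMessage would render as
+//
+//	[type.googleapis.com/testdata.MyMessage]: <
+//	  count: 42
+//	>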
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return err + } + } + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("<nil>\n")); err != nil { + return err + } + continue + } + if err := tm.writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields.
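+ // An illustrative sketch of that shape (entry value invented for
+ // illustration): a map<int32, string> field such as test.proto's
+ // name_mapping holding {1: "Rob"} renders as
+ //   name_mapping: <
+ //     key: 1
+ //     value: "Rob"
+ //   >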
+ keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, key, props.mkeyprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, val, props.mvalprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. + if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if b, ok := fv.Interface().(raw); ok { + if err := writeRaw(w, b.Bytes()); err != nil { + return err + } + continue + } + + // Enums have a String method, so writeAny will work fine. + if err := tm.writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv.Addr() + if _, ok := extendable(pv.Interface()); ok { + if err := tm.writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +// writeRaw writes an uninterpreted raw message. 
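+// A sketch of the shape it produces: the raw bytes are wrapped in < and >,
+// preceded by a "/* N unknown bytes */" banner (in non-compact mode) and one
+// "tag: value" line per field that writeUnknownStruct manages to decode.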
+func writeRaw(w *textWriter, b []byte) error { + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if err := writeUnknownStruct(w, b); err != nil { + return err + } + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + return nil +} + +// writeAny writes an arbitrary field. +func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + // Floats have special cases. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Bytes())); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if etm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else if err := tm.writeStruct(w, v); err != nil { + return err + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. 
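+ // An illustrative sketch of the escaping: for the input bytes
+ // {'a', '\n', 0x7f} the text written between the quotes is a\n\177,
+ // a backslash escape for the newline and a three-digit octal escape
+ // for the byte outside the printable ASCII range.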
+ switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, err := fmt.Fprintf(w, "/* %v */\n", err) + return err + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, err := w.Write(endBraceNewline); err != nil { + return err + } + continue + } + if _, err := fmt.Fprint(w, tag); err != nil { + return err + } + if wire != WireStartGroup { + if err := w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err := w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err = w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + ep, _ := extendable(pv.Interface()) + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + m, mu := ep.extensionsRead() + if m == nil { + return nil + } + mu.Lock() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + mu.Unlock() + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(ep, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. 
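+ // An illustrative sketch (values invented): two elements of the
+ // repeated string extension testdata.greeting would print as
+ //   [testdata.greeting]: "hello"
+ //   [testdata.greeting]: "world"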
+ if !desc.repeated() { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: tm.Compact, + } + + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. + v := reflect.Indirect(val) + if err := tm.writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// Text is the same as Marshal, but returns the string directly. +func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go new file mode 100644 index 0000000..5e14513 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -0,0 +1,895 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + 
switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") + errBadHex = errors.New("proto: bad hexadecimal") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) 
+ s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + base := 8 + ss := s[:2] + s = s[2:] + if r == 'x' || r == 'X' { + base = 16 + } else { + ss = string(r) + ss + } + i, err := strconv.ParseUint(ss, base, 8) + if err != nil { + return "", "", err + } + return string([]byte{byte(i)}), s, nil + case 'u', 'U': + n := 4 + if r == 'U' { + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) + } + + bs := make([]byte, n/2) + for i := 0; i < n; i += 2 { + a, ok1 := unhex(s[i]) + b, ok2 := unhex(s[i+1]) + if !ok1 || !ok2 { + return "", "", errBadHex + } + bs[i/2] = a<<4 | b + } + s = s[n:] + return string(bs), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Adapted from src/pkg/strconv/quote.go. +func unhex(b byte) (v byte, ok bool) { + switch { + case '0' <= b && b <= '9': + return b - '0', true + case 'a' <= b && b <= 'f': + return b - 'a' + 10, true + case 'A' <= b && b <= 'F': + return b - 'A' + 10, true + } + return 0, false +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. + cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. 
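+// The bool result reports whether the field was found; otherwise it returns (-1, nil, false).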
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension or an Any. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + extName, err := p.consumeExtName() + if err != nil { + return err + } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. + messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. 
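+			// Walk every extension registered for this message type, matching by full name.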
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == extName { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", extName) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(Message) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. See b/28924776 for a time + // this went wrong. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.mkeyprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + case "value": + if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.mvalprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + default: + p.back() + return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. 
+ if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. +func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) readAny(v reflect.Value, props *Properties) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "" { + return p.errorf("unexpected EOF") + } + + switch fv := v; fv.Kind() { + case reflect.Slice: + at := v.Type() + if at.Elem().Kind() == reflect.Uint8 { + // Special case for []byte + if tok.value[0] != '"' && tok.value[0] != '\'' { + // Deliberately written out here, as the error after + // this switch statement would write "invalid []byte: ...", + // which is not as user-friendly. + return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "]" { + break + } + if tok.value != "," { + return p.errorf("Expected ']' or ',' found %q", tok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // true/1/t/True or false/f/0/False. + switch tok.value { + case "true", "1", "t", "True": + fv.SetBool(true) + return nil + case "false", "0", "f", "False": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". 
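+		// For example, "17.0f" parses as 17.0, but trimming 'f' from "inf" or "-inf" would mangle them.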
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(x) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. +func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + err := um.UnmarshalText([]byte(s)) + return err + } + pb.Reset() + v := reflect.ValueOf(pb) + if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { + return pe + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/proto/text_parser_test.go b/vendor/github.com/golang/protobuf/proto/text_parser_test.go new file mode 100644 index 0000000..8f7cb4d --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_parser_test.go @@ -0,0 +1,673 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "math" + "reflect" + "testing" + + . "github.com/golang/protobuf/proto" + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + . "github.com/golang/protobuf/proto/testdata" +) + +type UnmarshalTextTest struct { + in string + err string // if "", no error expected + out *MyMessage +} + +func buildExtStructTest(text string) UnmarshalTextTest { + msg := &MyMessage{ + Count: Int32(42), + } + SetExtension(msg, E_Ext_More, &Ext{ + Data: String("Hello, world!"), + }) + return UnmarshalTextTest{in: text, out: msg} +} + +func buildExtDataTest(text string) UnmarshalTextTest { + msg := &MyMessage{ + Count: Int32(42), + } + SetExtension(msg, E_Ext_Text, String("Hello, world!")) + SetExtension(msg, E_Ext_Number, Int32(1729)) + return UnmarshalTextTest{in: text, out: msg} +} + +func buildExtRepStringTest(text string) UnmarshalTextTest { + msg := &MyMessage{ + Count: Int32(42), + } + if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil { + panic(err) + } + return UnmarshalTextTest{in: text, out: msg} +} + +var unMarshalTextTests = []UnmarshalTextTest{ + // Basic + { + in: " count:42\n name:\"Dave\" ", + out: &MyMessage{ + Count: Int32(42), + Name: String("Dave"), + }, + }, + + // Empty quoted string + { + in: `count:42 name:""`, + out: &MyMessage{ + Count: Int32(42), + Name: String(""), + }, + }, + + // Quoted string concatenation with double quotes + { + in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`, + out: &MyMessage{ + Count: Int32(42), + Name: String("My name is elsewhere"), + }, + }, + + // Quoted string concatenation with single quotes + { + in: "count:42 name: 'My name is '\n'elsewhere'", + out: &MyMessage{ + Count: Int32(42), + Name: String("My name is elsewhere"), + }, + }, + + // Quoted string concatenations with mixed quotes + { + in: "count:42 name: 'My name is '\n\"elsewhere\"", + out: &MyMessage{ + Count: Int32(42), + Name: String("My name is elsewhere"), + }, + }, + { + in: "count:42 name: \"My name is \"\n'elsewhere'", + out: &MyMessage{ + Count: Int32(42), + Name: String("My name is elsewhere"), + }, + }, + + // Quoted string with escaped apostrophe + { + in: `count:42 name: "HOLIDAY - New Year\'s Day"`, + out: &MyMessage{ + Count: Int32(42), + Name: String("HOLIDAY - New Year's Day"), + }, + }, + + // Quoted string with single quote + { + in: `count:42 name: 'Roger "The Ramster" Ramjet'`, + out: &MyMessage{ + Count: Int32(42), + Name: String(`Roger "The Ramster" Ramjet`), + }, + }, + + // Quoted string with all the accepted special characters from the C++ test + { + in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"", + out: &MyMessage{ + Count: Int32(42), + Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"), + }, + }, + + // Quoted string with quoted backslash + { + in: `count:42 name: "\\'xyz"`, + out: &MyMessage{ + Count: 
Int32(42),
+			Name: String(`\'xyz`),
+		},
+	},
+
+	// Quoted string with UTF-8 bytes.
+	{
+		in: "count:42 name: '\303\277\302\201\xAB'",
+		out: &MyMessage{
+			Count: Int32(42),
+			Name: String("\303\277\302\201\xAB"),
+		},
+	},
+
+	// Bad quoted string
+	{
+		in:  `inner: < host: "\0" >` + "\n",
+		err: `line 1.15: invalid quoted string "\0": \0 requires 2 following digits`,
+	},
+
+	// Number too large for int64
+	{
+		in:  "count: 1 others { key: 123456789012345678901 }",
+		err: "line 1.23: invalid int64: 123456789012345678901",
+	},
+
+	// Number too large for int32
+	{
+		in:  "count: 1234567890123",
+		err: "line 1.7: invalid int32: 1234567890123",
+	},
+
+	// Number in hexadecimal
+	{
+		in: "count: 0x2beef",
+		out: &MyMessage{
+			Count: Int32(0x2beef),
+		},
+	},
+
+	// Number in octal
+	{
+		in: "count: 024601",
+		out: &MyMessage{
+			Count: Int32(024601),
+		},
+	},
+
+	// Floating point number with "f" suffix
+	{
+		in: "count: 4 others:< weight: 17.0f >",
+		out: &MyMessage{
+			Count: Int32(4),
+			Others: []*OtherMessage{
+				{
+					Weight: Float32(17),
+				},
+			},
+		},
+	},
+
+	// Floating point positive infinity
+	{
+		in: "count: 4 bigfloat: inf",
+		out: &MyMessage{
+			Count: Int32(4),
+			Bigfloat: Float64(math.Inf(1)),
+		},
+	},
+
+	// Floating point negative infinity
+	{
+		in: "count: 4 bigfloat: -inf",
+		out: &MyMessage{
+			Count: Int32(4),
+			Bigfloat: Float64(math.Inf(-1)),
+		},
+	},
+
+	// Number too large for float32
+	{
+		in:  "others:< weight: 12345678901234567890123456789012345678901234567890 >",
+		err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890",
+	},
+
+	// Number posing as a quoted string
+	{
+		in:  `inner: < host: 12 >` + "\n",
+		err: `line 1.15: invalid string: 12`,
+	},
+
+	// Quoted string posing as int32
+	{
+		in:  `count: "12"`,
+		err: `line 1.7: invalid int32: "12"`,
+	},
+
+	// Quoted string posing as a float32
+	{
+		in:  `others:< weight: "17.4" >`,
+		err: `line 1.17: invalid float32: "17.4"`,
+	},
+
+	// Enum
+	{
+		in: `count:42 bikeshed: BLUE`,
+		out: &MyMessage{
+			Count: Int32(42),
+			Bikeshed: MyMessage_BLUE.Enum(),
+		},
+	},
+
+	// Repeated field
+	{
+		in: `count:42 pet: "horsey" pet:"bunny"`,
+		out: &MyMessage{
+			Count: Int32(42),
+			Pet: []string{"horsey", "bunny"},
+		},
+	},
+
+	// Repeated field with list notation
+	{
+		in: `count:42 pet: ["horsey", "bunny"]`,
+		out: &MyMessage{
+			Count: Int32(42),
+			Pet: []string{"horsey", "bunny"},
+		},
+	},
+
+	// Repeated message with/without colon and <>/{}
+	{
+		in: `count:42 others:{} others{} others:<> others:{}`,
+		out: &MyMessage{
+			Count: Int32(42),
+			Others: []*OtherMessage{
+				{},
+				{},
+				{},
+				{},
+			},
+		},
+	},
+
+	// Missing colon for inner message
+	{
+		in: `count:42 inner < host: "cauchy.syd" >`,
+		out: &MyMessage{
+			Count: Int32(42),
+			Inner: &InnerMessage{
+				Host: String("cauchy.syd"),
+			},
+		},
+	},
+
+	// Missing colon for string field
+	{
+		in:  `name "Dave"`,
+		err: `line 1.5: expected ':', found "\"Dave\""`,
+	},
+
+	// Missing colon for int32 field
+	{
+		in:  `count 42`,
+		err: `line 1.6: expected ':', found "42"`,
+	},
+
+	// Missing required field
+	{
+		in:  `name: "Pawel"`,
+		err: `proto: required field "testdata.MyMessage.count" not set`,
+		out: &MyMessage{
+			Name: String("Pawel"),
+		},
+	},
+
+	// Missing required field in a required submessage
+	{
+		in:  `count: 42 we_must_go_deeper < leo_finally_won_an_oscar <> >`,
+		err: `proto: required field "testdata.InnerMessage.host" not set`,
+		out: &MyMessage{
+			Count: Int32(42),
+			WeMustGoDeeper: &RequiredInnerMessage{LeoFinallyWonAnOscar:
&InnerMessage{}}, + }, + }, + + // Repeated non-repeated field + { + in: `name: "Rob" name: "Russ"`, + err: `line 1.12: non-repeated field "name" was repeated`, + }, + + // Group + { + in: `count: 17 SomeGroup { group_field: 12 }`, + out: &MyMessage{ + Count: Int32(17), + Somegroup: &MyMessage_SomeGroup{ + GroupField: Int32(12), + }, + }, + }, + + // Semicolon between fields + { + in: `count:3;name:"Calvin"`, + out: &MyMessage{ + Count: Int32(3), + Name: String("Calvin"), + }, + }, + // Comma between fields + { + in: `count:4,name:"Ezekiel"`, + out: &MyMessage{ + Count: Int32(4), + Name: String("Ezekiel"), + }, + }, + + // Boolean false + { + in: `count:42 inner { host: "example.com" connected: false }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(false), + }, + }, + }, + // Boolean true + { + in: `count:42 inner { host: "example.com" connected: true }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(true), + }, + }, + }, + // Boolean 0 + { + in: `count:42 inner { host: "example.com" connected: 0 }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(false), + }, + }, + }, + // Boolean 1 + { + in: `count:42 inner { host: "example.com" connected: 1 }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(true), + }, + }, + }, + // Boolean f + { + in: `count:42 inner { host: "example.com" connected: f }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(false), + }, + }, + }, + // Boolean t + { + in: `count:42 inner { host: "example.com" connected: t }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(true), + }, + }, + }, + // Boolean False + { + in: `count:42 inner { host: "example.com" connected: False }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(false), + }, + }, + }, + // Boolean True + { + in: `count:42 inner { host: "example.com" connected: True }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(true), + }, + }, + }, + + // Extension + buildExtStructTest(`count: 42 [testdata.Ext.more]:`), + buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`), + buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" 
[testdata.Ext.number]:1729`),
+	buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`),
+
+	// Big all-in-one
+	{
+		in: "count:42 # Meaning\n" +
+			`name:"Dave" ` +
+			`quote:"\"I didn't want to go.\"" ` +
+			`pet:"bunny" ` +
+			`pet:"kitty" ` +
+			`pet:"horsey" ` +
+			`inner:<` +
+			` host:"footrest.syd" ` +
+			` port:7001 ` +
+			` connected:true ` +
+			`> ` +
+			`others:<` +
+			` key:3735928559 ` +
+			` value:"\x01A\a\f" ` +
+			`> ` +
+			`others:<` +
+			" weight:58.9 # Atomic weight of Co\n" +
+			` inner:<` +
+			` host:"lesha.mtv" ` +
+			` port:8002 ` +
+			` >` +
+			`>`,
+		out: &MyMessage{
+			Count: Int32(42),
+			Name: String("Dave"),
+			Quote: String(`"I didn't want to go."`),
+			Pet: []string{"bunny", "kitty", "horsey"},
+			Inner: &InnerMessage{
+				Host: String("footrest.syd"),
+				Port: Int32(7001),
+				Connected: Bool(true),
+			},
+			Others: []*OtherMessage{
+				{
+					Key: Int64(3735928559),
+					Value: []byte{0x1, 'A', '\a', '\f'},
+				},
+				{
+					Weight: Float32(58.9),
+					Inner: &InnerMessage{
+						Host: String("lesha.mtv"),
+						Port: Int32(8002),
+					},
+				},
+			},
+		},
+	},
+}
+
+func TestUnmarshalText(t *testing.T) {
+	for i, test := range unMarshalTextTests {
+		pb := new(MyMessage)
+		err := UnmarshalText(test.in, pb)
+		if test.err == "" {
+			// We don't expect failure.
+			if err != nil {
+				t.Errorf("Test %d: Unexpected error: %v", i, err)
+			} else if !reflect.DeepEqual(pb, test.out) {
+				t.Errorf("Test %d: Incorrectly populated\nHave: %v\nWant: %v",
+					i, pb, test.out)
+			}
+		} else {
+			// We do expect failure.
+			if err == nil {
+				t.Errorf("Test %d: Didn't get expected error: %v", i, test.err)
+			} else if err.Error() != test.err {
+				t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v",
+					i, err.Error(), test.err)
+			} else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) {
+				t.Errorf("Test %d: Incorrectly populated\nHave: %v\nWant: %v",
+					i, pb, test.out)
+			}
+		}
+	}
+}
+
+func TestUnmarshalTextCustomMessage(t *testing.T) {
+	msg := &textMessage{}
+	if err := UnmarshalText("custom", msg); err != nil {
+		t.Errorf("Unexpected error from custom unmarshal: %v", err)
+	}
+	if UnmarshalText("not custom", msg) == nil {
+		t.Errorf("Didn't get expected error from custom unmarshal")
+	}
+}
+
+// Regression test; this caused a panic.
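+// The panic was triggered by unmarshaling text into a repeated enum field, as exercised here.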
+func TestRepeatedEnum(t *testing.T) {
+	pb := new(RepeatedEnum)
+	if err := UnmarshalText("color: RED", pb); err != nil {
+		t.Fatal(err)
+	}
+	exp := &RepeatedEnum{
+		Color: []RepeatedEnum_Color{RepeatedEnum_RED},
+	}
+	if !Equal(pb, exp) {
+		t.Errorf("Incorrectly populated\nHave: %v\nWant: %v", pb, exp)
+	}
+}
+
+func TestProto3TextParsing(t *testing.T) {
+	m := new(proto3pb.Message)
+	const in = `name: "Wallace" true_scotsman: true`
+	want := &proto3pb.Message{
+		Name: "Wallace",
+		TrueScotsman: true,
+	}
+	if err := UnmarshalText(in, m); err != nil {
+		t.Fatal(err)
+	}
+	if !Equal(m, want) {
+		t.Errorf("\n got %v\nwant %v", m, want)
+	}
+}
+
+func TestMapParsing(t *testing.T) {
+	m := new(MessageWithMap)
+	const in = `name_mapping:<key:1234,value:"Feist"> name_mapping:<key:1 value:"Beatles">` +
+		`msg_mapping:<key:-4, value:<f: 2.0>,>` + // separating commas are okay
+		`msg_mapping<key:-2 value<f: 4.0>>` + // no colon after "value"
+		`msg_mapping:<value:<f: 5.0>>` + // omitted key
+		`msg_mapping:<key:1>` + // omitted value
+		`byte_mapping:<key:true value:"so be it">` +
+		`byte_mapping:<>` // omitted key and value
+	want := &MessageWithMap{
+		NameMapping: map[int32]string{
+			1: "Beatles",
+			1234: "Feist",
+		},
+		MsgMapping: map[int64]*FloatingPoint{
+			-4: {F: Float64(2.0)},
+			-2: {F: Float64(4.0)},
+			0: {F: Float64(5.0)},
+			1: nil,
+		},
+		ByteMapping: map[bool][]byte{
+			false: nil,
+			true: []byte("so be it"),
+		},
+	}
+	if err := UnmarshalText(in, m); err != nil {
+		t.Fatal(err)
+	}
+	if !Equal(m, want) {
+		t.Errorf("\n got %v\nwant %v", m, want)
+	}
+}
+
+func TestOneofParsing(t *testing.T) {
+	const in = `name:"Shrek"`
+	m := new(Communique)
+	want := &Communique{Union: &Communique_Name{"Shrek"}}
+	if err := UnmarshalText(in, m); err != nil {
+		t.Fatal(err)
+	}
+	if !Equal(m, want) {
+		t.Errorf("\n got %v\nwant %v", m, want)
+	}
+
+	const inOverwrite = `name:"Shrek" number:42`
+	m = new(Communique)
+	testErr := "line 1.13: field 'number' would overwrite already parsed oneof 'Union'"
+	if err := UnmarshalText(inOverwrite, m); err == nil {
+		t.Errorf("TestOneofParsing: Didn't get expected error: %v", testErr)
+	} else if err.Error() != testErr {
+		t.Errorf("TestOneofParsing: Incorrect error.\nHave: %v\nWant: %v",
+			err.Error(), testErr)
+	}
+
+}
+
+var benchInput string
+
+func init() {
+	benchInput = "count: 4\n"
+	for i := 0; i < 1000; i++ {
+		benchInput += "pet: \"fido\"\n"
+	}
+
+	// Check it is valid input.
+	pb := new(MyMessage)
+	err := UnmarshalText(benchInput, pb)
+	if err != nil {
+		panic("Bad benchmark input: " + err.Error())
+	}
+}
+
+func BenchmarkUnmarshalText(b *testing.B) {
+	pb := new(MyMessage)
+	for i := 0; i < b.N; i++ {
+		UnmarshalText(benchInput, pb)
+	}
+	b.SetBytes(int64(len(benchInput)))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/text_test.go b/vendor/github.com/golang/protobuf/proto/text_test.go
new file mode 100644
index 0000000..3eabaca
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_test.go
@@ -0,0 +1,474 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "bytes" + "errors" + "io/ioutil" + "math" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + pb "github.com/golang/protobuf/proto/testdata" +) + +// textMessage implements the methods that allow it to marshal and unmarshal +// itself as text. +type textMessage struct { +} + +func (*textMessage) MarshalText() ([]byte, error) { + return []byte("custom"), nil +} + +func (*textMessage) UnmarshalText(bytes []byte) error { + if string(bytes) != "custom" { + return errors.New("expected 'custom'") + } + return nil +} + +func (*textMessage) Reset() {} +func (*textMessage) String() string { return "" } +func (*textMessage) ProtoMessage() {} + +func newTestMessage() *pb.MyMessage { + msg := &pb.MyMessage{ + Count: proto.Int32(42), + Name: proto.String("Dave"), + Quote: proto.String(`"I didn't want to go."`), + Pet: []string{"bunny", "kitty", "horsey"}, + Inner: &pb.InnerMessage{ + Host: proto.String("footrest.syd"), + Port: proto.Int32(7001), + Connected: proto.Bool(true), + }, + Others: []*pb.OtherMessage{ + { + Key: proto.Int64(0xdeadbeef), + Value: []byte{1, 65, 7, 12}, + }, + { + Weight: proto.Float32(6.022), + Inner: &pb.InnerMessage{ + Host: proto.String("lesha.mtv"), + Port: proto.Int32(8002), + }, + }, + }, + Bikeshed: pb.MyMessage_BLUE.Enum(), + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(8), + }, + // One normally wouldn't do this. + // This is an undeclared tag 13, as a varint (wire type 0) with value 4. + XXX_unrecognized: []byte{13<<3 | 0, 4}, + } + ext := &pb.Ext{ + Data: proto.String("Big gobs for big rats"), + } + if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil { + panic(err) + } + greetings := []string{"adg", "easy", "cow"} + if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil { + panic(err) + } + + // Add an unknown extension. We marshal a pb.Ext, and fake the ID. + b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")}) + if err != nil { + panic(err) + } + b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...) + proto.SetRawExtension(msg, 201, b) + + // Extensions can be plain fields, too, so let's test that. 
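+	// Key is field number 202 with wire type 0 (varint), i.e. 202<<3|WireVarint; the value is 19.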
+	b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19)
+	proto.SetRawExtension(msg, 202, b)
+
+	return msg
+}
+
+const text = `count: 42
+name: "Dave"
+quote: "\"I didn't want to go.\""
+pet: "bunny"
+pet: "kitty"
+pet: "horsey"
+inner: <
+  host: "footrest.syd"
+  port: 7001
+  connected: true
+>
+others: <
+  key: 3735928559
+  value: "\001A\007\014"
+>
+others: <
+  weight: 6.022
+  inner: <
+    host: "lesha.mtv"
+    port: 8002
+  >
+>
+bikeshed: BLUE
+SomeGroup {
+  group_field: 8
+}
+/* 2 unknown bytes */
+13: 4
+[testdata.Ext.more]: <
+  data: "Big gobs for big rats"
+>
+[testdata.greeting]: "adg"
+[testdata.greeting]: "easy"
+[testdata.greeting]: "cow"
+/* 13 unknown bytes */
+201: "\t3G skiing"
+/* 3 unknown bytes */
+202: 19
+`
+
+func TestMarshalText(t *testing.T) {
+	buf := new(bytes.Buffer)
+	if err := proto.MarshalText(buf, newTestMessage()); err != nil {
+		t.Fatalf("proto.MarshalText: %v", err)
+	}
+	s := buf.String()
+	if s != text {
+		t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text)
+	}
+}
+
+func TestMarshalTextCustomMessage(t *testing.T) {
+	buf := new(bytes.Buffer)
+	if err := proto.MarshalText(buf, &textMessage{}); err != nil {
+		t.Fatalf("proto.MarshalText: %v", err)
+	}
+	s := buf.String()
+	if s != "custom" {
+		t.Errorf("Got %q, expected %q", s, "custom")
+	}
+}
+func TestMarshalTextNil(t *testing.T) {
+	want := ""
+	tests := []proto.Message{nil, (*pb.MyMessage)(nil)}
+	for i, test := range tests {
+		buf := new(bytes.Buffer)
+		if err := proto.MarshalText(buf, test); err != nil {
+			t.Fatal(err)
+		}
+		if got := buf.String(); got != want {
+			t.Errorf("%d: got %q want %q", i, got, want)
+		}
+	}
+}
+
+func TestMarshalTextUnknownEnum(t *testing.T) {
+	// The Color enum only specifies values 0-2.
+	m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()}
+	got := m.String()
+	const want = `bikeshed:3 `
+	if got != want {
+		t.Errorf("\n got %q\nwant %q", got, want)
+	}
+}
+
+func TestTextOneof(t *testing.T) {
+	tests := []struct {
+		m proto.Message
+		want string
+	}{
+		// zero message
+		{&pb.Communique{}, ``},
+		// scalar field
+		{&pb.Communique{Union: &pb.Communique_Number{4}}, `number:4`},
+		// message field
+		{&pb.Communique{Union: &pb.Communique_Msg{
+			&pb.Strings{StringField: proto.String("why hello!")},
+		}}, `msg:<string_field:"why hello!" >`},
+		// bad oneof (should not panic)
+		{&pb.Communique{Union: &pb.Communique_Msg{nil}}, `msg:/* nil */`},
+	}
+	for _, test := range tests {
+		got := strings.TrimSpace(test.m.String())
+		if got != test.want {
+			t.Errorf("\n got %s\nwant %s", got, test.want)
+		}
+	}
+}
+
+func BenchmarkMarshalTextBuffered(b *testing.B) {
+	buf := new(bytes.Buffer)
+	m := newTestMessage()
+	for i := 0; i < b.N; i++ {
+		buf.Reset()
+		proto.MarshalText(buf, m)
+	}
+}
+
+func BenchmarkMarshalTextUnbuffered(b *testing.B) {
+	w := ioutil.Discard
+	m := newTestMessage()
+	for i := 0; i < b.N; i++ {
+		proto.MarshalText(w, m)
+	}
+}
+
+func compact(src string) string {
+	// s/[ \n]+/ /g; s/ $//;
+	dst := make([]byte, len(src))
+	space, comment := false, false
+	j := 0
+	for i := 0; i < len(src); i++ {
+		if strings.HasPrefix(src[i:], "/*") {
+			comment = true
+			i++
+			continue
+		}
+		if comment && strings.HasPrefix(src[i:], "*/") {
+			comment = false
+			i++
+			continue
+		}
+		if comment {
+			continue
+		}
+		c := src[i]
+		if c == ' ' || c == '\n' {
+			space = true
+			continue
+		}
+		if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') {
+			space = false
+		}
+		if c == '{' {
+			space = false
+		}
+		if space {
+			dst[j] = ' '
+			j++
+			space = false
+		}
+		dst[j] = c
+		j++
+	}
+	if space {
+		dst[j] = ' '
+		j++
+	}
+	return string(dst[0:j])
+}
+
+var compactText = compact(text)
+
+func TestCompactText(t *testing.T) {
+	s := proto.CompactTextString(newTestMessage())
+	if s != compactText {
+		t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText)
+	}
+}
+
+func TestStringEscaping(t *testing.T) {
+	testCases := []struct {
+		in *pb.Strings
+		out string
+	}{
+		{
+			// Test data from C++ test (TextFormatTest.StringEscape).
+			// Single divergence: we don't escape apostrophes.
+			&pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")},
+			"string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n",
+		},
+		{
+			// Test data from the same C++ test.
+			&pb.Strings{StringField: proto.String("\350\260\267\346\255\214")},
+			"string_field: \"\\350\\260\\267\\346\\255\\214\"\n",
+		},
+		{
+			// Some non-UTF-8 bytes.
+			&pb.Strings{StringField: proto.String("\x00\x01\xff\x81")},
+			`string_field: "\000\001\377\201"` + "\n",
+		},
+	}
+
+	for i, tc := range testCases {
+		var buf bytes.Buffer
+		if err := proto.MarshalText(&buf, tc.in); err != nil {
+			t.Errorf("proto.MarshalText: %v", err)
+			continue
+		}
+		s := buf.String()
+		if s != tc.out {
+			t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out)
+			continue
+		}
+
+		// Check round-trip.
+		pb := new(pb.Strings)
+		if err := proto.UnmarshalText(s, pb); err != nil {
+			t.Errorf("#%d: UnmarshalText: %v", i, err)
+			continue
+		}
+		if !proto.Equal(pb, tc.in) {
+			t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb)
+		}
+	}
+}
+
+// A limitedWriter accepts some output before it fails.
+// This is a proxy for something like a nearly-full or imminently-failing disk,
+// or a network connection that is about to die.
+type limitedWriter struct {
+	b bytes.Buffer
+	limit int
+}
+
+var outOfSpace = errors.New("proto: insufficient space")
+
+func (w *limitedWriter) Write(p []byte) (n int, err error) {
+	var avail = w.limit - w.b.Len()
+	if avail <= 0 {
+		return 0, outOfSpace
+	}
+	if len(p) <= avail {
+		return w.b.Write(p)
+	}
+	n, _ = w.b.Write(p[:avail])
+	return n, outOfSpace
+}
+
+func TestMarshalTextFailing(t *testing.T) {
+	// Try lots of different sizes to exercise more error code-paths.
+	for lim := 0; lim < len(text); lim++ {
+		buf := new(limitedWriter)
+		buf.limit = lim
+		err := proto.MarshalText(buf, newTestMessage())
+		// We expect a certain error, but also some partial results in the buffer.
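+		// The buffer should contain exactly the first buf.limit bytes of the full text output.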
+		if err != outOfSpace {
+			t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace)
+		}
+		s := buf.b.String()
+		x := text[:buf.limit]
+		if s != x {
+			t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x)
+		}
+	}
+}
+
+func TestFloats(t *testing.T) {
+	tests := []struct {
+		f float64
+		want string
+	}{
+		{0, "0"},
+		{4.7, "4.7"},
+		{math.Inf(1), "inf"},
+		{math.Inf(-1), "-inf"},
+		{math.NaN(), "nan"},
+	}
+	for _, test := range tests {
+		msg := &pb.FloatingPoint{F: &test.f}
+		got := strings.TrimSpace(msg.String())
+		want := `f:` + test.want
+		if got != want {
+			t.Errorf("f=%f: got %q, want %q", test.f, got, want)
+		}
+	}
+}
+
+func TestRepeatedNilText(t *testing.T) {
+	m := &pb.MessageList{
+		Message: []*pb.MessageList_Message{
+			nil,
+			&pb.MessageList_Message{
+				Name: proto.String("Horse"),
+			},
+			nil,
+		},
+	}
+	want := `Message <nil>
+Message {
+  name: "Horse"
+}
+Message <nil>
+`
+	if s := proto.MarshalTextString(m); s != want {
+		t.Errorf(" got: %s\nwant: %s", s, want)
+	}
+}
+
+func TestProto3Text(t *testing.T) {
+	tests := []struct {
+		m proto.Message
+		want string
+	}{
+		// zero message
+		{&proto3pb.Message{}, ``},
+		// zero message except for an empty byte slice
+		{&proto3pb.Message{Data: []byte{}}, ``},
+		// trivial case
+		{&proto3pb.Message{Name: "Rob", HeightInCm: 175}, `name:"Rob" height_in_cm:175`},
+		// empty map
+		{&pb.MessageWithMap{}, ``},
+		// non-empty map; map format is the same as a repeated struct,
+		// and they are sorted by key (numerically for numeric keys).
+		{
+			&pb.MessageWithMap{NameMapping: map[int32]string{
+				-1: "Negatory",
+				7: "Lucky",
+				1234: "Feist",
+				6345789: "Otis",
+			}},
+			`name_mapping:<key:-1 value:"Negatory" > ` +
+				`name_mapping:<key:7 value:"Lucky" > ` +
+				`name_mapping:<key:1234 value:"Feist" > ` +
+				`name_mapping:<key:6345789 value:"Otis" >`,
+		},
+		// map with nil value; not well-defined, but we shouldn't crash
+		{
+			&pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{7: nil}},
+			`msg_mapping:<key:7 >`,
+		},
+	}
+	for _, test := range tests {
+		got := strings.TrimSpace(test.m.String())
+		if got != test.want {
+			t.Errorf("\n got %s\nwant %s", got, test.want)
+		}
+	}
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/Makefile
new file mode 100644
index 0000000..a42cc37
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/Makefile
@@ -0,0 +1,33 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +test: + cd testdata && make test diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile new file mode 100644 index 0000000..f706871 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile @@ -0,0 +1,37 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Not stored here, but descriptor.proto is in https://github.com/google/protobuf/ +# at src/google/protobuf/descriptor.proto +regenerate: + @echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION + cp $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto . + protoc --go_out=../../../../.. -I$(HOME)/src/protobuf/include $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go new file mode 100644 index 0000000..c6a91bc --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go @@ -0,0 +1,2215 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/descriptor.proto + +/* +Package descriptor is a generated protocol buffer package. 
+ +It is generated from these files: + google/protobuf/descriptor.proto + +It has these top-level messages: + FileDescriptorSet + FileDescriptorProto + DescriptorProto + ExtensionRangeOptions + FieldDescriptorProto + OneofDescriptorProto + EnumDescriptorProto + EnumValueDescriptorProto + ServiceDescriptorProto + MethodDescriptorProto + FileOptions + MessageOptions + FieldOptions + OneofOptions + EnumOptions + EnumValueOptions + ServiceOptions + MethodOptions + UninterpretedOption + SourceCodeInfo + GeneratedCodeInfo +*/ +package descriptor + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. 
+ FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0} } + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. 
+ FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{10, 0} } + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 0} } + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. + FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 1} } + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. 
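+//
+// As a rough sketch (httpVerbFor below is a hypothetical helper, not part of
+// this package), an RPC layer might branch on the level via the generated
+// getter:
+//
+//	func httpVerbFor(opts *MethodOptions) string {
+//		switch opts.GetIdempotencyLevel() {
+//		case MethodOptions_NO_SIDE_EFFECTS:
+//			return "GET"
+//		case MethodOptions_IDEMPOTENT:
+//			return "PUT"
+//		default:
+//			return "POST"
+//		}
+//	}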
+type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. 
+ SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. +type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. 
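+	// For example, a declaration such as (the names are illustrative)
+	//	reserved "foo", "bar";
+	// populates this list.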
+ ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{2, 0} +} + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. 
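+//
+// For example, a declaration such as (the numbers are illustrative)
+//
+//	reserved 2, 15, 9 to 11;
+//
+// yields one ReservedRange per listed entry.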
+type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{2, 1} +} + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? 
+ DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *FieldDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. +type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. 
+type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a value within an enum. +type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. +type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *ServiceDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
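+	// For example, ".google.protobuf.Empty" (note the leading '.') is
+	// fully-qualified and bypasses the C++-like scoping rules described
+	// for type_name above.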
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. 
+	JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"`
+	// If set true, then the Java2 code generator will generate code that
+	// throws an exception whenever an attempt is made to assign a non-UTF-8
+	// byte sequence to a string field.
+	// Message reflection will do the same.
+	// However, an extension field still accepts non-UTF-8 byte sequences.
+	// This option has no effect when used with the lite runtime.
+	JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"`
+	OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"`
+	// Sets the Go package where structs generated from this .proto will be
+	// placed. If omitted, the Go package will be derived from the following:
+	//   - The basename of the package import path, if provided.
+	//   - Otherwise, the package statement in the .proto file, if present.
+	//   - Otherwise, the basename of the .proto file, without extension.
+	GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"`
+	// Should generic services be generated in each language? "Generic" services
+	// are not specific to any particular RPC system. They are generated by the
+	// main code generators in each language (without additional plugins).
+	// Generic services were the only kind of service generation supported by
+	// early versions of google.protobuf.
+	//
+	// Generic services are now considered deprecated in favor of using plugins
+	// that generate code specific to your particular RPC system. Therefore,
+	// these default to false. Old code which depends on generic services should
+	// explicitly set them to true.
+	CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"`
+	JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"`
+	PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"`
+	PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"`
+	// Is this file deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for everything in the file, or it will be completely ignored; in the very
+	// least, this is a formalization for deprecating files.
+	Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// Enables the use of arenas for the proto messages in this file. This applies
+	// only to generated classes for C++.
+	CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"`
+	// Sets the objective c class prefix which is prepended to all objective c
+	// generated classes from this .proto. There is no default.
+	ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"`
+	// Namespace for generated classes; defaults to the package.
+	CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"`
+	// By default Swift generators will take the proto package and CamelCase it,
+	// replacing '.' with underscore, and use that to prefix the types/symbols
+	// defined. When this option is provided, they will use this value instead
+	// to prefix the types/symbols defined.
+	SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"`
+	// Sets the php class prefix which is prepended to all php generated classes
+	// from this .proto. Default is empty.
+	PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"`
+	// Use this option to change the namespace of php generated classes. Default
+	// is empty. When this option is empty, the package name will be used for
+	// determining the namespace.
+	PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FileOptions) Reset()                    { *m = FileOptions{} }
+func (m *FileOptions) String() string            { return proto.CompactTextString(m) }
+func (*FileOptions) ProtoMessage()               {}
+func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+
+var extRange_FileOptions = []proto.ExtensionRange{
+	{1000, 536870911},
+}
+
+func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_FileOptions
+}
+
+const Default_FileOptions_JavaMultipleFiles bool = false
+const Default_FileOptions_JavaStringCheckUtf8 bool = false
+const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED
+const Default_FileOptions_CcGenericServices bool = false
+const Default_FileOptions_JavaGenericServices bool = false
+const Default_FileOptions_PyGenericServices bool = false
+const Default_FileOptions_PhpGenericServices bool = false
+const Default_FileOptions_Deprecated bool = false
+const Default_FileOptions_CcEnableArenas bool = false
+
+func (m *FileOptions) GetJavaPackage() string {
+	if m != nil && m.JavaPackage != nil {
+		return *m.JavaPackage
+	}
+	return ""
+}
+
+func (m *FileOptions) GetJavaOuterClassname() string {
+	if m != nil && m.JavaOuterClassname != nil {
+		return *m.JavaOuterClassname
+	}
+	return ""
+}
+
+func (m *FileOptions) GetJavaMultipleFiles() bool {
+	if m != nil && m.JavaMultipleFiles != nil {
+		return *m.JavaMultipleFiles
+	}
+	return Default_FileOptions_JavaMultipleFiles
+}
+
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool {
+	if m != nil && m.JavaGenerateEqualsAndHash != nil {
+		return *m.JavaGenerateEqualsAndHash
+	}
+	return false
+}
+
+func (m *FileOptions) GetJavaStringCheckUtf8() bool {
+	if m != nil && m.JavaStringCheckUtf8 != nil {
+		return *m.JavaStringCheckUtf8
+	}
+	return Default_FileOptions_JavaStringCheckUtf8
+}
+
+func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode {
+	if m != nil && m.OptimizeFor != nil {
+		return *m.OptimizeFor
+	}
+	return Default_FileOptions_OptimizeFor
+}
+
+func (m *FileOptions) GetGoPackage() string {
+	if m != nil && m.GoPackage != nil {
+		return *m.GoPackage
+	}
+	return ""
+}
+
+func (m *FileOptions)
GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix + } + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. 
+	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// Whether the message is an automatically generated map entry type for the
+	// maps field.
+	//
+	// For maps fields:
+	//     map<KeyType, ValueType> map_field = 1;
+	// The parsed descriptor looks like:
+	//     message MapFieldEntry {
+	//         option map_entry = true;
+	//         optional KeyType key = 1;
+	//         optional ValueType value = 2;
+	//     }
+	//     repeated MapFieldEntry map_field = 1;
+	//
+	// Implementations may choose not to generate the map_entry=true message, but
+	// use a native map in the target language to hold the keys and values.
+	// The reflection APIs in such implementations still need to work as
+	// if the field is a repeated message field.
+	//
+	// NOTE: Do not set the option in .proto files. Always use the maps syntax
+	// instead. The option should only be implicitly set by the proto compiler
+	// parser.
+	MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MessageOptions) Reset()                    { *m = MessageOptions{} }
+func (m *MessageOptions) String() string            { return proto.CompactTextString(m) }
+func (*MessageOptions) ProtoMessage()               {}
+func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+
+var extRange_MessageOptions = []proto.ExtensionRange{
+	{1000, 536870911},
+}
+
+func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_MessageOptions
+}
+
+const Default_MessageOptions_MessageSetWireFormat bool = false
+const Default_MessageOptions_NoStandardDescriptorAccessor bool = false
+const Default_MessageOptions_Deprecated bool = false
+
+func (m *MessageOptions) GetMessageSetWireFormat() bool {
+	if m != nil && m.MessageSetWireFormat != nil {
+		return *m.MessageSetWireFormat
+	}
+	return Default_MessageOptions_MessageSetWireFormat
+}
+
+func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool {
+	if m != nil && m.NoStandardDescriptorAccessor != nil {
+		return *m.NoStandardDescriptorAccessor
+	}
+	return Default_MessageOptions_NoStandardDescriptorAccessor
+}
+
+func (m *MessageOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_MessageOptions_Deprecated
+}
+
+func (m *MessageOptions) GetMapEntry() bool {
+	if m != nil && m.MapEntry != nil {
+		return *m.MapEntry
+	}
+	return false
+}
+
+func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type FieldOptions struct {
+	// The ctype option instructs the C++ code generator to use a different
+	// representation of the field than it normally would. See the specific
+	// options below. This option is not yet implemented in the open source
+	// release -- sorry, we'll try to include it in a future version!
+	Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"`
+	// The packed option can be enabled for repeated primitive fields to enable
+	// a more efficient representation on the wire. Rather than repeatedly
+	// writing the tag and type for each element, the entire array is encoded as
+	// a single length-delimited blob. In proto3, only explicitly setting it to
+	// false will avoid using packed encoding.
+	Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"`
+	// The jstype option determines the JavaScript type used for values of the
+	// field. The option is permitted only for 64 bit integral and fixed types
+	// (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
+	// is represented as JavaScript string, which avoids loss of precision that
+	// can happen when a large value is converted to a floating point JavaScript
+	// number. Specifying JS_NUMBER for the jstype causes the generated
+	// JavaScript code to use the JavaScript "number" type. The behavior of the
+	// default option JS_NORMAL is implementation dependent.
+	//
+	// This option is an enum to permit additional types to be added, e.g.
+	// goog.math.Integer.
+	Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"`
+	// Should this field be parsed lazily? Lazy applies only to message-type
+	// fields. It means that when the outer message is initially parsed, the
+	// inner message's contents will not be parsed but instead stored in encoded
+	// form. The inner message will actually be parsed when it is first accessed.
+	//
+	// This is only a hint. Implementations are free to choose whether to use
+	// eager or lazy parsing regardless of the value of this option. However,
+	// setting this option true suggests that the protocol author believes that
+	// using lazy parsing on this field is worth the additional bookkeeping
+	// overhead typically needed to implement it.
+	//
+	// This option does not affect the public interface of any generated code;
+	// all method signatures remain the same. Furthermore, thread-safety of the
+	// interface is not affected by this option; const methods remain safe to
+	// call from multiple threads concurrently, while non-const methods continue
+	// to require exclusive access.
+	//
+	// Note that implementations may choose not to check required fields within
+	// a lazy sub-message. That is, calling IsInitialized() on the outer message
+	// may return true even if the inner message has missing required fields.
+	// This is necessary because otherwise the inner message would have to be
+	// parsed in order to perform the check, defeating the purpose of lazy
+	// parsing. An implementation which chooses not to check required fields
+	// must be consistent about it. That is, for any particular sub-message, the
+	// implementation must either *always* check its required fields, or *never*
+	// check its required fields, regardless of whether or not the message has
+	// been parsed.
+	Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"`
+	// Is this field deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for accessors, or it will be completely ignored; in the very least, this
+	// is a formalization for deprecating fields.
+	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// For Google-internal migration only. Do not use.
+	Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +var extRange_FieldOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +var extRange_OneofOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +var extRange_EnumOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ServiceOptions) Reset()                    { *m = ServiceOptions{} }
+func (m *ServiceOptions) String() string            { return proto.CompactTextString(m) }
+func (*ServiceOptions) ProtoMessage()               {}
+func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+
+var extRange_ServiceOptions = []proto.ExtensionRange{
+	{1000, 536870911},
+}
+
+func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_ServiceOptions
+}
+
+const Default_ServiceOptions_Deprecated bool = false
+
+func (m *ServiceOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_ServiceOptions_Deprecated
+}
+
+func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type MethodOptions struct {
+	// Is this method deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the method, or it will be completely ignored; in the very least,
+	// this is a formalization for deprecating methods.
+	Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MethodOptions) Reset()                    { *m = MethodOptions{} }
+func (m *MethodOptions) String() string            { return proto.CompactTextString(m) }
+func (*MethodOptions) ProtoMessage()               {}
+func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
+
+var extRange_MethodOptions = []proto.ExtensionRange{
+	{1000, 536870911},
+}
+
+func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_MethodOptions
+}
+
+const Default_MethodOptions_Deprecated bool = false
+const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN
+
+func (m *MethodOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_MethodOptions_Deprecated
+}
+
+func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel {
+	if m != nil && m.IdempotencyLevel != nil {
+		return *m.IdempotencyLevel
+	}
+	return Default_MethodOptions_IdempotencyLevel
+}
+
+func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+// A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
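+//
+// For illustration only: given a custom option extension (my_opt below is a
+// hypothetical name), a line in a .proto file such as
+//
+//	option (my_opt).foo = 42;
+//
+// is first captured here with name parts [("my_opt", is_extension=true),
+// ("foo", is_extension=false)] before the pool resolves it.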
+type UninterpretedOption struct { + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. + IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". 
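The mapping from parts back to the dotted name is mechanical; as a sketch (optionName is a hypothetical helper, not part of the generated API):

package main

import (
	"fmt"
	"strings"

	"github.com/golang/protobuf/proto"
	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// optionName joins NameParts into the dotted option name,
// parenthesizing the segments marked as extensions.
func optionName(parts []*descriptor.UninterpretedOption_NamePart) string {
	segs := make([]string, 0, len(parts))
	for _, p := range parts {
		if p.GetIsExtension() {
			segs = append(segs, "("+p.GetNamePart()+")")
		} else {
			segs = append(segs, p.GetNamePart())
		}
	}
	return strings.Join(segs, ".")
}

func main() {
	parts := []*descriptor.UninterpretedOption_NamePart{
		{NamePart: proto.String("foo"), IsExtension: proto.Bool(false)},
		{NamePart: proto.String("bar.baz"), IsExtension: proto.Bool(true)},
		{NamePart: proto.String("qux"), IsExtension: proto.Bool(false)},
	}
	fmt.Println(optionName(parts)) // prints: foo.(bar.baz).qux
}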
+type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{18, 0} +} + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendent. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. 
+ Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if m != nil { + return m.Location + } + return nil +} + +type SourceCodeInfo_Location struct { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. 
Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19, 0} } + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *SourceCodeInfo_Location) GetSpan() []int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
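+ // In Go slicing terms, the annotated text is generated[begin:end].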
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{20, 0} +} + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, 
FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) +} + +func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 2519 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x6e, 0x1b, 0xc7, + 0x15, 0x0e, 0x7f, 0x45, 0x1e, 0x52, 0xd4, 0x68, 0xa4, 0xd8, 0x6b, 0xe5, 0xc7, 0x32, 0xf3, 0x63, + 0xd9, 0x69, 0xa8, 0x40, 0xb1, 0x1d, 0x47, 0x29, 0xd2, 0x52, 0xe4, 0x5a, 0xa1, 0x4a, 0x91, 0xec, + 0x92, 0x6a, 0x7e, 0x6e, 0x16, 0xa3, 0xdd, 0x21, 0xb9, 0xf6, 0x72, 0x77, 0xb3, 0xbb, 0xb4, 0xad, + 0xa0, 0x17, 0x06, 0x7a, 0x55, 0xa0, 0x0f, 0x50, 0x14, 0x45, 0x2f, 0x72, 0x13, 0xa0, 0x0f, 0x50, + 0x20, 0x77, 0x7d, 0x82, 0x02, 0x79, 0x83, 0xa2, 0x28, 0xd0, 0x3e, 0x46, 0x31, 0x33, 0xbb, 0xcb, + 0x5d, 0xfe, 0xc4, 0x6a, 0x80, 0x38, 0x57, 0xe4, 0x7c, 0xe7, 0x3b, 0x67, 0xce, 0x9c, 0x39, 0x33, + 0x73, 0x66, 0x16, 0x76, 0x47, 0xb6, 0x3d, 0x32, 0xe9, 0xbe, 0xe3, 0xda, 0xbe, 0x7d, 0x3e, 0x1d, + 0xee, 0xeb, 0xd4, 0xd3, 0x5c, 0xc3, 0xf1, 0x6d, 0xb7, 0xc6, 0x31, 0xbc, 0x21, 0x18, 0xb5, 0x90, + 0x51, 0x3d, 0x85, 0xcd, 0x07, 0x86, 0x49, 0x9b, 0x11, 0xb1, 0x4f, 0x7d, 0x7c, 0x1f, 0xb2, 0x43, + 0xc3, 0xa4, 0x52, 0x6a, 0x37, 0xb3, 0x57, 0x3a, 0x78, 0xb3, 0x36, 0xa7, 0x54, 0x4b, 0x6a, 0xf4, + 0x18, 0xac, 0x70, 0x8d, 0xea, 0xbf, 0xb3, 0xb0, 0xb5, 0x44, 0x8a, 0x31, 0x64, 0x2d, 0x32, 0x61, + 0x16, 0x53, 0x7b, 0x45, 0x85, 0xff, 0xc7, 0x12, 0xac, 0x39, 0x44, 0x7b, 0x44, 0x46, 0x54, 0x4a, + 0x73, 0x38, 0x6c, 0xe2, 0xd7, 0x01, 0x74, 0xea, 0x50, 0x4b, 0xa7, 0x96, 0x76, 0x21, 0x65, 0x76, + 0x33, 0x7b, 0x45, 0x25, 0x86, 0xe0, 0x77, 0x60, 0xd3, 0x99, 0x9e, 0x9b, 0x86, 0xa6, 0xc6, 0x68, + 0xb0, 0x9b, 0xd9, 0xcb, 0x29, 0x48, 0x08, 0x9a, 0x33, 0xf2, 0x4d, 0xd8, 0x78, 0x42, 0xc9, 0xa3, + 0x38, 0xb5, 0xc4, 0xa9, 0x15, 0x06, 0xc7, 0x88, 0x0d, 0x28, 0x4f, 0xa8, 0xe7, 0x91, 0x11, 0x55, + 0xfd, 0x0b, 0x87, 0x4a, 0x59, 0x3e, 0xfa, 0xdd, 0x85, 0xd1, 0xcf, 0x8f, 0xbc, 0x14, 0x68, 0x0d, + 0x2e, 0x1c, 0x8a, 0xeb, 0x50, 0xa4, 0xd6, 0x74, 0x22, 0x2c, 0xe4, 0x56, 0xc4, 0x4f, 0xb6, 0xa6, + 0x93, 0x79, 0x2b, 0x05, 0xa6, 0x16, 0x98, 0x58, 0xf3, 0xa8, 0xfb, 0xd8, 0xd0, 0xa8, 0x94, 0xe7, + 0x06, 0x6e, 0x2e, 0x18, 0xe8, 0x0b, 0xf9, 0xbc, 0x8d, 0x50, 0x0f, 0x37, 0xa0, 0x48, 0x9f, 0xfa, + 0xd4, 0xf2, 0x0c, 0xdb, 0x92, 0xd6, 0xb8, 0x91, 0xb7, 0x96, 0xcc, 0x22, 0x35, 0xf5, 0x79, 0x13, + 0x33, 0x3d, 0x7c, 0x0f, 0xd6, 0x6c, 0xc7, 0x37, 0x6c, 0xcb, 0x93, 0x0a, 0xbb, 0xa9, 0xbd, 0xd2, + 0xc1, 0xab, 0x4b, 0x13, 0xa1, 0x2b, 0x38, 0x4a, 0x48, 0xc6, 0x2d, 0x40, 0x9e, 0x3d, 0x75, 0x35, + 0xaa, 0x6a, 0xb6, 0x4e, 0x55, 0xc3, 0x1a, 0xda, 0x52, 0x91, 0x1b, 0xb8, 0xbe, 0x38, 0x10, 0x4e, + 0x6c, 0xd8, 0x3a, 0x6d, 0x59, 0x43, 0x5b, 0xa9, 0x78, 0x89, 0x36, 0xbe, 0x02, 0x79, 0xef, 0xc2, + 0xf2, 0xc9, 0x53, 0xa9, 0xcc, 0x33, 0x24, 0x68, 0x55, 0xbf, 0xcd, 0xc3, 0xc6, 0x65, 0x52, 0xec, + 0x23, 0xc8, 0x0d, 0xd9, 0x28, 0xa5, 0xf4, 0xff, 0x13, 0x03, 0xa1, 0x93, 0x0c, 0x62, 0xfe, 0x07, + 0x06, 0xb1, 0x0e, 0x25, 0x8b, 0x7a, 0x3e, 0xd5, 0x45, 0x46, 0x64, 0x2e, 0x99, 0x53, 0x20, 0x94, + 
0x16, 0x53, 0x2a, 0xfb, 0x83, 0x52, 0xea, 0x33, 0xd8, 0x88, 0x5c, 0x52, 0x5d, 0x62, 0x8d, 0xc2, + 0xdc, 0xdc, 0x7f, 0x9e, 0x27, 0x35, 0x39, 0xd4, 0x53, 0x98, 0x9a, 0x52, 0xa1, 0x89, 0x36, 0x6e, + 0x02, 0xd8, 0x16, 0xb5, 0x87, 0xaa, 0x4e, 0x35, 0x53, 0x2a, 0xac, 0x88, 0x52, 0x97, 0x51, 0x16, + 0xa2, 0x64, 0x0b, 0x54, 0x33, 0xf1, 0x87, 0xb3, 0x54, 0x5b, 0x5b, 0x91, 0x29, 0xa7, 0x62, 0x91, + 0x2d, 0x64, 0xdb, 0x19, 0x54, 0x5c, 0xca, 0xf2, 0x9e, 0xea, 0xc1, 0xc8, 0x8a, 0xdc, 0x89, 0xda, + 0x73, 0x47, 0xa6, 0x04, 0x6a, 0x62, 0x60, 0xeb, 0x6e, 0xbc, 0x89, 0xdf, 0x80, 0x08, 0x50, 0x79, + 0x5a, 0x01, 0xdf, 0x85, 0xca, 0x21, 0xd8, 0x21, 0x13, 0xba, 0xf3, 0x15, 0x54, 0x92, 0xe1, 0xc1, + 0xdb, 0x90, 0xf3, 0x7c, 0xe2, 0xfa, 0x3c, 0x0b, 0x73, 0x8a, 0x68, 0x60, 0x04, 0x19, 0x6a, 0xe9, + 0x7c, 0x97, 0xcb, 0x29, 0xec, 0x2f, 0xfe, 0xe5, 0x6c, 0xc0, 0x19, 0x3e, 0xe0, 0xb7, 0x17, 0x67, + 0x34, 0x61, 0x79, 0x7e, 0xdc, 0x3b, 0x1f, 0xc0, 0x7a, 0x62, 0x00, 0x97, 0xed, 0xba, 0xfa, 0x5b, + 0x78, 0x79, 0xa9, 0x69, 0xfc, 0x19, 0x6c, 0x4f, 0x2d, 0xc3, 0xf2, 0xa9, 0xeb, 0xb8, 0x94, 0x65, + 0xac, 0xe8, 0x4a, 0xfa, 0xcf, 0xda, 0x8a, 0x9c, 0x3b, 0x8b, 0xb3, 0x85, 0x15, 0x65, 0x6b, 0xba, + 0x08, 0xde, 0x2e, 0x16, 0xfe, 0xbb, 0x86, 0x9e, 0x3d, 0x7b, 0xf6, 0x2c, 0x5d, 0xfd, 0x63, 0x1e, + 0xb6, 0x97, 0xad, 0x99, 0xa5, 0xcb, 0xf7, 0x0a, 0xe4, 0xad, 0xe9, 0xe4, 0x9c, 0xba, 0x3c, 0x48, + 0x39, 0x25, 0x68, 0xe1, 0x3a, 0xe4, 0x4c, 0x72, 0x4e, 0x4d, 0x29, 0xbb, 0x9b, 0xda, 0xab, 0x1c, + 0xbc, 0x73, 0xa9, 0x55, 0x59, 0x6b, 0x33, 0x15, 0x45, 0x68, 0xe2, 0x8f, 0x21, 0x1b, 0x6c, 0xd1, + 0xcc, 0xc2, 0xed, 0xcb, 0x59, 0x60, 0x6b, 0x49, 0xe1, 0x7a, 0xf8, 0x15, 0x28, 0xb2, 0x5f, 0x91, + 0x1b, 0x79, 0xee, 0x73, 0x81, 0x01, 0x2c, 0x2f, 0xf0, 0x0e, 0x14, 0xf8, 0x32, 0xd1, 0x69, 0x78, + 0xb4, 0x45, 0x6d, 0x96, 0x58, 0x3a, 0x1d, 0x92, 0xa9, 0xe9, 0xab, 0x8f, 0x89, 0x39, 0xa5, 0x3c, + 0xe1, 0x8b, 0x4a, 0x39, 0x00, 0x7f, 0xc3, 0x30, 0x7c, 0x1d, 0x4a, 0x62, 0x55, 0x19, 0x96, 0x4e, + 0x9f, 0xf2, 0xdd, 0x33, 0xa7, 0x88, 0x85, 0xd6, 0x62, 0x08, 0xeb, 0xfe, 0xa1, 0x67, 0x5b, 0x61, + 0x6a, 0xf2, 0x2e, 0x18, 0xc0, 0xbb, 0xff, 0x60, 0x7e, 0xe3, 0x7e, 0x6d, 0xf9, 0xf0, 0xe6, 0x73, + 0xaa, 0xfa, 0xb7, 0x34, 0x64, 0xf9, 0x7e, 0xb1, 0x01, 0xa5, 0xc1, 0xe7, 0x3d, 0x59, 0x6d, 0x76, + 0xcf, 0x8e, 0xda, 0x32, 0x4a, 0xe1, 0x0a, 0x00, 0x07, 0x1e, 0xb4, 0xbb, 0xf5, 0x01, 0x4a, 0x47, + 0xed, 0x56, 0x67, 0x70, 0xef, 0x0e, 0xca, 0x44, 0x0a, 0x67, 0x02, 0xc8, 0xc6, 0x09, 0xef, 0x1f, + 0xa0, 0x1c, 0x46, 0x50, 0x16, 0x06, 0x5a, 0x9f, 0xc9, 0xcd, 0x7b, 0x77, 0x50, 0x3e, 0x89, 0xbc, + 0x7f, 0x80, 0xd6, 0xf0, 0x3a, 0x14, 0x39, 0x72, 0xd4, 0xed, 0xb6, 0x51, 0x21, 0xb2, 0xd9, 0x1f, + 0x28, 0xad, 0xce, 0x31, 0x2a, 0x46, 0x36, 0x8f, 0x95, 0xee, 0x59, 0x0f, 0x41, 0x64, 0xe1, 0x54, + 0xee, 0xf7, 0xeb, 0xc7, 0x32, 0x2a, 0x45, 0x8c, 0xa3, 0xcf, 0x07, 0x72, 0x1f, 0x95, 0x13, 0x6e, + 0xbd, 0x7f, 0x80, 0xd6, 0xa3, 0x2e, 0xe4, 0xce, 0xd9, 0x29, 0xaa, 0xe0, 0x4d, 0x58, 0x17, 0x5d, + 0x84, 0x4e, 0x6c, 0xcc, 0x41, 0xf7, 0xee, 0x20, 0x34, 0x73, 0x44, 0x58, 0xd9, 0x4c, 0x00, 0xf7, + 0xee, 0x20, 0x5c, 0x6d, 0x40, 0x8e, 0x67, 0x17, 0xc6, 0x50, 0x69, 0xd7, 0x8f, 0xe4, 0xb6, 0xda, + 0xed, 0x0d, 0x5a, 0xdd, 0x4e, 0xbd, 0x8d, 0x52, 0x33, 0x4c, 0x91, 0x7f, 0x7d, 0xd6, 0x52, 0xe4, + 0x26, 0x4a, 0xc7, 0xb1, 0x9e, 0x5c, 0x1f, 0xc8, 0x4d, 0x94, 0xa9, 0x6a, 0xb0, 0xbd, 0x6c, 0x9f, + 0x5c, 0xba, 0x32, 0x62, 0x53, 0x9c, 0x5e, 0x31, 0xc5, 0xdc, 0xd6, 0xc2, 0x14, 0x7f, 0x9d, 0x82, + 0xad, 0x25, 0x67, 0xc5, 0xd2, 0x4e, 0x7e, 0x01, 0x39, 0x91, 0xa2, 0xe2, 0xf4, 0xbc, 0xb5, 0xf4, + 0xd0, 0xe1, 0x09, 0xbb, 
0x70, 0x82, 0x72, 0xbd, 0x78, 0x05, 0x91, 0x59, 0x51, 0x41, 0x30, 0x13, + 0x0b, 0x4e, 0xfe, 0x2e, 0x05, 0xd2, 0x2a, 0xdb, 0xcf, 0xd9, 0x28, 0xd2, 0x89, 0x8d, 0xe2, 0xa3, + 0x79, 0x07, 0x6e, 0xac, 0x1e, 0xc3, 0x82, 0x17, 0xdf, 0xa4, 0xe0, 0xca, 0xf2, 0x42, 0x6b, 0xa9, + 0x0f, 0x1f, 0x43, 0x7e, 0x42, 0xfd, 0xb1, 0x1d, 0x16, 0x1b, 0x6f, 0x2f, 0x39, 0xc2, 0x98, 0x78, + 0x3e, 0x56, 0x81, 0x56, 0xfc, 0x0c, 0xcc, 0xac, 0xaa, 0x96, 0x84, 0x37, 0x0b, 0x9e, 0xfe, 0x3e, + 0x0d, 0x2f, 0x2f, 0x35, 0xbe, 0xd4, 0xd1, 0xd7, 0x00, 0x0c, 0xcb, 0x99, 0xfa, 0xa2, 0xa0, 0x10, + 0xfb, 0x53, 0x91, 0x23, 0x7c, 0xed, 0xb3, 0xbd, 0x67, 0xea, 0x47, 0xf2, 0x0c, 0x97, 0x83, 0x80, + 0x38, 0xe1, 0xfe, 0xcc, 0xd1, 0x2c, 0x77, 0xf4, 0xf5, 0x15, 0x23, 0x5d, 0x38, 0xab, 0xdf, 0x03, + 0xa4, 0x99, 0x06, 0xb5, 0x7c, 0xd5, 0xf3, 0x5d, 0x4a, 0x26, 0x86, 0x35, 0xe2, 0x1b, 0x70, 0xe1, + 0x30, 0x37, 0x24, 0xa6, 0x47, 0x95, 0x0d, 0x21, 0xee, 0x87, 0x52, 0xa6, 0xc1, 0xcf, 0x38, 0x37, + 0xa6, 0x91, 0x4f, 0x68, 0x08, 0x71, 0xa4, 0x51, 0xfd, 0xb6, 0x00, 0xa5, 0x58, 0x59, 0x8a, 0x6f, + 0x40, 0xf9, 0x21, 0x79, 0x4c, 0xd4, 0xf0, 0xaa, 0x21, 0x22, 0x51, 0x62, 0x58, 0x2f, 0xb8, 0x6e, + 0xbc, 0x07, 0xdb, 0x9c, 0x62, 0x4f, 0x7d, 0xea, 0xaa, 0x9a, 0x49, 0x3c, 0x8f, 0x07, 0xad, 0xc0, + 0xa9, 0x98, 0xc9, 0xba, 0x4c, 0xd4, 0x08, 0x25, 0xf8, 0x2e, 0x6c, 0x71, 0x8d, 0xc9, 0xd4, 0xf4, + 0x0d, 0xc7, 0xa4, 0x2a, 0xbb, 0xfc, 0x78, 0x7c, 0x23, 0x8e, 0x3c, 0xdb, 0x64, 0x8c, 0xd3, 0x80, + 0xc0, 0x3c, 0xf2, 0x70, 0x13, 0x5e, 0xe3, 0x6a, 0x23, 0x6a, 0x51, 0x97, 0xf8, 0x54, 0xa5, 0x5f, + 0x4e, 0x89, 0xe9, 0xa9, 0xc4, 0xd2, 0xd5, 0x31, 0xf1, 0xc6, 0xd2, 0x36, 0x33, 0x70, 0x94, 0x96, + 0x52, 0xca, 0x35, 0x46, 0x3c, 0x0e, 0x78, 0x32, 0xa7, 0xd5, 0x2d, 0xfd, 0x13, 0xe2, 0x8d, 0xf1, + 0x21, 0x5c, 0xe1, 0x56, 0x3c, 0xdf, 0x35, 0xac, 0x91, 0xaa, 0x8d, 0xa9, 0xf6, 0x48, 0x9d, 0xfa, + 0xc3, 0xfb, 0xd2, 0x2b, 0xf1, 0xfe, 0xb9, 0x87, 0x7d, 0xce, 0x69, 0x30, 0xca, 0x99, 0x3f, 0xbc, + 0x8f, 0xfb, 0x50, 0x66, 0x93, 0x31, 0x31, 0xbe, 0xa2, 0xea, 0xd0, 0x76, 0xf9, 0xc9, 0x52, 0x59, + 0xb2, 0xb2, 0x63, 0x11, 0xac, 0x75, 0x03, 0x85, 0x53, 0x5b, 0xa7, 0x87, 0xb9, 0x7e, 0x4f, 0x96, + 0x9b, 0x4a, 0x29, 0xb4, 0xf2, 0xc0, 0x76, 0x59, 0x42, 0x8d, 0xec, 0x28, 0xc0, 0x25, 0x91, 0x50, + 0x23, 0x3b, 0x0c, 0xef, 0x5d, 0xd8, 0xd2, 0x34, 0x31, 0x66, 0x43, 0x53, 0x83, 0x2b, 0x8a, 0x27, + 0xa1, 0x44, 0xb0, 0x34, 0xed, 0x58, 0x10, 0x82, 0x1c, 0xf7, 0xf0, 0x87, 0xf0, 0xf2, 0x2c, 0x58, + 0x71, 0xc5, 0xcd, 0x85, 0x51, 0xce, 0xab, 0xde, 0x85, 0x2d, 0xe7, 0x62, 0x51, 0x11, 0x27, 0x7a, + 0x74, 0x2e, 0xe6, 0xd5, 0x3e, 0x80, 0x6d, 0x67, 0xec, 0x2c, 0xea, 0xdd, 0x8e, 0xeb, 0x61, 0x67, + 0xec, 0xcc, 0x2b, 0xbe, 0xc5, 0xef, 0xab, 0x2e, 0xd5, 0x88, 0x4f, 0x75, 0xe9, 0x6a, 0x9c, 0x1e, + 0x13, 0xe0, 0x7d, 0x40, 0x9a, 0xa6, 0x52, 0x8b, 0x9c, 0x9b, 0x54, 0x25, 0x2e, 0xb5, 0x88, 0x27, + 0x5d, 0x8f, 0x93, 0x2b, 0x9a, 0x26, 0x73, 0x69, 0x9d, 0x0b, 0xf1, 0x6d, 0xd8, 0xb4, 0xcf, 0x1f, + 0x6a, 0x22, 0x25, 0x55, 0xc7, 0xa5, 0x43, 0xe3, 0xa9, 0xf4, 0x26, 0x8f, 0xef, 0x06, 0x13, 0xf0, + 0x84, 0xec, 0x71, 0x18, 0xdf, 0x02, 0xa4, 0x79, 0x63, 0xe2, 0x3a, 0xbc, 0x26, 0xf0, 0x1c, 0xa2, + 0x51, 0xe9, 0x2d, 0x41, 0x15, 0x78, 0x27, 0x84, 0xd9, 0x92, 0xf0, 0x9e, 0x18, 0x43, 0x3f, 0xb4, + 0x78, 0x53, 0x2c, 0x09, 0x8e, 0x05, 0xd6, 0xf6, 0x00, 0xb1, 0x50, 0x24, 0x3a, 0xde, 0xe3, 0xb4, + 0x8a, 0x33, 0x76, 0xe2, 0xfd, 0xbe, 0x01, 0xeb, 0x8c, 0x39, 0xeb, 0xf4, 0x96, 0xa8, 0x67, 0x9c, + 0x71, 0xac, 0xc7, 0x1f, 0xad, 0xb4, 0xac, 0x1e, 0x42, 0x39, 0x9e, 0x9f, 0xb8, 0x08, 0x22, 0x43, + 0x51, 0x8a, 0x9d, 0xf5, 0x8d, 0x6e, 0x93, 0x9d, 
0xd2, 0x5f, 0xc8, 0x28, 0xcd, 0xaa, 0x85, 0x76, + 0x6b, 0x20, 0xab, 0xca, 0x59, 0x67, 0xd0, 0x3a, 0x95, 0x51, 0x26, 0x56, 0x96, 0x9e, 0x64, 0x0b, + 0x6f, 0xa3, 0x9b, 0xd5, 0xef, 0xd2, 0x50, 0x49, 0xde, 0x33, 0xf0, 0xcf, 0xe1, 0x6a, 0xf8, 0x28, + 0xe0, 0x51, 0x5f, 0x7d, 0x62, 0xb8, 0x7c, 0xe1, 0x4c, 0x88, 0xa8, 0xb3, 0xa3, 0xa9, 0xdb, 0x0e, + 0x58, 0x7d, 0xea, 0x7f, 0x6a, 0xb8, 0x6c, 0x59, 0x4c, 0x88, 0x8f, 0xdb, 0x70, 0xdd, 0xb2, 0x55, + 0xcf, 0x27, 0x96, 0x4e, 0x5c, 0x5d, 0x9d, 0x3d, 0xc7, 0xa8, 0x44, 0xd3, 0xa8, 0xe7, 0xd9, 0xe2, + 0xc0, 0x8a, 0xac, 0xbc, 0x6a, 0xd9, 0xfd, 0x80, 0x3c, 0xdb, 0xc9, 0xeb, 0x01, 0x75, 0x2e, 0xcd, + 0x32, 0xab, 0xd2, 0xec, 0x15, 0x28, 0x4e, 0x88, 0xa3, 0x52, 0xcb, 0x77, 0x2f, 0x78, 0x75, 0x59, + 0x50, 0x0a, 0x13, 0xe2, 0xc8, 0xac, 0xfd, 0x42, 0x8a, 0xfc, 0x93, 0x6c, 0xa1, 0x80, 0x8a, 0x27, + 0xd9, 0x42, 0x11, 0x41, 0xf5, 0x5f, 0x19, 0x28, 0xc7, 0xab, 0x4d, 0x56, 0xbc, 0x6b, 0xfc, 0x64, + 0x49, 0xf1, 0xbd, 0xe7, 0x8d, 0xef, 0xad, 0x4d, 0x6b, 0x0d, 0x76, 0xe4, 0x1c, 0xe6, 0x45, 0x0d, + 0xa8, 0x08, 0x4d, 0x76, 0xdc, 0xb3, 0xdd, 0x86, 0x8a, 0x7b, 0x4d, 0x41, 0x09, 0x5a, 0xf8, 0x18, + 0xf2, 0x0f, 0x3d, 0x6e, 0x3b, 0xcf, 0x6d, 0xbf, 0xf9, 0xfd, 0xb6, 0x4f, 0xfa, 0xdc, 0x78, 0xf1, + 0xa4, 0xaf, 0x76, 0xba, 0xca, 0x69, 0xbd, 0xad, 0x04, 0xea, 0xf8, 0x1a, 0x64, 0x4d, 0xf2, 0xd5, + 0x45, 0xf2, 0x70, 0xe2, 0xd0, 0x65, 0x27, 0xe1, 0x1a, 0x64, 0x9f, 0x50, 0xf2, 0x28, 0x79, 0x24, + 0x70, 0xe8, 0x47, 0x5c, 0x0c, 0xfb, 0x90, 0xe3, 0xf1, 0xc2, 0x00, 0x41, 0xc4, 0xd0, 0x4b, 0xb8, + 0x00, 0xd9, 0x46, 0x57, 0x61, 0x0b, 0x02, 0x41, 0x59, 0xa0, 0x6a, 0xaf, 0x25, 0x37, 0x64, 0x94, + 0xae, 0xde, 0x85, 0xbc, 0x08, 0x02, 0x5b, 0x2c, 0x51, 0x18, 0xd0, 0x4b, 0x41, 0x33, 0xb0, 0x91, + 0x0a, 0xa5, 0x67, 0xa7, 0x47, 0xb2, 0x82, 0xd2, 0xc9, 0xa9, 0xce, 0xa2, 0x5c, 0xd5, 0x83, 0x72, + 0xbc, 0xdc, 0x7c, 0x31, 0x57, 0xc9, 0xbf, 0xa7, 0xa0, 0x14, 0x2b, 0x1f, 0x59, 0xe1, 0x42, 0x4c, + 0xd3, 0x7e, 0xa2, 0x12, 0xd3, 0x20, 0x5e, 0x90, 0x1a, 0xc0, 0xa1, 0x3a, 0x43, 0x2e, 0x3b, 0x75, + 0x2f, 0x68, 0x89, 0xe4, 0x50, 0xbe, 0xfa, 0x97, 0x14, 0xa0, 0xf9, 0x02, 0x74, 0xce, 0xcd, 0xd4, + 0x4f, 0xe9, 0x66, 0xf5, 0xcf, 0x29, 0xa8, 0x24, 0xab, 0xce, 0x39, 0xf7, 0x6e, 0xfc, 0xa4, 0xee, + 0xfd, 0x33, 0x0d, 0xeb, 0x89, 0x5a, 0xf3, 0xb2, 0xde, 0x7d, 0x09, 0x9b, 0x86, 0x4e, 0x27, 0x8e, + 0xed, 0x53, 0x4b, 0xbb, 0x50, 0x4d, 0xfa, 0x98, 0x9a, 0x52, 0x95, 0x6f, 0x1a, 0xfb, 0xdf, 0x5f, + 0xcd, 0xd6, 0x5a, 0x33, 0xbd, 0x36, 0x53, 0x3b, 0xdc, 0x6a, 0x35, 0xe5, 0xd3, 0x5e, 0x77, 0x20, + 0x77, 0x1a, 0x9f, 0xab, 0x67, 0x9d, 0x5f, 0x75, 0xba, 0x9f, 0x76, 0x14, 0x64, 0xcc, 0xd1, 0x7e, + 0xc4, 0x65, 0xdf, 0x03, 0x34, 0xef, 0x14, 0xbe, 0x0a, 0xcb, 0xdc, 0x42, 0x2f, 0xe1, 0x2d, 0xd8, + 0xe8, 0x74, 0xd5, 0x7e, 0xab, 0x29, 0xab, 0xf2, 0x83, 0x07, 0x72, 0x63, 0xd0, 0x17, 0xd7, 0xfb, + 0x88, 0x3d, 0x48, 0x2c, 0xf0, 0xea, 0x9f, 0x32, 0xb0, 0xb5, 0xc4, 0x13, 0x5c, 0x0f, 0x6e, 0x16, + 0xe2, 0xb2, 0xf3, 0xee, 0x65, 0xbc, 0xaf, 0xb1, 0x82, 0xa0, 0x47, 0x5c, 0x3f, 0xb8, 0x88, 0xdc, + 0x02, 0x16, 0x25, 0xcb, 0x37, 0x86, 0x06, 0x75, 0x83, 0xd7, 0x10, 0x71, 0xdd, 0xd8, 0x98, 0xe1, + 0xe2, 0x41, 0xe4, 0x67, 0x80, 0x1d, 0xdb, 0x33, 0x7c, 0xe3, 0x31, 0x55, 0x0d, 0x2b, 0x7c, 0x3a, + 0x61, 0xd7, 0x8f, 0xac, 0x82, 0x42, 0x49, 0xcb, 0xf2, 0x23, 0xb6, 0x45, 0x47, 0x64, 0x8e, 0xcd, + 0x36, 0xf3, 0x8c, 0x82, 0x42, 0x49, 0xc4, 0xbe, 0x01, 0x65, 0xdd, 0x9e, 0xb2, 0x9a, 0x4c, 0xf0, + 0xd8, 0xd9, 0x91, 0x52, 0x4a, 0x02, 0x8b, 0x28, 0x41, 0xb5, 0x3d, 0x7b, 0xb3, 0x29, 0x2b, 0x25, + 0x81, 0x09, 0xca, 0x4d, 0xd8, 0x20, 0xa3, 0x91, 0xcb, 0x8c, 0x87, 0x86, 
0xc4, 0xfd, 0xa1, 0x12, + 0xc1, 0x9c, 0xb8, 0x73, 0x02, 0x85, 0x30, 0x0e, 0xec, 0xa8, 0x66, 0x91, 0x50, 0x1d, 0xf1, 0x6e, + 0x97, 0xde, 0x2b, 0x2a, 0x05, 0x2b, 0x14, 0xde, 0x80, 0xb2, 0xe1, 0xa9, 0xb3, 0x27, 0xe8, 0xf4, + 0x6e, 0x7a, 0xaf, 0xa0, 0x94, 0x0c, 0x2f, 0x7a, 0xbe, 0xab, 0x7e, 0x93, 0x86, 0x4a, 0xf2, 0x09, + 0x1d, 0x37, 0xa1, 0x60, 0xda, 0x1a, 0xe1, 0xa9, 0x25, 0xbe, 0xdf, 0xec, 0x3d, 0xe7, 0xd5, 0xbd, + 0xd6, 0x0e, 0xf8, 0x4a, 0xa4, 0xb9, 0xf3, 0x8f, 0x14, 0x14, 0x42, 0x18, 0x5f, 0x81, 0xac, 0x43, + 0xfc, 0x31, 0x37, 0x97, 0x3b, 0x4a, 0xa3, 0x94, 0xc2, 0xdb, 0x0c, 0xf7, 0x1c, 0x62, 0xf1, 0x14, + 0x08, 0x70, 0xd6, 0x66, 0xf3, 0x6a, 0x52, 0xa2, 0xf3, 0xcb, 0x89, 0x3d, 0x99, 0x50, 0xcb, 0xf7, + 0xc2, 0x79, 0x0d, 0xf0, 0x46, 0x00, 0xe3, 0x77, 0x60, 0xd3, 0x77, 0x89, 0x61, 0x26, 0xb8, 0x59, + 0xce, 0x45, 0xa1, 0x20, 0x22, 0x1f, 0xc2, 0xb5, 0xd0, 0xae, 0x4e, 0x7d, 0xa2, 0x8d, 0xa9, 0x3e, + 0x53, 0xca, 0xf3, 0xf7, 0xd9, 0xab, 0x01, 0xa1, 0x19, 0xc8, 0x43, 0xdd, 0xea, 0x77, 0x29, 0xd8, + 0x0c, 0xaf, 0x53, 0x7a, 0x14, 0xac, 0x53, 0x00, 0x62, 0x59, 0xb6, 0x1f, 0x0f, 0xd7, 0x62, 0x2a, + 0x2f, 0xe8, 0xd5, 0xea, 0x91, 0x92, 0x12, 0x33, 0xb0, 0x33, 0x01, 0x98, 0x49, 0x56, 0x86, 0xed, + 0x3a, 0x94, 0x82, 0xef, 0x23, 0xfc, 0x23, 0x9b, 0xb8, 0x80, 0x83, 0x80, 0xd8, 0xbd, 0x0b, 0x6f, + 0x43, 0xee, 0x9c, 0x8e, 0x0c, 0x2b, 0x78, 0xf5, 0x14, 0x8d, 0xf0, 0x25, 0x37, 0x1b, 0xbd, 0xe4, + 0x1e, 0xfd, 0x21, 0x05, 0x5b, 0x9a, 0x3d, 0x99, 0xf7, 0xf7, 0x08, 0xcd, 0xbd, 0x02, 0x78, 0x9f, + 0xa4, 0xbe, 0xf8, 0x78, 0x64, 0xf8, 0xe3, 0xe9, 0x79, 0x4d, 0xb3, 0x27, 0xfb, 0x23, 0xdb, 0x24, + 0xd6, 0x68, 0xf6, 0x95, 0x90, 0xff, 0xd1, 0xde, 0x1d, 0x51, 0xeb, 0xdd, 0x91, 0x1d, 0xfb, 0x66, + 0xf8, 0xd1, 0xec, 0xef, 0xd7, 0xe9, 0xcc, 0x71, 0xef, 0xe8, 0xaf, 0xe9, 0x9d, 0x63, 0xd1, 0x57, + 0x2f, 0x8c, 0x8d, 0x42, 0x87, 0x26, 0xd5, 0xd8, 0x78, 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0x0c, + 0xab, 0xb6, 0x37, 0x7e, 0x1c, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto new file mode 100644 index 0000000..4d4fb37 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto @@ -0,0 +1,849 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// The messages in this file describe the definitions found in .proto files. +// A valid .proto file can be translated directly to a FileDescriptorProto +// without any other information (e.g. without reading its imports). + + +syntax = "proto2"; + +package google.protobuf; +option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DescriptorProtos"; +option csharp_namespace = "Google.Protobuf.Reflection"; +option objc_class_prefix = "GPB"; + +// descriptor.proto must be optimized for speed because reflection-based +// algorithms don't work during bootstrapping. +option optimize_for = SPEED; + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +message FileDescriptorSet { + repeated FileDescriptorProto file = 1; +} + +// Describes a complete .proto file. +message FileDescriptorProto { + optional string name = 1; // file name, relative to root of source tree + optional string package = 2; // e.g. "foo", "foo.bar", etc. + + // Names of files imported by this file. + repeated string dependency = 3; + // Indexes of the public imported files in the dependency list above. + repeated int32 public_dependency = 10; + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + repeated int32 weak_dependency = 11; + + // All top-level definitions in this file. + repeated DescriptorProto message_type = 4; + repeated EnumDescriptorProto enum_type = 5; + repeated ServiceDescriptorProto service = 6; + repeated FieldDescriptorProto extension = 7; + + optional FileOptions options = 8; + + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + optional SourceCodeInfo source_code_info = 9; + + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + optional string syntax = 12; +} + +// Describes a message type. +message DescriptorProto { + optional string name = 1; + + repeated FieldDescriptorProto field = 2; + repeated FieldDescriptorProto extension = 6; + + repeated DescriptorProto nested_type = 3; + repeated EnumDescriptorProto enum_type = 4; + + message ExtensionRange { + optional int32 start = 1; + optional int32 end = 2; + + optional ExtensionRangeOptions options = 3; + } + repeated ExtensionRange extension_range = 5; + + repeated OneofDescriptorProto oneof_decl = 8; + + optional MessageOptions options = 7; + + // Range of reserved tag numbers. Reserved tag numbers may not be used by + // fields or extension ranges in the same message. Reserved ranges may + // not overlap. 
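+  // For example, `reserved 9 to 11, 15;` in a message parses into two
+  // ReservedRanges, {start: 9, end: 12} and {start: 15, end: 16},
+  // since start is inclusive and end is exclusive (see the field
+  // comments below).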
+ message ReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. + } + repeated ReservedRange reserved_range = 9; + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + repeated string reserved_name = 10; +} + +message ExtensionRangeOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +// Describes a field within a message. +message FieldDescriptorProto { + enum Type { + // 0 is reserved for errors. + // Order is weird for historical reasons. + TYPE_DOUBLE = 1; + TYPE_FLOAT = 2; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + TYPE_INT64 = 3; + TYPE_UINT64 = 4; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + TYPE_INT32 = 5; + TYPE_FIXED64 = 6; + TYPE_FIXED32 = 7; + TYPE_BOOL = 8; + TYPE_STRING = 9; + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + TYPE_GROUP = 10; + TYPE_MESSAGE = 11; // Length-delimited aggregate. + + // New in version 2. + TYPE_BYTES = 12; + TYPE_UINT32 = 13; + TYPE_ENUM = 14; + TYPE_SFIXED32 = 15; + TYPE_SFIXED64 = 16; + TYPE_SINT32 = 17; // Uses ZigZag encoding. + TYPE_SINT64 = 18; // Uses ZigZag encoding. + }; + + enum Label { + // 0 is reserved for errors + LABEL_OPTIONAL = 1; + LABEL_REQUIRED = 2; + LABEL_REPEATED = 3; + }; + + optional string name = 1; + optional int32 number = 3; + optional Label label = 4; + + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + optional Type type = 5; + + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + optional string type_name = 6; + + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + optional string extendee = 2; + + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + optional string default_value = 7; + + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + optional int32 oneof_index = 9; + + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + optional string json_name = 10; + + optional FieldOptions options = 8; +} + +// Describes a oneof. +message OneofDescriptorProto { + optional string name = 1; + optional OneofOptions options = 2; +} + +// Describes an enum type. 
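The message defined next carries this. For instance, using the generated Go types from the file above, a hypothetical `enum Corpus { UNIVERSAL = 0; WEB = 1; }` would be represented roughly as this sketch:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	e := &descriptor.EnumDescriptorProto{
		Name: proto.String("Corpus"),
		Value: []*descriptor.EnumValueDescriptorProto{
			{Name: proto.String("UNIVERSAL"), Number: proto.Int32(0)},
			{Name: proto.String("WEB"), Number: proto.Int32(1)},
		},
	}
	fmt.Printf("%s has %d values\n", e.GetName(), len(e.GetValue())) // Corpus has 2 values
}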
+message EnumDescriptorProto { + optional string name = 1; + + repeated EnumValueDescriptorProto value = 2; + + optional EnumOptions options = 3; +} + +// Describes a value within an enum. +message EnumValueDescriptorProto { + optional string name = 1; + optional int32 number = 2; + + optional EnumValueOptions options = 3; +} + +// Describes a service. +message ServiceDescriptorProto { + optional string name = 1; + repeated MethodDescriptorProto method = 2; + + optional ServiceOptions options = 3; +} + +// Describes a method of a service. +message MethodDescriptorProto { + optional string name = 1; + + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + optional string input_type = 2; + optional string output_type = 3; + + optional MethodOptions options = 4; + + // Identifies if client streams multiple client messages + optional bool client_streaming = 5 [default=false]; + // Identifies if server streams multiple server messages + optional bool server_streaming = 6 [default=false]; +} + + +// =================================================================== +// Options + +// Each of the definitions above may have "options" attached. These are +// just annotations which may cause code to be generated slightly differently +// or may contain hints for code that manipulates protocol messages. +// +// Clients may define custom options as extensions of the *Options messages. +// These extensions may not yet be known at parsing time, so the parser cannot +// store the values in them. Instead it stores them in a field in the *Options +// message called uninterpreted_option. This field must have the same name +// across all *Options messages. We then use this field to populate the +// extensions when we build a descriptor, at which point all protos have been +// parsed and so all extensions are known. +// +// Extension numbers for custom options may be chosen as follows: +// * For options which will only be used within a single application or +// organization, or for experimental options, use field numbers 50000 +// through 99999. It is up to you to ensure that you do not use the +// same number for multiple options. +// * For options which will be published and used publicly by multiple +// independent entities, e-mail protobuf-global-extension-registry@google.com +// to reserve extension numbers. Simply provide your project name (e.g. +// Objective-C plugin) and your project website (if available) -- there's no +// need to explain how you intend to use them. Usually you only need one +// extension number. You can declare multiple options with only one extension +// number by putting them in a sub-message. See the Custom Options section of +// the docs for examples: +// https://developers.google.com/protocol-buffers/docs/proto#options +// If this turns out to be popular, a web service will be set up +// to automatically assign option numbers. + + +message FileOptions { + + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + optional string java_package = 1; + + + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. 
This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + optional string java_outer_classname = 8; + + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + optional bool java_multiple_files = 10 [default=false]; + + // This option does nothing. + optional bool java_generate_equals_and_hash = 20 [deprecated=true]; + + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + optional bool java_string_check_utf8 = 27 [default=false]; + + + // Generated classes can be optimized for speed or code size. + enum OptimizeMode { + SPEED = 1; // Generate complete code for parsing, serialization, + // etc. + CODE_SIZE = 2; // Use ReflectionOps to implement these methods. + LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. + } + optional OptimizeMode optimize_for = 9 [default=SPEED]; + + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + optional string go_package = 11; + + + + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + optional bool cc_generic_services = 16 [default=false]; + optional bool java_generic_services = 17 [default=false]; + optional bool py_generic_services = 18 [default=false]; + optional bool php_generic_services = 42 [default=false]; + + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + optional bool deprecated = 23 [default=false]; + + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + optional bool cc_enable_arenas = 31 [default=false]; + + + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. 
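+  // For example, this file itself sets `option objc_class_prefix = "GPB";`.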
+ optional string objc_class_prefix = 36; + + // Namespace for generated classes; defaults to the package. + optional string csharp_namespace = 37; + + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + optional string swift_prefix = 39; + + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + optional string php_class_prefix = 40; + + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + optional string php_namespace = 41; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; + + reserved 38; +} + +message MessageOptions { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + optional bool message_set_wire_format = 1 [default=false]; + + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + optional bool no_standard_descriptor_accessor = 2 [default=false]; + + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + optional bool deprecated = 3 [default=false]; + + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementions still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + optional bool map_entry = 7; + + reserved 8; // javalite_serializable + reserved 9; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. 
+ repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message FieldOptions { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + optional CType ctype = 1 [default = STRING]; + enum CType { + // Default mode. + STRING = 0; + + CORD = 1; + + STRING_PIECE = 2; + } + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + optional bool packed = 2; + + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + optional JSType jstype = 6 [default = JS_NORMAL]; + enum JSType { + // Use the default type. + JS_NORMAL = 0; + + // Use JavaScript strings. + JS_STRING = 1; + + // Use JavaScript numbers. + JS_NUMBER = 2; + } + + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. 
That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + optional bool lazy = 5 [default=false]; + + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + optional bool deprecated = 3 [default=false]; + + // For Google-internal migration only. Do not use. + optional bool weak = 10 [default=false]; + + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; + + reserved 4; // removed jtype +} + +message OneofOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumOptions { + + // Set this option to true to allow mapping different tag names to the same + // value. + optional bool allow_alias = 2; + + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + optional bool deprecated = 3 [default=false]; + + reserved 5; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumValueOptions { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + optional bool deprecated = 1 [default=false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message ServiceOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + optional bool deprecated = 33 [default=false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message MethodOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. 
+
+  // Is this method deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the method, or it will be completely ignored; at the very least,
+  // this is a formalization for deprecating methods.
+  optional bool deprecated = 33 [default=false];
+
+  // Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
+  // or neither? HTTP based RPC implementations may choose GET verb for safe
+  // methods, and PUT verb for idempotent methods instead of the default POST.
+  enum IdempotencyLevel {
+    IDEMPOTENCY_UNKNOWN = 0;
+    NO_SIDE_EFFECTS = 1;  // implies idempotent
+    IDEMPOTENT = 2;       // idempotent, but may have side effects
+  }
+  optional IdempotencyLevel idempotency_level =
+      34 [default=IDEMPOTENCY_UNKNOWN];
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+
+// A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
+message UninterpretedOption {
+  // The name of the uninterpreted option. Each string represents a segment in
+  // a dot-separated name. is_extension is true iff a segment represents an
+  // extension (denoted with parentheses in options specs in .proto files).
+  // E.g., { ["foo", false], ["bar.baz", true], ["qux", false] } represents
+  // "foo.(bar.baz).qux".
+  message NamePart {
+    required string name_part = 1;
+    required bool is_extension = 2;
+  }
+  repeated NamePart name = 2;
+
+  // The value of the uninterpreted option, in whatever type the tokenizer
+  // identified it as during parsing. Exactly one of these should be set.
+  optional string identifier_value = 3;
+  optional uint64 positive_int_value = 4;
+  optional int64 negative_int_value = 5;
+  optional double double_value = 6;
+  optional bytes string_value = 7;
+  optional string aggregate_value = 8;
+}
+
+// ===================================================================
+// Optional source code info
+
+// Encapsulates information about the original source file from which a
+// FileDescriptorProto was generated.
+message SourceCodeInfo {
+  // A Location identifies a piece of source code in a .proto file which
+  // corresponds to a particular definition. This information is intended
+  // to be useful to IDEs, code indexers, documentation generators, and similar
+  // tools.
+  //
+  // For example, say we have a file like:
+  //   message Foo {
+  //     optional string foo = 1;
+  //   }
+  // Let's look at just the field definition:
+  //   optional string foo = 1;
+  //   ^       ^^     ^^  ^  ^^^
+  //   a       bc     de  f  ghi
+  // We have the following locations:
+  //   span   path               represents
+  //   [a,i)  [ 4, 0, 2, 0 ]     The whole field definition.
+  //   [a,b)  [ 4, 0, 2, 0, 4 ]  The label (optional).
+  //   [c,d)  [ 4, 0, 2, 0, 5 ]  The type (string).
+  //   [e,f)  [ 4, 0, 2, 0, 1 ]  The name (foo).
+  //   [g,h)  [ 4, 0, 2, 0, 3 ]  The number (1).
+  //
+  // Notes:
+  // - A location may refer to a repeated field itself (i.e. not to any
+  //   particular index within it). This is used whenever a set of elements is
+  //   logically enclosed in a single code segment.
+  //   For example, an entire extend block (possibly containing multiple
+  //   extension definitions) will have an outer location whose path refers
+  //   to the "extensions" repeated field without an index.
+  // - Multiple locations may have the same path. This happens when a single
+  //   logical declaration is spread out across multiple places. The most
+  //   obvious example is the "extend" block again -- there may be multiple
+  //   extend blocks in the same scope, each of which will have the same path.
+  // - A location's span is not always a subset of its parent's span. For
+  //   example, the "extendee" of an extension declaration appears at the
+  //   beginning of the "extend" block and is shared by all extensions within
+  //   the block.
+  // - Just because a location's span is a subset of some other location's span
+  //   does not mean that it is a descendant. For example, a "group" defines
+  //   both a type and a field in a single declaration. Thus, the locations
+  //   corresponding to the type and field and their components will overlap.
+  // - Code which tries to interpret locations should probably be designed to
+  //   ignore those that it doesn't understand, as more types of locations could
+  //   be recorded in the future.
+  repeated Location location = 1;
+  message Location {
+    // Identifies which part of the FileDescriptorProto was defined at this
+    // location.
+    //
+    // Each element is a field number or an index. They form a path from
+    // the root FileDescriptorProto to the place where the definition appears.
+    // For example, this path:
+    //   [ 4, 3, 2, 7, 1 ]
+    // refers to:
+    //   file.message_type(3)  // 4, 3
+    //       .field(7)         // 2, 7
+    //       .name()           // 1
+    // This is because FileDescriptorProto.message_type has field number 4:
+    //   repeated DescriptorProto message_type = 4;
+    // and DescriptorProto.field has field number 2:
+    //   repeated FieldDescriptorProto field = 2;
+    // and FieldDescriptorProto.name has field number 1:
+    //   optional string name = 1;
+    //
+    // Thus, the above path gives the location of a field name. If we removed
+    // the last element:
+    //   [ 4, 3, 2, 7 ]
+    // this path refers to the whole field declaration (from the beginning
+    // of the label to the terminating semicolon).
+    repeated int32 path = 1 [packed=true];
+
+    // Always has exactly three or four elements: start line, start column,
+    // end line (optional, otherwise assumed same as start line), end column.
+    // These are packed into a single field for efficiency. Note that line
+    // and column numbers are zero-based -- typically you will want to add
+    // 1 to each before displaying to a user.
+    repeated int32 span = 2 [packed=true];
+
+    // If this SourceCodeInfo represents a complete declaration, these are any
+    // comments appearing before and after the declaration which appear to be
+    // attached to the declaration.
+    //
+    // A series of line comments appearing on consecutive lines, with no other
+    // tokens appearing on those lines, will be treated as a single comment.
+    //
+    // leading_detached_comments will keep paragraphs of comments that appear
+    // before (but not connected to) the current element. Each paragraph,
+    // separated by empty lines, will be one comment element in the repeated
+    // field.
+    //
+    // Only the comment content is provided; comment markers (e.g. //) are
+    // stripped out. For block comments, leading whitespace and an asterisk
+    // will be stripped from the beginning of each line other than the first.
+    // Newlines are included in the output.
+ // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + optional string leading_comments = 3; + optional string trailing_comments = 4; + repeated string leading_detached_comments = 6; + } +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +message GeneratedCodeInfo { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + repeated Annotation annotation = 1; + message Annotation { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + repeated int32 path = 1 [packed=true]; + + // Identifies the filesystem path to the original source .proto. + optional string source_file = 2; + + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + optional int32 begin = 3; + + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + optional int32 end = 4; + } +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/doc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/doc.go new file mode 100644 index 0000000..0d6055d --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/doc.go @@ -0,0 +1,51 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* + A plugin for the Google protocol buffer compiler to generate Go code. + Run it by building this program and putting it in your path with the name + protoc-gen-go + That word 'go' at the end becomes part of the option string set for the + protocol compiler, so once the protocol compiler (protoc) is installed + you can run + protoc --go_out=output_directory input_directory/file.proto + to generate Go bindings for the protocol defined by file.proto. + With that input, the output will be written to + output_directory/file.pb.go + + The generated code is documented in the package comment for + the library. + + See the README and documentation for protocol buffers to learn more: + https://developers.google.com/protocol-buffers/ + +*/ +package documentation diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/Makefile new file mode 100644 index 0000000..b5715c3 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/Makefile @@ -0,0 +1,40 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +include $(GOROOT)/src/Make.inc + +TARG=github.com/golang/protobuf/compiler/generator +GOFILES=\ + generator.go\ + +DEPS=../descriptor ../plugin ../../proto + +include $(GOROOT)/src/Make.pkg diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go new file mode 100644 index 0000000..60d5246 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go @@ -0,0 +1,2866 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* + The code generator for the plugin for the Google protocol buffer compiler. + It generates Go code from the protocol buffer description files read by the + main routine. +*/ +package generator + +import ( + "bufio" + "bytes" + "compress/gzip" + "fmt" + "go/parser" + "go/printer" + "go/token" + "log" + "os" + "path" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "github.com/golang/protobuf/proto" + + "github.com/golang/protobuf/protoc-gen-go/descriptor" + plugin "github.com/golang/protobuf/protoc-gen-go/plugin" +) + +// generatedCodeVersion indicates a version of the generated code. +// It is incremented whenever an incompatibility between the generated code and +// proto package is introduced; the generated code references +// a constant, proto.ProtoPackageIsVersionN (where N is generatedCodeVersion). +const generatedCodeVersion = 2 + +// A Plugin provides functionality to add to the output during Go code generation, +// such as to produce RPC stubs. +type Plugin interface { + // Name identifies the plugin. + Name() string + // Init is called once after data structures are built but before + // code generation begins. + Init(g *Generator) + // Generate produces the code generated by the plugin for this file, + // except for the imports, by calling the generator's methods P, In, and Out. 
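+	// A typical implementation (an illustrative sketch, not a contract from
+	// the original source) emits output with calls such as g.P("// ...") and
+	// uses g.In and g.Out to adjust indentation around nested declarations.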
+	Generate(file *FileDescriptor)
+	// GenerateImports produces the import declarations for this file.
+	// It is called after Generate.
+	GenerateImports(file *FileDescriptor)
+}
+
+var plugins []Plugin
+
+// RegisterPlugin installs a (second-order) plugin to be run when the Go output is generated.
+// It is typically called during initialization.
+func RegisterPlugin(p Plugin) {
+	plugins = append(plugins, p)
+}
+
+// Each type we import as a protocol buffer (other than FileDescriptorProto) needs
+// a pointer to the FileDescriptorProto that represents it. These types achieve that
+// wrapping by placing each Proto inside a struct with the pointer to its File. The
+// structs have the same names as their contents, with "Proto" removed.
+// FileDescriptor is used to store the things that it points to.
+
+// The file and package name methods are common to messages and enums.
+type common struct {
+	file *descriptor.FileDescriptorProto // File this object comes from.
+}
+
+// PackageName is the name in the package clause in the generated file.
+func (c *common) PackageName() string { return uniquePackageOf(c.file) }
+
+func (c *common) File() *descriptor.FileDescriptorProto { return c.file }
+
+func fileIsProto3(file *descriptor.FileDescriptorProto) bool {
+	return file.GetSyntax() == "proto3"
+}
+
+func (c *common) proto3() bool { return fileIsProto3(c.file) }
+
+// Descriptor represents a protocol buffer message.
+type Descriptor struct {
+	common
+	*descriptor.DescriptorProto
+	parent   *Descriptor            // The containing message, if any.
+	nested   []*Descriptor          // Inner messages, if any.
+	enums    []*EnumDescriptor      // Inner enums, if any.
+	ext      []*ExtensionDescriptor // Extensions, if any.
+	typename []string               // Cached typename vector.
+	index    int                    // The index into the container, whether the file or another message.
+	path     string                 // The SourceCodeInfo path as comma-separated integers.
+	group    bool
+}
+
+// TypeName returns the elements of the dotted type name.
+// The package name is not part of this name.
+func (d *Descriptor) TypeName() []string {
+	if d.typename != nil {
+		return d.typename
+	}
+	n := 0
+	for parent := d; parent != nil; parent = parent.parent {
+		n++
+	}
+	s := make([]string, n, n)
+	for parent := d; parent != nil; parent = parent.parent {
+		n--
+		s[n] = parent.GetName()
+	}
+	d.typename = s
+	return s
+}
+
+// EnumDescriptor describes an enum. If it's at top level, its parent will be nil.
+// Otherwise it will be the descriptor of the message in which it is defined.
+type EnumDescriptor struct {
+	common
+	*descriptor.EnumDescriptorProto
+	parent   *Descriptor // The containing message, if any.
+	typename []string    // Cached typename vector.
+	index    int         // The index into the container, whether the file or a message.
+	path     string      // The SourceCodeInfo path as comma-separated integers.
+}
+
+// TypeName returns the elements of the dotted type name.
+// The package name is not part of this name.
+func (e *EnumDescriptor) TypeName() (s []string) {
+	if e.typename != nil {
+		return e.typename
+	}
+	name := e.GetName()
+	if e.parent == nil {
+		s = make([]string, 1)
+	} else {
+		pname := e.parent.TypeName()
+		s = make([]string, len(pname)+1)
+		copy(s, pname)
+	}
+	s[len(s)-1] = name
+	e.typename = s
+	return s
+}
+
+// Everything but the last element of the full type name, CamelCased.
+// The values of type Foo.Bar are called Foo_value1... not Foo_Bar_value1... .
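+// For example (illustrative): for an enum Bar nested in message Foo the prefix
+// is "Foo_", so a value BAZ is generated as Foo_BAZ; for a top-level enum Qux
+// the prefix is "Qux_".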
+func (e *EnumDescriptor) prefix() string {
+	if e.parent == nil {
+		// If the enum is not part of a message, the prefix is just the type name.
+		return CamelCase(*e.Name) + "_"
+	}
+	typeName := e.TypeName()
+	return CamelCaseSlice(typeName[0:len(typeName)-1]) + "_"
+}
+
+// The integer value of the named constant in this enumerated type.
+func (e *EnumDescriptor) integerValueAsString(name string) string {
+	for _, c := range e.Value {
+		if c.GetName() == name {
+			return fmt.Sprint(c.GetNumber())
+		}
+	}
+	log.Fatal("cannot find value for enum constant")
+	return ""
+}
+
+// ExtensionDescriptor describes an extension. If it's at top level, its parent will be nil.
+// Otherwise it will be the descriptor of the message in which it is defined.
+type ExtensionDescriptor struct {
+	common
+	*descriptor.FieldDescriptorProto
+	parent *Descriptor // The containing message, if any.
+}
+
+// TypeName returns the elements of the dotted type name.
+// The package name is not part of this name.
+func (e *ExtensionDescriptor) TypeName() (s []string) {
+	name := e.GetName()
+	if e.parent == nil {
+		// top-level extension
+		s = make([]string, 1)
+	} else {
+		pname := e.parent.TypeName()
+		s = make([]string, len(pname)+1)
+		copy(s, pname)
+	}
+	s[len(s)-1] = name
+	return s
+}
+
+// DescName returns the variable name used for the generated descriptor.
+func (e *ExtensionDescriptor) DescName() string {
+	// The full type name.
+	typeName := e.TypeName()
+	// Each scope of the extension is individually CamelCased, and all are joined with "_" with an "E_" prefix.
+	for i, s := range typeName {
+		typeName[i] = CamelCase(s)
+	}
+	return "E_" + strings.Join(typeName, "_")
+}
+
+// ImportedDescriptor describes a type that has been publicly imported from another file.
+type ImportedDescriptor struct {
+	common
+	o Object
+}
+
+func (id *ImportedDescriptor) TypeName() []string { return id.o.TypeName() }
+
+// FileDescriptor describes a protocol buffer descriptor file (.proto).
+// It includes slices of all the messages and enums defined within it.
+// Those slices are constructed by WrapTypes.
+type FileDescriptor struct {
+	*descriptor.FileDescriptorProto
+	desc []*Descriptor          // All the messages defined in this file.
+	enum []*EnumDescriptor      // All the enums defined in this file.
+	ext  []*ExtensionDescriptor // All the top-level extensions defined in this file.
+	imp  []*ImportedDescriptor  // All types defined in files publicly imported by this file.
+
+	// Comments, stored as a map of path (comma-separated integers) to the comment.
+	comments map[string]*descriptor.SourceCodeInfo_Location
+
+	// The full list of symbols that are exported,
+	// as a map from the exported object to its symbols.
+	// This is used for supporting public imports.
+	exported map[Object][]symbol
+
+	index int // The index of this file in the list of files to generate code for
+
+	proto3 bool // whether to generate proto3 code for this file
+}
+
+// PackageName is the package name we'll use in the generated code to refer to this file.
+func (d *FileDescriptor) PackageName() string { return uniquePackageOf(d.FileDescriptorProto) }
+
+// VarName is the variable name we'll use in the generated code to refer
+// to the compressed bytes of this descriptor. It is not exported, so
+// it is only valid inside the generated package.
+func (d *FileDescriptor) VarName() string { return fmt.Sprintf("fileDescriptor%d", d.index) }
+
+// goPackageOption interprets the file's go_package option.
+// If there is no go_package, it returns ("", "", false).
+// If there's a simple name, it returns ("", pkg, true). +// If the option implies an import path, it returns (impPath, pkg, true). +func (d *FileDescriptor) goPackageOption() (impPath, pkg string, ok bool) { + pkg = d.GetOptions().GetGoPackage() + if pkg == "" { + return + } + ok = true + // The presence of a slash implies there's an import path. + slash := strings.LastIndex(pkg, "/") + if slash < 0 { + return + } + impPath, pkg = pkg, pkg[slash+1:] + // A semicolon-delimited suffix overrides the package name. + sc := strings.IndexByte(impPath, ';') + if sc < 0 { + return + } + impPath, pkg = impPath[:sc], impPath[sc+1:] + return +} + +// goPackageName returns the Go package name to use in the +// generated Go file. The result explicit reports whether the name +// came from an option go_package statement. If explicit is false, +// the name was derived from the protocol buffer's package statement +// or the input file name. +func (d *FileDescriptor) goPackageName() (name string, explicit bool) { + // Does the file have a "go_package" option? + if _, pkg, ok := d.goPackageOption(); ok { + return pkg, true + } + + // Does the file have a package clause? + if pkg := d.GetPackage(); pkg != "" { + return pkg, false + } + // Use the file base name. + return baseName(d.GetName()), false +} + +// goFileName returns the output name for the generated Go file. +func (d *FileDescriptor) goFileName() string { + name := *d.Name + if ext := path.Ext(name); ext == ".proto" || ext == ".protodevel" { + name = name[:len(name)-len(ext)] + } + name += ".pb.go" + + // Does the file have a "go_package" option? + // If it does, it may override the filename. + if impPath, _, ok := d.goPackageOption(); ok && impPath != "" { + // Replace the existing dirname with the declared import path. + _, name = path.Split(name) + name = path.Join(impPath, name) + return name + } + + return name +} + +func (d *FileDescriptor) addExport(obj Object, sym symbol) { + d.exported[obj] = append(d.exported[obj], sym) +} + +// symbol is an interface representing an exported Go symbol. +type symbol interface { + // GenerateAlias should generate an appropriate alias + // for the symbol from the named package. + GenerateAlias(g *Generator, pkg string) +} + +type messageSymbol struct { + sym string + hasExtensions, isMessageSet bool + hasOneof bool + getters []getterSymbol +} + +type getterSymbol struct { + name string + typ string + typeName string // canonical name in proto world; empty for proto.Message and similar + genType bool // whether typ contains a generated type (message/group/enum) +} + +func (ms *messageSymbol) GenerateAlias(g *Generator, pkg string) { + remoteSym := pkg + "." + ms.sym + + g.P("type ", ms.sym, " ", remoteSym) + g.P("func (m *", ms.sym, ") Reset() { (*", remoteSym, ")(m).Reset() }") + g.P("func (m *", ms.sym, ") String() string { return (*", remoteSym, ")(m).String() }") + g.P("func (*", ms.sym, ") ProtoMessage() {}") + if ms.hasExtensions { + g.P("func (*", ms.sym, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange ", + "{ return (*", remoteSym, ")(nil).ExtensionRangeArray() }") + if ms.isMessageSet { + g.P("func (m *", ms.sym, ") Marshal() ([]byte, error) ", + "{ return (*", remoteSym, ")(m).Marshal() }") + g.P("func (m *", ms.sym, ") Unmarshal(buf []byte) error ", + "{ return (*", remoteSym, ")(m).Unmarshal(buf) }") + } + } + if ms.hasOneof { + // Oneofs and public imports do not mix well. 
+ // We can make them work okay for the binary format, + // but they're going to break weirdly for text/JSON. + enc := "_" + ms.sym + "_OneofMarshaler" + dec := "_" + ms.sym + "_OneofUnmarshaler" + size := "_" + ms.sym + "_OneofSizer" + encSig := "(msg " + g.Pkg["proto"] + ".Message, b *" + g.Pkg["proto"] + ".Buffer) error" + decSig := "(msg " + g.Pkg["proto"] + ".Message, tag, wire int, b *" + g.Pkg["proto"] + ".Buffer) (bool, error)" + sizeSig := "(msg " + g.Pkg["proto"] + ".Message) int" + g.P("func (m *", ms.sym, ") XXX_OneofFuncs() (func", encSig, ", func", decSig, ", func", sizeSig, ", []interface{}) {") + g.P("return ", enc, ", ", dec, ", ", size, ", nil") + g.P("}") + + g.P("func ", enc, encSig, " {") + g.P("m := msg.(*", ms.sym, ")") + g.P("m0 := (*", remoteSym, ")(m)") + g.P("enc, _, _, _ := m0.XXX_OneofFuncs()") + g.P("return enc(m0, b)") + g.P("}") + + g.P("func ", dec, decSig, " {") + g.P("m := msg.(*", ms.sym, ")") + g.P("m0 := (*", remoteSym, ")(m)") + g.P("_, dec, _, _ := m0.XXX_OneofFuncs()") + g.P("return dec(m0, tag, wire, b)") + g.P("}") + + g.P("func ", size, sizeSig, " {") + g.P("m := msg.(*", ms.sym, ")") + g.P("m0 := (*", remoteSym, ")(m)") + g.P("_, _, size, _ := m0.XXX_OneofFuncs()") + g.P("return size(m0)") + g.P("}") + } + for _, get := range ms.getters { + + if get.typeName != "" { + g.RecordTypeUse(get.typeName) + } + typ := get.typ + val := "(*" + remoteSym + ")(m)." + get.name + "()" + if get.genType { + // typ will be "*pkg.T" (message/group) or "pkg.T" (enum) + // or "map[t]*pkg.T" (map to message/enum). + // The first two of those might have a "[]" prefix if it is repeated. + // Drop any package qualifier since we have hoisted the type into this package. + rep := strings.HasPrefix(typ, "[]") + if rep { + typ = typ[2:] + } + isMap := strings.HasPrefix(typ, "map[") + star := typ[0] == '*' + if !isMap { // map types handled lower down + typ = typ[strings.Index(typ, ".")+1:] + } + if star { + typ = "*" + typ + } + if rep { + // Go does not permit conversion between slice types where both + // element types are named. That means we need to generate a bit + // of code in this situation. + // typ is the element type. + // val is the expression to get the slice from the imported type. + + ctyp := typ // conversion type expression; "Foo" or "(*Foo)" + if star { + ctyp = "(" + typ + ")" + } + + g.P("func (m *", ms.sym, ") ", get.name, "() []", typ, " {") + g.In() + g.P("o := ", val) + g.P("if o == nil {") + g.In() + g.P("return nil") + g.Out() + g.P("}") + g.P("s := make([]", typ, ", len(o))") + g.P("for i, x := range o {") + g.In() + g.P("s[i] = ", ctyp, "(x)") + g.Out() + g.P("}") + g.P("return s") + g.Out() + g.P("}") + continue + } + if isMap { + // Split map[keyTyp]valTyp. + bra, ket := strings.Index(typ, "["), strings.Index(typ, "]") + keyTyp, valTyp := typ[bra+1:ket], typ[ket+1:] + // Drop any package qualifier. + // Only the value type may be foreign. + star := valTyp[0] == '*' + valTyp = valTyp[strings.Index(valTyp, ".")+1:] + if star { + valTyp = "*" + valTyp + } + + typ := "map[" + keyTyp + "]" + valTyp + g.P("func (m *", ms.sym, ") ", get.name, "() ", typ, " {") + g.P("o := ", val) + g.P("if o == nil { return nil }") + g.P("s := make(", typ, ", len(o))") + g.P("for k, v := range o {") + g.P("s[k] = (", valTyp, ")(v)") + g.P("}") + g.P("return s") + g.P("}") + continue + } + // Convert imported type into the forwarding type. 
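+			// e.g. (illustrative) a getter for a hoisted enum type Color becomes:
+			//	return (Color)((*pkg.Msg)(m).GetColor())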
+ val = "(" + typ + ")(" + val + ")" + } + + g.P("func (m *", ms.sym, ") ", get.name, "() ", typ, " { return ", val, " }") + } + +} + +type enumSymbol struct { + name string + proto3 bool // Whether this came from a proto3 file. +} + +func (es enumSymbol) GenerateAlias(g *Generator, pkg string) { + s := es.name + g.P("type ", s, " ", pkg, ".", s) + g.P("var ", s, "_name = ", pkg, ".", s, "_name") + g.P("var ", s, "_value = ", pkg, ".", s, "_value") + g.P("func (x ", s, ") String() string { return (", pkg, ".", s, ")(x).String() }") + if !es.proto3 { + g.P("func (x ", s, ") Enum() *", s, "{ return (*", s, ")((", pkg, ".", s, ")(x).Enum()) }") + g.P("func (x *", s, ") UnmarshalJSON(data []byte) error { return (*", pkg, ".", s, ")(x).UnmarshalJSON(data) }") + } +} + +type constOrVarSymbol struct { + sym string + typ string // either "const" or "var" + cast string // if non-empty, a type cast is required (used for enums) +} + +func (cs constOrVarSymbol) GenerateAlias(g *Generator, pkg string) { + v := pkg + "." + cs.sym + if cs.cast != "" { + v = cs.cast + "(" + v + ")" + } + g.P(cs.typ, " ", cs.sym, " = ", v) +} + +// Object is an interface abstracting the abilities shared by enums, messages, extensions and imported objects. +type Object interface { + PackageName() string // The name we use in our output (a_b_c), possibly renamed for uniqueness. + TypeName() []string + File() *descriptor.FileDescriptorProto +} + +// Each package name we generate must be unique. The package we're generating +// gets its own name but every other package must have a unique name that does +// not conflict in the code we generate. These names are chosen globally (although +// they don't have to be, it simplifies things to do them globally). +func uniquePackageOf(fd *descriptor.FileDescriptorProto) string { + s, ok := uniquePackageName[fd] + if !ok { + log.Fatal("internal error: no package name defined for " + fd.GetName()) + } + return s +} + +// Generator is the type whose methods generate the output, stored in the associated response structure. +type Generator struct { + *bytes.Buffer + + Request *plugin.CodeGeneratorRequest // The input. + Response *plugin.CodeGeneratorResponse // The output. + + Param map[string]string // Command-line parameters. + PackageImportPath string // Go import path of the package we're generating code for + ImportPrefix string // String to prefix to imported package file names. + ImportMap map[string]string // Mapping from .proto file name to import path + + Pkg map[string]string // The names under which we import support packages + + packageName string // What we're calling ourselves. + allFiles []*FileDescriptor // All files in the tree + allFilesByName map[string]*FileDescriptor // All files by filename. + genFiles []*FileDescriptor // Those files we will generate output for. + file *FileDescriptor // The file we are compiling now. + usedPackages map[string]bool // Names of packages used in current file. + typeNameToObject map[string]Object // Key is a fully-qualified name in input syntax. + init []string // Lines to emit in the init function. + indent string + writeOutput bool +} + +// New creates a new generator and allocates the request and response protobufs. +func New() *Generator { + g := new(Generator) + g.Buffer = new(bytes.Buffer) + g.Request = new(plugin.CodeGeneratorRequest) + g.Response = new(plugin.CodeGeneratorResponse) + return g +} + +// Error reports a problem, including an error, and exits the program. 
+func (g *Generator) Error(err error, msgs ...string) { + s := strings.Join(msgs, " ") + ":" + err.Error() + log.Print("protoc-gen-go: error:", s) + os.Exit(1) +} + +// Fail reports a problem and exits the program. +func (g *Generator) Fail(msgs ...string) { + s := strings.Join(msgs, " ") + log.Print("protoc-gen-go: error:", s) + os.Exit(1) +} + +// CommandLineParameters breaks the comma-separated list of key=value pairs +// in the parameter (a member of the request protobuf) into a key/value map. +// It then sets file name mappings defined by those entries. +func (g *Generator) CommandLineParameters(parameter string) { + g.Param = make(map[string]string) + for _, p := range strings.Split(parameter, ",") { + if i := strings.Index(p, "="); i < 0 { + g.Param[p] = "" + } else { + g.Param[p[0:i]] = p[i+1:] + } + } + + g.ImportMap = make(map[string]string) + pluginList := "none" // Default list of plugin names to enable (empty means all). + for k, v := range g.Param { + switch k { + case "import_prefix": + g.ImportPrefix = v + case "import_path": + g.PackageImportPath = v + case "plugins": + pluginList = v + default: + if len(k) > 0 && k[0] == 'M' { + g.ImportMap[k[1:]] = v + } + } + } + if pluginList != "" { + // Amend the set of plugins. + enabled := make(map[string]bool) + for _, name := range strings.Split(pluginList, "+") { + enabled[name] = true + } + var nplugins []Plugin + for _, p := range plugins { + if enabled[p.Name()] { + nplugins = append(nplugins, p) + } + } + plugins = nplugins + } +} + +// DefaultPackageName returns the package name printed for the object. +// If its file is in a different package, it returns the package name we're using for this file, plus ".". +// Otherwise it returns the empty string. +func (g *Generator) DefaultPackageName(obj Object) string { + pkg := obj.PackageName() + if pkg == g.packageName { + return "" + } + return pkg + "." +} + +// For each input file, the unique package name to use, underscored. +var uniquePackageName = make(map[*descriptor.FileDescriptorProto]string) + +// Package names already registered. Key is the name from the .proto file; +// value is the name that appears in the generated code. +var pkgNamesInUse = make(map[string]bool) + +// Create and remember a guaranteed unique package name for this file descriptor. +// Pkg is the candidate name. If f is nil, it's a builtin package like "proto" and +// has no file descriptor. +func RegisterUniquePackageName(pkg string, f *FileDescriptor) string { + // Convert dots to underscores before finding a unique alias. + pkg = strings.Map(badToUnderscore, pkg) + + for i, orig := 1, pkg; pkgNamesInUse[pkg]; i++ { + // It's a duplicate; must rename. + pkg = orig + strconv.Itoa(i) + } + // Install it. + pkgNamesInUse[pkg] = true + if f != nil { + uniquePackageName[f.FileDescriptorProto] = pkg + } + return pkg +} + +var isGoKeyword = map[string]bool{ + "break": true, + "case": true, + "chan": true, + "const": true, + "continue": true, + "default": true, + "else": true, + "defer": true, + "fallthrough": true, + "for": true, + "func": true, + "go": true, + "goto": true, + "if": true, + "import": true, + "interface": true, + "map": true, + "package": true, + "range": true, + "return": true, + "select": true, + "struct": true, + "switch": true, + "type": true, + "var": true, +} + +// defaultGoPackage returns the package name to use, +// derived from the import path of the package we're building code for. 
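+// For example (illustrative, and assuming badToUnderscore maps characters that
+// are not legal in identifiers to '_'): import_path=example.com/m/2fa-server
+// yields "2fa-server" -> "2fa_server" -> "_2fa_server", the final escape being
+// needed because an identifier must not begin with a digit.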
+func (g *Generator) defaultGoPackage() string { + p := g.PackageImportPath + if i := strings.LastIndex(p, "/"); i >= 0 { + p = p[i+1:] + } + if p == "" { + return "" + } + + p = strings.Map(badToUnderscore, p) + // Identifier must not be keyword: insert _. + if isGoKeyword[p] { + p = "_" + p + } + // Identifier must not begin with digit: insert _. + if r, _ := utf8.DecodeRuneInString(p); unicode.IsDigit(r) { + p = "_" + p + } + return p +} + +// SetPackageNames sets the package name for this run. +// The package name must agree across all files being generated. +// It also defines unique package names for all imported files. +func (g *Generator) SetPackageNames() { + // Register the name for this package. It will be the first name + // registered so is guaranteed to be unmodified. + pkg, explicit := g.genFiles[0].goPackageName() + + // Check all files for an explicit go_package option. + for _, f := range g.genFiles { + thisPkg, thisExplicit := f.goPackageName() + if thisExplicit { + if !explicit { + // Let this file's go_package option serve for all input files. + pkg, explicit = thisPkg, true + } else if thisPkg != pkg { + g.Fail("inconsistent package names:", thisPkg, pkg) + } + } + } + + // If we don't have an explicit go_package option but we have an + // import path, use that. + if !explicit { + p := g.defaultGoPackage() + if p != "" { + pkg, explicit = p, true + } + } + + // If there was no go_package and no import path to use, + // double-check that all the inputs have the same implicit + // Go package name. + if !explicit { + for _, f := range g.genFiles { + thisPkg, _ := f.goPackageName() + if thisPkg != pkg { + g.Fail("inconsistent package names:", thisPkg, pkg) + } + } + } + + g.packageName = RegisterUniquePackageName(pkg, g.genFiles[0]) + + // Register the support package names. They might collide with the + // name of a package we import. + g.Pkg = map[string]string{ + "fmt": RegisterUniquePackageName("fmt", nil), + "math": RegisterUniquePackageName("math", nil), + "proto": RegisterUniquePackageName("proto", nil), + } + +AllFiles: + for _, f := range g.allFiles { + for _, genf := range g.genFiles { + if f == genf { + // In this package already. + uniquePackageName[f.FileDescriptorProto] = g.packageName + continue AllFiles + } + } + // The file is a dependency, so we want to ignore its go_package option + // because that is only relevant for its specific generated output. + pkg := f.GetPackage() + if pkg == "" { + pkg = baseName(*f.Name) + } + RegisterUniquePackageName(pkg, f) + } +} + +// WrapTypes walks the incoming data, wrapping DescriptorProtos, EnumDescriptorProtos +// and FileDescriptorProtos into file-referenced objects within the Generator. +// It also creates the list of files to generate and so should be called before GenerateAllFiles. 
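+//
+// A driver's call sequence (an illustrative sketch of the expected order; the
+// actual main is not part of this file) looks roughly like:
+//
+//	g := generator.New()
+//	// data holds the serialized CodeGeneratorRequest read from stdin.
+//	proto.Unmarshal(data, g.Request)
+//	g.CommandLineParameters(g.Request.GetParameter())
+//	g.WrapTypes()
+//	g.SetPackageNames()
+//	g.BuildTypeNameMap()
+//	g.GenerateAllFiles()
+//	// The populated g.Response is then marshaled back to stdout.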
+func (g *Generator) WrapTypes() { + g.allFiles = make([]*FileDescriptor, 0, len(g.Request.ProtoFile)) + g.allFilesByName = make(map[string]*FileDescriptor, len(g.allFiles)) + for _, f := range g.Request.ProtoFile { + // We must wrap the descriptors before we wrap the enums + descs := wrapDescriptors(f) + g.buildNestedDescriptors(descs) + enums := wrapEnumDescriptors(f, descs) + g.buildNestedEnums(descs, enums) + exts := wrapExtensions(f) + fd := &FileDescriptor{ + FileDescriptorProto: f, + desc: descs, + enum: enums, + ext: exts, + exported: make(map[Object][]symbol), + proto3: fileIsProto3(f), + } + extractComments(fd) + g.allFiles = append(g.allFiles, fd) + g.allFilesByName[f.GetName()] = fd + } + for _, fd := range g.allFiles { + fd.imp = wrapImported(fd.FileDescriptorProto, g) + } + + g.genFiles = make([]*FileDescriptor, 0, len(g.Request.FileToGenerate)) + for _, fileName := range g.Request.FileToGenerate { + fd := g.allFilesByName[fileName] + if fd == nil { + g.Fail("could not find file named", fileName) + } + fd.index = len(g.genFiles) + g.genFiles = append(g.genFiles, fd) + } +} + +// Scan the descriptors in this file. For each one, build the slice of nested descriptors +func (g *Generator) buildNestedDescriptors(descs []*Descriptor) { + for _, desc := range descs { + if len(desc.NestedType) != 0 { + for _, nest := range descs { + if nest.parent == desc { + desc.nested = append(desc.nested, nest) + } + } + if len(desc.nested) != len(desc.NestedType) { + g.Fail("internal error: nesting failure for", desc.GetName()) + } + } + } +} + +func (g *Generator) buildNestedEnums(descs []*Descriptor, enums []*EnumDescriptor) { + for _, desc := range descs { + if len(desc.EnumType) != 0 { + for _, enum := range enums { + if enum.parent == desc { + desc.enums = append(desc.enums, enum) + } + } + if len(desc.enums) != len(desc.EnumType) { + g.Fail("internal error: enum nesting failure for", desc.GetName()) + } + } + } +} + +// Construct the Descriptor +func newDescriptor(desc *descriptor.DescriptorProto, parent *Descriptor, file *descriptor.FileDescriptorProto, index int) *Descriptor { + d := &Descriptor{ + common: common{file}, + DescriptorProto: desc, + parent: parent, + index: index, + } + if parent == nil { + d.path = fmt.Sprintf("%d,%d", messagePath, index) + } else { + d.path = fmt.Sprintf("%s,%d,%d", parent.path, messageMessagePath, index) + } + + // The only way to distinguish a group from a message is whether + // the containing message has a TYPE_GROUP field that matches. + if parent != nil { + parts := d.TypeName() + if file.Package != nil { + parts = append([]string{*file.Package}, parts...) + } + exp := "." 
+ strings.Join(parts, ".") + for _, field := range parent.Field { + if field.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP && field.GetTypeName() == exp { + d.group = true + break + } + } + } + + for _, field := range desc.Extension { + d.ext = append(d.ext, &ExtensionDescriptor{common{file}, field, d}) + } + + return d +} + +// Return a slice of all the Descriptors defined within this file +func wrapDescriptors(file *descriptor.FileDescriptorProto) []*Descriptor { + sl := make([]*Descriptor, 0, len(file.MessageType)+10) + for i, desc := range file.MessageType { + sl = wrapThisDescriptor(sl, desc, nil, file, i) + } + return sl +} + +// Wrap this Descriptor, recursively +func wrapThisDescriptor(sl []*Descriptor, desc *descriptor.DescriptorProto, parent *Descriptor, file *descriptor.FileDescriptorProto, index int) []*Descriptor { + sl = append(sl, newDescriptor(desc, parent, file, index)) + me := sl[len(sl)-1] + for i, nested := range desc.NestedType { + sl = wrapThisDescriptor(sl, nested, me, file, i) + } + return sl +} + +// Construct the EnumDescriptor +func newEnumDescriptor(desc *descriptor.EnumDescriptorProto, parent *Descriptor, file *descriptor.FileDescriptorProto, index int) *EnumDescriptor { + ed := &EnumDescriptor{ + common: common{file}, + EnumDescriptorProto: desc, + parent: parent, + index: index, + } + if parent == nil { + ed.path = fmt.Sprintf("%d,%d", enumPath, index) + } else { + ed.path = fmt.Sprintf("%s,%d,%d", parent.path, messageEnumPath, index) + } + return ed +} + +// Return a slice of all the EnumDescriptors defined within this file +func wrapEnumDescriptors(file *descriptor.FileDescriptorProto, descs []*Descriptor) []*EnumDescriptor { + sl := make([]*EnumDescriptor, 0, len(file.EnumType)+10) + // Top-level enums. + for i, enum := range file.EnumType { + sl = append(sl, newEnumDescriptor(enum, nil, file, i)) + } + // Enums within messages. Enums within embedded messages appear in the outer-most message. + for _, nested := range descs { + for i, enum := range nested.EnumType { + sl = append(sl, newEnumDescriptor(enum, nested, file, i)) + } + } + return sl +} + +// Return a slice of all the top-level ExtensionDescriptors defined within this file. +func wrapExtensions(file *descriptor.FileDescriptorProto) []*ExtensionDescriptor { + var sl []*ExtensionDescriptor + for _, field := range file.Extension { + sl = append(sl, &ExtensionDescriptor{common{file}, field, nil}) + } + return sl +} + +// Return a slice of all the types that are publicly imported into this file. +func wrapImported(file *descriptor.FileDescriptorProto, g *Generator) (sl []*ImportedDescriptor) { + for _, index := range file.PublicDependency { + df := g.fileByName(file.Dependency[index]) + for _, d := range df.desc { + if d.GetOptions().GetMapEntry() { + continue + } + sl = append(sl, &ImportedDescriptor{common{file}, d}) + } + for _, e := range df.enum { + sl = append(sl, &ImportedDescriptor{common{file}, e}) + } + for _, ext := range df.ext { + sl = append(sl, &ImportedDescriptor{common{file}, ext}) + } + } + return +} + +func extractComments(file *FileDescriptor) { + file.comments = make(map[string]*descriptor.SourceCodeInfo_Location) + for _, loc := range file.GetSourceCodeInfo().GetLocation() { + if loc.LeadingComments == nil { + continue + } + var p []string + for _, n := range loc.Path { + p = append(p, strconv.Itoa(int(n))) + } + file.comments[strings.Join(p, ",")] = loc + } +} + +// BuildTypeNameMap builds the map from fully qualified type names to objects. 
+// The key names for the map come from the input data, which puts a period at the beginning.
+// It should be called after SetPackageNames and before GenerateAllFiles.
+func (g *Generator) BuildTypeNameMap() {
+	g.typeNameToObject = make(map[string]Object)
+	for _, f := range g.allFiles {
+		// The names in this loop are defined by the proto world, not us, so the
+		// package name may be empty. If so, the dotted package name of X will
+		// be ".X"; otherwise it will be ".pkg.X".
+		dottedPkg := "." + f.GetPackage()
+		if dottedPkg != "." {
+			dottedPkg += "."
+		}
+		for _, enum := range f.enum {
+			name := dottedPkg + dottedSlice(enum.TypeName())
+			g.typeNameToObject[name] = enum
+		}
+		for _, desc := range f.desc {
+			name := dottedPkg + dottedSlice(desc.TypeName())
+			g.typeNameToObject[name] = desc
+		}
+	}
+}
+
+// ObjectNamed, given a fully-qualified input type name as it appears in the input data,
+// returns the descriptor for the message or enum with that name.
+func (g *Generator) ObjectNamed(typeName string) Object {
+	o, ok := g.typeNameToObject[typeName]
+	if !ok {
+		g.Fail("can't find object with type", typeName)
+	}
+
+	// If the file of this object isn't a direct dependency of the current file,
+	// or in the current file, then this object has been publicly imported into
+	// a dependency of the current file.
+	// We should return the ImportedDescriptor object for it instead.
+	direct := *o.File().Name == *g.file.Name
+	if !direct {
+		for _, dep := range g.file.Dependency {
+			if *g.fileByName(dep).Name == *o.File().Name {
+				direct = true
+				break
+			}
+		}
+	}
+	if !direct {
+		found := false
+	Loop:
+		for _, dep := range g.file.Dependency {
+			df := g.fileByName(*g.fileByName(dep).Name)
+			for _, td := range df.imp {
+				if td.o == o {
+					// Found it!
+					o = td
+					found = true
+					break Loop
+				}
+			}
+		}
+		if !found {
+			log.Printf("protoc-gen-go: WARNING: failed finding publicly imported dependency for %v, used in %v", typeName, *g.file.Name)
+		}
+	}
+
+	return o
+}
+
+// P prints the arguments to the generated output. It handles strings and int32s, plus
+// handling indirections because they may be *string, etc.
+func (g *Generator) P(str ...interface{}) {
+	if !g.writeOutput {
+		return
+	}
+	g.WriteString(g.indent)
+	for _, v := range str {
+		switch s := v.(type) {
+		case string:
+			g.WriteString(s)
+		case *string:
+			g.WriteString(*s)
+		case bool:
+			fmt.Fprintf(g, "%t", s)
+		case *bool:
+			fmt.Fprintf(g, "%t", *s)
+		case int:
+			fmt.Fprintf(g, "%d", s)
+		case *int32:
+			fmt.Fprintf(g, "%d", *s)
+		case *int64:
+			fmt.Fprintf(g, "%d", *s)
+		case float64:
+			fmt.Fprintf(g, "%g", s)
+		case *float64:
+			fmt.Fprintf(g, "%g", *s)
+		default:
+			g.Fail(fmt.Sprintf("unknown type in printer: %T", v))
+		}
+	}
+	g.WriteByte('\n')
+}
+
+// addInitf stores the given statement to be printed inside the file's init function.
+// The statement is given as a format specifier and arguments.
+func (g *Generator) addInitf(stmt string, a ...interface{}) {
+	g.init = append(g.init, fmt.Sprintf(stmt, a...))
+}
+
+// In indents the output one tab stop.
+func (g *Generator) In() { g.indent += "\t" }
+
+// Out unindents the output one tab stop.
+func (g *Generator) Out() {
+	if len(g.indent) > 0 {
+		g.indent = g.indent[1:]
+	}
+}
+
+// GenerateAllFiles generates the output for all the files we're outputting.
+func (g *Generator) GenerateAllFiles() {
+	// Initialize the plugins
+	for _, p := range plugins {
+		p.Init(g)
+	}
+	// Generate the output.
+	// The generator runs for every file, even the files that we don't
+	// generate output for, so that we can collate the full list of exported
+	// symbols to support public imports.
+	genFileMap := make(map[*FileDescriptor]bool, len(g.genFiles))
+	for _, file := range g.genFiles {
+		genFileMap[file] = true
+	}
+	for _, file := range g.allFiles {
+		g.Reset()
+		g.writeOutput = genFileMap[file]
+		g.generate(file)
+		if !g.writeOutput {
+			continue
+		}
+		g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{
+			Name:    proto.String(file.goFileName()),
+			Content: proto.String(g.String()),
+		})
+	}
+}
+
+// Run all the plugins associated with the file.
+func (g *Generator) runPlugins(file *FileDescriptor) {
+	for _, p := range plugins {
+		p.Generate(file)
+	}
+}
+
+// FileOf returns the FileDescriptor for this FileDescriptorProto.
+func (g *Generator) FileOf(fd *descriptor.FileDescriptorProto) *FileDescriptor {
+	for _, file := range g.allFiles {
+		if file.FileDescriptorProto == fd {
+			return file
+		}
+	}
+	g.Fail("could not find file in table:", fd.GetName())
+	return nil
+}
+
+// Fill the response protocol buffer with the generated output for all the files we're
+// supposed to generate.
+func (g *Generator) generate(file *FileDescriptor) {
+	g.file = g.FileOf(file.FileDescriptorProto)
+	g.usedPackages = make(map[string]bool)
+
+	if g.file.index == 0 {
+		// For one file in the package, assert version compatibility.
+		g.P("// This is a compile-time assertion to ensure that this generated file")
+		g.P("// is compatible with the proto package it is being compiled against.")
+		g.P("// A compilation error at this line likely means your copy of the")
+		g.P("// proto package needs to be updated.")
+		g.P("const _ = ", g.Pkg["proto"], ".ProtoPackageIsVersion", generatedCodeVersion, " // please upgrade the proto package")
+		g.P()
+	}
+	for _, td := range g.file.imp {
+		g.generateImported(td)
+	}
+	for _, enum := range g.file.enum {
+		g.generateEnum(enum)
+	}
+	for _, desc := range g.file.desc {
+		// Don't generate virtual messages for maps.
+		if desc.GetOptions().GetMapEntry() {
+			continue
+		}
+		g.generateMessage(desc)
+	}
+	for _, ext := range g.file.ext {
+		g.generateExtension(ext)
+	}
+	g.generateInitFunction()
+
+	// Run the plugins before the imports so we know which imports are necessary.
+	g.runPlugins(file)
+
+	g.generateFileDescriptor(file)
+
+	// Generate header and imports last, though they appear first in the output.
+	rem := g.Buffer
+	g.Buffer = new(bytes.Buffer)
+	g.generateHeader()
+	g.generateImports()
+	if !g.writeOutput {
+		return
+	}
+	g.Write(rem.Bytes())
+
+	// Reformat generated code.
+	fset := token.NewFileSet()
+	raw := g.Bytes()
+	ast, err := parser.ParseFile(fset, "", g, parser.ParseComments)
+	if err != nil {
+		// Print out the bad code with line numbers.
+		// This should never happen in practice, but it can while changing generated code,
+		// so consider this a debugging aid.
+ var src bytes.Buffer + s := bufio.NewScanner(bytes.NewReader(raw)) + for line := 1; s.Scan(); line++ { + fmt.Fprintf(&src, "%5d\t%s\n", line, s.Bytes()) + } + g.Fail("bad Go source code was generated:", err.Error(), "\n"+src.String()) + } + g.Reset() + err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(g, fset, ast) + if err != nil { + g.Fail("generated Go source code could not be reformatted:", err.Error()) + } +} + +// Generate the header, including package definition +func (g *Generator) generateHeader() { + g.P("// Code generated by protoc-gen-go. DO NOT EDIT.") + g.P("// source: ", g.file.Name) + g.P() + + name := g.file.PackageName() + + if g.file.index == 0 { + // Generate package docs for the first file in the package. + g.P("/*") + g.P("Package ", name, " is a generated protocol buffer package.") + g.P() + if loc, ok := g.file.comments[strconv.Itoa(packagePath)]; ok { + // not using g.PrintComments because this is a /* */ comment block. + text := strings.TrimSuffix(loc.GetLeadingComments(), "\n") + for _, line := range strings.Split(text, "\n") { + line = strings.TrimPrefix(line, " ") + // ensure we don't escape from the block comment + line = strings.Replace(line, "*/", "* /", -1) + g.P(line) + } + g.P() + } + var topMsgs []string + g.P("It is generated from these files:") + for _, f := range g.genFiles { + g.P("\t", f.Name) + for _, msg := range f.desc { + if msg.parent != nil { + continue + } + topMsgs = append(topMsgs, CamelCaseSlice(msg.TypeName())) + } + } + g.P() + g.P("It has these top-level messages:") + for _, msg := range topMsgs { + g.P("\t", msg) + } + g.P("*/") + } + + g.P("package ", name) + g.P() +} + +// PrintComments prints any comments from the source .proto file. +// The path is a comma-separated list of integers. +// It returns an indication of whether any comments were printed. +// See descriptor.proto for its format. +func (g *Generator) PrintComments(path string) bool { + if !g.writeOutput { + return false + } + if loc, ok := g.file.comments[path]; ok { + text := strings.TrimSuffix(loc.GetLeadingComments(), "\n") + for _, line := range strings.Split(text, "\n") { + g.P("// ", strings.TrimPrefix(line, " ")) + } + return true + } + return false +} + +func (g *Generator) fileByName(filename string) *FileDescriptor { + return g.allFilesByName[filename] +} + +// weak returns whether the ith import of the current file is a weak import. +func (g *Generator) weak(i int32) bool { + for _, j := range g.file.WeakDependency { + if j == i { + return true + } + } + return false +} + +// Generate the imports +func (g *Generator) generateImports() { + // We almost always need a proto import. Rather than computing when we + // do, which is tricky when there's a plugin, just import it and + // reference it later. The same argument applies to the fmt and math packages. + g.P("import " + g.Pkg["proto"] + " " + strconv.Quote(g.ImportPrefix+"github.com/golang/protobuf/proto")) + g.P("import " + g.Pkg["fmt"] + ` "fmt"`) + g.P("import " + g.Pkg["math"] + ` "math"`) + for i, s := range g.file.Dependency { + fd := g.fileByName(s) + // Do not import our own package. + if fd.PackageName() == g.packageName { + continue + } + filename := fd.goFileName() + // By default, import path is the dirname of the Go filename. + importPath := path.Dir(filename) + if substitution, ok := g.ImportMap[s]; ok { + importPath = substitution + } + importPath = g.ImportPrefix + importPath + // Skip weak imports. 
+ if g.weak(int32(i)) { + g.P("// skipping weak import ", fd.PackageName(), " ", strconv.Quote(importPath)) + continue + } + // We need to import all the dependencies, even if we don't reference them, + // because other code and tools depend on having the full transitive closure + // of protocol buffer types in the binary. + pname := fd.PackageName() + if _, ok := g.usedPackages[pname]; !ok { + pname = "_" + } + g.P("import ", pname, " ", strconv.Quote(importPath)) + } + g.P() + // TODO: may need to worry about uniqueness across plugins + for _, p := range plugins { + p.GenerateImports(g.file) + g.P() + } + g.P("// Reference imports to suppress errors if they are not otherwise used.") + g.P("var _ = ", g.Pkg["proto"], ".Marshal") + g.P("var _ = ", g.Pkg["fmt"], ".Errorf") + g.P("var _ = ", g.Pkg["math"], ".Inf") + g.P() +} + +func (g *Generator) generateImported(id *ImportedDescriptor) { + // Don't generate public import symbols for files that we are generating + // code for, since those symbols will already be in this package. + // We can't simply avoid creating the ImportedDescriptor objects, + // because g.genFiles isn't populated at that stage. + tn := id.TypeName() + sn := tn[len(tn)-1] + df := g.FileOf(id.o.File()) + filename := *df.Name + for _, fd := range g.genFiles { + if *fd.Name == filename { + g.P("// Ignoring public import of ", sn, " from ", filename) + g.P() + return + } + } + g.P("// ", sn, " from public import ", filename) + g.usedPackages[df.PackageName()] = true + + for _, sym := range df.exported[id.o] { + sym.GenerateAlias(g, df.PackageName()) + } + + g.P() +} + +// Generate the enum definitions for this EnumDescriptor. +func (g *Generator) generateEnum(enum *EnumDescriptor) { + // The full type name + typeName := enum.TypeName() + // The full type name, CamelCased. 
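+	// (Editor's sketch, assumed enum: for a top-level proto2 enum
+	//
+	//	enum Color { COLOR_RED = 0; }
+	//
+	// the calls below emit approximately
+	//
+	//	type Color int32
+	//	const ( Color_COLOR_RED Color = 0 )
+	//	var Color_name = map[int32]string{0: "COLOR_RED"}
+	//	var Color_value = map[string]int32{"COLOR_RED": 0}
+	//
+	// ccTypeName is that CamelCased Go name, "Color".)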
+ ccTypeName := CamelCaseSlice(typeName) + ccPrefix := enum.prefix() + + g.PrintComments(enum.path) + g.P("type ", ccTypeName, " int32") + g.file.addExport(enum, enumSymbol{ccTypeName, enum.proto3()}) + g.P("const (") + g.In() + for i, e := range enum.Value { + g.PrintComments(fmt.Sprintf("%s,%d,%d", enum.path, enumValuePath, i)) + + name := ccPrefix + *e.Name + g.P(name, " ", ccTypeName, " = ", e.Number) + g.file.addExport(enum, constOrVarSymbol{name, "const", ccTypeName}) + } + g.Out() + g.P(")") + g.P("var ", ccTypeName, "_name = map[int32]string{") + g.In() + generated := make(map[int32]bool) // avoid duplicate values + for _, e := range enum.Value { + duplicate := "" + if _, present := generated[*e.Number]; present { + duplicate = "// Duplicate value: " + } + g.P(duplicate, e.Number, ": ", strconv.Quote(*e.Name), ",") + generated[*e.Number] = true + } + g.Out() + g.P("}") + g.P("var ", ccTypeName, "_value = map[string]int32{") + g.In() + for _, e := range enum.Value { + g.P(strconv.Quote(*e.Name), ": ", e.Number, ",") + } + g.Out() + g.P("}") + + if !enum.proto3() { + g.P("func (x ", ccTypeName, ") Enum() *", ccTypeName, " {") + g.In() + g.P("p := new(", ccTypeName, ")") + g.P("*p = x") + g.P("return p") + g.Out() + g.P("}") + } + + g.P("func (x ", ccTypeName, ") String() string {") + g.In() + g.P("return ", g.Pkg["proto"], ".EnumName(", ccTypeName, "_name, int32(x))") + g.Out() + g.P("}") + + if !enum.proto3() { + g.P("func (x *", ccTypeName, ") UnmarshalJSON(data []byte) error {") + g.In() + g.P("value, err := ", g.Pkg["proto"], ".UnmarshalJSONEnum(", ccTypeName, `_value, data, "`, ccTypeName, `")`) + g.P("if err != nil {") + g.In() + g.P("return err") + g.Out() + g.P("}") + g.P("*x = ", ccTypeName, "(value)") + g.P("return nil") + g.Out() + g.P("}") + } + + var indexes []string + for m := enum.parent; m != nil; m = m.parent { + // XXX: skip groups? + indexes = append([]string{strconv.Itoa(m.index)}, indexes...) + } + indexes = append(indexes, strconv.Itoa(enum.index)) + g.P("func (", ccTypeName, ") EnumDescriptor() ([]byte, []int) { return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "} }") + if enum.file.GetPackage() == "google.protobuf" && enum.GetName() == "NullValue" { + g.P("func (", ccTypeName, `) XXX_WellKnownType() string { return "`, enum.GetName(), `" }`) + } + + g.P() +} + +// The tag is a string like "varint,2,opt,name=fieldname,def=7" that +// identifies details of the field for the protocol buffer marshaling and unmarshaling +// code. The fields are: +// wire encoding +// protocol tag number +// opt,req,rep for optional, required, or repeated +// packed whether the encoding is "packed" (optional; repeated primitives only) +// name= the original declared name +// enum= the name of the enum type if it is an enum-typed field. +// proto3 if this field is in a proto3 message +// def= string representation of the default value, if any. +// The default value must be in a representation that can be used at run-time +// to generate the default value. Thus bools become 0 and 1, for instance. +func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptorProto, wiretype string) string { + optrepreq := "" + switch { + case isOptional(field): + optrepreq = "opt" + case isRequired(field): + optrepreq = "req" + case isRepeated(field): + optrepreq = "rep" + } + var defaultValue string + if dv := field.DefaultValue; dv != nil { // set means an explicit default + defaultValue = *dv + // Some types need tweaking. 
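+	// (Editor's example, assumed field names: a bool default of "true" becomes
+	// def=1, and an enum default named COLOR_RED is rewritten to its integer
+	// value, so a finished tag may read "varint,2,opt,name=color,enum=pkg.Color,def=0".)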
+ switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_BOOL: + if defaultValue == "true" { + defaultValue = "1" + } else { + defaultValue = "0" + } + case descriptor.FieldDescriptorProto_TYPE_STRING, + descriptor.FieldDescriptorProto_TYPE_BYTES: + // Nothing to do. Quoting is done for the whole tag. + case descriptor.FieldDescriptorProto_TYPE_ENUM: + // For enums we need to provide the integer constant. + obj := g.ObjectNamed(field.GetTypeName()) + if id, ok := obj.(*ImportedDescriptor); ok { + // It is an enum that was publicly imported. + // We need the underlying type. + obj = id.o + } + enum, ok := obj.(*EnumDescriptor) + if !ok { + log.Printf("obj is a %T", obj) + if id, ok := obj.(*ImportedDescriptor); ok { + log.Printf("id.o is a %T", id.o) + } + g.Fail("unknown enum type", CamelCaseSlice(obj.TypeName())) + } + defaultValue = enum.integerValueAsString(defaultValue) + } + defaultValue = ",def=" + defaultValue + } + enum := "" + if *field.Type == descriptor.FieldDescriptorProto_TYPE_ENUM { + // We avoid using obj.PackageName(), because we want to use the + // original (proto-world) package name. + obj := g.ObjectNamed(field.GetTypeName()) + if id, ok := obj.(*ImportedDescriptor); ok { + obj = id.o + } + enum = ",enum=" + if pkg := obj.File().GetPackage(); pkg != "" { + enum += pkg + "." + } + enum += CamelCaseSlice(obj.TypeName()) + } + packed := "" + if (field.Options != nil && field.Options.GetPacked()) || + // Per https://developers.google.com/protocol-buffers/docs/proto3#simple: + // "In proto3, repeated fields of scalar numeric types use packed encoding by default." + (message.proto3() && (field.Options == nil || field.Options.Packed == nil) && + isRepeated(field) && isScalar(field)) { + packed = ",packed" + } + fieldName := field.GetName() + name := fieldName + if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP { + // We must use the type name for groups instead of + // the field name to preserve capitalization. + // type_name in FieldDescriptorProto is fully-qualified, + // but we only want the local part. + name = *field.TypeName + if i := strings.LastIndex(name, "."); i >= 0 { + name = name[i+1:] + } + } + if json := field.GetJsonName(); json != "" && json != name { + // TODO: escaping might be needed, in which case + // perhaps this should be in its own "json" tag. + name += ",json=" + json + } + name = ",name=" + name + if message.proto3() { + // We only need the extra tag for []byte fields; + // no need to add noise for the others. + if *field.Type == descriptor.FieldDescriptorProto_TYPE_BYTES { + name += ",proto3" + } + + } + oneof := "" + if field.OneofIndex != nil { + oneof = ",oneof" + } + return strconv.Quote(fmt.Sprintf("%s,%d,%s%s%s%s%s%s", + wiretype, + field.GetNumber(), + optrepreq, + packed, + name, + enum, + oneof, + defaultValue)) +} + +func needsStar(typ descriptor.FieldDescriptorProto_Type) bool { + switch typ { + case descriptor.FieldDescriptorProto_TYPE_GROUP: + return false + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + return false + case descriptor.FieldDescriptorProto_TYPE_BYTES: + return false + } + return true +} + +// TypeName is the printed name appropriate for an item. If the object is in the current file, +// TypeName drops the package name and underscores the rest. +// Otherwise the object is from another package; and the result is the underscored +// package name followed by the item name. +// The result always has an initial capital. 
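+// (Editor's example, assumed names: a nested message Outer.Inner in the
+// current file yields "Outer_Inner"; the same message from another package
+// "pkg" yields "pkg.Outer_Inner".)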
+func (g *Generator) TypeName(obj Object) string { + return g.DefaultPackageName(obj) + CamelCaseSlice(obj.TypeName()) +} + +// TypeNameWithPackage is like TypeName, but always includes the package +// name even if the object is in our own package. +func (g *Generator) TypeNameWithPackage(obj Object) string { + return obj.PackageName() + CamelCaseSlice(obj.TypeName()) +} + +// GoType returns a string representing the type name, and the wire type +func (g *Generator) GoType(message *Descriptor, field *descriptor.FieldDescriptorProto) (typ string, wire string) { + // TODO: Options. + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + typ, wire = "float64", "fixed64" + case descriptor.FieldDescriptorProto_TYPE_FLOAT: + typ, wire = "float32", "fixed32" + case descriptor.FieldDescriptorProto_TYPE_INT64: + typ, wire = "int64", "varint" + case descriptor.FieldDescriptorProto_TYPE_UINT64: + typ, wire = "uint64", "varint" + case descriptor.FieldDescriptorProto_TYPE_INT32: + typ, wire = "int32", "varint" + case descriptor.FieldDescriptorProto_TYPE_UINT32: + typ, wire = "uint32", "varint" + case descriptor.FieldDescriptorProto_TYPE_FIXED64: + typ, wire = "uint64", "fixed64" + case descriptor.FieldDescriptorProto_TYPE_FIXED32: + typ, wire = "uint32", "fixed32" + case descriptor.FieldDescriptorProto_TYPE_BOOL: + typ, wire = "bool", "varint" + case descriptor.FieldDescriptorProto_TYPE_STRING: + typ, wire = "string", "bytes" + case descriptor.FieldDescriptorProto_TYPE_GROUP: + desc := g.ObjectNamed(field.GetTypeName()) + typ, wire = "*"+g.TypeName(desc), "group" + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + desc := g.ObjectNamed(field.GetTypeName()) + typ, wire = "*"+g.TypeName(desc), "bytes" + case descriptor.FieldDescriptorProto_TYPE_BYTES: + typ, wire = "[]byte", "bytes" + case descriptor.FieldDescriptorProto_TYPE_ENUM: + desc := g.ObjectNamed(field.GetTypeName()) + typ, wire = g.TypeName(desc), "varint" + case descriptor.FieldDescriptorProto_TYPE_SFIXED32: + typ, wire = "int32", "fixed32" + case descriptor.FieldDescriptorProto_TYPE_SFIXED64: + typ, wire = "int64", "fixed64" + case descriptor.FieldDescriptorProto_TYPE_SINT32: + typ, wire = "int32", "zigzag32" + case descriptor.FieldDescriptorProto_TYPE_SINT64: + typ, wire = "int64", "zigzag64" + default: + g.Fail("unknown type for", field.GetName()) + } + if isRepeated(field) { + typ = "[]" + typ + } else if message != nil && message.proto3() { + return + } else if field.OneofIndex != nil && message != nil { + return + } else if needsStar(*field.Type) { + typ = "*" + typ + } + return +} + +func (g *Generator) RecordTypeUse(t string) { + if obj, ok := g.typeNameToObject[t]; ok { + // Call ObjectNamed to get the true object to record the use. + obj = g.ObjectNamed(t) + g.usedPackages[obj.PackageName()] = true + } +} + +// Method names that may be generated. Fields with these names get an +// underscore appended. Any change to this set is a potential incompatible +// API change because it changes generated field names. +var methodNames = [...]string{ + "Reset", + "String", + "ProtoMessage", + "Marshal", + "Unmarshal", + "ExtensionRangeArray", + "ExtensionMap", + "Descriptor", +} + +// Names of messages in the `google.protobuf` package for which +// we will generate XXX_WellKnownType methods. 
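+// (Editor's note: e.g. the generated Timestamp type gains
+//
+//	func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
+//
+// as emitted in generateMessage below.)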
+var wellKnownTypes = map[string]bool{ + "Any": true, + "Duration": true, + "Empty": true, + "Struct": true, + "Timestamp": true, + + "Value": true, + "ListValue": true, + "DoubleValue": true, + "FloatValue": true, + "Int64Value": true, + "UInt64Value": true, + "Int32Value": true, + "UInt32Value": true, + "BoolValue": true, + "StringValue": true, + "BytesValue": true, +} + +// Generate the type and default constant definitions for this Descriptor. +func (g *Generator) generateMessage(message *Descriptor) { + // The full type name + typeName := message.TypeName() + // The full type name, CamelCased. + ccTypeName := CamelCaseSlice(typeName) + + usedNames := make(map[string]bool) + for _, n := range methodNames { + usedNames[n] = true + } + fieldNames := make(map[*descriptor.FieldDescriptorProto]string) + fieldGetterNames := make(map[*descriptor.FieldDescriptorProto]string) + fieldTypes := make(map[*descriptor.FieldDescriptorProto]string) + mapFieldTypes := make(map[*descriptor.FieldDescriptorProto]string) + + oneofFieldName := make(map[int32]string) // indexed by oneof_index field of FieldDescriptorProto + oneofDisc := make(map[int32]string) // name of discriminator method + oneofTypeName := make(map[*descriptor.FieldDescriptorProto]string) // without star + oneofInsertPoints := make(map[int32]int) // oneof_index => offset of g.Buffer + + g.PrintComments(message.path) + g.P("type ", ccTypeName, " struct {") + g.In() + + // allocNames finds a conflict-free variation of the given strings, + // consistently mutating their suffixes. + // It returns the same number of strings. + allocNames := func(ns ...string) []string { + Loop: + for { + for _, n := range ns { + if usedNames[n] { + for i := range ns { + ns[i] += "_" + } + continue Loop + } + } + for _, n := range ns { + usedNames[n] = true + } + return ns + } + } + + for i, field := range message.Field { + // Allocate the getter and the field at the same time so name + // collisions create field/method consistent names. + // TODO: This allocation occurs based on the order of the fields + // in the proto file, meaning that a change in the field + // ordering can change generated Method/Field names. + base := CamelCase(*field.Name) + ns := allocNames(base, "Get"+base) + fieldName, fieldGetterName := ns[0], ns[1] + typename, wiretype := g.GoType(message, field) + jsonName := *field.Name + tag := fmt.Sprintf("protobuf:%s json:%q", g.goTag(message, field, wiretype), jsonName+",omitempty") + + fieldNames[field] = fieldName + fieldGetterNames[field] = fieldGetterName + + oneof := field.OneofIndex != nil + if oneof && oneofFieldName[*field.OneofIndex] == "" { + odp := message.OneofDecl[int(*field.OneofIndex)] + fname := allocNames(CamelCase(odp.GetName()))[0] + + // This is the first field of a oneof we haven't seen before. + // Generate the union field. + com := g.PrintComments(fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, *field.OneofIndex)) + if com { + g.P("//") + } + g.P("// Types that are valid to be assigned to ", fname, ":") + // Generate the rest of this comment later, + // when we've computed any disambiguation. 
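+			// (Editor's sketch, assumed message Profile with
+			//
+			//	oneof avatar { string image_url = 1; bytes image_data = 2; }
+			//
+			// the struct gets a single interface-typed field,
+			//
+			//	Avatar isProfile_Avatar `protobuf_oneof:"avatar"`
+			//
+			// and the per-field wrapper types Profile_ImageUrl and
+			// Profile_ImageData are generated further below.)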
+ oneofInsertPoints[*field.OneofIndex] = g.Buffer.Len() + + dname := "is" + ccTypeName + "_" + fname + oneofFieldName[*field.OneofIndex] = fname + oneofDisc[*field.OneofIndex] = dname + tag := `protobuf_oneof:"` + odp.GetName() + `"` + g.P(fname, " ", dname, " `", tag, "`") + } + + if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE { + desc := g.ObjectNamed(field.GetTypeName()) + if d, ok := desc.(*Descriptor); ok && d.GetOptions().GetMapEntry() { + // Figure out the Go types and tags for the key and value types. + keyField, valField := d.Field[0], d.Field[1] + keyType, keyWire := g.GoType(d, keyField) + valType, valWire := g.GoType(d, valField) + keyTag, valTag := g.goTag(d, keyField, keyWire), g.goTag(d, valField, valWire) + + // We don't use stars, except for message-typed values. + // Message and enum types are the only two possibly foreign types used in maps, + // so record their use. They are not permitted as map keys. + keyType = strings.TrimPrefix(keyType, "*") + switch *valField.Type { + case descriptor.FieldDescriptorProto_TYPE_ENUM: + valType = strings.TrimPrefix(valType, "*") + g.RecordTypeUse(valField.GetTypeName()) + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + g.RecordTypeUse(valField.GetTypeName()) + default: + valType = strings.TrimPrefix(valType, "*") + } + + typename = fmt.Sprintf("map[%s]%s", keyType, valType) + mapFieldTypes[field] = typename // record for the getter generation + + tag += fmt.Sprintf(" protobuf_key:%s protobuf_val:%s", keyTag, valTag) + } + } + + fieldTypes[field] = typename + + if oneof { + tname := ccTypeName + "_" + fieldName + // It is possible for this to collide with a message or enum + // nested in this message. Check for collisions. + for { + ok := true + for _, desc := range message.nested { + if CamelCaseSlice(desc.TypeName()) == tname { + ok = false + break + } + } + for _, enum := range message.enums { + if CamelCaseSlice(enum.TypeName()) == tname { + ok = false + break + } + } + if !ok { + tname += "_" + continue + } + break + } + + oneofTypeName[field] = tname + continue + } + + g.PrintComments(fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i)) + g.P(fieldName, "\t", typename, "\t`", tag, "`") + g.RecordTypeUse(field.GetTypeName()) + } + if len(message.ExtensionRange) > 0 { + g.P(g.Pkg["proto"], ".XXX_InternalExtensions `json:\"-\"`") + } + if !message.proto3() { + g.P("XXX_unrecognized\t[]byte `json:\"-\"`") + } + g.Out() + g.P("}") + + // Update g.Buffer to list valid oneof types. + // We do this down here, after we've disambiguated the oneof type names. + // We go in reverse order of insertion point to avoid invalidating offsets. + for oi := int32(len(message.OneofDecl)); oi >= 0; oi-- { + ip := oneofInsertPoints[oi] + all := g.Buffer.Bytes() + rem := all[ip:] + g.Buffer = bytes.NewBuffer(all[:ip:ip]) // set cap so we don't scribble on rem + for _, field := range message.Field { + if field.OneofIndex == nil || *field.OneofIndex != oi { + continue + } + g.P("//\t*", oneofTypeName[field]) + } + g.Buffer.Write(rem) + } + + // Reset, String and ProtoMessage methods. + g.P("func (m *", ccTypeName, ") Reset() { *m = ", ccTypeName, "{} }") + g.P("func (m *", ccTypeName, ") String() string { return ", g.Pkg["proto"], ".CompactTextString(m) }") + g.P("func (*", ccTypeName, ") ProtoMessage() {}") + var indexes []string + for m := message; m != nil; m = m.parent { + indexes = append([]string{strconv.Itoa(m.index)}, indexes...) 
+ } + g.P("func (*", ccTypeName, ") Descriptor() ([]byte, []int) { return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "} }") + // TODO: Revisit the decision to use a XXX_WellKnownType method + // if we change proto.MessageName to work with multiple equivalents. + if message.file.GetPackage() == "google.protobuf" && wellKnownTypes[message.GetName()] { + g.P("func (*", ccTypeName, `) XXX_WellKnownType() string { return "`, message.GetName(), `" }`) + } + + // Extension support methods + var hasExtensions, isMessageSet bool + if len(message.ExtensionRange) > 0 { + hasExtensions = true + // message_set_wire_format only makes sense when extensions are defined. + if opts := message.Options; opts != nil && opts.GetMessageSetWireFormat() { + isMessageSet = true + g.P() + g.P("func (m *", ccTypeName, ") Marshal() ([]byte, error) {") + g.In() + g.P("return ", g.Pkg["proto"], ".MarshalMessageSet(&m.XXX_InternalExtensions)") + g.Out() + g.P("}") + g.P("func (m *", ccTypeName, ") Unmarshal(buf []byte) error {") + g.In() + g.P("return ", g.Pkg["proto"], ".UnmarshalMessageSet(buf, &m.XXX_InternalExtensions)") + g.Out() + g.P("}") + g.P("func (m *", ccTypeName, ") MarshalJSON() ([]byte, error) {") + g.In() + g.P("return ", g.Pkg["proto"], ".MarshalMessageSetJSON(&m.XXX_InternalExtensions)") + g.Out() + g.P("}") + g.P("func (m *", ccTypeName, ") UnmarshalJSON(buf []byte) error {") + g.In() + g.P("return ", g.Pkg["proto"], ".UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions)") + g.Out() + g.P("}") + g.P("// ensure ", ccTypeName, " satisfies proto.Marshaler and proto.Unmarshaler") + g.P("var _ ", g.Pkg["proto"], ".Marshaler = (*", ccTypeName, ")(nil)") + g.P("var _ ", g.Pkg["proto"], ".Unmarshaler = (*", ccTypeName, ")(nil)") + } + + g.P() + g.P("var extRange_", ccTypeName, " = []", g.Pkg["proto"], ".ExtensionRange{") + g.In() + for _, r := range message.ExtensionRange { + end := fmt.Sprint(*r.End - 1) // make range inclusive on both ends + g.P("{", r.Start, ", ", end, "},") + } + g.Out() + g.P("}") + g.P("func (*", ccTypeName, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange {") + g.In() + g.P("return extRange_", ccTypeName) + g.Out() + g.P("}") + } + + // Default constants + defNames := make(map[*descriptor.FieldDescriptorProto]string) + for _, field := range message.Field { + def := field.GetDefaultValue() + if def == "" { + continue + } + fieldname := "Default_" + ccTypeName + "_" + CamelCase(*field.Name) + defNames[field] = fieldname + typename, _ := g.GoType(message, field) + if typename[0] == '*' { + typename = typename[1:] + } + kind := "const " + switch { + case typename == "bool": + case typename == "string": + def = strconv.Quote(def) + case typename == "[]byte": + def = "[]byte(" + strconv.Quote(unescape(def)) + ")" + kind = "var " + case def == "inf", def == "-inf", def == "nan": + // These names are known to, and defined by, the protocol language. + switch def { + case "inf": + def = "math.Inf(1)" + case "-inf": + def = "math.Inf(-1)" + case "nan": + def = "math.NaN()" + } + if *field.Type == descriptor.FieldDescriptorProto_TYPE_FLOAT { + def = "float32(" + def + ")" + } + kind = "var " + case *field.Type == descriptor.FieldDescriptorProto_TYPE_ENUM: + // Must be an enum. Need to construct the prefixed name. + obj := g.ObjectNamed(field.GetTypeName()) + var enum *EnumDescriptor + if id, ok := obj.(*ImportedDescriptor); ok { + // The enum type has been publicly imported. 
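+				// (Editor's example, assumed field: "optional int32 count = 1 [default = 7];"
+				// in message Foo is emitted as
+				//
+				//	const Default_Foo_Count int32 = 7
+				//
+				// For the publicly-imported enum case, the underlying
+				// descriptor is unwrapped next.)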
+ enum, _ = id.o.(*EnumDescriptor) + } else { + enum, _ = obj.(*EnumDescriptor) + } + if enum == nil { + log.Printf("don't know how to generate constant for %s", fieldname) + continue + } + def = g.DefaultPackageName(obj) + enum.prefix() + def + } + g.P(kind, fieldname, " ", typename, " = ", def) + g.file.addExport(message, constOrVarSymbol{fieldname, kind, ""}) + } + g.P() + + // Oneof per-field types, discriminants and getters. + // + // Generate unexported named types for the discriminant interfaces. + // We shouldn't have to do this, but there was (~19 Aug 2015) a compiler/linker bug + // that was triggered by using anonymous interfaces here. + // TODO: Revisit this and consider reverting back to anonymous interfaces. + for oi := range message.OneofDecl { + dname := oneofDisc[int32(oi)] + g.P("type ", dname, " interface { ", dname, "() }") + } + g.P() + for _, field := range message.Field { + if field.OneofIndex == nil { + continue + } + _, wiretype := g.GoType(message, field) + tag := "protobuf:" + g.goTag(message, field, wiretype) + g.P("type ", oneofTypeName[field], " struct{ ", fieldNames[field], " ", fieldTypes[field], " `", tag, "` }") + g.RecordTypeUse(field.GetTypeName()) + } + g.P() + for _, field := range message.Field { + if field.OneofIndex == nil { + continue + } + g.P("func (*", oneofTypeName[field], ") ", oneofDisc[*field.OneofIndex], "() {}") + } + g.P() + for oi := range message.OneofDecl { + fname := oneofFieldName[int32(oi)] + g.P("func (m *", ccTypeName, ") Get", fname, "() ", oneofDisc[int32(oi)], " {") + g.P("if m != nil { return m.", fname, " }") + g.P("return nil") + g.P("}") + } + g.P() + + // Field getters + var getters []getterSymbol + for _, field := range message.Field { + oneof := field.OneofIndex != nil + + fname := fieldNames[field] + typename, _ := g.GoType(message, field) + if t, ok := mapFieldTypes[field]; ok { + typename = t + } + mname := fieldGetterNames[field] + star := "" + if needsStar(*field.Type) && typename[0] == '*' { + typename = typename[1:] + star = "*" + } + + // Only export getter symbols for basic types, + // and for messages and enums in the same package. + // Groups are not exported. + // Foreign types can't be hoisted through a public import because + // the importer may not already be importing the defining .proto. + // As an example, imagine we have an import tree like this: + // A.proto -> B.proto -> C.proto + // If A publicly imports B, we need to generate the getters from B in A's output, + // but if one such getter returns something from C then we cannot do that + // because A is not importing C already. + var getter, genType bool + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_GROUP: + getter = false + case descriptor.FieldDescriptorProto_TYPE_MESSAGE, descriptor.FieldDescriptorProto_TYPE_ENUM: + // Only export getter if its return type is in this package. 
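+		// (Editor's sketch of a typical emitted getter, assuming a proto2
+		// message Foo with an optional string field "name" and no default:
+		//
+		//	func (m *Foo) GetName() string {
+		//		if m != nil && m.Name != nil {
+		//			return *m.Name
+		//		}
+		//		return ""
+		//	}
+		//
+		// The same-package check follows.)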
+ getter = g.ObjectNamed(field.GetTypeName()).PackageName() == message.PackageName() + genType = true + default: + getter = true + } + if getter { + getters = append(getters, getterSymbol{ + name: mname, + typ: typename, + typeName: field.GetTypeName(), + genType: genType, + }) + } + + g.P("func (m *", ccTypeName, ") "+mname+"() "+typename+" {") + g.In() + def, hasDef := defNames[field] + typeDefaultIsNil := false // whether this field type's default value is a literal nil unless specified + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_BYTES: + typeDefaultIsNil = !hasDef + case descriptor.FieldDescriptorProto_TYPE_GROUP, descriptor.FieldDescriptorProto_TYPE_MESSAGE: + typeDefaultIsNil = true + } + if isRepeated(field) { + typeDefaultIsNil = true + } + if typeDefaultIsNil && !oneof { + // A bytes field with no explicit default needs less generated code, + // as does a message or group field, or a repeated field. + g.P("if m != nil {") + g.In() + g.P("return m." + fname) + g.Out() + g.P("}") + g.P("return nil") + g.Out() + g.P("}") + g.P() + continue + } + if !oneof { + if message.proto3() { + g.P("if m != nil {") + } else { + g.P("if m != nil && m." + fname + " != nil {") + } + g.In() + g.P("return " + star + "m." + fname) + g.Out() + g.P("}") + } else { + uname := oneofFieldName[*field.OneofIndex] + tname := oneofTypeName[field] + g.P("if x, ok := m.Get", uname, "().(*", tname, "); ok {") + g.P("return x.", fname) + g.P("}") + } + if hasDef { + if *field.Type != descriptor.FieldDescriptorProto_TYPE_BYTES { + g.P("return " + def) + } else { + // The default is a []byte var. + // Make a copy when returning it to be safe. + g.P("return append([]byte(nil), ", def, "...)") + } + } else { + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_BOOL: + g.P("return false") + case descriptor.FieldDescriptorProto_TYPE_STRING: + g.P(`return ""`) + case descriptor.FieldDescriptorProto_TYPE_GROUP, + descriptor.FieldDescriptorProto_TYPE_MESSAGE, + descriptor.FieldDescriptorProto_TYPE_BYTES: + // This is only possible for oneof fields. + g.P("return nil") + case descriptor.FieldDescriptorProto_TYPE_ENUM: + // The default default for an enum is the first value in the enum, + // not zero. + obj := g.ObjectNamed(field.GetTypeName()) + var enum *EnumDescriptor + if id, ok := obj.(*ImportedDescriptor); ok { + // The enum type has been publicly imported. 
+ enum, _ = id.o.(*EnumDescriptor) + } else { + enum, _ = obj.(*EnumDescriptor) + } + if enum == nil { + log.Printf("don't know how to generate getter for %s", field.GetName()) + continue + } + if len(enum.Value) == 0 { + g.P("return 0 // empty enum") + } else { + first := enum.Value[0].GetName() + g.P("return ", g.DefaultPackageName(obj)+enum.prefix()+first) + } + default: + g.P("return 0") + } + } + g.Out() + g.P("}") + g.P() + } + + if !message.group { + ms := &messageSymbol{ + sym: ccTypeName, + hasExtensions: hasExtensions, + isMessageSet: isMessageSet, + hasOneof: len(message.OneofDecl) > 0, + getters: getters, + } + g.file.addExport(message, ms) + } + + // Oneof functions + if len(message.OneofDecl) > 0 { + fieldWire := make(map[*descriptor.FieldDescriptorProto]string) + + // method + enc := "_" + ccTypeName + "_OneofMarshaler" + dec := "_" + ccTypeName + "_OneofUnmarshaler" + size := "_" + ccTypeName + "_OneofSizer" + encSig := "(msg " + g.Pkg["proto"] + ".Message, b *" + g.Pkg["proto"] + ".Buffer) error" + decSig := "(msg " + g.Pkg["proto"] + ".Message, tag, wire int, b *" + g.Pkg["proto"] + ".Buffer) (bool, error)" + sizeSig := "(msg " + g.Pkg["proto"] + ".Message) (n int)" + + g.P("// XXX_OneofFuncs is for the internal use of the proto package.") + g.P("func (*", ccTypeName, ") XXX_OneofFuncs() (func", encSig, ", func", decSig, ", func", sizeSig, ", []interface{}) {") + g.P("return ", enc, ", ", dec, ", ", size, ", []interface{}{") + for _, field := range message.Field { + if field.OneofIndex == nil { + continue + } + g.P("(*", oneofTypeName[field], ")(nil),") + } + g.P("}") + g.P("}") + g.P() + + // marshaler + g.P("func ", enc, encSig, " {") + g.P("m := msg.(*", ccTypeName, ")") + for oi, odp := range message.OneofDecl { + g.P("// ", odp.GetName()) + fname := oneofFieldName[int32(oi)] + g.P("switch x := m.", fname, ".(type) {") + for _, field := range message.Field { + if field.OneofIndex == nil || int(*field.OneofIndex) != oi { + continue + } + g.P("case *", oneofTypeName[field], ":") + var wire, pre, post string + val := "x." + fieldNames[field] // overridden for TYPE_BOOL + canFail := false // only TYPE_MESSAGE and TYPE_GROUP can fail + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + wire = "WireFixed64" + pre = "b.EncodeFixed64(" + g.Pkg["math"] + ".Float64bits(" + post = "))" + case descriptor.FieldDescriptorProto_TYPE_FLOAT: + wire = "WireFixed32" + pre = "b.EncodeFixed32(uint64(" + g.Pkg["math"] + ".Float32bits(" + post = ")))" + case descriptor.FieldDescriptorProto_TYPE_INT64, + descriptor.FieldDescriptorProto_TYPE_UINT64: + wire = "WireVarint" + pre, post = "b.EncodeVarint(uint64(", "))" + case descriptor.FieldDescriptorProto_TYPE_INT32, + descriptor.FieldDescriptorProto_TYPE_UINT32, + descriptor.FieldDescriptorProto_TYPE_ENUM: + wire = "WireVarint" + pre, post = "b.EncodeVarint(uint64(", "))" + case descriptor.FieldDescriptorProto_TYPE_FIXED64, + descriptor.FieldDescriptorProto_TYPE_SFIXED64: + wire = "WireFixed64" + pre, post = "b.EncodeFixed64(uint64(", "))" + case descriptor.FieldDescriptorProto_TYPE_FIXED32, + descriptor.FieldDescriptorProto_TYPE_SFIXED32: + wire = "WireFixed32" + pre, post = "b.EncodeFixed32(uint64(", "))" + case descriptor.FieldDescriptorProto_TYPE_BOOL: + // bool needs special handling. 
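+					// (Editor's note: the wire format has no bool primitive,
+					// so the emitted code lowers the bool to a uint64 0/1
+					// before b.EncodeVarint.)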
+ g.P("t := uint64(0)") + g.P("if ", val, " { t = 1 }") + val = "t" + wire = "WireVarint" + pre, post = "b.EncodeVarint(", ")" + case descriptor.FieldDescriptorProto_TYPE_STRING: + wire = "WireBytes" + pre, post = "b.EncodeStringBytes(", ")" + case descriptor.FieldDescriptorProto_TYPE_GROUP: + wire = "WireStartGroup" + pre, post = "b.Marshal(", ")" + canFail = true + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + wire = "WireBytes" + pre, post = "b.EncodeMessage(", ")" + canFail = true + case descriptor.FieldDescriptorProto_TYPE_BYTES: + wire = "WireBytes" + pre, post = "b.EncodeRawBytes(", ")" + case descriptor.FieldDescriptorProto_TYPE_SINT32: + wire = "WireVarint" + pre, post = "b.EncodeZigzag32(uint64(", "))" + case descriptor.FieldDescriptorProto_TYPE_SINT64: + wire = "WireVarint" + pre, post = "b.EncodeZigzag64(uint64(", "))" + default: + g.Fail("unhandled oneof field type ", field.Type.String()) + } + fieldWire[field] = wire + g.P("b.EncodeVarint(", field.Number, "<<3|", g.Pkg["proto"], ".", wire, ")") + if !canFail { + g.P(pre, val, post) + } else { + g.P("if err := ", pre, val, post, "; err != nil {") + g.P("return err") + g.P("}") + } + if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP { + g.P("b.EncodeVarint(", field.Number, "<<3|", g.Pkg["proto"], ".WireEndGroup)") + } + } + g.P("case nil:") + g.P("default: return ", g.Pkg["fmt"], `.Errorf("`, ccTypeName, ".", fname, ` has unexpected type %T", x)`) + g.P("}") + } + g.P("return nil") + g.P("}") + g.P() + + // unmarshaler + g.P("func ", dec, decSig, " {") + g.P("m := msg.(*", ccTypeName, ")") + g.P("switch tag {") + for _, field := range message.Field { + if field.OneofIndex == nil { + continue + } + odp := message.OneofDecl[int(*field.OneofIndex)] + g.P("case ", field.Number, ": // ", odp.GetName(), ".", *field.Name) + g.P("if wire != ", g.Pkg["proto"], ".", fieldWire[field], " {") + g.P("return true, ", g.Pkg["proto"], ".ErrInternalBadWireType") + g.P("}") + lhs := "x, err" // overridden for TYPE_MESSAGE and TYPE_GROUP + var dec, cast, cast2 string + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + dec, cast = "b.DecodeFixed64()", g.Pkg["math"]+".Float64frombits" + case descriptor.FieldDescriptorProto_TYPE_FLOAT: + dec, cast, cast2 = "b.DecodeFixed32()", "uint32", g.Pkg["math"]+".Float32frombits" + case descriptor.FieldDescriptorProto_TYPE_INT64: + dec, cast = "b.DecodeVarint()", "int64" + case descriptor.FieldDescriptorProto_TYPE_UINT64: + dec = "b.DecodeVarint()" + case descriptor.FieldDescriptorProto_TYPE_INT32: + dec, cast = "b.DecodeVarint()", "int32" + case descriptor.FieldDescriptorProto_TYPE_FIXED64: + dec = "b.DecodeFixed64()" + case descriptor.FieldDescriptorProto_TYPE_FIXED32: + dec, cast = "b.DecodeFixed32()", "uint32" + case descriptor.FieldDescriptorProto_TYPE_BOOL: + dec = "b.DecodeVarint()" + // handled specially below + case descriptor.FieldDescriptorProto_TYPE_STRING: + dec = "b.DecodeStringBytes()" + case descriptor.FieldDescriptorProto_TYPE_GROUP: + g.P("msg := new(", fieldTypes[field][1:], ")") // drop star + lhs = "err" + dec = "b.DecodeGroup(msg)" + // handled specially below + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + g.P("msg := new(", fieldTypes[field][1:], ")") // drop star + lhs = "err" + dec = "b.DecodeMessage(msg)" + // handled specially below + case descriptor.FieldDescriptorProto_TYPE_BYTES: + dec = "b.DecodeRawBytes(true)" + case descriptor.FieldDescriptorProto_TYPE_UINT32: + dec, cast = "b.DecodeVarint()", "uint32" + case 
descriptor.FieldDescriptorProto_TYPE_ENUM: + dec, cast = "b.DecodeVarint()", fieldTypes[field] + case descriptor.FieldDescriptorProto_TYPE_SFIXED32: + dec, cast = "b.DecodeFixed32()", "int32" + case descriptor.FieldDescriptorProto_TYPE_SFIXED64: + dec, cast = "b.DecodeFixed64()", "int64" + case descriptor.FieldDescriptorProto_TYPE_SINT32: + dec, cast = "b.DecodeZigzag32()", "int32" + case descriptor.FieldDescriptorProto_TYPE_SINT64: + dec, cast = "b.DecodeZigzag64()", "int64" + default: + g.Fail("unhandled oneof field type ", field.Type.String()) + } + g.P(lhs, " := ", dec) + val := "x" + if cast != "" { + val = cast + "(" + val + ")" + } + if cast2 != "" { + val = cast2 + "(" + val + ")" + } + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_BOOL: + val += " != 0" + case descriptor.FieldDescriptorProto_TYPE_GROUP, + descriptor.FieldDescriptorProto_TYPE_MESSAGE: + val = "msg" + } + g.P("m.", oneofFieldName[*field.OneofIndex], " = &", oneofTypeName[field], "{", val, "}") + g.P("return true, err") + } + g.P("default: return false, nil") + g.P("}") + g.P("}") + g.P() + + // sizer + g.P("func ", size, sizeSig, " {") + g.P("m := msg.(*", ccTypeName, ")") + for oi, odp := range message.OneofDecl { + g.P("// ", odp.GetName()) + fname := oneofFieldName[int32(oi)] + g.P("switch x := m.", fname, ".(type) {") + for _, field := range message.Field { + if field.OneofIndex == nil || int(*field.OneofIndex) != oi { + continue + } + g.P("case *", oneofTypeName[field], ":") + val := "x." + fieldNames[field] + var wire, varint, fixed string + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + wire = "WireFixed64" + fixed = "8" + case descriptor.FieldDescriptorProto_TYPE_FLOAT: + wire = "WireFixed32" + fixed = "4" + case descriptor.FieldDescriptorProto_TYPE_INT64, + descriptor.FieldDescriptorProto_TYPE_UINT64, + descriptor.FieldDescriptorProto_TYPE_INT32, + descriptor.FieldDescriptorProto_TYPE_UINT32, + descriptor.FieldDescriptorProto_TYPE_ENUM: + wire = "WireVarint" + varint = val + case descriptor.FieldDescriptorProto_TYPE_FIXED64, + descriptor.FieldDescriptorProto_TYPE_SFIXED64: + wire = "WireFixed64" + fixed = "8" + case descriptor.FieldDescriptorProto_TYPE_FIXED32, + descriptor.FieldDescriptorProto_TYPE_SFIXED32: + wire = "WireFixed32" + fixed = "4" + case descriptor.FieldDescriptorProto_TYPE_BOOL: + wire = "WireVarint" + fixed = "1" + case descriptor.FieldDescriptorProto_TYPE_STRING: + wire = "WireBytes" + fixed = "len(" + val + ")" + varint = fixed + case descriptor.FieldDescriptorProto_TYPE_GROUP: + wire = "WireStartGroup" + fixed = g.Pkg["proto"] + ".Size(" + val + ")" + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + wire = "WireBytes" + g.P("s := ", g.Pkg["proto"], ".Size(", val, ")") + fixed = "s" + varint = fixed + case descriptor.FieldDescriptorProto_TYPE_BYTES: + wire = "WireBytes" + fixed = "len(" + val + ")" + varint = fixed + case descriptor.FieldDescriptorProto_TYPE_SINT32: + wire = "WireVarint" + varint = "(uint32(" + val + ") << 1) ^ uint32((int32(" + val + ") >> 31))" + case descriptor.FieldDescriptorProto_TYPE_SINT64: + wire = "WireVarint" + varint = "uint64(" + val + " << 1) ^ uint64((int64(" + val + ") >> 63))" + default: + g.Fail("unhandled oneof field type ", field.Type.String()) + } + g.P("n += ", g.Pkg["proto"], ".SizeVarint(", field.Number, "<<3|", g.Pkg["proto"], ".", wire, ")") + if varint != "" { + g.P("n += ", g.Pkg["proto"], ".SizeVarint(uint64(", varint, "))") + } + if fixed != "" { + g.P("n += ", fixed) + } + if *field.Type == 
descriptor.FieldDescriptorProto_TYPE_GROUP { + g.P("n += ", g.Pkg["proto"], ".SizeVarint(", field.Number, "<<3|", g.Pkg["proto"], ".WireEndGroup)") + } + } + g.P("case nil:") + g.P("default:") + g.P("panic(", g.Pkg["fmt"], ".Sprintf(\"proto: unexpected type %T in oneof\", x))") + g.P("}") + } + g.P("return n") + g.P("}") + g.P() + } + + for _, ext := range message.ext { + g.generateExtension(ext) + } + + fullName := strings.Join(message.TypeName(), ".") + if g.file.Package != nil { + fullName = *g.file.Package + "." + fullName + } + + g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["proto"], ccTypeName, fullName) +} + +var escapeChars = [256]byte{ + 'a': '\a', 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t', 'v': '\v', '\\': '\\', '"': '"', '\'': '\'', '?': '?', +} + +// unescape reverses the "C" escaping that protoc does for default values of bytes fields. +// It is best effort in that it effectively ignores malformed input. Seemingly invalid escape +// sequences are conveyed, unmodified, into the decoded result. +func unescape(s string) string { + // NB: Sadly, we can't use strconv.Unquote because protoc will escape both + // single and double quotes, but strconv.Unquote only allows one or the + // other (based on actual surrounding quotes of its input argument). + + var out []byte + for len(s) > 0 { + // regular character, or too short to be valid escape + if s[0] != '\\' || len(s) < 2 { + out = append(out, s[0]) + s = s[1:] + } else if c := escapeChars[s[1]]; c != 0 { + // escape sequence + out = append(out, c) + s = s[2:] + } else if s[1] == 'x' || s[1] == 'X' { + // hex escape, e.g. "\x80 + if len(s) < 4 { + // too short to be valid + out = append(out, s[:2]...) + s = s[2:] + continue + } + v, err := strconv.ParseUint(s[2:4], 16, 8) + if err != nil { + out = append(out, s[:4]...) + } else { + out = append(out, byte(v)) + } + s = s[4:] + } else if '0' <= s[1] && s[1] <= '7' { + // octal escape, can vary from 1 to 3 octal digits; e.g., "\0" "\40" or "\164" + // so consume up to 2 more bytes or up to end-of-string + n := len(s[1:]) - len(strings.TrimLeft(s[1:], "01234567")) + if n > 3 { + n = 3 + } + v, err := strconv.ParseUint(s[1:1+n], 8, 8) + if err != nil { + out = append(out, s[:1+n]...) + } else { + out = append(out, byte(v)) + } + s = s[1+n:] + } else { + // bad escape, just propagate the slash as-is + out = append(out, s[0]) + s = s[1:] + } + } + + return string(out) +} + +func (g *Generator) generateExtension(ext *ExtensionDescriptor) { + ccTypeName := ext.DescName() + + extObj := g.ObjectNamed(*ext.Extendee) + var extDesc *Descriptor + if id, ok := extObj.(*ImportedDescriptor); ok { + // This is extending a publicly imported message. + // We need the underlying type for goTag. + extDesc = id.o.(*Descriptor) + } else { + extDesc = extObj.(*Descriptor) + } + extendedType := "*" + g.TypeName(extObj) // always use the original + field := ext.FieldDescriptorProto + fieldType, wireType := g.GoType(ext.parent, field) + tag := g.goTag(extDesc, field, wireType) + g.RecordTypeUse(*ext.Extendee) + if n := ext.FieldDescriptorProto.TypeName; n != nil { + // foreign extension type + g.RecordTypeUse(*n) + } + + typeName := ext.TypeName() + + // Special case for proto2 message sets: If this extension is extending + // proto2_bridge.MessageSet, and its final name component is "message_set_extension", + // then drop that last component. 
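+	// (Editor's note: i.e. an extension of proto2_bridge.MessageSet whose last
+	// name component is "message_set_extension" is registered under its parent
+	// message's name, mirroring how MessageSet extensions are addressed.)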
+ mset := false + if extendedType == "*proto2_bridge.MessageSet" && typeName[len(typeName)-1] == "message_set_extension" { + typeName = typeName[:len(typeName)-1] + mset = true + } + + // For text formatting, the package must be exactly what the .proto file declares, + // ignoring overrides such as the go_package option, and with no dot/underscore mapping. + extName := strings.Join(typeName, ".") + if g.file.Package != nil { + extName = *g.file.Package + "." + extName + } + + g.P("var ", ccTypeName, " = &", g.Pkg["proto"], ".ExtensionDesc{") + g.In() + g.P("ExtendedType: (", extendedType, ")(nil),") + g.P("ExtensionType: (", fieldType, ")(nil),") + g.P("Field: ", field.Number, ",") + g.P(`Name: "`, extName, `",`) + g.P("Tag: ", tag, ",") + g.P(`Filename: "`, g.file.GetName(), `",`) + + g.Out() + g.P("}") + g.P() + + if mset { + // Generate a bit more code to register with message_set.go. + g.addInitf("%s.RegisterMessageSetType((%s)(nil), %d, %q)", g.Pkg["proto"], fieldType, *field.Number, extName) + } + + g.file.addExport(ext, constOrVarSymbol{ccTypeName, "var", ""}) +} + +func (g *Generator) generateInitFunction() { + for _, enum := range g.file.enum { + g.generateEnumRegistration(enum) + } + for _, d := range g.file.desc { + for _, ext := range d.ext { + g.generateExtensionRegistration(ext) + } + } + for _, ext := range g.file.ext { + g.generateExtensionRegistration(ext) + } + if len(g.init) == 0 { + return + } + g.P("func init() {") + g.In() + for _, l := range g.init { + g.P(l) + } + g.Out() + g.P("}") + g.init = nil +} + +func (g *Generator) generateFileDescriptor(file *FileDescriptor) { + // Make a copy and trim source_code_info data. + // TODO: Trim this more when we know exactly what we need. + pb := proto.Clone(file.FileDescriptorProto).(*descriptor.FileDescriptorProto) + pb.SourceCodeInfo = nil + + b, err := proto.Marshal(pb) + if err != nil { + g.Fail(err.Error()) + } + + var buf bytes.Buffer + w, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression) + w.Write(b) + w.Close() + b = buf.Bytes() + + v := file.VarName() + g.P() + g.P("func init() { ", g.Pkg["proto"], ".RegisterFile(", strconv.Quote(*file.Name), ", ", v, ") }") + g.P("var ", v, " = []byte{") + g.In() + g.P("// ", len(b), " bytes of a gzipped FileDescriptorProto") + for len(b) > 0 { + n := 16 + if n > len(b) { + n = len(b) + } + + s := "" + for _, c := range b[:n] { + s += fmt.Sprintf("0x%02x,", c) + } + g.P(s) + + b = b[n:] + } + g.Out() + g.P("}") +} + +func (g *Generator) generateEnumRegistration(enum *EnumDescriptor) { + // // We always print the full (proto-world) package name here. + pkg := enum.File().GetPackage() + if pkg != "" { + pkg += "." + } + // The full type name + typeName := enum.TypeName() + // The full type name, CamelCased. + ccTypeName := CamelCaseSlice(typeName) + g.addInitf("%s.RegisterEnum(%q, %[3]s_name, %[3]s_value)", g.Pkg["proto"], pkg+ccTypeName, ccTypeName) +} + +func (g *Generator) generateExtensionRegistration(ext *ExtensionDescriptor) { + g.addInitf("%s.RegisterExtension(%s)", g.Pkg["proto"], ext.DescName()) +} + +// And now lots of helper functions. + +// Is c an ASCII lower-case letter? +func isASCIILower(c byte) bool { + return 'a' <= c && c <= 'z' +} + +// Is c an ASCII digit? +func isASCIIDigit(c byte) bool { + return '0' <= c && c <= '9' +} + +// CamelCase returns the CamelCased name. +// If there is an interior underscore followed by a lower case letter, +// drop the underscore and convert the letter to upper case. 
+// There is a remote possibility of this rewrite causing a name collision, +// but it's so remote we're prepared to pretend it's nonexistent - since the +// C++ generator lowercases names, it's extremely unlikely to have two fields +// with different capitalizations. +// In short, _my_field_name_2 becomes XMyFieldName_2. +func CamelCase(s string) string { + if s == "" { + return "" + } + t := make([]byte, 0, 32) + i := 0 + if s[0] == '_' { + // Need a capital letter; drop the '_'. + t = append(t, 'X') + i++ + } + // Invariant: if the next letter is lower case, it must be converted + // to upper case. + // That is, we process a word at a time, where words are marked by _ or + // upper case letter. Digits are treated as words. + for ; i < len(s); i++ { + c := s[i] + if c == '_' && i+1 < len(s) && isASCIILower(s[i+1]) { + continue // Skip the underscore in s. + } + if isASCIIDigit(c) { + t = append(t, c) + continue + } + // Assume we have a letter now - if not, it's a bogus identifier. + // The next word is a sequence of characters that must start upper case. + if isASCIILower(c) { + c ^= ' ' // Make it a capital letter. + } + t = append(t, c) // Guaranteed not lower case. + // Accept lower case sequence that follows. + for i+1 < len(s) && isASCIILower(s[i+1]) { + i++ + t = append(t, s[i]) + } + } + return string(t) +} + +// CamelCaseSlice is like CamelCase, but the argument is a slice of strings to +// be joined with "_". +func CamelCaseSlice(elem []string) string { return CamelCase(strings.Join(elem, "_")) } + +// dottedSlice turns a sliced name into a dotted name. +func dottedSlice(elem []string) string { return strings.Join(elem, ".") } + +// Is this field optional? +func isOptional(field *descriptor.FieldDescriptorProto) bool { + return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_OPTIONAL +} + +// Is this field required? +func isRequired(field *descriptor.FieldDescriptorProto) bool { + return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REQUIRED +} + +// Is this field repeated? +func isRepeated(field *descriptor.FieldDescriptorProto) bool { + return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED +} + +// Is this field a scalar numeric type? +func isScalar(field *descriptor.FieldDescriptorProto) bool { + if field.Type == nil { + return false + } + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE, + descriptor.FieldDescriptorProto_TYPE_FLOAT, + descriptor.FieldDescriptorProto_TYPE_INT64, + descriptor.FieldDescriptorProto_TYPE_UINT64, + descriptor.FieldDescriptorProto_TYPE_INT32, + descriptor.FieldDescriptorProto_TYPE_FIXED64, + descriptor.FieldDescriptorProto_TYPE_FIXED32, + descriptor.FieldDescriptorProto_TYPE_BOOL, + descriptor.FieldDescriptorProto_TYPE_UINT32, + descriptor.FieldDescriptorProto_TYPE_ENUM, + descriptor.FieldDescriptorProto_TYPE_SFIXED32, + descriptor.FieldDescriptorProto_TYPE_SFIXED64, + descriptor.FieldDescriptorProto_TYPE_SINT32, + descriptor.FieldDescriptorProto_TYPE_SINT64: + return true + default: + return false + } +} + +// badToUnderscore is the mapping function used to generate Go names from package names, +// which can be dotted in the input .proto file. It replaces non-identifier characters such as +// dot or dash with underscore. 
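+// (Editor's example: strings.Map(badToUnderscore, "my.pkg-v2") == "my_pkg_v2".)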
+func badToUnderscore(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' { + return r + } + return '_' +} + +// baseName returns the last path element of the name, with the last dotted suffix removed. +func baseName(name string) string { + // First, find the last element + if i := strings.LastIndex(name, "/"); i >= 0 { + name = name[i+1:] + } + // Now drop the suffix + if i := strings.LastIndex(name, "."); i >= 0 { + name = name[0:i] + } + return name +} + +// The SourceCodeInfo message describes the location of elements of a parsed +// .proto file by way of a "path", which is a sequence of integers that +// describe the route from a FileDescriptorProto to the relevant submessage. +// The path alternates between a field number of a repeated field, and an index +// into that repeated field. The constants below define the field numbers that +// are used. +// +// See descriptor.proto for more information about this. +const ( + // tag numbers in FileDescriptorProto + packagePath = 2 // package + messagePath = 4 // message_type + enumPath = 5 // enum_type + // tag numbers in DescriptorProto + messageFieldPath = 2 // field + messageMessagePath = 3 // nested_type + messageEnumPath = 4 // enum_type + messageOneofPath = 8 // oneof_decl + // tag numbers in EnumDescriptorProto + enumValuePath = 2 // value +) diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go new file mode 100644 index 0000000..76808f3 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go @@ -0,0 +1,114 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2013 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package generator + +import ( + "testing" + + "github.com/golang/protobuf/protoc-gen-go/descriptor" +) + +func TestCamelCase(t *testing.T) { + tests := []struct { + in, want string + }{ + {"one", "One"}, + {"one_two", "OneTwo"}, + {"_my_field_name_2", "XMyFieldName_2"}, + {"Something_Capped", "Something_Capped"}, + {"my_Name", "My_Name"}, + {"OneTwo", "OneTwo"}, + {"_", "X"}, + {"_a_", "XA_"}, + } + for _, tc := range tests { + if got := CamelCase(tc.in); got != tc.want { + t.Errorf("CamelCase(%q) = %q, want %q", tc.in, got, tc.want) + } + } +} + +func TestGoPackageOption(t *testing.T) { + tests := []struct { + in string + impPath, pkg string + ok bool + }{ + {"", "", "", false}, + {"foo", "", "foo", true}, + {"github.com/golang/bar", "github.com/golang/bar", "bar", true}, + {"github.com/golang/bar;baz", "github.com/golang/bar", "baz", true}, + } + for _, tc := range tests { + d := &FileDescriptor{ + FileDescriptorProto: &descriptor.FileDescriptorProto{ + Options: &descriptor.FileOptions{ + GoPackage: &tc.in, + }, + }, + } + impPath, pkg, ok := d.goPackageOption() + if impPath != tc.impPath || pkg != tc.pkg || ok != tc.ok { + t.Errorf("go_package = %q => (%q, %q, %t), want (%q, %q, %t)", tc.in, + impPath, pkg, ok, tc.impPath, tc.pkg, tc.ok) + } + } +} + +func TestUnescape(t *testing.T) { + tests := []struct { + in string + out string + }{ + // successful cases, including all kinds of escapes + {"", ""}, + {"foo bar baz frob nitz", "foo bar baz frob nitz"}, + {`\000\001\002\003\004\005\006\007`, string([]byte{0, 1, 2, 3, 4, 5, 6, 7})}, + {`\a\b\f\n\r\t\v\\\?\'\"`, string([]byte{'\a', '\b', '\f', '\n', '\r', '\t', '\v', '\\', '?', '\'', '"'})}, + {`\x10\x20\x30\x40\x50\x60\x70\x80`, string([]byte{16, 32, 48, 64, 80, 96, 112, 128})}, + // variable length octal escapes + {`\0\018\222\377\3\04\005\6\07`, string([]byte{0, 1, '8', 0222, 255, 3, 4, 5, 6, 7})}, + // malformed escape sequences left as is + {"foo \\g bar", "foo \\g bar"}, + {"foo \\xg0 bar", "foo \\xg0 bar"}, + {"\\", "\\"}, + {"\\x", "\\x"}, + {"\\xf", "\\xf"}, + {"\\777", "\\777"}, // overflows byte + } + for _, tc := range tests { + s := unescape(tc.in) + if s != tc.out { + t.Errorf("doUnescape(%q) = %q; should have been %q", tc.in, s, tc.out) + } + } +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go new file mode 100644 index 0000000..2660e47 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go @@ -0,0 +1,463 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package grpc outputs gRPC service descriptions in Go code. +// It runs as a plugin for the Go protocol buffer compiler plugin. +// It is linked in to protoc-gen-go. +package grpc + +import ( + "fmt" + "path" + "strconv" + "strings" + + pb "github.com/golang/protobuf/protoc-gen-go/descriptor" + "github.com/golang/protobuf/protoc-gen-go/generator" +) + +// generatedCodeVersion indicates a version of the generated code. +// It is incremented whenever an incompatibility between the generated code and +// the grpc package is introduced; the generated code references +// a constant, grpc.SupportPackageIsVersionN (where N is generatedCodeVersion). +const generatedCodeVersion = 4 + +// Paths for packages used by code generated in this file, +// relative to the import_prefix of the generator.Generator. +const ( + contextPkgPath = "golang.org/x/net/context" + grpcPkgPath = "google.golang.org/grpc" +) + +func init() { + generator.RegisterPlugin(new(grpc)) +} + +// grpc is an implementation of the Go protocol buffer compiler's +// plugin architecture. It generates bindings for gRPC support. +type grpc struct { + gen *generator.Generator +} + +// Name returns the name of this plugin, "grpc". +func (g *grpc) Name() string { + return "grpc" +} + +// The names for packages imported in the generated code. +// They may vary from the final path component of the import path +// if the name is used by other packages. +var ( + contextPkg string + grpcPkg string +) + +// Init initializes the plugin. +func (g *grpc) Init(gen *generator.Generator) { + g.gen = gen + contextPkg = generator.RegisterUniquePackageName("context", nil) + grpcPkg = generator.RegisterUniquePackageName("grpc", nil) +} + +// Given a type name defined in a .proto, return its object. +// Also record that we're using it, to guarantee the associated import. +func (g *grpc) objectNamed(name string) generator.Object { + g.gen.RecordTypeUse(name) + return g.gen.ObjectNamed(name) +} + +// Given a type name defined in a .proto, return its name as we will print it. +func (g *grpc) typeName(str string) string { + return g.gen.TypeName(g.objectNamed(str)) +} + +// P forwards to g.gen.P. +func (g *grpc) P(args ...interface{}) { g.gen.P(args...) } + +// Generate generates code for the services in the given file. +func (g *grpc) Generate(file *generator.FileDescriptor) { + if len(file.FileDescriptorProto.Service) == 0 { + return + } + + g.P("// Reference imports to suppress errors if they are not otherwise used.") + g.P("var _ ", contextPkg, ".Context") + g.P("var _ ", grpcPkg, ".ClientConn") + g.P() + + // Assert version compatibility. 
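+	// (Editor's note: with generatedCodeVersion = 4 and the default package
+	// name "grpc", the emitted line is
+	//
+	//	const _ = grpc.SupportPackageIsVersion4
+	//
+	// so generated code fails to compile against an older grpc package
+	// instead of misbehaving at runtime.)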
+ g.P("// This is a compile-time assertion to ensure that this generated file") + g.P("// is compatible with the grpc package it is being compiled against.") + g.P("const _ = ", grpcPkg, ".SupportPackageIsVersion", generatedCodeVersion) + g.P() + + for i, service := range file.FileDescriptorProto.Service { + g.generateService(file, service, i) + } +} + +// GenerateImports generates the import declaration for this file. +func (g *grpc) GenerateImports(file *generator.FileDescriptor) { + if len(file.FileDescriptorProto.Service) == 0 { + return + } + g.P("import (") + g.P(contextPkg, " ", strconv.Quote(path.Join(g.gen.ImportPrefix, contextPkgPath))) + g.P(grpcPkg, " ", strconv.Quote(path.Join(g.gen.ImportPrefix, grpcPkgPath))) + g.P(")") + g.P() +} + +// reservedClientName records whether a client name is reserved on the client side. +var reservedClientName = map[string]bool{ +// TODO: do we need any in gRPC? +} + +func unexport(s string) string { return strings.ToLower(s[:1]) + s[1:] } + +// generateService generates all the code for the named service. +func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.ServiceDescriptorProto, index int) { + path := fmt.Sprintf("6,%d", index) // 6 means service. + + origServName := service.GetName() + fullServName := origServName + if pkg := file.GetPackage(); pkg != "" { + fullServName = pkg + "." + fullServName + } + servName := generator.CamelCase(origServName) + + g.P() + g.P("// Client API for ", servName, " service") + g.P() + + // Client interface. + g.P("type ", servName, "Client interface {") + for i, method := range service.Method { + g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service. + g.P(g.generateClientSignature(servName, method)) + } + g.P("}") + g.P() + + // Client structure. + g.P("type ", unexport(servName), "Client struct {") + g.P("cc *", grpcPkg, ".ClientConn") + g.P("}") + g.P() + + // NewClient factory. + g.P("func New", servName, "Client (cc *", grpcPkg, ".ClientConn) ", servName, "Client {") + g.P("return &", unexport(servName), "Client{cc}") + g.P("}") + g.P() + + var methodIndex, streamIndex int + serviceDescVar := "_" + servName + "_serviceDesc" + // Client method implementations. + for _, method := range service.Method { + var descExpr string + if !method.GetServerStreaming() && !method.GetClientStreaming() { + // Unary RPC method + descExpr = fmt.Sprintf("&%s.Methods[%d]", serviceDescVar, methodIndex) + methodIndex++ + } else { + // Streaming RPC method + descExpr = fmt.Sprintf("&%s.Streams[%d]", serviceDescVar, streamIndex) + streamIndex++ + } + g.generateClientMethod(servName, fullServName, serviceDescVar, method, descExpr) + } + + g.P("// Server API for ", servName, " service") + g.P() + + // Server interface. + serverType := servName + "Server" + g.P("type ", serverType, " interface {") + for i, method := range service.Method { + g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service. + g.P(g.generateServerSignature(servName, method)) + } + g.P("}") + g.P() + + // Server registration. + g.P("func Register", servName, "Server(s *", grpcPkg, ".Server, srv ", serverType, ") {") + g.P("s.RegisterService(&", serviceDescVar, `, srv)`) + g.P("}") + g.P() + + // Server handler implementations. + var handlerNames []string + for _, method := range service.Method { + hname := g.generateServerMethod(servName, fullServName, method) + handlerNames = append(handlerNames, hname) + } + + // Service descriptor. 
+ g.P("var ", serviceDescVar, " = ", grpcPkg, ".ServiceDesc {") + g.P("ServiceName: ", strconv.Quote(fullServName), ",") + g.P("HandlerType: (*", serverType, ")(nil),") + g.P("Methods: []", grpcPkg, ".MethodDesc{") + for i, method := range service.Method { + if method.GetServerStreaming() || method.GetClientStreaming() { + continue + } + g.P("{") + g.P("MethodName: ", strconv.Quote(method.GetName()), ",") + g.P("Handler: ", handlerNames[i], ",") + g.P("},") + } + g.P("},") + g.P("Streams: []", grpcPkg, ".StreamDesc{") + for i, method := range service.Method { + if !method.GetServerStreaming() && !method.GetClientStreaming() { + continue + } + g.P("{") + g.P("StreamName: ", strconv.Quote(method.GetName()), ",") + g.P("Handler: ", handlerNames[i], ",") + if method.GetServerStreaming() { + g.P("ServerStreams: true,") + } + if method.GetClientStreaming() { + g.P("ClientStreams: true,") + } + g.P("},") + } + g.P("},") + g.P("Metadata: \"", file.GetName(), "\",") + g.P("}") + g.P() +} + +// generateClientSignature returns the client-side signature for a method. +func (g *grpc) generateClientSignature(servName string, method *pb.MethodDescriptorProto) string { + origMethName := method.GetName() + methName := generator.CamelCase(origMethName) + if reservedClientName[methName] { + methName += "_" + } + reqArg := ", in *" + g.typeName(method.GetInputType()) + if method.GetClientStreaming() { + reqArg = "" + } + respName := "*" + g.typeName(method.GetOutputType()) + if method.GetServerStreaming() || method.GetClientStreaming() { + respName = servName + "_" + generator.CamelCase(origMethName) + "Client" + } + return fmt.Sprintf("%s(ctx %s.Context%s, opts ...%s.CallOption) (%s, error)", methName, contextPkg, reqArg, grpcPkg, respName) +} + +func (g *grpc) generateClientMethod(servName, fullServName, serviceDescVar string, method *pb.MethodDescriptorProto, descExpr string) { + sname := fmt.Sprintf("/%s/%s", fullServName, method.GetName()) + methName := generator.CamelCase(method.GetName()) + inType := g.typeName(method.GetInputType()) + outType := g.typeName(method.GetOutputType()) + + g.P("func (c *", unexport(servName), "Client) ", g.generateClientSignature(servName, method), "{") + if !method.GetServerStreaming() && !method.GetClientStreaming() { + g.P("out := new(", outType, ")") + // TODO: Pass descExpr to Invoke. + g.P("err := ", grpcPkg, `.Invoke(ctx, "`, sname, `", in, out, c.cc, opts...)`) + g.P("if err != nil { return nil, err }") + g.P("return out, nil") + g.P("}") + g.P() + return + } + streamType := unexport(servName) + methName + "Client" + g.P("stream, err := ", grpcPkg, ".NewClientStream(ctx, ", descExpr, `, c.cc, "`, sname, `", opts...)`) + g.P("if err != nil { return nil, err }") + g.P("x := &", streamType, "{stream}") + if !method.GetClientStreaming() { + g.P("if err := x.ClientStream.SendMsg(in); err != nil { return nil, err }") + g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }") + } + g.P("return x, nil") + g.P("}") + g.P() + + genSend := method.GetClientStreaming() + genRecv := method.GetServerStreaming() + genCloseAndRecv := !method.GetServerStreaming() + + // Stream auxiliary types and methods. 
+ g.P("type ", servName, "_", methName, "Client interface {") + if genSend { + g.P("Send(*", inType, ") error") + } + if genRecv { + g.P("Recv() (*", outType, ", error)") + } + if genCloseAndRecv { + g.P("CloseAndRecv() (*", outType, ", error)") + } + g.P(grpcPkg, ".ClientStream") + g.P("}") + g.P() + + g.P("type ", streamType, " struct {") + g.P(grpcPkg, ".ClientStream") + g.P("}") + g.P() + + if genSend { + g.P("func (x *", streamType, ") Send(m *", inType, ") error {") + g.P("return x.ClientStream.SendMsg(m)") + g.P("}") + g.P() + } + if genRecv { + g.P("func (x *", streamType, ") Recv() (*", outType, ", error) {") + g.P("m := new(", outType, ")") + g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } + if genCloseAndRecv { + g.P("func (x *", streamType, ") CloseAndRecv() (*", outType, ", error) {") + g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }") + g.P("m := new(", outType, ")") + g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } +} + +// generateServerSignature returns the server-side signature for a method. +func (g *grpc) generateServerSignature(servName string, method *pb.MethodDescriptorProto) string { + origMethName := method.GetName() + methName := generator.CamelCase(origMethName) + if reservedClientName[methName] { + methName += "_" + } + + var reqArgs []string + ret := "error" + if !method.GetServerStreaming() && !method.GetClientStreaming() { + reqArgs = append(reqArgs, contextPkg+".Context") + ret = "(*" + g.typeName(method.GetOutputType()) + ", error)" + } + if !method.GetClientStreaming() { + reqArgs = append(reqArgs, "*"+g.typeName(method.GetInputType())) + } + if method.GetServerStreaming() || method.GetClientStreaming() { + reqArgs = append(reqArgs, servName+"_"+generator.CamelCase(origMethName)+"Server") + } + + return methName + "(" + strings.Join(reqArgs, ", ") + ") " + ret +} + +func (g *grpc) generateServerMethod(servName, fullServName string, method *pb.MethodDescriptorProto) string { + methName := generator.CamelCase(method.GetName()) + hname := fmt.Sprintf("_%s_%s_Handler", servName, methName) + inType := g.typeName(method.GetInputType()) + outType := g.typeName(method.GetOutputType()) + + if !method.GetServerStreaming() && !method.GetClientStreaming() { + g.P("func ", hname, "(srv interface{}, ctx ", contextPkg, ".Context, dec func(interface{}) error, interceptor ", grpcPkg, ".UnaryServerInterceptor) (interface{}, error) {") + g.P("in := new(", inType, ")") + g.P("if err := dec(in); err != nil { return nil, err }") + g.P("if interceptor == nil { return srv.(", servName, "Server).", methName, "(ctx, in) }") + g.P("info := &", grpcPkg, ".UnaryServerInfo{") + g.P("Server: srv,") + g.P("FullMethod: ", strconv.Quote(fmt.Sprintf("/%s/%s", fullServName, methName)), ",") + g.P("}") + g.P("handler := func(ctx ", contextPkg, ".Context, req interface{}) (interface{}, error) {") + g.P("return srv.(", servName, "Server).", methName, "(ctx, req.(*", inType, "))") + g.P("}") + g.P("return interceptor(ctx, in, info, handler)") + g.P("}") + g.P() + return hname + } + streamType := unexport(servName) + methName + "Server" + g.P("func ", hname, "(srv interface{}, stream ", grpcPkg, ".ServerStream) error {") + if !method.GetClientStreaming() { + g.P("m := new(", inType, ")") + g.P("if err := stream.RecvMsg(m); err != nil { return err }") + g.P("return srv.(", servName, "Server).", methName, "(m, &", streamType, 
"{stream})") + } else { + g.P("return srv.(", servName, "Server).", methName, "(&", streamType, "{stream})") + } + g.P("}") + g.P() + + genSend := method.GetServerStreaming() + genSendAndClose := !method.GetServerStreaming() + genRecv := method.GetClientStreaming() + + // Stream auxiliary types and methods. + g.P("type ", servName, "_", methName, "Server interface {") + if genSend { + g.P("Send(*", outType, ") error") + } + if genSendAndClose { + g.P("SendAndClose(*", outType, ") error") + } + if genRecv { + g.P("Recv() (*", inType, ", error)") + } + g.P(grpcPkg, ".ServerStream") + g.P("}") + g.P() + + g.P("type ", streamType, " struct {") + g.P(grpcPkg, ".ServerStream") + g.P("}") + g.P() + + if genSend { + g.P("func (x *", streamType, ") Send(m *", outType, ") error {") + g.P("return x.ServerStream.SendMsg(m)") + g.P("}") + g.P() + } + if genSendAndClose { + g.P("func (x *", streamType, ") SendAndClose(m *", outType, ") error {") + g.P("return x.ServerStream.SendMsg(m)") + g.P("}") + g.P() + } + if genRecv { + g.P("func (x *", streamType, ") Recv() (*", inType, ", error) {") + g.P("m := new(", inType, ")") + g.P("if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } + + return hname +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go new file mode 100644 index 0000000..532a550 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go @@ -0,0 +1,34 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package main + +import _ "github.com/golang/protobuf/protoc-gen-go/grpc" diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/main.go b/vendor/github.com/golang/protobuf/protoc-gen-go/main.go new file mode 100644 index 0000000..8e2486d --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/main.go @@ -0,0 +1,98 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// protoc-gen-go is a plugin for the Google protocol buffer compiler to generate +// Go code. Run it by building this program and putting it in your path with +// the name +// protoc-gen-go +// That word 'go' at the end becomes part of the option string set for the +// protocol compiler, so once the protocol compiler (protoc) is installed +// you can run +// protoc --go_out=output_directory input_directory/file.proto +// to generate Go bindings for the protocol defined by file.proto. +// With that input, the output will be written to +// output_directory/file.pb.go +// +// The generated code is documented in the package comment for +// the library. +// +// See the README and documentation for protocol buffers to learn more: +// https://developers.google.com/protocol-buffers/ +package main + +import ( + "io/ioutil" + "os" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/protoc-gen-go/generator" +) + +func main() { + // Begin by allocating a generator. The request and response structures are stored there + // so we can do error handling easily - the response structure contains the field to + // report failure. 
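+	// The plugin protocol itself is simple: protoc writes an encoded
+	// CodeGeneratorRequest to stdin and expects an encoded
+	// CodeGeneratorResponse on stdout.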
+ g := generator.New() + + data, err := ioutil.ReadAll(os.Stdin) + if err != nil { + g.Error(err, "reading input") + } + + if err := proto.Unmarshal(data, g.Request); err != nil { + g.Error(err, "parsing input proto") + } + + if len(g.Request.FileToGenerate) == 0 { + g.Fail("no files to generate") + } + + g.CommandLineParameters(g.Request.GetParameter()) + + // Create a wrapped version of the Descriptors and EnumDescriptors that + // point to the file that defines them. + g.WrapTypes() + + g.SetPackageNames() + g.BuildTypeNameMap() + + g.GenerateAllFiles() + + // Send back the results. + data, err = proto.Marshal(g.Response) + if err != nil { + g.Error(err, "failed to marshal output proto") + } + _, err = os.Stdout.Write(data) + if err != nil { + g.Error(err, "failed to write output proto") + } +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/Makefile new file mode 100644 index 0000000..bc0463d --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/Makefile @@ -0,0 +1,45 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Not stored here, but plugin.proto is in https://github.com/google/protobuf/ +# at src/google/protobuf/compiler/plugin.proto +# Also we need to fix an import. +regenerate: + @echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION + cp $(HOME)/src/protobuf/include/google/protobuf/compiler/plugin.proto . + protoc --go_out=Mgoogle/protobuf/descriptor.proto=github.com/golang/protobuf/protoc-gen-go/descriptor:../../../../.. 
\ + -I$(HOME)/src/protobuf/include $(HOME)/src/protobuf/include/google/protobuf/compiler/plugin.proto + +restore: + cp plugin.pb.golden plugin.pb.go + +preserve: + cp plugin.pb.go plugin.pb.golden diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go new file mode 100644 index 0000000..c608a24 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go @@ -0,0 +1,293 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/compiler/plugin.proto + +/* +Package plugin_go is a generated protocol buffer package. + +It is generated from these files: + google/protobuf/compiler/plugin.proto + +It has these top-level messages: + Version + CodeGeneratorRequest + CodeGeneratorResponse +*/ +package plugin_go + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The version number of protocol compiler. +type Version struct { + Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Version) GetMajor() int32 { + if m != nil && m.Major != nil { + return *m.Major + } + return 0 +} + +func (m *Version) GetMinor() int32 { + if m != nil && m.Minor != nil { + return *m.Minor + } + return 0 +} + +func (m *Version) GetPatch() int32 { + if m != nil && m.Patch != nil { + return *m.Patch + } + return 0 +} + +func (m *Version) GetSuffix() string { + if m != nil && m.Suffix != nil { + return *m.Suffix + } + return "" +} + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. +type CodeGeneratorRequest struct { + // The .proto files that were explicitly listed on the command-line. The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"` + // The generator parameter passed on the command-line. + Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. 
+ // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. + // + // Type names of fields and extensions in the FileDescriptorProto are always + // fully qualified. + ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"` + // The version number of protocol compiler. + CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CodeGeneratorRequest) Reset() { *m = CodeGeneratorRequest{} } +func (m *CodeGeneratorRequest) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorRequest) ProtoMessage() {} +func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *CodeGeneratorRequest) GetFileToGenerate() []string { + if m != nil { + return m.FileToGenerate + } + return nil +} + +func (m *CodeGeneratorRequest) GetParameter() string { + if m != nil && m.Parameter != nil { + return *m.Parameter + } + return "" +} + +func (m *CodeGeneratorRequest) GetProtoFile() []*google_protobuf.FileDescriptorProto { + if m != nil { + return m.ProtoFile + } + return nil +} + +func (m *CodeGeneratorRequest) GetCompilerVersion() *Version { + if m != nil { + return m.CompilerVersion + } + return nil +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +type CodeGeneratorResponse struct { + // Error message. If non-empty, code generation failed. The plugin process + // should exit with status code zero even if it reports an error in this way. + // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. + Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CodeGeneratorResponse) Reset() { *m = CodeGeneratorResponse{} } +func (m *CodeGeneratorResponse) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorResponse) ProtoMessage() {} +func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *CodeGeneratorResponse) GetError() string { + if m != nil && m.Error != nil { + return *m.Error + } + return "" +} + +func (m *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File { + if m != nil { + return m.File + } + return nil +} + +// Represents a single generated file. +type CodeGeneratorResponse_File struct { + // The file name, relative to the output directory. The name must not + // contain "." or ".." components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". 
+ // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // If non-empty, indicates that the named file should already exist, and the + // content here is to be inserted into that file at a defined insertion + // point. This feature allows a code generator to extend the output + // produced by another code generator. The original generator may provide + // insertion points by placing special annotations in the file that look + // like: + // @@protoc_insertion_point(NAME) + // The annotation can have arbitrary text before and after it on the line, + // which allows it to be placed in a comment. NAME should be replaced with + // an identifier naming the point -- this is what other generators will use + // as the insertion_point. Code inserted at this point will be placed + // immediately above the line containing the insertion point (thus multiple + // insertions to the same point will come out in the order they were added). + // The double-@ is intended to make it unlikely that the generated code + // could contain things that look like insertion points by accident. + // + // For example, the C++ code generator places the following line in the + // .pb.h files that it generates: + // // @@protoc_insertion_point(namespace_scope) + // This line appears within the scope of the file's package namespace, but + // outside of any particular class. Another plugin can then specify the + // insertion_point "namespace_scope" to generate additional classes or + // other declarations that should be placed in this scope. + // + // Note that if the line containing the insertion point begins with + // whitespace, the same whitespace will be added to every line of the + // inserted text. This is useful for languages like Python, where + // indentation matters. In these languages, the insertion point comment + // should be indented the same amount as any inserted code will need to be + // in order to work correctly in that context. + // + // The code generator that generates the initial file and the one which + // inserts into it must both run as part of a single invocation of protoc. + // Code generators are executed in the order in which they appear on the + // command line. + // + // If |insertion_point| is present, |name| must also be present. + InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"` + // The file contents. 
+ Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CodeGeneratorResponse_File) Reset() { *m = CodeGeneratorResponse_File{} } +func (m *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorResponse_File) ProtoMessage() {} +func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +func (m *CodeGeneratorResponse_File) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *CodeGeneratorResponse_File) GetInsertionPoint() string { + if m != nil && m.InsertionPoint != nil { + return *m.InsertionPoint + } + return "" +} + +func (m *CodeGeneratorResponse_File) GetContent() string { + if m != nil && m.Content != nil { + return *m.Content + } + return "" +} + +func init() { + proto.RegisterType((*Version)(nil), "google.protobuf.compiler.Version") + proto.RegisterType((*CodeGeneratorRequest)(nil), "google.protobuf.compiler.CodeGeneratorRequest") + proto.RegisterType((*CodeGeneratorResponse)(nil), "google.protobuf.compiler.CodeGeneratorResponse") + proto.RegisterType((*CodeGeneratorResponse_File)(nil), "google.protobuf.compiler.CodeGeneratorResponse.File") +} + +func init() { proto.RegisterFile("google/protobuf/compiler/plugin.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 417 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xcf, 0x6a, 0x14, 0x41, + 0x10, 0xc6, 0x19, 0x77, 0x63, 0x98, 0x8a, 0x64, 0x43, 0x13, 0xa5, 0x09, 0x39, 0x8c, 0x8b, 0xe2, + 0x5c, 0x32, 0x0b, 0xc1, 0x8b, 0x78, 0x4b, 0x44, 0x3d, 0x78, 0x58, 0x1a, 0xf1, 0x20, 0xc8, 0x30, + 0x99, 0xd4, 0x74, 0x5a, 0x66, 0xba, 0xc6, 0xee, 0x1e, 0xf1, 0x49, 0x7d, 0x0f, 0xdf, 0x40, 0xfa, + 0xcf, 0x24, 0xb2, 0xb8, 0xa7, 0xee, 0xef, 0x57, 0xd5, 0xd5, 0x55, 0x1f, 0x05, 0x2f, 0x25, 0x91, + 0xec, 0x71, 0x33, 0x1a, 0x72, 0x74, 0x33, 0x75, 0x9b, 0x96, 0x86, 0x51, 0xf5, 0x68, 0x36, 0x63, + 0x3f, 0x49, 0xa5, 0xab, 0x10, 0x60, 0x3c, 0xa6, 0x55, 0x73, 0x5a, 0x35, 0xa7, 0x9d, 0x15, 0xbb, + 0x05, 0x6e, 0xd1, 0xb6, 0x46, 0x8d, 0x8e, 0x4c, 0xcc, 0x5e, 0xb7, 0x70, 0xf8, 0x05, 0x8d, 0x55, + 0xa4, 0xd9, 0x29, 0x1c, 0x0c, 0xcd, 0x77, 0x32, 0x3c, 0x2b, 0xb2, 0xf2, 0x40, 0x44, 0x11, 0xa8, + 0xd2, 0x64, 0xf8, 0xa3, 0x44, 0xbd, 0xf0, 0x74, 0x6c, 0x5c, 0x7b, 0xc7, 0x17, 0x91, 0x06, 0xc1, + 0x9e, 0xc1, 0x63, 0x3b, 0x75, 0x9d, 0xfa, 0xc5, 0x97, 0x45, 0x56, 0xe6, 0x22, 0xa9, 0xf5, 0x9f, + 0x0c, 0x4e, 0xaf, 0xe9, 0x16, 0x3f, 0xa0, 0x46, 0xd3, 0x38, 0x32, 0x02, 0x7f, 0x4c, 0x68, 0x1d, + 0x2b, 0xe1, 0xa4, 0x53, 0x3d, 0xd6, 0x8e, 0x6a, 0x19, 0x63, 0xc8, 0xb3, 0x62, 0x51, 0xe6, 0xe2, + 0xd8, 0xf3, 0xcf, 0x94, 0x5e, 0x20, 0x3b, 0x87, 0x7c, 0x6c, 0x4c, 0x33, 0xa0, 0xc3, 0xd8, 0x4a, + 0x2e, 0x1e, 0x00, 0xbb, 0x06, 0x08, 0xe3, 0xd4, 0xfe, 0x15, 0x5f, 0x15, 0x8b, 0xf2, 0xe8, 0xf2, + 0x45, 0xb5, 0x6b, 0xcb, 0x7b, 0xd5, 0xe3, 0xbb, 0x7b, 0x03, 0xb6, 0x1e, 0x8b, 0x3c, 0x44, 0x7d, + 0x84, 0x7d, 0x82, 0x93, 0xd9, 0xb8, 0xfa, 0x67, 0xf4, 0x24, 0x8c, 0x77, 0x74, 0xf9, 0xbc, 0xda, + 0xe7, 0x70, 0x95, 0xcc, 0x13, 0xab, 0x99, 0x24, 0xb0, 0xfe, 0x9d, 0xc1, 0xd3, 0x9d, 0x99, 0xed, + 0x48, 0xda, 0xa2, 0xf7, 0x0e, 0x8d, 0x49, 0x3e, 0xe7, 0x22, 0x0a, 0xf6, 0x11, 0x96, 0xff, 0x34, + 0xff, 0x7a, 0xff, 0x8f, 0xff, 0x2d, 0x1a, 0x66, 0x13, 0xa1, 0xc2, 0xd9, 0x37, 0x58, 0x86, 0x79, + 0x18, 0x2c, 0x75, 0x33, 0x60, 0xfa, 0x26, 0xdc, 0xd9, 0x2b, 0x58, 0x29, 0x6d, 0xd1, 0x38, 0x45, + 0xba, 0x1e, 0x49, 0x69, 
0x97, 0xcc, 0x3c, 0xbe, 0xc7, 0x5b, 0x4f, 0x19, 0x87, 0xc3, 0x96, 0xb4, + 0x43, 0xed, 0xf8, 0x2a, 0x24, 0xcc, 0xf2, 0x4a, 0xc2, 0x79, 0x4b, 0xc3, 0xde, 0xfe, 0xae, 0x9e, + 0x6c, 0xc3, 0x6e, 0x06, 0x7b, 0xed, 0xd7, 0x37, 0x52, 0xb9, 0xbb, 0xe9, 0xc6, 0x87, 0x37, 0x92, + 0xfa, 0x46, 0xcb, 0x87, 0x65, 0x0c, 0x97, 0xf6, 0x42, 0xa2, 0xbe, 0x90, 0x94, 0x56, 0xfa, 0x6d, + 0x3c, 0x6a, 0x49, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x15, 0x40, 0xc5, 0xfe, 0x02, 0x00, + 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden new file mode 100644 index 0000000..8953d0f --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden @@ -0,0 +1,83 @@ +// Code generated by protoc-gen-go. +// source: google/protobuf/compiler/plugin.proto +// DO NOT EDIT! + +package google_protobuf_compiler + +import proto "github.com/golang/protobuf/proto" +import "math" +import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" + +// Reference proto and math imports to suppress error if they are not otherwise used. +var _ = proto.GetString +var _ = math.Inf + +type CodeGeneratorRequest struct { + FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate" json:"file_to_generate,omitempty"` + Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` + ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file" json:"proto_file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *CodeGeneratorRequest) Reset() { *this = CodeGeneratorRequest{} } +func (this *CodeGeneratorRequest) String() string { return proto.CompactTextString(this) } +func (*CodeGeneratorRequest) ProtoMessage() {} + +func (this *CodeGeneratorRequest) GetParameter() string { + if this != nil && this.Parameter != nil { + return *this.Parameter + } + return "" +} + +type CodeGeneratorResponse struct { + Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *CodeGeneratorResponse) Reset() { *this = CodeGeneratorResponse{} } +func (this *CodeGeneratorResponse) String() string { return proto.CompactTextString(this) } +func (*CodeGeneratorResponse) ProtoMessage() {} + +func (this *CodeGeneratorResponse) GetError() string { + if this != nil && this.Error != nil { + return *this.Error + } + return "" +} + +type CodeGeneratorResponse_File struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point" json:"insertion_point,omitempty"` + Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *CodeGeneratorResponse_File) Reset() { *this = CodeGeneratorResponse_File{} } +func (this *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(this) } +func (*CodeGeneratorResponse_File) ProtoMessage() {} + +func (this *CodeGeneratorResponse_File) GetName() string { + if this != nil && this.Name != nil { + return *this.Name + } + return "" +} + +func (this *CodeGeneratorResponse_File) GetInsertionPoint() string { + if this != nil && this.InsertionPoint != nil { + return *this.InsertionPoint + } + return "" +} + +func (this *CodeGeneratorResponse_File) GetContent() string { + if this != nil 
&& this.Content != nil { + return *this.Content + } + return "" +} + +func init() { +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto new file mode 100644 index 0000000..5b55745 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto @@ -0,0 +1,167 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// +// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to +// change. +// +// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is +// just a program that reads a CodeGeneratorRequest from stdin and writes a +// CodeGeneratorResponse to stdout. +// +// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead +// of dealing with the raw protocol defined here. +// +// A plugin executable needs only to be placed somewhere in the path. The +// plugin should be named "protoc-gen-$NAME", and will then be used when the +// flag "--${NAME}_out" is passed to protoc. + +syntax = "proto2"; +package google.protobuf.compiler; +option java_package = "com.google.protobuf.compiler"; +option java_outer_classname = "PluginProtos"; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/plugin;plugin_go"; + +import "google/protobuf/descriptor.proto"; + +// The version number of protocol compiler. +message Version { + optional int32 major = 1; + optional int32 minor = 2; + optional int32 patch = 3; + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + optional string suffix = 4; +} + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. +message CodeGeneratorRequest { + // The .proto files that were explicitly listed on the command-line. 
The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + repeated string file_to_generate = 1; + + // The generator parameter passed on the command-line. + optional string parameter = 2; + + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. + // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. + // + // Type names of fields and extensions in the FileDescriptorProto are always + // fully qualified. + repeated FileDescriptorProto proto_file = 15; + + // The version number of protocol compiler. + optional Version compiler_version = 3; + +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +message CodeGeneratorResponse { + // Error message. If non-empty, code generation failed. The plugin process + // should exit with status code zero even if it reports an error in this way. + // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. + optional string error = 1; + + // Represents a single generated file. + message File { + // The file name, relative to the output directory. The name must not + // contain "." or ".." components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". + // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. + optional string name = 1; + + // If non-empty, indicates that the named file should already exist, and the + // content here is to be inserted into that file at a defined insertion + // point. This feature allows a code generator to extend the output + // produced by another code generator. The original generator may provide + // insertion points by placing special annotations in the file that look + // like: + // @@protoc_insertion_point(NAME) + // The annotation can have arbitrary text before and after it on the line, + // which allows it to be placed in a comment. NAME should be replaced with + // an identifier naming the point -- this is what other generators will use + // as the insertion_point. Code inserted at this point will be placed + // immediately above the line containing the insertion point (thus multiple + // insertions to the same point will come out in the order they were added). 
+ // The double-@ is intended to make it unlikely that the generated code + // could contain things that look like insertion points by accident. + // + // For example, the C++ code generator places the following line in the + // .pb.h files that it generates: + // // @@protoc_insertion_point(namespace_scope) + // This line appears within the scope of the file's package namespace, but + // outside of any particular class. Another plugin can then specify the + // insertion_point "namespace_scope" to generate additional classes or + // other declarations that should be placed in this scope. + // + // Note that if the line containing the insertion point begins with + // whitespace, the same whitespace will be added to every line of the + // inserted text. This is useful for languages like Python, where + // indentation matters. In these languages, the insertion point comment + // should be indented the same amount as any inserted code will need to be + // in order to work correctly in that context. + // + // The code generator that generates the initial file and the one which + // inserts into it must both run as part of a single invocation of protoc. + // Code generators are executed in the order in which they appear on the + // command line. + // + // If |insertion_point| is present, |name| must also be present. + optional string insertion_point = 2; + + // The file contents. + optional string content = 15; + } + repeated File file = 15; +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile new file mode 100644 index 0000000..a0bf9fe --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile @@ -0,0 +1,73 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
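+
+# The golden target regenerates my_test/test.pb.go, strips the embedded
+# file descriptor with sed, and diffs the result against the checked-in
+# my_test/test.pb.go.golden copy.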
+ +all: + @echo run make test + +include ../../Make.protobuf + +test: golden testbuild + +#test: golden testbuild extension_test +# ./extension_test +# @echo PASS + +my_test/test.pb.go: my_test/test.proto + protoc --go_out=Mmulti/multi1.proto=github.com/golang/protobuf/protoc-gen-go/testdata/multi:. $< + +golden: + make -B my_test/test.pb.go + sed -i -e '/return.*fileDescriptor/d' my_test/test.pb.go + sed -i -e '/^var fileDescriptor/,/^}/d' my_test/test.pb.go + sed -i -e '/proto.RegisterFile.*fileDescriptor/d' my_test/test.pb.go + gofmt -w my_test/test.pb.go + diff -w my_test/test.pb.go my_test/test.pb.go.golden + +nuke: clean + +testbuild: regenerate + go test + +regenerate: + # Invoke protoc once to generate three independent .pb.go files in the same package. + protoc --go_out=. multi/multi1.proto multi/multi2.proto multi/multi3.proto + +#extension_test: extension_test.$O +# $(LD) -L. -o $@ $< + +#multi.a: multi3.pb.$O multi2.pb.$O multi1.pb.$O +# rm -f multi.a +# $(QUOTED_GOBIN)/gopack grc $@ $< + +#test.pb.go: imp.pb.go +#multi1.pb.go: multi2.pb.go multi3.pb.go +#main.$O: imp.pb.$O test.pb.$O multi.a +#extension_test.$O: extension_base.pb.$O extension_extra.pb.$O extension_user.pb.$O diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base.proto new file mode 100644 index 0000000..94acfc1 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base.proto @@ -0,0 +1,46 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package extension_base; + +message BaseMessage { + optional int32 height = 1; + extensions 4 to 9; + extensions 16 to max; +} + +// Another message that may be extended, using message_set_wire_format. 
+message OldStyleMessage { + option message_set_wire_format = true; + extensions 100 to max; +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra.proto new file mode 100644 index 0000000..fca7f60 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra.proto @@ -0,0 +1,38 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package extension_extra; + +message ExtraMessage { + optional int32 width = 1; +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_test.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_test.go new file mode 100644 index 0000000..86e9c11 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_test.go @@ -0,0 +1,210 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Test that we can use protocol buffers that use extensions. + +package testdata + +/* + +import ( + "bytes" + "regexp" + "testing" + + "github.com/golang/protobuf/proto" + base "extension_base.pb" + user "extension_user.pb" +) + +func TestSingleFieldExtension(t *testing.T) { + bm := &base.BaseMessage{ + Height: proto.Int32(178), + } + + // Use extension within scope of another type. + vol := proto.Uint32(11) + err := proto.SetExtension(bm, user.E_LoudMessage_Volume, vol) + if err != nil { + t.Fatal("Failed setting extension:", err) + } + buf, err := proto.Marshal(bm) + if err != nil { + t.Fatal("Failed encoding message with extension:", err) + } + bm_new := new(base.BaseMessage) + if err := proto.Unmarshal(buf, bm_new); err != nil { + t.Fatal("Failed decoding message with extension:", err) + } + if !proto.HasExtension(bm_new, user.E_LoudMessage_Volume) { + t.Fatal("Decoded message didn't contain extension.") + } + vol_out, err := proto.GetExtension(bm_new, user.E_LoudMessage_Volume) + if err != nil { + t.Fatal("Failed getting extension:", err) + } + if v := vol_out.(*uint32); *v != *vol { + t.Errorf("vol_out = %v, expected %v", *v, *vol) + } + proto.ClearExtension(bm_new, user.E_LoudMessage_Volume) + if proto.HasExtension(bm_new, user.E_LoudMessage_Volume) { + t.Fatal("Failed clearing extension.") + } +} + +func TestMessageExtension(t *testing.T) { + bm := &base.BaseMessage{ + Height: proto.Int32(179), + } + + // Use extension that is itself a message. 
+ um := &user.UserMessage{ + Name: proto.String("Dave"), + Rank: proto.String("Major"), + } + err := proto.SetExtension(bm, user.E_LoginMessage_UserMessage, um) + if err != nil { + t.Fatal("Failed setting extension:", err) + } + buf, err := proto.Marshal(bm) + if err != nil { + t.Fatal("Failed encoding message with extension:", err) + } + bm_new := new(base.BaseMessage) + if err := proto.Unmarshal(buf, bm_new); err != nil { + t.Fatal("Failed decoding message with extension:", err) + } + if !proto.HasExtension(bm_new, user.E_LoginMessage_UserMessage) { + t.Fatal("Decoded message didn't contain extension.") + } + um_out, err := proto.GetExtension(bm_new, user.E_LoginMessage_UserMessage) + if err != nil { + t.Fatal("Failed getting extension:", err) + } + if n := um_out.(*user.UserMessage).Name; *n != *um.Name { + t.Errorf("um_out.Name = %q, expected %q", *n, *um.Name) + } + if r := um_out.(*user.UserMessage).Rank; *r != *um.Rank { + t.Errorf("um_out.Rank = %q, expected %q", *r, *um.Rank) + } + proto.ClearExtension(bm_new, user.E_LoginMessage_UserMessage) + if proto.HasExtension(bm_new, user.E_LoginMessage_UserMessage) { + t.Fatal("Failed clearing extension.") + } +} + +func TestTopLevelExtension(t *testing.T) { + bm := &base.BaseMessage{ + Height: proto.Int32(179), + } + + width := proto.Int32(17) + err := proto.SetExtension(bm, user.E_Width, width) + if err != nil { + t.Fatal("Failed setting extension:", err) + } + buf, err := proto.Marshal(bm) + if err != nil { + t.Fatal("Failed encoding message with extension:", err) + } + bm_new := new(base.BaseMessage) + if err := proto.Unmarshal(buf, bm_new); err != nil { + t.Fatal("Failed decoding message with extension:", err) + } + if !proto.HasExtension(bm_new, user.E_Width) { + t.Fatal("Decoded message didn't contain extension.") + } + width_out, err := proto.GetExtension(bm_new, user.E_Width) + if err != nil { + t.Fatal("Failed getting extension:", err) + } + if w := width_out.(*int32); *w != *width { + t.Errorf("width_out = %v, expected %v", *w, *width) + } + proto.ClearExtension(bm_new, user.E_Width) + if proto.HasExtension(bm_new, user.E_Width) { + t.Fatal("Failed clearing extension.") + } +} + +func TestMessageSetWireFormat(t *testing.T) { + osm := new(base.OldStyleMessage) + osp := &user.OldStyleParcel{ + Name: proto.String("Dave"), + Height: proto.Int32(178), + } + + err := proto.SetExtension(osm, user.E_OldStyleParcel_MessageSetExtension, osp) + if err != nil { + t.Fatal("Failed setting extension:", err) + } + + buf, err := proto.Marshal(osm) + if err != nil { + t.Fatal("Failed encoding message:", err) + } + + // Data generated from Python implementation. + expected := []byte{ + 11, 16, 209, 15, 26, 9, 10, 4, 68, 97, 118, 101, 16, 178, 1, 12, + } + + if !bytes.Equal(expected, buf) { + t.Errorf("Encoding mismatch.\nwant %+v\n got %+v", expected, buf) + } + + // Check that it is restored correctly. 
+ osm = new(base.OldStyleMessage) + if err := proto.Unmarshal(buf, osm); err != nil { + t.Fatal("Failed decoding message:", err) + } + osp_out, err := proto.GetExtension(osm, user.E_OldStyleParcel_MessageSetExtension) + if err != nil { + t.Fatal("Failed getting extension:", err) + } + osp = osp_out.(*user.OldStyleParcel) + if *osp.Name != "Dave" || *osp.Height != 178 { + t.Errorf("Retrieved extension from decoded message is not correct: %+v", osp) + } +} + +func main() { + // simpler than rigging up gotest + testing.Main(regexp.MatchString, []testing.InternalTest{ + {"TestSingleFieldExtension", TestSingleFieldExtension}, + {"TestMessageExtension", TestMessageExtension}, + {"TestTopLevelExtension", TestTopLevelExtension}, + // register this one too, so the message-set test actually runs + {"TestMessageSetWireFormat", TestMessageSetWireFormat}, + }, + []testing.InternalBenchmark{}, + []testing.InternalExample{}) +} + +*/ diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user.proto new file mode 100644 index 0000000..ff65873 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user.proto @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
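+// This file exercises every flavor of proto2 extension the generator must
+// handle: top-level scalar extensions, extensions declared inside another
+// message's scope, message-typed and repeated extensions, an extension of
+// an extension, and a MessageSet-style parcel.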
+ +syntax = "proto2"; + +import "extension_base.proto"; +import "extension_extra.proto"; + +package extension_user; + +message UserMessage { + optional string name = 1; + optional string rank = 2; +} + +// Extend with a message +extend extension_base.BaseMessage { + optional UserMessage user_message = 5; +} + +// Extend with a foreign message +extend extension_base.BaseMessage { + optional extension_extra.ExtraMessage extra_message = 9; +} + +// Extend with some primitive types +extend extension_base.BaseMessage { + optional int32 width = 6; + optional int64 area = 7; +} + +// Extend inside the scope of another type +message LoudMessage { + extend extension_base.BaseMessage { + optional uint32 volume = 8; + } + extensions 100 to max; +} + +// Extend inside the scope of another type, using a message. +message LoginMessage { + extend extension_base.BaseMessage { + optional UserMessage user_message = 16; + } +} + +// Extend with a repeated field +extend extension_base.BaseMessage { + repeated Detail detail = 17; +} + +message Detail { + optional string color = 1; +} + +// An extension of an extension +message Announcement { + optional string words = 1; + extend LoudMessage { + optional Announcement loud_ext = 100; + } +} + +// Something that can be put in a message set. +message OldStyleParcel { + extend extension_base.OldStyleMessage { + optional OldStyleParcel message_set_extension = 2001; + } + + required string name = 1; + optional int32 height = 2; +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc.proto new file mode 100644 index 0000000..b8bc41a --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc.proto @@ -0,0 +1,59 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto3"; + +package grpc.testing; + +message SimpleRequest { +} + +message SimpleResponse { +} + +message StreamMsg { +} + +message StreamMsg2 { +} + +service Test { + rpc UnaryCall(SimpleRequest) returns (SimpleResponse); + + // This RPC streams from the server only. + rpc Downstream(SimpleRequest) returns (stream StreamMsg); + + // This RPC streams from the client. + rpc Upstream(stream StreamMsg) returns (SimpleResponse); + + // This one streams in both directions. + rpc Bidi(stream StreamMsg) returns (stream StreamMsg2); +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.pb.go.golden b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.pb.go.golden new file mode 100644 index 0000000..784a4f8 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.pb.go.golden @@ -0,0 +1,113 @@ +// Code generated by protoc-gen-go. +// source: imp.proto +// DO NOT EDIT! + +package imp + +import proto "github.com/golang/protobuf/proto" +import "math" +import "os" +import imp1 "imp2.pb" + +// Reference proto & math imports to suppress error if they are not otherwise used. +var _ = proto.GetString +var _ = math.Inf + +// Types from public import imp2.proto +type PubliclyImportedMessage imp1.PubliclyImportedMessage + +func (this *PubliclyImportedMessage) Reset() { (*imp1.PubliclyImportedMessage)(this).Reset() } +func (this *PubliclyImportedMessage) String() string { + return (*imp1.PubliclyImportedMessage)(this).String() +} + +// PubliclyImportedMessage from public import imp.proto + +type ImportedMessage_Owner int32 + +const ( + ImportedMessage_DAVE ImportedMessage_Owner = 1 + ImportedMessage_MIKE ImportedMessage_Owner = 2 +) + +var ImportedMessage_Owner_name = map[int32]string{ + 1: "DAVE", + 2: "MIKE", +} +var ImportedMessage_Owner_value = map[string]int32{ + "DAVE": 1, + "MIKE": 2, +} + +// NewImportedMessage_Owner is deprecated. Use x.Enum() instead. 
+func NewImportedMessage_Owner(x ImportedMessage_Owner) *ImportedMessage_Owner { + e := ImportedMessage_Owner(x) + return &e +} +func (x ImportedMessage_Owner) Enum() *ImportedMessage_Owner { + p := new(ImportedMessage_Owner) + *p = x + return p +} +func (x ImportedMessage_Owner) String() string { + return proto.EnumName(ImportedMessage_Owner_name, int32(x)) +} + +type ImportedMessage struct { + Field *int64 `protobuf:"varint,1,req,name=field" json:"field,omitempty"` + XXX_extensions map[int32][]byte `json:",omitempty"` + XXX_unrecognized []byte `json:",omitempty"` +} + +func (this *ImportedMessage) Reset() { *this = ImportedMessage{} } +func (this *ImportedMessage) String() string { return proto.CompactTextString(this) } + +var extRange_ImportedMessage = []proto.ExtensionRange{ + proto.ExtensionRange{90, 100}, +} + +func (*ImportedMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ImportedMessage +} +func (this *ImportedMessage) ExtensionMap() map[int32][]byte { + if this.XXX_extensions == nil { + this.XXX_extensions = make(map[int32][]byte) + } + return this.XXX_extensions +} + +type ImportedExtendable struct { + XXX_extensions map[int32][]byte `json:",omitempty"` + XXX_unrecognized []byte `json:",omitempty"` +} + +func (this *ImportedExtendable) Reset() { *this = ImportedExtendable{} } +func (this *ImportedExtendable) String() string { return proto.CompactTextString(this) } + +func (this *ImportedExtendable) Marshal() ([]byte, error) { + return proto.MarshalMessageSet(this.ExtensionMap()) +} +func (this *ImportedExtendable) Unmarshal(buf []byte) error { + return proto.UnmarshalMessageSet(buf, this.ExtensionMap()) +} +// ensure ImportedExtendable satisfies proto.Marshaler and proto.Unmarshaler +var _ proto.Marshaler = (*ImportedExtendable)(nil) +var _ proto.Unmarshaler = (*ImportedExtendable)(nil) + +var extRange_ImportedExtendable = []proto.ExtensionRange{ + proto.ExtensionRange{100, 536870911}, +} + +func (*ImportedExtendable) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ImportedExtendable +} +func (this *ImportedExtendable) ExtensionMap() map[int32][]byte { + if this.XXX_extensions == nil { + this.XXX_extensions = make(map[int32][]byte) + } + return this.XXX_extensions +} + +func init() { + proto.RegisterEnum("imp.ImportedMessage_Owner", ImportedMessage_Owner_name, ImportedMessage_Owner_value) +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.proto new file mode 100644 index 0000000..156e078 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.proto @@ -0,0 +1,70 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package imp; + +import "imp2.proto"; +import "imp3.proto"; + +message ImportedMessage { + required int64 field = 1; + + // The forwarded getters for these fields are fiddly to get right. + optional ImportedMessage2 local_msg = 2; + optional ForeignImportedMessage foreign_msg = 3; // in imp3.proto + optional Owner enum_field = 4; + oneof union { + int32 state = 9; + } + + repeated string name = 5; + repeated Owner boss = 6; + repeated ImportedMessage2 memo = 7; + + map<string, ImportedMessage2> msg_map = 8; + + enum Owner { + DAVE = 1; + MIKE = 2; + } + + extensions 90 to 100; +} + +message ImportedMessage2 { +} + +message ImportedExtendable { + option message_set_wire_format = true; + extensions 100 to max; +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp2.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp2.proto new file mode 100644 index 0000000..3bb0632 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp2.proto @@ -0,0 +1,43 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package imp; + +message PubliclyImportedMessage { + optional int64 field = 1; +} + +enum PubliclyImportedEnum { + GLASSES = 1; + HAIR = 2; +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp3.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp3.proto new file mode 100644 index 0000000..58fc759 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp3.proto @@ -0,0 +1,38 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package imp; + +message ForeignImportedMessage { + optional string tuber = 1; +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/main_test.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/main_test.go new file mode 100644 index 0000000..f9b5ccf --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/main_test.go @@ -0,0 +1,46 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// A simple binary to link together the protocol buffers in this test. + +package testdata + +import ( + "testing" + + mytestpb "./my_test" + multipb "github.com/golang/protobuf/protoc-gen-go/testdata/multi" +) + +func TestLink(t *testing.T) { + _ = &multipb.Multi1{} + _ = &mytestpb.Request{} +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto new file mode 100644 index 0000000..0da6e0a --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto @@ -0,0 +1,44 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +import "multi/multi2.proto"; +import "multi/multi3.proto"; + +package multitest; + +message Multi1 { + required Multi2 multi2 = 1; + optional Multi2.Color color = 2; + optional Multi3.HatType hat_type = 3; +} + diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto new file mode 100644 index 0000000..e6bfc71 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto @@ -0,0 +1,46 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package multitest; + +message Multi2 { + required int32 required_value = 1; + + enum Color { + BLUE = 1; + GREEN = 2; + RED = 3; + }; + optional Color color = 2; +} + diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto new file mode 100644 index 0000000..146c255 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto @@ -0,0 +1,43 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package multitest; + +message Multi3 { + enum HatType { + FEDORA = 1; + FEZ = 2; + }; + optional HatType hat_type = 1; +} + diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go new file mode 100644 index 0000000..1954e3f --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go @@ -0,0 +1,870 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: my_test/test.proto + +/* +Package my_test is a generated protocol buffer package. + +This package holds interesting messages. + +It is generated from these files: + my_test/test.proto + +It has these top-level messages: + Request + Reply + OtherBase + ReplyExtensions + OtherReplyExtensions + OldReply + Communique +*/ +package my_test + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/golang/protobuf/protoc-gen-go/testdata/multi" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type HatType int32 + +const ( + // deliberately skipping 0 + HatType_FEDORA HatType = 1 + HatType_FEZ HatType = 2 +) + +var HatType_name = map[int32]string{ + 1: "FEDORA", + 2: "FEZ", +} +var HatType_value = map[string]int32{ + "FEDORA": 1, + "FEZ": 2, +} + +func (x HatType) Enum() *HatType { + p := new(HatType) + *p = x + return p +} +func (x HatType) String() string { + return proto.EnumName(HatType_name, int32(x)) +} +func (x *HatType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(HatType_value, data, "HatType") + if err != nil { + return err + } + *x = HatType(value) + return nil +} + +// This enum represents days of the week. +type Days int32 + +const ( + Days_MONDAY Days = 1 + Days_TUESDAY Days = 2 + Days_LUNDI Days = 1 +) + +var Days_name = map[int32]string{ + 1: "MONDAY", + 2: "TUESDAY", + // Duplicate value: 1: "LUNDI", +} +var Days_value = map[string]int32{ + "MONDAY": 1, + "TUESDAY": 2, + "LUNDI": 1, +} + +func (x Days) Enum() *Days { + p := new(Days) + *p = x + return p +} +func (x Days) String() string { + return proto.EnumName(Days_name, int32(x)) +} +func (x *Days) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Days_value, data, "Days") + if err != nil { + return err + } + *x = Days(value) + return nil +} + +type Request_Color int32 + +const ( + Request_RED Request_Color = 0 + Request_GREEN Request_Color = 1 + Request_BLUE Request_Color = 2 +) + +var Request_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var Request_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x Request_Color) Enum() *Request_Color { + p := new(Request_Color) + *p = x + return p +} +func (x Request_Color) String() string { + return proto.EnumName(Request_Color_name, int32(x)) +} +func (x *Request_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Request_Color_value, data, "Request_Color") + if err != nil { + return err + } + *x = Request_Color(value) + return nil +} + +type Reply_Entry_Game int32 + +const ( + Reply_Entry_FOOTBALL Reply_Entry_Game = 1 + Reply_Entry_TENNIS Reply_Entry_Game = 2 +) + +var Reply_Entry_Game_name = map[int32]string{ + 1: "FOOTBALL", + 2: "TENNIS", +} +var Reply_Entry_Game_value = map[string]int32{ + "FOOTBALL": 1, + "TENNIS": 2, +} + +func (x Reply_Entry_Game) Enum() *Reply_Entry_Game { + p := new(Reply_Entry_Game) + *p = x + return p +} +func (x Reply_Entry_Game) String() string { + return proto.EnumName(Reply_Entry_Game_name, int32(x)) +} +func (x *Reply_Entry_Game) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Reply_Entry_Game_value, data, "Reply_Entry_Game") + if err != nil { + return err + } + *x = Reply_Entry_Game(value) + return nil +} + +// This is a message that might be sent somewhere. +type Request struct { + Key []int64 `protobuf:"varint,1,rep,name=key" json:"key,omitempty"` + // optional imp.ImportedMessage imported_message = 2; + Hue *Request_Color `protobuf:"varint,3,opt,name=hue,enum=my.test.Request_Color" json:"hue,omitempty"` + Hat *HatType `protobuf:"varint,4,opt,name=hat,enum=my.test.HatType,def=1" json:"hat,omitempty"` + // optional imp.ImportedMessage.Owner owner = 6; + Deadline *float32 `protobuf:"fixed32,7,opt,name=deadline,def=inf" json:"deadline,omitempty"` + Somegroup *Request_SomeGroup `protobuf:"group,8,opt,name=SomeGroup,json=somegroup" json:"somegroup,omitempty"` + // This is a map field. 
It will generate map[int32]string. + NameMapping map[int32]string `protobuf:"bytes,14,rep,name=name_mapping,json=nameMapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // This is a map field whose value type is a message. + MsgMapping map[int64]*Reply `protobuf:"bytes,15,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Reset_ *int32 `protobuf:"varint,12,opt,name=reset" json:"reset,omitempty"` + // This field should not conflict with any getters. + GetKey_ *string `protobuf:"bytes,16,opt,name=get_key,json=getKey" json:"get_key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} + +const Default_Request_Hat HatType = HatType_FEDORA + +var Default_Request_Deadline float32 = float32(math.Inf(1)) + +func (m *Request) GetKey() []int64 { + if m != nil { + return m.Key + } + return nil +} + +func (m *Request) GetHue() Request_Color { + if m != nil && m.Hue != nil { + return *m.Hue + } + return Request_RED +} + +func (m *Request) GetHat() HatType { + if m != nil && m.Hat != nil { + return *m.Hat + } + return Default_Request_Hat +} + +func (m *Request) GetDeadline() float32 { + if m != nil && m.Deadline != nil { + return *m.Deadline + } + return Default_Request_Deadline +} + +func (m *Request) GetSomegroup() *Request_SomeGroup { + if m != nil { + return m.Somegroup + } + return nil +} + +func (m *Request) GetNameMapping() map[int32]string { + if m != nil { + return m.NameMapping + } + return nil +} + +func (m *Request) GetMsgMapping() map[int64]*Reply { + if m != nil { + return m.MsgMapping + } + return nil +} + +func (m *Request) GetReset_() int32 { + if m != nil && m.Reset_ != nil { + return *m.Reset_ + } + return 0 +} + +func (m *Request) GetGetKey_() string { + if m != nil && m.GetKey_ != nil { + return *m.GetKey_ + } + return "" +} + +type Request_SomeGroup struct { + GroupField *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Request_SomeGroup) Reset() { *m = Request_SomeGroup{} } +func (m *Request_SomeGroup) String() string { return proto.CompactTextString(m) } +func (*Request_SomeGroup) ProtoMessage() {} + +func (m *Request_SomeGroup) GetGroupField() int32 { + if m != nil && m.GroupField != nil { + return *m.GroupField + } + return 0 +} + +type Reply struct { + Found []*Reply_Entry `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"` + CompactKeys []int32 `protobuf:"varint,2,rep,packed,name=compact_keys,json=compactKeys" json:"compact_keys,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Reply) Reset() { *m = Reply{} } +func (m *Reply) String() string { return proto.CompactTextString(m) } +func (*Reply) ProtoMessage() {} + +var extRange_Reply = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*Reply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_Reply +} + +func (m *Reply) GetFound() []*Reply_Entry { + if m != nil { + return m.Found + } + return nil +} + +func (m *Reply) GetCompactKeys() []int32 { + if m != nil { + return m.CompactKeys + } + return nil +} + +type Reply_Entry struct { + KeyThatNeeds_1234Camel_CasIng *int64 
`protobuf:"varint,1,req,name=key_that_needs_1234camel_CasIng,json=keyThatNeeds1234camelCasIng" json:"key_that_needs_1234camel_CasIng,omitempty"` + Value *int64 `protobuf:"varint,2,opt,name=value,def=7" json:"value,omitempty"` + XMyFieldName_2 *int64 `protobuf:"varint,3,opt,name=_my_field_name_2,json=MyFieldName2" json:"_my_field_name_2,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Reply_Entry) Reset() { *m = Reply_Entry{} } +func (m *Reply_Entry) String() string { return proto.CompactTextString(m) } +func (*Reply_Entry) ProtoMessage() {} + +const Default_Reply_Entry_Value int64 = 7 + +func (m *Reply_Entry) GetKeyThatNeeds_1234Camel_CasIng() int64 { + if m != nil && m.KeyThatNeeds_1234Camel_CasIng != nil { + return *m.KeyThatNeeds_1234Camel_CasIng + } + return 0 +} + +func (m *Reply_Entry) GetValue() int64 { + if m != nil && m.Value != nil { + return *m.Value + } + return Default_Reply_Entry_Value +} + +func (m *Reply_Entry) GetXMyFieldName_2() int64 { + if m != nil && m.XMyFieldName_2 != nil { + return *m.XMyFieldName_2 + } + return 0 +} + +type OtherBase struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OtherBase) Reset() { *m = OtherBase{} } +func (m *OtherBase) String() string { return proto.CompactTextString(m) } +func (*OtherBase) ProtoMessage() {} + +var extRange_OtherBase = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*OtherBase) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OtherBase +} + +func (m *OtherBase) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +type ReplyExtensions struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ReplyExtensions) Reset() { *m = ReplyExtensions{} } +func (m *ReplyExtensions) String() string { return proto.CompactTextString(m) } +func (*ReplyExtensions) ProtoMessage() {} + +var E_ReplyExtensions_Time = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*float64)(nil), + Field: 101, + Name: "my.test.ReplyExtensions.time", + Tag: "fixed64,101,opt,name=time", + Filename: "my_test/test.proto", +} + +var E_ReplyExtensions_Carrot = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*ReplyExtensions)(nil), + Field: 105, + Name: "my.test.ReplyExtensions.carrot", + Tag: "bytes,105,opt,name=carrot", + Filename: "my_test/test.proto", +} + +var E_ReplyExtensions_Donut = &proto.ExtensionDesc{ + ExtendedType: (*OtherBase)(nil), + ExtensionType: (*ReplyExtensions)(nil), + Field: 101, + Name: "my.test.ReplyExtensions.donut", + Tag: "bytes,101,opt,name=donut", + Filename: "my_test/test.proto", +} + +type OtherReplyExtensions struct { + Key *int32 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OtherReplyExtensions) Reset() { *m = OtherReplyExtensions{} } +func (m *OtherReplyExtensions) String() string { return proto.CompactTextString(m) } +func (*OtherReplyExtensions) ProtoMessage() {} + +func (m *OtherReplyExtensions) GetKey() int32 { + if m != nil && m.Key != nil { + return *m.Key + } + return 0 +} + +type OldReply struct { + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldReply) Reset() { *m = OldReply{} } +func (m *OldReply) String() string { return proto.CompactTextString(m) } +func (*OldReply) ProtoMessage() {} + +func (m *OldReply) Marshal() ([]byte, error) { + return 
proto.MarshalMessageSet(&m.XXX_InternalExtensions) +} +func (m *OldReply) Unmarshal(buf []byte) error { + return proto.UnmarshalMessageSet(buf, &m.XXX_InternalExtensions) +} +func (m *OldReply) MarshalJSON() ([]byte, error) { + return proto.MarshalMessageSetJSON(&m.XXX_InternalExtensions) +} +func (m *OldReply) UnmarshalJSON(buf []byte) error { + return proto.UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions) +} + +// ensure OldReply satisfies proto.Marshaler and proto.Unmarshaler +var _ proto.Marshaler = (*OldReply)(nil) +var _ proto.Unmarshaler = (*OldReply)(nil) + +var extRange_OldReply = []proto.ExtensionRange{ + {100, 2147483646}, +} + +func (*OldReply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OldReply +} + +type Communique struct { + MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"` + // This is a oneof, called "union". + // + // Types that are valid to be assigned to Union: + // *Communique_Number + // *Communique_Name + // *Communique_Data + // *Communique_TempC + // *Communique_Height + // *Communique_Today + // *Communique_Maybe + // *Communique_Delta_ + // *Communique_Msg + // *Communique_Somegroup + Union isCommunique_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Communique) Reset() { *m = Communique{} } +func (m *Communique) String() string { return proto.CompactTextString(m) } +func (*Communique) ProtoMessage() {} + +type isCommunique_Union interface { + isCommunique_Union() +} + +type Communique_Number struct { + Number int32 `protobuf:"varint,5,opt,name=number,oneof"` +} +type Communique_Name struct { + Name string `protobuf:"bytes,6,opt,name=name,oneof"` +} +type Communique_Data struct { + Data []byte `protobuf:"bytes,7,opt,name=data,oneof"` +} +type Communique_TempC struct { + TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,json=tempC,oneof"` +} +type Communique_Height struct { + Height float32 `protobuf:"fixed32,9,opt,name=height,oneof"` +} +type Communique_Today struct { + Today Days `protobuf:"varint,10,opt,name=today,enum=my.test.Days,oneof"` +} +type Communique_Maybe struct { + Maybe bool `protobuf:"varint,11,opt,name=maybe,oneof"` +} +type Communique_Delta_ struct { + Delta int32 `protobuf:"zigzag32,12,opt,name=delta,oneof"` +} +type Communique_Msg struct { + Msg *Reply `protobuf:"bytes,13,opt,name=msg,oneof"` +} +type Communique_Somegroup struct { + Somegroup *Communique_SomeGroup `protobuf:"group,14,opt,name=SomeGroup,json=somegroup,oneof"` +} + +func (*Communique_Number) isCommunique_Union() {} +func (*Communique_Name) isCommunique_Union() {} +func (*Communique_Data) isCommunique_Union() {} +func (*Communique_TempC) isCommunique_Union() {} +func (*Communique_Height) isCommunique_Union() {} +func (*Communique_Today) isCommunique_Union() {} +func (*Communique_Maybe) isCommunique_Union() {} +func (*Communique_Delta_) isCommunique_Union() {} +func (*Communique_Msg) isCommunique_Union() {} +func (*Communique_Somegroup) isCommunique_Union() {} + +func (m *Communique) GetUnion() isCommunique_Union { + if m != nil { + return m.Union + } + return nil +} + +func (m *Communique) GetMakeMeCry() bool { + if m != nil && m.MakeMeCry != nil { + return *m.MakeMeCry + } + return false +} + +func (m *Communique) GetNumber() int32 { + if x, ok := m.GetUnion().(*Communique_Number); ok { + return x.Number + } + return 0 +} + +func (m *Communique) GetName() string { + if x, ok := m.GetUnion().(*Communique_Name); ok { + return x.Name + } + return "" +} + +func (m 
*Communique) GetData() []byte { + if x, ok := m.GetUnion().(*Communique_Data); ok { + return x.Data + } + return nil +} + +func (m *Communique) GetTempC() float64 { + if x, ok := m.GetUnion().(*Communique_TempC); ok { + return x.TempC + } + return 0 +} + +func (m *Communique) GetHeight() float32 { + if x, ok := m.GetUnion().(*Communique_Height); ok { + return x.Height + } + return 0 +} + +func (m *Communique) GetToday() Days { + if x, ok := m.GetUnion().(*Communique_Today); ok { + return x.Today + } + return Days_MONDAY +} + +func (m *Communique) GetMaybe() bool { + if x, ok := m.GetUnion().(*Communique_Maybe); ok { + return x.Maybe + } + return false +} + +func (m *Communique) GetDelta() int32 { + if x, ok := m.GetUnion().(*Communique_Delta_); ok { + return x.Delta + } + return 0 +} + +func (m *Communique) GetMsg() *Reply { + if x, ok := m.GetUnion().(*Communique_Msg); ok { + return x.Msg + } + return nil +} + +func (m *Communique) GetSomegroup() *Communique_SomeGroup { + if x, ok := m.GetUnion().(*Communique_Somegroup); ok { + return x.Somegroup + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Communique) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Communique_OneofMarshaler, _Communique_OneofUnmarshaler, _Communique_OneofSizer, []interface{}{ + (*Communique_Number)(nil), + (*Communique_Name)(nil), + (*Communique_Data)(nil), + (*Communique_TempC)(nil), + (*Communique_Height)(nil), + (*Communique_Today)(nil), + (*Communique_Maybe)(nil), + (*Communique_Delta_)(nil), + (*Communique_Msg)(nil), + (*Communique_Somegroup)(nil), + } +} + +func _Communique_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Communique) + // union + switch x := m.Union.(type) { + case *Communique_Number: + b.EncodeVarint(5<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Number)) + case *Communique_Name: + b.EncodeVarint(6<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Name) + case *Communique_Data: + b.EncodeVarint(7<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Data) + case *Communique_TempC: + b.EncodeVarint(8<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.TempC)) + case *Communique_Height: + b.EncodeVarint(9<<3 | proto.WireFixed32) + b.EncodeFixed32(uint64(math.Float32bits(x.Height))) + case *Communique_Today: + b.EncodeVarint(10<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Today)) + case *Communique_Maybe: + t := uint64(0) + if x.Maybe { + t = 1 + } + b.EncodeVarint(11<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Communique_Delta_: + b.EncodeVarint(12<<3 | proto.WireVarint) + b.EncodeZigzag32(uint64(x.Delta)) + case *Communique_Msg: + b.EncodeVarint(13<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Msg); err != nil { + return err + } + case *Communique_Somegroup: + b.EncodeVarint(14<<3 | proto.WireStartGroup) + if err := b.Marshal(x.Somegroup); err != nil { + return err + } + b.EncodeVarint(14<<3 | proto.WireEndGroup) + case nil: + default: + return fmt.Errorf("Communique.Union has unexpected type %T", x) + } + return nil +} + +func _Communique_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Communique) + switch tag { + case 5: // union.number + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Number{int32(x)} + return true, err + case 6: // 
union.name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &Communique_Name{x} + return true, err + case 7: // union.data + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Union = &Communique_Data{x} + return true, err + case 8: // union.temp_c + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Union = &Communique_TempC{math.Float64frombits(x)} + return true, err + case 9: // union.height + if wire != proto.WireFixed32 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed32() + m.Union = &Communique_Height{math.Float32frombits(uint32(x))} + return true, err + case 10: // union.today + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Today{Days(x)} + return true, err + case 11: // union.maybe + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Maybe{x != 0} + return true, err + case 12: // union.delta + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeZigzag32() + m.Union = &Communique_Delta_{int32(x)} + return true, err + case 13: // union.msg + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Reply) + err := b.DecodeMessage(msg) + m.Union = &Communique_Msg{msg} + return true, err + case 14: // union.somegroup + if wire != proto.WireStartGroup { + return true, proto.ErrInternalBadWireType + } + msg := new(Communique_SomeGroup) + err := b.DecodeGroup(msg) + m.Union = &Communique_Somegroup{msg} + return true, err + default: + return false, nil + } +} + +func _Communique_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Communique) + // union + switch x := m.Union.(type) { + case *Communique_Number: + n += proto.SizeVarint(5<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Number)) + case *Communique_Name: + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Name))) + n += len(x.Name) + case *Communique_Data: + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Data))) + n += len(x.Data) + case *Communique_TempC: + n += proto.SizeVarint(8<<3 | proto.WireFixed64) + n += 8 + case *Communique_Height: + n += proto.SizeVarint(9<<3 | proto.WireFixed32) + n += 4 + case *Communique_Today: + n += proto.SizeVarint(10<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Today)) + case *Communique_Maybe: + n += proto.SizeVarint(11<<3 | proto.WireVarint) + n += 1 + case *Communique_Delta_: + n += proto.SizeVarint(12<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64((uint32(x.Delta) << 1) ^ uint32((int32(x.Delta) >> 31)))) + case *Communique_Msg: + s := proto.Size(x.Msg) + n += proto.SizeVarint(13<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Communique_Somegroup: + n += proto.SizeVarint(14<<3 | proto.WireStartGroup) + n += proto.Size(x.Somegroup) + n += proto.SizeVarint(14<<3 | proto.WireEndGroup) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Communique_SomeGroup struct { + Member *string `protobuf:"bytes,15,opt,name=member" json:"member,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Communique_SomeGroup) Reset() { *m = Communique_SomeGroup{} 
} +func (m *Communique_SomeGroup) String() string { return proto.CompactTextString(m) } +func (*Communique_SomeGroup) ProtoMessage() {} + +func (m *Communique_SomeGroup) GetMember() string { + if m != nil && m.Member != nil { + return *m.Member + } + return "" +} + +type Communique_Delta struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Communique_Delta) Reset() { *m = Communique_Delta{} } +func (m *Communique_Delta) String() string { return proto.CompactTextString(m) } +func (*Communique_Delta) ProtoMessage() {} + +var E_Tag = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*string)(nil), + Field: 103, + Name: "my.test.tag", + Tag: "bytes,103,opt,name=tag", + Filename: "my_test/test.proto", +} + +var E_Donut = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*OtherReplyExtensions)(nil), + Field: 106, + Name: "my.test.donut", + Tag: "bytes,106,opt,name=donut", + Filename: "my_test/test.proto", +} + +func init() { + proto.RegisterType((*Request)(nil), "my.test.Request") + proto.RegisterType((*Request_SomeGroup)(nil), "my.test.Request.SomeGroup") + proto.RegisterType((*Reply)(nil), "my.test.Reply") + proto.RegisterType((*Reply_Entry)(nil), "my.test.Reply.Entry") + proto.RegisterType((*OtherBase)(nil), "my.test.OtherBase") + proto.RegisterType((*ReplyExtensions)(nil), "my.test.ReplyExtensions") + proto.RegisterType((*OtherReplyExtensions)(nil), "my.test.OtherReplyExtensions") + proto.RegisterType((*OldReply)(nil), "my.test.OldReply") + proto.RegisterType((*Communique)(nil), "my.test.Communique") + proto.RegisterType((*Communique_SomeGroup)(nil), "my.test.Communique.SomeGroup") + proto.RegisterType((*Communique_Delta)(nil), "my.test.Communique.Delta") + proto.RegisterEnum("my.test.HatType", HatType_name, HatType_value) + proto.RegisterEnum("my.test.Days", Days_name, Days_value) + proto.RegisterEnum("my.test.Request_Color", Request_Color_name, Request_Color_value) + proto.RegisterEnum("my.test.Reply_Entry_Game", Reply_Entry_Game_name, Reply_Entry_Game_value) + proto.RegisterExtension(E_ReplyExtensions_Time) + proto.RegisterExtension(E_ReplyExtensions_Carrot) + proto.RegisterExtension(E_ReplyExtensions_Donut) + proto.RegisterExtension(E_Tag) + proto.RegisterExtension(E_Donut) +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden new file mode 100644 index 0000000..1954e3f --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden @@ -0,0 +1,870 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: my_test/test.proto + +/* +Package my_test is a generated protocol buffer package. + +This package holds interesting messages. + +It is generated from these files: + my_test/test.proto + +It has these top-level messages: + Request + Reply + OtherBase + ReplyExtensions + OtherReplyExtensions + OldReply + Communique +*/ +package my_test + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/golang/protobuf/protoc-gen-go/testdata/multi" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type HatType int32 + +const ( + // deliberately skipping 0 + HatType_FEDORA HatType = 1 + HatType_FEZ HatType = 2 +) + +var HatType_name = map[int32]string{ + 1: "FEDORA", + 2: "FEZ", +} +var HatType_value = map[string]int32{ + "FEDORA": 1, + "FEZ": 2, +} + +func (x HatType) Enum() *HatType { + p := new(HatType) + *p = x + return p +} +func (x HatType) String() string { + return proto.EnumName(HatType_name, int32(x)) +} +func (x *HatType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(HatType_value, data, "HatType") + if err != nil { + return err + } + *x = HatType(value) + return nil +} + +// This enum represents days of the week. +type Days int32 + +const ( + Days_MONDAY Days = 1 + Days_TUESDAY Days = 2 + Days_LUNDI Days = 1 +) + +var Days_name = map[int32]string{ + 1: "MONDAY", + 2: "TUESDAY", + // Duplicate value: 1: "LUNDI", +} +var Days_value = map[string]int32{ + "MONDAY": 1, + "TUESDAY": 2, + "LUNDI": 1, +} + +func (x Days) Enum() *Days { + p := new(Days) + *p = x + return p +} +func (x Days) String() string { + return proto.EnumName(Days_name, int32(x)) +} +func (x *Days) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Days_value, data, "Days") + if err != nil { + return err + } + *x = Days(value) + return nil +} + +type Request_Color int32 + +const ( + Request_RED Request_Color = 0 + Request_GREEN Request_Color = 1 + Request_BLUE Request_Color = 2 +) + +var Request_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var Request_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x Request_Color) Enum() *Request_Color { + p := new(Request_Color) + *p = x + return p +} +func (x Request_Color) String() string { + return proto.EnumName(Request_Color_name, int32(x)) +} +func (x *Request_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Request_Color_value, data, "Request_Color") + if err != nil { + return err + } + *x = Request_Color(value) + return nil +} + +type Reply_Entry_Game int32 + +const ( + Reply_Entry_FOOTBALL Reply_Entry_Game = 1 + Reply_Entry_TENNIS Reply_Entry_Game = 2 +) + +var Reply_Entry_Game_name = map[int32]string{ + 1: "FOOTBALL", + 2: "TENNIS", +} +var Reply_Entry_Game_value = map[string]int32{ + "FOOTBALL": 1, + "TENNIS": 2, +} + +func (x Reply_Entry_Game) Enum() *Reply_Entry_Game { + p := new(Reply_Entry_Game) + *p = x + return p +} +func (x Reply_Entry_Game) String() string { + return proto.EnumName(Reply_Entry_Game_name, int32(x)) +} +func (x *Reply_Entry_Game) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Reply_Entry_Game_value, data, "Reply_Entry_Game") + if err != nil { + return err + } + *x = Reply_Entry_Game(value) + return nil +} + +// This is a message that might be sent somewhere. 
+type Request struct { + Key []int64 `protobuf:"varint,1,rep,name=key" json:"key,omitempty"` + // optional imp.ImportedMessage imported_message = 2; + Hue *Request_Color `protobuf:"varint,3,opt,name=hue,enum=my.test.Request_Color" json:"hue,omitempty"` + Hat *HatType `protobuf:"varint,4,opt,name=hat,enum=my.test.HatType,def=1" json:"hat,omitempty"` + // optional imp.ImportedMessage.Owner owner = 6; + Deadline *float32 `protobuf:"fixed32,7,opt,name=deadline,def=inf" json:"deadline,omitempty"` + Somegroup *Request_SomeGroup `protobuf:"group,8,opt,name=SomeGroup,json=somegroup" json:"somegroup,omitempty"` + // This is a map field. It will generate map[int32]string. + NameMapping map[int32]string `protobuf:"bytes,14,rep,name=name_mapping,json=nameMapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // This is a map field whose value type is a message. + MsgMapping map[int64]*Reply `protobuf:"bytes,15,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Reset_ *int32 `protobuf:"varint,12,opt,name=reset" json:"reset,omitempty"` + // This field should not conflict with any getters. + GetKey_ *string `protobuf:"bytes,16,opt,name=get_key,json=getKey" json:"get_key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} + +const Default_Request_Hat HatType = HatType_FEDORA + +var Default_Request_Deadline float32 = float32(math.Inf(1)) + +func (m *Request) GetKey() []int64 { + if m != nil { + return m.Key + } + return nil +} + +func (m *Request) GetHue() Request_Color { + if m != nil && m.Hue != nil { + return *m.Hue + } + return Request_RED +} + +func (m *Request) GetHat() HatType { + if m != nil && m.Hat != nil { + return *m.Hat + } + return Default_Request_Hat +} + +func (m *Request) GetDeadline() float32 { + if m != nil && m.Deadline != nil { + return *m.Deadline + } + return Default_Request_Deadline +} + +func (m *Request) GetSomegroup() *Request_SomeGroup { + if m != nil { + return m.Somegroup + } + return nil +} + +func (m *Request) GetNameMapping() map[int32]string { + if m != nil { + return m.NameMapping + } + return nil +} + +func (m *Request) GetMsgMapping() map[int64]*Reply { + if m != nil { + return m.MsgMapping + } + return nil +} + +func (m *Request) GetReset_() int32 { + if m != nil && m.Reset_ != nil { + return *m.Reset_ + } + return 0 +} + +func (m *Request) GetGetKey_() string { + if m != nil && m.GetKey_ != nil { + return *m.GetKey_ + } + return "" +} + +type Request_SomeGroup struct { + GroupField *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Request_SomeGroup) Reset() { *m = Request_SomeGroup{} } +func (m *Request_SomeGroup) String() string { return proto.CompactTextString(m) } +func (*Request_SomeGroup) ProtoMessage() {} + +func (m *Request_SomeGroup) GetGroupField() int32 { + if m != nil && m.GroupField != nil { + return *m.GroupField + } + return 0 +} + +type Reply struct { + Found []*Reply_Entry `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"` + CompactKeys []int32 `protobuf:"varint,2,rep,packed,name=compact_keys,json=compactKeys" json:"compact_keys,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Reply) 
Reset() { *m = Reply{} } +func (m *Reply) String() string { return proto.CompactTextString(m) } +func (*Reply) ProtoMessage() {} + +var extRange_Reply = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*Reply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_Reply +} + +func (m *Reply) GetFound() []*Reply_Entry { + if m != nil { + return m.Found + } + return nil +} + +func (m *Reply) GetCompactKeys() []int32 { + if m != nil { + return m.CompactKeys + } + return nil +} + +type Reply_Entry struct { + KeyThatNeeds_1234Camel_CasIng *int64 `protobuf:"varint,1,req,name=key_that_needs_1234camel_CasIng,json=keyThatNeeds1234camelCasIng" json:"key_that_needs_1234camel_CasIng,omitempty"` + Value *int64 `protobuf:"varint,2,opt,name=value,def=7" json:"value,omitempty"` + XMyFieldName_2 *int64 `protobuf:"varint,3,opt,name=_my_field_name_2,json=MyFieldName2" json:"_my_field_name_2,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Reply_Entry) Reset() { *m = Reply_Entry{} } +func (m *Reply_Entry) String() string { return proto.CompactTextString(m) } +func (*Reply_Entry) ProtoMessage() {} + +const Default_Reply_Entry_Value int64 = 7 + +func (m *Reply_Entry) GetKeyThatNeeds_1234Camel_CasIng() int64 { + if m != nil && m.KeyThatNeeds_1234Camel_CasIng != nil { + return *m.KeyThatNeeds_1234Camel_CasIng + } + return 0 +} + +func (m *Reply_Entry) GetValue() int64 { + if m != nil && m.Value != nil { + return *m.Value + } + return Default_Reply_Entry_Value +} + +func (m *Reply_Entry) GetXMyFieldName_2() int64 { + if m != nil && m.XMyFieldName_2 != nil { + return *m.XMyFieldName_2 + } + return 0 +} + +type OtherBase struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OtherBase) Reset() { *m = OtherBase{} } +func (m *OtherBase) String() string { return proto.CompactTextString(m) } +func (*OtherBase) ProtoMessage() {} + +var extRange_OtherBase = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*OtherBase) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OtherBase +} + +func (m *OtherBase) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +type ReplyExtensions struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ReplyExtensions) Reset() { *m = ReplyExtensions{} } +func (m *ReplyExtensions) String() string { return proto.CompactTextString(m) } +func (*ReplyExtensions) ProtoMessage() {} + +var E_ReplyExtensions_Time = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*float64)(nil), + Field: 101, + Name: "my.test.ReplyExtensions.time", + Tag: "fixed64,101,opt,name=time", + Filename: "my_test/test.proto", +} + +var E_ReplyExtensions_Carrot = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*ReplyExtensions)(nil), + Field: 105, + Name: "my.test.ReplyExtensions.carrot", + Tag: "bytes,105,opt,name=carrot", + Filename: "my_test/test.proto", +} + +var E_ReplyExtensions_Donut = &proto.ExtensionDesc{ + ExtendedType: (*OtherBase)(nil), + ExtensionType: (*ReplyExtensions)(nil), + Field: 101, + Name: "my.test.ReplyExtensions.donut", + Tag: "bytes,101,opt,name=donut", + Filename: "my_test/test.proto", +} + +type OtherReplyExtensions struct { + Key *int32 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OtherReplyExtensions) Reset() { *m = OtherReplyExtensions{} } +func (m *OtherReplyExtensions) String() 
string { return proto.CompactTextString(m) } +func (*OtherReplyExtensions) ProtoMessage() {} + +func (m *OtherReplyExtensions) GetKey() int32 { + if m != nil && m.Key != nil { + return *m.Key + } + return 0 +} + +type OldReply struct { + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldReply) Reset() { *m = OldReply{} } +func (m *OldReply) String() string { return proto.CompactTextString(m) } +func (*OldReply) ProtoMessage() {} + +func (m *OldReply) Marshal() ([]byte, error) { + return proto.MarshalMessageSet(&m.XXX_InternalExtensions) +} +func (m *OldReply) Unmarshal(buf []byte) error { + return proto.UnmarshalMessageSet(buf, &m.XXX_InternalExtensions) +} +func (m *OldReply) MarshalJSON() ([]byte, error) { + return proto.MarshalMessageSetJSON(&m.XXX_InternalExtensions) +} +func (m *OldReply) UnmarshalJSON(buf []byte) error { + return proto.UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions) +} + +// ensure OldReply satisfies proto.Marshaler and proto.Unmarshaler +var _ proto.Marshaler = (*OldReply)(nil) +var _ proto.Unmarshaler = (*OldReply)(nil) + +var extRange_OldReply = []proto.ExtensionRange{ + {100, 2147483646}, +} + +func (*OldReply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OldReply +} + +type Communique struct { + MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"` + // This is a oneof, called "union". + // + // Types that are valid to be assigned to Union: + // *Communique_Number + // *Communique_Name + // *Communique_Data + // *Communique_TempC + // *Communique_Height + // *Communique_Today + // *Communique_Maybe + // *Communique_Delta_ + // *Communique_Msg + // *Communique_Somegroup + Union isCommunique_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Communique) Reset() { *m = Communique{} } +func (m *Communique) String() string { return proto.CompactTextString(m) } +func (*Communique) ProtoMessage() {} + +type isCommunique_Union interface { + isCommunique_Union() +} + +type Communique_Number struct { + Number int32 `protobuf:"varint,5,opt,name=number,oneof"` +} +type Communique_Name struct { + Name string `protobuf:"bytes,6,opt,name=name,oneof"` +} +type Communique_Data struct { + Data []byte `protobuf:"bytes,7,opt,name=data,oneof"` +} +type Communique_TempC struct { + TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,json=tempC,oneof"` +} +type Communique_Height struct { + Height float32 `protobuf:"fixed32,9,opt,name=height,oneof"` +} +type Communique_Today struct { + Today Days `protobuf:"varint,10,opt,name=today,enum=my.test.Days,oneof"` +} +type Communique_Maybe struct { + Maybe bool `protobuf:"varint,11,opt,name=maybe,oneof"` +} +type Communique_Delta_ struct { + Delta int32 `protobuf:"zigzag32,12,opt,name=delta,oneof"` +} +type Communique_Msg struct { + Msg *Reply `protobuf:"bytes,13,opt,name=msg,oneof"` +} +type Communique_Somegroup struct { + Somegroup *Communique_SomeGroup `protobuf:"group,14,opt,name=SomeGroup,json=somegroup,oneof"` +} + +func (*Communique_Number) isCommunique_Union() {} +func (*Communique_Name) isCommunique_Union() {} +func (*Communique_Data) isCommunique_Union() {} +func (*Communique_TempC) isCommunique_Union() {} +func (*Communique_Height) isCommunique_Union() {} +func (*Communique_Today) isCommunique_Union() {} +func (*Communique_Maybe) isCommunique_Union() {} +func (*Communique_Delta_) isCommunique_Union() {} +func (*Communique_Msg) isCommunique_Union() {} +func (*Communique_Somegroup) 
isCommunique_Union() {} + +func (m *Communique) GetUnion() isCommunique_Union { + if m != nil { + return m.Union + } + return nil +} + +func (m *Communique) GetMakeMeCry() bool { + if m != nil && m.MakeMeCry != nil { + return *m.MakeMeCry + } + return false +} + +func (m *Communique) GetNumber() int32 { + if x, ok := m.GetUnion().(*Communique_Number); ok { + return x.Number + } + return 0 +} + +func (m *Communique) GetName() string { + if x, ok := m.GetUnion().(*Communique_Name); ok { + return x.Name + } + return "" +} + +func (m *Communique) GetData() []byte { + if x, ok := m.GetUnion().(*Communique_Data); ok { + return x.Data + } + return nil +} + +func (m *Communique) GetTempC() float64 { + if x, ok := m.GetUnion().(*Communique_TempC); ok { + return x.TempC + } + return 0 +} + +func (m *Communique) GetHeight() float32 { + if x, ok := m.GetUnion().(*Communique_Height); ok { + return x.Height + } + return 0 +} + +func (m *Communique) GetToday() Days { + if x, ok := m.GetUnion().(*Communique_Today); ok { + return x.Today + } + return Days_MONDAY +} + +func (m *Communique) GetMaybe() bool { + if x, ok := m.GetUnion().(*Communique_Maybe); ok { + return x.Maybe + } + return false +} + +func (m *Communique) GetDelta() int32 { + if x, ok := m.GetUnion().(*Communique_Delta_); ok { + return x.Delta + } + return 0 +} + +func (m *Communique) GetMsg() *Reply { + if x, ok := m.GetUnion().(*Communique_Msg); ok { + return x.Msg + } + return nil +} + +func (m *Communique) GetSomegroup() *Communique_SomeGroup { + if x, ok := m.GetUnion().(*Communique_Somegroup); ok { + return x.Somegroup + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Communique) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Communique_OneofMarshaler, _Communique_OneofUnmarshaler, _Communique_OneofSizer, []interface{}{ + (*Communique_Number)(nil), + (*Communique_Name)(nil), + (*Communique_Data)(nil), + (*Communique_TempC)(nil), + (*Communique_Height)(nil), + (*Communique_Today)(nil), + (*Communique_Maybe)(nil), + (*Communique_Delta_)(nil), + (*Communique_Msg)(nil), + (*Communique_Somegroup)(nil), + } +} + +func _Communique_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Communique) + // union + switch x := m.Union.(type) { + case *Communique_Number: + b.EncodeVarint(5<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Number)) + case *Communique_Name: + b.EncodeVarint(6<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Name) + case *Communique_Data: + b.EncodeVarint(7<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Data) + case *Communique_TempC: + b.EncodeVarint(8<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.TempC)) + case *Communique_Height: + b.EncodeVarint(9<<3 | proto.WireFixed32) + b.EncodeFixed32(uint64(math.Float32bits(x.Height))) + case *Communique_Today: + b.EncodeVarint(10<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Today)) + case *Communique_Maybe: + t := uint64(0) + if x.Maybe { + t = 1 + } + b.EncodeVarint(11<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Communique_Delta_: + b.EncodeVarint(12<<3 | proto.WireVarint) + b.EncodeZigzag32(uint64(x.Delta)) + case *Communique_Msg: + b.EncodeVarint(13<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Msg); err != nil { + return err + } + case *Communique_Somegroup: + b.EncodeVarint(14<<3 | proto.WireStartGroup) + if err := b.Marshal(x.Somegroup); 
err != nil { + return err + } + b.EncodeVarint(14<<3 | proto.WireEndGroup) + case nil: + default: + return fmt.Errorf("Communique.Union has unexpected type %T", x) + } + return nil +} + +func _Communique_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Communique) + switch tag { + case 5: // union.number + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Number{int32(x)} + return true, err + case 6: // union.name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &Communique_Name{x} + return true, err + case 7: // union.data + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Union = &Communique_Data{x} + return true, err + case 8: // union.temp_c + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Union = &Communique_TempC{math.Float64frombits(x)} + return true, err + case 9: // union.height + if wire != proto.WireFixed32 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed32() + m.Union = &Communique_Height{math.Float32frombits(uint32(x))} + return true, err + case 10: // union.today + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Today{Days(x)} + return true, err + case 11: // union.maybe + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Maybe{x != 0} + return true, err + case 12: // union.delta + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeZigzag32() + m.Union = &Communique_Delta_{int32(x)} + return true, err + case 13: // union.msg + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Reply) + err := b.DecodeMessage(msg) + m.Union = &Communique_Msg{msg} + return true, err + case 14: // union.somegroup + if wire != proto.WireStartGroup { + return true, proto.ErrInternalBadWireType + } + msg := new(Communique_SomeGroup) + err := b.DecodeGroup(msg) + m.Union = &Communique_Somegroup{msg} + return true, err + default: + return false, nil + } +} + +func _Communique_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Communique) + // union + switch x := m.Union.(type) { + case *Communique_Number: + n += proto.SizeVarint(5<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Number)) + case *Communique_Name: + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Name))) + n += len(x.Name) + case *Communique_Data: + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Data))) + n += len(x.Data) + case *Communique_TempC: + n += proto.SizeVarint(8<<3 | proto.WireFixed64) + n += 8 + case *Communique_Height: + n += proto.SizeVarint(9<<3 | proto.WireFixed32) + n += 4 + case *Communique_Today: + n += proto.SizeVarint(10<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Today)) + case *Communique_Maybe: + n += proto.SizeVarint(11<<3 | proto.WireVarint) + n += 1 + case *Communique_Delta_: + n += proto.SizeVarint(12<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64((uint32(x.Delta) << 1) ^ uint32((int32(x.Delta) >> 31)))) + case *Communique_Msg: + s := proto.Size(x.Msg) + n += proto.SizeVarint(13<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(s)) + n += s + case *Communique_Somegroup: + n += proto.SizeVarint(14<<3 | proto.WireStartGroup) + n += proto.Size(x.Somegroup) + n += proto.SizeVarint(14<<3 | proto.WireEndGroup) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Communique_SomeGroup struct { + Member *string `protobuf:"bytes,15,opt,name=member" json:"member,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Communique_SomeGroup) Reset() { *m = Communique_SomeGroup{} } +func (m *Communique_SomeGroup) String() string { return proto.CompactTextString(m) } +func (*Communique_SomeGroup) ProtoMessage() {} + +func (m *Communique_SomeGroup) GetMember() string { + if m != nil && m.Member != nil { + return *m.Member + } + return "" +} + +type Communique_Delta struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Communique_Delta) Reset() { *m = Communique_Delta{} } +func (m *Communique_Delta) String() string { return proto.CompactTextString(m) } +func (*Communique_Delta) ProtoMessage() {} + +var E_Tag = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*string)(nil), + Field: 103, + Name: "my.test.tag", + Tag: "bytes,103,opt,name=tag", + Filename: "my_test/test.proto", +} + +var E_Donut = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*OtherReplyExtensions)(nil), + Field: 106, + Name: "my.test.donut", + Tag: "bytes,106,opt,name=donut", + Filename: "my_test/test.proto", +} + +func init() { + proto.RegisterType((*Request)(nil), "my.test.Request") + proto.RegisterType((*Request_SomeGroup)(nil), "my.test.Request.SomeGroup") + proto.RegisterType((*Reply)(nil), "my.test.Reply") + proto.RegisterType((*Reply_Entry)(nil), "my.test.Reply.Entry") + proto.RegisterType((*OtherBase)(nil), "my.test.OtherBase") + proto.RegisterType((*ReplyExtensions)(nil), "my.test.ReplyExtensions") + proto.RegisterType((*OtherReplyExtensions)(nil), "my.test.OtherReplyExtensions") + proto.RegisterType((*OldReply)(nil), "my.test.OldReply") + proto.RegisterType((*Communique)(nil), "my.test.Communique") + proto.RegisterType((*Communique_SomeGroup)(nil), "my.test.Communique.SomeGroup") + proto.RegisterType((*Communique_Delta)(nil), "my.test.Communique.Delta") + proto.RegisterEnum("my.test.HatType", HatType_name, HatType_value) + proto.RegisterEnum("my.test.Days", Days_name, Days_value) + proto.RegisterEnum("my.test.Request_Color", Request_Color_name, Request_Color_value) + proto.RegisterEnum("my.test.Reply_Entry_Game", Reply_Entry_Game_name, Reply_Entry_Game_value) + proto.RegisterExtension(E_ReplyExtensions_Time) + proto.RegisterExtension(E_ReplyExtensions_Carrot) + proto.RegisterExtension(E_ReplyExtensions_Donut) + proto.RegisterExtension(E_Tag) + proto.RegisterExtension(E_Donut) +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto new file mode 100644 index 0000000..8e70946 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto @@ -0,0 +1,156 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +// This package holds interesting messages. +package my.test; // dotted package name + +//import "imp.proto"; +import "multi/multi1.proto"; // unused import + +enum HatType { + // deliberately skipping 0 + FEDORA = 1; + FEZ = 2; +} + +// This enum represents days of the week. +enum Days { + option allow_alias = true; + + MONDAY = 1; + TUESDAY = 2; + LUNDI = 1; // same value as MONDAY +} + +// This is a message that might be sent somewhere. +message Request { + enum Color { + RED = 0; + GREEN = 1; + BLUE = 2; + } + repeated int64 key = 1; +// optional imp.ImportedMessage imported_message = 2; + optional Color hue = 3; // no default + optional HatType hat = 4 [default=FEDORA]; +// optional imp.ImportedMessage.Owner owner = 6; + optional float deadline = 7 [default=inf]; + optional group SomeGroup = 8 { + optional int32 group_field = 9; + } + + // These foreign types are in imp2.proto, + // which is publicly imported by imp.proto. +// optional imp.PubliclyImportedMessage pub = 10; +// optional imp.PubliclyImportedEnum pub_enum = 13 [default=HAIR]; + + + // This is a map field. It will generate map[int32]string. + map<int32, string> name_mapping = 14; + // This is a map field whose value type is a message. + map<sint64, Reply> msg_mapping = 15; + + optional int32 reset = 12; + // This field should not conflict with any getters. 
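+ // (As the generated Go above shows, protoc-gen-go appends an underscore in
+ // such cases: `reset` becomes the field Reset_, and `get_key` becomes
+ // GetKey_, so neither collides with the generated Reset() method or with
+ // key's GetKey() getter.)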
+ optional string get_key = 16; +} + +message Reply { + message Entry { + required int64 key_that_needs_1234camel_CasIng = 1; + optional int64 value = 2 [default=7]; + optional int64 _my_field_name_2 = 3; + enum Game { + FOOTBALL = 1; + TENNIS = 2; + } + } + repeated Entry found = 1; + repeated int32 compact_keys = 2 [packed=true]; + extensions 100 to max; +} + +message OtherBase { + optional string name = 1; + extensions 100 to max; +} + +message ReplyExtensions { + extend Reply { + optional double time = 101; + optional ReplyExtensions carrot = 105; + } + extend OtherBase { + optional ReplyExtensions donut = 101; + } +} + +message OtherReplyExtensions { + optional int32 key = 1; +} + +// top-level extension +extend Reply { + optional string tag = 103; + optional OtherReplyExtensions donut = 106; +// optional imp.ImportedMessage elephant = 107; // extend with message from another file. +} + +message OldReply { + // Extensions will be encoded in MessageSet wire format. + option message_set_wire_format = true; + extensions 100 to max; +} + +message Communique { + optional bool make_me_cry = 1; + + // This is a oneof, called "union". + oneof union { + int32 number = 5; + string name = 6; + bytes data = 7; + double temp_c = 8; + float height = 9; + Days today = 10; + bool maybe = 11; + sint32 delta = 12; // name will conflict with Delta below + Reply msg = 13; + group SomeGroup = 14 { + optional string member = 15; + } + } + + message Delta {} +} + diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3.proto new file mode 100644 index 0000000..869b9af --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3.proto @@ -0,0 +1,53 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
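+// (A note for contrast with the proto2 file above: the Go generated for this
+// proto3 file uses plain value fields, so the `name` field below comes out as
+// a string rather than a *string, and the [packed=false] option opts the
+// `unpacked` field out of proto3's default packed encoding for repeated
+// numeric scalars.)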
+ +syntax = "proto3"; + +package proto3; + +message Request { + enum Flavour { + SWEET = 0; + SOUR = 1; + UMAMI = 2; + GOPHERLICIOUS = 3; + } + string name = 1; + repeated int64 key = 2; + Flavour taste = 3; + Book book = 4; + repeated int64 unpacked = 5 [packed=false]; +} + +message Book { + string title = 1; + bytes raw_data = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go new file mode 100644 index 0000000..b2af97f --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -0,0 +1,139 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements functions to marshal proto.Message to/from +// google.protobuf.Any message. + +import ( + "fmt" + "reflect" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" +) + +const googleApis = "type.googleapis.com/" + +// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. +// +// Note that regular type assertions should be done using the Is +// function. AnyMessageName is provided for less common use cases like filtering a +// sequence of Any messages based on a set of allowed message type names. +func AnyMessageName(any *any.Any) (string, error) { + if any == nil { + return "", fmt.Errorf("message is nil") + } + slash := strings.LastIndex(any.TypeUrl, "/") + if slash < 0 { + return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) + } + return any.TypeUrl[slash+1:], nil +} + +// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. 
+func MarshalAny(pb proto.Message) (*any.Any, error) { + value, err := proto.Marshal(pb) + if err != nil { + return nil, err + } + return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in a google.protobuf.Any +// message. The allocated message is stored in the embedded proto.Message. +// +// Example: +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) +type DynamicAny struct { + proto.Message +} + +// Empty returns a new proto.Message of the type specified in a +// google.protobuf.Any message. It returns an error if the corresponding message +// type isn't linked in. +func Empty(any *any.Any) (proto.Message, error) { + aname, err := AnyMessageName(any) + if err != nil { + return nil, err + } + + t := proto.MessageType(aname) + if t == nil { + return nil, fmt.Errorf("any: message type %q isn't linked in", aname) + } + return reflect.New(t.Elem()).Interface().(proto.Message), nil +} + +// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any +// message and places the decoded result in pb. It returns an error if the type of +// the contents of the Any message does not match the type of the pb message. +// +// pb can be a proto.Message, or a *DynamicAny. +func UnmarshalAny(any *any.Any, pb proto.Message) error { + if d, ok := pb.(*DynamicAny); ok { + if d.Message == nil { + var err error + d.Message, err = Empty(any) + if err != nil { + return err + } + } + return UnmarshalAny(any, d.Message) + } + + aname, err := AnyMessageName(any) + if err != nil { + return err + } + + mname := proto.MessageName(pb) + if aname != mname { + return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) + } + return proto.Unmarshal(any.Value, pb) +} + +// Is returns true if any value contains a given message type. +func Is(any *any.Any, pb proto.Message) bool { + aname, err := AnyMessageName(any) + if err != nil { + return false + } + + return aname == proto.MessageName(pb) +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go new file mode 100644 index 0000000..f346017 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -0,0 +1,178 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/any.proto + +/* +Package any is a generated protocol buffer package. + +It is generated from these files: + google/protobuf/any.proto + +It has these top-level messages: + Any +*/ +package any + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. 
+// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": <string>, +// "lastName": <string> +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +type Any struct { + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. + // + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: + // + // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` + // Must be a valid serialized protocol buffer of the above specified type. 
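+ // For example (illustrative), an Any wrapping a google.protobuf.Duration
+ // would carry TypeUrl "type.googleapis.com/google.protobuf.Duration" and
+ // the Duration's serialized bytes here in Value.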
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Any) XXX_WellKnownType() string { return "Any" } + +func (m *Any) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Any) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*Any)(nil), "google.protobuf.Any") +} + +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 185 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, + 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, + 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, + 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, + 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce, + 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52, + 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, + 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, + 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, + 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, + 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto new file mode 100644 index 0000000..c748667 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto @@ -0,0 +1,149 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "github.com/golang/protobuf/ptypes/any"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "AnyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": <string>, +// "lastName": <string> +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +message Any { + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. + // + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: + // + // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." 
is + // not accepted). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be a valid serialized protocol buffer of the above specified type. + bytes value = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any_test.go b/vendor/github.com/golang/protobuf/ptypes/any_test.go new file mode 100644 index 0000000..ed675b4 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any_test.go @@ -0,0 +1,113 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
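+// An illustrative sketch of the behavior exercised below (names as in the
+// imports that follow): Is compares only the fully qualified type name after
+// the last '/' of TypeUrl, so a non-standard URL prefix still matches:
+//
+//   a := &any.Any{TypeUrl: "foo/bar/google.protobuf.FileDescriptorProto"}
+//   Is(a, &pb.FileDescriptorProto{}) // true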
+ +package ptypes + +import ( + "testing" + + "github.com/golang/protobuf/proto" + pb "github.com/golang/protobuf/protoc-gen-go/descriptor" + "github.com/golang/protobuf/ptypes/any" +) + +func TestMarshalUnmarshal(t *testing.T) { + orig := &any.Any{Value: []byte("test")} + + packed, err := MarshalAny(orig) + if err != nil { + t.Errorf("MarshalAny(%+v): got: _, %v exp: _, nil", orig, err) + } + + unpacked := &any.Any{} + err = UnmarshalAny(packed, unpacked) + if err != nil || !proto.Equal(unpacked, orig) { + t.Errorf("got: %v, %+v; want nil, %+v", err, unpacked, orig) + } +} + +func TestIs(t *testing.T) { + a, err := MarshalAny(&pb.FileDescriptorProto{}) + if err != nil { + t.Fatal(err) + } + if Is(a, &pb.DescriptorProto{}) { + t.Error("FileDescriptorProto is not a DescriptorProto, but Is says it is") + } + if !Is(a, &pb.FileDescriptorProto{}) { + t.Error("FileDescriptorProto is indeed a FileDescriptorProto, but Is says it is not") + } +} + +func TestIsDifferentUrlPrefixes(t *testing.T) { + m := &pb.FileDescriptorProto{} + a := &any.Any{TypeUrl: "foo/bar/" + proto.MessageName(m)} + if !Is(a, m) { + t.Errorf("message with type url %q didn't satisfy Is for type %q", a.TypeUrl, proto.MessageName(m)) + } +} + +func TestUnmarshalDynamic(t *testing.T) { + want := &pb.FileDescriptorProto{Name: proto.String("foo")} + a, err := MarshalAny(want) + if err != nil { + t.Fatal(err) + } + var got DynamicAny + if err := UnmarshalAny(a, &got); err != nil { + t.Fatal(err) + } + if !proto.Equal(got.Message, want) { + t.Errorf("invalid result from UnmarshalAny, got %q want %q", got.Message, want) + } +} + +func TestEmpty(t *testing.T) { + want := &pb.FileDescriptorProto{} + a, err := MarshalAny(want) + if err != nil { + t.Fatal(err) + } + got, err := Empty(a) + if err != nil { + t.Fatal(err) + } + if !proto.Equal(got, want) { + t.Errorf("unequal empty message, got %q, want %q", got, want) + } + + // that's a valid type_url for a message which shouldn't be linked into this + // test binary. We want an error. + a.TypeUrl = "type.googleapis.com/google.protobuf.FieldMask" + if _, err := Empty(a); err == nil { + t.Errorf("got no error for an attempt to create a message of type %q, which shouldn't be linked in", a.TypeUrl) + } +} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go new file mode 100644 index 0000000..c0d595d --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package ptypes contains code for interacting with well-known types. +*/ +package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go new file mode 100644 index 0000000..65cb0f8 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -0,0 +1,102 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" + + durpb "github.com/golang/protobuf/ptypes/duration" +) + +const ( + // Range of a durpb.Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the durpb.Duration is valid according to the +// definition in google/protobuf/duration.proto. 
A valid durpb.Duration +// may still be too large to fit into a time.Duration (the range of durpb.Duration +// is about 10,000 years, and the range of time.Duration is about 290 years). +func validateDuration(d *durpb.Duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) + } + return nil +} + +// Duration converts a durpb.Duration to a time.Duration. Duration +// returns an error if the durpb.Duration is invalid or is too large to be +// represented in a time.Duration. +func Duration(p *durpb.Duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a durpb.Duration. +func DurationProto(d time.Duration) *durpb.Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &durpb.Duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go new file mode 100644 index 0000000..b2410a0 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -0,0 +1,144 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/duration.proto + +/* +Package duration is a generated protocol buffer package. + +It is generated from these files: + google/protobuf/duration.proto + +It has these top-level messages: + Duration +*/ +package duration + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. 
+// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +type Duration struct { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. 
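+ // For example (an illustration of the sign rule above), -1.5 seconds is
+ // represented as {Seconds: -1, Nanos: -500000000}, never as
+ // {Seconds: -2, Nanos: 500000000}.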
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Duration) XXX_WellKnownType() string { return "Duration" } + +func (m *Duration) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Duration) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func init() { + proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") +} + +func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 190 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, + 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, + 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, + 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, + 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c, + 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56, + 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, + 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4, + 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78, + 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63, + 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto new file mode 100644 index 0000000..975fce4 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto @@ -0,0 +1,117 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/duration";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "DurationProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// # Examples
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+//     Timestamp start = ...;
+//     Timestamp end = ...;
+//     Duration duration = ...;
+//
+//     duration.seconds = end.seconds - start.seconds;
+//     duration.nanos = end.nanos - start.nanos;
+//
+//     if (duration.seconds < 0 && duration.nanos > 0) {
+//       duration.seconds += 1;
+//       duration.nanos -= 1000000000;
+//     } else if (duration.seconds > 0 && duration.nanos < 0) {
+//       duration.seconds -= 1;
+//       duration.nanos += 1000000000;
+//     }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+//     Timestamp start = ...;
+//     Duration duration = ...;
+//     Timestamp end = ...;
+//
+//     end.seconds = start.seconds + duration.seconds;
+//     end.nanos = start.nanos + duration.nanos;
+//
+//     if (end.nanos < 0) {
+//       end.seconds -= 1;
+//       end.nanos += 1000000000;
+//     } else if (end.nanos >= 1000000000) {
+//       end.seconds += 1;
+//       end.nanos -= 1000000000;
+//     }
+//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+//     td = datetime.timedelta(days=3, minutes=10)
+//     duration = Duration()
+//     duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+// microsecond should be expressed in JSON format as "3.000001s".
+//
+//
+message Duration {
+
+  // Signed seconds of the span of time. Must be from -315,576,000,000
+  // to +315,576,000,000 inclusive. Note: these bounds are computed from:
+  // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
+  int64 seconds = 1;
+
+  // Signed fractions of a second at nanosecond resolution of the span
+  // of time. Durations less than one second are represented with a 0
+  // `seconds` field and a positive or negative `nanos` field.
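+  // For example, -0.5 seconds is (seconds: 0, nanos: -500000000).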
For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration_test.go b/vendor/github.com/golang/protobuf/ptypes/duration_test.go new file mode 100644 index 0000000..e00491a --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration_test.go @@ -0,0 +1,121 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +import ( + "math" + "testing" + "time" + + "github.com/golang/protobuf/proto" + durpb "github.com/golang/protobuf/ptypes/duration" +) + +const ( + minGoSeconds = math.MinInt64 / int64(1e9) + maxGoSeconds = math.MaxInt64 / int64(1e9) +) + +var durationTests = []struct { + proto *durpb.Duration + isValid bool + inRange bool + dur time.Duration +}{ + // The zero duration. + {&durpb.Duration{Seconds: 0, Nanos: 0}, true, true, 0}, + // Some ordinary non-zero durations. + {&durpb.Duration{Seconds: 100, Nanos: 0}, true, true, 100 * time.Second}, + {&durpb.Duration{Seconds: -100, Nanos: 0}, true, true, -100 * time.Second}, + {&durpb.Duration{Seconds: 100, Nanos: 987}, true, true, 100*time.Second + 987}, + {&durpb.Duration{Seconds: -100, Nanos: -987}, true, true, -(100*time.Second + 987)}, + // The largest duration representable in Go. + {&durpb.Duration{Seconds: maxGoSeconds, Nanos: int32(math.MaxInt64 - 1e9*maxGoSeconds)}, true, true, math.MaxInt64}, + // The smallest duration representable in Go. 
+ {&durpb.Duration{Seconds: minGoSeconds, Nanos: int32(math.MinInt64 - 1e9*minGoSeconds)}, true, true, math.MinInt64}, + {nil, false, false, 0}, + {&durpb.Duration{Seconds: -100, Nanos: 987}, false, false, 0}, + {&durpb.Duration{Seconds: 100, Nanos: -987}, false, false, 0}, + {&durpb.Duration{Seconds: math.MinInt64, Nanos: 0}, false, false, 0}, + {&durpb.Duration{Seconds: math.MaxInt64, Nanos: 0}, false, false, 0}, + // The largest valid duration. + {&durpb.Duration{Seconds: maxSeconds, Nanos: 1e9 - 1}, true, false, 0}, + // The smallest valid duration. + {&durpb.Duration{Seconds: minSeconds, Nanos: -(1e9 - 1)}, true, false, 0}, + // The smallest invalid duration above the valid range. + {&durpb.Duration{Seconds: maxSeconds + 1, Nanos: 0}, false, false, 0}, + // The largest invalid duration below the valid range. + {&durpb.Duration{Seconds: minSeconds - 1, Nanos: -(1e9 - 1)}, false, false, 0}, + // One nanosecond past the largest duration representable in Go. + {&durpb.Duration{Seconds: maxGoSeconds, Nanos: int32(math.MaxInt64-1e9*maxGoSeconds) + 1}, true, false, 0}, + // One nanosecond past the smallest duration representable in Go. + {&durpb.Duration{Seconds: minGoSeconds, Nanos: int32(math.MinInt64-1e9*minGoSeconds) - 1}, true, false, 0}, + // One second past the largest duration representable in Go. + {&durpb.Duration{Seconds: maxGoSeconds + 1, Nanos: int32(math.MaxInt64 - 1e9*maxGoSeconds)}, true, false, 0}, + // One second past the smallest duration representable in Go. + {&durpb.Duration{Seconds: minGoSeconds - 1, Nanos: int32(math.MinInt64 - 1e9*minGoSeconds)}, true, false, 0}, +} + +func TestValidateDuration(t *testing.T) { + for _, test := range durationTests { + err := validateDuration(test.proto) + gotValid := (err == nil) + if gotValid != test.isValid { + t.Errorf("validateDuration(%v) = %t, want %t", test.proto, gotValid, test.isValid) + } + } +} + +func TestDuration(t *testing.T) { + for _, test := range durationTests { + got, err := Duration(test.proto) + gotOK := (err == nil) + wantOK := test.isValid && test.inRange + if gotOK != wantOK { + t.Errorf("Duration(%v) ok = %t, want %t", test.proto, gotOK, wantOK) + } + if err == nil && got != test.dur { + t.Errorf("Duration(%v) = %v, want %v", test.proto, got, test.dur) + } + } +} + +func TestDurationProto(t *testing.T) { + for _, test := range durationTests { + if test.isValid && test.inRange { + got := DurationProto(test.dur) + if !proto.Equal(got, test.proto) { + t.Errorf("DurationProto(%v) = %v, want %v", test.dur, got, test.proto) + } + } + } +} diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go new file mode 100644 index 0000000..e877b72 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go @@ -0,0 +1,66 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/empty.proto + +/* +Package empty is a generated protocol buffer package. + +It is generated from these files: + google/protobuf/empty.proto + +It has these top-level messages: + Empty +*/ +package empty + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +type Empty struct { +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Empty) XXX_WellKnownType() string { return "Empty" } + +func init() { + proto.RegisterType((*Empty)(nil), "google.protobuf.Empty") +} + +func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 148 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28, + 0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57, + 0x90, 0xbc, 0x53, 0x19, 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0xbc, 0x13, 0x17, 0x58, 0x36, + 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x52, 0x4f, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, + 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0x47, 0x58, 0x53, 0x50, 0x52, 0x59, 0x90, 0x5a, 0x0c, + 0xb1, 0xed, 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, + 0x13, 0x03, 0xa0, 0xea, 0xf4, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, + 0xea, 0x93, 0xd8, 0xc0, 0x06, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x64, 0xd4, 0xb3, 0xa6, + 0xb7, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto new file mode 100644 index 0000000..03cacd2 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto @@ -0,0 +1,52 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "github.com/golang/protobuf/ptypes/empty"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "EmptyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +message Empty {} diff --git a/vendor/github.com/golang/protobuf/ptypes/regen.sh b/vendor/github.com/golang/protobuf/ptypes/regen.sh new file mode 100755 index 0000000..b50a941 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/regen.sh @@ -0,0 +1,43 @@ +#!/bin/bash -e +# +# This script fetches and rebuilds the "well-known types" protocol buffers. +# To run this you will need protoc and goprotobuf installed; +# see https://github.com/golang/protobuf for instructions. +# You also need Go and Git installed. + +PKG=github.com/golang/protobuf/ptypes +UPSTREAM=https://github.com/google/protobuf +UPSTREAM_SUBDIR=src/google/protobuf +PROTO_FILES=(any duration empty struct timestamp wrappers) + +function die() { + echo 1>&2 $* + exit 1 +} + +# Sanity check that the right tools are accessible. +for tool in go git protoc protoc-gen-go; do + q=$(which $tool) || die "didn't find $tool" + echo 1>&2 "$tool: $q" +done + +tmpdir=$(mktemp -d -t regen-wkt.XXXXXX) +trap 'rm -rf $tmpdir' EXIT + +echo -n 1>&2 "finding package dir... " +pkgdir=$(go list -f '{{.Dir}}' $PKG) +echo 1>&2 $pkgdir +base=$(echo $pkgdir | sed "s,/$PKG\$,,") +echo 1>&2 "base: $base" +cd "$base" + +echo 1>&2 "fetching latest protos... " +git clone -q $UPSTREAM $tmpdir + +for file in ${PROTO_FILES[@]}; do + echo 1>&2 "* $file" + protoc --go_out=. -I$tmpdir/src $tmpdir/src/google/protobuf/$file.proto || die + cp $tmpdir/src/google/protobuf/$file.proto $PKG/$file +done + +echo 1>&2 "All OK" diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go new file mode 100644 index 0000000..4cfe608 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go @@ -0,0 +1,380 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/struct.proto + +/* +Package structpb is a generated protocol buffer package. + +It is generated from these files: + google/protobuf/struct.proto + +It has these top-level messages: + Struct + Value + ListValue +*/ +package structpb + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+// The JSON representation for `NullValue` is JSON `null`.
+type NullValue int32
+
+const (
+	// Null value.
+	NullValue_NULL_VALUE NullValue = 0
+)
+
+var NullValue_name = map[int32]string{
+	0: "NULL_VALUE",
+}
+var NullValue_value = map[string]int32{
+	"NULL_VALUE": 0,
+}
+
+func (x NullValue) String() string {
+	return proto.EnumName(NullValue_name, int32(x))
+}
+func (NullValue) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (NullValue) XXX_WellKnownType() string { return "NullValue" }
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is JSON object.
+type Struct struct {
+	// Unordered map of dynamically typed values.
+	Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+}
+
+func (m *Struct) Reset() { *m = Struct{} }
+func (m *Struct) String() string { return proto.CompactTextString(m) }
+func (*Struct) ProtoMessage() {}
+func (*Struct) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (*Struct) XXX_WellKnownType() string { return "Struct" }
+
+func (m *Struct) GetFields() map[string]*Value {
+	if m != nil {
+		return m.Fields
+	}
+	return nil
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of value is expected to set one of these
+// variants; absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is JSON value.
+type Value struct {
+	// The kind of value.
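+	// At most one of the wrapper types listed below is set at a time; for
+	// example, a JSON string decodes into a Value whose Kind is a
+	// *Value_StringValue.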
+ // + // Types that are valid to be assigned to Kind: + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return proto.CompactTextString(m) } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (*Value) XXX_WellKnownType() string { return "Value" } + +type isValue_Kind interface { + isValue_Kind() +} + +type Value_NullValue struct { + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,enum=google.protobuf.NullValue,oneof"` +} +type Value_NumberValue struct { + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,oneof"` +} +type Value_StringValue struct { + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,oneof"` +} +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,oneof"` +} +type Value_StructValue struct { + StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,oneof"` +} +type Value_ListValue struct { + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,oneof"` +} + +func (*Value_NullValue) isValue_Kind() {} +func (*Value_NumberValue) isValue_Kind() {} +func (*Value_StringValue) isValue_Kind() {} +func (*Value_BoolValue) isValue_Kind() {} +func (*Value_StructValue) isValue_Kind() {} +func (*Value_ListValue) isValue_Kind() {} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Value) GetNullValue() NullValue { + if x, ok := m.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return NullValue_NULL_VALUE +} + +func (m *Value) GetNumberValue() float64 { + if x, ok := m.GetKind().(*Value_NumberValue); ok { + return x.NumberValue + } + return 0 +} + +func (m *Value) GetStringValue() string { + if x, ok := m.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBoolValue() bool { + if x, ok := m.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *Value) GetStructValue() *Struct { + if x, ok := m.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (m *Value) GetListValue() *ListValue { + if x, ok := m.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
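+// It returns the oneof marshaler, unmarshaler, and sizer defined below,
+// together with the wrapper types that may populate Kind.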
+func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } +} + +func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + b.EncodeVarint(2<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.NumberValue)) + case *Value_StringValue: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.StringValue) + case *Value_BoolValue: + t := uint64(0) + if x.BoolValue { + t = 1 + } + b.EncodeVarint(4<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Value_StructValue: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StructValue); err != nil { + return err + } + case *Value_ListValue: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ListValue); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Value.Kind has unexpected type %T", x) + } + return nil +} + +func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Value) + switch tag { + case 1: // kind.null_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_NullValue{NullValue(x)} + return true, err + case 2: // kind.number_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Kind = &Value_NumberValue{math.Float64frombits(x)} + return true, err + case 3: // kind.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Kind = &Value_StringValue{x} + return true, err + case 4: // kind.bool_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_BoolValue{x != 0} + return true, err + case 5: // kind.struct_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Struct) + err := b.DecodeMessage(msg) + m.Kind = &Value_StructValue{msg} + return true, err + case 6: // kind.list_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ListValue) + err := b.DecodeMessage(msg) + m.Kind = &Value_ListValue{msg} + return true, err + default: + return false, nil + } +} + +func _Value_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + n += proto.SizeVarint(2<<3 | proto.WireFixed64) + n += 8 + case *Value_StringValue: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StringValue))) + n += len(x.StringValue) + case *Value_BoolValue: + n += proto.SizeVarint(4<<3 | proto.WireVarint) + n += 1 + case *Value_StructValue: + s := proto.Size(x.StructValue) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) 
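+		// a length-delimited field costs its tag, a varint length prefix, and the body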
+ n += s + case *Value_ListValue: + s := proto.Size(x.ListValue) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +type ListValue struct { + // Repeated field of dynamically typed values. + Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"` +} + +func (m *ListValue) Reset() { *m = ListValue{} } +func (m *ListValue) String() string { return proto.CompactTextString(m) } +func (*ListValue) ProtoMessage() {} +func (*ListValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (*ListValue) XXX_WellKnownType() string { return "ListValue" } + +func (m *ListValue) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +func init() { + proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") + proto.RegisterType((*Value)(nil), "google.protobuf.Value") + proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") + proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) +} + +func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 417 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40, + 0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09, + 0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94, + 0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa, + 0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff, + 0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc, + 0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15, + 0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d, + 0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce, + 0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39, + 0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab, + 0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84, + 0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48, + 0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f, + 0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59, + 0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a, + 0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64, + 0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92, + 0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25, + 0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37, + 0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6, + 0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 
0x28, 0x4a, 0xfe, 0x55, 0xa4,
+	0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda,
+	0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9,
+	0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53,
+	0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00,
+	0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
new file mode 100644
index 0000000..7d7808e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
@@ -0,0 +1,96 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/struct;structpb";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "StructProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is JSON object.
+message Struct {
+  // Unordered map of dynamically typed values.
+  map<string, Value> fields = 1;
+}
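+
+// For example, the JSON object {"x": 2.5} corresponds to a Struct whose
+// fields map has a single entry "x" holding a Value with number_value 2.5.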
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of value is expected to set one of these
+// variants; absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is JSON value.
+message Value {
+  // The kind of value.
+  oneof kind {
+    // Represents a null value.
+    NullValue null_value = 1;
+    // Represents a double value.
+    double number_value = 2;
+    // Represents a string value.
+    string string_value = 3;
+    // Represents a boolean value.
+    bool bool_value = 4;
+    // Represents a structured value.
+    Struct struct_value = 5;
+    // Represents a repeated `Value`.
+    ListValue list_value = 6;
+  }
+}
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+// The JSON representation for `NullValue` is JSON `null`.
+enum NullValue {
+  // Null value.
+  NULL_VALUE = 0;
+}
+
+// `ListValue` is a wrapper around a repeated field of values.
+//
+// The JSON representation for `ListValue` is JSON array.
+message ListValue {
+  // Repeated field of dynamically typed values.
+  repeated Value values = 1;
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
new file mode 100644
index 0000000..47f10db
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
@@ -0,0 +1,134 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements operations on google.protobuf.Timestamp.
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	tspb "github.com/golang/protobuf/ptypes/timestamp"
+)
+
+const (
+	// Seconds field of the earliest valid Timestamp.
+	// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	minValidSeconds = -62135596800
+	// Seconds field just after the latest valid Timestamp.
+	// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	maxValidSeconds = 253402300800
+)
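+
+// A quick round-trip sketch using the helpers defined below (illustrative
+// only):
+//
+//	ts, _ := TimestampProto(time.Now()) // time.Time -> proto
+//	t, err := Timestamp(ts)             // proto -> time.Time, validated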
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range
+// [0001-01-01, 10000-01-01) and has a Nanos field
+// in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes
+// the problem.
+//
+// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
+func validateTimestamp(ts *tspb.Timestamp) error {
+	if ts == nil {
+		return errors.New("timestamp: nil Timestamp")
+	}
+	if ts.Seconds < minValidSeconds {
+		return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
+	}
+	if ts.Seconds >= maxValidSeconds {
+		return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
+	}
+	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+		return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
+	}
+	return nil
+}
+
+// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if Timestamp returns an error, the first return value
+// is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
+	// Don't return the zero value on error, because it corresponds to a valid
+	// timestamp. Instead return whatever time.Unix gives us.
+	var t time.Time
+	if ts == nil {
+		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+	} else {
+		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+	}
+	return t, validateTimestamp(ts)
+}
+
+// TimestampNow returns a google.protobuf.Timestamp for the current time.
+func TimestampNow() *tspb.Timestamp {
+	ts, err := TimestampProto(time.Now())
+	if err != nil {
+		panic("ptypes: time.Now() out of Timestamp range")
+	}
+	return ts
+}
+
+// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
+	seconds := t.Unix()
+	nanos := int32(t.Sub(time.Unix(seconds, 0)))
+	ts := &tspb.Timestamp{
+		Seconds: seconds,
+		Nanos:   nanos,
+	}
+	if err := validateTimestamp(ts); err != nil {
+		return nil, err
+	}
+	return ts, nil
+}
+
+// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
+// Timestamps, it returns an error message in parentheses.
+func TimestampString(ts *tspb.Timestamp) string {
+	t, err := Timestamp(ts)
+	if err != nil {
+		return fmt.Sprintf("(%v)", err)
+	}
+	return t.Format(time.RFC3339Nano)
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
new file mode 100644
index 0000000..e23e4a2
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -0,0 +1,160 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/timestamp.proto
+
+/*
+Package timestamp is a generated protocol buffer package.
+ +It is generated from these files: + google/protobuf/timestamp.proto + +It has these top-level messages: + Timestamp +*/ +package timestamp + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required, though only UTC (as indicated by "Z") is presently supported. +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. 
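+//
+// In Go, the github.com/golang/protobuf/ptypes package (vendored above)
+// provides Timestamp and TimestampProto to convert between this message
+// and time.Time, for instance:
+//
+//     ts, _ := ptypes.TimestampProto(time.Now())
+//     t, err := ptypes.Timestamp(ts)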
+//
+// In JavaScript, one can convert a Date object to this format using the
+// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
+// method. In Python, a standard `datetime.datetime` object can be converted
+// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
+// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
+// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
+// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime())
+// to obtain a formatter capable of generating timestamps in this format.
+//
+//
+type Timestamp struct {
+	// Represents seconds of UTC time since Unix epoch
+	// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+	// 9999-12-31T23:59:59Z inclusive.
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
+	// Non-negative fractions of a second at nanosecond resolution. Negative
+	// second values with fractions must still have non-negative nanos values
+	// that count forward in time. Must be from 0 to 999,999,999
+	// inclusive.
+	Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
+}
+
+func (m *Timestamp) Reset() { *m = Timestamp{} }
+func (m *Timestamp) String() string { return proto.CompactTextString(m) }
+func (*Timestamp) ProtoMessage() {}
+func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
+
+func (m *Timestamp) GetSeconds() int64 {
+	if m != nil {
+		return m.Seconds
+	}
+	return 0
+}
+
+func (m *Timestamp) GetNanos() int32 {
+	if m != nil {
+		return m.Nanos
+	}
+	return 0
+}
+
+func init() {
+	proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
+}
+
+func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+	// 191 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
+	0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28,
+	0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5,
+	0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89,
+	0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70,
+	0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51,
+	0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89,
+	0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71,
+	0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a,
+	0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43,
+	0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
new file mode 100644
index 0000000..b7cbd17
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
@@ -0,0 +1,133 @@
+// Protocol Buffers - Google's data
interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/timestamp"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TimestampProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. 
+//     Timestamp timestamp;
+//     timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+//     timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+//
+// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+//
+//     long millis = System.currentTimeMillis();
+//
+//     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+//         .setNanos((int) ((millis % 1000) * 1000000)).build();
+//
+//
+// Example 5: Compute Timestamp from current time in Python.
+//
+//     timestamp = Timestamp()
+//     timestamp.GetCurrentTime()
+//
+// # JSON Mapping
+//
+// In JSON format, the Timestamp type is encoded as a string in the
+// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
+// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
+// where {year} is always expressed using four digits while {month}, {day},
+// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
+// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
+// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
+// is required, though only UTC (as indicated by "Z") is presently supported.
+//
+// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
+// 01:30 UTC on January 15, 2017.
+//
+// In JavaScript, one can convert a Date object to this format using the
+// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
+// method. In Python, a standard `datetime.datetime` object can be converted
+// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
+// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
+// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
+// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime())
+// to obtain a formatter capable of generating timestamps in this format.
+//
+//
+message Timestamp {
+
+  // Represents seconds of UTC time since Unix epoch
+  // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+  // 9999-12-31T23:59:59Z inclusive.
+  int64 seconds = 1;
+
+  // Non-negative fractions of a second at nanosecond resolution. Negative
+  // second values with fractions must still have non-negative nanos values
+  // that count forward in time. Must be from 0 to 999,999,999
+  // inclusive.
+  int32 nanos = 2;
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp_test.go b/vendor/github.com/golang/protobuf/ptypes/timestamp_test.go
new file mode 100644
index 0000000..6e3c969
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp_test.go
@@ -0,0 +1,153 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc.
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +import ( + "math" + "testing" + "time" + + "github.com/golang/protobuf/proto" + tspb "github.com/golang/protobuf/ptypes/timestamp" +) + +var tests = []struct { + ts *tspb.Timestamp + valid bool + t time.Time +}{ + // The timestamp representing the Unix epoch date. + {&tspb.Timestamp{Seconds: 0, Nanos: 0}, true, utcDate(1970, 1, 1)}, + // The smallest representable timestamp. + {&tspb.Timestamp{Seconds: math.MinInt64, Nanos: math.MinInt32}, false, + time.Unix(math.MinInt64, math.MinInt32).UTC()}, + // The smallest representable timestamp with non-negative nanos. + {&tspb.Timestamp{Seconds: math.MinInt64, Nanos: 0}, false, time.Unix(math.MinInt64, 0).UTC()}, + // The earliest valid timestamp. + {&tspb.Timestamp{Seconds: minValidSeconds, Nanos: 0}, true, utcDate(1, 1, 1)}, + //"0001-01-01T00:00:00Z"}, + // The largest representable timestamp. + {&tspb.Timestamp{Seconds: math.MaxInt64, Nanos: math.MaxInt32}, false, + time.Unix(math.MaxInt64, math.MaxInt32).UTC()}, + // The largest representable timestamp with nanos in range. + {&tspb.Timestamp{Seconds: math.MaxInt64, Nanos: 1e9 - 1}, false, + time.Unix(math.MaxInt64, 1e9-1).UTC()}, + // The largest valid timestamp. + {&tspb.Timestamp{Seconds: maxValidSeconds - 1, Nanos: 1e9 - 1}, true, + time.Date(9999, 12, 31, 23, 59, 59, 1e9-1, time.UTC)}, + // The smallest invalid timestamp that is larger than the valid range. + {&tspb.Timestamp{Seconds: maxValidSeconds, Nanos: 0}, false, time.Unix(maxValidSeconds, 0).UTC()}, + // A date before the epoch. + {&tspb.Timestamp{Seconds: -281836800, Nanos: 0}, true, utcDate(1961, 1, 26)}, + // A date after the epoch. + {&tspb.Timestamp{Seconds: 1296000000, Nanos: 0}, true, utcDate(2011, 1, 26)}, + // A date after the epoch, in the middle of the day. + {&tspb.Timestamp{Seconds: 1296012345, Nanos: 940483}, true, + time.Date(2011, 1, 26, 3, 25, 45, 940483, time.UTC)}, +} + +func TestValidateTimestamp(t *testing.T) { + for _, s := range tests { + got := validateTimestamp(s.ts) + if (got == nil) != s.valid { + t.Errorf("validateTimestamp(%v) = %v, want %v", s.ts, got, s.valid) + } + } +} + +func TestTimestamp(t *testing.T) { + for _, s := range tests { + got, err := Timestamp(s.ts) + if (err == nil) != s.valid { + t.Errorf("Timestamp(%v) error = %v, but valid = %t", s.ts, err, s.valid) + } else if s.valid && got != s.t { + t.Errorf("Timestamp(%v) = %v, want %v", s.ts, got, s.t) + } + } + // Special case: a nil Timestamp is an error, but returns the 0 Unix time. 
+	got, err := Timestamp(nil)
+	want := time.Unix(0, 0).UTC()
+	if got != want {
+		t.Errorf("Timestamp(nil) = %v, want %v", got, want)
+	}
+	if err == nil {
+		t.Errorf("Timestamp(nil) error = nil, expected error")
+	}
+}
+
+func TestTimestampProto(t *testing.T) {
+	for _, s := range tests {
+		got, err := TimestampProto(s.t)
+		if (err == nil) != s.valid {
+			t.Errorf("TimestampProto(%v) error = %v, but valid = %t", s.t, err, s.valid)
+		} else if s.valid && !proto.Equal(got, s.ts) {
+			t.Errorf("TimestampProto(%v) = %v, want %v", s.t, got, s.ts)
+		}
+	}
+	// No corresponding special case here: no time.Time results in a nil Timestamp.
+}
+
+func TestTimestampString(t *testing.T) {
+	for _, test := range []struct {
+		ts   *tspb.Timestamp
+		want string
+	}{
+		// Not much testing needed because presumably time.Format is
+		// well-tested.
+		{&tspb.Timestamp{Seconds: 0, Nanos: 0}, "1970-01-01T00:00:00Z"},
+		{&tspb.Timestamp{Seconds: minValidSeconds - 1, Nanos: 0}, "(timestamp: seconds:-62135596801 before 0001-01-01)"},
+	} {
+		got := TimestampString(test.ts)
+		if got != test.want {
+			t.Errorf("TimestampString(%v) = %q, want %q", test.ts, got, test.want)
+		}
+	}
+}
+
+func utcDate(year, month, day int) time.Time {
+	return time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC)
+}
+
+func TestTimestampNow(t *testing.T) {
+	// Bracket the expected time.
+	before := time.Now()
+	ts := TimestampNow()
+	after := time.Now()
+
+	tm, err := Timestamp(ts)
+	if err != nil {
+		t.Errorf("between %v and %v\nTimestampNow() = %v\nwhich is invalid (%v)", before, after, ts, err)
+	}
+	if tm.Before(before) || tm.After(after) {
+		t.Errorf("between %v and %v\nTimestamp(TimestampNow()) = %v", before, after, tm)
+	}
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
new file mode 100644
index 0000000..0ed59bf
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
@@ -0,0 +1,260 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/wrappers.proto
+
+/*
+Package wrappers is a generated protocol buffer package.
+
+It is generated from these files:
+	google/protobuf/wrappers.proto
+
+It has these top-level messages:
+	DoubleValue
+	FloatValue
+	Int64Value
+	UInt64Value
+	Int32Value
+	UInt32Value
+	BoolValue
+	StringValue
+	BytesValue
+*/
+package wrappers
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// Wrapper message for `double`.
+//
+// The JSON representation for `DoubleValue` is JSON number.
+type DoubleValue struct {
+	// The double value.
+	Value float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *DoubleValue) Reset()                    { *m = DoubleValue{} }
+func (m *DoubleValue) String() string            { return proto.CompactTextString(m) }
+func (*DoubleValue) ProtoMessage()               {}
+func (*DoubleValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (*DoubleValue) XXX_WellKnownType() string   { return "DoubleValue" }
+
+func (m *DoubleValue) GetValue() float64 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+// Wrapper message for `float`.
+//
+// The JSON representation for `FloatValue` is JSON number.
+type FloatValue struct {
+	// The float value.
+	Value float32 `protobuf:"fixed32,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *FloatValue) Reset()                    { *m = FloatValue{} }
+func (m *FloatValue) String() string            { return proto.CompactTextString(m) }
+func (*FloatValue) ProtoMessage()               {}
+func (*FloatValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+func (*FloatValue) XXX_WellKnownType() string   { return "FloatValue" }
+
+func (m *FloatValue) GetValue() float32 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+// Wrapper message for `int64`.
+//
+// The JSON representation for `Int64Value` is JSON string.
+type Int64Value struct {
+	// The int64 value.
+	Value int64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *Int64Value) Reset()                    { *m = Int64Value{} }
+func (m *Int64Value) String() string            { return proto.CompactTextString(m) }
+func (*Int64Value) ProtoMessage()               {}
+func (*Int64Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+func (*Int64Value) XXX_WellKnownType() string   { return "Int64Value" }
+
+func (m *Int64Value) GetValue() int64 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+// Wrapper message for `uint64`.
+//
+// The JSON representation for `UInt64Value` is JSON string.
+type UInt64Value struct {
+	// The uint64 value.
+	Value uint64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *UInt64Value) Reset()                    { *m = UInt64Value{} }
+func (m *UInt64Value) String() string            { return proto.CompactTextString(m) }
+func (*UInt64Value) ProtoMessage()               {}
+func (*UInt64Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+func (*UInt64Value) XXX_WellKnownType() string   { return "UInt64Value" }
+
+func (m *UInt64Value) GetValue() uint64 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+// Wrapper message for `int32`.
+//
+// The JSON representation for `Int32Value` is JSON number.
+type Int32Value struct {
+	// The int32 value.
+	Value int32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *Int32Value) Reset()                    { *m = Int32Value{} }
+func (m *Int32Value) String() string            { return proto.CompactTextString(m) }
+func (*Int32Value) ProtoMessage()               {}
+func (*Int32Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+func (*Int32Value) XXX_WellKnownType() string   { return "Int32Value" }
+
+func (m *Int32Value) GetValue() int32 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+// Wrapper message for `uint32`.
+//
+// The JSON representation for `UInt32Value` is JSON number.
+type UInt32Value struct {
+	// The uint32 value.
+	Value uint32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *UInt32Value) Reset()                    { *m = UInt32Value{} }
+func (m *UInt32Value) String() string            { return proto.CompactTextString(m) }
+func (*UInt32Value) ProtoMessage()               {}
+func (*UInt32Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+func (*UInt32Value) XXX_WellKnownType() string   { return "UInt32Value" }
+
+func (m *UInt32Value) GetValue() uint32 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+// Wrapper message for `bool`.
+//
+// The JSON representation for `BoolValue` is JSON `true` and `false`.
+type BoolValue struct {
+	// The bool value.
+	Value bool `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *BoolValue) Reset()                    { *m = BoolValue{} }
+func (m *BoolValue) String() string            { return proto.CompactTextString(m) }
+func (*BoolValue) ProtoMessage()               {}
+func (*BoolValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+func (*BoolValue) XXX_WellKnownType() string   { return "BoolValue" }
+
+func (m *BoolValue) GetValue() bool {
+	if m != nil {
+		return m.Value
+	}
+	return false
+}
+
+// Wrapper message for `string`.
+//
+// The JSON representation for `StringValue` is JSON string.
+type StringValue struct {
+	// The string value.
+	Value string `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *StringValue) Reset()                    { *m = StringValue{} }
+func (m *StringValue) String() string            { return proto.CompactTextString(m) }
+func (*StringValue) ProtoMessage()               {}
+func (*StringValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+func (*StringValue) XXX_WellKnownType() string   { return "StringValue" }
+
+func (m *StringValue) GetValue() string {
+	if m != nil {
+		return m.Value
+	}
+	return ""
+}
+
+// Wrapper message for `bytes`.
+//
+// The JSON representation for `BytesValue` is JSON string.
+type BytesValue struct {
+	// The bytes value.
+	Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *BytesValue) Reset()                    { *m = BytesValue{} }
+func (m *BytesValue) String() string            { return proto.CompactTextString(m) }
+func (*BytesValue) ProtoMessage()               {}
+func (*BytesValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+func (*BytesValue) XXX_WellKnownType() string   { return "BytesValue" }
+
+func (m *BytesValue) GetValue() []byte {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue")
+	proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue")
+	proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value")
+	proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value")
+	proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value")
+	proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value")
+	proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue")
+	proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue")
+	proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue")
+}
+
+func init() { proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+	// 259 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c,
+	0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca,
+	0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c,
+	0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5,
+	0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13,
+	0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8,
+	0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca,
+	0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a,
+	0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x0d,
+	0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x5a, 0xe8, 0x3a, 0xf1, 0x86, 0x43, 0x83, 0x3f, 0x00, 0x24,
+	0x12, 0xc0, 0x18, 0xa5, 0x95, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f,
+	0x9e, 0x9f, 0x93, 0x98, 0x97, 0x8e, 0x88, 0xaa, 0x82, 0x92, 0xca, 0x82, 0xd4, 0x62, 0x78, 0x8c,
+	0xfd, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e,
+	0x00, 0x54, 0xa9, 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b,
+	0x12, 0x1b, 0xd8, 0x0c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x19, 0x6c, 0xb9, 0xb8, 0xfe,
+	0x01, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
new file mode 100644
index 0000000..0194763
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
@@ -0,0 +1,118 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Wrappers for primitive (non-message) types. These types are useful
+// for embedding primitives in the `google.protobuf.Any` type and for places
+// where we need to distinguish between the absence of a primitive
+// typed field and its default value.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/wrappers";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "WrappersProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// Wrapper message for `double`.
+//
+// The JSON representation for `DoubleValue` is JSON number.
+message DoubleValue {
+  // The double value.
+  double value = 1;
+}
+
+// Wrapper message for `float`.
+//
+// The JSON representation for `FloatValue` is JSON number.
+message FloatValue {
+  // The float value.
+  float value = 1;
+}
+
+// Wrapper message for `int64`.
+//
+// The JSON representation for `Int64Value` is JSON string.
+message Int64Value {
+  // The int64 value.
+  int64 value = 1;
+}
+
+// Wrapper message for `uint64`.
+//
+// The JSON representation for `UInt64Value` is JSON string.
+message UInt64Value {
+  // The uint64 value.
+  uint64 value = 1;
+}
+
+// Wrapper message for `int32`.
+//
+// The JSON representation for `Int32Value` is JSON number.
+message Int32Value {
+  // The int32 value.
+  int32 value = 1;
+}
+
+// Wrapper message for `uint32`.
+//
+// The JSON representation for `UInt32Value` is JSON number.
+message UInt32Value {
+  // The uint32 value.
+  uint32 value = 1;
+}
+
+// Wrapper message for `bool`.
+//
+// The JSON representation for `BoolValue` is JSON `true` and `false`.
+message BoolValue {
+  // The bool value.
+  bool value = 1;
+}
+
+// Wrapper message for `string`.
+//
+// The JSON representation for `StringValue` is JSON string.
+message StringValue {
+  // The string value.
+  string value = 1;
+}
+
+// Wrapper message for `bytes`.
+//
+// The JSON representation for `BytesValue` is JSON string.
+message BytesValue {
+  // The bytes value.
+  bytes value = 1;
+}
diff --git a/vendor/github.com/gorilla/context/.travis.yml b/vendor/github.com/gorilla/context/.travis.yml
new file mode 100644
index 0000000..faca4da
--- /dev/null
+++ b/vendor/github.com/gorilla/context/.travis.yml
@@ -0,0 +1,19 @@
+language: go
+sudo: false
+
+matrix:
+  include:
+    - go: 1.3
+    - go: 1.4
+    - go: 1.5
+    - go: 1.6
+    - go: tip
+
+install:
+  - go get golang.org/x/tools/cmd/vet
+
+script:
+  - go get -t -v ./...
+  - diff -u <(echo -n) <(gofmt -d .)
+  - go tool vet .
+  - go test -v -race ./...
diff --git a/vendor/github.com/gorilla/context/LICENSE b/vendor/github.com/gorilla/context/LICENSE
new file mode 100644
index 0000000..0e5fb87
--- /dev/null
+++ b/vendor/github.com/gorilla/context/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+    * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/gorilla/context/README.md b/vendor/github.com/gorilla/context/README.md
new file mode 100644
index 0000000..c60a31b
--- /dev/null
+++ b/vendor/github.com/gorilla/context/README.md
@@ -0,0 +1,7 @@
+context
+=======
+[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context)
+
+gorilla/context is a general purpose registry for global request variables.
+
+Read the full documentation here: http://www.gorillatoolkit.org/pkg/context
diff --git a/vendor/github.com/gorilla/context/context.go b/vendor/github.com/gorilla/context/context.go
new file mode 100644
index 0000000..81cb128
--- /dev/null
+++ b/vendor/github.com/gorilla/context/context.go
@@ -0,0 +1,143 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context
+
+import (
+	"net/http"
+	"sync"
+	"time"
+)
+
+var (
+	mutex sync.RWMutex
+	data  = make(map[*http.Request]map[interface{}]interface{})
+	datat = make(map[*http.Request]int64)
+)
+
+// Set stores a value for a given key in a given request.
+func Set(r *http.Request, key, val interface{}) {
+	mutex.Lock()
+	if data[r] == nil {
+		data[r] = make(map[interface{}]interface{})
+		datat[r] = time.Now().Unix()
+	}
+	data[r][key] = val
+	mutex.Unlock()
+}
+
+// Get returns a value stored for a given key in a given request.
+func Get(r *http.Request, key interface{}) interface{} {
+	mutex.RLock()
+	if ctx := data[r]; ctx != nil {
+		value := ctx[key]
+		mutex.RUnlock()
+		return value
+	}
+	mutex.RUnlock()
+	return nil
+}
+
+// GetOk returns the stored value and a presence flag, like a two-value map access.
+func GetOk(r *http.Request, key interface{}) (interface{}, bool) {
+	mutex.RLock()
+	if _, ok := data[r]; ok {
+		value, ok := data[r][key]
+		mutex.RUnlock()
+		return value, ok
+	}
+	mutex.RUnlock()
+	return nil, false
+}
+
+// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests.
+func GetAll(r *http.Request) map[interface{}]interface{} {
+	mutex.RLock()
+	if context, ok := data[r]; ok {
+		result := make(map[interface{}]interface{}, len(context))
+		for k, v := range context {
+			result[k] = v
+		}
+		mutex.RUnlock()
+		return result
+	}
+	mutex.RUnlock()
+	return nil
+}
+
+// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if
+// the request was registered.
+func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) {
+	mutex.RLock()
+	context, ok := data[r]
+	result := make(map[interface{}]interface{}, len(context))
+	for k, v := range context {
+		result[k] = v
+	}
+	mutex.RUnlock()
+	return result, ok
+}
+
+// Delete removes a value stored for a given key in a given request.
+func Delete(r *http.Request, key interface{}) {
+	mutex.Lock()
+	if data[r] != nil {
+		delete(data[r], key)
+	}
+	mutex.Unlock()
+}
+
+// Clear removes all values stored for a given request.
+//
+// This is usually called by a handler wrapper to clean up request
+// variables at the end of a request lifetime. See ClearHandler().
+func Clear(r *http.Request) {
+	mutex.Lock()
+	clear(r)
+	mutex.Unlock()
+}
+
+// clear is Clear without the lock.
+func clear(r *http.Request) {
+	delete(data, r)
+	delete(datat, r)
+}
+
+// Purge removes request data stored for longer than maxAge, in seconds.
+// It returns the number of requests removed.
+//
+// If maxAge <= 0, all request data is removed.
+//
+// This is only used as a sanity check: if context cleaning was not properly
+// set up, some request data can be kept forever, consuming an increasing
+// amount of memory. If this is detected, Purge() must be called
+// periodically until the problem is fixed.
+func Purge(maxAge int) int {
+	mutex.Lock()
+	count := 0
+	if maxAge <= 0 {
+		count = len(data)
+		data = make(map[*http.Request]map[interface{}]interface{})
+		datat = make(map[*http.Request]int64)
+	} else {
+		min := time.Now().Unix() - int64(maxAge)
+		for r := range data {
+			if datat[r] < min {
+				clear(r)
+				count++
+			}
+		}
+	}
+	mutex.Unlock()
+	return count
+}
+
+// ClearHandler wraps an http.Handler and clears request values at the end
+// of a request lifetime.
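+//
+// A minimal usage sketch (mux here stands for an assumed application root
+// handler and is not part of this package):
+//
+//     http.ListenAndServe(":8080", ClearHandler(mux))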
+func ClearHandler(h http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		defer Clear(r)
+		h.ServeHTTP(w, r)
+	})
+}
diff --git a/vendor/github.com/gorilla/context/context_test.go b/vendor/github.com/gorilla/context/context_test.go
new file mode 100644
index 0000000..9814c50
--- /dev/null
+++ b/vendor/github.com/gorilla/context/context_test.go
@@ -0,0 +1,161 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context
+
+import (
+	"net/http"
+	"testing"
+)
+
+type keyType int
+
+const (
+	key1 keyType = iota
+	key2
+)
+
+func TestContext(t *testing.T) {
+	assertEqual := func(val interface{}, exp interface{}) {
+		if val != exp {
+			t.Errorf("Expected %v, got %v.", exp, val)
+		}
+	}
+
+	r, _ := http.NewRequest("GET", "http://localhost:8080/", nil)
+	emptyR, _ := http.NewRequest("GET", "http://localhost:8080/", nil)
+
+	// Get()
+	assertEqual(Get(r, key1), nil)
+
+	// Set()
+	Set(r, key1, "1")
+	assertEqual(Get(r, key1), "1")
+	assertEqual(len(data[r]), 1)
+
+	Set(r, key2, "2")
+	assertEqual(Get(r, key2), "2")
+	assertEqual(len(data[r]), 2)
+
+	// GetOk()
+	value, ok := GetOk(r, key1)
+	assertEqual(value, "1")
+	assertEqual(ok, true)
+
+	value, ok = GetOk(r, "not exists")
+	assertEqual(value, nil)
+	assertEqual(ok, false)
+
+	Set(r, "nil value", nil)
+	value, ok = GetOk(r, "nil value")
+	assertEqual(value, nil)
+	assertEqual(ok, true)
+
+	// GetAll()
+	values := GetAll(r)
+	assertEqual(len(values), 3)
+
+	// GetAll() for empty request
+	values = GetAll(emptyR)
+	if values != nil {
+		t.Error("GetAll didn't return nil value for invalid request")
+	}
+
+	// GetAllOk()
+	values, ok = GetAllOk(r)
+	assertEqual(len(values), 3)
+	assertEqual(ok, true)
+
+	// GetAllOk() for empty request
+	values, ok = GetAllOk(emptyR)
+	assertEqual(value, nil)
+	assertEqual(ok, false)
+
+	// Delete()
+	Delete(r, key1)
+	assertEqual(Get(r, key1), nil)
+	assertEqual(len(data[r]), 2)
+
+	Delete(r, key2)
+	assertEqual(Get(r, key2), nil)
+	assertEqual(len(data[r]), 1)
+
+	// Clear()
+	Clear(r)
+	assertEqual(len(data), 0)
+}
+
+func parallelReader(r *http.Request, key string, iterations int, wait, done chan struct{}) {
+	<-wait
+	for i := 0; i < iterations; i++ {
+		Get(r, key)
+	}
+	done <- struct{}{}
+}
+
+func parallelWriter(r *http.Request, key, value string, iterations int, wait, done chan struct{}) {
+	<-wait
+	for i := 0; i < iterations; i++ {
+		Set(r, key, value)
+	}
+	done <- struct{}{}
+}
+
+func benchmarkMutex(b *testing.B, numReaders, numWriters, iterations int) {
+	b.StopTimer()
+	r, _ := http.NewRequest("GET", "http://localhost:8080/", nil)
+	done := make(chan struct{})
+	b.StartTimer()
+
+	for i := 0; i < b.N; i++ {
+		wait := make(chan struct{})
+
+		for i := 0; i < numReaders; i++ {
+			go parallelReader(r, "test", iterations, wait, done)
+		}
+
+		for i := 0; i < numWriters; i++ {
+			go parallelWriter(r, "test", "123", iterations, wait, done)
+		}
+
+		close(wait)
+
+		for i := 0; i < numReaders+numWriters; i++ {
+			<-done
+		}
+	}
+}
+
+func BenchmarkMutexSameReadWrite1(b *testing.B) {
+	benchmarkMutex(b, 1, 1, 32)
+}
+func BenchmarkMutexSameReadWrite2(b *testing.B) {
+	benchmarkMutex(b, 2, 2, 32)
+}
+func BenchmarkMutexSameReadWrite4(b *testing.B) {
+	benchmarkMutex(b, 4, 4, 32)
+}
+func BenchmarkMutex1(b *testing.B) {
+	benchmarkMutex(b, 2, 8, 32)
+}
+func BenchmarkMutex2(b *testing.B) {
+	benchmarkMutex(b, 16, 4, 64)
+}
+func BenchmarkMutex3(b *testing.B) {
+	benchmarkMutex(b, 1, 2, 128)
+}
+func BenchmarkMutex4(b *testing.B) {
+	benchmarkMutex(b, 128, 32, 256)
+}
+func BenchmarkMutex5(b *testing.B) {
+	benchmarkMutex(b, 1024, 2048, 64)
+}
+func BenchmarkMutex6(b *testing.B) {
+	benchmarkMutex(b, 2048, 1024, 512)
+}
diff --git a/vendor/github.com/gorilla/context/doc.go b/vendor/github.com/gorilla/context/doc.go
new file mode 100644
index 0000000..73c7400
--- /dev/null
+++ b/vendor/github.com/gorilla/context/doc.go
@@ -0,0 +1,82 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package context stores values shared during a request lifetime.
+
+For example, a router can set variables extracted from the URL and later
+application handlers can access those values, or it can be used to store
+session values to be saved at the end of a request. There are several
+other common uses.
+
+The idea was posted by Brad Fitzpatrick to the go-nuts mailing list:
+
+	http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53
+
+Here's the basic usage: first define the keys that you will need. The key
+type is interface{} so a key can be of any type that supports equality.
+Here we define a key using a custom int type to avoid name collisions:
+
+	package foo
+
+	import (
+		"github.com/gorilla/context"
+	)
+
+	type key int
+
+	const MyKey key = 0
+
+Then set a variable. Variables are bound to an http.Request object, so you
+need a request instance to set a value:
+
+	context.Set(r, MyKey, "bar")
+
+The application can later access the variable using the same key you provided:
+
+	func MyHandler(w http.ResponseWriter, r *http.Request) {
+		// val is "bar".
+		val := context.Get(r, foo.MyKey)
+
+		// returns ("bar", true)
+		val, ok := context.GetOk(r, foo.MyKey)
+		// ...
+	}
+
+And that's all for the basic usage. We discuss some other ideas below.
+
+Any type can be stored in the context. To enforce a given type, make the key
+private and wrap Get() and Set() to accept and return values of a specific
+type:
+
+	type key int
+
+	const mykey key = 0
+
+	// GetMyKey returns a value for this package from the request values.
+	func GetMyKey(r *http.Request) SomeType {
+		if rv := context.Get(r, mykey); rv != nil {
+			return rv.(SomeType)
+		}
+		return nil
+	}
+
+	// SetMyKey sets a value for this package in the request values.
+	func SetMyKey(r *http.Request, val SomeType) {
+		context.Set(r, mykey, val)
+	}
+
+Variables must be cleared at the end of a request, to remove all values
+that were stored. This can be done in an http.Handler, after a request was
+served. Just call Clear() passing the request:
+
+	context.Clear(r)
+
+...or use ClearHandler(), which conveniently wraps an http.Handler to clear
+variables at the end of a request lifetime.
+
+The Routers from the packages gorilla/mux and gorilla/pat call Clear(),
+so if you are using either of them you don't need to clear the context
+manually.
+*/
+package context
diff --git a/vendor/github.com/gorilla/securecookie/.travis.yml b/vendor/github.com/gorilla/securecookie/.travis.yml
new file mode 100644
index 0000000..6f440f1
--- /dev/null
+++ b/vendor/github.com/gorilla/securecookie/.travis.yml
@@ -0,0 +1,19 @@
+language: go
+sudo: false
+
+matrix:
+  include:
+    - go: 1.3
+    - go: 1.4
+    - go: 1.5
+    - go: 1.6
+    - go: 1.7
+    - go: tip
+  allow_failures:
+    - go: tip
+
+script:
+  - go get -t -v ./...
+  - diff -u <(echo -n) <(gofmt -d .)
+  - go vet $(go list ./... | grep -v /vendor/)
+  - go test -v -race ./...
diff --git a/vendor/github.com/gorilla/securecookie/LICENSE b/vendor/github.com/gorilla/securecookie/LICENSE
new file mode 100644
index 0000000..0e5fb87
--- /dev/null
+++ b/vendor/github.com/gorilla/securecookie/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+    * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/gorilla/securecookie/README.md b/vendor/github.com/gorilla/securecookie/README.md
new file mode 100644
index 0000000..aa7bd1a
--- /dev/null
+++ b/vendor/github.com/gorilla/securecookie/README.md
@@ -0,0 +1,80 @@
+securecookie
+============
+[![GoDoc](https://godoc.org/github.com/gorilla/securecookie?status.svg)](https://godoc.org/github.com/gorilla/securecookie) [![Build Status](https://travis-ci.org/gorilla/securecookie.png?branch=master)](https://travis-ci.org/gorilla/securecookie)
+[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/securecookie/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/securecookie?badge)
+
+
+securecookie encodes and decodes authenticated and optionally encrypted
+cookie values.
+
+Secure cookies can't be forged, because their values are validated using HMAC.
+When encrypted, the content is also inaccessible to malicious eyes. It is
+still recommended that sensitive data not be stored in cookies, and that
+HTTPS be used to prevent cookie [replay attacks](https://en.wikipedia.org/wiki/Replay_attack).
+
+## Examples
+
+To use it, first create a new SecureCookie instance:
+
+```go
+// Hash keys should be at least 32 bytes long
+var hashKey = []byte("very-secret")
+// Block keys should be 16 bytes (AES-128) or 32 bytes (AES-256) long.
+// Shorter keys may weaken the encryption used.
+var blockKey = []byte("a-lot-secret")
+var s = securecookie.New(hashKey, blockKey)
+```
+
+The hashKey is required, used to authenticate the cookie value using HMAC.
+It is recommended to use a key with 32 or 64 bytes.
+
+The blockKey is optional, used to encrypt the cookie value -- set it to nil
+to not use encryption. If set, the length must correspond to the block size
+of the encryption algorithm. For AES, used by default, valid lengths are
+16, 24, or 32 bytes to select AES-128, AES-192, or AES-256.
+
+Strong keys can be created using the convenience function GenerateRandomKey().
+
+Once a SecureCookie instance is set, use it to encode a cookie value:
+
+```go
+func SetCookieHandler(w http.ResponseWriter, r *http.Request) {
+	value := map[string]string{
+		"foo": "bar",
+	}
+	if encoded, err := s.Encode("cookie-name", value); err == nil {
+		cookie := &http.Cookie{
+			Name:     "cookie-name",
+			Value:    encoded,
+			Path:     "/",
+			Secure:   true,
+			HttpOnly: true,
+		}
+		http.SetCookie(w, cookie)
+	}
+}
+```
+
+Later, use the same SecureCookie instance to decode and validate a cookie
+value:
+
+```go
+func ReadCookieHandler(w http.ResponseWriter, r *http.Request) {
+	if cookie, err := r.Cookie("cookie-name"); err == nil {
+		value := make(map[string]string)
+		if err = s.Decode("cookie-name", cookie.Value, &value); err == nil {
+			fmt.Fprintf(w, "The value of foo is %q", value["foo"])
+		}
+	}
+}
+```
+
+We stored a map[string]string, but secure cookies can hold any value that
+can be encoded using `encoding/gob`. To store custom types, they must be
+registered first using gob.Register(). For basic types this is not needed;
+it works out of the box. An optional JSON encoder that uses `encoding/json`
+is available for types compatible with JSON.
+
+## License
+
+BSD licensed. See the LICENSE file for details.
diff --git a/vendor/github.com/gorilla/securecookie/doc.go b/vendor/github.com/gorilla/securecookie/doc.go
new file mode 100644
index 0000000..ae89408
--- /dev/null
+++ b/vendor/github.com/gorilla/securecookie/doc.go
@@ -0,0 +1,61 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package securecookie encodes and decodes authenticated and optionally
+encrypted cookie values.
+
+Secure cookies can't be forged, because their values are validated using HMAC.
+When encrypted, the content is also inaccessible to malicious eyes.
+
+To use it, first create a new SecureCookie instance:
+
+	var hashKey = []byte("very-secret")
+	var blockKey = []byte("a-lot-secret")
+	var s = securecookie.New(hashKey, blockKey)
+
+The hashKey is required, used to authenticate the cookie value using HMAC.
+It is recommended to use a key with 32 or 64 bytes.
+
+The blockKey is optional, used to encrypt the cookie value -- set it to nil
+to not use encryption. If set, the length must correspond to the block size
+of the encryption algorithm. For AES, used by default, valid lengths are
+16, 24, or 32 bytes to select AES-128, AES-192, or AES-256.
+
+Strong keys can be created using the convenience function GenerateRandomKey().
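+
+As a minimal sketch (the key sizes shown are this example's choice; in a
+real deployment, generate the keys once and persist them, or previously
+issued cookies will no longer validate):
+
+	var hashKey = securecookie.GenerateRandomKey(64)
+	var blockKey = securecookie.GenerateRandomKey(32)
+	var s = securecookie.New(hashKey, blockKey)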
+
+Once a SecureCookie instance is set, use it to encode a cookie value:
+
+	func SetCookieHandler(w http.ResponseWriter, r *http.Request) {
+		value := map[string]string{
+			"foo": "bar",
+		}
+		if encoded, err := s.Encode("cookie-name", value); err == nil {
+			cookie := &http.Cookie{
+				Name:  "cookie-name",
+				Value: encoded,
+				Path:  "/",
+			}
+			http.SetCookie(w, cookie)
+		}
+	}
+
+Later, use the same SecureCookie instance to decode and validate a cookie
+value:
+
+	func ReadCookieHandler(w http.ResponseWriter, r *http.Request) {
+		if cookie, err := r.Cookie("cookie-name"); err == nil {
+			value := make(map[string]string)
+			if err = s.Decode("cookie-name", cookie.Value, &value); err == nil {
+				fmt.Fprintf(w, "The value of foo is %q", value["foo"])
+			}
+		}
+	}
+
+We stored a map[string]string, but secure cookies can hold any value that
+can be encoded using encoding/gob. To store custom types, they must be
+registered first using gob.Register(). For basic types this is not needed;
+it works out of the box.
+*/
+package securecookie
diff --git a/vendor/github.com/gorilla/securecookie/fuzz.go b/vendor/github.com/gorilla/securecookie/fuzz.go
new file mode 100644
index 0000000..e4d0534
--- /dev/null
+++ b/vendor/github.com/gorilla/securecookie/fuzz.go
@@ -0,0 +1,25 @@
+// +build gofuzz
+
+package securecookie
+
+var hashKey = []byte("very-secret12345")
+var blockKey = []byte("a-lot-secret1234")
+var s = New(hashKey, blockKey)
+
+type Cookie struct {
+	B bool
+	I int
+	S string
+}
+
+func Fuzz(data []byte) int {
+	datas := string(data)
+	var c Cookie
+	if err := s.Decode("fuzz", datas, &c); err != nil {
+		return 0
+	}
+	if _, err := s.Encode("fuzz", c); err != nil {
+		panic(err)
+	}
+	return 1
+}
diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/0.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/0.sc
new file mode 100644
index 0000000..e42d3c1
--- /dev/null
+++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/0.sc
@@ -0,0 +1 @@
+MTQzOTkyMzI3MnxjRDhCaG9WdS1IMUVoSVc0N0RRYVVMX0ZJcHlPNU9vZ0tLNnhaSm94U3dWVFRFUzBxZ3FTOEtnMWt5d1JsZnRvVFJSTy1VNEFqUXJfVGp0dVBKWTRpTHp0SjhyUlJWbEpMVlF2ZV9CdU5Vb2d0cEE9fFomN3uC7sVpjIiNqJ7nSmSW0OcB-1nXJndHHUK35Z_o
\ No newline at end of file
diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/05a79f06cf3f67f726dae68d18a2290f6c9a50c9-1 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/05a79f06cf3f67f726dae68d18a2290f6c9a50c9-1
new file mode 100644
index 0000000..22ded55
--- /dev/null
+++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/05a79f06cf3f67f726dae68d18a2290f6c9a50c9-1
@@ -0,0 +1 @@
+:
\ No newline at end of file
diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/05aefe7b48db1dcf464048449ac4fa6af2fbc73b-5 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/05aefe7b48db1dcf464048449ac4fa6af2fbc73b-5
new file mode 100644
index 0000000..65c6572
--- /dev/null
+++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/05aefe7b48db1dcf464048449ac4fa6af2fbc73b-5
@@ -0,0 +1,3 @@
+
+
+ 
\ No newline at end of file
diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/1.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/1.sc
new file mode 100644
index 0000000..e3ec3af
--- /dev/null
+++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/1.sc
@@ -0,0 +1 @@
+MTQzOTkyMzI3MnxURmd2SUVTenFSNDBMTU84ZDkxY01Kc1JhVTY2dnFqdjUtOFRQZ3p4N2FiN3ctMWVtSnVtZ1kxVjZkR24wdVVWWG9keHMwZXFXek9XTUs0QVo4dXNkZFdPQXRScUo5RzJLZG8teGVNLXFEMEJwcE1lTk9GeVprdjJ0SlpjRkVfbEZBLS1TNXlmTlFidWZCWkh5TEE2WE5YaERuenZjZTZWczFyV0xONFEyc0hWNjZ6RFBGUlFxaUdBcjlZOF9SMzBTV1ctb25NLWxxUWh5X2hhNk1jd3plaWZ0NFV0cGNRYWpqSjlWSjMzUU9QSHdMUUlYdVVQVHNHOEJsd2tra2h3ODJtWkVkRVhLQ0ZRNUJlZVdva0U3RWRyWk1NaTZjaEhOdkppejNnPXyQsRqNFNx_AMU6do1StnhxPHRr3HHtfu7dVvDF3qiDNQ== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/10.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/10.sc new file mode 100644 index 0000000..69f8960 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/10.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxROFk0ZzZkLTYwWWx1NlJOVFdRZS1YeEZkVGN3TkI5bXdlRkFtTkFTRXZKck1aSGI4TVJ3Vy1Sc2FVWFI4N3REYU9DYzdLb1JRQjBMemhtNHhaOUtMNnhlQjZhUExUeEVwOEJJXzNVcnhqMzZsc2ZyVDFPdlBrcVAxQjhwTE9JdFF2WG9FTWxqcjFWT1ZGZ20yVVdDckFVUE1QQW1EbURlU3lxcEJYRmh3RllhRkNaQnFDNXNGWGx1bm1vYWd3RFF3VWlxaVgtckhkTWZldktzWkZvcFpHTkxwd1Y1SENqdGFpbE9VOUpRZkhSN2RKcHJ4akhteFE4Z2xodVZRSWRyUk83WHpQbW04dEZFVi04Zkd5VHR2WFdGcUdvQWRlWWs4dnZGVUlNRjF1VT18Ylj4DXWIZQD6Tmf1UTA54iJj49qE2sUFzr69lsBdJXo= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/11.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/11.sc new file mode 100644 index 0000000..cfadb16 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/11.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxvY1g0WGhRNVNaRHVLX1lPODEyOGlRWXFIb2diNFd1Tmt0YWF0TWcyeXhKb1pDZE55Wk50MFlodmYzUWwtRnp1bkVHZ3ZoZlFWZnc4WGluVWNyaDhPTlRlaGlMWTNJcDV5dFJxSFJWQWIyYjM4V0RxaDVJQy1nREtiaGZHMUhQNzUzZ014SWtVU3p4QTdlV3NBclVzSHYtQ05CNVZrNGszb0VRbVphZTB5RG5QellrbjlpeTNZX0RXZGtQaXhZTnkzQ0ZNdXhCZlNLNENqTEZGSHkxNzBBV1ZpZHV0c0FGU1FSSDc5QXdIaDdJZlFrUFVUVGdOZmEyeGwxYTBUc1RUNzRQTC1uWmZlWUp1eV93dkQzMnZkZjdRMUE9PXyAdVeo7mS4I2iCVT_RBaAtFggrM1ATqiz1Qfl_yGMLdg== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/12.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/12.sc new file mode 100644 index 0000000..067aa57 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/12.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxabVUwLTZRMUc2VXpoN2tLaC1uSlRUS0pUelplUFB5UHlrU194QUJfem1zZ0F6Q2ZxbU04dWhOWTB2RE42MUFIeDF0U3dVdzZtaHNXOFFTbmZxd1FIaEtTUk1HdnNZTGRaNHBLWE5JMkhlWDg0UGwwcnphZVFGc1Z5YVRISDB6bjZmZHNJaWJIbmdfR3ZKSTJCZjdVNlZaaVlPaFRFNURMLWlPNXFrWUYyaUc0U1FGQzlGS0FwS2NVcks4ZUVHUWc3QVo0OFA5ZzczYUhzYjY5cGZSbi1YLVI2RENnalZRZl96V3MxdUdTVkE9PXw8F0WJVogg9iGVRsENzrQyPuDs2ZiPEoax9PhGEuae8Q== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/13.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/13.sc new file mode 100644 index 0000000..50e478e --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/13.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3Mnw4N3pMcHc3NlAzS2JwTHBzSGgxQ0k0cWtMOUZwc0JJSGVUVnJDTnE1MTh1WXpLZFB0MHFNejBXOGV1MGs0VHRzQVlrTWRnREpoZ214Qm1ldEJnNUkyVFc0U3pHbC1hc3JRbDRnLVpDNFhLcmNQaGM9fHfi4gFT970lAhbTpz5WB_N8a8ps0mDRnW-T9-5gUqur \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/14.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/14.sc new file mode 100644 index 0000000..4f6eb26 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/14.sc @@ -0,0 +1 @@ 
+MTQzOTkyMzI3MnxVSFpwR0lpMVVsQmF2bjNya1dVTTZfQW9McWk2dFhVSEkyWGZva1NNbE1uOWc4aVNBU0N0V3hLU1k0eGpDVEQ5dWtYZ3EwdldzZFM0eTQ4ZHRsS2Rjbmp2YjNUZkt4ejRtWThuUnY3UVR0SVE3dFFkMnBwaHdjR0VVS3o5N25NY0tiWllUTlgyNDc1MzVEN1dZSTQ0NVktWVlvNzlaMkYweVZndFFacmtxNVhmS2otN0FFRU1SWi1aX0kycUo3OEtkT0ZhbTliZ3J6cDZtUUhvVGdUQnZQRU1KaFNWWVJvNnp4MjFfOXlGSjlKc1VpZkxnRmV0WkNFZmM3ZGlOcG5NQnBGc2VwME5PemNpbFdxaG8tZHJtdVJvLTd2XzRvS05sVk1oTEswV0MxUmk1c0F6VUU5bVQxMHdPbVpzY0M1b3Q3dVNKUmh5b202TFVwQjBfdENNalB6N2hrcTR8xUFEKJ42IacQ7V72qt2NSe4yPms5fjuFSgAVnoIzTe4= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/15.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/15.sc new file mode 100644 index 0000000..1f38e37 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/15.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3Mnw3X05zSEM3ekZZZW9iN0pwOW9MT25acVlJbGRTaWNNbEt6Y0RYaGpWRUo0dXVySjQtRm1ndFdGTEhyT2dNcE5laFJMZWluOUkySFVORzFlYzEtZ2xzU1NwU3RXSGdER2RoUmJsWWpERjl0d1BvTTJVbW9yZ2V6QkNiRy1ESnNwUVhpZmNsdEhyR0p4RVpCX2V3ZmxReVk4PXwxMDjJkXSAv2j7SOCkQI6DhctO1rmkfe5TLCemSgF5TQ== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/16.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/16.sc new file mode 100644 index 0000000..da7e774 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/16.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxVLUNuN1Bmb2owRVUwLWJHRUVwUGZPNmUxSDJkeDlBeE45NDIxLUZaVE16VU5tUF8tMEpZZlc0QmxFSmlJMVdNU0tfdWlGRDFadWNTb2ZBbjlJRng2bzAxa0hCY0cyM2R4SlhKZjZuV1dCdGFRdFRBS1ltaU5lMWtPdm9wcmVQZzZlT21UYWNMVGQwUjM1R0NBTVYyZHlldnlRPT18NfdAQbuBFXoRDB2lcBp3PsOZOoRyGXOD_5Vb5mOYfdk= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/169c3e89cd10efe9bce3a1fdb69a31229e618fc0 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/169c3e89cd10efe9bce3a1fdb69a31229e618fc0 new file mode 100644 index 0000000..c00af04 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/169c3e89cd10efe9bce3a1fdb69a31229e618fc0 @@ -0,0 +1 @@ +8Q=== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/17.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/17.sc new file mode 100644 index 0000000..b9e411c --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/17.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxPSEJsU0FQMUh5OUYwTWRLRnFFckJXalVSSnNET1hmeV9Tb3IzRXlqS05IR0s3YnhmSkU5SVVYTlFzU09OcEpKcGpQaHd6NDU2ZWhoLXQ2ckpoM3cwSGJMMk9VbkpWX0ZVUlB1aFNnaWlSYXBzb2ViNXJMRjhBcF9WVmRSelhOaWtuODRHV1E2QjA3b2ZnQ19IUFdQS2x1Q0VuaWxHN21SLTVGZUs3MFVvTFhzZmdoSXZ5VTFJaXF5LUZLVUg3aXV3clFmfHWnnWaZO5H5ZBokfQp9DE0Vqo52hWUySU2Qb5dOis4X \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/18.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/18.sc new file mode 100644 index 0000000..291882c --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/18.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3Mnx6UzgxOUtEd0RRVE9fUzIxVGdjV0xvZExBM25HSFU0R05BZ2FSVzFEX29hRzQyTEdSZHVOdGNBU1pJVk9fZ3hlU0toY3RHSjJiZjVXdGNXY095cVNqV2owWm9OdXJfY1BHMlFib3hWZ2dwQzFteVBWWTNJQU9TcWtTbjF5STJPQ2Z1RGsxbzJzSGpyYnBZR3gwSDc0VjJlYkxkVkswbjd1c01VWUdLR3RhWEFLd3p1RkNvV3F1QWVJNURtZXQtY3EyOVZ1WEdCN19lUFZibFV3WDhvb0JwNE5nMEVqbDRmdWloN1g1WWRTNWdNMDB6aUhTQkxvWFBzcFdka1R3N0p2Y3ROR0pyRmdCaU85cEgxVlhOYjhHbGJUdlFMNDlSVTlMbGFfZ09ZREhnPT18O_WDUgPOoZz_Tj8Xl5xmVh8MW4DNTlR1Z_RKXp25EgE= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/19.sc 
b/vendor/github.com/gorilla/securecookie/fuzz/corpus/19.sc new file mode 100644 index 0000000..5e03246 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/19.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxWTzdmRjR5X0pDT2J0VVpEUlZuMkFhQTBUQUd5TG5fa2xSZndZTTB2SVdCcHpZaG4yRFdpSFRQamx2VW5RZlIwZ0h0MjRKTEdIb25RNFh1N0VGQjhBQzJSenl1d0V0d1NkNy1WU0FNaFB6cl9tWW9xQzJUOW5yelVicHAyVkJ5ekZuVnhRVVFTTTFDQmhFRHp6QVNkSjFMdndWd0tuamhTUEtEUTAyZHVCcWhiMFpsSHN2V19yby1tQ0lVQjlPTXcxaDZLa3BFTnkzYnlBTTlLTTZWdHFKbjVIejVwRjJKbFpGd2J3TXJwYnQxQ0gtTT18fWdsGgvMPIXZ2GFuTHyx7UEQxVQZ5kuLef2HuTWuMmc= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/2.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/2.sc new file mode 100644 index 0000000..85b7814 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/2.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxWZTY5Rmh6Tjctc2Q0LUNKS1JjckZCYlFrdTVHZDY1SGN5Y25yY21DTVhOaXMyVTZSeEVnZEhwd3hrOXpFdk1MZXRJM1lOY3BrQzM0eExFSUtsMG8zTVZGNV91SDI3cV9BRDAzVVpFNEg2WWljQk5kOHRFRVlya3J6Y1NuTWhqVnA0eDhLelY3MkVGZmdRbXdvS2VXSWxicWxiX2drOXZHQkVOY1VzQWdUNktheTZ2UC1hMHRFMEdubmF2RmNxLTF6cVZhM1VQTnJSR1RJVm1MSHRiX05XVkc1bEpjWGFLQ0dMS08xMm1QQTFtekFUNUlCZz09fDoYy22cGspOxGc5hXTDyuC2dUigy05-6F9jWgzcQAhq \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/20.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/20.sc new file mode 100644 index 0000000..c44b13c --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/20.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxVWW9MekhaTFowaW9PdjkyUU9pX2M3WHNma1E0bXltbE54N3kyLVhuUU1ya3cyaG9FdFJGRkdIcEY0bmE3RkoyTkFUSlJBSEhmSDlLRW1qN0xUM2EyVEZPeTJyNHlFaERIMng2eWljeGNOaFNXVTZoUlJUbDdLMUExRmE0NW5qRzhZWlpreUdCMGh2QmpPcmEwYm1wam5UVzR2NnI0c2Z4dEdQZnlaZz18gq2BDyZoxUhCEdkDBJI-CKd_dBMqivkn0JPOWKVU5w0= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/202ad82e80f70c37f893e47d23f91b1de5067219-7 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/202ad82e80f70c37f893e47d23f91b1de5067219-7 new file mode 100644 index 0000000..cc21ded --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/202ad82e80f70c37f893e47d23f91b1de5067219-7 @@ -0,0 +1,3 @@ +8Q== + + = \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/21.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/21.sc new file mode 100644 index 0000000..b4dcc3d --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/21.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxMXzFTTy10UHl2ejdOVnBtU3o0VmM0ZnRmN0dMbnk3Wlc1WVRlOXRZRTA5aVBOaHlxUDJWZ2gxcDRlZk50aGJNOUdqSlpyNmFBZGdmcUtiMEFZTFpPbTF5d1VfT1UzRHl4M19uelJBemgxYzVOU0lqYlI1Wnd0SW1MRGw0Tmw4ZUNOdWxWZF9fZlZVNnJUYVZXcDl4Q0JBSmJmd3N3Mnltam13UGtxQVBKTkkxcm5mUUNhWVprNUZYMXFPRG80aFhTODhfRXhVTU1uckJHYzJTQUhCeEFYVmVQSUpuWmFmSzFTaz180bgqBZn9nHXblgUiVtub8vMX5yoi0kbhENzp6sddMt8= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/21606782c65e44cac7afbb90977d8b6f82140e76-1 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/21606782c65e44cac7afbb90977d8b6f82140e76-1 new file mode 100644 index 0000000..851c75c --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/21606782c65e44cac7afbb90977d8b6f82140e76-1 @@ -0,0 +1 @@ += \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/22.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/22.sc new file mode 100644 index 0000000..40296a0 --- /dev/null +++ 
b/vendor/github.com/gorilla/securecookie/fuzz/corpus/22.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxTd180RjZHLWF5bjhuazNjQ1hmZXFIdlJMQnk3dDBDLTZpdUgzeHU2V0dFWFFUTnBhaEc1Y1ZHVllnUGROS1BjTGdnM2M1X3drSEotdGNCMEhwVE1tS19nMURTdDhCTUNGMjN1WHpFd25yc29XeWVzQkhvUXI1Qjlnb01fbkxZaEpsVVhsLTJhTFVaeEJjVGt1QjR6T3pNek9aeTRtQzlFQ2FTZDRRLWpTcjRaT1BHUFJRTFd2aUNXZmNlX29BRGgyWGFNUUhBRHJVSGpmVmlEbDVXTjlVcE1aR1JCM3lTZTNNMHdsbzNVVVdzUkt4aWV3c0cwenUtdFVaX0prOUZvTDFjTXwmW7ViLzDpDQ4HYcVtkyAja04RfLbwMybME8V1n2KnJQ== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/23.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/23.sc new file mode 100644 index 0000000..ba30140 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/23.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxxQkhqeVh5R1NmamdQVXVKcXpZa1UxWG1JdmxsZ192RDF5RmlXem1IaWU5cm1nVzhEWThJR05sQTJhTWFWOHhmZVNkWUFseUtNS2NVTlRaaFpFWElrS1hpZ1AzMFdkVUNEeklGT0lIUDNVQ0pMU3g3RjZJb2RkUnAzVkFRNDA5RzI1ekg4TFpUWU5KTDhsNGY4QVA2NVpmcTdYbWNXWEw2OGRtWWozQ1NhVlRzaW93VHl0RXR1VDBIMDBrRy1IZFBiZFNyVGU0bVEtQ1NPNVVkb0FHREM5UnR4Vi1LajdsclJzbnF3OTlMU09TdFBTcTM1eEo3WEVISFJwRlNhMC1lQTZiS0tlSDJjRTZMc1FKT0VrSWIzdXZtRkw5UHlQTTZxQT09fLEJ3Ff2tP3aJxgI5TrhzBOYMEF5guG6Lkn4PrPGh_uU \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/24.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/24.sc new file mode 100644 index 0000000..79b19c1 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/24.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxWQUVBX01zRWJrdkcwektCQU5FNWdJTmZmQkVvenRaTVVNTmFCcUtJS2gtWUM1MlltMExlcHFrVEdGNnQxbDNvOGFpSHQ3c1pQak1DbUw0Z29KT213QkJneTZrYTE0a3hpc2Q3SHNZV01LWGJqdWY2Q1p6TjlTX21pOU82Y2pVUTJzWE1ITElVLXZNUkxJT0hEVy1XalJmc1NNZGVYdlRFSjhPQUFBTEdTLUhYcldPQXJubWdXUnRNYkpkSmtJSklTZEhQajZVbGw1cWYwUmZPaEE9PXzAmHnGMLYEU2NdJfNpx0XFAoVqZEexvIbAIvvO1oggig== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/25.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/25.sc new file mode 100644 index 0000000..0271610 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/25.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxUUy1RMHhjTm5rM1hHWHRPT3prM0lUT2F4NGxERmNZMmdDbTJpdk1ObUpXZGZnSEZ6czQtWHRkNHFDVXBaREhYc2xIS1FnM0FjUzlBb1ItVVBRR0c4THRLaXJyOUVwTndCSzdpTGliR0xsQmpzX2FjRmhWaDdISGlYVFJFNmowZC1FYlpsbnNRVUtFei0xZVpUV3loUDluaGhQM2FNSlZaVmRwYThySm5RcVpEdm9rVVdMUkVpY0JuQ3g5RDNyUFZkQjVldzVQazdVZC1vemkxTjRGVTlwWlRsTGZZNURXU2IyeGVQV2JiNS04OVVFSW9QR1B1dUVtUXBRPT18YY5iRJLicxAb8HQ3ztH55VaIsETVU-ih8XWjqvUFdWc= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/25c648c4c5161116b9b3b883338ddae51f25a901-1 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/25c648c4c5161116b9b3b883338ddae51f25a901-1 new file mode 100644 index 0000000..8c7b9c3 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/25c648c4c5161116b9b3b883338ddae51f25a901-1 @@ -0,0 +1,2 @@ +8Q== + : \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/26.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/26.sc new file mode 100644 index 0000000..62a3d14 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/26.sc @@ -0,0 +1 @@ 
+MTQzOTkyMzI3MnxWckxCcURZVzN5ZXpGWUNKUU16LWN6eEwxeERyVERpeUFZQzJNOWhrN3ZPSV9HbTU0bnJmcm13dnNTSWFWdElnMlA3OTg2cDhubDctNGo2WklHS0dSZm1Oc3FuVk9TMDFzckN3WGEzRzNuNDNvd0hGMVNtS2hXNnVkbmg0Skx1MUhmTkU3dVd2TGUzaEV0TV9PdEdWcl9wNlNqbEZyTXcwRjAwc2NjOGJhUEp0b0Z5YzE3RWFCZV9JekdXSmhxWlQzdk5wblE3SjFoeVc5dXVlc0RGN3RSMD18GWFHfNiy-kbdyUHsI7sohnd2psiLzBQsh4EqtAkWNbw= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/27.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/27.sc new file mode 100644 index 0000000..e94851d --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/27.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnwxT2JtNzdGVGpFeEtTMDBtLVYwbDd1djEzbzJtbnJqU0hsNExlb2F1VkwteGY1VkJESGp1TEx2T1M4MWhfbndNN0tsSzJ5cXdyWEhrc1JnVEt2NWh4VnoxMHkwWF8yMlpOWnVLYnR1ZEJOWHEwUmpaRWI4Njd2Y21XN3JnYzd5d1RIeEtDU1YtdGdyX2tFN1AtY3JZZDRNdVVJSG9EWGEybEdGUDVENE1tWjYtNnE5UEpLYlJEYTNaN285LTVaeU98-s7M5tZwxQHP__ru0pr2s2RvJoHKw3UvrX5RySOdkyI= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/28.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/28.sc new file mode 100644 index 0000000..7bdff12 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/28.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxaZ0RRU3NQbkRQMEU2eE9IM0pPY3JKdnppcFJnVHhHZ2FiQmh1YWU0ZFh2eUREdmNkSmtmdTBueHJqWkJhS3ZJN2YxdGpWQUZQaXBZWlFMd3ZwUDlhZ2J4dlJhbGtCMmFmbmNGQTVlSmFZckh3T01oZUYyT1kwb0RvejJZMlg4QTZxaVJ4a2FoeE93U1QwU19BREo4SklicGxkYWZBY0FucE5VbXJkWnNwaXpjR1o1QUxQTzlZNl9Jam13Nm9IZmhsUFBfazBpcUNKMHdyOU13OWtMSThpYkJRZ25pbWhUaExxRzRUTlRBTjBjanVlZWc5WW1rSS1KdlpUSUo4YVEzeEN1VDhlc0stSFJ4YmRyVmNWdWRZZGZpM0hudEZ1aF8wV1hjMEFrWHJVTEhGdz09fNbtYysnGt4wf4hzwKO35zhSLDX9GXSzLkircCuJMmzK \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/29.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/29.sc new file mode 100644 index 0000000..742b8d4 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/29.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxNN2xITzVlekdrNzhMT1BPSmdZZnJuOFppblJjSWNpTDhfdXFiVEpIR1pqeFdnRHlnZEJrN0o1UDNLQm9MaC1lN1NLMVFiOWw5ekpKUktJaklZQWJKODZJUlZlZlBFZ2JObmpVVFZtUzI3R01HQ3RHNEI4RF9Hakw3bGMtUW1Ucll1Mjl2SXlJRDJmdnRaS2I3SENGSDA2U3hDbGZSZkVMQW9xRXEtREl3RzY4cklDSlZVV09mWm0tU0JwaGxKXzVWTU5GY0plZTBIY0RCOXh5cVI1dktGRy1XVGk3aWhJcllTX3cxa2RHVUZnLW45eUl0VFZRTHVrYUpMVWlrYlBSaTdOME5PWTVWYm9SeXFpM0dQUW1HOGFJWGFOWlllNV9fSG4wWktlNW5FNzZ8ZrFQcdFDl_-VscnBXTUhM2XvBRo4Ev0Ap24dqkH3jnI= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/2aad7069353f2b76fa70b9e0b22115bb42025ec0-2 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/2aad7069353f2b76fa70b9e0b22115bb42025ec0-2 new file mode 100644 index 0000000..9d69512 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/2aad7069353f2b76fa70b9e0b22115bb42025ec0-2 @@ -0,0 +1,2 @@ + + : \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/2b28c8193457fb5385d22ef4ca733c4e364f00e7-4 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/2b28c8193457fb5385d22ef4ca733c4e364f00e7-4 new file mode 100644 index 0000000..9ee09a3 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/2b28c8193457fb5385d22ef4ca733c4e364f00e7-4 @@ -0,0 +1,3 @@ +8Q== + + diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/3.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/3.sc new file mode 100644 index 0000000..d97d578 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/3.sc @@ -0,0 
+1 @@ +MTQzOTkyMzI3Mnw0b1JaSmp2TWp5M1pFeUJVRVN1VDZBSmxFNXpMMGJ2LTU5aXM2N3g0Y2RRdGlyWEczT1ZoY01fZldvRWZhYXAtYm1OQ3FQUG1IY1N2M1dTZV8yeEpuMWtQeDdXcno2QjI4UUptYklOOHdQZnhTQkFJRmVmNF9tVHpHNUpJdWJ0cUVHZXdacWFabU1HaWdfcnMxX2RpTWNkbU4zRDBfMnlLUjYwTlRMa2NnajlkemxKNVpLTTJPNnRtcDIxNUJ1TU9Uczh6S2FmUGk0YVF2UVAzS2o5Y29IM1FYZz09fGpjKWy2IhR07BHKlkb0Zr7qxojZDZLU2AKB-1azftV0 \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/30.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/30.sc new file mode 100644 index 0000000..00953b7 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/30.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxZUHVVYmEyUHNRUFlDV3FLU2s5WDdIY3NYWlpYNENISEVta053YzFMQkp1M01pLW1Cck1neUVNZ3BpZ1A1UXNNVXcwQ0NYd1k3bkNtUV9hMDRlTDJHUFM3Sm1NLXREWUkweElOc0xHV0NYQW16dUU0QnQ2S3FQdGl3UnlxX3I5NGdwenVFajlXMDVON3VwZjVJYXh2cndSVFlicHNXY2NUOU9SU0ZDQm5ySFpGNVV4dnY0dWdQVFY2U2hJMmR5MDNRcmJickE9PXygp9rjsTBYaNuCDC0IRxtbTx47LZzJqOHHkfbe1LGNXA== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/31.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/31.sc new file mode 100644 index 0000000..78a6f42 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/31.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxGWHN6bWdhWlRTeHBJTGlJcVdkVXZVaUpjSkRxMDF6VzU1eDlDQmFUbjJFaU5RUzB6WUJ4c2cxUTAwMDAxZU0zNGR6REF4cG1CNzQ4QnJBVlZaT3dnOTBBTUVnYXFiaGFESUswMEtaQWNCUGNQQ3BwbmtMY21hUU5CaG1PdmtLY0VoMk1PaWNJd0VfLV9DZW93QXFjalhRZE1ieUZBUjF4QWhDeVBaUnQzMjV4SnFuZE1fOU9ra2ZOV0ppLTRHNXg3QzktLTVtYnlkRUJaTDRMbllxSW5kaHhxd1FZYWllLWZQUW11ZlhUVWhLMHZ3anNRUVVIbEVDd0F1cXFjNUFTQlZoNTJWT2RNVVJyM3hEY3IwZDZkQ1JuNThfT3pvUXVvbjZWOVNNTWY3X3hEbnM9fLxht7FvJD4a_FPtZFHY-Jty04388qOsUPtYAh_v7RzR \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/32.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/32.sc new file mode 100644 index 0000000..f273faa --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/32.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxDN1d0UkJ5Z295QWp2QmJEaXdUajBEelJRRDNGVEJDalR6Z05wUGpoeWc5TjdEZC0wTEljZUp5VVlHQmR3ZmRkWkM3SndCaGMwclQxSDJfVEpjSjlQVUxLQ01zZnV6TmVpVWZwelZrNGgyTHFKODFNZGt2LVVySDJ0RnlpQk40UWNFQ1BTTF83cjVBVGhzNDJCTnZycWk4Ulo2TnlxZ1RtXzlEMXJpel8xMDV3emc5aHFiN2dDS1NyV2FqcFVIci1OVVA0bGtlNXNaS1Q4dDdIaDVnY2lDeGdoUFk9fABSA5I3BIpmZmQgqbHgTAqVXIli2piNjBtxOxdjvqOo \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/33.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/33.sc new file mode 100644 index 0000000..e815509 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/33.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxTOXU3QS16QzVwMnA0cW9rYV9xOGJaSnd0TE9mU0ItTEhIaXp0OHpTVVBiZDllTlAzV2RrS2tzdDBzM3JSWkFCSWc3ZWxEZm5sLUVydnFVT2x0eFo0QWZYTTJUMDNsa3QxZldnUV9mX05pcklvSXpmX2c1bzdGLVdQdFd1U2VrTkdscHpJZVNyUkMtSGlwXzNnMFRZbnJZNmNrT1dCNjNiakpYNjl3QVF3bzJMMi1HRHzsH2Tt9rxpZH3zsEqJu4iMDx3b0HY_DG8ww5NHxWnf_g== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/34.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/34.sc new file mode 100644 index 0000000..2cbbe11 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/34.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxyYXZWSDBFYlhuT3dJd1RORjQyck9LU0RJcXFnNTNpVzlkNkliZzhJMUpOWnFfMW4wSmhpblZZRUpkMnZ5S01uUnZPd2ZROFZpcFduTXVzSDlHMVZyZmFqV09odm0xWkV5RXNoazEwbnNsOUdQV0hXQjNpVkIzWTFJUi1sQk9IZHluM0dubndmbkxLUnw71foKMZj5sjRaa8r9NSKLljA4s-sV3iSuUIDE-rhFeQ== \ No newline at end of file diff 
--git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/35.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/35.sc new file mode 100644 index 0000000..000bbd0 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/35.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxwT0pPY3dJdDVuWVVwdzY2ZHhWS0xsemNXdDJnNkp6VWk0X2k5bS1ZMHUwdERuSmx4SFp6LVJ1R21IcDJRMnFlOVZVUHI4SFBoUWlZY0RQd1NkeTFzRDlnemNVSXVuMU1EZV9OLVNHcm1BRlQ1d2xXQ1AzTzN6dGNWdzgzTW9VbjJQU09CbkVZWVBXTHVHbTNFZ1BzbThGSzNFdmhqREpSS0FtVkhFbTBxV0RHUUpBPXyJHQBE0YGJZylQO-9FgF9v1AR1MQt9RRljMVjtSJWJrw== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/36.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/36.sc new file mode 100644 index 0000000..057e7fb --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/36.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxkZ0pabTA5S0VrZFFhTG5vbGx0NjVoai1pbkR0RmI4VWhVaG8yZGNabWJmRW1lajlRdmlkT3Q2dFhFZTFLNXFrMWtCamlEV25SeTYxTGJqWV83M1Y1STd2ei1vaXFqY19pWnlwU0I2STdOakhpd1ZGTGRKV3R3VE5VZ01EcldPUVZpaDgwUnN1Z3p3YkZGdHk0QkhxWWNTcGFranFhcTdCR1NteGZQb05jOWkzVTd3a0tnPT18_ii97jAl3_Q3XFaUY2r8Fk4bqBhysx1rSQw_ppe-oSw= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/366e3e0397c8ceca170311fb9db5ffcddf228b51-5 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/366e3e0397c8ceca170311fb9db5ffcddf228b51-5 new file mode 100644 index 0000000..927a20c --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/366e3e0397c8ceca170311fb9db5ffcddf228b51-5 @@ -0,0 +1 @@ +8Q== T \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/37.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/37.sc new file mode 100644 index 0000000..a6cc973 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/37.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxNMEpYckdpU2JSV0VGS04tOXFGbFZmOFBxRnE0akZZZGZYazh5ZUxnYnhLU3g0YmNaU2RYQ1BUZUdaYXluTnZmQktleFY2WTQtWFhyblptOGk5Zkc1c0FhTkpjTmMtNmxjbEV0dkZNT203cWVUSXgzMFhscmxsQXpkd043cEQzU3Fpc0dXWk95Q213cTJGSUxoZjBWREQ2aVdfZXdyWHluMElmeWRHZnZzVFBDUE1ub2tCZTIxa011UDduc3hscEZjMHF0NHdRdnBtLTQxZF96c1h2WlZfbnV0V1B4ZjJwWTlodFpLOEU9fCETOj9CrLkOjHQ49IVc1S1yDcqlwQ4iVFaSZIiovrrN \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/38.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/38.sc new file mode 100644 index 0000000..472dbcf --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/38.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3Mnx6dWpVS3poT2ZaVDhwZVhGS3ZSa3hCckVuTkNPbWNiOGZtak92Rklrc0Y4dEZVbWlfNk9DZmNNMjZ6cEpaeHptNEdIWEp6dWo4YVBEbW1Da2VZMU5LX2FyelJlRmdzQkNwdXZGaDVLQXM1eVp2OVV5TjNBMUYtN0ExMlhMMkZCUklRPT18uOhvGznCMRvXlfsraWq4DfbciJOMOzHzoW7PMo0aIxo= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/39.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/39.sc new file mode 100644 index 0000000..b1ce72f --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/39.sc @@ -0,0 +1 @@ 
+MTQzOTkyMzI3MnxSUTlaRTRnVHkyMktVSWJCZHBCVzdWUDRtRlJ4TmV5OGFCd0pPNllNdEFJMk96RDdLOWtqN1hTNFR1MzA0cVd5M01KM2d1Z0Fud1BQcTQxaUJGTFNkd3lqZTFpMGxnbnVwdGFPX0x1Rm9aVWVyVHlxeUdoaWFRdUtPeEhyY3ZYQWF6eVFRSE41eFhIZmxkaWloeXlmN3Y2dFJya2tvNl9WZkhXaU9IdTdPcHMtXzQ4V2FHOGRwSFJ1ZjdES3VqMTFRRjZEalozWVpjOTFBY1FoeFc0VDhZdFdCeVJhS2JCc25PVGdIaFl4cFF6Yml0ZHlCSEk5c1NhSHppNUpEdF93bjRTUENoOFFrMjlUVU1yeHNhb0ViWEFwWFVWRnBfQkxhZmN3aE9XcTVyVkVvVkRWb09sRkFKMWpnbTZaa1pRSmxORTBVS2xVUEtvSm43MD18oCSPYwyWuG4Fi_L4i-Ck2AtkAP_DFNm2P25AONmh8PI= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/3916f239f9da91baa003ee6dc147cca7f7f95bd7-2 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/3916f239f9da91baa003ee6dc147cca7f7f95bd7-2 new file mode 100644 index 0000000..63ed914 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/3916f239f9da91baa003ee6dc147cca7f7f95bd7-2 @@ -0,0 +1 @@ +MT=` \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/3e70a0a4bb1ecd96f554cbef9f20c674ff43e2f6-10 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/3e70a0a4bb1ecd96f554cbef9f20c674ff43e2f6-10 new file mode 100644 index 0000000..4770f53 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/3e70a0a4bb1ecd96f554cbef9f20c674ff43e2f6-10 @@ -0,0 +1,9 @@ + + + + + + + + + = \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/4.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/4.sc new file mode 100644 index 0000000..d2d6ea8 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/4.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxBdWtBNGVWdUtUN2pwcFBUWGl0anpObmNRYmlkMkdZamxZTEotQk9UWHJpX19ydk51a0pkNklaRHN1SVVGYzdWSHRNRTRtZVdyT0VOTkZncUxPaWtZTkE5TzBOeTBOc0RyWEZuTzl6eEpHQ2lRSVh3MFNmc1JNekxfMzFkOVhmN0FldTdlMGFfbXh6bWRpYTZuQT09fIXGQXlpKRx2z84DdIjKzSaH-rt1LNVccss7Pok37t5R \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/40.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/40.sc new file mode 100644 index 0000000..5f8a783 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/40.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxzTl9JWlczZmFOeEltLU45SjQ5NDhkSEJkdnpBU091ODZPY04yQUJPV055cjJkaUVtdTJXbzd1WTZRdElzMlVBdUExeGVZZHo3ZHQ5SFRBZGs1cFhkVF8xRUxLOU5Tcm44Z1FyMm1INW9CeEFZbDJ2aUZyeHRfbTBlY3EzcXd2Vll1Q0tQQ2FkOHV2QmdUazdESXN0NWNqY0tILVBkd2FJbUhENVh4cXBPT1NhNG9LVlJUSWZ6eC00a1RLVmRXWUhBVFNjbUVDeWFHd0kyN2JDLVVrR0oyai1XRTJzQWdTZ1ZnMGNlVU1pTWtjc0VkQTNRdFJEdE1TdURyTnNCd0w2Mm5FV3Y3dUtpZ0J1eDhjQWVKWDJXSF9SR1pDS2s1Njk1OV9YSXNNbEg4Z0RQbW1lQnl3OVNRPT18xpN1Gzy5f8THuL4VpHgE6rkEAgP8s-JALzUMIdCy46s= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/41.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/41.sc new file mode 100644 index 0000000..a46dfd9 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/41.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3Mnx4XzhJbWhsRF8tNXhHWE40QkJqSC12dVNPQVRZcHJYcXhOSlp1MHRERlA4Y2Nhd1dpV3YwRktxMy1WRDRkSHhsOEJhMnFmMms0d080dG9NMlM2Y3NSbzZoOWpjeUFkZjRod0xFNTF0RzI4ZDJlZzRzUzhXSDU2ZHZfdlhnY2RfNGRqTzQ5VUJCaHVVOWNXczR4bnF3cU5XQURvdmFDSkJKVy1HT3dvZ1VPVzVDOFliMFhaazBYVkJfVk1oU3B4UExYcllOU3VUTnAwYi0wa0dvd0JvcFF6dzVuQ3ljdENIUHzmuNnj2ddk2WHZr5R7DCU110FZIt0WYBt2eswzaM4gug== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/42.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/42.sc new file mode 100644 index 0000000..980d902 --- /dev/null +++ 
b/vendor/github.com/gorilla/securecookie/fuzz/corpus/42.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3Mnx4ekswcTZlZk1PZ01kMGxjbERUTERudFVIOWVRRzduU1FrZG5GRWdYN2VIcC1JRjllQjZnT2t0WWxFM3JxMmxHTExtazhfNU54eHNJNnlobFJoQjZreVFteUE4WkVrbFJ5MTluTlFyaEcxT0w2VUV2aUtaYWhvUUJ5WW55LUVHSFBpZGlwQXpRYWVZVVpXMElrWHJjYURqQVgzaHlhaERxNktwU3VLa19iVkY4QUp0WjlPTXcwNzNVNVRqTDFRVHBxYkZGc1ctU2plNWtqX0FvWDA0OWpkV0YwREk0MGNEM3AwQXcyT2FkNGVWeGtyWG1QMkUyTElZQWphYjl6ZTNVaGVEZHJZa1NneE9UamhnVGZkVnFFQmRGN2JfZGhKUnZCcE12THV1eFdpX090RDVrNThheGthUFlJSjJPTnEwSGZCNm9scC15TGRMVjk0YkNUOUE9fBemXTR7HKYdKtHj4eD9GyTuMPweT6BzKJORecQUOHiY \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/43.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/43.sc new file mode 100644 index 0000000..0db6df8 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/43.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxpdTBEREVHaWx6OW9YZ04xX2ZwYTh0bVY1UXZYY1dGSk9XM0NFZ2VKektIR0F0dVY0RVVYNC1lLWtaWFdXSDl4aXlhQnNqQ3pMYlNxRFhXeTZtTkJxcmhKckd3U2hMV2xGeEJ5WWxsNFNxSVhFdW4wek9XdDJNTWI5d01tLWxITkk1Tl9IRjRxZnJnY0JPZ0VBS2cwc0Fya29qNG13ZHRvb25ESUp1UTlQM2hYSnFvelZyLWpYNHJqSnRnaTRFbkhVVnZyc3Fhb2UyQ3NkdnpQVjBxZkFVOVoyVmVOVExDUXpHaXVMNlEzQkNzdzF0M2RGODk0U1VVX3lGcDViVHpPTF95bFhGaFJkU2ZORDFkaDBfem12Xy1FV1VSR3ZKTFpJdW5ZZFJWSHZhVHlqbWN0ejkyOWlKY08yYnllSnVITlZTeWdDMVFSckI3dHFsdDdDSDNFUzNHUWwxRm5JT0NqVHpFPXy6hn09Iaig1LeW5S8uecM1E6X9JRmTLzFDWXDDklk92Q== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/44.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/44.sc new file mode 100644 index 0000000..5aad2de --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/44.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxYdjBGa0xlZFhqMmlPMFgwQUxxbmtjRlRCMWJKWWV6dkNvWE85ZElQeUtKQjFfRzRMa2pqbE4tUTM4djFUV2lzNTBLa21XMXVMSEx6TTR4Qjk2WmhiejVic0ZiOEQ4UTAzSFU0Q21QWlp1SmRQVEhGMjZXdFFTRXBCNlpWZVlhVEdFbG1PYnNkSWpmSVRFcUZDT19ZUVBuaHRIUXpaNGFiQnBTOHVrMGVTdFFvb0ExS0pYbWp8Vj60yLafH82y8qhmu1BxIr3gUFi963hxiFAdnBspPXY= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/45.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/45.sc new file mode 100644 index 0000000..17ef829 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/45.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3Mnx2aGc2ZVB6blVXVmdKNWVXTjlkU3RvVlRNcTFqY3czVFRDMUlwVEVzX0p1UHlfRmpmWmdualRhZmo0LVh0THBPVUhmUDZoekFkZEVrcFRhVm83dnU1cmZPdktOcFJ6UTIyN1I0MjcyUkU1S3ZfMlVBSGx0MFU5TWRuRWotMGhiWmhELVV8610FbnSvRth9SdVC1Am-lp-2-VAR5tdLzBQL7kFPeKw= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/46.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/46.sc new file mode 100644 index 0000000..9645ef2 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/46.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxUY09CUnpyeFZzakpjOVg5d1VENEFnRzBUemZZM3VwUzZSZExXQXp3Qm51d3d0REV2OFdkS3NpTTFQVEtzRWdTSFBMdklSQVAzRk5yTzRKOUJhXzBIMm13MkxZSVB5VFZjclNXbWVTQnRCeWRrWVNjenl4R3FtUm80RkJwNTFNVWFjYUhlb0psUGowVC1EdU9fR2hLVE95SVgxak1QR0llU0JxWjRQcWc5WlQ3dUUxRGVaU255QVhGeHFGSmlFeks5Wk9TQnpnYURyZjYtMnQ5eGxiREhPMnQwQ0dGTDI3bkxBakY4LXVkT1BBM0Y1UnNRYV82QlVZNDRDYml4cVhFRjFBUHFmQy1Td1N3VWlEcjI4aGdqQjhId1lKQkZSdXd5eVVScWJPVXNBdWpPTUs2S2JzeDdBQzdDWkxMX2hENkVkeXhIUT09fFSsK7EVDyN-twBBPhUpAJbKWVJL3e3e-oryTS0Gptk_ \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/47.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/47.sc new file mode 100644 index 0000000..fa4494a --- /dev/null +++ 
b/vendor/github.com/gorilla/securecookie/fuzz/corpus/47.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxrNlhBSTJrN3dKeVc4SzltRGVDNDVUcVpjSldJRklfaGtQcjMyN0EwVVp5ZFJUakZVM1ZzSlktdXBXVHF3bVl2ZFU2Y0xDTFRuRGJ5bGFvZWZJQ05qd1d3YkNNaVk5d3R4Qk9lR0lFX3A3UmxHQnZEUEhZMjF5Uzhra1pZaGxsVXVxN1ZkVHU2SnpkbUtCSmZmZ1JjRU9STkNLSkxmeGFfQThERG52VjlnM2g4ZU5oSVdCM218tzuauoYyLqD78b9QoY073Z90G2zU4SVn4AShBcQiZdk= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/48.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/48.sc new file mode 100644 index 0000000..b85cb44 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/48.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3Mnx4WE0wVjZ5eXRsN0ZSdzRnMkY3aUhQZHRjTjRlRXNuRDkxU3ZDeFBGUl9UelZPSnNsTVJuX1pNUWx3RW5USGoxbUtKNU9sT0dSZTRybnU5NEFrSmVDYy1SMEItVmVZblZqVHN0el9BMTBaY2NHZVlkMGE4ZDFKNTk3NnFnc0NweTFZdG4yUVItVG1rWV9ZdjYwai1XQTBXem1RZVJ0WUtqYmtuXzZVTGQ4b0JibklublZEWWJsUVRYaTRRNURSNWJUSDVEUlFvSDR2aEhqX009fHDobPXrZ7PCnuoYwkWqQHmvqwG9flgTnby1RqAp0eEk \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/49.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/49.sc new file mode 100644 index 0000000..7583719 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/49.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxPUzdKazJFaWlTQmtNVFltQ1hmb2FBdzZIZ1NndnlHWmNRbjBkeFE2b2ZZaF94TnVka09sUXJjUmw5UlZmYXdZTl92clpVV3ZxSzhWajBSUWtkV015STNtak5SNDNkSXc5NnNJTkk3SjVJTkVaQVJpaXpOVXl1R2ZlMldZa0lIVnprMk93TEl4UEd3TUhTNl91QU1BZ1p6dlBqSFdrOTlvcW8wbTlKa25KenNFS2Q0T0RPcy11cEp0ZVdrbG5kNlBkUk8zWG03VmF6TFdBLVVVYzJlQ3lvSVQtbDEtNmUtQUVQY2dFSHZtYUtpT0t4ZDZQNUliM01LbjJQdF9wc0lWNkFaNWdPSnMydz09fCUtVdRsGfGjq0xST1F2G02INSyzVO-RQV82fZAx-tF8 \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/4b6a3b5efec9fd7ff70c713e135f825772ee0c5b-6 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/4b6a3b5efec9fd7ff70c713e135f825772ee0c5b-6 new file mode 100644 index 0000000..dc9ef02 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/4b6a3b5efec9fd7ff70c713e135f825772ee0c5b-6 @@ -0,0 +1,38 @@ +J + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/5.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/5.sc new file mode 100644 index 0000000..44df1b1 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/5.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3Mnx5UlRweXlJS0JZanJyRXNFeDBnVWNfUjhDb0dlWFVQNGpUSHdxSk81ZkRrRG1rTVppX3pSeEdFU0NGWnlJTnF5alJnT2lESDdRYnpxeWo3YU1aZVFuSkFYSlA4WjIwaVFQTFJrNTk2X0N0eHM1SURNbDR1Tkd0cmlXUEdzeDJFb2FSd2lfemN2NlJFOGt6TnJDRk9PejMzeWFFbzAwcUhzZTlXZVFpWmdEdEg1fKISvrG7Go5WiuNqKEVCmCg02rTvoDUJR1MDwNdR_a7Y \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/50.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/50.sc new file mode 100644 index 0000000..91d0c5e --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/50.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxIWG9SVjBHTFFaZ1hTX0RYemlZZ3VtWktBSTNsYi1QZ25YbTdOV1ZWNTVNd1Z1a2ZzOU9sZzFyVklKUjJfbjY1bHNjbHRHZnR5Q3h5TVR3MGNXMlRDT0FqLVNnaGNfYmlGSllsSENJS2Zac2p3RHRwcm1hWER6eDdrVEdaT0ZuTy1YWlRoR2tPb1hWY2Q1ZmNVd2o1Znc0SHo1N3BUV2tmVk4weEZEMnRXRWhtTjlNSGRBUzRBZDNCWWVPMm4wVlA5TFNRR3l3Z2lMd0xyWVlzdEM1cHdoZzBvUjlVcThBN2dfQWt0NU9vR0ctX2h1U0hiUVFMfKEklV7w5Qdw5B-zHGdc_8KwMCEaJaU93kfrkF0UlCO9 \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/51.sc 
b/vendor/github.com/gorilla/securecookie/fuzz/corpus/51.sc new file mode 100644 index 0000000..7958561 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/51.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3Mnx4QmExOVdwZHNGcUxERzVObjVVcGwyS3BGQWwzbDRMSWVEU25MMllkUHhTWU94cHI2SFJ4VTBUOExZeVZpcXlTUXV1N1RfMWkySWVHallzRHNnY2Vqbm1QSzBuY1BWQURZT29fQ1NfZXFSaUEzY05xc0dzSmw3a3UyZlh3bDhlTUJMcmN8WV5M_DCWr0mpbvysNGaacX1PWwZukYRGCUd7zasT1Z0= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/5122906052326fb2d0f65fef576c1437b95256af-5 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/5122906052326fb2d0f65fef576c1437b95256af-5 new file mode 100644 index 0000000..2eb43ed --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/5122906052326fb2d0f65fef576c1437b95256af-5 @@ -0,0 +1,7 @@ +8Q== + + + + + + diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/52.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/52.sc new file mode 100644 index 0000000..070657d --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/52.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxhWkw1N1lYaXhlYXd2ZmlIeXFKMGVwZWZrRG0tYmNBVzY5UG94U1lLTjlPcVNDS1JUR1NoNkd1cnpMbUQ3OHFmMVFMRDAwOUFBZzh1OGVJX2dPR3RlOWhUVVZXcEV1NGNBcGMtYlB0M2tidTNUWXpLTVlLMHAwZVRQN3JVcHk2QnVXSnJyRGVpWTEyREtEQ25KaUZmZGRsSTRtZzN6MFJKeFFjMVh4b1pvOXBOVFJTZDVhY3ZnaVJXa053bXRmSnRha3B6VkFxUFZrM1JtUWlLbWMxeXNuM0t4UmxGXzNBPXwxnzQayybnomVu9KKV09VJw2upuS6v6c1S3b0sJWkzYg== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/53.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/53.sc new file mode 100644 index 0000000..c15bf39 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/53.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3Mnx1WVpMRXFVYXY1X2JmVkd2X292V1NyYzdRc0k1ZGIwYm41all0Sy1veU5IWHNXYjg2OGxGdFQxdHNRR3RqS0pYZU10bmNVOHBEOG9OYXRSSXpnZnlwX2hlUV9aaU53dUZJWUdkM0FBcE1mTHc5QXpRWU5jYkxHY01SbDk1Qi1OaWRidmkwMnI1OXRIM1BPN3ZNMkx4M1NHRTVGb3pGaHQ5bnQ3U2dVOS00SmJ4MGZndW50T1ZUQllCWHBnZ2dxcTRZS09sSkJjTnNTZ0EyU0NufLwPEP2PJ8RYQ9tckzQTz1lelud5sVzo6Jlpg8JkdBnE \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/54.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/54.sc new file mode 100644 index 0000000..d3245f4 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/54.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxlM0tBMGdMZnE4V3JLTHAxMFdIOEZTTDcxMzFhNlFaa21sYWlVV0dPc01ST2t3eElrd0pYSk4tMzZEUUVCekdYYWo0aXVxdlNHRXJZN19YR3g4MDd5ODd2OUpMb2c4bUM4TDJCWmVqdGFwd3VBVHFqXzZaNFpoQzNqcGZmMzI5RUpOX3p3N29HUjNtdHV3MDNLb1o5Ui1HaDhEOWxIVmp3ekE0eEMzZlpEQTFWaUlKQjl4UzBleVZXajlsOHpKQm5PcjUySF8zRC04RnF3dV95SWdfcm96VnZQMFlNUTIxMkh2dkpvRGgtM1dNa3hhRElCVEdTWGhqQlFnTndEbjZZVXFiRUVQNVpmNFg0Uks0cU8ta2tmLUxwSTlBZW5KZ3g1WGJpMUtVT2RXSmxIOU1fOEV4cFc1Snc1c2h5eGlqWTkzQT18oYMWx-zYrvIFzHtsBzkW0omgLkRlrOi2tV236JFCT_M= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/55.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/55.sc new file mode 100644 index 0000000..36c6f9c --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/55.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxwNXNlWnV2cS03bDdrdTItdUxtRUZyMC1KRWFJd2padHFyS2hhY005a0szS3hkanY4RUp5TnJPMU9Cd1BJVlduSV91SFhKQnBnZktlTUgxdG5MRV9IcWJEQ19fZzZzNDIxaF9hQ0J2YV91Undnc21XMlduMUptSV81Rl8xdnY5NWN1aFdmaDFQUTlkWXlnd2NsY0pLSlZGemR5UFNyMHdCMWc9PXz_khLrMOYAvmm8TxPWewFz30kjj7NEr2ySyO_Tmifbew== \ No newline at end of file diff --git 
a/vendor/github.com/gorilla/securecookie/fuzz/corpus/56.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/56.sc new file mode 100644 index 0000000..c136dfc --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/56.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxFY3ZmNU0xdF9GUGZJQ0FvRU5JOXRlWEFpcGZxTXNhQUFhZlJPMzlaT0tNZzY4Y0VZZ1ZxWDRtUkJhZXo2M2d4TERXd1Q0Tngyb0J1czdrMGV4RWVfLTFWZXFTR0hYZExSMDVuSUVNZ19RPT18GQy_KUXhl6Dgc-_uGeJPfHdhznbqAKhEA0jEZq0wSKM= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/5601b416f11820e0203c84570e4068cf87acad17-4 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/5601b416f11820e0203c84570e4068cf87acad17-4 new file mode 100644 index 0000000..12127a1 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/5601b416f11820e0203c84570e4068cf87acad17-4 @@ -0,0 +1,2 @@ + +  \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/57.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/57.sc new file mode 100644 index 0000000..89b174a --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/57.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3Mnx2bVUtU0tHRFM4cTlKbXJ5Z01TdjFoTXE2VHM0eGlSQ2czV1h3aFM0Ym05YXVkWThqWWpwLWZMbVp2Tm52R0Q0QXhMVGNhQUh6YzVWc19fd2lVMUNGYmFZOHVJWkd5SXhlODhtMFp6dV9vc04xdkpVRDctcTBGUk1MSG4tTE5KNlZLQWQ3OXdqMTMzc3dMZXpza2lSVmdmX3lpejJZcHp1RVlFQXg2aEx3VTlyZl8xbmZxdkhpcHZySUVsNUR6eU51NEE5NUFaR0xSNG8tSk9OQjVRLUxsWldNdUF5M3RhQ3FGdjV0Qlk5Wl9hRzdkZndtTTd5QnBPcVNjV2xmOEg0QVJkSEFwZnRKOG03MjR6OEJIZjA5UEVvMTNHVHwcuDVKBM6I7Zpg7kt4Pk-iLloKwO8M8f1qNQH7qLp0sg== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/58.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/58.sc new file mode 100644 index 0000000..78b8463 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/58.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxoMjc5X2Fyb1dtWUk2MmZlV0NyNWZMU3BZUl92ay15blBZWnNmQk1qWUlkOS1pLWxkUDIyLUZwY3R2X1lRQzZNMmJnZV9iYXdYNG9pMmVISjZsUHNMSUVwQnBHdHFoSXc4Mm8zcVY3UnpjYWZJaWdSelZWMEI1V2NqOWpFY01oUTVXR053TzlyRnB6LWFzQzJKckZ1cDJtZDJndGFPZFRxdWYtc0dGUGRhRkVsdGJ2WmptUmNlQnZONjAwOEdoamk2anItSzFWVXwHNj5MIcwy6_5z9_v4k_oCTN1AX7sFtrk25QICzUSuQQ== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/59.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/59.sc new file mode 100644 index 0000000..c48464a --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/59.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxsRmNtVjk1VXdqbnlWeDVDd0Q3QlZSaXdaWkx5Sks3QmVNb3hKVWU5cE0tdXBtQmJuSUkwNHJ6SlJwUmVjcjFhNWp3RDZNN293eGtTb19RU1Y2Q3NpcXFFQnhVdWR0eUFKYmNjT1dPNG1xZGQyeVMzYVFuOWROaTQ1allsRzloQ1NseVVzcDQxY0hGdVFGWnZyMzFWZkNuajBzbGZmY0M1WXZTckxSS0tYOGNJU1Rzay1WWXh3MHBhV3hQZmdFUnlpeTVUbXR5NzF1OW5CQ0h3SHhyLU15MzdhSmVMbVlNem50ODNxT1RmQTkyVEc4NksxVWk3SU4wbnNKY2xPdz09fEL3LkMkHs9AH_Kktdn22O9rjO5cdPayirCVWvy7NdFK \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/6.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/6.sc new file mode 100644 index 0000000..e27da3a --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/6.sc @@ -0,0 +1 @@ 
+MTQzOTkyMzI3MnxpUFhiYVl5dWhheFJxclp4RURnSWpUTnZLd1NDT0hLOW44bnRhc3dES3VZYVJsYzlBZkgySGhfTnJYcU02ZG5zY2M1TmFOZDhETTFqT0tMSklESmZTa2VRTmJYcm5UM005X2p3UE9aUmx1Uy03MEY2R3lMQjlqVFRGd1NHWXhGUXNLTkwwbFN3a0VFcTF1ZzZlbmtvQ3luRDRBd0x1dGZxZlIxUWhwWWZhcDlHS21jay1PeW5VV0hXSFAyRDlERGpTa0pwOHhXcU5yRkQ3NHRHOFFrZVBManJDRlFvRl9qZUJvR3Z4V2F2T3JDT0x1QUtScjlrMlZ6bmFlTzhtd2FId0xBekktS240ZHEwQTJ0TG03UmJFR2tPVVdpY3FiN0xuM01ib3Z4dy1HZEFOUzQtYi12WkIyOWxPV1QzMk03SG9RPT18Oy9xErnKlbCafCV5ZRcdNQ4oAkM6AS78DKMLRzE3Gp0= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/60.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/60.sc new file mode 100644 index 0000000..bf32a57 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/60.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxXZ0t5V0dfQnlNV3Y4d0dTN3JseVBuUTVsSTlYWGdyRE1XWGRNZ19ncUtZRFR0RWJMX2FaV2hxWVRwakx0VDdNQlBrOFlWdURTa09Jd1JSUlgxTV8yc0R1V3hmcXlwTmdyc2dXVDdBTjR4WUstZEV3TXdFU2Q0NUVzb1JKNnNzaTZBSnhweWhFZnoxV0Jta3hfQUtySE1ZLU5aWGQzcVlrbTJYNXlTOVNwbjFPMnNzYWZNTzRVWXpKSGFpejNrN3FUandSVVUtYml3d0pvSWVIQVFENV9TZUdvajdGSm94eDZBOGtWZFFPOWJfal9CR0dfaEI3bk5rbDVZX2ZPSnlJQlZBaFVXalllMDFQY3c0aWtoYkctQ1YtX1VKT2sydVVPUDg9fLemJf00wrx74T_1wH-isE4OGkygea21I7GLkI4X7udr \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/61.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/61.sc new file mode 100644 index 0000000..ac692a2 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/61.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnwxaDl4VUR6MnlJNHU1alhmRi1tWGdabVowaEJsbzdWb0xIekw2SGdVS29Td1BWZjZrQ3Z2ZmQ5cF9SZ256Qkc4cHM0QVRXMWpVVm1yS25HRzdkY0hWM00zc2MwTVVfaWFDM0RBSzlqQ0VzVko3MkhJdXAydzY0VmRWQkdXQV9FeUI5ZER6VFhpSzBWVDlFcXNkM09QUUxUN0F1bmpKQ2VWWm5oUzVEalZLbC1oellOMUZzQjBnS2VxX3puOFdJdm45alhXVHBIZ0VZVEdkaVpmN2NHSGVwU0FlcTdzc2Y0amM4bnMzZTF6ZGtXLUR1TTdSZV9XaWZwZUdTMkp8GA8K9swZ2XKR0-0_JudwK_Qa4BiuzOOxvZWn6Gt1pBo= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/62.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/62.sc new file mode 100644 index 0000000..6e874b0 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/62.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxYaDYzWUFLYXh4VWJFVlM4eWMwcVpkWlZ2bEJxOFNYdk5sUTJfQ3RPYUxKT1ZOTGlfYmNIT2tzWUprUkFWQTdzdUgwQUpOcjgyYU5nYjFDRXIwUVhIaHpQbkVheXxTuTszScurs8XNj14TW9ZRbnD2xmn-zFDAIyc8ZUDrXg== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/63.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/63.sc new file mode 100644 index 0000000..22fe4dc --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/63.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxEZHpBelhtbTZXUjQ1RGVsYmdvSEVCR3VtQk5vbEg0MDF5UUxwcmRGeEtjd2tqR2c0Y2VWMXlwanE5bnNndkllRTh0Yll2NU5aUXBSVTdwazE0bDZLNXF2bVZocHV4QVBpVlUxdjJJTWtHMy1VYTV1TXJvOGxPa3JXT3FFY3g2VDJiM2NEZ3ExTS1RUzB1SGgzVVl4el9hTkZycTZZU05vR1JSWVhocGNDeV8xT1dxNHViZndyS2VOeGJFbkxRPT18uzh6hDGbvGTPDjTotEtdXXbulR9ukML3n9YHQBzyX_U= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/64.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/64.sc new file mode 100644 index 0000000..90e3a4f --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/64.sc @@ -0,0 +1 @@ 
+MTQzOTkyMzI3Mnwwanp6eU51aGxsSEt0b0ZRNXhkUjU3YzZXWjJySnc2aXo2c2FYWFB3NjBnUDRnQUpPQ082bUZrTmJ5V1pvZVJXRi1JTzlsXy1KLVhpMzNidnlrV2xOT2RIVE5QalFqWklhSTU5c1BoSmQzY24yLUU3RFNMSXExTTE0djdyMDZqY0FIanhYWk14WUFERWk2a0pFbE9BYTgyOGJKalVCRm9SVWVYb2lvNW5pUkQwb09hUXo1bXBiUWRBRmx6cWpmU09NbWRLVWJsbnyekkTo3BRb4h8NxFsA-u3yJovwUt6YiFZRA6601AWliQ== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/65.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/65.sc new file mode 100644 index 0000000..fc436e1 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/65.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3Mnx6RXFNam1mMGdqdl9FdWdYbVhENlVnbElXVFVlVEloSWJFTFhQS1pqTzZONk90NDJIQ0pzTFdFMC1kdGlnaGJRR2lQTnZjVHVvbDd0SjlzUzhTdXJiNl9DMXl1eFN2Z2x6ZV9nSGJQNGxEQ1hDdHFoVGlSeGFmMzdsbG5ZcXZLbC0xMlJVOHJlU0E5WkFKcXBuVWNKbUQzRXhQdkswTlpNR1F5SFBndWs2dFZwRmNxS0Y2OVp2LWVvTmc9PXwa834Gqa_XEA5QQg7bVXgvdF3xnPxb8SYWMk2DYOmPfg== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/66.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/66.sc new file mode 100644 index 0000000..a4887af --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/66.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnwyUmFwak85TjIxOG16amY0bnVyLWJ2cVpmdHdGOFRERExoQk5PajRIM09KaVBjWEN0UHB3T0NCUGstUE9EOEwyRE4zNVZERkx3anBQWFhwTjk1MTNTRWxTRVA0eDhpMERxUFdfUlFxdEZDU1hFT0pCUGNtR1JUTHB8vOPKYBrn6hBxJVx8lm83klo8lEfuN5HNBE6iMfRcVUo= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/67.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/67.sc new file mode 100644 index 0000000..dfde3a6 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/67.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxCOXhFREpZaVJKem5Ec2lodER1RDd1Y3JsWngzQ0s3ZTE5RTAwRHVaRGhvWWpIa2wyWldaSHlUZXYzVGJ3eTRZa0oyNE83MGtxRl95SV9LdkRIQjlsYklwZjRYbzdKX3FpM1NkTnZNc0VlRi1kVTl3RGNjSXRkNFQ1ZkFJemJ0M2d5SG5QRmxiR1RwNkphUm52a01LQVlPSUZSaFl3aTlHbG42VlhnT0lwTHRUM1BkOUxKSjNKU1VEaWNNRVFna1FuY0dBQm1vemhWREpBaE5Na3JQWTV5WEtWZHRBWE96VFQ2RlljTlBkZnk3R25xQ1lXUHFEWjdPRXJMQ3pCOFFfS2hNcnUteWtDODA1WVVlZ182SDRQaUd2SFR6LUVnWnE2c2NXMldLX1ctQlpTV3RqUm1wclYxU25ZcnpvaUZpc3laTTVVcWw2RVZadFhYcE9sS0pmbEJLV216RUp81kEhfeuoPh7fasWg194BQUrNABxQRgO5E0UZhArboiA= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/68.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/68.sc new file mode 100644 index 0000000..06dec96 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/68.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3Mnw4OGlmcDZDelEtZXJNQXBFSE92SXBFcjFGWVFYak5vTWRtX2oyclQwUmcteC1Bemx6bVktVTd6OUk3UVVkNGZ0TnZIOVlHRmRWbUVUeXNGSHBCbnNiQUpCcWZfa0Y3cmI2ZWMyd1ZST0puNE1qdmJXODlQa1JaVXhxQU9FcU51ZkpuaUFJZUV4ZW82YTVZUmEwZThfMUoweTFpYTBzWFlWMUlfZTJ6ZC1nc0hkNWcwR05nRWJYTjBkaXVLRVJaZ1lORnpNVGZ3SG10QVh1WWJuTzFnVFl3PT18NNpLAmbpk2NJ4nUzZwjMB67lvypX_GHa8s0CJ-ftV8A= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/68c721c56a20c85b4aefdffcd60437cf2902b0fa-10 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/68c721c56a20c85b4aefdffcd60437cf2902b0fa-10 new file mode 100644 index 0000000..ac94a21 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/68c721c56a20c85b4aefdffcd60437cf2902b0fa-10 @@ -0,0 +1,9 @@ += + + + + + + + + \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/69.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/69.sc new file mode 100644 index 0000000..e75e6bc --- /dev/null 
+++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/69.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxWc0FwTUN2ZFdZS1JuZHpjazFvYkluVUhrOERDLWo0TFNaSV85QnZ0N2x6b19TdEFYcWJOQU1XR1ZodVNubGNmUXN2UDdMeDB2OXdfV0s2a3JPb1k2STlwU1BZWnJ2ckZnaFVmdWlaMFRDMGxOaDNSTm03c3FpMEdvVWxhdFh2YTFtYXU1bElWZnpaeGJkQUJWVDdIazZsQUVlRl94TEtWeFE9PXx0Ao9dRKXodS2pdmdMxn4s2gqMdvIf0LyqafRO2A6Epg== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/7.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/7.sc new file mode 100644 index 0000000..8f3d200 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/7.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxOUkxmblc1cnZBRUlKWDN0X2NZYzlwZmJwalBvdWVST1M1dnRFb2lKd1ZVelJ4ZHcwSFNyRGZobUMtMUlwcUdlcUxUdlRLc25ubWVJSk1qR3J3Ql9sVXNpeWpFU3h5Wmd2dm94SFEtU3pqWHpvUzl0YTFjOWtWMjRNRUw1NFh5SlRQa0tPV21pVXFieTc2cFZCOXYxblhsdW9WZVR5cmlZYmc9PXyOjmpasY0soiOXocnIfIjXzLBdvcIJSUf7MtnecEiumQ== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/70.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/70.sc new file mode 100644 index 0000000..4423068 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/70.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxONy13NV9kajB6Q2l0cEpZcS1oUGMzbTh3UEZqTUVGaTB5WVBTQ1lFNnhOS1I0OW1sLV9FRWZPNVdmRDBTakdlTzVLbXRfVFh0dTN2d09CekI0ckhCZlF1NC1PRHJpOXRyNkdPeUlQeWZ1WEhIenlodWRzeUJ0OERWOU9pQVYxRTZpTU1CQVBaMWtuRGJDSEtIeXpBajFweUhfNXZMbmE4WGNLQWZUYVJUQ1U5dGNQeGdNSlIxTUhiSVRvZDFxa2hVWUdxR3RVYnBzSWtrV2pEVjFhTmZFdE9DYkJiMWxmcVI2bC1PdDJnWHMxOEdoY0laQkdaMGEwUUYyb3NHMkhUOUYxOUMyQi1FTTQ0MXJqazlVRy05dz09fAeQr50BUvNembveqXWEGP7rlr5dtj67s9wqE6gHzaaC \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/7095a5454c9f66801f2b298e577a488a9cadf52d b/vendor/github.com/gorilla/securecookie/fuzz/corpus/7095a5454c9f66801f2b298e577a488a9cadf52d new file mode 100644 index 0000000..bea6e84 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/7095a5454c9f66801f2b298e577a488a9cadf52d @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxjMHF0NHdRdnBtLTQxZF96c1h2WlZfbnV0V1B4ZjJwWTlodFpLOEU9fCET \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/71.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/71.sc new file mode 100644 index 0000000..c332155 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/71.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxsLWpPOUZEdzRvVDY3OGRkMjNkNnFhV3lSYmVscS1SQlQ4X1FyZUU4NFFQaUp1cmxwSkFrZWRJc3BSSjhaeVlZalFxNnVNUHhGcWRGZzRvSFkyOE4xc2V2MHRxVHdMQ2RKVFk3UVJnZmxKSTZkRnBUYjZqTk1QMjBlVDhXN0dBYXA2b0hPSVRoNHZIYUkxeXRaZ3NzbzRLR1dEWE42ODVrTWhIUkJ6cXJvaW1GSDAwNDI4MUdlS0lIWXhncEVXVDJBSE13VkZaM0tyRT18W02jU9IYV9lzRqQQxfOLzy-8Bd9bDwNImCpHGH6rnF0= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/71853c6197a6a7f222db0f1978c7cb232b87c5ee-3 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/71853c6197a6a7f222db0f1978c7cb232b87c5ee-3 new file mode 100644 index 0000000..139597f --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/71853c6197a6a7f222db0f1978c7cb232b87c5ee-3 @@ -0,0 +1,2 @@ + + diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/72.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/72.sc new file mode 100644 index 0000000..6d2e42e --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/72.sc @@ -0,0 +1 @@ 
+MTQzOTkyMzI3MnxpZ3o4cWhqT2pvdDlFbDJkQXNadHBia0hOMUpIQXBCQVozVHNMRlFYZ280SkhGb1JZeUJORDloMVRWM09LUjRYeW92MF9qaHdJT3pua0hVMWNOVUhaYTdLTG5mQXpmX1NaMVNlaUhZPXxTJ2ptj0jydEz6XpNwmpmW61lddMyLC1R1pxzI1eELCw== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/73.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/73.sc new file mode 100644 index 0000000..ba1a772 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/73.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxpanVFSDVyc0RGd2JSQVdHaWl4TEdmQ2hKSzkwUnFPN2t1c0hSVUJ6OGtCREpsLWwxb29YaVY0azFIUzJmYUZCOXVxM1A3YlAwbVctM1FzR29JcUFYSV9qWEo0Q3hhZS1BNG1wTXJFUkg3czVfYkJmbG1FTUpYRWhuajRQUkVjVmo5MzUzUT09fBj3LQ__1HenyVLrv81zQEy48ViAGjoQY2cq3guJe9b3 \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/74.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/74.sc new file mode 100644 index 0000000..3cff9dd --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/74.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxaY002LVp3cmEzR2VReVRBUWhkZTBsVkRUQi0wcWRrYVVaNUMxNy1vdjZldVpwWjFIUnhsRjlHaDFnQm1TWm1fUFhQMlppaHppTks3T1RudGJfN24tclpsZGl1NEdiQjJxLUQ3dlFDbExoMmJnVVpGX05FZDF3STg3NzJuSkRMZ2xnN3lqY2xDM3NBN1poaDlzMzBhWS1tWHB1clhybWxHQUdJckVrdWhrQT09fFQeJmtO8msJrVC858xkw_S8fMoaOZMkHV1aI4Jh-IO- \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/75.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/75.sc new file mode 100644 index 0000000..5fa1dea --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/75.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxndjZkcXRUT25pNXdSRVY5bWRvbHNSemROWGJHZXBNZ2VXeUc2ZVcxSVpQa2pTcDVNU0h0aGdlcVZBeFFFcU5SMW5JVnUwakpfWjlOQmE3Ql8td1ExOTEzNXoxOXZ2RzlkclF6QnhTM1ZEYy0yc2xIUldRT2F3SUhacFJLYVhvPXw4CQ6cMxjmD2QJtJJz4nUnQIaNACfQALE6oAWm6m4r8g== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/76.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/76.sc new file mode 100644 index 0000000..a78f94d --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/76.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnwyNHNDMDlsaWw4ZG1MajRrTnNkcWxlV3BIMWZQWTdwZGJub043WU84T0FHQlBnc0NVZ0RsQzVTUEFDRi1JUU14X0pGV2JTUWVVWXFEd2JBNVFpbjlTYXNNM3huVmFNaEpsamdMWVNiX2xpZkUyWU1IbHJzaExCMzhKRHROWFotZGVWMEI1a09yNWNRbTkxM0FnNzkxeGRiSnliMTB0Vjd4eHoyc1ZDOFcyUExLeFZRTlVfWFF2YkdaRmtLNzVrdV84SGRWdU1NcmtnclQ1ODdHaVR5RExTR2xTUVFpQWJISmNpZzBRZ3VUX2wxeDFWLXgxa2RYVG8yVHdfWjFSYUhGc0ZPU3ZmRGFvNHdHNk5GLVZmUVJma0tLejFwMVlBPT18oNqBh6DhdbWHZytoN8fI9neyTMW05JckdFpJQVk84H8= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/77.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/77.sc new file mode 100644 index 0000000..09c6116 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/77.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3Mnx2Nm9oUDJCSGQ1eUJtSFBTOWRsZzVhUGlpUGdpRmNiR1ZidElBbUZRX0RVanIzN3NvY0lFUzc2T2NRVjNFYUxjeURZTFdyLTRpZDl4RGlBbTBsby1DVGtoUmFnRjB1bTE1dUFMbkYwbjRiR0NvcUpEam81UjBzZ09CYl9SVURudGgwSnVQUHo2R0s5QmVYel95cDgzWnZUYnBBdmlESm1ReHBDazdGa1hoZjBPSENHYUFZVXgtZmlNT0R3MnU0SDBUdUVNYlhQUy11ZUVMazdVMlZOUXFBcDh4RVAxaFFVPXxDq1pbr8BUxXw2qZpJMevp_9IcMC85wOHzyARU2H_nWA== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/78.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/78.sc new file mode 100644 index 0000000..cac4aec --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/78.sc @@ -0,0 +1 @@ 
+MTQzOTkyMzI3MnxTV0Z2NnJRUkxfWUh6TDhnSG5OaWdWR0FjNG1FN1FoMTN5QXBTRmg3M0FPNjFYcmtXeTBBU29HQlpRY29hWHNZLXFKdEVGRHYtdUplQXpDeVBpSmhKY1oyZHBzelhPZnBoVURFbDBleVNzMkpONFg4S1YxYzktMHFHc2VoX3hWRUplelFvYk5lN0RwMERSbWxZYlN5dEVIUUlmNFd5SEhqRlhRaTFsR2NrQWQ3UE5SMG9zZkxCR0ZlbEs1LWlzdUxzYy0ybVZCWjQ2R29OOWhyckJmVHFPXzBHM2VYLWpocFhPdGFxci11UXJhQjhyUHh0a3N1WWd1dHJaSWZkUERwTTdiYXg1ZExnbENjMjhJUnFWak96dGQ1czdBQ3lxR01EV2xzVjF4ZFBPUlZYcGZ2RXJZNmVvOGRpUDN3ZUp5M1MyY2FVM19RUlprPXydkj-7EvCOCaQNbV34o-vAQ8d8-fUHjI-c8krAQRPnYQ== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/79.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/79.sc new file mode 100644 index 0000000..8f34884 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/79.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxOY3l2VGo3WmNvWURFbmVsTEZlYmNKSXVWbmI1SGhOeEU1aFpKb3RKWjByQkRiaEV5TGJkMVhwV3JCQzBSMXhsRHBPd05VMjA2M2lmb3B0QmdhaWthMUdKSHV0QWU4M2ZLQWpqb2dpQWZKcm9YSnBlZkpYWE5ZY1pDWEV4TklMWXpMTnNoRDVSejc4Y2F4aWc4bk5ORUx6UlktUHFjVkxfSmxwVmVSYUs3YWhmZzNSUjNKYmRTY3N4cV8yWnlXX2lCX0ZrU0V3TXNrN3otTUd3YnpuSGstVjZya3RXM29KQUtHVnZybXdrLWdWRDRlSS1kMmRab2dmRE1uYlFQcWxJcTBwTWhsbGtURVUxSHROaVE0cVlzdlNZYXd2bnRteXUweFRzSXhZY3xD7IJyMO91H3LSSM2v0nyqzyfNexTgGNeSYxw1n1YrWA== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/8.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/8.sc new file mode 100644 index 0000000..e373c75 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/8.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3Mnx5ZjFSUkZQWHF6R0lrUlY1cXBwZTZpd0NDQXh4TXlDNUJuQWtldTQzOF84NTRRNFZFcVdKck9Ya2pDX0xINm0ySTlDYlA3RzNXY2FKbk8yMk5vc05XbzVEV0NLTnA5V0lSc1BUdEtYUmpTQXRUNTJKMDRyc3FjcG0xOHlWMlYyNTVMQjl2T0JlMlVSWmxkd3R6SEhPck1nTEdZWG9pU2VBTFlKWG1NS21MeGVkT0RQUi1WdUhlT1lOZ0lNSVhCQnRMRlk1dDBfZnRtZUlMUTFuSWdJakhZY0d3VUNaLW1maDFEUTFwamxZcTA5V3d3MlQ3enVMaDdfajhjQjl4d0xyNlQtc3dOMEhDbWsxX3FQN0VRWkNBU0d0MXZHanZhZTdfaDFqZlIzSHRIWEJJX3haZUd0WG9FeXFYSU1Za0JkdHBsNFNxQk9TM0N6RWtmRElJZWhUfKjpFKhD4zyUX3Wrcy0WeL0FzV2CQX9amyu939W3kXSN \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/80.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/80.sc new file mode 100644 index 0000000..460e921 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/80.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3Mnx0bG43b1BVZzR4MUs5WUZNWVcwRGQ0cWVEbFRqY1FnRDJLdHVTMUdSUGdUdm5ZOW0xYUJUZW5YV3hUYVBqejU3VVhpMU5lQTg1LTNZSnNzajB5MktLTEZYamdVXzN5NVR3UF9fM0R4ZmtPMEtLT09ULXNWRWVPRkVUREVnUWhudDI1dDIteHZaVGV0SW83ZV93N3FoOElWckZqWGcxd1g1cmJHZlI1bUVxVHFOT1E9PXy7SPZGoPPnDxm8JiVZi9hmXvvaIker83ubu_xoQONmKg== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/81.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/81.sc new file mode 100644 index 0000000..f2d9ad9 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/81.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxKU19faFdTdGQ5S1NrdThZN25Fb1RmNE9HT3JQcjdIaGxIRXFBMDdMMHc3VzJsMnN1M2Y1aE1VQUVlS3lqQnlCcjJOdkZ0OXRQUE12VUJMRWJiR3VJVmI4N2EycjdZOWo2THJmcV8yNEVnWG90NU5JUjh4RzN6ek5HZmxINkJnd1JZbDNfR2VsNTJRN3lCY1BmY1J3UlBKa1FkZmZQNHJLZ1dBVFpaY0J3VTctamYwY1AxSFh6TGxObUxuZnR5aTcxV01ZMmc9PXxHixMgIoezOeAoBv4fBC7eHYtIrvhSSu_I2XJgVc76RA== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/82.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/82.sc new file mode 100644 index 0000000..371335b --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/82.sc @@ 
-0,0 +1 @@ +MTQzOTkyMzI3MnxtdTk0NmQzLU14OXVVMG5USzNXdnd0azFIcXFfOElxdG94T0tYWU56TzRhNS1qVDdjVFlURjlrdklZZ1BpNldHczg1SkVHSWhzNjlBRnN4dXI4ZkFObml4ZEM5VUdxVkxfSmRHV3dpWjNleVhyUERhYm9OdDZNMkl1TnlXaGRNa2hyaEdxNVptVzlOWEhaWkREM3JnZ3AxSVpRaU0tN1NPMTNraW1LS3hBQjZPaThxM05fUlliekVjWG1VPXwmvTU7eZgs2jKeks78dS_iaI9YAE5euzu6plCXKk2goQ== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/83.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/83.sc new file mode 100644 index 0000000..794dd19 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/83.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnwwU3BYN3YtbTFrUm0xbUtKWUlidW0xVTJ6Y3Y4U2NLajRtczh4Mk9wUDJVM1RJa3o2T3ZHQm9zaElwMjJKTVp1M0FqYUtKNGplNkw5VHNiZGU4ak9ENVZwRVZ5OFhYTT18HGfGXzhKalF_Nuce6azPPzYwsYHXcq6aWlMSRTGnklw= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/84.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/84.sc new file mode 100644 index 0000000..8d49ec4 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/84.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3Mnw0UE9TaHhpRk1TRnVlMDU1Q3c0STV1dHZaX0plb1RhRC1HeVR3V1lITC1HanpLMGl3T1RSXzE1YnBjUzUxT2sxWFp2VWQzSE5wellmai1BUm9nQi0zTy1MdERYWTBBU0lkQ0N5WHBZV19wNS03OVJPeUlRVWpsMnc2bjZfRlFjendVRzdzSm1aaG1TMmFoVEtORkg0aWJfSzlzSEZ5ZFc2amJsb1d4NklYNV90aEt5MDVTcjM2QTRYeTFhZEhoVTlrQlA5NEpNU2gyUTR2TTRUcDJxdk5jZXpIQ1FFSjVkOWI4b2g3RS1vWnRFPXwkqgChSUkX36GJC3EuUK4WURPWCCegxHB8tPv__n5PXw== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/85.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/85.sc new file mode 100644 index 0000000..a6e39bd --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/85.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3Mnw2YjRRak5MaEZJcVpnamhkeWRkYWNnbGFSWnZVT01XUVRUY0piWGFhSl82dTRraEFDZEhnOHM1MG9YZnNIMWJBdkpmaUVscjBjTmE2czgxWTc0TDMxR1VlVFRqUERvaFUtSFBNUGVuclVTd3VjaDh0R3l0MTAtWkZ2Z0FBNmhCOHZ2NC00MlhrTVE9PXyKjjYod2keoNkH9xgBYzdhfn92OG0pynAFLXSXosy0Gw== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/86.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/86.sc new file mode 100644 index 0000000..a2d9739 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/86.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnwtZEpLQVA0dU1Ga1ZqaTd4SGJJandmbEFuVllnaXJmTnBPM0xCLW5LM2tvdzZlQ3JfdUlaLVFleUd2ZGZ1Q2tDUG5aNmhtUGpPdW8yVnZiaERDcnU1dHFwUVBjRFp4UDFLaGloX0Q3VkVZU0tESjJ0aGNKdHV3S0JESjNLVTRSd0lLclBtX1hNRFBfaDVKdjczQWtwa1o2M3N3RTd1RE5kSDlxbWRmT2RUQURpX180YTRWNHhQM0k1cjJnN1Y5R19aY3pYTk1tSWtMVWZwdzhUQ2Y4dVlaS0xYbVdrS2Zhdkp5TlNVTzF5M3hSSmp4MXN0LTdFTjhSUnp3X0Vpb21wcko1alZKSWRScDlVb0pWeTJoaDlZNmc0VDFQT0FId0c1WGJLSU41Ung3SlBsNVdld2FZaF9pbUh8m3-4Zy4L5KCxUET1y9iV5cJgt0FTXBulXvO6e_Aeg_c= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/87.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/87.sc new file mode 100644 index 0000000..fba2d8a --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/87.sc @@ -0,0 +1 @@ 
+MTQzOTkyMzI3MnxBQ2R0b01ORnFrZzZIalNBVDlGLW9PXzRCVWVvYWIxWC1KdzJqUGRPMnZ1Q1VSYkNTd3lFV3VPS1VMSHRWWnEyempXVTRLY3NkUTBodWQwZ2p2Sm9RdFlPRzFxMGtqQ0NGWkV6SlB1a0Zkel9rNkRrYUpaOVpBcVF6Q0N1aHlRM3BpRTE4dVU1VWd0dmZ1dkxWXzhPTzNOeDJaeTJOWm5KS3RabWNPZENJRVhiZ3NOUk5KX0dWYkRtdEdaNlNCNy11T3pvWUpUNjBUTDVQdUFJeUZoazQwQlZCc1BYSHYxeWtfUmJKSTFHY2dBeExRa1ZXY2ZWWFlMOURwOGNYLWFKT3dRTUNqdnVPb3d4c0dWYlBRVWhmUmZ3SWlyS3BvQzN2SHJnRndOZnZKbzFUNkVPUGpIQ2RSeWRzSFlYbG5pdVhFbFpYdzA5RnBNdWVTdEh1ajVGdG9KWWFzMD184QvlQbFCVsttDDm-Pf8YvgzUBk51UFvHe1EZXrM-vXY= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/878643f2e5fb1c89d90d7b5c65957914bb7fe2c6-1 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/878643f2e5fb1c89d90d7b5c65957914bb7fe2c6-1 new file mode 100644 index 0000000..f6d4cee --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/878643f2e5fb1c89d90d7b5c65957914bb7fe2c6-1 @@ -0,0 +1 @@ +YA= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/88.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/88.sc new file mode 100644 index 0000000..7b84569 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/88.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxLM2xubFVwSU5qVnJHS1BYWThYZjFtYmcxNmVsLWxzRWR6VC00RkJOVUdUOWpuTzZTM1NIT0xUTXNGWTJpeWFGQjlZMlJabzR4OHJvSnpyVXIzVTBVZU1FcVIzc3pGS0Z6QzhCdnRDTE14dk1DYmhEUkFtOUREYWtQMDkwTW1IMWg4S2FrdEVvajRSbFRHaU9VVlhfZWhxU2ptTUdZdXNZeXVUd3FWZ21YRlhsREM1eWllM3lrWGw1TWN1NUxZcEViZ0ZnLVBNVDVGUTQ1QjAyRG55bUlYbEZad1ZpNkZNeFc0TDRfUmJKV0lUSUFWMDlEVnlLemZsNVl2T1kxYWY5U0pQV0VaRG5oVDlqVFB0U0k2cXlPTkpSVUNfalJndDhrdWJITW51cThPYVZ2RWVXQ0czVEFuUGxVUXVIakRiSUxLaG16OVNJWHBIeVRkUTlBMHc9fEYKEvjdgKBHD3Zn9ScjC71jYwMeS2-zY75xQnmpqCSO \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/89.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/89.sc new file mode 100644 index 0000000..9521792 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/89.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxEV2NSaUl0YS1kY2t0YnpWZnRDd3dfM2M5LXE4ZFI5TDhIdUZ1R05PTmlJTHoxMlNJaGxzMFA5dkljRVJRb01zdnBla2ZQZTFZSXdoNUthNzJiZ3JSc18yc0tRTHVJZ2hfZkF0WkUyNXg5OE85TXFGcUFIWWU3dml0bDUxdWpQbFQwQnpPS2NYek5kODZla21XY2Nrb0ljTVdRN2JZaWlGX0pOSUNmMWxzWDA2ZjM5N3RoWnFkSU1BdG5aSFFkcGQteW1ZbUlJV2NUcExtcFdPVC1nRV9neEtuX0VSMExXYXJTN29KZ0R0ZUgyNWU2ZThtVFMyZlNHS19xZHg2Zk5NcVN6bmxTVHNKTm9xa1g2TEVZNjhnNmlrRm9rU2R2S3AwYXBBUFJYMzNKbGs2LUllX01lVkxLRWxWUU44cEtia1U5ZWpPd0NWZFBySWlxeENLSWF3bXd3cFYwR1lwdnJZUURzPXwywZdJd17BJ8hx6xNXtwzgG0iX456897xLcR5CCw15RQ== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/8ed2598d72255e78e1cdecba1a0a3b0cb4e4d8be-1 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/8ed2598d72255e78e1cdecba1a0a3b0cb4e4d8be-1 new file mode 100644 index 0000000..6c12cb9 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/8ed2598d72255e78e1cdecba1a0a3b0cb4e4d8be-1 @@ -0,0 +1,2 @@ +- +½ \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/9.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/9.sc new file mode 100644 index 0000000..958b8ad --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/9.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxXQXRMTzdlZUlRaUV2OGl6Z25YSTNtQUlSOFlsT3huQUExRy13dU1SM1l5Z29uOFFQRXg1Y2ZyQ3lHaFBhT3hJcVR5M2ZYdTF6RTlXaWd1Vy1fbE01X3pwbFRVZTdoT1FVUVNnRUJlZmJPT29KbzA3aUYyUUY0V3dJVnZzaWdWc3NBeDNHWXpQX0RwZHxGwb5E3feFV7AMx9UYDiBTrzOgKdxTfYxfFJS1UiMAXg== \ No newline at end of file diff 
--git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/90.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/90.sc new file mode 100644 index 0000000..1aa2fde --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/90.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxDUEl5WFB6dUV2ZEtCcy1MdXdaSERPQmgyOEN6Qm5xVUJuNlVFXzExMXpxeTl5amNCN05FRlhjQTkzaXFKOVdxdGptcncxcnNEXzlqV1pndUhVYjRyWV9mY3lUejZNcWJQWmdXaXBZd3hVSHhYMzJkbG9xNUdjWTJUSlJPSURGQUtrblU3YUg2b2UwbWI3TU80TFFSM1ktRkZLNVVaRjJON1pLWk9wbU9jd0NoeDdUZjFHaWFfeHFoNklJcnJ0YkFKZC1nbmVLTmxleXZ1VDlkZWIyZlNjai1CVUFMMzhtLS16SG9RQzdlTDQwazF1LU9UYllzZTRyWEg4X09YanlHZF9meGUyQ0UyLUthbEhMWGFKNVFBZDFfZzY1UWNHWWkzMTU4b3pCVmViXzJYQWFMd0c0UnE0alg3Ukd3enBNT0Z6cDVxaHo3R3c0PXwmpAcnVSB3YHD_PbruU0D3jg7IwrzBa5HtuCyeaCpsmQ== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/91.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/91.sc new file mode 100644 index 0000000..efa6f27 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/91.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxnNHlZNUxHdGlmQUU3T1VZWENOUGZVYWY1SVFPdXBHeDJZZE10amVCeEt3WVZwbktwYlNlNEJGdnk0X2RhRXd3RGlwVGZwNWZYcnJQRkNQQlVhNnlDNmZKa3NSTmRLd2FLZFljdHA4XzU2UEFmdVpDUjdtMzVtRjZfSHd2SDRsX0VnPT18TkfZK1nzBLYcMAQDkHIHtymNL6qBe7qQwUlIO9Znu7Q= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/92.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/92.sc new file mode 100644 index 0000000..7cfa33d --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/92.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxXcS1wakRmNGdWRUhKb29EazlmVk1fS2lFUS1EUUt5dTdSeFpPLVQ2ZjIwaDRSeXBuOUJIMHFuSnd2TXREWU5vb2JybWZyNjNvLTQyVFZxN01BM09MZzA4QVhrNnV2bHp5WTZ4dUdDOC1XSDBrNmVxTjV1TzJSSGJVSXg0NFRkSHktZ0dSQmptTVdqSDdQRnFHRzJwalhCaTJjeElpMFp2a2U0dVNHMFBkMDZ5NG9yamhOcldpX2oxVW45d0xmV3VsbFhrTHFiaFZzZTMyVHV5RkFiMG5jRHo5OWFZWlIwc2hBbHl8GdvKOhv93iUO1tB87ROLC5-JCLe2y-R50Z5x_68YvRI= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/93.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/93.sc new file mode 100644 index 0000000..5b28264 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/93.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxaRm5adFRCT3lweURrUWkyN09ab3M1N2hXcVIwd2QweG03NUNpZWx1a25BQmRVdy1LTnY2QktlVnpwSm9VbGpuRHNJUVdORk1qS2xqZl9FeWlFZWVvZHo3MndjaWN1eGNoT25hVWM4TEtWQnQ3QlNQeEx5R1F0bTFvUEg2a2wtMUFXdk82Z0RUdVUxa3pZekQ1R19XMDROSnlkTnhaZU9HOTh6ZG5UVHBycUNSRXBxVVJUSWZNTnZYRHl4UF96cVk3bFhRTUhlZ1lEVF9CeXY4cmM5QlFQYi1jSTllbHBCUTZ4aENNSjBUZGRQQk5lYTh1S0VzMEVIaFRPMUFxRmxsZVBCX1RxZmFKUERoS0ZxWi1pST18CZk1M1q6UbWa3CHGVtjBOeOMZHoe1hpdzHjAIaQ7cCo= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/94.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/94.sc new file mode 100644 index 0000000..fef4f00 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/94.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3Mnx1ZVNNc1hrMzQ0RmVDTUVpb01EaFlrT1BRdzhUQk9Oa3dIbWpmNE96OXB6X2F5ejVzUWlPdHppSDMxRVhrckhOMVA4MllBOWJTUXozWlFnanFoTDBpN3owc1VXX1FCai1ET29GS1A5Mjc2U2V5c2ZIQmJoUVIzWnNoYnY0TUdVcXU0M0Z4SGdKRHFQeVIzbzY2cXZGTVJBPXy-Le4iGLMrUB76xVGBZW3NOgKwpaYIku26_tyl93OhYA== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/95.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/95.sc new file mode 100644 index 0000000..6d65b38 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/95.sc @@ -0,0 +1 @@ 
+MTQzOTkyMzI3MnxQbFV1anRTQjZ0LVpjclVjM0JGeGhCTk5YZmx0QUh2QVR2MzMwa1NLVHhmc05hVndEMXU4TVQ1bXRGZ0o0UHE3Wnc3OHkwWVVjTFRfenhvYWZNWTBSMUlSVWxCYmcwNGRtUHA1czRtYjVIVkJBczFCM1RwRXpYTlZaUUFZWk1YZXJKRy1tUWQ0aGJBd0JQeEhWbmhvRTZQaGJtXzBsOVk9fKKCVRkqaBYWfLHjIU-cPjXaAV5uL4nHk2RI5jWj4jd1 \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/96.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/96.sc new file mode 100644 index 0000000..f3de56d --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/96.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxFUnRfR2doNzVNU2k0al8tUjk4ZEV3bEg0TVM0bXFGZGlKSVlLRVc0UWxjOHNiRWtBMWtKWUkyM21iN1Z1ZWtQaHlxYXJPZzRSbTdyZXZVQU1rLTRKZlhzWlVheDZiY1V3X29DSzZaMHFsMzlEeG1hN2xaQmNIWDktVE5tbVBvQVhyX3VLc0F4ZWkyN1FRUVByeTJRSW1VN0QteXJ8_in0ZHSD5jPWPsR4IcjuzTekZ_CGzQKgl3Zu5U0U8HM= \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/97.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/97.sc new file mode 100644 index 0000000..fd751b5 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/97.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxWZ2p6d3JzUFFQNWpoNUFqakxTemdHaXNQM3pIQ1Z5aXp2bDhyUG10bk5qSVliNUlqRDdMMmp0VGpyeHdFanF4aE1LMzhaWUdUWnJlXzh0dEdRTzRUSWwza0pvYk9uRmZXOXNUclRoZnIwUVVwOTlPUnZUMDk2dDhTNUptZXg3aVhnZU5RVWw2YjhFUUwzNWxFeERpYlJFY01PMU0xcjljSUtMVVhoT1ZGU1RYdHZxRnJvVGJncXVBdUhDbHNUU3N2RnpZSEozVkw2cEJZSVE9fLwwrMqXB6g0r9yOzqZcTuiAc2OFEuK7AxEdTJGPoBgU \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/98.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/98.sc new file mode 100644 index 0000000..daf7d59 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/98.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3Mnx4WWZPcGdmeUE4LUZFX1pMMDBvdm1Gb1BobUtvVmpGdXVSUXJVZXJtZmtGbVJuRmtWOGI2dF90akdPbkNNam5IamtON3RDeU5yRUF0aGtuTDFRNlEtel91VEpYR281OEZVSXJjWEhoZ1dSX0hzTUNrV0hVZDlIbmxsLVNJWVk4aWJIM0p3OGc9fKEguloAVNTKMZbroD0NCrbjtVCLZt8rWBTnUYIX04Wh \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/99.sc b/vendor/github.com/gorilla/securecookie/fuzz/corpus/99.sc new file mode 100644 index 0000000..4e9d47c --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/99.sc @@ -0,0 +1 @@ +MTQzOTkyMzI3MnwyemlkdjBTNVZrQ2Znd3ZpT2JkV19lSzAydHI2aDVUaGNTUGtSdGFLdlh0LU9DZ1Rlb3phWlludUw2RUswWUtfSzE0ckJXSlQ0WDBuYnExT2dtNlhhcFE4OEFiZTN2c1IySHdHWTNTQ0lZTlExZWp0MXhZUG5LNkozVWRORENpdmpUMk5ld2xtMURRVm5oa21Zck5OVFRWRXgtRlc3NFFzZk1pUHZGQkF3TW5nZHpiMS1tXzNiZUlpeFFwWk10S2l2bnVuV0NyR2Q5T0RtYzRDVDZva09aUjZCeW1Kd1lLNldiSWU5Y3ZzZVh0RUhLNFVVWXZSV0ZLYkdjcnJFV3VmVC1ESmktSy1ORlUyYW1uQmhGN2pKamhtRWFoNmd3R3dDVmEtUERxYTgtY2l0cmhVZ3BCYXpvdmpuNVQzMVRYdEkwMG1LVDRnbWpnWWpsZXFhUDdSZ2lmbHlrcXhzYUZPfP5o9bNi5YI5P5tq0ZQupy5n1-kqBqOl19ZMmTcZBCVN \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/9e34c6aae8f2c610f838fed4a5bab0da097c5135-2 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/9e34c6aae8f2c610f838fed4a5bab0da097c5135-2 new file mode 100644 index 0000000..ef1e344 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/9e34c6aae8f2c610f838fed4a5bab0da097c5135-2 @@ -0,0 +1,2 @@ +T + diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/9eecb7ef73e5211948391dfc0c2d586e3822b028-1 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/9eecb7ef73e5211948391dfc0c2d586e3822b028-1 new file mode 100644 index 0000000..a32d399 --- /dev/null +++ 
b/vendor/github.com/gorilla/securecookie/fuzz/corpus/9eecb7ef73e5211948391dfc0c2d586e3822b028-1 @@ -0,0 +1 @@ +MnxBPXy- \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/adc83b19e793491b1c6ea0fd8b46cd9f32e592fc-2 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/adc83b19e793491b1c6ea0fd8b46cd9f32e592fc-2 new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/adc83b19e793491b1c6ea0fd8b46cd9f32e592fc-2 @@ -0,0 +1 @@ + diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/ae3eb68089a89eb0a707c1de4b60edfeb6efc6e0-4 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/ae3eb68089a89eb0a707c1de4b60edfeb6efc6e0-4 new file mode 100644 index 0000000..2b33900 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/ae3eb68089a89eb0a707c1de4b60edfeb6efc6e0-4 @@ -0,0 +1,11 @@ +J + + + + + + + + + + diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/b4f6322316fe4501272935267ab8b1c26684c884 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/b4f6322316fe4501272935267ab8b1c26684c884 new file mode 100644 index 0000000..2673fbf --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/b4f6322316fe4501272935267ab8b1c26684c884 @@ -0,0 +1 @@ +MTQzOTkyMzI3MnxaRm5adFRCT3lweURrUWkyN09ab3M1N2hXcVIwd2QweG03NUNpZWx1a25BQmRVdy1LTnY2QktlVnpwSm9VbGpuRHNJUVdORk1qS2xqZl9FeWlFZWVvZHo3MndjaWN1eGNoT25hVWM4TEtWQnQ3QlNQeEx5R1F0bTFvUEg2a2wtMUFXdk82Z0RUdVUxa3pZekQ1R19XMDROSnlkTnhaZU9HOTh6ZG5UVHBycUNSRXBxVVJUSWZNTnZYRHl4UF96cVk3bFhRTUhlZ1lEVF9CeXY4cmM5QlFQYi1jSTllbHBCUTZ4aENNSjBUZGRQQk5lYTh1S0VzMEVIaFRPMUFxRmxsZVBCX1RxZmFKUERoS0ZxWi1pST18CZk1M1q6UbWa3CHGVtjBOeOMZHoe1hpdzHjAIaQ7cCoMTQzOTkyMzI3MnxaRm5adFRCT3lweURrUWkyN09ab3M1N2hXcVIwd2QweG03NUNpZWx1a25BQmRVdy1LTnY2QktlVnpwSm9VbGpuRHNJUVdORk1qS2xqZl9FeWlFZWVvZHo3MndjaWN1eGNoT25hVWM4TEtWQnQ3QlNQeEx5R1F0bTFvUEg2a2wtMUFXdk82Z0RUdVUxa3pZekQ1R19XMDROSnlkTnhaZU9HOTh6ZG5UVHBycUNSRXBxVVJUSWZNTnZYRHl4UF96cVk3bFhRTUhlZ1lEVF9CeXY4cmM5QlFQYi1jSTllbHBCUTZ4aENNSjBUZGRQQk5lYTh1S0VzMEVIaFRPMUFxRmxsZVBCX1RxZmFKUERoS0ZxWi1pST18CZk1M1q6UbWa3CHGVtjBOeOMZHoe1hpdzHjAIaQ7cCoOuMTQzOTkyMzI3MnxaRm5adFRCT3lweURrUWkyN09ab3M1N2hXcVIwd2QweG03NUNpZWx1a25BQmRVdy1LTnY2QktlVnpwSm9VbGpuRHNJUVdORk1qS2xqZl9FeWlFZWVvZHo3MndjaWN1eGNoT25hVWM4TEtWQnQ3QlNQeEx5R1F0bTFvUEg2a2wtMUFXdk82Z0RUdVUxa3pZekQ1R19XMDROSnlkTnhaZU9HOTh6ZG5UVHBycUNSRXBxVVJUSWZNTnZYRHl4UF96cVk3bFhRTUhlZ1lEVF9CeXY4cmM5QlFQYi1jSTllbHBCUTZ4aENNSjBUZGRQQk5lYTh1S0VzMEVIaFRPMUFxRmxsZVBCX1RxZmFKUERoS0ZxWi1pST18CZk1M1q6UbWa3CHGVtjBOeOMZHoe1hpdzHjAIaQ7cCoMTQzOTkyMzI3MnxaRm5adFRCT3lweURrUWkyN09ab3M1N2hXcVIwd2QweG03NUNpZWx1a25BQmRVdy1LTnY2QktlVnpwSm9VbGpuRHNJUVdORk1qS2xqZl9FeWlFZWVvZHo3MndjaWN1eGNoT25hVWM4TEtWQnQ3QlNQeEx5R1F0bTFvUEg2a2wtMUFXdk82Z0RUdVUxa3pZekQ1R19XMDROSnlkTnhaZU9HOTh6ZG5UVHBycUNSRXBxVVJUSWZNTnZYRHl4UF96cVk3bFhRTUhlZ1lEVF9CeXY4cmM5QlFQYi1jSTllbHBCUTZ4aENNSjBUZGRQQk5lYTh1S0VzMEVIaFRPMUFxRmxsZVBCX1RxZmFKUERoS0ZxWi1pST18CZk1M1q6UbWa3CHGVtjBOeOMZHoe1hpdzHjAIaQ7cCoMTQzOTkyMzI3MnxaRm5adFRCT3lweURrUWkyN09ab3M1N2hXcVIwd2QweG03NUNpZWx1a25BQmRVdy1LTnY2QktlVnpwSm9VbGpuRHNJUVdORk1qS2xqZl9FeWlFZWVvZHo3MndjaWN1eGNoT25hVWM4TEtWQnQ3QlNQeEx5R1F0bTFvUEg2a2wtMUFXdk82Z0RUdVUxa3pZekQ1R19XMDROSnlkTnhaZU9HOTh6ZG5UVHBycUNSRXBxVVJUSWZNTnZYRHl4UF96cVk3bFhRTUhlZ1lEVF9CeXY4cmM5QlFQYi1jSTllbHBCUTZ4aENNSjBUZGRQQk5lYTh1S0VzMEVIaFRPMUFxRmxsZVBCX1RxZmFKUERoS0ZxWi1pST18CZk1M1q6UbWa3CHGVtjBOeOMZHoe1hpdzHjAIaQ7cCoMTQzOTkyMzI3MnxaRm5adFRCT3lweURrUWkyN09ab3M1N2hXcVIwd2QweG03NUNpZWx1a25BQmRVdy1LTnY2QktlVnpwSm9VbGpuRHNJUVdORk1qS2xqZl9FeW
lFZWVvZHo3MndjaWN1eGNoT25hVWM4TEtWQnQ3QlNQeEx5R1F0bTFvUEg2a2wtMUFXdk82Z0RUdVUxa3pZekQ1R19XMDROSnlkTnhaZU9HOTh6ZG5UVHBycUNSRXBxVVJUSWZNTnZYRHl4UF96cVk3bFhRTUhlZ1lEVF9CeXY4cmM5QlFQYi1jSTllbHBCUTZ4aENNSjBUZGRQQk5lYTh1S0VzMEVIaFRPMUFxRmxsZVBCX1RxZmFKUERoS0ZxWi1pST18CZk1M1q6UbWa3CHGVtjBOeOMZHoe1hpdzHjAIaQ7cCoMTQzOTkyMzI3MnxaRm5adFRCT3lweURrUWkyN09ab3M1N2hXcVIwd2QweG03NUNpZWx1a25BQmRVdy1LTnY2QktlVnpwSm9VbGpuRHNJUVdORk1qS2xqZl9FeWlFZWVvZHo3MndjaWN1eGNoT25hVWM4TEtWQnQ3QlNQeEx5R1F0bTFvUEg2a2wtMUFXdk82Z0RUdVUxa3pZekQ1R19XMDROSnlkTnhaZU9HOTh6ZG5UVHBycUNSRXBxVVJUSWZNTnZYRHl4UF96cVk3bFhRTUhlZ1lEVF9CeXY4cmM5QlFQYi1jSTllbHBCUTZ4aENNSjBUZGRQQk5lYTh1S0VzMEVIaFRPMUFxRmxsZVBCX1RxZmFKUERoS0ZxWi1pST18CZk1M1q6UbWa3CHGVtjBOeOMZHoe1hpdzHjAIaQ7cCoMTQzOTkyMzI3MnxaRm5adFRCT3lweURrUWkyN09ab3M1N2hXcVIwd2QweG03NUNpZWx1a25BQmRVdy1LTnY2QktlVnpwSm9VbGpuRHNJUVdORk1qS2xqZl9FeWlFZWVvZHo3MndjaWN1eGNoT25hVWM4TEtWQnQ3QlNQeEx5R1F0bTFvUEg2a2wtMUFXdk82Z0RUdVUxa3pZekQ1R19XMDROSnlkTnhaZU9HOTh6ZG5UVHBycUNSRXBxVVJUSWZNTnZYRHl4UF96cVk3bFhRTUhlZ1lEVF9CeXY4cmM5QlFQYi1jSTllbHBCUTZ4aENNSjBUZGRQQk5lYTh1S0VzMEVIaFRPMUFxRmxsZVBCX1RxZmFKUERoS0ZxWi1pST18CZk1M1q6UbWa3CHGVtjBOeOMZHoe1hpdzHjAIaQ7cCoMTQzOTkyMzI3MnxaRm5adFRCT3lweURrUWkyN09ab3M1N2hXcVIwd2QweG03NUNpZWx1a25BQmRVdy1LTnY2QktlVnpwSm9VbGpuRHNJUVdORk1qS2xqZl9FeWlFZWVvZHo3MndjaWN1eGNoT25hVWM4TEtWQnQ3QlNQeEx5R1F0bTFvUEg2a2wtMUFXdk82Z0RUdVUxa3pZekQ1R19XMDROSnlkTnhaZU9HOTh6ZG5UVHBycUNSRXBxVVJUSWZNTnZYRHl4UF96cVk3bFhRTUhlZ1lEVF9CeXY4cmM5QlFQYi1jSTllbHBCUTZ4aENNSjBUZGRQQk5lYTh1S0VzMEVIaFRPMUFxRmxsZVBCX1RxZmFKUERoS0ZxWi1pST18CZk1M1q6UbWa3CHGVtjBOeOMZHoe1hpdzHjAIaQ7cCoMTQzOTkyMzI3MnxaRm5adFRCT3lweURrUWkyN09ab3M1N2hXcVIwd2QweG03NUNpZWx1a25BQmRVdy1LTnY2QktlVnpwSm9VbGpuRHNJUVdORk1qS2xqZl9FeWlFZWVvZHo3MndjaWN1eGNoT25hVWM4TEtWQnQ3QlNQeEx5R1F0bTFvUEg2a2wtMUFXdk82Z0RUdVUxa3pZekQ1R19XMDROSnlkTnhaZU9HOTh6ZG5UVHBycUNSRXBxVVJU \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/bf3f814c978c0fc01c46c8d5b337b024697186cc-7 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/bf3f814c978c0fc01c46c8d5b337b024697186cc-7 new file mode 100644 index 0000000..79020a3 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/bf3f814c978c0fc01c46c8d5b337b024697186cc-7 @@ -0,0 +1 @@ + = \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/c63ae6dd4fc9f9dda66970e827d13f7c73fe841c-1 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/c63ae6dd4fc9f9dda66970e827d13f7c73fe841c-1 new file mode 100644 index 0000000..ef6bce1 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/c63ae6dd4fc9f9dda66970e827d13f7c73fe841c-1 @@ -0,0 +1 @@ +M \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/cebedf21435b903c4013fb902fb5b753e40a100e-8 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/cebedf21435b903c4013fb902fb5b753e40a100e-8 new file mode 100644 index 0000000..898c69e --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/cebedf21435b903c4013fb902fb5b753e40a100e-8 @@ -0,0 +1,5 @@ +1Q== + + + + = \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/da39a3ee5e6b4b0d3255bfef95601890afd80709-1 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/da39a3ee5e6b4b0d3255bfef95601890afd80709-1 new file mode 100644 index 0000000..e69de29 diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/da5f06015af7bb09d3e421d086939d888f93271c-3 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/da5f06015af7bb09d3e421d086939d888f93271c-3 new 
file mode 100644 index 0000000..32f992e --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/da5f06015af7bb09d3e421d086939d888f93271c-3 @@ -0,0 +1,3 @@ +T + + diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/df60b2ac6f14afbf990d366fa820ee4906f1436e-2 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/df60b2ac6f14afbf990d366fa820ee4906f1436e-2 new file mode 100644 index 0000000..3747a16 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/df60b2ac6f14afbf990d366fa820ee4906f1436e-2 @@ -0,0 +1,2 @@ +8Q== + : \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/ec54cdb4f33539c9b852b89ebcc67b4ec31a2b01-5 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/ec54cdb4f33539c9b852b89ebcc67b4ec31a2b01-5 new file mode 100644 index 0000000..01cab44 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/ec54cdb4f33539c9b852b89ebcc67b4ec31a2b01-5 @@ -0,0 +1,17 @@ +J + + + + + + + + + + + + + + + + diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/ec80b4b6f256eb0f29955c2bc000931d3b766c57-6 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/ec80b4b6f256eb0f29955c2bc000931d3b766c57-6 new file mode 100644 index 0000000..63d869f --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/ec80b4b6f256eb0f29955c2bc000931d3b766c57-6 @@ -0,0 +1 @@ + = \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/f2c59710b18847b10176f19fb0426cb597bafef0-9 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/f2c59710b18847b10176f19fb0426cb597bafef0-9 new file mode 100644 index 0000000..c7e805d --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/f2c59710b18847b10176f19fb0426cb597bafef0-9 @@ -0,0 +1,9 @@ +1Q== + + + + + + + + = \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/f4de882915d90ead3b18371ab004abb24b3cd320-3 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/f4de882915d90ead3b18371ab004abb24b3cd320-3 new file mode 100644 index 0000000..a24621e --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/f4de882915d90ead3b18371ab004abb24b3cd320-3 @@ -0,0 +1,2 @@ +8Q== + diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/f82d23aaf2be2cfc7aa8e323922208cdfce8d35a-3 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/f82d23aaf2be2cfc7aa8e323922208cdfce8d35a-3 new file mode 100644 index 0000000..725e321 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/f82d23aaf2be2cfc7aa8e323922208cdfce8d35a-3 @@ -0,0 +1,2 @@ + +  \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/corpus/fa0f4cd7fee9eb65ebb95a3dc88b6fa198a2c986-1 b/vendor/github.com/gorilla/securecookie/fuzz/corpus/fa0f4cd7fee9eb65ebb95a3dc88b6fa198a2c986-1 new file mode 100644 index 0000000..f4aac5a --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/corpus/fa0f4cd7fee9eb65ebb95a3dc88b6fa198a2c986-1 @@ -0,0 +1 @@ +hYA== \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/fuzz/gencorpus.go b/vendor/github.com/gorilla/securecookie/fuzz/gencorpus.go new file mode 100644 index 0000000..368192b --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/gencorpus.go @@ -0,0 +1,47 @@ +package main + +import ( + "fmt" + "io" + "math/rand" + "os" + "reflect" + "testing/quick" + + "github.com/gorilla/securecookie" +) + +var hashKey = []byte("very-secret12345") +var blockKey = []byte("a-lot-secret1234") +var s = 
securecookie.New(hashKey, blockKey)
+
+type Cookie struct {
+	B bool
+	I int
+	S string
+}
+
+func main() {
+	var c Cookie
+	t := reflect.TypeOf(c)
+	rnd := rand.New(rand.NewSource(0))
+	for i := 0; i < 100; i++ {
+		v, ok := quick.Value(t, rnd)
+		if !ok {
+			panic("couldn't generate value")
+		}
+		encoded, err := s.Encode("fuzz", v.Interface())
+		if err != nil {
+			panic(err)
+		}
+		f, err := os.Create(fmt.Sprintf("corpus/%d.sc", i))
+		if err != nil {
+			panic(err)
+		}
+		_, err = io.WriteString(f, encoded)
+		if err != nil {
+			panic(err)
+		}
+		f.Close()
+	}
+}
diff --git a/vendor/github.com/gorilla/securecookie/securecookie.go b/vendor/github.com/gorilla/securecookie/securecookie.go
new file mode 100644
index 0000000..cd4e097
--- /dev/null
+++ b/vendor/github.com/gorilla/securecookie/securecookie.go
@@ -0,0 +1,646 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package securecookie
+
+import (
+	"bytes"
+	"crypto/aes"
+	"crypto/cipher"
+	"crypto/hmac"
+	"crypto/rand"
+	"crypto/sha256"
+	"crypto/subtle"
+	"encoding/base64"
+	"encoding/gob"
+	"encoding/json"
+	"fmt"
+	"hash"
+	"io"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Error is the interface of all errors returned by functions in this library.
+type Error interface {
+	error
+
+	// IsUsage returns true for errors indicating the client code probably
+	// uses this library incorrectly. For example, the client may have
+	// failed to provide a valid hash key, or may have failed to configure
+	// the Serializer adequately for encoding the value.
+	IsUsage() bool
+
+	// IsDecode returns true for errors indicating that a cookie could not
+	// be decoded and validated. Since cookies are usually untrusted
+	// user-provided input, errors of this type should be expected.
+	// Usually, the proper action is simply to reject the request.
+	IsDecode() bool
+
+	// IsInternal returns true for unexpected errors occurring in the
+	// securecookie implementation.
+	IsInternal() bool
+
+	// Cause, if it returns a non-nil value, indicates that this error was
+	// propagated from some underlying library. If this method returns nil,
+	// this error was raised directly by this library.
+	//
+	// Cause is provided principally for debugging/logging purposes; it is
+	// rare that application logic should perform meaningfully different
+	// logic based on Cause. See, for example, the caveats described on
+	// (MultiError).Cause().
+	Cause() error
+}
+
+// errorType is a bitmask giving the error type(s) of a cookieError value.
+type errorType int + +const ( + usageError = errorType(1 << iota) + decodeError + internalError +) + +type cookieError struct { + typ errorType + msg string + cause error +} + +func (e cookieError) IsUsage() bool { return (e.typ & usageError) != 0 } +func (e cookieError) IsDecode() bool { return (e.typ & decodeError) != 0 } +func (e cookieError) IsInternal() bool { return (e.typ & internalError) != 0 } + +func (e cookieError) Cause() error { return e.cause } + +func (e cookieError) Error() string { + parts := []string{"securecookie: "} + if e.msg == "" { + parts = append(parts, "error") + } else { + parts = append(parts, e.msg) + } + if c := e.Cause(); c != nil { + parts = append(parts, " - caused by: ", c.Error()) + } + return strings.Join(parts, "") +} + +var ( + errGeneratingIV = cookieError{typ: internalError, msg: "failed to generate random iv"} + + errNoCodecs = cookieError{typ: usageError, msg: "no codecs provided"} + errHashKeyNotSet = cookieError{typ: usageError, msg: "hash key is not set"} + errBlockKeyNotSet = cookieError{typ: usageError, msg: "block key is not set"} + errEncodedValueTooLong = cookieError{typ: usageError, msg: "the value is too long"} + + errValueToDecodeTooLong = cookieError{typ: decodeError, msg: "the value is too long"} + errTimestampInvalid = cookieError{typ: decodeError, msg: "invalid timestamp"} + errTimestampTooNew = cookieError{typ: decodeError, msg: "timestamp is too new"} + errTimestampExpired = cookieError{typ: decodeError, msg: "expired timestamp"} + errDecryptionFailed = cookieError{typ: decodeError, msg: "the value could not be decrypted"} + errValueNotByte = cookieError{typ: decodeError, msg: "value not a []byte."} + errValueNotBytePtr = cookieError{typ: decodeError, msg: "value not a pointer to []byte."} + + // ErrMacInvalid indicates that cookie decoding failed because the HMAC + // could not be extracted and verified. Direct use of this error + // variable is deprecated; it is public only for legacy compatibility, + // and may be privatized in the future, as it is rarely useful to + // distinguish between this error and other Error implementations. + ErrMacInvalid = cookieError{typ: decodeError, msg: "the value is not valid"} +) + +// Codec defines an interface to encode and decode cookie values. +type Codec interface { + Encode(name string, value interface{}) (string, error) + Decode(name, value string, dst interface{}) error +} + +// New returns a new SecureCookie. +// +// hashKey is required, used to authenticate values using HMAC. Create it using +// GenerateRandomKey(). It is recommended to use a key with 32 or 64 bytes. +// +// blockKey is optional, used to encrypt values. Create it using +// GenerateRandomKey(). The key length must correspond to the block size +// of the encryption algorithm. For AES, used by default, valid lengths are +// 16, 24, or 32 bytes to select AES-128, AES-192, or AES-256. +// The default encoder used for cookie serialization is encoding/gob. +// +// Note that keys created using GenerateRandomKey() are not automatically +// persisted. New keys will be created when the application is restarted, and +// previously issued cookies will not be able to be decoded. 
+func New(hashKey, blockKey []byte) *SecureCookie {
+	s := &SecureCookie{
+		hashKey:   hashKey,
+		blockKey:  blockKey,
+		hashFunc:  sha256.New,
+		maxAge:    86400 * 30,
+		maxLength: 4096,
+		sz:        GobEncoder{},
+	}
+	if hashKey == nil {
+		s.err = errHashKeyNotSet
+	}
+	if blockKey != nil {
+		s.BlockFunc(aes.NewCipher)
+	}
+	return s
+}
+
+// SecureCookie encodes and decodes authenticated and optionally encrypted
+// cookie values.
+type SecureCookie struct {
+	hashKey   []byte
+	hashFunc  func() hash.Hash
+	blockKey  []byte
+	block     cipher.Block
+	maxLength int
+	maxAge    int64
+	minAge    int64
+	err       error
+	sz        Serializer
+	// For testing purposes, the function that returns the current timestamp.
+	// If not set, it will use time.Now().UTC().Unix().
+	timeFunc func() int64
+}
+
+// Serializer provides an interface for providing custom serializers for cookie
+// values.
+type Serializer interface {
+	Serialize(src interface{}) ([]byte, error)
+	Deserialize(src []byte, dst interface{}) error
+}
+
+// GobEncoder encodes cookie values using encoding/gob. This is the simplest
+// encoder and can handle complex types via gob.Register.
+type GobEncoder struct{}
+
+// JSONEncoder encodes cookie values using encoding/json. Users who wish to
+// encode complex types need to satisfy the json.Marshaler and
+// json.Unmarshaler interfaces.
+type JSONEncoder struct{}
+
+// NopEncoder does not encode cookie values, and instead simply accepts a []byte
+// (as an interface{}) and returns a []byte. This is particularly useful when
+// you are encoding an object upstream and do not wish to re-encode it.
+type NopEncoder struct{}
+
+// MaxLength restricts the maximum length, in bytes, for the cookie value.
+//
+// Default is 4096, which is the maximum value accepted by Internet Explorer.
+func (s *SecureCookie) MaxLength(value int) *SecureCookie {
+	s.maxLength = value
+	return s
+}
+
+// MaxAge restricts the maximum age, in seconds, for the cookie value.
+//
+// Default is 86400 * 30. Set it to 0 for no restriction.
+func (s *SecureCookie) MaxAge(value int) *SecureCookie {
+	s.maxAge = int64(value)
+	return s
+}
+
+// MinAge restricts the minimum age, in seconds, for the cookie value.
+//
+// Default is 0 (no restriction).
+func (s *SecureCookie) MinAge(value int) *SecureCookie {
+	s.minAge = int64(value)
+	return s
+}
+
+// HashFunc sets the hash function used to create HMAC.
+//
+// Default is crypto/sha256.New.
+func (s *SecureCookie) HashFunc(f func() hash.Hash) *SecureCookie {
+	s.hashFunc = f
+	return s
+}
+
+// BlockFunc sets the encryption function used to create a cipher.Block.
+//
+// Default is crypto/aes.NewCipher.
+func (s *SecureCookie) BlockFunc(f func([]byte) (cipher.Block, error)) *SecureCookie {
+	if s.blockKey == nil {
+		s.err = errBlockKeyNotSet
+	} else if block, err := f(s.blockKey); err == nil {
+		s.block = block
+	} else {
+		s.err = cookieError{cause: err, typ: usageError}
+	}
+	return s
+}
+
+// SetSerializer sets the encoding/serialization method for cookies.
+//
+// Default is encoding/gob. To encode special structures using encoding/gob,
+// they must be registered first using gob.Register().
+func (s *SecureCookie) SetSerializer(sz Serializer) *SecureCookie {
+	s.sz = sz
+
+	return s
+}
+
+// Encode encodes a cookie value.
+//
+// It serializes, optionally encrypts, signs with a message authentication code,
+// and finally encodes the value.
+//
+// The name argument is the cookie name. It is stored with the encoded value.
+// The value argument is the value to be encoded.
It can be any value that can +// be encoded using the currently selected serializer; see SetSerializer(). +// +// It is the client's responsibility to ensure that value, when encoded using +// the current serialization/encryption settings on s and then base64-encoded, +// is shorter than the maximum permissible length. +func (s *SecureCookie) Encode(name string, value interface{}) (string, error) { + if s.err != nil { + return "", s.err + } + if s.hashKey == nil { + s.err = errHashKeyNotSet + return "", s.err + } + var err error + var b []byte + // 1. Serialize. + if b, err = s.sz.Serialize(value); err != nil { + return "", cookieError{cause: err, typ: usageError} + } + // 2. Encrypt (optional). + if s.block != nil { + if b, err = encrypt(s.block, b); err != nil { + return "", cookieError{cause: err, typ: usageError} + } + } + b = encode(b) + // 3. Create MAC for "name|date|value". Extra pipe to be used later. + b = []byte(fmt.Sprintf("%s|%d|%s|", name, s.timestamp(), b)) + mac := createMac(hmac.New(s.hashFunc, s.hashKey), b[:len(b)-1]) + // Append mac, remove name. + b = append(b, mac...)[len(name)+1:] + // 4. Encode to base64. + b = encode(b) + // 5. Check length. + if s.maxLength != 0 && len(b) > s.maxLength { + return "", errEncodedValueTooLong + } + // Done. + return string(b), nil +} + +// Decode decodes a cookie value. +// +// It decodes, verifies a message authentication code, optionally decrypts and +// finally deserializes the value. +// +// The name argument is the cookie name. It must be the same name used when +// it was stored. The value argument is the encoded cookie value. The dst +// argument is where the cookie will be decoded. It must be a pointer. +func (s *SecureCookie) Decode(name, value string, dst interface{}) error { + if s.err != nil { + return s.err + } + if s.hashKey == nil { + s.err = errHashKeyNotSet + return s.err + } + // 1. Check length. + if s.maxLength != 0 && len(value) > s.maxLength { + return errValueToDecodeTooLong + } + // 2. Decode from base64. + b, err := decode([]byte(value)) + if err != nil { + return err + } + // 3. Verify MAC. Value is "date|value|mac". + parts := bytes.SplitN(b, []byte("|"), 3) + if len(parts) != 3 { + return ErrMacInvalid + } + h := hmac.New(s.hashFunc, s.hashKey) + b = append([]byte(name+"|"), b[:len(b)-len(parts[2])-1]...) + if err = verifyMac(h, b, parts[2]); err != nil { + return err + } + // 4. Verify date ranges. + var t1 int64 + if t1, err = strconv.ParseInt(string(parts[0]), 10, 64); err != nil { + return errTimestampInvalid + } + t2 := s.timestamp() + if s.minAge != 0 && t1 > t2-s.minAge { + return errTimestampTooNew + } + if s.maxAge != 0 && t1 < t2-s.maxAge { + return errTimestampExpired + } + // 5. Decrypt (optional). + b, err = decode(parts[1]) + if err != nil { + return err + } + if s.block != nil { + if b, err = decrypt(s.block, b); err != nil { + return err + } + } + // 6. Deserialize. + if err = s.sz.Deserialize(b, dst); err != nil { + return cookieError{cause: err, typ: decodeError} + } + // Done. + return nil +} + +// timestamp returns the current timestamp, in seconds. +// +// For testing purposes, the function that generates the timestamp can be +// overridden. If not set, it will return time.Now().UTC().Unix(). +func (s *SecureCookie) timestamp() int64 { + if s.timeFunc == nil { + return time.Now().UTC().Unix() + } + return s.timeFunc() +} + +// Authentication ------------------------------------------------------------- + +// createMac creates a message authentication code (MAC). 
+func createMac(h hash.Hash, value []byte) []byte {
+	h.Write(value)
+	return h.Sum(nil)
+}
+
+// verifyMac verifies that a message authentication code (MAC) is valid.
+func verifyMac(h hash.Hash, value []byte, mac []byte) error {
+	mac2 := createMac(h, value)
+	// Check that both MACs are of equal length, as subtle.ConstantTimeCompare
+	// does not do this prior to Go 1.4.
+	if len(mac) == len(mac2) && subtle.ConstantTimeCompare(mac, mac2) == 1 {
+		return nil
+	}
+	return ErrMacInvalid
+}
+
+// Encryption -----------------------------------------------------------------
+
+// encrypt encrypts a value using the given block in counter mode.
+//
+// A random initialization vector (http://goo.gl/zF67k) with the length of the
+// block size is prepended to the resulting ciphertext.
+func encrypt(block cipher.Block, value []byte) ([]byte, error) {
+	iv := GenerateRandomKey(block.BlockSize())
+	if iv == nil {
+		return nil, errGeneratingIV
+	}
+	// Encrypt it.
+	stream := cipher.NewCTR(block, iv)
+	stream.XORKeyStream(value, value)
+	// Return iv + ciphertext.
+	return append(iv, value...), nil
+}
+
+// decrypt decrypts a value using the given block in counter mode.
+//
+// The value to be decrypted must be prepended by an initialization vector
+// (http://goo.gl/zF67k) with the length of the block size.
+func decrypt(block cipher.Block, value []byte) ([]byte, error) {
+	size := block.BlockSize()
+	if len(value) > size {
+		// Extract iv.
+		iv := value[:size]
+		// Extract ciphertext.
+		value = value[size:]
+		// Decrypt it.
+		stream := cipher.NewCTR(block, iv)
+		stream.XORKeyStream(value, value)
+		return value, nil
+	}
+	return nil, errDecryptionFailed
+}
+
+// Serialization --------------------------------------------------------------
+
+// Serialize encodes a value using gob.
+func (e GobEncoder) Serialize(src interface{}) ([]byte, error) {
+	buf := new(bytes.Buffer)
+	enc := gob.NewEncoder(buf)
+	if err := enc.Encode(src); err != nil {
+		return nil, cookieError{cause: err, typ: usageError}
+	}
+	return buf.Bytes(), nil
+}
+
+// Deserialize decodes a value using gob.
+func (e GobEncoder) Deserialize(src []byte, dst interface{}) error {
+	dec := gob.NewDecoder(bytes.NewBuffer(src))
+	if err := dec.Decode(dst); err != nil {
+		return cookieError{cause: err, typ: decodeError}
+	}
+	return nil
+}
+
+// Serialize encodes a value using encoding/json.
+func (e JSONEncoder) Serialize(src interface{}) ([]byte, error) {
+	buf := new(bytes.Buffer)
+	enc := json.NewEncoder(buf)
+	if err := enc.Encode(src); err != nil {
+		return nil, cookieError{cause: err, typ: usageError}
+	}
+	return buf.Bytes(), nil
+}
+
+// Deserialize decodes a value using encoding/json.
+func (e JSONEncoder) Deserialize(src []byte, dst interface{}) error {
+	dec := json.NewDecoder(bytes.NewReader(src))
+	if err := dec.Decode(dst); err != nil {
+		return cookieError{cause: err, typ: decodeError}
+	}
+	return nil
+}
+
+// Serialize passes a []byte through as-is.
+func (e NopEncoder) Serialize(src interface{}) ([]byte, error) {
+	if b, ok := src.([]byte); ok {
+		return b, nil
+	}
+
+	return nil, errValueNotByte
+}
+
+// Deserialize passes a []byte through as-is.
+func (e NopEncoder) Deserialize(src []byte, dst interface{}) error {
+	if dat, ok := dst.(*[]byte); ok {
+		*dat = src
+		return nil
+	}
+	return errValueNotBytePtr
+}
+
+// Encoding -------------------------------------------------------------------
+
+// encode encodes a value using base64.
+func encode(value []byte) []byte { + encoded := make([]byte, base64.URLEncoding.EncodedLen(len(value))) + base64.URLEncoding.Encode(encoded, value) + return encoded +} + +// decode decodes a cookie using base64. +func decode(value []byte) ([]byte, error) { + decoded := make([]byte, base64.URLEncoding.DecodedLen(len(value))) + b, err := base64.URLEncoding.Decode(decoded, value) + if err != nil { + return nil, cookieError{cause: err, typ: decodeError, msg: "base64 decode failed"} + } + return decoded[:b], nil +} + +// Helpers -------------------------------------------------------------------- + +// GenerateRandomKey creates a random key with the given length in bytes. +// On failure, returns nil. +// +// Callers should explicitly check for the possibility of a nil return, treat +// it as a failure of the system random number generator, and not continue. +func GenerateRandomKey(length int) []byte { + k := make([]byte, length) + if _, err := io.ReadFull(rand.Reader, k); err != nil { + return nil + } + return k +} + +// CodecsFromPairs returns a slice of SecureCookie instances. +// +// It is a convenience function to create a list of codecs for key rotation. Note +// that the generated Codecs will have the default options applied: callers +// should iterate over each Codec and type-assert the underlying *SecureCookie to +// change these. +// +// Example: +// +// codecs := securecookie.CodecsFromPairs( +// []byte("new-hash-key"), +// []byte("new-block-key"), +// []byte("old-hash-key"), +// []byte("old-block-key"), +// ) +// +// // Modify each instance. +// for _, s := range codecs { +// if cookie, ok := s.(*securecookie.SecureCookie); ok { +// cookie.MaxAge(86400 * 7) +// cookie.SetSerializer(securecookie.JSONEncoder{}) +// cookie.HashFunc(sha512.New512_256) +// } +// } +// +func CodecsFromPairs(keyPairs ...[]byte) []Codec { + codecs := make([]Codec, len(keyPairs)/2+len(keyPairs)%2) + for i := 0; i < len(keyPairs); i += 2 { + var blockKey []byte + if i+1 < len(keyPairs) { + blockKey = keyPairs[i+1] + } + codecs[i/2] = New(keyPairs[i], blockKey) + } + return codecs +} + +// EncodeMulti encodes a cookie value using a group of codecs. +// +// The codecs are tried in order. Multiple codecs are accepted to allow +// key rotation. +// +// On error, may return a MultiError. +func EncodeMulti(name string, value interface{}, codecs ...Codec) (string, error) { + if len(codecs) == 0 { + return "", errNoCodecs + } + + var errors MultiError + for _, codec := range codecs { + encoded, err := codec.Encode(name, value) + if err == nil { + return encoded, nil + } + errors = append(errors, err) + } + return "", errors +} + +// DecodeMulti decodes a cookie value using a group of codecs. +// +// The codecs are tried in order. Multiple codecs are accepted to allow +// key rotation. +// +// On error, may return a MultiError. +func DecodeMulti(name string, value string, dst interface{}, codecs ...Codec) error { + if len(codecs) == 0 { + return errNoCodecs + } + + var errors MultiError + for _, codec := range codecs { + err := codec.Decode(name, value, dst) + if err == nil { + return nil + } + errors = append(errors, err) + } + return errors +} + +// MultiError groups multiple errors. 
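+// It is returned by EncodeMulti and DecodeMulti when every codec in the
+// chain fails, with one collected error per codec tried.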
+type MultiError []error + +func (m MultiError) IsUsage() bool { return m.any(func(e Error) bool { return e.IsUsage() }) } +func (m MultiError) IsDecode() bool { return m.any(func(e Error) bool { return e.IsDecode() }) } +func (m MultiError) IsInternal() bool { return m.any(func(e Error) bool { return e.IsInternal() }) } + +// Cause returns nil for MultiError; there is no unique underlying cause in the +// general case. +// +// Note: we could conceivably return a non-nil Cause only when there is exactly +// one child error with a Cause. However, it would be brittle for client code +// to rely on the arity of causes inside a MultiError, so we have opted not to +// provide this functionality. Clients which really wish to access the Causes +// of the underlying errors are free to iterate through the errors themselves. +func (m MultiError) Cause() error { return nil } + +func (m MultiError) Error() string { + s, n := "", 0 + for _, e := range m { + if e != nil { + if n == 0 { + s = e.Error() + } + n++ + } + } + switch n { + case 0: + return "(0 errors)" + case 1: + return s + case 2: + return s + " (and 1 other error)" + } + return fmt.Sprintf("%s (and %d other errors)", s, n-1) +} + +// any returns true if any element of m is an Error for which pred returns true. +func (m MultiError) any(pred func(Error) bool) bool { + for _, e := range m { + if ourErr, ok := e.(Error); ok && pred(ourErr) { + return true + } + } + return false +} diff --git a/vendor/github.com/gorilla/securecookie/securecookie_test.go b/vendor/github.com/gorilla/securecookie/securecookie_test.go new file mode 100644 index 0000000..2b0f8a1 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/securecookie_test.go @@ -0,0 +1,301 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securecookie + +import ( + "crypto/aes" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "fmt" + "reflect" + "strings" + "testing" +) + +// Asserts that cookieError and MultiError are Error implementations. +var _ Error = cookieError{} +var _ Error = MultiError{} + +var testCookies = []interface{}{ + map[string]string{"foo": "bar"}, + map[string]string{"baz": "ding"}, +} + +var testStrings = []string{"foo", "bar", "baz"} + +func TestSecureCookie(t *testing.T) { + // TODO test too old / too new timestamps + s1 := New([]byte("12345"), []byte("1234567890123456")) + s2 := New([]byte("54321"), []byte("6543210987654321")) + value := map[string]interface{}{ + "foo": "bar", + "baz": 128, + } + + for i := 0; i < 50; i++ { + // Running this multiple times to check if any special character + // breaks encoding/decoding. + encoded, err1 := s1.Encode("sid", value) + if err1 != nil { + t.Error(err1) + continue + } + dst := make(map[string]interface{}) + err2 := s1.Decode("sid", encoded, &dst) + if err2 != nil { + t.Fatalf("%v: %v", err2, encoded) + } + if !reflect.DeepEqual(dst, value) { + t.Fatalf("Expected %v, got %v.", value, dst) + } + dst2 := make(map[string]interface{}) + err3 := s2.Decode("sid", encoded, &dst2) + if err3 == nil { + t.Fatalf("Expected failure decoding.") + } + err4, ok := err3.(Error) + if !ok { + t.Fatalf("Expected error to implement Error, got: %#v", err3) + } + if !err4.IsDecode() { + t.Fatalf("Expected DecodeError, got: %#v", err4) + } + + // Test other error type flags. 
+ if err4.IsUsage() { + t.Fatalf("Expected IsUsage() == false, got: %#v", err4) + } + if err4.IsInternal() { + t.Fatalf("Expected IsInternal() == false, got: %#v", err4) + } + } +} + +func TestSecureCookieNilKey(t *testing.T) { + s1 := New(nil, nil) + value := map[string]interface{}{ + "foo": "bar", + "baz": 128, + } + _, err := s1.Encode("sid", value) + if err != errHashKeyNotSet { + t.Fatal("Wrong error returned:", err) + } +} + +func TestDecodeInvalid(t *testing.T) { + // List of invalid cookies, which must not be accepted, base64-decoded + // (they will be encoded before passing to Decode). + invalidCookies := []string{ + "", + " ", + "\n", + "||", + "|||", + "cookie", + } + s := New([]byte("12345"), nil) + var dst string + for i, v := range invalidCookies { + for _, enc := range []*base64.Encoding{ + base64.StdEncoding, + base64.URLEncoding, + } { + err := s.Decode("name", enc.EncodeToString([]byte(v)), &dst) + if err == nil { + t.Fatalf("%d: expected failure decoding", i) + } + err2, ok := err.(Error) + if !ok || !err2.IsDecode() { + t.Fatalf("%d: Expected IsDecode(), got: %#v", i, err) + } + } + } +} + +func TestAuthentication(t *testing.T) { + hash := hmac.New(sha256.New, []byte("secret-key")) + for _, value := range testStrings { + hash.Reset() + signed := createMac(hash, []byte(value)) + hash.Reset() + err := verifyMac(hash, []byte(value), signed) + if err != nil { + t.Error(err) + } + } +} + +func TestEncryption(t *testing.T) { + block, err := aes.NewCipher([]byte("1234567890123456")) + if err != nil { + t.Fatalf("Block could not be created") + } + var encrypted, decrypted []byte + for _, value := range testStrings { + if encrypted, err = encrypt(block, []byte(value)); err != nil { + t.Error(err) + } else { + if decrypted, err = decrypt(block, encrypted); err != nil { + t.Error(err) + } + if string(decrypted) != value { + t.Errorf("Expected %v, got %v.", value, string(decrypted)) + } + } + } +} + +func TestGobSerialization(t *testing.T) { + var ( + sz GobEncoder + serialized []byte + deserialized map[string]string + err error + ) + for _, value := range testCookies { + if serialized, err = sz.Serialize(value); err != nil { + t.Error(err) + } else { + deserialized = make(map[string]string) + if err = sz.Deserialize(serialized, &deserialized); err != nil { + t.Error(err) + } + if fmt.Sprintf("%v", deserialized) != fmt.Sprintf("%v", value) { + t.Errorf("Expected %v, got %v.", value, deserialized) + } + } + } +} + +func TestJSONSerialization(t *testing.T) { + var ( + sz JSONEncoder + serialized []byte + deserialized map[string]string + err error + ) + for _, value := range testCookies { + if serialized, err = sz.Serialize(value); err != nil { + t.Error(err) + } else { + deserialized = make(map[string]string) + if err = sz.Deserialize(serialized, &deserialized); err != nil { + t.Error(err) + } + if fmt.Sprintf("%v", deserialized) != fmt.Sprintf("%v", value) { + t.Errorf("Expected %v, got %v.", value, deserialized) + } + } + } +} + +func TestNopSerialization(t *testing.T) { + cookieData := "fooobar123" + sz := NopEncoder{} + + if _, err := sz.Serialize(cookieData); err != errValueNotByte { + t.Fatal("Expected error passing string") + } + dat, err := sz.Serialize([]byte(cookieData)) + if err != nil { + t.Fatal(err) + } + if (string(dat)) != cookieData { + t.Fatal("Expected serialized data to be same as source") + } + + var dst []byte + if err = sz.Deserialize(dat, dst); err != errValueNotBytePtr { + t.Fatal("Expect error unless you pass a *[]byte") + } + if err = sz.Deserialize(dat, 
&dst); err != nil { + t.Fatal(err) + } + if (string(dst)) != cookieData { + t.Fatal("Expected deserialized data to be same as source") + } +} + +func TestEncoding(t *testing.T) { + for _, value := range testStrings { + encoded := encode([]byte(value)) + decoded, err := decode(encoded) + if err != nil { + t.Error(err) + } else if string(decoded) != value { + t.Errorf("Expected %v, got %s.", value, string(decoded)) + } + } +} + +func TestMultiError(t *testing.T) { + s1, s2 := New(nil, nil), New(nil, nil) + _, err := EncodeMulti("sid", "value", s1, s2) + if len(err.(MultiError)) != 2 { + t.Errorf("Expected 2 errors, got %s.", err) + } else { + if strings.Index(err.Error(), "hash key is not set") == -1 { + t.Errorf("Expected missing hash key error, got %s.", err.Error()) + } + ourErr, ok := err.(Error) + if !ok || !ourErr.IsUsage() { + t.Fatalf("Expected error to be a usage error; got %#v", err) + } + if ourErr.IsDecode() { + t.Errorf("Expected error NOT to be a decode error; got %#v", ourErr) + } + if ourErr.IsInternal() { + t.Errorf("Expected error NOT to be an internal error; got %#v", ourErr) + } + } +} + +func TestMultiNoCodecs(t *testing.T) { + _, err := EncodeMulti("foo", "bar") + if err != errNoCodecs { + t.Errorf("EncodeMulti: bad value for error, got: %v", err) + } + + var dst []byte + err = DecodeMulti("foo", "bar", &dst) + if err != errNoCodecs { + t.Errorf("DecodeMulti: bad value for error, got: %v", err) + } +} + +func TestMissingKey(t *testing.T) { + s1 := New(nil, nil) + + var dst []byte + err := s1.Decode("sid", "value", &dst) + if err != errHashKeyNotSet { + t.Fatalf("Expected %#v, got %#v", errHashKeyNotSet, err) + } + if err2, ok := err.(Error); !ok || !err2.IsUsage() { + t.Errorf("Expected missing hash key to be IsUsage(); was %#v", err) + } +} + +// ---------------------------------------------------------------------------- + +type FooBar struct { + Foo int + Bar string +} + +func TestCustomType(t *testing.T) { + s1 := New([]byte("12345"), []byte("1234567890123456")) + // Type is not registered in gob. (!!!) + src := &FooBar{42, "bar"} + encoded, _ := s1.Encode("sid", src) + + dst := &FooBar{} + _ = s1.Decode("sid", encoded, dst) + if dst.Foo != 42 || dst.Bar != "bar" { + t.Fatalf("Expected %#v, got %#v", src, dst) + } +} diff --git a/vendor/github.com/gorilla/sessions/.travis.yml b/vendor/github.com/gorilla/sessions/.travis.yml new file mode 100644 index 0000000..db17dd3 --- /dev/null +++ b/vendor/github.com/gorilla/sessions/.travis.yml @@ -0,0 +1,22 @@ +language: go +sudo: false + +matrix: + include: + - go: 1.3 + - go: 1.4 + - go: 1.5 + - go: 1.6 + - go: 1.7 + - go: tip + allow_failures: + - go: tip + +install: + - # skip + +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d .) + - go vet $(go list ./... | grep -v /vendor/) + - go test -v -race ./... diff --git a/vendor/github.com/gorilla/sessions/LICENSE b/vendor/github.com/gorilla/sessions/LICENSE new file mode 100644 index 0000000..0e5fb87 --- /dev/null +++ b/vendor/github.com/gorilla/sessions/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/gorilla/sessions/README.md b/vendor/github.com/gorilla/sessions/README.md
new file mode 100644
index 0000000..65e5e1b
--- /dev/null
+++ b/vendor/github.com/gorilla/sessions/README.md
@@ -0,0 +1,79 @@
+sessions
+========
+[![GoDoc](https://godoc.org/github.com/gorilla/sessions?status.svg)](https://godoc.org/github.com/gorilla/sessions) [![Build Status](https://travis-ci.org/gorilla/sessions.png?branch=master)](https://travis-ci.org/gorilla/sessions)
+
+gorilla/sessions provides cookie and filesystem sessions and infrastructure for
+custom session backends.
+
+The key features are:
+
+* Simple API: use it as an easy way to set signed (and optionally
+  encrypted) cookies.
+* Built-in backends to store sessions in cookies or the filesystem.
+* Flash messages: session values that last until read.
+* Convenient way to switch session persistence (aka "remember me") and set
+  other attributes.
+* Mechanism to rotate authentication and encryption keys.
+* Multiple sessions per request, even using different backends.
+* Interfaces and infrastructure for custom session backends: sessions from
+  different stores can be retrieved and batch-saved using a common API.
+
+Let's start with an example that shows the sessions API in a nutshell:
+
+```go
+	import (
+		"net/http"
+		"github.com/gorilla/sessions"
+	)
+
+	var store = sessions.NewCookieStore([]byte("something-very-secret"))
+
+	func MyHandler(w http.ResponseWriter, r *http.Request) {
+		// Get a session. We're ignoring the error resulting from decoding an
+		// existing session: Get() always returns a session, even if empty.
+		session, _ := store.Get(r, "session-name")
+		// Set some session values.
+		session.Values["foo"] = "bar"
+		session.Values[42] = 43
+		// Save it before we write to the response/return from the handler.
+		session.Save(r, w)
+	}
+```
+
+First we initialize a session store calling `NewCookieStore()` and passing a
+secret key used to authenticate the session. Inside the handler, we call
+`store.Get()` to retrieve an existing session or a new one. Then we set some
+session values in session.Values, which is a `map[interface{}]interface{}`.
+And finally we call `session.Save()` to save the session in the response.
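+
+Reading a value back follows the same pattern. Since `session.Values` is a
+`map[interface{}]interface{}`, stored values come back as `interface{}` and
+must be type-asserted before use. A minimal sketch (the handler name is
+hypothetical, and it assumes the `store` and imports from the example above
+plus `fmt`):
+
+```go
+	func MyReadHandler(w http.ResponseWriter, r *http.Request) {
+		// Get() always returns a session, even if empty.
+		session, _ := store.Get(r, "session-name")
+		// Assert the stored value back to its concrete type.
+		foo, ok := session.Values["foo"].(string)
+		if !ok {
+			http.Error(w, "value not set", http.StatusBadRequest)
+			return
+		}
+		fmt.Fprintln(w, "foo is", foo)
+	}
+```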
+
+Important Note: If you aren't using gorilla/mux, you need to wrap your handlers
+with
+[`context.ClearHandler`](http://www.gorillatoolkit.org/pkg/context#ClearHandler)
+or else you will leak memory! An easy way to do this is to wrap the top-level
+mux when calling http.ListenAndServe, as shown in the package documentation.
+
+More examples are available [on the Gorilla
+website](http://www.gorillatoolkit.org/pkg/sessions).
+
+## Store Implementations
+
+Other implementations of the `sessions.Store` interface:
+
+* [github.com/starJammer/gorilla-sessions-arangodb](https://github.com/starJammer/gorilla-sessions-arangodb) - ArangoDB
+* [github.com/yosssi/boltstore](https://github.com/yosssi/boltstore) - Bolt
+* [github.com/srinathgs/couchbasestore](https://github.com/srinathgs/couchbasestore) - Couchbase
+* [github.com/denizeren/dynamostore](https://github.com/denizeren/dynamostore) - DynamoDB on AWS
+* [github.com/bradleypeabody/gorilla-sessions-memcache](https://github.com/bradleypeabody/gorilla-sessions-memcache) - Memcache
+* [github.com/dsoprea/go-appengine-sessioncascade](https://github.com/dsoprea/go-appengine-sessioncascade) - Memcache/Datastore/Context in AppEngine
+* [github.com/kidstuff/mongostore](https://github.com/kidstuff/mongostore) - MongoDB
+* [github.com/srinathgs/mysqlstore](https://github.com/srinathgs/mysqlstore) - MySQL
+* [github.com/antonlindstrom/pgstore](https://github.com/antonlindstrom/pgstore) - PostgreSQL
+* [github.com/boj/redistore](https://github.com/boj/redistore) - Redis
+* [github.com/boj/rethinkstore](https://github.com/boj/rethinkstore) - RethinkDB
+* [github.com/boj/riakstore](https://github.com/boj/riakstore) - Riak
+* [github.com/michaeljs1990/sqlitestore](https://github.com/michaeljs1990/sqlitestore) - SQLite
+* [github.com/wader/gormstore](https://github.com/wader/gormstore) - GORM (MySQL, PostgreSQL, SQLite)
+
+## License
+
+BSD licensed. See the LICENSE file for details.
diff --git a/vendor/github.com/gorilla/sessions/doc.go b/vendor/github.com/gorilla/sessions/doc.go
new file mode 100644
index 0000000..668e05e
--- /dev/null
+++ b/vendor/github.com/gorilla/sessions/doc.go
@@ -0,0 +1,199 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package sessions provides cookie and filesystem sessions and
+infrastructure for custom session backends.
+
+The key features are:
+
+	* Simple API: use it as an easy way to set signed (and optionally
+	  encrypted) cookies.
+	* Built-in backends to store sessions in cookies or the filesystem.
+	* Flash messages: session values that last until read.
+	* Convenient way to switch session persistence (aka "remember me") and set
+	  other attributes.
+	* Mechanism to rotate authentication and encryption keys.
+	* Multiple sessions per request, even using different backends.
+	* Interfaces and infrastructure for custom session backends: sessions from
+	  different stores can be retrieved and batch-saved using a common API.
+
+Let's start with an example that shows the sessions API in a nutshell:
+
+	import (
+		"net/http"
+		"github.com/gorilla/sessions"
+	)
+
+	var store = sessions.NewCookieStore([]byte("something-very-secret"))
+
+	func MyHandler(w http.ResponseWriter, r *http.Request) {
+		// Get a session. Get() always returns a session, even if empty;
+		// here we also check the error from decoding an existing session.
+		session, err := store.Get(r, "session-name")
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+			return
+		}
+
+		// Set some session values.
+		session.Values["foo"] = "bar"
+		session.Values[42] = 43
+		// Save it before we write to the response/return from the handler.
+		session.Save(r, w)
+	}
+
+First we initialize a session store calling NewCookieStore() and passing a
+secret key used to authenticate the session. Inside the handler, we call
+store.Get() to retrieve an existing session or a new one. Then we set some
+session values in session.Values, which is a map[interface{}]interface{}.
+And finally we call session.Save() to save the session in the response.
+
+Note that in production code, we should check for errors when calling
+session.Save(r, w), and either display an error message or otherwise handle it.
+
+Save must be called before writing to the response, otherwise the session
+cookie will not be sent to the client.
+
+Important Note: If you aren't using gorilla/mux, you need to wrap your handlers
+with context.ClearHandler or else you will leak memory! An easy way to do this
+is to wrap the top-level mux when calling http.ListenAndServe:
+
+	http.ListenAndServe(":8080", context.ClearHandler(http.DefaultServeMux))
+
+The ClearHandler function is provided by the gorilla/context package.
+
+That's all you need to know for the basic usage. Let's take a look at other
+options, starting with flash messages.
+
+Flash messages are session values that last until read. The term appeared with
+Ruby On Rails a few years back. When we request a flash message, it is removed
+from the session. To add a flash, call session.AddFlash(), and to get all
+flashes, call session.Flashes(). Here is an example:
+
+	func MyHandler(w http.ResponseWriter, r *http.Request) {
+		// Get a session.
+		session, err := store.Get(r, "session-name")
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+			return
+		}
+
+		// Get the previous flashes, if any.
+		if flashes := session.Flashes(); len(flashes) > 0 {
+			// Use the flash values.
+		} else {
+			// Set a new flash.
+			session.AddFlash("Hello, flash messages world!")
+		}
+		session.Save(r, w)
+	}
+
+Flash messages are useful to set information to be read after a redirection,
+like after form submissions.
+
+There may also be cases where you want to store a complex datatype within a
+session, such as a struct. Sessions are serialised using the encoding/gob package,
+so it is easy to register new datatypes for storage in sessions:
+
+	import(
+		"encoding/gob"
+		"github.com/gorilla/sessions"
+	)
+
+	type Person struct {
+		FirstName string
+		LastName  string
+		Email     string
+		Age       int
+	}
+
+	type M map[string]interface{}
+
+	func init() {
+
+		gob.Register(&Person{})
+		gob.Register(&M{})
+	}
+
+As it's not possible to pass a raw type as a parameter to a function, gob.Register()
+relies on us passing it a value of the desired type. In the example above we've passed
+it a pointer to a struct and a pointer to a custom type representing a
+map[string]interface{}. (We could have passed non-pointer values if we wished.) This will
+then allow us to serialise/deserialise values of those types to and from our sessions.
+
+Note that because session values are stored in a map[interface{}]interface{}, there's
+a need to type-assert data when retrieving it.
We'll use the Person struct we registered above:
+
+	func MyHandler(w http.ResponseWriter, r *http.Request) {
+		session, err := store.Get(r, "session-name")
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+			return
+		}
+
+		// Retrieve our struct and type-assert it
+		val := session.Values["person"]
+		person, ok := val.(*Person)
+		if !ok {
+			// Handle the case that it's not an expected type
+		}
+
+		// Now we can use our person object
+	}
+
+By default, session cookies last for a month. This is probably too long for
+some cases, but it is easy to change this and other attributes during
+runtime. Sessions can be configured individually or the store can be
+configured and then all sessions saved using it will use that configuration.
+We access session.Options or store.Options to set a new configuration. The
+fields are basically a subset of http.Cookie fields. Let's change the
+maximum age of a session to one week:
+
+	session.Options = &sessions.Options{
+		Path:     "/",
+		MaxAge:   86400 * 7,
+		HttpOnly: true,
+	}
+
+Sometimes we may want to change authentication and/or encryption keys without
+breaking existing sessions. The CookieStore supports key rotation, and to use
+it you just need to set multiple authentication and encryption keys, in pairs,
+to be tested in order:
+
+	var store = sessions.NewCookieStore(
+		[]byte("new-authentication-key"),
+		[]byte("new-encryption-key"),
+		[]byte("old-authentication-key"),
+		[]byte("old-encryption-key"),
+	)
+
+New sessions will be saved using the first pair. Old sessions can still be
+read because the first pair will fail, and the second will be tested. This
+makes it easy to "rotate" secret keys and still be able to validate existing
+sessions. Note: for all pairs the encryption key is optional; set it to nil
+or omit it and encryption won't be used.
+
+Multiple sessions can be used in the same request, even with different
+session backends. When this happens, calling Save() on each session
+individually would be cumbersome, so we have a way to save all sessions
+at once: it's sessions.Save(). Here's an example:
+
+	var store = sessions.NewCookieStore([]byte("something-very-secret"))
+
+	func MyHandler(w http.ResponseWriter, r *http.Request) {
+		// Get a session and set a value.
+		session1, _ := store.Get(r, "session-one")
+		session1.Values["foo"] = "bar"
+		// Get another session and set another value.
+		session2, _ := store.Get(r, "session-two")
+		session2.Values[42] = 43
+		// Save all sessions.
+		sessions.Save(r, w)
+	}
+
+This is possible because when we call Get() from a session store, it adds the
+session to a common registry. Save() uses it to save all registered sessions.
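+
+Like the per-session Save(), the batch sessions.Save() returns an error; when
+one or more stores fail, it is a MultiError collecting the individual
+failures. A sketch of checking it in the handler above:
+
+	if err := sessions.Save(r, w); err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}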
+*/ +package sessions diff --git a/vendor/github.com/gorilla/sessions/lex.go b/vendor/github.com/gorilla/sessions/lex.go new file mode 100644 index 0000000..4bbbe10 --- /dev/null +++ b/vendor/github.com/gorilla/sessions/lex.go @@ -0,0 +1,102 @@ +// This file contains code adapted from the Go standard library +// https://github.com/golang/go/blob/39ad0fd0789872f9469167be7fe9578625ff246e/src/net/http/lex.go + +package sessions + +import "strings" + +var isTokenTable = [127]bool{ + '!': true, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, +} + +func isToken(r rune) bool { + i := int(r) + return i < len(isTokenTable) && isTokenTable[i] +} + +func isNotToken(r rune) bool { + return !isToken(r) +} + +func isCookieNameValid(raw string) bool { + if raw == "" { + return false + } + return strings.IndexFunc(raw, isNotToken) < 0 +} diff --git a/vendor/github.com/gorilla/sessions/sessions.go b/vendor/github.com/gorilla/sessions/sessions.go new file mode 100644 index 0000000..fe0d2bc --- /dev/null +++ b/vendor/github.com/gorilla/sessions/sessions.go @@ -0,0 +1,241 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sessions + +import ( + "encoding/gob" + "fmt" + "net/http" + "time" + + "github.com/gorilla/context" +) + +// Default flashes key. +const flashesKey = "_flash" + +// Options -------------------------------------------------------------------- + +// Options stores configuration for a session or session store. +// +// Fields are a subset of http.Cookie fields. +type Options struct { + Path string + Domain string + // MaxAge=0 means no 'Max-Age' attribute specified. + // MaxAge<0 means delete cookie now, equivalently 'Max-Age: 0'. + // MaxAge>0 means Max-Age attribute present and given in seconds. + MaxAge int + Secure bool + HttpOnly bool +} + +// Session -------------------------------------------------------------------- + +// NewSession is called by session stores to create a new session instance. +func NewSession(store Store, name string) *Session { + return &Session{ + Values: make(map[interface{}]interface{}), + store: store, + name: name, + } +} + +// Session stores the values and optional configuration for a session. +type Session struct { + // The ID of the session, generated by stores. It should not be used for + // user data. + ID string + // Values contains the user-data for the session. 
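+	// With the default gob-based codecs, custom value types must be
+	// registered with gob.Register before they can be stored here.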
+ Values map[interface{}]interface{} + Options *Options + IsNew bool + store Store + name string +} + +// Flashes returns a slice of flash messages from the session. +// +// A single variadic argument is accepted, and it is optional: it defines +// the flash key. If not defined "_flash" is used by default. +func (s *Session) Flashes(vars ...string) []interface{} { + var flashes []interface{} + key := flashesKey + if len(vars) > 0 { + key = vars[0] + } + if v, ok := s.Values[key]; ok { + // Drop the flashes and return it. + delete(s.Values, key) + flashes = v.([]interface{}) + } + return flashes +} + +// AddFlash adds a flash message to the session. +// +// A single variadic argument is accepted, and it is optional: it defines +// the flash key. If not defined "_flash" is used by default. +func (s *Session) AddFlash(value interface{}, vars ...string) { + key := flashesKey + if len(vars) > 0 { + key = vars[0] + } + var flashes []interface{} + if v, ok := s.Values[key]; ok { + flashes = v.([]interface{}) + } + s.Values[key] = append(flashes, value) +} + +// Save is a convenience method to save this session. It is the same as calling +// store.Save(request, response, session). You should call Save before writing to +// the response or returning from the handler. +func (s *Session) Save(r *http.Request, w http.ResponseWriter) error { + return s.store.Save(r, w, s) +} + +// Name returns the name used to register the session. +func (s *Session) Name() string { + return s.name +} + +// Store returns the session store used to register the session. +func (s *Session) Store() Store { + return s.store +} + +// Registry ------------------------------------------------------------------- + +// sessionInfo stores a session tracked by the registry. +type sessionInfo struct { + s *Session + e error +} + +// contextKey is the type used to store the registry in the context. +type contextKey int + +// registryKey is the key used to store the registry in the context. +const registryKey contextKey = 0 + +// GetRegistry returns a registry instance for the current request. +func GetRegistry(r *http.Request) *Registry { + registry := context.Get(r, registryKey) + if registry != nil { + return registry.(*Registry) + } + newRegistry := &Registry{ + request: r, + sessions: make(map[string]sessionInfo), + } + context.Set(r, registryKey, newRegistry) + return newRegistry +} + +// Registry stores sessions used during a request. +type Registry struct { + request *http.Request + sessions map[string]sessionInfo +} + +// Get registers and returns a session for the given name and session store. +// +// It returns a new session if there are no sessions registered for the name. +func (s *Registry) Get(store Store, name string) (session *Session, err error) { + if !isCookieNameValid(name) { + return nil, fmt.Errorf("sessions: invalid character in cookie name: %s", name) + } + if info, ok := s.sessions[name]; ok { + session, err = info.s, info.e + } else { + session, err = store.New(s.request, name) + session.name = name + s.sessions[name] = sessionInfo{s: session, e: err} + } + session.store = store + return +} + +// Save saves all sessions registered for the current request. 
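+// Every registered session is attempted; failures are collected and returned
+// as a MultiError.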
+func (s *Registry) Save(w http.ResponseWriter) error { + var errMulti MultiError + for name, info := range s.sessions { + session := info.s + if session.store == nil { + errMulti = append(errMulti, fmt.Errorf( + "sessions: missing store for session %q", name)) + } else if err := session.store.Save(s.request, w, session); err != nil { + errMulti = append(errMulti, fmt.Errorf( + "sessions: error saving session %q -- %v", name, err)) + } + } + if errMulti != nil { + return errMulti + } + return nil +} + +// Helpers -------------------------------------------------------------------- + +func init() { + gob.Register([]interface{}{}) +} + +// Save saves all sessions used during the current request. +func Save(r *http.Request, w http.ResponseWriter) error { + return GetRegistry(r).Save(w) +} + +// NewCookie returns an http.Cookie with the options set. It also sets +// the Expires field calculated based on the MaxAge value, for Internet +// Explorer compatibility. +func NewCookie(name, value string, options *Options) *http.Cookie { + cookie := &http.Cookie{ + Name: name, + Value: value, + Path: options.Path, + Domain: options.Domain, + MaxAge: options.MaxAge, + Secure: options.Secure, + HttpOnly: options.HttpOnly, + } + if options.MaxAge > 0 { + d := time.Duration(options.MaxAge) * time.Second + cookie.Expires = time.Now().Add(d) + } else if options.MaxAge < 0 { + // Set it to the past to expire now. + cookie.Expires = time.Unix(1, 0) + } + return cookie +} + +// Error ---------------------------------------------------------------------- + +// MultiError stores multiple errors. +// +// Borrowed from the App Engine SDK. +type MultiError []error + +func (m MultiError) Error() string { + s, n := "", 0 + for _, e := range m { + if e != nil { + if n == 0 { + s = e.Error() + } + n++ + } + } + switch n { + case 0: + return "(0 errors)" + case 1: + return s + case 2: + return s + " (and 1 other error)" + } + return fmt.Sprintf("%s (and %d other errors)", s, n-1) +} diff --git a/vendor/github.com/gorilla/sessions/sessions_test.go b/vendor/github.com/gorilla/sessions/sessions_test.go new file mode 100644 index 0000000..c166b05 --- /dev/null +++ b/vendor/github.com/gorilla/sessions/sessions_test.go @@ -0,0 +1,160 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sessions + +import ( + "bytes" + "encoding/gob" + "net/http" + "net/http/httptest" + "testing" +) + +// NewRecorder returns an initialized ResponseRecorder. +func NewRecorder() *httptest.ResponseRecorder { + return &httptest.ResponseRecorder{ + HeaderMap: make(http.Header), + Body: new(bytes.Buffer), + } +} + +// DefaultRemoteAddr is the default remote address to return in RemoteAddr if +// an explicit DefaultRemoteAddr isn't set on ResponseRecorder. +const DefaultRemoteAddr = "1.2.3.4" + +// ---------------------------------------------------------------------------- + +type FlashMessage struct { + Type int + Message string +} + +func TestFlashes(t *testing.T) { + var req *http.Request + var rsp *httptest.ResponseRecorder + var hdr http.Header + var err error + var ok bool + var cookies []string + var session *Session + var flashes []interface{} + + store := NewCookieStore([]byte("secret-key")) + + // Round 1 ---------------------------------------------------------------- + + req, _ = http.NewRequest("GET", "http://localhost:8080/", nil) + rsp = NewRecorder() + // Get a session. 
+ if session, err = store.Get(req, "session-key"); err != nil { + t.Fatalf("Error getting session: %v", err) + } + // Get a flash. + flashes = session.Flashes() + if len(flashes) != 0 { + t.Errorf("Expected empty flashes; Got %v", flashes) + } + // Add some flashes. + session.AddFlash("foo") + session.AddFlash("bar") + // Custom key. + session.AddFlash("baz", "custom_key") + // Save. + if err = Save(req, rsp); err != nil { + t.Fatalf("Error saving session: %v", err) + } + hdr = rsp.Header() + cookies, ok = hdr["Set-Cookie"] + if !ok || len(cookies) != 1 { + t.Fatal("No cookies. Header:", hdr) + } + + if _, err = store.Get(req, "session:key"); err.Error() != "sessions: invalid character in cookie name: session:key" { + t.Fatalf("Expected error due to invalid cookie name") + } + + // Round 2 ---------------------------------------------------------------- + + req, _ = http.NewRequest("GET", "http://localhost:8080/", nil) + req.Header.Add("Cookie", cookies[0]) + rsp = NewRecorder() + // Get a session. + if session, err = store.Get(req, "session-key"); err != nil { + t.Fatalf("Error getting session: %v", err) + } + // Check all saved values. + flashes = session.Flashes() + if len(flashes) != 2 { + t.Fatalf("Expected flashes; Got %v", flashes) + } + if flashes[0] != "foo" || flashes[1] != "bar" { + t.Errorf("Expected foo,bar; Got %v", flashes) + } + flashes = session.Flashes() + if len(flashes) != 0 { + t.Errorf("Expected dumped flashes; Got %v", flashes) + } + // Custom key. + flashes = session.Flashes("custom_key") + if len(flashes) != 1 { + t.Errorf("Expected flashes; Got %v", flashes) + } else if flashes[0] != "baz" { + t.Errorf("Expected baz; Got %v", flashes) + } + flashes = session.Flashes("custom_key") + if len(flashes) != 0 { + t.Errorf("Expected dumped flashes; Got %v", flashes) + } + + // Round 3 ---------------------------------------------------------------- + // Custom type + + req, _ = http.NewRequest("GET", "http://localhost:8080/", nil) + rsp = NewRecorder() + // Get a session. + if session, err = store.Get(req, "session-key"); err != nil { + t.Fatalf("Error getting session: %v", err) + } + // Get a flash. + flashes = session.Flashes() + if len(flashes) != 0 { + t.Errorf("Expected empty flashes; Got %v", flashes) + } + // Add some flashes. + session.AddFlash(&FlashMessage{42, "foo"}) + // Save. + if err = Save(req, rsp); err != nil { + t.Fatalf("Error saving session: %v", err) + } + hdr = rsp.Header() + cookies, ok = hdr["Set-Cookie"] + if !ok || len(cookies) != 1 { + t.Fatal("No cookies. Header:", hdr) + } + + // Round 4 ---------------------------------------------------------------- + // Custom type + + req, _ = http.NewRequest("GET", "http://localhost:8080/", nil) + req.Header.Add("Cookie", cookies[0]) + rsp = NewRecorder() + // Get a session. + if session, err = store.Get(req, "session-key"); err != nil { + t.Fatalf("Error getting session: %v", err) + } + // Check all saved values. + flashes = session.Flashes() + if len(flashes) != 1 { + t.Fatalf("Expected flashes; Got %v", flashes) + } + custom := flashes[0].(FlashMessage) + if custom.Type != 42 || custom.Message != "foo" { + t.Errorf("Expected %#v, got %#v", FlashMessage{42, "foo"}, custom) + } +} + +func init() { + gob.Register(FlashMessage{}) +} diff --git a/vendor/github.com/gorilla/sessions/store.go b/vendor/github.com/gorilla/sessions/store.go new file mode 100644 index 0000000..4ff6b6c --- /dev/null +++ b/vendor/github.com/gorilla/sessions/store.go @@ -0,0 +1,295 @@ +// Copyright 2012 The Gorilla Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sessions + +import ( + "encoding/base32" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/gorilla/securecookie" +) + +// Store is an interface for custom session stores. +// +// See CookieStore and FilesystemStore for examples. +type Store interface { + // Get should return a cached session. + Get(r *http.Request, name string) (*Session, error) + + // New should create and return a new session. + // + // Note that New should never return a nil session, even in the case of + // an error if using the Registry infrastructure to cache the session. + New(r *http.Request, name string) (*Session, error) + + // Save should persist session to the underlying store implementation. + Save(r *http.Request, w http.ResponseWriter, s *Session) error +} + +// CookieStore ---------------------------------------------------------------- + +// NewCookieStore returns a new CookieStore. +// +// Keys are defined in pairs to allow key rotation, but the common case is +// to set a single authentication key and optionally an encryption key. +// +// The first key in a pair is used for authentication and the second for +// encryption. The encryption key can be set to nil or omitted in the last +// pair, but the authentication key is required in all pairs. +// +// It is recommended to use an authentication key with 32 or 64 bytes. +// The encryption key, if set, must be either 16, 24, or 32 bytes to select +// AES-128, AES-192, or AES-256 modes. +// +// Use the convenience function securecookie.GenerateRandomKey() to create +// strong keys. +func NewCookieStore(keyPairs ...[]byte) *CookieStore { + cs := &CookieStore{ + Codecs: securecookie.CodecsFromPairs(keyPairs...), + Options: &Options{ + Path: "/", + MaxAge: 86400 * 30, + }, + } + + cs.MaxAge(cs.Options.MaxAge) + return cs +} + +// CookieStore stores sessions using secure cookies. +type CookieStore struct { + Codecs []securecookie.Codec + Options *Options // default configuration +} + +// Get returns a session for the given name after adding it to the registry. +// +// It returns a new session if the sessions doesn't exist. Access IsNew on +// the session to check if it is an existing session or a new one. +// +// It returns a new session and an error if the session exists but could +// not be decoded. +func (s *CookieStore) Get(r *http.Request, name string) (*Session, error) { + return GetRegistry(r).Get(s, name) +} + +// New returns a session for the given name without adding it to the registry. +// +// The difference between New() and Get() is that calling New() twice will +// decode the session data twice, while Get() registers and reuses the same +// decoded session after the first call. +func (s *CookieStore) New(r *http.Request, name string) (*Session, error) { + session := NewSession(s, name) + opts := *s.Options + session.Options = &opts + session.IsNew = true + var err error + if c, errCookie := r.Cookie(name); errCookie == nil { + err = securecookie.DecodeMulti(name, c.Value, &session.Values, + s.Codecs...) + if err == nil { + session.IsNew = false + } + } + return session, err +} + +// Save adds a single session to the response. +func (s *CookieStore) Save(r *http.Request, w http.ResponseWriter, + session *Session) error { + encoded, err := securecookie.EncodeMulti(session.Name(), session.Values, + s.Codecs...) 
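+	// EncodeMulti signs (and, if an encryption key was supplied, encrypts)
+	// the session values; it fails if the encoded value exceeds the codec's
+	// maximum length.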
+	if err != nil {
+		return err
+	}
+	http.SetCookie(w, NewCookie(session.Name(), encoded, session.Options))
+	return nil
+}
+
+// MaxAge sets the maximum age for the store and the underlying cookie
+// implementation. Individual sessions can be deleted by setting Options.MaxAge
+// = -1 for that session.
+func (s *CookieStore) MaxAge(age int) {
+	s.Options.MaxAge = age
+
+	// Set the maxAge for each securecookie instance.
+	for _, codec := range s.Codecs {
+		if sc, ok := codec.(*securecookie.SecureCookie); ok {
+			sc.MaxAge(age)
+		}
+	}
+}
+
+// FilesystemStore ------------------------------------------------------------
+
+var fileMutex sync.RWMutex
+
+// NewFilesystemStore returns a new FilesystemStore.
+//
+// The path argument is the directory where sessions will be saved. If empty
+// it will use os.TempDir().
+//
+// See NewCookieStore() for a description of the other parameters.
+func NewFilesystemStore(path string, keyPairs ...[]byte) *FilesystemStore {
+	if path == "" {
+		path = os.TempDir()
+	}
+	fs := &FilesystemStore{
+		Codecs: securecookie.CodecsFromPairs(keyPairs...),
+		Options: &Options{
+			Path:   "/",
+			MaxAge: 86400 * 30,
+		},
+		path: path,
+	}
+
+	fs.MaxAge(fs.Options.MaxAge)
+	return fs
+}
+
+// FilesystemStore stores sessions in the filesystem.
+//
+// It also serves as a reference for custom stores.
+//
+// This store is still experimental and not well tested. Feedback is welcome.
+type FilesystemStore struct {
+	Codecs  []securecookie.Codec
+	Options *Options // default configuration
+	path    string
+}
+
+// MaxLength restricts the maximum length of new sessions to l.
+// If l is 0 there is no limit to the size of a session; use with caution.
+// The default for a new FilesystemStore is 4096.
+func (s *FilesystemStore) MaxLength(l int) {
+	for _, c := range s.Codecs {
+		if codec, ok := c.(*securecookie.SecureCookie); ok {
+			codec.MaxLength(l)
+		}
+	}
+}
+
+// Get returns a session for the given name after adding it to the registry.
+//
+// See CookieStore.Get().
+func (s *FilesystemStore) Get(r *http.Request, name string) (*Session, error) {
+	return GetRegistry(r).Get(s, name)
+}
+
+// New returns a session for the given name without adding it to the registry.
+//
+// See CookieStore.New().
+func (s *FilesystemStore) New(r *http.Request, name string) (*Session, error) {
+	session := NewSession(s, name)
+	opts := *s.Options
+	session.Options = &opts
+	session.IsNew = true
+	var err error
+	if c, errCookie := r.Cookie(name); errCookie == nil {
+		err = securecookie.DecodeMulti(name, c.Value, &session.ID, s.Codecs...)
+		if err == nil {
+			err = s.load(session)
+			if err == nil {
+				session.IsNew = false
+			}
+		}
+	}
+	return session, err
+}
+
+// Save adds a single session to the response.
+//
+// If the Options.MaxAge of the session is <= 0 then the session file will be
+// deleted from the store path. This enforces session expiry on the server
+// side, so there is no need to trust the cookie management in the web
+// browser.
+func (s *FilesystemStore) Save(r *http.Request, w http.ResponseWriter,
+	session *Session) error {
+	// Delete if max-age is <= 0
+	if session.Options.MaxAge <= 0 {
+		if err := s.erase(session); err != nil {
+			return err
+		}
+		http.SetCookie(w, NewCookie(session.Name(), "", session.Options))
+		return nil
+	}
+
+	if session.ID == "" {
+		// Because the ID is used in the filename, encode it to
+		// use alphanumeric characters only.
+		session.ID = strings.TrimRight(
+			base32.StdEncoding.EncodeToString(
+				securecookie.GenerateRandomKey(32)), "=")
+	}
+	if err := s.save(session); err != nil {
+		return err
+	}
+	encoded, err := securecookie.EncodeMulti(session.Name(), session.ID,
+		s.Codecs...)
+	if err != nil {
+		return err
+	}
+	http.SetCookie(w, NewCookie(session.Name(), encoded, session.Options))
+	return nil
+}
+
+// MaxAge sets the maximum age for the store and the underlying cookie
+// implementation. Individual sessions can be deleted by setting Options.MaxAge
+// = -1 for that session.
+func (s *FilesystemStore) MaxAge(age int) {
+	s.Options.MaxAge = age
+
+	// Set the maxAge for each securecookie instance.
+	for _, codec := range s.Codecs {
+		if sc, ok := codec.(*securecookie.SecureCookie); ok {
+			sc.MaxAge(age)
+		}
+	}
+}
+
+// save writes encoded session.Values to a file.
+func (s *FilesystemStore) save(session *Session) error {
+	encoded, err := securecookie.EncodeMulti(session.Name(), session.Values,
+		s.Codecs...)
+	if err != nil {
+		return err
+	}
+	filename := filepath.Join(s.path, "session_"+session.ID)
+	fileMutex.Lock()
+	defer fileMutex.Unlock()
+	return ioutil.WriteFile(filename, []byte(encoded), 0600)
+}
+
+// load reads a file and decodes its content into session.Values.
+func (s *FilesystemStore) load(session *Session) error {
+	filename := filepath.Join(s.path, "session_"+session.ID)
+	fileMutex.RLock()
+	defer fileMutex.RUnlock()
+	fdata, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return err
+	}
+	if err = securecookie.DecodeMulti(session.Name(), string(fdata),
+		&session.Values, s.Codecs...); err != nil {
+		return err
+	}
+	return nil
+}
+
+// erase deletes the session file from the store path.
+func (s *FilesystemStore) erase(session *Session) error {
+	filename := filepath.Join(s.path, "session_"+session.ID)
+
+	// Removing the file mutates the store, so take the write lock rather
+	// than the read lock; otherwise a concurrent load could observe a
+	// half-removed file.
+	fileMutex.Lock()
+	defer fileMutex.Unlock()
+
+	err := os.Remove(filename)
+	return err
+}
diff --git a/vendor/github.com/gorilla/sessions/store_test.go b/vendor/github.com/gorilla/sessions/store_test.go
new file mode 100644
index 0000000..bfc53fa
--- /dev/null
+++ b/vendor/github.com/gorilla/sessions/store_test.go
@@ -0,0 +1,125 @@
+package sessions
+
+import (
+	"encoding/base64"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+)
+
+// Test for GH-8 for CookieStore
+func TestGH8CookieStore(t *testing.T) {
+	originalPath := "/"
+	store := NewCookieStore()
+	store.Options.Path = originalPath
+	req, err := http.NewRequest("GET", "http://www.example.com", nil)
+	if err != nil {
+		t.Fatal("failed to create request", err)
+	}
+
+	session, err := store.New(req, "hello")
+	if err != nil {
+		t.Fatal("failed to create session", err)
+	}
+
+	store.Options.Path = "/foo"
+	if session.Options.Path != originalPath {
+		t.Fatalf("bad session path: got %q, want %q", session.Options.Path, originalPath)
+	}
+}
+
+// Test for GH-8 for FilesystemStore
+func TestGH8FilesystemStore(t *testing.T) {
+	originalPath := "/"
+	store := NewFilesystemStore("")
+	store.Options.Path = originalPath
+	req, err := http.NewRequest("GET", "http://www.example.com", nil)
+	if err != nil {
+		t.Fatal("failed to create request", err)
+	}
+
+	session, err := store.New(req, "hello")
+	if err != nil {
+		t.Fatal("failed to create session", err)
+	}
+
+	store.Options.Path = "/foo"
+	if session.Options.Path != originalPath {
+		t.Fatalf("bad session path: got %q, want %q", session.Options.Path, originalPath)
+	}
+}
+
+// Test for GH-2.
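+// Saving a value larger than the codec's default 4096-byte MaxLength should
+// fail until the limit is raised via store.MaxLength, as exercised below.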
+func TestGH2MaxLength(t *testing.T) { + store := NewFilesystemStore("", []byte("some key")) + req, err := http.NewRequest("GET", "http://www.example.com", nil) + if err != nil { + t.Fatal("failed to create request", err) + } + w := httptest.NewRecorder() + + session, err := store.New(req, "my session") + session.Values["big"] = make([]byte, base64.StdEncoding.DecodedLen(4096*2)) + err = session.Save(req, w) + if err == nil { + t.Fatal("expected an error, got nil") + } + + store.MaxLength(4096 * 3) // A bit more than the value size to account for encoding overhead. + err = session.Save(req, w) + if err != nil { + t.Fatal("failed to Save:", err) + } +} + +// Test delete filesystem store with max-age: -1 +func TestGH8FilesystemStoreDelete(t *testing.T) { + store := NewFilesystemStore("", []byte("some key")) + req, err := http.NewRequest("GET", "http://www.example.com", nil) + if err != nil { + t.Fatal("failed to create request", err) + } + w := httptest.NewRecorder() + + session, err := store.New(req, "hello") + if err != nil { + t.Fatal("failed to create session", err) + } + + err = session.Save(req, w) + if err != nil { + t.Fatal("failed to save session", err) + } + + session.Options.MaxAge = -1 + err = session.Save(req, w) + if err != nil { + t.Fatal("failed to delete session", err) + } +} + +// Test delete filesystem store with max-age: 0 +func TestGH8FilesystemStoreDelete2(t *testing.T) { + store := NewFilesystemStore("", []byte("some key")) + req, err := http.NewRequest("GET", "http://www.example.com", nil) + if err != nil { + t.Fatal("failed to create request", err) + } + w := httptest.NewRecorder() + + session, err := store.New(req, "hello") + if err != nil { + t.Fatal("failed to create session", err) + } + + err = session.Save(req, w) + if err != nil { + t.Fatal("failed to save session", err) + } + + session.Options.MaxAge = 0 + err = session.Save(req, w) + if err != nil { + t.Fatal("failed to delete session", err) + } +} diff --git a/vendor/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/inconshreveable/mousetrap/LICENSE new file mode 100644 index 0000000..5f0d1fb --- /dev/null +++ b/vendor/github.com/inconshreveable/mousetrap/LICENSE @@ -0,0 +1,13 @@ +Copyright 2014 Alan Shreve + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md new file mode 100644 index 0000000..7a950d1 --- /dev/null +++ b/vendor/github.com/inconshreveable/mousetrap/README.md @@ -0,0 +1,23 @@ +# mousetrap + +mousetrap is a tiny library that answers a single question. + +On a Windows machine, was the process invoked by someone double clicking on +the executable file while browsing in explorer? + +### Motivation + +Windows developers unfamiliar with command line tools will often "double-click" +the executable for a tool. Because most CLI tools print the help and then exit +when invoked without arguments, this is often very frustrating for those users. 
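+
+For example (an illustrative sketch, not part of this library's code), a
+friendlier tool might print instructions and pause before exiting, so the
+console window does not vanish immediately:
+
+    package main
+
+    import (
+        "bufio"
+        "fmt"
+        "os"
+
+        "github.com/inconshreveable/mousetrap"
+    )
+
+    func main() {
+        if mousetrap.StartedByExplorer() {
+            fmt.Println("This is a command line tool; run it from a terminal.")
+            fmt.Println("Press enter to exit...")
+            bufio.NewReader(os.Stdin).ReadString('\n')
+        }
+        // normal command line behavior continues here
+    }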
+ +mousetrap provides a way to detect these invocations so that you can provide +more helpful behavior and instructions on how to run the CLI tool. To see what +this looks like, both from an organizational and a technical perspective, see +https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/ + +### The interface + +The library exposes a single interface: + + func StartedByExplorer() (bool) diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go new file mode 100644 index 0000000..9d2d8a4 --- /dev/null +++ b/vendor/github.com/inconshreveable/mousetrap/trap_others.go @@ -0,0 +1,15 @@ +// +build !windows + +package mousetrap + +// StartedByExplorer returns true if the program was invoked by the user +// double-clicking on the executable from explorer.exe +// +// It is conservative and returns false if any of the internal calls fail. +// It does not guarantee that the program was run from a terminal. It only can tell you +// whether it was launched from explorer.exe +// +// On non-Windows platforms, it always returns false. +func StartedByExplorer() bool { + return false +} diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go new file mode 100644 index 0000000..336142a --- /dev/null +++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go @@ -0,0 +1,98 @@ +// +build windows +// +build !go1.4 + +package mousetrap + +import ( + "fmt" + "os" + "syscall" + "unsafe" +) + +const ( + // defined by the Win32 API + th32cs_snapprocess uintptr = 0x2 +) + +var ( + kernel = syscall.MustLoadDLL("kernel32.dll") + CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot") + Process32First = kernel.MustFindProc("Process32FirstW") + Process32Next = kernel.MustFindProc("Process32NextW") +) + +// ProcessEntry32 structure defined by the Win32 API +type processEntry32 struct { + dwSize uint32 + cntUsage uint32 + th32ProcessID uint32 + th32DefaultHeapID int + th32ModuleID uint32 + cntThreads uint32 + th32ParentProcessID uint32 + pcPriClassBase int32 + dwFlags uint32 + szExeFile [syscall.MAX_PATH]uint16 +} + +func getProcessEntry(pid int) (pe *processEntry32, err error) { + snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0)) + if snapshot == uintptr(syscall.InvalidHandle) { + err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1) + return + } + defer syscall.CloseHandle(syscall.Handle(snapshot)) + + var processEntry processEntry32 + processEntry.dwSize = uint32(unsafe.Sizeof(processEntry)) + ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) + if ok == 0 { + err = fmt.Errorf("Process32First: %v", e1) + return + } + + for { + if processEntry.th32ProcessID == uint32(pid) { + pe = &processEntry + return + } + + ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) + if ok == 0 { + err = fmt.Errorf("Process32Next: %v", e1) + return + } + } +} + +func getppid() (pid int, err error) { + pe, err := getProcessEntry(os.Getpid()) + if err != nil { + return + } + + pid = int(pe.th32ParentProcessID) + return +} + +// StartedByExplorer returns true if the program was invoked by the user double-clicking +// on the executable from explorer.exe +// +// It is conservative and returns false if any of the internal calls fail. +// It does not guarantee that the program was run from a terminal. 
It only can tell you +// whether it was launched from explorer.exe +func StartedByExplorer() bool { + ppid, err := getppid() + if err != nil { + return false + } + + pe, err := getProcessEntry(ppid) + if err != nil { + return false + } + + name := syscall.UTF16ToString(pe.szExeFile[:]) + return name == "explorer.exe" +} diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go new file mode 100644 index 0000000..9a28e57 --- /dev/null +++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go @@ -0,0 +1,46 @@ +// +build windows +// +build go1.4 + +package mousetrap + +import ( + "os" + "syscall" + "unsafe" +) + +func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) { + snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0) + if err != nil { + return nil, err + } + defer syscall.CloseHandle(snapshot) + var procEntry syscall.ProcessEntry32 + procEntry.Size = uint32(unsafe.Sizeof(procEntry)) + if err = syscall.Process32First(snapshot, &procEntry); err != nil { + return nil, err + } + for { + if procEntry.ProcessID == uint32(pid) { + return &procEntry, nil + } + err = syscall.Process32Next(snapshot, &procEntry) + if err != nil { + return nil, err + } + } +} + +// StartedByExplorer returns true if the program was invoked by the user double-clicking +// on the executable from explorer.exe +// +// It is conservative and returns false if any of the internal calls fail. +// It does not guarantee that the program was run from a terminal. It only can tell you +// whether it was launched from explorer.exe +func StartedByExplorer() bool { + pe, err := getProcessEntry(os.Getppid()) + if err != nil { + return false + } + return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:]) +} diff --git a/vendor/github.com/jmoiron/sqlx/.gitignore b/vendor/github.com/jmoiron/sqlx/.gitignore new file mode 100644 index 0000000..529841c --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +tags +environ diff --git a/vendor/github.com/jmoiron/sqlx/.travis.yml b/vendor/github.com/jmoiron/sqlx/.travis.yml new file mode 100644 index 0000000..6bc68d6 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/.travis.yml @@ -0,0 +1,27 @@ +# vim: ft=yaml sw=2 ts=2 + +language: go + +# enable database services +services: + - mysql + - postgresql + +# create test database +before_install: + - mysql -e 'CREATE DATABASE IF NOT EXISTS sqlxtest;' + - psql -c 'create database sqlxtest;' -U postgres + - go get github.com/mattn/goveralls + - export SQLX_MYSQL_DSN="travis:@/sqlxtest?parseTime=true" + - export SQLX_POSTGRES_DSN="postgres://postgres:@localhost/sqlxtest?sslmode=disable" + - export SQLX_SQLITE_DSN="$HOME/sqlxtest.db" + +# go versions to test +go: + - "1.8" + - "1.9" + - "1.10.x" + +# run tests w/ coverage +script: + - travis_retry $GOPATH/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/jmoiron/sqlx/LICENSE b/vendor/github.com/jmoiron/sqlx/LICENSE new file mode 100644 index 0000000..0d31edf --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/LICENSE @@ -0,0 +1,23 @@ + Copyright (c) 2013, Jason Moiron + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this 
software and associated documentation
+ files (the "Software"), to deal in the Software without
+ restriction, including without limitation the rights to use,
+ copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following
+ conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/vendor/github.com/jmoiron/sqlx/README.md b/vendor/github.com/jmoiron/sqlx/README.md
new file mode 100644
index 0000000..c0db7f7
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/README.md
@@ -0,0 +1,185 @@
+# sqlx
+
+[![Build Status](https://travis-ci.org/jmoiron/sqlx.svg?branch=master)](https://travis-ci.org/jmoiron/sqlx) [![Coverage Status](https://coveralls.io/repos/github/jmoiron/sqlx/badge.svg?branch=master)](https://coveralls.io/github/jmoiron/sqlx?branch=master) [![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/jmoiron/sqlx) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/jmoiron/sqlx/master/LICENSE)
+
+sqlx is a library which provides a set of extensions on Go's standard
+`database/sql` library. The sqlx versions of `sql.DB`, `sql.Tx`, `sql.Stmt`,
+et al. all leave the underlying interfaces untouched, so that their interfaces
+are a superset of the standard ones. This makes it relatively painless to
+integrate existing codebases using database/sql with sqlx.
+
+Major additional concepts are:
+
+* Marshal rows into structs (with embedded struct support), maps, and slices
+* Named parameter support including prepared statements
+* `Get` and `Select` to go quickly from query to struct/slice
+
+In addition to the [godoc API documentation](http://godoc.org/github.com/jmoiron/sqlx),
+there is also some [standard documentation](http://jmoiron.github.io/sqlx/) that
+explains how to use `database/sql` along with sqlx.
+
+## Recent Changes
+
+* sqlx/types.JsonText has been renamed to JSONText to follow Go naming conventions.
+
+This breaks backwards compatibility, but it's in a way that is trivially fixable
+(`s/JsonText/JSONText/g`). The `types` package is both experimental and not in
+active development currently.
+
+* Using Go 1.6 and below with `types.JSONText` and `types.GzippedText` can be _potentially unsafe_, **especially** when used with common auto-scan sqlx idioms like `Select` and `Get`. See [golang bug #13905](https://github.com/golang/go/issues/13905).
+
+### Backwards Compatibility
+
+There is no Go1-like promise of absolute stability, but I take the issue seriously
+and will maintain the library in a compatible state unless vital bugs prevent me
+from doing so. Since [#59](https://github.com/jmoiron/sqlx/issues/59) and
+[#60](https://github.com/jmoiron/sqlx/issues/60) necessitated breaking behavior,
+a wider API cleanup was done at the time of fixing.
It's possible this will happen +in future; if it does, a git tag will be provided for users requiring the old +behavior to continue to use it until such a time as they can migrate. + +## install + + go get github.com/jmoiron/sqlx + +## issues + +Row headers can be ambiguous (`SELECT 1 AS a, 2 AS a`), and the result of +`Columns()` does not fully qualify column names in queries like: + +```sql +SELECT a.id, a.name, b.id, b.name FROM foos AS a JOIN foos AS b ON a.parent = b.id; +``` + +making a struct or map destination ambiguous. Use `AS` in your queries +to give columns distinct names, `rows.Scan` to scan them manually, or +`SliceScan` to get a slice of results. + +## usage + +Below is an example which shows some common use cases for sqlx. Check +[sqlx_test.go](https://github.com/jmoiron/sqlx/blob/master/sqlx_test.go) for more +usage. + + +```go +package main + +import ( + "database/sql" + "fmt" + "log" + + _ "github.com/lib/pq" + "github.com/jmoiron/sqlx" +) + +var schema = ` +CREATE TABLE person ( + first_name text, + last_name text, + email text +); + +CREATE TABLE place ( + country text, + city text NULL, + telcode integer +)` + +type Person struct { + FirstName string `db:"first_name"` + LastName string `db:"last_name"` + Email string +} + +type Place struct { + Country string + City sql.NullString + TelCode int +} + +func main() { + // this Pings the database trying to connect, panics on error + // use sqlx.Open() for sql.Open() semantics + db, err := sqlx.Connect("postgres", "user=foo dbname=bar sslmode=disable") + if err != nil { + log.Fatalln(err) + } + + // exec the schema or fail; multi-statement Exec behavior varies between + // database drivers; pq will exec them all, sqlite3 won't, ymmv + db.MustExec(schema) + + tx := db.MustBegin() + tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "Jason", "Moiron", "jmoiron@jmoiron.net") + tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "John", "Doe", "johndoeDNE@gmail.net") + tx.MustExec("INSERT INTO place (country, city, telcode) VALUES ($1, $2, $3)", "United States", "New York", "1") + tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Hong Kong", "852") + tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Singapore", "65") + // Named queries can use structs, so if you have an existing struct (i.e. 
person := &Person{}) that you have populated, you can pass it in as &person + tx.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", &Person{"Jane", "Citizen", "jane.citzen@example.com"}) + tx.Commit() + + // Query the database, storing results in a []Person (wrapped in []interface{}) + people := []Person{} + db.Select(&people, "SELECT * FROM person ORDER BY first_name ASC") + jason, john := people[0], people[1] + + fmt.Printf("%#v\n%#v", jason, john) + // Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"} + // Person{FirstName:"John", LastName:"Doe", Email:"johndoeDNE@gmail.net"} + + // You can also get a single result, a la QueryRow + jason = Person{} + err = db.Get(&jason, "SELECT * FROM person WHERE first_name=$1", "Jason") + fmt.Printf("%#v\n", jason) + // Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"} + + // if you have null fields and use SELECT *, you must use sql.Null* in your struct + places := []Place{} + err = db.Select(&places, "SELECT * FROM place ORDER BY telcode ASC") + if err != nil { + fmt.Println(err) + return + } + usa, singsing, honkers := places[0], places[1], places[2] + + fmt.Printf("%#v\n%#v\n%#v\n", usa, singsing, honkers) + // Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1} + // Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65} + // Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852} + + // Loop through rows using only one struct + place := Place{} + rows, err := db.Queryx("SELECT * FROM place") + for rows.Next() { + err := rows.StructScan(&place) + if err != nil { + log.Fatalln(err) + } + fmt.Printf("%#v\n", place) + } + // Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1} + // Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852} + // Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65} + + // Named queries, using `:name` as the bindvar. Automatic bindvar support + // which takes into account the dbtype based on the driverName on sqlx.Open/Connect + _, err = db.NamedExec(`INSERT INTO person (first_name,last_name,email) VALUES (:first,:last,:email)`, + map[string]interface{}{ + "first": "Bin", + "last": "Smuth", + "email": "bensmith@allblacks.nz", + }) + + // Selects Mr. Smith from the database + rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:fn`, map[string]interface{}{"fn": "Bin"}) + + // Named queries can also use structs. Their bind names follow the same rules + // as the name -> db mapping, so struct fields are lowercased and the `db` tag + // is taken into consideration. + rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:first_name`, jason) +} +``` + diff --git a/vendor/github.com/jmoiron/sqlx/bind.go b/vendor/github.com/jmoiron/sqlx/bind.go new file mode 100644 index 0000000..0fdc443 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/bind.go @@ -0,0 +1,208 @@ +package sqlx + +import ( + "bytes" + "errors" + "reflect" + "strconv" + "strings" + + "github.com/jmoiron/sqlx/reflectx" +) + +// Bindvar types supported by Rebind, BindMap and BindStruct. +const ( + UNKNOWN = iota + QUESTION + DOLLAR + NAMED +) + +// BindType returns the bindtype for a given database given a drivername. 
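+//
+// For example, "postgres" yields DOLLAR bindvars ($1, $2, ...), "mysql" and
+// "sqlite3" yield QUESTION (?), and the Oracle drivers yield NAMED (:name).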
+func BindType(driverName string) int { + switch driverName { + case "postgres", "pgx", "pq-timeouts", "cloudsqlpostgres": + return DOLLAR + case "mysql": + return QUESTION + case "sqlite3": + return QUESTION + case "oci8", "ora", "goracle": + return NAMED + } + return UNKNOWN +} + +// FIXME: this should be able to be tolerant of escaped ?'s in queries without +// losing much speed, and should be to avoid confusion. + +// Rebind a query from the default bindtype (QUESTION) to the target bindtype. +func Rebind(bindType int, query string) string { + switch bindType { + case QUESTION, UNKNOWN: + return query + } + + // Add space enough for 10 params before we have to allocate + rqb := make([]byte, 0, len(query)+10) + + var i, j int + + for i = strings.Index(query, "?"); i != -1; i = strings.Index(query, "?") { + rqb = append(rqb, query[:i]...) + + switch bindType { + case DOLLAR: + rqb = append(rqb, '$') + case NAMED: + rqb = append(rqb, ':', 'a', 'r', 'g') + } + + j++ + rqb = strconv.AppendInt(rqb, int64(j), 10) + + query = query[i+1:] + } + + return string(append(rqb, query...)) +} + +// Experimental implementation of Rebind which uses a bytes.Buffer. The code is +// much simpler and should be more resistant to odd unicode, but it is twice as +// slow. Kept here for benchmarking purposes and to possibly replace Rebind if +// problems arise with its somewhat naive handling of unicode. +func rebindBuff(bindType int, query string) string { + if bindType != DOLLAR { + return query + } + + b := make([]byte, 0, len(query)) + rqb := bytes.NewBuffer(b) + j := 1 + for _, r := range query { + if r == '?' { + rqb.WriteRune('$') + rqb.WriteString(strconv.Itoa(j)) + j++ + } else { + rqb.WriteRune(r) + } + } + + return rqb.String() +} + +// In expands slice values in args, returning the modified query string +// and a new arg list that can be executed by a database. The `query` should +// use the `?` bindVar. The return value uses the `?` bindVar. +func In(query string, args ...interface{}) (string, []interface{}, error) { + // argMeta stores reflect.Value and length for slices and + // the value itself for non-slice arguments + type argMeta struct { + v reflect.Value + i interface{} + length int + } + + var flatArgsCount int + var anySlices bool + + meta := make([]argMeta, len(args)) + + for i, arg := range args { + v := reflect.ValueOf(arg) + t := reflectx.Deref(v.Type()) + + // []byte is a driver.Value type so it should not be expanded + if t.Kind() == reflect.Slice && t != reflect.TypeOf([]byte{}) { + meta[i].length = v.Len() + meta[i].v = v + + anySlices = true + flatArgsCount += meta[i].length + + if meta[i].length == 0 { + return "", nil, errors.New("empty slice passed to 'in' query") + } + } else { + meta[i].i = arg + flatArgsCount++ + } + } + + // don't do any parsing if there aren't any slices; note that this means + // some errors that we might have caught below will not be returned. + if !anySlices { + return query, args, nil + } + + newArgs := make([]interface{}, 0, flatArgsCount) + buf := bytes.NewBuffer(make([]byte, 0, len(query)+len(", ?")*flatArgsCount)) + + var arg, offset int + + for i := strings.IndexByte(query[offset:], '?'); i != -1; i = strings.IndexByte(query[offset:], '?') { + if arg >= len(meta) { + // if an argument wasn't passed, lets return an error; this is + // not actually how database/sql Exec/Query works, but since we are + // creating an argument list programmatically, we want to be able + // to catch these programmer errors earlier. 
+ return "", nil, errors.New("number of bindVars exceeds arguments") + } + + argMeta := meta[arg] + arg++ + + // not a slice, continue. + // our questionmark will either be written before the next expansion + // of a slice or after the loop when writing the rest of the query + if argMeta.length == 0 { + offset = offset + i + 1 + newArgs = append(newArgs, argMeta.i) + continue + } + + // write everything up to and including our ? character + buf.WriteString(query[:offset+i+1]) + + for si := 1; si < argMeta.length; si++ { + buf.WriteString(", ?") + } + + newArgs = appendReflectSlice(newArgs, argMeta.v, argMeta.length) + + // slice the query and reset the offset. this avoids some bookkeeping for + // the write after the loop + query = query[offset+i+1:] + offset = 0 + } + + buf.WriteString(query) + + if arg < len(meta) { + return "", nil, errors.New("number of bindVars less than number arguments") + } + + return buf.String(), newArgs, nil +} + +func appendReflectSlice(args []interface{}, v reflect.Value, vlen int) []interface{} { + switch val := v.Interface().(type) { + case []interface{}: + args = append(args, val...) + case []int: + for i := range val { + args = append(args, val[i]) + } + case []string: + for i := range val { + args = append(args, val[i]) + } + default: + for si := 0; si < vlen; si++ { + args = append(args, v.Index(si).Interface()) + } + } + + return args +} diff --git a/vendor/github.com/jmoiron/sqlx/doc.go b/vendor/github.com/jmoiron/sqlx/doc.go new file mode 100644 index 0000000..e2b4e60 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/doc.go @@ -0,0 +1,12 @@ +// Package sqlx provides general purpose extensions to database/sql. +// +// It is intended to seamlessly wrap database/sql and provide convenience +// methods which are useful in the development of database driven applications. +// None of the underlying database/sql methods are changed. Instead all extended +// behavior is implemented through new methods defined on wrapper types. +// +// Additions include scanning into structs, named query support, rebinding +// queries for different drivers, convenient shorthands for common error handling +// and more. +// +package sqlx diff --git a/vendor/github.com/jmoiron/sqlx/named.go b/vendor/github.com/jmoiron/sqlx/named.go new file mode 100644 index 0000000..69eb954 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/named.go @@ -0,0 +1,346 @@ +package sqlx + +// Named Query Support +// +// * BindMap - bind query bindvars to map/struct args +// * NamedExec, NamedQuery - named query w/ struct or map +// * NamedStmt - a pre-compiled named query which is a prepared statement +// +// Internal Interfaces: +// +// * compileNamedQuery - rebind a named query, returning a query and list of names +// * bindArgs, bindMapArgs, bindAnyArgs - given a list of names, return an arglist +// +import ( + "database/sql" + "errors" + "fmt" + "reflect" + "strconv" + "unicode" + + "github.com/jmoiron/sqlx/reflectx" +) + +// NamedStmt is a prepared statement that executes named queries. Prepare it +// how you would execute a NamedQuery, but pass in a struct or map when executing. +type NamedStmt struct { + Params []string + QueryString string + Stmt *Stmt +} + +// Close closes the named statement. +func (n *NamedStmt) Close() error { + return n.Stmt.Close() +} + +// Exec executes a named statement using the struct passed. +// Any named placeholder parameters are replaced with fields from arg. 
+func (n *NamedStmt) Exec(arg interface{}) (sql.Result, error) { + args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) + if err != nil { + return *new(sql.Result), err + } + return n.Stmt.Exec(args...) +} + +// Query executes a named statement using the struct argument, returning rows. +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) Query(arg interface{}) (*sql.Rows, error) { + args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) + if err != nil { + return nil, err + } + return n.Stmt.Query(args...) +} + +// QueryRow executes a named statement against the database. Because sqlx cannot +// create a *sql.Row with an error condition pre-set for binding errors, sqlx +// returns a *sqlx.Row instead. +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) QueryRow(arg interface{}) *Row { + args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) + if err != nil { + return &Row{err: err} + } + return n.Stmt.QueryRowx(args...) +} + +// MustExec execs a NamedStmt, panicing on error +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) MustExec(arg interface{}) sql.Result { + res, err := n.Exec(arg) + if err != nil { + panic(err) + } + return res +} + +// Queryx using this NamedStmt +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) Queryx(arg interface{}) (*Rows, error) { + r, err := n.Query(arg) + if err != nil { + return nil, err + } + return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err +} + +// QueryRowx this NamedStmt. Because of limitations with QueryRow, this is +// an alias for QueryRow. +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) QueryRowx(arg interface{}) *Row { + return n.QueryRow(arg) +} + +// Select using this NamedStmt +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) Select(dest interface{}, arg interface{}) error { + rows, err := n.Queryx(arg) + if err != nil { + return err + } + // if something happens here, we want to make sure the rows are Closed + defer rows.Close() + return scanAll(rows, dest, false) +} + +// Get using this NamedStmt +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) Get(dest interface{}, arg interface{}) error { + r := n.QueryRowx(arg) + return r.scanAny(dest, false) +} + +// Unsafe creates an unsafe version of the NamedStmt +func (n *NamedStmt) Unsafe() *NamedStmt { + r := &NamedStmt{Params: n.Params, Stmt: n.Stmt, QueryString: n.QueryString} + r.Stmt.unsafe = true + return r +} + +// A union interface of preparer and binder, required to be able to prepare +// named statements (as the bindtype must be determined). 
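+// Types like sqlx.DB and sqlx.Tx satisfy it: they can both prepare queries
+// and report the driver name used to pick the bindvar style.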
+type namedPreparer interface { + Preparer + binder +} + +func prepareNamed(p namedPreparer, query string) (*NamedStmt, error) { + bindType := BindType(p.DriverName()) + q, args, err := compileNamedQuery([]byte(query), bindType) + if err != nil { + return nil, err + } + stmt, err := Preparex(p, q) + if err != nil { + return nil, err + } + return &NamedStmt{ + QueryString: q, + Params: args, + Stmt: stmt, + }, nil +} + +func bindAnyArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) { + if maparg, ok := arg.(map[string]interface{}); ok { + return bindMapArgs(names, maparg) + } + return bindArgs(names, arg, m) +} + +// private interface to generate a list of interfaces from a given struct +// type, given a list of names to pull out of the struct. Used by public +// BindStruct interface. +func bindArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) { + arglist := make([]interface{}, 0, len(names)) + + // grab the indirected value of arg + v := reflect.ValueOf(arg) + for v = reflect.ValueOf(arg); v.Kind() == reflect.Ptr; { + v = v.Elem() + } + + err := m.TraversalsByNameFunc(v.Type(), names, func(i int, t []int) error { + if len(t) == 0 { + return fmt.Errorf("could not find name %s in %#v", names[i], arg) + } + + val := reflectx.FieldByIndexesReadOnly(v, t) + arglist = append(arglist, val.Interface()) + + return nil + }) + + return arglist, err +} + +// like bindArgs, but for maps. +func bindMapArgs(names []string, arg map[string]interface{}) ([]interface{}, error) { + arglist := make([]interface{}, 0, len(names)) + + for _, name := range names { + val, ok := arg[name] + if !ok { + return arglist, fmt.Errorf("could not find name %s in %#v", name, arg) + } + arglist = append(arglist, val) + } + return arglist, nil +} + +// bindStruct binds a named parameter query with fields from a struct argument. +// The rules for binding field names to parameter names follow the same +// conventions as for StructScan, including obeying the `db` struct tags. +func bindStruct(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) { + bound, names, err := compileNamedQuery([]byte(query), bindType) + if err != nil { + return "", []interface{}{}, err + } + + arglist, err := bindArgs(names, arg, m) + if err != nil { + return "", []interface{}{}, err + } + + return bound, arglist, nil +} + +// bindMap binds a named parameter query with a map of arguments. +func bindMap(bindType int, query string, args map[string]interface{}) (string, []interface{}, error) { + bound, names, err := compileNamedQuery([]byte(query), bindType) + if err != nil { + return "", []interface{}{}, err + } + + arglist, err := bindMapArgs(names, args) + return bound, arglist, err +} + +// -- Compilation of Named Queries + +// Allow digits and letters in bind params; additionally runes are +// checked against underscores, meaning that bind params can have be +// alphanumeric with underscores. Mind the difference between unicode +// digits and numbers, where '5' is a digit but '五' is not. +var allowedBindRunes = []*unicode.RangeTable{unicode.Letter, unicode.Digit} + +// FIXME: this function isn't safe for unicode named params, as a failing test +// can testify. This is not a regression but a failure of the original code +// as well. It should be modified to range over runes in a string rather than +// bytes, even though this is less convenient and slower. 
Hopefully the +// addition of the prepared NamedStmt (which will only do this once) will make +// up for the slightly slower ad-hoc NamedExec/NamedQuery. + +// compile a NamedQuery into an unbound query (using the '?' bindvar) and +// a list of names. +func compileNamedQuery(qs []byte, bindType int) (query string, names []string, err error) { + names = make([]string, 0, 10) + rebound := make([]byte, 0, len(qs)) + + inName := false + last := len(qs) - 1 + currentVar := 1 + name := make([]byte, 0, 10) + + for i, b := range qs { + // a ':' while we're in a name is an error + if b == ':' { + // if this is the second ':' in a '::' escape sequence, append a ':' + if inName && i > 0 && qs[i-1] == ':' { + rebound = append(rebound, ':') + inName = false + continue + } else if inName { + err = errors.New("unexpected `:` while reading named param at " + strconv.Itoa(i)) + return query, names, err + } + inName = true + name = []byte{} + // if we're in a name, and this is an allowed character, continue + } else if inName && (unicode.IsOneOf(allowedBindRunes, rune(b)) || b == '_' || b == '.') && i != last { + // append the byte to the name if we are in a name and not on the last byte + name = append(name, b) + // if we're in a name and it's not an allowed character, the name is done + } else if inName { + inName = false + // if this is the final byte of the string and it is part of the name, then + // make sure to add it to the name + if i == last && unicode.IsOneOf(allowedBindRunes, rune(b)) { + name = append(name, b) + } + // add the string representation to the names list + names = append(names, string(name)) + // add a proper bindvar for the bindType + switch bindType { + // oracle only supports named type bind vars even for positional + case NAMED: + rebound = append(rebound, ':') + rebound = append(rebound, name...) + case QUESTION, UNKNOWN: + rebound = append(rebound, '?') + case DOLLAR: + rebound = append(rebound, '$') + for _, b := range strconv.Itoa(currentVar) { + rebound = append(rebound, byte(b)) + } + currentVar++ + } + // add this byte to string unless it was not part of the name + if i != last { + rebound = append(rebound, b) + } else if !unicode.IsOneOf(allowedBindRunes, rune(b)) { + rebound = append(rebound, b) + } + } else { + // this is a normal byte and should just go onto the rebound query + rebound = append(rebound, b) + } + } + + return string(rebound), names, err +} + +// BindNamed binds a struct or a map to a query with named parameters. +// DEPRECATED: use sqlx.Named` instead of this, it may be removed in future. +func BindNamed(bindType int, query string, arg interface{}) (string, []interface{}, error) { + return bindNamedMapper(bindType, query, arg, mapper()) +} + +// Named takes a query using named parameters and an argument and +// returns a new query with a list of args that can be executed by +// a database. The return value uses the `?` bindvar. +func Named(query string, arg interface{}) (string, []interface{}, error) { + return bindNamedMapper(QUESTION, query, arg, mapper()) +} + +func bindNamedMapper(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) { + if maparg, ok := arg.(map[string]interface{}); ok { + return bindMap(bindType, query, maparg) + } + return bindStruct(bindType, query, arg, m) +} + +// NamedQuery binds a named query and then runs Query on the result using the +// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with +// map[string]interface{} types. 
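+//
+// For example, using the map form (mirroring the README example):
+//
+//	rows, err := sqlx.NamedQuery(db,
+//		`SELECT * FROM person WHERE first_name=:fn`,
+//		map[string]interface{}{"fn": "Bin"})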
+func NamedQuery(e Ext, query string, arg interface{}) (*Rows, error) {
+	q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
+	if err != nil {
+		return nil, err
+	}
+	return e.Queryx(q, args...)
+}
+
+// NamedExec uses BindStruct to get a query executable by the driver and
+// then runs Exec on the result. Returns an error from the binding
+// or the query execution itself.
+func NamedExec(e Ext, query string, arg interface{}) (sql.Result, error) {
+	q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
+	if err != nil {
+		return nil, err
+	}
+	return e.Exec(q, args...)
+}
diff --git a/vendor/github.com/jmoiron/sqlx/named_context.go b/vendor/github.com/jmoiron/sqlx/named_context.go
new file mode 100644
index 0000000..9405007
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/named_context.go
@@ -0,0 +1,132 @@
+// +build go1.8
+
+package sqlx
+
+import (
+	"context"
+	"database/sql"
+)
+
+// A union interface of contextPreparer and binder, required to be able to
+// prepare named statements with context (as the bindtype must be determined).
+type namedPreparerContext interface {
+	PreparerContext
+	binder
+}
+
+func prepareNamedContext(ctx context.Context, p namedPreparerContext, query string) (*NamedStmt, error) {
+	bindType := BindType(p.DriverName())
+	q, args, err := compileNamedQuery([]byte(query), bindType)
+	if err != nil {
+		return nil, err
+	}
+	stmt, err := PreparexContext(ctx, p, q)
+	if err != nil {
+		return nil, err
+	}
+	return &NamedStmt{
+		QueryString: q,
+		Params:      args,
+		Stmt:        stmt,
+	}, nil
+}
+
+// ExecContext executes a named statement using the struct passed.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) ExecContext(ctx context.Context, arg interface{}) (sql.Result, error) {
+	args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+	if err != nil {
+		return *new(sql.Result), err
+	}
+	return n.Stmt.ExecContext(ctx, args...)
+}
+
+// QueryContext executes a named statement using the struct argument, returning rows.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryContext(ctx context.Context, arg interface{}) (*sql.Rows, error) {
+	args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+	if err != nil {
+		return nil, err
+	}
+	return n.Stmt.QueryContext(ctx, args...)
+}
+
+// QueryRowContext executes a named statement against the database. Because sqlx cannot
+// create a *sql.Row with an error condition pre-set for binding errors, sqlx
+// returns a *sqlx.Row instead.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryRowContext(ctx context.Context, arg interface{}) *Row {
+	args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+	if err != nil {
+		return &Row{err: err}
+	}
+	return n.Stmt.QueryRowxContext(ctx, args...)
+}
+
+// MustExecContext execs a NamedStmt, panicking on error.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) MustExecContext(ctx context.Context, arg interface{}) sql.Result {
+	res, err := n.ExecContext(ctx, arg)
+	if err != nil {
+		panic(err)
+	}
+	return res
+}
+
+// QueryxContext using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryxContext(ctx context.Context, arg interface{}) (*Rows, error) {
+	r, err := n.QueryContext(ctx, arg)
+	if err != nil {
+		return nil, err
+	}
+	return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err
+}
+
+// QueryRowxContext this NamedStmt. Because of limitations with QueryRow, this is
+// an alias for QueryRow.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryRowxContext(ctx context.Context, arg interface{}) *Row {
+	return n.QueryRowContext(ctx, arg)
+}
+
+// SelectContext using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) SelectContext(ctx context.Context, dest interface{}, arg interface{}) error {
+	rows, err := n.QueryxContext(ctx, arg)
+	if err != nil {
+		return err
+	}
+	// if something happens here, we want to make sure the rows are Closed
+	defer rows.Close()
+	return scanAll(rows, dest, false)
+}
+
+// GetContext using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) GetContext(ctx context.Context, dest interface{}, arg interface{}) error {
+	r := n.QueryRowxContext(ctx, arg)
+	return r.scanAny(dest, false)
+}
+
+// NamedQueryContext binds a named query and then runs Query on the result using the
+// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with
+// map[string]interface{} types.
+func NamedQueryContext(ctx context.Context, e ExtContext, query string, arg interface{}) (*Rows, error) {
+	q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
+	if err != nil {
+		return nil, err
+	}
+	return e.QueryxContext(ctx, q, args...)
+}
+
+// NamedExecContext uses BindStruct to get a query executable by the driver and
+// then runs Exec on the result. Returns an error from the binding
+// or the query execution itself.
+func NamedExecContext(ctx context.Context, e ExtContext, query string, arg interface{}) (sql.Result, error) {
+	q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
+	if err != nil {
+		return nil, err
+	}
+	return e.ExecContext(ctx, q, args...)
+} diff --git a/vendor/github.com/jmoiron/sqlx/named_context_test.go b/vendor/github.com/jmoiron/sqlx/named_context_test.go new file mode 100644 index 0000000..87e94ac --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/named_context_test.go @@ -0,0 +1,136 @@ +// +build go1.8 + +package sqlx + +import ( + "context" + "database/sql" + "testing" +) + +func TestNamedContextQueries(t *testing.T) { + RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) { + loadDefaultFixture(db, t) + test := Test{t} + var ns *NamedStmt + var err error + + ctx := context.Background() + + // Check that invalid preparations fail + ns, err = db.PrepareNamedContext(ctx, "SELECT * FROM person WHERE first_name=:first:name") + if err == nil { + t.Error("Expected an error with invalid prepared statement.") + } + + ns, err = db.PrepareNamedContext(ctx, "invalid sql") + if err == nil { + t.Error("Expected an error with invalid prepared statement.") + } + + // Check closing works as anticipated + ns, err = db.PrepareNamedContext(ctx, "SELECT * FROM person WHERE first_name=:first_name") + test.Error(err) + err = ns.Close() + test.Error(err) + + ns, err = db.PrepareNamedContext(ctx, ` + SELECT first_name, last_name, email + FROM person WHERE first_name=:first_name AND email=:email`) + test.Error(err) + + // test Queryx w/ uses Query + p := Person{FirstName: "Jason", LastName: "Moiron", Email: "jmoiron@jmoiron.net"} + + rows, err := ns.QueryxContext(ctx, p) + test.Error(err) + for rows.Next() { + var p2 Person + rows.StructScan(&p2) + if p.FirstName != p2.FirstName { + t.Errorf("got %s, expected %s", p.FirstName, p2.FirstName) + } + if p.LastName != p2.LastName { + t.Errorf("got %s, expected %s", p.LastName, p2.LastName) + } + if p.Email != p2.Email { + t.Errorf("got %s, expected %s", p.Email, p2.Email) + } + } + + // test Select + people := make([]Person, 0, 5) + err = ns.SelectContext(ctx, &people, p) + test.Error(err) + + if len(people) != 1 { + t.Errorf("got %d results, expected %d", len(people), 1) + } + if p.FirstName != people[0].FirstName { + t.Errorf("got %s, expected %s", p.FirstName, people[0].FirstName) + } + if p.LastName != people[0].LastName { + t.Errorf("got %s, expected %s", p.LastName, people[0].LastName) + } + if p.Email != people[0].Email { + t.Errorf("got %s, expected %s", p.Email, people[0].Email) + } + + // test Exec + ns, err = db.PrepareNamedContext(ctx, ` + INSERT INTO person (first_name, last_name, email) + VALUES (:first_name, :last_name, :email)`) + test.Error(err) + + js := Person{ + FirstName: "Julien", + LastName: "Savea", + Email: "jsavea@ab.co.nz", + } + _, err = ns.ExecContext(ctx, js) + test.Error(err) + + // Make sure we can pull him out again + p2 := Person{} + db.GetContext(ctx, &p2, db.Rebind("SELECT * FROM person WHERE email=?"), js.Email) + if p2.Email != js.Email { + t.Errorf("expected %s, got %s", js.Email, p2.Email) + } + + // test Txn NamedStmts + tx := db.MustBeginTx(ctx, nil) + txns := tx.NamedStmtContext(ctx, ns) + + // We're going to add Steven in this txn + sl := Person{ + FirstName: "Steven", + LastName: "Luatua", + Email: "sluatua@ab.co.nz", + } + + _, err = txns.ExecContext(ctx, sl) + test.Error(err) + // then rollback... 
+ tx.Rollback() + // looking for Steven after a rollback should fail + err = db.GetContext(ctx, &p2, db.Rebind("SELECT * FROM person WHERE email=?"), sl.Email) + if err != sql.ErrNoRows { + t.Errorf("expected no rows error, got %v", err) + } + + // now do the same, but commit + tx = db.MustBeginTx(ctx, nil) + txns = tx.NamedStmtContext(ctx, ns) + _, err = txns.ExecContext(ctx, sl) + test.Error(err) + tx.Commit() + + // looking for Steven after a Commit should succeed + err = db.GetContext(ctx, &p2, db.Rebind("SELECT * FROM person WHERE email=?"), sl.Email) + test.Error(err) + if p2.Email != sl.Email { + t.Errorf("expected %s, got %s", sl.Email, p2.Email) + } + + }) +} diff --git a/vendor/github.com/jmoiron/sqlx/named_test.go b/vendor/github.com/jmoiron/sqlx/named_test.go new file mode 100644 index 0000000..d3459a8 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/named_test.go @@ -0,0 +1,227 @@ +package sqlx + +import ( + "database/sql" + "testing" +) + +func TestCompileQuery(t *testing.T) { + table := []struct { + Q, R, D, N string + V []string + }{ + // basic test for named parameters, invalid char ',' terminating + { + Q: `INSERT INTO foo (a,b,c,d) VALUES (:name, :age, :first, :last)`, + R: `INSERT INTO foo (a,b,c,d) VALUES (?, ?, ?, ?)`, + D: `INSERT INTO foo (a,b,c,d) VALUES ($1, $2, $3, $4)`, + N: `INSERT INTO foo (a,b,c,d) VALUES (:name, :age, :first, :last)`, + V: []string{"name", "age", "first", "last"}, + }, + // This query tests a named parameter ending the string as well as numbers + { + Q: `SELECT * FROM a WHERE first_name=:name1 AND last_name=:name2`, + R: `SELECT * FROM a WHERE first_name=? AND last_name=?`, + D: `SELECT * FROM a WHERE first_name=$1 AND last_name=$2`, + N: `SELECT * FROM a WHERE first_name=:name1 AND last_name=:name2`, + V: []string{"name1", "name2"}, + }, + { + Q: `SELECT "::foo" FROM a WHERE first_name=:name1 AND last_name=:name2`, + R: `SELECT ":foo" FROM a WHERE first_name=? AND last_name=?`, + D: `SELECT ":foo" FROM a WHERE first_name=$1 AND last_name=$2`, + N: `SELECT ":foo" FROM a WHERE first_name=:name1 AND last_name=:name2`, + V: []string{"name1", "name2"}, + }, + { + Q: `SELECT 'a::b::c' || first_name, '::::ABC::_::' FROM person WHERE first_name=:first_name AND last_name=:last_name`, + R: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=? AND last_name=?`, + D: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=$1 AND last_name=$2`, + N: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=:first_name AND last_name=:last_name`, + V: []string{"first_name", "last_name"}, + }, + /* This unicode awareness test sadly fails, because of our byte-wise worldview. 
+ * We could certainly iterate by Rune instead, though it's a great deal slower,
+ * it's probably the RightWay(tm)
+ {
+ Q: `INSERT INTO foo (a,b,c,d) VALUES (:あ, :b, :キコ, :名前)`,
+ R: `INSERT INTO foo (a,b,c,d) VALUES (?, ?, ?, ?)`,
+ D: `INSERT INTO foo (a,b,c,d) VALUES ($1, $2, $3, $4)`,
+ N: []string{"name", "age", "first", "last"},
+ },
+ */
+ }
+
+ for _, test := range table {
+ qr, names, err := compileNamedQuery([]byte(test.Q), QUESTION)
+ if err != nil {
+ t.Error(err)
+ }
+ if qr != test.R {
+ t.Errorf("expected %s, got %s", test.R, qr)
+ }
+ if len(names) != len(test.V) {
+ t.Errorf("expected %#v, got %#v", test.V, names)
+ } else {
+ for i, name := range names {
+ if name != test.V[i] {
+ t.Errorf("expected %dth name to be %s, got %s", i+1, test.V[i], name)
+ }
+ }
+ }
+ qd, _, _ := compileNamedQuery([]byte(test.Q), DOLLAR)
+ if qd != test.D {
+ t.Errorf("\nexpected: `%s`\ngot: `%s`", test.D, qd)
+ }
+
+ qq, _, _ := compileNamedQuery([]byte(test.Q), NAMED)
+ if qq != test.N {
+ t.Errorf("\nexpected: `%s`\ngot: `%s`\n(len: %d vs %d)", test.N, qq, len(test.N), len(qq))
+ }
+ }
+}
+
+type Test struct {
+ t *testing.T
+}
+
+func (t Test) Error(err error, msg ...interface{}) {
+ if err != nil {
+ if len(msg) == 0 {
+ t.t.Error(err)
+ } else {
+ t.t.Error(msg...)
+ }
+ }
+}
+
+func (t Test) Errorf(err error, format string, args ...interface{}) {
+ if err != nil {
+ t.t.Errorf(format, args...)
+ }
+}
+
+func TestNamedQueries(t *testing.T) {
+ RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
+ loadDefaultFixture(db, t)
+ test := Test{t}
+ var ns *NamedStmt
+ var err error
+
+ // Check that invalid preparations fail
+ ns, err = db.PrepareNamed("SELECT * FROM person WHERE first_name=:first:name")
+ if err == nil {
+ t.Error("Expected an error with invalid prepared statement.")
+ }
+
+ ns, err = db.PrepareNamed("invalid sql")
+ if err == nil {
+ t.Error("Expected an error with invalid prepared statement.")
+ }
+
+ // Check closing works as anticipated
+ ns, err = db.PrepareNamed("SELECT * FROM person WHERE first_name=:first_name")
+ test.Error(err)
+ err = ns.Close()
+ test.Error(err)
+
+ ns, err = db.PrepareNamed(`
+ SELECT first_name, last_name, email
+ FROM person WHERE first_name=:first_name AND email=:email`)
+ test.Error(err)
+
+ // test Queryx w/ uses Query
+ p := Person{FirstName: "Jason", LastName: "Moiron", Email: "jmoiron@jmoiron.net"}
+
+ rows, err := ns.Queryx(p)
+ test.Error(err)
+ for rows.Next() {
+ var p2 Person
+ rows.StructScan(&p2)
+ if p.FirstName != p2.FirstName {
+ t.Errorf("got %s, expected %s", p.FirstName, p2.FirstName)
+ }
+ if p.LastName != p2.LastName {
+ t.Errorf("got %s, expected %s", p.LastName, p2.LastName)
+ }
+ if p.Email != p2.Email {
+ t.Errorf("got %s, expected %s", p.Email, p2.Email)
+ }
+ }
+
+ // test Select
+ people := make([]Person, 0, 5)
+ err = ns.Select(&people, p)
+ test.Error(err)
+
+ if len(people) != 1 {
+ t.Errorf("got %d results, expected %d", len(people), 1)
+ }
+ if p.FirstName != people[0].FirstName {
+ t.Errorf("got %s, expected %s", p.FirstName, people[0].FirstName)
+ }
+ if p.LastName != people[0].LastName {
+ t.Errorf("got %s, expected %s", p.LastName, people[0].LastName)
+ }
+ if p.Email != people[0].Email {
+ t.Errorf("got %s, expected %s", p.Email, people[0].Email)
+ }
+
+ // test Exec
+ ns, err = db.PrepareNamed(`
+ INSERT INTO person (first_name, last_name, email)
+ VALUES (:first_name, :last_name, :email)`)
+ test.Error(err)
+
+ js := Person{
+ FirstName: "Julien",
+ LastName: "Savea",
+ Email: "jsavea@ab.co.nz",
"jsavea@ab.co.nz", + } + _, err = ns.Exec(js) + test.Error(err) + + // Make sure we can pull him out again + p2 := Person{} + db.Get(&p2, db.Rebind("SELECT * FROM person WHERE email=?"), js.Email) + if p2.Email != js.Email { + t.Errorf("expected %s, got %s", js.Email, p2.Email) + } + + // test Txn NamedStmts + tx := db.MustBegin() + txns := tx.NamedStmt(ns) + + // We're going to add Steven in this txn + sl := Person{ + FirstName: "Steven", + LastName: "Luatua", + Email: "sluatua@ab.co.nz", + } + + _, err = txns.Exec(sl) + test.Error(err) + // then rollback... + tx.Rollback() + // looking for Steven after a rollback should fail + err = db.Get(&p2, db.Rebind("SELECT * FROM person WHERE email=?"), sl.Email) + if err != sql.ErrNoRows { + t.Errorf("expected no rows error, got %v", err) + } + + // now do the same, but commit + tx = db.MustBegin() + txns = tx.NamedStmt(ns) + _, err = txns.Exec(sl) + test.Error(err) + tx.Commit() + + // looking for Steven after a Commit should succeed + err = db.Get(&p2, db.Rebind("SELECT * FROM person WHERE email=?"), sl.Email) + test.Error(err) + if p2.Email != sl.Email { + t.Errorf("expected %s, got %s", sl.Email, p2.Email) + } + + }) +} diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/README.md b/vendor/github.com/jmoiron/sqlx/reflectx/README.md new file mode 100644 index 0000000..f01d3d1 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/reflectx/README.md @@ -0,0 +1,17 @@ +# reflectx + +The sqlx package has special reflect needs. In particular, it needs to: + +* be able to map a name to a field +* understand embedded structs +* understand mapping names to fields by a particular tag +* user specified name -> field mapping functions + +These behaviors mimic the behaviors by the standard library marshallers and also the +behavior of standard Go accessors. + +The first two are amply taken care of by `Reflect.Value.FieldByName`, and the third is +addressed by `Reflect.Value.FieldByNameFunc`, but these don't quite understand struct +tags in the ways that are vital to most marshallers, and they are slow. + +This reflectx package extends reflect to achieve these goals. diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go b/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go new file mode 100644 index 0000000..73c21eb --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go @@ -0,0 +1,441 @@ +// Package reflectx implements extensions to the standard reflect lib suitable +// for implementing marshalling and unmarshalling packages. The main Mapper type +// allows for Go-compatible named attribute access, including accessing embedded +// struct attributes and the ability to use functions and struct tags to +// customize field names. +// +package reflectx + +import ( + "reflect" + "runtime" + "strings" + "sync" +) + +// A FieldInfo is metadata for a struct field. +type FieldInfo struct { + Index []int + Path string + Field reflect.StructField + Zero reflect.Value + Name string + Options map[string]string + Embedded bool + Children []*FieldInfo + Parent *FieldInfo +} + +// A StructMap is an index of field metadata for a struct. +type StructMap struct { + Tree *FieldInfo + Index []*FieldInfo + Paths map[string]*FieldInfo + Names map[string]*FieldInfo +} + +// GetByPath returns a *FieldInfo for a given string path. +func (f StructMap) GetByPath(path string) *FieldInfo { + return f.Paths[path] +} + +// GetByTraversal returns a *FieldInfo for a given integer path. 
It is +// analogous to reflect.FieldByIndex, but using the cached traversal +// rather than re-executing the reflect machinery each time. +func (f StructMap) GetByTraversal(index []int) *FieldInfo { + if len(index) == 0 { + return nil + } + + tree := f.Tree + for _, i := range index { + if i >= len(tree.Children) || tree.Children[i] == nil { + return nil + } + tree = tree.Children[i] + } + return tree +} + +// Mapper is a general purpose mapper of names to struct fields. A Mapper +// behaves like most marshallers in the standard library, obeying a field tag +// for name mapping but also providing a basic transform function. +type Mapper struct { + cache map[reflect.Type]*StructMap + tagName string + tagMapFunc func(string) string + mapFunc func(string) string + mutex sync.Mutex +} + +// NewMapper returns a new mapper using the tagName as its struct field tag. +// If tagName is the empty string, it is ignored. +func NewMapper(tagName string) *Mapper { + return &Mapper{ + cache: make(map[reflect.Type]*StructMap), + tagName: tagName, + } +} + +// NewMapperTagFunc returns a new mapper which contains a mapper for field names +// AND a mapper for tag values. This is useful for tags like json which can +// have values like "name,omitempty". +func NewMapperTagFunc(tagName string, mapFunc, tagMapFunc func(string) string) *Mapper { + return &Mapper{ + cache: make(map[reflect.Type]*StructMap), + tagName: tagName, + mapFunc: mapFunc, + tagMapFunc: tagMapFunc, + } +} + +// NewMapperFunc returns a new mapper which optionally obeys a field tag and +// a struct field name mapper func given by f. Tags will take precedence, but +// for any other field, the mapped name will be f(field.Name) +func NewMapperFunc(tagName string, f func(string) string) *Mapper { + return &Mapper{ + cache: make(map[reflect.Type]*StructMap), + tagName: tagName, + mapFunc: f, + } +} + +// TypeMap returns a mapping of field strings to int slices representing +// the traversal down the struct to reach the field. +func (m *Mapper) TypeMap(t reflect.Type) *StructMap { + m.mutex.Lock() + mapping, ok := m.cache[t] + if !ok { + mapping = getMapping(t, m.tagName, m.mapFunc, m.tagMapFunc) + m.cache[t] = mapping + } + m.mutex.Unlock() + return mapping +} + +// FieldMap returns the mapper's mapping of field names to reflect values. Panics +// if v's Kind is not Struct, or v is not Indirectable to a struct kind. +func (m *Mapper) FieldMap(v reflect.Value) map[string]reflect.Value { + v = reflect.Indirect(v) + mustBe(v, reflect.Struct) + + r := map[string]reflect.Value{} + tm := m.TypeMap(v.Type()) + for tagName, fi := range tm.Names { + r[tagName] = FieldByIndexes(v, fi.Index) + } + return r +} + +// FieldByName returns a field by its mapped name as a reflect.Value. +// Panics if v's Kind is not Struct or v is not Indirectable to a struct Kind. +// Returns zero Value if the name is not found. +func (m *Mapper) FieldByName(v reflect.Value, name string) reflect.Value { + v = reflect.Indirect(v) + mustBe(v, reflect.Struct) + + tm := m.TypeMap(v.Type()) + fi, ok := tm.Names[name] + if !ok { + return v + } + return FieldByIndexes(v, fi.Index) +} + +// FieldsByName returns a slice of values corresponding to the slice of names +// for the value. Panics if v's Kind is not Struct or v is not Indirectable +// to a struct Kind. Returns zero Value for each name not found. 
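+//
+// An illustrative sketch (the struct and names here are hypothetical):
+//
+//	m := NewMapperFunc("db", strings.ToLower)
+//	v := reflect.ValueOf(struct {
+//		A int `db:"a"`
+//		B int `db:"b"`
+//	}{A: 1, B: 2})
+//	vals := m.FieldsByName(v, []string{"a", "b", "missing"})
+//	// vals holds reflect.Values for 1 and 2, then a zero Value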
+func (m *Mapper) FieldsByName(v reflect.Value, names []string) []reflect.Value {
+ v = reflect.Indirect(v)
+ mustBe(v, reflect.Struct)
+
+ tm := m.TypeMap(v.Type())
+ vals := make([]reflect.Value, 0, len(names))
+ for _, name := range names {
+ fi, ok := tm.Names[name]
+ if !ok {
+ vals = append(vals, *new(reflect.Value))
+ } else {
+ vals = append(vals, FieldByIndexes(v, fi.Index))
+ }
+ }
+ return vals
+}
+
+// TraversalsByName returns a slice of int slices which represent the struct
+// traversals for each mapped name. Panics if t is not a struct or Indirectable
+// to a struct. Returns empty int slice for each name not found.
+func (m *Mapper) TraversalsByName(t reflect.Type, names []string) [][]int {
+ r := make([][]int, 0, len(names))
+ m.TraversalsByNameFunc(t, names, func(_ int, i []int) error {
+ if i == nil {
+ r = append(r, []int{})
+ } else {
+ r = append(r, i)
+ }
+
+ return nil
+ })
+ return r
+}
+
+// TraversalsByNameFunc traverses the mapped names and calls fn with the index of
+// each name and the struct traversal represented by that name. Panics if t is not
+// a struct or Indirectable to a struct. Returns the first error returned by fn or nil.
+func (m *Mapper) TraversalsByNameFunc(t reflect.Type, names []string, fn func(int, []int) error) error {
+ t = Deref(t)
+ mustBe(t, reflect.Struct)
+ tm := m.TypeMap(t)
+ for i, name := range names {
+ fi, ok := tm.Names[name]
+ if !ok {
+ if err := fn(i, nil); err != nil {
+ return err
+ }
+ } else {
+ if err := fn(i, fi.Index); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// FieldByIndexes returns a value for the field given by the struct traversal
+// for the given value.
+func FieldByIndexes(v reflect.Value, indexes []int) reflect.Value {
+ for _, i := range indexes {
+ v = reflect.Indirect(v).Field(i)
+ // if this is a pointer and it's nil, allocate a new value and set it
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ alloc := reflect.New(Deref(v.Type()))
+ v.Set(alloc)
+ }
+ if v.Kind() == reflect.Map && v.IsNil() {
+ v.Set(reflect.MakeMap(v.Type()))
+ }
+ }
+ return v
+}
+
+// FieldByIndexesReadOnly returns a value for a particular struct traversal,
+// but is not concerned with allocating nil pointers because the value is
+// going to be used for reading and not setting.
+func FieldByIndexesReadOnly(v reflect.Value, indexes []int) reflect.Value {
+ for _, i := range indexes {
+ v = reflect.Indirect(v).Field(i)
+ }
+ return v
+}
+
+// Deref is Indirect for reflect.Types
+func Deref(t reflect.Type) reflect.Type {
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ return t
+}
+
+// -- helpers & utilities --
+
+type kinder interface {
+ Kind() reflect.Kind
+}
+
+// mustBe checks a value against a kind, panicking with a reflect.ValueError
+// if the kind isn't that which is required.
+func mustBe(v kinder, expected reflect.Kind) {
+ if k := v.Kind(); k != expected {
+ panic(&reflect.ValueError{Method: methodName(), Kind: k})
+ }
+}
+
+// methodName returns the caller of the function calling methodName
+func methodName() string {
+ pc, _, _, _ := runtime.Caller(2)
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ return "unknown method"
+ }
+ return f.Name()
+}
+
+type typeQueue struct {
+ t reflect.Type
+ fi *FieldInfo
+ pp string // Parent path
+}
+
+// A copying append that creates a new slice each time.
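+// The builtin append may reuse its argument's backing array, which would
+// let sibling fields overwrite each other's traversal paths as getMapping
+// below extends the same parent index; the copy keeps every
+// FieldInfo.Index independent. For example:
+//
+//	base := []int{0}
+//	a := apnd(base, 1) // [0 1]
+//	b := apnd(base, 2) // [0 2]; a is still [0 1]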
+func apnd(is []int, i int) []int { + x := make([]int, len(is)+1) + for p, n := range is { + x[p] = n + } + x[len(x)-1] = i + return x +} + +type mapf func(string) string + +// parseName parses the tag and the target name for the given field using +// the tagName (eg 'json' for `json:"foo"` tags), mapFunc for mapping the +// field's name to a target name, and tagMapFunc for mapping the tag to +// a target name. +func parseName(field reflect.StructField, tagName string, mapFunc, tagMapFunc mapf) (tag, fieldName string) { + // first, set the fieldName to the field's name + fieldName = field.Name + // if a mapFunc is set, use that to override the fieldName + if mapFunc != nil { + fieldName = mapFunc(fieldName) + } + + // if there's no tag to look for, return the field name + if tagName == "" { + return "", fieldName + } + + // if this tag is not set using the normal convention in the tag, + // then return the fieldname.. this check is done because according + // to the reflect documentation: + // If the tag does not have the conventional format, + // the value returned by Get is unspecified. + // which doesn't sound great. + if !strings.Contains(string(field.Tag), tagName+":") { + return "", fieldName + } + + // at this point we're fairly sure that we have a tag, so lets pull it out + tag = field.Tag.Get(tagName) + + // if we have a mapper function, call it on the whole tag + // XXX: this is a change from the old version, which pulled out the name + // before the tagMapFunc could be run, but I think this is the right way + if tagMapFunc != nil { + tag = tagMapFunc(tag) + } + + // finally, split the options from the name + parts := strings.Split(tag, ",") + fieldName = parts[0] + + return tag, fieldName +} + +// parseOptions parses options out of a tag string, skipping the name +func parseOptions(tag string) map[string]string { + parts := strings.Split(tag, ",") + options := make(map[string]string, len(parts)) + if len(parts) > 1 { + for _, opt := range parts[1:] { + // short circuit potentially expensive split op + if strings.Contains(opt, "=") { + kv := strings.Split(opt, "=") + options[kv[0]] = kv[1] + continue + } + options[opt] = "" + } + } + return options +} + +// getMapping returns a mapping for the t type, using the tagName, mapFunc and +// tagMapFunc to determine the canonical names of fields. 
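+//
+// A hypothetical illustration of the canonical names produced:
+//
+//	type Inner struct {
+//		B int `db:"b"`
+//	}
+//	type Outer struct {
+//		A    int   `db:"a"`
+//		Inner            // embedded: B is promoted to path "b"
+//		Nest Inner `db:"nest"` // nested: B maps to path "nest.b"
+//	}
+//
+// The mapping for Outer then includes, among others, the paths "a", "b",
+// "nest" and "nest.b".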
+func getMapping(t reflect.Type, tagName string, mapFunc, tagMapFunc mapf) *StructMap { + m := []*FieldInfo{} + + root := &FieldInfo{} + queue := []typeQueue{} + queue = append(queue, typeQueue{Deref(t), root, ""}) + +QueueLoop: + for len(queue) != 0 { + // pop the first item off of the queue + tq := queue[0] + queue = queue[1:] + + // ignore recursive field + for p := tq.fi.Parent; p != nil; p = p.Parent { + if tq.fi.Field.Type == p.Field.Type { + continue QueueLoop + } + } + + nChildren := 0 + if tq.t.Kind() == reflect.Struct { + nChildren = tq.t.NumField() + } + tq.fi.Children = make([]*FieldInfo, nChildren) + + // iterate through all of its fields + for fieldPos := 0; fieldPos < nChildren; fieldPos++ { + + f := tq.t.Field(fieldPos) + + // parse the tag and the target name using the mapping options for this field + tag, name := parseName(f, tagName, mapFunc, tagMapFunc) + + // if the name is "-", disabled via a tag, skip it + if name == "-" { + continue + } + + fi := FieldInfo{ + Field: f, + Name: name, + Zero: reflect.New(f.Type).Elem(), + Options: parseOptions(tag), + } + + // if the path is empty this path is just the name + if tq.pp == "" { + fi.Path = fi.Name + } else { + fi.Path = tq.pp + "." + fi.Name + } + + // skip unexported fields + if len(f.PkgPath) != 0 && !f.Anonymous { + continue + } + + // bfs search of anonymous embedded structs + if f.Anonymous { + pp := tq.pp + if tag != "" { + pp = fi.Path + } + + fi.Embedded = true + fi.Index = apnd(tq.fi.Index, fieldPos) + nChildren := 0 + ft := Deref(f.Type) + if ft.Kind() == reflect.Struct { + nChildren = ft.NumField() + } + fi.Children = make([]*FieldInfo, nChildren) + queue = append(queue, typeQueue{Deref(f.Type), &fi, pp}) + } else if fi.Zero.Kind() == reflect.Struct || (fi.Zero.Kind() == reflect.Ptr && fi.Zero.Type().Elem().Kind() == reflect.Struct) { + fi.Index = apnd(tq.fi.Index, fieldPos) + fi.Children = make([]*FieldInfo, Deref(f.Type).NumField()) + queue = append(queue, typeQueue{Deref(f.Type), &fi, fi.Path}) + } + + fi.Index = apnd(tq.fi.Index, fieldPos) + fi.Parent = tq.fi + tq.fi.Children[fieldPos] = &fi + m = append(m, &fi) + } + } + + flds := &StructMap{Index: m, Tree: root, Paths: map[string]*FieldInfo{}, Names: map[string]*FieldInfo{}} + for _, fi := range flds.Index { + flds.Paths[fi.Path] = fi + if fi.Name != "" && !fi.Embedded { + flds.Names[fi.Path] = fi + } + } + + return flds +} diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/reflect_test.go b/vendor/github.com/jmoiron/sqlx/reflectx/reflect_test.go new file mode 100644 index 0000000..d3879ed --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/reflectx/reflect_test.go @@ -0,0 +1,974 @@ +package reflectx + +import ( + "reflect" + "strings" + "testing" +) + +func ival(v reflect.Value) int { + return v.Interface().(int) +} + +func TestBasic(t *testing.T) { + type Foo struct { + A int + B int + C int + } + + f := Foo{1, 2, 3} + fv := reflect.ValueOf(f) + m := NewMapperFunc("", func(s string) string { return s }) + + v := m.FieldByName(fv, "A") + if ival(v) != f.A { + t.Errorf("Expecting %d, got %d", ival(v), f.A) + } + v = m.FieldByName(fv, "B") + if ival(v) != f.B { + t.Errorf("Expecting %d, got %d", f.B, ival(v)) + } + v = m.FieldByName(fv, "C") + if ival(v) != f.C { + t.Errorf("Expecting %d, got %d", f.C, ival(v)) + } +} + +func TestBasicEmbedded(t *testing.T) { + type Foo struct { + A int + } + + type Bar struct { + Foo // `db:""` is implied for an embedded struct + B int + C int `db:"-"` + } + + type Baz struct { + A int + Bar `db:"Bar"` + } + + m := 
NewMapperFunc("db", func(s string) string { return s }) + + z := Baz{} + z.A = 1 + z.B = 2 + z.C = 4 + z.Bar.Foo.A = 3 + + zv := reflect.ValueOf(z) + fields := m.TypeMap(reflect.TypeOf(z)) + + if len(fields.Index) != 5 { + t.Errorf("Expecting 5 fields") + } + + // for _, fi := range fields.Index { + // log.Println(fi) + // } + + v := m.FieldByName(zv, "A") + if ival(v) != z.A { + t.Errorf("Expecting %d, got %d", z.A, ival(v)) + } + v = m.FieldByName(zv, "Bar.B") + if ival(v) != z.Bar.B { + t.Errorf("Expecting %d, got %d", z.Bar.B, ival(v)) + } + v = m.FieldByName(zv, "Bar.A") + if ival(v) != z.Bar.Foo.A { + t.Errorf("Expecting %d, got %d", z.Bar.Foo.A, ival(v)) + } + v = m.FieldByName(zv, "Bar.C") + if _, ok := v.Interface().(int); ok { + t.Errorf("Expecting Bar.C to not exist") + } + + fi := fields.GetByPath("Bar.C") + if fi != nil { + t.Errorf("Bar.C should not exist") + } +} + +func TestEmbeddedSimple(t *testing.T) { + type UUID [16]byte + type MyID struct { + UUID + } + type Item struct { + ID MyID + } + z := Item{} + + m := NewMapper("db") + m.TypeMap(reflect.TypeOf(z)) +} + +func TestBasicEmbeddedWithTags(t *testing.T) { + type Foo struct { + A int `db:"a"` + } + + type Bar struct { + Foo // `db:""` is implied for an embedded struct + B int `db:"b"` + } + + type Baz struct { + A int `db:"a"` + Bar // `db:""` is implied for an embedded struct + } + + m := NewMapper("db") + + z := Baz{} + z.A = 1 + z.B = 2 + z.Bar.Foo.A = 3 + + zv := reflect.ValueOf(z) + fields := m.TypeMap(reflect.TypeOf(z)) + + if len(fields.Index) != 5 { + t.Errorf("Expecting 5 fields") + } + + // for _, fi := range fields.index { + // log.Println(fi) + // } + + v := m.FieldByName(zv, "a") + if ival(v) != z.Bar.Foo.A { // the dominant field + t.Errorf("Expecting %d, got %d", z.Bar.Foo.A, ival(v)) + } + v = m.FieldByName(zv, "b") + if ival(v) != z.B { + t.Errorf("Expecting %d, got %d", z.B, ival(v)) + } +} + +func TestFlatTags(t *testing.T) { + m := NewMapper("db") + + type Asset struct { + Title string `db:"title"` + } + type Post struct { + Author string `db:"author,required"` + Asset Asset `db:""` + } + // Post columns: (author title) + + post := Post{Author: "Joe", Asset: Asset{Title: "Hello"}} + pv := reflect.ValueOf(post) + + v := m.FieldByName(pv, "author") + if v.Interface().(string) != post.Author { + t.Errorf("Expecting %s, got %s", post.Author, v.Interface().(string)) + } + v = m.FieldByName(pv, "title") + if v.Interface().(string) != post.Asset.Title { + t.Errorf("Expecting %s, got %s", post.Asset.Title, v.Interface().(string)) + } +} + +func TestNestedStruct(t *testing.T) { + m := NewMapper("db") + + type Details struct { + Active bool `db:"active"` + } + type Asset struct { + Title string `db:"title"` + Details Details `db:"details"` + } + type Post struct { + Author string `db:"author,required"` + Asset `db:"asset"` + } + // Post columns: (author asset.title asset.details.active) + + post := Post{ + Author: "Joe", + Asset: Asset{Title: "Hello", Details: Details{Active: true}}, + } + pv := reflect.ValueOf(post) + + v := m.FieldByName(pv, "author") + if v.Interface().(string) != post.Author { + t.Errorf("Expecting %s, got %s", post.Author, v.Interface().(string)) + } + v = m.FieldByName(pv, "title") + if _, ok := v.Interface().(string); ok { + t.Errorf("Expecting field to not exist") + } + v = m.FieldByName(pv, "asset.title") + if v.Interface().(string) != post.Asset.Title { + t.Errorf("Expecting %s, got %s", post.Asset.Title, v.Interface().(string)) + } + v = m.FieldByName(pv, "asset.details.active") + 
+ if v.Interface().(bool) != post.Asset.Details.Active {
+ t.Errorf("Expecting %v, got %v", post.Asset.Details.Active, v.Interface().(bool))
+ }
+}
+
+func TestInlineStruct(t *testing.T) {
+ m := NewMapperTagFunc("db", strings.ToLower, nil)
+
+ type Employee struct {
+ Name string
+ ID int
+ }
+ type Boss Employee
+ type person struct {
+ Employee `db:"employee"`
+ Boss `db:"boss"`
+ }
+ // employees columns: (employee.name employee.id boss.name boss.id)
+
+ em := person{Employee: Employee{Name: "Joe", ID: 2}, Boss: Boss{Name: "Dick", ID: 1}}
+ ev := reflect.ValueOf(em)
+
+ fields := m.TypeMap(reflect.TypeOf(em))
+ if len(fields.Index) != 6 {
+ t.Errorf("Expecting 6 fields")
+ }
+
+ v := m.FieldByName(ev, "employee.name")
+ if v.Interface().(string) != em.Employee.Name {
+ t.Errorf("Expecting %s, got %s", em.Employee.Name, v.Interface().(string))
+ }
+ v = m.FieldByName(ev, "boss.id")
+ if ival(v) != em.Boss.ID {
+ t.Errorf("Expecting %v, got %v", em.Boss.ID, ival(v))
+ }
+}
+
+func TestRecursiveStruct(t *testing.T) {
+ type Person struct {
+ Parent *Person
+ }
+ m := NewMapperFunc("db", strings.ToLower)
+ var p *Person
+ m.TypeMap(reflect.TypeOf(p))
+}
+
+func TestFieldsEmbedded(t *testing.T) {
+ m := NewMapper("db")
+
+ type Person struct {
+ Name string `db:"name,size=64"`
+ }
+ type Place struct {
+ Name string `db:"name"`
+ }
+ type Article struct {
+ Title string `db:"title"`
+ }
+ type PP struct {
+ Person `db:"person,required"`
+ Place `db:",someflag"`
+ Article `db:",required"`
+ }
+ // PP columns: (person.name name title)
+
+ pp := PP{}
+ pp.Person.Name = "Peter"
+ pp.Place.Name = "Toronto"
+ pp.Article.Title = "Best city ever"
+
+ fields := m.TypeMap(reflect.TypeOf(pp))
+ // for i, f := range fields {
+ // log.Println(i, f)
+ // }
+
+ ppv := reflect.ValueOf(pp)
+
+ v := m.FieldByName(ppv, "person.name")
+ if v.Interface().(string) != pp.Person.Name {
+ t.Errorf("Expecting %s, got %s", pp.Person.Name, v.Interface().(string))
+ }
+
+ v = m.FieldByName(ppv, "name")
+ if v.Interface().(string) != pp.Place.Name {
+ t.Errorf("Expecting %s, got %s", pp.Place.Name, v.Interface().(string))
+ }
+
+ v = m.FieldByName(ppv, "title")
+ if v.Interface().(string) != pp.Article.Title {
+ t.Errorf("Expecting %s, got %s", pp.Article.Title, v.Interface().(string))
+ }
+
+ fi := fields.GetByPath("person")
+ if _, ok := fi.Options["required"]; !ok {
+ t.Errorf("Expecting required option to be set")
+ }
+ if !fi.Embedded {
+ t.Errorf("Expecting field to be embedded")
+ }
+ if len(fi.Index) != 1 || fi.Index[0] != 0 {
+ t.Errorf("Expecting index to be [0]")
+ }
+
+ fi = fields.GetByPath("person.name")
+ if fi == nil {
+ t.Errorf("Expecting person.name to exist")
+ }
+ if fi.Path != "person.name" {
+ t.Errorf("Expecting %s, got %s", "person.name", fi.Path)
+ }
+ if fi.Options["size"] != "64" {
+ t.Errorf("Expecting %s, got %s", "64", fi.Options["size"])
+ }
+
+ fi = fields.GetByTraversal([]int{1, 0})
+ if fi == nil {
+ t.Errorf("Expecting traversal to exist")
+ }
+ if fi.Path != "name" {
+ t.Errorf("Expecting %s, got %s", "name", fi.Path)
+ }
+
+ fi = fields.GetByTraversal([]int{2})
+ if fi == nil {
+ t.Errorf("Expecting traversal to exist")
+ }
+ if _, ok := fi.Options["required"]; !ok {
+ t.Errorf("Expecting required option to be set")
+ }
+
+ trs := m.TraversalsByName(reflect.TypeOf(pp), []string{"person.name", "name", "title"})
+ if !reflect.DeepEqual(trs, [][]int{{0, 0}, {1, 0}, {2, 0}}) {
+ t.Errorf("Expecting traversal: %v", trs)
+ }
+}
+
+func TestPtrFields(t *testing.T) {
+ m :=
NewMapperTagFunc("db", strings.ToLower, nil) + type Asset struct { + Title string + } + type Post struct { + *Asset `db:"asset"` + Author string + } + + post := &Post{Author: "Joe", Asset: &Asset{Title: "Hiyo"}} + pv := reflect.ValueOf(post) + + fields := m.TypeMap(reflect.TypeOf(post)) + if len(fields.Index) != 3 { + t.Errorf("Expecting 3 fields") + } + + v := m.FieldByName(pv, "asset.title") + if v.Interface().(string) != post.Asset.Title { + t.Errorf("Expecting %s, got %s", post.Asset.Title, v.Interface().(string)) + } + v = m.FieldByName(pv, "author") + if v.Interface().(string) != post.Author { + t.Errorf("Expecting %s, got %s", post.Author, v.Interface().(string)) + } +} + +func TestNamedPtrFields(t *testing.T) { + m := NewMapperTagFunc("db", strings.ToLower, nil) + + type User struct { + Name string + } + + type Asset struct { + Title string + + Owner *User `db:"owner"` + } + type Post struct { + Author string + + Asset1 *Asset `db:"asset1"` + Asset2 *Asset `db:"asset2"` + } + + post := &Post{Author: "Joe", Asset1: &Asset{Title: "Hiyo", Owner: &User{"Username"}}} // Let Asset2 be nil + pv := reflect.ValueOf(post) + + fields := m.TypeMap(reflect.TypeOf(post)) + if len(fields.Index) != 9 { + t.Errorf("Expecting 9 fields") + } + + v := m.FieldByName(pv, "asset1.title") + if v.Interface().(string) != post.Asset1.Title { + t.Errorf("Expecting %s, got %s", post.Asset1.Title, v.Interface().(string)) + } + v = m.FieldByName(pv, "asset1.owner.name") + if v.Interface().(string) != post.Asset1.Owner.Name { + t.Errorf("Expecting %s, got %s", post.Asset1.Owner.Name, v.Interface().(string)) + } + v = m.FieldByName(pv, "asset2.title") + if v.Interface().(string) != post.Asset2.Title { + t.Errorf("Expecting %s, got %s", post.Asset2.Title, v.Interface().(string)) + } + v = m.FieldByName(pv, "asset2.owner.name") + if v.Interface().(string) != post.Asset2.Owner.Name { + t.Errorf("Expecting %s, got %s", post.Asset2.Owner.Name, v.Interface().(string)) + } + v = m.FieldByName(pv, "author") + if v.Interface().(string) != post.Author { + t.Errorf("Expecting %s, got %s", post.Author, v.Interface().(string)) + } +} + +func TestFieldMap(t *testing.T) { + type Foo struct { + A int + B int + C int + } + + f := Foo{1, 2, 3} + m := NewMapperFunc("db", strings.ToLower) + + fm := m.FieldMap(reflect.ValueOf(f)) + + if len(fm) != 3 { + t.Errorf("Expecting %d keys, got %d", 3, len(fm)) + } + if fm["a"].Interface().(int) != 1 { + t.Errorf("Expecting %d, got %d", 1, ival(fm["a"])) + } + if fm["b"].Interface().(int) != 2 { + t.Errorf("Expecting %d, got %d", 2, ival(fm["b"])) + } + if fm["c"].Interface().(int) != 3 { + t.Errorf("Expecting %d, got %d", 3, ival(fm["c"])) + } +} + +func TestTagNameMapping(t *testing.T) { + type Strategy struct { + StrategyID string `protobuf:"bytes,1,opt,name=strategy_id" json:"strategy_id,omitempty"` + StrategyName string + } + + m := NewMapperTagFunc("json", strings.ToUpper, func(value string) string { + if strings.Contains(value, ",") { + return strings.Split(value, ",")[0] + } + return value + }) + strategy := Strategy{"1", "Alpah"} + mapping := m.TypeMap(reflect.TypeOf(strategy)) + + for _, key := range []string{"strategy_id", "STRATEGYNAME"} { + if fi := mapping.GetByPath(key); fi == nil { + t.Errorf("Expecting to find key %s in mapping but did not.", key) + } + } +} + +func TestMapping(t *testing.T) { + type Person struct { + ID int + Name string + WearsGlasses bool `db:"wears_glasses"` + } + + m := NewMapperFunc("db", strings.ToLower) + p := Person{1, "Jason", true} + mapping := 
m.TypeMap(reflect.TypeOf(p)) + + for _, key := range []string{"id", "name", "wears_glasses"} { + if fi := mapping.GetByPath(key); fi == nil { + t.Errorf("Expecting to find key %s in mapping but did not.", key) + } + } + + type SportsPerson struct { + Weight int + Age int + Person + } + s := SportsPerson{Weight: 100, Age: 30, Person: p} + mapping = m.TypeMap(reflect.TypeOf(s)) + for _, key := range []string{"id", "name", "wears_glasses", "weight", "age"} { + if fi := mapping.GetByPath(key); fi == nil { + t.Errorf("Expecting to find key %s in mapping but did not.", key) + } + } + + type RugbyPlayer struct { + Position int + IsIntense bool `db:"is_intense"` + IsAllBlack bool `db:"-"` + SportsPerson + } + r := RugbyPlayer{12, true, false, s} + mapping = m.TypeMap(reflect.TypeOf(r)) + for _, key := range []string{"id", "name", "wears_glasses", "weight", "age", "position", "is_intense"} { + if fi := mapping.GetByPath(key); fi == nil { + t.Errorf("Expecting to find key %s in mapping but did not.", key) + } + } + + if fi := mapping.GetByPath("isallblack"); fi != nil { + t.Errorf("Expecting to ignore `IsAllBlack` field") + } +} + +func TestGetByTraversal(t *testing.T) { + type C struct { + C0 int + C1 int + } + type B struct { + B0 string + B1 *C + } + type A struct { + A0 int + A1 B + } + + testCases := []struct { + Index []int + ExpectedName string + ExpectNil bool + }{ + { + Index: []int{0}, + ExpectedName: "A0", + }, + { + Index: []int{1, 0}, + ExpectedName: "B0", + }, + { + Index: []int{1, 1, 1}, + ExpectedName: "C1", + }, + { + Index: []int{3, 4, 5}, + ExpectNil: true, + }, + { + Index: []int{}, + ExpectNil: true, + }, + { + Index: nil, + ExpectNil: true, + }, + } + + m := NewMapperFunc("db", func(n string) string { return n }) + tm := m.TypeMap(reflect.TypeOf(A{})) + + for i, tc := range testCases { + fi := tm.GetByTraversal(tc.Index) + if tc.ExpectNil { + if fi != nil { + t.Errorf("%d: expected nil, got %v", i, fi) + } + continue + } + + if fi == nil { + t.Errorf("%d: expected %s, got nil", i, tc.ExpectedName) + continue + } + + if fi.Name != tc.ExpectedName { + t.Errorf("%d: expected %s, got %s", i, tc.ExpectedName, fi.Name) + } + } +} + +// TestMapperMethodsByName tests Mapper methods FieldByName and TraversalsByName +func TestMapperMethodsByName(t *testing.T) { + type C struct { + C0 string + C1 int + } + type B struct { + B0 *C `db:"B0"` + B1 C `db:"B1"` + B2 string `db:"B2"` + } + type A struct { + A0 *B `db:"A0"` + B `db:"A1"` + A2 int + a3 int + } + + val := &A{ + A0: &B{ + B0: &C{C0: "0", C1: 1}, + B1: C{C0: "2", C1: 3}, + B2: "4", + }, + B: B{ + B0: nil, + B1: C{C0: "5", C1: 6}, + B2: "7", + }, + A2: 8, + } + + testCases := []struct { + Name string + ExpectInvalid bool + ExpectedValue interface{} + ExpectedIndexes []int + }{ + { + Name: "A0.B0.C0", + ExpectedValue: "0", + ExpectedIndexes: []int{0, 0, 0}, + }, + { + Name: "A0.B0.C1", + ExpectedValue: 1, + ExpectedIndexes: []int{0, 0, 1}, + }, + { + Name: "A0.B1.C0", + ExpectedValue: "2", + ExpectedIndexes: []int{0, 1, 0}, + }, + { + Name: "A0.B1.C1", + ExpectedValue: 3, + ExpectedIndexes: []int{0, 1, 1}, + }, + { + Name: "A0.B2", + ExpectedValue: "4", + ExpectedIndexes: []int{0, 2}, + }, + { + Name: "A1.B0.C0", + ExpectedValue: "", + ExpectedIndexes: []int{1, 0, 0}, + }, + { + Name: "A1.B0.C1", + ExpectedValue: 0, + ExpectedIndexes: []int{1, 0, 1}, + }, + { + Name: "A1.B1.C0", + ExpectedValue: "5", + ExpectedIndexes: []int{1, 1, 0}, + }, + { + Name: "A1.B1.C1", + ExpectedValue: 6, + ExpectedIndexes: []int{1, 1, 1}, + }, + { + Name: 
"A1.B2", + ExpectedValue: "7", + ExpectedIndexes: []int{1, 2}, + }, + { + Name: "A2", + ExpectedValue: 8, + ExpectedIndexes: []int{2}, + }, + { + Name: "XYZ", + ExpectInvalid: true, + ExpectedIndexes: []int{}, + }, + { + Name: "a3", + ExpectInvalid: true, + ExpectedIndexes: []int{}, + }, + } + + // build the names array from the test cases + names := make([]string, len(testCases)) + for i, tc := range testCases { + names[i] = tc.Name + } + m := NewMapperFunc("db", func(n string) string { return n }) + v := reflect.ValueOf(val) + values := m.FieldsByName(v, names) + if len(values) != len(testCases) { + t.Errorf("expected %d values, got %d", len(testCases), len(values)) + t.FailNow() + } + indexes := m.TraversalsByName(v.Type(), names) + if len(indexes) != len(testCases) { + t.Errorf("expected %d traversals, got %d", len(testCases), len(indexes)) + t.FailNow() + } + for i, val := range values { + tc := testCases[i] + traversal := indexes[i] + if !reflect.DeepEqual(tc.ExpectedIndexes, traversal) { + t.Errorf("expected %v, got %v", tc.ExpectedIndexes, traversal) + t.FailNow() + } + val = reflect.Indirect(val) + if tc.ExpectInvalid { + if val.IsValid() { + t.Errorf("%d: expected zero value, got %v", i, val) + } + continue + } + if !val.IsValid() { + t.Errorf("%d: expected valid value, got %v", i, val) + continue + } + actualValue := reflect.Indirect(val).Interface() + if !reflect.DeepEqual(tc.ExpectedValue, actualValue) { + t.Errorf("%d: expected %v, got %v", i, tc.ExpectedValue, actualValue) + } + } +} + +func TestFieldByIndexes(t *testing.T) { + type C struct { + C0 bool + C1 string + C2 int + C3 map[string]int + } + type B struct { + B1 C + B2 *C + } + type A struct { + A1 B + A2 *B + } + testCases := []struct { + value interface{} + indexes []int + expectedValue interface{} + readOnly bool + }{ + { + value: A{ + A1: B{B1: C{C0: true}}, + }, + indexes: []int{0, 0, 0}, + expectedValue: true, + readOnly: true, + }, + { + value: A{ + A2: &B{B2: &C{C1: "answer"}}, + }, + indexes: []int{1, 1, 1}, + expectedValue: "answer", + readOnly: true, + }, + { + value: &A{}, + indexes: []int{1, 1, 3}, + expectedValue: map[string]int{}, + }, + } + + for i, tc := range testCases { + checkResults := func(v reflect.Value) { + if tc.expectedValue == nil { + if !v.IsNil() { + t.Errorf("%d: expected nil, actual %v", i, v.Interface()) + } + } else { + if !reflect.DeepEqual(tc.expectedValue, v.Interface()) { + t.Errorf("%d: expected %v, actual %v", i, tc.expectedValue, v.Interface()) + } + } + } + + checkResults(FieldByIndexes(reflect.ValueOf(tc.value), tc.indexes)) + if tc.readOnly { + checkResults(FieldByIndexesReadOnly(reflect.ValueOf(tc.value), tc.indexes)) + } + } +} + +func TestMustBe(t *testing.T) { + typ := reflect.TypeOf(E1{}) + mustBe(typ, reflect.Struct) + + defer func() { + if r := recover(); r != nil { + valueErr, ok := r.(*reflect.ValueError) + if !ok { + t.Errorf("unexpected Method: %s", valueErr.Method) + t.Error("expected panic with *reflect.ValueError") + return + } + if valueErr.Method != "github.com/jmoiron/sqlx/reflectx.TestMustBe" { + } + if valueErr.Kind != reflect.String { + t.Errorf("unexpected Kind: %s", valueErr.Kind) + } + } else { + t.Error("expected panic") + } + }() + + typ = reflect.TypeOf("string") + mustBe(typ, reflect.Struct) + t.Error("got here, didn't expect to") +} + +type E1 struct { + A int +} +type E2 struct { + E1 + B int +} +type E3 struct { + E2 + C int +} +type E4 struct { + E3 + D int +} + +func BenchmarkFieldNameL1(b *testing.B) { + e4 := E4{D: 1} + for i := 0; i < b.N; 
i++ { + v := reflect.ValueOf(e4) + f := v.FieldByName("D") + if f.Interface().(int) != 1 { + b.Fatal("Wrong value.") + } + } +} + +func BenchmarkFieldNameL4(b *testing.B) { + e4 := E4{} + e4.A = 1 + for i := 0; i < b.N; i++ { + v := reflect.ValueOf(e4) + f := v.FieldByName("A") + if f.Interface().(int) != 1 { + b.Fatal("Wrong value.") + } + } +} + +func BenchmarkFieldPosL1(b *testing.B) { + e4 := E4{D: 1} + for i := 0; i < b.N; i++ { + v := reflect.ValueOf(e4) + f := v.Field(1) + if f.Interface().(int) != 1 { + b.Fatal("Wrong value.") + } + } +} + +func BenchmarkFieldPosL4(b *testing.B) { + e4 := E4{} + e4.A = 1 + for i := 0; i < b.N; i++ { + v := reflect.ValueOf(e4) + f := v.Field(0) + f = f.Field(0) + f = f.Field(0) + f = f.Field(0) + if f.Interface().(int) != 1 { + b.Fatal("Wrong value.") + } + } +} + +func BenchmarkFieldByIndexL4(b *testing.B) { + e4 := E4{} + e4.A = 1 + idx := []int{0, 0, 0, 0} + for i := 0; i < b.N; i++ { + v := reflect.ValueOf(e4) + f := FieldByIndexes(v, idx) + if f.Interface().(int) != 1 { + b.Fatal("Wrong value.") + } + } +} + +func BenchmarkTraversalsByName(b *testing.B) { + type A struct { + Value int + } + + type B struct { + A A + } + + type C struct { + B B + } + + type D struct { + C C + } + + m := NewMapper("") + t := reflect.TypeOf(D{}) + names := []string{"C", "B", "A", "Value"} + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + if l := len(m.TraversalsByName(t, names)); l != len(names) { + b.Errorf("expected %d values, got %d", len(names), l) + } + } +} + +func BenchmarkTraversalsByNameFunc(b *testing.B) { + type A struct { + Z int + } + + type B struct { + A A + } + + type C struct { + B B + } + + type D struct { + C C + } + + m := NewMapper("") + t := reflect.TypeOf(D{}) + names := []string{"C", "B", "A", "Z", "Y"} + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + var l int + + if err := m.TraversalsByNameFunc(t, names, func(_ int, _ []int) error { + l++ + return nil + }); err != nil { + b.Errorf("unexpected error %s", err) + } + + if l != len(names) { + b.Errorf("expected %d values, got %d", len(names), l) + } + } +} \ No newline at end of file diff --git a/vendor/github.com/jmoiron/sqlx/sqlx.go b/vendor/github.com/jmoiron/sqlx/sqlx.go new file mode 100644 index 0000000..4385c3f --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/sqlx.go @@ -0,0 +1,1047 @@ +package sqlx + +import ( + "database/sql" + "database/sql/driver" + "errors" + "fmt" + + "io/ioutil" + "path/filepath" + "reflect" + "strings" + "sync" + + "github.com/jmoiron/sqlx/reflectx" +) + +// Although the NameMapper is convenient, in practice it should not +// be relied on except for application code. If you are writing a library +// that uses sqlx, you should be aware that the name mappings you expect +// can be overridden by your user's application. + +// NameMapper is used to map column names to struct field names. By default, +// it uses strings.ToLower to lowercase struct field names. It can be set +// to whatever you want, but it is encouraged to be set before sqlx is used +// as name-to-field mappings are cached after first use on a type. +var NameMapper = strings.ToLower +var origMapper = reflect.ValueOf(NameMapper) + +// Rather than creating on init, this is created when necessary so that +// importers have time to customize the NameMapper. +var mpr *reflectx.Mapper + +// mprMu protects mpr. +var mprMu sync.Mutex + +// mapper returns a valid mapper using the configured NameMapper func. 
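+//
+// A hypothetical sketch of the customization this allows (snakeCase is an
+// assumed helper, not part of this package):
+//
+//	func init() {
+//		// must run before any mappings are cached
+//		sqlx.NameMapper = snakeCase
+//	}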
+func mapper() *reflectx.Mapper { + mprMu.Lock() + defer mprMu.Unlock() + + if mpr == nil { + mpr = reflectx.NewMapperFunc("db", NameMapper) + } else if origMapper != reflect.ValueOf(NameMapper) { + // if NameMapper has changed, create a new mapper + mpr = reflectx.NewMapperFunc("db", NameMapper) + origMapper = reflect.ValueOf(NameMapper) + } + return mpr +} + +// isScannable takes the reflect.Type and the actual dest value and returns +// whether or not it's Scannable. Something is scannable if: +// * it is not a struct +// * it implements sql.Scanner +// * it has no exported fields +func isScannable(t reflect.Type) bool { + if reflect.PtrTo(t).Implements(_scannerInterface) { + return true + } + if t.Kind() != reflect.Struct { + return true + } + + // it's not important that we use the right mapper for this particular object, + // we're only concerned on how many exported fields this struct has + m := mapper() + if len(m.TypeMap(t).Index) == 0 { + return true + } + return false +} + +// ColScanner is an interface used by MapScan and SliceScan +type ColScanner interface { + Columns() ([]string, error) + Scan(dest ...interface{}) error + Err() error +} + +// Queryer is an interface used by Get and Select +type Queryer interface { + Query(query string, args ...interface{}) (*sql.Rows, error) + Queryx(query string, args ...interface{}) (*Rows, error) + QueryRowx(query string, args ...interface{}) *Row +} + +// Execer is an interface used by MustExec and LoadFile +type Execer interface { + Exec(query string, args ...interface{}) (sql.Result, error) +} + +// Binder is an interface for something which can bind queries (Tx, DB) +type binder interface { + DriverName() string + Rebind(string) string + BindNamed(string, interface{}) (string, []interface{}, error) +} + +// Ext is a union interface which can bind, query, and exec, used by +// NamedQuery and NamedExec. +type Ext interface { + binder + Queryer + Execer +} + +// Preparer is an interface used by Preparex. +type Preparer interface { + Prepare(query string) (*sql.Stmt, error) +} + +// determine if any of our extensions are unsafe +func isUnsafe(i interface{}) bool { + switch v := i.(type) { + case Row: + return v.unsafe + case *Row: + return v.unsafe + case Rows: + return v.unsafe + case *Rows: + return v.unsafe + case NamedStmt: + return v.Stmt.unsafe + case *NamedStmt: + return v.Stmt.unsafe + case Stmt: + return v.unsafe + case *Stmt: + return v.unsafe + case qStmt: + return v.unsafe + case *qStmt: + return v.unsafe + case DB: + return v.unsafe + case *DB: + return v.unsafe + case Tx: + return v.unsafe + case *Tx: + return v.unsafe + case sql.Rows, *sql.Rows: + return false + default: + return false + } +} + +func mapperFor(i interface{}) *reflectx.Mapper { + switch i.(type) { + case DB: + return i.(DB).Mapper + case *DB: + return i.(*DB).Mapper + case Tx: + return i.(Tx).Mapper + case *Tx: + return i.(*Tx).Mapper + default: + return mapper() + } +} + +var _scannerInterface = reflect.TypeOf((*sql.Scanner)(nil)).Elem() +var _valuerInterface = reflect.TypeOf((*driver.Valuer)(nil)).Elem() + +// Row is a reimplementation of sql.Row in order to gain access to the underlying +// sql.Rows.Columns() data, necessary for StructScan. +type Row struct { + err error + unsafe bool + rows *sql.Rows + Mapper *reflectx.Mapper +} + +// Scan is a fixed implementation of sql.Row.Scan, which does not discard the +// underlying error from the internal rows object if it exists. 
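+//
+// Note that *sql.RawBytes destinations are rejected below: the wrapped
+// rows are closed before Scan returns, which would invalidate any memory
+// a RawBytes points into.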
+func (r *Row) Scan(dest ...interface{}) error { + if r.err != nil { + return r.err + } + + // TODO(bradfitz): for now we need to defensively clone all + // []byte that the driver returned (not permitting + // *RawBytes in Rows.Scan), since we're about to close + // the Rows in our defer, when we return from this function. + // the contract with the driver.Next(...) interface is that it + // can return slices into read-only temporary memory that's + // only valid until the next Scan/Close. But the TODO is that + // for a lot of drivers, this copy will be unnecessary. We + // should provide an optional interface for drivers to + // implement to say, "don't worry, the []bytes that I return + // from Next will not be modified again." (for instance, if + // they were obtained from the network anyway) But for now we + // don't care. + defer r.rows.Close() + for _, dp := range dest { + if _, ok := dp.(*sql.RawBytes); ok { + return errors.New("sql: RawBytes isn't allowed on Row.Scan") + } + } + + if !r.rows.Next() { + if err := r.rows.Err(); err != nil { + return err + } + return sql.ErrNoRows + } + err := r.rows.Scan(dest...) + if err != nil { + return err + } + // Make sure the query can be processed to completion with no errors. + if err := r.rows.Close(); err != nil { + return err + } + return nil +} + +// Columns returns the underlying sql.Rows.Columns(), or the deferred error usually +// returned by Row.Scan() +func (r *Row) Columns() ([]string, error) { + if r.err != nil { + return []string{}, r.err + } + return r.rows.Columns() +} + +// ColumnTypes returns the underlying sql.Rows.ColumnTypes(), or the deferred error +func (r *Row) ColumnTypes() ([]*sql.ColumnType, error) { + if r.err != nil { + return []*sql.ColumnType{}, r.err + } + return r.rows.ColumnTypes() +} + +// Err returns the error encountered while scanning. +func (r *Row) Err() error { + return r.err +} + +// DB is a wrapper around sql.DB which keeps track of the driverName upon Open, +// used mostly to automatically bind named queries using the right bindvars. +type DB struct { + *sql.DB + driverName string + unsafe bool + Mapper *reflectx.Mapper +} + +// NewDb returns a new sqlx DB wrapper for a pre-existing *sql.DB. The +// driverName of the original database is required for named query support. +func NewDb(db *sql.DB, driverName string) *DB { + return &DB{DB: db, driverName: driverName, Mapper: mapper()} +} + +// DriverName returns the driverName passed to the Open function for this DB. +func (db *DB) DriverName() string { + return db.driverName +} + +// Open is the same as sql.Open, but returns an *sqlx.DB instead. +func Open(driverName, dataSourceName string) (*DB, error) { + db, err := sql.Open(driverName, dataSourceName) + if err != nil { + return nil, err + } + return &DB{DB: db, driverName: driverName, Mapper: mapper()}, err +} + +// MustOpen is the same as sql.Open, but returns an *sqlx.DB instead and panics on error. +func MustOpen(driverName, dataSourceName string) *DB { + db, err := Open(driverName, dataSourceName) + if err != nil { + panic(err) + } + return db +} + +// MapperFunc sets a new mapper for this db using the default sqlx struct tag +// and the provided mapper function. +func (db *DB) MapperFunc(mf func(string) string) { + db.Mapper = reflectx.NewMapperFunc("db", mf) +} + +// Rebind transforms a query from QUESTION to the DB driver's bindvar type. 
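+//
+// An illustrative sketch, assuming a postgres-backed DB:
+//
+//	q := db.Rebind("SELECT * FROM person WHERE email=?")
+//	// q == "SELECT * FROM person WHERE email=$1"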
+func (db *DB) Rebind(query string) string { + return Rebind(BindType(db.driverName), query) +} + +// Unsafe returns a version of DB which will silently succeed to scan when +// columns in the SQL result have no fields in the destination struct. +// sqlx.Stmt and sqlx.Tx which are created from this DB will inherit its +// safety behavior. +func (db *DB) Unsafe() *DB { + return &DB{DB: db.DB, driverName: db.driverName, unsafe: true, Mapper: db.Mapper} +} + +// BindNamed binds a query using the DB driver's bindvar type. +func (db *DB) BindNamed(query string, arg interface{}) (string, []interface{}, error) { + return bindNamedMapper(BindType(db.driverName), query, arg, db.Mapper) +} + +// NamedQuery using this DB. +// Any named placeholder parameters are replaced with fields from arg. +func (db *DB) NamedQuery(query string, arg interface{}) (*Rows, error) { + return NamedQuery(db, query, arg) +} + +// NamedExec using this DB. +// Any named placeholder parameters are replaced with fields from arg. +func (db *DB) NamedExec(query string, arg interface{}) (sql.Result, error) { + return NamedExec(db, query, arg) +} + +// Select using this DB. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) Select(dest interface{}, query string, args ...interface{}) error { + return Select(db, dest, query, args...) +} + +// Get using this DB. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (db *DB) Get(dest interface{}, query string, args ...interface{}) error { + return Get(db, dest, query, args...) +} + +// MustBegin starts a transaction, and panics on error. Returns an *sqlx.Tx instead +// of an *sql.Tx. +func (db *DB) MustBegin() *Tx { + tx, err := db.Beginx() + if err != nil { + panic(err) + } + return tx +} + +// Beginx begins a transaction and returns an *sqlx.Tx instead of an *sql.Tx. +func (db *DB) Beginx() (*Tx, error) { + tx, err := db.DB.Begin() + if err != nil { + return nil, err + } + return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err +} + +// Queryx queries the database and returns an *sqlx.Rows. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) Queryx(query string, args ...interface{}) (*Rows, error) { + r, err := db.DB.Query(query, args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err +} + +// QueryRowx queries the database and returns an *sqlx.Row. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) QueryRowx(query string, args ...interface{}) *Row { + rows, err := db.DB.Query(query, args...) + return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper} +} + +// MustExec (panic) runs MustExec using this database. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) MustExec(query string, args ...interface{}) sql.Result { + return MustExec(db, query, args...) +} + +// Preparex returns an sqlx.Stmt instead of a sql.Stmt +func (db *DB) Preparex(query string) (*Stmt, error) { + return Preparex(db, query) +} + +// PrepareNamed returns an sqlx.NamedStmt +func (db *DB) PrepareNamed(query string) (*NamedStmt, error) { + return prepareNamed(db, query) +} + +// Tx is an sqlx wrapper around sql.Tx with extra functionality +type Tx struct { + *sql.Tx + driverName string + unsafe bool + Mapper *reflectx.Mapper +} + +// DriverName returns the driverName used by the DB which began this transaction. 
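+// It is what allows Rebind and BindNamed below to choose the correct
+// bindvar style without consulting the caller.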
+func (tx *Tx) DriverName() string { + return tx.driverName +} + +// Rebind a query within a transaction's bindvar type. +func (tx *Tx) Rebind(query string) string { + return Rebind(BindType(tx.driverName), query) +} + +// Unsafe returns a version of Tx which will silently succeed to scan when +// columns in the SQL result have no fields in the destination struct. +func (tx *Tx) Unsafe() *Tx { + return &Tx{Tx: tx.Tx, driverName: tx.driverName, unsafe: true, Mapper: tx.Mapper} +} + +// BindNamed binds a query within a transaction's bindvar type. +func (tx *Tx) BindNamed(query string, arg interface{}) (string, []interface{}, error) { + return bindNamedMapper(BindType(tx.driverName), query, arg, tx.Mapper) +} + +// NamedQuery within a transaction. +// Any named placeholder parameters are replaced with fields from arg. +func (tx *Tx) NamedQuery(query string, arg interface{}) (*Rows, error) { + return NamedQuery(tx, query, arg) +} + +// NamedExec a named query within a transaction. +// Any named placeholder parameters are replaced with fields from arg. +func (tx *Tx) NamedExec(query string, arg interface{}) (sql.Result, error) { + return NamedExec(tx, query, arg) +} + +// Select within a transaction. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) Select(dest interface{}, query string, args ...interface{}) error { + return Select(tx, dest, query, args...) +} + +// Queryx within a transaction. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) Queryx(query string, args ...interface{}) (*Rows, error) { + r, err := tx.Tx.Query(query, args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err +} + +// QueryRowx within a transaction. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) QueryRowx(query string, args ...interface{}) *Row { + rows, err := tx.Tx.Query(query, args...) + return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper} +} + +// Get within a transaction. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (tx *Tx) Get(dest interface{}, query string, args ...interface{}) error { + return Get(tx, dest, query, args...) +} + +// MustExec runs MustExec within a transaction. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) MustExec(query string, args ...interface{}) sql.Result { + return MustExec(tx, query, args...) +} + +// Preparex a statement within a transaction. +func (tx *Tx) Preparex(query string) (*Stmt, error) { + return Preparex(tx, query) +} + +// Stmtx returns a version of the prepared statement which runs within a transaction. Provided +// stmt can be either *sql.Stmt or *sqlx.Stmt. +func (tx *Tx) Stmtx(stmt interface{}) *Stmt { + var s *sql.Stmt + switch v := stmt.(type) { + case Stmt: + s = v.Stmt + case *Stmt: + s = v.Stmt + case sql.Stmt: + s = &v + case *sql.Stmt: + s = v + default: + panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type())) + } + return &Stmt{Stmt: tx.Stmt(s), Mapper: tx.Mapper} +} + +// NamedStmt returns a version of the prepared statement which runs within a transaction. 
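+//
+// A hypothetical usage sketch, mirroring the named tests earlier in this
+// patch (p is assumed to be a struct with matching db fields):
+//
+//	ns, err := db.PrepareNamed(`INSERT INTO person (first_name, last_name, email)
+//		VALUES (:first_name, :last_name, :email)`)
+//	tx := db.MustBegin()
+//	_, err = tx.NamedStmt(ns).Exec(p)
+//	err = tx.Commit()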
+func (tx *Tx) NamedStmt(stmt *NamedStmt) *NamedStmt { + return &NamedStmt{ + QueryString: stmt.QueryString, + Params: stmt.Params, + Stmt: tx.Stmtx(stmt.Stmt), + } +} + +// PrepareNamed returns an sqlx.NamedStmt +func (tx *Tx) PrepareNamed(query string) (*NamedStmt, error) { + return prepareNamed(tx, query) +} + +// Stmt is an sqlx wrapper around sql.Stmt with extra functionality +type Stmt struct { + *sql.Stmt + unsafe bool + Mapper *reflectx.Mapper +} + +// Unsafe returns a version of Stmt which will silently succeed to scan when +// columns in the SQL result have no fields in the destination struct. +func (s *Stmt) Unsafe() *Stmt { + return &Stmt{Stmt: s.Stmt, unsafe: true, Mapper: s.Mapper} +} + +// Select using the prepared statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) Select(dest interface{}, args ...interface{}) error { + return Select(&qStmt{s}, dest, "", args...) +} + +// Get using the prepared statement. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (s *Stmt) Get(dest interface{}, args ...interface{}) error { + return Get(&qStmt{s}, dest, "", args...) +} + +// MustExec (panic) using this statement. Note that the query portion of the error +// output will be blank, as Stmt does not expose its query. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) MustExec(args ...interface{}) sql.Result { + return MustExec(&qStmt{s}, "", args...) +} + +// QueryRowx using this statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) QueryRowx(args ...interface{}) *Row { + qs := &qStmt{s} + return qs.QueryRowx("", args...) +} + +// Queryx using this statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) Queryx(args ...interface{}) (*Rows, error) { + qs := &qStmt{s} + return qs.Queryx("", args...) +} + +// qStmt is an unexposed wrapper which lets you use a Stmt as a Queryer & Execer by +// implementing those interfaces and ignoring the `query` argument. +type qStmt struct{ *Stmt } + +func (q *qStmt) Query(query string, args ...interface{}) (*sql.Rows, error) { + return q.Stmt.Query(args...) +} + +func (q *qStmt) Queryx(query string, args ...interface{}) (*Rows, error) { + r, err := q.Stmt.Query(args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err +} + +func (q *qStmt) QueryRowx(query string, args ...interface{}) *Row { + rows, err := q.Stmt.Query(args...) + return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper} +} + +func (q *qStmt) Exec(query string, args ...interface{}) (sql.Result, error) { + return q.Stmt.Exec(args...) +} + +// Rows is a wrapper around sql.Rows which caches costly reflect operations +// during a looped StructScan +type Rows struct { + *sql.Rows + unsafe bool + Mapper *reflectx.Mapper + // these fields cache memory use for a rows during iteration w/ structScan + started bool + fields [][]int + values []interface{} +} + +// SliceScan using this Rows. +func (r *Rows) SliceScan() ([]interface{}, error) { + return SliceScan(r) +} + +// MapScan using this Rows. +func (r *Rows) MapScan(dest map[string]interface{}) error { + return MapScan(r, dest) +} + +// StructScan is like sql.Rows.Scan, but scans a single Row into a single Struct. +// Use this and iterate over Rows manually when the memory load of Select() might be +// prohibitive. 
*Rows.StructScan caches the reflect work of matching up column +// positions to fields to avoid that overhead per scan, which means it is not safe +// to run StructScan on the same Rows instance with different struct types. +func (r *Rows) StructScan(dest interface{}) error { + v := reflect.ValueOf(dest) + + if v.Kind() != reflect.Ptr { + return errors.New("must pass a pointer, not a value, to StructScan destination") + } + + v = v.Elem() + + if !r.started { + columns, err := r.Columns() + if err != nil { + return err + } + m := r.Mapper + + r.fields = m.TraversalsByName(v.Type(), columns) + // if we are not unsafe and are missing fields, return an error + if f, err := missingFields(r.fields); err != nil && !r.unsafe { + return fmt.Errorf("missing destination name %s in %T", columns[f], dest) + } + r.values = make([]interface{}, len(columns)) + r.started = true + } + + err := fieldsByTraversal(v, r.fields, r.values, true) + if err != nil { + return err + } + // scan into the struct field pointers and append to our results + err = r.Scan(r.values...) + if err != nil { + return err + } + return r.Err() +} + +// Connect to a database and verify with a ping. +func Connect(driverName, dataSourceName string) (*DB, error) { + db, err := Open(driverName, dataSourceName) + if err != nil { + return nil, err + } + err = db.Ping() + if err != nil { + db.Close() + return nil, err + } + return db, nil +} + +// MustConnect connects to a database and panics on error. +func MustConnect(driverName, dataSourceName string) *DB { + db, err := Connect(driverName, dataSourceName) + if err != nil { + panic(err) + } + return db +} + +// Preparex prepares a statement. +func Preparex(p Preparer, query string) (*Stmt, error) { + s, err := p.Prepare(query) + if err != nil { + return nil, err + } + return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err +} + +// Select executes a query using the provided Queryer, and StructScans each row +// into dest, which must be a slice. If the slice elements are scannable, then +// the result set must have only one column. Otherwise, StructScan is used. +// The *sql.Rows are closed automatically. +// Any placeholder parameters are replaced with supplied args. +func Select(q Queryer, dest interface{}, query string, args ...interface{}) error { + rows, err := q.Queryx(query, args...) + if err != nil { + return err + } + // if something happens here, we want to make sure the rows are Closed + defer rows.Close() + return scanAll(rows, dest, false) +} + +// Get does a QueryRow using the provided Queryer, and scans the resulting row +// to dest. If dest is scannable, the result must only have one column. Otherwise, +// StructScan is used. Get will return sql.ErrNoRows like row.Scan would. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func Get(q Queryer, dest interface{}, query string, args ...interface{}) error { + r := q.QueryRowx(query, args...) + return r.scanAny(dest, false) +} + +// LoadFile exec's every statement in a file (as a single call to Exec). +// LoadFile may return a nil *sql.Result if errors are encountered locating or +// reading the file at path. LoadFile reads the entire file into memory, so it +// is not suitable for loading large data dumps, but can be useful for initializing +// schemas or loading indexes. +// +// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3 +// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. 
Detecting +// this by requiring something with DriverName() and then attempting to split the +// queries will be difficult to get right, and its current driver-specific behavior +// is deemed at least not complex in its incorrectness. +func LoadFile(e Execer, path string) (*sql.Result, error) { + realpath, err := filepath.Abs(path) + if err != nil { + return nil, err + } + contents, err := ioutil.ReadFile(realpath) + if err != nil { + return nil, err + } + res, err := e.Exec(string(contents)) + return &res, err +} + +// MustExec execs the query using e and panics if there was an error. +// Any placeholder parameters are replaced with supplied args. +func MustExec(e Execer, query string, args ...interface{}) sql.Result { + res, err := e.Exec(query, args...) + if err != nil { + panic(err) + } + return res +} + +// SliceScan using this Rows. +func (r *Row) SliceScan() ([]interface{}, error) { + return SliceScan(r) +} + +// MapScan using this Rows. +func (r *Row) MapScan(dest map[string]interface{}) error { + return MapScan(r, dest) +} + +func (r *Row) scanAny(dest interface{}, structOnly bool) error { + if r.err != nil { + return r.err + } + if r.rows == nil { + r.err = sql.ErrNoRows + return r.err + } + defer r.rows.Close() + + v := reflect.ValueOf(dest) + if v.Kind() != reflect.Ptr { + return errors.New("must pass a pointer, not a value, to StructScan destination") + } + if v.IsNil() { + return errors.New("nil pointer passed to StructScan destination") + } + + base := reflectx.Deref(v.Type()) + scannable := isScannable(base) + + if structOnly && scannable { + return structOnlyError(base) + } + + columns, err := r.Columns() + if err != nil { + return err + } + + if scannable && len(columns) > 1 { + return fmt.Errorf("scannable dest type %s with >1 columns (%d) in result", base.Kind(), len(columns)) + } + + if scannable { + return r.Scan(dest) + } + + m := r.Mapper + + fields := m.TraversalsByName(v.Type(), columns) + // if we are not unsafe and are missing fields, return an error + if f, err := missingFields(fields); err != nil && !r.unsafe { + return fmt.Errorf("missing destination name %s in %T", columns[f], dest) + } + values := make([]interface{}, len(columns)) + + err = fieldsByTraversal(v, fields, values, true) + if err != nil { + return err + } + // scan into the struct field pointers and append to our results + return r.Scan(values...) +} + +// StructScan a single Row into dest. +func (r *Row) StructScan(dest interface{}) error { + return r.scanAny(dest, true) +} + +// SliceScan a row, returning a []interface{} with values similar to MapScan. +// This function is primarily intended for use where the number of columns +// is not known. Because you can pass an []interface{} directly to Scan, +// it's recommended that you do that as it will not have to allocate new +// slices per row. +func SliceScan(r ColScanner) ([]interface{}, error) { + // ignore r.started, since we needn't use reflect for anything. + columns, err := r.Columns() + if err != nil { + return []interface{}{}, err + } + + values := make([]interface{}, len(columns)) + for i := range values { + values[i] = new(interface{}) + } + + err = r.Scan(values...) + + if err != nil { + return values, err + } + + for i := range columns { + values[i] = *(values[i].(*interface{})) + } + + return values, r.Err() +} + +// MapScan scans a single Row into the dest map[string]interface{}. 
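+//
+// For example (illustrative sketch):
+//
+//	rows, _ := db.Queryx("SELECT * FROM place")
+//	for rows.Next() {
+//		results := map[string]interface{}{}
+//		if err := rows.MapScan(results); err != nil {
+//			// handle err
+//		}
+//	}
+//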
+// Use this to get results for SQL that might not be under your control +// (for instance, if you're building an interface for an SQL server that +// executes SQL from input). Please do not use this as a primary interface! +// This will modify the map sent to it in place, so reuse the same map with +// care. Columns which occur more than once in the result will overwrite +// each other! +func MapScan(r ColScanner, dest map[string]interface{}) error { + // ignore r.started, since we needn't use reflect for anything. + columns, err := r.Columns() + if err != nil { + return err + } + + values := make([]interface{}, len(columns)) + for i := range values { + values[i] = new(interface{}) + } + + err = r.Scan(values...) + if err != nil { + return err + } + + for i, column := range columns { + dest[column] = *(values[i].(*interface{})) + } + + return r.Err() +} + +type rowsi interface { + Close() error + Columns() ([]string, error) + Err() error + Next() bool + Scan(...interface{}) error +} + +// structOnlyError returns an error appropriate for type when a non-scannable +// struct is expected but something else is given +func structOnlyError(t reflect.Type) error { + isStruct := t.Kind() == reflect.Struct + isScanner := reflect.PtrTo(t).Implements(_scannerInterface) + if !isStruct { + return fmt.Errorf("expected %s but got %s", reflect.Struct, t.Kind()) + } + if isScanner { + return fmt.Errorf("structscan expects a struct dest but the provided struct type %s implements scanner", t.Name()) + } + return fmt.Errorf("expected a struct, but struct %s has no exported fields", t.Name()) +} + +// scanAll scans all rows into a destination, which must be a slice of any +// type. If the destination slice type is a Struct, then StructScan will be +// used on each row. If the destination is some other kind of base type, then +// each row must only have one column which can scan into that type. This +// allows you to do something like: +// +// rows, _ := db.Query("select id from people;") +// var ids []int +// scanAll(rows, &ids, false) +// +// and ids will be a list of the id results. I realize that this is a desirable +// interface to expose to users, but for now it will only be exposed via changes +// to `Get` and `Select`. The reason that this has been implemented like this is +// this is the only way to not duplicate reflect work in the new API while +// maintaining backwards compatibility. 
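+//
+// (Illustrative note: the exported path to this behavior is Select, e.g.
+//
+//	var ids []int
+//	err := db.Select(&ids, "select id from people;")
+//
+// which queries and then hands the rows to scanAll.)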
+func scanAll(rows rowsi, dest interface{}, structOnly bool) error { + var v, vp reflect.Value + + value := reflect.ValueOf(dest) + + // json.Unmarshal returns errors for these + if value.Kind() != reflect.Ptr { + return errors.New("must pass a pointer, not a value, to StructScan destination") + } + if value.IsNil() { + return errors.New("nil pointer passed to StructScan destination") + } + direct := reflect.Indirect(value) + + slice, err := baseType(value.Type(), reflect.Slice) + if err != nil { + return err + } + + isPtr := slice.Elem().Kind() == reflect.Ptr + base := reflectx.Deref(slice.Elem()) + scannable := isScannable(base) + + if structOnly && scannable { + return structOnlyError(base) + } + + columns, err := rows.Columns() + if err != nil { + return err + } + + // if it's a base type make sure it only has 1 column; if not return an error + if scannable && len(columns) > 1 { + return fmt.Errorf("non-struct dest type %s with >1 columns (%d)", base.Kind(), len(columns)) + } + + if !scannable { + var values []interface{} + var m *reflectx.Mapper + + switch rows.(type) { + case *Rows: + m = rows.(*Rows).Mapper + default: + m = mapper() + } + + fields := m.TraversalsByName(base, columns) + // if we are not unsafe and are missing fields, return an error + if f, err := missingFields(fields); err != nil && !isUnsafe(rows) { + return fmt.Errorf("missing destination name %s in %T", columns[f], dest) + } + values = make([]interface{}, len(columns)) + + for rows.Next() { + // create a new struct type (which returns PtrTo) and indirect it + vp = reflect.New(base) + v = reflect.Indirect(vp) + + err = fieldsByTraversal(v, fields, values, true) + if err != nil { + return err + } + + // scan into the struct field pointers and append to our results + err = rows.Scan(values...) + if err != nil { + return err + } + + if isPtr { + direct.Set(reflect.Append(direct, vp)) + } else { + direct.Set(reflect.Append(direct, v)) + } + } + } else { + for rows.Next() { + vp = reflect.New(base) + err = rows.Scan(vp.Interface()) + if err != nil { + return err + } + // append + if isPtr { + direct.Set(reflect.Append(direct, vp)) + } else { + direct.Set(reflect.Append(direct, reflect.Indirect(vp))) + } + } + } + + return rows.Err() +} + +// FIXME: StructScan was the very first bit of API in sqlx, and now unfortunately +// it doesn't really feel like it's named properly. There is an incongruency +// between this and the way that StructScan (which might better be ScanStruct +// anyway) works on a rows object. + +// StructScan all rows from an sql.Rows or an sqlx.Rows into the dest slice. +// StructScan will scan in the entire rows result, so if you do not want to +// allocate structs for the entire result, use Queryx and see sqlx.Rows.StructScan. +// If rows is sqlx.Rows, it will use its mapper, otherwise it will use the default. +func StructScan(rows rowsi, dest interface{}) error { + return scanAll(rows, dest, true) + +} + +// reflect helpers + +func baseType(t reflect.Type, expected reflect.Kind) (reflect.Type, error) { + t = reflectx.Deref(t) + if t.Kind() != expected { + return nil, fmt.Errorf("expected %s but got %s", expected, t.Kind()) + } + return t, nil +} + +// fieldsByName fills a values interface with fields from the passed value based +// on the traversals in int. If ptrs is true, return addresses instead of values. +// We write this instead of using FieldsByName to save allocations and map lookups +// when iterating over many rows. Empty traversals will get an interface pointer. 
+// Because of the necessity of requesting ptrs or values, it's considered a bit too +// specialized for inclusion in reflectx itself. +func fieldsByTraversal(v reflect.Value, traversals [][]int, values []interface{}, ptrs bool) error { + v = reflect.Indirect(v) + if v.Kind() != reflect.Struct { + return errors.New("argument not a struct") + } + + for i, traversal := range traversals { + if len(traversal) == 0 { + values[i] = new(interface{}) + continue + } + f := reflectx.FieldByIndexes(v, traversal) + if ptrs { + values[i] = f.Addr().Interface() + } else { + values[i] = f.Interface() + } + } + return nil +} + +func missingFields(transversals [][]int) (field int, err error) { + for i, t := range transversals { + if len(t) == 0 { + return i, errors.New("missing field") + } + } + return 0, nil +} diff --git a/vendor/github.com/jmoiron/sqlx/sqlx_context.go b/vendor/github.com/jmoiron/sqlx/sqlx_context.go new file mode 100644 index 0000000..d58ff33 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/sqlx_context.go @@ -0,0 +1,348 @@ +// +build go1.8 + +package sqlx + +import ( + "context" + "database/sql" + "fmt" + "io/ioutil" + "path/filepath" + "reflect" +) + +// ConnectContext to a database and verify with a ping. +func ConnectContext(ctx context.Context, driverName, dataSourceName string) (*DB, error) { + db, err := Open(driverName, dataSourceName) + if err != nil { + return db, err + } + err = db.PingContext(ctx) + return db, err +} + +// QueryerContext is an interface used by GetContext and SelectContext +type QueryerContext interface { + QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) + QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) + QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row +} + +// PreparerContext is an interface used by PreparexContext. +type PreparerContext interface { + PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) +} + +// ExecerContext is an interface used by MustExecContext and LoadFileContext +type ExecerContext interface { + ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) +} + +// ExtContext is a union interface which can bind, query, and exec, with Context +// used by NamedQueryContext and NamedExecContext. +type ExtContext interface { + binder + QueryerContext + ExecerContext +} + +// SelectContext executes a query using the provided Queryer, and StructScans +// each row into dest, which must be a slice. If the slice elements are +// scannable, then the result set must have only one column. Otherwise, +// StructScan is used. The *sql.Rows are closed automatically. +// Any placeholder parameters are replaced with supplied args. +func SelectContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error { + rows, err := q.QueryxContext(ctx, query, args...) + if err != nil { + return err + } + // if something happens here, we want to make sure the rows are Closed + defer rows.Close() + return scanAll(rows, dest, false) +} + +// PreparexContext prepares a statement. +// +// The provided context is used for the preparation of the statement, not for +// the execution of the statement. 
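+//
+// For example (illustrative sketch):
+//
+//	stmt, err := PreparexContext(ctx, db, "SELECT * FROM person WHERE email = ?")
+//	if err != nil {
+//		return err
+//	}
+//	defer stmt.Close()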
+func PreparexContext(ctx context.Context, p PreparerContext, query string) (*Stmt, error) { + s, err := p.PrepareContext(ctx, query) + if err != nil { + return nil, err + } + return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err +} + +// GetContext does a QueryRow using the provided Queryer, and scans the +// resulting row to dest. If dest is scannable, the result must only have one +// column. Otherwise, StructScan is used. Get will return sql.ErrNoRows like +// row.Scan would. Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func GetContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error { + r := q.QueryRowxContext(ctx, query, args...) + return r.scanAny(dest, false) +} + +// LoadFileContext exec's every statement in a file (as a single call to Exec). +// LoadFileContext may return a nil *sql.Result if errors are encountered +// locating or reading the file at path. LoadFile reads the entire file into +// memory, so it is not suitable for loading large data dumps, but can be useful +// for initializing schemas or loading indexes. +// +// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3 +// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting +// this by requiring something with DriverName() and then attempting to split the +// queries will be difficult to get right, and its current driver-specific behavior +// is deemed at least not complex in its incorrectness. +func LoadFileContext(ctx context.Context, e ExecerContext, path string) (*sql.Result, error) { + realpath, err := filepath.Abs(path) + if err != nil { + return nil, err + } + contents, err := ioutil.ReadFile(realpath) + if err != nil { + return nil, err + } + res, err := e.ExecContext(ctx, string(contents)) + return &res, err +} + +// MustExecContext execs the query using e and panics if there was an error. +// Any placeholder parameters are replaced with supplied args. +func MustExecContext(ctx context.Context, e ExecerContext, query string, args ...interface{}) sql.Result { + res, err := e.ExecContext(ctx, query, args...) + if err != nil { + panic(err) + } + return res +} + +// PrepareNamedContext returns an sqlx.NamedStmt +func (db *DB) PrepareNamedContext(ctx context.Context, query string) (*NamedStmt, error) { + return prepareNamedContext(ctx, db, query) +} + +// NamedQueryContext using this DB. +// Any named placeholder parameters are replaced with fields from arg. +func (db *DB) NamedQueryContext(ctx context.Context, query string, arg interface{}) (*Rows, error) { + return NamedQueryContext(ctx, db, query, arg) +} + +// NamedExecContext using this DB. +// Any named placeholder parameters are replaced with fields from arg. +func (db *DB) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) { + return NamedExecContext(ctx, db, query, arg) +} + +// SelectContext using this DB. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return SelectContext(ctx, db, dest, query, args...) +} + +// GetContext using this DB. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. 
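+//
+// For example (illustrative sketch; Person is a placeholder struct mapped to
+// the result columns):
+//
+//	var p Person
+//	err := db.GetContext(ctx, &p, "SELECT * FROM person WHERE email = ?", "jmoiron@jmoiron.net")
+//	if err == sql.ErrNoRows {
+//		// no matching row
+//	}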
+func (db *DB) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return GetContext(ctx, db, dest, query, args...) +} + +// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt. +// +// The provided context is used for the preparation of the statement, not for +// the execution of the statement. +func (db *DB) PreparexContext(ctx context.Context, query string) (*Stmt, error) { + return PreparexContext(ctx, db, query) +} + +// QueryxContext queries the database and returns an *sqlx.Rows. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { + r, err := db.DB.QueryContext(ctx, query, args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err +} + +// QueryRowxContext queries the database and returns an *sqlx.Row. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { + rows, err := db.DB.QueryContext(ctx, query, args...) + return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper} +} + +// MustBeginTx starts a transaction, and panics on error. Returns an *sqlx.Tx instead +// of an *sql.Tx. +// +// The provided context is used until the transaction is committed or rolled +// back. If the context is canceled, the sql package will roll back the +// transaction. Tx.Commit will return an error if the context provided to +// MustBeginContext is canceled. +func (db *DB) MustBeginTx(ctx context.Context, opts *sql.TxOptions) *Tx { + tx, err := db.BeginTxx(ctx, opts) + if err != nil { + panic(err) + } + return tx +} + +// MustExecContext (panic) runs MustExec using this database. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result { + return MustExecContext(ctx, db, query, args...) +} + +// BeginTxx begins a transaction and returns an *sqlx.Tx instead of an +// *sql.Tx. +// +// The provided context is used until the transaction is committed or rolled +// back. If the context is canceled, the sql package will roll back the +// transaction. Tx.Commit will return an error if the context provided to +// BeginxContext is canceled. +func (db *DB) BeginTxx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { + tx, err := db.DB.BeginTx(ctx, opts) + if err != nil { + return nil, err + } + return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err +} + +// StmtxContext returns a version of the prepared statement which runs within a +// transaction. Provided stmt can be either *sql.Stmt or *sqlx.Stmt. +func (tx *Tx) StmtxContext(ctx context.Context, stmt interface{}) *Stmt { + var s *sql.Stmt + switch v := stmt.(type) { + case Stmt: + s = v.Stmt + case *Stmt: + s = v.Stmt + case sql.Stmt: + s = &v + case *sql.Stmt: + s = v + default: + panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type())) + } + return &Stmt{Stmt: tx.StmtContext(ctx, s), Mapper: tx.Mapper} +} + +// NamedStmtContext returns a version of the prepared statement which runs +// within a transaction. 
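+//
+// For example (illustrative sketch):
+//
+//	nstmt, _ := db.PrepareNamedContext(ctx, "SELECT * FROM person WHERE first_name = :fn")
+//	tx := db.MustBeginTx(ctx, nil)
+//	txStmt := tx.NamedStmtContext(ctx, nstmt)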
+func (tx *Tx) NamedStmtContext(ctx context.Context, stmt *NamedStmt) *NamedStmt { + return &NamedStmt{ + QueryString: stmt.QueryString, + Params: stmt.Params, + Stmt: tx.StmtxContext(ctx, stmt.Stmt), + } +} + +// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt. +// +// The provided context is used for the preparation of the statement, not for +// the execution of the statement. +func (tx *Tx) PreparexContext(ctx context.Context, query string) (*Stmt, error) { + return PreparexContext(ctx, tx, query) +} + +// PrepareNamedContext returns an sqlx.NamedStmt +func (tx *Tx) PrepareNamedContext(ctx context.Context, query string) (*NamedStmt, error) { + return prepareNamedContext(ctx, tx, query) +} + +// MustExecContext runs MustExecContext within a transaction. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result { + return MustExecContext(ctx, tx, query, args...) +} + +// QueryxContext within a transaction and context. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { + r, err := tx.Tx.QueryContext(ctx, query, args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err +} + +// SelectContext within a transaction and context. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return SelectContext(ctx, tx, dest, query, args...) +} + +// GetContext within a transaction and context. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (tx *Tx) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return GetContext(ctx, tx, dest, query, args...) +} + +// QueryRowxContext within a transaction and context. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { + rows, err := tx.Tx.QueryContext(ctx, query, args...) + return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper} +} + +// NamedExecContext using this Tx. +// Any named placeholder parameters are replaced with fields from arg. +func (tx *Tx) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) { + return NamedExecContext(ctx, tx, query, arg) +} + +// SelectContext using the prepared statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) SelectContext(ctx context.Context, dest interface{}, args ...interface{}) error { + return SelectContext(ctx, &qStmt{s}, dest, "", args...) +} + +// GetContext using the prepared statement. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (s *Stmt) GetContext(ctx context.Context, dest interface{}, args ...interface{}) error { + return GetContext(ctx, &qStmt{s}, dest, "", args...) +} + +// MustExecContext (panic) using this statement. Note that the query portion of +// the error output will be blank, as Stmt does not expose its query. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) MustExecContext(ctx context.Context, args ...interface{}) sql.Result { + return MustExecContext(ctx, &qStmt{s}, "", args...) 
+} + +// QueryRowxContext using this statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) QueryRowxContext(ctx context.Context, args ...interface{}) *Row { + qs := &qStmt{s} + return qs.QueryRowxContext(ctx, "", args...) +} + +// QueryxContext using this statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) QueryxContext(ctx context.Context, args ...interface{}) (*Rows, error) { + qs := &qStmt{s} + return qs.QueryxContext(ctx, "", args...) +} + +func (q *qStmt) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) { + return q.Stmt.QueryContext(ctx, args...) +} + +func (q *qStmt) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { + r, err := q.Stmt.QueryContext(ctx, args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err +} + +func (q *qStmt) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { + rows, err := q.Stmt.QueryContext(ctx, args...) + return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper} +} + +func (q *qStmt) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + return q.Stmt.ExecContext(ctx, args...) +} diff --git a/vendor/github.com/jmoiron/sqlx/sqlx_context_test.go b/vendor/github.com/jmoiron/sqlx/sqlx_context_test.go new file mode 100644 index 0000000..85e112b --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/sqlx_context_test.go @@ -0,0 +1,1344 @@ +// +build go1.8 + +// The following environment variables, if set, will be used: +// +// * SQLX_SQLITE_DSN +// * SQLX_POSTGRES_DSN +// * SQLX_MYSQL_DSN +// +// Set any of these variables to 'skip' to skip them. Note that for MySQL, +// the string '?parseTime=True' will be appended to the DSN if it's not there +// already. 
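+//
+// For example (illustrative; the DSN values are placeholders):
+//
+//	SQLX_SQLITE_DSN=':memory:' SQLX_POSTGRES_DSN=skip SQLX_MYSQL_DSN=skip go test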
+//
+package sqlx
+
+import (
+	"context"
+	"database/sql"
+	"encoding/json"
+	"fmt"
+	"log"
+	"strings"
+	"testing"
+	"time"
+
+	_ "github.com/go-sql-driver/mysql"
+	"github.com/jmoiron/sqlx/reflectx"
+	_ "github.com/lib/pq"
+	_ "github.com/mattn/go-sqlite3"
+)
+
+func MultiExecContext(ctx context.Context, e ExecerContext, query string) {
+	stmts := strings.Split(query, ";\n")
+	if len(strings.Trim(stmts[len(stmts)-1], " \n\t\r")) == 0 {
+		stmts = stmts[:len(stmts)-1]
+	}
+	for _, s := range stmts {
+		_, err := e.ExecContext(ctx, s)
+		if err != nil {
+			fmt.Println(err, s)
+		}
+	}
+}
+
+func RunWithSchemaContext(ctx context.Context, schema Schema, t *testing.T, test func(ctx context.Context, db *DB, t *testing.T)) {
+	runner := func(ctx context.Context, db *DB, t *testing.T, create, drop string) {
+		defer func() {
+			MultiExecContext(ctx, db, drop)
+		}()
+
+		MultiExecContext(ctx, db, create)
+		test(ctx, db, t)
+	}
+
+	if TestPostgres {
+		create, drop := schema.Postgres()
+		runner(ctx, pgdb, t, create, drop)
+	}
+	if TestSqlite {
+		create, drop := schema.Sqlite3()
+		runner(ctx, sldb, t, create, drop)
+	}
+	if TestMysql {
+		create, drop := schema.MySQL()
+		runner(ctx, mysqldb, t, create, drop)
+	}
+}
+
+func loadDefaultFixtureContext(ctx context.Context, db *DB, t *testing.T) {
+	tx := db.MustBeginTx(ctx, nil)
+	tx.MustExecContext(ctx, tx.Rebind("INSERT INTO person (first_name, last_name, email) VALUES (?, ?, ?)"), "Jason", "Moiron", "jmoiron@jmoiron.net")
+	tx.MustExecContext(ctx, tx.Rebind("INSERT INTO person (first_name, last_name, email) VALUES (?, ?, ?)"), "John", "Doe", "johndoeDNE@gmail.net")
+	tx.MustExecContext(ctx, tx.Rebind("INSERT INTO place (country, city, telcode) VALUES (?, ?, ?)"), "United States", "New York", "1")
+	tx.MustExecContext(ctx, tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Hong Kong", "852")
+	tx.MustExecContext(ctx, tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Singapore", "65")
+	if db.DriverName() == "mysql" {
+		tx.MustExecContext(ctx, tx.Rebind("INSERT INTO capplace (`COUNTRY`, `TELCODE`) VALUES (?, ?)"), "Sarf Efrica", "27")
+	} else {
+		tx.MustExecContext(ctx, tx.Rebind("INSERT INTO capplace (\"COUNTRY\", \"TELCODE\") VALUES (?, ?)"), "Sarf Efrica", "27")
+	}
+	tx.MustExecContext(ctx, tx.Rebind("INSERT INTO employees (name, id) VALUES (?, ?)"), "Peter", "4444")
+	tx.MustExecContext(ctx, tx.Rebind("INSERT INTO employees (name, id, boss_id) VALUES (?, ?, ?)"), "Joe", "1", "4444")
+	tx.MustExecContext(ctx, tx.Rebind("INSERT INTO employees (name, id, boss_id) VALUES (?, ?, ?)"), "Martin", "2", "4444")
+	tx.Commit()
+}
+
+// Test a backwards-compatible feature: missing scan destinations are silently
+// scanned into throwaway values rather than failing or panicking, provided the
+// handle has been marked unsafe.
+func TestMissingNamesContextContext(t *testing.T) {
+	RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) {
+		loadDefaultFixtureContext(ctx, db, t)
+		type PersonPlus struct {
+			FirstName string `db:"first_name"`
+			LastName  string `db:"last_name"`
+			Email     string
+			//AddedAt time.Time `db:"added_at"`
+		}
+
+		// test Select first
+		pps := []PersonPlus{}
+		// pps lacks added_at destination
+		err := db.SelectContext(ctx, &pps, "SELECT * FROM person")
+		if err == nil {
+			t.Error("Expected missing name from Select to fail, but it did not.")
+		}
+
+		// test Get
+		pp := PersonPlus{}
+		err = db.GetContext(ctx, &pp, "SELECT * FROM person LIMIT 1")
+		if err == nil {
+			t.Error("Expected missing name Get to fail, but it 
did not.") + } + + // test naked StructScan + pps = []PersonPlus{} + rows, err := db.QueryContext(ctx, "SELECT * FROM person LIMIT 1") + if err != nil { + t.Fatal(err) + } + rows.Next() + err = StructScan(rows, &pps) + if err == nil { + t.Error("Expected missing name in StructScan to fail, but it did not.") + } + rows.Close() + + // now try various things with unsafe set. + db = db.Unsafe() + pps = []PersonPlus{} + err = db.SelectContext(ctx, &pps, "SELECT * FROM person") + if err != nil { + t.Error(err) + } + + // test Get + pp = PersonPlus{} + err = db.GetContext(ctx, &pp, "SELECT * FROM person LIMIT 1") + if err != nil { + t.Error(err) + } + + // test naked StructScan + pps = []PersonPlus{} + rowsx, err := db.QueryxContext(ctx, "SELECT * FROM person LIMIT 1") + if err != nil { + t.Fatal(err) + } + rowsx.Next() + err = StructScan(rowsx, &pps) + if err != nil { + t.Error(err) + } + rowsx.Close() + + // test Named stmt + if !isUnsafe(db) { + t.Error("Expected db to be unsafe, but it isn't") + } + nstmt, err := db.PrepareNamedContext(ctx, `SELECT * FROM person WHERE first_name != :name`) + if err != nil { + t.Fatal(err) + } + // its internal stmt should be marked unsafe + if !nstmt.Stmt.unsafe { + t.Error("expected NamedStmt to be unsafe but its underlying stmt did not inherit safety") + } + pps = []PersonPlus{} + err = nstmt.SelectContext(ctx, &pps, map[string]interface{}{"name": "Jason"}) + if err != nil { + t.Fatal(err) + } + if len(pps) != 1 { + t.Errorf("Expected 1 person back, got %d", len(pps)) + } + + // test it with a safe db + db.unsafe = false + if isUnsafe(db) { + t.Error("expected db to be safe but it isn't") + } + nstmt, err = db.PrepareNamedContext(ctx, `SELECT * FROM person WHERE first_name != :name`) + if err != nil { + t.Fatal(err) + } + // it should be safe + if isUnsafe(nstmt) { + t.Error("NamedStmt did not inherit safety") + } + nstmt.Unsafe() + if !isUnsafe(nstmt) { + t.Error("expected newly unsafed NamedStmt to be unsafe") + } + pps = []PersonPlus{} + err = nstmt.SelectContext(ctx, &pps, map[string]interface{}{"name": "Jason"}) + if err != nil { + t.Fatal(err) + } + if len(pps) != 1 { + t.Errorf("Expected 1 person back, got %d", len(pps)) + } + + }) +} + +func TestEmbeddedStructsContextContext(t *testing.T) { + type Loop1 struct{ Person } + type Loop2 struct{ Loop1 } + type Loop3 struct{ Loop2 } + + RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) { + loadDefaultFixtureContext(ctx, db, t) + peopleAndPlaces := []PersonPlace{} + err := db.SelectContext( + ctx, + &peopleAndPlaces, + `SELECT person.*, place.* FROM + person natural join place`) + if err != nil { + t.Fatal(err) + } + for _, pp := range peopleAndPlaces { + if len(pp.Person.FirstName) == 0 { + t.Errorf("Expected non zero lengthed first name.") + } + if len(pp.Place.Country) == 0 { + t.Errorf("Expected non zero lengthed country.") + } + } + + // test embedded structs with StructScan + rows, err := db.QueryxContext( + ctx, + `SELECT person.*, place.* FROM + person natural join place`) + if err != nil { + t.Error(err) + } + + perp := PersonPlace{} + rows.Next() + err = rows.StructScan(&perp) + if err != nil { + t.Error(err) + } + + if len(perp.Person.FirstName) == 0 { + t.Errorf("Expected non zero lengthed first name.") + } + if len(perp.Place.Country) == 0 { + t.Errorf("Expected non zero lengthed country.") + } + + rows.Close() + + // test the same for embedded pointer structs + peopleAndPlacesPtrs := []PersonPlacePtr{} + err = db.SelectContext( + ctx, 
+ &peopleAndPlacesPtrs, + `SELECT person.*, place.* FROM + person natural join place`) + if err != nil { + t.Fatal(err) + } + for _, pp := range peopleAndPlacesPtrs { + if len(pp.Person.FirstName) == 0 { + t.Errorf("Expected non zero lengthed first name.") + } + if len(pp.Place.Country) == 0 { + t.Errorf("Expected non zero lengthed country.") + } + } + + // test "deep nesting" + l3s := []Loop3{} + err = db.SelectContext(ctx, &l3s, `select * from person`) + if err != nil { + t.Fatal(err) + } + for _, l3 := range l3s { + if len(l3.Loop2.Loop1.Person.FirstName) == 0 { + t.Errorf("Expected non zero lengthed first name.") + } + } + + // test "embed conflicts" + ec := []EmbedConflict{} + err = db.SelectContext(ctx, &ec, `select * from person`) + // I'm torn between erroring here or having some kind of working behavior + // in order to allow for more flexibility in destination structs + if err != nil { + t.Errorf("Was not expecting an error on embed conflicts.") + } + }) +} + +func TestJoinQueryContext(t *testing.T) { + type Employee struct { + Name string + ID int64 + // BossID is an id into the employee table + BossID sql.NullInt64 `db:"boss_id"` + } + type Boss Employee + + RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) { + loadDefaultFixtureContext(ctx, db, t) + + var employees []struct { + Employee + Boss `db:"boss"` + } + + err := db.SelectContext(ctx, + &employees, + `SELECT employees.*, boss.id "boss.id", boss.name "boss.name" FROM employees + JOIN employees AS boss ON employees.boss_id = boss.id`) + if err != nil { + t.Fatal(err) + } + + for _, em := range employees { + if len(em.Employee.Name) == 0 { + t.Errorf("Expected non zero lengthed name.") + } + if em.Employee.BossID.Int64 != em.Boss.ID { + t.Errorf("Expected boss ids to match") + } + } + }) +} + +func TestJoinQueryNamedPointerStructsContext(t *testing.T) { + type Employee struct { + Name string + ID int64 + // BossID is an id into the employee table + BossID sql.NullInt64 `db:"boss_id"` + } + type Boss Employee + + RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) { + loadDefaultFixtureContext(ctx, db, t) + + var employees []struct { + Emp1 *Employee `db:"emp1"` + Emp2 *Employee `db:"emp2"` + *Boss `db:"boss"` + } + + err := db.SelectContext(ctx, + &employees, + `SELECT emp.name "emp1.name", emp.id "emp1.id", emp.boss_id "emp1.boss_id", + emp.name "emp2.name", emp.id "emp2.id", emp.boss_id "emp2.boss_id", + boss.id "boss.id", boss.name "boss.name" FROM employees AS emp + JOIN employees AS boss ON emp.boss_id = boss.id + `) + if err != nil { + t.Fatal(err) + } + + for _, em := range employees { + if len(em.Emp1.Name) == 0 || len(em.Emp2.Name) == 0 { + t.Errorf("Expected non zero lengthed name.") + } + if em.Emp1.BossID.Int64 != em.Boss.ID || em.Emp2.BossID.Int64 != em.Boss.ID { + t.Errorf("Expected boss ids to match") + } + } + }) +} + +func TestSelectSliceMapTimeContext(t *testing.T) { + RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) { + loadDefaultFixtureContext(ctx, db, t) + rows, err := db.QueryxContext(ctx, "SELECT * FROM person") + if err != nil { + t.Fatal(err) + } + for rows.Next() { + _, err := rows.SliceScan() + if err != nil { + t.Error(err) + } + } + + rows, err = db.QueryxContext(ctx, "SELECT * FROM person") + if err != nil { + t.Fatal(err) + } + for rows.Next() { + m := map[string]interface{}{} + err := rows.MapScan(m) + if err != nil { 
+ t.Error(err) + } + } + + }) +} + +func TestNilReceiverContext(t *testing.T) { + RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) { + loadDefaultFixtureContext(ctx, db, t) + var p *Person + err := db.GetContext(ctx, p, "SELECT * FROM person LIMIT 1") + if err == nil { + t.Error("Expected error when getting into nil struct ptr.") + } + var pp *[]Person + err = db.SelectContext(ctx, pp, "SELECT * FROM person") + if err == nil { + t.Error("Expected an error when selecting into nil slice ptr.") + } + }) +} + +func TestNamedQueryContext(t *testing.T) { + var schema = Schema{ + create: ` + CREATE TABLE place ( + id integer PRIMARY KEY, + name text NULL + ); + CREATE TABLE person ( + first_name text NULL, + last_name text NULL, + email text NULL + ); + CREATE TABLE placeperson ( + first_name text NULL, + last_name text NULL, + email text NULL, + place_id integer NULL + ); + CREATE TABLE jsperson ( + "FIRST" text NULL, + last_name text NULL, + "EMAIL" text NULL + );`, + drop: ` + drop table person; + drop table jsperson; + drop table place; + drop table placeperson; + `, + } + + RunWithSchemaContext(context.Background(), schema, t, func(ctx context.Context, db *DB, t *testing.T) { + type Person struct { + FirstName sql.NullString `db:"first_name"` + LastName sql.NullString `db:"last_name"` + Email sql.NullString + } + + p := Person{ + FirstName: sql.NullString{String: "ben", Valid: true}, + LastName: sql.NullString{String: "doe", Valid: true}, + Email: sql.NullString{String: "ben@doe.com", Valid: true}, + } + + q1 := `INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)` + _, err := db.NamedExecContext(ctx, q1, p) + if err != nil { + log.Fatal(err) + } + + p2 := &Person{} + rows, err := db.NamedQueryContext(ctx, "SELECT * FROM person WHERE first_name=:first_name", p) + if err != nil { + log.Fatal(err) + } + for rows.Next() { + err = rows.StructScan(p2) + if err != nil { + t.Error(err) + } + if p2.FirstName.String != "ben" { + t.Error("Expected first name of `ben`, got " + p2.FirstName.String) + } + if p2.LastName.String != "doe" { + t.Error("Expected first name of `doe`, got " + p2.LastName.String) + } + } + + // these are tests for #73; they verify that named queries work if you've + // changed the db mapper. This code checks both NamedQuery "ad-hoc" style + // queries and NamedStmt queries, which use different code paths internally. + old := *db.Mapper + + type JSONPerson struct { + FirstName sql.NullString `json:"FIRST"` + LastName sql.NullString `json:"last_name"` + Email sql.NullString + } + + jp := JSONPerson{ + FirstName: sql.NullString{String: "ben", Valid: true}, + LastName: sql.NullString{String: "smith", Valid: true}, + Email: sql.NullString{String: "ben@smith.com", Valid: true}, + } + + db.Mapper = reflectx.NewMapperFunc("json", strings.ToUpper) + + // prepare queries for case sensitivity to test our ToUpper function. 
+ // postgres and sqlite accept "", but mysql uses ``; since Go's multi-line + // strings are `` we use "" by default and swap out for MySQL + pdb := func(s string, db *DB) string { + if db.DriverName() == "mysql" { + return strings.Replace(s, `"`, "`", -1) + } + return s + } + + q1 = `INSERT INTO jsperson ("FIRST", last_name, "EMAIL") VALUES (:FIRST, :last_name, :EMAIL)` + _, err = db.NamedExecContext(ctx, pdb(q1, db), jp) + if err != nil { + t.Fatal(err, db.DriverName()) + } + + // Checks that a person pulled out of the db matches the one we put in + check := func(t *testing.T, rows *Rows) { + jp = JSONPerson{} + for rows.Next() { + err = rows.StructScan(&jp) + if err != nil { + t.Error(err) + } + if jp.FirstName.String != "ben" { + t.Errorf("Expected first name of `ben`, got `%s` (%s) ", jp.FirstName.String, db.DriverName()) + } + if jp.LastName.String != "smith" { + t.Errorf("Expected LastName of `smith`, got `%s` (%s)", jp.LastName.String, db.DriverName()) + } + if jp.Email.String != "ben@smith.com" { + t.Errorf("Expected first name of `doe`, got `%s` (%s)", jp.Email.String, db.DriverName()) + } + } + } + + ns, err := db.PrepareNamed(pdb(` + SELECT * FROM jsperson + WHERE + "FIRST"=:FIRST AND + last_name=:last_name AND + "EMAIL"=:EMAIL + `, db)) + + if err != nil { + t.Fatal(err) + } + rows, err = ns.QueryxContext(ctx, jp) + if err != nil { + t.Fatal(err) + } + + check(t, rows) + + // Check exactly the same thing, but with db.NamedQuery, which does not go + // through the PrepareNamed/NamedStmt path. + rows, err = db.NamedQueryContext(ctx, pdb(` + SELECT * FROM jsperson + WHERE + "FIRST"=:FIRST AND + last_name=:last_name AND + "EMAIL"=:EMAIL + `, db), jp) + if err != nil { + t.Fatal(err) + } + + check(t, rows) + + db.Mapper = &old + + // Test nested structs + type Place struct { + ID int `db:"id"` + Name sql.NullString `db:"name"` + } + type PlacePerson struct { + FirstName sql.NullString `db:"first_name"` + LastName sql.NullString `db:"last_name"` + Email sql.NullString + Place Place `db:"place"` + } + + pl := Place{ + Name: sql.NullString{String: "myplace", Valid: true}, + } + + pp := PlacePerson{ + FirstName: sql.NullString{String: "ben", Valid: true}, + LastName: sql.NullString{String: "doe", Valid: true}, + Email: sql.NullString{String: "ben@doe.com", Valid: true}, + } + + q2 := `INSERT INTO place (id, name) VALUES (1, :name)` + _, err = db.NamedExecContext(ctx, q2, pl) + if err != nil { + log.Fatal(err) + } + + id := 1 + pp.Place.ID = id + + q3 := `INSERT INTO placeperson (first_name, last_name, email, place_id) VALUES (:first_name, :last_name, :email, :place.id)` + _, err = db.NamedExecContext(ctx, q3, pp) + if err != nil { + log.Fatal(err) + } + + pp2 := &PlacePerson{} + rows, err = db.NamedQueryContext(ctx, ` + SELECT + first_name, + last_name, + email, + place.id AS "place.id", + place.name AS "place.name" + FROM placeperson + INNER JOIN place ON place.id = placeperson.place_id + WHERE + place.id=:place.id`, pp) + if err != nil { + log.Fatal(err) + } + for rows.Next() { + err = rows.StructScan(pp2) + if err != nil { + t.Error(err) + } + if pp2.FirstName.String != "ben" { + t.Error("Expected first name of `ben`, got " + pp2.FirstName.String) + } + if pp2.LastName.String != "doe" { + t.Error("Expected first name of `doe`, got " + pp2.LastName.String) + } + if pp2.Place.Name.String != "myplace" { + t.Error("Expected place name of `myplace`, got " + pp2.Place.Name.String) + } + if pp2.Place.ID != pp.Place.ID { + t.Errorf("Expected place name of %v, got %v", pp.Place.ID, 
pp2.Place.ID)
+			}
+		}
+	})
+}
+
+func TestNilInsertsContext(t *testing.T) {
+	var schema = Schema{
+		create: `
+			CREATE TABLE tt (
+				id integer,
+				value text NULL DEFAULT NULL
+			);`,
+		drop: "drop table tt;",
+	}
+
+	RunWithSchemaContext(context.Background(), schema, t, func(ctx context.Context, db *DB, t *testing.T) {
+		type TT struct {
+			ID    int
+			Value *string
+		}
+		var v, v2 TT
+		r := db.Rebind
+
+		db.MustExecContext(ctx, r(`INSERT INTO tt (id) VALUES (1)`))
+		db.GetContext(ctx, &v, r(`SELECT * FROM tt`))
+		if v.ID != 1 {
+			t.Errorf("Expecting id of 1, got %v", v.ID)
+		}
+		if v.Value != nil {
+			t.Errorf("Expecting NULL to map to nil, got %s", *v.Value)
+		}
+
+		v.ID = 2
+		// NOTE: this incidentally uncovered a bug which was that named queries with
+		// pointer destinations would not work if the passed value here was not addressable,
+		// as reflectx.FieldByIndexes attempts to allocate nil pointer receivers for
+		// writing. This was fixed by creating & using the reflectx.FieldByIndexesReadOnly
+		// function. This next line is important as it provides the only coverage for this.
+		db.NamedExecContext(ctx, `INSERT INTO tt (id, value) VALUES (:id, :value)`, v)
+
+		db.GetContext(ctx, &v2, r(`SELECT * FROM tt WHERE id=2`))
+		if v.ID != v2.ID {
+			t.Errorf("%v != %v", v.ID, v2.ID)
+		}
+		if v2.Value != nil {
+			t.Errorf("Expecting NULL to map to nil, got %s", *v2.Value)
+		}
+	})
+}
+
+func TestScanErrorContext(t *testing.T) {
+	var schema = Schema{
+		create: `
+			CREATE TABLE kv (
+				k text,
+				v integer
+			);`,
+		drop: `drop table kv;`,
+	}
+
+	RunWithSchemaContext(context.Background(), schema, t, func(ctx context.Context, db *DB, t *testing.T) {
+		type WrongTypes struct {
+			K int
+			V string
+		}
+		_, err := db.Exec(db.Rebind("INSERT INTO kv (k, v) VALUES (?, ?)"), "hi", 1)
+		if err != nil {
+			t.Error(err)
+		}
+
+		rows, err := db.QueryxContext(ctx, "SELECT * FROM kv")
+		if err != nil {
+			t.Error(err)
+		}
+		for rows.Next() {
+			var wt WrongTypes
+			err := rows.StructScan(&wt)
+			if err == nil {
+				t.Errorf("%s: Scanning wrong types into keys should have errored.", db.DriverName())
+			}
+		}
+	})
+}
+
+// FIXME: this function is kinda big, but it slows things down to be constantly
+// loading and reloading the schema.
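+
+// NOTE (illustrative sketch, not upstream code): the tests below follow the
+// RunWithSchemaContext pattern defined above, which creates the schema on each
+// configured driver, runs the test body, and drops the schema afterwards:
+//
+//	RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) {
+//		loadDefaultFixtureContext(ctx, db, t)
+//		// assertions against db go here
+//	})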
+ +func TestUsageContext(t *testing.T) { + RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) { + loadDefaultFixtureContext(ctx, db, t) + slicemembers := []SliceMember{} + err := db.SelectContext(ctx, &slicemembers, "SELECT * FROM place ORDER BY telcode ASC") + if err != nil { + t.Fatal(err) + } + + people := []Person{} + + err = db.SelectContext(ctx, &people, "SELECT * FROM person ORDER BY first_name ASC") + if err != nil { + t.Fatal(err) + } + + jason, john := people[0], people[1] + if jason.FirstName != "Jason" { + t.Errorf("Expecting FirstName of Jason, got %s", jason.FirstName) + } + if jason.LastName != "Moiron" { + t.Errorf("Expecting LastName of Moiron, got %s", jason.LastName) + } + if jason.Email != "jmoiron@jmoiron.net" { + t.Errorf("Expecting Email of jmoiron@jmoiron.net, got %s", jason.Email) + } + if john.FirstName != "John" || john.LastName != "Doe" || john.Email != "johndoeDNE@gmail.net" { + t.Errorf("John Doe's person record not what expected: Got %v\n", john) + } + + jason = Person{} + err = db.GetContext(ctx, &jason, db.Rebind("SELECT * FROM person WHERE first_name=?"), "Jason") + + if err != nil { + t.Fatal(err) + } + if jason.FirstName != "Jason" { + t.Errorf("Expecting to get back Jason, but got %v\n", jason.FirstName) + } + + err = db.GetContext(ctx, &jason, db.Rebind("SELECT * FROM person WHERE first_name=?"), "Foobar") + if err == nil { + t.Errorf("Expecting an error, got nil\n") + } + if err != sql.ErrNoRows { + t.Errorf("Expected sql.ErrNoRows, got %v\n", err) + } + + // The following tests check statement reuse, which was actually a problem + // due to copying being done when creating Stmt's which was eventually removed + stmt1, err := db.PreparexContext(ctx, db.Rebind("SELECT * FROM person WHERE first_name=?")) + if err != nil { + t.Fatal(err) + } + jason = Person{} + + row := stmt1.QueryRowx("DoesNotExist") + row.Scan(&jason) + row = stmt1.QueryRowx("DoesNotExist") + row.Scan(&jason) + + err = stmt1.GetContext(ctx, &jason, "DoesNotExist User") + if err == nil { + t.Error("Expected an error") + } + err = stmt1.GetContext(ctx, &jason, "DoesNotExist User 2") + if err == nil { + t.Fatal(err) + } + + stmt2, err := db.PreparexContext(ctx, db.Rebind("SELECT * FROM person WHERE first_name=?")) + if err != nil { + t.Fatal(err) + } + jason = Person{} + tx, err := db.Beginx() + if err != nil { + t.Fatal(err) + } + tstmt2 := tx.Stmtx(stmt2) + row2 := tstmt2.QueryRowx("Jason") + err = row2.StructScan(&jason) + if err != nil { + t.Error(err) + } + tx.Commit() + + places := []*Place{} + err = db.SelectContext(ctx, &places, "SELECT telcode FROM place ORDER BY telcode ASC") + if err != nil { + t.Fatal(err) + } + + usa, singsing, honkers := places[0], places[1], places[2] + + if usa.TelCode != 1 || honkers.TelCode != 852 || singsing.TelCode != 65 { + t.Errorf("Expected integer telcodes to work, got %#v", places) + } + + placesptr := []PlacePtr{} + err = db.SelectContext(ctx, &placesptr, "SELECT * FROM place ORDER BY telcode ASC") + if err != nil { + t.Error(err) + } + //fmt.Printf("%#v\n%#v\n%#v\n", placesptr[0], placesptr[1], placesptr[2]) + + // if you have null fields and use SELECT *, you must use sql.Null* in your struct + // this test also verifies that you can use either a []Struct{} or a []*Struct{} + places2 := []Place{} + err = db.SelectContext(ctx, &places2, "SELECT * FROM place ORDER BY telcode ASC") + if err != nil { + t.Fatal(err) + } + + usa, singsing, honkers = &places2[0], &places2[1], &places2[2] + + // 
this should return a type error that &p is not a pointer to a struct slice + p := Place{} + err = db.SelectContext(ctx, &p, "SELECT * FROM place ORDER BY telcode ASC") + if err == nil { + t.Errorf("Expected an error, argument to select should be a pointer to a struct slice") + } + + // this should be an error + pl := []Place{} + err = db.SelectContext(ctx, pl, "SELECT * FROM place ORDER BY telcode ASC") + if err == nil { + t.Errorf("Expected an error, argument to select should be a pointer to a struct slice, not a slice.") + } + + if usa.TelCode != 1 || honkers.TelCode != 852 || singsing.TelCode != 65 { + t.Errorf("Expected integer telcodes to work, got %#v", places) + } + + stmt, err := db.PreparexContext(ctx, db.Rebind("SELECT country, telcode FROM place WHERE telcode > ? ORDER BY telcode ASC")) + if err != nil { + t.Error(err) + } + + places = []*Place{} + err = stmt.SelectContext(ctx, &places, 10) + if len(places) != 2 { + t.Error("Expected 2 places, got 0.") + } + if err != nil { + t.Fatal(err) + } + singsing, honkers = places[0], places[1] + if singsing.TelCode != 65 || honkers.TelCode != 852 { + t.Errorf("Expected the right telcodes, got %#v", places) + } + + rows, err := db.QueryxContext(ctx, "SELECT * FROM place") + if err != nil { + t.Fatal(err) + } + place := Place{} + for rows.Next() { + err = rows.StructScan(&place) + if err != nil { + t.Fatal(err) + } + } + + rows, err = db.QueryxContext(ctx, "SELECT * FROM place") + if err != nil { + t.Fatal(err) + } + m := map[string]interface{}{} + for rows.Next() { + err = rows.MapScan(m) + if err != nil { + t.Fatal(err) + } + _, ok := m["country"] + if !ok { + t.Errorf("Expected key `country` in map but could not find it (%#v)\n", m) + } + } + + rows, err = db.QueryxContext(ctx, "SELECT * FROM place") + if err != nil { + t.Fatal(err) + } + for rows.Next() { + s, err := rows.SliceScan() + if err != nil { + t.Error(err) + } + if len(s) != 3 { + t.Errorf("Expected 3 columns in result, got %d\n", len(s)) + } + } + + // test advanced querying + // test that NamedExec works with a map as well as a struct + _, err = db.NamedExecContext(ctx, "INSERT INTO person (first_name, last_name, email) VALUES (:first, :last, :email)", map[string]interface{}{ + "first": "Bin", + "last": "Smuth", + "email": "bensmith@allblacks.nz", + }) + if err != nil { + t.Fatal(err) + } + + // ensure that if the named param happens right at the end it still works + // ensure that NamedQuery works with a map[string]interface{} + rows, err = db.NamedQueryContext(ctx, "SELECT * FROM person WHERE first_name=:first", map[string]interface{}{"first": "Bin"}) + if err != nil { + t.Fatal(err) + } + + ben := &Person{} + for rows.Next() { + err = rows.StructScan(ben) + if err != nil { + t.Fatal(err) + } + if ben.FirstName != "Bin" { + t.Fatal("Expected first name of `Bin`, got " + ben.FirstName) + } + if ben.LastName != "Smuth" { + t.Fatal("Expected first name of `Smuth`, got " + ben.LastName) + } + } + + ben.FirstName = "Ben" + ben.LastName = "Smith" + ben.Email = "binsmuth@allblacks.nz" + + // Insert via a named query using the struct + _, err = db.NamedExecContext(ctx, "INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", ben) + + if err != nil { + t.Fatal(err) + } + + rows, err = db.NamedQueryContext(ctx, "SELECT * FROM person WHERE first_name=:first_name", ben) + if err != nil { + t.Fatal(err) + } + for rows.Next() { + err = rows.StructScan(ben) + if err != nil { + t.Fatal(err) + } + if ben.FirstName != "Ben" { + t.Fatal("Expected first 
name of `Ben`, got " + ben.FirstName)
+			}
+			if ben.LastName != "Smith" {
+				t.Fatal("Expected last name of `Smith`, got " + ben.LastName)
+			}
+		}
+		// ensure that Get does not panic on empty result set
+		person := &Person{}
+		err = db.GetContext(ctx, person, "SELECT * FROM person WHERE first_name=$1", "does-not-exist")
+		if err == nil {
+			t.Fatal("Should have got an error for Get on non-existent row.")
+		}
+
+		// let's test prepared statements some more
+
+		stmt, err = db.PreparexContext(ctx, db.Rebind("SELECT * FROM person WHERE first_name=?"))
+		if err != nil {
+			t.Fatal(err)
+		}
+		rows, err = stmt.QueryxContext(ctx, "Ben")
+		if err != nil {
+			t.Fatal(err)
+		}
+		for rows.Next() {
+			err = rows.StructScan(ben)
+			if err != nil {
+				t.Fatal(err)
+			}
+			if ben.FirstName != "Ben" {
+				t.Fatal("Expected first name of `Ben`, got " + ben.FirstName)
+			}
+			if ben.LastName != "Smith" {
+				t.Fatal("Expected last name of `Smith`, got " + ben.LastName)
+			}
+		}
+
+		john = Person{}
+		stmt, err = db.PreparexContext(ctx, db.Rebind("SELECT * FROM person WHERE first_name=?"))
+		if err != nil {
+			t.Error(err)
+		}
+		err = stmt.GetContext(ctx, &john, "John")
+		if err != nil {
+			t.Error(err)
+		}
+
+		// test name mapping
+		// THIS USED TO WORK BUT WILL NO LONGER WORK.
+		db.MapperFunc(strings.ToUpper)
+		rsa := CPlace{}
+		err = db.GetContext(ctx, &rsa, "SELECT * FROM capplace;")
+		if err != nil {
+			t.Error(err, "in db:", db.DriverName())
+		}
+		db.MapperFunc(strings.ToLower)
+
+		// create a copy and change the mapper, then verify the copy behaves
+		// differently from the original.
+		dbCopy := NewDb(db.DB, db.DriverName())
+		dbCopy.MapperFunc(strings.ToUpper)
+		err = dbCopy.GetContext(ctx, &rsa, "SELECT * FROM capplace;")
+		if err != nil {
+			fmt.Println(db.DriverName())
+			t.Error(err)
+		}
+
+		err = db.GetContext(ctx, &rsa, "SELECT * FROM cappplace;")
+		if err == nil {
+			t.Error("Expected an error from the misspelled table name, got nil")
+		}
+
+		// test base type slices
+		var sdest []string
+		rows, err = db.QueryxContext(ctx, "SELECT email FROM person ORDER BY email ASC;")
+		if err != nil {
+			t.Error(err)
+		}
+		err = scanAll(rows, &sdest, false)
+		if err != nil {
+			t.Error(err)
+		}
+
+		// test Get with base types
+		var count int
+		err = db.GetContext(ctx, &count, "SELECT count(*) FROM person;")
+		if err != nil {
+			t.Error(err)
+		}
+		if count != len(sdest) {
+			t.Errorf("Expected %d == %d (count(*) vs len(SELECT ...))", count, len(sdest))
+		}
+
+		// test Get and Select with time.Time, #84
+		var addedAt time.Time
+		err = db.GetContext(ctx, &addedAt, "SELECT added_at FROM person LIMIT 1;")
+		if err != nil {
+			t.Error(err)
+		}
+
+		var addedAts []time.Time
+		err = db.SelectContext(ctx, &addedAts, "SELECT added_at FROM person;")
+		if err != nil {
+			t.Error(err)
+		}
+
+		// test it on a double pointer
+		var pcount *int
+		err = db.GetContext(ctx, &pcount, "SELECT count(*) FROM person;")
+		if err != nil {
+			t.Error(err)
+		}
+		if *pcount != count {
+			t.Errorf("expected %d == %d", *pcount, count)
+		}
+
+		// test Select...
+ sdest = []string{} + err = db.SelectContext(ctx, &sdest, "SELECT first_name FROM person ORDER BY first_name ASC;") + if err != nil { + t.Error(err) + } + expected := []string{"Ben", "Bin", "Jason", "John"} + for i, got := range sdest { + if got != expected[i] { + t.Errorf("Expected %d result to be %s, but got %s", i, expected[i], got) + } + } + + var nsdest []sql.NullString + err = db.SelectContext(ctx, &nsdest, "SELECT city FROM place ORDER BY city ASC") + if err != nil { + t.Error(err) + } + for _, val := range nsdest { + if val.Valid && val.String != "New York" { + t.Errorf("expected single valid result to be `New York`, but got %s", val.String) + } + } + }) +} + +// tests that sqlx will not panic when the wrong driver is passed because +// of an automatic nil dereference in sqlx.Open(), which was fixed. +func TestDoNotPanicOnConnectContext(t *testing.T) { + _, err := ConnectContext(context.Background(), "bogus", "hehe") + if err == nil { + t.Errorf("Should return error when using bogus driverName") + } +} + +func TestEmbeddedMapsContext(t *testing.T) { + var schema = Schema{ + create: ` + CREATE TABLE message ( + string text, + properties text + );`, + drop: `drop table message;`, + } + + RunWithSchemaContext(context.Background(), schema, t, func(ctx context.Context, db *DB, t *testing.T) { + messages := []Message{ + {"Hello, World", PropertyMap{"one": "1", "two": "2"}}, + {"Thanks, Joy", PropertyMap{"pull": "request"}}, + } + q1 := `INSERT INTO message (string, properties) VALUES (:string, :properties);` + for _, m := range messages { + _, err := db.NamedExecContext(ctx, q1, m) + if err != nil { + t.Fatal(err) + } + } + var count int + err := db.GetContext(ctx, &count, "SELECT count(*) FROM message") + if err != nil { + t.Fatal(err) + } + if count != len(messages) { + t.Fatalf("Expected %d messages in DB, found %d", len(messages), count) + } + + var m Message + err = db.GetContext(ctx, &m, "SELECT * FROM message LIMIT 1;") + if err != nil { + t.Fatal(err) + } + if m.Properties == nil { + t.Fatal("Expected m.Properties to not be nil, but it was.") + } + }) +} + +func TestIssue197Context(t *testing.T) { + // this test actually tests for a bug in database/sql: + // https://github.com/golang/go/issues/13905 + // this potentially makes _any_ named type that is an alias for []byte + // unsafe to use in a lot of different ways (basically, unsafe to hold + // onto after loading from the database). 
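+	// To make the hazard above concrete (an illustrative sketch only, not
+	// executed here): within a single Rows loop, a scanned json.RawMessage
+	// may alias the driver's buffer, so holding it past the next call to
+	// rows.Next() can yield silently corrupted data:
+	//
+	//	var kept []json.RawMessage
+	//	for rows.Next() {
+	//		var r json.RawMessage
+	//		rows.Scan(&r)          // r may alias driver-owned memory
+	//		kept = append(kept, r) // unsafe: the next Next() may overwrite it
+	//	}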
+ t.Skip() + + type mybyte []byte + type Var struct{ Raw json.RawMessage } + type Var2 struct{ Raw []byte } + type Var3 struct{ Raw mybyte } + RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) { + var err error + var v, q Var + if err = db.GetContext(ctx, &v, `SELECT '{"a": "b"}' AS raw`); err != nil { + t.Fatal(err) + } + if err = db.GetContext(ctx, &q, `SELECT 'null' AS raw`); err != nil { + t.Fatal(err) + } + + var v2, q2 Var2 + if err = db.GetContext(ctx, &v2, `SELECT '{"a": "b"}' AS raw`); err != nil { + t.Fatal(err) + } + if err = db.GetContext(ctx, &q2, `SELECT 'null' AS raw`); err != nil { + t.Fatal(err) + } + + var v3, q3 Var3 + if err = db.QueryRowContext(ctx, `SELECT '{"a": "b"}' AS raw`).Scan(&v3.Raw); err != nil { + t.Fatal(err) + } + if err = db.QueryRowContext(ctx, `SELECT '{"c": "d"}' AS raw`).Scan(&q3.Raw); err != nil { + t.Fatal(err) + } + t.Fail() + }) +} + +func TestInContext(t *testing.T) { + // some quite normal situations + type tr struct { + q string + args []interface{} + c int + } + tests := []tr{ + {"SELECT * FROM foo WHERE x = ? AND v in (?) AND y = ?", + []interface{}{"foo", []int{0, 5, 7, 2, 9}, "bar"}, + 7}, + {"SELECT * FROM foo WHERE x in (?)", + []interface{}{[]int{1, 2, 3, 4, 5, 6, 7, 8}}, + 8}, + } + for _, test := range tests { + q, a, err := In(test.q, test.args...) + if err != nil { + t.Error(err) + } + if len(a) != test.c { + t.Errorf("Expected %d args, but got %d (%+v)", test.c, len(a), a) + } + if strings.Count(q, "?") != test.c { + t.Errorf("Expected %d bindVars, got %d", test.c, strings.Count(q, "?")) + } + } + + // too many bindVars, but no slices, so short circuits parsing + // i'm not sure if this is the right behavior; this query/arg combo + // might not work, but we shouldn't parse if we don't need to + { + orig := "SELECT * FROM foo WHERE x = ? AND y = ?" + q, a, err := In(orig, "foo", "bar", "baz") + if err != nil { + t.Error(err) + } + if len(a) != 3 { + t.Errorf("Expected 3 args, but got %d (%+v)", len(a), a) + } + if q != orig { + t.Error("Expected unchanged query.") + } + } + + tests = []tr{ + // too many bindvars; slice present so should return error during parse + {"SELECT * FROM foo WHERE x = ? and y = ?", + []interface{}{"foo", []int{1, 2, 3}, "bar"}, + 0}, + // empty slice, should return error before parse + {"SELECT * FROM foo WHERE x = ?", + []interface{}{[]int{}}, + 0}, + // too *few* bindvars, should return an error + {"SELECT * FROM foo WHERE x = ? AND y in (?)", + []interface{}{[]int{1, 2, 3}}, + 0}, + } + for _, test := range tests { + _, _, err := In(test.q, test.args...) + if err == nil { + t.Error("Expected an error, but got nil.") + } + } + RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) { + loadDefaultFixtureContext(ctx, db, t) + //tx.MustExecContext(ctx, tx.Rebind("INSERT INTO place (country, city, telcode) VALUES (?, ?, ?)"), "United States", "New York", "1") + //tx.MustExecContext(ctx, tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Hong Kong", "852") + //tx.MustExecContext(ctx, tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Singapore", "65") + telcodes := []int{852, 65} + q := "SELECT * FROM place WHERE telcode IN(?) ORDER BY telcode" + query, args, err := In(q, telcodes) + if err != nil { + t.Error(err) + } + query = db.Rebind(query) + places := []Place{} + err = db.SelectContext(ctx, &places, query, args...) 
+ if err != nil { + t.Error(err) + } + if len(places) != 2 { + t.Fatalf("Expecting 2 results, got %d", len(places)) + } + if places[0].TelCode != 65 { + t.Errorf("Expecting singapore first, but got %#v", places[0]) + } + if places[1].TelCode != 852 { + t.Errorf("Expecting hong kong second, but got %#v", places[1]) + } + }) +} + +func TestEmbeddedLiteralsContext(t *testing.T) { + var schema = Schema{ + create: ` + CREATE TABLE x ( + k text + );`, + drop: `drop table x;`, + } + + RunWithSchemaContext(context.Background(), schema, t, func(ctx context.Context, db *DB, t *testing.T) { + type t1 struct { + K *string + } + type t2 struct { + Inline struct { + F string + } + K *string + } + + db.MustExecContext(ctx, db.Rebind("INSERT INTO x (k) VALUES (?), (?), (?);"), "one", "two", "three") + + target := t1{} + err := db.GetContext(ctx, &target, db.Rebind("SELECT * FROM x WHERE k=?"), "one") + if err != nil { + t.Error(err) + } + if *target.K != "one" { + t.Error("Expected target.K to be `one`, got ", target.K) + } + + target2 := t2{} + err = db.GetContext(ctx, &target2, db.Rebind("SELECT * FROM x WHERE k=?"), "one") + if err != nil { + t.Error(err) + } + if *target2.K != "one" { + t.Errorf("Expected target2.K to be `one`, got `%v`", target2.K) + } + }) +} diff --git a/vendor/github.com/jmoiron/sqlx/sqlx_test.go b/vendor/github.com/jmoiron/sqlx/sqlx_test.go new file mode 100644 index 0000000..e26c980 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/sqlx_test.go @@ -0,0 +1,1802 @@ +// The following environment variables, if set, will be used: +// +// * SQLX_SQLITE_DSN +// * SQLX_POSTGRES_DSN +// * SQLX_MYSQL_DSN +// +// Set any of these variables to 'skip' to skip them. Note that for MySQL, +// the string '?parseTime=True' will be appended to the DSN if it's not there +// already. 
+// +package sqlx + +import ( + "database/sql" + "database/sql/driver" + "encoding/json" + "fmt" + "log" + "os" + "reflect" + "strings" + "testing" + "time" + + _ "github.com/go-sql-driver/mysql" + "github.com/jmoiron/sqlx/reflectx" + _ "github.com/lib/pq" + _ "github.com/mattn/go-sqlite3" +) + +/* compile time checks that Db, Tx, Stmt (qStmt) implement expected interfaces */ +var _, _ Ext = &DB{}, &Tx{} +var _, _ ColScanner = &Row{}, &Rows{} +var _ Queryer = &qStmt{} +var _ Execer = &qStmt{} + +var TestPostgres = true +var TestSqlite = true +var TestMysql = true + +var sldb *DB +var pgdb *DB +var mysqldb *DB +var active = []*DB{} + +func init() { + ConnectAll() +} + +func ConnectAll() { + var err error + + pgdsn := os.Getenv("SQLX_POSTGRES_DSN") + mydsn := os.Getenv("SQLX_MYSQL_DSN") + sqdsn := os.Getenv("SQLX_SQLITE_DSN") + + TestPostgres = pgdsn != "skip" + TestMysql = mydsn != "skip" + TestSqlite = sqdsn != "skip" + + if !strings.Contains(mydsn, "parseTime=true") { + mydsn += "?parseTime=true" + } + + if TestPostgres { + pgdb, err = Connect("postgres", pgdsn) + if err != nil { + fmt.Printf("Disabling PG tests:\n %v\n", err) + TestPostgres = false + } + } else { + fmt.Println("Disabling Postgres tests.") + } + + if TestMysql { + mysqldb, err = Connect("mysql", mydsn) + if err != nil { + fmt.Printf("Disabling MySQL tests:\n %v", err) + TestMysql = false + } + } else { + fmt.Println("Disabling MySQL tests.") + } + + if TestSqlite { + sldb, err = Connect("sqlite3", sqdsn) + if err != nil { + fmt.Printf("Disabling SQLite:\n %v", err) + TestSqlite = false + } + } else { + fmt.Println("Disabling SQLite tests.") + } +} + +type Schema struct { + create string + drop string +} + +func (s Schema) Postgres() (string, string) { + return s.create, s.drop +} + +func (s Schema) MySQL() (string, string) { + return strings.Replace(s.create, `"`, "`", -1), s.drop +} + +func (s Schema) Sqlite3() (string, string) { + return strings.Replace(s.create, `now()`, `CURRENT_TIMESTAMP`, -1), s.drop +} + +var defaultSchema = Schema{ + create: ` +CREATE TABLE person ( + first_name text, + last_name text, + email text, + added_at timestamp default now() +); + +CREATE TABLE place ( + country text, + city text NULL, + telcode integer +); + +CREATE TABLE capplace ( + "COUNTRY" text, + "CITY" text NULL, + "TELCODE" integer +); + +CREATE TABLE nullperson ( + first_name text NULL, + last_name text NULL, + email text NULL +); + +CREATE TABLE employees ( + name text, + id integer, + boss_id integer +); + +`, + drop: ` +drop table person; +drop table place; +drop table capplace; +drop table nullperson; +drop table employees; +`, +} + +type Person struct { + FirstName string `db:"first_name"` + LastName string `db:"last_name"` + Email string + AddedAt time.Time `db:"added_at"` +} + +type Person2 struct { + FirstName sql.NullString `db:"first_name"` + LastName sql.NullString `db:"last_name"` + Email sql.NullString +} + +type Place struct { + Country string + City sql.NullString + TelCode int +} + +type PlacePtr struct { + Country string + City *string + TelCode int +} + +type PersonPlace struct { + Person + Place +} + +type PersonPlacePtr struct { + *Person + *Place +} + +type EmbedConflict struct { + FirstName string `db:"first_name"` + Person +} + +type SliceMember struct { + Country string + City sql.NullString + TelCode int + People []Person `db:"-"` + Addresses []Place `db:"-"` +} + +// Note that because of field map caching, we need a new type here +// if we've used Place already somewhere in sqlx +type CPlace Place + 
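+// The Schema helpers above adapt a single ANSI-flavored DDL string per
+// driver: MySQL() swaps double-quoted identifiers for backticks, and
+// Sqlite3() swaps now() for CURRENT_TIMESTAMP. A minimal sketch of how a
+// harness would pick the right pair (ddlFor is a hypothetical helper; the
+// Schema methods are exactly the ones defined above):
+//
+//	func ddlFor(driverName string, s Schema) (create, drop string) {
+//		switch driverName {
+//		case "mysql":
+//			return s.MySQL()
+//		case "sqlite3":
+//			return s.Sqlite3()
+//		default:
+//			return s.Postgres()
+//		}
+//	}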
+func MultiExec(e Execer, query string) { + stmts := strings.Split(query, ";\n") + if len(strings.Trim(stmts[len(stmts)-1], " \n\t\r")) == 0 { + stmts = stmts[:len(stmts)-1] + } + for _, s := range stmts { + _, err := e.Exec(s) + if err != nil { + fmt.Println(err, s) + } + } +} + +func RunWithSchema(schema Schema, t *testing.T, test func(db *DB, t *testing.T)) { + runner := func(db *DB, t *testing.T, create, drop string) { + defer func() { + MultiExec(db, drop) + }() + + MultiExec(db, create) + test(db, t) + } + + if TestPostgres { + create, drop := schema.Postgres() + runner(pgdb, t, create, drop) + } + if TestSqlite { + create, drop := schema.Sqlite3() + runner(sldb, t, create, drop) + } + if TestMysql { + create, drop := schema.MySQL() + runner(mysqldb, t, create, drop) + } +} + +func loadDefaultFixture(db *DB, t *testing.T) { + tx := db.MustBegin() + tx.MustExec(tx.Rebind("INSERT INTO person (first_name, last_name, email) VALUES (?, ?, ?)"), "Jason", "Moiron", "jmoiron@jmoiron.net") + tx.MustExec(tx.Rebind("INSERT INTO person (first_name, last_name, email) VALUES (?, ?, ?)"), "John", "Doe", "johndoeDNE@gmail.net") + tx.MustExec(tx.Rebind("INSERT INTO place (country, city, telcode) VALUES (?, ?, ?)"), "United States", "New York", "1") + tx.MustExec(tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Hong Kong", "852") + tx.MustExec(tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Singapore", "65") + if db.DriverName() == "mysql" { + tx.MustExec(tx.Rebind("INSERT INTO capplace (`COUNTRY`, `TELCODE`) VALUES (?, ?)"), "Sarf Efrica", "27") + } else { + tx.MustExec(tx.Rebind("INSERT INTO capplace (\"COUNTRY\", \"TELCODE\") VALUES (?, ?)"), "Sarf Efrica", "27") + } + tx.MustExec(tx.Rebind("INSERT INTO employees (name, id) VALUES (?, ?)"), "Peter", "4444") + tx.MustExec(tx.Rebind("INSERT INTO employees (name, id, boss_id) VALUES (?, ?, ?)"), "Joe", "1", "4444") + tx.MustExec(tx.Rebind("INSERT INTO employees (name, id, boss_id) VALUES (?, ?, ?)"), "Martin", "2", "4444") + tx.Commit() +} + +// Test a new backwards compatible feature, that missing scan destinations +// will silently scan into sql.RawText rather than failing/panicing +func TestMissingNames(t *testing.T) { + RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) { + loadDefaultFixture(db, t) + type PersonPlus struct { + FirstName string `db:"first_name"` + LastName string `db:"last_name"` + Email string + //AddedAt time.Time `db:"added_at"` + } + + // test Select first + pps := []PersonPlus{} + // pps lacks added_at destination + err := db.Select(&pps, "SELECT * FROM person") + if err == nil { + t.Error("Expected missing name from Select to fail, but it did not.") + } + + // test Get + pp := PersonPlus{} + err = db.Get(&pp, "SELECT * FROM person LIMIT 1") + if err == nil { + t.Error("Expected missing name Get to fail, but it did not.") + } + + // test naked StructScan + pps = []PersonPlus{} + rows, err := db.Query("SELECT * FROM person LIMIT 1") + if err != nil { + t.Fatal(err) + } + rows.Next() + err = StructScan(rows, &pps) + if err == nil { + t.Error("Expected missing name in StructScan to fail, but it did not.") + } + rows.Close() + + // now try various things with unsafe set. 
+ db = db.Unsafe() + pps = []PersonPlus{} + err = db.Select(&pps, "SELECT * FROM person") + if err != nil { + t.Error(err) + } + + // test Get + pp = PersonPlus{} + err = db.Get(&pp, "SELECT * FROM person LIMIT 1") + if err != nil { + t.Error(err) + } + + // test naked StructScan + pps = []PersonPlus{} + rowsx, err := db.Queryx("SELECT * FROM person LIMIT 1") + if err != nil { + t.Fatal(err) + } + rowsx.Next() + err = StructScan(rowsx, &pps) + if err != nil { + t.Error(err) + } + rowsx.Close() + + // test Named stmt + if !isUnsafe(db) { + t.Error("Expected db to be unsafe, but it isn't") + } + nstmt, err := db.PrepareNamed(`SELECT * FROM person WHERE first_name != :name`) + if err != nil { + t.Fatal(err) + } + // its internal stmt should be marked unsafe + if !nstmt.Stmt.unsafe { + t.Error("expected NamedStmt to be unsafe but its underlying stmt did not inherit safety") + } + pps = []PersonPlus{} + err = nstmt.Select(&pps, map[string]interface{}{"name": "Jason"}) + if err != nil { + t.Fatal(err) + } + if len(pps) != 1 { + t.Errorf("Expected 1 person back, got %d", len(pps)) + } + + // test it with a safe db + db.unsafe = false + if isUnsafe(db) { + t.Error("expected db to be safe but it isn't") + } + nstmt, err = db.PrepareNamed(`SELECT * FROM person WHERE first_name != :name`) + if err != nil { + t.Fatal(err) + } + // it should be safe + if isUnsafe(nstmt) { + t.Error("NamedStmt did not inherit safety") + } + nstmt.Unsafe() + if !isUnsafe(nstmt) { + t.Error("expected newly unsafed NamedStmt to be unsafe") + } + pps = []PersonPlus{} + err = nstmt.Select(&pps, map[string]interface{}{"name": "Jason"}) + if err != nil { + t.Fatal(err) + } + if len(pps) != 1 { + t.Errorf("Expected 1 person back, got %d", len(pps)) + } + + }) +} + +func TestEmbeddedStructs(t *testing.T) { + type Loop1 struct{ Person } + type Loop2 struct{ Loop1 } + type Loop3 struct{ Loop2 } + + RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) { + loadDefaultFixture(db, t) + peopleAndPlaces := []PersonPlace{} + err := db.Select( + &peopleAndPlaces, + `SELECT person.*, place.* FROM + person natural join place`) + if err != nil { + t.Fatal(err) + } + for _, pp := range peopleAndPlaces { + if len(pp.Person.FirstName) == 0 { + t.Errorf("Expected non zero lengthed first name.") + } + if len(pp.Place.Country) == 0 { + t.Errorf("Expected non zero lengthed country.") + } + } + + // test embedded structs with StructScan + rows, err := db.Queryx( + `SELECT person.*, place.* FROM + person natural join place`) + if err != nil { + t.Error(err) + } + + perp := PersonPlace{} + rows.Next() + err = rows.StructScan(&perp) + if err != nil { + t.Error(err) + } + + if len(perp.Person.FirstName) == 0 { + t.Errorf("Expected non zero lengthed first name.") + } + if len(perp.Place.Country) == 0 { + t.Errorf("Expected non zero lengthed country.") + } + + rows.Close() + + // test the same for embedded pointer structs + peopleAndPlacesPtrs := []PersonPlacePtr{} + err = db.Select( + &peopleAndPlacesPtrs, + `SELECT person.*, place.* FROM + person natural join place`) + if err != nil { + t.Fatal(err) + } + for _, pp := range peopleAndPlacesPtrs { + if len(pp.Person.FirstName) == 0 { + t.Errorf("Expected non zero lengthed first name.") + } + if len(pp.Place.Country) == 0 { + t.Errorf("Expected non zero lengthed country.") + } + } + + // test "deep nesting" + l3s := []Loop3{} + err = db.Select(&l3s, `select * from person`) + if err != nil { + t.Fatal(err) + } + for _, l3 := range l3s { + if len(l3.Loop2.Loop1.Person.FirstName) == 0 { + 
t.Errorf("Expected non zero lengthed first name.") + } + } + + // test "embed conflicts" + ec := []EmbedConflict{} + err = db.Select(&ec, `select * from person`) + // I'm torn between erroring here or having some kind of working behavior + // in order to allow for more flexibility in destination structs + if err != nil { + t.Errorf("Was not expecting an error on embed conflicts.") + } + }) +} + +func TestJoinQuery(t *testing.T) { + type Employee struct { + Name string + ID int64 + // BossID is an id into the employee table + BossID sql.NullInt64 `db:"boss_id"` + } + type Boss Employee + + RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) { + loadDefaultFixture(db, t) + + var employees []struct { + Employee + Boss `db:"boss"` + } + + err := db.Select( + &employees, + `SELECT employees.*, boss.id "boss.id", boss.name "boss.name" FROM employees + JOIN employees AS boss ON employees.boss_id = boss.id`) + if err != nil { + t.Fatal(err) + } + + for _, em := range employees { + if len(em.Employee.Name) == 0 { + t.Errorf("Expected non zero lengthed name.") + } + if em.Employee.BossID.Int64 != em.Boss.ID { + t.Errorf("Expected boss ids to match") + } + } + }) +} + +func TestJoinQueryNamedPointerStructs(t *testing.T) { + type Employee struct { + Name string + ID int64 + // BossID is an id into the employee table + BossID sql.NullInt64 `db:"boss_id"` + } + type Boss Employee + + RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) { + loadDefaultFixture(db, t) + + var employees []struct { + Emp1 *Employee `db:"emp1"` + Emp2 *Employee `db:"emp2"` + *Boss `db:"boss"` + } + + err := db.Select( + &employees, + `SELECT emp.name "emp1.name", emp.id "emp1.id", emp.boss_id "emp1.boss_id", + emp.name "emp2.name", emp.id "emp2.id", emp.boss_id "emp2.boss_id", + boss.id "boss.id", boss.name "boss.name" FROM employees AS emp + JOIN employees AS boss ON emp.boss_id = boss.id + `) + if err != nil { + t.Fatal(err) + } + + for _, em := range employees { + if len(em.Emp1.Name) == 0 || len(em.Emp2.Name) == 0 { + t.Errorf("Expected non zero lengthed name.") + } + if em.Emp1.BossID.Int64 != em.Boss.ID || em.Emp2.BossID.Int64 != em.Boss.ID { + t.Errorf("Expected boss ids to match") + } + } + }) +} + +func TestSelectSliceMapTime(t *testing.T) { + RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) { + loadDefaultFixture(db, t) + rows, err := db.Queryx("SELECT * FROM person") + if err != nil { + t.Fatal(err) + } + for rows.Next() { + _, err := rows.SliceScan() + if err != nil { + t.Error(err) + } + } + + rows, err = db.Queryx("SELECT * FROM person") + if err != nil { + t.Fatal(err) + } + for rows.Next() { + m := map[string]interface{}{} + err := rows.MapScan(m) + if err != nil { + t.Error(err) + } + } + + }) +} + +func TestNilReceiver(t *testing.T) { + RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) { + loadDefaultFixture(db, t) + var p *Person + err := db.Get(p, "SELECT * FROM person LIMIT 1") + if err == nil { + t.Error("Expected error when getting into nil struct ptr.") + } + var pp *[]Person + err = db.Select(pp, "SELECT * FROM person") + if err == nil { + t.Error("Expected an error when selecting into nil slice ptr.") + } + }) +} + +func TestNamedQuery(t *testing.T) { + var schema = Schema{ + create: ` + CREATE TABLE place ( + id integer PRIMARY KEY, + name text NULL + ); + CREATE TABLE person ( + first_name text NULL, + last_name text NULL, + email text NULL + ); + CREATE TABLE placeperson ( + first_name text NULL, + last_name text NULL, + email text NULL, + place_id integer NULL + ); 
+ CREATE TABLE jsperson ( + "FIRST" text NULL, + last_name text NULL, + "EMAIL" text NULL + );`, + drop: ` + drop table person; + drop table jsperson; + drop table place; + drop table placeperson; + `, + } + + RunWithSchema(schema, t, func(db *DB, t *testing.T) { + type Person struct { + FirstName sql.NullString `db:"first_name"` + LastName sql.NullString `db:"last_name"` + Email sql.NullString + } + + p := Person{ + FirstName: sql.NullString{String: "ben", Valid: true}, + LastName: sql.NullString{String: "doe", Valid: true}, + Email: sql.NullString{String: "ben@doe.com", Valid: true}, + } + + q1 := `INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)` + _, err := db.NamedExec(q1, p) + if err != nil { + log.Fatal(err) + } + + p2 := &Person{} + rows, err := db.NamedQuery("SELECT * FROM person WHERE first_name=:first_name", p) + if err != nil { + log.Fatal(err) + } + for rows.Next() { + err = rows.StructScan(p2) + if err != nil { + t.Error(err) + } + if p2.FirstName.String != "ben" { + t.Error("Expected first name of `ben`, got " + p2.FirstName.String) + } + if p2.LastName.String != "doe" { + t.Error("Expected last name of `doe`, got " + p2.LastName.String) + } + } + + // these are tests for #73; they verify that named queries work if you've + // changed the db mapper. This code checks both NamedQuery "ad-hoc" style + // queries and NamedStmt queries, which use different code paths internally. + old := *db.Mapper + + type JSONPerson struct { + FirstName sql.NullString `json:"FIRST"` + LastName sql.NullString `json:"last_name"` + Email sql.NullString + } + + jp := JSONPerson{ + FirstName: sql.NullString{String: "ben", Valid: true}, + LastName: sql.NullString{String: "smith", Valid: true}, + Email: sql.NullString{String: "ben@smith.com", Valid: true}, + } + + db.Mapper = reflectx.NewMapperFunc("json", strings.ToUpper) + + // prepare queries for case sensitivity to test our ToUpper function. + // postgres and sqlite accept "", but mysql uses ``; since Go's multi-line + // strings are `` we use "" by default and swap out for MySQL + pdb := func(s string, db *DB) string { + if db.DriverName() == "mysql" { + return strings.Replace(s, `"`, "`", -1) + } + return s + } + + q1 = `INSERT INTO jsperson ("FIRST", last_name, "EMAIL") VALUES (:FIRST, :last_name, :EMAIL)` + _, err = db.NamedExec(pdb(q1, db), jp) + if err != nil { + t.Fatal(err, db.DriverName()) + } + + // Checks that a person pulled out of the db matches the one we put in + check := func(t *testing.T, rows *Rows) { + jp = JSONPerson{} + for rows.Next() { + err = rows.StructScan(&jp) + if err != nil { + t.Error(err) + } + if jp.FirstName.String != "ben" { + t.Errorf("Expected first name of `ben`, got `%s` (%s) ", jp.FirstName.String, db.DriverName()) + } + if jp.LastName.String != "smith" { + t.Errorf("Expected LastName of `smith`, got `%s` (%s)", jp.LastName.String, db.DriverName()) + } + if jp.Email.String != "ben@smith.com" { + t.Errorf("Expected email of `ben@smith.com`, got `%s` (%s)", jp.Email.String, db.DriverName()) + } + } + } + + ns, err := db.PrepareNamed(pdb(` + SELECT * FROM jsperson + WHERE + "FIRST"=:FIRST AND + last_name=:last_name AND + "EMAIL"=:EMAIL + `, db)) + + if err != nil { + t.Fatal(err) + } + rows, err = ns.Queryx(jp) + if err != nil { + t.Fatal(err) + } + + check(t, rows) + + // Check exactly the same thing, but with db.NamedQuery, which does not go + // through the PrepareNamed/NamedStmt path. 
+ rows, err = db.NamedQuery(pdb(` + SELECT * FROM jsperson + WHERE + "FIRST"=:FIRST AND + last_name=:last_name AND + "EMAIL"=:EMAIL + `, db), jp) + if err != nil { + t.Fatal(err) + } + + check(t, rows) + + db.Mapper = &old + + // Test nested structs + type Place struct { + ID int `db:"id"` + Name sql.NullString `db:"name"` + } + type PlacePerson struct { + FirstName sql.NullString `db:"first_name"` + LastName sql.NullString `db:"last_name"` + Email sql.NullString + Place Place `db:"place"` + } + + pl := Place{ + Name: sql.NullString{String: "myplace", Valid: true}, + } + + pp := PlacePerson{ + FirstName: sql.NullString{String: "ben", Valid: true}, + LastName: sql.NullString{String: "doe", Valid: true}, + Email: sql.NullString{String: "ben@doe.com", Valid: true}, + } + + q2 := `INSERT INTO place (id, name) VALUES (1, :name)` + _, err = db.NamedExec(q2, pl) + if err != nil { + log.Fatal(err) + } + + id := 1 + pp.Place.ID = id + + q3 := `INSERT INTO placeperson (first_name, last_name, email, place_id) VALUES (:first_name, :last_name, :email, :place.id)` + _, err = db.NamedExec(q3, pp) + if err != nil { + log.Fatal(err) + } + + pp2 := &PlacePerson{} + rows, err = db.NamedQuery(` + SELECT + first_name, + last_name, + email, + place.id AS "place.id", + place.name AS "place.name" + FROM placeperson + INNER JOIN place ON place.id = placeperson.place_id + WHERE + place.id=:place.id`, pp) + if err != nil { + log.Fatal(err) + } + for rows.Next() { + err = rows.StructScan(pp2) + if err != nil { + t.Error(err) + } + if pp2.FirstName.String != "ben" { + t.Error("Expected first name of `ben`, got " + pp2.FirstName.String) + } + if pp2.LastName.String != "doe" { + t.Error("Expected last name of `doe`, got " + pp2.LastName.String) + } + if pp2.Place.Name.String != "myplace" { + t.Error("Expected place name of `myplace`, got " + pp2.Place.Name.String) + } + if pp2.Place.ID != pp.Place.ID { + t.Errorf("Expected place id of %v, got %v", pp.Place.ID, pp2.Place.ID) + } + } + }) +} + +func TestNilInserts(t *testing.T) { + var schema = Schema{ + create: ` + CREATE TABLE tt ( + id integer, + value text NULL DEFAULT NULL + );`, + drop: "drop table tt;", + } + + RunWithSchema(schema, t, func(db *DB, t *testing.T) { + type TT struct { + ID int + Value *string + } + var v, v2 TT + r := db.Rebind + + db.MustExec(r(`INSERT INTO tt (id) VALUES (1)`)) + db.Get(&v, r(`SELECT * FROM tt`)) + if v.ID != 1 { + t.Errorf("Expecting id of 1, got %v", v.ID) + } + if v.Value != nil { + t.Errorf("Expecting NULL to map to nil, got %s", *v.Value) + } + + v.ID = 2 + // NOTE: this incidentally uncovered a bug which was that named queries with + // pointer destinations would not work if the passed value here was not addressable, + // as reflectx.FieldByIndexes attempts to allocate nil pointer receivers for + // writing. This was fixed by creating & using the reflectx.FieldByIndexesReadOnly + // function. This next line is important as it provides the only coverage for this. 
+ db.NamedExec(`INSERT INTO tt (id, value) VALUES (:id, :value)`, v) + + db.Get(&v2, r(`SELECT * FROM tt WHERE id=2`)) + if v.ID != v2.ID { + t.Errorf("%v != %v", v.ID, v2.ID) + } + if v2.Value != nil { + t.Errorf("Expecting NULL to map to nil, got %s", *v2.Value) + } + }) +} + +func TestScanError(t *testing.T) { + var schema = Schema{ + create: ` + CREATE TABLE kv ( + k text, + v integer + );`, + drop: `drop table kv;`, + } + + RunWithSchema(schema, t, func(db *DB, t *testing.T) { + type WrongTypes struct { + K int + V string + } + _, err := db.Exec(db.Rebind("INSERT INTO kv (k, v) VALUES (?, ?)"), "hi", 1) + if err != nil { + t.Error(err) + } + + rows, err := db.Queryx("SELECT * FROM kv") + if err != nil { + t.Error(err) + } + for rows.Next() { + var wt WrongTypes + err := rows.StructScan(&wt) + if err == nil { + t.Errorf("%s: Scanning wrong types into keys should have errored.", db.DriverName()) + } + } + }) +} + +// FIXME: this function is kinda big but it slows things down to be constantly +// loading and reloading the schema.. + +func TestUsage(t *testing.T) { + RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) { + loadDefaultFixture(db, t) + slicemembers := []SliceMember{} + err := db.Select(&slicemembers, "SELECT * FROM place ORDER BY telcode ASC") + if err != nil { + t.Fatal(err) + } + + people := []Person{} + + err = db.Select(&people, "SELECT * FROM person ORDER BY first_name ASC") + if err != nil { + t.Fatal(err) + } + + jason, john := people[0], people[1] + if jason.FirstName != "Jason" { + t.Errorf("Expecting FirstName of Jason, got %s", jason.FirstName) + } + if jason.LastName != "Moiron" { + t.Errorf("Expecting LastName of Moiron, got %s", jason.LastName) + } + if jason.Email != "jmoiron@jmoiron.net" { + t.Errorf("Expecting Email of jmoiron@jmoiron.net, got %s", jason.Email) + } + if john.FirstName != "John" || john.LastName != "Doe" || john.Email != "johndoeDNE@gmail.net" { + t.Errorf("John Doe's person record not what expected: Got %v\n", john) + } + + jason = Person{} + err = db.Get(&jason, db.Rebind("SELECT * FROM person WHERE first_name=?"), "Jason") + + if err != nil { + t.Fatal(err) + } + if jason.FirstName != "Jason" { + t.Errorf("Expecting to get back Jason, but got %v\n", jason.FirstName) + } + + err = db.Get(&jason, db.Rebind("SELECT * FROM person WHERE first_name=?"), "Foobar") + if err == nil { + t.Errorf("Expecting an error, got nil\n") + } + if err != sql.ErrNoRows { + t.Errorf("Expected sql.ErrNoRows, got %v\n", err) + } + + // The following tests check statement reuse, which was actually a problem + // due to copying being done when creating Stmt's which was eventually removed + stmt1, err := db.Preparex(db.Rebind("SELECT * FROM person WHERE first_name=?")) + if err != nil { + t.Fatal(err) + } + jason = Person{} + + row := stmt1.QueryRowx("DoesNotExist") + row.Scan(&jason) + row = stmt1.QueryRowx("DoesNotExist") + row.Scan(&jason) + + err = stmt1.Get(&jason, "DoesNotExist User") + if err == nil { + t.Error("Expected an error") + } + err = stmt1.Get(&jason, "DoesNotExist User 2") + if err == nil { + t.Fatal("Expected an error") + } + + stmt2, err := db.Preparex(db.Rebind("SELECT * FROM person WHERE first_name=?")) + if err != nil { + t.Fatal(err) + } + jason = Person{} + tx, err := db.Beginx() + if err != nil { + t.Fatal(err) + } + tstmt2 := tx.Stmtx(stmt2) + row2 := tstmt2.QueryRowx("Jason") + err = row2.StructScan(&jason) + if err != nil { + t.Error(err) + } + tx.Commit() + + places := []*Place{} + err = db.Select(&places, "SELECT telcode FROM place ORDER BY 
telcode ASC") + if err != nil { + t.Fatal(err) + } + + usa, singsing, honkers := places[0], places[1], places[2] + + if usa.TelCode != 1 || honkers.TelCode != 852 || singsing.TelCode != 65 { + t.Errorf("Expected integer telcodes to work, got %#v", places) + } + + placesptr := []PlacePtr{} + err = db.Select(&placesptr, "SELECT * FROM place ORDER BY telcode ASC") + if err != nil { + t.Error(err) + } + //fmt.Printf("%#v\n%#v\n%#v\n", placesptr[0], placesptr[1], placesptr[2]) + + // if you have null fields and use SELECT *, you must use sql.Null* in your struct + // this test also verifies that you can use either a []Struct{} or a []*Struct{} + places2 := []Place{} + err = db.Select(&places2, "SELECT * FROM place ORDER BY telcode ASC") + if err != nil { + t.Fatal(err) + } + + usa, singsing, honkers = &places2[0], &places2[1], &places2[2] + + // this should return a type error that &p is not a pointer to a struct slice + p := Place{} + err = db.Select(&p, "SELECT * FROM place ORDER BY telcode ASC") + if err == nil { + t.Errorf("Expected an error, argument to select should be a pointer to a struct slice") + } + + // this should be an error + pl := []Place{} + err = db.Select(pl, "SELECT * FROM place ORDER BY telcode ASC") + if err == nil { + t.Errorf("Expected an error, argument to select should be a pointer to a struct slice, not a slice.") + } + + if usa.TelCode != 1 || honkers.TelCode != 852 || singsing.TelCode != 65 { + t.Errorf("Expected integer telcodes to work, got %#v", places) + } + + stmt, err := db.Preparex(db.Rebind("SELECT country, telcode FROM place WHERE telcode > ? ORDER BY telcode ASC")) + if err != nil { + t.Error(err) + } + + places = []*Place{} + err = stmt.Select(&places, 10) + if len(places) != 2 { + t.Error("Expected 2 places, got 0.") + } + if err != nil { + t.Fatal(err) + } + singsing, honkers = places[0], places[1] + if singsing.TelCode != 65 || honkers.TelCode != 852 { + t.Errorf("Expected the right telcodes, got %#v", places) + } + + rows, err := db.Queryx("SELECT * FROM place") + if err != nil { + t.Fatal(err) + } + place := Place{} + for rows.Next() { + err = rows.StructScan(&place) + if err != nil { + t.Fatal(err) + } + } + + rows, err = db.Queryx("SELECT * FROM place") + if err != nil { + t.Fatal(err) + } + m := map[string]interface{}{} + for rows.Next() { + err = rows.MapScan(m) + if err != nil { + t.Fatal(err) + } + _, ok := m["country"] + if !ok { + t.Errorf("Expected key `country` in map but could not find it (%#v)\n", m) + } + } + + rows, err = db.Queryx("SELECT * FROM place") + if err != nil { + t.Fatal(err) + } + for rows.Next() { + s, err := rows.SliceScan() + if err != nil { + t.Error(err) + } + if len(s) != 3 { + t.Errorf("Expected 3 columns in result, got %d\n", len(s)) + } + } + + // test advanced querying + // test that NamedExec works with a map as well as a struct + _, err = db.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first, :last, :email)", map[string]interface{}{ + "first": "Bin", + "last": "Smuth", + "email": "bensmith@allblacks.nz", + }) + if err != nil { + t.Fatal(err) + } + + // ensure that if the named param happens right at the end it still works + // ensure that NamedQuery works with a map[string]interface{} + rows, err = db.NamedQuery("SELECT * FROM person WHERE first_name=:first", map[string]interface{}{"first": "Bin"}) + if err != nil { + t.Fatal(err) + } + + ben := &Person{} + for rows.Next() { + err = rows.StructScan(ben) + if err != nil { + t.Fatal(err) + } + if ben.FirstName != "Bin" { + t.Fatal("Expected 
first name of `Bin`, got " + ben.FirstName) + } + if ben.LastName != "Smuth" { + t.Fatal("Expected last name of `Smuth`, got " + ben.LastName) + } + } + + ben.FirstName = "Ben" + ben.LastName = "Smith" + ben.Email = "binsmuth@allblacks.nz" + + // Insert via a named query using the struct + _, err = db.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", ben) + + if err != nil { + t.Fatal(err) + } + + rows, err = db.NamedQuery("SELECT * FROM person WHERE first_name=:first_name", ben) + if err != nil { + t.Fatal(err) + } + for rows.Next() { + err = rows.StructScan(ben) + if err != nil { + t.Fatal(err) + } + if ben.FirstName != "Ben" { + t.Fatal("Expected first name of `Ben`, got " + ben.FirstName) + } + if ben.LastName != "Smith" { + t.Fatal("Expected last name of `Smith`, got " + ben.LastName) + } + } + // ensure that Get does not panic on empty result set + person := &Person{} + err = db.Get(person, "SELECT * FROM person WHERE first_name=$1", "does-not-exist") + if err == nil { + t.Fatal("Should have got an error for Get on non-existent row.") + } + + // let's test prepared statements some more + + stmt, err = db.Preparex(db.Rebind("SELECT * FROM person WHERE first_name=?")) + if err != nil { + t.Fatal(err) + } + rows, err = stmt.Queryx("Ben") + if err != nil { + t.Fatal(err) + } + for rows.Next() { + err = rows.StructScan(ben) + if err != nil { + t.Fatal(err) + } + if ben.FirstName != "Ben" { + t.Fatal("Expected first name of `Ben`, got " + ben.FirstName) + } + if ben.LastName != "Smith" { + t.Fatal("Expected last name of `Smith`, got " + ben.LastName) + } + } + + john = Person{} + stmt, err = db.Preparex(db.Rebind("SELECT * FROM person WHERE first_name=?")) + if err != nil { + t.Error(err) + } + err = stmt.Get(&john, "John") + if err != nil { + t.Error(err) + } + + // test name mapping + // THIS USED TO WORK BUT WILL NO LONGER WORK. + db.MapperFunc(strings.ToUpper) + rsa := CPlace{} + err = db.Get(&rsa, "SELECT * FROM capplace;") + if err != nil { + t.Error(err, "in db:", db.DriverName()) + } + db.MapperFunc(strings.ToLower) + + // create a copy and change the mapper, then verify the copy behaves + // differently from the original. 
+ dbCopy := NewDb(db.DB, db.DriverName()) + dbCopy.MapperFunc(strings.ToUpper) + err = dbCopy.Get(&rsa, "SELECT * FROM capplace;") + if err != nil { + fmt.Println(db.DriverName()) + t.Error(err) + } + + err = db.Get(&rsa, "SELECT * FROM cappplace;") + if err == nil { + t.Error("Expected an error for a nonexistent table, got nil") + } + + // test base type slices + var sdest []string + rows, err = db.Queryx("SELECT email FROM person ORDER BY email ASC;") + if err != nil { + t.Error(err) + } + err = scanAll(rows, &sdest, false) + if err != nil { + t.Error(err) + } + + // test Get with base types + var count int + err = db.Get(&count, "SELECT count(*) FROM person;") + if err != nil { + t.Error(err) + } + if count != len(sdest) { + t.Errorf("Expected %d == %d (count(*) vs len(SELECT ..))", count, len(sdest)) + } + + // test Get and Select with time.Time, #84 + var addedAt time.Time + err = db.Get(&addedAt, "SELECT added_at FROM person LIMIT 1;") + if err != nil { + t.Error(err) + } + + var addedAts []time.Time + err = db.Select(&addedAts, "SELECT added_at FROM person;") + if err != nil { + t.Error(err) + } + + // test it on a double pointer + var pcount *int + err = db.Get(&pcount, "SELECT count(*) FROM person;") + if err != nil { + t.Error(err) + } + if *pcount != count { + t.Errorf("expected %d = %d", *pcount, count) + } + + // test Select... + sdest = []string{} + err = db.Select(&sdest, "SELECT first_name FROM person ORDER BY first_name ASC;") + if err != nil { + t.Error(err) + } + expected := []string{"Ben", "Bin", "Jason", "John"} + for i, got := range sdest { + if got != expected[i] { + t.Errorf("Expected %d result to be %s, but got %s", i, expected[i], got) + } + } + + var nsdest []sql.NullString + err = db.Select(&nsdest, "SELECT city FROM place ORDER BY city ASC") + if err != nil { + t.Error(err) + } + for _, val := range nsdest { + if val.Valid && val.String != "New York" { + t.Errorf("expected single valid result to be `New York`, but got %s", val.String) + } + } + }) +} + +type Product struct { + ProductID int +} + +// tests that sqlx will not panic when the wrong driver is passed because +// of an automatic nil dereference in sqlx.Open(), which was fixed. +func TestDoNotPanicOnConnect(t *testing.T) { + db, err := Connect("bogus", "hehe") + if err == nil { + t.Errorf("Should return error when using bogus driverName") + } + if db != nil { + t.Errorf("Should not return the db on a connect failure") + } +} + +func TestRebind(t *testing.T) { + q1 := `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)` + q2 := `INSERT INTO foo (a, b, c) VALUES (?, ?, "foo"), ("Hi", ?, ?)` + + s1 := Rebind(DOLLAR, q1) + s2 := Rebind(DOLLAR, q2) + + if s1 != `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)` { + t.Errorf("q1 failed") + } + + if s2 != `INSERT INTO foo (a, b, c) VALUES ($1, $2, "foo"), ("Hi", $3, $4)` { + t.Errorf("q2 failed") + } + + s1 = Rebind(NAMED, q1) + s2 = Rebind(NAMED, q2) + + ex1 := `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES ` + + `(:arg1, :arg2, :arg3, :arg4, :arg5, :arg6, :arg7, :arg8, :arg9, :arg10)` + if s1 != ex1 { + t.Error("q1 failed on Named params") + } + + ex2 := `INSERT INTO foo (a, b, c) VALUES (:arg1, :arg2, "foo"), ("Hi", :arg3, :arg4)` + if s2 != ex2 { + t.Error("q2 failed on Named params") + } +} + +func TestBindMap(t *testing.T) { + // Test that it works.. 
+ q1 := `INSERT INTO foo (a, b, c, d) VALUES (:name, :age, :first, :last)` + am := map[string]interface{}{ + "name": "Jason Moiron", + "age": 30, + "first": "Jason", + "last": "Moiron", + } + + bq, args, _ := bindMap(QUESTION, q1, am) + expect := `INSERT INTO foo (a, b, c, d) VALUES (?, ?, ?, ?)` + if bq != expect { + t.Errorf("Interpolation of query failed: got `%v`, expected `%v`\n", bq, expect) + } + + if args[0].(string) != "Jason Moiron" { + t.Errorf("Expected `Jason Moiron`, got %v\n", args[0]) + } + + if args[1].(int) != 30 { + t.Errorf("Expected 30, got %v\n", args[1]) + } + + if args[2].(string) != "Jason" { + t.Errorf("Expected Jason, got %v\n", args[2]) + } + + if args[3].(string) != "Moiron" { + t.Errorf("Expected Moiron, got %v\n", args[3]) + } +} + +// Test for #117, embedded nil maps + +type Message struct { + Text string `db:"string"` + Properties PropertyMap `db:"properties"` // Stored as JSON in the database +} + +type PropertyMap map[string]string + +// Implement driver.Valuer and sql.Scanner interfaces on PropertyMap +func (p PropertyMap) Value() (driver.Value, error) { + if len(p) == 0 { + return nil, nil + } + return json.Marshal(p) +} + +func (p PropertyMap) Scan(src interface{}) error { + v := reflect.ValueOf(src) + if !v.IsValid() || v.CanAddr() && v.IsNil() { + return nil + } + switch ts := src.(type) { + case []byte: + return json.Unmarshal(ts, &p) + case string: + return json.Unmarshal([]byte(ts), &p) + default: + return fmt.Errorf("Could not decode type %T -> %T", src, p) + } +} + +func TestEmbeddedMaps(t *testing.T) { + var schema = Schema{ + create: ` + CREATE TABLE message ( + string text, + properties text + );`, + drop: `drop table message;`, + } + + RunWithSchema(schema, t, func(db *DB, t *testing.T) { + messages := []Message{ + {"Hello, World", PropertyMap{"one": "1", "two": "2"}}, + {"Thanks, Joy", PropertyMap{"pull": "request"}}, + } + q1 := `INSERT INTO message (string, properties) VALUES (:string, :properties);` + for _, m := range messages { + _, err := db.NamedExec(q1, m) + if err != nil { + t.Fatal(err) + } + } + var count int + err := db.Get(&count, "SELECT count(*) FROM message") + if err != nil { + t.Fatal(err) + } + if count != len(messages) { + t.Fatalf("Expected %d messages in DB, found %d", len(messages), count) + } + + var m Message + err = db.Get(&m, "SELECT * FROM message LIMIT 1;") + if err != nil { + t.Fatal(err) + } + if m.Properties == nil { + t.Fatal("Expected m.Properties to not be nil, but it was.") + } + }) +} + +func TestIssue197(t *testing.T) { + // this test actually tests for a bug in database/sql: + // https://github.com/golang/go/issues/13905 + // this potentially makes _any_ named type that is an alias for []byte + // unsafe to use in a lot of different ways (basically, unsafe to hold + // onto after loading from the database). 
+ t.Skip() + + type mybyte []byte + type Var struct{ Raw json.RawMessage } + type Var2 struct{ Raw []byte } + type Var3 struct{ Raw mybyte } + RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) { + var err error + var v, q Var + if err = db.Get(&v, `SELECT '{"a": "b"}' AS raw`); err != nil { + t.Fatal(err) + } + if err = db.Get(&q, `SELECT 'null' AS raw`); err != nil { + t.Fatal(err) + } + + var v2, q2 Var2 + if err = db.Get(&v2, `SELECT '{"a": "b"}' AS raw`); err != nil { + t.Fatal(err) + } + if err = db.Get(&q2, `SELECT 'null' AS raw`); err != nil { + t.Fatal(err) + } + + var v3, q3 Var3 + if err = db.QueryRow(`SELECT '{"a": "b"}' AS raw`).Scan(&v3.Raw); err != nil { + t.Fatal(err) + } + if err = db.QueryRow(`SELECT '{"c": "d"}' AS raw`).Scan(&q3.Raw); err != nil { + t.Fatal(err) + } + t.Fail() + }) +} + +func TestIn(t *testing.T) { + // some quite normal situations + type tr struct { + q string + args []interface{} + c int + } + tests := []tr{ + {"SELECT * FROM foo WHERE x = ? AND v in (?) AND y = ?", + []interface{}{"foo", []int{0, 5, 7, 2, 9}, "bar"}, + 7}, + {"SELECT * FROM foo WHERE x in (?)", + []interface{}{[]int{1, 2, 3, 4, 5, 6, 7, 8}}, + 8}, + {"SELECT * FROM foo WHERE x = ? AND y in (?)", + []interface{}{[]byte("foo"), []int{0, 5, 3}}, + 4}, + } + for _, test := range tests { + q, a, err := In(test.q, test.args...) + if err != nil { + t.Error(err) + } + if len(a) != test.c { + t.Errorf("Expected %d args, but got %d (%+v)", test.c, len(a), a) + } + if strings.Count(q, "?") != test.c { + t.Errorf("Expected %d bindVars, got %d", test.c, strings.Count(q, "?")) + } + } + + // too many bindVars, but no slices, so short circuits parsing + // i'm not sure if this is the right behavior; this query/arg combo + // might not work, but we shouldn't parse if we don't need to + { + orig := "SELECT * FROM foo WHERE x = ? AND y = ?" + q, a, err := In(orig, "foo", "bar", "baz") + if err != nil { + t.Error(err) + } + if len(a) != 3 { + t.Errorf("Expected 3 args, but got %d (%+v)", len(a), a) + } + if q != orig { + t.Error("Expected unchanged query.") + } + } + + tests = []tr{ + // too many bindvars; slice present so should return error during parse + {"SELECT * FROM foo WHERE x = ? and y = ?", + []interface{}{"foo", []int{1, 2, 3}, "bar"}, + 0}, + // empty slice, should return error before parse + {"SELECT * FROM foo WHERE x = ?", + []interface{}{[]int{}}, + 0}, + // too *few* bindvars, should return an error + {"SELECT * FROM foo WHERE x = ? AND y in (?)", + []interface{}{[]int{1, 2, 3}}, + 0}, + } + for _, test := range tests { + _, _, err := In(test.q, test.args...) + if err == nil { + t.Error("Expected an error, but got nil.") + } + } + RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) { + loadDefaultFixture(db, t) + //tx.MustExec(tx.Rebind("INSERT INTO place (country, city, telcode) VALUES (?, ?, ?)"), "United States", "New York", "1") + //tx.MustExec(tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Hong Kong", "852") + //tx.MustExec(tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Singapore", "65") + telcodes := []int{852, 65} + q := "SELECT * FROM place WHERE telcode IN(?) ORDER BY telcode" + query, args, err := In(q, telcodes) + if err != nil { + t.Error(err) + } + query = db.Rebind(query) + places := []Place{} + err = db.Select(&places, query, args...) 
+ if err != nil { + t.Error(err) + } + if len(places) != 2 { + t.Fatalf("Expecting 2 results, got %d", len(places)) + } + if places[0].TelCode != 65 { + t.Errorf("Expecting singapore first, but got %#v", places[0]) + } + if places[1].TelCode != 852 { + t.Errorf("Expecting hong kong second, but got %#v", places[1]) + } + }) +} + +func TestBindStruct(t *testing.T) { + var err error + + q1 := `INSERT INTO foo (a, b, c, d) VALUES (:name, :age, :first, :last)` + + type tt struct { + Name string + Age int + First string + Last string + } + + type tt2 struct { + Field1 string `db:"field_1"` + Field2 string `db:"field_2"` + } + + type tt3 struct { + tt2 + Name string + } + + am := tt{"Jason Moiron", 30, "Jason", "Moiron"} + + bq, args, _ := bindStruct(QUESTION, q1, am, mapper()) + expect := `INSERT INTO foo (a, b, c, d) VALUES (?, ?, ?, ?)` + if bq != expect { + t.Errorf("Interpolation of query failed: got `%v`, expected `%v`\n", bq, expect) + } + + if args[0].(string) != "Jason Moiron" { + t.Errorf("Expected `Jason Moiron`, got %v\n", args[0]) + } + + if args[1].(int) != 30 { + t.Errorf("Expected 30, got %v\n", args[1]) + } + + if args[2].(string) != "Jason" { + t.Errorf("Expected Jason, got %v\n", args[2]) + } + + if args[3].(string) != "Moiron" { + t.Errorf("Expected Moiron, got %v\n", args[3]) + } + + am2 := tt2{"Hello", "World"} + bq, args, _ = bindStruct(QUESTION, "INSERT INTO foo (a, b) VALUES (:field_2, :field_1)", am2, mapper()) + expect = `INSERT INTO foo (a, b) VALUES (?, ?)` + if bq != expect { + t.Errorf("Interpolation of query failed: got `%v`, expected `%v`\n", bq, expect) + } + + if args[0].(string) != "World" { + t.Errorf("Expected 'World', got %s\n", args[0].(string)) + } + if args[1].(string) != "Hello" { + t.Errorf("Expected 'Hello', got %s\n", args[1].(string)) + } + + am3 := tt3{Name: "Hello!"} + am3.Field1 = "Hello" + am3.Field2 = "World" + + bq, args, err = bindStruct(QUESTION, "INSERT INTO foo (a, b, c) VALUES (:name, :field_1, :field_2)", am3, mapper()) + + if err != nil { + t.Fatal(err) + } + + expect = `INSERT INTO foo (a, b, c) VALUES (?, ?, ?)` + if bq != expect { + t.Errorf("Interpolation of query failed: got `%v`, expected `%v`\n", bq, expect) + } + + if args[0].(string) != "Hello!" 
{ + t.Errorf("Expected 'Hello!', got %s\n", args[0].(string)) + } + if args[1].(string) != "Hello" { + t.Errorf("Expected 'Hello', got %s\n", args[1].(string)) + } + if args[2].(string) != "World" { + t.Errorf("Expected 'World', got %s\n", args[0].(string)) + } +} + +func TestEmbeddedLiterals(t *testing.T) { + var schema = Schema{ + create: ` + CREATE TABLE x ( + k text + );`, + drop: `drop table x;`, + } + + RunWithSchema(schema, t, func(db *DB, t *testing.T) { + type t1 struct { + K *string + } + type t2 struct { + Inline struct { + F string + } + K *string + } + + db.MustExec(db.Rebind("INSERT INTO x (k) VALUES (?), (?), (?);"), "one", "two", "three") + + target := t1{} + err := db.Get(&target, db.Rebind("SELECT * FROM x WHERE k=?"), "one") + if err != nil { + t.Error(err) + } + if *target.K != "one" { + t.Error("Expected target.K to be `one`, got ", target.K) + } + + target2 := t2{} + err = db.Get(&target2, db.Rebind("SELECT * FROM x WHERE k=?"), "one") + if err != nil { + t.Error(err) + } + if *target2.K != "one" { + t.Errorf("Expected target2.K to be `one`, got `%v`", target2.K) + } + }) +} + +func BenchmarkBindStruct(b *testing.B) { + b.StopTimer() + q1 := `INSERT INTO foo (a, b, c, d) VALUES (:name, :age, :first, :last)` + type t struct { + Name string + Age int + First string + Last string + } + am := t{"Jason Moiron", 30, "Jason", "Moiron"} + b.StartTimer() + for i := 0; i < b.N; i++ { + bindStruct(DOLLAR, q1, am, mapper()) + } +} + +func BenchmarkBindMap(b *testing.B) { + b.StopTimer() + q1 := `INSERT INTO foo (a, b, c, d) VALUES (:name, :age, :first, :last)` + am := map[string]interface{}{ + "name": "Jason Moiron", + "age": 30, + "first": "Jason", + "last": "Moiron", + } + b.StartTimer() + for i := 0; i < b.N; i++ { + bindMap(DOLLAR, q1, am) + } +} + +func BenchmarkIn(b *testing.B) { + q := `SELECT * FROM foo WHERE x = ? AND v in (?) AND y = ?` + + for i := 0; i < b.N; i++ { + _, _, _ = In(q, []interface{}{"foo", []int{0, 5, 7, 2, 9}, "bar"}...) + } +} + +func BenchmarkIn1k(b *testing.B) { + q := `SELECT * FROM foo WHERE x = ? AND v in (?) AND y = ?` + + var vals [1000]interface{} + + for i := 0; i < b.N; i++ { + _, _, _ = In(q, []interface{}{"foo", vals[:], "bar"}...) + } +} + +func BenchmarkIn1kInt(b *testing.B) { + q := `SELECT * FROM foo WHERE x = ? AND v in (?) AND y = ?` + + var vals [1000]int + + for i := 0; i < b.N; i++ { + _, _, _ = In(q, []interface{}{"foo", vals[:], "bar"}...) + } +} + +func BenchmarkIn1kString(b *testing.B) { + q := `SELECT * FROM foo WHERE x = ? AND v in (?) AND y = ?` + + var vals [1000]string + + for i := 0; i < b.N; i++ { + _, _, _ = In(q, []interface{}{"foo", vals[:], "bar"}...) 
+ } +} + +func BenchmarkRebind(b *testing.B) { + b.StopTimer() + q1 := `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)` + q2 := `INSERT INTO foo (a, b, c) VALUES (?, ?, "foo"), ("Hi", ?, ?)` + b.StartTimer() + + for i := 0; i < b.N; i++ { + Rebind(DOLLAR, q1) + Rebind(DOLLAR, q2) + } +} + +func BenchmarkRebindBuffer(b *testing.B) { + b.StopTimer() + q1 := `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)` + q2 := `INSERT INTO foo (a, b, c) VALUES (?, ?, "foo"), ("Hi", ?, ?)` + b.StartTimer() + + for i := 0; i < b.N; i++ { + rebindBuff(DOLLAR, q1) + rebindBuff(DOLLAR, q2) + } +} diff --git a/vendor/github.com/jmoiron/sqlx/types/README.md b/vendor/github.com/jmoiron/sqlx/types/README.md new file mode 100644 index 0000000..713abe5 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/types/README.md @@ -0,0 +1,5 @@ +# types + +The types package provides some useful types which implement the `sql.Scanner` +and `driver.Valuer` interfaces, suitable for use as scan and value targets with +database/sql. diff --git a/vendor/github.com/jmoiron/sqlx/types/types.go b/vendor/github.com/jmoiron/sqlx/types/types.go new file mode 100644 index 0000000..7b014c1 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/types/types.go @@ -0,0 +1,172 @@ +package types + +import ( + "bytes" + "compress/gzip" + "database/sql/driver" + "encoding/json" + "errors" + + "io/ioutil" +) + +// GzippedText is a []byte which transparently gzips data being submitted to +// a database and ungzips data being Scanned from a database. +type GzippedText []byte + +// Value implements the driver.Valuer interface, gzipping the raw value of +// this GzippedText. +func (g GzippedText) Value() (driver.Value, error) { + b := make([]byte, 0, len(g)) + buf := bytes.NewBuffer(b) + w := gzip.NewWriter(buf) + w.Write(g) + w.Close() + return buf.Bytes(), nil + +} + +// Scan implements the sql.Scanner interface, ungzipping the value coming off +// the wire and storing the raw result in the GzippedText. +func (g *GzippedText) Scan(src interface{}) error { + var source []byte + switch src.(type) { + case string: + source = []byte(src.(string)) + case []byte: + source = src.([]byte) + default: + return errors.New("Incompatible type for GzippedText") + } + reader, err := gzip.NewReader(bytes.NewReader(source)) + if err != nil { + return err + } + defer reader.Close() + b, err := ioutil.ReadAll(reader) + if err != nil { + return err + } + *g = GzippedText(b) + return nil +} + +// JSONText is a json.RawMessage, which is a []byte underneath. +// Value() validates the json format in the source, and returns an error if +// the json is not valid. Scan does no validation. JSONText additionally +// implements `Unmarshal`, which unmarshals the json within to an interface{} +type JSONText json.RawMessage + +var emptyJSON = JSONText("{}") + +// MarshalJSON returns the *j as the JSON encoding of j. +func (j JSONText) MarshalJSON() ([]byte, error) { + if len(j) == 0 { + return emptyJSON, nil + } + return j, nil +} + +// UnmarshalJSON sets *j to a copy of data +func (j *JSONText) UnmarshalJSON(data []byte) error { + if j == nil { + return errors.New("JSONText: UnmarshalJSON on nil pointer") + } + *j = append((*j)[0:0], data...) + return nil +} + +// Value returns j as a value. This does a validating unmarshal into another +// RawMessage. If j is invalid json, it returns an error. 
+func (j JSONText) Value() (driver.Value, error) { + var m json.RawMessage + var err = j.Unmarshal(&m) + if err != nil { + return []byte{}, err + } + return []byte(j), nil +} + +// Scan stores the src in *j. No validation is done. +func (j *JSONText) Scan(src interface{}) error { + var source []byte + switch t := src.(type) { + case string: + source = []byte(t) + case []byte: + if len(t) == 0 { + source = emptyJSON + } else { + source = t + } + case nil: + *j = emptyJSON + default: + return errors.New("Incompatible type for JSONText") + } + *j = JSONText(append((*j)[0:0], source...)) + return nil +} + +// Unmarshal unmarshal's the json in j to v, as in json.Unmarshal. +func (j *JSONText) Unmarshal(v interface{}) error { + if len(*j) == 0 { + *j = emptyJSON + } + return json.Unmarshal([]byte(*j), v) +} + +// String supports pretty printing for JSONText types. +func (j JSONText) String() string { + return string(j) +} + +// NullJSONText represents a JSONText that may be null. +// NullJSONText implements the scanner interface so +// it can be used as a scan destination, similar to NullString. +type NullJSONText struct { + JSONText + Valid bool // Valid is true if JSONText is not NULL +} + +// Scan implements the Scanner interface. +func (n *NullJSONText) Scan(value interface{}) error { + if value == nil { + n.JSONText, n.Valid = emptyJSON, false + return nil + } + n.Valid = true + return n.JSONText.Scan(value) +} + +// Value implements the driver Valuer interface. +func (n NullJSONText) Value() (driver.Value, error) { + if !n.Valid { + return nil, nil + } + return n.JSONText.Value() +} + +// BitBool is an implementation of a bool for the MySQL type BIT(1). +// This type allows you to avoid wasting an entire byte for MySQL's boolean type TINYINT. +type BitBool bool + +// Value implements the driver.Valuer interface, +// and turns the BitBool into a bitfield (BIT(1)) for MySQL storage. +func (b BitBool) Value() (driver.Value, error) { + if b { + return []byte{1}, nil + } + return []byte{0}, nil +} + +// Scan implements the sql.Scanner interface, +// and turns the bitfield incoming from MySQL into a BitBool +func (b *BitBool) Scan(src interface{}) error { + v, ok := src.([]byte) + if !ok { + return errors.New("bad []byte type assertion") + } + *b = v[0] == 1 + return nil +} diff --git a/vendor/github.com/jmoiron/sqlx/types/types_test.go b/vendor/github.com/jmoiron/sqlx/types/types_test.go new file mode 100644 index 0000000..29813d1 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/types/types_test.go @@ -0,0 +1,127 @@ +package types + +import "testing" + +func TestGzipText(t *testing.T) { + g := GzippedText("Hello, world") + v, err := g.Value() + if err != nil { + t.Errorf("Was not expecting an error") + } + err = (&g).Scan(v) + if err != nil { + t.Errorf("Was not expecting an error") + } + if string(g) != "Hello, world" { + t.Errorf("Was expecting the string we sent in (Hello World), got %s", string(g)) + } +} + +func TestJSONText(t *testing.T) { + j := JSONText(`{"foo": 1, "bar": 2}`) + v, err := j.Value() + if err != nil { + t.Errorf("Was not expecting an error") + } + err = (&j).Scan(v) + if err != nil { + t.Errorf("Was not expecting an error") + } + m := map[string]interface{}{} + j.Unmarshal(&m) + + if m["foo"].(float64) != 1 || m["bar"].(float64) != 2 { + t.Errorf("Expected valid json but got some garbage instead? 
%#v", m) + } + + j = JSONText(`{"foo": 1, invalid, false}`) + v, err = j.Value() + if err == nil { + t.Errorf("Was expecting invalid json to fail!") + } + + j = JSONText("") + v, err = j.Value() + if err != nil { + t.Errorf("Was not expecting an error") + } + + err = (&j).Scan(v) + if err != nil { + t.Errorf("Was not expecting an error") + } + + j = JSONText(nil) + v, err = j.Value() + if err != nil { + t.Errorf("Was not expecting an error") + } + + err = (&j).Scan(v) + if err != nil { + t.Errorf("Was not expecting an error") + } +} + +func TestNullJSONText(t *testing.T) { + j := NullJSONText{} + err := j.Scan(`{"foo": 1, "bar": 2}`) + if err != nil { + t.Errorf("Was not expecting an error") + } + v, err := j.Value() + if err != nil { + t.Errorf("Was not expecting an error") + } + err = (&j).Scan(v) + if err != nil { + t.Errorf("Was not expecting an error") + } + m := map[string]interface{}{} + j.Unmarshal(&m) + + if m["foo"].(float64) != 1 || m["bar"].(float64) != 2 { + t.Errorf("Expected valid json but got some garbage instead? %#v", m) + } + + j = NullJSONText{} + err = j.Scan(nil) + if err != nil { + t.Errorf("Was not expecting an error") + } + if j.Valid != false { + t.Errorf("Expected valid to be false, but got true") + } +} + +func TestBitBool(t *testing.T) { + // Test true value + var b BitBool = true + + v, err := b.Value() + if err != nil { + t.Errorf("Cannot return error") + } + err = (&b).Scan(v) + if err != nil { + t.Errorf("Was not expecting an error") + } + if !b { + t.Errorf("Was expecting the bool we sent in (true), got %v", b) + } + + // Test false value + b = false + + v, err = b.Value() + if err != nil { + t.Errorf("Cannot return error") + } + err = (&b).Scan(v) + if err != nil { + t.Errorf("Was not expecting an error") + } + if b { + t.Errorf("Was expecting the bool we sent in (false), got %v", b) + } +} diff --git a/vendor/github.com/kelseyhightower/envconfig/.travis.yml b/vendor/github.com/kelseyhightower/envconfig/.travis.yml new file mode 100644 index 0000000..e15301a --- /dev/null +++ b/vendor/github.com/kelseyhightower/envconfig/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.4 + - 1.5 + - 1.6 + - tip diff --git a/vendor/github.com/kelseyhightower/envconfig/LICENSE b/vendor/github.com/kelseyhightower/envconfig/LICENSE new file mode 100644 index 0000000..4bfa7a8 --- /dev/null +++ b/vendor/github.com/kelseyhightower/envconfig/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2013 Kelsey Hightower + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
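As an aside before the next vendored dependency: the `sqlx/types` round trip above is easiest to see end to end in a few lines. The sketch below is illustration only and not part of the patch — the payload and struct are invented — but the `Value`/`Scan`/`Unmarshal` calls match the vendored API.

```Go
package main

import (
	"fmt"
	"log"

	"github.com/jmoiron/sqlx/types"
)

func main() {
	// Value is what database/sql invokes when the JSONText is written;
	// it validates the JSON before handing bytes to the driver.
	j := types.JSONText(`{"retries": 3}`)
	v, err := j.Value()
	if err != nil {
		log.Fatal(err)
	}

	// Scan is invoked when the column is read back; it stores the raw
	// bytes without re-validating them.
	var out types.JSONText
	if err := out.Scan(v); err != nil {
		log.Fatal(err)
	}

	// Unmarshal decodes the stored JSON into a Go value.
	var cfg struct {
		Retries int `json:"retries"`
	}
	if err := out.Unmarshal(&cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Retries) // prints 3
}
```

GzippedText and BitBool follow the same Valuer/Scanner pattern, so the same round trip applies to them unchanged.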
diff --git a/vendor/github.com/kelseyhightower/envconfig/MAINTAINERS b/vendor/github.com/kelseyhightower/envconfig/MAINTAINERS
new file mode 100644
index 0000000..6527a9f
--- /dev/null
+++ b/vendor/github.com/kelseyhightower/envconfig/MAINTAINERS
@@ -0,0 +1,2 @@
+Kelsey Hightower kelsey.hightower@gmail.com github.com/kelseyhightower
+Travis Parker travis.parker@gmail.com github.com/teepark
diff --git a/vendor/github.com/kelseyhightower/envconfig/README.md b/vendor/github.com/kelseyhightower/envconfig/README.md
new file mode 100644
index 0000000..b6c65a8
--- /dev/null
+++ b/vendor/github.com/kelseyhightower/envconfig/README.md
@@ -0,0 +1,188 @@
+# envconfig
+
+[![Build Status](https://travis-ci.org/kelseyhightower/envconfig.png)](https://travis-ci.org/kelseyhightower/envconfig)
+
+```Go
+import "github.com/kelseyhightower/envconfig"
+```
+
+## Documentation
+
+See [godoc](http://godoc.org/github.com/kelseyhightower/envconfig)
+
+## Usage
+
+Set some environment variables:
+
+```Bash
+export MYAPP_DEBUG=false
+export MYAPP_PORT=8080
+export MYAPP_USER=Kelsey
+export MYAPP_RATE="0.5"
+export MYAPP_TIMEOUT="3m"
+export MYAPP_USERS="rob,ken,robert"
+export MYAPP_COLORCODES="red:1,green:2,blue:3"
+```
+
+Write some code:
+
+```Go
+package main
+
+import (
+    "fmt"
+    "log"
+    "time"
+
+    "github.com/kelseyhightower/envconfig"
+)
+
+type Specification struct {
+    Debug      bool
+    Port       int
+    User       string
+    Users      []string
+    Rate       float32
+    Timeout    time.Duration
+    ColorCodes map[string]int
+}
+
+func main() {
+    var s Specification
+    err := envconfig.Process("myapp", &s)
+    if err != nil {
+        log.Fatal(err.Error())
+    }
+    format := "Debug: %v\nPort: %d\nUser: %s\nRate: %f\nTimeout: %s\n"
+    _, err = fmt.Printf(format, s.Debug, s.Port, s.User, s.Rate, s.Timeout)
+    if err != nil {
+        log.Fatal(err.Error())
+    }
+
+    fmt.Println("Users:")
+    for _, u := range s.Users {
+        fmt.Printf("  %s\n", u)
+    }
+
+    fmt.Println("Color codes:")
+    for k, v := range s.ColorCodes {
+        fmt.Printf("  %s: %d\n", k, v)
+    }
+}
+```
+
+Results:
+
+```Bash
+Debug: false
+Port: 8080
+User: Kelsey
+Rate: 0.500000
+Timeout: 3m0s
+Users:
+  rob
+  ken
+  robert
+Color codes:
+  red: 1
+  green: 2
+  blue: 3
+```
+
+## Struct Tag Support
+
+Envconfig supports the use of struct tags to specify alternate, default, and required
+environment variables.
+
+For example, consider the following struct:
+
+```Go
+type Specification struct {
+    ManualOverride1 string `envconfig:"manual_override_1"`
+    DefaultVar      string `default:"foobar"`
+    RequiredVar     string `required:"true"`
+    IgnoredVar      string `ignored:"true"`
+    AutoSplitVar    string `split_words:"true"`
+}
+```
+
+Envconfig has automatic support for CamelCased struct elements when the
+`split_words:"true"` tag is supplied. Without this tag, `AutoSplitVar` above
+would look for an environment variable called `MYAPP_AUTOSPLITVAR`. With the
+setting applied it will look for `MYAPP_AUTO_SPLIT_VAR`. Note that numbers
+will get globbed into the previous word. If the setting does not do the
+right thing, you may use a manual override.
+
+Envconfig will process the value for `ManualOverride1` by populating it with the
+value for `MYAPP_MANUAL_OVERRIDE_1`. Without this struct tag, it would have
+instead looked up `MYAPP_MANUALOVERRIDE1`. With the `split_words:"true"` tag
+it would have looked up `MYAPP_MANUAL_OVERRIDE1`.
+
+```Bash
+export MYAPP_MANUAL_OVERRIDE_1="this will be the value"
+
+# export MYAPP_MANUALOVERRIDE1="and this will not"
+```
+
+If envconfig can't find an environment variable value for `MYAPP_DEFAULTVAR`,
+it will populate it with "foobar" as a default value.
+
+If envconfig can't find an environment variable value for `MYAPP_REQUIREDVAR`,
+it will return an error when asked to process the struct.
+
+If envconfig can't find an environment variable in the form `PREFIX_MYVAR`, and there
+is a struct tag defined, it will try to populate your variable with an environment
+variable that directly matches the envconfig tag in your struct definition:
+
+```shell
+export SERVICE_HOST=127.0.0.1
+export MYAPP_DEBUG=true
+```
+```Go
+type Specification struct {
+    ServiceHost string `envconfig:"SERVICE_HOST"`
+    Debug       bool
+}
+```
+
+Envconfig won't process a field with the "ignored" tag set to "true", even if a corresponding
+environment variable is set.
+
+## Supported Struct Field Types
+
+envconfig supports these struct field types:
+
+  * string
+  * int8, int16, int32, int64
+  * bool
+  * float32, float64
+  * slices of any supported type
+  * maps (keys and values of any supported type)
+  * [encoding.TextUnmarshaler](https://golang.org/pkg/encoding/#TextUnmarshaler)
+
+Embedded structs using these fields are also supported.
+
+## Custom Decoders
+
+Any field whose type (or pointer-to-type) implements `envconfig.Decoder` can
+control its own deserialization:
+
+```Bash
+export DNS_SERVER=8.8.8.8
+```
+
+```Go
+type IPDecoder net.IP
+
+func (ipd *IPDecoder) Decode(value string) error {
+    *ipd = IPDecoder(net.ParseIP(value))
+    return nil
+}
+
+type DNSConfig struct {
+    Address IPDecoder `envconfig:"DNS_SERVER"`
+}
+```
+
+Also, envconfig will use a `Set(string) error` method, as in the
+[flag.Value](https://godoc.org/flag#Value) interface, if one is implemented.
diff --git a/vendor/github.com/kelseyhightower/envconfig/doc.go b/vendor/github.com/kelseyhightower/envconfig/doc.go
new file mode 100644
index 0000000..f28561c
--- /dev/null
+++ b/vendor/github.com/kelseyhightower/envconfig/doc.go
@@ -0,0 +1,8 @@
+// Copyright (c) 2013 Kelsey Hightower. All rights reserved.
+// Use of this source code is governed by the MIT License that can be found in
+// the LICENSE file.
+
+// Package envconfig implements decoding of environment variables based on a user
+// defined specification. A typical use is using environment variables for
+// configuration settings.
+package envconfig
diff --git a/vendor/github.com/kelseyhightower/envconfig/env_os.go b/vendor/github.com/kelseyhightower/envconfig/env_os.go
new file mode 100644
index 0000000..a6a014a
--- /dev/null
+++ b/vendor/github.com/kelseyhightower/envconfig/env_os.go
@@ -0,0 +1,7 @@
+// +build appengine
+
+package envconfig
+
+import "os"
+
+var lookupEnv = os.LookupEnv
diff --git a/vendor/github.com/kelseyhightower/envconfig/env_syscall.go b/vendor/github.com/kelseyhightower/envconfig/env_syscall.go
new file mode 100644
index 0000000..9d98085
--- /dev/null
+++ b/vendor/github.com/kelseyhightower/envconfig/env_syscall.go
@@ -0,0 +1,7 @@
+// +build !appengine
+
+package envconfig
+
+import "syscall"
+
+var lookupEnv = syscall.Getenv
diff --git a/vendor/github.com/kelseyhightower/envconfig/envconfig.go b/vendor/github.com/kelseyhightower/envconfig/envconfig.go
new file mode 100644
index 0000000..892d746
--- /dev/null
+++ b/vendor/github.com/kelseyhightower/envconfig/envconfig.go
@@ -0,0 +1,319 @@
+// Copyright (c) 2013 Kelsey Hightower. All rights reserved.
+// Use of this source code is governed by the MIT License that can be found in
+// the LICENSE file.
+
+package envconfig
+
+import (
+	"encoding"
+	"errors"
+	"fmt"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// ErrInvalidSpecification indicates that a specification is of the wrong type.
+var ErrInvalidSpecification = errors.New("specification must be a struct pointer")
+
+// A ParseError occurs when an environment variable cannot be converted to
+// the type required by a struct field during assignment.
+type ParseError struct {
+	KeyName   string
+	FieldName string
+	TypeName  string
+	Value     string
+	Err       error
+}
+
+// Decoder has the same semantics as Setter, but takes higher precedence.
+// It is provided for historical compatibility.
+type Decoder interface {
+	Decode(value string) error
+}
+
+// Setter is implemented by types that can self-deserialize values.
+// Any type that implements flag.Value also implements Setter.
+type Setter interface {
+	Set(value string) error
+}
+
+func (e *ParseError) Error() string {
+	return fmt.Sprintf("envconfig.Process: assigning %[1]s to %[2]s: converting '%[3]s' to type %[4]s. details: %[5]s", e.KeyName, e.FieldName, e.Value, e.TypeName, e.Err)
+}
+
+// varInfo maintains information about the configuration variable
+type varInfo struct {
+	Name  string
+	Alt   string
+	Key   string
+	Field reflect.Value
+	Tags  reflect.StructTag
+}
+
+// gatherInfo gathers information about the specified struct
+func gatherInfo(prefix string, spec interface{}) ([]varInfo, error) {
+	expr := regexp.MustCompile("([^A-Z]+|[A-Z][^A-Z]+|[A-Z]+)")
+	s := reflect.ValueOf(spec)
+
+	if s.Kind() != reflect.Ptr {
+		return nil, ErrInvalidSpecification
+	}
+	s = s.Elem()
+	if s.Kind() != reflect.Struct {
+		return nil, ErrInvalidSpecification
+	}
+	typeOfSpec := s.Type()
+
+	// over allocate an info array, we will extend if needed later
+	infos := make([]varInfo, 0, s.NumField())
+	for i := 0; i < s.NumField(); i++ {
+		f := s.Field(i)
+		ftype := typeOfSpec.Field(i)
+		if !f.CanSet() || ftype.Tag.Get("ignored") == "true" {
+			continue
+		}
+
+		for f.Kind() == reflect.Ptr {
+			if f.IsNil() {
+				if f.Type().Elem().Kind() != reflect.Struct {
+					// nil pointer to a non-struct: leave it alone
+					break
+				}
+				// nil pointer to struct: create a zero instance
+				f.Set(reflect.New(f.Type().Elem()))
+			}
+			f = f.Elem()
+		}
+
+		// Capture information about the config variable
+		info := varInfo{
+			Name:  ftype.Name,
+			Field: f,
+			Tags:  ftype.Tag,
+			Alt:   strings.ToUpper(ftype.Tag.Get("envconfig")),
+		}
+
+		// Default to the field name as the env var name (will be upcased)
+		info.Key = info.Name
+
+		// Best effort to un-pick camel casing as separate words
+		if ftype.Tag.Get("split_words") == "true" {
+			words := expr.FindAllStringSubmatch(ftype.Name, -1)
+			if len(words) > 0 {
+				var name []string
+				for _, words := range words {
+					name = append(name, words[0])
+				}
+
+				info.Key = strings.Join(name, "_")
+			}
+		}
+		if info.Alt != "" {
+			info.Key = info.Alt
+		}
+		if prefix != "" {
+			info.Key = fmt.Sprintf("%s_%s", prefix, info.Key)
+		}
+		info.Key = strings.ToUpper(info.Key)
+		infos = append(infos, info)
+
+		if f.Kind() == reflect.Struct {
+			// honor Decode if present
+			if decoderFrom(f) == nil && setterFrom(f) == nil && textUnmarshaler(f) == nil {
+				innerPrefix := prefix
+				if !ftype.Anonymous {
+					innerPrefix = info.Key
+				}
+
+				embeddedPtr := f.Addr().Interface()
+				embeddedInfos, err := gatherInfo(innerPrefix, embeddedPtr)
+				if err != nil {
+					return nil, err
+				}
+				infos = append(infos[:len(infos)-1],
embeddedInfos...) + + continue + } + } + } + return infos, nil +} + +// Process populates the specified struct based on environment variables +func Process(prefix string, spec interface{}) error { + infos, err := gatherInfo(prefix, spec) + + for _, info := range infos { + + // `os.Getenv` cannot differentiate between an explicitly set empty value + // and an unset value. `os.LookupEnv` is preferred to `syscall.Getenv`, + // but it is only available in go1.5 or newer. We're using Go build tags + // here to use os.LookupEnv for >=go1.5 + value, ok := lookupEnv(info.Key) + if !ok && info.Alt != "" { + value, ok = lookupEnv(info.Alt) + } + + def := info.Tags.Get("default") + if def != "" && !ok { + value = def + } + + req := info.Tags.Get("required") + if !ok && def == "" { + if req == "true" { + return fmt.Errorf("required key %s missing value", info.Key) + } + continue + } + + err := processField(value, info.Field) + if err != nil { + return &ParseError{ + KeyName: info.Key, + FieldName: info.Name, + TypeName: info.Field.Type().String(), + Value: value, + Err: err, + } + } + } + + return err +} + +// MustProcess is the same as Process but panics if an error occurs +func MustProcess(prefix string, spec interface{}) { + if err := Process(prefix, spec); err != nil { + panic(err) + } +} + +func processField(value string, field reflect.Value) error { + typ := field.Type() + + decoder := decoderFrom(field) + if decoder != nil { + return decoder.Decode(value) + } + // look for Set method if Decode not defined + setter := setterFrom(field) + if setter != nil { + return setter.Set(value) + } + + if t := textUnmarshaler(field); t != nil { + return t.UnmarshalText([]byte(value)) + } + + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + if field.IsNil() { + field.Set(reflect.New(typ)) + } + field = field.Elem() + } + + switch typ.Kind() { + case reflect.String: + field.SetString(value) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + var ( + val int64 + err error + ) + if field.Kind() == reflect.Int64 && typ.PkgPath() == "time" && typ.Name() == "Duration" { + var d time.Duration + d, err = time.ParseDuration(value) + val = int64(d) + } else { + val, err = strconv.ParseInt(value, 0, typ.Bits()) + } + if err != nil { + return err + } + + field.SetInt(val) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + val, err := strconv.ParseUint(value, 0, typ.Bits()) + if err != nil { + return err + } + field.SetUint(val) + case reflect.Bool: + val, err := strconv.ParseBool(value) + if err != nil { + return err + } + field.SetBool(val) + case reflect.Float32, reflect.Float64: + val, err := strconv.ParseFloat(value, typ.Bits()) + if err != nil { + return err + } + field.SetFloat(val) + case reflect.Slice: + vals := strings.Split(value, ",") + sl := reflect.MakeSlice(typ, len(vals), len(vals)) + for i, val := range vals { + err := processField(val, sl.Index(i)) + if err != nil { + return err + } + } + field.Set(sl) + case reflect.Map: + pairs := strings.Split(value, ",") + mp := reflect.MakeMap(typ) + for _, pair := range pairs { + kvpair := strings.Split(pair, ":") + if len(kvpair) != 2 { + return fmt.Errorf("invalid map item: %q", pair) + } + k := reflect.New(typ.Key()).Elem() + err := processField(kvpair[0], k) + if err != nil { + return err + } + v := reflect.New(typ.Elem()).Elem() + err = processField(kvpair[1], v) + if err != nil { + return err + } + mp.SetMapIndex(k, v) + } + field.Set(mp) + } + + return nil +} + +func interfaceFrom(field 
reflect.Value, fn func(interface{}, *bool)) { + // it may be impossible for a struct field to fail this check + if !field.CanInterface() { + return + } + var ok bool + fn(field.Interface(), &ok) + if !ok && field.CanAddr() { + fn(field.Addr().Interface(), &ok) + } +} + +func decoderFrom(field reflect.Value) (d Decoder) { + interfaceFrom(field, func(v interface{}, ok *bool) { d, *ok = v.(Decoder) }) + return d +} + +func setterFrom(field reflect.Value) (s Setter) { + interfaceFrom(field, func(v interface{}, ok *bool) { s, *ok = v.(Setter) }) + return s +} + +func textUnmarshaler(field reflect.Value) (t encoding.TextUnmarshaler) { + interfaceFrom(field, func(v interface{}, ok *bool) { t, *ok = v.(encoding.TextUnmarshaler) }) + return t +} diff --git a/vendor/github.com/kelseyhightower/envconfig/envconfig_test.go b/vendor/github.com/kelseyhightower/envconfig/envconfig_test.go new file mode 100644 index 0000000..e754058 --- /dev/null +++ b/vendor/github.com/kelseyhightower/envconfig/envconfig_test.go @@ -0,0 +1,688 @@ +// Copyright (c) 2013 Kelsey Hightower. All rights reserved. +// Use of this source code is governed by the MIT License that can be found in +// the LICENSE file. + +package envconfig + +import ( + "flag" + "fmt" + "os" + "testing" + "time" +) + +type HonorDecodeInStruct struct { + Value string +} + +func (h *HonorDecodeInStruct) Decode(env string) error { + h.Value = "decoded" + return nil +} + +type Specification struct { + Embedded `desc:"can we document a struct"` + EmbeddedButIgnored `ignored:"true"` + Debug bool + Port int + Rate float32 + User string + TTL uint32 + Timeout time.Duration + AdminUsers []string + MagicNumbers []int + ColorCodes map[string]int + MultiWordVar string + MultiWordVarWithAutoSplit uint32 `split_words:"true"` + SomePointer *string + SomePointerWithDefault *string `default:"foo2baz" desc:"foorbar is the word"` + MultiWordVarWithAlt string `envconfig:"MULTI_WORD_VAR_WITH_ALT" desc:"what alt"` + MultiWordVarWithLowerCaseAlt string `envconfig:"multi_word_var_with_lower_case_alt"` + NoPrefixWithAlt string `envconfig:"SERVICE_HOST"` + DefaultVar string `default:"foobar"` + RequiredVar string `required:"true"` + NoPrefixDefault string `envconfig:"BROKER" default:"127.0.0.1"` + RequiredDefault string `required:"true" default:"foo2bar"` + Ignored string `ignored:"true"` + NestedSpecification struct { + Property string `envconfig:"inner"` + PropertyWithDefault string `default:"fuzzybydefault"` + } `envconfig:"outer"` + AfterNested string + DecodeStruct HonorDecodeInStruct `envconfig:"honor"` + Datetime time.Time +} + +type Embedded struct { + Enabled bool `desc:"some embedded value"` + EmbeddedPort int + MultiWordVar string + MultiWordVarWithAlt string `envconfig:"MULTI_WITH_DIFFERENT_ALT"` + EmbeddedAlt string `envconfig:"EMBEDDED_WITH_ALT"` + EmbeddedIgnored string `ignored:"true"` +} + +type EmbeddedButIgnored struct { + FirstEmbeddedButIgnored string + SecondEmbeddedButIgnored string +} + +func TestProcess(t *testing.T) { + var s Specification + os.Clearenv() + os.Setenv("ENV_CONFIG_DEBUG", "true") + os.Setenv("ENV_CONFIG_PORT", "8080") + os.Setenv("ENV_CONFIG_RATE", "0.5") + os.Setenv("ENV_CONFIG_USER", "Kelsey") + os.Setenv("ENV_CONFIG_TIMEOUT", "2m") + os.Setenv("ENV_CONFIG_ADMINUSERS", "John,Adam,Will") + os.Setenv("ENV_CONFIG_MAGICNUMBERS", "5,10,20") + os.Setenv("ENV_CONFIG_COLORCODES", "red:1,green:2,blue:3") + os.Setenv("SERVICE_HOST", "127.0.0.1") + os.Setenv("ENV_CONFIG_TTL", "30") + os.Setenv("ENV_CONFIG_REQUIREDVAR", "foo") + 
os.Setenv("ENV_CONFIG_IGNORED", "was-not-ignored") + os.Setenv("ENV_CONFIG_OUTER_INNER", "iamnested") + os.Setenv("ENV_CONFIG_AFTERNESTED", "after") + os.Setenv("ENV_CONFIG_HONOR", "honor") + os.Setenv("ENV_CONFIG_DATETIME", "2016-08-16T18:57:05Z") + os.Setenv("ENV_CONFIG_MULTI_WORD_VAR_WITH_AUTO_SPLIT", "24") + err := Process("env_config", &s) + if err != nil { + t.Error(err.Error()) + } + if s.NoPrefixWithAlt != "127.0.0.1" { + t.Errorf("expected %v, got %v", "127.0.0.1", s.NoPrefixWithAlt) + } + if !s.Debug { + t.Errorf("expected %v, got %v", true, s.Debug) + } + if s.Port != 8080 { + t.Errorf("expected %d, got %v", 8080, s.Port) + } + if s.Rate != 0.5 { + t.Errorf("expected %f, got %v", 0.5, s.Rate) + } + if s.TTL != 30 { + t.Errorf("expected %d, got %v", 30, s.TTL) + } + if s.User != "Kelsey" { + t.Errorf("expected %s, got %s", "Kelsey", s.User) + } + if s.Timeout != 2*time.Minute { + t.Errorf("expected %s, got %s", 2*time.Minute, s.Timeout) + } + if s.RequiredVar != "foo" { + t.Errorf("expected %s, got %s", "foo", s.RequiredVar) + } + if len(s.AdminUsers) != 3 || + s.AdminUsers[0] != "John" || + s.AdminUsers[1] != "Adam" || + s.AdminUsers[2] != "Will" { + t.Errorf("expected %#v, got %#v", []string{"John", "Adam", "Will"}, s.AdminUsers) + } + if len(s.MagicNumbers) != 3 || + s.MagicNumbers[0] != 5 || + s.MagicNumbers[1] != 10 || + s.MagicNumbers[2] != 20 { + t.Errorf("expected %#v, got %#v", []int{5, 10, 20}, s.MagicNumbers) + } + if s.Ignored != "" { + t.Errorf("expected empty string, got %#v", s.Ignored) + } + + if len(s.ColorCodes) != 3 || + s.ColorCodes["red"] != 1 || + s.ColorCodes["green"] != 2 || + s.ColorCodes["blue"] != 3 { + t.Errorf( + "expected %#v, got %#v", + map[string]int{ + "red": 1, + "green": 2, + "blue": 3, + }, + s.ColorCodes, + ) + } + + if s.NestedSpecification.Property != "iamnested" { + t.Errorf("expected '%s' string, got %#v", "iamnested", s.NestedSpecification.Property) + } + + if s.NestedSpecification.PropertyWithDefault != "fuzzybydefault" { + t.Errorf("expected default '%s' string, got %#v", "fuzzybydefault", s.NestedSpecification.PropertyWithDefault) + } + + if s.AfterNested != "after" { + t.Errorf("expected default '%s' string, got %#v", "after", s.AfterNested) + } + + if s.DecodeStruct.Value != "decoded" { + t.Errorf("expected default '%s' string, got %#v", "decoded", s.DecodeStruct.Value) + } + + if expected := time.Date(2016, 8, 16, 18, 57, 05, 0, time.UTC); !s.Datetime.Equal(expected) { + t.Errorf("expected %s, got %s", expected.Format(time.RFC3339), s.Datetime.Format(time.RFC3339)) + } + + if s.MultiWordVarWithAutoSplit != 24 { + t.Errorf("expected %q, got %q", 24, s.MultiWordVarWithAutoSplit) + } +} + +func TestParseErrorBool(t *testing.T) { + var s Specification + os.Clearenv() + os.Setenv("ENV_CONFIG_DEBUG", "string") + os.Setenv("ENV_CONFIG_REQUIREDVAR", "foo") + err := Process("env_config", &s) + v, ok := err.(*ParseError) + if !ok { + t.Errorf("expected ParseError, got %v", v) + } + if v.FieldName != "Debug" { + t.Errorf("expected %s, got %v", "Debug", v.FieldName) + } + if s.Debug != false { + t.Errorf("expected %v, got %v", false, s.Debug) + } +} + +func TestParseErrorFloat32(t *testing.T) { + var s Specification + os.Clearenv() + os.Setenv("ENV_CONFIG_RATE", "string") + os.Setenv("ENV_CONFIG_REQUIREDVAR", "foo") + err := Process("env_config", &s) + v, ok := err.(*ParseError) + if !ok { + t.Errorf("expected ParseError, got %v", v) + } + if v.FieldName != "Rate" { + t.Errorf("expected %s, got %v", "Rate", v.FieldName) + } + if s.Rate != 0 { 
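+		// A failed parse must leave the destination field at its zero value.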
+ t.Errorf("expected %v, got %v", 0, s.Rate) + } +} + +func TestParseErrorInt(t *testing.T) { + var s Specification + os.Clearenv() + os.Setenv("ENV_CONFIG_PORT", "string") + os.Setenv("ENV_CONFIG_REQUIREDVAR", "foo") + err := Process("env_config", &s) + v, ok := err.(*ParseError) + if !ok { + t.Errorf("expected ParseError, got %v", v) + } + if v.FieldName != "Port" { + t.Errorf("expected %s, got %v", "Port", v.FieldName) + } + if s.Port != 0 { + t.Errorf("expected %v, got %v", 0, s.Port) + } +} + +func TestParseErrorUint(t *testing.T) { + var s Specification + os.Clearenv() + os.Setenv("ENV_CONFIG_TTL", "-30") + err := Process("env_config", &s) + v, ok := err.(*ParseError) + if !ok { + t.Errorf("expected ParseError, got %v", v) + } + if v.FieldName != "TTL" { + t.Errorf("expected %s, got %v", "TTL", v.FieldName) + } + if s.TTL != 0 { + t.Errorf("expected %v, got %v", 0, s.TTL) + } +} + +func TestParseErrorSplitWords(t *testing.T) { + var s Specification + os.Clearenv() + os.Setenv("ENV_CONFIG_MULTI_WORD_VAR_WITH_AUTO_SPLIT", "shakespeare") + err := Process("env_config", &s) + v, ok := err.(*ParseError) + if !ok { + t.Errorf("expected ParseError, got %v", v) + } + if v.FieldName != "MultiWordVarWithAutoSplit" { + t.Errorf("expected %s, got %v", "", v.FieldName) + } + if s.MultiWordVarWithAutoSplit != 0 { + t.Errorf("expected %v, got %v", 0, s.MultiWordVarWithAutoSplit) + } +} + +func TestErrInvalidSpecification(t *testing.T) { + m := make(map[string]string) + err := Process("env_config", &m) + if err != ErrInvalidSpecification { + t.Errorf("expected %v, got %v", ErrInvalidSpecification, err) + } +} + +func TestUnsetVars(t *testing.T) { + var s Specification + os.Clearenv() + os.Setenv("USER", "foo") + os.Setenv("ENV_CONFIG_REQUIREDVAR", "foo") + if err := Process("env_config", &s); err != nil { + t.Error(err.Error()) + } + + // If the var is not defined the non-prefixed version should not be used + // unless the struct tag says so + if s.User != "" { + t.Errorf("expected %q, got %q", "", s.User) + } +} + +func TestAlternateVarNames(t *testing.T) { + var s Specification + os.Clearenv() + os.Setenv("ENV_CONFIG_MULTI_WORD_VAR", "foo") + os.Setenv("ENV_CONFIG_MULTI_WORD_VAR_WITH_ALT", "bar") + os.Setenv("ENV_CONFIG_MULTI_WORD_VAR_WITH_LOWER_CASE_ALT", "baz") + os.Setenv("ENV_CONFIG_REQUIREDVAR", "foo") + if err := Process("env_config", &s); err != nil { + t.Error(err.Error()) + } + + // Setting the alt version of the var in the environment has no effect if + // the struct tag is not supplied + if s.MultiWordVar != "" { + t.Errorf("expected %q, got %q", "", s.MultiWordVar) + } + + // Setting the alt version of the var in the environment correctly sets + // the value if the struct tag IS supplied + if s.MultiWordVarWithAlt != "bar" { + t.Errorf("expected %q, got %q", "bar", s.MultiWordVarWithAlt) + } + + // Alt value is not case sensitive and is treated as all uppercase + if s.MultiWordVarWithLowerCaseAlt != "baz" { + t.Errorf("expected %q, got %q", "baz", s.MultiWordVarWithLowerCaseAlt) + } +} + +func TestRequiredVar(t *testing.T) { + var s Specification + os.Clearenv() + os.Setenv("ENV_CONFIG_REQUIREDVAR", "foobar") + if err := Process("env_config", &s); err != nil { + t.Error(err.Error()) + } + + if s.RequiredVar != "foobar" { + t.Errorf("expected %s, got %s", "foobar", s.RequiredVar) + } +} + +func TestBlankDefaultVar(t *testing.T) { + var s Specification + os.Clearenv() + os.Setenv("ENV_CONFIG_REQUIREDVAR", "requiredvalue") + if err := Process("env_config", &s); err != nil { + 
t.Error(err.Error()) + } + + if s.DefaultVar != "foobar" { + t.Errorf("expected %s, got %s", "foobar", s.DefaultVar) + } + + if *s.SomePointerWithDefault != "foo2baz" { + t.Errorf("expected %s, got %s", "foo2baz", *s.SomePointerWithDefault) + } +} + +func TestNonBlankDefaultVar(t *testing.T) { + var s Specification + os.Clearenv() + os.Setenv("ENV_CONFIG_DEFAULTVAR", "nondefaultval") + os.Setenv("ENV_CONFIG_REQUIREDVAR", "requiredvalue") + if err := Process("env_config", &s); err != nil { + t.Error(err.Error()) + } + + if s.DefaultVar != "nondefaultval" { + t.Errorf("expected %s, got %s", "nondefaultval", s.DefaultVar) + } +} + +func TestExplicitBlankDefaultVar(t *testing.T) { + var s Specification + os.Clearenv() + os.Setenv("ENV_CONFIG_DEFAULTVAR", "") + os.Setenv("ENV_CONFIG_REQUIREDVAR", "") + + if err := Process("env_config", &s); err != nil { + t.Error(err.Error()) + } + + if s.DefaultVar != "" { + t.Errorf("expected %s, got %s", "\"\"", s.DefaultVar) + } +} + +func TestAlternateNameDefaultVar(t *testing.T) { + var s Specification + os.Clearenv() + os.Setenv("BROKER", "betterbroker") + os.Setenv("ENV_CONFIG_REQUIREDVAR", "foo") + if err := Process("env_config", &s); err != nil { + t.Error(err.Error()) + } + + if s.NoPrefixDefault != "betterbroker" { + t.Errorf("expected %q, got %q", "betterbroker", s.NoPrefixDefault) + } + + os.Clearenv() + os.Setenv("ENV_CONFIG_REQUIREDVAR", "foo") + if err := Process("env_config", &s); err != nil { + t.Error(err.Error()) + } + + if s.NoPrefixDefault != "127.0.0.1" { + t.Errorf("expected %q, got %q", "127.0.0.1", s.NoPrefixDefault) + } +} + +func TestRequiredDefault(t *testing.T) { + var s Specification + os.Clearenv() + os.Setenv("ENV_CONFIG_REQUIREDVAR", "foo") + if err := Process("env_config", &s); err != nil { + t.Error(err.Error()) + } + + if s.RequiredDefault != "foo2bar" { + t.Errorf("expected %q, got %q", "foo2bar", s.RequiredDefault) + } +} + +func TestPointerFieldBlank(t *testing.T) { + var s Specification + os.Clearenv() + os.Setenv("ENV_CONFIG_REQUIREDVAR", "foo") + if err := Process("env_config", &s); err != nil { + t.Error(err.Error()) + } + + if s.SomePointer != nil { + t.Errorf("expected , got %q", *s.SomePointer) + } +} + +func TestMustProcess(t *testing.T) { + var s Specification + os.Clearenv() + os.Setenv("ENV_CONFIG_DEBUG", "true") + os.Setenv("ENV_CONFIG_PORT", "8080") + os.Setenv("ENV_CONFIG_RATE", "0.5") + os.Setenv("ENV_CONFIG_USER", "Kelsey") + os.Setenv("SERVICE_HOST", "127.0.0.1") + os.Setenv("ENV_CONFIG_REQUIREDVAR", "foo") + MustProcess("env_config", &s) + + defer func() { + if err := recover(); err != nil { + return + } + + t.Error("expected panic") + }() + m := make(map[string]string) + MustProcess("env_config", &m) +} + +func TestEmbeddedStruct(t *testing.T) { + var s Specification + os.Clearenv() + os.Setenv("ENV_CONFIG_REQUIREDVAR", "required") + os.Setenv("ENV_CONFIG_ENABLED", "true") + os.Setenv("ENV_CONFIG_EMBEDDEDPORT", "1234") + os.Setenv("ENV_CONFIG_MULTIWORDVAR", "foo") + os.Setenv("ENV_CONFIG_MULTI_WORD_VAR_WITH_ALT", "bar") + os.Setenv("ENV_CONFIG_MULTI_WITH_DIFFERENT_ALT", "baz") + os.Setenv("ENV_CONFIG_EMBEDDED_WITH_ALT", "foobar") + os.Setenv("ENV_CONFIG_SOMEPOINTER", "foobaz") + os.Setenv("ENV_CONFIG_EMBEDDED_IGNORED", "was-not-ignored") + if err := Process("env_config", &s); err != nil { + t.Error(err.Error()) + } + if !s.Enabled { + t.Errorf("expected %v, got %v", true, s.Enabled) + } + if s.EmbeddedPort != 1234 { + t.Errorf("expected %d, got %v", 1234, s.EmbeddedPort) + } + if s.MultiWordVar != 
"foo" { + t.Errorf("expected %s, got %s", "foo", s.MultiWordVar) + } + if s.Embedded.MultiWordVar != "foo" { + t.Errorf("expected %s, got %s", "foo", s.Embedded.MultiWordVar) + } + if s.MultiWordVarWithAlt != "bar" { + t.Errorf("expected %s, got %s", "bar", s.MultiWordVarWithAlt) + } + if s.Embedded.MultiWordVarWithAlt != "baz" { + t.Errorf("expected %s, got %s", "baz", s.Embedded.MultiWordVarWithAlt) + } + if s.EmbeddedAlt != "foobar" { + t.Errorf("expected %s, got %s", "foobar", s.EmbeddedAlt) + } + if *s.SomePointer != "foobaz" { + t.Errorf("expected %s, got %s", "foobaz", *s.SomePointer) + } + if s.EmbeddedIgnored != "" { + t.Errorf("expected empty string, got %#v", s.Ignored) + } +} + +func TestEmbeddedButIgnoredStruct(t *testing.T) { + var s Specification + os.Clearenv() + os.Setenv("ENV_CONFIG_REQUIREDVAR", "required") + os.Setenv("ENV_CONFIG_FIRSTEMBEDDEDBUTIGNORED", "was-not-ignored") + os.Setenv("ENV_CONFIG_SECONDEMBEDDEDBUTIGNORED", "was-not-ignored") + if err := Process("env_config", &s); err != nil { + t.Error(err.Error()) + } + if s.FirstEmbeddedButIgnored != "" { + t.Errorf("expected empty string, got %#v", s.Ignored) + } + if s.SecondEmbeddedButIgnored != "" { + t.Errorf("expected empty string, got %#v", s.Ignored) + } +} + +func TestNonPointerFailsProperly(t *testing.T) { + var s Specification + os.Clearenv() + os.Setenv("ENV_CONFIG_REQUIREDVAR", "snap") + + err := Process("env_config", s) + if err != ErrInvalidSpecification { + t.Errorf("non-pointer should fail with ErrInvalidSpecification, was instead %s", err) + } +} + +func TestCustomValueFields(t *testing.T) { + var s struct { + Foo string + Bar bracketed + Baz quoted + Struct setterStruct + } + + // Set would panic when the receiver is nil, + // so make sure it has an initial value to replace. + s.Baz = quoted{new(bracketed)} + + os.Clearenv() + os.Setenv("ENV_CONFIG_FOO", "foo") + os.Setenv("ENV_CONFIG_BAR", "bar") + os.Setenv("ENV_CONFIG_BAZ", "baz") + os.Setenv("ENV_CONFIG_STRUCT", "inner") + + if err := Process("env_config", &s); err != nil { + t.Error(err.Error()) + } + + if want := "foo"; s.Foo != want { + t.Errorf("foo: got %#q, want %#q", s.Foo, want) + } + + if want := "[bar]"; s.Bar.String() != want { + t.Errorf("bar: got %#q, want %#q", s.Bar, want) + } + + if want := `["baz"]`; s.Baz.String() != want { + t.Errorf(`baz: got %#q, want %#q`, s.Baz, want) + } + + if want := `setterstruct{"inner"}`; s.Struct.Inner != want { + t.Errorf(`Struct.Inner: got %#q, want %#q`, s.Struct.Inner, want) + } +} + +func TestCustomPointerFields(t *testing.T) { + var s struct { + Foo string + Bar *bracketed + Baz *quoted + Struct *setterStruct + } + + // Set would panic when the receiver is nil, + // so make sure they have initial values to replace. 
+	s.Bar = new(bracketed)
+	s.Baz = &quoted{new(bracketed)}
+
+	os.Clearenv()
+	os.Setenv("ENV_CONFIG_FOO", "foo")
+	os.Setenv("ENV_CONFIG_BAR", "bar")
+	os.Setenv("ENV_CONFIG_BAZ", "baz")
+	os.Setenv("ENV_CONFIG_STRUCT", "inner")
+
+	if err := Process("env_config", &s); err != nil {
+		t.Error(err.Error())
+	}
+
+	if want := "foo"; s.Foo != want {
+		t.Errorf("foo: got %#q, want %#q", s.Foo, want)
+	}
+
+	if want := "[bar]"; s.Bar.String() != want {
+		t.Errorf("bar: got %#q, want %#q", s.Bar, want)
+	}
+
+	if want := `["baz"]`; s.Baz.String() != want {
+		t.Errorf(`baz: got %#q, want %#q`, s.Baz, want)
+	}
+
+	if want := `setterstruct{"inner"}`; s.Struct.Inner != want {
+		t.Errorf(`Struct.Inner: got %#q, want %#q`, s.Struct.Inner, want)
+	}
+}
+
+func TestEmptyPrefixUsesFieldNames(t *testing.T) {
+	var s Specification
+	os.Clearenv()
+	os.Setenv("REQUIREDVAR", "foo")
+
+	err := Process("", &s)
+	if err != nil {
+		t.Errorf("Process failed: %s", err)
+	}
+
+	if s.RequiredVar != "foo" {
+		t.Errorf(
+			`RequiredVar not populated correctly: expected "foo", got %q`,
+			s.RequiredVar,
+		)
+	}
+}
+
+func TestNestedStructVarName(t *testing.T) {
+	var s Specification
+	os.Clearenv()
+	os.Setenv("ENV_CONFIG_REQUIREDVAR", "required")
+	val := "found with only short name"
+	os.Setenv("INNER", val)
+	if err := Process("env_config", &s); err != nil {
+		t.Error(err.Error())
+	}
+	if s.NestedSpecification.Property != val {
+		t.Errorf("expected %s, got %s", val, s.NestedSpecification.Property)
+	}
+}
+
+func TestTextUnmarshalerError(t *testing.T) {
+	var s Specification
+	os.Clearenv()
+	os.Setenv("ENV_CONFIG_REQUIREDVAR", "foo")
+	os.Setenv("ENV_CONFIG_DATETIME", "I'M NOT A DATE")
+
+	err := Process("env_config", &s)
+
+	v, ok := err.(*ParseError)
+	if !ok {
+		t.Errorf("expected ParseError, got %v", v)
+	}
+	if v.FieldName != "Datetime" {
+		t.Errorf("expected %s, got %v", "Datetime", v.FieldName)
+	}
+
+	expectedLowLevelError := time.ParseError{
+		Layout:     time.RFC3339,
+		Value:      "I'M NOT A DATE",
+		LayoutElem: "2006",
+		ValueElem:  "I'M NOT A DATE",
+	}
+
+	if v.Err.Error() != expectedLowLevelError.Error() {
+		t.Errorf("expected %s, got %s", expectedLowLevelError, v.Err)
+	}
+	if s.Debug != false {
+		t.Errorf("expected %v, got %v", false, s.Debug)
+	}
+}
+
+type bracketed string
+
+func (b *bracketed) Set(value string) error {
+	*b = bracketed("[" + value + "]")
+	return nil
+}
+
+func (b bracketed) String() string {
+	return string(b)
+}
+
+// quoted is used to test the precedence of Decode over Set.
+// The sole field is a flag.Value rather than a setter to validate that
+// all flag.Value implementations are also Setter implementations.
+type quoted struct{ flag.Value } + +func (d quoted) Decode(value string) error { + return d.Set(`"` + value + `"`) +} + +type setterStruct struct { + Inner string +} + +func (ss *setterStruct) Set(value string) error { + ss.Inner = fmt.Sprintf("setterstruct{%q}", value) + return nil +} diff --git a/vendor/github.com/kelseyhightower/envconfig/testdata/custom.txt b/vendor/github.com/kelseyhightower/envconfig/testdata/custom.txt new file mode 100644 index 0000000..243e82c --- /dev/null +++ b/vendor/github.com/kelseyhightower/envconfig/testdata/custom.txt @@ -0,0 +1,30 @@ +ENV_CONFIG_ENABLED=some.embedded.value +ENV_CONFIG_EMBEDDEDPORT= +ENV_CONFIG_MULTIWORDVAR= +ENV_CONFIG_MULTI_WITH_DIFFERENT_ALT= +ENV_CONFIG_EMBEDDED_WITH_ALT= +ENV_CONFIG_DEBUG= +ENV_CONFIG_PORT= +ENV_CONFIG_RATE= +ENV_CONFIG_USER= +ENV_CONFIG_TTL= +ENV_CONFIG_TIMEOUT= +ENV_CONFIG_ADMINUSERS= +ENV_CONFIG_MAGICNUMBERS= +ENV_CONFIG_COLORCODES= +ENV_CONFIG_MULTIWORDVAR= +ENV_CONFIG_MULTI_WORD_VAR_WITH_AUTO_SPLIT= +ENV_CONFIG_SOMEPOINTER= +ENV_CONFIG_SOMEPOINTERWITHDEFAULT=foorbar.is.the.word +ENV_CONFIG_MULTI_WORD_VAR_WITH_ALT=what.alt +ENV_CONFIG_MULTI_WORD_VAR_WITH_LOWER_CASE_ALT= +ENV_CONFIG_SERVICE_HOST= +ENV_CONFIG_DEFAULTVAR= +ENV_CONFIG_REQUIREDVAR= +ENV_CONFIG_BROKER= +ENV_CONFIG_REQUIREDDEFAULT= +ENV_CONFIG_OUTER_INNER= +ENV_CONFIG_OUTER_PROPERTYWITHDEFAULT= +ENV_CONFIG_AFTERNESTED= +ENV_CONFIG_HONOR= +ENV_CONFIG_DATETIME= diff --git a/vendor/github.com/kelseyhightower/envconfig/testdata/default_list.txt b/vendor/github.com/kelseyhightower/envconfig/testdata/default_list.txt new file mode 100644 index 0000000..bc29211 --- /dev/null +++ b/vendor/github.com/kelseyhightower/envconfig/testdata/default_list.txt @@ -0,0 +1,153 @@ +This.application.is.configured.via.the.environment..The.following.environment +variables.can.be.used: + +ENV_CONFIG_ENABLED +..[description].some.embedded.value +..[type]........True.or.False +..[default]..... +..[required].... +ENV_CONFIG_EMBEDDEDPORT +..[description]. +..[type]........Integer +..[default]..... +..[required].... +ENV_CONFIG_MULTIWORDVAR +..[description]. +..[type]........String +..[default]..... +..[required].... +ENV_CONFIG_MULTI_WITH_DIFFERENT_ALT +..[description]. +..[type]........String +..[default]..... +..[required].... +ENV_CONFIG_EMBEDDED_WITH_ALT +..[description]. +..[type]........String +..[default]..... +..[required].... +ENV_CONFIG_DEBUG +..[description]. +..[type]........True.or.False +..[default]..... +..[required].... +ENV_CONFIG_PORT +..[description]. +..[type]........Integer +..[default]..... +..[required].... +ENV_CONFIG_RATE +..[description]. +..[type]........Float +..[default]..... +..[required].... +ENV_CONFIG_USER +..[description]. +..[type]........String +..[default]..... +..[required].... +ENV_CONFIG_TTL +..[description]. +..[type]........Unsigned.Integer +..[default]..... +..[required].... +ENV_CONFIG_TIMEOUT +..[description]. +..[type]........Duration +..[default]..... +..[required].... +ENV_CONFIG_ADMINUSERS +..[description]. +..[type]........Comma-separated.list.of.String +..[default]..... +..[required].... +ENV_CONFIG_MAGICNUMBERS +..[description]. +..[type]........Comma-separated.list.of.Integer +..[default]..... +..[required].... +ENV_CONFIG_COLORCODES +..[description]. +..[type]........Comma-separated.list.of.String:Integer.pairs +..[default]..... +..[required].... +ENV_CONFIG_MULTIWORDVAR +..[description]. +..[type]........String +..[default]..... +..[required].... +ENV_CONFIG_MULTI_WORD_VAR_WITH_AUTO_SPLIT +..[description]. 
+..[type]........Unsigned.Integer +..[default]..... +..[required].... +ENV_CONFIG_SOMEPOINTER +..[description]. +..[type]........String +..[default]..... +..[required].... +ENV_CONFIG_SOMEPOINTERWITHDEFAULT +..[description].foorbar.is.the.word +..[type]........String +..[default].....foo2baz +..[required].... +ENV_CONFIG_MULTI_WORD_VAR_WITH_ALT +..[description].what.alt +..[type]........String +..[default]..... +..[required].... +ENV_CONFIG_MULTI_WORD_VAR_WITH_LOWER_CASE_ALT +..[description]. +..[type]........String +..[default]..... +..[required].... +ENV_CONFIG_SERVICE_HOST +..[description]. +..[type]........String +..[default]..... +..[required].... +ENV_CONFIG_DEFAULTVAR +..[description]. +..[type]........String +..[default].....foobar +..[required].... +ENV_CONFIG_REQUIREDVAR +..[description]. +..[type]........String +..[default]..... +..[required]....true +ENV_CONFIG_BROKER +..[description]. +..[type]........String +..[default].....127.0.0.1 +..[required].... +ENV_CONFIG_REQUIREDDEFAULT +..[description]. +..[type]........String +..[default].....foo2bar +..[required]....true +ENV_CONFIG_OUTER_INNER +..[description]. +..[type]........String +..[default]..... +..[required].... +ENV_CONFIG_OUTER_PROPERTYWITHDEFAULT +..[description]. +..[type]........String +..[default].....fuzzybydefault +..[required].... +ENV_CONFIG_AFTERNESTED +..[description]. +..[type]........String +..[default]..... +..[required].... +ENV_CONFIG_HONOR +..[description]. +..[type]........HonorDecodeInStruct +..[default]..... +..[required].... +ENV_CONFIG_DATETIME +..[description]. +..[type]........Time +..[default]..... +..[required].... diff --git a/vendor/github.com/kelseyhightower/envconfig/testdata/default_table.txt b/vendor/github.com/kelseyhightower/envconfig/testdata/default_table.txt new file mode 100644 index 0000000..f3cf945 --- /dev/null +++ b/vendor/github.com/kelseyhightower/envconfig/testdata/default_table.txt @@ -0,0 +1,34 @@ +This.application.is.configured.via.the.environment..The.following.environment +variables.can.be.used: + +KEY..............................................TYPE............................................DEFAULT...........REQUIRED....DESCRIPTION +ENV_CONFIG_ENABLED...............................True.or.False.................................................................some.embedded.value +ENV_CONFIG_EMBEDDEDPORT..........................Integer....................................................................... +ENV_CONFIG_MULTIWORDVAR..........................String........................................................................ +ENV_CONFIG_MULTI_WITH_DIFFERENT_ALT..............String........................................................................ +ENV_CONFIG_EMBEDDED_WITH_ALT.....................String........................................................................ +ENV_CONFIG_DEBUG.................................True.or.False................................................................. +ENV_CONFIG_PORT..................................Integer....................................................................... +ENV_CONFIG_RATE..................................Float......................................................................... +ENV_CONFIG_USER..................................String........................................................................ +ENV_CONFIG_TTL...................................Unsigned.Integer.............................................................. 
+ENV_CONFIG_TIMEOUT...............................Duration...................................................................... +ENV_CONFIG_ADMINUSERS............................Comma-separated.list.of.String................................................ +ENV_CONFIG_MAGICNUMBERS..........................Comma-separated.list.of.Integer............................................... +ENV_CONFIG_COLORCODES............................Comma-separated.list.of.String:Integer.pairs.................................. +ENV_CONFIG_MULTIWORDVAR..........................String........................................................................ +ENV_CONFIG_MULTI_WORD_VAR_WITH_AUTO_SPLIT........Unsigned.Integer.............................................................. +ENV_CONFIG_SOMEPOINTER...........................String........................................................................ +ENV_CONFIG_SOMEPOINTERWITHDEFAULT................String..........................................foo2baz.......................foorbar.is.the.word +ENV_CONFIG_MULTI_WORD_VAR_WITH_ALT...............String........................................................................what.alt +ENV_CONFIG_MULTI_WORD_VAR_WITH_LOWER_CASE_ALT....String........................................................................ +ENV_CONFIG_SERVICE_HOST..........................String........................................................................ +ENV_CONFIG_DEFAULTVAR............................String..........................................foobar........................ +ENV_CONFIG_REQUIREDVAR...........................String............................................................true........ +ENV_CONFIG_BROKER................................String..........................................127.0.0.1..................... +ENV_CONFIG_REQUIREDDEFAULT.......................String..........................................foo2bar...........true........ +ENV_CONFIG_OUTER_INNER...........................String........................................................................ +ENV_CONFIG_OUTER_PROPERTYWITHDEFAULT.............String..........................................fuzzybydefault................ +ENV_CONFIG_AFTERNESTED...........................String........................................................................ +ENV_CONFIG_HONOR.................................HonorDecodeInStruct........................................................... +ENV_CONFIG_DATETIME..............................Time.......................................................................... diff --git a/vendor/github.com/kelseyhightower/envconfig/testdata/fault.txt b/vendor/github.com/kelseyhightower/envconfig/testdata/fault.txt new file mode 100644 index 0000000..30e28ce --- /dev/null +++ b/vendor/github.com/kelseyhightower/envconfig/testdata/fault.txt @@ -0,0 +1,30 @@ +{.Key} +{.Key} +{.Key} +{.Key} +{.Key} +{.Key} +{.Key} +{.Key} +{.Key} +{.Key} +{.Key} +{.Key} +{.Key} +{.Key} +{.Key} +{.Key} +{.Key} +{.Key} +{.Key} +{.Key} +{.Key} +{.Key} +{.Key} +{.Key} +{.Key} +{.Key} +{.Key} +{.Key} +{.Key} +{.Key} diff --git a/vendor/github.com/kelseyhightower/envconfig/usage.go b/vendor/github.com/kelseyhightower/envconfig/usage.go new file mode 100644 index 0000000..1846353 --- /dev/null +++ b/vendor/github.com/kelseyhightower/envconfig/usage.go @@ -0,0 +1,158 @@ +// Copyright (c) 2016 Kelsey Hightower and others. All rights reserved. 
+// Use of this source code is governed by the MIT License that can be found in
+// the LICENSE file.
+
+package envconfig
+
+import (
+	"encoding"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"strconv"
+	"strings"
+	"text/tabwriter"
+	"text/template"
+)
+
+const (
+	// DefaultListFormat constant to use to display usage in a list format
+	DefaultListFormat = `This application is configured via the environment. The following environment
+variables can be used:
+{{range .}}
+{{usage_key .}}
+  [description] {{usage_description .}}
+  [type]        {{usage_type .}}
+  [default]     {{usage_default .}}
+  [required]    {{usage_required .}}{{end}}
+`
+	// DefaultTableFormat constant to use to display usage in a tabular format
+	DefaultTableFormat = `This application is configured via the environment. The following environment
+variables can be used:
+
+KEY	TYPE	DEFAULT	REQUIRED	DESCRIPTION
+{{range .}}{{usage_key .}}	{{usage_type .}}	{{usage_default .}}	{{usage_required .}}	{{usage_description .}}
+{{end}}`
+)
+
+var (
+	decoderType     = reflect.TypeOf((*Decoder)(nil)).Elem()
+	setterType      = reflect.TypeOf((*Setter)(nil)).Elem()
+	unmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+)
+
+func implementsInterface(t reflect.Type) bool {
+	return t.Implements(decoderType) ||
+		reflect.PtrTo(t).Implements(decoderType) ||
+		t.Implements(setterType) ||
+		reflect.PtrTo(t).Implements(setterType) ||
+		t.Implements(unmarshalerType) ||
+		reflect.PtrTo(t).Implements(unmarshalerType)
+}
+
+// toTypeDescription converts Go types into a human readable description
+func toTypeDescription(t reflect.Type) string {
+	switch t.Kind() {
+	case reflect.Array, reflect.Slice:
+		return fmt.Sprintf("Comma-separated list of %s", toTypeDescription(t.Elem()))
+	case reflect.Map:
+		return fmt.Sprintf(
+			"Comma-separated list of %s:%s pairs",
+			toTypeDescription(t.Key()),
+			toTypeDescription(t.Elem()),
+		)
+	case reflect.Ptr:
+		return toTypeDescription(t.Elem())
+	case reflect.Struct:
+		if implementsInterface(t) && t.Name() != "" {
+			return t.Name()
+		}
+		return ""
+	case reflect.String:
+		name := t.Name()
+		if name != "" && name != "string" {
+			return name
+		}
+		return "String"
+	case reflect.Bool:
+		name := t.Name()
+		if name != "" && name != "bool" {
+			return name
+		}
+		return "True or False"
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		name := t.Name()
+		if name != "" && !strings.HasPrefix(name, "int") {
+			return name
+		}
+		return "Integer"
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		name := t.Name()
+		if name != "" && !strings.HasPrefix(name, "uint") {
+			return name
+		}
+		return "Unsigned Integer"
+	case reflect.Float32, reflect.Float64:
+		name := t.Name()
+		if name != "" && !strings.HasPrefix(name, "float") {
+			return name
+		}
+		return "Float"
+	}
+	return fmt.Sprintf("%+v", t)
+}
+
+// Usage writes usage information to stdout using the default header and table format
+func Usage(prefix string, spec interface{}) error {
+	// The default is to output the usage information as a table
+	// Create tabwriter instance to support table output
+	tabs := tabwriter.NewWriter(os.Stdout, 1, 0, 4, ' ', 0)
+
+	err := Usagef(prefix, spec, tabs, DefaultTableFormat)
+	tabs.Flush()
+	return err
+}
+
+// Usagef writes usage information to the specified io.Writer using the specified template specification
+func Usagef(prefix string, spec interface{}, out io.Writer, format string) error {
+
+	// Specify the default usage template functions
+	functions := template.FuncMap{
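+		// (Each helper below receives one varInfo record gathered from the
+		// spec struct and renders a single field of the usage output.)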
+ "usage_key": func(v varInfo) string { return v.Key }, + "usage_description": func(v varInfo) string { return v.Tags.Get("desc") }, + "usage_type": func(v varInfo) string { return toTypeDescription(v.Field.Type()) }, + "usage_default": func(v varInfo) string { return v.Tags.Get("default") }, + "usage_required": func(v varInfo) (string, error) { + req := v.Tags.Get("required") + if req != "" { + reqB, err := strconv.ParseBool(req) + if err != nil { + return "", err + } + if reqB { + req = "true" + } + } + return req, nil + }, + } + + tmpl, err := template.New("envconfig").Funcs(functions).Parse(format) + if err != nil { + return err + } + + return Usaget(prefix, spec, out, tmpl) +} + +// Usaget writes usage information to the specified io.Writer using the specified template +func Usaget(prefix string, spec interface{}, out io.Writer, tmpl *template.Template) error { + // gather first + infos, err := gatherInfo(prefix, spec) + if err != nil { + return err + } + + return tmpl.Execute(out, infos) +} diff --git a/vendor/github.com/kelseyhightower/envconfig/usage_test.go b/vendor/github.com/kelseyhightower/envconfig/usage_test.go new file mode 100644 index 0000000..b433d19 --- /dev/null +++ b/vendor/github.com/kelseyhightower/envconfig/usage_test.go @@ -0,0 +1,155 @@ +// Copyright (c) 2016 Kelsey Hightower and others. All rights reserved. +// Use of this source code is governed by the MIT License that can be found in +// the LICENSE file. + +package envconfig + +import ( + "bytes" + "io" + "io/ioutil" + "log" + "os" + "strings" + "testing" + "text/tabwriter" +) + +var testUsageTableResult, testUsageListResult, testUsageCustomResult, testUsageBadFormatResult string + +func TestMain(m *testing.M) { + + // Load the expected test results from a text file + data, err := ioutil.ReadFile("testdata/default_table.txt") + if err != nil { + log.Fatal(err) + } + testUsageTableResult = string(data) + + data, err = ioutil.ReadFile("testdata/default_list.txt") + if err != nil { + log.Fatal(err) + } + testUsageListResult = string(data) + + data, err = ioutil.ReadFile("testdata/custom.txt") + if err != nil { + log.Fatal(err) + } + testUsageCustomResult = string(data) + + data, err = ioutil.ReadFile("testdata/fault.txt") + if err != nil { + log.Fatal(err) + } + testUsageBadFormatResult = string(data) + + retCode := m.Run() + os.Exit(retCode) +} + +func compareUsage(want, got string, t *testing.T) { + got = strings.Replace(got, " ", ".", -1) + if want != got { + shortest := len(want) + if len(got) < shortest { + shortest = len(got) + } + if len(want) != len(got) { + t.Errorf("expected result length of %d, found %d", len(want), len(got)) + } + for i := 0; i < shortest; i++ { + if want[i] != got[i] { + t.Errorf("difference at index %d, expected '%c' (%v), found '%c' (%v)\n", + i, want[i], want[i], got[i], got[i]) + break + } + } + t.Errorf("Complete Expected:\n'%s'\nComplete Found:\n'%s'\n", want, got) + } +} + +func TestUsageDefault(t *testing.T) { + var s Specification + os.Clearenv() + save := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + err := Usage("env_config", &s) + outC := make(chan string) + // copy the output in a separate goroutine so printing can't block indefinitely + go func() { + var buf bytes.Buffer + io.Copy(&buf, r) + outC <- buf.String() + }() + w.Close() + os.Stdout = save // restoring the real stdout + out := <-outC + + if err != nil { + t.Error(err.Error()) + } + compareUsage(testUsageTableResult, out, t) +} + +func TestUsageTable(t *testing.T) { + var s Specification + os.Clearenv() + 
buf := new(bytes.Buffer) + tabs := tabwriter.NewWriter(buf, 1, 0, 4, ' ', 0) + err := Usagef("env_config", &s, tabs, DefaultTableFormat) + tabs.Flush() + if err != nil { + t.Error(err.Error()) + } + compareUsage(testUsageTableResult, buf.String(), t) +} + +func TestUsageList(t *testing.T) { + var s Specification + os.Clearenv() + buf := new(bytes.Buffer) + err := Usagef("env_config", &s, buf, DefaultListFormat) + if err != nil { + t.Error(err.Error()) + } + compareUsage(testUsageListResult, buf.String(), t) +} + +func TestUsageCustomFormat(t *testing.T) { + var s Specification + os.Clearenv() + buf := new(bytes.Buffer) + err := Usagef("env_config", &s, buf, "{{range .}}{{usage_key .}}={{usage_description .}}\n{{end}}") + if err != nil { + t.Error(err.Error()) + } + compareUsage(testUsageCustomResult, buf.String(), t) +} + +func TestUsageUnknownKeyFormat(t *testing.T) { + var s Specification + unknownError := "template: envconfig:1:2: executing \"envconfig\" at <.UnknownKey>" + os.Clearenv() + buf := new(bytes.Buffer) + err := Usagef("env_config", &s, buf, "{{.UnknownKey}}") + if err == nil { + t.Errorf("expected 'unknown key' error, but got no error") + } + if strings.Index(err.Error(), unknownError) == -1 { + t.Errorf("expected '%s', but got '%s'", unknownError, err.Error()) + } +} + +func TestUsageBadFormat(t *testing.T) { + var s Specification + os.Clearenv() + // If you don't use two {{}} then you get a lieteral + buf := new(bytes.Buffer) + err := Usagef("env_config", &s, buf, "{{range .}}{.Key}\n{{end}}") + if err != nil { + t.Error(err.Error()) + } + compareUsage(testUsageBadFormatResult, buf.String(), t) +} diff --git a/vendor/github.com/lib/pq/.gitignore b/vendor/github.com/lib/pq/.gitignore new file mode 100644 index 0000000..0f1d00e --- /dev/null +++ b/vendor/github.com/lib/pq/.gitignore @@ -0,0 +1,4 @@ +.db +*.test +*~ +*.swp diff --git a/vendor/github.com/lib/pq/.travis.sh b/vendor/github.com/lib/pq/.travis.sh new file mode 100755 index 0000000..a297dc4 --- /dev/null +++ b/vendor/github.com/lib/pq/.travis.sh @@ -0,0 +1,86 @@ +#!/bin/bash + +set -eu + +client_configure() { + sudo chmod 600 $PQSSLCERTTEST_PATH/postgresql.key +} + +pgdg_repository() { + local sourcelist='sources.list.d/postgresql.list' + + curl -sS 'https://www.postgresql.org/media/keys/ACCC4CF8.asc' | sudo apt-key add - + echo deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main $PGVERSION | sudo tee "/etc/apt/$sourcelist" + sudo apt-get -o Dir::Etc::sourcelist="$sourcelist" -o Dir::Etc::sourceparts='-' -o APT::Get::List-Cleanup='0' update +} + +postgresql_configure() { + sudo tee /etc/postgresql/$PGVERSION/main/pg_hba.conf > /dev/null <<-config + local all all trust + hostnossl all pqgossltest 127.0.0.1/32 reject + hostnossl all pqgosslcert 127.0.0.1/32 reject + hostssl all pqgossltest 127.0.0.1/32 trust + hostssl all pqgosslcert 127.0.0.1/32 cert + host all all 127.0.0.1/32 trust + hostnossl all pqgossltest ::1/128 reject + hostnossl all pqgosslcert ::1/128 reject + hostssl all pqgossltest ::1/128 trust + hostssl all pqgosslcert ::1/128 cert + host all all ::1/128 trust + config + + xargs sudo install -o postgres -g postgres -m 600 -t /var/lib/postgresql/$PGVERSION/main/ <<-certificates + certs/root.crt + certs/server.crt + certs/server.key + certificates + + sort -VCu <<-versions || + $PGVERSION + 9.2 + versions + sudo tee -a /etc/postgresql/$PGVERSION/main/postgresql.conf > /dev/null <<-config + ssl_ca_file = 'root.crt' + ssl_cert_file = 'server.crt' + ssl_key_file = 'server.key' + config 
+
+	echo 127.0.0.1 postgres | sudo tee -a /etc/hosts > /dev/null
+
+	sudo service postgresql restart
+}
+
+postgresql_install() {
+	xargs sudo apt-get -y -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confnew' install <<-packages
+		postgresql-$PGVERSION
+		postgresql-server-dev-$PGVERSION
+		postgresql-contrib-$PGVERSION
+	packages
+}
+
+postgresql_uninstall() {
+	sudo service postgresql stop
+	xargs sudo apt-get -y --purge remove <<-packages
+		libpq-dev
+		libpq5
+		postgresql
+		postgresql-client-common
+		postgresql-common
+	packages
+	sudo rm -rf /var/lib/postgresql
+}
+
+megacheck_install() {
+	# Lock megacheck version at $MEGACHECK_VERSION to prevent spontaneous
+	# new error messages in old code.
+	go get -d honnef.co/go/tools/...
+	git -C $GOPATH/src/honnef.co/go/tools/ checkout $MEGACHECK_VERSION
+	go install honnef.co/go/tools/cmd/megacheck
+	megacheck --version
+}
+
+golint_install() {
+	go get github.com/golang/lint/golint
+}
+
+$1
diff --git a/vendor/github.com/lib/pq/.travis.yml b/vendor/github.com/lib/pq/.travis.yml
new file mode 100644
index 0000000..18556e0
--- /dev/null
+++ b/vendor/github.com/lib/pq/.travis.yml
@@ -0,0 +1,50 @@
+language: go
+
+go:
+  - 1.8.x
+  - 1.9.x
+  - 1.10.x
+  - master
+
+sudo: true
+
+env:
+  global:
+    - PGUSER=postgres
+    - PQGOSSLTESTS=1
+    - PQSSLCERTTEST_PATH=$PWD/certs
+    - PGHOST=127.0.0.1
+    - MEGACHECK_VERSION=2017.2.2
+  matrix:
+    - PGVERSION=10
+    - PGVERSION=9.6
+    - PGVERSION=9.5
+    - PGVERSION=9.4
+    - PGVERSION=9.3
+    - PGVERSION=9.2
+    - PGVERSION=9.1
+    - PGVERSION=9.0
+
+before_install:
+  - ./.travis.sh postgresql_uninstall
+  - ./.travis.sh pgdg_repository
+  - ./.travis.sh postgresql_install
+  - ./.travis.sh postgresql_configure
+  - ./.travis.sh client_configure
+  - ./.travis.sh megacheck_install
+  - ./.travis.sh golint_install
+  - go get golang.org/x/tools/cmd/goimports
+
+before_script:
+  - createdb pqgotest
+  - createuser -DRS pqgossltest
+  - createuser -DRS pqgosslcert
+
+script:
+  - >
+    goimports -d -e $(find -name '*.go') | awk '{ print } END { exit NR == 0 ? 0 : 1 }'
+  - go vet ./...
+  - megacheck -go 1.8 ./...
+  - golint ./...
+  - PQTEST_BINARY_PARAMETERS=no go test -race -v ./...
+  - PQTEST_BINARY_PARAMETERS=yes go test -race -v ./...
diff --git a/vendor/github.com/lib/pq/CONTRIBUTING.md b/vendor/github.com/lib/pq/CONTRIBUTING.md
new file mode 100644
index 0000000..84c937f
--- /dev/null
+++ b/vendor/github.com/lib/pq/CONTRIBUTING.md
@@ -0,0 +1,29 @@
+## Contributing to pq
+
+`pq` has a backlog of pull requests, but contributions are still very
+much welcome. You can help with patch review, submitting bug reports,
+or adding new functionality. There is no formal style guide, but
+please conform to the style of existing code and general Go formatting
+conventions when submitting patches.
+
+### Patch review
+
+Help review existing open pull requests by commenting on the code or
+proposed functionality.
+
+### Bug reports
+
+We appreciate any bug reports, but especially ones with self-contained
+(doesn't depend on code outside of pq), minimal (can't be simplified
+further) test cases. It's especially helpful if you can submit a pull
+request with just the failing test case (you'll probably want to
+pattern it after the tests in
+[conn_test.go](https://github.com/lib/pq/blob/master/conn_test.go)).
+
+### New functionality
+
+There are a number of pending patches for new functionality, so
+additional feature patches will take a while to merge. Still, patches
+are generally reviewed based on usefulness and complexity in addition
+to time-in-queue, so if you have a knockout idea, take a shot. Feel
+free to open an issue discussing your proposed patch beforehand.
diff --git a/vendor/github.com/lib/pq/LICENSE.md b/vendor/github.com/lib/pq/LICENSE.md
new file mode 100644
index 0000000..5773904
--- /dev/null
+++ b/vendor/github.com/lib/pq/LICENSE.md
@@ -0,0 +1,8 @@
+Copyright (c) 2011-2013, 'pq' Contributors
+Portions Copyright (C) 2011 Blake Mizerany
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/lib/pq/README.md b/vendor/github.com/lib/pq/README.md
new file mode 100644
index 0000000..781c89e
--- /dev/null
+++ b/vendor/github.com/lib/pq/README.md
@@ -0,0 +1,106 @@
+# pq - A pure Go postgres driver for Go's database/sql package
+
+[![GoDoc](https://godoc.org/github.com/lib/pq?status.svg)](https://godoc.org/github.com/lib/pq)
+[![Build Status](https://travis-ci.org/lib/pq.svg?branch=master)](https://travis-ci.org/lib/pq)
+
+## Install
+
+    go get github.com/lib/pq
+
+## Docs
+
+For detailed documentation and basic usage examples, please see the package
+documentation at <https://godoc.org/github.com/lib/pq>.
+
+## Tests
+
+`go test` is used for testing. A running PostgreSQL server is
+required, with the ability to log in. The default database to connect
+to for testing is "pqgotest", but it can be overridden using environment
+variables.
+
+Example:
+
+    PGHOST=/run/postgresql go test github.com/lib/pq
+
+Optionally, a benchmark suite can be run as part of the tests:
+
+    PGHOST=/run/postgresql go test -bench .
+
+## Features
+
+* SSL
+* Handles bad connections for `database/sql`
+* Scan `time.Time` correctly (i.e. `timestamp[tz]`, `time[tz]`, `date`)
+* Scan binary blobs correctly (i.e. `bytea`)
+* Package for `hstore` support
+* COPY FROM support
+* pq.ParseURL for converting URLs to connection strings for sql.Open
+* Many libpq compatible environment variables
+* Unix socket support
+* Notifications: `LISTEN`/`NOTIFY`
+* pgpass support
+
+## Future / Things you can help with
+
+* Better COPY FROM / COPY TO (see discussion in #181)
+
+## Thank you (alphabetical)
+
+Some of these contributors are from the original library `bmizerany/pq.go` whose
+code still exists in here.
+
+* Andy Balholm (andybalholm)
+* Ben Berkert (benburkert)
+* Benjamin Heatwole (bheatwole)
+* Bill Mill (llimllib)
+* Bjørn Madsen (aeons)
+* Blake Gentry (bgentry)
+* Brad Fitzpatrick (bradfitz)
+* Charlie Melbye (cmelbye)
+* Chris Bandy (cbandy)
+* Chris Gilling (cgilling)
+* Chris Walsh (cwds)
+* Dan Sosedoff (sosedoff)
+* Daniel Farina (fdr)
+* Eric Chlebek (echlebek)
+* Eric Garrido (minusnine)
+* Eric Urban (hydrogen18)
+* Everyone at The Go Team
+* Evan Shaw (edsrzf)
+* Ewan Chou (coocood)
+* Fazal Majid (fazalmajid)
+* Federico Romero (federomero)
+* Fumin (fumin)
+* Gary Burd (garyburd)
+* Heroku (heroku)
+* James Pozdena (jpoz)
+* Jason McVetta (jmcvetta)
+* Jeremy Jay (pbnjay)
+* Joakim Sernbrant (serbaut)
+* John Gallagher (jgallagher)
+* Jonathan Rudenberg (titanous)
+* Joël Stemmer (jstemmer)
+* Kamil Kisiel (kisielk)
+* Kelly Dunn (kellydunn)
+* Keith Rarick (kr)
+* Kir Shatrov (kirs)
+* Lann Martin (lann)
+* Maciek Sakrejda (uhoh-itsmaciek)
+* Marc Brinkmann (mbr)
+* Marko Tiikkaja (johto)
+* Matt Newberry (MattNewberry)
+* Matt Robenolt (mattrobenolt)
+* Martin Olsen (martinolsen)
+* Mike Lewis (mikelikespie)
+* Nicolas Patry (Narsil)
+* Oliver Tonnhofer (olt)
+* Patrick Hayes (phayes)
+* Paul Hammond (paulhammond)
+* Ryan Smith (ryandotsmith)
+* Samuel Stauffer (samuel)
+* Timothée Peignier (cyberdelia)
+* Travis Cline (tmc)
+* TruongSinh Tran-Nguyen (truongsinh)
+* Yaismel Miranda (ympons)
+* notedit (notedit)
diff --git a/vendor/github.com/lib/pq/array.go b/vendor/github.com/lib/pq/array.go
new file mode 100644
index 0000000..e4933e2
--- /dev/null
+++ b/vendor/github.com/lib/pq/array.go
@@ -0,0 +1,756 @@
+package pq
+
+import (
+	"bytes"
+	"database/sql"
+	"database/sql/driver"
+	"encoding/hex"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+var typeByteSlice = reflect.TypeOf([]byte{})
+var typeDriverValuer = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
+var typeSQLScanner = reflect.TypeOf((*sql.Scanner)(nil)).Elem()
+
+// Array returns the optimal driver.Valuer and sql.Scanner for an array or
+// slice of any dimension.
+//
+// For example:
+//  db.Query(`SELECT * FROM t WHERE id = ANY($1)`, pq.Array([]int{235, 401}))
+//
+//  var x []sql.NullInt64
+//  db.QueryRow(`SELECT ARRAY[235, 401]`).Scan(pq.Array(&x))
+//
+// Scanning multi-dimensional arrays is not supported. Arrays where the lower
+// bound is not one (such as `[0:0]={1}`) are not supported.
+func Array(a interface{}) interface {
+	driver.Valuer
+	sql.Scanner
+} {
+	switch a := a.(type) {
+	case []bool:
+		return (*BoolArray)(&a)
+	case []float64:
+		return (*Float64Array)(&a)
+	case []int64:
+		return (*Int64Array)(&a)
+	case []string:
+		return (*StringArray)(&a)
+
+	case *[]bool:
+		return (*BoolArray)(a)
+	case *[]float64:
+		return (*Float64Array)(a)
+	case *[]int64:
+		return (*Int64Array)(a)
+	case *[]string:
+		return (*StringArray)(a)
+	}
+
+	return GenericArray{a}
+}
+
+// ArrayDelimiter may be optionally implemented by driver.Valuer or sql.Scanner
+// to override the array delimiter used by GenericArray.
+type ArrayDelimiter interface {
+	// ArrayDelimiter returns the delimiter character(s) for this element's type.
+	ArrayDelimiter() string
+}
+
+// BoolArray represents a one-dimensional array of the PostgreSQL boolean type.
+type BoolArray []bool
+
+// Scan implements the sql.Scanner interface.
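+// It accepts []byte and string src values in the backend's array text
+// format; a nil src sets the array to nil.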
+func (a *BoolArray) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to BoolArray", src) +} + +func (a *BoolArray) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "BoolArray") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(BoolArray, len(elems)) + for i, v := range elems { + if len(v) != 1 { + return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v) + } + switch v[0] { + case 't': + b[i] = true + case 'f': + b[i] = false + default: + return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a BoolArray) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be exactly two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1+2*n) + + for i := 0; i < n; i++ { + b[2*i] = ',' + if a[i] { + b[1+2*i] = 't' + } else { + b[1+2*i] = 'f' + } + } + + b[0] = '{' + b[2*n] = '}' + + return string(b), nil + } + + return "{}", nil +} + +// ByteaArray represents a one-dimensional array of the PostgreSQL bytea type. +type ByteaArray [][]byte + +// Scan implements the sql.Scanner interface. +func (a *ByteaArray) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to ByteaArray", src) +} + +func (a *ByteaArray) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "ByteaArray") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(ByteaArray, len(elems)) + for i, v := range elems { + b[i], err = parseBytea(v) + if err != nil { + return fmt.Errorf("could not parse bytea array index %d: %s", i, err.Error()) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. It uses the "hex" format which +// is only supported on PostgreSQL 9.0 or newer. +func (a ByteaArray) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, 2*N bytes of quotes, + // 3*N bytes of hex formatting, and N-1 bytes of delimiters. + size := 1 + 6*n + for _, x := range a { + size += hex.EncodedLen(len(x)) + } + + b := make([]byte, size) + + for i, s := 0, b; i < n; i++ { + o := copy(s, `,"\\x`) + o += hex.Encode(s[o:], a[i]) + s[o] = '"' + s = s[o+1:] + } + + b[0] = '{' + b[size-1] = '}' + + return string(b), nil + } + + return "{}", nil +} + +// Float64Array represents a one-dimensional array of the PostgreSQL double +// precision type. +type Float64Array []float64 + +// Scan implements the sql.Scanner interface. 
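+// It accepts []byte and string input; elements are parsed with
+// strconv.ParseFloat, and a nil src sets the array to nil.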
+func (a *Float64Array) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to Float64Array", src) +} + +func (a *Float64Array) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "Float64Array") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(Float64Array, len(elems)) + for i, v := range elems { + if b[i], err = strconv.ParseFloat(string(v), 64); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a Float64Array) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+2*n) + b[0] = '{' + + b = strconv.AppendFloat(b, a[0], 'f', -1, 64) + for i := 1; i < n; i++ { + b = append(b, ',') + b = strconv.AppendFloat(b, a[i], 'f', -1, 64) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// GenericArray implements the driver.Valuer and sql.Scanner interfaces for +// an array or slice of any dimension. +type GenericArray struct{ A interface{} } + +func (GenericArray) evaluateDestination(rt reflect.Type) (reflect.Type, func([]byte, reflect.Value) error, string) { + var assign func([]byte, reflect.Value) error + var del = "," + + // TODO calculate the assign function for other types + // TODO repeat this section on the element type of arrays or slices (multidimensional) + { + if reflect.PtrTo(rt).Implements(typeSQLScanner) { + // dest is always addressable because it is an element of a slice. + assign = func(src []byte, dest reflect.Value) (err error) { + ss := dest.Addr().Interface().(sql.Scanner) + if src == nil { + err = ss.Scan(nil) + } else { + err = ss.Scan(src) + } + return + } + goto FoundType + } + + assign = func([]byte, reflect.Value) error { + return fmt.Errorf("pq: scanning to %s is not implemented; only sql.Scanner", rt) + } + } + +FoundType: + + if ad, ok := reflect.Zero(rt).Interface().(ArrayDelimiter); ok { + del = ad.ArrayDelimiter() + } + + return rt, assign, del +} + +// Scan implements the sql.Scanner interface. 
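+// The wrapped value A must be a non-nil pointer to a slice or array; a nil
+// src is accepted only when the destination is a slice.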
+func (a GenericArray) Scan(src interface{}) error { + dpv := reflect.ValueOf(a.A) + switch { + case dpv.Kind() != reflect.Ptr: + return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A) + case dpv.IsNil(): + return fmt.Errorf("pq: destination %T is nil", a.A) + } + + dv := dpv.Elem() + switch dv.Kind() { + case reflect.Slice: + case reflect.Array: + default: + return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A) + } + + switch src := src.(type) { + case []byte: + return a.scanBytes(src, dv) + case string: + return a.scanBytes([]byte(src), dv) + case nil: + if dv.Kind() == reflect.Slice { + dv.Set(reflect.Zero(dv.Type())) + return nil + } + } + + return fmt.Errorf("pq: cannot convert %T to %s", src, dv.Type()) +} + +func (a GenericArray) scanBytes(src []byte, dv reflect.Value) error { + dtype, assign, del := a.evaluateDestination(dv.Type().Elem()) + dims, elems, err := parseArray(src, []byte(del)) + if err != nil { + return err + } + + // TODO allow multidimensional + + if len(dims) > 1 { + return fmt.Errorf("pq: scanning from multidimensional ARRAY%s is not implemented", + strings.Replace(fmt.Sprint(dims), " ", "][", -1)) + } + + // Treat a zero-dimensional array like an array with a single dimension of zero. + if len(dims) == 0 { + dims = append(dims, 0) + } + + for i, rt := 0, dv.Type(); i < len(dims); i, rt = i+1, rt.Elem() { + switch rt.Kind() { + case reflect.Slice: + case reflect.Array: + if rt.Len() != dims[i] { + return fmt.Errorf("pq: cannot convert ARRAY%s to %s", + strings.Replace(fmt.Sprint(dims), " ", "][", -1), dv.Type()) + } + default: + // TODO handle multidimensional + } + } + + values := reflect.MakeSlice(reflect.SliceOf(dtype), len(elems), len(elems)) + for i, e := range elems { + if err := assign(e, values.Index(i)); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + } + + // TODO handle multidimensional + + switch dv.Kind() { + case reflect.Slice: + dv.Set(values.Slice(0, dims[0])) + case reflect.Array: + for i := 0; i < dims[0]; i++ { + dv.Index(i).Set(values.Index(i)) + } + } + + return nil +} + +// Value implements the driver.Valuer interface. +func (a GenericArray) Value() (driver.Value, error) { + if a.A == nil { + return nil, nil + } + + rv := reflect.ValueOf(a.A) + + switch rv.Kind() { + case reflect.Slice: + if rv.IsNil() { + return nil, nil + } + case reflect.Array: + default: + return nil, fmt.Errorf("pq: Unable to convert %T to array", a.A) + } + + if n := rv.Len(); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 0, 1+2*n) + + b, _, err := appendArray(b, rv, n) + return string(b), err + } + + return "{}", nil +} + +// Int64Array represents a one-dimensional array of the PostgreSQL integer types. +type Int64Array []int64 + +// Scan implements the sql.Scanner interface. 
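+// It accepts []byte and string input; elements are parsed with
+// strconv.ParseInt in base 10, and a nil src sets the array to nil.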
+func (a *Int64Array) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to Int64Array", src) +} + +func (a *Int64Array) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "Int64Array") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(Int64Array, len(elems)) + for i, v := range elems { + if b[i], err = strconv.ParseInt(string(v), 10, 64); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a Int64Array) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+2*n) + b[0] = '{' + + b = strconv.AppendInt(b, a[0], 10) + for i := 1; i < n; i++ { + b = append(b, ',') + b = strconv.AppendInt(b, a[i], 10) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// StringArray represents a one-dimensional array of the PostgreSQL character types. +type StringArray []string + +// Scan implements the sql.Scanner interface. +func (a *StringArray) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to StringArray", src) +} + +func (a *StringArray) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "StringArray") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(StringArray, len(elems)) + for i, v := range elems { + if b[i] = string(v); v == nil { + return fmt.Errorf("pq: parsing array element index %d: cannot convert nil to string", i) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a StringArray) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, 2*N bytes of quotes, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+3*n) + b[0] = '{' + + b = appendArrayQuotedBytes(b, []byte(a[0])) + for i := 1; i < n; i++ { + b = append(b, ',') + b = appendArrayQuotedBytes(b, []byte(a[i])) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// appendArray appends rv to the buffer, returning the extended buffer and +// the delimiter used between elements. +// +// It panics when n <= 0 or rv's Kind is not reflect.Array nor reflect.Slice. +func appendArray(b []byte, rv reflect.Value, n int) ([]byte, string, error) { + var del string + var err error + + b = append(b, '{') + + if b, del, err = appendArrayElement(b, rv.Index(0)); err != nil { + return b, del, err + } + + for i := 1; i < n; i++ { + b = append(b, del...) + if b, del, err = appendArrayElement(b, rv.Index(i)); err != nil { + return b, del, err + } + } + + return append(b, '}'), del, nil +} + +// appendArrayElement appends rv to the buffer, returning the extended buffer +// and the delimiter to use before the next element. 
+// +// When rv's Kind is neither reflect.Array nor reflect.Slice, it is converted +// using driver.DefaultParameterConverter and the resulting []byte or string +// is double-quoted. +// +// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO +func appendArrayElement(b []byte, rv reflect.Value) ([]byte, string, error) { + if k := rv.Kind(); k == reflect.Array || k == reflect.Slice { + if t := rv.Type(); t != typeByteSlice && !t.Implements(typeDriverValuer) { + if n := rv.Len(); n > 0 { + return appendArray(b, rv, n) + } + + return b, "", nil + } + } + + var del = "," + var err error + var iv interface{} = rv.Interface() + + if ad, ok := iv.(ArrayDelimiter); ok { + del = ad.ArrayDelimiter() + } + + if iv, err = driver.DefaultParameterConverter.ConvertValue(iv); err != nil { + return b, del, err + } + + switch v := iv.(type) { + case nil: + return append(b, "NULL"...), del, nil + case []byte: + return appendArrayQuotedBytes(b, v), del, nil + case string: + return appendArrayQuotedBytes(b, []byte(v)), del, nil + } + + b, err = appendValue(b, iv) + return b, del, err +} + +func appendArrayQuotedBytes(b, v []byte) []byte { + b = append(b, '"') + for { + i := bytes.IndexAny(v, `"\`) + if i < 0 { + b = append(b, v...) + break + } + if i > 0 { + b = append(b, v[:i]...) + } + b = append(b, '\\', v[i]) + v = v[i+1:] + } + return append(b, '"') +} + +func appendValue(b []byte, v driver.Value) ([]byte, error) { + return append(b, encode(nil, v, 0)...), nil +} + +// parseArray extracts the dimensions and elements of an array represented in +// text format. Only representations emitted by the backend are supported. +// Notably, whitespace around brackets and delimiters is significant, and NULL +// is case-sensitive. +// +// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO +func parseArray(src, del []byte) (dims []int, elems [][]byte, err error) { + var depth, i int + + if len(src) < 1 || src[0] != '{' { + return nil, nil, fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '{', 0) + } + +Open: + for i < len(src) { + switch src[i] { + case '{': + depth++ + i++ + case '}': + elems = make([][]byte, 0) + goto Close + default: + break Open + } + } + dims = make([]int, i) + +Element: + for i < len(src) { + switch src[i] { + case '{': + if depth == len(dims) { + break Element + } + depth++ + dims[depth-1] = 0 + i++ + case '"': + var elem = []byte{} + var escape bool + for i++; i < len(src); i++ { + if escape { + elem = append(elem, src[i]) + escape = false + } else { + switch src[i] { + default: + elem = append(elem, src[i]) + case '\\': + escape = true + case '"': + elems = append(elems, elem) + i++ + break Element + } + } + } + default: + for start := i; i < len(src); i++ { + if bytes.HasPrefix(src[i:], del) || src[i] == '}' { + elem := src[start:i] + if len(elem) == 0 { + return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) + } + if bytes.Equal(elem, []byte("NULL")) { + elem = nil + } + elems = append(elems, elem) + break Element + } + } + } + } + + for i < len(src) { + if bytes.HasPrefix(src[i:], del) && depth > 0 { + dims[depth-1]++ + i += len(del) + goto Element + } else if src[i] == '}' && depth > 0 { + dims[depth-1]++ + depth-- + i++ + } else { + return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) + } + } + +Close: + for i < len(src) { + if src[i] == '}' && depth > 0 { + depth-- + i++ + } else { + return nil, nil, fmt.Errorf("pq: unable to parse array; 
unexpected %q at offset %d", src[i], i) + } + } + if depth > 0 { + err = fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '}', i) + } + if err == nil { + for _, d := range dims { + if (len(elems) % d) != 0 { + err = fmt.Errorf("pq: multidimensional arrays must have elements with matching dimensions") + } + } + } + return +} + +func scanLinearArray(src, del []byte, typ string) (elems [][]byte, err error) { + dims, elems, err := parseArray(src, del) + if err != nil { + return nil, err + } + if len(dims) > 1 { + return nil, fmt.Errorf("pq: cannot convert ARRAY%s to %s", strings.Replace(fmt.Sprint(dims), " ", "][", -1), typ) + } + return elems, err +} diff --git a/vendor/github.com/lib/pq/array_test.go b/vendor/github.com/lib/pq/array_test.go new file mode 100644 index 0000000..f724bcd --- /dev/null +++ b/vendor/github.com/lib/pq/array_test.go @@ -0,0 +1,1311 @@ +package pq + +import ( + "bytes" + "database/sql" + "database/sql/driver" + "math/rand" + "reflect" + "strings" + "testing" +) + +func TestParseArray(t *testing.T) { + for _, tt := range []struct { + input string + delim string + dims []int + elems [][]byte + }{ + {`{}`, `,`, nil, [][]byte{}}, + {`{NULL}`, `,`, []int{1}, [][]byte{nil}}, + {`{a}`, `,`, []int{1}, [][]byte{{'a'}}}, + {`{a,b}`, `,`, []int{2}, [][]byte{{'a'}, {'b'}}}, + {`{{a,b}}`, `,`, []int{1, 2}, [][]byte{{'a'}, {'b'}}}, + {`{{a},{b}}`, `,`, []int{2, 1}, [][]byte{{'a'}, {'b'}}}, + {`{{{a,b},{c,d},{e,f}}}`, `,`, []int{1, 3, 2}, [][]byte{ + {'a'}, {'b'}, {'c'}, {'d'}, {'e'}, {'f'}, + }}, + {`{""}`, `,`, []int{1}, [][]byte{{}}}, + {`{","}`, `,`, []int{1}, [][]byte{{','}}}, + {`{",",","}`, `,`, []int{2}, [][]byte{{','}, {','}}}, + {`{{",",","}}`, `,`, []int{1, 2}, [][]byte{{','}, {','}}}, + {`{{","},{","}}`, `,`, []int{2, 1}, [][]byte{{','}, {','}}}, + {`{{{",",","},{",",","},{",",","}}}`, `,`, []int{1, 3, 2}, [][]byte{ + {','}, {','}, {','}, {','}, {','}, {','}, + }}, + {`{"\"}"}`, `,`, []int{1}, [][]byte{{'"', '}'}}}, + {`{"\"","\""}`, `,`, []int{2}, [][]byte{{'"'}, {'"'}}}, + {`{{"\"","\""}}`, `,`, []int{1, 2}, [][]byte{{'"'}, {'"'}}}, + {`{{"\""},{"\""}}`, `,`, []int{2, 1}, [][]byte{{'"'}, {'"'}}}, + {`{{{"\"","\""},{"\"","\""},{"\"","\""}}}`, `,`, []int{1, 3, 2}, [][]byte{ + {'"'}, {'"'}, {'"'}, {'"'}, {'"'}, {'"'}, + }}, + {`{axyzb}`, `xyz`, []int{2}, [][]byte{{'a'}, {'b'}}}, + } { + dims, elems, err := parseArray([]byte(tt.input), []byte(tt.delim)) + + if err != nil { + t.Fatalf("Expected no error for %q, got %q", tt.input, err) + } + if !reflect.DeepEqual(dims, tt.dims) { + t.Errorf("Expected %v dimensions for %q, got %v", tt.dims, tt.input, dims) + } + if !reflect.DeepEqual(elems, tt.elems) { + t.Errorf("Expected %v elements for %q, got %v", tt.elems, tt.input, elems) + } + } +} + +func TestParseArrayError(t *testing.T) { + for _, tt := range []struct { + input, err string + }{ + {``, "expected '{' at offset 0"}, + {`x`, "expected '{' at offset 0"}, + {`}`, "expected '{' at offset 0"}, + {`{`, "expected '}' at offset 1"}, + {`{{}`, "expected '}' at offset 3"}, + {`{}}`, "unexpected '}' at offset 2"}, + {`{,}`, "unexpected ',' at offset 1"}, + {`{,x}`, "unexpected ',' at offset 1"}, + {`{x,}`, "unexpected '}' at offset 3"}, + {`{x,{`, "unexpected '{' at offset 3"}, + {`{x},`, "unexpected ',' at offset 3"}, + {`{x}}`, "unexpected '}' at offset 3"}, + {`{{x}`, "expected '}' at offset 4"}, + {`{""x}`, "unexpected 'x' at offset 3"}, + {`{{a},{b,c}}`, "multidimensional arrays must have elements with matching dimensions"}, + } { + _, _, err := 
parseArray([]byte(tt.input), []byte{','}) + + if err == nil { + t.Fatalf("Expected error for %q, got none", tt.input) + } + if !strings.Contains(err.Error(), tt.err) { + t.Errorf("Expected error to contain %q for %q, got %q", tt.err, tt.input, err) + } + } +} + +func TestArrayScanner(t *testing.T) { + var s sql.Scanner = Array(&[]bool{}) + if _, ok := s.(*BoolArray); !ok { + t.Errorf("Expected *BoolArray, got %T", s) + } + + s = Array(&[]float64{}) + if _, ok := s.(*Float64Array); !ok { + t.Errorf("Expected *Float64Array, got %T", s) + } + + s = Array(&[]int64{}) + if _, ok := s.(*Int64Array); !ok { + t.Errorf("Expected *Int64Array, got %T", s) + } + + s = Array(&[]string{}) + if _, ok := s.(*StringArray); !ok { + t.Errorf("Expected *StringArray, got %T", s) + } + + for _, tt := range []interface{}{ + &[]sql.Scanner{}, + &[][]bool{}, + &[][]float64{}, + &[][]int64{}, + &[][]string{}, + } { + s = Array(tt) + if _, ok := s.(GenericArray); !ok { + t.Errorf("Expected GenericArray for %T, got %T", tt, s) + } + } +} + +func TestArrayValuer(t *testing.T) { + var v driver.Valuer = Array([]bool{}) + if _, ok := v.(*BoolArray); !ok { + t.Errorf("Expected *BoolArray, got %T", v) + } + + v = Array([]float64{}) + if _, ok := v.(*Float64Array); !ok { + t.Errorf("Expected *Float64Array, got %T", v) + } + + v = Array([]int64{}) + if _, ok := v.(*Int64Array); !ok { + t.Errorf("Expected *Int64Array, got %T", v) + } + + v = Array([]string{}) + if _, ok := v.(*StringArray); !ok { + t.Errorf("Expected *StringArray, got %T", v) + } + + for _, tt := range []interface{}{ + nil, + []driver.Value{}, + [][]bool{}, + [][]float64{}, + [][]int64{}, + [][]string{}, + } { + v = Array(tt) + if _, ok := v.(GenericArray); !ok { + t.Errorf("Expected GenericArray for %T, got %T", tt, v) + } + } +} + +func TestBoolArrayScanUnsupported(t *testing.T) { + var arr BoolArray + err := arr.Scan(1) + + if err == nil { + t.Fatal("Expected error when scanning from int") + } + if !strings.Contains(err.Error(), "int to BoolArray") { + t.Errorf("Expected type to be mentioned when scanning, got %q", err) + } +} + +func TestBoolArrayScanEmpty(t *testing.T) { + var arr BoolArray + err := arr.Scan(`{}`) + + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + if arr == nil || len(arr) != 0 { + t.Errorf("Expected empty, got %#v", arr) + } +} + +func TestBoolArrayScanNil(t *testing.T) { + arr := BoolArray{true, true, true} + err := arr.Scan(nil) + + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + if arr != nil { + t.Errorf("Expected nil, got %+v", arr) + } +} + +var BoolArrayStringTests = []struct { + str string + arr BoolArray +}{ + {`{}`, BoolArray{}}, + {`{t}`, BoolArray{true}}, + {`{f,t}`, BoolArray{false, true}}, +} + +func TestBoolArrayScanBytes(t *testing.T) { + for _, tt := range BoolArrayStringTests { + bytes := []byte(tt.str) + arr := BoolArray{true, true, true} + err := arr.Scan(bytes) + + if err != nil { + t.Fatalf("Expected no error for %q, got %v", bytes, err) + } + if !reflect.DeepEqual(arr, tt.arr) { + t.Errorf("Expected %+v for %q, got %+v", tt.arr, bytes, arr) + } + } +} + +func BenchmarkBoolArrayScanBytes(b *testing.B) { + var a BoolArray + var x interface{} = []byte(`{t,f,t,f,t,f,t,f,t,f}`) + + for i := 0; i < b.N; i++ { + a = BoolArray{} + a.Scan(x) + } +} + +func TestBoolArrayScanString(t *testing.T) { + for _, tt := range BoolArrayStringTests { + arr := BoolArray{true, true, true} + err := arr.Scan(tt.str) + + if err != nil { + t.Fatalf("Expected no error for %q, got %v", tt.str, err) + } + 
if !reflect.DeepEqual(arr, tt.arr) { + t.Errorf("Expected %+v for %q, got %+v", tt.arr, tt.str, arr) + } + } +} + +func TestBoolArrayScanError(t *testing.T) { + for _, tt := range []struct { + input, err string + }{ + {``, "unable to parse array"}, + {`{`, "unable to parse array"}, + {`{{t},{f}}`, "cannot convert ARRAY[2][1] to BoolArray"}, + {`{NULL}`, `could not parse boolean array index 0: invalid boolean ""`}, + {`{a}`, `could not parse boolean array index 0: invalid boolean "a"`}, + {`{t,b}`, `could not parse boolean array index 1: invalid boolean "b"`}, + {`{t,f,cd}`, `could not parse boolean array index 2: invalid boolean "cd"`}, + } { + arr := BoolArray{true, true, true} + err := arr.Scan(tt.input) + + if err == nil { + t.Fatalf("Expected error for %q, got none", tt.input) + } + if !strings.Contains(err.Error(), tt.err) { + t.Errorf("Expected error to contain %q for %q, got %q", tt.err, tt.input, err) + } + if !reflect.DeepEqual(arr, BoolArray{true, true, true}) { + t.Errorf("Expected destination not to change for %q, got %+v", tt.input, arr) + } + } +} + +func TestBoolArrayValue(t *testing.T) { + result, err := BoolArray(nil).Value() + + if err != nil { + t.Fatalf("Expected no error for nil, got %v", err) + } + if result != nil { + t.Errorf("Expected nil, got %q", result) + } + + result, err = BoolArray([]bool{}).Value() + + if err != nil { + t.Fatalf("Expected no error for empty, got %v", err) + } + if expected := `{}`; !reflect.DeepEqual(result, expected) { + t.Errorf("Expected empty, got %q", result) + } + + result, err = BoolArray([]bool{false, true, false}).Value() + + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + if expected := `{f,t,f}`; !reflect.DeepEqual(result, expected) { + t.Errorf("Expected %q, got %q", expected, result) + } +} + +func BenchmarkBoolArrayValue(b *testing.B) { + rand.Seed(1) + x := make([]bool, 10) + for i := 0; i < len(x); i++ { + x[i] = rand.Intn(2) == 0 + } + a := BoolArray(x) + + for i := 0; i < b.N; i++ { + a.Value() + } +} + +func TestByteaArrayScanUnsupported(t *testing.T) { + var arr ByteaArray + err := arr.Scan(1) + + if err == nil { + t.Fatal("Expected error when scanning from int") + } + if !strings.Contains(err.Error(), "int to ByteaArray") { + t.Errorf("Expected type to be mentioned when scanning, got %q", err) + } +} + +func TestByteaArrayScanEmpty(t *testing.T) { + var arr ByteaArray + err := arr.Scan(`{}`) + + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + if arr == nil || len(arr) != 0 { + t.Errorf("Expected empty, got %#v", arr) + } +} + +func TestByteaArrayScanNil(t *testing.T) { + arr := ByteaArray{{2}, {6}, {0, 0}} + err := arr.Scan(nil) + + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + if arr != nil { + t.Errorf("Expected nil, got %+v", arr) + } +} + +var ByteaArrayStringTests = []struct { + str string + arr ByteaArray +}{ + {`{}`, ByteaArray{}}, + {`{NULL}`, ByteaArray{nil}}, + {`{"\\xfeff"}`, ByteaArray{{'\xFE', '\xFF'}}}, + {`{"\\xdead","\\xbeef"}`, ByteaArray{{'\xDE', '\xAD'}, {'\xBE', '\xEF'}}}, +} + +func TestByteaArrayScanBytes(t *testing.T) { + for _, tt := range ByteaArrayStringTests { + bytes := []byte(tt.str) + arr := ByteaArray{{2}, {6}, {0, 0}} + err := arr.Scan(bytes) + + if err != nil { + t.Fatalf("Expected no error for %q, got %v", bytes, err) + } + if !reflect.DeepEqual(arr, tt.arr) { + t.Errorf("Expected %+v for %q, got %+v", tt.arr, bytes, arr) + } + } +} + +func BenchmarkByteaArrayScanBytes(b *testing.B) { + var a ByteaArray + var x interface{} = 
[]byte(`{"\\xfe","\\xff","\\xdead","\\xbeef","\\xfe","\\xff","\\xdead","\\xbeef","\\xfe","\\xff"}`) + + for i := 0; i < b.N; i++ { + a = ByteaArray{} + a.Scan(x) + } +} + +func TestByteaArrayScanString(t *testing.T) { + for _, tt := range ByteaArrayStringTests { + arr := ByteaArray{{2}, {6}, {0, 0}} + err := arr.Scan(tt.str) + + if err != nil { + t.Fatalf("Expected no error for %q, got %v", tt.str, err) + } + if !reflect.DeepEqual(arr, tt.arr) { + t.Errorf("Expected %+v for %q, got %+v", tt.arr, tt.str, arr) + } + } +} + +func TestByteaArrayScanError(t *testing.T) { + for _, tt := range []struct { + input, err string + }{ + {``, "unable to parse array"}, + {`{`, "unable to parse array"}, + {`{{"\\xfeff"},{"\\xbeef"}}`, "cannot convert ARRAY[2][1] to ByteaArray"}, + {`{"\\abc"}`, "could not parse bytea array index 0: could not parse bytea value"}, + } { + arr := ByteaArray{{2}, {6}, {0, 0}} + err := arr.Scan(tt.input) + + if err == nil { + t.Fatalf("Expected error for %q, got none", tt.input) + } + if !strings.Contains(err.Error(), tt.err) { + t.Errorf("Expected error to contain %q for %q, got %q", tt.err, tt.input, err) + } + if !reflect.DeepEqual(arr, ByteaArray{{2}, {6}, {0, 0}}) { + t.Errorf("Expected destination not to change for %q, got %+v", tt.input, arr) + } + } +} + +func TestByteaArrayValue(t *testing.T) { + result, err := ByteaArray(nil).Value() + + if err != nil { + t.Fatalf("Expected no error for nil, got %v", err) + } + if result != nil { + t.Errorf("Expected nil, got %q", result) + } + + result, err = ByteaArray([][]byte{}).Value() + + if err != nil { + t.Fatalf("Expected no error for empty, got %v", err) + } + if expected := `{}`; !reflect.DeepEqual(result, expected) { + t.Errorf("Expected empty, got %q", result) + } + + result, err = ByteaArray([][]byte{{'\xDE', '\xAD', '\xBE', '\xEF'}, {'\xFE', '\xFF'}, {}}).Value() + + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + if expected := `{"\\xdeadbeef","\\xfeff","\\x"}`; !reflect.DeepEqual(result, expected) { + t.Errorf("Expected %q, got %q", expected, result) + } +} + +func BenchmarkByteaArrayValue(b *testing.B) { + rand.Seed(1) + x := make([][]byte, 10) + for i := 0; i < len(x); i++ { + x[i] = make([]byte, len(x)) + for j := 0; j < len(x); j++ { + x[i][j] = byte(rand.Int()) + } + } + a := ByteaArray(x) + + for i := 0; i < b.N; i++ { + a.Value() + } +} + +func TestFloat64ArrayScanUnsupported(t *testing.T) { + var arr Float64Array + err := arr.Scan(true) + + if err == nil { + t.Fatal("Expected error when scanning from bool") + } + if !strings.Contains(err.Error(), "bool to Float64Array") { + t.Errorf("Expected type to be mentioned when scanning, got %q", err) + } +} + +func TestFloat64ArrayScanEmpty(t *testing.T) { + var arr Float64Array + err := arr.Scan(`{}`) + + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + if arr == nil || len(arr) != 0 { + t.Errorf("Expected empty, got %#v", arr) + } +} + +func TestFloat64ArrayScanNil(t *testing.T) { + arr := Float64Array{5, 5, 5} + err := arr.Scan(nil) + + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + if arr != nil { + t.Errorf("Expected nil, got %+v", arr) + } +} + +var Float64ArrayStringTests = []struct { + str string + arr Float64Array +}{ + {`{}`, Float64Array{}}, + {`{1.2}`, Float64Array{1.2}}, + {`{3.456,7.89}`, Float64Array{3.456, 7.89}}, + {`{3,1,2}`, Float64Array{3, 1, 2}}, +} + +func TestFloat64ArrayScanBytes(t *testing.T) { + for _, tt := range Float64ArrayStringTests { + bytes := []byte(tt.str) + arr := 
Float64Array{5, 5, 5} + err := arr.Scan(bytes) + + if err != nil { + t.Fatalf("Expected no error for %q, got %v", bytes, err) + } + if !reflect.DeepEqual(arr, tt.arr) { + t.Errorf("Expected %+v for %q, got %+v", tt.arr, bytes, arr) + } + } +} + +func BenchmarkFloat64ArrayScanBytes(b *testing.B) { + var a Float64Array + var x interface{} = []byte(`{1.2,3.4,5.6,7.8,9.01,2.34,5.67,8.90,1.234,5.678}`) + + for i := 0; i < b.N; i++ { + a = Float64Array{} + a.Scan(x) + } +} + +func TestFloat64ArrayScanString(t *testing.T) { + for _, tt := range Float64ArrayStringTests { + arr := Float64Array{5, 5, 5} + err := arr.Scan(tt.str) + + if err != nil { + t.Fatalf("Expected no error for %q, got %v", tt.str, err) + } + if !reflect.DeepEqual(arr, tt.arr) { + t.Errorf("Expected %+v for %q, got %+v", tt.arr, tt.str, arr) + } + } +} + +func TestFloat64ArrayScanError(t *testing.T) { + for _, tt := range []struct { + input, err string + }{ + {``, "unable to parse array"}, + {`{`, "unable to parse array"}, + {`{{5.6},{7.8}}`, "cannot convert ARRAY[2][1] to Float64Array"}, + {`{NULL}`, "parsing array element index 0:"}, + {`{a}`, "parsing array element index 0:"}, + {`{5.6,a}`, "parsing array element index 1:"}, + {`{5.6,7.8,a}`, "parsing array element index 2:"}, + } { + arr := Float64Array{5, 5, 5} + err := arr.Scan(tt.input) + + if err == nil { + t.Fatalf("Expected error for %q, got none", tt.input) + } + if !strings.Contains(err.Error(), tt.err) { + t.Errorf("Expected error to contain %q for %q, got %q", tt.err, tt.input, err) + } + if !reflect.DeepEqual(arr, Float64Array{5, 5, 5}) { + t.Errorf("Expected destination not to change for %q, got %+v", tt.input, arr) + } + } +} + +func TestFloat64ArrayValue(t *testing.T) { + result, err := Float64Array(nil).Value() + + if err != nil { + t.Fatalf("Expected no error for nil, got %v", err) + } + if result != nil { + t.Errorf("Expected nil, got %q", result) + } + + result, err = Float64Array([]float64{}).Value() + + if err != nil { + t.Fatalf("Expected no error for empty, got %v", err) + } + if expected := `{}`; !reflect.DeepEqual(result, expected) { + t.Errorf("Expected empty, got %q", result) + } + + result, err = Float64Array([]float64{1.2, 3.4, 5.6}).Value() + + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + if expected := `{1.2,3.4,5.6}`; !reflect.DeepEqual(result, expected) { + t.Errorf("Expected %q, got %q", expected, result) + } +} + +func BenchmarkFloat64ArrayValue(b *testing.B) { + rand.Seed(1) + x := make([]float64, 10) + for i := 0; i < len(x); i++ { + x[i] = rand.NormFloat64() + } + a := Float64Array(x) + + for i := 0; i < b.N; i++ { + a.Value() + } +} + +func TestInt64ArrayScanUnsupported(t *testing.T) { + var arr Int64Array + err := arr.Scan(true) + + if err == nil { + t.Fatal("Expected error when scanning from bool") + } + if !strings.Contains(err.Error(), "bool to Int64Array") { + t.Errorf("Expected type to be mentioned when scanning, got %q", err) + } +} + +func TestInt64ArrayScanEmpty(t *testing.T) { + var arr Int64Array + err := arr.Scan(`{}`) + + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + if arr == nil || len(arr) != 0 { + t.Errorf("Expected empty, got %#v", arr) + } +} + +func TestInt64ArrayScanNil(t *testing.T) { + arr := Int64Array{5, 5, 5} + err := arr.Scan(nil) + + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + if arr != nil { + t.Errorf("Expected nil, got %+v", arr) + } +} + +var Int64ArrayStringTests = []struct { + str string + arr Int64Array +}{ + {`{}`, Int64Array{}}, + 
{`{12}`, Int64Array{12}}, + {`{345,678}`, Int64Array{345, 678}}, +} + +func TestInt64ArrayScanBytes(t *testing.T) { + for _, tt := range Int64ArrayStringTests { + bytes := []byte(tt.str) + arr := Int64Array{5, 5, 5} + err := arr.Scan(bytes) + + if err != nil { + t.Fatalf("Expected no error for %q, got %v", bytes, err) + } + if !reflect.DeepEqual(arr, tt.arr) { + t.Errorf("Expected %+v for %q, got %+v", tt.arr, bytes, arr) + } + } +} + +func BenchmarkInt64ArrayScanBytes(b *testing.B) { + var a Int64Array + var x interface{} = []byte(`{1,2,3,4,5,6,7,8,9,0}`) + + for i := 0; i < b.N; i++ { + a = Int64Array{} + a.Scan(x) + } +} + +func TestInt64ArrayScanString(t *testing.T) { + for _, tt := range Int64ArrayStringTests { + arr := Int64Array{5, 5, 5} + err := arr.Scan(tt.str) + + if err != nil { + t.Fatalf("Expected no error for %q, got %v", tt.str, err) + } + if !reflect.DeepEqual(arr, tt.arr) { + t.Errorf("Expected %+v for %q, got %+v", tt.arr, tt.str, arr) + } + } +} + +func TestInt64ArrayScanError(t *testing.T) { + for _, tt := range []struct { + input, err string + }{ + {``, "unable to parse array"}, + {`{`, "unable to parse array"}, + {`{{5},{6}}`, "cannot convert ARRAY[2][1] to Int64Array"}, + {`{NULL}`, "parsing array element index 0:"}, + {`{a}`, "parsing array element index 0:"}, + {`{5,a}`, "parsing array element index 1:"}, + {`{5,6,a}`, "parsing array element index 2:"}, + } { + arr := Int64Array{5, 5, 5} + err := arr.Scan(tt.input) + + if err == nil { + t.Fatalf("Expected error for %q, got none", tt.input) + } + if !strings.Contains(err.Error(), tt.err) { + t.Errorf("Expected error to contain %q for %q, got %q", tt.err, tt.input, err) + } + if !reflect.DeepEqual(arr, Int64Array{5, 5, 5}) { + t.Errorf("Expected destination not to change for %q, got %+v", tt.input, arr) + } + } +} + +func TestInt64ArrayValue(t *testing.T) { + result, err := Int64Array(nil).Value() + + if err != nil { + t.Fatalf("Expected no error for nil, got %v", err) + } + if result != nil { + t.Errorf("Expected nil, got %q", result) + } + + result, err = Int64Array([]int64{}).Value() + + if err != nil { + t.Fatalf("Expected no error for empty, got %v", err) + } + if expected := `{}`; !reflect.DeepEqual(result, expected) { + t.Errorf("Expected empty, got %q", result) + } + + result, err = Int64Array([]int64{1, 2, 3}).Value() + + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + if expected := `{1,2,3}`; !reflect.DeepEqual(result, expected) { + t.Errorf("Expected %q, got %q", expected, result) + } +} + +func BenchmarkInt64ArrayValue(b *testing.B) { + rand.Seed(1) + x := make([]int64, 10) + for i := 0; i < len(x); i++ { + x[i] = rand.Int63() + } + a := Int64Array(x) + + for i := 0; i < b.N; i++ { + a.Value() + } +} + +func TestStringArrayScanUnsupported(t *testing.T) { + var arr StringArray + err := arr.Scan(true) + + if err == nil { + t.Fatal("Expected error when scanning from bool") + } + if !strings.Contains(err.Error(), "bool to StringArray") { + t.Errorf("Expected type to be mentioned when scanning, got %q", err) + } +} + +func TestStringArrayScanEmpty(t *testing.T) { + var arr StringArray + err := arr.Scan(`{}`) + + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + if arr == nil || len(arr) != 0 { + t.Errorf("Expected empty, got %#v", arr) + } +} + +func TestStringArrayScanNil(t *testing.T) { + arr := StringArray{"x", "x", "x"} + err := arr.Scan(nil) + + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + if arr != nil { + t.Errorf("Expected nil, got %+v", arr) + 
} +} + +var StringArrayStringTests = []struct { + str string + arr StringArray +}{ + {`{}`, StringArray{}}, + {`{t}`, StringArray{"t"}}, + {`{f,1}`, StringArray{"f", "1"}}, + {`{"a\\b","c d",","}`, StringArray{"a\\b", "c d", ","}}, +} + +func TestStringArrayScanBytes(t *testing.T) { + for _, tt := range StringArrayStringTests { + bytes := []byte(tt.str) + arr := StringArray{"x", "x", "x"} + err := arr.Scan(bytes) + + if err != nil { + t.Fatalf("Expected no error for %q, got %v", bytes, err) + } + if !reflect.DeepEqual(arr, tt.arr) { + t.Errorf("Expected %+v for %q, got %+v", tt.arr, bytes, arr) + } + } +} + +func BenchmarkStringArrayScanBytes(b *testing.B) { + var a StringArray + var x interface{} = []byte(`{a,b,c,d,e,f,g,h,i,j}`) + var y interface{} = []byte(`{"\a","\b","\c","\d","\e","\f","\g","\h","\i","\j"}`) + + for i := 0; i < b.N; i++ { + a = StringArray{} + a.Scan(x) + a = StringArray{} + a.Scan(y) + } +} + +func TestStringArrayScanString(t *testing.T) { + for _, tt := range StringArrayStringTests { + arr := StringArray{"x", "x", "x"} + err := arr.Scan(tt.str) + + if err != nil { + t.Fatalf("Expected no error for %q, got %v", tt.str, err) + } + if !reflect.DeepEqual(arr, tt.arr) { + t.Errorf("Expected %+v for %q, got %+v", tt.arr, tt.str, arr) + } + } +} + +func TestStringArrayScanError(t *testing.T) { + for _, tt := range []struct { + input, err string + }{ + {``, "unable to parse array"}, + {`{`, "unable to parse array"}, + {`{{a},{b}}`, "cannot convert ARRAY[2][1] to StringArray"}, + {`{NULL}`, "parsing array element index 0: cannot convert nil to string"}, + {`{a,NULL}`, "parsing array element index 1: cannot convert nil to string"}, + {`{a,b,NULL}`, "parsing array element index 2: cannot convert nil to string"}, + } { + arr := StringArray{"x", "x", "x"} + err := arr.Scan(tt.input) + + if err == nil { + t.Fatalf("Expected error for %q, got none", tt.input) + } + if !strings.Contains(err.Error(), tt.err) { + t.Errorf("Expected error to contain %q for %q, got %q", tt.err, tt.input, err) + } + if !reflect.DeepEqual(arr, StringArray{"x", "x", "x"}) { + t.Errorf("Expected destination not to change for %q, got %+v", tt.input, arr) + } + } +} + +func TestStringArrayValue(t *testing.T) { + result, err := StringArray(nil).Value() + + if err != nil { + t.Fatalf("Expected no error for nil, got %v", err) + } + if result != nil { + t.Errorf("Expected nil, got %q", result) + } + + result, err = StringArray([]string{}).Value() + + if err != nil { + t.Fatalf("Expected no error for empty, got %v", err) + } + if expected := `{}`; !reflect.DeepEqual(result, expected) { + t.Errorf("Expected empty, got %q", result) + } + + result, err = StringArray([]string{`a`, `\b`, `c"`, `d,e`}).Value() + + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + if expected := `{"a","\\b","c\"","d,e"}`; !reflect.DeepEqual(result, expected) { + t.Errorf("Expected %q, got %q", expected, result) + } +} + +func BenchmarkStringArrayValue(b *testing.B) { + x := make([]string, 10) + for i := 0; i < len(x); i++ { + x[i] = strings.Repeat(`abc"def\ghi`, 5) + } + a := StringArray(x) + + for i := 0; i < b.N; i++ { + a.Value() + } +} + +func TestGenericArrayScanUnsupported(t *testing.T) { + var s string + var ss []string + var nsa [1]sql.NullString + + for _, tt := range []struct { + src, dest interface{} + err string + }{ + {nil, nil, "destination is not a pointer to array or slice"}, + {nil, true, "destination bool is not a pointer to array or slice"}, + {nil, &s, "destination *string is not a pointer to array 
or slice"}, + {nil, ss, "destination []string is not a pointer to array or slice"}, + {nil, &nsa, " to [1]sql.NullString"}, + {true, &ss, "bool to []string"}, + {`{{x}}`, &ss, "multidimensional ARRAY[1][1] is not implemented"}, + {`{{x},{x}}`, &ss, "multidimensional ARRAY[2][1] is not implemented"}, + {`{x}`, &ss, "scanning to string is not implemented"}, + } { + err := GenericArray{tt.dest}.Scan(tt.src) + + if err == nil { + t.Fatalf("Expected error for [%#v %#v]", tt.src, tt.dest) + } + if !strings.Contains(err.Error(), tt.err) { + t.Errorf("Expected error to contain %q for [%#v %#v], got %q", tt.err, tt.src, tt.dest, err) + } + } +} + +func TestGenericArrayScanScannerArrayBytes(t *testing.T) { + src, expected, nsa := []byte(`{NULL,abc,"\""}`), + [3]sql.NullString{{}, {String: `abc`, Valid: true}, {String: `"`, Valid: true}}, + [3]sql.NullString{{String: ``, Valid: true}, {}, {}} + + if err := (GenericArray{&nsa}).Scan(src); err != nil { + t.Fatalf("Expected no error, got %v", err) + } + if !reflect.DeepEqual(nsa, expected) { + t.Errorf("Expected %v, got %v", expected, nsa) + } +} + +func TestGenericArrayScanScannerArrayString(t *testing.T) { + src, expected, nsa := `{NULL,"\"",xyz}`, + [3]sql.NullString{{}, {String: `"`, Valid: true}, {String: `xyz`, Valid: true}}, + [3]sql.NullString{{String: ``, Valid: true}, {}, {}} + + if err := (GenericArray{&nsa}).Scan(src); err != nil { + t.Fatalf("Expected no error, got %v", err) + } + if !reflect.DeepEqual(nsa, expected) { + t.Errorf("Expected %v, got %v", expected, nsa) + } +} + +func TestGenericArrayScanScannerSliceEmpty(t *testing.T) { + var nss []sql.NullString + + if err := (GenericArray{&nss}).Scan(`{}`); err != nil { + t.Fatalf("Expected no error, got %v", err) + } + if nss == nil || len(nss) != 0 { + t.Errorf("Expected empty, got %#v", nss) + } +} + +func TestGenericArrayScanScannerSliceNil(t *testing.T) { + nss := []sql.NullString{{String: ``, Valid: true}, {}} + + if err := (GenericArray{&nss}).Scan(nil); err != nil { + t.Fatalf("Expected no error, got %v", err) + } + if nss != nil { + t.Errorf("Expected nil, got %+v", nss) + } +} + +func TestGenericArrayScanScannerSliceBytes(t *testing.T) { + src, expected, nss := []byte(`{NULL,abc,"\""}`), + []sql.NullString{{}, {String: `abc`, Valid: true}, {String: `"`, Valid: true}}, + []sql.NullString{{String: ``, Valid: true}, {}, {}, {}, {}} + + if err := (GenericArray{&nss}).Scan(src); err != nil { + t.Fatalf("Expected no error, got %v", err) + } + if !reflect.DeepEqual(nss, expected) { + t.Errorf("Expected %v, got %v", expected, nss) + } +} + +func BenchmarkGenericArrayScanScannerSliceBytes(b *testing.B) { + var a GenericArray + var x interface{} = []byte(`{a,b,c,d,e,f,g,h,i,j}`) + var y interface{} = []byte(`{"\a","\b","\c","\d","\e","\f","\g","\h","\i","\j"}`) + + for i := 0; i < b.N; i++ { + a = GenericArray{new([]sql.NullString)} + a.Scan(x) + a = GenericArray{new([]sql.NullString)} + a.Scan(y) + } +} + +func TestGenericArrayScanScannerSliceString(t *testing.T) { + src, expected, nss := `{NULL,"\"",xyz}`, + []sql.NullString{{}, {String: `"`, Valid: true}, {String: `xyz`, Valid: true}}, + []sql.NullString{{String: ``, Valid: true}, {}, {}} + + if err := (GenericArray{&nss}).Scan(src); err != nil { + t.Fatalf("Expected no error, got %v", err) + } + if !reflect.DeepEqual(nss, expected) { + t.Errorf("Expected %v, got %v", expected, nss) + } +} + +type TildeNullInt64 struct{ sql.NullInt64 } + +func (TildeNullInt64) ArrayDelimiter() string { return "~" } + +func TestGenericArrayScanDelimiter(t 
*testing.T) { + src, expected, tnis := `{12~NULL~76}`, + []TildeNullInt64{{sql.NullInt64{Int64: 12, Valid: true}}, {}, {sql.NullInt64{Int64: 76, Valid: true}}}, + []TildeNullInt64{{sql.NullInt64{Int64: 0, Valid: true}}, {}} + + if err := (GenericArray{&tnis}).Scan(src); err != nil { + t.Fatalf("Expected no error for %#v, got %v", src, err) + } + if !reflect.DeepEqual(tnis, expected) { + t.Errorf("Expected %v for %#v, got %v", expected, src, tnis) + } +} + +func TestGenericArrayScanErrors(t *testing.T) { + var sa [1]string + var nis []sql.NullInt64 + var pss *[]string + + for _, tt := range []struct { + src, dest interface{} + err string + }{ + {nil, pss, "destination *[]string is nil"}, + {`{`, &sa, "unable to parse"}, + {`{}`, &sa, "cannot convert ARRAY[0] to [1]string"}, + {`{x,x}`, &sa, "cannot convert ARRAY[2] to [1]string"}, + {`{x}`, &nis, `parsing array element index 0: converting`}, + } { + err := GenericArray{tt.dest}.Scan(tt.src) + + if err == nil { + t.Fatalf("Expected error for [%#v %#v]", tt.src, tt.dest) + } + if !strings.Contains(err.Error(), tt.err) { + t.Errorf("Expected error to contain %q for [%#v %#v], got %q", tt.err, tt.src, tt.dest, err) + } + } +} + +func TestGenericArrayValueUnsupported(t *testing.T) { + _, err := GenericArray{true}.Value() + + if err == nil { + t.Fatal("Expected error for bool") + } + if !strings.Contains(err.Error(), "bool to array") { + t.Errorf("Expected type to be mentioned, got %q", err) + } +} + +type ByteArrayValuer [1]byte +type ByteSliceValuer []byte +type FuncArrayValuer struct { + delimiter func() string + value func() (driver.Value, error) +} + +func (a ByteArrayValuer) Value() (driver.Value, error) { return a[:], nil } +func (b ByteSliceValuer) Value() (driver.Value, error) { return []byte(b), nil } +func (f FuncArrayValuer) ArrayDelimiter() string { return f.delimiter() } +func (f FuncArrayValuer) Value() (driver.Value, error) { return f.value() } + +func TestGenericArrayValue(t *testing.T) { + result, err := GenericArray{nil}.Value() + + if err != nil { + t.Fatalf("Expected no error for nil, got %v", err) + } + if result != nil { + t.Errorf("Expected nil, got %q", result) + } + + for _, tt := range []interface{}{ + []bool(nil), + [][]int(nil), + []*int(nil), + []sql.NullString(nil), + } { + result, err := GenericArray{tt}.Value() + + if err != nil { + t.Fatalf("Expected no error for %#v, got %v", tt, err) + } + if result != nil { + t.Errorf("Expected nil for %#v, got %q", tt, result) + } + } + + Tilde := func(v driver.Value) FuncArrayValuer { + return FuncArrayValuer{ + func() string { return "~" }, + func() (driver.Value, error) { return v, nil }} + } + + for _, tt := range []struct { + result string + input interface{} + }{ + {`{}`, []bool{}}, + {`{true}`, []bool{true}}, + {`{true,false}`, []bool{true, false}}, + {`{true,false}`, [2]bool{true, false}}, + + {`{}`, [][]int{{}}}, + {`{}`, [][]int{{}, {}}}, + {`{{1}}`, [][]int{{1}}}, + {`{{1},{2}}`, [][]int{{1}, {2}}}, + {`{{1,2},{3,4}}`, [][]int{{1, 2}, {3, 4}}}, + {`{{1,2},{3,4}}`, [2][2]int{{1, 2}, {3, 4}}}, + + {`{"a","\\b","c\"","d,e"}`, []string{`a`, `\b`, `c"`, `d,e`}}, + {`{"a","\\b","c\"","d,e"}`, [][]byte{{'a'}, {'\\', 'b'}, {'c', '"'}, {'d', ',', 'e'}}}, + + {`{NULL}`, []*int{nil}}, + {`{0,NULL}`, []*int{new(int), nil}}, + + {`{NULL}`, []sql.NullString{{}}}, + {`{"\"",NULL}`, []sql.NullString{{String: `"`, Valid: true}, {}}}, + + {`{"a","b"}`, []ByteArrayValuer{{'a'}, {'b'}}}, + {`{{"a","b"},{"c","d"}}`, [][]ByteArrayValuer{{{'a'}, {'b'}}, {{'c'}, {'d'}}}}, + + 
{`{"e","f"}`, []ByteSliceValuer{{'e'}, {'f'}}}, + {`{{"e","f"},{"g","h"}}`, [][]ByteSliceValuer{{{'e'}, {'f'}}, {{'g'}, {'h'}}}}, + + {`{1~2}`, []FuncArrayValuer{Tilde(int64(1)), Tilde(int64(2))}}, + {`{{1~2}~{3~4}}`, [][]FuncArrayValuer{{Tilde(int64(1)), Tilde(int64(2))}, {Tilde(int64(3)), Tilde(int64(4))}}}, + } { + result, err := GenericArray{tt.input}.Value() + + if err != nil { + t.Fatalf("Expected no error for %q, got %v", tt.input, err) + } + if !reflect.DeepEqual(result, tt.result) { + t.Errorf("Expected %q for %q, got %q", tt.result, tt.input, result) + } + } +} + +func TestGenericArrayValueErrors(t *testing.T) { + v := []interface{}{func() {}} + if _, err := (GenericArray{v}).Value(); err == nil { + t.Errorf("Expected error for %q, got nil", v) + } + + v = []interface{}{nil, func() {}} + if _, err := (GenericArray{v}).Value(); err == nil { + t.Errorf("Expected error for %q, got nil", v) + } +} + +func BenchmarkGenericArrayValueBools(b *testing.B) { + rand.Seed(1) + x := make([]bool, 10) + for i := 0; i < len(x); i++ { + x[i] = rand.Intn(2) == 0 + } + a := GenericArray{x} + + for i := 0; i < b.N; i++ { + a.Value() + } +} + +func BenchmarkGenericArrayValueFloat64s(b *testing.B) { + rand.Seed(1) + x := make([]float64, 10) + for i := 0; i < len(x); i++ { + x[i] = rand.NormFloat64() + } + a := GenericArray{x} + + for i := 0; i < b.N; i++ { + a.Value() + } +} + +func BenchmarkGenericArrayValueInt64s(b *testing.B) { + rand.Seed(1) + x := make([]int64, 10) + for i := 0; i < len(x); i++ { + x[i] = rand.Int63() + } + a := GenericArray{x} + + for i := 0; i < b.N; i++ { + a.Value() + } +} + +func BenchmarkGenericArrayValueByteSlices(b *testing.B) { + x := make([][]byte, 10) + for i := 0; i < len(x); i++ { + x[i] = bytes.Repeat([]byte(`abc"def\ghi`), 5) + } + a := GenericArray{x} + + for i := 0; i < b.N; i++ { + a.Value() + } +} + +func BenchmarkGenericArrayValueStrings(b *testing.B) { + x := make([]string, 10) + for i := 0; i < len(x); i++ { + x[i] = strings.Repeat(`abc"def\ghi`, 5) + } + a := GenericArray{x} + + for i := 0; i < b.N; i++ { + a.Value() + } +} + +func TestArrayScanBackend(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + for _, tt := range []struct { + s string + d sql.Scanner + e interface{} + }{ + {`ARRAY[true, false]`, new(BoolArray), &BoolArray{true, false}}, + {`ARRAY[E'\\xdead', E'\\xbeef']`, new(ByteaArray), &ByteaArray{{'\xDE', '\xAD'}, {'\xBE', '\xEF'}}}, + {`ARRAY[1.2, 3.4]`, new(Float64Array), &Float64Array{1.2, 3.4}}, + {`ARRAY[1, 2, 3]`, new(Int64Array), &Int64Array{1, 2, 3}}, + {`ARRAY['a', E'\\b', 'c"', 'd,e']`, new(StringArray), &StringArray{`a`, `\b`, `c"`, `d,e`}}, + } { + err := db.QueryRow(`SELECT ` + tt.s).Scan(tt.d) + if err != nil { + t.Errorf("Expected no error when scanning %s into %T, got %v", tt.s, tt.d, err) + } + if !reflect.DeepEqual(tt.d, tt.e) { + t.Errorf("Expected %v when scanning %s into %T, got %v", tt.e, tt.s, tt.d, tt.d) + } + } +} + +func TestArrayValueBackend(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + for _, tt := range []struct { + s string + v driver.Valuer + }{ + {`ARRAY[true, false]`, BoolArray{true, false}}, + {`ARRAY[E'\\xdead', E'\\xbeef']`, ByteaArray{{'\xDE', '\xAD'}, {'\xBE', '\xEF'}}}, + {`ARRAY[1.2, 3.4]`, Float64Array{1.2, 3.4}}, + {`ARRAY[1, 2, 3]`, Int64Array{1, 2, 3}}, + {`ARRAY['a', E'\\b', 'c"', 'd,e']`, StringArray{`a`, `\b`, `c"`, `d,e`}}, + } { + var x int + err := db.QueryRow(`SELECT 1 WHERE `+tt.s+` <> $1`, tt.v).Scan(&x) + if err != sql.ErrNoRows { + t.Errorf("Expected %v to equal 
%s, got %v", tt.v, tt.s, err) + } + } +} diff --git a/vendor/github.com/lib/pq/bench_test.go b/vendor/github.com/lib/pq/bench_test.go new file mode 100644 index 0000000..33d7a02 --- /dev/null +++ b/vendor/github.com/lib/pq/bench_test.go @@ -0,0 +1,436 @@ +// +build go1.1 + +package pq + +import ( + "bufio" + "bytes" + "context" + "database/sql" + "database/sql/driver" + "io" + "math/rand" + "net" + "runtime" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/lib/pq/oid" +) + +var ( + selectStringQuery = "SELECT '" + strings.Repeat("0123456789", 10) + "'" + selectSeriesQuery = "SELECT generate_series(1, 100)" +) + +func BenchmarkSelectString(b *testing.B) { + var result string + benchQuery(b, selectStringQuery, &result) +} + +func BenchmarkSelectSeries(b *testing.B) { + var result int + benchQuery(b, selectSeriesQuery, &result) +} + +func benchQuery(b *testing.B, query string, result interface{}) { + b.StopTimer() + db := openTestConn(b) + defer db.Close() + b.StartTimer() + + for i := 0; i < b.N; i++ { + benchQueryLoop(b, db, query, result) + } +} + +func benchQueryLoop(b *testing.B, db *sql.DB, query string, result interface{}) { + rows, err := db.Query(query) + if err != nil { + b.Fatal(err) + } + defer rows.Close() + for rows.Next() { + err = rows.Scan(result) + if err != nil { + b.Fatal("failed to scan", err) + } + } +} + +// reading from circularConn yields content[:prefixLen] once, followed by +// content[prefixLen:] over and over again. It never returns EOF. +type circularConn struct { + content string + prefixLen int + pos int + net.Conn // for all other net.Conn methods that will never be called +} + +func (r *circularConn) Read(b []byte) (n int, err error) { + n = copy(b, r.content[r.pos:]) + r.pos += n + if r.pos >= len(r.content) { + r.pos = r.prefixLen + } + return +} + +func (r *circularConn) Write(b []byte) (n int, err error) { return len(b), nil } + +func (r *circularConn) Close() error { return nil } + +func fakeConn(content string, prefixLen int) *conn { + c := &circularConn{content: content, prefixLen: prefixLen} + return &conn{buf: bufio.NewReader(c), c: c} +} + +// This benchmark is meant to be the same as BenchmarkSelectString, but takes +// out some of the factors this package can't control. The numbers are less noisy, +// but also the costs of network communication aren't accurately represented. 
+func BenchmarkMockSelectString(b *testing.B) { + b.StopTimer() + // taken from a recorded run of BenchmarkSelectString + // See: http://www.postgresql.org/docs/current/static/protocol-message-formats.html + const response = "1\x00\x00\x00\x04" + + "t\x00\x00\x00\x06\x00\x00" + + "T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" + + "Z\x00\x00\x00\x05I" + + "2\x00\x00\x00\x04" + + "D\x00\x00\x00n\x00\x01\x00\x00\x00d0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" + + "C\x00\x00\x00\rSELECT 1\x00" + + "Z\x00\x00\x00\x05I" + + "3\x00\x00\x00\x04" + + "Z\x00\x00\x00\x05I" + c := fakeConn(response, 0) + b.StartTimer() + + for i := 0; i < b.N; i++ { + benchMockQuery(b, c, selectStringQuery) + } +} + +var seriesRowData = func() string { + var buf bytes.Buffer + for i := 1; i <= 100; i++ { + digits := byte(2) + if i >= 100 { + digits = 3 + } else if i < 10 { + digits = 1 + } + buf.WriteString("D\x00\x00\x00") + buf.WriteByte(10 + digits) + buf.WriteString("\x00\x01\x00\x00\x00") + buf.WriteByte(digits) + buf.WriteString(strconv.Itoa(i)) + } + return buf.String() +}() + +func BenchmarkMockSelectSeries(b *testing.B) { + b.StopTimer() + var response = "1\x00\x00\x00\x04" + + "t\x00\x00\x00\x06\x00\x00" + + "T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" + + "Z\x00\x00\x00\x05I" + + "2\x00\x00\x00\x04" + + seriesRowData + + "C\x00\x00\x00\x0fSELECT 100\x00" + + "Z\x00\x00\x00\x05I" + + "3\x00\x00\x00\x04" + + "Z\x00\x00\x00\x05I" + c := fakeConn(response, 0) + b.StartTimer() + + for i := 0; i < b.N; i++ { + benchMockQuery(b, c, selectSeriesQuery) + } +} + +func benchMockQuery(b *testing.B, c *conn, query string) { + stmt, err := c.Prepare(query) + if err != nil { + b.Fatal(err) + } + defer stmt.Close() + rows, err := stmt.(driver.StmtQueryContext).QueryContext(context.Background(), nil) + if err != nil { + b.Fatal(err) + } + defer rows.Close() + var dest [1]driver.Value + for { + if err := rows.Next(dest[:]); err != nil { + if err == io.EOF { + break + } + b.Fatal(err) + } + } +} + +func BenchmarkPreparedSelectString(b *testing.B) { + var result string + benchPreparedQuery(b, selectStringQuery, &result) +} + +func BenchmarkPreparedSelectSeries(b *testing.B) { + var result int + benchPreparedQuery(b, selectSeriesQuery, &result) +} + +func benchPreparedQuery(b *testing.B, query string, result interface{}) { + b.StopTimer() + db := openTestConn(b) + defer db.Close() + stmt, err := db.Prepare(query) + if err != nil { + b.Fatal(err) + } + defer stmt.Close() + b.StartTimer() + + for i := 0; i < b.N; i++ { + benchPreparedQueryLoop(b, db, stmt, result) + } +} + +func benchPreparedQueryLoop(b *testing.B, db *sql.DB, stmt *sql.Stmt, result interface{}) { + rows, err := stmt.Query() + if err != nil { + b.Fatal(err) + } + if !rows.Next() { + rows.Close() + b.Fatal("no rows") + } + defer rows.Close() + for rows.Next() { + err = rows.Scan(&result) + if err != nil { + b.Fatal("failed to scan") + } + } +} + +// See the comment for BenchmarkMockSelectString. 
+func BenchmarkMockPreparedSelectString(b *testing.B) {
+	b.StopTimer()
+	const parseResponse = "1\x00\x00\x00\x04" +
+		"t\x00\x00\x00\x06\x00\x00" +
+		"T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" +
+		"Z\x00\x00\x00\x05I"
+	const responses = parseResponse +
+		"2\x00\x00\x00\x04" +
+		"D\x00\x00\x00n\x00\x01\x00\x00\x00d0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +
+		"C\x00\x00\x00\rSELECT 1\x00" +
+		"Z\x00\x00\x00\x05I"
+	c := fakeConn(responses, len(parseResponse))
+
+	stmt, err := c.Prepare(selectStringQuery)
+	if err != nil {
+		b.Fatal(err)
+	}
+	b.StartTimer()
+
+	for i := 0; i < b.N; i++ {
+		benchPreparedMockQuery(b, c, stmt)
+	}
+}
+
+func BenchmarkMockPreparedSelectSeries(b *testing.B) {
+	b.StopTimer()
+	const parseResponse = "1\x00\x00\x00\x04" +
+		"t\x00\x00\x00\x06\x00\x00" +
+		"T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" +
+		"Z\x00\x00\x00\x05I"
+	var responses = parseResponse +
+		"2\x00\x00\x00\x04" +
+		seriesRowData +
+		"C\x00\x00\x00\x0fSELECT 100\x00" +
+		"Z\x00\x00\x00\x05I"
+	c := fakeConn(responses, len(parseResponse))
+
+	stmt, err := c.Prepare(selectSeriesQuery)
+	if err != nil {
+		b.Fatal(err)
+	}
+	b.StartTimer()
+
+	for i := 0; i < b.N; i++ {
+		benchPreparedMockQuery(b, c, stmt)
+	}
+}
+
+func benchPreparedMockQuery(b *testing.B, c *conn, stmt driver.Stmt) {
+	rows, err := stmt.(driver.StmtQueryContext).QueryContext(context.Background(), nil)
+	if err != nil {
+		b.Fatal(err)
+	}
+	defer rows.Close()
+	var dest [1]driver.Value
+	for {
+		if err := rows.Next(dest[:]); err != nil {
+			if err == io.EOF {
+				break
+			}
+			b.Fatal(err)
+		}
+	}
+}
+
+func BenchmarkEncodeInt64(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		encode(&parameterStatus{}, int64(1234), oid.T_int8)
+	}
+}
+
+func BenchmarkEncodeFloat64(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		encode(&parameterStatus{}, 3.14159, oid.T_float8)
+	}
+}
+
+var testByteString = []byte("abcdefghijklmnopqrstuvwxyz")
+
+func BenchmarkEncodeByteaHex(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		encode(&parameterStatus{serverVersion: 90000}, testByteString, oid.T_bytea)
+	}
+}
+func BenchmarkEncodeByteaEscape(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		encode(&parameterStatus{serverVersion: 84000}, testByteString, oid.T_bytea)
+	}
+}
+
+func BenchmarkEncodeBool(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		encode(&parameterStatus{}, true, oid.T_bool)
+	}
+}
+
+var testTimestamptz = time.Date(2001, time.January, 1, 0, 0, 0, 0, time.Local)
+
+func BenchmarkEncodeTimestamptz(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		encode(&parameterStatus{}, testTimestamptz, oid.T_timestamptz)
+	}
+}
+
+var testIntBytes = []byte("1234")
+
+func BenchmarkDecodeInt64(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		decode(&parameterStatus{}, testIntBytes, oid.T_int8, formatText)
+	}
+}
+
+var testFloatBytes = []byte("3.14159")
+
+func BenchmarkDecodeFloat64(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		decode(&parameterStatus{}, testFloatBytes, oid.T_float8, formatText)
+	}
+}
+
+var testBoolBytes = []byte{'t'}
+
+func BenchmarkDecodeBool(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		decode(&parameterStatus{}, testBoolBytes, oid.T_bool, formatText)
+	}
+}
+
+func TestDecodeBool(t *testing.T) {
+	db := openTestConn(t)
+	rows, err := db.Query("select true")
+	if err != nil {
+		t.Fatal(err)
+	}
+	rows.Close()
+}
+
+var testTimestamptzBytes = []byte("2013-09-17 22:15:32.360754-07")
+
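+// testTimestamptzBytes holds a timestamptz in the text output format
+// produced under DateStyle "ISO, MDY" (the style this driver requests at
+// connection time): date, time with microsecond precision, then the UTC
+// offset. The benchmarks below push those bytes through the same path a
+// live query result takes, roughly
+//
+//	decode(&parameterStatus{}, testTimestamptzBytes, oid.T_timestamptz, formatText)
+//
+// which should come back as the time.Time for 2013-09-17 22:15:32.360754
+// at -07:00.
+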
+func BenchmarkDecodeTimestamptz(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		decode(&parameterStatus{}, testTimestamptzBytes, oid.T_timestamptz, formatText)
+	}
+}
+
+func BenchmarkDecodeTimestamptzMultiThread(b *testing.B) {
+	oldProcs := runtime.GOMAXPROCS(0)
+	defer runtime.GOMAXPROCS(oldProcs)
+	runtime.GOMAXPROCS(runtime.NumCPU())
+	globalLocationCache = newLocationCache()
+
+	f := func(wg *sync.WaitGroup, loops int) {
+		defer wg.Done()
+		for i := 0; i < loops; i++ {
+			decode(&parameterStatus{}, testTimestamptzBytes, oid.T_timestamptz, formatText)
+		}
+	}
+
+	wg := &sync.WaitGroup{}
+	b.ResetTimer()
+	for j := 0; j < 10; j++ {
+		wg.Add(1)
+		go f(wg, b.N/10)
+	}
+	wg.Wait()
+}
+
+func BenchmarkLocationCache(b *testing.B) {
+	globalLocationCache = newLocationCache()
+	for i := 0; i < b.N; i++ {
+		globalLocationCache.getLocation(rand.Intn(10000))
+	}
+}
+
+func BenchmarkLocationCacheMultiThread(b *testing.B) {
+	oldProcs := runtime.GOMAXPROCS(0)
+	defer runtime.GOMAXPROCS(oldProcs)
+	runtime.GOMAXPROCS(runtime.NumCPU())
+	globalLocationCache = newLocationCache()
+
+	f := func(wg *sync.WaitGroup, loops int) {
+		defer wg.Done()
+		for i := 0; i < loops; i++ {
+			globalLocationCache.getLocation(rand.Intn(10000))
+		}
+	}
+
+	wg := &sync.WaitGroup{}
+	b.ResetTimer()
+	for j := 0; j < 10; j++ {
+		wg.Add(1)
+		go f(wg, b.N/10)
+	}
+	wg.Wait()
+}
+
+// Stress test the performance of parsing results from the wire.
+func BenchmarkResultParsing(b *testing.B) {
+	b.StopTimer()
+
+	db := openTestConn(b)
+	defer db.Close()
+	_, err := db.Exec("BEGIN")
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		res, err := db.Query("SELECT generate_series(1, 50000)")
+		if err != nil {
+			b.Fatal(err)
+		}
+		res.Close()
+	}
+}
diff --git a/vendor/github.com/lib/pq/buf.go b/vendor/github.com/lib/pq/buf.go
new file mode 100644
index 0000000..666b001
--- /dev/null
+++ b/vendor/github.com/lib/pq/buf.go
@@ -0,0 +1,91 @@
+package pq
+
+import (
+	"bytes"
+	"encoding/binary"
+
+	"github.com/lib/pq/oid"
+)
+
+type readBuf []byte
+
+func (b *readBuf) int32() (n int) {
+	n = int(int32(binary.BigEndian.Uint32(*b)))
+	*b = (*b)[4:]
+	return
+}
+
+func (b *readBuf) oid() (n oid.Oid) {
+	n = oid.Oid(binary.BigEndian.Uint32(*b))
+	*b = (*b)[4:]
+	return
+}
+
+// N.B: this is actually an unsigned 16-bit integer, unlike int32
+func (b *readBuf) int16() (n int) {
+	n = int(binary.BigEndian.Uint16(*b))
+	*b = (*b)[2:]
+	return
+}
+
+func (b *readBuf) string() string {
+	i := bytes.IndexByte(*b, 0)
+	if i < 0 {
+		errorf("invalid message format; expected string terminator")
+	}
+	s := (*b)[:i]
+	*b = (*b)[i+1:]
+	return string(s)
+}
+
+func (b *readBuf) next(n int) (v []byte) {
+	v = (*b)[:n]
+	*b = (*b)[n:]
+	return
+}
+
+func (b *readBuf) byte() byte {
+	return b.next(1)[0]
+}
+
+type writeBuf struct {
+	buf []byte
+	pos int
+}
+
+func (b *writeBuf) int32(n int) {
+	x := make([]byte, 4)
+	binary.BigEndian.PutUint32(x, uint32(n))
+	b.buf = append(b.buf, x...)
+}
+
+func (b *writeBuf) int16(n int) {
+	x := make([]byte, 2)
+	binary.BigEndian.PutUint16(x, uint16(n))
+	b.buf = append(b.buf, x...)
+}
+
+func (b *writeBuf) string(s string) {
+	b.buf = append(b.buf, (s + "\000")...)
+}
+
+func (b *writeBuf) byte(c byte) {
+	b.buf = append(b.buf, c)
+}
+
+func (b *writeBuf) bytes(v []byte) {
+	b.buf = append(b.buf, v...)
+} + +func (b *writeBuf) wrap() []byte { + p := b.buf[b.pos:] + binary.BigEndian.PutUint32(p, uint32(len(p))) + return b.buf +} + +func (b *writeBuf) next(c byte) { + p := b.buf[b.pos:] + binary.BigEndian.PutUint32(p, uint32(len(p))) + b.pos = len(b.buf) + 1 + b.buf = append(b.buf, c, 0, 0, 0, 0) +} diff --git a/vendor/github.com/lib/pq/certs/README b/vendor/github.com/lib/pq/certs/README new file mode 100644 index 0000000..24ab7b2 --- /dev/null +++ b/vendor/github.com/lib/pq/certs/README @@ -0,0 +1,3 @@ +This directory contains certificates and private keys for testing some +SSL-related functionality in Travis. Do NOT use these certificates for +anything other than testing. diff --git a/vendor/github.com/lib/pq/certs/bogus_root.crt b/vendor/github.com/lib/pq/certs/bogus_root.crt new file mode 100644 index 0000000..1239db3 --- /dev/null +++ b/vendor/github.com/lib/pq/certs/bogus_root.crt @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDBjCCAe6gAwIBAgIQSnDYp/Naet9HOZljF5PuwDANBgkqhkiG9w0BAQsFADAr +MRIwEAYDVQQKEwlDb2Nrcm9hY2gxFTATBgNVBAMTDENvY2tyb2FjaCBDQTAeFw0x +NjAyMDcxNjQ0MzdaFw0xNzAyMDYxNjQ0MzdaMCsxEjAQBgNVBAoTCUNvY2tyb2Fj +aDEVMBMGA1UEAxMMQ29ja3JvYWNoIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAxdln3/UdgP7ayA/G1kT7upjLe4ERwQjYQ25q0e1+vgsB5jhiirxJ +e0+WkhhYu/mwoSAXzvlsbZ2PWFyfdanZeD/Lh6SvIeWXVVaPcWVWL1TEcoN2jr5+ +E85MMHmbbmaT2he8s6br2tM/UZxyTQ2XRprIzApbDssyw1c0Yufcpu3C6267FLEl +IfcWrzDhnluFhthhtGXv3ToD8IuMScMC5qlKBXtKmD1B5x14ngO/ecNJ+OlEi0HU +mavK4KWgI2rDXRZ2EnCpyTZdkc3kkRnzKcg653oOjMDRZdrhfIrha+Jq38ACsUmZ +Su7Sp5jkIHOCO8Zg+l6GKVSq37dKMapD8wIDAQABoyYwJDAOBgNVHQ8BAf8EBAMC +AuQwEgYDVR0TAQH/BAgwBgEB/wIBATANBgkqhkiG9w0BAQsFAAOCAQEAwZ2Tu0Yu +rrSVdMdoPEjT1IZd+5OhM/SLzL0ddtvTithRweLHsw2lDQYlXFqr24i3UGZJQ1sp +cqSrNwswgLUQT3vWyTjmM51HEb2vMYWKmjZ+sBQYAUP1CadrN/+OTfNGnlF1+B4w +IXOzh7EvQmJJnNybLe4a/aRvj1NE2n8Z898B76SVU9WbfKKz8VwLzuIPDqkKcZda +lMy5yzthyztV9YjcWs2zVOUGZvGdAhDrvZuUq6mSmxrBEvR2LBOggmVf3tGRT+Ls +lW7c9Lrva5zLHuqmoPP07A+vuI9a0D1X44jwGDuPWJ5RnTOQ63Uez12mKNjqleHw +DnkwNanuO8dhAA== +-----END CERTIFICATE----- diff --git a/vendor/github.com/lib/pq/certs/postgresql.crt b/vendor/github.com/lib/pq/certs/postgresql.crt new file mode 100644 index 0000000..6e6b428 --- /dev/null +++ b/vendor/github.com/lib/pq/certs/postgresql.crt @@ -0,0 +1,69 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 2 (0x2) + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=Nevada, L=Las Vegas, O=github.com/lib/pq, CN=pq CA + Validity + Not Before: Oct 11 15:10:11 2014 GMT + Not After : Oct 8 15:10:11 2024 GMT + Subject: C=US, ST=Nevada, L=Las Vegas, O=github.com/lib/pq, CN=pqgosslcert + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + RSA Public Key: (1024 bit) + Modulus (1024 bit): + 00:e3:8c:06:9a:70:54:51:d1:34:34:83:39:cd:a2: + 59:0f:05:ed:8d:d8:0e:34:d0:92:f4:09:4d:ee:8c: + 78:55:49:24:f8:3c:e0:34:58:02:b2:e7:94:58:c1: + e8:e5:bb:d1:af:f6:54:c1:40:b1:90:70:79:0d:35: + 54:9c:8f:16:e9:c2:f0:92:e6:64:49:38:c1:76:f8: + 47:66:c4:5b:4a:b6:a9:43:ce:c8:be:6c:4d:2b:94: + 97:3c:55:bc:d1:d0:6e:b7:53:ae:89:5c:4b:6b:86: + 40:be:c1:ae:1e:64:ce:9c:ae:87:0a:69:e5:c8:21: + 12:be:ae:1d:f6:45:df:16:a7 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + 9B:25:31:63:A2:D8:06:FF:CB:E3:E9:96:FF:0D:BA:DC:12:7D:04:CF + X509v3 Authority Key Identifier: + keyid:52:93:ED:1E:76:0A:9F:65:4F:DE:19:66:C1:D5:22:40:35:CB:A0:72 + + X509v3 Basic Constraints: + CA:FALSE + X509v3 Key Usage: + Digital Signature, Non Repudiation, Key Encipherment + Signature Algorithm: 
sha256WithRSAEncryption + 3e:f5:f8:0b:4e:11:bd:00:86:1f:ce:dc:97:02:98:91:11:f5: + 65:f6:f2:8a:b2:3e:47:92:05:69:28:c9:e9:b4:f7:cf:93:d1: + 2d:81:5d:00:3c:23:be:da:70:ea:59:e1:2c:d3:25:49:ae:a6: + 95:54:c1:10:df:23:e3:fe:d6:e4:76:c7:6b:73:ad:1b:34:7c: + e2:56:cc:c0:37:ae:c5:7a:11:20:6c:3d:05:0e:99:cd:22:6c: + cf:59:a1:da:28:d4:65:ba:7d:2f:2b:3d:69:6d:a6:c1:ae:57: + bf:56:64:13:79:f8:48:46:65:eb:81:67:28:0b:7b:de:47:10: + b3:80:3c:31:d1:58:94:01:51:4a:c7:c8:1a:01:a8:af:c4:cd: + bb:84:a5:d9:8b:b4:b9:a1:64:3e:95:d9:90:1d:d5:3f:67:cc: + 3b:ba:f5:b4:d1:33:77:ee:c2:d2:3e:7e:c5:66:6e:b7:35:4c: + 60:57:b0:b8:be:36:c8:f3:d3:95:8c:28:4a:c9:f7:27:a4:0d: + e5:96:99:eb:f5:c8:bd:f3:84:6d:ef:02:f9:8a:36:7d:6b:5f: + 36:68:37:41:d9:74:ae:c6:78:2e:44:86:a1:ad:43:ca:fb:b5: + 3e:ba:10:23:09:02:ac:62:d1:d0:83:c8:95:b9:e3:5e:30:ff: + 5b:2b:38:fa +-----BEGIN CERTIFICATE----- +MIIDEzCCAfugAwIBAgIBAjANBgkqhkiG9w0BAQsFADBeMQswCQYDVQQGEwJVUzEP +MA0GA1UECBMGTmV2YWRhMRIwEAYDVQQHEwlMYXMgVmVnYXMxGjAYBgNVBAoTEWdp +dGh1Yi5jb20vbGliL3BxMQ4wDAYDVQQDEwVwcSBDQTAeFw0xNDEwMTExNTEwMTFa +Fw0yNDEwMDgxNTEwMTFaMGQxCzAJBgNVBAYTAlVTMQ8wDQYDVQQIEwZOZXZhZGEx +EjAQBgNVBAcTCUxhcyBWZWdhczEaMBgGA1UEChMRZ2l0aHViLmNvbS9saWIvcHEx +FDASBgNVBAMTC3BxZ29zc2xjZXJ0MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB +gQDjjAaacFRR0TQ0gznNolkPBe2N2A400JL0CU3ujHhVSST4POA0WAKy55RYwejl +u9Gv9lTBQLGQcHkNNVScjxbpwvCS5mRJOMF2+EdmxFtKtqlDzsi+bE0rlJc8VbzR +0G63U66JXEtrhkC+wa4eZM6crocKaeXIIRK+rh32Rd8WpwIDAQABo1owWDAdBgNV +HQ4EFgQUmyUxY6LYBv/L4+mW/w263BJ9BM8wHwYDVR0jBBgwFoAUUpPtHnYKn2VP +3hlmwdUiQDXLoHIwCQYDVR0TBAIwADALBgNVHQ8EBAMCBeAwDQYJKoZIhvcNAQEL +BQADggEBAD71+AtOEb0Ahh/O3JcCmJER9WX28oqyPkeSBWkoyem098+T0S2BXQA8 +I77acOpZ4SzTJUmuppVUwRDfI+P+1uR2x2tzrRs0fOJWzMA3rsV6ESBsPQUOmc0i +bM9Zodoo1GW6fS8rPWltpsGuV79WZBN5+EhGZeuBZygLe95HELOAPDHRWJQBUUrH +yBoBqK/EzbuEpdmLtLmhZD6V2ZAd1T9nzDu69bTRM3fuwtI+fsVmbrc1TGBXsLi+ +Nsjz05WMKErJ9yekDeWWmev1yL3zhG3vAvmKNn1rXzZoN0HZdK7GeC5EhqGtQ8r7 +tT66ECMJAqxi0dCDyJW5414w/1srOPo= +-----END CERTIFICATE----- diff --git a/vendor/github.com/lib/pq/certs/postgresql.key b/vendor/github.com/lib/pq/certs/postgresql.key new file mode 100644 index 0000000..eb8b20b --- /dev/null +++ b/vendor/github.com/lib/pq/certs/postgresql.key @@ -0,0 +1,15 @@ +-----BEGIN RSA PRIVATE KEY----- +MIICWwIBAAKBgQDjjAaacFRR0TQ0gznNolkPBe2N2A400JL0CU3ujHhVSST4POA0 +WAKy55RYwejlu9Gv9lTBQLGQcHkNNVScjxbpwvCS5mRJOMF2+EdmxFtKtqlDzsi+ +bE0rlJc8VbzR0G63U66JXEtrhkC+wa4eZM6crocKaeXIIRK+rh32Rd8WpwIDAQAB +AoGAM5dM6/kp9P700i8qjOgRPym96Zoh5nGfz/rIE5z/r36NBkdvIg8OVZfR96nH +b0b9TOMR5lsPp0sI9yivTWvX6qyvLJRWy2vvx17hXK9NxXUNTAm0PYZUTvCtcPeX +RnJpzQKNZQPkFzF0uXBc4CtPK2Vz0+FGvAelrhYAxnw1dIkCQQD+9qaW5QhXjsjb +Nl85CmXgxPmGROcgLQCO+omfrjf9UXrituU9Dz6auym5lDGEdMFnkzfr+wpasEy9 +mf5ZZOhDAkEA5HjXfVGaCtpydOt6hDon/uZsyssCK2lQ7NSuE3vP+sUsYMzIpEoy +t3VWXqKbo+g9KNDTP4WEliqp1aiSIylzzQJANPeqzihQnlgEdD4MdD4rwhFJwVIp +Le8Lcais1KaN7StzOwxB/XhgSibd2TbnPpw+3bSg5n5lvUdo+e62/31OHwJAU1jS +I+F09KikQIr28u3UUWT2IzTT4cpVv1AHAQyV3sG3YsjSGT0IK20eyP9BEBZU2WL0 +7aNjrvR5aHxKc5FXsQJABsFtyGpgI5X4xufkJZVZ+Mklz2n7iXa+XPatMAHFxAtb +EEMt60rngwMjXAzBSC6OYuYogRRAY3UCacNC5VhLYQ== +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/lib/pq/certs/root.crt b/vendor/github.com/lib/pq/certs/root.crt new file mode 100644 index 0000000..aecf8f6 --- /dev/null +++ b/vendor/github.com/lib/pq/certs/root.crt @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIEAzCCAuugAwIBAgIJANmheROCdW1NMA0GCSqGSIb3DQEBBQUAMF4xCzAJBgNV +BAYTAlVTMQ8wDQYDVQQIEwZOZXZhZGExEjAQBgNVBAcTCUxhcyBWZWdhczEaMBgG 
+A1UEChMRZ2l0aHViLmNvbS9saWIvcHExDjAMBgNVBAMTBXBxIENBMB4XDTE0MTAx +MTE1MDQyOVoXDTI0MTAwODE1MDQyOVowXjELMAkGA1UEBhMCVVMxDzANBgNVBAgT +Bk5ldmFkYTESMBAGA1UEBxMJTGFzIFZlZ2FzMRowGAYDVQQKExFnaXRodWIuY29t +L2xpYi9wcTEOMAwGA1UEAxMFcHEgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQCV4PxP7ShzWBzUCThcKk3qZtOLtHmszQVtbqhvgTpm1kTRtKBdVMu0 +pLAHQ3JgJCnAYgH0iZxVGoMP16T3irdgsdC48+nNTFM2T0cCdkfDURGIhSFN47cb +Pgy306BcDUD2q7ucW33+dlFSRuGVewocoh4BWM/vMtMvvWzdi4Ag/L/jhb+5wZxZ +sWymsadOVSDePEMKOvlCa3EdVwVFV40TVyDb+iWBUivDAYsS2a3KajuJrO6MbZiE +Sp2RCIkZS2zFmzWxVRi9ZhzIZhh7EVF9JAaNC3T52jhGUdlRq3YpBTMnd89iOh74 +6jWXG7wSuPj3haFzyNhmJ0ZUh+2Ynoh1AgMBAAGjgcMwgcAwHQYDVR0OBBYEFFKT +7R52Cp9lT94ZZsHVIkA1y6ByMIGQBgNVHSMEgYgwgYWAFFKT7R52Cp9lT94ZZsHV +IkA1y6ByoWKkYDBeMQswCQYDVQQGEwJVUzEPMA0GA1UECBMGTmV2YWRhMRIwEAYD +VQQHEwlMYXMgVmVnYXMxGjAYBgNVBAoTEWdpdGh1Yi5jb20vbGliL3BxMQ4wDAYD +VQQDEwVwcSBDQYIJANmheROCdW1NMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEF +BQADggEBAAEhCLWkqJNMI8b4gkbmj5fqQ/4+oO83bZ3w2Oqf6eZ8I8BC4f2NOyE6 +tRUlq5+aU7eqC1cOAvGjO+YHN/bF/DFpwLlzvUSXt+JP/pYcUjL7v+pIvwqec9hD +ndvM4iIbkD/H/OYQ3L+N3W+G1x7AcFIX+bGCb3PzYVQAjxreV6//wgKBosMGFbZo +HPxT9RPMun61SViF04H5TNs0derVn1+5eiiYENeAhJzQNyZoOOUuX1X/Inx9bEPh +C5vFBtSMgIytPgieRJVWAiMLYsfpIAStrHztRAbBs2DU01LmMgRvHdxgFEKinC/d +UHZZQDP+6pT+zADrGhQGXe4eThaO6f0= +-----END CERTIFICATE----- diff --git a/vendor/github.com/lib/pq/certs/server.crt b/vendor/github.com/lib/pq/certs/server.crt new file mode 100644 index 0000000..ddc995a --- /dev/null +++ b/vendor/github.com/lib/pq/certs/server.crt @@ -0,0 +1,81 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 1 (0x1) + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=Nevada, L=Las Vegas, O=github.com/lib/pq, CN=pq CA + Validity + Not Before: Oct 11 15:05:15 2014 GMT + Not After : Oct 8 15:05:15 2024 GMT + Subject: C=US, ST=Nevada, L=Las Vegas, O=github.com/lib/pq, CN=postgres + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + RSA Public Key: (2048 bit) + Modulus (2048 bit): + 00:d7:8a:4c:85:fb:17:a5:3c:8f:e0:72:11:29:ce: + 3f:b0:1f:3f:7d:c6:ee:7f:a7:fc:02:2b:35:47:08: + a6:3d:90:df:5c:56:14:94:00:c7:6d:d1:d2:e2:61: + 95:77:b8:e3:a6:66:31:f9:1f:21:7d:62:e1:27:da: + 94:37:61:4a:ea:63:53:a0:61:b8:9c:bb:a5:e2:e7: + b7:a6:d8:0f:05:04:c7:29:e2:ea:49:2b:7f:de:15: + 00:a6:18:70:50:c7:0c:de:9a:f9:5a:96:b0:e1:94: + 06:c6:6d:4a:21:3b:b4:0f:a5:6d:92:86:34:b2:4e: + d7:0e:a7:19:c0:77:0b:7b:87:c8:92:de:42:ff:86: + d2:b7:9a:a4:d4:15:23:ca:ad:a5:69:21:b8:ce:7e: + 66:cb:85:5d:b9:ed:8b:2d:09:8d:94:e4:04:1e:72: + ec:ef:d0:76:90:15:5a:a4:f7:91:4b:e9:ce:4e:9d: + 5d:9a:70:17:9c:d8:e9:73:83:ea:3d:61:99:a6:cd: + ac:91:40:5a:88:77:e5:4e:2a:8e:3d:13:f3:f9:38: + 6f:81:6b:8a:95:ca:0e:07:ab:6f:da:b4:8c:d9:ff: + aa:78:03:aa:c7:c2:cf:6f:64:92:d3:d8:83:d5:af: + f1:23:18:a7:2e:7b:17:0b:e7:7d:f1:fa:a8:41:a3: + 04:57 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + EE:F0:B3:46:DC:C7:09:EB:0E:B6:2F:E5:FE:62:60:45:44:9F:59:CC + X509v3 Authority Key Identifier: + keyid:52:93:ED:1E:76:0A:9F:65:4F:DE:19:66:C1:D5:22:40:35:CB:A0:72 + + X509v3 Basic Constraints: + CA:FALSE + X509v3 Key Usage: + Digital Signature, Non Repudiation, Key Encipherment + Signature Algorithm: sha256WithRSAEncryption + 7e:5a:6e:be:bf:d2:6c:c1:d6:fa:b6:fb:3f:06:53:36:08:87: + 9d:95:b1:39:af:9e:f6:47:38:17:39:da:25:7c:f2:ad:0c:e3: + ab:74:19:ca:fb:8c:a0:50:c0:1d:19:8a:9c:21:ed:0f:3a:d1: + 96:54:2e:10:09:4f:b8:70:f7:2b:99:43:d2:c6:15:bc:3f:24: + 7d:28:39:32:3f:8d:a4:4f:40:75:7f:3e:0d:1c:d1:69:f2:4e: + 
98:83:47:97:d2:25:ac:c9:36:86:2f:04:a6:c4:86:c7:c4:00: + 5f:7f:b9:ad:fc:bf:e9:f5:78:d7:82:1a:51:0d:fc:ab:9e:92: + 1d:5f:0c:18:d1:82:e0:14:c9:ce:91:89:71:ff:49:49:ff:35: + bf:7b:44:78:42:c1:d0:66:65:bb:28:2e:60:ca:9b:20:12:a9: + 90:61:b1:96:ec:15:46:c9:37:f7:07:90:8a:89:45:2a:3f:37: + ec:dc:e3:e5:8f:c3:3a:57:80:a5:54:60:0c:e1:b2:26:99:2b: + 40:7e:36:d1:9a:70:02:ec:63:f4:3b:72:ae:81:fb:30:20:6d: + cb:48:46:c6:b5:8f:39:b1:84:05:25:55:8d:f5:62:f6:1b:46: + 2e:da:a3:4c:26:12:44:d7:56:b6:b8:a9:ca:d3:ab:71:45:7c: + 9f:48:6d:1e +-----BEGIN CERTIFICATE----- +MIIDlDCCAnygAwIBAgIBATANBgkqhkiG9w0BAQsFADBeMQswCQYDVQQGEwJVUzEP +MA0GA1UECBMGTmV2YWRhMRIwEAYDVQQHEwlMYXMgVmVnYXMxGjAYBgNVBAoTEWdp +dGh1Yi5jb20vbGliL3BxMQ4wDAYDVQQDEwVwcSBDQTAeFw0xNDEwMTExNTA1MTVa +Fw0yNDEwMDgxNTA1MTVaMGExCzAJBgNVBAYTAlVTMQ8wDQYDVQQIEwZOZXZhZGEx +EjAQBgNVBAcTCUxhcyBWZWdhczEaMBgGA1UEChMRZ2l0aHViLmNvbS9saWIvcHEx +ETAPBgNVBAMTCHBvc3RncmVzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEA14pMhfsXpTyP4HIRKc4/sB8/fcbuf6f8Ais1RwimPZDfXFYUlADHbdHS4mGV +d7jjpmYx+R8hfWLhJ9qUN2FK6mNToGG4nLul4ue3ptgPBQTHKeLqSSt/3hUAphhw +UMcM3pr5Wpaw4ZQGxm1KITu0D6VtkoY0sk7XDqcZwHcLe4fIkt5C/4bSt5qk1BUj +yq2laSG4zn5my4Vdue2LLQmNlOQEHnLs79B2kBVapPeRS+nOTp1dmnAXnNjpc4Pq +PWGZps2skUBaiHflTiqOPRPz+ThvgWuKlcoOB6tv2rSM2f+qeAOqx8LPb2SS09iD +1a/xIxinLnsXC+d98fqoQaMEVwIDAQABo1owWDAdBgNVHQ4EFgQU7vCzRtzHCesO +ti/l/mJgRUSfWcwwHwYDVR0jBBgwFoAUUpPtHnYKn2VP3hlmwdUiQDXLoHIwCQYD +VR0TBAIwADALBgNVHQ8EBAMCBeAwDQYJKoZIhvcNAQELBQADggEBAH5abr6/0mzB +1vq2+z8GUzYIh52VsTmvnvZHOBc52iV88q0M46t0Gcr7jKBQwB0Zipwh7Q860ZZU +LhAJT7hw9yuZQ9LGFbw/JH0oOTI/jaRPQHV/Pg0c0WnyTpiDR5fSJazJNoYvBKbE +hsfEAF9/ua38v+n1eNeCGlEN/Kuekh1fDBjRguAUyc6RiXH/SUn/Nb97RHhCwdBm +ZbsoLmDKmyASqZBhsZbsFUbJN/cHkIqJRSo/N+zc4+WPwzpXgKVUYAzhsiaZK0B+ +NtGacALsY/Q7cq6B+zAgbctIRsa1jzmxhAUlVY31YvYbRi7ao0wmEkTXVra4qcrT +q3FFfJ9IbR4= +-----END CERTIFICATE----- diff --git a/vendor/github.com/lib/pq/certs/server.key b/vendor/github.com/lib/pq/certs/server.key new file mode 100644 index 0000000..bd7b019 --- /dev/null +++ b/vendor/github.com/lib/pq/certs/server.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEA14pMhfsXpTyP4HIRKc4/sB8/fcbuf6f8Ais1RwimPZDfXFYU +lADHbdHS4mGVd7jjpmYx+R8hfWLhJ9qUN2FK6mNToGG4nLul4ue3ptgPBQTHKeLq +SSt/3hUAphhwUMcM3pr5Wpaw4ZQGxm1KITu0D6VtkoY0sk7XDqcZwHcLe4fIkt5C +/4bSt5qk1BUjyq2laSG4zn5my4Vdue2LLQmNlOQEHnLs79B2kBVapPeRS+nOTp1d +mnAXnNjpc4PqPWGZps2skUBaiHflTiqOPRPz+ThvgWuKlcoOB6tv2rSM2f+qeAOq +x8LPb2SS09iD1a/xIxinLnsXC+d98fqoQaMEVwIDAQABAoIBAF3ZoihUhJ82F4+r +Gz4QyDpv4L1reT2sb1aiabhcU8ZK5nbWJG+tRyjSS/i2dNaEcttpdCj9HR/zhgZM +bm0OuAgG58rVwgS80CZUruq++Qs+YVojq8/gWPTiQD4SNhV2Fmx3HkwLgUk3oxuT +SsvdqzGE3okGVrutCIcgy126eA147VPMoej1Bb3fO6npqK0pFPhZfAc0YoqJuM+k +obRm5pAnGUipyLCFXjA9HYPKwYZw2RtfdA3CiImHeanSdqS+ctrC9y8BV40Th7gZ +haXdKUNdjmIxV695QQ1mkGqpKLZFqhzKioGQ2/Ly2d1iaKN9fZltTusu8unepWJ2 +tlT9qMECgYEA9uHaF1t2CqE+AJvWTihHhPIIuLxoOQXYea1qvxfcH/UMtaLKzCNm +lQ5pqCGsPvp+10f36yttO1ZehIvlVNXuJsjt0zJmPtIolNuJY76yeussfQ9jHheB +5uPEzCFlHzxYbBUyqgWaF6W74okRGzEGJXjYSP0yHPPdU4ep2q3bGiUCgYEA34Af +wBSuQSK7uLxArWHvQhyuvi43ZGXls6oRGl+Ysj54s8BP6XGkq9hEJ6G4yxgyV+BR +DUOs5X8/TLT8POuIMYvKTQthQyCk0eLv2FLdESDuuKx0kBVY3s8lK3/z5HhrdOiN +VMNZU+xDKgKc3hN9ypkk8vcZe6EtH7Y14e0rVcsCgYBTgxi8F/M5K0wG9rAqphNz +VFBA9XKn/2M33cKjO5X5tXIEKzpAjaUQvNxexG04rJGljzG8+mar0M6ONahw5yD1 +O7i/XWgazgpuOEkkVYiYbd8RutfDgR4vFVMn3hAP3eDnRtBplRWH9Ec3HTiNIys6 +F8PKBOQjyRZQQC7jyzW3hQKBgACe5HeuFwXLSOYsb6mLmhR+6+VPT4wR1F95W27N +USk9jyxAnngxfpmTkiziABdgS9N+pfr5cyN4BP77ia/Jn6kzkC5Cl9SN5KdIkA3z 
+vPVtN/x/ThuQU5zaymmig1ThGLtMYggYOslG4LDfLPxY5YKIhle+Y+259twdr2yf +Mf2dAoGAaGv3tWMgnIdGRk6EQL/yb9PKHo7ShN+tKNlGaK7WwzBdKs+Fe8jkgcr7 +pz4Ne887CmxejdISzOCcdT+Zm9Bx6I/uZwWOtDvWpIgIxVX9a9URj/+D1MxTE/y4 +d6H+c89yDY62I2+drMpdjCd3EtCaTlxpTbRS+s1eAHMH7aEkcCE= +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/lib/pq/conn.go b/vendor/github.com/lib/pq/conn.go new file mode 100644 index 0000000..43c8df2 --- /dev/null +++ b/vendor/github.com/lib/pq/conn.go @@ -0,0 +1,1854 @@ +package pq + +import ( + "bufio" + "crypto/md5" + "database/sql" + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "io" + "net" + "os" + "os/user" + "path" + "path/filepath" + "strconv" + "strings" + "time" + "unicode" + + "github.com/lib/pq/oid" +) + +// Common error types +var ( + ErrNotSupported = errors.New("pq: Unsupported command") + ErrInFailedTransaction = errors.New("pq: Could not complete operation in a failed transaction") + ErrSSLNotSupported = errors.New("pq: SSL is not enabled on the server") + ErrSSLKeyHasWorldPermissions = errors.New("pq: Private key file has group or world access. Permissions should be u=rw (0600) or less") + ErrCouldNotDetectUsername = errors.New("pq: Could not detect default username. Please provide one explicitly") + + errUnexpectedReady = errors.New("unexpected ReadyForQuery") + errNoRowsAffected = errors.New("no RowsAffected available after the empty statement") + errNoLastInsertID = errors.New("no LastInsertId available after the empty statement") +) + +// Driver is the Postgres database driver. +type Driver struct{} + +// Open opens a new connection to the database. name is a connection string. +// Most users should only use it through database/sql package from the standard +// library. +func (d *Driver) Open(name string) (driver.Conn, error) { + return Open(name) +} + +func init() { + sql.Register("postgres", &Driver{}) +} + +type parameterStatus struct { + // server version in the same format as server_version_num, or 0 if + // unavailable + serverVersion int + + // the current location based on the TimeZone value of the session, if + // available + currentLocation *time.Location +} + +type transactionStatus byte + +const ( + txnStatusIdle transactionStatus = 'I' + txnStatusIdleInTransaction transactionStatus = 'T' + txnStatusInFailedTransaction transactionStatus = 'E' +) + +func (s transactionStatus) String() string { + switch s { + case txnStatusIdle: + return "idle" + case txnStatusIdleInTransaction: + return "idle in transaction" + case txnStatusInFailedTransaction: + return "in a failed transaction" + default: + errorf("unknown transactionStatus %d", s) + } + + panic("not reached") +} + +// Dialer is the dialer interface. It can be used to obtain more control over +// how pq creates network connections. +type Dialer interface { + Dial(network, address string) (net.Conn, error) + DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) +} + +type defaultDialer struct{} + +func (d defaultDialer) Dial(ntw, addr string) (net.Conn, error) { + return net.Dial(ntw, addr) +} +func (d defaultDialer) DialTimeout(ntw, addr string, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout(ntw, addr, timeout) +} + +type conn struct { + c net.Conn + buf *bufio.Reader + namei int + scratch [512]byte + txnStatus transactionStatus + txnFinish func() + + // Save connection arguments to use during CancelRequest. + dialer Dialer + opts values + + // Cancellation key data for use with CancelRequest messages. 
+ processID int + secretKey int + + parameterStatus parameterStatus + + saveMessageType byte + saveMessageBuffer []byte + + // If true, this connection is bad and all public-facing functions should + // return ErrBadConn. + bad bool + + // If set, this connection should never use the binary format when + // receiving query results from prepared statements. Only provided for + // debugging. + disablePreparedBinaryResult bool + + // Whether to always send []byte parameters over as binary. Enables single + // round-trip mode for non-prepared Query calls. + binaryParameters bool + + // If true this connection is in the middle of a COPY + inCopy bool +} + +// Handle driver-side settings in parsed connection string. +func (cn *conn) handleDriverSettings(o values) (err error) { + boolSetting := func(key string, val *bool) error { + if value, ok := o[key]; ok { + if value == "yes" { + *val = true + } else if value == "no" { + *val = false + } else { + return fmt.Errorf("unrecognized value %q for %s", value, key) + } + } + return nil + } + + err = boolSetting("disable_prepared_binary_result", &cn.disablePreparedBinaryResult) + if err != nil { + return err + } + return boolSetting("binary_parameters", &cn.binaryParameters) +} + +func (cn *conn) handlePgpass(o values) { + // if a password was supplied, do not process .pgpass + if _, ok := o["password"]; ok { + return + } + filename := os.Getenv("PGPASSFILE") + if filename == "" { + // XXX this code doesn't work on Windows where the default filename is + // XXX %APPDATA%\postgresql\pgpass.conf + // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 + userHome := os.Getenv("HOME") + if userHome == "" { + user, err := user.Current() + if err != nil { + return + } + userHome = user.HomeDir + } + filename = filepath.Join(userHome, ".pgpass") + } + fileinfo, err := os.Stat(filename) + if err != nil { + return + } + mode := fileinfo.Mode() + if mode&(0x77) != 0 { + // XXX should warn about incorrect .pgpass permissions as psql does + return + } + file, err := os.Open(filename) + if err != nil { + return + } + defer file.Close() + scanner := bufio.NewScanner(io.Reader(file)) + hostname := o["host"] + ntw, _ := network(o) + port := o["port"] + db := o["dbname"] + username := o["user"] + // From: https://github.com/tg/pgpass/blob/master/reader.go + getFields := func(s string) []string { + fs := make([]string, 0, 5) + f := make([]rune, 0, len(s)) + + var esc bool + for _, c := range s { + switch { + case esc: + f = append(f, c) + esc = false + case c == '\\': + esc = true + case c == ':': + fs = append(fs, string(f)) + f = f[:0] + default: + f = append(f, c) + } + } + return append(fs, string(f)) + } + for scanner.Scan() { + line := scanner.Text() + if len(line) == 0 || line[0] == '#' { + continue + } + split := getFields(line) + if len(split) != 5 { + continue + } + if (split[0] == "*" || split[0] == hostname || (split[0] == "localhost" && (hostname == "" || ntw == "unix"))) && (split[1] == "*" || split[1] == port) && (split[2] == "*" || split[2] == db) && (split[3] == "*" || split[3] == username) { + o["password"] = split[4] + return + } + } +} + +func (cn *conn) writeBuf(b byte) *writeBuf { + cn.scratch[0] = b + return &writeBuf{ + buf: cn.scratch[:5], + pos: 1, + } +} + +// Open opens a new connection to the database. name is a connection string. +// Most users should only use it through database/sql package from the standard +// library. 
+func Open(name string) (_ driver.Conn, err error) { + return DialOpen(defaultDialer{}, name) +} + +// DialOpen opens a new connection to the database using a dialer. +func DialOpen(d Dialer, name string) (_ driver.Conn, err error) { + // Handle any panics during connection initialization. Note that we + // specifically do *not* want to use errRecover(), as that would turn any + // connection errors into ErrBadConns, hiding the real error message from + // the user. + defer errRecoverNoErrBadConn(&err) + + o := make(values) + + // A number of defaults are applied here, in this order: + // + // * Very low precedence defaults applied in every situation + // * Environment variables + // * Explicitly passed connection information + o["host"] = "localhost" + o["port"] = "5432" + // N.B.: Extra float digits should be set to 3, but that breaks + // Postgres 8.4 and older, where the max is 2. + o["extra_float_digits"] = "2" + for k, v := range parseEnviron(os.Environ()) { + o[k] = v + } + + if strings.HasPrefix(name, "postgres://") || strings.HasPrefix(name, "postgresql://") { + name, err = ParseURL(name) + if err != nil { + return nil, err + } + } + + if err := parseOpts(name, o); err != nil { + return nil, err + } + + // Use the "fallback" application name if necessary + if fallback, ok := o["fallback_application_name"]; ok { + if _, ok := o["application_name"]; !ok { + o["application_name"] = fallback + } + } + + // We can't work with any client_encoding other than UTF-8 currently. + // However, we have historically allowed the user to set it to UTF-8 + // explicitly, and there's no reason to break such programs, so allow that. + // Note that the "options" setting could also set client_encoding, but + // parsing its value is not worth it. Instead, we always explicitly send + // client_encoding as a separate run-time parameter, which should override + // anything set in options. + if enc, ok := o["client_encoding"]; ok && !isUTF8(enc) { + return nil, errors.New("client_encoding must be absent or 'UTF8'") + } + o["client_encoding"] = "UTF8" + // DateStyle needs a similar treatment. + if datestyle, ok := o["datestyle"]; ok { + if datestyle != "ISO, MDY" { + panic(fmt.Sprintf("setting datestyle must be absent or %v; got %v", + "ISO, MDY", datestyle)) + } + } else { + o["datestyle"] = "ISO, MDY" + } + + // If a user is not provided by any other means, the last + // resort is to use the current operating system provided user + // name. + if _, ok := o["user"]; !ok { + u, err := userCurrent() + if err != nil { + return nil, err + } + o["user"] = u + } + + cn := &conn{ + opts: o, + dialer: d, + } + err = cn.handleDriverSettings(o) + if err != nil { + return nil, err + } + cn.handlePgpass(o) + + cn.c, err = dial(d, o) + if err != nil { + return nil, err + } + + err = cn.ssl(o) + if err != nil { + return nil, err + } + + // cn.startup panics on error. Make sure we don't leak cn.c. + panicking := true + defer func() { + if panicking { + cn.c.Close() + } + }() + + cn.buf = bufio.NewReader(cn.c) + cn.startup(o) + + // reset the deadline, in case one was set (see dial) + if timeout, ok := o["connect_timeout"]; ok && timeout != "0" { + err = cn.c.SetDeadline(time.Time{}) + } + panicking = false + return cn, err +} + +func dial(d Dialer, o values) (net.Conn, error) { + ntw, addr := network(o) + // SSL is not necessary or supported over UNIX domain sockets + if ntw == "unix" { + o["sslmode"] = "disable" + } + + // Zero or not specified means wait indefinitely. 
+ if timeout, ok := o["connect_timeout"]; ok && timeout != "0" { + seconds, err := strconv.ParseInt(timeout, 10, 0) + if err != nil { + return nil, fmt.Errorf("invalid value for parameter connect_timeout: %s", err) + } + duration := time.Duration(seconds) * time.Second + // connect_timeout should apply to the entire connection establishment + // procedure, so we both use a timeout for the TCP connection + // establishment and set a deadline for doing the initial handshake. + // The deadline is then reset after startup() is done. + deadline := time.Now().Add(duration) + conn, err := d.DialTimeout(ntw, addr, duration) + if err != nil { + return nil, err + } + err = conn.SetDeadline(deadline) + return conn, err + } + return d.Dial(ntw, addr) +} + +func network(o values) (string, string) { + host := o["host"] + + if strings.HasPrefix(host, "/") { + sockPath := path.Join(host, ".s.PGSQL."+o["port"]) + return "unix", sockPath + } + + return "tcp", net.JoinHostPort(host, o["port"]) +} + +type values map[string]string + +// scanner implements a tokenizer for libpq-style option strings. +type scanner struct { + s []rune + i int +} + +// newScanner returns a new scanner initialized with the option string s. +func newScanner(s string) *scanner { + return &scanner{[]rune(s), 0} +} + +// Next returns the next rune. +// It returns 0, false if the end of the text has been reached. +func (s *scanner) Next() (rune, bool) { + if s.i >= len(s.s) { + return 0, false + } + r := s.s[s.i] + s.i++ + return r, true +} + +// SkipSpaces returns the next non-whitespace rune. +// It returns 0, false if the end of the text has been reached. +func (s *scanner) SkipSpaces() (rune, bool) { + r, ok := s.Next() + for unicode.IsSpace(r) && ok { + r, ok = s.Next() + } + return r, ok +} + +// parseOpts parses the options from name and adds them to the values. +// +// The parsing code is based on conninfo_parse from libpq's fe-connect.c +func parseOpts(name string, o values) error { + s := newScanner(name) + + for { + var ( + keyRunes, valRunes []rune + r rune + ok bool + ) + + if r, ok = s.SkipSpaces(); !ok { + break + } + + // Scan the key + for !unicode.IsSpace(r) && r != '=' { + keyRunes = append(keyRunes, r) + if r, ok = s.Next(); !ok { + break + } + } + + // Skip any whitespace if we're not at the = yet + if r != '=' { + r, ok = s.SkipSpaces() + } + + // The current character should be = + if r != '=' || !ok { + return fmt.Errorf(`missing "=" after %q in connection info string"`, string(keyRunes)) + } + + // Skip any whitespace after the = + if r, ok = s.SkipSpaces(); !ok { + // If we reach the end here, the last value is just an empty string as per libpq. 
+ o[string(keyRunes)] = "" + break + } + + if r != '\'' { + for !unicode.IsSpace(r) { + if r == '\\' { + if r, ok = s.Next(); !ok { + return fmt.Errorf(`missing character after backslash`) + } + } + valRunes = append(valRunes, r) + + if r, ok = s.Next(); !ok { + break + } + } + } else { + quote: + for { + if r, ok = s.Next(); !ok { + return fmt.Errorf(`unterminated quoted string literal in connection string`) + } + switch r { + case '\'': + break quote + case '\\': + r, _ = s.Next() + fallthrough + default: + valRunes = append(valRunes, r) + } + } + } + + o[string(keyRunes)] = string(valRunes) + } + + return nil +} + +func (cn *conn) isInTransaction() bool { + return cn.txnStatus == txnStatusIdleInTransaction || + cn.txnStatus == txnStatusInFailedTransaction +} + +func (cn *conn) checkIsInTransaction(intxn bool) { + if cn.isInTransaction() != intxn { + cn.bad = true + errorf("unexpected transaction status %v", cn.txnStatus) + } +} + +func (cn *conn) Begin() (_ driver.Tx, err error) { + return cn.begin("") +} + +func (cn *conn) begin(mode string) (_ driver.Tx, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + defer cn.errRecover(&err) + + cn.checkIsInTransaction(false) + _, commandTag, err := cn.simpleExec("BEGIN" + mode) + if err != nil { + return nil, err + } + if commandTag != "BEGIN" { + cn.bad = true + return nil, fmt.Errorf("unexpected command tag %s", commandTag) + } + if cn.txnStatus != txnStatusIdleInTransaction { + cn.bad = true + return nil, fmt.Errorf("unexpected transaction status %v", cn.txnStatus) + } + return cn, nil +} + +func (cn *conn) closeTxn() { + if finish := cn.txnFinish; finish != nil { + finish() + } +} + +func (cn *conn) Commit() (err error) { + defer cn.closeTxn() + if cn.bad { + return driver.ErrBadConn + } + defer cn.errRecover(&err) + + cn.checkIsInTransaction(true) + // We don't want the client to think that everything is okay if it tries + // to commit a failed transaction. However, no matter what we return, + // database/sql will release this connection back into the free connection + // pool so we have to abort the current transaction here. Note that you + // would get the same behaviour if you issued a COMMIT in a failed + // transaction, so it's also the least surprising thing to do here. 
+ if cn.txnStatus == txnStatusInFailedTransaction { + if err := cn.Rollback(); err != nil { + return err + } + return ErrInFailedTransaction + } + + _, commandTag, err := cn.simpleExec("COMMIT") + if err != nil { + if cn.isInTransaction() { + cn.bad = true + } + return err + } + if commandTag != "COMMIT" { + cn.bad = true + return fmt.Errorf("unexpected command tag %s", commandTag) + } + cn.checkIsInTransaction(false) + return nil +} + +func (cn *conn) Rollback() (err error) { + defer cn.closeTxn() + if cn.bad { + return driver.ErrBadConn + } + defer cn.errRecover(&err) + + cn.checkIsInTransaction(true) + _, commandTag, err := cn.simpleExec("ROLLBACK") + if err != nil { + if cn.isInTransaction() { + cn.bad = true + } + return err + } + if commandTag != "ROLLBACK" { + return fmt.Errorf("unexpected command tag %s", commandTag) + } + cn.checkIsInTransaction(false) + return nil +} + +func (cn *conn) gname() string { + cn.namei++ + return strconv.FormatInt(int64(cn.namei), 10) +} + +func (cn *conn) simpleExec(q string) (res driver.Result, commandTag string, err error) { + b := cn.writeBuf('Q') + b.string(q) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'C': + res, commandTag = cn.parseComplete(r.string()) + case 'Z': + cn.processReadyForQuery(r) + if res == nil && err == nil { + err = errUnexpectedReady + } + // done + return + case 'E': + err = parseError(r) + case 'I': + res = emptyRows + case 'T', 'D': + // ignore any results + default: + cn.bad = true + errorf("unknown response for simple query: %q", t) + } + } +} + +func (cn *conn) simpleQuery(q string) (res *rows, err error) { + defer cn.errRecover(&err) + + b := cn.writeBuf('Q') + b.string(q) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'C', 'I': + // We allow queries which don't return any results through Query as + // well as Exec. We still have to give database/sql a rows object + // the user can close, though, to avoid connections from being + // leaked. A "rows" with done=true works fine for that purpose. + if err != nil { + cn.bad = true + errorf("unexpected message %q in simple query execution", t) + } + if res == nil { + res = &rows{ + cn: cn, + } + } + // Set the result and tag to the last command complete if there wasn't a + // query already run. Although queries usually return from here and cede + // control to Next, a query with zero results does not. + if t == 'C' && res.colNames == nil { + res.result, res.tag = cn.parseComplete(r.string()) + } + res.done = true + case 'Z': + cn.processReadyForQuery(r) + // done + return + case 'E': + res = nil + err = parseError(r) + case 'D': + if res == nil { + cn.bad = true + errorf("unexpected DataRow in simple query execution") + } + // the query didn't fail; kick off to Next + cn.saveMessage(t, r) + return + case 'T': + // res might be non-nil here if we received a previous + // CommandComplete, but that's fine; just overwrite it + res = &rows{cn: cn} + res.colNames, res.colFmts, res.colTyps = parsePortalRowDescribe(r) + + // To work around a bug in QueryRow in Go 1.2 and earlier, wait + // until the first DataRow has been received. + default: + cn.bad = true + errorf("unknown response for simple query: %q", t) + } + } +} + +type noRows struct{} + +var emptyRows noRows + +var _ driver.Result = noRows{} + +func (noRows) LastInsertId() (int64, error) { + return 0, errNoLastInsertID +} + +func (noRows) RowsAffected() (int64, error) { + return 0, errNoRowsAffected +} + +// Decides which column formats to use for a prepared statement. 
The input is +// an array of type oids, one element per result column. +func decideColumnFormats(colTyps []fieldDesc, forceText bool) (colFmts []format, colFmtData []byte) { + if len(colTyps) == 0 { + return nil, colFmtDataAllText + } + + colFmts = make([]format, len(colTyps)) + if forceText { + return colFmts, colFmtDataAllText + } + + allBinary := true + allText := true + for i, t := range colTyps { + switch t.OID { + // This is the list of types to use binary mode for when receiving them + // through a prepared statement. If a type appears in this list, it + // must also be implemented in binaryDecode in encode.go. + case oid.T_bytea: + fallthrough + case oid.T_int8: + fallthrough + case oid.T_int4: + fallthrough + case oid.T_int2: + fallthrough + case oid.T_uuid: + colFmts[i] = formatBinary + allText = false + + default: + allBinary = false + } + } + + if allBinary { + return colFmts, colFmtDataAllBinary + } else if allText { + return colFmts, colFmtDataAllText + } else { + colFmtData = make([]byte, 2+len(colFmts)*2) + binary.BigEndian.PutUint16(colFmtData, uint16(len(colFmts))) + for i, v := range colFmts { + binary.BigEndian.PutUint16(colFmtData[2+i*2:], uint16(v)) + } + return colFmts, colFmtData + } +} + +func (cn *conn) prepareTo(q, stmtName string) *stmt { + st := &stmt{cn: cn, name: stmtName} + + b := cn.writeBuf('P') + b.string(st.name) + b.string(q) + b.int16(0) + + b.next('D') + b.byte('S') + b.string(st.name) + + b.next('S') + cn.send(b) + + cn.readParseResponse() + st.paramTyps, st.colNames, st.colTyps = cn.readStatementDescribeResponse() + st.colFmts, st.colFmtData = decideColumnFormats(st.colTyps, cn.disablePreparedBinaryResult) + cn.readReadyForQuery() + return st +} + +func (cn *conn) Prepare(q string) (_ driver.Stmt, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + defer cn.errRecover(&err) + + if len(q) >= 4 && strings.EqualFold(q[:4], "COPY") { + s, err := cn.prepareCopyIn(q) + if err == nil { + cn.inCopy = true + } + return s, err + } + return cn.prepareTo(q, cn.gname()), nil +} + +func (cn *conn) Close() (err error) { + // Skip cn.bad return here because we always want to close a connection. + defer cn.errRecover(&err) + + // Ensure that cn.c.Close is always run. Since error handling is done with + // panics and cn.errRecover, the Close must be in a defer. + defer func() { + cerr := cn.c.Close() + if err == nil { + err = cerr + } + }() + + // Don't go through send(); ListenerConn relies on us not scribbling on the + // scratch buffer of this connection. 
+ return cn.sendSimpleMessage('X') +} + +// Implement the "Queryer" interface +func (cn *conn) Query(query string, args []driver.Value) (driver.Rows, error) { + return cn.query(query, args) +} + +func (cn *conn) query(query string, args []driver.Value) (_ *rows, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + if cn.inCopy { + return nil, errCopyInProgress + } + defer cn.errRecover(&err) + + // Check to see if we can use the "simpleQuery" interface, which is + // *much* faster than going through prepare/exec + if len(args) == 0 { + return cn.simpleQuery(query) + } + + if cn.binaryParameters { + cn.sendBinaryModeQuery(query, args) + + cn.readParseResponse() + cn.readBindResponse() + rows := &rows{cn: cn} + rows.colNames, rows.colFmts, rows.colTyps = cn.readPortalDescribeResponse() + cn.postExecuteWorkaround() + return rows, nil + } + st := cn.prepareTo(query, "") + st.exec(args) + return &rows{ + cn: cn, + colNames: st.colNames, + colTyps: st.colTyps, + colFmts: st.colFmts, + }, nil +} + +// Implement the optional "Execer" interface for one-shot queries +func (cn *conn) Exec(query string, args []driver.Value) (res driver.Result, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + defer cn.errRecover(&err) + + // Check to see if we can use the "simpleExec" interface, which is + // *much* faster than going through prepare/exec + if len(args) == 0 { + // ignore commandTag, our caller doesn't care + r, _, err := cn.simpleExec(query) + return r, err + } + + if cn.binaryParameters { + cn.sendBinaryModeQuery(query, args) + + cn.readParseResponse() + cn.readBindResponse() + cn.readPortalDescribeResponse() + cn.postExecuteWorkaround() + res, _, err = cn.readExecuteResponse("Execute") + return res, err + } + // Use the unnamed statement to defer planning until bind + // time, or else value-based selectivity estimates cannot be + // used. + st := cn.prepareTo(query, "") + r, err := st.Exec(args) + if err != nil { + panic(err) + } + return r, err +} + +func (cn *conn) send(m *writeBuf) { + _, err := cn.c.Write(m.wrap()) + if err != nil { + panic(err) + } +} + +func (cn *conn) sendStartupPacket(m *writeBuf) error { + _, err := cn.c.Write((m.wrap())[1:]) + return err +} + +// Send a message of type typ to the server on the other end of cn. The +// message should have no payload. This method does not use the scratch +// buffer. +func (cn *conn) sendSimpleMessage(typ byte) (err error) { + _, err = cn.c.Write([]byte{typ, '\x00', '\x00', '\x00', '\x04'}) + return err +} + +// saveMessage memorizes a message and its buffer in the conn struct. +// recvMessage will then return these values on the next call to it. This +// method is useful in cases where you have to see what the next message is +// going to be (e.g. to see whether it's an error or not) but you can't handle +// the message yourself. +func (cn *conn) saveMessage(typ byte, buf *readBuf) { + if cn.saveMessageType != 0 { + cn.bad = true + errorf("unexpected saveMessageType %d", cn.saveMessageType) + } + cn.saveMessageType = typ + cn.saveMessageBuffer = *buf +} + +// recvMessage receives any message from the backend, or returns an error if +// a problem occurred while reading the message. 
+func (cn *conn) recvMessage(r *readBuf) (byte, error) { + // workaround for a QueryRow bug, see exec + if cn.saveMessageType != 0 { + t := cn.saveMessageType + *r = cn.saveMessageBuffer + cn.saveMessageType = 0 + cn.saveMessageBuffer = nil + return t, nil + } + + x := cn.scratch[:5] + _, err := io.ReadFull(cn.buf, x) + if err != nil { + return 0, err + } + + // read the type and length of the message that follows + t := x[0] + n := int(binary.BigEndian.Uint32(x[1:])) - 4 + var y []byte + if n <= len(cn.scratch) { + y = cn.scratch[:n] + } else { + y = make([]byte, n) + } + _, err = io.ReadFull(cn.buf, y) + if err != nil { + return 0, err + } + *r = y + return t, nil +} + +// recv receives a message from the backend, but if an error happened while +// reading the message or the received message was an ErrorResponse, it panics. +// NoticeResponses are ignored. This function should generally be used only +// during the startup sequence. +func (cn *conn) recv() (t byte, r *readBuf) { + for { + var err error + r = &readBuf{} + t, err = cn.recvMessage(r) + if err != nil { + panic(err) + } + + switch t { + case 'E': + panic(parseError(r)) + case 'N': + // ignore + default: + return + } + } +} + +// recv1Buf is exactly equivalent to recv1, except it uses a buffer supplied by +// the caller to avoid an allocation. +func (cn *conn) recv1Buf(r *readBuf) byte { + for { + t, err := cn.recvMessage(r) + if err != nil { + panic(err) + } + + switch t { + case 'A', 'N': + // ignore + case 'S': + cn.processParameterStatus(r) + default: + return t + } + } +} + +// recv1 receives a message from the backend, panicking if an error occurs +// while attempting to read it. All asynchronous messages are ignored, with +// the exception of ErrorResponse. +func (cn *conn) recv1() (t byte, r *readBuf) { + r = &readBuf{} + t = cn.recv1Buf(r) + return t, r +} + +func (cn *conn) ssl(o values) error { + upgrade, err := ssl(o) + if err != nil { + return err + } + + if upgrade == nil { + // Nothing to do + return nil + } + + w := cn.writeBuf(0) + w.int32(80877103) + if err = cn.sendStartupPacket(w); err != nil { + return err + } + + b := cn.scratch[:1] + _, err = io.ReadFull(cn.c, b) + if err != nil { + return err + } + + if b[0] != 'S' { + return ErrSSLNotSupported + } + + cn.c, err = upgrade(cn.c) + return err +} + +// isDriverSetting returns true iff a setting is purely for configuring the +// driver's options and should not be sent to the server in the connection +// startup packet. +func isDriverSetting(key string) bool { + switch key { + case "host", "port": + return true + case "password": + return true + case "sslmode", "sslcert", "sslkey", "sslrootcert": + return true + case "fallback_application_name": + return true + case "connect_timeout": + return true + case "disable_prepared_binary_result": + return true + case "binary_parameters": + return true + + default: + return false + } +} + +func (cn *conn) startup(o values) { + w := cn.writeBuf(0) + w.int32(196608) + // Send the backend the name of the database we want to connect to, and the + // user we want to connect as. Additionally, we send over any run-time + // parameters potentially included in the connection string. If the server + // doesn't recognize any of them, it will reply with an error. + for k, v := range o { + if isDriverSetting(k) { + // skip options which can't be run-time parameters + continue + } + // The protocol requires us to supply the database name as "database" + // instead of "dbname". 
+ if k == "dbname" { + k = "database" + } + w.string(k) + w.string(v) + } + w.string("") + if err := cn.sendStartupPacket(w); err != nil { + panic(err) + } + + for { + t, r := cn.recv() + switch t { + case 'K': + cn.processBackendKeyData(r) + case 'S': + cn.processParameterStatus(r) + case 'R': + cn.auth(r, o) + case 'Z': + cn.processReadyForQuery(r) + return + default: + errorf("unknown response for startup: %q", t) + } + } +} + +func (cn *conn) auth(r *readBuf, o values) { + switch code := r.int32(); code { + case 0: + // OK + case 3: + w := cn.writeBuf('p') + w.string(o["password"]) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 0 { + errorf("unexpected authentication response: %q", t) + } + case 5: + s := string(r.next(4)) + w := cn.writeBuf('p') + w.string("md5" + md5s(md5s(o["password"]+o["user"])+s)) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 0 { + errorf("unexpected authentication response: %q", t) + } + default: + errorf("unknown authentication response: %d", code) + } +} + +type format int + +const formatText format = 0 +const formatBinary format = 1 + +// One result-column format code with the value 1 (i.e. all binary). +var colFmtDataAllBinary = []byte{0, 1, 0, 1} + +// No result-column format codes (i.e. all text). +var colFmtDataAllText = []byte{0, 0} + +type stmt struct { + cn *conn + name string + colNames []string + colFmts []format + colFmtData []byte + colTyps []fieldDesc + paramTyps []oid.Oid + closed bool +} + +func (st *stmt) Close() (err error) { + if st.closed { + return nil + } + if st.cn.bad { + return driver.ErrBadConn + } + defer st.cn.errRecover(&err) + + w := st.cn.writeBuf('C') + w.byte('S') + w.string(st.name) + st.cn.send(w) + + st.cn.send(st.cn.writeBuf('S')) + + t, _ := st.cn.recv1() + if t != '3' { + st.cn.bad = true + errorf("unexpected close response: %q", t) + } + st.closed = true + + t, r := st.cn.recv1() + if t != 'Z' { + st.cn.bad = true + errorf("expected ready for query, but got: %q", t) + } + st.cn.processReadyForQuery(r) + + return nil +} + +func (st *stmt) Query(v []driver.Value) (r driver.Rows, err error) { + if st.cn.bad { + return nil, driver.ErrBadConn + } + defer st.cn.errRecover(&err) + + st.exec(v) + return &rows{ + cn: st.cn, + colNames: st.colNames, + colTyps: st.colTyps, + colFmts: st.colFmts, + }, nil +} + +func (st *stmt) Exec(v []driver.Value) (res driver.Result, err error) { + if st.cn.bad { + return nil, driver.ErrBadConn + } + defer st.cn.errRecover(&err) + + st.exec(v) + res, _, err = st.cn.readExecuteResponse("simple query") + return res, err +} + +func (st *stmt) exec(v []driver.Value) { + if len(v) >= 65536 { + errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(v)) + } + if len(v) != len(st.paramTyps) { + errorf("got %d parameters but the statement requires %d", len(v), len(st.paramTyps)) + } + + cn := st.cn + w := cn.writeBuf('B') + w.byte(0) // unnamed portal + w.string(st.name) + + if cn.binaryParameters { + cn.sendBinaryParameters(w, v) + } else { + w.int16(0) + w.int16(len(v)) + for i, x := range v { + if x == nil { + w.int32(-1) + } else { + b := encode(&cn.parameterStatus, x, st.paramTyps[i]) + w.int32(len(b)) + w.bytes(b) + } + } + } + w.bytes(st.colFmtData) + + w.next('E') + w.byte(0) + w.int32(0) + + w.next('S') + cn.send(w) + + cn.readBindResponse() + cn.postExecuteWorkaround() + +} + +func (st *stmt) NumInput() int { + return 
len(st.paramTyps) +} + +// parseComplete parses the "command tag" from a CommandComplete message, and +// returns the number of rows affected (if applicable) and a string +// identifying only the command that was executed, e.g. "ALTER TABLE". If the +// command tag could not be parsed, parseComplete panics. +func (cn *conn) parseComplete(commandTag string) (driver.Result, string) { + commandsWithAffectedRows := []string{ + "SELECT ", + // INSERT is handled below + "UPDATE ", + "DELETE ", + "FETCH ", + "MOVE ", + "COPY ", + } + + var affectedRows *string + for _, tag := range commandsWithAffectedRows { + if strings.HasPrefix(commandTag, tag) { + t := commandTag[len(tag):] + affectedRows = &t + commandTag = tag[:len(tag)-1] + break + } + } + // INSERT also includes the oid of the inserted row in its command tag. + // Oids in user tables are deprecated, and the oid is only returned when + // exactly one row is inserted, so it's unlikely to be of value to any + // real-world application and we can ignore it. + if affectedRows == nil && strings.HasPrefix(commandTag, "INSERT ") { + parts := strings.Split(commandTag, " ") + if len(parts) != 3 { + cn.bad = true + errorf("unexpected INSERT command tag %s", commandTag) + } + affectedRows = &parts[len(parts)-1] + commandTag = "INSERT" + } + // There should be no affected rows attached to the tag, just return it + if affectedRows == nil { + return driver.RowsAffected(0), commandTag + } + n, err := strconv.ParseInt(*affectedRows, 10, 64) + if err != nil { + cn.bad = true + errorf("could not parse commandTag: %s", err) + } + return driver.RowsAffected(n), commandTag +} + +type rows struct { + cn *conn + finish func() + colNames []string + colTyps []fieldDesc + colFmts []format + done bool + rb readBuf + result driver.Result + tag string +} + +func (rs *rows) Close() error { + if finish := rs.finish; finish != nil { + defer finish() + } + // no need to look at cn.bad as Next() will + for { + err := rs.Next(nil) + switch err { + case nil: + case io.EOF: + // rs.Next can return io.EOF on both 'Z' (ready for query) and 'T' (row + // description, used with HasNextResultSet). We need to fetch messages until + // we hit a 'Z', which is done by waiting for done to be set. 
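+			// (An io.EOF produced by a 'T' marks the start of the
+			// next result set; draining continues until the final
+			// ReadyForQuery sets done.)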
+			if rs.done {
+				return nil
+			}
+		default:
+			return err
+		}
+	}
+}
+
+func (rs *rows) Columns() []string {
+	return rs.colNames
+}
+
+func (rs *rows) Result() driver.Result {
+	if rs.result == nil {
+		return emptyRows
+	}
+	return rs.result
+}
+
+func (rs *rows) Tag() string {
+	return rs.tag
+}
+
+func (rs *rows) Next(dest []driver.Value) (err error) {
+	if rs.done {
+		return io.EOF
+	}
+
+	conn := rs.cn
+	if conn.bad {
+		return driver.ErrBadConn
+	}
+	defer conn.errRecover(&err)
+
+	for {
+		t := conn.recv1Buf(&rs.rb)
+		switch t {
+		case 'E':
+			err = parseError(&rs.rb)
+		case 'C', 'I':
+			if t == 'C' {
+				rs.result, rs.tag = conn.parseComplete(rs.rb.string())
+			}
+			continue
+		case 'Z':
+			conn.processReadyForQuery(&rs.rb)
+			rs.done = true
+			if err != nil {
+				return err
+			}
+			return io.EOF
+		case 'D':
+			n := rs.rb.int16()
+			if err != nil {
+				conn.bad = true
+				errorf("unexpected DataRow after error %s", err)
+			}
+			if n < len(dest) {
+				dest = dest[:n]
+			}
+			for i := range dest {
+				l := rs.rb.int32()
+				if l == -1 {
+					dest[i] = nil
+					continue
+				}
+				dest[i] = decode(&conn.parameterStatus, rs.rb.next(l), rs.colTyps[i].OID, rs.colFmts[i])
+			}
+			return
+		case 'T':
+			rs.colNames, rs.colFmts, rs.colTyps = parsePortalRowDescribe(&rs.rb)
+			return io.EOF
+		default:
+			errorf("unexpected message after execute: %q", t)
+		}
+	}
+}
+
+func (rs *rows) HasNextResultSet() bool {
+	return !rs.done
+}
+
+func (rs *rows) NextResultSet() error {
+	return nil
+}
+
+// QuoteIdentifier quotes an "identifier" (e.g. a table or a column name) to be
+// used as part of an SQL statement. For example:
+//
+//	tblname := "my_table"
+//	data := "my_data"
+//	quoted := pq.QuoteIdentifier(tblname)
+//	_, err := db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", quoted), data)
+//
+// Any double quotes in name will be escaped. The quoted identifier will be
+// case sensitive when used in a query. If the input string contains a zero
+// byte, the result will be truncated immediately before it.
+func QuoteIdentifier(name string) string {
+	end := strings.IndexRune(name, 0)
+	if end > -1 {
+		name = name[:end]
+	}
+	return `"` + strings.Replace(name, `"`, `""`, -1) + `"`
+}
+
+func md5s(s string) string {
+	h := md5.New()
+	h.Write([]byte(s))
+	return fmt.Sprintf("%x", h.Sum(nil))
+}
+
+func (cn *conn) sendBinaryParameters(b *writeBuf, args []driver.Value) {
+	// Do one pass over the parameters to see if we're going to send any of
+	// them over in binary. If we are, create a paramFormats array at the
+	// same time.
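+	// In the Bind message a format code of 0 selects text and 1 selects
+	// binary; only []byte arguments are sent in binary here, everything
+	// else is encoded as text.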
+ var paramFormats []int + for i, x := range args { + _, ok := x.([]byte) + if ok { + if paramFormats == nil { + paramFormats = make([]int, len(args)) + } + paramFormats[i] = 1 + } + } + if paramFormats == nil { + b.int16(0) + } else { + b.int16(len(paramFormats)) + for _, x := range paramFormats { + b.int16(x) + } + } + + b.int16(len(args)) + for _, x := range args { + if x == nil { + b.int32(-1) + } else { + datum := binaryEncode(&cn.parameterStatus, x) + b.int32(len(datum)) + b.bytes(datum) + } + } +} + +func (cn *conn) sendBinaryModeQuery(query string, args []driver.Value) { + if len(args) >= 65536 { + errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(args)) + } + + b := cn.writeBuf('P') + b.byte(0) // unnamed statement + b.string(query) + b.int16(0) + + b.next('B') + b.int16(0) // unnamed portal and statement + cn.sendBinaryParameters(b, args) + b.bytes(colFmtDataAllText) + + b.next('D') + b.byte('P') + b.byte(0) // unnamed portal + + b.next('E') + b.byte(0) + b.int32(0) + + b.next('S') + cn.send(b) +} + +func (cn *conn) processParameterStatus(r *readBuf) { + var err error + + param := r.string() + switch param { + case "server_version": + var major1 int + var major2 int + var minor int + _, err = fmt.Sscanf(r.string(), "%d.%d.%d", &major1, &major2, &minor) + if err == nil { + cn.parameterStatus.serverVersion = major1*10000 + major2*100 + minor + } + + case "TimeZone": + cn.parameterStatus.currentLocation, err = time.LoadLocation(r.string()) + if err != nil { + cn.parameterStatus.currentLocation = nil + } + + default: + // ignore + } +} + +func (cn *conn) processReadyForQuery(r *readBuf) { + cn.txnStatus = transactionStatus(r.byte()) +} + +func (cn *conn) readReadyForQuery() { + t, r := cn.recv1() + switch t { + case 'Z': + cn.processReadyForQuery(r) + return + default: + cn.bad = true + errorf("unexpected message %q; expected ReadyForQuery", t) + } +} + +func (cn *conn) processBackendKeyData(r *readBuf) { + cn.processID = r.int32() + cn.secretKey = r.int32() +} + +func (cn *conn) readParseResponse() { + t, r := cn.recv1() + switch t { + case '1': + return + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Parse response %q", t) + } +} + +func (cn *conn) readStatementDescribeResponse() (paramTyps []oid.Oid, colNames []string, colTyps []fieldDesc) { + for { + t, r := cn.recv1() + switch t { + case 't': + nparams := r.int16() + paramTyps = make([]oid.Oid, nparams) + for i := range paramTyps { + paramTyps[i] = r.oid() + } + case 'n': + return paramTyps, nil, nil + case 'T': + colNames, colTyps = parseStatementRowDescribe(r) + return paramTyps, colNames, colTyps + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Describe statement response %q", t) + } + } +} + +func (cn *conn) readPortalDescribeResponse() (colNames []string, colFmts []format, colTyps []fieldDesc) { + t, r := cn.recv1() + switch t { + case 'T': + return parsePortalRowDescribe(r) + case 'n': + return nil, nil, nil + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Describe response %q", t) + } + panic("not reached") +} + +func (cn *conn) readBindResponse() { + t, r := cn.recv1() + switch t { + case '2': + return + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Bind response %q", t) + } +} + +func (cn *conn) 
postExecuteWorkaround() { + // Work around a bug in sql.DB.QueryRow: in Go 1.2 and earlier it ignores + // any errors from rows.Next, which masks errors that happened during the + // execution of the query. To avoid the problem in common cases, we wait + // here for one more message from the database. If it's not an error the + // query will likely succeed (or perhaps has already, if it's a + // CommandComplete), so we push the message into the conn struct; recv1 + // will return it as the next message for rows.Next or rows.Close. + // However, if it's an error, we wait until ReadyForQuery and then return + // the error to our caller. + for { + t, r := cn.recv1() + switch t { + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + case 'C', 'D', 'I': + // the query didn't fail, but we can't process this message + cn.saveMessage(t, r) + return + default: + cn.bad = true + errorf("unexpected message during extended query execution: %q", t) + } + } +} + +// Only for Exec(), since we ignore the returned data +func (cn *conn) readExecuteResponse(protocolState string) (res driver.Result, commandTag string, err error) { + for { + t, r := cn.recv1() + switch t { + case 'C': + if err != nil { + cn.bad = true + errorf("unexpected CommandComplete after error %s", err) + } + res, commandTag = cn.parseComplete(r.string()) + case 'Z': + cn.processReadyForQuery(r) + if res == nil && err == nil { + err = errUnexpectedReady + } + return res, commandTag, err + case 'E': + err = parseError(r) + case 'T', 'D', 'I': + if err != nil { + cn.bad = true + errorf("unexpected %q after error %s", t, err) + } + if t == 'I' { + res = emptyRows + } + // ignore any results + default: + cn.bad = true + errorf("unknown %s response: %q", protocolState, t) + } + } +} + +func parseStatementRowDescribe(r *readBuf) (colNames []string, colTyps []fieldDesc) { + n := r.int16() + colNames = make([]string, n) + colTyps = make([]fieldDesc, n) + for i := range colNames { + colNames[i] = r.string() + r.next(6) + colTyps[i].OID = r.oid() + colTyps[i].Len = r.int16() + colTyps[i].Mod = r.int32() + // format code not known when describing a statement; always 0 + r.next(2) + } + return +} + +func parsePortalRowDescribe(r *readBuf) (colNames []string, colFmts []format, colTyps []fieldDesc) { + n := r.int16() + colNames = make([]string, n) + colFmts = make([]format, n) + colTyps = make([]fieldDesc, n) + for i := range colNames { + colNames[i] = r.string() + r.next(6) + colTyps[i].OID = r.oid() + colTyps[i].Len = r.int16() + colTyps[i].Mod = r.int32() + colFmts[i] = format(r.int16()) + } + return +} + +// parseEnviron tries to mimic some of libpq's environment handling +// +// To ease testing, it does not directly reference os.Environ, but is +// designed to accept its output. +// +// Environment-set connection information is intended to have a higher +// precedence than a library default but lower than any explicitly +// passed information (such as in the URL or connection string). +func parseEnviron(env []string) (out map[string]string) { + out = make(map[string]string) + + for _, v := range env { + parts := strings.SplitN(v, "=", 2) + + accrue := func(keyname string) { + out[keyname] = parts[1] + } + unsupported := func() { + panic(fmt.Sprintf("setting %v not supported", parts[0])) + } + + // The order of these is the same as is seen in the + // PostgreSQL 9.1 manual. Unsupported but well-defined + // keys cause a panic; these should be unset prior to + // execution. 
Options which pq expects to be set to a + // certain value are allowed, but must be set to that + // value if present (they can, of course, be absent). + switch parts[0] { + case "PGHOST": + accrue("host") + case "PGHOSTADDR": + unsupported() + case "PGPORT": + accrue("port") + case "PGDATABASE": + accrue("dbname") + case "PGUSER": + accrue("user") + case "PGPASSWORD": + accrue("password") + case "PGSERVICE", "PGSERVICEFILE", "PGREALM": + unsupported() + case "PGOPTIONS": + accrue("options") + case "PGAPPNAME": + accrue("application_name") + case "PGSSLMODE": + accrue("sslmode") + case "PGSSLCERT": + accrue("sslcert") + case "PGSSLKEY": + accrue("sslkey") + case "PGSSLROOTCERT": + accrue("sslrootcert") + case "PGREQUIRESSL", "PGSSLCRL": + unsupported() + case "PGREQUIREPEER": + unsupported() + case "PGKRBSRVNAME", "PGGSSLIB": + unsupported() + case "PGCONNECT_TIMEOUT": + accrue("connect_timeout") + case "PGCLIENTENCODING": + accrue("client_encoding") + case "PGDATESTYLE": + accrue("datestyle") + case "PGTZ": + accrue("timezone") + case "PGGEQO": + accrue("geqo") + case "PGSYSCONFDIR", "PGLOCALEDIR": + unsupported() + } + } + + return out +} + +// isUTF8 returns whether name is a fuzzy variation of the string "UTF-8". +func isUTF8(name string) bool { + // Recognize all sorts of silly things as "UTF-8", like Postgres does + s := strings.Map(alnumLowerASCII, name) + return s == "utf8" || s == "unicode" +} + +func alnumLowerASCII(ch rune) rune { + if 'A' <= ch && ch <= 'Z' { + return ch + ('a' - 'A') + } + if 'a' <= ch && ch <= 'z' || '0' <= ch && ch <= '9' { + return ch + } + return -1 // discard +} diff --git a/vendor/github.com/lib/pq/conn_go18.go b/vendor/github.com/lib/pq/conn_go18.go new file mode 100644 index 0000000..a5254f2 --- /dev/null +++ b/vendor/github.com/lib/pq/conn_go18.go @@ -0,0 +1,131 @@ +// +build go1.8 + +package pq + +import ( + "context" + "database/sql" + "database/sql/driver" + "fmt" + "io" + "io/ioutil" +) + +// Implement the "QueryerContext" interface +func (cn *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { + list := make([]driver.Value, len(args)) + for i, nv := range args { + list[i] = nv.Value + } + finish := cn.watchCancel(ctx) + r, err := cn.query(query, list) + if err != nil { + if finish != nil { + finish() + } + return nil, err + } + r.finish = finish + return r, nil +} + +// Implement the "ExecerContext" interface +func (cn *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { + list := make([]driver.Value, len(args)) + for i, nv := range args { + list[i] = nv.Value + } + + if finish := cn.watchCancel(ctx); finish != nil { + defer finish() + } + + return cn.Exec(query, list) +} + +// Implement the "ConnBeginTx" interface +func (cn *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { + var mode string + + switch sql.IsolationLevel(opts.Isolation) { + case sql.LevelDefault: + // Don't touch mode: use the server's default + case sql.LevelReadUncommitted: + mode = " ISOLATION LEVEL READ UNCOMMITTED" + case sql.LevelReadCommitted: + mode = " ISOLATION LEVEL READ COMMITTED" + case sql.LevelRepeatableRead: + mode = " ISOLATION LEVEL REPEATABLE READ" + case sql.LevelSerializable: + mode = " ISOLATION LEVEL SERIALIZABLE" + default: + return nil, fmt.Errorf("pq: isolation level not supported: %d", opts.Isolation) + } + + if opts.ReadOnly { + mode += " READ ONLY" + } else { + mode += " READ WRITE" + } + + tx, err := cn.begin(mode) + if 
err != nil { + return nil, err + } + cn.txnFinish = cn.watchCancel(ctx) + return tx, nil +} + +func (cn *conn) watchCancel(ctx context.Context) func() { + if done := ctx.Done(); done != nil { + finished := make(chan struct{}) + go func() { + select { + case <-done: + _ = cn.cancel() + finished <- struct{}{} + case <-finished: + } + }() + return func() { + select { + case <-finished: + case finished <- struct{}{}: + } + } + } + return nil +} + +func (cn *conn) cancel() error { + c, err := dial(cn.dialer, cn.opts) + if err != nil { + return err + } + defer c.Close() + + { + can := conn{ + c: c, + } + err = can.ssl(cn.opts) + if err != nil { + return err + } + + w := can.writeBuf(0) + w.int32(80877102) // cancel request code + w.int32(cn.processID) + w.int32(cn.secretKey) + + if err := can.sendStartupPacket(w); err != nil { + return err + } + } + + // Read until EOF to ensure that the server received the cancel. + { + _, err := io.Copy(ioutil.Discard, c) + return err + } +} diff --git a/vendor/github.com/lib/pq/conn_test.go b/vendor/github.com/lib/pq/conn_test.go new file mode 100644 index 0000000..e654b85 --- /dev/null +++ b/vendor/github.com/lib/pq/conn_test.go @@ -0,0 +1,1659 @@ +package pq + +import ( + "context" + "database/sql" + "database/sql/driver" + "fmt" + "io" + "net" + "os" + "reflect" + "strings" + "testing" + "time" +) + +type Fatalistic interface { + Fatal(args ...interface{}) +} + +func forceBinaryParameters() bool { + bp := os.Getenv("PQTEST_BINARY_PARAMETERS") + if bp == "yes" { + return true + } else if bp == "" || bp == "no" { + return false + } else { + panic("unexpected value for PQTEST_BINARY_PARAMETERS") + } +} + +func testConninfo(conninfo string) string { + defaultTo := func(envvar string, value string) { + if os.Getenv(envvar) == "" { + os.Setenv(envvar, value) + } + } + defaultTo("PGDATABASE", "pqgotest") + defaultTo("PGSSLMODE", "disable") + defaultTo("PGCONNECT_TIMEOUT", "20") + + if forceBinaryParameters() && + !strings.HasPrefix(conninfo, "postgres://") && + !strings.HasPrefix(conninfo, "postgresql://") { + conninfo = conninfo + " binary_parameters=yes" + } + return conninfo +} + +func openTestConnConninfo(conninfo string) (*sql.DB, error) { + return sql.Open("postgres", testConninfo(conninfo)) +} + +func openTestConn(t Fatalistic) *sql.DB { + conn, err := openTestConnConninfo("") + if err != nil { + t.Fatal(err) + } + + return conn +} + +func getServerVersion(t *testing.T, db *sql.DB) int { + var version int + err := db.QueryRow("SHOW server_version_num").Scan(&version) + if err != nil { + t.Fatal(err) + } + return version +} + +func TestReconnect(t *testing.T) { + db1 := openTestConn(t) + defer db1.Close() + tx, err := db1.Begin() + if err != nil { + t.Fatal(err) + } + var pid1 int + err = tx.QueryRow("SELECT pg_backend_pid()").Scan(&pid1) + if err != nil { + t.Fatal(err) + } + db2 := openTestConn(t) + defer db2.Close() + _, err = db2.Exec("SELECT pg_terminate_backend($1)", pid1) + if err != nil { + t.Fatal(err) + } + // The rollback will probably "fail" because we just killed + // its connection above + _ = tx.Rollback() + + const expected int = 42 + var result int + err = db1.QueryRow(fmt.Sprintf("SELECT %d", expected)).Scan(&result) + if err != nil { + t.Fatal(err) + } + if result != expected { + t.Errorf("got %v; expected %v", result, expected) + } +} + +func TestCommitInFailedTransaction(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + rows, err := txn.Query("SELECT error") + if 
err == nil { + rows.Close() + t.Fatal("expected failure") + } + err = txn.Commit() + if err != ErrInFailedTransaction { + t.Fatalf("expected ErrInFailedTransaction; got %#v", err) + } +} + +func TestOpenURL(t *testing.T) { + testURL := func(url string) { + db, err := openTestConnConninfo(url) + if err != nil { + t.Fatal(err) + } + defer db.Close() + // database/sql might not call our Open at all unless we do something with + // the connection + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + txn.Rollback() + } + testURL("postgres://") + testURL("postgresql://") +} + +const pgpassFile = "/tmp/pqgotest_pgpass" + +func TestPgpass(t *testing.T) { + if os.Getenv("TRAVIS") != "true" { + t.Skip("not running under Travis, skipping pgpass tests") + } + + testAssert := func(conninfo string, expected string, reason string) { + conn, err := openTestConnConninfo(conninfo) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + + txn, err := conn.Begin() + if err != nil { + if expected != "fail" { + t.Fatalf(reason, err) + } + return + } + rows, err := txn.Query("SELECT USER") + if err != nil { + txn.Rollback() + if expected != "fail" { + t.Fatalf(reason, err) + } + } else { + rows.Close() + if expected != "ok" { + t.Fatalf(reason, err) + } + } + txn.Rollback() + } + testAssert("", "ok", "missing .pgpass, unexpected error %#v") + os.Setenv("PGPASSFILE", pgpassFile) + testAssert("host=/tmp", "fail", ", unexpected error %#v") + os.Remove(pgpassFile) + pgpass, err := os.OpenFile(pgpassFile, os.O_RDWR|os.O_CREATE, 0644) + if err != nil { + t.Fatalf("Unexpected error writing pgpass file %#v", err) + } + _, err = pgpass.WriteString(`# comment +server:5432:some_db:some_user:pass_A +*:5432:some_db:some_user:pass_B +localhost:*:*:*:pass_C +*:*:*:*:pass_fallback +`) + if err != nil { + t.Fatalf("Unexpected error writing pgpass file %#v", err) + } + pgpass.Close() + + assertPassword := func(extra values, expected string) { + o := values{ + "host": "localhost", + "sslmode": "disable", + "connect_timeout": "20", + "user": "majid", + "port": "5432", + "extra_float_digits": "2", + "dbname": "pqgotest", + "client_encoding": "UTF8", + "datestyle": "ISO, MDY", + } + for k, v := range extra { + o[k] = v + } + (&conn{}).handlePgpass(o) + if pw := o["password"]; pw != expected { + t.Fatalf("For %v expected %s got %s", extra, expected, pw) + } + } + // wrong permissions for the pgpass file means it should be ignored + assertPassword(values{"host": "example.com", "user": "foo"}, "") + // fix the permissions and check if it has taken effect + os.Chmod(pgpassFile, 0600) + assertPassword(values{"host": "server", "dbname": "some_db", "user": "some_user"}, "pass_A") + assertPassword(values{"host": "example.com", "user": "foo"}, "pass_fallback") + assertPassword(values{"host": "example.com", "dbname": "some_db", "user": "some_user"}, "pass_B") + // localhost also matches the default "" and UNIX sockets + assertPassword(values{"host": "", "user": "some_user"}, "pass_C") + assertPassword(values{"host": "/tmp", "user": "some_user"}, "pass_C") + // cleanup + os.Remove(pgpassFile) + os.Setenv("PGPASSFILE", "") +} + +func TestExec(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + _, err := db.Exec("CREATE TEMP TABLE temp (a int)") + if err != nil { + t.Fatal(err) + } + + r, err := db.Exec("INSERT INTO temp VALUES (1)") + if err != nil { + t.Fatal(err) + } + + if n, _ := r.RowsAffected(); n != 1 { + t.Fatalf("expected 1 row affected, not %d", n) + } + + r, err = db.Exec("INSERT INTO temp VALUES ($1), ($2), 
($3)", 1, 2, 3) + if err != nil { + t.Fatal(err) + } + + if n, _ := r.RowsAffected(); n != 3 { + t.Fatalf("expected 3 rows affected, not %d", n) + } + + // SELECT doesn't send the number of returned rows in the command tag + // before 9.0 + if getServerVersion(t, db) >= 90000 { + r, err = db.Exec("SELECT g FROM generate_series(1, 2) g") + if err != nil { + t.Fatal(err) + } + if n, _ := r.RowsAffected(); n != 2 { + t.Fatalf("expected 2 rows affected, not %d", n) + } + + r, err = db.Exec("SELECT g FROM generate_series(1, $1) g", 3) + if err != nil { + t.Fatal(err) + } + if n, _ := r.RowsAffected(); n != 3 { + t.Fatalf("expected 3 rows affected, not %d", n) + } + } +} + +func TestStatment(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + st, err := db.Prepare("SELECT 1") + if err != nil { + t.Fatal(err) + } + + st1, err := db.Prepare("SELECT 2") + if err != nil { + t.Fatal(err) + } + + r, err := st.Query() + if err != nil { + t.Fatal(err) + } + defer r.Close() + + if !r.Next() { + t.Fatal("expected row") + } + + var i int + err = r.Scan(&i) + if err != nil { + t.Fatal(err) + } + + if i != 1 { + t.Fatalf("expected 1, got %d", i) + } + + // st1 + + r1, err := st1.Query() + if err != nil { + t.Fatal(err) + } + defer r1.Close() + + if !r1.Next() { + if r.Err() != nil { + t.Fatal(r1.Err()) + } + t.Fatal("expected row") + } + + err = r1.Scan(&i) + if err != nil { + t.Fatal(err) + } + + if i != 2 { + t.Fatalf("expected 2, got %d", i) + } +} + +func TestRowsCloseBeforeDone(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + r, err := db.Query("SELECT 1") + if err != nil { + t.Fatal(err) + } + + err = r.Close() + if err != nil { + t.Fatal(err) + } + + if r.Next() { + t.Fatal("unexpected row") + } + + if r.Err() != nil { + t.Fatal(r.Err()) + } +} + +func TestParameterCountMismatch(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + var notused int + err := db.QueryRow("SELECT false", 1).Scan(¬used) + if err == nil { + t.Fatal("expected err") + } + // make sure we clean up correctly + err = db.QueryRow("SELECT 1").Scan(¬used) + if err != nil { + t.Fatal(err) + } + + err = db.QueryRow("SELECT $1").Scan(¬used) + if err == nil { + t.Fatal("expected err") + } + // make sure we clean up correctly + err = db.QueryRow("SELECT 1").Scan(¬used) + if err != nil { + t.Fatal(err) + } +} + +// Test that EmptyQueryResponses are handled correctly. 
+func TestEmptyQuery(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + res, err := db.Exec("") + if err != nil { + t.Fatal(err) + } + if _, err := res.RowsAffected(); err != errNoRowsAffected { + t.Fatalf("expected %s, got %v", errNoRowsAffected, err) + } + if _, err := res.LastInsertId(); err != errNoLastInsertID { + t.Fatalf("expected %s, got %v", errNoLastInsertID, err) + } + rows, err := db.Query("") + if err != nil { + t.Fatal(err) + } + cols, err := rows.Columns() + if err != nil { + t.Fatal(err) + } + if len(cols) != 0 { + t.Fatalf("unexpected number of columns %d in response to an empty query", len(cols)) + } + if rows.Next() { + t.Fatal("unexpected row") + } + if rows.Err() != nil { + t.Fatal(rows.Err()) + } + + stmt, err := db.Prepare("") + if err != nil { + t.Fatal(err) + } + res, err = stmt.Exec() + if err != nil { + t.Fatal(err) + } + if _, err := res.RowsAffected(); err != errNoRowsAffected { + t.Fatalf("expected %s, got %v", errNoRowsAffected, err) + } + if _, err := res.LastInsertId(); err != errNoLastInsertID { + t.Fatalf("expected %s, got %v", errNoLastInsertID, err) + } + rows, err = stmt.Query() + if err != nil { + t.Fatal(err) + } + cols, err = rows.Columns() + if err != nil { + t.Fatal(err) + } + if len(cols) != 0 { + t.Fatalf("unexpected number of columns %d in response to an empty query", len(cols)) + } + if rows.Next() { + t.Fatal("unexpected row") + } + if rows.Err() != nil { + t.Fatal(rows.Err()) + } +} + +// Test that rows.Columns() is correct even if there are no result rows. +func TestEmptyResultSetColumns(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + rows, err := db.Query("SELECT 1 AS a, text 'bar' AS bar WHERE FALSE") + if err != nil { + t.Fatal(err) + } + cols, err := rows.Columns() + if err != nil { + t.Fatal(err) + } + if len(cols) != 2 { + t.Fatalf("unexpected number of columns %d in response to an empty query", len(cols)) + } + if rows.Next() { + t.Fatal("unexpected row") + } + if rows.Err() != nil { + t.Fatal(rows.Err()) + } + if cols[0] != "a" || cols[1] != "bar" { + t.Fatalf("unexpected Columns result %v", cols) + } + + stmt, err := db.Prepare("SELECT $1::int AS a, text 'bar' AS bar WHERE FALSE") + if err != nil { + t.Fatal(err) + } + rows, err = stmt.Query(1) + if err != nil { + t.Fatal(err) + } + cols, err = rows.Columns() + if err != nil { + t.Fatal(err) + } + if len(cols) != 2 { + t.Fatalf("unexpected number of columns %d in response to an empty query", len(cols)) + } + if rows.Next() { + t.Fatal("unexpected row") + } + if rows.Err() != nil { + t.Fatal(rows.Err()) + } + if cols[0] != "a" || cols[1] != "bar" { + t.Fatalf("unexpected Columns result %v", cols) + } + +} + +func TestEncodeDecode(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + q := ` + SELECT + E'\\000\\001\\002'::bytea, + 'foobar'::text, + NULL::integer, + '2000-1-1 01:02:03.04-7'::timestamptz, + 0::boolean, + 123, + -321, + 3.14::float8 + WHERE + E'\\000\\001\\002'::bytea = $1 + AND 'foobar'::text = $2 + AND $3::integer is NULL + ` + // AND '2000-1-1 12:00:00.000000-7'::timestamp = $3 + + exp1 := []byte{0, 1, 2} + exp2 := "foobar" + + r, err := db.Query(q, exp1, exp2, nil) + if err != nil { + t.Fatal(err) + } + defer r.Close() + + if !r.Next() { + if r.Err() != nil { + t.Fatal(r.Err()) + } + t.Fatal("expected row") + } + + var got1 []byte + var got2 string + var got3 = sql.NullInt64{Valid: true} + var got4 time.Time + var got5, got6, got7, got8 interface{} + + err = r.Scan(&got1, &got2, &got3, &got4, &got5, &got6, &got7, &got8) + if err != 
nil {
+		t.Fatal(err)
+	}
+
+	if !reflect.DeepEqual(exp1, got1) {
+		t.Errorf("expected %q, got %q", exp1, got1)
+	}
+
+	if !reflect.DeepEqual(exp2, got2) {
+		t.Errorf("expected %q, got %q", exp2, got2)
+	}
+
+	if got3.Valid {
+		t.Fatal("expected invalid")
+	}
+
+	if got4.Year() != 2000 {
+		t.Fatal("wrong year")
+	}
+
+	if got5 != false {
+		t.Fatalf("expected false, got %v", got5)
+	}
+
+	if got6 != int64(123) {
+		t.Fatalf("expected 123, got %d", got6)
+	}
+
+	if got7 != int64(-321) {
+		t.Fatalf("expected -321, got %d", got7)
+	}
+
+	if got8 != float64(3.14) {
+		t.Fatalf("expected 3.14, got %f", got8)
+	}
+}
+
+func TestNoData(t *testing.T) {
+	db := openTestConn(t)
+	defer db.Close()
+
+	st, err := db.Prepare("SELECT 1 WHERE true = false")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer st.Close()
+
+	r, err := st.Query()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer r.Close()
+
+	if r.Next() {
+		if r.Err() != nil {
+			t.Fatal(r.Err())
+		}
+		t.Fatal("unexpected row")
+	}
+
+	_, err = db.Query("SELECT * FROM nonexistenttable WHERE age=$1", 20)
+	if err == nil {
+		t.Fatal("Should have raised an error on non-existent table")
+	}
+
+	_, err = db.Query("SELECT * FROM nonexistenttable")
+	if err == nil {
+		t.Fatal("Should have raised an error on non-existent table")
+	}
+}
+
+func TestErrorDuringStartup(t *testing.T) {
+	// Don't use the normal connection setup; this is intended to
+	// blow up in the startup packet from a non-existent user.
+	db, err := openTestConnConninfo("user=thisuserreallydoesntexist")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer db.Close()
+
+	_, err = db.Begin()
+	if err == nil {
+		t.Fatal("expected error")
+	}
+
+	e, ok := err.(*Error)
+	if !ok {
+		t.Fatalf("expected Error, got %#v", err)
+	} else if e.Code.Name() != "invalid_authorization_specification" && e.Code.Name() != "invalid_password" {
+		t.Fatalf("expected invalid_authorization_specification or invalid_password, got %s (%+v)", e.Code.Name(), err)
+	}
+}
+
+type testConn struct {
+	closed bool
+	net.Conn
+}
+
+func (c *testConn) Close() error {
+	c.closed = true
+	return c.Conn.Close()
+}
+
+type testDialer struct {
+	conns []*testConn
+}
+
+func (d *testDialer) Dial(ntw, addr string) (net.Conn, error) {
+	c, err := net.Dial(ntw, addr)
+	if err != nil {
+		return nil, err
+	}
+	tc := &testConn{Conn: c}
+	d.conns = append(d.conns, tc)
+	return tc, nil
+}
+
+func (d *testDialer) DialTimeout(ntw, addr string, timeout time.Duration) (net.Conn, error) {
+	c, err := net.DialTimeout(ntw, addr, timeout)
+	if err != nil {
+		return nil, err
+	}
+	tc := &testConn{Conn: c}
+	d.conns = append(d.conns, tc)
+	return tc, nil
+}
+
+func TestErrorDuringStartupClosesConn(t *testing.T) {
+	// Don't use the normal connection setup; this is intended to
+	// blow up in the startup packet from a non-existent user.
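+	// testDialer records every connection it hands out so the test can
+	// verify that the failed startup closed the underlying net.Conn.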
+ var d testDialer + c, err := DialOpen(&d, testConninfo("user=thisuserreallydoesntexist")) + if err == nil { + c.Close() + t.Fatal("expected dial error") + } + if len(d.conns) != 1 { + t.Fatalf("got len(d.conns) = %d, want = %d", len(d.conns), 1) + } + if !d.conns[0].closed { + t.Error("connection leaked") + } +} + +func TestBadConn(t *testing.T) { + var err error + + cn := conn{} + func() { + defer cn.errRecover(&err) + panic(io.EOF) + }() + if err != driver.ErrBadConn { + t.Fatalf("expected driver.ErrBadConn, got: %#v", err) + } + if !cn.bad { + t.Fatalf("expected cn.bad") + } + + cn = conn{} + func() { + defer cn.errRecover(&err) + e := &Error{Severity: Efatal} + panic(e) + }() + if err != driver.ErrBadConn { + t.Fatalf("expected driver.ErrBadConn, got: %#v", err) + } + if !cn.bad { + t.Fatalf("expected cn.bad") + } +} + +// TestCloseBadConn tests that the underlying connection can be closed with +// Close after an error. +func TestCloseBadConn(t *testing.T) { + nc, err := net.Dial("tcp", "localhost:5432") + if err != nil { + t.Fatal(err) + } + cn := conn{c: nc} + func() { + defer cn.errRecover(&err) + panic(io.EOF) + }() + // Verify we can write before closing. + if _, err := nc.Write(nil); err != nil { + t.Fatal(err) + } + // First close should close the connection. + if err := cn.Close(); err != nil { + t.Fatal(err) + } + + // During the Go 1.9 cycle, https://github.com/golang/go/commit/3792db5 + // changed this error from + // + // net.errClosing = errors.New("use of closed network connection") + // + // to + // + // internal/poll.ErrClosing = errors.New("use of closed file or network connection") + const errClosing = "use of closed" + + // Verify write after closing fails. + if _, err := nc.Write(nil); err == nil { + t.Fatal("expected error") + } else if !strings.Contains(err.Error(), errClosing) { + t.Fatalf("expected %s error, got %s", errClosing, err) + } + // Verify second close fails. 
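+	// (cn.Close closes the underlying net.Conn, so a second call should
+	// surface the same "use of closed" error as the write above.)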
+ if err := cn.Close(); err == nil { + t.Fatal("expected error") + } else if !strings.Contains(err.Error(), errClosing) { + t.Fatalf("expected %s error, got %s", errClosing, err) + } +} + +func TestErrorOnExec(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMPORARY TABLE foo(f1 int PRIMARY KEY)") + if err != nil { + t.Fatal(err) + } + + _, err = txn.Exec("INSERT INTO foo VALUES (0), (0)") + if err == nil { + t.Fatal("Should have raised error") + } + + e, ok := err.(*Error) + if !ok { + t.Fatalf("expected Error, got %#v", err) + } else if e.Code.Name() != "unique_violation" { + t.Fatalf("expected unique_violation, got %s (%+v)", e.Code.Name(), err) + } +} + +func TestErrorOnQuery(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMPORARY TABLE foo(f1 int PRIMARY KEY)") + if err != nil { + t.Fatal(err) + } + + _, err = txn.Query("INSERT INTO foo VALUES (0), (0)") + if err == nil { + t.Fatal("Should have raised error") + } + + e, ok := err.(*Error) + if !ok { + t.Fatalf("expected Error, got %#v", err) + } else if e.Code.Name() != "unique_violation" { + t.Fatalf("expected unique_violation, got %s (%+v)", e.Code.Name(), err) + } +} + +func TestErrorOnQueryRowSimpleQuery(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMPORARY TABLE foo(f1 int PRIMARY KEY)") + if err != nil { + t.Fatal(err) + } + + var v int + err = txn.QueryRow("INSERT INTO foo VALUES (0), (0)").Scan(&v) + if err == nil { + t.Fatal("Should have raised error") + } + + e, ok := err.(*Error) + if !ok { + t.Fatalf("expected Error, got %#v", err) + } else if e.Code.Name() != "unique_violation" { + t.Fatalf("expected unique_violation, got %s (%+v)", e.Code.Name(), err) + } +} + +// Test the QueryRow bug workarounds in stmt.exec() and simpleQuery() +func TestQueryRowBugWorkaround(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + // stmt.exec() + _, err := db.Exec("CREATE TEMP TABLE notnulltemp (a varchar(10) not null)") + if err != nil { + t.Fatal(err) + } + + var a string + err = db.QueryRow("INSERT INTO notnulltemp(a) values($1) RETURNING a", nil).Scan(&a) + if err == sql.ErrNoRows { + t.Fatalf("expected constraint violation error; got: %v", err) + } + pge, ok := err.(*Error) + if !ok { + t.Fatalf("expected *Error; got: %#v", err) + } + if pge.Code.Name() != "not_null_violation" { + t.Fatalf("expected not_null_violation; got: %s (%+v)", pge.Code.Name(), err) + } + + // Test workaround in simpleQuery() + tx, err := db.Begin() + if err != nil { + t.Fatalf("unexpected error %s in Begin", err) + } + defer tx.Rollback() + + _, err = tx.Exec("SET LOCAL check_function_bodies TO FALSE") + if err != nil { + t.Fatalf("could not disable check_function_bodies: %s", err) + } + _, err = tx.Exec(` +CREATE OR REPLACE FUNCTION bad_function() +RETURNS integer +-- hack to prevent the function from being inlined +SET check_function_bodies TO TRUE +AS $$ + SELECT text 'bad' +$$ LANGUAGE sql`) + if err != nil { + t.Fatalf("could not create function: %s", err) + } + + err = tx.QueryRow("SELECT * FROM bad_function()").Scan(&a) + if err == nil { + t.Fatalf("expected error") + } + pge, ok = err.(*Error) + if !ok { + t.Fatalf("expected *Error; got: %#v", err) + } + if 
pge.Code.Name() != "invalid_function_definition" {
+		t.Fatalf("expected invalid_function_definition; got: %s (%+v)", pge.Code.Name(), err)
+	}
+
+	err = tx.Rollback()
+	if err != nil {
+		t.Fatalf("unexpected error %s in Rollback", err)
+	}
+
+	// Also test that simpleQuery()'s workaround works when the query fails
+	// after a row has been received.
+	rows, err := db.Query(`
+select
+    (select generate_series(1, ss.i))
+from (select gs.i
+      from generate_series(1, 2) gs(i)
+      order by gs.i limit 2) ss`)
+	if err != nil {
+		t.Fatalf("query failed: %s", err)
+	}
+	if !rows.Next() {
+		t.Fatalf("expected at least one result row; got %s", rows.Err())
+	}
+	var i int
+	err = rows.Scan(&i)
+	if err != nil {
+		t.Fatalf("rows.Scan() failed: %s", err)
+	}
+	if i != 1 {
+		t.Fatalf("unexpected value for i: %d", i)
+	}
+	if rows.Next() {
+		t.Fatalf("unexpected row")
+	}
+	pge, ok = rows.Err().(*Error)
+	if !ok {
+		t.Fatalf("expected *Error; got: %#v", rows.Err())
+	}
+	if pge.Code.Name() != "cardinality_violation" {
+		t.Fatalf("expected cardinality_violation; got: %s (%+v)", pge.Code.Name(), rows.Err())
+	}
+}
+
+func TestSimpleQuery(t *testing.T) {
+	db := openTestConn(t)
+	defer db.Close()
+
+	r, err := db.Query("select 1")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer r.Close()
+
+	if !r.Next() {
+		t.Fatal("expected row")
+	}
+}
+
+func TestBindError(t *testing.T) {
+	db := openTestConn(t)
+	defer db.Close()
+
+	_, err := db.Exec("create temp table test (i integer)")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = db.Query("select * from test where i=$1", "hhh")
+	if err == nil {
+		t.Fatal("expected an error")
+	}
+
+	// Should not get error here
+	r, err := db.Query("select * from test where i=$1", 1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer r.Close()
+}
+
+func TestParseErrorInExtendedQuery(t *testing.T) {
+	db := openTestConn(t)
+	defer db.Close()
+
+	_, err := db.Query("PARSE_ERROR $1", 1)
+	pqErr, _ := err.(*Error)
+	// Expecting a syntax error.
+	if err == nil || pqErr == nil || pqErr.Code != "42601" {
+		t.Fatalf("expected syntax error, got %s", err)
+	}
+
+	rows, err := db.Query("SELECT 1")
+	if err != nil {
+		t.Fatal(err)
+	}
+	rows.Close()
+}
+
+// TestReturning tests that an INSERT query using the RETURNING clause returns a row.
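+// A minimal sketch of the pattern it exercises (the table and column names
+// here are illustrative only, not part of this test):
+//
+//	var id int
+//	err := db.QueryRow("INSERT INTO t (v) VALUES ($1) RETURNING id", 7).Scan(&id)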
+func TestReturning(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + _, err := db.Exec("CREATE TEMP TABLE distributors (did integer default 0, dname text)") + if err != nil { + t.Fatal(err) + } + + rows, err := db.Query("INSERT INTO distributors (did, dname) VALUES (DEFAULT, 'XYZ Widgets') " + + "RETURNING did;") + if err != nil { + t.Fatal(err) + } + if !rows.Next() { + t.Fatal("no rows") + } + var did int + err = rows.Scan(&did) + if err != nil { + t.Fatal(err) + } + if did != 0 { + t.Fatalf("bad value for did: got %d, want %d", did, 0) + } + + if rows.Next() { + t.Fatal("unexpected next row") + } + err = rows.Err() + if err != nil { + t.Fatal(err) + } +} + +func TestIssue186(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + // Exec() a query which returns results + _, err := db.Exec("VALUES (1), (2), (3)") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("VALUES ($1), ($2), ($3)", 1, 2, 3) + if err != nil { + t.Fatal(err) + } + + // Query() a query which doesn't return any results + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + rows, err := txn.Query("CREATE TEMP TABLE foo(f1 int)") + if err != nil { + t.Fatal(err) + } + if err = rows.Close(); err != nil { + t.Fatal(err) + } + + // small trick to get NoData from a parameterized query + _, err = txn.Exec("CREATE RULE nodata AS ON INSERT TO foo DO INSTEAD NOTHING") + if err != nil { + t.Fatal(err) + } + rows, err = txn.Query("INSERT INTO foo VALUES ($1)", 1) + if err != nil { + t.Fatal(err) + } + if err = rows.Close(); err != nil { + t.Fatal(err) + } +} + +func TestIssue196(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + row := db.QueryRow("SELECT float4 '0.10000122' = $1, float8 '35.03554004971999' = $2", + float32(0.10000122), float64(35.03554004971999)) + + var float4match, float8match bool + err := row.Scan(&float4match, &float8match) + if err != nil { + t.Fatal(err) + } + if !float4match { + t.Errorf("Expected float4 fidelity to be maintained; got no match") + } + if !float8match { + t.Errorf("Expected float8 fidelity to be maintained; got no match") + } +} + +// Test that any CommandComplete messages sent before the query results are +// ignored. 
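+// (The multi-statement query below issues two SETs ahead of the SHOW; each
+// SET emits its own CommandComplete, which must not be mistaken for the
+// SHOW's row data.)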
+func TestIssue282(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + var searchPath string + err := db.QueryRow(` + SET LOCAL search_path TO pg_catalog; + SET LOCAL search_path TO pg_catalog; + SHOW search_path`).Scan(&searchPath) + if err != nil { + t.Fatal(err) + } + if searchPath != "pg_catalog" { + t.Fatalf("unexpected search_path %s", searchPath) + } +} + +func TestReadFloatPrecision(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + row := db.QueryRow("SELECT float4 '0.10000122', float8 '35.03554004971999'") + var float4val float32 + var float8val float64 + err := row.Scan(&float4val, &float8val) + if err != nil { + t.Fatal(err) + } + if float4val != float32(0.10000122) { + t.Errorf("Expected float4 fidelity to be maintained; got no match") + } + if float8val != float64(35.03554004971999) { + t.Errorf("Expected float8 fidelity to be maintained; got no match") + } +} + +func TestXactMultiStmt(t *testing.T) { + // minified test case based on bug reports from + // pico303@gmail.com and rangelspam@gmail.com + t.Skip("Skipping failing test") + db := openTestConn(t) + defer db.Close() + + tx, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer tx.Commit() + + rows, err := tx.Query("select 1") + if err != nil { + t.Fatal(err) + } + + if rows.Next() { + var val int32 + if err = rows.Scan(&val); err != nil { + t.Fatal(err) + } + } else { + t.Fatal("Expected at least one row in first query in xact") + } + + rows2, err := tx.Query("select 2") + if err != nil { + t.Fatal(err) + } + + if rows2.Next() { + var val2 int32 + if err := rows2.Scan(&val2); err != nil { + t.Fatal(err) + } + } else { + t.Fatal("Expected at least one row in second query in xact") + } + + if err = rows.Err(); err != nil { + t.Fatal(err) + } + + if err = rows2.Err(); err != nil { + t.Fatal(err) + } + + if err = tx.Commit(); err != nil { + t.Fatal(err) + } +} + +var envParseTests = []struct { + Expected map[string]string + Env []string +}{ + { + Env: []string{"PGDATABASE=hello", "PGUSER=goodbye"}, + Expected: map[string]string{"dbname": "hello", "user": "goodbye"}, + }, + { + Env: []string{"PGDATESTYLE=ISO, MDY"}, + Expected: map[string]string{"datestyle": "ISO, MDY"}, + }, + { + Env: []string{"PGCONNECT_TIMEOUT=30"}, + Expected: map[string]string{"connect_timeout": "30"}, + }, +} + +func TestParseEnviron(t *testing.T) { + for i, tt := range envParseTests { + results := parseEnviron(tt.Env) + if !reflect.DeepEqual(tt.Expected, results) { + t.Errorf("%d: Expected: %#v Got: %#v", i, tt.Expected, results) + } + } +} + +func TestParseComplete(t *testing.T) { + tpc := func(commandTag string, command string, affectedRows int64, shouldFail bool) { + defer func() { + if p := recover(); p != nil { + if !shouldFail { + t.Error(p) + } + } + }() + cn := &conn{} + res, c := cn.parseComplete(commandTag) + if c != command { + t.Errorf("Expected %v, got %v", command, c) + } + n, err := res.RowsAffected() + if err != nil { + t.Fatal(err) + } + if n != affectedRows { + t.Errorf("Expected %d, got %d", affectedRows, n) + } + } + + tpc("ALTER TABLE", "ALTER TABLE", 0, false) + tpc("INSERT 0 1", "INSERT", 1, false) + tpc("UPDATE 100", "UPDATE", 100, false) + tpc("SELECT 100", "SELECT", 100, false) + tpc("FETCH 100", "FETCH", 100, false) + // allow COPY (and others) without row count + tpc("COPY", "COPY", 0, false) + // don't fail on command tags we don't recognize + tpc("UNKNOWNCOMMANDTAG", "UNKNOWNCOMMANDTAG", 0, false) + + // failure cases + tpc("INSERT 1", "", 0, true) // missing oid + tpc("UPDATE 0 1", "", 
0, true) // too many numbers
+	tpc("SELECT foo", "", 0, true)  // invalid row count
+}
+
+// Test interface conformance.
+var (
+	_ driver.ExecerContext  = (*conn)(nil)
+	_ driver.QueryerContext = (*conn)(nil)
+)
+
+func TestNullAfterNonNull(t *testing.T) {
+	db := openTestConn(t)
+	defer db.Close()
+
+	r, err := db.Query("SELECT 9::integer UNION SELECT NULL::integer")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var n sql.NullInt64
+
+	if !r.Next() {
+		if r.Err() != nil {
+			t.Fatal(r.Err())
+		}
+		t.Fatal("expected row")
+	}
+
+	if err := r.Scan(&n); err != nil {
+		t.Fatal(err)
+	}
+
+	if n.Int64 != 9 {
+		t.Fatalf("expected 9, not %d", n.Int64)
+	}
+
+	if !r.Next() {
+		if r.Err() != nil {
+			t.Fatal(r.Err())
+		}
+		t.Fatal("expected row")
+	}
+
+	if err := r.Scan(&n); err != nil {
+		t.Fatal(err)
+	}
+
+	if n.Valid {
+		t.Fatal("expected n to be invalid")
+	}
+
+	if n.Int64 != 0 {
+		t.Fatalf("expected n.Int64 to be 0, not %d", n.Int64)
+	}
+}
+
+func Test64BitErrorChecking(t *testing.T) {
+	defer func() {
+		if err := recover(); err != nil {
+			t.Fatal("panic due to 0xFFFFFFFF != -1 " +
+				"when int is 64 bits")
+		}
+	}()
+
+	db := openTestConn(t)
+	defer db.Close()
+
+	r, err := db.Query(`SELECT *
+FROM (VALUES (0::integer, NULL::text), (1, 'test string')) AS t;`)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	defer r.Close()
+
+	for r.Next() {
+	}
+}
+
+func TestCommit(t *testing.T) {
+	db := openTestConn(t)
+	defer db.Close()
+
+	_, err := db.Exec("CREATE TEMP TABLE temp (a int)")
+	if err != nil {
+		t.Fatal(err)
+	}
+	sqlInsert := "INSERT INTO temp VALUES (1)"
+	sqlSelect := "SELECT * FROM temp"
+	tx, err := db.Begin()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = tx.Exec(sqlInsert)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = tx.Commit()
+	if err != nil {
+		t.Fatal(err)
+	}
+	var i int
+	err = db.QueryRow(sqlSelect).Scan(&i)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if i != 1 {
+		t.Fatalf("expected 1, got %d", i)
+	}
+}
+
+func TestErrorClass(t *testing.T) {
+	db := openTestConn(t)
+	defer db.Close()
+
+	_, err := db.Query("SELECT int 'notint'")
+	if err == nil {
+		t.Fatal("expected error")
+	}
+	pge, ok := err.(*Error)
+	if !ok {
+		t.Fatalf("expected *pq.Error, got %#+v", err)
+	}
+	if pge.Code.Class() != "22" {
+		t.Fatalf("expected class 22, got %v", pge.Code.Class())
+	}
+	if pge.Code.Class().Name() != "data_exception" {
+		t.Fatalf("expected data_exception, got %v", pge.Code.Class().Name())
+	}
+}
+
+func TestParseOpts(t *testing.T) {
+	tests := []struct {
+		in       string
+		expected values
+		valid    bool
+	}{
+		{"dbname=hello user=goodbye", values{"dbname": "hello", "user": "goodbye"}, true},
+		{"dbname=hello user=goodbye  ", values{"dbname": "hello", "user": "goodbye"}, true},
+		{"dbname = hello user=goodbye", values{"dbname": "hello", "user": "goodbye"}, true},
+		{"dbname=hello user =goodbye", values{"dbname": "hello", "user": "goodbye"}, true},
+		{"dbname=hello user= goodbye", values{"dbname": "hello", "user": "goodbye"}, true},
+		{"host=localhost password='correct horse battery staple'", values{"host": "localhost", "password": "correct horse battery staple"}, true},
+		{"dbname=データベース password=パスワード", values{"dbname": "データベース", "password": "パスワード"}, true},
+		{"dbname=hello user=''", values{"dbname": "hello", "user": ""}, true},
+		{"user='' dbname=hello", values{"dbname": "hello", "user": ""}, true},
+		// The last option value is an empty string if there's no non-whitespace after its =
+		{"dbname=hello user=   ", values{"dbname": "hello", "user": ""}, true},
+
+		// The parser ignores spaces after = and interprets the next 
set of non-whitespace characters as the value. + {"user= password=foo", values{"user": "password=foo"}, true}, + + // Backslash escapes next char + {`user=a\ \'\\b`, values{"user": `a '\b`}, true}, + {`user='a \'b'`, values{"user": `a 'b`}, true}, + + // Incomplete escape + {`user=x\`, values{}, false}, + + // No '=' after the key + {"postgre://marko@internet", values{}, false}, + {"dbname user=goodbye", values{}, false}, + {"user=foo blah", values{}, false}, + {"user=foo blah ", values{}, false}, + + // Unterminated quoted value + {"dbname=hello user='unterminated", values{}, false}, + } + + for _, test := range tests { + o := make(values) + err := parseOpts(test.in, o) + + switch { + case err != nil && test.valid: + t.Errorf("%q got unexpected error: %s", test.in, err) + case err == nil && test.valid && !reflect.DeepEqual(test.expected, o): + t.Errorf("%q got: %#v want: %#v", test.in, o, test.expected) + case err == nil && !test.valid: + t.Errorf("%q expected an error", test.in) + } + } +} + +func TestRuntimeParameters(t *testing.T) { + tests := []struct { + conninfo string + param string + expected string + success bool + }{ + // invalid parameter + {"DOESNOTEXIST=foo", "", "", false}, + // we can only work with a specific value for these two + {"client_encoding=SQL_ASCII", "", "", false}, + {"datestyle='ISO, YDM'", "", "", false}, + // "options" should work exactly as it does in libpq + {"options='-c search_path=pqgotest'", "search_path", "pqgotest", true}, + // pq should override client_encoding in this case + {"options='-c client_encoding=SQL_ASCII'", "client_encoding", "UTF8", true}, + // allow client_encoding to be set explicitly + {"client_encoding=UTF8", "client_encoding", "UTF8", true}, + // test a runtime parameter not supported by libpq + {"work_mem='139kB'", "work_mem", "139kB", true}, + // test fallback_application_name + {"application_name=foo fallback_application_name=bar", "application_name", "foo", true}, + {"application_name='' fallback_application_name=bar", "application_name", "", true}, + {"fallback_application_name=bar", "application_name", "bar", true}, + } + + for _, test := range tests { + db, err := openTestConnConninfo(test.conninfo) + if err != nil { + t.Fatal(err) + } + + // application_name didn't exist before 9.0 + if test.param == "application_name" && getServerVersion(t, db) < 90000 { + db.Close() + continue + } + + tryGetParameterValue := func() (value string, success bool) { + defer db.Close() + row := db.QueryRow("SELECT current_setting($1)", test.param) + err = row.Scan(&value) + if err != nil { + return "", false + } + return value, true + } + + value, success := tryGetParameterValue() + if success != test.success && !test.success { + t.Fatalf("%v: unexpected error: %v", test.conninfo, err) + } + if success != test.success { + t.Fatalf("unexpected outcome %v (was expecting %v) for conninfo \"%s\"", + success, test.success, test.conninfo) + } + if value != test.expected { + t.Fatalf("bad value for %s: got %s, want %s with conninfo \"%s\"", + test.param, value, test.expected, test.conninfo) + } + } +} + +func TestIsUTF8(t *testing.T) { + var cases = []struct { + name string + want bool + }{ + {"unicode", true}, + {"utf-8", true}, + {"utf_8", true}, + {"UTF-8", true}, + {"UTF8", true}, + {"utf8", true}, + {"u n ic_ode", true}, + {"ut_f%8", true}, + {"ubf8", false}, + {"punycode", false}, + } + + for _, test := range cases { + if g := isUTF8(test.name); g != test.want { + t.Errorf("isUTF8(%q) = %v want %v", test.name, g, test.want) + } + } +} + +func 
TestQuoteIdentifier(t *testing.T) {
+	var cases = []struct {
+		input string
+		want  string
+	}{
+		{`foo`, `"foo"`},
+		{`foo bar baz`, `"foo bar baz"`},
+		{`foo"bar`, `"foo""bar"`},
+		{"foo\x00bar", `"foo"`},
+		{"\x00foo", `""`},
+	}
+
+	for _, test := range cases {
+		got := QuoteIdentifier(test.input)
+		if got != test.want {
+			t.Errorf("QuoteIdentifier(%q) = %v want %v", test.input, got, test.want)
+		}
+	}
+}
+
+func TestRowsResultTag(t *testing.T) {
+	type ResultTag interface {
+		Result() driver.Result
+		Tag() string
+	}
+
+	tests := []struct {
+		query string
+		tag   string
+		ra    int64
+	}{
+		{
+			query: "CREATE TEMP TABLE temp (a int)",
+			tag:   "CREATE TABLE",
+		},
+		{
+			query: "INSERT INTO temp VALUES (1), (2)",
+			tag:   "INSERT",
+			ra:    2,
+		},
+		{
+			query: "SELECT 1",
+		},
+		// A SELECT anywhere should take precedence.
+		{
+			query: "SELECT 1; INSERT INTO temp VALUES (1), (2)",
+		},
+		{
+			query: "INSERT INTO temp VALUES (1), (2); SELECT 1",
+		},
+		// Multiple statements that don't return rows should return the last tag.
+		{
+			query: "CREATE TEMP TABLE t (a int); DROP TABLE t",
+			tag:   "DROP TABLE",
+		},
+		// Ensure a rows-returning query in any position among various tag-returning
+		// statements will prefer the rows.
+		{
+			query: "SELECT 1; CREATE TEMP TABLE t (a int); DROP TABLE t",
+		},
+		{
+			query: "CREATE TEMP TABLE t (a int); SELECT 1; DROP TABLE t",
+		},
+		{
+			query: "CREATE TEMP TABLE t (a int); DROP TABLE t; SELECT 1",
+		},
+		// Verify that a no-results query doesn't set the tag.
+		{
+			query: "CREATE TEMP TABLE t (a int); SELECT 1 WHERE FALSE; DROP TABLE t;",
+		},
+	}
+
+	// Run openTestConn once so the default connection parameters are set
+	// up, even if this is the only test being run.
+	openTestConn(t).Close()
+
+	conn, err := Open("")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer conn.Close()
+	q := conn.(driver.QueryerContext)
+
+	for _, test := range tests {
+		if rows, err := q.QueryContext(context.Background(), test.query, nil); err != nil {
+			t.Fatalf("%s: %s", test.query, err)
+		} else {
+			r := rows.(ResultTag)
+			if tag := r.Tag(); tag != test.tag {
+				t.Fatalf("%s: unexpected tag %q", test.query, tag)
+			}
+			res := r.Result()
+			if ra, _ := res.RowsAffected(); ra != test.ra {
+				t.Fatalf("%s: unexpected rows affected: %d", test.query, ra)
+			}
+			rows.Close()
+		}
+	}
+}
+
+// TestQuickClose tests that closing a query early allows a subsequent query to work.
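+// Closing the rows early forces the driver to drain the remaining protocol
+// messages for both result sets (see rows.Close), leaving the connection
+// ready for the next query.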
+func TestQuickClose(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + tx, err := db.Begin() + if err != nil { + t.Fatal(err) + } + rows, err := tx.Query("SELECT 1; SELECT 2;") + if err != nil { + t.Fatal(err) + } + if err := rows.Close(); err != nil { + t.Fatal(err) + } + + var id int + if err := tx.QueryRow("SELECT 3").Scan(&id); err != nil { + t.Fatal(err) + } + if id != 3 { + t.Fatalf("unexpected %d", id) + } + if err := tx.Commit(); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/lib/pq/copy.go b/vendor/github.com/lib/pq/copy.go new file mode 100644 index 0000000..345c239 --- /dev/null +++ b/vendor/github.com/lib/pq/copy.go @@ -0,0 +1,282 @@ +package pq + +import ( + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "sync" +) + +var ( + errCopyInClosed = errors.New("pq: copyin statement has already been closed") + errBinaryCopyNotSupported = errors.New("pq: only text format supported for COPY") + errCopyToNotSupported = errors.New("pq: COPY TO is not supported") + errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction") + errCopyInProgress = errors.New("pq: COPY in progress") +) + +// CopyIn creates a COPY FROM statement which can be prepared with +// Tx.Prepare(). The target table should be visible in search_path. +func CopyIn(table string, columns ...string) string { + stmt := "COPY " + QuoteIdentifier(table) + " (" + for i, col := range columns { + if i != 0 { + stmt += ", " + } + stmt += QuoteIdentifier(col) + } + stmt += ") FROM STDIN" + return stmt +} + +// CopyInSchema creates a COPY FROM statement which can be prepared with +// Tx.Prepare(). +func CopyInSchema(schema, table string, columns ...string) string { + stmt := "COPY " + QuoteIdentifier(schema) + "." + QuoteIdentifier(table) + " (" + for i, col := range columns { + if i != 0 { + stmt += ", " + } + stmt += QuoteIdentifier(col) + } + stmt += ") FROM STDIN" + return stmt +} + +type copyin struct { + cn *conn + buffer []byte + rowData chan []byte + done chan bool + + closed bool + + sync.Mutex // guards err + err error +} + +const ciBufferSize = 64 * 1024 + +// flush buffer before the buffer is filled up and needs reallocation +const ciBufferFlushSize = 63 * 1024 + +func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) { + if !cn.isInTransaction() { + return nil, errCopyNotSupportedOutsideTxn + } + + ci := &copyin{ + cn: cn, + buffer: make([]byte, 0, ciBufferSize), + rowData: make(chan []byte), + done: make(chan bool, 1), + } + // add CopyData identifier + 4 bytes for message length + ci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0) + + b := cn.writeBuf('Q') + b.string(q) + cn.send(b) + +awaitCopyInResponse: + for { + t, r := cn.recv1() + switch t { + case 'G': + if r.byte() != 0 { + err = errBinaryCopyNotSupported + break awaitCopyInResponse + } + go ci.resploop() + return ci, nil + case 'H': + err = errCopyToNotSupported + break awaitCopyInResponse + case 'E': + err = parseError(r) + case 'Z': + if err == nil { + ci.setBad() + errorf("unexpected ReadyForQuery in response to COPY") + } + cn.processReadyForQuery(r) + return nil, err + default: + ci.setBad() + errorf("unknown response for copy query: %q", t) + } + } + + // something went wrong, abort COPY before we return + b = cn.writeBuf('f') + b.string(err.Error()) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'c', 'C', 'E': + case 'Z': + // correctly aborted, we're done + cn.processReadyForQuery(r) + return nil, err + default: + ci.setBad() + errorf("unknown response
for CopyFail: %q", t) + } + } +} + +func (ci *copyin) flush(buf []byte) { + // set message length (without message identifier) + binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1)) + + _, err := ci.cn.c.Write(buf) + if err != nil { + panic(err) + } +} + +func (ci *copyin) resploop() { + for { + var r readBuf + t, err := ci.cn.recvMessage(&r) + if err != nil { + ci.setBad() + ci.setError(err) + ci.done <- true + return + } + switch t { + case 'C': + // complete + case 'N': + // NoticeResponse + case 'Z': + ci.cn.processReadyForQuery(&r) + ci.done <- true + return + case 'E': + err := parseError(&r) + ci.setError(err) + default: + ci.setBad() + ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t)) + ci.done <- true + return + } + } +} + +func (ci *copyin) setBad() { + ci.Lock() + ci.cn.bad = true + ci.Unlock() +} + +func (ci *copyin) isBad() bool { + ci.Lock() + b := ci.cn.bad + ci.Unlock() + return b +} + +func (ci *copyin) isErrorSet() bool { + ci.Lock() + isSet := (ci.err != nil) + ci.Unlock() + return isSet +} + +// setError() sets ci.err if one has not been set already. Caller must not be +// holding ci.Mutex. +func (ci *copyin) setError(err error) { + ci.Lock() + if ci.err == nil { + ci.err = err + } + ci.Unlock() +} + +func (ci *copyin) NumInput() int { + return -1 +} + +func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) { + return nil, ErrNotSupported +} + +// Exec inserts values into the COPY stream. The insert is asynchronous +// and Exec can return errors from previous Exec calls to the same +// COPY stmt. +// +// You need to call Exec(nil) to sync the COPY stream and to get any +// errors from pending data, since Stmt.Close() doesn't return errors +// to the user. +func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) { + if ci.closed { + return nil, errCopyInClosed + } + + if ci.isBad() { + return nil, driver.ErrBadConn + } + defer ci.cn.errRecover(&err) + + if ci.isErrorSet() { + return nil, ci.err + } + + if len(v) == 0 { + return nil, ci.Close() + } + + numValues := len(v) + for i, value := range v { + ci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value) + if i < numValues-1 { + ci.buffer = append(ci.buffer, '\t') + } + } + + ci.buffer = append(ci.buffer, '\n') + + if len(ci.buffer) > ciBufferFlushSize { + ci.flush(ci.buffer) + // reset buffer, keep bytes for message identifier and length + ci.buffer = ci.buffer[:5] + } + + return driver.RowsAffected(0), nil +} + +func (ci *copyin) Close() (err error) { + if ci.closed { // Don't do anything, we're already closed + return nil + } + ci.closed = true + + if ci.isBad() { + return driver.ErrBadConn + } + defer ci.cn.errRecover(&err) + + if len(ci.buffer) > 0 { + ci.flush(ci.buffer) + } + // Avoid touching the scratch buffer as resploop could be using it. 
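+ // Send CopyDone ('c') to finish the COPY; resploop consumes the server's CommandComplete and ReadyForQuery replies and then signals on ci.done.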
+ err = ci.cn.sendSimpleMessage('c') + if err != nil { + return err + } + + <-ci.done + ci.cn.inCopy = false + + if ci.isErrorSet() { + err = ci.err + return err + } + return nil +} diff --git a/vendor/github.com/lib/pq/copy_test.go b/vendor/github.com/lib/pq/copy_test.go new file mode 100644 index 0000000..a888a89 --- /dev/null +++ b/vendor/github.com/lib/pq/copy_test.go @@ -0,0 +1,468 @@ +package pq + +import ( + "bytes" + "database/sql" + "database/sql/driver" + "net" + "strings" + "testing" +) + +func TestCopyInStmt(t *testing.T) { + stmt := CopyIn("table name") + if stmt != `COPY "table name" () FROM STDIN` { + t.Fatal(stmt) + } + + stmt = CopyIn("table name", "column 1", "column 2") + if stmt != `COPY "table name" ("column 1", "column 2") FROM STDIN` { + t.Fatal(stmt) + } + + stmt = CopyIn(`table " name """`, `co"lumn""`) + if stmt != `COPY "table "" name """"""" ("co""lumn""""") FROM STDIN` { + t.Fatal(stmt) + } +} + +func TestCopyInSchemaStmt(t *testing.T) { + stmt := CopyInSchema("schema name", "table name") + if stmt != `COPY "schema name"."table name" () FROM STDIN` { + t.Fatal(stmt) + } + + stmt = CopyInSchema("schema name", "table name", "column 1", "column 2") + if stmt != `COPY "schema name"."table name" ("column 1", "column 2") FROM STDIN` { + t.Fatal(stmt) + } + + stmt = CopyInSchema(`schema " name """`, `table " name """`, `co"lumn""`) + if stmt != `COPY "schema "" name """"""".`+ + `"table "" name """"""" ("co""lumn""""") FROM STDIN` { + t.Fatal(stmt) + } +} + +func TestCopyInMultipleValues(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMP TABLE temp (a int, b varchar)") + if err != nil { + t.Fatal(err) + } + + stmt, err := txn.Prepare(CopyIn("temp", "a", "b")) + if err != nil { + t.Fatal(err) + } + + longString := strings.Repeat("#", 500) + + for i := 0; i < 500; i++ { + _, err = stmt.Exec(int64(i), longString) + if err != nil { + t.Fatal(err) + } + } + + _, err = stmt.Exec() + if err != nil { + t.Fatal(err) + } + + err = stmt.Close() + if err != nil { + t.Fatal(err) + } + + var num int + err = txn.QueryRow("SELECT COUNT(*) FROM temp").Scan(&num) + if err != nil { + t.Fatal(err) + } + + if num != 500 { + t.Fatalf("expected 500 items, not %d", num) + } +} + +func TestCopyInRaiseStmtTrigger(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + if getServerVersion(t, db) < 90000 { + var exists int + err := db.QueryRow("SELECT 1 FROM pg_language WHERE lanname = 'plpgsql'").Scan(&exists) + if err == sql.ErrNoRows { + t.Skip("language PL/PgSQL does not exist; skipping TestCopyInRaiseStmtTrigger") + } else if err != nil { + t.Fatal(err) + } + } + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMP TABLE temp (a int, b varchar)") + if err != nil { + t.Fatal(err) + } + + _, err = txn.Exec(` + CREATE OR REPLACE FUNCTION pg_temp.temptest() + RETURNS trigger AS + $BODY$ begin + raise notice 'Hello world'; + return new; + end $BODY$ + LANGUAGE plpgsql`) + if err != nil { + t.Fatal(err) + } + + _, err = txn.Exec(` + CREATE TRIGGER temptest_trigger + BEFORE INSERT + ON temp + FOR EACH ROW + EXECUTE PROCEDURE pg_temp.temptest()`) + if err != nil { + t.Fatal(err) + } + + stmt, err := txn.Prepare(CopyIn("temp", "a", "b")) + if err != nil { + t.Fatal(err) + } + + longString := strings.Repeat("#", 500) + + _, err = stmt.Exec(int64(1), longString) + if err != nil { + t.Fatal(err) + } + 
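+ // An Exec with no arguments syncs the COPY stream, flushing the buffered row and surfacing any error from data already sent.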
+ _, err = stmt.Exec() + if err != nil { + t.Fatal(err) + } + + err = stmt.Close() + if err != nil { + t.Fatal(err) + } + + var num int + err = txn.QueryRow("SELECT COUNT(*) FROM temp").Scan(&num) + if err != nil { + t.Fatal(err) + } + + if num != 1 { + t.Fatalf("expected 1 item, not %d", num) + } +} + +func TestCopyInTypes(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER, text VARCHAR, blob BYTEA, nothing VARCHAR)") + if err != nil { + t.Fatal(err) + } + + stmt, err := txn.Prepare(CopyIn("temp", "num", "text", "blob", "nothing")) + if err != nil { + t.Fatal(err) + } + + _, err = stmt.Exec(int64(1234567890), "Héllö\n ☃!\r\t\\", []byte{0, 255, 9, 10, 13}, nil) + if err != nil { + t.Fatal(err) + } + + _, err = stmt.Exec() + if err != nil { + t.Fatal(err) + } + + err = stmt.Close() + if err != nil { + t.Fatal(err) + } + + var num int + var text string + var blob []byte + var nothing sql.NullString + + err = txn.QueryRow("SELECT * FROM temp").Scan(&num, &text, &blob, &nothing) + if err != nil { + t.Fatal(err) + } + + if num != 1234567890 { + t.Fatal("unexpected result", num) + } + if text != "Héllö\n ☃!\r\t\\" { + t.Fatal("unexpected result", text) + } + if !bytes.Equal(blob, []byte{0, 255, 9, 10, 13}) { + t.Fatal("unexpected result", blob) + } + if nothing.Valid { + t.Fatal("unexpected result", nothing.String) + } +} + +func TestCopyInWrongType(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER)") + if err != nil { + t.Fatal(err) + } + + stmt, err := txn.Prepare(CopyIn("temp", "num")) + if err != nil { + t.Fatal(err) + } + defer stmt.Close() + + _, err = stmt.Exec("Héllö\n ☃!\r\t\\") + if err != nil { + t.Fatal(err) + } + + _, err = stmt.Exec() + if err == nil { + t.Fatal("expected error") + } + if pge := err.(*Error); pge.Code.Name() != "invalid_text_representation" { + t.Fatalf("expected 'invalid input syntax for integer' error, got %s (%+v)", pge.Code.Name(), pge) + } +} + +func TestCopyOutsideOfTxnError(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + _, err := db.Prepare(CopyIn("temp", "num")) + if err == nil { + t.Fatal("COPY outside of transaction did not return an error") + } + if err != errCopyNotSupportedOutsideTxn { + t.Fatalf("expected %s, got %s", errCopyNotSupportedOutsideTxn, err) + } +} + +func TestCopyInBinaryError(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER)") + if err != nil { + t.Fatal(err) + } + _, err = txn.Prepare("COPY temp (num) FROM STDIN WITH binary") + if err != errBinaryCopyNotSupported { + t.Fatalf("expected %s, got %+v", errBinaryCopyNotSupported, err) + } + // check that the protocol is in a valid state + err = txn.Rollback() + if err != nil { + t.Fatal(err) + } +} + +func TestCopyFromError(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER)") + if err != nil { + t.Fatal(err) + } + _, err = txn.Prepare("COPY temp (num) TO STDOUT") + if err != errCopyToNotSupported { + t.Fatalf("expected %s, got %+v", errCopyToNotSupported, err) + } + // check that the
protocol is in a valid state + err = txn.Rollback() + if err != nil { + t.Fatal(err) + } +} + +func TestCopySyntaxError(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Prepare("COPY ") + if err == nil { + t.Fatal("expected error") + } + if pge := err.(*Error); pge.Code.Name() != "syntax_error" { + t.Fatalf("expected syntax error, got %s (%+v)", pge.Code.Name(), pge) + } + // check that the protocol is in a valid state + err = txn.Rollback() + if err != nil { + t.Fatal(err) + } +} + +// Tests for connection errors in copyin.resploop() +func TestCopyRespLoopConnectionError(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + var pid int + err = txn.QueryRow("SELECT pg_backend_pid()").Scan(&pid) + if err != nil { + t.Fatal(err) + } + + _, err = txn.Exec("CREATE TEMP TABLE temp (a int)") + if err != nil { + t.Fatal(err) + } + + stmt, err := txn.Prepare(CopyIn("temp", "a")) + if err != nil { + t.Fatal(err) + } + defer stmt.Close() + + _, err = db.Exec("SELECT pg_terminate_backend($1)", pid) + if err != nil { + t.Fatal(err) + } + + if getServerVersion(t, db) < 90500 { + // We have to try and send something over, since postgres before + // version 9.5 won't process SIGTERMs while it's waiting for + // CopyData/CopyEnd messages; see tcop/postgres.c. + _, err = stmt.Exec(1) + if err != nil { + t.Fatal(err) + } + } + _, err = stmt.Exec() + if err == nil { + t.Fatalf("expected error") + } + switch pge := err.(type) { + case *Error: + if pge.Code.Name() != "admin_shutdown" { + t.Fatalf("expected admin_shutdown, got %s", pge.Code.Name()) + } + case *net.OpError: + // ignore + default: + if err == driver.ErrBadConn { + // likely an EPIPE + } else { + t.Fatalf("unexpected error, got %+#v", err) + } + } + + _ = stmt.Close() +} + +func BenchmarkCopyIn(b *testing.B) { + db := openTestConn(b) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + b.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMP TABLE temp (a int, b varchar)") + if err != nil { + b.Fatal(err) + } + + stmt, err := txn.Prepare(CopyIn("temp", "a", "b")) + if err != nil { + b.Fatal(err) + } + + for i := 0; i < b.N; i++ { + _, err = stmt.Exec(int64(i), "hello world!") + if err != nil { + b.Fatal(err) + } + } + + _, err = stmt.Exec() + if err != nil { + b.Fatal(err) + } + + err = stmt.Close() + if err != nil { + b.Fatal(err) + } + + var num int + err = txn.QueryRow("SELECT COUNT(*) FROM temp").Scan(&num) + if err != nil { + b.Fatal(err) + } + + if num != b.N { + b.Fatalf("expected %d items, not %d", b.N, num) + } +} diff --git a/vendor/github.com/lib/pq/doc.go b/vendor/github.com/lib/pq/doc.go new file mode 100644 index 0000000..a1b0297 --- /dev/null +++ b/vendor/github.com/lib/pq/doc.go @@ -0,0 +1,245 @@ +/* +Package pq is a pure Go Postgres driver for the database/sql package. + +In most cases clients will use the database/sql package instead of +using this package directly. For example: + + import ( + "database/sql" + + _ "github.com/lib/pq" + ) + + func main() { + connStr := "user=pqgotest dbname=pqgotest sslmode=verify-full" + db, err := sql.Open("postgres", connStr) + if err != nil { + log.Fatal(err) + } + + age := 21 + rows, err := db.Query("SELECT name FROM users WHERE age = $1", age) + … + } + +You can also connect to a database using a URL. 
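+Such URLs take the usual postgres:// form, and connection parameters can be supplied as query-string options.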
For example: + + connStr := "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full" + db, err := sql.Open("postgres", connStr) + + +Connection String Parameters + + +Similarly to libpq, when establishing a connection using pq you are expected to +supply a connection string containing zero or more parameters. +A subset of the connection parameters supported by libpq are also supported by pq. +Additionally, pq also lets you specify run-time parameters (such as search_path or work_mem) +directly in the connection string. This is different from libpq, which does not allow +run-time parameters in the connection string, instead requiring you to supply +them in the options parameter. + +For compatibility with libpq, the following special connection parameters are +supported: + + * dbname - The name of the database to connect to + * user - The user to sign in as + * password - The user's password + * host - The host to connect to. Values that start with / are for unix + domain sockets. (default is localhost) + * port - The port to bind to. (default is 5432) + * sslmode - Whether or not to use SSL (default is require, this is not + the default for libpq) + * fallback_application_name - An application_name to fall back to if one isn't provided. + * connect_timeout - Maximum wait for connection, in seconds. Zero or + not specified means wait indefinitely. + * sslcert - Cert file location. The file must contain PEM encoded data. + * sslkey - Key file location. The file must contain PEM encoded data. + * sslrootcert - The location of the root certificate file. The file + must contain PEM encoded data. + +Valid values for sslmode are: + + * disable - No SSL + * require - Always SSL (skip verification) + * verify-ca - Always SSL (verify that the certificate presented by the + server was signed by a trusted CA) + * verify-full - Always SSL (verify that the certification presented by + the server was signed by a trusted CA and the server host name + matches the one in the certificate) + +See http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING +for more information about connection string parameters. + +Use single quotes for values that contain whitespace: + + "user=pqgotest password='with spaces'" + +A backslash will escape the next character in values: + + "user=space\ man password='it\'s valid'" + +Note that the connection parameter client_encoding (which sets the +text encoding for the connection) may be set but must be "UTF8", +matching with the same rules as Postgres. It is an error to provide +any other value. + +In addition to the parameters listed above, any run-time parameter that can be +set at backend start time can be set in the connection string. For more +information, see +http://www.postgresql.org/docs/current/static/runtime-config.html. + +Most environment variables as specified at http://www.postgresql.org/docs/current/static/libpq-envars.html +supported by libpq are also supported by pq. If any of the environment +variables not supported by pq are set, pq will panic during connection +establishment. Environment variables have a lower precedence than explicitly +provided connection parameters. + +The pgpass mechanism as described in http://www.postgresql.org/docs/current/static/libpq-pgpass.html +is supported, but on Windows PGPASSFILE must be specified explicitly. + + +Queries + + +database/sql does not dictate any specific format for parameter +markers in query strings, and pq uses the Postgres-native ordinal markers, +as shown above. 
The same marker can be reused for the same parameter: + + rows, err := db.Query(`SELECT name FROM users WHERE favorite_fruit = $1 + OR age BETWEEN $2 AND $2 + 3`, "orange", 64) + +pq does not support the LastInsertId() method of the Result type in database/sql. +To return the identifier of an INSERT (or UPDATE or DELETE), use the Postgres +RETURNING clause with a standard Query or QueryRow call: + + var userid int + err := db.QueryRow(`INSERT INTO users(name, favorite_fruit, age) + VALUES('beatrice', 'starfruit', 93) RETURNING id`).Scan(&userid) + +For more details on RETURNING, see the Postgres documentation: + + http://www.postgresql.org/docs/current/static/sql-insert.html + http://www.postgresql.org/docs/current/static/sql-update.html + http://www.postgresql.org/docs/current/static/sql-delete.html + +For additional instructions on querying see the documentation for the database/sql package. + + +Data Types + + +Parameters pass through driver.DefaultParameterConverter before they are handled +by this package. When the binary_parameters connection option is enabled, +[]byte values are sent directly to the backend as data in binary format. + +This package returns the following types for values from the PostgreSQL backend: + + - integer types smallint, integer, and bigint are returned as int64 + - floating-point types real and double precision are returned as float64 + - character types char, varchar, and text are returned as string + - temporal types date, time, timetz, timestamp, and timestamptz are + returned as time.Time + - the boolean type is returned as bool + - the bytea type is returned as []byte + +All other types are returned directly from the backend as []byte values in text format. + + +Errors + + +pq may return errors of type *pq.Error which can be interrogated for error details: + + if err, ok := err.(*pq.Error); ok { + fmt.Println("pq error:", err.Code.Name()) + } + +See the pq.Error type for details. + + +Bulk imports + +You can perform bulk imports by preparing a statement returned by pq.CopyIn (or +pq.CopyInSchema) in an explicit transaction (sql.Tx). The returned statement +handle can then be repeatedly "executed" to copy data into the target table. +After all data has been processed you should call Exec() once with no arguments +to flush all buffered data. Any call to Exec() might return an error which +should be handled appropriately, but because of the internal buffering an error +returned by Exec() might not be related to the data passed in the call that +failed. + +CopyIn uses COPY FROM internally. It is not possible to COPY outside of an +explicit transaction in pq. + +Usage example: + + txn, err := db.Begin() + if err != nil { + log.Fatal(err) + } + + stmt, err := txn.Prepare(pq.CopyIn("users", "name", "age")) + if err != nil { + log.Fatal(err) + } + + for _, user := range users { + _, err = stmt.Exec(user.Name, int64(user.Age)) + if err != nil { + log.Fatal(err) + } + } + + _, err = stmt.Exec() + if err != nil { + log.Fatal(err) + } + + err = stmt.Close() + if err != nil { + log.Fatal(err) + } + + err = txn.Commit() + if err != nil { + log.Fatal(err) + } + + +Notifications + + +PostgreSQL supports a simple publish/subscribe model over database +connections. See http://www.postgresql.org/docs/current/static/sql-notify.html +for more information about the general mechanism. + +To start listening for notifications, you first have to open a new connection +to the database by calling NewListener. 
This connection can not be used for +anything other than LISTEN / NOTIFY. Calling Listen will open a "notification +channel"; once a notification channel is open, a notification generated on that +channel will effect a send on the Listener.Notify channel. A notification +channel will remain open until Unlisten is called, though connection loss might +result in some notifications being lost. To solve this problem, Listener sends +a nil pointer over the Notify channel any time the connection is re-established +following a connection loss. The application can get information about the +state of the underlying connection by setting an event callback in the call to +NewListener. + +A single Listener can safely be used from concurrent goroutines, which means +that there is often no need to create more than one Listener in your +application. However, a Listener is always connected to a single database, so +you will need to create a new Listener instance for every database you want to +receive notifications in. + +The channel name in both Listen and Unlisten is case sensitive, and can contain +any characters legal in an identifier (see +http://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS +for more information). Note that the channel name will be truncated to 63 +bytes by the PostgreSQL server. + +You can find a complete, working example of Listener usage at +http://godoc.org/github.com/lib/pq/example/listen. + +*/ +package pq diff --git a/vendor/github.com/lib/pq/encode.go b/vendor/github.com/lib/pq/encode.go new file mode 100644 index 0000000..3b0d365 --- /dev/null +++ b/vendor/github.com/lib/pq/encode.go @@ -0,0 +1,603 @@ +package pq + +import ( + "bytes" + "database/sql/driver" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "math" + "strconv" + "strings" + "sync" + "time" + + "github.com/lib/pq/oid" +) + +func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte { + switch v := x.(type) { + case []byte: + return v + default: + return encode(parameterStatus, x, oid.T_unknown) + } +} + +func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) []byte { + switch v := x.(type) { + case int64: + return strconv.AppendInt(nil, v, 10) + case float64: + return strconv.AppendFloat(nil, v, 'f', -1, 64) + case []byte: + if pgtypOid == oid.T_bytea { + return encodeBytea(parameterStatus.serverVersion, v) + } + + return v + case string: + if pgtypOid == oid.T_bytea { + return encodeBytea(parameterStatus.serverVersion, []byte(v)) + } + + return []byte(v) + case bool: + return strconv.AppendBool(nil, v) + case time.Time: + return formatTs(v) + + default: + errorf("encode: unknown type for %T", v) + } + + panic("not reached") +} + +func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid, f format) interface{} { + switch f { + case formatBinary: + return binaryDecode(parameterStatus, s, typ) + case formatText: + return textDecode(parameterStatus, s, typ) + default: + panic("not reached") + } +} + +func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { + switch typ { + case oid.T_bytea: + return s + case oid.T_int8: + return int64(binary.BigEndian.Uint64(s)) + case oid.T_int4: + return int64(int32(binary.BigEndian.Uint32(s))) + case oid.T_int2: + return int64(int16(binary.BigEndian.Uint16(s))) + case oid.T_uuid: + b, err := decodeUUIDBinary(s) + if err != nil { + panic(err) + } + return b + + default: + errorf("don't know how to decode binary parameter of type %d", uint32(typ)) + } 
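+ // errorf always panics, so the default case never falls through; the panic below only satisfies the compiler's return analysis.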
+ + panic("not reached") +} + +func textDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { + switch typ { + case oid.T_char, oid.T_varchar, oid.T_text: + return string(s) + case oid.T_bytea: + b, err := parseBytea(s) + if err != nil { + errorf("%s", err) + } + return b + case oid.T_timestamptz: + return parseTs(parameterStatus.currentLocation, string(s)) + case oid.T_timestamp, oid.T_date: + return parseTs(nil, string(s)) + case oid.T_time: + return mustParse("15:04:05", typ, s) + case oid.T_timetz: + return mustParse("15:04:05-07", typ, s) + case oid.T_bool: + return s[0] == 't' + case oid.T_int8, oid.T_int4, oid.T_int2: + i, err := strconv.ParseInt(string(s), 10, 64) + if err != nil { + errorf("%s", err) + } + return i + case oid.T_float4, oid.T_float8: + bits := 64 + if typ == oid.T_float4 { + bits = 32 + } + f, err := strconv.ParseFloat(string(s), bits) + if err != nil { + errorf("%s", err) + } + return f + } + + return s +} + +// appendEncodedText encodes item in text format as required by COPY +// and appends to buf +func appendEncodedText(parameterStatus *parameterStatus, buf []byte, x interface{}) []byte { + switch v := x.(type) { + case int64: + return strconv.AppendInt(buf, v, 10) + case float64: + return strconv.AppendFloat(buf, v, 'f', -1, 64) + case []byte: + encodedBytea := encodeBytea(parameterStatus.serverVersion, v) + return appendEscapedText(buf, string(encodedBytea)) + case string: + return appendEscapedText(buf, v) + case bool: + return strconv.AppendBool(buf, v) + case time.Time: + return append(buf, formatTs(v)...) + case nil: + return append(buf, "\\N"...) + default: + errorf("encode: unknown type for %T", v) + } + + panic("not reached") +} + +func appendEscapedText(buf []byte, text string) []byte { + escapeNeeded := false + startPos := 0 + var c byte + + // check if we need to escape + for i := 0; i < len(text); i++ { + c = text[i] + if c == '\\' || c == '\n' || c == '\r' || c == '\t' { + escapeNeeded = true + startPos = i + break + } + } + if !escapeNeeded { + return append(buf, text...) + } + + // copy till first char to escape, iterate the rest + result := append(buf, text[:startPos]...) 
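+ // Escape the rest byte by byte: backslash, newline, carriage return, and tab each become a two-character backslash sequence, as the COPY text format requires.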
+ for i := startPos; i < len(text); i++ { + c = text[i] + switch c { + case '\\': + result = append(result, '\\', '\\') + case '\n': + result = append(result, '\\', 'n') + case '\r': + result = append(result, '\\', 'r') + case '\t': + result = append(result, '\\', 't') + default: + result = append(result, c) + } + } + return result +} + +func mustParse(f string, typ oid.Oid, s []byte) time.Time { + str := string(s) + + // check for a 30-minute-offset timezone + if (typ == oid.T_timestamptz || typ == oid.T_timetz) && + str[len(str)-3] == ':' { + f += ":00" + } + t, err := time.Parse(f, str) + if err != nil { + errorf("decode: %s", err) + } + return t +} + +var errInvalidTimestamp = errors.New("invalid timestamp") + +type timestampParser struct { + err error +} + +func (p *timestampParser) expect(str string, char byte, pos int) { + if p.err != nil { + return + } + if pos+1 > len(str) { + p.err = errInvalidTimestamp + return + } + if c := str[pos]; c != char && p.err == nil { + p.err = fmt.Errorf("expected '%v' at position %v; got '%v'", char, pos, c) + } +} + +func (p *timestampParser) mustAtoi(str string, begin int, end int) int { + if p.err != nil { + return 0 + } + if begin < 0 || end < 0 || begin > end || end > len(str) { + p.err = errInvalidTimestamp + return 0 + } + result, err := strconv.Atoi(str[begin:end]) + if err != nil { + if p.err == nil { + p.err = fmt.Errorf("expected number; got '%v'", str) + } + return 0 + } + return result +} + +// The location cache caches the time zones typically used by the client. +type locationCache struct { + cache map[int]*time.Location + lock sync.Mutex +} + +// All connections share the same list of timezones. Benchmarking shows that +// about 5% speed could be gained by putting the cache in the connection and +// losing the mutex, at the cost of a small amount of memory and a somewhat +// significant increase in code complexity. +var globalLocationCache = newLocationCache() + +func newLocationCache() *locationCache { + return &locationCache{cache: make(map[int]*time.Location)} +} + +// Returns the cached timezone for the specified offset, creating and caching +// it if necessary. +func (c *locationCache) getLocation(offset int) *time.Location { + c.lock.Lock() + defer c.lock.Unlock() + + location, ok := c.cache[offset] + if !ok { + location = time.FixedZone("", offset) + c.cache[offset] = location + } + + return location +} + +var infinityTsEnabled = false +var infinityTsNegative time.Time +var infinityTsPositive time.Time + +const ( + infinityTsEnabledAlready = "pq: infinity timestamp enabled already" + infinityTsNegativeMustBeSmaller = "pq: infinity timestamp: negative value must be smaller (before) than positive" +) + +// EnableInfinityTs controls the handling of Postgres' "-infinity" and +// "infinity" "timestamp"s. +// +// If EnableInfinityTs is not called, "-infinity" and "infinity" will return +// []byte("-infinity") and []byte("infinity") respectively, and potentially +// cause error "sql: Scan error on column index 0: unsupported driver -> Scan +// pair: []uint8 -> *time.Time", when scanning into a time.Time value. +// +// Once EnableInfinityTs has been called, all connections created using this +// driver will decode Postgres' "-infinity" and "infinity" for "timestamp", +// "timestamp with time zone" and "date" types to the predefined minimum and +// maximum times, respectively. When encoding time.Time values, any time which +// equals or precedes the predefined minimum time will be encoded to +// "-infinity". 
Any values at or past the maximum time will similarly be +// encoded to "infinity". +// +// If EnableInfinityTs is called with negative >= positive, it will panic. +// Calling EnableInfinityTs after a connection has been established results in +// undefined behavior. If EnableInfinityTs is called more than once, it will +// panic. +func EnableInfinityTs(negative time.Time, positive time.Time) { + if infinityTsEnabled { + panic(infinityTsEnabledAlready) + } + if !negative.Before(positive) { + panic(infinityTsNegativeMustBeSmaller) + } + infinityTsEnabled = true + infinityTsNegative = negative + infinityTsPositive = positive +} + +/* + * Testing might want to toggle infinityTsEnabled + */ +func disableInfinityTs() { + infinityTsEnabled = false +} + +// This is a time function specific to the Postgres default DateStyle +// setting ("ISO, MDY"), the only one we currently support. This +// accounts for the discrepancies between the parsing available with +// time.Parse and the Postgres date formatting quirks. +func parseTs(currentLocation *time.Location, str string) interface{} { + switch str { + case "-infinity": + if infinityTsEnabled { + return infinityTsNegative + } + return []byte(str) + case "infinity": + if infinityTsEnabled { + return infinityTsPositive + } + return []byte(str) + } + t, err := ParseTimestamp(currentLocation, str) + if err != nil { + panic(err) + } + return t +} + +// ParseTimestamp parses Postgres' text format. It returns a time.Time in +// currentLocation iff that time's offset agrees with the offset sent from the +// Postgres server. Otherwise, ParseTimestamp returns a time.Time with the +// fixed offset provided by the Postgres server. +func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, error) { + p := timestampParser{} + + monSep := strings.IndexRune(str, '-') + // this is Gregorian year, not ISO Year + // In Gregorian system, the year 1 BC is followed by AD 1 + year := p.mustAtoi(str, 0, monSep) + daySep := monSep + 3 + month := p.mustAtoi(str, monSep+1, daySep) + p.expect(str, '-', daySep) + timeSep := daySep + 3 + day := p.mustAtoi(str, daySep+1, timeSep) + + minLen := monSep + len("01-01") + 1 + + isBC := strings.HasSuffix(str, " BC") + if isBC { + minLen += 3 + } + + var hour, minute, second int + if len(str) > minLen { + p.expect(str, ' ', timeSep) + minSep := timeSep + 3 + p.expect(str, ':', minSep) + hour = p.mustAtoi(str, timeSep+1, minSep) + secSep := minSep + 3 + p.expect(str, ':', secSep) + minute = p.mustAtoi(str, minSep+1, secSep) + secEnd := secSep + 3 + second = p.mustAtoi(str, secSep+1, secEnd) + } + remainderIdx := monSep + len("01-01 00:00:00") + 1 + // Three optional (but ordered) sections follow: the + // fractional seconds, the time zone offset, and the BC + // designation. We set them up here and adjust the other + // offsets if the preceding sections exist. + + nanoSec := 0 + tzOff := 0 + + if remainderIdx < len(str) && str[remainderIdx] == '.'
{ + fracStart := remainderIdx + 1 + fracOff := strings.IndexAny(str[fracStart:], "-+ ") + if fracOff < 0 { + fracOff = len(str) - fracStart + } + fracSec := p.mustAtoi(str, fracStart, fracStart+fracOff) + nanoSec = fracSec * (1000000000 / int(math.Pow(10, float64(fracOff)))) + + remainderIdx += fracOff + 1 + } + if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart] == '-' || str[tzStart] == '+') { + // time zone separator is always '-' or '+' (UTC is +00) + var tzSign int + switch c := str[tzStart]; c { + case '-': + tzSign = -1 + case '+': + tzSign = +1 + default: + return time.Time{}, fmt.Errorf("expected '-' or '+' at position %v; got %v", tzStart, c) + } + tzHours := p.mustAtoi(str, tzStart+1, tzStart+3) + remainderIdx += 3 + var tzMin, tzSec int + if remainderIdx < len(str) && str[remainderIdx] == ':' { + tzMin = p.mustAtoi(str, remainderIdx+1, remainderIdx+3) + remainderIdx += 3 + } + if remainderIdx < len(str) && str[remainderIdx] == ':' { + tzSec = p.mustAtoi(str, remainderIdx+1, remainderIdx+3) + remainderIdx += 3 + } + tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec) + } + var isoYear int + + if isBC { + isoYear = 1 - year + remainderIdx += 3 + } else { + isoYear = year + } + if remainderIdx < len(str) { + return time.Time{}, fmt.Errorf("expected end of input, got %v", str[remainderIdx:]) + } + t := time.Date(isoYear, time.Month(month), day, + hour, minute, second, nanoSec, + globalLocationCache.getLocation(tzOff)) + + if currentLocation != nil { + // Set the location of the returned Time based on the session's + // TimeZone value, but only if the local time zone database agrees with + // the remote database on the offset. + lt := t.In(currentLocation) + _, newOff := lt.Zone() + if newOff == tzOff { + t = lt + } + } + + return t, p.err +} + +// formatTs formats t into a format postgres understands. +func formatTs(t time.Time) []byte { + if infinityTsEnabled { + // t <= -infinity : ! (t > -infinity) + if !t.After(infinityTsNegative) { + return []byte("-infinity") + } + // t >= infinity : ! (!t < infinity) + if !t.Before(infinityTsPositive) { + return []byte("infinity") + } + } + return FormatTimestamp(t) +} + +// FormatTimestamp formats t into Postgres' text format for timestamps. +func FormatTimestamp(t time.Time) []byte { + // Need to send dates before 0001 A.D. with " BC" suffix, instead of the + // minus sign preferred by Go. + // Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on + bc := false + if t.Year() <= 0 { + // flip year sign, and add 1, e.g: "0" will be "1", and "-10" will be "11" + t = t.AddDate((-t.Year())*2+1, 0, 0) + bc = true + } + b := []byte(t.Format("2006-01-02 15:04:05.999999999Z07:00")) + + _, offset := t.Zone() + offset = offset % 60 + if offset != 0 { + // RFC3339Nano already printed the minus sign + if offset < 0 { + offset = -offset + } + + b = append(b, ':') + if offset < 10 { + b = append(b, '0') + } + b = strconv.AppendInt(b, int64(offset), 10) + } + + if bc { + b = append(b, " BC"...) + } + return b +} + +// Parse a bytea value received from the server. Both "hex" and the legacy +// "escape" format are supported. 
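+// In the hex format the value begins with \x followed by hex digits; in the escape format non-printable bytes appear as backslash-octal sequences.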
+func parseBytea(s []byte) (result []byte, err error) { + if len(s) >= 2 && bytes.Equal(s[:2], []byte("\\x")) { + // bytea_output = hex + s = s[2:] // trim off leading "\\x" + result = make([]byte, hex.DecodedLen(len(s))) + _, err := hex.Decode(result, s) + if err != nil { + return nil, err + } + } else { + // bytea_output = escape + for len(s) > 0 { + if s[0] == '\\' { + // escaped '\\' + if len(s) >= 2 && s[1] == '\\' { + result = append(result, '\\') + s = s[2:] + continue + } + + // '\\' followed by an octal number + if len(s) < 4 { + return nil, fmt.Errorf("invalid bytea sequence %v", s) + } + r, err := strconv.ParseInt(string(s[1:4]), 8, 9) + if err != nil { + return nil, fmt.Errorf("could not parse bytea value: %s", err.Error()) + } + result = append(result, byte(r)) + s = s[4:] + } else { + // We hit an unescaped, raw byte. Try to read in as many as + // possible in one go. + i := bytes.IndexByte(s, '\\') + if i == -1 { + result = append(result, s...) + break + } + result = append(result, s[:i]...) + s = s[i:] + } + } + } + + return result, nil +} + +func encodeBytea(serverVersion int, v []byte) (result []byte) { + if serverVersion >= 90000 { + // Use the hex format if we know that the server supports it + result = make([]byte, 2+hex.EncodedLen(len(v))) + result[0] = '\\' + result[1] = 'x' + hex.Encode(result[2:], v) + } else { + // .. or resort to "escape" + for _, b := range v { + if b == '\\' { + result = append(result, '\\', '\\') + } else if b < 0x20 || b > 0x7e { + result = append(result, []byte(fmt.Sprintf("\\%03o", b))...) + } else { + result = append(result, b) + } + } + } + + return result +} + +// NullTime represents a time.Time that may be null. NullTime implements the +// sql.Scanner interface so it can be used as a scan destination, similar to +// sql.NullString. +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} + +// Scan implements the Scanner interface. +func (nt *NullTime) Scan(value interface{}) error { + nt.Time, nt.Valid = value.(time.Time) + return nil +} + +// Value implements the driver Valuer interface. 
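+// It returns nil (SQL NULL) when Valid is false and the wrapped time.Time otherwise.
+// A minimal usage sketch (the users table and its nullable deleted_at column are hypothetical):
+//
+//	var nt pq.NullTime
+//	err := db.QueryRow("SELECT deleted_at FROM users WHERE id = $1", id).Scan(&nt)
+//	if err == nil && nt.Valid {
+//		fmt.Println(nt.Time)
+//	}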
+func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} diff --git a/vendor/github.com/lib/pq/encode_test.go b/vendor/github.com/lib/pq/encode_test.go new file mode 100644 index 0000000..d58798a --- /dev/null +++ b/vendor/github.com/lib/pq/encode_test.go @@ -0,0 +1,766 @@ +package pq + +import ( + "bytes" + "database/sql" + "fmt" + "regexp" + "testing" + "time" + + "github.com/lib/pq/oid" +) + +func TestScanTimestamp(t *testing.T) { + var nt NullTime + tn := time.Now() + nt.Scan(tn) + if !nt.Valid { + t.Errorf("Expected Valid=true") + } + if nt.Time != tn { + t.Errorf("Time value mismatch") + } +} + +func TestScanNilTimestamp(t *testing.T) { + var nt NullTime + nt.Scan(nil) + if nt.Valid { + t.Errorf("Expected Valid=false") + } +} + +var timeTests = []struct { + str string + timeval time.Time +}{ + {"22001-02-03", time.Date(22001, time.February, 3, 0, 0, 0, 0, time.FixedZone("", 0))}, + {"2001-02-03", time.Date(2001, time.February, 3, 0, 0, 0, 0, time.FixedZone("", 0))}, + {"0001-12-31 BC", time.Date(0, time.December, 31, 0, 0, 0, 0, time.FixedZone("", 0))}, + {"2001-02-03 BC", time.Date(-2000, time.February, 3, 0, 0, 0, 0, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06", time.Date(2001, time.February, 3, 4, 5, 6, 0, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.000001", time.Date(2001, time.February, 3, 4, 5, 6, 1000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.00001", time.Date(2001, time.February, 3, 4, 5, 6, 10000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.0001", time.Date(2001, time.February, 3, 4, 5, 6, 100000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.001", time.Date(2001, time.February, 3, 4, 5, 6, 1000000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.01", time.Date(2001, time.February, 3, 4, 5, 6, 10000000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.1", time.Date(2001, time.February, 3, 4, 5, 6, 100000000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.12", time.Date(2001, time.February, 3, 4, 5, 6, 120000000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.123", time.Date(2001, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.1234", time.Date(2001, time.February, 3, 4, 5, 6, 123400000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.12345", time.Date(2001, time.February, 3, 4, 5, 6, 123450000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.123456", time.Date(2001, time.February, 3, 4, 5, 6, 123456000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.123-07", time.Date(2001, time.February, 3, 4, 5, 6, 123000000, + time.FixedZone("", -7*60*60))}, + {"2001-02-03 04:05:06-07", time.Date(2001, time.February, 3, 4, 5, 6, 0, + time.FixedZone("", -7*60*60))}, + {"2001-02-03 04:05:06-07:42", time.Date(2001, time.February, 3, 4, 5, 6, 0, + time.FixedZone("", -(7*60*60+42*60)))}, + {"2001-02-03 04:05:06-07:30:09", time.Date(2001, time.February, 3, 4, 5, 6, 0, + time.FixedZone("", -(7*60*60+30*60+9)))}, + {"2001-02-03 04:05:06+07", time.Date(2001, time.February, 3, 4, 5, 6, 0, + time.FixedZone("", 7*60*60))}, + {"0011-02-03 04:05:06 BC", time.Date(-10, time.February, 3, 4, 5, 6, 0, time.FixedZone("", 0))}, + {"0011-02-03 04:05:06.123 BC", time.Date(-10, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))}, + {"0011-02-03 04:05:06.123-07 BC", time.Date(-10, time.February, 3, 4, 5, 6, 123000000, + time.FixedZone("", -7*60*60))}, + {"0001-02-03 04:05:06.123", time.Date(1, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))}, + {"0001-02-03
04:05:06.123 BC", time.Date(1, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0)).AddDate(-1, 0, 0)}, + {"0001-02-03 04:05:06.123 BC", time.Date(0, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))}, + {"0002-02-03 04:05:06.123 BC", time.Date(0, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0)).AddDate(-1, 0, 0)}, + {"0002-02-03 04:05:06.123 BC", time.Date(-1, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))}, + {"12345-02-03 04:05:06.1", time.Date(12345, time.February, 3, 4, 5, 6, 100000000, time.FixedZone("", 0))}, + {"123456-02-03 04:05:06.1", time.Date(123456, time.February, 3, 4, 5, 6, 100000000, time.FixedZone("", 0))}, +} + +// Test that parsing the string results in the expected value. +func TestParseTs(t *testing.T) { + for i, tt := range timeTests { + val, err := ParseTimestamp(nil, tt.str) + if err != nil { + t.Errorf("%d: got error: %v", i, err) + } else if val.String() != tt.timeval.String() { + t.Errorf("%d: expected to parse %q into %q; got %q", + i, tt.str, tt.timeval, val) + } + } +} + +var timeErrorTests = []string{ + "BC", + " BC", + "2001", + "2001-2-03", + "2001-02-3", + "2001-02-03 ", + "2001-02-03 B", + "2001-02-03 04", + "2001-02-03 04:", + "2001-02-03 04:05", + "2001-02-03 04:05 B", + "2001-02-03 04:05 BC", + "2001-02-03 04:05:", + "2001-02-03 04:05:6", + "2001-02-03 04:05:06 B", + "2001-02-03 04:05:06BC", + "2001-02-03 04:05:06.123 B", +} + +// Test that parsing the string results in an error. +func TestParseTsErrors(t *testing.T) { + for i, tt := range timeErrorTests { + _, err := ParseTimestamp(nil, tt) + if err == nil { + t.Errorf("%d: expected an error from parsing: %v", i, tt) + } + } +} + +// Now test that sending the value into the database and parsing it back +// returns the same time.Time value. 
+func TestEncodeAndParseTs(t *testing.T) { + db, err := openTestConnConninfo("timezone='Etc/UTC'") + if err != nil { + t.Fatal(err) + } + defer db.Close() + + for i, tt := range timeTests { + var dbstr string + err = db.QueryRow("SELECT ($1::timestamptz)::text", tt.timeval).Scan(&dbstr) + if err != nil { + t.Errorf("%d: could not send value %q to the database: %s", i, tt.timeval, err) + continue + } + + val, err := ParseTimestamp(nil, dbstr) + if err != nil { + t.Errorf("%d: could not parse value %q: %s", i, dbstr, err) + continue + } + val = val.In(tt.timeval.Location()) + if val.String() != tt.timeval.String() { + t.Errorf("%d: expected to parse %q into %q; got %q", i, dbstr, tt.timeval, val) + } + } +} + +var formatTimeTests = []struct { + time time.Time + expected string +}{ + {time.Time{}, "0001-01-01 00:00:00Z"}, + {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "2001-02-03 04:05:06.123456789Z"}, + {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "2001-02-03 04:05:06.123456789+02:00"}, + {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "2001-02-03 04:05:06.123456789-06:00"}, + {time.Date(2001, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "2001-02-03 04:05:06-07:30:09"}, + + {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "0001-02-03 04:05:06.123456789Z"}, + {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "0001-02-03 04:05:06.123456789+02:00"}, + {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "0001-02-03 04:05:06.123456789-06:00"}, + + {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "0001-02-03 04:05:06.123456789Z BC"}, + {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "0001-02-03 04:05:06.123456789+02:00 BC"}, + {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "0001-02-03 04:05:06.123456789-06:00 BC"}, + + {time.Date(1, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "0001-02-03 04:05:06-07:30:09"}, + {time.Date(0, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "0001-02-03 04:05:06-07:30:09 BC"}, +} + +func TestFormatTs(t *testing.T) { + for i, tt := range formatTimeTests { + val := string(formatTs(tt.time)) + if val != tt.expected { + t.Errorf("%d: incorrect time format %q, want %q", i, val, tt.expected) + } + } +} + +func TestFormatTsBackend(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + var str string + err := db.QueryRow("SELECT '2001-02-03T04:05:06.007-08:09:10'::time::text").Scan(&str) + if err == nil { + t.Fatalf("PostgreSQL is accepting an ISO timestamp input for time") + } + + for i, tt := range formatTimeTests { + for _, typ := range []string{"date", "time", "timetz", "timestamp", "timestamptz"} { + err = db.QueryRow("SELECT $1::"+typ+"::text", tt.time).Scan(&str) + if err != nil { + t.Errorf("%d: incorrect time format for %v on the backend: %v", i, typ, err) + } + } + } +} + +func TestTimestampWithTimeZone(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + tx, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer tx.Rollback() + + // try several different locations, all included in Go's zoneinfo.zip + for _, locName := range []string{ + "UTC", + "America/Chicago", + "America/New_York", + "Australia/Darwin", + "Australia/Perth", + } { + loc, err := 
time.LoadLocation(locName) + if err != nil { + t.Logf("Could not load time zone %s - skipping", locName) + continue + } + + // Postgres timestamps have a resolution of 1 microsecond, so don't + // use the full range of the Nanosecond argument + refTime := time.Date(2012, 11, 6, 10, 23, 42, 123456000, loc) + + for _, pgTimeZone := range []string{"US/Eastern", "Australia/Darwin"} { + // Switch Postgres's timezone to test different output timestamp formats + _, err = tx.Exec(fmt.Sprintf("set time zone '%s'", pgTimeZone)) + if err != nil { + t.Fatal(err) + } + + var gotTime time.Time + row := tx.QueryRow("select $1::timestamp with time zone", refTime) + err = row.Scan(&gotTime) + if err != nil { + t.Fatal(err) + } + + if !refTime.Equal(gotTime) { + t.Errorf("timestamps not equal: %s != %s", refTime, gotTime) + } + + // check that the time zone is set correctly based on TimeZone + pgLoc, err := time.LoadLocation(pgTimeZone) + if err != nil { + t.Logf("Could not load time zone %s - skipping", pgLoc) + continue + } + translated := refTime.In(pgLoc) + if translated.String() != gotTime.String() { + t.Errorf("timestamps not equal: %s != %s", translated, gotTime) + } + } + } +} + +func TestTimestampWithOutTimezone(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + test := func(ts, pgts string) { + r, err := db.Query("SELECT $1::timestamp", pgts) + if err != nil { + t.Fatalf("Could not run query: %v", err) + } + + if !r.Next() { + t.Fatal("Expected at least one row") + } + + var result time.Time + err = r.Scan(&result) + if err != nil { + t.Fatalf("Did not expect error scanning row: %v", err) + } + + expected, err := time.Parse(time.RFC3339, ts) + if err != nil { + t.Fatalf("Could not parse test time literal: %v", err) + } + + if !result.Equal(expected) { + t.Fatalf("Expected time to match %v: got mismatch %v", + expected, result) + } + + if r.Next() { + t.Fatal("Expected only one row") + } + } + + test("2000-01-01T00:00:00Z", "2000-01-01T00:00:00") + + // Test higher precision time + test("2013-01-04T20:14:58.80033Z", "2013-01-04 20:14:58.80033") +} + +func TestInfinityTimestamp(t *testing.T) { + db := openTestConn(t) + defer db.Close() + var err error + var resultT time.Time + + expectedErrorStrRegexp := regexp.MustCompile( + `^sql: Scan error on column index 0(, name "timestamp(tz)?"|): unsupported`) + + type testCases []struct { + Query string + Param string + ExpectedErrorStrRegexp *regexp.Regexp + ExpectedVal interface{} + } + tc := testCases{ + {"SELECT $1::timestamp", "-infinity", expectedErrorStrRegexp, "-infinity"}, + {"SELECT $1::timestamptz", "-infinity", expectedErrorStrRegexp, "-infinity"}, + {"SELECT $1::timestamp", "infinity", expectedErrorStrRegexp, "infinity"}, + {"SELECT $1::timestamptz", "infinity", expectedErrorStrRegexp, "infinity"}, + } + // try to assert []byte to time.Time + for _, q := range tc { + err = db.QueryRow(q.Query, q.Param).Scan(&resultT) + if !q.ExpectedErrorStrRegexp.MatchString(err.Error()) { + t.Errorf("Scanning -/+infinity, expected error to match regexp %q, got %q", + q.ExpectedErrorStrRegexp, err) + } + } + // yield []byte + for _, q := range tc { + var resultI interface{} + err = db.QueryRow(q.Query, q.Param).Scan(&resultI) + if err != nil { + t.Errorf("Scanning -/+infinity, expected no error, got %q", err) + } + result, ok := resultI.([]byte) + if !ok { + t.Errorf("Scanning -/+infinity, expected []byte, got %#v", resultI) + } + if string(result) != q.ExpectedVal { + t.Errorf("Scanning -/+infinity, expected %q, got %q", q.ExpectedVal, result) + } 
+ } + + y1500 := time.Date(1500, time.January, 1, 0, 0, 0, 0, time.UTC) + y2500 := time.Date(2500, time.January, 1, 0, 0, 0, 0, time.UTC) + EnableInfinityTs(y1500, y2500) + + err = db.QueryRow("SELECT $1::timestamp", "infinity").Scan(&resultT) + if err != nil { + t.Errorf("Scanning infinity, expected no error, got %q", err) + } + if !resultT.Equal(y2500) { + t.Errorf("Scanning infinity, expected %q, got %q", y2500, resultT) + } + + err = db.QueryRow("SELECT $1::timestamptz", "infinity").Scan(&resultT) + if err != nil { + t.Errorf("Scanning infinity, expected no error, got %q", err) + } + if !resultT.Equal(y2500) { + t.Errorf("Scanning Infinity, expected time %q, got %q", y2500, resultT.String()) + } + + err = db.QueryRow("SELECT $1::timestamp", "-infinity").Scan(&resultT) + if err != nil { + t.Errorf("Scanning -infinity, expected no error, got %q", err) + } + if !resultT.Equal(y1500) { + t.Errorf("Scanning -infinity, expected time %q, got %q", y1500, resultT.String()) + } + + err = db.QueryRow("SELECT $1::timestamptz", "-infinity").Scan(&resultT) + if err != nil { + t.Errorf("Scanning -infinity, expected no error, got %q", err) + } + if !resultT.Equal(y1500) { + t.Errorf("Scanning -infinity, expected time %q, got %q", y1500, resultT.String()) + } + + ym1500 := time.Date(-1500, time.January, 1, 0, 0, 0, 0, time.UTC) + y11500 := time.Date(11500, time.January, 1, 0, 0, 0, 0, time.UTC) + var s string + err = db.QueryRow("SELECT $1::timestamp::text", ym1500).Scan(&s) + if err != nil { + t.Errorf("Encoding -infinity, expected no error, got %q", err) + } + if s != "-infinity" { + t.Errorf("Encoding -infinity, expected %q, got %q", "-infinity", s) + } + err = db.QueryRow("SELECT $1::timestamptz::text", ym1500).Scan(&s) + if err != nil { + t.Errorf("Encoding -infinity, expected no error, got %q", err) + } + if s != "-infinity" { + t.Errorf("Encoding -infinity, expected %q, got %q", "-infinity", s) + } + + err = db.QueryRow("SELECT $1::timestamp::text", y11500).Scan(&s) + if err != nil { + t.Errorf("Encoding infinity, expected no error, got %q", err) + } + if s != "infinity" { + t.Errorf("Encoding infinity, expected %q, got %q", "infinity", s) + } + err = db.QueryRow("SELECT $1::timestamptz::text", y11500).Scan(&s) + if err != nil { + t.Errorf("Encoding infinity, expected no error, got %q", err) + } + if s != "infinity" { + t.Errorf("Encoding infinity, expected %q, got %q", "infinity", s) + } + + disableInfinityTs() + + var panicErrorString string + func() { + defer func() { + panicErrorString, _ = recover().(string) + }() + EnableInfinityTs(y2500, y1500) + }() + if panicErrorString != infinityTsNegativeMustBeSmaller { + t.Errorf("Expected error, %q, got %q", infinityTsNegativeMustBeSmaller, panicErrorString) + } +} + +func TestStringWithNul(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + hello0world := string("hello\x00world") + _, err := db.Query("SELECT $1::text", &hello0world) + if err == nil { + t.Fatal("Postgres accepts a string with nul in it; " + + "injection attacks may be plausible") + } +} + +func TestByteSliceToText(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + b := []byte("hello world") + row := db.QueryRow("SELECT $1::text", b) + + var result []byte + err := row.Scan(&result) + if err != nil { + t.Fatal(err) + } + + if string(result) != string(b) { + t.Fatalf("expected %v but got %v", b, result) + } +} + +func TestStringToBytea(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + b := "hello world" + row := db.QueryRow("SELECT $1::bytea", b) + 
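+ // In text mode the string parameter is passed through as-is and cast to bytea by the server; scanning the result exercises parseBytea on the way back.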
+	var result []byte
+	err := row.Scan(&result)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !bytes.Equal(result, []byte(b)) {
+		t.Fatalf("expected %v but got %v", b, result)
+	}
+}
+
+func TestTextByteSliceToUUID(t *testing.T) {
+	db := openTestConn(t)
+	defer db.Close()
+
+	b := []byte("a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11")
+	row := db.QueryRow("SELECT $1::uuid", b)
+
+	var result string
+	err := row.Scan(&result)
+	if forceBinaryParameters() {
+		pqErr, ok := err.(*Error)
+		if !ok {
+			t.Errorf("expected *pq.Error, got %v", err)
+		} else if pqErr.Code != "22P03" {
+			t.Fatalf("Expected to get invalid binary encoding error (22P03), got %s", pqErr.Code)
+		}
+	} else {
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if result != string(b) {
+			t.Fatalf("expected %v but got %v", b, result)
+		}
+	}
+}
+
+func TestBinaryByteSliceToUUID(t *testing.T) {
+	db := openTestConn(t)
+	defer db.Close()
+
+	b := []byte{'\xa0', '\xee', '\xbc', '\x99',
+		'\x9c', '\x0b',
+		'\x4e', '\xf8',
+		'\xbb', '\x00', '\x6b',
+		'\xb9', '\xbd', '\x38', '\x0a', '\x11'}
+	row := db.QueryRow("SELECT $1::uuid", b)
+
+	var result string
+	err := row.Scan(&result)
+	if forceBinaryParameters() {
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if result != "a0eebc99-9c0b-4ef8-bb00-6bb9bd380a11" {
+			t.Fatalf("expected %v but got %v", b, result)
+		}
+	} else {
+		pqErr, ok := err.(*Error)
+		if !ok {
+			t.Errorf("expected *pq.Error, got %v", err)
+		} else if pqErr.Code != "22021" {
+			t.Fatalf("Expected to get invalid byte sequence for encoding error (22021), got %s", pqErr.Code)
+		}
+	}
+}
+
+func TestStringToUUID(t *testing.T) {
+	db := openTestConn(t)
+	defer db.Close()
+
+	s := "a0eebc99-9c0b-4ef8-bb00-6bb9bd380a11"
+	row := db.QueryRow("SELECT $1::uuid", s)
+
+	var result string
+	err := row.Scan(&result)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if result != s {
+		t.Fatalf("expected %v but got %v", s, result)
+	}
+}
+
+func TestTextByteSliceToInt(t *testing.T) {
+	db := openTestConn(t)
+	defer db.Close()
+
+	expected := 12345678
+	b := []byte(fmt.Sprintf("%d", expected))
+	row := db.QueryRow("SELECT $1::int", b)
+
+	var result int
+	err := row.Scan(&result)
+	if forceBinaryParameters() {
+		pqErr, ok := err.(*Error)
+		if !ok {
+			t.Errorf("expected *pq.Error, got %v", err)
+		} else if pqErr.Code != "22P03" {
+			t.Fatalf("Expected to get invalid binary encoding error (22P03), got %s", pqErr.Code)
+		}
+	} else {
+		if err != nil {
+			t.Fatal(err)
+		}
+		if result != expected {
+			t.Fatalf("expected %v but got %v", expected, result)
+		}
+	}
+}
+
+func TestBinaryByteSliceToInt(t *testing.T) {
+	db := openTestConn(t)
+	defer db.Close()
+
+	expected := 12345678
+	b := []byte{'\x00', '\xbc', '\x61', '\x4e'}
+	row := db.QueryRow("SELECT $1::int", b)
+
+	var result int
+	err := row.Scan(&result)
+	if forceBinaryParameters() {
+		if err != nil {
+			t.Fatal(err)
+		}
+		if result != expected {
+			t.Fatalf("expected %v but got %v", expected, result)
+		}
+	} else {
+		pqErr, ok := err.(*Error)
+		if !ok {
+			t.Errorf("expected *pq.Error, got %v", err)
+		} else if pqErr.Code != "22021" {
+			t.Fatalf("Expected to get invalid byte sequence for encoding error (22021), got %s", pqErr.Code)
+		}
+	}
+}
+
+func TestTextDecodeIntoString(t *testing.T) {
+	input := []byte("hello world")
+	want := string(input)
+	for _, typ := range []oid.Oid{oid.T_char, oid.T_varchar, oid.T_text} {
+		got := decode(&parameterStatus{}, input, typ, formatText)
+		if got != want {
+			t.Errorf("invalid string decoding output for %T(%+v), got %v but expected %v", typ, typ, got, want)
+		}
+	}
+}
+
+func TestByteaOutputFormatEncoding(t *testing.T) {
+	input := []byte("\\x\x00\x01\x02\xFF\xFEabcdefg0123")
+	want := []byte("\\x5c78000102fffe6162636465666730313233")
+	got := encode(&parameterStatus{serverVersion: 90000}, input, oid.T_bytea)
+	if !bytes.Equal(want, got) {
+		t.Errorf("invalid hex bytea output, got %v but expected %v", got, want)
+	}
+
+	want = []byte("\\\\x\\000\\001\\002\\377\\376abcdefg0123")
+	got = encode(&parameterStatus{serverVersion: 84000}, input, oid.T_bytea)
+	if !bytes.Equal(want, got) {
+		t.Errorf("invalid escape bytea output, got %v but expected %v", got, want)
+	}
+}
+
+func TestByteaOutputFormats(t *testing.T) {
+	db := openTestConn(t)
+	defer db.Close()
+
+	if getServerVersion(t, db) < 90000 {
+		t.Skip("skipping; bytea_output format is not supported before server version 9.0")
+	}
+
+	testByteaOutputFormat := func(f string, usePrepared bool) {
+		expectedData := []byte("\x5c\x78\x00\xff\x61\x62\x63\x01\x08")
+		sqlQuery := "SELECT decode('5c7800ff6162630108', 'hex')"
+
+		var data []byte
+
+		// use a txn to avoid relying on getting the same connection
+		txn, err := db.Begin()
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer txn.Rollback()
+
+		_, err = txn.Exec("SET LOCAL bytea_output TO " + f)
+		if err != nil {
+			t.Fatal(err)
+		}
+		var rows *sql.Rows
+		var stmt *sql.Stmt
+		if usePrepared {
+			stmt, err = txn.Prepare(sqlQuery)
+			if err != nil {
+				t.Fatal(err)
+			}
+			rows, err = stmt.Query()
+		} else {
+			// use Query; QueryRow would hide the actual error
+			rows, err = txn.Query(sqlQuery)
+		}
+		if err != nil {
+			t.Fatal(err)
+		}
+		if !rows.Next() {
+			if rows.Err() != nil {
+				t.Fatal(rows.Err())
+			}
+			t.Fatal("expected a row, got none")
+		}
+		err = rows.Scan(&data)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = rows.Close()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if stmt != nil {
+			err = stmt.Close()
+			if err != nil {
+				t.Fatal(err)
+			}
+		}
+		if !bytes.Equal(data, expectedData) {
+			t.Errorf("unexpected bytea value %v for format %s; expected %v", data, f, expectedData)
+		}
+	}
+
+	testByteaOutputFormat("hex", false)
+	testByteaOutputFormat("escape", false)
+	testByteaOutputFormat("hex", true)
+	testByteaOutputFormat("escape", true)
+}
+
+func TestAppendEncodedText(t *testing.T) {
+	var buf []byte
+
+	buf = appendEncodedText(&parameterStatus{serverVersion: 90000}, buf, int64(10))
+	buf = append(buf, '\t')
+	buf = appendEncodedText(&parameterStatus{serverVersion: 90000}, buf, 42.0000000001)
+	buf = append(buf, '\t')
+	buf = appendEncodedText(&parameterStatus{serverVersion: 90000}, buf, "hello\tworld")
+	buf = append(buf, '\t')
+	buf = appendEncodedText(&parameterStatus{serverVersion: 90000}, buf, []byte{0, 128, 255})
+
+	if string(buf) != "10\t42.0000000001\thello\\tworld\t\\\\x0080ff" {
+		t.Fatal(string(buf))
+	}
+}
+
+func TestAppendEscapedText(t *testing.T) {
+	if esc := appendEscapedText(nil, "hallo\tescape"); string(esc) != "hallo\\tescape" {
+		t.Fatal(string(esc))
+	}
+	if esc := appendEscapedText(nil, "hallo\\tescape\n"); string(esc) != "hallo\\\\tescape\\n" {
+		t.Fatal(string(esc))
+	}
+	if esc := appendEscapedText(nil, "\n\r\t\f"); string(esc) != "\\n\\r\\t\f" {
+		t.Fatal(string(esc))
+	}
+}
+
+func TestAppendEscapedTextExistingBuffer(t *testing.T) {
+	buf := []byte("123\t")
+	if esc := appendEscapedText(buf, "hallo\tescape"); string(esc) != "123\thallo\\tescape" {
+		t.Fatal(string(esc))
+	}
+	buf = []byte("123\t")
+	if esc := appendEscapedText(buf, "hallo\\tescape\n"); string(esc) != "123\thallo\\\\tescape\\n" {
+		t.Fatal(string(esc))
+	}
+	buf = []byte("123\t")
+	if esc := appendEscapedText(buf, "\n\r\t\f"); string(esc) != "123\t\\n\\r\\t\f" {
+		t.Fatal(string(esc))
+	}
+}
+
+func BenchmarkAppendEscapedText(b *testing.B) {
+	longString := ""
+	for i := 0; i < 100; i++ {
+		longString += "123456789\n"
+	}
+	for i := 0; i < b.N; i++ {
+		appendEscapedText(nil, longString)
+	}
+}
+
+func BenchmarkAppendEscapedTextNoEscape(b *testing.B) {
+	longString := ""
+	for i := 0; i < 100; i++ {
+		longString += "1234567890"
+	}
+	for i := 0; i < b.N; i++ {
+		appendEscapedText(nil, longString)
+	}
+}
diff --git a/vendor/github.com/lib/pq/error.go b/vendor/github.com/lib/pq/error.go
new file mode 100644
index 0000000..96aae29
--- /dev/null
+++ b/vendor/github.com/lib/pq/error.go
@@ -0,0 +1,515 @@
+package pq
+
+import (
+	"database/sql/driver"
+	"fmt"
+	"io"
+	"net"
+	"runtime"
+)
+
+// Error severities
+const (
+	Efatal   = "FATAL"
+	Epanic   = "PANIC"
+	Ewarning = "WARNING"
+	Enotice  = "NOTICE"
+	Edebug   = "DEBUG"
+	Einfo    = "INFO"
+	Elog     = "LOG"
+)
+
+// Error represents an error communicating with the server.
+//
+// See http://www.postgresql.org/docs/current/static/protocol-error-fields.html
+// for details of the fields.
+type Error struct {
+	Severity         string
+	Code             ErrorCode
+	Message          string
+	Detail           string
+	Hint             string
+	Position         string
+	InternalPosition string
+	InternalQuery    string
+	Where            string
+	Schema           string
+	Table            string
+	Column           string
+	DataTypeName     string
+	Constraint       string
+	File             string
+	Line             string
+	Routine          string
+}
+
+// ErrorCode is a five-character error code.
+type ErrorCode string
+
+// Name returns a more human-friendly rendering of the error code, namely the
+// "condition name".
+//
+// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for
+// details.
+func (ec ErrorCode) Name() string {
+	return errorCodeNames[ec]
+}
+
+// ErrorClass is only the class part of an error code.
+type ErrorClass string
+
+// Name returns the condition name of an error class. It is equivalent to the
+// condition name of the "standard" error code (i.e. the one having the last
+// three characters "000").
+func (ec ErrorClass) Name() string {
+	return errorCodeNames[ErrorCode(ec+"000")]
+}
+
+// Class returns the error class, e.g. "28".
+//
+// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for
+// details.
+func (ec ErrorCode) Class() ErrorClass {
+	return ErrorClass(ec[0:2])
+}
+
+// errorCodeNames is a mapping between the five-character error codes and the
+// human-readable "condition names".
It is derived from the list at +// http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html +var errorCodeNames = map[ErrorCode]string{ + // Class 00 - Successful Completion + "00000": "successful_completion", + // Class 01 - Warning + "01000": "warning", + "0100C": "dynamic_result_sets_returned", + "01008": "implicit_zero_bit_padding", + "01003": "null_value_eliminated_in_set_function", + "01007": "privilege_not_granted", + "01006": "privilege_not_revoked", + "01004": "string_data_right_truncation", + "01P01": "deprecated_feature", + // Class 02 - No Data (this is also a warning class per the SQL standard) + "02000": "no_data", + "02001": "no_additional_dynamic_result_sets_returned", + // Class 03 - SQL Statement Not Yet Complete + "03000": "sql_statement_not_yet_complete", + // Class 08 - Connection Exception + "08000": "connection_exception", + "08003": "connection_does_not_exist", + "08006": "connection_failure", + "08001": "sqlclient_unable_to_establish_sqlconnection", + "08004": "sqlserver_rejected_establishment_of_sqlconnection", + "08007": "transaction_resolution_unknown", + "08P01": "protocol_violation", + // Class 09 - Triggered Action Exception + "09000": "triggered_action_exception", + // Class 0A - Feature Not Supported + "0A000": "feature_not_supported", + // Class 0B - Invalid Transaction Initiation + "0B000": "invalid_transaction_initiation", + // Class 0F - Locator Exception + "0F000": "locator_exception", + "0F001": "invalid_locator_specification", + // Class 0L - Invalid Grantor + "0L000": "invalid_grantor", + "0LP01": "invalid_grant_operation", + // Class 0P - Invalid Role Specification + "0P000": "invalid_role_specification", + // Class 0Z - Diagnostics Exception + "0Z000": "diagnostics_exception", + "0Z002": "stacked_diagnostics_accessed_without_active_handler", + // Class 20 - Case Not Found + "20000": "case_not_found", + // Class 21 - Cardinality Violation + "21000": "cardinality_violation", + // Class 22 - Data Exception + "22000": "data_exception", + "2202E": "array_subscript_error", + "22021": "character_not_in_repertoire", + "22008": "datetime_field_overflow", + "22012": "division_by_zero", + "22005": "error_in_assignment", + "2200B": "escape_character_conflict", + "22022": "indicator_overflow", + "22015": "interval_field_overflow", + "2201E": "invalid_argument_for_logarithm", + "22014": "invalid_argument_for_ntile_function", + "22016": "invalid_argument_for_nth_value_function", + "2201F": "invalid_argument_for_power_function", + "2201G": "invalid_argument_for_width_bucket_function", + "22018": "invalid_character_value_for_cast", + "22007": "invalid_datetime_format", + "22019": "invalid_escape_character", + "2200D": "invalid_escape_octet", + "22025": "invalid_escape_sequence", + "22P06": "nonstandard_use_of_escape_character", + "22010": "invalid_indicator_parameter_value", + "22023": "invalid_parameter_value", + "2201B": "invalid_regular_expression", + "2201W": "invalid_row_count_in_limit_clause", + "2201X": "invalid_row_count_in_result_offset_clause", + "22009": "invalid_time_zone_displacement_value", + "2200C": "invalid_use_of_escape_character", + "2200G": "most_specific_type_mismatch", + "22004": "null_value_not_allowed", + "22002": "null_value_no_indicator_parameter", + "22003": "numeric_value_out_of_range", + "2200H": "sequence_generator_limit_exceeded", + "22026": "string_data_length_mismatch", + "22001": "string_data_right_truncation", + "22011": "substring_error", + "22027": "trim_error", + "22024": "unterminated_c_string", + "2200F": 
"zero_length_character_string", + "22P01": "floating_point_exception", + "22P02": "invalid_text_representation", + "22P03": "invalid_binary_representation", + "22P04": "bad_copy_file_format", + "22P05": "untranslatable_character", + "2200L": "not_an_xml_document", + "2200M": "invalid_xml_document", + "2200N": "invalid_xml_content", + "2200S": "invalid_xml_comment", + "2200T": "invalid_xml_processing_instruction", + // Class 23 - Integrity Constraint Violation + "23000": "integrity_constraint_violation", + "23001": "restrict_violation", + "23502": "not_null_violation", + "23503": "foreign_key_violation", + "23505": "unique_violation", + "23514": "check_violation", + "23P01": "exclusion_violation", + // Class 24 - Invalid Cursor State + "24000": "invalid_cursor_state", + // Class 25 - Invalid Transaction State + "25000": "invalid_transaction_state", + "25001": "active_sql_transaction", + "25002": "branch_transaction_already_active", + "25008": "held_cursor_requires_same_isolation_level", + "25003": "inappropriate_access_mode_for_branch_transaction", + "25004": "inappropriate_isolation_level_for_branch_transaction", + "25005": "no_active_sql_transaction_for_branch_transaction", + "25006": "read_only_sql_transaction", + "25007": "schema_and_data_statement_mixing_not_supported", + "25P01": "no_active_sql_transaction", + "25P02": "in_failed_sql_transaction", + // Class 26 - Invalid SQL Statement Name + "26000": "invalid_sql_statement_name", + // Class 27 - Triggered Data Change Violation + "27000": "triggered_data_change_violation", + // Class 28 - Invalid Authorization Specification + "28000": "invalid_authorization_specification", + "28P01": "invalid_password", + // Class 2B - Dependent Privilege Descriptors Still Exist + "2B000": "dependent_privilege_descriptors_still_exist", + "2BP01": "dependent_objects_still_exist", + // Class 2D - Invalid Transaction Termination + "2D000": "invalid_transaction_termination", + // Class 2F - SQL Routine Exception + "2F000": "sql_routine_exception", + "2F005": "function_executed_no_return_statement", + "2F002": "modifying_sql_data_not_permitted", + "2F003": "prohibited_sql_statement_attempted", + "2F004": "reading_sql_data_not_permitted", + // Class 34 - Invalid Cursor Name + "34000": "invalid_cursor_name", + // Class 38 - External Routine Exception + "38000": "external_routine_exception", + "38001": "containing_sql_not_permitted", + "38002": "modifying_sql_data_not_permitted", + "38003": "prohibited_sql_statement_attempted", + "38004": "reading_sql_data_not_permitted", + // Class 39 - External Routine Invocation Exception + "39000": "external_routine_invocation_exception", + "39001": "invalid_sqlstate_returned", + "39004": "null_value_not_allowed", + "39P01": "trigger_protocol_violated", + "39P02": "srf_protocol_violated", + // Class 3B - Savepoint Exception + "3B000": "savepoint_exception", + "3B001": "invalid_savepoint_specification", + // Class 3D - Invalid Catalog Name + "3D000": "invalid_catalog_name", + // Class 3F - Invalid Schema Name + "3F000": "invalid_schema_name", + // Class 40 - Transaction Rollback + "40000": "transaction_rollback", + "40002": "transaction_integrity_constraint_violation", + "40001": "serialization_failure", + "40003": "statement_completion_unknown", + "40P01": "deadlock_detected", + // Class 42 - Syntax Error or Access Rule Violation + "42000": "syntax_error_or_access_rule_violation", + "42601": "syntax_error", + "42501": "insufficient_privilege", + "42846": "cannot_coerce", + "42803": "grouping_error", + "42P20": 
"windowing_error", + "42P19": "invalid_recursion", + "42830": "invalid_foreign_key", + "42602": "invalid_name", + "42622": "name_too_long", + "42939": "reserved_name", + "42804": "datatype_mismatch", + "42P18": "indeterminate_datatype", + "42P21": "collation_mismatch", + "42P22": "indeterminate_collation", + "42809": "wrong_object_type", + "42703": "undefined_column", + "42883": "undefined_function", + "42P01": "undefined_table", + "42P02": "undefined_parameter", + "42704": "undefined_object", + "42701": "duplicate_column", + "42P03": "duplicate_cursor", + "42P04": "duplicate_database", + "42723": "duplicate_function", + "42P05": "duplicate_prepared_statement", + "42P06": "duplicate_schema", + "42P07": "duplicate_table", + "42712": "duplicate_alias", + "42710": "duplicate_object", + "42702": "ambiguous_column", + "42725": "ambiguous_function", + "42P08": "ambiguous_parameter", + "42P09": "ambiguous_alias", + "42P10": "invalid_column_reference", + "42611": "invalid_column_definition", + "42P11": "invalid_cursor_definition", + "42P12": "invalid_database_definition", + "42P13": "invalid_function_definition", + "42P14": "invalid_prepared_statement_definition", + "42P15": "invalid_schema_definition", + "42P16": "invalid_table_definition", + "42P17": "invalid_object_definition", + // Class 44 - WITH CHECK OPTION Violation + "44000": "with_check_option_violation", + // Class 53 - Insufficient Resources + "53000": "insufficient_resources", + "53100": "disk_full", + "53200": "out_of_memory", + "53300": "too_many_connections", + "53400": "configuration_limit_exceeded", + // Class 54 - Program Limit Exceeded + "54000": "program_limit_exceeded", + "54001": "statement_too_complex", + "54011": "too_many_columns", + "54023": "too_many_arguments", + // Class 55 - Object Not In Prerequisite State + "55000": "object_not_in_prerequisite_state", + "55006": "object_in_use", + "55P02": "cant_change_runtime_param", + "55P03": "lock_not_available", + // Class 57 - Operator Intervention + "57000": "operator_intervention", + "57014": "query_canceled", + "57P01": "admin_shutdown", + "57P02": "crash_shutdown", + "57P03": "cannot_connect_now", + "57P04": "database_dropped", + // Class 58 - System Error (errors external to PostgreSQL itself) + "58000": "system_error", + "58030": "io_error", + "58P01": "undefined_file", + "58P02": "duplicate_file", + // Class F0 - Configuration File Error + "F0000": "config_file_error", + "F0001": "lock_file_exists", + // Class HV - Foreign Data Wrapper Error (SQL/MED) + "HV000": "fdw_error", + "HV005": "fdw_column_name_not_found", + "HV002": "fdw_dynamic_parameter_value_needed", + "HV010": "fdw_function_sequence_error", + "HV021": "fdw_inconsistent_descriptor_information", + "HV024": "fdw_invalid_attribute_value", + "HV007": "fdw_invalid_column_name", + "HV008": "fdw_invalid_column_number", + "HV004": "fdw_invalid_data_type", + "HV006": "fdw_invalid_data_type_descriptors", + "HV091": "fdw_invalid_descriptor_field_identifier", + "HV00B": "fdw_invalid_handle", + "HV00C": "fdw_invalid_option_index", + "HV00D": "fdw_invalid_option_name", + "HV090": "fdw_invalid_string_length_or_buffer_length", + "HV00A": "fdw_invalid_string_format", + "HV009": "fdw_invalid_use_of_null_pointer", + "HV014": "fdw_too_many_handles", + "HV001": "fdw_out_of_memory", + "HV00P": "fdw_no_schemas", + "HV00J": "fdw_option_name_not_found", + "HV00K": "fdw_reply_handle", + "HV00Q": "fdw_schema_not_found", + "HV00R": "fdw_table_not_found", + "HV00L": "fdw_unable_to_create_execution", + "HV00M": 
"fdw_unable_to_create_reply", + "HV00N": "fdw_unable_to_establish_connection", + // Class P0 - PL/pgSQL Error + "P0000": "plpgsql_error", + "P0001": "raise_exception", + "P0002": "no_data_found", + "P0003": "too_many_rows", + // Class XX - Internal Error + "XX000": "internal_error", + "XX001": "data_corrupted", + "XX002": "index_corrupted", +} + +func parseError(r *readBuf) *Error { + err := new(Error) + for t := r.byte(); t != 0; t = r.byte() { + msg := r.string() + switch t { + case 'S': + err.Severity = msg + case 'C': + err.Code = ErrorCode(msg) + case 'M': + err.Message = msg + case 'D': + err.Detail = msg + case 'H': + err.Hint = msg + case 'P': + err.Position = msg + case 'p': + err.InternalPosition = msg + case 'q': + err.InternalQuery = msg + case 'W': + err.Where = msg + case 's': + err.Schema = msg + case 't': + err.Table = msg + case 'c': + err.Column = msg + case 'd': + err.DataTypeName = msg + case 'n': + err.Constraint = msg + case 'F': + err.File = msg + case 'L': + err.Line = msg + case 'R': + err.Routine = msg + } + } + return err +} + +// Fatal returns true if the Error Severity is fatal. +func (err *Error) Fatal() bool { + return err.Severity == Efatal +} + +// Get implements the legacy PGError interface. New code should use the fields +// of the Error struct directly. +func (err *Error) Get(k byte) (v string) { + switch k { + case 'S': + return err.Severity + case 'C': + return string(err.Code) + case 'M': + return err.Message + case 'D': + return err.Detail + case 'H': + return err.Hint + case 'P': + return err.Position + case 'p': + return err.InternalPosition + case 'q': + return err.InternalQuery + case 'W': + return err.Where + case 's': + return err.Schema + case 't': + return err.Table + case 'c': + return err.Column + case 'd': + return err.DataTypeName + case 'n': + return err.Constraint + case 'F': + return err.File + case 'L': + return err.Line + case 'R': + return err.Routine + } + return "" +} + +func (err Error) Error() string { + return "pq: " + err.Message +} + +// PGError is an interface used by previous versions of pq. It is provided +// only to support legacy code. New code should use the Error type. +type PGError interface { + Error() string + Fatal() bool + Get(k byte) (v string) +} + +func errorf(s string, args ...interface{}) { + panic(fmt.Errorf("pq: %s", fmt.Sprintf(s, args...))) +} + +// TODO(ainar-g) Rename to errorf after removing panics. +func fmterrorf(s string, args ...interface{}) error { + return fmt.Errorf("pq: %s", fmt.Sprintf(s, args...)) +} + +func errRecoverNoErrBadConn(err *error) { + e := recover() + if e == nil { + // Do nothing + return + } + var ok bool + *err, ok = e.(error) + if !ok { + *err = fmt.Errorf("pq: unexpected error: %#v", e) + } +} + +func (c *conn) errRecover(err *error) { + e := recover() + switch v := e.(type) { + case nil: + // Do nothing + case runtime.Error: + c.bad = true + panic(v) + case *Error: + if v.Fatal() { + *err = driver.ErrBadConn + } else { + *err = v + } + case *net.OpError: + c.bad = true + *err = v + case error: + if v == io.EOF || v.(error).Error() == "remote error: handshake failure" { + *err = driver.ErrBadConn + } else { + *err = v + } + + default: + c.bad = true + panic(fmt.Sprintf("unknown error: %#v", e)) + } + + // Any time we return ErrBadConn, we need to remember it since *Tx doesn't + // mark the connection bad in database/sql. 
+ if *err == driver.ErrBadConn { + c.bad = true + } +} diff --git a/vendor/github.com/lib/pq/example/listen/doc.go b/vendor/github.com/lib/pq/example/listen/doc.go new file mode 100644 index 0000000..91e2ddb --- /dev/null +++ b/vendor/github.com/lib/pq/example/listen/doc.go @@ -0,0 +1,98 @@ +/* + +Package listen is a self-contained Go program which uses the LISTEN / NOTIFY +mechanism to avoid polling the database while waiting for more work to arrive. + + // + // You can see the program in action by defining a function similar to + // the following: + // + // CREATE OR REPLACE FUNCTION public.get_work() + // RETURNS bigint + // LANGUAGE sql + // AS $$ + // SELECT CASE WHEN random() >= 0.2 THEN int8 '1' END + // $$ + // ; + + package main + + import ( + "database/sql" + "fmt" + "time" + + "github.com/lib/pq" + ) + + func doWork(db *sql.DB, work int64) { + // work here + } + + func getWork(db *sql.DB) { + for { + // get work from the database here + var work sql.NullInt64 + err := db.QueryRow("SELECT get_work()").Scan(&work) + if err != nil { + fmt.Println("call to get_work() failed: ", err) + time.Sleep(10 * time.Second) + continue + } + if !work.Valid { + // no more work to do + fmt.Println("ran out of work") + return + } + + fmt.Println("starting work on ", work.Int64) + go doWork(db, work.Int64) + } + } + + func waitForNotification(l *pq.Listener) { + select { + case <-l.Notify: + fmt.Println("received notification, new work available") + case <-time.After(90 * time.Second): + go l.Ping() + // Check if there's more work available, just in case it takes + // a while for the Listener to notice connection loss and + // reconnect. + fmt.Println("received no work for 90 seconds, checking for new work") + } + } + + func main() { + var conninfo string = "" + + db, err := sql.Open("postgres", conninfo) + if err != nil { + panic(err) + } + + reportProblem := func(ev pq.ListenerEventType, err error) { + if err != nil { + fmt.Println(err.Error()) + } + } + + minReconn := 10 * time.Second + maxReconn := time.Minute + listener := pq.NewListener(conninfo, minReconn, maxReconn, reportProblem) + err = listener.Listen("getwork") + if err != nil { + panic(err) + } + + fmt.Println("entering main loop") + for { + // process all available work before waiting for notifications + getWork(db) + waitForNotification(listener) + } + } + + +*/ +package listen diff --git a/vendor/github.com/lib/pq/go18_test.go b/vendor/github.com/lib/pq/go18_test.go new file mode 100644 index 0000000..1a88a5b --- /dev/null +++ b/vendor/github.com/lib/pq/go18_test.go @@ -0,0 +1,321 @@ +// +build go1.8 + +package pq + +import ( + "context" + "database/sql" + "runtime" + "strings" + "testing" + "time" +) + +func TestMultipleSimpleQuery(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + rows, err := db.Query("select 1; set time zone default; select 2; select 3") + if err != nil { + t.Fatal(err) + } + defer rows.Close() + + var i int + for rows.Next() { + if err := rows.Scan(&i); err != nil { + t.Fatal(err) + } + if i != 1 { + t.Fatalf("expected 1, got %d", i) + } + } + if !rows.NextResultSet() { + t.Fatal("expected more result sets", rows.Err()) + } + for rows.Next() { + if err := rows.Scan(&i); err != nil { + t.Fatal(err) + } + if i != 2 { + t.Fatalf("expected 2, got %d", i) + } + } + + // Make sure that if we ignore a result we can still query. 
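+	// (The final "select 3" result set above is deliberately left unread.)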
+ + rows, err = db.Query("select 4; select 5") + if err != nil { + t.Fatal(err) + } + defer rows.Close() + + for rows.Next() { + if err := rows.Scan(&i); err != nil { + t.Fatal(err) + } + if i != 4 { + t.Fatalf("expected 4, got %d", i) + } + } + if !rows.NextResultSet() { + t.Fatal("expected more result sets", rows.Err()) + } + for rows.Next() { + if err := rows.Scan(&i); err != nil { + t.Fatal(err) + } + if i != 5 { + t.Fatalf("expected 5, got %d", i) + } + } + if rows.NextResultSet() { + t.Fatal("unexpected result set") + } +} + +const contextRaceIterations = 100 + +func TestContextCancelExec(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + ctx, cancel := context.WithCancel(context.Background()) + + // Delay execution for just a bit until db.ExecContext has begun. + defer time.AfterFunc(time.Millisecond*10, cancel).Stop() + + // Not canceled until after the exec has started. + if _, err := db.ExecContext(ctx, "select pg_sleep(1)"); err == nil { + t.Fatal("expected error") + } else if err.Error() != "pq: canceling statement due to user request" { + t.Fatalf("unexpected error: %s", err) + } + + // Context is already canceled, so error should come before execution. + if _, err := db.ExecContext(ctx, "select pg_sleep(1)"); err == nil { + t.Fatal("expected error") + } else if err.Error() != "context canceled" { + t.Fatalf("unexpected error: %s", err) + } + + for i := 0; i < contextRaceIterations; i++ { + func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + if _, err := db.ExecContext(ctx, "select 1"); err != nil { + t.Fatal(err) + } + }() + + if _, err := db.Exec("select 1"); err != nil { + t.Fatal(err) + } + } +} + +func TestContextCancelQuery(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + ctx, cancel := context.WithCancel(context.Background()) + + // Delay execution for just a bit until db.QueryContext has begun. + defer time.AfterFunc(time.Millisecond*10, cancel).Stop() + + // Not canceled until after the exec has started. + if _, err := db.QueryContext(ctx, "select pg_sleep(1)"); err == nil { + t.Fatal("expected error") + } else if err.Error() != "pq: canceling statement due to user request" { + t.Fatalf("unexpected error: %s", err) + } + + // Context is already canceled, so error should come before execution. + if _, err := db.QueryContext(ctx, "select pg_sleep(1)"); err == nil { + t.Fatal("expected error") + } else if err.Error() != "context canceled" { + t.Fatalf("unexpected error: %s", err) + } + + for i := 0; i < contextRaceIterations; i++ { + func() { + ctx, cancel := context.WithCancel(context.Background()) + rows, err := db.QueryContext(ctx, "select 1") + cancel() + if err != nil { + t.Fatal(err) + } else if err := rows.Close(); err != nil { + t.Fatal(err) + } + }() + + if rows, err := db.Query("select 1"); err != nil { + t.Fatal(err) + } else if err := rows.Close(); err != nil { + t.Fatal(err) + } + } +} + +// TestIssue617 tests that a failed query in QueryContext doesn't lead to a +// goroutine leak. +func TestIssue617(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + const N = 10 + + numGoroutineStart := runtime.NumGoroutine() + for i := 0; i < N; i++ { + func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, err := db.QueryContext(ctx, `SELECT * FROM DOESNOTEXIST`) + pqErr, _ := err.(*Error) + // Expecting "pq: relation \"doesnotexist\" does not exist" error. 
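+			// 42P01 is undefined_table; see errorCodeNames in error.go.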
+ if err == nil || pqErr == nil || pqErr.Code != "42P01" { + t.Fatalf("expected undefined table error, got %v", err) + } + }() + } + numGoroutineFinish := runtime.NumGoroutine() + + // We use N/2 and not N because the GC and other actors may increase or + // decrease the number of goroutines. + if numGoroutineFinish-numGoroutineStart >= N/2 { + t.Errorf("goroutine leak detected, was %d, now %d", numGoroutineStart, numGoroutineFinish) + } +} + +func TestContextCancelBegin(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + ctx, cancel := context.WithCancel(context.Background()) + tx, err := db.BeginTx(ctx, nil) + if err != nil { + t.Fatal(err) + } + + // Delay execution for just a bit until tx.Exec has begun. + defer time.AfterFunc(time.Millisecond*10, cancel).Stop() + + // Not canceled until after the exec has started. + if _, err := tx.Exec("select pg_sleep(1)"); err == nil { + t.Fatal("expected error") + } else if err.Error() != "pq: canceling statement due to user request" { + t.Fatalf("unexpected error: %s", err) + } + + // Transaction is canceled, so expect an error. + if _, err := tx.Query("select pg_sleep(1)"); err == nil { + t.Fatal("expected error") + } else if err != sql.ErrTxDone { + t.Fatalf("unexpected error: %s", err) + } + + // Context is canceled, so cannot begin a transaction. + if _, err := db.BeginTx(ctx, nil); err == nil { + t.Fatal("expected error") + } else if err.Error() != "context canceled" { + t.Fatalf("unexpected error: %s", err) + } + + for i := 0; i < contextRaceIterations; i++ { + func() { + ctx, cancel := context.WithCancel(context.Background()) + tx, err := db.BeginTx(ctx, nil) + cancel() + if err != nil { + t.Fatal(err) + } else if err := tx.Rollback(); err != nil && + err.Error() != "pq: canceling statement due to user request" && + err != sql.ErrTxDone { + t.Fatal(err) + } + }() + + if tx, err := db.Begin(); err != nil { + t.Fatal(err) + } else if err := tx.Rollback(); err != nil { + t.Fatal(err) + } + } +} + +func TestTxOptions(t *testing.T) { + db := openTestConn(t) + defer db.Close() + ctx := context.Background() + + tests := []struct { + level sql.IsolationLevel + isolation string + }{ + { + level: sql.LevelDefault, + isolation: "", + }, + { + level: sql.LevelReadUncommitted, + isolation: "read uncommitted", + }, + { + level: sql.LevelReadCommitted, + isolation: "read committed", + }, + { + level: sql.LevelRepeatableRead, + isolation: "repeatable read", + }, + { + level: sql.LevelSerializable, + isolation: "serializable", + }, + } + + for _, test := range tests { + for _, ro := range []bool{true, false} { + tx, err := db.BeginTx(ctx, &sql.TxOptions{ + Isolation: test.level, + ReadOnly: ro, + }) + if err != nil { + t.Fatal(err) + } + + var isolation string + err = tx.QueryRow("select current_setting('transaction_isolation')").Scan(&isolation) + if err != nil { + t.Fatal(err) + } + + if test.isolation != "" && isolation != test.isolation { + t.Errorf("wrong isolation level: %s != %s", isolation, test.isolation) + } + + var isRO string + err = tx.QueryRow("select current_setting('transaction_read_only')").Scan(&isRO) + if err != nil { + t.Fatal(err) + } + + if ro != (isRO == "on") { + t.Errorf("read/[write,only] not set: %t != %s for level %s", + ro, isRO, test.isolation) + } + + tx.Rollback() + } + } + + _, err := db.BeginTx(ctx, &sql.TxOptions{ + Isolation: sql.LevelLinearizable, + }) + if err == nil { + t.Fatal("expected LevelLinearizable to fail") + } + if !strings.Contains(err.Error(), "isolation level not supported") { + 
t.Errorf("Expected error to mention isolation level, got %q", err) + } +} diff --git a/vendor/github.com/lib/pq/hstore/hstore.go b/vendor/github.com/lib/pq/hstore/hstore.go new file mode 100644 index 0000000..f1470db --- /dev/null +++ b/vendor/github.com/lib/pq/hstore/hstore.go @@ -0,0 +1,118 @@ +package hstore + +import ( + "database/sql" + "database/sql/driver" + "strings" +) + +// Hstore is a wrapper for transferring Hstore values back and forth easily. +type Hstore struct { + Map map[string]sql.NullString +} + +// escapes and quotes hstore keys/values +// s should be a sql.NullString or string +func hQuote(s interface{}) string { + var str string + switch v := s.(type) { + case sql.NullString: + if !v.Valid { + return "NULL" + } + str = v.String + case string: + str = v + default: + panic("not a string or sql.NullString") + } + + str = strings.Replace(str, "\\", "\\\\", -1) + return `"` + strings.Replace(str, "\"", "\\\"", -1) + `"` +} + +// Scan implements the Scanner interface. +// +// Note h.Map is reallocated before the scan to clear existing values. If the +// hstore column's database value is NULL, then h.Map is set to nil instead. +func (h *Hstore) Scan(value interface{}) error { + if value == nil { + h.Map = nil + return nil + } + h.Map = make(map[string]sql.NullString) + var b byte + pair := [][]byte{{}, {}} + pi := 0 + inQuote := false + didQuote := false + sawSlash := false + bindex := 0 + for bindex, b = range value.([]byte) { + if sawSlash { + pair[pi] = append(pair[pi], b) + sawSlash = false + continue + } + + switch b { + case '\\': + sawSlash = true + continue + case '"': + inQuote = !inQuote + if !didQuote { + didQuote = true + } + continue + default: + if !inQuote { + switch b { + case ' ', '\t', '\n', '\r': + continue + case '=': + continue + case '>': + pi = 1 + didQuote = false + continue + case ',': + s := string(pair[1]) + if !didQuote && len(s) == 4 && strings.ToLower(s) == "null" { + h.Map[string(pair[0])] = sql.NullString{String: "", Valid: false} + } else { + h.Map[string(pair[0])] = sql.NullString{String: string(pair[1]), Valid: true} + } + pair[0] = []byte{} + pair[1] = []byte{} + pi = 0 + continue + } + } + } + pair[pi] = append(pair[pi], b) + } + if bindex > 0 { + s := string(pair[1]) + if !didQuote && len(s) == 4 && strings.ToLower(s) == "null" { + h.Map[string(pair[0])] = sql.NullString{String: "", Valid: false} + } else { + h.Map[string(pair[0])] = sql.NullString{String: string(pair[1]), Valid: true} + } + } + return nil +} + +// Value implements the driver Valuer interface. Note if h.Map is nil, the +// database column value will be set to NULL. 
+func (h Hstore) Value() (driver.Value, error) {
+	if h.Map == nil {
+		return nil, nil
+	}
+	parts := []string{}
+	for key, val := range h.Map {
+		thispart := hQuote(key) + "=>" + hQuote(val)
+		parts = append(parts, thispart)
+	}
+	return []byte(strings.Join(parts, ",")), nil
+}
diff --git a/vendor/github.com/lib/pq/hstore/hstore_test.go b/vendor/github.com/lib/pq/hstore/hstore_test.go
new file mode 100644
index 0000000..1c9f2bd
--- /dev/null
+++ b/vendor/github.com/lib/pq/hstore/hstore_test.go
@@ -0,0 +1,148 @@
+package hstore
+
+import (
+	"database/sql"
+	"os"
+	"testing"
+
+	_ "github.com/lib/pq"
+)
+
+type Fatalistic interface {
+	Fatal(args ...interface{})
+}
+
+func openTestConn(t Fatalistic) *sql.DB {
+	datname := os.Getenv("PGDATABASE")
+	sslmode := os.Getenv("PGSSLMODE")
+
+	if datname == "" {
+		os.Setenv("PGDATABASE", "pqgotest")
+	}
+
+	if sslmode == "" {
+		os.Setenv("PGSSLMODE", "disable")
+	}
+
+	conn, err := sql.Open("postgres", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	return conn
+}
+
+func TestHstore(t *testing.T) {
+	db := openTestConn(t)
+	defer db.Close()
+
+	// quietly create the hstore extension if it doesn't exist
+	_, err := db.Exec("CREATE EXTENSION IF NOT EXISTS hstore")
+	if err != nil {
+		t.Skipf("Skipping hstore tests - hstore extension create failed: %s", err.Error())
+	}
+
+	hs := Hstore{}
+
+	// test for null-valued hstores
+	err = db.QueryRow("SELECT NULL::hstore").Scan(&hs)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if hs.Map != nil {
+		t.Fatalf("expected null map")
+	}
+
+	err = db.QueryRow("SELECT $1::hstore", hs).Scan(&hs)
+	if err != nil {
+		t.Fatalf("re-query null map failed: %s", err.Error())
+	}
+	if hs.Map != nil {
+		t.Fatalf("expected null map")
+	}
+
+	// test for empty hstores
+	err = db.QueryRow("SELECT ''::hstore").Scan(&hs)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if hs.Map == nil {
+		t.Fatalf("expected empty map, got null map")
+	}
+	if len(hs.Map) != 0 {
+		t.Fatalf("expected empty map, got len(map)=%d", len(hs.Map))
+	}
+
+	err = db.QueryRow("SELECT $1::hstore", hs).Scan(&hs)
+	if err != nil {
+		t.Fatalf("re-query empty map failed: %s", err.Error())
+	}
+	if hs.Map == nil {
+		t.Fatalf("expected empty map, got null map")
+	}
+	if len(hs.Map) != 0 {
+		t.Fatalf("expected empty map, got len(map)=%d", len(hs.Map))
+	}
+
+	// a few example maps to test out
+	hsOnePair := Hstore{
+		Map: map[string]sql.NullString{
+			"key1": {String: "value1", Valid: true},
+		},
+	}
+
+	hsThreePairs := Hstore{
+		Map: map[string]sql.NullString{
+			"key1": {String: "value1", Valid: true},
+			"key2": {String: "value2", Valid: true},
+			"key3": {String: "value3", Valid: true},
+		},
+	}
+
+	hsSmorgasbord := Hstore{
+		Map: map[string]sql.NullString{
+			"nullstring":     {String: "NULL", Valid: true},
+			"actuallynull":   {String: "", Valid: false},
+			"NULL":           {String: "NULL string key", Valid: true},
+			"withbracket":    {String: "value>42", Valid: true},
+			"withequal":      {String: "value=42", Valid: true},
+			`"withquotes1"`:  {String: `this "should" be fine`, Valid: true},
+			`"withquotes"2"`: {String: `this "should\" also be fine`, Valid: true},
+			"embedded1":      {String: "value1=>x1", Valid: true},
+			"embedded2":      {String: `"value2"=>x2`, Valid: true},
+			"withnewlines":   {String: "\n\nvalue\t=>2", Valid: true},
+			"<>":             {String: `this, "should,\" also, => be fine`, Valid: true},
+		},
+	}
+
+	// test encoding in query params, then decoding during Scan
+	testBidirectional := func(h Hstore) {
+		err = db.QueryRow("SELECT $1::hstore", h).Scan(&hs)
+		if err != nil {
+			t.Fatalf("re-query %d-pair map failed: %s",
len(h.Map), err.Error()) + } + if hs.Map == nil { + t.Fatalf("expected %d-pair map, got null map", len(h.Map)) + } + if len(hs.Map) != len(h.Map) { + t.Fatalf("expected %d-pair map, got len(map)=%d", len(h.Map), len(hs.Map)) + } + + for key, val := range hs.Map { + otherval, found := h.Map[key] + if !found { + t.Fatalf(" key '%v' not found in %d-pair map", key, len(h.Map)) + } + if otherval.Valid != val.Valid { + t.Fatalf(" value %v <> %v in %d-pair map", otherval, val, len(h.Map)) + } + if otherval.String != val.String { + t.Fatalf(" value '%v' <> '%v' in %d-pair map", otherval.String, val.String, len(h.Map)) + } + } + } + + testBidirectional(hsOnePair) + testBidirectional(hsThreePairs) + testBidirectional(hsSmorgasbord) +} diff --git a/vendor/github.com/lib/pq/issues_test.go b/vendor/github.com/lib/pq/issues_test.go new file mode 100644 index 0000000..3a330a0 --- /dev/null +++ b/vendor/github.com/lib/pq/issues_test.go @@ -0,0 +1,26 @@ +package pq + +import "testing" + +func TestIssue494(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + query := `CREATE TEMP TABLE t (i INT PRIMARY KEY)` + if _, err := db.Exec(query); err != nil { + t.Fatal(err) + } + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + + if _, err := txn.Prepare(CopyIn("t", "i")); err != nil { + t.Fatal(err) + } + + if _, err := txn.Query("SELECT 1"); err == nil { + t.Fatal("expected error") + } +} diff --git a/vendor/github.com/lib/pq/notify.go b/vendor/github.com/lib/pq/notify.go new file mode 100644 index 0000000..947d189 --- /dev/null +++ b/vendor/github.com/lib/pq/notify.go @@ -0,0 +1,794 @@ +package pq + +// Package pq is a pure Go Postgres driver for the database/sql package. +// This module contains support for Postgres LISTEN/NOTIFY. + +import ( + "errors" + "fmt" + "sync" + "sync/atomic" + "time" +) + +// Notification represents a single notification from the database. +type Notification struct { + // Process ID (PID) of the notifying postgres backend. + BePid int + // Name of the channel the notification was sent on. + Channel string + // Payload, or the empty string if unspecified. + Extra string +} + +func recvNotification(r *readBuf) *Notification { + bePid := r.int32() + channel := r.string() + extra := r.string() + + return &Notification{bePid, channel, extra} +} + +const ( + connStateIdle int32 = iota + connStateExpectResponse + connStateExpectReadyForQuery +) + +type message struct { + typ byte + err error +} + +var errListenerConnClosed = errors.New("pq: ListenerConn has been closed") + +// ListenerConn is a low-level interface for waiting for notifications. You +// should use Listener instead. +type ListenerConn struct { + // guards cn and err + connectionLock sync.Mutex + cn *conn + err error + + connState int32 + + // the sending goroutine will be holding this lock + senderLock sync.Mutex + + notificationChan chan<- *Notification + + replyChan chan message +} + +// NewListenerConn creates a new ListenerConn. Use NewListener instead. 
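+//
+// A minimal sketch of direct use (the connection string here is an
+// assumption; most applications should use NewListener, which also handles
+// reconnecting):
+//
+//	ch := make(chan *pq.Notification)
+//	lc, err := pq.NewListenerConn("dbname=pqgotest sslmode=disable", ch)
+//	if err != nil {
+//		panic(err) // handle properly in real code
+//	}
+//	if ok, err := lc.Listen("jobs"); !ok {
+//		panic(err) // connection is gone; see ExecSimpleQuery
+//	}
+//	for n := range ch {
+//		fmt.Println("notification on channel", n.Channel)
+//	}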
+func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) { + return newDialListenerConn(defaultDialer{}, name, notificationChan) +} + +func newDialListenerConn(d Dialer, name string, c chan<- *Notification) (*ListenerConn, error) { + cn, err := DialOpen(d, name) + if err != nil { + return nil, err + } + + l := &ListenerConn{ + cn: cn.(*conn), + notificationChan: c, + connState: connStateIdle, + replyChan: make(chan message, 2), + } + + go l.listenerConnMain() + + return l, nil +} + +// We can only allow one goroutine at a time to be running a query on the +// connection for various reasons, so the goroutine sending on the connection +// must be holding senderLock. +// +// Returns an error if an unrecoverable error has occurred and the ListenerConn +// should be abandoned. +func (l *ListenerConn) acquireSenderLock() error { + // we must acquire senderLock first to avoid deadlocks; see ExecSimpleQuery + l.senderLock.Lock() + + l.connectionLock.Lock() + err := l.err + l.connectionLock.Unlock() + if err != nil { + l.senderLock.Unlock() + return err + } + return nil +} + +func (l *ListenerConn) releaseSenderLock() { + l.senderLock.Unlock() +} + +// setState advances the protocol state to newState. Returns false if moving +// to that state from the current state is not allowed. +func (l *ListenerConn) setState(newState int32) bool { + var expectedState int32 + + switch newState { + case connStateIdle: + expectedState = connStateExpectReadyForQuery + case connStateExpectResponse: + expectedState = connStateIdle + case connStateExpectReadyForQuery: + expectedState = connStateExpectResponse + default: + panic(fmt.Sprintf("unexpected listenerConnState %d", newState)) + } + + return atomic.CompareAndSwapInt32(&l.connState, expectedState, newState) +} + +// Main logic is here: receive messages from the postgres backend, forward +// notifications and query replies and keep the internal state in sync with the +// protocol state. Returns when the connection has been lost, is about to go +// away or should be discarded because we couldn't agree on the state with the +// server backend. +func (l *ListenerConn) listenerConnLoop() (err error) { + defer errRecoverNoErrBadConn(&err) + + r := &readBuf{} + for { + t, err := l.cn.recvMessage(r) + if err != nil { + return err + } + + switch t { + case 'A': + // recvNotification copies all the data so we don't need to worry + // about the scratch buffer being overwritten. + l.notificationChan <- recvNotification(r) + + case 'T', 'D': + // only used by tests; ignore + + case 'E': + // We might receive an ErrorResponse even when not in a query; it + // is expected that the server will close the connection after + // that, but we should make sure that the error we display is the + // one from the stray ErrorResponse, not io.ErrUnexpectedEOF. 
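+			// setState fails here only if no query was in flight, i.e. the
+			// ErrorResponse is stray; in that case the parsed error becomes
+			// the loop's return value instead of being sent on replyChan.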
+ if !l.setState(connStateExpectReadyForQuery) { + return parseError(r) + } + l.replyChan <- message{t, parseError(r)} + + case 'C', 'I': + if !l.setState(connStateExpectReadyForQuery) { + // protocol out of sync + return fmt.Errorf("unexpected CommandComplete") + } + // ExecSimpleQuery doesn't need to know about this message + + case 'Z': + if !l.setState(connStateIdle) { + // protocol out of sync + return fmt.Errorf("unexpected ReadyForQuery") + } + l.replyChan <- message{t, nil} + + case 'N', 'S': + // ignore + default: + return fmt.Errorf("unexpected message %q from server in listenerConnLoop", t) + } + } +} + +// This is the main routine for the goroutine receiving on the database +// connection. Most of the main logic is in listenerConnLoop. +func (l *ListenerConn) listenerConnMain() { + err := l.listenerConnLoop() + + // listenerConnLoop terminated; we're done, but we still have to clean up. + // Make sure nobody tries to start any new queries by making sure the err + // pointer is set. It is important that we do not overwrite its value; a + // connection could be closed by either this goroutine or one sending on + // the connection -- whoever closes the connection is assumed to have the + // more meaningful error message (as the other one will probably get + // net.errClosed), so that goroutine sets the error we expose while the + // other error is discarded. If the connection is lost while two + // goroutines are operating on the socket, it probably doesn't matter which + // error we expose so we don't try to do anything more complex. + l.connectionLock.Lock() + if l.err == nil { + l.err = err + } + l.cn.Close() + l.connectionLock.Unlock() + + // There might be a query in-flight; make sure nobody's waiting for a + // response to it, since there's not going to be one. + close(l.replyChan) + + // let the listener know we're done + close(l.notificationChan) + + // this ListenerConn is done +} + +// Listen sends a LISTEN query to the server. See ExecSimpleQuery. +func (l *ListenerConn) Listen(channel string) (bool, error) { + return l.ExecSimpleQuery("LISTEN " + QuoteIdentifier(channel)) +} + +// Unlisten sends an UNLISTEN query to the server. See ExecSimpleQuery. +func (l *ListenerConn) Unlisten(channel string) (bool, error) { + return l.ExecSimpleQuery("UNLISTEN " + QuoteIdentifier(channel)) +} + +// UnlistenAll sends an `UNLISTEN *` query to the server. See ExecSimpleQuery. +func (l *ListenerConn) UnlistenAll() (bool, error) { + return l.ExecSimpleQuery("UNLISTEN *") +} + +// Ping the remote server to make sure it's alive. Non-nil error means the +// connection has failed and should be abandoned. +func (l *ListenerConn) Ping() error { + sent, err := l.ExecSimpleQuery("") + if !sent { + return err + } + if err != nil { + // shouldn't happen + panic(err) + } + return nil +} + +// Attempt to send a query on the connection. Returns an error if sending the +// query failed, and the caller should initiate closure of this connection. +// The caller must be holding senderLock (see acquireSenderLock and +// releaseSenderLock). +func (l *ListenerConn) sendSimpleQuery(q string) (err error) { + defer errRecoverNoErrBadConn(&err) + + // must set connection state before sending the query + if !l.setState(connStateExpectResponse) { + panic("two queries running at the same time") + } + + // Can't use l.cn.writeBuf here because it uses the scratch buffer which + // might get overwritten by listenerConnLoop. 
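+	// The simple-query wire message is built by hand here: tag byte 'Q', a
+	// four-byte length placeholder (backfilled before the message is
+	// written), then the NUL-terminated query string; pos of 1 marks where
+	// the length starts.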
+	b := &writeBuf{
+		buf: []byte("Q\x00\x00\x00\x00"),
+		pos: 1,
+	}
+	b.string(q)
+	l.cn.send(b)
+
+	return nil
+}
+
+// ExecSimpleQuery executes a "simple query" (i.e. one with no bindable
+// parameters) on the connection. The possible return values are:
+//   1) "executed" is true; the query was executed to completion on the
+//      database server. If the query failed, err will be set to the error
+//      returned by the database, otherwise err will be nil.
+//   2) If "executed" is false, the query could not be executed on the remote
+//      server. err will be non-nil.
+//
+// After a call to ExecSimpleQuery has returned an executed=false value, the
+// connection has either been closed or will be closed shortly thereafter, and
+// all subsequently executed queries will return an error.
+func (l *ListenerConn) ExecSimpleQuery(q string) (executed bool, err error) {
+	if err = l.acquireSenderLock(); err != nil {
+		return false, err
+	}
+	defer l.releaseSenderLock()
+
+	err = l.sendSimpleQuery(q)
+	if err != nil {
+		// We can't know what state the protocol is in, so we need to abandon
+		// this connection.
+		l.connectionLock.Lock()
+		// Set the error pointer if it hasn't been set already; see
+		// listenerConnMain.
+		if l.err == nil {
+			l.err = err
+		}
+		l.connectionLock.Unlock()
+		l.cn.c.Close()
+		return false, err
+	}
+
+	// Now we just wait for a reply.
+	for {
+		m, ok := <-l.replyChan
+		if !ok {
+			// We lost the connection to the server, don't bother waiting for
+			// a response; err should have been set already.
+			l.connectionLock.Lock()
+			err := l.err
+			l.connectionLock.Unlock()
+			return false, err
+		}
+		switch m.typ {
+		case 'Z':
+			// sanity check
+			if m.err != nil {
+				panic("m.err != nil")
+			}
+			// done; err might or might not be set
+			return true, err
+
+		case 'E':
+			// sanity check
+			if m.err == nil {
+				panic("m.err == nil")
+			}
+			// server responded with an error; ReadyForQuery to follow
+			err = m.err
+
+		default:
+			return false, fmt.Errorf("unknown response for simple query: %q", m.typ)
+		}
+	}
+}
+
+// Close closes the connection.
+func (l *ListenerConn) Close() error {
+	l.connectionLock.Lock()
+	if l.err != nil {
+		l.connectionLock.Unlock()
+		return errListenerConnClosed
+	}
+	l.err = errListenerConnClosed
+	l.connectionLock.Unlock()
+	// We can't send anything on the connection without holding senderLock.
+	// Simply close the net.Conn to wake up everyone operating on it.
+	return l.cn.c.Close()
+}
+
+// Err returns the reason the connection was closed. It is not safe to call
+// this function until l.Notify has been closed.
+func (l *ListenerConn) Err() error {
+	return l.err
+}
+
+var errListenerClosed = errors.New("pq: Listener has been closed")
+
+// ErrChannelAlreadyOpen is returned from Listen when a channel is already
+// open.
+var ErrChannelAlreadyOpen = errors.New("pq: channel is already open")
+
+// ErrChannelNotOpen is returned from Unlisten when a channel is not open.
+var ErrChannelNotOpen = errors.New("pq: channel is not open")
+
+// ListenerEventType is an enumeration of listener event types.
+type ListenerEventType int
+
+const (
+	// ListenerEventConnected is emitted only when the database connection
+	// has been established for the first time. The err argument of the
+	// callback will always be nil.
+	ListenerEventConnected ListenerEventType = iota
+
+	// ListenerEventDisconnected is emitted after a database connection has
+	// been lost, either because of an error or because Close has been
+	// called.
The err argument will be set to the reason the database + // connection was lost. + ListenerEventDisconnected + + // ListenerEventReconnected is emitted after a database connection has + // been re-established after connection loss. The err argument of the + // callback will always be nil. After this event has been emitted, a + // nil pq.Notification is sent on the Listener.Notify channel. + ListenerEventReconnected + + // ListenerEventConnectionAttemptFailed is emitted after a connection + // to the database was attempted, but failed. The err argument will be + // set to an error describing why the connection attempt did not + // succeed. + ListenerEventConnectionAttemptFailed +) + +// EventCallbackType is the event callback type. See also ListenerEventType +// constants' documentation. +type EventCallbackType func(event ListenerEventType, err error) + +// Listener provides an interface for listening to notifications from a +// PostgreSQL database. For general usage information, see section +// "Notifications". +// +// Listener can safely be used from concurrently running goroutines. +type Listener struct { + // Channel for receiving notifications from the database. In some cases a + // nil value will be sent. See section "Notifications" above. + Notify chan *Notification + + name string + minReconnectInterval time.Duration + maxReconnectInterval time.Duration + dialer Dialer + eventCallback EventCallbackType + + lock sync.Mutex + isClosed bool + reconnectCond *sync.Cond + cn *ListenerConn + connNotificationChan <-chan *Notification + channels map[string]struct{} +} + +// NewListener creates a new database connection dedicated to LISTEN / NOTIFY. +// +// name should be set to a connection string to be used to establish the +// database connection (see section "Connection String Parameters" above). +// +// minReconnectInterval controls the duration to wait before trying to +// re-establish the database connection after connection loss. After each +// consecutive failure this interval is doubled, until maxReconnectInterval is +// reached. Successfully completing the connection establishment procedure +// resets the interval back to minReconnectInterval. +// +// The last parameter eventCallback can be set to a function which will be +// called by the Listener when the state of the underlying database connection +// changes. This callback will be called by the goroutine which dispatches the +// notifications over the Notify channel, so you should try to avoid doing +// potentially time-consuming operations from the callback. +func NewListener(name string, + minReconnectInterval time.Duration, + maxReconnectInterval time.Duration, + eventCallback EventCallbackType) *Listener { + return NewDialListener(defaultDialer{}, name, minReconnectInterval, maxReconnectInterval, eventCallback) +} + +// NewDialListener is like NewListener but it takes a Dialer. +func NewDialListener(d Dialer, + name string, + minReconnectInterval time.Duration, + maxReconnectInterval time.Duration, + eventCallback EventCallbackType) *Listener { + + l := &Listener{ + name: name, + minReconnectInterval: minReconnectInterval, + maxReconnectInterval: maxReconnectInterval, + dialer: d, + eventCallback: eventCallback, + + channels: make(map[string]struct{}), + + Notify: make(chan *Notification, 32), + } + l.reconnectCond = sync.NewCond(&l.lock) + + go l.listenerMain() + + return l +} + +// NotificationChannel returns the notification channel for this listener. 
+// This is the same channel as Notify, and will not be recreated during the
+// lifetime of the Listener.
+func (l *Listener) NotificationChannel() <-chan *Notification {
+	return l.Notify
+}
+
+// Listen starts listening for notifications on a channel. Calls to this
+// function will block until an acknowledgement has been received from the
+// server. Note that Listener automatically re-establishes the connection
+// after connection loss, so this function may block indefinitely if the
+// connection cannot be re-established.
+//
+// Listen will only fail in three conditions:
+//   1) The channel is already open. The returned error will be
+//      ErrChannelAlreadyOpen.
+//   2) The query was executed on the remote server, but PostgreSQL returned an
+//      error message in response to the query. The returned error will be a
+//      pq.Error containing the information the server supplied.
+//   3) Close is called on the Listener before the request could be completed.
+//
+// The channel name is case-sensitive.
+func (l *Listener) Listen(channel string) error {
+	l.lock.Lock()
+	defer l.lock.Unlock()
+
+	if l.isClosed {
+		return errListenerClosed
+	}
+
+	// The server allows you to issue a LISTEN on a channel which is already
+	// open, but it seems useful to be able to detect this case to spot for
+	// mistakes in application logic. If the application genuinely doesn't
+	// care, it can check the exported error and ignore it.
+	_, exists := l.channels[channel]
+	if exists {
+		return ErrChannelAlreadyOpen
+	}
+
+	if l.cn != nil {
+		// If gotResponse is true but error is set, the query was executed on
+		// the remote server, but resulted in an error. This should be
+		// relatively rare, so it's fine if we just pass the error to our
+		// caller. However, if gotResponse is false, we could not complete the
+		// query on the remote server and our underlying connection is about
+		// to go away, so we only add relname to l.channels, and wait for
+		// resync() to take care of the rest.
+		gotResponse, err := l.cn.Listen(channel)
+		if gotResponse && err != nil {
+			return err
+		}
+	}
+
+	l.channels[channel] = struct{}{}
+	for l.cn == nil {
+		l.reconnectCond.Wait()
+		// we let go of the mutex for a while
+		if l.isClosed {
+			return errListenerClosed
+		}
+	}
+
+	return nil
+}
+
+// Unlisten removes a channel from the Listener's channel list. Returns
+// ErrChannelNotOpen if the Listener is not listening on the specified channel.
+// Returns immediately with no error if there is no connection. Note that you
+// might still get notifications for this channel even after Unlisten has
+// returned.
+//
+// The channel name is case-sensitive.
+func (l *Listener) Unlisten(channel string) error {
+	l.lock.Lock()
+	defer l.lock.Unlock()
+
+	if l.isClosed {
+		return errListenerClosed
+	}
+
+	// Similarly to LISTEN, this is not an error in Postgres, but it seems
+	// useful to distinguish from the normal conditions.
+	_, exists := l.channels[channel]
+	if !exists {
+		return ErrChannelNotOpen
+	}
+
+	if l.cn != nil {
+		// Similarly to Listen (see comment in that function), the caller
+		// should only be bothered with an error if it came from the backend as
+		// a response to our query.
+		gotResponse, err := l.cn.Unlisten(channel)
+		if gotResponse && err != nil {
+			return err
+		}
+	}
+
+	// Don't bother waiting for resync if there's no connection.
+	delete(l.channels, channel)
+	return nil
+}
+
+// UnlistenAll removes all channels from the Listener's channel list. Returns
+// immediately with no error if there is no connection.
Note that you might +// still get notifications for any of the deleted channels even after +// UnlistenAll has returned. +func (l *Listener) UnlistenAll() error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + + if l.cn != nil { + // Similarly to Listen (see comment in that function), the caller + // should only be bothered with an error if it came from the backend as + // a response to our query. + gotResponse, err := l.cn.UnlistenAll() + if gotResponse && err != nil { + return err + } + } + + // Don't bother waiting for resync if there's no connection. + l.channels = make(map[string]struct{}) + return nil +} + +// Ping the remote server to make sure it's alive. Non-nil return value means +// that there is no active connection. +func (l *Listener) Ping() error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + if l.cn == nil { + return errors.New("no connection") + } + + return l.cn.Ping() +} + +// Clean up after losing the server connection. Returns l.cn.Err(), which +// should have the reason the connection was lost. +func (l *Listener) disconnectCleanup() error { + l.lock.Lock() + defer l.lock.Unlock() + + // sanity check; can't look at Err() until the channel has been closed + select { + case _, ok := <-l.connNotificationChan: + if ok { + panic("connNotificationChan not closed") + } + default: + panic("connNotificationChan not closed") + } + + err := l.cn.Err() + l.cn.Close() + l.cn = nil + return err +} + +// Synchronize the list of channels we want to be listening on with the server +// after the connection has been established. +func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notification) error { + doneChan := make(chan error) + go func(notificationChan <-chan *Notification) { + for channel := range l.channels { + // If we got a response, return that error to our caller as it's + // going to be more descriptive than cn.Err(). + gotResponse, err := cn.Listen(channel) + if gotResponse && err != nil { + doneChan <- err + return + } + + // If we couldn't reach the server, wait for notificationChan to + // close and then return the error message from the connection, as + // per ListenerConn's interface. + if err != nil { + for range notificationChan { + } + doneChan <- cn.Err() + return + } + } + doneChan <- nil + }(notificationChan) + + // Ignore notifications while synchronization is going on to avoid + // deadlocks. We have to send a nil notification over Notify anyway as + // we can't possibly know which notifications (if any) were lost while + // the connection was down, so there's no reason to try and process + // these messages at all. + for { + select { + case _, ok := <-notificationChan: + if !ok { + notificationChan = nil + } + + case err := <-doneChan: + return err + } + } +} + +// caller should NOT be holding l.lock +func (l *Listener) closed() bool { + l.lock.Lock() + defer l.lock.Unlock() + + return l.isClosed +} + +func (l *Listener) connect() error { + notificationChan := make(chan *Notification, 32) + cn, err := newDialListenerConn(l.dialer, l.name, notificationChan) + if err != nil { + return err + } + + l.lock.Lock() + defer l.lock.Unlock() + + err = l.resync(cn, notificationChan) + if err != nil { + cn.Close() + return err + } + + l.cn = cn + l.connNotificationChan = notificationChan + l.reconnectCond.Broadcast() + + return nil +} + +// Close disconnects the Listener from the database and shuts it down. 
+// Subsequent calls to its methods will return an error. Close returns an +// error if the connection has already been closed. +func (l *Listener) Close() error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + + if l.cn != nil { + l.cn.Close() + } + l.isClosed = true + + return nil +} + +func (l *Listener) emitEvent(event ListenerEventType, err error) { + if l.eventCallback != nil { + l.eventCallback(event, err) + } +} + +// Main logic here: maintain a connection to the server when possible, wait +// for notifications and emit events. +func (l *Listener) listenerConnLoop() { + var nextReconnect time.Time + + reconnectInterval := l.minReconnectInterval + for { + for { + err := l.connect() + if err == nil { + break + } + + if l.closed() { + return + } + l.emitEvent(ListenerEventConnectionAttemptFailed, err) + + time.Sleep(reconnectInterval) + reconnectInterval *= 2 + if reconnectInterval > l.maxReconnectInterval { + reconnectInterval = l.maxReconnectInterval + } + } + + if nextReconnect.IsZero() { + l.emitEvent(ListenerEventConnected, nil) + } else { + l.emitEvent(ListenerEventReconnected, nil) + l.Notify <- nil + } + + reconnectInterval = l.minReconnectInterval + nextReconnect = time.Now().Add(reconnectInterval) + + for { + notification, ok := <-l.connNotificationChan + if !ok { + // lost connection, loop again + break + } + l.Notify <- notification + } + + err := l.disconnectCleanup() + if l.closed() { + return + } + l.emitEvent(ListenerEventDisconnected, err) + + time.Sleep(time.Until(nextReconnect)) + } +} + +func (l *Listener) listenerMain() { + l.listenerConnLoop() + close(l.Notify) +} diff --git a/vendor/github.com/lib/pq/notify_test.go b/vendor/github.com/lib/pq/notify_test.go new file mode 100644 index 0000000..075666d --- /dev/null +++ b/vendor/github.com/lib/pq/notify_test.go @@ -0,0 +1,570 @@ +package pq + +import ( + "errors" + "fmt" + "io" + "os" + "runtime" + "sync" + "testing" + "time" +) + +var errNilNotification = errors.New("nil notification") + +func expectNotification(t *testing.T, ch <-chan *Notification, relname string, extra string) error { + select { + case n := <-ch: + if n == nil { + return errNilNotification + } + if n.Channel != relname || n.Extra != extra { + return fmt.Errorf("unexpected notification %v", n) + } + return nil + case <-time.After(1500 * time.Millisecond): + return fmt.Errorf("timeout") + } +} + +func expectNoNotification(t *testing.T, ch <-chan *Notification) error { + select { + case n := <-ch: + return fmt.Errorf("unexpected notification %v", n) + case <-time.After(100 * time.Millisecond): + return nil + } +} + +func expectEvent(t *testing.T, eventch <-chan ListenerEventType, et ListenerEventType) error { + select { + case e := <-eventch: + if e != et { + return fmt.Errorf("unexpected event %v", e) + } + return nil + case <-time.After(1500 * time.Millisecond): + panic("expectEvent timeout") + } +} + +func expectNoEvent(t *testing.T, eventch <-chan ListenerEventType) error { + select { + case e := <-eventch: + return fmt.Errorf("unexpected event %v", e) + case <-time.After(100 * time.Millisecond): + return nil + } +} + +func newTestListenerConn(t *testing.T) (*ListenerConn, <-chan *Notification) { + datname := os.Getenv("PGDATABASE") + sslmode := os.Getenv("PGSSLMODE") + + if datname == "" { + os.Setenv("PGDATABASE", "pqgotest") + } + + if sslmode == "" { + os.Setenv("PGSSLMODE", "disable") + } + + notificationChan := make(chan *Notification) + l, err := NewListenerConn("", notificationChan) + if 
err != nil { + t.Fatal(err) + } + + return l, notificationChan +} + +func TestNewListenerConn(t *testing.T) { + l, _ := newTestListenerConn(t) + + defer l.Close() +} + +func TestConnListen(t *testing.T) { + l, channel := newTestListenerConn(t) + + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + ok, err := l.Listen("notify_test") + if !ok || err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_test") + if err != nil { + t.Fatal(err) + } + + err = expectNotification(t, channel, "notify_test", "") + if err != nil { + t.Fatal(err) + } +} + +func TestConnUnlisten(t *testing.T) { + l, channel := newTestListenerConn(t) + + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + ok, err := l.Listen("notify_test") + if !ok || err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_test") + if err != nil { + t.Fatal(err) + } + + err = expectNotification(t, channel, "notify_test", "") + if err != nil { + t.Fatal(err) + } + + ok, err = l.Unlisten("notify_test") + if !ok || err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_test") + if err != nil { + t.Fatal(err) + } + + err = expectNoNotification(t, channel) + if err != nil { + t.Fatal(err) + } +} + +func TestConnUnlistenAll(t *testing.T) { + l, channel := newTestListenerConn(t) + + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + ok, err := l.Listen("notify_test") + if !ok || err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_test") + if err != nil { + t.Fatal(err) + } + + err = expectNotification(t, channel, "notify_test", "") + if err != nil { + t.Fatal(err) + } + + ok, err = l.UnlistenAll() + if !ok || err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_test") + if err != nil { + t.Fatal(err) + } + + err = expectNoNotification(t, channel) + if err != nil { + t.Fatal(err) + } +} + +func TestConnClose(t *testing.T) { + l, _ := newTestListenerConn(t) + defer l.Close() + + err := l.Close() + if err != nil { + t.Fatal(err) + } + err = l.Close() + if err != errListenerConnClosed { + t.Fatalf("expected errListenerConnClosed; got %v", err) + } +} + +func TestConnPing(t *testing.T) { + l, _ := newTestListenerConn(t) + defer l.Close() + err := l.Ping() + if err != nil { + t.Fatal(err) + } + err = l.Close() + if err != nil { + t.Fatal(err) + } + err = l.Ping() + if err != errListenerConnClosed { + t.Fatalf("expected errListenerConnClosed; got %v", err) + } +} + +// Test for deadlock where a query fails while another one is queued +func TestConnExecDeadlock(t *testing.T) { + l, _ := newTestListenerConn(t) + defer l.Close() + + var wg sync.WaitGroup + wg.Add(2) + + go func() { + l.ExecSimpleQuery("SELECT pg_sleep(60)") + wg.Done() + }() + runtime.Gosched() + go func() { + l.ExecSimpleQuery("SELECT 1") + wg.Done() + }() + // give the two goroutines some time to get into position + runtime.Gosched() + // calls Close on the net.Conn; equivalent to a network failure + l.Close() + + defer time.AfterFunc(10*time.Second, func() { + panic("timed out") + }).Stop() + wg.Wait() +} + +// Test for ListenerConn being closed while a slow query is executing +func TestListenerConnCloseWhileQueryIsExecuting(t *testing.T) { + l, _ := newTestListenerConn(t) + defer l.Close() + + var wg sync.WaitGroup + wg.Add(1) + + go func() { + sent, err := l.ExecSimpleQuery("SELECT pg_sleep(60)") + if sent { + panic("expected sent=false") + } + // could be any of a number of errors + if err == nil { + panic("expected error") + } + wg.Done() + }() + // give the above goroutine 
some time to get into position + runtime.Gosched() + err := l.Close() + if err != nil { + t.Fatal(err) + } + + defer time.AfterFunc(10*time.Second, func() { + panic("timed out") + }).Stop() + wg.Wait() +} + +func TestNotifyExtra(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + if getServerVersion(t, db) < 90000 { + t.Skip("skipping NOTIFY payload test since the server does not appear to support it") + } + + l, channel := newTestListenerConn(t) + defer l.Close() + + ok, err := l.Listen("notify_test") + if !ok || err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_test, 'something'") + if err != nil { + t.Fatal(err) + } + + err = expectNotification(t, channel, "notify_test", "something") + if err != nil { + t.Fatal(err) + } +} + +// create a new test listener and also set the timeouts +func newTestListenerTimeout(t *testing.T, min time.Duration, max time.Duration) (*Listener, <-chan ListenerEventType) { + datname := os.Getenv("PGDATABASE") + sslmode := os.Getenv("PGSSLMODE") + + if datname == "" { + os.Setenv("PGDATABASE", "pqgotest") + } + + if sslmode == "" { + os.Setenv("PGSSLMODE", "disable") + } + + eventch := make(chan ListenerEventType, 16) + l := NewListener("", min, max, func(t ListenerEventType, err error) { eventch <- t }) + err := expectEvent(t, eventch, ListenerEventConnected) + if err != nil { + t.Fatal(err) + } + return l, eventch +} + +func newTestListener(t *testing.T) (*Listener, <-chan ListenerEventType) { + return newTestListenerTimeout(t, time.Hour, time.Hour) +} + +func TestListenerListen(t *testing.T) { + l, _ := newTestListener(t) + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + err := l.Listen("notify_listen_test") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = expectNotification(t, l.Notify, "notify_listen_test", "") + if err != nil { + t.Fatal(err) + } +} + +func TestListenerUnlisten(t *testing.T) { + l, _ := newTestListener(t) + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + err := l.Listen("notify_listen_test") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = l.Unlisten("notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = expectNotification(t, l.Notify, "notify_listen_test", "") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = expectNoNotification(t, l.Notify) + if err != nil { + t.Fatal(err) + } +} + +func TestListenerUnlistenAll(t *testing.T) { + l, _ := newTestListener(t) + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + err := l.Listen("notify_listen_test") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = l.UnlistenAll() + if err != nil { + t.Fatal(err) + } + + err = expectNotification(t, l.Notify, "notify_listen_test", "") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = expectNoNotification(t, l.Notify) + if err != nil { + t.Fatal(err) + } +} + +func TestListenerFailedQuery(t *testing.T) { + l, eventch := newTestListener(t) + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + err := l.Listen("notify_listen_test") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + 
		t.Fatal(err)
+	}
+
+	err = expectNotification(t, l.Notify, "notify_listen_test", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// shouldn't cause a disconnect
+	ok, err := l.cn.ExecSimpleQuery("SELECT error")
+	if !ok {
+		t.Fatalf("could not send query to server: %v", err)
+	}
+	_, ok = err.(PGError)
+	if !ok {
+		t.Fatalf("unexpected error %v", err)
+	}
+	err = expectNoEvent(t, eventch)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// should still work
+	_, err = db.Exec("NOTIFY notify_listen_test")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = expectNotification(t, l.Notify, "notify_listen_test", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestListenerReconnect(t *testing.T) {
+	l, eventch := newTestListenerTimeout(t, 20*time.Millisecond, time.Hour)
+	defer l.Close()
+
+	db := openTestConn(t)
+	defer db.Close()
+
+	err := l.Listen("notify_listen_test")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = db.Exec("NOTIFY notify_listen_test")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = expectNotification(t, l.Notify, "notify_listen_test", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// kill the connection and make sure it comes back up
+	ok, err := l.cn.ExecSimpleQuery("SELECT pg_terminate_backend(pg_backend_pid())")
+	if ok {
+		t.Fatalf("could not kill the connection: %v", err)
+	}
+	if err != io.EOF {
+		t.Fatalf("unexpected error %v", err)
+	}
+	err = expectEvent(t, eventch, ListenerEventDisconnected)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = expectEvent(t, eventch, ListenerEventReconnected)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// should still work
+	_, err = db.Exec("NOTIFY notify_listen_test")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// should get nil after Reconnected
+	err = expectNotification(t, l.Notify, "", "")
+	if err != errNilNotification {
+		t.Fatal(err)
+	}
+
+	err = expectNotification(t, l.Notify, "notify_listen_test", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestListenerClose(t *testing.T) {
+	l, _ := newTestListenerTimeout(t, 20*time.Millisecond, time.Hour)
+	defer l.Close()
+
+	err := l.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = l.Close()
+	if err != errListenerClosed {
+		t.Fatalf("expected errListenerClosed; got %v", err)
+	}
+}
+
+func TestListenerPing(t *testing.T) {
+	l, _ := newTestListenerTimeout(t, 20*time.Millisecond, time.Hour)
+	defer l.Close()
+
+	err := l.Ping()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = l.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = l.Ping()
+	if err != errListenerClosed {
+		t.Fatalf("expected errListenerClosed; got %v", err)
+	}
+}
diff --git a/vendor/github.com/lib/pq/oid/doc.go b/vendor/github.com/lib/pq/oid/doc.go
new file mode 100644
index 0000000..caaede2
--- /dev/null
+++ b/vendor/github.com/lib/pq/oid/doc.go
@@ -0,0 +1,6 @@
+// Package oid contains OID constants
+// as defined by the Postgres server.
+package oid
+
+// Oid is a Postgres Object ID.
+type Oid uint32
diff --git a/vendor/github.com/lib/pq/oid/gen.go b/vendor/github.com/lib/pq/oid/gen.go
new file mode 100644
index 0000000..7c634cd
--- /dev/null
+++ b/vendor/github.com/lib/pq/oid/gen.go
@@ -0,0 +1,93 @@
+// +build ignore
+
+// Generate the table of OID values.
+// Run with 'go run gen.go'.
+package main
+
+import (
+	"database/sql"
+	"fmt"
+	"log"
+	"os"
+	"os/exec"
+	"strings"
+
+	_ "github.com/lib/pq"
+)
+
+// OID represents a postgres Object Identifier Type.
+type OID struct {
+	ID   int
+	Type string
+}
+
+// Name returns an upper case version of the oid type.
+func (o OID) Name() string { + return strings.ToUpper(o.Type) +} + +func main() { + datname := os.Getenv("PGDATABASE") + sslmode := os.Getenv("PGSSLMODE") + + if datname == "" { + os.Setenv("PGDATABASE", "pqgotest") + } + + if sslmode == "" { + os.Setenv("PGSSLMODE", "disable") + } + + db, err := sql.Open("postgres", "") + if err != nil { + log.Fatal(err) + } + rows, err := db.Query(` + SELECT typname, oid + FROM pg_type WHERE oid < 10000 + ORDER BY oid; + `) + if err != nil { + log.Fatal(err) + } + oids := make([]*OID, 0) + for rows.Next() { + var oid OID + if err = rows.Scan(&oid.Type, &oid.ID); err != nil { + log.Fatal(err) + } + oids = append(oids, &oid) + } + if err = rows.Err(); err != nil { + log.Fatal(err) + } + cmd := exec.Command("gofmt") + cmd.Stderr = os.Stderr + w, err := cmd.StdinPipe() + if err != nil { + log.Fatal(err) + } + f, err := os.Create("types.go") + if err != nil { + log.Fatal(err) + } + cmd.Stdout = f + err = cmd.Start() + if err != nil { + log.Fatal(err) + } + fmt.Fprintln(w, "// Code generated by gen.go. DO NOT EDIT.") + fmt.Fprintln(w, "\npackage oid") + fmt.Fprintln(w, "const (") + for _, oid := range oids { + fmt.Fprintf(w, "T_%s Oid = %d\n", oid.Type, oid.ID) + } + fmt.Fprintln(w, ")") + fmt.Fprintln(w, "var TypeName = map[Oid]string{") + for _, oid := range oids { + fmt.Fprintf(w, "T_%s: \"%s\",\n", oid.Type, oid.Name()) + } + fmt.Fprintln(w, "}") + w.Close() + cmd.Wait() +} diff --git a/vendor/github.com/lib/pq/oid/types.go b/vendor/github.com/lib/pq/oid/types.go new file mode 100644 index 0000000..ecc84c2 --- /dev/null +++ b/vendor/github.com/lib/pq/oid/types.go @@ -0,0 +1,343 @@ +// Code generated by gen.go. DO NOT EDIT. + +package oid + +const ( + T_bool Oid = 16 + T_bytea Oid = 17 + T_char Oid = 18 + T_name Oid = 19 + T_int8 Oid = 20 + T_int2 Oid = 21 + T_int2vector Oid = 22 + T_int4 Oid = 23 + T_regproc Oid = 24 + T_text Oid = 25 + T_oid Oid = 26 + T_tid Oid = 27 + T_xid Oid = 28 + T_cid Oid = 29 + T_oidvector Oid = 30 + T_pg_ddl_command Oid = 32 + T_pg_type Oid = 71 + T_pg_attribute Oid = 75 + T_pg_proc Oid = 81 + T_pg_class Oid = 83 + T_json Oid = 114 + T_xml Oid = 142 + T__xml Oid = 143 + T_pg_node_tree Oid = 194 + T__json Oid = 199 + T_smgr Oid = 210 + T_index_am_handler Oid = 325 + T_point Oid = 600 + T_lseg Oid = 601 + T_path Oid = 602 + T_box Oid = 603 + T_polygon Oid = 604 + T_line Oid = 628 + T__line Oid = 629 + T_cidr Oid = 650 + T__cidr Oid = 651 + T_float4 Oid = 700 + T_float8 Oid = 701 + T_abstime Oid = 702 + T_reltime Oid = 703 + T_tinterval Oid = 704 + T_unknown Oid = 705 + T_circle Oid = 718 + T__circle Oid = 719 + T_money Oid = 790 + T__money Oid = 791 + T_macaddr Oid = 829 + T_inet Oid = 869 + T__bool Oid = 1000 + T__bytea Oid = 1001 + T__char Oid = 1002 + T__name Oid = 1003 + T__int2 Oid = 1005 + T__int2vector Oid = 1006 + T__int4 Oid = 1007 + T__regproc Oid = 1008 + T__text Oid = 1009 + T__tid Oid = 1010 + T__xid Oid = 1011 + T__cid Oid = 1012 + T__oidvector Oid = 1013 + T__bpchar Oid = 1014 + T__varchar Oid = 1015 + T__int8 Oid = 1016 + T__point Oid = 1017 + T__lseg Oid = 1018 + T__path Oid = 1019 + T__box Oid = 1020 + T__float4 Oid = 1021 + T__float8 Oid = 1022 + T__abstime Oid = 1023 + T__reltime Oid = 1024 + T__tinterval Oid = 1025 + T__polygon Oid = 1027 + T__oid Oid = 1028 + T_aclitem Oid = 1033 + T__aclitem Oid = 1034 + T__macaddr Oid = 1040 + T__inet Oid = 1041 + T_bpchar Oid = 1042 + T_varchar Oid = 1043 + T_date Oid = 1082 + T_time Oid = 1083 + T_timestamp Oid = 1114 + T__timestamp Oid = 1115 + T__date Oid = 1182 + 
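+	// Note: type names that begin with an underscore in pg_type (rendered
+	// here as T__*) are the array forms of the corresponding base types;
+	// T__date, for example, is the OID of Postgres's date[].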
T__time Oid = 1183 + T_timestamptz Oid = 1184 + T__timestamptz Oid = 1185 + T_interval Oid = 1186 + T__interval Oid = 1187 + T__numeric Oid = 1231 + T_pg_database Oid = 1248 + T__cstring Oid = 1263 + T_timetz Oid = 1266 + T__timetz Oid = 1270 + T_bit Oid = 1560 + T__bit Oid = 1561 + T_varbit Oid = 1562 + T__varbit Oid = 1563 + T_numeric Oid = 1700 + T_refcursor Oid = 1790 + T__refcursor Oid = 2201 + T_regprocedure Oid = 2202 + T_regoper Oid = 2203 + T_regoperator Oid = 2204 + T_regclass Oid = 2205 + T_regtype Oid = 2206 + T__regprocedure Oid = 2207 + T__regoper Oid = 2208 + T__regoperator Oid = 2209 + T__regclass Oid = 2210 + T__regtype Oid = 2211 + T_record Oid = 2249 + T_cstring Oid = 2275 + T_any Oid = 2276 + T_anyarray Oid = 2277 + T_void Oid = 2278 + T_trigger Oid = 2279 + T_language_handler Oid = 2280 + T_internal Oid = 2281 + T_opaque Oid = 2282 + T_anyelement Oid = 2283 + T__record Oid = 2287 + T_anynonarray Oid = 2776 + T_pg_authid Oid = 2842 + T_pg_auth_members Oid = 2843 + T__txid_snapshot Oid = 2949 + T_uuid Oid = 2950 + T__uuid Oid = 2951 + T_txid_snapshot Oid = 2970 + T_fdw_handler Oid = 3115 + T_pg_lsn Oid = 3220 + T__pg_lsn Oid = 3221 + T_tsm_handler Oid = 3310 + T_anyenum Oid = 3500 + T_tsvector Oid = 3614 + T_tsquery Oid = 3615 + T_gtsvector Oid = 3642 + T__tsvector Oid = 3643 + T__gtsvector Oid = 3644 + T__tsquery Oid = 3645 + T_regconfig Oid = 3734 + T__regconfig Oid = 3735 + T_regdictionary Oid = 3769 + T__regdictionary Oid = 3770 + T_jsonb Oid = 3802 + T__jsonb Oid = 3807 + T_anyrange Oid = 3831 + T_event_trigger Oid = 3838 + T_int4range Oid = 3904 + T__int4range Oid = 3905 + T_numrange Oid = 3906 + T__numrange Oid = 3907 + T_tsrange Oid = 3908 + T__tsrange Oid = 3909 + T_tstzrange Oid = 3910 + T__tstzrange Oid = 3911 + T_daterange Oid = 3912 + T__daterange Oid = 3913 + T_int8range Oid = 3926 + T__int8range Oid = 3927 + T_pg_shseclabel Oid = 4066 + T_regnamespace Oid = 4089 + T__regnamespace Oid = 4090 + T_regrole Oid = 4096 + T__regrole Oid = 4097 +) + +var TypeName = map[Oid]string{ + T_bool: "BOOL", + T_bytea: "BYTEA", + T_char: "CHAR", + T_name: "NAME", + T_int8: "INT8", + T_int2: "INT2", + T_int2vector: "INT2VECTOR", + T_int4: "INT4", + T_regproc: "REGPROC", + T_text: "TEXT", + T_oid: "OID", + T_tid: "TID", + T_xid: "XID", + T_cid: "CID", + T_oidvector: "OIDVECTOR", + T_pg_ddl_command: "PG_DDL_COMMAND", + T_pg_type: "PG_TYPE", + T_pg_attribute: "PG_ATTRIBUTE", + T_pg_proc: "PG_PROC", + T_pg_class: "PG_CLASS", + T_json: "JSON", + T_xml: "XML", + T__xml: "_XML", + T_pg_node_tree: "PG_NODE_TREE", + T__json: "_JSON", + T_smgr: "SMGR", + T_index_am_handler: "INDEX_AM_HANDLER", + T_point: "POINT", + T_lseg: "LSEG", + T_path: "PATH", + T_box: "BOX", + T_polygon: "POLYGON", + T_line: "LINE", + T__line: "_LINE", + T_cidr: "CIDR", + T__cidr: "_CIDR", + T_float4: "FLOAT4", + T_float8: "FLOAT8", + T_abstime: "ABSTIME", + T_reltime: "RELTIME", + T_tinterval: "TINTERVAL", + T_unknown: "UNKNOWN", + T_circle: "CIRCLE", + T__circle: "_CIRCLE", + T_money: "MONEY", + T__money: "_MONEY", + T_macaddr: "MACADDR", + T_inet: "INET", + T__bool: "_BOOL", + T__bytea: "_BYTEA", + T__char: "_CHAR", + T__name: "_NAME", + T__int2: "_INT2", + T__int2vector: "_INT2VECTOR", + T__int4: "_INT4", + T__regproc: "_REGPROC", + T__text: "_TEXT", + T__tid: "_TID", + T__xid: "_XID", + T__cid: "_CID", + T__oidvector: "_OIDVECTOR", + T__bpchar: "_BPCHAR", + T__varchar: "_VARCHAR", + T__int8: "_INT8", + T__point: "_POINT", + T__lseg: "_LSEG", + T__path: "_PATH", + T__box: "_BOX", + T__float4: "_FLOAT4", + 
T__float8: "_FLOAT8", + T__abstime: "_ABSTIME", + T__reltime: "_RELTIME", + T__tinterval: "_TINTERVAL", + T__polygon: "_POLYGON", + T__oid: "_OID", + T_aclitem: "ACLITEM", + T__aclitem: "_ACLITEM", + T__macaddr: "_MACADDR", + T__inet: "_INET", + T_bpchar: "BPCHAR", + T_varchar: "VARCHAR", + T_date: "DATE", + T_time: "TIME", + T_timestamp: "TIMESTAMP", + T__timestamp: "_TIMESTAMP", + T__date: "_DATE", + T__time: "_TIME", + T_timestamptz: "TIMESTAMPTZ", + T__timestamptz: "_TIMESTAMPTZ", + T_interval: "INTERVAL", + T__interval: "_INTERVAL", + T__numeric: "_NUMERIC", + T_pg_database: "PG_DATABASE", + T__cstring: "_CSTRING", + T_timetz: "TIMETZ", + T__timetz: "_TIMETZ", + T_bit: "BIT", + T__bit: "_BIT", + T_varbit: "VARBIT", + T__varbit: "_VARBIT", + T_numeric: "NUMERIC", + T_refcursor: "REFCURSOR", + T__refcursor: "_REFCURSOR", + T_regprocedure: "REGPROCEDURE", + T_regoper: "REGOPER", + T_regoperator: "REGOPERATOR", + T_regclass: "REGCLASS", + T_regtype: "REGTYPE", + T__regprocedure: "_REGPROCEDURE", + T__regoper: "_REGOPER", + T__regoperator: "_REGOPERATOR", + T__regclass: "_REGCLASS", + T__regtype: "_REGTYPE", + T_record: "RECORD", + T_cstring: "CSTRING", + T_any: "ANY", + T_anyarray: "ANYARRAY", + T_void: "VOID", + T_trigger: "TRIGGER", + T_language_handler: "LANGUAGE_HANDLER", + T_internal: "INTERNAL", + T_opaque: "OPAQUE", + T_anyelement: "ANYELEMENT", + T__record: "_RECORD", + T_anynonarray: "ANYNONARRAY", + T_pg_authid: "PG_AUTHID", + T_pg_auth_members: "PG_AUTH_MEMBERS", + T__txid_snapshot: "_TXID_SNAPSHOT", + T_uuid: "UUID", + T__uuid: "_UUID", + T_txid_snapshot: "TXID_SNAPSHOT", + T_fdw_handler: "FDW_HANDLER", + T_pg_lsn: "PG_LSN", + T__pg_lsn: "_PG_LSN", + T_tsm_handler: "TSM_HANDLER", + T_anyenum: "ANYENUM", + T_tsvector: "TSVECTOR", + T_tsquery: "TSQUERY", + T_gtsvector: "GTSVECTOR", + T__tsvector: "_TSVECTOR", + T__gtsvector: "_GTSVECTOR", + T__tsquery: "_TSQUERY", + T_regconfig: "REGCONFIG", + T__regconfig: "_REGCONFIG", + T_regdictionary: "REGDICTIONARY", + T__regdictionary: "_REGDICTIONARY", + T_jsonb: "JSONB", + T__jsonb: "_JSONB", + T_anyrange: "ANYRANGE", + T_event_trigger: "EVENT_TRIGGER", + T_int4range: "INT4RANGE", + T__int4range: "_INT4RANGE", + T_numrange: "NUMRANGE", + T__numrange: "_NUMRANGE", + T_tsrange: "TSRANGE", + T__tsrange: "_TSRANGE", + T_tstzrange: "TSTZRANGE", + T__tstzrange: "_TSTZRANGE", + T_daterange: "DATERANGE", + T__daterange: "_DATERANGE", + T_int8range: "INT8RANGE", + T__int8range: "_INT8RANGE", + T_pg_shseclabel: "PG_SHSECLABEL", + T_regnamespace: "REGNAMESPACE", + T__regnamespace: "_REGNAMESPACE", + T_regrole: "REGROLE", + T__regrole: "_REGROLE", +} diff --git a/vendor/github.com/lib/pq/rows.go b/vendor/github.com/lib/pq/rows.go new file mode 100644 index 0000000..c6aa5b9 --- /dev/null +++ b/vendor/github.com/lib/pq/rows.go @@ -0,0 +1,93 @@ +package pq + +import ( + "math" + "reflect" + "time" + + "github.com/lib/pq/oid" +) + +const headerSize = 4 + +type fieldDesc struct { + // The object ID of the data type. + OID oid.Oid + // The data type size (see pg_type.typlen). + // Note that negative values denote variable-width types. + Len int + // The type modifier (see pg_attribute.atttypmod). + // The meaning of the modifier is type-specific. 
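+	// For example, a varchar(5) column reports Mod = 9 (the declared
+	// length plus the 4-byte header), while numeric packs precision and
+	// scale into the modifier; see PrecisionScale below.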
+	Mod int
+}
+
+func (fd fieldDesc) Type() reflect.Type {
+	switch fd.OID {
+	case oid.T_int8:
+		return reflect.TypeOf(int64(0))
+	case oid.T_int4:
+		return reflect.TypeOf(int32(0))
+	case oid.T_int2:
+		return reflect.TypeOf(int16(0))
+	case oid.T_varchar, oid.T_text:
+		return reflect.TypeOf("")
+	case oid.T_bool:
+		return reflect.TypeOf(false)
+	case oid.T_date, oid.T_time, oid.T_timetz, oid.T_timestamp, oid.T_timestamptz:
+		return reflect.TypeOf(time.Time{})
+	case oid.T_bytea:
+		return reflect.TypeOf([]byte(nil))
+	default:
+		return reflect.TypeOf(new(interface{})).Elem()
+	}
+}
+
+func (fd fieldDesc) Name() string {
+	return oid.TypeName[fd.OID]
+}
+
+func (fd fieldDesc) Length() (length int64, ok bool) {
+	switch fd.OID {
+	case oid.T_text, oid.T_bytea:
+		return math.MaxInt64, true
+	case oid.T_varchar, oid.T_bpchar:
+		return int64(fd.Mod - headerSize), true
+	default:
+		return 0, false
+	}
+}
+
+func (fd fieldDesc) PrecisionScale() (precision, scale int64, ok bool) {
+	switch fd.OID {
+	case oid.T_numeric, oid.T__numeric:
+		mod := fd.Mod - headerSize
+		precision = int64((mod >> 16) & 0xffff)
+		scale = int64(mod & 0xffff)
+		return precision, scale, true
+	default:
+		return 0, 0, false
+	}
+}
+
+// ColumnTypeScanType returns the value type that can be used to scan types
+// into.
+func (rs *rows) ColumnTypeScanType(index int) reflect.Type {
+	return rs.colTyps[index].Type()
+}
+
+// ColumnTypeDatabaseTypeName returns the database system type name.
+func (rs *rows) ColumnTypeDatabaseTypeName(index int) string {
+	return rs.colTyps[index].Name()
+}
+
+// ColumnTypeLength returns the length of the column type if the column is a
+// variable-length type. If the column is not a variable-length type, ok
+// is false.
+func (rs *rows) ColumnTypeLength(index int) (length int64, ok bool) {
+	return rs.colTyps[index].Length()
+}
+
+// ColumnTypePrecisionScale returns the precision and scale for decimal
+// types. If not applicable, ok is false.
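+// For a numeric(9, 2) column, for example, the type modifier arrives as
+// 589830: mod = 589830 - headerSize = 589826, precision =
+// (589826 >> 16) & 0xffff = 9, and scale = 589826 & 0xffff = 2.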
+func (rs *rows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) { + return rs.colTyps[index].PrecisionScale() +} diff --git a/vendor/github.com/lib/pq/rows_test.go b/vendor/github.com/lib/pq/rows_test.go new file mode 100644 index 0000000..3033bc0 --- /dev/null +++ b/vendor/github.com/lib/pq/rows_test.go @@ -0,0 +1,220 @@ +// +build go1.8 + +package pq + +import ( + "math" + "reflect" + "testing" + + "github.com/lib/pq/oid" +) + +func TestDataTypeName(t *testing.T) { + tts := []struct { + typ oid.Oid + name string + }{ + {oid.T_int8, "INT8"}, + {oid.T_int4, "INT4"}, + {oid.T_int2, "INT2"}, + {oid.T_varchar, "VARCHAR"}, + {oid.T_text, "TEXT"}, + {oid.T_bool, "BOOL"}, + {oid.T_numeric, "NUMERIC"}, + {oid.T_date, "DATE"}, + {oid.T_time, "TIME"}, + {oid.T_timetz, "TIMETZ"}, + {oid.T_timestamp, "TIMESTAMP"}, + {oid.T_timestamptz, "TIMESTAMPTZ"}, + {oid.T_bytea, "BYTEA"}, + } + + for i, tt := range tts { + dt := fieldDesc{OID: tt.typ} + if name := dt.Name(); name != tt.name { + t.Errorf("(%d) got: %s want: %s", i, name, tt.name) + } + } +} + +func TestDataType(t *testing.T) { + tts := []struct { + typ oid.Oid + kind reflect.Kind + }{ + {oid.T_int8, reflect.Int64}, + {oid.T_int4, reflect.Int32}, + {oid.T_int2, reflect.Int16}, + {oid.T_varchar, reflect.String}, + {oid.T_text, reflect.String}, + {oid.T_bool, reflect.Bool}, + {oid.T_date, reflect.Struct}, + {oid.T_time, reflect.Struct}, + {oid.T_timetz, reflect.Struct}, + {oid.T_timestamp, reflect.Struct}, + {oid.T_timestamptz, reflect.Struct}, + {oid.T_bytea, reflect.Slice}, + } + + for i, tt := range tts { + dt := fieldDesc{OID: tt.typ} + if kind := dt.Type().Kind(); kind != tt.kind { + t.Errorf("(%d) got: %s want: %s", i, kind, tt.kind) + } + } +} + +func TestDataTypeLength(t *testing.T) { + tts := []struct { + typ oid.Oid + len int + mod int + length int64 + ok bool + }{ + {oid.T_int4, 0, -1, 0, false}, + {oid.T_varchar, 65535, 9, 5, true}, + {oid.T_text, 65535, -1, math.MaxInt64, true}, + {oid.T_bytea, 65535, -1, math.MaxInt64, true}, + } + + for i, tt := range tts { + dt := fieldDesc{OID: tt.typ, Len: tt.len, Mod: tt.mod} + if l, k := dt.Length(); k != tt.ok || l != tt.length { + t.Errorf("(%d) got: %d, %t want: %d, %t", i, l, k, tt.length, tt.ok) + } + } +} + +func TestDataTypePrecisionScale(t *testing.T) { + tts := []struct { + typ oid.Oid + mod int + precision, scale int64 + ok bool + }{ + {oid.T_int4, -1, 0, 0, false}, + {oid.T_numeric, 589830, 9, 2, true}, + {oid.T_text, -1, 0, 0, false}, + } + + for i, tt := range tts { + dt := fieldDesc{OID: tt.typ, Mod: tt.mod} + p, s, k := dt.PrecisionScale() + if k != tt.ok { + t.Errorf("(%d) got: %t want: %t", i, k, tt.ok) + } + if p != tt.precision { + t.Errorf("(%d) wrong precision got: %d want: %d", i, p, tt.precision) + } + if s != tt.scale { + t.Errorf("(%d) wrong scale got: %d want: %d", i, s, tt.scale) + } + } +} + +func TestRowsColumnTypes(t *testing.T) { + columnTypesTests := []struct { + Name string + TypeName string + Length struct { + Len int64 + OK bool + } + DecimalSize struct { + Precision int64 + Scale int64 + OK bool + } + ScanType reflect.Type + }{ + { + Name: "a", + TypeName: "INT4", + Length: struct { + Len int64 + OK bool + }{ + Len: 0, + OK: false, + }, + DecimalSize: struct { + Precision int64 + Scale int64 + OK bool + }{ + Precision: 0, + Scale: 0, + OK: false, + }, + ScanType: reflect.TypeOf(int32(0)), + }, { + Name: "bar", + TypeName: "TEXT", + Length: struct { + Len int64 + OK bool + }{ + Len: math.MaxInt64, + OK: true, + }, + DecimalSize: struct { + 
Precision int64 + Scale int64 + OK bool + }{ + Precision: 0, + Scale: 0, + OK: false, + }, + ScanType: reflect.TypeOf(""), + }, + } + + db := openTestConn(t) + defer db.Close() + + rows, err := db.Query("SELECT 1 AS a, text 'bar' AS bar, 1.28::numeric(9, 2) AS dec") + if err != nil { + t.Fatal(err) + } + + columns, err := rows.ColumnTypes() + if err != nil { + t.Fatal(err) + } + if len(columns) != 3 { + t.Errorf("expected 3 columns found %d", len(columns)) + } + + for i, tt := range columnTypesTests { + c := columns[i] + if c.Name() != tt.Name { + t.Errorf("(%d) got: %s, want: %s", i, c.Name(), tt.Name) + } + if c.DatabaseTypeName() != tt.TypeName { + t.Errorf("(%d) got: %s, want: %s", i, c.DatabaseTypeName(), tt.TypeName) + } + l, ok := c.Length() + if l != tt.Length.Len { + t.Errorf("(%d) got: %d, want: %d", i, l, tt.Length.Len) + } + if ok != tt.Length.OK { + t.Errorf("(%d) got: %t, want: %t", i, ok, tt.Length.OK) + } + p, s, ok := c.DecimalSize() + if p != tt.DecimalSize.Precision { + t.Errorf("(%d) got: %d, want: %d", i, p, tt.DecimalSize.Precision) + } + if s != tt.DecimalSize.Scale { + t.Errorf("(%d) got: %d, want: %d", i, s, tt.DecimalSize.Scale) + } + if ok != tt.DecimalSize.OK { + t.Errorf("(%d) got: %t, want: %t", i, ok, tt.DecimalSize.OK) + } + if c.ScanType() != tt.ScanType { + t.Errorf("(%d) got: %v, want: %v", i, c.ScanType(), tt.ScanType) + } + } +} diff --git a/vendor/github.com/lib/pq/ssl.go b/vendor/github.com/lib/pq/ssl.go new file mode 100644 index 0000000..e1a326a --- /dev/null +++ b/vendor/github.com/lib/pq/ssl.go @@ -0,0 +1,169 @@ +package pq + +import ( + "crypto/tls" + "crypto/x509" + "io/ioutil" + "net" + "os" + "os/user" + "path/filepath" +) + +// ssl generates a function to upgrade a net.Conn based on the "sslmode" and +// related settings. The function is nil when no upgrade should take place. +func ssl(o values) (func(net.Conn) (net.Conn, error), error) { + verifyCaOnly := false + tlsConf := tls.Config{} + switch mode := o["sslmode"]; mode { + // "require" is the default. + case "", "require": + // We must skip TLS's own verification since it requires full + // verification since Go 1.3. + tlsConf.InsecureSkipVerify = true + + // From http://www.postgresql.org/docs/current/static/libpq-ssl.html: + // + // Note: For backwards compatibility with earlier versions of + // PostgreSQL, if a root CA file exists, the behavior of + // sslmode=require will be the same as that of verify-ca, meaning the + // server certificate is validated against the CA. Relying on this + // behavior is discouraged, and applications that need certificate + // validation should always use verify-ca or verify-full. + if sslrootcert, ok := o["sslrootcert"]; ok { + if _, err := os.Stat(sslrootcert); err == nil { + verifyCaOnly = true + } else { + delete(o, "sslrootcert") + } + } + case "verify-ca": + // We must skip TLS's own verification since it requires full + // verification since Go 1.3. 
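+		// verify-ca validates the certificate chain against the CA but,
+		// unlike verify-full, deliberately skips hostname verification;
+		// the chain check is done in sslVerifyCertificateAuthority below.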
+ tlsConf.InsecureSkipVerify = true + verifyCaOnly = true + case "verify-full": + tlsConf.ServerName = o["host"] + case "disable": + return nil, nil + default: + return nil, fmterrorf(`unsupported sslmode %q; only "require" (default), "verify-full", "verify-ca", and "disable" supported`, mode) + } + + err := sslClientCertificates(&tlsConf, o) + if err != nil { + return nil, err + } + err = sslCertificateAuthority(&tlsConf, o) + if err != nil { + return nil, err + } + sslRenegotiation(&tlsConf) + + return func(conn net.Conn) (net.Conn, error) { + client := tls.Client(conn, &tlsConf) + if verifyCaOnly { + err := sslVerifyCertificateAuthority(client, &tlsConf) + if err != nil { + return nil, err + } + } + return client, nil + }, nil +} + +// sslClientCertificates adds the certificate specified in the "sslcert" and +// "sslkey" settings, or if they aren't set, from the .postgresql directory +// in the user's home directory. The configured files must exist and have +// the correct permissions. +func sslClientCertificates(tlsConf *tls.Config, o values) error { + // user.Current() might fail when cross-compiling. We have to ignore the + // error and continue without home directory defaults, since we wouldn't + // know from where to load them. + user, _ := user.Current() + + // In libpq, the client certificate is only loaded if the setting is not blank. + // + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1036-L1037 + sslcert := o["sslcert"] + if len(sslcert) == 0 && user != nil { + sslcert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt") + } + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1045 + if len(sslcert) == 0 { + return nil + } + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1050:L1054 + if _, err := os.Stat(sslcert); os.IsNotExist(err) { + return nil + } else if err != nil { + return err + } + + // In libpq, the ssl key is only loaded if the setting is not blank. + // + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1123-L1222 + sslkey := o["sslkey"] + if len(sslkey) == 0 && user != nil { + sslkey = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key") + } + + if len(sslkey) > 0 { + if err := sslKeyPermissions(sslkey); err != nil { + return err + } + } + + cert, err := tls.LoadX509KeyPair(sslcert, sslkey) + if err != nil { + return err + } + + tlsConf.Certificates = []tls.Certificate{cert} + return nil +} + +// sslCertificateAuthority adds the RootCA specified in the "sslrootcert" setting. +func sslCertificateAuthority(tlsConf *tls.Config, o values) error { + // In libpq, the root certificate is only loaded if the setting is not blank. + // + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L950-L951 + if sslrootcert := o["sslrootcert"]; len(sslrootcert) > 0 { + tlsConf.RootCAs = x509.NewCertPool() + + cert, err := ioutil.ReadFile(sslrootcert) + if err != nil { + return err + } + + if !tlsConf.RootCAs.AppendCertsFromPEM(cert) { + return fmterrorf("couldn't parse pem in sslrootcert") + } + } + + return nil +} + +// sslVerifyCertificateAuthority carries out a TLS handshake to the server and +// verifies the presented certificate against the CA, i.e. the one specified in +// sslrootcert or the system CA if sslrootcert was not specified. 
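+//
+// A conninfo string that exercises this path might look like the following
+// (host and certificate path are illustrative only):
+//
+//	"host=db.example.com sslmode=verify-ca sslrootcert=/etc/ssl/pg/root.crt"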
+func sslVerifyCertificateAuthority(client *tls.Conn, tlsConf *tls.Config) error { + err := client.Handshake() + if err != nil { + return err + } + certs := client.ConnectionState().PeerCertificates + opts := x509.VerifyOptions{ + DNSName: client.ConnectionState().ServerName, + Intermediates: x509.NewCertPool(), + Roots: tlsConf.RootCAs, + } + for i, cert := range certs { + if i == 0 { + continue + } + opts.Intermediates.AddCert(cert) + } + _, err = certs[0].Verify(opts) + return err +} diff --git a/vendor/github.com/lib/pq/ssl_go1.7.go b/vendor/github.com/lib/pq/ssl_go1.7.go new file mode 100644 index 0000000..d7ba43b --- /dev/null +++ b/vendor/github.com/lib/pq/ssl_go1.7.go @@ -0,0 +1,14 @@ +// +build go1.7 + +package pq + +import "crypto/tls" + +// Accept renegotiation requests initiated by the backend. +// +// Renegotiation was deprecated then removed from PostgreSQL 9.5, but +// the default configuration of older versions has it enabled. Redshift +// also initiates renegotiations and cannot be reconfigured. +func sslRenegotiation(conf *tls.Config) { + conf.Renegotiation = tls.RenegotiateFreelyAsClient +} diff --git a/vendor/github.com/lib/pq/ssl_permissions.go b/vendor/github.com/lib/pq/ssl_permissions.go new file mode 100644 index 0000000..3b7c3a2 --- /dev/null +++ b/vendor/github.com/lib/pq/ssl_permissions.go @@ -0,0 +1,20 @@ +// +build !windows + +package pq + +import "os" + +// sslKeyPermissions checks the permissions on user-supplied ssl key files. +// The key file should have very little access. +// +// libpq does not check key file permissions on Windows. +func sslKeyPermissions(sslkey string) error { + info, err := os.Stat(sslkey) + if err != nil { + return err + } + if info.Mode().Perm()&0077 != 0 { + return ErrSSLKeyHasWorldPermissions + } + return nil +} diff --git a/vendor/github.com/lib/pq/ssl_renegotiation.go b/vendor/github.com/lib/pq/ssl_renegotiation.go new file mode 100644 index 0000000..85ed5e4 --- /dev/null +++ b/vendor/github.com/lib/pq/ssl_renegotiation.go @@ -0,0 +1,8 @@ +// +build !go1.7 + +package pq + +import "crypto/tls" + +// Renegotiation is not supported by crypto/tls until Go 1.7. +func sslRenegotiation(*tls.Config) {} diff --git a/vendor/github.com/lib/pq/ssl_test.go b/vendor/github.com/lib/pq/ssl_test.go new file mode 100644 index 0000000..3eafbfd --- /dev/null +++ b/vendor/github.com/lib/pq/ssl_test.go @@ -0,0 +1,279 @@ +package pq + +// This file contains SSL tests + +import ( + _ "crypto/sha256" + "crypto/x509" + "database/sql" + "os" + "path/filepath" + "testing" +) + +func maybeSkipSSLTests(t *testing.T) { + // Require some special variables for testing certificates + if os.Getenv("PQSSLCERTTEST_PATH") == "" { + t.Skip("PQSSLCERTTEST_PATH not set, skipping SSL tests") + } + + value := os.Getenv("PQGOSSLTESTS") + if value == "" || value == "0" { + t.Skip("PQGOSSLTESTS not enabled, skipping SSL tests") + } else if value != "1" { + t.Fatalf("unexpected value %q for PQGOSSLTESTS", value) + } +} + +func openSSLConn(t *testing.T, conninfo string) (*sql.DB, error) { + db, err := openTestConnConninfo(conninfo) + if err != nil { + // should never fail + t.Fatal(err) + } + // Do something with the connection to see whether it's working or not. 
+ tx, err := db.Begin() + if err == nil { + return db, tx.Rollback() + } + _ = db.Close() + return nil, err +} + +func checkSSLSetup(t *testing.T, conninfo string) { + _, err := openSSLConn(t, conninfo) + if pge, ok := err.(*Error); ok { + if pge.Code.Name() != "invalid_authorization_specification" { + t.Fatalf("unexpected error code '%s'", pge.Code.Name()) + } + } else { + t.Fatalf("expected %T, got %v", (*Error)(nil), err) + } +} + +// Connect over SSL and run a simple query to test the basics +func TestSSLConnection(t *testing.T) { + maybeSkipSSLTests(t) + // Environment sanity check: should fail without SSL + checkSSLSetup(t, "sslmode=disable user=pqgossltest") + + db, err := openSSLConn(t, "sslmode=require user=pqgossltest") + if err != nil { + t.Fatal(err) + } + rows, err := db.Query("SELECT 1") + if err != nil { + t.Fatal(err) + } + rows.Close() +} + +// Test sslmode=verify-full +func TestSSLVerifyFull(t *testing.T) { + maybeSkipSSLTests(t) + // Environment sanity check: should fail without SSL + checkSSLSetup(t, "sslmode=disable user=pqgossltest") + + // Not OK according to the system CA + _, err := openSSLConn(t, "host=postgres sslmode=verify-full user=pqgossltest") + if err == nil { + t.Fatal("expected error") + } + _, ok := err.(x509.UnknownAuthorityError) + if !ok { + t.Fatalf("expected x509.UnknownAuthorityError, got %#+v", err) + } + + rootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "root.crt") + rootCert := "sslrootcert=" + rootCertPath + " " + // No match on Common Name + _, err = openSSLConn(t, rootCert+"host=127.0.0.1 sslmode=verify-full user=pqgossltest") + if err == nil { + t.Fatal("expected error") + } + _, ok = err.(x509.HostnameError) + if !ok { + t.Fatalf("expected x509.HostnameError, got %#+v", err) + } + // OK + _, err = openSSLConn(t, rootCert+"host=postgres sslmode=verify-full user=pqgossltest") + if err != nil { + t.Fatal(err) + } +} + +// Test sslmode=require sslrootcert=rootCertPath +func TestSSLRequireWithRootCert(t *testing.T) { + maybeSkipSSLTests(t) + // Environment sanity check: should fail without SSL + checkSSLSetup(t, "sslmode=disable user=pqgossltest") + + bogusRootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "bogus_root.crt") + bogusRootCert := "sslrootcert=" + bogusRootCertPath + " " + + // Not OK according to the bogus CA + _, err := openSSLConn(t, bogusRootCert+"host=postgres sslmode=require user=pqgossltest") + if err == nil { + t.Fatal("expected error") + } + _, ok := err.(x509.UnknownAuthorityError) + if !ok { + t.Fatalf("expected x509.UnknownAuthorityError, got %s, %#+v", err, err) + } + + nonExistentCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "non_existent.crt") + nonExistentCert := "sslrootcert=" + nonExistentCertPath + " " + + // No match on Common Name, but that's OK because we're not validating anything. + _, err = openSSLConn(t, nonExistentCert+"host=127.0.0.1 sslmode=require user=pqgossltest") + if err != nil { + t.Fatal(err) + } + + rootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "root.crt") + rootCert := "sslrootcert=" + rootCertPath + " " + + // No match on Common Name, but that's OK because we're not validating the CN. 
+ _, err = openSSLConn(t, rootCert+"host=127.0.0.1 sslmode=require user=pqgossltest") + if err != nil { + t.Fatal(err) + } + // Everything OK + _, err = openSSLConn(t, rootCert+"host=postgres sslmode=require user=pqgossltest") + if err != nil { + t.Fatal(err) + } +} + +// Test sslmode=verify-ca +func TestSSLVerifyCA(t *testing.T) { + maybeSkipSSLTests(t) + // Environment sanity check: should fail without SSL + checkSSLSetup(t, "sslmode=disable user=pqgossltest") + + // Not OK according to the system CA + { + _, err := openSSLConn(t, "host=postgres sslmode=verify-ca user=pqgossltest") + if _, ok := err.(x509.UnknownAuthorityError); !ok { + t.Fatalf("expected %T, got %#+v", x509.UnknownAuthorityError{}, err) + } + } + + // Still not OK according to the system CA; empty sslrootcert is treated as unspecified. + { + _, err := openSSLConn(t, "host=postgres sslmode=verify-ca user=pqgossltest sslrootcert=''") + if _, ok := err.(x509.UnknownAuthorityError); !ok { + t.Fatalf("expected %T, got %#+v", x509.UnknownAuthorityError{}, err) + } + } + + rootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "root.crt") + rootCert := "sslrootcert=" + rootCertPath + " " + // No match on Common Name, but that's OK + if _, err := openSSLConn(t, rootCert+"host=127.0.0.1 sslmode=verify-ca user=pqgossltest"); err != nil { + t.Fatal(err) + } + // Everything OK + if _, err := openSSLConn(t, rootCert+"host=postgres sslmode=verify-ca user=pqgossltest"); err != nil { + t.Fatal(err) + } +} + +// Authenticate over SSL using client certificates +func TestSSLClientCertificates(t *testing.T) { + maybeSkipSSLTests(t) + // Environment sanity check: should fail without SSL + checkSSLSetup(t, "sslmode=disable user=pqgossltest") + + const baseinfo = "sslmode=require user=pqgosslcert" + + // Certificate not specified, should fail + { + _, err := openSSLConn(t, baseinfo) + if pge, ok := err.(*Error); ok { + if pge.Code.Name() != "invalid_authorization_specification" { + t.Fatalf("unexpected error code '%s'", pge.Code.Name()) + } + } else { + t.Fatalf("expected %T, got %v", (*Error)(nil), err) + } + } + + // Empty certificate specified, should fail + { + _, err := openSSLConn(t, baseinfo+" sslcert=''") + if pge, ok := err.(*Error); ok { + if pge.Code.Name() != "invalid_authorization_specification" { + t.Fatalf("unexpected error code '%s'", pge.Code.Name()) + } + } else { + t.Fatalf("expected %T, got %v", (*Error)(nil), err) + } + } + + // Non-existent certificate specified, should fail + { + _, err := openSSLConn(t, baseinfo+" sslcert=/tmp/filedoesnotexist") + if pge, ok := err.(*Error); ok { + if pge.Code.Name() != "invalid_authorization_specification" { + t.Fatalf("unexpected error code '%s'", pge.Code.Name()) + } + } else { + t.Fatalf("expected %T, got %v", (*Error)(nil), err) + } + } + + certpath, ok := os.LookupEnv("PQSSLCERTTEST_PATH") + if !ok { + t.Fatalf("PQSSLCERTTEST_PATH not present in environment") + } + + sslcert := filepath.Join(certpath, "postgresql.crt") + + // Cert present, key not specified, should fail + { + _, err := openSSLConn(t, baseinfo+" sslcert="+sslcert) + if _, ok := err.(*os.PathError); !ok { + t.Fatalf("expected %T, got %#+v", (*os.PathError)(nil), err) + } + } + + // Cert present, empty key specified, should fail + { + _, err := openSSLConn(t, baseinfo+" sslcert="+sslcert+" sslkey=''") + if _, ok := err.(*os.PathError); !ok { + t.Fatalf("expected %T, got %#+v", (*os.PathError)(nil), err) + } + } + + // Cert present, non-existent key, should fail + { + _, err := openSSLConn(t, baseinfo+" 
sslcert="+sslcert+" sslkey=/tmp/filedoesnotexist") + if _, ok := err.(*os.PathError); !ok { + t.Fatalf("expected %T, got %#+v", (*os.PathError)(nil), err) + } + } + + // Key has wrong permissions (passing the cert as the key), should fail + if _, err := openSSLConn(t, baseinfo+" sslcert="+sslcert+" sslkey="+sslcert); err != ErrSSLKeyHasWorldPermissions { + t.Fatalf("expected %s, got %#+v", ErrSSLKeyHasWorldPermissions, err) + } + + sslkey := filepath.Join(certpath, "postgresql.key") + + // Should work + if db, err := openSSLConn(t, baseinfo+" sslcert="+sslcert+" sslkey="+sslkey); err != nil { + t.Fatal(err) + } else { + rows, err := db.Query("SELECT 1") + if err != nil { + t.Fatal(err) + } + if err := rows.Close(); err != nil { + t.Fatal(err) + } + if err := db.Close(); err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/github.com/lib/pq/ssl_windows.go b/vendor/github.com/lib/pq/ssl_windows.go new file mode 100644 index 0000000..5d2c763 --- /dev/null +++ b/vendor/github.com/lib/pq/ssl_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package pq + +// sslKeyPermissions checks the permissions on user-supplied ssl key files. +// The key file should have very little access. +// +// libpq does not check key file permissions on Windows. +func sslKeyPermissions(string) error { return nil } diff --git a/vendor/github.com/lib/pq/url.go b/vendor/github.com/lib/pq/url.go new file mode 100644 index 0000000..f4d8a7c --- /dev/null +++ b/vendor/github.com/lib/pq/url.go @@ -0,0 +1,76 @@ +package pq + +import ( + "fmt" + "net" + nurl "net/url" + "sort" + "strings" +) + +// ParseURL no longer needs to be used by clients of this library since supplying a URL as a +// connection string to sql.Open() is now supported: +// +// sql.Open("postgres", "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full") +// +// It remains exported here for backwards-compatibility. +// +// ParseURL converts a url to a connection string for driver.Open. 
+// Example: +// +// "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full" +// +// converts to: +// +// "user=bob password=secret host=1.2.3.4 port=5432 dbname=mydb sslmode=verify-full" +// +// A minimal example: +// +// "postgres://" +// +// This will be blank, causing driver.Open to use all of the defaults +func ParseURL(url string) (string, error) { + u, err := nurl.Parse(url) + if err != nil { + return "", err + } + + if u.Scheme != "postgres" && u.Scheme != "postgresql" { + return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme) + } + + var kvs []string + escaper := strings.NewReplacer(` `, `\ `, `'`, `\'`, `\`, `\\`) + accrue := func(k, v string) { + if v != "" { + kvs = append(kvs, k+"="+escaper.Replace(v)) + } + } + + if u.User != nil { + v := u.User.Username() + accrue("user", v) + + v, _ = u.User.Password() + accrue("password", v) + } + + if host, port, err := net.SplitHostPort(u.Host); err != nil { + accrue("host", u.Host) + } else { + accrue("host", host) + accrue("port", port) + } + + if u.Path != "" { + accrue("dbname", u.Path[1:]) + } + + q := u.Query() + for k := range q { + accrue(k, q.Get(k)) + } + + sort.Strings(kvs) // Makes testing easier (not a performance concern) + return strings.Join(kvs, " "), nil +} diff --git a/vendor/github.com/lib/pq/url_test.go b/vendor/github.com/lib/pq/url_test.go new file mode 100644 index 0000000..4ff0ce0 --- /dev/null +++ b/vendor/github.com/lib/pq/url_test.go @@ -0,0 +1,66 @@ +package pq + +import ( + "testing" +) + +func TestSimpleParseURL(t *testing.T) { + expected := "host=hostname.remote" + str, err := ParseURL("postgres://hostname.remote") + if err != nil { + t.Fatal(err) + } + + if str != expected { + t.Fatalf("unexpected result from ParseURL:\n+ %v\n- %v", str, expected) + } +} + +func TestIPv6LoopbackParseURL(t *testing.T) { + expected := "host=::1 port=1234" + str, err := ParseURL("postgres://[::1]:1234") + if err != nil { + t.Fatal(err) + } + + if str != expected { + t.Fatalf("unexpected result from ParseURL:\n+ %v\n- %v", str, expected) + } +} + +func TestFullParseURL(t *testing.T) { + expected := `dbname=database host=hostname.remote password=top\ secret port=1234 user=username` + str, err := ParseURL("postgres://username:top%20secret@hostname.remote:1234/database") + if err != nil { + t.Fatal(err) + } + + if str != expected { + t.Fatalf("unexpected result from ParseURL:\n+ %s\n- %s", str, expected) + } +} + +func TestInvalidProtocolParseURL(t *testing.T) { + _, err := ParseURL("http://hostname.remote") + switch err { + case nil: + t.Fatal("Expected an error from parsing invalid protocol") + default: + msg := "invalid connection protocol: http" + if err.Error() != msg { + t.Fatalf("Unexpected error message:\n+ %s\n- %s", + err.Error(), msg) + } + } +} + +func TestMinimalURL(t *testing.T) { + cs, err := ParseURL("postgres://") + if err != nil { + t.Fatal(err) + } + + if cs != "" { + t.Fatalf("expected blank connection string, got: %q", cs) + } +} diff --git a/vendor/github.com/lib/pq/user_posix.go b/vendor/github.com/lib/pq/user_posix.go new file mode 100644 index 0000000..bf98252 --- /dev/null +++ b/vendor/github.com/lib/pq/user_posix.go @@ -0,0 +1,24 @@ +// Package pq is a pure Go Postgres driver for the database/sql package. 
+ +// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris rumprun + +package pq + +import ( + "os" + "os/user" +) + +func userCurrent() (string, error) { + u, err := user.Current() + if err == nil { + return u.Username, nil + } + + name := os.Getenv("USER") + if name != "" { + return name, nil + } + + return "", ErrCouldNotDetectUsername +} diff --git a/vendor/github.com/lib/pq/user_windows.go b/vendor/github.com/lib/pq/user_windows.go new file mode 100644 index 0000000..2b69126 --- /dev/null +++ b/vendor/github.com/lib/pq/user_windows.go @@ -0,0 +1,27 @@ +// Package pq is a pure Go Postgres driver for the database/sql package. +package pq + +import ( + "path/filepath" + "syscall" +) + +// Perform Windows user name lookup identically to libpq. +// +// The PostgreSQL code makes use of the legacy Win32 function +// GetUserName, and that function has not been imported into stock Go. +// GetUserNameEx is available though, the difference being that a +// wider range of names are available. To get the output to be the +// same as GetUserName, only the base (or last) component of the +// result is returned. +func userCurrent() (string, error) { + pw_name := make([]uint16, 128) + pwname_size := uint32(len(pw_name)) - 1 + err := syscall.GetUserNameEx(syscall.NameSamCompatible, &pw_name[0], &pwname_size) + if err != nil { + return "", ErrCouldNotDetectUsername + } + s := syscall.UTF16ToString(pw_name) + u := filepath.Base(s) + return u, nil +} diff --git a/vendor/github.com/lib/pq/uuid.go b/vendor/github.com/lib/pq/uuid.go new file mode 100644 index 0000000..9a1b9e0 --- /dev/null +++ b/vendor/github.com/lib/pq/uuid.go @@ -0,0 +1,23 @@ +package pq + +import ( + "encoding/hex" + "fmt" +) + +// decodeUUIDBinary interprets the binary format of a uuid, returning it in text format. 
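+// For example, the 16 bytes 03 a3 52 2f 89 28 49 87 84 d6 93 7b 36 ec 27 6f
+// decode to "03a3522f-8928-4987-84d6-937b36ec276f".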
+func decodeUUIDBinary(src []byte) ([]byte, error) { + if len(src) != 16 { + return nil, fmt.Errorf("pq: unable to decode uuid; bad length: %d", len(src)) + } + + dst := make([]byte, 36) + dst[8], dst[13], dst[18], dst[23] = '-', '-', '-', '-' + hex.Encode(dst[0:], src[0:4]) + hex.Encode(dst[9:], src[4:6]) + hex.Encode(dst[14:], src[6:8]) + hex.Encode(dst[19:], src[8:10]) + hex.Encode(dst[24:], src[10:16]) + + return dst, nil +} diff --git a/vendor/github.com/lib/pq/uuid_test.go b/vendor/github.com/lib/pq/uuid_test.go new file mode 100644 index 0000000..8ecee2f --- /dev/null +++ b/vendor/github.com/lib/pq/uuid_test.go @@ -0,0 +1,46 @@ +package pq + +import ( + "reflect" + "strings" + "testing" +) + +func TestDecodeUUIDBinaryError(t *testing.T) { + t.Parallel() + _, err := decodeUUIDBinary([]byte{0x12, 0x34}) + + if err == nil { + t.Fatal("Expected error, got none") + } + if !strings.HasPrefix(err.Error(), "pq:") { + t.Errorf("Expected error to start with %q, got %q", "pq:", err.Error()) + } + if !strings.Contains(err.Error(), "bad length: 2") { + t.Errorf("Expected error to contain length, got %q", err.Error()) + } +} + +func BenchmarkDecodeUUIDBinary(b *testing.B) { + x := []byte{0x03, 0xa3, 0x52, 0x2f, 0x89, 0x28, 0x49, 0x87, 0x84, 0xd6, 0x93, 0x7b, 0x36, 0xec, 0x27, 0x6f} + + for i := 0; i < b.N; i++ { + decodeUUIDBinary(x) + } +} + +func TestDecodeUUIDBackend(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + var s = "a0ecc91d-a13f-4fe4-9fce-7e09777cc70a" + var scanned interface{} + + err := db.QueryRow(`SELECT $1::uuid`, s).Scan(&scanned) + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + if !reflect.DeepEqual(scanned, []byte(s)) { + t.Errorf("Expected []byte(%q), got %T(%q)", s, scanned, scanned) + } +} diff --git a/vendor/github.com/spf13/cobra/.circleci/config.yml b/vendor/github.com/spf13/cobra/.circleci/config.yml new file mode 100644 index 0000000..bbba32b --- /dev/null +++ b/vendor/github.com/spf13/cobra/.circleci/config.yml @@ -0,0 +1,38 @@ +workflows: + version: 2 + main: + jobs: + - go-current + - go-previous + - go-latest +base: &base + working_directory: /go/src/github.com/spf13/cobra + steps: + - checkout + - run: + name: "All Commands" + command: | + mkdir -p bin + curl -Lso bin/shellcheck https://github.com/caarlos0/shellcheck-docker/releases/download/v0.4.3/shellcheck + chmod +x bin/shellcheck + go get -t -v ./... + PATH=$PATH:$PWD/bin go test -v ./... + go build + diff -u <(echo -n) <(gofmt -d -s .) + if [ -z $NOVET ]; then + diff -u <(echo -n) <(go tool vet . 
2>&1 | grep -vE 'ExampleCommand|bash_completions.*Fprint'); + fi +version: 2 +jobs: + go-current: + docker: + - image: circleci/golang:1.10.0 + <<: *base + go-previous: + docker: + - image: circleci/golang:1.9.4 + <<: *base + go-latest: + docker: + - image: circleci/golang:latest + <<: *base diff --git a/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore new file mode 100644 index 0000000..1b8c7c2 --- /dev/null +++ b/vendor/github.com/spf13/cobra/.gitignore @@ -0,0 +1,36 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +# Vim files https://github.com/github/gitignore/blob/master/Global/Vim.gitignore +# swap +[._]*.s[a-w][a-z] +[._]s[a-w][a-z] +# session +Session.vim +# temporary +.netrwhist +*~ +# auto-generated tag files +tags + +*.exe + +cobra.test diff --git a/vendor/github.com/spf13/cobra/.mailmap b/vendor/github.com/spf13/cobra/.mailmap new file mode 100644 index 0000000..94ec530 --- /dev/null +++ b/vendor/github.com/spf13/cobra/.mailmap @@ -0,0 +1,3 @@ +Steve Francia +Bjørn Erik Pedersen +Fabiano Franz diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml new file mode 100644 index 0000000..5afcb20 --- /dev/null +++ b/vendor/github.com/spf13/cobra/.travis.yml @@ -0,0 +1,21 @@ +language: go + +matrix: + include: + - go: 1.9.4 + - go: 1.10.0 + - go: tip + allow_failures: + - go: tip + +before_install: + - mkdir -p bin + - curl -Lso bin/shellcheck https://github.com/caarlos0/shellcheck-docker/releases/download/v0.4.3/shellcheck + - chmod +x bin/shellcheck +script: + - PATH=$PATH:$PWD/bin go test -v ./... + - go build + - diff -u <(echo -n) <(gofmt -d -s .) + - if [ -z $NOVET ]; then + diff -u <(echo -n) <(go tool vet . 2>&1 | grep -vE 'ExampleCommand|bash_completions.*Fprint'); + fi diff --git a/vendor/github.com/spf13/cobra/LICENSE.txt b/vendor/github.com/spf13/cobra/LICENSE.txt new file mode 100644 index 0000000..298f0e2 --- /dev/null +++ b/vendor/github.com/spf13/cobra/LICENSE.txt @@ -0,0 +1,174 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md
new file mode 100644
index 0000000..851fcc0
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/README.md
@@ -0,0 +1,736 @@
+![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png)
+
+Cobra is both a library for creating powerful modern CLI applications and a program to generate applications and command files.
+
+Many of the most widely used Go projects are built using Cobra, including:
+
+* [Kubernetes](http://kubernetes.io/)
+* [Hugo](http://gohugo.io)
+* [rkt](https://github.com/coreos/rkt)
+* [etcd](https://github.com/coreos/etcd)
+* [Moby (formerly Docker)](https://github.com/moby/moby)
+* [Docker (distribution)](https://github.com/docker/distribution)
+* [OpenShift](https://www.openshift.com/)
+* [Delve](https://github.com/derekparker/delve)
+* [GopherJS](http://www.gopherjs.org/)
+* [CockroachDB](http://www.cockroachlabs.com/)
+* [Bleve](http://www.blevesearch.com/)
+* [ProjectAtomic (enterprise)](http://www.projectatomic.io/)
+* [GiantSwarm's swarm](https://github.com/giantswarm/cli)
+* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack)
+* [rclone](http://rclone.org/)
+* [nehm](https://github.com/bogem/nehm)
+* [Pouch](https://github.com/alibaba/pouch)
+
+[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra)
+[![CircleCI status](https://circleci.com/gh/spf13/cobra.png?circle-token=:circle-token "CircleCI status")](https://circleci.com/gh/spf13/cobra)
+[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra)
+
+# Table of Contents
+
+- [Overview](#overview)
+- [Concepts](#concepts)
+  * [Commands](#commands)
+  * [Flags](#flags)
+- [Installing](#installing)
+- [Getting Started](#getting-started)
+  * [Using the Cobra Generator](#using-the-cobra-generator)
+  * [Using the Cobra Library](#using-the-cobra-library)
+  * [Working with Flags](#working-with-flags)
+  * [Positional and Custom Arguments](#positional-and-custom-arguments)
+  * [Example](#example)
+  * [Help Command](#help-command)
+  * [Usage Message](#usage-message)
+  * [PreRun and PostRun Hooks](#prerun-and-postrun-hooks)
+  * [Suggestions when "unknown command" happens](#suggestions-when-unknown-command-happens)
+  * [Generating documentation for your command](#generating-documentation-for-your-command)
+  * [Generating bash completions](#generating-bash-completions)
+- [Contributing](#contributing)
+- [License](#license)
+
+# Overview
+
+Cobra is a library providing a simple interface to create powerful modern CLI
+interfaces similar to git & go tools.
+
+Cobra is also an application that will generate your application scaffolding to rapidly
+develop a Cobra-based application.
+
+Cobra provides:
+* Easy subcommand-based CLIs: `app server`, `app fetch`, etc.
+* Fully POSIX-compliant flags (including short & long versions)
+* Nested subcommands
+* Global, local and cascading flags
+* Easy generation of applications & commands with `cobra init appname` & `cobra add cmdname`
+* Intelligent suggestions (`app srver`... did you mean `app server`?)
+* Automatic help generation for commands and flags
+* Automatic help flag recognition of `-h`, `--help`, etc.
+* Automatically generated bash autocomplete for your application
+* Automatically generated man pages for your application
+* Command aliases so you can change things without breaking them
+* The flexibility to define your own help, usage, etc.
+* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps
+
+# Concepts
+
+Cobra is built on a structure of commands, arguments & flags.
+
+**Commands** represent actions, **Args** are things, and **Flags** are modifiers for those actions.
+
+The best applications will read like sentences when used. Users will know how
+to use the application because they will intuitively understand how to use it.
+
+The pattern to follow is
+`APPNAME VERB NOUN --ADJECTIVE`
+or
+`APPNAME COMMAND ARG --FLAG`.
+
+A few good real-world examples may better illustrate this point.
+
+In the following example, 'server' is a command, and 'port' is a flag:
+
+    hugo server --port=1313
+
+In this command we are telling Git to clone the URL bare:
+
+    git clone URL --bare
+
+## Commands
+
+Command is the central point of the application. Each interaction that
+the application supports will be contained in a Command. A command can
+have child commands and optionally run an action.
+
+In the example above, 'server' is the command.
+
+[More about cobra.Command](https://godoc.org/github.com/spf13/cobra#Command)
+
+## Flags
+
+A flag is a way to modify the behavior of a command. Cobra supports
+fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/).
+A Cobra command can define flags that persist through to child commands
+and flags that are only available to that command.
+
+In the example above, 'port' is the flag.
+
+Flag functionality is provided by the [pflag
+library](https://github.com/spf13/pflag), a fork of the flag standard library
+which maintains the same interface while adding POSIX compliance.
+
+# Installing
+Using Cobra is easy. First, use `go get` to install the latest version
+of the library. This command will install the `cobra` generator executable
+along with the library and its dependencies:
+
+    go get -u github.com/spf13/cobra/cobra
+
+Next, include Cobra in your application:
+
+```go
+import "github.com/spf13/cobra"
+```
+
+# Getting Started
+
+While you are welcome to provide your own organization, typically a Cobra-based
+application will follow this organizational structure:
+
+```
+  ▾ appName/
+    ▾ cmd/
+        add.go
+        your.go
+        commands.go
+        here.go
+      main.go
+```
+
+In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra.
+
+```go
+package main
+
+import "{pathToYourApp}/cmd"
+
+func main() {
+  cmd.Execute()
+}
+```
+
+## Using the Cobra Generator
+
+Cobra provides its own program that will create your application and add any
+commands you want. It's the easiest way to incorporate Cobra into your application.
+
+[Here](https://github.com/spf13/cobra/blob/master/cobra/README.md) you can find more information about it.
+
+## Using the Cobra Library
+
+To manually implement Cobra you need to create a bare main.go file and a rootCmd file.
+You will optionally provide additional commands as you see fit.
+
+### Create rootCmd
+
+Cobra doesn't require any special constructors. Simply create your commands.
+
+Ideally you place this in app/cmd/root.go:
+
+```go
+var rootCmd = &cobra.Command{
+  Use:   "hugo",
+  Short: "Hugo is a very fast static site generator",
+  Long: `A Fast and Flexible Static Site Generator built with
+                love by spf13 and friends in Go.
+                Complete documentation is available at http://hugo.spf13.com`,
+  Run: func(cmd *cobra.Command, args []string) {
+    // Do Stuff Here
+  },
+}
+
+func Execute() {
+  if err := rootCmd.Execute(); err != nil {
+    fmt.Println(err)
+    os.Exit(1)
+  }
+}
+```
+
+You will additionally define flags and handle configuration in your init() function.
+
+For example cmd/root.go:
+
+```go
+import (
+  "fmt"
+  "os"
+
+  homedir "github.com/mitchellh/go-homedir"
+  "github.com/spf13/cobra"
+  "github.com/spf13/viper"
+)
+
+func init() {
+  cobra.OnInitialize(initConfig)
+  rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
+  rootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory eg. github.com/spf13/")
+  rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution")
+  rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `licensetext` in config)")
+  rootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration")
+  viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
+  viper.BindPFlag("projectbase", rootCmd.PersistentFlags().Lookup("projectbase"))
+  viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper"))
+  viper.SetDefault("author", "NAME HERE ")
+  viper.SetDefault("license", "apache")
+}
+
+func initConfig() {
+  // Don't forget to read config either from cfgFile or from home directory!
+  if cfgFile != "" {
+    // Use config file from the flag.
+    viper.SetConfigFile(cfgFile)
+  } else {
+    // Find home directory.
+    home, err := homedir.Dir()
+    if err != nil {
+      fmt.Println(err)
+      os.Exit(1)
+    }
+
+    // Search config in home directory with name ".cobra" (without extension).
+    viper.AddConfigPath(home)
+    viper.SetConfigName(".cobra")
+  }
+
+  if err := viper.ReadInConfig(); err != nil {
+    fmt.Println("Can't read config:", err)
+    os.Exit(1)
+  }
+}
+```
+
+### Create your main.go
+
+With the root command you need to have your main function execute it.
+Execute should be run on the root for clarity, though it can be called on any command.
+
+In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra.
+
+```go
+package main
+
+import "{pathToYourApp}/cmd"
+
+func main() {
+  cmd.Execute()
+}
+```
+
+### Create additional commands
+
+Additional commands can be defined and typically are each given their own file
+inside of the cmd/ directory.
+
+If you wanted to create a version command you would create cmd/version.go and
+populate it with the following:
+
+```go
+package cmd
+
+import (
+  "fmt"
+
+  "github.com/spf13/cobra"
+)
+
+func init() {
+  rootCmd.AddCommand(versionCmd)
+}
+
+var versionCmd = &cobra.Command{
+  Use:   "version",
+  Short: "Print the version number of Hugo",
+  Long:  `All software has versions. This is Hugo's`,
+  Run: func(cmd *cobra.Command, args []string) {
+    fmt.Println("Hugo Static Site Generator v0.9 -- HEAD")
+  },
+}
+```
+
+## Working with Flags
+
+Flags provide modifiers to control how the action command operates.
+
+### Assign flags to a command
+
+Since flags are defined and used in different locations, we need to
+define a variable with the appropriate scope outside the command to hold
+the flag's value.
+
+```go
+var Verbose bool
+var Source string
+```
+
+There are two different approaches to assigning a flag.
+
+### Persistent Flags
+
+A flag can be 'persistent', meaning that it will be available to the
+command it's assigned to as well as to every command under that command. For
+global flags, assign a flag as a persistent flag on the root.
+
+```go
+rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output")
+```
+
+### Local Flags
+
+A flag can also be assigned locally, which will only apply to that specific command.
+
+```go
+rootCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from")
+```
+
+### Local Flag on Parent Commands
+
+By default, Cobra only parses local flags on the target command; any local flags on
+parent commands are ignored. By enabling `Command.TraverseChildren`, Cobra will
+parse local flags on each command before executing the target command.
+
+```go
+command := cobra.Command{
+  Use: "print [OPTIONS] [COMMANDS]",
+  TraverseChildren: true,
+}
+```
+
+### Bind Flags with Config
+
+You can also bind your flags with [viper](https://github.com/spf13/viper):
+```go
+var author string
+
+func init() {
+  rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution")
+  viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
+}
+```
+
+In this example the persistent flag `author` is bound with `viper`.
+**Note** that the variable `author` will not be set to the value from the config
+when the `--author` flag is not provided by the user.
+
+More in [viper documentation](https://github.com/spf13/viper#working-with-flags).
+
+### Required flags
+
+Flags are optional by default. If instead you wish your command to report an error
+when a flag has not been set, mark it as required:
+```go
+rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)")
+rootCmd.MarkFlagRequired("region")
+```
+
+## Positional and Custom Arguments
+
+Validation of positional arguments can be specified using the `Args` field
+of `Command`.
+
+The following validators are built in:
+
+- `NoArgs` - the command will report an error if there are any positional args.
+- `ArbitraryArgs` - the command will accept any args.
+- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the `ValidArgs` field of `Command`.
+- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args.
+- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args.
+- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args.
+- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args.
+
+An example of setting a custom validator:
+
+```go
+var cmd = &cobra.Command{
+  Short: "hello",
+  Args: func(cmd *cobra.Command, args []string) error {
+    if len(args) < 1 {
+      return errors.New("requires at least one arg")
+    }
+    if myapp.IsValidColor(args[0]) {
+      return nil
+    }
+    return fmt.Errorf("invalid color specified: %s", args[0])
+  },
+  Run: func(cmd *cobra.Command, args []string) {
+    fmt.Println("Hello, World!")
+  },
+}
+```
+
+## Example
+
+In the example below, we have defined three commands. Two are at the top level
+and one (cmdTimes) is a child of one of the top commands. In this case, the root
+is not executable, meaning that a subcommand is required. This is accomplished
+by not providing a 'Run' for the 'rootCmd'.
+
+We have only defined one flag for a single command.
+
+More documentation about flags is available at https://github.com/spf13/pflag
+
+```go
+package main
+
+import (
+  "fmt"
+  "strings"
+
+  "github.com/spf13/cobra"
+)
+
+func main() {
+  var echoTimes int
+
+  var cmdPrint = &cobra.Command{
+    Use:   "print [string to print]",
+    Short: "Print anything to the screen",
+    Long: `print is for printing anything back to the screen.
+For many years people have printed back to the screen.`,
+    Args: cobra.MinimumNArgs(1),
+    Run: func(cmd *cobra.Command, args []string) {
+      fmt.Println("Print: " + strings.Join(args, " "))
+    },
+  }
+
+  var cmdEcho = &cobra.Command{
+    Use:   "echo [string to echo]",
+    Short: "Echo anything to the screen",
+    Long: `echo is for echoing anything back.
+Echo works a lot like print, except it has a child command.`,
+    Args: cobra.MinimumNArgs(1),
+    Run: func(cmd *cobra.Command, args []string) {
+      fmt.Println("Print: " + strings.Join(args, " "))
+    },
+  }
+
+  var cmdTimes = &cobra.Command{
+    Use:   "times [# times] [string to echo]",
+    Short: "Echo anything to the screen more times",
+    Long: `echo things multiple times back to the user by providing
+a count and a string.`,
+    Args: cobra.MinimumNArgs(1),
+    Run: func(cmd *cobra.Command, args []string) {
+      for i := 0; i < echoTimes; i++ {
+        fmt.Println("Echo: " + strings.Join(args, " "))
+      }
+    },
+  }
+
+  cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input")
+
+  var rootCmd = &cobra.Command{Use: "app"}
+  rootCmd.AddCommand(cmdPrint, cmdEcho)
+  cmdEcho.AddCommand(cmdTimes)
+  rootCmd.Execute()
+}
+```
+
+For a more complete example of a larger application, please check out [Hugo](http://gohugo.io/).
+
+## Help Command
+
+Cobra automatically adds a help command to your application when you have subcommands.
+This will be called when a user runs 'app help'. Additionally, help will also
+support all other commands as input. Say, for instance, you have a command called
+'create' without any additional configuration; Cobra will work when 'app help
+create' is called. Every command will automatically have the '--help' flag added.
+
+### Example
+
+The following output is automatically generated by Cobra. Nothing beyond the
+command and flag definitions is needed.
+
+    $ cobra help
+
+    Cobra is a CLI library for Go that empowers applications.
+    This application is a tool to generate the needed files
+    to quickly create a Cobra application.
+
+    Usage:
+      cobra [command]
+
+    Available Commands:
+      add         Add a command to a Cobra Application
+      help        Help about any command
+      init        Initialize a Cobra Application
+
+    Flags:
+      -a, --author string    author name for copyright attribution (default "YOUR NAME")
+          --config string    config file (default is $HOME/.cobra.yaml)
+      -h, --help             help for cobra
+      -l, --license string   name of license for the project
+          --viper            use Viper for configuration (default true)
+
+    Use "cobra [command] --help" for more information about a command.
+
+Help is just a command like any other. There is no special logic or behavior
+around it. In fact, you can provide your own if you want.
+
+### Defining your own help
+
+You can provide your own Help command or your own template for the default command to use
+with the following functions:
+
+```go
+cmd.SetHelpCommand(cmd *Command)
+cmd.SetHelpFunc(f func(*Command, []string))
+cmd.SetHelpTemplate(s string)
+```
+
+The latter two will also apply to any child commands.
+
+## Usage Message
+
+When the user provides an invalid flag or invalid command, Cobra responds by
+showing the user the 'usage'.
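+
+If you would rather not reprint the usage block on every runtime error, the
+`Command` struct also exposes `SilenceUsage` and `SilenceErrors` fields; a
+minimal sketch (reusing the `rootCmd` from the earlier sections):
+
+```go
+rootCmd.SilenceUsage = true  // suppress the usage block when a command returns an error
+rootCmd.SilenceErrors = true // let the caller of Execute() report the error instead
+```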
+ +### Example +You may recognize this from the help above. That's because the default help +embeds the usage as part of its output. + + $ cobra --invalid + Error: unknown flag: --invalid + Usage: + cobra [command] + + Available Commands: + add Add a command to a Cobra Application + help Help about any command + init Initialize a Cobra Application + + Flags: + -a, --author string author name for copyright attribution (default "YOUR NAME") + --config string config file (default is $HOME/.cobra.yaml) + -h, --help help for cobra + -l, --license string name of license for the project + --viper use Viper for configuration (default true) + + Use "cobra [command] --help" for more information about a command. + +### Defining your own usage +You can provide your own usage function or template for Cobra to use. +Like help, the function and template are overridable through public methods: + +```go +cmd.SetUsageFunc(f func(*Command) error) +cmd.SetUsageTemplate(s string) +``` + +## Version Flag + +Cobra adds a top-level '--version' flag if the Version field is set on the root command. +Running an application with the '--version' flag will print the version to stdout using +the version template. The template can be customized using the +`cmd.SetVersionTemplate(s string)` function. + +## PreRun and PostRun Hooks + +It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order: + +- `PersistentPreRun` +- `PreRun` +- `Run` +- `PostRun` +- `PersistentPostRun` + +An example of two commands which use all of these features is below. 
When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`:

+
+```go
+package main
+
+import (
+  "fmt"
+
+  "github.com/spf13/cobra"
+)
+
+func main() {
+
+  var rootCmd = &cobra.Command{
+    Use:   "root [sub]",
+    Short: "My root command",
+    PersistentPreRun: func(cmd *cobra.Command, args []string) {
+      fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args)
+    },
+    PreRun: func(cmd *cobra.Command, args []string) {
+      fmt.Printf("Inside rootCmd PreRun with args: %v\n", args)
+    },
+    Run: func(cmd *cobra.Command, args []string) {
+      fmt.Printf("Inside rootCmd Run with args: %v\n", args)
+    },
+    PostRun: func(cmd *cobra.Command, args []string) {
+      fmt.Printf("Inside rootCmd PostRun with args: %v\n", args)
+    },
+    PersistentPostRun: func(cmd *cobra.Command, args []string) {
+      fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args)
+    },
+  }
+
+  var subCmd = &cobra.Command{
+    Use:   "sub [no options!]",
+    Short: "My subcommand",
+    PreRun: func(cmd *cobra.Command, args []string) {
+      fmt.Printf("Inside subCmd PreRun with args: %v\n", args)
+    },
+    Run: func(cmd *cobra.Command, args []string) {
+      fmt.Printf("Inside subCmd Run with args: %v\n", args)
+    },
+    PostRun: func(cmd *cobra.Command, args []string) {
+      fmt.Printf("Inside subCmd PostRun with args: %v\n", args)
+    },
+    PersistentPostRun: func(cmd *cobra.Command, args []string) {
+      fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args)
+    },
+  }
+
+  rootCmd.AddCommand(subCmd)
+
+  rootCmd.SetArgs([]string{""})
+  rootCmd.Execute()
+  fmt.Println()
+  rootCmd.SetArgs([]string{"sub", "arg1", "arg2"})
+  rootCmd.Execute()
+}
+```
+
+Output:
+```
+Inside rootCmd PersistentPreRun with args: []
+Inside rootCmd PreRun with args: []
+Inside rootCmd Run with args: []
+Inside rootCmd PostRun with args: []
+Inside rootCmd PersistentPostRun with args: []
+
+Inside rootCmd PersistentPreRun with args: [arg1 arg2]
+Inside subCmd PreRun with args: [arg1 arg2]
+Inside subCmd Run with args: [arg1 arg2]
+Inside subCmd PostRun with args: [arg1 arg2]
+Inside subCmd PersistentPostRun with args: [arg1 arg2]
+```
+
+## Suggestions when "unknown command" happens
+
+Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example:
+
+```
+$ hugo srever
+Error: unknown command "srever" for "hugo"
+
+Did you mean this?
+        server
+
+Run 'hugo --help' for usage.
+```
+
+Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion.
+
+If you need to disable suggestions or tweak the string distance in your command, use:
+
+```go
+command.DisableSuggestions = true
+```
+
+or
+
+```go
+command.SuggestionsMinimumDistance = 1
+```
+
+You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but that make sense in your set of commands, and for which you don't want aliases. Example:
+
+```
+$ kubectl remove
+Error: unknown command "remove" for "kubectl"
+
+Did you mean this?
+        delete
+
+Run 'kubectl help' for usage.
+```
+
+## Generating documentation for your command
+
+Cobra can generate documentation based on subcommands, flags, etc.
in the following formats: + +- [Markdown](doc/md_docs.md) +- [ReStructured Text](doc/rest_docs.md) +- [Man Page](doc/man_docs.md) + +## Generating bash completions + +Cobra can generate a bash-completion file. If you add more information to your command, these completions can be amazingly powerful and flexible. Read more about it in [Bash Completions](bash_completions.md). + +# Contributing + +1. Fork it +2. Download your fork to your PC (`git clone https://github.com/your_username/cobra && cd cobra`) +3. Create your feature branch (`git checkout -b my-new-feature`) +4. Make changes and add them (`git add .`) +5. Commit your changes (`git commit -m 'Add some feature'`) +6. Push to the branch (`git push origin my-new-feature`) +7. Create new pull request + +# License + +Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt) diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go new file mode 100644 index 0000000..a5d8a92 --- /dev/null +++ b/vendor/github.com/spf13/cobra/args.go @@ -0,0 +1,89 @@ +package cobra + +import ( + "fmt" +) + +type PositionalArgs func(cmd *Command, args []string) error + +// Legacy arg validation has the following behaviour: +// - root commands with no subcommands can take arbitrary arguments +// - root commands with subcommands will do subcommand validity checking +// - subcommands will always accept arbitrary arguments +func legacyArgs(cmd *Command, args []string) error { + // no subcommand, always take args + if !cmd.HasSubCommands() { + return nil + } + + // root command with subcommands, do subcommand checking. + if !cmd.HasParent() && len(args) > 0 { + return fmt.Errorf("unknown command %q for %q%s", args[0], cmd.CommandPath(), cmd.findSuggestions(args[0])) + } + return nil +} + +// NoArgs returns an error if any args are included. +func NoArgs(cmd *Command, args []string) error { + if len(args) > 0 { + return fmt.Errorf("unknown command %q for %q", args[0], cmd.CommandPath()) + } + return nil +} + +// OnlyValidArgs returns an error if any args are not in the list of ValidArgs. +func OnlyValidArgs(cmd *Command, args []string) error { + if len(cmd.ValidArgs) > 0 { + for _, v := range args { + if !stringInSlice(v, cmd.ValidArgs) { + return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0])) + } + } + } + return nil +} + +// ArbitraryArgs never returns an error. +func ArbitraryArgs(cmd *Command, args []string) error { + return nil +} + +// MinimumNArgs returns an error if there is not at least N args. +func MinimumNArgs(n int) PositionalArgs { + return func(cmd *Command, args []string) error { + if len(args) < n { + return fmt.Errorf("requires at least %d arg(s), only received %d", n, len(args)) + } + return nil + } +} + +// MaximumNArgs returns an error if there are more than N args. +func MaximumNArgs(n int) PositionalArgs { + return func(cmd *Command, args []string) error { + if len(args) > n { + return fmt.Errorf("accepts at most %d arg(s), received %d", n, len(args)) + } + return nil + } +} + +// ExactArgs returns an error if there are not exactly n args. +func ExactArgs(n int) PositionalArgs { + return func(cmd *Command, args []string) error { + if len(args) != n { + return fmt.Errorf("accepts %d arg(s), received %d", n, len(args)) + } + return nil + } +} + +// RangeArgs returns an error if the number of args is not within the expected range. 
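+// For example, Args: RangeArgs(1, 3) accepts one, two or three positional args.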
+func RangeArgs(min int, max int) PositionalArgs { + return func(cmd *Command, args []string) error { + if len(args) < min || len(args) > max { + return fmt.Errorf("accepts between %d and %d arg(s), received %d", min, max, len(args)) + } + return nil + } +} diff --git a/vendor/github.com/spf13/cobra/args_test.go b/vendor/github.com/spf13/cobra/args_test.go new file mode 100644 index 0000000..d797b6f --- /dev/null +++ b/vendor/github.com/spf13/cobra/args_test.go @@ -0,0 +1,241 @@ +package cobra + +import ( + "strings" + "testing" +) + +func TestNoArgs(t *testing.T) { + c := &Command{Use: "c", Args: NoArgs, Run: emptyRun} + + output, err := executeCommand(c) + if output != "" { + t.Errorf("Unexpected string: %v", output) + } + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } +} + +func TestNoArgsWithArgs(t *testing.T) { + c := &Command{Use: "c", Args: NoArgs, Run: emptyRun} + + _, err := executeCommand(c, "illegal") + if err == nil { + t.Fatal("Expected an error") + } + + got := err.Error() + expected := `unknown command "illegal" for "c"` + if got != expected { + t.Errorf("Expected: %q, got: %q", expected, got) + } +} + +func TestOnlyValidArgs(t *testing.T) { + c := &Command{ + Use: "c", + Args: OnlyValidArgs, + ValidArgs: []string{"one", "two"}, + Run: emptyRun, + } + + output, err := executeCommand(c, "one", "two") + if output != "" { + t.Errorf("Unexpected output: %v", output) + } + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } +} + +func TestOnlyValidArgsWithInvalidArgs(t *testing.T) { + c := &Command{ + Use: "c", + Args: OnlyValidArgs, + ValidArgs: []string{"one", "two"}, + Run: emptyRun, + } + + _, err := executeCommand(c, "three") + if err == nil { + t.Fatal("Expected an error") + } + + got := err.Error() + expected := `invalid argument "three" for "c"` + if got != expected { + t.Errorf("Expected: %q, got: %q", expected, got) + } +} + +func TestArbitraryArgs(t *testing.T) { + c := &Command{Use: "c", Args: ArbitraryArgs, Run: emptyRun} + output, err := executeCommand(c, "a", "b") + if output != "" { + t.Errorf("Unexpected output: %v", output) + } + if err != nil { + t.Errorf("Unexpected error: %v", err) + } +} + +func TestMinimumNArgs(t *testing.T) { + c := &Command{Use: "c", Args: MinimumNArgs(2), Run: emptyRun} + output, err := executeCommand(c, "a", "b", "c") + if output != "" { + t.Errorf("Unexpected output: %v", output) + } + if err != nil { + t.Errorf("Unexpected error: %v", err) + } +} + +func TestMinimumNArgsWithLessArgs(t *testing.T) { + c := &Command{Use: "c", Args: MinimumNArgs(2), Run: emptyRun} + _, err := executeCommand(c, "a") + + if err == nil { + t.Fatal("Expected an error") + } + + got := err.Error() + expected := "requires at least 2 arg(s), only received 1" + if got != expected { + t.Fatalf("Expected %q, got %q", expected, got) + } +} + +func TestMaximumNArgs(t *testing.T) { + c := &Command{Use: "c", Args: MaximumNArgs(3), Run: emptyRun} + output, err := executeCommand(c, "a", "b") + if output != "" { + t.Errorf("Unexpected output: %v", output) + } + if err != nil { + t.Errorf("Unexpected error: %v", err) + } +} + +func TestMaximumNArgsWithMoreArgs(t *testing.T) { + c := &Command{Use: "c", Args: MaximumNArgs(2), Run: emptyRun} + _, err := executeCommand(c, "a", "b", "c") + + if err == nil { + t.Fatal("Expected an error") + } + + got := err.Error() + expected := "accepts at most 2 arg(s), received 3" + if got != expected { + t.Fatalf("Expected %q, got %q", expected, got) + } +} + +func TestExactArgs(t *testing.T) { + c := &Command{Use: "c", 
Args: ExactArgs(3), Run: emptyRun} + output, err := executeCommand(c, "a", "b", "c") + if output != "" { + t.Errorf("Unexpected output: %v", output) + } + if err != nil { + t.Errorf("Unexpected error: %v", err) + } +} + +func TestExactArgsWithInvalidCount(t *testing.T) { + c := &Command{Use: "c", Args: ExactArgs(2), Run: emptyRun} + _, err := executeCommand(c, "a", "b", "c") + + if err == nil { + t.Fatal("Expected an error") + } + + got := err.Error() + expected := "accepts 2 arg(s), received 3" + if got != expected { + t.Fatalf("Expected %q, got %q", expected, got) + } +} + +func TestRangeArgs(t *testing.T) { + c := &Command{Use: "c", Args: RangeArgs(2, 4), Run: emptyRun} + output, err := executeCommand(c, "a", "b", "c") + if output != "" { + t.Errorf("Unexpected output: %v", output) + } + if err != nil { + t.Errorf("Unexpected error: %v", err) + } +} + +func TestRangeArgsWithInvalidCount(t *testing.T) { + c := &Command{Use: "c", Args: RangeArgs(2, 4), Run: emptyRun} + _, err := executeCommand(c, "a") + + if err == nil { + t.Fatal("Expected an error") + } + + got := err.Error() + expected := "accepts between 2 and 4 arg(s), received 1" + if got != expected { + t.Fatalf("Expected %q, got %q", expected, got) + } +} + +func TestRootTakesNoArgs(t *testing.T) { + rootCmd := &Command{Use: "root", Run: emptyRun} + childCmd := &Command{Use: "child", Run: emptyRun} + rootCmd.AddCommand(childCmd) + + _, err := executeCommand(rootCmd, "illegal", "args") + if err == nil { + t.Fatal("Expected an error") + } + + got := err.Error() + expected := `unknown command "illegal" for "root"` + if !strings.Contains(got, expected) { + t.Errorf("expected %q, got %q", expected, got) + } +} + +func TestRootTakesArgs(t *testing.T) { + rootCmd := &Command{Use: "root", Args: ArbitraryArgs, Run: emptyRun} + childCmd := &Command{Use: "child", Run: emptyRun} + rootCmd.AddCommand(childCmd) + + _, err := executeCommand(rootCmd, "legal", "args") + if err != nil { + t.Errorf("Unexpected error: %v", err) + } +} + +func TestChildTakesNoArgs(t *testing.T) { + rootCmd := &Command{Use: "root", Run: emptyRun} + childCmd := &Command{Use: "child", Args: NoArgs, Run: emptyRun} + rootCmd.AddCommand(childCmd) + + _, err := executeCommand(rootCmd, "child", "illegal", "args") + if err == nil { + t.Fatal("Expected an error") + } + + got := err.Error() + expected := `unknown command "illegal" for "root child"` + if !strings.Contains(got, expected) { + t.Errorf("expected %q, got %q", expected, got) + } +} + +func TestChildTakesArgs(t *testing.T) { + rootCmd := &Command{Use: "root", Run: emptyRun} + childCmd := &Command{Use: "child", Args: ArbitraryArgs, Run: emptyRun} + rootCmd.AddCommand(childCmd) + + _, err := executeCommand(rootCmd, "child", "legal", "args") + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } +} diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go new file mode 100644 index 0000000..291eae7 --- /dev/null +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -0,0 +1,555 @@ +package cobra + +import ( + "bytes" + "fmt" + "io" + "os" + "sort" + "strings" + + "github.com/spf13/pflag" +) + +// Annotations for Bash completion. 
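+// These keys are attached to individual flags through pflag annotations
+// (see MarkFlagRequired, MarkFlagFilename and MarkFlagCustom below) and are
+// read back while the completion script is generated.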
+const ( + BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extensions" + BashCompCustom = "cobra_annotation_bash_completion_custom" + BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag" + BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir" +) + +func writePreamble(buf *bytes.Buffer, name string) { + buf.WriteString(fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name)) + buf.WriteString(fmt.Sprintf(` +__%[1]s_debug() +{ + if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then + echo "$*" >> "${BASH_COMP_DEBUG_FILE}" + fi +} + +# Homebrew on Macs have version 1.3 of bash-completion which doesn't include +# _init_completion. This is a very minimal version of that function. +__%[1]s_init_completion() +{ + COMPREPLY=() + _get_comp_words_by_ref "$@" cur prev words cword +} + +__%[1]s_index_of_word() +{ + local w word=$1 + shift + index=0 + for w in "$@"; do + [[ $w = "$word" ]] && return + index=$((index+1)) + done + index=-1 +} + +__%[1]s_contains_word() +{ + local w word=$1; shift + for w in "$@"; do + [[ $w = "$word" ]] && return + done + return 1 +} + +__%[1]s_handle_reply() +{ + __%[1]s_debug "${FUNCNAME[0]}" + case $cur in + -*) + if [[ $(type -t compopt) = "builtin" ]]; then + compopt -o nospace + fi + local allflags + if [ ${#must_have_one_flag[@]} -ne 0 ]; then + allflags=("${must_have_one_flag[@]}") + else + allflags=("${flags[*]} ${two_word_flags[*]}") + fi + COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") ) + if [[ $(type -t compopt) = "builtin" ]]; then + [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace + fi + + # complete after --flag=abc + if [[ $cur == *=* ]]; then + if [[ $(type -t compopt) = "builtin" ]]; then + compopt +o nospace + fi + + local index flag + flag="${cur%%=*}" + __%[1]s_index_of_word "${flag}" "${flags_with_completion[@]}" + COMPREPLY=() + if [[ ${index} -ge 0 ]]; then + PREFIX="" + cur="${cur#*=}" + ${flags_completion[${index}]} + if [ -n "${ZSH_VERSION}" ]; then + # zsh completion needs --flag= prefix + eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )" + fi + fi + fi + return 0; + ;; + esac + + # check if we are handling a flag with special work handling + local index + __%[1]s_index_of_word "${prev}" "${flags_with_completion[@]}" + if [[ ${index} -ge 0 ]]; then + ${flags_completion[${index}]} + return + fi + + # we are parsing a flag and don't have a special handler, no completion + if [[ ${cur} != "${words[cword]}" ]]; then + return + fi + + local completions + completions=("${commands[@]}") + if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then + completions=("${must_have_one_noun[@]}") + fi + if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then + completions+=("${must_have_one_flag[@]}") + fi + COMPREPLY=( $(compgen -W "${completions[*]}" -- "$cur") ) + + if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then + COMPREPLY=( $(compgen -W "${noun_aliases[*]}" -- "$cur") ) + fi + + if [[ ${#COMPREPLY[@]} -eq 0 ]]; then + declare -F __custom_func >/dev/null && __custom_func + fi + + # available in bash-completion >= 2, not always present on macOS + if declare -F __ltrim_colon_completions >/dev/null; then + __ltrim_colon_completions "$cur" + fi + + # If there is only 1 completion and it is a flag with an = it will be completed + # but we don't want a space after the = + if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then + compopt -o nospace + fi +} + +# The arguments should be in 
the form "ext1|ext2|extn" +__%[1]s_handle_filename_extension_flag() +{ + local ext="$1" + _filedir "@(${ext})" +} + +__%[1]s_handle_subdirs_in_dir_flag() +{ + local dir="$1" + pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 +} + +__%[1]s_handle_flag() +{ + __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + + # if a command required a flag, and we found it, unset must_have_one_flag() + local flagname=${words[c]} + local flagvalue + # if the word contained an = + if [[ ${words[c]} == *"="* ]]; then + flagvalue=${flagname#*=} # take in as flagvalue after the = + flagname=${flagname%%=*} # strip everything after the = + flagname="${flagname}=" # but put the = back + fi + __%[1]s_debug "${FUNCNAME[0]}: looking for ${flagname}" + if __%[1]s_contains_word "${flagname}" "${must_have_one_flag[@]}"; then + must_have_one_flag=() + fi + + # if you set a flag which only applies to this command, don't show subcommands + if __%[1]s_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then + commands=() + fi + + # keep flag value with flagname as flaghash + # flaghash variable is an associative array which is only supported in bash > 3. + if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then + if [ -n "${flagvalue}" ] ; then + flaghash[${flagname}]=${flagvalue} + elif [ -n "${words[ $((c+1)) ]}" ] ; then + flaghash[${flagname}]=${words[ $((c+1)) ]} + else + flaghash[${flagname}]="true" # pad "true" for bool flag + fi + fi + + # skip the argument to a two word flag + if __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then + c=$((c+1)) + # if we are looking for a flags value, don't show commands + if [[ $c -eq $cword ]]; then + commands=() + fi + fi + + c=$((c+1)) + +} + +__%[1]s_handle_noun() +{ + __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + + if __%[1]s_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then + must_have_one_noun=() + elif __%[1]s_contains_word "${words[c]}" "${noun_aliases[@]}"; then + must_have_one_noun=() + fi + + nouns+=("${words[c]}") + c=$((c+1)) +} + +__%[1]s_handle_command() +{ + __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + + local next_command + if [[ -n ${last_command} ]]; then + next_command="_${last_command}_${words[c]//:/__}" + else + if [[ $c -eq 0 ]]; then + next_command="_%[1]s_root_command" + else + next_command="_${words[c]//:/__}" + fi + fi + c=$((c+1)) + __%[1]s_debug "${FUNCNAME[0]}: looking for ${next_command}" + declare -F "$next_command" >/dev/null && $next_command +} + +__%[1]s_handle_word() +{ + if [[ $c -ge $cword ]]; then + __%[1]s_handle_reply + return + fi + __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + if [[ "${words[c]}" == -* ]]; then + __%[1]s_handle_flag + elif __%[1]s_contains_word "${words[c]}" "${commands[@]}"; then + __%[1]s_handle_command + elif [[ $c -eq 0 ]]; then + __%[1]s_handle_command + else + __%[1]s_handle_noun + fi + __%[1]s_handle_word +} + +`, name)) +} + +func writePostscript(buf *bytes.Buffer, name string) { + name = strings.Replace(name, ":", "__", -1) + buf.WriteString(fmt.Sprintf("__start_%s()\n", name)) + buf.WriteString(fmt.Sprintf(`{ + local cur prev words cword + declare -A flaghash 2>/dev/null || : + if declare -F _init_completion >/dev/null 2>&1; then + _init_completion -s || return + else + __%[1]s_init_completion -n "=" || return + fi + + local c=0 + local flags=() + local two_word_flags=() + local local_nonpersistent_flags=() + local flags_with_completion=() + local flags_completion=() + 
local commands=("%[1]s") + local must_have_one_flag=() + local must_have_one_noun=() + local last_command + local nouns=() + + __%[1]s_handle_word +} + +`, name)) + buf.WriteString(fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then + complete -o default -F __start_%s %s +else + complete -o default -o nospace -F __start_%s %s +fi + +`, name, name, name, name)) + buf.WriteString("# ex: ts=4 sw=4 et filetype=sh\n") +} + +func writeCommands(buf *bytes.Buffer, cmd *Command) { + buf.WriteString(" commands=()\n") + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() || c == cmd.helpCommand { + continue + } + buf.WriteString(fmt.Sprintf(" commands+=(%q)\n", c.Name())) + } + buf.WriteString("\n") +} + +func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]string, cmd *Command) { + for key, value := range annotations { + switch key { + case BashCompFilenameExt: + buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) + + var ext string + if len(value) > 0 { + ext = fmt.Sprintf("__%s_handle_filename_extension_flag ", cmd.Root().Name()) + strings.Join(value, "|") + } else { + ext = "_filedir" + } + buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext)) + case BashCompCustom: + buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) + if len(value) > 0 { + handlers := strings.Join(value, "; ") + buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", handlers)) + } else { + buf.WriteString(" flags_completion+=(:)\n") + } + case BashCompSubdirsInDir: + buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) + + var ext string + if len(value) == 1 { + ext = fmt.Sprintf("__%s_handle_subdirs_in_dir_flag ", cmd.Root().Name()) + value[0] + } else { + ext = "_filedir -d" + } + buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext)) + } + } +} + +func writeShortFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) { + name := flag.Shorthand + format := " " + if len(flag.NoOptDefVal) == 0 { + format += "two_word_" + } + format += "flags+=(\"-%s\")\n" + buf.WriteString(fmt.Sprintf(format, name)) + writeFlagHandler(buf, "-"+name, flag.Annotations, cmd) +} + +func writeFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) { + name := flag.Name + format := " flags+=(\"--%s" + if len(flag.NoOptDefVal) == 0 { + format += "=" + } + format += "\")\n" + buf.WriteString(fmt.Sprintf(format, name)) + writeFlagHandler(buf, "--"+name, flag.Annotations, cmd) +} + +func writeLocalNonPersistentFlag(buf *bytes.Buffer, flag *pflag.Flag) { + name := flag.Name + format := " local_nonpersistent_flags+=(\"--%s" + if len(flag.NoOptDefVal) == 0 { + format += "=" + } + format += "\")\n" + buf.WriteString(fmt.Sprintf(format, name)) +} + +func writeFlags(buf *bytes.Buffer, cmd *Command) { + buf.WriteString(` flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + +`) + localNonPersistentFlags := cmd.LocalNonPersistentFlags() + cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { + if nonCompletableFlag(flag) { + return + } + writeFlag(buf, flag, cmd) + if len(flag.Shorthand) > 0 { + writeShortFlag(buf, flag, cmd) + } + if localNonPersistentFlags.Lookup(flag.Name) != nil { + writeLocalNonPersistentFlag(buf, flag) + } + }) + cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { + if nonCompletableFlag(flag) { + return + } + writeFlag(buf, flag, cmd) + if len(flag.Shorthand) > 0 { + writeShortFlag(buf, flag, cmd) + } + }) + + buf.WriteString("\n") +} + +func 
writeRequiredFlag(buf *bytes.Buffer, cmd *Command) { + buf.WriteString(" must_have_one_flag=()\n") + flags := cmd.NonInheritedFlags() + flags.VisitAll(func(flag *pflag.Flag) { + if nonCompletableFlag(flag) { + return + } + for key := range flag.Annotations { + switch key { + case BashCompOneRequiredFlag: + format := " must_have_one_flag+=(\"--%s" + if flag.Value.Type() != "bool" { + format += "=" + } + format += "\")\n" + buf.WriteString(fmt.Sprintf(format, flag.Name)) + + if len(flag.Shorthand) > 0 { + buf.WriteString(fmt.Sprintf(" must_have_one_flag+=(\"-%s\")\n", flag.Shorthand)) + } + } + } + }) +} + +func writeRequiredNouns(buf *bytes.Buffer, cmd *Command) { + buf.WriteString(" must_have_one_noun=()\n") + sort.Sort(sort.StringSlice(cmd.ValidArgs)) + for _, value := range cmd.ValidArgs { + buf.WriteString(fmt.Sprintf(" must_have_one_noun+=(%q)\n", value)) + } +} + +func writeArgAliases(buf *bytes.Buffer, cmd *Command) { + buf.WriteString(" noun_aliases=()\n") + sort.Sort(sort.StringSlice(cmd.ArgAliases)) + for _, value := range cmd.ArgAliases { + buf.WriteString(fmt.Sprintf(" noun_aliases+=(%q)\n", value)) + } +} + +func gen(buf *bytes.Buffer, cmd *Command) { + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() || c == cmd.helpCommand { + continue + } + gen(buf, c) + } + commandName := cmd.CommandPath() + commandName = strings.Replace(commandName, " ", "_", -1) + commandName = strings.Replace(commandName, ":", "__", -1) + + if cmd.Root() == cmd { + buf.WriteString(fmt.Sprintf("_%s_root_command()\n{\n", commandName)) + } else { + buf.WriteString(fmt.Sprintf("_%s()\n{\n", commandName)) + } + + buf.WriteString(fmt.Sprintf(" last_command=%q\n", commandName)) + writeCommands(buf, cmd) + writeFlags(buf, cmd) + writeRequiredFlag(buf, cmd) + writeRequiredNouns(buf, cmd) + writeArgAliases(buf, cmd) + buf.WriteString("}\n\n") +} + +// GenBashCompletion generates bash completion file and writes to the passed writer. +func (c *Command) GenBashCompletion(w io.Writer) error { + buf := new(bytes.Buffer) + writePreamble(buf, c.Name()) + if len(c.BashCompletionFunction) > 0 { + buf.WriteString(c.BashCompletionFunction + "\n") + } + gen(buf, c) + writePostscript(buf, c.Name()) + + _, err := buf.WriteTo(w) + return err +} + +func nonCompletableFlag(flag *pflag.Flag) bool { + return flag.Hidden || len(flag.Deprecated) > 0 +} + +// GenBashCompletionFile generates bash completion file. +func (c *Command) GenBashCompletionFile(filename string) error { + outFile, err := os.Create(filename) + if err != nil { + return err + } + defer outFile.Close() + + return c.GenBashCompletion(outFile) +} + +// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists, +// and causes your command to report an error if invoked without the flag. +func (c *Command) MarkFlagRequired(name string) error { + return MarkFlagRequired(c.Flags(), name) +} + +// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag if it exists, +// and causes your command to report an error if invoked without the flag. +func (c *Command) MarkPersistentFlagRequired(name string) error { + return MarkFlagRequired(c.PersistentFlags(), name) +} + +// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists, +// and causes your command to report an error if invoked without the flag. 
+func MarkFlagRequired(flags *pflag.FlagSet, name string) error { + return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"}) +} + +// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists. +// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. +func (c *Command) MarkFlagFilename(name string, extensions ...string) error { + return MarkFlagFilename(c.Flags(), name, extensions...) +} + +// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists. +// Generated bash autocompletion will call the bash function f for the flag. +func (c *Command) MarkFlagCustom(name string, f string) error { + return MarkFlagCustom(c.Flags(), name, f) +} + +// MarkPersistentFlagFilename adds the BashCompFilenameExt annotation to the named persistent flag, if it exists. +// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. +func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error { + return MarkFlagFilename(c.PersistentFlags(), name, extensions...) +} + +// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag in the flag set, if it exists. +// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. +func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error { + return flags.SetAnnotation(name, BashCompFilenameExt, extensions) +} + +// MarkFlagCustom adds the BashCompCustom annotation to the named flag in the flag set, if it exists. +// Generated bash autocompletion will call the bash function f for the flag. +func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error { + return flags.SetAnnotation(name, BashCompCustom, []string{f}) +} diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md new file mode 100644 index 0000000..8d01f45 --- /dev/null +++ b/vendor/github.com/spf13/cobra/bash_completions.md @@ -0,0 +1,221 @@ +# Generating Bash Completions For Your Own cobra.Command + +Generating bash completions from a cobra command is incredibly easy. An actual program which does so for the kubernetes kubectl binary is as follows: + +```go +package main + +import ( + "io/ioutil" + "os" + + "k8s.io/kubernetes/pkg/kubectl/cmd" + "k8s.io/kubernetes/pkg/kubectl/cmd/util" +) + +func main() { + kubectl := cmd.NewKubectlCommand(util.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard) + kubectl.GenBashCompletionFile("out.sh") +} +``` + +`out.sh` will get you completions of subcommands and flags. Copy it to `/etc/bash_completion.d/` as described [here](https://debian-administration.org/article/316/An_introduction_to_bash_completion_part_1) and reset your terminal to use autocompletion. If you make additional annotations to your code, you can get even more intelligent and flexible behavior. + +## Creating your own custom functions + +Some more actual code that works in kubernetes: + +```bash +const ( + bash_completion_func = `__kubectl_parse_get() +{ + local kubectl_output out + if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then + out=($(echo "${kubectl_output}" | awk '{print $1}')) + COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) ) + fi +} + +__kubectl_get_resource() +{ + if [[ ${#nouns[@]} -eq 0 ]]; then + return 1 + fi + __kubectl_parse_get ${nouns[${#nouns[@]} -1]} + if [[ $? 
-eq 0 ]]; then
+        return 0
+    fi
+}
+
+__custom_func() {
+    case ${last_command} in
+        kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop)
+            __kubectl_get_resource
+            return
+            ;;
+        *)
+            ;;
+    esac
+}
+`)
+```
+
+And then I set that in my command definition:
+
+```go
+cmds := &cobra.Command{
+	Use:   "kubectl",
+	Short: "kubectl controls the Kubernetes cluster manager",
+	Long: `kubectl controls the Kubernetes cluster manager.
+
+Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`,
+	Run: runHelp,
+	BashCompletionFunction: bash_completion_func,
+}
+```
+
+The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__custom_func()` to be called when the built-in processor is unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` then `__custom_func()` will run because the cobra.Command only understood "kubectl" and "get." `__custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods!
+
+## Have the completions code complete your 'nouns'
+
+In the above example "pod" was assumed to already be typed. But if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. Simplified code from `kubectl get` looks like:
+
+```go
+validArgs := []string{"pod", "node", "service", "replicationcontroller"}
+
+cmd := &cobra.Command{
+	Use:     "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)",
+	Short:   "Display one or many resources",
+	Long:    get_long,
+	Example: get_example,
+	Run: func(cmd *cobra.Command, args []string) {
+		err := RunGet(f, out, cmd, args)
+		util.CheckErr(err)
+	},
+	ValidArgs: validArgs,
+}
+```
+
+Notice we put the "ValidArgs" on the "get" subcommand. Doing so will give results like:
+
+```bash
+# kubectl get [tab][tab]
+node   pod   replicationcontroller   service
+```
+
+## Plural form and shortcuts for nouns
+
+If your nouns have a number of aliases, you can define them alongside `ValidArgs` using `ArgAliases`:
+
+```go
+argAliases := []string{"pods", "nodes", "services", "svc", "replicationcontrollers", "rc"}
+
+cmd := &cobra.Command{
+	...
+	ValidArgs:  validArgs,
+	ArgAliases: argAliases,
+}
+```
+
+The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by
+the completion algorithm if entered manually, e.g. in:
+
+```bash
+# kubectl get rc [tab][tab]
+backend   frontend   database
+```
+
+Note that without declaring `rc` as an alias, the completion algorithm would show the list of nouns
+in this example again instead of the replication controllers.
+
+## Mark flags as required
+
+Most of the time completions will only show subcommands. But if a flag is required to make a subcommand work, you probably want it to show up when the user types [tab][tab]. Marking a flag as 'Required' is incredibly easy.
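+
+The flags must already be defined on the command before they are marked. A minimal sketch (the flag names and usage strings here are illustrative, not taken from kubectl):
+
+```go
+cmd.Flags().StringP("pod", "p", "", "pod to run the command in")
+cmd.Flags().StringP("container", "c", "", "container name within the pod")
+```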
+
+```go
+cmd.MarkFlagRequired("pod")
+cmd.MarkFlagRequired("container")
+```
+
+and you'll get something like
+
+```bash
+# kubectl exec [tab][tab]
+-c            --container=  -p            --pod=
+```
+
+# Specify valid filename extensions for flags that take a filename
+
+In this example we use --filename= and expect to get a json or yaml file as the argument. To make this easier we annotate the --filename flag with valid filename extensions.
+
+```go
+	annotations := []string{"json", "yaml", "yml"}
+	annotation := make(map[string][]string)
+	annotation[cobra.BashCompFilenameExt] = annotations
+
+	flag := &pflag.Flag{
+		Name:        "filename",
+		Shorthand:   "f",
+		Usage:       usage,
+		Value:       value,
+		DefValue:    value.String(),
+		Annotations: annotation,
+	}
+	cmd.Flags().AddFlag(flag)
+```
+
+Now when you run a command with this filename flag you'll get something like
+
+```bash
+# kubectl create -f 
+test/                   example/                rpmbuild/
+hello.yml               test.json
+```
+
+So while there are many other files in the CWD, it only shows subdirectories and files with valid extensions.
+
+# Specify custom flag completion
+
+Similar to the filename completion and filtering using cobra.BashCompFilenameExt, you can specify
+a custom flag completion function with cobra.BashCompCustom:
+
+```go
+	annotation := make(map[string][]string)
+	annotation[cobra.BashCompCustom] = []string{"__kubectl_get_namespaces"}
+
+	flag := &pflag.Flag{
+		Name:        "namespace",
+		Usage:       usage,
+		Annotations: annotation,
+	}
+	cmd.Flags().AddFlag(flag)
+```
+
+In addition, add the `__kubectl_get_namespaces` implementation in the `BashCompletionFunction`
+value, e.g.:
+
+```bash
+__kubectl_get_namespaces()
+{
+    local template
+    template="{{ range .items  }}{{ .metadata.name }} {{ end }}"
+    local kubectl_out
+    if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then
+        COMPREPLY=( $( compgen -W "${kubectl_out[*]}" -- "$cur" ) )
+    fi
+}
+```
+
+# Using bash aliases for commands
+
+You can also configure bash aliases for your commands, and they will also support completions.
+
+```bash
+alias aliasname=origcommand
+complete -o default -F __start_origcommand aliasname
+
+# and now when you run `aliasname` completion will make
+# suggestions as it did for `origcommand`.
+
+$ aliasname
+completion     firstcommand   secondcommand
+```
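+
+# Generating the completion script from inside your program
+
+`GenBashCompletion` writes to any `io.Writer`, so instead of generating a file you can expose the script through a (possibly hidden) subcommand. A sketch, assuming your root command lives in a package-level `rootCmd` variable:
+
+```go
+var completionCmd = &cobra.Command{
+	Use:    "completion",
+	Short:  "Generate a bash completion script on stdout",
+	Hidden: true,
+	RunE: func(cmd *cobra.Command, args []string) error {
+		return rootCmd.GenBashCompletion(os.Stdout)
+	},
+}
+```
+
+Users can then enable completion for the current shell with `source <(yourprogram completion)`.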
diff --git a/vendor/github.com/spf13/cobra/bash_completions_test.go b/vendor/github.com/spf13/cobra/bash_completions_test.go
new file mode 100644
index 0000000..02a4f15
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/bash_completions_test.go
@@ -0,0 +1,217 @@
+package cobra
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"os/exec"
+	"regexp"
+	"strings"
+	"testing"
+)
+
+func checkOmit(t *testing.T, found, unexpected string) {
+	if strings.Contains(found, unexpected) {
+		t.Errorf("Got: %q\nBut should not have!\n", unexpected)
+	}
+}
+
+func check(t *testing.T, found, expected string) {
+	if !strings.Contains(found, expected) {
+		t.Errorf("Expecting to contain: \n %q\nGot:\n %q\n", expected, found)
+	}
+}
+
+func checkRegex(t *testing.T, found, pattern string) {
+	matched, err := regexp.MatchString(pattern, found)
+	if err != nil {
+		t.Errorf("Error thrown performing MatchString: \n %s\n", err)
+	}
+	if !matched {
+		t.Errorf("Expecting to match: \n %q\nGot:\n %q\n", pattern, found)
+	}
+}
+
+func runShellCheck(s string) error {
+	excluded := []string{
+		"SC2034", // PREFIX appears unused. Verify it or export it.
+	}
+	cmd := exec.Command("shellcheck", "-s", "bash", "-", "-e", strings.Join(excluded, ","))
+	cmd.Stderr = os.Stderr
+	cmd.Stdout = os.Stdout
+
+	stdin, err := cmd.StdinPipe()
+	if err != nil {
+		return err
+	}
+	go func() {
+		stdin.Write([]byte(s))
+		stdin.Close()
+	}()
+
+	return cmd.Run()
+}
+
+// World's worst custom function; it just keeps telling you to enter hello!
+const bashCompletionFunc = `__custom_func() {
+	COMPREPLY=( "hello" )
+}
+`
+
+func TestBashCompletions(t *testing.T) {
+	rootCmd := &Command{
+		Use:                    "root",
+		ArgAliases:             []string{"pods", "nodes", "services", "replicationcontrollers", "po", "no", "svc", "rc"},
+		ValidArgs:              []string{"pod", "node", "service", "replicationcontroller"},
+		BashCompletionFunction: bashCompletionFunc,
+		Run:                    emptyRun,
+	}
+	rootCmd.Flags().IntP("introot", "i", -1, "help message for flag introot")
+	rootCmd.MarkFlagRequired("introot")
+
+	// Filename.
+	rootCmd.Flags().String("filename", "", "Enter a filename")
+	rootCmd.MarkFlagFilename("filename", "json", "yaml", "yml")
+
+	// Persistent filename.
+	rootCmd.PersistentFlags().String("persistent-filename", "", "Enter a filename")
+	rootCmd.MarkPersistentFlagFilename("persistent-filename")
+	rootCmd.MarkPersistentFlagRequired("persistent-filename")
+
+	// Filename extensions.
+	rootCmd.Flags().String("filename-ext", "", "Enter a filename (extension limited)")
+	rootCmd.MarkFlagFilename("filename-ext")
+
+	// Custom completion.
+	rootCmd.Flags().String("custom", "", "Enter a filename (extension limited)")
+	rootCmd.MarkFlagCustom("custom", "__complete_custom")
+
+	// Subdirectories in a given directory.
+	rootCmd.Flags().String("theme", "", "theme to use (located in /themes/THEMENAME/)")
+	rootCmd.Flags().SetAnnotation("theme", BashCompSubdirsInDir, []string{"themes"})
+
+	echoCmd := &Command{
+		Use:     "echo [string to echo]",
+		Aliases: []string{"say"},
+		Short:   "Echo anything to the screen",
+		Long:    "an utterly useless command for testing.",
+		Example: "Just run cobra-test echo",
+		Run:     emptyRun,
+	}
+
+	echoCmd.Flags().String("filename", "", "Enter a filename")
+	echoCmd.MarkFlagFilename("filename", "json", "yaml", "yml")
+	echoCmd.Flags().String("config", "", "config to use (located in /config/PROFILE/)")
+	echoCmd.Flags().SetAnnotation("config", BashCompSubdirsInDir, []string{"config"})
+
+	printCmd := &Command{
+		Use:   "print [string to print]",
+		Args:  MinimumNArgs(1),
+		Short: "Print anything to the screen",
+		Long:  "an absolutely utterly useless command for testing.",
+		Run:   emptyRun,
+	}
+
+	deprecatedCmd := &Command{
+		Use:        "deprecated [can't do anything here]",
+		Args:       NoArgs,
+		Short:      "A command which is deprecated",
+		Long:       "an absolutely utterly useless command for testing deprecation.",
+		Deprecated: "Please use echo instead",
+		Run:        emptyRun,
+	}
+
+	colonCmd := &Command{
+		Use: "cmd:colon",
+		Run: emptyRun,
+	}
+
+	timesCmd := &Command{
+		Use:        "times [# times] [string to echo]",
+		SuggestFor: []string{"counts"},
+		Args:       OnlyValidArgs,
+		ValidArgs:  []string{"one", "two", "three", "four"},
+		Short:      "Echo anything to the screen more times",
+		Long:       "a slightly useless command for testing.",
+		Run:        emptyRun,
+	}
+
+	echoCmd.AddCommand(timesCmd)
+	rootCmd.AddCommand(echoCmd, printCmd, deprecatedCmd, colonCmd)
+
+	buf := new(bytes.Buffer)
+	rootCmd.GenBashCompletion(buf)
+	output := buf.String()
+
+	check(t, output, "_root")
+	check(t, output, "_root_echo")
+	check(t, output, "_root_echo_times")
+	check(t, output, "_root_print")
+	check(t, output, "_root_cmd__colon")
+
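+	// Flags that take a value are recorded with a trailing '=' in the
+	// generated script (see writeFlag and writeRequiredFlag in
+	// bash_completions.go); bool flags are recorded bare.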
+	// check for required flags
+	check(t, output, `must_have_one_flag+=("--introot=")`)
+	check(t, output, `must_have_one_flag+=("--persistent-filename=")`)
+	// check for custom completion function
+	check(t, output, `COMPREPLY=( "hello" )`)
+	// check for required nouns
+	check(t, output, `must_have_one_noun+=("pod")`)
+	// check for noun aliases
+	check(t, output, `noun_aliases+=("pods")`)
+	check(t, output, `noun_aliases+=("rc")`)
+	checkOmit(t, output, `must_have_one_noun+=("pods")`)
+	// check for default filename completion
+	check(t, output, `flags_completion+=("_filedir")`)
+	// check for valid nouns of the times subcommand
+	check(t, output, `must_have_one_noun+=("three")`)
+	// check for filename extension flags
+	check(t, output, fmt.Sprintf(`flags_completion+=("__%s_handle_filename_extension_flag json|yaml|yml")`, rootCmd.Name()))
+	// check for filename extension flags in a subcommand
+	checkRegex(t, output, fmt.Sprintf(`_root_echo\(\)\n{[^}]*flags_completion\+=\("__%s_handle_filename_extension_flag json\|yaml\|yml"\)`, rootCmd.Name()))
+	// check for custom flags
+	check(t, output, `flags_completion+=("__complete_custom")`)
+	// check for subdirs_in_dir flags
+	check(t, output, fmt.Sprintf(`flags_completion+=("__%s_handle_subdirs_in_dir_flag themes")`, rootCmd.Name()))
+	// check for subdirs_in_dir flags in a subcommand
+	checkRegex(t, output, fmt.Sprintf(`_root_echo\(\)\n{[^}]*flags_completion\+=\("__%s_handle_subdirs_in_dir_flag config"\)`, rootCmd.Name()))
+
+	checkOmit(t, output, deprecatedCmd.Name())
+
+	// If available, run shellcheck against the script.
+	if err := exec.Command("which", "shellcheck").Run(); err != nil {
+		return
+	}
+	if err := runShellCheck(output); err != nil {
+		t.Fatalf("shellcheck failed: %v", err)
+	}
+}
+
+func TestBashCompletionHiddenFlag(t *testing.T) {
+	c := &Command{Use: "c", Run: emptyRun}
+
+	const flagName = "hiddenFlag"
+	c.Flags().Bool(flagName, false, "")
+	c.Flags().MarkHidden(flagName)
+
+	buf := new(bytes.Buffer)
+	c.GenBashCompletion(buf)
+	output := buf.String()
+
+	if strings.Contains(output, flagName) {
+		t.Errorf("Expected completion not to include %q flag: Got %v", flagName, output)
+	}
+}
+
+func TestBashCompletionDeprecatedFlag(t *testing.T) {
+	c := &Command{Use: "c", Run: emptyRun}
+
+	const flagName = "deprecated-flag"
+	c.Flags().Bool(flagName, false, "")
+	c.Flags().MarkDeprecated(flagName, "use --not-deprecated instead")
+
+	buf := new(bytes.Buffer)
+	c.GenBashCompletion(buf)
+	output := buf.String()
+
+	if strings.Contains(output, flagName) {
+		t.Errorf("Expected completion not to include %q flag: Got %v", flagName, output)
+	}
+}
diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go
new file mode 100644
index 0000000..7010fd1
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra.go
@@ -0,0 +1,200 @@
+// Copyright © 2013 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Commands similar to git, go tools and other modern CLI tools,
+// inspired by go, go-Commander, gh and subcommand.
+
+package cobra
+
+import (
+	"fmt"
+	"io"
+	"reflect"
+	"strconv"
+	"strings"
+	"text/template"
+	"unicode"
+)
+
+var templateFuncs = template.FuncMap{
+	"trim":                    strings.TrimSpace,
+	"trimRightSpace":          trimRightSpace,
+	"trimTrailingWhitespaces": trimRightSpace,
+	"appendIfNotPresent":      appendIfNotPresent,
+	"rpad":                    rpad,
+	"gt":                      Gt,
+	"eq":                      Eq,
+}
+
+var initializers []func()
+
+// EnablePrefixMatching allows setting automatic prefix matching. Automatic prefix matching
+// can be a dangerous thing to enable in CLI tools.
+// Set this to true to enable it.
+var EnablePrefixMatching = false
+
+// EnableCommandSorting controls sorting of the slice of commands, which is turned on by default.
+// To disable sorting, set it to false.
+var EnableCommandSorting = true
+
+// MousetrapHelpText enables an information splash screen on Windows
+// if the CLI is started from explorer.exe.
+// To disable the mousetrap, set this variable to the blank string ("").
+// Works only on Microsoft Windows.
+var MousetrapHelpText string = `This is a command line tool.
+
+You need to open cmd.exe and run it from there.
+`
+
+// AddTemplateFunc adds a template function that's available to Usage and Help
+// template generation.
+func AddTemplateFunc(name string, tmplFunc interface{}) {
+	templateFuncs[name] = tmplFunc
+}
+
+// AddTemplateFuncs adds multiple template functions that are available to Usage and
+// Help template generation.
+func AddTemplateFuncs(tmplFuncs template.FuncMap) {
+	for k, v := range tmplFuncs {
+		templateFuncs[k] = v
+	}
+}
+
+// OnInitialize sets the passed functions to be run when each command's
+// Execute method is called.
+func OnInitialize(y ...func()) {
+	initializers = append(initializers, y...)
+}
+
+// FIXME Gt is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans,
+// Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as
+// ints and then compared.
+func Gt(a interface{}, b interface{}) bool {
+	var left, right int64
+	av := reflect.ValueOf(a)
+
+	switch av.Kind() {
+	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+		left = int64(av.Len())
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		left = av.Int()
+	case reflect.String:
+		left, _ = strconv.ParseInt(av.String(), 10, 64)
+	}
+
+	bv := reflect.ValueOf(b)
+
+	switch bv.Kind() {
+	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+		right = int64(bv.Len())
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		right = bv.Int()
+	case reflect.String:
+		right, _ = strconv.ParseInt(bv.String(), 10, 64)
+	}
+
+	return left > right
+}
+
+// FIXME Eq is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic.
+func Eq(a interface{}, b interface{}) bool { + av := reflect.ValueOf(a) + bv := reflect.ValueOf(b) + + switch av.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + panic("Eq called on unsupported type") + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Int() == bv.Int() + case reflect.String: + return av.String() == bv.String() + } + return false +} + +func trimRightSpace(s string) string { + return strings.TrimRightFunc(s, unicode.IsSpace) +} + +// FIXME appendIfNotPresent is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra. + +// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s. +func appendIfNotPresent(s, stringToAppend string) string { + if strings.Contains(s, stringToAppend) { + return s + } + return s + " " + stringToAppend +} + +// rpad adds padding to the right of a string. +func rpad(s string, padding int) string { + template := fmt.Sprintf("%%-%ds", padding) + return fmt.Sprintf(template, s) +} + +// tmpl executes the given template text on data, writing the result to w. +func tmpl(w io.Writer, text string, data interface{}) error { + t := template.New("top") + t.Funcs(templateFuncs) + template.Must(t.Parse(text)) + return t.Execute(w, data) +} + +// ld compares two strings and returns the levenshtein distance between them. +func ld(s, t string, ignoreCase bool) int { + if ignoreCase { + s = strings.ToLower(s) + t = strings.ToLower(t) + } + d := make([][]int, len(s)+1) + for i := range d { + d[i] = make([]int, len(t)+1) + } + for i := range d { + d[i][0] = i + } + for j := range d[0] { + d[0][j] = j + } + for j := 1; j <= len(t); j++ { + for i := 1; i <= len(s); i++ { + if s[i-1] == t[j-1] { + d[i][j] = d[i-1][j-1] + } else { + min := d[i-1][j] + if d[i][j-1] < min { + min = d[i][j-1] + } + if d[i-1][j-1] < min { + min = d[i-1][j-1] + } + d[i][j] = min + 1 + } + } + + } + return d[len(s)][len(t)] +} + +func stringInSlice(a string, list []string) bool { + for _, b := range list { + if b == a { + return true + } + } + return false +} diff --git a/vendor/github.com/spf13/cobra/cobra/README.md b/vendor/github.com/spf13/cobra/cobra/README.md new file mode 100644 index 0000000..6054f95 --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/README.md @@ -0,0 +1,94 @@ +# Cobra Generator + +Cobra provides its own program that will create your application and add any +commands you want. It's the easiest way to incorporate Cobra into your application. + +In order to use the cobra command, compile it using the following command: + + go get github.com/spf13/cobra/cobra + +This will create the cobra executable under your `$GOPATH/bin` directory. + +### cobra init + +The `cobra init [app]` command will create your initial application code +for you. It is a very powerful application that will populate your program with +the right structure so you can immediately enjoy all the benefits of Cobra. It +will also automatically apply the license you specify to your application. + +Cobra init is pretty smart. You can provide it a full path, or simply a path +similar to what is expected in the import. + +``` +cobra init github.com/spf13/newApp +``` + +### cobra add + +Once an application is initialized, Cobra can create additional commands for you. 
+Let's say you created an app and you wanted the following commands for it:
+
+* app serve
+* app config
+* app config create
+
+In your project directory (where your main.go file is) you would run the following:
+
+```
+cobra add serve
+cobra add config
+cobra add create -p 'configCmd'
+```
+
+*Note: Use camelCase (not snake_case/kebab-case) for command names.
+Otherwise, you will encounter errors.
+For example, `cobra add add-user` is incorrect, but `cobra add addUser` is valid.*
+
+Once you have run these three commands you would have an app structure similar to
+the following:
+
+```
+  ▾ app/
+    ▾ cmd/
+        serve.go
+        config.go
+        create.go
+      main.go
+```
+
+At this point you can run `go run main.go` and it would run your app. `go run
+main.go serve`, `go run main.go config`, `go run main.go config create` along
+with `go run main.go help serve`, etc. would all work.
+
+Obviously you haven't added your own code to these yet. The commands are ready
+for you to give them their tasks. Have fun!
+
+### Configuring the cobra generator
+
+The Cobra generator is easier to use if you provide a simple configuration
+file, which eliminates passing the same information in flags over and over.
+
+An example ~/.cobra.yaml file:
+
+```yaml
+author: Steve Francia <spf@spf13.com>
+license: MIT
+```
+
+You can specify no license by setting `license` to `none` or you can specify
+a custom license:
+
+```yaml
+license:
+  header: This file is part of {{ .appName }}.
+  text: |
+    {{ .copyright }}
+
+    This is my license. There are many like it, but this one is mine.
+    My license is my best friend. It is my life. I must master it as I must
+    master my life.
+```
+
+You can also use built-in licenses. For example, **GPLv2**, **GPLv3**, **LGPL**,
+**AGPL**, **MIT**, **2-Clause BSD** or **3-Clause BSD**.
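+
+The generator reads these settings through viper, so other generator options can
+be set the same way. For example, setting `useViper: true` makes the generated
+`root.go` include viper-based config-file handling (the generator's
+`createRootCmdFile` consults this key via `viper.GetBool("useViper")`; see
+init.go below):
+
+```yaml
+author: Steve Francia <spf@spf13.com>
+license: MIT
+useViper: true
+```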
diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/add.go b/vendor/github.com/spf13/cobra/cobra/cmd/add.go
new file mode 100644
index 0000000..fb22096
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra/cmd/add.go
@@ -0,0 +1,179 @@
+// Copyright © 2015 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cmd
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"unicode"
+
+	"github.com/spf13/cobra"
+)
+
+func init() {
+	addCmd.Flags().StringVarP(&packageName, "package", "t", "", "target package name (e.g. github.com/spf13/hugo)")
+	addCmd.Flags().StringVarP(&parentName, "parent", "p", "rootCmd", "variable name of parent command for this command")
+}
+
+var packageName, parentName string
+
+var addCmd = &cobra.Command{
+	Use:     "add [command name]",
+	Aliases: []string{"command"},
+	Short:   "Add a command to a Cobra Application",
+	Long: `Add (cobra add) will create a new command, with a license and
+the appropriate structure for a Cobra-based CLI application,
+and register it to its parent (default rootCmd).
+
+If you want your command to be public, pass in the command name
+with an initial uppercase letter.
+
+Example: cobra add server -> resulting in a new cmd/server.go`,
+
+	Run: func(cmd *cobra.Command, args []string) {
+		if len(args) < 1 {
+			er("add needs a name for the command")
+		}
+
+		var project *Project
+		if packageName != "" {
+			project = NewProject(packageName)
+		} else {
+			wd, err := os.Getwd()
+			if err != nil {
+				er(err)
+			}
+			project = NewProjectFromPath(wd)
+		}
+
+		cmdName := validateCmdName(args[0])
+		cmdPath := filepath.Join(project.CmdPath(), cmdName+".go")
+		createCmdFile(project.License(), cmdPath, cmdName)
+
+		fmt.Fprintln(cmd.OutOrStdout(), cmdName, "created at", cmdPath)
+	},
+}
+
+// validateCmdName returns source without any dashes and underscores.
+// If a dash or underscore is found, the letter after it is uppercased.
+// It supports only ASCII (1-byte character) strings.
+// https://github.com/spf13/cobra/issues/269
+func validateCmdName(source string) string {
+	i := 0
+	l := len(source)
+	// The output is initialized on demand, when the first dash or underscore
+	// occurs.
+	var output string
+
+	for i < l {
+		if source[i] == '-' || source[i] == '_' {
+			if output == "" {
+				output = source[:i]
+			}
+
+			// If it's the last rune and it's a dash or underscore,
+			// don't add it to the output and break the loop.
+			if i == l-1 {
+				break
+			}
+
+			// If the next character is also a dash or underscore,
+			// just skip the current character.
+			if source[i+1] == '-' || source[i+1] == '_' {
+				i++
+				continue
+			}
+
+			// The current character is a dash or underscore, so
+			// uppercase the next letter and add it to the output.
+			output += string(unicode.ToUpper(rune(source[i+1])))
+			// We know that source[i] is a dash or underscore and source[i+1]
+			// has just been consumed, so advance past both.
+			i += 2
+			continue
+		}
+
+		// The current character isn't a dash or underscore,
+		// so just add it.
+		if output != "" {
+			output += string(source[i])
+		}
+		i++
+	}
+
+	if output == "" {
+		return source // source is already a valid name.
+	}
+	return output
+}
+
+func createCmdFile(license License, path, cmdName string) {
+	template := `{{comment .copyright}}
+{{if .license}}{{comment .license}}{{end}}
+
+package {{.cmdPackage}}
+
+import (
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// {{.cmdName}}Cmd represents the {{.cmdName}} command
+var {{.cmdName}}Cmd = &cobra.Command{
+	Use:   "{{.cmdName}}",
+	Short: "A brief description of your command",
+	Long: ` + "`" + `A longer description that spans multiple lines and likely contains examples
+and usage of using your command. For example:
+
+Cobra is a CLI library for Go that empowers applications.
+This application is a tool to generate the needed files
+to quickly create a Cobra application.` + "`" + `,
+	Run: func(cmd *cobra.Command, args []string) {
+		fmt.Println("{{.cmdName}} called")
+	},
+}
+
+func init() {
+	{{.parentName}}.AddCommand({{.cmdName}}Cmd)
+
+	// Here you will define your flags and configuration settings.
+ + // Cobra supports Persistent Flags which will work for this command + // and all subcommands, e.g.: + // {{.cmdName}}Cmd.PersistentFlags().String("foo", "", "A help for foo") + + // Cobra supports local flags which will only run when this command + // is called directly, e.g.: + // {{.cmdName}}Cmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") +} +` + + data := make(map[string]interface{}) + data["copyright"] = copyrightLine() + data["license"] = license.Header + data["cmdPackage"] = filepath.Base(filepath.Dir(path)) // last dir of path + data["parentName"] = parentName + data["cmdName"] = cmdName + + cmdScript, err := executeTemplate(template, data) + if err != nil { + er(err) + } + err = writeStringToFile(path, cmdScript) + if err != nil { + er(err) + } +} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/add_test.go b/vendor/github.com/spf13/cobra/cobra/cmd/add_test.go new file mode 100644 index 0000000..b920e2b --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/cmd/add_test.go @@ -0,0 +1,109 @@ +package cmd + +import ( + "errors" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/spf13/viper" +) + +// TestGoldenAddCmd initializes the project "github.com/spf13/testproject" +// in GOPATH, adds "test" command +// and compares the content of all files in cmd directory of testproject +// with appropriate golden files. +// Use -update to update existing golden files. +func TestGoldenAddCmd(t *testing.T) { + projectName := "github.com/spf13/testproject" + project := NewProject(projectName) + defer os.RemoveAll(project.AbsPath()) + + viper.Set("author", "NAME HERE ") + viper.Set("license", "apache") + viper.Set("year", 2017) + defer viper.Set("author", nil) + defer viper.Set("license", nil) + defer viper.Set("year", nil) + + // Initialize the project first. + initializeProject(project) + + // Then add the "test" command. + cmdName := "test" + cmdPath := filepath.Join(project.CmdPath(), cmdName+".go") + createCmdFile(project.License(), cmdPath, cmdName) + + expectedFiles := []string{".", "root.go", "test.go"} + gotFiles := []string{} + + // Check project file hierarchy and compare the content of every single file + // with appropriate golden file. + err := filepath.Walk(project.CmdPath(), func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Make path relative to project.CmdPath(). + // E.g. path = "/home/user/go/src/github.com/spf13/testproject/cmd/root.go" + // then it returns just "root.go". + relPath, err := filepath.Rel(project.CmdPath(), path) + if err != nil { + return err + } + relPath = filepath.ToSlash(relPath) + gotFiles = append(gotFiles, relPath) + goldenPath := filepath.Join("testdata", filepath.Base(path)+".golden") + + switch relPath { + // Known directories. + case ".": + return nil + // Known files. + case "root.go", "test.go": + if *update { + got, err := ioutil.ReadFile(path) + if err != nil { + return err + } + ioutil.WriteFile(goldenPath, got, 0644) + } + return compareFiles(path, goldenPath) + } + // Unknown file. + return errors.New("unknown file: " + path) + }) + if err != nil { + t.Fatal(err) + } + + // Check if some files lack. 
+	if err := checkLackFiles(expectedFiles, gotFiles); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestValidateCmdName(t *testing.T) {
+	testCases := []struct {
+		input    string
+		expected string
+	}{
+		{"cmdName", "cmdName"},
+		{"cmd_name", "cmdName"},
+		{"cmd-name", "cmdName"},
+		{"cmd______Name", "cmdName"},
+		{"cmd------Name", "cmdName"},
+		{"cmd______name", "cmdName"},
+		{"cmd------name", "cmdName"},
+		{"cmdName-----", "cmdName"},
+		{"cmdname-", "cmdname"},
+	}
+
+	for _, testCase := range testCases {
+		got := validateCmdName(testCase.input)
+		if testCase.expected != got {
+			t.Errorf("Expected %q, got %q", testCase.expected, got)
+		}
+	}
+}
diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/golden_test.go b/vendor/github.com/spf13/cobra/cobra/cmd/golden_test.go
new file mode 100644
index 0000000..59a5a1c
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra/cmd/golden_test.go
@@ -0,0 +1,77 @@
+package cmd
+
+import (
+	"bytes"
+	"errors"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"os/exec"
+)
+
+var update = flag.Bool("update", false, "update .golden files")
+
+func init() {
+	// Mute commands.
+	addCmd.SetOutput(new(bytes.Buffer))
+	initCmd.SetOutput(new(bytes.Buffer))
+}
+
+// compareFiles compares the content of the files at pathA and pathB.
+// If the contents are equal, it returns nil.
+// If not, it returns an error naming the files that differ, including a
+// diff between them (if the system has a diff command).
+func compareFiles(pathA, pathB string) error {
+	contentA, err := ioutil.ReadFile(pathA)
+	if err != nil {
+		return err
+	}
+	contentB, err := ioutil.ReadFile(pathB)
+	if err != nil {
+		return err
+	}
+	if !bytes.Equal(contentA, contentB) {
+		output := new(bytes.Buffer)
+		output.WriteString(fmt.Sprintf("%q and %q are not equal!\n\n", pathA, pathB))
+
+		diffPath, err := exec.LookPath("diff")
+		if err != nil {
+			// Don't execute diff if it can't be found;
+			// still report that the files are not equal.
+			return errors.New(output.String())
+		}
+		diffCmd := exec.Command(diffPath, "-u", pathA, pathB)
+		diffCmd.Stdout = output
+		diffCmd.Stderr = output
+
+		output.WriteString("$ diff -u " + pathA + " " + pathB + "\n")
+		if err := diffCmd.Run(); err != nil {
+			output.WriteString("\n" + err.Error())
+		}
+		return errors.New(output.String())
+	}
+	return nil
+}
+
+// checkLackFiles checks that all elements of expected are in got.
+func checkLackFiles(expected, got []string) error {
+	lacks := make([]string, 0, len(expected))
+	for _, ev := range expected {
+		if !stringInStringSlice(ev, got) {
+			lacks = append(lacks, ev)
+		}
+	}
+	if len(lacks) > 0 {
+		return fmt.Errorf("missing %v file(s): %v", len(lacks), lacks)
+	}
+	return nil
+}
+
+// stringInStringSlice checks if s is an element of slice.
+func stringInStringSlice(s string, slice []string) bool {
+	for _, v := range slice {
+		if s == v {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/helpers.go b/vendor/github.com/spf13/cobra/cobra/cmd/helpers.go
new file mode 100644
index 0000000..cd94b3e
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra/cmd/helpers.go
@@ -0,0 +1,168 @@
+// Copyright © 2015 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cmd
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+	"text/template"
+)
+
+var srcPaths []string
+
+func init() {
+	// Initialize srcPaths.
+	envGoPath := os.Getenv("GOPATH")
+	goPaths := filepath.SplitList(envGoPath)
+	if len(goPaths) == 0 {
+		// Adapted from https://github.com/Masterminds/glide/pull/798/files.
+		// As of Go 1.8 the GOPATH is no longer required to be set. Instead there
+		// is a default value. If there is no GOPATH check for the default value.
+		// Note, checking the GOPATH first to avoid invoking the go toolchain if
+		// possible.
+
+		goExecutable := os.Getenv("COBRA_GO_EXECUTABLE")
+		if len(goExecutable) <= 0 {
+			goExecutable = "go"
+		}
+
+		out, err := exec.Command(goExecutable, "env", "GOPATH").Output()
+		if err != nil {
+			er(err)
+		}
+
+		toolchainGoPath := strings.TrimSpace(string(out))
+		goPaths = filepath.SplitList(toolchainGoPath)
+		if len(goPaths) == 0 {
+			er("$GOPATH is not set")
+		}
+	}
+	srcPaths = make([]string, 0, len(goPaths))
+	for _, goPath := range goPaths {
+		srcPaths = append(srcPaths, filepath.Join(goPath, "src"))
+	}
+}
+
+// er prints the message and exits with a non-zero status.
+func er(msg interface{}) {
+	fmt.Println("Error:", msg)
+	os.Exit(1)
+}
+
+// isEmpty checks if a given path is empty.
+// Hidden files in path are ignored.
+func isEmpty(path string) bool {
+	fi, err := os.Stat(path)
+	if err != nil {
+		er(err)
+	}
+
+	if !fi.IsDir() {
+		return fi.Size() == 0
+	}
+
+	f, err := os.Open(path)
+	if err != nil {
+		er(err)
+	}
+	defer f.Close()
+
+	names, err := f.Readdirnames(-1)
+	if err != nil && err != io.EOF {
+		er(err)
+	}
+
+	for _, name := range names {
+		if len(name) > 0 && name[0] != '.' {
+			return false
+		}
+	}
+	return true
+}
+
+// exists checks if a file or directory exists.
+func exists(path string) bool {
+	if path == "" {
+		return false
+	}
+	_, err := os.Stat(path)
+	if err == nil {
+		return true
+	}
+	if !os.IsNotExist(err) {
+		er(err)
+	}
+	return false
+}
+
+// executeTemplate renders tmplStr with data, making the "comment"
+// helper available to the template.
+func executeTemplate(tmplStr string, data interface{}) (string, error) {
+	tmpl, err := template.New("").Funcs(template.FuncMap{"comment": commentifyString}).Parse(tmplStr)
+	if err != nil {
+		return "", err
+	}
+
+	buf := new(bytes.Buffer)
+	err = tmpl.Execute(buf, data)
+	return buf.String(), err
+}
+
+func writeStringToFile(path string, s string) error {
+	return writeToFile(path, strings.NewReader(s))
+}
+
+// writeToFile writes r to the file at path, but only if nothing
+// already exists at that path.
+func writeToFile(path string, r io.Reader) error {
+	if exists(path) {
+		return fmt.Errorf("%v already exists", path)
+	}
+
+	dir := filepath.Dir(path)
+	if dir != "" {
+		if err := os.MkdirAll(dir, 0777); err != nil {
+			return err
+		}
+	}
+
+	file, err := os.Create(path)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	_, err = io.Copy(file, r)
+	return err
+}
+
+// commentifyString comments every line of in.
+func commentifyString(in string) string { + var newlines []string + lines := strings.Split(in, "\n") + for _, line := range lines { + if strings.HasPrefix(line, "//") { + newlines = append(newlines, line) + } else { + if line == "" { + newlines = append(newlines, "//") + } else { + newlines = append(newlines, "// "+line) + } + } + } + return strings.Join(newlines, "\n") +} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/init.go b/vendor/github.com/spf13/cobra/cobra/cmd/init.go new file mode 100644 index 0000000..2441370 --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/cmd/init.go @@ -0,0 +1,234 @@ +// Copyright © 2015 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cmd + +import ( + "fmt" + "os" + "path" + "path/filepath" + + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var initCmd = &cobra.Command{ + Use: "init [name]", + Aliases: []string{"initialize", "initialise", "create"}, + Short: "Initialize a Cobra Application", + Long: `Initialize (cobra init) will create a new application, with a license +and the appropriate structure for a Cobra-based CLI application. + + * If a name is provided, it will be created in the current directory; + * If no name is provided, the current directory will be assumed; + * If a relative path is provided, it will be created inside $GOPATH + (e.g. github.com/spf13/hugo); + * If an absolute path is provided, it will be created; + * If the directory already exists but is empty, it will be used. + +Init will not use an existing directory with contents.`, + + Run: func(cmd *cobra.Command, args []string) { + wd, err := os.Getwd() + if err != nil { + er(err) + } + + var project *Project + if len(args) == 0 { + project = NewProjectFromPath(wd) + } else if len(args) == 1 { + arg := args[0] + if arg[0] == '.' { + arg = filepath.Join(wd, arg) + } + if filepath.IsAbs(arg) { + project = NewProjectFromPath(arg) + } else { + project = NewProject(arg) + } + } else { + er("please provide only one argument") + } + + initializeProject(project) + + fmt.Fprintln(cmd.OutOrStdout(), `Your Cobra application is ready at +`+project.AbsPath()+`. + +Give it a try by going there and running `+"`go run main.go`."+` +Add commands to it by running `+"`cobra add [cmdname]`.") + }, +} + +func initializeProject(project *Project) { + if !exists(project.AbsPath()) { // If path doesn't yet exist, create it + err := os.MkdirAll(project.AbsPath(), os.ModePerm) + if err != nil { + er(err) + } + } else if !isEmpty(project.AbsPath()) { // If path exists and is not empty don't use it + er("Cobra will not create a new project in a non empty directory: " + project.AbsPath()) + } + + // We have a directory and it's empty. Time to initialize it. + createLicenseFile(project.License(), project.AbsPath()) + createMainFile(project) + createRootCmdFile(project) +} + +func createLicenseFile(license License, path string) { + data := make(map[string]interface{}) + data["copyright"] = copyrightLine() + + // Generate license template from text and data. 
+ text, err := executeTemplate(license.Text, data) + if err != nil { + er(err) + } + + // Write license text to LICENSE file. + err = writeStringToFile(filepath.Join(path, "LICENSE"), text) + if err != nil { + er(err) + } +} + +func createMainFile(project *Project) { + mainTemplate := `{{ comment .copyright }} +{{if .license}}{{ comment .license }}{{end}} + +package main + +import "{{ .importpath }}" + +func main() { + cmd.Execute() +} +` + data := make(map[string]interface{}) + data["copyright"] = copyrightLine() + data["license"] = project.License().Header + data["importpath"] = path.Join(project.Name(), filepath.Base(project.CmdPath())) + + mainScript, err := executeTemplate(mainTemplate, data) + if err != nil { + er(err) + } + + err = writeStringToFile(filepath.Join(project.AbsPath(), "main.go"), mainScript) + if err != nil { + er(err) + } +} + +func createRootCmdFile(project *Project) { + template := `{{comment .copyright}} +{{if .license}}{{comment .license}}{{end}} + +package cmd + +import ( + "fmt" + "os" +{{if .viper}} + homedir "github.com/mitchellh/go-homedir"{{end}} + "github.com/spf13/cobra"{{if .viper}} + "github.com/spf13/viper"{{end}} +){{if .viper}} + +var cfgFile string{{end}} + +// rootCmd represents the base command when called without any subcommands +var rootCmd = &cobra.Command{ + Use: "{{.appName}}", + Short: "A brief description of your application", + Long: ` + "`" + `A longer description that spans multiple lines and likely contains +examples and usage of using your application. For example: + +Cobra is a CLI library for Go that empowers applications. +This application is a tool to generate the needed files +to quickly create a Cobra application.` + "`" + `, + // Uncomment the following line if your bare application + // has an action associated with it: + // Run: func(cmd *cobra.Command, args []string) { }, +} + +// Execute adds all child commands to the root command and sets flags appropriately. +// This is called by main.main(). It only needs to happen once to the rootCmd. +func Execute() { + if err := rootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(1) + } +} + +func init() { {{- if .viper}} + cobra.OnInitialize(initConfig) +{{end}} + // Here you will define your flags and configuration settings. + // Cobra supports persistent flags, which, if defined here, + // will be global for your application.{{ if .viper }} + rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.{{ .appName }}.yaml)"){{ else }} + // rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.{{ .appName }}.yaml)"){{ end }} + + // Cobra also supports local flags, which will only run + // when this action is called directly. + rootCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") +}{{ if .viper }} + +// initConfig reads in config file and ENV variables if set. +func initConfig() { + if cfgFile != "" { + // Use config file from the flag. + viper.SetConfigFile(cfgFile) + } else { + // Find home directory. + home, err := homedir.Dir() + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + // Search config in home directory with name ".{{ .appName }}" (without extension). + viper.AddConfigPath(home) + viper.SetConfigName(".{{ .appName }}") + } + + viper.AutomaticEnv() // read in environment variables that match + + // If a config file is found, read it in. 
+ if err := viper.ReadInConfig(); err == nil { + fmt.Println("Using config file:", viper.ConfigFileUsed()) + } +}{{ end }} +` + + data := make(map[string]interface{}) + data["copyright"] = copyrightLine() + data["viper"] = viper.GetBool("useViper") + data["license"] = project.License().Header + data["appName"] = path.Base(project.Name()) + + rootCmdScript, err := executeTemplate(template, data) + if err != nil { + er(err) + } + + err = writeStringToFile(filepath.Join(project.CmdPath(), "root.go"), rootCmdScript) + if err != nil { + er(err) + } + +} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/init_test.go b/vendor/github.com/spf13/cobra/cobra/cmd/init_test.go new file mode 100644 index 0000000..40eb403 --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/cmd/init_test.go @@ -0,0 +1,83 @@ +package cmd + +import ( + "errors" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/spf13/viper" +) + +// TestGoldenInitCmd initializes the project "github.com/spf13/testproject" +// in GOPATH and compares the content of files in initialized project with +// appropriate golden files ("testdata/*.golden"). +// Use -update to update existing golden files. +func TestGoldenInitCmd(t *testing.T) { + projectName := "github.com/spf13/testproject" + project := NewProject(projectName) + defer os.RemoveAll(project.AbsPath()) + + viper.Set("author", "NAME HERE ") + viper.Set("license", "apache") + viper.Set("year", 2017) + defer viper.Set("author", nil) + defer viper.Set("license", nil) + defer viper.Set("year", nil) + + os.Args = []string{"cobra", "init", projectName} + if err := rootCmd.Execute(); err != nil { + t.Fatal("Error by execution:", err) + } + + expectedFiles := []string{".", "cmd", "LICENSE", "main.go", "cmd/root.go"} + gotFiles := []string{} + + // Check project file hierarchy and compare the content of every single file + // with appropriate golden file. + err := filepath.Walk(project.AbsPath(), func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Make path relative to project.AbsPath(). + // E.g. path = "/home/user/go/src/github.com/spf13/testproject/cmd/root.go" + // then it returns just "cmd/root.go". + relPath, err := filepath.Rel(project.AbsPath(), path) + if err != nil { + return err + } + relPath = filepath.ToSlash(relPath) + gotFiles = append(gotFiles, relPath) + goldenPath := filepath.Join("testdata", filepath.Base(path)+".golden") + + switch relPath { + // Known directories. + case ".", "cmd": + return nil + // Known files. + case "LICENSE", "main.go", "cmd/root.go": + if *update { + got, err := ioutil.ReadFile(path) + if err != nil { + return err + } + if err := ioutil.WriteFile(goldenPath, got, 0644); err != nil { + t.Fatal("Error while updating file:", err) + } + } + return compareFiles(path, goldenPath) + } + // Unknown file. + return errors.New("unknown file: " + path) + }) + if err != nil { + t.Fatal(err) + } + + // Check if some files lack. 
+ if err := checkLackFiles(expectedFiles, gotFiles); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_agpl.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_agpl.go new file mode 100644 index 0000000..bc22e97 --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_agpl.go @@ -0,0 +1,683 @@ +package cmd + +func initAgpl() { + Licenses["agpl"] = License{ + Name: "GNU Affero General Public License", + PossibleMatches: []string{"agpl", "affero gpl", "gnu agpl"}, + Header: ` +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see .`, + Text: ` GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. 
Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. 
A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. 
+ + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +<http://www.gnu.org/licenses/>. +`, + } +} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_apache_2.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_apache_2.go new file mode 100644 index 0000000..38393d5 --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_apache_2.go @@ -0,0 +1,238 @@ +// Copyright © 2015 Steve Francia <spf@spf13.com>. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Parts inspired by https://github.com/ryanuber/go-license + +package cmd + +func initApache2() { + Licenses["apache"] = License{ + Name: "Apache 2.0", + PossibleMatches: []string{"apache", "apache20", "apache 2.0", "apache2.0", "apache-2.0"}, + Header: ` +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.`, + Text: ` + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +`, + } +} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_2.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_2.go new file mode 100644 index 0000000..4a847e0 --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_2.go @@ -0,0 +1,71 @@ +// Copyright © 2015 Steve Francia <spf@spf13.com>. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +// Parts inspired by https://github.com/ryanuber/go-license + +package cmd + +func initBsdClause2() { + Licenses["freebsd"] = License{ + Name: "Simplified BSD License", + PossibleMatches: []string{"freebsd", "simpbsd", "simple bsd", "2-clause bsd", + "2 clause bsd", "simplified bsd license"}, + Header: `All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE.`, + Text: `{{ .copyright }} +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +`, + } +} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_3.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_3.go new file mode 100644 index 0000000..c7476b3 --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_3.go @@ -0,0 +1,78 @@ +// Copyright © 2015 Steve Francia <spf@spf13.com>. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Parts inspired by https://github.com/ryanuber/go-license + +package cmd + +func initBsdClause3() { + Licenses["bsd"] = License{ + Name: "NewBSD", + PossibleMatches: []string{"bsd", "newbsd", "3 clause bsd", "3-clause bsd"}, + Header: `All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE.`, + Text: `{{ .copyright }} +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +`, + } +} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_2.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_2.go new file mode 100644 index 0000000..03e05b3 --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_2.go @@ -0,0 +1,376 @@ +// Copyright © 2015 Steve Francia <spf@spf13.com>. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Parts inspired by https://github.com/ryanuber/go-license + +package cmd + +func initGpl2() { + Licenses["gpl2"] = License{ + Name: "GNU General Public License 2.0", + PossibleMatches: []string{"gpl2", "gnu gpl2", "gplv2"}, + Header: ` +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +as published by the Free Software Foundation; either version 2 +of the License, or (at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see <http://www.gnu.org/licenses/>.`, + Text: ` GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things.
+ + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. 
You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) 
+ +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. 
+ +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type 'show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type 'show c' for details.
+
+The hypothetical commands 'show w' and 'show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than 'show w' and 'show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  'Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
+`,
+	}
+}
diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_3.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_3.go
new file mode 100644
index 0000000..ce07679
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_3.go
@@ -0,0 +1,711 @@
+// Copyright © 2015 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Parts inspired by https://github.com/ryanuber/go-license
+
+package cmd
+
+func initGpl3() {
+	Licenses["gpl3"] = License{
+		Name:            "GNU General Public License 3.0",
+		PossibleMatches: []string{"gpl3", "gplv3", "gpl", "gnu gpl3", "gnu gpl"},
+		Header: `
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.`,
+		Text: `                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code.
And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. 
+ + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. 
In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type 'show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type 'show c' for details.
+
+The hypothetical commands 'show w' and 'show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
+`,
+	}
+}
diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_lgpl.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_lgpl.go
new file mode 100644
index 0000000..0f8b96c
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_lgpl.go
@@ -0,0 +1,186 @@
+package cmd
+
+func initLgpl() {
+	Licenses["lgpl"] = License{
+		Name:            "GNU Lesser General Public License",
+		PossibleMatches: []string{"lgpl", "lesser gpl", "gnu lgpl"},
+		Header: `
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Lesser General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this program.
If not, see <http://www.gnu.org/licenses/>.`,
+		Text: `                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library. The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library. You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+ + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. 
If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.`,
+	}
+}
diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_mit.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_mit.go
new file mode 100644
index 0000000..bd2d0c4
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_mit.go
@@ -0,0 +1,63 @@
+// Copyright © 2015 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Parts inspired by https://github.com/ryanuber/go-license
+
+package cmd
+
+func initMit() {
+	Licenses["mit"] = License{
+		Name:            "MIT License",
+		PossibleMatches: []string{"mit"},
+		Header: `
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE.`, + Text: `The MIT License (MIT) + +{{ .copyright }} + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +`, + } +} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/licenses.go b/vendor/github.com/spf13/cobra/cobra/cmd/licenses.go new file mode 100644 index 0000000..a070134 --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/cmd/licenses.go @@ -0,0 +1,118 @@ +// Copyright © 2015 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Parts inspired by https://github.com/ryanuber/go-license + +package cmd + +import ( + "strings" + "time" + + "github.com/spf13/viper" +) + +// Licenses contains all possible licenses a user can choose from. +var Licenses = make(map[string]License) + +// License represents a software license agreement, containing the Name of +// the license, its possible matches (on the command line as given to cobra), +// the header to be used with each file on the file's creating, and the text +// of the license +type License struct { + Name string // The type of license in use + PossibleMatches []string // Similar names to guess + Text string // License text data + Header string // License header for source files +} + +func init() { + // Allows a user to not use a license. + Licenses["none"] = License{"None", []string{"none", "false"}, "", ""} + + initApache2() + initMit() + initBsdClause3() + initBsdClause2() + initGpl2() + initGpl3() + initLgpl() + initAgpl() +} + +// getLicense returns license specified by user in flag or in config. +// If user didn't specify the license, it returns Apache License 2.0. +// +// TODO: Inspect project for existing license +func getLicense() License { + // If explicitly flagged, use that. 
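+ // The fallback order below is: this flag, a custom license from config,
+ // a built-in license named in config, then Apache 2.0. As an
+ // illustration, a ~/.cobra.yaml selecting a custom license could look
+ // like this (values are placeholders, not part of this file):
+ //
+ //   license:
+ //     header: This file is part of myapp.
+ //     text: |
+ //       {{ .copyright }}
+ //
+ //       This is my custom license text.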
+ if userLicense != "" {
+ return findLicense(userLicense)
+ }
+
+ // If the user wants a custom license, use that.
+ if viper.IsSet("license.header") || viper.IsSet("license.text") {
+ return License{Header: viper.GetString("license.header"),
+ Text: viper.GetString("license.text")}
+ }
+
+ // If the user wants a built-in license, use that.
+ if viper.IsSet("license") {
+ return findLicense(viper.GetString("license"))
+ }
+
+ // If the user didn't set any license, use Apache 2.0 by default.
+ return Licenses["apache"]
+}
+
+func copyrightLine() string {
+ author := viper.GetString("author")
+
+ year := viper.GetString("year") // For tests.
+ if year == "" {
+ year = time.Now().Format("2006")
+ }
+
+ return "Copyright © " + year + " " + author
+}
+
+// findLicense looks up the License object for a built-in license.
+// If no license is found, the app terminates and an error is printed.
+func findLicense(name string) License {
+ found := matchLicense(name)
+ if found == "" {
+ er("unknown license: " + name)
+ }
+ return Licenses[found]
+}
+
+// matchLicense compares the given license name
+// to the PossibleMatches of all built-in licenses.
+// It returns an empty string if name is empty or if no
+// appropriate match for name is found.
+func matchLicense(name string) string {
+ if name == "" {
+ return ""
+ }
+
+ for key, lic := range Licenses {
+ for _, match := range lic.PossibleMatches {
+ if strings.EqualFold(name, match) {
+ return key
+ }
+ }
+ }
+
+ return ""
+}
diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/project.go b/vendor/github.com/spf13/cobra/cobra/cmd/project.go
new file mode 100644
index 0000000..7ddb825
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra/cmd/project.go
@@ -0,0 +1,200 @@
+package cmd
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+)
+
+// Project contains the name, license and paths of a project.
+type Project struct {
+ absPath string
+ cmdPath string
+ srcPath string
+ license License
+ name string
+}
+
+// NewProject returns a Project with the specified project name.
+func NewProject(projectName string) *Project {
+ if projectName == "" {
+ er("can't create project with blank name")
+ }
+
+ p := new(Project)
+ p.name = projectName
+
+ // 1. Find an already-created project.
+ p.absPath = findPackage(projectName)
+
+ // 2. If no project was created at this path, and the user is in a GOPATH,
+ // then use GOPATH/src/projectName.
+ if p.absPath == "" {
+ wd, err := os.Getwd()
+ if err != nil {
+ er(err)
+ }
+ for _, srcPath := range srcPaths {
+ goPath := filepath.Dir(srcPath)
+ if filepathHasPrefix(wd, goPath) {
+ p.absPath = filepath.Join(srcPath, projectName)
+ break
+ }
+ }
+ }
+
+ // 3. If the user is not in a GOPATH, then use (first GOPATH)/src/projectName.
+ if p.absPath == "" {
+ p.absPath = filepath.Join(srcPaths[0], projectName)
+ }
+
+ return p
+}
+
+// findPackage returns the full path to an existing Go package in the GOPATHs.
+func findPackage(packageName string) string {
+ if packageName == "" {
+ return ""
+ }
+
+ for _, srcPath := range srcPaths {
+ packagePath := filepath.Join(srcPath, packageName)
+ if exists(packagePath) {
+ return packagePath
+ }
+ }
+
+ return ""
+}
+
+// NewProjectFromPath returns a Project with the specified absolute path to
+// the package.
+func NewProjectFromPath(absPath string) *Project {
+ if absPath == "" {
+ er("can't create project: absPath can't be blank")
+ }
+ if !filepath.IsAbs(absPath) {
+ er("can't create project: absPath is not absolute")
+ }
+
+ // If absPath is a symlink, use its destination.
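+ // (For example, if /home/user/go/src/app is a symlink to
+ // /data/projects/app, the Lstat/Readlink sequence below resolves the
+ // project to /data/projects/app. Paths here are hypothetical.)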
+ fi, err := os.Lstat(absPath)
+ if err != nil {
+ er("can't read path info: " + err.Error())
+ }
+ if fi.Mode()&os.ModeSymlink != 0 {
+ path, err := os.Readlink(absPath)
+ if err != nil {
+ er("can't read the destination of symlink: " + err.Error())
+ }
+ absPath = path
+ }
+
+ p := new(Project)
+ p.absPath = strings.TrimSuffix(absPath, findCmdDir(absPath))
+ p.name = filepath.ToSlash(trimSrcPath(p.absPath, p.SrcPath()))
+ return p
+}
+
+// trimSrcPath trims srcPath from the beginning of absPath.
+func trimSrcPath(absPath, srcPath string) string {
+ relPath, err := filepath.Rel(srcPath, absPath)
+ if err != nil {
+ er(err)
+ }
+ return relPath
+}
+
+// License returns the License object of the project.
+func (p *Project) License() License {
+ if p.license.Text == "" && p.license.Name != "None" {
+ p.license = getLicense()
+ }
+ return p.license
+}
+
+// Name returns the name of the project, e.g. "github.com/spf13/cobra".
+func (p Project) Name() string {
+ return p.name
+}
+
+// CmdPath returns the absolute path to the directory where all commands are located.
+func (p *Project) CmdPath() string {
+ if p.absPath == "" {
+ return ""
+ }
+ if p.cmdPath == "" {
+ p.cmdPath = filepath.Join(p.absPath, findCmdDir(p.absPath))
+ }
+ return p.cmdPath
+}
+
+// findCmdDir checks if the base of absPath is a cmd dir and returns it;
+// otherwise it looks for an existing cmd dir inside absPath.
+func findCmdDir(absPath string) string {
+ if !exists(absPath) || isEmpty(absPath) {
+ return "cmd"
+ }
+
+ if isCmdDir(absPath) {
+ return filepath.Base(absPath)
+ }
+
+ files, _ := filepath.Glob(filepath.Join(absPath, "c*"))
+ for _, file := range files {
+ if isCmdDir(file) {
+ return filepath.Base(file)
+ }
+ }
+
+ return "cmd"
+}
+
+// isCmdDir checks if the base of name is one of the known command directories.
+func isCmdDir(name string) bool {
+ name = filepath.Base(name)
+ for _, cmdDir := range []string{"cmd", "cmds", "command", "commands"} {
+ if name == cmdDir {
+ return true
+ }
+ }
+ return false
+}
+
+// AbsPath returns the absolute path of the project.
+func (p Project) AbsPath() string {
+ return p.absPath
+}
+
+// SrcPath returns the absolute path to $GOPATH/src where the project is located.
+func (p *Project) SrcPath() string {
+ if p.srcPath != "" {
+ return p.srcPath
+ }
+ if p.absPath == "" {
+ p.srcPath = srcPaths[0]
+ return p.srcPath
+ }
+
+ for _, srcPath := range srcPaths {
+ if filepathHasPrefix(p.absPath, srcPath) {
+ p.srcPath = srcPath
+ break
+ }
+ }
+
+ return p.srcPath
+}
+
+func filepathHasPrefix(path string, prefix string) bool {
+ if len(path) <= len(prefix) {
+ return false
+ }
+ if runtime.GOOS == "windows" {
+ // Paths on Windows are case-insensitive.
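+ // For example, filepathHasPrefix(`C:\gopath\src\app`, `c:\GoPath\src`)
+ // should hold here even though the spellings differ (example paths are
+ // hypothetical).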
+ return strings.EqualFold(path[0:len(prefix)], prefix) + } + return path[0:len(prefix)] == prefix + +} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/project_test.go b/vendor/github.com/spf13/cobra/cobra/cmd/project_test.go new file mode 100644 index 0000000..037f7c5 --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/cmd/project_test.go @@ -0,0 +1,24 @@ +package cmd + +import ( + "testing" +) + +func TestFindExistingPackage(t *testing.T) { + path := findPackage("github.com/spf13/cobra") + if path == "" { + t.Fatal("findPackage didn't find the existing package") + } + if !hasGoPathPrefix(path) { + t.Fatalf("%q is not in GOPATH, but must be", path) + } +} + +func hasGoPathPrefix(path string) bool { + for _, srcPath := range srcPaths { + if filepathHasPrefix(path, srcPath) { + return true + } + } + return false +} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/root.go b/vendor/github.com/spf13/cobra/cobra/cmd/root.go new file mode 100644 index 0000000..19568f9 --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/cmd/root.go @@ -0,0 +1,79 @@ +// Copyright © 2015 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cmd + +import ( + "fmt" + + homedir "github.com/mitchellh/go-homedir" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + // Used for flags. + cfgFile, userLicense string + + rootCmd = &cobra.Command{ + Use: "cobra", + Short: "A generator for Cobra based Applications", + Long: `Cobra is a CLI library for Go that empowers applications. +This application is a tool to generate the needed files +to quickly create a Cobra application.`, + } +) + +// Execute executes the root command. +func Execute() { + rootCmd.Execute() +} + +func init() { + cobra.OnInitialize(initConfig) + + rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") + rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution") + rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project") + rootCmd.PersistentFlags().Bool("viper", true, "use Viper for configuration") + viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) + viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper")) + viper.SetDefault("author", "NAME HERE ") + viper.SetDefault("license", "apache") + + rootCmd.AddCommand(addCmd) + rootCmd.AddCommand(initCmd) +} + +func initConfig() { + if cfgFile != "" { + // Use config file from the flag. + viper.SetConfigFile(cfgFile) + } else { + // Find home directory. + home, err := homedir.Dir() + if err != nil { + er(err) + } + + // Search config in home directory with name ".cobra" (without extension). 
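+ // A minimal $HOME/.cobra.yaml picked up by this lookup might contain,
+ // for example (values are placeholders):
+ //
+ //   author: Ada Lovelace <ada@example.com>
+ //   license: mit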
+ viper.AddConfigPath(home) + viper.SetConfigName(".cobra") + } + + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err == nil { + fmt.Println("Using config file:", viper.ConfigFileUsed()) + } +} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/testdata/LICENSE.golden b/vendor/github.com/spf13/cobra/cobra/cmd/testdata/LICENSE.golden new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/cmd/testdata/LICENSE.golden @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/testdata/main.go.golden b/vendor/github.com/spf13/cobra/cobra/cmd/testdata/main.go.golden new file mode 100644 index 0000000..cdbe38d --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/cmd/testdata/main.go.golden @@ -0,0 +1,21 @@ +// Copyright © 2017 NAME HERE +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import "github.com/spf13/testproject/cmd" + +func main() { + cmd.Execute() +} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/testdata/root.go.golden b/vendor/github.com/spf13/cobra/cobra/cmd/testdata/root.go.golden new file mode 100644 index 0000000..d74f4cd --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/cmd/testdata/root.go.golden @@ -0,0 +1,89 @@ +// Copyright © 2017 NAME HERE +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cmd + +import ( + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var cfgFile string + +// rootCmd represents the base command when called without any subcommands +var rootCmd = &cobra.Command{ + Use: "testproject", + Short: "A brief description of your application", + Long: `A longer description that spans multiple lines and likely contains +examples and usage of using your application. For example: + +Cobra is a CLI library for Go that empowers applications. +This application is a tool to generate the needed files +to quickly create a Cobra application.`, + // Uncomment the following line if your bare application + // has an action associated with it: + // Run: func(cmd *cobra.Command, args []string) { }, +} + +// Execute adds all child commands to the root command and sets flags appropriately. 
+// This is called by main.main(). It only needs to happen once to the rootCmd. +func Execute() { + if err := rootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(1) + } +} + +func init() { + cobra.OnInitialize(initConfig) + + // Here you will define your flags and configuration settings. + // Cobra supports persistent flags, which, if defined here, + // will be global for your application. + rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.testproject.yaml)") + + // Cobra also supports local flags, which will only run + // when this action is called directly. + rootCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") +} + +// initConfig reads in config file and ENV variables if set. +func initConfig() { + if cfgFile != "" { + // Use config file from the flag. + viper.SetConfigFile(cfgFile) + } else { + // Find home directory. + home, err := homedir.Dir() + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + // Search config in home directory with name ".testproject" (without extension). + viper.AddConfigPath(home) + viper.SetConfigName(".testproject") + } + + viper.AutomaticEnv() // read in environment variables that match + + // If a config file is found, read it in. + if err := viper.ReadInConfig(); err == nil { + fmt.Println("Using config file:", viper.ConfigFileUsed()) + } +} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/testdata/test.go.golden b/vendor/github.com/spf13/cobra/cobra/cmd/testdata/test.go.golden new file mode 100644 index 0000000..ed64427 --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/cmd/testdata/test.go.golden @@ -0,0 +1,50 @@ +// Copyright © 2017 NAME HERE +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cmd + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +// testCmd represents the test command +var testCmd = &cobra.Command{ + Use: "test", + Short: "A brief description of your command", + Long: `A longer description that spans multiple lines and likely contains examples +and usage of using your command. For example: + +Cobra is a CLI library for Go that empowers applications. +This application is a tool to generate the needed files +to quickly create a Cobra application.`, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("test called") + }, +} + +func init() { + rootCmd.AddCommand(testCmd) + + // Here you will define your flags and configuration settings. 
+ + // Cobra supports Persistent Flags which will work for this command + // and all subcommands, e.g.: + // testCmd.PersistentFlags().String("foo", "", "A help for foo") + + // Cobra supports local flags which will only run when this command + // is called directly, e.g.: + // testCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") +} diff --git a/vendor/github.com/spf13/cobra/cobra/main.go b/vendor/github.com/spf13/cobra/cobra/main.go new file mode 100644 index 0000000..c3a9d9c --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/main.go @@ -0,0 +1,20 @@ +// Copyright © 2015 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import "github.com/spf13/cobra/cobra/cmd" + +func main() { + cmd.Execute() +} diff --git a/vendor/github.com/spf13/cobra/cobra_test.go b/vendor/github.com/spf13/cobra/cobra_test.go new file mode 100644 index 0000000..0d1755b --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra_test.go @@ -0,0 +1,22 @@ +package cobra + +import ( + "testing" + "text/template" +) + +func TestAddTemplateFunctions(t *testing.T) { + AddTemplateFunc("t", func() bool { return true }) + AddTemplateFuncs(template.FuncMap{ + "f": func() bool { return false }, + "h": func() string { return "Hello," }, + "w": func() string { return "world." }}) + + c := &Command{} + c.SetUsageTemplate(`{{if t}}{{h}}{{end}}{{if f}}{{h}}{{end}} {{w}}`) + + const expected = "Hello, world." + if got := c.UsageString(); got != expected { + t.Errorf("Expected UsageString: %v\nGot: %v", expected, got) + } +} diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go new file mode 100644 index 0000000..15b8112 --- /dev/null +++ b/vendor/github.com/spf13/cobra/command.go @@ -0,0 +1,1507 @@ +// Copyright © 2013 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces. +// In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code. +package cobra + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" + + flag "github.com/spf13/pflag" +) + +// Command is just that, a command for your application. +// E.g. 'go run ...' - 'run' is the command. Cobra requires +// you to define the usage and description as part of your command +// definition to ensure usability. +type Command struct { + // Use is the one-line usage message. 
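+ // For example: "clone <repository> [flags]" (an illustrative value).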
+ Use string + + // Aliases is an array of aliases that can be used instead of the first word in Use. + Aliases []string + + // SuggestFor is an array of command names for which this command will be suggested - + // similar to aliases but only suggests. + SuggestFor []string + + // Short is the short description shown in the 'help' output. + Short string + + // Long is the long message shown in the 'help ' output. + Long string + + // Example is examples of how to use the command. + Example string + + // ValidArgs is list of all valid non-flag arguments that are accepted in bash completions + ValidArgs []string + + // Expected arguments + Args PositionalArgs + + // ArgAliases is List of aliases for ValidArgs. + // These are not suggested to the user in the bash completion, + // but accepted if entered manually. + ArgAliases []string + + // BashCompletionFunction is custom functions used by the bash autocompletion generator. + BashCompletionFunction string + + // Deprecated defines, if this command is deprecated and should print this string when used. + Deprecated string + + // Hidden defines, if this command is hidden and should NOT show up in the list of available commands. + Hidden bool + + // Annotations are key/value pairs that can be used by applications to identify or + // group commands. + Annotations map[string]string + + // Version defines the version for this command. If this value is non-empty and the command does not + // define a "version" flag, a "version" boolean flag will be added to the command and, if specified, + // will print content of the "Version" variable. + Version string + + // The *Run functions are executed in the following order: + // * PersistentPreRun() + // * PreRun() + // * Run() + // * PostRun() + // * PersistentPostRun() + // All functions get the same args, the arguments after the command name. + // + // PersistentPreRun: children of this command will inherit and execute. + PersistentPreRun func(cmd *Command, args []string) + // PersistentPreRunE: PersistentPreRun but returns an error. + PersistentPreRunE func(cmd *Command, args []string) error + // PreRun: children of this command will not inherit. + PreRun func(cmd *Command, args []string) + // PreRunE: PreRun but returns an error. + PreRunE func(cmd *Command, args []string) error + // Run: Typically the actual work function. Most commands will only implement this. + Run func(cmd *Command, args []string) + // RunE: Run but returns an error. + RunE func(cmd *Command, args []string) error + // PostRun: run after the Run command. + PostRun func(cmd *Command, args []string) + // PostRunE: PostRun but returns an error. + PostRunE func(cmd *Command, args []string) error + // PersistentPostRun: children of this command will inherit and execute after PostRun. + PersistentPostRun func(cmd *Command, args []string) + // PersistentPostRunE: PersistentPostRun but returns an error. + PersistentPostRunE func(cmd *Command, args []string) error + + // SilenceErrors is an option to quiet errors down stream. + SilenceErrors bool + + // SilenceUsage is an option to silence usage when an error occurs. + SilenceUsage bool + + // DisableFlagParsing disables the flag parsing. + // If this is true all flags will be passed to the command as arguments. + DisableFlagParsing bool + + // DisableAutoGenTag defines, if gen tag ("Auto generated by spf13/cobra...") + // will be printed by generating docs for this command. 
+ DisableAutoGenTag bool + + // DisableFlagsInUseLine will disable the addition of [flags] to the usage + // line of a command when printing help or generating docs + DisableFlagsInUseLine bool + + // DisableSuggestions disables the suggestions based on Levenshtein distance + // that go along with 'unknown command' messages. + DisableSuggestions bool + // SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions. + // Must be > 0. + SuggestionsMinimumDistance int + + // TraverseChildren parses flags on all parents before executing child command. + TraverseChildren bool + + // commands is the list of commands supported by this program. + commands []*Command + // parent is a parent command for this command. + parent *Command + // Max lengths of commands' string lengths for use in padding. + commandsMaxUseLen int + commandsMaxCommandPathLen int + commandsMaxNameLen int + // commandsAreSorted defines, if command slice are sorted or not. + commandsAreSorted bool + // commandCalledAs is the name or alias value used to call this command. + commandCalledAs struct { + name string + called bool + } + + // args is actual args parsed from flags. + args []string + // flagErrorBuf contains all error messages from pflag. + flagErrorBuf *bytes.Buffer + // flags is full set of flags. + flags *flag.FlagSet + // pflags contains persistent flags. + pflags *flag.FlagSet + // lflags contains local flags. + lflags *flag.FlagSet + // iflags contains inherited flags. + iflags *flag.FlagSet + // parentsPflags is all persistent flags of cmd's parents. + parentsPflags *flag.FlagSet + // globNormFunc is the global normalization function + // that we can use on every pflag set and children commands + globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName + + // output is an output writer defined by user. + output io.Writer + // usageFunc is usage func defined by user. + usageFunc func(*Command) error + // usageTemplate is usage template defined by user. + usageTemplate string + // flagErrorFunc is func defined by user and it's called when the parsing of + // flags returns an error. + flagErrorFunc func(*Command, error) error + // helpTemplate is help template defined by user. + helpTemplate string + // helpFunc is help func defined by user. + helpFunc func(*Command, []string) + // helpCommand is command with usage 'help'. If it's not defined by user, + // cobra uses default help command. + helpCommand *Command + // versionTemplate is the version template defined by user. + versionTemplate string +} + +// SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden +// particularly useful when testing. +func (c *Command) SetArgs(a []string) { + c.args = a +} + +// SetOutput sets the destination for usage and error messages. +// If output is nil, os.Stderr is used. +func (c *Command) SetOutput(output io.Writer) { + c.output = output +} + +// SetUsageFunc sets usage function. Usage can be defined by application. +func (c *Command) SetUsageFunc(f func(*Command) error) { + c.usageFunc = f +} + +// SetUsageTemplate sets usage template. Can be defined by Application. +func (c *Command) SetUsageTemplate(s string) { + c.usageTemplate = s +} + +// SetFlagErrorFunc sets a function to generate an error when flag parsing +// fails. +func (c *Command) SetFlagErrorFunc(f func(*Command, error) error) { + c.flagErrorFunc = f +} + +// SetHelpFunc sets help function. Can be defined by Application. 
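+ // For example, an application could replace the default behavior with a
+ // minimal sketch like:
+ //
+ //   rootCmd.SetHelpFunc(func(c *cobra.Command, args []string) {
+ //       fmt.Fprintln(c.OutOrStdout(), "see https://example.com/docs")
+ //   })
+ //
+ // (rootCmd and the URL are hypothetical.)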
+func (c *Command) SetHelpFunc(f func(*Command, []string)) { + c.helpFunc = f +} + +// SetHelpCommand sets help command. +func (c *Command) SetHelpCommand(cmd *Command) { + c.helpCommand = cmd +} + +// SetHelpTemplate sets help template to be used. Application can use it to set custom template. +func (c *Command) SetHelpTemplate(s string) { + c.helpTemplate = s +} + +// SetVersionTemplate sets version template to be used. Application can use it to set custom template. +func (c *Command) SetVersionTemplate(s string) { + c.versionTemplate = s +} + +// SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands. +// The user should not have a cyclic dependency on commands. +func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) { + c.Flags().SetNormalizeFunc(n) + c.PersistentFlags().SetNormalizeFunc(n) + c.globNormFunc = n + + for _, command := range c.commands { + command.SetGlobalNormalizationFunc(n) + } +} + +// OutOrStdout returns output to stdout. +func (c *Command) OutOrStdout() io.Writer { + return c.getOut(os.Stdout) +} + +// OutOrStderr returns output to stderr +func (c *Command) OutOrStderr() io.Writer { + return c.getOut(os.Stderr) +} + +func (c *Command) getOut(def io.Writer) io.Writer { + if c.output != nil { + return c.output + } + if c.HasParent() { + return c.parent.getOut(def) + } + return def +} + +// UsageFunc returns either the function set by SetUsageFunc for this command +// or a parent, or it returns a default usage function. +func (c *Command) UsageFunc() (f func(*Command) error) { + if c.usageFunc != nil { + return c.usageFunc + } + if c.HasParent() { + return c.Parent().UsageFunc() + } + return func(c *Command) error { + c.mergePersistentFlags() + err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c) + if err != nil { + c.Println(err) + } + return err + } +} + +// Usage puts out the usage for the command. +// Used when a user provides invalid input. +// Can be defined by user by overriding UsageFunc. +func (c *Command) Usage() error { + return c.UsageFunc()(c) +} + +// HelpFunc returns either the function set by SetHelpFunc for this command +// or a parent, or it returns a function with default help behavior. +func (c *Command) HelpFunc() func(*Command, []string) { + if c.helpFunc != nil { + return c.helpFunc + } + if c.HasParent() { + return c.Parent().HelpFunc() + } + return func(c *Command, a []string) { + c.mergePersistentFlags() + err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c) + if err != nil { + c.Println(err) + } + } +} + +// Help puts out the help for the command. +// Used when a user calls help [command]. +// Can be defined by user by overriding HelpFunc. +func (c *Command) Help() error { + c.HelpFunc()(c, []string{}) + return nil +} + +// UsageString return usage string. +func (c *Command) UsageString() string { + tmpOutput := c.output + bb := new(bytes.Buffer) + c.SetOutput(bb) + c.Usage() + c.output = tmpOutput + return bb.String() +} + +// FlagErrorFunc returns either the function set by SetFlagErrorFunc for this +// command or a parent, or it returns a function which returns the original +// error. +func (c *Command) FlagErrorFunc() (f func(*Command, error) error) { + if c.flagErrorFunc != nil { + return c.flagErrorFunc + } + + if c.HasParent() { + return c.parent.FlagErrorFunc() + } + return func(c *Command, err error) error { + return err + } +} + +var minUsagePadding = 25 + +// UsagePadding return padding for the usage. 
+func (c *Command) UsagePadding() int { + if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen { + return minUsagePadding + } + return c.parent.commandsMaxUseLen +} + +var minCommandPathPadding = 11 + +// CommandPathPadding return padding for the command path. +func (c *Command) CommandPathPadding() int { + if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen { + return minCommandPathPadding + } + return c.parent.commandsMaxCommandPathLen +} + +var minNamePadding = 11 + +// NamePadding returns padding for the name. +func (c *Command) NamePadding() int { + if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen { + return minNamePadding + } + return c.parent.commandsMaxNameLen +} + +// UsageTemplate returns usage template for the command. +func (c *Command) UsageTemplate() string { + if c.usageTemplate != "" { + return c.usageTemplate + } + + if c.HasParent() { + return c.parent.UsageTemplate() + } + return `Usage:{{if .Runnable}} + {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}} + {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}} + +Aliases: + {{.NameAndAliases}}{{end}}{{if .HasExample}} + +Examples: +{{.Example}}{{end}}{{if .HasAvailableSubCommands}} + +Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}} + +Flags: +{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}} + +Global Flags: +{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}} + +Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}} + {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}} + +Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}} +` +} + +// HelpTemplate return help template for the command. +func (c *Command) HelpTemplate() string { + if c.helpTemplate != "" { + return c.helpTemplate + } + + if c.HasParent() { + return c.parent.HelpTemplate() + } + return `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}} + +{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` +} + +// VersionTemplate return version template for the command. +func (c *Command) VersionTemplate() string { + if c.versionTemplate != "" { + return c.versionTemplate + } + + if c.HasParent() { + return c.parent.VersionTemplate() + } + return `{{with .Name}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}} +` +} + +func hasNoOptDefVal(name string, fs *flag.FlagSet) bool { + flag := fs.Lookup(name) + if flag == nil { + return false + } + return flag.NoOptDefVal != "" +} + +func shortHasNoOptDefVal(name string, fs *flag.FlagSet) bool { + if len(name) == 0 { + return false + } + + flag := fs.ShorthandLookup(name[:1]) + if flag == nil { + return false + } + return flag.NoOptDefVal != "" +} + +func stripFlags(args []string, c *Command) []string { + if len(args) == 0 { + return args + } + c.mergePersistentFlags() + + commands := []string{} + flags := c.Flags() + +Loop: + for len(args) > 0 { + s := args[0] + args = args[1:] + switch { + case s == "--": + // "--" terminates the flags + break Loop + case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags): + // If '--flag arg' then + // delete arg from args. 
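+ // (For example, given hypothetical input ["--config", "c.yaml", "sub"],
+ // "c.yaml" is removed here so that only "sub" survives as a command word.)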
+ fallthrough // (do the same as below) + case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags): + // If '-f arg' then + // delete 'arg' from args or break the loop if len(args) <= 1. + if len(args) <= 1 { + break Loop + } else { + args = args[1:] + continue + } + case s != "" && !strings.HasPrefix(s, "-"): + commands = append(commands, s) + } + } + + return commands +} + +// argsMinusFirstX removes only the first x from args. Otherwise, commands that look like +// openshift admin policy add-role-to-user admin my-user, lose the admin argument (arg[4]). +func argsMinusFirstX(args []string, x string) []string { + for i, y := range args { + if x == y { + ret := []string{} + ret = append(ret, args[:i]...) + ret = append(ret, args[i+1:]...) + return ret + } + } + return args +} + +func isFlagArg(arg string) bool { + return ((len(arg) >= 3 && arg[1] == '-') || + (len(arg) >= 2 && arg[0] == '-' && arg[1] != '-')) +} + +// Find the target command given the args and command tree +// Meant to be run on the highest node. Only searches down. +func (c *Command) Find(args []string) (*Command, []string, error) { + var innerfind func(*Command, []string) (*Command, []string) + + innerfind = func(c *Command, innerArgs []string) (*Command, []string) { + argsWOflags := stripFlags(innerArgs, c) + if len(argsWOflags) == 0 { + return c, innerArgs + } + nextSubCmd := argsWOflags[0] + + cmd := c.findNext(nextSubCmd) + if cmd != nil { + return innerfind(cmd, argsMinusFirstX(innerArgs, nextSubCmd)) + } + return c, innerArgs + } + + commandFound, a := innerfind(c, args) + if commandFound.Args == nil { + return commandFound, a, legacyArgs(commandFound, stripFlags(a, commandFound)) + } + return commandFound, a, nil +} + +func (c *Command) findSuggestions(arg string) string { + if c.DisableSuggestions { + return "" + } + if c.SuggestionsMinimumDistance <= 0 { + c.SuggestionsMinimumDistance = 2 + } + suggestionsString := "" + if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 { + suggestionsString += "\n\nDid you mean this?\n" + for _, s := range suggestions { + suggestionsString += fmt.Sprintf("\t%v\n", s) + } + } + return suggestionsString +} + +func (c *Command) findNext(next string) *Command { + matches := make([]*Command, 0) + for _, cmd := range c.commands { + if cmd.Name() == next || cmd.HasAlias(next) { + cmd.commandCalledAs.name = next + return cmd + } + if EnablePrefixMatching && cmd.hasNameOrAliasPrefix(next) { + matches = append(matches, cmd) + } + } + + if len(matches) == 1 { + return matches[0] + } + + return nil +} + +// Traverse the command tree to find the command, and parse args for +// each parent. 
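+ // For example (a hypothetical invocation), with TraverseChildren enabled,
+ // "root --config c.yaml sub --verbose arg" parses --config against the
+ // root's flags, then hands "sub --verbose arg" to the child command.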
+func (c *Command) Traverse(args []string) (*Command, []string, error) { + flags := []string{} + inFlag := false + + for i, arg := range args { + switch { + // A long flag with a space separated value + case strings.HasPrefix(arg, "--") && !strings.Contains(arg, "="): + // TODO: this isn't quite right, we should really check ahead for 'true' or 'false' + inFlag = !hasNoOptDefVal(arg[2:], c.Flags()) + flags = append(flags, arg) + continue + // A short flag with a space separated value + case strings.HasPrefix(arg, "-") && !strings.Contains(arg, "=") && len(arg) == 2 && !shortHasNoOptDefVal(arg[1:], c.Flags()): + inFlag = true + flags = append(flags, arg) + continue + // The value for a flag + case inFlag: + inFlag = false + flags = append(flags, arg) + continue + // A flag without a value, or with an `=` separated value + case isFlagArg(arg): + flags = append(flags, arg) + continue + } + + cmd := c.findNext(arg) + if cmd == nil { + return c, args, nil + } + + if err := c.ParseFlags(flags); err != nil { + return nil, args, err + } + return cmd.Traverse(args[i+1:]) + } + return c, args, nil +} + +// SuggestionsFor provides suggestions for the typedName. +func (c *Command) SuggestionsFor(typedName string) []string { + suggestions := []string{} + for _, cmd := range c.commands { + if cmd.IsAvailableCommand() { + levenshteinDistance := ld(typedName, cmd.Name(), true) + suggestByLevenshtein := levenshteinDistance <= c.SuggestionsMinimumDistance + suggestByPrefix := strings.HasPrefix(strings.ToLower(cmd.Name()), strings.ToLower(typedName)) + if suggestByLevenshtein || suggestByPrefix { + suggestions = append(suggestions, cmd.Name()) + } + for _, explicitSuggestion := range cmd.SuggestFor { + if strings.EqualFold(typedName, explicitSuggestion) { + suggestions = append(suggestions, cmd.Name()) + } + } + } + } + return suggestions +} + +// VisitParents visits all parents of the command and invokes fn on each parent. +func (c *Command) VisitParents(fn func(*Command)) { + if c.HasParent() { + fn(c.Parent()) + c.Parent().VisitParents(fn) + } +} + +// Root finds root command. +func (c *Command) Root() *Command { + if c.HasParent() { + return c.Parent().Root() + } + return c +} + +// ArgsLenAtDash will return the length of c.Flags().Args at the moment +// when a -- was found during args parsing. +func (c *Command) ArgsLenAtDash() int { + return c.Flags().ArgsLenAtDash() +} + +func (c *Command) execute(a []string) (err error) { + if c == nil { + return fmt.Errorf("Called Execute() on a nil Command") + } + + if len(c.Deprecated) > 0 { + c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated) + } + + // initialize help and version flag at the last point possible to allow for user + // overriding + c.InitDefaultHelpFlag() + c.InitDefaultVersionFlag() + + err = c.ParseFlags(a) + if err != nil { + return c.FlagErrorFunc()(c, err) + } + + // If help is called, regardless of other flags, return we want help. + // Also say we need help if the command isn't runnable. + helpVal, err := c.Flags().GetBool("help") + if err != nil { + // should be impossible to get here as we always declare a help + // flag in InitDefaultHelpFlag() + c.Println("\"help\" flag declared as non-bool. Please correct your code") + return err + } + + if helpVal { + return flag.ErrHelp + } + + // for back-compat, only add version flag behavior if version is defined + if c.Version != "" { + versionVal, err := c.Flags().GetBool("version") + if err != nil { + c.Println("\"version\" flag declared as non-bool. 
Please correct your code") + return err + } + if versionVal { + err := tmpl(c.OutOrStdout(), c.VersionTemplate(), c) + if err != nil { + c.Println(err) + } + return err + } + } + + if !c.Runnable() { + return flag.ErrHelp + } + + c.preRun() + + argWoFlags := c.Flags().Args() + if c.DisableFlagParsing { + argWoFlags = a + } + + if err := c.ValidateArgs(argWoFlags); err != nil { + return err + } + + for p := c; p != nil; p = p.Parent() { + if p.PersistentPreRunE != nil { + if err := p.PersistentPreRunE(c, argWoFlags); err != nil { + return err + } + break + } else if p.PersistentPreRun != nil { + p.PersistentPreRun(c, argWoFlags) + break + } + } + if c.PreRunE != nil { + if err := c.PreRunE(c, argWoFlags); err != nil { + return err + } + } else if c.PreRun != nil { + c.PreRun(c, argWoFlags) + } + + if err := c.validateRequiredFlags(); err != nil { + return err + } + if c.RunE != nil { + if err := c.RunE(c, argWoFlags); err != nil { + return err + } + } else { + c.Run(c, argWoFlags) + } + if c.PostRunE != nil { + if err := c.PostRunE(c, argWoFlags); err != nil { + return err + } + } else if c.PostRun != nil { + c.PostRun(c, argWoFlags) + } + for p := c; p != nil; p = p.Parent() { + if p.PersistentPostRunE != nil { + if err := p.PersistentPostRunE(c, argWoFlags); err != nil { + return err + } + break + } else if p.PersistentPostRun != nil { + p.PersistentPostRun(c, argWoFlags) + break + } + } + + return nil +} + +func (c *Command) preRun() { + for _, x := range initializers { + x() + } +} + +// Execute uses the args (os.Args[1:] by default) +// and run through the command tree finding appropriate matches +// for commands and then corresponding flags. +func (c *Command) Execute() error { + _, err := c.ExecuteC() + return err +} + +// ExecuteC executes the command. 
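+ // A typical entry point defers to Execute on the root command, e.g.
+ // (sketch):
+ //
+ //   func main() {
+ //       if err := rootCmd.Execute(); err != nil {
+ //           os.Exit(1)
+ //       }
+ //   }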
+func (c *Command) ExecuteC() (cmd *Command, err error) { + // Regardless of what command execute is called on, run on Root only + if c.HasParent() { + return c.Root().ExecuteC() + } + + // windows hook + if preExecHookFn != nil { + preExecHookFn(c) + } + + // initialize help as the last point possible to allow for user + // overriding + c.InitDefaultHelpCmd() + + var args []string + + // Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155 + if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" { + args = os.Args[1:] + } else { + args = c.args + } + + var flags []string + if c.TraverseChildren { + cmd, flags, err = c.Traverse(args) + } else { + cmd, flags, err = c.Find(args) + } + if err != nil { + // If found parse to a subcommand and then failed, talk about the subcommand + if cmd != nil { + c = cmd + } + if !c.SilenceErrors { + c.Println("Error:", err.Error()) + c.Printf("Run '%v --help' for usage.\n", c.CommandPath()) + } + return c, err + } + + cmd.commandCalledAs.called = true + if cmd.commandCalledAs.name == "" { + cmd.commandCalledAs.name = cmd.Name() + } + + err = cmd.execute(flags) + if err != nil { + // Always show help if requested, even if SilenceErrors is in + // effect + if err == flag.ErrHelp { + cmd.HelpFunc()(cmd, args) + return cmd, nil + } + + // If root command has SilentErrors flagged, + // all subcommands should respect it + if !cmd.SilenceErrors && !c.SilenceErrors { + c.Println("Error:", err.Error()) + } + + // If root command has SilentUsage flagged, + // all subcommands should respect it + if !cmd.SilenceUsage && !c.SilenceUsage { + c.Println(cmd.UsageString()) + } + } + return cmd, err +} + +func (c *Command) ValidateArgs(args []string) error { + if c.Args == nil { + return nil + } + return c.Args(c, args) +} + +func (c *Command) validateRequiredFlags() error { + flags := c.Flags() + missingFlagNames := []string{} + flags.VisitAll(func(pflag *flag.Flag) { + requiredAnnotation, found := pflag.Annotations[BashCompOneRequiredFlag] + if !found { + return + } + if (requiredAnnotation[0] == "true") && !pflag.Changed { + missingFlagNames = append(missingFlagNames, pflag.Name) + } + }) + + if len(missingFlagNames) > 0 { + return fmt.Errorf(`required flag(s) "%s" not set`, strings.Join(missingFlagNames, `", "`)) + } + return nil +} + +// InitDefaultHelpFlag adds default help flag to c. +// It is called automatically by executing the c or by calling help and usage. +// If c already has help flag, it will do nothing. +func (c *Command) InitDefaultHelpFlag() { + c.mergePersistentFlags() + if c.Flags().Lookup("help") == nil { + usage := "help for " + if c.Name() == "" { + usage += "this command" + } else { + usage += c.Name() + } + c.Flags().BoolP("help", "h", false, usage) + } +} + +// InitDefaultVersionFlag adds default version flag to c. +// It is called automatically by executing the c. +// If c already has a version flag, it will do nothing. +// If c.Version is empty, it will do nothing. +func (c *Command) InitDefaultVersionFlag() { + if c.Version == "" { + return + } + + c.mergePersistentFlags() + if c.Flags().Lookup("version") == nil { + usage := "version for " + if c.Name() == "" { + usage += "this command" + } else { + usage += c.Name() + } + c.Flags().Bool("version", false, usage) + } +} + +// InitDefaultHelpCmd adds default help command to c. +// It is called automatically by executing the c or by calling help and usage. +// If c already has help command or c has no subcommands, it will do nothing. 
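+ // (So an application that installs its own help command first, e.g. via
+ // rootCmd.SetHelpCommand(customHelpCmd), keeps it; both names here are
+ // hypothetical.)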
+func (c *Command) InitDefaultHelpCmd() { + if !c.HasSubCommands() { + return + } + + if c.helpCommand == nil { + c.helpCommand = &Command{ + Use: "help [command]", + Short: "Help about any command", + Long: `Help provides help for any command in the application. +Simply type ` + c.Name() + ` help [path to command] for full details.`, + + Run: func(c *Command, args []string) { + cmd, _, e := c.Root().Find(args) + if cmd == nil || e != nil { + c.Printf("Unknown help topic %#q\n", args) + c.Root().Usage() + } else { + cmd.InitDefaultHelpFlag() // make sure the 'help' flag is available to be shown + cmd.Help() + } + }, + } + } + c.RemoveCommand(c.helpCommand) + c.AddCommand(c.helpCommand) +} + +// ResetCommands deletes the parent, subcommands and help command from c. +func (c *Command) ResetCommands() { + c.parent = nil + c.commands = nil + c.helpCommand = nil + c.parentsPflags = nil +} + +// Sorts commands by their names. +type commandSorterByName []*Command + +func (c commandSorterByName) Len() int { return len(c) } +func (c commandSorterByName) Swap(i, j int) { c[i], c[j] = c[j], c[i] } +func (c commandSorterByName) Less(i, j int) bool { return c[i].Name() < c[j].Name() } + +// Commands returns a sorted slice of child commands. +func (c *Command) Commands() []*Command { + // do not sort commands if they are already sorted or sorting was disabled + if EnableCommandSorting && !c.commandsAreSorted { + sort.Sort(commandSorterByName(c.commands)) + c.commandsAreSorted = true + } + return c.commands +} + +// AddCommand adds one or more commands to this parent command. +func (c *Command) AddCommand(cmds ...*Command) { + for i, x := range cmds { + if cmds[i] == c { + panic("Command can't be a child of itself") + } + cmds[i].parent = c + // update max lengths + usageLen := len(x.Use) + if usageLen > c.commandsMaxUseLen { + c.commandsMaxUseLen = usageLen + } + commandPathLen := len(x.CommandPath()) + if commandPathLen > c.commandsMaxCommandPathLen { + c.commandsMaxCommandPathLen = commandPathLen + } + nameLen := len(x.Name()) + if nameLen > c.commandsMaxNameLen { + c.commandsMaxNameLen = nameLen + } + // If global normalization function exists, update all children + if c.globNormFunc != nil { + x.SetGlobalNormalizationFunc(c.globNormFunc) + } + c.commands = append(c.commands, x) + c.commandsAreSorted = false + } +} + +// RemoveCommand removes one or more commands from a parent command. +func (c *Command) RemoveCommand(cmds ...*Command) { + commands := []*Command{} +main: + for _, command := range c.commands { + for _, cmd := range cmds { + if command == cmd { + command.parent = nil + continue main + } + } + commands = append(commands, command) + } + c.commands = commands + // recompute all lengths + c.commandsMaxUseLen = 0 + c.commandsMaxCommandPathLen = 0 + c.commandsMaxNameLen = 0 + for _, command := range c.commands { + usageLen := len(command.Use) + if usageLen > c.commandsMaxUseLen { + c.commandsMaxUseLen = usageLen + } + commandPathLen := len(command.CommandPath()) + if commandPathLen > c.commandsMaxCommandPathLen { + c.commandsMaxCommandPathLen = commandPathLen + } + nameLen := len(command.Name()) + if nameLen > c.commandsMaxNameLen { + c.commandsMaxNameLen = nameLen + } + } +} + +// Print is a convenience method to Print to the defined output, fallback to Stderr if not set. +func (c *Command) Print(i ...interface{}) { + fmt.Fprint(c.OutOrStderr(), i...) +} + +// Println is a convenience method to Println to the defined output, fallback to Stderr if not set.
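+
+// Before the remaining print helpers below, a minimal sketch of swapping a
+// subcommand out with the RemoveCommand/AddCommand pair defined above. The
+// function exampleReplaceCommand is illustrative only and is not part of the
+// vendored library.
+func exampleReplaceCommand() {
+	root := &Command{Use: "app", Run: func(*Command, []string) {}}
+	old := &Command{Use: "serve", Run: func(*Command, []string) {}}
+	root.AddCommand(old)
+	// Detach the old implementation, then register the replacement under
+	// the same name.
+	root.RemoveCommand(old)
+	root.AddCommand(&Command{Use: "serve", Run: func(*Command, []string) {}})
+}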
+func (c *Command) Println(i ...interface{}) { + c.Print(fmt.Sprintln(i...)) +} + +// Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set. +func (c *Command) Printf(format string, i ...interface{}) { + c.Print(fmt.Sprintf(format, i...)) +} + +// CommandPath returns the full path to this command. +func (c *Command) CommandPath() string { + if c.HasParent() { + return c.Parent().CommandPath() + " " + c.Name() + } + return c.Name() +} + +// UseLine puts out the full usage for a given command (including parents). +func (c *Command) UseLine() string { + var useline string + if c.HasParent() { + useline = c.parent.CommandPath() + " " + c.Use + } else { + useline = c.Use + } + if c.DisableFlagsInUseLine { + return useline + } + if c.HasAvailableFlags() && !strings.Contains(useline, "[flags]") { + useline += " [flags]" + } + return useline +} + +// DebugFlags used to determine which flags have been assigned to which commands +// and which persist. +func (c *Command) DebugFlags() { + c.Println("DebugFlags called on", c.Name()) + var debugflags func(*Command) + + debugflags = func(x *Command) { + if x.HasFlags() || x.HasPersistentFlags() { + c.Println(x.Name()) + } + if x.HasFlags() { + x.flags.VisitAll(func(f *flag.Flag) { + if x.HasPersistentFlags() && x.persistentFlag(f.Name) != nil { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [LP]") + } else { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]") + } + }) + } + if x.HasPersistentFlags() { + x.pflags.VisitAll(func(f *flag.Flag) { + if x.HasFlags() { + if x.flags.Lookup(f.Name) == nil { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") + } + } else { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") + } + }) + } + c.Println(x.flagErrorBuf) + if x.HasSubCommands() { + for _, y := range x.commands { + debugflags(y) + } + } + } + + debugflags(c) +} + +// Name returns the command's name: the first word in the use line. +func (c *Command) Name() string { + name := c.Use + i := strings.Index(name, " ") + if i >= 0 { + name = name[:i] + } + return name +} + +// HasAlias determines if a given string is an alias of the command. +func (c *Command) HasAlias(s string) bool { + for _, a := range c.Aliases { + if a == s { + return true + } + } + return false +} + +// CalledAs returns the command name or alias that was used to invoke +// this command or an empty string if the command has not been called. +func (c *Command) CalledAs() string { + if c.commandCalledAs.called { + return c.commandCalledAs.name + } + return "" +} + +// hasNameOrAliasPrefix returns true if the Name or any of aliases start +// with prefix +func (c *Command) hasNameOrAliasPrefix(prefix string) bool { + if strings.HasPrefix(c.Name(), prefix) { + c.commandCalledAs.name = c.Name() + return true + } + for _, alias := range c.Aliases { + if strings.HasPrefix(alias, prefix) { + c.commandCalledAs.name = alias + return true + } + } + return false +} + +// NameAndAliases returns a list of the command name and all aliases +func (c *Command) NameAndAliases() string { + return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ") +} + +// HasExample determines if the command has example. +func (c *Command) HasExample() bool { + return len(c.Example) > 0 +} + +// Runnable determines if the command is itself runnable. 
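+
+// Before Runnable below, a minimal sketch of the naming helpers above: Name
+// is the first word of Use, CommandPath joins the names of all parents, and
+// UseLine prepends the parent path to Use. The function exampleNaming is
+// illustrative only and is not part of the vendored library.
+func exampleNaming() {
+	root := &Command{Use: "app"}
+	serve := &Command{Use: "serve [port]", Aliases: []string{"run"}}
+	root.AddCommand(serve)
+	fmt.Println(serve.Name())          // "serve"
+	fmt.Println(serve.CommandPath())   // "app serve"
+	fmt.Println(serve.UseLine())       // "app serve [port]"
+	fmt.Println(serve.HasAlias("run")) // "true"
+}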
+func (c *Command) Runnable() bool { + return c.Run != nil || c.RunE != nil +} + +// HasSubCommands determines if the command has children commands. +func (c *Command) HasSubCommands() bool { + return len(c.commands) > 0 +} + +// IsAvailableCommand determines if a command is available as a non-help command +// (this includes all non deprecated/hidden commands). +func (c *Command) IsAvailableCommand() bool { + if len(c.Deprecated) != 0 || c.Hidden { + return false + } + + if c.HasParent() && c.Parent().helpCommand == c { + return false + } + + if c.Runnable() || c.HasAvailableSubCommands() { + return true + } + + return false +} + +// IsAdditionalHelpTopicCommand determines if a command is an additional +// help topic command; additional help topic command is determined by the +// fact that it is NOT runnable/hidden/deprecated, and has no sub commands that +// are runnable/hidden/deprecated. +// Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924. +func (c *Command) IsAdditionalHelpTopicCommand() bool { + // if a command is runnable, deprecated, or hidden it is not a 'help' command + if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden { + return false + } + + // if any non-help sub commands are found, the command is not a 'help' command + for _, sub := range c.commands { + if !sub.IsAdditionalHelpTopicCommand() { + return false + } + } + + // the command either has no sub commands, or no non-help sub commands + return true +} + +// HasHelpSubCommands determines if a command has any available 'help' sub commands +// that need to be shown in the usage/help default template under 'additional help +// topics'. +func (c *Command) HasHelpSubCommands() bool { + // return true on the first found available 'help' sub command + for _, sub := range c.commands { + if sub.IsAdditionalHelpTopicCommand() { + return true + } + } + + // the command either has no sub commands, or no available 'help' sub commands + return false +} + +// HasAvailableSubCommands determines if a command has available sub commands that +// need to be shown in the usage/help default template under 'available commands'. +func (c *Command) HasAvailableSubCommands() bool { + // return true on the first found available (non deprecated/help/hidden) + // sub command + for _, sub := range c.commands { + if sub.IsAvailableCommand() { + return true + } + } + + // the command either has no sub commands, or no available (non deprecated/help/hidden) + // sub commands + return false +} + +// HasParent determines if the command is a child command. +func (c *Command) HasParent() bool { + return c.parent != nil +} + +// GlobalNormalizationFunc returns the global normalization function or nil if it doesn't exist. +func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName { + return c.globNormFunc +} + +// Flags returns the complete FlagSet that applies +// to this command (local and persistent declared here and by all parents). +func (c *Command) Flags() *flag.FlagSet { + if c.flags == nil { + c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.flags.SetOutput(c.flagErrorBuf) + } + + return c.flags +} + +// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands. 
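+
+// Before the flag accessors below, a minimal sketch of the availability
+// checks above: a command with no Run function and no runnable subcommands
+// is treated as an additional help topic rather than an available command.
+// The function exampleHelpTopic is illustrative only and is not part of the
+// vendored library.
+func exampleHelpTopic() {
+	root := &Command{Use: "app", Run: func(*Command, []string) {}}
+	topic := &Command{Use: "colors", Short: "Notes on color support"}
+	root.AddCommand(topic)
+	fmt.Println(topic.Runnable())                     // "false"
+	fmt.Println(topic.IsAdditionalHelpTopicCommand()) // "true"
+	fmt.Println(root.HasHelpSubCommands())            // "true"
+}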
+func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { + persistentFlags := c.PersistentFlags() + + out := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.LocalFlags().VisitAll(func(f *flag.Flag) { + if persistentFlags.Lookup(f.Name) == nil { + out.AddFlag(f) + } + }) + return out +} + +// LocalFlags returns the local FlagSet specifically set in the current command. +func (c *Command) LocalFlags() *flag.FlagSet { + c.mergePersistentFlags() + + if c.lflags == nil { + c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.lflags.SetOutput(c.flagErrorBuf) + } + c.lflags.SortFlags = c.Flags().SortFlags + if c.globNormFunc != nil { + c.lflags.SetNormalizeFunc(c.globNormFunc) + } + + addToLocal := func(f *flag.Flag) { + if c.lflags.Lookup(f.Name) == nil && c.parentsPflags.Lookup(f.Name) == nil { + c.lflags.AddFlag(f) + } + } + c.Flags().VisitAll(addToLocal) + c.PersistentFlags().VisitAll(addToLocal) + return c.lflags +} + +// InheritedFlags returns all flags which were inherited from parents commands. +func (c *Command) InheritedFlags() *flag.FlagSet { + c.mergePersistentFlags() + + if c.iflags == nil { + c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.iflags.SetOutput(c.flagErrorBuf) + } + + local := c.LocalFlags() + if c.globNormFunc != nil { + c.iflags.SetNormalizeFunc(c.globNormFunc) + } + + c.parentsPflags.VisitAll(func(f *flag.Flag) { + if c.iflags.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil { + c.iflags.AddFlag(f) + } + }) + return c.iflags +} + +// NonInheritedFlags returns all flags which were not inherited from parent commands. +func (c *Command) NonInheritedFlags() *flag.FlagSet { + return c.LocalFlags() +} + +// PersistentFlags returns the persistent FlagSet specifically set in the current command. +func (c *Command) PersistentFlags() *flag.FlagSet { + if c.pflags == nil { + c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.pflags.SetOutput(c.flagErrorBuf) + } + return c.pflags +} + +// ResetFlags deletes all flags from command. +func (c *Command) ResetFlags() { + c.flagErrorBuf = new(bytes.Buffer) + c.flagErrorBuf.Reset() + c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.flags.SetOutput(c.flagErrorBuf) + c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.pflags.SetOutput(c.flagErrorBuf) + + c.lflags = nil + c.iflags = nil + c.parentsPflags = nil +} + +// HasFlags checks if the command contains any flags (local plus persistent from the entire structure). +func (c *Command) HasFlags() bool { + return c.Flags().HasFlags() +} + +// HasPersistentFlags checks if the command contains persistent flags. +func (c *Command) HasPersistentFlags() bool { + return c.PersistentFlags().HasFlags() +} + +// HasLocalFlags checks if the command has flags specifically declared locally. +func (c *Command) HasLocalFlags() bool { + return c.LocalFlags().HasFlags() +} + +// HasInheritedFlags checks if the command has flags inherited from its parent command. +func (c *Command) HasInheritedFlags() bool { + return c.InheritedFlags().HasFlags() +} + +// HasAvailableFlags checks if the command contains any flags (local plus persistent from the entire +// structure) which are not hidden or deprecated. 
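+
+// Before the Has*Flags checks below, a minimal sketch of the flag-scope
+// accessors above. The function exampleFlagScopes is illustrative only and
+// is not part of the vendored library.
+func exampleFlagScopes() {
+	root := &Command{Use: "app"}
+	child := &Command{Use: "child"}
+	root.AddCommand(child)
+	root.PersistentFlags().String("config", "", "inherited by subcommands")
+	child.Flags().Int("port", 0, "declared on the child only")
+	fmt.Println(child.LocalFlags().Lookup("port") != nil)                // "true"
+	fmt.Println(child.InheritedFlags().Lookup("config") != nil)          // "true"
+	fmt.Println(child.LocalNonPersistentFlags().Lookup("config") == nil) // "true"
+}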
+func (c *Command) HasAvailableFlags() bool { + return c.Flags().HasAvailableFlags() +} + +// HasAvailablePersistentFlags checks if the command contains persistent flags which are not hidden or deprecated. +func (c *Command) HasAvailablePersistentFlags() bool { + return c.PersistentFlags().HasAvailableFlags() +} + +// HasAvailableLocalFlags checks if the command has flags specifically declared locally which are not hidden +// or deprecated. +func (c *Command) HasAvailableLocalFlags() bool { + return c.LocalFlags().HasAvailableFlags() +} + +// HasAvailableInheritedFlags checks if the command has flags inherited from its parent command which are +// not hidden or deprecated. +func (c *Command) HasAvailableInheritedFlags() bool { + return c.InheritedFlags().HasAvailableFlags() +} + +// Flag climbs up the command tree looking for a matching flag. +func (c *Command) Flag(name string) (flag *flag.Flag) { + flag = c.Flags().Lookup(name) + + if flag == nil { + flag = c.persistentFlag(name) + } + + return +} + +// Recursively find a matching persistent flag. +func (c *Command) persistentFlag(name string) (flag *flag.Flag) { + if c.HasPersistentFlags() { + flag = c.PersistentFlags().Lookup(name) + } + + if flag == nil { + c.updateParentsPflags() + flag = c.parentsPflags.Lookup(name) + } + return +} + +// ParseFlags parses the persistent flag tree and local flags. +func (c *Command) ParseFlags(args []string) error { + if c.DisableFlagParsing { + return nil + } + + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + beforeErrorBufLen := c.flagErrorBuf.Len() + c.mergePersistentFlags() + err := c.Flags().Parse(args) + // Print warnings if they occurred (e.g. deprecated flag messages). + if c.flagErrorBuf.Len()-beforeErrorBufLen > 0 && err == nil { + c.Print(c.flagErrorBuf.String()) + } + + return err +} + +// Parent returns a command's parent command. +func (c *Command) Parent() *Command { + return c.parent +} + +// mergePersistentFlags merges c.PersistentFlags() to c.Flags() +// and adds missing persistent flags of all parents. +func (c *Command) mergePersistentFlags() { + c.updateParentsPflags() + c.Flags().AddFlagSet(c.PersistentFlags()) + c.Flags().AddFlagSet(c.parentsPflags) +} + +// updateParentsPflags updates c.parentsPflags by adding +// new persistent flags of all parents. +// If c.parentsPflags == nil, it creates a new one.
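+
+// Before updateParentsPflags below, a minimal sketch of Flag climbing the
+// command tree through that parent bookkeeping: a persistent flag declared
+// on the root is visible from a child. The function exampleFlagClimb is
+// illustrative only and is not part of the vendored library.
+func exampleFlagClimb() {
+	root := &Command{Use: "app"}
+	child := &Command{Use: "child"}
+	root.AddCommand(child)
+	root.PersistentFlags().Bool("verbose", false, "declared on the root")
+	if f := child.Flag("verbose"); f != nil {
+		fmt.Println("inherited:", f.Name) // "inherited: verbose"
+	}
+}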
+func (c *Command) updateParentsPflags() { + if c.parentsPflags == nil { + c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.parentsPflags.SetOutput(c.flagErrorBuf) + c.parentsPflags.SortFlags = false + } + + if c.globNormFunc != nil { + c.parentsPflags.SetNormalizeFunc(c.globNormFunc) + } + + c.Root().PersistentFlags().AddFlagSet(flag.CommandLine) + + c.VisitParents(func(parent *Command) { + c.parentsPflags.AddFlagSet(parent.PersistentFlags()) + }) +} diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go new file mode 100644 index 0000000..6159c1c --- /dev/null +++ b/vendor/github.com/spf13/cobra/command_notwin.go @@ -0,0 +1,5 @@ +// +build !windows + +package cobra + +var preExecHookFn func(*Command) diff --git a/vendor/github.com/spf13/cobra/command_test.go b/vendor/github.com/spf13/cobra/command_test.go new file mode 100644 index 0000000..d874a9a --- /dev/null +++ b/vendor/github.com/spf13/cobra/command_test.go @@ -0,0 +1,1628 @@ +package cobra + +import ( + "bytes" + "fmt" + "os" + "reflect" + "strings" + "testing" + + "github.com/spf13/pflag" +) + +func emptyRun(*Command, []string) {} + +func executeCommand(root *Command, args ...string) (output string, err error) { + _, output, err = executeCommandC(root, args...) + return output, err +} + +func executeCommandC(root *Command, args ...string) (c *Command, output string, err error) { + buf := new(bytes.Buffer) + root.SetOutput(buf) + root.SetArgs(args) + + c, err = root.ExecuteC() + + return c, buf.String(), err +} + +func resetCommandLineFlagSet() { + pflag.CommandLine = pflag.NewFlagSet(os.Args[0], pflag.ExitOnError) +} + +func checkStringContains(t *testing.T, got, expected string) { + if !strings.Contains(got, expected) { + t.Errorf("Expected to contain: \n %v\nGot:\n %v\n", expected, got) + } +} + +func checkStringOmits(t *testing.T, got, expected string) { + if strings.Contains(got, expected) { + t.Errorf("Expected to not contain: \n %v\nGot: %v", expected, got) + } +} + +func TestSingleCommand(t *testing.T) { + var rootCmdArgs []string + rootCmd := &Command{ + Use: "root", + Args: ExactArgs(2), + Run: func(_ *Command, args []string) { rootCmdArgs = args }, + } + aCmd := &Command{Use: "a", Args: NoArgs, Run: emptyRun} + bCmd := &Command{Use: "b", Args: NoArgs, Run: emptyRun} + rootCmd.AddCommand(aCmd, bCmd) + + output, err := executeCommand(rootCmd, "one", "two") + if output != "" { + t.Errorf("Unexpected output: %v", output) + } + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + got := strings.Join(rootCmdArgs, " ") + expected := "one two" + if got != expected { + t.Errorf("rootCmdArgs expected: %q, got: %q", expected, got) + } +} + +func TestChildCommand(t *testing.T) { + var child1CmdArgs []string + rootCmd := &Command{Use: "root", Args: NoArgs, Run: emptyRun} + child1Cmd := &Command{ + Use: "child1", + Args: ExactArgs(2), + Run: func(_ *Command, args []string) { child1CmdArgs = args }, + } + child2Cmd := &Command{Use: "child2", Args: NoArgs, Run: emptyRun} + rootCmd.AddCommand(child1Cmd, child2Cmd) + + output, err := executeCommand(rootCmd, "child1", "one", "two") + if output != "" { + t.Errorf("Unexpected output: %v", output) + } + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + got := strings.Join(child1CmdArgs, " ") + expected := "one two" + if got != expected { + t.Errorf("child1CmdArgs expected: %q, got: %q", expected, got) + } +} + +func TestCallCommandWithoutSubcommands(t *testing.T) { + rootCmd := &Command{Use: 
"root", Args: NoArgs, Run: emptyRun} + _, err := executeCommand(rootCmd) + if err != nil { + t.Errorf("Calling command without subcommands should not have error: %v", err) + } +} + +func TestRootExecuteUnknownCommand(t *testing.T) { + rootCmd := &Command{Use: "root", Run: emptyRun} + rootCmd.AddCommand(&Command{Use: "child", Run: emptyRun}) + + output, _ := executeCommand(rootCmd, "unknown") + + expected := "Error: unknown command \"unknown\" for \"root\"\nRun 'root --help' for usage.\n" + + if output != expected { + t.Errorf("Expected:\n %q\nGot:\n %q\n", expected, output) + } +} + +func TestSubcommandExecuteC(t *testing.T) { + rootCmd := &Command{Use: "root", Run: emptyRun} + childCmd := &Command{Use: "child", Run: emptyRun} + rootCmd.AddCommand(childCmd) + + c, output, err := executeCommandC(rootCmd, "child") + if output != "" { + t.Errorf("Unexpected output: %v", output) + } + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if c.Name() != "child" { + t.Errorf(`invalid command returned from ExecuteC: expected "child"', got %q`, c.Name()) + } +} + +func TestRootUnknownCommandSilenced(t *testing.T) { + rootCmd := &Command{Use: "root", Run: emptyRun} + rootCmd.SilenceErrors = true + rootCmd.SilenceUsage = true + rootCmd.AddCommand(&Command{Use: "child", Run: emptyRun}) + + output, _ := executeCommand(rootCmd, "unknown") + if output != "" { + t.Errorf("Expected blank output, because of silenced usage.\nGot:\n %q\n", output) + } +} + +func TestCommandAlias(t *testing.T) { + var timesCmdArgs []string + rootCmd := &Command{Use: "root", Args: NoArgs, Run: emptyRun} + echoCmd := &Command{ + Use: "echo", + Aliases: []string{"say", "tell"}, + Args: NoArgs, + Run: emptyRun, + } + timesCmd := &Command{ + Use: "times", + Args: ExactArgs(2), + Run: func(_ *Command, args []string) { timesCmdArgs = args }, + } + echoCmd.AddCommand(timesCmd) + rootCmd.AddCommand(echoCmd) + + output, err := executeCommand(rootCmd, "tell", "times", "one", "two") + if output != "" { + t.Errorf("Unexpected output: %v", output) + } + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + got := strings.Join(timesCmdArgs, " ") + expected := "one two" + if got != expected { + t.Errorf("timesCmdArgs expected: %v, got: %v", expected, got) + } +} + +func TestEnablePrefixMatching(t *testing.T) { + EnablePrefixMatching = true + + var aCmdArgs []string + rootCmd := &Command{Use: "root", Args: NoArgs, Run: emptyRun} + aCmd := &Command{ + Use: "aCmd", + Args: ExactArgs(2), + Run: func(_ *Command, args []string) { aCmdArgs = args }, + } + bCmd := &Command{Use: "bCmd", Args: NoArgs, Run: emptyRun} + rootCmd.AddCommand(aCmd, bCmd) + + output, err := executeCommand(rootCmd, "a", "one", "two") + if output != "" { + t.Errorf("Unexpected output: %v", output) + } + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + got := strings.Join(aCmdArgs, " ") + expected := "one two" + if got != expected { + t.Errorf("aCmdArgs expected: %q, got: %q", expected, got) + } + + EnablePrefixMatching = false +} + +func TestAliasPrefixMatching(t *testing.T) { + EnablePrefixMatching = true + + var timesCmdArgs []string + rootCmd := &Command{Use: "root", Args: NoArgs, Run: emptyRun} + echoCmd := &Command{ + Use: "echo", + Aliases: []string{"say", "tell"}, + Args: NoArgs, + Run: emptyRun, + } + timesCmd := &Command{ + Use: "times", + Args: ExactArgs(2), + Run: func(_ *Command, args []string) { timesCmdArgs = args }, + } + echoCmd.AddCommand(timesCmd) + rootCmd.AddCommand(echoCmd) + + output, err := executeCommand(rootCmd, "sa", 
"times", "one", "two") + if output != "" { + t.Errorf("Unexpected output: %v", output) + } + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + got := strings.Join(timesCmdArgs, " ") + expected := "one two" + if got != expected { + t.Errorf("timesCmdArgs expected: %v, got: %v", expected, got) + } + + EnablePrefixMatching = false +} + +// TestChildSameName checks the correct behaviour of cobra in cases, +// when an application with name "foo" and with subcommand "foo" +// is executed with args "foo foo". +func TestChildSameName(t *testing.T) { + var fooCmdArgs []string + rootCmd := &Command{Use: "foo", Args: NoArgs, Run: emptyRun} + fooCmd := &Command{ + Use: "foo", + Args: ExactArgs(2), + Run: func(_ *Command, args []string) { fooCmdArgs = args }, + } + barCmd := &Command{Use: "bar", Args: NoArgs, Run: emptyRun} + rootCmd.AddCommand(fooCmd, barCmd) + + output, err := executeCommand(rootCmd, "foo", "one", "two") + if output != "" { + t.Errorf("Unexpected output: %v", output) + } + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + got := strings.Join(fooCmdArgs, " ") + expected := "one two" + if got != expected { + t.Errorf("fooCmdArgs expected: %v, got: %v", expected, got) + } +} + +// TestGrandChildSameName checks the correct behaviour of cobra in cases, +// when user has a root command and a grand child +// with the same name. +func TestGrandChildSameName(t *testing.T) { + var fooCmdArgs []string + rootCmd := &Command{Use: "foo", Args: NoArgs, Run: emptyRun} + barCmd := &Command{Use: "bar", Args: NoArgs, Run: emptyRun} + fooCmd := &Command{ + Use: "foo", + Args: ExactArgs(2), + Run: func(_ *Command, args []string) { fooCmdArgs = args }, + } + barCmd.AddCommand(fooCmd) + rootCmd.AddCommand(barCmd) + + output, err := executeCommand(rootCmd, "bar", "foo", "one", "two") + if output != "" { + t.Errorf("Unexpected output: %v", output) + } + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + got := strings.Join(fooCmdArgs, " ") + expected := "one two" + if got != expected { + t.Errorf("fooCmdArgs expected: %v, got: %v", expected, got) + } +} + +func TestFlagLong(t *testing.T) { + var cArgs []string + c := &Command{ + Use: "c", + Args: ArbitraryArgs, + Run: func(_ *Command, args []string) { cArgs = args }, + } + + var intFlagValue int + var stringFlagValue string + c.Flags().IntVar(&intFlagValue, "intf", -1, "") + c.Flags().StringVar(&stringFlagValue, "sf", "", "") + + output, err := executeCommand(c, "--intf=7", "--sf=abc", "one", "--", "two") + if output != "" { + t.Errorf("Unexpected output: %v", err) + } + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if c.ArgsLenAtDash() != 1 { + t.Errorf("Expected ArgsLenAtDash: %v but got %v", 1, c.ArgsLenAtDash()) + } + if intFlagValue != 7 { + t.Errorf("Expected intFlagValue: %v, got %v", 7, intFlagValue) + } + if stringFlagValue != "abc" { + t.Errorf("Expected stringFlagValue: %q, got %q", "abc", stringFlagValue) + } + + got := strings.Join(cArgs, " ") + expected := "one two" + if got != expected { + t.Errorf("Expected arguments: %q, got %q", expected, got) + } +} + +func TestFlagShort(t *testing.T) { + var cArgs []string + c := &Command{ + Use: "c", + Args: ArbitraryArgs, + Run: func(_ *Command, args []string) { cArgs = args }, + } + + var intFlagValue int + var stringFlagValue string + c.Flags().IntVarP(&intFlagValue, "intf", "i", -1, "") + c.Flags().StringVarP(&stringFlagValue, "sf", "s", "", "") + + output, err := executeCommand(c, "-i", "7", "-sabc", "one", "two") + if output != "" { + 
t.Errorf("Unexpected output: %v", err) + } + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if intFlagValue != 7 { + t.Errorf("Expected flag value: %v, got %v", 7, intFlagValue) + } + if stringFlagValue != "abc" { + t.Errorf("Expected stringFlagValue: %q, got %q", "abc", stringFlagValue) + } + + got := strings.Join(cArgs, " ") + expected := "one two" + if got != expected { + t.Errorf("Expected arguments: %q, got %q", expected, got) + } +} + +func TestChildFlag(t *testing.T) { + rootCmd := &Command{Use: "root", Run: emptyRun} + childCmd := &Command{Use: "child", Run: emptyRun} + rootCmd.AddCommand(childCmd) + + var intFlagValue int + childCmd.Flags().IntVarP(&intFlagValue, "intf", "i", -1, "") + + output, err := executeCommand(rootCmd, "child", "-i7") + if output != "" { + t.Errorf("Unexpected output: %v", err) + } + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if intFlagValue != 7 { + t.Errorf("Expected flag value: %v, got %v", 7, intFlagValue) + } +} + +func TestChildFlagWithParentLocalFlag(t *testing.T) { + rootCmd := &Command{Use: "root", Run: emptyRun} + childCmd := &Command{Use: "child", Run: emptyRun} + rootCmd.AddCommand(childCmd) + + var intFlagValue int + rootCmd.Flags().StringP("sf", "s", "", "") + childCmd.Flags().IntVarP(&intFlagValue, "intf", "i", -1, "") + + _, err := executeCommand(rootCmd, "child", "-i7", "-sabc") + if err == nil { + t.Errorf("Invalid flag should generate error") + } + + checkStringContains(t, err.Error(), "unknown shorthand") + + if intFlagValue != 7 { + t.Errorf("Expected flag value: %v, got %v", 7, intFlagValue) + } +} + +func TestFlagInvalidInput(t *testing.T) { + rootCmd := &Command{Use: "root", Run: emptyRun} + rootCmd.Flags().IntP("intf", "i", -1, "") + + _, err := executeCommand(rootCmd, "-iabc") + if err == nil { + t.Errorf("Invalid flag value should generate error") + } + + checkStringContains(t, err.Error(), "invalid syntax") +} + +func TestFlagBeforeCommand(t *testing.T) { + rootCmd := &Command{Use: "root", Run: emptyRun} + childCmd := &Command{Use: "child", Run: emptyRun} + rootCmd.AddCommand(childCmd) + + var flagValue int + childCmd.Flags().IntVarP(&flagValue, "intf", "i", -1, "") + + // With short flag. + _, err := executeCommand(rootCmd, "-i7", "child") + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if flagValue != 7 { + t.Errorf("Expected flag value: %v, got %v", 7, flagValue) + } + + // With long flag. 
+ _, err = executeCommand(rootCmd, "--intf=8", "child") + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if flagValue != 8 { + t.Errorf("Expected flag value: %v, got %v", 9, flagValue) + } +} + +func TestStripFlags(t *testing.T) { + tests := []struct { + input []string + output []string + }{ + { + []string{"foo", "bar"}, + []string{"foo", "bar"}, + }, + { + []string{"foo", "--str", "-s"}, + []string{"foo"}, + }, + { + []string{"-s", "foo", "--str", "bar"}, + []string{}, + }, + { + []string{"-i10", "echo"}, + []string{"echo"}, + }, + { + []string{"-i=10", "echo"}, + []string{"echo"}, + }, + { + []string{"--int=100", "echo"}, + []string{"echo"}, + }, + { + []string{"-ib", "echo", "-sfoo", "baz"}, + []string{"echo", "baz"}, + }, + { + []string{"-i=baz", "bar", "-i", "foo", "blah"}, + []string{"bar", "blah"}, + }, + { + []string{"--int=baz", "-sbar", "-i", "foo", "blah"}, + []string{"blah"}, + }, + { + []string{"--bool", "bar", "-i", "foo", "blah"}, + []string{"bar", "blah"}, + }, + { + []string{"-b", "bar", "-i", "foo", "blah"}, + []string{"bar", "blah"}, + }, + { + []string{"--persist", "bar"}, + []string{"bar"}, + }, + { + []string{"-p", "bar"}, + []string{"bar"}, + }, + } + + c := &Command{Use: "c", Run: emptyRun} + c.PersistentFlags().BoolP("persist", "p", false, "") + c.Flags().IntP("int", "i", -1, "") + c.Flags().StringP("str", "s", "", "") + c.Flags().BoolP("bool", "b", false, "") + + for i, test := range tests { + got := stripFlags(test.input, c) + if !reflect.DeepEqual(test.output, got) { + t.Errorf("(%v) Expected: %v, got: %v", i, test.output, got) + } + } +} + +func TestDisableFlagParsing(t *testing.T) { + var cArgs []string + c := &Command{ + Use: "c", + DisableFlagParsing: true, + Run: func(_ *Command, args []string) { + cArgs = args + }, + } + + args := []string{"cmd", "-v", "-race", "-file", "foo.go"} + output, err := executeCommand(c, args...) + if output != "" { + t.Errorf("Unexpected output: %v", output) + } + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if !reflect.DeepEqual(args, cArgs) { + t.Errorf("Expected: %v, got: %v", args, cArgs) + } +} + +func TestPersistentFlagsOnSameCommand(t *testing.T) { + var rootCmdArgs []string + rootCmd := &Command{ + Use: "root", + Args: ArbitraryArgs, + Run: func(_ *Command, args []string) { rootCmdArgs = args }, + } + + var flagValue int + rootCmd.PersistentFlags().IntVarP(&flagValue, "intf", "i", -1, "") + + output, err := executeCommand(rootCmd, "-i7", "one", "two") + if output != "" { + t.Errorf("Unexpected output: %v", output) + } + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + got := strings.Join(rootCmdArgs, " ") + expected := "one two" + if got != expected { + t.Errorf("rootCmdArgs expected: %q, got %q", expected, got) + } + if flagValue != 7 { + t.Errorf("flagValue expected: %v, got %v", 7, flagValue) + } +} + +// TestEmptyInputs checks, +// if flags correctly parsed with blank strings in args. +func TestEmptyInputs(t *testing.T) { + c := &Command{Use: "c", Run: emptyRun} + + var flagValue int + c.Flags().IntVarP(&flagValue, "intf", "i", -1, "") + + output, err := executeCommand(c, "", "-i7", "") + if output != "" { + t.Errorf("Unexpected output: %v", output) + } + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if flagValue != 7 { + t.Errorf("flagValue expected: %v, got %v", 7, flagValue) + } +} + +func TestOverwrittenFlag(t *testing.T) { + // TODO: This test fails, but should work. 
+ t.Skip() + + parent := &Command{Use: "parent", Run: emptyRun} + child := &Command{Use: "child", Run: emptyRun} + + parent.PersistentFlags().Bool("boolf", false, "") + parent.PersistentFlags().Int("intf", -1, "") + child.Flags().String("strf", "", "") + child.Flags().Int("intf", -1, "") + + parent.AddCommand(child) + + childInherited := child.InheritedFlags() + childLocal := child.LocalFlags() + + if childLocal.Lookup("strf") == nil { + t.Error(`LocalFlags expected to contain "strf", got "nil"`) + } + if childInherited.Lookup("boolf") == nil { + t.Error(`InheritedFlags expected to contain "boolf", got "nil"`) + } + + if childInherited.Lookup("intf") != nil { + t.Errorf(`InheritedFlags should not contain overwritten flag "intf"`) + } + if childLocal.Lookup("intf") == nil { + t.Error(`LocalFlags expected to contain "intf", got "nil"`) + } +} + +func TestPersistentFlagsOnChild(t *testing.T) { + var childCmdArgs []string + rootCmd := &Command{Use: "root", Run: emptyRun} + childCmd := &Command{ + Use: "child", + Args: ArbitraryArgs, + Run: func(_ *Command, args []string) { childCmdArgs = args }, + } + rootCmd.AddCommand(childCmd) + + var parentFlagValue int + var childFlagValue int + rootCmd.PersistentFlags().IntVarP(&parentFlagValue, "parentf", "p", -1, "") + childCmd.Flags().IntVarP(&childFlagValue, "childf", "c", -1, "") + + output, err := executeCommand(rootCmd, "child", "-c7", "-p8", "one", "two") + if output != "" { + t.Errorf("Unexpected output: %v", output) + } + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + got := strings.Join(childCmdArgs, " ") + expected := "one two" + if got != expected { + t.Errorf("childCmdArgs expected: %q, got %q", expected, got) + } + if parentFlagValue != 8 { + t.Errorf("parentFlagValue expected: %v, got %v", 8, parentFlagValue) + } + if childFlagValue != 7 { + t.Errorf("childFlagValue expected: %v, got %v", 7, childFlagValue) + } +} + +func TestRequiredFlags(t *testing.T) { + c := &Command{Use: "c", Run: emptyRun} + c.Flags().String("foo1", "", "") + c.MarkFlagRequired("foo1") + c.Flags().String("foo2", "", "") + c.MarkFlagRequired("foo2") + c.Flags().String("bar", "", "") + + expected := fmt.Sprintf("required flag(s) %q, %q not set", "foo1", "foo2") + + _, err := executeCommand(c) + got := err.Error() + + if got != expected { + t.Errorf("Expected error: %q, got: %q", expected, got) + } +} + +func TestPersistentRequiredFlags(t *testing.T) { + parent := &Command{Use: "parent", Run: emptyRun} + parent.PersistentFlags().String("foo1", "", "") + parent.MarkPersistentFlagRequired("foo1") + parent.PersistentFlags().String("foo2", "", "") + parent.MarkPersistentFlagRequired("foo2") + parent.Flags().String("foo3", "", "") + + child := &Command{Use: "child", Run: emptyRun} + child.Flags().String("bar1", "", "") + child.MarkFlagRequired("bar1") + child.Flags().String("bar2", "", "") + child.MarkFlagRequired("bar2") + child.Flags().String("bar3", "", "") + + parent.AddCommand(child) + + expected := fmt.Sprintf("required flag(s) %q, %q, %q, %q not set", "bar1", "bar2", "foo1", "foo2") + + _, err := executeCommand(parent, "child") + if err.Error() != expected { + t.Errorf("Expected %q, got %q", expected, err.Error()) + } +} + +func TestInitHelpFlagMergesFlags(t *testing.T) { + usage := "custom flag" + rootCmd := &Command{Use: "root"} + rootCmd.PersistentFlags().Bool("help", false, "custom flag") + childCmd := &Command{Use: "child"} + rootCmd.AddCommand(childCmd) + + childCmd.InitDefaultHelpFlag() + got := childCmd.Flags().Lookup("help").Usage + if got 
!= usage { + t.Errorf("Expected the help flag from the root command with usage: %v\nGot the default with usage: %v", usage, got) + } +} + +func TestHelpCommandExecuted(t *testing.T) { + rootCmd := &Command{Use: "root", Long: "Long description", Run: emptyRun} + rootCmd.AddCommand(&Command{Use: "child", Run: emptyRun}) + + output, err := executeCommand(rootCmd, "help") + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + checkStringContains(t, output, rootCmd.Long) +} + +func TestHelpCommandExecutedOnChild(t *testing.T) { + rootCmd := &Command{Use: "root", Run: emptyRun} + childCmd := &Command{Use: "child", Long: "Long description", Run: emptyRun} + rootCmd.AddCommand(childCmd) + + output, err := executeCommand(rootCmd, "help", "child") + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + checkStringContains(t, output, childCmd.Long) +} + +func TestSetHelpCommand(t *testing.T) { + c := &Command{Use: "c", Run: emptyRun} + c.AddCommand(&Command{Use: "empty", Run: emptyRun}) + + expected := "WORKS" + c.SetHelpCommand(&Command{ + Use: "help [command]", + Short: "Help about any command", + Long: `Help provides help for any command in the application. + Simply type ` + c.Name() + ` help [path to command] for full details.`, + Run: func(c *Command, _ []string) { c.Print(expected) }, + }) + + got, err := executeCommand(c, "help") + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if got != expected { + t.Errorf("Expected to contain %q, got %q", expected, got) + } +} + +func TestHelpFlagExecuted(t *testing.T) { + rootCmd := &Command{Use: "root", Long: "Long description", Run: emptyRun} + + output, err := executeCommand(rootCmd, "--help") + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + checkStringContains(t, output, rootCmd.Long) +} + +func TestHelpFlagExecutedOnChild(t *testing.T) { + rootCmd := &Command{Use: "root", Run: emptyRun} + childCmd := &Command{Use: "child", Long: "Long description", Run: emptyRun} + rootCmd.AddCommand(childCmd) + + output, err := executeCommand(rootCmd, "child", "--help") + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + checkStringContains(t, output, childCmd.Long) +} + +// TestHelpFlagInHelp checks, +// if '--help' flag is shown in help for child (executing `parent help child`), +// that has no other flags. +// Related to https://github.com/spf13/cobra/issues/302. 
+func TestHelpFlagInHelp(t *testing.T) { + parentCmd := &Command{Use: "parent", Run: func(*Command, []string) {}} + + childCmd := &Command{Use: "child", Run: func(*Command, []string) {}} + parentCmd.AddCommand(childCmd) + + output, err := executeCommand(parentCmd, "help", "child") + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + checkStringContains(t, output, "[flags]") +} + +func TestFlagsInUsage(t *testing.T) { + rootCmd := &Command{Use: "root", Args: NoArgs, Run: func(*Command, []string) {}} + output, err := executeCommand(rootCmd, "--help") + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + checkStringContains(t, output, "[flags]") +} + +func TestHelpExecutedOnNonRunnableChild(t *testing.T) { + rootCmd := &Command{Use: "root", Run: emptyRun} + childCmd := &Command{Use: "child", Long: "Long description"} + rootCmd.AddCommand(childCmd) + + output, err := executeCommand(rootCmd, "child") + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + checkStringContains(t, output, childCmd.Long) +} + +func TestVersionFlagExecuted(t *testing.T) { + rootCmd := &Command{Use: "root", Version: "1.0.0", Run: emptyRun} + + output, err := executeCommand(rootCmd, "--version", "arg1") + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + checkStringContains(t, output, "root version 1.0.0") +} + +func TestVersionTemplate(t *testing.T) { + rootCmd := &Command{Use: "root", Version: "1.0.0", Run: emptyRun} + rootCmd.SetVersionTemplate(`customized version: {{.Version}}`) + + output, err := executeCommand(rootCmd, "--version", "arg1") + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + checkStringContains(t, output, "customized version: 1.0.0") +} + +func TestVersionFlagExecutedOnSubcommand(t *testing.T) { + rootCmd := &Command{Use: "root", Version: "1.0.0"} + rootCmd.AddCommand(&Command{Use: "sub", Run: emptyRun}) + + output, err := executeCommand(rootCmd, "--version", "sub") + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + checkStringContains(t, output, "root version 1.0.0") +} + +func TestVersionFlagOnlyAddedToRoot(t *testing.T) { + rootCmd := &Command{Use: "root", Version: "1.0.0", Run: emptyRun} + rootCmd.AddCommand(&Command{Use: "sub", Run: emptyRun}) + + _, err := executeCommand(rootCmd, "sub", "--version") + if err == nil { + t.Errorf("Expected error") + } + + checkStringContains(t, err.Error(), "unknown flag: --version") +} + +func TestVersionFlagOnlyExistsIfVersionNonEmpty(t *testing.T) { + rootCmd := &Command{Use: "root", Run: emptyRun} + + _, err := executeCommand(rootCmd, "--version") + if err == nil { + t.Errorf("Expected error") + } + checkStringContains(t, err.Error(), "unknown flag: --version") +} + +func TestUsageIsNotPrintedTwice(t *testing.T) { + var cmd = &Command{Use: "root"} + var sub = &Command{Use: "sub"} + cmd.AddCommand(sub) + + output, _ := executeCommand(cmd, "") + if strings.Count(output, "Usage:") != 1 { + t.Error("Usage output is not printed exactly once") + } +} + +func TestVisitParents(t *testing.T) { + c := &Command{Use: "app"} + sub := &Command{Use: "sub"} + dsub := &Command{Use: "dsub"} + sub.AddCommand(dsub) + c.AddCommand(sub) + + total := 0 + add := func(x *Command) { + total++ + } + sub.VisitParents(add) + if total != 1 { + t.Errorf("Should have visited 1 parent but visited %d", total) + } + + total = 0 + dsub.VisitParents(add) + if total != 2 { + t.Errorf("Should have visited 2 parents but visited %d", total) + } + + total = 0 + c.VisitParents(add) + if total != 0 { + t.Errorf("Should 
have visited no parents but visited %d", total) + } +} + +func TestSuggestions(t *testing.T) { + rootCmd := &Command{Use: "root", Run: emptyRun} + timesCmd := &Command{ + Use: "times", + SuggestFor: []string{"counts"}, + Run: emptyRun, + } + rootCmd.AddCommand(timesCmd) + + templateWithSuggestions := "Error: unknown command \"%s\" for \"root\"\n\nDid you mean this?\n\t%s\n\nRun 'root --help' for usage.\n" + templateWithoutSuggestions := "Error: unknown command \"%s\" for \"root\"\nRun 'root --help' for usage.\n" + + tests := map[string]string{ + "time": "times", + "tiems": "times", + "tims": "times", + "timeS": "times", + "rimes": "times", + "ti": "times", + "t": "times", + "timely": "times", + "ri": "", + "timezone": "", + "foo": "", + "counts": "times", + } + + for typo, suggestion := range tests { + for _, suggestionsDisabled := range []bool{true, false} { + rootCmd.DisableSuggestions = suggestionsDisabled + + var expected string + output, _ := executeCommand(rootCmd, typo) + + if suggestion == "" || suggestionsDisabled { + expected = fmt.Sprintf(templateWithoutSuggestions, typo) + } else { + expected = fmt.Sprintf(templateWithSuggestions, typo, suggestion) + } + + if output != expected { + t.Errorf("Unexpected response.\nExpected:\n %q\nGot:\n %q\n", expected, output) + } + } + } +} + +func TestRemoveCommand(t *testing.T) { + rootCmd := &Command{Use: "root", Args: NoArgs, Run: emptyRun} + childCmd := &Command{Use: "child", Run: emptyRun} + rootCmd.AddCommand(childCmd) + rootCmd.RemoveCommand(childCmd) + + _, err := executeCommand(rootCmd, "child") + if err == nil { + t.Error("Expected error on calling removed command. Got nil.") + } +} + +func TestReplaceCommandWithRemove(t *testing.T) { + childUsed := 0 + rootCmd := &Command{Use: "root", Run: emptyRun} + child1Cmd := &Command{ + Use: "child", + Run: func(*Command, []string) { childUsed = 1 }, + } + child2Cmd := &Command{ + Use: "child", + Run: func(*Command, []string) { childUsed = 2 }, + } + rootCmd.AddCommand(child1Cmd) + rootCmd.RemoveCommand(child1Cmd) + rootCmd.AddCommand(child2Cmd) + + output, err := executeCommand(rootCmd, "child") + if output != "" { + t.Errorf("Unexpected output: %v", output) + } + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if childUsed == 1 { + t.Error("Removed command shouldn't be called") + } + if childUsed != 2 { + t.Error("Replacing command should have been called but didn't") + } +} + +func TestDeprecatedCommand(t *testing.T) { + rootCmd := &Command{Use: "root", Run: emptyRun} + deprecatedCmd := &Command{ + Use: "deprecated", + Deprecated: "This command is deprecated", + Run: emptyRun, + } + rootCmd.AddCommand(deprecatedCmd) + + output, err := executeCommand(rootCmd, "deprecated") + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + checkStringContains(t, output, deprecatedCmd.Deprecated) +} + +func TestHooks(t *testing.T) { + var ( + persPreArgs string + preArgs string + runArgs string + postArgs string + persPostArgs string + ) + + c := &Command{ + Use: "c", + PersistentPreRun: func(_ *Command, args []string) { + persPreArgs = strings.Join(args, " ") + }, + PreRun: func(_ *Command, args []string) { + preArgs = strings.Join(args, " ") + }, + Run: func(_ *Command, args []string) { + runArgs = strings.Join(args, " ") + }, + PostRun: func(_ *Command, args []string) { + postArgs = strings.Join(args, " ") + }, + PersistentPostRun: func(_ *Command, args []string) { + persPostArgs = strings.Join(args, " ") + }, + } + + output, err := executeCommand(c, "one", "two") + if 
output != "" { + t.Errorf("Unexpected output: %v", output) + } + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if persPreArgs != "one two" { + t.Errorf("Expected persPreArgs %q, got %q", "one two", persPreArgs) + } + if preArgs != "one two" { + t.Errorf("Expected preArgs %q, got %q", "one two", preArgs) + } + if runArgs != "one two" { + t.Errorf("Expected runArgs %q, got %q", "one two", runArgs) + } + if postArgs != "one two" { + t.Errorf("Expected postArgs %q, got %q", "one two", postArgs) + } + if persPostArgs != "one two" { + t.Errorf("Expected persPostArgs %q, got %q", "one two", persPostArgs) + } +} + +func TestPersistentHooks(t *testing.T) { + var ( + parentPersPreArgs string + parentPreArgs string + parentRunArgs string + parentPostArgs string + parentPersPostArgs string + ) + + var ( + childPersPreArgs string + childPreArgs string + childRunArgs string + childPostArgs string + childPersPostArgs string + ) + + parentCmd := &Command{ + Use: "parent", + PersistentPreRun: func(_ *Command, args []string) { + parentPersPreArgs = strings.Join(args, " ") + }, + PreRun: func(_ *Command, args []string) { + parentPreArgs = strings.Join(args, " ") + }, + Run: func(_ *Command, args []string) { + parentRunArgs = strings.Join(args, " ") + }, + PostRun: func(_ *Command, args []string) { + parentPostArgs = strings.Join(args, " ") + }, + PersistentPostRun: func(_ *Command, args []string) { + parentPersPostArgs = strings.Join(args, " ") + }, + } + + childCmd := &Command{ + Use: "child", + PersistentPreRun: func(_ *Command, args []string) { + childPersPreArgs = strings.Join(args, " ") + }, + PreRun: func(_ *Command, args []string) { + childPreArgs = strings.Join(args, " ") + }, + Run: func(_ *Command, args []string) { + childRunArgs = strings.Join(args, " ") + }, + PostRun: func(_ *Command, args []string) { + childPostArgs = strings.Join(args, " ") + }, + PersistentPostRun: func(_ *Command, args []string) { + childPersPostArgs = strings.Join(args, " ") + }, + } + parentCmd.AddCommand(childCmd) + + output, err := executeCommand(parentCmd, "child", "one", "two") + if output != "" { + t.Errorf("Unexpected output: %v", output) + } + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + // TODO: This test fails, but should not. + // Related to https://github.com/spf13/cobra/issues/252. + // + // if parentPersPreArgs != "one two" { + // t.Errorf("Expected parentPersPreArgs %q, got %q", "one two", parentPersPreArgs) + // } + if parentPreArgs != "" { + t.Errorf("Expected blank parentPreArgs, got %q", parentPreArgs) + } + if parentRunArgs != "" { + t.Errorf("Expected blank parentRunArgs, got %q", parentRunArgs) + } + if parentPostArgs != "" { + t.Errorf("Expected blank parentPostArgs, got %q", parentPostArgs) + } + // TODO: This test fails, but should not. + // Related to https://github.com/spf13/cobra/issues/252. 
+ // + // if parentPersPostArgs != "one two" { + // t.Errorf("Expected parentPersPostArgs %q, got %q", "one two", parentPersPostArgs) + // } + + if childPersPreArgs != "one two" { + t.Errorf("Expected childPersPreArgs %q, got %q", "one two", childPersPreArgs) + } + if childPreArgs != "one two" { + t.Errorf("Expected childPreArgs %q, got %q", "one two", childPreArgs) + } + if childRunArgs != "one two" { + t.Errorf("Expected childRunArgs %q, got %q", "one two", childRunArgs) + } + if childPostArgs != "one two" { + t.Errorf("Expected childPostArgs %q, got %q", "one two", childPostArgs) + } + if childPersPostArgs != "one two" { + t.Errorf("Expected childPersPostArgs %q, got %q", "one two", childPersPostArgs) + } +} + +// Related to https://github.com/spf13/cobra/issues/521. +func TestGlobalNormFuncPropagation(t *testing.T) { + normFunc := func(f *pflag.FlagSet, name string) pflag.NormalizedName { + return pflag.NormalizedName(name) + } + + rootCmd := &Command{Use: "root", Run: emptyRun} + childCmd := &Command{Use: "child", Run: emptyRun} + rootCmd.AddCommand(childCmd) + + rootCmd.SetGlobalNormalizationFunc(normFunc) + if reflect.ValueOf(normFunc).Pointer() != reflect.ValueOf(rootCmd.GlobalNormalizationFunc()).Pointer() { + t.Error("rootCmd seems to have a wrong normalization function") + } + + if reflect.ValueOf(normFunc).Pointer() != reflect.ValueOf(childCmd.GlobalNormalizationFunc()).Pointer() { + t.Error("childCmd should have had the normalization function of rootCmd") + } +} + +// Related to https://github.com/spf13/cobra/issues/521. +func TestNormPassedOnLocal(t *testing.T) { + toUpper := func(f *pflag.FlagSet, name string) pflag.NormalizedName { + return pflag.NormalizedName(strings.ToUpper(name)) + } + + c := &Command{} + c.Flags().Bool("flagname", true, "this is a dummy flag") + c.SetGlobalNormalizationFunc(toUpper) + if c.LocalFlags().Lookup("flagname") != c.LocalFlags().Lookup("FLAGNAME") { + t.Error("Normalization function should be passed on to Local flag set") + } +} + +// Related to https://github.com/spf13/cobra/issues/521. +func TestNormPassedOnInherited(t *testing.T) { + toUpper := func(f *pflag.FlagSet, name string) pflag.NormalizedName { + return pflag.NormalizedName(strings.ToUpper(name)) + } + + c := &Command{} + c.SetGlobalNormalizationFunc(toUpper) + + child1 := &Command{} + c.AddCommand(child1) + + c.PersistentFlags().Bool("flagname", true, "") + + child2 := &Command{} + c.AddCommand(child2) + + inherited := child1.InheritedFlags() + if inherited.Lookup("flagname") == nil || inherited.Lookup("flagname") != inherited.Lookup("FLAGNAME") { + t.Error("Normalization function should be passed on to inherited flag set in command added before flag") + } + + inherited = child2.InheritedFlags() + if inherited.Lookup("flagname") == nil || inherited.Lookup("flagname") != inherited.Lookup("FLAGNAME") { + t.Error("Normalization function should be passed on to inherited flag set in command added after flag") + } +} + +// Related to https://github.com/spf13/cobra/issues/521. 
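+
+// A sketch of a common use of the normalization hook exercised above:
+// treating "_" in flag names as "-", so --my_flag resolves to the flag
+// registered as --my-flag. The test name and flag names here are
+// illustrative and not from the upstream suite.
+func TestWordSepNormalizeSketch(t *testing.T) {
+	c := &Command{Use: "c", Run: emptyRun}
+	c.Flags().Bool("my-flag", false, "")
+	c.SetGlobalNormalizationFunc(func(f *pflag.FlagSet, name string) pflag.NormalizedName {
+		return pflag.NormalizedName(strings.Replace(name, "_", "-", -1))
+	})
+	if _, err := executeCommand(c, "--my_flag"); err != nil {
+		t.Errorf("Unexpected error: %v", err)
+	}
+}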
+func TestConsistentNormalizedName(t *testing.T) { + toUpper := func(f *pflag.FlagSet, name string) pflag.NormalizedName { + return pflag.NormalizedName(strings.ToUpper(name)) + } + n := func(f *pflag.FlagSet, name string) pflag.NormalizedName { + return pflag.NormalizedName(name) + } + + c := &Command{} + c.Flags().Bool("flagname", true, "") + c.SetGlobalNormalizationFunc(toUpper) + c.SetGlobalNormalizationFunc(n) + + if c.LocalFlags().Lookup("flagname") == c.LocalFlags().Lookup("FLAGNAME") { + t.Error("Normalizing flag names should not result in duplicate flags") + } +} + +func TestFlagOnPflagCommandLine(t *testing.T) { + flagName := "flagOnCommandLine" + pflag.String(flagName, "", "about my flag") + + c := &Command{Use: "c", Run: emptyRun} + c.AddCommand(&Command{Use: "child", Run: emptyRun}) + + output, _ := executeCommand(c, "--help") + checkStringContains(t, output, flagName) + + resetCommandLineFlagSet() +} + +// TestHiddenCommandExecutes checks, +// if hidden commands run as intended. +func TestHiddenCommandExecutes(t *testing.T) { + executed := false + c := &Command{ + Use: "c", + Hidden: true, + Run: func(*Command, []string) { executed = true }, + } + + output, err := executeCommand(c) + if output != "" { + t.Errorf("Unexpected output: %v", output) + } + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if !executed { + t.Error("Hidden command should have been executed") + } +} + +// test to ensure hidden commands do not show up in usage/help text +func TestHiddenCommandIsHidden(t *testing.T) { + c := &Command{Use: "c", Hidden: true, Run: emptyRun} + if c.IsAvailableCommand() { + t.Errorf("Hidden command should be unavailable") + } +} + +func TestCommandsAreSorted(t *testing.T) { + EnableCommandSorting = true + + originalNames := []string{"middle", "zlast", "afirst"} + expectedNames := []string{"afirst", "middle", "zlast"} + + var rootCmd = &Command{Use: "root"} + + for _, name := range originalNames { + rootCmd.AddCommand(&Command{Use: name}) + } + + for i, c := range rootCmd.Commands() { + got := c.Name() + if expectedNames[i] != got { + t.Errorf("Expected: %s, got: %s", expectedNames[i], got) + } + } + + EnableCommandSorting = true +} + +func TestEnableCommandSortingIsDisabled(t *testing.T) { + EnableCommandSorting = false + + originalNames := []string{"middle", "zlast", "afirst"} + + var rootCmd = &Command{Use: "root"} + + for _, name := range originalNames { + rootCmd.AddCommand(&Command{Use: name}) + } + + for i, c := range rootCmd.Commands() { + got := c.Name() + if originalNames[i] != got { + t.Errorf("expected: %s, got: %s", originalNames[i], got) + } + } + + EnableCommandSorting = true +} + +func TestSetOutput(t *testing.T) { + c := &Command{} + c.SetOutput(nil) + if out := c.OutOrStdout(); out != os.Stdout { + t.Errorf("Expected setting output to nil to revert back to stdout") + } +} + +func TestFlagErrorFunc(t *testing.T) { + c := &Command{Use: "c", Run: emptyRun} + + expectedFmt := "This is expected: %v" + c.SetFlagErrorFunc(func(_ *Command, err error) error { + return fmt.Errorf(expectedFmt, err) + }) + + _, err := executeCommand(c, "--unknown-flag") + + got := err.Error() + expected := fmt.Sprintf(expectedFmt, "unknown flag: --unknown-flag") + if got != expected { + t.Errorf("Expected %v, got %v", expected, got) + } +} + +// TestSortedFlags checks, +// if cmd.LocalFlags() is unsorted when cmd.Flags().SortFlags set to false. +// Related to https://github.com/spf13/cobra/issues/404. 
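+
+// Ahead of the sorting test below, a sketch of SetFlagErrorFunc as exercised
+// above: an application can wrap flag parse errors with its own hint text.
+// The test name and message here are illustrative and not from the upstream
+// suite.
+func TestFlagErrorFuncHintSketch(t *testing.T) {
+	c := &Command{Use: "app", Run: emptyRun}
+	c.SetFlagErrorFunc(func(cmd *Command, err error) error {
+		return fmt.Errorf("%v; run '%s --help' for usage", err, cmd.Name())
+	})
+	_, err := executeCommand(c, "--nope")
+	if err == nil || !strings.Contains(err.Error(), "--help") {
+		t.Errorf("Expected wrapped flag error, got: %v", err)
+	}
+}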
+func TestSortedFlags(t *testing.T) { + c := &Command{} + c.Flags().SortFlags = false + names := []string{"C", "B", "A", "D"} + for _, name := range names { + c.Flags().Bool(name, false, "") + } + + i := 0 + c.LocalFlags().VisitAll(func(f *pflag.Flag) { + if i == len(names) { + return + } + if stringInSlice(f.Name, names) { + if names[i] != f.Name { + t.Errorf("Incorrect order. Expected %v, got %v", names[i], f.Name) + } + i++ + } + }) +} + +// TestMergeCommandLineToFlags checks +// that pflag.CommandLine is correctly merged into c.Flags() after the first +// call of c.mergePersistentFlags. +// Related to https://github.com/spf13/cobra/issues/443. +func TestMergeCommandLineToFlags(t *testing.T) { + pflag.Bool("boolflag", false, "") + c := &Command{Use: "c", Run: emptyRun} + c.mergePersistentFlags() + if c.Flags().Lookup("boolflag") == nil { + t.Fatal("Expecting to have flag from CommandLine in c.Flags()") + } + + resetCommandLineFlagSet() +} + +// TestUseDeprecatedFlags checks +// that cobra.Execute() prints a message when a deprecated flag is used. +// Related to https://github.com/spf13/cobra/issues/463. +func TestUseDeprecatedFlags(t *testing.T) { + c := &Command{Use: "c", Run: emptyRun} + c.Flags().BoolP("deprecated", "d", false, "deprecated flag") + c.Flags().MarkDeprecated("deprecated", "This flag is deprecated") + + output, err := executeCommand(c, "c", "-d") + if err != nil { + t.Error("Unexpected error:", err) + } + checkStringContains(t, output, "This flag is deprecated") +} + +func TestTraverseWithParentFlags(t *testing.T) { + rootCmd := &Command{Use: "root", TraverseChildren: true} + rootCmd.Flags().String("str", "", "") + rootCmd.Flags().BoolP("bool", "b", false, "") + + childCmd := &Command{Use: "child"} + childCmd.Flags().Int("int", -1, "") + + rootCmd.AddCommand(childCmd) + + c, args, err := rootCmd.Traverse([]string{"-b", "--str", "ok", "child", "--int"}) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if len(args) != 1 || args[0] != "--int" { + t.Errorf("Wrong args: %v", args) + } + if c.Name() != childCmd.Name() { + t.Errorf("Expected command: %q, got: %q", childCmd.Name(), c.Name()) + } +} + +func TestTraverseNoParentFlags(t *testing.T) { + rootCmd := &Command{Use: "root", TraverseChildren: true} + rootCmd.Flags().String("foo", "", "foo things") + + childCmd := &Command{Use: "child"} + childCmd.Flags().String("str", "", "") + rootCmd.AddCommand(childCmd) + + c, args, err := rootCmd.Traverse([]string{"child"}) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if len(args) != 0 { + t.Errorf("Wrong args %v", args) + } + if c.Name() != childCmd.Name() { + t.Errorf("Expected command: %q, got: %q", childCmd.Name(), c.Name()) + } +} + +func TestTraverseWithBadParentFlags(t *testing.T) { + rootCmd := &Command{Use: "root", TraverseChildren: true} + + childCmd := &Command{Use: "child"} + childCmd.Flags().String("str", "", "") + rootCmd.AddCommand(childCmd) + + expected := "unknown flag: --str" + + c, _, err := rootCmd.Traverse([]string{"--str", "ok", "child"}) + if err == nil || !strings.Contains(err.Error(), expected) { + t.Errorf("Expected error, %q, got %q", expected, err) + } + if c != nil { + t.Errorf("Expected nil command") + } +} + +func TestTraverseWithBadChildFlag(t *testing.T) { + rootCmd := &Command{Use: "root", TraverseChildren: true} + rootCmd.Flags().String("str", "", "") + + childCmd := &Command{Use: "child"} + rootCmd.AddCommand(childCmd) + + // Expect no error because the last command's args shouldn't be parsed in + // Traverse.
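+	// (A sketch of the behaviour under test, with an illustrative command
+	// line: with TraverseChildren enabled a user may interleave parent flags
+	// and the subcommand name, e.g.
+	//
+	//	app --config dev.yaml child --port 8080
+	//
+	// Traverse stops flag parsing at the final command, so that command's
+	// own args, here "--str", are left for ParseFlags during execution.)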
+	c, args, err := rootCmd.Traverse([]string{"child", "--str"})
+	if err != nil {
+		t.Errorf("Unexpected error: %v", err)
+	}
+	if len(args) != 1 || args[0] != "--str" {
+		t.Errorf("Wrong args: %v", args)
+	}
+	if c.Name() != childCmd.Name() {
+		t.Errorf("Expected command %q, got: %q", childCmd.Name(), c.Name())
+	}
+}
+
+func TestTraverseWithTwoSubcommands(t *testing.T) {
+	rootCmd := &Command{Use: "root", TraverseChildren: true}
+
+	subCmd := &Command{Use: "sub", TraverseChildren: true}
+	rootCmd.AddCommand(subCmd)
+
+	subsubCmd := &Command{
+		Use: "subsub",
+	}
+	subCmd.AddCommand(subsubCmd)
+
+	c, _, err := rootCmd.Traverse([]string{"sub", "subsub"})
+	if err != nil {
+		t.Fatalf("Unexpected error: %v", err)
+	}
+	if c.Name() != subsubCmd.Name() {
+		t.Fatalf("Expected command: %q, got %q", subsubCmd.Name(), c.Name())
+	}
+}
+
+// TestUpdateName checks if c.Name() updates on changed c.Use.
+// Related to https://github.com/spf13/cobra/pull/422#discussion_r143918343.
+func TestUpdateName(t *testing.T) {
+	c := &Command{Use: "name xyz"}
+	originalName := c.Name()
+
+	c.Use = "changedName abc"
+	if originalName == c.Name() || c.Name() != "changedName" {
+		t.Error("c.Name() should be updated on changed c.Use")
+	}
+}
+
+type calledAsTestcase struct {
+	args []string
+	call string
+	want string
+	epm  bool
+	tc   bool
+}
+
+func (tc *calledAsTestcase) test(t *testing.T) {
+	defer func(ov bool) { EnablePrefixMatching = ov }(EnablePrefixMatching)
+	EnablePrefixMatching = tc.epm
+
+	var called *Command
+	run := func(c *Command, _ []string) { t.Logf("called: %q", c.Name()); called = c }
+
+	parent := &Command{Use: "parent", Run: run}
+	child1 := &Command{Use: "child1", Run: run, Aliases: []string{"this"}}
+	child2 := &Command{Use: "child2", Run: run, Aliases: []string{"that"}}
+
+	parent.AddCommand(child1)
+	parent.AddCommand(child2)
+	parent.SetArgs(tc.args)
+
+	output := new(bytes.Buffer)
+	parent.SetOutput(output)
+
+	parent.Execute()
+
+	if called == nil {
+		if tc.call != "" {
+			t.Errorf("missing expected call to command: %s", tc.call)
+		}
+		return
+	}
+
+	if called.Name() != tc.call {
+		t.Errorf("called command == %q; Wanted %q", called.Name(), tc.call)
+	} else if got := called.CalledAs(); got != tc.want {
+		t.Errorf("%s.CalledAs() == %q; Wanted: %q", tc.call, got, tc.want)
+	}
+}
+
+func TestCalledAs(t *testing.T) {
+	tests := map[string]calledAsTestcase{
+		"find/no-args":            {nil, "parent", "parent", false, false},
+		"find/real-name":          {[]string{"child1"}, "child1", "child1", false, false},
+		"find/full-alias":         {[]string{"that"}, "child2", "that", false, false},
+		"find/part-no-prefix":     {[]string{"thi"}, "", "", false, false},
+		"find/part-alias":         {[]string{"thi"}, "child1", "this", true, false},
+		"find/conflict":           {[]string{"th"}, "", "", true, false},
+		"traverse/no-args":        {nil, "parent", "parent", false, true},
+		"traverse/real-name":      {[]string{"child1"}, "child1", "child1", false, true},
+		"traverse/full-alias":     {[]string{"that"}, "child2", "that", false, true},
+		"traverse/part-no-prefix": {[]string{"thi"}, "", "", false, true},
+		"traverse/part-alias":     {[]string{"thi"}, "child1", "this", true, true},
+		"traverse/conflict":       {[]string{"th"}, "", "", true, true},
+	}
+
+	for name, tc := range tests {
+		t.Run(name, tc.test)
+	}
+}
diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go
new file mode 100644
index 0000000..edec728
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command_win.go
@@ -0,0 +1,20 @@
+// +build windows
+
+package cobra
+
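+// This file wires up a Windows-only pre-execution hook: when the binary was
+// started by double-clicking in Explorer (detected via
+// github.com/inconshreveable/mousetrap), cobra prints MousetrapHelpText,
+// sleeps for five seconds, and exits instead of running the command.
+// Applications can opt out through the exported variable checked below:
+//
+//	cobra.MousetrapHelpText = "" // disable the Explorer warning entirely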
+import ( + "os" + "time" + + "github.com/inconshreveable/mousetrap" +) + +var preExecHookFn = preExecHook + +func preExecHook(c *Command) { + if MousetrapHelpText != "" && mousetrap.StartedByExplorer() { + c.Print(MousetrapHelpText) + time.Sleep(5 * time.Second) + os.Exit(1) + } +} diff --git a/vendor/github.com/spf13/cobra/doc/cmd_test.go b/vendor/github.com/spf13/cobra/doc/cmd_test.go new file mode 100644 index 0000000..d29c577 --- /dev/null +++ b/vendor/github.com/spf13/cobra/doc/cmd_test.go @@ -0,0 +1,86 @@ +package doc + +import ( + "strings" + "testing" + + "github.com/spf13/cobra" +) + +func emptyRun(*cobra.Command, []string) {} + +func init() { + rootCmd.PersistentFlags().StringP("rootflag", "r", "two", "") + rootCmd.PersistentFlags().StringP("strtwo", "t", "two", "help message for parent flag strtwo") + + echoCmd.PersistentFlags().StringP("strone", "s", "one", "help message for flag strone") + echoCmd.PersistentFlags().BoolP("persistentbool", "p", false, "help message for flag persistentbool") + echoCmd.Flags().IntP("intone", "i", 123, "help message for flag intone") + echoCmd.Flags().BoolP("boolone", "b", true, "help message for flag boolone") + + timesCmd.PersistentFlags().StringP("strtwo", "t", "2", "help message for child flag strtwo") + timesCmd.Flags().IntP("inttwo", "j", 234, "help message for flag inttwo") + timesCmd.Flags().BoolP("booltwo", "c", false, "help message for flag booltwo") + + printCmd.PersistentFlags().StringP("strthree", "s", "three", "help message for flag strthree") + printCmd.Flags().IntP("intthree", "i", 345, "help message for flag intthree") + printCmd.Flags().BoolP("boolthree", "b", true, "help message for flag boolthree") + + echoCmd.AddCommand(timesCmd, echoSubCmd, deprecatedCmd) + rootCmd.AddCommand(printCmd, echoCmd) +} + +var rootCmd = &cobra.Command{ + Use: "root", + Short: "Root short description", + Long: "Root long description", + Run: emptyRun, +} + +var echoCmd = &cobra.Command{ + Use: "echo [string to echo]", + Aliases: []string{"say"}, + Short: "Echo anything to the screen", + Long: "an utterly useless command for testing", + Example: "Just run cobra-test echo", +} + +var echoSubCmd = &cobra.Command{ + Use: "echosub [string to print]", + Short: "second sub command for echo", + Long: "an absolutely utterly useless command for testing gendocs!.", + Run: emptyRun, +} + +var timesCmd = &cobra.Command{ + Use: "times [# times] [string to echo]", + SuggestFor: []string{"counts"}, + Short: "Echo anything to the screen more times", + Long: `a slightly useless command for testing.`, + Run: emptyRun, +} + +var deprecatedCmd = &cobra.Command{ + Use: "deprecated [can't do anything here]", + Short: "A command which is deprecated", + Long: `an absolutely utterly useless command for testing deprecation!.`, + Deprecated: "Please use echo instead", +} + +var printCmd = &cobra.Command{ + Use: "print [string to print]", + Short: "Print anything to the screen", + Long: `an absolutely utterly useless command for testing.`, +} + +func checkStringContains(t *testing.T, got, expected string) { + if !strings.Contains(got, expected) { + t.Errorf("Expected to contain: \n %v\nGot:\n %v\n", expected, got) + } +} + +func checkStringOmits(t *testing.T, got, expected string) { + if strings.Contains(got, expected) { + t.Errorf("Expected to not contain: \n %v\nGot: %v", expected, got) + } +} diff --git a/vendor/github.com/spf13/cobra/doc/man_docs.go b/vendor/github.com/spf13/cobra/doc/man_docs.go new file mode 100644 index 0000000..ce92332 --- /dev/null +++ 
b/vendor/github.com/spf13/cobra/doc/man_docs.go @@ -0,0 +1,236 @@ +// Copyright 2015 Red Hat Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package doc + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/cpuguy83/go-md2man/md2man" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +// GenManTree will generate a man page for this command and all descendants +// in the directory given. The header may be nil. This function may not work +// correctly if your command names have `-` in them. If you have `cmd` with two +// subcmds, `sub` and `sub-third`, and `sub` has a subcommand called `third` +// it is undefined which help output will be in the file `cmd-sub-third.1`. +func GenManTree(cmd *cobra.Command, header *GenManHeader, dir string) error { + return GenManTreeFromOpts(cmd, GenManTreeOptions{ + Header: header, + Path: dir, + CommandSeparator: "-", + }) +} + +// GenManTreeFromOpts generates a man page for the command and all descendants. +// The pages are written to the opts.Path directory. +func GenManTreeFromOpts(cmd *cobra.Command, opts GenManTreeOptions) error { + header := opts.Header + if header == nil { + header = &GenManHeader{} + } + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { + continue + } + if err := GenManTreeFromOpts(c, opts); err != nil { + return err + } + } + section := "1" + if header.Section != "" { + section = header.Section + } + + separator := "_" + if opts.CommandSeparator != "" { + separator = opts.CommandSeparator + } + basename := strings.Replace(cmd.CommandPath(), " ", separator, -1) + filename := filepath.Join(opts.Path, basename+"."+section) + f, err := os.Create(filename) + if err != nil { + return err + } + defer f.Close() + + headerCopy := *header + return GenMan(cmd, &headerCopy, f) +} + +// GenManTreeOptions is the options for generating the man pages. +// Used only in GenManTreeFromOpts. +type GenManTreeOptions struct { + Header *GenManHeader + Path string + CommandSeparator string +} + +// GenManHeader is a lot like the .TH header at the start of man pages. These +// include the title, section, date, source, and manual. We will use the +// current time if Date if unset and will use "Auto generated by spf13/cobra" +// if the Source is unset. +type GenManHeader struct { + Title string + Section string + Date *time.Time + date string + Source string + Manual string +} + +// GenMan will generate a man page for the given command and write it to +// w. The header argument may be nil, however obviously w may not. 
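+// A minimal use (sketch): render a single page into memory rather than a file:
+//
+//	buf := new(bytes.Buffer)
+//	if err := GenMan(cmd, &GenManHeader{Section: "8"}, buf); err != nil {
+//		// handle the error
+//	}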
+func GenMan(cmd *cobra.Command, header *GenManHeader, w io.Writer) error { + if header == nil { + header = &GenManHeader{} + } + fillHeader(header, cmd.CommandPath()) + + b := genMan(cmd, header) + _, err := w.Write(md2man.Render(b)) + return err +} + +func fillHeader(header *GenManHeader, name string) { + if header.Title == "" { + header.Title = strings.ToUpper(strings.Replace(name, " ", "\\-", -1)) + } + if header.Section == "" { + header.Section = "1" + } + if header.Date == nil { + now := time.Now() + header.Date = &now + } + header.date = (*header.Date).Format("Jan 2006") + if header.Source == "" { + header.Source = "Auto generated by spf13/cobra" + } +} + +func manPreamble(buf *bytes.Buffer, header *GenManHeader, cmd *cobra.Command, dashedName string) { + description := cmd.Long + if len(description) == 0 { + description = cmd.Short + } + + buf.WriteString(fmt.Sprintf(`%% %s(%s)%s +%% %s +%% %s +# NAME +`, header.Title, header.Section, header.date, header.Source, header.Manual)) + buf.WriteString(fmt.Sprintf("%s \\- %s\n\n", dashedName, cmd.Short)) + buf.WriteString("# SYNOPSIS\n") + buf.WriteString(fmt.Sprintf("**%s**\n\n", cmd.UseLine())) + buf.WriteString("# DESCRIPTION\n") + buf.WriteString(description + "\n\n") +} + +func manPrintFlags(buf *bytes.Buffer, flags *pflag.FlagSet) { + flags.VisitAll(func(flag *pflag.Flag) { + if len(flag.Deprecated) > 0 || flag.Hidden { + return + } + format := "" + if len(flag.Shorthand) > 0 && len(flag.ShorthandDeprecated) == 0 { + format = fmt.Sprintf("**-%s**, **--%s**", flag.Shorthand, flag.Name) + } else { + format = fmt.Sprintf("**--%s**", flag.Name) + } + if len(flag.NoOptDefVal) > 0 { + format += "[" + } + if flag.Value.Type() == "string" { + // put quotes on the value + format += "=%q" + } else { + format += "=%s" + } + if len(flag.NoOptDefVal) > 0 { + format += "]" + } + format += "\n\t%s\n\n" + buf.WriteString(fmt.Sprintf(format, flag.DefValue, flag.Usage)) + }) +} + +func manPrintOptions(buf *bytes.Buffer, command *cobra.Command) { + flags := command.NonInheritedFlags() + if flags.HasFlags() { + buf.WriteString("# OPTIONS\n") + manPrintFlags(buf, flags) + buf.WriteString("\n") + } + flags = command.InheritedFlags() + if flags.HasFlags() { + buf.WriteString("# OPTIONS INHERITED FROM PARENT COMMANDS\n") + manPrintFlags(buf, flags) + buf.WriteString("\n") + } +} + +func genMan(cmd *cobra.Command, header *GenManHeader) []byte { + cmd.InitDefaultHelpCmd() + cmd.InitDefaultHelpFlag() + + // something like `rootcmd-subcmd1-subcmd2` + dashCommandName := strings.Replace(cmd.CommandPath(), " ", "-", -1) + + buf := new(bytes.Buffer) + + manPreamble(buf, header, cmd, dashCommandName) + manPrintOptions(buf, cmd) + if len(cmd.Example) > 0 { + buf.WriteString("# EXAMPLE\n") + buf.WriteString(fmt.Sprintf("```\n%s\n```\n", cmd.Example)) + } + if hasSeeAlso(cmd) { + buf.WriteString("# SEE ALSO\n") + seealsos := make([]string, 0) + if cmd.HasParent() { + parentPath := cmd.Parent().CommandPath() + dashParentPath := strings.Replace(parentPath, " ", "-", -1) + seealso := fmt.Sprintf("**%s(%s)**", dashParentPath, header.Section) + seealsos = append(seealsos, seealso) + cmd.VisitParents(func(c *cobra.Command) { + if c.DisableAutoGenTag { + cmd.DisableAutoGenTag = c.DisableAutoGenTag + } + }) + } + children := cmd.Commands() + sort.Sort(byName(children)) + for _, c := range children { + if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { + continue + } + seealso := fmt.Sprintf("**%s-%s(%s)**", dashCommandName, c.Name(), header.Section) + seealsos = 
append(seealsos, seealso) + } + buf.WriteString(strings.Join(seealsos, ", ") + "\n") + } + if !cmd.DisableAutoGenTag { + buf.WriteString(fmt.Sprintf("# HISTORY\n%s Auto generated by spf13/cobra\n", header.Date.Format("2-Jan-2006"))) + } + return buf.Bytes() +} diff --git a/vendor/github.com/spf13/cobra/doc/man_docs.md b/vendor/github.com/spf13/cobra/doc/man_docs.md new file mode 100644 index 0000000..3709160 --- /dev/null +++ b/vendor/github.com/spf13/cobra/doc/man_docs.md @@ -0,0 +1,31 @@ +# Generating Man Pages For Your Own cobra.Command + +Generating man pages from a cobra command is incredibly easy. An example is as follows: + +```go +package main + +import ( + "log" + + "github.com/spf13/cobra" + "github.com/spf13/cobra/doc" +) + +func main() { + cmd := &cobra.Command{ + Use: "test", + Short: "my test program", + } + header := &doc.GenManHeader{ + Title: "MINE", + Section: "3", + } + err := doc.GenManTree(cmd, header, "/tmp") + if err != nil { + log.Fatal(err) + } +} +``` + +That will get you a man page `/tmp/test.3` diff --git a/vendor/github.com/spf13/cobra/doc/man_docs_test.go b/vendor/github.com/spf13/cobra/doc/man_docs_test.go new file mode 100644 index 0000000..62f85e4 --- /dev/null +++ b/vendor/github.com/spf13/cobra/doc/man_docs_test.go @@ -0,0 +1,177 @@ +package doc + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/spf13/cobra" +) + +func translate(in string) string { + return strings.Replace(in, "-", "\\-", -1) +} + +func TestGenManDoc(t *testing.T) { + header := &GenManHeader{ + Title: "Project", + Section: "2", + } + + // We generate on a subcommand so we have both subcommands and parents + buf := new(bytes.Buffer) + if err := GenMan(echoCmd, header, buf); err != nil { + t.Fatal(err) + } + output := buf.String() + + // Make sure parent has - in CommandPath() in SEE ALSO: + parentPath := echoCmd.Parent().CommandPath() + dashParentPath := strings.Replace(parentPath, " ", "-", -1) + expected := translate(dashParentPath) + expected = expected + "(" + header.Section + ")" + checkStringContains(t, output, expected) + + checkStringContains(t, output, translate(echoCmd.Name())) + checkStringContains(t, output, translate(echoCmd.Name())) + checkStringContains(t, output, "boolone") + checkStringContains(t, output, "rootflag") + checkStringContains(t, output, translate(rootCmd.Name())) + checkStringContains(t, output, translate(echoSubCmd.Name())) + checkStringOmits(t, output, translate(deprecatedCmd.Name())) + checkStringContains(t, output, translate("Auto generated")) +} + +func TestGenManNoGenTag(t *testing.T) { + echoCmd.DisableAutoGenTag = true + defer func() { echoCmd.DisableAutoGenTag = false }() + + header := &GenManHeader{ + Title: "Project", + Section: "2", + } + + // We generate on a subcommand so we have both subcommands and parents + buf := new(bytes.Buffer) + if err := GenMan(echoCmd, header, buf); err != nil { + t.Fatal(err) + } + output := buf.String() + + unexpected := translate("#HISTORY") + checkStringOmits(t, output, unexpected) +} + +func TestGenManSeeAlso(t *testing.T) { + rootCmd := &cobra.Command{Use: "root", Run: emptyRun} + aCmd := &cobra.Command{Use: "aaa", Run: emptyRun, Hidden: true} // #229 + bCmd := &cobra.Command{Use: "bbb", Run: emptyRun} + cCmd := &cobra.Command{Use: "ccc", Run: emptyRun} + rootCmd.AddCommand(aCmd, bCmd, cCmd) + + buf := new(bytes.Buffer) + header := &GenManHeader{} + if err := GenMan(rootCmd, header, buf); err != nil { + t.Fatal(err) + } + scanner := 
bufio.NewScanner(buf) + + if err := assertLineFound(scanner, ".SH SEE ALSO"); err != nil { + t.Fatalf("Couldn't find SEE ALSO section header: %v", err) + } + if err := assertNextLineEquals(scanner, ".PP"); err != nil { + t.Fatalf("First line after SEE ALSO wasn't break-indent: %v", err) + } + if err := assertNextLineEquals(scanner, `\fBroot\-bbb(1)\fP, \fBroot\-ccc(1)\fP`); err != nil { + t.Fatalf("Second line after SEE ALSO wasn't correct: %v", err) + } +} + +func TestManPrintFlagsHidesShortDeperecated(t *testing.T) { + c := &cobra.Command{} + c.Flags().StringP("foo", "f", "default", "Foo flag") + c.Flags().MarkShorthandDeprecated("foo", "don't use it no more") + + buf := new(bytes.Buffer) + manPrintFlags(buf, c.Flags()) + + got := buf.String() + expected := "**--foo**=\"default\"\n\tFoo flag\n\n" + if got != expected { + t.Errorf("Expected %v, got %v", expected, got) + } +} + +func TestGenManTree(t *testing.T) { + c := &cobra.Command{Use: "do [OPTIONS] arg1 arg2"} + header := &GenManHeader{Section: "2"} + tmpdir, err := ioutil.TempDir("", "test-gen-man-tree") + if err != nil { + t.Fatalf("Failed to create tmpdir: %s", err.Error()) + } + defer os.RemoveAll(tmpdir) + + if err := GenManTree(c, header, tmpdir); err != nil { + t.Fatalf("GenManTree failed: %s", err.Error()) + } + + if _, err := os.Stat(filepath.Join(tmpdir, "do.2")); err != nil { + t.Fatalf("Expected file 'do.2' to exist") + } + + if header.Title != "" { + t.Fatalf("Expected header.Title to be unmodified") + } +} + +func assertLineFound(scanner *bufio.Scanner, expectedLine string) error { + for scanner.Scan() { + line := scanner.Text() + if line == expectedLine { + return nil + } + } + + if err := scanner.Err(); err != nil { + return fmt.Errorf("scan failed: %s", err) + } + + return fmt.Errorf("hit EOF before finding %v", expectedLine) +} + +func assertNextLineEquals(scanner *bufio.Scanner, expectedLine string) error { + if scanner.Scan() { + line := scanner.Text() + if line == expectedLine { + return nil + } + return fmt.Errorf("got %v, not %v", line, expectedLine) + } + + if err := scanner.Err(); err != nil { + return fmt.Errorf("scan failed: %v", err) + } + + return fmt.Errorf("hit EOF before finding %v", expectedLine) +} + +func BenchmarkGenManToFile(b *testing.B) { + file, err := ioutil.TempFile("", "") + if err != nil { + b.Fatal(err) + } + defer os.Remove(file.Name()) + defer file.Close() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if err := GenMan(rootCmd, nil, file); err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/spf13/cobra/doc/man_examples_test.go b/vendor/github.com/spf13/cobra/doc/man_examples_test.go new file mode 100644 index 0000000..db66042 --- /dev/null +++ b/vendor/github.com/spf13/cobra/doc/man_examples_test.go @@ -0,0 +1,35 @@ +package doc_test + +import ( + "bytes" + "fmt" + + "github.com/spf13/cobra" + "github.com/spf13/cobra/doc" +) + +func ExampleGenManTree() { + cmd := &cobra.Command{ + Use: "test", + Short: "my test program", + } + header := &doc.GenManHeader{ + Title: "MINE", + Section: "3", + } + doc.GenManTree(cmd, header, "/tmp") +} + +func ExampleGenMan() { + cmd := &cobra.Command{ + Use: "test", + Short: "my test program", + } + header := &doc.GenManHeader{ + Title: "MINE", + Section: "3", + } + out := new(bytes.Buffer) + doc.GenMan(cmd, header, out) + fmt.Print(out.String()) +} diff --git a/vendor/github.com/spf13/cobra/doc/md_docs.go b/vendor/github.com/spf13/cobra/doc/md_docs.go new file mode 100644 index 0000000..d7a2c2b --- /dev/null +++ 
b/vendor/github.com/spf13/cobra/doc/md_docs.go @@ -0,0 +1,159 @@ +//Copyright 2015 Red Hat Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package doc + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/spf13/cobra" +) + +func printOptions(buf *bytes.Buffer, cmd *cobra.Command, name string) error { + flags := cmd.NonInheritedFlags() + flags.SetOutput(buf) + if flags.HasFlags() { + buf.WriteString("### Options\n\n```\n") + flags.PrintDefaults() + buf.WriteString("```\n\n") + } + + parentFlags := cmd.InheritedFlags() + parentFlags.SetOutput(buf) + if parentFlags.HasFlags() { + buf.WriteString("### Options inherited from parent commands\n\n```\n") + parentFlags.PrintDefaults() + buf.WriteString("```\n\n") + } + return nil +} + +// GenMarkdown creates markdown output. +func GenMarkdown(cmd *cobra.Command, w io.Writer) error { + return GenMarkdownCustom(cmd, w, func(s string) string { return s }) +} + +// GenMarkdownCustom creates custom markdown output. +func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) string) error { + cmd.InitDefaultHelpCmd() + cmd.InitDefaultHelpFlag() + + buf := new(bytes.Buffer) + name := cmd.CommandPath() + + short := cmd.Short + long := cmd.Long + if len(long) == 0 { + long = short + } + + buf.WriteString("## " + name + "\n\n") + buf.WriteString(short + "\n\n") + buf.WriteString("### Synopsis\n\n") + buf.WriteString(long + "\n\n") + + if cmd.Runnable() { + buf.WriteString(fmt.Sprintf("```\n%s\n```\n\n", cmd.UseLine())) + } + + if len(cmd.Example) > 0 { + buf.WriteString("### Examples\n\n") + buf.WriteString(fmt.Sprintf("```\n%s\n```\n\n", cmd.Example)) + } + + if err := printOptions(buf, cmd, name); err != nil { + return err + } + if hasSeeAlso(cmd) { + buf.WriteString("### SEE ALSO\n\n") + if cmd.HasParent() { + parent := cmd.Parent() + pname := parent.CommandPath() + link := pname + ".md" + link = strings.Replace(link, " ", "_", -1) + buf.WriteString(fmt.Sprintf("* [%s](%s)\t - %s\n", pname, linkHandler(link), parent.Short)) + cmd.VisitParents(func(c *cobra.Command) { + if c.DisableAutoGenTag { + cmd.DisableAutoGenTag = c.DisableAutoGenTag + } + }) + } + + children := cmd.Commands() + sort.Sort(byName(children)) + + for _, child := range children { + if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() { + continue + } + cname := name + " " + child.Name() + link := cname + ".md" + link = strings.Replace(link, " ", "_", -1) + buf.WriteString(fmt.Sprintf("* [%s](%s)\t - %s\n", cname, linkHandler(link), child.Short)) + } + buf.WriteString("\n") + } + if !cmd.DisableAutoGenTag { + buf.WriteString("###### Auto generated by spf13/cobra on " + time.Now().Format("2-Jan-2006") + "\n") + } + _, err := buf.WriteTo(w) + return err +} + +// GenMarkdownTree will generate a markdown page for this command and all +// descendants in the directory given. The header may be nil. 
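+// Pages are written one per command, named after CommandPath() with spaces
+// replaced by underscores and a ".md" extension appended.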
+// This function may not work correctly if your command names have `-` in them.
+// If you have `cmd` with two subcmds, `sub` and `sub-third`,
+// and `sub` has a subcommand called `third`, it is undefined which
+// help output will be in the file `cmd-sub-third.md`.
+func GenMarkdownTree(cmd *cobra.Command, dir string) error {
+	identity := func(s string) string { return s }
+	emptyStr := func(s string) string { return "" }
+	return GenMarkdownTreeCustom(cmd, dir, emptyStr, identity)
+}
+
+// GenMarkdownTreeCustom is the same as GenMarkdownTree, but
+// with custom filePrepender and linkHandler.
+func GenMarkdownTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHandler func(string) string) error {
+	for _, c := range cmd.Commands() {
+		if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
+			continue
+		}
+		if err := GenMarkdownTreeCustom(c, dir, filePrepender, linkHandler); err != nil {
+			return err
+		}
+	}
+
+	basename := strings.Replace(cmd.CommandPath(), " ", "_", -1) + ".md"
+	filename := filepath.Join(dir, basename)
+	f, err := os.Create(filename)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	if _, err := io.WriteString(f, filePrepender(filename)); err != nil {
+		return err
+	}
+	if err := GenMarkdownCustom(cmd, f, linkHandler); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/spf13/cobra/doc/md_docs.md b/vendor/github.com/spf13/cobra/doc/md_docs.md
new file mode 100644
index 0000000..56ce9fe
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/doc/md_docs.md
@@ -0,0 +1,115 @@
+# Generating Markdown Docs For Your Own cobra.Command
+
+Generating Markdown docs from a cobra command is incredibly easy. An example is as follows:
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/spf13/cobra"
+	"github.com/spf13/cobra/doc"
+)
+
+func main() {
+	cmd := &cobra.Command{
+		Use:   "test",
+		Short: "my test program",
+	}
+	err := doc.GenMarkdownTree(cmd, "/tmp")
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+```
+
+That will get you a Markdown document `/tmp/test.md`.
+
+## Generate markdown docs for the entire command tree
+
+This program can actually generate docs for the kubectl command in the kubernetes project:
+
+```go
+package main
+
+import (
+	"log"
+	"io/ioutil"
+	"os"
+
+	"k8s.io/kubernetes/pkg/kubectl/cmd"
+	cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
+
+	"github.com/spf13/cobra/doc"
+)
+
+func main() {
+	kubectl := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard)
+	err := doc.GenMarkdownTree(kubectl, "./")
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+```
+
+This will generate a whole series of files, one for each command in the tree, in the directory specified (in this case "./").
+
+## Generate markdown docs for a single command
+
+You may wish to have more control over the output, or only generate for a single command, instead of the entire command tree. If this is the case you may prefer to use `GenMarkdown` instead of `GenMarkdownTree`:
+
+```go
+	out := new(bytes.Buffer)
+	err := doc.GenMarkdown(cmd, out)
+	if err != nil {
+		log.Fatal(err)
+	}
+```
+
+This will write the Markdown doc for ONLY "cmd" into the `out` buffer.
+
+## Customize the output
+
+Both `GenMarkdown` and `GenMarkdownTree` have alternate versions with callbacks to get some control of the output:
+
+```go
+func GenMarkdownTreeCustom(cmd *Command, dir string, filePrepender, linkHandler func(string) string) error {
+	//...
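+	// filePrepender receives the full path of the file about to be written and
+	// returns text to place at its top (e.g. Hugo front matter); it runs once
+	// per generated page. linkHandler rewrites each cross-link in the SEE ALSO
+	// section, given the target file name.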
+} +``` + +```go +func GenMarkdownCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string) string) error { + //... +} +``` + +The `filePrepender` will prepend the return value given the full filepath to the rendered Markdown file. A common use case is to add front matter to use the generated documentation with [Hugo](http://gohugo.io/): + +```go +const fmTemplate = `--- +date: %s +title: "%s" +slug: %s +url: %s +--- +` + +filePrepender := func(filename string) string { + now := time.Now().Format(time.RFC3339) + name := filepath.Base(filename) + base := strings.TrimSuffix(name, path.Ext(name)) + url := "/commands/" + strings.ToLower(base) + "/" + return fmt.Sprintf(fmTemplate, now, strings.Replace(base, "_", " ", -1), base, url) +} +``` + +The `linkHandler` can be used to customize the rendered internal links to the commands, given a filename: + +```go +linkHandler := func(name string) string { + base := strings.TrimSuffix(name, path.Ext(name)) + return "/commands/" + strings.ToLower(base) + "/" +} +``` diff --git a/vendor/github.com/spf13/cobra/doc/md_docs_test.go b/vendor/github.com/spf13/cobra/doc/md_docs_test.go new file mode 100644 index 0000000..b0fa68c --- /dev/null +++ b/vendor/github.com/spf13/cobra/doc/md_docs_test.go @@ -0,0 +1,74 @@ +package doc + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/spf13/cobra" +) + +func TestGenMdDoc(t *testing.T) { + // We generate on subcommand so we have both subcommands and parents. + buf := new(bytes.Buffer) + if err := GenMarkdown(echoCmd, buf); err != nil { + t.Fatal(err) + } + output := buf.String() + + checkStringContains(t, output, echoCmd.Long) + checkStringContains(t, output, echoCmd.Example) + checkStringContains(t, output, "boolone") + checkStringContains(t, output, "rootflag") + checkStringContains(t, output, rootCmd.Short) + checkStringContains(t, output, echoSubCmd.Short) + checkStringOmits(t, output, deprecatedCmd.Short) +} + +func TestGenMdNoTag(t *testing.T) { + rootCmd.DisableAutoGenTag = true + defer func() { rootCmd.DisableAutoGenTag = false }() + + buf := new(bytes.Buffer) + if err := GenMarkdown(rootCmd, buf); err != nil { + t.Fatal(err) + } + output := buf.String() + + checkStringOmits(t, output, "Auto generated") +} + +func TestGenMdTree(t *testing.T) { + c := &cobra.Command{Use: "do [OPTIONS] arg1 arg2"} + tmpdir, err := ioutil.TempDir("", "test-gen-md-tree") + if err != nil { + t.Fatalf("Failed to create tmpdir: %v", err) + } + defer os.RemoveAll(tmpdir) + + if err := GenMarkdownTree(c, tmpdir); err != nil { + t.Fatalf("GenMarkdownTree failed: %v", err) + } + + if _, err := os.Stat(filepath.Join(tmpdir, "do.md")); err != nil { + t.Fatalf("Expected file 'do.md' to exist") + } +} + +func BenchmarkGenMarkdownToFile(b *testing.B) { + file, err := ioutil.TempFile("", "") + if err != nil { + b.Fatal(err) + } + defer os.Remove(file.Name()) + defer file.Close() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if err := GenMarkdown(rootCmd, file); err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/spf13/cobra/doc/rest_docs.go b/vendor/github.com/spf13/cobra/doc/rest_docs.go new file mode 100644 index 0000000..4913e3e --- /dev/null +++ b/vendor/github.com/spf13/cobra/doc/rest_docs.go @@ -0,0 +1,185 @@ +//Copyright 2015 Red Hat Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package doc + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/spf13/cobra" +) + +func printOptionsReST(buf *bytes.Buffer, cmd *cobra.Command, name string) error { + flags := cmd.NonInheritedFlags() + flags.SetOutput(buf) + if flags.HasFlags() { + buf.WriteString("Options\n") + buf.WriteString("~~~~~~~\n\n::\n\n") + flags.PrintDefaults() + buf.WriteString("\n") + } + + parentFlags := cmd.InheritedFlags() + parentFlags.SetOutput(buf) + if parentFlags.HasFlags() { + buf.WriteString("Options inherited from parent commands\n") + buf.WriteString("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n") + parentFlags.PrintDefaults() + buf.WriteString("\n") + } + return nil +} + +// linkHandler for default ReST hyperlink markup +func defaultLinkHandler(name, ref string) string { + return fmt.Sprintf("`%s <%s.rst>`_", name, ref) +} + +// GenReST creates reStructured Text output. +func GenReST(cmd *cobra.Command, w io.Writer) error { + return GenReSTCustom(cmd, w, defaultLinkHandler) +} + +// GenReSTCustom creates custom reStructured Text output. +func GenReSTCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string, string) string) error { + cmd.InitDefaultHelpCmd() + cmd.InitDefaultHelpFlag() + + buf := new(bytes.Buffer) + name := cmd.CommandPath() + + short := cmd.Short + long := cmd.Long + if len(long) == 0 { + long = short + } + ref := strings.Replace(name, " ", "_", -1) + + buf.WriteString(".. 
_" + ref + ":\n\n") + buf.WriteString(name + "\n") + buf.WriteString(strings.Repeat("-", len(name)) + "\n\n") + buf.WriteString(short + "\n\n") + buf.WriteString("Synopsis\n") + buf.WriteString("~~~~~~~~\n\n") + buf.WriteString("\n" + long + "\n\n") + + if cmd.Runnable() { + buf.WriteString(fmt.Sprintf("::\n\n %s\n\n", cmd.UseLine())) + } + + if len(cmd.Example) > 0 { + buf.WriteString("Examples\n") + buf.WriteString("~~~~~~~~\n\n") + buf.WriteString(fmt.Sprintf("::\n\n%s\n\n", indentString(cmd.Example, " "))) + } + + if err := printOptionsReST(buf, cmd, name); err != nil { + return err + } + if hasSeeAlso(cmd) { + buf.WriteString("SEE ALSO\n") + buf.WriteString("~~~~~~~~\n\n") + if cmd.HasParent() { + parent := cmd.Parent() + pname := parent.CommandPath() + ref = strings.Replace(pname, " ", "_", -1) + buf.WriteString(fmt.Sprintf("* %s \t - %s\n", linkHandler(pname, ref), parent.Short)) + cmd.VisitParents(func(c *cobra.Command) { + if c.DisableAutoGenTag { + cmd.DisableAutoGenTag = c.DisableAutoGenTag + } + }) + } + + children := cmd.Commands() + sort.Sort(byName(children)) + + for _, child := range children { + if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() { + continue + } + cname := name + " " + child.Name() + ref = strings.Replace(cname, " ", "_", -1) + buf.WriteString(fmt.Sprintf("* %s \t - %s\n", linkHandler(cname, ref), child.Short)) + } + buf.WriteString("\n") + } + if !cmd.DisableAutoGenTag { + buf.WriteString("*Auto generated by spf13/cobra on " + time.Now().Format("2-Jan-2006") + "*\n") + } + _, err := buf.WriteTo(w) + return err +} + +// GenReSTTree will generate a ReST page for this command and all +// descendants in the directory given. +// This function may not work correctly if your command names have `-` in them. +// If you have `cmd` with two subcmds, `sub` and `sub-third`, +// and `sub` has a subcommand called `third`, it is undefined which +// help output will be in the file `cmd-sub-third.1`. +func GenReSTTree(cmd *cobra.Command, dir string) error { + emptyStr := func(s string) string { return "" } + return GenReSTTreeCustom(cmd, dir, emptyStr, defaultLinkHandler) +} + +// GenReSTTreeCustom is the the same as GenReSTTree, but +// with custom filePrepender and linkHandler. +func GenReSTTreeCustom(cmd *cobra.Command, dir string, filePrepender func(string) string, linkHandler func(string, string) string) error { + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { + continue + } + if err := GenReSTTreeCustom(c, dir, filePrepender, linkHandler); err != nil { + return err + } + } + + basename := strings.Replace(cmd.CommandPath(), " ", "_", -1) + ".rst" + filename := filepath.Join(dir, basename) + f, err := os.Create(filename) + if err != nil { + return err + } + defer f.Close() + + if _, err := io.WriteString(f, filePrepender(filename)); err != nil { + return err + } + if err := GenReSTCustom(cmd, f, linkHandler); err != nil { + return err + } + return nil +} + +// adapted from: https://github.com/kr/text/blob/main/indent.go +func indentString(s, p string) string { + var res []byte + b := []byte(s) + prefix := []byte(p) + bol := true + for _, c := range b { + if bol && c != '\n' { + res = append(res, prefix...) 
+ } + res = append(res, c) + bol = c == '\n' + } + return string(res) +} diff --git a/vendor/github.com/spf13/cobra/doc/rest_docs.md b/vendor/github.com/spf13/cobra/doc/rest_docs.md new file mode 100644 index 0000000..6098430 --- /dev/null +++ b/vendor/github.com/spf13/cobra/doc/rest_docs.md @@ -0,0 +1,114 @@ +# Generating ReStructured Text Docs For Your Own cobra.Command + +Generating ReST pages from a cobra command is incredibly easy. An example is as follows: + +```go +package main + +import ( + "log" + + "github.com/spf13/cobra" + "github.com/spf13/cobra/doc" +) + +func main() { + cmd := &cobra.Command{ + Use: "test", + Short: "my test program", + } + err := doc.GenReSTTree(cmd, "/tmp") + if err != nil { + log.Fatal(err) + } +} +``` + +That will get you a ReST document `/tmp/test.rst` + +## Generate ReST docs for the entire command tree + +This program can actually generate docs for the kubectl command in the kubernetes project + +```go +package main + +import ( + "log" + "io/ioutil" + "os" + + "k8s.io/kubernetes/pkg/kubectl/cmd" + cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + + "github.com/spf13/cobra/doc" +) + +func main() { + kubectl := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard) + err := doc.GenReSTTree(kubectl, "./") + if err != nil { + log.Fatal(err) + } +} +``` + +This will generate a whole series of files, one for each command in the tree, in the directory specified (in this case "./") + +## Generate ReST docs for a single command + +You may wish to have more control over the output, or only generate for a single command, instead of the entire command tree. If this is the case you may prefer to `GenReST` instead of `GenReSTTree` + +```go + out := new(bytes.Buffer) + err := doc.GenReST(cmd, out) + if err != nil { + log.Fatal(err) + } +``` + +This will write the ReST doc for ONLY "cmd" into the out, buffer. + +## Customize the output + +Both `GenReST` and `GenReSTTree` have alternate versions with callbacks to get some control of the output: + +```go +func GenReSTTreeCustom(cmd *Command, dir string, filePrepender func(string) string, linkHandler func(string, string) string) error { + //... +} +``` + +```go +func GenReSTCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string, string) string) error { + //... +} +``` + +The `filePrepender` will prepend the return value given the full filepath to the rendered ReST file. A common use case is to add front matter to use the generated documentation with [Hugo](http://gohugo.io/): + +```go +const fmTemplate = `--- +date: %s +title: "%s" +slug: %s +url: %s +--- +` +filePrepender := func(filename string) string { + now := time.Now().Format(time.RFC3339) + name := filepath.Base(filename) + base := strings.TrimSuffix(name, path.Ext(name)) + url := "/commands/" + strings.ToLower(base) + "/" + return fmt.Sprintf(fmTemplate, now, strings.Replace(base, "_", " ", -1), base, url) +} +``` + +The `linkHandler` can be used to customize the rendered links to the commands, given a command name and reference. 
This is useful while converting rst to html or while generating documentation with tools like Sphinx where `:ref:` is used: + +```go +// Sphinx cross-referencing format +linkHandler := func(name, ref string) string { + return fmt.Sprintf(":ref:`%s <%s>`", name, ref) +} +``` diff --git a/vendor/github.com/spf13/cobra/doc/rest_docs_test.go b/vendor/github.com/spf13/cobra/doc/rest_docs_test.go new file mode 100644 index 0000000..aa3186e --- /dev/null +++ b/vendor/github.com/spf13/cobra/doc/rest_docs_test.go @@ -0,0 +1,76 @@ +package doc + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/spf13/cobra" +) + +func TestGenRSTDoc(t *testing.T) { + // We generate on a subcommand so we have both subcommands and parents + buf := new(bytes.Buffer) + if err := GenReST(echoCmd, buf); err != nil { + t.Fatal(err) + } + output := buf.String() + + checkStringContains(t, output, echoCmd.Long) + checkStringContains(t, output, echoCmd.Example) + checkStringContains(t, output, "boolone") + checkStringContains(t, output, "rootflag") + checkStringContains(t, output, rootCmd.Short) + checkStringContains(t, output, echoSubCmd.Short) + checkStringOmits(t, output, deprecatedCmd.Short) +} + +func TestGenRSTNoTag(t *testing.T) { + rootCmd.DisableAutoGenTag = true + defer func() { rootCmd.DisableAutoGenTag = false }() + + buf := new(bytes.Buffer) + if err := GenReST(rootCmd, buf); err != nil { + t.Fatal(err) + } + output := buf.String() + + unexpected := "Auto generated" + checkStringOmits(t, output, unexpected) +} + +func TestGenRSTTree(t *testing.T) { + c := &cobra.Command{Use: "do [OPTIONS] arg1 arg2"} + + tmpdir, err := ioutil.TempDir("", "test-gen-rst-tree") + if err != nil { + t.Fatalf("Failed to create tmpdir: %s", err.Error()) + } + defer os.RemoveAll(tmpdir) + + if err := GenReSTTree(c, tmpdir); err != nil { + t.Fatalf("GenReSTTree failed: %s", err.Error()) + } + + if _, err := os.Stat(filepath.Join(tmpdir, "do.rst")); err != nil { + t.Fatalf("Expected file 'do.rst' to exist") + } +} + +func BenchmarkGenReSTToFile(b *testing.B) { + file, err := ioutil.TempFile("", "") + if err != nil { + b.Fatal(err) + } + defer os.Remove(file.Name()) + defer file.Close() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if err := GenReST(rootCmd, file); err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/spf13/cobra/doc/util.go b/vendor/github.com/spf13/cobra/doc/util.go new file mode 100644 index 0000000..8d3dbec --- /dev/null +++ b/vendor/github.com/spf13/cobra/doc/util.go @@ -0,0 +1,51 @@ +// Copyright 2015 Red Hat Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package doc + +import ( + "strings" + + "github.com/spf13/cobra" +) + +// Test to see if we have a reason to print See Also information in docs +// Basically this is a test for a parent commend or a subcommand which is +// both not deprecated and not the autogenerated help command. 
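+// A command with a parent always qualifies, since the parent itself is a
+// valid See Also target; otherwise at least one eligible subcommand must exist.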
+func hasSeeAlso(cmd *cobra.Command) bool { + if cmd.HasParent() { + return true + } + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { + continue + } + return true + } + return false +} + +// Temporary workaround for yaml lib generating incorrect yaml with long strings +// that do not contain \n. +func forceMultiLine(s string) string { + if len(s) > 60 && !strings.Contains(s, "\n") { + s = s + "\n" + } + return s +} + +type byName []*cobra.Command + +func (s byName) Len() int { return len(s) } +func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() } diff --git a/vendor/github.com/spf13/cobra/doc/yaml_docs.go b/vendor/github.com/spf13/cobra/doc/yaml_docs.go new file mode 100644 index 0000000..ea00af0 --- /dev/null +++ b/vendor/github.com/spf13/cobra/doc/yaml_docs.go @@ -0,0 +1,169 @@ +// Copyright 2016 French Ben. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package doc + +import ( + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "gopkg.in/yaml.v2" +) + +type cmdOption struct { + Name string + Shorthand string `yaml:",omitempty"` + DefaultValue string `yaml:"default_value,omitempty"` + Usage string `yaml:",omitempty"` +} + +type cmdDoc struct { + Name string + Synopsis string `yaml:",omitempty"` + Description string `yaml:",omitempty"` + Options []cmdOption `yaml:",omitempty"` + InheritedOptions []cmdOption `yaml:"inherited_options,omitempty"` + Example string `yaml:",omitempty"` + SeeAlso []string `yaml:"see_also,omitempty"` +} + +// GenYamlTree creates yaml structured ref files for this command and all descendants +// in the directory given. This function may not work +// correctly if your command names have `-` in them. If you have `cmd` with two +// subcmds, `sub` and `sub-third`, and `sub` has a subcommand called `third` +// it is undefined which help output will be in the file `cmd-sub-third.1`. +func GenYamlTree(cmd *cobra.Command, dir string) error { + identity := func(s string) string { return s } + emptyStr := func(s string) string { return "" } + return GenYamlTreeCustom(cmd, dir, emptyStr, identity) +} + +// GenYamlTreeCustom creates yaml structured ref files. 
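+// The filePrepender return value is written at the top of each generated
+// file, given its full output path. linkHandler is accepted for symmetry with
+// the other generators, but the yaml writer emits plain command names in
+// see_also, so a pass-through such as func(s string) string { return s } is
+// the usual argument.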
+func GenYamlTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHandler func(string) string) error { + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { + continue + } + if err := GenYamlTreeCustom(c, dir, filePrepender, linkHandler); err != nil { + return err + } + } + + basename := strings.Replace(cmd.CommandPath(), " ", "_", -1) + ".yaml" + filename := filepath.Join(dir, basename) + f, err := os.Create(filename) + if err != nil { + return err + } + defer f.Close() + + if _, err := io.WriteString(f, filePrepender(filename)); err != nil { + return err + } + if err := GenYamlCustom(cmd, f, linkHandler); err != nil { + return err + } + return nil +} + +// GenYaml creates yaml output. +func GenYaml(cmd *cobra.Command, w io.Writer) error { + return GenYamlCustom(cmd, w, func(s string) string { return s }) +} + +// GenYamlCustom creates custom yaml output. +func GenYamlCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) string) error { + cmd.InitDefaultHelpCmd() + cmd.InitDefaultHelpFlag() + + yamlDoc := cmdDoc{} + yamlDoc.Name = cmd.CommandPath() + + yamlDoc.Synopsis = forceMultiLine(cmd.Short) + yamlDoc.Description = forceMultiLine(cmd.Long) + + if len(cmd.Example) > 0 { + yamlDoc.Example = cmd.Example + } + + flags := cmd.NonInheritedFlags() + if flags.HasFlags() { + yamlDoc.Options = genFlagResult(flags) + } + flags = cmd.InheritedFlags() + if flags.HasFlags() { + yamlDoc.InheritedOptions = genFlagResult(flags) + } + + if hasSeeAlso(cmd) { + result := []string{} + if cmd.HasParent() { + parent := cmd.Parent() + result = append(result, parent.CommandPath()+" - "+parent.Short) + } + children := cmd.Commands() + sort.Sort(byName(children)) + for _, child := range children { + if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() { + continue + } + result = append(result, child.Name()+" - "+child.Short) + } + yamlDoc.SeeAlso = result + } + + final, err := yaml.Marshal(&yamlDoc) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + if _, err := w.Write(final); err != nil { + return err + } + return nil +} + +func genFlagResult(flags *pflag.FlagSet) []cmdOption { + var result []cmdOption + + flags.VisitAll(func(flag *pflag.Flag) { + // Todo, when we mark a shorthand is deprecated, but specify an empty message. + // The flag.ShorthandDeprecated is empty as the shorthand is deprecated. + // Using len(flag.ShorthandDeprecated) > 0 can't handle this, others are ok. + if !(len(flag.ShorthandDeprecated) > 0) && len(flag.Shorthand) > 0 { + opt := cmdOption{ + flag.Name, + flag.Shorthand, + flag.DefValue, + forceMultiLine(flag.Usage), + } + result = append(result, opt) + } else { + opt := cmdOption{ + Name: flag.Name, + DefaultValue: forceMultiLine(flag.DefValue), + Usage: forceMultiLine(flag.Usage), + } + result = append(result, opt) + } + }) + + return result +} diff --git a/vendor/github.com/spf13/cobra/doc/yaml_docs.md b/vendor/github.com/spf13/cobra/doc/yaml_docs.md new file mode 100644 index 0000000..1a9b7c6 --- /dev/null +++ b/vendor/github.com/spf13/cobra/doc/yaml_docs.md @@ -0,0 +1,112 @@ +# Generating Yaml Docs For Your Own cobra.Command + +Generating yaml files from a cobra command is incredibly easy. 
An example is as follows: + +```go +package main + +import ( + "log" + + "github.com/spf13/cobra" + "github.com/spf13/cobra/doc" +) + +func main() { + cmd := &cobra.Command{ + Use: "test", + Short: "my test program", + } + err := doc.GenYamlTree(cmd, "/tmp") + if err != nil { + log.Fatal(err) + } +} +``` + +That will get you a Yaml document `/tmp/test.yaml` + +## Generate yaml docs for the entire command tree + +This program can actually generate docs for the kubectl command in the kubernetes project + +```go +package main + +import ( + "io/ioutil" + "log" + "os" + + "k8s.io/kubernetes/pkg/kubectl/cmd" + cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + + "github.com/spf13/cobra/doc" +) + +func main() { + kubectl := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard) + err := doc.GenYamlTree(kubectl, "./") + if err != nil { + log.Fatal(err) + } +} +``` + +This will generate a whole series of files, one for each command in the tree, in the directory specified (in this case "./") + +## Generate yaml docs for a single command + +You may wish to have more control over the output, or only generate for a single command, instead of the entire command tree. If this is the case you may prefer to `GenYaml` instead of `GenYamlTree` + +```go + out := new(bytes.Buffer) + doc.GenYaml(cmd, out) +``` + +This will write the yaml doc for ONLY "cmd" into the out, buffer. + +## Customize the output + +Both `GenYaml` and `GenYamlTree` have alternate versions with callbacks to get some control of the output: + +```go +func GenYamlTreeCustom(cmd *Command, dir string, filePrepender, linkHandler func(string) string) error { + //... +} +``` + +```go +func GenYamlCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string) string) error { + //... +} +``` + +The `filePrepender` will prepend the return value given the full filepath to the rendered Yaml file. 
A common use case is to add front matter to use the generated documentation with [Hugo](http://gohugo.io/): + +```go +const fmTemplate = `--- +date: %s +title: "%s" +slug: %s +url: %s +--- +` + +filePrepender := func(filename string) string { + now := time.Now().Format(time.RFC3339) + name := filepath.Base(filename) + base := strings.TrimSuffix(name, path.Ext(name)) + url := "/commands/" + strings.ToLower(base) + "/" + return fmt.Sprintf(fmTemplate, now, strings.Replace(base, "_", " ", -1), base, url) +} +``` + +The `linkHandler` can be used to customize the rendered internal links to the commands, given a filename: + +```go +linkHandler := func(name string) string { + base := strings.TrimSuffix(name, path.Ext(name)) + return "/commands/" + strings.ToLower(base) + "/" +} +``` diff --git a/vendor/github.com/spf13/cobra/doc/yaml_docs_test.go b/vendor/github.com/spf13/cobra/doc/yaml_docs_test.go new file mode 100644 index 0000000..c5a6359 --- /dev/null +++ b/vendor/github.com/spf13/cobra/doc/yaml_docs_test.go @@ -0,0 +1,74 @@ +package doc + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/spf13/cobra" +) + +func TestGenYamlDoc(t *testing.T) { + // We generate on s subcommand so we have both subcommands and parents + buf := new(bytes.Buffer) + if err := GenYaml(echoCmd, buf); err != nil { + t.Fatal(err) + } + output := buf.String() + + checkStringContains(t, output, echoCmd.Long) + checkStringContains(t, output, echoCmd.Example) + checkStringContains(t, output, "boolone") + checkStringContains(t, output, "rootflag") + checkStringContains(t, output, rootCmd.Short) + checkStringContains(t, output, echoSubCmd.Short) +} + +func TestGenYamlNoTag(t *testing.T) { + rootCmd.DisableAutoGenTag = true + defer func() { rootCmd.DisableAutoGenTag = false }() + + buf := new(bytes.Buffer) + if err := GenYaml(rootCmd, buf); err != nil { + t.Fatal(err) + } + output := buf.String() + + checkStringOmits(t, output, "Auto generated") +} + +func TestGenYamlTree(t *testing.T) { + c := &cobra.Command{Use: "do [OPTIONS] arg1 arg2"} + + tmpdir, err := ioutil.TempDir("", "test-gen-yaml-tree") + if err != nil { + t.Fatalf("Failed to create tmpdir: %s", err.Error()) + } + defer os.RemoveAll(tmpdir) + + if err := GenYamlTree(c, tmpdir); err != nil { + t.Fatalf("GenYamlTree failed: %s", err.Error()) + } + + if _, err := os.Stat(filepath.Join(tmpdir, "do.yaml")); err != nil { + t.Fatalf("Expected file 'do.yaml' to exist") + } +} + +func BenchmarkGenYamlToFile(b *testing.B) { + file, err := ioutil.TempFile("", "") + if err != nil { + b.Fatal(err) + } + defer os.Remove(file.Name()) + defer file.Close() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if err := GenYaml(rootCmd, file); err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go new file mode 100644 index 0000000..889c22e --- /dev/null +++ b/vendor/github.com/spf13/cobra/zsh_completions.go @@ -0,0 +1,126 @@ +package cobra + +import ( + "bytes" + "fmt" + "io" + "os" + "strings" +) + +// GenZshCompletionFile generates zsh completion file. +func (c *Command) GenZshCompletionFile(filename string) error { + outFile, err := os.Create(filename) + if err != nil { + return err + } + defer outFile.Close() + + return c.GenZshCompletion(outFile) +} + +// GenZshCompletion generates a zsh completion file and writes to the passed writer. 
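+// A typical wiring (sketch; the "completion" command and rootCmd names are
+// assumptions, not part of this file):
+//
+//	var completionCmd = &Command{
+//		Use:    "completion",
+//		Hidden: true,
+//		Run: func(cmd *Command, args []string) {
+//			rootCmd.GenZshCompletion(os.Stdout)
+//		},
+//	}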
+func (c *Command) GenZshCompletion(w io.Writer) error { + buf := new(bytes.Buffer) + + writeHeader(buf, c) + maxDepth := maxDepth(c) + writeLevelMapping(buf, maxDepth) + writeLevelCases(buf, maxDepth, c) + + _, err := buf.WriteTo(w) + return err +} + +func writeHeader(w io.Writer, cmd *Command) { + fmt.Fprintf(w, "#compdef %s\n\n", cmd.Name()) +} + +func maxDepth(c *Command) int { + if len(c.Commands()) == 0 { + return 0 + } + maxDepthSub := 0 + for _, s := range c.Commands() { + subDepth := maxDepth(s) + if subDepth > maxDepthSub { + maxDepthSub = subDepth + } + } + return 1 + maxDepthSub +} + +func writeLevelMapping(w io.Writer, numLevels int) { + fmt.Fprintln(w, `_arguments \`) + for i := 1; i <= numLevels; i++ { + fmt.Fprintf(w, ` '%d: :->level%d' \`, i, i) + fmt.Fprintln(w) + } + fmt.Fprintf(w, ` '%d: :%s'`, numLevels+1, "_files") + fmt.Fprintln(w) +} + +func writeLevelCases(w io.Writer, maxDepth int, root *Command) { + fmt.Fprintln(w, "case $state in") + defer fmt.Fprintln(w, "esac") + + for i := 1; i <= maxDepth; i++ { + fmt.Fprintf(w, " level%d)\n", i) + writeLevel(w, root, i) + fmt.Fprintln(w, " ;;") + } + fmt.Fprintln(w, " *)") + fmt.Fprintln(w, " _arguments '*: :_files'") + fmt.Fprintln(w, " ;;") +} + +func writeLevel(w io.Writer, root *Command, i int) { + fmt.Fprintf(w, " case $words[%d] in\n", i) + defer fmt.Fprintln(w, " esac") + + commands := filterByLevel(root, i) + byParent := groupByParent(commands) + + for p, c := range byParent { + names := names(c) + fmt.Fprintf(w, " %s)\n", p) + fmt.Fprintf(w, " _arguments '%d: :(%s)'\n", i, strings.Join(names, " ")) + fmt.Fprintln(w, " ;;") + } + fmt.Fprintln(w, " *)") + fmt.Fprintln(w, " _arguments '*: :_files'") + fmt.Fprintln(w, " ;;") + +} + +func filterByLevel(c *Command, l int) []*Command { + cs := make([]*Command, 0) + if l == 0 { + cs = append(cs, c) + return cs + } + for _, s := range c.Commands() { + cs = append(cs, filterByLevel(s, l-1)...) 
+ } + return cs +} + +func groupByParent(commands []*Command) map[string][]*Command { + m := make(map[string][]*Command) + for _, c := range commands { + parent := c.Parent() + if parent == nil { + continue + } + m[parent.Name()] = append(m[parent.Name()], c) + } + return m +} + +func names(commands []*Command) []string { + ns := make([]string, len(commands)) + for i, c := range commands { + ns[i] = c.Name() + } + return ns +} diff --git a/vendor/github.com/spf13/cobra/zsh_completions_test.go b/vendor/github.com/spf13/cobra/zsh_completions_test.go new file mode 100644 index 0000000..34e6949 --- /dev/null +++ b/vendor/github.com/spf13/cobra/zsh_completions_test.go @@ -0,0 +1,89 @@ +package cobra + +import ( + "bytes" + "strings" + "testing" +) + +func TestZshCompletion(t *testing.T) { + tcs := []struct { + name string + root *Command + expectedExpressions []string + }{ + { + name: "trivial", + root: &Command{Use: "trivialapp"}, + expectedExpressions: []string{"#compdef trivial"}, + }, + { + name: "linear", + root: func() *Command { + r := &Command{Use: "linear"} + + sub1 := &Command{Use: "sub1"} + r.AddCommand(sub1) + + sub2 := &Command{Use: "sub2"} + sub1.AddCommand(sub2) + + sub3 := &Command{Use: "sub3"} + sub2.AddCommand(sub3) + return r + }(), + expectedExpressions: []string{"sub1", "sub2", "sub3"}, + }, + { + name: "flat", + root: func() *Command { + r := &Command{Use: "flat"} + r.AddCommand(&Command{Use: "c1"}) + r.AddCommand(&Command{Use: "c2"}) + return r + }(), + expectedExpressions: []string{"(c1 c2)"}, + }, + { + name: "tree", + root: func() *Command { + r := &Command{Use: "tree"} + + sub1 := &Command{Use: "sub1"} + r.AddCommand(sub1) + + sub11 := &Command{Use: "sub11"} + sub12 := &Command{Use: "sub12"} + + sub1.AddCommand(sub11) + sub1.AddCommand(sub12) + + sub2 := &Command{Use: "sub2"} + r.AddCommand(sub2) + + sub21 := &Command{Use: "sub21"} + sub22 := &Command{Use: "sub22"} + + sub2.AddCommand(sub21) + sub2.AddCommand(sub22) + + return r + }(), + expectedExpressions: []string{"(sub11 sub12)", "(sub21 sub22)"}, + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + buf := new(bytes.Buffer) + tc.root.GenZshCompletion(buf) + output := buf.String() + + for _, expectedExpression := range tc.expectedExpressions { + if !strings.Contains(output, expectedExpression) { + t.Errorf("Expected completion to contain %q somewhere; got %q", expectedExpression, output) + } + } + }) + } +} diff --git a/vendor/github.com/spf13/pflag/.gitignore b/vendor/github.com/spf13/pflag/.gitignore new file mode 100644 index 0000000..c3da290 --- /dev/null +++ b/vendor/github.com/spf13/pflag/.gitignore @@ -0,0 +1,2 @@ +.idea/* + diff --git a/vendor/github.com/spf13/pflag/.travis.yml b/vendor/github.com/spf13/pflag/.travis.yml new file mode 100644 index 0000000..f8a63b3 --- /dev/null +++ b/vendor/github.com/spf13/pflag/.travis.yml @@ -0,0 +1,21 @@ +sudo: false + +language: go + +go: + - 1.7.3 + - 1.8.1 + - tip + +matrix: + allow_failures: + - go: tip + +install: + - go get github.com/golang/lint/golint + - export PATH=$GOPATH/bin:$PATH + - go install ./... + +script: + - verify/all.sh -v + - go test ./... diff --git a/vendor/github.com/spf13/pflag/LICENSE b/vendor/github.com/spf13/pflag/LICENSE new file mode 100644 index 0000000..63ed1cf --- /dev/null +++ b/vendor/github.com/spf13/pflag/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 Alex Ogier. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md new file mode 100644 index 0000000..b052414 --- /dev/null +++ b/vendor/github.com/spf13/pflag/README.md @@ -0,0 +1,296 @@ +[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag) +[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/pflag)](https://goreportcard.com/report/github.com/spf13/pflag) +[![GoDoc](https://godoc.org/github.com/spf13/pflag?status.svg)](https://godoc.org/github.com/spf13/pflag) + +## Description + +pflag is a drop-in replacement for Go's flag package, implementing +POSIX/GNU-style --flags. + +pflag is compatible with the [GNU extensions to the POSIX recommendations +for command-line options][1]. For a more precise description, see the +"Command-line flag syntax" section below. + +[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html + +pflag is available under the same style of BSD license as the Go language, +which can be found in the LICENSE file. + +## Installation + +pflag is available using the standard `go get` command. + +Install by running: + + go get github.com/spf13/pflag + +Run tests by running: + + go test github.com/spf13/pflag + +## Usage + +pflag is a drop-in replacement of Go's native flag package. If you import +pflag under the name "flag" then all code should continue to function +with no changes. + +``` go +import flag "github.com/spf13/pflag" +``` + +There is one exception to this: if you directly instantiate the Flag struct +there is one more field "Shorthand" that you will need to set. +Most code never instantiates this struct directly, and instead uses +functions such as String(), BoolVar(), and Var(), and is therefore +unaffected. + +Define flags using flag.String(), Bool(), Int(), etc. + +This declares an integer flag, -flagname, stored in the pointer ip, with type *int. + +``` go +var ip *int = flag.Int("flagname", 1234, "help message for flagname") +``` + +If you like, you can bind the flag to a variable using the Var() functions. 
+
+``` go
+var flagvar int
+func init() {
+    flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+}
+```
+
+Or you can create custom flags that satisfy the Value interface (with
+pointer receivers) and couple them to flag parsing by
+
+``` go
+flag.Var(&flagVal, "name", "help message for flagname")
+```
+
+For such flags, the default value is just the initial value of the variable.
+
+After all flags are defined, call
+
+``` go
+flag.Parse()
+```
+
+to parse the command line into the defined flags.
+
+Flags may then be used directly. If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+
+``` go
+fmt.Println("ip has value ", *ip)
+fmt.Println("flagvar has value ", flagvar)
+```
+
+There are helper functions available to get the value stored in a Flag if
+you have a FlagSet but find it difficult to keep up with all of the flag
+pointers in your code.
+If you have a pflag.FlagSet with a flag called 'flagname' of type int you
+can use GetInt() to get the int value. But notice that 'flagname' must exist
+and it must be an int. GetString("flagname") will fail.
+
+``` go
+i, err := flagset.GetInt("flagname")
+```
+
+After parsing, the arguments after the flag are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
+
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags. You can use these by appending
+'P' to the name of any function that defines a flag.
+
+``` go
+var ip = flag.IntP("flagname", "f", 1234, "help message")
+var flagvar bool
+func init() {
+    flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+}
+flag.VarP(&flagVal, "varname", "v", "help message")
+```
+
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
+
+The default set of command-line flags is controlled by
+top-level functions. The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
+
+## Setting no option default values for flags
+
+After you create a flag it is possible to set the pflag.NoOptDefVal for
+the given flag. Doing this changes the meaning of the flag slightly. If
+a flag has a NoOptDefVal and the flag is set on the command line without
+an option the flag will be set to the NoOptDefVal. For example given:
+
+``` go
+var ip = flag.IntP("flagname", "f", 1234, "help message")
+flag.Lookup("flagname").NoOptDefVal = "4321"
+```
+
+Would result in something like
+
+| Parsed Arguments | Resulting Value |
+| ------------- | ------------- |
+| --flagname=1357 | ip=1357 |
+| --flagname | ip=4321 |
+| [nothing] | ip=1234 |
+
+## Command line flag syntax
+
+```
+--flag    // boolean flags, or flags with no option default values
+--flag x  // only on flags without a default value
+--flag=x
+```
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags. 
All but the last shorthand letter must be boolean flags
+or flags with a 'no option default value'.
+
+```
+// boolean or flags where the 'no option default value' is set
+-f
+-f=true
+-abc
+but
+-b true is INVALID
+
+// non-boolean and flags without a 'no option default value'
+-n 1234
+-n=1234
+-n1234
+
+// mixed
+-abcs "hello"
+-absd="hello"
+-abcs1234
+```
+
+Flag parsing stops after the terminator "--". Unlike the flag package,
+flags can be interspersed with arguments anywhere on the command line
+before this terminator.
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+TRUE, FALSE, True, False.
+Duration flags accept any input valid for time.ParseDuration.
+
+## Mutating or "Normalizing" Flag names
+
+It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow.
+
+**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag
+
+``` go
+func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
+	from := []string{"-", "_"}
+	to := "."
+	for _, sep := range from {
+		name = strings.Replace(name, sep, to, -1)
+	}
+	return pflag.NormalizedName(name)
+}
+
+myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc)
+```
+
+**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name
+
+``` go
+func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
+	switch name {
+	case "old-flag-name":
+		name = "new-flag-name"
+	}
+	return pflag.NormalizedName(name)
+}
+
+myFlagSet.SetNormalizeFunc(aliasNormalizeFunc)
+```
+
+## Deprecating a flag or its shorthand
+It is possible to deprecate a flag, or just its shorthand. Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used.
+
+**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead.
+```go
+// deprecate a flag by specifying its name and a usage message
+flags.MarkDeprecated("badflag", "please use --good-flag instead")
+```
+This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used.
+
+**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shortname "n".
+```go
+// deprecate a flag shorthand by specifying its flag name and a usage message
+flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only")
+```
+This hides the shortname "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used.
+
+Note that the usage message is essential here, and it must not be empty.
+
+## Hidden flags
+It is possible to mark a flag as hidden, meaning it will still function as normal, but it will not show up in usage/help text.
+
+**Example**: You have a flag named "secretFlag" that you need for internal use only and don't want it showing up in help text, or for its usage text to be available.
+```go
+// hide a flag by specifying its name
+flags.MarkHidden("secretFlag")
+```
+
+## Disable sorting of flags
+`pflag` allows you to disable sorting of flags for help and usage messages.
+
+**Example**:
+```go
+flags.BoolP("verbose", "v", false, "verbose output")
+flags.String("coolflag", "yeaah", "it's really cool flag")
+flags.Int("usefulflag", 777, "sometimes it's very useful")
+flags.SortFlags = false
+flags.PrintDefaults()
+```
+**Output**:
+```
+  -v, --verbose           verbose output
+      --coolflag string   it's really cool flag (default "yeaah")
+      --usefulflag int    sometimes it's very useful (default 777)
+```
+
+
+## Supporting Go flags when using pflag
+In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. This is usually necessary
+to support flags defined by third-party dependencies (e.g. `golang/glog`).
+
+**Example**: You want to add the Go flags to the `CommandLine` flagset
+```go
+import (
+	goflag "flag"
+	flag "github.com/spf13/pflag"
+)
+
+var ip *int = flag.Int("flagname", 1234, "help message for flagname")
+
+func main() {
+	flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
+	flag.Parse()
+}
+```
+
+## More info
+
+You can see the full reference documentation of the pflag package
+[at godoc.org][3], or through Go's standard documentation system by
+running `godoc -http=:6060` and browsing to
+[http://localhost:6060/pkg/github.com/spf13/pflag][2] after
+installation.
+
+[2]: http://localhost:6060/pkg/github.com/spf13/pflag
+[3]: http://godoc.org/github.com/spf13/pflag
diff --git a/vendor/github.com/spf13/pflag/bool.go b/vendor/github.com/spf13/pflag/bool.go
new file mode 100644
index 0000000..c4c5c0b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bool.go
@@ -0,0 +1,94 @@
+package pflag
+
+import "strconv"
+
+// optional interface to indicate boolean flags that can be
+// supplied without "=value" text
+type boolFlag interface {
+	Value
+	IsBoolFlag() bool
+}
+
+// -- bool Value
+type boolValue bool
+
+func newBoolValue(val bool, p *bool) *boolValue {
+	*p = val
+	return (*boolValue)(p)
+}
+
+func (b *boolValue) Set(s string) error {
+	v, err := strconv.ParseBool(s)
+	*b = boolValue(v)
+	return err
+}
+
+func (b *boolValue) Type() string {
+	return "bool"
+}
+
+func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) }
+
+func (b *boolValue) IsBoolFlag() bool { return true }
+
+func boolConv(sval string) (interface{}, error) {
+	return strconv.ParseBool(sval)
+}
+
+// GetBool returns the bool value of a flag with the given name.
+func (f *FlagSet) GetBool(name string) (bool, error) {
+	val, err := f.getFlagType(name, "bool", boolConv)
+	if err != nil {
+		return false, err
+	}
+	return val.(bool), nil
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) {
+	f.BoolVarP(p, name, "", value, usage)
+}
+
+// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
+	flag := f.VarPF(newBoolValue(value, p), name, shorthand, usage)
+	flag.NoOptDefVal = "true"
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func BoolVar(p *bool, name string, value bool, usage string) {
+	BoolVarP(p, name, "", value, usage)
+}
+
+// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
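+// An illustrative sketch (not part of the original docs; names are made up):
+//
+//	var force bool
+//	BoolVarP(&force, "force", "f", false, "force overwrite")
+//
+// registers the flag on CommandLine so that either --force or -f sets force.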
+func BoolVarP(p *bool, name, shorthand string, value bool, usage string) { + flag := CommandLine.VarPF(newBoolValue(value, p), name, shorthand, usage) + flag.NoOptDefVal = "true" +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func (f *FlagSet) Bool(name string, value bool, usage string) *bool { + return f.BoolP(name, "", value, usage) +} + +// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool { + p := new(bool) + f.BoolVarP(p, name, shorthand, value, usage) + return p +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func Bool(name string, value bool, usage string) *bool { + return BoolP(name, "", value, usage) +} + +// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash. +func BoolP(name, shorthand string, value bool, usage string) *bool { + b := CommandLine.BoolP(name, shorthand, value, usage) + return b +} diff --git a/vendor/github.com/spf13/pflag/bool_slice.go b/vendor/github.com/spf13/pflag/bool_slice.go new file mode 100644 index 0000000..5af02f1 --- /dev/null +++ b/vendor/github.com/spf13/pflag/bool_slice.go @@ -0,0 +1,147 @@ +package pflag + +import ( + "io" + "strconv" + "strings" +) + +// -- boolSlice Value +type boolSliceValue struct { + value *[]bool + changed bool +} + +func newBoolSliceValue(val []bool, p *[]bool) *boolSliceValue { + bsv := new(boolSliceValue) + bsv.value = p + *bsv.value = val + return bsv +} + +// Set converts, and assigns, the comma-separated boolean argument string representation as the []bool value of this flag. +// If Set is called on a flag that already has a []bool assigned, the newly converted values will be appended. +func (s *boolSliceValue) Set(val string) error { + + // remove all quote characters + rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "") + + // read flag arguments with CSV parser + boolStrSlice, err := readAsCSV(rmQuote.Replace(val)) + if err != nil && err != io.EOF { + return err + } + + // parse boolean values into slice + out := make([]bool, 0, len(boolStrSlice)) + for _, boolStr := range boolStrSlice { + b, err := strconv.ParseBool(strings.TrimSpace(boolStr)) + if err != nil { + return err + } + out = append(out, b) + } + + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) + } + + s.changed = true + + return nil +} + +// Type returns a string that uniquely represents this flag's type. +func (s *boolSliceValue) Type() string { + return "boolSlice" +} + +// String defines a "native" format for this boolean slice flag value. 
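+// For example, a value of []bool{true, false} renders as "[true,false]".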
+func (s *boolSliceValue) String() string { + + boolStrSlice := make([]string, len(*s.value)) + for i, b := range *s.value { + boolStrSlice[i] = strconv.FormatBool(b) + } + + out, _ := writeAsCSV(boolStrSlice) + + return "[" + out + "]" +} + +func boolSliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []bool{}, nil + } + ss := strings.Split(val, ",") + out := make([]bool, len(ss)) + for i, t := range ss { + var err error + out[i], err = strconv.ParseBool(t) + if err != nil { + return nil, err + } + } + return out, nil +} + +// GetBoolSlice returns the []bool value of a flag with the given name. +func (f *FlagSet) GetBoolSlice(name string) ([]bool, error) { + val, err := f.getFlagType(name, "boolSlice", boolSliceConv) + if err != nil { + return []bool{}, err + } + return val.([]bool), nil +} + +// BoolSliceVar defines a boolSlice flag with specified name, default value, and usage string. +// The argument p points to a []bool variable in which to store the value of the flag. +func (f *FlagSet) BoolSliceVar(p *[]bool, name string, value []bool, usage string) { + f.VarP(newBoolSliceValue(value, p), name, "", usage) +} + +// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) { + f.VarP(newBoolSliceValue(value, p), name, shorthand, usage) +} + +// BoolSliceVar defines a []bool flag with specified name, default value, and usage string. +// The argument p points to a []bool variable in which to store the value of the flag. +func BoolSliceVar(p *[]bool, name string, value []bool, usage string) { + CommandLine.VarP(newBoolSliceValue(value, p), name, "", usage) +} + +// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash. +func BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) { + CommandLine.VarP(newBoolSliceValue(value, p), name, shorthand, usage) +} + +// BoolSlice defines a []bool flag with specified name, default value, and usage string. +// The return value is the address of a []bool variable that stores the value of the flag. +func (f *FlagSet) BoolSlice(name string, value []bool, usage string) *[]bool { + p := []bool{} + f.BoolSliceVarP(&p, name, "", value, usage) + return &p +} + +// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool { + p := []bool{} + f.BoolSliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// BoolSlice defines a []bool flag with specified name, default value, and usage string. +// The return value is the address of a []bool variable that stores the value of the flag. +func BoolSlice(name string, value []bool, usage string) *[]bool { + return CommandLine.BoolSliceP(name, "", value, usage) +} + +// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash. 
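+// Illustrative use (the flag name here is hypothetical):
+//
+//	bits := BoolSliceP("bits", "b", []bool{}, "comma-separated booleans")
+//
+// after which --bits=true,false leaves *bits == []bool{true, false}.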
+func BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
+	return CommandLine.BoolSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/bool_slice_test.go b/vendor/github.com/spf13/pflag/bool_slice_test.go
new file mode 100644
index 0000000..b617dd2
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bool_slice_test.go
@@ -0,0 +1,215 @@
+package pflag
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"testing"
+)
+
+func setUpBSFlagSet(bsp *[]bool) *FlagSet {
+	f := NewFlagSet("test", ContinueOnError)
+	f.BoolSliceVar(bsp, "bs", []bool{}, "Comma separated list!")
+	return f
+}
+
+func setUpBSFlagSetWithDefault(bsp *[]bool) *FlagSet {
+	f := NewFlagSet("test", ContinueOnError)
+	f.BoolSliceVar(bsp, "bs", []bool{false, true}, "Comma separated list!")
+	return f
+}
+
+func TestEmptyBS(t *testing.T) {
+	var bs []bool
+	f := setUpBSFlagSet(&bs)
+	err := f.Parse([]string{})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+
+	getBS, err := f.GetBoolSlice("bs")
+	if err != nil {
+		t.Fatal("got an error from GetBoolSlice():", err)
+	}
+	if len(getBS) != 0 {
+		t.Fatalf("got bs %v with len=%d but expected length=0", getBS, len(getBS))
+	}
+}
+
+func TestBS(t *testing.T) {
+	var bs []bool
+	f := setUpBSFlagSet(&bs)
+
+	vals := []string{"1", "F", "TRUE", "0"}
+	arg := fmt.Sprintf("--bs=%s", strings.Join(vals, ","))
+	err := f.Parse([]string{arg})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+	for i, v := range bs {
+		b, err := strconv.ParseBool(vals[i])
+		if err != nil {
+			t.Fatalf("got error: %v", err)
+		}
+		if b != v {
+			t.Fatalf("expected bs[%d] to be %s but got: %t", i, vals[i], v)
+		}
+	}
+	getBS, err := f.GetBoolSlice("bs")
+	if err != nil {
+		t.Fatalf("got error: %v", err)
+	}
+	for i, v := range getBS {
+		b, err := strconv.ParseBool(vals[i])
+		if err != nil {
+			t.Fatalf("got error: %v", err)
+		}
+		if b != v {
+			t.Fatalf("expected bs[%d] to be %s but got: %t from GetBoolSlice", i, vals[i], v)
+		}
+	}
+}
+
+func TestBSDefault(t *testing.T) {
+	var bs []bool
+	f := setUpBSFlagSetWithDefault(&bs)
+
+	vals := []string{"false", "T"}
+
+	err := f.Parse([]string{})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+	for i, v := range bs {
+		b, err := strconv.ParseBool(vals[i])
+		if err != nil {
+			t.Fatalf("got error: %v", err)
+		}
+		if b != v {
+			t.Fatalf("expected bs[%d] to be %t but got: %t", i, b, v)
+		}
+	}
+
+	getBS, err := f.GetBoolSlice("bs")
+	if err != nil {
+		t.Fatal("got an error from GetBoolSlice():", err)
+	}
+	for i, v := range getBS {
+		b, err := strconv.ParseBool(vals[i])
+		if err != nil {
+			t.Fatal("got an error from strconv.ParseBool():", err)
+		}
+		if b != v {
+			t.Fatalf("expected bs[%d] to be %t from GetBoolSlice but got: %t", i, b, v)
+		}
+	}
+}
+
+func TestBSWithDefault(t *testing.T) {
+	var bs []bool
+	f := setUpBSFlagSetWithDefault(&bs)
+
+	vals := []string{"FALSE", "1"}
+	arg := fmt.Sprintf("--bs=%s", strings.Join(vals, ","))
+	err := f.Parse([]string{arg})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+	for i, v := range bs {
+		b, err := strconv.ParseBool(vals[i])
+		if err != nil {
+			t.Fatalf("got error: %v", err)
+		}
+		if b != v {
+			t.Fatalf("expected bs[%d] to be %t but got: %t", i, b, v)
+		}
+	}
+
+	getBS, err := f.GetBoolSlice("bs")
+	if err != nil {
+		t.Fatal("got an error from GetBoolSlice():", err)
+	}
+	for i, v := range getBS {
+		b, err := strconv.ParseBool(vals[i])
+		if err != nil {
+			t.Fatalf("got error: %v", err)
+		}
+		if b != v {
+			
t.Fatalf("expected bs[%d] to be %t from GetBoolSlice but got: %t", i, b, v)
+		}
+	}
+}
+
+func TestBSCalledTwice(t *testing.T) {
+	var bs []bool
+	f := setUpBSFlagSet(&bs)
+
+	in := []string{"T,F", "T"}
+	expected := []bool{true, false, true}
+	argfmt := "--bs=%s"
+	arg1 := fmt.Sprintf(argfmt, in[0])
+	arg2 := fmt.Sprintf(argfmt, in[1])
+	err := f.Parse([]string{arg1, arg2})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+	for i, v := range bs {
+		if expected[i] != v {
+			t.Fatalf("expected bs[%d] to be %t but got %t", i, expected[i], v)
+		}
+	}
+}
+
+func TestBSBadQuoting(t *testing.T) {
+
+	tests := []struct {
+		Want    []bool
+		FlagArg []string
+	}{
+		{
+			Want:    []bool{true, false, true},
+			FlagArg: []string{"1", "0", "true"},
+		},
+		{
+			Want:    []bool{true, false},
+			FlagArg: []string{"True", "F"},
+		},
+		{
+			Want:    []bool{true, false},
+			FlagArg: []string{"T", "0"},
+		},
+		{
+			Want:    []bool{true, false},
+			FlagArg: []string{"1", "0"},
+		},
+		{
+			Want:    []bool{true, false, false},
+			FlagArg: []string{"true,false", "false"},
+		},
+		{
+			Want:    []bool{true, false, false, true, false, true, false},
+			FlagArg: []string{`"true,false,false,1,0, T"`, " false "},
+		},
+		{
+			Want:    []bool{false, false, true, false, true, false, true},
+			FlagArg: []string{`"0, False, T,false , true,F"`, "true"},
+		},
+	}
+
+	for i, test := range tests {
+
+		var bs []bool
+		f := setUpBSFlagSet(&bs)
+
+		if err := f.Parse([]string{fmt.Sprintf("--bs=%s", strings.Join(test.FlagArg, ","))}); err != nil {
+			t.Fatalf("flag parsing failed with error: %s\nparsing:\t%#v\nwant:\t\t%#v",
+				err, test.FlagArg, test.Want)
+		}
+
+		for j, b := range bs {
+			if b != test.Want[j] {
+				t.Fatalf("bad value parsed for test %d on bool %d:\nwant:\t%t\ngot:\t%t", i, j, test.Want[j], b)
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/spf13/pflag/bool_test.go b/vendor/github.com/spf13/pflag/bool_test.go
new file mode 100644
index 0000000..a4319e7
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bool_test.go
@@ -0,0 +1,179 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
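+
+// The tests below exercise NoOptDefVal handling through a custom tri-state
+// Value ("true", "false", or "maybe") rather than the built-in bool type.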
+
+package pflag
+
+import (
+	"bytes"
+	"strconv"
+	"testing"
+)
+
+// This value can be a boolean ("true", "false") or "maybe"
+type triStateValue int
+
+const (
+	triStateFalse triStateValue = 0
+	triStateTrue  triStateValue = 1
+	triStateMaybe triStateValue = 2
+)
+
+const strTriStateMaybe = "maybe"
+
+func (v *triStateValue) IsBoolFlag() bool {
+	return true
+}
+
+func (v *triStateValue) Get() interface{} {
+	return triStateValue(*v)
+}
+
+func (v *triStateValue) Set(s string) error {
+	if s == strTriStateMaybe {
+		*v = triStateMaybe
+		return nil
+	}
+	boolVal, err := strconv.ParseBool(s)
+	if boolVal {
+		*v = triStateTrue
+	} else {
+		*v = triStateFalse
+	}
+	return err
+}
+
+func (v *triStateValue) String() string {
+	if *v == triStateMaybe {
+		return strTriStateMaybe
+	}
+	return strconv.FormatBool(*v == triStateTrue)
+}
+
+// The type of the flag as required by the pflag.Value interface
+func (v *triStateValue) Type() string {
+	return "version"
+}
+
+func setUpFlagSet(tristate *triStateValue) *FlagSet {
+	f := NewFlagSet("test", ContinueOnError)
+	*tristate = triStateFalse
+	flag := f.VarPF(tristate, "tristate", "t", "tristate value (true, maybe or false)")
+	flag.NoOptDefVal = "true"
+	return f
+}
+
+func TestExplicitTrue(t *testing.T) {
+	var tristate triStateValue
+	f := setUpFlagSet(&tristate)
+	err := f.Parse([]string{"--tristate=true"})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+	if tristate != triStateTrue {
+		t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead")
+	}
+}
+
+func TestImplicitTrue(t *testing.T) {
+	var tristate triStateValue
+	f := setUpFlagSet(&tristate)
+	err := f.Parse([]string{"--tristate"})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+	if tristate != triStateTrue {
+		t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead")
+	}
+}
+
+func TestShortFlag(t *testing.T) {
+	var tristate triStateValue
+	f := setUpFlagSet(&tristate)
+	err := f.Parse([]string{"-t"})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+	if tristate != triStateTrue {
+		t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead")
+	}
+}
+
+func TestShortFlagExtraArgument(t *testing.T) {
+	var tristate triStateValue
+	f := setUpFlagSet(&tristate)
+	// The "maybe" turns into an arg, since short boolean options will only do true/false.
+	err := f.Parse([]string{"-t", "maybe"})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+	if tristate != triStateTrue {
+		t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead")
+	}
+	args := f.Args()
+	if len(args) != 1 || args[0] != "maybe" {
+		t.Fatal("expected an extra 'maybe' argument to stick around")
+	}
+}
+
+func TestExplicitMaybe(t *testing.T) {
+	var tristate triStateValue
+	f := setUpFlagSet(&tristate)
+	err := f.Parse([]string{"--tristate=maybe"})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+	if tristate != triStateMaybe {
+		t.Fatal("expected", triStateMaybe, "(triStateMaybe) but got", tristate, "instead")
+	}
+}
+
+func TestExplicitFalse(t *testing.T) {
+	var tristate triStateValue
+	f := setUpFlagSet(&tristate)
+	err := f.Parse([]string{"--tristate=false"})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+	if tristate != triStateFalse {
+		t.Fatal("expected", triStateFalse, "(triStateFalse) but got", tristate, "instead")
+	}
+}
+
+func TestImplicitFalse(t *testing.T) {
+	var tristate triStateValue
+	f := setUpFlagSet(&tristate)
+	err := 
f.Parse([]string{})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+	if tristate != triStateFalse {
+		t.Fatal("expected", triStateFalse, "(triStateFalse) but got", tristate, "instead")
+	}
+}
+
+func TestInvalidValue(t *testing.T) {
+	var tristate triStateValue
+	f := setUpFlagSet(&tristate)
+	var buf bytes.Buffer
+	f.SetOutput(&buf)
+	err := f.Parse([]string{"--tristate=invalid"})
+	if err == nil {
+		t.Fatal("expected an error but did not get any, tristate has value", tristate)
+	}
+}
+
+func TestBoolP(t *testing.T) {
+	b := BoolP("bool", "b", false, "bool value in CommandLine")
+	c := BoolP("c", "c", false, "other bool value")
+	args := []string{"--bool"}
+	if err := CommandLine.Parse(args); err != nil {
+		t.Error("expected no error, got ", err)
+	}
+	if *b != true {
+		t.Errorf("expected b=true got b=%v", *b)
+	}
+	if *c != false {
+		t.Errorf("expected c=false got c=%v", *c)
+	}
+}
diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go
new file mode 100644
index 0000000..250a438
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/count.go
@@ -0,0 +1,96 @@
+package pflag
+
+import "strconv"
+
+// -- count Value
+type countValue int
+
+func newCountValue(val int, p *int) *countValue {
+	*p = val
+	return (*countValue)(p)
+}
+
+func (i *countValue) Set(s string) error {
+	v, err := strconv.ParseInt(s, 0, 64)
+	// -1 means that no specific value was passed, so increment
+	if v == -1 {
+		*i = countValue(*i + 1)
+	} else {
+		*i = countValue(v)
+	}
+	return err
+}
+
+func (i *countValue) Type() string {
+	return "count"
+}
+
+func (i *countValue) String() string { return strconv.Itoa(int(*i)) }
+
+func countConv(sval string) (interface{}, error) {
+	i, err := strconv.Atoi(sval)
+	if err != nil {
+		return nil, err
+	}
+	return i, nil
+}
+
+// GetCount returns the int value of a flag with the given name.
+func (f *FlagSet) GetCount(name string) (int, error) {
+	val, err := f.getFlagType(name, "count", countConv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(int), nil
+}
+
+// CountVar defines a count flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line.
+func (f *FlagSet) CountVar(p *int, name string, usage string) {
+	f.CountVarP(p, name, "", usage)
+}
+
+// CountVarP is like CountVar, but takes a shorthand for the flag name.
+func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) {
+	flag := f.VarPF(newCountValue(0, p), name, shorthand, usage)
+	flag.NoOptDefVal = "-1"
+}
+
+// CountVar is like FlagSet.CountVar, but the flag is placed on the CommandLine instead of a given flag set.
+func CountVar(p *int, name string, usage string) {
+	CommandLine.CountVar(p, name, usage)
+}
+
+// CountVarP is like CountVar, but takes a shorthand for the flag name.
+func CountVarP(p *int, name, shorthand string, usage string) {
+	CommandLine.CountVarP(p, name, shorthand, usage)
+}
+
+// Count defines a count flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line.
+func (f *FlagSet) Count(name string, usage string) *int {
+	p := new(int)
+	f.CountVarP(p, name, "", usage)
+	return p
+}
+
+// CountP is like Count, but takes a shorthand for the flag name.
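+// Illustrative sketch (the flag name is assumed): with
+//
+//	v := fs.CountP("verbose", "v", "verbosity; repeat to increase")
+//
+// parsing the arguments -vvv leaves *v == 3 (see the count tests below).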
+func (f *FlagSet) CountP(name, shorthand string, usage string) *int {
+	p := new(int)
+	f.CountVarP(p, name, shorthand, usage)
+	return p
+}
+
+// Count defines a count flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line.
+func Count(name string, usage string) *int {
+	return CommandLine.CountP(name, "", usage)
+}
+
+// CountP is like Count, but takes a shorthand for the flag name.
+func CountP(name, shorthand string, usage string) *int {
+	return CommandLine.CountP(name, shorthand, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/count_test.go b/vendor/github.com/spf13/pflag/count_test.go
new file mode 100644
index 0000000..460d96a
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/count_test.go
@@ -0,0 +1,52 @@
+package pflag
+
+import (
+	"os"
+	"testing"
+)
+
+func setUpCount(c *int) *FlagSet {
+	f := NewFlagSet("test", ContinueOnError)
+	f.CountVarP(c, "verbose", "v", "a counter")
+	return f
+}
+
+func TestCount(t *testing.T) {
+	testCases := []struct {
+		input    []string
+		success  bool
+		expected int
+	}{
+		{[]string{"-vvv"}, true, 3},
+		{[]string{"-v", "-v", "-v"}, true, 3},
+		{[]string{"-v", "--verbose", "-v"}, true, 3},
+		{[]string{"-v=3", "-v"}, true, 4},
+		{[]string{"-v=a"}, false, 0},
+	}
+
+	devnull, _ := os.Open(os.DevNull)
+	os.Stderr = devnull
+	for i := range testCases {
+		var count int
+		f := setUpCount(&count)
+
+		tc := &testCases[i]
+
+		err := f.Parse(tc.input)
+		if err != nil && tc.success == true {
+			t.Errorf("expected success, got %q", err)
+			continue
+		} else if err == nil && tc.success == false {
+			t.Errorf("expected failure, got success")
+			continue
+		} else if tc.success {
+			c, err := f.GetCount("verbose")
+			if err != nil {
+				t.Errorf("Got error trying to fetch the counter flag")
+			}
+			if c != tc.expected {
+				t.Errorf("expected %q, got %q", tc.expected, c)
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/spf13/pflag/duration.go b/vendor/github.com/spf13/pflag/duration.go
new file mode 100644
index 0000000..e9debef
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/duration.go
@@ -0,0 +1,86 @@
+package pflag
+
+import (
+	"time"
+)
+
+// -- time.Duration Value
+type durationValue time.Duration
+
+func newDurationValue(val time.Duration, p *time.Duration) *durationValue {
+	*p = val
+	return (*durationValue)(p)
+}
+
+func (d *durationValue) Set(s string) error {
+	v, err := time.ParseDuration(s)
+	*d = durationValue(v)
+	return err
+}
+
+func (d *durationValue) Type() string {
+	return "duration"
+}
+
+func (d *durationValue) String() string { return (*time.Duration)(d).String() }
+
+func durationConv(sval string) (interface{}, error) {
+	return time.ParseDuration(sval)
+}
+
+// GetDuration returns the duration value of a flag with the given name.
+func (f *FlagSet) GetDuration(name string) (time.Duration, error) {
+	val, err := f.getFlagType(name, "duration", durationConv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(time.Duration), nil
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
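+// Illustration (the names and values are assumed): after
+//
+//	var timeout time.Duration
+//	fs.DurationVar(&timeout, "timeout", 5*time.Second, "request timeout")
+//
+// passing --timeout=1m30s stores 90 seconds in timeout.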
+func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) { + f.VarP(newDurationValue(value, p), name, "", usage) +} + +// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { + f.VarP(newDurationValue(value, p), name, shorthand, usage) +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +func DurationVar(p *time.Duration, name string, value time.Duration, usage string) { + CommandLine.VarP(newDurationValue(value, p), name, "", usage) +} + +// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash. +func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { + CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage) +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration { + p := new(time.Duration) + f.DurationVarP(p, name, "", value, usage) + return p +} + +// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { + p := new(time.Duration) + f.DurationVarP(p, name, shorthand, value, usage) + return p +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +func Duration(name string, value time.Duration, usage string) *time.Duration { + return CommandLine.DurationP(name, "", value, usage) +} + +// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash. +func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { + return CommandLine.DurationP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/example_test.go b/vendor/github.com/spf13/pflag/example_test.go new file mode 100644 index 0000000..abd7806 --- /dev/null +++ b/vendor/github.com/spf13/pflag/example_test.go @@ -0,0 +1,36 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package pflag_test + +import ( + "fmt" + + "github.com/spf13/pflag" +) + +func ExampleShorthandLookup() { + name := "verbose" + short := name[:1] + + pflag.BoolP(name, short, false, "verbose output") + + // len(short) must be == 1 + flag := pflag.ShorthandLookup(short) + + fmt.Println(flag.Name) +} + +func ExampleFlagSet_ShorthandLookup() { + name := "verbose" + short := name[:1] + + fs := pflag.NewFlagSet("Example", pflag.ContinueOnError) + fs.BoolP(name, short, false, "verbose output") + + // len(short) must be == 1 + flag := fs.ShorthandLookup(short) + + fmt.Println(flag.Name) +} diff --git a/vendor/github.com/spf13/pflag/export_test.go b/vendor/github.com/spf13/pflag/export_test.go new file mode 100644 index 0000000..9318fee --- /dev/null +++ b/vendor/github.com/spf13/pflag/export_test.go @@ -0,0 +1,29 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pflag + +import ( + "io/ioutil" + "os" +) + +// Additional routines compiled into the package only during testing. + +// ResetForTesting clears all flag state and sets the usage function as directed. +// After calling ResetForTesting, parse errors in flag handling will not +// exit the program. +func ResetForTesting(usage func()) { + CommandLine = &FlagSet{ + name: os.Args[0], + errorHandling: ContinueOnError, + output: ioutil.Discard, + } + Usage = usage +} + +// GetCommandLine returns the default FlagSet. +func GetCommandLine() *FlagSet { + return CommandLine +} diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go new file mode 100644 index 0000000..6f1fc30 --- /dev/null +++ b/vendor/github.com/spf13/pflag/flag.go @@ -0,0 +1,1128 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package pflag is a drop-in replacement for Go's flag package, implementing +POSIX/GNU-style --flags. + +pflag is compatible with the GNU extensions to the POSIX recommendations +for command-line options. See +http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html + +Usage: + +pflag is a drop-in replacement of Go's native flag package. If you import +pflag under the name "flag" then all code should continue to function +with no changes. + + import flag "github.com/spf13/pflag" + +There is one exception to this: if you directly instantiate the Flag struct +there is one more field "Shorthand" that you will need to set. +Most code never instantiates this struct directly, and instead uses +functions such as String(), BoolVar(), and Var(), and is therefore +unaffected. + +Define flags using flag.String(), Bool(), Int(), etc. + +This declares an integer flag, -flagname, stored in the pointer ip, with type *int. + var ip = flag.Int("flagname", 1234, "help message for flagname") +If you like, you can bind the flag to a variable using the Var() functions. + var flagvar int + func init() { + flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") + } +Or you can create custom flags that satisfy the Value interface (with +pointer receivers) and couple them to flag parsing by + flag.Var(&flagVal, "name", "help message for flagname") +For such flags, the default value is just the initial value of the variable. + +After all flags are defined, call + flag.Parse() +to parse the command line into the defined flags. + +Flags may then be used directly. 
If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+	fmt.Println("ip has value ", *ip)
+	fmt.Println("flagvar has value ", flagvar)
+
+After parsing, the arguments after the flag are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
+
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags. You can use these by appending
+'P' to the name of any function that defines a flag.
+	var ip = flag.IntP("flagname", "f", 1234, "help message")
+	var flagvar bool
+	func init() {
+		flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+	}
+	flag.VarP(&flagVal, "varname", "v", "help message")
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
+
+Command line flag syntax:
+	--flag    // boolean flags only
+	--flag=x
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags. All but the last shorthand letter must be boolean flags.
+	// boolean flags
+	-f
+	-abc
+	// non-boolean flags
+	-n 1234
+	-Ifile
+	// mixed
+	-abcs "hello"
+	-abcn1234
+
+Flag parsing stops after the terminator "--". Unlike the flag package,
+flags can be interspersed with arguments anywhere on the command line
+before this terminator.
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+TRUE, FALSE, True, False.
+Duration flags accept any input valid for time.ParseDuration.
+
+The default set of command-line flags is controlled by
+top-level functions. The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
+*/
+package pflag
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"sort"
+	"strings"
+)
+
+// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined.
+var ErrHelp = errors.New("pflag: help requested")
+
+// ErrorHandling defines how to handle flag parsing errors.
+type ErrorHandling int
+
+const (
+	// ContinueOnError will return an err from Parse() if an error is found
+	ContinueOnError ErrorHandling = iota
+	// ExitOnError will call os.Exit(2) if an error is found when parsing
+	ExitOnError
+	// PanicOnError will panic() if an error is found when parsing flags
+	PanicOnError
+)
+
+// NormalizedName is a flag name that has been normalized according to rules
+// for the FlagSet (e.g. making '-' and '_' equivalent).
+type NormalizedName string
+
+// A FlagSet represents a set of defined flags.
+type FlagSet struct {
+	// Usage is the function called when an error occurs while parsing flags.
+	// The field is a function (not a method) that may be changed to point to
+	// a custom error handler.
+	Usage func()
+
+	// SortFlags indicates whether the user wants flags sorted in
+	// help/usage messages.
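+	// Setting it to false keeps flags in the order they were defined when
+	// printing help (see the README's "Disable sorting of flags" section).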
+	SortFlags bool
+
+	name              string
+	parsed            bool
+	actual            map[NormalizedName]*Flag
+	orderedActual     []*Flag
+	sortedActual      []*Flag
+	formal            map[NormalizedName]*Flag
+	orderedFormal     []*Flag
+	sortedFormal      []*Flag
+	shorthands        map[byte]*Flag
+	args              []string // arguments after flags
+	argsLenAtDash     int      // len(args) when a '--' was located when parsing, or -1 if no --
+	errorHandling     ErrorHandling
+	output            io.Writer // nil means stderr; use out() accessor
+	interspersed      bool      // allow interspersed option/non-option args
+	normalizeNameFunc func(f *FlagSet, name string) NormalizedName
+}
+
+// A Flag represents the state of a flag.
+type Flag struct {
+	Name                string              // name as it appears on command line
+	Shorthand           string              // one-letter abbreviated flag
+	Usage               string              // help message
+	Value               Value               // value as set
+	DefValue            string              // default value (as text); for usage message
+	Changed             bool                // true if the user set the value (false if left to default)
+	NoOptDefVal         string              // default value (as text); if the flag is on the command line without any options
+	Deprecated          string              // If this flag is deprecated, this string is the message suggesting what to use instead
+	Hidden              bool                // used by cobra.Command to allow flags to be hidden from help/usage text
+	ShorthandDeprecated string              // If the shorthand of this flag is deprecated, this string is the message suggesting what to use instead
+	Annotations         map[string][]string // used by cobra.Command bash autocompletion code
+}
+
+// Value is the interface to the dynamic value stored in a flag.
+// (The default value is represented as a string.)
+type Value interface {
+	String() string
+	Set(string) error
+	Type() string
+}
+
+// sortFlags returns the flags as a slice in lexicographical sorted order.
+func sortFlags(flags map[NormalizedName]*Flag) []*Flag {
+	list := make(sort.StringSlice, len(flags))
+	i := 0
+	for k := range flags {
+		list[i] = string(k)
+		i++
+	}
+	list.Sort()
+	result := make([]*Flag, len(list))
+	for i, name := range list {
+		result[i] = flags[NormalizedName(name)]
+	}
+	return result
+}
+
+// SetNormalizeFunc allows you to add a function which can translate flag names.
+// Flags added to the FlagSet will be translated and then when anything tries to
+// look up the flag that will also be translated. So it would be possible to create
+// a flag named "getURL" and have it translated to "geturl". A user could then pass
+// "--getUrl" which may also be translated to "geturl" and everything will work.
+func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) {
+	f.normalizeNameFunc = n
+	f.sortedFormal = f.sortedFormal[:0]
+	for k, v := range f.orderedFormal {
+		delete(f.formal, NormalizedName(v.Name))
+		nname := f.normalizeFlagName(v.Name)
+		v.Name = string(nname)
+		f.formal[nname] = v
+		f.orderedFormal[k] = v
+	}
+}
+
+// GetNormalizeFunc returns the previously set NormalizeFunc, or a function
+// that does no translation if none was set.
+func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName {
+	if f.normalizeNameFunc != nil {
+		return f.normalizeNameFunc
+	}
+	return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) }
+}
+
+func (f *FlagSet) normalizeFlagName(name string) NormalizedName {
+	n := f.GetNormalizeFunc()
+	return n(f, name)
+}
+
+func (f *FlagSet) out() io.Writer {
+	if f.output == nil {
+		return os.Stderr
+	}
+	return f.output
+}
+
+// SetOutput sets the destination for usage and error messages.
+// If output is nil, os.Stderr is used.
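+// For example, f.SetOutput(ioutil.Discard) suppresses parse error messages
+// (the package's test helper ResetForTesting discards output the same way).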
+func (f *FlagSet) SetOutput(output io.Writer) {
+	f.output = output
+}
+
+// VisitAll visits the flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits all flags, even those not set.
+func (f *FlagSet) VisitAll(fn func(*Flag)) {
+	if len(f.formal) == 0 {
+		return
+	}
+
+	var flags []*Flag
+	if f.SortFlags {
+		if len(f.formal) != len(f.sortedFormal) {
+			f.sortedFormal = sortFlags(f.formal)
+		}
+		flags = f.sortedFormal
+	} else {
+		flags = f.orderedFormal
+	}
+
+	for _, flag := range flags {
+		fn(flag)
+	}
+}
+
+// HasFlags returns a bool to indicate if the FlagSet has any flags defined.
+func (f *FlagSet) HasFlags() bool {
+	return len(f.formal) > 0
+}
+
+// HasAvailableFlags returns a bool to indicate if the FlagSet has any flags
+// defined that are not hidden or deprecated.
+func (f *FlagSet) HasAvailableFlags() bool {
+	for _, flag := range f.formal {
+		if !flag.Hidden && len(flag.Deprecated) == 0 {
+			return true
+		}
+	}
+	return false
+}
+
+// VisitAll visits the command-line flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits all flags, even those not set.
+func VisitAll(fn func(*Flag)) {
+	CommandLine.VisitAll(fn)
+}
+
+// Visit visits the flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits only those flags that have been set.
+func (f *FlagSet) Visit(fn func(*Flag)) {
+	if len(f.actual) == 0 {
+		return
+	}
+
+	var flags []*Flag
+	if f.SortFlags {
+		if len(f.actual) != len(f.sortedActual) {
+			f.sortedActual = sortFlags(f.actual)
+		}
+		flags = f.sortedActual
+	} else {
+		flags = f.orderedActual
+	}
+
+	for _, flag := range flags {
+		fn(flag)
+	}
+}
+
+// Visit visits the command-line flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits only those flags that have been set.
+func Visit(fn func(*Flag)) {
+	CommandLine.Visit(fn)
+}
+
+// Lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) Lookup(name string) *Flag {
+	return f.lookup(f.normalizeFlagName(name))
+}
+
+// ShorthandLookup returns the Flag structure of the shorthand flag,
+// returning nil if none exists.
+// It panics if len(name) > 1.
+func (f *FlagSet) ShorthandLookup(name string) *Flag {
+	if name == "" {
+		return nil
+	}
+	if len(name) > 1 {
+		msg := fmt.Sprintf("can not look up shorthand which is more than one ASCII character: %q", name)
+		fmt.Fprint(f.out(), msg)
+		panic(msg)
+	}
+	c := name[0]
+	return f.shorthands[c]
+}
+
+// lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) lookup(name NormalizedName) *Flag {
+	return f.formal[name]
+}
+
+// func to return a given type for a given flag name
+func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) {
+	flag := f.Lookup(name)
+	if flag == nil {
+		err := fmt.Errorf("flag accessed but not defined: %s", name)
+		return nil, err
+	}
+
+	if flag.Value.Type() != ftype {
+		err := fmt.Errorf("trying to get %s value of flag of type %s", ftype, flag.Value.Type())
+		return nil, err
+	}
+
+	sval := flag.Value.String()
+	result, err := convFunc(sval)
+	if err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+// ArgsLenAtDash will return the length of f.Args at the moment when a -- was
+// found during arg parsing. 
This allows your program to know which args were
+// before the -- and which came after.
+func (f *FlagSet) ArgsLenAtDash() int {
+	return f.argsLenAtDash
+}
+
+// MarkDeprecated indicates that a flag is deprecated in your program. It will
+// continue to function but will not show up in help or usage messages. Using
+// this flag will also print the given usageMessage.
+func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error {
+	flag := f.Lookup(name)
+	if flag == nil {
+		return fmt.Errorf("flag %q does not exist", name)
+	}
+	if usageMessage == "" {
+		return fmt.Errorf("deprecated message for flag %q must be set", name)
+	}
+	flag.Deprecated = usageMessage
+	return nil
+}
+
+// MarkShorthandDeprecated will mark the shorthand of a flag deprecated in your
+// program. It will continue to function but will not show up in help or usage
+// messages. Using this flag will also print the given usageMessage.
+func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error {
+	flag := f.Lookup(name)
+	if flag == nil {
+		return fmt.Errorf("flag %q does not exist", name)
+	}
+	if usageMessage == "" {
+		return fmt.Errorf("deprecated message for flag %q must be set", name)
+	}
+	flag.ShorthandDeprecated = usageMessage
+	return nil
+}
+
+// MarkHidden sets a flag to 'hidden' in your program. It will continue to
+// function but will not show up in help or usage messages.
+func (f *FlagSet) MarkHidden(name string) error {
+	flag := f.Lookup(name)
+	if flag == nil {
+		return fmt.Errorf("flag %q does not exist", name)
+	}
+	flag.Hidden = true
+	return nil
+}
+
+// Lookup returns the Flag structure of the named command-line flag,
+// returning nil if none exists.
+func Lookup(name string) *Flag {
+	return CommandLine.Lookup(name)
+}
+
+// ShorthandLookup returns the Flag structure of the shorthand flag,
+// returning nil if none exists.
+func ShorthandLookup(name string) *Flag {
+	return CommandLine.ShorthandLookup(name)
+}
+
+// Set sets the value of the named flag.
+func (f *FlagSet) Set(name, value string) error {
+	normalName := f.normalizeFlagName(name)
+	flag, ok := f.formal[normalName]
+	if !ok {
+		return fmt.Errorf("no such flag -%v", name)
+	}
+
+	err := flag.Value.Set(value)
+	if err != nil {
+		var flagName string
+		if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
+			flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name)
+		} else {
+			flagName = fmt.Sprintf("--%s", flag.Name)
+		}
+		return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err)
+	}
+
+	if f.actual == nil {
+		f.actual = make(map[NormalizedName]*Flag)
+	}
+	f.actual[normalName] = flag
+	f.orderedActual = append(f.orderedActual, flag)
+
+	flag.Changed = true
+
+	if flag.Deprecated != "" {
+		fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
+	}
+	return nil
+}
+
+// SetAnnotation allows one to set arbitrary annotations on a flag in the FlagSet.
+// This is sometimes used by spf13/cobra programs which want to generate additional
+// bash completion information.
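+// Illustrative sketch (the annotation key here is hypothetical):
+//
+//	fs.SetAnnotation("output", "my_completion_key", []string{"json", "yaml"})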
+func (f *FlagSet) SetAnnotation(name, key string, values []string) error {
+	normalName := f.normalizeFlagName(name)
+	flag, ok := f.formal[normalName]
+	if !ok {
+		return fmt.Errorf("no such flag -%v", name)
+	}
+	if flag.Annotations == nil {
+		flag.Annotations = map[string][]string{}
+	}
+	flag.Annotations[key] = values
+	return nil
+}
+
+// Changed returns true if the flag was explicitly set during Parse() and false
+// otherwise.
+func (f *FlagSet) Changed(name string) bool {
+	flag := f.Lookup(name)
+	// If a flag doesn't exist, it wasn't changed.
+	if flag == nil {
+		return false
+	}
+	return flag.Changed
+}
+
+// Set sets the value of the named command-line flag.
+func Set(name, value string) error {
+	return CommandLine.Set(name, value)
+}
+
+// PrintDefaults prints, to standard error unless configured
+// otherwise, the default values of all defined flags in the set.
+func (f *FlagSet) PrintDefaults() {
+	usages := f.FlagUsages()
+	fmt.Fprint(f.out(), usages)
+}
+
+// defaultIsZeroValue returns true if the default value for this flag represents
+// a zero value.
+func (f *Flag) defaultIsZeroValue() bool {
+	switch f.Value.(type) {
+	case boolFlag:
+		return f.DefValue == "false"
+	case *durationValue:
+		// Beginning in Go 1.7, duration zero values are "0s"
+		return f.DefValue == "0" || f.DefValue == "0s"
+	case *intValue, *int8Value, *int32Value, *int64Value, *uintValue, *uint8Value, *uint16Value, *uint32Value, *uint64Value, *countValue, *float32Value, *float64Value:
+		return f.DefValue == "0"
+	case *stringValue:
+		return f.DefValue == ""
+	case *ipValue, *ipMaskValue, *ipNetValue:
+		return f.DefValue == ""
+	case *intSliceValue, *stringSliceValue, *stringArrayValue:
+		return f.DefValue == "[]"
+	default:
+		switch f.Value.String() {
+		case "false":
+			return true
+		case "<nil>":
+			return true
+		case "":
+			return true
+		case "0":
+			return true
+		}
+		return false
+	}
+}
+
+// UnquoteUsage extracts a back-quoted name from the usage
+// string for a flag and returns it and the un-quoted usage.
+// Given "a `name` to show" it returns ("name", "a name to show").
+// If there are no back quotes, the name is an educated guess of the
+// type of the flag's value, or the empty string if the flag is boolean.
+func UnquoteUsage(flag *Flag) (name string, usage string) {
+	// Look for a back-quoted name, but avoid the strings package.
+	usage = flag.Usage
+	for i := 0; i < len(usage); i++ {
+		if usage[i] == '`' {
+			for j := i + 1; j < len(usage); j++ {
+				if usage[j] == '`' {
+					name = usage[i+1 : j]
+					usage = usage[:i] + name + usage[j+1:]
+					return name, usage
+				}
+			}
+			break // Only one back quote; use type name.
+		}
+	}
+
+	name = flag.Value.Type()
+	switch name {
+	case "bool":
+		name = ""
+	case "float64":
+		name = "float"
+	case "int64":
+		name = "int"
+	case "uint64":
+		name = "uint"
+	}
+
+	return
+}
+
+// Splits the string `s` on whitespace into an initial substring up to
+// `i` runes in length and the remainder. Will go `slop` over `i` if
+// that encompasses the entire string (which allows the caller to
+// avoid short orphan words on the final line).
+func wrapN(i, slop int, s string) (string, string) {
+	if i+slop > len(s) {
+		return s, ""
+	}
+
+	w := strings.LastIndexAny(s[:i], " \t")
+	if w <= 0 {
+		return s, ""
+	}
+
+	return s[:w], s[w+1:]
+}
+
+// Wraps the string `s` to a maximum width `w` with leading indent
+// `i`. The first line is not indented (this is assumed to be done by
+// caller). 
Pass `w` == 0 to do no wrapping +func wrap(i, w int, s string) string { + if w == 0 { + return s + } + + // space between indent i and end of line width w into which + // we should wrap the text. + wrap := w - i + + var r, l string + + // Not enough space for sensible wrapping. Wrap as a block on + // the next line instead. + if wrap < 24 { + i = 16 + wrap = w - i + r += "\n" + strings.Repeat(" ", i) + } + // If still not enough space then don't even try to wrap. + if wrap < 24 { + return s + } + + // Try to avoid short orphan words on the final line, by + // allowing wrapN to go a bit over if that would fit in the + // remainder of the line. + slop := 5 + wrap = wrap - slop + + // Handle first line, which is indented by the caller (or the + // special case above) + l, s = wrapN(wrap, slop, s) + r = r + l + + // Now wrap the rest + for s != "" { + var t string + + t, s = wrapN(wrap, slop, s) + r = r + "\n" + strings.Repeat(" ", i) + t + } + + return r + +} + +// FlagUsagesWrapped returns a string containing the usage information +// for all flags in the FlagSet. Wrapped to `cols` columns (0 for no +// wrapping) +func (f *FlagSet) FlagUsagesWrapped(cols int) string { + buf := new(bytes.Buffer) + + lines := make([]string, 0, len(f.formal)) + + maxlen := 0 + f.VisitAll(func(flag *Flag) { + if flag.Deprecated != "" || flag.Hidden { + return + } + + line := "" + if flag.Shorthand != "" && flag.ShorthandDeprecated == "" { + line = fmt.Sprintf(" -%s, --%s", flag.Shorthand, flag.Name) + } else { + line = fmt.Sprintf(" --%s", flag.Name) + } + + varname, usage := UnquoteUsage(flag) + if varname != "" { + line += " " + varname + } + if flag.NoOptDefVal != "" { + switch flag.Value.Type() { + case "string": + line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal) + case "bool": + if flag.NoOptDefVal != "true" { + line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) + } + default: + line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) + } + } + + // This special character will be replaced with spacing once the + // correct alignment is calculated + line += "\x00" + if len(line) > maxlen { + maxlen = len(line) + } + + line += usage + if !flag.defaultIsZeroValue() { + if flag.Value.Type() == "string" { + line += fmt.Sprintf(" (default %q)", flag.DefValue) + } else { + line += fmt.Sprintf(" (default %s)", flag.DefValue) + } + } + + lines = append(lines, line) + }) + + for _, line := range lines { + sidx := strings.Index(line, "\x00") + spacing := strings.Repeat(" ", maxlen-sidx) + // maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx + fmt.Fprintln(buf, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:])) + } + + return buf.String() +} + +// FlagUsages returns a string containing the usage information for all flags in +// the FlagSet +func (f *FlagSet) FlagUsages() string { + return f.FlagUsagesWrapped(0) +} + +// PrintDefaults prints to standard error the default values of all defined command-line flags. +func PrintDefaults() { + CommandLine.PrintDefaults() +} + +// defaultUsage is the default function to print a usage message. +func defaultUsage(f *FlagSet) { + fmt.Fprintf(f.out(), "Usage of %s:\n", f.name) + f.PrintDefaults() +} + +// NOTE: Usage is not just defaultUsage(CommandLine) +// because it serves (via godoc flag Usage) as the example +// for how to write your own usage function. + +// Usage prints to standard error a usage message documenting all defined command-line flags. +// The function is a variable that may be changed to point to a custom function. 
+// By default it prints a simple header and calls PrintDefaults; for details about the +// format of the output and how to control it, see the documentation for PrintDefaults. +var Usage = func() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + PrintDefaults() +} + +// NFlag returns the number of flags that have been set. +func (f *FlagSet) NFlag() int { return len(f.actual) } + +// NFlag returns the number of command-line flags that have been set. +func NFlag() int { return len(CommandLine.actual) } + +// Arg returns the i'th argument. Arg(0) is the first remaining argument +// after flags have been processed. +func (f *FlagSet) Arg(i int) string { + if i < 0 || i >= len(f.args) { + return "" + } + return f.args[i] +} + +// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument +// after flags have been processed. +func Arg(i int) string { + return CommandLine.Arg(i) +} + +// NArg is the number of arguments remaining after flags have been processed. +func (f *FlagSet) NArg() int { return len(f.args) } + +// NArg is the number of arguments remaining after flags have been processed. +func NArg() int { return len(CommandLine.args) } + +// Args returns the non-flag arguments. +func (f *FlagSet) Args() []string { return f.args } + +// Args returns the non-flag command-line arguments. +func Args() []string { return CommandLine.args } + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. +func (f *FlagSet) Var(value Value, name string, usage string) { + f.VarP(value, name, "", usage) +} + +// VarPF is like VarP, but returns the flag created +func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag { + // Remember the default value as a string; it won't change. + flag := &Flag{ + Name: name, + Shorthand: shorthand, + Usage: usage, + Value: value, + DefValue: value.String(), + } + f.AddFlag(flag) + return flag +} + +// VarP is like Var, but accepts a shorthand letter that can be used after a single dash. 
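+//
+// For example (editor's sketch; myListValue is a hypothetical type that
+// implements Value, along the lines of the flagVar type in the tests):
+//
+//	var v myListValue
+//	fs := NewFlagSet("demo", ContinueOnError)
+//	fs.VarP(&v, "item", "i", "repeatable item")
+//	_ = fs.Parse([]string{"-i", "a", "--item=b"}) // Set is called once per occurrence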
+func (f *FlagSet) VarP(value Value, name, shorthand, usage string) { + f.VarPF(value, name, shorthand, usage) +} + +// AddFlag will add the flag to the FlagSet +func (f *FlagSet) AddFlag(flag *Flag) { + normalizedFlagName := f.normalizeFlagName(flag.Name) + + _, alreadyThere := f.formal[normalizedFlagName] + if alreadyThere { + msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name) + fmt.Fprintln(f.out(), msg) + panic(msg) // Happens only if flags are declared with identical names + } + if f.formal == nil { + f.formal = make(map[NormalizedName]*Flag) + } + + flag.Name = string(normalizedFlagName) + f.formal[normalizedFlagName] = flag + f.orderedFormal = append(f.orderedFormal, flag) + + if flag.Shorthand == "" { + return + } + if len(flag.Shorthand) > 1 { + msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand) + fmt.Fprintf(f.out(), msg) + panic(msg) + } + if f.shorthands == nil { + f.shorthands = make(map[byte]*Flag) + } + c := flag.Shorthand[0] + used, alreadyThere := f.shorthands[c] + if alreadyThere { + msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name) + fmt.Fprintf(f.out(), msg) + panic(msg) + } + f.shorthands[c] = flag +} + +// AddFlagSet adds one FlagSet to another. If a flag is already present in f +// the flag from newSet will be ignored. +func (f *FlagSet) AddFlagSet(newSet *FlagSet) { + if newSet == nil { + return + } + newSet.VisitAll(func(flag *Flag) { + if f.Lookup(flag.Name) == nil { + f.AddFlag(flag) + } + }) +} + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. +func Var(value Value, name string, usage string) { + CommandLine.VarP(value, name, "", usage) +} + +// VarP is like Var, but accepts a shorthand letter that can be used after a single dash. +func VarP(value Value, name, shorthand, usage string) { + CommandLine.VarP(value, name, shorthand, usage) +} + +// failf prints to standard error a formatted error and usage message and +// returns the error. +func (f *FlagSet) failf(format string, a ...interface{}) error { + err := fmt.Errorf(format, a...) + fmt.Fprintln(f.out(), err) + f.usage() + return err +} + +// usage calls the Usage method for the flag set, or the usage function if +// the flag set is CommandLine. +func (f *FlagSet) usage() { + if f == CommandLine { + Usage() + } else if f.Usage == nil { + defaultUsage(f) + } else { + f.Usage() + } +} + +func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) { + a = args + name := s[2:] + if len(name) == 0 || name[0] == '-' || name[0] == '=' { + err = f.failf("bad flag syntax: %s", s) + return + } + + split := strings.SplitN(name, "=", 2) + name = split[0] + flag, exists := f.formal[f.normalizeFlagName(name)] + if !exists { + if name == "help" { // special case for nice help message. 
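+			// Editor's note: "--help" is only special-cased when the program has
+			// not defined a "help" flag itself; usage is printed and ErrHelp returned.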
+ f.usage() + return a, ErrHelp + } + err = f.failf("unknown flag: --%s", name) + return + } + + var value string + if len(split) == 2 { + // '--flag=arg' + value = split[1] + } else if flag.NoOptDefVal != "" { + // '--flag' (arg was optional) + value = flag.NoOptDefVal + } else if len(a) > 0 { + // '--flag arg' + value = a[0] + a = a[1:] + } else { + // '--flag' (arg was required) + err = f.failf("flag needs an argument: %s", s) + return + } + + err = fn(flag, value) + return +} + +func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) { + if strings.HasPrefix(shorthands, "test.") { + return + } + + outArgs = args + outShorts = shorthands[1:] + c := shorthands[0] + + flag, exists := f.shorthands[c] + if !exists { + if c == 'h' { // special case for nice help message. + f.usage() + err = ErrHelp + return + } + err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands) + return + } + + var value string + if len(shorthands) > 2 && shorthands[1] == '=' { + // '-f=arg' + value = shorthands[2:] + outShorts = "" + } else if flag.NoOptDefVal != "" { + // '-f' (arg was optional) + value = flag.NoOptDefVal + } else if len(shorthands) > 1 { + // '-farg' + value = shorthands[1:] + outShorts = "" + } else if len(args) > 0 { + // '-f arg' + value = args[0] + outArgs = args[1:] + } else { + // '-f' (arg was required) + err = f.failf("flag needs an argument: %q in -%s", c, shorthands) + return + } + + if flag.ShorthandDeprecated != "" { + fmt.Fprintf(f.out(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated) + } + + err = fn(flag, value) + return +} + +func (f *FlagSet) parseShortArg(s string, args []string, fn parseFunc) (a []string, err error) { + a = args + shorthands := s[1:] + + // "shorthands" can be a series of shorthand letters of flags (e.g. "-vvv"). + for len(shorthands) > 0 { + shorthands, a, err = f.parseSingleShortArg(shorthands, args, fn) + if err != nil { + return + } + } + + return +} + +func (f *FlagSet) parseArgs(args []string, fn parseFunc) (err error) { + for len(args) > 0 { + s := args[0] + args = args[1:] + if len(s) == 0 || s[0] != '-' || len(s) == 1 { + if !f.interspersed { + f.args = append(f.args, s) + f.args = append(f.args, args...) + return nil + } + f.args = append(f.args, s) + continue + } + + if s[1] == '-' { + if len(s) == 2 { // "--" terminates the flags + f.argsLenAtDash = len(f.args) + f.args = append(f.args, args...) + break + } + args, err = f.parseLongArg(s, args, fn) + } else { + args, err = f.parseShortArg(s, args, fn) + } + if err != nil { + return + } + } + return +} + +// Parse parses flag definitions from the argument list, which should not +// include the command name. Must be called after all flags in the FlagSet +// are defined and before flags are accessed by the program. +// The return value will be ErrHelp if -help was set but not defined. 
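+//
+// A minimal sketch of typical use (editor's example, not from upstream docs):
+//
+//	fs := NewFlagSet("demo", ContinueOnError)
+//	verbose := fs.BoolP("verbose", "v", false, "verbose output")
+//	if err := fs.Parse([]string{"-v", "positional"}); err != nil {
+//		// with ContinueOnError the error (possibly ErrHelp) is returned here
+//	}
+//	_ = *verbose  // true
+//	_ = fs.Args() // ["positional"]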
+func (f *FlagSet) Parse(arguments []string) error {
+	f.parsed = true
+
+	if len(arguments) == 0 {
+		return nil
+	}
+
+	f.args = make([]string, 0, len(arguments))
+
+	set := func(flag *Flag, value string) error {
+		return f.Set(flag.Name, value)
+	}
+
+	err := f.parseArgs(arguments, set)
+	if err != nil {
+		switch f.errorHandling {
+		case ContinueOnError:
+			return err
+		case ExitOnError:
+			os.Exit(2)
+		case PanicOnError:
+			panic(err)
+		}
+	}
+	return nil
+}
+
+type parseFunc func(flag *Flag, value string) error
+
+// ParseAll parses flag definitions from the argument list, which should not
+// include the command name. The arguments for fn are flag and value. Must be
+// called after all flags in the FlagSet are defined and before flags are
+// accessed by the program. The return value will be ErrHelp if -help was set
+// but not defined.
+func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) error) error {
+	f.parsed = true
+	f.args = make([]string, 0, len(arguments))
+
+	err := f.parseArgs(arguments, fn)
+	if err != nil {
+		switch f.errorHandling {
+		case ContinueOnError:
+			return err
+		case ExitOnError:
+			os.Exit(2)
+		case PanicOnError:
+			panic(err)
+		}
+	}
+	return nil
+}
+
+// Parsed reports whether f.Parse has been called.
+func (f *FlagSet) Parsed() bool {
+	return f.parsed
+}
+
+// Parse parses the command-line flags from os.Args[1:]. Must be called
+// after all flags are defined and before flags are accessed by the program.
+func Parse() {
+	// Ignore errors; CommandLine is set for ExitOnError.
+	CommandLine.Parse(os.Args[1:])
+}
+
+// ParseAll parses the command-line flags from os.Args[1:] and calls fn for each.
+// The arguments for fn are flag and value. Must be called after all flags are
+// defined and before flags are accessed by the program.
+func ParseAll(fn func(flag *Flag, value string) error) {
+	// Ignore errors; CommandLine is set for ExitOnError.
+	CommandLine.ParseAll(os.Args[1:], fn)
+}
+
+// SetInterspersed sets whether to support interspersed option/non-option arguments.
+func SetInterspersed(interspersed bool) {
+	CommandLine.SetInterspersed(interspersed)
+}
+
+// Parsed returns true if the command-line flags have been parsed.
+func Parsed() bool {
+	return CommandLine.Parsed()
+}
+
+// CommandLine is the default set of command-line flags, parsed from os.Args.
+var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
+
+// NewFlagSet returns a new, empty flag set with the specified name,
+// error handling property and SortFlags set to true.
+func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
+	f := &FlagSet{
+		name:          name,
+		errorHandling: errorHandling,
+		argsLenAtDash: -1,
+		interspersed:  true,
+		SortFlags:     true,
+	}
+	return f
+}
+
+// SetInterspersed sets whether to support interspersed option/non-option arguments.
+func (f *FlagSet) SetInterspersed(interspersed bool) {
+	f.interspersed = interspersed
+}
+
+// Init sets the name and error handling property for a flag set.
+// By default, the zero FlagSet uses an empty name and the
+// ContinueOnError error handling policy.
+func (f *FlagSet) Init(name string, errorHandling ErrorHandling) {
+	f.name = name
+	f.errorHandling = errorHandling
+	f.argsLenAtDash = -1
+}
diff --git a/vendor/github.com/spf13/pflag/flag_test.go b/vendor/github.com/spf13/pflag/flag_test.go
new file mode 100644
index 0000000..c3def0f
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/flag_test.go
@@ -0,0 +1,1085 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pflag + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "reflect" + "sort" + "strconv" + "strings" + "testing" + "time" +) + +var ( + testBool = Bool("test_bool", false, "bool value") + testInt = Int("test_int", 0, "int value") + testInt64 = Int64("test_int64", 0, "int64 value") + testUint = Uint("test_uint", 0, "uint value") + testUint64 = Uint64("test_uint64", 0, "uint64 value") + testString = String("test_string", "0", "string value") + testFloat = Float64("test_float64", 0, "float64 value") + testDuration = Duration("test_duration", 0, "time.Duration value") + testOptionalInt = Int("test_optional_int", 0, "optional int value") + normalizeFlagNameInvocations = 0 +) + +func boolString(s string) string { + if s == "0" { + return "false" + } + return "true" +} + +func TestEverything(t *testing.T) { + m := make(map[string]*Flag) + desired := "0" + visitor := func(f *Flag) { + if len(f.Name) > 5 && f.Name[0:5] == "test_" { + m[f.Name] = f + ok := false + switch { + case f.Value.String() == desired: + ok = true + case f.Name == "test_bool" && f.Value.String() == boolString(desired): + ok = true + case f.Name == "test_duration" && f.Value.String() == desired+"s": + ok = true + } + if !ok { + t.Error("Visit: bad value", f.Value.String(), "for", f.Name) + } + } + } + VisitAll(visitor) + if len(m) != 9 { + t.Error("VisitAll misses some flags") + for k, v := range m { + t.Log(k, *v) + } + } + m = make(map[string]*Flag) + Visit(visitor) + if len(m) != 0 { + t.Errorf("Visit sees unset flags") + for k, v := range m { + t.Log(k, *v) + } + } + // Now set all flags + Set("test_bool", "true") + Set("test_int", "1") + Set("test_int64", "1") + Set("test_uint", "1") + Set("test_uint64", "1") + Set("test_string", "1") + Set("test_float64", "1") + Set("test_duration", "1s") + Set("test_optional_int", "1") + desired = "1" + Visit(visitor) + if len(m) != 9 { + t.Error("Visit fails after set") + for k, v := range m { + t.Log(k, *v) + } + } + // Now test they're visited in sort order. 
+	var flagNames []string
+	Visit(func(f *Flag) { flagNames = append(flagNames, f.Name) })
+	if !sort.StringsAreSorted(flagNames) {
+		t.Errorf("flag names not sorted: %v", flagNames)
+	}
+}
+
+func TestUsage(t *testing.T) {
+	called := false
+	ResetForTesting(func() { called = true })
+	if GetCommandLine().Parse([]string{"--x"}) == nil {
+		t.Error("parse did not fail for unknown flag")
+	}
+	if !called {
+		t.Error("did not call Usage for unknown flag")
+	}
+}
+
+func TestAddFlagSet(t *testing.T) {
+	oldSet := NewFlagSet("old", ContinueOnError)
+	newSet := NewFlagSet("new", ContinueOnError)
+
+	oldSet.String("flag1", "flag1", "flag1")
+	oldSet.String("flag2", "flag2", "flag2")
+
+	newSet.String("flag2", "flag2", "flag2")
+	newSet.String("flag3", "flag3", "flag3")
+
+	oldSet.AddFlagSet(newSet)
+
+	if len(oldSet.formal) != 3 {
+		t.Errorf("Unexpected result adding a FlagSet to a FlagSet %v", oldSet)
+	}
+}
+
+func TestAnnotation(t *testing.T) {
+	f := NewFlagSet("shorthand", ContinueOnError)
+
+	if err := f.SetAnnotation("missing-flag", "key", nil); err == nil {
+		t.Errorf("Expected error setting annotation on non-existent flag")
+	}
+
+	f.StringP("stringa", "a", "", "string value")
+	if err := f.SetAnnotation("stringa", "key", nil); err != nil {
+		t.Errorf("Unexpected error setting new nil annotation: %v", err)
+	}
+	if annotation := f.Lookup("stringa").Annotations["key"]; annotation != nil {
+		t.Errorf("Unexpected annotation: %v", annotation)
+	}
+
+	f.StringP("stringb", "b", "", "string2 value")
+	if err := f.SetAnnotation("stringb", "key", []string{"value1"}); err != nil {
+		t.Errorf("Unexpected error setting new annotation: %v", err)
+	}
+	if annotation := f.Lookup("stringb").Annotations["key"]; !reflect.DeepEqual(annotation, []string{"value1"}) {
+		t.Errorf("Unexpected annotation: %v", annotation)
+	}
+
+	if err := f.SetAnnotation("stringb", "key", []string{"value2"}); err != nil {
+		t.Errorf("Unexpected error updating annotation: %v", err)
+	}
+	if annotation := f.Lookup("stringb").Annotations["key"]; !reflect.DeepEqual(annotation, []string{"value2"}) {
+		t.Errorf("Unexpected annotation: %v", annotation)
+	}
+}
+
+func testParse(f *FlagSet, t *testing.T) {
+	if f.Parsed() {
+		t.Error("f.Parse() = true before Parse")
+	}
+	boolFlag := f.Bool("bool", false, "bool value")
+	bool2Flag := f.Bool("bool2", false, "bool2 value")
+	bool3Flag := f.Bool("bool3", false, "bool3 value")
+	intFlag := f.Int("int", 0, "int value")
+	int8Flag := f.Int8("int8", 0, "int value")
+	int32Flag := f.Int32("int32", 0, "int value")
+	int64Flag := f.Int64("int64", 0, "int64 value")
+	uintFlag := f.Uint("uint", 0, "uint value")
+	uint8Flag := f.Uint8("uint8", 0, "uint value")
+	uint16Flag := f.Uint16("uint16", 0, "uint value")
+	uint32Flag := f.Uint32("uint32", 0, "uint value")
+	uint64Flag := f.Uint64("uint64", 0, "uint64 value")
+	stringFlag := f.String("string", "0", "string value")
+	float32Flag := f.Float32("float32", 0, "float32 value")
+	float64Flag := f.Float64("float64", 0, "float64 value")
+	ipFlag := f.IP("ip", net.ParseIP("127.0.0.1"), "ip value")
+	maskFlag := f.IPMask("mask", ParseIPv4Mask("0.0.0.0"), "mask value")
+	durationFlag := f.Duration("duration", 5*time.Second, "time.Duration value")
+	optionalIntNoValueFlag := f.Int("optional-int-no-value", 0, "int value")
+	f.Lookup("optional-int-no-value").NoOptDefVal = "9"
+	optionalIntWithValueFlag := f.Int("optional-int-with-value", 0, "int value")
+	f.Lookup("optional-int-with-value").NoOptDefVal = "9"
+	extra := "one-extra-argument"
+	args := []string{
"--bool", + "--bool2=true", + "--bool3=false", + "--int=22", + "--int8=-8", + "--int32=-32", + "--int64=0x23", + "--uint", "24", + "--uint8=8", + "--uint16=16", + "--uint32=32", + "--uint64=25", + "--string=hello", + "--float32=-172e12", + "--float64=2718e28", + "--ip=10.11.12.13", + "--mask=255.255.255.0", + "--duration=2m", + "--optional-int-no-value", + "--optional-int-with-value=42", + extra, + } + if err := f.Parse(args); err != nil { + t.Fatal(err) + } + if !f.Parsed() { + t.Error("f.Parse() = false after Parse") + } + if *boolFlag != true { + t.Error("bool flag should be true, is ", *boolFlag) + } + if v, err := f.GetBool("bool"); err != nil || v != *boolFlag { + t.Error("GetBool does not work.") + } + if *bool2Flag != true { + t.Error("bool2 flag should be true, is ", *bool2Flag) + } + if *bool3Flag != false { + t.Error("bool3 flag should be false, is ", *bool2Flag) + } + if *intFlag != 22 { + t.Error("int flag should be 22, is ", *intFlag) + } + if v, err := f.GetInt("int"); err != nil || v != *intFlag { + t.Error("GetInt does not work.") + } + if *int8Flag != -8 { + t.Error("int8 flag should be 0x23, is ", *int8Flag) + } + if v, err := f.GetInt8("int8"); err != nil || v != *int8Flag { + t.Error("GetInt8 does not work.") + } + if *int32Flag != -32 { + t.Error("int32 flag should be 0x23, is ", *int32Flag) + } + if v, err := f.GetInt32("int32"); err != nil || v != *int32Flag { + t.Error("GetInt32 does not work.") + } + if *int64Flag != 0x23 { + t.Error("int64 flag should be 0x23, is ", *int64Flag) + } + if v, err := f.GetInt64("int64"); err != nil || v != *int64Flag { + t.Error("GetInt64 does not work.") + } + if *uintFlag != 24 { + t.Error("uint flag should be 24, is ", *uintFlag) + } + if v, err := f.GetUint("uint"); err != nil || v != *uintFlag { + t.Error("GetUint does not work.") + } + if *uint8Flag != 8 { + t.Error("uint8 flag should be 8, is ", *uint8Flag) + } + if v, err := f.GetUint8("uint8"); err != nil || v != *uint8Flag { + t.Error("GetUint8 does not work.") + } + if *uint16Flag != 16 { + t.Error("uint16 flag should be 16, is ", *uint16Flag) + } + if v, err := f.GetUint16("uint16"); err != nil || v != *uint16Flag { + t.Error("GetUint16 does not work.") + } + if *uint32Flag != 32 { + t.Error("uint32 flag should be 32, is ", *uint32Flag) + } + if v, err := f.GetUint32("uint32"); err != nil || v != *uint32Flag { + t.Error("GetUint32 does not work.") + } + if *uint64Flag != 25 { + t.Error("uint64 flag should be 25, is ", *uint64Flag) + } + if v, err := f.GetUint64("uint64"); err != nil || v != *uint64Flag { + t.Error("GetUint64 does not work.") + } + if *stringFlag != "hello" { + t.Error("string flag should be `hello`, is ", *stringFlag) + } + if v, err := f.GetString("string"); err != nil || v != *stringFlag { + t.Error("GetString does not work.") + } + if *float32Flag != -172e12 { + t.Error("float32 flag should be -172e12, is ", *float32Flag) + } + if v, err := f.GetFloat32("float32"); err != nil || v != *float32Flag { + t.Errorf("GetFloat32 returned %v but float32Flag was %v", v, *float32Flag) + } + if *float64Flag != 2718e28 { + t.Error("float64 flag should be 2718e28, is ", *float64Flag) + } + if v, err := f.GetFloat64("float64"); err != nil || v != *float64Flag { + t.Errorf("GetFloat64 returned %v but float64Flag was %v", v, *float64Flag) + } + if !(*ipFlag).Equal(net.ParseIP("10.11.12.13")) { + t.Error("ip flag should be 10.11.12.13, is ", *ipFlag) + } + if v, err := f.GetIP("ip"); err != nil || !v.Equal(*ipFlag) { + t.Errorf("GetIP returned %v but ipFlag was %v", v, 
*ipFlag) + } + if (*maskFlag).String() != ParseIPv4Mask("255.255.255.0").String() { + t.Error("mask flag should be 255.255.255.0, is ", (*maskFlag).String()) + } + if v, err := f.GetIPv4Mask("mask"); err != nil || v.String() != (*maskFlag).String() { + t.Errorf("GetIP returned %v maskFlag was %v error was %v", v, *maskFlag, err) + } + if *durationFlag != 2*time.Minute { + t.Error("duration flag should be 2m, is ", *durationFlag) + } + if v, err := f.GetDuration("duration"); err != nil || v != *durationFlag { + t.Error("GetDuration does not work.") + } + if _, err := f.GetInt("duration"); err == nil { + t.Error("GetInt parsed a time.Duration?!?!") + } + if *optionalIntNoValueFlag != 9 { + t.Error("optional int flag should be the default value, is ", *optionalIntNoValueFlag) + } + if *optionalIntWithValueFlag != 42 { + t.Error("optional int flag should be 42, is ", *optionalIntWithValueFlag) + } + if len(f.Args()) != 1 { + t.Error("expected one argument, got", len(f.Args())) + } else if f.Args()[0] != extra { + t.Errorf("expected argument %q got %q", extra, f.Args()[0]) + } +} + +func testParseAll(f *FlagSet, t *testing.T) { + if f.Parsed() { + t.Error("f.Parse() = true before Parse") + } + f.BoolP("boola", "a", false, "bool value") + f.BoolP("boolb", "b", false, "bool2 value") + f.BoolP("boolc", "c", false, "bool3 value") + f.BoolP("boold", "d", false, "bool4 value") + f.StringP("stringa", "s", "0", "string value") + f.StringP("stringz", "z", "0", "string value") + f.StringP("stringx", "x", "0", "string value") + f.StringP("stringy", "y", "0", "string value") + f.Lookup("stringx").NoOptDefVal = "1" + args := []string{ + "-ab", + "-cs=xx", + "--stringz=something", + "-d=true", + "-x", + "-y", + "ee", + } + want := []string{ + "boola", "true", + "boolb", "true", + "boolc", "true", + "stringa", "xx", + "stringz", "something", + "boold", "true", + "stringx", "1", + "stringy", "ee", + } + got := []string{} + store := func(flag *Flag, value string) error { + got = append(got, flag.Name) + if len(value) > 0 { + got = append(got, value) + } + return nil + } + if err := f.ParseAll(args, store); err != nil { + t.Errorf("expected no error, got %s", err) + } + if !f.Parsed() { + t.Errorf("f.Parse() = false after Parse") + } + if !reflect.DeepEqual(got, want) { + t.Errorf("f.ParseAll() fail to restore the args") + t.Errorf("Got: %v", got) + t.Errorf("Want: %v", want) + } +} + +func TestShorthand(t *testing.T) { + f := NewFlagSet("shorthand", ContinueOnError) + if f.Parsed() { + t.Error("f.Parse() = true before Parse") + } + boolaFlag := f.BoolP("boola", "a", false, "bool value") + boolbFlag := f.BoolP("boolb", "b", false, "bool2 value") + boolcFlag := f.BoolP("boolc", "c", false, "bool3 value") + booldFlag := f.BoolP("boold", "d", false, "bool4 value") + stringaFlag := f.StringP("stringa", "s", "0", "string value") + stringzFlag := f.StringP("stringz", "z", "0", "string value") + extra := "interspersed-argument" + notaflag := "--i-look-like-a-flag" + args := []string{ + "-ab", + extra, + "-cs", + "hello", + "-z=something", + "-d=true", + "--", + notaflag, + } + f.SetOutput(ioutil.Discard) + if err := f.Parse(args); err != nil { + t.Error("expected no error, got ", err) + } + if !f.Parsed() { + t.Error("f.Parse() = false after Parse") + } + if *boolaFlag != true { + t.Error("boola flag should be true, is ", *boolaFlag) + } + if *boolbFlag != true { + t.Error("boolb flag should be true, is ", *boolbFlag) + } + if *boolcFlag != true { + t.Error("boolc flag should be true, is ", *boolcFlag) + } + if 
*booldFlag != true {
+		t.Error("boold flag should be true, is ", *booldFlag)
+	}
+	if *stringaFlag != "hello" {
+		t.Error("stringa flag should be `hello`, is ", *stringaFlag)
+	}
+	if *stringzFlag != "something" {
+		t.Error("stringz flag should be `something`, is ", *stringzFlag)
+	}
+	if len(f.Args()) != 2 {
+		t.Error("expected two arguments, got", len(f.Args()))
+	} else if f.Args()[0] != extra {
+		t.Errorf("expected argument %q got %q", extra, f.Args()[0])
+	} else if f.Args()[1] != notaflag {
+		t.Errorf("expected argument %q got %q", notaflag, f.Args()[1])
+	}
+	if f.ArgsLenAtDash() != 1 {
+		t.Errorf("expected argsLenAtDash %d got %d", 1, f.ArgsLenAtDash())
+	}
+}
+
+func TestShorthandLookup(t *testing.T) {
+	f := NewFlagSet("shorthand", ContinueOnError)
+	if f.Parsed() {
+		t.Error("f.Parse() = true before Parse")
+	}
+	f.BoolP("boola", "a", false, "bool value")
+	f.BoolP("boolb", "b", false, "bool2 value")
+	args := []string{
+		"-ab",
+	}
+	f.SetOutput(ioutil.Discard)
+	if err := f.Parse(args); err != nil {
+		t.Error("expected no error, got ", err)
+	}
+	if !f.Parsed() {
+		t.Error("f.Parse() = false after Parse")
+	}
+	flag := f.ShorthandLookup("a")
+	if flag == nil {
+		t.Errorf("f.ShorthandLookup(\"a\") returned nil")
+	}
+	if flag.Name != "boola" {
+		t.Errorf("f.ShorthandLookup(\"a\") found %q instead of \"boola\"", flag.Name)
+	}
+	flag = f.ShorthandLookup("")
+	if flag != nil {
+		t.Errorf("f.ShorthandLookup(\"\") did not return nil")
+	}
+	defer func() {
+		recover()
+	}()
+	flag = f.ShorthandLookup("ab")
+	// should NEVER get here. lookup should panic. defer'd func should recover it.
+	t.Errorf("f.ShorthandLookup(\"ab\") did not panic")
+}
+
+func TestParse(t *testing.T) {
+	ResetForTesting(func() { t.Error("bad parse") })
+	testParse(GetCommandLine(), t)
+}
+
+func TestParseAll(t *testing.T) {
+	ResetForTesting(func() { t.Error("bad parse") })
+	testParseAll(GetCommandLine(), t)
+}
+
+func TestFlagSetParse(t *testing.T) {
+	testParse(NewFlagSet("test", ContinueOnError), t)
+}
+
+func TestChangedHelper(t *testing.T) {
+	f := NewFlagSet("changedtest", ContinueOnError)
+	f.Bool("changed", false, "changed bool")
+	f.Bool("settrue", true, "true to true")
+	f.Bool("setfalse", false, "false to false")
+	f.Bool("unchanged", false, "unchanged bool")
+
+	args := []string{"--changed", "--settrue", "--setfalse=false"}
+	if err := f.Parse(args); err != nil {
+		t.Errorf("unexpected parse error: %v", err)
+	}
+	if !f.Changed("changed") {
+		t.Errorf("--changed wasn't changed!")
+	}
+	if !f.Changed("settrue") {
+		t.Errorf("--settrue wasn't changed!")
+	}
+	if !f.Changed("setfalse") {
+		t.Errorf("--setfalse wasn't changed!")
+	}
+	if f.Changed("unchanged") {
+		t.Errorf("--unchanged was changed!")
+	}
+	if f.Changed("invalid") {
+		t.Errorf("--invalid was changed!")
+	}
+	if f.ArgsLenAtDash() != -1 {
+		t.Errorf("Expected argsLenAtDash: %d but got %d", -1, f.ArgsLenAtDash())
+	}
+}
+
+func replaceSeparators(name string, from []string, to string) string {
+	result := name
+	for _, sep := range from {
+		result = strings.Replace(result, sep, to, -1)
+	}
+	// Type convert to indicate normalization has been done.
+	return result
+}
+
+func wordSepNormalizeFunc(f *FlagSet, name string) NormalizedName {
+	seps := []string{"-", "_"}
+	name = replaceSeparators(name, seps, ".")
+	normalizeFlagNameInvocations++
+
+	return NormalizedName(name)
+}
+
+func testWordSepNormalizedNames(args []string, t *testing.T) {
+	f := NewFlagSet("normalized", ContinueOnError)
+	if f.Parsed() {
+		t.Error("f.Parse() = true before Parse")
+	}
+	withDashFlag := f.Bool("with-dash-flag", false, "bool value")
+	// Set this after some flags have been added and before others.
+	f.SetNormalizeFunc(wordSepNormalizeFunc)
+	withUnderFlag := f.Bool("with_under_flag", false, "bool value")
+	withBothFlag := f.Bool("with-both_flag", false, "bool value")
+	if err := f.Parse(args); err != nil {
+		t.Fatal(err)
+	}
+	if !f.Parsed() {
+		t.Error("f.Parse() = false after Parse")
+	}
+	if *withDashFlag != true {
+		t.Error("withDashFlag flag should be true, is ", *withDashFlag)
+	}
+	if *withUnderFlag != true {
+		t.Error("withUnderFlag flag should be true, is ", *withUnderFlag)
+	}
+	if *withBothFlag != true {
+		t.Error("withBothFlag flag should be true, is ", *withBothFlag)
+	}
+}
+
+func TestWordSepNormalizedNames(t *testing.T) {
+	args := []string{
+		"--with-dash-flag",
+		"--with-under-flag",
+		"--with-both-flag",
+	}
+	testWordSepNormalizedNames(args, t)
+
+	args = []string{
+		"--with_dash_flag",
+		"--with_under_flag",
+		"--with_both_flag",
+	}
+	testWordSepNormalizedNames(args, t)
+
+	args = []string{
+		"--with-dash_flag",
+		"--with-under_flag",
+		"--with-both_flag",
+	}
+	testWordSepNormalizedNames(args, t)
+}
+
+func aliasAndWordSepFlagNames(f *FlagSet, name string) NormalizedName {
+	seps := []string{"-", "_"}
+
+	oldName := replaceSeparators("old-valid_flag", seps, ".")
+	newName := replaceSeparators("valid-flag", seps, ".")
+
+	name = replaceSeparators(name, seps, ".")
+	switch name {
+	case oldName:
+		name = newName
+		break
+	}
+
+	return NormalizedName(name)
+}
+
+func TestCustomNormalizedNames(t *testing.T) {
+	f := NewFlagSet("normalized", ContinueOnError)
+	if f.Parsed() {
+		t.Error("f.Parse() = true before Parse")
+	}
+
+	validFlag := f.Bool("valid-flag", false, "bool value")
+	f.SetNormalizeFunc(aliasAndWordSepFlagNames)
+	someOtherFlag := f.Bool("some-other-flag", false, "bool value")
+
+	args := []string{"--old_valid_flag", "--some-other_flag"}
+	if err := f.Parse(args); err != nil {
+		t.Fatal(err)
+	}
+
+	if *validFlag != true {
+		t.Errorf("validFlag is %v even though we set the alias --old_valid_flag", *validFlag)
+	}
+	if *someOtherFlag != true {
+		t.Error("someOtherFlag should be true, is ", *someOtherFlag)
+	}
+}
+
+// Every flag we add should have its name (also displayed in usage) normalized.
+func TestNormalizationFuncShouldChangeFlagName(t *testing.T) {
+	// Test normalization after addition
+	f := NewFlagSet("normalized", ContinueOnError)
+
+	f.Bool("valid_flag", false, "bool value")
+	if f.Lookup("valid_flag").Name != "valid_flag" {
+		t.Error("The new flag should have the name 'valid_flag' instead of ", f.Lookup("valid_flag").Name)
+	}
+
+	f.SetNormalizeFunc(wordSepNormalizeFunc)
+	if f.Lookup("valid_flag").Name != "valid.flag" {
+		t.Error("The new flag should have the name 'valid.flag' instead of ", f.Lookup("valid_flag").Name)
+	}
+
+	// Test normalization before addition
+	f = NewFlagSet("normalized", ContinueOnError)
+	f.SetNormalizeFunc(wordSepNormalizeFunc)
+
+	f.Bool("valid_flag", false, "bool value")
+	if f.Lookup("valid_flag").Name != "valid.flag" {
+		t.Error("The new flag should have the name 'valid.flag' instead of ", f.Lookup("valid_flag").Name)
+	}
+}
+
+// Declare a user-defined flag type.
+type flagVar []string
+
+func (f *flagVar) String() string {
+	return fmt.Sprint([]string(*f))
+}
+
+func (f *flagVar) Set(value string) error {
+	*f = append(*f, value)
+	return nil
+}
+
+func (f *flagVar) Type() string {
+	return "flagVar"
+}
+
+func TestUserDefined(t *testing.T) {
+	var flags FlagSet
+	flags.Init("test", ContinueOnError)
+	var v flagVar
+	flags.VarP(&v, "v", "v", "usage")
+	if err := flags.Parse([]string{"--v=1", "-v2", "-v", "3"}); err != nil {
+		t.Error(err)
+	}
+	if len(v) != 3 {
+		t.Fatal("expected 3 args; got ", len(v))
+	}
+	expect := "[1 2 3]"
+	if v.String() != expect {
+		t.Errorf("expected value %q got %q", expect, v.String())
+	}
+}
+
+func TestSetOutput(t *testing.T) {
+	var flags FlagSet
+	var buf bytes.Buffer
+	flags.SetOutput(&buf)
+	flags.Init("test", ContinueOnError)
+	flags.Parse([]string{"--unknown"})
+	if out := buf.String(); !strings.Contains(out, "--unknown") {
+		t.Errorf("expected output mentioning unknown; got %q", out)
+	}
+}
+
+// This tests that one can reset the flags. This still works but not well, and is
+// superseded by FlagSet.
+func TestChangingArgs(t *testing.T) {
+	ResetForTesting(func() { t.Fatal("bad parse") })
+	oldArgs := os.Args
+	defer func() { os.Args = oldArgs }()
+	os.Args = []string{"cmd", "--before", "subcmd"}
+	before := Bool("before", false, "")
+	if err := GetCommandLine().Parse(os.Args[1:]); err != nil {
+		t.Fatal(err)
+	}
+	cmd := Arg(0)
+	os.Args = []string{"subcmd", "--after", "args"}
+	after := Bool("after", false, "")
+	Parse()
+	args := Args()
+
+	if !*before || cmd != "subcmd" || !*after || len(args) != 1 || args[0] != "args" {
+		t.Fatalf("expected true subcmd true [args] got %v %v %v %v", *before, cmd, *after, args)
+	}
+}
+
+// Test that -help invokes the usage message and returns ErrHelp.
+func TestHelp(t *testing.T) {
+	var helpCalled = false
+	fs := NewFlagSet("help test", ContinueOnError)
+	fs.Usage = func() { helpCalled = true }
+	var flag bool
+	fs.BoolVar(&flag, "flag", false, "regular flag")
+	// Regular flag invocation should work
+	err := fs.Parse([]string{"--flag=true"})
+	if err != nil {
+		t.Fatal("expected no error; got ", err)
+	}
+	if !flag {
+		t.Error("flag was not set by --flag")
+	}
+	if helpCalled {
+		t.Error("help called for regular flag")
+		helpCalled = false // reset for next test
+	}
+	// Help flag should work as expected.
+	err = fs.Parse([]string{"--help"})
+	if err == nil {
+		t.Fatal("error expected")
+	}
+	if err != ErrHelp {
+		t.Fatal("expected ErrHelp; got ", err)
+	}
+	if !helpCalled {
+		t.Fatal("help was not called")
+	}
+	// If we define a help flag, that should override.
+ var help bool + fs.BoolVar(&help, "help", false, "help flag") + helpCalled = false + err = fs.Parse([]string{"--help"}) + if err != nil { + t.Fatal("expected no error for defined --help; got ", err) + } + if helpCalled { + t.Fatal("help was called; should not have been for defined help flag") + } +} + +func TestNoInterspersed(t *testing.T) { + f := NewFlagSet("test", ContinueOnError) + f.SetInterspersed(false) + f.Bool("true", true, "always true") + f.Bool("false", false, "always false") + err := f.Parse([]string{"--true", "break", "--false"}) + if err != nil { + t.Fatal("expected no error; got ", err) + } + args := f.Args() + if len(args) != 2 || args[0] != "break" || args[1] != "--false" { + t.Fatal("expected interspersed options/non-options to fail") + } +} + +func TestTermination(t *testing.T) { + f := NewFlagSet("termination", ContinueOnError) + boolFlag := f.BoolP("bool", "l", false, "bool value") + if f.Parsed() { + t.Error("f.Parse() = true before Parse") + } + arg1 := "ls" + arg2 := "-l" + args := []string{ + "--", + arg1, + arg2, + } + f.SetOutput(ioutil.Discard) + if err := f.Parse(args); err != nil { + t.Fatal("expected no error; got ", err) + } + if !f.Parsed() { + t.Error("f.Parse() = false after Parse") + } + if *boolFlag { + t.Error("expected boolFlag=false, got true") + } + if len(f.Args()) != 2 { + t.Errorf("expected 2 arguments, got %d: %v", len(f.Args()), f.Args()) + } + if f.Args()[0] != arg1 { + t.Errorf("expected argument %q got %q", arg1, f.Args()[0]) + } + if f.Args()[1] != arg2 { + t.Errorf("expected argument %q got %q", arg2, f.Args()[1]) + } + if f.ArgsLenAtDash() != 0 { + t.Errorf("expected argsLenAtDash %d got %d", 0, f.ArgsLenAtDash()) + } +} + +func TestDeprecatedFlagInDocs(t *testing.T) { + f := NewFlagSet("bob", ContinueOnError) + f.Bool("badflag", true, "always true") + f.MarkDeprecated("badflag", "use --good-flag instead") + + out := new(bytes.Buffer) + f.SetOutput(out) + f.PrintDefaults() + + if strings.Contains(out.String(), "badflag") { + t.Errorf("found deprecated flag in usage!") + } +} + +func TestDeprecatedFlagShorthandInDocs(t *testing.T) { + f := NewFlagSet("bob", ContinueOnError) + name := "noshorthandflag" + f.BoolP(name, "n", true, "always true") + f.MarkShorthandDeprecated("noshorthandflag", fmt.Sprintf("use --%s instead", name)) + + out := new(bytes.Buffer) + f.SetOutput(out) + f.PrintDefaults() + + if strings.Contains(out.String(), "-n,") { + t.Errorf("found deprecated flag shorthand in usage!") + } +} + +func parseReturnStderr(t *testing.T, f *FlagSet, args []string) (string, error) { + oldStderr := os.Stderr + r, w, _ := os.Pipe() + os.Stderr = w + + err := f.Parse(args) + + outC := make(chan string) + // copy the output in a separate goroutine so printing can't block indefinitely + go func() { + var buf bytes.Buffer + io.Copy(&buf, r) + outC <- buf.String() + }() + + w.Close() + os.Stderr = oldStderr + out := <-outC + + return out, err +} + +func TestDeprecatedFlagUsage(t *testing.T) { + f := NewFlagSet("bob", ContinueOnError) + f.Bool("badflag", true, "always true") + usageMsg := "use --good-flag instead" + f.MarkDeprecated("badflag", usageMsg) + + args := []string{"--badflag"} + out, err := parseReturnStderr(t, f, args) + if err != nil { + t.Fatal("expected no error; got ", err) + } + + if !strings.Contains(out, usageMsg) { + t.Errorf("usageMsg not printed when using a deprecated flag!") + } +} + +func TestDeprecatedFlagShorthandUsage(t *testing.T) { + f := NewFlagSet("bob", ContinueOnError) + name := "noshorthandflag" + 
f.BoolP(name, "n", true, "always true") + usageMsg := fmt.Sprintf("use --%s instead", name) + f.MarkShorthandDeprecated(name, usageMsg) + + args := []string{"-n"} + out, err := parseReturnStderr(t, f, args) + if err != nil { + t.Fatal("expected no error; got ", err) + } + + if !strings.Contains(out, usageMsg) { + t.Errorf("usageMsg not printed when using a deprecated flag!") + } +} + +func TestDeprecatedFlagUsageNormalized(t *testing.T) { + f := NewFlagSet("bob", ContinueOnError) + f.Bool("bad-double_flag", true, "always true") + f.SetNormalizeFunc(wordSepNormalizeFunc) + usageMsg := "use --good-flag instead" + f.MarkDeprecated("bad_double-flag", usageMsg) + + args := []string{"--bad_double_flag"} + out, err := parseReturnStderr(t, f, args) + if err != nil { + t.Fatal("expected no error; got ", err) + } + + if !strings.Contains(out, usageMsg) { + t.Errorf("usageMsg not printed when using a deprecated flag!") + } +} + +// Name normalization function should be called only once on flag addition +func TestMultipleNormalizeFlagNameInvocations(t *testing.T) { + normalizeFlagNameInvocations = 0 + + f := NewFlagSet("normalized", ContinueOnError) + f.SetNormalizeFunc(wordSepNormalizeFunc) + f.Bool("with_under_flag", false, "bool value") + + if normalizeFlagNameInvocations != 1 { + t.Fatal("Expected normalizeFlagNameInvocations to be 1; got ", normalizeFlagNameInvocations) + } +} + +// +func TestHiddenFlagInUsage(t *testing.T) { + f := NewFlagSet("bob", ContinueOnError) + f.Bool("secretFlag", true, "shhh") + f.MarkHidden("secretFlag") + + out := new(bytes.Buffer) + f.SetOutput(out) + f.PrintDefaults() + + if strings.Contains(out.String(), "secretFlag") { + t.Errorf("found hidden flag in usage!") + } +} + +// +func TestHiddenFlagUsage(t *testing.T) { + f := NewFlagSet("bob", ContinueOnError) + f.Bool("secretFlag", true, "shhh") + f.MarkHidden("secretFlag") + + args := []string{"--secretFlag"} + out, err := parseReturnStderr(t, f, args) + if err != nil { + t.Fatal("expected no error; got ", err) + } + + if strings.Contains(out, "shhh") { + t.Errorf("usage message printed when using a hidden flag!") + } +} + +const defaultOutput = ` --A for bootstrapping, allow 'any' type + --Alongflagname disable bounds checking + -C, --CCC a boolean defaulting to true (default true) + --D path set relative path for local imports + -E, --EEE num[=1234] a num with NoOptDefVal (default 4321) + --F number a non-zero number (default 2.7) + --G float a float that defaults to zero + --IP ip IP address with no default + --IPMask ipMask Netmask address with no default + --IPNet ipNet IP network with no default + --Ints intSlice int slice with zero default + --N int a non-zero int (default 27) + --ND1 string[="bar"] a string with NoOptDefVal (default "foo") + --ND2 num[=4321] a num with NoOptDefVal (default 1234) + --StringArray stringArray string array with zero default + --StringSlice stringSlice string slice with zero default + --Z int an int that defaults to zero + --custom custom custom Value implementation + --customP custom a VarP with default (default 10) + --maxT timeout set timeout for dial +` + +// Custom value that satisfies the Value interface. 
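+// For reference (editor's summary), a Value must implement the three
+// methods exercised below:
+//
+//	type Value interface {
+//		String() string
+//		Set(string) error
+//		Type() string
+//	}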
+type customValue int + +func (cv *customValue) String() string { return fmt.Sprintf("%v", *cv) } + +func (cv *customValue) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *cv = customValue(v) + return err +} + +func (cv *customValue) Type() string { return "custom" } + +func TestPrintDefaults(t *testing.T) { + fs := NewFlagSet("print defaults test", ContinueOnError) + var buf bytes.Buffer + fs.SetOutput(&buf) + fs.Bool("A", false, "for bootstrapping, allow 'any' type") + fs.Bool("Alongflagname", false, "disable bounds checking") + fs.BoolP("CCC", "C", true, "a boolean defaulting to true") + fs.String("D", "", "set relative `path` for local imports") + fs.Float64("F", 2.7, "a non-zero `number`") + fs.Float64("G", 0, "a float that defaults to zero") + fs.Int("N", 27, "a non-zero int") + fs.IntSlice("Ints", []int{}, "int slice with zero default") + fs.IP("IP", nil, "IP address with no default") + fs.IPMask("IPMask", nil, "Netmask address with no default") + fs.IPNet("IPNet", net.IPNet{}, "IP network with no default") + fs.Int("Z", 0, "an int that defaults to zero") + fs.Duration("maxT", 0, "set `timeout` for dial") + fs.String("ND1", "foo", "a string with NoOptDefVal") + fs.Lookup("ND1").NoOptDefVal = "bar" + fs.Int("ND2", 1234, "a `num` with NoOptDefVal") + fs.Lookup("ND2").NoOptDefVal = "4321" + fs.IntP("EEE", "E", 4321, "a `num` with NoOptDefVal") + fs.ShorthandLookup("E").NoOptDefVal = "1234" + fs.StringSlice("StringSlice", []string{}, "string slice with zero default") + fs.StringArray("StringArray", []string{}, "string array with zero default") + + var cv customValue + fs.Var(&cv, "custom", "custom Value implementation") + + cv2 := customValue(10) + fs.VarP(&cv2, "customP", "", "a VarP with default") + + fs.PrintDefaults() + got := buf.String() + if got != defaultOutput { + fmt.Println("\n" + got) + fmt.Println("\n" + defaultOutput) + t.Errorf("got %q want %q\n", got, defaultOutput) + } +} + +func TestVisitAllFlagOrder(t *testing.T) { + fs := NewFlagSet("TestVisitAllFlagOrder", ContinueOnError) + fs.SortFlags = false + // https://github.com/spf13/pflag/issues/120 + fs.SetNormalizeFunc(func(f *FlagSet, name string) NormalizedName { + return NormalizedName(name) + }) + + names := []string{"C", "B", "A", "D"} + for _, name := range names { + fs.Bool(name, false, "") + } + + i := 0 + fs.VisitAll(func(f *Flag) { + if names[i] != f.Name { + t.Errorf("Incorrect order. Expected %v, got %v", names[i], f.Name) + } + i++ + }) +} + +func TestVisitFlagOrder(t *testing.T) { + fs := NewFlagSet("TestVisitFlagOrder", ContinueOnError) + fs.SortFlags = false + names := []string{"C", "B", "A", "D"} + for _, name := range names { + fs.Bool(name, false, "") + fs.Set(name, "true") + } + + i := 0 + fs.Visit(func(f *Flag) { + if names[i] != f.Name { + t.Errorf("Incorrect order. 
Expected %v, got %v", names[i], f.Name) + } + i++ + }) +} diff --git a/vendor/github.com/spf13/pflag/float32.go b/vendor/github.com/spf13/pflag/float32.go new file mode 100644 index 0000000..a243f81 --- /dev/null +++ b/vendor/github.com/spf13/pflag/float32.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- float32 Value +type float32Value float32 + +func newFloat32Value(val float32, p *float32) *float32Value { + *p = val + return (*float32Value)(p) +} + +func (f *float32Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 32) + *f = float32Value(v) + return err +} + +func (f *float32Value) Type() string { + return "float32" +} + +func (f *float32Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 32) } + +func float32Conv(sval string) (interface{}, error) { + v, err := strconv.ParseFloat(sval, 32) + if err != nil { + return 0, err + } + return float32(v), nil +} + +// GetFloat32 return the float32 value of a flag with the given name +func (f *FlagSet) GetFloat32(name string) (float32, error) { + val, err := f.getFlagType(name, "float32", float32Conv) + if err != nil { + return 0, err + } + return val.(float32), nil +} + +// Float32Var defines a float32 flag with specified name, default value, and usage string. +// The argument p points to a float32 variable in which to store the value of the flag. +func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) { + f.VarP(newFloat32Value(value, p), name, "", usage) +} + +// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) { + f.VarP(newFloat32Value(value, p), name, shorthand, usage) +} + +// Float32Var defines a float32 flag with specified name, default value, and usage string. +// The argument p points to a float32 variable in which to store the value of the flag. +func Float32Var(p *float32, name string, value float32, usage string) { + CommandLine.VarP(newFloat32Value(value, p), name, "", usage) +} + +// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash. +func Float32VarP(p *float32, name, shorthand string, value float32, usage string) { + CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage) +} + +// Float32 defines a float32 flag with specified name, default value, and usage string. +// The return value is the address of a float32 variable that stores the value of the flag. +func (f *FlagSet) Float32(name string, value float32, usage string) *float32 { + p := new(float32) + f.Float32VarP(p, name, "", value, usage) + return p +} + +// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 { + p := new(float32) + f.Float32VarP(p, name, shorthand, value, usage) + return p +} + +// Float32 defines a float32 flag with specified name, default value, and usage string. +// The return value is the address of a float32 variable that stores the value of the flag. +func Float32(name string, value float32, usage string) *float32 { + return CommandLine.Float32P(name, "", value, usage) +} + +// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash. 
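+//
+// Sketch of shorthand use (editor's example):
+//
+//	fs := NewFlagSet("demo", ContinueOnError)
+//	ratio := fs.Float32P("ratio", "r", 1.0, "scaling ratio")
+//	_ = fs.Parse([]string{"-r", "0.5"})
+//	_ = *ratio // 0.5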
+func Float32P(name, shorthand string, value float32, usage string) *float32 { + return CommandLine.Float32P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/float64.go b/vendor/github.com/spf13/pflag/float64.go new file mode 100644 index 0000000..04b5492 --- /dev/null +++ b/vendor/github.com/spf13/pflag/float64.go @@ -0,0 +1,84 @@ +package pflag + +import "strconv" + +// -- float64 Value +type float64Value float64 + +func newFloat64Value(val float64, p *float64) *float64Value { + *p = val + return (*float64Value)(p) +} + +func (f *float64Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 64) + *f = float64Value(v) + return err +} + +func (f *float64Value) Type() string { + return "float64" +} + +func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) } + +func float64Conv(sval string) (interface{}, error) { + return strconv.ParseFloat(sval, 64) +} + +// GetFloat64 return the float64 value of a flag with the given name +func (f *FlagSet) GetFloat64(name string) (float64, error) { + val, err := f.getFlagType(name, "float64", float64Conv) + if err != nil { + return 0, err + } + return val.(float64), nil +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) { + f.VarP(newFloat64Value(value, p), name, "", usage) +} + +// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) { + f.VarP(newFloat64Value(value, p), name, shorthand, usage) +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func Float64Var(p *float64, name string, value float64, usage string) { + CommandLine.VarP(newFloat64Value(value, p), name, "", usage) +} + +// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash. +func Float64VarP(p *float64, name, shorthand string, value float64, usage string) { + CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage) +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func (f *FlagSet) Float64(name string, value float64, usage string) *float64 { + p := new(float64) + f.Float64VarP(p, name, "", value, usage) + return p +} + +// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 { + p := new(float64) + f.Float64VarP(p, name, shorthand, value, usage) + return p +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func Float64(name string, value float64, usage string) *float64 { + return CommandLine.Float64P(name, "", value, usage) +} + +// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash. 
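+//
+// Unlike the FlagSet methods above, the package-level functions operate on
+// CommandLine; from an importing package this looks like (editor's sketch):
+//
+//	timeout := pflag.Float64P("timeout", "t", 30, "timeout in seconds")
+//	pflag.Parse()
+//	_ = *timeout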
+func Float64P(name, shorthand string, value float64, usage string) *float64 {
+	return CommandLine.Float64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go
new file mode 100644
index 0000000..c4f47eb
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/golangflag.go
@@ -0,0 +1,101 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pflag
+
+import (
+	goflag "flag"
+	"reflect"
+	"strings"
+)
+
+// flagValueWrapper implements pflag.Value around a flag.Value. The main
+// difference here is the addition of the Type method that returns a string
+// name of the type. As this is generally unknown, we approximate that with
+// reflection.
+type flagValueWrapper struct {
+	inner    goflag.Value
+	flagType string
+}
+
+// We are just copying the boolFlag interface out of goflag as that is what
+// they use to decide if a flag should get "true" when no arg is given.
+type goBoolFlag interface {
+	goflag.Value
+	IsBoolFlag() bool
+}
+
+func wrapFlagValue(v goflag.Value) Value {
+	// If the flag.Value happens to also be a pflag.Value, just use it directly.
+	if pv, ok := v.(Value); ok {
+		return pv
+	}
+
+	pv := &flagValueWrapper{
+		inner: v,
+	}
+
+	t := reflect.TypeOf(v)
+	if t.Kind() == reflect.Interface || t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	}
+
+	pv.flagType = strings.TrimSuffix(t.Name(), "Value")
+	return pv
+}
+
+func (v *flagValueWrapper) String() string {
+	return v.inner.String()
+}
+
+func (v *flagValueWrapper) Set(s string) error {
+	return v.inner.Set(s)
+}
+
+func (v *flagValueWrapper) Type() string {
+	return v.flagType
+}
+
+// PFlagFromGoFlag will return a *pflag.Flag given a *flag.Flag.
+// If the *flag.Flag.Name was a single character (ex: `v`) it will be accessible
+// with both `-v` and `--v` in flags. If the golang flag was more than a single
+// character (ex: `verbose`) it will only be accessible via `--verbose`.
+func PFlagFromGoFlag(goflag *goflag.Flag) *Flag {
+	// Remember the default value as a string; it won't change.
+	flag := &Flag{
+		Name:  goflag.Name,
+		Usage: goflag.Usage,
+		Value: wrapFlagValue(goflag.Value),
+		// Looks like golang flags don't set DefValue correctly :-(
+		//DefValue: goflag.DefValue,
+		DefValue: goflag.Value.String(),
+	}
+	// Ex: if the golang flag was -v, allow both -v and --v to work
+	if len(flag.Name) == 1 {
+		flag.Shorthand = flag.Name
+	}
+	if fv, ok := goflag.Value.(goBoolFlag); ok && fv.IsBoolFlag() {
+		flag.NoOptDefVal = "true"
+	}
+	return flag
+}
+
+// AddGoFlag will add the given *flag.Flag to the pflag.FlagSet
+func (f *FlagSet) AddGoFlag(goflag *goflag.Flag) {
+	if f.Lookup(goflag.Name) != nil {
+		return
+	}
+	newflag := PFlagFromGoFlag(goflag)
+	f.AddFlag(newflag)
+}
+
+// AddGoFlagSet will add the given *flag.FlagSet to the pflag.FlagSet
+func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) {
+	if newSet == nil {
+		return
+	}
+	newSet.VisitAll(func(goflag *goflag.Flag) {
+		f.AddGoFlag(goflag)
+	})
+}
diff --git a/vendor/github.com/spf13/pflag/golangflag_test.go b/vendor/github.com/spf13/pflag/golangflag_test.go
new file mode 100644
index 0000000..77e2d7d
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/golangflag_test.go
@@ -0,0 +1,39 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pflag
+
+import (
+    goflag "flag"
+    "testing"
+)
+
+func TestGoflags(t *testing.T) {
+    goflag.String("stringFlag", "stringFlag", "stringFlag")
+    goflag.Bool("boolFlag", false, "boolFlag")
+
+    f := NewFlagSet("test", ContinueOnError)
+
+    f.AddGoFlagSet(goflag.CommandLine)
+    err := f.Parse([]string{"--stringFlag=bob", "--boolFlag"})
+    if err != nil {
+        t.Fatal("expected no error; got", err)
+    }
+
+    getString, err := f.GetString("stringFlag")
+    if err != nil {
+        t.Fatal("expected no error; got", err)
+    }
+    if getString != "bob" {
+        t.Fatalf("expected getString=bob but got getString=%s", getString)
+    }
+
+    getBool, err := f.GetBool("boolFlag")
+    if err != nil {
+        t.Fatal("expected no error; got", err)
+    }
+    if getBool != true {
+        t.Fatalf("expected getBool=true but got getBool=%v", getBool)
+    }
+}
diff --git a/vendor/github.com/spf13/pflag/int.go b/vendor/github.com/spf13/pflag/int.go
new file mode 100644
index 0000000..1474b89
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- int Value
+type intValue int
+
+func newIntValue(val int, p *int) *intValue {
+    *p = val
+    return (*intValue)(p)
+}
+
+func (i *intValue) Set(s string) error {
+    v, err := strconv.ParseInt(s, 0, 64)
+    *i = intValue(v)
+    return err
+}
+
+func (i *intValue) Type() string {
+    return "int"
+}
+
+func (i *intValue) String() string { return strconv.Itoa(int(*i)) }
+
+func intConv(sval string) (interface{}, error) {
+    return strconv.Atoi(sval)
+}
+
+// GetInt return the int value of a flag with the given name
+func (f *FlagSet) GetInt(name string) (int, error) {
+    val, err := f.getFlagType(name, "int", intConv)
+    if err != nil {
+        return 0, err
+    }
+    return val.(int), nil
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func (f *FlagSet) IntVar(p *int, name string, value int, usage string) {
+    f.VarP(newIntValue(value, p), name, "", usage)
+}
+
+// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) {
+    f.VarP(newIntValue(value, p), name, shorthand, usage)
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func IntVar(p *int, name string, value int, usage string) {
+    CommandLine.VarP(newIntValue(value, p), name, "", usage)
+}
+
+// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
+func IntVarP(p *int, name, shorthand string, value int, usage string) {
+    CommandLine.VarP(newIntValue(value, p), name, shorthand, usage)
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func (f *FlagSet) Int(name string, value int, usage string) *int {
+    p := new(int)
+    f.IntVarP(p, name, "", value, usage)
+    return p
+}
+
+// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int {
+    p := new(int)
+    f.IntVarP(p, name, shorthand, value, usage)
+    return p
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag. +func Int(name string, value int, usage string) *int { + return CommandLine.IntP(name, "", value, usage) +} + +// IntP is like Int, but accepts a shorthand letter that can be used after a single dash. +func IntP(name, shorthand string, value int, usage string) *int { + return CommandLine.IntP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int32.go b/vendor/github.com/spf13/pflag/int32.go new file mode 100644 index 0000000..9b95944 --- /dev/null +++ b/vendor/github.com/spf13/pflag/int32.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- int32 Value +type int32Value int32 + +func newInt32Value(val int32, p *int32) *int32Value { + *p = val + return (*int32Value)(p) +} + +func (i *int32Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 32) + *i = int32Value(v) + return err +} + +func (i *int32Value) Type() string { + return "int32" +} + +func (i *int32Value) String() string { return strconv.FormatInt(int64(*i), 10) } + +func int32Conv(sval string) (interface{}, error) { + v, err := strconv.ParseInt(sval, 0, 32) + if err != nil { + return 0, err + } + return int32(v), nil +} + +// GetInt32 return the int32 value of a flag with the given name +func (f *FlagSet) GetInt32(name string) (int32, error) { + val, err := f.getFlagType(name, "int32", int32Conv) + if err != nil { + return 0, err + } + return val.(int32), nil +} + +// Int32Var defines an int32 flag with specified name, default value, and usage string. +// The argument p points to an int32 variable in which to store the value of the flag. +func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) { + f.VarP(newInt32Value(value, p), name, "", usage) +} + +// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) { + f.VarP(newInt32Value(value, p), name, shorthand, usage) +} + +// Int32Var defines an int32 flag with specified name, default value, and usage string. +// The argument p points to an int32 variable in which to store the value of the flag. +func Int32Var(p *int32, name string, value int32, usage string) { + CommandLine.VarP(newInt32Value(value, p), name, "", usage) +} + +// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash. +func Int32VarP(p *int32, name, shorthand string, value int32, usage string) { + CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage) +} + +// Int32 defines an int32 flag with specified name, default value, and usage string. +// The return value is the address of an int32 variable that stores the value of the flag. +func (f *FlagSet) Int32(name string, value int32, usage string) *int32 { + p := new(int32) + f.Int32VarP(p, name, "", value, usage) + return p +} + +// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 { + p := new(int32) + f.Int32VarP(p, name, shorthand, value, usage) + return p +} + +// Int32 defines an int32 flag with specified name, default value, and usage string. +// The return value is the address of an int32 variable that stores the value of the flag. 
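+//
+// Editor's illustration (hedged; "workers" is an invented flag name) of the
+// package-level form, which registers on CommandLine:
+//
+//    workers := pflag.Int32("workers", 4, "number of worker goroutines")
+//    pflag.Parse()
+//    fmt.Println("workers:", *workers)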
+func Int32(name string, value int32, usage string) *int32 { + return CommandLine.Int32P(name, "", value, usage) +} + +// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash. +func Int32P(name, shorthand string, value int32, usage string) *int32 { + return CommandLine.Int32P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int64.go b/vendor/github.com/spf13/pflag/int64.go new file mode 100644 index 0000000..0026d78 --- /dev/null +++ b/vendor/github.com/spf13/pflag/int64.go @@ -0,0 +1,84 @@ +package pflag + +import "strconv" + +// -- int64 Value +type int64Value int64 + +func newInt64Value(val int64, p *int64) *int64Value { + *p = val + return (*int64Value)(p) +} + +func (i *int64Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = int64Value(v) + return err +} + +func (i *int64Value) Type() string { + return "int64" +} + +func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) } + +func int64Conv(sval string) (interface{}, error) { + return strconv.ParseInt(sval, 0, 64) +} + +// GetInt64 return the int64 value of a flag with the given name +func (f *FlagSet) GetInt64(name string) (int64, error) { + val, err := f.getFlagType(name, "int64", int64Conv) + if err != nil { + return 0, err + } + return val.(int64), nil +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) { + f.VarP(newInt64Value(value, p), name, "", usage) +} + +// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) { + f.VarP(newInt64Value(value, p), name, shorthand, usage) +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func Int64Var(p *int64, name string, value int64, usage string) { + CommandLine.VarP(newInt64Value(value, p), name, "", usage) +} + +// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash. +func Int64VarP(p *int64, name, shorthand string, value int64, usage string) { + CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage) +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func (f *FlagSet) Int64(name string, value int64, usage string) *int64 { + p := new(int64) + f.Int64VarP(p, name, "", value, usage) + return p +} + +// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 { + p := new(int64) + f.Int64VarP(p, name, shorthand, value, usage) + return p +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func Int64(name string, value int64, usage string) *int64 { + return CommandLine.Int64P(name, "", value, usage) +} + +// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash. 
+func Int64P(name, shorthand string, value int64, usage string) *int64 { + return CommandLine.Int64P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int8.go b/vendor/github.com/spf13/pflag/int8.go new file mode 100644 index 0000000..4da9222 --- /dev/null +++ b/vendor/github.com/spf13/pflag/int8.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- int8 Value +type int8Value int8 + +func newInt8Value(val int8, p *int8) *int8Value { + *p = val + return (*int8Value)(p) +} + +func (i *int8Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 8) + *i = int8Value(v) + return err +} + +func (i *int8Value) Type() string { + return "int8" +} + +func (i *int8Value) String() string { return strconv.FormatInt(int64(*i), 10) } + +func int8Conv(sval string) (interface{}, error) { + v, err := strconv.ParseInt(sval, 0, 8) + if err != nil { + return 0, err + } + return int8(v), nil +} + +// GetInt8 return the int8 value of a flag with the given name +func (f *FlagSet) GetInt8(name string) (int8, error) { + val, err := f.getFlagType(name, "int8", int8Conv) + if err != nil { + return 0, err + } + return val.(int8), nil +} + +// Int8Var defines an int8 flag with specified name, default value, and usage string. +// The argument p points to an int8 variable in which to store the value of the flag. +func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) { + f.VarP(newInt8Value(value, p), name, "", usage) +} + +// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) { + f.VarP(newInt8Value(value, p), name, shorthand, usage) +} + +// Int8Var defines an int8 flag with specified name, default value, and usage string. +// The argument p points to an int8 variable in which to store the value of the flag. +func Int8Var(p *int8, name string, value int8, usage string) { + CommandLine.VarP(newInt8Value(value, p), name, "", usage) +} + +// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash. +func Int8VarP(p *int8, name, shorthand string, value int8, usage string) { + CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage) +} + +// Int8 defines an int8 flag with specified name, default value, and usage string. +// The return value is the address of an int8 variable that stores the value of the flag. +func (f *FlagSet) Int8(name string, value int8, usage string) *int8 { + p := new(int8) + f.Int8VarP(p, name, "", value, usage) + return p +} + +// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 { + p := new(int8) + f.Int8VarP(p, name, shorthand, value, usage) + return p +} + +// Int8 defines an int8 flag with specified name, default value, and usage string. +// The return value is the address of an int8 variable that stores the value of the flag. +func Int8(name string, value int8, usage string) *int8 { + return CommandLine.Int8P(name, "", value, usage) +} + +// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash. 
+func Int8P(name, shorthand string, value int8, usage string) *int8 { + return CommandLine.Int8P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int_slice.go b/vendor/github.com/spf13/pflag/int_slice.go new file mode 100644 index 0000000..1e7c9ed --- /dev/null +++ b/vendor/github.com/spf13/pflag/int_slice.go @@ -0,0 +1,128 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- intSlice Value +type intSliceValue struct { + value *[]int + changed bool +} + +func newIntSliceValue(val []int, p *[]int) *intSliceValue { + isv := new(intSliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *intSliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]int, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.Atoi(d) + if err != nil { + return err + } + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) + } + s.changed = true + return nil +} + +func (s *intSliceValue) Type() string { + return "intSlice" +} + +func (s *intSliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%d", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func intSliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []int{}, nil + } + ss := strings.Split(val, ",") + out := make([]int, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.Atoi(d) + if err != nil { + return nil, err + } + + } + return out, nil +} + +// GetIntSlice return the []int value of a flag with the given name +func (f *FlagSet) GetIntSlice(name string) ([]int, error) { + val, err := f.getFlagType(name, "intSlice", intSliceConv) + if err != nil { + return []int{}, err + } + return val.([]int), nil +} + +// IntSliceVar defines a intSlice flag with specified name, default value, and usage string. +// The argument p points to a []int variable in which to store the value of the flag. +func (f *FlagSet) IntSliceVar(p *[]int, name string, value []int, usage string) { + f.VarP(newIntSliceValue(value, p), name, "", usage) +} + +// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) { + f.VarP(newIntSliceValue(value, p), name, shorthand, usage) +} + +// IntSliceVar defines a int[] flag with specified name, default value, and usage string. +// The argument p points to a int[] variable in which to store the value of the flag. +func IntSliceVar(p *[]int, name string, value []int, usage string) { + CommandLine.VarP(newIntSliceValue(value, p), name, "", usage) +} + +// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash. +func IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) { + CommandLine.VarP(newIntSliceValue(value, p), name, shorthand, usage) +} + +// IntSlice defines a []int flag with specified name, default value, and usage string. +// The return value is the address of a []int variable that stores the value of the flag. +func (f *FlagSet) IntSlice(name string, value []int, usage string) *[]int { + p := []int{} + f.IntSliceVarP(&p, name, "", value, usage) + return &p +} + +// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) IntSliceP(name, shorthand string, value []int, usage string) *[]int { + p := []int{} + f.IntSliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// IntSlice defines a []int flag with specified name, default value, and usage string. +// The return value is the address of a []int variable that stores the value of the flag. +func IntSlice(name string, value []int, usage string) *[]int { + return CommandLine.IntSliceP(name, "", value, usage) +} + +// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash. +func IntSliceP(name, shorthand string, value []int, usage string) *[]int { + return CommandLine.IntSliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int_slice_test.go b/vendor/github.com/spf13/pflag/int_slice_test.go new file mode 100644 index 0000000..745aecb --- /dev/null +++ b/vendor/github.com/spf13/pflag/int_slice_test.go @@ -0,0 +1,165 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pflag + +import ( + "fmt" + "strconv" + "strings" + "testing" +) + +func setUpISFlagSet(isp *[]int) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.IntSliceVar(isp, "is", []int{}, "Command separated list!") + return f +} + +func setUpISFlagSetWithDefault(isp *[]int) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.IntSliceVar(isp, "is", []int{0, 1}, "Command separated list!") + return f +} + +func TestEmptyIS(t *testing.T) { + var is []int + f := setUpISFlagSet(&is) + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + getIS, err := f.GetIntSlice("is") + if err != nil { + t.Fatal("got an error from GetIntSlice():", err) + } + if len(getIS) != 0 { + t.Fatalf("got is %v with len=%d but expected length=0", getIS, len(getIS)) + } +} + +func TestIS(t *testing.T) { + var is []int + f := setUpISFlagSet(&is) + + vals := []string{"1", "2", "4", "3"} + arg := fmt.Sprintf("--is=%s", strings.Join(vals, ",")) + err := f.Parse([]string{arg}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range is { + d, err := strconv.Atoi(vals[i]) + if err != nil { + t.Fatalf("got error: %v", err) + } + if d != v { + t.Fatalf("expected is[%d] to be %s but got: %d", i, vals[i], v) + } + } + getIS, err := f.GetIntSlice("is") + if err != nil { + t.Fatalf("got error: %v", err) + } + for i, v := range getIS { + d, err := strconv.Atoi(vals[i]) + if err != nil { + t.Fatalf("got error: %v", err) + } + if d != v { + t.Fatalf("expected is[%d] to be %s but got: %d from GetIntSlice", i, vals[i], v) + } + } +} + +func TestISDefault(t *testing.T) { + var is []int + f := setUpISFlagSetWithDefault(&is) + + vals := []string{"0", "1"} + + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range is { + d, err := strconv.Atoi(vals[i]) + if err != nil { + t.Fatalf("got error: %v", err) + } + if d != v { + t.Fatalf("expected is[%d] to be %d but got: %d", i, d, v) + } + } + + getIS, err := f.GetIntSlice("is") + if err != nil { + t.Fatal("got an error from GetIntSlice():", err) + } + for i, v := range getIS { + d, err := strconv.Atoi(vals[i]) + if err != nil { + t.Fatal("got an error from GetIntSlice():", err) + } + if d != v { + t.Fatalf("expected is[%d] to be %d from GetIntSlice but got: %d", i, d, v) + } + } +} + +func TestISWithDefault(t *testing.T) { + var is []int + f := 
setUpISFlagSetWithDefault(&is) + + vals := []string{"1", "2"} + arg := fmt.Sprintf("--is=%s", strings.Join(vals, ",")) + err := f.Parse([]string{arg}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range is { + d, err := strconv.Atoi(vals[i]) + if err != nil { + t.Fatalf("got error: %v", err) + } + if d != v { + t.Fatalf("expected is[%d] to be %d but got: %d", i, d, v) + } + } + + getIS, err := f.GetIntSlice("is") + if err != nil { + t.Fatal("got an error from GetIntSlice():", err) + } + for i, v := range getIS { + d, err := strconv.Atoi(vals[i]) + if err != nil { + t.Fatalf("got error: %v", err) + } + if d != v { + t.Fatalf("expected is[%d] to be %d from GetIntSlice but got: %d", i, d, v) + } + } +} + +func TestISCalledTwice(t *testing.T) { + var is []int + f := setUpISFlagSet(&is) + + in := []string{"1,2", "3"} + expected := []int{1, 2, 3} + argfmt := "--is=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + err := f.Parse([]string{arg1, arg2}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range is { + if expected[i] != v { + t.Fatalf("expected is[%d] to be %d but got: %d", i, expected[i], v) + } + } +} diff --git a/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go new file mode 100644 index 0000000..3d414ba --- /dev/null +++ b/vendor/github.com/spf13/pflag/ip.go @@ -0,0 +1,94 @@ +package pflag + +import ( + "fmt" + "net" + "strings" +) + +// -- net.IP value +type ipValue net.IP + +func newIPValue(val net.IP, p *net.IP) *ipValue { + *p = val + return (*ipValue)(p) +} + +func (i *ipValue) String() string { return net.IP(*i).String() } +func (i *ipValue) Set(s string) error { + ip := net.ParseIP(strings.TrimSpace(s)) + if ip == nil { + return fmt.Errorf("failed to parse IP: %q", s) + } + *i = ipValue(ip) + return nil +} + +func (i *ipValue) Type() string { + return "ip" +} + +func ipConv(sval string) (interface{}, error) { + ip := net.ParseIP(sval) + if ip != nil { + return ip, nil + } + return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval) +} + +// GetIP return the net.IP value of a flag with the given name +func (f *FlagSet) GetIP(name string) (net.IP, error) { + val, err := f.getFlagType(name, "ip", ipConv) + if err != nil { + return nil, err + } + return val.(net.IP), nil +} + +// IPVar defines an net.IP flag with specified name, default value, and usage string. +// The argument p points to an net.IP variable in which to store the value of the flag. +func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) { + f.VarP(newIPValue(value, p), name, "", usage) +} + +// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { + f.VarP(newIPValue(value, p), name, shorthand, usage) +} + +// IPVar defines an net.IP flag with specified name, default value, and usage string. +// The argument p points to an net.IP variable in which to store the value of the flag. +func IPVar(p *net.IP, name string, value net.IP, usage string) { + CommandLine.VarP(newIPValue(value, p), name, "", usage) +} + +// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash. +func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { + CommandLine.VarP(newIPValue(value, p), name, shorthand, usage) +} + +// IP defines an net.IP flag with specified name, default value, and usage string. 
+// The return value is the address of an net.IP variable that stores the value of the flag. +func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP { + p := new(net.IP) + f.IPVarP(p, name, "", value, usage) + return p +} + +// IPP is like IP, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP { + p := new(net.IP) + f.IPVarP(p, name, shorthand, value, usage) + return p +} + +// IP defines an net.IP flag with specified name, default value, and usage string. +// The return value is the address of an net.IP variable that stores the value of the flag. +func IP(name string, value net.IP, usage string) *net.IP { + return CommandLine.IPP(name, "", value, usage) +} + +// IPP is like IP, but accepts a shorthand letter that can be used after a single dash. +func IPP(name, shorthand string, value net.IP, usage string) *net.IP { + return CommandLine.IPP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/ip_slice.go b/vendor/github.com/spf13/pflag/ip_slice.go new file mode 100644 index 0000000..7dd196f --- /dev/null +++ b/vendor/github.com/spf13/pflag/ip_slice.go @@ -0,0 +1,148 @@ +package pflag + +import ( + "fmt" + "io" + "net" + "strings" +) + +// -- ipSlice Value +type ipSliceValue struct { + value *[]net.IP + changed bool +} + +func newIPSliceValue(val []net.IP, p *[]net.IP) *ipSliceValue { + ipsv := new(ipSliceValue) + ipsv.value = p + *ipsv.value = val + return ipsv +} + +// Set converts, and assigns, the comma-separated IP argument string representation as the []net.IP value of this flag. +// If Set is called on a flag that already has a []net.IP assigned, the newly converted values will be appended. +func (s *ipSliceValue) Set(val string) error { + + // remove all quote characters + rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "") + + // read flag arguments with CSV parser + ipStrSlice, err := readAsCSV(rmQuote.Replace(val)) + if err != nil && err != io.EOF { + return err + } + + // parse ip values into slice + out := make([]net.IP, 0, len(ipStrSlice)) + for _, ipStr := range ipStrSlice { + ip := net.ParseIP(strings.TrimSpace(ipStr)) + if ip == nil { + return fmt.Errorf("invalid string being converted to IP address: %s", ipStr) + } + out = append(out, ip) + } + + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) + } + + s.changed = true + + return nil +} + +// Type returns a string that uniquely represents this flag's type. +func (s *ipSliceValue) Type() string { + return "ipSlice" +} + +// String defines a "native" format for this net.IP slice flag value. 
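+// For example (editor's note), a value holding 10.0.0.1 and ::1 renders as
+// "[10.0.0.1,::1]".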
+func (s *ipSliceValue) String() string {
+
+    ipStrSlice := make([]string, len(*s.value))
+    for i, ip := range *s.value {
+        ipStrSlice[i] = ip.String()
+    }
+
+    out, _ := writeAsCSV(ipStrSlice)
+
+    return "[" + out + "]"
+}
+
+func ipSliceConv(val string) (interface{}, error) {
+    val = strings.Trim(val, "[]")
+    // Empty string would cause a slice with one (empty) entry
+    if len(val) == 0 {
+        return []net.IP{}, nil
+    }
+    ss := strings.Split(val, ",")
+    out := make([]net.IP, len(ss))
+    for i, sval := range ss {
+        ip := net.ParseIP(strings.TrimSpace(sval))
+        if ip == nil {
+            return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval)
+        }
+        out[i] = ip
+    }
+    return out, nil
+}
+
+// GetIPSlice returns the []net.IP value of a flag with the given name
+func (f *FlagSet) GetIPSlice(name string) ([]net.IP, error) {
+    val, err := f.getFlagType(name, "ipSlice", ipSliceConv)
+    if err != nil {
+        return []net.IP{}, err
+    }
+    return val.([]net.IP), nil
+}
+
+// IPSliceVar defines an ipSlice flag with specified name, default value, and usage string.
+// The argument p points to a []net.IP variable in which to store the value of the flag.
+func (f *FlagSet) IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) {
+    f.VarP(newIPSliceValue(value, p), name, "", usage)
+}
+
+// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) {
+    f.VarP(newIPSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPSliceVar defines a []net.IP flag with specified name, default value, and usage string.
+// The argument p points to a []net.IP variable in which to store the value of the flag.
+func IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) {
+    CommandLine.VarP(newIPSliceValue(value, p), name, "", usage)
+}
+
+// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) {
+    CommandLine.VarP(newIPSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPSlice defines a []net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IP variable that stores the value of the flag.
+func (f *FlagSet) IPSlice(name string, value []net.IP, usage string) *[]net.IP {
+    p := []net.IP{}
+    f.IPSliceVarP(&p, name, "", value, usage)
+    return &p
+}
+
+// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP {
+    p := []net.IP{}
+    f.IPSliceVarP(&p, name, shorthand, value, usage)
+    return &p
+}
+
+// IPSlice defines a []net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IP variable that stores the value of the flag.
+func IPSlice(name string, value []net.IP, usage string) *[]net.IP {
+    return CommandLine.IPSliceP(name, "", value, usage)
+}
+
+// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash.
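+//
+// Editor's sketch (the flag name "dns" is invented); IPv4 and IPv6 literals
+// may be mixed in one comma-separated argument:
+//
+//    dns := pflag.IPSliceP("dns", "d", nil, "DNS servers to query")
+//    pflag.Parse()
+//    fmt.Println(*dns)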
+func IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP { + return CommandLine.IPSliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/ip_slice_test.go b/vendor/github.com/spf13/pflag/ip_slice_test.go new file mode 100644 index 0000000..b0c681c --- /dev/null +++ b/vendor/github.com/spf13/pflag/ip_slice_test.go @@ -0,0 +1,222 @@ +package pflag + +import ( + "fmt" + "net" + "strings" + "testing" +) + +func setUpIPSFlagSet(ipsp *[]net.IP) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.IPSliceVar(ipsp, "ips", []net.IP{}, "Command separated list!") + return f +} + +func setUpIPSFlagSetWithDefault(ipsp *[]net.IP) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.IPSliceVar(ipsp, "ips", + []net.IP{ + net.ParseIP("192.168.1.1"), + net.ParseIP("0:0:0:0:0:0:0:1"), + }, + "Command separated list!") + return f +} + +func TestEmptyIP(t *testing.T) { + var ips []net.IP + f := setUpIPSFlagSet(&ips) + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + getIPS, err := f.GetIPSlice("ips") + if err != nil { + t.Fatal("got an error from GetIPSlice():", err) + } + if len(getIPS) != 0 { + t.Fatalf("got ips %v with len=%d but expected length=0", getIPS, len(getIPS)) + } +} + +func TestIPS(t *testing.T) { + var ips []net.IP + f := setUpIPSFlagSet(&ips) + + vals := []string{"192.168.1.1", "10.0.0.1", "0:0:0:0:0:0:0:2"} + arg := fmt.Sprintf("--ips=%s", strings.Join(vals, ",")) + err := f.Parse([]string{arg}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range ips { + if ip := net.ParseIP(vals[i]); ip == nil { + t.Fatalf("invalid string being converted to IP address: %s", vals[i]) + } else if !ip.Equal(v) { + t.Fatalf("expected ips[%d] to be %s but got: %s from GetIPSlice", i, vals[i], v) + } + } +} + +func TestIPSDefault(t *testing.T) { + var ips []net.IP + f := setUpIPSFlagSetWithDefault(&ips) + + vals := []string{"192.168.1.1", "0:0:0:0:0:0:0:1"} + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range ips { + if ip := net.ParseIP(vals[i]); ip == nil { + t.Fatalf("invalid string being converted to IP address: %s", vals[i]) + } else if !ip.Equal(v) { + t.Fatalf("expected ips[%d] to be %s but got: %s", i, vals[i], v) + } + } + + getIPS, err := f.GetIPSlice("ips") + if err != nil { + t.Fatal("got an error from GetIPSlice") + } + for i, v := range getIPS { + if ip := net.ParseIP(vals[i]); ip == nil { + t.Fatalf("invalid string being converted to IP address: %s", vals[i]) + } else if !ip.Equal(v) { + t.Fatalf("expected ips[%d] to be %s but got: %s", i, vals[i], v) + } + } +} + +func TestIPSWithDefault(t *testing.T) { + var ips []net.IP + f := setUpIPSFlagSetWithDefault(&ips) + + vals := []string{"192.168.1.1", "0:0:0:0:0:0:0:1"} + arg := fmt.Sprintf("--ips=%s", strings.Join(vals, ",")) + err := f.Parse([]string{arg}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range ips { + if ip := net.ParseIP(vals[i]); ip == nil { + t.Fatalf("invalid string being converted to IP address: %s", vals[i]) + } else if !ip.Equal(v) { + t.Fatalf("expected ips[%d] to be %s but got: %s", i, vals[i], v) + } + } + + getIPS, err := f.GetIPSlice("ips") + if err != nil { + t.Fatal("got an error from GetIPSlice") + } + for i, v := range getIPS { + if ip := net.ParseIP(vals[i]); ip == nil { + t.Fatalf("invalid string being converted to IP address: %s", vals[i]) + } else if !ip.Equal(v) { + t.Fatalf("expected ips[%d] to 
be %s but got: %s", i, vals[i], v) + } + } +} + +func TestIPSCalledTwice(t *testing.T) { + var ips []net.IP + f := setUpIPSFlagSet(&ips) + + in := []string{"192.168.1.2,0:0:0:0:0:0:0:1", "10.0.0.1"} + expected := []net.IP{net.ParseIP("192.168.1.2"), net.ParseIP("0:0:0:0:0:0:0:1"), net.ParseIP("10.0.0.1")} + argfmt := "ips=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + err := f.Parse([]string{arg1, arg2}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range ips { + if !expected[i].Equal(v) { + t.Fatalf("expected ips[%d] to be %s but got: %s", i, expected[i], v) + } + } +} + +func TestIPSBadQuoting(t *testing.T) { + + tests := []struct { + Want []net.IP + FlagArg []string + }{ + { + Want: []net.IP{ + net.ParseIP("a4ab:61d:f03e:5d7d:fad7:d4c2:a1a5:568"), + net.ParseIP("203.107.49.208"), + net.ParseIP("14.57.204.90"), + }, + FlagArg: []string{ + "a4ab:61d:f03e:5d7d:fad7:d4c2:a1a5:568", + "203.107.49.208", + "14.57.204.90", + }, + }, + { + Want: []net.IP{ + net.ParseIP("204.228.73.195"), + net.ParseIP("86.141.15.94"), + }, + FlagArg: []string{ + "204.228.73.195", + "86.141.15.94", + }, + }, + { + Want: []net.IP{ + net.ParseIP("c70c:db36:3001:890f:c6ea:3f9b:7a39:cc3f"), + net.ParseIP("4d17:1d6e:e699:bd7a:88c5:5e7e:ac6a:4472"), + }, + FlagArg: []string{ + "c70c:db36:3001:890f:c6ea:3f9b:7a39:cc3f", + "4d17:1d6e:e699:bd7a:88c5:5e7e:ac6a:4472", + }, + }, + { + Want: []net.IP{ + net.ParseIP("5170:f971:cfac:7be3:512a:af37:952c:bc33"), + net.ParseIP("93.21.145.140"), + net.ParseIP("2cac:61d3:c5ff:6caf:73e0:1b1a:c336:c1ca"), + }, + FlagArg: []string{ + " 5170:f971:cfac:7be3:512a:af37:952c:bc33 , 93.21.145.140 ", + "2cac:61d3:c5ff:6caf:73e0:1b1a:c336:c1ca", + }, + }, + { + Want: []net.IP{ + net.ParseIP("2e5e:66b2:6441:848:5b74:76ea:574c:3a7b"), + net.ParseIP("2e5e:66b2:6441:848:5b74:76ea:574c:3a7b"), + net.ParseIP("2e5e:66b2:6441:848:5b74:76ea:574c:3a7b"), + net.ParseIP("2e5e:66b2:6441:848:5b74:76ea:574c:3a7b"), + }, + FlagArg: []string{ + `"2e5e:66b2:6441:848:5b74:76ea:574c:3a7b, 2e5e:66b2:6441:848:5b74:76ea:574c:3a7b,2e5e:66b2:6441:848:5b74:76ea:574c:3a7b "`, + " 2e5e:66b2:6441:848:5b74:76ea:574c:3a7b"}, + }, + } + + for i, test := range tests { + + var ips []net.IP + f := setUpIPSFlagSet(&ips) + + if err := f.Parse([]string{fmt.Sprintf("--ips=%s", strings.Join(test.FlagArg, ","))}); err != nil { + t.Fatalf("flag parsing failed with error: %s\nparsing:\t%#v\nwant:\t\t%s", + err, test.FlagArg, test.Want[i]) + } + + for j, b := range ips { + if !b.Equal(test.Want[j]) { + t.Fatalf("bad value parsed for test %d on net.IP %d:\nwant:\t%s\ngot:\t%s", i, j, test.Want[j], b) + } + } + } +} diff --git a/vendor/github.com/spf13/pflag/ip_test.go b/vendor/github.com/spf13/pflag/ip_test.go new file mode 100644 index 0000000..1fec50e --- /dev/null +++ b/vendor/github.com/spf13/pflag/ip_test.go @@ -0,0 +1,63 @@ +package pflag + +import ( + "fmt" + "net" + "os" + "testing" +) + +func setUpIP(ip *net.IP) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.IPVar(ip, "address", net.ParseIP("0.0.0.0"), "IP Address") + return f +} + +func TestIP(t *testing.T) { + testCases := []struct { + input string + success bool + expected string + }{ + {"0.0.0.0", true, "0.0.0.0"}, + {" 0.0.0.0 ", true, "0.0.0.0"}, + {"1.2.3.4", true, "1.2.3.4"}, + {"127.0.0.1", true, "127.0.0.1"}, + {"255.255.255.255", true, "255.255.255.255"}, + {"", false, ""}, + {"0", false, ""}, + {"localhost", false, ""}, + {"0.0.0", false, ""}, + {"0.0.0.", false, ""}, + {"0.0.0.0.", false, 
""}, + {"0.0.0.256", false, ""}, + {"0 . 0 . 0 . 0", false, ""}, + } + + devnull, _ := os.Open(os.DevNull) + os.Stderr = devnull + for i := range testCases { + var addr net.IP + f := setUpIP(&addr) + + tc := &testCases[i] + + arg := fmt.Sprintf("--address=%s", tc.input) + err := f.Parse([]string{arg}) + if err != nil && tc.success == true { + t.Errorf("expected success, got %q", err) + continue + } else if err == nil && tc.success == false { + t.Errorf("expected failure") + continue + } else if tc.success { + ip, err := f.GetIP("address") + if err != nil { + t.Errorf("Got error trying to fetch the IP flag: %v", err) + } + if ip.String() != tc.expected { + t.Errorf("expected %q, got %q", tc.expected, ip.String()) + } + } + } +} diff --git a/vendor/github.com/spf13/pflag/ipmask.go b/vendor/github.com/spf13/pflag/ipmask.go new file mode 100644 index 0000000..5bd44bd --- /dev/null +++ b/vendor/github.com/spf13/pflag/ipmask.go @@ -0,0 +1,122 @@ +package pflag + +import ( + "fmt" + "net" + "strconv" +) + +// -- net.IPMask value +type ipMaskValue net.IPMask + +func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue { + *p = val + return (*ipMaskValue)(p) +} + +func (i *ipMaskValue) String() string { return net.IPMask(*i).String() } +func (i *ipMaskValue) Set(s string) error { + ip := ParseIPv4Mask(s) + if ip == nil { + return fmt.Errorf("failed to parse IP mask: %q", s) + } + *i = ipMaskValue(ip) + return nil +} + +func (i *ipMaskValue) Type() string { + return "ipMask" +} + +// ParseIPv4Mask written in IP form (e.g. 255.255.255.0). +// This function should really belong to the net package. +func ParseIPv4Mask(s string) net.IPMask { + mask := net.ParseIP(s) + if mask == nil { + if len(s) != 8 { + return nil + } + // net.IPMask.String() actually outputs things like ffffff00 + // so write a horrible parser for that as well :-( + m := []int{} + for i := 0; i < 4; i++ { + b := "0x" + s[2*i:2*i+2] + d, err := strconv.ParseInt(b, 0, 0) + if err != nil { + return nil + } + m = append(m, int(d)) + } + s := fmt.Sprintf("%d.%d.%d.%d", m[0], m[1], m[2], m[3]) + mask = net.ParseIP(s) + if mask == nil { + return nil + } + } + return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15]) +} + +func parseIPv4Mask(sval string) (interface{}, error) { + mask := ParseIPv4Mask(sval) + if mask == nil { + return nil, fmt.Errorf("unable to parse %s as net.IPMask", sval) + } + return mask, nil +} + +// GetIPv4Mask return the net.IPv4Mask value of a flag with the given name +func (f *FlagSet) GetIPv4Mask(name string) (net.IPMask, error) { + val, err := f.getFlagType(name, "ipMask", parseIPv4Mask) + if err != nil { + return nil, err + } + return val.(net.IPMask), nil +} + +// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. +// The argument p points to an net.IPMask variable in which to store the value of the flag. +func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { + f.VarP(newIPMaskValue(value, p), name, "", usage) +} + +// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { + f.VarP(newIPMaskValue(value, p), name, shorthand, usage) +} + +// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. +// The argument p points to an net.IPMask variable in which to store the value of the flag. 
+func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { + CommandLine.VarP(newIPMaskValue(value, p), name, "", usage) +} + +// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. +func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { + CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage) +} + +// IPMask defines an net.IPMask flag with specified name, default value, and usage string. +// The return value is the address of an net.IPMask variable that stores the value of the flag. +func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask { + p := new(net.IPMask) + f.IPMaskVarP(p, name, "", value, usage) + return p +} + +// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { + p := new(net.IPMask) + f.IPMaskVarP(p, name, shorthand, value, usage) + return p +} + +// IPMask defines an net.IPMask flag with specified name, default value, and usage string. +// The return value is the address of an net.IPMask variable that stores the value of the flag. +func IPMask(name string, value net.IPMask, usage string) *net.IPMask { + return CommandLine.IPMaskP(name, "", value, usage) +} + +// IPMaskP is like IP, but accepts a shorthand letter that can be used after a single dash. +func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { + return CommandLine.IPMaskP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/ipnet.go b/vendor/github.com/spf13/pflag/ipnet.go new file mode 100644 index 0000000..e2c1b8b --- /dev/null +++ b/vendor/github.com/spf13/pflag/ipnet.go @@ -0,0 +1,98 @@ +package pflag + +import ( + "fmt" + "net" + "strings" +) + +// IPNet adapts net.IPNet for use as a flag. +type ipNetValue net.IPNet + +func (ipnet ipNetValue) String() string { + n := net.IPNet(ipnet) + return n.String() +} + +func (ipnet *ipNetValue) Set(value string) error { + _, n, err := net.ParseCIDR(strings.TrimSpace(value)) + if err != nil { + return err + } + *ipnet = ipNetValue(*n) + return nil +} + +func (*ipNetValue) Type() string { + return "ipNet" +} + +func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue { + *p = val + return (*ipNetValue)(p) +} + +func ipNetConv(sval string) (interface{}, error) { + _, n, err := net.ParseCIDR(strings.TrimSpace(sval)) + if err == nil { + return *n, nil + } + return nil, fmt.Errorf("invalid string being converted to IPNet: %s", sval) +} + +// GetIPNet return the net.IPNet value of a flag with the given name +func (f *FlagSet) GetIPNet(name string) (net.IPNet, error) { + val, err := f.getFlagType(name, "ipNet", ipNetConv) + if err != nil { + return net.IPNet{}, err + } + return val.(net.IPNet), nil +} + +// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string. +// The argument p points to an net.IPNet variable in which to store the value of the flag. +func (f *FlagSet) IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) { + f.VarP(newIPNetValue(value, p), name, "", usage) +} + +// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash. 
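+//
+// Editor's sketch (names "fs" and "cidr" are invented). net.ParseCIDR masks
+// the host bits, so "10.1.2.3/8" is stored as 10.0.0.0/8:
+//
+//    fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
+//    var cidr net.IPNet
+//    fs.IPNetVarP(&cidr, "cidr", "c", net.IPNet{}, "allowed network")
+//    _ = fs.Parse([]string{"--cidr=10.1.2.3/8"})
+//    fmt.Println(cidr.String()) // 10.0.0.0/8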
+func (f *FlagSet) IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) { + f.VarP(newIPNetValue(value, p), name, shorthand, usage) +} + +// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string. +// The argument p points to an net.IPNet variable in which to store the value of the flag. +func IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) { + CommandLine.VarP(newIPNetValue(value, p), name, "", usage) +} + +// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash. +func IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) { + CommandLine.VarP(newIPNetValue(value, p), name, shorthand, usage) +} + +// IPNet defines an net.IPNet flag with specified name, default value, and usage string. +// The return value is the address of an net.IPNet variable that stores the value of the flag. +func (f *FlagSet) IPNet(name string, value net.IPNet, usage string) *net.IPNet { + p := new(net.IPNet) + f.IPNetVarP(p, name, "", value, usage) + return p +} + +// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet { + p := new(net.IPNet) + f.IPNetVarP(p, name, shorthand, value, usage) + return p +} + +// IPNet defines an net.IPNet flag with specified name, default value, and usage string. +// The return value is the address of an net.IPNet variable that stores the value of the flag. +func IPNet(name string, value net.IPNet, usage string) *net.IPNet { + return CommandLine.IPNetP(name, "", value, usage) +} + +// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash. +func IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet { + return CommandLine.IPNetP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/ipnet_test.go b/vendor/github.com/spf13/pflag/ipnet_test.go new file mode 100644 index 0000000..335b6fa --- /dev/null +++ b/vendor/github.com/spf13/pflag/ipnet_test.go @@ -0,0 +1,70 @@ +package pflag + +import ( + "fmt" + "net" + "os" + "testing" +) + +func setUpIPNet(ip *net.IPNet) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + _, def, _ := net.ParseCIDR("0.0.0.0/0") + f.IPNetVar(ip, "address", *def, "IP Address") + return f +} + +func TestIPNet(t *testing.T) { + testCases := []struct { + input string + success bool + expected string + }{ + {"0.0.0.0/0", true, "0.0.0.0/0"}, + {" 0.0.0.0/0 ", true, "0.0.0.0/0"}, + {"1.2.3.4/8", true, "1.0.0.0/8"}, + {"127.0.0.1/16", true, "127.0.0.0/16"}, + {"255.255.255.255/19", true, "255.255.224.0/19"}, + {"255.255.255.255/32", true, "255.255.255.255/32"}, + {"", false, ""}, + {"/0", false, ""}, + {"0", false, ""}, + {"0/0", false, ""}, + {"localhost/0", false, ""}, + {"0.0.0/4", false, ""}, + {"0.0.0./8", false, ""}, + {"0.0.0.0./12", false, ""}, + {"0.0.0.256/16", false, ""}, + {"0.0.0.0 /20", false, ""}, + {"0.0.0.0/ 24", false, ""}, + {"0 . 0 . 0 . 
0 / 28", false, ""}, + {"0.0.0.0/33", false, ""}, + } + + devnull, _ := os.Open(os.DevNull) + os.Stderr = devnull + for i := range testCases { + var addr net.IPNet + f := setUpIPNet(&addr) + + tc := &testCases[i] + + arg := fmt.Sprintf("--address=%s", tc.input) + err := f.Parse([]string{arg}) + if err != nil && tc.success == true { + t.Errorf("expected success, got %q", err) + continue + } else if err == nil && tc.success == false { + t.Errorf("expected failure") + continue + } else if tc.success { + ip, err := f.GetIPNet("address") + if err != nil { + t.Errorf("Got error trying to fetch the IP flag: %v", err) + } + if ip.String() != tc.expected { + t.Errorf("expected %q, got %q", tc.expected, ip.String()) + } + } + } +} diff --git a/vendor/github.com/spf13/pflag/string.go b/vendor/github.com/spf13/pflag/string.go new file mode 100644 index 0000000..04e0a26 --- /dev/null +++ b/vendor/github.com/spf13/pflag/string.go @@ -0,0 +1,80 @@ +package pflag + +// -- string Value +type stringValue string + +func newStringValue(val string, p *string) *stringValue { + *p = val + return (*stringValue)(p) +} + +func (s *stringValue) Set(val string) error { + *s = stringValue(val) + return nil +} +func (s *stringValue) Type() string { + return "string" +} + +func (s *stringValue) String() string { return string(*s) } + +func stringConv(sval string) (interface{}, error) { + return sval, nil +} + +// GetString return the string value of a flag with the given name +func (f *FlagSet) GetString(name string) (string, error) { + val, err := f.getFlagType(name, "string", stringConv) + if err != nil { + return "", err + } + return val.(string), nil +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func (f *FlagSet) StringVar(p *string, name string, value string, usage string) { + f.VarP(newStringValue(value, p), name, "", usage) +} + +// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) { + f.VarP(newStringValue(value, p), name, shorthand, usage) +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func StringVar(p *string, name string, value string, usage string) { + CommandLine.VarP(newStringValue(value, p), name, "", usage) +} + +// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash. +func StringVarP(p *string, name, shorthand string, value string, usage string) { + CommandLine.VarP(newStringValue(value, p), name, shorthand, usage) +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func (f *FlagSet) String(name string, value string, usage string) *string { + p := new(string) + f.StringVarP(p, name, "", value, usage) + return p +} + +// StringP is like String, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string { + p := new(string) + f.StringVarP(p, name, shorthand, value, usage) + return p +} + +// String defines a string flag with specified name, default value, and usage string. 
+// The return value is the address of a string variable that stores the value of the flag. +func String(name string, value string, usage string) *string { + return CommandLine.StringP(name, "", value, usage) +} + +// StringP is like String, but accepts a shorthand letter that can be used after a single dash. +func StringP(name, shorthand string, value string, usage string) *string { + return CommandLine.StringP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go new file mode 100644 index 0000000..276b7ed --- /dev/null +++ b/vendor/github.com/spf13/pflag/string_array.go @@ -0,0 +1,103 @@ +package pflag + +// -- stringArray Value +type stringArrayValue struct { + value *[]string + changed bool +} + +func newStringArrayValue(val []string, p *[]string) *stringArrayValue { + ssv := new(stringArrayValue) + ssv.value = p + *ssv.value = val + return ssv +} + +func (s *stringArrayValue) Set(val string) error { + if !s.changed { + *s.value = []string{val} + s.changed = true + } else { + *s.value = append(*s.value, val) + } + return nil +} + +func (s *stringArrayValue) Type() string { + return "stringArray" +} + +func (s *stringArrayValue) String() string { + str, _ := writeAsCSV(*s.value) + return "[" + str + "]" +} + +func stringArrayConv(sval string) (interface{}, error) { + sval = sval[1 : len(sval)-1] + // An empty string would cause a array with one (empty) string + if len(sval) == 0 { + return []string{}, nil + } + return readAsCSV(sval) +} + +// GetStringArray return the []string value of a flag with the given name +func (f *FlagSet) GetStringArray(name string) ([]string, error) { + val, err := f.getFlagType(name, "stringArray", stringArrayConv) + if err != nil { + return []string{}, err + } + return val.([]string), nil +} + +// StringArrayVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a []string variable in which to store the values of the multiple flags. +// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringArrayVar(p *[]string, name string, value []string, usage string) { + f.VarP(newStringArrayValue(value, p), name, "", usage) +} + +// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) { + f.VarP(newStringArrayValue(value, p), name, shorthand, usage) +} + +// StringArrayVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a []string variable in which to store the value of the flag. +// The value of each argument will not try to be separated by comma +func StringArrayVar(p *[]string, name string, value []string, usage string) { + CommandLine.VarP(newStringArrayValue(value, p), name, "", usage) +} + +// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash. +func StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) { + CommandLine.VarP(newStringArrayValue(value, p), name, shorthand, usage) +} + +// StringArray defines a string flag with specified name, default value, and usage string. +// The return value is the address of a []string variable that stores the value of the flag. 
+// The value of each argument is not split on commas; use a StringSlice for that.
+func (f *FlagSet) StringArray(name string, value []string, usage string) *[]string {
+	p := []string{}
+	f.StringArrayVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringArrayP(name, shorthand string, value []string, usage string) *[]string {
+	p := []string{}
+	f.StringArrayVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// StringArray defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// The value of each argument is not split on commas; use a StringSlice for that.
+func StringArray(name string, value []string, usage string) *[]string {
+	return CommandLine.StringArrayP(name, "", value, usage)
+}
+
+// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash.
+func StringArrayP(name, shorthand string, value []string, usage string) *[]string {
+	return CommandLine.StringArrayP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/string_array_test.go b/vendor/github.com/spf13/pflag/string_array_test.go
new file mode 100644
index 0000000..1ceac8c
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_array_test.go
@@ -0,0 +1,233 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pflag
+
+import (
+	"fmt"
+	"testing"
+)
+
+func setUpSAFlagSet(sap *[]string) *FlagSet {
+	f := NewFlagSet("test", ContinueOnError)
+	f.StringArrayVar(sap, "sa", []string{}, "Comma-separated list!")
+	return f
+}
+
+func setUpSAFlagSetWithDefault(sap *[]string) *FlagSet {
+	f := NewFlagSet("test", ContinueOnError)
+	f.StringArrayVar(sap, "sa", []string{"default", "values"}, "Comma-separated list!")
+	return f
+}
+
+func TestEmptySA(t *testing.T) {
+	var sa []string
+	f := setUpSAFlagSet(&sa)
+	err := f.Parse([]string{})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+
+	getSA, err := f.GetStringArray("sa")
+	if err != nil {
+		t.Fatal("got an error from GetStringArray():", err)
+	}
+	if len(getSA) != 0 {
+		t.Fatalf("got sa %v with len=%d but expected length=0", getSA, len(getSA))
+	}
+}
+
+func TestEmptySAValue(t *testing.T) {
+	var sa []string
+	f := setUpSAFlagSet(&sa)
+	err := f.Parse([]string{"--sa="})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+
+	getSA, err := f.GetStringArray("sa")
+	if err != nil {
+		t.Fatal("got an error from GetStringArray():", err)
+	}
+	if len(getSA) != 0 {
+		t.Fatalf("got sa %v with len=%d but expected length=0", getSA, len(getSA))
+	}
+}
+
+func TestSADefault(t *testing.T) {
+	var sa []string
+	f := setUpSAFlagSetWithDefault(&sa)
+
+	vals := []string{"default", "values"}
+
+	err := f.Parse([]string{})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+	for i, v := range sa {
+		if vals[i] != v {
+			t.Fatalf("expected sa[%d] to be %s but got: %s", i, vals[i], v)
+		}
+	}
+
+	getSA, err := f.GetStringArray("sa")
+	if err != nil {
+		t.Fatal("got an error from GetStringArray():", err)
+	}
+	for i, v := range getSA {
+		if vals[i] != v {
+			t.Fatalf("expected sa[%d] to be %s from GetStringArray but got: %s", i, vals[i], v)
+		}
+	}
+}
+
+func TestSAWithDefault(t *testing.T) {
+	var sa []string
+	f := setUpSAFlagSetWithDefault(&sa)
+
+	val := "one"
+	arg := fmt.Sprintf("--sa=%s", val)
+	err := f.Parse([]string{arg})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+
+	if len(sa) != 1 {
+		t.Fatalf("expected number of values to be %d but got: %d", 1, len(sa))
+	}
+
+	if sa[0] != val {
+		t.Fatalf("expected value to be %s but got: %s", val, sa[0])
+	}
+
+	getSA, err := f.GetStringArray("sa")
+	if err != nil {
+		t.Fatal("got an error from GetStringArray():", err)
+	}
+
+	if len(getSA) != 1 {
+		t.Fatalf("expected number of values to be %d but got: %d", 1, len(getSA))
+	}
+
+	if getSA[0] != val {
+		t.Fatalf("expected value to be %s but got: %s", val, getSA[0])
+	}
+}
+
+func TestSACalledTwice(t *testing.T) {
+	var sa []string
+	f := setUpSAFlagSet(&sa)
+
+	in := []string{"one", "two"}
+	expected := []string{"one", "two"}
+	argfmt := "--sa=%s"
+	arg1 := fmt.Sprintf(argfmt, in[0])
+	arg2 := fmt.Sprintf(argfmt, in[1])
+	err := f.Parse([]string{arg1, arg2})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+
+	if len(expected) != len(sa) {
+		t.Fatalf("expected number of sa to be %d but got: %d", len(expected), len(sa))
+	}
+	for i, v := range sa {
+		if expected[i] != v {
+			t.Fatalf("expected sa[%d] to be %s but got: %s", i, expected[i], v)
+		}
+	}
+
+	values, err := f.GetStringArray("sa")
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+
+	if len(expected) != len(values) {
+		t.Fatalf("expected number of values to be %d but got: %d", len(expected), len(values))
+	}
+	for i, v := range values {
+		if expected[i] != v {
+			t.Fatalf("expected sa[%d] to be %s but got: %s", i, expected[i], v)
+		}
+	}
+}
+
+func TestSAWithSpecialChar(t *testing.T) {
+	var sa []string
+	f := setUpSAFlagSet(&sa)
+
+	in := []string{"one,two", `"three"`, `"four,five",six`, "seven eight"}
+	expected := []string{"one,two", `"three"`, `"four,five",six`, "seven eight"}
+	argfmt := "--sa=%s"
+	arg1 := fmt.Sprintf(argfmt, in[0])
+	arg2 := fmt.Sprintf(argfmt, in[1])
+	arg3 := fmt.Sprintf(argfmt, in[2])
+	arg4 := fmt.Sprintf(argfmt, in[3])
+	err := f.Parse([]string{arg1, arg2, arg3, arg4})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+
+	if len(expected) != len(sa) {
+		t.Fatalf("expected number of sa to be %d but got: %d", len(expected), len(sa))
+	}
+	for i, v := range sa {
+		if expected[i] != v {
+			t.Fatalf("expected sa[%d] to be %s but got: %s", i, expected[i], v)
+		}
+	}
+
+	values, err := f.GetStringArray("sa")
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+
+	if len(expected) != len(values) {
+		t.Fatalf("expected number of values to be %d but got: %d", len(expected), len(values))
+	}
+	for i, v := range values {
+		if expected[i] != v {
+			t.Fatalf("expected sa[%d] to be %s but got: %s", i, expected[i], v)
+		}
+	}
+}
+
+func TestSAWithSquareBrackets(t *testing.T) {
+	var sa []string
+	f := setUpSAFlagSet(&sa)
+
+	in := []string{"][]-[", "[a-z]", "[a-z]+"}
+	expected := []string{"][]-[", "[a-z]", "[a-z]+"}
+	argfmt := "--sa=%s"
+	arg1 := fmt.Sprintf(argfmt, in[0])
+	arg2 := fmt.Sprintf(argfmt, in[1])
+	arg3 := fmt.Sprintf(argfmt, in[2])
+	err := f.Parse([]string{arg1, arg2, arg3})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+
+	if len(expected) != len(sa) {
+		t.Fatalf("expected number of sa to be %d but got: %d", len(expected), len(sa))
+	}
+	for i, v := range sa {
+		if expected[i] != v {
+			t.Fatalf("expected sa[%d] to be %s but got: %s", i, expected[i], v)
+		}
+	}
+
+	values, err := f.GetStringArray("sa")
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+
+	if len(expected) != len(values) {
+		t.Fatalf("expected number of values to be %d but got: %d", len(expected), len(values))
+	}
+	for i, v := range values {
+		if expected[i] != v {
+			t.Fatalf("expected sa[%d] to be %s but got: %s", i, expected[i], v)
+		}
+	}
+}
diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go
new file mode 100644
index 0000000..05eee75
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_slice.go
@@ -0,0 +1,129 @@
+package pflag
+
+import (
+	"bytes"
+	"encoding/csv"
+	"strings"
+)
+
+// -- stringSlice Value
+type stringSliceValue struct {
+	value   *[]string
+	changed bool
+}
+
+func newStringSliceValue(val []string, p *[]string) *stringSliceValue {
+	ssv := new(stringSliceValue)
+	ssv.value = p
+	*ssv.value = val
+	return ssv
+}
+
+func readAsCSV(val string) ([]string, error) {
+	if val == "" {
+		return []string{}, nil
+	}
+	stringReader := strings.NewReader(val)
+	csvReader := csv.NewReader(stringReader)
+	return csvReader.Read()
+}
+
+func writeAsCSV(vals []string) (string, error) {
+	b := &bytes.Buffer{}
+	w := csv.NewWriter(b)
+	err := w.Write(vals)
+	if err != nil {
+		return "", err
+	}
+	w.Flush()
+	return strings.TrimSuffix(b.String(), "\n"), nil
+}
+
+func (s *stringSliceValue) Set(val string) error {
+	v, err := readAsCSV(val)
+	if err != nil {
+		return err
+	}
+	if !s.changed {
+		*s.value = v
+	} else {
+		*s.value = append(*s.value, v...)
+	}
+	s.changed = true
+	return nil
+}
+
+func (s *stringSliceValue) Type() string {
+	return "stringSlice"
+}
+
+func (s *stringSliceValue) String() string {
+	str, _ := writeAsCSV(*s.value)
+	return "[" + str + "]"
+}
+
+func stringSliceConv(sval string) (interface{}, error) {
+	sval = sval[1 : len(sval)-1]
+	// An empty string would cause a slice with one (empty) string
+	if len(sval) == 0 {
+		return []string{}, nil
+	}
+	return readAsCSV(sval)
+}
+
+// GetStringSlice returns the []string value of a flag with the given name
+func (f *FlagSet) GetStringSlice(name string) ([]string, error) {
+	val, err := f.getFlagType(name, "stringSlice", stringSliceConv)
+	if err != nil {
+		return []string{}, err
+	}
+	return val.([]string), nil
+}
+
+// StringSliceVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) {
+	f.VarP(newStringSliceValue(value, p), name, "", usage)
+}
+
+// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
+	f.VarP(newStringSliceValue(value, p), name, shorthand, usage)
+}
+
+// StringSliceVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+func StringSliceVar(p *[]string, name string, value []string, usage string) {
+	CommandLine.VarP(newStringSliceValue(value, p), name, "", usage)
+}
+
+// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
+	CommandLine.VarP(newStringSliceValue(value, p), name, shorthand, usage)
+}
+
+// StringSlice defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string {
+	p := []string{}
+	f.StringSliceVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage string) *[]string {
+	p := []string{}
+	f.StringSliceVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// StringSlice defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+func StringSlice(name string, value []string, usage string) *[]string {
+	return CommandLine.StringSliceP(name, "", value, usage)
+}
+
+// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
+func StringSliceP(name, shorthand string, value []string, usage string) *[]string {
+	return CommandLine.StringSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/string_slice_test.go b/vendor/github.com/spf13/pflag/string_slice_test.go
new file mode 100644
index 0000000..c41f3bd
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_slice_test.go
@@ -0,0 +1,253 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pflag
+
+import (
+	"fmt"
+	"strings"
+	"testing"
+)
+
+func setUpSSFlagSet(ssp *[]string) *FlagSet {
+	f := NewFlagSet("test", ContinueOnError)
+	f.StringSliceVar(ssp, "ss", []string{}, "Comma-separated list!")
+	return f
+}
+
+func setUpSSFlagSetWithDefault(ssp *[]string) *FlagSet {
+	f := NewFlagSet("test", ContinueOnError)
+	f.StringSliceVar(ssp, "ss", []string{"default", "values"}, "Comma-separated list!")
+	return f
+}
+
+func TestEmptySS(t *testing.T) {
+	var ss []string
+	f := setUpSSFlagSet(&ss)
+	err := f.Parse([]string{})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+
+	getSS, err := f.GetStringSlice("ss")
+	if err != nil {
+		t.Fatal("got an error from GetStringSlice():", err)
+	}
+	if len(getSS) != 0 {
+		t.Fatalf("got ss %v with len=%d but expected length=0", getSS, len(getSS))
+	}
+}
+
+func TestEmptySSValue(t *testing.T) {
+	var ss []string
+	f := setUpSSFlagSet(&ss)
+	err := f.Parse([]string{"--ss="})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+
+	getSS, err := f.GetStringSlice("ss")
+	if err != nil {
+		t.Fatal("got an error from GetStringSlice():", err)
+	}
+	if len(getSS) != 0 {
+		t.Fatalf("got ss %v with len=%d but expected length=0", getSS, len(getSS))
+	}
+}
+
+func TestSS(t *testing.T) {
+	var ss []string
+	f := setUpSSFlagSet(&ss)
+
+	vals := []string{"one", "two", "4", "3"}
+	arg := fmt.Sprintf("--ss=%s", strings.Join(vals, ","))
+	err := f.Parse([]string{arg})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+	for i, v := range ss {
+		if vals[i] != v {
+			t.Fatalf("expected ss[%d] to be %s but got: %s", i, vals[i], v)
+		}
+	}
+
+	getSS, err := f.GetStringSlice("ss")
+	if err != nil {
+		t.Fatal("got an error from GetStringSlice():", err)
+	}
+	for i, v := range getSS {
+		if vals[i] != v {
+			t.Fatalf("expected ss[%d] to be %s from GetStringSlice but got: %s", i, vals[i], v)
+		}
+	}
+}
+
+func TestSSDefault(t *testing.T) {
+	var ss []string
+	f := setUpSSFlagSetWithDefault(&ss)
+
+	vals := []string{"default", "values"}
+
+	err := f.Parse([]string{})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+	for i, v := range ss {
+		if vals[i] != v {
+			t.Fatalf("expected ss[%d] to be %s but got: %s", i, vals[i], v)
+		}
+	}
+
+	getSS, err := f.GetStringSlice("ss")
+	if err != nil {
+		t.Fatal("got an error from GetStringSlice():", err)
+	}
+	for i, v := range getSS {
+		if vals[i] != v {
+			t.Fatalf("expected ss[%d] to be %s from GetStringSlice but got: %s", i, vals[i], v)
+		}
+	}
+}
+
+func TestSSWithDefault(t *testing.T) {
+	var ss []string
+	f := setUpSSFlagSetWithDefault(&ss)
+
+	vals := []string{"one", "two", "4", "3"}
+	arg := fmt.Sprintf("--ss=%s", strings.Join(vals, ","))
+	err := f.Parse([]string{arg})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+	for i, v := range ss {
+		if vals[i] != v {
+			t.Fatalf("expected ss[%d] to be %s but got: %s", i, vals[i], v)
+		}
+	}
+
+	getSS, err := f.GetStringSlice("ss")
+	if err != nil {
+		t.Fatal("got an error from GetStringSlice():", err)
+	}
+	for i, v := range getSS {
+		if vals[i] != v {
+			t.Fatalf("expected ss[%d] to be %s from GetStringSlice but got: %s", i, vals[i], v)
+		}
+	}
+}
+
+func TestSSCalledTwice(t *testing.T) {
+	var ss []string
+	f := setUpSSFlagSet(&ss)
+
+	in := []string{"one,two", "three"}
+	expected := []string{"one", "two", "three"}
+	argfmt := "--ss=%s"
+	arg1 := fmt.Sprintf(argfmt, in[0])
+	arg2 := fmt.Sprintf(argfmt, in[1])
+	err := f.Parse([]string{arg1, arg2})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+
+	if len(expected) != len(ss) {
+		t.Fatalf("expected number of ss to be %d but got: %d", len(expected), len(ss))
+	}
+	for i, v := range ss {
+		if expected[i] != v {
+			t.Fatalf("expected ss[%d] to be %s but got: %s", i, expected[i], v)
+		}
+	}
+
+	values, err := f.GetStringSlice("ss")
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+
+	if len(expected) != len(values) {
+		t.Fatalf("expected number of values to be %d but got: %d", len(expected), len(values))
+	}
+	for i, v := range values {
+		if expected[i] != v {
+			t.Fatalf("expected ss[%d] to be %s but got: %s", i, expected[i], v)
+		}
+	}
+}
+
+func TestSSWithComma(t *testing.T) {
+	var ss []string
+	f := setUpSSFlagSet(&ss)
+
+	in := []string{`"one,two"`, `"three"`, `"four,five",six`}
+	expected := []string{"one,two", "three", "four,five", "six"}
+	argfmt := "--ss=%s"
+	arg1 := fmt.Sprintf(argfmt, in[0])
+	arg2 := fmt.Sprintf(argfmt, in[1])
+	arg3 := fmt.Sprintf(argfmt, in[2])
+	err := f.Parse([]string{arg1, arg2, arg3})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+
+	if len(expected) != len(ss) {
+		t.Fatalf("expected number of ss to be %d but got: %d", len(expected), len(ss))
+	}
+	for i, v := range ss {
+		if expected[i] != v {
+			t.Fatalf("expected ss[%d] to be %s but got: %s", i, expected[i], v)
+		}
+	}
+
+	values, err := f.GetStringSlice("ss")
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+
+	if len(expected) != len(values) {
+		t.Fatalf("expected number of values to be %d but got: %d", len(expected), len(values))
+	}
+	for i, v := range values {
+		if expected[i] != v {
+			t.Fatalf("expected ss[%d] to be %s but got: %s", i, expected[i], v)
+		}
+	}
+}
+
+func TestSSWithSquareBrackets(t *testing.T) {
+	var ss []string
+	f := setUpSSFlagSet(&ss)
+
+	in := []string{`"[a-z]"`, `"[a-z]+"`}
+	expected := []string{"[a-z]", "[a-z]+"}
+	argfmt := "--ss=%s"
+	arg1 := fmt.Sprintf(argfmt, in[0])
+	arg2 := fmt.Sprintf(argfmt, in[1])
+	err := f.Parse([]string{arg1, arg2})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+
+	if len(expected) != len(ss) {
+		t.Fatalf("expected number of ss to be %d but got: %d", len(expected), len(ss))
+	}
+	for i, v := range ss {
+		if expected[i] != v {
+			t.Fatalf("expected ss[%d] to be %s but got: %s", i, expected[i], v)
+		}
+	}
+
+	values, err := f.GetStringSlice("ss")
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+
+	if len(expected) != len(values) {
+		t.Fatalf("expected number of values to be %d but got: %d", len(expected), len(values))
+	}
+	for i, v := range values {
+		if expected[i] != v {
+			t.Fatalf("expected ss[%d] to be %s but got: %s", i, expected[i], v)
+		}
+	}
+}
diff --git a/vendor/github.com/spf13/pflag/uint.go b/vendor/github.com/spf13/pflag/uint.go
new file mode 100644
index 0000000..dcbc2b7
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint Value
+type uintValue uint
+
+func newUintValue(val uint, p *uint) *uintValue {
+	*p = val
+	return (*uintValue)(p)
+}
+
+func (i *uintValue) Set(s string) error {
+	v, err := strconv.ParseUint(s, 0, 64)
+	*i = uintValue(v)
+	return err
+}
+
+func (i *uintValue) Type() string {
+	return "uint"
+}
+
+func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uintConv(sval string) (interface{}, error) {
+	v, err := strconv.ParseUint(sval, 0, 0)
+	if err != nil {
+		return 0, err
+	}
+	return uint(v), nil
+}
+
+// GetUint returns the uint value of a flag with the given name
+func (f *FlagSet) GetUint(name string) (uint, error) {
+	val, err := f.getFlagType(name, "uint", uintConv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(uint), nil
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) {
+	f.VarP(newUintValue(value, p), name, "", usage)
+}
+
+// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) {
+	f.VarP(newUintValue(value, p), name, shorthand, usage)
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func UintVar(p *uint, name string, value uint, usage string) {
+	CommandLine.VarP(newUintValue(value, p), name, "", usage)
+}
+
+// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
+func UintVarP(p *uint, name, shorthand string, value uint, usage string) {
+	CommandLine.VarP(newUintValue(value, p), name, shorthand, usage)
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func (f *FlagSet) Uint(name string, value uint, usage string) *uint {
+	p := new(uint)
+	f.UintVarP(p, name, "", value, usage)
+	return p
+}
+
+// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint {
+	p := new(uint)
+	f.UintVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func Uint(name string, value uint, usage string) *uint {
+	return CommandLine.UintP(name, "", value, usage)
+}
+
+// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
+func UintP(name, shorthand string, value uint, usage string) *uint {
+	return CommandLine.UintP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint16.go b/vendor/github.com/spf13/pflag/uint16.go
new file mode 100644
index 0000000..7e9914e
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint16.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint16 value
+type uint16Value uint16
+
+func newUint16Value(val uint16, p *uint16) *uint16Value {
+	*p = val
+	return (*uint16Value)(p)
+}
+
+func (i *uint16Value) Set(s string) error {
+	v, err := strconv.ParseUint(s, 0, 16)
+	*i = uint16Value(v)
+	return err
+}
+
+func (i *uint16Value) Type() string {
+	return "uint16"
+}
+
+func (i *uint16Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint16Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseUint(sval, 0, 16)
+	if err != nil {
+		return 0, err
+	}
+	return uint16(v), nil
+}
+
+// GetUint16 returns the uint16 value of a flag with the given name
+func (f *FlagSet) GetUint16(name string) (uint16, error) {
+	val, err := f.getFlagType(name, "uint16", uint16Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(uint16), nil
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) {
+	f.VarP(newUint16Value(value, p), name, "", usage)
+}
+
+// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
+	f.VarP(newUint16Value(value, p), name, shorthand, usage)
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func Uint16Var(p *uint16, name string, value uint16, usage string) {
+	CommandLine.VarP(newUint16Value(value, p), name, "", usage)
+}
+
+// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
+	CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage)
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
+func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 {
+	p := new(uint16)
+	f.Uint16VarP(p, name, "", value, usage)
+	return p
+}
+
+// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
+	p := new(uint16)
+	f.Uint16VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
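+//
+// A minimal usage sketch (the flag name "depth" and the surrounding code
+// are illustrative only, not part of pflag):
+//
+//	depth := pflag.Uint16("depth", 8, "maximum search depth")
+//	pflag.Parse()       // e.g. --depth=32
+//	fmt.Println(*depth) // prints 32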
+func Uint16(name string, value uint16, usage string) *uint16 {
+	return CommandLine.Uint16P(name, "", value, usage)
+}
+
+// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
+func Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
+	return CommandLine.Uint16P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint32.go b/vendor/github.com/spf13/pflag/uint32.go
new file mode 100644
index 0000000..d802453
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint32 value
+type uint32Value uint32
+
+func newUint32Value(val uint32, p *uint32) *uint32Value {
+	*p = val
+	return (*uint32Value)(p)
+}
+
+func (i *uint32Value) Set(s string) error {
+	v, err := strconv.ParseUint(s, 0, 32)
+	*i = uint32Value(v)
+	return err
+}
+
+func (i *uint32Value) Type() string {
+	return "uint32"
+}
+
+func (i *uint32Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint32Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseUint(sval, 0, 32)
+	if err != nil {
+		return 0, err
+	}
+	return uint32(v), nil
+}
+
+// GetUint32 returns the uint32 value of a flag with the given name
+func (f *FlagSet) GetUint32(name string) (uint32, error) {
+	val, err := f.getFlagType(name, "uint32", uint32Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(uint32), nil
+}
+
+// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
+// The argument p points to a uint32 variable in which to store the value of the flag.
+func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) {
+	f.VarP(newUint32Value(value, p), name, "", usage)
+}
+
+// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
+	f.VarP(newUint32Value(value, p), name, shorthand, usage)
+}
+
+// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
+// The argument p points to a uint32 variable in which to store the value of the flag.
+func Uint32Var(p *uint32, name string, value uint32, usage string) {
+	CommandLine.VarP(newUint32Value(value, p), name, "", usage)
+}
+
+// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
+	CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage)
+}
+
+// Uint32 defines a uint32 flag with specified name, default value, and usage string.
+// The return value is the address of a uint32 variable that stores the value of the flag.
+func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 {
+	p := new(uint32)
+	f.Uint32VarP(p, name, "", value, usage)
+	return p
+}
+
+// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
+	p := new(uint32)
+	f.Uint32VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Uint32 defines a uint32 flag with specified name, default value, and usage string.
+// The return value is the address of a uint32 variable that stores the value of the flag.
+func Uint32(name string, value uint32, usage string) *uint32 {
+	return CommandLine.Uint32P(name, "", value, usage)
+}
+
+// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
+func Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
+	return CommandLine.Uint32P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint64.go b/vendor/github.com/spf13/pflag/uint64.go
new file mode 100644
index 0000000..f62240f
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint64.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint64 Value
+type uint64Value uint64
+
+func newUint64Value(val uint64, p *uint64) *uint64Value {
+	*p = val
+	return (*uint64Value)(p)
+}
+
+func (i *uint64Value) Set(s string) error {
+	v, err := strconv.ParseUint(s, 0, 64)
+	*i = uint64Value(v)
+	return err
+}
+
+func (i *uint64Value) Type() string {
+	return "uint64"
+}
+
+func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint64Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseUint(sval, 0, 64)
+	if err != nil {
+		return 0, err
+	}
+	return uint64(v), nil
+}
+
+// GetUint64 returns the uint64 value of a flag with the given name
+func (f *FlagSet) GetUint64(name string) (uint64, error) {
+	val, err := f.getFlagType(name, "uint64", uint64Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(uint64), nil
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) {
+	f.VarP(newUint64Value(value, p), name, "", usage)
+}
+
+// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
+	f.VarP(newUint64Value(value, p), name, shorthand, usage)
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func Uint64Var(p *uint64, name string, value uint64, usage string) {
+	CommandLine.VarP(newUint64Value(value, p), name, "", usage)
+}
+
+// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
+	CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage)
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 {
+	p := new(uint64)
+	f.Uint64VarP(p, name, "", value, usage)
+	return p
+}
+
+// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
+	p := new(uint64)
+	f.Uint64VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func Uint64(name string, value uint64, usage string) *uint64 {
+	return CommandLine.Uint64P(name, "", value, usage)
+}
+
+// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
+func Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
+	return CommandLine.Uint64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint8.go b/vendor/github.com/spf13/pflag/uint8.go
new file mode 100644
index 0000000..bb0e83c
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint8.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint8 Value
+type uint8Value uint8
+
+func newUint8Value(val uint8, p *uint8) *uint8Value {
+	*p = val
+	return (*uint8Value)(p)
+}
+
+func (i *uint8Value) Set(s string) error {
+	v, err := strconv.ParseUint(s, 0, 8)
+	*i = uint8Value(v)
+	return err
+}
+
+func (i *uint8Value) Type() string {
+	return "uint8"
+}
+
+func (i *uint8Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint8Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseUint(sval, 0, 8)
+	if err != nil {
+		return 0, err
+	}
+	return uint8(v), nil
+}
+
+// GetUint8 returns the uint8 value of a flag with the given name
+func (f *FlagSet) GetUint8(name string) (uint8, error) {
+	val, err := f.getFlagType(name, "uint8", uint8Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(uint8), nil
+}
+
+// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
+// The argument p points to a uint8 variable in which to store the value of the flag.
+func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) {
+	f.VarP(newUint8Value(value, p), name, "", usage)
+}
+
+// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
+	f.VarP(newUint8Value(value, p), name, shorthand, usage)
+}
+
+// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
+// The argument p points to a uint8 variable in which to store the value of the flag.
+func Uint8Var(p *uint8, name string, value uint8, usage string) {
+	CommandLine.VarP(newUint8Value(value, p), name, "", usage)
+}
+
+// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
+	CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage)
+}
+
+// Uint8 defines a uint8 flag with specified name, default value, and usage string.
+// The return value is the address of a uint8 variable that stores the value of the flag.
+func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 {
+	p := new(uint8)
+	f.Uint8VarP(p, name, "", value, usage)
+	return p
+}
+
+// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
+	p := new(uint8)
+	f.Uint8VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Uint8 defines a uint8 flag with specified name, default value, and usage string.
+// The return value is the address of a uint8 variable that stores the value of the flag.
+func Uint8(name string, value uint8, usage string) *uint8 {
+	return CommandLine.Uint8P(name, "", value, usage)
+}
+
+// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash.
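+//
+// A sketch of the shorthand form (the flag itself is hypothetical):
+//
+//	verbosity := pflag.Uint8P("verbosity", "v", 0, "verbosity level")
+//	// After pflag.Parse(), --verbosity=3, -v=3, and -v 3 all set it to 3.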
+func Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
+	return CommandLine.Uint8P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint_slice.go b/vendor/github.com/spf13/pflag/uint_slice.go
new file mode 100644
index 0000000..edd94c6
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint_slice.go
@@ -0,0 +1,126 @@
+package pflag
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// -- uintSlice Value
+type uintSliceValue struct {
+	value   *[]uint
+	changed bool
+}
+
+func newUintSliceValue(val []uint, p *[]uint) *uintSliceValue {
+	uisv := new(uintSliceValue)
+	uisv.value = p
+	*uisv.value = val
+	return uisv
+}
+
+func (s *uintSliceValue) Set(val string) error {
+	ss := strings.Split(val, ",")
+	out := make([]uint, len(ss))
+	for i, d := range ss {
+		u, err := strconv.ParseUint(d, 10, 0)
+		if err != nil {
+			return err
+		}
+		out[i] = uint(u)
+	}
+	if !s.changed {
+		*s.value = out
+	} else {
+		*s.value = append(*s.value, out...)
+	}
+	s.changed = true
+	return nil
+}
+
+func (s *uintSliceValue) Type() string {
+	return "uintSlice"
+}
+
+func (s *uintSliceValue) String() string {
+	out := make([]string, len(*s.value))
+	for i, d := range *s.value {
+		out[i] = fmt.Sprintf("%d", d)
+	}
+	return "[" + strings.Join(out, ",") + "]"
+}
+
+func uintSliceConv(val string) (interface{}, error) {
+	val = strings.Trim(val, "[]")
+	// Empty string would cause a slice with one (empty) entry
+	if len(val) == 0 {
+		return []uint{}, nil
+	}
+	ss := strings.Split(val, ",")
+	out := make([]uint, len(ss))
+	for i, d := range ss {
+		u, err := strconv.ParseUint(d, 10, 0)
+		if err != nil {
+			return nil, err
+		}
+		out[i] = uint(u)
+	}
+	return out, nil
+}
+
+// GetUintSlice returns the []uint value of a flag with the given name.
+func (f *FlagSet) GetUintSlice(name string) ([]uint, error) {
+	val, err := f.getFlagType(name, "uintSlice", uintSliceConv)
+	if err != nil {
+		return []uint{}, err
+	}
+	return val.([]uint), nil
+}
+
+// UintSliceVar defines a uintSlice flag with specified name, default value, and usage string.
+// The argument p points to a []uint variable in which to store the value of the flag.
+func (f *FlagSet) UintSliceVar(p *[]uint, name string, value []uint, usage string) {
+	f.VarP(newUintSliceValue(value, p), name, "", usage)
+}
+
+// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
+	f.VarP(newUintSliceValue(value, p), name, shorthand, usage)
+}
+
+// UintSliceVar defines a []uint flag with specified name, default value, and usage string.
+// The argument p points to a []uint variable in which to store the value of the flag.
+func UintSliceVar(p *[]uint, name string, value []uint, usage string) {
+	CommandLine.VarP(newUintSliceValue(value, p), name, "", usage)
+}
+
+// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
+	CommandLine.VarP(newUintSliceValue(value, p), name, shorthand, usage)
+}
+
+// UintSlice defines a []uint flag with specified name, default value, and usage string.
+// The return value is the address of a []uint variable that stores the value of the flag.
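+//
+// Values are comma-separated, and repeating the flag appends to the slice
+// (a sketch; the flag name "ids" is illustrative only):
+//
+//	ids := pflag.UintSlice("ids", nil, "ids to fetch")
+//	// After parsing "--ids=1,2 --ids=3", *ids is []uint{1, 2, 3}.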
+func (f *FlagSet) UintSlice(name string, value []uint, usage string) *[]uint {
+	p := []uint{}
+	f.UintSliceVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
+	p := []uint{}
+	f.UintSliceVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// UintSlice defines a []uint flag with specified name, default value, and usage string.
+// The return value is the address of a []uint variable that stores the value of the flag.
+func UintSlice(name string, value []uint, usage string) *[]uint {
+	return CommandLine.UintSliceP(name, "", value, usage)
+}
+
+// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
+func UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
+	return CommandLine.UintSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint_slice_test.go b/vendor/github.com/spf13/pflag/uint_slice_test.go
new file mode 100644
index 0000000..db1a19d
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint_slice_test.go
@@ -0,0 +1,161 @@
+package pflag
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"testing"
+)
+
+func setUpUISFlagSet(uisp *[]uint) *FlagSet {
+	f := NewFlagSet("test", ContinueOnError)
+	f.UintSliceVar(uisp, "uis", []uint{}, "Comma-separated list!")
+	return f
+}
+
+func setUpUISFlagSetWithDefault(uisp *[]uint) *FlagSet {
+	f := NewFlagSet("test", ContinueOnError)
+	f.UintSliceVar(uisp, "uis", []uint{0, 1}, "Comma-separated list!")
+	return f
+}
+
+func TestEmptyUIS(t *testing.T) {
+	var uis []uint
+	f := setUpUISFlagSet(&uis)
+	err := f.Parse([]string{})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+
+	getUIS, err := f.GetUintSlice("uis")
+	if err != nil {
+		t.Fatal("got an error from GetUintSlice():", err)
+	}
+	if len(getUIS) != 0 {
+		t.Fatalf("got uis %v with len=%d but expected length=0", getUIS, len(getUIS))
+	}
+}
+
+func TestUIS(t *testing.T) {
+	var uis []uint
+	f := setUpUISFlagSet(&uis)
+
+	vals := []string{"1", "2", "4", "3"}
+	arg := fmt.Sprintf("--uis=%s", strings.Join(vals, ","))
+	err := f.Parse([]string{arg})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+	for i, v := range uis {
+		u, err := strconv.ParseUint(vals[i], 10, 0)
+		if err != nil {
+			t.Fatalf("got error: %v", err)
+		}
+		if uint(u) != v {
+			t.Fatalf("expected uis[%d] to be %s but got %d", i, vals[i], v)
+		}
+	}
+	getUIS, err := f.GetUintSlice("uis")
+	if err != nil {
+		t.Fatalf("got error: %v", err)
+	}
+	for i, v := range getUIS {
+		u, err := strconv.ParseUint(vals[i], 10, 0)
+		if err != nil {
+			t.Fatalf("got error: %v", err)
+		}
+		if uint(u) != v {
+			t.Fatalf("expected uis[%d] to be %s but got: %d from GetUintSlice", i, vals[i], v)
+		}
+	}
+}
+
+func TestUISDefault(t *testing.T) {
+	var uis []uint
+	f := setUpUISFlagSetWithDefault(&uis)
+
+	vals := []string{"0", "1"}
+
+	err := f.Parse([]string{})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+	for i, v := range uis {
+		u, err := strconv.ParseUint(vals[i], 10, 0)
+		if err != nil {
+			t.Fatalf("got error: %v", err)
+		}
+		if uint(u) != v {
+			t.Fatalf("expected uis[%d] to be %d but got: %d", i, u, v)
+		}
+	}
+
+	getUIS, err := f.GetUintSlice("uis")
+	if err != nil {
+		t.Fatal("got an error from GetUintSlice():", err)
+	}
+	for i, v := range getUIS {
+		u, err := strconv.ParseUint(vals[i], 10, 0)
+		if err != nil {
+			t.Fatalf("got error: %v", err)
+		}
+		if uint(u) != v {
+			t.Fatalf("expected uis[%d] to be %d from GetUintSlice but got: %d", i, u, v)
+		}
+	}
+}
+
+func TestUISWithDefault(t *testing.T) {
+	var uis []uint
+	f := setUpUISFlagSetWithDefault(&uis)
+
+	vals := []string{"1", "2"}
+	arg := fmt.Sprintf("--uis=%s", strings.Join(vals, ","))
+	err := f.Parse([]string{arg})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+	for i, v := range uis {
+		u, err := strconv.ParseUint(vals[i], 10, 0)
+		if err != nil {
+			t.Fatalf("got error: %v", err)
+		}
+		if uint(u) != v {
+			t.Fatalf("expected uis[%d] to be %d but got: %d", i, u, v)
+		}
+	}
+
+	getUIS, err := f.GetUintSlice("uis")
+	if err != nil {
+		t.Fatal("got an error from GetUintSlice():", err)
+	}
+	for i, v := range getUIS {
+		u, err := strconv.ParseUint(vals[i], 10, 0)
+		if err != nil {
+			t.Fatalf("got error: %v", err)
+		}
+		if uint(u) != v {
+			t.Fatalf("expected uis[%d] to be %d from GetUintSlice but got: %d", i, u, v)
+		}
+	}
+}
+
+func TestUISCalledTwice(t *testing.T) {
+	var uis []uint
+	f := setUpUISFlagSet(&uis)
+
+	in := []string{"1,2", "3"}
+	expected := []uint{1, 2, 3}
+	argfmt := "--uis=%s"
+	arg1 := fmt.Sprintf(argfmt, in[0])
+	arg2 := fmt.Sprintf(argfmt, in[1])
+	err := f.Parse([]string{arg1, arg2})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+	for i, v := range uis {
+		if expected[i] != v {
+			t.Fatalf("expected uis[%d] to be %d but got: %d", i, expected[i], v)
+		}
+	}
+}
diff --git a/vendor/github.com/spf13/pflag/verify/all.sh b/vendor/github.com/spf13/pflag/verify/all.sh
new file mode 100755
index 0000000..739f89c
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/verify/all.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+ROOT=$(dirname "${BASH_SOURCE}")/..
+
+# Some useful colors.
+if [[ -z "${color_start-}" ]]; then
+  declare -r color_start="\033["
+  declare -r color_red="${color_start}0;31m"
+  declare -r color_yellow="${color_start}0;33m"
+  declare -r color_green="${color_start}0;32m"
+  declare -r color_norm="${color_start}0m"
+fi
+
+SILENT=true
+
+function is-excluded {
+  for e in $EXCLUDE; do
+    if [[ $1 -ef ${BASH_SOURCE} ]]; then
+      return
+    fi
+    if [[ $1 -ef "$ROOT/verify/$e" ]]; then
+      return
+    fi
+  done
+  return 1
+}
+
+while getopts ":v" opt; do
+  case $opt in
+    v)
+      SILENT=false
+      ;;
+    \?)
+      echo "Invalid flag: -$OPTARG" >&2
+      exit 1
+      ;;
+  esac
+done
+
+if $SILENT ; then
+  echo "Running in silent mode; run with -v to see script logs."
+fi
+
+EXCLUDE="all.sh"
+
+ret=0
+for t in `ls $ROOT/verify/*.sh`
+do
+  if is-excluded $t ; then
+    echo "Skipping $t"
+    continue
+  fi
+  if $SILENT ; then
+    echo -e "Verifying $t"
+    if bash "$t" &> /dev/null; then
+      echo -e "${color_green}SUCCESS${color_norm}"
+    else
+      echo -e "${color_red}FAILED${color_norm}"
+      ret=1
+    fi
+  else
+    bash "$t" || ret=1
+  fi
+done
+exit $ret
diff --git a/vendor/github.com/spf13/pflag/verify/gofmt.sh b/vendor/github.com/spf13/pflag/verify/gofmt.sh
new file mode 100755
index 0000000..f66acf8
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/verify/gofmt.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+ROOT=$(dirname "${BASH_SOURCE}")/..
+
+pushd "${ROOT}" > /dev/null
+
+GOFMT=${GOFMT:-"gofmt"}
+bad_files=$(find . -name '*.go' | xargs $GOFMT -s -l)
+if [[ -n "${bad_files}" ]]; then
+  echo "!!! '$GOFMT' needs to be run on the following files: "
+  echo "${bad_files}"
+  exit 1
+fi
+
+# ex: ts=2 sw=2 et filetype=sh
diff --git a/vendor/github.com/spf13/pflag/verify/golint.sh b/vendor/github.com/spf13/pflag/verify/golint.sh
new file mode 100755
index 0000000..685c177
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/verify/golint.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+ROOT=$(dirname "${BASH_SOURCE}")/..
+GOLINT=${GOLINT:-"golint"}
+
+pushd "${ROOT}" > /dev/null
+  bad_files=$($GOLINT -min_confidence=0.9 ./...)
+  if [[ -n "${bad_files}" ]]; then
+    echo "!!! '$GOLINT' problems: "
+    echo "${bad_files}"
+    exit 1
+  fi
+popd > /dev/null
+
+# ex: ts=2 sw=2 et filetype=sh
diff --git a/vendor/golang.org/x/net/.gitattributes b/vendor/golang.org/x/net/.gitattributes
new file mode 100644
index 0000000..d2f212e
--- /dev/null
+++ b/vendor/golang.org/x/net/.gitattributes
@@ -0,0 +1,10 @@
+# Treat all files in this repo as binary, with no git magic updating
+# line endings. Windows users contributing to Go will need to use a
+# modern version of git and editors capable of LF line endings.
+#
+# We'll prevent accidental CRLF line endings from entering the repo
+# via the git-review gofmt checks.
+#
+# See golang.org/issue/9281
+
+* -text
diff --git a/vendor/golang.org/x/net/.gitignore b/vendor/golang.org/x/net/.gitignore
new file mode 100644
index 0000000..8339fd6
--- /dev/null
+++ b/vendor/golang.org/x/net/.gitignore
@@ -0,0 +1,2 @@
+# Add no patterns to .hgignore except for files generated by the build.
+last-change
diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS
new file mode 100644
index 0000000..15167cd
--- /dev/null
+++ b/vendor/golang.org/x/net/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/net/CONTRIBUTING.md b/vendor/golang.org/x/net/CONTRIBUTING.md
new file mode 100644
index 0000000..d0485e8
--- /dev/null
+++ b/vendor/golang.org/x/net/CONTRIBUTING.md
@@ -0,0 +1,26 @@
+# Contributing to Go
+
+Go is an open source project.
+
+It is the work of hundreds of contributors. We appreciate your help!
+
+## Filing issues
+
+When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions:
+
+1. What version of Go are you using (`go version`)?
+2. What operating system and processor architecture are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
+
+General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
+The gophers there will answer or ask you to file an issue if you've tripped over a bug.
+
+## Contributing code
+
+Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
+before sending patches.
+
+Unless otherwise noted, the Go source files are distributed under
+the BSD-style license found in the LICENSE file.
diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS
new file mode 100644
index 0000000..1c4577e
--- /dev/null
+++ b/vendor/golang.org/x/net/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/vendor/golang.org/x/net/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/vendor/golang.org/x/net/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/net/README.md b/vendor/golang.org/x/net/README.md
new file mode 100644
index 0000000..00a9b6e
--- /dev/null
+++ b/vendor/golang.org/x/net/README.md
@@ -0,0 +1,16 @@
+# Go Networking
+
+This repository holds supplementary Go networking libraries.
+
+## Download/Install
+
+The easiest way to install is to run `go get -u golang.org/x/net`. You can
+also manually git clone the repository to `$GOPATH/src/golang.org/x/net`.
+
+## Report Issues / Send Patches
+
+This repository uses Gerrit for code changes. To learn how to submit
+changes to this repository, see https://golang.org/doc/contribute.html.
+The main issue tracker for the net repository is located at
+https://github.com/golang/go/issues. Prefix your issue with "x/net:" in the
+subject line, so it is easy to find.
diff --git a/vendor/golang.org/x/net/bpf/asm.go b/vendor/golang.org/x/net/bpf/asm.go
new file mode 100644
index 0000000..15e21b1
--- /dev/null
+++ b/vendor/golang.org/x/net/bpf/asm.go
@@ -0,0 +1,41 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf
+
+import "fmt"
+
+// Assemble converts insts into raw instructions suitable for loading
+// into a BPF virtual machine.
+//
+// Currently, no optimization is attempted; the assembled program flow
+// is exactly as provided.
+func Assemble(insts []Instruction) ([]RawInstruction, error) {
+	ret := make([]RawInstruction, len(insts))
+	var err error
+	for i, inst := range insts {
+		ret[i], err = inst.Assemble()
+		if err != nil {
+			return nil, fmt.Errorf("assembling instruction %d: %s", i+1, err)
+		}
+	}
+	return ret, nil
+}
+
+// Disassemble attempts to parse raw back into
+// Instructions. Unrecognized RawInstructions are assumed to be an
+// extension not implemented by this package, and are passed through
+// unchanged to the output. The allDecoded value reports whether insts
+// contains no RawInstructions.
+func Disassemble(raw []RawInstruction) (insts []Instruction, allDecoded bool) {
+	insts = make([]Instruction, len(raw))
+	allDecoded = true
+	for i, r := range raw {
+		insts[i] = r.Disassemble()
+		if _, ok := insts[i].(RawInstruction); ok {
+			allDecoded = false
+		}
+	}
+	return insts, allDecoded
+}
diff --git a/vendor/golang.org/x/net/bpf/constants.go b/vendor/golang.org/x/net/bpf/constants.go
new file mode 100644
index 0000000..b89ca35
--- /dev/null
+++ b/vendor/golang.org/x/net/bpf/constants.go
@@ -0,0 +1,218 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf
+
+// A Register is a register of the BPF virtual machine.
+type Register uint16
+
+const (
+	// RegA is the accumulator register. RegA is always the
+	// destination register of ALU operations.
+	RegA Register = iota
+	// RegX is the indirection register, used by LoadIndirect
+	// operations.
+	RegX
+)
+
+// An ALUOp is an arithmetic or logic operation.
+type ALUOp uint16
+
+// ALU binary operation types.
+const (
+	ALUOpAdd ALUOp = iota << 4
+	ALUOpSub
+	ALUOpMul
+	ALUOpDiv
+	ALUOpOr
+	ALUOpAnd
+	ALUOpShiftLeft
+	ALUOpShiftRight
+	aluOpNeg // Not exported because it's the only unary ALU operation, and gets its own instruction type.
+	ALUOpMod
+	ALUOpXor
+)
+
+// A JumpTest is a comparison operator used in conditional jumps.
+type JumpTest uint16
+
+// Supported operators for conditional jumps.
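+// For example, mirroring the ARP filter in this package's documentation,
+// JumpIf{Cond: JumpNotEqual, Val: 0x0806, SkipTrue: 1} skips the next
+// instruction when register A does not equal 0x0806.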
+const (
+	// A == K
+	JumpEqual JumpTest = iota
+	// A != K
+	JumpNotEqual
+	// A > K
+	JumpGreaterThan
+	// A < K
+	JumpLessThan
+	// A >= K
+	JumpGreaterOrEqual
+	// A <= K
+	JumpLessOrEqual
+	// A & K != 0
+	JumpBitsSet
+	// A & K == 0
+	JumpBitsNotSet
+)
+
+// An Extension is a function call provided by the kernel that
+// performs advanced operations that are expensive or impossible
+// within the BPF virtual machine.
+//
+// Extensions are only implemented by the Linux kernel.
+//
+// TODO: should we prune this list? Some of these extensions seem
+// either broken or near-impossible to use correctly, whereas other
+// (len, random, ifindex) are quite useful.
+type Extension int
+
+// Extension functions available in the Linux kernel.
+const (
+	// extOffset is the (negative) offset added to an extension's
+	// number to encode it in the K argument of a load-absolute
+	// instruction.
+	extOffset = -0x1000
+	// ExtLen returns the length of the packet.
+	ExtLen Extension = 1
+	// ExtProto returns the packet's L3 protocol type.
+	ExtProto Extension = 0
+	// ExtType returns the packet's type (skb->pkt_type in the kernel)
+	//
+	// TODO: better documentation. How nice an API do we want to
+	// provide for these esoteric extensions?
+	ExtType Extension = 4
+	// ExtPayloadOffset returns the offset of the packet payload, or
+	// the first protocol header that the kernel does not know how to
+	// parse.
+	ExtPayloadOffset Extension = 52
+	// ExtInterfaceIndex returns the index of the interface on which
+	// the packet was received.
+	ExtInterfaceIndex Extension = 8
+	// ExtNetlinkAttr returns the netlink attribute of type X at
+	// offset A.
+	ExtNetlinkAttr Extension = 12
+	// ExtNetlinkAttrNested returns the nested netlink attribute of
+	// type X at offset A.
+	ExtNetlinkAttrNested Extension = 16
+	// ExtMark returns the packet's mark value.
+	ExtMark Extension = 20
+	// ExtQueue returns the packet's assigned hardware queue.
+	ExtQueue Extension = 24
+	// ExtLinkLayerType returns the packet's hardware address type
+	// (e.g. Ethernet, Infiniband).
+	ExtLinkLayerType Extension = 28
+	// ExtRXHash returns the packet's receive hash.
+	//
+	// TODO: figure out what this rxhash actually is.
+	ExtRXHash Extension = 32
+	// ExtCPUID returns the ID of the CPU processing the current
+	// packet.
+	ExtCPUID Extension = 36
+	// ExtVLANTag returns the packet's VLAN tag.
+	ExtVLANTag Extension = 44
+	// ExtVLANTagPresent returns non-zero if the packet has a VLAN
+	// tag.
+	//
+	// TODO: I think this might be a lie: it reads bit 0x1000 of the
+	// VLAN header, which changed meaning in recent revisions of the
+	// spec - this extension may now return meaningless information.
+	ExtVLANTagPresent Extension = 48
+	// ExtVLANProto returns 0x8100 if the frame has a VLAN header,
+	// 0x88a8 if the frame has a "Q-in-Q" double VLAN header, or some
+	// other value if no VLAN information is present.
+	ExtVLANProto Extension = 60
+	// ExtRand returns a uniformly random uint32.
+	ExtRand Extension = 56
+)
+
+// The following gives names to various bit patterns used in opcode construction.
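+// As a sketch of how they combine (derived from the constants below, not
+// from the original sources): the classic "load half-word at an absolute
+// offset" opcode is opClsLoadA | opAddrModeAbsolute | opLoadWidth2 = 0x28,
+// which is how a LoadAbsolute{Size: 2} instruction is encoded.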
+
+const (
+	opMaskCls uint16 = 0x7
+	// opClsLoad masks
+	opMaskLoadDest  = 0x01
+	opMaskLoadWidth = 0x18
+	opMaskLoadMode  = 0xe0
+	// opClsALU
+	opMaskOperandSrc = 0x08
+	opMaskOperator   = 0xf0
+	// opClsJump
+	opMaskJumpConst = 0x0f
+	opMaskJumpCond  = 0xf0
+)
+
+const (
+	// +---------------+-----------------+---+---+---+
+	// | AddrMode (3b) | LoadWidth (2b)  | 0 | 0 | 0 |
+	// +---------------+-----------------+---+---+---+
+	opClsLoadA uint16 = iota
+	// +---------------+-----------------+---+---+---+
+	// | AddrMode (3b) | LoadWidth (2b)  | 0 | 0 | 1 |
+	// +---------------+-----------------+---+---+---+
+	opClsLoadX
+	// +---+---+---+---+---+---+---+---+
+	// | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
+	// +---+---+---+---+---+---+---+---+
+	opClsStoreA
+	// +---+---+---+---+---+---+---+---+
+	// | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
+	// +---+---+---+---+---+---+---+---+
+	opClsStoreX
+	// +---------------+-----------------+---+---+---+
+	// | Operator (4b) | OperandSrc (1b) | 1 | 0 | 0 |
+	// +---------------+-----------------+---+---+---+
+	opClsALU
+	// +-----------------------------+---+---+---+---+
+	// |      TestOperator (4b)      | 0 | 1 | 0 | 1 |
+	// +-----------------------------+---+---+---+---+
+	opClsJump
+	// +---+-------------------------+---+---+---+---+
+	// | 0 | 0 | 0 |   RetSrc (1b)   | 0 | 1 | 1 | 0 |
+	// +---+-------------------------+---+---+---+---+
+	opClsReturn
+	// +---+-------------------------+---+---+---+---+
+	// | 0 | 0 | 0 |  TXAorTAX (1b)  | 0 | 1 | 1 | 1 |
+	// +---+-------------------------+---+---+---+---+
+	opClsMisc
+)
+
+const (
+	opAddrModeImmediate uint16 = iota << 5
+	opAddrModeAbsolute
+	opAddrModeIndirect
+	opAddrModeScratch
+	opAddrModePacketLen // actually an extension, not an addressing mode.
+	opAddrModeMemShift
+)
+
+const (
+	opLoadWidth4 uint16 = iota << 3
+	opLoadWidth2
+	opLoadWidth1
+)
+
+// Operator defined by ALUOp*
+
+const (
+	opALUSrcConstant uint16 = iota << 3
+	opALUSrcX
+)
+
+const (
+	opJumpAlways = iota << 4
+	opJumpEqual
+	opJumpGT
+	opJumpGE
+	opJumpSet
+)
+
+const (
+	opRetSrcConstant uint16 = iota << 4
+	opRetSrcA
+)
+
+const (
+	opMiscTAX = 0x00
+	opMiscTXA = 0x80
+)
diff --git a/vendor/golang.org/x/net/bpf/doc.go b/vendor/golang.org/x/net/bpf/doc.go
new file mode 100644
index 0000000..ae62feb
--- /dev/null
+++ b/vendor/golang.org/x/net/bpf/doc.go
@@ -0,0 +1,82 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+
+Package bpf implements marshaling and unmarshaling of programs for the
+Berkeley Packet Filter virtual machine, and provides a Go implementation
+of the virtual machine.
+
+BPF's main use is to specify a packet filter for network taps, so that
+the kernel doesn't have to expensively copy every packet it sees to
+userspace. However, it's been repurposed to other areas where running
+user code in-kernel is needed. For example, Linux's seccomp uses BPF
+to apply security policies to system calls. For simplicity, this
+documentation refers only to packets, but other uses of BPF have their
+own data payloads.
+
+BPF programs run in a restricted virtual machine. It has almost no
+access to kernel functions, and while conditional branches are
+allowed, they can only jump forwards, to guarantee that there are no
+infinite loops.
+
+The virtual machine
+
+The BPF VM is an accumulator machine. Its main register, called
+register A, is an implicit source and destination in all arithmetic
+and logic operations.
The machine also has 16 scratch registers for
+temporary storage, and an indirection register (register X) for
+indirect memory access. All registers are 32 bits wide.
+
+Each run of a BPF program is given one packet, which is placed in the
+VM's read-only "main memory". LoadAbsolute and LoadIndirect
+instructions can fetch up to 32 bits at a time into register A for
+examination.
+
+The goal of a BPF program is to produce and return a verdict (uint32),
+which tells the kernel what to do with the packet. In the context of
+packet filtering, the returned value is the number of bytes of the
+packet to forward to userspace, or 0 to ignore the packet. Other
+contexts like seccomp define their own return values.
+
+In order to simplify programs, attempts to read past the end of the
+packet terminate the program execution with a verdict of 0 (ignore
+packet). This means that the vast majority of BPF programs don't need
+to do any explicit bounds checking.
+
+In addition to the bytes of the packet, some BPF programs have access
+to extensions, which are essentially calls to kernel utility
+functions. Currently, the only extensions supported by this package
+are the Linux packet filter extensions.
+
+Examples
+
+This packet filter selects all ARP packets.
+
+	bpf.Assemble([]bpf.Instruction{
+		// Load "EtherType" field from the ethernet header.
+		bpf.LoadAbsolute{Off: 12, Size: 2},
+		// Skip over the next instruction if EtherType is not ARP.
+		bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1},
+		// Verdict is "send up to 4k of the packet to userspace."
+		bpf.RetConstant{Val: 4096},
+		// Verdict is "ignore packet."
+		bpf.RetConstant{Val: 0},
+	})
+
+This packet filter captures a random 1% sample of traffic.
+
+	bpf.Assemble([]bpf.Instruction{
+		// Get a 32-bit random number from the Linux kernel.
+		bpf.LoadExtension{Num: bpf.ExtRand},
+		// 1% dice roll?
+		bpf.JumpIf{Cond: bpf.JumpLessThan, Val: 1 << 32 / 100, SkipFalse: 1},
+		// Capture.
+		bpf.RetConstant{Val: 4096},
+		// Ignore.
+		bpf.RetConstant{Val: 0},
+	})
+
+*/
+package bpf // import "golang.org/x/net/bpf"
diff --git a/vendor/golang.org/x/net/bpf/instructions.go b/vendor/golang.org/x/net/bpf/instructions.go
new file mode 100644
index 0000000..f9dc0e8
--- /dev/null
+++ b/vendor/golang.org/x/net/bpf/instructions.go
@@ -0,0 +1,704 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf
+
+import "fmt"
+
+// An Instruction is one instruction executed by the BPF virtual
+// machine.
+type Instruction interface {
+	// Assemble assembles the Instruction into a RawInstruction.
+	Assemble() (RawInstruction, error)
+}
+
+// A RawInstruction is a raw BPF virtual machine instruction.
+type RawInstruction struct {
+	// Operation to execute.
+	Op uint16
+	// For conditional jump instructions, the number of instructions
+	// to skip if the condition is true/false.
+	Jt uint8
+	Jf uint8
+	// Constant parameter. The meaning depends on the Op.
+	K uint32
+}
+
+// Assemble implements the Instruction Assemble method.
+func (ri RawInstruction) Assemble() (RawInstruction, error) { return ri, nil }
+
+// Disassemble parses ri into an Instruction and returns it. If ri is
+// not recognized by this package, ri itself is returned.
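+//
+// For example (an illustrative round trip), the raw form of "ld [12]"
+// decodes back into its typed equivalent:
+//
+//	ins := bpf.RawInstruction{Op: 0x20, K: 12}.Disassemble()
+//	// ins is bpf.LoadAbsolute{Off: 12, Size: 4}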
+func (ri RawInstruction) Disassemble() Instruction { + switch ri.Op & opMaskCls { + case opClsLoadA, opClsLoadX: + reg := Register(ri.Op & opMaskLoadDest) + sz := 0 + switch ri.Op & opMaskLoadWidth { + case opLoadWidth4: + sz = 4 + case opLoadWidth2: + sz = 2 + case opLoadWidth1: + sz = 1 + default: + return ri + } + switch ri.Op & opMaskLoadMode { + case opAddrModeImmediate: + if sz != 4 { + return ri + } + return LoadConstant{Dst: reg, Val: ri.K} + case opAddrModeScratch: + if sz != 4 || ri.K > 15 { + return ri + } + return LoadScratch{Dst: reg, N: int(ri.K)} + case opAddrModeAbsolute: + if ri.K > extOffset+0xffffffff { + return LoadExtension{Num: Extension(-extOffset + ri.K)} + } + return LoadAbsolute{Size: sz, Off: ri.K} + case opAddrModeIndirect: + return LoadIndirect{Size: sz, Off: ri.K} + case opAddrModePacketLen: + if sz != 4 { + return ri + } + return LoadExtension{Num: ExtLen} + case opAddrModeMemShift: + return LoadMemShift{Off: ri.K} + default: + return ri + } + + case opClsStoreA: + if ri.Op != opClsStoreA || ri.K > 15 { + return ri + } + return StoreScratch{Src: RegA, N: int(ri.K)} + + case opClsStoreX: + if ri.Op != opClsStoreX || ri.K > 15 { + return ri + } + return StoreScratch{Src: RegX, N: int(ri.K)} + + case opClsALU: + switch op := ALUOp(ri.Op & opMaskOperator); op { + case ALUOpAdd, ALUOpSub, ALUOpMul, ALUOpDiv, ALUOpOr, ALUOpAnd, ALUOpShiftLeft, ALUOpShiftRight, ALUOpMod, ALUOpXor: + if ri.Op&opMaskOperandSrc != 0 { + return ALUOpX{Op: op} + } + return ALUOpConstant{Op: op, Val: ri.K} + case aluOpNeg: + return NegateA{} + default: + return ri + } + + case opClsJump: + if ri.Op&opMaskJumpConst != opClsJump { + return ri + } + switch ri.Op & opMaskJumpCond { + case opJumpAlways: + return Jump{Skip: ri.K} + case opJumpEqual: + if ri.Jt == 0 { + return JumpIf{ + Cond: JumpNotEqual, + Val: ri.K, + SkipTrue: ri.Jf, + SkipFalse: 0, + } + } + return JumpIf{ + Cond: JumpEqual, + Val: ri.K, + SkipTrue: ri.Jt, + SkipFalse: ri.Jf, + } + case opJumpGT: + if ri.Jt == 0 { + return JumpIf{ + Cond: JumpLessOrEqual, + Val: ri.K, + SkipTrue: ri.Jf, + SkipFalse: 0, + } + } + return JumpIf{ + Cond: JumpGreaterThan, + Val: ri.K, + SkipTrue: ri.Jt, + SkipFalse: ri.Jf, + } + case opJumpGE: + if ri.Jt == 0 { + return JumpIf{ + Cond: JumpLessThan, + Val: ri.K, + SkipTrue: ri.Jf, + SkipFalse: 0, + } + } + return JumpIf{ + Cond: JumpGreaterOrEqual, + Val: ri.K, + SkipTrue: ri.Jt, + SkipFalse: ri.Jf, + } + case opJumpSet: + return JumpIf{ + Cond: JumpBitsSet, + Val: ri.K, + SkipTrue: ri.Jt, + SkipFalse: ri.Jf, + } + default: + return ri + } + + case opClsReturn: + switch ri.Op { + case opClsReturn | opRetSrcA: + return RetA{} + case opClsReturn | opRetSrcConstant: + return RetConstant{Val: ri.K} + default: + return ri + } + + case opClsMisc: + switch ri.Op { + case opClsMisc | opMiscTAX: + return TAX{} + case opClsMisc | opMiscTXA: + return TXA{} + default: + return ri + } + + default: + panic("unreachable") // switch is exhaustive on the bit pattern + } +} + +// LoadConstant loads Val into register Dst. +type LoadConstant struct { + Dst Register + Val uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadConstant) Assemble() (RawInstruction, error) { + return assembleLoad(a.Dst, 4, opAddrModeImmediate, a.Val) +} + +// String returns the instruction in assembler notation. 
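+// For example (matching the cases exercised by TestString in
+// instructions_test.go):
+//
+//	LoadConstant{Dst: RegA, Val: 42}.String() // "ld #42"
+//	LoadConstant{Dst: RegX, Val: 42}.String() // "ldx #42"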
+func (a LoadConstant) String() string {
+	switch a.Dst {
+	case RegA:
+		return fmt.Sprintf("ld #%d", a.Val)
+	case RegX:
+		return fmt.Sprintf("ldx #%d", a.Val)
+	default:
+		return fmt.Sprintf("unknown instruction: %#v", a)
+	}
+}
+
+// LoadScratch loads scratch[N] into register Dst.
+type LoadScratch struct {
+	Dst Register
+	N   int // 0-15
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a LoadScratch) Assemble() (RawInstruction, error) {
+	if a.N < 0 || a.N > 15 {
+		return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N)
+	}
+	return assembleLoad(a.Dst, 4, opAddrModeScratch, uint32(a.N))
+}
+
+// String returns the instruction in assembler notation.
+func (a LoadScratch) String() string {
+	switch a.Dst {
+	case RegA:
+		return fmt.Sprintf("ld M[%d]", a.N)
+	case RegX:
+		return fmt.Sprintf("ldx M[%d]", a.N)
+	default:
+		return fmt.Sprintf("unknown instruction: %#v", a)
+	}
+}
+
+// LoadAbsolute loads packet[Off:Off+Size] as an integer value into
+// register A.
+type LoadAbsolute struct {
+	Off  uint32
+	Size int // 1, 2 or 4
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a LoadAbsolute) Assemble() (RawInstruction, error) {
+	return assembleLoad(RegA, a.Size, opAddrModeAbsolute, a.Off)
+}
+
+// String returns the instruction in assembler notation.
+func (a LoadAbsolute) String() string {
+	switch a.Size {
+	case 1: // byte
+		return fmt.Sprintf("ldb [%d]", a.Off)
+	case 2: // half word
+		return fmt.Sprintf("ldh [%d]", a.Off)
+	case 4: // word
+		if a.Off > extOffset+0xffffffff {
+			return LoadExtension{Num: Extension(a.Off + 0x1000)}.String()
+		}
+		return fmt.Sprintf("ld [%d]", a.Off)
+	default:
+		return fmt.Sprintf("unknown instruction: %#v", a)
+	}
+}
+
+// LoadIndirect loads packet[X+Off:X+Off+Size] as an integer value
+// into register A.
+type LoadIndirect struct {
+	Off  uint32
+	Size int // 1, 2 or 4
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a LoadIndirect) Assemble() (RawInstruction, error) {
+	return assembleLoad(RegA, a.Size, opAddrModeIndirect, a.Off)
+}
+
+// String returns the instruction in assembler notation.
+func (a LoadIndirect) String() string {
+	switch a.Size {
+	case 1: // byte
+		return fmt.Sprintf("ldb [x + %d]", a.Off)
+	case 2: // half word
+		return fmt.Sprintf("ldh [x + %d]", a.Off)
+	case 4: // word
+		return fmt.Sprintf("ld [x + %d]", a.Off)
+	default:
+		return fmt.Sprintf("unknown instruction: %#v", a)
+	}
+}
+
+// LoadMemShift multiplies the low 4 bits of the byte at packet[Off]
+// by 4 and stores the result in register X.
+//
+// This instruction is mainly useful to load into X the length of an
+// IPv4 packet header in a single instruction, rather than have to do
+// the arithmetic on the header's first byte by hand.
+type LoadMemShift struct {
+	Off uint32
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a LoadMemShift) Assemble() (RawInstruction, error) {
+	return assembleLoad(RegX, 1, opAddrModeMemShift, a.Off)
+}
+
+// String returns the instruction in assembler notation.
+func (a LoadMemShift) String() string {
+	return fmt.Sprintf("ldx 4*([%d]&0xf)", a.Off)
+}
+
+// LoadExtension invokes a Linux-specific extension and stores the
+// result in register A.
+type LoadExtension struct {
+	Num Extension
+}
+
+// Assemble implements the Instruction Assemble method.
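+// ExtLen has a dedicated packet-length addressing mode; every other
+// extension is encoded as an absolute load whose offset is extOffset
+// plus the extension number, as the body below shows.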
+func (a LoadExtension) Assemble() (RawInstruction, error) {
+	if a.Num == ExtLen {
+		return assembleLoad(RegA, 4, opAddrModePacketLen, 0)
+	}
+	return assembleLoad(RegA, 4, opAddrModeAbsolute, uint32(extOffset+a.Num))
+}
+
+// String returns the instruction in assembler notation.
+func (a LoadExtension) String() string {
+	switch a.Num {
+	case ExtLen:
+		return "ld #len"
+	case ExtProto:
+		return "ld #proto"
+	case ExtType:
+		return "ld #type"
+	case ExtPayloadOffset:
+		return "ld #poff"
+	case ExtInterfaceIndex:
+		return "ld #ifidx"
+	case ExtNetlinkAttr:
+		return "ld #nla"
+	case ExtNetlinkAttrNested:
+		return "ld #nlan"
+	case ExtMark:
+		return "ld #mark"
+	case ExtQueue:
+		return "ld #queue"
+	case ExtLinkLayerType:
+		return "ld #hatype"
+	case ExtRXHash:
+		return "ld #rxhash"
+	case ExtCPUID:
+		return "ld #cpu"
+	case ExtVLANTag:
+		return "ld #vlan_tci"
+	case ExtVLANTagPresent:
+		return "ld #vlan_avail"
+	case ExtVLANProto:
+		return "ld #vlan_tpid"
+	case ExtRand:
+		return "ld #rand"
+	default:
+		return fmt.Sprintf("unknown instruction: %#v", a)
+	}
+}
+
+// StoreScratch stores register Src into scratch[N].
+type StoreScratch struct {
+	Src Register
+	N   int // 0-15
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a StoreScratch) Assemble() (RawInstruction, error) {
+	if a.N < 0 || a.N > 15 {
+		return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N)
+	}
+	var op uint16
+	switch a.Src {
+	case RegA:
+		op = opClsStoreA
+	case RegX:
+		op = opClsStoreX
+	default:
+		return RawInstruction{}, fmt.Errorf("invalid source register %v", a.Src)
+	}
+
+	return RawInstruction{
+		Op: op,
+		K:  uint32(a.N),
+	}, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a StoreScratch) String() string {
+	switch a.Src {
+	case RegA:
+		return fmt.Sprintf("st M[%d]", a.N)
+	case RegX:
+		return fmt.Sprintf("stx M[%d]", a.N)
+	default:
+		return fmt.Sprintf("unknown instruction: %#v", a)
+	}
+}
+
+// ALUOpConstant executes A = A <Op> Val.
+type ALUOpConstant struct {
+	Op  ALUOp
+	Val uint32
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a ALUOpConstant) Assemble() (RawInstruction, error) {
+	return RawInstruction{
+		Op: opClsALU | opALUSrcConstant | uint16(a.Op),
+		K:  a.Val,
+	}, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a ALUOpConstant) String() string {
+	switch a.Op {
+	case ALUOpAdd:
+		return fmt.Sprintf("add #%d", a.Val)
+	case ALUOpSub:
+		return fmt.Sprintf("sub #%d", a.Val)
+	case ALUOpMul:
+		return fmt.Sprintf("mul #%d", a.Val)
+	case ALUOpDiv:
+		return fmt.Sprintf("div #%d", a.Val)
+	case ALUOpMod:
+		return fmt.Sprintf("mod #%d", a.Val)
+	case ALUOpAnd:
+		return fmt.Sprintf("and #%d", a.Val)
+	case ALUOpOr:
+		return fmt.Sprintf("or #%d", a.Val)
+	case ALUOpXor:
+		return fmt.Sprintf("xor #%d", a.Val)
+	case ALUOpShiftLeft:
+		return fmt.Sprintf("lsh #%d", a.Val)
+	case ALUOpShiftRight:
+		return fmt.Sprintf("rsh #%d", a.Val)
+	default:
+		return fmt.Sprintf("unknown instruction: %#v", a)
+	}
+}
+
+// ALUOpX executes A = A <Op> X.
+type ALUOpX struct {
+	Op ALUOp
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a ALUOpX) Assemble() (RawInstruction, error) {
+	return RawInstruction{
+		Op: opClsALU | opALUSrcX | uint16(a.Op),
+	}, nil
+}
+
+// String returns the instruction in assembler notation.
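+// For example, ALUOpX{Op: ALUOpAdd}.String() returns "add x".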
+func (a ALUOpX) String() string {
+	switch a.Op {
+	case ALUOpAdd:
+		return "add x"
+	case ALUOpSub:
+		return "sub x"
+	case ALUOpMul:
+		return "mul x"
+	case ALUOpDiv:
+		return "div x"
+	case ALUOpMod:
+		return "mod x"
+	case ALUOpAnd:
+		return "and x"
+	case ALUOpOr:
+		return "or x"
+	case ALUOpXor:
+		return "xor x"
+	case ALUOpShiftLeft:
+		return "lsh x"
+	case ALUOpShiftRight:
+		return "rsh x"
+	default:
+		return fmt.Sprintf("unknown instruction: %#v", a)
+	}
+}
+
+// NegateA executes A = -A.
+type NegateA struct{}
+
+// Assemble implements the Instruction Assemble method.
+func (a NegateA) Assemble() (RawInstruction, error) {
+	return RawInstruction{
+		Op: opClsALU | uint16(aluOpNeg),
+	}, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a NegateA) String() string {
+	return "neg"
+}
+
+// Jump skips the following Skip instructions in the program.
+type Jump struct {
+	Skip uint32
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a Jump) Assemble() (RawInstruction, error) {
+	return RawInstruction{
+		Op: opClsJump | opJumpAlways,
+		K:  a.Skip,
+	}, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a Jump) String() string {
+	return fmt.Sprintf("ja %d", a.Skip)
+}
+
+// JumpIf skips the following Skip instructions in the program if A
+// <Cond> Val is true.
+type JumpIf struct {
+	Cond      JumpTest
+	Val       uint32
+	SkipTrue  uint8
+	SkipFalse uint8
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a JumpIf) Assemble() (RawInstruction, error) {
+	var (
+		cond uint16
+		flip bool
+	)
+	switch a.Cond {
+	case JumpEqual:
+		cond = opJumpEqual
+	case JumpNotEqual:
+		cond, flip = opJumpEqual, true
+	case JumpGreaterThan:
+		cond = opJumpGT
+	case JumpLessThan:
+		cond, flip = opJumpGE, true
+	case JumpGreaterOrEqual:
+		cond = opJumpGE
+	case JumpLessOrEqual:
+		cond, flip = opJumpGT, true
+	case JumpBitsSet:
+		cond = opJumpSet
+	case JumpBitsNotSet:
+		cond, flip = opJumpSet, true
+	default:
+		return RawInstruction{}, fmt.Errorf("unknown JumpTest %v", a.Cond)
+	}
+	jt, jf := a.SkipTrue, a.SkipFalse
+	if flip {
+		jt, jf = jf, jt
+	}
+	return RawInstruction{
+		Op: opClsJump | cond,
+		Jt: jt,
+		Jf: jf,
+		K:  a.Val,
+	}, nil
+}
+
+// String returns the instruction in assembler notation.
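+// For example, JumpIf{Cond: JumpEqual, Val: 42, SkipTrue: 8, SkipFalse: 9}
+// renders as "jeq #42,8,9".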
+func (a JumpIf) String() string {
+	switch a.Cond {
+	// K == A
+	case JumpEqual:
+		return conditionalJump(a, "jeq", "jneq")
+	// K != A
+	case JumpNotEqual:
+		return fmt.Sprintf("jneq #%d,%d", a.Val, a.SkipTrue)
+	// K > A
+	case JumpGreaterThan:
+		return conditionalJump(a, "jgt", "jle")
+	// K < A
+	case JumpLessThan:
+		return fmt.Sprintf("jlt #%d,%d", a.Val, a.SkipTrue)
+	// K >= A
+	case JumpGreaterOrEqual:
+		return conditionalJump(a, "jge", "jlt")
+	// K <= A
+	case JumpLessOrEqual:
+		return fmt.Sprintf("jle #%d,%d", a.Val, a.SkipTrue)
+	// K & A != 0
+	case JumpBitsSet:
+		if a.SkipFalse > 0 {
+			return fmt.Sprintf("jset #%d,%d,%d", a.Val, a.SkipTrue, a.SkipFalse)
+		}
+		return fmt.Sprintf("jset #%d,%d", a.Val, a.SkipTrue)
+	// K & A == 0; there is no assembler instruction for JumpBitsNotSet,
+	// so use JumpBitsSet and invert the skips.
+	case JumpBitsNotSet:
+		return JumpIf{Cond: JumpBitsSet, SkipTrue: a.SkipFalse, SkipFalse: a.SkipTrue, Val: a.Val}.String()
+	default:
+		return fmt.Sprintf("unknown instruction: %#v", a)
+	}
+}
+
+func conditionalJump(inst JumpIf, positiveJump, negativeJump string) string {
+	if inst.SkipTrue > 0 {
+		if inst.SkipFalse > 0 {
+			return fmt.Sprintf("%s #%d,%d,%d", positiveJump, inst.Val, inst.SkipTrue, inst.SkipFalse)
+		}
+		return fmt.Sprintf("%s #%d,%d", positiveJump, inst.Val, inst.SkipTrue)
+	}
+	return fmt.Sprintf("%s #%d,%d", negativeJump, inst.Val, inst.SkipFalse)
+}
+
+// RetA exits the BPF program, returning the value of register A.
+type RetA struct{}
+
+// Assemble implements the Instruction Assemble method.
+func (a RetA) Assemble() (RawInstruction, error) {
+	return RawInstruction{
+		Op: opClsReturn | opRetSrcA,
+	}, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a RetA) String() string {
+	return "ret a"
+}
+
+// RetConstant exits the BPF program, returning a constant value.
+type RetConstant struct {
+	Val uint32
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a RetConstant) Assemble() (RawInstruction, error) {
+	return RawInstruction{
+		Op: opClsReturn | opRetSrcConstant,
+		K:  a.Val,
+	}, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a RetConstant) String() string {
+	return fmt.Sprintf("ret #%d", a.Val)
+}
+
+// TXA copies the value of register X to register A.
+type TXA struct{}
+
+// Assemble implements the Instruction Assemble method.
+func (a TXA) Assemble() (RawInstruction, error) {
+	return RawInstruction{
+		Op: opClsMisc | opMiscTXA,
+	}, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a TXA) String() string {
+	return "txa"
+}
+
+// TAX copies the value of register A to register X.
+type TAX struct{}
+
+// Assemble implements the Instruction Assemble method.
+func (a TAX) Assemble() (RawInstruction, error) {
+	return RawInstruction{
+		Op: opClsMisc | opMiscTAX,
+	}, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a TAX) String() string {
+	return "tax"
+}
+
+func assembleLoad(dst Register, loadSize int, mode uint16, k uint32) (RawInstruction, error) {
+	var (
+		cls uint16
+		sz  uint16
+	)
+	switch dst {
+	case RegA:
+		cls = opClsLoadA
+	case RegX:
+		cls = opClsLoadX
+	default:
+		return RawInstruction{}, fmt.Errorf("invalid target register %v", dst)
+	}
+	switch loadSize {
+	case 1:
+		sz = opLoadWidth1
+	case 2:
+		sz = opLoadWidth2
+	case 4:
+		sz = opLoadWidth4
+	default:
+		return RawInstruction{}, fmt.Errorf("invalid load byte length %d", loadSize)
+	}
+	return RawInstruction{
+		Op: cls | sz | mode,
+		K:  k,
+	}, nil
+}
diff --git a/vendor/golang.org/x/net/bpf/instructions_test.go b/vendor/golang.org/x/net/bpf/instructions_test.go
new file mode 100644
index 0000000..dde474a
--- /dev/null
+++ b/vendor/golang.org/x/net/bpf/instructions_test.go
@@ -0,0 +1,525 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf
+
+import (
+	"fmt"
+	"io/ioutil"
+	"reflect"
+	"strconv"
+	"strings"
+	"testing"
+)
+
+// This is a direct translation of the program in
+// testdata/all_instructions.txt.
+var allInstructions = []Instruction{
+	LoadConstant{Dst: RegA, Val: 42},
+	LoadConstant{Dst: RegX, Val: 42},
+
+	LoadScratch{Dst: RegA, N: 3},
+	LoadScratch{Dst: RegX, N: 3},
+
+	LoadAbsolute{Off: 42, Size: 1},
+	LoadAbsolute{Off: 42, Size: 2},
+	LoadAbsolute{Off: 42, Size: 4},
+
+	LoadIndirect{Off: 42, Size: 1},
+	LoadIndirect{Off: 42, Size: 2},
+	LoadIndirect{Off: 42, Size: 4},
+
+	LoadMemShift{Off: 42},
+
+	LoadExtension{Num: ExtLen},
+	LoadExtension{Num: ExtProto},
+	LoadExtension{Num: ExtType},
+	LoadExtension{Num: ExtRand},
+
+	StoreScratch{Src: RegA, N: 3},
+	StoreScratch{Src: RegX, N: 3},
+
+	ALUOpConstant{Op: ALUOpAdd, Val: 42},
+	ALUOpConstant{Op: ALUOpSub, Val: 42},
+	ALUOpConstant{Op: ALUOpMul, Val: 42},
+	ALUOpConstant{Op: ALUOpDiv, Val: 42},
+	ALUOpConstant{Op: ALUOpOr, Val: 42},
+	ALUOpConstant{Op: ALUOpAnd, Val: 42},
+	ALUOpConstant{Op: ALUOpShiftLeft, Val: 42},
+	ALUOpConstant{Op: ALUOpShiftRight, Val: 42},
+	ALUOpConstant{Op: ALUOpMod, Val: 42},
+	ALUOpConstant{Op: ALUOpXor, Val: 42},
+
+	ALUOpX{Op: ALUOpAdd},
+	ALUOpX{Op: ALUOpSub},
+	ALUOpX{Op: ALUOpMul},
+	ALUOpX{Op: ALUOpDiv},
+	ALUOpX{Op: ALUOpOr},
+	ALUOpX{Op: ALUOpAnd},
+	ALUOpX{Op: ALUOpShiftLeft},
+	ALUOpX{Op: ALUOpShiftRight},
+	ALUOpX{Op: ALUOpMod},
+	ALUOpX{Op: ALUOpXor},
+
+	NegateA{},
+
+	Jump{Skip: 10},
+	JumpIf{Cond: JumpEqual, Val: 42, SkipTrue: 8, SkipFalse: 9},
+	JumpIf{Cond: JumpNotEqual, Val: 42, SkipTrue: 8},
+	JumpIf{Cond: JumpLessThan, Val: 42, SkipTrue: 7},
+	JumpIf{Cond: JumpLessOrEqual, Val: 42, SkipTrue: 6},
+	JumpIf{Cond: JumpGreaterThan, Val: 42, SkipTrue: 4, SkipFalse: 5},
+	JumpIf{Cond: JumpGreaterOrEqual, Val: 42, SkipTrue: 3, SkipFalse: 4},
+	JumpIf{Cond: JumpBitsSet, Val: 42, SkipTrue: 2, SkipFalse: 3},
+
+	TAX{},
+	TXA{},
+
+	RetA{},
+	RetConstant{Val: 42},
+}
+var allInstructionsExpected = "testdata/all_instructions.bpf"
+
+// Check that we produce the same output as the canonical bpf_asm
+// linux kernel tool.
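+// Each comma-separated statement in that file is a decimal "op jt jf k"
+// quadruple, which the loop below parses field by field.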
+func TestInterop(t *testing.T) {
+	out, err := Assemble(allInstructions)
+	if err != nil {
+		t.Fatalf("assembly of allInstructions program failed: %s", err)
+	}
+	t.Logf("Assembled program is %d instructions long", len(out))
+
+	bs, err := ioutil.ReadFile(allInstructionsExpected)
+	if err != nil {
+		t.Fatalf("reading %s: %s", allInstructionsExpected, err)
+	}
+	// First statement is the number of statements, last statement is
+	// empty. We just ignore both and rely on slice length.
+	stmts := strings.Split(string(bs), ",")
+	if len(stmts)-2 != len(out) {
+		t.Fatalf("test program lengths don't match: %s has %d, Go implementation has %d", allInstructionsExpected, len(stmts)-2, len(out))
+	}
+
+	for i, stmt := range stmts[1 : len(stmts)-1] {
+		nums := strings.Split(stmt, " ")
+		if len(nums) != 4 {
+			t.Fatalf("malformed instruction %d in %s: %s", i+1, allInstructionsExpected, stmt)
+		}
+
+		actual := out[i]
+
+		op, err := strconv.ParseUint(nums[0], 10, 16)
+		if err != nil {
+			t.Fatalf("malformed opcode %s in instruction %d of %s", nums[0], i+1, allInstructionsExpected)
+		}
+		if actual.Op != uint16(op) {
+			t.Errorf("opcode mismatch on instruction %d (%#v): got 0x%02x, want 0x%02x", i+1, allInstructions[i], actual.Op, op)
+		}
+
+		jt, err := strconv.ParseUint(nums[1], 10, 8)
+		if err != nil {
+			t.Fatalf("malformed jt offset %s in instruction %d of %s", nums[1], i+1, allInstructionsExpected)
+		}
+		if actual.Jt != uint8(jt) {
+			t.Errorf("jt mismatch on instruction %d (%#v): got %d, want %d", i+1, allInstructions[i], actual.Jt, jt)
+		}
+
+		jf, err := strconv.ParseUint(nums[2], 10, 8)
+		if err != nil {
+			t.Fatalf("malformed jf offset %s in instruction %d of %s", nums[2], i+1, allInstructionsExpected)
+		}
+		if actual.Jf != uint8(jf) {
+			t.Errorf("jf mismatch on instruction %d (%#v): got %d, want %d", i+1, allInstructions[i], actual.Jf, jf)
+		}
+
+		k, err := strconv.ParseUint(nums[3], 10, 32)
+		if err != nil {
+			t.Fatalf("malformed constant %s in instruction %d of %s", nums[3], i+1, allInstructionsExpected)
+		}
+		if actual.K != uint32(k) {
+			t.Errorf("constant mismatch on instruction %d (%#v): got %d, want %d", i+1, allInstructions[i], actual.K, k)
+		}
+	}
+}
+
+// Check that assembly and disassembly match each other.
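+// That is, Disassemble(Assemble(prog)) should reproduce prog exactly,
+// instruction by instruction.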
+func TestAsmDisasm(t *testing.T) { + prog1, err := Assemble(allInstructions) + if err != nil { + t.Fatalf("assembly of allInstructions program failed: %s", err) + } + t.Logf("Assembled program is %d instructions long", len(prog1)) + + got, allDecoded := Disassemble(prog1) + if !allDecoded { + t.Errorf("Disassemble(Assemble(allInstructions)) produced unrecognized instructions:") + for i, inst := range got { + if r, ok := inst.(RawInstruction); ok { + t.Logf(" insn %d, %#v --> %#v", i+1, allInstructions[i], r) + } + } + } + + if len(allInstructions) != len(got) { + t.Fatalf("disassembly changed program size: %d insns before, %d insns after", len(allInstructions), len(got)) + } + if !reflect.DeepEqual(allInstructions, got) { + t.Errorf("program mutated by disassembly:") + for i := range got { + if !reflect.DeepEqual(allInstructions[i], got[i]) { + t.Logf(" insn %d, s: %#v, p1: %#v, got: %#v", i+1, allInstructions[i], prog1[i], got[i]) + } + } + } +} + +type InvalidInstruction struct{} + +func (a InvalidInstruction) Assemble() (RawInstruction, error) { + return RawInstruction{}, fmt.Errorf("Invalid Instruction") +} + +func (a InvalidInstruction) String() string { + return fmt.Sprintf("unknown instruction: %#v", a) +} + +func TestString(t *testing.T) { + testCases := []struct { + instruction Instruction + assembler string + }{ + { + instruction: LoadConstant{Dst: RegA, Val: 42}, + assembler: "ld #42", + }, + { + instruction: LoadConstant{Dst: RegX, Val: 42}, + assembler: "ldx #42", + }, + { + instruction: LoadConstant{Dst: 0xffff, Val: 42}, + assembler: "unknown instruction: bpf.LoadConstant{Dst:0xffff, Val:0x2a}", + }, + { + instruction: LoadScratch{Dst: RegA, N: 3}, + assembler: "ld M[3]", + }, + { + instruction: LoadScratch{Dst: RegX, N: 3}, + assembler: "ldx M[3]", + }, + { + instruction: LoadScratch{Dst: 0xffff, N: 3}, + assembler: "unknown instruction: bpf.LoadScratch{Dst:0xffff, N:3}", + }, + { + instruction: LoadAbsolute{Off: 42, Size: 1}, + assembler: "ldb [42]", + }, + { + instruction: LoadAbsolute{Off: 42, Size: 2}, + assembler: "ldh [42]", + }, + { + instruction: LoadAbsolute{Off: 42, Size: 4}, + assembler: "ld [42]", + }, + { + instruction: LoadAbsolute{Off: 42, Size: -1}, + assembler: "unknown instruction: bpf.LoadAbsolute{Off:0x2a, Size:-1}", + }, + { + instruction: LoadIndirect{Off: 42, Size: 1}, + assembler: "ldb [x + 42]", + }, + { + instruction: LoadIndirect{Off: 42, Size: 2}, + assembler: "ldh [x + 42]", + }, + { + instruction: LoadIndirect{Off: 42, Size: 4}, + assembler: "ld [x + 42]", + }, + { + instruction: LoadIndirect{Off: 42, Size: -1}, + assembler: "unknown instruction: bpf.LoadIndirect{Off:0x2a, Size:-1}", + }, + { + instruction: LoadMemShift{Off: 42}, + assembler: "ldx 4*([42]&0xf)", + }, + { + instruction: LoadExtension{Num: ExtLen}, + assembler: "ld #len", + }, + { + instruction: LoadExtension{Num: ExtProto}, + assembler: "ld #proto", + }, + { + instruction: LoadExtension{Num: ExtType}, + assembler: "ld #type", + }, + { + instruction: LoadExtension{Num: ExtPayloadOffset}, + assembler: "ld #poff", + }, + { + instruction: LoadExtension{Num: ExtInterfaceIndex}, + assembler: "ld #ifidx", + }, + { + instruction: LoadExtension{Num: ExtNetlinkAttr}, + assembler: "ld #nla", + }, + { + instruction: LoadExtension{Num: ExtNetlinkAttrNested}, + assembler: "ld #nlan", + }, + { + instruction: LoadExtension{Num: ExtMark}, + assembler: "ld #mark", + }, + { + instruction: LoadExtension{Num: ExtQueue}, + assembler: "ld #queue", + }, + { + instruction: LoadExtension{Num: 
ExtLinkLayerType}, + assembler: "ld #hatype", + }, + { + instruction: LoadExtension{Num: ExtRXHash}, + assembler: "ld #rxhash", + }, + { + instruction: LoadExtension{Num: ExtCPUID}, + assembler: "ld #cpu", + }, + { + instruction: LoadExtension{Num: ExtVLANTag}, + assembler: "ld #vlan_tci", + }, + { + instruction: LoadExtension{Num: ExtVLANTagPresent}, + assembler: "ld #vlan_avail", + }, + { + instruction: LoadExtension{Num: ExtVLANProto}, + assembler: "ld #vlan_tpid", + }, + { + instruction: LoadExtension{Num: ExtRand}, + assembler: "ld #rand", + }, + { + instruction: LoadAbsolute{Off: 0xfffff038, Size: 4}, + assembler: "ld #rand", + }, + { + instruction: LoadExtension{Num: 0xfff}, + assembler: "unknown instruction: bpf.LoadExtension{Num:4095}", + }, + { + instruction: StoreScratch{Src: RegA, N: 3}, + assembler: "st M[3]", + }, + { + instruction: StoreScratch{Src: RegX, N: 3}, + assembler: "stx M[3]", + }, + { + instruction: StoreScratch{Src: 0xffff, N: 3}, + assembler: "unknown instruction: bpf.StoreScratch{Src:0xffff, N:3}", + }, + { + instruction: ALUOpConstant{Op: ALUOpAdd, Val: 42}, + assembler: "add #42", + }, + { + instruction: ALUOpConstant{Op: ALUOpSub, Val: 42}, + assembler: "sub #42", + }, + { + instruction: ALUOpConstant{Op: ALUOpMul, Val: 42}, + assembler: "mul #42", + }, + { + instruction: ALUOpConstant{Op: ALUOpDiv, Val: 42}, + assembler: "div #42", + }, + { + instruction: ALUOpConstant{Op: ALUOpOr, Val: 42}, + assembler: "or #42", + }, + { + instruction: ALUOpConstant{Op: ALUOpAnd, Val: 42}, + assembler: "and #42", + }, + { + instruction: ALUOpConstant{Op: ALUOpShiftLeft, Val: 42}, + assembler: "lsh #42", + }, + { + instruction: ALUOpConstant{Op: ALUOpShiftRight, Val: 42}, + assembler: "rsh #42", + }, + { + instruction: ALUOpConstant{Op: ALUOpMod, Val: 42}, + assembler: "mod #42", + }, + { + instruction: ALUOpConstant{Op: ALUOpXor, Val: 42}, + assembler: "xor #42", + }, + { + instruction: ALUOpConstant{Op: 0xffff, Val: 42}, + assembler: "unknown instruction: bpf.ALUOpConstant{Op:0xffff, Val:0x2a}", + }, + { + instruction: ALUOpX{Op: ALUOpAdd}, + assembler: "add x", + }, + { + instruction: ALUOpX{Op: ALUOpSub}, + assembler: "sub x", + }, + { + instruction: ALUOpX{Op: ALUOpMul}, + assembler: "mul x", + }, + { + instruction: ALUOpX{Op: ALUOpDiv}, + assembler: "div x", + }, + { + instruction: ALUOpX{Op: ALUOpOr}, + assembler: "or x", + }, + { + instruction: ALUOpX{Op: ALUOpAnd}, + assembler: "and x", + }, + { + instruction: ALUOpX{Op: ALUOpShiftLeft}, + assembler: "lsh x", + }, + { + instruction: ALUOpX{Op: ALUOpShiftRight}, + assembler: "rsh x", + }, + { + instruction: ALUOpX{Op: ALUOpMod}, + assembler: "mod x", + }, + { + instruction: ALUOpX{Op: ALUOpXor}, + assembler: "xor x", + }, + { + instruction: ALUOpX{Op: 0xffff}, + assembler: "unknown instruction: bpf.ALUOpX{Op:0xffff}", + }, + { + instruction: NegateA{}, + assembler: "neg", + }, + { + instruction: Jump{Skip: 10}, + assembler: "ja 10", + }, + { + instruction: JumpIf{Cond: JumpEqual, Val: 42, SkipTrue: 8, SkipFalse: 9}, + assembler: "jeq #42,8,9", + }, + { + instruction: JumpIf{Cond: JumpEqual, Val: 42, SkipTrue: 8}, + assembler: "jeq #42,8", + }, + { + instruction: JumpIf{Cond: JumpEqual, Val: 42, SkipFalse: 8}, + assembler: "jneq #42,8", + }, + { + instruction: JumpIf{Cond: JumpNotEqual, Val: 42, SkipTrue: 8}, + assembler: "jneq #42,8", + }, + { + instruction: JumpIf{Cond: JumpLessThan, Val: 42, SkipTrue: 7}, + assembler: "jlt #42,7", + }, + { + instruction: JumpIf{Cond: JumpLessOrEqual, Val: 42, SkipTrue: 6}, + 
assembler: "jle #42,6", + }, + { + instruction: JumpIf{Cond: JumpGreaterThan, Val: 42, SkipTrue: 4, SkipFalse: 5}, + assembler: "jgt #42,4,5", + }, + { + instruction: JumpIf{Cond: JumpGreaterThan, Val: 42, SkipTrue: 4}, + assembler: "jgt #42,4", + }, + { + instruction: JumpIf{Cond: JumpGreaterOrEqual, Val: 42, SkipTrue: 3, SkipFalse: 4}, + assembler: "jge #42,3,4", + }, + { + instruction: JumpIf{Cond: JumpGreaterOrEqual, Val: 42, SkipTrue: 3}, + assembler: "jge #42,3", + }, + { + instruction: JumpIf{Cond: JumpBitsSet, Val: 42, SkipTrue: 2, SkipFalse: 3}, + assembler: "jset #42,2,3", + }, + { + instruction: JumpIf{Cond: JumpBitsSet, Val: 42, SkipTrue: 2}, + assembler: "jset #42,2", + }, + { + instruction: JumpIf{Cond: JumpBitsNotSet, Val: 42, SkipTrue: 2, SkipFalse: 3}, + assembler: "jset #42,3,2", + }, + { + instruction: JumpIf{Cond: JumpBitsNotSet, Val: 42, SkipTrue: 2}, + assembler: "jset #42,0,2", + }, + { + instruction: JumpIf{Cond: 0xffff, Val: 42, SkipTrue: 1, SkipFalse: 2}, + assembler: "unknown instruction: bpf.JumpIf{Cond:0xffff, Val:0x2a, SkipTrue:0x1, SkipFalse:0x2}", + }, + { + instruction: TAX{}, + assembler: "tax", + }, + { + instruction: TXA{}, + assembler: "txa", + }, + { + instruction: RetA{}, + assembler: "ret a", + }, + { + instruction: RetConstant{Val: 42}, + assembler: "ret #42", + }, + // Invalid instruction + { + instruction: InvalidInstruction{}, + assembler: "unknown instruction: bpf.InvalidInstruction{}", + }, + } + + for _, testCase := range testCases { + if input, ok := testCase.instruction.(fmt.Stringer); ok { + got := input.String() + if got != testCase.assembler { + t.Errorf("String did not return expected assembler notation, expected: %s, got: %s", testCase.assembler, got) + } + } else { + t.Errorf("Instruction %#v is not a fmt.Stringer", testCase.instruction) + } + } +} diff --git a/vendor/golang.org/x/net/bpf/setter.go b/vendor/golang.org/x/net/bpf/setter.go new file mode 100644 index 0000000..43e35f0 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/setter.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +// A Setter is a type which can attach a compiled BPF filter to itself. 
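+//
+// For example (a usage sketch; conn is an assumed net.PacketConn and
+// filter an assumed []bpf.Instruction), *ipv4.PacketConn from
+// golang.org/x/net/ipv4 satisfies Setter:
+//
+//	prog, err := bpf.Assemble(filter)
+//	if err == nil {
+//		err = ipv4.NewPacketConn(conn).SetBPF(prog)
+//	}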
+type Setter interface {
+	SetBPF(filter []RawInstruction) error
+}
diff --git a/vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf b/vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf
new file mode 100644
index 0000000..f871440
--- /dev/null
+++ b/vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf
@@ -0,0 +1 @@
+50,0 0 0 42,1 0 0 42,96 0 0 3,97 0 0 3,48 0 0 42,40 0 0 42,32 0 0 42,80 0 0 42,72 0 0 42,64 0 0 42,177 0 0 42,128 0 0 0,32 0 0 4294963200,32 0 0 4294963204,32 0 0 4294963256,2 0 0 3,3 0 0 3,4 0 0 42,20 0 0 42,36 0 0 42,52 0 0 42,68 0 0 42,84 0 0 42,100 0 0 42,116 0 0 42,148 0 0 42,164 0 0 42,12 0 0 0,28 0 0 0,44 0 0 0,60 0 0 0,76 0 0 0,92 0 0 0,108 0 0 0,124 0 0 0,156 0 0 0,172 0 0 0,132 0 0 0,5 0 0 10,21 8 9 42,21 0 8 42,53 0 7 42,37 0 6 42,37 4 5 42,53 3 4 42,69 2 3 42,7 0 0 0,135 0 0 0,22 0 0 0,6 0 0 42,
diff --git a/vendor/golang.org/x/net/bpf/testdata/all_instructions.txt b/vendor/golang.org/x/net/bpf/testdata/all_instructions.txt
new file mode 100644
index 0000000..3045501
--- /dev/null
+++ b/vendor/golang.org/x/net/bpf/testdata/all_instructions.txt
@@ -0,0 +1,79 @@
+# This filter is compiled to all_instructions.bpf by the `bpf_asm`
+# tool, which can be found in the linux kernel source tree under
+# tools/net.
+
+# Load immediate
+ld #42
+ldx #42
+
+# Load scratch
+ld M[3]
+ldx M[3]
+
+# Load absolute
+ldb [42]
+ldh [42]
+ld [42]
+
+# Load indirect
+ldb [x + 42]
+ldh [x + 42]
+ld [x + 42]
+
+# Load IPv4 header length
+ldx 4*([42]&0xf)
+
+# Run extension function
+ld #len
+ld #proto
+ld #type
+ld #rand
+
+# Store scratch
+st M[3]
+stx M[3]
+
+# A <op> constant
+add #42
+sub #42
+mul #42
+div #42
+or #42
+and #42
+lsh #42
+rsh #42
+mod #42
+xor #42
+
+# A <op> X
+add x
+sub x
+mul x
+div x
+or x
+and x
+lsh x
+rsh x
+mod x
+xor x
+
+# !A
+neg
+
+# Jumps
+ja end
+jeq #42,prev,end
+jne #42,end
+jlt #42,end
+jle #42,end
+jgt #42,prev,end
+jge #42,prev,end
+jset #42,prev,end
+
+# Register transfers
+tax
+txa
+
+# Returns
+prev: ret a
+end: ret #42
diff --git a/vendor/golang.org/x/net/bpf/vm.go b/vendor/golang.org/x/net/bpf/vm.go
new file mode 100644
index 0000000..4c656f1
--- /dev/null
+++ b/vendor/golang.org/x/net/bpf/vm.go
@@ -0,0 +1,140 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf
+
+import (
+	"errors"
+	"fmt"
+)
+
+// A VM is an emulated BPF virtual machine.
+type VM struct {
+	filter []Instruction
+}
+
+// NewVM returns a new VM using the input BPF program.
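+//
+// A minimal sketch: a single-instruction program that accepts up to
+// 4096 bytes of every packet passes validation and loads successfully.
+//
+//	vm, err := bpf.NewVM([]bpf.Instruction{
+//		bpf.RetConstant{Val: 4096},
+//	})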
+func NewVM(filter []Instruction) (*VM, error) { + if len(filter) == 0 { + return nil, errors.New("one or more Instructions must be specified") + } + + for i, ins := range filter { + check := len(filter) - (i + 1) + switch ins := ins.(type) { + // Check for out-of-bounds jumps in instructions + case Jump: + if check <= int(ins.Skip) { + return nil, fmt.Errorf("cannot jump %d instructions; jumping past program bounds", ins.Skip) + } + case JumpIf: + if check <= int(ins.SkipTrue) { + return nil, fmt.Errorf("cannot jump %d instructions in true case; jumping past program bounds", ins.SkipTrue) + } + if check <= int(ins.SkipFalse) { + return nil, fmt.Errorf("cannot jump %d instructions in false case; jumping past program bounds", ins.SkipFalse) + } + // Check for division or modulus by zero + case ALUOpConstant: + if ins.Val != 0 { + break + } + + switch ins.Op { + case ALUOpDiv, ALUOpMod: + return nil, errors.New("cannot divide by zero using ALUOpConstant") + } + // Check for unknown extensions + case LoadExtension: + switch ins.Num { + case ExtLen: + default: + return nil, fmt.Errorf("extension %d not implemented", ins.Num) + } + } + } + + // Make sure last instruction is a return instruction + switch filter[len(filter)-1].(type) { + case RetA, RetConstant: + default: + return nil, errors.New("BPF program must end with RetA or RetConstant") + } + + // Though our VM works using disassembled instructions, we + // attempt to assemble the input filter anyway to ensure it is compatible + // with an operating system VM. + _, err := Assemble(filter) + + return &VM{ + filter: filter, + }, err +} + +// Run runs the VM's BPF program against the input bytes. +// Run returns the number of bytes accepted by the BPF program, and any errors +// which occurred while processing the program. 
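+//
+// For example (a sketch reusing the vm from above; packet is an assumed
+// []byte):
+//
+//	verdict, err := vm.Run(packet)
+//	// verdict == 0 means the program ignored (filtered out) the packet.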
+func (v *VM) Run(in []byte) (int, error) { + var ( + // Registers of the virtual machine + regA uint32 + regX uint32 + regScratch [16]uint32 + + // OK is true if the program should continue processing the next + // instruction, or false if not, causing the loop to break + ok = true + ) + + // TODO(mdlayher): implement: + // - NegateA: + // - would require a change from uint32 registers to int32 + // registers + + // TODO(mdlayher): add interop tests that check signedness of ALU + // operations against kernel implementation, and make sure Go + // implementation matches behavior + + for i := 0; i < len(v.filter) && ok; i++ { + ins := v.filter[i] + + switch ins := ins.(type) { + case ALUOpConstant: + regA = aluOpConstant(ins, regA) + case ALUOpX: + regA, ok = aluOpX(ins, regA, regX) + case Jump: + i += int(ins.Skip) + case JumpIf: + jump := jumpIf(ins, regA) + i += jump + case LoadAbsolute: + regA, ok = loadAbsolute(ins, in) + case LoadConstant: + regA, regX = loadConstant(ins, regA, regX) + case LoadExtension: + regA = loadExtension(ins, in) + case LoadIndirect: + regA, ok = loadIndirect(ins, in, regX) + case LoadMemShift: + regX, ok = loadMemShift(ins, in) + case LoadScratch: + regA, regX = loadScratch(ins, regScratch, regA, regX) + case RetA: + return int(regA), nil + case RetConstant: + return int(ins.Val), nil + case StoreScratch: + regScratch = storeScratch(ins, regScratch, regA, regX) + case TAX: + regX = regA + case TXA: + regA = regX + default: + return 0, fmt.Errorf("unknown Instruction at index %d: %T", i, ins) + } + } + + return 0, nil +} diff --git a/vendor/golang.org/x/net/bpf/vm_aluop_test.go b/vendor/golang.org/x/net/bpf/vm_aluop_test.go new file mode 100644 index 0000000..1667824 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_aluop_test.go @@ -0,0 +1,512 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package bpf_test + +import ( + "testing" + + "golang.org/x/net/bpf" +) + +func TestVMALUOpAdd(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpAdd, + Val: 3, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 8, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 3, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpSub(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.TAX{}, + bpf.ALUOpX{ + Op: bpf.ALUOpSub, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 1, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpMul(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpMul, + Val: 2, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 6, 2, 3, 4, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 4, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpDiv(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpDiv, + Val: 2, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 20, 2, 3, 4, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 2, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpDivByZeroALUOpConstant(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.ALUOpConstant{ + Op: bpf.ALUOpDiv, + Val: 0, + }, + bpf.RetA{}, + }) + if errStr(err) != "cannot divide by zero using ALUOpConstant" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMALUOpDivByZeroALUOpX(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + // Load byte 0 into X + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.TAX{}, + // Load byte 1 into A + bpf.LoadAbsolute{ + Off: 9, + Size: 1, + }, + // Attempt to perform 1/0 + bpf.ALUOpX{ + Op: bpf.ALUOpDiv, + }, + // Return 4 bytes if program does not terminate + bpf.LoadConstant{ + Val: 12, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 3, 4, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + 
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpOr(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 2, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpOr, + Val: 0x01, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x00, 0x10, 0x03, 0x04, + 0x05, 0x06, 0x07, 0x08, + 0x09, 0xff, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 9, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpAnd(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 2, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpAnd, + Val: 0x0019, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xaa, 0x09, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpShiftLeft(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpShiftLeft, + Val: 0x01, + }, + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 0x02, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x01, 0xaa, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpShiftRight(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpShiftRight, + Val: 0x01, + }, + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 0x04, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x08, 0xff, 0xff, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpMod(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpMod, + Val: 20, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 30, 0, 0, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 2, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpModByZeroALUOpConstant(t *testing.T) 
{ + _, _, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpMod, + Val: 0, + }, + bpf.RetA{}, + }) + if errStr(err) != "cannot divide by zero using ALUOpConstant" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMALUOpModByZeroALUOpX(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + // Load byte 0 into X + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.TAX{}, + // Load byte 1 into A + bpf.LoadAbsolute{ + Off: 9, + Size: 1, + }, + // Attempt to perform 1%0 + bpf.ALUOpX{ + Op: bpf.ALUOpMod, + }, + // Return 4 bytes if program does not terminate + bpf.LoadConstant{ + Val: 12, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 3, 4, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpXor(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpXor, + Val: 0x0a, + }, + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 0x01, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x0b, 0x00, 0x00, 0x00, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpUnknown(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpAdd, + Val: 1, + }, + // Verify that an unknown operation is a no-op + bpf.ALUOpConstant{ + Op: 100, + }, + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 0x02, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 1, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} diff --git a/vendor/golang.org/x/net/bpf/vm_bpf_test.go b/vendor/golang.org/x/net/bpf/vm_bpf_test.go new file mode 100644 index 0000000..77fa8fe --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_bpf_test.go @@ -0,0 +1,192 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf_test + +import ( + "net" + "runtime" + "testing" + "time" + + "golang.org/x/net/bpf" + "golang.org/x/net/ipv4" +) + +// A virtualMachine is a BPF virtual machine which can process an +// input packet against a BPF program and render a verdict. +type virtualMachine interface { + Run(in []byte) (int, error) +} + +// canUseOSVM indicates if the OS BPF VM is available on this platform. 
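+// Attaching a classic BPF program to a socket via x/net/ipv4 is only
+// exercised on Linux here; other platforms fall back to the Go VM alone.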
+func canUseOSVM() bool { + // OS BPF VM can only be used on platforms where x/net/ipv4 supports + // attaching a BPF program to a socket. + switch runtime.GOOS { + case "linux": + return true + } + + return false +} + +// All BPF tests against both the Go VM and OS VM are assumed to +// be used with a UDP socket. As a result, the entire contents +// of a UDP datagram is sent through the BPF program, but only +// the body after the UDP header will ever be returned in output. + +// testVM sets up a Go BPF VM, and if available, a native OS BPF VM +// for integration testing. +func testVM(t *testing.T, filter []bpf.Instruction) (virtualMachine, func(), error) { + goVM, err := bpf.NewVM(filter) + if err != nil { + // Some tests expect an error, so this error must be returned + // instead of fatally exiting the test + return nil, nil, err + } + + mvm := &multiVirtualMachine{ + goVM: goVM, + + t: t, + } + + // If available, add the OS VM for tests which verify that both the Go + // VM and OS VM have exactly the same output for the same input program + // and packet. + done := func() {} + if canUseOSVM() { + osVM, osVMDone := testOSVM(t, filter) + done = func() { osVMDone() } + mvm.osVM = osVM + } + + return mvm, done, nil +} + +// udpHeaderLen is the length of a UDP header. +const udpHeaderLen = 8 + +// A multiVirtualMachine is a virtualMachine which can call out to both the Go VM +// and the native OS VM, if the OS VM is available. +type multiVirtualMachine struct { + goVM virtualMachine + osVM virtualMachine + + t *testing.T +} + +func (mvm *multiVirtualMachine) Run(in []byte) (int, error) { + if len(in) < udpHeaderLen { + mvm.t.Fatalf("input must be at least length of UDP header (%d), got: %d", + udpHeaderLen, len(in)) + } + + // All tests have a UDP header as part of input, because the OS VM + // packets always will. For the Go VM, this output is trimmed before + // being sent back to tests. + goOut, goErr := mvm.goVM.Run(in) + if goOut >= udpHeaderLen { + goOut -= udpHeaderLen + } + + // If Go output is larger than the size of the packet, packet filtering + // interop tests must trim the output bytes to the length of the packet. + // The BPF VM should not do this on its own, as other uses of it do + // not trim the output byte count. + trim := len(in) - udpHeaderLen + if goOut > trim { + goOut = trim + } + + // When the OS VM is not available, process using the Go VM alone + if mvm.osVM == nil { + return goOut, goErr + } + + // The OS VM will apply its own UDP header, so remove the pseudo header + // that the Go VM needs. + osOut, err := mvm.osVM.Run(in[udpHeaderLen:]) + if err != nil { + mvm.t.Fatalf("error while running OS VM: %v", err) + } + + // Verify both VMs return same number of bytes + var mismatch bool + if goOut != osOut { + mismatch = true + mvm.t.Logf("output byte count does not match:\n- go: %v\n- os: %v", goOut, osOut) + } + + if mismatch { + mvm.t.Fatal("Go BPF and OS BPF packet outputs do not match") + } + + return goOut, goErr +} + +// An osVirtualMachine is a virtualMachine which uses the OS's BPF VM for +// processing BPF programs. +type osVirtualMachine struct { + l net.PacketConn + s net.Conn +} + +// testOSVM creates a virtualMachine which uses the OS's BPF VM by injecting +// packets into a UDP listener with a BPF program attached to it. 
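+// The returned cleanup function closes both the listener and the
+// sending socket.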
+func testOSVM(t *testing.T, filter []bpf.Instruction) (virtualMachine, func()) { + l, err := net.ListenPacket("udp4", "127.0.0.1:0") + if err != nil { + t.Fatalf("failed to open OS VM UDP listener: %v", err) + } + + prog, err := bpf.Assemble(filter) + if err != nil { + t.Fatalf("failed to compile BPF program: %v", err) + } + + p := ipv4.NewPacketConn(l) + if err = p.SetBPF(prog); err != nil { + t.Fatalf("failed to attach BPF program to listener: %v", err) + } + + s, err := net.Dial("udp4", l.LocalAddr().String()) + if err != nil { + t.Fatalf("failed to dial connection to listener: %v", err) + } + + done := func() { + _ = s.Close() + _ = l.Close() + } + + return &osVirtualMachine{ + l: l, + s: s, + }, done +} + +// Run sends the input bytes into the OS's BPF VM and returns its verdict. +func (vm *osVirtualMachine) Run(in []byte) (int, error) { + go func() { + _, _ = vm.s.Write(in) + }() + + vm.l.SetDeadline(time.Now().Add(50 * time.Millisecond)) + + var b [512]byte + n, _, err := vm.l.ReadFrom(b[:]) + if err != nil { + // A timeout indicates that BPF filtered out the packet, and thus, + // no input should be returned. + if nerr, ok := err.(net.Error); ok && nerr.Timeout() { + return n, nil + } + + return n, err + } + + return n, nil +} diff --git a/vendor/golang.org/x/net/bpf/vm_extension_test.go b/vendor/golang.org/x/net/bpf/vm_extension_test.go new file mode 100644 index 0000000..7a48c82 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_extension_test.go @@ -0,0 +1,49 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf_test + +import ( + "testing" + + "golang.org/x/net/bpf" +) + +func TestVMLoadExtensionNotImplemented(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.LoadExtension{ + Num: 100, + }, + bpf.RetA{}, + }) + if errStr(err) != "extension 100 not implemented" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMLoadExtensionExtLen(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadExtension{ + Num: bpf.ExtLen, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 4, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} diff --git a/vendor/golang.org/x/net/bpf/vm_instructions.go b/vendor/golang.org/x/net/bpf/vm_instructions.go new file mode 100644 index 0000000..516f946 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_instructions.go @@ -0,0 +1,174 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package bpf
+
+import (
+	"encoding/binary"
+	"fmt"
+)
+
+func aluOpConstant(ins ALUOpConstant, regA uint32) uint32 {
+	return aluOpCommon(ins.Op, regA, ins.Val)
+}
+
+func aluOpX(ins ALUOpX, regA uint32, regX uint32) (uint32, bool) {
+	// Guard against division or modulus by zero by terminating
+	// the program, as the OS BPF VM does
+	if regX == 0 {
+		switch ins.Op {
+		case ALUOpDiv, ALUOpMod:
+			return 0, false
+		}
+	}
+
+	return aluOpCommon(ins.Op, regA, regX), true
+}
+
+func aluOpCommon(op ALUOp, regA uint32, value uint32) uint32 {
+	switch op {
+	case ALUOpAdd:
+		return regA + value
+	case ALUOpSub:
+		return regA - value
+	case ALUOpMul:
+		return regA * value
+	case ALUOpDiv:
+		// Division by zero not permitted by NewVM and aluOpX checks
+		return regA / value
+	case ALUOpOr:
+		return regA | value
+	case ALUOpAnd:
+		return regA & value
+	case ALUOpShiftLeft:
+		return regA << value
+	case ALUOpShiftRight:
+		return regA >> value
+	case ALUOpMod:
+		// Modulus by zero not permitted by NewVM and aluOpX checks
+		return regA % value
+	case ALUOpXor:
+		return regA ^ value
+	default:
+		return regA
+	}
+}
+
+func jumpIf(ins JumpIf, value uint32) int {
+	var ok bool
+	inV := uint32(ins.Val)
+
+	switch ins.Cond {
+	case JumpEqual:
+		ok = value == inV
+	case JumpNotEqual:
+		ok = value != inV
+	case JumpGreaterThan:
+		ok = value > inV
+	case JumpLessThan:
+		ok = value < inV
+	case JumpGreaterOrEqual:
+		ok = value >= inV
+	case JumpLessOrEqual:
+		ok = value <= inV
+	case JumpBitsSet:
+		ok = (value & inV) != 0
+	case JumpBitsNotSet:
+		ok = (value & inV) == 0
+	}
+
+	if ok {
+		return int(ins.SkipTrue)
+	}
+
+	return int(ins.SkipFalse)
+}
+
+func loadAbsolute(ins LoadAbsolute, in []byte) (uint32, bool) {
+	offset := int(ins.Off)
+	size := int(ins.Size)
+
+	return loadCommon(in, offset, size)
+}
+
+func loadConstant(ins LoadConstant, regA uint32, regX uint32) (uint32, uint32) {
+	switch ins.Dst {
+	case RegA:
+		regA = ins.Val
+	case RegX:
+		regX = ins.Val
+	}
+
+	return regA, regX
+}
+
+func loadExtension(ins LoadExtension, in []byte) uint32 {
+	switch ins.Num {
+	case ExtLen:
+		return uint32(len(in))
+	default:
+		panic(fmt.Sprintf("unimplemented extension: %d", ins.Num))
+	}
+}
+
+func loadIndirect(ins LoadIndirect, in []byte, regX uint32) (uint32, bool) {
+	offset := int(ins.Off) + int(regX)
+	size := int(ins.Size)
+
+	return loadCommon(in, offset, size)
+}
+
+func loadMemShift(ins LoadMemShift, in []byte) (uint32, bool) {
+	offset := int(ins.Off)
+
+	// LoadMemShift reads a single byte, so check bounds for a one-byte
+	// read; a zero-size check would let offset == len(in) through and
+	// panic on the index below.
+	if !inBounds(len(in), offset, 1) {
+		return 0, false
+	}
+
+	// Mask off high 4 bits and multiply low 4 bits by 4
+	return uint32(in[offset]&0x0f) * 4, true
+}
+
+func inBounds(inLen int, offset int, size int) bool {
+	return offset+size <= inLen
+}
+
+func loadCommon(in []byte, offset int, size int) (uint32, bool) {
+	if !inBounds(len(in), offset, size) {
+		return 0, false
+	}
+
+	switch size {
+	case 1:
+		return uint32(in[offset]), true
+	case 2:
+		return uint32(binary.BigEndian.Uint16(in[offset : offset+size])), true
+	case 4:
+		return uint32(binary.BigEndian.Uint32(in[offset : offset+size])), true
+	default:
+		panic(fmt.Sprintf("invalid load size: %d", size))
+	}
+}
+
+func loadScratch(ins LoadScratch, regScratch [16]uint32, regA uint32, regX uint32) (uint32, uint32) {
+	switch ins.Dst {
+	case RegA:
+		regA = regScratch[ins.N]
+	case RegX:
+		regX = regScratch[ins.N]
+	}
+
+	return regA, regX
+}
+
+func storeScratch(ins StoreScratch, regScratch [16]uint32, regA uint32, regX uint32) [16]uint32 {
+	switch ins.Src {
+	case RegA:
+		regScratch[ins.N] =
regA + case RegX: + regScratch[ins.N] = regX + } + + return regScratch +} diff --git a/vendor/golang.org/x/net/bpf/vm_jump_test.go b/vendor/golang.org/x/net/bpf/vm_jump_test.go new file mode 100644 index 0000000..e0a3a98 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_jump_test.go @@ -0,0 +1,380 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf_test + +import ( + "testing" + + "golang.org/x/net/bpf" +) + +func TestVMJumpOne(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.Jump{ + Skip: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 1, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpOutOfProgram(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.Jump{ + Skip: 1, + }, + bpf.RetA{}, + }) + if errStr(err) != "cannot jump 1 instructions; jumping past program bounds" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMJumpIfTrueOutOfProgram(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.JumpIf{ + Cond: bpf.JumpEqual, + SkipTrue: 2, + }, + bpf.RetA{}, + }) + if errStr(err) != "cannot jump 2 instructions in true case; jumping past program bounds" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMJumpIfFalseOutOfProgram(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.JumpIf{ + Cond: bpf.JumpEqual, + SkipFalse: 3, + }, + bpf.RetA{}, + }) + if errStr(err) != "cannot jump 3 instructions in false case; jumping past program bounds" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMJumpIfEqual(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 1, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 1, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpIfNotEqual(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.JumpIf{ + Cond: bpf.JumpNotEqual, + Val: 1, + SkipFalse: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 1, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpIfGreaterThan(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 4, + }, 
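+ // A Size 4 load decodes four payload bytes big-endian into register A
+ // (0x00010203 for input bytes 0, 1, 2, 3), so the JumpIf below compares
+ // the full 32-bit value against Val.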
+ bpf.JumpIf{ + Cond: bpf.JumpGreaterThan, + Val: 0x00010202, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 12, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 4, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpIfLessThan(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 4, + }, + bpf.JumpIf{ + Cond: bpf.JumpLessThan, + Val: 0xff010203, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 12, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 4, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpIfGreaterOrEqual(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 4, + }, + bpf.JumpIf{ + Cond: bpf.JumpGreaterOrEqual, + Val: 0x00010203, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 12, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 4, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpIfLessOrEqual(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 4, + }, + bpf.JumpIf{ + Cond: bpf.JumpLessOrEqual, + Val: 0xff010203, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 12, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 4, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpIfBitsSet(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 2, + }, + bpf.JumpIf{ + Cond: bpf.JumpBitsSet, + Val: 0x1122, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 10, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x01, 0x02, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 2, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpIfBitsNotSet(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 2, + }, + bpf.JumpIf{ + Cond: bpf.JumpBitsNotSet, + Val: 
0x1221, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 10, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x01, 0x02, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 2, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} diff --git a/vendor/golang.org/x/net/bpf/vm_load_test.go b/vendor/golang.org/x/net/bpf/vm_load_test.go new file mode 100644 index 0000000..04578b6 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_load_test.go @@ -0,0 +1,246 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf_test + +import ( + "net" + "testing" + + "golang.org/x/net/bpf" + "golang.org/x/net/ipv4" +) + +func TestVMLoadAbsoluteOffsetOutOfBounds(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 100, + Size: 2, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMLoadAbsoluteOffsetPlusSizeOutOfBounds(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 2, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMLoadAbsoluteBadInstructionSize(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Size: 5, + }, + bpf.RetA{}, + }) + if errStr(err) != "assembling instruction 1: invalid load byte length 0" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMLoadConstantOK(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadConstant{ + Dst: bpf.RegX, + Val: 9, + }, + bpf.TXA{}, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMLoadIndirectOutOfBounds(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadIndirect{ + Off: 100, + Size: 1, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- 
got: %d", + want, got) + } +} + +func TestVMLoadMemShiftOutOfBounds(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadMemShift{ + Off: 100, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +const ( + dhcp4Port = 53 +) + +func TestVMLoadMemShiftLoadIndirectNoResult(t *testing.T) { + vm, in, done := testDHCPv4(t) + defer done() + + // Append mostly empty UDP header with incorrect DHCPv4 port + in = append(in, []byte{ + 0, 0, + 0, dhcp4Port + 1, + 0, 0, + 0, 0, + }...) + + out, err := vm.Run(in) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMLoadMemShiftLoadIndirectOK(t *testing.T) { + vm, in, done := testDHCPv4(t) + defer done() + + // Append mostly empty UDP header with correct DHCPv4 port + in = append(in, []byte{ + 0, 0, + 0, dhcp4Port, + 0, 0, + 0, 0, + }...) + + out, err := vm.Run(in) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := len(in)-8, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func testDHCPv4(t *testing.T) (virtualMachine, []byte, func()) { + // DHCPv4 test data courtesy of David Anderson: + // https://github.com/google/netboot/blob/master/dhcp4/conn_linux.go#L59-L70 + vm, done, err := testVM(t, []bpf.Instruction{ + // Load IPv4 packet length + bpf.LoadMemShift{Off: 8}, + // Get UDP dport + bpf.LoadIndirect{Off: 8 + 2, Size: 2}, + // Correct dport? + bpf.JumpIf{Cond: bpf.JumpEqual, Val: dhcp4Port, SkipFalse: 1}, + // Accept + bpf.RetConstant{Val: 1500}, + // Ignore + bpf.RetConstant{Val: 0}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + + // Minimal requirements to make a valid IPv4 header + h := &ipv4.Header{ + Len: ipv4.HeaderLen, + Src: net.IPv4(192, 168, 1, 1), + Dst: net.IPv4(192, 168, 1, 2), + } + hb, err := h.Marshal() + if err != nil { + t.Fatalf("failed to marshal IPv4 header: %v", err) + } + + hb = append([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + }, hb...) + + return vm, hb, done +} diff --git a/vendor/golang.org/x/net/bpf/vm_ret_test.go b/vendor/golang.org/x/net/bpf/vm_ret_test.go new file mode 100644 index 0000000..2d86eae --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_ret_test.go @@ -0,0 +1,115 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package bpf_test + +import ( + "testing" + + "golang.org/x/net/bpf" +) + +func TestVMRetA(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 9, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMRetALargerThanInput(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 2, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 255, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 2, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMRetConstant(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMRetConstantLargerThanInput(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.RetConstant{ + Val: 16, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 2, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} diff --git a/vendor/golang.org/x/net/bpf/vm_scratch_test.go b/vendor/golang.org/x/net/bpf/vm_scratch_test.go new file mode 100644 index 0000000..e600e3c --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_scratch_test.go @@ -0,0 +1,247 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
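+// The scratch store exercised below is a bank of sixteen 32-bit slots,
+// M[0] through M[15]: StoreScratch copies register A or X into slot N,
+// LoadScratch copies a slot back into a register, and any N outside the
+// range 0-15 is rejected when the program is assembled. A minimal round
+// trip looks like:
+//
+//	bpf.StoreScratch{Src: bpf.RegA, N: 0} // M[0] = A
+//	bpf.LoadScratch{Dst: bpf.RegX, N: 0}  // X = M[0]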
+ +package bpf_test + +import ( + "testing" + + "golang.org/x/net/bpf" +) + +func TestVMStoreScratchInvalidScratchRegisterTooSmall(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.StoreScratch{ + Src: bpf.RegA, + N: -1, + }, + bpf.RetA{}, + }) + if errStr(err) != "assembling instruction 1: invalid scratch slot -1" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMStoreScratchInvalidScratchRegisterTooLarge(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.StoreScratch{ + Src: bpf.RegA, + N: 16, + }, + bpf.RetA{}, + }) + if errStr(err) != "assembling instruction 1: invalid scratch slot 16" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMStoreScratchUnknownSourceRegister(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.StoreScratch{ + Src: 100, + N: 0, + }, + bpf.RetA{}, + }) + if errStr(err) != "assembling instruction 1: invalid source register 100" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMLoadScratchInvalidScratchRegisterTooSmall(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.LoadScratch{ + Dst: bpf.RegX, + N: -1, + }, + bpf.RetA{}, + }) + if errStr(err) != "assembling instruction 1: invalid scratch slot -1" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMLoadScratchInvalidScratchRegisterTooLarge(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.LoadScratch{ + Dst: bpf.RegX, + N: 16, + }, + bpf.RetA{}, + }) + if errStr(err) != "assembling instruction 1: invalid scratch slot 16" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMLoadScratchUnknownDestinationRegister(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.LoadScratch{ + Dst: 100, + N: 0, + }, + bpf.RetA{}, + }) + if errStr(err) != "assembling instruction 1: invalid target register 100" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMStoreScratchLoadScratchOneValue(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + // Load byte 255 + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + // Copy to X and store in scratch[0] + bpf.TAX{}, + bpf.StoreScratch{ + Src: bpf.RegX, + N: 0, + }, + // Load byte 1 + bpf.LoadAbsolute{ + Off: 9, + Size: 1, + }, + // Overwrite 1 with 255 from scratch[0] + bpf.LoadScratch{ + Dst: bpf.RegA, + N: 0, + }, + // Return 255 + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 255, 1, 2, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 3, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMStoreScratchLoadScratchMultipleValues(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + // Load byte 10 + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + // Store in scratch[0] + bpf.StoreScratch{ + Src: bpf.RegA, + N: 0, + }, + // Load byte 20 + bpf.LoadAbsolute{ + Off: 9, + Size: 1, + }, + // Store in scratch[1] + bpf.StoreScratch{ + Src: bpf.RegA, + N: 1, + }, + // Load byte 30 + bpf.LoadAbsolute{ + Off: 10, + Size: 1, + }, + // Store in scratch[2] + bpf.StoreScratch{ + Src: bpf.RegA, + N: 2, + }, + // Load byte 1 + bpf.LoadAbsolute{ + Off: 11, + Size: 1, + }, + // Store in scratch[3] + bpf.StoreScratch{ + Src: bpf.RegA, + N: 3, + }, + // Load in byte 10 to X + bpf.LoadScratch{ + Dst: bpf.RegX, + N: 0, + }, + // Copy X -> A + bpf.TXA{}, + // 
Verify value is 10 + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 10, + SkipTrue: 1, + }, + // Fail test if incorrect + bpf.RetConstant{ + Val: 0, + }, + // Load in byte 20 to A + bpf.LoadScratch{ + Dst: bpf.RegA, + N: 1, + }, + // Verify value is 20 + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 20, + SkipTrue: 1, + }, + // Fail test if incorrect + bpf.RetConstant{ + Val: 0, + }, + // Load in byte 30 to A + bpf.LoadScratch{ + Dst: bpf.RegA, + N: 2, + }, + // Verify value is 30 + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 30, + SkipTrue: 1, + }, + // Fail test if incorrect + bpf.RetConstant{ + Val: 0, + }, + // Return first two bytes on success + bpf.RetConstant{ + Val: 10, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 10, 20, 30, 1, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 2, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} diff --git a/vendor/golang.org/x/net/bpf/vm_test.go b/vendor/golang.org/x/net/bpf/vm_test.go new file mode 100644 index 0000000..6bd4dd5 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_test.go @@ -0,0 +1,144 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf_test + +import ( + "fmt" + "testing" + + "golang.org/x/net/bpf" +) + +var _ bpf.Instruction = unknown{} + +type unknown struct{} + +func (unknown) Assemble() (bpf.RawInstruction, error) { + return bpf.RawInstruction{}, nil +} + +func TestVMUnknownInstruction(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadConstant{ + Dst: bpf.RegA, + Val: 100, + }, + // Should terminate the program with an error immediately + unknown{}, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + defer done() + + _, err = vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, + }) + if errStr(err) != "unknown Instruction at index 1: bpf_test.unknown" { + t.Fatalf("unexpected error while running program: %v", err) + } +} + +func TestVMNoReturnInstruction(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.LoadConstant{ + Dst: bpf.RegA, + Val: 1, + }, + }) + if errStr(err) != "BPF program must end with RetA or RetConstant" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMNoInputInstructions(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{}) + if errStr(err) != "one or more Instructions must be specified" { + t.Fatalf("unexpected error: %v", err) + } +} + +// ExampleNewVM demonstrates usage of a VM, using an Ethernet frame +// as input and checking its EtherType to determine if it should be accepted. +func ExampleNewVM() { + // Offset | Length | Comment + // ------------------------- + // 00 | 06 | Ethernet destination MAC address + // 06 | 06 | Ethernet source MAC address + // 12 | 02 | Ethernet EtherType + const ( + etOff = 12 + etLen = 2 + + etARP = 0x0806 + ) + + // Set up a VM to filter traffic based on if its EtherType + // matches the ARP EtherType. 
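+ // In classic BPF assembly the program below reads roughly as:
+ //	ldh [12]; jeq #0x806, accept; ret #0; accept: ret #1500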
+ vm, err := bpf.NewVM([]bpf.Instruction{ + // Load EtherType value from Ethernet header + bpf.LoadAbsolute{ + Off: etOff, + Size: etLen, + }, + // If EtherType is equal to the ARP EtherType, jump to allow + // packet to be accepted + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: etARP, + SkipTrue: 1, + }, + // EtherType does not match the ARP EtherType + bpf.RetConstant{ + Val: 0, + }, + // EtherType matches the ARP EtherType, accept up to 1500 + // bytes of packet + bpf.RetConstant{ + Val: 1500, + }, + }) + if err != nil { + panic(fmt.Sprintf("failed to load BPF program: %v", err)) + } + + // Create an Ethernet frame with the ARP EtherType for testing + frame := []byte{ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, + 0x08, 0x06, + // Payload omitted for brevity + } + + // Run our VM's BPF program using the Ethernet frame as input + out, err := vm.Run(frame) + if err != nil { + panic(fmt.Sprintf("failed to accept Ethernet frame: %v", err)) + } + + // BPF VM can return a byte count greater than the number of input + // bytes, so trim the output to match the input byte length + if out > len(frame) { + out = len(frame) + } + + fmt.Printf("out: %d bytes", out) + + // Output: + // out: 14 bytes +} + +// errStr returns the string representation of an error, or +// "" if it is nil. +func errStr(err error) string { + if err == nil { + return "" + } + + return err.Error() +} diff --git a/vendor/golang.org/x/net/codereview.cfg b/vendor/golang.org/x/net/codereview.cfg new file mode 100644 index 0000000..3f8b14b --- /dev/null +++ b/vendor/golang.org/x/net/codereview.cfg @@ -0,0 +1 @@ +issuerepo: golang/go diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go new file mode 100644 index 0000000..a3c021d --- /dev/null +++ b/vendor/golang.org/x/net/context/context.go @@ -0,0 +1,56 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package context defines the Context type, which carries deadlines, +// cancelation signals, and other request-scoped values across API boundaries +// and between processes. +// As of Go 1.7 this package is available in the standard library under the +// name context. https://golang.org/pkg/context. +// +// Incoming requests to a server should create a Context, and outgoing calls to +// servers should accept a Context. The chain of function calls between them +// must propagate the Context, optionally replacing it with a modified copy +// created using WithDeadline, WithTimeout, WithCancel, or WithValue. +// +// Programs that use Contexts should follow these rules to keep interfaces +// consistent across packages and enable static analysis tools to check context +// propagation: +// +// Do not store Contexts inside a struct type; instead, pass a Context +// explicitly to each function that needs it. The Context should be the first +// parameter, typically named ctx: +// +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } +// +// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// if you are unsure about which Context to use. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The same Context may be passed to functions running in different goroutines; +// Contexts are safe for simultaneous use by multiple goroutines.
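+//
+// For example (an illustrative sketch reusing DoSomething from above), a
+// caller typically derives a Context, passes it down, and cancels it once
+// the work completes:
+//
+// 	ctx, cancel := context.WithCancel(context.Background())
+// 	defer cancel()
+// 	if err := DoSomething(ctx, arg); err != nil {
+// 		// ...
+// 	}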
+// +// See http://blog.golang.org/context for example code for a server that uses +// Contexts. +package context // import "golang.org/x/net/context" + +// Background returns a non-nil, empty Context. It is never canceled, has no +// values, and has no deadline. It is typically used by the main function, +// initialization, and tests, and as the top-level Context for incoming +// requests. +func Background() Context { + return background +} + +// TODO returns a non-nil, empty Context. Code should use context.TODO when +// it's unclear which Context to use or it is not yet available (because the +// surrounding function has not yet been extended to accept a Context +// parameter). TODO is recognized by static analysis tools that determine +// whether Contexts are propagated correctly in a program. +func TODO() Context { + return todo +} diff --git a/vendor/golang.org/x/net/context/context_test.go b/vendor/golang.org/x/net/context/context_test.go new file mode 100644 index 0000000..6284413 --- /dev/null +++ b/vendor/golang.org/x/net/context/context_test.go @@ -0,0 +1,583 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package context + +import ( + "fmt" + "math/rand" + "runtime" + "strings" + "sync" + "testing" + "time" +) + +// otherContext is a Context that's not one of the types defined in context.go. +// This lets us test code paths that differ based on the underlying type of the +// Context. +type otherContext struct { + Context +} + +func TestBackground(t *testing.T) { + c := Background() + if c == nil { + t.Fatalf("Background returned nil") + } + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + if got, want := fmt.Sprint(c), "context.Background"; got != want { + t.Errorf("Background().String() = %q want %q", got, want) + } +} + +func TestTODO(t *testing.T) { + c := TODO() + if c == nil { + t.Fatalf("TODO returned nil") + } + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + if got, want := fmt.Sprint(c), "context.TODO"; got != want { + t.Errorf("TODO().String() = %q want %q", got, want) + } +} + +func TestWithCancel(t *testing.T) { + c1, cancel := WithCancel(Background()) + + if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want { + t.Errorf("c1.String() = %q want %q", got, want) + } + + o := otherContext{c1} + c2, _ := WithCancel(o) + contexts := []Context{c1, o, c2} + + for i, c := range contexts { + if d := c.Done(); d == nil { + t.Errorf("c[%d].Done() == %v want non-nil", i, d) + } + if e := c.Err(); e != nil { + t.Errorf("c[%d].Err() == %v want nil", i, e) + } + + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + } + + cancel() + time.Sleep(100 * time.Millisecond) // let cancelation propagate + + for i, c := range contexts { + select { + case <-c.Done(): + default: + t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i) + } + if e := c.Err(); e != Canceled { + t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled) + } + } +} + +func TestParentFinishesChild(t *testing.T) { + // Context tree: + // parent -> cancelChild + // parent -> valueChild -> timerChild + parent, cancel := WithCancel(Background()) + cancelChild, stop := WithCancel(parent) + defer stop() + valueChild := WithValue(parent, "key", "value") + timerChild, stop 
:= WithTimeout(valueChild, 10000*time.Hour) + defer stop() + + select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + case x := <-cancelChild.Done(): + t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x) + case x := <-timerChild.Done(): + t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x) + case x := <-valueChild.Done(): + t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x) + default: + } + + // The parent's children should contain the two cancelable children. + pc := parent.(*cancelCtx) + cc := cancelChild.(*cancelCtx) + tc := timerChild.(*timerCtx) + pc.mu.Lock() + if len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] { + t.Errorf("bad linkage: pc.children = %v, want %v and %v", + pc.children, cc, tc) + } + pc.mu.Unlock() + + if p, ok := parentCancelCtx(cc.Context); !ok || p != pc { + t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc) + } + if p, ok := parentCancelCtx(tc.Context); !ok || p != pc { + t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc) + } + + cancel() + + pc.mu.Lock() + if len(pc.children) != 0 { + t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children) + } + pc.mu.Unlock() + + // parent and children should all be finished. + check := func(ctx Context, name string) { + select { + case <-ctx.Done(): + default: + t.Errorf("<-%s.Done() blocked, but shouldn't have", name) + } + if e := ctx.Err(); e != Canceled { + t.Errorf("%s.Err() == %v want %v", name, e, Canceled) + } + } + check(parent, "parent") + check(cancelChild, "cancelChild") + check(valueChild, "valueChild") + check(timerChild, "timerChild") + + // WithCancel should return a canceled context on a canceled parent. + precanceledChild := WithValue(parent, "key", "value") + select { + case <-precanceledChild.Done(): + default: + t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have") + } + if e := precanceledChild.Err(); e != Canceled { + t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled) + } +} + +func TestChildFinishesFirst(t *testing.T) { + cancelable, stop := WithCancel(Background()) + defer stop() + for _, parent := range []Context{Background(), cancelable} { + child, cancel := WithCancel(parent) + + select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + case x := <-child.Done(): + t.Errorf("<-child.Done() == %v want nothing (it should block)", x) + default: + } + + cc := child.(*cancelCtx) + pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background() + if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) { + t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok) + } + + if pcok { + pc.mu.Lock() + if len(pc.children) != 1 || !pc.children[cc] { + t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc) + } + pc.mu.Unlock() + } + + cancel() + + if pcok { + pc.mu.Lock() + if len(pc.children) != 0 { + t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children) + } + pc.mu.Unlock() + } + + // child should be finished. + select { + case <-child.Done(): + default: + t.Errorf("<-child.Done() blocked, but shouldn't have") + } + if e := child.Err(); e != Canceled { + t.Errorf("child.Err() == %v want %v", e, Canceled) + } + + // parent should not be finished. 
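+ // (Cancelation propagates only downward, from parent to child, so
+ // canceling the child must leave the parent untouched.)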
+ select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + default: + } + if e := parent.Err(); e != nil { + t.Errorf("parent.Err() == %v want nil", e) + } + } +} + +func testDeadline(c Context, wait time.Duration, t *testing.T) { + select { + case <-time.After(wait): + t.Fatalf("context should have timed out") + case <-c.Done(): + } + if e := c.Err(); e != DeadlineExceeded { + t.Errorf("c.Err() == %v want %v", e, DeadlineExceeded) + } +} + +func TestDeadline(t *testing.T) { + t.Parallel() + const timeUnit = 500 * time.Millisecond + c, _ := WithDeadline(Background(), time.Now().Add(1*timeUnit)) + if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { + t.Errorf("c.String() = %q want prefix %q", got, prefix) + } + testDeadline(c, 2*timeUnit, t) + + c, _ = WithDeadline(Background(), time.Now().Add(1*timeUnit)) + o := otherContext{c} + testDeadline(o, 2*timeUnit, t) + + c, _ = WithDeadline(Background(), time.Now().Add(1*timeUnit)) + o = otherContext{c} + c, _ = WithDeadline(o, time.Now().Add(3*timeUnit)) + testDeadline(c, 2*timeUnit, t) +} + +func TestTimeout(t *testing.T) { + t.Parallel() + const timeUnit = 500 * time.Millisecond + c, _ := WithTimeout(Background(), 1*timeUnit) + if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { + t.Errorf("c.String() = %q want prefix %q", got, prefix) + } + testDeadline(c, 2*timeUnit, t) + + c, _ = WithTimeout(Background(), 1*timeUnit) + o := otherContext{c} + testDeadline(o, 2*timeUnit, t) + + c, _ = WithTimeout(Background(), 1*timeUnit) + o = otherContext{c} + c, _ = WithTimeout(o, 3*timeUnit) + testDeadline(c, 2*timeUnit, t) +} + +func TestCanceledTimeout(t *testing.T) { + t.Parallel() + const timeUnit = 500 * time.Millisecond + c, _ := WithTimeout(Background(), 2*timeUnit) + o := otherContext{c} + c, cancel := WithTimeout(o, 4*timeUnit) + cancel() + time.Sleep(1 * timeUnit) // let cancelation propagate + select { + case <-c.Done(): + default: + t.Errorf("<-c.Done() blocked, but shouldn't have") + } + if e := c.Err(); e != Canceled { + t.Errorf("c.Err() == %v want %v", e, Canceled) + } +} + +type key1 int +type key2 int + +var k1 = key1(1) +var k2 = key2(1) // same int as k1, different type +var k3 = key2(3) // same type as k2, different int + +func TestValues(t *testing.T) { + check := func(c Context, nm, v1, v2, v3 string) { + if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 { + t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0) + } + if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 { + t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0) + } + if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 { + t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0) + } + } + + c0 := Background() + check(c0, "c0", "", "", "") + + c1 := WithValue(Background(), k1, "c1k1") + check(c1, "c1", "c1k1", "", "") + + if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want { + t.Errorf("c.String() = %q want %q", got, want) + } + + c2 := WithValue(c1, k2, "c2k2") + check(c2, "c2", "c1k1", "c2k2", "") + + c3 := WithValue(c2, k3, "c3k3") + check(c3, "c2", "c1k1", "c2k2", "c3k3") + + c4 := WithValue(c3, k1, nil) + check(c4, "c4", "", "c2k2", "c3k3") + + o0 := otherContext{Background()} + check(o0, "o0", "", "", "") + + o1 := 
otherContext{WithValue(Background(), k1, "c1k1")} + check(o1, "o1", "c1k1", "", "") + + o2 := WithValue(o1, k2, "o2k2") + check(o2, "o2", "c1k1", "o2k2", "") + + o3 := otherContext{c4} + check(o3, "o3", "", "c2k2", "c3k3") + + o4 := WithValue(o3, k3, nil) + check(o4, "o4", "", "c2k2", "") +} + +func TestAllocs(t *testing.T) { + bg := Background() + for _, test := range []struct { + desc string + f func() + limit float64 + gccgoLimit float64 + }{ + { + desc: "Background()", + f: func() { Background() }, + limit: 0, + gccgoLimit: 0, + }, + { + desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1), + f: func() { + c := WithValue(bg, k1, nil) + c.Value(k1) + }, + limit: 3, + gccgoLimit: 3, + }, + { + desc: "WithTimeout(bg, 15*time.Millisecond)", + f: func() { + c, _ := WithTimeout(bg, 15*time.Millisecond) + <-c.Done() + }, + limit: 8, + gccgoLimit: 16, + }, + { + desc: "WithCancel(bg)", + f: func() { + c, cancel := WithCancel(bg) + cancel() + <-c.Done() + }, + limit: 5, + gccgoLimit: 8, + }, + { + desc: "WithTimeout(bg, 100*time.Millisecond)", + f: func() { + c, cancel := WithTimeout(bg, 100*time.Millisecond) + cancel() + <-c.Done() + }, + limit: 8, + gccgoLimit: 25, + }, + } { + limit := test.limit + if runtime.Compiler == "gccgo" { + // gccgo does not yet do escape analysis. + // TODO(iant): Remove this when gccgo does do escape analysis. + limit = test.gccgoLimit + } + if n := testing.AllocsPerRun(100, test.f); n > limit { + t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit)) + } + } +} + +func TestSimultaneousCancels(t *testing.T) { + root, cancel := WithCancel(Background()) + m := map[Context]CancelFunc{root: cancel} + q := []Context{root} + // Create a tree of contexts. + for len(q) != 0 && len(m) < 100 { + parent := q[0] + q = q[1:] + for i := 0; i < 4; i++ { + ctx, cancel := WithCancel(parent) + m[ctx] = cancel + q = append(q, ctx) + } + } + // Start all the cancels in a random order. + var wg sync.WaitGroup + wg.Add(len(m)) + for _, cancel := range m { + go func(cancel CancelFunc) { + cancel() + wg.Done() + }(cancel) + } + // Wait on all the contexts in a random order. + for ctx := range m { + select { + case <-ctx.Done(): + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n]) + } + } + // Wait for all the cancel functions to return. + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + select { + case <-done: + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n]) + } +} + +func TestInterlockedCancels(t *testing.T) { + parent, cancelParent := WithCancel(Background()) + child, cancelChild := WithCancel(parent) + go func() { + parent.Done() + cancelChild() + }() + cancelParent() + select { + case <-child.Done(): + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n]) + } +} + +func TestLayersCancel(t *testing.T) { + testLayers(t, time.Now().UnixNano(), false) +} + +func TestLayersTimeout(t *testing.T) { + testLayers(t, time.Now().UnixNano(), true) +} + +func testLayers(t *testing.T, seed int64, testTimeout bool) { + rand.Seed(seed) + errorf := func(format string, a ...interface{}) { + t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...) 
+ } + const ( + timeout = 200 * time.Millisecond + minLayers = 30 + ) + type value int + var ( + vals []*value + cancels []CancelFunc + numTimers int + ctx = Background() + ) + for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ { + switch rand.Intn(3) { + case 0: + v := new(value) + ctx = WithValue(ctx, v, v) + vals = append(vals, v) + case 1: + var cancel CancelFunc + ctx, cancel = WithCancel(ctx) + cancels = append(cancels, cancel) + case 2: + var cancel CancelFunc + ctx, cancel = WithTimeout(ctx, timeout) + cancels = append(cancels, cancel) + numTimers++ + } + } + checkValues := func(when string) { + for _, key := range vals { + if val := ctx.Value(key).(*value); key != val { + errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key) + } + } + } + select { + case <-ctx.Done(): + errorf("ctx should not be canceled yet") + default: + } + if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) { + t.Errorf("ctx.String() = %q want prefix %q", s, prefix) + } + t.Log(ctx) + checkValues("before cancel") + if testTimeout { + select { + case <-ctx.Done(): + case <-time.After(timeout + 100*time.Millisecond): + errorf("ctx should have timed out") + } + checkValues("after timeout") + } else { + cancel := cancels[rand.Intn(len(cancels))] + cancel() + select { + case <-ctx.Done(): + default: + errorf("ctx should be canceled") + } + checkValues("after cancel") + } +} + +func TestCancelRemoves(t *testing.T) { + checkChildren := func(when string, ctx Context, want int) { + if got := len(ctx.(*cancelCtx).children); got != want { + t.Errorf("%s: context has %d children, want %d", when, got, want) + } + } + + ctx, _ := WithCancel(Background()) + checkChildren("after creation", ctx, 0) + _, cancel := WithCancel(ctx) + checkChildren("with WithCancel child ", ctx, 1) + cancel() + checkChildren("after cancelling WithCancel child", ctx, 0) + + ctx, _ = WithCancel(Background()) + checkChildren("after creation", ctx, 0) + _, cancel = WithTimeout(ctx, 60*time.Minute) + checkChildren("with WithTimeout child ", ctx, 1) + cancel() + checkChildren("after cancelling WithTimeout child", ctx, 0) +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go new file mode 100644 index 0000000..606cf1f --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go @@ -0,0 +1,74 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +// Package ctxhttp provides helper functions for performing context-aware HTTP requests. +package ctxhttp // import "golang.org/x/net/context/ctxhttp" + +import ( + "io" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +// Do sends an HTTP request with the provided http.Client and returns +// an HTTP response. +// +// If the client is nil, http.DefaultClient is used. +// +// The provided ctx must be non-nil. If it is canceled or times out, +// ctx.Err() will be returned. +func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + resp, err := client.Do(req.WithContext(ctx)) + // If we got an error, and the context has been canceled, + // the context's error is probably more useful. 
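+ // (client.Do reports such failures as a *url.Error, whereas ctx.Err()
+ // yields the stable context.Canceled or context.DeadlineExceeded
+ // sentinels that callers can compare against directly.)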
+ if err != nil { + select { + case <-ctx.Done(): + err = ctx.Err() + default: + } + } + return resp, err +} + +// Get issues a GET request via the Do function. +func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Head issues a HEAD request via the Do function. +func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Post issues a POST request via the Do function. +func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return Do(ctx, client, req) +} + +// PostForm issues a POST request via the Do function. +func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { + return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go new file mode 100644 index 0000000..72411b1 --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go @@ -0,0 +1,29 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9,go1.7 + +package ctxhttp + +import ( + "io" + "net/http" + "net/http/httptest" + "testing" + + "context" +) + +func TestGo17Context(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "ok") + })) + defer ts.Close() + ctx := context.Background() + resp, err := Get(ctx, http.DefaultClient, ts.URL) + if resp == nil || err != nil { + t.Fatalf("error received from client: %v %v", err, resp) + } + resp.Body.Close() +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go new file mode 100644 index 0000000..926870c --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go @@ -0,0 +1,147 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package ctxhttp // import "golang.org/x/net/context/ctxhttp" + +import ( + "io" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +func nop() {} + +var ( + testHookContextDoneBeforeHeaders = nop + testHookDoReturned = nop + testHookDidBodyClose = nop +) + +// Do sends an HTTP request with the provided http.Client and returns an HTTP response. +// If the client is nil, http.DefaultClient is used. +// If the context is canceled or times out, ctx.Err() will be returned. +func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + + // TODO(djd): Respect any existing value of req.Cancel. 
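+ // req.Cancel (added in Go 1.5) is a channel that, once closed, tells the
+ // Transport to abort the in-flight request; Do closes it below when ctx
+ // is canceled.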
+ cancel := make(chan struct{}) + req.Cancel = cancel + + type responseAndError struct { + resp *http.Response + err error + } + result := make(chan responseAndError, 1) + + // Make local copies of test hooks closed over by goroutines below. + // Prevents data races in tests. + testHookDoReturned := testHookDoReturned + testHookDidBodyClose := testHookDidBodyClose + + go func() { + resp, err := client.Do(req) + testHookDoReturned() + result <- responseAndError{resp, err} + }() + + var resp *http.Response + + select { + case <-ctx.Done(): + testHookContextDoneBeforeHeaders() + close(cancel) + // Clean up after the goroutine calling client.Do: + go func() { + if r := <-result; r.resp != nil { + testHookDidBodyClose() + r.resp.Body.Close() + } + }() + return nil, ctx.Err() + case r := <-result: + var err error + resp, err = r.resp, r.err + if err != nil { + return resp, err + } + } + + c := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + close(cancel) + case <-c: + // The response's Body is closed. + } + }() + resp.Body = ¬ifyingReader{resp.Body, c} + + return resp, nil +} + +// Get issues a GET request via the Do function. +func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Head issues a HEAD request via the Do function. +func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Post issues a POST request via the Do function. +func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return Do(ctx, client, req) +} + +// PostForm issues a POST request via the Do function. +func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { + return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} + +// notifyingReader is an io.ReadCloser that closes the notify channel after +// Close is called or a Read fails on the underlying ReadCloser. +type notifyingReader struct { + io.ReadCloser + notify chan<- struct{} +} + +func (r *notifyingReader) Read(p []byte) (int, error) { + n, err := r.ReadCloser.Read(p) + if err != nil && r.notify != nil { + close(r.notify) + r.notify = nil + } + return n, err +} + +func (r *notifyingReader) Close() error { + err := r.ReadCloser.Close() + if r.notify != nil { + close(r.notify) + r.notify = nil + } + return err +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go new file mode 100644 index 0000000..9159cf0 --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go @@ -0,0 +1,79 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
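+// The pre-Go 1.7 Do above covers two cancelation windows: while waiting for
+// the response it closes req.Cancel and cleans up the late result, and once
+// headers have arrived it wraps the body in notifyingReader so its watchdog
+// goroutine exits when the body is closed or a read fails. The test below
+// exercises the first window and verifies that the abandoned response body
+// is closed.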
+ +// +build !plan9,!go1.7 + +package ctxhttp + +import ( + "net" + "net/http" + "net/http/httptest" + "sync" + "testing" + "time" + + "golang.org/x/net/context" +) + +// golang.org/issue/14065 +func TestClosesResponseBodyOnCancel(t *testing.T) { + defer func() { testHookContextDoneBeforeHeaders = nop }() + defer func() { testHookDoReturned = nop }() + defer func() { testHookDidBodyClose = nop }() + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) + defer ts.Close() + + ctx, cancel := context.WithCancel(context.Background()) + + // closed when Do enters select case <-ctx.Done() + enteredDonePath := make(chan struct{}) + + testHookContextDoneBeforeHeaders = func() { + close(enteredDonePath) + } + + testHookDoReturned = func() { + // We now have the result (the Flush'd headers) at least, + // so we can cancel the request. + cancel() + + // But block the client.Do goroutine from sending + // until Do enters into the <-ctx.Done() path, since + // otherwise if both channels are readable, select + // picks a random one. + <-enteredDonePath + } + + sawBodyClose := make(chan struct{}) + testHookDidBodyClose = func() { close(sawBodyClose) } + + tr := &http.Transport{} + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + req, _ := http.NewRequest("GET", ts.URL, nil) + _, doErr := Do(ctx, c, req) + + select { + case <-sawBodyClose: + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for body to close") + } + + if doErr != ctx.Err() { + t.Errorf("Do error = %v; want %v", doErr, ctx.Err()) + } +} + +type noteCloseConn struct { + net.Conn + onceClose sync.Once + closefn func() +} + +func (c *noteCloseConn) Close() error { + c.onceClose.Do(c.closefn) + return c.Conn.Close() +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go new file mode 100644 index 0000000..1e41551 --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go @@ -0,0 +1,105 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
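+// Typical use of the helpers under test, as a minimal sketch (the URL and
+// timeout are illustrative only):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+//	defer cancel()
+//	res, err := ctxhttp.Get(ctx, nil, "http://example.com/") // nil client means http.DefaultClient
+//	if err != nil {
+//		return err // ctx.Err() when canceled or timed out
+//	}
+//	defer res.Body.Close()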
+ +// +build !plan9 + +package ctxhttp + +import ( + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + "time" + + "golang.org/x/net/context" +) + +const ( + requestDuration = 100 * time.Millisecond + requestBody = "ok" +) + +func okHandler(w http.ResponseWriter, r *http.Request) { + time.Sleep(requestDuration) + io.WriteString(w, requestBody) +} + +func TestNoTimeout(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(okHandler)) + defer ts.Close() + + ctx := context.Background() + res, err := Get(ctx, nil, ts.URL) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if string(slurp) != requestBody { + t.Errorf("body = %q; want %q", slurp, requestBody) + } +} + +func TestCancelBeforeHeaders(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + + blockServer := make(chan struct{}) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + cancel() + <-blockServer + io.WriteString(w, requestBody) + })) + defer ts.Close() + defer close(blockServer) + + res, err := Get(ctx, nil, ts.URL) + if err == nil { + res.Body.Close() + t.Fatal("Get returned unexpected nil error") + } + if err != context.Canceled { + t.Errorf("err = %v; want %v", err, context.Canceled) + } +} + +func TestCancelAfterHangingRequest(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.(http.Flusher).Flush() + <-w.(http.CloseNotifier).CloseNotify() + })) + defer ts.Close() + + ctx, cancel := context.WithCancel(context.Background()) + resp, err := Get(ctx, nil, ts.URL) + if err != nil { + t.Fatalf("unexpected error in Get: %v", err) + } + + // Cancel before reading the body. + // Reading the response body should fail, since the request was + // canceled before anything was written. + cancel() + + done := make(chan struct{}) + + go func() { + b, err := ioutil.ReadAll(resp.Body) + if len(b) != 0 || err == nil { + t.Errorf(`Read got (%q, %v); want ("", error)`, b, err) + } + close(done) + }() + + select { + case <-time.After(1 * time.Second): + t.Errorf("Test timed out") + case <-done: + } +} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go new file mode 100644 index 0000000..d20f52b --- /dev/null +++ b/vendor/golang.org/x/net/context/go17.go @@ -0,0 +1,72 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +var ( + todo = context.TODO() + background = context.Background() +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = context.DeadlineExceeded + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete.
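+//
+// A common pattern (an illustrative sketch; worker is hypothetical and
+// should return once ctx.Done() is closed):
+//
+//	ctx, cancel := context.WithCancel(parent)
+//	defer cancel()
+//	go worker(ctx)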
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + ctx, f := context.WithCancel(parent) + return ctx, CancelFunc(f) +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + ctx, f := context.WithDeadline(parent, deadline) + return ctx, CancelFunc(f) +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go new file mode 100644 index 0000000..d88bd1d --- /dev/null +++ b/vendor/golang.org/x/net/context/go19.go @@ -0,0 +1,20 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package context + +import "context" // standard library's context, as of Go 1.7 + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context = context.Context + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go new file mode 100644 index 0000000..0f35592 --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -0,0 +1,300 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package context + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. 
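+// (The Go spec permits two distinct zero-size variables to share an address,
+// which would make background and todo indistinguishable in String below.)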
+type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = errors.New("context deadline exceeded") + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, c) + return c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) *cancelCtx { + return &cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. +func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. +func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. 
+ + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. + return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + *cancelCtx + timer *time.Timer // Under cancelCtx.mu. + + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
+// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. +type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go new file mode 100644 index 0000000..b105f80 --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go19.go @@ -0,0 +1,109 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package context + +import "time" + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + // + // WithCancel arranges for Done to be closed when cancel is called; + // WithDeadline arranges for Done to be closed when the deadline + // expires; WithTimeout arranges for Done to be closed when the timeout + // elapses. + // + // Done is provided for use in select statements: + // + // // Stream generates values with DoSomething and sends them to out + // // until DoSomething returns an error or ctx.Done is closed. + // func Stream(ctx context.Context, out chan<- Value) error { + // for { + // v, err := DoSomething(ctx) + // if err != nil { + // return err + // } + // select { + // case <-ctx.Done(): + // return ctx.Err() + // case out <- v: + // } + // } + // } + // + // See http://blog.golang.org/pipelines for more examples of how to use + // a Done channel for cancelation. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. 
+	// After Done is closed, successive calls to Err return the same value.
+	Err() error
+
+	// Value returns the value associated with this context for key, or nil
+	// if no value is associated with key. Successive calls to Value with
+	// the same key return the same result.
+	//
+	// Use context values only for request-scoped data that transits
+	// processes and API boundaries, not for passing optional parameters to
+	// functions.
+	//
+	// A key identifies a specific value in a Context. Functions that wish
+	// to store values in Context typically allocate a key in a global
+	// variable then use that key as the argument to context.WithValue and
+	// Context.Value. A key can be any type that supports equality;
+	// packages should define keys as an unexported type to avoid
+	// collisions.
+	//
+	// Packages that define a Context key should provide type-safe accessors
+	// for the values stored using that key:
+	//
+	// 	// Package user defines a User type that's stored in Contexts.
+	// 	package user
+	//
+	// 	import "golang.org/x/net/context"
+	//
+	// 	// User is the type of value stored in the Contexts.
+	// 	type User struct {...}
+	//
+	// 	// key is an unexported type for keys defined in this package.
+	// 	// This prevents collisions with keys defined in other packages.
+	// 	type key int
+	//
+	// 	// userKey is the key for user.User values in Contexts. It is
+	// 	// unexported; clients use user.NewContext and user.FromContext
+	// 	// instead of using this key directly.
+	// 	var userKey key = 0
+	//
+	// 	// NewContext returns a new Context that carries value u.
+	// 	func NewContext(ctx context.Context, u *User) context.Context {
+	// 		return context.WithValue(ctx, userKey, u)
+	// 	}
+	//
+	// 	// FromContext returns the User value stored in ctx, if any.
+	// 	func FromContext(ctx context.Context) (*User, bool) {
+	// 		u, ok := ctx.Value(userKey).(*User)
+	// 		return u, ok
+	// 	}
+	Value(key interface{}) interface{}
+}
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc func()
diff --git a/vendor/golang.org/x/net/context/withtimeout_test.go b/vendor/golang.org/x/net/context/withtimeout_test.go
new file mode 100644
index 0000000..e6f5669
--- /dev/null
+++ b/vendor/golang.org/x/net/context/withtimeout_test.go
@@ -0,0 +1,31 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context_test
+
+import (
+	"fmt"
+	"time"
+
+	"golang.org/x/net/context"
+)
+
+// This example passes a context with a timeout to tell a blocking function that
+// it should abandon its work after the timeout elapses.
+func ExampleWithTimeout() {
+	// Pass a context with a timeout to tell a blocking function that it
+	// should abandon its work after the timeout elapses.
+	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
+	defer cancel()
+
+	select {
+	case <-time.After(1 * time.Second):
+		fmt.Println("overslept")
+	case <-ctx.Done():
+		fmt.Println(ctx.Err()) // prints "context deadline exceeded"
+	}
+
+	// Output:
+	// context deadline exceeded
+}
diff --git a/vendor/golang.org/x/net/dict/dict.go b/vendor/golang.org/x/net/dict/dict.go
new file mode 100644
index 0000000..93e65c0
--- /dev/null
+++ b/vendor/golang.org/x/net/dict/dict.go
@@ -0,0 +1,210 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package dict implements the Dictionary Server Protocol
+// as defined in RFC 2229.
+package dict // import "golang.org/x/net/dict"
+
+import (
+	"net/textproto"
+	"strconv"
+	"strings"
+)
+
+// A Client represents a client connection to a dictionary server.
+type Client struct {
+	text *textproto.Conn
+}
+
+// Dial returns a new client connected to a dictionary server at
+// addr on the given network.
+func Dial(network, addr string) (*Client, error) {
+	text, err := textproto.Dial(network, addr)
+	if err != nil {
+		return nil, err
+	}
+	_, _, err = text.ReadCodeLine(220)
+	if err != nil {
+		text.Close()
+		return nil, err
+	}
+	return &Client{text: text}, nil
+}
+
+// Close closes the connection to the dictionary server.
+func (c *Client) Close() error {
+	return c.text.Close()
+}
+
+// A Dict represents a dictionary available on the server.
+type Dict struct {
+	Name string // short name of dictionary
+	Desc string // long description
+}
+
+// Dicts returns a list of the dictionaries available on the server.
+func (c *Client) Dicts() ([]Dict, error) {
+	id, err := c.text.Cmd("SHOW DB")
+	if err != nil {
+		return nil, err
+	}
+
+	c.text.StartResponse(id)
+	defer c.text.EndResponse(id)
+
+	_, _, err = c.text.ReadCodeLine(110)
+	if err != nil {
+		return nil, err
+	}
+	lines, err := c.text.ReadDotLines()
+	if err != nil {
+		return nil, err
+	}
+	_, _, err = c.text.ReadCodeLine(250)
+
+	dicts := make([]Dict, len(lines))
+	for i := range dicts {
+		d := &dicts[i]
+		a, _ := fields(lines[i])
+		if len(a) < 2 {
+			return nil, textproto.ProtocolError("invalid dictionary: " + lines[i])
+		}
+		d.Name = a[0]
+		d.Desc = a[1]
+	}
+	return dicts, err
+}
+
+// A Defn represents a definition.
+type Defn struct {
+	Dict Dict   // Dict where definition was found
+	Word string // Word being defined
+	Text []byte // Definition text, typically multiple lines
+}
+
+// Define requests the definition of the given word.
+// The argument dict names the dictionary to use,
+// the Name field of a Dict returned by Dicts.
+//
+// The special dictionary name "*" means to look in all the
+// server's dictionaries.
+// The special dictionary name "!" means to look in all the
+// server's dictionaries in turn, stopping after finding the word
+// in one of them.
+func (c *Client) Define(dict, word string) ([]*Defn, error) {
+	id, err := c.text.Cmd("DEFINE %s %q", dict, word)
+	if err != nil {
+		return nil, err
+	}
+
+	c.text.StartResponse(id)
+	defer c.text.EndResponse(id)
+
+	_, line, err := c.text.ReadCodeLine(150)
+	if err != nil {
+		return nil, err
+	}
+	a, _ := fields(line)
+	if len(a) < 1 {
+		return nil, textproto.ProtocolError("malformed response: " + line)
+	}
+	n, err := strconv.Atoi(a[0])
+	if err != nil {
+		return nil, textproto.ProtocolError("invalid definition count: " + a[0])
+	}
+	def := make([]*Defn, n)
+	for i := 0; i < n; i++ {
+		_, line, err = c.text.ReadCodeLine(151)
+		if err != nil {
+			return nil, err
+		}
+		a, _ := fields(line)
+		if len(a) < 3 {
+			// skip it, to keep protocol in sync
+			i--
+			n--
+			def = def[0:n]
+			continue
+		}
+		d := &Defn{Word: a[0], Dict: Dict{a[1], a[2]}}
+		d.Text, err = c.text.ReadDotBytes()
+		if err != nil {
+			return nil, err
+		}
+		def[i] = d
+	}
+	_, _, err = c.text.ReadCodeLine(250)
+	return def, err
+}
+
+// fields returns the fields in s.
+// Fields are space-separated unquoted words
+// or quoted with single or double quote.
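+//
+// For example (illustrative):
+//
+//	fields(`foo "bar baz" 'qux'`) // -> {"foo", "bar baz", "qux"}, nil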
+func fields(s string) ([]string, error) { + var v []string + i := 0 + for { + for i < len(s) && (s[i] == ' ' || s[i] == '\t') { + i++ + } + if i >= len(s) { + break + } + if s[i] == '"' || s[i] == '\'' { + q := s[i] + // quoted string + var j int + for j = i + 1; ; j++ { + if j >= len(s) { + return nil, textproto.ProtocolError("malformed quoted string") + } + if s[j] == '\\' { + j++ + continue + } + if s[j] == q { + j++ + break + } + } + v = append(v, unquote(s[i+1:j-1])) + i = j + } else { + // atom + var j int + for j = i; j < len(s); j++ { + if s[j] == ' ' || s[j] == '\t' || s[j] == '\\' || s[j] == '"' || s[j] == '\'' { + break + } + } + v = append(v, s[i:j]) + i = j + } + if i < len(s) { + c := s[i] + if c != ' ' && c != '\t' { + return nil, textproto.ProtocolError("quotes not on word boundaries") + } + } + } + return v, nil +} + +func unquote(s string) string { + if strings.Index(s, "\\") < 0 { + return s + } + b := []byte(s) + w := 0 + for r := 0; r < len(b); r++ { + c := b[r] + if c == '\\' { + r++ + c = b[r] + } + b[w] = c + w++ + } + return string(b[0:w]) +} diff --git a/vendor/golang.org/x/net/dns/dnsmessage/example_test.go b/vendor/golang.org/x/net/dns/dnsmessage/example_test.go new file mode 100644 index 0000000..8600a6b --- /dev/null +++ b/vendor/golang.org/x/net/dns/dnsmessage/example_test.go @@ -0,0 +1,132 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dnsmessage_test + +import ( + "fmt" + "net" + "strings" + + "golang.org/x/net/dns/dnsmessage" +) + +func mustNewName(name string) dnsmessage.Name { + n, err := dnsmessage.NewName(name) + if err != nil { + panic(err) + } + return n +} + +func ExampleParser() { + msg := dnsmessage.Message{ + Header: dnsmessage.Header{Response: true, Authoritative: true}, + Questions: []dnsmessage.Question{ + { + Name: mustNewName("foo.bar.example.com."), + Type: dnsmessage.TypeA, + Class: dnsmessage.ClassINET, + }, + { + Name: mustNewName("bar.example.com."), + Type: dnsmessage.TypeA, + Class: dnsmessage.ClassINET, + }, + }, + Answers: []dnsmessage.Resource{ + { + Header: dnsmessage.ResourceHeader{ + Name: mustNewName("foo.bar.example.com."), + Type: dnsmessage.TypeA, + Class: dnsmessage.ClassINET, + }, + Body: &dnsmessage.AResource{A: [4]byte{127, 0, 0, 1}}, + }, + { + Header: dnsmessage.ResourceHeader{ + Name: mustNewName("bar.example.com."), + Type: dnsmessage.TypeA, + Class: dnsmessage.ClassINET, + }, + Body: &dnsmessage.AResource{A: [4]byte{127, 0, 0, 2}}, + }, + }, + } + + buf, err := msg.Pack() + if err != nil { + panic(err) + } + + wantName := "bar.example.com." 
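+
+	// Parse the packed message incrementally: Start consumes the header,
+	// and each section is then walked record by record.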
+
+	var p dnsmessage.Parser
+	if _, err := p.Start(buf); err != nil {
+		panic(err)
+	}
+
+	for {
+		q, err := p.Question()
+		if err == dnsmessage.ErrSectionDone {
+			break
+		}
+		if err != nil {
+			panic(err)
+		}
+
+		if q.Name.String() != wantName {
+			continue
+		}
+
+		fmt.Println("Found question for name", wantName)
+		if err := p.SkipAllQuestions(); err != nil {
+			panic(err)
+		}
+		break
+	}
+
+	var gotIPs []net.IP
+	for {
+		h, err := p.AnswerHeader()
+		if err == dnsmessage.ErrSectionDone {
+			break
+		}
+		if err != nil {
+			panic(err)
+		}
+
+		if (h.Type != dnsmessage.TypeA && h.Type != dnsmessage.TypeAAAA) || h.Class != dnsmessage.ClassINET {
+			continue
+		}
+
+		if !strings.EqualFold(h.Name.String(), wantName) {
+			if err := p.SkipAnswer(); err != nil {
+				panic(err)
+			}
+			continue
+		}
+
+		switch h.Type {
+		case dnsmessage.TypeA:
+			r, err := p.AResource()
+			if err != nil {
+				panic(err)
+			}
+			gotIPs = append(gotIPs, r.A[:])
+		case dnsmessage.TypeAAAA:
+			r, err := p.AAAAResource()
+			if err != nil {
+				panic(err)
+			}
+			gotIPs = append(gotIPs, r.AAAA[:])
+		}
+	}
+
+	fmt.Printf("Found A/AAAA records for name %s: %v\n", wantName, gotIPs)
+
+	// Output:
+	// Found question for name bar.example.com.
+	// Found A/AAAA records for name bar.example.com.: [127.0.0.2]
+}
diff --git a/vendor/golang.org/x/net/dns/dnsmessage/message.go b/vendor/golang.org/x/net/dns/dnsmessage/message.go
new file mode 100644
index 0000000..38f8177
--- /dev/null
+++ b/vendor/golang.org/x/net/dns/dnsmessage/message.go
@@ -0,0 +1,2247 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package dnsmessage provides a mostly RFC 1035 compliant implementation of
+// DNS message packing and unpacking.
+//
+// The package also supports messages with Extension Mechanisms for DNS
+// (EDNS(0)) as defined in RFC 6891.
+//
+// This implementation is designed to minimize heap allocations and avoid
+// unnecessary packing and unpacking as much as possible.
+package dnsmessage
+
+import (
+	"errors"
+)
+
+// Message formats
+
+// A Type is a type of DNS request and response.
+type Type uint16
+
+// A Class is a type of network.
+type Class uint16
+
+// An OpCode is a DNS operation code.
+type OpCode uint16
+
+// An RCode is a DNS response status code.
+type RCode uint16
+
+// Wire constants.
+const (
+	// ResourceHeader.Type and Question.Type
+	TypeA     Type = 1
+	TypeNS    Type = 2
+	TypeCNAME Type = 5
+	TypeSOA   Type = 6
+	TypePTR   Type = 12
+	TypeMX    Type = 15
+	TypeTXT   Type = 16
+	TypeAAAA  Type = 28
+	TypeSRV   Type = 33
+	TypeOPT   Type = 41
+
+	// Question.Type
+	TypeWKS   Type = 11
+	TypeHINFO Type = 13
+	TypeMINFO Type = 14
+	TypeAXFR  Type = 252
+	TypeALL   Type = 255
+
+	// ResourceHeader.Class and Question.Class
+	ClassINET   Class = 1
+	ClassCSNET  Class = 2
+	ClassCHAOS  Class = 3
+	ClassHESIOD Class = 4
+
+	// Question.Class
+	ClassANY Class = 255
+
+	// Message.Rcode
+	RCodeSuccess        RCode = 0
+	RCodeFormatError    RCode = 1
+	RCodeServerFailure  RCode = 2
+	RCodeNameError      RCode = 3
+	RCodeNotImplemented RCode = 4
+	RCodeRefused        RCode = 5
+)
+
+var (
+	// ErrNotStarted indicates that the prerequisite information isn't
+	// available yet because the previous records haven't been appropriately
+	// parsed, skipped or finished.
+	ErrNotStarted = errors.New("parsing/packing of this type isn't available yet")
+
+	// ErrSectionDone indicates that all records in the section have been
+	// parsed or finished.
+	ErrSectionDone = errors.New("parsing/packing of this section has completed")
+
+	errBaseLen            = errors.New("insufficient data for base length type")
+	errCalcLen            = errors.New("insufficient data for calculated length type")
+	errReserved           = errors.New("segment prefix is reserved")
+	errTooManyPtr         = errors.New("too many pointers (>10)")
+	errInvalidPtr         = errors.New("invalid pointer")
+	errNilResouceBody     = errors.New("nil resource body")
+	errResourceLen        = errors.New("insufficient data for resource body length")
+	errSegTooLong         = errors.New("segment length too long")
+	errZeroSegLen         = errors.New("zero length segment")
+	errResTooLong         = errors.New("resource length too long")
+	errTooManyQuestions   = errors.New("too many Questions to pack (>65535)")
+	errTooManyAnswers     = errors.New("too many Answers to pack (>65535)")
+	errTooManyAuthorities = errors.New("too many Authorities to pack (>65535)")
+	errTooManyAdditionals = errors.New("too many Additionals to pack (>65535)")
+	errNonCanonicalName   = errors.New("name is not in canonical format (it must end with a .)")
+	errStringTooLong      = errors.New("character string exceeds maximum length (255)")
+	errCompressedSRV      = errors.New("compressed name in SRV resource data")
+)
+
+// Internal constants.
+const (
+	// packStartingCap is the default initial buffer size allocated during
+	// packing.
+	//
+	// The starting capacity doesn't matter too much, but most DNS responses
+	// will be <= 512 bytes, as that is the limit for DNS over UDP.
+	packStartingCap = 512
+
+	// uint16Len is the length (in bytes) of a uint16.
+	uint16Len = 2
+
+	// uint32Len is the length (in bytes) of a uint32.
+	uint32Len = 4
+
+	// headerLen is the length (in bytes) of a DNS header.
+	//
+	// A header is comprised of 6 uint16s and no padding.
+	headerLen = 6 * uint16Len
+)
+
+type nestedError struct {
+	// s is the current level's error message.
+	s string
+
+	// err is the nested error.
+	err error
+}
+
+// nestedError implements error.Error.
+func (e *nestedError) Error() string {
+	return e.s + ": " + e.err.Error()
+}
+
+// Header is a representation of a DNS message header.
+type Header struct {
+	ID                 uint16
+	Response           bool
+	OpCode             OpCode
+	Authoritative      bool
+	Truncated          bool
+	RecursionDesired   bool
+	RecursionAvailable bool
+	RCode              RCode
+}
+
+func (m *Header) pack() (id uint16, bits uint16) {
+	id = m.ID
+	bits = uint16(m.OpCode)<<11 | uint16(m.RCode)
+	if m.RecursionAvailable {
+		bits |= headerBitRA
+	}
+	if m.RecursionDesired {
+		bits |= headerBitRD
+	}
+	if m.Truncated {
+		bits |= headerBitTC
+	}
+	if m.Authoritative {
+		bits |= headerBitAA
+	}
+	if m.Response {
+		bits |= headerBitQR
+	}
+	return
+}
+
+// Message is a representation of a DNS message.
+type Message struct {
+	Header
+	Questions   []Question
+	Answers     []Resource
+	Authorities []Resource
+	Additionals []Resource
+}
+
+type section uint8
+
+const (
+	sectionNotStarted section = iota
+	sectionHeader
+	sectionQuestions
+	sectionAnswers
+	sectionAuthorities
+	sectionAdditionals
+	sectionDone
+
+	headerBitQR = 1 << 15 // query/response (response=1)
+	headerBitAA = 1 << 10 // authoritative
+	headerBitTC = 1 << 9  // truncated
+	headerBitRD = 1 << 8  // recursion desired
+	headerBitRA = 1 << 7  // recursion available
+)
+
+var sectionNames = map[section]string{
+	sectionHeader:      "header",
+	sectionQuestions:   "Question",
+	sectionAnswers:     "Answer",
+	sectionAuthorities: "Authority",
+	sectionAdditionals: "Additional",
+}
+
+// header is the wire format for a DNS message header.
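+//
+// It packs to exactly headerLen (12) bytes: six big-endian uint16s in the
+// field order below.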
+type header struct {
+	id          uint16
+	bits        uint16
+	questions   uint16
+	answers     uint16
+	authorities uint16
+	additionals uint16
+}
+
+func (h *header) count(sec section) uint16 {
+	switch sec {
+	case sectionQuestions:
+		return h.questions
+	case sectionAnswers:
+		return h.answers
+	case sectionAuthorities:
+		return h.authorities
+	case sectionAdditionals:
+		return h.additionals
+	}
+	return 0
+}
+
+// pack appends the wire format of the header to msg.
+func (h *header) pack(msg []byte) []byte {
+	msg = packUint16(msg, h.id)
+	msg = packUint16(msg, h.bits)
+	msg = packUint16(msg, h.questions)
+	msg = packUint16(msg, h.answers)
+	msg = packUint16(msg, h.authorities)
+	return packUint16(msg, h.additionals)
+}
+
+func (h *header) unpack(msg []byte, off int) (int, error) {
+	newOff := off
+	var err error
+	if h.id, newOff, err = unpackUint16(msg, newOff); err != nil {
+		return off, &nestedError{"id", err}
+	}
+	if h.bits, newOff, err = unpackUint16(msg, newOff); err != nil {
+		return off, &nestedError{"bits", err}
+	}
+	if h.questions, newOff, err = unpackUint16(msg, newOff); err != nil {
+		return off, &nestedError{"questions", err}
+	}
+	if h.answers, newOff, err = unpackUint16(msg, newOff); err != nil {
+		return off, &nestedError{"answers", err}
+	}
+	if h.authorities, newOff, err = unpackUint16(msg, newOff); err != nil {
+		return off, &nestedError{"authorities", err}
+	}
+	if h.additionals, newOff, err = unpackUint16(msg, newOff); err != nil {
+		return off, &nestedError{"additionals", err}
+	}
+	return newOff, nil
+}
+
+func (h *header) header() Header {
+	return Header{
+		ID:                 h.id,
+		Response:           (h.bits & headerBitQR) != 0,
+		OpCode:             OpCode(h.bits>>11) & 0xF,
+		Authoritative:      (h.bits & headerBitAA) != 0,
+		Truncated:          (h.bits & headerBitTC) != 0,
+		RecursionDesired:   (h.bits & headerBitRD) != 0,
+		RecursionAvailable: (h.bits & headerBitRA) != 0,
+		RCode:              RCode(h.bits & 0xF),
+	}
+}
+
+// A Resource is a DNS resource record.
+type Resource struct {
+	Header ResourceHeader
+	Body   ResourceBody
+}
+
+// A ResourceBody is a DNS resource record minus the header.
+type ResourceBody interface {
+	// pack packs a Resource except for its header.
+	pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error)
+
+	// realType returns the actual type of the Resource. This is used to
+	// fill in the header Type field.
+	realType() Type
+}
+
+// pack appends the wire format of the Resource to msg.
+func (r *Resource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+	if r.Body == nil {
+		return msg, errNilResouceBody
+	}
+	oldMsg := msg
+	r.Header.Type = r.Body.realType()
+	msg, length, err := r.Header.pack(msg, compression, compressionOff)
+	if err != nil {
+		return msg, &nestedError{"ResourceHeader", err}
+	}
+	preLen := len(msg)
+	msg, err = r.Body.pack(msg, compression, compressionOff)
+	if err != nil {
+		return msg, &nestedError{"content", err}
+	}
+	if err := r.Header.fixLen(msg, length, preLen); err != nil {
+		return oldMsg, err
+	}
+	return msg, nil
+}
+
+// A Parser allows incrementally parsing a DNS message.
+//
+// When parsing is started, the Header is parsed. Next, each Question can be
+// either parsed or skipped. Alternatively, all Questions can be skipped at
+// once. When all Questions have been parsed, attempting to parse or skip
+// further Questions will return ErrSectionDone. 
After all Questions have been either parsed or skipped, all +// Answers, Authorities and Additionals can be either parsed or skipped in the +// same way, and each type of Resource must be fully parsed or skipped before +// proceeding to the next type of Resource. +// +// Note that there is no requirement to fully skip or parse the message. +type Parser struct { + msg []byte + header header + + section section + off int + index int + resHeaderValid bool + resHeader ResourceHeader +} + +// Start parses the header and enables the parsing of Questions. +func (p *Parser) Start(msg []byte) (Header, error) { + if p.msg != nil { + *p = Parser{} + } + p.msg = msg + var err error + if p.off, err = p.header.unpack(msg, 0); err != nil { + return Header{}, &nestedError{"unpacking header", err} + } + p.section = sectionQuestions + return p.header.header(), nil +} + +func (p *Parser) checkAdvance(sec section) error { + if p.section < sec { + return ErrNotStarted + } + if p.section > sec { + return ErrSectionDone + } + p.resHeaderValid = false + if p.index == int(p.header.count(sec)) { + p.index = 0 + p.section++ + return ErrSectionDone + } + return nil +} + +func (p *Parser) resource(sec section) (Resource, error) { + var r Resource + var err error + r.Header, err = p.resourceHeader(sec) + if err != nil { + return r, err + } + p.resHeaderValid = false + r.Body, p.off, err = unpackResourceBody(p.msg, p.off, r.Header) + if err != nil { + return Resource{}, &nestedError{"unpacking " + sectionNames[sec], err} + } + p.index++ + return r, nil +} + +func (p *Parser) resourceHeader(sec section) (ResourceHeader, error) { + if p.resHeaderValid { + return p.resHeader, nil + } + if err := p.checkAdvance(sec); err != nil { + return ResourceHeader{}, err + } + var hdr ResourceHeader + off, err := hdr.unpack(p.msg, p.off) + if err != nil { + return ResourceHeader{}, err + } + p.resHeaderValid = true + p.resHeader = hdr + p.off = off + return hdr, nil +} + +func (p *Parser) skipResource(sec section) error { + if p.resHeaderValid { + newOff := p.off + int(p.resHeader.Length) + if newOff > len(p.msg) { + return errResourceLen + } + p.off = newOff + p.resHeaderValid = false + p.index++ + return nil + } + if err := p.checkAdvance(sec); err != nil { + return err + } + var err error + p.off, err = skipResource(p.msg, p.off) + if err != nil { + return &nestedError{"skipping: " + sectionNames[sec], err} + } + p.index++ + return nil +} + +// Question parses a single Question. +func (p *Parser) Question() (Question, error) { + if err := p.checkAdvance(sectionQuestions); err != nil { + return Question{}, err + } + var name Name + off, err := name.unpack(p.msg, p.off) + if err != nil { + return Question{}, &nestedError{"unpacking Question.Name", err} + } + typ, off, err := unpackType(p.msg, off) + if err != nil { + return Question{}, &nestedError{"unpacking Question.Type", err} + } + class, off, err := unpackClass(p.msg, off) + if err != nil { + return Question{}, &nestedError{"unpacking Question.Class", err} + } + p.off = off + p.index++ + return Question{name, typ, class}, nil +} + +// AllQuestions parses all Questions. +func (p *Parser) AllQuestions() ([]Question, error) { + // Multiple questions are valid according to the spec, + // but servers don't actually support them. There will + // be at most one question here. + // + // Do not pre-allocate based on info in p.header, since + // the data is untrusted. 
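+	// (A crafted message could claim 65535 Questions in its header while
+	// carrying none at all.)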
+ qs := []Question{} + for { + q, err := p.Question() + if err == ErrSectionDone { + return qs, nil + } + if err != nil { + return nil, err + } + qs = append(qs, q) + } +} + +// SkipQuestion skips a single Question. +func (p *Parser) SkipQuestion() error { + if err := p.checkAdvance(sectionQuestions); err != nil { + return err + } + off, err := skipName(p.msg, p.off) + if err != nil { + return &nestedError{"skipping Question Name", err} + } + if off, err = skipType(p.msg, off); err != nil { + return &nestedError{"skipping Question Type", err} + } + if off, err = skipClass(p.msg, off); err != nil { + return &nestedError{"skipping Question Class", err} + } + p.off = off + p.index++ + return nil +} + +// SkipAllQuestions skips all Questions. +func (p *Parser) SkipAllQuestions() error { + for { + if err := p.SkipQuestion(); err == ErrSectionDone { + return nil + } else if err != nil { + return err + } + } +} + +// AnswerHeader parses a single Answer ResourceHeader. +func (p *Parser) AnswerHeader() (ResourceHeader, error) { + return p.resourceHeader(sectionAnswers) +} + +// Answer parses a single Answer Resource. +func (p *Parser) Answer() (Resource, error) { + return p.resource(sectionAnswers) +} + +// AllAnswers parses all Answer Resources. +func (p *Parser) AllAnswers() ([]Resource, error) { + // The most common query is for A/AAAA, which usually returns + // a handful of IPs. + // + // Pre-allocate up to a certain limit, since p.header is + // untrusted data. + n := int(p.header.answers) + if n > 20 { + n = 20 + } + as := make([]Resource, 0, n) + for { + a, err := p.Answer() + if err == ErrSectionDone { + return as, nil + } + if err != nil { + return nil, err + } + as = append(as, a) + } +} + +// SkipAnswer skips a single Answer Resource. +func (p *Parser) SkipAnswer() error { + return p.skipResource(sectionAnswers) +} + +// SkipAllAnswers skips all Answer Resources. +func (p *Parser) SkipAllAnswers() error { + for { + if err := p.SkipAnswer(); err == ErrSectionDone { + return nil + } else if err != nil { + return err + } + } +} + +// AuthorityHeader parses a single Authority ResourceHeader. +func (p *Parser) AuthorityHeader() (ResourceHeader, error) { + return p.resourceHeader(sectionAuthorities) +} + +// Authority parses a single Authority Resource. +func (p *Parser) Authority() (Resource, error) { + return p.resource(sectionAuthorities) +} + +// AllAuthorities parses all Authority Resources. +func (p *Parser) AllAuthorities() ([]Resource, error) { + // Authorities contains SOA in case of NXDOMAIN and friends, + // otherwise it is empty. + // + // Pre-allocate up to a certain limit, since p.header is + // untrusted data. + n := int(p.header.authorities) + if n > 10 { + n = 10 + } + as := make([]Resource, 0, n) + for { + a, err := p.Authority() + if err == ErrSectionDone { + return as, nil + } + if err != nil { + return nil, err + } + as = append(as, a) + } +} + +// SkipAuthority skips a single Authority Resource. +func (p *Parser) SkipAuthority() error { + return p.skipResource(sectionAuthorities) +} + +// SkipAllAuthorities skips all Authority Resources. +func (p *Parser) SkipAllAuthorities() error { + for { + if err := p.SkipAuthority(); err == ErrSectionDone { + return nil + } else if err != nil { + return err + } + } +} + +// AdditionalHeader parses a single Additional ResourceHeader. +func (p *Parser) AdditionalHeader() (ResourceHeader, error) { + return p.resourceHeader(sectionAdditionals) +} + +// Additional parses a single Additional Resource. 
+func (p *Parser) Additional() (Resource, error) { + return p.resource(sectionAdditionals) +} + +// AllAdditionals parses all Additional Resources. +func (p *Parser) AllAdditionals() ([]Resource, error) { + // Additionals usually contain OPT, and sometimes A/AAAA + // glue records. + // + // Pre-allocate up to a certain limit, since p.header is + // untrusted data. + n := int(p.header.additionals) + if n > 10 { + n = 10 + } + as := make([]Resource, 0, n) + for { + a, err := p.Additional() + if err == ErrSectionDone { + return as, nil + } + if err != nil { + return nil, err + } + as = append(as, a) + } +} + +// SkipAdditional skips a single Additional Resource. +func (p *Parser) SkipAdditional() error { + return p.skipResource(sectionAdditionals) +} + +// SkipAllAdditionals skips all Additional Resources. +func (p *Parser) SkipAllAdditionals() error { + for { + if err := p.SkipAdditional(); err == ErrSectionDone { + return nil + } else if err != nil { + return err + } + } +} + +// CNAMEResource parses a single CNAMEResource. +// +// One of the XXXHeader methods must have been called before calling this +// method. +func (p *Parser) CNAMEResource() (CNAMEResource, error) { + if !p.resHeaderValid || p.resHeader.Type != TypeCNAME { + return CNAMEResource{}, ErrNotStarted + } + r, err := unpackCNAMEResource(p.msg, p.off) + if err != nil { + return CNAMEResource{}, err + } + p.off += int(p.resHeader.Length) + p.resHeaderValid = false + p.index++ + return r, nil +} + +// MXResource parses a single MXResource. +// +// One of the XXXHeader methods must have been called before calling this +// method. +func (p *Parser) MXResource() (MXResource, error) { + if !p.resHeaderValid || p.resHeader.Type != TypeMX { + return MXResource{}, ErrNotStarted + } + r, err := unpackMXResource(p.msg, p.off) + if err != nil { + return MXResource{}, err + } + p.off += int(p.resHeader.Length) + p.resHeaderValid = false + p.index++ + return r, nil +} + +// NSResource parses a single NSResource. +// +// One of the XXXHeader methods must have been called before calling this +// method. +func (p *Parser) NSResource() (NSResource, error) { + if !p.resHeaderValid || p.resHeader.Type != TypeNS { + return NSResource{}, ErrNotStarted + } + r, err := unpackNSResource(p.msg, p.off) + if err != nil { + return NSResource{}, err + } + p.off += int(p.resHeader.Length) + p.resHeaderValid = false + p.index++ + return r, nil +} + +// PTRResource parses a single PTRResource. +// +// One of the XXXHeader methods must have been called before calling this +// method. +func (p *Parser) PTRResource() (PTRResource, error) { + if !p.resHeaderValid || p.resHeader.Type != TypePTR { + return PTRResource{}, ErrNotStarted + } + r, err := unpackPTRResource(p.msg, p.off) + if err != nil { + return PTRResource{}, err + } + p.off += int(p.resHeader.Length) + p.resHeaderValid = false + p.index++ + return r, nil +} + +// SOAResource parses a single SOAResource. +// +// One of the XXXHeader methods must have been called before calling this +// method. +func (p *Parser) SOAResource() (SOAResource, error) { + if !p.resHeaderValid || p.resHeader.Type != TypeSOA { + return SOAResource{}, ErrNotStarted + } + r, err := unpackSOAResource(p.msg, p.off) + if err != nil { + return SOAResource{}, err + } + p.off += int(p.resHeader.Length) + p.resHeaderValid = false + p.index++ + return r, nil +} + +// TXTResource parses a single TXTResource. +// +// One of the XXXHeader methods must have been called before calling this +// method. 
+func (p *Parser) TXTResource() (TXTResource, error) { + if !p.resHeaderValid || p.resHeader.Type != TypeTXT { + return TXTResource{}, ErrNotStarted + } + r, err := unpackTXTResource(p.msg, p.off, p.resHeader.Length) + if err != nil { + return TXTResource{}, err + } + p.off += int(p.resHeader.Length) + p.resHeaderValid = false + p.index++ + return r, nil +} + +// SRVResource parses a single SRVResource. +// +// One of the XXXHeader methods must have been called before calling this +// method. +func (p *Parser) SRVResource() (SRVResource, error) { + if !p.resHeaderValid || p.resHeader.Type != TypeSRV { + return SRVResource{}, ErrNotStarted + } + r, err := unpackSRVResource(p.msg, p.off) + if err != nil { + return SRVResource{}, err + } + p.off += int(p.resHeader.Length) + p.resHeaderValid = false + p.index++ + return r, nil +} + +// AResource parses a single AResource. +// +// One of the XXXHeader methods must have been called before calling this +// method. +func (p *Parser) AResource() (AResource, error) { + if !p.resHeaderValid || p.resHeader.Type != TypeA { + return AResource{}, ErrNotStarted + } + r, err := unpackAResource(p.msg, p.off) + if err != nil { + return AResource{}, err + } + p.off += int(p.resHeader.Length) + p.resHeaderValid = false + p.index++ + return r, nil +} + +// AAAAResource parses a single AAAAResource. +// +// One of the XXXHeader methods must have been called before calling this +// method. +func (p *Parser) AAAAResource() (AAAAResource, error) { + if !p.resHeaderValid || p.resHeader.Type != TypeAAAA { + return AAAAResource{}, ErrNotStarted + } + r, err := unpackAAAAResource(p.msg, p.off) + if err != nil { + return AAAAResource{}, err + } + p.off += int(p.resHeader.Length) + p.resHeaderValid = false + p.index++ + return r, nil +} + +// OPTResource parses a single OPTResource. +// +// One of the XXXHeader methods must have been called before calling this +// method. +func (p *Parser) OPTResource() (OPTResource, error) { + if !p.resHeaderValid || p.resHeader.Type != TypeOPT { + return OPTResource{}, ErrNotStarted + } + r, err := unpackOPTResource(p.msg, p.off, p.resHeader.Length) + if err != nil { + return OPTResource{}, err + } + p.off += int(p.resHeader.Length) + p.resHeaderValid = false + p.index++ + return r, nil +} + +// Unpack parses a full Message. +func (m *Message) Unpack(msg []byte) error { + var p Parser + var err error + if m.Header, err = p.Start(msg); err != nil { + return err + } + if m.Questions, err = p.AllQuestions(); err != nil { + return err + } + if m.Answers, err = p.AllAnswers(); err != nil { + return err + } + if m.Authorities, err = p.AllAuthorities(); err != nil { + return err + } + if m.Additionals, err = p.AllAdditionals(); err != nil { + return err + } + return nil +} + +// Pack packs a full Message. +func (m *Message) Pack() ([]byte, error) { + return m.AppendPack(make([]byte, 0, packStartingCap)) +} + +// AppendPack is like Pack but appends the full Message to b and returns the +// extended buffer. +func (m *Message) AppendPack(b []byte) ([]byte, error) { + // Validate the lengths. It is very unlikely that anyone will try to + // pack more than 65535 of any particular type, but it is possible and + // we should fail gracefully. 
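+	// (^uint16(0) is 65535, the largest value a 16-bit section count can
+	// hold.)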
+ if len(m.Questions) > int(^uint16(0)) { + return nil, errTooManyQuestions + } + if len(m.Answers) > int(^uint16(0)) { + return nil, errTooManyAnswers + } + if len(m.Authorities) > int(^uint16(0)) { + return nil, errTooManyAuthorities + } + if len(m.Additionals) > int(^uint16(0)) { + return nil, errTooManyAdditionals + } + + var h header + h.id, h.bits = m.Header.pack() + + h.questions = uint16(len(m.Questions)) + h.answers = uint16(len(m.Answers)) + h.authorities = uint16(len(m.Authorities)) + h.additionals = uint16(len(m.Additionals)) + + compressionOff := len(b) + msg := h.pack(b) + + // RFC 1035 allows (but does not require) compression for packing. RFC + // 1035 requires unpacking implementations to support compression, so + // unconditionally enabling it is fine. + // + // DNS lookups are typically done over UDP, and RFC 1035 states that UDP + // DNS messages can be a maximum of 512 bytes long. Without compression, + // many DNS response messages are over this limit, so enabling + // compression will help ensure compliance. + compression := map[string]int{} + + for i := range m.Questions { + var err error + if msg, err = m.Questions[i].pack(msg, compression, compressionOff); err != nil { + return nil, &nestedError{"packing Question", err} + } + } + for i := range m.Answers { + var err error + if msg, err = m.Answers[i].pack(msg, compression, compressionOff); err != nil { + return nil, &nestedError{"packing Answer", err} + } + } + for i := range m.Authorities { + var err error + if msg, err = m.Authorities[i].pack(msg, compression, compressionOff); err != nil { + return nil, &nestedError{"packing Authority", err} + } + } + for i := range m.Additionals { + var err error + if msg, err = m.Additionals[i].pack(msg, compression, compressionOff); err != nil { + return nil, &nestedError{"packing Additional", err} + } + } + + return msg, nil +} + +// A Builder allows incrementally packing a DNS message. +// +// Example usage: +// buf := make([]byte, 2, 514) +// b := NewBuilder(buf, Header{...}) +// b.EnableCompression() +// // Optionally start a section and add things to that section. +// // Repeat adding sections as necessary. +// buf, err := b.Finish() +// // If err is nil, buf[2:] will contain the built bytes. +type Builder struct { + // msg is the storage for the message being built. + msg []byte + + // section keeps track of the current section being built. + section section + + // header keeps track of what should go in the header when Finish is + // called. + header header + + // start is the starting index of the bytes allocated in msg for header. + start int + + // compression is a mapping from name suffixes to their starting index + // in msg. + compression map[string]int +} + +// NewBuilder creates a new builder with compression disabled. +// +// Note: Most users will want to immediately enable compression with the +// EnableCompression method. See that method's comment for why you may or may +// not want to enable compression. +// +// The DNS message is appended to the provided initial buffer buf (which may be +// nil) as it is built. The final message is returned by the (*Builder).Finish +// method, which may return the same underlying array if there was sufficient +// capacity in the slice. +func NewBuilder(buf []byte, h Header) Builder { + if buf == nil { + buf = make([]byte, 0, packStartingCap) + } + b := Builder{msg: buf, start: len(buf)} + b.header.id, b.header.bits = h.pack() + var hb [headerLen]byte + b.msg = append(b.msg, hb[:]...) 
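+	// hb is zero-valued; Finish later overwrites these reserved bytes with
+	// the real header.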
+ b.section = sectionHeader + return b +} + +// EnableCompression enables compression in the Builder. +// +// Leaving compression disabled avoids compression related allocations, but can +// result in larger message sizes. Be careful with this mode as it can cause +// messages to exceed the UDP size limit. +// +// According to RFC 1035, section 4.1.4, the use of compression is optional, but +// all implementations must accept both compressed and uncompressed DNS +// messages. +// +// Compression should be enabled before any sections are added for best results. +func (b *Builder) EnableCompression() { + b.compression = map[string]int{} +} + +func (b *Builder) startCheck(s section) error { + if b.section <= sectionNotStarted { + return ErrNotStarted + } + if b.section > s { + return ErrSectionDone + } + return nil +} + +// StartQuestions prepares the builder for packing Questions. +func (b *Builder) StartQuestions() error { + if err := b.startCheck(sectionQuestions); err != nil { + return err + } + b.section = sectionQuestions + return nil +} + +// StartAnswers prepares the builder for packing Answers. +func (b *Builder) StartAnswers() error { + if err := b.startCheck(sectionAnswers); err != nil { + return err + } + b.section = sectionAnswers + return nil +} + +// StartAuthorities prepares the builder for packing Authorities. +func (b *Builder) StartAuthorities() error { + if err := b.startCheck(sectionAuthorities); err != nil { + return err + } + b.section = sectionAuthorities + return nil +} + +// StartAdditionals prepares the builder for packing Additionals. +func (b *Builder) StartAdditionals() error { + if err := b.startCheck(sectionAdditionals); err != nil { + return err + } + b.section = sectionAdditionals + return nil +} + +func (b *Builder) incrementSectionCount() error { + var count *uint16 + var err error + switch b.section { + case sectionQuestions: + count = &b.header.questions + err = errTooManyQuestions + case sectionAnswers: + count = &b.header.answers + err = errTooManyAnswers + case sectionAuthorities: + count = &b.header.authorities + err = errTooManyAuthorities + case sectionAdditionals: + count = &b.header.additionals + err = errTooManyAdditionals + } + if *count == ^uint16(0) { + return err + } + *count++ + return nil +} + +// Question adds a single Question. +func (b *Builder) Question(q Question) error { + if b.section < sectionQuestions { + return ErrNotStarted + } + if b.section > sectionQuestions { + return ErrSectionDone + } + msg, err := q.pack(b.msg, b.compression, b.start) + if err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +func (b *Builder) checkResourceSection() error { + if b.section < sectionAnswers { + return ErrNotStarted + } + if b.section > sectionAdditionals { + return ErrSectionDone + } + return nil +} + +// CNAMEResource adds a single CNAMEResource. 
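+// Like the typed resource methods that follow, it packs the header, then the
+// body, and finally patches the header's length field via fixLen.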
+func (b *Builder) CNAMEResource(h ResourceHeader, r CNAMEResource) error { + if err := b.checkResourceSection(); err != nil { + return err + } + h.Type = r.realType() + msg, length, err := h.pack(b.msg, b.compression, b.start) + if err != nil { + return &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + if msg, err = r.pack(msg, b.compression, b.start); err != nil { + return &nestedError{"CNAMEResource body", err} + } + if err := h.fixLen(msg, length, preLen); err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +// MXResource adds a single MXResource. +func (b *Builder) MXResource(h ResourceHeader, r MXResource) error { + if err := b.checkResourceSection(); err != nil { + return err + } + h.Type = r.realType() + msg, length, err := h.pack(b.msg, b.compression, b.start) + if err != nil { + return &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + if msg, err = r.pack(msg, b.compression, b.start); err != nil { + return &nestedError{"MXResource body", err} + } + if err := h.fixLen(msg, length, preLen); err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +// NSResource adds a single NSResource. +func (b *Builder) NSResource(h ResourceHeader, r NSResource) error { + if err := b.checkResourceSection(); err != nil { + return err + } + h.Type = r.realType() + msg, length, err := h.pack(b.msg, b.compression, b.start) + if err != nil { + return &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + if msg, err = r.pack(msg, b.compression, b.start); err != nil { + return &nestedError{"NSResource body", err} + } + if err := h.fixLen(msg, length, preLen); err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +// PTRResource adds a single PTRResource. +func (b *Builder) PTRResource(h ResourceHeader, r PTRResource) error { + if err := b.checkResourceSection(); err != nil { + return err + } + h.Type = r.realType() + msg, length, err := h.pack(b.msg, b.compression, b.start) + if err != nil { + return &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + if msg, err = r.pack(msg, b.compression, b.start); err != nil { + return &nestedError{"PTRResource body", err} + } + if err := h.fixLen(msg, length, preLen); err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +// SOAResource adds a single SOAResource. +func (b *Builder) SOAResource(h ResourceHeader, r SOAResource) error { + if err := b.checkResourceSection(); err != nil { + return err + } + h.Type = r.realType() + msg, length, err := h.pack(b.msg, b.compression, b.start) + if err != nil { + return &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + if msg, err = r.pack(msg, b.compression, b.start); err != nil { + return &nestedError{"SOAResource body", err} + } + if err := h.fixLen(msg, length, preLen); err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +// TXTResource adds a single TXTResource. 
+func (b *Builder) TXTResource(h ResourceHeader, r TXTResource) error { + if err := b.checkResourceSection(); err != nil { + return err + } + h.Type = r.realType() + msg, length, err := h.pack(b.msg, b.compression, b.start) + if err != nil { + return &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + if msg, err = r.pack(msg, b.compression, b.start); err != nil { + return &nestedError{"TXTResource body", err} + } + if err := h.fixLen(msg, length, preLen); err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +// SRVResource adds a single SRVResource. +func (b *Builder) SRVResource(h ResourceHeader, r SRVResource) error { + if err := b.checkResourceSection(); err != nil { + return err + } + h.Type = r.realType() + msg, length, err := h.pack(b.msg, b.compression, b.start) + if err != nil { + return &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + if msg, err = r.pack(msg, b.compression, b.start); err != nil { + return &nestedError{"SRVResource body", err} + } + if err := h.fixLen(msg, length, preLen); err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +// AResource adds a single AResource. +func (b *Builder) AResource(h ResourceHeader, r AResource) error { + if err := b.checkResourceSection(); err != nil { + return err + } + h.Type = r.realType() + msg, length, err := h.pack(b.msg, b.compression, b.start) + if err != nil { + return &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + if msg, err = r.pack(msg, b.compression, b.start); err != nil { + return &nestedError{"AResource body", err} + } + if err := h.fixLen(msg, length, preLen); err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +// AAAAResource adds a single AAAAResource. +func (b *Builder) AAAAResource(h ResourceHeader, r AAAAResource) error { + if err := b.checkResourceSection(); err != nil { + return err + } + h.Type = r.realType() + msg, length, err := h.pack(b.msg, b.compression, b.start) + if err != nil { + return &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + if msg, err = r.pack(msg, b.compression, b.start); err != nil { + return &nestedError{"AAAAResource body", err} + } + if err := h.fixLen(msg, length, preLen); err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +// OPTResource adds a single OPTResource. +func (b *Builder) OPTResource(h ResourceHeader, r OPTResource) error { + if err := b.checkResourceSection(); err != nil { + return err + } + h.Type = r.realType() + msg, length, err := h.pack(b.msg, b.compression, b.start) + if err != nil { + return &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + if msg, err = r.pack(msg, b.compression, b.start); err != nil { + return &nestedError{"OPTResource body", err} + } + if err := h.fixLen(msg, length, preLen); err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +// Finish ends message building and generates a binary message. +func (b *Builder) Finish() ([]byte, error) { + if b.section < sectionHeader { + return nil, ErrNotStarted + } + b.section = sectionDone + // Space for the header was allocated in NewBuilder. 
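+	// Packing into the zero-length slice b.msg[b.start:b.start] appends
+	// within its capacity, overwriting the reserved bytes in place.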
+	b.header.pack(b.msg[b.start:b.start])
+	return b.msg, nil
+}
+
+// A ResourceHeader is the header of a DNS resource record. There are
+// many types of DNS resource records, but they all share the same header.
+type ResourceHeader struct {
+	// Name is the domain name to which this resource record pertains.
+	Name Name
+
+	// Type is the type of DNS resource record.
+	//
+	// This field will be set automatically during packing.
+	Type Type
+
+	// Class is the class of network to which this DNS resource record
+	// pertains.
+	Class Class
+
+	// TTL is the length of time (measured in seconds) for which this
+	// resource record is valid (time to live). All Resources in a set
+	// should have the same TTL (RFC 2181 Section 5.2).
+	TTL uint32
+
+	// Length is the length of data in the resource record after the header.
+	//
+	// This field will be set automatically during packing.
+	Length uint16
+}
+
+// pack appends the wire format of the ResourceHeader to oldMsg.
+//
+// The bytes where length was packed are returned as a slice so they can be
+// updated after the rest of the Resource has been packed.
+func (h *ResourceHeader) pack(oldMsg []byte, compression map[string]int, compressionOff int) (msg []byte, length []byte, err error) {
+	msg = oldMsg
+	if msg, err = h.Name.pack(msg, compression, compressionOff); err != nil {
+		return oldMsg, nil, &nestedError{"Name", err}
+	}
+	msg = packType(msg, h.Type)
+	msg = packClass(msg, h.Class)
+	msg = packUint32(msg, h.TTL)
+	lenBegin := len(msg)
+	msg = packUint16(msg, h.Length)
+	return msg, msg[lenBegin : lenBegin+uint16Len], nil
+}
+
+func (h *ResourceHeader) unpack(msg []byte, off int) (int, error) {
+	newOff := off
+	var err error
+	if newOff, err = h.Name.unpack(msg, newOff); err != nil {
+		return off, &nestedError{"Name", err}
+	}
+	if h.Type, newOff, err = unpackType(msg, newOff); err != nil {
+		return off, &nestedError{"Type", err}
+	}
+	if h.Class, newOff, err = unpackClass(msg, newOff); err != nil {
+		return off, &nestedError{"Class", err}
+	}
+	if h.TTL, newOff, err = unpackUint32(msg, newOff); err != nil {
+		return off, &nestedError{"TTL", err}
+	}
+	if h.Length, newOff, err = unpackUint16(msg, newOff); err != nil {
+		return off, &nestedError{"Length", err}
+	}
+	return newOff, nil
+}
+
+func (h *ResourceHeader) fixLen(msg []byte, length []byte, preLen int) error {
+	conLen := len(msg) - preLen
+	if conLen > int(^uint16(0)) {
+		return errResTooLong
+	}
+
+	// Fill in the length now that we know how long the content is.
+	packUint16(length[:0], uint16(conLen))
+	h.Length = uint16(conLen)
+
+	return nil
+}
+
+// EDNS(0) wire constants.
+const (
+	edns0Version = 0
+
+	edns0DNSSECOK     = 0x00008000
+	ednsVersionMask   = 0x00ff0000
+	edns0DNSSECOKMask = 0x00ff8000
+)
+
+// SetEDNS0 configures h for EDNS(0).
+//
+// The provided extRCode must be an extended RCode.
+func (h *ResourceHeader) SetEDNS0(udpPayloadLen int, extRCode RCode, dnssecOK bool) error {
+	h.Name = Name{Data: [nameLen]byte{'.'}, Length: 1} // RFC 6891 section 6.1.2
+	h.Type = TypeOPT
+	h.Class = Class(udpPayloadLen)
+	h.TTL = uint32(extRCode) >> 4 << 24
+	if dnssecOK {
+		h.TTL |= edns0DNSSECOK
+	}
+	return nil
+}
+
+// DNSSECAllowed reports whether the DNSSEC OK bit is set.
+func (h *ResourceHeader) DNSSECAllowed() bool {
+	return h.TTL&edns0DNSSECOKMask == edns0DNSSECOK // RFC 6891 section 6.1.3
+}
+
+// ExtendedRCode returns an extended RCode.
+//
+// The provided rcode must be the RCode in the DNS message header.
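+//
+// For example, a header RCode of 1 (format error) combined with an OPT
+// TTL whose top byte is 0xfe yields RCode(0xfe)<<4 | 1 = 0xfe1, the
+// inverse of the packing done by SetEDNS0 above.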
+func (h *ResourceHeader) ExtendedRCode(rcode RCode) RCode { + if h.TTL&ednsVersionMask == edns0Version { // RFC 6891 section 6.1.3 + return RCode(h.TTL>>24<<4) | rcode + } + return rcode +} + +func skipResource(msg []byte, off int) (int, error) { + newOff, err := skipName(msg, off) + if err != nil { + return off, &nestedError{"Name", err} + } + if newOff, err = skipType(msg, newOff); err != nil { + return off, &nestedError{"Type", err} + } + if newOff, err = skipClass(msg, newOff); err != nil { + return off, &nestedError{"Class", err} + } + if newOff, err = skipUint32(msg, newOff); err != nil { + return off, &nestedError{"TTL", err} + } + length, newOff, err := unpackUint16(msg, newOff) + if err != nil { + return off, &nestedError{"Length", err} + } + if newOff += int(length); newOff > len(msg) { + return off, errResourceLen + } + return newOff, nil +} + +// packUint16 appends the wire format of field to msg. +func packUint16(msg []byte, field uint16) []byte { + return append(msg, byte(field>>8), byte(field)) +} + +func unpackUint16(msg []byte, off int) (uint16, int, error) { + if off+uint16Len > len(msg) { + return 0, off, errBaseLen + } + return uint16(msg[off])<<8 | uint16(msg[off+1]), off + uint16Len, nil +} + +func skipUint16(msg []byte, off int) (int, error) { + if off+uint16Len > len(msg) { + return off, errBaseLen + } + return off + uint16Len, nil +} + +// packType appends the wire format of field to msg. +func packType(msg []byte, field Type) []byte { + return packUint16(msg, uint16(field)) +} + +func unpackType(msg []byte, off int) (Type, int, error) { + t, o, err := unpackUint16(msg, off) + return Type(t), o, err +} + +func skipType(msg []byte, off int) (int, error) { + return skipUint16(msg, off) +} + +// packClass appends the wire format of field to msg. +func packClass(msg []byte, field Class) []byte { + return packUint16(msg, uint16(field)) +} + +func unpackClass(msg []byte, off int) (Class, int, error) { + c, o, err := unpackUint16(msg, off) + return Class(c), o, err +} + +func skipClass(msg []byte, off int) (int, error) { + return skipUint16(msg, off) +} + +// packUint32 appends the wire format of field to msg. +func packUint32(msg []byte, field uint32) []byte { + return append( + msg, + byte(field>>24), + byte(field>>16), + byte(field>>8), + byte(field), + ) +} + +func unpackUint32(msg []byte, off int) (uint32, int, error) { + if off+uint32Len > len(msg) { + return 0, off, errBaseLen + } + v := uint32(msg[off])<<24 | uint32(msg[off+1])<<16 | uint32(msg[off+2])<<8 | uint32(msg[off+3]) + return v, off + uint32Len, nil +} + +func skipUint32(msg []byte, off int) (int, error) { + if off+uint32Len > len(msg) { + return off, errBaseLen + } + return off + uint32Len, nil +} + +// packText appends the wire format of field to msg. +func packText(msg []byte, field string) ([]byte, error) { + l := len(field) + if l > 255 { + return nil, errStringTooLong + } + msg = append(msg, byte(l)) + msg = append(msg, field...) 
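+	// The result is a DNS <character-string> (RFC 1035 section 3.3): a
+	// one-byte length followed by up to 255 bytes of data, so "foo" is
+	// packed as 0x03 'f' 'o' 'o'.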
+
+	return msg, nil
+}
+
+func unpackText(msg []byte, off int) (string, int, error) {
+	if off >= len(msg) {
+		return "", off, errBaseLen
+	}
+	beginOff := off + 1
+	endOff := beginOff + int(msg[off])
+	if endOff > len(msg) {
+		return "", off, errCalcLen
+	}
+	return string(msg[beginOff:endOff]), endOff, nil
+}
+
+func skipText(msg []byte, off int) (int, error) {
+	if off >= len(msg) {
+		return off, errBaseLen
+	}
+	endOff := off + 1 + int(msg[off])
+	if endOff > len(msg) {
+		return off, errCalcLen
+	}
+	return endOff, nil
+}
+
+// packBytes appends the wire format of field to msg.
+func packBytes(msg []byte, field []byte) []byte {
+	return append(msg, field...)
+}
+
+func unpackBytes(msg []byte, off int, field []byte) (int, error) {
+	newOff := off + len(field)
+	if newOff > len(msg) {
+		return off, errBaseLen
+	}
+	copy(field, msg[off:newOff])
+	return newOff, nil
+}
+
+func skipBytes(msg []byte, off int, field []byte) (int, error) {
+	newOff := off + len(field)
+	if newOff > len(msg) {
+		return off, errBaseLen
+	}
+	return newOff, nil
+}
+
+const nameLen = 255
+
+// A Name is a non-encoded domain name. It is used instead of strings to avoid
+// allocations.
+type Name struct {
+	Data   [nameLen]byte
+	Length uint8
+}
+
+// NewName creates a new Name from a string.
+func NewName(name string) (Name, error) {
+	if len([]byte(name)) > nameLen {
+		return Name{}, errCalcLen
+	}
+	n := Name{Length: uint8(len(name))}
+	copy(n.Data[:], []byte(name))
+	return n, nil
+}
+
+func (n Name) String() string {
+	return string(n.Data[:n.Length])
+}
+
+// pack appends the wire format of the Name to msg.
+//
+// Domain names are a sequence of counted strings split at the dots. They end
+// with a zero-length string. Compression can be used to reuse domain suffixes.
+//
+// The compression map will be updated with new domain suffixes. If compression
+// is nil, compression will not be used.
+func (n *Name) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+	oldMsg := msg
+
+	// The name must end with a trailing dot (canonical form).
+	if n.Length == 0 || n.Data[n.Length-1] != '.' {
+		return oldMsg, errNonCanonicalName
+	}
+
+	// Allow root domain.
+	if n.Data[0] == '.' && n.Length == 1 {
+		return append(msg, 0), nil
+	}
+
+	// Emit sequence of counted strings, chopping at dots.
+	for i, begin := 0, 0; i < int(n.Length); i++ {
+		// Check for the end of the segment.
+		if n.Data[i] == '.' {
+			// The two most significant bits have special meaning.
+			// It isn't allowed for segments to be long enough to
+			// need them.
+			if i-begin >= 1<<6 {
+				return oldMsg, errSegTooLong
+			}
+
+			// Segments must have a non-zero length.
+			if i-begin == 0 {
+				return oldMsg, errZeroSegLen
+			}
+
+			msg = append(msg, byte(i-begin))
+
+			for j := begin; j < i; j++ {
+				msg = append(msg, n.Data[j])
+			}
+
+			begin = i + 1
+			continue
+		}
+
+		// We can only compress domain suffixes starting with a new
+		// segment. A pointer is two bytes with the two most significant
+		// bits set to 1 to indicate that it is a pointer.
+		if (i == 0 || n.Data[i-1] == '.') && compression != nil {
+			if ptr, ok := compression[string(n.Data[i:])]; ok {
+				// Hit. Emit a pointer instead of the rest of
+				// the domain.
+				return append(msg, byte(ptr>>8|0xC0), byte(ptr)), nil
+			}
+
+			// Miss. Add the suffix to the compression table if the
+			// offset can be stored in the available 14 bits.
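+			// A pointer packs as 0xC0|hi6 followed by the low 8
+			// bits of the offset (e.g. offset 12 is 0xC0, 0x0C),
+			// so only offsets up to 0x3FFF fit.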
+			if len(msg) <= int(^uint16(0)>>2) {
+				compression[string(n.Data[i:])] = len(msg) - compressionOff
+			}
+		}
+	}
+	return append(msg, 0), nil
+}
+
+// unpack unpacks a domain name.
+func (n *Name) unpack(msg []byte, off int) (int, error) {
+	return n.unpackCompressed(msg, off, true /* allowCompression */)
+}
+
+func (n *Name) unpackCompressed(msg []byte, off int, allowCompression bool) (int, error) {
+	// currOff is the current working offset.
+	currOff := off
+
+	// newOff is the offset where the next record will start. Pointers lead
+	// to data that belongs to other names and thus doesn't count toward
+	// the usage of this name.
+	newOff := off
+
+	// ptr is the number of pointers followed.
+	var ptr int
+
+	// name is a slice representation of the name data.
+	name := n.Data[:0]
+
+Loop:
+	for {
+		if currOff >= len(msg) {
+			return off, errBaseLen
+		}
+		c := int(msg[currOff])
+		currOff++
+		switch c & 0xC0 {
+		case 0x00: // String segment
+			if c == 0x00 {
+				// A zero length signals the end of the name.
+				break Loop
+			}
+			endOff := currOff + c
+			if endOff > len(msg) {
+				return off, errCalcLen
+			}
+			name = append(name, msg[currOff:endOff]...)
+			name = append(name, '.')
+			currOff = endOff
+		case 0xC0: // Pointer
+			if !allowCompression {
+				return off, errCompressedSRV
+			}
+			if currOff >= len(msg) {
+				return off, errInvalidPtr
+			}
+			c1 := msg[currOff]
+			currOff++
+			if ptr == 0 {
+				newOff = currOff
+			}
+			// Don't follow too many pointers, in case there's a loop.
+			if ptr++; ptr > 10 {
+				return off, errTooManyPtr
+			}
+			currOff = (c^0xC0)<<8 | int(c1)
+		default:
+			// Prefixes 0x80 and 0x40 are reserved.
+			return off, errReserved
+		}
+	}
+	if len(name) == 0 {
+		name = append(name, '.')
+	}
+	if len(name) > len(n.Data) {
+		return off, errCalcLen
+	}
+	n.Length = uint8(len(name))
+	if ptr == 0 {
+		newOff = currOff
+	}
+	return newOff, nil
+}
+
+func skipName(msg []byte, off int) (int, error) {
+	// newOff is the offset where the next record will start. Pointers lead
+	// to data that belongs to other names and thus doesn't count toward
+	// the usage of this name.
+	newOff := off
+
+Loop:
+	for {
+		if newOff >= len(msg) {
+			return off, errBaseLen
+		}
+		c := int(msg[newOff])
+		newOff++
+		switch c & 0xC0 {
+		case 0x00:
+			if c == 0x00 {
+				// A zero length signals the end of the name.
+				break Loop
+			}
+			// literal string
+			newOff += c
+			if newOff > len(msg) {
+				return off, errCalcLen
+			}
+		case 0xC0:
+			// Pointer to somewhere else in msg.
+
+			// Pointers are two bytes.
+			newOff++
+
+			// Don't follow the pointer as the data here has ended.
+			break Loop
+		default:
+			// Prefixes 0x80 and 0x40 are reserved.
+			return off, errReserved
+		}
+	}
+
+	return newOff, nil
+}
+
+// A Question is a DNS query.
+type Question struct {
+	Name  Name
+	Type  Type
+	Class Class
+}
+
+// pack appends the wire format of the Question to msg.
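+//
+// For example, ignoring compression, an A/ClassINET question for
+// "example.com." packs as the name (0x07 "example" 0x03 "com" 0x00)
+// followed by the 16-bit type 0x0001 and class 0x0001.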
+func (q *Question) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { + msg, err := q.Name.pack(msg, compression, compressionOff) + if err != nil { + return msg, &nestedError{"Name", err} + } + msg = packType(msg, q.Type) + return packClass(msg, q.Class), nil +} + +func unpackResourceBody(msg []byte, off int, hdr ResourceHeader) (ResourceBody, int, error) { + var ( + r ResourceBody + err error + name string + ) + switch hdr.Type { + case TypeA: + var rb AResource + rb, err = unpackAResource(msg, off) + r = &rb + name = "A" + case TypeNS: + var rb NSResource + rb, err = unpackNSResource(msg, off) + r = &rb + name = "NS" + case TypeCNAME: + var rb CNAMEResource + rb, err = unpackCNAMEResource(msg, off) + r = &rb + name = "CNAME" + case TypeSOA: + var rb SOAResource + rb, err = unpackSOAResource(msg, off) + r = &rb + name = "SOA" + case TypePTR: + var rb PTRResource + rb, err = unpackPTRResource(msg, off) + r = &rb + name = "PTR" + case TypeMX: + var rb MXResource + rb, err = unpackMXResource(msg, off) + r = &rb + name = "MX" + case TypeTXT: + var rb TXTResource + rb, err = unpackTXTResource(msg, off, hdr.Length) + r = &rb + name = "TXT" + case TypeAAAA: + var rb AAAAResource + rb, err = unpackAAAAResource(msg, off) + r = &rb + name = "AAAA" + case TypeSRV: + var rb SRVResource + rb, err = unpackSRVResource(msg, off) + r = &rb + name = "SRV" + case TypeOPT: + var rb OPTResource + rb, err = unpackOPTResource(msg, off, hdr.Length) + r = &rb + name = "OPT" + } + if err != nil { + return nil, off, &nestedError{name + " record", err} + } + if r == nil { + return nil, off, errors.New("invalid resource type: " + string(hdr.Type+'0')) + } + return r, off + int(hdr.Length), nil +} + +// A CNAMEResource is a CNAME Resource record. +type CNAMEResource struct { + CNAME Name +} + +func (r *CNAMEResource) realType() Type { + return TypeCNAME +} + +// pack appends the wire format of the CNAMEResource to msg. +func (r *CNAMEResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { + return r.CNAME.pack(msg, compression, compressionOff) +} + +func unpackCNAMEResource(msg []byte, off int) (CNAMEResource, error) { + var cname Name + if _, err := cname.unpack(msg, off); err != nil { + return CNAMEResource{}, err + } + return CNAMEResource{cname}, nil +} + +// An MXResource is an MX Resource record. +type MXResource struct { + Pref uint16 + MX Name +} + +func (r *MXResource) realType() Type { + return TypeMX +} + +// pack appends the wire format of the MXResource to msg. +func (r *MXResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { + oldMsg := msg + msg = packUint16(msg, r.Pref) + msg, err := r.MX.pack(msg, compression, compressionOff) + if err != nil { + return oldMsg, &nestedError{"MXResource.MX", err} + } + return msg, nil +} + +func unpackMXResource(msg []byte, off int) (MXResource, error) { + pref, off, err := unpackUint16(msg, off) + if err != nil { + return MXResource{}, &nestedError{"Pref", err} + } + var mx Name + if _, err := mx.unpack(msg, off); err != nil { + return MXResource{}, &nestedError{"MX", err} + } + return MXResource{pref, mx}, nil +} + +// An NSResource is an NS Resource record. +type NSResource struct { + NS Name +} + +func (r *NSResource) realType() Type { + return TypeNS +} + +// pack appends the wire format of the NSResource to msg. 
+func (r *NSResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+	return r.NS.pack(msg, compression, compressionOff)
+}
+
+func unpackNSResource(msg []byte, off int) (NSResource, error) {
+	var ns Name
+	if _, err := ns.unpack(msg, off); err != nil {
+		return NSResource{}, err
+	}
+	return NSResource{ns}, nil
+}
+
+// A PTRResource is a PTR Resource record.
+type PTRResource struct {
+	PTR Name
+}
+
+func (r *PTRResource) realType() Type {
+	return TypePTR
+}
+
+// pack appends the wire format of the PTRResource to msg.
+func (r *PTRResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+	return r.PTR.pack(msg, compression, compressionOff)
+}
+
+func unpackPTRResource(msg []byte, off int) (PTRResource, error) {
+	var ptr Name
+	if _, err := ptr.unpack(msg, off); err != nil {
+		return PTRResource{}, err
+	}
+	return PTRResource{ptr}, nil
+}
+
+// An SOAResource is an SOA Resource record.
+type SOAResource struct {
+	NS      Name
+	MBox    Name
+	Serial  uint32
+	Refresh uint32
+	Retry   uint32
+	Expire  uint32
+
+	// MinTTL is the default TTL of resource records which did not
+	// contain a TTL value, and the TTL of negative responses. (RFC 2308
+	// Section 4)
+	MinTTL uint32
+}
+
+func (r *SOAResource) realType() Type {
+	return TypeSOA
+}
+
+// pack appends the wire format of the SOAResource to msg.
+func (r *SOAResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+	oldMsg := msg
+	msg, err := r.NS.pack(msg, compression, compressionOff)
+	if err != nil {
+		return oldMsg, &nestedError{"SOAResource.NS", err}
+	}
+	msg, err = r.MBox.pack(msg, compression, compressionOff)
+	if err != nil {
+		return oldMsg, &nestedError{"SOAResource.MBox", err}
+	}
+	msg = packUint32(msg, r.Serial)
+	msg = packUint32(msg, r.Refresh)
+	msg = packUint32(msg, r.Retry)
+	msg = packUint32(msg, r.Expire)
+	return packUint32(msg, r.MinTTL), nil
+}
+
+func unpackSOAResource(msg []byte, off int) (SOAResource, error) {
+	var ns Name
+	off, err := ns.unpack(msg, off)
+	if err != nil {
+		return SOAResource{}, &nestedError{"NS", err}
+	}
+	var mbox Name
+	if off, err = mbox.unpack(msg, off); err != nil {
+		return SOAResource{}, &nestedError{"MBox", err}
+	}
+	serial, off, err := unpackUint32(msg, off)
+	if err != nil {
+		return SOAResource{}, &nestedError{"Serial", err}
+	}
+	refresh, off, err := unpackUint32(msg, off)
+	if err != nil {
+		return SOAResource{}, &nestedError{"Refresh", err}
+	}
+	retry, off, err := unpackUint32(msg, off)
+	if err != nil {
+		return SOAResource{}, &nestedError{"Retry", err}
+	}
+	expire, off, err := unpackUint32(msg, off)
+	if err != nil {
+		return SOAResource{}, &nestedError{"Expire", err}
+	}
+	minTTL, _, err := unpackUint32(msg, off)
+	if err != nil {
+		return SOAResource{}, &nestedError{"MinTTL", err}
+	}
+	return SOAResource{ns, mbox, serial, refresh, retry, expire, minTTL}, nil
+}
+
+// A TXTResource is a TXT Resource record.
+type TXTResource struct {
+	TXT []string
+}
+
+func (r *TXTResource) realType() Type {
+	return TypeTXT
+}
+
+// pack appends the wire format of the TXTResource to msg.
+func (r *TXTResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { + oldMsg := msg + for _, s := range r.TXT { + var err error + msg, err = packText(msg, s) + if err != nil { + return oldMsg, err + } + } + return msg, nil +} + +func unpackTXTResource(msg []byte, off int, length uint16) (TXTResource, error) { + txts := make([]string, 0, 1) + for n := uint16(0); n < length; { + var t string + var err error + if t, off, err = unpackText(msg, off); err != nil { + return TXTResource{}, &nestedError{"text", err} + } + // Check if we got too many bytes. + if length-n < uint16(len(t))+1 { + return TXTResource{}, errCalcLen + } + n += uint16(len(t)) + 1 + txts = append(txts, t) + } + return TXTResource{txts}, nil +} + +// An SRVResource is an SRV Resource record. +type SRVResource struct { + Priority uint16 + Weight uint16 + Port uint16 + Target Name // Not compressed as per RFC 2782. +} + +func (r *SRVResource) realType() Type { + return TypeSRV +} + +// pack appends the wire format of the SRVResource to msg. +func (r *SRVResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { + oldMsg := msg + msg = packUint16(msg, r.Priority) + msg = packUint16(msg, r.Weight) + msg = packUint16(msg, r.Port) + msg, err := r.Target.pack(msg, nil, compressionOff) + if err != nil { + return oldMsg, &nestedError{"SRVResource.Target", err} + } + return msg, nil +} + +func unpackSRVResource(msg []byte, off int) (SRVResource, error) { + priority, off, err := unpackUint16(msg, off) + if err != nil { + return SRVResource{}, &nestedError{"Priority", err} + } + weight, off, err := unpackUint16(msg, off) + if err != nil { + return SRVResource{}, &nestedError{"Weight", err} + } + port, off, err := unpackUint16(msg, off) + if err != nil { + return SRVResource{}, &nestedError{"Port", err} + } + var target Name + if _, err := target.unpackCompressed(msg, off, false /* allowCompression */); err != nil { + return SRVResource{}, &nestedError{"Target", err} + } + return SRVResource{priority, weight, port, target}, nil +} + +// An AResource is an A Resource record. +type AResource struct { + A [4]byte +} + +func (r *AResource) realType() Type { + return TypeA +} + +// pack appends the wire format of the AResource to msg. +func (r *AResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { + return packBytes(msg, r.A[:]), nil +} + +func unpackAResource(msg []byte, off int) (AResource, error) { + var a [4]byte + if _, err := unpackBytes(msg, off, a[:]); err != nil { + return AResource{}, err + } + return AResource{a}, nil +} + +// An AAAAResource is an AAAA Resource record. +type AAAAResource struct { + AAAA [16]byte +} + +func (r *AAAAResource) realType() Type { + return TypeAAAA +} + +// pack appends the wire format of the AAAAResource to msg. +func (r *AAAAResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { + return packBytes(msg, r.AAAA[:]), nil +} + +func unpackAAAAResource(msg []byte, off int) (AAAAResource, error) { + var aaaa [16]byte + if _, err := unpackBytes(msg, off, aaaa[:]); err != nil { + return AAAAResource{}, err + } + return AAAAResource{aaaa}, nil +} + +// An OPTResource is an OPT pseudo Resource record. +// +// The pseudo resource record is part of the extension mechanisms for DNS +// as defined in RFC 6891. +type OPTResource struct { + Options []Option +} + +// An Option represents a DNS message option within OPTResource. 
+// +// The message option is part of the extension mechanisms for DNS as +// defined in RFC 6891. +type Option struct { + Code uint16 // option code + Data []byte +} + +func (r *OPTResource) realType() Type { + return TypeOPT +} + +func (r *OPTResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { + for _, opt := range r.Options { + msg = packUint16(msg, opt.Code) + l := uint16(len(opt.Data)) + msg = packUint16(msg, l) + msg = packBytes(msg, opt.Data) + } + return msg, nil +} + +func unpackOPTResource(msg []byte, off int, length uint16) (OPTResource, error) { + var opts []Option + for oldOff := off; off < oldOff+int(length); { + var err error + var o Option + o.Code, off, err = unpackUint16(msg, off) + if err != nil { + return OPTResource{}, &nestedError{"Code", err} + } + var l uint16 + l, off, err = unpackUint16(msg, off) + if err != nil { + return OPTResource{}, &nestedError{"Data", err} + } + o.Data = make([]byte, l) + if copy(o.Data, msg[off:]) != int(l) { + return OPTResource{}, &nestedError{"Data", errCalcLen} + } + off += int(l) + opts = append(opts, o) + } + return OPTResource{opts}, nil +} diff --git a/vendor/golang.org/x/net/dns/dnsmessage/message_test.go b/vendor/golang.org/x/net/dns/dnsmessage/message_test.go new file mode 100644 index 0000000..7e4e4bd --- /dev/null +++ b/vendor/golang.org/x/net/dns/dnsmessage/message_test.go @@ -0,0 +1,1316 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dnsmessage + +import ( + "bytes" + "fmt" + "reflect" + "strings" + "testing" +) + +func mustNewName(name string) Name { + n, err := NewName(name) + if err != nil { + panic(err) + } + return n +} + +func mustEDNS0ResourceHeader(l int, extrc RCode, do bool) ResourceHeader { + h := ResourceHeader{Class: ClassINET} + if err := h.SetEDNS0(l, extrc, do); err != nil { + panic(err) + } + return h +} + +func (m *Message) String() string { + s := fmt.Sprintf("Message: %#v\n", &m.Header) + if len(m.Questions) > 0 { + s += "-- Questions\n" + for _, q := range m.Questions { + s += fmt.Sprintf("%#v\n", q) + } + } + if len(m.Answers) > 0 { + s += "-- Answers\n" + for _, a := range m.Answers { + s += fmt.Sprintf("%#v\n", a) + } + } + if len(m.Authorities) > 0 { + s += "-- Authorities\n" + for _, ns := range m.Authorities { + s += fmt.Sprintf("%#v\n", ns) + } + } + if len(m.Additionals) > 0 { + s += "-- Additionals\n" + for _, e := range m.Additionals { + s += fmt.Sprintf("%#v\n", e) + } + } + return s +} + +func TestNameString(t *testing.T) { + want := "foo" + name := mustNewName(want) + if got := fmt.Sprint(name); got != want { + t.Errorf("got fmt.Sprint(%#v) = %s, want = %s", name, got, want) + } +} + +func TestQuestionPackUnpack(t *testing.T) { + want := Question{ + Name: mustNewName("."), + Type: TypeA, + Class: ClassINET, + } + buf, err := want.pack(make([]byte, 1, 50), map[string]int{}, 1) + if err != nil { + t.Fatal("Question.pack() =", err) + } + var p Parser + p.msg = buf + p.header.questions = 1 + p.section = sectionQuestions + p.off = 1 + got, err := p.Question() + if err != nil { + t.Fatalf("Parser{%q}.Question() = %v", string(buf[1:]), err) + } + if p.off != len(buf) { + t.Errorf("unpacked different amount than packed: got = %d, want = %d", p.off, len(buf)) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("got from Parser.Question() = %+v, want = %+v", got, want) + } +} + +func TestName(t *testing.T) { + tests := []string{ + "", 
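+		// NewName only enforces the 255-byte length limit; canonical
+		// form (trailing dot, non-empty segments) is checked later by
+		// Name.pack, which is why the malformed names here still
+		// round-trip through String. See TestNamePackUnpack below.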
+ ".", + "google..com", + "google.com", + "google..com.", + "google.com.", + ".google.com.", + "www..google.com.", + "www.google.com.", + } + + for _, test := range tests { + n, err := NewName(test) + if err != nil { + t.Errorf("NewName(%q) = %v", test, err) + continue + } + if ns := n.String(); ns != test { + t.Errorf("got %#v.String() = %q, want = %q", n, ns, test) + continue + } + } +} + +func TestNamePackUnpack(t *testing.T) { + tests := []struct { + in string + want string + err error + }{ + {"", "", errNonCanonicalName}, + {".", ".", nil}, + {"google..com", "", errNonCanonicalName}, + {"google.com", "", errNonCanonicalName}, + {"google..com.", "", errZeroSegLen}, + {"google.com.", "google.com.", nil}, + {".google.com.", "", errZeroSegLen}, + {"www..google.com.", "", errZeroSegLen}, + {"www.google.com.", "www.google.com.", nil}, + } + + for _, test := range tests { + in := mustNewName(test.in) + want := mustNewName(test.want) + buf, err := in.pack(make([]byte, 0, 30), map[string]int{}, 0) + if err != test.err { + t.Errorf("got %q.pack() = %v, want = %v", test.in, err, test.err) + continue + } + if test.err != nil { + continue + } + var got Name + n, err := got.unpack(buf, 0) + if err != nil { + t.Errorf("%q.unpack() = %v", test.in, err) + continue + } + if n != len(buf) { + t.Errorf( + "unpacked different amount than packed for %q: got = %d, want = %d", + test.in, + n, + len(buf), + ) + } + if got != want { + t.Errorf("unpacking packing of %q: got = %#v, want = %#v", test.in, got, want) + } + } +} + +func TestIncompressibleName(t *testing.T) { + name := mustNewName("example.com.") + compression := map[string]int{} + buf, err := name.pack(make([]byte, 0, 100), compression, 0) + if err != nil { + t.Fatal("first Name.pack() =", err) + } + buf, err = name.pack(buf, compression, 0) + if err != nil { + t.Fatal("second Name.pack() =", err) + } + var n1 Name + off, err := n1.unpackCompressed(buf, 0, false /* allowCompression */) + if err != nil { + t.Fatal("unpacking incompressible name without pointers failed:", err) + } + var n2 Name + if _, err := n2.unpackCompressed(buf, off, false /* allowCompression */); err != errCompressedSRV { + t.Errorf("unpacking compressed incompressible name with pointers: got %v, want = %v", err, errCompressedSRV) + } +} + +func checkErrorPrefix(err error, prefix string) bool { + e, ok := err.(*nestedError) + return ok && e.s == prefix +} + +func TestHeaderUnpackError(t *testing.T) { + wants := []string{ + "id", + "bits", + "questions", + "answers", + "authorities", + "additionals", + } + var buf []byte + var h header + for _, want := range wants { + n, err := h.unpack(buf, 0) + if n != 0 || !checkErrorPrefix(err, want) { + t.Errorf("got header.unpack([%d]byte, 0) = %d, %v, want = 0, %s", len(buf), n, err, want) + } + buf = append(buf, 0, 0) + } +} + +func TestParserStart(t *testing.T) { + const want = "unpacking header" + var p Parser + for i := 0; i <= 1; i++ { + _, err := p.Start([]byte{}) + if !checkErrorPrefix(err, want) { + t.Errorf("got Parser.Start(nil) = _, %v, want = _, %s", err, want) + } + } +} + +func TestResourceNotStarted(t *testing.T) { + tests := []struct { + name string + fn func(*Parser) error + }{ + {"CNAMEResource", func(p *Parser) error { _, err := p.CNAMEResource(); return err }}, + {"MXResource", func(p *Parser) error { _, err := p.MXResource(); return err }}, + {"NSResource", func(p *Parser) error { _, err := p.NSResource(); return err }}, + {"PTRResource", func(p *Parser) error { _, err := p.PTRResource(); return err }}, + 
{"SOAResource", func(p *Parser) error { _, err := p.SOAResource(); return err }}, + {"TXTResource", func(p *Parser) error { _, err := p.TXTResource(); return err }}, + {"SRVResource", func(p *Parser) error { _, err := p.SRVResource(); return err }}, + {"AResource", func(p *Parser) error { _, err := p.AResource(); return err }}, + {"AAAAResource", func(p *Parser) error { _, err := p.AAAAResource(); return err }}, + } + + for _, test := range tests { + if err := test.fn(&Parser{}); err != ErrNotStarted { + t.Errorf("got Parser.%s() = _ , %v, want = _, %v", test.name, err, ErrNotStarted) + } + } +} + +func TestDNSPackUnpack(t *testing.T) { + wants := []Message{ + { + Questions: []Question{ + { + Name: mustNewName("."), + Type: TypeAAAA, + Class: ClassINET, + }, + }, + Answers: []Resource{}, + Authorities: []Resource{}, + Additionals: []Resource{}, + }, + largeTestMsg(), + } + for i, want := range wants { + b, err := want.Pack() + if err != nil { + t.Fatalf("%d: Message.Pack() = %v", i, err) + } + var got Message + err = got.Unpack(b) + if err != nil { + t.Fatalf("%d: Message.Unapck() = %v", i, err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("%d: Message.Pack/Unpack() roundtrip: got = %+v, want = %+v", i, &got, &want) + } + } +} + +func TestDNSAppendPackUnpack(t *testing.T) { + wants := []Message{ + { + Questions: []Question{ + { + Name: mustNewName("."), + Type: TypeAAAA, + Class: ClassINET, + }, + }, + Answers: []Resource{}, + Authorities: []Resource{}, + Additionals: []Resource{}, + }, + largeTestMsg(), + } + for i, want := range wants { + b := make([]byte, 2, 514) + b, err := want.AppendPack(b) + if err != nil { + t.Fatalf("%d: Message.AppendPack() = %v", i, err) + } + b = b[2:] + var got Message + err = got.Unpack(b) + if err != nil { + t.Fatalf("%d: Message.Unapck() = %v", i, err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("%d: Message.AppendPack/Unpack() roundtrip: got = %+v, want = %+v", i, &got, &want) + } + } +} + +func TestSkipAll(t *testing.T) { + msg := largeTestMsg() + buf, err := msg.Pack() + if err != nil { + t.Fatal("Message.Pack() =", err) + } + var p Parser + if _, err := p.Start(buf); err != nil { + t.Fatal("Parser.Start(non-nil) =", err) + } + + tests := []struct { + name string + f func() error + }{ + {"SkipAllQuestions", p.SkipAllQuestions}, + {"SkipAllAnswers", p.SkipAllAnswers}, + {"SkipAllAuthorities", p.SkipAllAuthorities}, + {"SkipAllAdditionals", p.SkipAllAdditionals}, + } + for _, test := range tests { + for i := 1; i <= 3; i++ { + if err := test.f(); err != nil { + t.Errorf("%d: Parser.%s() = %v", i, test.name, err) + } + } + } +} + +func TestSkipEach(t *testing.T) { + msg := smallTestMsg() + + buf, err := msg.Pack() + if err != nil { + t.Fatal("Message.Pack() =", err) + } + var p Parser + if _, err := p.Start(buf); err != nil { + t.Fatal("Parser.Start(non-nil) =", err) + } + + tests := []struct { + name string + f func() error + }{ + {"SkipQuestion", p.SkipQuestion}, + {"SkipAnswer", p.SkipAnswer}, + {"SkipAuthority", p.SkipAuthority}, + {"SkipAdditional", p.SkipAdditional}, + } + for _, test := range tests { + if err := test.f(); err != nil { + t.Errorf("first Parser.%s() = %v, want = nil", test.name, err) + } + if err := test.f(); err != ErrSectionDone { + t.Errorf("second Parser.%s() = %v, want = %v", test.name, err, ErrSectionDone) + } + } +} + +func TestSkipAfterRead(t *testing.T) { + msg := smallTestMsg() + + buf, err := msg.Pack() + if err != nil { + t.Fatal("Message.Pack() =", err) + } + var p Parser + if _, err := p.Start(buf); err != 
+		t.Fatal("Parser.Start(non-nil) =", err)
+	}
+
+	tests := []struct {
+		name string
+		skip func() error
+		read func() error
+	}{
+		{"Question", p.SkipQuestion, func() error { _, err := p.Question(); return err }},
+		{"Answer", p.SkipAnswer, func() error { _, err := p.Answer(); return err }},
+		{"Authority", p.SkipAuthority, func() error { _, err := p.Authority(); return err }},
+		{"Additional", p.SkipAdditional, func() error { _, err := p.Additional(); return err }},
+	}
+	for _, test := range tests {
+		if err := test.read(); err != nil {
+			t.Errorf("got Parser.%s() = _, %v, want = _, nil", test.name, err)
+		}
+		if err := test.skip(); err != ErrSectionDone {
+			t.Errorf("got Parser.Skip%s() = %v, want = %v", test.name, err, ErrSectionDone)
+		}
+	}
+}
+
+func TestSkipNotStarted(t *testing.T) {
+	var p Parser
+
+	tests := []struct {
+		name string
+		f    func() error
+	}{
+		{"SkipAllQuestions", p.SkipAllQuestions},
+		{"SkipAllAnswers", p.SkipAllAnswers},
+		{"SkipAllAuthorities", p.SkipAllAuthorities},
+		{"SkipAllAdditionals", p.SkipAllAdditionals},
+	}
+	for _, test := range tests {
+		if err := test.f(); err != ErrNotStarted {
+			t.Errorf("got Parser.%s() = %v, want = %v", test.name, err, ErrNotStarted)
+		}
+	}
+}
+
+func TestTooManyRecords(t *testing.T) {
+	const recs = int(^uint16(0)) + 1
+	tests := []struct {
+		name string
+		msg  Message
+		want error
+	}{
+		{
+			"Questions",
+			Message{
+				Questions: make([]Question, recs),
+			},
+			errTooManyQuestions,
+		},
+		{
+			"Answers",
+			Message{
+				Answers: make([]Resource, recs),
+			},
+			errTooManyAnswers,
+		},
+		{
+			"Authorities",
+			Message{
+				Authorities: make([]Resource, recs),
+			},
+			errTooManyAuthorities,
+		},
+		{
+			"Additionals",
+			Message{
+				Additionals: make([]Resource, recs),
+			},
+			errTooManyAdditionals,
+		},
+	}
+
+	for _, test := range tests {
+		if _, got := test.msg.Pack(); got != test.want {
+			t.Errorf("got Message.Pack() for %d %s = %v, want = %v", recs, test.name, got, test.want)
+		}
+	}
+}
+
+func TestVeryLongTxt(t *testing.T) {
+	want := Resource{
+		ResourceHeader{
+			Name:  mustNewName("foo.bar.example.com."),
+			Type:  TypeTXT,
+			Class: ClassINET,
+		},
+		&TXTResource{[]string{
+			"",
+			"",
+			"foo bar",
+			"",
+			"www.example.com",
+			"www.example.com.",
+			strings.Repeat(".", 255),
+		}},
+	}
+	buf, err := want.pack(make([]byte, 0, 8000), map[string]int{}, 0)
+	if err != nil {
+		t.Fatal("Resource.pack() =", err)
+	}
+	var got Resource
+	off, err := got.Header.unpack(buf, 0)
+	if err != nil {
+		t.Fatal("ResourceHeader.unpack() =", err)
+	}
+	body, n, err := unpackResourceBody(buf, off, got.Header)
+	if err != nil {
+		t.Fatal("unpackResourceBody() =", err)
+	}
+	got.Body = body
+	if n != len(buf) {
+		t.Errorf("unpacked different amount than packed: got = %d, want = %d", n, len(buf))
+	}
+	if !reflect.DeepEqual(got, want) {
+		t.Errorf("Resource.pack/unpack() roundtrip: got = %#v, want = %#v", got, want)
+	}
+}
+
+func TestTooLongTxt(t *testing.T) {
+	rb := TXTResource{[]string{strings.Repeat(".", 256)}}
+	if _, err := rb.pack(make([]byte, 0, 8000), map[string]int{}, 0); err != errStringTooLong {
+		t.Errorf("packing TXTResource with 256 character string: got err = %v, want = %v", err, errStringTooLong)
+	}
+}
+
+func TestStartAppends(t *testing.T) {
+	buf := make([]byte, 2, 514)
+	wantBuf := []byte{4, 44}
+	copy(buf, wantBuf)
+
+	b := NewBuilder(buf, Header{})
+	b.EnableCompression()
+
+	buf, err := b.Finish()
+	if err != nil {
+		t.Fatal("Builder.Finish() =", err)
+	}
+	if got, want := len(buf), headerLen+2; got != want {
+		t.Errorf("got len(buf) =
%d, want = %d", got, want) + } + if string(buf[:2]) != string(wantBuf) { + t.Errorf("original data not preserved, got = %#v, want = %#v", buf[:2], wantBuf) + } +} + +func TestStartError(t *testing.T) { + tests := []struct { + name string + fn func(*Builder) error + }{ + {"Questions", func(b *Builder) error { return b.StartQuestions() }}, + {"Answers", func(b *Builder) error { return b.StartAnswers() }}, + {"Authorities", func(b *Builder) error { return b.StartAuthorities() }}, + {"Additionals", func(b *Builder) error { return b.StartAdditionals() }}, + } + + envs := []struct { + name string + fn func() *Builder + want error + }{ + {"sectionNotStarted", func() *Builder { return &Builder{section: sectionNotStarted} }, ErrNotStarted}, + {"sectionDone", func() *Builder { return &Builder{section: sectionDone} }, ErrSectionDone}, + } + + for _, env := range envs { + for _, test := range tests { + if got := test.fn(env.fn()); got != env.want { + t.Errorf("got Builder{%s}.Start%s() = %v, want = %v", env.name, test.name, got, env.want) + } + } + } +} + +func TestBuilderResourceError(t *testing.T) { + tests := []struct { + name string + fn func(*Builder) error + }{ + {"CNAMEResource", func(b *Builder) error { return b.CNAMEResource(ResourceHeader{}, CNAMEResource{}) }}, + {"MXResource", func(b *Builder) error { return b.MXResource(ResourceHeader{}, MXResource{}) }}, + {"NSResource", func(b *Builder) error { return b.NSResource(ResourceHeader{}, NSResource{}) }}, + {"PTRResource", func(b *Builder) error { return b.PTRResource(ResourceHeader{}, PTRResource{}) }}, + {"SOAResource", func(b *Builder) error { return b.SOAResource(ResourceHeader{}, SOAResource{}) }}, + {"TXTResource", func(b *Builder) error { return b.TXTResource(ResourceHeader{}, TXTResource{}) }}, + {"SRVResource", func(b *Builder) error { return b.SRVResource(ResourceHeader{}, SRVResource{}) }}, + {"AResource", func(b *Builder) error { return b.AResource(ResourceHeader{}, AResource{}) }}, + {"AAAAResource", func(b *Builder) error { return b.AAAAResource(ResourceHeader{}, AAAAResource{}) }}, + {"OPTResource", func(b *Builder) error { return b.OPTResource(ResourceHeader{}, OPTResource{}) }}, + } + + envs := []struct { + name string + fn func() *Builder + want error + }{ + {"sectionNotStarted", func() *Builder { return &Builder{section: sectionNotStarted} }, ErrNotStarted}, + {"sectionHeader", func() *Builder { return &Builder{section: sectionHeader} }, ErrNotStarted}, + {"sectionQuestions", func() *Builder { return &Builder{section: sectionQuestions} }, ErrNotStarted}, + {"sectionDone", func() *Builder { return &Builder{section: sectionDone} }, ErrSectionDone}, + } + + for _, env := range envs { + for _, test := range tests { + if got := test.fn(env.fn()); got != env.want { + t.Errorf("got Builder{%s}.%s() = %v, want = %v", env.name, test.name, got, env.want) + } + } + } +} + +func TestFinishError(t *testing.T) { + var b Builder + want := ErrNotStarted + if _, got := b.Finish(); got != want { + t.Errorf("got Builder.Finish() = %v, want = %v", got, want) + } +} + +func TestBuilder(t *testing.T) { + msg := largeTestMsg() + want, err := msg.Pack() + if err != nil { + t.Fatal("Message.Pack() =", err) + } + + b := NewBuilder(nil, msg.Header) + b.EnableCompression() + + if err := b.StartQuestions(); err != nil { + t.Fatal("Builder.StartQuestions() =", err) + } + for _, q := range msg.Questions { + if err := b.Question(q); err != nil { + t.Fatalf("Builder.Question(%#v) = %v", q, err) + } + } + + if err := b.StartAnswers(); err != nil { + 
t.Fatal("Builder.StartAnswers() =", err) + } + for _, a := range msg.Answers { + switch a.Header.Type { + case TypeA: + if err := b.AResource(a.Header, *a.Body.(*AResource)); err != nil { + t.Fatalf("Builder.AResource(%#v) = %v", a, err) + } + case TypeNS: + if err := b.NSResource(a.Header, *a.Body.(*NSResource)); err != nil { + t.Fatalf("Builder.NSResource(%#v) = %v", a, err) + } + case TypeCNAME: + if err := b.CNAMEResource(a.Header, *a.Body.(*CNAMEResource)); err != nil { + t.Fatalf("Builder.CNAMEResource(%#v) = %v", a, err) + } + case TypeSOA: + if err := b.SOAResource(a.Header, *a.Body.(*SOAResource)); err != nil { + t.Fatalf("Builder.SOAResource(%#v) = %v", a, err) + } + case TypePTR: + if err := b.PTRResource(a.Header, *a.Body.(*PTRResource)); err != nil { + t.Fatalf("Builder.PTRResource(%#v) = %v", a, err) + } + case TypeMX: + if err := b.MXResource(a.Header, *a.Body.(*MXResource)); err != nil { + t.Fatalf("Builder.MXResource(%#v) = %v", a, err) + } + case TypeTXT: + if err := b.TXTResource(a.Header, *a.Body.(*TXTResource)); err != nil { + t.Fatalf("Builder.TXTResource(%#v) = %v", a, err) + } + case TypeAAAA: + if err := b.AAAAResource(a.Header, *a.Body.(*AAAAResource)); err != nil { + t.Fatalf("Builder.AAAAResource(%#v) = %v", a, err) + } + case TypeSRV: + if err := b.SRVResource(a.Header, *a.Body.(*SRVResource)); err != nil { + t.Fatalf("Builder.SRVResource(%#v) = %v", a, err) + } + } + } + + if err := b.StartAuthorities(); err != nil { + t.Fatal("Builder.StartAuthorities() =", err) + } + for _, a := range msg.Authorities { + if err := b.NSResource(a.Header, *a.Body.(*NSResource)); err != nil { + t.Fatalf("Builder.NSResource(%#v) = %v", a, err) + } + } + + if err := b.StartAdditionals(); err != nil { + t.Fatal("Builder.StartAdditionals() =", err) + } + for _, a := range msg.Additionals { + switch a.Body.(type) { + case *TXTResource: + if err := b.TXTResource(a.Header, *a.Body.(*TXTResource)); err != nil { + t.Fatalf("Builder.TXTResource(%#v) = %v", a, err) + } + case *OPTResource: + if err := b.OPTResource(a.Header, *a.Body.(*OPTResource)); err != nil { + t.Fatalf("Builder.OPTResource(%#v) = %v", a, err) + } + } + } + + got, err := b.Finish() + if err != nil { + t.Fatal("Builder.Finish() =", err) + } + if !bytes.Equal(got, want) { + t.Fatalf("got from Builder.Finish() = %#v\nwant = %#v", got, want) + } +} + +func TestResourcePack(t *testing.T) { + for _, tt := range []struct { + m Message + err error + }{ + { + Message{ + Questions: []Question{ + { + Name: mustNewName("."), + Type: TypeAAAA, + Class: ClassINET, + }, + }, + Answers: []Resource{{ResourceHeader{}, nil}}, + }, + &nestedError{"packing Answer", errNilResouceBody}, + }, + { + Message{ + Questions: []Question{ + { + Name: mustNewName("."), + Type: TypeAAAA, + Class: ClassINET, + }, + }, + Authorities: []Resource{{ResourceHeader{}, (*NSResource)(nil)}}, + }, + &nestedError{"packing Authority", + &nestedError{"ResourceHeader", + &nestedError{"Name", errNonCanonicalName}, + }, + }, + }, + { + Message{ + Questions: []Question{ + { + Name: mustNewName("."), + Type: TypeA, + Class: ClassINET, + }, + }, + Additionals: []Resource{{ResourceHeader{}, nil}}, + }, + &nestedError{"packing Additional", errNilResouceBody}, + }, + } { + _, err := tt.m.Pack() + if !reflect.DeepEqual(err, tt.err) { + t.Errorf("got Message{%v}.Pack() = %v, want %v", tt.m, err, tt.err) + } + } +} + +func TestOptionPackUnpack(t *testing.T) { + for _, tt := range []struct { + name string + w []byte // wire format of m.Additionals + m Message + dnssecOK bool + 
extRCode RCode + }{ + { + name: "without EDNS(0) options", + w: []byte{ + 0x00, 0x00, 0x29, 0x10, 0x00, 0xfe, 0x00, 0x80, + 0x00, 0x00, 0x00, + }, + m: Message{ + Header: Header{RCode: RCodeFormatError}, + Questions: []Question{ + { + Name: mustNewName("."), + Type: TypeA, + Class: ClassINET, + }, + }, + Additionals: []Resource{ + { + mustEDNS0ResourceHeader(4096, 0xfe0|RCodeFormatError, true), + &OPTResource{}, + }, + }, + }, + dnssecOK: true, + extRCode: 0xfe0 | RCodeFormatError, + }, + { + name: "with EDNS(0) options", + w: []byte{ + 0x00, 0x00, 0x29, 0x10, 0x00, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x0c, 0x00, 0x0c, 0x00, 0x02, 0x00, + 0x00, 0x00, 0x0b, 0x00, 0x02, 0x12, 0x34, + }, + m: Message{ + Header: Header{RCode: RCodeServerFailure}, + Questions: []Question{ + { + Name: mustNewName("."), + Type: TypeAAAA, + Class: ClassINET, + }, + }, + Additionals: []Resource{ + { + mustEDNS0ResourceHeader(4096, 0xff0|RCodeServerFailure, false), + &OPTResource{ + Options: []Option{ + { + Code: 12, // see RFC 7828 + Data: []byte{0x00, 0x00}, + }, + { + Code: 11, // see RFC 7830 + Data: []byte{0x12, 0x34}, + }, + }, + }, + }, + }, + }, + dnssecOK: false, + extRCode: 0xff0 | RCodeServerFailure, + }, + { + // Containing multiple OPT resources in a + // message is invalid, but it's necessary for + // protocol conformance testing. + name: "with multiple OPT resources", + w: []byte{ + 0x00, 0x00, 0x29, 0x10, 0x00, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x06, 0x00, 0x0b, 0x00, 0x02, 0x12, + 0x34, 0x00, 0x00, 0x29, 0x10, 0x00, 0xff, 0x00, + 0x00, 0x00, 0x00, 0x06, 0x00, 0x0c, 0x00, 0x02, + 0x00, 0x00, + }, + m: Message{ + Header: Header{RCode: RCodeNameError}, + Questions: []Question{ + { + Name: mustNewName("."), + Type: TypeAAAA, + Class: ClassINET, + }, + }, + Additionals: []Resource{ + { + mustEDNS0ResourceHeader(4096, 0xff0|RCodeNameError, false), + &OPTResource{ + Options: []Option{ + { + Code: 11, // see RFC 7830 + Data: []byte{0x12, 0x34}, + }, + }, + }, + }, + { + mustEDNS0ResourceHeader(4096, 0xff0|RCodeNameError, false), + &OPTResource{ + Options: []Option{ + { + Code: 12, // see RFC 7828 + Data: []byte{0x00, 0x00}, + }, + }, + }, + }, + }, + }, + }, + } { + w, err := tt.m.Pack() + if err != nil { + t.Errorf("Message.Pack() for %s = %v", tt.name, err) + continue + } + if !bytes.Equal(w[len(w)-len(tt.w):], tt.w) { + t.Errorf("got Message.Pack() for %s = %#v, want %#v", tt.name, w[len(w)-len(tt.w):], tt.w) + continue + } + var m Message + if err := m.Unpack(w); err != nil { + t.Errorf("Message.Unpack() for %s = %v", tt.name, err) + continue + } + if !reflect.DeepEqual(m.Additionals, tt.m.Additionals) { + t.Errorf("got Message.Pack/Unpack() roundtrip for %s = %+v, want %+v", tt.name, m, tt.m) + continue + } + } +} + +func benchmarkParsingSetup() ([]byte, error) { + name := mustNewName("foo.bar.example.com.") + msg := Message{ + Header: Header{Response: true, Authoritative: true}, + Questions: []Question{ + { + Name: name, + Type: TypeA, + Class: ClassINET, + }, + }, + Answers: []Resource{ + { + ResourceHeader{ + Name: name, + Class: ClassINET, + }, + &AResource{[4]byte{}}, + }, + { + ResourceHeader{ + Name: name, + Class: ClassINET, + }, + &AAAAResource{[16]byte{}}, + }, + { + ResourceHeader{ + Name: name, + Class: ClassINET, + }, + &CNAMEResource{name}, + }, + { + ResourceHeader{ + Name: name, + Class: ClassINET, + }, + &NSResource{name}, + }, + }, + } + + buf, err := msg.Pack() + if err != nil { + return nil, fmt.Errorf("Message.Pack() = %v", err) + } + return buf, nil +} + +func benchmarkParsing(tb 
testing.TB, buf []byte) {
+	var p Parser
+	if _, err := p.Start(buf); err != nil {
+		tb.Fatal("Parser.Start(non-nil) =", err)
+	}
+
+	for {
+		_, err := p.Question()
+		if err == ErrSectionDone {
+			break
+		}
+		if err != nil {
+			tb.Fatal("Parser.Question() =", err)
+		}
+	}
+
+	for {
+		h, err := p.AnswerHeader()
+		if err == ErrSectionDone {
+			break
+		}
+		if err != nil {
+			tb.Fatal("Parser.AnswerHeader() =", err)
+		}
+
+		switch h.Type {
+		case TypeA:
+			if _, err := p.AResource(); err != nil {
+				tb.Fatal("Parser.AResource() =", err)
+			}
+		case TypeAAAA:
+			if _, err := p.AAAAResource(); err != nil {
+				tb.Fatal("Parser.AAAAResource() =", err)
+			}
+		case TypeCNAME:
+			if _, err := p.CNAMEResource(); err != nil {
+				tb.Fatal("Parser.CNAMEResource() =", err)
+			}
+		case TypeNS:
+			if _, err := p.NSResource(); err != nil {
+				tb.Fatal("Parser.NSResource() =", err)
+			}
+		case TypeOPT:
+			if _, err := p.OPTResource(); err != nil {
+				tb.Fatal("Parser.OPTResource() =", err)
+			}
+		default:
+			tb.Fatalf("got unknown resource type: %v", h.Type)
+		}
+	}
+}
+
+func BenchmarkParsing(b *testing.B) {
+	buf, err := benchmarkParsingSetup()
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	b.ReportAllocs()
+	for i := 0; i < b.N; i++ {
+		benchmarkParsing(b, buf)
+	}
+}
+
+func TestParsingAllocs(t *testing.T) {
+	buf, err := benchmarkParsingSetup()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if allocs := testing.AllocsPerRun(100, func() { benchmarkParsing(t, buf) }); allocs > 0.5 {
+		t.Errorf("allocations during parsing: got = %f, want ~0", allocs)
+	}
+}
+
+func benchmarkBuildingSetup() (Name, []byte) {
+	name := mustNewName("foo.bar.example.com.")
+	buf := make([]byte, 0, packStartingCap)
+	return name, buf
+}
+
+func benchmarkBuilding(tb testing.TB, name Name, buf []byte) {
+	bld := NewBuilder(buf, Header{Response: true, Authoritative: true})
+
+	if err := bld.StartQuestions(); err != nil {
+		tb.Fatal("Builder.StartQuestions() =", err)
+	}
+	q := Question{
+		Name:  name,
+		Type:  TypeA,
+		Class: ClassINET,
+	}
+	if err := bld.Question(q); err != nil {
+		tb.Fatalf("Builder.Question(%+v) = %v", q, err)
+	}
+
+	hdr := ResourceHeader{
+		Name:  name,
+		Class: ClassINET,
+	}
+	if err := bld.StartAnswers(); err != nil {
+		tb.Fatal("Builder.StartAnswers() =", err)
+	}
+
+	ar := AResource{[4]byte{}}
+	if err := bld.AResource(hdr, ar); err != nil {
+		tb.Fatalf("Builder.AResource(%+v, %+v) = %v", hdr, ar, err)
+	}
+
+	aaar := AAAAResource{[16]byte{}}
+	if err := bld.AAAAResource(hdr, aaar); err != nil {
+		tb.Fatalf("Builder.AAAAResource(%+v, %+v) = %v", hdr, aaar, err)
+	}
+
+	cnr := CNAMEResource{name}
+	if err := bld.CNAMEResource(hdr, cnr); err != nil {
+		tb.Fatalf("Builder.CNAMEResource(%+v, %+v) = %v", hdr, cnr, err)
+	}
+
+	nsr := NSResource{name}
+	if err := bld.NSResource(hdr, nsr); err != nil {
+		tb.Fatalf("Builder.NSResource(%+v, %+v) = %v", hdr, nsr, err)
+	}
+
+	extrc := 0xfe0 | RCodeNotImplemented
+	if err := (&hdr).SetEDNS0(4096, extrc, true); err != nil {
+		tb.Fatalf("ResourceHeader.SetEDNS0(4096, %#x, true) = %v", extrc, err)
+	}
+	optr := OPTResource{}
+	if err := bld.OPTResource(hdr, optr); err != nil {
+		tb.Fatalf("Builder.OPTResource(%+v, %+v) = %v", hdr, optr, err)
+	}
+
+	if _, err := bld.Finish(); err != nil {
+		tb.Fatal("Builder.Finish() =", err)
+	}
+}
+
+func BenchmarkBuilding(b *testing.B) {
+	name, buf := benchmarkBuildingSetup()
+	b.ReportAllocs()
+	for i := 0; i < b.N; i++ {
+		benchmarkBuilding(b, name, buf)
+	}
+}
+
+func TestBuildingAllocs(t *testing.T) {
+	name, buf := benchmarkBuildingSetup()
+	if allocs :=
testing.AllocsPerRun(100, func() { benchmarkBuilding(t, name, buf) }); allocs > 0.5 { + t.Errorf("allocations during building: got = %f, want ~0", allocs) + } +} + +func smallTestMsg() Message { + name := mustNewName("example.com.") + return Message{ + Header: Header{Response: true, Authoritative: true}, + Questions: []Question{ + { + Name: name, + Type: TypeA, + Class: ClassINET, + }, + }, + Answers: []Resource{ + { + ResourceHeader{ + Name: name, + Type: TypeA, + Class: ClassINET, + }, + &AResource{[4]byte{127, 0, 0, 1}}, + }, + }, + Authorities: []Resource{ + { + ResourceHeader{ + Name: name, + Type: TypeA, + Class: ClassINET, + }, + &AResource{[4]byte{127, 0, 0, 1}}, + }, + }, + Additionals: []Resource{ + { + ResourceHeader{ + Name: name, + Type: TypeA, + Class: ClassINET, + }, + &AResource{[4]byte{127, 0, 0, 1}}, + }, + }, + } +} + +func BenchmarkPack(b *testing.B) { + msg := largeTestMsg() + + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + if _, err := msg.Pack(); err != nil { + b.Fatal("Message.Pack() =", err) + } + } +} + +func BenchmarkAppendPack(b *testing.B) { + msg := largeTestMsg() + buf := make([]byte, 0, packStartingCap) + + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + if _, err := msg.AppendPack(buf[:0]); err != nil { + b.Fatal("Message.AppendPack() = ", err) + } + } +} + +func largeTestMsg() Message { + name := mustNewName("foo.bar.example.com.") + return Message{ + Header: Header{Response: true, Authoritative: true}, + Questions: []Question{ + { + Name: name, + Type: TypeA, + Class: ClassINET, + }, + }, + Answers: []Resource{ + { + ResourceHeader{ + Name: name, + Type: TypeA, + Class: ClassINET, + }, + &AResource{[4]byte{127, 0, 0, 1}}, + }, + { + ResourceHeader{ + Name: name, + Type: TypeA, + Class: ClassINET, + }, + &AResource{[4]byte{127, 0, 0, 2}}, + }, + { + ResourceHeader{ + Name: name, + Type: TypeAAAA, + Class: ClassINET, + }, + &AAAAResource{[16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}}, + }, + { + ResourceHeader{ + Name: name, + Type: TypeCNAME, + Class: ClassINET, + }, + &CNAMEResource{mustNewName("alias.example.com.")}, + }, + { + ResourceHeader{ + Name: name, + Type: TypeSOA, + Class: ClassINET, + }, + &SOAResource{ + NS: mustNewName("ns1.example.com."), + MBox: mustNewName("mb.example.com."), + Serial: 1, + Refresh: 2, + Retry: 3, + Expire: 4, + MinTTL: 5, + }, + }, + { + ResourceHeader{ + Name: name, + Type: TypePTR, + Class: ClassINET, + }, + &PTRResource{mustNewName("ptr.example.com.")}, + }, + { + ResourceHeader{ + Name: name, + Type: TypeMX, + Class: ClassINET, + }, + &MXResource{ + 7, + mustNewName("mx.example.com."), + }, + }, + { + ResourceHeader{ + Name: name, + Type: TypeSRV, + Class: ClassINET, + }, + &SRVResource{ + 8, + 9, + 11, + mustNewName("srv.example.com."), + }, + }, + }, + Authorities: []Resource{ + { + ResourceHeader{ + Name: name, + Type: TypeNS, + Class: ClassINET, + }, + &NSResource{mustNewName("ns1.example.com.")}, + }, + { + ResourceHeader{ + Name: name, + Type: TypeNS, + Class: ClassINET, + }, + &NSResource{mustNewName("ns2.example.com.")}, + }, + }, + Additionals: []Resource{ + { + ResourceHeader{ + Name: name, + Type: TypeTXT, + Class: ClassINET, + }, + &TXTResource{[]string{"So Long, and Thanks for All the Fish"}}, + }, + { + ResourceHeader{ + Name: name, + Type: TypeTXT, + Class: ClassINET, + }, + &TXTResource{[]string{"Hamster Huey and the Gooey Kablooie"}}, + }, + { + mustEDNS0ResourceHeader(4096, 0xfe0|RCodeSuccess, false), + &OPTResource{ + Options: []Option{ + { + Code: 10, // see RFC 7873 + 
Data: []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef}, + }, + }, + }, + }, + }, + } +} diff --git a/vendor/golang.org/x/net/html/atom/atom.go b/vendor/golang.org/x/net/html/atom/atom.go new file mode 100644 index 0000000..cd0a8ac --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/atom.go @@ -0,0 +1,78 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package atom provides integer codes (also known as atoms) for a fixed set of +// frequently occurring HTML strings: tag names and attribute keys such as "p" +// and "id". +// +// Sharing an atom's name between all elements with the same tag can result in +// fewer string allocations when tokenizing and parsing HTML. Integer +// comparisons are also generally faster than string comparisons. +// +// The value of an atom's particular code is not guaranteed to stay the same +// between versions of this package. Neither is any ordering guaranteed: +// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to +// be dense. The only guarantees are that e.g. looking up "div" will yield +// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0. +package atom // import "golang.org/x/net/html/atom" + +// Atom is an integer code for a string. The zero value maps to "". +type Atom uint32 + +// String returns the atom's name. +func (a Atom) String() string { + start := uint32(a >> 8) + n := uint32(a & 0xff) + if start+n > uint32(len(atomText)) { + return "" + } + return atomText[start : start+n] +} + +func (a Atom) string() string { + return atomText[a>>8 : a>>8+a&0xff] +} + +// fnv computes the FNV hash with an arbitrary starting value h. +func fnv(h uint32, s []byte) uint32 { + for i := range s { + h ^= uint32(s[i]) + h *= 16777619 + } + return h +} + +func match(s string, t []byte) bool { + for i, c := range t { + if s[i] != c { + return false + } + } + return true +} + +// Lookup returns the atom whose name is s. It returns zero if there is no +// such atom. The lookup is case sensitive. +func Lookup(s []byte) Atom { + if len(s) == 0 || len(s) > maxAtomLen { + return 0 + } + h := fnv(hash0, s) + if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { + return a + } + if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { + return a + } + return 0 +} + +// String returns a string whose contents are equal to s. In that sense, it is +// equivalent to string(s) but may be more efficient. +func String(s []byte) string { + if a := Lookup(s); a != 0 { + return a.String() + } + return string(s) +} diff --git a/vendor/golang.org/x/net/html/atom/atom_test.go b/vendor/golang.org/x/net/html/atom/atom_test.go new file mode 100644 index 0000000..6e33704 --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/atom_test.go @@ -0,0 +1,109 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package atom + +import ( + "sort" + "testing" +) + +func TestKnown(t *testing.T) { + for _, s := range testAtomList { + if atom := Lookup([]byte(s)); atom.String() != s { + t.Errorf("Lookup(%q) = %#x (%q)", s, uint32(atom), atom.String()) + } + } +} + +func TestHits(t *testing.T) { + for _, a := range table { + if a == 0 { + continue + } + got := Lookup([]byte(a.String())) + if got != a { + t.Errorf("Lookup(%q) = %#x, want %#x", a.String(), uint32(got), uint32(a)) + } + } +} + +func TestMisses(t *testing.T) { + testCases := []string{ + "", + "\x00", + "\xff", + "A", + "DIV", + "Div", + "dIV", + "aa", + "a\x00", + "ab", + "abb", + "abbr0", + "abbr ", + " abbr", + " a", + "acceptcharset", + "acceptCharset", + "accept_charset", + "h0", + "h1h2", + "h7", + "onClick", + "λ", + // The following string has the same hash (0xa1d7fab7) as "onmouseover". + "\x00\x00\x00\x00\x00\x50\x18\xae\x38\xd0\xb7", + } + for _, tc := range testCases { + got := Lookup([]byte(tc)) + if got != 0 { + t.Errorf("Lookup(%q): got %d, want 0", tc, got) + } + } +} + +func TestForeignObject(t *testing.T) { + const ( + afo = Foreignobject + afO = ForeignObject + sfo = "foreignobject" + sfO = "foreignObject" + ) + if got := Lookup([]byte(sfo)); got != afo { + t.Errorf("Lookup(%q): got %#v, want %#v", sfo, got, afo) + } + if got := Lookup([]byte(sfO)); got != afO { + t.Errorf("Lookup(%q): got %#v, want %#v", sfO, got, afO) + } + if got := afo.String(); got != sfo { + t.Errorf("Atom(%#v).String(): got %q, want %q", afo, got, sfo) + } + if got := afO.String(); got != sfO { + t.Errorf("Atom(%#v).String(): got %q, want %q", afO, got, sfO) + } +} + +func BenchmarkLookup(b *testing.B) { + sortedTable := make([]string, 0, len(table)) + for _, a := range table { + if a != 0 { + sortedTable = append(sortedTable, a.String()) + } + } + sort.Strings(sortedTable) + + x := make([][]byte, 1000) + for i := range x { + x[i] = []byte(sortedTable[i%len(sortedTable)]) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, s := range x { + Lookup(s) + } + } +} diff --git a/vendor/golang.org/x/net/html/atom/gen.go b/vendor/golang.org/x/net/html/atom/gen.go new file mode 100644 index 0000000..56cd842 --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/gen.go @@ -0,0 +1,710 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +//go:generate go run gen.go +//go:generate go run gen.go -test + +package main + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io/ioutil" + "math/rand" + "os" + "sort" + "strings" +) + +// identifier converts s to a Go exported identifier. +// It converts "div" to "Div" and "accept-charset" to "AcceptCharset". +func identifier(s string) string { + b := make([]byte, 0, len(s)) + cap := true + for _, c := range s { + if c == '-' { + cap = true + continue + } + if cap && 'a' <= c && c <= 'z' { + c -= 'a' - 'A' + } + cap = false + b = append(b, byte(c)) + } + return string(b) +} + +var test = flag.Bool("test", false, "generate table_test.go") + +func genFile(name string, buf *bytes.Buffer) { + b, err := format.Source(buf.Bytes()) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := ioutil.WriteFile(name, b, 0644); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func main() { + flag.Parse() + + var all []string + all = append(all, elements...) + all = append(all, attributes...) + all = append(all, eventHandlers...) 
+ all = append(all, extra...) + sort.Strings(all) + + // uniq - lists have dups + w := 0 + for _, s := range all { + if w == 0 || all[w-1] != s { + all[w] = s + w++ + } + } + all = all[:w] + + if *test { + var buf bytes.Buffer + fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n") + fmt.Fprintln(&buf, "//go:generate go run gen.go -test\n") + fmt.Fprintln(&buf, "package atom\n") + fmt.Fprintln(&buf, "var testAtomList = []string{") + for _, s := range all { + fmt.Fprintf(&buf, "\t%q,\n", s) + } + fmt.Fprintln(&buf, "}") + + genFile("table_test.go", &buf) + return + } + + // Find hash that minimizes table size. + var best *table + for i := 0; i < 1000000; i++ { + if best != nil && 1<<(best.k-1) < len(all) { + break + } + h := rand.Uint32() + for k := uint(0); k <= 16; k++ { + if best != nil && k >= best.k { + break + } + var t table + if t.init(h, k, all) { + best = &t + break + } + } + } + if best == nil { + fmt.Fprintf(os.Stderr, "failed to construct string table\n") + os.Exit(1) + } + + // Lay out strings, using overlaps when possible. + layout := append([]string{}, all...) + + // Remove strings that are substrings of other strings + for changed := true; changed; { + changed = false + for i, s := range layout { + if s == "" { + continue + } + for j, t := range layout { + if i != j && t != "" && strings.Contains(s, t) { + changed = true + layout[j] = "" + } + } + } + } + + // Join strings where one suffix matches another prefix. + for { + // Find best i, j, k such that layout[i][len-k:] == layout[j][:k], + // maximizing overlap length k. + besti := -1 + bestj := -1 + bestk := 0 + for i, s := range layout { + if s == "" { + continue + } + for j, t := range layout { + if i == j { + continue + } + for k := bestk + 1; k <= len(s) && k <= len(t); k++ { + if s[len(s)-k:] == t[:k] { + besti = i + bestj = j + bestk = k + } + } + } + } + if bestk > 0 { + layout[besti] += layout[bestj][bestk:] + layout[bestj] = "" + continue + } + break + } + + text := strings.Join(layout, "") + + atom := map[string]uint32{} + for _, s := range all { + off := strings.Index(text, s) + if off < 0 { + panic("lost string " + s) + } + atom[s] = uint32(off<<8 | len(s)) + } + + var buf bytes.Buffer + // Generate the Go code. 
+	fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n")
+	fmt.Fprintln(&buf, "//go:generate go run gen.go\n")
+	fmt.Fprintln(&buf, "package atom\n\nconst (")
+
+	// compute max len
+	maxLen := 0
+	for _, s := range all {
+		if maxLen < len(s) {
+			maxLen = len(s)
+		}
+		fmt.Fprintf(&buf, "\t%s Atom = %#x\n", identifier(s), atom[s])
+	}
+	fmt.Fprintln(&buf, ")\n")
+
+	fmt.Fprintf(&buf, "const hash0 = %#x\n\n", best.h0)
+	fmt.Fprintf(&buf, "const maxAtomLen = %d\n\n", maxLen)
+
+	fmt.Fprintf(&buf, "var table = [1<<%d]Atom{\n", best.k)
+	for i, s := range best.tab {
+		if s == "" {
+			continue
+		}
+		fmt.Fprintf(&buf, "\t%#x: %#x, // %s\n", i, atom[s], s)
+	}
+	fmt.Fprintf(&buf, "}\n")
+	datasize := (1 << best.k) * 4
+
+	fmt.Fprintln(&buf, "const atomText =")
+	textsize := len(text)
+	for len(text) > 60 {
+		fmt.Fprintf(&buf, "\t%q +\n", text[:60])
+		text = text[60:]
+	}
+	fmt.Fprintf(&buf, "\t%q\n\n", text)
+
+	genFile("table.go", &buf)
+
+	fmt.Fprintf(os.Stdout, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize)
+}
+
+type byLen []string
+
+func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) }
+func (x byLen) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
+func (x byLen) Len() int           { return len(x) }
+
+// fnv computes the FNV hash with an arbitrary starting value h.
+func fnv(h uint32, s string) uint32 {
+	for i := 0; i < len(s); i++ {
+		h ^= uint32(s[i])
+		h *= 16777619
+	}
+	return h
+}
+
+// A table represents an attempt at constructing the lookup table.
+// The lookup table uses cuckoo hashing, meaning that each string
+// can be found in one of two positions.
+type table struct {
+	h0   uint32
+	k    uint
+	mask uint32
+	tab  []string
+}
+
+// hash returns the two hashes for s.
+func (t *table) hash(s string) (h1, h2 uint32) {
+	h := fnv(t.h0, s)
+	h1 = h & t.mask
+	h2 = (h >> 16) & t.mask
+	return
+}
+
+// init initializes the table with the given parameters.
+// h0 is the initial hash value,
+// k is the number of bits of hash value to use, and
+// x is the list of strings to store in the table.
+// init returns false if the table cannot be constructed.
+func (t *table) init(h0 uint32, k uint, x []string) bool {
+	t.h0 = h0
+	t.k = k
+	t.tab = make([]string, 1<<k)
+	t.mask = 1<<k - 1
+	for _, s := range x {
+		if !t.insert(s) {
+			return false
+		}
+	}
+	return true
+}
+
+// insert inserts s in the table.
+func (t *table) insert(s string) bool {
+	h1, h2 := t.hash(s)
+	if t.tab[h1] == "" {
+		t.tab[h1] = s
+		return true
+	}
+	if t.tab[h2] == "" {
+		t.tab[h2] = s
+		return true
+	}
+	if t.push(h1, 0) {
+		t.tab[h1] = s
+		return true
+	}
+	if t.push(h2, 0) {
+		t.tab[h2] = s
+		return true
+	}
+	return false
+}
+
+// push attempts to push aside the entry in slot i so that a new
+// string can take its place.
+func (t *table) push(i uint32, depth int) bool {
+	if depth > len(t.tab) {
+		return false
+	}
+	s := t.tab[i]
+	h1, h2 := t.hash(s)
+	j := h1 + h2 - i // i is one of s's two slots; j is the other
+	if t.tab[j] != "" && !t.push(j, depth+1) {
+		return false
+	}
+	t.tab[j] = s
+	return true
+}
+
+// The lists of element names and attribute keys were taken from
+// https://html.spec.whatwg.org/multipage/indices.html#index
+// as of the "HTML Living Standard - Last Updated 18 September 2017" version.
+
+// "command", "keygen" and "menuitem" have been removed from the spec,
+// but are kept here for backwards compatibility.
+var elements = []string{ + "a", + "abbr", + "address", + "area", + "article", + "aside", + "audio", + "b", + "base", + "bdi", + "bdo", + "blockquote", + "body", + "br", + "button", + "canvas", + "caption", + "cite", + "code", + "col", + "colgroup", + "command", + "data", + "datalist", + "dd", + "del", + "details", + "dfn", + "dialog", + "div", + "dl", + "dt", + "em", + "embed", + "fieldset", + "figcaption", + "figure", + "footer", + "form", + "h1", + "h2", + "h3", + "h4", + "h5", + "h6", + "head", + "header", + "hgroup", + "hr", + "html", + "i", + "iframe", + "img", + "input", + "ins", + "kbd", + "keygen", + "label", + "legend", + "li", + "link", + "main", + "map", + "mark", + "menu", + "menuitem", + "meta", + "meter", + "nav", + "noscript", + "object", + "ol", + "optgroup", + "option", + "output", + "p", + "param", + "picture", + "pre", + "progress", + "q", + "rp", + "rt", + "ruby", + "s", + "samp", + "script", + "section", + "select", + "slot", + "small", + "source", + "span", + "strong", + "style", + "sub", + "summary", + "sup", + "table", + "tbody", + "td", + "template", + "textarea", + "tfoot", + "th", + "thead", + "time", + "title", + "tr", + "track", + "u", + "ul", + "var", + "video", + "wbr", +} + +// https://html.spec.whatwg.org/multipage/indices.html#attributes-3 +// +// "challenge", "command", "contextmenu", "dropzone", "icon", "keytype", "mediagroup", +// "radiogroup", "spellcheck", "scoped", "seamless", "sortable" and "sorted" have been removed from the spec, +// but are kept here for backwards compatibility. +var attributes = []string{ + "abbr", + "accept", + "accept-charset", + "accesskey", + "action", + "allowfullscreen", + "allowpaymentrequest", + "allowusermedia", + "alt", + "as", + "async", + "autocomplete", + "autofocus", + "autoplay", + "challenge", + "charset", + "checked", + "cite", + "class", + "color", + "cols", + "colspan", + "command", + "content", + "contenteditable", + "contextmenu", + "controls", + "coords", + "crossorigin", + "data", + "datetime", + "default", + "defer", + "dir", + "dirname", + "disabled", + "download", + "draggable", + "dropzone", + "enctype", + "for", + "form", + "formaction", + "formenctype", + "formmethod", + "formnovalidate", + "formtarget", + "headers", + "height", + "hidden", + "high", + "href", + "hreflang", + "http-equiv", + "icon", + "id", + "inputmode", + "integrity", + "is", + "ismap", + "itemid", + "itemprop", + "itemref", + "itemscope", + "itemtype", + "keytype", + "kind", + "label", + "lang", + "list", + "loop", + "low", + "manifest", + "max", + "maxlength", + "media", + "mediagroup", + "method", + "min", + "minlength", + "multiple", + "muted", + "name", + "nomodule", + "nonce", + "novalidate", + "open", + "optimum", + "pattern", + "ping", + "placeholder", + "playsinline", + "poster", + "preload", + "radiogroup", + "readonly", + "referrerpolicy", + "rel", + "required", + "reversed", + "rows", + "rowspan", + "sandbox", + "spellcheck", + "scope", + "scoped", + "seamless", + "selected", + "shape", + "size", + "sizes", + "sortable", + "sorted", + "slot", + "span", + "spellcheck", + "src", + "srcdoc", + "srclang", + "srcset", + "start", + "step", + "style", + "tabindex", + "target", + "title", + "translate", + "type", + "typemustmatch", + "updateviacache", + "usemap", + "value", + "width", + "workertype", + "wrap", +} + +// "onautocomplete", "onautocompleteerror", "onmousewheel", +// "onshow" and "onsort" have been removed from the spec, +// but are kept here for backwards compatibility. 
+var eventHandlers = []string{ + "onabort", + "onautocomplete", + "onautocompleteerror", + "onauxclick", + "onafterprint", + "onbeforeprint", + "onbeforeunload", + "onblur", + "oncancel", + "oncanplay", + "oncanplaythrough", + "onchange", + "onclick", + "onclose", + "oncontextmenu", + "oncopy", + "oncuechange", + "oncut", + "ondblclick", + "ondrag", + "ondragend", + "ondragenter", + "ondragexit", + "ondragleave", + "ondragover", + "ondragstart", + "ondrop", + "ondurationchange", + "onemptied", + "onended", + "onerror", + "onfocus", + "onhashchange", + "oninput", + "oninvalid", + "onkeydown", + "onkeypress", + "onkeyup", + "onlanguagechange", + "onload", + "onloadeddata", + "onloadedmetadata", + "onloadend", + "onloadstart", + "onmessage", + "onmessageerror", + "onmousedown", + "onmouseenter", + "onmouseleave", + "onmousemove", + "onmouseout", + "onmouseover", + "onmouseup", + "onmousewheel", + "onwheel", + "onoffline", + "ononline", + "onpagehide", + "onpageshow", + "onpaste", + "onpause", + "onplay", + "onplaying", + "onpopstate", + "onprogress", + "onratechange", + "onreset", + "onresize", + "onrejectionhandled", + "onscroll", + "onsecuritypolicyviolation", + "onseeked", + "onseeking", + "onselect", + "onshow", + "onsort", + "onstalled", + "onstorage", + "onsubmit", + "onsuspend", + "ontimeupdate", + "ontoggle", + "onunhandledrejection", + "onunload", + "onvolumechange", + "onwaiting", +} + +// extra are ad-hoc values not covered by any of the lists above. +var extra = []string{ + "acronym", + "align", + "annotation", + "annotation-xml", + "applet", + "basefont", + "bgsound", + "big", + "blink", + "center", + "color", + "desc", + "face", + "font", + "foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive. + "foreignobject", + "frame", + "frameset", + "image", + "isindex", + "listing", + "malignmark", + "marquee", + "math", + "mglyph", + "mi", + "mn", + "mo", + "ms", + "mtext", + "nobr", + "noembed", + "noframes", + "plaintext", + "prompt", + "public", + "spacer", + "strike", + "svg", + "system", + "tt", + "xmp", +} diff --git a/vendor/golang.org/x/net/html/atom/table.go b/vendor/golang.org/x/net/html/atom/table.go new file mode 100644 index 0000000..a91bd64 --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/table.go @@ -0,0 +1,779 @@ +// Code generated by go generate gen.go; DO NOT EDIT. 
+ +//go:generate go run gen.go + +package atom + +const ( + A Atom = 0x1 + Abbr Atom = 0x4 + Accept Atom = 0x1a06 + AcceptCharset Atom = 0x1a0e + Accesskey Atom = 0x2c09 + Acronym Atom = 0x6907 + Action Atom = 0x26a06 + Address Atom = 0x6f307 + Align Atom = 0x7005 + Allowfullscreen Atom = 0x2000f + Allowpaymentrequest Atom = 0x8013 + Allowusermedia Atom = 0x9c0e + Alt Atom = 0xc703 + Annotation Atom = 0x1c90a + AnnotationXml Atom = 0x1c90e + Applet Atom = 0x31106 + Area Atom = 0x34e04 + Article Atom = 0x3f407 + As Atom = 0xd002 + Aside Atom = 0xd805 + Async Atom = 0xd005 + Audio Atom = 0xe605 + Autocomplete Atom = 0x2700c + Autofocus Atom = 0x10209 + Autoplay Atom = 0x11d08 + B Atom = 0x101 + Base Atom = 0x12c04 + Basefont Atom = 0x12c08 + Bdi Atom = 0x7903 + Bdo Atom = 0x14b03 + Bgsound Atom = 0x15e07 + Big Atom = 0x17003 + Blink Atom = 0x17305 + Blockquote Atom = 0x1870a + Body Atom = 0x2804 + Br Atom = 0x202 + Button Atom = 0x19106 + Canvas Atom = 0xd406 + Caption Atom = 0x22907 + Center Atom = 0x21806 + Challenge Atom = 0x29309 + Charset Atom = 0x2107 + Checked Atom = 0x47107 + Cite Atom = 0x55c04 + Class Atom = 0x5bd05 + Code Atom = 0x1a004 + Col Atom = 0x1a703 + Colgroup Atom = 0x1a708 + Color Atom = 0x1bf05 + Cols Atom = 0x1c404 + Colspan Atom = 0x1c407 + Command Atom = 0x1d707 + Content Atom = 0x58307 + Contenteditable Atom = 0x5830f + Contextmenu Atom = 0x3780b + Controls Atom = 0x1de08 + Coords Atom = 0x1ea06 + Crossorigin Atom = 0x1f30b + Data Atom = 0x49d04 + Datalist Atom = 0x49d08 + Datetime Atom = 0x2b008 + Dd Atom = 0x2cf02 + Default Atom = 0xdb07 + Defer Atom = 0x1a205 + Del Atom = 0x44a03 + Desc Atom = 0x55904 + Details Atom = 0x4607 + Dfn Atom = 0x5f03 + Dialog Atom = 0x7a06 + Dir Atom = 0xba03 + Dirname Atom = 0xba07 + Disabled Atom = 0x16408 + Div Atom = 0x16b03 + Dl Atom = 0x5e602 + Download Atom = 0x45b08 + Draggable Atom = 0x17a09 + Dropzone Atom = 0x3fd08 + Dt Atom = 0x64b02 + Em Atom = 0x4202 + Embed Atom = 0x4205 + Enctype Atom = 0x28507 + Face Atom = 0x21604 + Fieldset Atom = 0x21e08 + Figcaption Atom = 0x2260a + Figure Atom = 0x24006 + Font Atom = 0x13004 + Footer Atom = 0xca06 + For Atom = 0x24c03 + ForeignObject Atom = 0x24c0d + Foreignobject Atom = 0x2590d + Form Atom = 0x26604 + Formaction Atom = 0x2660a + Formenctype Atom = 0x2810b + Formmethod Atom = 0x29c0a + Formnovalidate Atom = 0x2a60e + Formtarget Atom = 0x2b80a + Frame Atom = 0x5705 + Frameset Atom = 0x5708 + H1 Atom = 0x15c02 + H2 Atom = 0x2d602 + H3 Atom = 0x30502 + H4 Atom = 0x33d02 + H5 Atom = 0x34702 + H6 Atom = 0x64d02 + Head Atom = 0x32904 + Header Atom = 0x32906 + Headers Atom = 0x32907 + Height Atom = 0x14306 + Hgroup Atom = 0x2c206 + Hidden Atom = 0x2cd06 + High Atom = 0x2d304 + Hr Atom = 0x15702 + Href Atom = 0x2d804 + Hreflang Atom = 0x2d808 + Html Atom = 0x14704 + HttpEquiv Atom = 0x2e00a + I Atom = 0x601 + Icon Atom = 0x58204 + Id Atom = 0xda02 + Iframe Atom = 0x2f406 + Image Atom = 0x2fa05 + Img Atom = 0x2ff03 + Input Atom = 0x44305 + Inputmode Atom = 0x44309 + Ins Atom = 0x1fc03 + Integrity Atom = 0x23709 + Is Atom = 0x16502 + Isindex Atom = 0x30707 + Ismap Atom = 0x30e05 + Itemid Atom = 0x38306 + Itemprop Atom = 0x55d08 + Itemref Atom = 0x3c507 + Itemscope Atom = 0x67109 + Itemtype Atom = 0x31708 + Kbd Atom = 0x7803 + Keygen Atom = 0x3206 + Keytype Atom = 0x9507 + Kind Atom = 0x17704 + Label Atom = 0xf105 + Lang Atom = 0x2dc04 + Legend Atom = 0x18106 + Li Atom = 0x7102 + Link Atom = 0x17404 + List Atom = 0x4a104 + Listing Atom = 0x4a107 + Loop Atom = 0xf504 + Low Atom = 0x8203 + 
Main Atom = 0x1004 + Malignmark Atom = 0x6f0a + Manifest Atom = 0x6d708 + Map Atom = 0x31003 + Mark Atom = 0x7504 + Marquee Atom = 0x31f07 + Math Atom = 0x32604 + Max Atom = 0x33503 + Maxlength Atom = 0x33509 + Media Atom = 0xa505 + Mediagroup Atom = 0xa50a + Menu Atom = 0x37f04 + Menuitem Atom = 0x37f08 + Meta Atom = 0x4b004 + Meter Atom = 0xbf05 + Method Atom = 0x2a006 + Mglyph Atom = 0x30006 + Mi Atom = 0x33f02 + Min Atom = 0x33f03 + Minlength Atom = 0x33f09 + Mn Atom = 0x2a902 + Mo Atom = 0x6302 + Ms Atom = 0x67402 + Mtext Atom = 0x34905 + Multiple Atom = 0x35708 + Muted Atom = 0x35f05 + Name Atom = 0xbd04 + Nav Atom = 0x1303 + Nobr Atom = 0x3704 + Noembed Atom = 0x4007 + Noframes Atom = 0x5508 + Nomodule Atom = 0x6108 + Nonce Atom = 0x56605 + Noscript Atom = 0x20e08 + Novalidate Atom = 0x2aa0a + Object Atom = 0x26006 + Ol Atom = 0x11802 + Onabort Atom = 0x19507 + Onafterprint Atom = 0x22e0c + Onautocomplete Atom = 0x26e0e + Onautocompleteerror Atom = 0x26e13 + Onauxclick Atom = 0x61f0a + Onbeforeprint Atom = 0x69e0d + Onbeforeunload Atom = 0x6e70e + Onblur Atom = 0x5c606 + Oncancel Atom = 0xea08 + Oncanplay Atom = 0x14d09 + Oncanplaythrough Atom = 0x14d10 + Onchange Atom = 0x41308 + Onclick Atom = 0x2ed07 + Onclose Atom = 0x36407 + Oncontextmenu Atom = 0x3760d + Oncopy Atom = 0x38906 + Oncuechange Atom = 0x38f0b + Oncut Atom = 0x39a05 + Ondblclick Atom = 0x39f0a + Ondrag Atom = 0x3a906 + Ondragend Atom = 0x3a909 + Ondragenter Atom = 0x3b20b + Ondragexit Atom = 0x3bd0a + Ondragleave Atom = 0x3d70b + Ondragover Atom = 0x3e20a + Ondragstart Atom = 0x3ec0b + Ondrop Atom = 0x3fb06 + Ondurationchange Atom = 0x40b10 + Onemptied Atom = 0x40209 + Onended Atom = 0x41b07 + Onerror Atom = 0x42207 + Onfocus Atom = 0x42907 + Onhashchange Atom = 0x4350c + Oninput Atom = 0x44107 + Oninvalid Atom = 0x44d09 + Onkeydown Atom = 0x45609 + Onkeypress Atom = 0x4630a + Onkeyup Atom = 0x47807 + Onlanguagechange Atom = 0x48510 + Onload Atom = 0x49506 + Onloadeddata Atom = 0x4950c + Onloadedmetadata Atom = 0x4a810 + Onloadend Atom = 0x4be09 + Onloadstart Atom = 0x4c70b + Onmessage Atom = 0x4d209 + Onmessageerror Atom = 0x4d20e + Onmousedown Atom = 0x4e00b + Onmouseenter Atom = 0x4eb0c + Onmouseleave Atom = 0x4f70c + Onmousemove Atom = 0x5030b + Onmouseout Atom = 0x50e0a + Onmouseover Atom = 0x51b0b + Onmouseup Atom = 0x52609 + Onmousewheel Atom = 0x5340c + Onoffline Atom = 0x54009 + Ononline Atom = 0x54908 + Onpagehide Atom = 0x5510a + Onpageshow Atom = 0x56b0a + Onpaste Atom = 0x57707 + Onpause Atom = 0x59207 + Onplay Atom = 0x59c06 + Onplaying Atom = 0x59c09 + Onpopstate Atom = 0x5a50a + Onprogress Atom = 0x5af0a + Onratechange Atom = 0x5cc0c + Onrejectionhandled Atom = 0x5d812 + Onreset Atom = 0x5ea07 + Onresize Atom = 0x5f108 + Onscroll Atom = 0x60008 + Onsecuritypolicyviolation Atom = 0x60819 + Onseeked Atom = 0x62908 + Onseeking Atom = 0x63109 + Onselect Atom = 0x63a08 + Onshow Atom = 0x64406 + Onsort Atom = 0x64f06 + Onstalled Atom = 0x65909 + Onstorage Atom = 0x66209 + Onsubmit Atom = 0x66b08 + Onsuspend Atom = 0x67b09 + Ontimeupdate Atom = 0x1310c + Ontoggle Atom = 0x68408 + Onunhandledrejection Atom = 0x68c14 + Onunload Atom = 0x6ab08 + Onvolumechange Atom = 0x6b30e + Onwaiting Atom = 0x6c109 + Onwheel Atom = 0x6ca07 + Open Atom = 0x56304 + Optgroup Atom = 0xf708 + Optimum Atom = 0x6d107 + Option Atom = 0x6e306 + Output Atom = 0x51506 + P Atom = 0xc01 + Param Atom = 0xc05 + Pattern Atom = 0x4f07 + Picture Atom = 0xae07 + Ping Atom = 0xfe04 + Placeholder Atom = 0x1120b + Plaintext Atom = 0x1ae09 + 
Playsinline Atom = 0x1210b + Poster Atom = 0x2c706 + Pre Atom = 0x46803 + Preload Atom = 0x47e07 + Progress Atom = 0x5b108 + Prompt Atom = 0x52e06 + Public Atom = 0x57e06 + Q Atom = 0x8e01 + Radiogroup Atom = 0x30a + Readonly Atom = 0x34f08 + Referrerpolicy Atom = 0x3c90e + Rel Atom = 0x47f03 + Required Atom = 0x24408 + Reversed Atom = 0xb308 + Rows Atom = 0x3a04 + Rowspan Atom = 0x3a07 + Rp Atom = 0x23402 + Rt Atom = 0x19a02 + Ruby Atom = 0xc304 + S Atom = 0x2501 + Samp Atom = 0x4c04 + Sandbox Atom = 0x10a07 + Scope Atom = 0x67505 + Scoped Atom = 0x67506 + Script Atom = 0x21006 + Seamless Atom = 0x36908 + Section Atom = 0x5c107 + Select Atom = 0x63c06 + Selected Atom = 0x63c08 + Shape Atom = 0x1e505 + Size Atom = 0x5f504 + Sizes Atom = 0x5f505 + Slot Atom = 0x1ef04 + Small Atom = 0x1fe05 + Sortable Atom = 0x65108 + Sorted Atom = 0x32f06 + Source Atom = 0x37006 + Spacer Atom = 0x42f06 + Span Atom = 0x3d04 + Spellcheck Atom = 0x46c0a + Src Atom = 0x5b803 + Srcdoc Atom = 0x5b806 + Srclang Atom = 0x5f907 + Srcset Atom = 0x6f906 + Start Atom = 0x3f205 + Step Atom = 0x57b04 + Strike Atom = 0x9106 + Strong Atom = 0x6dd06 + Style Atom = 0x6ff05 + Sub Atom = 0x66d03 + Summary Atom = 0x70407 + Sup Atom = 0x70b03 + Svg Atom = 0x70e03 + System Atom = 0x71106 + Tabindex Atom = 0x4b608 + Table Atom = 0x58d05 + Target Atom = 0x2bc06 + Tbody Atom = 0x2705 + Td Atom = 0x5e02 + Template Atom = 0x71408 + Textarea Atom = 0x34a08 + Tfoot Atom = 0xc905 + Th Atom = 0x15602 + Thead Atom = 0x32805 + Time Atom = 0x13304 + Title Atom = 0xe105 + Tr Atom = 0x8b02 + Track Atom = 0x19b05 + Translate Atom = 0x1b609 + Tt Atom = 0x5102 + Type Atom = 0x9804 + Typemustmatch Atom = 0x2880d + U Atom = 0xb01 + Ul Atom = 0x6602 + Updateviacache Atom = 0x1370e + Usemap Atom = 0x59606 + Value Atom = 0x1505 + Var Atom = 0x16d03 + Video Atom = 0x2e905 + Wbr Atom = 0x57403 + Width Atom = 0x64905 + Workertype Atom = 0x71c0a + Wrap Atom = 0x72604 + Xmp Atom = 0x11003 +) + +const hash0 = 0x81cdf10e + +const maxAtomLen = 25 + +var table = [1 << 9]Atom{ + 0x1: 0xa50a, // mediagroup + 0x2: 0x2dc04, // lang + 0x4: 0x2c09, // accesskey + 0x5: 0x5708, // frameset + 0x7: 0x63a08, // onselect + 0x8: 0x71106, // system + 0xa: 0x64905, // width + 0xc: 0x2810b, // formenctype + 0xd: 0x11802, // ol + 0xe: 0x38f0b, // oncuechange + 0x10: 0x14b03, // bdo + 0x11: 0xe605, // audio + 0x12: 0x17a09, // draggable + 0x14: 0x2e905, // video + 0x15: 0x2a902, // mn + 0x16: 0x37f04, // menu + 0x17: 0x2c706, // poster + 0x19: 0xca06, // footer + 0x1a: 0x2a006, // method + 0x1b: 0x2b008, // datetime + 0x1c: 0x19507, // onabort + 0x1d: 0x1370e, // updateviacache + 0x1e: 0xd005, // async + 0x1f: 0x49506, // onload + 0x21: 0xea08, // oncancel + 0x22: 0x62908, // onseeked + 0x23: 0x2fa05, // image + 0x24: 0x5d812, // onrejectionhandled + 0x26: 0x17404, // link + 0x27: 0x51506, // output + 0x28: 0x32904, // head + 0x29: 0x4f70c, // onmouseleave + 0x2a: 0x57707, // onpaste + 0x2b: 0x59c09, // onplaying + 0x2c: 0x1c407, // colspan + 0x2f: 0x1bf05, // color + 0x30: 0x5f504, // size + 0x31: 0x2e00a, // http-equiv + 0x33: 0x601, // i + 0x34: 0x5510a, // onpagehide + 0x35: 0x68c14, // onunhandledrejection + 0x37: 0x42207, // onerror + 0x3a: 0x12c08, // basefont + 0x3f: 0x1303, // nav + 0x40: 0x17704, // kind + 0x41: 0x34f08, // readonly + 0x42: 0x30006, // mglyph + 0x44: 0x7102, // li + 0x46: 0x2cd06, // hidden + 0x47: 0x70e03, // svg + 0x48: 0x57b04, // step + 0x49: 0x23709, // integrity + 0x4a: 0x57e06, // public + 0x4c: 0x1a703, // col + 0x4d: 0x1870a, // blockquote + 
0x4e: 0x34702, // h5 + 0x50: 0x5b108, // progress + 0x51: 0x5f505, // sizes + 0x52: 0x33d02, // h4 + 0x56: 0x32805, // thead + 0x57: 0x9507, // keytype + 0x58: 0x5af0a, // onprogress + 0x59: 0x44309, // inputmode + 0x5a: 0x3a909, // ondragend + 0x5d: 0x39a05, // oncut + 0x5e: 0x42f06, // spacer + 0x5f: 0x1a708, // colgroup + 0x62: 0x16502, // is + 0x65: 0xd002, // as + 0x66: 0x54009, // onoffline + 0x67: 0x32f06, // sorted + 0x69: 0x48510, // onlanguagechange + 0x6c: 0x4350c, // onhashchange + 0x6d: 0xbd04, // name + 0x6e: 0xc905, // tfoot + 0x6f: 0x55904, // desc + 0x70: 0x33503, // max + 0x72: 0x1ea06, // coords + 0x73: 0x30502, // h3 + 0x74: 0x6e70e, // onbeforeunload + 0x75: 0x3a04, // rows + 0x76: 0x63c06, // select + 0x77: 0xbf05, // meter + 0x78: 0x38306, // itemid + 0x79: 0x5340c, // onmousewheel + 0x7a: 0x5b806, // srcdoc + 0x7d: 0x19b05, // track + 0x7f: 0x31708, // itemtype + 0x82: 0x6302, // mo + 0x83: 0x41308, // onchange + 0x84: 0x32907, // headers + 0x85: 0x5cc0c, // onratechange + 0x86: 0x60819, // onsecuritypolicyviolation + 0x88: 0x49d08, // datalist + 0x89: 0x4e00b, // onmousedown + 0x8a: 0x1ef04, // slot + 0x8b: 0x4a810, // onloadedmetadata + 0x8c: 0x1a06, // accept + 0x8d: 0x26006, // object + 0x91: 0x6b30e, // onvolumechange + 0x92: 0x2107, // charset + 0x93: 0x26e13, // onautocompleteerror + 0x94: 0x8013, // allowpaymentrequest + 0x95: 0x2804, // body + 0x96: 0xdb07, // default + 0x97: 0x63c08, // selected + 0x98: 0x21604, // face + 0x99: 0x1e505, // shape + 0x9b: 0x68408, // ontoggle + 0x9e: 0x64b02, // dt + 0x9f: 0x7504, // mark + 0xa1: 0xb01, // u + 0xa4: 0x6ab08, // onunload + 0xa5: 0xf504, // loop + 0xa6: 0x16408, // disabled + 0xaa: 0x41b07, // onended + 0xab: 0x6f0a, // malignmark + 0xad: 0x67b09, // onsuspend + 0xae: 0x34905, // mtext + 0xaf: 0x64f06, // onsort + 0xb0: 0x55d08, // itemprop + 0xb3: 0x67109, // itemscope + 0xb4: 0x17305, // blink + 0xb6: 0x3a906, // ondrag + 0xb7: 0x6602, // ul + 0xb8: 0x26604, // form + 0xb9: 0x10a07, // sandbox + 0xba: 0x5705, // frame + 0xbb: 0x1505, // value + 0xbc: 0x66209, // onstorage + 0xbf: 0x6907, // acronym + 0xc0: 0x19a02, // rt + 0xc2: 0x202, // br + 0xc3: 0x21e08, // fieldset + 0xc4: 0x2880d, // typemustmatch + 0xc5: 0x6108, // nomodule + 0xc6: 0x4007, // noembed + 0xc7: 0x69e0d, // onbeforeprint + 0xc8: 0x19106, // button + 0xc9: 0x2ed07, // onclick + 0xca: 0x70407, // summary + 0xcd: 0xc304, // ruby + 0xce: 0x5bd05, // class + 0xcf: 0x3ec0b, // ondragstart + 0xd0: 0x22907, // caption + 0xd4: 0x9c0e, // allowusermedia + 0xd5: 0x4c70b, // onloadstart + 0xd9: 0x16b03, // div + 0xda: 0x4a104, // list + 0xdb: 0x32604, // math + 0xdc: 0x44305, // input + 0xdf: 0x3e20a, // ondragover + 0xe0: 0x2d602, // h2 + 0xe2: 0x1ae09, // plaintext + 0xe4: 0x4eb0c, // onmouseenter + 0xe7: 0x47107, // checked + 0xe8: 0x46803, // pre + 0xea: 0x35708, // multiple + 0xeb: 0x7903, // bdi + 0xec: 0x33509, // maxlength + 0xed: 0x8e01, // q + 0xee: 0x61f0a, // onauxclick + 0xf0: 0x57403, // wbr + 0xf2: 0x12c04, // base + 0xf3: 0x6e306, // option + 0xf5: 0x40b10, // ondurationchange + 0xf7: 0x5508, // noframes + 0xf9: 0x3fd08, // dropzone + 0xfb: 0x67505, // scope + 0xfc: 0xb308, // reversed + 0xfd: 0x3b20b, // ondragenter + 0xfe: 0x3f205, // start + 0xff: 0x11003, // xmp + 0x100: 0x5f907, // srclang + 0x101: 0x2ff03, // img + 0x104: 0x101, // b + 0x105: 0x24c03, // for + 0x106: 0xd805, // aside + 0x107: 0x44107, // oninput + 0x108: 0x34e04, // area + 0x109: 0x29c0a, // formmethod + 0x10a: 0x72604, // wrap + 0x10c: 0x23402, // rp + 0x10d: 
0x4630a, // onkeypress + 0x10e: 0x5102, // tt + 0x110: 0x33f02, // mi + 0x111: 0x35f05, // muted + 0x112: 0xc703, // alt + 0x113: 0x1a004, // code + 0x114: 0x4202, // em + 0x115: 0x3bd0a, // ondragexit + 0x117: 0x3d04, // span + 0x119: 0x6d708, // manifest + 0x11a: 0x37f08, // menuitem + 0x11b: 0x58307, // content + 0x11d: 0x6c109, // onwaiting + 0x11f: 0x4be09, // onloadend + 0x121: 0x3760d, // oncontextmenu + 0x123: 0x5c606, // onblur + 0x124: 0x3f407, // article + 0x125: 0xba03, // dir + 0x126: 0xfe04, // ping + 0x127: 0x24408, // required + 0x128: 0x44d09, // oninvalid + 0x129: 0x7005, // align + 0x12b: 0x58204, // icon + 0x12c: 0x64d02, // h6 + 0x12d: 0x1c404, // cols + 0x12e: 0x2260a, // figcaption + 0x12f: 0x45609, // onkeydown + 0x130: 0x66b08, // onsubmit + 0x131: 0x14d09, // oncanplay + 0x132: 0x70b03, // sup + 0x133: 0xc01, // p + 0x135: 0x40209, // onemptied + 0x136: 0x38906, // oncopy + 0x137: 0x55c04, // cite + 0x138: 0x39f0a, // ondblclick + 0x13a: 0x5030b, // onmousemove + 0x13c: 0x66d03, // sub + 0x13d: 0x47f03, // rel + 0x13e: 0xf708, // optgroup + 0x142: 0x3a07, // rowspan + 0x143: 0x37006, // source + 0x144: 0x20e08, // noscript + 0x145: 0x56304, // open + 0x146: 0x1fc03, // ins + 0x147: 0x24c0d, // foreignObject + 0x148: 0x5a50a, // onpopstate + 0x14a: 0x28507, // enctype + 0x14b: 0x26e0e, // onautocomplete + 0x14c: 0x34a08, // textarea + 0x14e: 0x2700c, // autocomplete + 0x14f: 0x15702, // hr + 0x150: 0x1de08, // controls + 0x151: 0xda02, // id + 0x153: 0x22e0c, // onafterprint + 0x155: 0x2590d, // foreignobject + 0x156: 0x31f07, // marquee + 0x157: 0x59207, // onpause + 0x158: 0x5e602, // dl + 0x159: 0x14306, // height + 0x15a: 0x33f03, // min + 0x15b: 0xba07, // dirname + 0x15c: 0x1b609, // translate + 0x15d: 0x14704, // html + 0x15e: 0x33f09, // minlength + 0x15f: 0x47e07, // preload + 0x160: 0x71408, // template + 0x161: 0x3d70b, // ondragleave + 0x164: 0x5b803, // src + 0x165: 0x6dd06, // strong + 0x167: 0x4c04, // samp + 0x168: 0x6f307, // address + 0x169: 0x54908, // ononline + 0x16b: 0x1120b, // placeholder + 0x16c: 0x2bc06, // target + 0x16d: 0x1fe05, // small + 0x16e: 0x6ca07, // onwheel + 0x16f: 0x1c90a, // annotation + 0x170: 0x46c0a, // spellcheck + 0x171: 0x4607, // details + 0x172: 0xd406, // canvas + 0x173: 0x10209, // autofocus + 0x174: 0xc05, // param + 0x176: 0x45b08, // download + 0x177: 0x44a03, // del + 0x178: 0x36407, // onclose + 0x179: 0x7803, // kbd + 0x17a: 0x31106, // applet + 0x17b: 0x2d804, // href + 0x17c: 0x5f108, // onresize + 0x17e: 0x4950c, // onloadeddata + 0x180: 0x8b02, // tr + 0x181: 0x2b80a, // formtarget + 0x182: 0xe105, // title + 0x183: 0x6ff05, // style + 0x184: 0x9106, // strike + 0x185: 0x59606, // usemap + 0x186: 0x2f406, // iframe + 0x187: 0x1004, // main + 0x189: 0xae07, // picture + 0x18c: 0x30e05, // ismap + 0x18e: 0x49d04, // data + 0x18f: 0xf105, // label + 0x191: 0x3c90e, // referrerpolicy + 0x192: 0x15602, // th + 0x194: 0x52e06, // prompt + 0x195: 0x5c107, // section + 0x197: 0x6d107, // optimum + 0x198: 0x2d304, // high + 0x199: 0x15c02, // h1 + 0x19a: 0x65909, // onstalled + 0x19b: 0x16d03, // var + 0x19c: 0x13304, // time + 0x19e: 0x67402, // ms + 0x19f: 0x32906, // header + 0x1a0: 0x4d209, // onmessage + 0x1a1: 0x56605, // nonce + 0x1a2: 0x2660a, // formaction + 0x1a3: 0x21806, // center + 0x1a4: 0x3704, // nobr + 0x1a5: 0x58d05, // table + 0x1a6: 0x4a107, // listing + 0x1a7: 0x18106, // legend + 0x1a9: 0x29309, // challenge + 0x1aa: 0x24006, // figure + 0x1ab: 0xa505, // media + 0x1ae: 0x9804, // type + 
0x1af: 0x13004, // font + 0x1b0: 0x4d20e, // onmessageerror + 0x1b1: 0x36908, // seamless + 0x1b2: 0x5f03, // dfn + 0x1b3: 0x1a205, // defer + 0x1b4: 0x8203, // low + 0x1b5: 0x63109, // onseeking + 0x1b6: 0x51b0b, // onmouseover + 0x1b7: 0x2aa0a, // novalidate + 0x1b8: 0x71c0a, // workertype + 0x1ba: 0x3c507, // itemref + 0x1bd: 0x1, // a + 0x1be: 0x31003, // map + 0x1bf: 0x1310c, // ontimeupdate + 0x1c0: 0x15e07, // bgsound + 0x1c1: 0x3206, // keygen + 0x1c2: 0x2705, // tbody + 0x1c5: 0x64406, // onshow + 0x1c7: 0x2501, // s + 0x1c8: 0x4f07, // pattern + 0x1cc: 0x14d10, // oncanplaythrough + 0x1ce: 0x2cf02, // dd + 0x1cf: 0x6f906, // srcset + 0x1d0: 0x17003, // big + 0x1d2: 0x65108, // sortable + 0x1d3: 0x47807, // onkeyup + 0x1d5: 0x59c06, // onplay + 0x1d7: 0x4b004, // meta + 0x1d8: 0x3fb06, // ondrop + 0x1da: 0x60008, // onscroll + 0x1db: 0x1f30b, // crossorigin + 0x1dc: 0x56b0a, // onpageshow + 0x1dd: 0x4, // abbr + 0x1de: 0x5e02, // td + 0x1df: 0x5830f, // contenteditable + 0x1e0: 0x26a06, // action + 0x1e1: 0x1210b, // playsinline + 0x1e2: 0x42907, // onfocus + 0x1e3: 0x2d808, // hreflang + 0x1e5: 0x50e0a, // onmouseout + 0x1e6: 0x5ea07, // onreset + 0x1e7: 0x11d08, // autoplay + 0x1ea: 0x67506, // scoped + 0x1ec: 0x30a, // radiogroup + 0x1ee: 0x3780b, // contextmenu + 0x1ef: 0x52609, // onmouseup + 0x1f1: 0x2c206, // hgroup + 0x1f2: 0x2000f, // allowfullscreen + 0x1f3: 0x4b608, // tabindex + 0x1f6: 0x30707, // isindex + 0x1f7: 0x1a0e, // accept-charset + 0x1f8: 0x2a60e, // formnovalidate + 0x1fb: 0x1c90e, // annotation-xml + 0x1fc: 0x4205, // embed + 0x1fd: 0x21006, // script + 0x1fe: 0x7a06, // dialog + 0x1ff: 0x1d707, // command +} + +const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobro" + + "wspanoembedetailsampatternoframesetdfnomoduleacronymalignmar" + + "kbdialogallowpaymentrequestrikeytypeallowusermediagroupictur" + + "eversedirnameterubyaltfooterasyncanvasidefaultitleaudioncanc" + + "elabelooptgroupingautofocusandboxmplaceholderautoplaysinline" + + "basefontimeupdateviacacheightmlbdoncanplaythrough1bgsoundisa" + + "bledivarbigblinkindraggablegendblockquotebuttonabortrackcode" + + "fercolgrouplaintextranslatecolorcolspannotation-xmlcommandco" + + "ntrolshapecoordslotcrossoriginsmallowfullscreenoscriptfacent" + + "erfieldsetfigcaptionafterprintegrityfigurequiredforeignObjec" + + "tforeignobjectformactionautocompleteerrorformenctypemustmatc" + + "hallengeformmethodformnovalidatetimeformtargethgrouposterhid" + + "denhigh2hreflanghttp-equivideonclickiframeimageimglyph3isind" + + "exismappletitemtypemarqueematheadersortedmaxlength4minlength" + + "5mtextareadonlymultiplemutedoncloseamlessourceoncontextmenui" + + "temidoncopyoncuechangeoncutondblclickondragendondragenterond" + + "ragexitemreferrerpolicyondragleaveondragoverondragstarticleo" + + "ndropzonemptiedondurationchangeonendedonerroronfocuspaceronh" + + "ashchangeoninputmodeloninvalidonkeydownloadonkeypresspellche" + + "ckedonkeyupreloadonlanguagechangeonloadeddatalistingonloaded" + + "metadatabindexonloadendonloadstartonmessageerroronmousedowno" + + "nmouseenteronmouseleaveonmousemoveonmouseoutputonmouseoveron" + + "mouseupromptonmousewheelonofflineononlineonpagehidescitempro" + + "penonceonpageshowbronpastepublicontenteditableonpausemaponpl" + + "ayingonpopstateonprogressrcdoclassectionbluronratechangeonre" + + "jectionhandledonresetonresizesrclangonscrollonsecuritypolicy" + + "violationauxclickonseekedonseekingonselectedonshowidth6onsor" + + 
"tableonstalledonstorageonsubmitemscopedonsuspendontoggleonun" + + "handledrejectionbeforeprintonunloadonvolumechangeonwaitingon" + + "wheeloptimumanifestrongoptionbeforeunloaddressrcsetstylesumm" + + "arysupsvgsystemplateworkertypewrap" diff --git a/vendor/golang.org/x/net/html/atom/table_test.go b/vendor/golang.org/x/net/html/atom/table_test.go new file mode 100644 index 0000000..46d9d70 --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/table_test.go @@ -0,0 +1,374 @@ +// Code generated by go generate gen.go; DO NOT EDIT. + +//go:generate go run gen.go -test + +package atom + +var testAtomList = []string{ + "a", + "abbr", + "accept", + "accept-charset", + "accesskey", + "acronym", + "action", + "address", + "align", + "allowfullscreen", + "allowpaymentrequest", + "allowusermedia", + "alt", + "annotation", + "annotation-xml", + "applet", + "area", + "article", + "as", + "aside", + "async", + "audio", + "autocomplete", + "autofocus", + "autoplay", + "b", + "base", + "basefont", + "bdi", + "bdo", + "bgsound", + "big", + "blink", + "blockquote", + "body", + "br", + "button", + "canvas", + "caption", + "center", + "challenge", + "charset", + "checked", + "cite", + "class", + "code", + "col", + "colgroup", + "color", + "cols", + "colspan", + "command", + "content", + "contenteditable", + "contextmenu", + "controls", + "coords", + "crossorigin", + "data", + "datalist", + "datetime", + "dd", + "default", + "defer", + "del", + "desc", + "details", + "dfn", + "dialog", + "dir", + "dirname", + "disabled", + "div", + "dl", + "download", + "draggable", + "dropzone", + "dt", + "em", + "embed", + "enctype", + "face", + "fieldset", + "figcaption", + "figure", + "font", + "footer", + "for", + "foreignObject", + "foreignobject", + "form", + "formaction", + "formenctype", + "formmethod", + "formnovalidate", + "formtarget", + "frame", + "frameset", + "h1", + "h2", + "h3", + "h4", + "h5", + "h6", + "head", + "header", + "headers", + "height", + "hgroup", + "hidden", + "high", + "hr", + "href", + "hreflang", + "html", + "http-equiv", + "i", + "icon", + "id", + "iframe", + "image", + "img", + "input", + "inputmode", + "ins", + "integrity", + "is", + "isindex", + "ismap", + "itemid", + "itemprop", + "itemref", + "itemscope", + "itemtype", + "kbd", + "keygen", + "keytype", + "kind", + "label", + "lang", + "legend", + "li", + "link", + "list", + "listing", + "loop", + "low", + "main", + "malignmark", + "manifest", + "map", + "mark", + "marquee", + "math", + "max", + "maxlength", + "media", + "mediagroup", + "menu", + "menuitem", + "meta", + "meter", + "method", + "mglyph", + "mi", + "min", + "minlength", + "mn", + "mo", + "ms", + "mtext", + "multiple", + "muted", + "name", + "nav", + "nobr", + "noembed", + "noframes", + "nomodule", + "nonce", + "noscript", + "novalidate", + "object", + "ol", + "onabort", + "onafterprint", + "onautocomplete", + "onautocompleteerror", + "onauxclick", + "onbeforeprint", + "onbeforeunload", + "onblur", + "oncancel", + "oncanplay", + "oncanplaythrough", + "onchange", + "onclick", + "onclose", + "oncontextmenu", + "oncopy", + "oncuechange", + "oncut", + "ondblclick", + "ondrag", + "ondragend", + "ondragenter", + "ondragexit", + "ondragleave", + "ondragover", + "ondragstart", + "ondrop", + "ondurationchange", + "onemptied", + "onended", + "onerror", + "onfocus", + "onhashchange", + "oninput", + "oninvalid", + "onkeydown", + "onkeypress", + "onkeyup", + "onlanguagechange", + "onload", + "onloadeddata", + "onloadedmetadata", + "onloadend", + "onloadstart", + "onmessage", + 
"onmessageerror", + "onmousedown", + "onmouseenter", + "onmouseleave", + "onmousemove", + "onmouseout", + "onmouseover", + "onmouseup", + "onmousewheel", + "onoffline", + "ononline", + "onpagehide", + "onpageshow", + "onpaste", + "onpause", + "onplay", + "onplaying", + "onpopstate", + "onprogress", + "onratechange", + "onrejectionhandled", + "onreset", + "onresize", + "onscroll", + "onsecuritypolicyviolation", + "onseeked", + "onseeking", + "onselect", + "onshow", + "onsort", + "onstalled", + "onstorage", + "onsubmit", + "onsuspend", + "ontimeupdate", + "ontoggle", + "onunhandledrejection", + "onunload", + "onvolumechange", + "onwaiting", + "onwheel", + "open", + "optgroup", + "optimum", + "option", + "output", + "p", + "param", + "pattern", + "picture", + "ping", + "placeholder", + "plaintext", + "playsinline", + "poster", + "pre", + "preload", + "progress", + "prompt", + "public", + "q", + "radiogroup", + "readonly", + "referrerpolicy", + "rel", + "required", + "reversed", + "rows", + "rowspan", + "rp", + "rt", + "ruby", + "s", + "samp", + "sandbox", + "scope", + "scoped", + "script", + "seamless", + "section", + "select", + "selected", + "shape", + "size", + "sizes", + "slot", + "small", + "sortable", + "sorted", + "source", + "spacer", + "span", + "spellcheck", + "src", + "srcdoc", + "srclang", + "srcset", + "start", + "step", + "strike", + "strong", + "style", + "sub", + "summary", + "sup", + "svg", + "system", + "tabindex", + "table", + "target", + "tbody", + "td", + "template", + "textarea", + "tfoot", + "th", + "thead", + "time", + "title", + "tr", + "track", + "translate", + "tt", + "type", + "typemustmatch", + "u", + "ul", + "updateviacache", + "usemap", + "value", + "var", + "video", + "wbr", + "width", + "workertype", + "wrap", + "xmp", +} diff --git a/vendor/golang.org/x/net/html/charset/charset.go b/vendor/golang.org/x/net/html/charset/charset.go new file mode 100644 index 0000000..13bed15 --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/charset.go @@ -0,0 +1,257 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package charset provides common text encodings for HTML documents. +// +// The mapping from encoding labels to encodings is defined at +// https://encoding.spec.whatwg.org/. +package charset // import "golang.org/x/net/html/charset" + +import ( + "bytes" + "fmt" + "io" + "mime" + "strings" + "unicode/utf8" + + "golang.org/x/net/html" + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/charmap" + "golang.org/x/text/encoding/htmlindex" + "golang.org/x/text/transform" +) + +// Lookup returns the encoding with the specified label, and its canonical +// name. It returns nil and the empty string if label is not one of the +// standard encodings for HTML. Matching is case-insensitive and ignores +// leading and trailing whitespace. Encoders will use HTML escape sequences for +// runes that are not supported by the character set. +func Lookup(label string) (e encoding.Encoding, name string) { + e, err := htmlindex.Get(label) + if err != nil { + return nil, "" + } + name, _ = htmlindex.Name(e) + return &htmlEncoding{e}, name +} + +type htmlEncoding struct{ encoding.Encoding } + +func (h *htmlEncoding) NewEncoder() *encoding.Encoder { + // HTML requires a non-terminating legacy encoder. We use HTML escapes to + // substitute unsupported code points. 
+ return encoding.HTMLEscapeUnsupported(h.Encoding.NewEncoder()) +} + +// DetermineEncoding determines the encoding of an HTML document by examining +// up to the first 1024 bytes of content and the declared Content-Type. +// +// See http://www.whatwg.org/specs/web-apps/current-work/multipage/parsing.html#determining-the-character-encoding +func DetermineEncoding(content []byte, contentType string) (e encoding.Encoding, name string, certain bool) { + if len(content) > 1024 { + content = content[:1024] + } + + for _, b := range boms { + if bytes.HasPrefix(content, b.bom) { + e, name = Lookup(b.enc) + return e, name, true + } + } + + if _, params, err := mime.ParseMediaType(contentType); err == nil { + if cs, ok := params["charset"]; ok { + if e, name = Lookup(cs); e != nil { + return e, name, true + } + } + } + + if len(content) > 0 { + e, name = prescan(content) + if e != nil { + return e, name, false + } + } + + // Try to detect UTF-8. + // First eliminate any partial rune at the end. + for i := len(content) - 1; i >= 0 && i > len(content)-4; i-- { + b := content[i] + if b < 0x80 { + break + } + if utf8.RuneStart(b) { + content = content[:i] + break + } + } + hasHighBit := false + for _, c := range content { + if c >= 0x80 { + hasHighBit = true + break + } + } + if hasHighBit && utf8.Valid(content) { + return encoding.Nop, "utf-8", false + } + + // TODO: change default depending on user's locale? + return charmap.Windows1252, "windows-1252", false +} + +// NewReader returns an io.Reader that converts the content of r to UTF-8. +// It calls DetermineEncoding to find out what r's encoding is. +func NewReader(r io.Reader, contentType string) (io.Reader, error) { + preview := make([]byte, 1024) + n, err := io.ReadFull(r, preview) + switch { + case err == io.ErrUnexpectedEOF: + preview = preview[:n] + r = bytes.NewReader(preview) + case err != nil: + return nil, err + default: + r = io.MultiReader(bytes.NewReader(preview), r) + } + + if e, _, _ := DetermineEncoding(preview, contentType); e != encoding.Nop { + r = transform.NewReader(r, e.NewDecoder()) + } + return r, nil +} + +// NewReaderLabel returns a reader that converts from the specified charset to +// UTF-8. It uses Lookup to find the encoding that corresponds to label, and +// returns an error if Lookup returns nil. It is suitable for use as +// encoding/xml.Decoder's CharsetReader function. 
+func NewReaderLabel(label string, input io.Reader) (io.Reader, error) { + e, _ := Lookup(label) + if e == nil { + return nil, fmt.Errorf("unsupported charset: %q", label) + } + return transform.NewReader(input, e.NewDecoder()), nil +} + +func prescan(content []byte) (e encoding.Encoding, name string) { + z := html.NewTokenizer(bytes.NewReader(content)) + for { + switch z.Next() { + case html.ErrorToken: + return nil, "" + + case html.StartTagToken, html.SelfClosingTagToken: + tagName, hasAttr := z.TagName() + if !bytes.Equal(tagName, []byte("meta")) { + continue + } + attrList := make(map[string]bool) + gotPragma := false + + const ( + dontKnow = iota + doNeedPragma + doNotNeedPragma + ) + needPragma := dontKnow + + name = "" + e = nil + for hasAttr { + var key, val []byte + key, val, hasAttr = z.TagAttr() + ks := string(key) + if attrList[ks] { + continue + } + attrList[ks] = true + for i, c := range val { + if 'A' <= c && c <= 'Z' { + val[i] = c + 0x20 + } + } + + switch ks { + case "http-equiv": + if bytes.Equal(val, []byte("content-type")) { + gotPragma = true + } + + case "content": + if e == nil { + name = fromMetaElement(string(val)) + if name != "" { + e, name = Lookup(name) + if e != nil { + needPragma = doNeedPragma + } + } + } + + case "charset": + e, name = Lookup(string(val)) + needPragma = doNotNeedPragma + } + } + + if needPragma == dontKnow || needPragma == doNeedPragma && !gotPragma { + continue + } + + if strings.HasPrefix(name, "utf-16") { + name = "utf-8" + e = encoding.Nop + } + + if e != nil { + return e, name + } + } + } +} + +func fromMetaElement(s string) string { + for s != "" { + csLoc := strings.Index(s, "charset") + if csLoc == -1 { + return "" + } + s = s[csLoc+len("charset"):] + s = strings.TrimLeft(s, " \t\n\f\r") + if !strings.HasPrefix(s, "=") { + continue + } + s = s[1:] + s = strings.TrimLeft(s, " \t\n\f\r") + if s == "" { + return "" + } + if q := s[0]; q == '"' || q == '\'' { + s = s[1:] + closeQuote := strings.IndexRune(s, rune(q)) + if closeQuote == -1 { + return "" + } + return s[:closeQuote] + } + + end := strings.IndexAny(s, "; \t\n\f\r") + if end == -1 { + end = len(s) + } + return s[:end] + } + return "" +} + +var boms = []struct { + bom []byte + enc string +}{ + {[]byte{0xfe, 0xff}, "utf-16be"}, + {[]byte{0xff, 0xfe}, "utf-16le"}, + {[]byte{0xef, 0xbb, 0xbf}, "utf-8"}, +} diff --git a/vendor/golang.org/x/net/html/charset/charset_test.go b/vendor/golang.org/x/net/html/charset/charset_test.go new file mode 100644 index 0000000..e4e7d86 --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/charset_test.go @@ -0,0 +1,237 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package charset + +import ( + "bytes" + "encoding/xml" + "io/ioutil" + "runtime" + "strings" + "testing" + + "golang.org/x/text/transform" +) + +func transformString(t transform.Transformer, s string) (string, error) { + r := transform.NewReader(strings.NewReader(s), t) + b, err := ioutil.ReadAll(r) + return string(b), err +} + +type testCase struct { + utf8, other, otherEncoding string +} + +// testCases for encoding and decoding. 
+var testCases = []testCase{
+	{"Résumé", "Résumé", "utf8"},
+	{"Résumé", "R\xe9sum\xe9", "latin1"},
+	{"これは漢字です。", "S0\x8c0o0\"oW[g0Y0\x020", "UTF-16LE"},
+	{"これは漢字です。", "0S0\x8c0oo\"[W0g0Y0\x02", "UTF-16BE"},
+	{"Hello, world", "Hello, world", "ASCII"},
+	{"Gdańsk", "Gda\xf1sk", "ISO-8859-2"},
+	{"Ââ Čč Đđ Ŋŋ Õõ Šš Žž Åå Ää", "\xc2\xe2 \xc8\xe8 \xa9\xb9 \xaf\xbf \xd5\xf5 \xaa\xba \xac\xbc \xc5\xe5 \xc4\xe4", "ISO-8859-10"},
+	{"สำหรับ", "\xca\xd3\xcb\xc3\u047a", "ISO-8859-11"},
+	{"latviešu", "latvie\xf0u", "ISO-8859-13"},
+	{"Seònaid", "Se\xf2naid", "ISO-8859-14"},
+	{"€1 is cheap", "\xa41 is cheap", "ISO-8859-15"},
+	{"românește", "rom\xe2ne\xbate", "ISO-8859-16"},
+	{"nutraĵo", "nutra\xbco", "ISO-8859-3"},
+	{"Kalâdlit", "Kal\xe2dlit", "ISO-8859-4"},
+	{"русский", "\xe0\xe3\xe1\xe1\xda\xd8\xd9", "ISO-8859-5"},
+	{"ελληνικά", "\xe5\xeb\xeb\xe7\xed\xe9\xea\xdc", "ISO-8859-7"},
+	{"Kağan", "Ka\xf0an", "ISO-8859-9"},
+	{"Résumé", "R\x8esum\x8e", "macintosh"},
+	{"Gdańsk", "Gda\xf1sk", "windows-1250"},
+	{"русский", "\xf0\xf3\xf1\xf1\xea\xe8\xe9", "windows-1251"},
+	{"Résumé", "R\xe9sum\xe9", "windows-1252"},
+	{"ελληνικά", "\xe5\xeb\xeb\xe7\xed\xe9\xea\xdc", "windows-1253"},
+	{"Kağan", "Ka\xf0an", "windows-1254"},
+	{"עִבְרִית", "\xf2\xc4\xe1\xc0\xf8\xc4\xe9\xfa", "windows-1255"},
+	{"العربية", "\xc7\xe1\xda\xd1\xc8\xed\xc9", "windows-1256"},
+	{"latviešu", "latvie\xf0u", "windows-1257"},
+	{"Việt", "Vi\xea\xf2t", "windows-1258"},
+	{"สำหรับ", "\xca\xd3\xcb\xc3\u047a", "windows-874"},
+	{"русский", "\xd2\xd5\xd3\xd3\xcb\xc9\xca", "KOI8-R"},
+	{"українська", "\xd5\xcb\xd2\xc1\xa7\xce\xd3\xd8\xcb\xc1", "KOI8-U"},
+	{"Hello 常用國字標準字體表", "Hello \xb1`\xa5\u03b0\xea\xa6r\xbc\u0437\u01e6r\xc5\xe9\xaa\xed", "big5"},
+	{"Hello 常用國字標準字體表", "Hello \xb3\xa3\xd3\xc3\x87\xf8\xd7\xd6\x98\xcb\x9c\xca\xd7\xd6\xf3\x77\xb1\xed", "gbk"},
+	{"Hello 常用國字標準字體表", "Hello \xb3\xa3\xd3\xc3\x87\xf8\xd7\xd6\x98\xcb\x9c\xca\xd7\xd6\xf3\x77\xb1\xed", "gb18030"},
+	{"עִבְרִית", "\x81\x30\xfb\x30\x81\x30\xf6\x34\x81\x30\xf9\x33\x81\x30\xf6\x30\x81\x30\xfb\x36\x81\x30\xf6\x34\x81\x30\xfa\x31\x81\x30\xfb\x38", "gb18030"},
+	{"㧯", "\x82\x31\x89\x38", "gb18030"},
+	{"これは漢字です。", "\x82\xb1\x82\xea\x82\xcd\x8a\xbf\x8e\x9a\x82\xc5\x82\xb7\x81B", "SJIS"},
+	{"Hello, 世界!", "Hello, \x90\xa2\x8aE!", "SJIS"},
+	{"イウエオカ", "\xb2\xb3\xb4\xb5\xb6", "SJIS"},
+	{"これは漢字です。", "\xa4\xb3\xa4\xec\xa4\u03f4\xc1\xbb\xfa\xa4\u01e4\xb9\xa1\xa3", "EUC-JP"},
+	{"Hello, 世界!", "Hello, \x1b$B@$3&\x1b(B!", "ISO-2022-JP"},
+	{"다음과 같은 조건을 따라야 합니다: 저작자표시", "\xb4\xd9\xc0\xbd\xb0\xfa \xb0\xb0\xc0\xba \xc1\xb6\xb0\xc7\xc0\xbb \xb5\xfb\xb6\xf3\xbe\xdf \xc7մϴ\xd9: \xc0\xfa\xc0\xdb\xc0\xdaǥ\xbd\xc3", "EUC-KR"},
+}
+
+func TestDecode(t *testing.T) {
+	testCases := append(testCases, []testCase{
+		// Replace multi-byte maximum subpart of ill-formed subsequence with
+		// single replacement character (WhatWG requirement).
+		{"Rés\ufffdumé", "Rés\xe1\x80umé", "utf8"},
+	}...)
+	for _, tc := range testCases {
+		e, _ := Lookup(tc.otherEncoding)
+		if e == nil {
+			t.Errorf("%s: not found", tc.otherEncoding)
+			continue
+		}
+		s, err := transformString(e.NewDecoder(), tc.other)
+		if err != nil {
+			t.Errorf("%s: decode %q: %v", tc.otherEncoding, tc.other, err)
+			continue
+		}
+		if s != tc.utf8 {
+			t.Errorf("%s: got %q, want %q", tc.otherEncoding, s, tc.utf8)
+		}
+	}
+}
+
+func TestEncode(t *testing.T) {
+	testCases := append(testCases, []testCase{
+		// Use Go-style replacement.
+		{"Rés\xe1\x80umé", "Rés\ufffd\ufffdumé", "utf8"},
+		// U+0144 LATIN SMALL LETTER N WITH ACUTE not supported by encoding.
+		{"Gdańsk", "Gda&#324;sk", "ISO-8859-11"},
+		{"\ufffd", "&#65533;", "ISO-8859-11"},
+		{"a\xe1\x80b", "a&#65533;&#65533;b", "ISO-8859-11"},
+	}...)
+	for _, tc := range testCases {
+		e, _ := Lookup(tc.otherEncoding)
+		if e == nil {
+			t.Errorf("%s: not found", tc.otherEncoding)
+			continue
+		}
+		s, err := transformString(e.NewEncoder(), tc.utf8)
+		if err != nil {
+			t.Errorf("%s: encode %q: %s", tc.otherEncoding, tc.utf8, err)
+			continue
+		}
+		if s != tc.other {
+			t.Errorf("%s: got %q, want %q", tc.otherEncoding, s, tc.other)
+		}
+	}
+}
+
+var sniffTestCases = []struct {
+	filename, declared, want string
+}{
+	{"HTTP-charset.html", "text/html; charset=iso-8859-15", "iso-8859-15"},
+	{"UTF-16LE-BOM.html", "", "utf-16le"},
+	{"UTF-16BE-BOM.html", "", "utf-16be"},
+	{"meta-content-attribute.html", "text/html", "iso-8859-15"},
+	{"meta-charset-attribute.html", "text/html", "iso-8859-15"},
+	{"No-encoding-declaration.html", "text/html", "utf-8"},
+	{"HTTP-vs-UTF-8-BOM.html", "text/html; charset=iso-8859-15", "utf-8"},
+	{"HTTP-vs-meta-content.html", "text/html; charset=iso-8859-15", "iso-8859-15"},
+	{"HTTP-vs-meta-charset.html", "text/html; charset=iso-8859-15", "iso-8859-15"},
+	{"UTF-8-BOM-vs-meta-content.html", "text/html", "utf-8"},
+	{"UTF-8-BOM-vs-meta-charset.html", "text/html", "utf-8"},
+}
+
+func TestSniff(t *testing.T) {
+	switch runtime.GOOS {
+	case "nacl": // platforms that don't permit direct file system access
+		t.Skipf("not supported on %q", runtime.GOOS)
+	}
+
+	for _, tc := range sniffTestCases {
+		content, err := ioutil.ReadFile("testdata/" + tc.filename)
+		if err != nil {
+			t.Errorf("%s: error reading file: %v", tc.filename, err)
+			continue
+		}
+
+		_, name, _ := DetermineEncoding(content, tc.declared)
+		if name != tc.want {
+			t.Errorf("%s: got %q, want %q", tc.filename, name, tc.want)
+			continue
+		}
+	}
+}
+
+func TestReader(t *testing.T) {
+	switch runtime.GOOS {
+	case "nacl": // platforms that don't permit direct file system access
+		t.Skipf("not supported on %q", runtime.GOOS)
+	}
+
+	for _, tc := range sniffTestCases {
+		content, err := ioutil.ReadFile("testdata/" + tc.filename)
+		if err != nil {
+			t.Errorf("%s: error reading file: %v", tc.filename, err)
+			continue
+		}
+
+		r, err := NewReader(bytes.NewReader(content), tc.declared)
+		if err != nil {
+			t.Errorf("%s: error creating reader: %v", tc.filename, err)
+			continue
+		}
+
+		got, err := ioutil.ReadAll(r)
+		if err != nil {
+			t.Errorf("%s: error reading from charset.NewReader: %v", tc.filename, err)
+			continue
+		}
+
+		e, _ := Lookup(tc.want)
+		want, err := ioutil.ReadAll(transform.NewReader(bytes.NewReader(content), e.NewDecoder()))
+		if err != nil {
+			t.Errorf("%s: error decoding with hard-coded charset name: %v", tc.filename, err)
+			continue
+		}
+
+		if !bytes.Equal(got, want) {
+			t.Errorf("%s: got %q, want %q", tc.filename, got, want)
+			continue
+		}
+	}
+}
+
+var metaTestCases = []struct {
+	meta, want string
+}{
+	{"", ""},
+	{"text/html", ""},
+	{"text/html; charset utf-8", ""},
+	{"text/html; charset=latin-2", "latin-2"},
+	{"text/html; charset; charset = utf-8", "utf-8"},
+	{`charset="big5"`, "big5"},
+	{"charset='shift_jis'", "shift_jis"},
+}
+
+func TestFromMeta(t *testing.T) {
+	for _, tc := range metaTestCases {
+		got := fromMetaElement(tc.meta)
+		if got != tc.want {
+			t.Errorf("%q: got %q, want %q", tc.meta, got, tc.want)
+		}
+	}
+}
+
+func TestXML(t *testing.T) {
+	const s = "<?xml version=\"1.0\" encoding=\"windows-1252\"?><a><Word>r\xe9sum\xe9</Word></a>"
+
+	d := xml.NewDecoder(strings.NewReader(s))
+	d.CharsetReader = NewReaderLabel
+
+	var a struct {
+		Word string
+	}
+	err := d.Decode(&a)
+	if err != nil {
+		t.Fatalf("Decode: %v", err)
+	}
+
+	want := "résumé"
+	if a.Word != want {
+		t.Errorf("got %q, want %q", a.Word, want)
+	}
+}
diff --git a/vendor/golang.org/x/net/html/charset/testdata/HTTP-charset.html b/vendor/golang.org/x/net/html/charset/testdata/HTTP-charset.html
new file mode 100644
index 0000000..9915fa0
--- /dev/null
+++ b/vendor/golang.org/x/net/html/charset/testdata/HTTP-charset.html
@@ -0,0 +1,48 @@
+<!-- Markup of this W3C i18n test page was lost in extraction; only its visible text follows. Title: "HTTP charset" -->

HTTP charset


The character encoding of a page can be set using the HTTP header charset declaration.


The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ÜÀÚ. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.

The only character encoding declaration for this HTML file is in the HTTP header, which sets the encoding to ISO 8859-15.
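For orientation, a minimal Go sketch (not part of the vendored files) of how the DetermineEncoding function added by this patch resolves this case; the page bytes below are a stand-in, since only the Content-Type header carries a declaration here:

	package main

	import (
		"fmt"

		"golang.org/x/net/html/charset"
	)

	func main() {
		// No BOM and no meta declaration in the bytes themselves.
		page := []byte("<!doctype html><title>HTTP charset</title><p>test</p>")
		_, name, certain := charset.DetermineEncoding(page, "text/html; charset=iso-8859-15")
		fmt.Println(name, certain) // iso-8859-15 true
	}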

HTML5

the-input-byte-stream-001
Result summary & related tests
Detailed results for this test
Link to spec

Assumptions:
  • The default encoding for the browser you are testing is not set to ISO 8859-15.
  • +
  • The test is read from a server that supports HTTP.
+
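Not part of the patch: a small sketch of the rule this testdata file pins down, seen through this package's DetermineEncoding. With no BOM in the bytes, the charset parameter of the Content-Type decides, which is what the sniffTestCases entry for HTTP-charset.html expects; the markup literal here is invented.

	package main

	import (
		"fmt"

		"golang.org/x/net/html/charset"
	)

	func main() {
		content := []byte("<html><head></head><body>test</body></html>") // no BOM, no <meta>
		_, name, certain := charset.DetermineEncoding(content, "text/html; charset=iso-8859-15")
		fmt.Println(name, certain) // iso-8859-15 true: the header alone decides
	}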
diff --git a/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html
new file mode 100644
index 0000000..26e5d8b
--- /dev/null
+++ b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html
@@ -0,0 +1,48 @@
[W3C test page; markup lost in extraction — surviving text only.]
+HTTP vs UTF-8 BOM
+A character encoding set in the HTTP header has lower precedence than the UTF-8 signature.
+The HTTP header attempts to set the character encoding to ISO 8859-15. The page starts with a UTF-8 signature.
+The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ýäè. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.
+If the test is unsuccessful, the characters ï»¿ should appear at the top of the page. These represent the bytes that make up the UTF-8 signature when encountered in the ISO 8859-15 encoding.
+HTML5 test id: the-input-byte-stream-034
+Assumptions:
+  • The default encoding for the browser you are testing is not set to ISO 8859-15.
+  • The test is read from a server that supports HTTP.
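Again as an editorial sketch with invented literals, not vendored code: the precedence this file tests. The BOM is checked before the HTTP header, so the result is "utf-8" despite the iso-8859-15 header — exactly what the sniffTestCases entry for this file expects.

	package main

	import (
		"fmt"

		"golang.org/x/net/html/charset"
	)

	func main() {
		// A UTF-8 BOM (EF BB BF) ahead of the markup.
		content := []byte("\xef\xbb\xbf<html><body>...</body></html>")
		_, name, _ := charset.DetermineEncoding(content, "text/html; charset=iso-8859-15")
		fmt.Println(name) // utf-8: the BOM outranks the HTTP header
	}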
diff --git a/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html
new file mode 100644
index 0000000..2f07e95
--- /dev/null
+++ b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html
@@ -0,0 +1,49 @@
[W3C test page; markup lost in extraction — surviving text only.]
+HTTP vs meta charset
+The HTTP header has a higher precedence than an encoding declaration in a meta charset attribute.
+The HTTP header attempts to set the character encoding to ISO 8859-15. The page contains an encoding declaration in a meta charset attribute that attempts to set the character encoding to ISO 8859-1.
+The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ÜÀÚ. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.
+HTML5 test id: the-input-byte-stream-018
+Assumptions:
+  • The default encoding for the browser you are testing is not set to ISO 8859-15.
+  • The test is read from a server that supports HTTP.
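A sketch of this precedence rule (not part of the patch; literals invented): the header's charset wins over a conflicting meta declaration, as the sniffTestCases entry for HTTP-vs-meta-charset.html expects.

	package main

	import (
		"fmt"

		"golang.org/x/net/html/charset"
	)

	func main() {
		content := []byte(`<meta charset="iso-8859-1"><p>hello</p>`) // invented sample
		_, name, _ := charset.DetermineEncoding(content, "text/html; charset=iso-8859-15")
		fmt.Println(name) // iso-8859-15: the header outranks the meta declaration
	}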
diff --git a/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html
new file mode 100644
index 0000000..6853cdd
--- /dev/null
+++ b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html
@@ -0,0 +1,49 @@
[W3C test page; markup lost in extraction — surviving text only.]
+HTTP vs meta content
+The HTTP header has a higher precedence than an encoding declaration in a meta content attribute.
+The HTTP header attempts to set the character encoding to ISO 8859-15. The page contains an encoding declaration in a meta content attribute that attempts to set the character encoding to ISO 8859-1.
+The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ÜÀÚ. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.
+HTML5 test id: the-input-byte-stream-016
+Assumptions:
+  • The default encoding for the browser you are testing is not set to ISO 8859-15.
+  • The test is read from a server that supports HTTP.
diff --git a/vendor/golang.org/x/net/html/charset/testdata/No-encoding-declaration.html b/vendor/golang.org/x/net/html/charset/testdata/No-encoding-declaration.html
new file mode 100644
index 0000000..612e26c
--- /dev/null
+++ b/vendor/golang.org/x/net/html/charset/testdata/No-encoding-declaration.html
@@ -0,0 +1,47 @@
[W3C test page; markup lost in extraction — surviving text only.]
+No encoding declaration
+A page with no encoding information in HTTP, BOM, XML declaration or meta element will be treated as UTF-8.
+The test on this page contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ýäè. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.
+HTML5 test id: the-input-byte-stream-015
+Assumptions:
+  • The test is read from a server that supports HTTP.
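Sketch (not vendored code; the literal is invented): with no declaration anywhere, DetermineEncoding falls back to sniffing the bytes themselves, and valid UTF-8 containing high-bit characters is reported as utf-8 — matching the sniffTestCases entry for this file. Plain ASCII with no declaration would instead fall through to the windows-1252 default.

	package main

	import (
		"fmt"

		"golang.org/x/net/html/charset"
	)

	func main() {
		// No BOM, no charset in the Content-Type, no <meta>, but the
		// bytes are valid UTF-8 with high-bit characters.
		content := []byte("<html><body>ýäè</body></html>")
		_, name, _ := charset.DetermineEncoding(content, "text/html")
		fmt.Println(name) // utf-8
	}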
diff --git a/vendor/golang.org/x/net/html/charset/testdata/README b/vendor/golang.org/x/net/html/charset/testdata/README
new file mode 100644
index 0000000..38ef0f9
--- /dev/null
+++ b/vendor/golang.org/x/net/html/charset/testdata/README
@@ -0,0 +1,9 @@
+These test cases come from
+http://www.w3.org/International/tests/repository/html5/the-input-byte-stream/results-basics
+
+Distributed under both the W3C Test Suite License
+(http://www.w3.org/Consortium/Legal/2008/04-testsuite-license)
+and the W3C 3-clause BSD License
+(http://www.w3.org/Consortium/Legal/2008/03-bsd-license).
+To contribute to a W3C Test Suite, see the policies and contribution
+forms (http://www.w3.org/2004/10/27-testcases).
diff --git a/vendor/golang.org/x/net/html/charset/testdata/UTF-16BE-BOM.html b/vendor/golang.org/x/net/html/charset/testdata/UTF-16BE-BOM.html
new file mode 100644
index 0000000000000000000000000000000000000000..3abf7a9343c20518e57dfea58b374fb0f4fb58a1
GIT binary patch
literal 2670
[base85-encoded binary patch data omitted: the UTF-16BE-BOM.html and UTF-16LE-BOM.html testdata pages are UTF-16-encoded, so git ships them as binary patches.]
literal 0
HcmV?d00001
diff --git a/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-charset.html b/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-charset.html
new file mode 100644
index 0000000..83de433
--- /dev/null
+++ b/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-charset.html
@@ -0,0 +1,49 @@
[W3C test page; markup lost in extraction — surviving text only.]
+UTF-8 BOM vs meta charset
+A page with a UTF-8 BOM will be recognized as UTF-8 even if the meta charset attribute declares a different encoding.
+The page contains an encoding declaration in a meta charset attribute that attempts to set the character encoding to ISO 8859-15, but the file starts with a UTF-8 signature.
+The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ýäè. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.
+HTML5 test id: the-input-byte-stream-038
+Assumptions:
+  • The default encoding for the browser you are testing is not set to ISO 8859-15.
+  • The test is read from a server that supports HTTP.
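One more hedged sketch (invented literals, not part of the patch): the BOM also outranks an in-document meta declaration, which is what the sniffTestCases entries for both UTF-8-BOM-vs-meta files expect.

	package main

	import (
		"fmt"

		"golang.org/x/net/html/charset"
	)

	func main() {
		// UTF-8 BOM first, conflicting meta declaration after it.
		content := []byte("\xef\xbb\xbf<meta charset=\"iso-8859-15\">")
		_, name, _ := charset.DetermineEncoding(content, "text/html")
		fmt.Println(name) // utf-8: the BOM outranks the meta charset attribute
	}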
diff --git a/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-content.html b/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-content.html
new file mode 100644
index 0000000..501aac2
--- /dev/null
+++ b/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-content.html
@@ -0,0 +1,48 @@
[W3C test page; markup lost in extraction — surviving text only.]
+UTF-8 BOM vs meta content
+A page with a UTF-8 BOM will be recognized as UTF-8 even if the meta content attribute declares a different encoding.
+The page contains an encoding declaration in a meta content attribute that attempts to set the character encoding to ISO 8859-15, but the file starts with a UTF-8 signature.
+The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ýäè. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.
+HTML5 test id: the-input-byte-stream-037
+Assumptions:
+  • The default encoding for the browser you are testing is not set to ISO 8859-15.
+  • The test is read from a server that supports HTTP.
diff --git a/vendor/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html b/vendor/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html
new file mode 100644
index 0000000..2d7d25a
--- /dev/null
+++ b/vendor/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html
@@ -0,0 +1,48 @@
[W3C test page; markup lost in extraction — surviving text only.]
+meta charset attribute
+The character encoding of the page can be set by a meta element with charset attribute.
+The only character encoding declaration for this HTML file is in the charset attribute of the meta element, which declares the encoding to be ISO 8859-15.
+The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ÜÀÚ. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.
+HTML5 test id: the-input-byte-stream-009
+Assumptions:
+  • The default encoding for the browser you are testing is not set to ISO 8859-15.
+  • The test is read from a server that supports HTTP.
diff --git a/vendor/golang.org/x/net/html/charset/testdata/meta-content-attribute.html b/vendor/golang.org/x/net/html/charset/testdata/meta-content-attribute.html
new file mode 100644
index 0000000..1c3f228
--- /dev/null
+++ b/vendor/golang.org/x/net/html/charset/testdata/meta-content-attribute.html
@@ -0,0 +1,48 @@
[W3C test page; markup lost in extraction — surviving text only.]
+meta content attribute
+The character encoding of the page can be set by a meta element with http-equiv and content attributes.
+The only character encoding declaration for this HTML file is in the content attribute of the meta element, which declares the encoding to be ISO 8859-15.
+The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ÜÀÚ. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.
+HTML5 test id: the-input-byte-stream-007
+Assumptions:
+  • The default encoding for the browser you are testing is not set to ISO 8859-15.
+  • The test is read from a server that supports HTTP.
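A final sketch (not part of the patch; literals invented): a meta http-equiv/content pair alone is honored, but the result is flagged as not certain, since meta declarations are tentative in the sniffing algorithm. The sniffTestCases entry for meta-content-attribute.html expects iso-8859-15 here.

	package main

	import (
		"fmt"

		"golang.org/x/net/html/charset"
	)

	func main() {
		// The only declaration is a meta http-equiv/content pair.
		content := []byte(`<meta http-equiv="content-type" content="text/html; charset=iso-8859-15">`)
		_, name, certain := charset.DetermineEncoding(content, "text/html")
		fmt.Println(name, certain) // iso-8859-15 false
	}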
+ + + + + + diff --git a/vendor/golang.org/x/net/html/const.go b/vendor/golang.org/x/net/html/const.go new file mode 100644 index 0000000..5eb7c5a --- /dev/null +++ b/vendor/golang.org/x/net/html/const.go @@ -0,0 +1,104 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +// Section 12.2.4.2 of the HTML5 specification says "The following elements +// have varying levels of special parsing rules". +// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements +var isSpecialElementMap = map[string]bool{ + "address": true, + "applet": true, + "area": true, + "article": true, + "aside": true, + "base": true, + "basefont": true, + "bgsound": true, + "blockquote": true, + "body": true, + "br": true, + "button": true, + "caption": true, + "center": true, + "col": true, + "colgroup": true, + "dd": true, + "details": true, + "dir": true, + "div": true, + "dl": true, + "dt": true, + "embed": true, + "fieldset": true, + "figcaption": true, + "figure": true, + "footer": true, + "form": true, + "frame": true, + "frameset": true, + "h1": true, + "h2": true, + "h3": true, + "h4": true, + "h5": true, + "h6": true, + "head": true, + "header": true, + "hgroup": true, + "hr": true, + "html": true, + "iframe": true, + "img": true, + "input": true, + "isindex": true, // The 'isindex' element has been removed, but keep it for backwards compatibility. + "keygen": true, + "li": true, + "link": true, + "listing": true, + "main": true, + "marquee": true, + "menu": true, + "meta": true, + "nav": true, + "noembed": true, + "noframes": true, + "noscript": true, + "object": true, + "ol": true, + "p": true, + "param": true, + "plaintext": true, + "pre": true, + "script": true, + "section": true, + "select": true, + "source": true, + "style": true, + "summary": true, + "table": true, + "tbody": true, + "td": true, + "template": true, + "textarea": true, + "tfoot": true, + "th": true, + "thead": true, + "title": true, + "tr": true, + "track": true, + "ul": true, + "wbr": true, + "xmp": true, +} + +func isSpecialElement(element *Node) bool { + switch element.Namespace { + case "", "html": + return isSpecialElementMap[element.Data] + case "svg": + return element.Data == "foreignObject" + } + return false +} diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go new file mode 100644 index 0000000..822ed42 --- /dev/null +++ b/vendor/golang.org/x/net/html/doc.go @@ -0,0 +1,106 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package html implements an HTML5-compliant tokenizer and parser. + +Tokenization is done by creating a Tokenizer for an io.Reader r. It is the +caller's responsibility to ensure that r provides UTF-8 encoded HTML. + + z := html.NewTokenizer(r) + +Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(), +which parses the next token and returns its type, or an error: + + for { + tt := z.Next() + if tt == html.ErrorToken { + // ... + return ... + } + // Process the current token. + } + +There are two APIs for retrieving the current token. The high-level API is to +call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs +allow optionally calling Raw after Next but before Token, Text, TagName, or +TagAttr. 
In EBNF notation, the valid call sequence per token is:
+
+	Next {Raw} [ Token | Text | TagName {TagAttr} ]
+
+Token returns an independent data structure that completely describes a token.
+Entities (such as "&lt;") are unescaped, tag names and attribute keys are
+lower-cased, and attributes are collected into a []Attribute. For example:
+
+	for {
+		if z.Next() == html.ErrorToken {
+			// Returning io.EOF indicates success.
+			return z.Err()
+		}
+		emitToken(z.Token())
+	}
+
+The low-level API performs fewer allocations and copies, but the contents of
+the []byte values returned by Text, TagName and TagAttr may change on the next
+call to Next. For example, to extract an HTML page's anchor text:
+
+	depth := 0
+	for {
+		tt := z.Next()
+		switch tt {
+		case html.ErrorToken:
+			return z.Err()
+		case html.TextToken:
+			if depth > 0 {
+				// emitBytes should copy the []byte it receives,
+				// if it doesn't process it immediately.
+				emitBytes(z.Text())
+			}
+		case html.StartTagToken, html.EndTagToken:
+			tn, _ := z.TagName()
+			if len(tn) == 1 && tn[0] == 'a' {
+				if tt == html.StartTagToken {
+					depth++
+				} else {
+					depth--
+				}
+			}
+		}
+	}
+
+Parsing is done by calling Parse with an io.Reader, which returns the root of
+the parse tree (the document element) as a *Node. It is the caller's
+responsibility to ensure that the Reader provides UTF-8 encoded HTML. For
+example, to process each anchor node in depth-first order:
+
+	doc, err := html.Parse(r)
+	if err != nil {
+		// ...
+	}
+	var f func(*html.Node)
+	f = func(n *html.Node) {
+		if n.Type == html.ElementNode && n.Data == "a" {
+			// Do something with n...
+		}
+		for c := n.FirstChild; c != nil; c = c.NextSibling {
+			f(c)
+		}
+	}
+	f(doc)
+
+The relevant specifications include:
+https://html.spec.whatwg.org/multipage/syntax.html and
+https://html.spec.whatwg.org/multipage/syntax.html#tokenization
+*/
+package html // import "golang.org/x/net/html"
+
+// The tokenization algorithm implemented by this package is not a line-by-line
+// transliteration of the relatively verbose state-machine in the WHATWG
+// specification. A more direct approach is used instead, where the program
+// counter implies the state, such as whether it is tokenizing a tag or a text
+// node. Specification compliance is verified by checking expected and actual
+// outputs over a test suite rather than aiming for algorithmic fidelity.
+
+// TODO(nigeltao): Does a DOM API belong in this package or a separate one?
+// TODO(nigeltao): How does parsing interact with a JavaScript engine?
diff --git a/vendor/golang.org/x/net/html/doctype.go b/vendor/golang.org/x/net/html/doctype.go
new file mode 100644
index 0000000..c484e5a
--- /dev/null
+++ b/vendor/golang.org/x/net/html/doctype.go
@@ -0,0 +1,156 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+	"strings"
+)
+
+// parseDoctype parses the data from a DoctypeToken into a name,
+// public identifier, and system identifier. It returns a Node whose Type
+// is DoctypeNode, whose Data is the name, and which has attributes
+// named "system" and "public" for the two identifiers if they were present.
+// quirks is whether the document should be parsed in "quirks mode".
+func parseDoctype(s string) (n *Node, quirks bool) {
+	n = &Node{Type: DoctypeNode}
+
+	// Find the name.
+	space := strings.IndexAny(s, whitespace)
+	if space == -1 {
+		space = len(s)
+	}
+	n.Data = s[:space]
+	// The comparison to "html" is case-sensitive.
+	if n.Data != "html" {
+		quirks = true
+	}
+	n.Data = strings.ToLower(n.Data)
+	s = strings.TrimLeft(s[space:], whitespace)
+
+	if len(s) < 6 {
+		// It can't start with "PUBLIC" or "SYSTEM".
+		// Ignore the rest of the string.
+		return n, quirks || s != ""
+	}
+
+	key := strings.ToLower(s[:6])
+	s = s[6:]
+	for key == "public" || key == "system" {
+		s = strings.TrimLeft(s, whitespace)
+		if s == "" {
+			break
+		}
+		quote := s[0]
+		if quote != '"' && quote != '\'' {
+			break
+		}
+		s = s[1:]
+		q := strings.IndexRune(s, rune(quote))
+		var id string
+		if q == -1 {
+			id = s
+			s = ""
+		} else {
+			id = s[:q]
+			s = s[q+1:]
+		}
+		n.Attr = append(n.Attr, Attribute{Key: key, Val: id})
+		if key == "public" {
+			key = "system"
+		} else {
+			key = ""
+		}
+	}
+
+	if key != "" || s != "" {
+		quirks = true
+	} else if len(n.Attr) > 0 {
+		if n.Attr[0].Key == "public" {
+			public := strings.ToLower(n.Attr[0].Val)
+			switch public {
+			case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3c/dtd html 4.0 transitional/en", "html":
+				quirks = true
+			default:
+				for _, q := range quirkyIDs {
+					if strings.HasPrefix(public, q) {
+						quirks = true
+						break
+					}
+				}
+			}
+			// The following two public IDs only cause quirks mode if there
+			// is no system ID.
+			if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") ||
+				strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) {
+				quirks = true
+			}
+		}
+		if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" &&
+			strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" {
+			quirks = true
+		}
+	}
+
+	return n, quirks
+}
+
+// quirkyIDs is a list of public doctype identifiers that cause a document
+// to be interpreted in quirks mode. The identifiers should be in lower case.
+var quirkyIDs = []string{
+	"+//silmaril//dtd html pro v0r11 19970101//",
+	"-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
+	"-//as//dtd html 3.0 aswedit + extensions//",
+	"-//ietf//dtd html 2.0 level 1//",
+	"-//ietf//dtd html 2.0 level 2//",
+	"-//ietf//dtd html 2.0 strict level 1//",
+	"-//ietf//dtd html 2.0 strict level 2//",
+	"-//ietf//dtd html 2.0 strict//",
+	"-//ietf//dtd html 2.0//",
+	"-//ietf//dtd html 2.1e//",
+	"-//ietf//dtd html 3.0//",
+	"-//ietf//dtd html 3.2 final//",
+	"-//ietf//dtd html 3.2//",
+	"-//ietf//dtd html 3//",
+	"-//ietf//dtd html level 0//",
+	"-//ietf//dtd html level 1//",
+	"-//ietf//dtd html level 2//",
+	"-//ietf//dtd html level 3//",
+	"-//ietf//dtd html strict level 0//",
+	"-//ietf//dtd html strict level 1//",
+	"-//ietf//dtd html strict level 2//",
+	"-//ietf//dtd html strict level 3//",
+	"-//ietf//dtd html strict//",
+	"-//ietf//dtd html//",
+	"-//metrius//dtd metrius presentational//",
+	"-//microsoft//dtd internet explorer 2.0 html strict//",
+	"-//microsoft//dtd internet explorer 2.0 html//",
+	"-//microsoft//dtd internet explorer 2.0 tables//",
+	"-//microsoft//dtd internet explorer 3.0 html strict//",
+	"-//microsoft//dtd internet explorer 3.0 html//",
+	"-//microsoft//dtd internet explorer 3.0 tables//",
+	"-//netscape comm. corp.//dtd html//",
+	"-//netscape comm.
corp.//dtd strict html//", + "-//o'reilly and associates//dtd html 2.0//", + "-//o'reilly and associates//dtd html extended 1.0//", + "-//o'reilly and associates//dtd html extended relaxed 1.0//", + "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//", + "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//", + "-//spyglass//dtd html 2.0 extended//", + "-//sq//dtd html 2.0 hotmetal + extensions//", + "-//sun microsystems corp.//dtd hotjava html//", + "-//sun microsystems corp.//dtd hotjava strict html//", + "-//w3c//dtd html 3 1995-03-24//", + "-//w3c//dtd html 3.2 draft//", + "-//w3c//dtd html 3.2 final//", + "-//w3c//dtd html 3.2//", + "-//w3c//dtd html 3.2s draft//", + "-//w3c//dtd html 4.0 frameset//", + "-//w3c//dtd html 4.0 transitional//", + "-//w3c//dtd html experimental 19960712//", + "-//w3c//dtd html experimental 970421//", + "-//w3c//dtd w3 html//", + "-//w3o//dtd w3 html 3.0//", + "-//webtechs//dtd mozilla html 2.0//", + "-//webtechs//dtd mozilla html//", +} diff --git a/vendor/golang.org/x/net/html/entity.go b/vendor/golang.org/x/net/html/entity.go new file mode 100644 index 0000000..a50c04c --- /dev/null +++ b/vendor/golang.org/x/net/html/entity.go @@ -0,0 +1,2253 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +// All entities that do not end with ';' are 6 or fewer bytes long. +const longestEntityWithoutSemicolon = 6 + +// entity is a map from HTML entity names to their values. The semicolon matters: +// https://html.spec.whatwg.org/multipage/syntax.html#named-character-references +// lists both "amp" and "amp;" as two separate entries. +// +// Note that the HTML5 list is larger than the HTML4 list at +// http://www.w3.org/TR/html4/sgml/entities.html +var entity = map[string]rune{ + "AElig;": '\U000000C6', + "AMP;": '\U00000026', + "Aacute;": '\U000000C1', + "Abreve;": '\U00000102', + "Acirc;": '\U000000C2', + "Acy;": '\U00000410', + "Afr;": '\U0001D504', + "Agrave;": '\U000000C0', + "Alpha;": '\U00000391', + "Amacr;": '\U00000100', + "And;": '\U00002A53', + "Aogon;": '\U00000104', + "Aopf;": '\U0001D538', + "ApplyFunction;": '\U00002061', + "Aring;": '\U000000C5', + "Ascr;": '\U0001D49C', + "Assign;": '\U00002254', + "Atilde;": '\U000000C3', + "Auml;": '\U000000C4', + "Backslash;": '\U00002216', + "Barv;": '\U00002AE7', + "Barwed;": '\U00002306', + "Bcy;": '\U00000411', + "Because;": '\U00002235', + "Bernoullis;": '\U0000212C', + "Beta;": '\U00000392', + "Bfr;": '\U0001D505', + "Bopf;": '\U0001D539', + "Breve;": '\U000002D8', + "Bscr;": '\U0000212C', + "Bumpeq;": '\U0000224E', + "CHcy;": '\U00000427', + "COPY;": '\U000000A9', + "Cacute;": '\U00000106', + "Cap;": '\U000022D2', + "CapitalDifferentialD;": '\U00002145', + "Cayleys;": '\U0000212D', + "Ccaron;": '\U0000010C', + "Ccedil;": '\U000000C7', + "Ccirc;": '\U00000108', + "Cconint;": '\U00002230', + "Cdot;": '\U0000010A', + "Cedilla;": '\U000000B8', + "CenterDot;": '\U000000B7', + "Cfr;": '\U0000212D', + "Chi;": '\U000003A7', + "CircleDot;": '\U00002299', + "CircleMinus;": '\U00002296', + "CirclePlus;": '\U00002295', + "CircleTimes;": '\U00002297', + "ClockwiseContourIntegral;": '\U00002232', + "CloseCurlyDoubleQuote;": '\U0000201D', + "CloseCurlyQuote;": '\U00002019', + "Colon;": '\U00002237', + "Colone;": '\U00002A74', + "Congruent;": '\U00002261', + "Conint;": '\U0000222F', + "ContourIntegral;": '\U0000222E', + "Copf;": '\U00002102', + 
"Coproduct;": '\U00002210', + "CounterClockwiseContourIntegral;": '\U00002233', + "Cross;": '\U00002A2F', + "Cscr;": '\U0001D49E', + "Cup;": '\U000022D3', + "CupCap;": '\U0000224D', + "DD;": '\U00002145', + "DDotrahd;": '\U00002911', + "DJcy;": '\U00000402', + "DScy;": '\U00000405', + "DZcy;": '\U0000040F', + "Dagger;": '\U00002021', + "Darr;": '\U000021A1', + "Dashv;": '\U00002AE4', + "Dcaron;": '\U0000010E', + "Dcy;": '\U00000414', + "Del;": '\U00002207', + "Delta;": '\U00000394', + "Dfr;": '\U0001D507', + "DiacriticalAcute;": '\U000000B4', + "DiacriticalDot;": '\U000002D9', + "DiacriticalDoubleAcute;": '\U000002DD', + "DiacriticalGrave;": '\U00000060', + "DiacriticalTilde;": '\U000002DC', + "Diamond;": '\U000022C4', + "DifferentialD;": '\U00002146', + "Dopf;": '\U0001D53B', + "Dot;": '\U000000A8', + "DotDot;": '\U000020DC', + "DotEqual;": '\U00002250', + "DoubleContourIntegral;": '\U0000222F', + "DoubleDot;": '\U000000A8', + "DoubleDownArrow;": '\U000021D3', + "DoubleLeftArrow;": '\U000021D0', + "DoubleLeftRightArrow;": '\U000021D4', + "DoubleLeftTee;": '\U00002AE4', + "DoubleLongLeftArrow;": '\U000027F8', + "DoubleLongLeftRightArrow;": '\U000027FA', + "DoubleLongRightArrow;": '\U000027F9', + "DoubleRightArrow;": '\U000021D2', + "DoubleRightTee;": '\U000022A8', + "DoubleUpArrow;": '\U000021D1', + "DoubleUpDownArrow;": '\U000021D5', + "DoubleVerticalBar;": '\U00002225', + "DownArrow;": '\U00002193', + "DownArrowBar;": '\U00002913', + "DownArrowUpArrow;": '\U000021F5', + "DownBreve;": '\U00000311', + "DownLeftRightVector;": '\U00002950', + "DownLeftTeeVector;": '\U0000295E', + "DownLeftVector;": '\U000021BD', + "DownLeftVectorBar;": '\U00002956', + "DownRightTeeVector;": '\U0000295F', + "DownRightVector;": '\U000021C1', + "DownRightVectorBar;": '\U00002957', + "DownTee;": '\U000022A4', + "DownTeeArrow;": '\U000021A7', + "Downarrow;": '\U000021D3', + "Dscr;": '\U0001D49F', + "Dstrok;": '\U00000110', + "ENG;": '\U0000014A', + "ETH;": '\U000000D0', + "Eacute;": '\U000000C9', + "Ecaron;": '\U0000011A', + "Ecirc;": '\U000000CA', + "Ecy;": '\U0000042D', + "Edot;": '\U00000116', + "Efr;": '\U0001D508', + "Egrave;": '\U000000C8', + "Element;": '\U00002208', + "Emacr;": '\U00000112', + "EmptySmallSquare;": '\U000025FB', + "EmptyVerySmallSquare;": '\U000025AB', + "Eogon;": '\U00000118', + "Eopf;": '\U0001D53C', + "Epsilon;": '\U00000395', + "Equal;": '\U00002A75', + "EqualTilde;": '\U00002242', + "Equilibrium;": '\U000021CC', + "Escr;": '\U00002130', + "Esim;": '\U00002A73', + "Eta;": '\U00000397', + "Euml;": '\U000000CB', + "Exists;": '\U00002203', + "ExponentialE;": '\U00002147', + "Fcy;": '\U00000424', + "Ffr;": '\U0001D509', + "FilledSmallSquare;": '\U000025FC', + "FilledVerySmallSquare;": '\U000025AA', + "Fopf;": '\U0001D53D', + "ForAll;": '\U00002200', + "Fouriertrf;": '\U00002131', + "Fscr;": '\U00002131', + "GJcy;": '\U00000403', + "GT;": '\U0000003E', + "Gamma;": '\U00000393', + "Gammad;": '\U000003DC', + "Gbreve;": '\U0000011E', + "Gcedil;": '\U00000122', + "Gcirc;": '\U0000011C', + "Gcy;": '\U00000413', + "Gdot;": '\U00000120', + "Gfr;": '\U0001D50A', + "Gg;": '\U000022D9', + "Gopf;": '\U0001D53E', + "GreaterEqual;": '\U00002265', + "GreaterEqualLess;": '\U000022DB', + "GreaterFullEqual;": '\U00002267', + "GreaterGreater;": '\U00002AA2', + "GreaterLess;": '\U00002277', + "GreaterSlantEqual;": '\U00002A7E', + "GreaterTilde;": '\U00002273', + "Gscr;": '\U0001D4A2', + "Gt;": '\U0000226B', + "HARDcy;": '\U0000042A', + "Hacek;": '\U000002C7', + "Hat;": '\U0000005E', + "Hcirc;": '\U00000124', 
+ "Hfr;": '\U0000210C', + "HilbertSpace;": '\U0000210B', + "Hopf;": '\U0000210D', + "HorizontalLine;": '\U00002500', + "Hscr;": '\U0000210B', + "Hstrok;": '\U00000126', + "HumpDownHump;": '\U0000224E', + "HumpEqual;": '\U0000224F', + "IEcy;": '\U00000415', + "IJlig;": '\U00000132', + "IOcy;": '\U00000401', + "Iacute;": '\U000000CD', + "Icirc;": '\U000000CE', + "Icy;": '\U00000418', + "Idot;": '\U00000130', + "Ifr;": '\U00002111', + "Igrave;": '\U000000CC', + "Im;": '\U00002111', + "Imacr;": '\U0000012A', + "ImaginaryI;": '\U00002148', + "Implies;": '\U000021D2', + "Int;": '\U0000222C', + "Integral;": '\U0000222B', + "Intersection;": '\U000022C2', + "InvisibleComma;": '\U00002063', + "InvisibleTimes;": '\U00002062', + "Iogon;": '\U0000012E', + "Iopf;": '\U0001D540', + "Iota;": '\U00000399', + "Iscr;": '\U00002110', + "Itilde;": '\U00000128', + "Iukcy;": '\U00000406', + "Iuml;": '\U000000CF', + "Jcirc;": '\U00000134', + "Jcy;": '\U00000419', + "Jfr;": '\U0001D50D', + "Jopf;": '\U0001D541', + "Jscr;": '\U0001D4A5', + "Jsercy;": '\U00000408', + "Jukcy;": '\U00000404', + "KHcy;": '\U00000425', + "KJcy;": '\U0000040C', + "Kappa;": '\U0000039A', + "Kcedil;": '\U00000136', + "Kcy;": '\U0000041A', + "Kfr;": '\U0001D50E', + "Kopf;": '\U0001D542', + "Kscr;": '\U0001D4A6', + "LJcy;": '\U00000409', + "LT;": '\U0000003C', + "Lacute;": '\U00000139', + "Lambda;": '\U0000039B', + "Lang;": '\U000027EA', + "Laplacetrf;": '\U00002112', + "Larr;": '\U0000219E', + "Lcaron;": '\U0000013D', + "Lcedil;": '\U0000013B', + "Lcy;": '\U0000041B', + "LeftAngleBracket;": '\U000027E8', + "LeftArrow;": '\U00002190', + "LeftArrowBar;": '\U000021E4', + "LeftArrowRightArrow;": '\U000021C6', + "LeftCeiling;": '\U00002308', + "LeftDoubleBracket;": '\U000027E6', + "LeftDownTeeVector;": '\U00002961', + "LeftDownVector;": '\U000021C3', + "LeftDownVectorBar;": '\U00002959', + "LeftFloor;": '\U0000230A', + "LeftRightArrow;": '\U00002194', + "LeftRightVector;": '\U0000294E', + "LeftTee;": '\U000022A3', + "LeftTeeArrow;": '\U000021A4', + "LeftTeeVector;": '\U0000295A', + "LeftTriangle;": '\U000022B2', + "LeftTriangleBar;": '\U000029CF', + "LeftTriangleEqual;": '\U000022B4', + "LeftUpDownVector;": '\U00002951', + "LeftUpTeeVector;": '\U00002960', + "LeftUpVector;": '\U000021BF', + "LeftUpVectorBar;": '\U00002958', + "LeftVector;": '\U000021BC', + "LeftVectorBar;": '\U00002952', + "Leftarrow;": '\U000021D0', + "Leftrightarrow;": '\U000021D4', + "LessEqualGreater;": '\U000022DA', + "LessFullEqual;": '\U00002266', + "LessGreater;": '\U00002276', + "LessLess;": '\U00002AA1', + "LessSlantEqual;": '\U00002A7D', + "LessTilde;": '\U00002272', + "Lfr;": '\U0001D50F', + "Ll;": '\U000022D8', + "Lleftarrow;": '\U000021DA', + "Lmidot;": '\U0000013F', + "LongLeftArrow;": '\U000027F5', + "LongLeftRightArrow;": '\U000027F7', + "LongRightArrow;": '\U000027F6', + "Longleftarrow;": '\U000027F8', + "Longleftrightarrow;": '\U000027FA', + "Longrightarrow;": '\U000027F9', + "Lopf;": '\U0001D543', + "LowerLeftArrow;": '\U00002199', + "LowerRightArrow;": '\U00002198', + "Lscr;": '\U00002112', + "Lsh;": '\U000021B0', + "Lstrok;": '\U00000141', + "Lt;": '\U0000226A', + "Map;": '\U00002905', + "Mcy;": '\U0000041C', + "MediumSpace;": '\U0000205F', + "Mellintrf;": '\U00002133', + "Mfr;": '\U0001D510', + "MinusPlus;": '\U00002213', + "Mopf;": '\U0001D544', + "Mscr;": '\U00002133', + "Mu;": '\U0000039C', + "NJcy;": '\U0000040A', + "Nacute;": '\U00000143', + "Ncaron;": '\U00000147', + "Ncedil;": '\U00000145', + "Ncy;": '\U0000041D', + "NegativeMediumSpace;": 
'\U0000200B', + "NegativeThickSpace;": '\U0000200B', + "NegativeThinSpace;": '\U0000200B', + "NegativeVeryThinSpace;": '\U0000200B', + "NestedGreaterGreater;": '\U0000226B', + "NestedLessLess;": '\U0000226A', + "NewLine;": '\U0000000A', + "Nfr;": '\U0001D511', + "NoBreak;": '\U00002060', + "NonBreakingSpace;": '\U000000A0', + "Nopf;": '\U00002115', + "Not;": '\U00002AEC', + "NotCongruent;": '\U00002262', + "NotCupCap;": '\U0000226D', + "NotDoubleVerticalBar;": '\U00002226', + "NotElement;": '\U00002209', + "NotEqual;": '\U00002260', + "NotExists;": '\U00002204', + "NotGreater;": '\U0000226F', + "NotGreaterEqual;": '\U00002271', + "NotGreaterLess;": '\U00002279', + "NotGreaterTilde;": '\U00002275', + "NotLeftTriangle;": '\U000022EA', + "NotLeftTriangleEqual;": '\U000022EC', + "NotLess;": '\U0000226E', + "NotLessEqual;": '\U00002270', + "NotLessGreater;": '\U00002278', + "NotLessTilde;": '\U00002274', + "NotPrecedes;": '\U00002280', + "NotPrecedesSlantEqual;": '\U000022E0', + "NotReverseElement;": '\U0000220C', + "NotRightTriangle;": '\U000022EB', + "NotRightTriangleEqual;": '\U000022ED', + "NotSquareSubsetEqual;": '\U000022E2', + "NotSquareSupersetEqual;": '\U000022E3', + "NotSubsetEqual;": '\U00002288', + "NotSucceeds;": '\U00002281', + "NotSucceedsSlantEqual;": '\U000022E1', + "NotSupersetEqual;": '\U00002289', + "NotTilde;": '\U00002241', + "NotTildeEqual;": '\U00002244', + "NotTildeFullEqual;": '\U00002247', + "NotTildeTilde;": '\U00002249', + "NotVerticalBar;": '\U00002224', + "Nscr;": '\U0001D4A9', + "Ntilde;": '\U000000D1', + "Nu;": '\U0000039D', + "OElig;": '\U00000152', + "Oacute;": '\U000000D3', + "Ocirc;": '\U000000D4', + "Ocy;": '\U0000041E', + "Odblac;": '\U00000150', + "Ofr;": '\U0001D512', + "Ograve;": '\U000000D2', + "Omacr;": '\U0000014C', + "Omega;": '\U000003A9', + "Omicron;": '\U0000039F', + "Oopf;": '\U0001D546', + "OpenCurlyDoubleQuote;": '\U0000201C', + "OpenCurlyQuote;": '\U00002018', + "Or;": '\U00002A54', + "Oscr;": '\U0001D4AA', + "Oslash;": '\U000000D8', + "Otilde;": '\U000000D5', + "Otimes;": '\U00002A37', + "Ouml;": '\U000000D6', + "OverBar;": '\U0000203E', + "OverBrace;": '\U000023DE', + "OverBracket;": '\U000023B4', + "OverParenthesis;": '\U000023DC', + "PartialD;": '\U00002202', + "Pcy;": '\U0000041F', + "Pfr;": '\U0001D513', + "Phi;": '\U000003A6', + "Pi;": '\U000003A0', + "PlusMinus;": '\U000000B1', + "Poincareplane;": '\U0000210C', + "Popf;": '\U00002119', + "Pr;": '\U00002ABB', + "Precedes;": '\U0000227A', + "PrecedesEqual;": '\U00002AAF', + "PrecedesSlantEqual;": '\U0000227C', + "PrecedesTilde;": '\U0000227E', + "Prime;": '\U00002033', + "Product;": '\U0000220F', + "Proportion;": '\U00002237', + "Proportional;": '\U0000221D', + "Pscr;": '\U0001D4AB', + "Psi;": '\U000003A8', + "QUOT;": '\U00000022', + "Qfr;": '\U0001D514', + "Qopf;": '\U0000211A', + "Qscr;": '\U0001D4AC', + "RBarr;": '\U00002910', + "REG;": '\U000000AE', + "Racute;": '\U00000154', + "Rang;": '\U000027EB', + "Rarr;": '\U000021A0', + "Rarrtl;": '\U00002916', + "Rcaron;": '\U00000158', + "Rcedil;": '\U00000156', + "Rcy;": '\U00000420', + "Re;": '\U0000211C', + "ReverseElement;": '\U0000220B', + "ReverseEquilibrium;": '\U000021CB', + "ReverseUpEquilibrium;": '\U0000296F', + "Rfr;": '\U0000211C', + "Rho;": '\U000003A1', + "RightAngleBracket;": '\U000027E9', + "RightArrow;": '\U00002192', + "RightArrowBar;": '\U000021E5', + "RightArrowLeftArrow;": '\U000021C4', + "RightCeiling;": '\U00002309', + "RightDoubleBracket;": '\U000027E7', + "RightDownTeeVector;": '\U0000295D', + "RightDownVector;": 
'\U000021C2', + "RightDownVectorBar;": '\U00002955', + "RightFloor;": '\U0000230B', + "RightTee;": '\U000022A2', + "RightTeeArrow;": '\U000021A6', + "RightTeeVector;": '\U0000295B', + "RightTriangle;": '\U000022B3', + "RightTriangleBar;": '\U000029D0', + "RightTriangleEqual;": '\U000022B5', + "RightUpDownVector;": '\U0000294F', + "RightUpTeeVector;": '\U0000295C', + "RightUpVector;": '\U000021BE', + "RightUpVectorBar;": '\U00002954', + "RightVector;": '\U000021C0', + "RightVectorBar;": '\U00002953', + "Rightarrow;": '\U000021D2', + "Ropf;": '\U0000211D', + "RoundImplies;": '\U00002970', + "Rrightarrow;": '\U000021DB', + "Rscr;": '\U0000211B', + "Rsh;": '\U000021B1', + "RuleDelayed;": '\U000029F4', + "SHCHcy;": '\U00000429', + "SHcy;": '\U00000428', + "SOFTcy;": '\U0000042C', + "Sacute;": '\U0000015A', + "Sc;": '\U00002ABC', + "Scaron;": '\U00000160', + "Scedil;": '\U0000015E', + "Scirc;": '\U0000015C', + "Scy;": '\U00000421', + "Sfr;": '\U0001D516', + "ShortDownArrow;": '\U00002193', + "ShortLeftArrow;": '\U00002190', + "ShortRightArrow;": '\U00002192', + "ShortUpArrow;": '\U00002191', + "Sigma;": '\U000003A3', + "SmallCircle;": '\U00002218', + "Sopf;": '\U0001D54A', + "Sqrt;": '\U0000221A', + "Square;": '\U000025A1', + "SquareIntersection;": '\U00002293', + "SquareSubset;": '\U0000228F', + "SquareSubsetEqual;": '\U00002291', + "SquareSuperset;": '\U00002290', + "SquareSupersetEqual;": '\U00002292', + "SquareUnion;": '\U00002294', + "Sscr;": '\U0001D4AE', + "Star;": '\U000022C6', + "Sub;": '\U000022D0', + "Subset;": '\U000022D0', + "SubsetEqual;": '\U00002286', + "Succeeds;": '\U0000227B', + "SucceedsEqual;": '\U00002AB0', + "SucceedsSlantEqual;": '\U0000227D', + "SucceedsTilde;": '\U0000227F', + "SuchThat;": '\U0000220B', + "Sum;": '\U00002211', + "Sup;": '\U000022D1', + "Superset;": '\U00002283', + "SupersetEqual;": '\U00002287', + "Supset;": '\U000022D1', + "THORN;": '\U000000DE', + "TRADE;": '\U00002122', + "TSHcy;": '\U0000040B', + "TScy;": '\U00000426', + "Tab;": '\U00000009', + "Tau;": '\U000003A4', + "Tcaron;": '\U00000164', + "Tcedil;": '\U00000162', + "Tcy;": '\U00000422', + "Tfr;": '\U0001D517', + "Therefore;": '\U00002234', + "Theta;": '\U00000398', + "ThinSpace;": '\U00002009', + "Tilde;": '\U0000223C', + "TildeEqual;": '\U00002243', + "TildeFullEqual;": '\U00002245', + "TildeTilde;": '\U00002248', + "Topf;": '\U0001D54B', + "TripleDot;": '\U000020DB', + "Tscr;": '\U0001D4AF', + "Tstrok;": '\U00000166', + "Uacute;": '\U000000DA', + "Uarr;": '\U0000219F', + "Uarrocir;": '\U00002949', + "Ubrcy;": '\U0000040E', + "Ubreve;": '\U0000016C', + "Ucirc;": '\U000000DB', + "Ucy;": '\U00000423', + "Udblac;": '\U00000170', + "Ufr;": '\U0001D518', + "Ugrave;": '\U000000D9', + "Umacr;": '\U0000016A', + "UnderBar;": '\U0000005F', + "UnderBrace;": '\U000023DF', + "UnderBracket;": '\U000023B5', + "UnderParenthesis;": '\U000023DD', + "Union;": '\U000022C3', + "UnionPlus;": '\U0000228E', + "Uogon;": '\U00000172', + "Uopf;": '\U0001D54C', + "UpArrow;": '\U00002191', + "UpArrowBar;": '\U00002912', + "UpArrowDownArrow;": '\U000021C5', + "UpDownArrow;": '\U00002195', + "UpEquilibrium;": '\U0000296E', + "UpTee;": '\U000022A5', + "UpTeeArrow;": '\U000021A5', + "Uparrow;": '\U000021D1', + "Updownarrow;": '\U000021D5', + "UpperLeftArrow;": '\U00002196', + "UpperRightArrow;": '\U00002197', + "Upsi;": '\U000003D2', + "Upsilon;": '\U000003A5', + "Uring;": '\U0000016E', + "Uscr;": '\U0001D4B0', + "Utilde;": '\U00000168', + "Uuml;": '\U000000DC', + "VDash;": '\U000022AB', + "Vbar;": '\U00002AEB', + "Vcy;": 
'\U00000412', + "Vdash;": '\U000022A9', + "Vdashl;": '\U00002AE6', + "Vee;": '\U000022C1', + "Verbar;": '\U00002016', + "Vert;": '\U00002016', + "VerticalBar;": '\U00002223', + "VerticalLine;": '\U0000007C', + "VerticalSeparator;": '\U00002758', + "VerticalTilde;": '\U00002240', + "VeryThinSpace;": '\U0000200A', + "Vfr;": '\U0001D519', + "Vopf;": '\U0001D54D', + "Vscr;": '\U0001D4B1', + "Vvdash;": '\U000022AA', + "Wcirc;": '\U00000174', + "Wedge;": '\U000022C0', + "Wfr;": '\U0001D51A', + "Wopf;": '\U0001D54E', + "Wscr;": '\U0001D4B2', + "Xfr;": '\U0001D51B', + "Xi;": '\U0000039E', + "Xopf;": '\U0001D54F', + "Xscr;": '\U0001D4B3', + "YAcy;": '\U0000042F', + "YIcy;": '\U00000407', + "YUcy;": '\U0000042E', + "Yacute;": '\U000000DD', + "Ycirc;": '\U00000176', + "Ycy;": '\U0000042B', + "Yfr;": '\U0001D51C', + "Yopf;": '\U0001D550', + "Yscr;": '\U0001D4B4', + "Yuml;": '\U00000178', + "ZHcy;": '\U00000416', + "Zacute;": '\U00000179', + "Zcaron;": '\U0000017D', + "Zcy;": '\U00000417', + "Zdot;": '\U0000017B', + "ZeroWidthSpace;": '\U0000200B', + "Zeta;": '\U00000396', + "Zfr;": '\U00002128', + "Zopf;": '\U00002124', + "Zscr;": '\U0001D4B5', + "aacute;": '\U000000E1', + "abreve;": '\U00000103', + "ac;": '\U0000223E', + "acd;": '\U0000223F', + "acirc;": '\U000000E2', + "acute;": '\U000000B4', + "acy;": '\U00000430', + "aelig;": '\U000000E6', + "af;": '\U00002061', + "afr;": '\U0001D51E', + "agrave;": '\U000000E0', + "alefsym;": '\U00002135', + "aleph;": '\U00002135', + "alpha;": '\U000003B1', + "amacr;": '\U00000101', + "amalg;": '\U00002A3F', + "amp;": '\U00000026', + "and;": '\U00002227', + "andand;": '\U00002A55', + "andd;": '\U00002A5C', + "andslope;": '\U00002A58', + "andv;": '\U00002A5A', + "ang;": '\U00002220', + "ange;": '\U000029A4', + "angle;": '\U00002220', + "angmsd;": '\U00002221', + "angmsdaa;": '\U000029A8', + "angmsdab;": '\U000029A9', + "angmsdac;": '\U000029AA', + "angmsdad;": '\U000029AB', + "angmsdae;": '\U000029AC', + "angmsdaf;": '\U000029AD', + "angmsdag;": '\U000029AE', + "angmsdah;": '\U000029AF', + "angrt;": '\U0000221F', + "angrtvb;": '\U000022BE', + "angrtvbd;": '\U0000299D', + "angsph;": '\U00002222', + "angst;": '\U000000C5', + "angzarr;": '\U0000237C', + "aogon;": '\U00000105', + "aopf;": '\U0001D552', + "ap;": '\U00002248', + "apE;": '\U00002A70', + "apacir;": '\U00002A6F', + "ape;": '\U0000224A', + "apid;": '\U0000224B', + "apos;": '\U00000027', + "approx;": '\U00002248', + "approxeq;": '\U0000224A', + "aring;": '\U000000E5', + "ascr;": '\U0001D4B6', + "ast;": '\U0000002A', + "asymp;": '\U00002248', + "asympeq;": '\U0000224D', + "atilde;": '\U000000E3', + "auml;": '\U000000E4', + "awconint;": '\U00002233', + "awint;": '\U00002A11', + "bNot;": '\U00002AED', + "backcong;": '\U0000224C', + "backepsilon;": '\U000003F6', + "backprime;": '\U00002035', + "backsim;": '\U0000223D', + "backsimeq;": '\U000022CD', + "barvee;": '\U000022BD', + "barwed;": '\U00002305', + "barwedge;": '\U00002305', + "bbrk;": '\U000023B5', + "bbrktbrk;": '\U000023B6', + "bcong;": '\U0000224C', + "bcy;": '\U00000431', + "bdquo;": '\U0000201E', + "becaus;": '\U00002235', + "because;": '\U00002235', + "bemptyv;": '\U000029B0', + "bepsi;": '\U000003F6', + "bernou;": '\U0000212C', + "beta;": '\U000003B2', + "beth;": '\U00002136', + "between;": '\U0000226C', + "bfr;": '\U0001D51F', + "bigcap;": '\U000022C2', + "bigcirc;": '\U000025EF', + "bigcup;": '\U000022C3', + "bigodot;": '\U00002A00', + "bigoplus;": '\U00002A01', + "bigotimes;": '\U00002A02', + "bigsqcup;": '\U00002A06', + "bigstar;": '\U00002605', 
+ "bigtriangledown;": '\U000025BD', + "bigtriangleup;": '\U000025B3', + "biguplus;": '\U00002A04', + "bigvee;": '\U000022C1', + "bigwedge;": '\U000022C0', + "bkarow;": '\U0000290D', + "blacklozenge;": '\U000029EB', + "blacksquare;": '\U000025AA', + "blacktriangle;": '\U000025B4', + "blacktriangledown;": '\U000025BE', + "blacktriangleleft;": '\U000025C2', + "blacktriangleright;": '\U000025B8', + "blank;": '\U00002423', + "blk12;": '\U00002592', + "blk14;": '\U00002591', + "blk34;": '\U00002593', + "block;": '\U00002588', + "bnot;": '\U00002310', + "bopf;": '\U0001D553', + "bot;": '\U000022A5', + "bottom;": '\U000022A5', + "bowtie;": '\U000022C8', + "boxDL;": '\U00002557', + "boxDR;": '\U00002554', + "boxDl;": '\U00002556', + "boxDr;": '\U00002553', + "boxH;": '\U00002550', + "boxHD;": '\U00002566', + "boxHU;": '\U00002569', + "boxHd;": '\U00002564', + "boxHu;": '\U00002567', + "boxUL;": '\U0000255D', + "boxUR;": '\U0000255A', + "boxUl;": '\U0000255C', + "boxUr;": '\U00002559', + "boxV;": '\U00002551', + "boxVH;": '\U0000256C', + "boxVL;": '\U00002563', + "boxVR;": '\U00002560', + "boxVh;": '\U0000256B', + "boxVl;": '\U00002562', + "boxVr;": '\U0000255F', + "boxbox;": '\U000029C9', + "boxdL;": '\U00002555', + "boxdR;": '\U00002552', + "boxdl;": '\U00002510', + "boxdr;": '\U0000250C', + "boxh;": '\U00002500', + "boxhD;": '\U00002565', + "boxhU;": '\U00002568', + "boxhd;": '\U0000252C', + "boxhu;": '\U00002534', + "boxminus;": '\U0000229F', + "boxplus;": '\U0000229E', + "boxtimes;": '\U000022A0', + "boxuL;": '\U0000255B', + "boxuR;": '\U00002558', + "boxul;": '\U00002518', + "boxur;": '\U00002514', + "boxv;": '\U00002502', + "boxvH;": '\U0000256A', + "boxvL;": '\U00002561', + "boxvR;": '\U0000255E', + "boxvh;": '\U0000253C', + "boxvl;": '\U00002524', + "boxvr;": '\U0000251C', + "bprime;": '\U00002035', + "breve;": '\U000002D8', + "brvbar;": '\U000000A6', + "bscr;": '\U0001D4B7', + "bsemi;": '\U0000204F', + "bsim;": '\U0000223D', + "bsime;": '\U000022CD', + "bsol;": '\U0000005C', + "bsolb;": '\U000029C5', + "bsolhsub;": '\U000027C8', + "bull;": '\U00002022', + "bullet;": '\U00002022', + "bump;": '\U0000224E', + "bumpE;": '\U00002AAE', + "bumpe;": '\U0000224F', + "bumpeq;": '\U0000224F', + "cacute;": '\U00000107', + "cap;": '\U00002229', + "capand;": '\U00002A44', + "capbrcup;": '\U00002A49', + "capcap;": '\U00002A4B', + "capcup;": '\U00002A47', + "capdot;": '\U00002A40', + "caret;": '\U00002041', + "caron;": '\U000002C7', + "ccaps;": '\U00002A4D', + "ccaron;": '\U0000010D', + "ccedil;": '\U000000E7', + "ccirc;": '\U00000109', + "ccups;": '\U00002A4C', + "ccupssm;": '\U00002A50', + "cdot;": '\U0000010B', + "cedil;": '\U000000B8', + "cemptyv;": '\U000029B2', + "cent;": '\U000000A2', + "centerdot;": '\U000000B7', + "cfr;": '\U0001D520', + "chcy;": '\U00000447', + "check;": '\U00002713', + "checkmark;": '\U00002713', + "chi;": '\U000003C7', + "cir;": '\U000025CB', + "cirE;": '\U000029C3', + "circ;": '\U000002C6', + "circeq;": '\U00002257', + "circlearrowleft;": '\U000021BA', + "circlearrowright;": '\U000021BB', + "circledR;": '\U000000AE', + "circledS;": '\U000024C8', + "circledast;": '\U0000229B', + "circledcirc;": '\U0000229A', + "circleddash;": '\U0000229D', + "cire;": '\U00002257', + "cirfnint;": '\U00002A10', + "cirmid;": '\U00002AEF', + "cirscir;": '\U000029C2', + "clubs;": '\U00002663', + "clubsuit;": '\U00002663', + "colon;": '\U0000003A', + "colone;": '\U00002254', + "coloneq;": '\U00002254', + "comma;": '\U0000002C', + "commat;": '\U00000040', + "comp;": '\U00002201', + "compfn;": 
'\U00002218', + "complement;": '\U00002201', + "complexes;": '\U00002102', + "cong;": '\U00002245', + "congdot;": '\U00002A6D', + "conint;": '\U0000222E', + "copf;": '\U0001D554', + "coprod;": '\U00002210', + "copy;": '\U000000A9', + "copysr;": '\U00002117', + "crarr;": '\U000021B5', + "cross;": '\U00002717', + "cscr;": '\U0001D4B8', + "csub;": '\U00002ACF', + "csube;": '\U00002AD1', + "csup;": '\U00002AD0', + "csupe;": '\U00002AD2', + "ctdot;": '\U000022EF', + "cudarrl;": '\U00002938', + "cudarrr;": '\U00002935', + "cuepr;": '\U000022DE', + "cuesc;": '\U000022DF', + "cularr;": '\U000021B6', + "cularrp;": '\U0000293D', + "cup;": '\U0000222A', + "cupbrcap;": '\U00002A48', + "cupcap;": '\U00002A46', + "cupcup;": '\U00002A4A', + "cupdot;": '\U0000228D', + "cupor;": '\U00002A45', + "curarr;": '\U000021B7', + "curarrm;": '\U0000293C', + "curlyeqprec;": '\U000022DE', + "curlyeqsucc;": '\U000022DF', + "curlyvee;": '\U000022CE', + "curlywedge;": '\U000022CF', + "curren;": '\U000000A4', + "curvearrowleft;": '\U000021B6', + "curvearrowright;": '\U000021B7', + "cuvee;": '\U000022CE', + "cuwed;": '\U000022CF', + "cwconint;": '\U00002232', + "cwint;": '\U00002231', + "cylcty;": '\U0000232D', + "dArr;": '\U000021D3', + "dHar;": '\U00002965', + "dagger;": '\U00002020', + "daleth;": '\U00002138', + "darr;": '\U00002193', + "dash;": '\U00002010', + "dashv;": '\U000022A3', + "dbkarow;": '\U0000290F', + "dblac;": '\U000002DD', + "dcaron;": '\U0000010F', + "dcy;": '\U00000434', + "dd;": '\U00002146', + "ddagger;": '\U00002021', + "ddarr;": '\U000021CA', + "ddotseq;": '\U00002A77', + "deg;": '\U000000B0', + "delta;": '\U000003B4', + "demptyv;": '\U000029B1', + "dfisht;": '\U0000297F', + "dfr;": '\U0001D521', + "dharl;": '\U000021C3', + "dharr;": '\U000021C2', + "diam;": '\U000022C4', + "diamond;": '\U000022C4', + "diamondsuit;": '\U00002666', + "diams;": '\U00002666', + "die;": '\U000000A8', + "digamma;": '\U000003DD', + "disin;": '\U000022F2', + "div;": '\U000000F7', + "divide;": '\U000000F7', + "divideontimes;": '\U000022C7', + "divonx;": '\U000022C7', + "djcy;": '\U00000452', + "dlcorn;": '\U0000231E', + "dlcrop;": '\U0000230D', + "dollar;": '\U00000024', + "dopf;": '\U0001D555', + "dot;": '\U000002D9', + "doteq;": '\U00002250', + "doteqdot;": '\U00002251', + "dotminus;": '\U00002238', + "dotplus;": '\U00002214', + "dotsquare;": '\U000022A1', + "doublebarwedge;": '\U00002306', + "downarrow;": '\U00002193', + "downdownarrows;": '\U000021CA', + "downharpoonleft;": '\U000021C3', + "downharpoonright;": '\U000021C2', + "drbkarow;": '\U00002910', + "drcorn;": '\U0000231F', + "drcrop;": '\U0000230C', + "dscr;": '\U0001D4B9', + "dscy;": '\U00000455', + "dsol;": '\U000029F6', + "dstrok;": '\U00000111', + "dtdot;": '\U000022F1', + "dtri;": '\U000025BF', + "dtrif;": '\U000025BE', + "duarr;": '\U000021F5', + "duhar;": '\U0000296F', + "dwangle;": '\U000029A6', + "dzcy;": '\U0000045F', + "dzigrarr;": '\U000027FF', + "eDDot;": '\U00002A77', + "eDot;": '\U00002251', + "eacute;": '\U000000E9', + "easter;": '\U00002A6E', + "ecaron;": '\U0000011B', + "ecir;": '\U00002256', + "ecirc;": '\U000000EA', + "ecolon;": '\U00002255', + "ecy;": '\U0000044D', + "edot;": '\U00000117', + "ee;": '\U00002147', + "efDot;": '\U00002252', + "efr;": '\U0001D522', + "eg;": '\U00002A9A', + "egrave;": '\U000000E8', + "egs;": '\U00002A96', + "egsdot;": '\U00002A98', + "el;": '\U00002A99', + "elinters;": '\U000023E7', + "ell;": '\U00002113', + "els;": '\U00002A95', + "elsdot;": '\U00002A97', + "emacr;": '\U00000113', + "empty;": '\U00002205', + 
"emptyset;": '\U00002205', + "emptyv;": '\U00002205', + "emsp;": '\U00002003', + "emsp13;": '\U00002004', + "emsp14;": '\U00002005', + "eng;": '\U0000014B', + "ensp;": '\U00002002', + "eogon;": '\U00000119', + "eopf;": '\U0001D556', + "epar;": '\U000022D5', + "eparsl;": '\U000029E3', + "eplus;": '\U00002A71', + "epsi;": '\U000003B5', + "epsilon;": '\U000003B5', + "epsiv;": '\U000003F5', + "eqcirc;": '\U00002256', + "eqcolon;": '\U00002255', + "eqsim;": '\U00002242', + "eqslantgtr;": '\U00002A96', + "eqslantless;": '\U00002A95', + "equals;": '\U0000003D', + "equest;": '\U0000225F', + "equiv;": '\U00002261', + "equivDD;": '\U00002A78', + "eqvparsl;": '\U000029E5', + "erDot;": '\U00002253', + "erarr;": '\U00002971', + "escr;": '\U0000212F', + "esdot;": '\U00002250', + "esim;": '\U00002242', + "eta;": '\U000003B7', + "eth;": '\U000000F0', + "euml;": '\U000000EB', + "euro;": '\U000020AC', + "excl;": '\U00000021', + "exist;": '\U00002203', + "expectation;": '\U00002130', + "exponentiale;": '\U00002147', + "fallingdotseq;": '\U00002252', + "fcy;": '\U00000444', + "female;": '\U00002640', + "ffilig;": '\U0000FB03', + "fflig;": '\U0000FB00', + "ffllig;": '\U0000FB04', + "ffr;": '\U0001D523', + "filig;": '\U0000FB01', + "flat;": '\U0000266D', + "fllig;": '\U0000FB02', + "fltns;": '\U000025B1', + "fnof;": '\U00000192', + "fopf;": '\U0001D557', + "forall;": '\U00002200', + "fork;": '\U000022D4', + "forkv;": '\U00002AD9', + "fpartint;": '\U00002A0D', + "frac12;": '\U000000BD', + "frac13;": '\U00002153', + "frac14;": '\U000000BC', + "frac15;": '\U00002155', + "frac16;": '\U00002159', + "frac18;": '\U0000215B', + "frac23;": '\U00002154', + "frac25;": '\U00002156', + "frac34;": '\U000000BE', + "frac35;": '\U00002157', + "frac38;": '\U0000215C', + "frac45;": '\U00002158', + "frac56;": '\U0000215A', + "frac58;": '\U0000215D', + "frac78;": '\U0000215E', + "frasl;": '\U00002044', + "frown;": '\U00002322', + "fscr;": '\U0001D4BB', + "gE;": '\U00002267', + "gEl;": '\U00002A8C', + "gacute;": '\U000001F5', + "gamma;": '\U000003B3', + "gammad;": '\U000003DD', + "gap;": '\U00002A86', + "gbreve;": '\U0000011F', + "gcirc;": '\U0000011D', + "gcy;": '\U00000433', + "gdot;": '\U00000121', + "ge;": '\U00002265', + "gel;": '\U000022DB', + "geq;": '\U00002265', + "geqq;": '\U00002267', + "geqslant;": '\U00002A7E', + "ges;": '\U00002A7E', + "gescc;": '\U00002AA9', + "gesdot;": '\U00002A80', + "gesdoto;": '\U00002A82', + "gesdotol;": '\U00002A84', + "gesles;": '\U00002A94', + "gfr;": '\U0001D524', + "gg;": '\U0000226B', + "ggg;": '\U000022D9', + "gimel;": '\U00002137', + "gjcy;": '\U00000453', + "gl;": '\U00002277', + "glE;": '\U00002A92', + "gla;": '\U00002AA5', + "glj;": '\U00002AA4', + "gnE;": '\U00002269', + "gnap;": '\U00002A8A', + "gnapprox;": '\U00002A8A', + "gne;": '\U00002A88', + "gneq;": '\U00002A88', + "gneqq;": '\U00002269', + "gnsim;": '\U000022E7', + "gopf;": '\U0001D558', + "grave;": '\U00000060', + "gscr;": '\U0000210A', + "gsim;": '\U00002273', + "gsime;": '\U00002A8E', + "gsiml;": '\U00002A90', + "gt;": '\U0000003E', + "gtcc;": '\U00002AA7', + "gtcir;": '\U00002A7A', + "gtdot;": '\U000022D7', + "gtlPar;": '\U00002995', + "gtquest;": '\U00002A7C', + "gtrapprox;": '\U00002A86', + "gtrarr;": '\U00002978', + "gtrdot;": '\U000022D7', + "gtreqless;": '\U000022DB', + "gtreqqless;": '\U00002A8C', + "gtrless;": '\U00002277', + "gtrsim;": '\U00002273', + "hArr;": '\U000021D4', + "hairsp;": '\U0000200A', + "half;": '\U000000BD', + "hamilt;": '\U0000210B', + "hardcy;": '\U0000044A', + "harr;": '\U00002194', + 
"harrcir;": '\U00002948', + "harrw;": '\U000021AD', + "hbar;": '\U0000210F', + "hcirc;": '\U00000125', + "hearts;": '\U00002665', + "heartsuit;": '\U00002665', + "hellip;": '\U00002026', + "hercon;": '\U000022B9', + "hfr;": '\U0001D525', + "hksearow;": '\U00002925', + "hkswarow;": '\U00002926', + "hoarr;": '\U000021FF', + "homtht;": '\U0000223B', + "hookleftarrow;": '\U000021A9', + "hookrightarrow;": '\U000021AA', + "hopf;": '\U0001D559', + "horbar;": '\U00002015', + "hscr;": '\U0001D4BD', + "hslash;": '\U0000210F', + "hstrok;": '\U00000127', + "hybull;": '\U00002043', + "hyphen;": '\U00002010', + "iacute;": '\U000000ED', + "ic;": '\U00002063', + "icirc;": '\U000000EE', + "icy;": '\U00000438', + "iecy;": '\U00000435', + "iexcl;": '\U000000A1', + "iff;": '\U000021D4', + "ifr;": '\U0001D526', + "igrave;": '\U000000EC', + "ii;": '\U00002148', + "iiiint;": '\U00002A0C', + "iiint;": '\U0000222D', + "iinfin;": '\U000029DC', + "iiota;": '\U00002129', + "ijlig;": '\U00000133', + "imacr;": '\U0000012B', + "image;": '\U00002111', + "imagline;": '\U00002110', + "imagpart;": '\U00002111', + "imath;": '\U00000131', + "imof;": '\U000022B7', + "imped;": '\U000001B5', + "in;": '\U00002208', + "incare;": '\U00002105', + "infin;": '\U0000221E', + "infintie;": '\U000029DD', + "inodot;": '\U00000131', + "int;": '\U0000222B', + "intcal;": '\U000022BA', + "integers;": '\U00002124', + "intercal;": '\U000022BA', + "intlarhk;": '\U00002A17', + "intprod;": '\U00002A3C', + "iocy;": '\U00000451', + "iogon;": '\U0000012F', + "iopf;": '\U0001D55A', + "iota;": '\U000003B9', + "iprod;": '\U00002A3C', + "iquest;": '\U000000BF', + "iscr;": '\U0001D4BE', + "isin;": '\U00002208', + "isinE;": '\U000022F9', + "isindot;": '\U000022F5', + "isins;": '\U000022F4', + "isinsv;": '\U000022F3', + "isinv;": '\U00002208', + "it;": '\U00002062', + "itilde;": '\U00000129', + "iukcy;": '\U00000456', + "iuml;": '\U000000EF', + "jcirc;": '\U00000135', + "jcy;": '\U00000439', + "jfr;": '\U0001D527', + "jmath;": '\U00000237', + "jopf;": '\U0001D55B', + "jscr;": '\U0001D4BF', + "jsercy;": '\U00000458', + "jukcy;": '\U00000454', + "kappa;": '\U000003BA', + "kappav;": '\U000003F0', + "kcedil;": '\U00000137', + "kcy;": '\U0000043A', + "kfr;": '\U0001D528', + "kgreen;": '\U00000138', + "khcy;": '\U00000445', + "kjcy;": '\U0000045C', + "kopf;": '\U0001D55C', + "kscr;": '\U0001D4C0', + "lAarr;": '\U000021DA', + "lArr;": '\U000021D0', + "lAtail;": '\U0000291B', + "lBarr;": '\U0000290E', + "lE;": '\U00002266', + "lEg;": '\U00002A8B', + "lHar;": '\U00002962', + "lacute;": '\U0000013A', + "laemptyv;": '\U000029B4', + "lagran;": '\U00002112', + "lambda;": '\U000003BB', + "lang;": '\U000027E8', + "langd;": '\U00002991', + "langle;": '\U000027E8', + "lap;": '\U00002A85', + "laquo;": '\U000000AB', + "larr;": '\U00002190', + "larrb;": '\U000021E4', + "larrbfs;": '\U0000291F', + "larrfs;": '\U0000291D', + "larrhk;": '\U000021A9', + "larrlp;": '\U000021AB', + "larrpl;": '\U00002939', + "larrsim;": '\U00002973', + "larrtl;": '\U000021A2', + "lat;": '\U00002AAB', + "latail;": '\U00002919', + "late;": '\U00002AAD', + "lbarr;": '\U0000290C', + "lbbrk;": '\U00002772', + "lbrace;": '\U0000007B', + "lbrack;": '\U0000005B', + "lbrke;": '\U0000298B', + "lbrksld;": '\U0000298F', + "lbrkslu;": '\U0000298D', + "lcaron;": '\U0000013E', + "lcedil;": '\U0000013C', + "lceil;": '\U00002308', + "lcub;": '\U0000007B', + "lcy;": '\U0000043B', + "ldca;": '\U00002936', + "ldquo;": '\U0000201C', + "ldquor;": '\U0000201E', + "ldrdhar;": '\U00002967', + "ldrushar;": '\U0000294B', + 
"ldsh;": '\U000021B2', + "le;": '\U00002264', + "leftarrow;": '\U00002190', + "leftarrowtail;": '\U000021A2', + "leftharpoondown;": '\U000021BD', + "leftharpoonup;": '\U000021BC', + "leftleftarrows;": '\U000021C7', + "leftrightarrow;": '\U00002194', + "leftrightarrows;": '\U000021C6', + "leftrightharpoons;": '\U000021CB', + "leftrightsquigarrow;": '\U000021AD', + "leftthreetimes;": '\U000022CB', + "leg;": '\U000022DA', + "leq;": '\U00002264', + "leqq;": '\U00002266', + "leqslant;": '\U00002A7D', + "les;": '\U00002A7D', + "lescc;": '\U00002AA8', + "lesdot;": '\U00002A7F', + "lesdoto;": '\U00002A81', + "lesdotor;": '\U00002A83', + "lesges;": '\U00002A93', + "lessapprox;": '\U00002A85', + "lessdot;": '\U000022D6', + "lesseqgtr;": '\U000022DA', + "lesseqqgtr;": '\U00002A8B', + "lessgtr;": '\U00002276', + "lesssim;": '\U00002272', + "lfisht;": '\U0000297C', + "lfloor;": '\U0000230A', + "lfr;": '\U0001D529', + "lg;": '\U00002276', + "lgE;": '\U00002A91', + "lhard;": '\U000021BD', + "lharu;": '\U000021BC', + "lharul;": '\U0000296A', + "lhblk;": '\U00002584', + "ljcy;": '\U00000459', + "ll;": '\U0000226A', + "llarr;": '\U000021C7', + "llcorner;": '\U0000231E', + "llhard;": '\U0000296B', + "lltri;": '\U000025FA', + "lmidot;": '\U00000140', + "lmoust;": '\U000023B0', + "lmoustache;": '\U000023B0', + "lnE;": '\U00002268', + "lnap;": '\U00002A89', + "lnapprox;": '\U00002A89', + "lne;": '\U00002A87', + "lneq;": '\U00002A87', + "lneqq;": '\U00002268', + "lnsim;": '\U000022E6', + "loang;": '\U000027EC', + "loarr;": '\U000021FD', + "lobrk;": '\U000027E6', + "longleftarrow;": '\U000027F5', + "longleftrightarrow;": '\U000027F7', + "longmapsto;": '\U000027FC', + "longrightarrow;": '\U000027F6', + "looparrowleft;": '\U000021AB', + "looparrowright;": '\U000021AC', + "lopar;": '\U00002985', + "lopf;": '\U0001D55D', + "loplus;": '\U00002A2D', + "lotimes;": '\U00002A34', + "lowast;": '\U00002217', + "lowbar;": '\U0000005F', + "loz;": '\U000025CA', + "lozenge;": '\U000025CA', + "lozf;": '\U000029EB', + "lpar;": '\U00000028', + "lparlt;": '\U00002993', + "lrarr;": '\U000021C6', + "lrcorner;": '\U0000231F', + "lrhar;": '\U000021CB', + "lrhard;": '\U0000296D', + "lrm;": '\U0000200E', + "lrtri;": '\U000022BF', + "lsaquo;": '\U00002039', + "lscr;": '\U0001D4C1', + "lsh;": '\U000021B0', + "lsim;": '\U00002272', + "lsime;": '\U00002A8D', + "lsimg;": '\U00002A8F', + "lsqb;": '\U0000005B', + "lsquo;": '\U00002018', + "lsquor;": '\U0000201A', + "lstrok;": '\U00000142', + "lt;": '\U0000003C', + "ltcc;": '\U00002AA6', + "ltcir;": '\U00002A79', + "ltdot;": '\U000022D6', + "lthree;": '\U000022CB', + "ltimes;": '\U000022C9', + "ltlarr;": '\U00002976', + "ltquest;": '\U00002A7B', + "ltrPar;": '\U00002996', + "ltri;": '\U000025C3', + "ltrie;": '\U000022B4', + "ltrif;": '\U000025C2', + "lurdshar;": '\U0000294A', + "luruhar;": '\U00002966', + "mDDot;": '\U0000223A', + "macr;": '\U000000AF', + "male;": '\U00002642', + "malt;": '\U00002720', + "maltese;": '\U00002720', + "map;": '\U000021A6', + "mapsto;": '\U000021A6', + "mapstodown;": '\U000021A7', + "mapstoleft;": '\U000021A4', + "mapstoup;": '\U000021A5', + "marker;": '\U000025AE', + "mcomma;": '\U00002A29', + "mcy;": '\U0000043C', + "mdash;": '\U00002014', + "measuredangle;": '\U00002221', + "mfr;": '\U0001D52A', + "mho;": '\U00002127', + "micro;": '\U000000B5', + "mid;": '\U00002223', + "midast;": '\U0000002A', + "midcir;": '\U00002AF0', + "middot;": '\U000000B7', + "minus;": '\U00002212', + "minusb;": '\U0000229F', + "minusd;": '\U00002238', + "minusdu;": '\U00002A2A', + 
"mlcp;": '\U00002ADB', + "mldr;": '\U00002026', + "mnplus;": '\U00002213', + "models;": '\U000022A7', + "mopf;": '\U0001D55E', + "mp;": '\U00002213', + "mscr;": '\U0001D4C2', + "mstpos;": '\U0000223E', + "mu;": '\U000003BC', + "multimap;": '\U000022B8', + "mumap;": '\U000022B8', + "nLeftarrow;": '\U000021CD', + "nLeftrightarrow;": '\U000021CE', + "nRightarrow;": '\U000021CF', + "nVDash;": '\U000022AF', + "nVdash;": '\U000022AE', + "nabla;": '\U00002207', + "nacute;": '\U00000144', + "nap;": '\U00002249', + "napos;": '\U00000149', + "napprox;": '\U00002249', + "natur;": '\U0000266E', + "natural;": '\U0000266E', + "naturals;": '\U00002115', + "nbsp;": '\U000000A0', + "ncap;": '\U00002A43', + "ncaron;": '\U00000148', + "ncedil;": '\U00000146', + "ncong;": '\U00002247', + "ncup;": '\U00002A42', + "ncy;": '\U0000043D', + "ndash;": '\U00002013', + "ne;": '\U00002260', + "neArr;": '\U000021D7', + "nearhk;": '\U00002924', + "nearr;": '\U00002197', + "nearrow;": '\U00002197', + "nequiv;": '\U00002262', + "nesear;": '\U00002928', + "nexist;": '\U00002204', + "nexists;": '\U00002204', + "nfr;": '\U0001D52B', + "nge;": '\U00002271', + "ngeq;": '\U00002271', + "ngsim;": '\U00002275', + "ngt;": '\U0000226F', + "ngtr;": '\U0000226F', + "nhArr;": '\U000021CE', + "nharr;": '\U000021AE', + "nhpar;": '\U00002AF2', + "ni;": '\U0000220B', + "nis;": '\U000022FC', + "nisd;": '\U000022FA', + "niv;": '\U0000220B', + "njcy;": '\U0000045A', + "nlArr;": '\U000021CD', + "nlarr;": '\U0000219A', + "nldr;": '\U00002025', + "nle;": '\U00002270', + "nleftarrow;": '\U0000219A', + "nleftrightarrow;": '\U000021AE', + "nleq;": '\U00002270', + "nless;": '\U0000226E', + "nlsim;": '\U00002274', + "nlt;": '\U0000226E', + "nltri;": '\U000022EA', + "nltrie;": '\U000022EC', + "nmid;": '\U00002224', + "nopf;": '\U0001D55F', + "not;": '\U000000AC', + "notin;": '\U00002209', + "notinva;": '\U00002209', + "notinvb;": '\U000022F7', + "notinvc;": '\U000022F6', + "notni;": '\U0000220C', + "notniva;": '\U0000220C', + "notnivb;": '\U000022FE', + "notnivc;": '\U000022FD', + "npar;": '\U00002226', + "nparallel;": '\U00002226', + "npolint;": '\U00002A14', + "npr;": '\U00002280', + "nprcue;": '\U000022E0', + "nprec;": '\U00002280', + "nrArr;": '\U000021CF', + "nrarr;": '\U0000219B', + "nrightarrow;": '\U0000219B', + "nrtri;": '\U000022EB', + "nrtrie;": '\U000022ED', + "nsc;": '\U00002281', + "nsccue;": '\U000022E1', + "nscr;": '\U0001D4C3', + "nshortmid;": '\U00002224', + "nshortparallel;": '\U00002226', + "nsim;": '\U00002241', + "nsime;": '\U00002244', + "nsimeq;": '\U00002244', + "nsmid;": '\U00002224', + "nspar;": '\U00002226', + "nsqsube;": '\U000022E2', + "nsqsupe;": '\U000022E3', + "nsub;": '\U00002284', + "nsube;": '\U00002288', + "nsubseteq;": '\U00002288', + "nsucc;": '\U00002281', + "nsup;": '\U00002285', + "nsupe;": '\U00002289', + "nsupseteq;": '\U00002289', + "ntgl;": '\U00002279', + "ntilde;": '\U000000F1', + "ntlg;": '\U00002278', + "ntriangleleft;": '\U000022EA', + "ntrianglelefteq;": '\U000022EC', + "ntriangleright;": '\U000022EB', + "ntrianglerighteq;": '\U000022ED', + "nu;": '\U000003BD', + "num;": '\U00000023', + "numero;": '\U00002116', + "numsp;": '\U00002007', + "nvDash;": '\U000022AD', + "nvHarr;": '\U00002904', + "nvdash;": '\U000022AC', + "nvinfin;": '\U000029DE', + "nvlArr;": '\U00002902', + "nvrArr;": '\U00002903', + "nwArr;": '\U000021D6', + "nwarhk;": '\U00002923', + "nwarr;": '\U00002196', + "nwarrow;": '\U00002196', + "nwnear;": '\U00002927', + "oS;": '\U000024C8', + "oacute;": '\U000000F3', + "oast;": 
'\U0000229B', + "ocir;": '\U0000229A', + "ocirc;": '\U000000F4', + "ocy;": '\U0000043E', + "odash;": '\U0000229D', + "odblac;": '\U00000151', + "odiv;": '\U00002A38', + "odot;": '\U00002299', + "odsold;": '\U000029BC', + "oelig;": '\U00000153', + "ofcir;": '\U000029BF', + "ofr;": '\U0001D52C', + "ogon;": '\U000002DB', + "ograve;": '\U000000F2', + "ogt;": '\U000029C1', + "ohbar;": '\U000029B5', + "ohm;": '\U000003A9', + "oint;": '\U0000222E', + "olarr;": '\U000021BA', + "olcir;": '\U000029BE', + "olcross;": '\U000029BB', + "oline;": '\U0000203E', + "olt;": '\U000029C0', + "omacr;": '\U0000014D', + "omega;": '\U000003C9', + "omicron;": '\U000003BF', + "omid;": '\U000029B6', + "ominus;": '\U00002296', + "oopf;": '\U0001D560', + "opar;": '\U000029B7', + "operp;": '\U000029B9', + "oplus;": '\U00002295', + "or;": '\U00002228', + "orarr;": '\U000021BB', + "ord;": '\U00002A5D', + "order;": '\U00002134', + "orderof;": '\U00002134', + "ordf;": '\U000000AA', + "ordm;": '\U000000BA', + "origof;": '\U000022B6', + "oror;": '\U00002A56', + "orslope;": '\U00002A57', + "orv;": '\U00002A5B', + "oscr;": '\U00002134', + "oslash;": '\U000000F8', + "osol;": '\U00002298', + "otilde;": '\U000000F5', + "otimes;": '\U00002297', + "otimesas;": '\U00002A36', + "ouml;": '\U000000F6', + "ovbar;": '\U0000233D', + "par;": '\U00002225', + "para;": '\U000000B6', + "parallel;": '\U00002225', + "parsim;": '\U00002AF3', + "parsl;": '\U00002AFD', + "part;": '\U00002202', + "pcy;": '\U0000043F', + "percnt;": '\U00000025', + "period;": '\U0000002E', + "permil;": '\U00002030', + "perp;": '\U000022A5', + "pertenk;": '\U00002031', + "pfr;": '\U0001D52D', + "phi;": '\U000003C6', + "phiv;": '\U000003D5', + "phmmat;": '\U00002133', + "phone;": '\U0000260E', + "pi;": '\U000003C0', + "pitchfork;": '\U000022D4', + "piv;": '\U000003D6', + "planck;": '\U0000210F', + "planckh;": '\U0000210E', + "plankv;": '\U0000210F', + "plus;": '\U0000002B', + "plusacir;": '\U00002A23', + "plusb;": '\U0000229E', + "pluscir;": '\U00002A22', + "plusdo;": '\U00002214', + "plusdu;": '\U00002A25', + "pluse;": '\U00002A72', + "plusmn;": '\U000000B1', + "plussim;": '\U00002A26', + "plustwo;": '\U00002A27', + "pm;": '\U000000B1', + "pointint;": '\U00002A15', + "popf;": '\U0001D561', + "pound;": '\U000000A3', + "pr;": '\U0000227A', + "prE;": '\U00002AB3', + "prap;": '\U00002AB7', + "prcue;": '\U0000227C', + "pre;": '\U00002AAF', + "prec;": '\U0000227A', + "precapprox;": '\U00002AB7', + "preccurlyeq;": '\U0000227C', + "preceq;": '\U00002AAF', + "precnapprox;": '\U00002AB9', + "precneqq;": '\U00002AB5', + "precnsim;": '\U000022E8', + "precsim;": '\U0000227E', + "prime;": '\U00002032', + "primes;": '\U00002119', + "prnE;": '\U00002AB5', + "prnap;": '\U00002AB9', + "prnsim;": '\U000022E8', + "prod;": '\U0000220F', + "profalar;": '\U0000232E', + "profline;": '\U00002312', + "profsurf;": '\U00002313', + "prop;": '\U0000221D', + "propto;": '\U0000221D', + "prsim;": '\U0000227E', + "prurel;": '\U000022B0', + "pscr;": '\U0001D4C5', + "psi;": '\U000003C8', + "puncsp;": '\U00002008', + "qfr;": '\U0001D52E', + "qint;": '\U00002A0C', + "qopf;": '\U0001D562', + "qprime;": '\U00002057', + "qscr;": '\U0001D4C6', + "quaternions;": '\U0000210D', + "quatint;": '\U00002A16', + "quest;": '\U0000003F', + "questeq;": '\U0000225F', + "quot;": '\U00000022', + "rAarr;": '\U000021DB', + "rArr;": '\U000021D2', + "rAtail;": '\U0000291C', + "rBarr;": '\U0000290F', + "rHar;": '\U00002964', + "racute;": '\U00000155', + "radic;": '\U0000221A', + "raemptyv;": '\U000029B3', + "rang;": '\U000027E9', 
+ "rangd;": '\U00002992', + "range;": '\U000029A5', + "rangle;": '\U000027E9', + "raquo;": '\U000000BB', + "rarr;": '\U00002192', + "rarrap;": '\U00002975', + "rarrb;": '\U000021E5', + "rarrbfs;": '\U00002920', + "rarrc;": '\U00002933', + "rarrfs;": '\U0000291E', + "rarrhk;": '\U000021AA', + "rarrlp;": '\U000021AC', + "rarrpl;": '\U00002945', + "rarrsim;": '\U00002974', + "rarrtl;": '\U000021A3', + "rarrw;": '\U0000219D', + "ratail;": '\U0000291A', + "ratio;": '\U00002236', + "rationals;": '\U0000211A', + "rbarr;": '\U0000290D', + "rbbrk;": '\U00002773', + "rbrace;": '\U0000007D', + "rbrack;": '\U0000005D', + "rbrke;": '\U0000298C', + "rbrksld;": '\U0000298E', + "rbrkslu;": '\U00002990', + "rcaron;": '\U00000159', + "rcedil;": '\U00000157', + "rceil;": '\U00002309', + "rcub;": '\U0000007D', + "rcy;": '\U00000440', + "rdca;": '\U00002937', + "rdldhar;": '\U00002969', + "rdquo;": '\U0000201D', + "rdquor;": '\U0000201D', + "rdsh;": '\U000021B3', + "real;": '\U0000211C', + "realine;": '\U0000211B', + "realpart;": '\U0000211C', + "reals;": '\U0000211D', + "rect;": '\U000025AD', + "reg;": '\U000000AE', + "rfisht;": '\U0000297D', + "rfloor;": '\U0000230B', + "rfr;": '\U0001D52F', + "rhard;": '\U000021C1', + "rharu;": '\U000021C0', + "rharul;": '\U0000296C', + "rho;": '\U000003C1', + "rhov;": '\U000003F1', + "rightarrow;": '\U00002192', + "rightarrowtail;": '\U000021A3', + "rightharpoondown;": '\U000021C1', + "rightharpoonup;": '\U000021C0', + "rightleftarrows;": '\U000021C4', + "rightleftharpoons;": '\U000021CC', + "rightrightarrows;": '\U000021C9', + "rightsquigarrow;": '\U0000219D', + "rightthreetimes;": '\U000022CC', + "ring;": '\U000002DA', + "risingdotseq;": '\U00002253', + "rlarr;": '\U000021C4', + "rlhar;": '\U000021CC', + "rlm;": '\U0000200F', + "rmoust;": '\U000023B1', + "rmoustache;": '\U000023B1', + "rnmid;": '\U00002AEE', + "roang;": '\U000027ED', + "roarr;": '\U000021FE', + "robrk;": '\U000027E7', + "ropar;": '\U00002986', + "ropf;": '\U0001D563', + "roplus;": '\U00002A2E', + "rotimes;": '\U00002A35', + "rpar;": '\U00000029', + "rpargt;": '\U00002994', + "rppolint;": '\U00002A12', + "rrarr;": '\U000021C9', + "rsaquo;": '\U0000203A', + "rscr;": '\U0001D4C7', + "rsh;": '\U000021B1', + "rsqb;": '\U0000005D', + "rsquo;": '\U00002019', + "rsquor;": '\U00002019', + "rthree;": '\U000022CC', + "rtimes;": '\U000022CA', + "rtri;": '\U000025B9', + "rtrie;": '\U000022B5', + "rtrif;": '\U000025B8', + "rtriltri;": '\U000029CE', + "ruluhar;": '\U00002968', + "rx;": '\U0000211E', + "sacute;": '\U0000015B', + "sbquo;": '\U0000201A', + "sc;": '\U0000227B', + "scE;": '\U00002AB4', + "scap;": '\U00002AB8', + "scaron;": '\U00000161', + "sccue;": '\U0000227D', + "sce;": '\U00002AB0', + "scedil;": '\U0000015F', + "scirc;": '\U0000015D', + "scnE;": '\U00002AB6', + "scnap;": '\U00002ABA', + "scnsim;": '\U000022E9', + "scpolint;": '\U00002A13', + "scsim;": '\U0000227F', + "scy;": '\U00000441', + "sdot;": '\U000022C5', + "sdotb;": '\U000022A1', + "sdote;": '\U00002A66', + "seArr;": '\U000021D8', + "searhk;": '\U00002925', + "searr;": '\U00002198', + "searrow;": '\U00002198', + "sect;": '\U000000A7', + "semi;": '\U0000003B', + "seswar;": '\U00002929', + "setminus;": '\U00002216', + "setmn;": '\U00002216', + "sext;": '\U00002736', + "sfr;": '\U0001D530', + "sfrown;": '\U00002322', + "sharp;": '\U0000266F', + "shchcy;": '\U00000449', + "shcy;": '\U00000448', + "shortmid;": '\U00002223', + "shortparallel;": '\U00002225', + "shy;": '\U000000AD', + "sigma;": '\U000003C3', + "sigmaf;": '\U000003C2', + "sigmav;": 
'\U000003C2', + "sim;": '\U0000223C', + "simdot;": '\U00002A6A', + "sime;": '\U00002243', + "simeq;": '\U00002243', + "simg;": '\U00002A9E', + "simgE;": '\U00002AA0', + "siml;": '\U00002A9D', + "simlE;": '\U00002A9F', + "simne;": '\U00002246', + "simplus;": '\U00002A24', + "simrarr;": '\U00002972', + "slarr;": '\U00002190', + "smallsetminus;": '\U00002216', + "smashp;": '\U00002A33', + "smeparsl;": '\U000029E4', + "smid;": '\U00002223', + "smile;": '\U00002323', + "smt;": '\U00002AAA', + "smte;": '\U00002AAC', + "softcy;": '\U0000044C', + "sol;": '\U0000002F', + "solb;": '\U000029C4', + "solbar;": '\U0000233F', + "sopf;": '\U0001D564', + "spades;": '\U00002660', + "spadesuit;": '\U00002660', + "spar;": '\U00002225', + "sqcap;": '\U00002293', + "sqcup;": '\U00002294', + "sqsub;": '\U0000228F', + "sqsube;": '\U00002291', + "sqsubset;": '\U0000228F', + "sqsubseteq;": '\U00002291', + "sqsup;": '\U00002290', + "sqsupe;": '\U00002292', + "sqsupset;": '\U00002290', + "sqsupseteq;": '\U00002292', + "squ;": '\U000025A1', + "square;": '\U000025A1', + "squarf;": '\U000025AA', + "squf;": '\U000025AA', + "srarr;": '\U00002192', + "sscr;": '\U0001D4C8', + "ssetmn;": '\U00002216', + "ssmile;": '\U00002323', + "sstarf;": '\U000022C6', + "star;": '\U00002606', + "starf;": '\U00002605', + "straightepsilon;": '\U000003F5', + "straightphi;": '\U000003D5', + "strns;": '\U000000AF', + "sub;": '\U00002282', + "subE;": '\U00002AC5', + "subdot;": '\U00002ABD', + "sube;": '\U00002286', + "subedot;": '\U00002AC3', + "submult;": '\U00002AC1', + "subnE;": '\U00002ACB', + "subne;": '\U0000228A', + "subplus;": '\U00002ABF', + "subrarr;": '\U00002979', + "subset;": '\U00002282', + "subseteq;": '\U00002286', + "subseteqq;": '\U00002AC5', + "subsetneq;": '\U0000228A', + "subsetneqq;": '\U00002ACB', + "subsim;": '\U00002AC7', + "subsub;": '\U00002AD5', + "subsup;": '\U00002AD3', + "succ;": '\U0000227B', + "succapprox;": '\U00002AB8', + "succcurlyeq;": '\U0000227D', + "succeq;": '\U00002AB0', + "succnapprox;": '\U00002ABA', + "succneqq;": '\U00002AB6', + "succnsim;": '\U000022E9', + "succsim;": '\U0000227F', + "sum;": '\U00002211', + "sung;": '\U0000266A', + "sup;": '\U00002283', + "sup1;": '\U000000B9', + "sup2;": '\U000000B2', + "sup3;": '\U000000B3', + "supE;": '\U00002AC6', + "supdot;": '\U00002ABE', + "supdsub;": '\U00002AD8', + "supe;": '\U00002287', + "supedot;": '\U00002AC4', + "suphsol;": '\U000027C9', + "suphsub;": '\U00002AD7', + "suplarr;": '\U0000297B', + "supmult;": '\U00002AC2', + "supnE;": '\U00002ACC', + "supne;": '\U0000228B', + "supplus;": '\U00002AC0', + "supset;": '\U00002283', + "supseteq;": '\U00002287', + "supseteqq;": '\U00002AC6', + "supsetneq;": '\U0000228B', + "supsetneqq;": '\U00002ACC', + "supsim;": '\U00002AC8', + "supsub;": '\U00002AD4', + "supsup;": '\U00002AD6', + "swArr;": '\U000021D9', + "swarhk;": '\U00002926', + "swarr;": '\U00002199', + "swarrow;": '\U00002199', + "swnwar;": '\U0000292A', + "szlig;": '\U000000DF', + "target;": '\U00002316', + "tau;": '\U000003C4', + "tbrk;": '\U000023B4', + "tcaron;": '\U00000165', + "tcedil;": '\U00000163', + "tcy;": '\U00000442', + "tdot;": '\U000020DB', + "telrec;": '\U00002315', + "tfr;": '\U0001D531', + "there4;": '\U00002234', + "therefore;": '\U00002234', + "theta;": '\U000003B8', + "thetasym;": '\U000003D1', + "thetav;": '\U000003D1', + "thickapprox;": '\U00002248', + "thicksim;": '\U0000223C', + "thinsp;": '\U00002009', + "thkap;": '\U00002248', + "thksim;": '\U0000223C', + "thorn;": '\U000000FE', + "tilde;": '\U000002DC', + "times;": 
'\U000000D7', + "timesb;": '\U000022A0', + "timesbar;": '\U00002A31', + "timesd;": '\U00002A30', + "tint;": '\U0000222D', + "toea;": '\U00002928', + "top;": '\U000022A4', + "topbot;": '\U00002336', + "topcir;": '\U00002AF1', + "topf;": '\U0001D565', + "topfork;": '\U00002ADA', + "tosa;": '\U00002929', + "tprime;": '\U00002034', + "trade;": '\U00002122', + "triangle;": '\U000025B5', + "triangledown;": '\U000025BF', + "triangleleft;": '\U000025C3', + "trianglelefteq;": '\U000022B4', + "triangleq;": '\U0000225C', + "triangleright;": '\U000025B9', + "trianglerighteq;": '\U000022B5', + "tridot;": '\U000025EC', + "trie;": '\U0000225C', + "triminus;": '\U00002A3A', + "triplus;": '\U00002A39', + "trisb;": '\U000029CD', + "tritime;": '\U00002A3B', + "trpezium;": '\U000023E2', + "tscr;": '\U0001D4C9', + "tscy;": '\U00000446', + "tshcy;": '\U0000045B', + "tstrok;": '\U00000167', + "twixt;": '\U0000226C', + "twoheadleftarrow;": '\U0000219E', + "twoheadrightarrow;": '\U000021A0', + "uArr;": '\U000021D1', + "uHar;": '\U00002963', + "uacute;": '\U000000FA', + "uarr;": '\U00002191', + "ubrcy;": '\U0000045E', + "ubreve;": '\U0000016D', + "ucirc;": '\U000000FB', + "ucy;": '\U00000443', + "udarr;": '\U000021C5', + "udblac;": '\U00000171', + "udhar;": '\U0000296E', + "ufisht;": '\U0000297E', + "ufr;": '\U0001D532', + "ugrave;": '\U000000F9', + "uharl;": '\U000021BF', + "uharr;": '\U000021BE', + "uhblk;": '\U00002580', + "ulcorn;": '\U0000231C', + "ulcorner;": '\U0000231C', + "ulcrop;": '\U0000230F', + "ultri;": '\U000025F8', + "umacr;": '\U0000016B', + "uml;": '\U000000A8', + "uogon;": '\U00000173', + "uopf;": '\U0001D566', + "uparrow;": '\U00002191', + "updownarrow;": '\U00002195', + "upharpoonleft;": '\U000021BF', + "upharpoonright;": '\U000021BE', + "uplus;": '\U0000228E', + "upsi;": '\U000003C5', + "upsih;": '\U000003D2', + "upsilon;": '\U000003C5', + "upuparrows;": '\U000021C8', + "urcorn;": '\U0000231D', + "urcorner;": '\U0000231D', + "urcrop;": '\U0000230E', + "uring;": '\U0000016F', + "urtri;": '\U000025F9', + "uscr;": '\U0001D4CA', + "utdot;": '\U000022F0', + "utilde;": '\U00000169', + "utri;": '\U000025B5', + "utrif;": '\U000025B4', + "uuarr;": '\U000021C8', + "uuml;": '\U000000FC', + "uwangle;": '\U000029A7', + "vArr;": '\U000021D5', + "vBar;": '\U00002AE8', + "vBarv;": '\U00002AE9', + "vDash;": '\U000022A8', + "vangrt;": '\U0000299C', + "varepsilon;": '\U000003F5', + "varkappa;": '\U000003F0', + "varnothing;": '\U00002205', + "varphi;": '\U000003D5', + "varpi;": '\U000003D6', + "varpropto;": '\U0000221D', + "varr;": '\U00002195', + "varrho;": '\U000003F1', + "varsigma;": '\U000003C2', + "vartheta;": '\U000003D1', + "vartriangleleft;": '\U000022B2', + "vartriangleright;": '\U000022B3', + "vcy;": '\U00000432', + "vdash;": '\U000022A2', + "vee;": '\U00002228', + "veebar;": '\U000022BB', + "veeeq;": '\U0000225A', + "vellip;": '\U000022EE', + "verbar;": '\U0000007C', + "vert;": '\U0000007C', + "vfr;": '\U0001D533', + "vltri;": '\U000022B2', + "vopf;": '\U0001D567', + "vprop;": '\U0000221D', + "vrtri;": '\U000022B3', + "vscr;": '\U0001D4CB', + "vzigzag;": '\U0000299A', + "wcirc;": '\U00000175', + "wedbar;": '\U00002A5F', + "wedge;": '\U00002227', + "wedgeq;": '\U00002259', + "weierp;": '\U00002118', + "wfr;": '\U0001D534', + "wopf;": '\U0001D568', + "wp;": '\U00002118', + "wr;": '\U00002240', + "wreath;": '\U00002240', + "wscr;": '\U0001D4CC', + "xcap;": '\U000022C2', + "xcirc;": '\U000025EF', + "xcup;": '\U000022C3', + "xdtri;": '\U000025BD', + "xfr;": '\U0001D535', + "xhArr;": '\U000027FA', + 
"xharr;": '\U000027F7', + "xi;": '\U000003BE', + "xlArr;": '\U000027F8', + "xlarr;": '\U000027F5', + "xmap;": '\U000027FC', + "xnis;": '\U000022FB', + "xodot;": '\U00002A00', + "xopf;": '\U0001D569', + "xoplus;": '\U00002A01', + "xotime;": '\U00002A02', + "xrArr;": '\U000027F9', + "xrarr;": '\U000027F6', + "xscr;": '\U0001D4CD', + "xsqcup;": '\U00002A06', + "xuplus;": '\U00002A04', + "xutri;": '\U000025B3', + "xvee;": '\U000022C1', + "xwedge;": '\U000022C0', + "yacute;": '\U000000FD', + "yacy;": '\U0000044F', + "ycirc;": '\U00000177', + "ycy;": '\U0000044B', + "yen;": '\U000000A5', + "yfr;": '\U0001D536', + "yicy;": '\U00000457', + "yopf;": '\U0001D56A', + "yscr;": '\U0001D4CE', + "yucy;": '\U0000044E', + "yuml;": '\U000000FF', + "zacute;": '\U0000017A', + "zcaron;": '\U0000017E', + "zcy;": '\U00000437', + "zdot;": '\U0000017C', + "zeetrf;": '\U00002128', + "zeta;": '\U000003B6', + "zfr;": '\U0001D537', + "zhcy;": '\U00000436', + "zigrarr;": '\U000021DD', + "zopf;": '\U0001D56B', + "zscr;": '\U0001D4CF', + "zwj;": '\U0000200D', + "zwnj;": '\U0000200C', + "AElig": '\U000000C6', + "AMP": '\U00000026', + "Aacute": '\U000000C1', + "Acirc": '\U000000C2', + "Agrave": '\U000000C0', + "Aring": '\U000000C5', + "Atilde": '\U000000C3', + "Auml": '\U000000C4', + "COPY": '\U000000A9', + "Ccedil": '\U000000C7', + "ETH": '\U000000D0', + "Eacute": '\U000000C9', + "Ecirc": '\U000000CA', + "Egrave": '\U000000C8', + "Euml": '\U000000CB', + "GT": '\U0000003E', + "Iacute": '\U000000CD', + "Icirc": '\U000000CE', + "Igrave": '\U000000CC', + "Iuml": '\U000000CF', + "LT": '\U0000003C', + "Ntilde": '\U000000D1', + "Oacute": '\U000000D3', + "Ocirc": '\U000000D4', + "Ograve": '\U000000D2', + "Oslash": '\U000000D8', + "Otilde": '\U000000D5', + "Ouml": '\U000000D6', + "QUOT": '\U00000022', + "REG": '\U000000AE', + "THORN": '\U000000DE', + "Uacute": '\U000000DA', + "Ucirc": '\U000000DB', + "Ugrave": '\U000000D9', + "Uuml": '\U000000DC', + "Yacute": '\U000000DD', + "aacute": '\U000000E1', + "acirc": '\U000000E2', + "acute": '\U000000B4', + "aelig": '\U000000E6', + "agrave": '\U000000E0', + "amp": '\U00000026', + "aring": '\U000000E5', + "atilde": '\U000000E3', + "auml": '\U000000E4', + "brvbar": '\U000000A6', + "ccedil": '\U000000E7', + "cedil": '\U000000B8', + "cent": '\U000000A2', + "copy": '\U000000A9', + "curren": '\U000000A4', + "deg": '\U000000B0', + "divide": '\U000000F7', + "eacute": '\U000000E9', + "ecirc": '\U000000EA', + "egrave": '\U000000E8', + "eth": '\U000000F0', + "euml": '\U000000EB', + "frac12": '\U000000BD', + "frac14": '\U000000BC', + "frac34": '\U000000BE', + "gt": '\U0000003E', + "iacute": '\U000000ED', + "icirc": '\U000000EE', + "iexcl": '\U000000A1', + "igrave": '\U000000EC', + "iquest": '\U000000BF', + "iuml": '\U000000EF', + "laquo": '\U000000AB', + "lt": '\U0000003C', + "macr": '\U000000AF', + "micro": '\U000000B5', + "middot": '\U000000B7', + "nbsp": '\U000000A0', + "not": '\U000000AC', + "ntilde": '\U000000F1', + "oacute": '\U000000F3', + "ocirc": '\U000000F4', + "ograve": '\U000000F2', + "ordf": '\U000000AA', + "ordm": '\U000000BA', + "oslash": '\U000000F8', + "otilde": '\U000000F5', + "ouml": '\U000000F6', + "para": '\U000000B6', + "plusmn": '\U000000B1', + "pound": '\U000000A3', + "quot": '\U00000022', + "raquo": '\U000000BB', + "reg": '\U000000AE', + "sect": '\U000000A7', + "shy": '\U000000AD', + "sup1": '\U000000B9', + "sup2": '\U000000B2', + "sup3": '\U000000B3', + "szlig": '\U000000DF', + "thorn": '\U000000FE', + "times": '\U000000D7', + "uacute": '\U000000FA', + "ucirc": '\U000000FB', 
+ "ugrave": '\U000000F9', + "uml": '\U000000A8', + "uuml": '\U000000FC', + "yacute": '\U000000FD', + "yen": '\U000000A5', + "yuml": '\U000000FF', +} + +// HTML entities that are two unicode codepoints. +var entity2 = map[string][2]rune{ + // TODO(nigeltao): Handle replacements that are wider than their names. + // "nLt;": {'\u226A', '\u20D2'}, + // "nGt;": {'\u226B', '\u20D2'}, + "NotEqualTilde;": {'\u2242', '\u0338'}, + "NotGreaterFullEqual;": {'\u2267', '\u0338'}, + "NotGreaterGreater;": {'\u226B', '\u0338'}, + "NotGreaterSlantEqual;": {'\u2A7E', '\u0338'}, + "NotHumpDownHump;": {'\u224E', '\u0338'}, + "NotHumpEqual;": {'\u224F', '\u0338'}, + "NotLeftTriangleBar;": {'\u29CF', '\u0338'}, + "NotLessLess;": {'\u226A', '\u0338'}, + "NotLessSlantEqual;": {'\u2A7D', '\u0338'}, + "NotNestedGreaterGreater;": {'\u2AA2', '\u0338'}, + "NotNestedLessLess;": {'\u2AA1', '\u0338'}, + "NotPrecedesEqual;": {'\u2AAF', '\u0338'}, + "NotRightTriangleBar;": {'\u29D0', '\u0338'}, + "NotSquareSubset;": {'\u228F', '\u0338'}, + "NotSquareSuperset;": {'\u2290', '\u0338'}, + "NotSubset;": {'\u2282', '\u20D2'}, + "NotSucceedsEqual;": {'\u2AB0', '\u0338'}, + "NotSucceedsTilde;": {'\u227F', '\u0338'}, + "NotSuperset;": {'\u2283', '\u20D2'}, + "ThickSpace;": {'\u205F', '\u200A'}, + "acE;": {'\u223E', '\u0333'}, + "bne;": {'\u003D', '\u20E5'}, + "bnequiv;": {'\u2261', '\u20E5'}, + "caps;": {'\u2229', '\uFE00'}, + "cups;": {'\u222A', '\uFE00'}, + "fjlig;": {'\u0066', '\u006A'}, + "gesl;": {'\u22DB', '\uFE00'}, + "gvertneqq;": {'\u2269', '\uFE00'}, + "gvnE;": {'\u2269', '\uFE00'}, + "lates;": {'\u2AAD', '\uFE00'}, + "lesg;": {'\u22DA', '\uFE00'}, + "lvertneqq;": {'\u2268', '\uFE00'}, + "lvnE;": {'\u2268', '\uFE00'}, + "nGg;": {'\u22D9', '\u0338'}, + "nGtv;": {'\u226B', '\u0338'}, + "nLl;": {'\u22D8', '\u0338'}, + "nLtv;": {'\u226A', '\u0338'}, + "nang;": {'\u2220', '\u20D2'}, + "napE;": {'\u2A70', '\u0338'}, + "napid;": {'\u224B', '\u0338'}, + "nbump;": {'\u224E', '\u0338'}, + "nbumpe;": {'\u224F', '\u0338'}, + "ncongdot;": {'\u2A6D', '\u0338'}, + "nedot;": {'\u2250', '\u0338'}, + "nesim;": {'\u2242', '\u0338'}, + "ngE;": {'\u2267', '\u0338'}, + "ngeqq;": {'\u2267', '\u0338'}, + "ngeqslant;": {'\u2A7E', '\u0338'}, + "nges;": {'\u2A7E', '\u0338'}, + "nlE;": {'\u2266', '\u0338'}, + "nleqq;": {'\u2266', '\u0338'}, + "nleqslant;": {'\u2A7D', '\u0338'}, + "nles;": {'\u2A7D', '\u0338'}, + "notinE;": {'\u22F9', '\u0338'}, + "notindot;": {'\u22F5', '\u0338'}, + "nparsl;": {'\u2AFD', '\u20E5'}, + "npart;": {'\u2202', '\u0338'}, + "npre;": {'\u2AAF', '\u0338'}, + "npreceq;": {'\u2AAF', '\u0338'}, + "nrarrc;": {'\u2933', '\u0338'}, + "nrarrw;": {'\u219D', '\u0338'}, + "nsce;": {'\u2AB0', '\u0338'}, + "nsubE;": {'\u2AC5', '\u0338'}, + "nsubset;": {'\u2282', '\u20D2'}, + "nsubseteqq;": {'\u2AC5', '\u0338'}, + "nsucceq;": {'\u2AB0', '\u0338'}, + "nsupE;": {'\u2AC6', '\u0338'}, + "nsupset;": {'\u2283', '\u20D2'}, + "nsupseteqq;": {'\u2AC6', '\u0338'}, + "nvap;": {'\u224D', '\u20D2'}, + "nvge;": {'\u2265', '\u20D2'}, + "nvgt;": {'\u003E', '\u20D2'}, + "nvle;": {'\u2264', '\u20D2'}, + "nvlt;": {'\u003C', '\u20D2'}, + "nvltrie;": {'\u22B4', '\u20D2'}, + "nvrtrie;": {'\u22B5', '\u20D2'}, + "nvsim;": {'\u223C', '\u20D2'}, + "race;": {'\u223D', '\u0331'}, + "smtes;": {'\u2AAC', '\uFE00'}, + "sqcaps;": {'\u2293', '\uFE00'}, + "sqcups;": {'\u2294', '\uFE00'}, + "varsubsetneq;": {'\u228A', '\uFE00'}, + "varsubsetneqq;": {'\u2ACB', '\uFE00'}, + "varsupsetneq;": {'\u228B', '\uFE00'}, + "varsupsetneqq;": {'\u2ACC', '\uFE00'}, + "vnsub;": 
{'\u2282', '\u20D2'},
+	"vnsup;":         {'\u2283', '\u20D2'},
+	"vsubnE;":        {'\u2ACB', '\uFE00'},
+	"vsubne;":        {'\u228A', '\uFE00'},
+	"vsupnE;":        {'\u2ACC', '\uFE00'},
+	"vsupne;":        {'\u228B', '\uFE00'},
+}
diff --git a/vendor/golang.org/x/net/html/entity_test.go b/vendor/golang.org/x/net/html/entity_test.go
new file mode 100644
index 0000000..b53f866
--- /dev/null
+++ b/vendor/golang.org/x/net/html/entity_test.go
@@ -0,0 +1,29 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+	"testing"
+	"unicode/utf8"
+)
+
+func TestEntityLength(t *testing.T) {
+	// We verify that the length of UTF-8 encoding of each value is <= 1 + len(key).
+	// The +1 comes from the leading "&". This property implies that the length of
+	// unescaped text is <= the length of escaped text.
+	for k, v := range entity {
+		if 1+len(k) < utf8.RuneLen(v) {
+			t.Error("escaped entity &" + k + " is shorter than its UTF-8 encoding " + string(v))
+		}
+		if len(k) > longestEntityWithoutSemicolon && k[len(k)-1] != ';' {
+			t.Errorf("entity name %s is %d characters, but longestEntityWithoutSemicolon=%d", k, len(k), longestEntityWithoutSemicolon)
+		}
+	}
+	for k, v := range entity2 {
+		if 1+len(k) < utf8.RuneLen(v[0])+utf8.RuneLen(v[1]) {
+			t.Error("escaped entity &" + k + " is shorter than its UTF-8 encoding " + string(v[0]) + string(v[1]))
+		}
+	}
+}
diff --git a/vendor/golang.org/x/net/html/escape.go b/vendor/golang.org/x/net/html/escape.go
new file mode 100644
index 0000000..d856139
--- /dev/null
+++ b/vendor/golang.org/x/net/html/escape.go
@@ -0,0 +1,258 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+	"bytes"
+	"strings"
+	"unicode/utf8"
+)
+
+// These replacements permit compatibility with old numeric entities that
+// assumed Windows-1252 encoding.
+// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference
+var replacementTable = [...]rune{
+	'\u20AC', // First entry is what 0x80 should be replaced with.
+	'\u0081',
+	'\u201A',
+	'\u0192',
+	'\u201E',
+	'\u2026',
+	'\u2020',
+	'\u2021',
+	'\u02C6',
+	'\u2030',
+	'\u0160',
+	'\u2039',
+	'\u0152',
+	'\u008D',
+	'\u017D',
+	'\u008F',
+	'\u0090',
+	'\u2018',
+	'\u2019',
+	'\u201C',
+	'\u201D',
+	'\u2022',
+	'\u2013',
+	'\u2014',
+	'\u02DC',
+	'\u2122',
+	'\u0161',
+	'\u203A',
+	'\u0153',
+	'\u009D',
+	'\u017E',
+	'\u0178', // Last entry is 0x9F.
+	// 0x00->'\uFFFD' is handled programmatically.
+	// 0x0D->'\u000D' is a no-op.
+}
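A quick way to see this table in action is through the package's exported UnescapeString, which routes legacy numeric references through replacementTable. The snippet below is an illustrative sketch, separate from the vendored files:

package main

import (
	"fmt"

	"golang.org/x/net/html"
)

func main() {
	// "&#128;" is code point 0x80. Legacy pages mean the Windows-1252 Euro
	// sign, not a C1 control character, so the first table entry (U+20AC)
	// is substituted.
	fmt.Printf("%q\n", html.UnescapeString("Price: &#128;5")) // "Price: €5"

	// NUL, surrogates, and out-of-range values are handled programmatically,
	// as the trailing comments note: they become U+FFFD.
	fmt.Printf("%q\n", html.UnescapeString("&#x0;")) // "\ufffd"
}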
+
+// unescapeEntity reads an entity like "&lt;" from b[src:] and writes the
+// corresponding "<" to b[dst:], returning the incremented dst and src cursors.
+// Precondition: b[src] == '&' && dst <= src.
+// attribute should be true if parsing an attribute value.
+func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) {
+	// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference
+
+	// i starts at 1 because we already know that s[0] == '&'.
+	i, s := 1, b[src:]
+
+	if len(s) <= 1 {
+		b[dst] = b[src]
+		return dst + 1, src + 1
+	}
+
+	if s[i] == '#' {
+		if len(s) <= 3 { // We need to have at least "&#.".
+			b[dst] = b[src]
+			return dst + 1, src + 1
+		}
+		i++
+		c := s[i]
+		hex := false
+		if c == 'x' || c == 'X' {
+			hex = true
+			i++
+		}
+
+		x := '\x00'
+		for i < len(s) {
+			c = s[i]
+			i++
+			if hex {
+				if '0' <= c && c <= '9' {
+					x = 16*x + rune(c) - '0'
+					continue
+				} else if 'a' <= c && c <= 'f' {
+					x = 16*x + rune(c) - 'a' + 10
+					continue
+				} else if 'A' <= c && c <= 'F' {
+					x = 16*x + rune(c) - 'A' + 10
+					continue
+				}
+			} else if '0' <= c && c <= '9' {
+				x = 10*x + rune(c) - '0'
+				continue
+			}
+			if c != ';' {
+				i--
+			}
+			break
+		}
+
+		if i <= 3 { // No characters matched.
+			b[dst] = b[src]
+			return dst + 1, src + 1
+		}
+
+		if 0x80 <= x && x <= 0x9F {
+			// Replace characters from Windows-1252 with UTF-8 equivalents.
+			x = replacementTable[x-0x80]
+		} else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF {
+			// Replace invalid characters with the replacement character.
+			x = '\uFFFD'
+		}
+
+		return dst + utf8.EncodeRune(b[dst:], x), src + i
+	}
+
+	// Consume the maximum number of characters possible, with the
+	// consumed characters matching one of the named references.
+
+	for i < len(s) {
+		c := s[i]
+		i++
+		// Lower-cased characters are more common in entities, so we check for them first.
+		if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
+			continue
+		}
+		if c != ';' {
+			i--
+		}
+		break
+	}
+
+	entityName := string(s[1:i])
+	if entityName == "" {
+		// No-op.
+	} else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' {
+		// No-op.
+	} else if x := entity[entityName]; x != 0 {
+		return dst + utf8.EncodeRune(b[dst:], x), src + i
+	} else if x := entity2[entityName]; x[0] != 0 {
+		dst1 := dst + utf8.EncodeRune(b[dst:], x[0])
+		return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i
+	} else if !attribute {
+		maxLen := len(entityName) - 1
+		if maxLen > longestEntityWithoutSemicolon {
+			maxLen = longestEntityWithoutSemicolon
+		}
+		for j := maxLen; j > 1; j-- {
+			if x := entity[entityName[:j]]; x != 0 {
+				return dst + utf8.EncodeRune(b[dst:], x), src + j + 1
+			}
+		}
+	}
+
+	dst1, src1 = dst+i, src+i
+	copy(b[dst:dst1], b[src:src1])
+	return dst1, src1
+}
+
+// unescape unescapes b's entities in-place, so that "a&lt;b" becomes "a<b".
+// attribute should be true if parsing an attribute value.
+func unescape(b []byte, attribute bool) []byte {
+	for i, c := range b {
+		if c == '&' {
+			dst, src := unescapeEntity(b, i, i, attribute)
+			for src < len(b) {
+				c := b[src]
+				if c == '&' {
+					dst, src = unescapeEntity(b, dst, src, attribute)
+				} else {
+					b[dst] = c
+					dst, src = dst+1, src+1
+				}
+			}
+			return b[0:dst]
+		}
+	}
+	return b
+}
+
+// lower lower-cases the A-Z bytes in b in-place, so that "aBc" becomes "abc".
+func lower(b []byte) []byte {
+	for i, c := range b {
+		if 'A' <= c && c <= 'Z' {
+			b[i] = c + 'a' - 'A'
+		}
+	}
+	return b
+}
+
+const escapedChars = "&'<>\"\r"
+
+func escape(w writer, s string) error {
+	i := strings.IndexAny(s, escapedChars)
+	for i != -1 {
+		if _, err := w.WriteString(s[:i]); err != nil {
+			return err
+		}
+		var esc string
+		switch s[i] {
+		case '&':
+			esc = "&amp;"
+		case '\'':
+			// "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5.
+			esc = "&#39;"
+		case '<':
+			esc = "&lt;"
+		case '>':
+			esc = "&gt;"
+		case '"':
+			// "&#34;" is shorter than "&quot;".
+			esc = "&#34;"
+		case '\r':
+			esc = "&#13;"
+		default:
+			panic("unrecognized escape character")
+		}
+		s = s[i+1:]
+		if _, err := w.WriteString(esc); err != nil {
+			return err
+		}
+		i = strings.IndexAny(s, escapedChars)
+	}
+	_, err := w.WriteString(s)
+	return err
+}
+
+// EscapeString escapes special characters like "<" to become "&lt;". It
+// escapes only five such characters: <, >, &, ' and ".
+// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
+// always true.
+func EscapeString(s string) string {
+	if strings.IndexAny(s, escapedChars) == -1 {
+		return s
+	}
+	var buf bytes.Buffer
+	escape(&buf, s)
+	return buf.String()
+}
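The asymmetry the comment above promises (unescaping accepts far more than escaping emits) is easy to demonstrate. A small standalone sketch, separate from the vendored files:

package main

import (
	"fmt"

	"golang.org/x/net/html"
)

func main() {
	s := `3&5==1 && 0<1`
	e := html.EscapeString(s)
	fmt.Println(e) // 3&amp;5==1 &amp;&amp; 0&lt;1

	// UnescapeString(EscapeString(s)) == s always holds...
	fmt.Println(html.UnescapeString(e) == s) // true

	// ...but the converse does not: "&aacute;" passes through EscapeString
	// untouched, yet UnescapeString rewrites it, so the round trip differs.
	u := html.UnescapeString("&aacute;")
	fmt.Println(html.EscapeString(u)) // á, not &aacute;
}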
+
+// UnescapeString unescapes entities like "&lt;" to become "<". It unescapes a
+// larger range of entities than EscapeString escapes. For example, "&aacute;"
+// unescapes to "á", as does "&#225;" and "&xE1;".
+// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
+// always true.
+func UnescapeString(s string) string {
+	for _, c := range s {
+		if c == '&' {
+			return string(unescape([]byte(s), false))
+		}
+	}
+	return s
+}
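Named references with no single-rune expansion fall through to the entity2 table defined earlier. An illustrative sketch, separate from the vendored files:

package main

import (
	"fmt"

	"golang.org/x/net/html"
)

func main() {
	// "&NotSubset;" has no one-rune form: entity2 expands it to U+2282
	// (SUBSET OF) followed by U+20D2 (COMBINING LONG VERTICAL LINE OVERLAY).
	fmt.Printf("%q\n", html.UnescapeString("A &NotSubset; B"))
	for _, r := range html.UnescapeString("&NotSubset;") {
		fmt.Printf("%U ", r) // U+2282 U+20D2
	}
	fmt.Println()
}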
diff --git a/vendor/golang.org/x/net/html/escape_test.go b/vendor/golang.org/x/net/html/escape_test.go
new file mode 100644
index 0000000..b405d4b
--- /dev/null
+++ b/vendor/golang.org/x/net/html/escape_test.go
@@ -0,0 +1,97 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import "testing"
+
+type unescapeTest struct {
+	// A short description of the test case.
+	desc string
+	// The HTML text.
+	html string
+	// The unescaped text.
+	unescaped string
+}
+
+var unescapeTests = []unescapeTest{
+	// Handle no entities.
+	{
+		"copy",
+		"A\ttext\nstring",
+		"A\ttext\nstring",
+	},
+	// Handle simple named entities.
+	{
+		"simple",
+		"&amp; &gt; &lt;",
+		"& > <",
+	},
+	// Handle hitting the end of the string.
+	{
+		"stringEnd",
+		"&amp &amp",
+		"& &",
+	},
+	// Handle entities with two codepoints.
+	{
+		"multiCodepoint",
+		"text &gesl; blah",
+		"text \u22db\ufe00 blah",
+	},
+	// Handle decimal numeric entities.
+	{
+		"decimalEntity",
+		"Delta = &#916; ",
+		"Delta = Δ ",
+	},
+	// Handle hexadecimal numeric entities.
+	{
+		"hexadecimalEntity",
+		"Lambda = &#x3bb; = &#X3Bb; ",
+		"Lambda = λ = λ ",
+	},
+	// Handle numeric early termination.
+	{
+		"numericEnds",
+		"&# &#x &#128;43 &copy = &#169f = &#xa9",
+		"&# &#x €43 © = ©f = ©",
+	},
+	// Handle numeric ISO-8859-1 entity replacements.
+	{
+		"numericReplacements",
+		"Footnote&#x87;",
+		"Footnote‡",
+	},
+}
+
+func TestUnescape(t *testing.T) {
+	for _, tt := range unescapeTests {
+		unescaped := UnescapeString(tt.html)
+		if unescaped != tt.unescaped {
+			t.Errorf("TestUnescape %s: want %q, got %q", tt.desc, tt.unescaped, unescaped)
+		}
+	}
+}
+
+func TestUnescapeEscape(t *testing.T) {
+	ss := []string{
+		``,
+		`abc def`,
+		`a & b`,
+		`a&amp;b`,
+		`a &amp; b`,
+		`&quot;`,
+		`"`,
+		`"<&>"`,
+		`&quot;&lt;&amp;&gt;&quot;`,
+		`3&5==1 && 0<1, "0<1", a+acute=&aacute;`,
+		`The special characters are: <, >, &, ' and "`,
+	}
+	for _, s := range ss {
+		if got := UnescapeString(EscapeString(s)); got != s {
+			t.Errorf("got %q want %q", got, s)
+		}
+	}
+}
diff --git a/vendor/golang.org/x/net/html/example_test.go b/vendor/golang.org/x/net/html/example_test.go
new file mode 100644
index 0000000..0b06ed7
--- /dev/null
+++ b/vendor/golang.org/x/net/html/example_test.go
@@ -0,0 +1,40 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This example demonstrates parsing HTML data and walking the resulting tree.
+package html_test
+
+import (
+	"fmt"
+	"log"
+	"strings"
+
+	"golang.org/x/net/html"
+)
+
+func ExampleParse() {
+	s := `<p>Links:</p><ul><li><a href="foo">Foo</a><li><a href="/bar/baz">BarBaz</a></ul>`
+	doc, err := html.Parse(strings.NewReader(s))
+	if err != nil {
+		log.Fatal(err)
+	}
+	var f func(*html.Node)
+	f = func(n *html.Node) {
+		if n.Type == html.ElementNode && n.Data == "a" {
+			for _, a := range n.Attr {
+				if a.Key == "href" {
+					fmt.Println(a.Val)
+					break
+				}
+			}
+		}
+		for c := n.FirstChild; c != nil; c = c.NextSibling {
+			f(c)
+		}
+	}
+	f(doc)
+	// Output:
+	// foo
+	// /bar/baz
+}
diff --git a/vendor/golang.org/x/net/html/foreign.go b/vendor/golang.org/x/net/html/foreign.go
new file mode 100644
index 0000000..01477a9
--- /dev/null
+++ b/vendor/golang.org/x/net/html/foreign.go
@@ -0,0 +1,226 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+	"strings"
+)
+
+func adjustAttributeNames(aa []Attribute, nameMap map[string]string) {
+	for i := range aa {
+		if newName, ok := nameMap[aa[i].Key]; ok {
+			aa[i].Key = newName
+		}
+	}
+}
+
+func adjustForeignAttributes(aa []Attribute) {
+	for i, a := range aa {
+		if a.Key == "" || a.Key[0] != 'x' {
+			continue
+		}
+		switch a.Key {
+		case "xlink:actuate", "xlink:arcrole", "xlink:href", "xlink:role", "xlink:show",
+			"xlink:title", "xlink:type", "xml:base", "xml:lang", "xml:space", "xmlns:xlink":
+			j := strings.Index(a.Key, ":")
+			aa[i].Namespace = a.Key[:j]
+			aa[i].Key = a.Key[j+1:]
+		}
+	}
+}
+
+func htmlIntegrationPoint(n *Node) bool {
+	if n.Type != ElementNode {
+		return false
+	}
+	switch n.Namespace {
+	case "math":
+		if n.Data == "annotation-xml" {
+			for _, a := range n.Attr {
+				if a.Key == "encoding" {
+					val := strings.ToLower(a.Val)
+					if val == "text/html" || val == "application/xhtml+xml" {
+						return true
+					}
+				}
+			}
+		}
+	case "svg":
+		switch n.Data {
+		case "desc", "foreignObject", "title":
+			return true
+		}
+	}
+	return false
+}
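htmlIntegrationPoint is what lets markup nested inside SVG switch back to HTML parsing. A minimal self-contained check via the public parser, separate from the vendored files:

package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// <desc> is an HTML integration point, so the <b> below is parsed as a
	// plain HTML element instead of breaking out of the foreign (SVG) content.
	doc, err := html.Parse(strings.NewReader(`<svg><desc><b>x</b></desc></svg>`))
	if err != nil {
		panic(err)
	}
	var walk func(*html.Node)
	walk = func(n *html.Node) {
		if n.Type == html.ElementNode {
			// svg and desc report ns="svg"; b reports ns="" (HTML).
			fmt.Printf("%s ns=%q\n", n.Data, n.Namespace)
		}
		for c := n.FirstChild; c != nil; c = c.NextSibling {
			walk(c)
		}
	}
	walk(doc)
}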
+
+func mathMLTextIntegrationPoint(n *Node) bool {
+	if n.Namespace != "math" {
+		return false
+	}
+	switch n.Data {
+	case "mi", "mo", "mn", "ms", "mtext":
+		return true
+	}
+	return false
+}
+
+// Section 12.2.6.5.
+var breakout = map[string]bool{
+	"b":          true,
+	"big":        true,
+	"blockquote": true,
+	"body":       true,
+	"br":         true,
+	"center":     true,
+	"code":       true,
+	"dd":         true,
+	"div":        true,
+	"dl":         true,
+	"dt":         true,
+	"em":         true,
+	"embed":      true,
+	"h1":         true,
+	"h2":         true,
+	"h3":         true,
+	"h4":         true,
+	"h5":         true,
+	"h6":         true,
+	"head":       true,
+	"hr":         true,
+	"i":          true,
+	"img":        true,
+	"li":         true,
+	"listing":    true,
+	"menu":       true,
+	"meta":       true,
+	"nobr":       true,
+	"ol":         true,
+	"p":          true,
+	"pre":        true,
+	"ruby":       true,
+	"s":          true,
+	"small":      true,
+	"span":       true,
+	"strong":     true,
+	"strike":     true,
+	"sub":        true,
+	"sup":        true,
+	"table":      true,
+	"tt":         true,
+	"u":          true,
+	"ul":         true,
+	"var":        true,
+}
+
+// Section 12.2.6.5.
+var svgTagNameAdjustments = map[string]string{ + "altglyph": "altGlyph", + "altglyphdef": "altGlyphDef", + "altglyphitem": "altGlyphItem", + "animatecolor": "animateColor", + "animatemotion": "animateMotion", + "animatetransform": "animateTransform", + "clippath": "clipPath", + "feblend": "feBlend", + "fecolormatrix": "feColorMatrix", + "fecomponenttransfer": "feComponentTransfer", + "fecomposite": "feComposite", + "feconvolvematrix": "feConvolveMatrix", + "fediffuselighting": "feDiffuseLighting", + "fedisplacementmap": "feDisplacementMap", + "fedistantlight": "feDistantLight", + "feflood": "feFlood", + "fefunca": "feFuncA", + "fefuncb": "feFuncB", + "fefuncg": "feFuncG", + "fefuncr": "feFuncR", + "fegaussianblur": "feGaussianBlur", + "feimage": "feImage", + "femerge": "feMerge", + "femergenode": "feMergeNode", + "femorphology": "feMorphology", + "feoffset": "feOffset", + "fepointlight": "fePointLight", + "fespecularlighting": "feSpecularLighting", + "fespotlight": "feSpotLight", + "fetile": "feTile", + "feturbulence": "feTurbulence", + "foreignobject": "foreignObject", + "glyphref": "glyphRef", + "lineargradient": "linearGradient", + "radialgradient": "radialGradient", + "textpath": "textPath", +} + +// Section 12.2.6.1 +var mathMLAttributeAdjustments = map[string]string{ + "definitionurl": "definitionURL", +} + +var svgAttributeAdjustments = map[string]string{ + "attributename": "attributeName", + "attributetype": "attributeType", + "basefrequency": "baseFrequency", + "baseprofile": "baseProfile", + "calcmode": "calcMode", + "clippathunits": "clipPathUnits", + "contentscripttype": "contentScriptType", + "contentstyletype": "contentStyleType", + "diffuseconstant": "diffuseConstant", + "edgemode": "edgeMode", + "externalresourcesrequired": "externalResourcesRequired", + "filterres": "filterRes", + "filterunits": "filterUnits", + "glyphref": "glyphRef", + "gradienttransform": "gradientTransform", + "gradientunits": "gradientUnits", + "kernelmatrix": "kernelMatrix", + "kernelunitlength": "kernelUnitLength", + "keypoints": "keyPoints", + "keysplines": "keySplines", + "keytimes": "keyTimes", + "lengthadjust": "lengthAdjust", + "limitingconeangle": "limitingConeAngle", + "markerheight": "markerHeight", + "markerunits": "markerUnits", + "markerwidth": "markerWidth", + "maskcontentunits": "maskContentUnits", + "maskunits": "maskUnits", + "numoctaves": "numOctaves", + "pathlength": "pathLength", + "patterncontentunits": "patternContentUnits", + "patterntransform": "patternTransform", + "patternunits": "patternUnits", + "pointsatx": "pointsAtX", + "pointsaty": "pointsAtY", + "pointsatz": "pointsAtZ", + "preservealpha": "preserveAlpha", + "preserveaspectratio": "preserveAspectRatio", + "primitiveunits": "primitiveUnits", + "refx": "refX", + "refy": "refY", + "repeatcount": "repeatCount", + "repeatdur": "repeatDur", + "requiredextensions": "requiredExtensions", + "requiredfeatures": "requiredFeatures", + "specularconstant": "specularConstant", + "specularexponent": "specularExponent", + "spreadmethod": "spreadMethod", + "startoffset": "startOffset", + "stddeviation": "stdDeviation", + "stitchtiles": "stitchTiles", + "surfacescale": "surfaceScale", + "systemlanguage": "systemLanguage", + "tablevalues": "tableValues", + "targetx": "targetX", + "targety": "targetY", + "textlength": "textLength", + "viewbox": "viewBox", + "viewtarget": "viewTarget", + "xchannelselector": "xChannelSelector", + "ychannelselector": "yChannelSelector", + "zoomandpan": "zoomAndPan", +} diff --git 
a/vendor/golang.org/x/net/html/node.go b/vendor/golang.org/x/net/html/node.go
new file mode 100644
index 0000000..6f136c4
--- /dev/null
+++ b/vendor/golang.org/x/net/html/node.go
@@ -0,0 +1,194 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+	"golang.org/x/net/html/atom"
+)
+
+// A NodeType is the type of a Node.
+type NodeType uint32
+
+const (
+	ErrorNode NodeType = iota
+	TextNode
+	DocumentNode
+	ElementNode
+	CommentNode
+	DoctypeNode
+	scopeMarkerNode
+)
+
+// Section 12.2.4.3 says "The markers are inserted when entering applet,
+// object, marquee, template, td, th, and caption elements, and are used
+// to prevent formatting from "leaking" into applet, object, marquee,
+// template, td, th, and caption elements".
+var scopeMarker = Node{Type: scopeMarkerNode}
+
+// A Node consists of a NodeType and some Data (tag name for element nodes,
+// content for text) and are part of a tree of Nodes. Element nodes may also
+// have a Namespace and contain a slice of Attributes. Data is unescaped, so
+// that it looks like "a<b" rather than "a&lt;b". For element nodes, DataAtom
+// is the atom for Data, or zero if Data is not a known tag name.
+//
+// An empty Namespace implies a "http://www.w3.org/1999/xhtml" namespace.
+// Similarly, "math" is short for "http://www.w3.org/1998/Math/MathML", and
+// "svg" is short for "http://www.w3.org/2000/svg".
+type Node struct {
+	Parent, FirstChild, LastChild, PrevSibling, NextSibling *Node
+
+	Type      NodeType
+	DataAtom  atom.Atom
+	Data      string
+	Namespace string
+	Attr      []Attribute
+}
+
+// InsertBefore inserts newChild as a child of n, immediately before oldChild
+// in the sequence of n's children. oldChild may be nil, in which case newChild
+// is appended to the end of n's children.
+//
+// It will panic if newChild already has a parent or siblings.
+func (n *Node) InsertBefore(newChild, oldChild *Node) {
+	if newChild.Parent != nil || newChild.PrevSibling != nil || newChild.NextSibling != nil {
+		panic("html: InsertBefore called for an attached child Node")
+	}
+	var prev, next *Node
+	if oldChild != nil {
+		prev, next = oldChild.PrevSibling, oldChild
+	} else {
+		prev = n.LastChild
+	}
+	if prev != nil {
+		prev.NextSibling = newChild
+	} else {
+		n.FirstChild = newChild
+	}
+	if next != nil {
+		next.PrevSibling = newChild
+	} else {
+		n.LastChild = newChild
+	}
+	newChild.Parent = n
+	newChild.PrevSibling = prev
+	newChild.NextSibling = next
+}
+
+// AppendChild adds a node c as a child of n.
+//
+// It will panic if c already has a parent or siblings.
+func (n *Node) AppendChild(c *Node) {
+	if c.Parent != nil || c.PrevSibling != nil || c.NextSibling != nil {
+		panic("html: AppendChild called for an attached child Node")
+	}
+	last := n.LastChild
+	if last != nil {
+		last.NextSibling = c
+	} else {
+		n.FirstChild = c
+	}
+	n.LastChild = c
+	c.Parent = n
+	c.PrevSibling = last
+}
+
+// RemoveChild removes a node c that is a child of n. Afterwards, c will have
+// no parent and no siblings.
+//
+// It will panic if c's parent is not n.
+func (n *Node) RemoveChild(c *Node) {
+	if c.Parent != n {
+		panic("html: RemoveChild called for a non-child Node")
+	}
+	if n.FirstChild == c {
+		n.FirstChild = c.NextSibling
+	}
+	if c.NextSibling != nil {
+		c.NextSibling.PrevSibling = c.PrevSibling
+	}
+	if n.LastChild == c {
+		n.LastChild = c.PrevSibling
+	}
+	if c.PrevSibling != nil {
+		c.PrevSibling.NextSibling = c.NextSibling
+	}
+	c.Parent = nil
+	c.PrevSibling = nil
+	c.NextSibling = nil
+}
+
+// reparentChildren reparents all of src's child nodes to dst.
+func reparentChildren(dst, src *Node) {
+	for {
+		child := src.FirstChild
+		if child == nil {
+			break
+		}
+		src.RemoveChild(child)
+		dst.AppendChild(child)
+	}
+}
+
+// clone returns a new node with the same type, data and attributes.
+// The clone has no parent, no siblings and no children.
+func (n *Node) clone() *Node {
+	m := &Node{
+		Type:     n.Type,
+		DataAtom: n.DataAtom,
+		Data:     n.Data,
+		Attr:     make([]Attribute, len(n.Attr)),
+	}
+	copy(m.Attr, n.Attr)
+	return m
+}
+
+// nodeStack is a stack of nodes.
+type nodeStack []*Node
+
+// pop pops the stack. It will panic if s is empty.
+func (s *nodeStack) pop() *Node {
+	i := len(*s)
+	n := (*s)[i-1]
+	*s = (*s)[:i-1]
+	return n
+}
+
+// top returns the most recently pushed node, or nil if s is empty.
+func (s *nodeStack) top() *Node {
+	if i := len(*s); i > 0 {
+		return (*s)[i-1]
+	}
+	return nil
+}
+
+// index returns the index of the top-most occurrence of n in the stack, or -1
+// if n is not present.
+func (s *nodeStack) index(n *Node) int {
+	for i := len(*s) - 1; i >= 0; i-- {
+		if (*s)[i] == n {
+			return i
+		}
+	}
+	return -1
+}
+
+// insert inserts a node at the given index.
+func (s *nodeStack) insert(i int, n *Node) {
+	(*s) = append(*s, nil)
+	copy((*s)[i+1:], (*s)[i:])
+	(*s)[i] = n
+}
+
+// remove removes a node from the stack. It is a no-op if n is not present.
+func (s *nodeStack) remove(n *Node) {
+	i := s.index(n)
+	if i == -1 {
+		return
+	}
+	copy((*s)[i:], (*s)[i+1:])
+	j := len(*s) - 1
+	(*s)[j] = nil
+	*s = (*s)[:j]
+}
diff --git a/vendor/golang.org/x/net/html/node_test.go b/vendor/golang.org/x/net/html/node_test.go
new file mode 100644
index 0000000..471102f
--- /dev/null
+++ b/vendor/golang.org/x/net/html/node_test.go
@@ -0,0 +1,146 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+	"fmt"
+)
+
+// checkTreeConsistency checks that a node and its descendants are all
+// consistent in their parent/child/sibling relationships.
+func checkTreeConsistency(n *Node) error {
+	return checkTreeConsistency1(n, 0)
+}
+
+func checkTreeConsistency1(n *Node, depth int) error {
+	if depth == 1e4 {
+		return fmt.Errorf("html: tree looks like it contains a cycle")
+	}
+	if err := checkNodeConsistency(n); err != nil {
+		return err
+	}
+	for c := n.FirstChild; c != nil; c = c.NextSibling {
+		if err := checkTreeConsistency1(c, depth+1); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// checkNodeConsistency checks that a node's parent/child/sibling relationships
+// are consistent.
+func checkNodeConsistency(n *Node) error { + if n == nil { + return nil + } + + nParent := 0 + for p := n.Parent; p != nil; p = p.Parent { + nParent++ + if nParent == 1e4 { + return fmt.Errorf("html: parent list looks like an infinite loop") + } + } + + nForward := 0 + for c := n.FirstChild; c != nil; c = c.NextSibling { + nForward++ + if nForward == 1e6 { + return fmt.Errorf("html: forward list of children looks like an infinite loop") + } + if c.Parent != n { + return fmt.Errorf("html: inconsistent child/parent relationship") + } + } + + nBackward := 0 + for c := n.LastChild; c != nil; c = c.PrevSibling { + nBackward++ + if nBackward == 1e6 { + return fmt.Errorf("html: backward list of children looks like an infinite loop") + } + if c.Parent != n { + return fmt.Errorf("html: inconsistent child/parent relationship") + } + } + + if n.Parent != nil { + if n.Parent == n { + return fmt.Errorf("html: inconsistent parent relationship") + } + if n.Parent == n.FirstChild { + return fmt.Errorf("html: inconsistent parent/first relationship") + } + if n.Parent == n.LastChild { + return fmt.Errorf("html: inconsistent parent/last relationship") + } + if n.Parent == n.PrevSibling { + return fmt.Errorf("html: inconsistent parent/prev relationship") + } + if n.Parent == n.NextSibling { + return fmt.Errorf("html: inconsistent parent/next relationship") + } + + parentHasNAsAChild := false + for c := n.Parent.FirstChild; c != nil; c = c.NextSibling { + if c == n { + parentHasNAsAChild = true + break + } + } + if !parentHasNAsAChild { + return fmt.Errorf("html: inconsistent parent/child relationship") + } + } + + if n.PrevSibling != nil && n.PrevSibling.NextSibling != n { + return fmt.Errorf("html: inconsistent prev/next relationship") + } + if n.NextSibling != nil && n.NextSibling.PrevSibling != n { + return fmt.Errorf("html: inconsistent next/prev relationship") + } + + if (n.FirstChild == nil) != (n.LastChild == nil) { + return fmt.Errorf("html: inconsistent first/last relationship") + } + if n.FirstChild != nil && n.FirstChild == n.LastChild { + // We have a sole child. + if n.FirstChild.PrevSibling != nil || n.FirstChild.NextSibling != nil { + return fmt.Errorf("html: inconsistent sole child's sibling relationship") + } + } + + seen := map[*Node]bool{} + + var last *Node + for c := n.FirstChild; c != nil; c = c.NextSibling { + if seen[c] { + return fmt.Errorf("html: inconsistent repeated child") + } + seen[c] = true + last = c + } + if last != n.LastChild { + return fmt.Errorf("html: inconsistent last relationship") + } + + var first *Node + for c := n.LastChild; c != nil; c = c.PrevSibling { + if !seen[c] { + return fmt.Errorf("html: inconsistent missing child") + } + delete(seen, c) + first = c + } + if first != n.FirstChild { + return fmt.Errorf("html: inconsistent first relationship") + } + + if len(seen) != 0 { + return fmt.Errorf("html: inconsistent forwards/backwards child list") + } + + return nil +} diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go new file mode 100644 index 0000000..2a5abdd --- /dev/null +++ b/vendor/golang.org/x/net/html/parse.go @@ -0,0 +1,2094 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package html
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+
+	a "golang.org/x/net/html/atom"
+)
+
+// A parser implements the HTML5 parsing algorithm:
+// https://html.spec.whatwg.org/multipage/syntax.html#tree-construction
+type parser struct {
+	// tokenizer provides the tokens for the parser.
+	tokenizer *Tokenizer
+	// tok is the most recently read token.
+	tok Token
+	// Self-closing tags like <br/> are treated as start tags, except that
+	// hasSelfClosingToken is set while they are being processed.
+	hasSelfClosingToken bool
+	// doc is the document root element.
+	doc *Node
+	// The stack of open elements (section 12.2.4.2) and active formatting
+	// elements (section 12.2.4.3).
+	oe, afe nodeStack
+	// Element pointers (section 12.2.4.4).
+	head, form *Node
+	// Other parsing state flags (section 12.2.4.5).
+	scripting, framesetOK bool
+	// im is the current insertion mode.
+	im insertionMode
+	// originalIM is the insertion mode to go back to after completing a text
+	// or inTableText insertion mode.
+	originalIM insertionMode
+	// fosterParenting is whether new elements should be inserted according to
+	// the foster parenting rules (section 12.2.6.1).
+	fosterParenting bool
+	// quirks is whether the parser is operating in "quirks mode."
+	quirks bool
+	// fragment is whether the parser is parsing an HTML fragment.
+	fragment bool
+	// context is the context element when parsing an HTML fragment
+	// (section 12.4).
+	context *Node
+}
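The fragment and context fields above are what fragment parsing sets: the same markup can parse differently under different context elements. A small sketch via the exported ParseFragment, separate from the vendored files (the tr context is just an example choice):

package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
	"golang.org/x/net/html/atom"
)

func main() {
	// Under a <tr> context the fragment keeps its <td>; under a <body>
	// context the stray <td> tag should be dropped, leaving only text.
	ctx := &html.Node{Type: html.ElementNode, DataAtom: atom.Tr, Data: "tr"}
	nodes, err := html.ParseFragment(strings.NewReader(`<td>x</td>`), ctx)
	if err != nil {
		panic(err)
	}
	for _, n := range nodes {
		fmt.Println(n.Data) // td
	}
}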
+
+func (p *parser) top() *Node {
+	if n := p.oe.top(); n != nil {
+		return n
+	}
+	return p.doc
+}
+
+// Stop tags for use in popUntil. These come from section 12.2.4.2.
+var (
+	defaultScopeStopTags = map[string][]a.Atom{
+		"":     {a.Applet, a.Caption, a.Html, a.Table, a.Td, a.Th, a.Marquee, a.Object, a.Template},
+		"math": {a.AnnotationXml, a.Mi, a.Mn, a.Mo, a.Ms, a.Mtext},
+		"svg":  {a.Desc, a.ForeignObject, a.Title},
+	}
+)
+
+type scope int
+
+const (
+	defaultScope scope = iota
+	listItemScope
+	buttonScope
+	tableScope
+	tableRowScope
+	tableBodyScope
+	selectScope
+)
+
+// popUntil pops the stack of open elements at the highest element whose tag
+// is in matchTags, provided there is no higher element in the scope's stop
+// tags (as defined in section 12.2.4.2). It returns whether or not there was
+// such an element. If there was not, popUntil leaves the stack unchanged.
+//
+// For example, the set of stop tags for table scope is: "html", "table". If
+// the stack was:
+// ["html", "body", "font", "table", "b", "i", "u"]
+// then popUntil(tableScope, "font") would return false, but
+// popUntil(tableScope, "i") would return true and the stack would become:
+// ["html", "body", "font", "table", "b"]
+//
+// If an element's tag is in both the stop tags and matchTags, then the stack
+// will be popped and the function returns true (provided, of course, there was
+// no higher element in the stack that was also in the stop tags). For example,
+// popUntil(tableScope, "table") returns true and leaves:
+// ["html", "body", "font"]
+func (p *parser) popUntil(s scope, matchTags ...a.Atom) bool {
+	if i := p.indexOfElementInScope(s, matchTags...); i != -1 {
+		p.oe = p.oe[:i]
+		return true
+	}
+	return false
+}
+
+// indexOfElementInScope returns the index in p.oe of the highest element whose
+// tag is in matchTags that is in scope. If no matching element is in scope, it
+// returns -1.
+func (p *parser) indexOfElementInScope(s scope, matchTags ...a.Atom) int {
+	for i := len(p.oe) - 1; i >= 0; i-- {
+		tagAtom := p.oe[i].DataAtom
+		if p.oe[i].Namespace == "" {
+			for _, t := range matchTags {
+				if t == tagAtom {
+					return i
+				}
+			}
+			switch s {
+			case defaultScope:
+				// No-op.
+ case listItemScope: + if tagAtom == a.Ol || tagAtom == a.Ul { + return -1 + } + case buttonScope: + if tagAtom == a.Button { + return -1 + } + case tableScope: + if tagAtom == a.Html || tagAtom == a.Table { + return -1 + } + case selectScope: + if tagAtom != a.Optgroup && tagAtom != a.Option { + return -1 + } + default: + panic("unreachable") + } + } + switch s { + case defaultScope, listItemScope, buttonScope: + for _, t := range defaultScopeStopTags[p.oe[i].Namespace] { + if t == tagAtom { + return -1 + } + } + } + } + return -1 +} + +// elementInScope is like popUntil, except that it doesn't modify the stack of +// open elements. +func (p *parser) elementInScope(s scope, matchTags ...a.Atom) bool { + return p.indexOfElementInScope(s, matchTags...) != -1 +} + +// clearStackToContext pops elements off the stack of open elements until a +// scope-defined element is found. +func (p *parser) clearStackToContext(s scope) { + for i := len(p.oe) - 1; i >= 0; i-- { + tagAtom := p.oe[i].DataAtom + switch s { + case tableScope: + if tagAtom == a.Html || tagAtom == a.Table { + p.oe = p.oe[:i+1] + return + } + case tableRowScope: + if tagAtom == a.Html || tagAtom == a.Tr { + p.oe = p.oe[:i+1] + return + } + case tableBodyScope: + if tagAtom == a.Html || tagAtom == a.Tbody || tagAtom == a.Tfoot || tagAtom == a.Thead { + p.oe = p.oe[:i+1] + return + } + default: + panic("unreachable") + } + } +} + +// generateImpliedEndTags pops nodes off the stack of open elements as long as +// the top node has a tag name of dd, dt, li, option, optgroup, p, rp, or rt. +// If exceptions are specified, nodes with that name will not be popped off. +func (p *parser) generateImpliedEndTags(exceptions ...string) { + var i int +loop: + for i = len(p.oe) - 1; i >= 0; i-- { + n := p.oe[i] + if n.Type == ElementNode { + switch n.DataAtom { + case a.Dd, a.Dt, a.Li, a.Option, a.Optgroup, a.P, a.Rp, a.Rt: + for _, except := range exceptions { + if n.Data == except { + break loop + } + } + continue + } + } + break + } + + p.oe = p.oe[:i+1] +} + +// addChild adds a child node n to the top element, and pushes n onto the stack +// of open elements if it is an element node. +func (p *parser) addChild(n *Node) { + if p.shouldFosterParent() { + p.fosterParent(n) + } else { + p.top().AppendChild(n) + } + + if n.Type == ElementNode { + p.oe = append(p.oe, n) + } +} + +// shouldFosterParent returns whether the next node to be added should be +// foster parented. +func (p *parser) shouldFosterParent() bool { + if p.fosterParenting { + switch p.top().DataAtom { + case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr: + return true + } + } + return false +} + +// fosterParent adds a child node according to the foster parenting rules. +// Section 12.2.6.1, "foster parenting". +func (p *parser) fosterParent(n *Node) { + var table, parent, prev *Node + var i int + for i = len(p.oe) - 1; i >= 0; i-- { + if p.oe[i].DataAtom == a.Table { + table = p.oe[i] + break + } + } + + if table == nil { + // The foster parent is the html element. + parent = p.oe[0] + } else { + parent = table.Parent + } + if parent == nil { + parent = p.oe[i-1] + } + + if table != nil { + prev = table.PrevSibling + } else { + prev = parent.LastChild + } + if prev != nil && prev.Type == TextNode && n.Type == TextNode { + prev.Data += n.Data + return + } + + parent.InsertBefore(n, table) +} + +// addText adds text to the preceding node if it is a text node, or else it +// calls addChild with a new text node. 
+func (p *parser) addText(text string) { + if text == "" { + return + } + + if p.shouldFosterParent() { + p.fosterParent(&Node{ + Type: TextNode, + Data: text, + }) + return + } + + t := p.top() + if n := t.LastChild; n != nil && n.Type == TextNode { + n.Data += text + return + } + p.addChild(&Node{ + Type: TextNode, + Data: text, + }) +} + +// addElement adds a child element based on the current token. +func (p *parser) addElement() { + p.addChild(&Node{ + Type: ElementNode, + DataAtom: p.tok.DataAtom, + Data: p.tok.Data, + Attr: p.tok.Attr, + }) +} + +// Section 12.2.4.3. +func (p *parser) addFormattingElement() { + tagAtom, attr := p.tok.DataAtom, p.tok.Attr + p.addElement() + + // Implement the Noah's Ark clause, but with three per family instead of two. + identicalElements := 0 +findIdenticalElements: + for i := len(p.afe) - 1; i >= 0; i-- { + n := p.afe[i] + if n.Type == scopeMarkerNode { + break + } + if n.Type != ElementNode { + continue + } + if n.Namespace != "" { + continue + } + if n.DataAtom != tagAtom { + continue + } + if len(n.Attr) != len(attr) { + continue + } + compareAttributes: + for _, t0 := range n.Attr { + for _, t1 := range attr { + if t0.Key == t1.Key && t0.Namespace == t1.Namespace && t0.Val == t1.Val { + // Found a match for this attribute, continue with the next attribute. + continue compareAttributes + } + } + // If we get here, there is no attribute that matches a. + // Therefore the element is not identical to the new one. + continue findIdenticalElements + } + + identicalElements++ + if identicalElements >= 3 { + p.afe.remove(n) + } + } + + p.afe = append(p.afe, p.top()) +} + +// Section 12.2.4.3. +func (p *parser) clearActiveFormattingElements() { + for { + n := p.afe.pop() + if len(p.afe) == 0 || n.Type == scopeMarkerNode { + return + } + } +} + +// Section 12.2.4.3. +func (p *parser) reconstructActiveFormattingElements() { + n := p.afe.top() + if n == nil { + return + } + if n.Type == scopeMarkerNode || p.oe.index(n) != -1 { + return + } + i := len(p.afe) - 1 + for n.Type != scopeMarkerNode && p.oe.index(n) == -1 { + if i == 0 { + i = -1 + break + } + i-- + n = p.afe[i] + } + for { + i++ + clone := p.afe[i].clone() + p.addChild(clone) + p.afe[i] = clone + if i == len(p.afe)-1 { + break + } + } +} + +// Section 12.2.5. +func (p *parser) acknowledgeSelfClosingTag() { + p.hasSelfClosingToken = false +} + +// An insertion mode (section 12.2.4.1) is the state transition function from +// a particular state in the HTML5 parser's state machine. It updates the +// parser's fields depending on parser.tok (where ErrorToken means EOF). +// It returns whether the token was consumed. +type insertionMode func(*parser) bool + +// setOriginalIM sets the insertion mode to return to after completing a text or +// inTableText insertion mode. +// Section 12.2.4.1, "using the rules for". +func (p *parser) setOriginalIM() { + if p.originalIM != nil { + panic("html: bad parser state: originalIM was set twice") + } + p.originalIM = p.im +} + +// Section 12.2.4.1, "reset the insertion mode". 
+func (p *parser) resetInsertionMode() { + for i := len(p.oe) - 1; i >= 0; i-- { + n := p.oe[i] + if i == 0 && p.context != nil { + n = p.context + } + + switch n.DataAtom { + case a.Select: + p.im = inSelectIM + case a.Td, a.Th: + p.im = inCellIM + case a.Tr: + p.im = inRowIM + case a.Tbody, a.Thead, a.Tfoot: + p.im = inTableBodyIM + case a.Caption: + p.im = inCaptionIM + case a.Colgroup: + p.im = inColumnGroupIM + case a.Table: + p.im = inTableIM + case a.Head: + p.im = inBodyIM + case a.Body: + p.im = inBodyIM + case a.Frameset: + p.im = inFramesetIM + case a.Html: + p.im = beforeHeadIM + default: + continue + } + return + } + p.im = inBodyIM +} + +const whitespace = " \t\r\n\f" + +// Section 12.2.6.4.1. +func initialIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) + if len(p.tok.Data) == 0 { + // It was all whitespace, so ignore it. + return true + } + case CommentToken: + p.doc.AppendChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + n, quirks := parseDoctype(p.tok.Data) + p.doc.AppendChild(n) + p.quirks = quirks + p.im = beforeHTMLIM + return true + } + p.quirks = true + p.im = beforeHTMLIM + return false +} + +// Section 12.2.6.4.2. +func beforeHTMLIM(p *parser) bool { + switch p.tok.Type { + case DoctypeToken: + // Ignore the token. + return true + case TextToken: + p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) + if len(p.tok.Data) == 0 { + // It was all whitespace, so ignore it. + return true + } + case StartTagToken: + if p.tok.DataAtom == a.Html { + p.addElement() + p.im = beforeHeadIM + return true + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Head, a.Body, a.Html, a.Br: + p.parseImpliedToken(StartTagToken, a.Html, a.Html.String()) + return false + default: + // Ignore the token. + return true + } + case CommentToken: + p.doc.AppendChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + } + p.parseImpliedToken(StartTagToken, a.Html, a.Html.String()) + return false +} + +// Section 12.2.6.4.3. +func beforeHeadIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) + if len(p.tok.Data) == 0 { + // It was all whitespace, so ignore it. + return true + } + case StartTagToken: + switch p.tok.DataAtom { + case a.Head: + p.addElement() + p.head = p.top() + p.im = inHeadIM + return true + case a.Html: + return inBodyIM(p) + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Head, a.Body, a.Html, a.Br: + p.parseImpliedToken(StartTagToken, a.Head, a.Head.String()) + return false + default: + // Ignore the token. + return true + } + case CommentToken: + p.addChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + // Ignore the token. + return true + } + + p.parseImpliedToken(StartTagToken, a.Head, a.Head.String()) + return false +} + +// Section 12.2.6.4.4. +func inHeadIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + s := strings.TrimLeft(p.tok.Data, whitespace) + if len(s) < len(p.tok.Data) { + // Add the initial whitespace to the current node. 
+ p.addText(p.tok.Data[:len(p.tok.Data)-len(s)]) + if s == "" { + return true + } + p.tok.Data = s + } + case StartTagToken: + switch p.tok.DataAtom { + case a.Html: + return inBodyIM(p) + case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta: + p.addElement() + p.oe.pop() + p.acknowledgeSelfClosingTag() + return true + case a.Script, a.Title, a.Noscript, a.Noframes, a.Style: + p.addElement() + p.setOriginalIM() + p.im = textIM + return true + case a.Head: + // Ignore the token. + return true + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Head: + n := p.oe.pop() + if n.DataAtom != a.Head { + panic("html: bad parser state: element not found, in the in-head insertion mode") + } + p.im = afterHeadIM + return true + case a.Body, a.Html, a.Br: + p.parseImpliedToken(EndTagToken, a.Head, a.Head.String()) + return false + default: + // Ignore the token. + return true + } + case CommentToken: + p.addChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + // Ignore the token. + return true + } + + p.parseImpliedToken(EndTagToken, a.Head, a.Head.String()) + return false +} + +// Section 12.2.6.4.6. +func afterHeadIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + s := strings.TrimLeft(p.tok.Data, whitespace) + if len(s) < len(p.tok.Data) { + // Add the initial whitespace to the current node. + p.addText(p.tok.Data[:len(p.tok.Data)-len(s)]) + if s == "" { + return true + } + p.tok.Data = s + } + case StartTagToken: + switch p.tok.DataAtom { + case a.Html: + return inBodyIM(p) + case a.Body: + p.addElement() + p.framesetOK = false + p.im = inBodyIM + return true + case a.Frameset: + p.addElement() + p.im = inFramesetIM + return true + case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Title: + p.oe = append(p.oe, p.head) + defer p.oe.remove(p.head) + return inHeadIM(p) + case a.Head: + // Ignore the token. + return true + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Body, a.Html, a.Br: + // Drop down to creating an implied tag. + default: + // Ignore the token. + return true + } + case CommentToken: + p.addChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + // Ignore the token. + return true + } + + p.parseImpliedToken(StartTagToken, a.Body, a.Body.String()) + p.framesetOK = true + return false +} + +// copyAttributes copies attributes of src not found on dst to dst. +func copyAttributes(dst *Node, src Token) { + if len(src.Attr) == 0 { + return + } + attr := map[string]string{} + for _, t := range dst.Attr { + attr[t.Key] = t.Val + } + for _, t := range src.Attr { + if _, ok := attr[t.Key]; !ok { + dst.Attr = append(dst.Attr, t) + attr[t.Key] = t.Val + } + } +} + +// Section 12.2.6.4.7. +func inBodyIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + d := p.tok.Data + switch n := p.oe.top(); n.DataAtom { + case a.Pre, a.Listing: + if n.FirstChild == nil { + // Ignore a newline at the start of a
<pre> block.
+				if d != "" && d[0] == '\r' {
+					d = d[1:]
+				}
+				if d != "" && d[0] == '\n' {
+					d = d[1:]
+				}
+			}
+		}
+		d = strings.Replace(d, "\x00", "", -1)
+		if d == "" {
+			return true
+		}
+		p.reconstructActiveFormattingElements()
+		p.addText(d)
+		if p.framesetOK && strings.TrimLeft(d, whitespace) != "" {
+			// There were non-whitespace characters inserted.
+			p.framesetOK = false
+		}
+	case StartTagToken:
+		switch p.tok.DataAtom {
+		case a.Html:
+			copyAttributes(p.oe[0], p.tok)
+		case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Title:
+			return inHeadIM(p)
+		case a.Body:
+			if len(p.oe) >= 2 {
+				body := p.oe[1]
+				if body.Type == ElementNode && body.DataAtom == a.Body {
+					p.framesetOK = false
+					copyAttributes(body, p.tok)
+				}
+			}
+		case a.Frameset:
+			if !p.framesetOK || len(p.oe) < 2 || p.oe[1].DataAtom != a.Body {
+				// Ignore the token.
+				return true
+			}
+			body := p.oe[1]
+			if body.Parent != nil {
+				body.Parent.RemoveChild(body)
+			}
+			p.oe = p.oe[:1]
+			p.addElement()
+			p.im = inFramesetIM
+			return true
+		case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
+			p.popUntil(buttonScope, a.P)
+			switch n := p.top(); n.DataAtom {
+			case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
+				p.oe.pop()
+			}
+			p.addElement()
+		case a.Pre, a.Listing:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+			// The newline, if any, will be dealt with by the TextToken case.
+			p.framesetOK = false
+		case a.Form:
+			if p.form == nil {
+				p.popUntil(buttonScope, a.P)
+				p.addElement()
+				p.form = p.top()
+			}
+		case a.Li:
+			p.framesetOK = false
+			for i := len(p.oe) - 1; i >= 0; i-- {
+				node := p.oe[i]
+				switch node.DataAtom {
+				case a.Li:
+					p.oe = p.oe[:i]
+				case a.Address, a.Div, a.P:
+					continue
+				default:
+					if !isSpecialElement(node) {
+						continue
+					}
+				}
+				break
+			}
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.Dd, a.Dt:
+			p.framesetOK = false
+			for i := len(p.oe) - 1; i >= 0; i-- {
+				node := p.oe[i]
+				switch node.DataAtom {
+				case a.Dd, a.Dt:
+					p.oe = p.oe[:i]
+				case a.Address, a.Div, a.P:
+					continue
+				default:
+					if !isSpecialElement(node) {
+						continue
+					}
+				}
+				break
+			}
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.Plaintext:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.Button:
+			p.popUntil(defaultScope, a.Button)
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.framesetOK = false
+		case a.A:
+			for i := len(p.afe) - 1; i >= 0 && p.afe[i].Type != scopeMarkerNode; i-- {
+				if n := p.afe[i]; n.Type == ElementNode && n.DataAtom == a.A {
+					p.inBodyEndTagFormatting(a.A)
+					p.oe.remove(n)
+					p.afe.remove(n)
+					break
+				}
+			}
+			p.reconstructActiveFormattingElements()
+			p.addFormattingElement()
+		case a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
+			p.reconstructActiveFormattingElements()
+			p.addFormattingElement()
+		case a.Nobr:
+			p.reconstructActiveFormattingElements()
+			if p.elementInScope(defaultScope, a.Nobr) {
+				p.inBodyEndTagFormatting(a.Nobr)
+				p.reconstructActiveFormattingElements()
+			}
+			p.addFormattingElement()
+		case a.Applet, a.Marquee, a.Object:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.afe = append(p.afe, &scopeMarker)
+			p.framesetOK = false
+		case a.Table:
+			if !p.quirks {
+				p.popUntil(buttonScope, a.P)
+			}
+			p.addElement()
+			p.framesetOK = false
+			p.im = inTableIM
+			return true
+		case a.Area, a.Br, a.Embed, a.Img, a.Input, a.Keygen, a.Wbr:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.oe.pop()
+			p.acknowledgeSelfClosingTag()
+			if p.tok.DataAtom == a.Input {
+				for _, t := range p.tok.Attr {
+					if t.Key == "type" {
+						if strings.ToLower(t.Val) == "hidden" {
+							// Skip setting framesetOK = false
+							return true
+						}
+					}
+				}
+			}
+			p.framesetOK = false
+		case a.Param, a.Source, a.Track:
+			p.addElement()
+			p.oe.pop()
+			p.acknowledgeSelfClosingTag()
+		case a.Hr:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+			p.oe.pop()
+			p.acknowledgeSelfClosingTag()
+			p.framesetOK = false
+		case a.Image:
+			p.tok.DataAtom = a.Img
+			p.tok.Data = a.Img.String()
+			return false
+		case a.Isindex:
+			if p.form != nil {
+				// Ignore the token.
+				return true
+			}
+			action := ""
+			prompt := "This is a searchable index. Enter search keywords: "
+			attr := []Attribute{{Key: "name", Val: "isindex"}}
+			for _, t := range p.tok.Attr {
+				switch t.Key {
+				case "action":
+					action = t.Val
+				case "name":
+					// Ignore the attribute.
+				case "prompt":
+					prompt = t.Val
+				default:
+					attr = append(attr, t)
+				}
+			}
+			p.acknowledgeSelfClosingTag()
+			p.popUntil(buttonScope, a.P)
+			p.parseImpliedToken(StartTagToken, a.Form, a.Form.String())
+			if action != "" {
+				p.form.Attr = []Attribute{{Key: "action", Val: action}}
+			}
+			p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
+			p.parseImpliedToken(StartTagToken, a.Label, a.Label.String())
+			p.addText(prompt)
+			p.addChild(&Node{
+				Type:     ElementNode,
+				DataAtom: a.Input,
+				Data:     a.Input.String(),
+				Attr:     attr,
+			})
+			p.oe.pop()
+			p.parseImpliedToken(EndTagToken, a.Label, a.Label.String())
+			p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
+			p.parseImpliedToken(EndTagToken, a.Form, a.Form.String())
+		case a.Textarea:
+			p.addElement()
+			p.setOriginalIM()
+			p.framesetOK = false
+			p.im = textIM
+		case a.Xmp:
+			p.popUntil(buttonScope, a.P)
+			p.reconstructActiveFormattingElements()
+			p.framesetOK = false
+			p.addElement()
+			p.setOriginalIM()
+			p.im = textIM
+		case a.Iframe:
+			p.framesetOK = false
+			p.addElement()
+			p.setOriginalIM()
+			p.im = textIM
+		case a.Noembed, a.Noscript:
+			p.addElement()
+			p.setOriginalIM()
+			p.im = textIM
+		case a.Select:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.framesetOK = false
+			p.im = inSelectIM
+			return true
+		case a.Optgroup, a.Option:
+			if p.top().DataAtom == a.Option {
+				p.oe.pop()
+			}
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+		case a.Rp, a.Rt:
+			if p.elementInScope(defaultScope, a.Ruby) {
+				p.generateImpliedEndTags()
+			}
+			p.addElement()
+		case a.Math, a.Svg:
+			p.reconstructActiveFormattingElements()
+			if p.tok.DataAtom == a.Math {
+				adjustAttributeNames(p.tok.Attr, mathMLAttributeAdjustments)
+			} else {
+				adjustAttributeNames(p.tok.Attr, svgAttributeAdjustments)
+			}
+			adjustForeignAttributes(p.tok.Attr)
+			p.addElement()
+			p.top().Namespace = p.tok.Data
+			if p.hasSelfClosingToken {
+				p.oe.pop()
+				p.acknowledgeSelfClosingTag()
+			}
+			return true
+		case a.Caption, a.Col, a.Colgroup, a.Frame, a.Head, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
+			// Ignore the token.
+		default:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+		}
+	case EndTagToken:
+		switch p.tok.DataAtom {
+		case a.Body:
+			if p.elementInScope(defaultScope, a.Body) {
+				p.im = afterBodyIM
+			}
+		case a.Html:
+			if p.elementInScope(defaultScope, a.Body) {
+				p.parseImpliedToken(EndTagToken, a.Body, a.Body.String())
+				return false
+			}
+			return true
+		case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul:
+			p.popUntil(defaultScope, p.tok.DataAtom)
+		case a.Form:
+			node := p.form
+			p.form = nil
+			i := p.indexOfElementInScope(defaultScope, a.Form)
+			if node == nil || i == -1 || p.oe[i] != node {
+				// Ignore the token.
+				return true
+			}
+			p.generateImpliedEndTags()
+			p.oe.remove(node)
+		case a.P:
+			if !p.elementInScope(buttonScope, a.P) {
+				p.parseImpliedToken(StartTagToken, a.P, a.P.String())
+			}
+			p.popUntil(buttonScope, a.P)
+		case a.Li:
+			p.popUntil(listItemScope, a.Li)
+		case a.Dd, a.Dt:
+			p.popUntil(defaultScope, p.tok.DataAtom)
+		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
+			p.popUntil(defaultScope, a.H1, a.H2, a.H3, a.H4, a.H5, a.H6)
+		case a.A, a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.Nobr, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
+			p.inBodyEndTagFormatting(p.tok.DataAtom)
+		case a.Applet, a.Marquee, a.Object:
+			if p.popUntil(defaultScope, p.tok.DataAtom) {
+				p.clearActiveFormattingElements()
+			}
+		case a.Br:
+			p.tok.Type = StartTagToken
+			return false
+		default:
+			p.inBodyEndTagOther(p.tok.DataAtom)
+		}
+	case CommentToken:
+		p.addChild(&Node{
+			Type: CommentNode,
+			Data: p.tok.Data,
+		})
+	}
+
+	return true
+}
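+
+// Editorial note, not part of the upstream source: when an insertion mode
+// returns false, the token was not consumed and the parser reprocesses it
+// under the (possibly updated) p.im. For example, inBodyIM above handles an
+// <image> start tag by rewriting p.tok into an <img> token and returning
+// false, so the rewritten token is run through inBodyIM again.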
+
+func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom) {
+	// This is the "adoption agency" algorithm, described at
+	// https://html.spec.whatwg.org/multipage/syntax.html#adoptionAgency
+
+	// TODO: this is a fairly literal line-by-line translation of that algorithm.
+	// Once the code successfully parses the comprehensive test suite, we should
+	// refactor this code to be more idiomatic.
+
+	// Steps 1-4. The outer loop.
+	for i := 0; i < 8; i++ {
+		// Step 5. Find the formatting element.
+		var formattingElement *Node
+		for j := len(p.afe) - 1; j >= 0; j-- {
+			if p.afe[j].Type == scopeMarkerNode {
+				break
+			}
+			if p.afe[j].DataAtom == tagAtom {
+				formattingElement = p.afe[j]
+				break
+			}
+		}
+		if formattingElement == nil {
+			p.inBodyEndTagOther(tagAtom)
+			return
+		}
+		feIndex := p.oe.index(formattingElement)
+		if feIndex == -1 {
+			p.afe.remove(formattingElement)
+			return
+		}
+		if !p.elementInScope(defaultScope, tagAtom) {
+			// Ignore the tag.
+			return
+		}
+
+		// Steps 9-10. Find the furthest block.
+		var furthestBlock *Node
+		for _, e := range p.oe[feIndex:] {
+			if isSpecialElement(e) {
+				furthestBlock = e
+				break
+			}
+		}
+		if furthestBlock == nil {
+			e := p.oe.pop()
+			for e != formattingElement {
+				e = p.oe.pop()
+			}
+			p.afe.remove(e)
+			return
+		}
+
+		// Steps 11-12. Find the common ancestor and bookmark node.
+		commonAncestor := p.oe[feIndex-1]
+		bookmark := p.afe.index(formattingElement)
+
+		// Step 13. The inner loop. Find the lastNode to reparent.
+		lastNode := furthestBlock
+		node := furthestBlock
+		x := p.oe.index(node)
+		// Steps 13.1-13.2
+		for j := 0; j < 3; j++ {
+			// Step 13.3.
+			x--
+			node = p.oe[x]
+			// Step 13.4 - 13.5.
+			if p.afe.index(node) == -1 {
+				p.oe.remove(node)
+				continue
+			}
+			// Step 13.6.
+			if node == formattingElement {
+				break
+			}
+			// Step 13.7.
+			clone := node.clone()
+			p.afe[p.afe.index(node)] = clone
+			p.oe[p.oe.index(node)] = clone
+			node = clone
+			// Step 13.8.
+			if lastNode == furthestBlock {
+				bookmark = p.afe.index(node) + 1
+			}
+			// Step 13.9.
+			if lastNode.Parent != nil {
+				lastNode.Parent.RemoveChild(lastNode)
+			}
+			node.AppendChild(lastNode)
+			// Step 13.10.
+			lastNode = node
+		}
+
+		// Step 14. Reparent lastNode to the common ancestor,
+		// or for misnested table nodes, to the foster parent.
+		if lastNode.Parent != nil {
+			lastNode.Parent.RemoveChild(lastNode)
+		}
+		switch commonAncestor.DataAtom {
+		case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
+			p.fosterParent(lastNode)
+		default:
+			commonAncestor.AppendChild(lastNode)
+		}
+
+		// Steps 15-17. Reparent nodes from the furthest block's children
+		// to a clone of the formatting element.
+		clone := formattingElement.clone()
+		reparentChildren(clone, furthestBlock)
+		furthestBlock.AppendChild(clone)
+
+		// Step 18. Fix up the list of active formatting elements.
+		if oldLoc := p.afe.index(formattingElement); oldLoc != -1 && oldLoc < bookmark {
+			// Move the bookmark with the rest of the list.
+			bookmark--
+		}
+		p.afe.remove(formattingElement)
+		p.afe.insert(bookmark, clone)
+
+		// Step 19. Fix up the stack of open elements.
+		p.oe.remove(formattingElement)
+		p.oe.insert(p.oe.index(furthestBlock)+1, clone)
+	}
+}
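+
+// Editorial note, not part of the upstream source: as a rough worked example
+// of the adoption agency algorithm, parsing "<b>1<p>2</b>3</p>" finds <b> as
+// the formatting element and <p> as the furthest block for the </b> tag, and
+// restructures the tree into <b>1</b><p><b>2</b>3</p>, cloning <b> so that
+// the text already inside the paragraph keeps its formatting.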
+
+// inBodyEndTagOther performs the "any other end tag" algorithm for inBodyIM.
+// "Any other end tag" handling from 12.2.6.5 The rules for parsing tokens in foreign content
+// https://html.spec.whatwg.org/multipage/syntax.html#parsing-main-inforeign
+func (p *parser) inBodyEndTagOther(tagAtom a.Atom) {
+	for i := len(p.oe) - 1; i >= 0; i-- {
+		if p.oe[i].DataAtom == tagAtom {
+			p.oe = p.oe[:i]
+			break
+		}
+		if isSpecialElement(p.oe[i]) {
+			break
+		}
+	}
+}
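+
+// Editorial note, not part of the upstream source: for input such as
+// "<span><div></span>", the </span> end tag scans down from the top of the
+// stack, reaches the special element <div> before finding a matching <span>,
+// and is ignored, leaving the <div> open inside the <span>.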
+
+// Section 12.2.6.4.8.
+func textIM(p *parser) bool {
+	switch p.tok.Type {
+	case ErrorToken:
+		p.oe.pop()
+	case TextToken:
+		d := p.tok.Data
+		if n := p.oe.top(); n.DataAtom == a.Textarea && n.FirstChild == nil {
+			// Ignore a newline at the start of a <textarea> block.
+#errors
+#document
+| 
+|   
+|   
+|     -->
+#errors
+#document
+| 
+|   
+|   
+|     
+#errors
+Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE.
+#document
+| 
+|   
+|   
+|     
+#errors
+Line: 1 Col: 9 Unexpected end tag (strong). Expected DOCTYPE.
+Line: 1 Col: 9 Unexpected end tag (strong) after the (implied) root element.
+Line: 1 Col: 13 Unexpected end tag (b) after the (implied) root element.
+Line: 1 Col: 18 Unexpected end tag (em) after the (implied) root element.
+Line: 1 Col: 22 Unexpected end tag (i) after the (implied) root element.
+Line: 1 Col: 26 Unexpected end tag (u) after the (implied) root element.
+Line: 1 Col: 35 Unexpected end tag (strike) after the (implied) root element.
+Line: 1 Col: 39 Unexpected end tag (s) after the (implied) root element.
+Line: 1 Col: 47 Unexpected end tag (blink) after the (implied) root element.
+Line: 1 Col: 52 Unexpected end tag (tt) after the (implied) root element.
+Line: 1 Col: 58 Unexpected end tag (pre) after the (implied) root element.
+Line: 1 Col: 64 Unexpected end tag (big) after the (implied) root element.
+Line: 1 Col: 72 Unexpected end tag (small) after the (implied) root element.
+Line: 1 Col: 79 Unexpected end tag (font) after the (implied) root element.
+Line: 1 Col: 88 Unexpected end tag (select) after the (implied) root element.
+Line: 1 Col: 93 Unexpected end tag (h1) after the (implied) root element.
+Line: 1 Col: 98 Unexpected end tag (h2) after the (implied) root element.
+Line: 1 Col: 103 Unexpected end tag (h3) after the (implied) root element.
+Line: 1 Col: 108 Unexpected end tag (h4) after the (implied) root element.
+Line: 1 Col: 113 Unexpected end tag (h5) after the (implied) root element.
+Line: 1 Col: 118 Unexpected end tag (h6) after the (implied) root element.
+Line: 1 Col: 125 Unexpected end tag (body) after the (implied) root element.
+Line: 1 Col: 130 Unexpected end tag (br). Treated as br element.
+Line: 1 Col: 134 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 140 This element (img) has no end tag.
+Line: 1 Col: 148 Unexpected end tag (title). Ignored.
+Line: 1 Col: 155 Unexpected end tag (span). Ignored.
+Line: 1 Col: 163 Unexpected end tag (style). Ignored.
+Line: 1 Col: 172 Unexpected end tag (script). Ignored.
+Line: 1 Col: 180 Unexpected end tag (table). Ignored.
+Line: 1 Col: 185 Unexpected end tag (th). Ignored.
+Line: 1 Col: 190 Unexpected end tag (td). Ignored.
+Line: 1 Col: 195 Unexpected end tag (tr). Ignored.
+Line: 1 Col: 203 This element (frame) has no end tag.
+Line: 1 Col: 210 This element (area) has no end tag.
+Line: 1 Col: 217 Unexpected end tag (link). Ignored.
+Line: 1 Col: 225 This element (param) has no end tag.
+Line: 1 Col: 230 This element (hr) has no end tag.
+Line: 1 Col: 238 This element (input) has no end tag.
+Line: 1 Col: 244 Unexpected end tag (col). Ignored.
+Line: 1 Col: 251 Unexpected end tag (base). Ignored.
+Line: 1 Col: 258 Unexpected end tag (meta). Ignored.
+Line: 1 Col: 269 This element (basefont) has no end tag.
+Line: 1 Col: 279 This element (bgsound) has no end tag.
+Line: 1 Col: 287 This element (embed) has no end tag.
+Line: 1 Col: 296 This element (spacer) has no end tag.
+Line: 1 Col: 300 Unexpected end tag (p). Ignored.
+Line: 1 Col: 305 End tag (dd) seen too early. Expected other end tag.
+Line: 1 Col: 310 End tag (dt) seen too early. Expected other end tag.
+Line: 1 Col: 320 Unexpected end tag (caption). Ignored.
+Line: 1 Col: 331 Unexpected end tag (colgroup). Ignored.
+Line: 1 Col: 339 Unexpected end tag (tbody). Ignored.
+Line: 1 Col: 347 Unexpected end tag (tfoot). Ignored.
+Line: 1 Col: 355 Unexpected end tag (thead). Ignored.
+Line: 1 Col: 365 End tag (address) seen too early. Expected other end tag.
+Line: 1 Col: 378 End tag (blockquote) seen too early. Expected other end tag.
+Line: 1 Col: 387 End tag (center) seen too early. Expected other end tag.
+Line: 1 Col: 393 Unexpected end tag (dir). Ignored.
+Line: 1 Col: 399 End tag (div) seen too early. Expected other end tag.
+Line: 1 Col: 404 End tag (dl) seen too early. Expected other end tag.
+Line: 1 Col: 415 End tag (fieldset) seen too early. Expected other end tag.
+Line: 1 Col: 425 End tag (listing) seen too early. Expected other end tag.
+Line: 1 Col: 432 End tag (menu) seen too early. Expected other end tag.
+Line: 1 Col: 437 End tag (ol) seen too early. Expected other end tag.
+Line: 1 Col: 442 End tag (ul) seen too early. Expected other end tag.
+Line: 1 Col: 447 End tag (li) seen too early. Expected other end tag.
+Line: 1 Col: 454 End tag (nobr) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 460 This element (wbr) has no end tag.
+Line: 1 Col: 476 End tag (button) seen too early. Expected other end tag.
+Line: 1 Col: 486 End tag (marquee) seen too early. Expected other end tag.
+Line: 1 Col: 495 End tag (object) seen too early. Expected other end tag.
+Line: 1 Col: 513 Unexpected end tag (html). Ignored.
+Line: 1 Col: 513 Unexpected end tag (frameset). Ignored.
+Line: 1 Col: 520 Unexpected end tag (head). Ignored.
+Line: 1 Col: 529 Unexpected end tag (iframe). Ignored.
+Line: 1 Col: 537 This element (image) has no end tag.
+Line: 1 Col: 547 This element (isindex) has no end tag.
+Line: 1 Col: 557 Unexpected end tag (noembed). Ignored.
+Line: 1 Col: 568 Unexpected end tag (noframes). Ignored.
+Line: 1 Col: 579 Unexpected end tag (noscript). Ignored.
+Line: 1 Col: 590 Unexpected end tag (optgroup). Ignored.
+Line: 1 Col: 599 Unexpected end tag (option). Ignored.
+Line: 1 Col: 611 Unexpected end tag (plaintext). Ignored.
+Line: 1 Col: 622 Unexpected end tag (textarea). Ignored.
+#document
+| 
+|   
+|   
+|     
+|

+ +#data +

+#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 20 Unexpected end tag (strong) in table context caused voodoo mode. +Line: 1 Col: 20 End tag (strong) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 24 Unexpected end tag (b) in table context caused voodoo mode. +Line: 1 Col: 24 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 29 Unexpected end tag (em) in table context caused voodoo mode. +Line: 1 Col: 29 End tag (em) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 33 Unexpected end tag (i) in table context caused voodoo mode. +Line: 1 Col: 33 End tag (i) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 37 Unexpected end tag (u) in table context caused voodoo mode. +Line: 1 Col: 37 End tag (u) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 46 Unexpected end tag (strike) in table context caused voodoo mode. +Line: 1 Col: 46 End tag (strike) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 50 Unexpected end tag (s) in table context caused voodoo mode. +Line: 1 Col: 50 End tag (s) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 58 Unexpected end tag (blink) in table context caused voodoo mode. +Line: 1 Col: 58 Unexpected end tag (blink). Ignored. +Line: 1 Col: 63 Unexpected end tag (tt) in table context caused voodoo mode. +Line: 1 Col: 63 End tag (tt) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 69 Unexpected end tag (pre) in table context caused voodoo mode. +Line: 1 Col: 69 End tag (pre) seen too early. Expected other end tag. +Line: 1 Col: 75 Unexpected end tag (big) in table context caused voodoo mode. +Line: 1 Col: 75 End tag (big) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 83 Unexpected end tag (small) in table context caused voodoo mode. +Line: 1 Col: 83 End tag (small) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 90 Unexpected end tag (font) in table context caused voodoo mode. +Line: 1 Col: 90 End tag (font) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 99 Unexpected end tag (select) in table context caused voodoo mode. +Line: 1 Col: 99 Unexpected end tag (select). Ignored. +Line: 1 Col: 104 Unexpected end tag (h1) in table context caused voodoo mode. +Line: 1 Col: 104 End tag (h1) seen too early. Expected other end tag. +Line: 1 Col: 109 Unexpected end tag (h2) in table context caused voodoo mode. +Line: 1 Col: 109 End tag (h2) seen too early. Expected other end tag. +Line: 1 Col: 114 Unexpected end tag (h3) in table context caused voodoo mode. +Line: 1 Col: 114 End tag (h3) seen too early. Expected other end tag. +Line: 1 Col: 119 Unexpected end tag (h4) in table context caused voodoo mode. +Line: 1 Col: 119 End tag (h4) seen too early. Expected other end tag. +Line: 1 Col: 124 Unexpected end tag (h5) in table context caused voodoo mode. +Line: 1 Col: 124 End tag (h5) seen too early. Expected other end tag. +Line: 1 Col: 129 Unexpected end tag (h6) in table context caused voodoo mode. +Line: 1 Col: 129 End tag (h6) seen too early. Expected other end tag. +Line: 1 Col: 136 Unexpected end tag (body) in the table row phase. Ignored. +Line: 1 Col: 141 Unexpected end tag (br) in table context caused voodoo mode. +Line: 1 Col: 141 Unexpected end tag (br). Treated as br element. 
+Line: 1 Col: 145 Unexpected end tag (a) in table context caused voodoo mode. +Line: 1 Col: 145 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 151 Unexpected end tag (img) in table context caused voodoo mode. +Line: 1 Col: 151 This element (img) has no end tag. +Line: 1 Col: 159 Unexpected end tag (title) in table context caused voodoo mode. +Line: 1 Col: 159 Unexpected end tag (title). Ignored. +Line: 1 Col: 166 Unexpected end tag (span) in table context caused voodoo mode. +Line: 1 Col: 166 Unexpected end tag (span). Ignored. +Line: 1 Col: 174 Unexpected end tag (style) in table context caused voodoo mode. +Line: 1 Col: 174 Unexpected end tag (style). Ignored. +Line: 1 Col: 183 Unexpected end tag (script) in table context caused voodoo mode. +Line: 1 Col: 183 Unexpected end tag (script). Ignored. +Line: 1 Col: 196 Unexpected end tag (th). Ignored. +Line: 1 Col: 201 Unexpected end tag (td). Ignored. +Line: 1 Col: 206 Unexpected end tag (tr). Ignored. +Line: 1 Col: 214 This element (frame) has no end tag. +Line: 1 Col: 221 This element (area) has no end tag. +Line: 1 Col: 228 Unexpected end tag (link). Ignored. +Line: 1 Col: 236 This element (param) has no end tag. +Line: 1 Col: 241 This element (hr) has no end tag. +Line: 1 Col: 249 This element (input) has no end tag. +Line: 1 Col: 255 Unexpected end tag (col). Ignored. +Line: 1 Col: 262 Unexpected end tag (base). Ignored. +Line: 1 Col: 269 Unexpected end tag (meta). Ignored. +Line: 1 Col: 280 This element (basefont) has no end tag. +Line: 1 Col: 290 This element (bgsound) has no end tag. +Line: 1 Col: 298 This element (embed) has no end tag. +Line: 1 Col: 307 This element (spacer) has no end tag. +Line: 1 Col: 311 Unexpected end tag (p). Ignored. +Line: 1 Col: 316 End tag (dd) seen too early. Expected other end tag. +Line: 1 Col: 321 End tag (dt) seen too early. Expected other end tag. +Line: 1 Col: 331 Unexpected end tag (caption). Ignored. +Line: 1 Col: 342 Unexpected end tag (colgroup). Ignored. +Line: 1 Col: 350 Unexpected end tag (tbody). Ignored. +Line: 1 Col: 358 Unexpected end tag (tfoot). Ignored. +Line: 1 Col: 366 Unexpected end tag (thead). Ignored. +Line: 1 Col: 376 End tag (address) seen too early. Expected other end tag. +Line: 1 Col: 389 End tag (blockquote) seen too early. Expected other end tag. +Line: 1 Col: 398 End tag (center) seen too early. Expected other end tag. +Line: 1 Col: 404 Unexpected end tag (dir). Ignored. +Line: 1 Col: 410 End tag (div) seen too early. Expected other end tag. +Line: 1 Col: 415 End tag (dl) seen too early. Expected other end tag. +Line: 1 Col: 426 End tag (fieldset) seen too early. Expected other end tag. +Line: 1 Col: 436 End tag (listing) seen too early. Expected other end tag. +Line: 1 Col: 443 End tag (menu) seen too early. Expected other end tag. +Line: 1 Col: 448 End tag (ol) seen too early. Expected other end tag. +Line: 1 Col: 453 End tag (ul) seen too early. Expected other end tag. +Line: 1 Col: 458 End tag (li) seen too early. Expected other end tag. +Line: 1 Col: 465 End tag (nobr) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 471 This element (wbr) has no end tag. +Line: 1 Col: 487 End tag (button) seen too early. Expected other end tag. +Line: 1 Col: 497 End tag (marquee) seen too early. Expected other end tag. +Line: 1 Col: 506 End tag (object) seen too early. Expected other end tag. +Line: 1 Col: 524 Unexpected end tag (html). Ignored. +Line: 1 Col: 524 Unexpected end tag (frameset). Ignored. 
+Line: 1 Col: 531 Unexpected end tag (head). Ignored. +Line: 1 Col: 540 Unexpected end tag (iframe). Ignored. +Line: 1 Col: 548 This element (image) has no end tag. +Line: 1 Col: 558 This element (isindex) has no end tag. +Line: 1 Col: 568 Unexpected end tag (noembed). Ignored. +Line: 1 Col: 579 Unexpected end tag (noframes). Ignored. +Line: 1 Col: 590 Unexpected end tag (noscript). Ignored. +Line: 1 Col: 601 Unexpected end tag (optgroup). Ignored. +Line: 1 Col: 610 Unexpected end tag (option). Ignored. +Line: 1 Col: 622 Unexpected end tag (plaintext). Ignored. +Line: 1 Col: 633 Unexpected end tag (textarea). Ignored. +#document +| +| +| +|
+| +| +| +|

+ +#data + +#errors +Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE. +Line: 1 Col: 10 Expected closing tag. Unexpected end of file. +#document +| +| +| diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests10.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests10.dat new file mode 100644 index 0000000..4f8df86 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests10.dat @@ -0,0 +1,799 @@ +#data + +#errors +#document +| +| +| +| +| + +#data +a +#errors +29: Bogus comment +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| + +#data + +#errors +35: Stray “svg” start tag. +42: Stray end tag “svg” +#document +| +| +| +| +| +#errors +43: Stray “svg” start tag. +50: Stray end tag “svg” +#document +| +| +| +| +|

+#errors +34: Start tag “svg” seen in “table”. +41: Stray end tag “svg”. +#document +| +| +| +| +| +| + +#data +
foo
+#errors +34: Start tag “svg” seen in “table”. +46: Stray end tag “g”. +53: Stray end tag “svg”. +#document +| +| +| +| +| +| +| "foo" +| + +#data +
foobar
+#errors +34: Start tag “svg” seen in “table”. +46: Stray end tag “g”. +58: Stray end tag “g”. +65: Stray end tag “svg”. +#document +| +| +| +| +| +| +| "foo" +| +| "bar" +| + +#data +
foobar
+#errors +41: Start tag “svg” seen in “table”. +53: Stray end tag “g”. +65: Stray end tag “g”. +72: Stray end tag “svg”. +#document +| +| +| +| +| +| +| "foo" +| +| "bar" +| +| + +#data +
foobar
+#errors +45: Start tag “svg” seen in “table”. +57: Stray end tag “g”. +69: Stray end tag “g”. +76: Stray end tag “svg”. +#document +| +| +| +| +| +| +| "foo" +| +| "bar" +| +| +| + +#data +
foobar
+#errors +#document +| +| +| +| +| +| +| +|
+| +| +| "foo" +| +| "bar" + +#data +
foobar

baz

+#errors +#document +| +| +| +| +| +| +| +|
+| +| +| "foo" +| +| "bar" +|

+| "baz" + +#data +
foobar

baz

+#errors +#document +| +| +| +| +| +|
+| +| +| "foo" +| +| "bar" +|

+| "baz" + +#data +
foobar

baz

quux +#errors +70: HTML start tag “p” in a foreign namespace context. +81: “table” closed but “caption” was still open. +#document +| +| +| +| +| +|
+| +| +| "foo" +| +| "bar" +|

+| "baz" +|

+| "quux" + +#data +
foobarbaz

quux +#errors +78: “table” closed but “caption” was still open. +78: Unclosed elements on stack. +#document +| +| +| +| +| +|
+| +| +| "foo" +| +| "bar" +| "baz" +|

+| "quux" + +#data +foobar

baz

quux +#errors +44: Start tag “svg” seen in “table”. +56: Stray end tag “g”. +68: Stray end tag “g”. +71: HTML start tag “p” in a foreign namespace context. +71: Start tag “p” seen in “table”. +#document +| +| +| +| +| +| +| "foo" +| +| "bar" +|

+| "baz" +| +| +|

+| "quux" + +#data +

quux +#errors +50: Stray “svg” start tag. +54: Stray “g” start tag. +62: Stray end tag “g” +66: Stray “g” start tag. +74: Stray end tag “g” +77: Stray “p” start tag. +88: “table” end tag with “select” open. +#document +| +| +| +| +| +| +| +|
+|

quux +#errors +36: Start tag “select” seen in “table”. +42: Stray “svg” start tag. +46: Stray “g” start tag. +54: Stray end tag “g” +58: Stray “g” start tag. +66: Stray end tag “g” +69: Stray “p” start tag. +80: “table” end tag with “select” open. +#document +| +| +| +| +| +|

+| "quux" + +#data +foobar

baz +#errors +41: Stray “svg” start tag. +68: HTML start tag “p” in a foreign namespace context. +#document +| +| +| +| +| +| +| "foo" +| +| "bar" +|

+| "baz" + +#data +foobar

baz +#errors +34: Stray “svg” start tag. +61: HTML start tag “p” in a foreign namespace context. +#document +| +| +| +| +| +| +| "foo" +| +| "bar" +|

+| "baz" + +#data +

+#errors +31: Stray “svg” start tag. +35: Stray “g” start tag. +40: Stray end tag “g” +44: Stray “g” start tag. +49: Stray end tag “g” +52: Stray “p” start tag. +58: Stray “span” start tag. +58: End of file seen and there were open elements. +#document +| +| +| +| + +#data +

+#errors +42: Stray “svg” start tag. +46: Stray “g” start tag. +51: Stray end tag “g” +55: Stray “g” start tag. +60: Stray end tag “g” +63: Stray “p” start tag. +69: Stray “span” start tag. +#document +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| xlink:href="foo" +| +| xlink href="foo" + +#data + +#errors +#document +| +| +| +| +| xlink:href="foo" +| xml:lang="en" +| +| +| xlink href="foo" +| xml lang="en" + +#data + +#errors +#document +| +| +| +| +| xlink:href="foo" +| xml:lang="en" +| +| +| xlink href="foo" +| xml lang="en" + +#data +bar +#errors +#document +| +| +| +| +| xlink:href="foo" +| xml:lang="en" +| +| +| xlink href="foo" +| xml lang="en" +| "bar" + +#data + +#errors +#document +| +| +| +| + +#data +

a +#errors +#document +| +| +| +|
+| +| "a" + +#data +
a +#errors +#document +| +| +| +|
+| +| +| "a" + +#data +
+#errors +#document +| +| +| +|
+| +| +| + +#data +
a +#errors +#document +| +| +| +|
+| +| +| +| +| "a" + +#data +

a +#errors +#document +| +| +| +|

+| +| +| +|

+| "a" + +#data +
    a +#errors +40: HTML start tag “ul” in a foreign namespace context. +41: End of file in a foreign namespace context. +#document +| +| +| +| +| +| +|
    +| +|
      +| "a" + +#data +
        a +#errors +35: HTML start tag “ul” in a foreign namespace context. +36: End of file in a foreign namespace context. +#document +| +| +| +| +| +| +| +|
          +| "a" + +#data +

          +#errors +#document +| +| +| +| +|

          +| +| +|

          + +#data +

          +#errors +#document +| +| +| +| +|

          +| +| +|

          + +#data +

          +#errors +#document +| +| +| +|

          +| +| +| +|

          +|

          + +#data +
          +#errors +#document +| +| +| +| +| +|
          +| +|
          +| +| + +#data +
          +#errors +#document +| +| +| +| +| +| +| +|
          +|
          +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data +

+#errors +#document +| +| +| +| +|
+| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| +| + +#data +
+#errors +#document +| +| +| +| +| +| +| +|
+| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| +| +| +| +| +| +| +| +| diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests11.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests11.dat new file mode 100644 index 0000000..638cde4 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests11.dat @@ -0,0 +1,482 @@ +#data + +#errors +#document +| +| +| +| +| +| attributeName="" +| attributeType="" +| baseFrequency="" +| baseProfile="" +| calcMode="" +| clipPathUnits="" +| contentScriptType="" +| contentStyleType="" +| diffuseConstant="" +| edgeMode="" +| externalResourcesRequired="" +| filterRes="" +| filterUnits="" +| glyphRef="" +| gradientTransform="" +| gradientUnits="" +| kernelMatrix="" +| kernelUnitLength="" +| keyPoints="" +| keySplines="" +| keyTimes="" +| lengthAdjust="" +| limitingConeAngle="" +| markerHeight="" +| markerUnits="" +| markerWidth="" +| maskContentUnits="" +| maskUnits="" +| numOctaves="" +| pathLength="" +| patternContentUnits="" +| patternTransform="" +| patternUnits="" +| pointsAtX="" +| pointsAtY="" +| pointsAtZ="" +| preserveAlpha="" +| preserveAspectRatio="" +| primitiveUnits="" +| refX="" +| refY="" +| repeatCount="" +| repeatDur="" +| requiredExtensions="" +| requiredFeatures="" +| specularConstant="" +| specularExponent="" +| spreadMethod="" +| startOffset="" +| stdDeviation="" +| stitchTiles="" +| surfaceScale="" +| systemLanguage="" +| tableValues="" +| targetX="" +| targetY="" +| textLength="" +| viewBox="" +| viewTarget="" +| xChannelSelector="" +| yChannelSelector="" +| zoomAndPan="" + +#data + +#errors +#document +| +| +| +| +| +| attributeName="" +| attributeType="" +| baseFrequency="" +| baseProfile="" +| calcMode="" +| clipPathUnits="" +| contentScriptType="" +| contentStyleType="" +| diffuseConstant="" +| edgeMode="" +| externalResourcesRequired="" +| filterRes="" +| filterUnits="" +| glyphRef="" +| gradientTransform="" +| gradientUnits="" +| kernelMatrix="" +| kernelUnitLength="" +| keyPoints="" +| keySplines="" +| keyTimes="" +| lengthAdjust="" +| limitingConeAngle="" +| markerHeight="" +| markerUnits="" +| markerWidth="" +| maskContentUnits="" +| maskUnits="" +| numOctaves="" +| pathLength="" +| patternContentUnits="" +| patternTransform="" +| patternUnits="" +| pointsAtX="" +| pointsAtY="" +| pointsAtZ="" +| preserveAlpha="" +| preserveAspectRatio="" +| primitiveUnits="" +| refX="" +| refY="" +| repeatCount="" +| repeatDur="" +| requiredExtensions="" +| requiredFeatures="" +| specularConstant="" +| specularExponent="" +| spreadMethod="" +| startOffset="" +| stdDeviation="" +| stitchTiles="" +| surfaceScale="" +| systemLanguage="" +| tableValues="" +| targetX="" +| targetY="" +| textLength="" +| viewBox="" +| viewTarget="" +| xChannelSelector="" +| yChannelSelector="" +| zoomAndPan="" + +#data + +#errors +#document +| +| +| +| +| +| attributeName="" +| attributeType="" +| baseFrequency="" +| baseProfile="" +| calcMode="" +| clipPathUnits="" +| contentScriptType="" +| contentStyleType="" +| diffuseConstant="" +| edgeMode="" +| externalResourcesRequired="" +| filterRes="" +| filterUnits="" +| glyphRef="" +| gradientTransform="" +| gradientUnits="" +| kernelMatrix="" +| kernelUnitLength="" +| keyPoints="" +| keySplines="" +| keyTimes="" +| lengthAdjust="" +| limitingConeAngle="" +| markerHeight="" +| markerUnits="" +| markerWidth="" +| maskContentUnits="" +| maskUnits="" +| numOctaves="" +| pathLength="" +| patternContentUnits="" +| patternTransform="" +| patternUnits="" +| pointsAtX="" +| pointsAtY="" +| 
pointsAtZ="" +| preserveAlpha="" +| preserveAspectRatio="" +| primitiveUnits="" +| refX="" +| refY="" +| repeatCount="" +| repeatDur="" +| requiredExtensions="" +| requiredFeatures="" +| specularConstant="" +| specularExponent="" +| spreadMethod="" +| startOffset="" +| stdDeviation="" +| stitchTiles="" +| surfaceScale="" +| systemLanguage="" +| tableValues="" +| targetX="" +| targetY="" +| textLength="" +| viewBox="" +| viewTarget="" +| xChannelSelector="" +| yChannelSelector="" +| zoomAndPan="" + +#data + +#errors +#document +| +| +| +| +| +| attributename="" +| attributetype="" +| basefrequency="" +| baseprofile="" +| calcmode="" +| clippathunits="" +| contentscripttype="" +| contentstyletype="" +| diffuseconstant="" +| edgemode="" +| externalresourcesrequired="" +| filterres="" +| filterunits="" +| glyphref="" +| gradienttransform="" +| gradientunits="" +| kernelmatrix="" +| kernelunitlength="" +| keypoints="" +| keysplines="" +| keytimes="" +| lengthadjust="" +| limitingconeangle="" +| markerheight="" +| markerunits="" +| markerwidth="" +| maskcontentunits="" +| maskunits="" +| numoctaves="" +| pathlength="" +| patterncontentunits="" +| patterntransform="" +| patternunits="" +| pointsatx="" +| pointsaty="" +| pointsatz="" +| preservealpha="" +| preserveaspectratio="" +| primitiveunits="" +| refx="" +| refy="" +| repeatcount="" +| repeatdur="" +| requiredextensions="" +| requiredfeatures="" +| specularconstant="" +| specularexponent="" +| spreadmethod="" +| startoffset="" +| stddeviation="" +| stitchtiles="" +| surfacescale="" +| systemlanguage="" +| tablevalues="" +| targetx="" +| targety="" +| textlength="" +| viewbox="" +| viewtarget="" +| xchannelselector="" +| ychannelselector="" +| zoomandpan="" + +#data + +#errors +#document +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests12.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests12.dat new file mode 100644 index 0000000..63107d2 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests12.dat @@ -0,0 +1,62 @@ +#data +

foobazeggs

spam

quuxbar +#errors +#document +| +| +| +| +|

+| "foo" +| +| +| +| "baz" +| +| +| +| +| "eggs" +| +| +|

+| "spam" +| +| +| +|
+| +| +| "quux" +| "bar" + +#data +foobazeggs

spam
quuxbar +#errors +#document +| +| +| +| +| "foo" +| +| +| +| "baz" +| +| +| +| +| "eggs" +| +| +|

+| "spam" +| +| +| +|
+| +| +| "quux" +| "bar" diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests14.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests14.dat new file mode 100644 index 0000000..b8713f8 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests14.dat @@ -0,0 +1,74 @@ +#data + +#errors +#document +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +15: Unexpected start tag html +#document +| +| +| abc:def="gh" +| +| +| + +#data + +#errors +15: Unexpected start tag html +#document +| +| +| xml:lang="bar" +| +| + +#data + +#errors +#document +| +| +| 123="456" +| +| + +#data + +#errors +#document +| +| +| 123="456" +| 789="012" +| +| + +#data + +#errors +#document +| +| +| +| +| 789="012" diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests15.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests15.dat new file mode 100644 index 0000000..6ce1c0d --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests15.dat @@ -0,0 +1,208 @@ +#data +

X +#errors +Line: 1 Col: 31 Unexpected end tag (p). Ignored. +Line: 1 Col: 36 Expected closing tag. Unexpected end of file. +#document +| +| +| +| +|

+| +| +| +| +| +| +| " " +|

+| "X" + +#data +

+

X +#errors +Line: 1 Col: 3 Unexpected start tag (p). Expected DOCTYPE. +Line: 1 Col: 16 Unexpected end tag (p). Ignored. +Line: 2 Col: 4 Expected closing tag. Unexpected end of file. +#document +| +| +| +|

+| +| +| +| +| +| +| " +" +|

+| "X" + +#data + +#errors +Line: 1 Col: 22 Unexpected end tag (html) after the (implied) root element. +#document +| +| +| +| +| " " + +#data + +#errors +Line: 1 Col: 22 Unexpected end tag (body) after the (implied) root element. +#document +| +| +| +| +| + +#data + +#errors +Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE. +Line: 1 Col: 13 Unexpected end tag (html) after the (implied) root element. +#document +| +| +| +| + +#data +X +#errors +Line: 1 Col: 22 Unexpected end tag (body) after the (implied) root element. +#document +| +| +| +| +| +| "X" + +#data +<!doctype html><table> X<meta></table> +#errors +Line: 1 Col: 24 Unexpected non-space characters in table context caused voodoo mode. +Line: 1 Col: 30 Unexpected start tag (meta) in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| " X" +| <meta> +| <table> + +#data +<!doctype html><table> x</table> +#errors +Line: 1 Col: 24 Unexpected non-space characters in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| " x" +| <table> + +#data +<!doctype html><table> x </table> +#errors +Line: 1 Col: 25 Unexpected non-space characters in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| " x " +| <table> + +#data +<!doctype html><table><tr> x</table> +#errors +Line: 1 Col: 28 Unexpected non-space characters in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| " x" +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><table>X<style> <tr>x </style> </table> +#errors +Line: 1 Col: 23 Unexpected non-space characters in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "X" +| <table> +| <style> +| " <tr>x " +| " " + +#data +<!doctype html><div><table><a>foo</a> <tr><td>bar</td> </tr></table></div> +#errors +Line: 1 Col: 30 Unexpected start tag (a) in table context caused voodoo mode. +Line: 1 Col: 37 Unexpected end tag (a) in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <div> +| <a> +| "foo" +| <table> +| " " +| <tbody> +| <tr> +| <td> +| "bar" +| " " + +#data +<frame></frame></frame><frameset><frame><frameset><frame></frameset><noframes></frameset><noframes> +#errors +6: Start tag seen without seeing a doctype first. Expected “<!DOCTYPE html>â€. +13: Stray start tag “frameâ€. +21: Stray end tag “frameâ€. +29: Stray end tag “frameâ€. +39: “frameset†start tag after “body†already open. +105: End of file seen inside an [R]CDATA element. +105: End of file seen and there were open elements. +XXX: These errors are wrong, please fix me! +#document +| <html> +| <head> +| <frameset> +| <frame> +| <frameset> +| <frame> +| <noframes> +| "</frameset><noframes>" + +#data +<!DOCTYPE html><object></html> +#errors +1: Expected closing tag. Unexpected end of file +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <object> diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests16.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests16.dat new file mode 100644 index 0000000..c8ef66f --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests16.dat @@ -0,0 +1,2299 @@ +#data +<!doctype html><script> +#errors +Line: 1 Col: 23 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| <body> + +#data +<!doctype html><script>a +#errors +Line: 1 Col: 24 Unexpected end of file. 
Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "a" +| <body> + +#data +<!doctype html><script>< +#errors +Line: 1 Col: 24 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<" +| <body> + +#data +<!doctype html><script></ +#errors +Line: 1 Col: 25 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</" +| <body> + +#data +<!doctype html><script></S +#errors +Line: 1 Col: 26 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</S" +| <body> + +#data +<!doctype html><script></SC +#errors +Line: 1 Col: 27 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</SC" +| <body> + +#data +<!doctype html><script></SCR +#errors +Line: 1 Col: 28 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</SCR" +| <body> + +#data +<!doctype html><script></SCRI +#errors +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</SCRI" +| <body> + +#data +<!doctype html><script></SCRIP +#errors +Line: 1 Col: 30 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</SCRIP" +| <body> + +#data +<!doctype html><script></SCRIPT +#errors +Line: 1 Col: 31 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</SCRIPT" +| <body> + +#data +<!doctype html><script></SCRIPT +#errors +Line: 1 Col: 32 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| <body> + +#data +<!doctype html><script></s +#errors +Line: 1 Col: 26 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</s" +| <body> + +#data +<!doctype html><script></sc +#errors +Line: 1 Col: 27 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</sc" +| <body> + +#data +<!doctype html><script></scr +#errors +Line: 1 Col: 28 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</scr" +| <body> + +#data +<!doctype html><script></scri +#errors +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</scri" +| <body> + +#data +<!doctype html><script></scrip +#errors +Line: 1 Col: 30 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</scrip" +| <body> + +#data +<!doctype html><script></script +#errors +Line: 1 Col: 31 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</script" +| <body> + +#data +<!doctype html><script></script +#errors +Line: 1 Col: 32 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| <body> + +#data +<!doctype html><script><! +#errors +Line: 1 Col: 25 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!" +| <body> + +#data +<!doctype html><script><!a +#errors +Line: 1 Col: 26 Unexpected end of file. 
Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!a" +| <body> + +#data +<!doctype html><script><!- +#errors +Line: 1 Col: 26 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!-" +| <body> + +#data +<!doctype html><script><!-a +#errors +Line: 1 Col: 27 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!-a" +| <body> + +#data +<!doctype html><script><!-- +#errors +Line: 1 Col: 27 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--" +| <body> + +#data +<!doctype html><script><!--a +#errors +Line: 1 Col: 28 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--a" +| <body> + +#data +<!doctype html><script><!--< +#errors +Line: 1 Col: 28 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<" +| <body> + +#data +<!doctype html><script><!--<a +#errors +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<a" +| <body> + +#data +<!doctype html><script><!--</ +#errors +Line: 1 Col: 27 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--</" +| <body> + +#data +<!doctype html><script><!--</script +#errors +Line: 1 Col: 35 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--</script" +| <body> + +#data +<!doctype html><script><!--</script +#errors +Line: 1 Col: 36 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--" +| <body> + +#data +<!doctype html><script><!--<s +#errors +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<s" +| <body> + +#data +<!doctype html><script><!--<script +#errors +Line: 1 Col: 34 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script" +| <body> + +#data +<!doctype html><script><!--<script +#errors +Line: 1 Col: 35 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script " +| <body> + +#data +<!doctype html><script><!--<script < +#errors +Line: 1 Col: 36 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script <" +| <body> + +#data +<!doctype html><script><!--<script <a +#errors +Line: 1 Col: 37 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script <a" +| <body> + +#data +<!doctype html><script><!--<script </ +#errors +Line: 1 Col: 37 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </" +| <body> + +#data +<!doctype html><script><!--<script </s +#errors +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </s" +| <body> + +#data +<!doctype html><script><!--<script </script +#errors +Line: 1 Col: 43 Unexpected end of file. Expected end tag (script). 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script" +| <body> + +#data +<!doctype html><script><!--<script </scripta +#errors +Line: 1 Col: 44 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </scripta" +| <body> + +#data +<!doctype html><script><!--<script </script +#errors +Line: 1 Col: 44 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<!doctype html><script><!--<script </script> +#errors +Line: 1 Col: 44 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script>" +| <body> + +#data +<!doctype html><script><!--<script </script/ +#errors +Line: 1 Col: 44 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script/" +| <body> + +#data +<!doctype html><script><!--<script </script < +#errors +Line: 1 Col: 45 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script <" +| <body> + +#data +<!doctype html><script><!--<script </script <a +#errors +Line: 1 Col: 46 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script <a" +| <body> + +#data +<!doctype html><script><!--<script </script </ +#errors +Line: 1 Col: 46 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script </" +| <body> + +#data +<!doctype html><script><!--<script </script </script +#errors +Line: 1 Col: 52 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script </script" +| <body> + +#data +<!doctype html><script><!--<script </script </script +#errors +Line: 1 Col: 53 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<!doctype html><script><!--<script </script </script/ +#errors +Line: 1 Col: 53 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<!doctype html><script><!--<script </script </script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<!doctype html><script><!--<script - +#errors +Line: 1 Col: 36 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -" +| <body> + +#data +<!doctype html><script><!--<script -a +#errors +Line: 1 Col: 37 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -a" +| <body> + +#data +<!doctype html><script><!--<script -< +#errors +Line: 1 Col: 37 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -<" +| <body> + +#data +<!doctype html><script><!--<script -- +#errors +Line: 1 Col: 37 Unexpected end of file. Expected end tag (script). 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script --" +| <body> + +#data +<!doctype html><script><!--<script --a +#errors +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script --a" +| <body> + +#data +<!doctype html><script><!--<script --< +#errors +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script --<" +| <body> + +#data +<!doctype html><script><!--<script --> +#errors +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<!doctype html><script><!--<script -->< +#errors +Line: 1 Col: 39 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script --><" +| <body> + +#data +<!doctype html><script><!--<script --></ +#errors +Line: 1 Col: 40 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script --></" +| <body> + +#data +<!doctype html><script><!--<script --></script +#errors +Line: 1 Col: 46 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script --></script" +| <body> + +#data +<!doctype html><script><!--<script --></script +#errors +Line: 1 Col: 47 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<!doctype html><script><!--<script --></script/ +#errors +Line: 1 Col: 47 Unexpected end of file. Expected end tag (script). 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<!doctype html><script><!--<script --></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<!doctype html><script><!--<script><\/script>--></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script><\/script>-->" +| <body> + +#data +<!doctype html><script><!--<script></scr'+'ipt>--></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></scr'+'ipt>-->" +| <body> + +#data +<!doctype html><script><!--<script></script><script></script></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>" +| <body> + +#data +<!doctype html><script><!--<script></script><script></script>--><!--</script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>--><!--" +| <body> + +#data +<!doctype html><script><!--<script></script><script></script>-- ></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>-- >" +| <body> + +#data +<!doctype html><script><!--<script></script><script></script>- -></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>- ->" +| <body> + +#data +<!doctype html><script><!--<script></script><script></script>- - ></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>- - >" +| <body> + +#data +<!doctype html><script><!--<script></script><script></script>-></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>->" +| <body> + +#data +<!doctype html><script><!--<script>--!></script>X +#errors +Line: 1 Col: 49 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script>--!></script>X" +| <body> + +#data +<!doctype html><script><!--<scr'+'ipt></script>--></script> +#errors +Line: 1 Col: 59 Unexpected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<scr'+'ipt>" +| <body> +| "-->" + +#data +<!doctype html><script><!--<script></scr'+'ipt></script>X +#errors +Line: 1 Col: 57 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></scr'+'ipt></script>X" +| <body> + +#data +<!doctype html><style><!--<style></style>--></style> +#errors +Line: 1 Col: 52 Unexpected end tag (style). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "<!--<style>" +| <body> +| "-->" + +#data +<!doctype html><style><!--</style>X +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "<!--" +| <body> +| "X" + +#data +<!doctype html><style><!--...</style>...--></style> +#errors +Line: 1 Col: 51 Unexpected end tag (style). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "<!--..." 
+| <body>
+| "...-->"
+
+#data
+<!doctype html><style><!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style></style>X
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <style>
+| "<!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style>"
+| <body>
+| "X"
+
+#data
+<!doctype html><style><!--...<style><!--...--!></style>--></style>
+#errors
+Line: 1 Col: 66 Unexpected end tag (style).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <style>
+| "<!--...<style><!--...--!>"
+| <body>
+| "-->"
+
+#data
+<!doctype html><style><!--...</style><!-- --><style>@import ...</style>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <style>
+| "<!--..."
+| <!-- -->
+| <style>
+| "@import ..."
+| <body>
+
+#data
+<!doctype html><style>...<style><!--...</style><!-- --></style>
+#errors
+Line: 1 Col: 63 Unexpected end tag (style).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <style>
+| "...<style><!--..."
+| <!-- -->
+| <body>
+
+#data
+<!doctype html><style>...<!--[if IE]><style>...</style>X
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <style>
+| "...<!--[if IE]><style>..."
+| <body>
+| "X"
+
+#data
+<!doctype html><title><!--<title></title>--></title>
+#errors
+Line: 1 Col: 52 Unexpected end tag (title).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <title>
+| "<!--<title>"
+| <body>
+| "-->"
+
+#data
+<!doctype html><title>&lt;/title></title>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <title>
+| "</title>"
+| <body>
+
+#data
+<!doctype html><title>foo/title><link></head><body>X
+#errors
+Line: 1 Col: 52 Unexpected end of file. Expected end tag (title).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <title>
+| "foo/title><link></head><body>X"
+| <body>
+
+#data
+<!doctype html><noscript><!--<noscript></noscript>--></noscript>
+#errors
+Line: 1 Col: 64 Unexpected end tag (noscript).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <noscript>
+| "<!--<noscript>"
+| <body>
+| "-->"
+
+#data
+<!doctype html><noscript><!--</noscript>X<noscript>--></noscript>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <noscript>
+| "<!--"
+| <body>
+| "X"
+| <noscript>
+| "-->"
+
+#data
+<!doctype html><noscript><iframe></noscript>X
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <noscript>
+| "<iframe>"
+| <body>
+| "X"
+
+#data
+<!doctype html><noframes><!--<noframes></noframes>--></noframes>
+#errors
+Line: 1 Col: 64 Unexpected end tag (noframes).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <noframes>
+| "<!--<noframes>"
+| <body>
+| "-->"
+
+#data
+<!doctype html><noframes><body><script><!--...</script></body></noframes></html>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <noframes>
+| "<body><script><!--...</script></body>"
+| <body>
+
+#data
+<!doctype html><textarea><!--<textarea></textarea>--></textarea>
+#errors
+Line: 1 Col: 64 Unexpected end tag (textarea).
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "<!--<textarea>"
+| "-->"
+
+#data
+<!doctype html><textarea>&lt;/textarea></textarea>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "</textarea>"
+
+#data
+<!doctype html><textarea><</textarea>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "<"
+
+#data
+<!doctype html><textarea>a<b</textarea>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "a<b"
+
+#data
+<!doctype html><iframe><!--<iframe></iframe>--></iframe>
+#errors
+Line: 1 Col: 56 Unexpected end tag (iframe).
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <iframe> +| "<!--<iframe>" +| "-->" + +#data +<!doctype html><iframe>...<!--X->...<!--/X->...</iframe> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <iframe> +| "...<!--X->...<!--/X->..." + +#data +<!doctype html><xmp><!--<xmp></xmp>--></xmp> +#errors +Line: 1 Col: 44 Unexpected end tag (xmp). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <xmp> +| "<!--<xmp>" +| "-->" + +#data +<!doctype html><noembed><!--<noembed></noembed>--></noembed> +#errors +Line: 1 Col: 60 Unexpected end tag (noembed). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <noembed> +| "<!--<noembed>" +| "-->" + +#data +<script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 8 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| <body> + +#data +<script>a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 9 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "a" +| <body> + +#data +<script>< +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 9 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<" +| <body> + +#data +<script></ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 10 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</" +| <body> + +#data +<script></S +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</S" +| <body> + +#data +<script></SC +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 12 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</SC" +| <body> + +#data +<script></SCR +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 13 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</SCR" +| <body> + +#data +<script></SCRI +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 14 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</SCRI" +| <body> + +#data +<script></SCRIP +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 15 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</SCRIP" +| <body> + +#data +<script></SCRIPT +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 16 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</SCRIPT" +| <body> + +#data +<script></SCRIPT +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 17 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| <body> + +#data +<script></s +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</s" +| <body> + +#data +<script></sc +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. 
+Line: 1 Col: 12 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</sc" +| <body> + +#data +<script></scr +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 13 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</scr" +| <body> + +#data +<script></scri +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 14 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</scri" +| <body> + +#data +<script></scrip +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 15 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</scrip" +| <body> + +#data +<script></script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 16 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</script" +| <body> + +#data +<script></script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 17 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| <body> + +#data +<script><! +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 10 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!" +| <body> + +#data +<script><!a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!a" +| <body> + +#data +<script><!- +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!-" +| <body> + +#data +<script><!-a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 12 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!-a" +| <body> + +#data +<script><!-- +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 12 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--" +| <body> + +#data +<script><!--a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 13 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--a" +| <body> + +#data +<script><!--< +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 13 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<" +| <body> + +#data +<script><!--<a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 14 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<a" +| <body> + +#data +<script><!--</ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 14 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--</" +| <body> + +#data +<script><!--</script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 20 Unexpected end of file. Expected end tag (script). 
+#document +| <html> +| <head> +| <script> +| "<!--</script" +| <body> + +#data +<script><!--</script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 21 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--" +| <body> + +#data +<script><!--<s +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 14 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<s" +| <body> + +#data +<script><!--<script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 19 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script" +| <body> + +#data +<script><!--<script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 20 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script " +| <body> + +#data +<script><!--<script < +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 21 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script <" +| <body> + +#data +<script><!--<script <a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 22 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script <a" +| <body> + +#data +<script><!--<script </ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 22 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </" +| <body> + +#data +<script><!--<script </s +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 23 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </s" +| <body> + +#data +<script><!--<script </script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 28 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script" +| <body> + +#data +<script><!--<script </scripta +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </scripta" +| <body> + +#data +<script><!--<script </script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<script><!--<script </script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script>" +| <body> + +#data +<script><!--<script </script/ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script/" +| <body> + +#data +<script><!--<script </script < +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 30 Unexpected end of file. Expected end tag (script). 
+#document +| <html> +| <head> +| <script> +| "<!--<script </script <" +| <body> + +#data +<script><!--<script </script <a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 31 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script <a" +| <body> + +#data +<script><!--<script </script </ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 31 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script </" +| <body> + +#data +<script><!--<script </script </script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script </script" +| <body> + +#data +<script><!--<script </script </script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<script><!--<script </script </script/ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<script><!--<script </script </script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<script><!--<script - +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 21 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script -" +| <body> + +#data +<script><!--<script -a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 22 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script -a" +| <body> + +#data +<script><!--<script -- +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 22 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script --" +| <body> + +#data +<script><!--<script --a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 23 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script --a" +| <body> + +#data +<script><!--<script --> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 23 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<script><!--<script -->< +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 24 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script --><" +| <body> + +#data +<script><!--<script --></ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 25 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script --></" +| <body> + +#data +<script><!--<script --></script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 31 Unexpected end of file. 
Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script --></script" +| <body> + +#data +<script><!--<script --></script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 32 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<script><!--<script --></script/ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 32 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<script><!--<script --></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<script><!--<script><\/script>--></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script><\/script>-->" +| <body> + +#data +<script><!--<script></scr'+'ipt>--></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></scr'+'ipt>-->" +| <body> + +#data +<script><!--<script></script><script></script></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>" +| <body> + +#data +<script><!--<script></script><script></script>--><!--</script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>--><!--" +| <body> + +#data +<script><!--<script></script><script></script>-- ></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>-- >" +| <body> + +#data +<script><!--<script></script><script></script>- -></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>- ->" +| <body> + +#data +<script><!--<script></script><script></script>- - ></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>- - >" +| <body> + +#data +<script><!--<script></script><script></script>-></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>->" +| <body> + +#data +<script><!--<script>--!></script>X +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 34 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script>--!></script>X" +| <body> + +#data +<script><!--<scr'+'ipt></script>--></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 44 Unexpected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<scr'+'ipt>" +| <body> +| "-->" + +#data +<script><!--<script></scr'+'ipt></script>X +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 42 Unexpected end of file. Expected end tag (script). 
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script></scr'+'ipt></script>X"
+| <body>
+
+#data
+<style><!--<style></style>--></style>
+#errors
+Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
+Line: 1 Col: 37 Unexpected end tag (style).
+#document
+| <html>
+| <head>
+| <style>
+| "<!--<style>"
+| <body>
+| "-->"
+
+#data
+<style><!--</style>X
+#errors
+Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <style>
+| "<!--"
+| <body>
+| "X"
+
+#data
+<style><!--...</style>...--></style>
+#errors
+Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
+Line: 1 Col: 36 Unexpected end tag (style).
+#document
+| <html>
+| <head>
+| <style>
+| "<!--..."
+| <body>
+| "...-->"
+
+#data
+<style><!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style></style>X
+#errors
+Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <style>
+| "<!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style>"
+| <body>
+| "X"
+
+#data
+<style><!--...<style><!--...--!></style>--></style>
+#errors
+Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
+Line: 1 Col: 51 Unexpected end tag (style).
+#document
+| <html>
+| <head>
+| <style>
+| "<!--...<style><!--...--!>"
+| <body>
+| "-->"
+
+#data
+<style><!--...</style><!-- --><style>@import ...</style>
+#errors
+Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <style>
+| "<!--..."
+| <!-- -->
+| <style>
+| "@import ..."
+| <body>
+
+#data
+<style>...<style><!--...</style><!-- --></style>
+#errors
+Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
+Line: 1 Col: 48 Unexpected end tag (style).
+#document
+| <html>
+| <head>
+| <style>
+| "...<style><!--..."
+| <!-- -->
+| <body>
+
+#data
+<style>...<!--[if IE]><style>...</style>X
+#errors
+Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <style>
+| "...<!--[if IE]><style>..."
+| <body>
+| "X"
+
+#data
+<title><!--<title></title>--></title>
+#errors
+Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE.
+Line: 1 Col: 37 Unexpected end tag (title).
+#document
+| <html>
+| <head>
+| <title>
+| "<!--<title>"
+| <body>
+| "-->"
+
+#data
+<title>&lt;/title></title>
+#errors
+Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <title>
+| "</title>"
+| <body>
+
+#data
+<title>foo/title><link></head><body>X
+#errors
+Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE.
+Line: 1 Col: 37 Unexpected end of file. Expected end tag (title).
+#document
+| <html>
+| <head>
+| <title>
+| "foo/title><link></head><body>X"
+| <body>
+
+#data
+<noscript><!--<noscript></noscript>--></noscript>
+#errors
+Line: 1 Col: 10 Unexpected start tag (noscript). Expected DOCTYPE.
+Line: 1 Col: 49 Unexpected end tag (noscript).
+#document
+| <html>
+| <head>
+| <noscript>
+| "<!--<noscript>"
+| <body>
+| "-->"
+
+#data
+<noscript><!--</noscript>X<noscript>--></noscript>
+#errors
+Line: 1 Col: 10 Unexpected start tag (noscript). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <noscript>
+| "<!--"
+| <body>
+| "X"
+| <noscript>
+| "-->"
+
+#data
+<noscript><iframe></noscript>X
+#errors
+Line: 1 Col: 10 Unexpected start tag (noscript). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <noscript>
+| "<iframe>"
+| <body>
+| "X"
+
+#data
+<noframes><!--<noframes></noframes>--></noframes>
+#errors
+Line: 1 Col: 10 Unexpected start tag (noframes). Expected DOCTYPE.
+Line: 1 Col: 49 Unexpected end tag (noframes).
+#document
+| <html>
+| <head>
+| <noframes>
+| "<!--<noframes>"
+| <body>
+| "-->"
+
+#data
+<noframes><body><script><!--...</script></body></noframes></html>
+#errors
+Line: 1 Col: 10 Unexpected start tag (noframes). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <noframes>
+| "<body><script><!--...</script></body>"
+| <body>
+
+#data
+<textarea><!--<textarea></textarea>--></textarea>
+#errors
+Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE.
+Line: 1 Col: 49 Unexpected end tag (textarea).
+#document
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "<!--<textarea>"
+| "-->"
+
+#data
+<textarea>&lt;/textarea></textarea>
+#errors
+Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "</textarea>"
+
+#data
+<iframe><!--<iframe></iframe>--></iframe>
+#errors
+Line: 1 Col: 8 Unexpected start tag (iframe). Expected DOCTYPE.
+Line: 1 Col: 41 Unexpected end tag (iframe).
+#document
+| <html>
+| <head>
+| <body>
+| <iframe>
+| "<!--<iframe>"
+| "-->"
+
+#data
+<iframe>...<!--X->...<!--/X->...</iframe>
+#errors
+Line: 1 Col: 8 Unexpected start tag (iframe). Expected DOCTYPE.
+#document
+| <html>
+| <head>
+| <body>
+| <iframe>
+| "...<!--X->...<!--/X->..."
+
+#data
+<xmp><!--<xmp></xmp>--></xmp>
+#errors
+Line: 1 Col: 5 Unexpected start tag (xmp). Expected DOCTYPE.
+Line: 1 Col: 29 Unexpected end tag (xmp).
+#document
+| <html>
+| <head>
+| <body>
+| <xmp>
+| "<!--<xmp>"
+| "-->"
+
+#data
+<noembed><!--<noembed></noembed>--></noembed>
+#errors
+Line: 1 Col: 9 Unexpected start tag (noembed). Expected DOCTYPE.
+Line: 1 Col: 45 Unexpected end tag (noembed).
+#document
+| <html>
+| <head>
+| <body>
+| <noembed>
+| "<!--<noembed>"
+| "-->"
+
+#data
+<!doctype html><table>
+
+#errors
+Line 2 Col 0 Unexpected end of file. Expected table content.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| "
+"
+
+#data
+<!doctype html><table><td><span><font></span><span>
+#errors
+Line 1 Col 26 Unexpected table cell start tag (td) in the table body phase.
+Line 1 Col 45 Unexpected end tag (span).
+Line 1 Col 51 Expected closing tag. Unexpected end of file.
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <span>
+| <font>
+| <font>
+| <span>
+
+#data
+<!doctype html><form><table></form><form></table></form>
+#errors
+35: Stray end tag “form”.
+41: Start tag “form” seen in “table”.
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> +| <table> +| <form> diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests17.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests17.dat new file mode 100644 index 0000000..7b555f8 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests17.dat @@ -0,0 +1,153 @@ +#data +<!doctype html><table><tbody><select><tr> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><table><tr><select><td> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <table> +| <tbody> +| <tr> +| <td> + +#data +<!doctype html><table><tr><td><select><td> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| <select> +| <td> + +#data +<!doctype html><table><tr><th><select><td> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <th> +| <select> +| <td> + +#data +<!doctype html><table><caption><select><tr> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <caption> +| <select> +| <tbody> +| <tr> + +#data +<!doctype html><select><tr> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><select><td> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><select><th> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><select><tbody> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><select><thead> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><select><tfoot> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><select><caption> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><table><tr></table>a +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| "a" diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests18.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests18.dat new file mode 100644 index 0000000..680e1f0 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests18.dat @@ -0,0 +1,269 @@ +#data +<!doctype html><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <plaintext> +| "</plaintext>" + +#data +<!doctype html><table><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <plaintext> +| "</plaintext>" +| <table> + +#data +<!doctype html><table><tbody><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <plaintext> +| "</plaintext>" +| <table> +| <tbody> + +#data +<!doctype html><table><tbody><tr><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <plaintext> +| "</plaintext>" +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><table><tbody><tr><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <plaintext> +| "</plaintext>" +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><table><td><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> 
+| <td> +| <plaintext> +| "</plaintext>" + +#data +<!doctype html><table><caption><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <caption> +| <plaintext> +| "</plaintext>" + +#data +<!doctype html><table><tr><style></script></style>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "abc" +| <table> +| <tbody> +| <tr> +| <style> +| "</script>" + +#data +<!doctype html><table><tr><script></style></script>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "abc" +| <table> +| <tbody> +| <tr> +| <script> +| "</style>" + +#data +<!doctype html><table><caption><style></script></style>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <caption> +| <style> +| "</script>" +| "abc" + +#data +<!doctype html><table><td><style></script></style>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| <style> +| "</script>" +| "abc" + +#data +<!doctype html><select><script></style></script>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <script> +| "</style>" +| "abc" + +#data +<!doctype html><table><select><script></style></script>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <script> +| "</style>" +| "abc" +| <table> + +#data +<!doctype html><table><tr><select><script></style></script>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <script> +| "</style>" +| "abc" +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><frameset></frameset><noframes>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <noframes> +| "abc" + +#data +<!doctype html><frameset></frameset><noframes>abc</noframes><!--abc--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <noframes> +| "abc" +| <!-- abc --> + +#data +<!doctype html><frameset></frameset></html><noframes>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <noframes> +| "abc" + +#data +<!doctype html><frameset></frameset></html><noframes>abc</noframes><!--abc--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <noframes> +| "abc" +| <!-- abc --> + +#data +<!doctype html><table><tr></tbody><tfoot> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <tfoot> + +#data +<!doctype html><table><td><svg></svg>abc<td> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| <svg svg> +| "abc" +| <td> diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests19.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests19.dat new file mode 100644 index 0000000..0d62f5a --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests19.dat @@ -0,0 +1,1237 @@ +#data +<!doctype html><math><mn DefinitionUrl="foo"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <math math> +| <math mn> +| definitionURL="foo" + +#data +<!doctype html><html></p><!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <!-- foo --> +| <head> +| <body> + +#data +<!doctype html><head></head></p><!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <!-- foo --> +| <body> + +#data +<!doctype html><body><p><pre> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <pre> + +#data +<!doctype 
html><body><p><listing> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <listing> + +#data +<!doctype html><p><plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <plaintext> + +#data +<!doctype html><p><h1> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <h1> + +#data +<!doctype html><form><isindex> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> + +#data +<!doctype html><isindex action="POST"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> +| action="POST" +| <hr> +| <label> +| "This is a searchable index. Enter search keywords: " +| <input> +| name="isindex" +| <hr> + +#data +<!doctype html><isindex prompt="this is isindex"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> +| <hr> +| <label> +| "this is isindex" +| <input> +| name="isindex" +| <hr> + +#data +<!doctype html><isindex type="hidden"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> +| <hr> +| <label> +| "This is a searchable index. Enter search keywords: " +| <input> +| name="isindex" +| type="hidden" +| <hr> + +#data +<!doctype html><isindex name="foo"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> +| <hr> +| <label> +| "This is a searchable index. Enter search keywords: " +| <input> +| name="isindex" +| <hr> + +#data +<!doctype html><ruby><p><rp> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <ruby> +| <p> +| <rp> + +#data +<!doctype html><ruby><div><span><rp> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <ruby> +| <div> +| <span> +| <rp> + +#data +<!doctype html><ruby><div><p><rp> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <ruby> +| <div> +| <p> +| <rp> + +#data +<!doctype html><ruby><p><rt> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <ruby> +| <p> +| <rt> + +#data +<!doctype html><ruby><div><span><rt> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <ruby> +| <div> +| <span> +| <rt> + +#data +<!doctype html><ruby><div><p><rt> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <ruby> +| <div> +| <p> +| <rt> + +#data +<!doctype html><math/><foo> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <math math> +| <foo> + +#data +<!doctype html><svg/><foo> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <svg svg> +| <foo> + +#data +<!doctype html><div></body><!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <div> +| <!-- foo --> + +#data +<!doctype html><h1><div><h3><span></h1>foo +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <h1> +| <div> +| <h3> +| <span> +| "foo" + +#data +<!doctype html><p></h3>foo +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| "foo" + +#data +<!doctype html><h3><li>abc</h2>foo +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <h3> +| <li> +| "abc" +| "foo" + +#data +<!doctype html><table>abc<!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "abc" +| <table> +| <!-- foo --> + +#data +<!doctype html><table> <!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| " " +| <!-- foo --> + +#data +<!doctype html><table> b <!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| 
<head> +| <body> +| " b " +| <table> +| <!-- foo --> + +#data +<!doctype html><select><option><option> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <option> +| <option> + +#data +<!doctype html><select><option></optgroup> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <option> + +#data +<!doctype html><select><option></optgroup> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <option> + +#data +<!doctype html><p><math><mi><p><h1> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| <math mi> +| <p> +| <h1> + +#data +<!doctype html><p><math><mo><p><h1> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| <math mo> +| <p> +| <h1> + +#data +<!doctype html><p><math><mn><p><h1> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| <math mn> +| <p> +| <h1> + +#data +<!doctype html><p><math><ms><p><h1> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| <math ms> +| <p> +| <h1> + +#data +<!doctype html><p><math><mtext><p><h1> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| <math mtext> +| <p> +| <h1> + +#data +<!doctype html><frameset></noframes> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!doctype html><html c=d><body></html><html a=b> +#errors +#document +| <!DOCTYPE html> +| <html> +| a="b" +| c="d" +| <head> +| <body> + +#data +<!doctype html><html c=d><frameset></frameset></html><html a=b> +#errors +#document +| <!DOCTYPE html> +| <html> +| a="b" +| c="d" +| <head> +| <frameset> + +#data +<!doctype html><html><frameset></frameset></html><!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <!-- foo --> + +#data +<!doctype html><html><frameset></frameset></html> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| " " + +#data +<!doctype html><html><frameset></frameset></html>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!doctype html><html><frameset></frameset></html><p> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!doctype html><html><frameset></frameset></html></p> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<html><frameset></frameset></html><!doctype html> +#errors +#document +| <html> +| <head> +| <frameset> + +#data +<!doctype html><body><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> + +#data +<!doctype html><p><frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <frame> + +#data +<!doctype html><p>a<frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| "a" + +#data +<!doctype html><p> <frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <frame> + +#data +<!doctype html><pre><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <pre> + +#data +<!doctype html><listing><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <listing> + +#data +<!doctype html><li><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <li> + +#data +<!doctype html><dd><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| 
<dd> + +#data +<!doctype html><dt><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <dt> + +#data +<!doctype html><button><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <button> + +#data +<!doctype html><applet><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <applet> + +#data +<!doctype html><marquee><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <marquee> + +#data +<!doctype html><object><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <object> + +#data +<!doctype html><table><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> + +#data +<!doctype html><area><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <area> + +#data +<!doctype html><basefont><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <basefont> +| <frameset> + +#data +<!doctype html><bgsound><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <bgsound> +| <frameset> + +#data +<!doctype html><br><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <br> + +#data +<!doctype html><embed><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <embed> + +#data +<!doctype html><img><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <img> + +#data +<!doctype html><input><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <input> + +#data +<!doctype html><keygen><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <keygen> + +#data +<!doctype html><wbr><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <wbr> + +#data +<!doctype html><hr><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <hr> + +#data +<!doctype html><textarea></textarea><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <textarea> + +#data +<!doctype html><xmp></xmp><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <xmp> + +#data +<!doctype html><iframe></iframe><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <iframe> + +#data +<!doctype html><select></select><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><svg></svg><frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <frame> + +#data +<!doctype html><math></math><frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <frame> + +#data +<!doctype html><svg><foreignObject><div> <frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <frame> + +#data +<!doctype html><svg>a</svg><frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <svg svg> +| "a" + +#data +<!doctype html><svg> </svg><frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <frame> + +#data +<html>aaa<frameset></frameset> +#errors +#document +| <html> +| <head> +| <body> +| "aaa" + +#data +<html> a <frameset></frameset> +#errors +#document +| <html> +| <head> +| <body> +| "a " + +#data +<!doctype html><div><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!doctype 
html><div><body><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <div> + +#data +<!doctype html><p><math></p>a +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| "a" + +#data +<!doctype html><p><math><mn><span></p>a +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| <math mn> +| <span> +| <p> +| "a" + +#data +<!doctype html><math></html> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <math math> + +#data +<!doctype html><meta charset="ascii"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <meta> +| charset="ascii" +| <body> + +#data +<!doctype html><meta http-equiv="content-type" content="text/html;charset=ascii"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <meta> +| content="text/html;charset=ascii" +| http-equiv="content-type" +| <body> + +#data +<!doctype html><head><!--aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa--><meta charset="utf8"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <!-- aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa --> +| <meta> +| charset="utf8" +| <body> + +#data +<!doctype html><html a=b><head></head><html c=d> +#errors +#document +| <!DOCTYPE html> +| <html> +| a="b" +| c="d" +| <head> +| <body> + +#data +<!doctype html><image/> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <img> + +#data +<!doctype html>a<i>b<table>c<b>d</i>e</b>f +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "a" +| <i> +| "bc" +| <b> +| "de" +| "f" +| <table> + +#data +<!doctype html><table><i>a<b>b<div>c<a>d</i>e</b>f +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <i> +| "a" +| <b> +| "b" +| <b> +| <div> +| <b> +| <i> +| "c" +| <a> +| "d" +| <a> +| "e" +| <a> +| "f" +| <table> + +#data +<!doctype html><i>a<b>b<div>c<a>d</i>e</b>f +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <i> +| "a" +| <b> +| "b" +| <b> +| <div> +| <b> +| <i> +| "c" +| <a> +| "d" +| <a> +| "e" +| <a> +| "f" + +#data +<!doctype html><table><i>a<b>b<div>c</i> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <i> +| "a" +| <b> +| "b" +| <b> +| <div> +| <i> +| "c" +| <table> + +#data +<!doctype html><table><i>a<b>b<div>c<a>d</i>e</b>f +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <i> +| "a" +| <b> +| "b" +| <b> +| <div> +| <b> +| <i> +| "c" +| <a> +| "d" +| <a> +| "e" +| <a> +| "f" +| <table> + +#data +<!doctype html><table><i>a<div>b<tr>c<b>d</i>e +#errors +#document +| 
<!DOCTYPE html> +| <html> +| <head> +| <body> +| <i> +| "a" +| <div> +| "b" +| <i> +| "c" +| <b> +| "d" +| <b> +| "e" +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><table><td><table><i>a<div>b<b>c</i>d +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| <i> +| "a" +| <div> +| <i> +| "b" +| <b> +| "c" +| <b> +| "d" +| <table> + +#data +<!doctype html><body><bgsound> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <bgsound> + +#data +<!doctype html><body><basefont> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <basefont> + +#data +<!doctype html><a><b></a><basefont> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <a> +| <b> +| <basefont> + +#data +<!doctype html><a><b></a><bgsound> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <a> +| <b> +| <bgsound> + +#data +<!doctype html><figcaption><article></figcaption>a +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <figcaption> +| <article> +| "a" + +#data +<!doctype html><summary><article></summary>a +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <summary> +| <article> +| "a" + +#data +<!doctype html><p><a><plaintext>b +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <a> +| <plaintext> +| <a> +| "b" + +#data +<!DOCTYPE html><div>a<a></div>b<p>c</p>d +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <div> +| "a" +| <a> +| <a> +| "b" +| <p> +| "c" +| "d" diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests2.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests2.dat new file mode 100644 index 0000000..60d8592 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests2.dat @@ -0,0 +1,763 @@ +#data +<!DOCTYPE html>Test +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "Test" + +#data +<textarea>test</div>test +#errors +Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE. +Line: 1 Col: 24 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <body> +| <textarea> +| "test</div>test" + +#data +<table><td> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected table cell start tag (td) in the table body phase. +Line: 1 Col: 11 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> + +#data +<table><td>test</tbody></table> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected table cell start tag (td) in the table body phase. +#document +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| "test" + +#data +<frame>test +#errors +Line: 1 Col: 7 Unexpected start tag (frame). Expected DOCTYPE. +Line: 1 Col: 7 Unexpected start tag frame. Ignored. +#document +| <html> +| <head> +| <body> +| "test" + +#data +<!DOCTYPE html><frameset>test +#errors +Line: 1 Col: 29 Unepxected characters in the frameset phase. Characters ignored. +Line: 1 Col: 29 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!DOCTYPE html><frameset><!DOCTYPE html> +#errors +Line: 1 Col: 40 Unexpected DOCTYPE. Ignored. +Line: 1 Col: 40 Expected closing tag. Unexpected end of file. 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!DOCTYPE html><font><p><b>test</font> +#errors +Line: 1 Col: 38 End tag (font) violates step 1, paragraph 3 of the adoption agency algorithm. +Line: 1 Col: 38 End tag (font) violates step 1, paragraph 3 of the adoption agency algorithm. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <font> +| <p> +| <font> +| <b> +| "test" + +#data +<!DOCTYPE html><dt><div><dd> +#errors +Line: 1 Col: 28 Missing end tag (div, dt). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <dt> +| <div> +| <dd> + +#data +<script></x +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</x" +| <body> + +#data +<table><plaintext><td> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 18 Unexpected start tag (plaintext) in table context caused voodoo mode. +Line: 1 Col: 22 Unexpected end of file. Expected table content. +#document +| <html> +| <head> +| <body> +| <plaintext> +| "<td>" +| <table> + +#data +<plaintext></plaintext> +#errors +Line: 1 Col: 11 Unexpected start tag (plaintext). Expected DOCTYPE. +Line: 1 Col: 23 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <body> +| <plaintext> +| "</plaintext>" + +#data +<!DOCTYPE html><table><tr>TEST +#errors +Line: 1 Col: 30 Unexpected non-space characters in table context caused voodoo mode. +Line: 1 Col: 30 Unexpected end of file. Expected table content. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "TEST" +| <table> +| <tbody> +| <tr> + +#data +<!DOCTYPE html><body t1=1><body t2=2><body t3=3 t4=4> +#errors +Line: 1 Col: 37 Unexpected start tag (body). +Line: 1 Col: 53 Unexpected start tag (body). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| t1="1" +| t2="2" +| t3="3" +| t4="4" + +#data +</b test +#errors +Line: 1 Col: 8 Unexpected end of file in attribute name. +Line: 1 Col: 8 End tag contains unexpected attributes. +Line: 1 Col: 8 Unexpected end tag (b). Expected DOCTYPE. +Line: 1 Col: 8 Unexpected end tag (b) after the (implied) root element. +#document +| <html> +| <head> +| <body> + +#data +<!DOCTYPE html></b test<b &=&>X +#errors +Line: 1 Col: 32 Named entity didn't end with ';'. +Line: 1 Col: 33 End tag contains unexpected attributes. +Line: 1 Col: 33 Unexpected end tag (b) after the (implied) root element. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "X" + +#data +<!doctypehtml><scrIPt type=text/x-foobar;baz>X</SCRipt +#errors +Line: 1 Col: 9 No space after literal string 'DOCTYPE'. +Line: 1 Col: 54 Unexpected end of file in the tag name. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| type="text/x-foobar;baz" +| "X</SCRipt" +| <body> + +#data +& +#errors +Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&" + +#data +&# +#errors +Line: 1 Col: 1 Numeric entity expected. Got end of file instead. +Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&#" + +#data +&#X +#errors +Line: 1 Col: 3 Numeric entity expected but none found. +Line: 1 Col: 3 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&#X" + +#data +&#x +#errors +Line: 1 Col: 3 Numeric entity expected but none found. +Line: 1 Col: 3 Unexpected non-space characters. 
Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&#x" + +#data +- +#errors +Line: 1 Col: 4 Numeric entity didn't end with ';'. +Line: 1 Col: 4 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "-" + +#data +&x-test +#errors +Line: 1 Col: 1 Named entity expected. Got none. +Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&x-test" + +#data +<!doctypehtml><p><li> +#errors +Line: 1 Col: 9 No space after literal string 'DOCTYPE'. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <li> + +#data +<!doctypehtml><p><dt> +#errors +Line: 1 Col: 9 No space after literal string 'DOCTYPE'. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <dt> + +#data +<!doctypehtml><p><dd> +#errors +Line: 1 Col: 9 No space after literal string 'DOCTYPE'. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <dd> + +#data +<!doctypehtml><p><form> +#errors +Line: 1 Col: 9 No space after literal string 'DOCTYPE'. +Line: 1 Col: 23 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <form> + +#data +<!DOCTYPE html><p></P>X +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| "X" + +#data +& +#errors +Line: 1 Col: 4 Named entity didn't end with ';'. +Line: 1 Col: 4 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&" + +#data +&AMp; +#errors +Line: 1 Col: 1 Named entity expected. Got none. +Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&AMp;" + +#data +<!DOCTYPE html><html><head></head><body><thisISasillyTESTelementNameToMakeSureCrazyTagNamesArePARSEDcorrectLY> +#errors +Line: 1 Col: 110 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <thisisasillytestelementnametomakesurecrazytagnamesareparsedcorrectly> + +#data +<!DOCTYPE html>X</body>X +#errors +Line: 1 Col: 24 Unexpected non-space characters in the after body phase. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "XX" + +#data +<!DOCTYPE html><!-- X +#errors +Line: 1 Col: 21 Unexpected end of file in comment. +#document +| <!DOCTYPE html> +| <!-- X --> +| <html> +| <head> +| <body> + +#data +<!DOCTYPE html><table><caption>test TEST</caption><td>test +#errors +Line: 1 Col: 54 Unexpected table cell start tag (td) in the table body phase. +Line: 1 Col: 58 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <caption> +| "test TEST" +| <tbody> +| <tr> +| <td> +| "test" + +#data +<!DOCTYPE html><select><option><optgroup> +#errors +Line: 1 Col: 41 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <option> +| <optgroup> + +#data +<!DOCTYPE html><select><optgroup><option></optgroup><option><select><option> +#errors +Line: 1 Col: 68 Unexpected select start tag in the select phase treated as select end tag. +Line: 1 Col: 76 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <optgroup> +| <option> +| <option> +| <option> + +#data +<!DOCTYPE html><select><optgroup><option><optgroup> +#errors +Line: 1 Col: 51 Expected closing tag. Unexpected end of file. 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <optgroup> +| <option> +| <optgroup> + +#data +<!DOCTYPE html><datalist><option>foo</datalist>bar +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <datalist> +| <option> +| "foo" +| "bar" + +#data +<!DOCTYPE html><font><input><input></font> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <font> +| <input> +| <input> + +#data +<!DOCTYPE html><!-- XXX - XXX --> +#errors +#document +| <!DOCTYPE html> +| <!-- XXX - XXX --> +| <html> +| <head> +| <body> + +#data +<!DOCTYPE html><!-- XXX - XXX +#errors +Line: 1 Col: 29 Unexpected end of file in comment (-) +#document +| <!DOCTYPE html> +| <!-- XXX - XXX --> +| <html> +| <head> +| <body> + +#data +<!DOCTYPE html><!-- XXX - XXX - XXX --> +#errors +#document +| <!DOCTYPE html> +| <!-- XXX - XXX - XXX --> +| <html> +| <head> +| <body> + +#data +<isindex test=x name=x> +#errors +Line: 1 Col: 23 Unexpected start tag (isindex). Expected DOCTYPE. +Line: 1 Col: 23 Unexpected start tag isindex. Don't use it! +#document +| <html> +| <head> +| <body> +| <form> +| <hr> +| <label> +| "This is a searchable index. Enter search keywords: " +| <input> +| name="isindex" +| test="x" +| <hr> + +#data +test +test +#errors +Line: 2 Col: 4 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "test +test" + +#data +<!DOCTYPE html><body><title>test</body> +#errors +#document +| +| +| +| +| +| "test</body>" + +#data +<!DOCTYPE html><body><title>X +#errors +#document +| +| +| +| +| +| "X" +| <meta> +| name="z" +| <link> +| rel="foo" +| <style> +| " +x { content:"</style" } " + +#data +<!DOCTYPE html><select><optgroup></optgroup></select> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <optgroup> + +#data + + +#errors +Line: 2 Col: 1 Unexpected End of file. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> + +#data +<!DOCTYPE html> <html> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> + +#data +<!DOCTYPE html><script> +</script> <title>x +#errors +#document +| +| +| +| +#errors +Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE. +Line: 1 Col: 21 Unexpected start tag (script) that can be in head. Moved. +#document +| +| +| +#errors +Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE. +Line: 1 Col: 28 Unexpected start tag (style) that can be in head. Moved. +#document +| +| +| +#errors +Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE. +#document +| +| +| +| +| "x" +| x +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +Line: 1 Col: 22 Unexpected end of file. Expected end tag (style). +#document +| +| +| --> x +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| +| +| x +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| +| +| x +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| +| +| x +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| +| +|

+#errors +#document +| +| +| +| +| +| ddd +#errors +#document +| +| +| +#errors +#document +| +| +| +| +|
  • +| +| ", + " +
    << Back to Go HTTP/2 demo server`) + }) +} + +func httpsHost() string { + if *hostHTTPS != "" { + return *hostHTTPS + } + if v := *httpsAddr; strings.HasPrefix(v, ":") { + return "localhost" + v + } else { + return v + } +} + +func httpHost() string { + if *hostHTTP != "" { + return *hostHTTP + } + if v := *httpAddr; strings.HasPrefix(v, ":") { + return "localhost" + v + } else { + return v + } +} + +func serveProdTLS(autocertManager *autocert.Manager) error { + srv := &http.Server{ + TLSConfig: &tls.Config{ + GetCertificate: autocertManager.GetCertificate, + }, + } + http2.ConfigureServer(srv, &http2.Server{ + NewWriteScheduler: func() http2.WriteScheduler { + return http2.NewPriorityWriteScheduler(nil) + }, + }) + ln, err := net.Listen("tcp", ":443") + if err != nil { + return err + } + return srv.Serve(tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, srv.TLSConfig)) +} + +type tcpKeepAliveListener struct { + *net.TCPListener +} + +func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { + tc, err := ln.AcceptTCP() + if err != nil { + return + } + tc.SetKeepAlive(true) + tc.SetKeepAlivePeriod(3 * time.Minute) + return tc, nil +} + +func serveProd() error { + log.Printf("running in production mode") + + storageClient, err := storage.NewClient(context.Background()) + if err != nil { + log.Fatalf("storage.NewClient: %v", err) + } + autocertManager := &autocert.Manager{ + Prompt: autocert.AcceptTOS, + HostPolicy: autocert.HostWhitelist("http2.golang.org"), + Cache: autocertcache.NewGoogleCloudStorageCache(storageClient, "golang-h2demo-autocert"), + } + + errc := make(chan error, 2) + go func() { errc <- http.ListenAndServe(":80", autocertManager.HTTPHandler(http.DefaultServeMux)) }() + go func() { errc <- serveProdTLS(autocertManager) }() + return <-errc +} + +const idleTimeout = 5 * time.Minute +const activeTimeout = 10 * time.Minute + +// TODO: put this into the standard library and actually send +// PING frames and GOAWAY, etc: golang.org/issue/14204 +func idleTimeoutHook() func(net.Conn, http.ConnState) { + var mu sync.Mutex + m := map[net.Conn]*time.Timer{} + return func(c net.Conn, cs http.ConnState) { + mu.Lock() + defer mu.Unlock() + if t, ok := m[c]; ok { + delete(m, c) + t.Stop() + } + var d time.Duration + switch cs { + case http.StateNew, http.StateIdle: + d = idleTimeout + case http.StateActive: + d = activeTimeout + default: + return + } + m[c] = time.AfterFunc(d, func() { + log.Printf("closing idle conn %v after %v", c.RemoteAddr(), d) + go c.Close() + }) + } +} + +func main() { + var srv http.Server + flag.BoolVar(&http2.VerboseLogs, "verbose", false, "Verbose HTTP/2 debugging.") + flag.Parse() + srv.Addr = *httpsAddr + srv.ConnState = idleTimeoutHook() + + registerHandlers() + + if *prod { + *hostHTTP = "http2.golang.org" + *hostHTTPS = "http2.golang.org" + log.Fatal(serveProd()) + } + + url := "https://" + httpsHost() + "/" + log.Printf("Listening on " + url) + http2.ConfigureServer(&srv, &http2.Server{}) + + if *httpAddr != "" { + go func() { + log.Printf("Listening on http://" + httpHost() + "/ (for unencrypted HTTP/1)") + log.Fatal(http.ListenAndServe(*httpAddr, nil)) + }() + } + + go func() { + log.Fatal(srv.ListenAndServeTLS("server.crt", "server.key")) + }() + select {} +} diff --git a/vendor/golang.org/x/net/http2/h2demo/launch.go b/vendor/golang.org/x/net/http2/h2demo/launch.go new file mode 100644 index 0000000..df0866a --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/launch.go @@ -0,0 +1,302 @@ +// Copyright 2014 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +import ( + "bufio" + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "os" + "strings" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + compute "google.golang.org/api/compute/v1" +) + +var ( + proj = flag.String("project", "symbolic-datum-552", "name of Project") + zone = flag.String("zone", "us-central1-a", "GCE zone") + mach = flag.String("machinetype", "n1-standard-1", "Machine type") + instName = flag.String("instance_name", "http2-demo", "Name of VM instance.") + sshPub = flag.String("ssh_public_key", "", "ssh public key file to authorize. Can modify later in Google's web UI anyway.") + staticIP = flag.String("static_ip", "130.211.116.44", "Static IP to use. If empty, automatic.") + + writeObject = flag.String("write_object", "", "If non-empty, a VM isn't created and the flag value is Google Cloud Storage bucket/object to write. The contents from stdin.") + publicObject = flag.Bool("write_object_is_public", false, "Whether the object created by --write_object should be public.") +) + +func readFile(v string) string { + slurp, err := ioutil.ReadFile(v) + if err != nil { + log.Fatalf("Error reading %s: %v", v, err) + } + return strings.TrimSpace(string(slurp)) +} + +var config = &oauth2.Config{ + // The client-id and secret should be for an "Installed Application" when using + // the CLI. Later we'll use a web application with a callback. + ClientID: readFile("client-id.dat"), + ClientSecret: readFile("client-secret.dat"), + Endpoint: google.Endpoint, + Scopes: []string{ + compute.DevstorageFullControlScope, + compute.ComputeScope, + "https://www.googleapis.com/auth/sqlservice", + "https://www.googleapis.com/auth/sqlservice.admin", + }, + RedirectURL: "urn:ietf:wg:oauth:2.0:oob", +} + +const baseConfig = `#cloud-config +coreos: + units: + - name: h2demo.service + command: start + content: | + [Unit] + Description=HTTP2 Demo + + [Service] + ExecStartPre=/bin/bash -c 'mkdir -p /opt/bin && curl -s -o /opt/bin/h2demo http://storage.googleapis.com/http2-demo-server-tls/h2demo && chmod +x /opt/bin/h2demo' + ExecStart=/opt/bin/h2demo --prod + RestartSec=5s + Restart=always + Type=simple + + [Install] + WantedBy=multi-user.target +` + +func main() { + flag.Parse() + if *proj == "" { + log.Fatalf("Missing --project flag") + } + prefix := "https://www.googleapis.com/compute/v1/projects/" + *proj + machType := prefix + "/zones/" + *zone + "/machineTypes/" + *mach + + const tokenFileName = "token.dat" + tokenFile := tokenCacheFile(tokenFileName) + tokenSource := oauth2.ReuseTokenSource(nil, tokenFile) + token, err := tokenSource.Token() + if err != nil { + if *writeObject != "" { + log.Fatalf("Can't use --write_object without a valid token.dat file already cached.") + } + log.Printf("Error getting token from %s: %v", tokenFileName, err) + log.Printf("Get auth code from %v", config.AuthCodeURL("my-state")) + fmt.Print("\nEnter auth code: ") + sc := bufio.NewScanner(os.Stdin) + sc.Scan() + authCode := strings.TrimSpace(sc.Text()) + token, err = config.Exchange(oauth2.NoContext, authCode) + if err != nil { + log.Fatalf("Error exchanging auth code for a token: %v", err) + } + if err := tokenFile.WriteToken(token); err != nil { + log.Fatalf("Error writing to %s: %v", tokenFileName, err) + } + tokenSource = oauth2.ReuseTokenSource(token, nil) + } + + oauthClient := 
oauth2.NewClient(oauth2.NoContext, tokenSource) + + if *writeObject != "" { + writeCloudStorageObject(oauthClient) + return + } + + computeService, _ := compute.New(oauthClient) + + natIP := *staticIP + if natIP == "" { + // Try to find it by name. + aggAddrList, err := computeService.Addresses.AggregatedList(*proj).Do() + if err != nil { + log.Fatal(err) + } + // http://godoc.org/code.google.com/p/google-api-go-client/compute/v1#AddressAggregatedList + IPLoop: + for _, asl := range aggAddrList.Items { + for _, addr := range asl.Addresses { + if addr.Name == *instName+"-ip" && addr.Status == "RESERVED" { + natIP = addr.Address + break IPLoop + } + } + } + } + + cloudConfig := baseConfig + if *sshPub != "" { + key := strings.TrimSpace(readFile(*sshPub)) + cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", key) + } + if os.Getenv("USER") == "bradfitz" { + cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEAwks9dwWKlRC+73gRbvYtVg0vdCwDSuIlyt4z6xa/YU/jTDynM4R4W10hm2tPjy8iR1k8XhDv4/qdxe6m07NjG/By1tkmGpm1mGwho4Pr5kbAAy/Qg+NLCSdAYnnE00FQEcFOC15GFVMOW2AzDGKisReohwH9eIzHPzdYQNPRWXE= bradfitz@papag.bradfitz.com") + } + const maxCloudConfig = 32 << 10 // per compute API docs + if len(cloudConfig) > maxCloudConfig { + log.Fatalf("cloud config length of %d bytes is over %d byte limit", len(cloudConfig), maxCloudConfig) + } + + instance := &compute.Instance{ + Name: *instName, + Description: "Go Builder", + MachineType: machType, + Disks: []*compute.AttachedDisk{instanceDisk(computeService)}, + Tags: &compute.Tags{ + Items: []string{"http-server", "https-server"}, + }, + Metadata: &compute.Metadata{ + Items: []*compute.MetadataItems{ + { + Key: "user-data", + Value: &cloudConfig, + }, + }, + }, + NetworkInterfaces: []*compute.NetworkInterface{ + { + AccessConfigs: []*compute.AccessConfig{ + { + Type: "ONE_TO_ONE_NAT", + Name: "External NAT", + NatIP: natIP, + }, + }, + Network: prefix + "/global/networks/default", + }, + }, + ServiceAccounts: []*compute.ServiceAccount{ + { + Email: "default", + Scopes: []string{ + compute.DevstorageFullControlScope, + compute.ComputeScope, + }, + }, + }, + } + + log.Printf("Creating instance...") + op, err := computeService.Instances.Insert(*proj, *zone, instance).Do() + if err != nil { + log.Fatalf("Failed to create instance: %v", err) + } + opName := op.Name + log.Printf("Created. Waiting on operation %v", opName) +OpLoop: + for { + time.Sleep(2 * time.Second) + op, err := computeService.ZoneOperations.Get(*proj, *zone, opName).Do() + if err != nil { + log.Fatalf("Failed to get op %s: %v", opName, err) + } + switch op.Status { + case "PENDING", "RUNNING": + log.Printf("Waiting on operation %v", opName) + continue + case "DONE": + if op.Error != nil { + for _, operr := range op.Error.Errors { + log.Printf("Error: %+v", operr) + } + log.Fatalf("Failed to start.") + } + log.Printf("Success. 
%+v", op) + break OpLoop + default: + log.Fatalf("Unknown status %q: %+v", op.Status, op) + } + } + + inst, err := computeService.Instances.Get(*proj, *zone, *instName).Do() + if err != nil { + log.Fatalf("Error getting instance after creation: %v", err) + } + ij, _ := json.MarshalIndent(inst, "", " ") + log.Printf("Instance: %s", ij) +} + +func instanceDisk(svc *compute.Service) *compute.AttachedDisk { + const imageURL = "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-444-5-0-v20141016" + diskName := *instName + "-disk" + + return &compute.AttachedDisk{ + AutoDelete: true, + Boot: true, + Type: "PERSISTENT", + InitializeParams: &compute.AttachedDiskInitializeParams{ + DiskName: diskName, + SourceImage: imageURL, + DiskSizeGb: 50, + }, + } +} + +func writeCloudStorageObject(httpClient *http.Client) { + content := os.Stdin + const maxSlurp = 1 << 20 + var buf bytes.Buffer + n, err := io.CopyN(&buf, content, maxSlurp) + if err != nil && err != io.EOF { + log.Fatalf("Error reading from stdin: %v, %v", n, err) + } + contentType := http.DetectContentType(buf.Bytes()) + + req, err := http.NewRequest("PUT", "https://storage.googleapis.com/"+*writeObject, io.MultiReader(&buf, content)) + if err != nil { + log.Fatal(err) + } + req.Header.Set("x-goog-api-version", "2") + if *publicObject { + req.Header.Set("x-goog-acl", "public-read") + } + req.Header.Set("Content-Type", contentType) + res, err := httpClient.Do(req) + if err != nil { + log.Fatal(err) + } + if res.StatusCode != 200 { + res.Write(os.Stderr) + log.Fatalf("Failed.") + } + log.Printf("Success.") + os.Exit(0) +} + +type tokenCacheFile string + +func (f tokenCacheFile) Token() (*oauth2.Token, error) { + slurp, err := ioutil.ReadFile(string(f)) + if err != nil { + return nil, err + } + t := new(oauth2.Token) + if err := json.Unmarshal(slurp, t); err != nil { + return nil, err + } + return t, nil +} + +func (f tokenCacheFile) WriteToken(t *oauth2.Token) error { + jt, err := json.Marshal(t) + if err != nil { + return err + } + return ioutil.WriteFile(string(f), jt, 0600) +} diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.key b/vendor/golang.org/x/net/http2/h2demo/rootCA.key new file mode 100644 index 0000000..a15a6ab --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/rootCA.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSSR8Od0+9Q +62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoTZjkUygby +XDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYkJfODVGnV +mr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3mOoLb4yJ +JQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYWcaiW8LWZ +SUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABAoIBAFFHV7JMAqPWnMYA +nezY6J81v9+XN+7xABNWM2Q8uv4WdksbigGLTXR3/680Z2hXqJ7LMeC5XJACFT/e +/Gr0vmpgOCygnCPfjGehGKpavtfksXV3edikUlnCXsOP1C//c1bFL+sMYmFCVgTx +qYdDK8yKzXNGrKYT6q5YG7IglyRNV1rsQa8lM/5taFYiD1Ck/3tQi3YIq8Lcuser +hrxsMABcQ6mi+EIvG6Xr4mfJug0dGJMHG4RG1UGFQn6RXrQq2+q53fC8ZbVUSi0j +NQ918aKFzktwv+DouKU0ME4I9toks03gM860bAL7zCbKGmwR3hfgX/TqzVCWpG9E +LDVfvekCgYEA8fk9N53jbBRmULUGEf4qWypcLGiZnNU0OeXWpbPV9aa3H0VDytA7 +8fCN2dPAVDPqlthMDdVe983NCNwp2Yo8ZimDgowyIAKhdC25s1kejuaiH9OAPj3c +0f8KbriYX4n8zNHxFwK6Ae3pQ6EqOLJVCUsziUaZX9nyKY5aZlyX6xcCgYEAwjws +K62PjC64U5wYddNLp+kNdJ4edx+a7qBb3mEgPvSFT2RO3/xafJyG8kQB30Mfstjd +bRxyUV6N0vtX1zA7VQtRUAvfGCecpMo+VQZzcHXKzoRTnQ7eZg4Lmj5fQ9tOAKAo +QCVBoSW/DI4PZL26CAMDcAba4Pa22ooLapoRIQsCgYA6pIfkkbxLNkpxpt2YwLtt 
+Kr/590O7UaR9n6k8sW/aQBRDXNsILR1KDl2ifAIxpf9lnXgZJiwE7HiTfCAcW7c1 +nzwDCI0hWuHcMTS/NYsFYPnLsstyyjVZI3FY0h4DkYKV9Q9z3zJLQ2hz/nwoD3gy +b2pHC7giFcTts1VPV4Nt8wKBgHeFn4ihHJweg76vZz3Z78w7VNRWGFklUalVdDK7 +gaQ7w2y/ROn/146mo0OhJaXFIFRlrpvdzVrU3GDf2YXJYDlM5ZRkObwbZADjksev +WInzcgDy3KDg7WnPasRXbTfMU4t/AkW2p1QKbi3DnSVYuokDkbH2Beo45vxDxhKr +C69RAoGBAIyo3+OJenoZmoNzNJl2WPW5MeBUzSh8T/bgyjFTdqFHF5WiYRD/lfHj +x9Glyw2nutuT4hlOqHvKhgTYdDMsF2oQ72fe3v8Q5FU7FuKndNPEAyvKNXZaShVA +hnlhv5DjXKb0wFWnt5PCCiQLtzG0yyHaITrrEme7FikkIcTxaX/Y +-----END RSA PRIVATE KEY----- diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.pem b/vendor/golang.org/x/net/http2/h2demo/rootCA.pem new file mode 100644 index 0000000..3a323e7 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/rootCA.pem @@ -0,0 +1,26 @@ +-----BEGIN CERTIFICATE----- +MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG +A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 +DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 +NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG +cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv +c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS +R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT +ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk +JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 +mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW +caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G +A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt +hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB +MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES +MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv +bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h +U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao +eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 +UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD +58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n +sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF +kPe6XoSbiLm/kxk32T0= +-----END CERTIFICATE----- diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.srl b/vendor/golang.org/x/net/http2/h2demo/rootCA.srl new file mode 100644 index 0000000..6db3891 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/rootCA.srl @@ -0,0 +1 @@ +E2CE26BF3285059C diff --git a/vendor/golang.org/x/net/http2/h2demo/server.crt b/vendor/golang.org/x/net/http2/h2demo/server.crt new file mode 100644 index 0000000..c59059b --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/server.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDPjCCAiYCCQDizia/MoUFnDANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJV +UzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xFDASBgNVBAoT +C0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhvc3QxHTAbBgkqhkiG9w0BCQEW +DmJyYWRAZGFuZ2EuY29tMB4XDTE0MDcxNTIwNTAyN1oXDTE1MTEyNzIwNTAyN1ow +RzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMQswCQYDVQQHEwJTRjEeMBwGA1UE +ChMVYnJhZGZpdHogaHR0cDIgc2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAs1Y9CyLFrdL8VQWN1WaifDqaZFnoqjHhCMlc1TfG2zA+InDifx2l +gZD3o8FeNnAcfM2sPlk3+ZleOYw9P/CklFVDlvqmpCv9ss/BEp/dDaWvy1LmJ4c2 +dbQJfmTxn7CV1H3TsVJvKdwFmdoABb41NoBp6+NNO7OtDyhbIMiCI0pL3Nefb3HL 
+A7hIMo3DYbORTtJLTIH9W8YKrEWL0lwHLrYFx/UdutZnv+HjdmO6vCN4na55mjws +/vjKQUmc7xeY7Xe20xDEG2oDKVkL2eD7FfyrYMS3rO1ExP2KSqlXYG/1S9I/fz88 +F0GK7HX55b5WjZCl2J3ERVdnv/0MQv+sYQIDAQABMA0GCSqGSIb3DQEBBQUAA4IB +AQC0zL+n/YpRZOdulSu9tS8FxrstXqGWoxfe+vIUgqfMZ5+0MkjJ/vW0FqlLDl2R +rn4XaR3e7FmWkwdDVbq/UB6lPmoAaFkCgh9/5oapMaclNVNnfF3fjCJfRr+qj/iD +EmJStTIN0ZuUjAlpiACmfnpEU55PafT5Zx+i1yE4FGjw8bJpFoyD4Hnm54nGjX19 +KeCuvcYFUPnBm3lcL0FalF2AjqV02WTHYNQk7YF/oeO7NKBoEgvGvKG3x+xaOeBI +dwvdq175ZsGul30h+QjrRlXhH/twcuaT3GSdoysDl9cCYE8f1Mk8PD6gan3uBCJU +90p6/CbU71bGbfpM2PHot2fm +-----END CERTIFICATE----- diff --git a/vendor/golang.org/x/net/http2/h2demo/server.key b/vendor/golang.org/x/net/http2/h2demo/server.key new file mode 100644 index 0000000..f329c14 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/server.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAs1Y9CyLFrdL8VQWN1WaifDqaZFnoqjHhCMlc1TfG2zA+InDi +fx2lgZD3o8FeNnAcfM2sPlk3+ZleOYw9P/CklFVDlvqmpCv9ss/BEp/dDaWvy1Lm +J4c2dbQJfmTxn7CV1H3TsVJvKdwFmdoABb41NoBp6+NNO7OtDyhbIMiCI0pL3Nef +b3HLA7hIMo3DYbORTtJLTIH9W8YKrEWL0lwHLrYFx/UdutZnv+HjdmO6vCN4na55 +mjws/vjKQUmc7xeY7Xe20xDEG2oDKVkL2eD7FfyrYMS3rO1ExP2KSqlXYG/1S9I/ +fz88F0GK7HX55b5WjZCl2J3ERVdnv/0MQv+sYQIDAQABAoIBADQ2spUwbY+bcz4p +3M66ECrNQTBggP40gYl2XyHxGGOu2xhZ94f9ELf1hjRWU2DUKWco1rJcdZClV6q3 +qwmXvcM2Q/SMS8JW0ImkNVl/0/NqPxGatEnj8zY30d/L8hGFb0orzFu/XYA5gCP4 +NbN2WrXgk3ZLeqwcNxHHtSiJWGJ/fPyeDWAu/apy75u9Xf2GlzBZmV6HYD9EfK80 +LTlI60f5FO487CrJnboL7ovPJrIHn+k05xRQqwma4orpz932rTXnTjs9Lg6KtbQN +a7PrqfAntIISgr11a66Mng3IYH1lYqJsWJJwX/xHT4WLEy0EH4/0+PfYemJekz2+ +Co62drECgYEA6O9zVJZXrLSDsIi54cfxA7nEZWm5CAtkYWeAHa4EJ+IlZ7gIf9sL +W8oFcEfFGpvwVqWZ+AsQ70dsjXAv3zXaG0tmg9FtqWp7pzRSMPidifZcQwWkKeTO +gJnFmnVyed8h6GfjTEu4gxo1/S5U0V+mYSha01z5NTnN6ltKx1Or3b0CgYEAxRgm +S30nZxnyg/V7ys61AZhst1DG2tkZXEMcA7dYhabMoXPJAP/EfhlWwpWYYUs/u0gS +Wwmf5IivX5TlYScgmkvb/NYz0u4ZmOXkLTnLPtdKKFXhjXJcHjUP67jYmOxNlJLp +V4vLRnFxTpffAV+OszzRxsXX6fvruwZBANYJeXUCgYBVouLFsFgfWGYp2rpr9XP4 +KK25kvrBqF6JKOIDB1zjxNJ3pUMKrl8oqccCFoCyXa4oTM2kUX0yWxHfleUjrMq4 +yimwQKiOZmV7fVLSSjSw6e/VfBd0h3gb82ygcplZkN0IclkwTY5SNKqwn/3y07V5 +drqdhkrgdJXtmQ6O5YYECQKBgATERcDToQ1USlI4sKrB/wyv1AlG8dg/IebiVJ4e +ZAyvcQmClFzq0qS+FiQUnB/WQw9TeeYrwGs1hxBHuJh16srwhLyDrbMvQP06qh8R +48F8UXXSRec22dV9MQphaROhu2qZdv1AC0WD3tqov6L33aqmEOi+xi8JgbT/PLk5 +c/c1AoGBAI1A/02ryksW6/wc7/6SP2M2rTy4m1sD/GnrTc67EHnRcVBdKO6qH2RY +nqC8YcveC2ZghgPTDsA3VGuzuBXpwY6wTyV99q6jxQJ6/xcrD9/NUG6Uwv/xfCxl +IJLeBYEqQundSSny3VtaAUK8Ul1nxpTvVRNwtcyWTo8RHAAyNPWd +-----END RSA PRIVATE KEY----- diff --git a/vendor/golang.org/x/net/http2/h2demo/service.yaml b/vendor/golang.org/x/net/http2/h2demo/service.yaml new file mode 100644 index 0000000..2b7d541 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + name: h2demo +spec: + externalTrafficPolicy: Local + ports: + - port: 80 + targetPort: 80 + name: http + - port: 443 + targetPort: 443 + name: https + selector: + app: h2demo + type: LoadBalancer + loadBalancerIP: 130.211.116.44 diff --git a/vendor/golang.org/x/net/http2/h2demo/tmpl.go b/vendor/golang.org/x/net/http2/h2demo/tmpl.go new file mode 100644 index 0000000..504d6a7 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/tmpl.go @@ -0,0 +1,1991 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build h2demo + +package main + +import "html/template" + +var pushTmpl = template.Must(template.New("serverpush").Parse(` + + + + + + + + + HTTP/2 Server Push Demo + + + + + + + + + +
    +Note: This page exists for demonstration purposes. For the actual cmd/go docs, go to golang.org/cmd/go. +
+[template page chrome: navigation links, code editor pane, and Run / Format buttons] +

    Command go


    +Go is a tool for managing Go source code. +

    +

    +Usage: +

    +
    go command [arguments]
    +
    +

    +The commands are: +

    +
    build       compile packages and dependencies
    +clean       remove object files
    +doc         show documentation for package or symbol
    +env         print Go environment information
    +bug         start a bug report
    +fix         run go tool fix on packages
    +fmt         run gofmt on package sources
    +generate    generate Go files by processing source
    +get         download and install packages and dependencies
    +install     compile and install packages and dependencies
    +list        list packages
    +run         compile and run Go program
    +test        test packages
    +tool        run specified go tool
    +version     print Go version
    +vet         run go tool vet on packages
    +
    +

    +Use "go help [command]" for more information about a command. +

    +

    +Additional help topics: +

    +
    c           calling between Go and C
    +buildmode   description of build modes
    +filetype    file types
    +gopath      GOPATH environment variable
    +environment environment variables
    +importpath  import path syntax
    +packages    description of package lists
    +testflag    description of testing flags
    +testfunc    description of testing functions
    +
    +

    +Use "go help [topic]" for more information about that topic. +

    +

    Compile packages and dependencies

    +

    +Usage: +

    +
    go build [-o output] [-i] [build flags] [packages]
    +
    +

    +Build compiles the packages named by the import paths, +along with their dependencies, but it does not install the results. +

    +

    +If the arguments to build are a list of .go files, build treats +them as a list of source files specifying a single package. +

    +

    +When compiling a single main package, build writes +the resulting executable to an output file named after +the first source file ('go build ed.go rx.go' writes 'ed' or 'ed.exe') +or the source code directory ('go build unix/sam' writes 'sam' or 'sam.exe'). +The '.exe' suffix is added when writing a Windows executable. +

    +

    +When compiling multiple packages or a single non-main package, +build compiles the packages but discards the resulting object, +serving only as a check that the packages can be built. +

    +

    +When compiling packages, build ignores files that end in '_test.go'. +

    +

    +The -o flag, only allowed when compiling a single package, +forces build to write the resulting executable or object +to the named output file, instead of the default behavior described +in the last two paragraphs. +

    +

    +The -i flag installs the packages that are dependencies of the target. +

    +

    +The build flags are shared by the build, clean, get, install, list, run, +and test commands: +

    +
    -a
    +	force rebuilding of packages that are already up-to-date.
    +-n
    +	print the commands but do not run them.
    +-p n
    +	the number of programs, such as build commands or
    +	test binaries, that can be run in parallel.
    +	The default is the number of CPUs available.
    +-race
    +	enable data race detection.
    +	Supported only on linux/amd64, freebsd/amd64, darwin/amd64 and windows/amd64.
    +-msan
    +	enable interoperation with memory sanitizer.
    +	Supported only on linux/amd64,
    +	and only with Clang/LLVM as the host C compiler.
    +-v
    +	print the names of packages as they are compiled.
    +-work
    +	print the name of the temporary work directory and
    +	do not delete it when exiting.
    +-x
    +	print the commands.
    +
    +-asmflags 'flag list'
    +	arguments to pass on each go tool asm invocation.
    +-buildmode mode
    +	build mode to use. See 'go help buildmode' for more.
    +-compiler name
    +	name of compiler to use, as in runtime.Compiler (gccgo or gc).
    +-gccgoflags 'arg list'
    +	arguments to pass on each gccgo compiler/linker invocation.
    +-gcflags 'arg list'
    +	arguments to pass on each go tool compile invocation.
    +-installsuffix suffix
    +	a suffix to use in the name of the package installation directory,
    +	in order to keep output separate from default builds.
    +	If using the -race flag, the install suffix is automatically set to race
    +	or, if set explicitly, has _race appended to it.  Likewise for the -msan
    +	flag.  Using a -buildmode option that requires non-default compile flags
    +	has a similar effect.
    +-ldflags 'flag list'
    +	arguments to pass on each go tool link invocation.
    +-linkshared
    +	link against shared libraries previously created with
    +	-buildmode=shared.
    +-pkgdir dir
    +	install and load all packages from dir instead of the usual locations.
    +	For example, when building with a non-standard configuration,
    +	use -pkgdir to keep generated packages in a separate location.
    +-tags 'tag list'
    +	a list of build tags to consider satisfied during the build.
    +	For more information about build tags, see the description of
    +	build constraints in the documentation for the go/build package.
    +-toolexec 'cmd args'
    +	a program to use to invoke toolchain programs like vet and asm.
    +	For example, instead of running asm, the go command will run
    +	'cmd args /path/to/asm <arguments for asm>'.
    +
    +
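+
+For illustration, a single invocation might combine several of these flags
+(the import path example.com/cmd/app is hypothetical):
+
+	go build -v -race -o app example.com/cmd/app
+
+This builds that main package with the race detector enabled, prints package
+names as they are compiled, and writes the executable to ./app. +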

    +The list flags accept a space-separated list of strings. To embed spaces +in an element in the list, surround it with either single or double quotes. +

    +

    +For more about specifying packages, see 'go help packages'. +For more about where packages and binaries are installed, +run 'go help gopath'. +For more about calling between Go and C/C++, run 'go help c'. +

    +

    +Note: Build adheres to certain conventions such as those described +by 'go help gopath'. Not all projects can follow these conventions, +however. Installations that have their own conventions or that use +a separate software build system may choose to use lower-level +invocations such as 'go tool compile' and 'go tool link' to avoid +some of the overheads and design decisions of the build tool. +

    +

    +See also: go install, go get, go clean. +

    +

    Remove object files

    +

    +Usage: +

    +
    go clean [-i] [-r] [-n] [-x] [build flags] [packages]
    +
    +

    +Clean removes object files from package source directories. +The go command builds most objects in a temporary directory, +so go clean is mainly concerned with object files left by other +tools or by manual invocations of go build. +

    +

    +Specifically, clean removes the following files from each of the +source directories corresponding to the import paths: +

    +
    _obj/            old object directory, left from Makefiles
    +_test/           old test directory, left from Makefiles
    +_testmain.go     old gotest file, left from Makefiles
    +test.out         old test log, left from Makefiles
    +build.out        old test log, left from Makefiles
    +*.[568ao]        object files, left from Makefiles
    +
    +DIR(.exe)        from go build
    +DIR.test(.exe)   from go test -c
    +MAINFILE(.exe)   from go build MAINFILE.go
    +*.so             from SWIG
    +
    +

    +In the list, DIR represents the final path element of the +directory, and MAINFILE is the base name of any Go source +file in the directory that is not included when building +the package. +

    +

    +The -i flag causes clean to remove the corresponding installed +archive or binary (what 'go install' would create). +

    +

    +The -n flag causes clean to print the remove commands it would execute, +but not run them. +

    +

    +The -r flag causes clean to be applied recursively to all the +dependencies of the packages named by the import paths. +

    +

    +The -x flag causes clean to print remove commands as it executes them. +
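+
+As a sketch of how these flags combine (the import path is hypothetical):
+
+	go clean -i -n example.com/mytool
+
+prints, without running them, the commands that would remove both the
+leftover object files and the installed binary for example.com/mytool. +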

    +

    +For more about build flags, see 'go help build'. +

    +

    +For more about specifying packages, see 'go help packages'. +

    +

    Show documentation for package or symbol

    +

    +Usage: +

    +
    go doc [-u] [-c] [package|[package.]symbol[.method]]
    +
    +

    +Doc prints the documentation comments associated with the item identified by its +arguments (a package, const, func, type, var, or method) followed by a one-line +summary of each of the first-level items "under" that item (package-level +declarations for a package, methods for a type, etc.). +

    +

    +Doc accepts zero, one, or two arguments. +

    +

    +Given no arguments, that is, when run as +

    +
    go doc
    +
    +

    +it prints the package documentation for the package in the current directory. +If the package is a command (package main), the exported symbols of the package +are elided from the presentation unless the -cmd flag is provided. +

    +

    +When run with one argument, the argument is treated as a Go-syntax-like +representation of the item to be documented. What the argument selects depends +on what is installed in GOROOT and GOPATH, as well as the form of the argument, +which is schematically one of these: +

    +
    go doc <pkg>
    +go doc <sym>[.<method>]
    +go doc [<pkg>.]<sym>[.<method>]
    +go doc [<pkg>.][<sym>.]<method>
    +
    +

    +The first item in this list matched by the argument is the one whose documentation +is printed. (See the examples below.) However, if the argument starts with a capital +letter it is assumed to identify a symbol or method in the current directory. +

    +

    +For packages, the order of scanning is determined lexically in breadth-first order. +That is, the package presented is the one that matches the search and is nearest +the root and lexically first at its level of the hierarchy. The GOROOT tree is +always scanned in its entirety before GOPATH. +

    +

    +If there is no package specified or matched, the package in the current +directory is selected, so "go doc Foo" shows the documentation for symbol Foo in +the current package. +

    +

    +The package path must be either a qualified path or a proper suffix of a +path. The go tool's usual package mechanism does not apply: package path +elements like . and ... are not implemented by go doc. +

    +

    +When run with two arguments, the first must be a full package path (not just a +suffix), and the second is a symbol or symbol and method; this is similar to the +syntax accepted by godoc: +

    +
    go doc <pkg> <sym>[.<method>]
    +
    +

    +In all forms, when matching symbols, lower-case letters in the argument match +either case but upper-case letters match exactly. This means that there may be +multiple matches of a lower-case argument in a package if different symbols have +different cases. If this occurs, documentation for all matches is printed. +

    +

    +Examples: +

    +
    go doc
    +	Show documentation for current package.
    +go doc Foo
    +	Show documentation for Foo in the current package.
    +	(Foo starts with a capital letter so it cannot match
    +	a package path.)
    +go doc encoding/json
    +	Show documentation for the encoding/json package.
    +go doc json
    +	Shorthand for encoding/json.
    +go doc json.Number (or go doc json.number)
    +	Show documentation and method summary for json.Number.
    +go doc json.Number.Int64 (or go doc json.number.int64)
    +	Show documentation for json.Number's Int64 method.
    +go doc cmd/doc
    +	Show package docs for the doc command.
    +go doc -cmd cmd/doc
    +	Show package docs and exported symbols within the doc command.
    +go doc template.new
    +	Show documentation for html/template's New function.
    +	(html/template is lexically before text/template)
    +go doc text/template.new # One argument
    +	Show documentation for text/template's New function.
    +go doc text/template new # Two arguments
    +	Show documentation for text/template's New function.
    +
    +At least in the current tree, these invocations all print the
    +documentation for json.Decoder's Decode method:
    +
    +go doc json.Decoder.Decode
    +go doc json.decoder.decode
    +go doc json.decode
    +cd go/src/encoding/json; go doc decode
    +
    +

    +Flags: +

    +
    -c
    +	Respect case when matching symbols.
    +-cmd
    +	Treat a command (package main) like a regular package.
    +	Otherwise package main's exported symbols are hidden
    +	when showing the package's top-level documentation.
    +-u
    +	Show documentation for unexported as well as exported
    +	symbols and methods.
    +
    +

    Print Go environment information

    +

    +Usage: +

    +
    go env [var ...]
    +
    +

    +Env prints Go environment information. +

    +

    +By default env prints information as a shell script +(on Windows, a batch file). If one or more variable +names is given as arguments, env prints the value of +each named variable on its own line. +
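+
+For a quick illustration, running
+
+	go env GOPATH GOOS
+
+prints the value of GOPATH on one line and the value of GOOS on the next. +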

    +

    Start a bug report

    +

    +Usage: +

    +
    go bug
    +
    +

    +Bug opens the default browser and starts a new bug report. +The report includes useful system information. +

    +

    Run go tool fix on packages

    +

    +Usage: +

    +
    go fix [packages]
    +
    +

    +Fix runs the Go fix command on the packages named by the import paths. +

    +

    +For more about fix, see 'go doc cmd/fix'. +For more about specifying packages, see 'go help packages'. +

    +

    +To run fix with specific options, run 'go tool fix'. +

    +

    +See also: go fmt, go vet. +

    +

    Run gofmt on package sources

    +

    +Usage: +

    +
    go fmt [-n] [-x] [packages]
    +
    +

    +Fmt runs the command 'gofmt -l -w' on the packages named +by the import paths. It prints the names of the files that are modified. +

    +

    +For more about gofmt, see 'go doc cmd/gofmt'. +For more about specifying packages, see 'go help packages'. +

    +

    +The -n flag prints commands that would be executed. +The -x flag prints commands as they are executed. +

    +

    +To run gofmt with specific options, run gofmt itself. +

    +

    +See also: go fix, go vet. +

    +

    Generate Go files by processing source

    +

    +Usage: +

    +
    go generate [-run regexp] [-n] [-v] [-x] [build flags] [file.go... | packages]
    +
    +

    +Generate runs commands described by directives within existing +files. Those commands can run any process but the intent is to +create or update Go source files. +

    +

    +Go generate is never run automatically by go build, go get, go test, +and so on. It must be run explicitly. +

    +

    +Go generate scans the file for directives, which are lines of +the form, +

    +
    //go:generate command argument...
    +
    +

    +(note: no leading spaces and no space in "//go") where command +is the generator to be run, corresponding to an executable file +that can be run locally. It must either be in the shell path +(gofmt), a fully qualified path (/usr/you/bin/mytool), or a +command alias, described below. +
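+
+For example, a package might carry a directive like this (assuming the
+stringer tool from golang.org/x/tools/cmd/stringer is installed):
+
+	package painkiller
+
+	//go:generate stringer -type=Pill
+
+Running 'go generate' in that package invokes stringer with the single
+argument -type=Pill. +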

    +

    +Note that go generate does not parse the file, so lines that look +like directives in comments or multiline strings will be treated +as directives. +

    +

    +The arguments to the directive are space-separated tokens or +double-quoted strings passed to the generator as individual +arguments when it is run. +

    +

    +Quoted strings use Go syntax and are evaluated before execution; a +quoted string appears as a single argument to the generator. +

    +

    +Go generate sets several variables when it runs the generator: +

    +
    $GOARCH
    +	The execution architecture (arm, amd64, etc.)
    +$GOOS
    +	The execution operating system (linux, windows, etc.)
    +$GOFILE
    +	The base name of the file.
    +$GOLINE
    +	The line number of the directive in the source file.
    +$GOPACKAGE
    +	The name of the package of the file containing the directive.
    +$DOLLAR
    +	A dollar sign.
    +
    +

    +Other than variable substitution and quoted-string evaluation, no +special processing such as "globbing" is performed on the command +line. +

    +

    +As a last step before running the command, any invocations of any +environment variables with alphanumeric names, such as $GOFILE or +$HOME, are expanded throughout the command line. The syntax for +variable expansion is $NAME on all operating systems. Due to the +order of evaluation, variables are expanded even inside quoted +strings. If the variable NAME is not set, $NAME expands to the +empty string. +

    +

    +A directive of the form, +

    +
    //go:generate -command xxx args...
    +
    +

    +specifies, for the remainder of this source file only, that the +string xxx represents the command identified by the arguments. This +can be used to create aliases or to handle multiword generators. +For example, +

    +
    //go:generate -command foo go tool foo
    +
    +

    +specifies that the command "foo" represents the generator +"go tool foo". +
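+
+A sketch of an alias in use (all names here are hypothetical):
+
+	//go:generate -command mockgen go run example.com/tools/mockgen
+	//go:generate mockgen -source=api.go
+
+Here the second directive expands to
+'go run example.com/tools/mockgen -source=api.go'. +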

    +

    +Generate processes packages in the order given on the command line, +one at a time. If the command line lists .go files, they are treated +as a single package. Within a package, generate processes the +source files in a package in file name order, one at a time. Within +a source file, generate runs generators in the order they appear +in the file, one at a time. +

    +

    +If any generator returns an error exit status, "go generate" skips +all further processing for that package. +

    +

    +The generator is run in the package's source directory. +

    +

    +Go generate accepts one specific flag: +

    +
    -run=""
    +	if non-empty, specifies a regular expression to select
    +	directives whose full original source text (excluding
    +	any trailing spaces and final newline) matches the
    +	expression.
    +
    +

    +It also accepts the standard build flags including -v, -n, and -x. +The -v flag prints the names of packages and files as they are +processed. +The -n flag prints commands that would be executed. +The -x flag prints commands as they are executed. +

    +

    +For more about build flags, see 'go help build'. +

    +

    +For more about specifying packages, see 'go help packages'. +

    +

    Download and install packages and dependencies

    +

    +Usage: +

    +
    go get [-d] [-f] [-fix] [-insecure] [-t] [-u] [build flags] [packages]
    +
    +

    +Get downloads the packages named by the import paths, along with their +dependencies. It then installs the named packages, like 'go install'. +

    +

    +The -d flag instructs get to stop after downloading the packages; that is, +it instructs get not to install the packages. +

    +

    +The -f flag, valid only when -u is set, forces get -u not to verify that +each package has been checked out from the source control repository +implied by its import path. This can be useful if the source is a local fork +of the original. +

    +

    +The -fix flag instructs get to run the fix tool on the downloaded packages +before resolving dependencies or building the code. +

    +

    +The -insecure flag permits fetching from repositories and resolving +custom domains using insecure schemes such as HTTP. Use with caution. +

    +

    +The -t flag instructs get to also download the packages required to build +the tests for the specified packages. +

    +

    +The -u flag instructs get to use the network to update the named packages +and their dependencies. By default, get uses the network to check out +missing packages but does not use it to look for updates to existing packages. +
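+
+For instance, to update an already-checked-out package and everything it
+depends on, one might run:
+
+	go get -u golang.org/x/net/html
+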

    +

    +The -v flag enables verbose progress and debug output. +

    +

    +Get also accepts build flags to control the installation. See 'go help build'. +

    +

    +When checking out a new package, get creates the target directory +GOPATH/src/<import-path>. If the GOPATH contains multiple entries, +get uses the first one. For more details see: 'go help gopath'. +

    +

    +When checking out or updating a package, get looks for a branch or tag +that matches the locally installed version of Go. The most important +rule is that if the local installation is running version "go1", get +searches for a branch or tag named "go1". If no such version exists it +retrieves the most recent version of the package. +

    +

    +When go get checks out or updates a Git repository, +it also updates any git submodules referenced by the repository. +

    +

    +Get never checks out or updates code stored in vendor directories. +

    +

    +For more about specifying packages, see 'go help packages'. +

    +

    +For more about how 'go get' finds source code to +download, see 'go help importpath'. +

    +

    +See also: go build, go install, go clean. +

    +

    Compile and install packages and dependencies

    +

    +Usage: +

    +
    go install [build flags] [packages]
    +
    +

    +Install compiles and installs the packages named by the import paths, +along with their dependencies. +

    +

    +For more about the build flags, see 'go help build'. +For more about specifying packages, see 'go help packages'. +

    +

    +See also: go build, go get, go clean. +

    +

    List packages

    +

    +Usage: +

    +
    go list [-e] [-f format] [-json] [build flags] [packages]
    +
    +

    +List lists the packages named by the import paths, one per line. +

    +

    +The default output shows the package import path: +

    +
    bytes
    +encoding/json
    +github.com/gorilla/mux
    +golang.org/x/net/html
    +
    +

    +The -f flag specifies an alternate format for the list, using the +syntax of package template. The default output is equivalent to -f +''. The struct being passed to the template is: +

    +
    type Package struct {
    +    Dir           string // directory containing package sources
    +    ImportPath    string // import path of package in dir
    +    ImportComment string // path in import comment on package statement
    +    Name          string // package name
    +    Doc           string // package documentation string
    +    Target        string // install path
    +    Shlib         string // the shared library that contains this package (only set when -linkshared)
    +    Goroot        bool   // is this package in the Go root?
    +    Standard      bool   // is this package part of the standard Go library?
    +    Stale         bool   // would 'go install' do anything for this package?
    +    StaleReason   string // explanation for Stale==true
    +    Root          string // Go root or Go path dir containing this package
    +    ConflictDir   string // this directory shadows Dir in $GOPATH
    +    BinaryOnly    bool   // binary-only package: cannot be recompiled from sources
    +
    +    // Source files
    +    GoFiles        []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles)
+    CgoFiles       []string // .go source files that import "C"
    +    IgnoredGoFiles []string // .go sources ignored due to build constraints
    +    CFiles         []string // .c source files
    +    CXXFiles       []string // .cc, .cxx and .cpp source files
    +    MFiles         []string // .m source files
    +    HFiles         []string // .h, .hh, .hpp and .hxx source files
    +    FFiles         []string // .f, .F, .for and .f90 Fortran source files
    +    SFiles         []string // .s source files
    +    SwigFiles      []string // .swig files
    +    SwigCXXFiles   []string // .swigcxx files
    +    SysoFiles      []string // .syso object files to add to archive
    +    TestGoFiles    []string // _test.go files in package
    +    XTestGoFiles   []string // _test.go files outside package
    +
    +    // Cgo directives
    +    CgoCFLAGS    []string // cgo: flags for C compiler
    +    CgoCPPFLAGS  []string // cgo: flags for C preprocessor
    +    CgoCXXFLAGS  []string // cgo: flags for C++ compiler
    +    CgoFFLAGS    []string // cgo: flags for Fortran compiler
    +    CgoLDFLAGS   []string // cgo: flags for linker
    +    CgoPkgConfig []string // cgo: pkg-config names
    +
    +    // Dependency information
    +    Imports      []string // import paths used by this package
    +    Deps         []string // all (recursively) imported dependencies
    +    TestImports  []string // imports from TestGoFiles
    +    XTestImports []string // imports from XTestGoFiles
    +
    +    // Error information
    +    Incomplete bool            // this package or a dependency has an error
    +    Error      *PackageError   // error loading package
    +    DepsErrors []*PackageError // errors loading dependencies
    +}
    +
    +

+Packages stored in vendor directories report an ImportPath that includes the +path to the vendor directory (for example, "d/vendor/p" instead of "p"), +so that the ImportPath uniquely identifies a given copy of a package. +The Imports, Deps, TestImports, and XTestImports lists also contain these +expanded import paths. See golang.org/s/go15vendor for more about vendoring. +

    +

    +The error information, if any, is +

    +
    type PackageError struct {
    +    ImportStack   []string // shortest path from package named on command line to this one
    +    Pos           string   // position of error (if present, file:line:col)
    +    Err           string   // the error itself
    +}
    +
    +

    +The template function "join" calls strings.Join. +

    +
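As an illustration, a sketch of an invocation combining -f with the "join" function just described (the package argument is arbitrary):

	go list -f '{{.ImportPath}} depends on: {{join .Deps " "}}' fmt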

    +The template function "context" returns the build context, defined as: +

    +
    type Context struct {
    +	GOARCH        string   // target architecture
    +	GOOS          string   // target operating system
    +	GOROOT        string   // Go root
    +	GOPATH        string   // Go path
    +	CgoEnabled    bool     // whether cgo can be used
    +	UseAllFiles   bool     // use files regardless of +build lines, file names
    +	Compiler      string   // compiler to assume when computing target paths
    +	BuildTags     []string // build constraints to match in +build lines
    +	ReleaseTags   []string // releases the current release is compatible with
    +	InstallSuffix string   // suffix to use in the name of the install dir
    +}
    +
    +

    +For more information about the meaning of these fields see the documentation +for the go/build package's Context type. +

    +

    +The -json flag causes the package data to be printed in JSON format +instead of using the template format. +

    +

    +The -e flag changes the handling of erroneous packages, those that +cannot be found or are malformed. By default, the list command +prints an error to standard error for each erroneous package and +omits the packages from consideration during the usual printing. +With the -e flag, the list command never prints errors to standard +error and instead processes the erroneous packages with the usual +printing. Erroneous packages will have a non-empty ImportPath and +a non-nil Error field; other information may or may not be missing +(zeroed). +

    +

    +For more about build flags, see 'go help build'. +

    +

    +For more about specifying packages, see 'go help packages'. +

    +

    Compile and run Go program

    +

    +Usage: +

    +
    go run [build flags] [-exec xprog] gofiles... [arguments...]
    +
    +

    +Run compiles and runs the main package comprising the named Go source files. +A Go source file is defined to be a file ending in a literal ".go" suffix. +

    +

    +By default, 'go run' runs the compiled binary directly: 'a.out arguments...'. +If the -exec flag is given, 'go run' invokes the binary using xprog: +

    +
    'xprog a.out arguments...'.
    +
    +

    +If the -exec flag is not given, GOOS or GOARCH is different from the system +default, and a program named go_$GOOS_$GOARCH_exec can be found +on the current search path, 'go run' invokes the binary using that program, +for example 'go_nacl_386_exec a.out arguments...'. This allows execution of +cross-compiled programs when a simulator or other execution method is +available. +

    +

    +For more about build flags, see 'go help build'. +

    +

    +See also: go build. +

    +

    Test packages

    +

    +Usage: +

    +
    go test [build/test flags] [packages] [build/test flags & test binary flags]
    +
    +

    +'Go test' automates testing the packages named by the import paths. +It prints a summary of the test results in the format: +

    +
    ok   archive/tar   0.011s
    +FAIL archive/zip   0.022s
    +ok   compress/gzip 0.033s
    +...
    +
    +

    +followed by detailed output for each failed package. +

    +

    +'Go test' recompiles each package along with any files with names matching +the file pattern "*_test.go". +Files whose names begin with "_" (including "_test.go") or "." are ignored. +These additional files can contain test functions, benchmark functions, and +example functions. See 'go help testfunc' for more. +Each listed package causes the execution of a separate test binary. +

    +

    +Test files that declare a package with the suffix "_test" will be compiled as a +separate package, and then linked and run with the main test binary. +

    +

    +The go tool will ignore a directory named "testdata", making it available +to hold ancillary data needed by the tests. +

    +

    +By default, go test needs no arguments. It compiles and tests the package +with source in the current directory, including tests, and runs the tests. +

    +

    +The package is built in a temporary directory so it does not interfere with the +non-test installation. +

    +

    +In addition to the build flags, the flags handled by 'go test' itself are: +

    +
    -args
    +    Pass the remainder of the command line (everything after -args)
    +    to the test binary, uninterpreted and unchanged.
    +    Because this flag consumes the remainder of the command line,
    +    the package list (if present) must appear before this flag.
    +
    +-c
    +    Compile the test binary to pkg.test but do not run it
    +    (where pkg is the last element of the package's import path).
    +    The file name can be changed with the -o flag.
    +
    +-exec xprog
    +    Run the test binary using xprog. The behavior is the same as
    +    in 'go run'. See 'go help run' for details.
    +
    +-i
    +    Install packages that are dependencies of the test.
    +    Do not run the test.
    +
    +-o file
    +    Compile the test binary to the named file.
    +    The test still runs (unless -c or -i is specified).
    +
    +

    +The test binary also accepts flags that control execution of the test; these +flags are also accessible by 'go test'. See 'go help testflag' for details. +

    +

    +For more about build flags, see 'go help build'. +For more about specifying packages, see 'go help packages'. +

    +

    +See also: go build, go vet. +

    +

    Run specified go tool

    +

    +Usage: +

    +
    go tool [-n] command [args...]
    +
    +

    +Tool runs the go tool command identified by the arguments. +With no arguments it prints the list of known tools. +

    +

    +The -n flag causes tool to print the command that would be +executed but not execute it. +

    +

    +For more about each tool command, see 'go tool command -h'. +

    +

    Print Go version

    +

    +Usage: +

    +
    go version
    +
    +

    +Version prints the Go version, as reported by runtime.Version. +

    +

    Run go tool vet on packages

    +

    +Usage: +

    +
    go vet [-n] [-x] [build flags] [packages]
    +
    +

    +Vet runs the Go vet command on the packages named by the import paths. +

    +

    +For more about vet, see 'go doc cmd/vet'. +For more about specifying packages, see 'go help packages'. +

    +

    +To run the vet tool with specific options, run 'go tool vet'. +

    +

    +The -n flag prints commands that would be executed. +The -x flag prints commands as they are executed. +

    +

    +For more about build flags, see 'go help build'. +

    +

    +See also: go fmt, go fix. +

    +

    Calling between Go and C

    +

    +There are two different ways to call between Go and C/C++ code. +

    +

    +The first is the cgo tool, which is part of the Go distribution. For +information on how to use it see the cgo documentation (go doc cmd/cgo). +

    +
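A minimal cgo sketch, assuming a working C toolchain; the hello function exists only for this illustration. The comment immediately preceding import "C" is compiled as C code:

	package main

	/*
	#include <stdio.h>
	static void hello() { printf("hello from C\n"); }
	*/
	import "C"

	func main() {
		C.hello()
	}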

    +The second is the SWIG program, which is a general tool for +interfacing between languages. For information on SWIG see +http://swig.org/. When running go build, any file with a .swig +extension will be passed to SWIG. Any file with a .swigcxx extension +will be passed to SWIG with the -c++ option. +

    +

    +When either cgo or SWIG is used, go build will pass any .c, .m, .s, +or .S files to the C compiler, and any .cc, .cpp, .cxx files to the C++ +compiler. The CC or CXX environment variables may be set to determine +the C or C++ compiler, respectively, to use. +

    +

    Description of build modes

    +

    +The 'go build' and 'go install' commands take a -buildmode argument which +indicates which kind of object file is to be built. Currently supported values +are: +

    +
    -buildmode=archive
    +	Build the listed non-main packages into .a files. Packages named
    +	main are ignored.
    +
    +-buildmode=c-archive
    +	Build the listed main package, plus all packages it imports,
    +	into a C archive file. The only callable symbols will be those
    +	functions exported using a cgo //export comment. Requires
    +	exactly one main package to be listed.
    +
    +-buildmode=c-shared
    +	Build the listed main packages, plus all packages that they
    +	import, into C shared libraries. The only callable symbols will
    +	be those functions exported using a cgo //export comment.
    +	Non-main packages are ignored.
    +
    +-buildmode=default
    +	Listed main packages are built into executables and listed
    +	non-main packages are built into .a files (the default
    +	behavior).
    +
    +-buildmode=shared
    +	Combine all the listed non-main packages into a single shared
    +	library that will be used when building with the -linkshared
    +	option. Packages named main are ignored.
    +
    +-buildmode=exe
    +	Build the listed main packages and everything they import into
    +	executables. Packages not named main are ignored.
    +
    +-buildmode=pie
    +	Build the listed main packages and everything they import into
    +	position independent executables (PIE). Packages not named
    +	main are ignored.
    +
    +-buildmode=plugin
    +	Build the listed main packages, plus all packages that they
    +	import, into a Go plugin. Packages not named main are ignored.
    +
    +
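For example, a hypothetical command that builds a single main package into a C archive (output name and package path are arbitrary):

	go build -buildmode=c-archive -o libfoo.a ./foo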

    File types

    +

    +The go command examines the contents of a restricted set of files +in each directory. It identifies which files to examine based on +the extension of the file name. These extensions are: +

    +
    .go
    +	Go source files.
    +.c, .h
    +	C source files.
    +	If the package uses cgo or SWIG, these will be compiled with the
    +	OS-native compiler (typically gcc); otherwise they will
    +	trigger an error.
    +.cc, .cpp, .cxx, .hh, .hpp, .hxx
    +	C++ source files. Only useful with cgo or SWIG, and always
    +	compiled with the OS-native compiler.
    +.m
    +	Objective-C source files. Only useful with cgo, and always
    +	compiled with the OS-native compiler.
    +.s, .S
    +	Assembler source files.
    +	If the package uses cgo or SWIG, these will be assembled with the
    +	OS-native assembler (typically gcc (sic)); otherwise they
    +	will be assembled with the Go assembler.
    +.swig, .swigcxx
    +	SWIG definition files.
    +.syso
    +	System object files.
    +
    +

    +Files of each of these types except .syso may contain build +constraints, but the go command stops scanning for build constraints +at the first item in the file that is not a blank line or //-style +line comment. See the go/build package documentation for +more details. +

    +
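A sketch of a build constraint as it would appear at the top of a .go file (the tags are arbitrary); note the blank line required between the constraint comment and the package clause:

	// +build linux,386 darwin,!cgo

	package foo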

    +Non-test Go source files can also include a //go:binary-only-package +comment, indicating that the package sources are included +for documentation only and must not be used to build the +package binary. This enables distribution of Go packages in +their compiled form alone. See the go/build package documentation +for more details. +

    +

    GOPATH environment variable

    +

    +The Go path is used to resolve import statements. +It is implemented by and documented in the go/build package. +

    +

    +The GOPATH environment variable lists places to look for Go code. +On Unix, the value is a colon-separated string. +On Windows, the value is a semicolon-separated string. +On Plan 9, the value is a list. +

    +

    +If the environment variable is unset, GOPATH defaults +to a subdirectory named "go" in the user's home directory +($HOME/go on Unix, %USERPROFILE%\go on Windows), +unless that directory holds a Go distribution. +Run "go env GOPATH" to see the current GOPATH. +

    +

    +See https://golang.org/wiki/SettingGOPATH to set a custom GOPATH. +

    +

    +Each directory listed in GOPATH must have a prescribed structure: +

    +

    +The src directory holds source code. The path below src +determines the import path or executable name. +

    +

    +The pkg directory holds installed package objects. +As in the Go tree, each target operating system and +architecture pair has its own subdirectory of pkg +(pkg/GOOS_GOARCH). +

    +

    +If DIR is a directory listed in the GOPATH, a package with +source in DIR/src/foo/bar can be imported as "foo/bar" and +has its compiled form installed to "DIR/pkg/GOOS_GOARCH/foo/bar.a". +

    +

    +The bin directory holds compiled commands. +Each command is named for its source directory, but only +the final element, not the entire path. That is, the +command with source in DIR/src/foo/quux is installed into +DIR/bin/quux, not DIR/bin/foo/quux. The "foo/" prefix is stripped +so that you can add DIR/bin to your PATH to get at the +installed commands. If the GOBIN environment variable is +set, commands are installed to the directory it names instead +of DIR/bin. GOBIN must be an absolute path. +

    +

    +Here's an example directory layout: +

    +
    GOPATH=/home/user/go
    +
    +/home/user/go/
    +    src/
    +        foo/
    +            bar/               (go code in package bar)
    +                x.go
    +            quux/              (go code in package main)
    +                y.go
    +    bin/
    +        quux                   (installed command)
    +    pkg/
    +        linux_amd64/
    +            foo/
    +                bar.a          (installed package object)
    +
    +

    +Go searches each directory listed in GOPATH to find source code, +but new packages are always downloaded into the first directory +in the list. +

    +

    +See https://golang.org/doc/code.html for an example. +

    +

    Internal Directories

    +

    +Code in or below a directory named "internal" is importable only +by code in the directory tree rooted at the parent of "internal". +Here's an extended version of the directory layout above: +

    +
    /home/user/go/
    +    src/
    +        crash/
    +            bang/              (go code in package bang)
    +                b.go
    +        foo/                   (go code in package foo)
    +            f.go
    +            bar/               (go code in package bar)
    +                x.go
    +            internal/
    +                baz/           (go code in package baz)
    +                    z.go
    +            quux/              (go code in package main)
    +                y.go
    +
    +

    +The code in z.go is imported as "foo/internal/baz", but that +import statement can only appear in source files in the subtree +rooted at foo. The source files foo/f.go, foo/bar/x.go, and +foo/quux/y.go can all import "foo/internal/baz", but the source file +crash/bang/b.go cannot. +

    +

    +See https://golang.org/s/go14internal for details. +

    +

    Vendor Directories

    +

    +Go 1.6 includes support for using local copies of external dependencies +to satisfy imports of those dependencies, often referred to as vendoring. +

    +

    +Code below a directory named "vendor" is importable only +by code in the directory tree rooted at the parent of "vendor", +and only using an import path that omits the prefix up to and +including the vendor element. +

    +

    +Here's the example from the previous section, +but with the "internal" directory renamed to "vendor" +and a new foo/vendor/crash/bang directory added: +

    +
    /home/user/go/
    +    src/
    +        crash/
    +            bang/              (go code in package bang)
    +                b.go
    +        foo/                   (go code in package foo)
    +            f.go
    +            bar/               (go code in package bar)
    +                x.go
    +            vendor/
    +                crash/
    +                    bang/      (go code in package bang)
    +                        b.go
    +                baz/           (go code in package baz)
    +                    z.go
    +            quux/              (go code in package main)
    +                y.go
    +
    +

    +The same visibility rules apply as for internal, but the code +in z.go is imported as "baz", not as "foo/vendor/baz". +

    +

    +Code in vendor directories deeper in the source tree shadows +code in higher directories. Within the subtree rooted at foo, an import +of "crash/bang" resolves to "foo/vendor/crash/bang", not the +top-level "crash/bang". +

    +

    +Code in vendor directories is not subject to import path +checking (see 'go help importpath'). +

    +

    +When 'go get' checks out or updates a git repository, it now also +updates submodules. +

    +

    +Vendor directories do not affect the placement of new repositories +being checked out for the first time by 'go get': those are always +placed in the main GOPATH, never in a vendor subtree. +

    +

    +See https://golang.org/s/go15vendor for details. +

    +

    Environment variables

    +

+The go command, and the tools it invokes, examine a few different +environment variables. For many of these, you can see the default +value on your system by running 'go env NAME', where NAME is the +name of the variable. +

    +

    +General-purpose environment variables: +

    +
    GCCGO
    +	The gccgo command to run for 'go build -compiler=gccgo'.
    +GOARCH
    +	The architecture, or processor, for which to compile code.
    +	Examples are amd64, 386, arm, ppc64.
    +GOBIN
    +	The directory where 'go install' will install a command.
    +GOOS
    +	The operating system for which to compile code.
    +	Examples are linux, darwin, windows, netbsd.
    +GOPATH
    +	For more details see: 'go help gopath'.
    +GORACE
    +	Options for the race detector.
    +	See https://golang.org/doc/articles/race_detector.html.
    +GOROOT
    +	The root of the go tree.
    +
    +

    +Environment variables for use with cgo: +

    +
    CC
    +	The command to use to compile C code.
    +CGO_ENABLED
    +	Whether the cgo command is supported.  Either 0 or 1.
    +CGO_CFLAGS
    +	Flags that cgo will pass to the compiler when compiling
    +	C code.
    +CGO_CPPFLAGS
    +	Flags that cgo will pass to the compiler when compiling
    +	C or C++ code.
    +CGO_CXXFLAGS
    +	Flags that cgo will pass to the compiler when compiling
    +	C++ code.
    +CGO_FFLAGS
    +	Flags that cgo will pass to the compiler when compiling
    +	Fortran code.
    +CGO_LDFLAGS
    +	Flags that cgo will pass to the compiler when linking.
    +CXX
    +	The command to use to compile C++ code.
    +PKG_CONFIG
    +	Path to pkg-config tool.
    +
    +

    +Architecture-specific environment variables: +

    +
    GOARM
    +	For GOARCH=arm, the ARM architecture for which to compile.
    +	Valid values are 5, 6, 7.
    +GO386
    +	For GOARCH=386, the floating point instruction set.
    +	Valid values are 387, sse2.
    +
    +

    +Special-purpose environment variables: +

    +
    GOROOT_FINAL
    +	The root of the installed Go tree, when it is
    +	installed in a location other than where it is built.
    +	File names in stack traces are rewritten from GOROOT to
    +	GOROOT_FINAL.
    +GO_EXTLINK_ENABLED
    +	Whether the linker should use external linking mode
    +	when using -linkmode=auto with code that uses cgo.
    +	Set to 0 to disable external linking mode, 1 to enable it.
    +GIT_ALLOW_PROTOCOL
    +	Defined by Git. A colon-separated list of schemes that are allowed to be used
    +	with git fetch/clone. If set, any scheme not explicitly mentioned will be
    +	considered insecure by 'go get'.
    +
    +

    Import path syntax

    +

    +An import path (see 'go help packages') denotes a package stored in the local +file system. In general, an import path denotes either a standard package (such +as "unicode/utf8") or a package found in one of the work spaces (For more +details see: 'go help gopath'). +

    +

    Relative import paths

    +

    +An import path beginning with ./ or ../ is called a relative path. +The toolchain supports relative import paths as a shortcut in two ways. +

    +

    +First, a relative path can be used as a shorthand on the command line. +If you are working in the directory containing the code imported as +"unicode" and want to run the tests for "unicode/utf8", you can type +"go test ./utf8" instead of needing to specify the full path. +Similarly, in the reverse situation, "go test .." will test "unicode" from +the "unicode/utf8" directory. Relative patterns are also allowed, like +"go test ./..." to test all subdirectories. See 'go help packages' for details +on the pattern syntax. +

    +

    +Second, if you are compiling a Go program not in a work space, +you can use a relative path in an import statement in that program +to refer to nearby code also not in a work space. +This makes it easy to experiment with small multipackage programs +outside of the usual work spaces, but such programs cannot be +installed with "go install" (there is no work space in which to install them), +so they are rebuilt from scratch each time they are built. +To avoid ambiguity, Go programs cannot use relative import paths +within a work space. +

    +

    Remote import paths

    +

    +Certain import paths also +describe how to obtain the source code for the package using +a revision control system. +

    +

    +A few common code hosting sites have special syntax: +

    +
    Bitbucket (Git, Mercurial)
    +
    +	import "bitbucket.org/user/project"
    +	import "bitbucket.org/user/project/sub/directory"
    +
    +GitHub (Git)
    +
    +	import "github.com/user/project"
    +	import "github.com/user/project/sub/directory"
    +
    +Launchpad (Bazaar)
    +
    +	import "launchpad.net/project"
    +	import "launchpad.net/project/series"
    +	import "launchpad.net/project/series/sub/directory"
    +
    +	import "launchpad.net/~user/project/branch"
    +	import "launchpad.net/~user/project/branch/sub/directory"
    +
    +IBM DevOps Services (Git)
    +
    +	import "hub.jazz.net/git/user/project"
    +	import "hub.jazz.net/git/user/project/sub/directory"
    +
    +

    +For code hosted on other servers, import paths may either be qualified +with the version control type, or the go tool can dynamically fetch +the import path over https/http and discover where the code resides +from a <meta> tag in the HTML. +

    +

    +To declare the code location, an import path of the form +

    +
    repository.vcs/path
    +
    +

    +specifies the given repository, with or without the .vcs suffix, +using the named version control system, and then the path inside +that repository. The supported version control systems are: +

    +
    Bazaar      .bzr
    +Git         .git
    +Mercurial   .hg
    +Subversion  .svn
    +
    +

    +For example, +

    +
    import "example.org/user/foo.hg"
    +
    +

    +denotes the root directory of the Mercurial repository at +example.org/user/foo or foo.hg, and +

    +
    import "example.org/repo.git/foo/bar"
    +
    +

    +denotes the foo/bar directory of the Git repository at +example.org/repo or repo.git. +

    +

    +When a version control system supports multiple protocols, +each is tried in turn when downloading. For example, a Git +download tries https://, then git+ssh://. +

    +

    +By default, downloads are restricted to known secure protocols +(e.g. https, ssh). To override this setting for Git downloads, the +GIT_ALLOW_PROTOCOL environment variable can be set (For more details see: +'go help environment'). +

    +

    +If the import path is not a known code hosting site and also lacks a +version control qualifier, the go tool attempts to fetch the import +over https/http and looks for a <meta> tag in the document's HTML +<head>. +

    +

    +The meta tag has the form: +

    +
    <meta name="go-import" content="import-prefix vcs repo-root">
    +
    +

    +The import-prefix is the import path corresponding to the repository +root. It must be a prefix or an exact match of the package being +fetched with "go get". If it's not an exact match, another http +request is made at the prefix to verify the <meta> tags match. +

    +

    +The meta tag should appear as early in the file as possible. +In particular, it should appear before any raw JavaScript or CSS, +to avoid confusing the go command's restricted parser. +

    +

+The vcs is one of "git", "hg", "svn", etc. +

    +

    +The repo-root is the root of the version control system +containing a scheme and not containing a .vcs qualifier. +

    +

    +For example, +

    +
    import "example.org/pkg/foo"
    +
    +

    +will result in the following requests: +

    +
    https://example.org/pkg/foo?go-get=1 (preferred)
    +http://example.org/pkg/foo?go-get=1  (fallback, only with -insecure)
    +
    +

    +If that page contains the meta tag +

    +
    <meta name="go-import" content="example.org git https://code.org/r/p/exproj">
    +
    +

    +the go tool will verify that https://example.org/?go-get=1 contains the +same meta tag and then git clone https://code.org/r/p/exproj into +GOPATH/src/example.org. +

    +

    +New downloaded packages are written to the first directory listed in the GOPATH +environment variable (For more details see: 'go help gopath'). +

    +

    +The go command attempts to download the version of the +package appropriate for the Go release being used. +Run 'go help get' for more. +

    +

    Import path checking

    +

    +When the custom import path feature described above redirects to a +known code hosting site, each of the resulting packages has two possible +import paths, using the custom domain or the known hosting site. +

    +

    +A package statement is said to have an "import comment" if it is immediately +followed (before the next newline) by a comment of one of these two forms: +

    +
    package math // import "path"
    +package math /* import "path" */
    +
    +

    +The go command will refuse to install a package with an import comment +unless it is being referred to by that import path. In this way, import comments +let package authors make sure the custom import path is used and not a +direct path to the underlying code hosting site. +

    +

    +Import path checking is disabled for code found within vendor trees. +This makes it possible to copy code into alternate locations in vendor trees +without needing to update import comments. +

    +

    +See https://golang.org/s/go14customimport for details. +

    +

    Description of package lists

    +

    +Many commands apply to a set of packages: +

    +
    go action [packages]
    +
    +

    +Usually, [packages] is a list of import paths. +

    +

    +An import path that is a rooted path or that begins with +a . or .. element is interpreted as a file system path and +denotes the package in that directory. +

    +

    +Otherwise, the import path P denotes the package found in +the directory DIR/src/P for some DIR listed in the GOPATH +environment variable (For more details see: 'go help gopath'). +

    +

    +If no import paths are given, the action applies to the +package in the current directory. +

    +

    +There are four reserved names for paths that should not be used +for packages to be built with the go tool: +

    +

    +- "main" denotes the top-level package in a stand-alone executable. +

    +

    +- "all" expands to all package directories found in all the GOPATH +trees. For example, 'go list all' lists all the packages on the local +system. +

    +

    +- "std" is like all but expands to just the packages in the standard +Go library. +

    +

    +- "cmd" expands to the Go repository's commands and their +internal libraries. +

    +

    +Import paths beginning with "cmd/" only match source code in +the Go repository. +

    +

    +An import path is a pattern if it includes one or more "..." wildcards, +each of which can match any string, including the empty string and +strings containing slashes. Such a pattern expands to all package +directories found in the GOPATH trees with names matching the +patterns. As a special case, x/... matches x as well as x's subdirectories. +For example, net/... expands to net and packages in its subdirectories. +

    +

    +An import path can also name a package to be downloaded from +a remote repository. Run 'go help importpath' for details. +

    +

    +Every package in a program must have a unique import path. +By convention, this is arranged by starting each path with a +unique prefix that belongs to you. For example, paths used +internally at Google all begin with 'google', and paths +denoting remote repositories begin with the path to the code, +such as 'github.com/user/repo'. +

    +

    +Packages in a program need not have unique package names, +but there are two reserved package names with special meaning. +The name main indicates a command, not a library. +Commands are built into binaries and cannot be imported. +The name documentation indicates documentation for +a non-Go program in the directory. Files in package documentation +are ignored by the go command. +

    +

    +As a special case, if the package list is a list of .go files from a +single directory, the command is applied to a single synthesized +package made up of exactly those files, ignoring any build constraints +in those files and ignoring any other files in the directory. +

    +

    +Directory and file names that begin with "." or "_" are ignored +by the go tool, as are directories named "testdata". +

    +

    Description of testing flags

    +

    +The 'go test' command takes both flags that apply to 'go test' itself +and flags that apply to the resulting test binary. +

    +

    +Several of the flags control profiling and write an execution profile +suitable for "go tool pprof"; run "go tool pprof -h" for more +information. The --alloc_space, --alloc_objects, and --show_bytes +options of pprof control how the information is presented. +

    +

    +The following flags are recognized by the 'go test' command and +control the execution of any test: +

    +
    -bench regexp
    +    Run (sub)benchmarks matching a regular expression.
    +    The given regular expression is split into smaller ones by
    +    top-level '/', where each must match the corresponding part of a
    +    benchmark's identifier.
    +    By default, no benchmarks run. To run all benchmarks,
    +    use '-bench .' or '-bench=.'.
    +
    +-benchtime t
    +    Run enough iterations of each benchmark to take t, specified
    +    as a time.Duration (for example, -benchtime 1h30s).
    +    The default is 1 second (1s).
    +
    +-count n
    +    Run each test and benchmark n times (default 1).
    +    If -cpu is set, run n times for each GOMAXPROCS value.
    +    Examples are always run once.
    +
    +-cover
    +    Enable coverage analysis.
    +
    +-covermode set,count,atomic
    +    Set the mode for coverage analysis for the package[s]
    +    being tested. The default is "set" unless -race is enabled,
    +    in which case it is "atomic".
    +    The values:
    +	set: bool: does this statement run?
    +	count: int: how many times does this statement run?
    +	atomic: int: count, but correct in multithreaded tests;
    +		significantly more expensive.
    +    Sets -cover.
    +
    +-coverpkg pkg1,pkg2,pkg3
    +    Apply coverage analysis in each test to the given list of packages.
    +    The default is for each test to analyze only the package being tested.
    +    Packages are specified as import paths.
    +    Sets -cover.
    +
    +-cpu 1,2,4
    +    Specify a list of GOMAXPROCS values for which the tests or
    +    benchmarks should be executed.  The default is the current value
    +    of GOMAXPROCS.
    +
    +-parallel n
    +    Allow parallel execution of test functions that call t.Parallel.
    +    The value of this flag is the maximum number of tests to run
    +    simultaneously; by default, it is set to the value of GOMAXPROCS.
    +    Note that -parallel only applies within a single test binary.
    +    The 'go test' command may run tests for different packages
    +    in parallel as well, according to the setting of the -p flag
    +    (see 'go help build').
    +
    +-run regexp
    +    Run only those tests and examples matching the regular expression.
    +    For tests the regular expression is split into smaller ones by
    +    top-level '/', where each must match the corresponding part of a
    +    test's identifier.
    +
    +-short
    +    Tell long-running tests to shorten their run time.
    +    It is off by default but set during all.bash so that installing
    +    the Go tree can run a sanity check but not spend time running
    +    exhaustive tests.
    +
    +-timeout t
    +    If a test runs longer than t, panic.
    +    The default is 10 minutes (10m).
    +
    +-v
    +    Verbose output: log all tests as they are run. Also print all
    +    text from Log and Logf calls even if the test succeeds.
    +
    +
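For instance, a hypothetical invocation that restricts which tests run, runs every benchmark, and reports allocation statistics (see -benchmem below):

	go test -run=TestParse -bench=. -benchmem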

    +The following flags are also recognized by 'go test' and can be used to +profile the tests during execution: +

    +
    -benchmem
    +    Print memory allocation statistics for benchmarks.
    +
    +-blockprofile block.out
    +    Write a goroutine blocking profile to the specified file
    +    when all tests are complete.
    +    Writes test binary as -c would.
    +
    +-blockprofilerate n
    +    Control the detail provided in goroutine blocking profiles by
    +    calling runtime.SetBlockProfileRate with n.
    +    See 'go doc runtime.SetBlockProfileRate'.
    +    The profiler aims to sample, on average, one blocking event every
    +    n nanoseconds the program spends blocked.  By default,
    +    if -test.blockprofile is set without this flag, all blocking events
    +    are recorded, equivalent to -test.blockprofilerate=1.
    +
    +-coverprofile cover.out
    +    Write a coverage profile to the file after all tests have passed.
    +    Sets -cover.
    +
    +-cpuprofile cpu.out
    +    Write a CPU profile to the specified file before exiting.
    +    Writes test binary as -c would.
    +
    +-memprofile mem.out
    +    Write a memory profile to the file after all tests have passed.
    +    Writes test binary as -c would.
    +
    +-memprofilerate n
    +    Enable more precise (and expensive) memory profiles by setting
    +    runtime.MemProfileRate.  See 'go doc runtime.MemProfileRate'.
    +    To profile all memory allocations, use -test.memprofilerate=1
    +    and pass --alloc_space flag to the pprof tool.
    +
    +-mutexprofile mutex.out
    +    Write a mutex contention profile to the specified file
    +    when all tests are complete.
    +    Writes test binary as -c would.
    +
    +-mutexprofilefraction n
    +    Sample 1 in n stack traces of goroutines holding a
    +    contended mutex.
    +
    +-outputdir directory
    +    Place output files from profiling in the specified directory,
    +    by default the directory in which "go test" is running.
    +
    +-trace trace.out
    +    Write an execution trace to the specified file before exiting.
    +
    +
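As an example (file names arbitrary), collecting a coverage profile across all packages and then viewing it with the cover tool:

	go test -covermode=count -coverprofile=cover.out ./...
	go tool cover -html=cover.out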

    +Each of these flags is also recognized with an optional 'test.' prefix, +as in -test.v. When invoking the generated test binary (the result of +'go test -c') directly, however, the prefix is mandatory. +

    +

    +The 'go test' command rewrites or removes recognized flags, +as appropriate, both before and after the optional package list, +before invoking the test binary. +

    +

    +For instance, the command +

    +
    go test -v -myflag testdata -cpuprofile=prof.out -x
    +
    +

    +will compile the test binary and then run it as +

    +
    pkg.test -test.v -myflag testdata -test.cpuprofile=prof.out
    +
    +

    +(The -x flag is removed because it applies only to the go command's +execution, not to the test itself.) +

    +

    +The test flags that generate profiles (other than for coverage) also +leave the test binary in pkg.test for use when analyzing the profiles. +

    +

    +When 'go test' runs a test binary, it does so from within the +corresponding package's source code directory. Depending on the test, +it may be necessary to do the same when invoking a generated test +binary directly. +

    +

    +The command-line package list, if present, must appear before any +flag not known to the go test command. Continuing the example above, +the package list would have to appear before -myflag, but could appear +on either side of -v. +

    +

    +To keep an argument for a test binary from being interpreted as a +known flag or a package name, use -args (see 'go help test') which +passes the remainder of the command line through to the test binary +uninterpreted and unaltered. +

    +

    +For instance, the command +

    +
    go test -v -args -x -v
    +
    +

    +will compile the test binary and then run it as +

    +
    pkg.test -test.v -x -v
    +
    +

    +Similarly, +

    +
    go test -args math
    +
    +

    +will compile the test binary and then run it as +

    +
    pkg.test math
    +
    +

    +In the first example, the -x and the second -v are passed through to the +test binary unchanged and with no effect on the go command itself. +In the second example, the argument math is passed through to the test +binary, instead of being interpreted as the package list. +

    +

    Description of testing functions

    +

    +The 'go test' command expects to find test, benchmark, and example functions +in the "*_test.go" files corresponding to the package under test. +

    +

    +A test function is one named TestXXX (where XXX is any alphanumeric string +not starting with a lower case letter) and should have the signature, +

    +
    func TestXXX(t *testing.T) { ... }
    +
    +
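A concrete sketch, assuming a hypothetical Abs function in the package under test:

	func TestAbs(t *testing.T) {
		if got := Abs(-1); got != 1 {
			t.Errorf("Abs(-1) = %v; want 1", got)
		}
	}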

    +A benchmark function is one named BenchmarkXXX and should have the signature, +

    +
    func BenchmarkXXX(b *testing.B) { ... }
    +
    +
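And a matching benchmark sketch for the same hypothetical Abs:

	func BenchmarkAbs(b *testing.B) {
		for i := 0; i < b.N; i++ {
			Abs(-1)
		}
	}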

+An example function is similar to a test function but, instead of using +*testing.T to report success or failure, prints output to os.Stdout. +If the last comment in the function starts with "Output:" then the output +is compared exactly against the comment (see examples below). If the last +comment begins with "Unordered output:" then the output is compared to the +comment; however, the order of the lines is ignored. An example with no such +comment is compiled but not executed. An example with no text after +"Output:" is compiled, executed, and expected to produce no output. +

    +

    +Godoc displays the body of ExampleXXX to demonstrate the use +of the function, constant, or variable XXX. An example of a method M with +receiver type T or *T is named ExampleT_M. There may be multiple examples +for a given function, constant, or variable, distinguished by a trailing _xxx, +where xxx is a suffix not beginning with an upper case letter. +

    +

    +Here is an example of an example: +

    +
    func ExamplePrintln() {
    +	Println("The output of\nthis example.")
    +	// Output: The output of
    +	// this example.
    +}
    +
    +

    +Here is another example where the ordering of the output is ignored: +

    +
    func ExamplePerm() {
    +	for _, value := range Perm(4) {
    +		fmt.Println(value)
    +	}
    +
    +	// Unordered output: 4
    +	// 2
    +	// 1
    +	// 3
    +	// 0
    +}
    +
    +

    +The entire test file is presented as the example when it contains a single +example function, at least one other function, type, variable, or constant +declaration, and no test or benchmark functions. +

    +

    +See the documentation of the testing package for more information. +

    + + + +
    +
    + + + + + + + + +`)) diff --git a/vendor/golang.org/x/net/http2/h2i/README.md b/vendor/golang.org/x/net/http2/h2i/README.md new file mode 100644 index 0000000..fb5c5ef --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2i/README.md @@ -0,0 +1,97 @@ +# h2i + +**h2i** is an interactive HTTP/2 ("h2") console debugger. Miss the good ol' +days of telnetting to your HTTP/1.n servers? We're bringing you +back. + +Features: +- send raw HTTP/2 frames + - PING + - SETTINGS + - HEADERS + - etc +- type in HTTP/1.n and have it auto-HPACK/frame-ify it for HTTP/2 +- pretty print all received HTTP/2 frames from the peer (including HPACK decoding) +- tab completion of commands, options + +Not yet features, but soon: +- unnecessary CONTINUATION frames on short boundaries, to test peer implementations +- request bodies (DATA frames) +- send invalid frames for testing server implementations (supported by underlying Framer) + +Later: +- act like a server + +## Installation + +``` +$ go get golang.org/x/net/http2/h2i +$ h2i +``` + +## Demo + +``` +$ h2i +Usage: h2i + + -insecure + Whether to skip TLS cert validation + -nextproto string + Comma-separated list of NPN/ALPN protocol names to negotiate. (default "h2,h2-14") + +$ h2i google.com +Connecting to google.com:443 ... +Connected to 74.125.224.41:443 +Negotiated protocol "h2-14" +[FrameHeader SETTINGS len=18] + [MAX_CONCURRENT_STREAMS = 100] + [INITIAL_WINDOW_SIZE = 1048576] + [MAX_FRAME_SIZE = 16384] +[FrameHeader WINDOW_UPDATE len=4] + Window-Increment = 983041 + +h2i> PING h2iSayHI +[FrameHeader PING flags=ACK len=8] + Data = "h2iSayHI" +h2i> headers +(as HTTP/1.1)> GET / HTTP/1.1 +(as HTTP/1.1)> Host: ip.appspot.com +(as HTTP/1.1)> User-Agent: h2i/brad-n-blake +(as HTTP/1.1)> +Opening Stream-ID 1: + :authority = ip.appspot.com + :method = GET + :path = / + :scheme = https + user-agent = h2i/brad-n-blake +[FrameHeader HEADERS flags=END_HEADERS stream=1 len=77] + :status = "200" + alternate-protocol = "443:quic,p=1" + content-length = "15" + content-type = "text/html" + date = "Fri, 01 May 2015 23:06:56 GMT" + server = "Google Frontend" +[FrameHeader DATA flags=END_STREAM stream=1 len=15] + "173.164.155.78\n" +[FrameHeader PING len=8] + Data = "\x00\x00\x00\x00\x00\x00\x00\x00" +h2i> ping +[FrameHeader PING flags=ACK len=8] + Data = "h2i_ping" +h2i> ping +[FrameHeader PING flags=ACK len=8] + Data = "h2i_ping" +h2i> ping +[FrameHeader GOAWAY len=22] + Last-Stream-ID = 1; Error-Code = PROTOCOL_ERROR (1) + +ReadFrame: EOF +``` + +## Status + +Quick few hour hack. So much yet to do. Feel free to file issues for +bugs or wishlist items, but [@bmizerany](https://github.com/bmizerany/) +and I aren't yet accepting pull requests until things settle down. + diff --git a/vendor/golang.org/x/net/http2/h2i/h2i.go b/vendor/golang.org/x/net/http2/h2i/h2i.go new file mode 100644 index 0000000..62e5752 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2i/h2i.go @@ -0,0 +1,522 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9,!solaris + +/* +The h2i command is an interactive HTTP/2 console. 
+ +Usage: + $ h2i [flags] + +Interactive commands in the console: (all parts case-insensitive) + + ping [data] + settings ack + settings FOO=n BAR=z + headers (open a new stream by typing HTTP/1.1) +*/ +package main + +import ( + "bufio" + "bytes" + "crypto/tls" + "errors" + "flag" + "fmt" + "io" + "log" + "net" + "net/http" + "os" + "regexp" + "strconv" + "strings" + + "golang.org/x/crypto/ssh/terminal" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" +) + +// Flags +var ( + flagNextProto = flag.String("nextproto", "h2,h2-14", "Comma-separated list of NPN/ALPN protocol names to negotiate.") + flagInsecure = flag.Bool("insecure", false, "Whether to skip TLS cert validation") + flagSettings = flag.String("settings", "empty", "comma-separated list of KEY=value settings for the initial SETTINGS frame. The magic value 'empty' sends an empty initial settings frame, and the magic value 'omit' causes no initial settings frame to be sent.") + flagDial = flag.String("dial", "", "optional ip:port to dial, to connect to a host:port but use a different SNI name (including a SNI name without DNS)") +) + +type command struct { + run func(*h2i, []string) error // required + + // complete optionally specifies tokens (case-insensitive) which are + // valid for this subcommand. + complete func() []string +} + +var commands = map[string]command{ + "ping": {run: (*h2i).cmdPing}, + "settings": { + run: (*h2i).cmdSettings, + complete: func() []string { + return []string{ + "ACK", + http2.SettingHeaderTableSize.String(), + http2.SettingEnablePush.String(), + http2.SettingMaxConcurrentStreams.String(), + http2.SettingInitialWindowSize.String(), + http2.SettingMaxFrameSize.String(), + http2.SettingMaxHeaderListSize.String(), + } + }, + }, + "quit": {run: (*h2i).cmdQuit}, + "headers": {run: (*h2i).cmdHeaders}, +} + +func usage() { + fmt.Fprintf(os.Stderr, "Usage: h2i \n\n") + flag.PrintDefaults() +} + +// withPort adds ":443" if another port isn't already present. +func withPort(host string) string { + if _, _, err := net.SplitHostPort(host); err != nil { + return net.JoinHostPort(host, "443") + } + return host +} + +// withoutPort strips the port from addr if present. +func withoutPort(addr string) string { + if h, _, err := net.SplitHostPort(addr); err == nil { + return h + } + return addr +} + +// h2i is the app's state. 
+type h2i struct { + host string + tc *tls.Conn + framer *http2.Framer + term *terminal.Terminal + + // owned by the command loop: + streamID uint32 + hbuf bytes.Buffer + henc *hpack.Encoder + + // owned by the readFrames loop: + peerSetting map[http2.SettingID]uint32 + hdec *hpack.Decoder +} + +func main() { + flag.Usage = usage + flag.Parse() + if flag.NArg() != 1 { + usage() + os.Exit(2) + } + log.SetFlags(0) + + host := flag.Arg(0) + app := &h2i{ + host: host, + peerSetting: make(map[http2.SettingID]uint32), + } + app.henc = hpack.NewEncoder(&app.hbuf) + + if err := app.Main(); err != nil { + if app.term != nil { + app.logf("%v\n", err) + } else { + fmt.Fprintf(os.Stderr, "%v\n", err) + } + os.Exit(1) + } + fmt.Fprintf(os.Stdout, "\n") +} + +func (app *h2i) Main() error { + cfg := &tls.Config{ + ServerName: withoutPort(app.host), + NextProtos: strings.Split(*flagNextProto, ","), + InsecureSkipVerify: *flagInsecure, + } + + hostAndPort := *flagDial + if hostAndPort == "" { + hostAndPort = withPort(app.host) + } + log.Printf("Connecting to %s ...", hostAndPort) + tc, err := tls.Dial("tcp", hostAndPort, cfg) + if err != nil { + return fmt.Errorf("Error dialing %s: %v", hostAndPort, err) + } + log.Printf("Connected to %v", tc.RemoteAddr()) + defer tc.Close() + + if err := tc.Handshake(); err != nil { + return fmt.Errorf("TLS handshake: %v", err) + } + if !*flagInsecure { + if err := tc.VerifyHostname(app.host); err != nil { + return fmt.Errorf("VerifyHostname: %v", err) + } + } + state := tc.ConnectionState() + log.Printf("Negotiated protocol %q", state.NegotiatedProtocol) + if !state.NegotiatedProtocolIsMutual || state.NegotiatedProtocol == "" { + return fmt.Errorf("Could not negotiate protocol mutually") + } + + if _, err := io.WriteString(tc, http2.ClientPreface); err != nil { + return err + } + + app.framer = http2.NewFramer(tc, tc) + + oldState, err := terminal.MakeRaw(int(os.Stdin.Fd())) + if err != nil { + return err + } + defer terminal.Restore(0, oldState) + + var screen = struct { + io.Reader + io.Writer + }{os.Stdin, os.Stdout} + + app.term = terminal.NewTerminal(screen, "h2i> ") + lastWord := regexp.MustCompile(`.+\W(\w+)$`) + app.term.AutoCompleteCallback = func(line string, pos int, key rune) (newLine string, newPos int, ok bool) { + if key != '\t' { + return + } + if pos != len(line) { + // TODO: we're being lazy for now, only supporting tab completion at the end. + return + } + // Auto-complete for the command itself. 
+ if !strings.Contains(line, " ") { + var name string + name, _, ok = lookupCommand(line) + if !ok { + return + } + return name, len(name), true + } + _, c, ok := lookupCommand(line[:strings.IndexByte(line, ' ')]) + if !ok || c.complete == nil { + return + } + if strings.HasSuffix(line, " ") { + app.logf("%s", strings.Join(c.complete(), " ")) + return line, pos, true + } + m := lastWord.FindStringSubmatch(line) + if m == nil { + return line, len(line), true + } + soFar := m[1] + var match []string + for _, cand := range c.complete() { + if len(soFar) > len(cand) || !strings.EqualFold(cand[:len(soFar)], soFar) { + continue + } + match = append(match, cand) + } + if len(match) == 0 { + return + } + if len(match) > 1 { + // TODO: auto-complete any common prefix + app.logf("%s", strings.Join(match, " ")) + return line, pos, true + } + newLine = line[:len(line)-len(soFar)] + match[0] + return newLine, len(newLine), true + + } + + errc := make(chan error, 2) + go func() { errc <- app.readFrames() }() + go func() { errc <- app.readConsole() }() + return <-errc +} + +func (app *h2i) logf(format string, args ...interface{}) { + fmt.Fprintf(app.term, format+"\r\n", args...) +} + +func (app *h2i) readConsole() error { + if s := *flagSettings; s != "omit" { + var args []string + if s != "empty" { + args = strings.Split(s, ",") + } + _, c, ok := lookupCommand("settings") + if !ok { + panic("settings command not found") + } + c.run(app, args) + } + + for { + line, err := app.term.ReadLine() + if err == io.EOF { + return nil + } + if err != nil { + return fmt.Errorf("terminal.ReadLine: %v", err) + } + f := strings.Fields(line) + if len(f) == 0 { + continue + } + cmd, args := f[0], f[1:] + if _, c, ok := lookupCommand(cmd); ok { + err = c.run(app, args) + } else { + app.logf("Unknown command %q", line) + } + if err == errExitApp { + return nil + } + if err != nil { + return err + } + } +} + +func lookupCommand(prefix string) (name string, c command, ok bool) { + prefix = strings.ToLower(prefix) + if c, ok = commands[prefix]; ok { + return prefix, c, ok + } + + for full, candidate := range commands { + if strings.HasPrefix(full, prefix) { + if c.run != nil { + return "", command{}, false // ambiguous + } + c = candidate + name = full + } + } + return name, c, c.run != nil +} + +var errExitApp = errors.New("internal sentinel error value to quit the console reading loop") + +func (a *h2i) cmdQuit(args []string) error { + if len(args) > 0 { + a.logf("the QUIT command takes no argument") + return nil + } + return errExitApp +} + +func (a *h2i) cmdSettings(args []string) error { + if len(args) == 1 && strings.EqualFold(args[0], "ACK") { + return a.framer.WriteSettingsAck() + } + var settings []http2.Setting + for _, arg := range args { + if strings.EqualFold(arg, "ACK") { + a.logf("Error: ACK must be only argument with the SETTINGS command") + return nil + } + eq := strings.Index(arg, "=") + if eq == -1 { + a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg) + return nil + } + sid, ok := settingByName(arg[:eq]) + if !ok { + a.logf("Error: unknown setting name %q", arg[:eq]) + return nil + } + val, err := strconv.ParseUint(arg[eq+1:], 10, 32) + if err != nil { + a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg) + return nil + } + settings = append(settings, http2.Setting{ + ID: sid, + Val: uint32(val), + }) + } + a.logf("Sending: %v", settings) + return a.framer.WriteSettings(settings...) 
+} + +func settingByName(name string) (http2.SettingID, bool) { + for _, sid := range [...]http2.SettingID{ + http2.SettingHeaderTableSize, + http2.SettingEnablePush, + http2.SettingMaxConcurrentStreams, + http2.SettingInitialWindowSize, + http2.SettingMaxFrameSize, + http2.SettingMaxHeaderListSize, + } { + if strings.EqualFold(sid.String(), name) { + return sid, true + } + } + return 0, false +} + +func (app *h2i) cmdPing(args []string) error { + if len(args) > 1 { + app.logf("invalid PING usage: only accepts 0 or 1 args") + return nil // nil means don't end the program + } + var data [8]byte + if len(args) == 1 { + copy(data[:], args[0]) + } else { + copy(data[:], "h2i_ping") + } + return app.framer.WritePing(false, data) +} + +func (app *h2i) cmdHeaders(args []string) error { + if len(args) > 0 { + app.logf("Error: HEADERS doesn't yet take arguments.") + // TODO: flags for restricting window size, to force CONTINUATION + // frames. + return nil + } + var h1req bytes.Buffer + app.term.SetPrompt("(as HTTP/1.1)> ") + defer app.term.SetPrompt("h2i> ") + for { + line, err := app.term.ReadLine() + if err != nil { + return err + } + h1req.WriteString(line) + h1req.WriteString("\r\n") + if line == "" { + break + } + } + req, err := http.ReadRequest(bufio.NewReader(&h1req)) + if err != nil { + app.logf("Invalid HTTP/1.1 request: %v", err) + return nil + } + if app.streamID == 0 { + app.streamID = 1 + } else { + app.streamID += 2 + } + app.logf("Opening Stream-ID %d:", app.streamID) + hbf := app.encodeHeaders(req) + if len(hbf) > 16<<10 { + app.logf("TODO: h2i doesn't yet write CONTINUATION frames. Copy it from transport.go") + return nil + } + return app.framer.WriteHeaders(http2.HeadersFrameParam{ + StreamID: app.streamID, + BlockFragment: hbf, + EndStream: req.Method == "GET" || req.Method == "HEAD", // good enough for now + EndHeaders: true, // for now + }) +} + +func (app *h2i) readFrames() error { + for { + f, err := app.framer.ReadFrame() + if err != nil { + return fmt.Errorf("ReadFrame: %v", err) + } + app.logf("%v", f) + switch f := f.(type) { + case *http2.PingFrame: + app.logf(" Data = %q", f.Data) + case *http2.SettingsFrame: + f.ForeachSetting(func(s http2.Setting) error { + app.logf(" %v", s) + app.peerSetting[s.ID] = s.Val + return nil + }) + case *http2.WindowUpdateFrame: + app.logf(" Window-Increment = %v", f.Increment) + case *http2.GoAwayFrame: + app.logf(" Last-Stream-ID = %d; Error-Code = %v (%d)", f.LastStreamID, f.ErrCode, f.ErrCode) + case *http2.DataFrame: + app.logf(" %q", f.Data()) + case *http2.HeadersFrame: + if f.HasPriority() { + app.logf(" PRIORITY = %v", f.Priority) + } + if app.hdec == nil { + // TODO: if the user uses h2i to send a SETTINGS frame advertising + // something larger, we'll need to respect SETTINGS_HEADER_TABLE_SIZE + // and stuff here instead of using the 4k default. But for now: + tableSize := uint32(4 << 10) + app.hdec = hpack.NewDecoder(tableSize, app.onNewHeaderField) + } + app.hdec.Write(f.HeaderBlockFragment()) + case *http2.PushPromiseFrame: + if app.hdec == nil { + // TODO: if the user uses h2i to send a SETTINGS frame advertising + // something larger, we'll need to respect SETTINGS_HEADER_TABLE_SIZE + // and stuff here instead of using the 4k default. 
+				// But for now:
+				tableSize := uint32(4 << 10)
+				app.hdec = hpack.NewDecoder(tableSize, app.onNewHeaderField)
+			}
+			app.hdec.Write(f.HeaderBlockFragment())
+		}
+	}
+}
+
+// called from readLoop
+func (app *h2i) onNewHeaderField(f hpack.HeaderField) {
+	if f.Sensitive {
+		app.logf("  %s = %q (SENSITIVE)", f.Name, f.Value)
+		return // don't log the value a second time below
+	}
+	app.logf("  %s = %q", f.Name, f.Value)
+}
+
+func (app *h2i) encodeHeaders(req *http.Request) []byte {
+	app.hbuf.Reset()
+
+	// TODO(bradfitz): figure out :authority-vs-Host stuff between http2 and Go
+	host := req.Host
+	if host == "" {
+		host = req.URL.Host
+	}
+
+	path := req.RequestURI
+	if path == "" {
+		path = "/"
+	}
+
+	app.writeHeader(":authority", host) // probably not right for all sites
+	app.writeHeader(":method", req.Method)
+	app.writeHeader(":path", path)
+	app.writeHeader(":scheme", "https")
+
+	for k, vv := range req.Header {
+		lowKey := strings.ToLower(k)
+		if lowKey == "host" {
+			continue
+		}
+		for _, v := range vv {
+			app.writeHeader(lowKey, v)
+		}
+	}
+	return app.hbuf.Bytes()
+}
+
+func (app *h2i) writeHeader(name, value string) {
+	app.henc.WriteField(hpack.HeaderField{Name: name, Value: value})
+	app.logf("  %s = %s", name, value)
+}
diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go
new file mode 100644
index 0000000..c2805f6
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/headermap.go
@@ -0,0 +1,78 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+	"net/http"
+	"strings"
+)
+
+var (
+	commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case
+	commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case
+)
+
+func init() {
+	for _, v := range []string{
+		"accept",
+		"accept-charset",
+		"accept-encoding",
+		"accept-language",
+		"accept-ranges",
+		"age",
+		"access-control-allow-origin",
+		"allow",
+		"authorization",
+		"cache-control",
+		"content-disposition",
+		"content-encoding",
+		"content-language",
+		"content-length",
+		"content-location",
+		"content-range",
+		"content-type",
+		"cookie",
+		"date",
+		"etag",
+		"expect",
+		"expires",
+		"from",
+		"host",
+		"if-match",
+		"if-modified-since",
+		"if-none-match",
+		"if-unmodified-since",
+		"last-modified",
+		"link",
+		"location",
+		"max-forwards",
+		"proxy-authenticate",
+		"proxy-authorization",
+		"range",
+		"referer",
+		"refresh",
+		"retry-after",
+		"server",
+		"set-cookie",
+		"strict-transport-security",
+		"trailer",
+		"transfer-encoding",
+		"user-agent",
+		"vary",
+		"via",
+		"www-authenticate",
+	} {
+		chk := http.CanonicalHeaderKey(v)
+		commonLowerHeader[chk] = v
+		commonCanonHeader[v] = chk
+	}
+}
+
+func lowerHeader(v string) string {
+	if s, ok := commonLowerHeader[v]; ok {
+		return s
+	}
+	return strings.ToLower(v)
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go
new file mode 100644
index 0000000..1565cf2
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/encode.go
@@ -0,0 +1,240 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+	"io"
+)
+
+const (
+	uint32Max              = ^uint32(0)
+	initialHeaderTableSize = 4096
+)
+
+type Encoder struct {
+	dynTab dynamicTable
+	// minSize is the minimum table size set by
+	// SetMaxDynamicTableSize after the previous Header Table Size
+	// Update.
+	minSize uint32
+	// maxSizeLimit is the maximum table size this encoder
+	// supports. This protects the encoder from an excessively
+	// large table size.
+	maxSizeLimit uint32
+	// tableSizeUpdate indicates whether a "Header Table Size
+	// Update" is required.
+	tableSizeUpdate bool
+	w               io.Writer
+	buf             []byte
+}
+
+// NewEncoder returns a new Encoder which performs HPACK encoding.
+// Encoded data is written to w.
+func NewEncoder(w io.Writer) *Encoder {
+	e := &Encoder{
+		minSize:         uint32Max,
+		maxSizeLimit:    initialHeaderTableSize,
+		tableSizeUpdate: false,
+		w:               w,
+	}
+	e.dynTab.table.init()
+	e.dynTab.setMaxSize(initialHeaderTableSize)
+	return e
+}
+
+// WriteField encodes f into a single Write to e's underlying Writer.
+// This function may also produce bytes for a "Header Table Size
+// Update" if necessary. If produced, they are written before encoding f.
+func (e *Encoder) WriteField(f HeaderField) error {
+	e.buf = e.buf[:0]
+
+	if e.tableSizeUpdate {
+		e.tableSizeUpdate = false
+		if e.minSize < e.dynTab.maxSize {
+			e.buf = appendTableSize(e.buf, e.minSize)
+		}
+		e.minSize = uint32Max
+		e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
+	}
+
+	idx, nameValueMatch := e.searchTable(f)
+	if nameValueMatch {
+		e.buf = appendIndexed(e.buf, idx)
+	} else {
+		indexing := e.shouldIndex(f)
+		if indexing {
+			e.dynTab.add(f)
+		}
+
+		if idx == 0 {
+			e.buf = appendNewName(e.buf, f, indexing)
+		} else {
+			e.buf = appendIndexedName(e.buf, f, idx, indexing)
+		}
+	}
+	n, err := e.w.Write(e.buf)
+	if err == nil && n != len(e.buf) {
+		err = io.ErrShortWrite
+	}
+	return err
+}
+
+// searchTable searches for f in both the static and dynamic header tables.
+// The static header table is searched first. The dynamic header table is
+// searched only when the static table has no exact match for both name
+// and value. If there is no match, i is 0. If both name and value
+// match, i is the matched index and nameValueMatch becomes true. If
+// only name matches, i points to that index and nameValueMatch
+// becomes false.
+func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
+	i, nameValueMatch = staticTable.search(f)
+	if nameValueMatch {
+		return i, true
+	}
+
+	j, nameValueMatch := e.dynTab.table.search(f)
+	if nameValueMatch || (i == 0 && j != 0) {
+		return j + uint64(staticTable.len()), nameValueMatch
+	}
+
+	return i, false
+}
+
+// SetMaxDynamicTableSize changes the dynamic header table size to v.
+// The actual size is bounded by the value passed to
+// SetMaxDynamicTableSizeLimit.
+func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
+	if v > e.maxSizeLimit {
+		v = e.maxSizeLimit
+	}
+	if v < e.minSize {
+		e.minSize = v
+	}
+	e.tableSizeUpdate = true
+	e.dynTab.setMaxSize(v)
+}
+
+// SetMaxDynamicTableSizeLimit changes the maximum value that can be
+// specified in SetMaxDynamicTableSize to v. By default, it is set to
+// 4096, which is the default dynamic header table size described in
+// the HPACK specification. If the current maximum dynamic header table
+// size is strictly greater than v, a "Header Table Size Update" will be
+// emitted in the next WriteField call and the maximum dynamic header
+// table size is truncated to v.
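+//
+// A rough usage sketch (the concrete sizes are illustrative only):
+//
+//	var buf bytes.Buffer
+//	e := NewEncoder(&buf)
+//	e.SetMaxDynamicTableSizeLimit(1024) // cap future size changes at 1024
+//	e.SetMaxDynamicTableSize(4096)      // silently clamped to 1024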
+func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
+	e.maxSizeLimit = v
+	if e.dynTab.maxSize > v {
+		e.tableSizeUpdate = true
+		e.dynTab.setMaxSize(v)
+	}
+}
+
+// shouldIndex reports whether f should be indexed.
+func (e *Encoder) shouldIndex(f HeaderField) bool {
+	return !f.Sensitive && f.Size() <= e.dynTab.maxSize
+}
+
+// appendIndexed appends index i, as encoded in "Indexed Header Field"
+// representation, to dst and returns the extended buffer.
+func appendIndexed(dst []byte, i uint64) []byte {
+	first := len(dst)
+	dst = appendVarInt(dst, 7, i)
+	dst[first] |= 0x80
+	return dst
+}
+
+// appendNewName appends f, as encoded in one of the "Literal Header
+// Field - New Name" representation variants, to dst and returns the
+// extended buffer.
+//
+// If f.Sensitive is true, the "Never Indexed" representation is used. If
+// f.Sensitive is false and indexing is true, the "Incremental Indexing"
+// representation is used.
+func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
+	dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
+	dst = appendHpackString(dst, f.Name)
+	return appendHpackString(dst, f.Value)
+}
+
+// appendIndexedName appends f and index i referring to the indexed name
+// entry, as encoded in one of the "Literal Header Field - Indexed Name"
+// representation variants, to dst and returns the extended buffer.
+//
+// If f.Sensitive is true, the "Never Indexed" representation is used. If
+// f.Sensitive is false and indexing is true, the "Incremental Indexing"
+// representation is used.
+func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {
+	first := len(dst)
+	var n byte
+	if indexing {
+		n = 6
+	} else {
+		n = 4
+	}
+	dst = appendVarInt(dst, n, i)
+	dst[first] |= encodeTypeByte(indexing, f.Sensitive)
+	return appendHpackString(dst, f.Value)
+}
+
+// appendTableSize appends v, as encoded in "Header Table Size Update"
+// representation, to dst and returns the extended buffer.
+func appendTableSize(dst []byte, v uint32) []byte {
+	first := len(dst)
+	dst = appendVarInt(dst, 5, uint64(v))
+	dst[first] |= 0x20
+	return dst
+}
+
+// appendVarInt appends i, as encoded in variable integer form using n
+// bit prefix, to dst and returns the extended buffer.
+//
+// See
+// http://http2.github.io/http2-spec/compression.html#integer.representation
+func appendVarInt(dst []byte, n byte, i uint64) []byte {
+	k := uint64((1 << n) - 1)
+	if i < k {
+		return append(dst, byte(i))
+	}
+	dst = append(dst, byte(k))
+	i -= k
+	for ; i >= 128; i >>= 7 {
+		dst = append(dst, byte(0x80|(i&0x7f)))
+	}
+	return append(dst, byte(i))
+}
+
+// appendHpackString appends s, as encoded in "String Literal"
+// representation, to dst and returns the extended buffer.
+//
+// s will be encoded in Huffman codes only when that produces a strictly
+// shorter byte string.
+func appendHpackString(dst []byte, s string) []byte {
+	huffmanLength := HuffmanEncodeLength(s)
+	if huffmanLength < uint64(len(s)) {
+		first := len(dst)
+		dst = appendVarInt(dst, 7, huffmanLength)
+		dst = AppendHuffmanString(dst, s)
+		dst[first] |= 0x80
+	} else {
+		dst = appendVarInt(dst, 7, uint64(len(s)))
+		dst = append(dst, s...)
+	}
+	return dst
+}
+
+// encodeTypeByte returns the type byte. If sensitive is true, the type
+// byte for the "Never Indexed" representation is returned. If sensitive
+// is false and indexing is true, the type byte for the "Incremental
+// Indexing" representation is returned. Otherwise, the type byte for
+// "Without Indexing" is returned.
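+//
+// For reference, the returned values correspond to the HPACK
+// representation prefixes (a summary of the cases below):
+//
+//	0x10 (0001xxxx) Never Indexed
+//	0x40 (01xxxxxx) Incremental Indexing
+//	0x00 (0000xxxx) Without Indexing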
+func encodeTypeByte(indexing, sensitive bool) byte { + if sensitive { + return 0x10 + } + if indexing { + return 0x40 + } + return 0 +} diff --git a/vendor/golang.org/x/net/http2/hpack/encode_test.go b/vendor/golang.org/x/net/http2/hpack/encode_test.go new file mode 100644 index 0000000..05f12db --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/encode_test.go @@ -0,0 +1,386 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hpack + +import ( + "bytes" + "encoding/hex" + "fmt" + "math/rand" + "reflect" + "strings" + "testing" +) + +func TestEncoderTableSizeUpdate(t *testing.T) { + tests := []struct { + size1, size2 uint32 + wantHex string + }{ + // Should emit 2 table size updates (2048 and 4096) + {2048, 4096, "3fe10f 3fe11f 82"}, + + // Should emit 1 table size update (2048) + {16384, 2048, "3fe10f 82"}, + } + for _, tt := range tests { + var buf bytes.Buffer + e := NewEncoder(&buf) + e.SetMaxDynamicTableSize(tt.size1) + e.SetMaxDynamicTableSize(tt.size2) + if err := e.WriteField(pair(":method", "GET")); err != nil { + t.Fatal(err) + } + want := removeSpace(tt.wantHex) + if got := hex.EncodeToString(buf.Bytes()); got != want { + t.Errorf("e.SetDynamicTableSize %v, %v = %q; want %q", tt.size1, tt.size2, got, want) + } + } +} + +func TestEncoderWriteField(t *testing.T) { + var buf bytes.Buffer + e := NewEncoder(&buf) + var got []HeaderField + d := NewDecoder(4<<10, func(f HeaderField) { + got = append(got, f) + }) + + tests := []struct { + hdrs []HeaderField + }{ + {[]HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + }}, + {[]HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + pair("cache-control", "no-cache"), + }}, + {[]HeaderField{ + pair(":method", "GET"), + pair(":scheme", "https"), + pair(":path", "/index.html"), + pair(":authority", "www.example.com"), + pair("custom-key", "custom-value"), + }}, + } + for i, tt := range tests { + buf.Reset() + got = got[:0] + for _, hf := range tt.hdrs { + if err := e.WriteField(hf); err != nil { + t.Fatal(err) + } + } + _, err := d.Write(buf.Bytes()) + if err != nil { + t.Errorf("%d. Decoder Write = %v", i, err) + } + if !reflect.DeepEqual(got, tt.hdrs) { + t.Errorf("%d. Decoded %+v; want %+v", i, got, tt.hdrs) + } + } +} + +func TestEncoderSearchTable(t *testing.T) { + e := NewEncoder(nil) + + e.dynTab.add(pair("foo", "bar")) + e.dynTab.add(pair("blake", "miz")) + e.dynTab.add(pair(":method", "GET")) + + tests := []struct { + hf HeaderField + wantI uint64 + wantMatch bool + }{ + // Name and Value match + {pair("foo", "bar"), uint64(staticTable.len()) + 3, true}, + {pair("blake", "miz"), uint64(staticTable.len()) + 2, true}, + {pair(":method", "GET"), 2, true}, + + // Only name match because Sensitive == true. This is allowed to match + // any ":method" entry. The current implementation uses the last entry + // added in newStaticTable. + {HeaderField{":method", "GET", true}, 3, false}, + + // Only Name matches + {pair("foo", "..."), uint64(staticTable.len()) + 3, false}, + {pair("blake", "..."), uint64(staticTable.len()) + 2, false}, + // As before, this is allowed to match any ":method" entry. 
+ {pair(":method", "..."), 3, false}, + + // None match + {pair("foo-", "bar"), 0, false}, + } + for _, tt := range tests { + if gotI, gotMatch := e.searchTable(tt.hf); gotI != tt.wantI || gotMatch != tt.wantMatch { + t.Errorf("d.search(%+v) = %v, %v; want %v, %v", tt.hf, gotI, gotMatch, tt.wantI, tt.wantMatch) + } + } +} + +func TestAppendVarInt(t *testing.T) { + tests := []struct { + n byte + i uint64 + want []byte + }{ + // Fits in a byte: + {1, 0, []byte{0}}, + {2, 2, []byte{2}}, + {3, 6, []byte{6}}, + {4, 14, []byte{14}}, + {5, 30, []byte{30}}, + {6, 62, []byte{62}}, + {7, 126, []byte{126}}, + {8, 254, []byte{254}}, + + // Multiple bytes: + {5, 1337, []byte{31, 154, 10}}, + } + for _, tt := range tests { + got := appendVarInt(nil, tt.n, tt.i) + if !bytes.Equal(got, tt.want) { + t.Errorf("appendVarInt(nil, %v, %v) = %v; want %v", tt.n, tt.i, got, tt.want) + } + } +} + +func TestAppendHpackString(t *testing.T) { + tests := []struct { + s, wantHex string + }{ + // Huffman encoded + {"www.example.com", "8c f1e3 c2e5 f23a 6ba0 ab90 f4ff"}, + + // Not Huffman encoded + {"a", "01 61"}, + + // zero length + {"", "00"}, + } + for _, tt := range tests { + want := removeSpace(tt.wantHex) + buf := appendHpackString(nil, tt.s) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("appendHpackString(nil, %q) = %q; want %q", tt.s, got, want) + } + } +} + +func TestAppendIndexed(t *testing.T) { + tests := []struct { + i uint64 + wantHex string + }{ + // 1 byte + {1, "81"}, + {126, "fe"}, + + // 2 bytes + {127, "ff00"}, + {128, "ff01"}, + } + for _, tt := range tests { + want := removeSpace(tt.wantHex) + buf := appendIndexed(nil, tt.i) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("appendIndex(nil, %v) = %q; want %q", tt.i, got, want) + } + } +} + +func TestAppendNewName(t *testing.T) { + tests := []struct { + f HeaderField + indexing bool + wantHex string + }{ + // Incremental indexing + {HeaderField{"custom-key", "custom-value", false}, true, "40 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, + + // Without indexing + {HeaderField{"custom-key", "custom-value", false}, false, "00 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, + + // Never indexed + {HeaderField{"custom-key", "custom-value", true}, true, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, + {HeaderField{"custom-key", "custom-value", true}, false, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, + } + for _, tt := range tests { + want := removeSpace(tt.wantHex) + buf := appendNewName(nil, tt.f, tt.indexing) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("appendNewName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want) + } + } +} + +func TestAppendIndexedName(t *testing.T) { + tests := []struct { + f HeaderField + i uint64 + indexing bool + wantHex string + }{ + // Incremental indexing + {HeaderField{":status", "302", false}, 8, true, "48 82 6402"}, + + // Without indexing + {HeaderField{":status", "302", false}, 8, false, "08 82 6402"}, + + // Never indexed + {HeaderField{":status", "302", true}, 8, true, "18 82 6402"}, + {HeaderField{":status", "302", true}, 8, false, "18 82 6402"}, + } + for _, tt := range tests { + want := removeSpace(tt.wantHex) + buf := appendIndexedName(nil, tt.f, tt.i, tt.indexing) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("appendIndexedName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want) + } + } +} + +func TestAppendTableSize(t *testing.T) { + tests := []struct { + i uint32 + wantHex string + }{ + // Fits into 1 
+		// byte
+		{30, "3e"},
+
+		// Extra byte
+		{31, "3f00"},
+		{32, "3f01"},
+	}
+	for _, tt := range tests {
+		want := removeSpace(tt.wantHex)
+		buf := appendTableSize(nil, tt.i)
+		if got := hex.EncodeToString(buf); want != got {
+			t.Errorf("appendTableSize(nil, %v) = %q; want %q", tt.i, got, want)
+		}
+	}
+}
+
+func TestEncoderSetMaxDynamicTableSize(t *testing.T) {
+	var buf bytes.Buffer
+	e := NewEncoder(&buf)
+	tests := []struct {
+		v           uint32
+		wantUpdate  bool
+		wantMinSize uint32
+		wantMaxSize uint32
+	}{
+		// Set new table size to 2048
+		{2048, true, 2048, 2048},
+
+		// Set new table size to 16384, but still limited to
+		// 4096
+		{16384, true, 2048, 4096},
+	}
+	for _, tt := range tests {
+		e.SetMaxDynamicTableSize(tt.v)
+		if got := e.tableSizeUpdate; tt.wantUpdate != got {
+			t.Errorf("e.tableSizeUpdate = %v; want %v", got, tt.wantUpdate)
+		}
+		if got := e.minSize; tt.wantMinSize != got {
+			t.Errorf("e.minSize = %v; want %v", got, tt.wantMinSize)
+		}
+		if got := e.dynTab.maxSize; tt.wantMaxSize != got {
+			t.Errorf("e.maxSize = %v; want %v", got, tt.wantMaxSize)
+		}
+	}
+}
+
+func TestEncoderSetMaxDynamicTableSizeLimit(t *testing.T) {
+	e := NewEncoder(nil)
+	// 4095 < initialHeaderTableSize means maxSize is truncated to
+	// 4095.
+	e.SetMaxDynamicTableSizeLimit(4095)
+	if got, want := e.dynTab.maxSize, uint32(4095); got != want {
+		t.Errorf("e.dynTab.maxSize = %v; want %v", got, want)
+	}
+	if got, want := e.maxSizeLimit, uint32(4095); got != want {
+		t.Errorf("e.maxSizeLimit = %v; want %v", got, want)
+	}
+	if got, want := e.tableSizeUpdate, true; got != want {
+		t.Errorf("e.tableSizeUpdate = %v; want %v", got, want)
+	}
+	// maxSize will be truncated to maxSizeLimit
+	e.SetMaxDynamicTableSize(16384)
+	if got, want := e.dynTab.maxSize, uint32(4095); got != want {
+		t.Errorf("e.dynTab.maxSize = %v; want %v", got, want)
+	}
+	// 8192 > current maxSizeLimit, so maxSize does not change.
+	e.SetMaxDynamicTableSizeLimit(8192)
+	if got, want := e.dynTab.maxSize, uint32(4095); got != want {
+		t.Errorf("e.dynTab.maxSize = %v; want %v", got, want)
+	}
+	if got, want := e.maxSizeLimit, uint32(8192); got != want {
+		t.Errorf("e.maxSizeLimit = %v; want %v", got, want)
+	}
+}
+
+func removeSpace(s string) string {
+	return strings.Replace(s, " ", "", -1)
+}
+
+func BenchmarkEncoderSearchTable(b *testing.B) {
+	e := NewEncoder(nil)
+
+	// A sample of possible header fields.
+	// This is not based on any actual data from HTTP/2 traces.
+	var possible []HeaderField
+	for _, f := range staticTable.ents {
+		if f.Value == "" {
+			possible = append(possible, f)
+			continue
+		}
+		// Generate 5 random values, except for cookie and set-cookie,
+		// which we know can have many values in practice.
+		num := 5
+		if f.Name == "cookie" || f.Name == "set-cookie" {
+			num = 25
+		}
+		for i := 0; i < num; i++ {
+			f.Value = fmt.Sprintf("%s-%d", f.Name, i)
+			possible = append(possible, f)
+		}
+	}
+	for k := 0; k < 10; k++ {
+		f := HeaderField{
+			Name:      fmt.Sprintf("x-header-%d", k),
+			Sensitive: rand.Int()%2 == 0,
+		}
+		for i := 0; i < 5; i++ {
+			f.Value = fmt.Sprintf("%s-%d", f.Name, i)
+			possible = append(possible, f)
+		}
+	}
+
+	// Add a random sample to the dynamic table. This very loosely simulates
+	// a history of 100 requests with 20 header fields per request.
+	for r := 0; r < 100*20; r++ {
+		f := possible[rand.Int31n(int32(len(possible)))]
+		// Skip if this is in the staticTable verbatim.
+ if _, has := staticTable.search(f); !has { + e.dynTab.add(f) + } + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + for _, f := range possible { + e.searchTable(f) + } + } +} diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go new file mode 100644 index 0000000..176644a --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/hpack.go @@ -0,0 +1,490 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package hpack implements HPACK, a compression format for +// efficiently representing HTTP header fields in the context of HTTP/2. +// +// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09 +package hpack + +import ( + "bytes" + "errors" + "fmt" +) + +// A DecodingError is something the spec defines as a decoding error. +type DecodingError struct { + Err error +} + +func (de DecodingError) Error() string { + return fmt.Sprintf("decoding error: %v", de.Err) +} + +// An InvalidIndexError is returned when an encoder references a table +// entry before the static table or after the end of the dynamic table. +type InvalidIndexError int + +func (e InvalidIndexError) Error() string { + return fmt.Sprintf("invalid indexed representation index %d", int(e)) +} + +// A HeaderField is a name-value pair. Both the name and value are +// treated as opaque sequences of octets. +type HeaderField struct { + Name, Value string + + // Sensitive means that this header field should never be + // indexed. + Sensitive bool +} + +// IsPseudo reports whether the header field is an http2 pseudo header. +// That is, it reports whether it starts with a colon. +// It is not otherwise guaranteed to be a valid pseudo header field, +// though. +func (hf HeaderField) IsPseudo() bool { + return len(hf.Name) != 0 && hf.Name[0] == ':' +} + +func (hf HeaderField) String() string { + var suffix string + if hf.Sensitive { + suffix = " (sensitive)" + } + return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix) +} + +// Size returns the size of an entry per RFC 7541 section 4.1. +func (hf HeaderField) Size() uint32 { + // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 + // "The size of the dynamic table is the sum of the size of + // its entries. The size of an entry is the sum of its name's + // length in octets (as defined in Section 5.2), its value's + // length in octets (see Section 5.2), plus 32. The size of + // an entry is calculated using the length of the name and + // value without any Huffman encoding applied." + + // This can overflow if somebody makes a large HeaderField + // Name and/or Value by hand, but we don't care, because that + // won't happen on the wire because the encoding doesn't allow + // it. + return uint32(len(hf.Name) + len(hf.Value) + 32) +} + +// A Decoder is the decoding context for incremental processing of +// header blocks. +type Decoder struct { + dynTab dynamicTable + emit func(f HeaderField) + + emitEnabled bool // whether calls to emit are enabled + maxStrLen int // 0 means unlimited + + // buf is the unparsed buffer. It's only written to + // saveBuf if it was truncated in the middle of a header + // block. Because it's usually not owned, we can only + // process it under Write. + buf []byte // not owned; only valid during Write + + // saveBuf is previous data passed to Write which we weren't able + // to fully parse before. Unlike buf, we own this data. 
+ saveBuf bytes.Buffer +} + +// NewDecoder returns a new decoder with the provided maximum dynamic +// table size. The emitFunc will be called for each valid field +// parsed, in the same goroutine as calls to Write, before Write returns. +func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder { + d := &Decoder{ + emit: emitFunc, + emitEnabled: true, + } + d.dynTab.table.init() + d.dynTab.allowedMaxSize = maxDynamicTableSize + d.dynTab.setMaxSize(maxDynamicTableSize) + return d +} + +// ErrStringLength is returned by Decoder.Write when the max string length +// (as configured by Decoder.SetMaxStringLength) would be violated. +var ErrStringLength = errors.New("hpack: string too long") + +// SetMaxStringLength sets the maximum size of a HeaderField name or +// value string. If a string exceeds this length (even after any +// decompression), Write will return ErrStringLength. +// A value of 0 means unlimited and is the default from NewDecoder. +func (d *Decoder) SetMaxStringLength(n int) { + d.maxStrLen = n +} + +// SetEmitFunc changes the callback used when new header fields +// are decoded. +// It must be non-nil. It does not affect EmitEnabled. +func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) { + d.emit = emitFunc +} + +// SetEmitEnabled controls whether the emitFunc provided to NewDecoder +// should be called. The default is true. +// +// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE +// while still decoding and keeping in-sync with decoder state, but +// without doing unnecessary decompression or generating unnecessary +// garbage for header fields past the limit. +func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v } + +// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder +// are currently enabled. The default is true. +func (d *Decoder) EmitEnabled() bool { return d.emitEnabled } + +// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their +// underlying buffers for garbage reasons. + +func (d *Decoder) SetMaxDynamicTableSize(v uint32) { + d.dynTab.setMaxSize(v) +} + +// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded +// stream (via dynamic table size updates) may set the maximum size +// to. +func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) { + d.dynTab.allowedMaxSize = v +} + +type dynamicTable struct { + // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2 + table headerFieldTable + size uint32 // in bytes + maxSize uint32 // current maxSize + allowedMaxSize uint32 // maxSize may go up to this, inclusive +} + +func (dt *dynamicTable) setMaxSize(v uint32) { + dt.maxSize = v + dt.evict() +} + +func (dt *dynamicTable) add(f HeaderField) { + dt.table.addEntry(f) + dt.size += f.Size() + dt.evict() +} + +// If we're too big, evict old stuff. +func (dt *dynamicTable) evict() { + var n int + for dt.size > dt.maxSize && n < dt.table.len() { + dt.size -= dt.table.ents[n].Size() + n++ + } + dt.table.evictOldest(n) +} + +func (d *Decoder) maxTableIndex() int { + // This should never overflow. RFC 7540 Section 6.5.2 limits the size of + // the dynamic table to 2^32 bytes, where each entry will occupy more than + // one byte. Further, the staticTable has a fixed, small length. + return d.dynTab.table.len() + staticTable.len() +} + +func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { + // See Section 2.3.3. 
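+	// The single index space covers the static table first (1 through
+	// staticTable.len(), 61 entries in RFC 7541 Appendix A) and then the
+	// dynamic table, newest entry first; a brief recap of Section 2.3.3.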
+ if i == 0 { + return + } + if i <= uint64(staticTable.len()) { + return staticTable.ents[i-1], true + } + if i > uint64(d.maxTableIndex()) { + return + } + // In the dynamic table, newer entries have lower indices. + // However, dt.ents[0] is the oldest entry. Hence, dt.ents is + // the reversed dynamic table. + dt := d.dynTab.table + return dt.ents[dt.len()-(int(i)-staticTable.len())], true +} + +// Decode decodes an entire block. +// +// TODO: remove this method and make it incremental later? This is +// easier for debugging now. +func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) { + var hf []HeaderField + saveFunc := d.emit + defer func() { d.emit = saveFunc }() + d.emit = func(f HeaderField) { hf = append(hf, f) } + if _, err := d.Write(p); err != nil { + return nil, err + } + if err := d.Close(); err != nil { + return nil, err + } + return hf, nil +} + +func (d *Decoder) Close() error { + if d.saveBuf.Len() > 0 { + d.saveBuf.Reset() + return DecodingError{errors.New("truncated headers")} + } + return nil +} + +func (d *Decoder) Write(p []byte) (n int, err error) { + if len(p) == 0 { + // Prevent state machine CPU attacks (making us redo + // work up to the point of finding out we don't have + // enough data) + return + } + // Only copy the data if we have to. Optimistically assume + // that p will contain a complete header block. + if d.saveBuf.Len() == 0 { + d.buf = p + } else { + d.saveBuf.Write(p) + d.buf = d.saveBuf.Bytes() + d.saveBuf.Reset() + } + + for len(d.buf) > 0 { + err = d.parseHeaderFieldRepr() + if err == errNeedMore { + // Extra paranoia, making sure saveBuf won't + // get too large. All the varint and string + // reading code earlier should already catch + // overlong things and return ErrStringLength, + // but keep this as a last resort. + const varIntOverhead = 8 // conservative + if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) { + return 0, ErrStringLength + } + d.saveBuf.Write(d.buf) + return len(p), nil + } + if err != nil { + break + } + } + return len(p), err +} + +// errNeedMore is an internal sentinel error value that means the +// buffer is truncated and we need to read more data before we can +// continue parsing. +var errNeedMore = errors.New("need more data") + +type indexType int + +const ( + indexedTrue indexType = iota + indexedFalse + indexedNever +) + +func (v indexType) indexed() bool { return v == indexedTrue } +func (v indexType) sensitive() bool { return v == indexedNever } + +// returns errNeedMore if there isn't enough data available. +// any other error is fatal. +// consumes d.buf iff it returns nil. +// precondition: must be called with len(d.buf) > 0 +func (d *Decoder) parseHeaderFieldRepr() error { + b := d.buf[0] + switch { + case b&128 != 0: + // Indexed representation. + // High bit set? 
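+		// 0b1xxxxxxx: top bit is 1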
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.1
+		return d.parseFieldIndexed()
+	case b&192 == 64:
+		// 6.2.1 Literal Header Field with Incremental Indexing
+		// 0b01xxxxxx: top two bits are 01
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1
+		return d.parseFieldLiteral(6, indexedTrue)
+	case b&240 == 0:
+		// 6.2.2 Literal Header Field without Indexing
+		// 0b0000xxxx: top four bits are 0000
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2
+		return d.parseFieldLiteral(4, indexedFalse)
+	case b&240 == 16:
+		// 6.2.3 Literal Header Field never Indexed
+		// 0b0001xxxx: top four bits are 0001
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3
+		return d.parseFieldLiteral(4, indexedNever)
+	case b&224 == 32:
+		// 6.3 Dynamic Table Size Update
+		// Top three bits are '001'.
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.3
+		return d.parseDynamicTableSizeUpdate()
+	}
+
+	return DecodingError{errors.New("invalid encoding")}
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldIndexed() error {
+	buf := d.buf
+	idx, buf, err := readVarInt(7, buf)
+	if err != nil {
+		return err
+	}
+	hf, ok := d.at(idx)
+	if !ok {
+		return DecodingError{InvalidIndexError(idx)}
+	}
+	d.buf = buf
+	return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
+	buf := d.buf
+	nameIdx, buf, err := readVarInt(n, buf)
+	if err != nil {
+		return err
+	}
+
+	var hf HeaderField
+	wantStr := d.emitEnabled || it.indexed()
+	if nameIdx > 0 {
+		ihf, ok := d.at(nameIdx)
+		if !ok {
+			return DecodingError{InvalidIndexError(nameIdx)}
+		}
+		hf.Name = ihf.Name
+	} else {
+		hf.Name, buf, err = d.readString(buf, wantStr)
+		if err != nil {
+			return err
+		}
+	}
+	hf.Value, buf, err = d.readString(buf, wantStr)
+	if err != nil {
+		return err
+	}
+	d.buf = buf
+	if it.indexed() {
+		d.dynTab.add(hf)
+	}
+	hf.Sensitive = it.sensitive()
+	return d.callEmit(hf)
+}
+
+func (d *Decoder) callEmit(hf HeaderField) error {
+	if d.maxStrLen != 0 {
+		if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen {
+			return ErrStringLength
+		}
+	}
+	if d.emitEnabled {
+		d.emit(hf)
+	}
+	return nil
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseDynamicTableSizeUpdate() error {
+	buf := d.buf
+	size, buf, err := readVarInt(5, buf)
+	if err != nil {
+		return err
+	}
+	if size > uint64(d.dynTab.allowedMaxSize) {
+		return DecodingError{errors.New("dynamic table size update too large")}
+	}
+	d.dynTab.setMaxSize(uint32(size))
+	d.buf = buf
+	return nil
+}
+
+var errVarintOverflow = DecodingError{errors.New("varint integer overflow")}
+
+// readVarInt reads an unsigned variable length integer off the
+// beginning of p. n is the parameter as described in
+// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1.
+//
+// n must always be between 1 and 8.
+//
+// The returned remain buffer is either a smaller suffix of p, or err != nil.
+// The error is errNeedMore if p doesn't contain a complete integer.
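+//
+// A worked example (mirroring the appendVarInt tests): with n=5, the
+// bytes [31, 154, 10] decode to 1337. The 5-bit prefix saturates at
+// 31, then 154 contributes (154&127)<<0 = 26 with the continuation
+// bit set, and 10 contributes 10<<7 = 1280: 31+26+1280 = 1337.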
+func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
+	if n < 1 || n > 8 {
+		panic("bad n")
+	}
+	if len(p) == 0 {
+		return 0, p, errNeedMore
+	}
+	i = uint64(p[0])
+	if n < 8 {
+		i &= (1 << uint64(n)) - 1
+	}
+	if i < (1<<uint64(n))-1 {
+		return i, p[1:], nil
+	}
+
+	origP := p
+	p = p[1:]
+	var m uint64
+	for len(p) > 0 {
+		b := p[0]
+		p = p[1:]
+		i += uint64(b&127) << m
+		if b&128 == 0 {
+			return i, p, nil
+		}
+		m += 7
+		if m >= 63 { // TODO: proper overflow check. making this up.
+			return 0, origP, errVarintOverflow
+		}
+	}
+	return 0, origP, errNeedMore
+}
+
+// readString decodes an hpack string from p.
+//
+// wantStr is whether s will be used. If false, decompression and
+// []byte->string garbage are skipped if s will be ignored
+// anyway. This does mean that huffman decoding errors for non-indexed
+// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
+// is returning an error anyway, and because they're not indexed, the error
+// won't affect the decoding state.
+func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
+	if len(p) == 0 {
+		return "", p, errNeedMore
+	}
+	isHuff := p[0]&128 != 0
+	strLen, p, err := readVarInt(7, p)
+	if err != nil {
+		return "", p, err
+	}
+	if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
+		return "", nil, ErrStringLength
+	}
+	if uint64(len(p)) < strLen {
+		return "", p, errNeedMore
+	}
+	if !isHuff {
+		if wantStr {
+			s = string(p[:strLen])
+		}
+		return s, p[strLen:], nil
+	}
+
+	if wantStr {
+		buf := bufPool.Get().(*bytes.Buffer)
+		buf.Reset() // don't trust others
+		defer bufPool.Put(buf)
+		if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
+			buf.Reset()
+			return "", nil, err
+		}
+		s = buf.String()
+		buf.Reset() // be nice to GC
+	}
+	return s, p[strLen:], nil
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/hpack_test.go b/vendor/golang.org/x/net/http2/hpack/hpack_test.go
new file mode 100644
index 0000000..bc7f476
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/hpack_test.go
@@ -0,0 +1,722 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package hpack + +import ( + "bytes" + "encoding/hex" + "fmt" + "math/rand" + "reflect" + "strings" + "testing" + "time" +) + +func (d *Decoder) mustAt(idx int) HeaderField { + if hf, ok := d.at(uint64(idx)); !ok { + panic(fmt.Sprintf("bogus index %d", idx)) + } else { + return hf + } +} + +func TestDynamicTableAt(t *testing.T) { + d := NewDecoder(4096, nil) + at := d.mustAt + if got, want := at(2), (pair(":method", "GET")); got != want { + t.Errorf("at(2) = %v; want %v", got, want) + } + d.dynTab.add(pair("foo", "bar")) + d.dynTab.add(pair("blake", "miz")) + if got, want := at(staticTable.len()+1), (pair("blake", "miz")); got != want { + t.Errorf("at(dyn 1) = %v; want %v", got, want) + } + if got, want := at(staticTable.len()+2), (pair("foo", "bar")); got != want { + t.Errorf("at(dyn 2) = %v; want %v", got, want) + } + if got, want := at(3), (pair(":method", "POST")); got != want { + t.Errorf("at(3) = %v; want %v", got, want) + } +} + +func TestDynamicTableSizeEvict(t *testing.T) { + d := NewDecoder(4096, nil) + if want := uint32(0); d.dynTab.size != want { + t.Fatalf("size = %d; want %d", d.dynTab.size, want) + } + add := d.dynTab.add + add(pair("blake", "eats pizza")) + if want := uint32(15 + 32); d.dynTab.size != want { + t.Fatalf("after pizza, size = %d; want %d", d.dynTab.size, want) + } + add(pair("foo", "bar")) + if want := uint32(15 + 32 + 6 + 32); d.dynTab.size != want { + t.Fatalf("after foo bar, size = %d; want %d", d.dynTab.size, want) + } + d.dynTab.setMaxSize(15 + 32 + 1 /* slop */) + if want := uint32(6 + 32); d.dynTab.size != want { + t.Fatalf("after setMaxSize, size = %d; want %d", d.dynTab.size, want) + } + if got, want := d.mustAt(staticTable.len()+1), (pair("foo", "bar")); got != want { + t.Errorf("at(dyn 1) = %v; want %v", got, want) + } + add(pair("long", strings.Repeat("x", 500))) + if want := uint32(0); d.dynTab.size != want { + t.Fatalf("after big one, size = %d; want %d", d.dynTab.size, want) + } +} + +func TestDecoderDecode(t *testing.T) { + tests := []struct { + name string + in []byte + want []HeaderField + wantDynTab []HeaderField // newest entry first + }{ + // C.2.1 Literal Header Field with Indexing + // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.1 + {"C.2.1", dehex("400a 6375 7374 6f6d 2d6b 6579 0d63 7573 746f 6d2d 6865 6164 6572"), + []HeaderField{pair("custom-key", "custom-header")}, + []HeaderField{pair("custom-key", "custom-header")}, + }, + + // C.2.2 Literal Header Field without Indexing + // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.2 + {"C.2.2", dehex("040c 2f73 616d 706c 652f 7061 7468"), + []HeaderField{pair(":path", "/sample/path")}, + []HeaderField{}}, + + // C.2.3 Literal Header Field never Indexed + // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.3 + {"C.2.3", dehex("1008 7061 7373 776f 7264 0673 6563 7265 74"), + []HeaderField{{"password", "secret", true}}, + []HeaderField{}}, + + // C.2.4 Indexed Header Field + // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.4 + {"C.2.4", []byte("\x82"), + []HeaderField{pair(":method", "GET")}, + []HeaderField{}}, + } + for _, tt := range tests { + d := NewDecoder(4096, nil) + hf, err := d.DecodeFull(tt.in) + if err != nil { + t.Errorf("%s: %v", tt.name, err) + continue + } + if !reflect.DeepEqual(hf, tt.want) { + t.Errorf("%s: Got %v; want %v", tt.name, hf, tt.want) + } + gotDynTab := d.dynTab.reverseCopy() + if !reflect.DeepEqual(gotDynTab, tt.wantDynTab) { + t.Errorf("%s: dynamic table after = %v; want %v", 
tt.name, gotDynTab, tt.wantDynTab) + } + } +} + +func (dt *dynamicTable) reverseCopy() (hf []HeaderField) { + hf = make([]HeaderField, len(dt.table.ents)) + for i := range hf { + hf[i] = dt.table.ents[len(dt.table.ents)-1-i] + } + return +} + +type encAndWant struct { + enc []byte + want []HeaderField + wantDynTab []HeaderField + wantDynSize uint32 +} + +// C.3 Request Examples without Huffman Coding +// http://http2.github.io/http2-spec/compression.html#rfc.section.C.3 +func TestDecodeC3_NoHuffman(t *testing.T) { + testDecodeSeries(t, 4096, []encAndWant{ + {dehex("8286 8441 0f77 7777 2e65 7861 6d70 6c65 2e63 6f6d"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + }, + []HeaderField{ + pair(":authority", "www.example.com"), + }, + 57, + }, + {dehex("8286 84be 5808 6e6f 2d63 6163 6865"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + pair("cache-control", "no-cache"), + }, + []HeaderField{ + pair("cache-control", "no-cache"), + pair(":authority", "www.example.com"), + }, + 110, + }, + {dehex("8287 85bf 400a 6375 7374 6f6d 2d6b 6579 0c63 7573 746f 6d2d 7661 6c75 65"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "https"), + pair(":path", "/index.html"), + pair(":authority", "www.example.com"), + pair("custom-key", "custom-value"), + }, + []HeaderField{ + pair("custom-key", "custom-value"), + pair("cache-control", "no-cache"), + pair(":authority", "www.example.com"), + }, + 164, + }, + }) +} + +// C.4 Request Examples with Huffman Coding +// http://http2.github.io/http2-spec/compression.html#rfc.section.C.4 +func TestDecodeC4_Huffman(t *testing.T) { + testDecodeSeries(t, 4096, []encAndWant{ + {dehex("8286 8441 8cf1 e3c2 e5f2 3a6b a0ab 90f4 ff"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + }, + []HeaderField{ + pair(":authority", "www.example.com"), + }, + 57, + }, + {dehex("8286 84be 5886 a8eb 1064 9cbf"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + pair("cache-control", "no-cache"), + }, + []HeaderField{ + pair("cache-control", "no-cache"), + pair(":authority", "www.example.com"), + }, + 110, + }, + {dehex("8287 85bf 4088 25a8 49e9 5ba9 7d7f 8925 a849 e95b b8e8 b4bf"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "https"), + pair(":path", "/index.html"), + pair(":authority", "www.example.com"), + pair("custom-key", "custom-value"), + }, + []HeaderField{ + pair("custom-key", "custom-value"), + pair("cache-control", "no-cache"), + pair(":authority", "www.example.com"), + }, + 164, + }, + }) +} + +// http://http2.github.io/http2-spec/compression.html#rfc.section.C.5 +// "This section shows several consecutive header lists, corresponding +// to HTTP responses, on the same connection. The HTTP/2 setting +// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256 +// octets, causing some evictions to occur." 
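+//
+// As a sanity check on the expected sizes below: each entry costs
+// len(name)+len(value)+32 octets (RFC 7541 Section 4.1), so the first
+// response's table holds 42 (":status"/"302") + 52 + 65 + 63 = 222
+// octets, just under the 256-octet limit.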
+func TestDecodeC5_ResponsesNoHuff(t *testing.T) { + testDecodeSeries(t, 256, []encAndWant{ + {dehex(` +4803 3330 3258 0770 7269 7661 7465 611d +4d6f 6e2c 2032 3120 4f63 7420 3230 3133 +2032 303a 3133 3a32 3120 474d 546e 1768 +7474 7073 3a2f 2f77 7777 2e65 7861 6d70 +6c65 2e63 6f6d +`), + []HeaderField{ + pair(":status", "302"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("location", "https://www.example.com"), + }, + []HeaderField{ + pair("location", "https://www.example.com"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("cache-control", "private"), + pair(":status", "302"), + }, + 222, + }, + {dehex("4803 3330 37c1 c0bf"), + []HeaderField{ + pair(":status", "307"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("location", "https://www.example.com"), + }, + []HeaderField{ + pair(":status", "307"), + pair("location", "https://www.example.com"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("cache-control", "private"), + }, + 222, + }, + {dehex(` +88c1 611d 4d6f 6e2c 2032 3120 4f63 7420 +3230 3133 2032 303a 3133 3a32 3220 474d +54c0 5a04 677a 6970 7738 666f 6f3d 4153 +444a 4b48 514b 425a 584f 5157 454f 5049 +5541 5851 5745 4f49 553b 206d 6178 2d61 +6765 3d33 3630 303b 2076 6572 7369 6f6e +3d31 +`), + []HeaderField{ + pair(":status", "200"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), + pair("location", "https://www.example.com"), + pair("content-encoding", "gzip"), + pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), + }, + []HeaderField{ + pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), + pair("content-encoding", "gzip"), + pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), + }, + 215, + }, + }) +} + +// http://http2.github.io/http2-spec/compression.html#rfc.section.C.6 +// "This section shows the same examples as the previous section, but +// using Huffman encoding for the literal values. The HTTP/2 setting +// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256 +// octets, causing some evictions to occur. The eviction mechanism +// uses the length of the decoded literal values, so the same +// evictions occurs as in the previous section." 
+func TestDecodeC6_ResponsesHuffman(t *testing.T) { + testDecodeSeries(t, 256, []encAndWant{ + {dehex(` +4882 6402 5885 aec3 771a 4b61 96d0 7abe +9410 54d4 44a8 2005 9504 0b81 66e0 82a6 +2d1b ff6e 919d 29ad 1718 63c7 8f0b 97c8 +e9ae 82ae 43d3 +`), + []HeaderField{ + pair(":status", "302"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("location", "https://www.example.com"), + }, + []HeaderField{ + pair("location", "https://www.example.com"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("cache-control", "private"), + pair(":status", "302"), + }, + 222, + }, + {dehex("4883 640e ffc1 c0bf"), + []HeaderField{ + pair(":status", "307"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("location", "https://www.example.com"), + }, + []HeaderField{ + pair(":status", "307"), + pair("location", "https://www.example.com"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("cache-control", "private"), + }, + 222, + }, + {dehex(` +88c1 6196 d07a be94 1054 d444 a820 0595 +040b 8166 e084 a62d 1bff c05a 839b d9ab +77ad 94e7 821d d7f2 e6c7 b335 dfdf cd5b +3960 d5af 2708 7f36 72c1 ab27 0fb5 291f +9587 3160 65c0 03ed 4ee5 b106 3d50 07 +`), + []HeaderField{ + pair(":status", "200"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), + pair("location", "https://www.example.com"), + pair("content-encoding", "gzip"), + pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), + }, + []HeaderField{ + pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), + pair("content-encoding", "gzip"), + pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), + }, + 215, + }, + }) +} + +func testDecodeSeries(t *testing.T, size uint32, steps []encAndWant) { + d := NewDecoder(size, nil) + for i, step := range steps { + hf, err := d.DecodeFull(step.enc) + if err != nil { + t.Fatalf("Error at step index %d: %v", i, err) + } + if !reflect.DeepEqual(hf, step.want) { + t.Fatalf("At step index %d: Got headers %v; want %v", i, hf, step.want) + } + gotDynTab := d.dynTab.reverseCopy() + if !reflect.DeepEqual(gotDynTab, step.wantDynTab) { + t.Errorf("After step index %d, dynamic table = %v; want %v", i, gotDynTab, step.wantDynTab) + } + if d.dynTab.size != step.wantDynSize { + t.Errorf("After step index %d, dynamic table size = %v; want %v", i, d.dynTab.size, step.wantDynSize) + } + } +} + +func TestHuffmanDecodeExcessPadding(t *testing.T) { + tests := [][]byte{ + {0xff}, // Padding Exceeds 7 bits + {0x1f, 0xff}, // {"a", 1 byte excess padding} + {0x1f, 0xff, 0xff}, // {"a", 2 byte excess padding} + {0x1f, 0xff, 0xff, 0xff}, // {"a", 3 byte excess padding} + {0xff, 0x9f, 0xff, 0xff, 0xff}, // {"a", 29 bit excess padding} + {'R', 0xbc, '0', 0xff, 0xff, 0xff, 0xff}, // Padding ends on partial symbol. 
+	}
+	for i, in := range tests {
+		var buf bytes.Buffer
+		if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman {
+			t.Errorf("test-%d: decode(%q) = %v; want ErrInvalidHuffman", i, in, err)
+		}
+	}
+}
+
+func TestHuffmanDecodeEOS(t *testing.T) {
+	in := []byte{0xff, 0xff, 0xff, 0xff, 0xfc} // {EOS, "?"}
+	var buf bytes.Buffer
+	if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman {
+		t.Errorf("error = %v; want ErrInvalidHuffman", err)
+	}
+}
+
+func TestHuffmanDecodeMaxLengthOnTrailingByte(t *testing.T) {
+	in := []byte{0x00, 0x01} // {"0", "0", "0"}
+	var buf bytes.Buffer
+	if err := huffmanDecode(&buf, 2, in); err != ErrStringLength {
+		t.Errorf("error = %v; want ErrStringLength", err)
+	}
+}
+
+func TestHuffmanDecodeCorruptPadding(t *testing.T) {
+	in := []byte{0x00}
+	var buf bytes.Buffer
+	if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman {
+		t.Errorf("error = %v; want ErrInvalidHuffman", err)
+	}
+}
+
+func TestHuffmanDecode(t *testing.T) {
+	tests := []struct {
+		inHex, want string
+	}{
+		{"f1e3 c2e5 f23a 6ba0 ab90 f4ff", "www.example.com"},
+		{"a8eb 1064 9cbf", "no-cache"},
+		{"25a8 49e9 5ba9 7d7f", "custom-key"},
+		{"25a8 49e9 5bb8 e8b4 bf", "custom-value"},
+		{"6402", "302"},
+		{"aec3 771a 4b", "private"},
+		{"d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff", "Mon, 21 Oct 2013 20:13:21 GMT"},
+		{"9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3", "https://www.example.com"},
+		{"9bd9 ab", "gzip"},
+		{"94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07",
+			"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"},
+	}
+	for i, tt := range tests {
+		var buf bytes.Buffer
+		in, err := hex.DecodeString(strings.Replace(tt.inHex, " ", "", -1))
+		if err != nil {
+			t.Errorf("%d. hex input error: %v", i, err)
+			continue
+		}
+		if _, err := HuffmanDecode(&buf, in); err != nil {
+			t.Errorf("%d. decode error: %v", i, err)
+			continue
+		}
+		if got := buf.String(); tt.want != got {
+			t.Errorf("%d. decode = %q; want %q", i, got, tt.want)
+		}
+	}
+}
+
+func TestAppendHuffmanString(t *testing.T) {
+	tests := []struct {
+		in, want string
+	}{
+		{"www.example.com", "f1e3 c2e5 f23a 6ba0 ab90 f4ff"},
+		{"no-cache", "a8eb 1064 9cbf"},
+		{"custom-key", "25a8 49e9 5ba9 7d7f"},
+		{"custom-value", "25a8 49e9 5bb8 e8b4 bf"},
+		{"302", "6402"},
+		{"private", "aec3 771a 4b"},
+		{"Mon, 21 Oct 2013 20:13:21 GMT", "d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff"},
+		{"https://www.example.com", "9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3"},
+		{"gzip", "9bd9 ab"},
+		{"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1",
+			"94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07"},
+	}
+	for i, tt := range tests {
+		buf := []byte{}
+		want := strings.Replace(tt.want, " ", "", -1)
+		buf = AppendHuffmanString(buf, tt.in)
+		if got := hex.EncodeToString(buf); want != got {
+			t.Errorf("%d. encode = %q; want %q", i, got, want)
+		}
+	}
+}
+
+func TestHuffmanMaxStrLen(t *testing.T) {
+	const msg = "Some string"
+	huff := AppendHuffmanString(nil, msg)
+
+	testGood := func(max int) {
+		var out bytes.Buffer
+		if err := huffmanDecode(&out, max, huff); err != nil {
+			t.Errorf("For maxLen=%d, unexpected error: %v", max, err)
+		}
+		if out.String() != msg {
+			t.Errorf("For maxLen=%d, out = %q; want %q", max, out.String(), msg)
+		}
+	}
+	testGood(0)
+	testGood(len(msg))
+	testGood(len(msg) + 1)
+
+	var out bytes.Buffer
+	if err := huffmanDecode(&out, len(msg)-1, huff); err != ErrStringLength {
+		t.Errorf("err = %v; want ErrStringLength", err)
+	}
+}
+
+func TestHuffmanRoundtripStress(t *testing.T) {
+	const Len = 50 // of uncompressed string
+	input := make([]byte, Len)
+	var output bytes.Buffer
+	var huff []byte
+
+	n := 5000
+	if testing.Short() {
+		n = 100
+	}
+	seed := time.Now().UnixNano()
+	t.Logf("Seed = %v", seed)
+	src := rand.New(rand.NewSource(seed))
+	var encSize int64
+	for i := 0; i < n; i++ {
+		for l := range input {
+			input[l] = byte(src.Intn(256))
+		}
+		huff = AppendHuffmanString(huff[:0], string(input))
+		encSize += int64(len(huff))
+		output.Reset()
+		if err := huffmanDecode(&output, 0, huff); err != nil {
+			t.Errorf("Failed to decode %q -> %q -> error %v", input, huff, err)
+			continue
+		}
+		if !bytes.Equal(output.Bytes(), input) {
+			t.Errorf("Roundtrip failure on %q -> %q -> %q", input, huff, output.Bytes())
+		}
+	}
+	t.Logf("Compressed size of original: %0.02f%% (%v -> %v)", 100*(float64(encSize)/(Len*float64(n))), Len*n, encSize)
+}
+
+func TestHuffmanDecodeFuzz(t *testing.T) {
+	const Len = 50 // of compressed
+	var buf, zbuf bytes.Buffer
+
+	n := 5000
+	if testing.Short() {
+		n = 100
+	}
+	seed := time.Now().UnixNano()
+	t.Logf("Seed = %v", seed)
+	src := rand.New(rand.NewSource(seed))
+	numFail := 0
+	for i := 0; i < n; i++ {
+		zbuf.Reset()
+		if i == 0 {
+			// Start with at least one invalid one.
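+			// (This is the same byte sequence exercised by
+			// TestHuffmanFuzzCrash below.)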
+ zbuf.WriteString("00\x91\xff\xff\xff\xff\xc8") + } else { + for l := 0; l < Len; l++ { + zbuf.WriteByte(byte(src.Intn(256))) + } + } + + buf.Reset() + if err := huffmanDecode(&buf, 0, zbuf.Bytes()); err != nil { + if err == ErrInvalidHuffman { + numFail++ + continue + } + t.Errorf("Failed to decode %q: %v", zbuf.Bytes(), err) + continue + } + } + t.Logf("%0.02f%% are invalid (%d / %d)", 100*float64(numFail)/float64(n), numFail, n) + if numFail < 1 { + t.Error("expected at least one invalid huffman encoding (test starts with one)") + } +} + +func TestReadVarInt(t *testing.T) { + type res struct { + i uint64 + consumed int + err error + } + tests := []struct { + n byte + p []byte + want res + }{ + // Fits in a byte: + {1, []byte{0}, res{0, 1, nil}}, + {2, []byte{2}, res{2, 1, nil}}, + {3, []byte{6}, res{6, 1, nil}}, + {4, []byte{14}, res{14, 1, nil}}, + {5, []byte{30}, res{30, 1, nil}}, + {6, []byte{62}, res{62, 1, nil}}, + {7, []byte{126}, res{126, 1, nil}}, + {8, []byte{254}, res{254, 1, nil}}, + + // Doesn't fit in a byte: + {1, []byte{1}, res{0, 0, errNeedMore}}, + {2, []byte{3}, res{0, 0, errNeedMore}}, + {3, []byte{7}, res{0, 0, errNeedMore}}, + {4, []byte{15}, res{0, 0, errNeedMore}}, + {5, []byte{31}, res{0, 0, errNeedMore}}, + {6, []byte{63}, res{0, 0, errNeedMore}}, + {7, []byte{127}, res{0, 0, errNeedMore}}, + {8, []byte{255}, res{0, 0, errNeedMore}}, + + // Ignoring top bits: + {5, []byte{255, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 111 + {5, []byte{159, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 100 + {5, []byte{191, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 101 + + // Extra byte: + {5, []byte{191, 154, 10, 2}, res{1337, 3, nil}}, // extra byte + + // Short a byte: + {5, []byte{191, 154}, res{0, 0, errNeedMore}}, + + // integer overflow: + {1, []byte{255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, res{0, 0, errVarintOverflow}}, + } + for _, tt := range tests { + i, remain, err := readVarInt(tt.n, tt.p) + consumed := len(tt.p) - len(remain) + got := res{i, consumed, err} + if got != tt.want { + t.Errorf("readVarInt(%d, %v ~ %x) = %+v; want %+v", tt.n, tt.p, tt.p, got, tt.want) + } + } +} + +// Fuzz crash, originally reported at https://github.com/bradfitz/http2/issues/56 +func TestHuffmanFuzzCrash(t *testing.T) { + got, err := HuffmanDecodeToString([]byte("00\x91\xff\xff\xff\xff\xc8")) + if got != "" { + t.Errorf("Got %q; want empty string", got) + } + if err != ErrInvalidHuffman { + t.Errorf("Err = %v; want ErrInvalidHuffman", err) + } +} + +func pair(name, value string) HeaderField { + return HeaderField{Name: name, Value: value} +} + +func dehex(s string) []byte { + s = strings.Replace(s, " ", "", -1) + s = strings.Replace(s, "\n", "", -1) + b, err := hex.DecodeString(s) + if err != nil { + panic(err) + } + return b +} + +func TestEmitEnabled(t *testing.T) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + enc.WriteField(HeaderField{Name: "foo", Value: "bar"}) + enc.WriteField(HeaderField{Name: "foo", Value: "bar"}) + + numCallback := 0 + var dec *Decoder + dec = NewDecoder(8<<20, func(HeaderField) { + numCallback++ + dec.SetEmitEnabled(false) + }) + if !dec.EmitEnabled() { + t.Errorf("initial emit enabled = false; want true") + } + if _, err := dec.Write(buf.Bytes()); err != nil { + t.Error(err) + } + if numCallback != 1 { + t.Errorf("num callbacks = %d; want 1", numCallback) + } + if dec.EmitEnabled() { + t.Errorf("emit enabled = true; want false") + } +} + +func TestSaveBufLimit(t *testing.T) { 
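+	// Exercises Decoder.Write's saveBuf cap: a complete literal parses
+	// normally, while an oversized unterminated string must fail fast
+	// with ErrStringLength rather than buffering without bound.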
+ const maxStr = 1 << 10 + var got []HeaderField + dec := NewDecoder(initialHeaderTableSize, func(hf HeaderField) { + got = append(got, hf) + }) + dec.SetMaxStringLength(maxStr) + var frag []byte + frag = append(frag[:0], encodeTypeByte(false, false)) + frag = appendVarInt(frag, 7, 3) + frag = append(frag, "foo"...) + frag = appendVarInt(frag, 7, 3) + frag = append(frag, "bar"...) + + if _, err := dec.Write(frag); err != nil { + t.Fatal(err) + } + + want := []HeaderField{{Name: "foo", Value: "bar"}} + if !reflect.DeepEqual(got, want) { + t.Errorf("After small writes, got %v; want %v", got, want) + } + + frag = append(frag[:0], encodeTypeByte(false, false)) + frag = appendVarInt(frag, 7, maxStr*3) + frag = append(frag, make([]byte, maxStr*3)...) + + _, err := dec.Write(frag) + if err != ErrStringLength { + t.Fatalf("Write error = %v; want ErrStringLength", err) + } +} diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go new file mode 100644 index 0000000..8850e39 --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/huffman.go @@ -0,0 +1,212 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hpack + +import ( + "bytes" + "errors" + "io" + "sync" +) + +var bufPool = sync.Pool{ + New: func() interface{} { return new(bytes.Buffer) }, +} + +// HuffmanDecode decodes the string in v and writes the expanded +// result to w, returning the number of bytes written to w and the +// Write call's return value. At most one Write call is made. +func HuffmanDecode(w io.Writer, v []byte) (int, error) { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + defer bufPool.Put(buf) + if err := huffmanDecode(buf, 0, v); err != nil { + return 0, err + } + return w.Write(buf.Bytes()) +} + +// HuffmanDecodeToString decodes the string in v. +func HuffmanDecodeToString(v []byte) (string, error) { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + defer bufPool.Put(buf) + if err := huffmanDecode(buf, 0, v); err != nil { + return "", err + } + return buf.String(), nil +} + +// ErrInvalidHuffman is returned for errors found decoding +// Huffman-encoded strings. +var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data") + +// huffmanDecode decodes v to buf. +// If maxLen is greater than 0, attempts to write more to buf than +// maxLen bytes will return ErrStringLength. +func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { + n := rootHuffmanNode + // cur is the bit buffer that has not been fed into n. + // cbits is the number of low order bits in cur that are valid. + // sbits is the number of bits of the symbol prefix being decoded. 
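+	// The walk below consumes the input through an 8-bit-wide lookup
+	// tree: each level resolves one byte of code, so codes of up to 8
+	// bits land in one step and the longest (30-bit) codes need four.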
+	cur, cbits, sbits := uint(0), uint8(0), uint8(0)
+	for _, b := range v {
+		cur = cur<<8 | uint(b)
+		cbits += 8
+		sbits += 8
+		for cbits >= 8 {
+			idx := byte(cur >> (cbits - 8))
+			n = n.children[idx]
+			if n == nil {
+				return ErrInvalidHuffman
+			}
+			if n.children == nil {
+				if maxLen != 0 && buf.Len() == maxLen {
+					return ErrStringLength
+				}
+				buf.WriteByte(n.sym)
+				cbits -= n.codeLen
+				n = rootHuffmanNode
+				sbits = cbits
+			} else {
+				cbits -= 8
+			}
+		}
+	}
+	for cbits > 0 {
+		n = n.children[byte(cur<<(8-cbits))]
+		if n == nil {
+			return ErrInvalidHuffman
+		}
+		if n.children != nil || n.codeLen > cbits {
+			break
+		}
+		if maxLen != 0 && buf.Len() == maxLen {
+			return ErrStringLength
+		}
+		buf.WriteByte(n.sym)
+		cbits -= n.codeLen
+		n = rootHuffmanNode
+		sbits = cbits
+	}
+	if sbits > 7 {
+		// Either there was an incomplete symbol, or overlong padding.
+		// Both are decoding errors per RFC 7541 section 5.2.
+		return ErrInvalidHuffman
+	}
+	if mask := uint(1<<cbits - 1); cur&mask != mask {
+		// Trailing bits must be EOS prefix.
+		return ErrInvalidHuffman
+	}
+	return nil
+}
+
+type node struct {
+	// children is non-nil for internal nodes
+	children []*node
+
+	// The following are only valid if children is nil:
+	codeLen uint8 // number of bits in this code
+	sym     byte  // the symbol
+}
+
+func newInternalNode() *node {
+	return &node{children: make([]*node, 256)}
+}
+
+var rootHuffmanNode = newInternalNode()
+
+func init() {
+	if len(huffmanCodes) != 256 {
+		panic("unexpected size")
+	}
+	for i, code := range huffmanCodes {
+		addDecoderNode(byte(i), code, huffmanCodeLen[i])
+	}
+}
+
+func addDecoderNode(sym byte, code uint32, codeLen uint8) {
+	cur := rootHuffmanNode
+	for codeLen > 8 {
+		codeLen -= 8
+		i := uint8(code >> codeLen)
+		if cur.children[i] == nil {
+			cur.children[i] = newInternalNode()
+		}
+		cur = cur.children[i]
+	}
+	shift := 8 - codeLen
+	start, end := int(uint8(code<<shift)), int(1<<shift)
+	for i := start; i < start+end; i++ {
+		cur.children[i] = &node{sym: sym, codeLen: codeLen}
+	}
+}
+
+// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
+// and returns the extended buffer.
+func AppendHuffmanString(dst []byte, s string) []byte {
+	rembits := uint8(8)
+
+	for i := 0; i < len(s); i++ {
+		if rembits == 8 {
+			dst = append(dst, 0)
+		}
+		dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
+	}
+
+	if rembits < 8 {
+		// special EOS symbol
+		code := uint32(0x3fffffff)
+		nbits := uint8(30)
+
+		t := uint8(code >> (nbits - rembits))
+		dst[len(dst)-1] |= t
+	}
+
+	return dst
+}
+
+// HuffmanEncodeLength returns the number of bytes required to encode
+// s in Huffman codes. The result is rounded up to a byte boundary.
+func HuffmanEncodeLength(s string) uint64 {
+	n := uint64(0)
+	for i := 0; i < len(s); i++ {
+		n += uint64(huffmanCodeLen[s[i]])
+	}
+	return (n + 7) / 8
+}
+
+// appendByteToHuffmanCode appends Huffman code for c to dst and
+// returns the extended buffer and the remaining bits in the last
+// element. The appending is not byte aligned and the remaining bits
+// in the last element of dst are given in rembits.
+func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
+	code := huffmanCodes[c]
+	nbits := huffmanCodeLen[c]
+
+	for {
+		if rembits > nbits {
+			t := uint8(code << (rembits - nbits))
+			dst[len(dst)-1] |= t
+			rembits -= nbits
+			break
+		}
+
+		t := uint8(code >> (nbits - rembits))
+		dst[len(dst)-1] |= t
+
+		nbits -= rembits
+		rembits = 8
+
+		if nbits == 0 {
+			break
+		}
+
+		dst = append(dst, 0)
+	}
+
+	return dst, rembits
+}
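To make the encode/decode pair above concrete, here is a hypothetical package-internal example (not part of the vendored file; it assumes "fmt" is added to the imports, and the 12-byte length matches the worked example in RFC 7541 Appendix C.4.1):

func ExampleHuffmanRoundTrip() {
	enc := AppendHuffmanString(nil, "www.example.com")
	dec, err := HuffmanDecodeToString(enc)
	fmt.Println(len(enc), dec, err) // the Huffman form is shorter than the 15-byte input
	// Output: 12 www.example.com <nil>
}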
diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go
new file mode 100644
index 0000000..a66cfbe
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/tables.go
@@ -0,0 +1,479 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+	"fmt"
+)
+
+// headerFieldTable implements a list of HeaderFields.
+// This is used to implement the static and dynamic tables.
+type headerFieldTable struct {
+	// For static tables, entries are never evicted.
+	//
+	// For dynamic tables, entries are evicted from ents[0] and added to the end.
+	// Each entry has a unique id that starts at one and increments for each
+	// entry that is added. This unique id is stable across evictions, meaning
+	// it can be used as a pointer to a specific entry. As in HPACK, unique ids
+	// are 1-based. The unique id for ents[k] is k + evictCount + 1.
+	//
+	// Zero is not a valid unique id.
+	//
+	// evictCount should not overflow in any remotely practical situation. In
+	// practice, we will have one dynamic table per HTTP/2 connection. If we
+	// assume a very powerful server that handles 1M QPS per connection and each
+	// request adds (then evicts) 100 entries from the table, it would still take
+	// 2M years for evictCount to overflow.
+	ents       []HeaderField
+	evictCount uint64
+
+	// byName maps a HeaderField name to the unique id of the newest entry with
+	// the same name. See above for a definition of "unique id".
+	byName map[string]uint64
+
+	// byNameValue maps a HeaderField name/value pair to the unique id of the newest
+	// entry with the same name and value. See above for a definition of "unique id".
+	byNameValue map[pairNameValue]uint64
+}
+
+type pairNameValue struct {
+	name, value string
+}
+
+func (t *headerFieldTable) init() {
+	t.byName = make(map[string]uint64)
+	t.byNameValue = make(map[pairNameValue]uint64)
+}
+
+// len reports the number of entries in the table.
+func (t *headerFieldTable) len() int {
+	return len(t.ents)
+}
+
+// addEntry adds a new entry.
+func (t *headerFieldTable) addEntry(f HeaderField) {
+	id := uint64(t.len()) + t.evictCount + 1
+	t.byName[f.Name] = id
+	t.byNameValue[pairNameValue{f.Name, f.Value}] = id
+	t.ents = append(t.ents, f)
+}
+
+// evictOldest evicts the n oldest entries in the table.
+func (t *headerFieldTable) evictOldest(n int) {
+	if n > t.len() {
+		panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len()))
+	}
+	for k := 0; k < n; k++ {
+		f := t.ents[k]
+		id := t.evictCount + uint64(k) + 1
+		if t.byName[f.Name] == id {
+			delete(t.byName, f.Name)
+		}
+		if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id {
+			delete(t.byNameValue, p)
+		}
+	}
+	copy(t.ents, t.ents[n:])
+	for k := t.len() - n; k < t.len(); k++ {
+		t.ents[k] = HeaderField{} // so strings can be garbage collected
+	}
+	t.ents = t.ents[:t.len()-n]
+	if t.evictCount+uint64(n) < t.evictCount {
+		panic("evictCount overflow")
+	}
+	t.evictCount += uint64(n)
+}
+
+// search finds f in the table. If there is no match, i is 0.
+// If both name and value match, i is the matched index and nameValueMatch
+// becomes true. If only name matches, i points to that index and
+// nameValueMatch becomes false.
+//
+// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says
+// that index 1 should be the newest entry, but t.ents[0] is the oldest entry,
+// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic
+// table, the return value i actually refers to the entry t.ents[t.len()-i].
+//
+// All tables are assumed to be dynamic tables except for the global
+// staticTable pointer.
+//
+// See Section 2.3.3.
+func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
+	if !f.Sensitive {
+		if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 {
+			return t.idToIndex(id), true
+		}
+	}
+	if id := t.byName[f.Name]; id != 0 {
+		return t.idToIndex(id), false
+	}
+	return 0, false
+}
+
+// idToIndex converts a unique id to an HPACK index.
+// See Section 2.3.3.
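A worked example of the mapping implemented by the function below (an editorial sketch built from the definitions above, not vendored code): with evictCount = 2 and three live entries, the live unique ids are 3, 4, and 5, and the newest entry (id 5) must get HPACK index 1.

// dynamicIndex restates the dynamic-table arithmetic in isolation.
func dynamicIndex(id, evictCount uint64, tableLen int) uint64 {
	k := id - evictCount - 1    // position in ents: 0 is the oldest entry
	return uint64(tableLen) - k // HPACK index: 1 is the newest entry
}

// dynamicIndex(5, 2, 3) == 1 and dynamicIndex(3, 2, 3) == 3, matching the
// reversed ordering described in the search comment above.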
+func (t *headerFieldTable) idToIndex(id uint64) uint64 { + if id <= t.evictCount { + panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount)) + } + k := id - t.evictCount - 1 // convert id to an index t.ents[k] + if t != staticTable { + return uint64(t.len()) - k // dynamic table + } + return k + 1 +} + +// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B +var staticTable = newStaticTable() +var staticTableEntries = [...]HeaderField{ + {Name: ":authority"}, + {Name: ":method", Value: "GET"}, + {Name: ":method", Value: "POST"}, + {Name: ":path", Value: "/"}, + {Name: ":path", Value: "/index.html"}, + {Name: ":scheme", Value: "http"}, + {Name: ":scheme", Value: "https"}, + {Name: ":status", Value: "200"}, + {Name: ":status", Value: "204"}, + {Name: ":status", Value: "206"}, + {Name: ":status", Value: "304"}, + {Name: ":status", Value: "400"}, + {Name: ":status", Value: "404"}, + {Name: ":status", Value: "500"}, + {Name: "accept-charset"}, + {Name: "accept-encoding", Value: "gzip, deflate"}, + {Name: "accept-language"}, + {Name: "accept-ranges"}, + {Name: "accept"}, + {Name: "access-control-allow-origin"}, + {Name: "age"}, + {Name: "allow"}, + {Name: "authorization"}, + {Name: "cache-control"}, + {Name: "content-disposition"}, + {Name: "content-encoding"}, + {Name: "content-language"}, + {Name: "content-length"}, + {Name: "content-location"}, + {Name: "content-range"}, + {Name: "content-type"}, + {Name: "cookie"}, + {Name: "date"}, + {Name: "etag"}, + {Name: "expect"}, + {Name: "expires"}, + {Name: "from"}, + {Name: "host"}, + {Name: "if-match"}, + {Name: "if-modified-since"}, + {Name: "if-none-match"}, + {Name: "if-range"}, + {Name: "if-unmodified-since"}, + {Name: "last-modified"}, + {Name: "link"}, + {Name: "location"}, + {Name: "max-forwards"}, + {Name: "proxy-authenticate"}, + {Name: "proxy-authorization"}, + {Name: "range"}, + {Name: "referer"}, + {Name: "refresh"}, + {Name: "retry-after"}, + {Name: "server"}, + {Name: "set-cookie"}, + {Name: "strict-transport-security"}, + {Name: "transfer-encoding"}, + {Name: "user-agent"}, + {Name: "vary"}, + {Name: "via"}, + {Name: "www-authenticate"}, +} + +func newStaticTable() *headerFieldTable { + t := &headerFieldTable{} + t.init() + for _, e := range staticTableEntries[:] { + t.addEntry(e) + } + return t +} + +var huffmanCodes = [256]uint32{ + 0x1ff8, + 0x7fffd8, + 0xfffffe2, + 0xfffffe3, + 0xfffffe4, + 0xfffffe5, + 0xfffffe6, + 0xfffffe7, + 0xfffffe8, + 0xffffea, + 0x3ffffffc, + 0xfffffe9, + 0xfffffea, + 0x3ffffffd, + 0xfffffeb, + 0xfffffec, + 0xfffffed, + 0xfffffee, + 0xfffffef, + 0xffffff0, + 0xffffff1, + 0xffffff2, + 0x3ffffffe, + 0xffffff3, + 0xffffff4, + 0xffffff5, + 0xffffff6, + 0xffffff7, + 0xffffff8, + 0xffffff9, + 0xffffffa, + 0xffffffb, + 0x14, + 0x3f8, + 0x3f9, + 0xffa, + 0x1ff9, + 0x15, + 0xf8, + 0x7fa, + 0x3fa, + 0x3fb, + 0xf9, + 0x7fb, + 0xfa, + 0x16, + 0x17, + 0x18, + 0x0, + 0x1, + 0x2, + 0x19, + 0x1a, + 0x1b, + 0x1c, + 0x1d, + 0x1e, + 0x1f, + 0x5c, + 0xfb, + 0x7ffc, + 0x20, + 0xffb, + 0x3fc, + 0x1ffa, + 0x21, + 0x5d, + 0x5e, + 0x5f, + 0x60, + 0x61, + 0x62, + 0x63, + 0x64, + 0x65, + 0x66, + 0x67, + 0x68, + 0x69, + 0x6a, + 0x6b, + 0x6c, + 0x6d, + 0x6e, + 0x6f, + 0x70, + 0x71, + 0x72, + 0xfc, + 0x73, + 0xfd, + 0x1ffb, + 0x7fff0, + 0x1ffc, + 0x3ffc, + 0x22, + 0x7ffd, + 0x3, + 0x23, + 0x4, + 0x24, + 0x5, + 0x25, + 0x26, + 0x27, + 0x6, + 0x74, + 0x75, + 0x28, + 0x29, + 0x2a, + 0x7, + 0x2b, + 0x76, + 0x2c, + 0x8, + 0x9, + 0x2d, + 0x77, + 0x78, + 0x79, + 0x7a, + 0x7b, + 0x7ffe, + 0x7fc, + 
0x3ffd, + 0x1ffd, + 0xffffffc, + 0xfffe6, + 0x3fffd2, + 0xfffe7, + 0xfffe8, + 0x3fffd3, + 0x3fffd4, + 0x3fffd5, + 0x7fffd9, + 0x3fffd6, + 0x7fffda, + 0x7fffdb, + 0x7fffdc, + 0x7fffdd, + 0x7fffde, + 0xffffeb, + 0x7fffdf, + 0xffffec, + 0xffffed, + 0x3fffd7, + 0x7fffe0, + 0xffffee, + 0x7fffe1, + 0x7fffe2, + 0x7fffe3, + 0x7fffe4, + 0x1fffdc, + 0x3fffd8, + 0x7fffe5, + 0x3fffd9, + 0x7fffe6, + 0x7fffe7, + 0xffffef, + 0x3fffda, + 0x1fffdd, + 0xfffe9, + 0x3fffdb, + 0x3fffdc, + 0x7fffe8, + 0x7fffe9, + 0x1fffde, + 0x7fffea, + 0x3fffdd, + 0x3fffde, + 0xfffff0, + 0x1fffdf, + 0x3fffdf, + 0x7fffeb, + 0x7fffec, + 0x1fffe0, + 0x1fffe1, + 0x3fffe0, + 0x1fffe2, + 0x7fffed, + 0x3fffe1, + 0x7fffee, + 0x7fffef, + 0xfffea, + 0x3fffe2, + 0x3fffe3, + 0x3fffe4, + 0x7ffff0, + 0x3fffe5, + 0x3fffe6, + 0x7ffff1, + 0x3ffffe0, + 0x3ffffe1, + 0xfffeb, + 0x7fff1, + 0x3fffe7, + 0x7ffff2, + 0x3fffe8, + 0x1ffffec, + 0x3ffffe2, + 0x3ffffe3, + 0x3ffffe4, + 0x7ffffde, + 0x7ffffdf, + 0x3ffffe5, + 0xfffff1, + 0x1ffffed, + 0x7fff2, + 0x1fffe3, + 0x3ffffe6, + 0x7ffffe0, + 0x7ffffe1, + 0x3ffffe7, + 0x7ffffe2, + 0xfffff2, + 0x1fffe4, + 0x1fffe5, + 0x3ffffe8, + 0x3ffffe9, + 0xffffffd, + 0x7ffffe3, + 0x7ffffe4, + 0x7ffffe5, + 0xfffec, + 0xfffff3, + 0xfffed, + 0x1fffe6, + 0x3fffe9, + 0x1fffe7, + 0x1fffe8, + 0x7ffff3, + 0x3fffea, + 0x3fffeb, + 0x1ffffee, + 0x1ffffef, + 0xfffff4, + 0xfffff5, + 0x3ffffea, + 0x7ffff4, + 0x3ffffeb, + 0x7ffffe6, + 0x3ffffec, + 0x3ffffed, + 0x7ffffe7, + 0x7ffffe8, + 0x7ffffe9, + 0x7ffffea, + 0x7ffffeb, + 0xffffffe, + 0x7ffffec, + 0x7ffffed, + 0x7ffffee, + 0x7ffffef, + 0x7fffff0, + 0x3ffffee, +} + +var huffmanCodeLen = [256]uint8{ + 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28, + 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6, + 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10, + 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6, + 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5, + 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28, + 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23, + 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24, + 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23, + 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23, + 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25, + 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27, + 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23, + 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26, +} diff --git a/vendor/golang.org/x/net/http2/hpack/tables_test.go b/vendor/golang.org/x/net/http2/hpack/tables_test.go new file mode 100644 index 0000000..d963f36 --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/tables_test.go @@ -0,0 +1,214 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package hpack + +import ( + "bufio" + "regexp" + "strconv" + "strings" + "testing" +) + +func TestHeaderFieldTable(t *testing.T) { + table := &headerFieldTable{} + table.init() + table.addEntry(pair("key1", "value1-1")) + table.addEntry(pair("key2", "value2-1")) + table.addEntry(pair("key1", "value1-2")) + table.addEntry(pair("key3", "value3-1")) + table.addEntry(pair("key4", "value4-1")) + table.addEntry(pair("key2", "value2-2")) + + // Tests will be run twice: once before evicting anything, and + // again after evicting the three oldest entries. + tests := []struct { + f HeaderField + beforeWantStaticI uint64 + beforeWantMatch bool + afterWantStaticI uint64 + afterWantMatch bool + }{ + {HeaderField{"key1", "value1-1", false}, 1, true, 0, false}, + {HeaderField{"key1", "value1-2", false}, 3, true, 0, false}, + {HeaderField{"key1", "value1-3", false}, 3, false, 0, false}, + {HeaderField{"key2", "value2-1", false}, 2, true, 3, false}, + {HeaderField{"key2", "value2-2", false}, 6, true, 3, true}, + {HeaderField{"key2", "value2-3", false}, 6, false, 3, false}, + {HeaderField{"key4", "value4-1", false}, 5, true, 2, true}, + // Name match only, because sensitive. + {HeaderField{"key4", "value4-1", true}, 5, false, 2, false}, + // Key not found. + {HeaderField{"key5", "value5-x", false}, 0, false, 0, false}, + } + + staticToDynamic := func(i uint64) uint64 { + if i == 0 { + return 0 + } + return uint64(table.len()) - i + 1 // dynamic is the reversed table + } + + searchStatic := func(f HeaderField) (uint64, bool) { + old := staticTable + staticTable = table + defer func() { staticTable = old }() + return staticTable.search(f) + } + + searchDynamic := func(f HeaderField) (uint64, bool) { + return table.search(f) + } + + for _, test := range tests { + gotI, gotMatch := searchStatic(test.f) + if wantI, wantMatch := test.beforeWantStaticI, test.beforeWantMatch; gotI != wantI || gotMatch != wantMatch { + t.Errorf("before evictions: searchStatic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch) + } + gotI, gotMatch = searchDynamic(test.f) + wantDynamicI := staticToDynamic(test.beforeWantStaticI) + if wantI, wantMatch := wantDynamicI, test.beforeWantMatch; gotI != wantI || gotMatch != wantMatch { + t.Errorf("before evictions: searchDynamic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch) + } + } + + table.evictOldest(3) + + for _, test := range tests { + gotI, gotMatch := searchStatic(test.f) + if wantI, wantMatch := test.afterWantStaticI, test.afterWantMatch; gotI != wantI || gotMatch != wantMatch { + t.Errorf("after evictions: searchStatic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch) + } + gotI, gotMatch = searchDynamic(test.f) + wantDynamicI := staticToDynamic(test.afterWantStaticI) + if wantI, wantMatch := wantDynamicI, test.afterWantMatch; gotI != wantI || gotMatch != wantMatch { + t.Errorf("after evictions: searchDynamic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch) + } + } +} + +func TestHeaderFieldTable_LookupMapEviction(t *testing.T) { + table := &headerFieldTable{} + table.init() + table.addEntry(pair("key1", "value1-1")) + table.addEntry(pair("key2", "value2-1")) + table.addEntry(pair("key1", "value1-2")) + table.addEntry(pair("key3", "value3-1")) + table.addEntry(pair("key4", "value4-1")) + table.addEntry(pair("key2", "value2-2")) + + // evict all pairs + table.evictOldest(table.len()) + + if l := table.len(); l > 0 { + t.Errorf("table.len() = %d, want 0", l) + } + + if l := len(table.byName); l > 0 { + 
t.Errorf("len(table.byName) = %d, want 0", l) + } + + if l := len(table.byNameValue); l > 0 { + t.Errorf("len(table.byNameValue) = %d, want 0", l) + } +} + +func TestStaticTable(t *testing.T) { + fromSpec := ` + +-------+-----------------------------+---------------+ + | 1 | :authority | | + | 2 | :method | GET | + | 3 | :method | POST | + | 4 | :path | / | + | 5 | :path | /index.html | + | 6 | :scheme | http | + | 7 | :scheme | https | + | 8 | :status | 200 | + | 9 | :status | 204 | + | 10 | :status | 206 | + | 11 | :status | 304 | + | 12 | :status | 400 | + | 13 | :status | 404 | + | 14 | :status | 500 | + | 15 | accept-charset | | + | 16 | accept-encoding | gzip, deflate | + | 17 | accept-language | | + | 18 | accept-ranges | | + | 19 | accept | | + | 20 | access-control-allow-origin | | + | 21 | age | | + | 22 | allow | | + | 23 | authorization | | + | 24 | cache-control | | + | 25 | content-disposition | | + | 26 | content-encoding | | + | 27 | content-language | | + | 28 | content-length | | + | 29 | content-location | | + | 30 | content-range | | + | 31 | content-type | | + | 32 | cookie | | + | 33 | date | | + | 34 | etag | | + | 35 | expect | | + | 36 | expires | | + | 37 | from | | + | 38 | host | | + | 39 | if-match | | + | 40 | if-modified-since | | + | 41 | if-none-match | | + | 42 | if-range | | + | 43 | if-unmodified-since | | + | 44 | last-modified | | + | 45 | link | | + | 46 | location | | + | 47 | max-forwards | | + | 48 | proxy-authenticate | | + | 49 | proxy-authorization | | + | 50 | range | | + | 51 | referer | | + | 52 | refresh | | + | 53 | retry-after | | + | 54 | server | | + | 55 | set-cookie | | + | 56 | strict-transport-security | | + | 57 | transfer-encoding | | + | 58 | user-agent | | + | 59 | vary | | + | 60 | via | | + | 61 | www-authenticate | | + +-------+-----------------------------+---------------+ +` + bs := bufio.NewScanner(strings.NewReader(fromSpec)) + re := regexp.MustCompile(`\| (\d+)\s+\| (\S+)\s*\| (\S(.*\S)?)?\s+\|`) + for bs.Scan() { + l := bs.Text() + if !strings.Contains(l, "|") { + continue + } + m := re.FindStringSubmatch(l) + if m == nil { + continue + } + i, err := strconv.Atoi(m[1]) + if err != nil { + t.Errorf("Bogus integer on line %q", l) + continue + } + if i < 1 || i > staticTable.len() { + t.Errorf("Bogus index %d on line %q", i, l) + continue + } + if got, want := staticTable.ents[i-1].Name, m[2]; got != want { + t.Errorf("header index %d name = %q; want %q", i, got, want) + } + if got, want := staticTable.ents[i-1].Value, m[3]; got != want { + t.Errorf("header index %d value = %q; want %q", i, got, want) + } + } + if err := bs.Err(); err != nil { + t.Error(err) + } +} diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go new file mode 100644 index 0000000..71db28a --- /dev/null +++ b/vendor/golang.org/x/net/http2/http2.go @@ -0,0 +1,391 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package http2 implements the HTTP/2 protocol. +// +// This package is low-level and intended to be used directly by very +// few people. Most users will use it indirectly through the automatic +// use by the net/http package (from Go 1.6 and later). +// For use in earlier Go versions see ConfigureServer. (Transport support +// requires Go 1.6 or later) +// +// See https://http2.github.io/ for more information on HTTP/2. 
+// +// See https://http2.golang.org/ for a test server running this code. +// +package http2 // import "golang.org/x/net/http2" + +import ( + "bufio" + "crypto/tls" + "errors" + "fmt" + "io" + "net/http" + "os" + "sort" + "strconv" + "strings" + "sync" + + "golang.org/x/net/lex/httplex" +) + +var ( + VerboseLogs bool + logFrameWrites bool + logFrameReads bool + inTests bool +) + +func init() { + e := os.Getenv("GODEBUG") + if strings.Contains(e, "http2debug=1") { + VerboseLogs = true + } + if strings.Contains(e, "http2debug=2") { + VerboseLogs = true + logFrameWrites = true + logFrameReads = true + } +} + +const ( + // ClientPreface is the string that must be sent by new + // connections from clients. + ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" + + // SETTINGS_MAX_FRAME_SIZE default + // http://http2.github.io/http2-spec/#rfc.section.6.5.2 + initialMaxFrameSize = 16384 + + // NextProtoTLS is the NPN/ALPN protocol negotiated during + // HTTP/2's TLS setup. + NextProtoTLS = "h2" + + // http://http2.github.io/http2-spec/#SettingValues + initialHeaderTableSize = 4096 + + initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size + + defaultMaxReadFrameSize = 1 << 20 +) + +var ( + clientPreface = []byte(ClientPreface) +) + +type streamState int + +// HTTP/2 stream states. +// +// See http://tools.ietf.org/html/rfc7540#section-5.1. +// +// For simplicity, the server code merges "reserved (local)" into +// "half-closed (remote)". This is one less state transition to track. +// The only downside is that we send PUSH_PROMISEs slightly less +// liberally than allowable. More discussion here: +// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html +// +// "reserved (remote)" is omitted since the client code does not +// support server push. +const ( + stateIdle streamState = iota + stateOpen + stateHalfClosedLocal + stateHalfClosedRemote + stateClosed +) + +var stateName = [...]string{ + stateIdle: "Idle", + stateOpen: "Open", + stateHalfClosedLocal: "HalfClosedLocal", + stateHalfClosedRemote: "HalfClosedRemote", + stateClosed: "Closed", +} + +func (st streamState) String() string { + return stateName[st] +} + +// Setting is a setting parameter: which setting it is, and its value. +type Setting struct { + // ID is which setting is being set. + // See http://http2.github.io/http2-spec/#SettingValues + ID SettingID + + // Val is the value. + Val uint32 +} + +func (s Setting) String() string { + return fmt.Sprintf("[%v = %d]", s.ID, s.Val) +} + +// Valid reports whether the setting is valid. 
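As a quick illustration of the method below (a hypothetical package-internal helper, not vendored code): a SETTINGS_INITIAL_WINDOW_SIZE value above 2^31-1 must be rejected as a flow-control error.

func checkWindowSetting() error {
	s := Setting{ID: SettingInitialWindowSize, Val: 1 << 31} // one past the 2^31-1 maximum
	return s.Valid() // ConnectionError(ErrCodeFlowControl), per RFC 7540 Section 6.5.2
}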
+func (s Setting) Valid() error {
+	// Limits and error codes from 6.5.2 Defined SETTINGS Parameters
+	switch s.ID {
+	case SettingEnablePush:
+		if s.Val != 1 && s.Val != 0 {
+			return ConnectionError(ErrCodeProtocol)
+		}
+	case SettingInitialWindowSize:
+		if s.Val > 1<<31-1 {
+			return ConnectionError(ErrCodeFlowControl)
+		}
+	case SettingMaxFrameSize:
+		if s.Val < 16384 || s.Val > 1<<24-1 {
+			return ConnectionError(ErrCodeProtocol)
+		}
+	}
+	return nil
+}
+
+// A SettingID is an HTTP/2 setting as defined in
+// http://http2.github.io/http2-spec/#iana-settings
+type SettingID uint16
+
+const (
+	SettingHeaderTableSize      SettingID = 0x1
+	SettingEnablePush           SettingID = 0x2
+	SettingMaxConcurrentStreams SettingID = 0x3
+	SettingInitialWindowSize    SettingID = 0x4
+	SettingMaxFrameSize         SettingID = 0x5
+	SettingMaxHeaderListSize    SettingID = 0x6
+)
+
+var settingName = map[SettingID]string{
+	SettingHeaderTableSize:      "HEADER_TABLE_SIZE",
+	SettingEnablePush:           "ENABLE_PUSH",
+	SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
+	SettingInitialWindowSize:    "INITIAL_WINDOW_SIZE",
+	SettingMaxFrameSize:         "MAX_FRAME_SIZE",
+	SettingMaxHeaderListSize:    "MAX_HEADER_LIST_SIZE",
+}
+
+func (s SettingID) String() string {
+	if v, ok := settingName[s]; ok {
+		return v
+	}
+	return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
+}
+
+var (
+	errInvalidHeaderFieldName  = errors.New("http2: invalid header field name")
+	errInvalidHeaderFieldValue = errors.New("http2: invalid header field value")
+)
+
+// validWireHeaderFieldName reports whether v is a valid header field
+// name (key). See httplex.ValidHeaderName for the base rules.
+//
+// Further, http2 says:
+//   "Just as in HTTP/1.x, header field names are strings of ASCII
+//   characters that are compared in a case-insensitive
+//   fashion. However, header field names MUST be converted to
+//   lowercase prior to their encoding in HTTP/2."
+func validWireHeaderFieldName(v string) bool {
+	if len(v) == 0 {
+		return false
+	}
+	for _, r := range v {
+		if !httplex.IsTokenRune(r) {
+			return false
+		}
+		if 'A' <= r && r <= 'Z' {
+			return false
+		}
+	}
+	return true
+}
+
+var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n)
+
+func init() {
+	for i := 100; i <= 999; i++ {
+		if v := http.StatusText(i); v != "" {
+			httpCodeStringCommon[i] = strconv.Itoa(i)
+		}
+	}
+}
+
+func httpCodeString(code int) string {
+	if s, ok := httpCodeStringCommon[code]; ok {
+		return s
+	}
+	return strconv.Itoa(code)
+}
+
+// from pkg io
+type stringWriter interface {
+	WriteString(s string) (n int, err error)
+}
+
+// A gate lets two goroutines coordinate their activities.
+type gate chan struct{}
+
+func (g gate) Done() { g <- struct{}{} }
+func (g gate) Wait() { <-g }
+
+// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
+type closeWaiter chan struct{}
+
+// Init makes a closeWaiter usable.
+// It exists so that a closeWaiter value can be placed inside a
+// larger struct and initialized in place.
+func (cw *closeWaiter) Init() {
+	*cw = make(chan struct{})
+}
+
+// Close marks the closeWaiter as closed and unblocks any waiters.
+func (cw closeWaiter) Close() {
+	close(cw)
+}
+
+// Wait waits for the closeWaiter to become closed.
+func (cw closeWaiter) Wait() {
+	<-cw
+}
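The next type, bufferedWriter, attaches a pooled bufio.Writer only while data is in flight. A standalone sketch of that pattern (illustrative names and sizes, not vendored code):

package main

import (
	"bufio"
	"io"
	"sync"
)

// lazyWriter holds no bufio.Writer while idle; one is borrowed from a
// pool on first Write and returned on Flush.
type lazyWriter struct {
	w  io.Writer
	bw *bufio.Writer
}

var writerPool = sync.Pool{
	New: func() interface{} { return bufio.NewWriterSize(nil, 4<<10) },
}

func (l *lazyWriter) Write(p []byte) (int, error) {
	if l.bw == nil {
		l.bw = writerPool.Get().(*bufio.Writer)
		l.bw.Reset(l.w) // point the pooled buffer at our writer
	}
	return l.bw.Write(p)
}

func (l *lazyWriter) Flush() error {
	if l.bw == nil {
		return nil // nothing was ever buffered
	}
	err := l.bw.Flush()
	l.bw.Reset(nil) // drop the writer reference before pooling
	writerPool.Put(l.bw)
	l.bw = nil
	return err
}

With many mostly idle connections, this keeps per-connection cost at a couple of pointers instead of a dedicated 4 KB buffer each.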
+// bufferedWriter is a buffered writer that writes to w.
+// Its buffered writer is lazily allocated as needed, to minimize
+// idle memory usage with many connections.
+type bufferedWriter struct {
+	w  io.Writer     // immutable
+	bw *bufio.Writer // non-nil when data is buffered
+}
+
+func newBufferedWriter(w io.Writer) *bufferedWriter {
+	return &bufferedWriter{w: w}
+}
+
+// bufWriterPoolBufferSize is the size of bufio.Writer's
+// buffers created using bufWriterPool.
+//
+// TODO: pick a less arbitrary value? this is a bit under
+// (3 x typical 1500 byte MTU) at least. Other than that,
+// not much thought went into it.
+const bufWriterPoolBufferSize = 4 << 10
+
+var bufWriterPool = sync.Pool{
+	New: func() interface{} {
+		return bufio.NewWriterSize(nil, bufWriterPoolBufferSize)
+	},
+}
+
+func (w *bufferedWriter) Available() int {
+	if w.bw == nil {
+		return bufWriterPoolBufferSize
+	}
+	return w.bw.Available()
+}
+
+func (w *bufferedWriter) Write(p []byte) (n int, err error) {
+	if w.bw == nil {
+		bw := bufWriterPool.Get().(*bufio.Writer)
+		bw.Reset(w.w)
+		w.bw = bw
+	}
+	return w.bw.Write(p)
+}
+
+func (w *bufferedWriter) Flush() error {
+	bw := w.bw
+	if bw == nil {
+		return nil
+	}
+	err := bw.Flush()
+	bw.Reset(nil)
+	bufWriterPool.Put(bw)
+	w.bw = nil
+	return err
+}
+
+func mustUint31(v int32) uint32 {
+	if v < 0 || v > 2147483647 {
+		panic("out of range")
+	}
+	return uint32(v)
+}
+
+// bodyAllowedForStatus reports whether a given response status code
+// permits a body. See RFC 7230, section 3.3.
+func bodyAllowedForStatus(status int) bool {
+	switch {
+	case status >= 100 && status <= 199:
+		return false
+	case status == 204:
+		return false
+	case status == 304:
+		return false
+	}
+	return true
+}
+
+type httpError struct {
+	msg     string
+	timeout bool
+}
+
+func (e *httpError) Error() string   { return e.msg }
+func (e *httpError) Timeout() bool   { return e.timeout }
+func (e *httpError) Temporary() bool { return true }
+
+var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true}
+
+type connectionStater interface {
+	ConnectionState() tls.ConnectionState
+}
+
+var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }}
+
+type sorter struct {
+	v []string // owned by sorter
+}
+
+func (s *sorter) Len() int           { return len(s.v) }
+func (s *sorter) Swap(i, j int)      { s.v[i], s.v[j] = s.v[j], s.v[i] }
+func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] }
+
+// Keys returns the sorted keys of h.
+//
+// The returned slice is only valid until s is used again or returned to
+// its pool.
+func (s *sorter) Keys(h http.Header) []string {
+	keys := s.v[:0]
+	for k := range h {
+		keys = append(keys, k)
+	}
+	s.v = keys
+	sort.Sort(s)
+	return keys
+}
+
+func (s *sorter) SortStrings(ss []string) {
+	// Our sorter works on s.v, which sorter owns, so
+	// stash it away while we sort the user's buffer.
+	save := s.v
+	s.v = ss
+	sort.Sort(s)
+	s.v = save
+}
+
+// validPseudoPath reports whether v is a valid :path pseudo-header
+// value. It must be either:
+//
+//   *) a non-empty string starting with '/'
+//   *) the string '*', for OPTIONS requests.
+//
+// For now this is only used as a quick check for deciding when to clean
+// up Opaque URLs before sending requests from the Transport.
+// See golang.org/issue/16847
+//
+// We used to enforce that the path also didn't start with "//", but
+// Google's GFE accepts such paths and Chrome sends them, so ignore
+// that part of the spec. See golang.org/issue/19103.
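A quick spot-check of that rule against the one-line implementation that follows (hypothetical snippet, assumes fmt is imported):

func demoValidPseudoPath() {
	for _, v := range []string{"/", "/index.html", "*", "//x", "", "foo"} {
		fmt.Println(v, validPseudoPath(v)) // true for all but "" and "foo"
	}
}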
+func validPseudoPath(v string) bool { + return (len(v) > 0 && v[0] == '/') || v == "*" +} diff --git a/vendor/golang.org/x/net/http2/http2_test.go b/vendor/golang.org/x/net/http2/http2_test.go new file mode 100644 index 0000000..5248776 --- /dev/null +++ b/vendor/golang.org/x/net/http2/http2_test.go @@ -0,0 +1,199 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "errors" + "flag" + "fmt" + "net/http" + "os/exec" + "strconv" + "strings" + "testing" + + "golang.org/x/net/http2/hpack" +) + +var knownFailing = flag.Bool("known_failing", false, "Run known-failing tests.") + +func condSkipFailingTest(t *testing.T) { + if !*knownFailing { + t.Skip("Skipping known-failing test without --known_failing") + } +} + +func init() { + inTests = true + DebugGoroutines = true + flag.BoolVar(&VerboseLogs, "verboseh2", VerboseLogs, "Verbose HTTP/2 debug logging") +} + +func TestSettingString(t *testing.T) { + tests := []struct { + s Setting + want string + }{ + {Setting{SettingMaxFrameSize, 123}, "[MAX_FRAME_SIZE = 123]"}, + {Setting{1<<16 - 1, 123}, "[UNKNOWN_SETTING_65535 = 123]"}, + } + for i, tt := range tests { + got := fmt.Sprint(tt.s) + if got != tt.want { + t.Errorf("%d. for %#v, string = %q; want %q", i, tt.s, got, tt.want) + } + } +} + +type twriter struct { + t testing.TB + st *serverTester // optional +} + +func (w twriter) Write(p []byte) (n int, err error) { + if w.st != nil { + ps := string(p) + for _, phrase := range w.st.logFilter { + if strings.Contains(ps, phrase) { + return len(p), nil // no logging + } + } + } + w.t.Logf("%s", p) + return len(p), nil +} + +// like encodeHeader, but don't add implicit pseudo headers. +func encodeHeaderNoImplicit(t *testing.T, headers ...string) []byte { + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + for len(headers) > 0 { + k, v := headers[0], headers[1] + headers = headers[2:] + if err := enc.WriteField(hpack.HeaderField{Name: k, Value: v}); err != nil { + t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err) + } + } + return buf.Bytes() +} + +// Verify that curl has http2. +func requireCurl(t *testing.T) { + out, err := dockerLogs(curl(t, "--version")) + if err != nil { + t.Skipf("failed to determine curl features; skipping test") + } + if !strings.Contains(string(out), "HTTP2") { + t.Skip("curl doesn't support HTTP2; skipping test") + } +} + +func curl(t *testing.T, args ...string) (container string) { + out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "gohttp2/curl"}, args...)...).Output() + if err != nil { + t.Skipf("Failed to run curl in docker: %v, %s", err, out) + } + return strings.TrimSpace(string(out)) +} + +// Verify that h2load exists. +func requireH2load(t *testing.T) { + out, err := dockerLogs(h2load(t, "--version")) + if err != nil { + t.Skipf("failed to probe h2load; skipping test: %s", out) + } + if !strings.Contains(string(out), "h2load nghttp2/") { + t.Skipf("h2load not present; skipping test. 
(Output=%q)", out) + } +} + +func h2load(t *testing.T, args ...string) (container string) { + out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "--entrypoint=/usr/local/bin/h2load", "gohttp2/curl"}, args...)...).Output() + if err != nil { + t.Skipf("Failed to run h2load in docker: %v, %s", err, out) + } + return strings.TrimSpace(string(out)) +} + +type puppetCommand struct { + fn func(w http.ResponseWriter, r *http.Request) + done chan<- bool +} + +type handlerPuppet struct { + ch chan puppetCommand +} + +func newHandlerPuppet() *handlerPuppet { + return &handlerPuppet{ + ch: make(chan puppetCommand), + } +} + +func (p *handlerPuppet) act(w http.ResponseWriter, r *http.Request) { + for cmd := range p.ch { + cmd.fn(w, r) + cmd.done <- true + } +} + +func (p *handlerPuppet) done() { close(p.ch) } +func (p *handlerPuppet) do(fn func(http.ResponseWriter, *http.Request)) { + done := make(chan bool) + p.ch <- puppetCommand{fn, done} + <-done +} +func dockerLogs(container string) ([]byte, error) { + out, err := exec.Command("docker", "wait", container).CombinedOutput() + if err != nil { + return out, err + } + exitStatus, err := strconv.Atoi(strings.TrimSpace(string(out))) + if err != nil { + return out, errors.New("unexpected exit status from docker wait") + } + out, err = exec.Command("docker", "logs", container).CombinedOutput() + exec.Command("docker", "rm", container).Run() + if err == nil && exitStatus != 0 { + err = fmt.Errorf("exit status %d: %s", exitStatus, out) + } + return out, err +} + +func kill(container string) { + exec.Command("docker", "kill", container).Run() + exec.Command("docker", "rm", container).Run() +} + +func cleanDate(res *http.Response) { + if d := res.Header["Date"]; len(d) == 1 { + d[0] = "XXX" + } +} + +func TestSorterPoolAllocs(t *testing.T) { + ss := []string{"a", "b", "c"} + h := http.Header{ + "a": nil, + "b": nil, + "c": nil, + } + sorter := new(sorter) + + if allocs := testing.AllocsPerRun(100, func() { + sorter.SortStrings(ss) + }); allocs >= 1 { + t.Logf("SortStrings allocs = %v; want <1", allocs) + } + + if allocs := testing.AllocsPerRun(5, func() { + if len(sorter.Keys(h)) != 3 { + t.Fatal("wrong result") + } + }); allocs > 0 { + t.Logf("Keys allocs = %v; want <1", allocs) + } +} diff --git a/vendor/golang.org/x/net/http2/not_go16.go b/vendor/golang.org/x/net/http2/not_go16.go new file mode 100644 index 0000000..508cebc --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go16.go @@ -0,0 +1,21 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.6 + +package http2 + +import ( + "net/http" + "time" +) + +func configureTransport(t1 *http.Transport) (*Transport, error) { + return nil, errTransportVersion +} + +func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { + return 0 + +} diff --git a/vendor/golang.org/x/net/http2/not_go17.go b/vendor/golang.org/x/net/http2/not_go17.go new file mode 100644 index 0000000..140434a --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go17.go @@ -0,0 +1,87 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !go1.7 + +package http2 + +import ( + "crypto/tls" + "net" + "net/http" + "time" +) + +type contextContext interface { + Done() <-chan struct{} + Err() error +} + +type fakeContext struct{} + +func (fakeContext) Done() <-chan struct{} { return nil } +func (fakeContext) Err() error { panic("should not be called") } + +func reqContext(r *http.Request) fakeContext { + return fakeContext{} +} + +func setResponseUncompressed(res *http.Response) { + // Nothing. +} + +type clientTrace struct{} + +func requestTrace(*http.Request) *clientTrace { return nil } +func traceGotConn(*http.Request, *ClientConn) {} +func traceFirstResponseByte(*clientTrace) {} +func traceWroteHeaders(*clientTrace) {} +func traceWroteRequest(*clientTrace, error) {} +func traceGot100Continue(trace *clientTrace) {} +func traceWait100Continue(trace *clientTrace) {} + +func nop() {} + +func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { + return nil, nop +} + +func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { + return ctx, nop +} + +func requestWithContext(req *http.Request, ctx contextContext) *http.Request { + return req +} + +// temporary copy of Go 1.6's private tls.Config.clone: +func cloneTLSConfig(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + } +} + +func (cc *ClientConn) Ping(ctx contextContext) error { + return cc.ping(ctx) +} + +func (t *Transport) idleConnTimeout() time.Duration { return 0 } diff --git a/vendor/golang.org/x/net/http2/not_go18.go b/vendor/golang.org/x/net/http2/not_go18.go new file mode 100644 index 0000000..6f8d3f8 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go18.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.8 + +package http2 + +import ( + "io" + "net/http" +) + +func configureServer18(h1 *http.Server, h2 *Server) error { + // No IdleTimeout to sync prior to Go 1.8. + return nil +} + +func shouldLogPanic(panicValue interface{}) bool { + return panicValue != nil +} + +func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { + return nil +} + +func reqBodyIsNoBody(io.ReadCloser) bool { return false } + +func go18httpNoBody() io.ReadCloser { return nil } // for tests only diff --git a/vendor/golang.org/x/net/http2/not_go19.go b/vendor/golang.org/x/net/http2/not_go19.go new file mode 100644 index 0000000..5ae0772 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go19.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !go1.9 + +package http2 + +import ( + "net/http" +) + +func configureServer19(s *http.Server, conf *Server) error { + // not supported prior to go1.9 + return nil +} diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go new file mode 100644 index 0000000..a614009 --- /dev/null +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -0,0 +1,163 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "io" + "sync" +) + +// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like +// io.Pipe except there are no PipeReader/PipeWriter halves, and the +// underlying buffer is an interface. (io.Pipe is always unbuffered) +type pipe struct { + mu sync.Mutex + c sync.Cond // c.L lazily initialized to &p.mu + b pipeBuffer // nil when done reading + err error // read error once empty. non-nil means closed. + breakErr error // immediate read error (caller doesn't see rest of b) + donec chan struct{} // closed on error + readFn func() // optional code to run in Read before error +} + +type pipeBuffer interface { + Len() int + io.Writer + io.Reader +} + +func (p *pipe) Len() int { + p.mu.Lock() + defer p.mu.Unlock() + if p.b == nil { + return 0 + } + return p.b.Len() +} + +// Read waits until data is available and copies bytes +// from the buffer into p. +func (p *pipe) Read(d []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + for { + if p.breakErr != nil { + return 0, p.breakErr + } + if p.b != nil && p.b.Len() > 0 { + return p.b.Read(d) + } + if p.err != nil { + if p.readFn != nil { + p.readFn() // e.g. copy trailers + p.readFn = nil // not sticky like p.err + } + p.b = nil + return 0, p.err + } + p.c.Wait() + } +} + +var errClosedPipeWrite = errors.New("write on closed buffer") + +// Write copies bytes from p into the buffer and wakes a reader. +// It is an error to write more data than the buffer can hold. +func (p *pipe) Write(d []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + defer p.c.Signal() + if p.err != nil { + return 0, errClosedPipeWrite + } + if p.breakErr != nil { + return len(d), nil // discard when there is no reader + } + return p.b.Write(d) +} + +// CloseWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err after all data has been +// read. +// +// The error must be non-nil. +func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) } + +// BreakWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err immediately, without +// waiting for unread data. +func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) } + +// closeWithErrorAndCode is like CloseWithError but also sets some code to run +// in the caller's goroutine before returning the error. +func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) } + +func (p *pipe) closeWithError(dst *error, err error, fn func()) { + if err == nil { + panic("err must be non-nil") + } + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + defer p.c.Signal() + if *dst != nil { + // Already been done. + return + } + p.readFn = fn + if dst == &p.breakErr { + p.b = nil + } + *dst = err + p.closeDoneLocked() +} + +// requires p.mu be held. 
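+// closeDoneLocked closes p.donec (if non-nil) at most once; it is reached
+// both from closeWithError and from a Done call that finds an error
+// already set, so it must tolerate being invoked twice.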
+func (p *pipe) closeDoneLocked() { + if p.donec == nil { + return + } + // Close if unclosed. This isn't racy since we always + // hold p.mu while closing. + select { + case <-p.donec: + default: + close(p.donec) + } +} + +// Err returns the error (if any) first set by BreakWithError or CloseWithError. +func (p *pipe) Err() error { + p.mu.Lock() + defer p.mu.Unlock() + if p.breakErr != nil { + return p.breakErr + } + return p.err +} + +// Done returns a channel which is closed if and when this pipe is closed +// with CloseWithError. +func (p *pipe) Done() <-chan struct{} { + p.mu.Lock() + defer p.mu.Unlock() + if p.donec == nil { + p.donec = make(chan struct{}) + if p.err != nil || p.breakErr != nil { + // Already hit an error. + p.closeDoneLocked() + } + } + return p.donec +} diff --git a/vendor/golang.org/x/net/http2/pipe_test.go b/vendor/golang.org/x/net/http2/pipe_test.go new file mode 100644 index 0000000..1bf351f --- /dev/null +++ b/vendor/golang.org/x/net/http2/pipe_test.go @@ -0,0 +1,130 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + "testing" +) + +func TestPipeClose(t *testing.T) { + var p pipe + p.b = new(bytes.Buffer) + a := errors.New("a") + b := errors.New("b") + p.CloseWithError(a) + p.CloseWithError(b) + _, err := p.Read(make([]byte, 1)) + if err != a { + t.Errorf("err = %v want %v", err, a) + } +} + +func TestPipeDoneChan(t *testing.T) { + var p pipe + done := p.Done() + select { + case <-done: + t.Fatal("done too soon") + default: + } + p.CloseWithError(io.EOF) + select { + case <-done: + default: + t.Fatal("should be done") + } +} + +func TestPipeDoneChan_ErrFirst(t *testing.T) { + var p pipe + p.CloseWithError(io.EOF) + done := p.Done() + select { + case <-done: + default: + t.Fatal("should be done") + } +} + +func TestPipeDoneChan_Break(t *testing.T) { + var p pipe + done := p.Done() + select { + case <-done: + t.Fatal("done too soon") + default: + } + p.BreakWithError(io.EOF) + select { + case <-done: + default: + t.Fatal("should be done") + } +} + +func TestPipeDoneChan_Break_ErrFirst(t *testing.T) { + var p pipe + p.BreakWithError(io.EOF) + done := p.Done() + select { + case <-done: + default: + t.Fatal("should be done") + } +} + +func TestPipeCloseWithError(t *testing.T) { + p := &pipe{b: new(bytes.Buffer)} + const body = "foo" + io.WriteString(p, body) + a := errors.New("test error") + p.CloseWithError(a) + all, err := ioutil.ReadAll(p) + if string(all) != body { + t.Errorf("read bytes = %q; want %q", all, body) + } + if err != a { + t.Logf("read error = %v, %v", err, a) + } + // Read and Write should fail. + if n, err := p.Write([]byte("abc")); err != errClosedPipeWrite || n != 0 { + t.Errorf("Write(abc) after close\ngot %v, %v\nwant 0, %v", n, err, errClosedPipeWrite) + } + if n, err := p.Read(make([]byte, 1)); err == nil || n != 0 { + t.Errorf("Read() after close\ngot %v, nil\nwant 0, %v", n, errClosedPipeWrite) + } +} + +func TestPipeBreakWithError(t *testing.T) { + p := &pipe{b: new(bytes.Buffer)} + io.WriteString(p, "foo") + a := errors.New("test err") + p.BreakWithError(a) + all, err := ioutil.ReadAll(p) + if string(all) != "" { + t.Errorf("read bytes = %q; want empty string", all) + } + if err != a { + t.Logf("read error = %v, %v", err, a) + } + if p.b != nil { + t.Errorf("buffer should be nil after BreakWithError") + } + // Write should succeed silently. 
+	if n, err := p.Write([]byte("abc")); err != nil || n != 3 {
+		t.Errorf("Write(abc) after break\ngot %v, %v\nwant 3, nil", n, err)
+	}
+	if p.b != nil {
+		t.Errorf("buffer should be nil after Write")
+	}
+	// Read should fail.
+	if n, err := p.Read(make([]byte, 1)); err == nil || n != 0 {
+		t.Errorf("Read() after close\ngot %v, nil\nwant 0, not nil", n)
+	}
+}
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
new file mode 100644
index 0000000..39ed755
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -0,0 +1,2888 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO: turn off the serve goroutine when idle, so
+// an idle conn only has the readFrames goroutine active. (which could
+// also be optimized probably to pin less memory in crypto/tls). This
+// would involve tracking when the serve goroutine is active (atomic
+// int32 read/CAS probably?) and starting it up when frames arrive,
+// and shutting it down when all handlers exit. The occasional PING
+// packets could use time.AfterFunc to call sc.wakeStartServeLoop()
+// (which is a no-op if already running) and then queue the PING write
+// as normal. The serve loop would then exit in most cases (if no
+// Handlers running) and not be woken up again until the PING packet
+// returns.
+
+// TODO (maybe): add a mechanism for Handlers to go into
+// half-closed-local mode (rw.(io.Closer) test?) but not exit their
+// handler, and continue to be able to read from the
+// Request.Body. This would be a somewhat semantic change from HTTP/1
+// (or at least what we expose in net/http), so I'd probably want to
+// add it there too. For now, this package says that returning from
+// the Handler ServeHTTP function means you're both done reading and
+// done writing, without a way to stop just one or the other.
+
+package http2
+
+import (
+	"bufio"
+	"bytes"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"math"
+	"net"
+	"net/http"
+	"net/textproto"
+	"net/url"
+	"os"
+	"reflect"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/net/http2/hpack"
+)
+
+const (
+	prefaceTimeout        = 10 * time.Second
+	firstSettingsTimeout  = 2 * time.Second // should be in-flight with preface anyway
+	handlerChunkWriteSize = 4 << 10
+	defaultMaxStreams     = 250 // TODO: make this 100 as the GFE seems to?
+)
+
+var (
+	errClientDisconnected = errors.New("client disconnected")
+	errClosedBody         = errors.New("body closed by handler")
+	errHandlerComplete    = errors.New("http2: request body closed due to handler exiting")
+	errStreamClosed       = errors.New("http2: stream closed")
+)
+
+var responseWriterStatePool = sync.Pool{
+	New: func() interface{} {
+		rws := &responseWriterState{}
+		rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize)
+		return rws
+	},
+}
+
+// Test hooks.
+var (
+	testHookOnConn        func()
+	testHookGetServerConn func(*serverConn)
+	testHookOnPanicMu     *sync.Mutex // nil except in tests
+	testHookOnPanic       func(sc *serverConn, panicVal interface{}) (rePanic bool)
+)
+
+// Server is an HTTP/2 server.
+type Server struct {
+	// MaxHandlers limits the number of http.Handler ServeHTTP goroutines
+	// which may run at a time over all connections.
+	// Negative or zero means no limit.
+	// TODO: implement
+	MaxHandlers int
+
+	// MaxConcurrentStreams optionally specifies the number of
+	// concurrent streams that each client may have open at a
+	// time. This is unrelated to the number of http.Handler goroutines
+	// which may be active globally, which is MaxHandlers.
+	// If zero, MaxConcurrentStreams defaults to at least 100, per
+	// the HTTP/2 spec's recommendations.
+	MaxConcurrentStreams uint32
+
+	// MaxReadFrameSize optionally specifies the largest frame
+	// this server is willing to read. A valid value is between
+	// 16k and 16M, inclusive. If zero or otherwise invalid, a
+	// default value is used.
+	MaxReadFrameSize uint32
+
+	// PermitProhibitedCipherSuites, if true, permits the use of
+	// cipher suites prohibited by the HTTP/2 spec.
+	PermitProhibitedCipherSuites bool
+
+	// IdleTimeout specifies how long until idle clients should be
+	// closed with a GOAWAY frame. PING frames are not considered
+	// activity for the purposes of IdleTimeout.
+	IdleTimeout time.Duration
+
+	// MaxUploadBufferPerConnection is the size of the initial flow
+	// control window for each connection. The HTTP/2 spec does not
+	// allow this to be smaller than 65535 or larger than 2^32-1.
+	// If the value is outside this range, a default value will be
+	// used instead.
+	MaxUploadBufferPerConnection int32
+
+	// MaxUploadBufferPerStream is the size of the initial flow control
+	// window for each stream. The HTTP/2 spec does not allow this to
+	// be larger than 2^32-1. If the value is zero or larger than the
+	// maximum, a default value will be used instead.
+	MaxUploadBufferPerStream int32
+
+	// NewWriteScheduler constructs a write scheduler for a connection.
+	// If nil, a default scheduler is chosen.
+	NewWriteScheduler func() WriteScheduler
+
+	// Internal state. This is a pointer (rather than embedded directly)
+	// so that we don't embed a Mutex in this struct, which would make the
+	// struct non-copyable, which might break some callers.
+	state *serverInternalState
+}
+
+func (s *Server) initialConnRecvWindowSize() int32 {
+	if s.MaxUploadBufferPerConnection > initialWindowSize {
+		return s.MaxUploadBufferPerConnection
+	}
+	return 1 << 20
+}
+
+func (s *Server) initialStreamRecvWindowSize() int32 {
+	if s.MaxUploadBufferPerStream > 0 {
+		return s.MaxUploadBufferPerStream
+	}
+	return 1 << 20
+}
+
+func (s *Server) maxReadFrameSize() uint32 {
+	if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize {
+		return v
+	}
+	return defaultMaxReadFrameSize
+}
+
+func (s *Server) maxConcurrentStreams() uint32 {
+	if v := s.MaxConcurrentStreams; v > 0 {
+		return v
+	}
+	return defaultMaxStreams
+}
+
+type serverInternalState struct {
+	mu          sync.Mutex
+	activeConns map[*serverConn]struct{}
+}
+
+func (s *serverInternalState) registerConn(sc *serverConn) {
+	if s == nil {
+		return // if the Server was used without calling ConfigureServer
+	}
+	s.mu.Lock()
+	s.activeConns[sc] = struct{}{}
+	s.mu.Unlock()
+}
+
+func (s *serverInternalState) unregisterConn(sc *serverConn) {
+	if s == nil {
+		return // if the Server was used without calling ConfigureServer
+	}
+	s.mu.Lock()
+	delete(s.activeConns, sc)
+	s.mu.Unlock()
+}
+
+func (s *serverInternalState) startGracefulShutdown() {
+	if s == nil {
+		return // if the Server was used without calling ConfigureServer
+	}
+	s.mu.Lock()
+	for sc := range s.activeConns {
+		sc.startGracefulShutdown()
+	}
+	s.mu.Unlock()
+}
+
+// ConfigureServer adds HTTP/2 support to a net/http Server.
+//
+// The configuration conf may be nil.
+//
+// ConfigureServer must be called before s begins serving.
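Typical use of the function below, from outside the package (a minimal, hypothetical program; the certificate paths are placeholders):

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443"} // nil Handler means http.DefaultServeMux
	if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}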
+func ConfigureServer(s *http.Server, conf *Server) error { + if s == nil { + panic("nil *http.Server") + } + if conf == nil { + conf = new(Server) + } + conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})} + if err := configureServer18(s, conf); err != nil { + return err + } + if err := configureServer19(s, conf); err != nil { + return err + } + + if s.TLSConfig == nil { + s.TLSConfig = new(tls.Config) + } else if s.TLSConfig.CipherSuites != nil { + // If they already provided a CipherSuite list, return + // an error if it has a bad order or is missing + // ECDHE_RSA_WITH_AES_128_GCM_SHA256 or ECDHE_ECDSA_WITH_AES_128_GCM_SHA256. + haveRequired := false + sawBad := false + for i, cs := range s.TLSConfig.CipherSuites { + switch cs { + case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + // Alternative MTI cipher to not discourage ECDSA-only servers. + // See http://golang.org/cl/30721 for further information. + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: + haveRequired = true + } + if isBadCipher(cs) { + sawBad = true + } else if sawBad { + return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs) + } + } + if !haveRequired { + return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher.") + } + } + + // Note: not setting MinVersion to tls.VersionTLS12, + // as we don't want to interfere with HTTP/1.1 traffic + // on the user's server. We enforce TLS 1.2 later once + // we accept a connection. Ideally this should be done + // during next-proto selection, but using TLS <1.2 with + // HTTP/2 is still the client's bug. + + s.TLSConfig.PreferServerCipherSuites = true + + haveNPN := false + for _, p := range s.TLSConfig.NextProtos { + if p == NextProtoTLS { + haveNPN = true + break + } + } + if !haveNPN { + s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS) + } + + if s.TLSNextProto == nil { + s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} + } + protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { + if testHookOnConn != nil { + testHookOnConn() + } + conf.ServeConn(c, &ServeConnOpts{ + Handler: h, + BaseConfig: hs, + }) + } + s.TLSNextProto[NextProtoTLS] = protoHandler + return nil +} + +// ServeConnOpts are options for the Server.ServeConn method. +type ServeConnOpts struct { + // BaseConfig optionally sets the base configuration + // for values. If nil, defaults are used. + BaseConfig *http.Server + + // Handler specifies which handler to use for processing + // requests. If nil, BaseConfig.Handler is used. If BaseConfig + // or BaseConfig.Handler is nil, http.DefaultServeMux is used. + Handler http.Handler +} + +func (o *ServeConnOpts) baseConfig() *http.Server { + if o != nil && o.BaseConfig != nil { + return o.BaseConfig + } + return new(http.Server) +} + +func (o *ServeConnOpts) handler() http.Handler { + if o != nil { + if o.Handler != nil { + return o.Handler + } + if o.BaseConfig != nil && o.BaseConfig.Handler != nil { + return o.BaseConfig.Handler + } + } + return http.DefaultServeMux +} + +// ServeConn serves HTTP/2 requests on the provided connection and +// blocks until the connection is no longer readable. +// +// ServeConn starts speaking HTTP/2 assuming that c has not had any +// reads or writes. 
It writes its initial settings frame and expects +// to be able to read the preface and settings frame from the +// client. If c has a ConnectionState method like a *tls.Conn, the +// ConnectionState is used to verify the TLS ciphersuite and to set +// the Request.TLS field in Handlers. +// +// ServeConn does not support h2c by itself. Any h2c support must be +// implemented in terms of providing a suitably-behaving net.Conn. +// +// The opts parameter is optional. If nil, default values are used. +func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + baseCtx, cancel := serverConnBaseContext(c, opts) + defer cancel() + + sc := &serverConn{ + srv: s, + hs: opts.baseConfig(), + conn: c, + baseCtx: baseCtx, + remoteAddrStr: c.RemoteAddr().String(), + bw: newBufferedWriter(c), + handler: opts.handler(), + streams: make(map[uint32]*stream), + readFrameCh: make(chan readFrameResult), + wantWriteFrameCh: make(chan FrameWriteRequest, 8), + serveMsgCh: make(chan interface{}, 8), + wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync + bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way + doneServing: make(chan struct{}), + clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" + advMaxStreams: s.maxConcurrentStreams(), + initialStreamSendWindowSize: initialWindowSize, + maxFrameSize: initialMaxFrameSize, + headerTableSize: initialHeaderTableSize, + serveG: newGoroutineLock(), + pushEnabled: true, + } + + s.state.registerConn(sc) + defer s.state.unregisterConn(sc) + + // The net/http package sets the write deadline from the + // http.Server.WriteTimeout during the TLS handshake, but then + // passes the connection off to us with the deadline already set. + // Write deadlines are set per stream in serverConn.newStream. + // Disarm the net.Conn write deadline here. + if sc.hs.WriteTimeout != 0 { + sc.conn.SetWriteDeadline(time.Time{}) + } + + if s.NewWriteScheduler != nil { + sc.writeSched = s.NewWriteScheduler() + } else { + sc.writeSched = NewRandomWriteScheduler() + } + + // These start at the RFC-specified defaults. If there is a higher + // configured value for inflow, that will be updated when we send a + // WINDOW_UPDATE shortly after sending SETTINGS. + sc.flow.add(initialWindowSize) + sc.inflow.add(initialWindowSize) + sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) + + fr := NewFramer(sc.bw, c) + fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + fr.MaxHeaderListSize = sc.maxHeaderListSize() + fr.SetMaxReadFrameSize(s.maxReadFrameSize()) + sc.framer = fr + + if tc, ok := c.(connectionStater); ok { + sc.tlsState = new(tls.ConnectionState) + *sc.tlsState = tc.ConnectionState() + // 9.2 Use of TLS Features + // An implementation of HTTP/2 over TLS MUST use TLS + // 1.2 or higher with the restrictions on feature set + // and cipher suite described in this section. Due to + // implementation limitations, it might not be + // possible to fail TLS negotiation. An endpoint MUST + // immediately terminate an HTTP/2 connection that + // does not meet the TLS requirements described in + // this section with a connection error (Section + // 5.4.1) of type INADEQUATE_SECURITY. 
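+ // Stated from the client's side, the checks below mean a dialing
+ // tls.Config must offer TLS 1.2 or newer, an approved AEAD cipher
+ // suite, and "h2" via ALPN. A hedged sketch (ServerName is
+ // illustrative):
+ //
+ //	cfg := &tls.Config{
+ //		MinVersion:   tls.VersionTLS12,
+ //		CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256},
+ //		NextProtos:   []string{"h2"},
+ //		ServerName:   "example.com",
+ //	}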
+ if sc.tlsState.Version < tls.VersionTLS12 { + sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low") + return + } + + if sc.tlsState.ServerName == "" { + // Client must use SNI, but we don't enforce that anymore, + // since it was causing problems when connecting to bare IP + // addresses during development. + // + // TODO: optionally enforce? Or enforce at the time we receive + // a new request, and verify the ServerName matches the :authority? + // But that precludes proxy situations, perhaps. + // + // So for now, do nothing here again. + } + + if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + // "Endpoints MAY choose to generate a connection error + // (Section 5.4.1) of type INADEQUATE_SECURITY if one of + // the prohibited cipher suites are negotiated." + // + // We choose that. In my opinion, the spec is weak + // here. It also says both parties must support at least + // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no + // excuses here. If we really must, we could allow an + // "AllowInsecureWeakCiphers" option on the server later. + // Let's see how it plays out first. + sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite)) + return + } + } + + if hook := testHookGetServerConn; hook != nil { + hook(sc) + } + sc.serve() +} + +func (sc *serverConn) rejectConn(err ErrCode, debug string) { + sc.vlogf("http2: server rejecting conn: %v, %s", err, debug) + // ignoring errors. hanging up anyway. + sc.framer.WriteGoAway(0, err, []byte(debug)) + sc.bw.Flush() + sc.conn.Close() +} + +type serverConn struct { + // Immutable: + srv *Server + hs *http.Server + conn net.Conn + bw *bufferedWriter // writing to conn + handler http.Handler + baseCtx contextContext + framer *Framer + doneServing chan struct{} // closed when serverConn.serve ends + readFrameCh chan readFrameResult // written by serverConn.readFrames + wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve + wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes + bodyReadCh chan bodyReadMsg // from handlers -> serve + serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop + flow flow // conn-wide (not stream-specific) outbound flow control + inflow flow // conn-wide inbound flow control + tlsState *tls.ConnectionState // shared by all handlers, like net/http + remoteAddrStr string + writeSched WriteScheduler + + // Everything following is owned by the serve loop; use serveG.check(): + serveG goroutineLock // used to verify funcs are on serve() + pushEnabled bool + sawFirstSettings bool // got the initial SETTINGS frame after the preface + needToSendSettingsAck bool + unackedSettings int // how many SETTINGS have we sent without ACKs? 
+ clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) + advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client + curClientStreams uint32 // number of open streams initiated by the client + curPushedStreams uint32 // number of open streams initiated by server push + maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests + maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes + streams map[uint32]*stream + initialStreamSendWindowSize int32 + maxFrameSize int32 + headerTableSize uint32 + peerMaxHeaderListSize uint32 // zero means unknown (default) + canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case + writingFrame bool // started writing a frame (on serve goroutine or separate) + writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh + needsFrameFlush bool // last frame write wasn't a flush + inGoAway bool // we've started to or sent GOAWAY + inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop + needToSendGoAway bool // we need to schedule a GOAWAY frame write + goAwayCode ErrCode + shutdownTimer *time.Timer // nil until used + idleTimer *time.Timer // nil if unused + + // Owned by the writeFrameAsync goroutine: + headerWriteBuf bytes.Buffer + hpackEncoder *hpack.Encoder + + // Used by startGracefulShutdown. + shutdownOnce sync.Once +} + +func (sc *serverConn) maxHeaderListSize() uint32 { + n := sc.hs.MaxHeaderBytes + if n <= 0 { + n = http.DefaultMaxHeaderBytes + } + // http2's count is in a slightly different unit and includes 32 bytes per pair. + // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. + const perFieldOverhead = 32 // per http2 spec + const typicalHeaders = 10 // conservative + return uint32(n + typicalHeaders*perFieldOverhead) +} + +func (sc *serverConn) curOpenStreams() uint32 { + sc.serveG.check() + return sc.curClientStreams + sc.curPushedStreams +} + +// stream represents a stream. This is the minimal metadata needed by +// the serve goroutine. Most of the actual stream state is owned by +// the http.Handler's goroutine in the responseWriter. Because the +// responseWriter's responseWriterState is recycled at the end of a +// handler, this struct intentionally has no pointer to the +// *responseWriter{,State} itself, as the Handler ending nils out the +// responseWriter's state field.
+type stream struct { + // immutable: + sc *serverConn + id uint32 + body *pipe // non-nil if expecting DATA frames + cw closeWaiter // closed wait stream transitions to closed state + ctx contextContext + cancelCtx func() + + // owned by serverConn's serve loop: + bodyBytes int64 // body bytes seen so far + declBodyBytes int64 // or -1 if undeclared + flow flow // limits writing from Handler to client + inflow flow // what the client is allowed to POST/etc to us + parent *stream // or nil + numTrailerValues int64 + weight uint8 + state streamState + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + writeDeadline *time.Timer // nil if unused + + trailer http.Header // accumulated trailers + reqTrailer http.Header // handler's Request.Trailer +} + +func (sc *serverConn) Framer() *Framer { return sc.framer } +func (sc *serverConn) CloseConn() error { return sc.conn.Close() } +func (sc *serverConn) Flush() error { return sc.bw.Flush() } +func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) { + return sc.hpackEncoder, &sc.headerWriteBuf +} + +func (sc *serverConn) state(streamID uint32) (streamState, *stream) { + sc.serveG.check() + // http://tools.ietf.org/html/rfc7540#section-5.1 + if st, ok := sc.streams[streamID]; ok { + return st.state, st + } + // "The first use of a new stream identifier implicitly closes all + // streams in the "idle" state that might have been initiated by + // that peer with a lower-valued stream identifier. For example, if + // a client sends a HEADERS frame on stream 7 without ever sending a + // frame on stream 5, then stream 5 transitions to the "closed" + // state when the first frame for stream 7 is sent or received." + if streamID%2 == 1 { + if streamID <= sc.maxClientStreamID { + return stateClosed, nil + } + } else { + if streamID <= sc.maxPushPromiseID { + return stateClosed, nil + } + } + return stateIdle, nil +} + +// setConnState calls the net/http ConnState hook for this connection, if configured. +// Note that the net/http package does StateNew and StateClosed for us. +// There is currently no plan for StateHijacked or hijacking HTTP/2 connections. +func (sc *serverConn) setConnState(state http.ConnState) { + if sc.hs.ConnState != nil { + sc.hs.ConnState(sc.conn, state) + } +} + +func (sc *serverConn) vlogf(format string, args ...interface{}) { + if VerboseLogs { + sc.logf(format, args...) + } +} + +func (sc *serverConn) logf(format string, args ...interface{}) { + if lg := sc.hs.ErrorLog; lg != nil { + lg.Printf(format, args...) + } else { + log.Printf(format, args...) + } +} + +// errno returns v's underlying uintptr, else 0. +// +// TODO: remove this helper function once http2 can use build +// tags. See comment in isClosedConnError. +func errno(v error) uintptr { + if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr { + return uintptr(rv.Uint()) + } + return 0 +} + +// isClosedConnError reports whether err is an error from use of a closed +// network connection. +func isClosedConnError(err error) bool { + if err == nil { + return false + } + + // TODO: remove this string search and be more like the Windows + // case below. That might involve modifying the standard library + // to return better error types. 
+ str := err.Error() + if strings.Contains(str, "use of closed network connection") { + return true + } + + // TODO(bradfitz): x/tools/cmd/bundle doesn't really support + // build tags, so I can't make an http2_windows.go file with + // Windows-specific stuff. Fix that and move this, once we + // have a way to bundle this into std's net/http somehow. + if runtime.GOOS == "windows" { + if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { + if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" { + const WSAECONNABORTED = 10053 + const WSAECONNRESET = 10054 + if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED { + return true + } + } + } + } + return false +} + +func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { + if err == nil { + return + } + if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) || err == errPrefaceTimeout { + // Boring, expected errors. + sc.vlogf(format, args...) + } else { + sc.logf(format, args...) + } +} + +func (sc *serverConn) canonicalHeader(v string) string { + sc.serveG.check() + cv, ok := commonCanonHeader[v] + if ok { + return cv + } + cv, ok = sc.canonHeader[v] + if ok { + return cv + } + if sc.canonHeader == nil { + sc.canonHeader = make(map[string]string) + } + cv = http.CanonicalHeaderKey(v) + sc.canonHeader[v] = cv + return cv +} + +type readFrameResult struct { + f Frame // valid until readMore is called + err error + + // readMore should be called once the consumer no longer needs or + // retains f. After readMore, f is invalid and more frames can be + // read. + readMore func() +} + +// readFrames is the loop that reads incoming frames. +// It takes care to only read one frame at a time, blocking until the +// consumer is done with the frame. +// It's run on its own goroutine. +func (sc *serverConn) readFrames() { + gate := make(gate) + gateDone := gate.Done + for { + f, err := sc.framer.ReadFrame() + select { + case sc.readFrameCh <- readFrameResult{f, err, gateDone}: + case <-sc.doneServing: + return + } + select { + case <-gate: + case <-sc.doneServing: + return + } + if terminalReadFrameError(err) { + return + } + } +} + +// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. +type frameWriteResult struct { + wr FrameWriteRequest // what was written (or attempted) + err error // result of the writeFrame call +} + +// writeFrameAsync runs in its own goroutine and writes a single frame +// and then reports when it's done. +// At most one goroutine can be running writeFrameAsync at a time per +// serverConn. +func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) { + err := wr.write.writeFrame(sc) + sc.wroteFrameCh <- frameWriteResult{wr, err} +} + +func (sc *serverConn) closeAllStreamsOnConnClose() { + sc.serveG.check() + for _, st := range sc.streams { + sc.closeStream(st, errClientDisconnected) + } +} + +func (sc *serverConn) stopShutdownTimer() { + sc.serveG.check() + if t := sc.shutdownTimer; t != nil { + t.Stop() + } +} + +func (sc *serverConn) notePanic() { + // Note: this is for serverConn.serve panicking, not http.Handler code. 
+ if testHookOnPanicMu != nil { + testHookOnPanicMu.Lock() + defer testHookOnPanicMu.Unlock() + } + if testHookOnPanic != nil { + if e := recover(); e != nil { + if testHookOnPanic(sc, e) { + panic(e) + } + } + } +} + +func (sc *serverConn) serve() { + sc.serveG.check() + defer sc.notePanic() + defer sc.conn.Close() + defer sc.closeAllStreamsOnConnClose() + defer sc.stopShutdownTimer() + defer close(sc.doneServing) // unblocks handlers trying to send + + if VerboseLogs { + sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) + } + + sc.writeFrame(FrameWriteRequest{ + write: writeSettings{ + {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, + {SettingMaxConcurrentStreams, sc.advMaxStreams}, + {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, + }, + }) + sc.unackedSettings++ + + // Each connection starts with initialWindowSize inflow tokens. + // If a higher value is configured, we add more tokens. + if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + sc.sendWindowUpdate(nil, int(diff)) + } + + if err := sc.readPreface(); err != nil { + sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) + return + } + // Now that we've got the preface, get us out of the + // "StateNew" state. We can't go directly to idle, though. + // Active means we read some data and anticipate a request. We'll + // do another Active when we get a HEADERS frame. + sc.setConnState(http.StateActive) + sc.setConnState(http.StateIdle) + + if sc.srv.IdleTimeout != 0 { + sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) + defer sc.idleTimer.Stop() + } + + go sc.readFrames() // closed by defer sc.conn.Close above + + settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) + defer settingsTimer.Stop() + + loopNum := 0 + for { + loopNum++ + select { + case wr := <-sc.wantWriteFrameCh: + if se, ok := wr.write.(StreamError); ok { + sc.resetStream(se) + break + } + sc.writeFrame(wr) + case res := <-sc.wroteFrameCh: + sc.wroteFrame(res) + case res := <-sc.readFrameCh: + if !sc.processFrameFromReader(res) { + return + } + res.readMore() + if settingsTimer != nil { + settingsTimer.Stop() + settingsTimer = nil + } + case m := <-sc.bodyReadCh: + sc.noteBodyRead(m.st, m.n) + case msg := <-sc.serveMsgCh: + switch v := msg.(type) { + case func(int): + v(loopNum) // for testing + case *serverMessage: + switch v { + case settingsTimerMsg: + sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) + return + case idleTimerMsg: + sc.vlogf("connection is idle") + sc.goAway(ErrCodeNo) + case shutdownTimerMsg: + sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) + return + case gracefulShutdownMsg: + sc.startGracefulShutdownInternal() + default: + panic("unknown timer") + } + case *startPushRequest: + sc.startPush(v) + default: + panic(fmt.Sprintf("unexpected type %T", v)) + } + } + + // Start the shutdown timer after sending a GOAWAY. When sending GOAWAY + // with no error code (graceful shutdown), don't start the timer until + // all open streams have been completed.
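+ // As a hedged aside on how this code path is normally reached: on
+ // Go 1.9 and later, ConfigureServer registers the graceful-shutdown
+ // hook with net/http, so a plain (*http.Server).Shutdown drives the
+ // GOAWAY sequence handled here (srv is the configured server):
+ //
+ //	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ //	defer cancel()
+ //	err := srv.Shutdown(ctx) // sends GOAWAY(NO_ERROR), then waits for streams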
+ sentGoAway := sc.inGoAway && !sc.needToSendGoAway && !sc.writingFrame + gracefulShutdownComplete := sc.goAwayCode == ErrCodeNo && sc.curOpenStreams() == 0 + if sentGoAway && sc.shutdownTimer == nil && (sc.goAwayCode != ErrCodeNo || gracefulShutdownComplete) { + sc.shutDownIn(goAwayTimeout) + } + } +} + +func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) { + select { + case <-sc.doneServing: + case <-sharedCh: + close(privateCh) + } +} + +type serverMessage int + +// Message values sent to serveMsgCh. +var ( + settingsTimerMsg = new(serverMessage) + idleTimerMsg = new(serverMessage) + shutdownTimerMsg = new(serverMessage) + gracefulShutdownMsg = new(serverMessage) +) + +func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } +func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } +func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } + +func (sc *serverConn) sendServeMsg(msg interface{}) { + sc.serveG.checkNotOn() // NOT + select { + case sc.serveMsgCh <- msg: + case <-sc.doneServing: + } +} + +var errPrefaceTimeout = errors.New("timeout waiting for client preface") + +// readPreface reads the ClientPreface greeting from the peer or +// returns errPrefaceTimeout on timeout, or an error if the greeting +// is invalid. +func (sc *serverConn) readPreface() error { + errc := make(chan error, 1) + go func() { + // Read the client preface + buf := make([]byte, len(ClientPreface)) + if _, err := io.ReadFull(sc.conn, buf); err != nil { + errc <- err + } else if !bytes.Equal(buf, clientPreface) { + errc <- fmt.Errorf("bogus greeting %q", buf) + } else { + errc <- nil + } + }() + timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? + defer timer.Stop() + select { + case <-timer.C: + return errPrefaceTimeout + case err := <-errc: + if err == nil { + if VerboseLogs { + sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr()) + } + } + return err + } +} + +var errChanPool = sync.Pool{ + New: func() interface{} { return make(chan error, 1) }, +} + +var writeDataPool = sync.Pool{ + New: func() interface{} { return new(writeData) }, +} + +// writeDataFromHandler writes DATA response frames from a handler on +// the given stream. +func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { + ch := errChanPool.Get().(chan error) + writeArg := writeDataPool.Get().(*writeData) + *writeArg = writeData{stream.id, data, endStream} + err := sc.writeFrameFromHandler(FrameWriteRequest{ + write: writeArg, + stream: stream, + done: ch, + }) + if err != nil { + return err + } + var frameWriteDone bool // the frame write is done (successfully or not) + select { + case err = <-ch: + frameWriteDone = true + case <-sc.doneServing: + return errClientDisconnected + case <-stream.cw: + // If both ch and stream.cw were ready (as might + // happen on the final Write after an http.Handler + // ends), prefer the write result. Otherwise this + // might just be us successfully closing the stream. + // The writeFrameAsync and serve goroutines guarantee + // that the ch send will happen before the stream.cw + // close. + select { + case err = <-ch: + frameWriteDone = true + default: + return errStreamClosed + } + } + errChanPool.Put(ch) + if frameWriteDone { + writeDataPool.Put(writeArg) + } + return err +} + +// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts +// if the connection has gone away. 
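+//
+// (Aside on the pools above: this is the standard sync.Pool recycling
+// idiom. The error channel is buffered to 1 so the serve loop's send
+// never blocks, and it is only put back in the pool once that single
+// send has been consumed; a reduced, hedged sketch:
+//
+//	ch := errChanPool.Get().(chan error)
+//	// ... hand ch to the serve loop as wr.done ...
+//	err := <-ch         // exactly one buffered send arrives
+//	errChanPool.Put(ch) // now safe to recycle
+//
+// When the stream closes first, writeDataFromHandler deliberately
+// abandons ch instead of recycling it, since a late send could land
+// in a reused channel.)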
+// +// This must not be run from the serve goroutine itself, else it might +// deadlock writing to sc.wantWriteFrameCh (which is only mildly +// buffered and is read by serve itself). If you're on the serve +// goroutine, call writeFrame instead. +func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error { + sc.serveG.checkNotOn() // NOT + select { + case sc.wantWriteFrameCh <- wr: + return nil + case <-sc.doneServing: + // Serve loop is gone. + // Client has closed their connection to the server. + return errClientDisconnected + } +} + +// writeFrame schedules a frame to write and sends it if there's nothing +// already being written. +// +// There is no pushback here (the serve goroutine never blocks). It's +// the http.Handlers that block, waiting for their previous frames to +// make it onto the wire +// +// If you're not on the serve goroutine, use writeFrameFromHandler instead. +func (sc *serverConn) writeFrame(wr FrameWriteRequest) { + sc.serveG.check() + + // If true, wr will not be written and wr.done will not be signaled. + var ignoreWrite bool + + // We are not allowed to write frames on closed streams. RFC 7540 Section + // 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on + // a closed stream." Our server never sends PRIORITY, so that exception + // does not apply. + // + // The serverConn might close an open stream while the stream's handler + // is still running. For example, the server might close a stream when it + // receives bad data from the client. If this happens, the handler might + // attempt to write a frame after the stream has been closed (since the + // handler hasn't yet been notified of the close). In this case, we simply + // ignore the frame. The handler will notice that the stream is closed when + // it waits for the frame to be written. + // + // As an exception to this rule, we allow sending RST_STREAM after close. + // This allows us to immediately reject new streams without tracking any + // state for those streams (except for the queued RST_STREAM frame). This + // may result in duplicate RST_STREAMs in some cases, but the client should + // ignore those. + if wr.StreamID() != 0 { + _, isReset := wr.write.(StreamError) + if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset { + ignoreWrite = true + } + } + + // Don't send a 100-continue response if we've already sent headers. + // See golang.org/issue/14030. + switch wr.write.(type) { + case *writeResHeaders: + wr.stream.wroteHeaders = true + case write100ContinueHeadersFrame: + if wr.stream.wroteHeaders { + // We do not need to notify wr.done because this frame is + // never written with wr.done != nil. + if wr.done != nil { + panic("wr.done != nil for write100ContinueHeadersFrame") + } + ignoreWrite = true + } + } + + if !ignoreWrite { + sc.writeSched.Push(wr) + } + sc.scheduleFrameWrite() +} + +// startFrameWrite starts a goroutine to write wr (in a separate +// goroutine since that might block on the network), and updates the +// serve goroutine's state about the world, updated from info in wr. +func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) { + sc.serveG.check() + if sc.writingFrame { + panic("internal error: can only be writing one frame at a time") + } + + st := wr.stream + if st != nil { + switch st.state { + case stateHalfClosedLocal: + switch wr.write.(type) { + case StreamError, handlerPanicRST, writeWindowUpdate: + // RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE + // in this state. 
(We never send PRIORITY from the server, so that is not checked.) + default: + panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr)) + } + case stateClosed: + panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr)) + } + } + if wpp, ok := wr.write.(*writePushPromise); ok { + var err error + wpp.promisedID, err = wpp.allocatePromisedID() + if err != nil { + sc.writingFrameAsync = false + wr.replyToWriter(err) + return + } + } + + sc.writingFrame = true + sc.needsFrameFlush = true + if wr.write.staysWithinBuffer(sc.bw.Available()) { + sc.writingFrameAsync = false + err := wr.write.writeFrame(sc) + sc.wroteFrame(frameWriteResult{wr, err}) + } else { + sc.writingFrameAsync = true + go sc.writeFrameAsync(wr) + } +} + +// errHandlerPanicked is the error given to any callers blocked in a read from +// Request.Body when the main goroutine panics. Since most handlers read in +// the main ServeHTTP goroutine, this will show up rarely. +var errHandlerPanicked = errors.New("http2: handler panicked") + +// wroteFrame is called on the serve goroutine with the result of +// whatever happened on writeFrameAsync. +func (sc *serverConn) wroteFrame(res frameWriteResult) { + sc.serveG.check() + if !sc.writingFrame { + panic("internal error: expected to be already writing a frame") + } + sc.writingFrame = false + sc.writingFrameAsync = false + + wr := res.wr + + if writeEndsStream(wr.write) { + st := wr.stream + if st == nil { + panic("internal error: expecting non-nil stream") + } + switch st.state { + case stateOpen: + // Here we would go to stateHalfClosedLocal in + // theory, but since our handler is done and + // the net/http package provides no mechanism + // for closing a ResponseWriter while still + // reading data (see possible TODO at top of + // this file), we go into closed state here + // anyway, after telling the peer we're + // hanging up on them. We'll transition to + // stateClosed after the RST_STREAM frame is + // written. + st.state = stateHalfClosedLocal + // Section 8.1: a server MAY request that the client abort + // transmission of a request without error by sending a + // RST_STREAM with an error code of NO_ERROR after sending + // a complete response. + sc.resetStream(streamError(st.id, ErrCodeNo)) + case stateHalfClosedRemote: + sc.closeStream(st, errHandlerComplete) + } + } else { + switch v := wr.write.(type) { + case StreamError: + // st may be unknown if the RST_STREAM was generated to reject bad input. + if st, ok := sc.streams[v.StreamID]; ok { + sc.closeStream(st, v) + } + case handlerPanicRST: + sc.closeStream(wr.stream, errHandlerPanicked) + } + } + + // Reply (if requested) to unblock the ServeHTTP goroutine. + wr.replyToWriter(res.err) + + sc.scheduleFrameWrite() +} + +// scheduleFrameWrite tickles the frame writing scheduler. +// +// If a frame is already being written, nothing happens. This will be called again +// when the frame is done being written. +// +// If a frame isn't being written and we need to send one, the best frame +// to send is selected, preferring first things that aren't +// stream-specific (e.g. ACKing settings), and then finding the +// highest priority stream. +// +// If a frame isn't being written and there's nothing else to send, we +// flush the write buffer.
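+//
+// Aside: which stream frame is "best" is delegated to the configured
+// WriteScheduler; ServeConn falls back to NewRandomWriteScheduler when
+// Server.NewWriteScheduler is nil. A hedged sketch of opting into RFC
+// 7540 priority ordering instead:
+//
+//	h2 := &http2.Server{
+//		NewWriteScheduler: func() http2.WriteScheduler {
+//			return http2.NewPriorityWriteScheduler(nil) // nil uses default config
+//		},
+//	}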
+func (sc *serverConn) scheduleFrameWrite() { + sc.serveG.check() + if sc.writingFrame || sc.inFrameScheduleLoop { + return + } + sc.inFrameScheduleLoop = true + for !sc.writingFrameAsync { + if sc.needToSendGoAway { + sc.needToSendGoAway = false + sc.startFrameWrite(FrameWriteRequest{ + write: &writeGoAway{ + maxStreamID: sc.maxClientStreamID, + code: sc.goAwayCode, + }, + }) + continue + } + if sc.needToSendSettingsAck { + sc.needToSendSettingsAck = false + sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}}) + continue + } + if !sc.inGoAway || sc.goAwayCode == ErrCodeNo { + if wr, ok := sc.writeSched.Pop(); ok { + sc.startFrameWrite(wr) + continue + } + } + if sc.needsFrameFlush { + sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}}) + sc.needsFrameFlush = false // after startFrameWrite, since it sets this true + continue + } + break + } + sc.inFrameScheduleLoop = false +} + +// startGracefulShutdown gracefully shuts down a connection. This +// sends GOAWAY with ErrCodeNo to tell the client we're gracefully +// shutting down. The connection isn't closed until all current +// streams are done. +// +// startGracefulShutdown returns immediately; it does not wait until +// the connection has shut down. +func (sc *serverConn) startGracefulShutdown() { + sc.serveG.checkNotOn() // NOT + sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) }) +} + +// After sending GOAWAY, the connection will close after goAwayTimeout. +// If we close the connection immediately after sending GOAWAY, there may +// be unsent data in our kernel receive buffer, which will cause the kernel +// to send a TCP RST on close() instead of a FIN. This RST will abort the +// connection immediately, whether or not the client had received the GOAWAY. +// +// Ideally we should delay for at least 1 RTT + epsilon so the client has +// a chance to read the GOAWAY and stop sending messages. Measuring RTT +// is hard, so we approximate with 1 second. See golang.org/issue/18701. +// +// This is a var so it can be shorter in tests, where all requests use the +// loopback interface making the expected RTT very small. +// +// TODO: configurable? +var goAwayTimeout = 1 * time.Second + +func (sc *serverConn) startGracefulShutdownInternal() { + sc.goAway(ErrCodeNo) +} + +func (sc *serverConn) goAway(code ErrCode) { + sc.serveG.check() + if sc.inGoAway { + return + } + sc.inGoAway = true + sc.needToSendGoAway = true + sc.goAwayCode = code + sc.scheduleFrameWrite() +} + +func (sc *serverConn) shutDownIn(d time.Duration) { + sc.serveG.check() + sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) +} + +func (sc *serverConn) resetStream(se StreamError) { + sc.serveG.check() + sc.writeFrame(FrameWriteRequest{write: se}) + if st, ok := sc.streams[se.StreamID]; ok { + st.resetQueued = true + } +} + +// processFrameFromReader processes the serve loop's read from readFrameCh from the +// frame-reading goroutine. +// processFrameFromReader returns whether the connection should be kept open. +func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { + sc.serveG.check() + err := res.err + if err != nil { + if err == ErrFrameTooLarge { + sc.goAway(ErrCodeFrameSize) + return true // goAway will close the loop + } + clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) + if clientGone { + // TODO: could we also get into this state if + // the peer does a half close + // (e.g.
CloseWrite) because they're done + // sending frames but they're still wanting + // our open replies? Investigate. + // TODO: add CloseWrite to crypto/tls.Conn first + // so we have a way to test this? I suppose + // just for testing we could have a non-TLS mode. + return false + } + } else { + f := res.f + if VerboseLogs { + sc.vlogf("http2: server read frame %v", summarizeFrame(f)) + } + err = sc.processFrame(f) + if err == nil { + return true + } + } + + switch ev := err.(type) { + case StreamError: + sc.resetStream(ev) + return true + case goAwayFlowError: + sc.goAway(ErrCodeFlowControl) + return true + case ConnectionError: + sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) + sc.goAway(ErrCode(ev)) + return true // goAway will handle shutdown + default: + if res.err != nil { + sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) + } else { + sc.logf("http2: server closing client connection: %v", err) + } + return false + } +} + +func (sc *serverConn) processFrame(f Frame) error { + sc.serveG.check() + + // First frame received must be SETTINGS. + if !sc.sawFirstSettings { + if _, ok := f.(*SettingsFrame); !ok { + return ConnectionError(ErrCodeProtocol) + } + sc.sawFirstSettings = true + } + + switch f := f.(type) { + case *SettingsFrame: + return sc.processSettings(f) + case *MetaHeadersFrame: + return sc.processHeaders(f) + case *WindowUpdateFrame: + return sc.processWindowUpdate(f) + case *PingFrame: + return sc.processPing(f) + case *DataFrame: + return sc.processData(f) + case *RSTStreamFrame: + return sc.processResetStream(f) + case *PriorityFrame: + return sc.processPriority(f) + case *GoAwayFrame: + return sc.processGoAway(f) + case *PushPromiseFrame: + // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE + // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR. + return ConnectionError(ErrCodeProtocol) + default: + sc.vlogf("http2: server ignoring frame: %v", f.Header()) + return nil + } +} + +func (sc *serverConn) processPing(f *PingFrame) error { + sc.serveG.check() + if f.IsAck() { + // 6.7 PING: " An endpoint MUST NOT respond to PING frames + // containing this flag." + return nil + } + if f.StreamID != 0 { + // "PING frames are not associated with any individual + // stream. If a PING frame is received with a stream + // identifier field value other than 0x0, the recipient MUST + // respond with a connection error (Section 5.4.1) of type + // PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + if sc.inGoAway && sc.goAwayCode != ErrCodeNo { + return nil + } + sc.writeFrame(FrameWriteRequest{write: writePingAck{f}}) + return nil +} + +func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { + sc.serveG.check() + switch { + case f.StreamID != 0: // stream-level flow control + state, st := sc.state(f.StreamID) + if state == stateIdle { + // Section 5.1: "Receiving any frame other than HEADERS + // or PRIORITY on a stream in this state MUST be + // treated as a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + if st == nil { + // "WINDOW_UPDATE can be sent by a peer that has sent a + // frame bearing the END_STREAM flag. This means that a + // receiver could receive a WINDOW_UPDATE frame on a "half + // closed (remote)" or "closed" stream. A receiver MUST + // NOT treat this as an error, see Section 5.1." 
+ return nil + } + if !st.flow.add(int32(f.Increment)) { + return streamError(f.StreamID, ErrCodeFlowControl) + } + default: // connection-level flow control + if !sc.flow.add(int32(f.Increment)) { + return goAwayFlowError{} + } + } + sc.scheduleFrameWrite() + return nil +} + +func (sc *serverConn) processResetStream(f *RSTStreamFrame) error { + sc.serveG.check() + + state, st := sc.state(f.StreamID) + if state == stateIdle { + // 6.4 "RST_STREAM frames MUST NOT be sent for a + // stream in the "idle" state. If a RST_STREAM frame + // identifying an idle stream is received, the + // recipient MUST treat this as a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + return ConnectionError(ErrCodeProtocol) + } + if st != nil { + st.cancelCtx() + sc.closeStream(st, streamError(f.StreamID, f.ErrCode)) + } + return nil +} + +func (sc *serverConn) closeStream(st *stream, err error) { + sc.serveG.check() + if st.state == stateIdle || st.state == stateClosed { + panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) + } + st.state = stateClosed + if st.writeDeadline != nil { + st.writeDeadline.Stop() + } + if st.isPushed() { + sc.curPushedStreams-- + } else { + sc.curClientStreams-- + } + delete(sc.streams, st.id) + if len(sc.streams) == 0 { + sc.setConnState(http.StateIdle) + if sc.srv.IdleTimeout != 0 { + sc.idleTimer.Reset(sc.srv.IdleTimeout) + } + if h1ServerKeepAlivesDisabled(sc.hs) { + sc.startGracefulShutdownInternal() + } + } + if p := st.body; p != nil { + // Return any buffered unread bytes worth of conn-level flow control. + // See golang.org/issue/16481 + sc.sendWindowUpdate(nil, p.Len()) + + p.CloseWithError(err) + } + st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc + sc.writeSched.CloseStream(st.id) +} + +func (sc *serverConn) processSettings(f *SettingsFrame) error { + sc.serveG.check() + if f.IsAck() { + sc.unackedSettings-- + if sc.unackedSettings < 0 { + // Why is the peer ACKing settings we never sent? + // The spec doesn't mention this case, but + // hang up on them anyway. + return ConnectionError(ErrCodeProtocol) + } + return nil + } + if err := f.ForeachSetting(sc.processSetting); err != nil { + return err + } + sc.needToSendSettingsAck = true + sc.scheduleFrameWrite() + return nil +} + +func (sc *serverConn) processSetting(s Setting) error { + sc.serveG.check() + if err := s.Valid(); err != nil { + return err + } + if VerboseLogs { + sc.vlogf("http2: server processing setting %v", s) + } + switch s.ID { + case SettingHeaderTableSize: + sc.headerTableSize = s.Val + sc.hpackEncoder.SetMaxDynamicTableSize(s.Val) + case SettingEnablePush: + sc.pushEnabled = s.Val != 0 + case SettingMaxConcurrentStreams: + sc.clientMaxStreams = s.Val + case SettingInitialWindowSize: + return sc.processSettingInitialWindowSize(s.Val) + case SettingMaxFrameSize: + sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 + case SettingMaxHeaderListSize: + sc.peerMaxHeaderListSize = s.Val + default: + // Unknown setting: "An endpoint that receives a SETTINGS + // frame with any unknown or unsupported identifier MUST + // ignore that setting." + if VerboseLogs { + sc.vlogf("http2: server ignoring unknown setting %v", s) + } + } + return nil +} + +func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { + sc.serveG.check() + // Note: val already validated to be within range by + // processSetting's Valid call. + + // "A SETTINGS frame can alter the initial flow control window + // size for all current streams. 
When the value of + // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST + // adjust the size of all stream flow control windows that it + // maintains by the difference between the new value and the + // old value." + old := sc.initialStreamSendWindowSize + sc.initialStreamSendWindowSize = int32(val) + growth := int32(val) - old // may be negative + for _, st := range sc.streams { + if !st.flow.add(growth) { + // 6.9.2 Initial Flow Control Window Size + // "An endpoint MUST treat a change to + // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow + // control window to exceed the maximum size as a + // connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR." + return ConnectionError(ErrCodeFlowControl) + } + } + return nil +} + +func (sc *serverConn) processData(f *DataFrame) error { + sc.serveG.check() + if sc.inGoAway && sc.goAwayCode != ErrCodeNo { + return nil + } + data := f.Data() + + // "If a DATA frame is received whose stream is not in "open" + // or "half closed (local)" state, the recipient MUST respond + // with a stream error (Section 5.4.2) of type STREAM_CLOSED." + id := f.Header().StreamID + state, st := sc.state(id) + if id == 0 || state == stateIdle { + // Section 5.1: "Receiving any frame other than HEADERS + // or PRIORITY on a stream in this state MUST be + // treated as a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued { + // This includes sending a RST_STREAM if the stream is + // in stateHalfClosedLocal (which currently means that + // the http.Handler returned, so it's done reading & + // done writing). Try to stop the client from sending + // more DATA. + + // But still enforce their connection-level flow control, + // and return any flow control bytes since we're not going + // to consume them. + if sc.inflow.available() < int32(f.Length) { + return streamError(id, ErrCodeFlowControl) + } + // Deduct the flow control from inflow, since we're + // going to immediately add it back in + // sendWindowUpdate, which also schedules sending the + // frames. + sc.inflow.take(int32(f.Length)) + sc.sendWindowUpdate(nil, int(f.Length)) // conn-level + + if st != nil && st.resetQueued { + // Already have a stream error in flight. Don't send another. + return nil + } + return streamError(id, ErrCodeStreamClosed) + } + if st.body == nil { + panic("internal error: should have a body in this state") + } + + // Sender sending more than they'd declared? + if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { + st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) + return streamError(id, ErrCodeStreamClosed) + } + if f.Length > 0 { + // Check whether the client has flow control quota. + if st.inflow.available() < int32(f.Length) { + return streamError(id, ErrCodeFlowControl) + } + st.inflow.take(int32(f.Length)) + + if len(data) > 0 { + wrote, err := st.body.Write(data) + if err != nil { + return streamError(id, ErrCodeStreamClosed) + } + if wrote != len(data) { + panic("internal error: bad Writer") + } + st.bodyBytes += int64(len(data)) + } + + // Return any padded flow control now, since we won't + // refund it later on body reads. 
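+ // For example (illustrative numbers): a DATA frame with
+ // f.Length = 1000 but len(data) = 100 has pad = 900, so 900
+ // flow-control tokens are credited back to both the connection
+ // and the stream immediately; the 100 data bytes are refunded
+ // later, only as the handler actually reads them.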
+ if pad := int32(f.Length) - int32(len(data)); pad > 0 { + sc.sendWindowUpdate32(nil, pad) + sc.sendWindowUpdate32(st, pad) + } + } + if f.StreamEnded() { + st.endStream() + } + return nil +} + +func (sc *serverConn) processGoAway(f *GoAwayFrame) error { + sc.serveG.check() + if f.ErrCode != ErrCodeNo { + sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f) + } else { + sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f) + } + sc.startGracefulShutdownInternal() + // http://tools.ietf.org/html/rfc7540#section-6.8 + // We should not create any new streams, which means we should disable push. + sc.pushEnabled = false + return nil +} + +// isPushed reports whether the stream is server-initiated. +func (st *stream) isPushed() bool { + return st.id%2 == 0 +} + +// endStream closes a Request.Body's pipe. It is called when a DATA +// frame says a request body is over (or after trailers). +func (st *stream) endStream() { + sc := st.sc + sc.serveG.check() + + if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes { + st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes", + st.declBodyBytes, st.bodyBytes)) + } else { + st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest) + st.body.CloseWithError(io.EOF) + } + st.state = stateHalfClosedRemote +} + +// copyTrailersToHandlerRequest is run in the Handler's goroutine in +// its Request.Body.Read just before it gets io.EOF. +func (st *stream) copyTrailersToHandlerRequest() { + for k, vv := range st.trailer { + if _, ok := st.reqTrailer[k]; ok { + // Only copy it over if it was pre-declared. + st.reqTrailer[k] = vv + } + } +} + +// onWriteTimeout is run on its own goroutine (from time.AfterFunc) +// when the stream's WriteTimeout has fired. +func (st *stream) onWriteTimeout() { + st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)}) +} + +func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { + sc.serveG.check() + id := f.StreamID + if sc.inGoAway { + // Ignore. + return nil + } + // http://tools.ietf.org/html/rfc7540#section-5.1.1 + // Streams initiated by a client MUST use odd-numbered stream + // identifiers. [...] An endpoint that receives an unexpected + // stream identifier MUST respond with a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + if id%2 != 1 { + return ConnectionError(ErrCodeProtocol) + } + // A HEADERS frame can be used to create a new stream or + // send a trailer for an open one. If we already have a stream + // open, let it process its own HEADERS frame (trailers at this + // point, if it's valid). + if st := sc.streams[f.StreamID]; st != nil { + if st.resetQueued { + // We're sending RST_STREAM to close the stream, so don't bother + // processing this frame. + return nil + } + return st.processTrailerHeaders(f) + } + + // [...] The identifier of a newly established stream MUST be + // numerically greater than all streams that the initiating + // endpoint has opened or reserved. [...] An endpoint that + // receives an unexpected stream identifier MUST respond with + // a connection error (Section 5.4.1) of type PROTOCOL_ERROR. + if id <= sc.maxClientStreamID { + return ConnectionError(ErrCodeProtocol) + } + sc.maxClientStreamID = id + + if sc.idleTimer != nil { + sc.idleTimer.Stop() + } + + // http://tools.ietf.org/html/rfc7540#section-5.1.2 + // [...] Endpoints MUST NOT exceed the limit set by their peer.
An + // endpoint that receives a HEADERS frame that causes their + // advertised concurrent stream limit to be exceeded MUST treat + // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR + // or REFUSED_STREAM. + if sc.curClientStreams+1 > sc.advMaxStreams { + if sc.unackedSettings == 0 { + // They should know better. + return streamError(id, ErrCodeProtocol) + } + // Assume it's a network race, where they just haven't + // received our last SETTINGS update. But actually + // this can't happen yet, because we don't yet provide + // a way for users to adjust server parameters at + // runtime. + return streamError(id, ErrCodeRefusedStream) + } + + initialState := stateOpen + if f.StreamEnded() { + initialState = stateHalfClosedRemote + } + st := sc.newStream(id, 0, initialState) + + if f.HasPriority() { + if err := checkPriority(f.StreamID, f.Priority); err != nil { + return err + } + sc.writeSched.AdjustStream(st.id, f.Priority) + } + + rw, req, err := sc.newWriterAndRequest(st, f) + if err != nil { + return err + } + st.reqTrailer = req.Trailer + if st.reqTrailer != nil { + st.trailer = make(http.Header) + } + st.body = req.Body.(*requestBody).pipe // may be nil + st.declBodyBytes = req.ContentLength + + handler := sc.handler.ServeHTTP + if f.Truncated { + // Their header list was too long. Send a 431 error. + handler = handleHeaderListTooLong + } else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil { + handler = new400Handler(err) + } + + // The net/http package sets the read deadline from the + // http.Server.ReadTimeout during the TLS handshake, but then + // passes the connection off to us with the deadline already + // set. Disarm it here after the request headers are read, + // similar to how the http1 server works. Here it's + // technically more like the http1 Server's ReadHeaderTimeout + // (in Go 1.8), though. That's a more sane option anyway. + if sc.hs.ReadTimeout != 0 { + sc.conn.SetReadDeadline(time.Time{}) + } + + go sc.runHandler(rw, req, handler) + return nil +} + +func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { + sc := st.sc + sc.serveG.check() + if st.gotTrailerHeader { + return ConnectionError(ErrCodeProtocol) + } + st.gotTrailerHeader = true + if !f.StreamEnded() { + return streamError(st.id, ErrCodeProtocol) + } + + if len(f.PseudoFields()) > 0 { + return streamError(st.id, ErrCodeProtocol) + } + if st.trailer != nil { + for _, hf := range f.RegularFields() { + key := sc.canonicalHeader(hf.Name) + if !ValidTrailerHeader(key) { + // TODO: send more details to the peer somehow. But http2 has + // no way to send debug data at a stream level. Discuss with + // HTTP folk. + return streamError(st.id, ErrCodeProtocol) + } + st.trailer[key] = append(st.trailer[key], hf.Value) + } + } + st.endStream() + return nil +} + +func checkPriority(streamID uint32, p PriorityParam) error { + if streamID == p.StreamDep { + // Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat + // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR." + // Section 5.3.3 says that a stream can depend on one of its dependencies, + // so it's only self-dependencies that are forbidden. 
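+ // Illustrative IDs: reaching this branch means, say, stream 5
+ // declared StreamDep == 5. A dependency of stream 5 on stream 3
+ // (or on any other stream) is accepted and never reaches here.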
+ return streamError(streamID, ErrCodeProtocol) + } + return nil +} + +func (sc *serverConn) processPriority(f *PriorityFrame) error { + if sc.inGoAway { + return nil + } + if err := checkPriority(f.StreamID, f.PriorityParam); err != nil { + return err + } + sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam) + return nil +} + +func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream { + sc.serveG.check() + if id == 0 { + panic("internal error: cannot create stream with id 0") + } + + ctx, cancelCtx := contextWithCancel(sc.baseCtx) + st := &stream{ + sc: sc, + id: id, + state: state, + ctx: ctx, + cancelCtx: cancelCtx, + } + st.cw.Init() + st.flow.conn = &sc.flow // link to conn-level counter + st.flow.add(sc.initialStreamSendWindowSize) + st.inflow.conn = &sc.inflow // link to conn-level counter + st.inflow.add(sc.srv.initialStreamRecvWindowSize()) + if sc.hs.WriteTimeout != 0 { + st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) + } + + sc.streams[id] = st + sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID}) + if st.isPushed() { + sc.curPushedStreams++ + } else { + sc.curClientStreams++ + } + if sc.curOpenStreams() == 1 { + sc.setConnState(http.StateActive) + } + + return st +} + +func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { + sc.serveG.check() + + rp := requestParam{ + method: f.PseudoValue("method"), + scheme: f.PseudoValue("scheme"), + authority: f.PseudoValue("authority"), + path: f.PseudoValue("path"), + } + + isConnect := rp.method == "CONNECT" + if isConnect { + if rp.path != "" || rp.scheme != "" || rp.authority == "" { + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { + // See 8.1.2.6 Malformed Requests and Responses: + // + // Malformed requests or responses that are detected + // MUST be treated as a stream error (Section 5.4.2) + // of type PROTOCOL_ERROR." 
+ // + // 8.1.2.3 Request Pseudo-Header Fields + // "All HTTP/2 requests MUST include exactly one valid + // value for the :method, :scheme, and :path + // pseudo-header fields" + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + + bodyOpen := !f.StreamEnded() + if rp.method == "HEAD" && bodyOpen { + // HEAD requests can't have bodies + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + + rp.header = make(http.Header) + for _, hf := range f.RegularFields() { + rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) + } + if rp.authority == "" { + rp.authority = rp.header.Get("Host") + } + + rw, req, err := sc.newWriterAndRequestNoBody(st, rp) + if err != nil { + return nil, nil, err + } + if bodyOpen { + if vv, ok := rp.header["Content-Length"]; ok { + req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) + } else { + req.ContentLength = -1 + } + req.Body.(*requestBody).pipe = &pipe{ + b: &dataBuffer{expected: req.ContentLength}, + } + } + return rw, req, nil +} + +type requestParam struct { + method string + scheme, authority, path string + header http.Header +} + +func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { + sc.serveG.check() + + var tlsState *tls.ConnectionState // nil if not scheme https + if rp.scheme == "https" { + tlsState = sc.tlsState + } + + needsContinue := rp.header.Get("Expect") == "100-continue" + if needsContinue { + rp.header.Del("Expect") + } + // Merge Cookie headers into one "; "-delimited value. + if cookies := rp.header["Cookie"]; len(cookies) > 1 { + rp.header.Set("Cookie", strings.Join(cookies, "; ")) + } + + // Setup Trailers + var trailer http.Header + for _, v := range rp.header["Trailer"] { + for _, key := range strings.Split(v, ",") { + key = http.CanonicalHeaderKey(strings.TrimSpace(key)) + switch key { + case "Transfer-Encoding", "Trailer", "Content-Length": + // Bogus. (copy of http1 rules) + // Ignore. + default: + if trailer == nil { + trailer = make(http.Header) + } + trailer[key] = nil + } + } + } + delete(rp.header, "Trailer") + + var url_ *url.URL + var requestURI string + if rp.method == "CONNECT" { + url_ = &url.URL{Host: rp.authority} + requestURI = rp.authority // mimic HTTP/1 server behavior + } else { + var err error + url_, err = url.ParseRequestURI(rp.path) + if err != nil { + return nil, nil, streamError(st.id, ErrCodeProtocol) + } + requestURI = rp.path + } + + body := &requestBody{ + conn: sc, + stream: st, + needsContinue: needsContinue, + } + req := &http.Request{ + Method: rp.method, + URL: url_, + RemoteAddr: sc.remoteAddrStr, + Header: rp.header, + RequestURI: requestURI, + Proto: "HTTP/2.0", + ProtoMajor: 2, + ProtoMinor: 0, + TLS: tlsState, + Host: rp.authority, + Body: body, + Trailer: trailer, + } + req = requestWithContext(req, st.ctx) + + rws := responseWriterStatePool.Get().(*responseWriterState) + bwSave := rws.bw + *rws = responseWriterState{} // zero all the fields + rws.conn = sc + rws.bw = bwSave + rws.bw.Reset(chunkWriter{rws}) + rws.stream = st + rws.req = req + rws.body = body + + rw := &responseWriter{rws: rws} + return rw, req, nil +} + +// Run on its own goroutine. 
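+//
+// A hedged sketch of the handler-side view of the trailer plumbing
+// above: keys the client listed in its Trailer header appear in
+// r.Trailer with nil values and are filled in only once the body has
+// been read to EOF ("X-Checksum" is a made-up key):
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		io.Copy(ioutil.Discard, r.Body) // drain the body first
+//		if v := r.Trailer.Get("X-Checksum"); v != "" {
+//			log.Printf("client trailer: %s", v)
+//		}
+//	}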
+func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { + didPanic := true + defer func() { + rw.rws.stream.cancelCtx() + if didPanic { + e := recover() + sc.writeFrameFromHandler(FrameWriteRequest{ + write: handlerPanicRST{rw.rws.stream.id}, + stream: rw.rws.stream, + }) + // Same as net/http: + if shouldLogPanic(e) { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf) + } + return + } + rw.handlerDone() + }() + handler(rw, req) + didPanic = false +} + +func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) { + // 10.5.1 Limits on Header Block Size: + // .. "A server that receives a larger header block than it is + // willing to handle can send an HTTP 431 (Request Header Fields Too + // Large) status code" + const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+ + w.WriteHeader(statusRequestHeaderFieldsTooLarge) + io.WriteString(w, "
<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>
    ") +} + +// called from handler goroutines. +// h may be nil. +func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error { + sc.serveG.checkNotOn() // NOT on + var errc chan error + if headerData.h != nil { + // If there's a header map (which we don't own), so we have to block on + // waiting for this frame to be written, so an http.Flush mid-handler + // writes out the correct value of keys, before a handler later potentially + // mutates it. + errc = errChanPool.Get().(chan error) + } + if err := sc.writeFrameFromHandler(FrameWriteRequest{ + write: headerData, + stream: st, + done: errc, + }); err != nil { + return err + } + if errc != nil { + select { + case err := <-errc: + errChanPool.Put(errc) + return err + case <-sc.doneServing: + return errClientDisconnected + case <-st.cw: + return errStreamClosed + } + } + return nil +} + +// called from handler goroutines. +func (sc *serverConn) write100ContinueHeaders(st *stream) { + sc.writeFrameFromHandler(FrameWriteRequest{ + write: write100ContinueHeadersFrame{st.id}, + stream: st, + }) +} + +// A bodyReadMsg tells the server loop that the http.Handler read n +// bytes of the DATA from the client on the given stream. +type bodyReadMsg struct { + st *stream + n int +} + +// called from handler goroutines. +// Notes that the handler for the given stream ID read n bytes of its body +// and schedules flow control tokens to be sent. +func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) { + sc.serveG.checkNotOn() // NOT on + if n > 0 { + select { + case sc.bodyReadCh <- bodyReadMsg{st, n}: + case <-sc.doneServing: + } + } +} + +func (sc *serverConn) noteBodyRead(st *stream, n int) { + sc.serveG.check() + sc.sendWindowUpdate(nil, n) // conn-level + if st.state != stateHalfClosedRemote && st.state != stateClosed { + // Don't send this WINDOW_UPDATE if the stream is closed + // remotely. + sc.sendWindowUpdate(st, n) + } +} + +// st may be nil for conn-level +func (sc *serverConn) sendWindowUpdate(st *stream, n int) { + sc.serveG.check() + // "The legal range for the increment to the flow control + // window is 1 to 2^31-1 (2,147,483,647) octets." + // A Go Read call on 64-bit machines could in theory read + // a larger Read than this. Very unlikely, but we handle it here + // rather than elsewhere for now. + const maxUint31 = 1<<31 - 1 + for n >= maxUint31 { + sc.sendWindowUpdate32(st, maxUint31) + n -= maxUint31 + } + sc.sendWindowUpdate32(st, int32(n)) +} + +// st may be nil for conn-level +func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { + sc.serveG.check() + if n == 0 { + return + } + if n < 0 { + panic("negative update") + } + var streamID uint32 + if st != nil { + streamID = st.id + } + sc.writeFrame(FrameWriteRequest{ + write: writeWindowUpdate{streamID: streamID, n: uint32(n)}, + stream: st, + }) + var ok bool + if st == nil { + ok = sc.inflow.add(n) + } else { + ok = st.inflow.add(n) + } + if !ok { + panic("internal error; sent too many window updates without decrements?") + } +} + +// requestBody is the Handler's Request.Body type. +// Read and Close may be called concurrently. 
+type requestBody struct { + stream *stream + conn *serverConn + closed bool // for use by Close only + sawEOF bool // for use by Read only + pipe *pipe // non-nil if we have a HTTP entity message body + needsContinue bool // need to send a 100-continue +} + +func (b *requestBody) Close() error { + if b.pipe != nil && !b.closed { + b.pipe.BreakWithError(errClosedBody) + } + b.closed = true + return nil +} + +func (b *requestBody) Read(p []byte) (n int, err error) { + if b.needsContinue { + b.needsContinue = false + b.conn.write100ContinueHeaders(b.stream) + } + if b.pipe == nil || b.sawEOF { + return 0, io.EOF + } + n, err = b.pipe.Read(p) + if err == io.EOF { + b.sawEOF = true + } + if b.conn == nil && inTests { + return + } + b.conn.noteBodyReadFromHandler(b.stream, n, err) + return +} + +// responseWriter is the http.ResponseWriter implementation. It's +// intentionally small (1 pointer wide) to minimize garbage. The +// responseWriterState pointer inside is zeroed at the end of a +// request (in handlerDone) and calls on the responseWriter thereafter +// simply crash (caller's mistake), but the much larger responseWriterState +// and buffers are reused between multiple requests. +type responseWriter struct { + rws *responseWriterState +} + +// Optional http.ResponseWriter interfaces implemented. +var ( + _ http.CloseNotifier = (*responseWriter)(nil) + _ http.Flusher = (*responseWriter)(nil) + _ stringWriter = (*responseWriter)(nil) +) + +type responseWriterState struct { + // immutable within a request: + stream *stream + req *http.Request + body *requestBody // to close at end of request, if DATA frames didn't + conn *serverConn + + // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc + bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState} + + // mutated by http.Handler goroutine: + handlerHeader http.Header // nil until called + snapHeader http.Header // snapshot of handlerHeader at WriteHeader time + trailers []string // set in writeChunk + status int // status code passed to WriteHeader + wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. + sentHeader bool // have we sent the header frame? + handlerDone bool // handler has finished + dirty bool // a Write failed; don't reuse this responseWriterState + + sentContentLen int64 // non-zero if handler set a Content-Length header + wroteBytes int64 + + closeNotifierMu sync.Mutex // guards closeNotifierCh + closeNotifierCh chan bool // nil until first used +} + +type chunkWriter struct{ rws *responseWriterState } + +func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } + +func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 } + +// declareTrailer is called for each Trailer header when the +// response header is written. It notes that a header will need to be +// written in the trailers at the end of the response. +func (rws *responseWriterState) declareTrailer(k string) { + k = http.CanonicalHeaderKey(k) + if !ValidTrailerHeader(k) { + // Forbidden by RFC 7230, section 4.1.2. + rws.conn.logf("ignoring invalid trailer %q", k) + return + } + if !strSliceContains(rws.trailers, k) { + rws.trailers = append(rws.trailers, k) + } +} + +// writeChunk writes chunks from the bufio.Writer. But because +// bufio.Writer may bypass its chunking, sometimes p may be +// arbitrarily large. 
+// +// writeChunk is also responsible (on the first chunk) for sending the +// HEADER response. +func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { + if !rws.wroteHeader { + rws.writeHeader(200) + } + + isHeadResp := rws.req.Method == "HEAD" + if !rws.sentHeader { + rws.sentHeader = true + var ctype, clen string + if clen = rws.snapHeader.Get("Content-Length"); clen != "" { + rws.snapHeader.Del("Content-Length") + clen64, err := strconv.ParseInt(clen, 10, 64) + if err == nil && clen64 >= 0 { + rws.sentContentLen = clen64 + } else { + clen = "" + } + } + if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { + clen = strconv.Itoa(len(p)) + } + _, hasContentType := rws.snapHeader["Content-Type"] + if !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 { + ctype = http.DetectContentType(p) + } + var date string + if _, ok := rws.snapHeader["Date"]; !ok { + // TODO(bradfitz): be faster here, like net/http? measure. + date = time.Now().UTC().Format(http.TimeFormat) + } + + for _, v := range rws.snapHeader["Trailer"] { + foreachHeaderElement(v, rws.declareTrailer) + } + + endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp + err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + httpResCode: rws.status, + h: rws.snapHeader, + endStream: endStream, + contentType: ctype, + contentLength: clen, + date: date, + }) + if err != nil { + rws.dirty = true + return 0, err + } + if endStream { + return 0, nil + } + } + if isHeadResp { + return len(p), nil + } + if len(p) == 0 && !rws.handlerDone { + return 0, nil + } + + if rws.handlerDone { + rws.promoteUndeclaredTrailers() + } + + endStream := rws.handlerDone && !rws.hasTrailers() + if len(p) > 0 || endStream { + // only send a 0 byte DATA frame if we're ending the stream. + if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { + rws.dirty = true + return 0, err + } + } + + if rws.handlerDone && rws.hasTrailers() { + err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + h: rws.handlerHeader, + trailers: rws.trailers, + endStream: true, + }) + if err != nil { + rws.dirty = true + } + return len(p), err + } + return len(p), nil +} + +// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys +// that, if present, signals that the map entry is actually for +// the response trailers, and not the response headers. The prefix +// is stripped after the ServeHTTP call finishes and the values are +// sent in the trailers. +// +// This mechanism is intended only for trailers that are not known +// prior to the headers being written. If the set of trailers is fixed +// or known before the header is written, the normal Go trailers mechanism +// is preferred: +// https://golang.org/pkg/net/http/#ResponseWriter +// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers +const TrailerPrefix = "Trailer:" + +// promoteUndeclaredTrailers permits http.Handlers to set trailers +// after the header has already been flushed. 
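+// A hedged illustration (the header name and value are hypothetical;
+// the predeclared-Trailer mechanism linked above remains preferable
+// whenever the trailer set is known before the header is written):
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		w.WriteHeader(200)
+//		io.WriteString(w, "body") // the response header goes out with the body
+//		w.Header().Set(TrailerPrefix+"X-Checksum", "0") // sent as trailer X-Checksum
+//	}
+//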
+// Because the Go ResponseWriter interface has no way to set Trailers
+// (only the Header), and because we didn't want to expand the
+// ResponseWriter interface, and because nobody used trailers, and
+// because RFC 7230 says you SHOULD (but not must) predeclare any
+// trailers in the header, the official ResponseWriter rules said
+// trailers in Go must be predeclared, and then we reuse the same
+// ResponseWriter.Header() map to mean both Headers and Trailers. When
+// it's time to write the Trailers, we pick out the fields of Headers
+// that were declared as trailers. That worked for a while, until we
+// found the first major user of Trailers in the wild: gRPC (using
+// them only over http2), and gRPC libraries permit setting trailers
+// mid-stream without predeclaring them. So: change of plans. We still
+// permit the old way, but we also permit this hack: if a Header() key
+// begins with "Trailer:", the suffix of that key is a Trailer.
+// Because ':' is an invalid token byte anyway, there is no ambiguity.
+// (And it's already filtered out.) It's mildly hacky, but not terrible.
+//
+// This method runs after the Handler is done and promotes any Header
+// fields to be trailers.
+func (rws *responseWriterState) promoteUndeclaredTrailers() {
+	for k, vv := range rws.handlerHeader {
+		if !strings.HasPrefix(k, TrailerPrefix) {
+			continue
+		}
+		trailerKey := strings.TrimPrefix(k, TrailerPrefix)
+		rws.declareTrailer(trailerKey)
+		rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv
+	}
+
+	if len(rws.trailers) > 1 {
+		sorter := sorterPool.Get().(*sorter)
+		sorter.SortStrings(rws.trailers)
+		sorterPool.Put(sorter)
+	}
+}
+
+func (w *responseWriter) Flush() {
+	rws := w.rws
+	if rws == nil {
+		panic("Flush called after Handler finished")
+	}
+	if rws.bw.Buffered() > 0 {
+		if err := rws.bw.Flush(); err != nil {
+			// Ignore the error. The frame writer already knows.
+			return
+		}
+	} else {
+		// The bufio.Writer won't call chunkWriter.Write
+		// (writeChunk with zero bytes), so we have to do it
+		// ourselves to force the HTTP response header and/or
+		// final DATA frame (with END_STREAM) to be sent.
+		rws.writeChunk(nil)
+	}
+}
+
+func (w *responseWriter) CloseNotify() <-chan bool {
+	rws := w.rws
+	if rws == nil {
+		panic("CloseNotify called after Handler finished")
+	}
+	rws.closeNotifierMu.Lock()
+	ch := rws.closeNotifierCh
+	if ch == nil {
+		ch = make(chan bool, 1)
+		rws.closeNotifierCh = ch
+		cw := rws.stream.cw
+		go func() {
+			cw.Wait() // wait for close
+			ch <- true
+		}()
+	}
+	rws.closeNotifierMu.Unlock()
+	return ch
+}
+
+func (w *responseWriter) Header() http.Header {
+	rws := w.rws
+	if rws == nil {
+		panic("Header called after Handler finished")
+	}
+	if rws.handlerHeader == nil {
+		rws.handlerHeader = make(http.Header)
+	}
+	return rws.handlerHeader
+}
+
+// checkWriteHeaderCode is a copy of net/http's checkWriteHeaderCode.
+func checkWriteHeaderCode(code int) {
+	// Issue 22880: require valid WriteHeader status codes.
+	// For now we only enforce that it's three digits.
+	// In the future we might block things over 599 (600 and above aren't defined
+	// at http://httpwg.org/specs/rfc7231.html#status.codes)
+	// and we might block under 200 (once we have more mature 1xx support).
+	// But for now any three digits.
+	//
+	// We used to send "HTTP/1.1 000 0" on the wire in responses but there's
+	// no equivalent bogus thing we can realistically send in HTTP/2,
+	// so we'll consistently panic instead and help people find their bugs
+	// early.
(We can't return an error from WriteHeader even if we wanted to.) + if code < 100 || code > 999 { + panic(fmt.Sprintf("invalid WriteHeader code %v", code)) + } +} + +func (w *responseWriter) WriteHeader(code int) { + rws := w.rws + if rws == nil { + panic("WriteHeader called after Handler finished") + } + rws.writeHeader(code) +} + +func (rws *responseWriterState) writeHeader(code int) { + if !rws.wroteHeader { + checkWriteHeaderCode(code) + rws.wroteHeader = true + rws.status = code + if len(rws.handlerHeader) > 0 { + rws.snapHeader = cloneHeader(rws.handlerHeader) + } + } +} + +func cloneHeader(h http.Header) http.Header { + h2 := make(http.Header, len(h)) + for k, vv := range h { + vv2 := make([]string, len(vv)) + copy(vv2, vv) + h2[k] = vv2 + } + return h2 +} + +// The Life Of A Write is like this: +// +// * Handler calls w.Write or w.WriteString -> +// * -> rws.bw (*bufio.Writer) -> +// * (Handler might call Flush) +// * -> chunkWriter{rws} +// * -> responseWriterState.writeChunk(p []byte) +// * -> responseWriterState.writeChunk (most of the magic; see comment there) +func (w *responseWriter) Write(p []byte) (n int, err error) { + return w.write(len(p), p, "") +} + +func (w *responseWriter) WriteString(s string) (n int, err error) { + return w.write(len(s), nil, s) +} + +// either dataB or dataS is non-zero. +func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) { + rws := w.rws + if rws == nil { + panic("Write called after Handler finished") + } + if !rws.wroteHeader { + w.WriteHeader(200) + } + if !bodyAllowedForStatus(rws.status) { + return 0, http.ErrBodyNotAllowed + } + rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set + if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen { + // TODO: send a RST_STREAM + return 0, errors.New("http2: handler wrote more than declared Content-Length") + } + + if dataB != nil { + return rws.bw.Write(dataB) + } else { + return rws.bw.WriteString(dataS) + } +} + +func (w *responseWriter) handlerDone() { + rws := w.rws + dirty := rws.dirty + rws.handlerDone = true + w.Flush() + w.rws = nil + if !dirty { + // Only recycle the pool if all prior Write calls to + // the serverConn goroutine completed successfully. If + // they returned earlier due to resets from the peer + // there might still be write goroutines outstanding + // from the serverConn referencing the rws memory. See + // issue 20704. + responseWriterStatePool.Put(rws) + } +} + +// Push errors. +var ( + ErrRecursivePush = errors.New("http2: recursive push not allowed") + ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS") +) + +// pushOptions is the internal version of http.PushOptions, which we +// cannot include here because it's only defined in Go 1.8 and later. +type pushOptions struct { + Method string + Header http.Header +} + +func (w *responseWriter) push(target string, opts pushOptions) error { + st := w.rws.stream + sc := st.sc + sc.serveG.checkNotOn() + + // No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream." + // http://tools.ietf.org/html/rfc7540#section-6.6 + if st.isPushed() { + return ErrRecursivePush + } + + // Default options. + if opts.Method == "" { + opts.Method = "GET" + } + if opts.Header == nil { + opts.Header = http.Header{} + } + wantScheme := "http" + if w.rws.req.TLS != nil { + wantScheme = "https" + } + + // Validate the request. 
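+	// A hedged sketch of the caller side: handlers reach this method
+	// through the standard http.Pusher interface on Go 1.8+ (the asset
+	// path below is hypothetical):
+	//
+	//	func handler(w http.ResponseWriter, r *http.Request) {
+	//		if p, ok := w.(http.Pusher); ok {
+	//			// Push errors are advisory; the normal response still proceeds.
+	//			_ = p.Push("/static/app.css", nil)
+	//		}
+	//		io.WriteString(w, "<html>...</html>")
+	//	}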
+	u, err := url.Parse(target)
+	if err != nil {
+		return err
+	}
+	if u.Scheme == "" {
+		if !strings.HasPrefix(target, "/") {
+			return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target)
+		}
+		u.Scheme = wantScheme
+		u.Host = w.rws.req.Host
+	} else {
+		if u.Scheme != wantScheme {
+			return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme)
+		}
+		if u.Host == "" {
+			return errors.New("URL must have a host")
+		}
+	}
+	for k := range opts.Header {
+		if strings.HasPrefix(k, ":") {
+			return fmt.Errorf("promised request headers cannot include pseudo header %q", k)
+		}
+		// These headers are meaningful only if the request has a body,
+		// but PUSH_PROMISE requests cannot have a body.
+		// http://tools.ietf.org/html/rfc7540#section-8.2
+		// Also disallow Host, since the promised URL must be absolute.
+		switch strings.ToLower(k) {
+		case "content-length", "content-encoding", "trailer", "te", "expect", "host":
+			return fmt.Errorf("promised request headers cannot include %q", k)
+		}
+	}
+	if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil {
+		return err
+	}
+
+	// The RFC effectively limits promised requests to GET and HEAD:
+	// "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]"
+	// http://tools.ietf.org/html/rfc7540#section-8.2
+	if opts.Method != "GET" && opts.Method != "HEAD" {
+		return fmt.Errorf("method %q must be GET or HEAD", opts.Method)
+	}
+
+	msg := &startPushRequest{
+		parent: st,
+		method: opts.Method,
+		url:    u,
+		header: cloneHeader(opts.Header),
+		done:   errChanPool.Get().(chan error),
+	}
+
+	select {
+	case <-sc.doneServing:
+		return errClientDisconnected
+	case <-st.cw:
+		return errStreamClosed
+	case sc.serveMsgCh <- msg:
+	}
+
+	select {
+	case <-sc.doneServing:
+		return errClientDisconnected
+	case <-st.cw:
+		return errStreamClosed
+	case err := <-msg.done:
+		errChanPool.Put(msg.done)
+		return err
+	}
+}
+
+type startPushRequest struct {
+	parent *stream
+	method string
+	url    *url.URL
+	header http.Header
+	done   chan error
+}
+
+func (sc *serverConn) startPush(msg *startPushRequest) {
+	sc.serveG.check()
+
+	// http://tools.ietf.org/html/rfc7540#section-6.6.
+	// PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that
+	// is in either the "open" or "half-closed (remote)" state.
+	if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote {
+		// responseWriter.Push checks that the stream is peer-initiated.
+		msg.done <- errStreamClosed
+		return
+	}
+
+	// http://tools.ietf.org/html/rfc7540#section-6.6.
+	if !sc.pushEnabled {
+		msg.done <- http.ErrNotSupported
+		return
+	}
+
+	// PUSH_PROMISE frames must be sent in increasing order by stream ID, so
+	// we allocate an ID for the promised stream lazily, when the PUSH_PROMISE
+	// is written. Once the ID is allocated, we start the request handler.
+	allocatePromisedID := func() (uint32, error) {
+		sc.serveG.check()
+
+		// Check this again, just in case. Technically, we might have received
+		// an updated SETTINGS by the time we got around to writing this frame.
+		if !sc.pushEnabled {
+			return 0, http.ErrNotSupported
+		}
+		// http://tools.ietf.org/html/rfc7540#section-6.5.2.
+		if sc.curPushedStreams+1 > sc.clientMaxStreams {
+			return 0, ErrPushLimitReached
+		}
+
+		// http://tools.ietf.org/html/rfc7540#section-5.1.1.
+		// Streams initiated by the server MUST use even-numbered identifiers.
+ // A server that is unable to establish a new stream identifier can send a GOAWAY + // frame so that the client is forced to open a new connection for new streams. + if sc.maxPushPromiseID+2 >= 1<<31 { + sc.startGracefulShutdownInternal() + return 0, ErrPushLimitReached + } + sc.maxPushPromiseID += 2 + promisedID := sc.maxPushPromiseID + + // http://tools.ietf.org/html/rfc7540#section-8.2. + // Strictly speaking, the new stream should start in "reserved (local)", then + // transition to "half closed (remote)" after sending the initial HEADERS, but + // we start in "half closed (remote)" for simplicity. + // See further comments at the definition of stateHalfClosedRemote. + promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) + rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ + method: msg.method, + scheme: msg.url.Scheme, + authority: msg.url.Host, + path: msg.url.RequestURI(), + header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE + }) + if err != nil { + // Should not happen, since we've already validated msg.url. + panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) + } + + go sc.runHandler(rw, req, sc.handler.ServeHTTP) + return promisedID, nil + } + + sc.writeFrame(FrameWriteRequest{ + write: &writePushPromise{ + streamID: msg.parent.id, + method: msg.method, + url: msg.url, + h: msg.header, + allocatePromisedID: allocatePromisedID, + }, + stream: msg.parent, + done: msg.done, + }) +} + +// foreachHeaderElement splits v according to the "#rule" construction +// in RFC 7230 section 7 and calls fn for each non-empty element. +func foreachHeaderElement(v string, fn func(string)) { + v = textproto.TrimString(v) + if v == "" { + return + } + if !strings.Contains(v, ",") { + fn(v) + return + } + for _, f := range strings.Split(v, ",") { + if f = textproto.TrimString(f); f != "" { + fn(f) + } + } +} + +// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2 +var connHeaders = []string{ + "Connection", + "Keep-Alive", + "Proxy-Connection", + "Transfer-Encoding", + "Upgrade", +} + +// checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request, +// per RFC 7540 Section 8.1.2.2. +// The returned error is reported to users. +func checkValidHTTP2RequestHeaders(h http.Header) error { + for _, k := range connHeaders { + if _, ok := h[k]; ok { + return fmt.Errorf("request header %q is not valid in HTTP/2", k) + } + } + te := h["Te"] + if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) { + return errors.New(`request header "TE" may only be "trailers" in HTTP/2`) + } + return nil +} + +func new400Handler(err error) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + http.Error(w, err.Error(), http.StatusBadRequest) + } +} + +// ValidTrailerHeader reports whether name is a valid header field name to appear +// in trailers. 
+// See: http://tools.ietf.org/html/rfc7230#section-4.1.2 +func ValidTrailerHeader(name string) bool { + name = http.CanonicalHeaderKey(name) + if strings.HasPrefix(name, "If-") || badTrailer[name] { + return false + } + return true +} + +var badTrailer = map[string]bool{ + "Authorization": true, + "Cache-Control": true, + "Connection": true, + "Content-Encoding": true, + "Content-Length": true, + "Content-Range": true, + "Content-Type": true, + "Expect": true, + "Host": true, + "Keep-Alive": true, + "Max-Forwards": true, + "Pragma": true, + "Proxy-Authenticate": true, + "Proxy-Authorization": true, + "Proxy-Connection": true, + "Range": true, + "Realm": true, + "Te": true, + "Trailer": true, + "Transfer-Encoding": true, + "Www-Authenticate": true, +} + +// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives +// disabled. See comments on h1ServerShutdownChan above for why +// the code is written this way. +func h1ServerKeepAlivesDisabled(hs *http.Server) bool { + var x interface{} = hs + type I interface { + doKeepAlives() bool + } + if hs, ok := x.(I); ok { + return !hs.doKeepAlives() + } + return false +} diff --git a/vendor/golang.org/x/net/http2/server_push_test.go b/vendor/golang.org/x/net/http2/server_push_test.go new file mode 100644 index 0000000..918fd30 --- /dev/null +++ b/vendor/golang.org/x/net/http2/server_push_test.go @@ -0,0 +1,521 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package http2 + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "sync" + "testing" + "time" +) + +func TestServer_Push_Success(t *testing.T) { + const ( + mainBody = "index page" + pushedBody = "pushed page" + userAgent = "testagent" + cookie = "testcookie" + ) + + var stURL string + checkPromisedReq := func(r *http.Request, wantMethod string, wantH http.Header) error { + if got, want := r.Method, wantMethod; got != want { + return fmt.Errorf("promised Req.Method=%q, want %q", got, want) + } + if got, want := r.Header, wantH; !reflect.DeepEqual(got, want) { + return fmt.Errorf("promised Req.Header=%q, want %q", got, want) + } + if got, want := "https://"+r.Host, stURL; got != want { + return fmt.Errorf("promised Req.Host=%q, want %q", got, want) + } + if r.Body == nil { + return fmt.Errorf("nil Body") + } + if buf, err := ioutil.ReadAll(r.Body); err != nil || len(buf) != 0 { + return fmt.Errorf("ReadAll(Body)=%q,%v, want '',nil", buf, err) + } + return nil + } + + errc := make(chan error, 3) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + switch r.URL.RequestURI() { + case "/": + // Push "/pushed?get" as a GET request, using an absolute URL. + opt := &http.PushOptions{ + Header: http.Header{ + "User-Agent": {userAgent}, + }, + } + if err := w.(http.Pusher).Push(stURL+"/pushed?get", opt); err != nil { + errc <- fmt.Errorf("error pushing /pushed?get: %v", err) + return + } + // Push "/pushed?head" as a HEAD request, using a path. 
+ opt = &http.PushOptions{ + Method: "HEAD", + Header: http.Header{ + "User-Agent": {userAgent}, + "Cookie": {cookie}, + }, + } + if err := w.(http.Pusher).Push("/pushed?head", opt); err != nil { + errc <- fmt.Errorf("error pushing /pushed?head: %v", err) + return + } + w.Header().Set("Content-Type", "text/html") + w.Header().Set("Content-Length", strconv.Itoa(len(mainBody))) + w.WriteHeader(200) + io.WriteString(w, mainBody) + errc <- nil + + case "/pushed?get": + wantH := http.Header{} + wantH.Set("User-Agent", userAgent) + if err := checkPromisedReq(r, "GET", wantH); err != nil { + errc <- fmt.Errorf("/pushed?get: %v", err) + return + } + w.Header().Set("Content-Type", "text/html") + w.Header().Set("Content-Length", strconv.Itoa(len(pushedBody))) + w.WriteHeader(200) + io.WriteString(w, pushedBody) + errc <- nil + + case "/pushed?head": + wantH := http.Header{} + wantH.Set("User-Agent", userAgent) + wantH.Set("Cookie", cookie) + if err := checkPromisedReq(r, "HEAD", wantH); err != nil { + errc <- fmt.Errorf("/pushed?head: %v", err) + return + } + w.WriteHeader(204) + errc <- nil + + default: + errc <- fmt.Errorf("unknown RequestURL %q", r.URL.RequestURI()) + } + }) + stURL = st.ts.URL + + // Send one request, which should push two responses. + st.greet() + getSlash(st) + for k := 0; k < 3; k++ { + select { + case <-time.After(2 * time.Second): + t.Errorf("timeout waiting for handler %d to finish", k) + case err := <-errc: + if err != nil { + t.Fatal(err) + } + } + } + + checkPushPromise := func(f Frame, promiseID uint32, wantH [][2]string) error { + pp, ok := f.(*PushPromiseFrame) + if !ok { + return fmt.Errorf("got a %T; want *PushPromiseFrame", f) + } + if !pp.HeadersEnded() { + return fmt.Errorf("want END_HEADERS flag in PushPromiseFrame") + } + if got, want := pp.PromiseID, promiseID; got != want { + return fmt.Errorf("got PromiseID %v; want %v", got, want) + } + gotH := st.decodeHeader(pp.HeaderBlockFragment()) + if !reflect.DeepEqual(gotH, wantH) { + return fmt.Errorf("got promised headers %v; want %v", gotH, wantH) + } + return nil + } + checkHeaders := func(f Frame, wantH [][2]string) error { + hf, ok := f.(*HeadersFrame) + if !ok { + return fmt.Errorf("got a %T; want *HeadersFrame", f) + } + gotH := st.decodeHeader(hf.HeaderBlockFragment()) + if !reflect.DeepEqual(gotH, wantH) { + return fmt.Errorf("got response headers %v; want %v", gotH, wantH) + } + return nil + } + checkData := func(f Frame, wantData string) error { + df, ok := f.(*DataFrame) + if !ok { + return fmt.Errorf("got a %T; want *DataFrame", f) + } + if gotData := string(df.Data()); gotData != wantData { + return fmt.Errorf("got response data %q; want %q", gotData, wantData) + } + return nil + } + + // Stream 1 has 2 PUSH_PROMISE + HEADERS + DATA + // Stream 2 has HEADERS + DATA + // Stream 4 has HEADERS + expected := map[uint32][]func(Frame) error{ + 1: { + func(f Frame) error { + return checkPushPromise(f, 2, [][2]string{ + {":method", "GET"}, + {":scheme", "https"}, + {":authority", st.ts.Listener.Addr().String()}, + {":path", "/pushed?get"}, + {"user-agent", userAgent}, + }) + }, + func(f Frame) error { + return checkPushPromise(f, 4, [][2]string{ + {":method", "HEAD"}, + {":scheme", "https"}, + {":authority", st.ts.Listener.Addr().String()}, + {":path", "/pushed?head"}, + {"cookie", cookie}, + {"user-agent", userAgent}, + }) + }, + func(f Frame) error { + return checkHeaders(f, [][2]string{ + {":status", "200"}, + {"content-type", "text/html"}, + {"content-length", strconv.Itoa(len(mainBody))}, + }) + }, + 
func(f Frame) error { + return checkData(f, mainBody) + }, + }, + 2: { + func(f Frame) error { + return checkHeaders(f, [][2]string{ + {":status", "200"}, + {"content-type", "text/html"}, + {"content-length", strconv.Itoa(len(pushedBody))}, + }) + }, + func(f Frame) error { + return checkData(f, pushedBody) + }, + }, + 4: { + func(f Frame) error { + return checkHeaders(f, [][2]string{ + {":status", "204"}, + }) + }, + }, + } + + consumed := map[uint32]int{} + for k := 0; len(expected) > 0; k++ { + f, err := st.readFrame() + if err != nil { + for id, left := range expected { + t.Errorf("stream %d: missing %d frames", id, len(left)) + } + t.Fatalf("readFrame %d: %v", k, err) + } + id := f.Header().StreamID + label := fmt.Sprintf("stream %d, frame %d", id, consumed[id]) + if len(expected[id]) == 0 { + t.Fatalf("%s: unexpected frame %#+v", label, f) + } + check := expected[id][0] + expected[id] = expected[id][1:] + if len(expected[id]) == 0 { + delete(expected, id) + } + if err := check(f); err != nil { + t.Fatalf("%s: %v", label, err) + } + consumed[id]++ + } +} + +func TestServer_Push_SuccessNoRace(t *testing.T) { + // Regression test for issue #18326. Ensure the request handler can mutate + // pushed request headers without racing with the PUSH_PROMISE write. + errc := make(chan error, 2) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + switch r.URL.RequestURI() { + case "/": + opt := &http.PushOptions{ + Header: http.Header{"User-Agent": {"testagent"}}, + } + if err := w.(http.Pusher).Push("/pushed", opt); err != nil { + errc <- fmt.Errorf("error pushing: %v", err) + return + } + w.WriteHeader(200) + errc <- nil + + case "/pushed": + // Update request header, ensure there is no race. + r.Header.Set("User-Agent", "newagent") + r.Header.Set("Cookie", "cookie") + w.WriteHeader(200) + errc <- nil + + default: + errc <- fmt.Errorf("unknown RequestURL %q", r.URL.RequestURI()) + } + }) + + // Send one request, which should push one response. + st.greet() + getSlash(st) + for k := 0; k < 2; k++ { + select { + case <-time.After(2 * time.Second): + t.Errorf("timeout waiting for handler %d to finish", k) + case err := <-errc: + if err != nil { + t.Fatal(err) + } + } + } +} + +func TestServer_Push_RejectRecursivePush(t *testing.T) { + // Expect two requests, but might get three if there's a bug and the second push succeeds. + errc := make(chan error, 3) + handler := func(w http.ResponseWriter, r *http.Request) error { + baseURL := "https://" + r.Host + switch r.URL.Path { + case "/": + if err := w.(http.Pusher).Push(baseURL+"/push1", nil); err != nil { + return fmt.Errorf("first Push()=%v, want nil", err) + } + return nil + + case "/push1": + if got, want := w.(http.Pusher).Push(baseURL+"/push2", nil), ErrRecursivePush; got != want { + return fmt.Errorf("Push()=%v, want %v", got, want) + } + return nil + + default: + return fmt.Errorf("unexpected path: %q", r.URL.Path) + } + } + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + errc <- handler(w, r) + }) + defer st.Close() + st.greet() + getSlash(st) + if err := <-errc; err != nil { + t.Errorf("First request failed: %v", err) + } + if err := <-errc; err != nil { + t.Errorf("Second request failed: %v", err) + } +} + +func testServer_Push_RejectSingleRequest(t *testing.T, doPush func(http.Pusher, *http.Request) error, settings ...Setting) { + // Expect one request, but might get two if there's a bug and the push succeeds. 
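+	// A hedged note on the pattern used below: the channel is buffered to
+	// that worst case, so even a buggy extra handler invocation records
+	// its error instead of blocking a goroutine forever:
+	//
+	//	errc := make(chan error, 2)
+	//	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+	//		errc <- doPush(w.(http.Pusher), r)
+	//	})
+	//	if err := <-errc; err != nil {
+	//		t.Error(err)
+	//	}
+	//
+	// Only the first receive is asserted on; the spare slot simply keeps
+	// a second handler call from leaking.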
+ errc := make(chan error, 2) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + errc <- doPush(w.(http.Pusher), r) + }) + defer st.Close() + st.greet() + if err := st.fr.WriteSettings(settings...); err != nil { + st.t.Fatalf("WriteSettings: %v", err) + } + st.wantSettingsAck() + getSlash(st) + if err := <-errc; err != nil { + t.Error(err) + } + // Should not get a PUSH_PROMISE frame. + hf := st.wantHeaders() + if !hf.StreamEnded() { + t.Error("stream should end after headers") + } +} + +func TestServer_Push_RejectIfDisabled(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + if got, want := p.Push("https://"+r.Host+"/pushed", nil), http.ErrNotSupported; got != want { + return fmt.Errorf("Push()=%v, want %v", got, want) + } + return nil + }, + Setting{SettingEnablePush, 0}) +} + +func TestServer_Push_RejectWhenNoConcurrentStreams(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + if got, want := p.Push("https://"+r.Host+"/pushed", nil), ErrPushLimitReached; got != want { + return fmt.Errorf("Push()=%v, want %v", got, want) + } + return nil + }, + Setting{SettingMaxConcurrentStreams, 0}) +} + +func TestServer_Push_RejectWrongScheme(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + if err := p.Push("http://"+r.Host+"/pushed", nil); err == nil { + return errors.New("Push() should have failed (push target URL is http)") + } + return nil + }) +} + +func TestServer_Push_RejectMissingHost(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + if err := p.Push("https:pushed", nil); err == nil { + return errors.New("Push() should have failed (push target URL missing host)") + } + return nil + }) +} + +func TestServer_Push_RejectRelativePath(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + if err := p.Push("../test", nil); err == nil { + return errors.New("Push() should have failed (push target is a relative path)") + } + return nil + }) +} + +func TestServer_Push_RejectForbiddenMethod(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + if err := p.Push("https://"+r.Host+"/pushed", &http.PushOptions{Method: "POST"}); err == nil { + return errors.New("Push() should have failed (cannot promise a POST)") + } + return nil + }) +} + +func TestServer_Push_RejectForbiddenHeader(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + header := http.Header{ + "Content-Length": {"10"}, + "Content-Encoding": {"gzip"}, + "Trailer": {"Foo"}, + "Te": {"trailers"}, + "Host": {"test.com"}, + ":authority": {"test.com"}, + } + if err := p.Push("https://"+r.Host+"/pushed", &http.PushOptions{Header: header}); err == nil { + return errors.New("Push() should have failed (forbidden headers)") + } + return nil + }) +} + +func TestServer_Push_StateTransitions(t *testing.T) { + const body = "foo" + + gotPromise := make(chan bool) + finishedPush := make(chan bool) + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + switch r.URL.RequestURI() { + case "/": + if err := w.(http.Pusher).Push("/pushed", nil); err != nil { + t.Errorf("Push error: %v", err) + } + // Don't finish this request until the push finishes so we don't + // nondeterministically interleave output frames with the push. 
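+			// A hedged note on the gating: the two unbuffered channels
+			// declared near the top of this test,
+			//
+			//	gotPromise := make(chan bool)   // closed after the PUSH_PROMISE is observed
+			//	finishedPush := make(chan bool) // closed after the pushed stream is verified
+			//
+			// park each handler until the test body has made its state
+			// assertions, so the frame order stays deterministic.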
+ <-finishedPush + case "/pushed": + <-gotPromise + } + w.Header().Set("Content-Type", "text/html") + w.Header().Set("Content-Length", strconv.Itoa(len(body))) + w.WriteHeader(200) + io.WriteString(w, body) + }) + defer st.Close() + + st.greet() + if st.stream(2) != nil { + t.Fatal("stream 2 should be empty") + } + if got, want := st.streamState(2), stateIdle; got != want { + t.Fatalf("streamState(2)=%v, want %v", got, want) + } + getSlash(st) + // After the PUSH_PROMISE is sent, the stream should be stateHalfClosedRemote. + st.wantPushPromise() + if got, want := st.streamState(2), stateHalfClosedRemote; got != want { + t.Fatalf("streamState(2)=%v, want %v", got, want) + } + // We stall the HTTP handler for "/pushed" until the above check. If we don't + // stall the handler, then the handler might write HEADERS and DATA and finish + // the stream before we check st.streamState(2) -- should that happen, we'll + // see stateClosed and fail the above check. + close(gotPromise) + st.wantHeaders() + if df := st.wantData(); !df.StreamEnded() { + t.Fatal("expected END_STREAM flag on DATA") + } + if got, want := st.streamState(2), stateClosed; got != want { + t.Fatalf("streamState(2)=%v, want %v", got, want) + } + close(finishedPush) +} + +func TestServer_Push_RejectAfterGoAway(t *testing.T) { + var readyOnce sync.Once + ready := make(chan struct{}) + errc := make(chan error, 2) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + select { + case <-ready: + case <-time.After(5 * time.Second): + errc <- fmt.Errorf("timeout waiting for GOAWAY to be processed") + } + if got, want := w.(http.Pusher).Push("https://"+r.Host+"/pushed", nil), http.ErrNotSupported; got != want { + errc <- fmt.Errorf("Push()=%v, want %v", got, want) + } + errc <- nil + }) + defer st.Close() + st.greet() + getSlash(st) + + // Send GOAWAY and wait for it to be processed. + st.fr.WriteGoAway(1, ErrCodeNo, nil) + go func() { + for { + select { + case <-ready: + return + default: + } + st.sc.serveMsgCh <- func(loopNum int) { + if !st.sc.pushEnabled { + readyOnce.Do(func() { close(ready) }) + } + } + } + }() + if err := <-errc; err != nil { + t.Error(err) + } +} diff --git a/vendor/golang.org/x/net/http2/server_test.go b/vendor/golang.org/x/net/http2/server_test.go new file mode 100644 index 0000000..c5d8459 --- /dev/null +++ b/vendor/golang.org/x/net/http2/server_test.go @@ -0,0 +1,3725 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "crypto/tls" + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "reflect" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/http2/hpack" +) + +var stderrVerbose = flag.Bool("stderr_verbose", false, "Mirror verbosity to stderr, unbuffered") + +func stderrv() io.Writer { + if *stderrVerbose { + return os.Stderr + } + + return ioutil.Discard +} + +type serverTester struct { + cc net.Conn // client conn + t testing.TB + ts *httptest.Server + fr *Framer + serverLogBuf bytes.Buffer // logger for httptest.Server + logFilter []string // substrings to filter out + scMu sync.Mutex // guards sc + sc *serverConn + hpackDec *hpack.Decoder + decodedHeaders [][2]string + + // If http2debug!=2, then we capture Frame debug logs that will be written + // to t.Log after a test fails. 
The read and write logs use separate locks + // and buffers so we don't accidentally introduce synchronization between + // the read and write goroutines, which may hide data races. + frameReadLogMu sync.Mutex + frameReadLogBuf bytes.Buffer + frameWriteLogMu sync.Mutex + frameWriteLogBuf bytes.Buffer + + // writing headers: + headerBuf bytes.Buffer + hpackEnc *hpack.Encoder +} + +func init() { + testHookOnPanicMu = new(sync.Mutex) + goAwayTimeout = 25 * time.Millisecond +} + +func resetHooks() { + testHookOnPanicMu.Lock() + testHookOnPanic = nil + testHookOnPanicMu.Unlock() +} + +type serverTesterOpt string + +var optOnlyServer = serverTesterOpt("only_server") +var optQuiet = serverTesterOpt("quiet_logging") +var optFramerReuseFrames = serverTesterOpt("frame_reuse_frames") + +func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{}) *serverTester { + resetHooks() + + ts := httptest.NewUnstartedServer(handler) + + tlsConfig := &tls.Config{ + InsecureSkipVerify: true, + NextProtos: []string{NextProtoTLS}, + } + + var onlyServer, quiet, framerReuseFrames bool + h2server := new(Server) + for _, opt := range opts { + switch v := opt.(type) { + case func(*tls.Config): + v(tlsConfig) + case func(*httptest.Server): + v(ts) + case func(*Server): + v(h2server) + case serverTesterOpt: + switch v { + case optOnlyServer: + onlyServer = true + case optQuiet: + quiet = true + case optFramerReuseFrames: + framerReuseFrames = true + } + case func(net.Conn, http.ConnState): + ts.Config.ConnState = v + default: + t.Fatalf("unknown newServerTester option type %T", v) + } + } + + ConfigureServer(ts.Config, h2server) + + st := &serverTester{ + t: t, + ts: ts, + } + st.hpackEnc = hpack.NewEncoder(&st.headerBuf) + st.hpackDec = hpack.NewDecoder(initialHeaderTableSize, st.onHeaderField) + + ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config + if quiet { + ts.Config.ErrorLog = log.New(ioutil.Discard, "", 0) + } else { + ts.Config.ErrorLog = log.New(io.MultiWriter(stderrv(), twriter{t: t, st: st}, &st.serverLogBuf), "", log.LstdFlags) + } + ts.StartTLS() + + if VerboseLogs { + t.Logf("Running test server at: %s", ts.URL) + } + testHookGetServerConn = func(v *serverConn) { + st.scMu.Lock() + defer st.scMu.Unlock() + st.sc = v + } + log.SetOutput(io.MultiWriter(stderrv(), twriter{t: t, st: st})) + if !onlyServer { + cc, err := tls.Dial("tcp", ts.Listener.Addr().String(), tlsConfig) + if err != nil { + t.Fatal(err) + } + st.cc = cc + st.fr = NewFramer(cc, cc) + if framerReuseFrames { + st.fr.SetReuseFrames() + } + if !logFrameReads && !logFrameWrites { + st.fr.debugReadLoggerf = func(m string, v ...interface{}) { + m = time.Now().Format("2006-01-02 15:04:05.999999999 ") + strings.TrimPrefix(m, "http2: ") + "\n" + st.frameReadLogMu.Lock() + fmt.Fprintf(&st.frameReadLogBuf, m, v...) + st.frameReadLogMu.Unlock() + } + st.fr.debugWriteLoggerf = func(m string, v ...interface{}) { + m = time.Now().Format("2006-01-02 15:04:05.999999999 ") + strings.TrimPrefix(m, "http2: ") + "\n" + st.frameWriteLogMu.Lock() + fmt.Fprintf(&st.frameWriteLogBuf, m, v...) 
+ st.frameWriteLogMu.Unlock() + } + st.fr.logReads = true + st.fr.logWrites = true + } + } + return st +} + +func (st *serverTester) closeConn() { + st.scMu.Lock() + defer st.scMu.Unlock() + st.sc.conn.Close() +} + +func (st *serverTester) addLogFilter(phrase string) { + st.logFilter = append(st.logFilter, phrase) +} + +func (st *serverTester) stream(id uint32) *stream { + ch := make(chan *stream, 1) + st.sc.serveMsgCh <- func(int) { + ch <- st.sc.streams[id] + } + return <-ch +} + +func (st *serverTester) streamState(id uint32) streamState { + ch := make(chan streamState, 1) + st.sc.serveMsgCh <- func(int) { + state, _ := st.sc.state(id) + ch <- state + } + return <-ch +} + +// loopNum reports how many times this conn's select loop has gone around. +func (st *serverTester) loopNum() int { + lastc := make(chan int, 1) + st.sc.serveMsgCh <- func(loopNum int) { + lastc <- loopNum + } + return <-lastc +} + +// awaitIdle heuristically awaits for the server conn's select loop to be idle. +// The heuristic is that the server connection's serve loop must schedule +// 50 times in a row without any channel sends or receives occurring. +func (st *serverTester) awaitIdle() { + remain := 50 + last := st.loopNum() + for remain > 0 { + n := st.loopNum() + if n == last+1 { + remain-- + } else { + remain = 50 + } + last = n + } +} + +func (st *serverTester) Close() { + if st.t.Failed() { + st.frameReadLogMu.Lock() + if st.frameReadLogBuf.Len() > 0 { + st.t.Logf("Framer read log:\n%s", st.frameReadLogBuf.String()) + } + st.frameReadLogMu.Unlock() + + st.frameWriteLogMu.Lock() + if st.frameWriteLogBuf.Len() > 0 { + st.t.Logf("Framer write log:\n%s", st.frameWriteLogBuf.String()) + } + st.frameWriteLogMu.Unlock() + + // If we failed already (and are likely in a Fatal, + // unwindowing), force close the connection, so the + // httptest.Server doesn't wait forever for the conn + // to close. + if st.cc != nil { + st.cc.Close() + } + } + st.ts.Close() + if st.cc != nil { + st.cc.Close() + } + log.SetOutput(os.Stderr) +} + +// greet initiates the client's HTTP/2 connection into a state where +// frames may be sent. +func (st *serverTester) greet() { + st.greetAndCheckSettings(func(Setting) error { return nil }) +} + +func (st *serverTester) greetAndCheckSettings(checkSetting func(s Setting) error) { + st.writePreface() + st.writeInitialSettings() + st.wantSettings().ForeachSetting(checkSetting) + st.writeSettingsAck() + + // The initial WINDOW_UPDATE and SETTINGS ACK can come in any order. 
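+	// A hedged restatement of the check below: read exactly two frames and
+	// record which kinds arrived,
+	//
+	//	var gotSettingsAck bool
+	//	var gotWindowUpdate bool
+	//
+	// then fail only if either flag is still unset. Both arrival orders
+	// pass, and two frames of one kind are caught by the other flag
+	// remaining false.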
+ var gotSettingsAck bool + var gotWindowUpdate bool + + for i := 0; i < 2; i++ { + f, err := st.readFrame() + if err != nil { + st.t.Fatal(err) + } + switch f := f.(type) { + case *SettingsFrame: + if !f.Header().Flags.Has(FlagSettingsAck) { + st.t.Fatal("Settings Frame didn't have ACK set") + } + gotSettingsAck = true + + case *WindowUpdateFrame: + if f.FrameHeader.StreamID != 0 { + st.t.Fatalf("WindowUpdate StreamID = %d; want 0", f.FrameHeader.StreamID) + } + incr := uint32((&Server{}).initialConnRecvWindowSize() - initialWindowSize) + if f.Increment != incr { + st.t.Fatalf("WindowUpdate increment = %d; want %d", f.Increment, incr) + } + gotWindowUpdate = true + + default: + st.t.Fatalf("Wanting a settings ACK or window update, received a %T", f) + } + } + + if !gotSettingsAck { + st.t.Fatalf("Didn't get a settings ACK") + } + if !gotWindowUpdate { + st.t.Fatalf("Didn't get a window update") + } +} + +func (st *serverTester) writePreface() { + n, err := st.cc.Write(clientPreface) + if err != nil { + st.t.Fatalf("Error writing client preface: %v", err) + } + if n != len(clientPreface) { + st.t.Fatalf("Writing client preface, wrote %d bytes; want %d", n, len(clientPreface)) + } +} + +func (st *serverTester) writeInitialSettings() { + if err := st.fr.WriteSettings(); err != nil { + st.t.Fatalf("Error writing initial SETTINGS frame from client to server: %v", err) + } +} + +func (st *serverTester) writeSettingsAck() { + if err := st.fr.WriteSettingsAck(); err != nil { + st.t.Fatalf("Error writing ACK of server's SETTINGS: %v", err) + } +} + +func (st *serverTester) writeHeaders(p HeadersFrameParam) { + if err := st.fr.WriteHeaders(p); err != nil { + st.t.Fatalf("Error writing HEADERS: %v", err) + } +} + +func (st *serverTester) writePriority(id uint32, p PriorityParam) { + if err := st.fr.WritePriority(id, p); err != nil { + st.t.Fatalf("Error writing PRIORITY: %v", err) + } +} + +func (st *serverTester) encodeHeaderField(k, v string) { + err := st.hpackEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) + if err != nil { + st.t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err) + } +} + +// encodeHeaderRaw is the magic-free version of encodeHeader. +// It takes 0 or more (k, v) pairs and encodes them. +func (st *serverTester) encodeHeaderRaw(headers ...string) []byte { + if len(headers)%2 == 1 { + panic("odd number of kv args") + } + st.headerBuf.Reset() + for len(headers) > 0 { + k, v := headers[0], headers[1] + st.encodeHeaderField(k, v) + headers = headers[2:] + } + return st.headerBuf.Bytes() +} + +// encodeHeader encodes headers and returns their HPACK bytes. headers +// must contain an even number of key/value pairs. There may be +// multiple pairs for keys (e.g. "cookie"). The :method, :path, and +// :scheme headers default to GET, / and https. The :authority header +// defaults to st.ts.Listener.Addr(). +func (st *serverTester) encodeHeader(headers ...string) []byte { + if len(headers)%2 == 1 { + panic("odd number of kv args") + } + + st.headerBuf.Reset() + defaultAuthority := st.ts.Listener.Addr().String() + + if len(headers) == 0 { + // Fast path, mostly for benchmarks, so test code doesn't pollute + // profiles when we're looking to improve server allocations. + st.encodeHeaderField(":method", "GET") + st.encodeHeaderField(":scheme", "https") + st.encodeHeaderField(":authority", defaultAuthority) + st.encodeHeaderField(":path", "/") + return st.headerBuf.Bytes() + } + + if len(headers) == 2 && headers[0] == ":method" { + // Another fast path for benchmarks. 
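+	// For instance, the request tests below call:
+	//
+	//	st.encodeHeader(":method", "POST")
+	//
+	// which this path HPACK-encodes as a POST for "/" with the default
+	// :scheme and :authority filled in.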
+ st.encodeHeaderField(":method", headers[1]) + st.encodeHeaderField(":scheme", "https") + st.encodeHeaderField(":authority", defaultAuthority) + st.encodeHeaderField(":path", "/") + return st.headerBuf.Bytes() + } + + pseudoCount := map[string]int{} + keys := []string{":method", ":scheme", ":authority", ":path"} + vals := map[string][]string{ + ":method": {"GET"}, + ":scheme": {"https"}, + ":authority": {defaultAuthority}, + ":path": {"/"}, + } + for len(headers) > 0 { + k, v := headers[0], headers[1] + headers = headers[2:] + if _, ok := vals[k]; !ok { + keys = append(keys, k) + } + if strings.HasPrefix(k, ":") { + pseudoCount[k]++ + if pseudoCount[k] == 1 { + vals[k] = []string{v} + } else { + // Allows testing of invalid headers w/ dup pseudo fields. + vals[k] = append(vals[k], v) + } + } else { + vals[k] = append(vals[k], v) + } + } + for _, k := range keys { + for _, v := range vals[k] { + st.encodeHeaderField(k, v) + } + } + return st.headerBuf.Bytes() +} + +// bodylessReq1 writes a HEADERS frames with StreamID 1 and EndStream and EndHeaders set. +func (st *serverTester) bodylessReq1(headers ...string) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(headers...), + EndStream: true, + EndHeaders: true, + }) +} + +func (st *serverTester) writeData(streamID uint32, endStream bool, data []byte) { + if err := st.fr.WriteData(streamID, endStream, data); err != nil { + st.t.Fatalf("Error writing DATA: %v", err) + } +} + +func (st *serverTester) writeDataPadded(streamID uint32, endStream bool, data, pad []byte) { + if err := st.fr.WriteDataPadded(streamID, endStream, data, pad); err != nil { + st.t.Fatalf("Error writing DATA: %v", err) + } +} + +func readFrameTimeout(fr *Framer, wait time.Duration) (Frame, error) { + ch := make(chan interface{}, 1) + go func() { + fr, err := fr.ReadFrame() + if err != nil { + ch <- err + } else { + ch <- fr + } + }() + t := time.NewTimer(wait) + select { + case v := <-ch: + t.Stop() + if fr, ok := v.(Frame); ok { + return fr, nil + } + return nil, v.(error) + case <-t.C: + return nil, errors.New("timeout waiting for frame") + } +} + +func (st *serverTester) readFrame() (Frame, error) { + return readFrameTimeout(st.fr, 2*time.Second) +} + +func (st *serverTester) wantHeaders() *HeadersFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a HEADERS frame: %v", err) + } + hf, ok := f.(*HeadersFrame) + if !ok { + st.t.Fatalf("got a %T; want *HeadersFrame", f) + } + return hf +} + +func (st *serverTester) wantContinuation() *ContinuationFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a CONTINUATION frame: %v", err) + } + cf, ok := f.(*ContinuationFrame) + if !ok { + st.t.Fatalf("got a %T; want *ContinuationFrame", f) + } + return cf +} + +func (st *serverTester) wantData() *DataFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a DATA frame: %v", err) + } + df, ok := f.(*DataFrame) + if !ok { + st.t.Fatalf("got a %T; want *DataFrame", f) + } + return df +} + +func (st *serverTester) wantSettings() *SettingsFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a SETTINGS frame: %v", err) + } + sf, ok := f.(*SettingsFrame) + if !ok { + st.t.Fatalf("got a %T; want *SettingsFrame", f) + } + return sf +} + +func (st *serverTester) wantPing() *PingFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a PING frame: 
%v", err) + } + pf, ok := f.(*PingFrame) + if !ok { + st.t.Fatalf("got a %T; want *PingFrame", f) + } + return pf +} + +func (st *serverTester) wantGoAway() *GoAwayFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a GOAWAY frame: %v", err) + } + gf, ok := f.(*GoAwayFrame) + if !ok { + st.t.Fatalf("got a %T; want *GoAwayFrame", f) + } + return gf +} + +func (st *serverTester) wantRSTStream(streamID uint32, errCode ErrCode) { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting an RSTStream frame: %v", err) + } + rs, ok := f.(*RSTStreamFrame) + if !ok { + st.t.Fatalf("got a %T; want *RSTStreamFrame", f) + } + if rs.FrameHeader.StreamID != streamID { + st.t.Fatalf("RSTStream StreamID = %d; want %d", rs.FrameHeader.StreamID, streamID) + } + if rs.ErrCode != errCode { + st.t.Fatalf("RSTStream ErrCode = %d (%s); want %d (%s)", rs.ErrCode, rs.ErrCode, errCode, errCode) + } +} + +func (st *serverTester) wantWindowUpdate(streamID, incr uint32) { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a WINDOW_UPDATE frame: %v", err) + } + wu, ok := f.(*WindowUpdateFrame) + if !ok { + st.t.Fatalf("got a %T; want *WindowUpdateFrame", f) + } + if wu.FrameHeader.StreamID != streamID { + st.t.Fatalf("WindowUpdate StreamID = %d; want %d", wu.FrameHeader.StreamID, streamID) + } + if wu.Increment != incr { + st.t.Fatalf("WindowUpdate increment = %d; want %d", wu.Increment, incr) + } +} + +func (st *serverTester) wantSettingsAck() { + f, err := st.readFrame() + if err != nil { + st.t.Fatal(err) + } + sf, ok := f.(*SettingsFrame) + if !ok { + st.t.Fatalf("Wanting a settings ACK, received a %T", f) + } + if !sf.Header().Flags.Has(FlagSettingsAck) { + st.t.Fatal("Settings Frame didn't have ACK set") + } +} + +func (st *serverTester) wantPushPromise() *PushPromiseFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatal(err) + } + ppf, ok := f.(*PushPromiseFrame) + if !ok { + st.t.Fatalf("Wanted PushPromise, received %T", ppf) + } + return ppf +} + +func TestServer(t *testing.T) { + gotReq := make(chan bool, 1) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Foo", "Bar") + gotReq <- true + }) + defer st.Close() + + covers("3.5", ` + The server connection preface consists of a potentially empty + SETTINGS frame ([SETTINGS]) that MUST be the first frame the + server sends in the HTTP/2 connection. 
+ `) + + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(), + EndStream: true, // no DATA frames + EndHeaders: true, + }) + + select { + case <-gotReq: + case <-time.After(2 * time.Second): + t.Error("timeout waiting for request") + } +} + +func TestServer_Request_Get(t *testing.T) { + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader("foo-bar", "some-value"), + EndStream: true, // no DATA frames + EndHeaders: true, + }) + }, func(r *http.Request) { + if r.Method != "GET" { + t.Errorf("Method = %q; want GET", r.Method) + } + if r.URL.Path != "/" { + t.Errorf("URL.Path = %q; want /", r.URL.Path) + } + if r.ContentLength != 0 { + t.Errorf("ContentLength = %v; want 0", r.ContentLength) + } + if r.Close { + t.Error("Close = true; want false") + } + if !strings.Contains(r.RemoteAddr, ":") { + t.Errorf("RemoteAddr = %q; want something with a colon", r.RemoteAddr) + } + if r.Proto != "HTTP/2.0" || r.ProtoMajor != 2 || r.ProtoMinor != 0 { + t.Errorf("Proto = %q Major=%v,Minor=%v; want HTTP/2.0", r.Proto, r.ProtoMajor, r.ProtoMinor) + } + wantHeader := http.Header{ + "Foo-Bar": []string{"some-value"}, + } + if !reflect.DeepEqual(r.Header, wantHeader) { + t.Errorf("Header = %#v; want %#v", r.Header, wantHeader) + } + if n, err := r.Body.Read([]byte(" ")); err != io.EOF || n != 0 { + t.Errorf("Read = %d, %v; want 0, EOF", n, err) + } + }) +} + +func TestServer_Request_Get_PathSlashes(t *testing.T) { + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":path", "/%2f/"), + EndStream: true, // no DATA frames + EndHeaders: true, + }) + }, func(r *http.Request) { + if r.RequestURI != "/%2f/" { + t.Errorf("RequestURI = %q; want /%%2f/", r.RequestURI) + } + if r.URL.Path != "///" { + t.Errorf("URL.Path = %q; want ///", r.URL.Path) + } + }) +} + +// TODO: add a test with EndStream=true on the HEADERS but setting a +// Content-Length anyway. Should we just omit it and force it to +// zero? + +func TestServer_Request_Post_NoContentLength_EndStream(t *testing.T) { + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: true, + EndHeaders: true, + }) + }, func(r *http.Request) { + if r.Method != "POST" { + t.Errorf("Method = %q; want POST", r.Method) + } + if r.ContentLength != 0 { + t.Errorf("ContentLength = %v; want 0", r.ContentLength) + } + if n, err := r.Body.Read([]byte(" ")); err != io.EOF || n != 0 { + t.Errorf("Read = %d, %v; want 0, EOF", n, err) + } + }) +} + +func TestServer_Request_Post_Body_ImmediateEOF(t *testing.T) { + testBodyContents(t, -1, "", func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, true, nil) // just kidding. empty body. 
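+		// A hedged aside: ending the stream on the HEADERS frame, as
+		// bodylessReq1 above does, is the equivalent wire form with no
+		// DATA frame at all:
+		//
+		//	st.writeHeaders(HeadersFrameParam{
+		//		StreamID:      1,
+		//		BlockFragment: st.encodeHeader(":method", "POST"),
+		//		EndStream:     true, // no DATA frames
+		//		EndHeaders:    true,
+		//	})
+		//
+		// Either way the handler's body read sees an immediate io.EOF.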
+ }) +} + +func TestServer_Request_Post_Body_OneData(t *testing.T) { + const content = "Some content" + testBodyContents(t, -1, content, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, true, []byte(content)) + }) +} + +func TestServer_Request_Post_Body_TwoData(t *testing.T) { + const content = "Some content" + testBodyContents(t, -1, content, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, false, []byte(content[:5])) + st.writeData(1, true, []byte(content[5:])) + }) +} + +func TestServer_Request_Post_Body_ContentLength_Correct(t *testing.T) { + const content = "Some content" + testBodyContents(t, int64(len(content)), content, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader( + ":method", "POST", + "content-length", strconv.Itoa(len(content)), + ), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, true, []byte(content)) + }) +} + +func TestServer_Request_Post_Body_ContentLength_TooLarge(t *testing.T) { + testBodyContentsFail(t, 3, "request declared a Content-Length of 3 but only wrote 2 bytes", + func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader( + ":method", "POST", + "content-length", "3", + ), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, true, []byte("12")) + }) +} + +func TestServer_Request_Post_Body_ContentLength_TooSmall(t *testing.T) { + testBodyContentsFail(t, 4, "sender tried to send more than declared Content-Length of 4 bytes", + func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader( + ":method", "POST", + "content-length", "4", + ), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, true, []byte("12345")) + }) +} + +func testBodyContents(t *testing.T, wantContentLength int64, wantBody string, write func(st *serverTester)) { + testServerRequest(t, write, func(r *http.Request) { + if r.Method != "POST" { + t.Errorf("Method = %q; want POST", r.Method) + } + if r.ContentLength != wantContentLength { + t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength) + } + all, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + if string(all) != wantBody { + t.Errorf("Read = %q; want %q", all, wantBody) + } + if err := r.Body.Close(); err != nil { + t.Fatalf("Close: %v", err) + } + }) +} + +func testBodyContentsFail(t *testing.T, wantContentLength int64, wantReadError string, write func(st *serverTester)) { + testServerRequest(t, write, func(r *http.Request) { + if r.Method != "POST" { + t.Errorf("Method = %q; want POST", r.Method) + } + if r.ContentLength != wantContentLength { + t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength) + } + all, err := ioutil.ReadAll(r.Body) + if err == nil { + t.Fatalf("expected an error (%q) reading from the body. 
Successfully read %q instead.", + wantReadError, all) + } + if !strings.Contains(err.Error(), wantReadError) { + t.Fatalf("Body.Read = %v; want substring %q", err, wantReadError) + } + if err := r.Body.Close(); err != nil { + t.Fatalf("Close: %v", err) + } + }) +} + +// Using a Host header, instead of :authority +func TestServer_Request_Get_Host(t *testing.T) { + const host = "example.com" + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":authority", "", "host", host), + EndStream: true, + EndHeaders: true, + }) + }, func(r *http.Request) { + if r.Host != host { + t.Errorf("Host = %q; want %q", r.Host, host) + } + }) +} + +// Using an :authority pseudo-header, instead of Host +func TestServer_Request_Get_Authority(t *testing.T) { + const host = "example.com" + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":authority", host), + EndStream: true, + EndHeaders: true, + }) + }, func(r *http.Request) { + if r.Host != host { + t.Errorf("Host = %q; want %q", r.Host, host) + } + }) +} + +func TestServer_Request_WithContinuation(t *testing.T) { + wantHeader := http.Header{ + "Foo-One": []string{"value-one"}, + "Foo-Two": []string{"value-two"}, + "Foo-Three": []string{"value-three"}, + } + testServerRequest(t, func(st *serverTester) { + fullHeaders := st.encodeHeader( + "foo-one", "value-one", + "foo-two", "value-two", + "foo-three", "value-three", + ) + remain := fullHeaders + chunks := 0 + for len(remain) > 0 { + const maxChunkSize = 5 + chunk := remain + if len(chunk) > maxChunkSize { + chunk = chunk[:maxChunkSize] + } + remain = remain[len(chunk):] + + if chunks == 0 { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: chunk, + EndStream: true, // no DATA frames + EndHeaders: false, // we'll have continuation frames + }) + } else { + err := st.fr.WriteContinuation(1, len(remain) == 0, chunk) + if err != nil { + t.Fatal(err) + } + } + chunks++ + } + if chunks < 2 { + t.Fatal("too few chunks") + } + }, func(r *http.Request) { + if !reflect.DeepEqual(r.Header, wantHeader) { + t.Errorf("Header = %#v; want %#v", r.Header, wantHeader) + } + }) +} + +// Concatenated cookie headers. 
("8.1.2.5 Compressing the Cookie Header Field") +func TestServer_Request_CookieConcat(t *testing.T) { + const host = "example.com" + testServerRequest(t, func(st *serverTester) { + st.bodylessReq1( + ":authority", host, + "cookie", "a=b", + "cookie", "c=d", + "cookie", "e=f", + ) + }, func(r *http.Request) { + const want = "a=b; c=d; e=f" + if got := r.Header.Get("Cookie"); got != want { + t.Errorf("Cookie = %q; want %q", got, want) + } + }) +} + +func TestServer_Request_Reject_CapitalHeader(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("UPPER", "v") }) +} + +func TestServer_Request_Reject_HeaderFieldNameColon(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("has:colon", "v") }) +} + +func TestServer_Request_Reject_HeaderFieldNameNULL(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("has\x00null", "v") }) +} + +func TestServer_Request_Reject_HeaderFieldNameEmpty(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("", "v") }) +} + +func TestServer_Request_Reject_HeaderFieldValueNewline(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("foo", "has\nnewline") }) +} + +func TestServer_Request_Reject_HeaderFieldValueCR(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("foo", "has\rcarriage") }) +} + +func TestServer_Request_Reject_HeaderFieldValueDEL(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("foo", "has\x7fdel") }) +} + +func TestServer_Request_Reject_Pseudo_Missing_method(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":method", "") }) +} + +func TestServer_Request_Reject_Pseudo_ExactlyOne(t *testing.T) { + // 8.1.2.3 Request Pseudo-Header Fields + // "All HTTP/2 requests MUST include exactly one valid value" ... + testRejectRequest(t, func(st *serverTester) { + st.addLogFilter("duplicate pseudo-header") + st.bodylessReq1(":method", "GET", ":method", "POST") + }) +} + +func TestServer_Request_Reject_Pseudo_AfterRegular(t *testing.T) { + // 8.1.2.3 Request Pseudo-Header Fields + // "All pseudo-header fields MUST appear in the header block + // before regular header fields. Any request or response that + // contains a pseudo-header field that appears in a header + // block after a regular header field MUST be treated as + // malformed (Section 8.1.2.6)." 
+ testRejectRequest(t, func(st *serverTester) { + st.addLogFilter("pseudo-header after regular header") + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"}) + enc.WriteField(hpack.HeaderField{Name: "regular", Value: "foobar"}) + enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/"}) + enc.WriteField(hpack.HeaderField{Name: ":scheme", Value: "https"}) + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: buf.Bytes(), + EndStream: true, + EndHeaders: true, + }) + }) +} + +func TestServer_Request_Reject_Pseudo_Missing_path(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":path", "") }) +} + +func TestServer_Request_Reject_Pseudo_Missing_scheme(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":scheme", "") }) +} + +func TestServer_Request_Reject_Pseudo_scheme_invalid(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":scheme", "bogus") }) +} + +func TestServer_Request_Reject_Pseudo_Unknown(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { + st.addLogFilter(`invalid pseudo-header ":unknown_thing"`) + st.bodylessReq1(":unknown_thing", "") + }) +} + +func testRejectRequest(t *testing.T, send func(*serverTester)) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + t.Error("server request made it to handler; should've been rejected") + }) + defer st.Close() + + st.greet() + send(st) + st.wantRSTStream(1, ErrCodeProtocol) +} + +func testRejectRequestWithProtocolError(t *testing.T, send func(*serverTester)) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + t.Error("server request made it to handler; should've been rejected") + }, optQuiet) + defer st.Close() + + st.greet() + send(st) + gf := st.wantGoAway() + if gf.ErrCode != ErrCodeProtocol { + t.Errorf("err code = %v; want %v", gf.ErrCode, ErrCodeProtocol) + } +} + +// Section 5.1, on idle connections: "Receiving any frame other than +// HEADERS or PRIORITY on a stream in this state MUST be treated as a +// connection error (Section 5.4.1) of type PROTOCOL_ERROR." 
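+//
+// Stream 123 below was never opened by a HEADERS frame, so it is still
+// in the "idle" state; each of these frames must therefore take down
+// the whole connection (GOAWAY with PROTOCOL_ERROR) rather than just
+// one stream.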
+func TestRejectFrameOnIdle_WindowUpdate(t *testing.T) {
+	testRejectRequestWithProtocolError(t, func(st *serverTester) {
+		st.fr.WriteWindowUpdate(123, 456)
+	})
+}
+func TestRejectFrameOnIdle_Data(t *testing.T) {
+	testRejectRequestWithProtocolError(t, func(st *serverTester) {
+		st.fr.WriteData(123, true, nil)
+	})
+}
+func TestRejectFrameOnIdle_RSTStream(t *testing.T) {
+	testRejectRequestWithProtocolError(t, func(st *serverTester) {
+		st.fr.WriteRSTStream(123, ErrCodeCancel)
+	})
+}
+
+func TestServer_Request_Connect(t *testing.T) {
+	testServerRequest(t, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID: 1,
+			BlockFragment: st.encodeHeaderRaw(
+				":method", "CONNECT",
+				":authority", "example.com:123",
+			),
+			EndStream:  true,
+			EndHeaders: true,
+		})
+	}, func(r *http.Request) {
+		if g, w := r.Method, "CONNECT"; g != w {
+			t.Errorf("Method = %q; want %q", g, w)
+		}
+		if g, w := r.RequestURI, "example.com:123"; g != w {
+			t.Errorf("RequestURI = %q; want %q", g, w)
+		}
+		if g, w := r.URL.Host, "example.com:123"; g != w {
+			t.Errorf("URL.Host = %q; want %q", g, w)
+		}
+	})
+}
+
+func TestServer_Request_Connect_InvalidPath(t *testing.T) {
+	testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID: 1,
+			BlockFragment: st.encodeHeaderRaw(
+				":method", "CONNECT",
+				":authority", "example.com:123",
+				":path", "/bogus",
+			),
+			EndStream:  true,
+			EndHeaders: true,
+		})
+	})
+}
+
+func TestServer_Request_Connect_InvalidScheme(t *testing.T) {
+	testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID: 1,
+			BlockFragment: st.encodeHeaderRaw(
+				":method", "CONNECT",
+				":authority", "example.com:123",
+				":scheme", "https",
+			),
+			EndStream:  true,
+			EndHeaders: true,
+		})
+	})
+}
+
+func TestServer_Ping(t *testing.T) {
+	st := newServerTester(t, nil)
+	defer st.Close()
+	st.greet()
+
+	// Server should ignore this one, since it has ACK set.
+	ackPingData := [8]byte{1, 2, 4, 8, 16, 32, 64, 128}
+	if err := st.fr.WritePing(true, ackPingData); err != nil {
+		t.Fatal(err)
+	}
+
+	// But the server should reply to this one, since ACK is false.
+	pingData := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
+	if err := st.fr.WritePing(false, pingData); err != nil {
+		t.Fatal(err)
+	}
+
+	pf := st.wantPing()
+	if !pf.Flags.Has(FlagPingAck) {
+		t.Error("response ping doesn't have ACK set")
+	}
+	if pf.Data != pingData {
+		t.Errorf("response ping has data %q; want %q", pf.Data, pingData)
+	}
+}
+
+func TestServer_RejectsLargeFrames(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.Skip("see golang.org/issue/13434")
+	}
+
+	st := newServerTester(t, nil)
+	defer st.Close()
+	st.greet()
+
+	// Write a frame that's too large by one byte.
+	// We ignore the return value because it's expected that the server
+	// will only read the first 9 bytes (the frame header) and then disconnect.
+	st.fr.WriteRawFrame(0xff, 0, 0, make([]byte, defaultMaxReadFrameSize+1))
+
+	gf := st.wantGoAway()
+	if gf.ErrCode != ErrCodeFrameSize {
+		t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFrameSize)
+	}
+	if st.serverLogBuf.Len() != 0 {
+		// Previously we spun here for a bit until the GOAWAY disconnect
+		// timer fired, logging while we waited.
+ t.Errorf("unexpected server output: %.500s\n", st.serverLogBuf.Bytes()) + } +} + +func TestServer_Handler_Sends_WindowUpdate(t *testing.T) { + puppet := newHandlerPuppet() + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + puppet.act(w, r) + }) + defer st.Close() + defer puppet.done() + + st.greet() + + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // data coming + EndHeaders: true, + }) + st.writeData(1, false, []byte("abcdef")) + puppet.do(readBodyHandler(t, "abc")) + st.wantWindowUpdate(0, 3) + st.wantWindowUpdate(1, 3) + + puppet.do(readBodyHandler(t, "def")) + st.wantWindowUpdate(0, 3) + st.wantWindowUpdate(1, 3) + + st.writeData(1, true, []byte("ghijkl")) // END_STREAM here + puppet.do(readBodyHandler(t, "ghi")) + puppet.do(readBodyHandler(t, "jkl")) + st.wantWindowUpdate(0, 3) + st.wantWindowUpdate(0, 3) // no more stream-level, since END_STREAM +} + +// the version of the TestServer_Handler_Sends_WindowUpdate with padding. +// See golang.org/issue/16556 +func TestServer_Handler_Sends_WindowUpdate_Padding(t *testing.T) { + puppet := newHandlerPuppet() + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + puppet.act(w, r) + }) + defer st.Close() + defer puppet.done() + + st.greet() + + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, + EndHeaders: true, + }) + st.writeDataPadded(1, false, []byte("abcdef"), []byte{0, 0, 0, 0}) + + // Expect to immediately get our 5 bytes of padding back for + // both the connection and stream (4 bytes of padding + 1 byte of length) + st.wantWindowUpdate(0, 5) + st.wantWindowUpdate(1, 5) + + puppet.do(readBodyHandler(t, "abc")) + st.wantWindowUpdate(0, 3) + st.wantWindowUpdate(1, 3) + + puppet.do(readBodyHandler(t, "def")) + st.wantWindowUpdate(0, 3) + st.wantWindowUpdate(1, 3) +} + +func TestServer_Send_GoAway_After_Bogus_WindowUpdate(t *testing.T) { + st := newServerTester(t, nil) + defer st.Close() + st.greet() + if err := st.fr.WriteWindowUpdate(0, 1<<31-1); err != nil { + t.Fatal(err) + } + gf := st.wantGoAway() + if gf.ErrCode != ErrCodeFlowControl { + t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFlowControl) + } + if gf.LastStreamID != 0 { + t.Errorf("GOAWAY last stream ID = %v; want %v", gf.LastStreamID, 0) + } +} + +func TestServer_Send_RstStream_After_Bogus_WindowUpdate(t *testing.T) { + inHandler := make(chan bool) + blockHandler := make(chan bool) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + inHandler <- true + <-blockHandler + }) + defer st.Close() + defer close(blockHandler) + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // keep it open + EndHeaders: true, + }) + <-inHandler + // Send a bogus window update: + if err := st.fr.WriteWindowUpdate(1, 1<<31-1); err != nil { + t.Fatal(err) + } + st.wantRSTStream(1, ErrCodeFlowControl) +} + +// testServerPostUnblock sends a hanging POST with unsent data to handler, +// then runs fn once in the handler, and verifies that the error returned from +// handler is acceptable. It fails if takes over 5 seconds for handler to exit. 
+func testServerPostUnblock(t *testing.T, + handler func(http.ResponseWriter, *http.Request) error, + fn func(*serverTester), + checkErr func(error), + otherHeaders ...string) { + inHandler := make(chan bool) + errc := make(chan error, 1) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + inHandler <- true + errc <- handler(w, r) + }) + defer st.Close() + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(append([]string{":method", "POST"}, otherHeaders...)...), + EndStream: false, // keep it open + EndHeaders: true, + }) + <-inHandler + fn(st) + select { + case err := <-errc: + if checkErr != nil { + checkErr(err) + } + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for Handler to return") + } +} + +func TestServer_RSTStream_Unblocks_Read(t *testing.T) { + testServerPostUnblock(t, + func(w http.ResponseWriter, r *http.Request) (err error) { + _, err = r.Body.Read(make([]byte, 1)) + return + }, + func(st *serverTester) { + if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { + t.Fatal(err) + } + }, + func(err error) { + want := StreamError{StreamID: 0x1, Code: 0x8} + if !reflect.DeepEqual(err, want) { + t.Errorf("Read error = %v; want %v", err, want) + } + }, + ) +} + +func TestServer_RSTStream_Unblocks_Header_Write(t *testing.T) { + // Run this test a bunch, because it doesn't always + // deadlock. But with a bunch, it did. + n := 50 + if testing.Short() { + n = 5 + } + for i := 0; i < n; i++ { + testServer_RSTStream_Unblocks_Header_Write(t) + } +} + +func testServer_RSTStream_Unblocks_Header_Write(t *testing.T) { + inHandler := make(chan bool, 1) + unblockHandler := make(chan bool, 1) + headerWritten := make(chan bool, 1) + wroteRST := make(chan bool, 1) + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + inHandler <- true + <-wroteRST + w.Header().Set("foo", "bar") + w.WriteHeader(200) + w.(http.Flusher).Flush() + headerWritten <- true + <-unblockHandler + }) + defer st.Close() + + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // keep it open + EndHeaders: true, + }) + <-inHandler + if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { + t.Fatal(err) + } + wroteRST <- true + st.awaitIdle() + select { + case <-headerWritten: + case <-time.After(2 * time.Second): + t.Error("timeout waiting for header write") + } + unblockHandler <- true +} + +func TestServer_DeadConn_Unblocks_Read(t *testing.T) { + testServerPostUnblock(t, + func(w http.ResponseWriter, r *http.Request) (err error) { + _, err = r.Body.Read(make([]byte, 1)) + return + }, + func(st *serverTester) { st.cc.Close() }, + func(err error) { + if err == nil { + t.Error("unexpected nil error from Request.Body.Read") + } + }, + ) +} + +var blockUntilClosed = func(w http.ResponseWriter, r *http.Request) error { + <-w.(http.CloseNotifier).CloseNotify() + return nil +} + +func TestServer_CloseNotify_After_RSTStream(t *testing.T) { + testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { + if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { + t.Fatal(err) + } + }, nil) +} + +func TestServer_CloseNotify_After_ConnClose(t *testing.T) { + testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { st.cc.Close() }, nil) +} + +// that CloseNotify unblocks after a stream error due to the client's +// problem that's unrelated to them explicitly canceling it (which is +// 
TestServer_CloseNotify_After_RSTStream above) +func TestServer_CloseNotify_After_StreamError(t *testing.T) { + testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { + // data longer than declared Content-Length => stream error + st.writeData(1, true, []byte("1234")) + }, nil, "content-length", "3") +} + +func TestServer_StateTransitions(t *testing.T) { + var st *serverTester + inHandler := make(chan bool) + writeData := make(chan bool) + leaveHandler := make(chan bool) + st = newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + inHandler <- true + if st.stream(1) == nil { + t.Errorf("nil stream 1 in handler") + } + if got, want := st.streamState(1), stateOpen; got != want { + t.Errorf("in handler, state is %v; want %v", got, want) + } + writeData <- true + if n, err := r.Body.Read(make([]byte, 1)); n != 0 || err != io.EOF { + t.Errorf("body read = %d, %v; want 0, EOF", n, err) + } + if got, want := st.streamState(1), stateHalfClosedRemote; got != want { + t.Errorf("in handler, state is %v; want %v", got, want) + } + + <-leaveHandler + }) + st.greet() + if st.stream(1) != nil { + t.Fatal("stream 1 should be empty") + } + if got := st.streamState(1); got != stateIdle { + t.Fatalf("stream 1 should be idle; got %v", got) + } + + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // keep it open + EndHeaders: true, + }) + <-inHandler + <-writeData + st.writeData(1, true, nil) + + leaveHandler <- true + hf := st.wantHeaders() + if !hf.StreamEnded() { + t.Fatal("expected END_STREAM flag") + } + + if got, want := st.streamState(1), stateClosed; got != want { + t.Errorf("at end, state is %v; want %v", got, want) + } + if st.stream(1) != nil { + t.Fatal("at end, stream 1 should be gone") + } +} + +// test HEADERS w/o EndHeaders + another HEADERS (should get rejected) +func TestServer_Rejects_HeadersNoEnd_Then_Headers(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: false, + }) + st.writeHeaders(HeadersFrameParam{ // Not a continuation. + StreamID: 3, // different stream. 
+ BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + }) + }) +} + +// test HEADERS w/o EndHeaders + PING (should get rejected) +func TestServer_Rejects_HeadersNoEnd_Then_Ping(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: false, + }) + if err := st.fr.WritePing(false, [8]byte{}); err != nil { + t.Fatal(err) + } + }) +} + +// test HEADERS w/ EndHeaders + a continuation HEADERS (should get rejected) +func TestServer_Rejects_HeadersEnd_Then_Continuation(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + }) + st.wantHeaders() + if err := st.fr.WriteContinuation(1, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil { + t.Fatal(err) + } + }) +} + +// test HEADERS w/o EndHeaders + a continuation HEADERS on wrong stream ID +func TestServer_Rejects_HeadersNoEnd_Then_ContinuationWrongStream(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: false, + }) + if err := st.fr.WriteContinuation(3, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil { + t.Fatal(err) + } + }) +} + +// No HEADERS on stream 0. +func TestServer_Rejects_Headers0(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + st.fr.AllowIllegalWrites = true + st.writeHeaders(HeadersFrameParam{ + StreamID: 0, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + }) + }) +} + +// No CONTINUATION on stream 0. +func TestServer_Rejects_Continuation0(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + st.fr.AllowIllegalWrites = true + if err := st.fr.WriteContinuation(0, true, st.encodeHeader()); err != nil { + t.Fatal(err) + } + }) +} + +// No PRIORITY on stream 0. +func TestServer_Rejects_Priority0(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + st.fr.AllowIllegalWrites = true + st.writePriority(0, PriorityParam{StreamDep: 1}) + }) +} + +// No HEADERS frame with a self-dependence. +func TestServer_Rejects_HeadersSelfDependence(t *testing.T) { + testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) { + st.fr.AllowIllegalWrites = true + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + Priority: PriorityParam{StreamDep: 1}, + }) + }) +} + +// No PRIORTY frame with a self-dependence. +func TestServer_Rejects_PrioritySelfDependence(t *testing.T) { + testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) { + st.fr.AllowIllegalWrites = true + st.writePriority(1, PriorityParam{StreamDep: 1}) + }) +} + +func TestServer_Rejects_PushPromise(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + pp := PushPromiseParam{ + StreamID: 1, + PromiseID: 3, + } + if err := st.fr.WritePushPromise(pp); err != nil { + t.Fatal(err) + } + }) +} + +// testServerRejectsConn tests that the server hangs up with a GOAWAY +// frame and a server close after the client does something +// deserving a CONNECTION_ERROR. 
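+//
+// Compare testServerRejectsStream below: a connection error ends with
+// GOAWAY and a closed connection, while a stream error only resets the
+// offending stream and leaves the connection usable.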
+func testServerRejectsConn(t *testing.T, writeReq func(*serverTester)) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}) + st.addLogFilter("connection error: PROTOCOL_ERROR") + defer st.Close() + st.greet() + writeReq(st) + + st.wantGoAway() + errc := make(chan error, 1) + go func() { + fr, err := st.fr.ReadFrame() + if err == nil { + err = fmt.Errorf("got frame of type %T", fr) + } + errc <- err + }() + select { + case err := <-errc: + if err != io.EOF { + t.Errorf("ReadFrame = %v; want io.EOF", err) + } + case <-time.After(2 * time.Second): + t.Error("timeout waiting for disconnect") + } +} + +// testServerRejectsStream tests that the server sends a RST_STREAM with the provided +// error code after a client sends a bogus request. +func testServerRejectsStream(t *testing.T, code ErrCode, writeReq func(*serverTester)) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}) + defer st.Close() + st.greet() + writeReq(st) + st.wantRSTStream(1, code) +} + +// testServerRequest sets up an idle HTTP/2 connection and lets you +// write a single request with writeReq, and then verify that the +// *http.Request is built correctly in checkReq. +func testServerRequest(t *testing.T, writeReq func(*serverTester), checkReq func(*http.Request)) { + gotReq := make(chan bool, 1) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + if r.Body == nil { + t.Fatal("nil Body") + } + checkReq(r) + gotReq <- true + }) + defer st.Close() + + st.greet() + writeReq(st) + + select { + case <-gotReq: + case <-time.After(2 * time.Second): + t.Error("timeout waiting for request") + } +} + +func getSlash(st *serverTester) { st.bodylessReq1() } + +func TestServer_Response_NoData(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + // Nothing. + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if !hf.StreamEnded() { + t.Fatal("want END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + }) +} + +func TestServer_Response_NoData_Header_FooBar(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.Header().Set("Foo-Bar", "some-value") + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if !hf.StreamEnded() { + t.Fatal("want END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"foo-bar", "some-value"}, + {"content-length", "0"}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + }) +} + +func TestServer_Response_Data_Sniff_DoesntOverride(t *testing.T) { + const msg = "this is HTML." 
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		w.Header().Set("Content-Type", "foo/bar")
+		io.WriteString(w, msg)
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("don't want END_STREAM, expecting data")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth := st.decodeHeader(hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"content-type", "foo/bar"},
+			{"content-length", strconv.Itoa(len(msg))},
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+		df := st.wantData()
+		if !df.StreamEnded() {
+			t.Error("expected DATA to have END_STREAM flag")
+		}
+		if got := string(df.Data()); got != msg {
+			t.Errorf("got DATA %q; want %q", got, msg)
+		}
+	})
+}
+
+func TestServer_Response_TransferEncoding_chunked(t *testing.T) {
+	const msg = "hi"
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		w.Header().Set("Transfer-Encoding", "chunked") // should be stripped
+		io.WriteString(w, msg)
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		goth := st.decodeHeader(hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"content-type", "text/plain; charset=utf-8"},
+			{"content-length", strconv.Itoa(len(msg))},
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+	})
+}
+
+// Header accessed only after the initial write.
+func TestServer_Response_Data_IgnoreHeaderAfterWrite_After(t *testing.T) {
+	const msg = "<html>this is HTML."
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		io.WriteString(w, msg)
+		w.Header().Set("foo", "should be ignored")
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("unexpected END_STREAM")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth := st.decodeHeader(hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"content-type", "text/html; charset=utf-8"},
+			{"content-length", strconv.Itoa(len(msg))},
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+	})
+}
+
+// Header accessed before the initial write and later mutated.
+func TestServer_Response_Data_IgnoreHeaderAfterWrite_Overwrite(t *testing.T) {
+	const msg = "<html>this is HTML."
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		w.Header().Set("foo", "proper value")
+		io.WriteString(w, msg)
+		w.Header().Set("foo", "should be ignored")
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("unexpected END_STREAM")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth := st.decodeHeader(hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"foo", "proper value"},
+			{"content-type", "text/html; charset=utf-8"},
+			{"content-length", strconv.Itoa(len(msg))},
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+	})
+}
+
+func TestServer_Response_Data_SniffLenType(t *testing.T) {
+	const msg = "<html>this is HTML."
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		io.WriteString(w, msg)
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("don't want END_STREAM, expecting data")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth := st.decodeHeader(hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"content-type", "text/html; charset=utf-8"},
+			{"content-length", strconv.Itoa(len(msg))},
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+		df := st.wantData()
+		if !df.StreamEnded() {
+			t.Error("expected DATA to have END_STREAM flag")
+		}
+		if got := string(df.Data()); got != msg {
+			t.Errorf("got DATA %q; want %q", got, msg)
+		}
+	})
+}
+
+func TestServer_Response_Header_Flush_MidWrite(t *testing.T) {
+	const msg = "<html>this is HTML"
+	const msg2 = ", and this is the next chunk"
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		io.WriteString(w, msg)
+		w.(http.Flusher).Flush()
+		io.WriteString(w, msg2)
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("unexpected END_STREAM flag")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth := st.decodeHeader(hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"content-type", "text/html; charset=utf-8"}, // sniffed
+			// and no content-length
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+		{
+			df := st.wantData()
+			if df.StreamEnded() {
+				t.Error("unexpected END_STREAM flag")
+			}
+			if got := string(df.Data()); got != msg {
+				t.Errorf("got DATA %q; want %q", got, msg)
+			}
+		}
+		{
+			df := st.wantData()
+			if !df.StreamEnded() {
+				t.Error("wanted END_STREAM flag on last data chunk")
+			}
+			if got := string(df.Data()); got != msg2 {
+				t.Errorf("got DATA %q; want %q", got, msg2)
+			}
+		}
+	})
+}
+
+func TestServer_Response_LargeWrite(t *testing.T) {
+	const size = 1 << 20
+	const maxFrameSize = 16 << 10
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		n, err := w.Write(bytes.Repeat([]byte("a"), size))
+		if err != nil {
+			return fmt.Errorf("Write error: %v", err)
+		}
+		if n != size {
+			return fmt.Errorf("wrong size %d from Write", n)
+		}
+		return nil
+	}, func(st *serverTester) {
+		if err := st.fr.WriteSettings(
+			Setting{SettingInitialWindowSize, 0},
+			Setting{SettingMaxFrameSize, maxFrameSize},
+		); err != nil {
+			t.Fatal(err)
+		}
+		st.wantSettingsAck()
+
+		getSlash(st) // make the single request
+
+		// Give the handler quota to write:
+		if err := st.fr.WriteWindowUpdate(1, size); err != nil {
+			t.Fatal(err)
+		}
+		// Give the handler quota to write to the connection-level
+		// window as well.
+		if err := st.fr.WriteWindowUpdate(0, size); err != nil {
+			t.Fatal(err)
+		}
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("unexpected END_STREAM flag")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth := st.decodeHeader(hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"content-type", "text/plain; charset=utf-8"}, // sniffed
+			// and no content-length
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+		var bytes, frames int
+		for {
+			df := st.wantData()
+			bytes += len(df.Data())
+			frames++
+			for _, b := range df.Data() {
+				if b != 'a' {
+					t.Fatal("non-'a' byte seen in DATA")
+				}
+			}
+			if df.StreamEnded() {
+				break
+			}
+		}
+		if bytes != size {
+			t.Errorf("Got %d bytes; want %d", bytes, size)
+		}
+		if want := int(size / maxFrameSize); frames < want || frames > want*2 {
+			t.Errorf("Got %d frames; want %d", frames, want)
+		}
+	})
+}
+
+// Test that the handler can't write more than the client allows
+func TestServer_Response_LargeWrite_FlowControlled(t *testing.T) {
+	// Make these reads. Before each read, the client adds exactly enough
+	// flow-control to satisfy the read. Numbers chosen arbitrarily.
+	reads := []int{123, 1, 13, 127}
+	size := 0
+	for _, n := range reads {
+		size += n
+	}
+
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		w.(http.Flusher).Flush()
+		n, err := w.Write(bytes.Repeat([]byte("a"), size))
+		if err != nil {
+			return fmt.Errorf("Write error: %v", err)
+		}
+		if n != size {
+			return fmt.Errorf("wrong size %d from Write", n)
+		}
+		return nil
+	}, func(st *serverTester) {
+		// Set the window size to something explicit for this test.
+		// It's also how much initial data we expect.
+		if err := st.fr.WriteSettings(Setting{SettingInitialWindowSize, uint32(reads[0])}); err != nil {
+			t.Fatal(err)
+		}
+		st.wantSettingsAck()
+
+		getSlash(st) // make the single request
+
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("unexpected END_STREAM flag")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+
+		df := st.wantData()
+		if got := len(df.Data()); got != reads[0] {
+			t.Fatalf("Initial window size = %d but got DATA with %d bytes", reads[0], got)
+		}
+
+		for _, quota := range reads[1:] {
+			if err := st.fr.WriteWindowUpdate(1, uint32(quota)); err != nil {
+				t.Fatal(err)
+			}
+			df := st.wantData()
+			if int(quota) != len(df.Data()) {
+				t.Fatalf("read %d bytes after giving %d quota", len(df.Data()), quota)
+			}
+		}
+	})
+}
+
+// Test that the handler blocked in a Write is unblocked if the client sends a RST_STREAM.
+func TestServer_Response_RST_Unblocks_LargeWrite(t *testing.T) { + const size = 1 << 20 + const maxFrameSize = 16 << 10 + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.(http.Flusher).Flush() + errc := make(chan error, 1) + go func() { + _, err := w.Write(bytes.Repeat([]byte("a"), size)) + errc <- err + }() + select { + case err := <-errc: + if err == nil { + return errors.New("unexpected nil error from Write in handler") + } + return nil + case <-time.After(2 * time.Second): + return errors.New("timeout waiting for Write in handler") + } + }, func(st *serverTester) { + if err := st.fr.WriteSettings( + Setting{SettingInitialWindowSize, 0}, + Setting{SettingMaxFrameSize, maxFrameSize}, + ); err != nil { + t.Fatal(err) + } + st.wantSettingsAck() + + getSlash(st) // make the single request + + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + + if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { + t.Fatal(err) + } + }) +} + +func TestServer_Response_Empty_Data_Not_FlowControlled(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.(http.Flusher).Flush() + // Nothing; send empty DATA + return nil + }, func(st *serverTester) { + // Handler gets no data quota: + if err := st.fr.WriteSettings(Setting{SettingInitialWindowSize, 0}); err != nil { + t.Fatal(err) + } + st.wantSettingsAck() + + getSlash(st) // make the single request + + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + + df := st.wantData() + if got := len(df.Data()); got != 0 { + t.Fatalf("unexpected %d DATA bytes; want 0", got) + } + if !df.StreamEnded() { + t.Fatal("DATA didn't have END_STREAM") + } + }) +} + +func TestServer_Response_Automatic100Continue(t *testing.T) { + const msg = "foo" + const reply = "bar" + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + if v := r.Header.Get("Expect"); v != "" { + t.Errorf("Expect header = %q; want empty", v) + } + buf := make([]byte, len(msg)) + // This read should trigger the 100-continue being sent. + if n, err := io.ReadFull(r.Body, buf); err != nil || n != len(msg) || string(buf) != msg { + return fmt.Errorf("ReadFull = %q, %v; want %q, nil", buf[:n], err, msg) + } + _, err := io.WriteString(w, reply) + return err + }, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST", "expect", "100-continue"), + EndStream: false, + EndHeaders: true, + }) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "100"}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Fatalf("Got headers %v; want %v", goth, wanth) + } + + // Okay, they sent status 100, so we can send our + // gigantic and/or sensitive "foo" payload now. 
+ st.writeData(1, true, []byte(msg)) + + st.wantWindowUpdate(0, uint32(len(msg))) + + hf = st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("expected data to follow") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth = st.decodeHeader(hf.HeaderBlockFragment()) + wanth = [][2]string{ + {":status", "200"}, + {"content-type", "text/plain; charset=utf-8"}, + {"content-length", strconv.Itoa(len(reply))}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + + df := st.wantData() + if string(df.Data()) != reply { + t.Errorf("Client read %q; want %q", df.Data(), reply) + } + if !df.StreamEnded() { + t.Errorf("expect data stream end") + } + }) +} + +func TestServer_HandlerWriteErrorOnDisconnect(t *testing.T) { + errc := make(chan error, 1) + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + p := []byte("some data.\n") + for { + _, err := w.Write(p) + if err != nil { + errc <- err + return nil + } + } + }, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: false, + EndHeaders: true, + }) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + // Close the connection and wait for the handler to (hopefully) notice. + st.cc.Close() + select { + case <-errc: + case <-time.After(5 * time.Second): + t.Error("timeout") + } + }) +} + +func TestServer_Rejects_Too_Many_Streams(t *testing.T) { + const testPath = "/some/path" + + inHandler := make(chan uint32) + leaveHandler := make(chan bool) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + id := w.(*responseWriter).rws.stream.id + inHandler <- id + if id == 1+(defaultMaxStreams+1)*2 && r.URL.Path != testPath { + t.Errorf("decoded final path as %q; want %q", r.URL.Path, testPath) + } + <-leaveHandler + }) + defer st.Close() + st.greet() + nextStreamID := uint32(1) + streamID := func() uint32 { + defer func() { nextStreamID += 2 }() + return nextStreamID + } + sendReq := func(id uint32, headers ...string) { + st.writeHeaders(HeadersFrameParam{ + StreamID: id, + BlockFragment: st.encodeHeader(headers...), + EndStream: true, + EndHeaders: true, + }) + } + for i := 0; i < defaultMaxStreams; i++ { + sendReq(streamID()) + <-inHandler + } + defer func() { + for i := 0; i < defaultMaxStreams; i++ { + leaveHandler <- true + } + }() + + // And this one should cross the limit: + // (It's also sent as a CONTINUATION, to verify we still track the decoder context, + // even if we're rejecting it) + rejectID := streamID() + headerBlock := st.encodeHeader(":path", testPath) + frag1, frag2 := headerBlock[:3], headerBlock[3:] + st.writeHeaders(HeadersFrameParam{ + StreamID: rejectID, + BlockFragment: frag1, + EndStream: true, + EndHeaders: false, // CONTINUATION coming + }) + if err := st.fr.WriteContinuation(rejectID, true, frag2); err != nil { + t.Fatal(err) + } + st.wantRSTStream(rejectID, ErrCodeProtocol) + + // But let a handler finish: + leaveHandler <- true + st.wantHeaders() + + // And now another stream should be able to start: + goodID := streamID() + sendReq(goodID, ":path", testPath) + select { + case got := <-inHandler: + if got != goodID { + t.Errorf("Got stream %d; want %d", got, goodID) + } + case <-time.After(3 * time.Second): + t.Error("timeout waiting for handler") + } +} + +// So many response headers that the server needs to use CONTINUATION frames: +func 
TestServer_Response_ManyHeaders_With_Continuation(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + h := w.Header() + for i := 0; i < 5000; i++ { + h.Set(fmt.Sprintf("x-header-%d", i), fmt.Sprintf("x-value-%d", i)) + } + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if hf.HeadersEnded() { + t.Fatal("got unwanted END_HEADERS flag") + } + n := 0 + for { + n++ + cf := st.wantContinuation() + if cf.HeadersEnded() { + break + } + } + if n < 5 { + t.Errorf("Only got %d CONTINUATION frames; expected 5+ (currently 6)", n) + } + }) +} + +// This previously crashed (reported by Mathieu Lonjaret as observed +// while using Camlistore) because we got a DATA frame from the client +// after the handler exited and our logic at the time was wrong, +// keeping a stream in the map in stateClosed, which tickled an +// invariant check later when we tried to remove that stream (via +// defer sc.closeAllStreamsOnConnClose) when the serverConn serve loop +// ended. +func TestServer_NoCrash_HandlerClose_Then_ClientClose(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + // nothing + return nil + }, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: false, // DATA is coming + EndHeaders: true, + }) + hf := st.wantHeaders() + if !hf.HeadersEnded() || !hf.StreamEnded() { + t.Fatalf("want END_HEADERS+END_STREAM, got %v", hf) + } + + // Sent when the a Handler closes while a client has + // indicated it's still sending DATA: + st.wantRSTStream(1, ErrCodeNo) + + // Now the handler has ended, so it's ended its + // stream, but the client hasn't closed its side + // (stateClosedLocal). So send more data and verify + // it doesn't crash with an internal invariant panic, like + // it did before. + st.writeData(1, true, []byte("foo")) + + // Get our flow control bytes back, since the handler didn't get them. + st.wantWindowUpdate(0, uint32(len("foo"))) + + // Sent after a peer sends data anyway (admittedly the + // previous RST_STREAM might've still been in-flight), + // but they'll get the more friendly 'cancel' code + // first. + st.wantRSTStream(1, ErrCodeStreamClosed) + + // Set up a bunch of machinery to record the panic we saw + // previously. + var ( + panMu sync.Mutex + panicVal interface{} + ) + + testHookOnPanicMu.Lock() + testHookOnPanic = func(sc *serverConn, pv interface{}) bool { + panMu.Lock() + panicVal = pv + panMu.Unlock() + return true + } + testHookOnPanicMu.Unlock() + + // Now force the serve loop to end, via closing the connection. + st.cc.Close() + select { + case <-st.sc.doneServing: + // Loop has exited. 
+ panMu.Lock() + got := panicVal + panMu.Unlock() + if got != nil { + t.Errorf("Got panic: %v", got) + } + case <-time.After(5 * time.Second): + t.Error("timeout") + } + }) +} + +func TestServer_Rejects_TLS10(t *testing.T) { testRejectTLS(t, tls.VersionTLS10) } +func TestServer_Rejects_TLS11(t *testing.T) { testRejectTLS(t, tls.VersionTLS11) } + +func testRejectTLS(t *testing.T, max uint16) { + st := newServerTester(t, nil, func(c *tls.Config) { + c.MaxVersion = max + }) + defer st.Close() + gf := st.wantGoAway() + if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want { + t.Errorf("Got error code %v; want %v", got, want) + } +} + +func TestServer_Rejects_TLSBadCipher(t *testing.T) { + st := newServerTester(t, nil, func(c *tls.Config) { + // Only list bad ones: + c.CipherSuites = []uint16{ + tls.TLS_RSA_WITH_RC4_128_SHA, + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_WITH_AES_128_CBC_SHA256, + } + }) + defer st.Close() + gf := st.wantGoAway() + if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want { + t.Errorf("Got error code %v; want %v", got, want) + } +} + +func TestServer_Advertises_Common_Cipher(t *testing.T) { + const requiredSuite = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + st := newServerTester(t, nil, func(c *tls.Config) { + // Have the client only support the one required by the spec. + c.CipherSuites = []uint16{requiredSuite} + }, func(ts *httptest.Server) { + var srv *http.Server = ts.Config + // Have the server configured with no specific cipher suites. + // This tests that Go's defaults include the required one. + srv.TLSConfig = nil + }) + defer st.Close() + st.greet() +} + +func (st *serverTester) onHeaderField(f hpack.HeaderField) { + if f.Name == "date" { + return + } + st.decodedHeaders = append(st.decodedHeaders, [2]string{f.Name, f.Value}) +} + +func (st *serverTester) decodeHeader(headerBlock []byte) (pairs [][2]string) { + st.decodedHeaders = nil + if _, err := st.hpackDec.Write(headerBlock); err != nil { + st.t.Fatalf("hpack decoding error: %v", err) + } + if err := st.hpackDec.Close(); err != nil { + st.t.Fatalf("hpack decoding error: %v", err) + } + return st.decodedHeaders +} + +// testServerResponse sets up an idle HTTP/2 connection. The client function should +// write a single request that must be handled by the handler. This waits up to 5s +// for client to return, then up to an additional 2s for the handler to return. 
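+//
+// The handler's error travels over a buffered channel, so the serve
+// goroutine is never blocked waiting for the test to receive it.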
+func testServerResponse(t testing.TB, + handler func(http.ResponseWriter, *http.Request) error, + client func(*serverTester), +) { + errc := make(chan error, 1) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + if r.Body == nil { + t.Fatal("nil Body") + } + errc <- handler(w, r) + }) + defer st.Close() + + donec := make(chan bool) + go func() { + defer close(donec) + st.greet() + client(st) + }() + + select { + case <-donec: + case <-time.After(5 * time.Second): + t.Fatal("timeout in client") + } + + select { + case err := <-errc: + if err != nil { + t.Fatalf("Error in handler: %v", err) + } + case <-time.After(2 * time.Second): + t.Fatal("timeout in handler") + } +} + +// readBodyHandler returns an http Handler func that reads len(want) +// bytes from r.Body and fails t if the contents read were not +// the value of want. +func readBodyHandler(t *testing.T, want string) func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + buf := make([]byte, len(want)) + _, err := io.ReadFull(r.Body, buf) + if err != nil { + t.Error(err) + return + } + if string(buf) != want { + t.Errorf("read %q; want %q", buf, want) + } + } +} + +// TestServerWithCurl currently fails, hence the LenientCipherSuites test. See: +// https://github.com/tatsuhiro-t/nghttp2/issues/140 & +// http://sourceforge.net/p/curl/bugs/1472/ +func TestServerWithCurl(t *testing.T) { testServerWithCurl(t, false) } +func TestServerWithCurl_LenientCipherSuites(t *testing.T) { testServerWithCurl(t, true) } + +func testServerWithCurl(t *testing.T, permitProhibitedCipherSuites bool) { + if runtime.GOOS != "linux" { + t.Skip("skipping Docker test when not on Linux; requires --net which won't work with boot2docker anyway") + } + if testing.Short() { + t.Skip("skipping curl test in short mode") + } + requireCurl(t) + var gotConn int32 + testHookOnConn = func() { atomic.StoreInt32(&gotConn, 1) } + + const msg = "Hello from curl!\n" + ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Foo", "Bar") + w.Header().Set("Client-Proto", r.Proto) + io.WriteString(w, msg) + })) + ConfigureServer(ts.Config, &Server{ + PermitProhibitedCipherSuites: permitProhibitedCipherSuites, + }) + ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config + ts.StartTLS() + defer ts.Close() + + t.Logf("Running test server for curl to hit at: %s", ts.URL) + container := curl(t, "--silent", "--http2", "--insecure", "-v", ts.URL) + defer kill(container) + resc := make(chan interface{}, 1) + go func() { + res, err := dockerLogs(container) + if err != nil { + resc <- err + } else { + resc <- res + } + }() + select { + case res := <-resc: + if err, ok := res.(error); ok { + t.Fatal(err) + } + body := string(res.([]byte)) + // Search for both "key: value" and "key:value", since curl changed their format + // Our Dockerfile contains the latest version (no space), but just in case people + // didn't rebuild, check both. 
+ if !strings.Contains(body, "foo: Bar") && !strings.Contains(body, "foo:Bar") { + t.Errorf("didn't see foo: Bar header") + t.Logf("Got: %s", body) + } + if !strings.Contains(body, "client-proto: HTTP/2") && !strings.Contains(body, "client-proto:HTTP/2") { + t.Errorf("didn't see client-proto: HTTP/2 header") + t.Logf("Got: %s", res) + } + if !strings.Contains(string(res.([]byte)), msg) { + t.Errorf("didn't see %q content", msg) + t.Logf("Got: %s", res) + } + case <-time.After(3 * time.Second): + t.Errorf("timeout waiting for curl") + } + + if atomic.LoadInt32(&gotConn) == 0 { + t.Error("never saw an http2 connection") + } +} + +var doh2load = flag.Bool("h2load", false, "Run h2load test") + +func TestServerWithH2Load(t *testing.T) { + if !*doh2load { + t.Skip("Skipping without --h2load flag.") + } + if runtime.GOOS != "linux" { + t.Skip("skipping Docker test when not on Linux; requires --net which won't work with boot2docker anyway") + } + requireH2load(t) + + msg := strings.Repeat("Hello, h2load!\n", 5000) + ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, msg) + w.(http.Flusher).Flush() + io.WriteString(w, msg) + })) + ts.StartTLS() + defer ts.Close() + + cmd := exec.Command("docker", "run", "--net=host", "--entrypoint=/usr/local/bin/h2load", "gohttp2/curl", + "-n100000", "-c100", "-m100", ts.URL) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + t.Fatal(err) + } +} + +// Issue 12843 +func TestServerDoS_MaxHeaderListSize(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}) + defer st.Close() + + // shake hands + frameSize := defaultMaxReadFrameSize + var advHeaderListSize *uint32 + st.greetAndCheckSettings(func(s Setting) error { + switch s.ID { + case SettingMaxFrameSize: + if s.Val < minMaxFrameSize { + frameSize = minMaxFrameSize + } else if s.Val > maxFrameSize { + frameSize = maxFrameSize + } else { + frameSize = int(s.Val) + } + case SettingMaxHeaderListSize: + advHeaderListSize = &s.Val + } + return nil + }) + + if advHeaderListSize == nil { + t.Errorf("server didn't advertise a max header list size") + } else if *advHeaderListSize == 0 { + t.Errorf("server advertised a max header list size of 0") + } + + st.encodeHeaderField(":method", "GET") + st.encodeHeaderField(":path", "/") + st.encodeHeaderField(":scheme", "https") + cookie := strings.Repeat("*", 4058) + st.encodeHeaderField("cookie", cookie) + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.headerBuf.Bytes(), + EndStream: true, + EndHeaders: false, + }) + + // Capture the short encoding of a duplicate ~4K cookie, now + // that we've already sent it once. + st.headerBuf.Reset() + st.encodeHeaderField("cookie", cookie) + + // Now send 1MB of it. 
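+	// Each repeat costs only a few bytes on the wire (an indexed HPACK
+	// field) but decodes back to the ~4K cookie, so the decoded header
+	// list ends up far beyond the advertised max header list size.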
+ const size = 1 << 20 + b := bytes.Repeat(st.headerBuf.Bytes(), size/st.headerBuf.Len()) + for len(b) > 0 { + chunk := b + if len(chunk) > frameSize { + chunk = chunk[:frameSize] + } + b = b[len(chunk):] + st.fr.WriteContinuation(1, len(b) == 0, chunk) + } + + h := st.wantHeaders() + if !h.HeadersEnded() { + t.Fatalf("Got HEADERS without END_HEADERS set: %v", h) + } + headers := st.decodeHeader(h.HeaderBlockFragment()) + want := [][2]string{ + {":status", "431"}, + {"content-type", "text/html; charset=utf-8"}, + {"content-length", "63"}, + } + if !reflect.DeepEqual(headers, want) { + t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want) + } +} + +func TestCompressionErrorOnWrite(t *testing.T) { + const maxStrLen = 8 << 10 + var serverConfig *http.Server + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + // No response body. + }, func(ts *httptest.Server) { + serverConfig = ts.Config + serverConfig.MaxHeaderBytes = maxStrLen + }) + st.addLogFilter("connection error: COMPRESSION_ERROR") + defer st.Close() + st.greet() + + maxAllowed := st.sc.framer.maxHeaderStringLen() + + // Crank this up, now that we have a conn connected with the + // hpack.Decoder's max string length set has been initialized + // from the earlier low ~8K value. We want this higher so don't + // hit the max header list size. We only want to test hitting + // the max string size. + serverConfig.MaxHeaderBytes = 1 << 20 + + // First a request with a header that's exactly the max allowed size + // for the hpack compression. It's still too long for the header list + // size, so we'll get the 431 error, but that keeps the compression + // context still valid. + hbf := st.encodeHeader("foo", strings.Repeat("a", maxAllowed)) + + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: hbf, + EndStream: true, + EndHeaders: true, + }) + h := st.wantHeaders() + if !h.HeadersEnded() { + t.Fatalf("Got HEADERS without END_HEADERS set: %v", h) + } + headers := st.decodeHeader(h.HeaderBlockFragment()) + want := [][2]string{ + {":status", "431"}, + {"content-type", "text/html; charset=utf-8"}, + {"content-length", "63"}, + } + if !reflect.DeepEqual(headers, want) { + t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want) + } + df := st.wantData() + if !strings.Contains(string(df.Data()), "HTTP Error 431") { + t.Errorf("Unexpected data body: %q", df.Data()) + } + if !df.StreamEnded() { + t.Fatalf("expect data stream end") + } + + // And now send one that's just one byte too big. + hbf = st.encodeHeader("bar", strings.Repeat("b", maxAllowed+1)) + st.writeHeaders(HeadersFrameParam{ + StreamID: 3, + BlockFragment: hbf, + EndStream: true, + EndHeaders: true, + }) + ga := st.wantGoAway() + if ga.ErrCode != ErrCodeCompression { + t.Errorf("GOAWAY err = %v; want ErrCodeCompression", ga.ErrCode) + } +} + +func TestCompressionErrorOnClose(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + // No response body. + }) + st.addLogFilter("connection error: COMPRESSION_ERROR") + defer st.Close() + st.greet() + + hbf := st.encodeHeader("foo", "bar") + hbf = hbf[:len(hbf)-1] // truncate one byte from the end, so hpack.Decoder.Close fails. 
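+	// A header block that ends mid-field is only detectable once the
+	// decoder is closed at END_HEADERS, which is why the failure
+	// surfaces in hpack.Decoder.Close rather than in Write.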
+ st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: hbf, + EndStream: true, + EndHeaders: true, + }) + ga := st.wantGoAway() + if ga.ErrCode != ErrCodeCompression { + t.Errorf("GOAWAY err = %v; want ErrCodeCompression", ga.ErrCode) + } +} + +// test that a server handler can read trailers from a client +func TestServerReadsTrailers(t *testing.T) { + const testBody = "some test body" + writeReq := func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader("trailer", "Foo, Bar", "trailer", "Baz"), + EndStream: false, + EndHeaders: true, + }) + st.writeData(1, false, []byte(testBody)) + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeaderRaw( + "foo", "foov", + "bar", "barv", + "baz", "bazv", + "surprise", "wasn't declared; shouldn't show up", + ), + EndStream: true, + EndHeaders: true, + }) + } + checkReq := func(r *http.Request) { + wantTrailer := http.Header{ + "Foo": nil, + "Bar": nil, + "Baz": nil, + } + if !reflect.DeepEqual(r.Trailer, wantTrailer) { + t.Errorf("initial Trailer = %v; want %v", r.Trailer, wantTrailer) + } + slurp, err := ioutil.ReadAll(r.Body) + if string(slurp) != testBody { + t.Errorf("read body %q; want %q", slurp, testBody) + } + if err != nil { + t.Fatalf("Body slurp: %v", err) + } + wantTrailerAfter := http.Header{ + "Foo": {"foov"}, + "Bar": {"barv"}, + "Baz": {"bazv"}, + } + if !reflect.DeepEqual(r.Trailer, wantTrailerAfter) { + t.Errorf("final Trailer = %v; want %v", r.Trailer, wantTrailerAfter) + } + } + testServerRequest(t, writeReq, checkReq) +} + +// test that a server handler can send trailers +func TestServerWritesTrailers_WithFlush(t *testing.T) { testServerWritesTrailers(t, true) } +func TestServerWritesTrailers_WithoutFlush(t *testing.T) { testServerWritesTrailers(t, false) } + +func testServerWritesTrailers(t *testing.T, withFlush bool) { + // See https://httpwg.github.io/specs/rfc7540.html#rfc.section.8.1.3 + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.Header().Set("Trailer", "Server-Trailer-A, Server-Trailer-B") + w.Header().Add("Trailer", "Server-Trailer-C") + w.Header().Add("Trailer", "Transfer-Encoding, Content-Length, Trailer") // filtered + + // Regular headers: + w.Header().Set("Foo", "Bar") + w.Header().Set("Content-Length", "5") // len("Hello") + + io.WriteString(w, "Hello") + if withFlush { + w.(http.Flusher).Flush() + } + w.Header().Set("Server-Trailer-A", "valuea") + w.Header().Set("Server-Trailer-C", "valuec") // skipping B + // After a flush, random keys like Server-Surprise shouldn't show up: + w.Header().Set("Server-Surpise", "surprise! 
this isn't predeclared!") + // But we do permit promoting keys to trailers after a + // flush if they start with the magic + // otherwise-invalid "Trailer:" prefix: + w.Header().Set("Trailer:Post-Header-Trailer", "hi1") + w.Header().Set("Trailer:post-header-trailer2", "hi2") + w.Header().Set("Trailer:Range", "invalid") + w.Header().Set("Trailer:Foo\x01Bogus", "invalid") + w.Header().Set("Transfer-Encoding", "should not be included; Forbidden by RFC 7230 4.1.2") + w.Header().Set("Content-Length", "should not be included; Forbidden by RFC 7230 4.1.2") + w.Header().Set("Trailer", "should not be included; Forbidden by RFC 7230 4.1.2") + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("response HEADERS had END_STREAM") + } + if !hf.HeadersEnded() { + t.Fatal("response HEADERS didn't have END_HEADERS") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"foo", "Bar"}, + {"trailer", "Server-Trailer-A, Server-Trailer-B"}, + {"trailer", "Server-Trailer-C"}, + {"trailer", "Transfer-Encoding, Content-Length, Trailer"}, + {"content-type", "text/plain; charset=utf-8"}, + {"content-length", "5"}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth) + } + df := st.wantData() + if string(df.Data()) != "Hello" { + t.Fatalf("Client read %q; want Hello", df.Data()) + } + if df.StreamEnded() { + t.Fatalf("data frame had STREAM_ENDED") + } + tf := st.wantHeaders() // for the trailers + if !tf.StreamEnded() { + t.Fatalf("trailers HEADERS lacked END_STREAM") + } + if !tf.HeadersEnded() { + t.Fatalf("trailers HEADERS lacked END_HEADERS") + } + wanth = [][2]string{ + {"post-header-trailer", "hi1"}, + {"post-header-trailer2", "hi2"}, + {"server-trailer-a", "valuea"}, + {"server-trailer-c", "valuec"}, + } + goth = st.decodeHeader(tf.HeaderBlockFragment()) + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth) + } + }) +} + +// validate transmitted header field names & values +// golang.org/issue/14048 +func TestServerDoesntWriteInvalidHeaders(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.Header().Add("OK1", "x") + w.Header().Add("Bad:Colon", "x") // colon (non-token byte) in key + w.Header().Add("Bad1\x00", "x") // null in key + w.Header().Add("Bad2", "x\x00y") // null in value + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if !hf.StreamEnded() { + t.Error("response HEADERS lacked END_STREAM") + } + if !hf.HeadersEnded() { + t.Fatal("response HEADERS didn't have END_HEADERS") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"ok1", "x"}, + {"content-length", "0"}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth) + } + }) +} + +func BenchmarkServerGets(b *testing.B) { + defer disableGoroutineTracking()() + b.ReportAllocs() + + const msg = "Hello, world" + st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, msg) + }) + defer st.Close() + st.greet() + + // Give the server quota to reply. 
(plus it has the 64KB)
+	if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil {
+		b.Fatal(err)
+	}
+
+	for i := 0; i < b.N; i++ {
+		id := 1 + uint32(i)*2
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      id,
+			BlockFragment: st.encodeHeader(),
+			EndStream:     true,
+			EndHeaders:    true,
+		})
+		st.wantHeaders()
+		df := st.wantData()
+		if !df.StreamEnded() {
+			b.Fatalf("DATA didn't have END_STREAM; got %v", df)
+		}
+	}
+}
+
+func BenchmarkServerPosts(b *testing.B) {
+	defer disableGoroutineTracking()()
+	b.ReportAllocs()
+
+	const msg = "Hello, world"
+	st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
+		// Consume the (empty) body from the peer before replying, otherwise
+		// the server will sometimes (depending on scheduling) send the peer
+		// a RST_STREAM with the CANCEL error code.
+		if n, err := io.Copy(ioutil.Discard, r.Body); n != 0 || err != nil {
+			b.Errorf("Copy error; got %v, %v; want 0, nil", n, err)
+		}
+		io.WriteString(w, msg)
+	})
+	defer st.Close()
+	st.greet()
+
+	// Give the server quota to reply. (plus it has the 64KB)
+	if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil {
+		b.Fatal(err)
+	}
+
+	for i := 0; i < b.N; i++ {
+		id := 1 + uint32(i)*2
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      id,
+			BlockFragment: st.encodeHeader(":method", "POST"),
+			EndStream:     false,
+			EndHeaders:    true,
+		})
+		st.writeData(id, true, nil)
+		st.wantHeaders()
+		df := st.wantData()
+		if !df.StreamEnded() {
+			b.Fatalf("DATA didn't have END_STREAM; got %v", df)
+		}
+	}
+}
+
+// Send a stream of messages from server to client in separate data frames.
+// Brings up performance issues seen in long streams.
+// Created to show problem in go issue #18502
+func BenchmarkServerToClientStreamDefaultOptions(b *testing.B) {
+	benchmarkServerToClientStream(b)
+}
+
+// Justification for Change-Id: Iad93420ef6c3918f54249d867098f1dadfa324d8
+// Expect to see memory/alloc reduction by opting in to Frame reuse with the Framer.
+func BenchmarkServerToClientStreamReuseFrames(b *testing.B) {
+	benchmarkServerToClientStream(b, optFramerReuseFrames)
+}
+
+func benchmarkServerToClientStream(b *testing.B, newServerOpts ...interface{}) {
+	defer disableGoroutineTracking()()
+	b.ReportAllocs()
+	const msgLen = 1
+	// default window size
+	const windowSize = 1<<16 - 1
+
+	// next message to send from the server and for the client to expect
+	nextMsg := func(i int) []byte {
+		msg := make([]byte, msgLen)
+		msg[0] = byte(i)
+		if len(msg) != msgLen {
+			panic("invalid test setup msg length")
+		}
+		return msg
+	}
+
+	st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
+		// Consume the (empty) body from the peer before replying, otherwise
+		// the server will sometimes (depending on scheduling) send the peer
+		// a RST_STREAM with the CANCEL error code.
+		if n, err := io.Copy(ioutil.Discard, r.Body); n != 0 || err != nil {
+			b.Errorf("Copy error; got %v, %v; want 0, nil", n, err)
+		}
+		for i := 0; i < b.N; i++ {
+			w.Write(nextMsg(i))
+			w.(http.Flusher).Flush()
+		}
+	}, newServerOpts...)
+	defer st.Close()
+	st.greet()
+
+	const id = uint32(1)
+
+	st.writeHeaders(HeadersFrameParam{
+		StreamID:      id,
+		BlockFragment: st.encodeHeader(":method", "POST"),
+		EndStream:     false,
+		EndHeaders:    true,
+	})
+
+	st.writeData(id, true, nil)
+	st.wantHeaders()
+
+	var pendingWindowUpdate = uint32(0)
+
+	for i := 0; i < b.N; i++ {
+		expected := nextMsg(i)
+		df := st.wantData()
+		if !bytes.Equal(expected, df.data) {
+			b.Fatalf("Bad message received; want %v; got %v", expected, df.data)
+		}
+		// try to send infrequent but large window updates so they don't overwhelm the test
+		pendingWindowUpdate += uint32(len(df.data))
+		if pendingWindowUpdate >= windowSize/2 {
+			if err := st.fr.WriteWindowUpdate(0, pendingWindowUpdate); err != nil {
+				b.Fatal(err)
+			}
+			if err := st.fr.WriteWindowUpdate(id, pendingWindowUpdate); err != nil {
+				b.Fatal(err)
+			}
+			pendingWindowUpdate = 0
+		}
+	}
+	df := st.wantData()
+	if !df.StreamEnded() {
+		b.Fatalf("DATA didn't have END_STREAM; got %v", df)
+	}
+}
+
+// go-fuzz bug, originally reported at https://github.com/bradfitz/http2/issues/53
+// Verify we don't hang.
+func TestIssue53(t *testing.T) {
+	const data = "PRI * HTTP/2.0\r\n\r\nSM" +
+		"\r\n\r\n\x00\x00\x00\x01\ainfinfin\ad"
+	s := &http.Server{
+		ErrorLog: log.New(io.MultiWriter(stderrv(), twriter{t: t}), "", log.LstdFlags),
+		Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+			w.Write([]byte("hello"))
+		}),
+	}
+	s2 := &Server{
+		MaxReadFrameSize:             1 << 16,
+		PermitProhibitedCipherSuites: true,
+	}
+	c := &issue53Conn{[]byte(data), false, false}
+	s2.ServeConn(c, &ServeConnOpts{BaseConfig: s})
+	if !c.closed {
+		t.Fatal("connection is not closed")
+	}
+}
+
+type issue53Conn struct {
+	data    []byte
+	closed  bool
+	written bool
+}
+
+func (c *issue53Conn) Read(b []byte) (n int, err error) {
+	if len(c.data) == 0 {
+		return 0, io.EOF
+	}
+	n = copy(b, c.data)
+	c.data = c.data[n:]
+	return
+}
+
+func (c *issue53Conn) Write(b []byte) (n int, err error) {
+	c.written = true
+	return len(b), nil
+}
+
+func (c *issue53Conn) Close() error {
+	c.closed = true
+	return nil
+}
+
+func (c *issue53Conn) LocalAddr() net.Addr {
+	return &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 49706}
+}
+func (c *issue53Conn) RemoteAddr() net.Addr {
+	return &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 49706}
+}
+func (c *issue53Conn) SetDeadline(t time.Time) error      { return nil }
+func (c *issue53Conn) SetReadDeadline(t time.Time) error  { return nil }
+func (c *issue53Conn) SetWriteDeadline(t time.Time) error { return nil }
+
+// golang.org/issue/12895
+func TestConfigureServer(t *testing.T) {
+	tests := []struct {
+		name      string
+		tlsConfig *tls.Config
+		wantErr   string
+	}{
+		{
+			name: "empty server",
+		},
+		{
+			name: "just the required cipher suite",
+			tlsConfig: &tls.Config{
+				CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256},
+			},
+		},
+		{
+			name: "just the alternative required cipher suite",
+			tlsConfig: &tls.Config{
+				CipherSuites: []uint16{tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256},
+			},
+		},
+		{
+			name: "missing required cipher suite",
+			tlsConfig: &tls.Config{
+				CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384},
+			},
+			wantErr: "is missing an HTTP/2-required AES_128_GCM_SHA256 cipher.",
+		},
+		{
+			name: "required after bad",
+			tlsConfig: &tls.Config{
+				CipherSuites: []uint16{tls.TLS_RSA_WITH_RC4_128_SHA, tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256},
+			},
+			wantErr: "contains an HTTP/2-approved cipher suite (0xc02f), but it comes after",
+		},
+		{
+			name: "bad after required",
+			tlsConfig: &tls.Config{
+				CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, tls.TLS_RSA_WITH_RC4_128_SHA},
+			},
+		},
+	}
+	for _, tt := range tests {
+		srv := &http.Server{TLSConfig: tt.tlsConfig}
+		err := ConfigureServer(srv, nil)
+		if (err != nil) != (tt.wantErr != "") {
+			if tt.wantErr != "" {
+				t.Errorf("%s: success, but want error", tt.name)
+			} else {
+				t.Errorf("%s: unexpected error: %v", tt.name, err)
+			}
+		}
+		if err != nil && tt.wantErr != "" && !strings.Contains(err.Error(), tt.wantErr) {
+			t.Errorf("%s: err = %v; want substring %q", tt.name, err, tt.wantErr)
+		}
+		if err == nil && !srv.TLSConfig.PreferServerCipherSuites {
+			t.Errorf("%s: PreferServerCipherSuites is false; want true", tt.name)
+		}
+	}
+}
+
+func TestServerRejectHeadWithBody(t *testing.T) {
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		// No response body.
+	})
+	defer st.Close()
+	st.greet()
+	st.writeHeaders(HeadersFrameParam{
+		StreamID:      1, // clients send odd numbers
+		BlockFragment: st.encodeHeader(":method", "HEAD"),
+		EndStream:     false, // what we're testing, a bogus HEAD request with body
+		EndHeaders:    true,
+	})
+	st.wantRSTStream(1, ErrCodeProtocol)
+}
+
+func TestServerNoAutoContentLengthOnHead(t *testing.T) {
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		// No response body. (or smaller than one frame)
+	})
+	defer st.Close()
+	st.greet()
+	st.writeHeaders(HeadersFrameParam{
+		StreamID:      1, // clients send odd numbers
+		BlockFragment: st.encodeHeader(":method", "HEAD"),
+		EndStream:     true,
+		EndHeaders:    true,
+	})
+	h := st.wantHeaders()
+	headers := st.decodeHeader(h.HeaderBlockFragment())
+	want := [][2]string{
+		{":status", "200"},
+	}
+	if !reflect.DeepEqual(headers, want) {
+		t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want)
+	}
+}
+
+// golang.org/issue/13495
+func TestServerNoDuplicateContentType(t *testing.T) {
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		w.Header()["Content-Type"] = []string{""}
+		fmt.Fprintf(w, "hi")
+	})
+	defer st.Close()
+	st.greet()
+	st.writeHeaders(HeadersFrameParam{
+		StreamID:      1,
+		BlockFragment: st.encodeHeader(),
+		EndStream:     true,
+		EndHeaders:    true,
+	})
+	h := st.wantHeaders()
+	headers := st.decodeHeader(h.HeaderBlockFragment())
+	want := [][2]string{
+		{":status", "200"},
+		{"content-type", ""},
+		{"content-length", "41"},
+	}
+	if !reflect.DeepEqual(headers, want) {
+		t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want)
+	}
+}
+
+func disableGoroutineTracking() (restore func()) {
+	old := DebugGoroutines
+	DebugGoroutines = false
+	return func() { DebugGoroutines = old }
+}
+
+func BenchmarkServer_GetRequest(b *testing.B) {
+	defer disableGoroutineTracking()()
+	b.ReportAllocs()
+	const msg = "Hello, world."
+	st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
+		n, err := io.Copy(ioutil.Discard, r.Body)
+		if err != nil || n > 0 {
+			b.Errorf("Read %d bytes, error %v; want 0 bytes.", n, err)
+		}
+		io.WriteString(w, msg)
+	})
+	defer st.Close()
+
+	st.greet()
+	// Give the server quota to reply.
(plus it has the 64KB) + if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { + b.Fatal(err) + } + hbf := st.encodeHeader(":method", "GET") + for i := 0; i < b.N; i++ { + streamID := uint32(1 + 2*i) + st.writeHeaders(HeadersFrameParam{ + StreamID: streamID, + BlockFragment: hbf, + EndStream: true, + EndHeaders: true, + }) + st.wantHeaders() + st.wantData() + } +} + +func BenchmarkServer_PostRequest(b *testing.B) { + defer disableGoroutineTracking()() + b.ReportAllocs() + const msg = "Hello, world." + st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + n, err := io.Copy(ioutil.Discard, r.Body) + if err != nil || n > 0 { + b.Errorf("Read %d bytes, error %v; want 0 bytes.", n, err) + } + io.WriteString(w, msg) + }) + defer st.Close() + st.greet() + // Give the server quota to reply. (plus it has the 64KB) + if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { + b.Fatal(err) + } + hbf := st.encodeHeader(":method", "POST") + for i := 0; i < b.N; i++ { + streamID := uint32(1 + 2*i) + st.writeHeaders(HeadersFrameParam{ + StreamID: streamID, + BlockFragment: hbf, + EndStream: false, + EndHeaders: true, + }) + st.writeData(streamID, true, nil) + st.wantHeaders() + st.wantData() + } +} + +type connStateConn struct { + net.Conn + cs tls.ConnectionState +} + +func (c connStateConn) ConnectionState() tls.ConnectionState { return c.cs } + +// golang.org/issue/12737 -- handle any net.Conn, not just +// *tls.Conn. +func TestServerHandleCustomConn(t *testing.T) { + var s Server + c1, c2 := net.Pipe() + clientDone := make(chan struct{}) + handlerDone := make(chan struct{}) + var req *http.Request + go func() { + defer close(clientDone) + defer c2.Close() + fr := NewFramer(c2, c2) + io.WriteString(c2, ClientPreface) + fr.WriteSettings() + fr.WriteSettingsAck() + f, err := fr.ReadFrame() + if err != nil { + t.Error(err) + return + } + if sf, ok := f.(*SettingsFrame); !ok || sf.IsAck() { + t.Errorf("Got %v; want non-ACK SettingsFrame", summarizeFrame(f)) + return + } + f, err = fr.ReadFrame() + if err != nil { + t.Error(err) + return + } + if sf, ok := f.(*SettingsFrame); !ok || !sf.IsAck() { + t.Errorf("Got %v; want ACK SettingsFrame", summarizeFrame(f)) + return + } + var henc hpackEncoder + fr.WriteHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: henc.encodeHeaderRaw(t, ":method", "GET", ":path", "/", ":scheme", "https", ":authority", "foo.com"), + EndStream: true, + EndHeaders: true, + }) + go io.Copy(ioutil.Discard, c2) + <-handlerDone + }() + const testString = "my custom ConnectionState" + fakeConnState := tls.ConnectionState{ + ServerName: testString, + Version: tls.VersionTLS12, + CipherSuite: cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + } + go s.ServeConn(connStateConn{c1, fakeConnState}, &ServeConnOpts{ + BaseConfig: &http.Server{ + Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer close(handlerDone) + req = r + }), + }}) + select { + case <-clientDone: + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for handler") + } + if req.TLS == nil { + t.Fatalf("Request.TLS is nil. 
Got: %#v", req) + } + if req.TLS.ServerName != testString { + t.Fatalf("Request.TLS = %+v; want ServerName of %q", req.TLS, testString) + } +} + +// golang.org/issue/14214 +func TestServer_Rejects_ConnHeaders(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + t.Error("should not get to Handler") + }) + defer st.Close() + st.greet() + st.bodylessReq1("connection", "foo") + hf := st.wantHeaders() + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "400"}, + {"content-type", "text/plain; charset=utf-8"}, + {"x-content-type-options", "nosniff"}, + {"content-length", "51"}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } +} + +type hpackEncoder struct { + enc *hpack.Encoder + buf bytes.Buffer +} + +func (he *hpackEncoder) encodeHeaderRaw(t *testing.T, headers ...string) []byte { + if len(headers)%2 == 1 { + panic("odd number of kv args") + } + he.buf.Reset() + if he.enc == nil { + he.enc = hpack.NewEncoder(&he.buf) + } + for len(headers) > 0 { + k, v := headers[0], headers[1] + err := he.enc.WriteField(hpack.HeaderField{Name: k, Value: v}) + if err != nil { + t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err) + } + headers = headers[2:] + } + return he.buf.Bytes() +} + +func TestCheckValidHTTP2Request(t *testing.T) { + tests := []struct { + h http.Header + want error + }{ + { + h: http.Header{"Te": {"trailers"}}, + want: nil, + }, + { + h: http.Header{"Te": {"trailers", "bogus"}}, + want: errors.New(`request header "TE" may only be "trailers" in HTTP/2`), + }, + { + h: http.Header{"Foo": {""}}, + want: nil, + }, + { + h: http.Header{"Connection": {""}}, + want: errors.New(`request header "Connection" is not valid in HTTP/2`), + }, + { + h: http.Header{"Proxy-Connection": {""}}, + want: errors.New(`request header "Proxy-Connection" is not valid in HTTP/2`), + }, + { + h: http.Header{"Keep-Alive": {""}}, + want: errors.New(`request header "Keep-Alive" is not valid in HTTP/2`), + }, + { + h: http.Header{"Upgrade": {""}}, + want: errors.New(`request header "Upgrade" is not valid in HTTP/2`), + }, + } + for i, tt := range tests { + got := checkValidHTTP2RequestHeaders(tt.h) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("%d. checkValidHTTP2Request = %v; want %v", i, got, tt.want) + } + } +} + +// golang.org/issue/14030 +func TestExpect100ContinueAfterHandlerWrites(t *testing.T) { + const msg = "Hello" + const msg2 = "World" + + doRead := make(chan bool, 1) + defer close(doRead) // fallback cleanup + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, msg) + w.(http.Flusher).Flush() + + // Do a read, which might force a 100-continue status to be sent. 
+ <-doRead + r.Body.Read(make([]byte, 10)) + + io.WriteString(w, msg2) + + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + req, _ := http.NewRequest("POST", st.ts.URL, io.LimitReader(neverEnding('A'), 2<<20)) + req.Header.Set("Expect", "100-continue") + + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + + buf := make([]byte, len(msg)) + if _, err := io.ReadFull(res.Body, buf); err != nil { + t.Fatal(err) + } + if string(buf) != msg { + t.Fatalf("msg = %q; want %q", buf, msg) + } + + doRead <- true + + if _, err := io.ReadFull(res.Body, buf); err != nil { + t.Fatal(err) + } + if string(buf) != msg2 { + t.Fatalf("second msg = %q; want %q", buf, msg2) + } +} + +type funcReader func([]byte) (n int, err error) + +func (f funcReader) Read(p []byte) (n int, err error) { return f(p) } + +// golang.org/issue/16481 -- return flow control when streams close with unread data. +// (The Server version of the bug. See also TestUnreadFlowControlReturned_Transport) +func TestUnreadFlowControlReturned_Server(t *testing.T) { + unblock := make(chan bool, 1) + defer close(unblock) + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + // Don't read the 16KB request body. Wait until the client's + // done sending it and then return. This should cause the Server + // to then return those 16KB of flow control to the client. + <-unblock + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + // This previously hung on the 4th iteration. + for i := 0; i < 6; i++ { + body := io.MultiReader( + io.LimitReader(neverEnding('A'), 16<<10), + funcReader(func([]byte) (n int, err error) { + unblock <- true + return 0, io.EOF + }), + ) + req, _ := http.NewRequest("POST", st.ts.URL, body) + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + res.Body.Close() + } + +} + +func TestServerIdleTimeout(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + }, func(h2s *Server) { + h2s.IdleTimeout = 500 * time.Millisecond + }) + defer st.Close() + + st.greet() + ga := st.wantGoAway() + if ga.ErrCode != ErrCodeNo { + t.Errorf("GOAWAY error = %v; want ErrCodeNo", ga.ErrCode) + } +} + +func TestServerIdleTimeout_AfterRequest(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + const timeout = 250 * time.Millisecond + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + time.Sleep(timeout * 2) + }, func(h2s *Server) { + h2s.IdleTimeout = timeout + }) + defer st.Close() + + st.greet() + + // Send a request which takes twice the timeout. Verifies the + // idle timeout doesn't fire while we're in a request: + st.bodylessReq1() + st.wantHeaders() + + // But the idle timeout should be rearmed after the request + // is done: + ga := st.wantGoAway() + if ga.ErrCode != ErrCodeNo { + t.Errorf("GOAWAY error = %v; want ErrCodeNo", ga.ErrCode) + } +} + +// grpc-go closes the Request.Body currently with a Read. +// Verify that it doesn't race. 
+// See https://github.com/grpc/grpc-go/pull/938 +func TestRequestBodyReadCloseRace(t *testing.T) { + for i := 0; i < 100; i++ { + body := &requestBody{ + pipe: &pipe{ + b: new(bytes.Buffer), + }, + } + body.pipe.CloseWithError(io.EOF) + + done := make(chan bool, 1) + buf := make([]byte, 10) + go func() { + time.Sleep(1 * time.Millisecond) + body.Close() + done <- true + }() + body.Read(buf) + <-done + } +} + +func TestIssue20704Race(t *testing.T) { + if testing.Short() && os.Getenv("GO_BUILDER_NAME") == "" { + t.Skip("skipping in short mode") + } + const ( + itemSize = 1 << 10 + itemCount = 100 + ) + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + for i := 0; i < itemCount; i++ { + _, err := w.Write(make([]byte, itemSize)) + if err != nil { + return + } + } + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + cl := &http.Client{Transport: tr} + + for i := 0; i < 1000; i++ { + resp, err := cl.Get(st.ts.URL) + if err != nil { + t.Fatal(err) + } + // Force a RST stream to the server by closing without + // reading the body: + resp.Body.Close() + } +} diff --git a/vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml b/vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml new file mode 100644 index 0000000..31a84be --- /dev/null +++ b/vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml @@ -0,0 +1,5021 @@ + + + + + + + + + + + + + + + + + + + Hypertext Transfer Protocol version 2 + + + Twist +
    + mbelshe@chromium.org +
    +
    + + + Google, Inc +
    + fenix@google.com +
    +
    + + + Mozilla +
    + + 331 E Evelyn Street + Mountain View + CA + 94041 + US + + martin.thomson@gmail.com +
    +
    + + + Applications + HTTPbis + HTTP + SPDY + Web + + + + This specification describes an optimized expression of the semantics of the Hypertext + Transfer Protocol (HTTP). HTTP/2 enables a more efficient use of network resources and a + reduced perception of latency by introducing header field compression and allowing multiple + concurrent messages on the same connection. It also introduces unsolicited push of + representations from servers to clients. + + + This specification is an alternative to, but does not obsolete, the HTTP/1.1 message syntax. + HTTP's existing semantics remain unchanged. + + + + + + Discussion of this draft takes place on the HTTPBIS working group mailing list + (ietf-http-wg@w3.org), which is archived at . + + + Working Group information can be found at ; that specific to HTTP/2 are at . + + + The changes in this draft are summarized in . + + + +
    + + +
    + + + The Hypertext Transfer Protocol (HTTP) is a wildly successful protocol. However, the + HTTP/1.1 message format () has + several characteristics that have a negative overall effect on application performance + today. + + + In particular, HTTP/1.0 allowed only one request to be outstanding at a time on a given + TCP connection. HTTP/1.1 added request pipelining, but this only partially addressed + request concurrency and still suffers from head-of-line blocking. Therefore, HTTP/1.1 + clients that need to make many requests typically use multiple connections to a server in + order to achieve concurrency and thereby reduce latency. + + + Furthermore, HTTP header fields are often repetitive and verbose, causing unnecessary + network traffic, as well as causing the initial TCP congestion + window to quickly fill. This can result in excessive latency when multiple requests are + made on a new TCP connection. + + + HTTP/2 addresses these issues by defining an optimized mapping of HTTP's semantics to an + underlying connection. Specifically, it allows interleaving of request and response + messages on the same connection and uses an efficient coding for HTTP header fields. It + also allows prioritization of requests, letting more important requests complete more + quickly, further improving performance. + + + The resulting protocol is more friendly to the network, because fewer TCP connections can + be used in comparison to HTTP/1.x. This means less competition with other flows, and + longer-lived connections, which in turn leads to better utilization of available network + capacity. + + + Finally, HTTP/2 also enables more efficient processing of messages through use of binary + message framing. + +
    + +
    + + HTTP/2 provides an optimized transport for HTTP semantics. HTTP/2 supports all of the core + features of HTTP/1.1, but aims to be more efficient in several ways. + + + The basic protocol unit in HTTP/2 is a frame. Each frame + type serves a different purpose. For example, HEADERS and + DATA frames form the basis of HTTP requests and + responses; other frame types like SETTINGS, + WINDOW_UPDATE, and PUSH_PROMISE are used in support of other + HTTP/2 features. + + + Multiplexing of requests is achieved by having each HTTP request-response exchange + associated with its own stream. Streams are largely + independent of each other, so a blocked or stalled request or response does not prevent + progress on other streams. + + + Flow control and prioritization ensure that it is possible to efficiently use multiplexed + streams. Flow control helps to ensure that only data that + can be used by a receiver is transmitted. Prioritization ensures that limited resources can be directed + to the most important streams first. + + + HTTP/2 adds a new interaction mode, whereby a server can push + responses to a client. Server push allows a server to speculatively send a client + data that the server anticipates the client will need, trading off some network usage + against a potential latency gain. The server does this by synthesizing a request, which it + sends as a PUSH_PROMISE frame. The server is then able to send a response to + the synthetic request on a separate stream. + + + Frames that contain HTTP header fields are compressed. + HTTP requests can be highly redundant, so compression can reduce the size of requests and + responses significantly. + + +
    + + The HTTP/2 specification is split into four parts: + + + Starting HTTP/2 covers how an HTTP/2 connection is + initiated. + + + The framing and streams layers describe the way HTTP/2 frames are + structured and formed into multiplexed streams. + + + Frame and error + definitions include details of the frame and error types used in HTTP/2. + + + HTTP mappings and additional + requirements describe how HTTP semantics are expressed using frames and + streams. + + + + + While some of the frame and stream layer concepts are isolated from HTTP, this + specification does not define a completely generic framing layer. The framing and streams + layers are tailored to the needs of the HTTP protocol and server push. + +
    + +
    + + The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD + NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as + described in RFC 2119. + + + All numeric values are in network byte order. Values are unsigned unless otherwise + indicated. Literal values are provided in decimal or hexadecimal as appropriate. + Hexadecimal literals are prefixed with 0x to distinguish them + from decimal literals. + + + The following terms are used: + + + The endpoint initiating the HTTP/2 connection. + + + A transport-layer connection between two endpoints. + + + An error that affects the entire HTTP/2 connection. + + + Either the client or server of the connection. + + + The smallest unit of communication within an HTTP/2 connection, consisting of a header + and a variable-length sequence of octets structured according to the frame type. + + + An endpoint. When discussing a particular endpoint, "peer" refers to the endpoint + that is remote to the primary subject of discussion. + + + An endpoint that is receiving frames. + + + An endpoint that is transmitting frames. + + + The endpoint which did not initiate the HTTP/2 connection. + + + A bi-directional flow of frames across a virtual channel within the HTTP/2 connection. + + + An error on the individual HTTP/2 stream. + + + + + Finally, the terms "gateway", "intermediary", "proxy", and "tunnel" are defined + in . + +
    +
    + +
    + + An HTTP/2 connection is an application layer protocol running on top of a TCP connection + (). The client is the TCP connection initiator. + + + HTTP/2 uses the same "http" and "https" URI schemes used by HTTP/1.1. HTTP/2 shares the same + default port numbers: 80 for "http" URIs and 443 for "https" URIs. As a result, + implementations processing requests for target resource URIs like http://example.org/foo or https://example.com/bar are required to first discover whether the + upstream server (the immediate peer to which the client wishes to establish a connection) + supports HTTP/2. + + + + The means by which support for HTTP/2 is determined is different for "http" and "https" + URIs. Discovery for "http" URIs is described in . Discovery + for "https" URIs is described in . + + +
    + + The protocol defined in this document has two identifiers. + + + + The string "h2" identifies the protocol where HTTP/2 uses TLS. This identifier is used in the TLS application layer protocol negotiation extension (ALPN) + field and any place that HTTP/2 over TLS is identified. + + + The "h2" string is serialized into an ALPN protocol identifier as the two octet + sequence: 0x68, 0x32. + + + + + The string "h2c" identifies the protocol where HTTP/2 is run over cleartext TCP. + This identifier is used in the HTTP/1.1 Upgrade header field and any place that + HTTP/2 over TCP is identified. + + + + + + Negotiating "h2" or "h2c" implies the use of the transport, security, framing and message + semantics described in this document. + + + RFC Editor's Note: please remove the remainder of this section prior to the + publication of a final version of this document. + + + Only implementations of the final, published RFC can identify themselves as "h2" or "h2c". + Until such an RFC exists, implementations MUST NOT identify themselves using these + strings. + + + Examples and text throughout the rest of this document use "h2" as a matter of + editorial convenience only. Implementations of draft versions MUST NOT identify using + this string. + + + Implementations of draft versions of the protocol MUST add the string "-" and the + corresponding draft number to the identifier. For example, draft-ietf-httpbis-http2-11 + over TLS is identified using the string "h2-11". + + + Non-compatible experiments that are based on these draft versions MUST append the string + "-" and an experiment name to the identifier. For example, an experimental implementation + of packet mood-based encoding based on draft-ietf-httpbis-http2-09 might identify itself + as "h2-09-emo". Note that any label MUST conform to the "token" syntax defined in + . Experimenters are + encouraged to coordinate their experiments on the ietf-http-wg@w3.org mailing list. + +
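[Editor's sketch] The "h2" ALPN token above maps directly onto Go's crypto/tls support. A minimal client-side sketch, assuming only the standard library (the host name is illustrative):

package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	conf := &tls.Config{
		// Offer HTTP/2 first, with HTTP/1.1 as a fallback.
		NextProtos: []string{"h2", "http/1.1"},
	}
	conn, err := tls.Dial("tcp", "example.com:443", conf) // illustrative host
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	// "h2" here means the server selected HTTP/2 over TLS via ALPN.
	fmt.Println("negotiated:", conn.ConnectionState().NegotiatedProtocol)
}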
    + +
    + + A client that makes a request for an "http" URI without prior knowledge about support for + HTTP/2 uses the HTTP Upgrade mechanism (). The client makes an HTTP/1.1 request that includes an Upgrade + header field identifying HTTP/2 with the "h2c" token. The HTTP/1.1 request MUST include + exactly one HTTP2-Settings header field. + +
+ For example:
+
+GET / HTTP/1.1
+Host: server.example.com
+Connection: Upgrade, HTTP2-Settings
+Upgrade: h2c
+HTTP2-Settings: <base64url encoding of HTTP/2 SETTINGS payload>
+
    + + Requests that contain an entity body MUST be sent in their entirety before the client can + send HTTP/2 frames. This means that a large request entity can block the use of the + connection until it is completely sent. + + + If concurrency of an initial request with subsequent requests is important, an OPTIONS + request can be used to perform the upgrade to HTTP/2, at the cost of an additional + round-trip. + + + A server that does not support HTTP/2 can respond to the request as though the Upgrade + header field were absent: + +
    + +HTTP/1.1 200 OK +Content-Length: 243 +Content-Type: text/html + +... + +
    + + A server MUST ignore a "h2" token in an Upgrade header field. Presence of a token with + "h2" implies HTTP/2 over TLS, which is instead negotiated as described in . + + + A server that supports HTTP/2 can accept the upgrade with a 101 (Switching Protocols) + response. After the empty line that terminates the 101 response, the server can begin + sending HTTP/2 frames. These frames MUST include a response to the request that initiated + the Upgrade. + + +
    + + For example: + + +HTTP/1.1 101 Switching Protocols +Connection: Upgrade +Upgrade: h2c + +[ HTTP/2 connection ... + +
    + + The first HTTP/2 frame sent by the server is a SETTINGS frame () as the server connection preface (). Upon receiving the 101 response, the client sends a connection preface, which includes a + SETTINGS frame. + + + The HTTP/1.1 request that is sent prior to upgrade is assigned stream identifier 1 and is + assigned default priority values. Stream 1 is + implicitly half closed from the client toward the server, since the request is completed + as an HTTP/1.1 request. After commencing the HTTP/2 connection, stream 1 is used for the + response. + + +
    + + A request that upgrades from HTTP/1.1 to HTTP/2 MUST include exactly one HTTP2-Settings header field. The HTTP2-Settings header field is a connection-specific header field + that includes parameters that govern the HTTP/2 connection, provided in anticipation of + the server accepting the request to upgrade. + +
    + +
    + + A server MUST NOT upgrade the connection to HTTP/2 if this header field is not present, + or if more than one is present. A server MUST NOT send this header field. + + + + The content of the HTTP2-Settings header field is the + payload of a SETTINGS frame (), encoded as a + base64url string (that is, the URL- and filename-safe Base64 encoding described in , with any trailing '=' characters omitted). The + ABNF production for token68 is + defined in . + + + Since the upgrade is only intended to apply to the immediate connection, a client + sending HTTP2-Settings MUST also send HTTP2-Settings as a connection option in the Connection header field to prevent it from being forwarded + downstream. + + + A server decodes and interprets these values as it would any other + SETTINGS frame. Acknowledgement of the + SETTINGS parameters is not necessary, since a 101 response serves as implicit + acknowledgment. Providing these values in the Upgrade request gives a client an + opportunity to provide parameters prior to receiving any frames from the server. + +
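[Editor's sketch] The encoding just described can be shown concretely in Go. Assuming (from the SETTINGS frame definition) that each setting is a 16-bit identifier followed by a 32-bit value, base64.RawURLEncoding is exactly the URL-safe alphabet with trailing '=' characters omitted:

package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

// encodeHTTP2Settings serializes settings as a SETTINGS frame payload
// (6 octets per setting) and base64url-encodes it without padding, as
// the HTTP2-Settings header field requires.
func encodeHTTP2Settings(ids []uint16, vals []uint32) string {
	payload := make([]byte, 0, 6*len(ids))
	for i, id := range ids {
		var buf [6]byte
		binary.BigEndian.PutUint16(buf[0:2], id)
		binary.BigEndian.PutUint32(buf[2:6], vals[i])
		payload = append(payload, buf[:]...)
	}
	return base64.RawURLEncoding.EncodeToString(payload)
}

func main() {
	// 0x3 is SETTINGS_MAX_CONCURRENT_STREAMS in the settings registry.
	fmt.Println("HTTP2-Settings:", encodeHTTP2Settings([]uint16{0x3}, []uint32{100}))
}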
    +
    + +
    + + A client that makes a request to an "https" URI uses TLS + with the application layer protocol negotiation extension. + + + HTTP/2 over TLS uses the "h2" application token. The "h2c" token MUST NOT be sent by a + client or selected by a server. + + + Once TLS negotiation is complete, both the client and the server send a connection preface. + +
    + +
    + + A client can learn that a particular server supports HTTP/2 by other means. For example, + describes a mechanism for advertising this capability. + + + A client MAY immediately send HTTP/2 frames to a server that is known to support HTTP/2, + after the connection preface; a server can + identify such a connection by the presence of the connection preface. This only affects + the establishment of HTTP/2 connections over cleartext TCP; implementations that support + HTTP/2 over TLS MUST use protocol negotiation in TLS. + + + Without additional information, prior support for HTTP/2 is not a strong signal that a + given server will support HTTP/2 for future connections. For example, it is possible for + server configurations to change, for configurations to differ between instances in + clustered servers, or for network conditions to change. + +
    + +
    + + Upon establishment of a TCP connection and determination that HTTP/2 will be used by both + peers, each endpoint MUST send a connection preface as a final confirmation and to + establish the initial SETTINGS parameters for the HTTP/2 connection. The client and + server each send a different connection preface. + + + The client connection preface starts with a sequence of 24 octets, which in hex notation + are: + +
+0x505249202a20485454502f322e300d0a0d0a534d0d0a0d0a
    + + (the string PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n). This sequence + is followed by a SETTINGS frame (). The + SETTINGS frame MAY be empty. The client sends the client connection + preface immediately upon receipt of a 101 Switching Protocols response (indicating a + successful upgrade), or as the first application data octets of a TLS connection. If + starting an HTTP/2 connection with prior knowledge of server support for the protocol, the + client connection preface is sent upon connection establishment. + + + + + The client connection preface is selected so that a large proportion of HTTP/1.1 or + HTTP/1.0 servers and intermediaries do not attempt to process further frames. Note + that this does not address the concerns raised in . + + + + + The server connection preface consists of a potentially empty SETTINGS + frame () that MUST be the first frame the server sends in the + HTTP/2 connection. + + + The SETTINGS frames received from a peer as part of the connection preface + MUST be acknowledged (see ) after sending the connection + preface. + + + To avoid unnecessary latency, clients are permitted to send additional frames to the + server immediately after sending the client connection preface, without waiting to receive + the server connection preface. It is important to note, however, that the server + connection preface SETTINGS frame might include parameters that necessarily + alter how a client is expected to communicate with the server. Upon receiving the + SETTINGS frame, the client is expected to honor any parameters established. + In some configurations, it is possible for the server to transmit SETTINGS + before the client sends additional frames, providing an opportunity to avoid this issue. + + + Clients and servers MUST treat an invalid connection preface as a connection error of type + PROTOCOL_ERROR. A GOAWAY frame () + MAY be omitted in this case, since an invalid preface indicates that the peer is not using + HTTP/2. + +
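[Editor's sketch] A minimal client-side version of this exchange, using the exported helpers of the vendored package this patch adds (http2.ClientPreface, http2.NewFramer); conn is assumed to be a connection on which HTTP/2 use has already been agreed:

// Library-style sketch; not a complete program.
package sketch

import (
	"fmt"
	"io"
	"net"

	"golang.org/x/net/http2"
)

// sendClientPreface writes the 24-octet client preface followed by a
// (possibly empty) SETTINGS frame, then reads the server preface (its
// SETTINGS frame) and acknowledges it.
func sendClientPreface(conn net.Conn) error {
	if _, err := io.WriteString(conn, http2.ClientPreface); err != nil {
		return err
	}
	fr := http2.NewFramer(conn, conn)
	if err := fr.WriteSettings(); err != nil {
		return err
	}
	f, err := fr.ReadFrame()
	if err != nil {
		return err
	}
	if sf, ok := f.(*http2.SettingsFrame); !ok || sf.IsAck() {
		return fmt.Errorf("want non-ACK SETTINGS as server preface; got %v", f)
	}
	return fr.WriteSettingsAck()
}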
    +
    + +
    + + Once the HTTP/2 connection is established, endpoints can begin exchanging frames. + + +
    + + All frames begin with a fixed 9-octet header followed by a variable-length payload. + +
+    +-----------------------------------------------+
+    |                 Length (24)                   |
+    +---------------+---------------+---------------+
+    |   Type (8)    |   Flags (8)   |
+    +-+-------------+---------------+-------------------------------+
+    |R|                 Stream Identifier (31)                      |
+    +=+=============================================================+
+    |                   Frame Payload (0...)                      ...
+    +---------------------------------------------------------------+
+
+ The fields of the frame header are defined as:
+
+ Length:
+ The length of the frame payload expressed as an unsigned 24-bit integer. Values
+ greater than 2^14 (16,384) MUST NOT be sent unless the receiver has
+ set a larger value for SETTINGS_MAX_FRAME_SIZE.
+
+ The 9 octets of the frame header are not included in this value.
+
+ Type:
+ The 8-bit type of the frame. The frame type determines the format and semantics of
+ the frame. Implementations MUST ignore and discard any frame that has a type that
+ is unknown.
+
+ Flags:
+ An 8-bit field reserved for frame-type specific boolean flags.
+
+ Flags are assigned semantics specific to the indicated frame type. Flags that have
+ no defined semantics for a particular frame type MUST be ignored, and MUST be left
+ unset (0) when sending.
+
+ R:
+ A reserved 1-bit field. The semantics of this bit are undefined and the bit MUST
+ remain unset (0) when sending and MUST be ignored when receiving.
+
+ Stream Identifier:
+ A 31-bit stream identifier (see ). The value 0 is
+ reserved for frames that are associated with the connection as a whole as opposed to
+ an individual stream.
+
+ The structure and content of the frame payload is dependent entirely on the frame type.
+
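[Editor's sketch] The 9-octet layout above translates mechanically into code; the type and helper names below are this sketch's own:

package sketch

// frameHeader mirrors the fixed 9-octet header: 24-bit length, 8-bit
// type, 8-bit flags, one reserved bit, and a 31-bit stream identifier.
type frameHeader struct {
	Length   uint32 // 24 bits; the 9 header octets are not counted
	Type     uint8
	Flags    uint8
	StreamID uint32 // 31 bits; 0 addresses the connection as a whole
}

// appendFrameHeader encodes h into its wire form, keeping the reserved
// bit unset as required above.
func appendFrameHeader(dst []byte, h frameHeader) []byte {
	return append(dst,
		byte(h.Length>>16), byte(h.Length>>8), byte(h.Length),
		h.Type,
		h.Flags,
		byte(h.StreamID>>24)&0x7f, byte(h.StreamID>>16), byte(h.StreamID>>8), byte(h.StreamID),
	)
}

// parseFrameHeader decodes the wire form, ignoring the reserved bit.
func parseFrameHeader(buf [9]byte) frameHeader {
	return frameHeader{
		Length:   uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2]),
		Type:     buf[3],
		Flags:    buf[4],
		StreamID: uint32(buf[5]&0x7f)<<24 | uint32(buf[6])<<16 | uint32(buf[7])<<8 | uint32(buf[8]),
	}
}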
    + +
+
+ The size of a frame payload is limited by the maximum size that a receiver advertises in
+ the SETTINGS_MAX_FRAME_SIZE setting. This setting can have any value
+ between 2^14 (16,384) and 2^24-1 (16,777,215) octets,
+ inclusive.
+
+
+ All implementations MUST be capable of receiving and minimally processing frames up to
+ 2^14 octets in length, plus the 9 octet frame
+ header. The size of the frame header is not included when describing frame sizes.
+
+
+ Certain frame types, such as PING, impose additional limits
+ on the amount of payload data allowed.
+
+
+ If a frame size exceeds any defined limit, or is too small to contain mandatory frame
+ data, the endpoint MUST send a FRAME_SIZE_ERROR error. A frame size error
+ in a frame that could alter the state of the entire connection MUST be treated as a
+ connection error; this includes any frame carrying
+ a header block (that is, HEADERS,
+ PUSH_PROMISE, and CONTINUATION), SETTINGS,
+ and any WINDOW_UPDATE frame with a stream identifier of 0.
+
+
+ Endpoints are not obligated to use all available space in a frame. Responsiveness can be
+ improved by using frames that are smaller than the permitted maximum size. Sending large
+ frames can result in delays in sending time-sensitive frames (such as
+ RST_STREAM, WINDOW_UPDATE, or PRIORITY),
+ which, if blocked by the transmission of a large frame, could affect performance.
+
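[Editor's sketch] A small Go rendering of these limits (the constant and function names are illustrative): the advertised SETTINGS_MAX_FRAME_SIZE must lie in [2^14, 2^24-1], and a received frame longer than it is a FRAME_SIZE_ERROR.

package sketch

import "errors"

const (
	minMaxFrameSize = 1 << 14   // 16,384: the floor every receiver must accept
	maxMaxFrameSize = 1<<24 - 1 // 16,777,215: the 24-bit length field's ceiling
)

var errFrameSize = errors.New("FRAME_SIZE_ERROR")

// checkFrameSize validates an advertised SETTINGS_MAX_FRAME_SIZE and a
// received frame's 24-bit length against it.
func checkFrameSize(length, advertised uint32) error {
	if advertised < minMaxFrameSize || advertised > maxMaxFrameSize {
		return errors.New("invalid SETTINGS_MAX_FRAME_SIZE")
	}
	if length > advertised {
		return errFrameSize
	}
	return nil
}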
    + +
    + + Just as in HTTP/1, a header field in HTTP/2 is a name with one or more associated values. + They are used within HTTP request and response messages as well as server push operations + (see ). + + + Header lists are collections of zero or more header fields. When transmitted over a + connection, a header list is serialized into a header block using HTTP Header Compression. The serialized header block is then + divided into one or more octet sequences, called header block fragments, and transmitted + within the payload of HEADERS, PUSH_PROMISE or CONTINUATION frames. + + + The Cookie header field is treated specially by the HTTP + mapping (see ). + + + A receiving endpoint reassembles the header block by concatenating its fragments, then + decompresses the block to reconstruct the header list. + + + A complete header block consists of either: + + + a single HEADERS or PUSH_PROMISE frame, + with the END_HEADERS flag set, or + + + a HEADERS or PUSH_PROMISE frame with the END_HEADERS + flag cleared and one or more CONTINUATION frames, + where the last CONTINUATION frame has the END_HEADERS flag set. + + + + + Header compression is stateful. One compression context and one decompression context is + used for the entire connection. Each header block is processed as a discrete unit. + Header blocks MUST be transmitted as a contiguous sequence of frames, with no interleaved + frames of any other type or from any other stream. The last frame in a sequence of + HEADERS or CONTINUATION frames MUST have the END_HEADERS + flag set. The last frame in a sequence of PUSH_PROMISE or + CONTINUATION frames MUST have the END_HEADERS flag set. This allows a + header block to be logically equivalent to a single frame. + + + Header block fragments can only be sent as the payload of HEADERS, + PUSH_PROMISE or CONTINUATION frames, because these frames + carry data that can modify the compression context maintained by a receiver. An endpoint + receiving HEADERS, PUSH_PROMISE or + CONTINUATION frames MUST reassemble header blocks and perform decompression + even if the frames are to be discarded. A receiver MUST terminate the connection with a + connection error of type + COMPRESSION_ERROR if it does not decompress a header block. + +
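[Editor's sketch] These fragmentation rules are what the CONTINUATION test near the top of this patch's server_test.go drives from the client side. A sketch using the same hpack package (maxFrameSize is a parameter of the sketch, not of the spec):

package sketch

import (
	"bytes"

	"golang.org/x/net/http2/hpack"
)

// encodeHeaderBlock HPACK-encodes a header list and splits the block
// into fragments of at most maxFrameSize octets. The first fragment is
// sent in HEADERS, the rest in CONTINUATIONs, and END_HEADERS is set
// only on the frame carrying the final fragment.
func encodeHeaderBlock(headers [][2]string, maxFrameSize int) ([][]byte, error) {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	for _, kv := range headers {
		if err := enc.WriteField(hpack.HeaderField{Name: kv[0], Value: kv[1]}); err != nil {
			return nil, err
		}
	}
	var frags [][]byte
	b := buf.Bytes()
	for len(b) > maxFrameSize {
		frags = append(frags, b[:maxFrameSize])
		b = b[maxFrameSize:]
	}
	return append(frags, b), nil
}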
    +
    + +
    + + A "stream" is an independent, bi-directional sequence of frames exchanged between the client + and server within an HTTP/2 connection. Streams have several important characteristics: + + + A single HTTP/2 connection can contain multiple concurrently open streams, with either + endpoint interleaving frames from multiple streams. + + + Streams can be established and used unilaterally or shared by either the client or + server. + + + Streams can be closed by either endpoint. + + + The order in which frames are sent on a stream is significant. Recipients process frames + in the order they are received. In particular, the order of HEADERS, + and DATA frames is semantically significant. + + + Streams are identified by an integer. Stream identifiers are assigned to streams by the + endpoint initiating the stream. + + + + +
    + + The lifecycle of a stream is shown in . + + +
+ [stream state diagram: idle, reserved (local), reserved (remote), open,
+  half closed (local), half closed (remote), closed]
+
+    H:  HEADERS frame (with implied CONTINUATIONs)
+    PP: PUSH_PROMISE frame (with implied CONTINUATIONs)
+    ES: END_STREAM flag
+    R:  RST_STREAM frame
    + + + Note that this diagram shows stream state transitions and the frames and flags that affect + those transitions only. In this regard, CONTINUATION frames do not result + in state transitions; they are effectively part of the HEADERS or + PUSH_PROMISE that they follow. For this purpose, the END_STREAM flag is + processed as a separate event to the frame that bears it; a HEADERS frame + with the END_STREAM flag set can cause two state transitions. + + + Both endpoints have a subjective view of the state of a stream that could be different + when frames are in transit. Endpoints do not coordinate the creation of streams; they are + created unilaterally by either endpoint. The negative consequences of a mismatch in + states are limited to the "closed" state after sending RST_STREAM, where + frames might be received for some time after closing. + + + Streams have the following states: + + + + + + All streams start in the "idle" state. In this state, no frames have been + exchanged. + + + The following transitions are valid from this state: + + + Sending or receiving a HEADERS frame causes the stream to become + "open". The stream identifier is selected as described in . The same HEADERS frame can also + cause a stream to immediately become "half closed". + + + Sending a PUSH_PROMISE frame marks the associated stream for + later use. The stream state for the reserved stream transitions to "reserved + (local)". + + + Receiving a PUSH_PROMISE frame marks the associated stream as + reserved by the remote peer. The state of the stream becomes "reserved + (remote)". + + + + + Receiving any frames other than HEADERS or + PUSH_PROMISE on a stream in this state MUST be treated as a connection error of type + PROTOCOL_ERROR. + + + + + + + A stream in the "reserved (local)" state is one that has been promised by sending a + PUSH_PROMISE frame. A PUSH_PROMISE frame reserves an + idle stream by associating the stream with an open stream that was initiated by the + remote peer (see ). + + + In this state, only the following transitions are possible: + + + The endpoint can send a HEADERS frame. This causes the stream to + open in a "half closed (remote)" state. + + + Either endpoint can send a RST_STREAM frame to cause the stream + to become "closed". This releases the stream reservation. + + + + + An endpoint MUST NOT send any type of frame other than HEADERS or + RST_STREAM in this state. + + + A PRIORITY frame MAY be received in this state. Receiving any type + of frame other than RST_STREAM or PRIORITY on a stream + in this state MUST be treated as a connection + error of type PROTOCOL_ERROR. + + + + + + + A stream in the "reserved (remote)" state has been reserved by a remote peer. + + + In this state, only the following transitions are possible: + + + Receiving a HEADERS frame causes the stream to transition to + "half closed (local)". + + + Either endpoint can send a RST_STREAM frame to cause the stream + to become "closed". This releases the stream reservation. + + + + + An endpoint MAY send a PRIORITY frame in this state to reprioritize + the reserved stream. An endpoint MUST NOT send any type of frame other than + RST_STREAM, WINDOW_UPDATE, or PRIORITY + in this state. + + + Receiving any type of frame other than HEADERS or + RST_STREAM on a stream in this state MUST be treated as a connection error of type + PROTOCOL_ERROR. + + + + + + + A stream in the "open" state may be used by both peers to send frames of any type. 
+ In this state, sending peers observe advertised stream + level flow control limits. + + + From this state either endpoint can send a frame with an END_STREAM flag set, which + causes the stream to transition into one of the "half closed" states: an endpoint + sending an END_STREAM flag causes the stream state to become "half closed (local)"; + an endpoint receiving an END_STREAM flag causes the stream state to become "half + closed (remote)". + + + Either endpoint can send a RST_STREAM frame from this state, causing + it to transition immediately to "closed". + + + + + + + A stream that is in the "half closed (local)" state cannot be used for sending + frames. Only WINDOW_UPDATE, PRIORITY and + RST_STREAM frames can be sent in this state. + + + A stream transitions from this state to "closed" when a frame that contains an + END_STREAM flag is received, or when either peer sends a RST_STREAM + frame. + + + A receiver can ignore WINDOW_UPDATE frames in this state, which might + arrive for a short period after a frame bearing the END_STREAM flag is sent. + + + PRIORITY frames received in this state are used to reprioritize + streams that depend on the current stream. + + + + + + + A stream that is "half closed (remote)" is no longer being used by the peer to send + frames. In this state, an endpoint is no longer obligated to maintain a receiver + flow control window if it performs flow control. + + + If an endpoint receives additional frames for a stream that is in this state, other + than WINDOW_UPDATE, PRIORITY or + RST_STREAM, it MUST respond with a stream error of type + STREAM_CLOSED. + + + A stream that is "half closed (remote)" can be used by the endpoint to send frames + of any type. In this state, the endpoint continues to observe advertised stream level flow control limits. + + + A stream can transition from this state to "closed" by sending a frame that contains + an END_STREAM flag, or when either peer sends a RST_STREAM frame. + + + + + + + The "closed" state is the terminal state. + + + An endpoint MUST NOT send frames other than PRIORITY on a closed + stream. An endpoint that receives any frame other than PRIORITY + after receiving a RST_STREAM MUST treat that as a stream error of type + STREAM_CLOSED. Similarly, an endpoint that receives any frames after + receiving a frame with the END_STREAM flag set MUST treat that as a connection error of type + STREAM_CLOSED, unless the frame is permitted as described below. + + + WINDOW_UPDATE or RST_STREAM frames can be received in + this state for a short period after a DATA or HEADERS + frame containing an END_STREAM flag is sent. Until the remote peer receives and + processes RST_STREAM or the frame bearing the END_STREAM flag, it + might send frames of these types. Endpoints MUST ignore + WINDOW_UPDATE or RST_STREAM frames received in this + state, though endpoints MAY choose to treat frames that arrive a significant time + after sending END_STREAM as a connection + error of type PROTOCOL_ERROR. + + + PRIORITY frames can be sent on closed streams to prioritize streams + that are dependent on the closed stream. Endpoints SHOULD process + PRIORITY frame, though they can be ignored if the stream has been + removed from the dependency tree (see ). + + + If this state is reached as a result of sending a RST_STREAM frame, + the peer that receives the RST_STREAM might have already sent - or + enqueued for sending - frames on the stream that cannot be withdrawn. 
An endpoint
+ MUST ignore frames that it receives on closed streams after it has sent a
+ RST_STREAM frame. An endpoint MAY choose to limit the period over
+ which it ignores frames and treat frames that arrive after this time as being in
+ error.
+
+
+ Flow controlled frames (i.e., DATA) received after sending
+ RST_STREAM are counted toward the connection flow control window.
+ Even though these frames might be ignored, because they are sent before the sender
+ receives the RST_STREAM, the sender will consider the frames to count
+ against the flow control window.
+
+
+ An endpoint might receive a PUSH_PROMISE frame after it sends
+ RST_STREAM. PUSH_PROMISE causes a stream to become
+ "reserved" even if the associated stream has been reset. Therefore, a
+ RST_STREAM is needed to close an unwanted promised stream.
+
+
+ In the absence of more specific guidance elsewhere in this document, implementations
+ SHOULD treat the receipt of a frame that is not expressly permitted in the description of
+ a state as a connection error of type
+ PROTOCOL_ERROR. Frames of unknown types are ignored.
+
+
+ An example of the state transitions for an HTTP request/response exchange can be found in
+ . An example of the state transitions for server push can be
+ found in and .
+
    + + Streams are identified with an unsigned 31-bit integer. Streams initiated by a client + MUST use odd-numbered stream identifiers; those initiated by the server MUST use + even-numbered stream identifiers. A stream identifier of zero (0x0) is used for + connection control messages; the stream identifier zero cannot be used to establish a + new stream. + + + HTTP/1.1 requests that are upgraded to HTTP/2 (see ) are + responded to with a stream identifier of one (0x1). After the upgrade + completes, stream 0x1 is "half closed (local)" to the client. Therefore, stream 0x1 + cannot be selected as a new stream identifier by a client that upgrades from HTTP/1.1. + + + The identifier of a newly established stream MUST be numerically greater than all + streams that the initiating endpoint has opened or reserved. This governs streams that + are opened using a HEADERS frame and streams that are reserved using + PUSH_PROMISE. An endpoint that receives an unexpected stream identifier + MUST respond with a connection error of + type PROTOCOL_ERROR. + + + The first use of a new stream identifier implicitly closes all streams in the "idle" + state that might have been initiated by that peer with a lower-valued stream identifier. + For example, if a client sends a HEADERS frame on stream 7 without ever + sending a frame on stream 5, then stream 5 transitions to the "closed" state when the + first frame for stream 7 is sent or received. + + + Stream identifiers cannot be reused. Long-lived connections can result in an endpoint + exhausting the available range of stream identifiers. A client that is unable to + establish a new stream identifier can establish a new connection for new streams. A + server that is unable to establish a new stream identifier can send a + GOAWAY frame so that the client is forced to open a new connection for + new streams. + +
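[Editor's sketch] The identifier rules above, written as a server-side check (the function name is this sketch's own):

package sketch

import "fmt"

// checkClientStreamID enforces the rules above for a client-initiated
// stream: the ID is odd, nonzero, and greater than any ID the client
// has already used on this connection.
func checkClientStreamID(id, lastClientID uint32) error {
	switch {
	case id == 0:
		return fmt.Errorf("PROTOCOL_ERROR: stream 0 is reserved for connection control")
	case id%2 == 0:
		return fmt.Errorf("PROTOCOL_ERROR: stream %d is even; clients must use odd IDs", id)
	case id <= lastClientID:
		return fmt.Errorf("PROTOCOL_ERROR: stream %d does not exceed last stream %d", id, lastClientID)
	}
	return nil
}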
    + +
    +
+
+ A peer can limit the number of concurrently active streams using the
+ SETTINGS_MAX_CONCURRENT_STREAMS parameter (see ) within a SETTINGS frame. The maximum concurrent
+ streams setting is specific to each endpoint and applies only to the peer that receives
+ the setting. That is, clients specify the maximum number of concurrent streams the
+ server can initiate, and servers specify the maximum number of concurrent streams the
+ client can initiate.
+
+
+ Streams that are in the "open" state, or either of the "half closed" states, count toward
+ the maximum number of streams that an endpoint is permitted to open. Streams in any of
+ these three states count toward the limit advertised in the
+ SETTINGS_MAX_CONCURRENT_STREAMS setting. Streams in either of the
+ "reserved" states do not count toward the stream limit.
+
+
+ Endpoints MUST NOT exceed the limit set by their peer. An endpoint that receives a
+ HEADERS frame that causes its advertised concurrent stream limit to be
+ exceeded MUST treat this as a stream error. An
+ endpoint that wishes to reduce the value of
+ SETTINGS_MAX_CONCURRENT_STREAMS to a value that is below the current
+ number of open streams can either close streams that exceed the new value or allow
+ streams to complete.
+
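+
+ The counting rule above can be expressed in a short non-normative Go sketch;
+ the state enumeration is invented for illustration:
+
+   package main
+
+   import "fmt"
+
+   type streamState int
+
+   const (
+       stateIdle streamState = iota
+       stateReservedLocal
+       stateReservedRemote
+       stateOpen
+       stateHalfClosedLocal
+       stateHalfClosedRemote
+       stateClosed
+   )
+
+   // countsTowardMaxStreams reports whether a stream in the given state is
+   // charged against SETTINGS_MAX_CONCURRENT_STREAMS: "open" and the two
+   // "half closed" states count; "reserved" states do not.
+   func countsTowardMaxStreams(s streamState) bool {
+       switch s {
+       case stateOpen, stateHalfClosedLocal, stateHalfClosedRemote:
+           return true
+       }
+       return false
+   }
+
+   func main() {
+       active := 0
+       for _, s := range []streamState{stateOpen, stateReservedRemote, stateHalfClosedLocal} {
+           if countsTowardMaxStreams(s) {
+               active++
+           }
+       }
+       fmt.Println(active) // 2
+   }
+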
    +
    + +
    + + Using streams for multiplexing introduces contention over use of the TCP connection, + resulting in blocked streams. A flow control scheme ensures that streams on the same + connection do not destructively interfere with each other. Flow control is used for both + individual streams and for the connection as a whole. + + + HTTP/2 provides for flow control through use of the WINDOW_UPDATE frame. + + +
    + + HTTP/2 stream flow control aims to allow a variety of flow control algorithms to be + used without requiring protocol changes. Flow control in HTTP/2 has the following + characteristics: + + + Flow control is specific to a connection; i.e., it is "hop-by-hop", not + "end-to-end". + + + Flow control is based on window update frames. Receivers advertise how many octets + they are prepared to receive on a stream and for the entire connection. This is a + credit-based scheme. + + + Flow control is directional with overall control provided by the receiver. A + receiver MAY choose to set any window size that it desires for each stream and for + the entire connection. A sender MUST respect flow control limits imposed by a + receiver. Clients, servers and intermediaries all independently advertise their + flow control window as a receiver and abide by the flow control limits set by + their peer when sending. + + + The initial value for the flow control window is 65,535 octets for both new streams + and the overall connection. + + + The frame type determines whether flow control applies to a frame. Of the frames + specified in this document, only DATA frames are subject to flow + control; all other frame types do not consume space in the advertised flow control + window. This ensures that important control frames are not blocked by flow control. + + + Flow control cannot be disabled. + + + HTTP/2 defines only the format and semantics of the WINDOW_UPDATE + frame (). This document does not stipulate how a + receiver decides when to send this frame or the value that it sends, nor does it + specify how a sender chooses to send packets. Implementations are able to select + any algorithm that suits their needs. + + + + + Implementations are also responsible for managing how requests and responses are sent + based on priority; choosing how to avoid head of line blocking for requests; and + managing the creation of new streams. Algorithm choices for these could interact with + any flow control algorithm. + +
    + +
    +
+
+ Flow control is defined to protect endpoints that are operating under resource
+ constraints. For example, a proxy needs to share memory between many connections, and
+ also might have a slow upstream connection and a fast downstream one. Flow control
+ addresses cases where the receiver is unable to process data on one stream, yet wants to
+ continue to process other streams in the same connection.
+
+
+ Deployments that do not require this capability can advertise a flow control window of
+ the maximum size, incrementing the available space when new data is received. This
+ effectively disables flow control for that receiver. Conversely, a sender is always
+ subject to the flow control window advertised by the receiver.
+
+
+ Deployments with constrained resources (for example, memory) can employ flow control to
+ limit the amount of memory a peer can consume. Note, however, that this can lead to
+ suboptimal use of available network resources if flow control is enabled without
+ knowledge of the bandwidth-delay product (see ).
+
+
+ Even with full awareness of the current bandwidth-delay product, implementation of flow
+ control can be difficult. When using flow control, the receiver MUST read from the TCP
+ receive buffer in a timely fashion. Failure to do so could lead to a deadlock when
+ critical frames, such as WINDOW_UPDATE, are not read and acted upon.
+
    +
    + +
    + + A client can assign a priority for a new stream by including prioritization information in + the HEADERS frame that opens the stream. For an existing + stream, the PRIORITY frame can be used to change the + priority. + + + The purpose of prioritization is to allow an endpoint to express how it would prefer its + peer allocate resources when managing concurrent streams. Most importantly, priority can + be used to select streams for transmitting frames when there is limited capacity for + sending. + + + Streams can be prioritized by marking them as dependent on the completion of other streams + (). Each dependency is assigned a relative weight, a number + that is used to determine the relative proportion of available resources that are assigned + to streams dependent on the same stream. + + + + Explicitly setting the priority for a stream is input to a prioritization process. It + does not guarantee any particular processing or transmission order for the stream relative + to any other stream. An endpoint cannot force a peer to process concurrent streams in a + particular order using priority. Expressing priority is therefore only ever a suggestion. + + + Providing prioritization information is optional, so default values are used if no + explicit indicator is provided (). + + +
    + + Each stream can be given an explicit dependency on another stream. Including a + dependency expresses a preference to allocate resources to the identified stream rather + than to the dependent stream. + + + A stream that is not dependent on any other stream is given a stream dependency of 0x0. + In other words, the non-existent stream 0 forms the root of the tree. + + + A stream that depends on another stream is a dependent stream. The stream upon which a + stream is dependent is a parent stream. A dependency on a stream that is not currently + in the tree - such as a stream in the "idle" state - results in that stream being given + a default priority. + + + When assigning a dependency on another stream, the stream is added as a new dependency + of the parent stream. Dependent streams that share the same parent are not ordered with + respect to each other. For example, if streams B and C are dependent on stream A, and + if stream D is created with a dependency on stream A, this results in a dependency order + of A followed by B, C, and D in any order. + +
    +
+     A                 A
+    / \      ==>      /|\
+   B   C             B D C
+]]>
    + + An exclusive flag allows for the insertion of a new level of dependencies. The + exclusive flag causes the stream to become the sole dependency of its parent stream, + causing other dependencies to become dependent on the exclusive stream. In the + previous example, if stream D is created with an exclusive dependency on stream A, this + results in D becoming the dependency parent of B and C. + +
    +
+                        A
+     A                  |
+    / \      ==>        D
+   B   C               / \
+                      B   C
+]]>
    + + Inside the dependency tree, a dependent stream SHOULD only be allocated resources if all + of the streams that it depends on (the chain of parent streams up to 0x0) are either + closed, or it is not possible to make progress on them. + + + A stream cannot depend on itself. An endpoint MUST treat this as a stream error of type PROTOCOL_ERROR. + +
    + +
    + + All dependent streams are allocated an integer weight between 1 and 256 (inclusive). + + + Streams with the same parent SHOULD be allocated resources proportionally based on their + weight. Thus, if stream B depends on stream A with weight 4, and C depends on stream A + with weight 12, and if no progress can be made on A, stream B ideally receives one third + of the resources allocated to stream C. + +
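+
+ As a non-normative illustration, this Go sketch computes the ideal share of
+ parent resources for each sibling from its weight; the names are invented:
+
+   package main
+
+   import "fmt"
+
+   // shares maps each dependent stream to the proportion of the parent's
+   // resources it should ideally receive: its weight divided by the sum of
+   // the weights of all streams that share the same parent.
+   func shares(weights map[string]int) map[string]float64 {
+       total := 0
+       for _, w := range weights {
+           total += w
+       }
+       out := make(map[string]float64, len(weights))
+       for name, w := range weights {
+           out[name] = float64(w) / float64(total)
+       }
+       return out
+   }
+
+   func main() {
+       // B (weight 4) and C (weight 12) depend on A: B receives 0.25 and C
+       // receives 0.75, so B gets one third of C's allocation, as above.
+       fmt.Println(shares(map[string]int{"B": 4, "C": 12}))
+   }
+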
    + +
    + + Stream priorities are changed using the PRIORITY frame. Setting a + dependency causes a stream to become dependent on the identified parent stream. + + + Dependent streams move with their parent stream if the parent is reprioritized. Setting + a dependency with the exclusive flag for a reprioritized stream moves all the + dependencies of the new parent stream to become dependent on the reprioritized stream. + + + If a stream is made dependent on one of its own dependencies, the formerly dependent + stream is first moved to be dependent on the reprioritized stream's previous parent. + The moved dependency retains its weight. + +
    +
+
+ For example, consider an original dependency tree where B and C depend on A, D and E
+ depend on C, and F depends on D. If A is made dependent on D, then D takes the place
+ of A. All other dependency relationships stay the same, except for F, which becomes
+ dependent on A if the reprioritization is exclusive.
+
+    x                x                x                 x
+    |               / \               |                 |
+    A              D   A              D                 D
+   / \            /   / \            / \                |
+  B   C          F   B   C          F   A               A
+     / \                |              / \             /|\
+    D   E               E             B   C           B C F
+    |                                     |             |
+    F                                     E             E
+               (intermediate)   (non-exclusive)    (exclusive)
+]]>
    +
    + +
    + + When a stream is removed from the dependency tree, its dependencies can be moved to + become dependent on the parent of the closed stream. The weights of new dependencies + are recalculated by distributing the weight of the dependency of the closed stream + proportionally based on the weights of its dependencies. + + + Streams that are removed from the dependency tree cause some prioritization information + to be lost. Resources are shared between streams with the same parent stream, which + means that if a stream in that set closes or becomes blocked, any spare capacity + allocated to a stream is distributed to the immediate neighbors of the stream. However, + if the common dependency is removed from the tree, those streams share resources with + streams at the next highest level. + + + For example, assume streams A and B share a parent, and streams C and D both depend on + stream A. Prior to the removal of stream A, if streams A and D are unable to proceed, + then stream C receives all the resources dedicated to stream A. If stream A is removed + from the tree, the weight of stream A is divided between streams C and D. If stream D + is still unable to proceed, this results in stream C receiving a reduced proportion of + resources. For equal starting weights, C receives one third, rather than one half, of + available resources. + + + It is possible for a stream to become closed while prioritization information that + creates a dependency on that stream is in transit. If a stream identified in a + dependency has no associated priority information, then the dependent stream is instead + assigned a default priority. This potentially creates + suboptimal prioritization, since the stream could be given a priority that is different + to what is intended. + + + To avoid these problems, an endpoint SHOULD retain stream prioritization state for a + period after streams become closed. The longer state is retained, the lower the chance + that streams are assigned incorrect or default priority values. + + + This could create a large state burden for an endpoint, so this state MAY be limited. + An endpoint MAY apply a fixed upper limit on the number of closed streams for which + prioritization state is tracked to limit state exposure. The amount of additional state + an endpoint maintains could be dependent on load; under high load, prioritization state + can be discarded to limit resource commitments. In extreme cases, an endpoint could + even discard prioritization state for active or reserved streams. If a fixed limit is + applied, endpoints SHOULD maintain state for at least as many streams as allowed by + their setting for SETTINGS_MAX_CONCURRENT_STREAMS. + + + An endpoint receiving a PRIORITY frame that changes the priority of a + closed stream SHOULD alter the dependencies of the streams that depend on it, if it has + retained enough state to do so. + +
    + +
    + + Providing priority information is optional. Streams are assigned a non-exclusive + dependency on stream 0x0 by default. Pushed streams + initially depend on their associated stream. In both cases, streams are assigned a + default weight of 16. + +
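+
+ A non-normative Go sketch of these defaults; the struct and helper are
+ invented for illustration:
+
+   package main
+
+   import "fmt"
+
+   // priority models prioritization state: a parent stream, an exclusive
+   // flag, and an effective weight in the range 1..256.
+   type priority struct {
+       StreamDep uint32 // parent stream identifier; 0x0 is the tree root
+       Exclusive bool
+       Weight    int
+   }
+
+   // defaultPriority returns the values assigned when nothing is signalled:
+   // a non-exclusive dependency with weight 16, rooted at stream 0x0 for
+   // ordinary streams or at the associated stream for pushed streams.
+   func defaultPriority(pushed bool, associatedStream uint32) priority {
+       dep := uint32(0)
+       if pushed {
+           dep = associatedStream
+       }
+       return priority{StreamDep: dep, Exclusive: false, Weight: 16}
+   }
+
+   func main() {
+       fmt.Println(defaultPriority(false, 0)) // {0 false 16}
+       fmt.Println(defaultPriority(true, 5))  // {5 false 16}
+   }
+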
    +
    + +
    + + HTTP/2 framing permits two classes of error: + + + An error condition that renders the entire connection unusable is a connection error. + + + An error in an individual stream is a stream error. + + + + + A list of error codes is included in . + + +
    + + A connection error is any error which prevents further processing of the framing layer, + or which corrupts any connection state. + + + An endpoint that encounters a connection error SHOULD first send a GOAWAY + frame () with the stream identifier of the last stream that it + successfully received from its peer. The GOAWAY frame includes an error + code that indicates why the connection is terminating. After sending the + GOAWAY frame, the endpoint MUST close the TCP connection. + + + It is possible that the GOAWAY will not be reliably received by the + receiving endpoint (see ). In the event of a connection error, + GOAWAY only provides a best effort attempt to communicate with the peer + about why the connection is being terminated. + + + An endpoint can end a connection at any time. In particular, an endpoint MAY choose to + treat a stream error as a connection error. Endpoints SHOULD send a + GOAWAY frame when ending a connection, providing that circumstances + permit it. + +
    + +
    + + A stream error is an error related to a specific stream that does not affect processing + of other streams. + + + An endpoint that detects a stream error sends a RST_STREAM frame () that contains the stream identifier of the stream where the error + occurred. The RST_STREAM frame includes an error code that indicates the + type of error. + + + A RST_STREAM is the last frame that an endpoint can send on a stream. + The peer that sends the RST_STREAM frame MUST be prepared to receive any + frames that were sent or enqueued for sending by the remote peer. These frames can be + ignored, except where they modify connection state (such as the state maintained for + header compression, or flow control). + + + Normally, an endpoint SHOULD NOT send more than one RST_STREAM frame for + any stream. However, an endpoint MAY send additional RST_STREAM frames if + it receives frames on a closed stream after more than a round-trip time. This behavior + is permitted to deal with misbehaving implementations. + + + An endpoint MUST NOT send a RST_STREAM in response to an + RST_STREAM frame, to avoid looping. + +
    + +
    + + If the TCP connection is closed or reset while streams remain in open or half closed + states, then the endpoint MUST assume that those streams were abnormally interrupted and + could be incomplete. + +
    +
    + +
    + + HTTP/2 permits extension of the protocol. Protocol extensions can be used to provide + additional services or alter any aspect of the protocol, within the limitations described + in this section. Extensions are effective only within the scope of a single HTTP/2 + connection. + + + Extensions are permitted to use new frame types, new + settings, or new error + codes. Registries are established for managing these extension points: frame types, settings and + error codes. + + + Implementations MUST ignore unknown or unsupported values in all extensible protocol + elements. Implementations MUST discard frames that have unknown or unsupported types. + This means that any of these extension points can be safely used by extensions without + prior arrangement or negotiation. However, extension frames that appear in the middle of + a header block are not permitted; these MUST be treated + as a connection error of type + PROTOCOL_ERROR. + + + However, extensions that could change the semantics of existing protocol components MUST + be negotiated before being used. For example, an extension that changes the layout of the + HEADERS frame cannot be used until the peer has given a positive signal + that this is acceptable. In this case, it could also be necessary to coordinate when the + revised layout comes into effect. Note that treating any frame other than + DATA frames as flow controlled is such a change in semantics, and can only + be done through negotiation. + + + This document doesn't mandate a specific method for negotiating the use of an extension, + but notes that a setting could be used for that + purpose. If both peers set a value that indicates willingness to use the extension, then + the extension can be used. If a setting is used for extension negotiation, the initial + value MUST be defined so that the extension is initially disabled. + +
    +
    + +
    +
+
+ This specification defines a number of frame types, each identified by a unique 8-bit type
+ code. Each frame type serves a distinct purpose either in the establishment and management
+ of the connection as a whole, or of individual streams.
+
+
+ The transmission of specific frame types can alter the state of a connection. If endpoints
+ fail to maintain a synchronized view of the connection state, successful communication
+ within the connection will no longer be possible. Therefore, it is important that endpoints
+ have a shared comprehension of how the state is affected by the use of any given frame.
+
+
    + + DATA frames (type=0x0) convey arbitrary, variable-length sequences of octets associated + with a stream. One or more DATA frames are used, for instance, to carry HTTP request or + response payloads. + + + DATA frames MAY also contain arbitrary padding. Padding can be added to DATA frames to + obscure the size of messages. + +
    + +
    + + The DATA frame contains the following fields: + + + An 8-bit field containing the length of the frame padding in units of octets. This + field is optional and is only present if the PADDED flag is set. + + + Application data. The amount of data is the remainder of the frame payload after + subtracting the length of the other fields that are present. + + + Padding octets that contain no application semantic value. Padding octets MUST be set + to zero when sending and ignored when receiving. + + + + + + The DATA frame defines the following flags: + + + Bit 1 being set indicates that this frame is the last that the endpoint will send for + the identified stream. Setting this flag causes the stream to enter one of the "half closed" states or the "closed" state. + + + Bit 4 being set indicates that the Pad Length field and any padding that it describes + is present. + + + + + DATA frames MUST be associated with a stream. If a DATA frame is received whose stream + identifier field is 0x0, the recipient MUST respond with a connection error of type + PROTOCOL_ERROR. + + + DATA frames are subject to flow control and can only be sent when a stream is in the + "open" or "half closed (remote)" states. The entire DATA frame payload is included in flow + control, including Pad Length and Padding fields if present. If a DATA frame is received + whose stream is not in "open" or "half closed (local)" state, the recipient MUST respond + with a stream error of type + STREAM_CLOSED. + + + The total number of padding octets is determined by the value of the Pad Length field. If + the length of the padding is greater than the length of the frame payload, the recipient + MUST treat this as a connection error of + type PROTOCOL_ERROR. + + + A frame can be increased in size by one octet by including a Pad Length field with a + value of zero. + + + + + Padding is a security feature; see . + +
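+
+ A non-normative Go sketch of the padding rules above; the flag constant and
+ helper are invented for this example (bit 4 of the flags corresponds to the
+ value 0x8 here):
+
+   package main
+
+   import (
+       "errors"
+       "fmt"
+   )
+
+   const flagPadded = 0x8 // PADDED, bit 4 of the DATA frame flags
+
+   // splitDataPayload separates a DATA frame payload into application data
+   // and padding; the error cases stand in for the connection error of type
+   // PROTOCOL_ERROR described above.
+   func splitDataPayload(flags byte, payload []byte) (data, padding []byte, err error) {
+       if flags&flagPadded == 0 {
+           return payload, nil, nil
+       }
+       if len(payload) < 1 {
+           return nil, nil, errors.New("PADDED set but no Pad Length octet")
+       }
+       padLen := int(payload[0])
+       rest := payload[1:]
+       if padLen > len(rest) {
+           return nil, nil, errors.New("padding exceeds frame payload")
+       }
+       return rest[:len(rest)-padLen], rest[len(rest)-padLen:], nil
+   }
+
+   func main() {
+       data, pad, err := splitDataPayload(flagPadded, []byte{2, 'h', 'i', 0, 0})
+       fmt.Println(string(data), len(pad), err) // hi 2 <nil>
+   }
+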
    + +
    + + The HEADERS frame (type=0x1) is used to open a stream, + and additionally carries a header block fragment. HEADERS frames can be sent on a stream + in the "open" or "half closed (remote)" states. + +
    + +
    +
+
+ The HEADERS frame payload has the following fields:
+
+
+ An 8-bit field containing the length of the frame padding in units of octets. This
+ field is only present if the PADDED flag is set.
+
+
+ A single bit flag indicates that the stream dependency is exclusive, see . This field is only present if the PRIORITY flag is set.
+
+
+ A 31-bit stream identifier for the stream that this stream depends on, see . This field is only present if the PRIORITY flag is set.
+
+
+ An 8-bit weight for the stream, see . Add one to the
+ value to obtain a weight between 1 and 256. This field is only present if the
+ PRIORITY flag is set.
+
+
+ A header block fragment.
+
+
+ Padding octets that contain no application semantic value. Padding octets MUST be set
+ to zero when sending and ignored when receiving.
+
+
+
+
+
+ The HEADERS frame defines the following flags:
+
+
+
+ Bit 1 being set indicates that the header block is
+ the last that the endpoint will send for the identified stream. Setting this flag
+ causes the stream to enter one of the "half closed"
+ states.
+
+
+ A HEADERS frame carries the END_STREAM flag that signals the end of a stream.
+ However, a HEADERS frame with the END_STREAM flag set can be followed by
+ CONTINUATION frames on the same stream. Logically, the
+ CONTINUATION frames are part of the HEADERS frame.
+
+
+
+
+ Bit 3 being set indicates that this frame contains an entire header block and is not followed by any
+ CONTINUATION frames.
+
+
+ A HEADERS frame without the END_HEADERS flag set MUST be followed by a
+ CONTINUATION frame for the same stream. A receiver MUST treat the
+ receipt of any other type of frame or a frame on a different stream as a connection error of type
+ PROTOCOL_ERROR.
+
+
+
+
+ Bit 4 being set indicates that the Pad Length field and any padding that it
+ describes is present.
+
+
+
+
+ Bit 6 being set indicates that the Exclusive Flag (E), Stream Dependency, and Weight
+ fields are present; see .
+
+
+
+
+
+
+ The payload of a HEADERS frame contains a header block
+ fragment. A header block that does not fit within a HEADERS frame is continued in
+ a CONTINUATION frame.
+
+
+
+ HEADERS frames MUST be associated with a stream. If a HEADERS frame is received whose
+ stream identifier field is 0x0, the recipient MUST respond with a connection error of type
+ PROTOCOL_ERROR.
+
+
+
+ The HEADERS frame changes the connection state as described in .
+
+
+
+ The HEADERS frame includes optional padding. Padding fields and flags are identical to
+ those defined for DATA frames.
+
+
+ Prioritization information in a HEADERS frame is logically equivalent to a separate
+ PRIORITY frame, but inclusion in HEADERS avoids the potential for churn in
+ stream prioritization when new streams are created. Prioritization fields in HEADERS frames
+ subsequent to the first on a stream reprioritize the
+ stream.
+
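+
+ When the PRIORITY flag is set, the Exclusive flag, Stream Dependency, and
+ Weight occupy five octets. A non-normative Go sketch of decoding them (the
+ helper name is invented):
+
+   package main
+
+   import (
+       "encoding/binary"
+       "errors"
+       "fmt"
+   )
+
+   // parsePriorityFields reads one bit E, a 31-bit stream dependency, and a
+   // weight octet to which 1 is added to obtain the effective weight 1..256.
+   func parsePriorityFields(b []byte) (exclusive bool, dep uint32, weight int, err error) {
+       if len(b) < 5 {
+           return false, 0, 0, errors.New("need 5 octets of priority fields")
+       }
+       v := binary.BigEndian.Uint32(b[:4])
+       exclusive = v&0x80000000 != 0 // E is the high bit
+       dep = v & 0x7fffffff          // 31-bit stream dependency
+       weight = int(b[4]) + 1
+       return exclusive, dep, weight, nil
+   }
+
+   func main() {
+       // E=1, dependency on stream 3, wire weight 15 (effective weight 16).
+       fmt.Println(parsePriorityFields([]byte{0x80, 0x00, 0x00, 0x03, 15}))
+   }
+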
    + +
    + + The PRIORITY frame (type=0x2) specifies the sender-advised + priority of a stream. It can be sent at any time for an existing stream, including + closed streams. This enables reprioritization of existing streams. + +
    + +
    +
+
+ The payload of a PRIORITY frame contains the following fields:
+
+
+ A single bit flag indicates that the stream dependency is exclusive, see .
+
+
+ A 31-bit stream identifier for the stream that this stream depends on, see .
+
+
+ An 8-bit weight for the identified stream dependency, see . Add one to the value to obtain a weight between 1 and 256.
+
+
+
+
+ The PRIORITY frame does not define any flags.
+
+
+
+ The PRIORITY frame is associated with an existing stream. If a PRIORITY frame is received
+ with a stream identifier of 0x0, the recipient MUST respond with a connection error of type
+ PROTOCOL_ERROR.
+
+
+ The PRIORITY frame can be sent on a stream in any of the "reserved (remote)", "open",
+ "half closed (local)", "half closed (remote)", or "closed" states, though it cannot be
+ sent between consecutive frames that comprise a single header
+ block. Note that this frame could arrive after processing or frame sending has
+ completed, which would cause it to have no effect on the current stream. For a stream
+ that is in the "half closed (remote)" or "closed" state, this frame can only affect
+ processing of the current stream and not frame transmission.
+
+
+ The PRIORITY frame is the only frame that can be sent for a stream in the "closed" state.
+ This allows for the reprioritization of a group of dependent streams by altering the
+ priority of a parent stream, which might be closed. However, a PRIORITY frame sent on a
+ closed stream risks being ignored due to the peer having discarded priority state
+ information for that stream.
+
    + +
    + + The RST_STREAM frame (type=0x3) allows for abnormal termination of a stream. When sent by + the initiator of a stream, it indicates that they wish to cancel the stream or that an + error condition has occurred. When sent by the receiver of a stream, it indicates that + either the receiver is rejecting the stream, requesting that the stream be cancelled, or + that an error condition has occurred. + +
    + +
    + + + The RST_STREAM frame contains a single unsigned, 32-bit integer identifying the error code. The error code indicates why the stream is being + terminated. + + + + The RST_STREAM frame does not define any flags. + + + + The RST_STREAM frame fully terminates the referenced stream and causes it to enter the + closed state. After receiving a RST_STREAM on a stream, the receiver MUST NOT send + additional frames for that stream, with the exception of PRIORITY. However, + after sending the RST_STREAM, the sending endpoint MUST be prepared to receive and process + additional frames sent on the stream that might have been sent by the peer prior to the + arrival of the RST_STREAM. + + + + RST_STREAM frames MUST be associated with a stream. If a RST_STREAM frame is received + with a stream identifier of 0x0, the recipient MUST treat this as a connection error of type + PROTOCOL_ERROR. + + + + RST_STREAM frames MUST NOT be sent for a stream in the "idle" state. If a RST_STREAM + frame identifying an idle stream is received, the recipient MUST treat this as a connection error of type + PROTOCOL_ERROR. + + +
    + +
    +
+
+ The SETTINGS frame (type=0x4) conveys configuration parameters that affect how endpoints
+ communicate, such as preferences and constraints on peer behavior. The SETTINGS frame is
+ also used to acknowledge the receipt of those parameters. Individually, a SETTINGS
+ parameter can also be referred to as a "setting".
+
+
+ SETTINGS parameters are not negotiated; they describe characteristics of the sending peer,
+ which are used by the receiving peer. Different values for the same parameter can be
+ advertised by each peer. For example, a client might set a high initial flow control
+ window, whereas a server might set a lower value to conserve resources.
+
+
+
+ A SETTINGS frame MUST be sent by both endpoints at the start of a connection, and MAY be
+ sent at any other time by either endpoint over the lifetime of the connection.
+ Implementations MUST support all of the parameters defined by this specification.
+
+
+
+ Each parameter in a SETTINGS frame replaces any existing value for that parameter.
+ Parameters are processed in the order in which they appear, and a receiver of a SETTINGS
+ frame does not need to maintain any state other than the current value of its
+ parameters. Therefore, the value of a SETTINGS parameter is the last value that is seen by
+ a receiver.
+
+
+ SETTINGS parameters are acknowledged by the receiving peer. To enable this, the SETTINGS
+ frame defines the following flag:
+
+
+ Bit 1 being set indicates that this frame acknowledges receipt and application of the
+ peer's SETTINGS frame. When this bit is set, the payload of the SETTINGS frame MUST
+ be empty. Receipt of a SETTINGS frame with the ACK flag set and a length field value
+ other than 0 MUST be treated as a connection
+ error of type FRAME_SIZE_ERROR. For more information, see Settings Synchronization.
+
+
+
+
+ SETTINGS frames always apply to a connection, never a single stream. The stream
+ identifier for a SETTINGS frame MUST be zero (0x0). If an endpoint receives a SETTINGS
+ frame whose stream identifier field is anything other than 0x0, the endpoint MUST respond
+ with a connection error of type
+ PROTOCOL_ERROR.
+
+
+ The SETTINGS frame affects connection state. A badly formed or incomplete SETTINGS frame
+ MUST be treated as a connection error of type
+ PROTOCOL_ERROR.
+
+
    + + The payload of a SETTINGS frame consists of zero or more parameters, each consisting of + an unsigned 16-bit setting identifier and an unsigned 32-bit value. + + +
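+
+ A non-normative Go sketch of decoding that payload layout (six octets per
+ parameter; names invented). The length check stands in for treating a badly
+ formed SETTINGS frame as a connection error:
+
+   package main
+
+   import (
+       "encoding/binary"
+       "errors"
+       "fmt"
+   )
+
+   type setting struct {
+       ID  uint16
+       Val uint32
+   }
+
+   // decodeSettings splits a SETTINGS payload into 16-bit identifier and
+   // 32-bit value pairs.
+   func decodeSettings(payload []byte) ([]setting, error) {
+       if len(payload)%6 != 0 {
+           return nil, errors.New("SETTINGS payload is not a multiple of 6 octets")
+       }
+       out := make([]setting, 0, len(payload)/6)
+       for i := 0; i < len(payload); i += 6 {
+           out = append(out, setting{
+               ID:  binary.BigEndian.Uint16(payload[i : i+2]),
+               Val: binary.BigEndian.Uint32(payload[i+2 : i+6]),
+           })
+       }
+       return out, nil
+   }
+
+   func main() {
+       fmt.Println(decodeSettings([]byte{0x00, 0x03, 0x00, 0x00, 0x00, 0x64}))
+   }
+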
    + +
    +
    + +
    +
+
+ The following parameters are defined:
+
+
+
+ Allows the sender to inform the remote endpoint of the maximum size of the header
+ compression table used to decode header blocks, in octets. The encoder can select
+ any size equal to or less than this value by using signaling specific to the
+ header compression format inside a header block. The initial value is 4,096
+ octets.
+
+
+
+
+ This setting can be used to disable server
+ push. An endpoint MUST NOT send a PUSH_PROMISE frame if it
+ receives this parameter set to a value of 0. An endpoint that has both set this
+ parameter to 0 and had it acknowledged MUST treat the receipt of a
+ PUSH_PROMISE frame as a connection error of type
+ PROTOCOL_ERROR.
+
+
+ The initial value is 1, which indicates that server push is permitted. Any value
+ other than 0 or 1 MUST be treated as a connection error of type
+ PROTOCOL_ERROR.
+
+
+
+
+ Indicates the maximum number of concurrent streams that the sender will allow.
+ This limit is directional: it applies to the number of streams that the sender
+ permits the receiver to create. Initially there is no limit to this value. It is
+ recommended that this value be no smaller than 100, so as to not unnecessarily
+ limit parallelism.
+
+
+ A value of 0 for SETTINGS_MAX_CONCURRENT_STREAMS SHOULD NOT be treated as special
+ by endpoints. A zero value does prevent the creation of new streams, however this
+ can also happen for any limit that is exhausted with active streams. Servers
+ SHOULD only set a zero value for short durations; if a server does not wish to
+ accept requests, closing the connection could be preferable.
+
+
+
+
+ Indicates the sender's initial window size (in octets) for stream level flow
+ control. The initial value is 2^16-1 (65,535) octets.
+
+
+ This setting affects the window size of all streams, including existing streams,
+ see .
+
+
+ Values above the maximum flow control window size of 2^31-1 MUST
+ be treated as a connection error of
+ type FLOW_CONTROL_ERROR.
+
+
+
+
+ Indicates the size of the largest frame payload that the sender is willing to
+ receive, in octets.
+
+
+ The initial value is 2^14 (16,384) octets. The value advertised by
+ an endpoint MUST be between this initial value and the maximum allowed frame size
+ (2^24-1 or 16,777,215 octets), inclusive. Values outside this range
+ MUST be treated as a connection error
+ of type PROTOCOL_ERROR.
+
+
+
+
+ This advisory setting informs a peer of the maximum size of header list that the
+ sender is prepared to accept, in octets. The value is based on the uncompressed
+ size of header fields, including the length of the name and value in octets plus
+ an overhead of 32 octets for each header field.
+
+
+ For any given request, a lower limit than what is advertised MAY be enforced. The
+ initial value of this setting is unlimited.
+
+
+
+
+
+ An endpoint that receives a SETTINGS frame with any unknown or unsupported identifier
+ MUST ignore that setting.
+
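+
+ The range rules above can be illustrated with a non-normative Go sketch. The
+ numeric identifiers used here are examples only (this text does not show the
+ registered code points), and the errors stand in for the required connection
+ errors:
+
+   package main
+
+   import (
+       "errors"
+       "fmt"
+   )
+
+   const (
+       settingEnablePush        = 0x2 // assumed code point, for illustration
+       settingInitialWindowSize = 0x4 // assumed code point, for illustration
+       settingMaxFrameSize      = 0x5 // assumed code point, for illustration
+   )
+
+   func checkSetting(id uint16, val uint32) error {
+       switch id {
+       case settingEnablePush:
+           if val > 1 {
+               return errors.New("ENABLE_PUSH must be 0 or 1: PROTOCOL_ERROR")
+           }
+       case settingInitialWindowSize:
+           if val > 1<<31-1 {
+               return errors.New("window above 2^31-1: FLOW_CONTROL_ERROR")
+           }
+       case settingMaxFrameSize:
+           if val < 1<<14 || val > 1<<24-1 {
+               return errors.New("frame size outside 2^14..2^24-1: PROTOCOL_ERROR")
+           }
+       default:
+           // Unknown or unsupported identifiers MUST be ignored.
+       }
+       return nil
+   }
+
+   func main() {
+       fmt.Println(checkSetting(settingEnablePush, 2))       // error
+       fmt.Println(checkSetting(settingMaxFrameSize, 1<<14)) // <nil>
+   }
+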
    + +
    + + Most values in SETTINGS benefit from or require an understanding of when the peer has + received and applied the changed parameter values. In order to provide + such synchronization timepoints, the recipient of a SETTINGS frame in which the ACK flag + is not set MUST apply the updated parameters as soon as possible upon receipt. + + + The values in the SETTINGS frame MUST be processed in the order they appear, with no + other frame processing between values. Unsupported parameters MUST be ignored. Once + all values have been processed, the recipient MUST immediately emit a SETTINGS frame + with the ACK flag set. Upon receiving a SETTINGS frame with the ACK flag set, the sender + of the altered parameters can rely on the setting having been applied. + + + If the sender of a SETTINGS frame does not receive an acknowledgement within a + reasonable amount of time, it MAY issue a connection error of type + SETTINGS_TIMEOUT. + +
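+
+ A non-normative Go sketch of this apply-then-acknowledge sequence; the
+ callbacks are invented stand-ins for frame processing and frame sending:
+
+   package main
+
+   import "fmt"
+
+   type settingParam struct {
+       ID  uint16
+       Val uint32
+   }
+
+   // applySettings processes the parameters in order and then immediately
+   // acknowledges; apply is expected to ignore unsupported identifiers.
+   func applySettings(params []settingParam, apply func(settingParam), sendAck func()) {
+       for _, p := range params {
+           apply(p)
+       }
+       sendAck() // a SETTINGS frame with the ACK flag set and an empty payload
+   }
+
+   func main() {
+       applySettings(
+           []settingParam{{ID: 0x4, Val: 32768}},
+           func(p settingParam) { fmt.Println("applied", p.ID, p.Val) },
+           func() { fmt.Println("SETTINGS ACK sent") },
+       )
+   }
+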
    +
    + +
    + + The PUSH_PROMISE frame (type=0x5) is used to notify the peer endpoint in advance of + streams the sender intends to initiate. The PUSH_PROMISE frame includes the unsigned + 31-bit identifier of the stream the endpoint plans to create along with a set of headers + that provide additional context for the stream. contains a + thorough description of the use of PUSH_PROMISE frames. + + +
    + +
    + + The PUSH_PROMISE frame payload has the following fields: + + + An 8-bit field containing the length of the frame padding in units of octets. This + field is only present if the PADDED flag is set. + + + A single reserved bit. + + + An unsigned 31-bit integer that identifies the stream that is reserved by the + PUSH_PROMISE. The promised stream identifier MUST be a valid choice for the next + stream sent by the sender (see new stream + identifier). + + + A header block fragment containing request header + fields. + + + Padding octets. + + + + + + The PUSH_PROMISE frame defines the following flags: + + + + Bit 3 being set indicates that this frame contains an entire header block and is not followed by any + CONTINUATION frames. + + + A PUSH_PROMISE frame without the END_HEADERS flag set MUST be followed by a + CONTINUATION frame for the same stream. A receiver MUST treat the receipt of any + other type of frame or a frame on a different stream as a connection error of type + PROTOCOL_ERROR. + + + + + Bit 4 being set indicates that the Pad Length field and any padding that it + describes is present. + + + + + + + PUSH_PROMISE frames MUST be associated with an existing, peer-initiated stream. The stream + identifier of a PUSH_PROMISE frame indicates the stream it is associated with. If the + stream identifier field specifies the value 0x0, a recipient MUST respond with a connection error of type + PROTOCOL_ERROR. + + + + Promised streams are not required to be used in the order they are promised. The + PUSH_PROMISE only reserves stream identifiers for later use. + + + + PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH setting of the + peer endpoint is set to 0. An endpoint that has set this setting and has received + acknowledgement MUST treat the receipt of a PUSH_PROMISE frame as a connection error of type + PROTOCOL_ERROR. + + + Recipients of PUSH_PROMISE frames can choose to reject promised streams by returning a + RST_STREAM referencing the promised stream identifier back to the sender of + the PUSH_PROMISE. + + + + A PUSH_PROMISE frame modifies the connection state in two ways. The inclusion of a header block potentially modifies the state maintained for + header compression. PUSH_PROMISE also reserves a stream for later use, causing the + promised stream to enter the "reserved" state. A sender MUST NOT send a PUSH_PROMISE on a + stream unless that stream is either "open" or "half closed (remote)"; the sender MUST + ensure that the promised stream is a valid choice for a new stream identifier (that is, the promised stream MUST + be in the "idle" state). + + + Since PUSH_PROMISE reserves a stream, ignoring a PUSH_PROMISE frame causes the stream + state to become indeterminate. A receiver MUST treat the receipt of a PUSH_PROMISE on a + stream that is neither "open" nor "half closed (local)" as a connection error of type + PROTOCOL_ERROR. However, an endpoint that has sent + RST_STREAM on the associated stream MUST handle PUSH_PROMISE frames that + might have been created before the RST_STREAM frame is received and + processed. + + + A receiver MUST treat the receipt of a PUSH_PROMISE that promises an illegal stream identifier (that is, an identifier for a + stream that is not currently in the "idle" state) as a connection error of type + PROTOCOL_ERROR. + + + + The PUSH_PROMISE frame includes optional padding. Padding fields and flags are identical + to those defined for DATA frames. + +
    + +
    + + The PING frame (type=0x6) is a mechanism for measuring a minimal round trip time from the + sender, as well as determining whether an idle connection is still functional. PING + frames can be sent from any endpoint. + +
    + +
    + + + In addition to the frame header, PING frames MUST contain 8 octets of data in the payload. + A sender can include any value it chooses and use those bytes in any fashion. + + + Receivers of a PING frame that does not include an ACK flag MUST send a PING frame with + the ACK flag set in response, with an identical payload. PING responses SHOULD be given + higher priority than any other frame. + + + + The PING frame defines the following flags: + + + Bit 1 being set indicates that this PING frame is a PING response. An endpoint MUST + set this flag in PING responses. An endpoint MUST NOT respond to PING frames + containing this flag. + + + + + PING frames are not associated with any individual stream. If a PING frame is received + with a stream identifier field value other than 0x0, the recipient MUST respond with a + connection error of type + PROTOCOL_ERROR. + + + Receipt of a PING frame with a length field value other than 8 MUST be treated as a connection error of type + FRAME_SIZE_ERROR. + + +
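+
+ A non-normative Go sketch of the PING rules above (the helper and flag
+ constant are invented; bit 1 of the flags corresponds to the value 0x1 here):
+
+   package main
+
+   import (
+       "errors"
+       "fmt"
+   )
+
+   const flagPingAck = 0x1 // ACK, bit 1 of the PING flags
+
+   // handlePing validates the 8-octet payload, echoes a request with the ACK
+   // flag set, and stays silent for a frame that is itself a response.
+   func handlePing(flags byte, payload []byte) (reply []byte, replyFlags byte, err error) {
+       if len(payload) != 8 {
+           return nil, 0, errors.New("PING payload must be 8 octets: FRAME_SIZE_ERROR")
+       }
+       if flags&flagPingAck != 0 {
+           return nil, 0, nil // MUST NOT respond to a PING response
+       }
+       return append([]byte(nil), payload...), flagPingAck, nil
+   }
+
+   func main() {
+       reply, f, err := handlePing(0, []byte{1, 2, 3, 4, 5, 6, 7, 8})
+       fmt.Println(reply, f, err) // [1 2 3 4 5 6 7 8] 1 <nil>
+   }
+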
    + +
    +
+
+ The GOAWAY frame (type=0x7) informs the remote peer to stop creating streams on this
+ connection. GOAWAY can be sent by either the client or the server. Once sent, the sender
+ will ignore frames sent on any new streams with identifiers higher than the included last
+ stream identifier. Receivers of a GOAWAY frame MUST NOT open additional streams on the
+ connection, although a new connection can be established for new streams.
+
+
+ The purpose of this frame is to allow an endpoint to gracefully stop accepting new
+ streams, while still finishing processing of previously established streams. This enables
+ administrative actions, like server maintenance.
+
+
+ There is an inherent race condition between an endpoint starting new streams and the
+ remote sending a GOAWAY frame. To deal with this case, the GOAWAY contains the stream
+ identifier of the last peer-initiated stream which was or might be processed on the
+ sending endpoint in this connection. For instance, if the server sends a GOAWAY frame,
+ the identified stream is the highest numbered stream initiated by the client.
+
+
+ If the receiver of the GOAWAY has sent data on streams with a higher stream identifier
+ than what is indicated in the GOAWAY frame, those streams are not or will not be
+ processed. The receiver of the GOAWAY frame can treat the streams as though they had
+ never been created at all, thereby allowing those streams to be retried later on a new
+ connection.
+
+
+ Endpoints SHOULD always send a GOAWAY frame before closing a connection so that the remote
+ can know whether a stream has been partially processed or not. For example, if an HTTP
+ client sends a POST at the same time that a server closes a connection, the client cannot
+ know if the server started to process that POST request if the server does not send a
+ GOAWAY frame to indicate what streams it might have acted on.
+
+
+ An endpoint might choose to close a connection without sending GOAWAY for misbehaving
+ peers.
+
+
    + +
    +
+
+ The GOAWAY frame does not define any flags.
+
+
+ The GOAWAY frame applies to the connection, not a specific stream. An endpoint MUST treat
+ a GOAWAY frame with a stream identifier other than 0x0 as a connection error of type
+ PROTOCOL_ERROR.
+
+
+ The last stream identifier in the GOAWAY frame contains the highest numbered stream
+ identifier for which the sender of the GOAWAY frame might have taken some action on, or
+ might yet take action on. All streams up to and including the identified stream might
+ have been processed in some way. The last stream identifier can be set to 0 if no streams
+ were processed.
+
+
+ In this context, "processed" means that some data from the stream was passed to some
+ higher layer of software that might have taken some action as a result.
+
+
+ If a connection terminates without a GOAWAY frame, the last stream identifier is
+ effectively the highest possible stream identifier.
+
+
+ On streams with lower or equal numbered identifiers that were not closed completely prior
+ to the connection being closed, re-attempting requests, transactions, or any protocol
+ activity is not possible, with the exception of idempotent actions like HTTP GET, PUT, or
+ DELETE. Any protocol activity that uses higher numbered streams can be safely retried
+ using a new connection.
+
+
+ Activity on streams numbered lower or equal to the last stream identifier might still
+ complete successfully. The sender of a GOAWAY frame might gracefully shut down a
+ connection by sending a GOAWAY frame, maintaining the connection in an open state until
+ all in-progress streams complete.
+
+
+ An endpoint MAY send multiple GOAWAY frames if circumstances change. For instance, an
+ endpoint that sends GOAWAY with NO_ERROR during graceful shutdown could
+ subsequently encounter a condition that requires immediate termination of the connection.
+ The last stream identifier from the last GOAWAY frame received indicates which streams
+ could have been acted upon. Endpoints MUST NOT increase the value they send in the last
+ stream identifier, since the peers might already have retried unprocessed requests on
+ another connection.
+
+
+ A client that is unable to retry requests loses all requests that are in flight when the
+ server closes the connection. This is especially true for intermediaries that might
+ not be serving clients using HTTP/2. A server that is attempting to gracefully shut down
+ a connection SHOULD send an initial GOAWAY frame with the last stream identifier set to
+ 2^31-1 and a NO_ERROR code. This signals to the client that
+ a shutdown is imminent and that no further requests can be initiated. After waiting at
+ least one round trip time, the server can send another GOAWAY frame with an updated last
+ stream identifier. This ensures that a connection can be cleanly shut down without losing
+ requests.
+
+
+
+ After sending a GOAWAY frame, the sender can discard frames for streams with identifiers
+ higher than the identified last stream. However, any frames that alter connection state
+ cannot be completely ignored. For instance, HEADERS,
+ PUSH_PROMISE and CONTINUATION frames MUST be minimally
+ processed to ensure the state maintained for header compression is consistent (see ); similarly DATA frames MUST be counted toward the connection flow
+ control window. Failure to process these frames can cause flow control or header
+ compression state to become unsynchronized.
+ + + + The GOAWAY frame also contains a 32-bit error code that + contains the reason for closing the connection. + + + Endpoints MAY append opaque data to the payload of any GOAWAY frame. Additional debug + data is intended for diagnostic purposes only and carries no semantic value. Debug + information could contain security- or privacy-sensitive data. Logged or otherwise + persistently stored debug data MUST have adequate safeguards to prevent unauthorized + access. + +
    + +
    + + The WINDOW_UPDATE frame (type=0x8) is used to implement flow control; see for an overview. + + + Flow control operates at two levels: on each individual stream and on the entire + connection. + + + Both types of flow control are hop-by-hop; that is, only between the two endpoints. + Intermediaries do not forward WINDOW_UPDATE frames between dependent connections. + However, throttling of data transfer by any receiver can indirectly cause the propagation + of flow control information toward the original sender. + + + Flow control only applies to frames that are identified as being subject to flow control. + Of the frame types defined in this document, this includes only DATA frames. + Frames that are exempt from flow control MUST be accepted and processed, unless the + receiver is unable to assign resources to handling the frame. A receiver MAY respond with + a stream error or connection error of type + FLOW_CONTROL_ERROR if it is unable to accept a frame. + +
    + +
    +
+
+ The payload of a WINDOW_UPDATE frame is one reserved bit, plus an unsigned 31-bit integer
+ indicating the number of octets that the sender can transmit in addition to the existing
+ flow control window. The legal range for the increment to the flow control window is 1 to
+ 2^31-1 (0x7fffffff) octets.
+
+
+ The WINDOW_UPDATE frame does not define any flags.
+
+
+ The WINDOW_UPDATE frame can be specific to a stream or to the entire connection. In the
+ former case, the frame's stream identifier indicates the affected stream; in the latter,
+ the value "0" indicates that the entire connection is the subject of the frame.
+
+
+ A receiver MUST treat the receipt of a WINDOW_UPDATE frame with a flow control window
+ increment of 0 as a stream error of type
+ PROTOCOL_ERROR; errors on the connection flow control window MUST be
+ treated as a connection error.
+
+
+ WINDOW_UPDATE can be sent by a peer that has sent a frame bearing the END_STREAM flag.
+ This means that a receiver could receive a WINDOW_UPDATE frame on a "half closed (remote)"
+ or "closed" stream. A receiver MUST NOT treat this as an error, see .
+
+
+ A receiver that receives a flow controlled frame MUST always account for its contribution
+ against the connection flow control window, unless the receiver treats this as a connection error. This is necessary even if the
+ frame is in error. Since the sender counts the frame toward the flow control window, if
+ the receiver does not, the flow control window at sender and receiver can become
+ different.
+
+
    +
+
+ Flow control in HTTP/2 is implemented using a window kept by each sender on every
+ stream. The flow control window is a simple integer value that indicates how many octets
+ of data the sender is permitted to transmit; as such, its size is a measure of the
+ buffering capacity of the receiver.
+
+
+ Two flow control windows are applicable: the stream flow control window and the
+ connection flow control window. The sender MUST NOT send a flow controlled frame with a
+ length that exceeds the space available in either of the flow control windows advertised
+ by the receiver. Frames with zero length with the END_STREAM flag set (that is, an
+ empty DATA frame) MAY be sent if there is no available space in either
+ flow control window.
+
+
+ For flow control calculations, the 9 octet frame header is not counted.
+
+
+ After sending a flow controlled frame, the sender reduces the space available in both
+ windows by the length of the transmitted frame.
+
+
+ The receiver of a frame sends a WINDOW_UPDATE frame as it consumes data and frees up
+ space in flow control windows. Separate WINDOW_UPDATE frames are sent for the stream
+ and connection level flow control windows.
+
+
+ A sender that receives a WINDOW_UPDATE frame updates the corresponding window by the
+ amount specified in the frame.
+
+
+ A sender MUST NOT allow a flow control window to exceed 2^31-1 octets.
+ If a sender receives a WINDOW_UPDATE that causes a flow control window to exceed this
+ maximum it MUST terminate either the stream or the connection, as appropriate. For
+ streams, the sender sends a RST_STREAM with an error code of
+ FLOW_CONTROL_ERROR; for the connection, a GOAWAY
+ frame with a FLOW_CONTROL_ERROR code.
+
+
+ Flow controlled frames from the sender and WINDOW_UPDATE frames from the receiver are
+ completely asynchronous with respect to each other. This property allows a receiver to
+ aggressively update the window size kept by the sender to prevent streams from stalling.
+
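+
+ A non-normative Go sketch of the sender-side bookkeeping above: sending
+ debits both windows, a WINDOW_UPDATE credits one of them, and neither may
+ exceed 2^31-1 (names invented):
+
+   package main
+
+   import (
+       "errors"
+       "fmt"
+   )
+
+   const maxWindow = 1<<31 - 1
+
+   type sendWindows struct {
+       conn, stream int64 // space advertised by the receiver
+   }
+
+   func (w *sendWindows) canSend(n int) bool {
+       return int64(n) <= w.conn && int64(n) <= w.stream
+   }
+
+   func (w *sendWindows) sent(n int) {
+       w.conn -= int64(n)
+       w.stream -= int64(n)
+   }
+
+   // update credits one window; exceeding the maximum is the condition the
+   // text above terminates with FLOW_CONTROL_ERROR.
+   func (w *sendWindows) update(win *int64, increment uint32) error {
+       if *win+int64(increment) > maxWindow {
+           return errors.New("window overflow: FLOW_CONTROL_ERROR")
+       }
+       *win += int64(increment)
+       return nil
+   }
+
+   func main() {
+       w := &sendWindows{conn: 65535, stream: 65535}
+       if w.canSend(1000) {
+           w.sent(1000)
+       }
+       fmt.Println(w.conn, w.stream)                   // 64535 64535
+       fmt.Println(w.update(&w.stream, 500), w.stream) // <nil> 65035
+   }
+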
    + +
    + + When an HTTP/2 connection is first established, new streams are created with an initial + flow control window size of 65,535 octets. The connection flow control window is 65,535 + octets. Both endpoints can adjust the initial window size for new streams by including + a value for SETTINGS_INITIAL_WINDOW_SIZE in the SETTINGS + frame that forms part of the connection preface. The connection flow control window can + only be changed using WINDOW_UPDATE frames. + + + Prior to receiving a SETTINGS frame that sets a value for + SETTINGS_INITIAL_WINDOW_SIZE, an endpoint can only use the default + initial window size when sending flow controlled frames. Similarly, the connection flow + control window is set to the default initial window size until a WINDOW_UPDATE frame is + received. + + + A SETTINGS frame can alter the initial flow control window size for all + current streams. When the value of SETTINGS_INITIAL_WINDOW_SIZE changes, + a receiver MUST adjust the size of all stream flow control windows that it maintains by + the difference between the new value and the old value. + + + A change to SETTINGS_INITIAL_WINDOW_SIZE can cause the available space in + a flow control window to become negative. A sender MUST track the negative flow control + window, and MUST NOT send new flow controlled frames until it receives WINDOW_UPDATE + frames that cause the flow control window to become positive. + + + For example, if the client sends 60KB immediately on connection establishment, and the + server sets the initial window size to be 16KB, the client will recalculate the + available flow control window to be -44KB on receipt of the SETTINGS + frame. The client retains a negative flow control window until WINDOW_UPDATE frames + restore the window to being positive, after which the client can resume sending. + + + A SETTINGS frame cannot alter the connection flow control window. + + + An endpoint MUST treat a change to SETTINGS_INITIAL_WINDOW_SIZE that + causes any flow control window to exceed the maximum size as a connection error of type + FLOW_CONTROL_ERROR. + +
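+
+ The example above maps directly onto a small non-normative Go sketch: every
+ stream send window moves by the difference between the new and old values of
+ SETTINGS_INITIAL_WINDOW_SIZE and may become negative (names invented):
+
+   package main
+
+   import "fmt"
+
+   // adjustStreamWindows applies newSize-oldSize to every stream window; a
+   // negative result simply blocks sending until WINDOW_UPDATE frames make
+   // the window positive again. The connection window is not touched.
+   func adjustStreamWindows(windows map[uint32]int64, oldSize, newSize uint32) {
+       delta := int64(newSize) - int64(oldSize)
+       for id := range windows {
+           windows[id] += delta
+       }
+   }
+
+   func main() {
+       // 60KB already sent against the default 65,535-octet window, then the
+       // peer lowers the initial size to 16KB: the window becomes about -44KB.
+       windows := map[uint32]int64{1: 65535 - 60*1024}
+       adjustStreamWindows(windows, 65535, 16*1024)
+       fmt.Println(windows[1]) // -45056
+   }
+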
    + +
    + + A receiver that wishes to use a smaller flow control window than the current size can + send a new SETTINGS frame. However, the receiver MUST be prepared to + receive data that exceeds this window size, since the sender might send data that + exceeds the lower limit prior to processing the SETTINGS frame. + + + After sending a SETTINGS frame that reduces the initial flow control window size, a + receiver has two options for handling streams that exceed flow control limits: + + + The receiver can immediately send RST_STREAM with + FLOW_CONTROL_ERROR error code for the affected streams. + + + The receiver can accept the streams and tolerate the resulting head of line + blocking, sending WINDOW_UPDATE frames as it consumes data. + + + +
    +
    + +
    + + The CONTINUATION frame (type=0x9) is used to continue a sequence of header block fragments. Any number of CONTINUATION frames can + be sent on an existing stream, as long as the preceding frame is on the same stream and is + a HEADERS, PUSH_PROMISE or CONTINUATION frame without the + END_HEADERS flag set. + + +
    + +
    + + The CONTINUATION frame payload contains a header block + fragment. + + + + The CONTINUATION frame defines the following flag: + + + + Bit 3 being set indicates that this frame ends a header + block. + + + If the END_HEADERS bit is not set, this frame MUST be followed by another + CONTINUATION frame. A receiver MUST treat the receipt of any other type of frame or + a frame on a different stream as a connection + error of type PROTOCOL_ERROR. + + + + + + + The CONTINUATION frame changes the connection state as defined in . + + + + CONTINUATION frames MUST be associated with a stream. If a CONTINUATION frame is received + whose stream identifier field is 0x0, the recipient MUST respond with a connection error of type PROTOCOL_ERROR. + + + + A CONTINUATION frame MUST be preceded by a HEADERS, + PUSH_PROMISE or CONTINUATION frame without the END_HEADERS flag set. A + recipient that observes violation of this rule MUST respond with a connection error of type + PROTOCOL_ERROR. + +
    +
    + +
    +
+
+ Error codes are 32-bit fields that are used in RST_STREAM and
+ GOAWAY frames to convey the reasons for the stream or connection error.
+
+
+
+ Error codes share a common code space. Some error codes apply only to either streams or the
+ entire connection and have no defined semantics in the other context.
+
+
+
+ The following error codes are defined:
+
+
+ The associated condition is not a result of an error. For example, a
+ GOAWAY might include this code to indicate graceful shutdown of a
+ connection.
+
+
+ The endpoint detected an unspecific protocol error. This error is for use when a more
+ specific error code is not available.
+
+
+ The endpoint encountered an unexpected internal error.
+
+
+ The endpoint detected that its peer violated the flow control protocol.
+
+
+ The endpoint sent a SETTINGS frame, but did not receive a response in a
+ timely manner. See Settings Synchronization.
+
+
+ The endpoint received a frame after a stream was half closed.
+
+
+ The endpoint received a frame with an invalid size.
+
+
+ The endpoint refuses the stream prior to performing any application processing, see
+ for details.
+
+
+ Used by the endpoint to indicate that the stream is no longer needed.
+
+
+ The endpoint is unable to maintain the header compression context for the connection.
+
+
+ The connection established in response to a CONNECT
+ request was reset or abnormally closed.
+
+
+ The endpoint detected that its peer is exhibiting a behavior that might be generating
+ excessive load.
+
+
+ The underlying transport has properties that do not meet minimum security
+ requirements (see ).
+
+
+
+
+ Unknown or unsupported error codes MUST NOT trigger any special behavior. These MAY be
+ treated by an implementation as being equivalent to INTERNAL_ERROR.
+
    + +
    +
+
+ HTTP/2 is intended to be as compatible as possible with current uses of HTTP. This means
+ that, from the application perspective, the features of the protocol are largely
+ unchanged. To achieve this, all request and response semantics are preserved, although the
+ syntax of conveying those semantics has changed.
+
+
+ Thus, the specification and requirements of HTTP/1.1 Semantics and Content , Conditional Requests , Range Requests , Caching and Authentication are applicable to HTTP/2. Selected portions of HTTP/1.1 Message Syntax
+ and Routing , such as the HTTP and HTTPS URI schemes, are also
+ applicable in HTTP/2, but the expression of those semantics for this protocol is defined
+ in the sections below.
+
+
    + + A client sends an HTTP request on a new stream, using a previously unused stream identifier. A server sends an HTTP response on + the same stream as the request. + + + An HTTP message (request or response) consists of: + + + for a response only, zero or more HEADERS frames (each followed by zero + or more CONTINUATION frames) containing the message headers of + informational (1xx) HTTP responses (see and ), + and + + + one HEADERS frame (followed by zero or more CONTINUATION + frames) containing the message headers (see ), and + + + zero or more DATA frames containing the message payload (see ), and + + + optionally, one HEADERS frame, followed by zero or more + CONTINUATION frames containing the trailer-part, if present (see ). + + + The last frame in the sequence bears an END_STREAM flag, noting that a + HEADERS frame bearing the END_STREAM flag can be followed by + CONTINUATION frames that carry any remaining portions of the header block. + + + Other frames (from any stream) MUST NOT occur between either HEADERS frame + and any CONTINUATION frames that might follow. + + + + Trailing header fields are carried in a header block that also terminates the stream. + That is, a sequence starting with a HEADERS frame, followed by zero or more + CONTINUATION frames, where the HEADERS frame bears an + END_STREAM flag. Header blocks after the first that do not terminate the stream are not + part of an HTTP request or response. + + + A HEADERS frame (and associated CONTINUATION frames) can + only appear at the start or end of a stream. An endpoint that receives a + HEADERS frame without the END_STREAM flag set after receiving a final + (non-informational) status code MUST treat the corresponding request or response as malformed. + + + + An HTTP request/response exchange fully consumes a single stream. A request starts with + the HEADERS frame that puts the stream into an "open" state. The request + ends with a frame bearing END_STREAM, which causes the stream to become "half closed + (local)" for the client and "half closed (remote)" for the server. A response starts with + a HEADERS frame and ends with a frame bearing END_STREAM, which places the + stream in the "closed" state. + + + +
    + + HTTP/2 removes support for the 101 (Switching Protocols) informational status code + (). + + + The semantics of 101 (Switching Protocols) aren't applicable to a multiplexed protocol. + Alternative protocols are able to use the same mechanisms that HTTP/2 uses to negotiate + their use (see ). + +
    + +
    + + HTTP header fields carry information as a series of key-value pairs. For a listing of + registered HTTP headers, see the Message Header Field Registry maintained at . + + +
+ + While HTTP/1.x used the message start-line (see ) to convey the target URI and method of the request, and the + status code for the response, HTTP/2 uses special pseudo-header fields beginning with + a ':' character (ASCII 0x3a) for this purpose. + + + Pseudo-header fields are not HTTP header fields. Endpoints MUST NOT generate + pseudo-header fields other than those defined in this document. + + + Pseudo-header fields are only valid in the context in which they are defined. + Pseudo-header fields defined for requests MUST NOT appear in responses; pseudo-header + fields defined for responses MUST NOT appear in requests. Pseudo-header fields MUST + NOT appear in trailers. Endpoints MUST treat a request or response that contains + undefined or invalid pseudo-header fields as malformed. + + + Just as in HTTP/1.x, header field names are strings of ASCII characters that are + compared in a case-insensitive fashion. However, header field names MUST be converted + to lowercase prior to their encoding in HTTP/2. A request or response containing + uppercase header field names MUST be treated as malformed. + + + All pseudo-header fields MUST appear in the header block before regular header fields. + Any request or response that contains a pseudo-header field that appears in a header + block after a regular header field MUST be treated as malformed. +
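As a concrete illustration of the ordering and case rules above, here is a minimal Go sketch. headerField is a stand-in for a decoder's field type (e.g. hpack.HeaderField), and a full check would also verify that each pseudo-header field is one of those defined for the message type.

    package sketch

    import (
        "fmt"
        "strings"
    )

    // headerField is a decoded (name, value) pair.
    type headerField struct{ Name, Value string }

    // validatePseudoHeaders enforces two rules from this section: field
    // names must already be lowercase, and every pseudo-header field (a
    // name beginning with ':') must precede all regular fields.
    func validatePseudoHeaders(fields []headerField) error {
        sawRegular := false
        for _, f := range fields {
            if f.Name != strings.ToLower(f.Name) {
                return fmt.Errorf("malformed: uppercase field name %q", f.Name)
            }
            if strings.HasPrefix(f.Name, ":") {
                if sawRegular {
                    return fmt.Errorf("malformed: pseudo-header %q after regular field", f.Name)
                }
            } else {
                sawRegular = true
            }
        }
        return nil
    }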
    + +
+ + HTTP/2 does not use the Connection header field to + indicate connection-specific header fields; in this protocol, connection-specific + metadata is conveyed by other means. An endpoint MUST NOT generate an HTTP/2 message + containing connection-specific header fields; any message containing + connection-specific header fields MUST be treated as malformed. + + + This means that an intermediary transforming an HTTP/1.x message to HTTP/2 will need + to remove any header fields nominated by the Connection header field, along with the + Connection header field itself. Such intermediaries SHOULD also remove other + connection-specific header fields, such as Keep-Alive, Proxy-Connection, + Transfer-Encoding and Upgrade, even if they are not nominated by Connection. + + + One exception to this is the TE header field, which MAY be present in an HTTP/2 + request, but when present MUST NOT contain any value other than "trailers". + + + + + HTTP/2 purposefully does not support upgrade to another protocol. The handshake + methods described in are believed sufficient to + negotiate the use of alternative protocols. + + +
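The removal step described above might look like the following sketch for an HTTP/1.x-to-HTTP/2 intermediary; the vendored transport performs a similar validation before writing request headers. The TE handling is simplified to an exact "trailers" match.

    package sketch

    import (
        "net/http"
        "strings"
    )

    // stripConnectionHeaders drops everything nominated by Connection,
    // the Connection field itself, and the other hop-by-hop fields named
    // above. TE survives only with the value "trailers".
    func stripConnectionHeaders(h http.Header) {
        for _, v := range h["Connection"] {
            for _, name := range strings.Split(v, ",") {
                h.Del(strings.TrimSpace(name))
            }
        }
        for _, name := range []string{
            "Connection", "Keep-Alive", "Proxy-Connection",
            "Transfer-Encoding", "Upgrade",
        } {
            h.Del(name)
        }
        if te := h.Get("Te"); te != "" && strings.ToLower(te) != "trailers" {
            h.Del("Te")
        }
    }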
    + +
    + + The following pseudo-header fields are defined for HTTP/2 requests: + + + + The :method pseudo-header field includes the HTTP + method (). + + + + + The :scheme pseudo-header field includes the scheme + portion of the target URI (). + + + :scheme is not restricted to http and https schemed URIs. A + proxy or gateway can translate requests for non-HTTP schemes, enabling the use + of HTTP to interact with non-HTTP services. + + + + + The :authority pseudo-header field includes the + authority portion of the target URI (). The authority MUST NOT include the deprecated userinfo subcomponent for http + or https schemed URIs. + + + To ensure that the HTTP/1.1 request line can be reproduced accurately, this + pseudo-header field MUST be omitted when translating from an HTTP/1.1 request + that has a request target in origin or asterisk form (see ). Clients that generate + HTTP/2 requests directly SHOULD use the :authority pseudo-header + field instead of the Host header field. An + intermediary that converts an HTTP/2 request to HTTP/1.1 MUST create a Host header field if one is not present in a request by + copying the value of the :authority pseudo-header + field. + + + + + The :path pseudo-header field includes the path and + query parts of the target URI (the path-absolute + production from and optionally a '?' character + followed by the query production, see and ). A request in asterisk form includes the value '*' for the + :path pseudo-header field. + + + This pseudo-header field MUST NOT be empty for http + or https URIs; http or + https URIs that do not contain a path component + MUST include a value of '/'. The exception to this rule is an OPTIONS request + for an http or https + URI that does not include a path component; these MUST include a :path pseudo-header field with a value of '*' (see ). + + + + + + All HTTP/2 requests MUST include exactly one valid value for the :method, :scheme, and :path pseudo-header fields, unless it is a CONNECT request. An HTTP request that omits mandatory + pseudo-header fields is malformed. + + + HTTP/2 does not define a way to carry the version identifier that is included in the + HTTP/1.1 request line. + +
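For a non-CONNECT request, the rules above reduce to a small mapping from a target URI to pseudo-header fields. This is a hypothetical helper, not the vendored transport's actual encoder; it ignores IDNA handling and the OPTIONS asterisk-form special case.

    package sketch

    import "net/url"

    // requestPseudoHeaders assembles the mandatory request pseudo-header
    // fields in the order in which they must precede regular fields.
    func requestPseudoHeaders(method string, u *url.URL) [][2]string {
        path := u.EscapedPath()
        if path == "" {
            path = "/" // http(s) URIs with no path component use "/"
        }
        if u.RawQuery != "" {
            path += "?" + u.RawQuery
        }
        return [][2]string{
            {":method", method},
            {":scheme", u.Scheme},
            {":authority", u.Host}, // preferred over a Host header field
            {":path", path},
        }
    }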
    + +
    + + For HTTP/2 responses, a single :status pseudo-header + field is defined that carries the HTTP status code field (see ). This pseudo-header field MUST be included in all + responses, otherwise the response is malformed. + + + HTTP/2 does not define a way to carry the version or reason phrase that is included in + an HTTP/1.1 status line. + +
    + +
    + + The Cookie header field can carry a significant amount of + redundant data. + + + The Cookie header field uses a semi-colon (";") to delimit cookie-pairs (or "crumbs"). + This header field doesn't follow the list construction rules in HTTP (see ), which prevents cookie-pairs from + being separated into different name-value pairs. This can significantly reduce + compression efficiency as individual cookie-pairs are updated. + + + To allow for better compression efficiency, the Cookie header field MAY be split into + separate header fields, each with one or more cookie-pairs. If there are multiple + Cookie header fields after decompression, these MUST be concatenated into a single + octet string using the two octet delimiter of 0x3B, 0x20 (the ASCII string "; ") + before being passed into a non-HTTP/2 context, such as an HTTP/1.1 connection, or a + generic HTTP server application. + +
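A sketch of the crumbling rule, assuming cookie-pairs are serialized with the usual "; " delimiter; one field per pair lets HPACK index each pair independently, so an update to one pair does not invalidate the others.

    package sketch

    import "strings"

    // crumbleCookie splits one Cookie field value into one value per
    // cookie-pair.
    func crumbleCookie(v string) []string {
        return strings.Split(v, "; ")
    }

    // joinCookieCrumbs restores a single field value with the mandated
    // two-octet delimiter 0x3B 0x20 ("; ") before the message leaves an
    // HTTP/2 context.
    func joinCookieCrumbs(crumbs []string) string {
        return strings.Join(crumbs, "; ")
    }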
    + + Therefore, the following two lists of Cookie header fields are semantically + equivalent. + + +
+    cookie: a=b; c=d; e=f
+
+    cookie: a=b
+    cookie: c=d
+    cookie: e=f
    + +
+ + A malformed request or response is one that is an otherwise valid sequence of HTTP/2 + frames, but is invalid due to the presence of extraneous frames, prohibited + header fields, the absence of mandatory header fields, or the inclusion of uppercase + header field names. + + + A request or response that includes an entity body can include a content-length header field. A request or response is also + malformed if the value of a content-length header field + does not equal the sum of the DATA frame payload lengths that form the + body. A response that is defined to have no payload, as described in , can have a non-zero + content-length header field, even though no content is + included in DATA frames. + + + Intermediaries that process HTTP requests or responses (i.e., any intermediary not + acting as a tunnel) MUST NOT forward a malformed request or response. Malformed + requests or responses that are detected MUST be treated as a stream error of type PROTOCOL_ERROR. + + + For malformed requests, a server MAY send an HTTP response prior to closing or + resetting the stream. Clients MUST NOT accept a malformed response. Note that these + requirements are intended to protect against several types of common attacks against + HTTP; they are deliberately strict, because being permissive can expose + implementations to these vulnerabilities. +
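The content-length consistency check above is mechanical; a receiver might implement it roughly as follows. Sketch only: real implementations track the running total as DATA frames arrive rather than buffering payloads.

    package sketch

    import "fmt"

    // checkContentLength verifies that the sum of DATA payload lengths
    // equals a declared content-length; declared < 0 means no
    // content-length header field was present.
    func checkContentLength(declared int64, dataPayloadLens []int) error {
        var sum int64
        for _, n := range dataPayloadLens {
            sum += int64(n)
        }
        if declared >= 0 && sum != declared {
            return fmt.Errorf("malformed: content-length %d, body length %d", declared, sum)
        }
        return nil
    }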
    +
    + +
    + + This section shows HTTP/1.1 requests and responses, with illustrations of equivalent + HTTP/2 requests and responses. + + + An HTTP GET request includes request header fields and no body and is therefore + transmitted as a single HEADERS frame, followed by zero or more + CONTINUATION frames containing the serialized block of request header + fields. The HEADERS frame in the following has both the END_HEADERS and + END_STREAM flags set; no CONTINUATION frames are sent: + + +
+    GET /resource HTTP/1.1           HEADERS
+    Host: example.org          ==>     + END_STREAM
+    Accept: image/jpeg                 + END_HEADERS
+                                         :method = GET
+                                         :scheme = https
+                                         :path = /resource
+                                         host = example.org
+                                         accept = image/jpeg
    + + + Similarly, a response that includes only response header fields is transmitted as a + HEADERS frame (again, followed by zero or more + CONTINUATION frames) containing the serialized block of response header + fields. + + +
+    HTTP/1.1 304 Not Modified        HEADERS
+    ETag: "xyzzy"              ==>     + END_STREAM
+    Expires: Thu, 23 Jan ...           + END_HEADERS
+                                         :status = 304
+                                         etag = "xyzzy"
+                                         expires = Thu, 23 Jan ...
    + + + An HTTP POST request that includes request header fields and payload data is transmitted + as one HEADERS frame, followed by zero or more + CONTINUATION frames containing the request header fields, followed by one + or more DATA frames, with the last CONTINUATION (or + HEADERS) frame having the END_HEADERS flag set and the final + DATA frame having the END_STREAM flag set: + + +
+    POST /resource HTTP/1.1          HEADERS
+    Host: example.org          ==>     - END_STREAM
+    Content-Type: image/jpeg           - END_HEADERS
+    Content-Length: 123                  :method = POST
+                                         :path = /resource
+    {binary data}                        :scheme = https
+
+                                      CONTINUATION
+                                        + END_HEADERS
+                                          content-type = image/jpeg
+                                          host = example.org
+                                          content-length = 123
+
+                                      DATA
+                                        + END_STREAM
+                                      {binary data}
+
+ Note that data contributing to any given header field could be spread between header + block fragments. The allocation of header fields to frames in this example is + illustrative only.
    + + + A response that includes header fields and payload data is transmitted as a + HEADERS frame, followed by zero or more CONTINUATION + frames, followed by one or more DATA frames, with the last + DATA frame in the sequence having the END_STREAM flag set: + + +
+    HTTP/1.1 200 OK                  HEADERS
+    Content-Type: image/jpeg   ==>     - END_STREAM
+    Content-Length: 123                + END_HEADERS
+                                         :status = 200
+    {binary data}                        content-type = image/jpeg
+                                         content-length = 123
+
+                                      DATA
+                                        + END_STREAM
+                                      {binary data}
    + + + Trailing header fields are sent as a header block after both the request or response + header block and all the DATA frames have been sent. The + HEADERS frame starting the trailers header block has the END_STREAM flag + set. + + +
+    HTTP/1.1 200 OK                  HEADERS
+    Content-Type: image/jpeg   ==>     - END_STREAM
+    Transfer-Encoding: chunked         + END_HEADERS
+    Trailer: Foo                         :status = 200
+                                         content-length = 123
+    123                                  content-type = image/jpeg
+    {binary data}                        trailer = Foo
+    0
+    Foo: bar                         DATA
+                                       - END_STREAM
+                                       {binary data}
+
+                                     HEADERS
+                                       + END_STREAM
+                                       + END_HEADERS
+                                         foo = bar
    + + +
+ + An informational response using a 1xx status code other than 101 is transmitted as a + HEADERS frame, followed by zero or more CONTINUATION + frames:
+
+    HEADERS
+      - END_STREAM
+      + END_HEADERS
+        :status = 103
+        extension-field = bar
    +
    + +
    + + In HTTP/1.1, an HTTP client is unable to retry a non-idempotent request when an error + occurs, because there is no means to determine the nature of the error. It is possible + that some server processing occurred prior to the error, which could result in + undesirable effects if the request were reattempted. + + + HTTP/2 provides two mechanisms for providing a guarantee to a client that a request has + not been processed: + + + The GOAWAY frame indicates the highest stream number that might have + been processed. Requests on streams with higher numbers are therefore guaranteed to + be safe to retry. + + + The REFUSED_STREAM error code can be included in a + RST_STREAM frame to indicate that the stream is being closed prior to + any processing having occurred. Any request that was sent on the reset stream can + be safely retried. + + + + + Requests that have not been processed have not failed; clients MAY automatically retry + them, even those with non-idempotent methods. + + + A server MUST NOT indicate that a stream has not been processed unless it can guarantee + that fact. If frames that are on a stream are passed to the application layer for any + stream, then REFUSED_STREAM MUST NOT be used for that stream, and a + GOAWAY frame MUST include a stream identifier that is greater than or + equal to the given stream identifier. + + + In addition to these mechanisms, the PING frame provides a way for a + client to easily test a connection. Connections that remain idle can become broken as + some middleboxes (for instance, network address translators, or load balancers) silently + discard connection bindings. The PING frame allows a client to safely + test whether a connection is still active without sending a request. + +
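The two guarantees listed above combine into one retry predicate. This is a sketch of the policy only; the Transport vendored later in this patch applies a similar rule when deciding whether a RoundTrip may be safely retried on a new connection.

    package sketch

    // unprocessed reports whether a request sent on streamID is
    // guaranteed untouched by the server: either its stream number is
    // above the GOAWAY last-stream-ID, or the server reset the stream
    // with REFUSED_STREAM.
    func unprocessed(streamID, goAwayLastStreamID uint32, refusedStream bool) bool {
        return streamID > goAwayLastStreamID || refusedStream
    }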
    +
    + +
+ + HTTP/2 allows a server to pre-emptively send (or "push") responses (along with + corresponding "promised" requests) to a client in association with a previous + client-initiated request. This can be useful when the server knows the client will need + to have those responses available in order to fully process the response to the original + request. + + + + Pushing additional message exchanges in this fashion is optional, and is negotiated + between individual endpoints. The SETTINGS_ENABLE_PUSH setting can be set + to 0 to indicate that server push is disabled. + + + Promised requests MUST be cacheable (see ), MUST be safe (see ) and MUST NOT include a request body. Clients that receive a + promised request that is not cacheable, that is unsafe, or that includes a request body MUST + reset the stream with a stream error of type + PROTOCOL_ERROR. + + + Pushed responses that are cacheable (see ) can be stored by the client, if it implements an HTTP + cache. Pushed responses are considered successfully validated on the origin server (e.g., + if the "no-cache" cache response directive is present) while the stream identified by the + promised stream ID is still open. + + + Pushed responses that are not cacheable MUST NOT be stored by any HTTP cache. They MAY + be made available to the application separately. + + + An intermediary can receive pushes from the server and choose not to forward them on to + the client. In other words, how to make use of the pushed information is up to that + intermediary. Equally, the intermediary might choose to make additional pushes to the + client, without any action taken by the server. + + + A client cannot push. Thus, servers MUST treat the receipt of a + PUSH_PROMISE frame as a connection + error of type PROTOCOL_ERROR. Clients MUST reject any attempt to + change the SETTINGS_ENABLE_PUSH setting to a value other than 0 by treating + the message as a connection error of type + PROTOCOL_ERROR. + +
+ + Server push is semantically equivalent to a server responding to a request; however, in + this case that request is also sent by the server, as a PUSH_PROMISE + frame. + + + The PUSH_PROMISE frame includes a header block that contains a complete + set of request header fields that the server attributes to the request. It is not + possible to push a response to a request that includes a request body. + + + + Pushed responses are always associated with an explicit request from the client. The + PUSH_PROMISE frames sent by the server are sent on that explicit + request's stream. The PUSH_PROMISE frame also includes a promised stream + identifier, chosen from the stream identifiers available to the server (see ). + + + + The header fields in PUSH_PROMISE and any subsequent + CONTINUATION frames MUST be a valid and complete set of request header fields. The server MUST include a method in + the :method header field that is safe and cacheable. If a + client receives a PUSH_PROMISE that does not include a complete and valid + set of header fields, or the :method header field identifies + a method that is not safe, it MUST respond with a stream error of type PROTOCOL_ERROR. + + + + The server SHOULD send PUSH_PROMISE () + frames prior to sending any frames that reference the promised responses. This avoids a + race where clients issue requests prior to receiving any PUSH_PROMISE + frames. + + + For example, if the server receives a request for a document containing embedded links + to multiple image files, and the server chooses to push those additional images to the + client, sending push promises before the DATA frames that contain the + image links ensures that the client is able to see the promises before discovering + embedded links. Similarly, if the server pushes responses referenced by the header block + (for instance, in Link header fields), sending the push promises before sending the + header block ensures that clients do not request them. + + + + PUSH_PROMISE frames MUST NOT be sent by the client. + + + PUSH_PROMISE frames can be sent by the server in response to any + client-initiated stream, but the stream MUST be in either the "open" or "half closed + (remote)" state with respect to the server. PUSH_PROMISE frames are + interspersed with the frames that comprise a response, though they cannot be + interspersed with HEADERS and CONTINUATION frames that + comprise a single header block. + + + Sending a PUSH_PROMISE frame creates a new stream and puts the stream + into the "reserved (local)" state for the server and the "reserved (remote)" state for + the client. +
    + +
+ + After sending the PUSH_PROMISE frame, the server can begin delivering the + pushed response as a response on a server-initiated + stream that uses the promised stream identifier. The server uses this stream to + transmit an HTTP response, using the same sequence of frames as defined in . This stream becomes "half closed" + to the client after the initial HEADERS frame is sent. + + + + Once a client receives a PUSH_PROMISE frame and chooses to accept the + pushed response, the client SHOULD NOT issue any requests for the promised response + until after the promised stream has closed. + + + + If the client determines, for any reason, that it does not wish to receive the pushed + response from the server, or if the server takes too long to begin sending the promised + response, the client can send an RST_STREAM frame, using either the + CANCEL or REFUSED_STREAM codes, and referencing the pushed + stream's identifier. + + + A client can use the SETTINGS_MAX_CONCURRENT_STREAMS setting to limit the + number of responses that can be concurrently pushed by a server. Advertising a + SETTINGS_MAX_CONCURRENT_STREAMS value of zero disables server push by + preventing the server from creating the necessary streams. This does not prohibit a + server from sending PUSH_PROMISE frames; clients need to reset any + promised streams that are not wanted. + + + + Clients receiving a pushed response MUST validate that either the server is + authoritative (see ), or the proxy that provided the pushed + response is configured for the corresponding request. For example, a server that offers + a certificate for only the example.com DNS-ID or Common Name + is not permitted to push a response for https://www.example.org/doc. + + + The response for a PUSH_PROMISE stream begins with a + HEADERS frame, which immediately puts the stream into the "half closed + (remote)" state for the server and "half closed (local)" state for the client, and ends + with a frame bearing END_STREAM, which places the stream in the "closed" state. + + + The client never sends a frame with the END_STREAM flag for a server push. + + +
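A client that wants no part of a promised response reacts as described above by resetting the promised stream. Sketch only, with writeRSTStream standing in for a frame writer (an assumption, not the vendored Framer API); note that the Go Transport in this patch never advertises support for push in the first place.

    package sketch

    // refusePush resets a promised-but-unwanted stream. CANCEL signals
    // disinterest; REFUSED_STREAM (0x7) additionally guarantees that no
    // processing occurred.
    func refusePush(promisedStreamID uint32, writeRSTStream func(streamID, errCode uint32) error) error {
        const errCodeCancel = 0x8
        return writeRSTStream(promisedStreamID, errCodeCancel)
    }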
    + +
    + +
    + + In HTTP/1.x, the pseudo-method CONNECT () is used to convert an HTTP connection into a tunnel to a remote host. + CONNECT is primarily used with HTTP proxies to establish a TLS session with an origin + server for the purposes of interacting with https resources. + + + In HTTP/2, the CONNECT method is used to establish a tunnel over a single HTTP/2 stream to + a remote host, for similar purposes. The HTTP header field mapping works as defined in + Request Header Fields, with a few + differences. Specifically: + + + The :method header field is set to CONNECT. + + + The :scheme and :path header + fields MUST be omitted. + + + The :authority header field contains the host and port to + connect to (equivalent to the authority-form of the request-target of CONNECT + requests, see ). + + + + + A proxy that supports CONNECT establishes a TCP connection to + the server identified in the :authority header field. Once + this connection is successfully established, the proxy sends a HEADERS + frame containing a 2xx series status code to the client, as defined in . + + + After the initial HEADERS frame sent by each peer, all subsequent + DATA frames correspond to data sent on the TCP connection. The payload of + any DATA frames sent by the client is transmitted by the proxy to the TCP + server; data received from the TCP server is assembled into DATA frames by + the proxy. Frame types other than DATA or stream management frames + (RST_STREAM, WINDOW_UPDATE, and PRIORITY) + MUST NOT be sent on a connected stream, and MUST be treated as a stream error if received. + + + The TCP connection can be closed by either peer. The END_STREAM flag on a + DATA frame is treated as being equivalent to the TCP FIN bit. A client is + expected to send a DATA frame with the END_STREAM flag set after receiving + a frame bearing the END_STREAM flag. A proxy that receives a DATA frame + with the END_STREAM flag set sends the attached data with the FIN bit set on the last TCP + segment. A proxy that receives a TCP segment with the FIN bit set sends a + DATA frame with the END_STREAM flag set. Note that the final TCP segment + or DATA frame could be empty. + + + A TCP connection error is signaled with RST_STREAM. A proxy treats any + error in the TCP connection, which includes receiving a TCP segment with the RST bit set, + as a stream error of type + CONNECT_ERROR. Correspondingly, a proxy MUST send a TCP segment with the + RST bit set if it detects an error with the stream or the HTTP/2 connection. + +
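In terms of the header mapping, a CONNECT request carries only two pseudo-header fields. A minimal sketch:

    package sketch

    // connectPseudoHeaders builds the pseudo-header fields for CONNECT:
    // :scheme and :path are omitted, and :authority carries host:port.
    func connectPseudoHeaders(hostport string) [][2]string {
        return [][2]string{
            {":method", "CONNECT"},
            {":authority", hostport}, // e.g. "example.com:443"
        }
    }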
    +
    + +
    + + This section outlines attributes of the HTTP protocol that improve interoperability, reduce + exposure to known security vulnerabilities, or reduce the potential for implementation + variation. + + +
+ + HTTP/2 connections are persistent. For best performance, it is expected that clients will not + close connections until it is determined that no further communication with a server is + necessary (for example, when a user navigates away from a particular web page), or until + the server closes the connection. + + + Clients SHOULD NOT open more than one HTTP/2 connection to a given host and port pair, + where host is derived from a URI, a selected alternative + service, or a configured proxy. + + + A client can create additional connections as replacements, either to replace connections + that are near to exhausting the available stream + identifier space, to refresh the keying material for a TLS connection, or to + replace connections that have encountered errors. + + + A client MAY open multiple connections to the same IP address and TCP port using different + Server Name Indication values or to provide different TLS + client certificates, but SHOULD avoid creating multiple connections with the same + configuration. + + + Servers are encouraged to maintain open connections for as long as possible, but are + permitted to terminate idle connections if necessary. When either endpoint chooses to + close the transport-layer TCP connection, the terminating endpoint SHOULD first send a + GOAWAY () frame so that both endpoints can reliably + determine whether previously sent frames have been processed and gracefully complete or + terminate any necessary remaining tasks. + +
+ + Connections that are made to an origin server, either directly or through a tunnel + created using the CONNECT method, MAY be reused for + requests with multiple different URI authority components. A connection can be reused + as long as the origin server is authoritative. For + http resources, this depends on the host having resolved to + the same IP address. + + + For https resources, connection reuse additionally depends + on having a certificate that is valid for the host in the URI. An origin server might + offer a certificate with multiple subjectAltName attributes, + or names with wildcards, one of which is valid for the authority in the URI. For + example, a certificate with a subjectAltName of *.example.com might permit the use of the same connection for + requests to URIs starting with https://a.example.com/ and + https://b.example.com/. + + + In some deployments, reusing a connection for multiple origins can result in requests + being directed to the wrong origin server. For example, TLS termination might be + performed by a middlebox that uses the TLS Server Name Indication + (SNI) extension to select an origin server. This means that it is possible + for clients to send confidential information to servers that might not be the intended + target for the request, even though the server is otherwise authoritative. + + + A server that does not wish clients to reuse connections can indicate that it is not + authoritative for a request by sending a 421 (Misdirected Request) status code in response + to the request (see ). + + + A client that is configured to use a proxy over HTTP/2 directs requests to that proxy + through a single connection. That is, all requests sent via a proxy reuse the + connection to the proxy. +
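For https resources, the certificate check described above can be expressed with the standard library. This sketch assumes the connection's verified leaf certificate is available; a real client must additionally confirm that the host resolves to the same server before coalescing.

    package sketch

    import "crypto/tls"

    // canCoalesce reports whether an established TLS connection may be
    // reused for a request to host: the presented leaf certificate must
    // be valid for the new name.
    func canCoalesce(cs tls.ConnectionState, host string) bool {
        if len(cs.PeerCertificates) == 0 {
            return false
        }
        return cs.PeerCertificates[0].VerifyHostname(host) == nil
    }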
    + +
    + + The 421 (Misdirected Request) status code indicates that the request was directed at a + server that is not able to produce a response. This can be sent by a server that is not + configured to produce responses for the combination of scheme and authority that are + included in the request URI. + + + Clients receiving a 421 (Misdirected Request) response from a server MAY retry the + request - whether the request method is idempotent or not - over a different connection. + This is possible if a connection is reused () or if an alternative + service is selected (). + + + This status code MUST NOT be generated by proxies. + + + A 421 response is cacheable by default; i.e., unless otherwise indicated by the method + definition or explicit cache controls (see ). + +
    +
    + +
    + + Implementations of HTTP/2 MUST support TLS 1.2 for HTTP/2 over + TLS. The general TLS usage guidance in SHOULD be followed, with + some additional restrictions that are specific to HTTP/2. + + + + An implementation of HTTP/2 over TLS MUST use TLS 1.2 or higher with the restrictions on + feature set and cipher suite described in this section. Due to implementation + limitations, it might not be possible to fail TLS negotiation. An endpoint MUST + immediately terminate an HTTP/2 connection that does not meet these minimum requirements + with a connection error of type + INADEQUATE_SECURITY. + + +
    + + The TLS implementation MUST support the Server Name Indication + (SNI) extension to TLS. HTTP/2 clients MUST indicate the target domain name when + negotiating TLS. + + + The TLS implementation MUST disable compression. TLS compression can lead to the + exposure of information that would not otherwise be revealed . + Generic compression is unnecessary since HTTP/2 provides compression features that are + more aware of context and therefore likely to be more appropriate for use for + performance, security or other reasons. + + + The TLS implementation MUST disable renegotiation. An endpoint MUST treat a TLS + renegotiation as a connection error of type + PROTOCOL_ERROR. Note that disabling renegotiation can result in + long-lived connections becoming unusable due to limits on the number of messages the + underlying cipher suite can encipher. + + + A client MAY use renegotiation to provide confidentiality protection for client + credentials offered in the handshake, but any renegotiation MUST occur prior to sending + the connection preface. A server SHOULD request a client certificate if it sees a + renegotiation request immediately after establishing a connection. + + + This effectively prevents the use of renegotiation in response to a request for a + specific protected resource. A future specification might provide a way to support this + use case. + +
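With Go's crypto/tls, the feature restrictions above translate into a small client configuration. Go never negotiates TLS-level compression, so that prohibition holds by default; the rest is explicit. A sketch, assuming the "h2" ALPN identifier registered later in this document:

    package sketch

    import "crypto/tls"

    // clientTLSConfig satisfies this section: TLS 1.2 minimum, SNI via
    // ServerName, "h2" offered through ALPN, and renegotiation refused.
    func clientTLSConfig(serverName string) *tls.Config {
        return &tls.Config{
            ServerName:    serverName,           // SNI is mandatory for HTTP/2 clients
            NextProtos:    []string{"h2"},       // ALPN protocol identifier
            MinVersion:    tls.VersionTLS12,
            Renegotiation: tls.RenegotiateNever, // renegotiation is a PROTOCOL_ERROR
        }
    }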
    + +
+ + The set of TLS cipher suites that are permitted in HTTP/2 is restricted. HTTP/2 MUST + only be used with cipher suites that have ephemeral key exchange, such as the ephemeral Diffie-Hellman (DHE) or the elliptic curve variant (ECDHE). Ephemeral key exchange MUST + have a minimum size of 2048 bits for DHE or a security level of 128 bits for ECDHE. + Clients MUST accept DHE sizes of up to 4096 bits. HTTP/2 MUST NOT be used with cipher + suites that use stream or block ciphers. Authenticated Encryption with Additional Data + (AEAD) modes, such as the Galois/Counter Mode (GCM) for + AES, are acceptable. + + + The effect of these restrictions is that TLS 1.2 implementations could have + non-intersecting sets of available cipher suites, since these prevent the use of the + cipher suite that TLS 1.2 makes mandatory. To avoid this problem, implementations of + HTTP/2 that use TLS 1.2 MUST support TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 with the P-256 elliptic curve. + + + Clients MAY advertise support of cipher suites that are prohibited by the above + restrictions in order to allow for connection to servers that do not support HTTP/2. + This enables a fallback to protocols without these constraints without the additional + latency imposed by using a separate connection for fallback. +
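Continuing the sketch from the previous subsection, pinning the cipher suites guarantees an ephemeral (ECDHE) key exchange with an AEAD cipher. The second suite is an assumption, added as the common ECDSA companion of the mandatory RSA suite:

    // restrictedTLSConfig extends clientTLSConfig with the cipher suite
    // restrictions from this section.
    func restrictedTLSConfig(serverName string) *tls.Config {
        cfg := clientTLSConfig(serverName)
        cfg.CipherSuites = []uint16{
            tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,   // mandatory per this section
            tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, // assumed companion suite
        }
        return cfg
    }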
    +
    +
    + +
    +
    + + HTTP/2 relies on the HTTP/1.1 definition of authority for determining whether a server is + authoritative in providing a given response, see . This relies on local name resolution for the "http" + URI scheme, and the authenticated server identity for the "https" scheme (see ). + +
    + +
+ + In a cross-protocol attack, an attacker causes a client to initiate a transaction in one + protocol toward a server that understands a different protocol. An attacker might be able + to cause the transaction to appear as a valid transaction in the second protocol. In + combination with the capabilities of the web context, this can be used to interact with + poorly protected servers in private networks. + + + Completing a TLS handshake with an ALPN identifier for HTTP/2 can be considered sufficient + protection against cross-protocol attacks. ALPN provides a positive indication that a + server is willing to proceed with HTTP/2, which prevents attacks on other TLS-based + protocols. + + + The encryption in TLS makes it difficult for attackers to control the data which could be + used in a cross-protocol attack on a cleartext protocol. + + + The cleartext version of HTTP/2 has minimal protection against cross-protocol attacks. + The connection preface contains a string that is + designed to confuse HTTP/1.1 servers, but no special protection is offered for other + protocols. A server that is willing to ignore parts of an HTTP/1.1 request containing an + Upgrade header field in addition to the client connection preface could be exposed to a + cross-protocol attack. +
    + +
+ + HTTP/2 header field names and values are encoded as sequences of octets with a length + prefix. This enables HTTP/2 to carry any string of octets as the name or value of a + header field. An intermediary that translates HTTP/2 requests or responses into HTTP/1.1 + directly could permit the creation of corrupted HTTP/1.1 messages. An attacker might + exploit this behavior to cause the intermediary to create HTTP/1.1 messages with illegal + header fields, extra header fields, or even new messages that are entirely falsified. + + + Header field names or values that contain characters not permitted by HTTP/1.1, including + carriage return (ASCII 0xd) or line feed (ASCII 0xa), MUST NOT be translated verbatim by an + intermediary, as stipulated in . + + + Translation from HTTP/1.x to HTTP/2 does not produce the same opportunity for an attacker. + Intermediaries that perform translation to HTTP/2 MUST remove any instances of the obs-fold production from header field values. +
    + +
    + + Pushed responses do not have an explicit request from the client; the request + is provided by the server in the PUSH_PROMISE frame. + + + Caching responses that are pushed is possible based on the guidance provided by the origin + server in the Cache-Control header field. However, this can cause issues if a single + server hosts more than one tenant. For example, a server might offer multiple users each + a small portion of its URI space. + + + Where multiple tenants share space on the same server, that server MUST ensure that + tenants are not able to push representations of resources that they do not have authority + over. Failure to enforce this would allow a tenant to provide a representation that would + be served out of cache, overriding the actual representation that the authoritative tenant + provides. + + + Pushed responses for which an origin server is not authoritative (see + ) are never cached or used. + +
    + +
+ + An HTTP/2 connection can demand a greater commitment of resources to operate than an + HTTP/1.1 connection. The use of header compression and flow control depends on a + commitment of resources for storing a greater amount of state. Settings for these + features ensure that memory commitments for these features are strictly bounded. + + + The number of PUSH_PROMISE frames is not constrained in the same fashion. + A client that accepts server push SHOULD limit the number of streams it allows to be in + the "reserved (remote)" state. An excessive number of server push streams can be treated as + a stream error of type + ENHANCE_YOUR_CALM. + + + Processing capacity cannot be guarded as effectively as state capacity. + + + The SETTINGS frame can be abused to cause a peer to expend additional + processing time. This might be done by pointlessly changing SETTINGS parameters, setting + multiple undefined parameters, or changing the same setting multiple times in the same + frame. WINDOW_UPDATE or PRIORITY frames can be abused to + cause an unnecessary waste of resources. + + + Large numbers of small or empty frames can be abused to cause a peer to expend time + processing frame headers. Note however that some uses are entirely legitimate, such as + the sending of an empty DATA frame to end a stream. + + + Header compression also offers some opportunities to waste processing resources; see for more details on potential abuses. + + + Limits in SETTINGS parameters cannot be reduced instantaneously, which + leaves an endpoint exposed to behavior from a peer that could exceed the new limits. In + particular, immediately after establishing a connection, limits set by a server are not + known to clients and could be exceeded without being an obvious protocol violation. + + + All these features - i.e., SETTINGS changes, small frames, header + compression - have legitimate uses. These features become a burden only when they are + used unnecessarily or to excess. + + + An endpoint that doesn't monitor this behavior exposes itself to a risk of denial-of-service + attack. Implementations SHOULD track the use of these features and set limits on + their use. An endpoint MAY treat activity that is suspicious as a connection error of type + ENHANCE_YOUR_CALM. + +
+ + A large header block can cause an implementation to + commit a large amount of state. Header fields that are critical for routing can appear + toward the end of a header block, which prevents streaming of header fields to their + ultimate destination. For these and other reasons, such as ensuring cache correctness, + an endpoint might need to buffer the entire header block. Since there is no + hard limit to the size of a header block, some endpoints could be forced to commit a large + amount of available memory for header fields. + + + An endpoint can use the SETTINGS_MAX_HEADER_LIST_SIZE setting to advise peers of + limits that might apply on the size of header blocks. This setting is only advisory, so + endpoints MAY choose to send header blocks that exceed this limit and risk having the + request or response being treated as malformed. This setting is specific to a connection, + so any request or response could encounter a hop with a lower, unknown limit. An + intermediary can attempt to avoid this problem by passing on values presented by + different peers, but they are not obligated to do so. + + + A server that receives a larger header block than it is willing to handle can send an + HTTP 431 (Request Header Fields Too Large) status code . A + client can discard responses that it cannot process. The header block MUST be processed + to ensure a consistent connection state, unless the connection is closed. +
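The Transport defined in transport.go later in this patch exposes exactly this advisory limit. For example, to advertise a 1 MiB SETTINGS_MAX_HEADER_LIST_SIZE in the initial SETTINGS frame:

    package main

    import "golang.org/x/net/http2"

    func main() {
        // Zero means the package default (currently 10MB, per the
        // field's doc comment below); an explicit value caps the size
        // of accepted response header blocks.
        t := &http2.Transport{MaxHeaderListSize: 1 << 20}
        _ = t
    }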
    +
    + +
+ + HTTP/2 enables greater use of compression for both header fields () and entity bodies. Compression can allow an attacker to recover + secret data when it is compressed in the same context as data under attacker control. + + + There are demonstrable attacks on compression that exploit the characteristics of the web + (e.g., ). The attacker induces multiple requests containing + varying plaintext, observing the length of the resulting ciphertext in each, which + reveals a shorter length when a guess about the secret is correct. + + + Implementations communicating on a secure channel MUST NOT compress content that includes + both confidential and attacker-controlled data unless separate compression dictionaries + are used for each source of data. Compression MUST NOT be used if the source of data + cannot be reliably determined. Generic stream compression, such as that provided by TLS, + MUST NOT be used with HTTP/2 (). + + + Further considerations regarding the compression of header fields are described in . +
    + +
+ + Padding within HTTP/2 is not intended as a replacement for general purpose padding, such + as might be provided by TLS. Redundant padding could even be + counterproductive. Correct application of padding can depend on having specific knowledge of the + data that is being padded. + + + To mitigate attacks that rely on compression, disabling or limiting compression might be + preferable to padding as a countermeasure. + + + Padding can be used to obscure the exact size of frame content, and is provided to + mitigate specific attacks within HTTP. For example, attacks where compressed content + includes both attacker-controlled plaintext and secret data (see for example, ). + + + Use of padding can result in less protection than might seem immediately obvious. At + best, padding only makes it more difficult for an attacker to infer length information by + increasing the number of frames an attacker has to observe. Incorrectly implemented + padding schemes can be easily defeated. In particular, randomized padding with a + predictable distribution provides very little protection; similarly, padding payloads to a + fixed size exposes information as payload sizes cross the fixed size boundary, which could + be possible if an attacker can control plaintext. + + + Intermediaries SHOULD retain padding for DATA frames, but MAY drop padding + for HEADERS and PUSH_PROMISE frames. A valid reason for an + intermediary to change the amount of padding of frames is to improve the protections that + padding provides. +
    + +
    + + Several characteristics of HTTP/2 provide an observer an opportunity to correlate actions + of a single client or server over time. This includes the value of settings, the manner + in which flow control windows are managed, the way priorities are allocated to streams, + timing of reactions to stimulus, and handling of any optional features. + + + As far as this creates observable differences in behavior, they could be used as a basis + for fingerprinting a specific client, as defined in . + +
    +
    + +
+ + A string for identifying HTTP/2 is entered into the "Application Layer Protocol Negotiation + (ALPN) Protocol IDs" registry established in . + + + This document establishes a registry for frame types, settings, and error codes. These new + registries are entered into a new "Hypertext Transfer Protocol (HTTP) 2 Parameters" section. + + + This document registers the HTTP2-Settings header field for + use in HTTP, and the 421 (Misdirected Request) status code. + + + This document registers the PRI method for use in HTTP, to avoid + collisions with the connection preface. + +
+ + This document creates two registrations for the identification of HTTP/2 in the + "Application Layer Protocol Negotiation (ALPN) Protocol IDs" registry established in . + + The "h2" string identifies HTTP/2 when used over TLS:
+
+    Protocol: HTTP/2 over TLS
+    Identification Sequence: 0x68 0x32 ("h2")
+    Specification: This document
+
+ The "h2c" string identifies HTTP/2 when used over cleartext TCP:
+
+    Protocol: HTTP/2 over TCP
+    Identification Sequence: 0x68 0x32 0x63 ("h2c")
+    Specification: This document
    + +
+ + This document establishes a registry for HTTP/2 frame type codes. The "HTTP/2 Frame + Type" registry manages an 8-bit space. The "HTTP/2 Frame Type" registry operates under + either of the "IETF Review" or "IESG Approval" policies for + values between 0x00 and 0xef, with values between 0xf0 and 0xff being reserved for + experimental use. + + + New entries in this registry require the following information: + + + A name or label for the frame type. + + + The 8-bit code assigned to the frame type. + + + A reference to a specification that includes a description of the frame layout, + its semantics and flags that the frame type uses, including any parts of the frame + that are conditionally present based on the value of flags. + + + + + The entries in the following table are registered by this document.
+
+    Frame Type     Code
+    -------------  ----
+    DATA           0x0
+    HEADERS        0x1
+    PRIORITY       0x2
+    RST_STREAM     0x3
+    SETTINGS       0x4
+    PUSH_PROMISE   0x5
+    PING           0x6
+    GOAWAY         0x7
+    WINDOW_UPDATE  0x8
+    CONTINUATION   0x9
    + +
+ + This document establishes a registry for HTTP/2 settings. The "HTTP/2 Settings" registry + manages a 16-bit space. The "HTTP/2 Settings" registry operates under the "Expert Review" policy for values in the range from 0x0000 to + 0xefff, with values between 0xf000 and 0xffff being reserved for experimental use. + + + New registrations are advised to provide the following information: + + + A symbolic name for the setting. Specifying a setting name is optional. + + + The 16-bit code assigned to the setting. + + + An initial value for the setting. + + + An optional reference to a specification that describes the use of the setting. + + + + + An initial set of setting registrations can be found in .
+
+    Name                    Code  Initial Value
+    ----------------------  ----  -------------
+    HEADER_TABLE_SIZE       0x1   4096
+    ENABLE_PUSH             0x2   1
+    MAX_CONCURRENT_STREAMS  0x3   (infinite)
+    INITIAL_WINDOW_SIZE     0x4   65535
+    MAX_FRAME_SIZE          0x5   16384
+    MAX_HEADER_LIST_SIZE    0x6   (infinite)
    + +
+ + This document establishes a registry for HTTP/2 error codes. The "HTTP/2 Error Code" + registry manages a 32-bit space. The "HTTP/2 Error Code" registry operates under the + "Expert Review" policy. + + + Registrations for error codes are required to include a description of the error code. An + expert reviewer is advised to examine new registrations for possible duplication with + existing error codes. Use of existing registrations is to be encouraged, but not + mandated. + + + New registrations are advised to provide the following information: + + + A name for the error code. Specifying an error code name is optional. + + + The 32-bit error code value. + + + A brief description of the error code semantics, longer if no detailed specification + is provided. + + + An optional reference for a specification that defines the error code. + + + + + The entries in the following table are registered by this document.
+
+    Name                 Code  Description
+    -------------------  ----  ----------------------------------------
+    NO_ERROR             0x0   Graceful shutdown
+    PROTOCOL_ERROR       0x1   Protocol error detected
+    INTERNAL_ERROR       0x2   Implementation fault
+    FLOW_CONTROL_ERROR   0x3   Flow control limits exceeded
+    SETTINGS_TIMEOUT     0x4   Settings not acknowledged
+    STREAM_CLOSED        0x5   Frame received for closed stream
+    FRAME_SIZE_ERROR     0x6   Frame size incorrect
+    REFUSED_STREAM       0x7   Stream not processed
+    CANCEL               0x8   Stream cancelled
+    COMPRESSION_ERROR    0x9   Compression state not updated
+    CONNECT_ERROR        0xa   TCP connection error for CONNECT method
+    ENHANCE_YOUR_CALM    0xb   Processing capacity exceeded
+    INADEQUATE_SECURITY  0xc   Negotiated TLS parameters not acceptable
    + +
+ + This section registers the HTTP2-Settings header field in the + Permanent Message Header Field Registry.
+
+    Header field name: HTTP2-Settings
+    Applicable protocol: http
+    Status: standard
+    Author/Change controller: IETF
+    Specification document(s): of this document
+    Related information: This header field is only used by an HTTP/2
+    client for Upgrade-based negotiation.
    + +
+ + This section registers the PRI method in the HTTP Method + Registry ().
+
+    Method Name: PRI
+    Safe: No
+    Idempotent: No
+    Specification document(s): of this document
+    Related information: This method is never used by an actual client.
+    This method will appear to be used when an HTTP/1.1 server or
+    intermediary attempts to parse an HTTP/2 connection preface.
    + +
+ + This document registers the 421 (Misdirected Request) HTTP Status code in the Hypertext + Transfer Protocol (HTTP) Status Code Registry ().
+
+    Status Code: 421
+    Short Description: Misdirected Request
+    Specification: of this document
    + +
    + +
    + + This document includes substantial input from the following individuals: + + + Adam Langley, Wan-Teh Chang, Jim Morrison, Mark Nottingham, Alyssa Wilk, Costin + Manolache, William Chan, Vitaliy Lvin, Joe Chan, Adam Barth, Ryan Hamilton, Gavin + Peters, Kent Alstad, Kevin Lindsay, Paul Amer, Fan Yang, Jonathan Leighton (SPDY + contributors). + + + Gabriel Montenegro and Willy Tarreau (Upgrade mechanism). + + + William Chan, Salvatore Loreto, Osama Mazahir, Gabriel Montenegro, Jitu Padhye, Roberto + Peon, Rob Trace (Flow control). + + + Mike Bishop (Extensibility). + + + Mark Nottingham, Julian Reschke, James Snell, Jeff Pinner, Mike Bishop, Herve Ruellan + (Substantial editorial contributions). + + + Kari Hurtta, Tatsuhiro Tsujikawa, Greg Wilkins, Poul-Henning Kamp. + + + Alexey Melnikov was an editor of this document during 2013. + + + A substantial proportion of Martin's contribution was supported by Microsoft during his + employment there. + + + +
    +
+ Normative References
+
+    HPACK - Header Compression for HTTP/2 (companion specification to this document).
+    Transmission Control Protocol, RFC 793, University of Southern California (USC)/Information Sciences Institute.
+    Key words for use in RFCs to Indicate Requirement Levels, RFC 2119, S. Bradner, Harvard University.
+    HTTP Over TLS, RFC 2818.
+    Uniform Resource Identifier (URI): Generic Syntax, RFC 3986.
+    The Base16, Base32, and Base64 Data Encodings, RFC 4648.
+    Guidelines for Writing an IANA Considerations Section in RFCs, RFC 5226.
+    Augmented BNF for Syntax Specifications: ABNF, RFC 5234.
+    The Transport Layer Security (TLS) Protocol Version 1.2, RFC 5246.
+    Transport Layer Security (TLS) Extensions: Extension Definitions, RFC 6066.
+    Transport Layer Security (TLS) Application-Layer Protocol Negotiation Extension, RFC 7301.
+    TLS Elliptic Curve Cipher Suites with SHA-256/384 and AES Galois Counter Mode (GCM), RFC 5289.
+    Digital Signature Standard (DSS), NIST.
+    Hypertext Transfer Protocol (HTTP/1.1): Message Syntax and Routing, RFC 7230, R. Fielding (Adobe Systems Incorporated) and J. Reschke (greenbytes GmbH).
+    Hypertext Transfer Protocol (HTTP/1.1): Semantics and Content, RFC 7231, R. Fielding and J. Reschke.
+    Hypertext Transfer Protocol (HTTP/1.1): Conditional Requests, RFC 7232, R. Fielding and J. Reschke.
+    Hypertext Transfer Protocol (HTTP/1.1): Range Requests, RFC 7233, R. Fielding, Y. Lafon (World Wide Web Consortium), and J. Reschke.
+    Hypertext Transfer Protocol (HTTP/1.1): Caching, RFC 7234, R. Fielding, M. Nottingham (Akamai), and J. Reschke.
+    Hypertext Transfer Protocol (HTTP/1.1): Authentication, RFC 7235, R. Fielding and J. Reschke.
+    HTTP State Management Mechanism, RFC 6265.
+
+ Informative References
+
+    TCP Extensions for High Performance.
+    Transport Layer Security Protocol Compression Methods, RFC 3749.
+    Additional HTTP Status Codes, RFC 6585.
+    Elliptic Curve Cryptography (ECC) Cipher Suites for Transport Layer Security (TLS), RFC 4492.
+    AES Galois Counter Mode (GCM) Cipher Suites for TLS, RFC 5288.
+    HTML5, W3C.
+    Talking to Yourself for Fun and Profit.
+    BREACH: Reviving the CRIME Attack.
+    Registration Procedures for Message Header Fields, RFC 3864, G. Klyne (Nine by Nine), M. Nottingham (BEA Systems), and J. Mogul (HP Labs).
+    Recommendations for Secure Use of TLS and DTLS.
+    HTTP Alternative Services, M. Nottingham (Akamai), P. McManus (Mozilla), and J. Reschke (greenbytes).
    + +
    + + This section is to be removed by RFC Editor before publication. + + +
    + + Renamed Not Authoritative status code to Misdirected Request. + +
    + +
+ + Pseudo-header fields are now required to appear strictly before regular ones. + + + Restored 1xx series status codes, except 101. + + + Changed frame length field to 24 bits. Expanded frame header to 9 octets. Added a setting + to limit the damage. + + + Added a setting to advise peers of header set size limits. + + + Removed segments. + + + Made non-semantic-bearing HEADERS frames illegal in the HTTP mapping. +
    + +
    + + Restored extensibility options. + + + Restricting TLS cipher suites to AEAD only. + + + Removing Content-Encoding requirements. + + + Permitting the use of PRIORITY after stream close. + + + Removed ALTSVC frame. + + + Removed BLOCKED frame. + + + Reducing the maximum padding size to 256 octets; removing padding from + CONTINUATION frames. + + + Removed per-frame GZIP compression. + +
    + +
    + + Added BLOCKED frame (at risk). + + + Simplified priority scheme. + + + Added DATA per-frame GZIP compression. + +
    + +
    + + Changed "connection header" to "connection preface" to avoid confusion. + + + Added dependency-based stream prioritization. + + + Added "h2c" identifier to distinguish between cleartext and secured HTTP/2. + + + Adding missing padding to PUSH_PROMISE. + + + Integrate ALTSVC frame and supporting text. + + + Dropping requirement on "deflate" Content-Encoding. + + + Improving security considerations around use of compression. + +
    + +
    + + Adding padding for data frames. + + + Renumbering frame types, error codes, and settings. + + + Adding INADEQUATE_SECURITY error code. + + + Updating TLS usage requirements to 1.2; forbidding TLS compression. + + + Removing extensibility for frames and settings. + + + Changing setting identifier size. + + + Removing the ability to disable flow control. + + + Changing the protocol identification token to "h2". + + + Changing the use of :authority to make it optional and to allow userinfo in non-HTTP + cases. + + + Allowing split on 0x0 for Cookie. + + + Reserved PRI method in HTTP/1.1 to avoid possible future collisions. + +
    + +
    + + Added cookie crumbling for more efficient header compression. + + + Added header field ordering with the value-concatenation mechanism. + +
    + +
    + + Marked draft for implementation. + +
    + +
    + + Adding definition for CONNECT method. + + + Constraining the use of push to safe, cacheable methods with no request body. + + + Changing from :host to :authority to remove any potential confusion. + + + Adding setting for header compression table size. + + + Adding settings acknowledgement. + + + Removing unnecessary and potentially problematic flags from CONTINUATION. + + + Added denial of service considerations. + +
    +
    + + Marking the draft ready for implementation. + + + Renumbering END_PUSH_PROMISE flag. + + + Editorial clarifications and changes. + +
    + +
    + + Added CONTINUATION frame for HEADERS and PUSH_PROMISE. + + + PUSH_PROMISE is no longer implicitly prohibited if SETTINGS_MAX_CONCURRENT_STREAMS is + zero. + + + Push expanded to allow all safe methods without a request body. + + + Clarified the use of HTTP header fields in requests and responses. Prohibited HTTP/1.1 + hop-by-hop header fields. + + + Requiring that intermediaries not forward requests with missing or illegal routing + :-headers. + + + Clarified requirements around handling different frames after stream close, stream reset + and GOAWAY. + + + Added more specific prohibitions for sending of different frame types in various stream + states. + + + Making the last received setting value the effective value. + + + Clarified requirements on TLS version, extension and ciphers. + +
    + +
    + + Committed major restructuring atrocities. + + + Added reference to first header compression draft. + + + Added more formal description of frame lifecycle. + + + Moved END_STREAM (renamed from FINAL) back to HEADERS/DATA. + + + Removed HEADERS+PRIORITY, added optional priority to HEADERS frame. + + + Added PRIORITY frame. + +
    + +
    + + Added continuations to frames carrying header blocks. + + + Replaced use of "session" with "connection" to avoid confusion with other HTTP stateful + concepts, like cookies. + + + Removed "message". + + + Switched to TLS ALPN from NPN. + + + Editorial changes. + +
    + +
    + + Added IANA considerations section for frame types, error codes and settings. + + + Removed data frame compression. + + + Added PUSH_PROMISE. + + + Added globally applicable flags to framing. + + + Removed zlib-based header compression mechanism. + + + Updated references. + + + Clarified stream identifier reuse. + + + Removed CREDENTIALS frame and associated mechanisms. + + + Added advice against naive implementation of flow control. + + + Added session header section. + + + Restructured frame header. Removed distinction between data and control frames. + + + Altered flow control properties to include session-level limits. + + + Added note on cacheability of pushed resources and multiple tenant servers. + + + Changed protocol label form based on discussions. + +
    + +
+ + Changed title throughout. + + + Removed section on Incompatibilities with SPDY draft#2. + + + Changed INTERNAL_ERROR on GOAWAY to have a value of 2. + + + Replaced abstract and introduction. + + + Added section on starting HTTP/2.0, including upgrade mechanism. + + + Removed unused references. + + + Added flow control principles based on . +
    + +
    + + Adopted as base for draft-ietf-httpbis-http2. + + + Updated authors/editors list. + + + Added status note. + +
    +
    + +
    +
+ diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go new file mode 100644 index 0000000..e6b321f --- /dev/null +++ b/vendor/golang.org/x/net/http2/transport.go @@ -0,0 +1,2303 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Transport code. + +package http2 + +import ( + "bufio" + "bytes" + "compress/gzip" + "crypto/rand" + "crypto/tls" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "math" + mathrand "math/rand" + "net" + "net/http" + "sort" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/http2/hpack" + "golang.org/x/net/idna" + "golang.org/x/net/lex/httplex" +) + +const ( + // transportDefaultConnFlow is how many connection-level flow control + // tokens we give the server at start-up, past the default 64k. + transportDefaultConnFlow = 1 << 30 + + // transportDefaultStreamFlow is how many stream-level flow + // control tokens we announce to the peer, and how many bytes + // we buffer per stream. + transportDefaultStreamFlow = 4 << 20 + + // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send + // a stream-level WINDOW_UPDATE for at a time. + transportDefaultStreamMinRefresh = 4 << 10 + + defaultUserAgent = "Go-http-client/2.0" +) + +// Transport is an HTTP/2 Transport. +// +// A Transport internally caches connections to servers. It is safe +// for concurrent use by multiple goroutines. +type Transport struct { + // DialTLS specifies an optional dial function for creating + // TLS connections for requests. + // + // If DialTLS is nil, tls.Dial is used. + // + // If the returned net.Conn has a ConnectionState method like tls.Conn, + // it will be used to set http.Response.TLS. + DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error) + + // TLSClientConfig specifies the TLS configuration to use with + // tls.Client. If nil, the default configuration is used. + TLSClientConfig *tls.Config + + // ConnPool optionally specifies an alternate connection pool to use. + // If nil, the default is used. + ConnPool ClientConnPool + + // DisableCompression, if true, prevents the Transport from + // requesting compression with an "Accept-Encoding: gzip" + // request header when the Request contains no existing + // Accept-Encoding value. If the Transport requests gzip on + // its own and gets a gzipped response, it's transparently + // decoded in the Response.Body. However, if the user + // explicitly requested gzip it is not automatically + // uncompressed. + DisableCompression bool + + // AllowHTTP, if true, permits HTTP/2 requests using the insecure, + // plain-text "http" scheme. Note that this does not enable h2c support. + AllowHTTP bool + + // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to + // send in the initial settings frame. It is how many bytes + // of response headers are allowed. Unlike the http2 spec, zero here + // means to use a default limit (currently 10MB). If you actually + // want to advertise an unlimited value to the peer, Transport + // interprets the highest possible value here (0xffffffff or 1<<32-1) + // to mean no limit. + MaxHeaderListSize uint32 + + // t1, if non-nil, is the standard library Transport using + // this transport. Its settings are used (but not its + // RoundTrip method, etc).
+ t1 *http.Transport + + connPoolOnce sync.Once + connPoolOrDef ClientConnPool // non-nil version of ConnPool +} + +func (t *Transport) maxHeaderListSize() uint32 { + if t.MaxHeaderListSize == 0 { + return 10 << 20 + } + if t.MaxHeaderListSize == 0xffffffff { + return 0 + } + return t.MaxHeaderListSize +} + +func (t *Transport) disableCompression() bool { + return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) +} + +var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6") + +// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. +// It requires Go 1.6 or later and returns an error if the net/http package is too old +// or if t1 has already been HTTP/2-enabled. +func ConfigureTransport(t1 *http.Transport) error { + _, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go + return err +} + +func (t *Transport) connPool() ClientConnPool { + t.connPoolOnce.Do(t.initConnPool) + return t.connPoolOrDef +} + +func (t *Transport) initConnPool() { + if t.ConnPool != nil { + t.connPoolOrDef = t.ConnPool + } else { + t.connPoolOrDef = &clientConnPool{t: t} + } +} + +// ClientConn is the state of a single HTTP/2 client connection to an +// HTTP/2 server. +type ClientConn struct { + t *Transport + tconn net.Conn // usually *tls.Conn, except specialized impls + tlsState *tls.ConnectionState // nil only for specialized impls + singleUse bool // whether being used for a single http.Request + + // readLoop goroutine fields: + readerDone chan struct{} // closed on error + readerErr error // set before readerDone is closed + + idleTimeout time.Duration // or 0 for never + idleTimer *time.Timer + + mu sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on flow/closed changes + flow flow // our conn-level flow control quota (cs.flow is per stream) + inflow flow // peer's conn-level flow control + closed bool + wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + goAwayDebug string // goAway frame's debug data, retained as a string + streams map[uint32]*clientStream // client-initiated + nextStreamID uint32 + pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams + pings map[[8]byte]chan struct{} // in flight ping data to notification channel + bw *bufio.Writer + br *bufio.Reader + fr *Framer + lastActive time.Time + // Settings from peer: (also guarded by mu) + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + initialWindowSize uint32 + + hbuf bytes.Buffer // HPACK encoder writes into this + henc *hpack.Encoder + freeBuf [][]byte + + wmu sync.Mutex // held while writing; acquire AFTER mu if holding both + werr error // first write error that has occurred +} + +// clientStream is the state for a single HTTP/2 stream. One of these +// is created for each Transport.RoundTrip call. 
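+// A rough lifecycle sketch (illustrative): RoundTrip creates one of these via cc.newStream, writes HEADERS (plus DATA for any request body), then waits on resc; the connection's readLoop goroutine delivers the http.Response or error there, and endStream/endStreamError tear the stream down.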
+type clientStream struct { + cc *ClientConn + req *http.Request + trace *clientTrace // or nil + ID uint32 + resc chan resAndError + bufPipe pipe // buffered pipe with the flow-controlled response payload + startedWrite bool // started request body write; guarded by cc.mu + requestedGzip bool + on100 func() // optional code to run if we get a 100 Continue response + + flow flow // guarded by cc.mu + inflow flow // guarded by cc.mu + bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read + readErr error // sticky read error; owned by transportResponseBody.Read + stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu + didReset bool // whether we sent a RST_STREAM to the server; guarded by cc.mu + + peerReset chan struct{} // closed on peer reset + resetErr error // populated before peerReset is closed + + done chan struct{} // closed when the stream is removed from the cc.streams map; close calls guarded by cc.mu + + // owned by clientConnReadLoop: + firstByte bool // got the first response byte + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) + + trailer http.Header // accumulated trailers + resTrailer *http.Header // client's Response.Trailer +} + +// awaitRequestCancel waits for the user to cancel a request or for the done +// channel to be signaled. A non-nil error is returned only if the request was +// canceled. +func awaitRequestCancel(req *http.Request, done <-chan struct{}) error { + ctx := reqContext(req) + if req.Cancel == nil && ctx.Done() == nil { + return nil + } + select { + case <-req.Cancel: + return errRequestCanceled + case <-ctx.Done(): + return ctx.Err() + case <-done: + return nil + } +} + +// awaitRequestCancel waits for the user to cancel a request, its context to +// expire, or for the request to be done (any way it might be removed from the +// cc.streams map: peer reset, successful completion, TCP connection breakage, +// etc). If the request is canceled, then cs will be canceled and closed. +func (cs *clientStream) awaitRequestCancel(req *http.Request) { + if err := awaitRequestCancel(req, cs.done); err != nil { + cs.cancelStream() + cs.bufPipe.CloseWithError(err) + } +} + +func (cs *clientStream) cancelStream() { + cc := cs.cc + cc.mu.Lock() + didReset := cs.didReset + cs.didReset = true + cc.mu.Unlock() + + if !didReset { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + cc.forgetStreamID(cs.ID) + } +} + +// checkResetOrDone reports any error sent in a RST_STREAM frame by the +// server, or errStreamClosed if the stream is complete.
+func (cs *clientStream) checkResetOrDone() error { + select { + case <-cs.peerReset: + return cs.resetErr + case <-cs.done: + return errStreamClosed + default: + return nil + } +} + +func (cs *clientStream) getStartedWrite() bool { + cc := cs.cc + cc.mu.Lock() + defer cc.mu.Unlock() + return cs.startedWrite +} + +func (cs *clientStream) abortRequestBodyWrite(err error) { + if err == nil { + panic("nil error") + } + cc := cs.cc + cc.mu.Lock() + cs.stopReqBody = err + cc.cond.Broadcast() + cc.mu.Unlock() +} + +type stickyErrWriter struct { + w io.Writer + err *error +} + +func (sew stickyErrWriter) Write(p []byte) (n int, err error) { + if *sew.err != nil { + return 0, *sew.err + } + n, err = sew.w.Write(p) + *sew.err = err + return +} + +// noCachedConnError is the concrete type of ErrNoCachedConn, which +// needs to be detected by net/http regardless of whether it's its +// bundled version (in h2_bundle.go with a rewritten type name) or +// from a user's x/net/http2. As such, it has a unique method name +// (IsHTTP2NoCachedConnError) that net/http sniffs for via func +// isNoCachedConnError. +type noCachedConnError struct{} + +func (noCachedConnError) IsHTTP2NoCachedConnError() {} +func (noCachedConnError) Error() string { return "http2: no cached connection was available" } + +// isNoCachedConnError reports whether err is of type noCachedConnError +// or its equivalent renamed type in net/http2's h2_bundle.go. Both types +// may coexist in the same running program. +func isNoCachedConnError(err error) bool { + _, ok := err.(interface{ IsHTTP2NoCachedConnError() }) + return ok +} + +var ErrNoCachedConn error = noCachedConnError{} + +// RoundTripOpt are options for the Transport.RoundTripOpt method. +type RoundTripOpt struct { + // OnlyCachedConn controls whether RoundTripOpt may + // create a new TCP connection. If set true and + // no cached connection is available, RoundTripOpt + // will return ErrNoCachedConn. + OnlyCachedConn bool +} + +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + return t.RoundTripOpt(req, RoundTripOpt{}) +} + +// authorityAddr converts a given authority (a host/IP, or host:port / ip:port) +// to a host:port. The port 443 is added if needed. +func authorityAddr(scheme string, authority string) (addr string) { + host, port, err := net.SplitHostPort(authority) + if err != nil { // authority didn't have a port + port = "443" + if scheme == "http" { + port = "80" + } + host = authority + } + if a, err := idna.ToASCII(host); err == nil { + host = a + } + // IPv6 address literal, without a port: + if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { + return host + ":" + port + } + return net.JoinHostPort(host, port) +} + +// RoundTripOpt is like RoundTrip, but takes options. +func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { + if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { + return nil, errors.New("http2: unsupported scheme") + } + + addr := authorityAddr(req.URL.Scheme, req.URL.Host) + for retry := 0; ; retry++ { + cc, err := t.connPool().GetClientConn(req, addr) + if err != nil { + t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) + return nil, err + } + traceGotConn(req, cc) + res, gotErrAfterReqBodyWrite, err := cc.roundTrip(req) + if err != nil && retry <= 6 { + if req, err = shouldRetryRequest(req, err, gotErrAfterReqBodyWrite); err == nil { + // After the first retry, do exponential backoff with 10% jitter.
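+ // (Worked example, for illustration: retry 1 waits ~1s, retry 2 ~2s, + // retry 3 ~4s, and so on up to retry 6, each plus up to 10% random + // jitter; the base backoff is 2^(retry-1) seconds.)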
+ if retry == 0 { + continue + } + backoff := float64(uint(1) << (uint(retry) - 1)) + backoff += backoff * (0.1 * mathrand.Float64()) + select { + case <-time.After(time.Second * time.Duration(backoff)): + continue + case <-reqContext(req).Done(): + return nil, reqContext(req).Err() + } + } + } + if err != nil { + t.vlogf("RoundTrip failure: %v", err) + return nil, err + } + return res, nil + } +} + +// CloseIdleConnections closes any connections which were previously +// connected from previous requests but are now sitting idle. +// It does not interrupt any connections currently in use. +func (t *Transport) CloseIdleConnections() { + if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok { + cp.closeIdleConnections() + } +} + +var ( + errClientConnClosed = errors.New("http2: client conn is closed") + errClientConnUnusable = errors.New("http2: client conn not usable") + errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") +) + +// shouldRetryRequest is called by RoundTrip when a request fails to get +// response headers. It is always called with a non-nil error. +// It returns either a request to retry (either the same request, or a +// modified clone), or an error if the request can't be replayed. +func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*http.Request, error) { + if !canRetryError(err) { + return nil, err + } + if !afterBodyWrite { + return req, nil + } + // If the Body is nil (or http.NoBody), it's safe to reuse + // this request and its Body. + if req.Body == nil || reqBodyIsNoBody(req.Body) { + return req, nil + } + // Otherwise we depend on the Request having its GetBody + // func defined. + getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody + if getBody == nil { + return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err) + } + body, err := getBody() + if err != nil { + return nil, err + } + newReq := *req + newReq.Body = body + return &newReq, nil +} + +func canRetryError(err error) bool { + if err == errClientConnUnusable || err == errClientConnGotGoAway { + return true + } + if se, ok := err.(StreamError); ok { + return se.Code == ErrCodeRefusedStream + } + return false +} + +func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host)) + if err != nil { + return nil, err + } + return t.newClientConn(tconn, singleUse) +} + +func (t *Transport) newTLSConfig(host string) *tls.Config { + cfg := new(tls.Config) + if t.TLSClientConfig != nil { + *cfg = *cloneTLSConfig(t.TLSClientConfig) + } + if !strSliceContains(cfg.NextProtos, NextProtoTLS) { + cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) 
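+ // NextProtoTLS is the ALPN protocol "h2"; it is prepended rather than + // appended so HTTP/2 is offered first while preserving any + // caller-supplied protocols.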
+ } + if cfg.ServerName == "" { + cfg.ServerName = host + } + return cfg +} + +func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) { + if t.DialTLS != nil { + return t.DialTLS + } + return t.dialTLSDefault +} + +func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) { + cn, err := tls.Dial(network, addr, cfg) + if err != nil { + return nil, err + } + if err := cn.Handshake(); err != nil { + return nil, err + } + if !cfg.InsecureSkipVerify { + if err := cn.VerifyHostname(cfg.ServerName); err != nil { + return nil, err + } + } + state := cn.ConnectionState() + if p := state.NegotiatedProtocol; p != NextProtoTLS { + return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) + } + if !state.NegotiatedProtocolIsMutual { + return nil, errors.New("http2: could not negotiate protocol mutually") + } + return cn, nil +} + +// disableKeepAlives reports whether connections should be closed as +// soon as possible after handling the first request. +func (t *Transport) disableKeepAlives() bool { + return t.t1 != nil && t.t1.DisableKeepAlives +} + +func (t *Transport) expectContinueTimeout() time.Duration { + if t.t1 == nil { + return 0 + } + return transportExpectContinueTimeout(t.t1) +} + +func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { + return t.newClientConn(c, false) +} + +func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { + cc := &ClientConn{ + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. + streams: make(map[uint32]*clientStream), + singleUse: singleUse, + wantSettingsAck: true, + pings: make(map[[8]byte]chan struct{}), + } + if d := t.idleConnTimeout(); d != 0 { + cc.idleTimeout = d + cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) + } + if VerboseLogs { + t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) + } + + cc.cond = sync.NewCond(&cc.mu) + cc.flow.add(int32(initialWindowSize)) + + // TODO: adjust this writer size to account for frame size + + // MTU + crypto/tls record padding. + cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr}) + cc.br = bufio.NewReader(c) + cc.fr = NewFramer(cc.bw, cc.br) + cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + cc.fr.MaxHeaderListSize = t.maxHeaderListSize() + + // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on + // henc in response to SETTINGS frames? + cc.henc = hpack.NewEncoder(&cc.hbuf) + + if cs, ok := c.(connectionStater); ok { + state := cs.ConnectionState() + cc.tlsState = &state + } + + initialSettings := []Setting{ + {ID: SettingEnablePush, Val: 0}, + {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, + } + if max := t.maxHeaderListSize(); max != 0 { + initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) + } + + cc.bw.Write(clientPreface) + cc.fr.WriteSettings(initialSettings...) 
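+ // The peer's connection-level window starts at 65535 bytes per the spec; + // the WINDOW_UPDATE below grants transportDefaultConnFlow (1<<30) more, + // and the inflow.add that follows mirrors the same total locally.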
+ cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) + cc.inflow.add(transportDefaultConnFlow + initialWindowSize) + cc.bw.Flush() + if cc.werr != nil { + return nil, cc.werr + } + + go cc.readLoop() + return cc, nil +} + +func (cc *ClientConn) setGoAway(f *GoAwayFrame) { + cc.mu.Lock() + defer cc.mu.Unlock() + + old := cc.goAway + cc.goAway = f + + // Merge the previous and current GoAway error frames. + if cc.goAwayDebug == "" { + cc.goAwayDebug = string(f.DebugData()) + } + if old != nil && old.ErrCode != ErrCodeNo { + cc.goAway.ErrCode = old.ErrCode + } + last := f.LastStreamID + for streamID, cs := range cc.streams { + if streamID > last { + select { + case cs.resc <- resAndError{err: errClientConnGotGoAway}: + default: + } + } + } +} + +// CanTakeNewRequest reports whether the connection can take a new request, +// meaning it has not been closed or received or sent a GOAWAY. +func (cc *ClientConn) CanTakeNewRequest() bool { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.canTakeNewRequestLocked() +} + +func (cc *ClientConn) canTakeNewRequestLocked() bool { + if cc.singleUse && cc.nextStreamID > 1 { + return false + } + return cc.goAway == nil && !cc.closed && + int64(cc.nextStreamID)+int64(cc.pendingRequests) < math.MaxInt32 +} + +// onIdleTimeout is called from a time.AfterFunc goroutine. It will +// only be called when we're idle, but because we're coming from a new +// goroutine, there could be a new request coming in at the same time, +// so this simply calls the synchronized closeIfIdle to shut down this +// connection. The timer could just call closeIfIdle, but this is more +// clear. +func (cc *ClientConn) onIdleTimeout() { + cc.closeIfIdle() +} + +func (cc *ClientConn) closeIfIdle() { + cc.mu.Lock() + if len(cc.streams) > 0 { + cc.mu.Unlock() + return + } + cc.closed = true + nextID := cc.nextStreamID + // TODO: do clients send GOAWAY too? maybe? Just Close: + cc.mu.Unlock() + + if VerboseLogs { + cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2) + } + cc.tconn.Close() +} + +const maxAllocFrameSize = 512 << 10 + +// frameScratchBuffer returns a scratch buffer suitable for writing DATA frames. +// They're capped at the min of the peer's max frame size or 512KB +// (kinda arbitrarily), but definitely capped so we don't allocate 4GB +// buffers. +func (cc *ClientConn) frameScratchBuffer() []byte { + cc.mu.Lock() + size := cc.maxFrameSize + if size > maxAllocFrameSize { + size = maxAllocFrameSize + } + for i, buf := range cc.freeBuf { + if len(buf) >= int(size) { + cc.freeBuf[i] = nil + cc.mu.Unlock() + return buf[:size] + } + } + cc.mu.Unlock() + return make([]byte, size) +} + +func (cc *ClientConn) putFrameScratchBuffer(buf []byte) { + cc.mu.Lock() + defer cc.mu.Unlock() + const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate. + if len(cc.freeBuf) < maxBufs { + cc.freeBuf = append(cc.freeBuf, buf) + return + } + for i, old := range cc.freeBuf { + if old == nil { + cc.freeBuf[i] = buf + return + } + } + // forget about it. +} + +// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not +// exported. At least they'll be DeepEqual for h1-vs-h2 comparison tests.
+var errRequestCanceled = errors.New("net/http: request canceled") + +func commaSeparatedTrailers(req *http.Request) (string, error) { + keys := make([]string, 0, len(req.Trailer)) + for k := range req.Trailer { + k = http.CanonicalHeaderKey(k) + switch k { + case "Transfer-Encoding", "Trailer", "Content-Length": + return "", &badStringError{"invalid Trailer key", k} + } + keys = append(keys, k) + } + if len(keys) > 0 { + sort.Strings(keys) + return strings.Join(keys, ","), nil + } + return "", nil +} + +func (cc *ClientConn) responseHeaderTimeout() time.Duration { + if cc.t.t1 != nil { + return cc.t.t1.ResponseHeaderTimeout + } + // No way to do this (yet?) with just an http2.Transport. Probably + // no need; Request.Cancel is the new way. We only need to support + // this for compatibility with the old http.Transport fields when + // we're doing transparent http2. + return 0 +} + +// checkConnHeaders checks whether req has any invalid connection-level headers, +// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. +// Certain headers are special-cased as okay but not transmitted later. +func checkConnHeaders(req *http.Request) error { + if v := req.Header.Get("Upgrade"); v != "" { + return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"]) + } + if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { + return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) + } + if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "close" && vv[0] != "keep-alive") { + return fmt.Errorf("http2: invalid Connection request header: %q", vv) + } + return nil +} + +// actualContentLength returns a sanitized version of +// req.ContentLength, where 0 actually means zero (not unknown) and -1 +// means unknown. +func actualContentLength(req *http.Request) int64 { + if req.Body == nil || reqBodyIsNoBody(req.Body) { + return 0 + } + if req.ContentLength != 0 { + return req.ContentLength + } + return -1 +} + +func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + resp, _, err := cc.roundTrip(req) + return resp, err +} + +func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAfterReqBodyWrite bool, err error) { + if err := checkConnHeaders(req); err != nil { + return nil, false, err + } + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + + trailers, err := commaSeparatedTrailers(req) + if err != nil { + return nil, false, err + } + hasTrailers := trailers != "" + + cc.mu.Lock() + if err := cc.awaitOpenSlotForRequest(req); err != nil { + cc.mu.Unlock() + return nil, false, err + } + + body := req.Body + contentLen := actualContentLength(req) + hasBody := contentLen != 0 + + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + var requestedGzip bool + if !cc.t.disableCompression() && + req.Header.Get("Accept-Encoding") == "" && + req.Header.Get("Range") == "" && + req.Method != "HEAD" { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway.
See https://golang.org/issue/8923 + requestedGzip = true + } + + // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is + // sent by writeRequestBody below, along with any Trailers, + // again in form HEADERS{1}, CONTINUATION{0,}) + hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen) + if err != nil { + cc.mu.Unlock() + return nil, false, err + } + + cs := cc.newStream() + cs.req = req + cs.trace = requestTrace(req) + cs.requestedGzip = requestedGzip + bodyWriter := cc.t.getBodyWriterState(cs, body) + cs.on100 = bodyWriter.on100 + + cc.wmu.Lock() + endStream := !hasBody && !hasTrailers + werr := cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) + cc.wmu.Unlock() + traceWroteHeaders(cs.trace) + cc.mu.Unlock() + + if werr != nil { + if hasBody { + req.Body.Close() // per RoundTripper contract + bodyWriter.cancel() + } + cc.forgetStreamID(cs.ID) + // Don't bother sending a RST_STREAM (our write already failed; + // no need to keep writing) + traceWroteRequest(cs.trace, werr) + return nil, false, werr + } + + var respHeaderTimer <-chan time.Time + if hasBody { + bodyWriter.scheduleBodyWrite() + } else { + traceWroteRequest(cs.trace, nil) + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } + } + + readLoopResCh := cs.resc + bodyWritten := false + ctx := reqContext(req) + + handleReadLoopResponse := func(re resAndError) (*http.Response, bool, error) { + res := re.res + if re.err != nil || res.StatusCode > 299 { + // On error or status code 3xx, 4xx, 5xx, etc abort any + // ongoing write, assuming that the server doesn't care + // about our request body. If the server replied with 1xx or + // 2xx, however, then assume the server DOES potentially + // want our body (e.g. full-duplex streaming: + // golang.org/issue/13444). If it turns out the server + // doesn't, they'll RST_STREAM us soon enough. This is a + // heuristic to avoid adding knobs to Transport. Hopefully + // we can keep it. + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWrite) + } + if re.err != nil { + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), re.err + } + res.Request = req + res.TLS = cc.tlsState + return res, false, nil + } + + for { + select { + case re := <-readLoopResCh: + return handleReadLoopResponse(re) + case <-respHeaderTimer: + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), errTimeout + case <-ctx.Done(): + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), ctx.Err() + case <-req.Cancel: + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), errRequestCanceled + case <-cs.peerReset: + // processResetStream already removed the + // stream from the streams map; no need for + // forgetStreamID. + return nil, cs.getStartedWrite(), cs.resetErr + case err := <-bodyWriter.resc: + // Prefer the read loop's response, if available. Issue 16102. 
+ select { + case re := <-readLoopResCh: + return handleReadLoopResponse(re) + default: + } + if err != nil { + return nil, cs.getStartedWrite(), err + } + bodyWritten = true + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } + } + } +} + +// awaitOpenSlotForRequest waits until len(streams) < maxConcurrentStreams. +// Must hold cc.mu. +func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error { + var waitingForConn chan struct{} + var waitingForConnErr error // guarded by cc.mu + for { + cc.lastActive = time.Now() + if cc.closed || !cc.canTakeNewRequestLocked() { + return errClientConnUnusable + } + if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) { + if waitingForConn != nil { + close(waitingForConn) + } + return nil + } + // Unfortunately, we cannot wait on a condition variable and channel at + // the same time, so instead, we spin up a goroutine to check if the + // request is canceled while we wait for a slot to open in the connection. + if waitingForConn == nil { + waitingForConn = make(chan struct{}) + go func() { + if err := awaitRequestCancel(req, waitingForConn); err != nil { + cc.mu.Lock() + waitingForConnErr = err + cc.cond.Broadcast() + cc.mu.Unlock() + } + }() + } + cc.pendingRequests++ + cc.cond.Wait() + cc.pendingRequests-- + if waitingForConnErr != nil { + return waitingForConnErr + } + } +} + +// requires cc.wmu be held +func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize int, hdrs []byte) error { + first := true // first frame written (HEADERS is first, then CONTINUATION) + for len(hdrs) > 0 && cc.werr == nil { + chunk := hdrs + if len(chunk) > maxFrameSize { + chunk = chunk[:maxFrameSize] + } + hdrs = hdrs[len(chunk):] + endHeaders := len(hdrs) == 0 + if first { + cc.fr.WriteHeaders(HeadersFrameParam{ + StreamID: streamID, + BlockFragment: chunk, + EndStream: endStream, + EndHeaders: endHeaders, + }) + first = false + } else { + cc.fr.WriteContinuation(streamID, endHeaders, chunk) + } + } + // TODO(bradfitz): this Flush could potentially block (as + // could the WriteHeaders call(s) above), which means they + // wouldn't respond to Request.Cancel being readable. That's + // rare, but this should probably be in a goroutine. + cc.bw.Flush() + return cc.werr +} + +// internal error values; they don't escape to callers +var ( + // abort request body write; don't send cancel + errStopReqBodyWrite = errors.New("http2: aborting request body write") + + // abort request body write, but send stream reset of cancel. 
+ errStopReqBodyWriteAndCancel = errors.New("http2: canceling request") +) + +func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) { + cc := cs.cc + sentEnd := false // whether we sent the final DATA frame w/ END_STREAM + buf := cc.frameScratchBuffer() + defer cc.putFrameScratchBuffer(buf) + + defer func() { + traceWroteRequest(cs.trace, err) + // TODO: write h12Compare test showing whether + // Request.Body is closed by the Transport, + // and in multiple cases: server replies <=299 and >299 + // while still writing request body + cerr := bodyCloser.Close() + if err == nil { + err = cerr + } + }() + + req := cs.req + hasTrailers := req.Trailer != nil + + var sawEOF bool + for !sawEOF { + n, err := body.Read(buf) + if err == io.EOF { + sawEOF = true + err = nil + } else if err != nil { + return err + } + + remain := buf[:n] + for len(remain) > 0 && err == nil { + var allowed int32 + allowed, err = cs.awaitFlowControl(len(remain)) + switch { + case err == errStopReqBodyWrite: + return err + case err == errStopReqBodyWriteAndCancel: + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + return err + case err != nil: + return err + } + cc.wmu.Lock() + data := remain[:allowed] + remain = remain[allowed:] + sentEnd = sawEOF && len(remain) == 0 && !hasTrailers + err = cc.fr.WriteData(cs.ID, sentEnd, data) + if err == nil { + // TODO(bradfitz): this flush is for latency, not bandwidth. + // Most requests won't need this. Make this opt-in or + // opt-out? Use some heuristic on the body type? Nagle-like + // timers? Based on 'n'? Only last chunk of this for loop, + // unless flow control tokens are low? For now, always. + // If we change this, see comment below. + err = cc.bw.Flush() + } + cc.wmu.Unlock() + } + if err != nil { + return err + } + } + + if sentEnd { + // Already sent END_STREAM (which implies we have no + // trailers) and flushed, because currently all + // WriteData frames above get a flush. So we're done. + return nil + } + + var trls []byte + if hasTrailers { + cc.mu.Lock() + trls, err = cc.encodeTrailers(req) + cc.mu.Unlock() + if err != nil { + cc.writeStreamReset(cs.ID, ErrCodeInternal, err) + cc.forgetStreamID(cs.ID) + return err + } + } + + cc.mu.Lock() + maxFrameSize := int(cc.maxFrameSize) + cc.mu.Unlock() + + cc.wmu.Lock() + defer cc.wmu.Unlock() + + // Two ways to send END_STREAM: either with trailers, or + // with an empty DATA frame. + if len(trls) > 0 { + err = cc.writeHeaders(cs.ID, true, maxFrameSize, trls) + } else { + err = cc.fr.WriteData(cs.ID, true, nil) + } + if ferr := cc.bw.Flush(); ferr != nil && err == nil { + err = ferr + } + return err +} + +// awaitFlowControl waits for [1, min(maxBytes, cc.maxFrameSize)] flow +// control tokens from the server. +// It returns either the non-zero number of tokens taken or an error +// if the stream is dead.
+func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) { + cc := cs.cc + cc.mu.Lock() + defer cc.mu.Unlock() + for { + if cc.closed { + return 0, errClientConnClosed + } + if cs.stopReqBody != nil { + return 0, cs.stopReqBody + } + if err := cs.checkResetOrDone(); err != nil { + return 0, err + } + if a := cs.flow.available(); a > 0 { + take := a + if int(take) > maxBytes { + + take = int32(maxBytes) // can't truncate int; take is int32 + } + if take > int32(cc.maxFrameSize) { + take = int32(cc.maxFrameSize) + } + cs.flow.take(take) + return take, nil + } + cc.cond.Wait() + } +} + +type badStringError struct { + what string + str string +} + +func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } + +// requires cc.mu be held. +func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { + cc.hbuf.Reset() + + host := req.Host + if host == "" { + host = req.URL.Host + } + host, err := httplex.PunycodeHostPort(host) + if err != nil { + return nil, err + } + + var path string + if req.Method != "CONNECT" { + path = req.URL.RequestURI() + if !validPseudoPath(path) { + orig := path + path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) + if !validPseudoPath(path) { + if req.URL.Opaque != "" { + return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) + } else { + return nil, fmt.Errorf("invalid request :path %q", orig) + } + } + } + } + + // Check for any invalid headers and return an error before we + // potentially pollute our hpack state. (We want to be able to + // continue to reuse the hpack encoder for future requests) + for k, vv := range req.Header { + if !httplex.ValidHeaderFieldName(k) { + return nil, fmt.Errorf("invalid HTTP header name %q", k) + } + for _, v := range vv { + if !httplex.ValidHeaderFieldValue(v) { + return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) + } + } + } + + enumerateHeaders := func(f func(name, value string)) { + // 8.1.2.3 Request Pseudo-Header Fields + // The :path pseudo-header field includes the path and query parts of the + // target URI (the path-absolute production and optionally a '?' character + // followed by the query production (see Sections 3.3 and 3.4 of + // [RFC3986]). + f(":authority", host) + f(":method", req.Method) + if req.Method != "CONNECT" { + f(":path", path) + f(":scheme", req.URL.Scheme) + } + if trailers != "" { + f("trailer", trailers) + } + + var didUA bool + for k, vv := range req.Header { + if strings.EqualFold(k, "host") || strings.EqualFold(k, "content-length") { + // Host is :authority, already sent. + // Content-Length is automatic, set below. + continue + } else if strings.EqualFold(k, "connection") || strings.EqualFold(k, "proxy-connection") || + strings.EqualFold(k, "transfer-encoding") || strings.EqualFold(k, "upgrade") || + strings.EqualFold(k, "keep-alive") { + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We have already checked if any + // are error-worthy so just ignore the rest. + continue + } else if strings.EqualFold(k, "user-agent") { + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, + // include the default (below). 
+ didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } + + } + + for _, v := range vv { + f(k, v) + } + } + if shouldSendReqContentLength(req.Method, contentLength) { + f("content-length", strconv.FormatInt(contentLength, 10)) + } + if addGzipHeader { + f("accept-encoding", "gzip") + } + if !didUA { + f("user-agent", defaultUserAgent) + } + } + + // Do a first pass over the headers counting bytes to ensure + // we don't exceed cc.peerMaxHeaderListSize. This is done as a + // separate pass before encoding the headers to prevent + // modifying the hpack state. + hlSize := uint64(0) + enumerateHeaders(func(name, value string) { + hf := hpack.HeaderField{Name: name, Value: value} + hlSize += uint64(hf.Size()) + }) + + if hlSize > cc.peerMaxHeaderListSize { + return nil, errRequestHeaderListSize + } + + // Header list size is ok. Write the headers. + enumerateHeaders(func(name, value string) { + cc.writeHeader(strings.ToLower(name), value) + }) + + return cc.hbuf.Bytes(), nil +} + +// shouldSendReqContentLength reports whether the http2.Transport should send +// a "content-length" request header. This logic is basically a copy of the net/http +// transferWriter.shouldSendContentLength. +// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). +// -1 means unknown. +func shouldSendReqContentLength(method string, contentLength int64) bool { + if contentLength > 0 { + return true + } + if contentLength < 0 { + return false + } + // For zero bodies, whether we send a content-length depends on the method. + // It also kinda doesn't matter for http2 either way, with END_STREAM. + switch method { + case "POST", "PUT", "PATCH": + return true + default: + return false + } +} + +// requires cc.mu be held. +func (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) { + cc.hbuf.Reset() + + hlSize := uint64(0) + for k, vv := range req.Trailer { + for _, v := range vv { + hf := hpack.HeaderField{Name: k, Value: v} + hlSize += uint64(hf.Size()) + } + } + if hlSize > cc.peerMaxHeaderListSize { + return nil, errRequestHeaderListSize + } + + for k, vv := range req.Trailer { + // Transfer-Encoding, etc.. have already been filtered at the + // start of RoundTrip + lowKey := strings.ToLower(k) + for _, v := range vv { + cc.writeHeader(lowKey, v) + } + } + return cc.hbuf.Bytes(), nil +} + +func (cc *ClientConn) writeHeader(name, value string) { + if VerboseLogs { + log.Printf("http2: Transport encoding header %q = %q", name, value) + } + cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) +} + +type resAndError struct { + res *http.Response + err error +} + +// requires cc.mu be held. 
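+// Client-initiated streams use odd IDs (1, 3, 5, ...): nextStreamID +// starts at 1 and advances by 2 per stream (RFC 7540 section 5.1.1).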
+func (cc *ClientConn) newStream() *clientStream { + cs := &clientStream{ + cc: cc, + ID: cc.nextStreamID, + resc: make(chan resAndError, 1), + peerReset: make(chan struct{}), + done: make(chan struct{}), + } + cs.flow.add(int32(cc.initialWindowSize)) + cs.flow.setConnFlow(&cc.flow) + cs.inflow.add(transportDefaultStreamFlow) + cs.inflow.setConnFlow(&cc.inflow) + cc.nextStreamID += 2 + cc.streams[cs.ID] = cs + return cs +} + +func (cc *ClientConn) forgetStreamID(id uint32) { + cc.streamByID(id, true) +} + +func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { + cc.mu.Lock() + defer cc.mu.Unlock() + cs := cc.streams[id] + if andRemove && cs != nil && !cc.closed { + cc.lastActive = time.Now() + delete(cc.streams, id) + if len(cc.streams) == 0 && cc.idleTimer != nil { + cc.idleTimer.Reset(cc.idleTimeout) + } + close(cs.done) + // Wake up checkResetOrDone via clientStream.awaitFlowControl and + // wake up RoundTrip if there is a pending request. + cc.cond.Broadcast() + } + return cs +} + +// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. +type clientConnReadLoop struct { + cc *ClientConn + closeWhenIdle bool +} + +// readLoop runs in its own goroutine and reads and dispatches frames. +func (cc *ClientConn) readLoop() { + rl := &clientConnReadLoop{cc: cc} + defer rl.cleanup() + cc.readerErr = rl.run() + if ce, ok := cc.readerErr.(ConnectionError); ok { + cc.wmu.Lock() + cc.fr.WriteGoAway(0, ErrCode(ce), nil) + cc.wmu.Unlock() + } +} + +// GoAwayError is returned by the Transport when the server closes the +// TCP connection after sending a GOAWAY frame. +type GoAwayError struct { + LastStreamID uint32 + ErrCode ErrCode + DebugData string +} + +func (e GoAwayError) Error() string { + return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q", + e.LastStreamID, e.ErrCode, e.DebugData) +} + +func isEOFOrNetReadError(err error) bool { + if err == io.EOF { + return true + } + ne, ok := err.(*net.OpError) + return ok && ne.Op == "read" +} + +func (rl *clientConnReadLoop) cleanup() { + cc := rl.cc + defer cc.tconn.Close() + defer cc.t.connPool().MarkDead(cc) + defer close(cc.readerDone) + + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + + // Close any response bodies if the server closes prematurely. + // TODO: also do this if we've written the headers but not + // gotten a response yet. 
+ err := cc.readerErr + cc.mu.Lock() + if cc.goAway != nil && isEOFOrNetReadError(err) { + err = GoAwayError{ + LastStreamID: cc.goAway.LastStreamID, + ErrCode: cc.goAway.ErrCode, + DebugData: cc.goAwayDebug, + } + } else if err == io.EOF { + err = io.ErrUnexpectedEOF + } + for _, cs := range cc.streams { + cs.bufPipe.CloseWithError(err) // no-op if already closed + select { + case cs.resc <- resAndError{err: err}: + default: + } + close(cs.done) + } + cc.closed = true + cc.cond.Broadcast() + cc.mu.Unlock() +} + +func (rl *clientConnReadLoop) run() error { + cc := rl.cc + rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse + gotReply := false // ever saw a HEADERS reply + gotSettings := false + for { + f, err := cc.fr.ReadFrame() + if err != nil { + cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) + } + if se, ok := err.(StreamError); ok { + if cs := cc.streamByID(se.StreamID, false); cs != nil { + cs.cc.writeStreamReset(cs.ID, se.Code, err) + cs.cc.forgetStreamID(cs.ID) + if se.Cause == nil { + se.Cause = cc.fr.errDetail + } + rl.endStreamError(cs, se) + } + continue + } else if err != nil { + return err + } + if VerboseLogs { + cc.vlogf("http2: Transport received %s", summarizeFrame(f)) + } + if !gotSettings { + if _, ok := f.(*SettingsFrame); !ok { + cc.logf("protocol error: received %T before a SETTINGS frame", f) + return ConnectionError(ErrCodeProtocol) + } + gotSettings = true + } + maybeIdle := false // whether frame might transition us to idle + + switch f := f.(type) { + case *MetaHeadersFrame: + err = rl.processHeaders(f) + maybeIdle = true + gotReply = true + case *DataFrame: + err = rl.processData(f) + maybeIdle = true + case *GoAwayFrame: + err = rl.processGoAway(f) + maybeIdle = true + case *RSTStreamFrame: + err = rl.processResetStream(f) + maybeIdle = true + case *SettingsFrame: + err = rl.processSettings(f) + case *PushPromiseFrame: + err = rl.processPushPromise(f) + case *WindowUpdateFrame: + err = rl.processWindowUpdate(f) + case *PingFrame: + err = rl.processPing(f) + default: + cc.logf("Transport: unhandled response frame type %T", f) + } + if err != nil { + if VerboseLogs { + cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) + } + return err + } + if rl.closeWhenIdle && gotReply && maybeIdle { + cc.closeIfIdle() + } + } +} + +func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, false) + if cs == nil { + // We'd get here if we canceled a request while the + // server had its response still in flight. So if this + // was just something we canceled, ignore it. + return nil + } + if f.StreamEnded() { + // Issue 20521: If the stream has ended, streamByID() causes + // clientStream.done to be closed, which causes the request's bodyWriter + // to be closed with an errStreamClosed, which may be received by + // clientConn.RoundTrip before the result of processing these headers. + // Deferring stream closure allows the header processing to occur first. + // clientConn.RoundTrip may still receive the bodyWriter error first, but + // the fix for issue 16102 prioritises any response. + // + // Issue 22413: If there is no request body, we should close the + // stream before writing to cs.resc so that the stream is closed + // immediately once RoundTrip returns. 
+ if cs.req.Body != nil { + defer cc.forgetStreamID(f.StreamID) + } else { + cc.forgetStreamID(f.StreamID) + } + } + if !cs.firstByte { + if cs.trace != nil { + // TODO(bradfitz): move first response byte earlier, + // when we first read the 9 byte header, not waiting + // until all the HEADERS+CONTINUATION frames have been + // merged. This works for now. + traceFirstResponseByte(cs.trace) + } + cs.firstByte = true + } + if !cs.pastHeaders { + cs.pastHeaders = true + } else { + return rl.processTrailers(cs, f) + } + + res, err := rl.handleResponse(cs, f) + if err != nil { + if _, ok := err.(ConnectionError); ok { + return err + } + // Any other error type is a stream error. + cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) + cc.forgetStreamID(cs.ID) + cs.resc <- resAndError{err: err} + return nil // return nil from process* funcs to keep conn alive + } + if res == nil { + // (nil, nil) special case. See handleResponse docs. + return nil + } + cs.resTrailer = &res.Trailer + cs.resc <- resAndError{res: res} + return nil +} + +// may return error types nil, or ConnectionError. Any other error value +// is a StreamError of type ErrCodeProtocol. The returned error in that case +// is the detail. +// +// As a special case, handleResponse may return (nil, nil) to skip the +// frame (currently only used for 100 expect continue). This special +// case is going away after Issue 13851 is fixed. +func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { + if f.Truncated { + return nil, errResponseHeaderListSize + } + + status := f.PseudoValue("status") + if status == "" { + return nil, errors.New("malformed response from server: missing status pseudo header") + } + statusCode, err := strconv.Atoi(status) + if err != nil { + return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header") + } + + if statusCode == 100 { + traceGot100Continue(cs.trace) + if cs.on100 != nil { + cs.on100() // forces any write delay timer to fire + } + cs.pastHeaders = false // do it all again + return nil, nil + } + + header := make(http.Header) + res := &http.Response{ + Proto: "HTTP/2.0", + ProtoMajor: 2, + Header: header, + StatusCode: statusCode, + Status: status + " " + http.StatusText(statusCode), + } + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + if key == "Trailer" { + t := res.Trailer + if t == nil { + t = make(http.Header) + res.Trailer = t + } + foreachHeaderElement(hf.Value, func(v string) { + t[http.CanonicalHeaderKey(v)] = nil + }) + } else { + header[key] = append(header[key], hf.Value) + } + } + + streamEnded := f.StreamEnded() + isHead := cs.req.Method == "HEAD" + if !streamEnded || isHead { + res.ContentLength = -1 + if clens := res.Header["Content-Length"]; len(clens) == 1 { + if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil { + res.ContentLength = clen64 + } else { + // TODO: care? unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. + } + } else if len(clens) > 1 { + // TODO: care? unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. 
+ } + } + + if streamEnded || isHead { + res.Body = noBody + return res, nil + } + + cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}} + cs.bytesRemain = res.ContentLength + res.Body = transportResponseBody{cs} + go cs.awaitRequestCancel(cs.req) + + if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { + res.Header.Del("Content-Encoding") + res.Header.Del("Content-Length") + res.ContentLength = -1 + res.Body = &gzipReader{body: res.Body} + setResponseUncompressed(res) + } + return res, nil +} + +func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error { + if cs.pastTrailers { + // Too many HEADERS frames for this stream. + return ConnectionError(ErrCodeProtocol) + } + cs.pastTrailers = true + if !f.StreamEnded() { + // We expect that any headers for trailers also + // has END_STREAM. + return ConnectionError(ErrCodeProtocol) + } + if len(f.PseudoFields()) > 0 { + // No pseudo header fields are defined for trailers. + // TODO: ConnectionError might be overly harsh? Check. + return ConnectionError(ErrCodeProtocol) + } + + trailer := make(http.Header) + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + trailer[key] = append(trailer[key], hf.Value) + } + cs.trailer = trailer + + rl.endStream(cs) + return nil +} + +// transportResponseBody is the concrete type of Transport.RoundTrip's +// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body. +// On Close it sends RST_STREAM if EOF wasn't already seen. +type transportResponseBody struct { + cs *clientStream +} + +func (b transportResponseBody) Read(p []byte) (n int, err error) { + cs := b.cs + cc := cs.cc + + if cs.readErr != nil { + return 0, cs.readErr + } + n, err = b.cs.bufPipe.Read(p) + if cs.bytesRemain != -1 { + if int64(n) > cs.bytesRemain { + n = int(cs.bytesRemain) + if err == nil { + err = errors.New("net/http: server replied with more than declared Content-Length; truncated") + cc.writeStreamReset(cs.ID, ErrCodeProtocol, err) + } + cs.readErr = err + return int(cs.bytesRemain), err + } + cs.bytesRemain -= int64(n) + if err == io.EOF && cs.bytesRemain > 0 { + err = io.ErrUnexpectedEOF + cs.readErr = err + return n, err + } + } + if n == 0 { + // No flow control tokens to send back. + return + } + + cc.mu.Lock() + defer cc.mu.Unlock() + + var connAdd, streamAdd int32 + // Check the conn-level first, before the stream-level. + if v := cc.inflow.available(); v < transportDefaultConnFlow/2 { + connAdd = transportDefaultConnFlow - v + cc.inflow.add(connAdd) + } + if err == nil { // No need to refresh if the stream is over or failed. + // Consider any buffered body data (read from the conn but not + // consumed by the client) when computing flow control for this + // stream. 
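+ // (Illustration: with the defaults above, the stream window is 4 MB + // (transportDefaultStreamFlow); once the available window plus + // buffered-but-unread bytes fall below 4 MB minus the 4 KB minimum + // refresh, a WINDOW_UPDATE tops the stream back up to 4 MB.)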
+ v := int(cs.inflow.available()) + cs.bufPipe.Len() + if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh { + streamAdd = int32(transportDefaultStreamFlow - v) + cs.inflow.add(streamAdd) + } + } + if connAdd != 0 || streamAdd != 0 { + cc.wmu.Lock() + defer cc.wmu.Unlock() + if connAdd != 0 { + cc.fr.WriteWindowUpdate(0, mustUint31(connAdd)) + } + if streamAdd != 0 { + cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd)) + } + cc.bw.Flush() + } + return +} + +var errClosedResponseBody = errors.New("http2: response body closed") + +func (b transportResponseBody) Close() error { + cs := b.cs + cc := cs.cc + + serverSentStreamEnd := cs.bufPipe.Err() == io.EOF + unread := cs.bufPipe.Len() + + if unread > 0 || !serverSentStreamEnd { + cc.mu.Lock() + cc.wmu.Lock() + if !serverSentStreamEnd { + cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel) + cs.didReset = true + } + // Return connection-level flow control. + if unread > 0 { + cc.inflow.add(int32(unread)) + cc.fr.WriteWindowUpdate(0, uint32(unread)) + } + cc.bw.Flush() + cc.wmu.Unlock() + cc.mu.Unlock() + } + + cs.bufPipe.BreakWithError(errClosedResponseBody) + cc.forgetStreamID(cs.ID) + return nil +} + +func (rl *clientConnReadLoop) processData(f *DataFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, f.StreamEnded()) + data := f.Data() + if cs == nil { + cc.mu.Lock() + neverSent := cc.nextStreamID + cc.mu.Unlock() + if f.StreamID >= neverSent { + // We never asked for this. + cc.logf("http2: Transport received unsolicited DATA frame; closing connection") + return ConnectionError(ErrCodeProtocol) + } + // We probably did ask for this, but canceled. Just ignore it. + // TODO: be stricter here? only silently ignore things which + // we canceled, but not things which were closed normally + // by the peer? Tough without accumulating too much state. + + // But at least return their flow control: + if f.Length > 0 { + cc.mu.Lock() + cc.inflow.add(int32(f.Length)) + cc.mu.Unlock() + + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(f.Length)) + cc.bw.Flush() + cc.wmu.Unlock() + } + return nil + } + if !cs.firstByte { + cc.logf("protocol error: received DATA before a HEADERS frame") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil + } + if f.Length > 0 { + if cs.req.Method == "HEAD" && len(data) > 0 { + cc.logf("protocol error: received DATA on a HEAD request") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil + } + // Check connection-level flow control. + cc.mu.Lock() + if cs.inflow.available() >= int32(f.Length) { + cs.inflow.take(int32(f.Length)) + } else { + cc.mu.Unlock() + return ConnectionError(ErrCodeFlowControl) + } + // Return any padded flow control now, since we won't + // refund it later on body reads. + var refund int + if pad := int(f.Length) - len(data); pad > 0 { + refund += pad + } + // Return len(data) now if the stream is already closed, + // since data will never be read. 
+ didReset := cs.didReset + if didReset { + refund += len(data) + } + if refund > 0 { + cc.inflow.add(int32(refund)) + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(refund)) + if !didReset { + cs.inflow.add(int32(refund)) + cc.fr.WriteWindowUpdate(cs.ID, uint32(refund)) + } + cc.bw.Flush() + cc.wmu.Unlock() + } + cc.mu.Unlock() + + if len(data) > 0 && !didReset { + if _, err := cs.bufPipe.Write(data); err != nil { + rl.endStreamError(cs, err) + return err + } + } + } + + if f.StreamEnded() { + rl.endStream(cs) + } + return nil +} + +var errInvalidTrailers = errors.New("http2: invalid trailers") + +func (rl *clientConnReadLoop) endStream(cs *clientStream) { + // TODO: check that any declared content-length matches, like + // server.go's (*stream).endStream method. + rl.endStreamError(cs, nil) +} + +func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { + var code func() + if err == nil { + err = io.EOF + code = cs.copyTrailers + } + if isConnectionCloseRequest(cs.req) { + rl.closeWhenIdle = true + } + cs.bufPipe.closeWithErrorAndCode(err, code) + + select { + case cs.resc <- resAndError{err: err}: + default: + } +} + +func (cs *clientStream) copyTrailers() { + for k, vv := range cs.trailer { + t := cs.resTrailer + if *t == nil { + *t = make(http.Header) + } + (*t)[k] = vv + } +} + +func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { + cc := rl.cc + cc.t.connPool().MarkDead(cc) + if f.ErrCode != 0 { + // TODO: deal with GOAWAY more. particularly the error code + cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) + } + cc.setGoAway(f) + return nil +} + +func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { + cc := rl.cc + cc.mu.Lock() + defer cc.mu.Unlock() + + if f.IsAck() { + if cc.wantSettingsAck { + cc.wantSettingsAck = false + return nil + } + return ConnectionError(ErrCodeProtocol) + } + + err := f.ForeachSetting(func(s Setting) error { + switch s.ID { + case SettingMaxFrameSize: + cc.maxFrameSize = s.Val + case SettingMaxConcurrentStreams: + cc.maxConcurrentStreams = s.Val + case SettingMaxHeaderListSize: + cc.peerMaxHeaderListSize = uint64(s.Val) + case SettingInitialWindowSize: + // Values above the maximum flow-control + // window size of 2^31-1 MUST be treated as a + // connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR. + if s.Val > math.MaxInt32 { + return ConnectionError(ErrCodeFlowControl) + } + + // Adjust flow control of currently-open + // frames by the difference of the old initial + // window size and this one. + delta := int32(s.Val) - int32(cc.initialWindowSize) + for _, cs := range cc.streams { + cs.flow.add(delta) + } + cc.cond.Broadcast() + + cc.initialWindowSize = s.Val + default: + // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. 
+ cc.vlogf("Unhandled Setting: %v", s) + } + return nil + }) + if err != nil { + return err + } + + cc.wmu.Lock() + defer cc.wmu.Unlock() + + cc.fr.WriteSettingsAck() + cc.bw.Flush() + return cc.werr +} + +func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, false) + if f.StreamID != 0 && cs == nil { + return nil + } + + cc.mu.Lock() + defer cc.mu.Unlock() + + fl := &cc.flow + if cs != nil { + fl = &cs.flow + } + if !fl.add(int32(f.Increment)) { + return ConnectionError(ErrCodeFlowControl) + } + cc.cond.Broadcast() + return nil +} + +func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { + cs := rl.cc.streamByID(f.StreamID, true) + if cs == nil { + // TODO: return error if server tries to RST_STEAM an idle stream + return nil + } + select { + case <-cs.peerReset: + // Already reset. + // This is the only goroutine + // which closes this, so there + // isn't a race. + default: + err := streamError(cs.ID, f.ErrCode) + cs.resetErr = err + close(cs.peerReset) + cs.bufPipe.CloseWithError(err) + cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl + } + return nil +} + +// Ping sends a PING frame to the server and waits for the ack. +// Public implementation is in go17.go and not_go17.go +func (cc *ClientConn) ping(ctx contextContext) error { + c := make(chan struct{}) + // Generate a random payload + var p [8]byte + for { + if _, err := rand.Read(p[:]); err != nil { + return err + } + cc.mu.Lock() + // check for dup before insert + if _, found := cc.pings[p]; !found { + cc.pings[p] = c + cc.mu.Unlock() + break + } + cc.mu.Unlock() + } + cc.wmu.Lock() + if err := cc.fr.WritePing(false, p); err != nil { + cc.wmu.Unlock() + return err + } + if err := cc.bw.Flush(); err != nil { + cc.wmu.Unlock() + return err + } + cc.wmu.Unlock() + select { + case <-c: + return nil + case <-ctx.Done(): + return ctx.Err() + case <-cc.readerDone: + // connection closed + return cc.readerErr + } +} + +func (rl *clientConnReadLoop) processPing(f *PingFrame) error { + if f.IsAck() { + cc := rl.cc + cc.mu.Lock() + defer cc.mu.Unlock() + // If ack, notify listener if any + if c, ok := cc.pings[f.Data]; ok { + close(c) + delete(cc.pings, f.Data) + } + return nil + } + cc := rl.cc + cc.wmu.Lock() + defer cc.wmu.Unlock() + if err := cc.fr.WritePing(true, f.Data); err != nil { + return err + } + return cc.bw.Flush() +} + +func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { + // We told the peer we don't want them. + // Spec says: + // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH + // setting of the peer endpoint is set to 0. An endpoint that + // has set this setting and has received acknowledgement MUST + // treat the receipt of a PUSH_PROMISE frame as a connection + // error (Section 5.4.1) of type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) +} + +func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { + // TODO: map err to more interesting error codes, once the + // HTTP community comes up with some. But currently for + // RST_STREAM there's no equivalent to GOAWAY frame's debug + // data, and the error codes are all pretty vague ("cancel"). 
+ cc.wmu.Lock() + cc.fr.WriteRSTStream(streamID, code) + cc.bw.Flush() + cc.wmu.Unlock() +} + +var ( + errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") + errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit") + errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") +) + +func (cc *ClientConn) logf(format string, args ...interface{}) { + cc.t.logf(format, args...) +} + +func (cc *ClientConn) vlogf(format string, args ...interface{}) { + cc.t.vlogf(format, args...) +} + +func (t *Transport) vlogf(format string, args ...interface{}) { + if VerboseLogs { + t.logf(format, args...) + } +} + +func (t *Transport) logf(format string, args ...interface{}) { + log.Printf(format, args...) +} + +var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) + +func strSliceContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} + +type erringRoundTripper struct{ err error } + +func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } + +// gzipReader wraps a response body so it can lazily +// call gzip.NewReader on the first call to Read +type gzipReader struct { + body io.ReadCloser // underlying Response.Body + zr *gzip.Reader // lazily-initialized gzip reader + zerr error // sticky error +} + +func (gz *gzipReader) Read(p []byte) (n int, err error) { + if gz.zerr != nil { + return 0, gz.zerr + } + if gz.zr == nil { + gz.zr, err = gzip.NewReader(gz.body) + if err != nil { + gz.zerr = err + return 0, err + } + } + return gz.zr.Read(p) +} + +func (gz *gzipReader) Close() error { + return gz.body.Close() +} + +type errorReader struct{ err error } + +func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } + +// bodyWriterState encapsulates various state around the Transport's writing +// of the request body, particularly regarding doing delayed writes of the body +// when the request contains "Expect: 100-continue". +type bodyWriterState struct { + cs *clientStream + timer *time.Timer // if non-nil, we're doing a delayed write + fnonce *sync.Once // to call fn with + fn func() // the code to run in the goroutine, writing the body + resc chan error // result of fn's execution + delay time.Duration // how long we should delay a delayed write for +} + +func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) { + s.cs = cs + if body == nil { + return + } + resc := make(chan error, 1) + s.resc = resc + s.fn = func() { + cs.cc.mu.Lock() + cs.startedWrite = true + cs.cc.mu.Unlock() + resc <- cs.writeRequestBody(body, cs.req.Body) + } + s.delay = t.expectContinueTimeout() + if s.delay == 0 || + !httplex.HeaderValuesContainsToken( + cs.req.Header["Expect"], + "100-continue") { + return + } + s.fnonce = new(sync.Once) + + // Arm the timer with a very large duration, which we'll + // intentionally lower later. It has to be large now because + // we need a handle to it before writing the headers, but the + // s.delay value is defined to not start until after the + // request headers were written. 
+ const hugeDuration = 365 * 24 * time.Hour + s.timer = time.AfterFunc(hugeDuration, func() { + s.fnonce.Do(s.fn) + }) + return +} + +func (s bodyWriterState) cancel() { + if s.timer != nil { + s.timer.Stop() + } +} + +func (s bodyWriterState) on100() { + if s.timer == nil { + // If we didn't do a delayed write, ignore the server's + // bogus 100 continue response. + return + } + s.timer.Stop() + go func() { s.fnonce.Do(s.fn) }() +} + +// scheduleBodyWrite starts writing the body, either immediately (in +// the common case) or after the delay timeout. It should not be +// called until after the headers have been written. +func (s bodyWriterState) scheduleBodyWrite() { + if s.timer == nil { + // We're not doing a delayed write (see + // getBodyWriterState), so just start the writing + // goroutine immediately. + go s.fn() + return + } + traceWait100Continue(s.cs.trace) + if s.timer.Stop() { + s.timer.Reset(s.delay) + } +} + +// isConnectionCloseRequest reports whether req should use its own +// connection for a single request and then close the connection. +func isConnectionCloseRequest(req *http.Request) bool { + return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close") +} diff --git a/vendor/golang.org/x/net/http2/transport_test.go b/vendor/golang.org/x/net/http2/transport_test.go new file mode 100644 index 0000000..fe04bd2 --- /dev/null +++ b/vendor/golang.org/x/net/http2/transport_test.go @@ -0,0 +1,3847 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bufio" + "bytes" + "crypto/tls" + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "math/rand" + "net" + "net/http" + "net/http/httptest" + "net/url" + "os" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/http2/hpack" +) + +var ( + extNet = flag.Bool("extnet", false, "do external network tests") + transportHost = flag.String("transporthost", "http2.golang.org", "hostname to use for TestTransport") + insecure = flag.Bool("insecure", false, "insecure TLS dials") // TODO: dead code. remove? 
+) + +var tlsConfigInsecure = &tls.Config{InsecureSkipVerify: true} + +type testContext struct{} + +func (testContext) Done() <-chan struct{} { return make(chan struct{}) } +func (testContext) Err() error { panic("should not be called") } +func (testContext) Deadline() (deadline time.Time, ok bool) { return time.Time{}, false } +func (testContext) Value(key interface{}) interface{} { return nil } + +func TestTransportExternal(t *testing.T) { + if !*extNet { + t.Skip("skipping external network test") + } + req, _ := http.NewRequest("GET", "https://"+*transportHost+"/", nil) + rt := &Transport{TLSClientConfig: tlsConfigInsecure} + res, err := rt.RoundTrip(req) + if err != nil { + t.Fatalf("%v", err) + } + res.Write(os.Stdout) +} + +type fakeTLSConn struct { + net.Conn +} + +func (c *fakeTLSConn) ConnectionState() tls.ConnectionState { + return tls.ConnectionState{ + Version: tls.VersionTLS12, + CipherSuite: cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + } +} + +func startH2cServer(t *testing.T) net.Listener { + h2Server := &Server{} + l := newLocalListener(t) + go func() { + conn, err := l.Accept() + if err != nil { + t.Error(err) + return + } + h2Server.ServeConn(&fakeTLSConn{conn}, &ServeConnOpts{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "Hello, %v, http: %v", r.URL.Path, r.TLS == nil) + })}) + }() + return l +} + +func TestTransportH2c(t *testing.T) { + l := startH2cServer(t) + defer l.Close() + req, err := http.NewRequest("GET", "http://"+l.Addr().String()+"/foobar", nil) + if err != nil { + t.Fatal(err) + } + tr := &Transport{ + AllowHTTP: true, + DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) { + return net.Dial(network, addr) + }, + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + if res.ProtoMajor != 2 { + t.Fatal("proto not h2c") + } + body, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if got, want := string(body), "Hello, /foobar, http: true"; got != want { + t.Fatalf("response got %v, want %v", got, want) + } +} + +func TestTransport(t *testing.T) { + const body = "sup" + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, body) + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Fatal(err) + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + + t.Logf("Got res: %+v", res) + if g, w := res.StatusCode, 200; g != w { + t.Errorf("StatusCode = %v; want %v", g, w) + } + if g, w := res.Status, "200 OK"; g != w { + t.Errorf("Status = %q; want %q", g, w) + } + wantHeader := http.Header{ + "Content-Length": []string{"3"}, + "Content-Type": []string{"text/plain; charset=utf-8"}, + "Date": []string{"XXX"}, // see cleanDate + } + cleanDate(res) + if !reflect.DeepEqual(res.Header, wantHeader) { + t.Errorf("res Header = %v; want %v", res.Header, wantHeader) + } + if res.Request != req { + t.Errorf("Response.Request = %p; want %p", res.Request, req) + } + if res.TLS == nil { + t.Error("Response.TLS = nil; want non-nil") + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Errorf("Body read: %v", err) + } else if string(slurp) != body { + t.Errorf("Body = %q; want %q", slurp, body) + } +} + +func onSameConn(t *testing.T, modReq func(*http.Request)) bool { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) 
{ + io.WriteString(w, r.RemoteAddr) + }, optOnlyServer, func(c net.Conn, st http.ConnState) { + t.Logf("conn %v is now state %v", c.RemoteAddr(), st) + }) + defer st.Close() + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + get := func() string { + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Fatal(err) + } + modReq(req) + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatalf("Body read: %v", err) + } + addr := strings.TrimSpace(string(slurp)) + if addr == "" { + t.Fatalf("didn't get an addr in response") + } + return addr + } + first := get() + second := get() + return first == second +} + +func TestTransportReusesConns(t *testing.T) { + if !onSameConn(t, func(*http.Request) {}) { + t.Errorf("first and second responses were on different connections") + } +} + +func TestTransportReusesConn_RequestClose(t *testing.T) { + if onSameConn(t, func(r *http.Request) { r.Close = true }) { + t.Errorf("first and second responses were not on different connections") + } +} + +func TestTransportReusesConn_ConnClose(t *testing.T) { + if onSameConn(t, func(r *http.Request) { r.Header.Set("Connection", "close") }) { + t.Errorf("first and second responses were not on different connections") + } +} + +// Tests that the Transport only keeps one pending dial open per destination address. +// https://golang.org/issue/13397 +func TestTransportGroupsPendingDials(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, r.RemoteAddr) + }, optOnlyServer) + defer st.Close() + tr := &Transport{ + TLSClientConfig: tlsConfigInsecure, + } + defer tr.CloseIdleConnections() + var ( + mu sync.Mutex + dials = map[string]int{} + ) + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Error(err) + return + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Error(err) + return + } + defer res.Body.Close() + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Errorf("Body read: %v", err) + } + addr := strings.TrimSpace(string(slurp)) + if addr == "" { + t.Errorf("didn't get an addr in response") + } + mu.Lock() + dials[addr]++ + mu.Unlock() + }() + } + wg.Wait() + if len(dials) != 1 { + t.Errorf("saw %d dials; want 1: %v", len(dials), dials) + } + tr.CloseIdleConnections() + if err := retry(50, 10*time.Millisecond, func() error { + cp, ok := tr.connPool().(*clientConnPool) + if !ok { + return fmt.Errorf("Conn pool is %T; want *clientConnPool", tr.connPool()) + } + cp.mu.Lock() + defer cp.mu.Unlock() + if len(cp.dialing) != 0 { + return fmt.Errorf("dialing map = %v; want empty", cp.dialing) + } + if len(cp.conns) != 0 { + return fmt.Errorf("conns = %v; want empty", cp.conns) + } + if len(cp.keys) != 0 { + return fmt.Errorf("keys = %v; want empty", cp.keys) + } + return nil + }); err != nil { + t.Errorf("State of pool after CloseIdleConnections: %v", err) + } +} + +func retry(tries int, delay time.Duration, fn func() error) error { + var err error + for i := 0; i < tries; i++ { + err = fn() + if err == nil { + return nil + } + time.Sleep(delay) + } + return err +} + +func TestTransportAbortClosesPipes(t *testing.T) { + shutdown := make(chan struct{}) + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + w.(http.Flusher).Flush() + <-shutdown + }, + 
optOnlyServer, + ) + defer st.Close() + defer close(shutdown) // we must shutdown before st.Close() to avoid hanging + + done := make(chan struct{}) + requestMade := make(chan struct{}) + go func() { + defer close(done) + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Fatal(err) + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + close(requestMade) + _, err = ioutil.ReadAll(res.Body) + if err == nil { + t.Error("expected error from res.Body.Read") + } + }() + + <-requestMade + // Now force the serve loop to end, via closing the connection. + st.closeConn() + // deadlock? that's a bug. + select { + case <-done: + case <-time.After(3 * time.Second): + t.Fatal("timeout") + } +} + +// TODO: merge this with TestTransportBody to make TestTransportRequest? This +// could be a table-driven test with extra goodies. +func TestTransportPath(t *testing.T) { + gotc := make(chan *url.URL, 1) + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + gotc <- r.URL + }, + optOnlyServer, + ) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + const ( + path = "/testpath" + query = "q=1" + ) + surl := st.ts.URL + path + "?" + query + req, err := http.NewRequest("POST", surl, nil) + if err != nil { + t.Fatal(err) + } + c := &http.Client{Transport: tr} + res, err := c.Do(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + got := <-gotc + if got.Path != path { + t.Errorf("Read Path = %q; want %q", got.Path, path) + } + if got.RawQuery != query { + t.Errorf("Read RawQuery = %q; want %q", got.RawQuery, query) + } +} + +func randString(n int) string { + rnd := rand.New(rand.NewSource(int64(n))) + b := make([]byte, n) + for i := range b { + b[i] = byte(rnd.Intn(256)) + } + return string(b) +} + +type panicReader struct{} + +func (panicReader) Read([]byte) (int, error) { panic("unexpected Read") } +func (panicReader) Close() error { panic("unexpected Close") } + +func TestActualContentLength(t *testing.T) { + tests := []struct { + req *http.Request + want int64 + }{ + // Verify we don't read from Body: + 0: { + req: &http.Request{Body: panicReader{}}, + want: -1, + }, + // nil Body means 0, regardless of ContentLength: + 1: { + req: &http.Request{Body: nil, ContentLength: 5}, + want: 0, + }, + // ContentLength is used if set. + 2: { + req: &http.Request{Body: panicReader{}, ContentLength: 5}, + want: 5, + }, + // http.NoBody means 0, not -1. 
+ 3: { + req: &http.Request{Body: go18httpNoBody()}, + want: 0, + }, + } + for i, tt := range tests { + got := actualContentLength(tt.req) + if got != tt.want { + t.Errorf("test[%d]: got %d; want %d", i, got, tt.want) + } + } +} + +func TestTransportBody(t *testing.T) { + bodyTests := []struct { + body string + noContentLen bool + }{ + {body: "some message"}, + {body: "some message", noContentLen: true}, + {body: strings.Repeat("a", 1<<20), noContentLen: true}, + {body: strings.Repeat("a", 1<<20)}, + {body: randString(16<<10 - 1)}, + {body: randString(16 << 10)}, + {body: randString(16<<10 + 1)}, + {body: randString(512<<10 - 1)}, + {body: randString(512 << 10)}, + {body: randString(512<<10 + 1)}, + {body: randString(1<<20 - 1)}, + {body: randString(1 << 20)}, + {body: randString(1<<20 + 2)}, + } + + type reqInfo struct { + req *http.Request + slurp []byte + err error + } + gotc := make(chan reqInfo, 1) + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + slurp, err := ioutil.ReadAll(r.Body) + if err != nil { + gotc <- reqInfo{err: err} + } else { + gotc <- reqInfo{req: r, slurp: slurp} + } + }, + optOnlyServer, + ) + defer st.Close() + + for i, tt := range bodyTests { + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + var body io.Reader = strings.NewReader(tt.body) + if tt.noContentLen { + body = struct{ io.Reader }{body} // just a Reader, hiding concrete type and other methods + } + req, err := http.NewRequest("POST", st.ts.URL, body) + if err != nil { + t.Fatalf("#%d: %v", i, err) + } + c := &http.Client{Transport: tr} + res, err := c.Do(req) + if err != nil { + t.Fatalf("#%d: %v", i, err) + } + defer res.Body.Close() + ri := <-gotc + if ri.err != nil { + t.Errorf("#%d: read error: %v", i, ri.err) + continue + } + if got := string(ri.slurp); got != tt.body { + t.Errorf("#%d: Read body mismatch.\n got: %q (len %d)\nwant: %q (len %d)", i, shortString(got), len(got), shortString(tt.body), len(tt.body)) + } + wantLen := int64(len(tt.body)) + if tt.noContentLen && tt.body != "" { + wantLen = -1 + } + if ri.req.ContentLength != wantLen { + t.Errorf("#%d. 
handler got ContentLength = %v; want %v", i, ri.req.ContentLength, wantLen) + } + } +} + +func shortString(v string) string { + const maxLen = 100 + if len(v) <= maxLen { + return v + } + return fmt.Sprintf("%v[...%d bytes omitted...]%v", v[:maxLen/2], len(v)-maxLen, v[len(v)-maxLen/2:]) +} + +func TestTransportDialTLS(t *testing.T) { + var mu sync.Mutex // guards following + var gotReq, didDial bool + + ts := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + mu.Lock() + gotReq = true + mu.Unlock() + }, + optOnlyServer, + ) + defer ts.Close() + tr := &Transport{ + DialTLS: func(netw, addr string, cfg *tls.Config) (net.Conn, error) { + mu.Lock() + didDial = true + mu.Unlock() + cfg.InsecureSkipVerify = true + c, err := tls.Dial(netw, addr, cfg) + if err != nil { + return nil, err + } + return c, c.Handshake() + }, + } + defer tr.CloseIdleConnections() + client := &http.Client{Transport: tr} + res, err := client.Get(ts.ts.URL) + if err != nil { + t.Fatal(err) + } + res.Body.Close() + mu.Lock() + if !gotReq { + t.Error("didn't get request") + } + if !didDial { + t.Error("didn't use dial hook") + } +} + +func TestConfigureTransport(t *testing.T) { + t1 := &http.Transport{} + err := ConfigureTransport(t1) + if err == errTransportVersion { + t.Skip(err) + } + if err != nil { + t.Fatal(err) + } + if got := fmt.Sprintf("%#v", t1); !strings.Contains(got, `"h2"`) { + // Laziness, to avoid buildtags. + t.Errorf("stringification of HTTP/1 transport didn't contain \"h2\": %v", got) + } + wantNextProtos := []string{"h2", "http/1.1"} + if t1.TLSClientConfig == nil { + t.Errorf("nil t1.TLSClientConfig") + } else if !reflect.DeepEqual(t1.TLSClientConfig.NextProtos, wantNextProtos) { + t.Errorf("TLSClientConfig.NextProtos = %q; want %q", t1.TLSClientConfig.NextProtos, wantNextProtos) + } + if err := ConfigureTransport(t1); err == nil { + t.Error("unexpected success on second call to ConfigureTransport") + } + + // And does it work? 
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, r.Proto) + }, optOnlyServer) + defer st.Close() + + t1.TLSClientConfig.InsecureSkipVerify = true + c := &http.Client{Transport: t1} + res, err := c.Get(st.ts.URL) + if err != nil { + t.Fatal(err) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if got, want := string(slurp), "HTTP/2.0"; got != want { + t.Errorf("body = %q; want %q", got, want) + } +} + +type capitalizeReader struct { + r io.Reader +} + +func (cr capitalizeReader) Read(p []byte) (n int, err error) { + n, err = cr.r.Read(p) + for i, b := range p[:n] { + if b >= 'a' && b <= 'z' { + p[i] = b - ('a' - 'A') + } + } + return +} + +type flushWriter struct { + w io.Writer +} + +func (fw flushWriter) Write(p []byte) (n int, err error) { + n, err = fw.w.Write(p) + if f, ok := fw.w.(http.Flusher); ok { + f.Flush() + } + return +} + +type clientTester struct { + t *testing.T + tr *Transport + sc, cc net.Conn // server and client conn + fr *Framer // server's framer + client func() error + server func() error +} + +func newClientTester(t *testing.T) *clientTester { + var dialOnce struct { + sync.Mutex + dialed bool + } + ct := &clientTester{ + t: t, + } + ct.tr = &Transport{ + TLSClientConfig: tlsConfigInsecure, + DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) { + dialOnce.Lock() + defer dialOnce.Unlock() + if dialOnce.dialed { + return nil, errors.New("only one dial allowed in test mode") + } + dialOnce.dialed = true + return ct.cc, nil + }, + } + + ln := newLocalListener(t) + cc, err := net.Dial("tcp", ln.Addr().String()) + if err != nil { + t.Fatal(err) + + } + sc, err := ln.Accept() + if err != nil { + t.Fatal(err) + } + ln.Close() + ct.cc = cc + ct.sc = sc + ct.fr = NewFramer(sc, sc) + return ct +} + +func newLocalListener(t *testing.T) net.Listener { + ln, err := net.Listen("tcp4", "127.0.0.1:0") + if err == nil { + return ln + } + ln, err = net.Listen("tcp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + return ln +} + +func (ct *clientTester) greet(settings ...Setting) { + buf := make([]byte, len(ClientPreface)) + _, err := io.ReadFull(ct.sc, buf) + if err != nil { + ct.t.Fatalf("reading client preface: %v", err) + } + f, err := ct.fr.ReadFrame() + if err != nil { + ct.t.Fatalf("Reading client settings frame: %v", err) + } + if sf, ok := f.(*SettingsFrame); !ok { + ct.t.Fatalf("Wanted client settings frame; got %v", f) + _ = sf // stash it away? 
+ } + if err := ct.fr.WriteSettings(settings...); err != nil { + ct.t.Fatal(err) + } + if err := ct.fr.WriteSettingsAck(); err != nil { + ct.t.Fatal(err) + } +} + +func (ct *clientTester) readNonSettingsFrame() (Frame, error) { + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return nil, err + } + if _, ok := f.(*SettingsFrame); ok { + continue + } + return f, nil + } +} + +func (ct *clientTester) cleanup() { + ct.tr.CloseIdleConnections() +} + +func (ct *clientTester) run() { + errc := make(chan error, 2) + ct.start("client", errc, ct.client) + ct.start("server", errc, ct.server) + defer ct.cleanup() + for i := 0; i < 2; i++ { + if err := <-errc; err != nil { + ct.t.Error(err) + return + } + } +} + +func (ct *clientTester) start(which string, errc chan<- error, fn func() error) { + go func() { + finished := false + var err error + defer func() { + if !finished { + err = fmt.Errorf("%s goroutine didn't finish.", which) + } else if err != nil { + err = fmt.Errorf("%s: %v", which, err) + } + errc <- err + }() + err = fn() + finished = true + }() +} + +func (ct *clientTester) readFrame() (Frame, error) { + return readFrameTimeout(ct.fr, 2*time.Second) +} + +func (ct *clientTester) firstHeaders() (*HeadersFrame, error) { + for { + f, err := ct.readFrame() + if err != nil { + return nil, fmt.Errorf("ReadFrame while waiting for Headers: %v", err) + } + switch f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + continue + } + hf, ok := f.(*HeadersFrame) + if !ok { + return nil, fmt.Errorf("Got %T; want HeadersFrame", f) + } + return hf, nil + } +} + +type countingReader struct { + n *int64 +} + +func (r countingReader) Read(p []byte) (n int, err error) { + for i := range p { + p[i] = byte(i) + } + atomic.AddInt64(r.n, int64(len(p))) + return len(p), err +} + +func TestTransportReqBodyAfterResponse_200(t *testing.T) { testTransportReqBodyAfterResponse(t, 200) } +func TestTransportReqBodyAfterResponse_403(t *testing.T) { testTransportReqBodyAfterResponse(t, 403) } + +func testTransportReqBodyAfterResponse(t *testing.T, status int) { + const bodySize = 10 << 20 + clientDone := make(chan struct{}) + ct := newClientTester(t) + ct.client = func() error { + defer ct.cc.(*net.TCPConn).CloseWrite() + defer close(clientDone) + + var n int64 // atomic + req, err := http.NewRequest("PUT", "https://dummy.tld/", io.LimitReader(countingReader{&n}, bodySize)) + if err != nil { + return err + } + res, err := ct.tr.RoundTrip(req) + if err != nil { + return fmt.Errorf("RoundTrip: %v", err) + } + defer res.Body.Close() + if res.StatusCode != status { + return fmt.Errorf("status code = %v; want %v", res.StatusCode, status) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("Slurp: %v", err) + } + if len(slurp) > 0 { + return fmt.Errorf("unexpected body: %q", slurp) + } + if status == 200 { + if got := atomic.LoadInt64(&n); got != bodySize { + return fmt.Errorf("For 200 response, Transport wrote %d bytes; want %d", got, bodySize) + } + } else { + if got := atomic.LoadInt64(&n); got == 0 || got >= bodySize { + return fmt.Errorf("For %d response, Transport wrote %d bytes; want (0,%d) exclusive", status, got, bodySize) + } + } + return nil + } + ct.server = func() error { + ct.greet() + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + var dataRecv int64 + var closed bool + for { + f, err := ct.fr.ReadFrame() + if err != nil { + select { + case <-clientDone: + // If the client's done, it + // will have reported any + // errors on its side. 
+ return nil + default: + return err + } + } + //println(fmt.Sprintf("server got frame: %v", f)) + switch f := f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + case *HeadersFrame: + if !f.HeadersEnded() { + return fmt.Errorf("headers should have END_HEADERS be ended: %v", f) + } + if f.StreamEnded() { + return fmt.Errorf("headers contains END_STREAM unexpectedly: %v", f) + } + case *DataFrame: + dataLen := len(f.Data()) + if dataLen > 0 { + if dataRecv == 0 { + enc.WriteField(hpack.HeaderField{Name: ":status", Value: strconv.Itoa(status)}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + } + if err := ct.fr.WriteWindowUpdate(0, uint32(dataLen)); err != nil { + return err + } + if err := ct.fr.WriteWindowUpdate(f.StreamID, uint32(dataLen)); err != nil { + return err + } + } + dataRecv += int64(dataLen) + + if !closed && ((status != 200 && dataRecv > 0) || + (status == 200 && dataRecv == bodySize)) { + closed = true + if err := ct.fr.WriteData(f.StreamID, true, nil); err != nil { + return err + } + } + default: + return fmt.Errorf("Unexpected client frame %v", f) + } + } + } + ct.run() +} + +// See golang.org/issue/13444 +func TestTransportFullDuplex(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) // redundant but for clarity + w.(http.Flusher).Flush() + io.Copy(flushWriter{w}, capitalizeReader{r.Body}) + fmt.Fprintf(w, "bye.\n") + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + + pr, pw := io.Pipe() + req, err := http.NewRequest("PUT", st.ts.URL, ioutil.NopCloser(pr)) + if err != nil { + t.Fatal(err) + } + req.ContentLength = -1 + res, err := c.Do(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + t.Fatalf("StatusCode = %v; want %v", res.StatusCode, 200) + } + bs := bufio.NewScanner(res.Body) + want := func(v string) { + if !bs.Scan() { + t.Fatalf("wanted to read %q but Scan() = false, err = %v", v, bs.Err()) + } + } + write := func(v string) { + _, err := io.WriteString(pw, v) + if err != nil { + t.Fatalf("pipe write: %v", err) + } + } + write("foo\n") + want("FOO") + write("bar\n") + want("BAR") + pw.Close() + want("bye.") + if err := bs.Err(); err != nil { + t.Fatal(err) + } +} + +func TestTransportConnectRequest(t *testing.T) { + gotc := make(chan *http.Request, 1) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + gotc <- r + }, optOnlyServer) + defer st.Close() + + u, err := url.Parse(st.ts.URL) + if err != nil { + t.Fatal(err) + } + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + + tests := []struct { + req *http.Request + want string + }{ + { + req: &http.Request{ + Method: "CONNECT", + Header: http.Header{}, + URL: u, + }, + want: u.Host, + }, + { + req: &http.Request{ + Method: "CONNECT", + Header: http.Header{}, + URL: u, + Host: "example.com:123", + }, + want: "example.com:123", + }, + } + + for i, tt := range tests { + res, err := c.Do(tt.req) + if err != nil { + t.Errorf("%d. 
RoundTrip = %v", i, err) + continue + } + res.Body.Close() + req := <-gotc + if req.Method != "CONNECT" { + t.Errorf("method = %q; want CONNECT", req.Method) + } + if req.Host != tt.want { + t.Errorf("Host = %q; want %q", req.Host, tt.want) + } + if req.URL.Host != tt.want { + t.Errorf("URL.Host = %q; want %q", req.URL.Host, tt.want) + } + } +} + +type headerType int + +const ( + noHeader headerType = iota // omitted + oneHeader + splitHeader // broken into continuation on purpose +) + +const ( + f0 = noHeader + f1 = oneHeader + f2 = splitHeader + d0 = false + d1 = true +) + +// Test all 36 combinations of response frame orders: +// (3 ways of 100-continue) * (2 ways of headers) * (2 ways of data) * (3 ways of trailers):func TestTransportResponsePattern_00f0(t *testing.T) { testTransportResponsePattern(h0, h1, false, h0) } +// Generated by http://play.golang.org/p/SScqYKJYXd +func TestTransportResPattern_c0h1d0t0(t *testing.T) { testTransportResPattern(t, f0, f1, d0, f0) } +func TestTransportResPattern_c0h1d0t1(t *testing.T) { testTransportResPattern(t, f0, f1, d0, f1) } +func TestTransportResPattern_c0h1d0t2(t *testing.T) { testTransportResPattern(t, f0, f1, d0, f2) } +func TestTransportResPattern_c0h1d1t0(t *testing.T) { testTransportResPattern(t, f0, f1, d1, f0) } +func TestTransportResPattern_c0h1d1t1(t *testing.T) { testTransportResPattern(t, f0, f1, d1, f1) } +func TestTransportResPattern_c0h1d1t2(t *testing.T) { testTransportResPattern(t, f0, f1, d1, f2) } +func TestTransportResPattern_c0h2d0t0(t *testing.T) { testTransportResPattern(t, f0, f2, d0, f0) } +func TestTransportResPattern_c0h2d0t1(t *testing.T) { testTransportResPattern(t, f0, f2, d0, f1) } +func TestTransportResPattern_c0h2d0t2(t *testing.T) { testTransportResPattern(t, f0, f2, d0, f2) } +func TestTransportResPattern_c0h2d1t0(t *testing.T) { testTransportResPattern(t, f0, f2, d1, f0) } +func TestTransportResPattern_c0h2d1t1(t *testing.T) { testTransportResPattern(t, f0, f2, d1, f1) } +func TestTransportResPattern_c0h2d1t2(t *testing.T) { testTransportResPattern(t, f0, f2, d1, f2) } +func TestTransportResPattern_c1h1d0t0(t *testing.T) { testTransportResPattern(t, f1, f1, d0, f0) } +func TestTransportResPattern_c1h1d0t1(t *testing.T) { testTransportResPattern(t, f1, f1, d0, f1) } +func TestTransportResPattern_c1h1d0t2(t *testing.T) { testTransportResPattern(t, f1, f1, d0, f2) } +func TestTransportResPattern_c1h1d1t0(t *testing.T) { testTransportResPattern(t, f1, f1, d1, f0) } +func TestTransportResPattern_c1h1d1t1(t *testing.T) { testTransportResPattern(t, f1, f1, d1, f1) } +func TestTransportResPattern_c1h1d1t2(t *testing.T) { testTransportResPattern(t, f1, f1, d1, f2) } +func TestTransportResPattern_c1h2d0t0(t *testing.T) { testTransportResPattern(t, f1, f2, d0, f0) } +func TestTransportResPattern_c1h2d0t1(t *testing.T) { testTransportResPattern(t, f1, f2, d0, f1) } +func TestTransportResPattern_c1h2d0t2(t *testing.T) { testTransportResPattern(t, f1, f2, d0, f2) } +func TestTransportResPattern_c1h2d1t0(t *testing.T) { testTransportResPattern(t, f1, f2, d1, f0) } +func TestTransportResPattern_c1h2d1t1(t *testing.T) { testTransportResPattern(t, f1, f2, d1, f1) } +func TestTransportResPattern_c1h2d1t2(t *testing.T) { testTransportResPattern(t, f1, f2, d1, f2) } +func TestTransportResPattern_c2h1d0t0(t *testing.T) { testTransportResPattern(t, f2, f1, d0, f0) } +func TestTransportResPattern_c2h1d0t1(t *testing.T) { testTransportResPattern(t, f2, f1, d0, f1) } +func TestTransportResPattern_c2h1d0t2(t *testing.T) { 
testTransportResPattern(t, f2, f1, d0, f2) } +func TestTransportResPattern_c2h1d1t0(t *testing.T) { testTransportResPattern(t, f2, f1, d1, f0) } +func TestTransportResPattern_c2h1d1t1(t *testing.T) { testTransportResPattern(t, f2, f1, d1, f1) } +func TestTransportResPattern_c2h1d1t2(t *testing.T) { testTransportResPattern(t, f2, f1, d1, f2) } +func TestTransportResPattern_c2h2d0t0(t *testing.T) { testTransportResPattern(t, f2, f2, d0, f0) } +func TestTransportResPattern_c2h2d0t1(t *testing.T) { testTransportResPattern(t, f2, f2, d0, f1) } +func TestTransportResPattern_c2h2d0t2(t *testing.T) { testTransportResPattern(t, f2, f2, d0, f2) } +func TestTransportResPattern_c2h2d1t0(t *testing.T) { testTransportResPattern(t, f2, f2, d1, f0) } +func TestTransportResPattern_c2h2d1t1(t *testing.T) { testTransportResPattern(t, f2, f2, d1, f1) } +func TestTransportResPattern_c2h2d1t2(t *testing.T) { testTransportResPattern(t, f2, f2, d1, f2) } + +func testTransportResPattern(t *testing.T, expect100Continue, resHeader headerType, withData bool, trailers headerType) { + const reqBody = "some request body" + const resBody = "some response body" + + if resHeader == noHeader { + // TODO: test 100-continue followed by immediate + // server stream reset, without headers in the middle? + panic("invalid combination") + } + + ct := newClientTester(t) + ct.client = func() error { + req, _ := http.NewRequest("POST", "https://dummy.tld/", strings.NewReader(reqBody)) + if expect100Continue != noHeader { + req.Header.Set("Expect", "100-continue") + } + res, err := ct.tr.RoundTrip(req) + if err != nil { + return fmt.Errorf("RoundTrip: %v", err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + return fmt.Errorf("status code = %v; want 200", res.StatusCode) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("Slurp: %v", err) + } + wantBody := resBody + if !withData { + wantBody = "" + } + if string(slurp) != wantBody { + return fmt.Errorf("body = %q; want %q", slurp, wantBody) + } + if trailers == noHeader { + if len(res.Trailer) > 0 { + t.Errorf("Trailer = %v; want none", res.Trailer) + } + } else { + want := http.Header{"Some-Trailer": {"some-value"}} + if !reflect.DeepEqual(res.Trailer, want) { + t.Errorf("Trailer = %v; want %v", res.Trailer, want) + } + } + return nil + } + ct.server = func() error { + ct.greet() + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return err + } + endStream := false + send := func(mode headerType) { + hbf := buf.Bytes() + switch mode { + case oneHeader: + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.Header().StreamID, + EndHeaders: true, + EndStream: endStream, + BlockFragment: hbf, + }) + case splitHeader: + if len(hbf) < 2 { + panic("too small") + } + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.Header().StreamID, + EndHeaders: false, + EndStream: endStream, + BlockFragment: hbf[:1], + }) + ct.fr.WriteContinuation(f.Header().StreamID, true, hbf[1:]) + default: + panic("bogus mode") + } + } + switch f := f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + case *DataFrame: + if !f.StreamEnded() { + // No need to send flow control tokens. The test request body is tiny. 
+ continue + } + // Response headers (1+ frames; 1 or 2 in this test, but never 0) + { + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "x-foo", Value: "blah"}) + enc.WriteField(hpack.HeaderField{Name: "x-bar", Value: "more"}) + if trailers != noHeader { + enc.WriteField(hpack.HeaderField{Name: "trailer", Value: "some-trailer"}) + } + endStream = withData == false && trailers == noHeader + send(resHeader) + } + if withData { + endStream = trailers == noHeader + ct.fr.WriteData(f.StreamID, endStream, []byte(resBody)) + } + if trailers != noHeader { + endStream = true + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: "some-trailer", Value: "some-value"}) + send(trailers) + } + if endStream { + return nil + } + case *HeadersFrame: + if expect100Continue != noHeader { + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "100"}) + send(expect100Continue) + } + } + } + } + ct.run() +} + +func TestTransportReceiveUndeclaredTrailer(t *testing.T) { + ct := newClientTester(t) + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return fmt.Errorf("RoundTrip: %v", err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + return fmt.Errorf("status code = %v; want 200", res.StatusCode) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("res.Body ReadAll error = %q, %v; want %v", slurp, err, nil) + } + if len(slurp) > 0 { + return fmt.Errorf("body = %q; want nothing", slurp) + } + if _, ok := res.Trailer["Some-Trailer"]; !ok { + return fmt.Errorf("expected Some-Trailer") + } + return nil + } + ct.server = func() error { + ct.greet() + + var n int + var hf *HeadersFrame + for hf == nil && n < 10 { + f, err := ct.fr.ReadFrame() + if err != nil { + return err + } + hf, _ = f.(*HeadersFrame) + n++ + } + + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + + // send headers without Trailer header + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + + // send trailers + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: "some-trailer", Value: "I'm an undeclared Trailer!"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: true, + BlockFragment: buf.Bytes(), + }) + return nil + } + ct.run() +} + +func TestTransportInvalidTrailer_Pseudo1(t *testing.T) { + testTransportInvalidTrailer_Pseudo(t, oneHeader) +} +func TestTransportInvalidTrailer_Pseudo2(t *testing.T) { + testTransportInvalidTrailer_Pseudo(t, splitHeader) +} +func testTransportInvalidTrailer_Pseudo(t *testing.T, trailers headerType) { + testInvalidTrailer(t, trailers, pseudoHeaderError(":colon"), func(enc *hpack.Encoder) { + enc.WriteField(hpack.HeaderField{Name: ":colon", Value: "foo"}) + enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"}) + }) +} + +func TestTransportInvalidTrailer_Capital1(t *testing.T) { + testTransportInvalidTrailer_Capital(t, oneHeader) +} +func TestTransportInvalidTrailer_Capital2(t *testing.T) { + testTransportInvalidTrailer_Capital(t, splitHeader) +} +func testTransportInvalidTrailer_Capital(t *testing.T, trailers headerType) { + testInvalidTrailer(t, trailers, headerFieldNameError("Capital"), func(enc *hpack.Encoder) { + enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"}) + 
enc.WriteField(hpack.HeaderField{Name: "Capital", Value: "bad"}) + }) +} +func TestTransportInvalidTrailer_EmptyFieldName(t *testing.T) { + testInvalidTrailer(t, oneHeader, headerFieldNameError(""), func(enc *hpack.Encoder) { + enc.WriteField(hpack.HeaderField{Name: "", Value: "bad"}) + }) +} +func TestTransportInvalidTrailer_BinaryFieldValue(t *testing.T) { + testInvalidTrailer(t, oneHeader, headerFieldValueError("has\nnewline"), func(enc *hpack.Encoder) { + enc.WriteField(hpack.HeaderField{Name: "x", Value: "has\nnewline"}) + }) +} + +func testInvalidTrailer(t *testing.T, trailers headerType, wantErr error, writeTrailer func(*hpack.Encoder)) { + ct := newClientTester(t) + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return fmt.Errorf("RoundTrip: %v", err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + return fmt.Errorf("status code = %v; want 200", res.StatusCode) + } + slurp, err := ioutil.ReadAll(res.Body) + se, ok := err.(StreamError) + if !ok || se.Cause != wantErr { + return fmt.Errorf("res.Body ReadAll error = %q, %#v; want StreamError with cause %T, %#v", slurp, err, wantErr, wantErr) + } + if len(slurp) > 0 { + return fmt.Errorf("body = %q; want nothing", slurp) + } + return nil + } + ct.server = func() error { + ct.greet() + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return err + } + switch f := f.(type) { + case *HeadersFrame: + var endStream bool + send := func(mode headerType) { + hbf := buf.Bytes() + switch mode { + case oneHeader: + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: endStream, + BlockFragment: hbf, + }) + case splitHeader: + if len(hbf) < 2 { + panic("too small") + } + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: false, + EndStream: endStream, + BlockFragment: hbf[:1], + }) + ct.fr.WriteContinuation(f.StreamID, true, hbf[1:]) + default: + panic("bogus mode") + } + } + // Response headers (1+ frames; 1 or 2 in this test, but never 0) + { + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "trailer", Value: "declared"}) + endStream = false + send(oneHeader) + } + // Trailers: + { + endStream = true + buf.Reset() + writeTrailer(enc) + send(trailers) + } + return nil + } + } + } + ct.run() +} + +// headerListSize returns the HTTP2 header list size of h. +// http://httpwg.org/specs/rfc7540.html#SETTINGS_MAX_HEADER_LIST_SIZE +// http://httpwg.org/specs/rfc7540.html#MaxHeaderBlock +func headerListSize(h http.Header) (size uint32) { + for k, vv := range h { + for _, v := range vv { + hf := hpack.HeaderField{Name: k, Value: v} + size += hf.Size() + } + } + return size +} + +// padHeaders adds data to an http.Header until headerListSize(h) == +// limit. Due to the way header list sizes are calculated, padHeaders +// cannot add fewer than len("Pad-Headers") + 32 bytes to h, and will +// call t.Fatal if asked to do so. PadHeaders first reserves enough +// space for an empty "Pad-Headers" key, then adds as many copies of +// filler as possible. Any remaining bytes necessary to push the +// header list size up to limit are added to h["Pad-Headers"]. +func padHeaders(t *testing.T, h http.Header, limit uint64, filler string) { + if limit > 0xffffffff { + t.Fatalf("padHeaders: refusing to pad to more than 2^32-1 bytes. 
limit = %v", limit) + } + hf := hpack.HeaderField{Name: "Pad-Headers", Value: ""} + minPadding := uint64(hf.Size()) + size := uint64(headerListSize(h)) + + minlimit := size + minPadding + if limit < minlimit { + t.Fatalf("padHeaders: limit %v < %v", limit, minlimit) + } + + // Use a fixed-width format for name so that fieldSize + // remains constant. + nameFmt := "Pad-Headers-%06d" + hf = hpack.HeaderField{Name: fmt.Sprintf(nameFmt, 1), Value: filler} + fieldSize := uint64(hf.Size()) + + // Add as many complete filler values as possible, leaving + // room for at least one empty "Pad-Headers" key. + limit = limit - minPadding + for i := 0; size+fieldSize < limit; i++ { + name := fmt.Sprintf(nameFmt, i) + h.Add(name, filler) + size += fieldSize + } + + // Add enough bytes to reach limit. + remain := limit - size + lastValue := strings.Repeat("*", int(remain)) + h.Add("Pad-Headers", lastValue) +} + +func TestPadHeaders(t *testing.T) { + check := func(h http.Header, limit uint32, fillerLen int) { + if h == nil { + h = make(http.Header) + } + filler := strings.Repeat("f", fillerLen) + padHeaders(t, h, uint64(limit), filler) + gotSize := headerListSize(h) + if gotSize != limit { + t.Errorf("Got size = %v; want %v", gotSize, limit) + } + } + // Try all possible combinations for small fillerLen and limit. + hf := hpack.HeaderField{Name: "Pad-Headers", Value: ""} + minLimit := hf.Size() + for limit := minLimit; limit <= 128; limit++ { + for fillerLen := 0; uint32(fillerLen) <= limit; fillerLen++ { + check(nil, limit, fillerLen) + } + } + + // Try a few tests with larger limits, plus cumulative + // tests. Since these tests are cumulative, tests[i+1].limit + // must be >= tests[i].limit + minLimit. See the comment on + // padHeaders for more info on why the limit arg has this + // restriction. + tests := []struct { + fillerLen int + limit uint32 + }{ + { + fillerLen: 64, + limit: 1024, + }, + { + fillerLen: 1024, + limit: 1286, + }, + { + fillerLen: 256, + limit: 2048, + }, + { + fillerLen: 1024, + limit: 10 * 1024, + }, + { + fillerLen: 1023, + limit: 11 * 1024, + }, + } + h := make(http.Header) + for _, tc := range tests { + check(nil, tc.limit, tc.fillerLen) + check(h, tc.limit, tc.fillerLen) + } +} + +func TestTransportChecksRequestHeaderListSize(t *testing.T) { + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + // Consume body & force client to send + // trailers before writing response. + // ioutil.ReadAll returns non-nil err for + // requests that attempt to send greater than + // maxHeaderListSize bytes of trailers, since + // those requests generate a stream reset. 
+ ioutil.ReadAll(r.Body) + r.Body.Close() + }, + func(ts *httptest.Server) { + ts.Config.MaxHeaderBytes = 16 << 10 + }, + optOnlyServer, + optQuiet, + ) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + checkRoundTrip := func(req *http.Request, wantErr error, desc string) { + res, err := tr.RoundTrip(req) + if err != wantErr { + if res != nil { + res.Body.Close() + } + t.Errorf("%v: RoundTrip err = %v; want %v", desc, err, wantErr) + return + } + if err == nil { + if res == nil { + t.Errorf("%v: response nil; want non-nil.", desc) + return + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + t.Errorf("%v: response status = %v; want %v", desc, res.StatusCode, http.StatusOK) + } + return + } + if res != nil { + t.Errorf("%v: RoundTrip err = %v but response non-nil", desc, err) + } + } + headerListSizeForRequest := func(req *http.Request) (size uint64) { + contentLen := actualContentLength(req) + trailers, err := commaSeparatedTrailers(req) + if err != nil { + t.Fatalf("headerListSizeForRequest: %v", err) + } + cc := &ClientConn{peerMaxHeaderListSize: 0xffffffffffffffff} + cc.henc = hpack.NewEncoder(&cc.hbuf) + cc.mu.Lock() + hdrs, err := cc.encodeHeaders(req, true, trailers, contentLen) + cc.mu.Unlock() + if err != nil { + t.Fatalf("headerListSizeForRequest: %v", err) + } + hpackDec := hpack.NewDecoder(initialHeaderTableSize, func(hf hpack.HeaderField) { + size += uint64(hf.Size()) + }) + if len(hdrs) > 0 { + if _, err := hpackDec.Write(hdrs); err != nil { + t.Fatalf("headerListSizeForRequest: %v", err) + } + } + return size + } + // Create a new Request for each test, rather than reusing the + // same Request, to avoid a race when modifying req.Headers. + // See https://github.com/golang/go/issues/21316 + newRequest := func() *http.Request { + // Body must be non-nil to enable writing trailers. + body := strings.NewReader("hello") + req, err := http.NewRequest("POST", st.ts.URL, body) + if err != nil { + t.Fatalf("newRequest: NewRequest: %v", err) + } + return req + } + + // Make an arbitrary request to ensure we get the server's + // settings frame and initialize peerMaxHeaderListSize. + req := newRequest() + checkRoundTrip(req, nil, "Initial request") + + // Get the ClientConn associated with the request and validate + // peerMaxHeaderListSize. + addr := authorityAddr(req.URL.Scheme, req.URL.Host) + cc, err := tr.connPool().GetClientConn(req, addr) + if err != nil { + t.Fatalf("GetClientConn: %v", err) + } + cc.mu.Lock() + peerSize := cc.peerMaxHeaderListSize + cc.mu.Unlock() + st.scMu.Lock() + wantSize := uint64(st.sc.maxHeaderListSize()) + st.scMu.Unlock() + if peerSize != wantSize { + t.Errorf("peerMaxHeaderListSize = %v; want %v", peerSize, wantSize) + } + + // Sanity check peerSize. (*serverConn) maxHeaderListSize adds + // 320 bytes of padding. + wantHeaderBytes := uint64(st.ts.Config.MaxHeaderBytes) + 320 + if peerSize != wantHeaderBytes { + t.Errorf("peerMaxHeaderListSize = %v; want %v.", peerSize, wantHeaderBytes) + } + + // Pad headers & trailers, but stay under peerSize. + req = newRequest() + req.Header = make(http.Header) + req.Trailer = make(http.Header) + filler := strings.Repeat("*", 1024) + padHeaders(t, req.Trailer, peerSize, filler) + // cc.encodeHeaders adds some default headers to the request, + // so we need to leave room for those. 
+ defaultBytes := headerListSizeForRequest(req) + padHeaders(t, req.Header, peerSize-defaultBytes, filler) + checkRoundTrip(req, nil, "Headers & Trailers under limit") + + // Add enough header bytes to push us over peerSize. + req = newRequest() + req.Header = make(http.Header) + padHeaders(t, req.Header, peerSize, filler) + checkRoundTrip(req, errRequestHeaderListSize, "Headers over limit") + + // Push trailers over the limit. + req = newRequest() + req.Trailer = make(http.Header) + padHeaders(t, req.Trailer, peerSize+1, filler) + checkRoundTrip(req, errRequestHeaderListSize, "Trailers over limit") + + // Send headers with a single large value. + req = newRequest() + filler = strings.Repeat("*", int(peerSize)) + req.Header = make(http.Header) + req.Header.Set("Big", filler) + checkRoundTrip(req, errRequestHeaderListSize, "Single large header") + + // Send trailers with a single large value. + req = newRequest() + req.Trailer = make(http.Header) + req.Trailer.Set("Big", filler) + checkRoundTrip(req, errRequestHeaderListSize, "Single large trailer") +} + +func TestTransportChecksResponseHeaderListSize(t *testing.T) { + ct := newClientTester(t) + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != errResponseHeaderListSize { + if res != nil { + res.Body.Close() + } + size := int64(0) + for k, vv := range res.Header { + for _, v := range vv { + size += int64(len(k)) + int64(len(v)) + 32 + } + } + return fmt.Errorf("RoundTrip Error = %v (and %d bytes of response headers); want errResponseHeaderListSize", err, size) + } + return nil + } + ct.server = func() error { + ct.greet() + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return err + } + switch f := f.(type) { + case *HeadersFrame: + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + large := strings.Repeat("a", 1<<10) + for i := 0; i < 5042; i++ { + enc.WriteField(hpack.HeaderField{Name: large, Value: large}) + } + if size, want := buf.Len(), 6329; size != want { + // Note: this number might change if + // our hpack implementation + // changes. That's fine. This is + // just a sanity check that our + // response can fit in a single + // header block fragment frame. + return fmt.Errorf("encoding over 10MB of duplicate keypairs took %d bytes; expected %d", size, want) + } + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: true, + BlockFragment: buf.Bytes(), + }) + return nil + } + } + } + ct.run() +} + +// Test that the Transport returns a typed error from Response.Body.Read calls +// when the server sends an error. 
(here we use a panic, since that should generate +// a stream error, but others like cancel should be similar) +func TestTransportBodyReadErrorType(t *testing.T) { + doPanic := make(chan bool, 1) + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + w.(http.Flusher).Flush() // force headers out + <-doPanic + panic("boom") + }, + optOnlyServer, + optQuiet, + ) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + + res, err := c.Get(st.ts.URL) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + doPanic <- true + buf := make([]byte, 100) + n, err := res.Body.Read(buf) + want := StreamError{StreamID: 0x1, Code: 0x2} + if !reflect.DeepEqual(want, err) { + t.Errorf("Read = %v, %#v; want error %#v", n, err, want) + } +} + +// golang.org/issue/13924 +// This used to fail after many iterations, especially with -race: +// go test -v -run=TestTransportDoubleCloseOnWriteError -count=500 -race +func TestTransportDoubleCloseOnWriteError(t *testing.T) { + var ( + mu sync.Mutex + conn net.Conn // to close if set + ) + + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + mu.Lock() + defer mu.Unlock() + if conn != nil { + conn.Close() + } + }, + optOnlyServer, + ) + defer st.Close() + + tr := &Transport{ + TLSClientConfig: tlsConfigInsecure, + DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) { + tc, err := tls.Dial(network, addr, cfg) + if err != nil { + return nil, err + } + mu.Lock() + defer mu.Unlock() + conn = tc + return tc, nil + }, + } + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + c.Get(st.ts.URL) +} + +// Test that the http1 Transport.DisableKeepAlives option is respected +// and connections are closed as soon as idle. +// See golang.org/issue/14008 +func TestTransportDisableKeepAlives(t *testing.T) { + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "hi") + }, + optOnlyServer, + ) + defer st.Close() + + connClosed := make(chan struct{}) // closed on tls.Conn.Close + tr := &Transport{ + t1: &http.Transport{ + DisableKeepAlives: true, + }, + TLSClientConfig: tlsConfigInsecure, + DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) { + tc, err := tls.Dial(network, addr, cfg) + if err != nil { + return nil, err + } + return &noteCloseConn{Conn: tc, closefn: func() { close(connClosed) }}, nil + }, + } + c := &http.Client{Transport: tr} + res, err := c.Get(st.ts.URL) + if err != nil { + t.Fatal(err) + } + if _, err := ioutil.ReadAll(res.Body); err != nil { + t.Fatal(err) + } + defer res.Body.Close() + + select { + case <-connClosed: + case <-time.After(1 * time.Second): + t.Errorf("timeout") + } + +} + +// Test concurrent requests with Transport.DisableKeepAlives. We can share connections, +// but when things are totally idle, it still needs to close. 
+func TestTransportDisableKeepAlives_Concurrency(t *testing.T) { + const D = 25 * time.Millisecond + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + time.Sleep(D) + io.WriteString(w, "hi") + }, + optOnlyServer, + ) + defer st.Close() + + var dials int32 + var conns sync.WaitGroup + tr := &Transport{ + t1: &http.Transport{ + DisableKeepAlives: true, + }, + TLSClientConfig: tlsConfigInsecure, + DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) { + tc, err := tls.Dial(network, addr, cfg) + if err != nil { + return nil, err + } + atomic.AddInt32(&dials, 1) + conns.Add(1) + return &noteCloseConn{Conn: tc, closefn: func() { conns.Done() }}, nil + }, + } + c := &http.Client{Transport: tr} + var reqs sync.WaitGroup + const N = 20 + for i := 0; i < N; i++ { + reqs.Add(1) + if i == N-1 { + // For the final request, try to make all the + // others close. This isn't verified in the + // count, other than the Log statement, since + // it's so timing dependent. This test is + // really to make sure we don't interrupt a + // valid request. + time.Sleep(D * 2) + } + go func() { + defer reqs.Done() + res, err := c.Get(st.ts.URL) + if err != nil { + t.Error(err) + return + } + if _, err := ioutil.ReadAll(res.Body); err != nil { + t.Error(err) + return + } + res.Body.Close() + }() + } + reqs.Wait() + conns.Wait() + t.Logf("did %d dials, %d requests", atomic.LoadInt32(&dials), N) +} + +type noteCloseConn struct { + net.Conn + onceClose sync.Once + closefn func() +} + +func (c *noteCloseConn) Close() error { + c.onceClose.Do(c.closefn) + return c.Conn.Close() +} + +func isTimeout(err error) bool { + switch err := err.(type) { + case nil: + return false + case *url.Error: + return isTimeout(err.Err) + case net.Error: + return err.Timeout() + } + return false +} + +// Test that the http1 Transport.ResponseHeaderTimeout option is respected and a cancel is sent. 
+func TestTransportResponseHeaderTimeout_NoBody(t *testing.T) { + testTransportResponseHeaderTimeout(t, false) +} +func TestTransportResponseHeaderTimeout_Body(t *testing.T) { + testTransportResponseHeaderTimeout(t, true) +} + +func testTransportResponseHeaderTimeout(t *testing.T, body bool) { + ct := newClientTester(t) + ct.tr.t1 = &http.Transport{ + ResponseHeaderTimeout: 5 * time.Millisecond, + } + ct.client = func() error { + c := &http.Client{Transport: ct.tr} + var err error + var n int64 + const bodySize = 4 << 20 + if body { + _, err = c.Post("https://dummy.tld/", "text/foo", io.LimitReader(countingReader{&n}, bodySize)) + } else { + _, err = c.Get("https://dummy.tld/") + } + if !isTimeout(err) { + t.Errorf("client expected timeout error; got %#v", err) + } + if body && n != bodySize { + t.Errorf("only read %d bytes of body; want %d", n, bodySize) + } + return nil + } + ct.server = func() error { + ct.greet() + for { + f, err := ct.fr.ReadFrame() + if err != nil { + t.Logf("ReadFrame: %v", err) + return nil + } + switch f := f.(type) { + case *DataFrame: + dataLen := len(f.Data()) + if dataLen > 0 { + if err := ct.fr.WriteWindowUpdate(0, uint32(dataLen)); err != nil { + return err + } + if err := ct.fr.WriteWindowUpdate(f.StreamID, uint32(dataLen)); err != nil { + return err + } + } + case *RSTStreamFrame: + if f.StreamID == 1 && f.ErrCode == ErrCodeCancel { + return nil + } + } + } + } + ct.run() +} + +func TestTransportDisableCompression(t *testing.T) { + const body = "sup" + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + want := http.Header{ + "User-Agent": []string{"Go-http-client/2.0"}, + } + if !reflect.DeepEqual(r.Header, want) { + t.Errorf("request headers = %v; want %v", r.Header, want) + } + }, optOnlyServer) + defer st.Close() + + tr := &Transport{ + TLSClientConfig: tlsConfigInsecure, + t1: &http.Transport{ + DisableCompression: true, + }, + } + defer tr.CloseIdleConnections() + + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Fatal(err) + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() +} + +// RFC 7540 section 8.1.2.2 +func TestTransportRejectsConnHeaders(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + var got []string + for k := range r.Header { + got = append(got, k) + } + sort.Strings(got) + w.Header().Set("Got-Header", strings.Join(got, ",")) + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + tests := []struct { + key string + value []string + want string + }{ + { + key: "Upgrade", + value: []string{"anything"}, + want: "ERROR: http2: invalid Upgrade request header: [\"anything\"]", + }, + { + key: "Connection", + value: []string{"foo"}, + want: "ERROR: http2: invalid Connection request header: [\"foo\"]", + }, + { + key: "Connection", + value: []string{"close"}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Connection", + value: []string{"close", "something-else"}, + want: "ERROR: http2: invalid Connection request header: [\"close\" \"something-else\"]", + }, + { + key: "Connection", + value: []string{"keep-alive"}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Proxy-Connection", // just deleted and ignored + value: []string{"keep-alive"}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Transfer-Encoding", + value: []string{""}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Transfer-Encoding", + value: 
[]string{"foo"}, + want: "ERROR: http2: invalid Transfer-Encoding request header: [\"foo\"]", + }, + { + key: "Transfer-Encoding", + value: []string{"chunked"}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Transfer-Encoding", + value: []string{"chunked", "other"}, + want: "ERROR: http2: invalid Transfer-Encoding request header: [\"chunked\" \"other\"]", + }, + { + key: "Content-Length", + value: []string{"123"}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Keep-Alive", + value: []string{"doop"}, + want: "Accept-Encoding,User-Agent", + }, + } + + for _, tt := range tests { + req, _ := http.NewRequest("GET", st.ts.URL, nil) + req.Header[tt.key] = tt.value + res, err := tr.RoundTrip(req) + var got string + if err != nil { + got = fmt.Sprintf("ERROR: %v", err) + } else { + got = res.Header.Get("Got-Header") + res.Body.Close() + } + if got != tt.want { + t.Errorf("For key %q, value %q, got = %q; want %q", tt.key, tt.value, got, tt.want) + } + } +} + +// golang.org/issue/14048 +func TestTransportFailsOnInvalidHeaders(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + var got []string + for k := range r.Header { + got = append(got, k) + } + sort.Strings(got) + w.Header().Set("Got-Header", strings.Join(got, ",")) + }, optOnlyServer) + defer st.Close() + + tests := [...]struct { + h http.Header + wantErr string + }{ + 0: { + h: http.Header{"with space": {"foo"}}, + wantErr: `invalid HTTP header name "with space"`, + }, + 1: { + h: http.Header{"name": {"БрÑд"}}, + wantErr: "", // okay + }, + 2: { + h: http.Header{"имÑ": {"Brad"}}, + wantErr: `invalid HTTP header name "имÑ"`, + }, + 3: { + h: http.Header{"foo": {"foo\x01bar"}}, + wantErr: `invalid HTTP header value "foo\x01bar" for header "foo"`, + }, + } + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + for i, tt := range tests { + req, _ := http.NewRequest("GET", st.ts.URL, nil) + req.Header = tt.h + res, err := tr.RoundTrip(req) + var bad bool + if tt.wantErr == "" { + if err != nil { + bad = true + t.Errorf("case %d: error = %v; want no error", i, err) + } + } else { + if !strings.Contains(fmt.Sprint(err), tt.wantErr) { + bad = true + t.Errorf("case %d: error = %v; want error %q", i, err, tt.wantErr) + } + } + if err == nil { + if bad { + t.Logf("case %d: server got headers %q", i, res.Header.Get("Got-Header")) + } + res.Body.Close() + } + } +} + +// Tests that gzipReader doesn't crash on a second Read call following +// the first Read call's gzip.NewReader returning an error. +func TestGzipReader_DoubleReadCrash(t *testing.T) { + gz := &gzipReader{ + body: ioutil.NopCloser(strings.NewReader("0123456789")), + } + var buf [1]byte + n, err1 := gz.Read(buf[:]) + if n != 0 || !strings.Contains(fmt.Sprint(err1), "invalid header") { + t.Fatalf("Read = %v, %v; want 0, invalid header", n, err1) + } + n, err2 := gz.Read(buf[:]) + if n != 0 || err2 != err1 { + t.Fatalf("second Read = %v, %v; want 0, %v", n, err2, err1) + } +} + +func TestTransportNewTLSConfig(t *testing.T) { + tests := [...]struct { + conf *tls.Config + host string + want *tls.Config + }{ + // Normal case. 
+ 0: { + conf: nil, + host: "foo.com", + want: &tls.Config{ + ServerName: "foo.com", + NextProtos: []string{NextProtoTLS}, + }, + }, + + // User-provided name (bar.com) takes precedence: + 1: { + conf: &tls.Config{ + ServerName: "bar.com", + }, + host: "foo.com", + want: &tls.Config{ + ServerName: "bar.com", + NextProtos: []string{NextProtoTLS}, + }, + }, + + // NextProto is prepended: + 2: { + conf: &tls.Config{ + NextProtos: []string{"foo", "bar"}, + }, + host: "example.com", + want: &tls.Config{ + ServerName: "example.com", + NextProtos: []string{NextProtoTLS, "foo", "bar"}, + }, + }, + + // NextProto is not duplicated: + 3: { + conf: &tls.Config{ + NextProtos: []string{"foo", "bar", NextProtoTLS}, + }, + host: "example.com", + want: &tls.Config{ + ServerName: "example.com", + NextProtos: []string{"foo", "bar", NextProtoTLS}, + }, + }, + } + for i, tt := range tests { + // Ignore the session ticket keys part, which ends up populating + // unexported fields in the Config: + if tt.conf != nil { + tt.conf.SessionTicketsDisabled = true + } + + tr := &Transport{TLSClientConfig: tt.conf} + got := tr.newTLSConfig(tt.host) + + got.SessionTicketsDisabled = false + + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("%d. got %#v; want %#v", i, got, tt.want) + } + } +} + +// The Google GFE responds to HEAD requests with a HEADERS frame +// without END_STREAM, followed by a 0-length DATA frame with +// END_STREAM. Make sure we don't get confused by that. (We did.) +func TestTransportReadHeadResponse(t *testing.T) { + ct := newClientTester(t) + clientDone := make(chan struct{}) + ct.client = func() error { + defer close(clientDone) + req, _ := http.NewRequest("HEAD", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return err + } + if res.ContentLength != 123 { + return fmt.Errorf("Content-Length = %d; want 123", res.ContentLength) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("ReadAll: %v", err) + } + if len(slurp) > 0 { + return fmt.Errorf("Unexpected non-empty ReadAll body: %q", slurp) + } + return nil + } + ct.server = func() error { + ct.greet() + for { + f, err := ct.fr.ReadFrame() + if err != nil { + t.Logf("ReadFrame: %v", err) + return nil + } + hf, ok := f.(*HeadersFrame) + if !ok { + continue + } + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "123"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, // as the GFE does + BlockFragment: buf.Bytes(), + }) + ct.fr.WriteData(hf.StreamID, true, nil) + + <-clientDone + return nil + } + } + ct.run() +} + +func TestTransportReadHeadResponseWithBody(t *testing.T) { + // This test uses an invalid response format. + // Discard logger output so it does not spam the test output.
+ log.SetOutput(ioutil.Discard) + defer log.SetOutput(os.Stderr) + + response := "redirecting to /elsewhere" + ct := newClientTester(t) + clientDone := make(chan struct{}) + ct.client = func() error { + defer close(clientDone) + req, _ := http.NewRequest("HEAD", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return err + } + if res.ContentLength != int64(len(response)) { + return fmt.Errorf("Content-Length = %d; want %d", res.ContentLength, len(response)) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("ReadAll: %v", err) + } + if len(slurp) > 0 { + return fmt.Errorf("Unexpected non-empty ReadAll body: %q", slurp) + } + return nil + } + ct.server = func() error { + ct.greet() + for { + f, err := ct.fr.ReadFrame() + if err != nil { + t.Logf("ReadFrame: %v", err) + return nil + } + hf, ok := f.(*HeadersFrame) + if !ok { + continue + } + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "content-length", Value: strconv.Itoa(len(response))}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + ct.fr.WriteData(hf.StreamID, true, []byte(response)) + + <-clientDone + return nil + } + } + ct.run() +} + +type neverEnding byte + +func (b neverEnding) Read(p []byte) (int, error) { + for i := range p { + p[i] = byte(b) + } + return len(p), nil +} + +// golang.org/issue/15425: test that a handler closing the request +// body doesn't terminate the stream to the peer. (It just stops +// readability from the handler's side, and eventually the client +// runs out of flow control tokens) +func TestTransportHandlerBodyClose(t *testing.T) { + const bodySize = 10 << 20 + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + r.Body.Close() + io.Copy(w, io.LimitReader(neverEnding('A'), bodySize)) + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + g0 := runtime.NumGoroutine() + + const numReq = 10 + for i := 0; i < numReq; i++ { + req, err := http.NewRequest("POST", st.ts.URL, struct{ io.Reader }{io.LimitReader(neverEnding('A'), bodySize)}) + if err != nil { + t.Fatal(err) + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + n, err := io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + if n != bodySize || err != nil { + t.Fatalf("req#%d: Copy = %d, %v; want %d, nil", i, n, err, bodySize) + } + } + tr.CloseIdleConnections() + + gd := runtime.NumGoroutine() - g0 + if gd > numReq/2 { + t.Errorf("appeared to leak goroutines") + } + +} + +// https://golang.org/issue/15930 +func TestTransportFlowControl(t *testing.T) { + const bufLen = 64 << 10 + var total int64 = 100 << 20 // 100MB + if testing.Short() { + total = 10 << 20 + } + + var wrote int64 // updated atomically + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + b := make([]byte, bufLen) + for wrote < total { + n, err := w.Write(b) + atomic.AddInt64(&wrote, int64(n)) + if err != nil { + t.Errorf("ResponseWriter.Write error: %v", err) + break + } + w.(http.Flusher).Flush() + } + }, optOnlyServer) + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Fatal("NewRequest error:", err) + } + resp, err := tr.RoundTrip(req) + if err != nil { + 
t.Fatal("RoundTrip error:", err) + } + defer resp.Body.Close() + + var read int64 + b := make([]byte, bufLen) + for { + n, err := resp.Body.Read(b) + if err == io.EOF { + break + } + if err != nil { + t.Fatal("Read error:", err) + } + read += int64(n) + + const max = transportDefaultStreamFlow + if w := atomic.LoadInt64(&wrote); -max > read-w || read-w > max { + t.Fatalf("Too much data inflight: server wrote %v bytes but client only received %v", w, read) + } + + // Let the server get ahead of the client. + time.Sleep(1 * time.Millisecond) + } +} + +// golang.org/issue/14627 -- if the server sends a GOAWAY frame, make +// the Transport remember it and return it back to users (via +// RoundTrip or request body reads) if needed (e.g. if the server +// proceeds to close the TCP connection before the client gets its +// response) +func TestTransportUsesGoAwayDebugError_RoundTrip(t *testing.T) { + testTransportUsesGoAwayDebugError(t, false) +} + +func TestTransportUsesGoAwayDebugError_Body(t *testing.T) { + testTransportUsesGoAwayDebugError(t, true) +} + +func testTransportUsesGoAwayDebugError(t *testing.T, failMidBody bool) { + ct := newClientTester(t) + clientDone := make(chan struct{}) + + const goAwayErrCode = ErrCodeHTTP11Required // arbitrary + const goAwayDebugData = "some debug data" + + ct.client = func() error { + defer close(clientDone) + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if failMidBody { + if err != nil { + return fmt.Errorf("unexpected client RoundTrip error: %v", err) + } + _, err = io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + } + want := GoAwayError{ + LastStreamID: 5, + ErrCode: goAwayErrCode, + DebugData: goAwayDebugData, + } + if !reflect.DeepEqual(err, want) { + t.Errorf("RoundTrip error = %T: %#v, want %T (%#v)", err, err, want, want) + } + return nil + } + ct.server = func() error { + ct.greet() + for { + f, err := ct.fr.ReadFrame() + if err != nil { + t.Logf("ReadFrame: %v", err) + return nil + } + hf, ok := f.(*HeadersFrame) + if !ok { + continue + } + if failMidBody { + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "123"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + } + // Write two GOAWAY frames, to test that the Transport takes + // the interesting parts of both. 
+ ct.fr.WriteGoAway(5, ErrCodeNo, []byte(goAwayDebugData)) + ct.fr.WriteGoAway(5, goAwayErrCode, nil) + ct.sc.(*net.TCPConn).CloseWrite() + <-clientDone + return nil + } + } + ct.run() +} + +func testTransportReturnsUnusedFlowControl(t *testing.T, oneDataFrame bool) { + ct := newClientTester(t) + + clientClosed := make(chan struct{}) + serverWroteFirstByte := make(chan struct{}) + + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return err + } + <-serverWroteFirstByte + + if n, err := res.Body.Read(make([]byte, 1)); err != nil || n != 1 { + return fmt.Errorf("body read = %v, %v; want 1, nil", n, err) + } + res.Body.Close() // leaving 4999 bytes unread + close(clientClosed) + + return nil + } + ct.server = func() error { + ct.greet() + + var hf *HeadersFrame + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return fmt.Errorf("ReadFrame while waiting for Headers: %v", err) + } + switch f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + continue + } + var ok bool + hf, ok = f.(*HeadersFrame) + if !ok { + return fmt.Errorf("Got %T; want HeadersFrame", f) + } + break + } + + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "5000"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + + // Two cases: + // - Send one DATA frame with 5000 bytes. + // - Send two DATA frames with 1 and 4999 bytes each. + // + // In both cases, the client should consume one byte of data, + // refund that byte, then refund the following 4999 bytes. + // + // In the second case, the server waits for the client connection to + // close before sending the second DATA frame. This tests the case + // where the client receives a DATA frame after it has reset the stream. + if oneDataFrame { + ct.fr.WriteData(hf.StreamID, false /* don't end stream */, make([]byte, 5000)) + close(serverWroteFirstByte) + <-clientClosed + } else { + ct.fr.WriteData(hf.StreamID, false /* don't end stream */, make([]byte, 1)) + close(serverWroteFirstByte) + <-clientClosed + ct.fr.WriteData(hf.StreamID, false /* don't end stream */, make([]byte, 4999)) + } + + waitingFor := "RSTStreamFrame" + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return fmt.Errorf("ReadFrame while waiting for %s: %v", waitingFor, err) + } + if _, ok := f.(*SettingsFrame); ok { + continue + } + switch waitingFor { + case "RSTStreamFrame": + if rf, ok := f.(*RSTStreamFrame); !ok || rf.ErrCode != ErrCodeCancel { + return fmt.Errorf("Expected a RSTStreamFrame with code cancel; got %v", summarizeFrame(f)) + } + waitingFor = "WindowUpdateFrame" + case "WindowUpdateFrame": + if wuf, ok := f.(*WindowUpdateFrame); !ok || wuf.Increment != 4999 { + return fmt.Errorf("Expected WindowUpdateFrame for 4999 bytes; got %v", summarizeFrame(f)) + } + return nil + } + } + } + ct.run() +} + +// See golang.org/issue/16481 +func TestTransportReturnsUnusedFlowControlSingleWrite(t *testing.T) { + testTransportReturnsUnusedFlowControl(t, true) +} + +// See golang.org/issue/20469 +func TestTransportReturnsUnusedFlowControlMultipleWrites(t *testing.T) { + testTransportReturnsUnusedFlowControl(t, false) +} +
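For context on the next test: RFC 7540 section 6.9.2 requires that when SETTINGS_INITIAL_WINDOW_SIZE changes, a peer adjust the flow-control window of every open stream by the difference between the new and old values (which may even leave a window negative). A rough sketch of that bookkeeping with hypothetical names, not the transport's real fields:

```go
package main

import "fmt"

// streamWindows is a hypothetical tracker mapping open stream IDs to
// their available send windows, used only to illustrate the rule in
// RFC 7540 section 6.9.2.
type streamWindows struct {
	initial int32            // current SETTINGS_INITIAL_WINDOW_SIZE
	avail   map[uint32]int32 // per-stream send windows
}

// applyInitialWindowSize adjusts every open stream by the delta between
// the new and old initial window size. Windows may legally go negative;
// the sender must then wait for WINDOW_UPDATEs before sending more DATA.
func (w *streamWindows) applyInitialWindowSize(newSize int32) {
	delta := newSize - w.initial
	w.initial = newSize
	for id := range w.avail {
		w.avail[id] += delta
	}
}

func main() {
	w := &streamWindows{initial: 65535, avail: map[uint32]int32{1: 1000, 3: 65535}}
	w.applyInitialWindowSize(1 << 20) // server raises the window, as in the test below
	fmt.Println(w.avail[1], w.avail[3]) // 984041 1048576
}
```

+// Issue 16612: adjust flow control on open streams when transport +// receives SETTINGS with INITIAL_WINDOW_SIZE from server.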
+func TestTransportAdjustsFlowControl(t *testing.T) { + ct := newClientTester(t) + clientDone := make(chan struct{}) + + const bodySize = 1 << 20 + + ct.client = func() error { + defer ct.cc.(*net.TCPConn).CloseWrite() + defer close(clientDone) + + req, _ := http.NewRequest("POST", "https://dummy.tld/", struct{ io.Reader }{io.LimitReader(neverEnding('A'), bodySize)}) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return err + } + res.Body.Close() + return nil + } + ct.server = func() error { + _, err := io.ReadFull(ct.sc, make([]byte, len(ClientPreface))) + if err != nil { + return fmt.Errorf("reading client preface: %v", err) + } + + var gotBytes int64 + var sentSettings bool + for { + f, err := ct.fr.ReadFrame() + if err != nil { + select { + case <-clientDone: + return nil + default: + return fmt.Errorf("ReadFrame while waiting for Headers: %v", err) + } + } + switch f := f.(type) { + case *DataFrame: + gotBytes += int64(len(f.Data())) + // After we've got half the client's + // initial flow control window's worth + // of request body data, give it just + // enough flow control to finish. + if gotBytes >= initialWindowSize/2 && !sentSettings { + sentSettings = true + + ct.fr.WriteSettings(Setting{ID: SettingInitialWindowSize, Val: bodySize}) + ct.fr.WriteWindowUpdate(0, bodySize) + ct.fr.WriteSettingsAck() + } + + if f.StreamEnded() { + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: true, + BlockFragment: buf.Bytes(), + }) + } + } + } + } + ct.run() +} + +// See golang.org/issue/16556 +func TestTransportReturnsDataPaddingFlowControl(t *testing.T) { + ct := newClientTester(t) + + unblockClient := make(chan bool, 1) + + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return err + } + defer res.Body.Close() + <-unblockClient + return nil + } + ct.server = func() error { + ct.greet() + + var hf *HeadersFrame + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return fmt.Errorf("ReadFrame while waiting for Headers: %v", err) + } + switch f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + continue + } + var ok bool + hf, ok = f.(*HeadersFrame) + if !ok { + return fmt.Errorf("Got %T; want HeadersFrame", f) + } + break + } + + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "5000"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + pad := make([]byte, 5) + ct.fr.WriteDataPadded(hf.StreamID, false, make([]byte, 5000), pad) // without ending stream + + f, err := ct.readNonSettingsFrame() + if err != nil { + return fmt.Errorf("ReadFrame while waiting for first WindowUpdateFrame: %v", err) + } + wantBack := uint32(len(pad)) + 1 // one byte for the length of the padding + if wuf, ok := f.(*WindowUpdateFrame); !ok || wuf.Increment != wantBack || wuf.StreamID != 0 { + return fmt.Errorf("Expected conn WindowUpdateFrame for %d bytes; got %v", wantBack, summarizeFrame(f)) + } + + f, err = ct.readNonSettingsFrame() + if err != nil { + return fmt.Errorf("ReadFrame while waiting for second WindowUpdateFrame: %v", err) + } + if wuf, ok := f.(*WindowUpdateFrame); !ok || wuf.Increment != wantBack || 
wuf.StreamID == 0 { + return fmt.Errorf("Expected stream WindowUpdateFrame for %d bytes; got %v", wantBack, summarizeFrame(f)) + } + unblockClient <- true + return nil + } + ct.run() +} + +// golang.org/issue/16572 -- RoundTrip shouldn't hang when it gets a +// StreamError as a result of the response HEADERS +func TestTransportReturnsErrorOnBadResponseHeaders(t *testing.T) { + ct := newClientTester(t) + + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err == nil { + res.Body.Close() + return errors.New("unexpected successful GET") + } + want := StreamError{1, ErrCodeProtocol, headerFieldNameError(" content-type")} + if !reflect.DeepEqual(want, err) { + t.Errorf("RoundTrip error = %#v; want %#v", err, want) + } + return nil + } + ct.server = func() error { + ct.greet() + + hf, err := ct.firstHeaders() + if err != nil { + return err + } + + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: " content-type", Value: "bogus"}) // bogus spaces + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + + for { + fr, err := ct.readFrame() + if err != nil { + return fmt.Errorf("error waiting for RST_STREAM from client: %v", err) + } + if _, ok := fr.(*SettingsFrame); ok { + continue + } + if rst, ok := fr.(*RSTStreamFrame); !ok || rst.StreamID != 1 || rst.ErrCode != ErrCodeProtocol { + t.Errorf("Frame = %v; want RST_STREAM for stream 1 with ErrCodeProtocol", summarizeFrame(fr)) + } + break + } + + return nil + } + ct.run() +} + +// byteAndEOFReader is an io.Reader which returns one byte +// (the underlying byte) and io.EOF at once in its Read call. +type byteAndEOFReader byte + +func (b byteAndEOFReader) Read(p []byte) (n int, err error) { + if len(p) == 0 { + panic("unexpected useless call") + } + p[0] = byte(b) + return 1, io.EOF +} + +// Issue 16788: the Transport had a regression where it started +// sending a spurious DATA frame with a duplicate END_STREAM bit after +// the request body writer goroutine had already read an EOF from the +// Request.Body and included the END_STREAM on a data-carrying DATA +// frame. +// +// Notably, to trigger this, the requests need to use a Request.Body +// which returns (non-0, io.EOF) and also needs to set the ContentLength +// explicitly. +func TestTransportBodyDoubleEndStream(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + // Nothing. + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + for i := 0; i < 2; i++ { + req, _ := http.NewRequest("POST", st.ts.URL, byteAndEOFReader('a')) + req.ContentLength = 1 + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatalf("failure on req %d: %v", i+1, err) + } + defer res.Body.Close() + } +} + +// golang.org/issue/16847, golang.org/issue/19103 +func TestTransportRequestPathPseudo(t *testing.T) { + type result struct { + path string + err string + } + tests := []struct { + req *http.Request + want result + }{ + 0: { + req: &http.Request{ + Method: "GET", + URL: &url.URL{ + Host: "foo.com", + Path: "/foo", + }, + }, + want: result{path: "/foo"}, + }, + // In Go 1.7, we accepted paths of "//foo". + // In Go 1.8, we rejected it (issue 16847). + // In Go 1.9, we accepted it again (issue 19103).
+ 1: { + req: &http.Request{ + Method: "GET", + URL: &url.URL{ + Host: "foo.com", + Path: "//foo", + }, + }, + want: result{path: "//foo"}, + }, + + // Opaque with //$Matching_Hostname/path + 2: { + req: &http.Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "https", + Opaque: "//foo.com/path", + Host: "foo.com", + Path: "/ignored", + }, + }, + want: result{path: "/path"}, + }, + + // Opaque with some other Request.Host instead: + 3: { + req: &http.Request{ + Method: "GET", + Host: "bar.com", + URL: &url.URL{ + Scheme: "https", + Opaque: "//bar.com/path", + Host: "foo.com", + Path: "/ignored", + }, + }, + want: result{path: "/path"}, + }, + + // Opaque without the leading "//": + 4: { + req: &http.Request{ + Method: "GET", + URL: &url.URL{ + Opaque: "/path", + Host: "foo.com", + Path: "/ignored", + }, + }, + want: result{path: "/path"}, + }, + + // Opaque we can't handle: + 5: { + req: &http.Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "https", + Opaque: "//unknown_host/path", + Host: "foo.com", + Path: "/ignored", + }, + }, + want: result{err: `invalid request :path "https://unknown_host/path" from URL.Opaque = "//unknown_host/path"`}, + }, + + // A CONNECT request: + 6: { + req: &http.Request{ + Method: "CONNECT", + URL: &url.URL{ + Host: "foo.com", + }, + }, + want: result{}, + }, + } + for i, tt := range tests { + cc := &ClientConn{peerMaxHeaderListSize: 0xffffffffffffffff} + cc.henc = hpack.NewEncoder(&cc.hbuf) + cc.mu.Lock() + hdrs, err := cc.encodeHeaders(tt.req, false, "", -1) + cc.mu.Unlock() + var got result + hpackDec := hpack.NewDecoder(initialHeaderTableSize, func(f hpack.HeaderField) { + if f.Name == ":path" { + got.path = f.Value + } + }) + if err != nil { + got.err = err.Error() + } else if len(hdrs) > 0 { + if _, err := hpackDec.Write(hdrs); err != nil { + t.Errorf("%d. bogus hpack: %v", i, err) + continue + } + } + if got != tt.want { + t.Errorf("%d. got %+v; want %+v", i, got, tt.want) + } + + } + +} + +// golang.org/issue/17071 -- don't sniff the first byte of the request body +// before we've determined that the ClientConn is usable. +func TestRoundTripDoesntConsumeRequestBodyEarly(t *testing.T) { + const body = "foo" + req, _ := http.NewRequest("POST", "http://foo.com/", ioutil.NopCloser(strings.NewReader(body))) + cc := &ClientConn{ + closed: true, + } + _, err := cc.RoundTrip(req) + if err != errClientConnUnusable { + t.Fatalf("RoundTrip = %v; want errClientConnUnusable", err) + } + slurp, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Errorf("ReadAll = %v", err) + } + if string(slurp) != body { + t.Errorf("Body = %q; want %q", slurp, body) + } +} + +func TestClientConnPing(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}, optOnlyServer) + defer st.Close() + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + cc, err := tr.dialClientConn(st.ts.Listener.Addr().String(), false) + if err != nil { + t.Fatal(err) + } + if err = cc.Ping(testContext{}); err != nil { + t.Fatal(err) + } +} + +// Issue 16974: if the server sent a DATA frame after the user +// canceled the Transport's Request, the Transport previously wrote to a +// closed pipe, got an error, and ended up closing the whole TCP +// connection. +func TestTransportCancelDataResponseRace(t *testing.T) { + cancel := make(chan struct{}) + clientGotError := make(chan bool, 1) + + const msg = "Hello." 
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + if strings.Contains(r.URL.Path, "/hello") { + time.Sleep(50 * time.Millisecond) + io.WriteString(w, msg) + return + } + for i := 0; i < 50; i++ { + io.WriteString(w, "Some data.") + w.(http.Flusher).Flush() + if i == 2 { + close(cancel) + <-clientGotError + } + time.Sleep(10 * time.Millisecond) + } + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + c := &http.Client{Transport: tr} + req, _ := http.NewRequest("GET", st.ts.URL, nil) + req.Cancel = cancel + res, err := c.Do(req) + if err != nil { + t.Fatal(err) + } + if _, err = io.Copy(ioutil.Discard, res.Body); err == nil { + t.Fatal("unexpected success") + } + clientGotError <- true + + res, err = c.Get(st.ts.URL + "/hello") + if err != nil { + t.Fatal(err) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if string(slurp) != msg { + t.Errorf("Got = %q; want %q", slurp, msg) + } +} + +// Issue 21316: It should be safe to reuse an http.Request after the +// request has completed. +func TestTransportNoRaceOnRequestObjectAfterRequestComplete(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) + io.WriteString(w, "body") + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + req, _ := http.NewRequest("GET", st.ts.URL, nil) + resp, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + if _, err = io.Copy(ioutil.Discard, resp.Body); err != nil { + t.Fatalf("error reading response body: %v", err) + } + if err := resp.Body.Close(); err != nil { + t.Fatalf("error closing response body: %v", err) + } + + // This access of req.Header should not race with code in the transport. + req.Header = http.Header{} +} + +func TestTransportRetryAfterGOAWAY(t *testing.T) { + var dialer struct { + sync.Mutex + count int + } + ct1 := make(chan *clientTester) + ct2 := make(chan *clientTester) + + ln := newLocalListener(t) + defer ln.Close() + + tr := &Transport{ + TLSClientConfig: tlsConfigInsecure, + } + tr.DialTLS = func(network, addr string, cfg *tls.Config) (net.Conn, error) { + dialer.Lock() + defer dialer.Unlock() + dialer.count++ + if dialer.count == 3 { + return nil, errors.New("unexpected number of dials") + } + cc, err := net.Dial("tcp", ln.Addr().String()) + if err != nil { + return nil, fmt.Errorf("dial error: %v", err) + } + sc, err := ln.Accept() + if err != nil { + return nil, fmt.Errorf("accept error: %v", err) + } + ct := &clientTester{ + t: t, + tr: tr, + cc: cc, + sc: sc, + fr: NewFramer(sc, sc), + } + switch dialer.count { + case 1: + ct1 <- ct + case 2: + ct2 <- ct + } + return cc, nil + } + + errs := make(chan error, 3) + done := make(chan struct{}) + defer close(done) + + // Client. + go func() { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := tr.RoundTrip(req) + if res != nil { + res.Body.Close() + if got := res.Header.Get("Foo"); got != "bar" { + err = fmt.Errorf("foo header = %q; want bar", got) + } + } + if err != nil { + err = fmt.Errorf("RoundTrip: %v", err) + } + errs <- err + }() + + connToClose := make(chan io.Closer, 2) + + // Server for the first request. 
+ go func() { + var ct *clientTester + select { + case ct = <-ct1: + case <-done: + return + } + + connToClose <- ct.cc + ct.greet() + hf, err := ct.firstHeaders() + if err != nil { + errs <- fmt.Errorf("server1 failed reading HEADERS: %v", err) + return + } + t.Logf("server1 got %v", hf) + if err := ct.fr.WriteGoAway(0 /*max id*/, ErrCodeNo, nil); err != nil { + errs <- fmt.Errorf("server1 failed writing GOAWAY: %v", err) + return + } + errs <- nil + }() + + // Server for the second request. + go func() { + var ct *clientTester + select { + case ct = <-ct2: + case <-done: + return + } + + connToClose <- ct.cc + ct.greet() + hf, err := ct.firstHeaders() + if err != nil { + errs <- fmt.Errorf("server2 failed reading HEADERS: %v", err) + return + } + t.Logf("server2 got %v", hf) + + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"}) + err = ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + if err != nil { + errs <- fmt.Errorf("server2 failed writing response HEADERS: %v", err) + } else { + errs <- nil + } + }() + + for k := 0; k < 3; k++ { + select { + case err := <-errs: + if err != nil { + t.Error(err) + } + case <-time.After(1 * time.Second): + t.Errorf("timed out") + } + } + + for { + select { + case c := <-connToClose: + c.Close() + default: + return + } + } +} + +func TestTransportRetryAfterRefusedStream(t *testing.T) { + clientDone := make(chan struct{}) + ct := newClientTester(t) + ct.client = func() error { + defer ct.cc.(*net.TCPConn).CloseWrite() + defer close(clientDone) + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + resp, err := ct.tr.RoundTrip(req) + if err != nil { + return fmt.Errorf("RoundTrip: %v", err) + } + resp.Body.Close() + if resp.StatusCode != 204 { + return fmt.Errorf("Status = %v; want 204", resp.StatusCode) + } + return nil + } + ct.server = func() error { + ct.greet() + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + nreq := 0 + + for { + f, err := ct.fr.ReadFrame() + if err != nil { + select { + case <-clientDone: + // If the client's done, it + // will have reported any + // errors on its side. + return nil + default: + return err + } + } + switch f := f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + case *HeadersFrame: + if !f.HeadersEnded() { + return fmt.Errorf("headers should have END_HEADERS set: %v", f) + } + nreq++ + if nreq == 1 { + ct.fr.WriteRSTStream(f.StreamID, ErrCodeRefusedStream) + } else { + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "204"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: true, + BlockFragment: buf.Bytes(), + }) + } + default: + return fmt.Errorf("Unexpected client frame %v", f) + } + } + } + ct.run() +} + +func TestTransportRetryHasLimit(t *testing.T) { + // Skip in short mode because the total expected delay is 1s+2s+4s+8s+16s=29s.
+ if testing.Short() { + t.Skip("skipping long test in short mode") + } + clientDone := make(chan struct{}) + ct := newClientTester(t) + ct.client = func() error { + defer ct.cc.(*net.TCPConn).CloseWrite() + defer close(clientDone) + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + resp, err := ct.tr.RoundTrip(req) + if err == nil { + return fmt.Errorf("RoundTrip expected error, got response: %+v", resp) + } + t.Logf("expected error, got: %v", err) + return nil + } + ct.server = func() error { + ct.greet() + for { + f, err := ct.fr.ReadFrame() + if err != nil { + select { + case <-clientDone: + // If the client's done, it + // will have reported any + // errors on its side. + return nil + default: + return err + } + } + switch f := f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + case *HeadersFrame: + if !f.HeadersEnded() { + return fmt.Errorf("headers should have END_HEADERS set: %v", f) + } + ct.fr.WriteRSTStream(f.StreamID, ErrCodeRefusedStream) + default: + return fmt.Errorf("Unexpected client frame %v", f) + } + } + } + ct.run() +} + +func TestTransportResponseDataBeforeHeaders(t *testing.T) { + // This test uses an invalid response format. + // Discard logger output so it does not spam the test output. + log.SetOutput(ioutil.Discard) + defer log.SetOutput(os.Stderr) + + ct := newClientTester(t) + ct.client = func() error { + defer ct.cc.(*net.TCPConn).CloseWrite() + req := httptest.NewRequest("GET", "https://dummy.tld/", nil) + // First request is normal to ensure the check is per stream and not per connection. + _, err := ct.tr.RoundTrip(req) + if err != nil { + return fmt.Errorf("RoundTrip expected no error, got: %v", err) + } + // Second request returns a DATA frame with no HEADERS. + resp, err := ct.tr.RoundTrip(req) + if err == nil { + return fmt.Errorf("RoundTrip expected error, got response: %+v", resp) + } + if err, ok := err.(StreamError); !ok || err.Code != ErrCodeProtocol { + return fmt.Errorf("expected stream PROTOCOL_ERROR, got: %v", err) + } + return nil + } + ct.server = func() error { + ct.greet() + for { + f, err := ct.fr.ReadFrame() + if err == io.EOF { + return nil + } else if err != nil { + return err + } + switch f := f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + case *HeadersFrame: + switch f.StreamID { + case 1: + // Send a valid response to first request. + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: true, + BlockFragment: buf.Bytes(), + }) + case 3: + ct.fr.WriteData(f.StreamID, true, []byte("payload")) + } + default: + return fmt.Errorf("Unexpected client frame %v", f) + } + } + } + ct.run() +} +func TestTransportRequestsStallAtServerLimit(t *testing.T) { + const maxConcurrent = 2 + + greet := make(chan struct{}) // server sends initial SETTINGS frame + gotRequest := make(chan struct{}) // server received a request + clientDone := make(chan struct{}) + + // Collect errors from goroutines. + var wg sync.WaitGroup + errs := make(chan error, 100) + defer func() { + wg.Wait() + close(errs) + for err := range errs { + t.Error(err) + } + }() + + // We will send maxConcurrent+2 requests. This checker goroutine waits for the + // following stages: + // 1. The first maxConcurrent requests are received by the server. + // 2. The client will cancel the next request + // 3. The server is unblocked so it can service the first maxConcurrent requests + // 4.
The client will send the final request + wg.Add(1) + unblockClient := make(chan struct{}) + clientRequestCancelled := make(chan struct{}) + unblockServer := make(chan struct{}) + go func() { + defer wg.Done() + // Stage 1. + for k := 0; k < maxConcurrent; k++ { + <-gotRequest + } + // Stage 2. + close(unblockClient) + <-clientRequestCancelled + // Stage 3: give some time for the final RoundTrip call to be scheduled and + // verify that the final request is not sent. + time.Sleep(50 * time.Millisecond) + select { + case <-gotRequest: + errs <- errors.New("last request did not stall") + close(unblockServer) + return + default: + } + close(unblockServer) + // Stage 4. + <-gotRequest + }() + + ct := newClientTester(t) + ct.client = func() error { + var wg sync.WaitGroup + defer func() { + wg.Wait() + close(clientDone) + ct.cc.(*net.TCPConn).CloseWrite() + }() + for k := 0; k < maxConcurrent+2; k++ { + wg.Add(1) + go func(k int) { + defer wg.Done() + // Don't send the second request until after receiving SETTINGS from the server + // to avoid a race where we use the default SettingMaxConcurrentStreams, which + // is much larger than maxConcurrent. We have to send the first request before + // waiting because the first request triggers the dial and greet. + if k > 0 { + <-greet + } + // Block until maxConcurrent requests are sent before sending any more. + if k >= maxConcurrent { + <-unblockClient + } + req, _ := http.NewRequest("GET", fmt.Sprintf("https://dummy.tld/%d", k), nil) + if k == maxConcurrent { + // This request will be canceled. + cancel := make(chan struct{}) + req.Cancel = cancel + close(cancel) + _, err := ct.tr.RoundTrip(req) + close(clientRequestCancelled) + if err == nil { + errs <- fmt.Errorf("RoundTrip(%d) should have failed due to cancel", k) + return + } + } else { + resp, err := ct.tr.RoundTrip(req) + if err != nil { + errs <- fmt.Errorf("RoundTrip(%d): %v", k, err) + return + } + ioutil.ReadAll(resp.Body) + resp.Body.Close() + if resp.StatusCode != 204 { + errs <- fmt.Errorf("Status = %v; want 204", resp.StatusCode) + return + } + } + }(k) + } + return nil + } + + ct.server = func() error { + var wg sync.WaitGroup + defer wg.Wait() + + ct.greet(Setting{SettingMaxConcurrentStreams, maxConcurrent}) + + // Server write loop. + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + writeResp := make(chan uint32, maxConcurrent+1) + + wg.Add(1) + go func() { + defer wg.Done() + <-unblockServer + for id := range writeResp { + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "204"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: id, + EndHeaders: true, + EndStream: true, + BlockFragment: buf.Bytes(), + }) + } + }() + + // Server read loop. + var nreq int + for { + f, err := ct.fr.ReadFrame() + if err != nil { + select { + case <-clientDone: + // If the client's done, it will have reported any errors on its side. + return nil + default: + return err + } + } + switch f := f.(type) { + case *WindowUpdateFrame: + case *SettingsFrame: + // Wait for the client SETTINGS ack until ending the greet. 
+ close(greet) + case *HeadersFrame: + if !f.HeadersEnded() { + return fmt.Errorf("headers should have END_HEADERS set: %v", f) + } + gotRequest <- struct{}{} + nreq++ + writeResp <- f.StreamID + if nreq == maxConcurrent+1 { + close(writeResp) + } + default: + return fmt.Errorf("Unexpected client frame %v", f) + } + } + } + + ct.run() +} + +func TestAuthorityAddr(t *testing.T) { + tests := []struct { + scheme, authority string + want string + }{ + {"http", "foo.com", "foo.com:80"}, + {"https", "foo.com", "foo.com:443"}, + {"https", "foo.com:1234", "foo.com:1234"}, + {"https", "1.2.3.4:1234", "1.2.3.4:1234"}, + {"https", "1.2.3.4", "1.2.3.4:443"}, + {"https", "[::1]:1234", "[::1]:1234"}, + {"https", "[::1]", "[::1]:443"}, + } + for _, tt := range tests { + got := authorityAddr(tt.scheme, tt.authority) + if got != tt.want { + t.Errorf("authorityAddr(%q, %q) = %q; want %q", tt.scheme, tt.authority, got, tt.want) + } + } +} + +// Issue 20448: stop allocating for DATA frames' payload after +// Response.Body.Close is called. +func TestTransportAllocationsAfterResponseBodyClose(t *testing.T) { + megabyteZero := make([]byte, 1<<20) + + writeErr := make(chan error, 1) + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + w.(http.Flusher).Flush() + var sum int64 + for i := 0; i < 100; i++ { + n, err := w.Write(megabyteZero) + sum += int64(n) + if err != nil { + writeErr <- err + return + } + } + t.Logf("wrote all %d bytes", sum) + writeErr <- nil + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + res, err := c.Get(st.ts.URL) + if err != nil { + t.Fatal(err) + } + var buf [1]byte + if _, err := res.Body.Read(buf[:]); err != nil { + t.Error(err) + } + if err := res.Body.Close(); err != nil { + t.Error(err) + } + + trb, ok := res.Body.(transportResponseBody) + if !ok { + t.Fatalf("res.Body = %T; want transportResponseBody", res.Body) + } + if trb.cs.bufPipe.b != nil { + t.Errorf("response body pipe is still open") + } + + gotErr := <-writeErr + if gotErr == nil { + t.Errorf("Handler unexpectedly managed to write its entire response without getting an error") + } else if gotErr != errStreamClosed { + t.Errorf("Handler Write err = %v; want errStreamClosed", gotErr) + } +} +
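Background for the next test: since Go 1.8, net/http provides http.NoBody, an io.ReadCloser that always reports EOF, letting a caller say "there is definitely no request body" (the go18httpNoBody helper used below presumably returns it on new enough Go versions). A small usage sketch; the example.com URL is a placeholder:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// http.NoBody reads zero bytes, so an HTTP/2 client can set
	// END_STREAM on the HEADERS frame and skip sending a DATA frame
	// entirely, which is what the test below asserts.
	req, err := http.NewRequest("GET", "https://example.com/", http.NoBody)
	if err != nil {
		panic(err)
	}
	fmt.Println(req.ContentLength, req.Body == http.NoBody) // 0 true
}
```

+// Issue 18891: make sure Request.Body == NoBody means no DATA frame +// is ever sent, even if empty.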
+func TestTransportNoBodyMeansNoDATA(t *testing.T) { + ct := newClientTester(t) + + unblockClient := make(chan bool) + + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", go18httpNoBody()) + ct.tr.RoundTrip(req) + <-unblockClient + return nil + } + ct.server = func() error { + defer close(unblockClient) + defer ct.cc.(*net.TCPConn).Close() + ct.greet() + + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return fmt.Errorf("ReadFrame while waiting for Headers: %v", err) + } + switch f := f.(type) { + default: + return fmt.Errorf("Got %T; want HeadersFrame", f) + case *WindowUpdateFrame, *SettingsFrame: + continue + case *HeadersFrame: + if !f.StreamEnded() { + return fmt.Errorf("got headers frame without END_STREAM") + } + return nil + } + } + } + ct.run() +} + +func benchSimpleRoundTrip(b *testing.B, nHeaders int) { + defer disableGoroutineTracking()() + b.ReportAllocs() + st := newServerTester(b, + func(w http.ResponseWriter, r *http.Request) { + }, + optOnlyServer, + optQuiet, + ) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + b.Fatal(err) + } + + for i := 0; i < nHeaders; i++ { + name := fmt.Sprint("A-", i) + req.Header.Set(name, "*") + } + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + res, err := tr.RoundTrip(req) + if err != nil { + if res != nil { + res.Body.Close() + } + b.Fatalf("RoundTrip err = %v; want nil", err) + } + res.Body.Close() + if res.StatusCode != http.StatusOK { + b.Fatalf("Response code = %v; want %v", res.StatusCode, http.StatusOK) + } + } +} + +type infiniteReader struct{} + +func (r infiniteReader) Read(b []byte) (int, error) { + return len(b), nil +} + +// Issue 20521: it is not an error to receive a response and end stream +// from the server without the body being consumed. +func TestTransportResponseAndResetWithoutConsumingBodyRace(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + // The request body needs to be big enough to trigger flow control. + req, _ := http.NewRequest("PUT", st.ts.URL, infiniteReader{}) + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + if res.StatusCode != http.StatusOK { + t.Fatalf("Response code = %v; want %v", res.StatusCode, http.StatusOK) + } +} + +// Verify transport doesn't crash when receiving bogus response lacking a :status header. +// Issue 22880. 
+func TestTransportHandlesInvalidStatuslessResponse(t *testing.T) { + ct := newClientTester(t) + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + _, err := ct.tr.RoundTrip(req) + const substr = "malformed response from server: missing status pseudo header" + if !strings.Contains(fmt.Sprint(err), substr) { + return fmt.Errorf("RoundTrip error = %v; want substring %q", err, substr) + } + return nil + } + ct.server = func() error { + ct.greet() + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return err + } + switch f := f.(type) { + case *HeadersFrame: + enc.WriteField(hpack.HeaderField{Name: "content-type", Value: "text/html"}) // no :status header + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: false, // we'll send some DATA to try to crash the transport + BlockFragment: buf.Bytes(), + }) + ct.fr.WriteData(f.StreamID, true, []byte("payload")) + return nil + } + } + } + ct.run() +} + +func BenchmarkClientRequestHeaders(b *testing.B) { + b.Run(" 0 Headers", func(b *testing.B) { benchSimpleRoundTrip(b, 0) }) + b.Run(" 10 Headers", func(b *testing.B) { benchSimpleRoundTrip(b, 10) }) + b.Run(" 100 Headers", func(b *testing.B) { benchSimpleRoundTrip(b, 100) }) + b.Run("1000 Headers", func(b *testing.B) { benchSimpleRoundTrip(b, 1000) }) +} diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go new file mode 100644 index 0000000..54ab4a8 --- /dev/null +++ b/vendor/golang.org/x/net/http2/write.go @@ -0,0 +1,365 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "fmt" + "log" + "net/http" + "net/url" + + "golang.org/x/net/http2/hpack" + "golang.org/x/net/lex/httplex" +) + +// writeFramer is implemented by any type that is used to write frames. +type writeFramer interface { + writeFrame(writeContext) error + + // staysWithinBuffer reports whether this writer promises that + // it will only write less than or equal to size bytes, and it + // won't Flush the write context. + staysWithinBuffer(size int) bool +} + +// writeContext is the interface needed by the various frame writer +// types below. All the writeFrame methods below are scheduled via the +// frame writing scheduler (see writeScheduler in writesched.go). +// +// This interface is implemented by *serverConn. +// +// TODO: decide whether to a) use this in the client code (which didn't +// end up using this yet, because it has a simpler design, not +// currently implementing priorities), or b) delete this and +// make the server code a bit more concrete. +type writeContext interface { + Framer() *Framer + Flush() error + CloseConn() error + // HeaderEncoder returns an HPACK encoder that writes to the + // returned buffer. + HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) +} + +// writeEndsStream reports whether w writes a frame that will transition +// the stream to a half-closed local state. This returns false for RST_STREAM, +// which closes the entire stream (not just the local half). +func writeEndsStream(w writeFramer) bool { + switch v := w.(type) { + case *writeData: + return v.endStream + case *writeResHeaders: + return v.endStream + case nil: + // This can only happen if the caller reuses w after it's + // been intentionally nil'ed out to prevent use. 
Keep this + // here to catch future refactoring breaking it. + panic("writeEndsStream called on nil writeFramer") + } + return false +} + +type flushFrameWriter struct{} + +func (flushFrameWriter) writeFrame(ctx writeContext) error { + return ctx.Flush() +} + +func (flushFrameWriter) staysWithinBuffer(max int) bool { return false } + +type writeSettings []Setting + +func (s writeSettings) staysWithinBuffer(max int) bool { + const settingSize = 6 // uint16 + uint32 + return frameHeaderLen+settingSize*len(s) <= max + +} + +func (s writeSettings) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteSettings([]Setting(s)...) +} + +type writeGoAway struct { + maxStreamID uint32 + code ErrCode +} + +func (p *writeGoAway) writeFrame(ctx writeContext) error { + err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil) + ctx.Flush() // ignore error: we're hanging up on them anyway + return err +} + +func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes + +type writeData struct { + streamID uint32 + p []byte + endStream bool +} + +func (w *writeData) String() string { + return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream) +} + +func (w *writeData) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteData(w.streamID, w.endStream, w.p) +} + +func (w *writeData) staysWithinBuffer(max int) bool { + return frameHeaderLen+len(w.p) <= max +} + +// handlerPanicRST is the message sent from handler goroutines when +// the handler panics. +type handlerPanicRST struct { + StreamID uint32 +} + +func (hp handlerPanicRST) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal) +} + +func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +func (se StreamError) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteRSTStream(se.StreamID, se.Code) +} + +func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +type writePingAck struct{ pf *PingFrame } + +func (w writePingAck) writeFrame(ctx writeContext) error { + return ctx.Framer().WritePing(true, w.pf.Data) +} + +func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max } + +type writeSettingsAck struct{} + +func (writeSettingsAck) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteSettingsAck() +} + +func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max } + +// splitHeaderBlock splits headerBlock into fragments so that each fragment fits +// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true +// for the first/last fragment, respectively. +func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error { + // For now we're lazy and just pick the minimum MAX_FRAME_SIZE + // that all peers must support (16KB). Later we could care + // more and send larger frames if the peer advertised it, but + // there's little point. Most headers are small anyway (so we + // generally won't have CONTINUATION frames), and extra frames + // only waste 9 bytes anyway. 
+ const maxFrameSize = 16384 + + first := true + for len(headerBlock) > 0 { + frag := headerBlock + if len(frag) > maxFrameSize { + frag = frag[:maxFrameSize] + } + headerBlock = headerBlock[len(frag):] + if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil { + return err + } + first = false + } + return nil +} + +// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames +// for HTTP response headers or trailers from a server handler. +type writeResHeaders struct { + streamID uint32 + httpResCode int // 0 means no ":status" line + h http.Header // may be nil + trailers []string // if non-nil, which keys of h to write. nil means all. + endStream bool + + date string + contentType string + contentLength string +} + +func encKV(enc *hpack.Encoder, k, v string) { + if VerboseLogs { + log.Printf("http2: server encoding header %q = %q", k, v) + } + enc.WriteField(hpack.HeaderField{Name: k, Value: v}) +} + +func (w *writeResHeaders) staysWithinBuffer(max int) bool { + // TODO: this is a common one. It'd be nice to return true + // here and get into the fast path if we could be clever and + // calculate the size fast enough, or at least a conservative + // upper bound that usually fires. (Maybe if w.h and + // w.trailers are nil, so we don't need to enumerate it.) + // Otherwise I'm afraid that just calculating the length to + // answer this question would be slower than the ~2µs benefit. + return false +} + +func (w *writeResHeaders) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + + if w.httpResCode != 0 { + encKV(enc, ":status", httpCodeString(w.httpResCode)) + } + + encodeHeaders(enc, w.h, w.trailers) + + if w.contentType != "" { + encKV(enc, "content-type", w.contentType) + } + if w.contentLength != "" { + encKV(enc, "content-length", w.contentLength) + } + if w.date != "" { + encKV(enc, "date", w.date) + } + + headerBlock := buf.Bytes() + if len(headerBlock) == 0 && w.trailers == nil { + panic("unexpected empty hpack") + } + + return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) +} + +func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { + if firstFrag { + return ctx.Framer().WriteHeaders(HeadersFrameParam{ + StreamID: w.streamID, + BlockFragment: frag, + EndStream: w.endStream, + EndHeaders: lastFrag, + }) + } else { + return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) + } +} + +// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames. +type writePushPromise struct { + streamID uint32 // pusher stream + method string // for :method + url *url.URL // for :scheme, :authority, :path + h http.Header + + // Creates an ID for a pushed stream. This runs on serveG just before + // the frame is written. The returned ID is copied to promisedID.
+ allocatePromisedID func() (uint32, error) + promisedID uint32 +} + +func (w *writePushPromise) staysWithinBuffer(max int) bool { + // TODO: see writeResHeaders.staysWithinBuffer + return false +} + +func (w *writePushPromise) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + + encKV(enc, ":method", w.method) + encKV(enc, ":scheme", w.url.Scheme) + encKV(enc, ":authority", w.url.Host) + encKV(enc, ":path", w.url.RequestURI()) + encodeHeaders(enc, w.h, nil) + + headerBlock := buf.Bytes() + if len(headerBlock) == 0 { + panic("unexpected empty hpack") + } + + return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) +} + +func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { + if firstFrag { + return ctx.Framer().WritePushPromise(PushPromiseParam{ + StreamID: w.streamID, + PromiseID: w.promisedID, + BlockFragment: frag, + EndHeaders: lastFrag, + }) + } else { + return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) + } +} + +type write100ContinueHeadersFrame struct { + streamID uint32 +} + +func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + encKV(enc, ":status", "100") + return ctx.Framer().WriteHeaders(HeadersFrameParam{ + StreamID: w.streamID, + BlockFragment: buf.Bytes(), + EndStream: false, + EndHeaders: true, + }) +} + +func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool { + // Sloppy but conservative: + return 9+2*(len(":status")+len("100")) <= max +} + +type writeWindowUpdate struct { + streamID uint32 // or 0 for conn-level + n uint32 +} + +func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +func (wu writeWindowUpdate) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n) +} + +// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k]) +// is encoded only if k is in keys. +func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { + if keys == nil { + sorter := sorterPool.Get().(*sorter) + // Using defer here, since the returned keys from the + // sorter.Keys method are only valid until the sorter + // is returned: + defer sorterPool.Put(sorter) + keys = sorter.Keys(h) + } + for _, k := range keys { + vv := h[k] + k = lowerHeader(k) + if !validWireHeaderFieldName(k) { + // Skip it as backup paranoia. Per + // golang.org/issue/14048, these should + // already be rejected at a higher level. + continue + } + isTE := k == "transfer-encoding" + for _, v := range vv { + if !httplex.ValidHeaderFieldValue(v) { + // TODO: return an error? golang.org/issue/14048 + // For now just omit it. + continue + } + // TODO: more of "8.1.2.2 Connection-Specific Header Fields" + if isTE && v != "trailers" { + continue + } + encKV(enc, k, v) + } + } +} diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go new file mode 100644 index 0000000..4fe3073 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched.go @@ -0,0 +1,242 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import "fmt" + +// WriteScheduler is the interface implemented by HTTP/2 write schedulers. +// Methods are never called concurrently. +type WriteScheduler interface { + // OpenStream opens a new stream in the write scheduler.
+ // It is illegal to call this with streamID=0 or with a streamID that is + // already open -- the call may panic. + OpenStream(streamID uint32, options OpenStreamOptions) + + // CloseStream closes a stream in the write scheduler. Any frames queued on + // this stream should be discarded. It is illegal to call this on a stream + // that is not open -- the call may panic. + CloseStream(streamID uint32) + + // AdjustStream adjusts the priority of the given stream. This may be called + // on a stream that has not yet been opened or has been closed. Note that + // RFC 7540 allows PRIORITY frames to be sent on streams in any state. See: + // https://tools.ietf.org/html/rfc7540#section-5.1 + AdjustStream(streamID uint32, priority PriorityParam) + + // Push queues a frame in the scheduler. In most cases, this will not be + // called with wr.StreamID()!=0 unless that stream is currently open. The one + // exception is RST_STREAM frames, which may be sent on idle or closed streams. + Push(wr FrameWriteRequest) + + // Pop dequeues the next frame to write. Returns false if no frames can + // be written. Frames with a given wr.StreamID() are Pop'd in the same + // order they are Push'd. + Pop() (wr FrameWriteRequest, ok bool) +} + +// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream. +type OpenStreamOptions struct { + // PusherID is zero if the stream was initiated by the client. Otherwise, + // PusherID names the stream that pushed the newly opened stream. + PusherID uint32 +} + +// FrameWriteRequest is a request to write a frame. +type FrameWriteRequest struct { + // write is the interface value that does the writing, once the + // WriteScheduler has selected this frame to write. The write + // functions are all defined in write.go. + write writeFramer + + // stream is the stream on which this frame will be written. + // nil for non-stream frames like PING and SETTINGS. + stream *stream + + // done, if non-nil, must be a buffered channel with space for + // 1 message and is sent the return value from write (or an + // earlier error) when the frame has been written. + done chan error +} + +// StreamID returns the id of the stream this frame will be written to. +// 0 is used for non-stream frames such as PING and SETTINGS. +func (wr FrameWriteRequest) StreamID() uint32 { + if wr.stream == nil { + if se, ok := wr.write.(StreamError); ok { + // (*serverConn).resetStream doesn't set + // stream because it doesn't necessarily have + // one. So special case this type of write + // message. + return se.StreamID + } + return 0 + } + return wr.stream.id +} + +// DataSize returns the number of flow control bytes that must be consumed +// to write this entire frame. This is 0 for non-DATA frames. +func (wr FrameWriteRequest) DataSize() int { + if wd, ok := wr.write.(*writeData); ok { + return len(wd.p) + } + return 0 +} + +// Consume consumes min(n, available) bytes from this frame, where available +// is the number of flow control bytes available on the stream. Consume returns +// 0, 1, or 2 frames, where the integer return value gives the number of frames +// returned. +// +// If flow control prevents consuming any bytes, this returns (_, _, 0). If +// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this +// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and +// 'rest' contains the remaining bytes. The consumed bytes are deducted from the +// underlying stream's flow control budget. 
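+//
+// A worked example (sketch only; the numbers are illustrative): with a
+// queued DATA frame carrying 30 bytes, a stream flow-control budget of
+// 100 bytes, and maxFrameSize=16, the split is limited by maxFrameSize:
+//
+//	consumed, rest, n := wr.Consume(math.MaxInt32)
+//	// n == 2: consumed carries the first 16 bytes (endStream forced to
+//	// false), rest carries the remaining 14, and 16 bytes are deducted
+//	// from the stream's flow-control budget.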
+func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) {
+	var empty FrameWriteRequest
+
+	// Non-DATA frames are always consumed whole.
+	wd, ok := wr.write.(*writeData)
+	if !ok || len(wd.p) == 0 {
+		return wr, empty, 1
+	}
+
+	// Might need to split after applying limits.
+	allowed := wr.stream.flow.available()
+	if n < allowed {
+		allowed = n
+	}
+	if wr.stream.sc.maxFrameSize < allowed {
+		allowed = wr.stream.sc.maxFrameSize
+	}
+	if allowed <= 0 {
+		return empty, empty, 0
+	}
+	if len(wd.p) > int(allowed) {
+		wr.stream.flow.take(allowed)
+		consumed := FrameWriteRequest{
+			stream: wr.stream,
+			write: &writeData{
+				streamID: wd.streamID,
+				p:        wd.p[:allowed],
+				// Even if the original had endStream set, there
+				// are bytes remaining because len(wd.p) > allowed,
+				// so we know endStream is false.
+				endStream: false,
+			},
+			// Our caller is blocking on the final DATA frame, not
+			// this intermediate frame, so no need to wait.
+			done: nil,
+		}
+		rest := FrameWriteRequest{
+			stream: wr.stream,
+			write: &writeData{
+				streamID:  wd.streamID,
+				p:         wd.p[allowed:],
+				endStream: wd.endStream,
+			},
+			done: wr.done,
+		}
+		return consumed, rest, 2
+	}
+
+	// The frame is consumed whole.
+	// NB: This cast cannot overflow because allowed is <= math.MaxInt32.
+	wr.stream.flow.take(int32(len(wd.p)))
+	return wr, empty, 1
+}
+
+// String is for debugging only.
+func (wr FrameWriteRequest) String() string {
+	var des string
+	if s, ok := wr.write.(fmt.Stringer); ok {
+		des = s.String()
+	} else {
+		des = fmt.Sprintf("%T", wr.write)
+	}
+	return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des)
+}
+
+// replyToWriter sends err to wr.done and panics if the send must block.
+// This does nothing if wr.done is nil.
+func (wr *FrameWriteRequest) replyToWriter(err error) {
+	if wr.done == nil {
+		return
+	}
+	select {
+	case wr.done <- err:
+	default:
+		panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write))
+	}
+	wr.write = nil // prevent use (assume it's tainted after wr.done send)
+}
+
+// writeQueue is used by implementations of WriteScheduler.
+type writeQueue struct {
+	s []FrameWriteRequest
+}
+
+func (q *writeQueue) empty() bool { return len(q.s) == 0 }
+
+func (q *writeQueue) push(wr FrameWriteRequest) {
+	q.s = append(q.s, wr)
+}
+
+func (q *writeQueue) shift() FrameWriteRequest {
+	if len(q.s) == 0 {
+		panic("invalid use of queue")
+	}
+	wr := q.s[0]
+	// TODO: less copy-happy queue.
+	copy(q.s, q.s[1:])
+	q.s[len(q.s)-1] = FrameWriteRequest{}
+	q.s = q.s[:len(q.s)-1]
+	return wr
+}
+
+// consume consumes up to n bytes from q.s[0]. If the frame is
+// entirely consumed, it is removed from the queue. If the frame
+// is partially consumed, the frame is kept with the consumed
+// bytes removed. Returns true iff any bytes were consumed.
+func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) {
+	if len(q.s) == 0 {
+		return FrameWriteRequest{}, false
+	}
+	consumed, rest, numresult := q.s[0].Consume(n)
+	switch numresult {
+	case 0:
+		return FrameWriteRequest{}, false
+	case 1:
+		q.shift()
+	case 2:
+		q.s[0] = rest
+	}
+	return consumed, true
+}
+
+type writeQueuePool []*writeQueue
+
+// put inserts an unused writeQueue into the pool.
+func (p *writeQueuePool) put(q *writeQueue) {
+	for i := range q.s {
+		q.s[i] = FrameWriteRequest{}
+	}
+	q.s = q.s[:0]
+	*p = append(*p, q)
+}
+
+// get returns an empty writeQueue.
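+//
+// A typical lifecycle, mirroring how the schedulers in this package use
+// the pool (sketch only; pool and wr stand for a writeQueuePool and a
+// pending FrameWriteRequest):
+//
+//	q := pool.get() // empty queue, recycled if one is pooled
+//	q.push(wr)
+//	// ... drain via q.shift() or q.consume(n) ...
+//	pool.put(q) // zero the entries and make q available for reuse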
+func (p *writeQueuePool) get() *writeQueue { + ln := len(*p) + if ln == 0 { + return new(writeQueue) + } + x := ln - 1 + q := (*p)[x] + (*p)[x] = nil + *p = (*p)[:x] + return q +} diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go new file mode 100644 index 0000000..848fed6 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_priority.go @@ -0,0 +1,452 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "fmt" + "math" + "sort" +) + +// RFC 7540, Section 5.3.5: the default weight is 16. +const priorityDefaultWeight = 15 // 16 = 15 + 1 + +// PriorityWriteSchedulerConfig configures a priorityWriteScheduler. +type PriorityWriteSchedulerConfig struct { + // MaxClosedNodesInTree controls the maximum number of closed streams to + // retain in the priority tree. Setting this to zero saves a small amount + // of memory at the cost of performance. + // + // See RFC 7540, Section 5.3.4: + // "It is possible for a stream to become closed while prioritization + // information ... is in transit. ... This potentially creates suboptimal + // prioritization, since the stream could be given a priority that is + // different from what is intended. To avoid these problems, an endpoint + // SHOULD retain stream prioritization state for a period after streams + // become closed. The longer state is retained, the lower the chance that + // streams are assigned incorrect or default priority values." + MaxClosedNodesInTree int + + // MaxIdleNodesInTree controls the maximum number of idle streams to + // retain in the priority tree. Setting this to zero saves a small amount + // of memory at the cost of performance. + // + // See RFC 7540, Section 5.3.4: + // Similarly, streams that are in the "idle" state can be assigned + // priority or become a parent of other streams. This allows for the + // creation of a grouping node in the dependency tree, which enables + // more flexible expressions of priority. Idle streams begin with a + // default priority (Section 5.3.5). + MaxIdleNodesInTree int + + // ThrottleOutOfOrderWrites enables write throttling to help ensure that + // data is delivered in priority order. This works around a race where + // stream B depends on stream A and both streams are about to call Write + // to queue DATA frames. If B wins the race, a naive scheduler would eagerly + // write as much data from B as possible, but this is suboptimal because A + // is a higher-priority stream. With throttling enabled, we write a small + // amount of data from B to minimize the amount of bandwidth that B can + // steal from A. + ThrottleOutOfOrderWrites bool +} + +// NewPriorityWriteScheduler constructs a WriteScheduler that schedules +// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3. +// If cfg is nil, default options are used. 
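+//
+// For example (sketch; the field values are illustrative, not
+// recommendations):
+//
+//	ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{
+//		MaxClosedNodesInTree:     10,
+//		MaxIdleNodesInTree:       10,
+//		ThrottleOutOfOrderWrites: true,
+//	})
+//	ws.OpenStream(1, OpenStreamOptions{})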
+func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler {
+	if cfg == nil {
+		// For justification of these defaults, see:
+		// https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY
+		cfg = &PriorityWriteSchedulerConfig{
+			MaxClosedNodesInTree:     10,
+			MaxIdleNodesInTree:       10,
+			ThrottleOutOfOrderWrites: false,
+		}
+	}
+
+	ws := &priorityWriteScheduler{
+		nodes:                make(map[uint32]*priorityNode),
+		maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
+		maxIdleNodesInTree:   cfg.MaxIdleNodesInTree,
+		enableWriteThrottle:  cfg.ThrottleOutOfOrderWrites,
+	}
+	ws.nodes[0] = &ws.root
+	if cfg.ThrottleOutOfOrderWrites {
+		ws.writeThrottleLimit = 1024
+	} else {
+		ws.writeThrottleLimit = math.MaxInt32
+	}
+	return ws
+}
+
+type priorityNodeState int
+
+const (
+	priorityNodeOpen priorityNodeState = iota
+	priorityNodeClosed
+	priorityNodeIdle
+)
+
+// priorityNode is a node in an HTTP/2 priority tree.
+// Each node is associated with a single stream ID.
+// See RFC 7540, Section 5.3.
+type priorityNode struct {
+	q            writeQueue        // queue of pending frames to write
+	id           uint32            // id of the stream, or 0 for the root of the tree
+	weight       uint8             // the actual weight is weight+1, so the value is in [1,256]
+	state        priorityNodeState // open | closed | idle
+	bytes        int64             // number of bytes written by this node, or 0 if closed
+	subtreeBytes int64             // sum(node.bytes) of all nodes in this subtree
+
+	// These links form the priority tree.
+	parent     *priorityNode
+	kids       *priorityNode // start of the kids list
+	prev, next *priorityNode // doubly-linked list of siblings
+}
+
+func (n *priorityNode) setParent(parent *priorityNode) {
+	if n == parent {
+		panic("setParent to self")
+	}
+	if n.parent == parent {
+		return
+	}
+	// Unlink from current parent.
+	if parent := n.parent; parent != nil {
+		if n.prev == nil {
+			parent.kids = n.next
+		} else {
+			n.prev.next = n.next
+		}
+		if n.next != nil {
+			n.next.prev = n.prev
+		}
+	}
+	// Link to new parent.
+	// If parent=nil, remove n from the tree.
+	// Always insert at the head of parent.kids (this is assumed by walkReadyInOrder).
+	n.parent = parent
+	if parent == nil {
+		n.next = nil
+		n.prev = nil
+	} else {
+		n.next = parent.kids
+		n.prev = nil
+		if n.next != nil {
+			n.next.prev = n
+		}
+		parent.kids = n
+	}
+}
+
+func (n *priorityNode) addBytes(b int64) {
+	n.bytes += b
+	for ; n != nil; n = n.parent {
+		n.subtreeBytes += b
+	}
+}
+
+// walkReadyInOrder iterates over the tree in priority order, calling f for each node
+// with a non-empty write queue. When f returns true, this function returns true and the
+// walk halts. tmp is used as scratch space for sorting.
+//
+// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
+// if any ancestor p of n is still open (ignoring the root node).
+func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool {
+	if !n.q.empty() && f(n, openParent) {
+		return true
+	}
+	if n.kids == nil {
+		return false
+	}
+
+	// Don't consider the root "open" when updating openParent since
+	// we can't send data frames on the root stream (only control frames).
+	if n.id != 0 {
+		openParent = openParent || (n.state == priorityNodeOpen)
+	}
+
+	// Common case: only one kid or all kids have the same weight.
+	// Some clients don't use weights; other clients (like web browsers)
+	// use mostly-linear priority trees.
+ w := n.kids.weight + needSort := false + for k := n.kids.next; k != nil; k = k.next { + if k.weight != w { + needSort = true + break + } + } + if !needSort { + for k := n.kids; k != nil; k = k.next { + if k.walkReadyInOrder(openParent, tmp, f) { + return true + } + } + return false + } + + // Uncommon case: sort the child nodes. We remove the kids from the parent, + // then re-insert after sorting so we can reuse tmp for future sort calls. + *tmp = (*tmp)[:0] + for n.kids != nil { + *tmp = append(*tmp, n.kids) + n.kids.setParent(nil) + } + sort.Sort(sortPriorityNodeSiblings(*tmp)) + for i := len(*tmp) - 1; i >= 0; i-- { + (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids + } + for k := n.kids; k != nil; k = k.next { + if k.walkReadyInOrder(openParent, tmp, f) { + return true + } + } + return false +} + +type sortPriorityNodeSiblings []*priorityNode + +func (z sortPriorityNodeSiblings) Len() int { return len(z) } +func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } +func (z sortPriorityNodeSiblings) Less(i, k int) bool { + // Prefer the subtree that has sent fewer bytes relative to its weight. + // See sections 5.3.2 and 5.3.4. + wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) + wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) + if bi == 0 && bk == 0 { + return wi >= wk + } + if bk == 0 { + return false + } + return bi/bk <= wi/wk +} + +type priorityWriteScheduler struct { + // root is the root of the priority tree, where root.id = 0. + // The root queues control frames that are not associated with any stream. + root priorityNode + + // nodes maps stream ids to priority tree nodes. + nodes map[uint32]*priorityNode + + // maxID is the maximum stream id in nodes. + maxID uint32 + + // lists of nodes that have been closed or are idle, but are kept in + // the tree for improved prioritization. When the lengths exceed either + // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. + closedNodes, idleNodes []*priorityNode + + // From the config. + maxClosedNodesInTree int + maxIdleNodesInTree int + writeThrottleLimit int32 + enableWriteThrottle bool + + // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. + tmp []*priorityNode + + // pool of empty queues for reuse. + queuePool writeQueuePool +} + +func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + // The stream may be currently idle but cannot be opened or closed. + if curr := ws.nodes[streamID]; curr != nil { + if curr.state != priorityNodeIdle { + panic(fmt.Sprintf("stream %d already opened", streamID)) + } + curr.state = priorityNodeOpen + return + } + + // RFC 7540, Section 5.3.5: + // "All streams are initially assigned a non-exclusive dependency on stream 0x0. + // Pushed streams initially depend on their associated stream. In both cases, + // streams are assigned a default weight of 16." 
+ parent := ws.nodes[options.PusherID] + if parent == nil { + parent = &ws.root + } + n := &priorityNode{ + q: *ws.queuePool.get(), + id: streamID, + weight: priorityDefaultWeight, + state: priorityNodeOpen, + } + n.setParent(parent) + ws.nodes[streamID] = n + if streamID > ws.maxID { + ws.maxID = streamID + } +} + +func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { + if streamID == 0 { + panic("violation of WriteScheduler interface: cannot close stream 0") + } + if ws.nodes[streamID] == nil { + panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) + } + if ws.nodes[streamID].state != priorityNodeOpen { + panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) + } + + n := ws.nodes[streamID] + n.state = priorityNodeClosed + n.addBytes(-n.bytes) + + q := n.q + ws.queuePool.put(&q) + n.q.s = nil + if ws.maxClosedNodesInTree > 0 { + ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) + } else { + ws.removeNode(n) + } +} + +func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { + if streamID == 0 { + panic("adjustPriority on root") + } + + // If streamID does not exist, there are two cases: + // - A closed stream that has been removed (this will have ID <= maxID) + // - An idle stream that is being used for "grouping" (this will have ID > maxID) + n := ws.nodes[streamID] + if n == nil { + if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 { + return + } + ws.maxID = streamID + n = &priorityNode{ + q: *ws.queuePool.get(), + id: streamID, + weight: priorityDefaultWeight, + state: priorityNodeIdle, + } + n.setParent(&ws.root) + ws.nodes[streamID] = n + ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n) + } + + // Section 5.3.1: A dependency on a stream that is not currently in the tree + // results in that stream being given a default priority (Section 5.3.5). + parent := ws.nodes[priority.StreamDep] + if parent == nil { + n.setParent(&ws.root) + n.weight = priorityDefaultWeight + return + } + + // Ignore if the client tries to make a node its own parent. + if n == parent { + return + } + + // Section 5.3.3: + // "If a stream is made dependent on one of its own dependencies, the + // formerly dependent stream is first moved to be dependent on the + // reprioritized stream's previous parent. The moved dependency retains + // its weight." + // + // That is: if parent depends on n, move parent to depend on n.parent. + for x := parent.parent; x != nil; x = x.parent { + if x == n { + parent.setParent(n.parent) + break + } + } + + // Section 5.3.3: The exclusive flag causes the stream to become the sole + // dependency of its parent stream, causing other dependencies to become + // dependent on the exclusive stream. + if priority.Exclusive { + k := parent.kids + for k != nil { + next := k.next + if k != n { + k.setParent(n) + } + k = next + } + } + + n.setParent(parent) + n.weight = priority.Weight +} + +func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { + var n *priorityNode + if id := wr.StreamID(); id == 0 { + n = &ws.root + } else { + n = ws.nodes[id] + if n == nil { + // id is an idle or closed stream. wr should not be a HEADERS or + // DATA frame. However, wr can be a RST_STREAM. In this case, we + // push wr onto the root, rather than creating a new priorityNode, + // since RST_STREAM is tiny and the stream's priority is unknown + // anyway. See issue #17919. 
+ if wr.DataSize() > 0 { + panic("add DATA on non-open stream") + } + n = &ws.root + } + } + n.q.push(wr) +} + +func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) { + ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool { + limit := int32(math.MaxInt32) + if openParent { + limit = ws.writeThrottleLimit + } + wr, ok = n.q.consume(limit) + if !ok { + return false + } + n.addBytes(int64(wr.DataSize())) + // If B depends on A and B continuously has data available but A + // does not, gradually increase the throttling limit to allow B to + // steal more and more bandwidth from A. + if openParent { + ws.writeThrottleLimit += 1024 + if ws.writeThrottleLimit < 0 { + ws.writeThrottleLimit = math.MaxInt32 + } + } else if ws.enableWriteThrottle { + ws.writeThrottleLimit = 1024 + } + return true + }) + return wr, ok +} + +func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) { + if maxSize == 0 { + return + } + if len(*list) == maxSize { + // Remove the oldest node, then shift left. + ws.removeNode((*list)[0]) + x := (*list)[1:] + copy(*list, x) + *list = (*list)[:len(x)] + } + *list = append(*list, n) +} + +func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { + for k := n.kids; k != nil; k = k.next { + k.setParent(n.parent) + } + n.setParent(nil) + delete(ws.nodes, n.id) +} diff --git a/vendor/golang.org/x/net/http2/writesched_priority_test.go b/vendor/golang.org/x/net/http2/writesched_priority_test.go new file mode 100644 index 0000000..f2b535a --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_priority_test.go @@ -0,0 +1,541 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package http2 + +import ( + "bytes" + "fmt" + "sort" + "testing" +) + +func defaultPriorityWriteScheduler() *priorityWriteScheduler { + return NewPriorityWriteScheduler(nil).(*priorityWriteScheduler) +} + +func checkPriorityWellFormed(ws *priorityWriteScheduler) error { + for id, n := range ws.nodes { + if id != n.id { + return fmt.Errorf("bad ws.nodes: ws.nodes[%d] = %d", id, n.id) + } + if n.parent == nil { + if n.next != nil || n.prev != nil { + return fmt.Errorf("bad node %d: nil parent but prev/next not nil", id) + } + continue + } + found := false + for k := n.parent.kids; k != nil; k = k.next { + if k.id == id { + found = true + break + } + } + if !found { + return fmt.Errorf("bad node %d: not found in parent %d kids list", id, n.parent.id) + } + } + return nil +} + +func fmtTree(ws *priorityWriteScheduler, fmtNode func(*priorityNode) string) string { + var ids []int + for _, n := range ws.nodes { + ids = append(ids, int(n.id)) + } + sort.Ints(ids) + + var buf bytes.Buffer + for _, id := range ids { + if buf.Len() != 0 { + buf.WriteString(" ") + } + if id == 0 { + buf.WriteString(fmtNode(&ws.root)) + } else { + buf.WriteString(fmtNode(ws.nodes[uint32(id)])) + } + } + return buf.String() +} + +func fmtNodeParentSkipRoot(n *priorityNode) string { + switch { + case n.id == 0: + return "" + case n.parent == nil: + return fmt.Sprintf("%d{parent:nil}", n.id) + default: + return fmt.Sprintf("%d{parent:%d}", n.id, n.parent.id) + } +} + +func fmtNodeWeightParentSkipRoot(n *priorityNode) string { + switch { + case n.id == 0: + return "" + case n.parent == nil: + return fmt.Sprintf("%d{weight:%d,parent:nil}", n.id, n.weight) + default: + return fmt.Sprintf("%d{weight:%d,parent:%d}", n.id, n.weight, n.parent.id) + } +} + +func TestPriorityTwoStreams(t *testing.T) { + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{}) + + want := "1{weight:15,parent:0} 2{weight:15,parent:0}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After open\ngot %q\nwant %q", got, want) + } + + // Move 1's parent to 2. + ws.AdjustStream(1, PriorityParam{ + StreamDep: 2, + Weight: 32, + Exclusive: false, + }) + want = "1{weight:32,parent:2} 2{weight:15,parent:0}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPriorityAdjustExclusiveZero(t *testing.T) { + // 1, 2, and 3 are all children of the 0 stream. + // Exclusive reprioritization to any of the streams should bring + // the rest of the streams under the reprioritized stream. 
+ ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{}) + ws.OpenStream(3, OpenStreamOptions{}) + + want := "1{weight:15,parent:0} 2{weight:15,parent:0} 3{weight:15,parent:0}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After open\ngot %q\nwant %q", got, want) + } + + ws.AdjustStream(2, PriorityParam{ + StreamDep: 0, + Weight: 20, + Exclusive: true, + }) + want = "1{weight:15,parent:2} 2{weight:20,parent:0} 3{weight:15,parent:2}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPriorityAdjustOwnParent(t *testing.T) { + // Assigning a node as its own parent should have no effect. + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{}) + ws.AdjustStream(2, PriorityParam{ + StreamDep: 2, + Weight: 20, + Exclusive: true, + }) + want := "1{weight:15,parent:0} 2{weight:15,parent:0}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPriorityClosedStreams(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{MaxClosedNodesInTree: 2}).(*priorityWriteScheduler) + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 2}) + ws.OpenStream(4, OpenStreamOptions{PusherID: 3}) + + // Close the first three streams. We lose 1, but keep 2 and 3. + ws.CloseStream(1) + ws.CloseStream(2) + ws.CloseStream(3) + + want := "2{weight:15,parent:0} 3{weight:15,parent:2} 4{weight:15,parent:3}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After close\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } + + // Adding a stream as an exclusive child of 1 gives it default + // priorities, since 1 is gone. + ws.OpenStream(5, OpenStreamOptions{}) + ws.AdjustStream(5, PriorityParam{StreamDep: 1, Weight: 15, Exclusive: true}) + + // Adding a stream as an exclusive child of 2 should work, since 2 is not gone. + ws.OpenStream(6, OpenStreamOptions{}) + ws.AdjustStream(6, PriorityParam{StreamDep: 2, Weight: 15, Exclusive: true}) + + want = "2{weight:15,parent:0} 3{weight:15,parent:6} 4{weight:15,parent:3} 5{weight:15,parent:0} 6{weight:15,parent:2}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After add streams\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPriorityClosedStreamsDisabled(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{}).(*priorityWriteScheduler) + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 2}) + + // Close the first two streams. We keep only 3. 
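+	// (MaxClosedNodesInTree is zero here, so closed nodes are removed
+	// from the tree immediately and their children re-parented.)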
+ ws.CloseStream(1) + ws.CloseStream(2) + + want := "3{weight:15,parent:0}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After close\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPriorityIdleStreams(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{MaxIdleNodesInTree: 2}).(*priorityWriteScheduler) + ws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 15}) // idle + ws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 15}) // idle + ws.AdjustStream(3, PriorityParam{StreamDep: 2, Weight: 20}) // idle + ws.OpenStream(4, OpenStreamOptions{}) + ws.OpenStream(5, OpenStreamOptions{}) + ws.OpenStream(6, OpenStreamOptions{}) + ws.AdjustStream(4, PriorityParam{StreamDep: 1, Weight: 15}) + ws.AdjustStream(5, PriorityParam{StreamDep: 2, Weight: 15}) + ws.AdjustStream(6, PriorityParam{StreamDep: 3, Weight: 15}) + + want := "2{weight:15,parent:0} 3{weight:20,parent:2} 4{weight:15,parent:0} 5{weight:15,parent:2} 6{weight:15,parent:3}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After open\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPriorityIdleStreamsDisabled(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{}).(*priorityWriteScheduler) + ws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 15}) // idle + ws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 15}) // idle + ws.AdjustStream(3, PriorityParam{StreamDep: 2, Weight: 20}) // idle + ws.OpenStream(4, OpenStreamOptions{}) + + want := "4{weight:15,parent:0}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After open\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPrioritySection531NonExclusive(t *testing.T) { + // Example from RFC 7540 Section 5.3.1. + // A,B,C,D = 1,2,3,4 + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(4, OpenStreamOptions{}) + ws.AdjustStream(4, PriorityParam{ + StreamDep: 1, + Weight: 15, + Exclusive: false, + }) + want := "1{parent:0} 2{parent:1} 3{parent:1} 4{parent:1}" + if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPrioritySection531Exclusive(t *testing.T) { + // Example from RFC 7540 Section 5.3.1. + // A,B,C,D = 1,2,3,4 + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(4, OpenStreamOptions{}) + ws.AdjustStream(4, PriorityParam{ + StreamDep: 1, + Weight: 15, + Exclusive: true, + }) + want := "1{parent:0} 2{parent:4} 3{parent:4} 4{parent:1}" + if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func makeSection533Tree() *priorityWriteScheduler { + // Initial tree from RFC 7540 Section 5.3.3. 
+ // A,B,C,D,E,F = 1,2,3,4,5,6 + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(4, OpenStreamOptions{PusherID: 3}) + ws.OpenStream(5, OpenStreamOptions{PusherID: 3}) + ws.OpenStream(6, OpenStreamOptions{PusherID: 4}) + return ws +} + +func TestPrioritySection533NonExclusive(t *testing.T) { + // Example from RFC 7540 Section 5.3.3. + // A,B,C,D,E,F = 1,2,3,4,5,6 + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(4, OpenStreamOptions{PusherID: 3}) + ws.OpenStream(5, OpenStreamOptions{PusherID: 3}) + ws.OpenStream(6, OpenStreamOptions{PusherID: 4}) + ws.AdjustStream(1, PriorityParam{ + StreamDep: 4, + Weight: 15, + Exclusive: false, + }) + want := "1{parent:4} 2{parent:1} 3{parent:1} 4{parent:0} 5{parent:3} 6{parent:4}" + if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPrioritySection533Exclusive(t *testing.T) { + // Example from RFC 7540 Section 5.3.3. + // A,B,C,D,E,F = 1,2,3,4,5,6 + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(4, OpenStreamOptions{PusherID: 3}) + ws.OpenStream(5, OpenStreamOptions{PusherID: 3}) + ws.OpenStream(6, OpenStreamOptions{PusherID: 4}) + ws.AdjustStream(1, PriorityParam{ + StreamDep: 4, + Weight: 15, + Exclusive: true, + }) + want := "1{parent:4} 2{parent:1} 3{parent:1} 4{parent:0} 5{parent:3} 6{parent:1}" + if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func checkPopAll(ws WriteScheduler, order []uint32) error { + for k, id := range order { + wr, ok := ws.Pop() + if !ok { + return fmt.Errorf("Pop[%d]: got ok=false, want %d (order=%v)", k, id, order) + } + if got := wr.StreamID(); got != id { + return fmt.Errorf("Pop[%d]: got %v, want %d (order=%v)", k, got, id, order) + } + } + wr, ok := ws.Pop() + if ok { + return fmt.Errorf("Pop[%d]: got %v, want ok=false (order=%v)", len(order), wr.StreamID(), order) + } + return nil +} + +func TestPriorityPopFrom533Tree(t *testing.T) { + ws := makeSection533Tree() + + ws.Push(makeWriteHeadersRequest(3 /*C*/)) + ws.Push(makeWriteNonStreamRequest()) + ws.Push(makeWriteHeadersRequest(5 /*E*/)) + ws.Push(makeWriteHeadersRequest(1 /*A*/)) + t.Log("tree:", fmtTree(ws, fmtNodeParentSkipRoot)) + + if err := checkPopAll(ws, []uint32{0 /*NonStream*/, 1, 3, 5}); err != nil { + t.Error(err) + } +} + +func TestPriorityPopFromLinearTree(t *testing.T) { + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 2}) + ws.OpenStream(4, OpenStreamOptions{PusherID: 3}) + + ws.Push(makeWriteHeadersRequest(3)) + ws.Push(makeWriteHeadersRequest(4)) + ws.Push(makeWriteHeadersRequest(1)) + ws.Push(makeWriteHeadersRequest(2)) + ws.Push(makeWriteNonStreamRequest()) + ws.Push(makeWriteNonStreamRequest()) + t.Log("tree:", fmtTree(ws, fmtNodeParentSkipRoot)) + + if err := checkPopAll(ws, 
[]uint32{0, 0 /*NonStreams*/, 1, 2, 3, 4}); err != nil { + t.Error(err) + } +} + +func TestPriorityFlowControl(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{ThrottleOutOfOrderWrites: false}) + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + + sc := &serverConn{maxFrameSize: 16} + st1 := &stream{id: 1, sc: sc} + st2 := &stream{id: 2, sc: sc} + + ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 16), false}, st1, nil}) + ws.Push(FrameWriteRequest{&writeData{2, make([]byte, 16), false}, st2, nil}) + ws.AdjustStream(2, PriorityParam{StreamDep: 1}) + + // No flow-control bytes available. + if wr, ok := ws.Pop(); ok { + t.Fatalf("Pop(limited by flow control)=%v,true, want false", wr) + } + + // Add enough flow-control bytes to write st2 in two Pop calls. + // Should write data from st2 even though it's lower priority than st1. + for i := 1; i <= 2; i++ { + st2.flow.add(8) + wr, ok := ws.Pop() + if !ok { + t.Fatalf("Pop(%d)=false, want true", i) + } + if got, want := wr.DataSize(), 8; got != want { + t.Fatalf("Pop(%d)=%d bytes, want %d bytes", i, got, want) + } + } +} + +func TestPriorityThrottleOutOfOrderWrites(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{ThrottleOutOfOrderWrites: true}) + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + + sc := &serverConn{maxFrameSize: 4096} + st1 := &stream{id: 1, sc: sc} + st2 := &stream{id: 2, sc: sc} + st1.flow.add(4096) + st2.flow.add(4096) + ws.Push(FrameWriteRequest{&writeData{2, make([]byte, 4096), false}, st2, nil}) + ws.AdjustStream(2, PriorityParam{StreamDep: 1}) + + // We have enough flow-control bytes to write st2 in a single Pop call. + // However, due to out-of-order write throttling, the first call should + // only write 1KB. + wr, ok := ws.Pop() + if !ok { + t.Fatalf("Pop(st2.first)=false, want true") + } + if got, want := wr.StreamID(), uint32(2); got != want { + t.Fatalf("Pop(st2.first)=stream %d, want stream %d", got, want) + } + if got, want := wr.DataSize(), 1024; got != want { + t.Fatalf("Pop(st2.first)=%d bytes, want %d bytes", got, want) + } + + // Now add data on st1. This should take precedence. + ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 4096), false}, st1, nil}) + wr, ok = ws.Pop() + if !ok { + t.Fatalf("Pop(st1)=false, want true") + } + if got, want := wr.StreamID(), uint32(1); got != want { + t.Fatalf("Pop(st1)=stream %d, want stream %d", got, want) + } + if got, want := wr.DataSize(), 4096; got != want { + t.Fatalf("Pop(st1)=%d bytes, want %d bytes", got, want) + } + + // Should go back to writing 1KB from st2. 
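+	// (Popping st1, which has no open parent, reset the throttle limit
+	// back to 1KB for the next out-of-order write.)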
+	wr, ok = ws.Pop()
+	if !ok {
+		t.Fatalf("Pop(st2.last)=false, want true")
+	}
+	if got, want := wr.StreamID(), uint32(2); got != want {
+		t.Fatalf("Pop(st2.last)=stream %d, want stream %d", got, want)
+	}
+	if got, want := wr.DataSize(), 1024; got != want {
+		t.Fatalf("Pop(st2.last)=%d bytes, want %d bytes", got, want)
+	}
+}
+
+func TestPriorityWeights(t *testing.T) {
+	ws := defaultPriorityWriteScheduler()
+	ws.OpenStream(1, OpenStreamOptions{})
+	ws.OpenStream(2, OpenStreamOptions{})
+
+	sc := &serverConn{maxFrameSize: 8}
+	st1 := &stream{id: 1, sc: sc}
+	st2 := &stream{id: 2, sc: sc}
+	st1.flow.add(40)
+	st2.flow.add(40)
+
+	ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 40), false}, st1, nil})
+	ws.Push(FrameWriteRequest{&writeData{2, make([]byte, 40), false}, st2, nil})
+	ws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 34})
+	ws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 9})
+
+	// st1 gets 3.5x the bandwidth of st2 (3.5 = (34+1)/(9+1)).
+	// The maximum frame size is 8 bytes. The write sequence should be:
+	//   st1, total bytes so far is (st1=8,  st2=0)
+	//   st2, total bytes so far is (st1=8,  st2=8)
+	//   st1, total bytes so far is (st1=16, st2=8)
+	//   st1, total bytes so far is (st1=24, st2=8)  // 3x bandwidth
+	//   st1, total bytes so far is (st1=32, st2=8)  // 4x bandwidth
+	//   st2, total bytes so far is (st1=32, st2=16) // 2x bandwidth
+	//   st1, total bytes so far is (st1=40, st2=16)
+	//   st2, total bytes so far is (st1=40, st2=24)
+	//   st2, total bytes so far is (st1=40, st2=32)
+	//   st2, total bytes so far is (st1=40, st2=40)
+	if err := checkPopAll(ws, []uint32{1, 2, 1, 1, 1, 2, 1, 2, 2, 2}); err != nil {
+		t.Error(err)
+	}
+}
+
+func TestPriorityRstStreamOnNonOpenStreams(t *testing.T) {
+	ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{
+		MaxClosedNodesInTree: 0,
+		MaxIdleNodesInTree:   0,
+	})
+	ws.OpenStream(1, OpenStreamOptions{})
+	ws.CloseStream(1)
+	ws.Push(FrameWriteRequest{write: streamError(1, ErrCodeProtocol)})
+	ws.Push(FrameWriteRequest{write: streamError(2, ErrCodeProtocol)})
+
+	if err := checkPopAll(ws, []uint32{1, 2}); err != nil {
+		t.Error(err)
+	}
+}
diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go
new file mode 100644
index 0000000..36d7919
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched_random.go
@@ -0,0 +1,72 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import "math"
+
+// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2
+// priorities. Control frames like SETTINGS and PING are written before DATA
+// frames, but if no control frames are queued and multiple streams have queued
+// HEADERS or DATA frames, Pop selects a ready stream arbitrarily.
+func NewRandomWriteScheduler() WriteScheduler {
+	return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)}
+}
+
+type randomWriteScheduler struct {
+	// zero holds frames not associated with a specific stream.
+	zero writeQueue
+
+	// sq contains the stream-specific queues, keyed by stream ID.
+	// When a stream is idle or closed, it's deleted from the map.
+	sq map[uint32]*writeQueue
+
+	// pool of empty queues for reuse.
+ queuePool writeQueuePool +} + +func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + // no-op: idle streams are not tracked +} + +func (ws *randomWriteScheduler) CloseStream(streamID uint32) { + q, ok := ws.sq[streamID] + if !ok { + return + } + delete(ws.sq, streamID) + ws.queuePool.put(q) +} + +func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { + // no-op: priorities are ignored +} + +func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) { + id := wr.StreamID() + if id == 0 { + ws.zero.push(wr) + return + } + q, ok := ws.sq[id] + if !ok { + q = ws.queuePool.get() + ws.sq[id] = q + } + q.push(wr) +} + +func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) { + // Control frames first. + if !ws.zero.empty() { + return ws.zero.shift(), true + } + // Iterate over all non-idle streams until finding one that can be consumed. + for _, q := range ws.sq { + if wr, ok := q.consume(math.MaxInt32); ok { + return wr, true + } + } + return FrameWriteRequest{}, false +} diff --git a/vendor/golang.org/x/net/http2/writesched_random_test.go b/vendor/golang.org/x/net/http2/writesched_random_test.go new file mode 100644 index 0000000..3bf4aa3 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_random_test.go @@ -0,0 +1,44 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import "testing" + +func TestRandomScheduler(t *testing.T) { + ws := NewRandomWriteScheduler() + ws.Push(makeWriteHeadersRequest(3)) + ws.Push(makeWriteHeadersRequest(4)) + ws.Push(makeWriteHeadersRequest(1)) + ws.Push(makeWriteHeadersRequest(2)) + ws.Push(makeWriteNonStreamRequest()) + ws.Push(makeWriteNonStreamRequest()) + + // Pop all frames. Should get the non-stream requests first, + // followed by the stream requests in any order. + var order []FrameWriteRequest + for { + wr, ok := ws.Pop() + if !ok { + break + } + order = append(order, wr) + } + t.Logf("got frames: %v", order) + if len(order) != 6 { + t.Fatalf("got %d frames, expected 6", len(order)) + } + if order[0].StreamID() != 0 || order[1].StreamID() != 0 { + t.Fatal("expected non-stream frames first", order[0], order[1]) + } + got := make(map[uint32]bool) + for _, wr := range order[2:] { + got[wr.StreamID()] = true + } + for id := uint32(1); id <= 4; id++ { + if !got[id] { + t.Errorf("frame not found for stream %d", id) + } + } +} diff --git a/vendor/golang.org/x/net/http2/writesched_test.go b/vendor/golang.org/x/net/http2/writesched_test.go new file mode 100644 index 0000000..0807056 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_test.go @@ -0,0 +1,125 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package http2 + +import ( + "fmt" + "math" + "reflect" + "testing" +) + +func makeWriteNonStreamRequest() FrameWriteRequest { + return FrameWriteRequest{writeSettingsAck{}, nil, nil} +} + +func makeWriteHeadersRequest(streamID uint32) FrameWriteRequest { + st := &stream{id: streamID} + return FrameWriteRequest{&writeResHeaders{streamID: streamID, httpResCode: 200}, st, nil} +} + +func checkConsume(wr FrameWriteRequest, nbytes int32, want []FrameWriteRequest) error { + consumed, rest, n := wr.Consume(nbytes) + var wantConsumed, wantRest FrameWriteRequest + switch len(want) { + case 0: + case 1: + wantConsumed = want[0] + case 2: + wantConsumed = want[0] + wantRest = want[1] + } + if !reflect.DeepEqual(consumed, wantConsumed) || !reflect.DeepEqual(rest, wantRest) || n != len(want) { + return fmt.Errorf("got %v, %v, %v\nwant %v, %v, %v", consumed, rest, n, wantConsumed, wantRest, len(want)) + } + return nil +} + +func TestFrameWriteRequestNonData(t *testing.T) { + wr := makeWriteNonStreamRequest() + if got, want := wr.DataSize(), 0; got != want { + t.Errorf("DataSize: got %v, want %v", got, want) + } + + // Non-DATA frames are always consumed whole. + if err := checkConsume(wr, 0, []FrameWriteRequest{wr}); err != nil { + t.Errorf("Consume:\n%v", err) + } +} + +func TestFrameWriteRequestData(t *testing.T) { + st := &stream{ + id: 1, + sc: &serverConn{maxFrameSize: 16}, + } + const size = 32 + wr := FrameWriteRequest{&writeData{st.id, make([]byte, size), true}, st, make(chan error)} + if got, want := wr.DataSize(), size; got != want { + t.Errorf("DataSize: got %v, want %v", got, want) + } + + // No flow-control bytes available: cannot consume anything. + if err := checkConsume(wr, math.MaxInt32, []FrameWriteRequest{}); err != nil { + t.Errorf("Consume(limited by flow control):\n%v", err) + } + + // Add enough flow-control bytes to consume the entire frame, + // but we're now restricted by st.sc.maxFrameSize. + st.flow.add(size) + want := []FrameWriteRequest{ + { + write: &writeData{st.id, make([]byte, st.sc.maxFrameSize), false}, + stream: st, + done: nil, + }, + { + write: &writeData{st.id, make([]byte, size-st.sc.maxFrameSize), true}, + stream: st, + done: wr.done, + }, + } + if err := checkConsume(wr, math.MaxInt32, want); err != nil { + t.Errorf("Consume(limited by maxFrameSize):\n%v", err) + } + rest := want[1] + + // Consume 8 bytes from the remaining frame. + want = []FrameWriteRequest{ + { + write: &writeData{st.id, make([]byte, 8), false}, + stream: st, + done: nil, + }, + { + write: &writeData{st.id, make([]byte, size-st.sc.maxFrameSize-8), true}, + stream: st, + done: wr.done, + }, + } + if err := checkConsume(rest, 8, want); err != nil { + t.Errorf("Consume(8):\n%v", err) + } + rest = want[1] + + // Consume all remaining bytes. 
+	want = []FrameWriteRequest{
+		{
+			write:  &writeData{st.id, make([]byte, size-st.sc.maxFrameSize-8), true},
+			stream: st,
+			done:   wr.done,
+		},
+	}
+	if err := checkConsume(rest, math.MaxInt32, want); err != nil {
+		t.Errorf("Consume(remainder):\n%v", err)
+	}
+}
+
+func TestFrameWriteRequest_StreamID(t *testing.T) {
+	const streamID = 123
+	wr := FrameWriteRequest{write: streamError(streamID, ErrCodeNo)}
+	if got := wr.StreamID(); got != streamID {
+		t.Errorf("FrameWriteRequest(StreamError) = %v; want %v", got, streamID)
+	}
+}
diff --git a/vendor/golang.org/x/net/http2/z_spec_test.go b/vendor/golang.org/x/net/http2/z_spec_test.go
new file mode 100644
index 0000000..610b2cd
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/z_spec_test.go
@@ -0,0 +1,356 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+	"bytes"
+	"encoding/xml"
+	"flag"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"testing"
+)
+
+var coverSpec = flag.Bool("coverspec", false, "Run spec coverage tests")
+
+// The global map of sentence coverage for the http2 spec.
+var defaultSpecCoverage specCoverage
+
+var loadSpecOnce sync.Once
+
+func loadSpec() {
+	if f, err := os.Open("testdata/draft-ietf-httpbis-http2.xml"); err != nil {
+		panic(err)
+	} else {
+		defaultSpecCoverage = readSpecCov(f)
+		f.Close()
+	}
+}
+
+// covers marks all sentences for section sec in defaultSpecCoverage. Sentences not
+// "covered" will be included in the report output by TestSpecCoverage.
+func covers(sec, sentences string) {
+	loadSpecOnce.Do(loadSpec)
+	defaultSpecCoverage.cover(sec, sentences)
+}
+
+type specPart struct {
+	section  string
+	sentence string
+}
+
+func (ss specPart) Less(oo specPart) bool {
+	atoi := func(s string) int {
+		n, err := strconv.Atoi(s)
+		if err != nil {
+			panic(err)
+		}
+		return n
+	}
+	a := strings.Split(ss.section, ".")
+	b := strings.Split(oo.section, ".")
+	for len(a) > 0 {
+		if len(b) == 0 {
+			return false
+		}
+		x, y := atoi(a[0]), atoi(b[0])
+		if x == y {
+			a, b = a[1:], b[1:]
+			continue
+		}
+		return x < y
+	}
+	if len(b) > 0 {
+		return true
+	}
+	return false
+}
+
+type bySpecSection []specPart
+
+func (a bySpecSection) Len() int           { return len(a) }
+func (a bySpecSection) Less(i, j int) bool { return a[i].Less(a[j]) }
+func (a bySpecSection) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+
+type specCoverage struct {
+	coverage map[specPart]bool
+	d        *xml.Decoder
+}
+
+func joinSection(sec []int) string {
+	s := fmt.Sprintf("%d", sec[0])
+	for _, n := range sec[1:] {
+		s = fmt.Sprintf("%s.%d", s, n)
+	}
+	return s
+}
+
+func (sc specCoverage) readSection(sec []int) {
+	var (
+		buf = new(bytes.Buffer)
+		sub = 0
+	)
+	for {
+		tk, err := sc.d.Token()
+		if err != nil {
+			if err == io.EOF {
+				return
+			}
+			panic(err)
+		}
+		switch v := tk.(type) {
+		case xml.StartElement:
+			if skipElement(v) {
+				if err := sc.d.Skip(); err != nil {
+					panic(err)
+				}
+				if v.Name.Local == "section" {
+					sub++
+				}
+				break
+			}
+			switch v.Name.Local {
+			case "section":
+				sub++
+				sc.readSection(append(sec, sub))
+			case "xref":
+				buf.Write(sc.readXRef(v))
+			}
+		case xml.CharData:
+			if len(sec) == 0 {
+				break
+			}
+			buf.Write(v)
+		case xml.EndElement:
+			if v.Name.Local == "section" {
+				sc.addSentences(joinSection(sec), buf.String())
+				return
+			}
+		}
+	}
+}
+
+func (sc specCoverage) readXRef(se xml.StartElement) []byte {
+	var b []byte
+	for {
+		tk, err :=
sc.d.Token()
+		if err != nil {
+			panic(err)
+		}
+		switch v := tk.(type) {
+		case xml.CharData:
+			if b != nil {
+				panic("unexpected CharData")
+			}
+			b = []byte(string(v))
+		case xml.EndElement:
+			if v.Name.Local != "xref" {
+				panic("expected </xref>")
+			}
+			if b != nil {
+				return b
+			}
+			sig := attrSig(se)
+			switch sig {
+			case "target":
+				return []byte(fmt.Sprintf("[%s]", attrValue(se, "target")))
+			case "fmt-of,rel,target", "fmt-,,rel,target":
+				return []byte(fmt.Sprintf("[%s, %s]", attrValue(se, "target"), attrValue(se, "rel")))
+			case "fmt-of,sec,target", "fmt-,,sec,target":
+				return []byte(fmt.Sprintf("[section %s of %s]", attrValue(se, "sec"), attrValue(se, "target")))
+			case "fmt-of,rel,sec,target":
+				return []byte(fmt.Sprintf("[section %s of %s, %s]", attrValue(se, "sec"), attrValue(se, "target"), attrValue(se, "rel")))
+			default:
+				panic(fmt.Sprintf("unknown attribute signature %q in %#v", sig, se))
+			}
+		default:
+			panic(fmt.Sprintf("unexpected tag %q", v))
+		}
+	}
+}
+
+var skipAnchor = map[string]bool{
+	"intro":    true,
+	"Overview": true,
+}
+
+var skipTitle = map[string]bool{
+	"Acknowledgements":            true,
+	"Change Log":                  true,
+	"Document Organization":       true,
+	"Conventions and Terminology": true,
+}
+
+func skipElement(s xml.StartElement) bool {
+	switch s.Name.Local {
+	case "artwork":
+		return true
+	case "section":
+		for _, attr := range s.Attr {
+			switch attr.Name.Local {
+			case "anchor":
+				if skipAnchor[attr.Value] || strings.HasPrefix(attr.Value, "changes.since.") {
+					return true
+				}
+			case "title":
+				if skipTitle[attr.Value] {
+					return true
+				}
+			}
+		}
+	}
+	return false
+}
+
+func readSpecCov(r io.Reader) specCoverage {
+	sc := specCoverage{
+		coverage: map[specPart]bool{},
+		d:        xml.NewDecoder(r)}
+	sc.readSection(nil)
+	return sc
+}
+
+func (sc specCoverage) addSentences(sec string, sentence string) {
+	for _, s := range parseSentences(sentence) {
+		sc.coverage[specPart{sec, s}] = false
+	}
+}
+
+func (sc specCoverage) cover(sec string, sentence string) {
+	for _, s := range parseSentences(sentence) {
+		p := specPart{sec, s}
+		if _, ok := sc.coverage[p]; !ok {
+			panic(fmt.Sprintf("Not found in spec: %q, %q", sec, s))
+		}
+		sc.coverage[specPart{sec, s}] = true
+	}
+}
+
+var whitespaceRx = regexp.MustCompile(`\s+`)
+
+func parseSentences(sens string) []string {
+	sens = strings.TrimSpace(sens)
+	if sens == "" {
+		return nil
+	}
+	ss := strings.Split(whitespaceRx.ReplaceAllString(sens, " "), ". ")
+	for i, s := range ss {
+		s = strings.TrimSpace(s)
+		if !strings.HasSuffix(s, ".") {
+			s += "."
+		}
+		ss[i] = s
+	}
+	return ss
+}
+
+func TestSpecParseSentences(t *testing.T) {
+	tests := []struct {
+		ss   string
+		want []string
+	}{
+		{"Sentence 1. Sentence 2.",
+			[]string{
+				"Sentence 1.",
+				"Sentence 2.",
+			}},
+		{"Sentence 1. 
\nSentence 2.\tSentence 3.",
+			[]string{
+				"Sentence 1.",
+				"Sentence 2.",
+				"Sentence 3.",
+			}},
+	}
+
+	for i, tt := range tests {
+		got := parseSentences(tt.ss)
+		if !reflect.DeepEqual(got, tt.want) {
+			t.Errorf("%d: got = %q, want %q", i, got, tt.want)
+		}
+	}
+}
+
+func TestSpecCoverage(t *testing.T) {
+	if !*coverSpec {
+		t.Skip()
+	}
+
+	loadSpecOnce.Do(loadSpec)
+
+	var (
+		list     []specPart
+		cv       = defaultSpecCoverage.coverage
+		total    = len(cv)
+		complete = 0
+	)
+
+	for sp, touched := range defaultSpecCoverage.coverage {
+		if touched {
+			complete++
+		} else {
+			list = append(list, sp)
+		}
+	}
+	sort.Stable(bySpecSection(list))
+
+	if testing.Short() && len(list) > 5 {
+		list = list[:5]
+	}
+
+	for _, p := range list {
+		t.Errorf("\tSECTION %s: %s", p.section, p.sentence)
+	}
+
+	t.Logf("%d/%d (%d%%) sentences covered", complete, total, complete*100/total)
+}
+
+func attrSig(se xml.StartElement) string {
+	var names []string
+	for _, attr := range se.Attr {
+		if attr.Name.Local == "fmt" {
+			names = append(names, "fmt-"+attr.Value)
+		} else {
+			names = append(names, attr.Name.Local)
+		}
+	}
+	sort.Strings(names)
+	return strings.Join(names, ",")
+}
+
+func attrValue(se xml.StartElement, attr string) string {
+	for _, a := range se.Attr {
+		if a.Name.Local == attr {
+			return a.Value
+		}
+	}
+	panic("unknown attribute " + attr)
+}
+
+func TestSpecPartLess(t *testing.T) {
+	tests := []struct {
+		sec1, sec2 string
+		want       bool
+	}{
+		{"6.2.1", "6.2", false},
+		{"6.2", "6.2.1", true},
+		{"6.10", "6.10.1", true},
+		{"6.10", "6.1.1", false}, // 10, not 1
+		{"6.1", "6.1", false},    // equal, so not less
+	}
+	for _, tt := range tests {
+		got := (specPart{tt.sec1, "foo"}).Less(specPart{tt.sec2, "foo"})
+		if got != tt.want {
+			t.Errorf("Less(%q, %q) = %v; want %v", tt.sec1, tt.sec2, got, tt.want)
+		}
+	}
+}
diff --git a/vendor/golang.org/x/net/icmp/diag_test.go b/vendor/golang.org/x/net/icmp/diag_test.go
new file mode 100644
index 0000000..2ecd465
--- /dev/null
+++ b/vendor/golang.org/x/net/icmp/diag_test.go
@@ -0,0 +1,274 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package icmp_test + +import ( + "errors" + "fmt" + "net" + "os" + "runtime" + "sync" + "testing" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +type diagTest struct { + network, address string + protocol int + m icmp.Message +} + +func TestDiag(t *testing.T) { + if testing.Short() { + t.Skip("avoid external network") + } + + t.Run("Ping/NonPrivileged", func(t *testing.T) { + switch runtime.GOOS { + case "darwin": + case "linux": + t.Log("you may need to adjust the net.ipv4.ping_group_range kernel state") + default: + t.Logf("not supported on %s", runtime.GOOS) + return + } + for i, dt := range []diagTest{ + { + "udp4", "0.0.0.0", iana.ProtocolICMP, + icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, + Data: []byte("HELLO-R-U-THERE"), + }, + }, + }, + + { + "udp6", "::", iana.ProtocolIPv6ICMP, + icmp.Message{ + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, + Data: []byte("HELLO-R-U-THERE"), + }, + }, + }, + } { + if err := doDiag(dt, i); err != nil { + t.Error(err) + } + } + }) + t.Run("Ping/Privileged", func(t *testing.T) { + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + for i, dt := range []diagTest{ + { + "ip4:icmp", "0.0.0.0", iana.ProtocolICMP, + icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, + Data: []byte("HELLO-R-U-THERE"), + }, + }, + }, + + { + "ip6:ipv6-icmp", "::", iana.ProtocolIPv6ICMP, + icmp.Message{ + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, + Data: []byte("HELLO-R-U-THERE"), + }, + }, + }, + } { + if err := doDiag(dt, i); err != nil { + t.Error(err) + } + } + }) + t.Run("Probe/Privileged", func(t *testing.T) { + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + for i, dt := range []diagTest{ + { + "ip4:icmp", "0.0.0.0", iana.ProtocolICMP, + icmp.Message{ + Type: ipv4.ICMPTypeExtendedEchoRequest, Code: 0, + Body: &icmp.ExtendedEchoRequest{ + ID: os.Getpid() & 0xffff, + Local: true, + Extensions: []icmp.Extension{ + &icmp.InterfaceIdent{ + Class: 3, Type: 1, + Name: "doesnotexist", + }, + }, + }, + }, + }, + + { + "ip6:ipv6-icmp", "::", iana.ProtocolIPv6ICMP, + icmp.Message{ + Type: ipv6.ICMPTypeExtendedEchoRequest, Code: 0, + Body: &icmp.ExtendedEchoRequest{ + ID: os.Getpid() & 0xffff, + Local: true, + Extensions: []icmp.Extension{ + &icmp.InterfaceIdent{ + Class: 3, Type: 1, + Name: "doesnotexist", + }, + }, + }, + }, + }, + } { + if err := doDiag(dt, i); err != nil { + t.Error(err) + } + } + }) +} + +func doDiag(dt diagTest, seq int) error { + c, err := icmp.ListenPacket(dt.network, dt.address) + if err != nil { + return err + } + defer c.Close() + + dst, err := googleAddr(c, dt.protocol) + if err != nil { + return err + } + + if dt.network != "udp6" && dt.protocol == iana.ProtocolIPv6ICMP { + var f ipv6.ICMPFilter + f.SetAll(true) + f.Accept(ipv6.ICMPTypeDestinationUnreachable) + f.Accept(ipv6.ICMPTypePacketTooBig) + f.Accept(ipv6.ICMPTypeTimeExceeded) + f.Accept(ipv6.ICMPTypeParameterProblem) + f.Accept(ipv6.ICMPTypeEchoReply) + f.Accept(ipv6.ICMPTypeExtendedEchoReply) + if err := c.IPv6PacketConn().SetICMPFilter(&f); err != nil { + return err + } + } + + switch m := dt.m.Body.(type) { + case *icmp.Echo: + m.Seq = 1 << uint(seq) + case *icmp.ExtendedEchoRequest: + m.Seq = 1 << uint(seq) + } + wb, err := dt.m.Marshal(nil) + if 
err != nil { + return err + } + if n, err := c.WriteTo(wb, dst); err != nil { + return err + } else if n != len(wb) { + return fmt.Errorf("got %v; want %v", n, len(wb)) + } + + rb := make([]byte, 1500) + if err := c.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil { + return err + } + n, peer, err := c.ReadFrom(rb) + if err != nil { + return err + } + rm, err := icmp.ParseMessage(dt.protocol, rb[:n]) + if err != nil { + return err + } + switch { + case dt.m.Type == ipv4.ICMPTypeEcho && rm.Type == ipv4.ICMPTypeEchoReply: + fallthrough + case dt.m.Type == ipv6.ICMPTypeEchoRequest && rm.Type == ipv6.ICMPTypeEchoReply: + fallthrough + case dt.m.Type == ipv4.ICMPTypeExtendedEchoRequest && rm.Type == ipv4.ICMPTypeExtendedEchoReply: + fallthrough + case dt.m.Type == ipv6.ICMPTypeExtendedEchoRequest && rm.Type == ipv6.ICMPTypeExtendedEchoReply: + return nil + default: + return fmt.Errorf("got %+v from %v; want echo reply or extended echo reply", rm, peer) + } +} + +func googleAddr(c *icmp.PacketConn, protocol int) (net.Addr, error) { + host := "ipv4.google.com" + if protocol == iana.ProtocolIPv6ICMP { + host = "ipv6.google.com" + } + ips, err := net.LookupIP(host) + if err != nil { + return nil, err + } + netaddr := func(ip net.IP) (net.Addr, error) { + switch c.LocalAddr().(type) { + case *net.UDPAddr: + return &net.UDPAddr{IP: ip}, nil + case *net.IPAddr: + return &net.IPAddr{IP: ip}, nil + default: + return nil, errors.New("neither UDPAddr nor IPAddr") + } + } + if len(ips) > 0 { + return netaddr(ips[0]) + } + return nil, errors.New("no A or AAAA record") +} + +func TestConcurrentNonPrivilegedListenPacket(t *testing.T) { + if testing.Short() { + t.Skip("avoid external network") + } + switch runtime.GOOS { + case "darwin": + case "linux": + t.Log("you may need to adjust the net.ipv4.ping_group_range kernel state") + default: + t.Skipf("not supported on %s", runtime.GOOS) + } + + network, address := "udp4", "127.0.0.1" + if !nettest.SupportsIPv4() { + network, address = "udp6", "::1" + } + const N = 1000 + var wg sync.WaitGroup + wg.Add(N) + for i := 0; i < N; i++ { + go func() { + defer wg.Done() + c, err := icmp.ListenPacket(network, address) + if err != nil { + t.Error(err) + return + } + c.Close() + }() + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/icmp/dstunreach.go b/vendor/golang.org/x/net/icmp/dstunreach.go new file mode 100644 index 0000000..7464bf7 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/dstunreach.go @@ -0,0 +1,41 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +// A DstUnreach represents an ICMP destination unreachable message +// body. +type DstUnreach struct { + Data []byte // data, known as original datagram field + Extensions []Extension // extensions +} + +// Len implements the Len method of MessageBody interface. +func (p *DstUnreach) Len(proto int) int { + if p == nil { + return 0 + } + l, _ := multipartMessageBodyDataLen(proto, true, p.Data, p.Extensions) + return 4 + l +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *DstUnreach) Marshal(proto int) ([]byte, error) { + return marshalMultipartMessageBody(proto, true, p.Data, p.Extensions) +} + +// parseDstUnreach parses b as an ICMP destination unreachable message +// body. 
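+// The first four body octets hold the unused field and the RFC 4884 +// length attribute (the second octet for ICMPv4, the first for ICMPv6); +// the remainder is the padded original datagram followed by any +// extensions, which parseMultipartMessageBody splits apart.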
+func parseDstUnreach(proto int, typ Type, b []byte) (MessageBody, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + p := &DstUnreach{} + var err error + p.Data, p.Extensions, err = parseMultipartMessageBody(proto, typ, b) + if err != nil { + return nil, err + } + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/echo.go b/vendor/golang.org/x/net/icmp/echo.go new file mode 100644 index 0000000..c611f65 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/echo.go @@ -0,0 +1,157 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "encoding/binary" + +// An Echo represents an ICMP echo request or reply message body. +type Echo struct { + ID int // identifier + Seq int // sequence number + Data []byte // data +} + +// Len implements the Len method of MessageBody interface. +func (p *Echo) Len(proto int) int { + if p == nil { + return 0 + } + return 4 + len(p.Data) +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *Echo) Marshal(proto int) ([]byte, error) { + b := make([]byte, 4+len(p.Data)) + binary.BigEndian.PutUint16(b[:2], uint16(p.ID)) + binary.BigEndian.PutUint16(b[2:4], uint16(p.Seq)) + copy(b[4:], p.Data) + return b, nil +} + +// parseEcho parses b as an ICMP echo request or reply message body. +func parseEcho(proto int, _ Type, b []byte) (MessageBody, error) { + bodyLen := len(b) + if bodyLen < 4 { + return nil, errMessageTooShort + } + p := &Echo{ID: int(binary.BigEndian.Uint16(b[:2])), Seq: int(binary.BigEndian.Uint16(b[2:4]))} + if bodyLen > 4 { + p.Data = make([]byte, bodyLen-4) + copy(p.Data, b[4:]) + } + return p, nil +} + +// An ExtendedEchoRequest represents an ICMP extended echo request +// message body. +type ExtendedEchoRequest struct { + ID int // identifier + Seq int // sequence number + Local bool // must be true when identifying by name or index + Extensions []Extension // extensions +} + +// Len implements the Len method of MessageBody interface. +func (p *ExtendedEchoRequest) Len(proto int) int { + if p == nil { + return 0 + } + l, _ := multipartMessageBodyDataLen(proto, false, nil, p.Extensions) + return 4 + l +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *ExtendedEchoRequest) Marshal(proto int) ([]byte, error) { + b, err := marshalMultipartMessageBody(proto, false, nil, p.Extensions) + if err != nil { + return nil, err + } + bb := make([]byte, 4) + binary.BigEndian.PutUint16(bb[:2], uint16(p.ID)) + bb[2] = byte(p.Seq) + if p.Local { + bb[3] |= 0x01 + } + bb = append(bb, b...) + return bb, nil +} + +// parseExtendedEchoRequest parses b as an ICMP extended echo request +// message body. +func parseExtendedEchoRequest(proto int, typ Type, b []byte) (MessageBody, error) { + if len(b) < 4+4 { + return nil, errMessageTooShort + } + p := &ExtendedEchoRequest{ID: int(binary.BigEndian.Uint16(b[:2])), Seq: int(b[2])} + if b[3]&0x01 != 0 { + p.Local = true + } + var err error + _, p.Extensions, err = parseMultipartMessageBody(proto, typ, b[4:]) + if err != nil { + return nil, err + } + return p, nil +} + +// An ExtendedEchoReply represents an ICMP extended echo reply message +// body. 
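+// On the wire the fourth body octet packs the reply state: the 3-bit +// State value in its top bits, then the Active, IPv4 and IPv6 flags in +// its low bits, exactly as Marshal and parseExtendedEchoReply below +// encode and decode them.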
+type ExtendedEchoReply struct { + ID int // identifier + Seq int // sequence number + State int // 3-bit state working together with Message.Code + Active bool // probed interface is active + IPv4 bool // probed interface runs IPv4 + IPv6 bool // probed interface runs IPv6 +} + +// Len implements the Len method of MessageBody interface. +func (p *ExtendedEchoReply) Len(proto int) int { + if p == nil { + return 0 + } + return 4 +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *ExtendedEchoReply) Marshal(proto int) ([]byte, error) { + b := make([]byte, 4) + binary.BigEndian.PutUint16(b[:2], uint16(p.ID)) + b[2] = byte(p.Seq) + b[3] = byte(p.State<<5) & 0xe0 + if p.Active { + b[3] |= 0x04 + } + if p.IPv4 { + b[3] |= 0x02 + } + if p.IPv6 { + b[3] |= 0x01 + } + return b, nil +} + +// parseExtendedEchoReply parses b as an ICMP extended echo reply +// message body. +func parseExtendedEchoReply(proto int, _ Type, b []byte) (MessageBody, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + p := &ExtendedEchoReply{ + ID: int(binary.BigEndian.Uint16(b[:2])), + Seq: int(b[2]), + State: int(b[3]) >> 5, + } + if b[3]&0x04 != 0 { + p.Active = true + } + if b[3]&0x02 != 0 { + p.IPv4 = true + } + if b[3]&0x01 != 0 { + p.IPv6 = true + } + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/endpoint.go b/vendor/golang.org/x/net/icmp/endpoint.go new file mode 100644 index 0000000..a68bfb0 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/endpoint.go @@ -0,0 +1,113 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "net" + "runtime" + "syscall" + "time" + + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +var _ net.PacketConn = &PacketConn{} + +// A PacketConn represents a packet network endpoint that uses either +// ICMPv4 or ICMPv6. +type PacketConn struct { + c net.PacketConn + p4 *ipv4.PacketConn + p6 *ipv6.PacketConn +} + +func (c *PacketConn) ok() bool { return c != nil && c.c != nil } + +// IPv4PacketConn returns the ipv4.PacketConn of c. +// It returns nil when c is not created as the endpoint for ICMPv4. +func (c *PacketConn) IPv4PacketConn() *ipv4.PacketConn { + if !c.ok() { + return nil + } + return c.p4 +} + +// IPv6PacketConn returns the ipv6.PacketConn of c. +// It returns nil when c is not created as the endpoint for ICMPv6. +func (c *PacketConn) IPv6PacketConn() *ipv6.PacketConn { + if !c.ok() { + return nil + } + return c.p6 +} + +// ReadFrom reads an ICMP message from the connection. +func (c *PacketConn) ReadFrom(b []byte) (int, net.Addr, error) { + if !c.ok() { + return 0, nil, syscall.EINVAL + } + // Note that ipv4.NewPacketConn enables the IP_STRIPHDR socket + // option by default on Darwin. + // See golang.org/issue/9395 for further information. + if runtime.GOOS == "darwin" && c.p4 != nil { + n, _, peer, err := c.p4.ReadFrom(b) + return n, peer, err + } + return c.c.ReadFrom(b) +} + +// WriteTo writes the ICMP message b to dst. +// The dst address must be a *net.UDPAddr when c is a non-privileged +// datagram-oriented ICMP endpoint; otherwise it must be a *net.IPAddr. +func (c *PacketConn) WriteTo(b []byte, dst net.Addr) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + return c.c.WriteTo(b, dst) +} + +// Close closes the endpoint. +func (c *PacketConn) Close() error { + if !c.ok() { + return syscall.EINVAL + } + return c.c.Close() +} + +// LocalAddr returns the local network address.
+func (c *PacketConn) LocalAddr() net.Addr { + if !c.ok() { + return nil + } + return c.c.LocalAddr() +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *PacketConn) SetDeadline(t time.Time) error { + if !c.ok() { + return syscall.EINVAL + } + return c.c.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *PacketConn) SetReadDeadline(t time.Time) error { + if !c.ok() { + return syscall.EINVAL + } + return c.c.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. +func (c *PacketConn) SetWriteDeadline(t time.Time) error { + if !c.ok() { + return syscall.EINVAL + } + return c.c.SetWriteDeadline(t) +} diff --git a/vendor/golang.org/x/net/icmp/example_test.go b/vendor/golang.org/x/net/icmp/example_test.go new file mode 100644 index 0000000..1df4cec --- /dev/null +++ b/vendor/golang.org/x/net/icmp/example_test.go @@ -0,0 +1,63 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp_test + +import ( + "log" + "net" + "os" + "runtime" + + "golang.org/x/net/icmp" + "golang.org/x/net/ipv6" +) + +func ExamplePacketConn_nonPrivilegedPing() { + switch runtime.GOOS { + case "darwin": + case "linux": + log.Println("you may need to adjust the net.ipv4.ping_group_range kernel state") + default: + log.Println("not supported on", runtime.GOOS) + return + } + + c, err := icmp.ListenPacket("udp6", "fe80::1%en0") + if err != nil { + log.Fatal(err) + } + defer c.Close() + + wm := icmp.Message{ + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: 1, + Data: []byte("HELLO-R-U-THERE"), + }, + } + wb, err := wm.Marshal(nil) + if err != nil { + log.Fatal(err) + } + if _, err := c.WriteTo(wb, &net.UDPAddr{IP: net.ParseIP("ff02::1"), Zone: "en0"}); err != nil { + log.Fatal(err) + } + + rb := make([]byte, 1500) + n, peer, err := c.ReadFrom(rb) + if err != nil { + log.Fatal(err) + } + rm, err := icmp.ParseMessage(58, rb[:n]) + if err != nil { + log.Fatal(err) + } + switch rm.Type { + case ipv6.ICMPTypeEchoReply: + log.Printf("got reflection from %v", peer) + default: + log.Printf("got %+v; want echo reply", rm) + } +} diff --git a/vendor/golang.org/x/net/icmp/extension.go b/vendor/golang.org/x/net/icmp/extension.go new file mode 100644 index 0000000..2005068 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/extension.go @@ -0,0 +1,108 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "encoding/binary" + + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +// An Extension represents an ICMP extension. +type Extension interface { + // Len returns the length of ICMP extension. + // Proto must be either the ICMPv4 or ICMPv6 protocol number. + Len(proto int) int + + // Marshal returns the binary encoding of ICMP extension. + // Proto must be either the ICMPv4 or ICMPv6 protocol number. + Marshal(proto int) ([]byte, error) +} + +const extensionVersion = 2 + +func validExtensionHeader(b []byte) bool { + v := int(b[0]&0xf0) >> 4 + s := binary.BigEndian.Uint16(b[2:4]) + if s != 0 { + s = checksum(b) + } + if v != extensionVersion || s != 0 { + return false + } + return true +} + +// parseExtensions parses b as a list of ICMP extensions. 
+// The length attribute l must be the value of the length attribute +// field found in the received ICMP message. +// +// It returns a list of ICMP extensions and an adjusted length +// attribute that represents the length of the padded original +// datagram field, or an error. +func parseExtensions(typ Type, b []byte, l int) ([]Extension, int, error) { + // A lot of non-RFC 4884 compliant implementations are still + // out there. Set the length attribute l to 128 when it looks + // inappropriate, for backwards compatibility. + // + // A minimal extension requires at least 8 octets; 4 octets + // for an extension header, and 4 octets for a single object + // header. + // + // See RFC 4884 for further information. + switch typ { + case ipv4.ICMPTypeExtendedEchoRequest, ipv6.ICMPTypeExtendedEchoRequest: + if len(b) < 8 || !validExtensionHeader(b) { + return nil, -1, errNoExtension + } + l = 0 + default: + if 128 > l || l+8 > len(b) { + l = 128 + } + if l+8 > len(b) { + return nil, -1, errNoExtension + } + if !validExtensionHeader(b[l:]) { + if l == 128 { + return nil, -1, errNoExtension + } + l = 128 + if !validExtensionHeader(b[l:]) { + return nil, -1, errNoExtension + } + } + } + var exts []Extension + for b = b[l+4:]; len(b) >= 4; { + ol := int(binary.BigEndian.Uint16(b[:2])) + if 4 > ol || ol > len(b) { + break + } + switch b[2] { + case classMPLSLabelStack: + ext, err := parseMPLSLabelStack(b[:ol]) + if err != nil { + return nil, -1, err + } + exts = append(exts, ext) + case classInterfaceInfo: + ext, err := parseInterfaceInfo(b[:ol]) + if err != nil { + return nil, -1, err + } + exts = append(exts, ext) + case classInterfaceIdent: + ext, err := parseInterfaceIdent(b[:ol]) + if err != nil { + return nil, -1, err + } + exts = append(exts, ext) + } + b = b[ol:] + } + return exts, l, nil +} diff --git a/vendor/golang.org/x/net/icmp/extension_test.go b/vendor/golang.org/x/net/icmp/extension_test.go new file mode 100644 index 0000000..a7669da --- /dev/null +++ b/vendor/golang.org/x/net/icmp/extension_test.go @@ -0,0 +1,333 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +package icmp + +import ( + "fmt" + "net" + "reflect" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +func TestMarshalAndParseExtension(t *testing.T) { + fn := func(t *testing.T, proto int, typ Type, hdr, obj []byte, te Extension) error { + b, err := te.Marshal(proto) + if err != nil { + return err + } + if !reflect.DeepEqual(b, obj) { + return fmt.Errorf("got %#v; want %#v", b, obj) + } + switch typ { + case ipv4.ICMPTypeExtendedEchoRequest, ipv6.ICMPTypeExtendedEchoRequest: + exts, l, err := parseExtensions(typ, append(hdr, obj...), 0) + if err != nil { + return err + } + if l != 0 { + return fmt.Errorf("got %d; want 0", l) + } + if !reflect.DeepEqual(exts, []Extension{te}) { + return fmt.Errorf("got %#v; want %#v", exts[0], te) + } + default: + for i, wire := range []struct { + data []byte // original datagram + inlattr int // length of padded original datagram, a hint + outlattr int // length of padded original datagram, a want + err error + }{ + {nil, 0, -1, errNoExtension}, + {make([]byte, 127), 128, -1, errNoExtension}, + + {make([]byte, 128), 127, -1, errNoExtension}, + {make([]byte, 128), 128, -1, errNoExtension}, + {make([]byte, 128), 129, -1, errNoExtension}, + + {append(make([]byte, 128), append(hdr, obj...)...), 127, 128, nil}, + {append(make([]byte, 128), append(hdr, obj...)...), 128, 128, nil}, + {append(make([]byte, 128), append(hdr, obj...)...), 129, 128, nil}, + + {append(make([]byte, 512), append(hdr, obj...)...), 511, -1, errNoExtension}, + {append(make([]byte, 512), append(hdr, obj...)...), 512, 512, nil}, + {append(make([]byte, 512), append(hdr, obj...)...), 513, -1, errNoExtension}, + } { + exts, l, err := parseExtensions(typ, wire.data, wire.inlattr) + if err != wire.err { + return fmt.Errorf("#%d: got %v; want %v", i, err, wire.err) + } + if wire.err != nil { + continue + } + if l != wire.outlattr { + return fmt.Errorf("#%d: got %d; want %d", i, l, wire.outlattr) + } + if !reflect.DeepEqual(exts, []Extension{te}) { + return fmt.Errorf("#%d: got %#v; want %#v", i, exts[0], te) + } + } + } + return nil + } + + t.Run("MPLSLabelStack", func(t *testing.T) { + for _, et := range []struct { + proto int + typ Type + hdr []byte + obj []byte + ext Extension + }{ + // MPLS label stack with no label + { + proto: iana.ProtocolICMP, + typ: ipv4.ICMPTypeDestinationUnreachable, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x04, 0x01, 0x01, + }, + ext: &MPLSLabelStack{ + Class: classMPLSLabelStack, + Type: typeIncomingMPLSLabelStack, + }, + }, + // MPLS label stack with a single label + { + proto: iana.ProtocolIPv6ICMP, + typ: ipv6.ICMPTypeDestinationUnreachable, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x08, 0x01, 0x01, + 0x03, 0xe8, 0xe9, 0xff, + }, + ext: &MPLSLabelStack{ + Class: classMPLSLabelStack, + Type: typeIncomingMPLSLabelStack, + Labels: []MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + }, + // MPLS label stack with multiple labels + { + proto: iana.ProtocolICMP, + typ: ipv4.ICMPTypeDestinationUnreachable, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x0c, 0x01, 0x01, + 0x03, 0xe8, 0xde, 0xfe, + 0x03, 0xe8, 0xe1, 0xff, + }, + ext: &MPLSLabelStack{ + Class: classMPLSLabelStack, + Type: typeIncomingMPLSLabelStack, + Labels: []MPLSLabel{ + { + Label: 16013, + TC: 0x7, + S: false, + TTL: 254, + }, + { + Label: 16014, + TC: 0, + S: true, + TTL: 255, + }, + }, + }, + }, + } { + if err := fn(t, et.proto, 
et.typ, et.hdr, et.obj, et.ext); err != nil { + t.Error(err) + } + } + }) + t.Run("InterfaceInfo", func(t *testing.T) { + for _, et := range []struct { + proto int + typ Type + hdr []byte + obj []byte + ext Extension + }{ + // Interface information with no attribute + { + proto: iana.ProtocolICMP, + typ: ipv4.ICMPTypeDestinationUnreachable, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x04, 0x02, 0x00, + }, + ext: &InterfaceInfo{ + Class: classInterfaceInfo, + }, + }, + // Interface information with ifIndex and name + { + proto: iana.ProtocolICMP, + typ: ipv4.ICMPTypeDestinationUnreachable, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x10, 0x02, 0x0a, + 0x00, 0x00, 0x00, 0x10, + 0x08, byte('e'), byte('n'), byte('1'), + byte('0'), byte('1'), 0x00, 0x00, + }, + ext: &InterfaceInfo{ + Class: classInterfaceInfo, + Type: 0x0a, + Interface: &net.Interface{ + Index: 16, + Name: "en101", + }, + }, + }, + // Interface information with ifIndex, IPAddr, name and MTU + { + proto: iana.ProtocolIPv6ICMP, + typ: ipv6.ICMPTypeDestinationUnreachable, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x28, 0x02, 0x0f, + 0x00, 0x00, 0x00, 0x0f, + 0x00, 0x02, 0x00, 0x00, + 0xfe, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, + 0x08, byte('e'), byte('n'), byte('1'), + byte('0'), byte('1'), 0x00, 0x00, + 0x00, 0x00, 0x20, 0x00, + }, + ext: &InterfaceInfo{ + Class: classInterfaceInfo, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.ParseIP("fe80::1"), + Zone: "en101", + }, + }, + }, + } { + if err := fn(t, et.proto, et.typ, et.hdr, et.obj, et.ext); err != nil { + t.Error(err) + } + } + }) + t.Run("InterfaceIdent", func(t *testing.T) { + for _, et := range []struct { + proto int + typ Type + hdr []byte + obj []byte + ext Extension + }{ + // Interface identification by name + { + proto: iana.ProtocolICMP, + typ: ipv4.ICMPTypeExtendedEchoRequest, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x0c, 0x03, 0x01, + byte('e'), byte('n'), byte('1'), byte('0'), + byte('1'), 0x00, 0x00, 0x00, + }, + ext: &InterfaceIdent{ + Class: classInterfaceIdent, + Type: typeInterfaceByName, + Name: "en101", + }, + }, + // Interface identification by index + { + proto: iana.ProtocolIPv6ICMP, + typ: ipv6.ICMPTypeExtendedEchoRequest, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x0c, 0x03, 0x02, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x03, 0x8f, + }, + ext: &InterfaceIdent{ + Class: classInterfaceIdent, + Type: typeInterfaceByIndex, + Index: 911, + }, + }, + // Interface identification by address + { + proto: iana.ProtocolICMP, + typ: ipv4.ICMPTypeExtendedEchoRequest, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x10, 0x03, 0x03, + byte(iana.AddrFamily48bitMAC >> 8), byte(iana.AddrFamily48bitMAC & 0x0f), 0x06, 0x00, + 0x01, 0x23, 0x45, 0x67, + 0x89, 0xab, 0x00, 0x00, + }, + ext: &InterfaceIdent{ + Class: classInterfaceIdent, + Type: typeInterfaceByAddress, + AFI: iana.AddrFamily48bitMAC, + Addr: []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab}, + }, + }, + } { + if err := fn(t, et.proto, et.typ, et.hdr, et.obj, et.ext); err != nil { + t.Error(err) + } + } + }) +} + +func TestParseInterfaceName(t *testing.T) { + ifi := InterfaceInfo{Interface: &net.Interface{}} + for i, tt := range []struct { + b []byte + error + }{ + {[]byte{0, 'e', 'n', '0'}, errInvalidExtension}, + {[]byte{4, 'e', 
'n', '0'}, nil}, + {[]byte{7, 'e', 'n', '0', 0xff, 0xff, 0xff, 0xff}, errInvalidExtension}, + {[]byte{8, 'e', 'n', '0', 0xff, 0xff, 0xff}, errMessageTooShort}, + } { + if _, err := ifi.parseName(tt.b); err != tt.error { + t.Errorf("#%d: got %v; want %v", i, err, tt.error) + } + } +} diff --git a/vendor/golang.org/x/net/icmp/helper_posix.go b/vendor/golang.org/x/net/icmp/helper_posix.go new file mode 100644 index 0000000..398fd38 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/helper_posix.go @@ -0,0 +1,75 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package icmp + +import ( + "net" + "strconv" + "syscall" +) + +func sockaddr(family int, address string) (syscall.Sockaddr, error) { + switch family { + case syscall.AF_INET: + a, err := net.ResolveIPAddr("ip4", address) + if err != nil { + return nil, err + } + if len(a.IP) == 0 { + a.IP = net.IPv4zero + } + if a.IP = a.IP.To4(); a.IP == nil { + return nil, net.InvalidAddrError("non-ipv4 address") + } + sa := &syscall.SockaddrInet4{} + copy(sa.Addr[:], a.IP) + return sa, nil + case syscall.AF_INET6: + a, err := net.ResolveIPAddr("ip6", address) + if err != nil { + return nil, err + } + if len(a.IP) == 0 { + a.IP = net.IPv6unspecified + } + if a.IP.Equal(net.IPv4zero) { + a.IP = net.IPv6unspecified + } + if a.IP = a.IP.To16(); a.IP == nil || a.IP.To4() != nil { + return nil, net.InvalidAddrError("non-ipv6 address") + } + sa := &syscall.SockaddrInet6{ZoneId: zoneToUint32(a.Zone)} + copy(sa.Addr[:], a.IP) + return sa, nil + default: + return nil, net.InvalidAddrError("unexpected family") + } +} + +func zoneToUint32(zone string) uint32 { + if zone == "" { + return 0 + } + if ifi, err := net.InterfaceByName(zone); err == nil { + return uint32(ifi.Index) + } + n, err := strconv.Atoi(zone) + if err != nil { + return 0 + } + return uint32(n) +} + +func last(s string, b byte) int { + i := len(s) + for i--; i >= 0; i-- { + if s[i] == b { + break + } + } + return i +} diff --git a/vendor/golang.org/x/net/icmp/interface.go b/vendor/golang.org/x/net/icmp/interface.go new file mode 100644 index 0000000..617f757 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/interface.go @@ -0,0 +1,322 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "encoding/binary" + "net" + "strings" + + "golang.org/x/net/internal/iana" +) + +const ( + classInterfaceInfo = 2 +) + +const ( + attrMTU = 1 << iota + attrName + attrIPAddr + attrIfIndex +) + +// An InterfaceInfo represents interface and next-hop identification. 
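+// When parsing, the low bits of Type indicate which attributes +// (ifIndex, IP address, name, MTU) follow on the wire; when marshaling, +// attrsAndLen derives the attribute set from whichever fields are +// populated.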
+type InterfaceInfo struct { + Class int // extension object class number + Type int // extension object sub-type + Interface *net.Interface + Addr *net.IPAddr +} + +func (ifi *InterfaceInfo) nameLen() int { + if len(ifi.Interface.Name) > 63 { + return 64 + } + l := 1 + len(ifi.Interface.Name) + return (l + 3) &^ 3 +} + +func (ifi *InterfaceInfo) attrsAndLen(proto int) (attrs, l int) { + l = 4 + if ifi.Interface != nil && ifi.Interface.Index > 0 { + attrs |= attrIfIndex + l += 4 + if len(ifi.Interface.Name) > 0 { + attrs |= attrName + l += ifi.nameLen() + } + if ifi.Interface.MTU > 0 { + attrs |= attrMTU + l += 4 + } + } + if ifi.Addr != nil { + switch proto { + case iana.ProtocolICMP: + if ifi.Addr.IP.To4() != nil { + attrs |= attrIPAddr + l += 4 + net.IPv4len + } + case iana.ProtocolIPv6ICMP: + if ifi.Addr.IP.To16() != nil && ifi.Addr.IP.To4() == nil { + attrs |= attrIPAddr + l += 4 + net.IPv6len + } + } + } + return +} + +// Len implements the Len method of Extension interface. +func (ifi *InterfaceInfo) Len(proto int) int { + _, l := ifi.attrsAndLen(proto) + return l +} + +// Marshal implements the Marshal method of Extension interface. +func (ifi *InterfaceInfo) Marshal(proto int) ([]byte, error) { + attrs, l := ifi.attrsAndLen(proto) + b := make([]byte, l) + if err := ifi.marshal(proto, b, attrs, l); err != nil { + return nil, err + } + return b, nil +} + +func (ifi *InterfaceInfo) marshal(proto int, b []byte, attrs, l int) error { + binary.BigEndian.PutUint16(b[:2], uint16(l)) + b[2], b[3] = classInterfaceInfo, byte(ifi.Type) + for b = b[4:]; len(b) > 0 && attrs != 0; { + switch { + case attrs&attrIfIndex != 0: + b = ifi.marshalIfIndex(proto, b) + attrs &^= attrIfIndex + case attrs&attrIPAddr != 0: + b = ifi.marshalIPAddr(proto, b) + attrs &^= attrIPAddr + case attrs&attrName != 0: + b = ifi.marshalName(proto, b) + attrs &^= attrName + case attrs&attrMTU != 0: + b = ifi.marshalMTU(proto, b) + attrs &^= attrMTU + } + } + return nil +} + +func (ifi *InterfaceInfo) marshalIfIndex(proto int, b []byte) []byte { + binary.BigEndian.PutUint32(b[:4], uint32(ifi.Interface.Index)) + return b[4:] +} + +func (ifi *InterfaceInfo) parseIfIndex(b []byte) ([]byte, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + ifi.Interface.Index = int(binary.BigEndian.Uint32(b[:4])) + return b[4:], nil +} + +func (ifi *InterfaceInfo) marshalIPAddr(proto int, b []byte) []byte { + switch proto { + case iana.ProtocolICMP: + binary.BigEndian.PutUint16(b[:2], uint16(iana.AddrFamilyIPv4)) + copy(b[4:4+net.IPv4len], ifi.Addr.IP.To4()) + b = b[4+net.IPv4len:] + case iana.ProtocolIPv6ICMP: + binary.BigEndian.PutUint16(b[:2], uint16(iana.AddrFamilyIPv6)) + copy(b[4:4+net.IPv6len], ifi.Addr.IP.To16()) + b = b[4+net.IPv6len:] + } + return b +} + +func (ifi *InterfaceInfo) parseIPAddr(b []byte) ([]byte, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + afi := int(binary.BigEndian.Uint16(b[:2])) + b = b[4:] + switch afi { + case iana.AddrFamilyIPv4: + if len(b) < net.IPv4len { + return nil, errMessageTooShort + } + ifi.Addr.IP = make(net.IP, net.IPv4len) + copy(ifi.Addr.IP, b[:net.IPv4len]) + b = b[net.IPv4len:] + case iana.AddrFamilyIPv6: + if len(b) < net.IPv6len { + return nil, errMessageTooShort + } + ifi.Addr.IP = make(net.IP, net.IPv6len) + copy(ifi.Addr.IP, b[:net.IPv6len]) + b = b[net.IPv6len:] + } + return b, nil +} + +func (ifi *InterfaceInfo) marshalName(proto int, b []byte) []byte { + l := byte(ifi.nameLen()) + b[0] = l + copy(b[1:], []byte(ifi.Interface.Name)) + return b[l:] +} + 
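+// parseName parses a name attribute: a single length octet that covers +// the whole padded field, then the interface name padded with NULs to +// a 4-octet boundary. For example, "en101" occupies 8 octets on the +// wire: one length octet plus the five name bytes, rounded up to a +// multiple of four.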
+func (ifi *InterfaceInfo) parseName(b []byte) ([]byte, error) { + if 4 > len(b) || len(b) < int(b[0]) { + return nil, errMessageTooShort + } + l := int(b[0]) + if l%4 != 0 || 4 > l || l > 64 { + return nil, errInvalidExtension + } + var name [63]byte + copy(name[:], b[1:l]) + ifi.Interface.Name = strings.Trim(string(name[:]), "\000") + return b[l:], nil +} + +func (ifi *InterfaceInfo) marshalMTU(proto int, b []byte) []byte { + binary.BigEndian.PutUint32(b[:4], uint32(ifi.Interface.MTU)) + return b[4:] +} + +func (ifi *InterfaceInfo) parseMTU(b []byte) ([]byte, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + ifi.Interface.MTU = int(binary.BigEndian.Uint32(b[:4])) + return b[4:], nil +} + +func parseInterfaceInfo(b []byte) (Extension, error) { + ifi := &InterfaceInfo{ + Class: int(b[2]), + Type: int(b[3]), + } + if ifi.Type&(attrIfIndex|attrName|attrMTU) != 0 { + ifi.Interface = &net.Interface{} + } + if ifi.Type&attrIPAddr != 0 { + ifi.Addr = &net.IPAddr{} + } + attrs := ifi.Type & (attrIfIndex | attrIPAddr | attrName | attrMTU) + for b = b[4:]; len(b) > 0 && attrs != 0; { + var err error + switch { + case attrs&attrIfIndex != 0: + b, err = ifi.parseIfIndex(b) + attrs &^= attrIfIndex + case attrs&attrIPAddr != 0: + b, err = ifi.parseIPAddr(b) + attrs &^= attrIPAddr + case attrs&attrName != 0: + b, err = ifi.parseName(b) + attrs &^= attrName + case attrs&attrMTU != 0: + b, err = ifi.parseMTU(b) + attrs &^= attrMTU + } + if err != nil { + return nil, err + } + } + if ifi.Interface != nil && ifi.Interface.Name != "" && ifi.Addr != nil && ifi.Addr.IP.To16() != nil && ifi.Addr.IP.To4() == nil { + ifi.Addr.Zone = ifi.Interface.Name + } + return ifi, nil +} + +const ( + classInterfaceIdent = 3 + typeInterfaceByName = 1 + typeInterfaceByIndex = 2 + typeInterfaceByAddress = 3 +) + +// An InterfaceIdent represents interface identification. +type InterfaceIdent struct { + Class int // extension object class number + Type int // extension object sub-type + Name string // interface name + Index int // interface index + AFI int // address family identifier; see address family numbers in IANA registry + Addr []byte // address +} + +// Len implements the Len method of Extension interface. +func (ifi *InterfaceIdent) Len(_ int) int { + switch ifi.Type { + case typeInterfaceByName: + l := len(ifi.Name) + if l > 255 { + l = 255 + } + return 4 + (l+3)&^3 + case typeInterfaceByIndex: + return 4 + 8 + case typeInterfaceByAddress: + return 4 + 4 + (len(ifi.Addr)+3)&^3 + default: + return 4 + } +} + +// Marshal implements the Marshal method of Extension interface. 
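+// The object payload depends on Type: a NUL-padded name, an 8-octet +// interface index, or a 2-octet AFI and 1-octet address length followed +// by the address itself, as laid out by marshal below.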
+func (ifi *InterfaceIdent) Marshal(proto int) ([]byte, error) { + b := make([]byte, ifi.Len(proto)) + if err := ifi.marshal(proto, b); err != nil { + return nil, err + } + return b, nil +} + +func (ifi *InterfaceIdent) marshal(proto int, b []byte) error { + l := ifi.Len(proto) + binary.BigEndian.PutUint16(b[:2], uint16(l)) + b[2], b[3] = classInterfaceIdent, byte(ifi.Type) + switch ifi.Type { + case typeInterfaceByName: + copy(b[4:], ifi.Name) + case typeInterfaceByIndex: + binary.BigEndian.PutUint64(b[4:4+8], uint64(ifi.Index)) + case typeInterfaceByAddress: + binary.BigEndian.PutUint16(b[4:4+2], uint16(ifi.AFI)) + b[4+2] = byte(len(ifi.Addr)) + copy(b[4+4:], ifi.Addr) + } + return nil +} + +func parseInterfaceIdent(b []byte) (Extension, error) { + ifi := &InterfaceIdent{ + Class: int(b[2]), + Type: int(b[3]), + } + switch ifi.Type { + case typeInterfaceByName: + ifi.Name = strings.Trim(string(b[4:]), "\x00") + case typeInterfaceByIndex: + if len(b[4:]) < 8 { + return nil, errInvalidExtension + } + ifi.Index = int(binary.BigEndian.Uint64(b[4 : 4+8])) + case typeInterfaceByAddress: + if len(b[4:]) < 4 { + return nil, errInvalidExtension + } + ifi.AFI = int(binary.BigEndian.Uint16(b[4 : 4+2])) + l := int(b[4+2]) + if len(b[4+4:]) < l { + return nil, errInvalidExtension + } + ifi.Addr = make([]byte, l) + copy(ifi.Addr, b[4+4:]) + } + return ifi, nil +} diff --git a/vendor/golang.org/x/net/icmp/ipv4.go b/vendor/golang.org/x/net/icmp/ipv4.go new file mode 100644 index 0000000..ffc66ed --- /dev/null +++ b/vendor/golang.org/x/net/icmp/ipv4.go @@ -0,0 +1,61 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "encoding/binary" + "net" + "runtime" + + "golang.org/x/net/internal/socket" + "golang.org/x/net/ipv4" +) + +// freebsdVersion is set in sys_freebsd.go. +// See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html. +var freebsdVersion uint32 + +// ParseIPv4Header parses b as the IPv4 header of the invoking packet, +// as carried in the body of an ICMP error message.
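+// Note that the TotalLen field is in native (host) byte order when the +// header comes from a raw socket on Darwin, or on FreeBSD kernels older +// than version 1000000 (per the freebsdVersion check below); elsewhere +// it is big endian.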
+func ParseIPv4Header(b []byte) (*ipv4.Header, error) { + if len(b) < ipv4.HeaderLen { + return nil, errHeaderTooShort + } + hdrlen := int(b[0]&0x0f) << 2 + if hdrlen > len(b) { + return nil, errBufferTooShort + } + h := &ipv4.Header{ + Version: int(b[0] >> 4), + Len: hdrlen, + TOS: int(b[1]), + ID: int(binary.BigEndian.Uint16(b[4:6])), + FragOff: int(binary.BigEndian.Uint16(b[6:8])), + TTL: int(b[8]), + Protocol: int(b[9]), + Checksum: int(binary.BigEndian.Uint16(b[10:12])), + Src: net.IPv4(b[12], b[13], b[14], b[15]), + Dst: net.IPv4(b[16], b[17], b[18], b[19]), + } + switch runtime.GOOS { + case "darwin": + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + case "freebsd": + if freebsdVersion >= 1000000 { + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + } else { + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + } + default: + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + } + h.Flags = ipv4.HeaderFlags(h.FragOff&0xe000) >> 13 + h.FragOff = h.FragOff & 0x1fff + if hdrlen-ipv4.HeaderLen > 0 { + h.Options = make([]byte, hdrlen-ipv4.HeaderLen) + copy(h.Options, b[ipv4.HeaderLen:]) + } + return h, nil +} diff --git a/vendor/golang.org/x/net/icmp/ipv4_test.go b/vendor/golang.org/x/net/icmp/ipv4_test.go new file mode 100644 index 0000000..3fdee83 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/ipv4_test.go @@ -0,0 +1,75 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "encoding/binary" + "net" + "reflect" + "runtime" + "testing" + + "golang.org/x/net/internal/socket" + "golang.org/x/net/ipv4" +) + +func TestParseIPv4Header(t *testing.T) { + switch socket.NativeEndian { + case binary.LittleEndian: + t.Run("LittleEndian", func(t *testing.T) { + // TODO(mikio): Add platform dependent wire + // header formats when we support new + // platforms. + wireHeaderFromKernel := [ipv4.HeaderLen]byte{ + 0x45, 0x01, 0xbe, 0xef, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + } + wireHeaderFromTradBSDKernel := [ipv4.HeaderLen]byte{ + 0x45, 0x01, 0xef, 0xbe, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + } + th := &ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TOS: 1, + TotalLen: 0xbeef, + ID: 0xcafe, + Flags: ipv4.DontFragment, + FragOff: 1500, + TTL: 255, + Protocol: 1, + Checksum: 0xdead, + Src: net.IPv4(172, 16, 254, 254), + Dst: net.IPv4(192, 168, 0, 1), + } + var wh []byte + switch runtime.GOOS { + case "darwin": + wh = wireHeaderFromTradBSDKernel[:] + case "freebsd": + if freebsdVersion >= 1000000 { + wh = wireHeaderFromKernel[:] + } else { + wh = wireHeaderFromTradBSDKernel[:] + } + default: + wh = wireHeaderFromKernel[:] + } + h, err := ParseIPv4Header(wh) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(h, th) { + t.Fatalf("got %#v; want %#v", h, th) + } + }) + } +} diff --git a/vendor/golang.org/x/net/icmp/ipv6.go b/vendor/golang.org/x/net/icmp/ipv6.go new file mode 100644 index 0000000..2e8cfeb --- /dev/null +++ b/vendor/golang.org/x/net/icmp/ipv6.go @@ -0,0 +1,23 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package icmp + +import ( + "net" + + "golang.org/x/net/internal/iana" +) + +const ipv6PseudoHeaderLen = 2*net.IPv6len + 8 + +// IPv6PseudoHeader returns an IPv6 pseudo header for checksum +// calculation. +func IPv6PseudoHeader(src, dst net.IP) []byte { + b := make([]byte, ipv6PseudoHeaderLen) + copy(b, src.To16()) + copy(b[net.IPv6len:], dst.To16()) + b[len(b)-1] = byte(iana.ProtocolIPv6ICMP) + return b +} diff --git a/vendor/golang.org/x/net/icmp/listen_posix.go b/vendor/golang.org/x/net/icmp/listen_posix.go new file mode 100644 index 0000000..7fac4f9 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/listen_posix.go @@ -0,0 +1,100 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package icmp + +import ( + "net" + "os" + "runtime" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +const sysIP_STRIPHDR = 0x17 // for now only darwin supports this option + +// ListenPacket listens for incoming ICMP packets addressed to +// address. See net.Dial for the syntax of address. +// +// For non-privileged datagram-oriented ICMP endpoints, network must +// be "udp4" or "udp6". The endpoint allows reading and writing a +// limited set of ICMP messages such as echo request and echo reply. +// Currently only Darwin and Linux support this. +// +// Examples: +// ListenPacket("udp4", "192.168.0.1") +// ListenPacket("udp4", "0.0.0.0") +// ListenPacket("udp6", "fe80::1%en0") +// ListenPacket("udp6", "::") +// +// For privileged raw ICMP endpoints, network must be "ip4" or "ip6" +// followed by a colon and an ICMP protocol number or name.
+// +// Examples: +// ListenPacket("ip4:icmp", "192.168.0.1") +// ListenPacket("ip4:1", "0.0.0.0") +// ListenPacket("ip6:ipv6-icmp", "fe80::1%en0") +// ListenPacket("ip6:58", "::") +func ListenPacket(network, address string) (*PacketConn, error) { + var family, proto int + switch network { + case "udp4": + family, proto = syscall.AF_INET, iana.ProtocolICMP + case "udp6": + family, proto = syscall.AF_INET6, iana.ProtocolIPv6ICMP + default: + i := last(network, ':') + switch network[:i] { + case "ip4": + proto = iana.ProtocolICMP + case "ip6": + proto = iana.ProtocolIPv6ICMP + } + } + var cerr error + var c net.PacketConn + switch family { + case syscall.AF_INET, syscall.AF_INET6: + s, err := syscall.Socket(family, syscall.SOCK_DGRAM, proto) + if err != nil { + return nil, os.NewSyscallError("socket", err) + } + if runtime.GOOS == "darwin" && family == syscall.AF_INET { + if err := syscall.SetsockoptInt(s, iana.ProtocolIP, sysIP_STRIPHDR, 1); err != nil { + syscall.Close(s) + return nil, os.NewSyscallError("setsockopt", err) + } + } + sa, err := sockaddr(family, address) + if err != nil { + syscall.Close(s) + return nil, err + } + if err := syscall.Bind(s, sa); err != nil { + syscall.Close(s) + return nil, os.NewSyscallError("bind", err) + } + f := os.NewFile(uintptr(s), "datagram-oriented icmp") + c, cerr = net.FilePacketConn(f) + f.Close() + default: + c, cerr = net.ListenPacket(network, address) + } + if cerr != nil { + return nil, cerr + } + switch proto { + case iana.ProtocolICMP: + return &PacketConn{c: c, p4: ipv4.NewPacketConn(c)}, nil + case iana.ProtocolIPv6ICMP: + return &PacketConn{c: c, p6: ipv6.NewPacketConn(c)}, nil + default: + return &PacketConn{c: c}, nil + } +} diff --git a/vendor/golang.org/x/net/icmp/listen_stub.go b/vendor/golang.org/x/net/icmp/listen_stub.go new file mode 100644 index 0000000..668728d --- /dev/null +++ b/vendor/golang.org/x/net/icmp/listen_stub.go @@ -0,0 +1,33 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 + +package icmp + +// ListenPacket listens for incoming ICMP packets addressed to +// address. See net.Dial for the syntax of address. +// +// For non-privileged datagram-oriented ICMP endpoints, network must +// be "udp4" or "udp6". The endpoint allows reading and writing a +// limited set of ICMP messages such as echo request and echo reply. +// Currently only Darwin and Linux support this. +// +// Examples: +// ListenPacket("udp4", "192.168.0.1") +// ListenPacket("udp4", "0.0.0.0") +// ListenPacket("udp6", "fe80::1%en0") +// ListenPacket("udp6", "::") +// +// For privileged raw ICMP endpoints, network must be "ip4" or "ip6" +// followed by a colon and an ICMP protocol number or name. +// +// Examples: +// ListenPacket("ip4:icmp", "192.168.0.1") +// ListenPacket("ip4:1", "0.0.0.0") +// ListenPacket("ip6:ipv6-icmp", "fe80::1%en0") +// ListenPacket("ip6:58", "::") +func ListenPacket(network, address string) (*PacketConn, error) { + return nil, errOpNoSupport +} diff --git a/vendor/golang.org/x/net/icmp/message.go b/vendor/golang.org/x/net/icmp/message.go new file mode 100644 index 0000000..46fe95a --- /dev/null +++ b/vendor/golang.org/x/net/icmp/message.go @@ -0,0 +1,157 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +// Package icmp provides basic functions for the manipulation of +// messages used in the Internet Control Message Protocols, +// ICMPv4 and ICMPv6. +// +// ICMPv4 and ICMPv6 are defined in RFC 792 and RFC 4443. +// Multi-part message support for ICMP is defined in RFC 4884. +// ICMP extensions for MPLS are defined in RFC 4950. +// ICMP extensions for interface and next-hop identification are +// defined in RFC 5837. +// PROBE: A utility for probing interfaces is defined in RFC 8335. +package icmp // import "golang.org/x/net/icmp" + +import ( + "encoding/binary" + "errors" + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +// BUG(mikio): This package is not implemented on NaCl and Plan 9. + +var ( + errMessageTooShort = errors.New("message too short") + errHeaderTooShort = errors.New("header too short") + errBufferTooShort = errors.New("buffer too short") + errOpNoSupport = errors.New("operation not supported") + errNoExtension = errors.New("no extension") + errInvalidExtension = errors.New("invalid extension") +) + +func checksum(b []byte) uint16 { + csumcv := len(b) - 1 // checksum coverage + s := uint32(0) + for i := 0; i < csumcv; i += 2 { + s += uint32(b[i+1])<<8 | uint32(b[i]) + } + if csumcv&1 == 0 { + s += uint32(b[csumcv]) + } + s = s>>16 + s&0xffff + s = s + s>>16 + return ^uint16(s) +} + +// A Type represents an ICMP message type. +type Type interface { + Protocol() int +} + +// A Message represents an ICMP message. +type Message struct { + Type Type // type, either ipv4.ICMPType or ipv6.ICMPType + Code int // code + Checksum int // checksum + Body MessageBody // body +} + +// Marshal returns the binary encoding of the ICMP message m. +// +// For an ICMPv4 message, the returned message always contains the +// calculated checksum field. +// +// For an ICMPv6 message, the returned message contains the calculated +// checksum field when psh is not nil, otherwise the kernel will +// compute the checksum field during the message transmission. +// When psh is not nil, it must be the pseudo header for IPv6. +func (m *Message) Marshal(psh []byte) ([]byte, error) { + var mtype int + switch typ := m.Type.(type) { + case ipv4.ICMPType: + mtype = int(typ) + case ipv6.ICMPType: + mtype = int(typ) + default: + return nil, syscall.EINVAL + } + b := []byte{byte(mtype), byte(m.Code), 0, 0} + if m.Type.Protocol() == iana.ProtocolIPv6ICMP && psh != nil { + b = append(psh, b...) + } + if m.Body != nil && m.Body.Len(m.Type.Protocol()) != 0 { + mb, err := m.Body.Marshal(m.Type.Protocol()) + if err != nil { + return nil, err + } + b = append(b, mb...) + } + if m.Type.Protocol() == iana.ProtocolIPv6ICMP { + if psh == nil { // cannot calculate checksum here + return b, nil + } + off, l := 2*net.IPv6len, len(b)-len(psh) + binary.BigEndian.PutUint32(b[off:off+4], uint32(l)) + } + s := checksum(b) + // Place checksum back in header; using ^= avoids the + // assumption the checksum bytes are zero. 
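+ // For ICMPv6 the checksum above also covers the pseudo header + // psh prepended earlier; the pseudo header is then stripped from + // the returned slice.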
+ b[len(psh)+2] ^= byte(s) + b[len(psh)+3] ^= byte(s >> 8) + return b[len(psh):], nil +} + +var parseFns = map[Type]func(int, Type, []byte) (MessageBody, error){ + ipv4.ICMPTypeDestinationUnreachable: parseDstUnreach, + ipv4.ICMPTypeTimeExceeded: parseTimeExceeded, + ipv4.ICMPTypeParameterProblem: parseParamProb, + + ipv4.ICMPTypeEcho: parseEcho, + ipv4.ICMPTypeEchoReply: parseEcho, + ipv4.ICMPTypeExtendedEchoRequest: parseExtendedEchoRequest, + ipv4.ICMPTypeExtendedEchoReply: parseExtendedEchoReply, + + ipv6.ICMPTypeDestinationUnreachable: parseDstUnreach, + ipv6.ICMPTypePacketTooBig: parsePacketTooBig, + ipv6.ICMPTypeTimeExceeded: parseTimeExceeded, + ipv6.ICMPTypeParameterProblem: parseParamProb, + + ipv6.ICMPTypeEchoRequest: parseEcho, + ipv6.ICMPTypeEchoReply: parseEcho, + ipv6.ICMPTypeExtendedEchoRequest: parseExtendedEchoRequest, + ipv6.ICMPTypeExtendedEchoReply: parseExtendedEchoReply, +} + +// ParseMessage parses b as an ICMP message. +// Proto must be either the ICMPv4 or ICMPv6 protocol number. +func ParseMessage(proto int, b []byte) (*Message, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + var err error + m := &Message{Code: int(b[1]), Checksum: int(binary.BigEndian.Uint16(b[2:4]))} + switch proto { + case iana.ProtocolICMP: + m.Type = ipv4.ICMPType(b[0]) + case iana.ProtocolIPv6ICMP: + m.Type = ipv6.ICMPType(b[0]) + default: + return nil, syscall.EINVAL + } + if fn, ok := parseFns[m.Type]; !ok { + m.Body, err = parseDefaultMessageBody(proto, b[4:]) + } else { + m.Body, err = fn(proto, m.Type, b[4:]) + } + if err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/golang.org/x/net/icmp/message_test.go b/vendor/golang.org/x/net/icmp/message_test.go new file mode 100644 index 0000000..c278b8b --- /dev/null +++ b/vendor/golang.org/x/net/icmp/message_test.go @@ -0,0 +1,155 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package icmp_test + +import ( + "net" + "reflect" + "testing" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +func TestMarshalAndParseMessage(t *testing.T) { + fn := func(t *testing.T, proto int, tms []icmp.Message) { + var pshs [][]byte + switch proto { + case iana.ProtocolICMP: + pshs = [][]byte{nil} + case iana.ProtocolIPv6ICMP: + pshs = [][]byte{ + icmp.IPv6PseudoHeader(net.ParseIP("fe80::1"), net.ParseIP("ff02::1")), + nil, + } + } + for i, tm := range tms { + for _, psh := range pshs { + b, err := tm.Marshal(psh) + if err != nil { + t.Fatal(err) + } + m, err := icmp.ParseMessage(proto, b) + if err != nil { + t.Fatal(err) + } + if m.Type != tm.Type || m.Code != tm.Code { + t.Errorf("#%d: got %#v; want %#v", i, m, &tm) + } + if !reflect.DeepEqual(m.Body, tm.Body) { + t.Errorf("#%d: got %#v; want %#v", i, m.Body, tm.Body) + } + } + } + } + + t.Run("IPv4", func(t *testing.T) { + fn(t, iana.ProtocolICMP, + []icmp.Message{ + { + Type: ipv4.ICMPTypeDestinationUnreachable, Code: 15, + Body: &icmp.DstUnreach{ + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv4.ICMPTypeTimeExceeded, Code: 1, + Body: &icmp.TimeExceeded{ + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv4.ICMPTypeParameterProblem, Code: 2, + Body: &icmp.ParamProb{ + Pointer: 8, + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: 1, Seq: 2, + Data: []byte("HELLO-R-U-THERE"), + }, + }, + { + Type: ipv4.ICMPTypeExtendedEchoRequest, Code: 0, + Body: &icmp.ExtendedEchoRequest{ + ID: 1, Seq: 2, + }, + }, + { + Type: ipv4.ICMPTypeExtendedEchoReply, Code: 0, + Body: &icmp.ExtendedEchoReply{ + State: 4 /* Delay */, Active: true, IPv4: true, + }, + }, + { + Type: ipv4.ICMPTypePhoturis, + Body: &icmp.DefaultMessageBody{ + Data: []byte{0x80, 0x40, 0x20, 0x10}, + }, + }, + }) + }) + t.Run("IPv6", func(t *testing.T) { + fn(t, iana.ProtocolIPv6ICMP, + []icmp.Message{ + { + Type: ipv6.ICMPTypeDestinationUnreachable, Code: 6, + Body: &icmp.DstUnreach{ + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv6.ICMPTypePacketTooBig, Code: 0, + Body: &icmp.PacketTooBig{ + MTU: 1<<16 - 1, + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv6.ICMPTypeTimeExceeded, Code: 1, + Body: &icmp.TimeExceeded{ + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv6.ICMPTypeParameterProblem, Code: 2, + Body: &icmp.ParamProb{ + Pointer: 8, + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: 1, Seq: 2, + Data: []byte("HELLO-R-U-THERE"), + }, + }, + { + Type: ipv6.ICMPTypeExtendedEchoRequest, Code: 0, + Body: &icmp.ExtendedEchoRequest{ + ID: 1, Seq: 2, + }, + }, + { + Type: ipv6.ICMPTypeExtendedEchoReply, Code: 0, + Body: &icmp.ExtendedEchoReply{ + State: 5 /* Probe */, Active: true, IPv6: true, + }, + }, + { + Type: ipv6.ICMPTypeDuplicateAddressConfirmation, + Body: &icmp.DefaultMessageBody{ + Data: []byte{0x80, 0x40, 0x20, 0x10}, + }, + }, + }) + }) +} diff --git a/vendor/golang.org/x/net/icmp/messagebody.go b/vendor/golang.org/x/net/icmp/messagebody.go new file mode 100644 index 0000000..2463730 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/messagebody.go @@ -0,0 +1,41 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package icmp + +// A MessageBody represents an ICMP message body. +type MessageBody interface { + // Len returns the length of ICMP message body. + // Proto must be either the ICMPv4 or ICMPv6 protocol number. + Len(proto int) int + + // Marshal returns the binary encoding of ICMP message body. + // Proto must be either the ICMPv4 or ICMPv6 protocol number. + Marshal(proto int) ([]byte, error) +} + +// A DefaultMessageBody represents the default message body. +type DefaultMessageBody struct { + Data []byte // data +} + +// Len implements the Len method of MessageBody interface. +func (p *DefaultMessageBody) Len(proto int) int { + if p == nil { + return 0 + } + return len(p.Data) +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *DefaultMessageBody) Marshal(proto int) ([]byte, error) { + return p.Data, nil +} + +// parseDefaultMessageBody parses b as an ICMP message body. +func parseDefaultMessageBody(proto int, b []byte) (MessageBody, error) { + p := &DefaultMessageBody{Data: make([]byte, len(b))} + copy(p.Data, b) + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/mpls.go b/vendor/golang.org/x/net/icmp/mpls.go new file mode 100644 index 0000000..c314917 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/mpls.go @@ -0,0 +1,77 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "encoding/binary" + +// A MPLSLabel represents a MPLS label stack entry. +type MPLSLabel struct { + Label int // label value + TC int // traffic class; formerly experimental use + S bool // bottom of stack + TTL int // time to live +} + +const ( + classMPLSLabelStack = 1 + typeIncomingMPLSLabelStack = 1 +) + +// A MPLSLabelStack represents a MPLS label stack. +type MPLSLabelStack struct { + Class int // extension object class number + Type int // extension object sub-type + Labels []MPLSLabel +} + +// Len implements the Len method of Extension interface. +func (ls *MPLSLabelStack) Len(proto int) int { + return 4 + (4 * len(ls.Labels)) +} + +// Marshal implements the Marshal method of Extension interface. +func (ls *MPLSLabelStack) Marshal(proto int) ([]byte, error) { + b := make([]byte, ls.Len(proto)) + if err := ls.marshal(proto, b); err != nil { + return nil, err + } + return b, nil +} + +func (ls *MPLSLabelStack) marshal(proto int, b []byte) error { + l := ls.Len(proto) + binary.BigEndian.PutUint16(b[:2], uint16(l)) + b[2], b[3] = classMPLSLabelStack, typeIncomingMPLSLabelStack + off := 4 + for _, ll := range ls.Labels { + b[off], b[off+1], b[off+2] = byte(ll.Label>>12), byte(ll.Label>>4&0xff), byte(ll.Label<<4&0xf0) + b[off+2] |= byte(ll.TC << 1 & 0x0e) + if ll.S { + b[off+2] |= 0x1 + } + b[off+3] = byte(ll.TTL) + off += 4 + } + return nil +} + +func parseMPLSLabelStack(b []byte) (Extension, error) { + ls := &MPLSLabelStack{ + Class: int(b[2]), + Type: int(b[3]), + } + for b = b[4:]; len(b) >= 4; b = b[4:] { + ll := MPLSLabel{ + Label: int(b[0])<<12 | int(b[1])<<4 | int(b[2])>>4, + TC: int(b[2]&0x0e) >> 1, + TTL: int(b[3]), + } + if b[2]&0x1 != 0 { + ll.S = true + } + ls.Labels = append(ls.Labels, ll) + } + return ls, nil +} diff --git a/vendor/golang.org/x/net/icmp/multipart.go b/vendor/golang.org/x/net/icmp/multipart.go new file mode 100644 index 0000000..9ebbbaf --- /dev/null +++ b/vendor/golang.org/x/net/icmp/multipart.go @@ -0,0 +1,121 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "golang.org/x/net/internal/iana" + +// multipartMessageBodyDataLen takes b as an original datagram and +// exts as extensions, and returns a required length for message body +// and a required length for a padded original datagram in wire +// format. +func multipartMessageBodyDataLen(proto int, withOrigDgram bool, b []byte, exts []Extension) (bodyLen, dataLen int) { + for _, ext := range exts { + bodyLen += ext.Len(proto) + } + if bodyLen > 0 { + if withOrigDgram { + dataLen = multipartMessageOrigDatagramLen(proto, b) + } + bodyLen += 4 // length of extension header + } else { + dataLen = len(b) + } + bodyLen += dataLen + return bodyLen, dataLen +} + +// multipartMessageOrigDatagramLen takes b as an original datagram, +// and returns a required length for a padded original datagram in wire +// format. +func multipartMessageOrigDatagramLen(proto int, b []byte) int { + roundup := func(b []byte, align int) int { + // According to RFC 4884, the padded original datagram + // field must contain at least 128 octets. + if len(b) < 128 { + return 128 + } + r := len(b) + return (r + align - 1) & ^(align - 1) + } + switch proto { + case iana.ProtocolICMP: + return roundup(b, 4) + case iana.ProtocolIPv6ICMP: + return roundup(b, 8) + default: + return len(b) + } +} + +// marshalMultipartMessageBody takes data as an original datagram and +// exts as extensions, and returns a binary encoding of message body. +// It can be used for non-multipart message bodies when exts is nil. +func marshalMultipartMessageBody(proto int, withOrigDgram bool, data []byte, exts []Extension) ([]byte, error) { + bodyLen, dataLen := multipartMessageBodyDataLen(proto, withOrigDgram, data, exts) + b := make([]byte, 4+bodyLen) + copy(b[4:], data) + off := dataLen + 4 + if len(exts) > 0 { + b[dataLen+4] = byte(extensionVersion << 4) + off += 4 // length of object header + for _, ext := range exts { + switch ext := ext.(type) { + case *MPLSLabelStack: + if err := ext.marshal(proto, b[off:]); err != nil { + return nil, err + } + off += ext.Len(proto) + case *InterfaceInfo: + attrs, l := ext.attrsAndLen(proto) + if err := ext.marshal(proto, b[off:], attrs, l); err != nil { + return nil, err + } + off += ext.Len(proto) + case *InterfaceIdent: + if err := ext.marshal(proto, b[off:]); err != nil { + return nil, err + } + off += ext.Len(proto) + } + } + s := checksum(b[dataLen+4:]) + b[dataLen+4+2] ^= byte(s) + b[dataLen+4+3] ^= byte(s >> 8) + if withOrigDgram { + switch proto { + case iana.ProtocolICMP: + b[1] = byte(dataLen / 4) + case iana.ProtocolIPv6ICMP: + b[0] = byte(dataLen / 8) + } + } + } + return b, nil +} + +// parseMultipartMessageBody parses b as either a non-multipart +// message body or a multipart message body.
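+// It returns the padded original datagram (always nil for extended +// echo messages, where parseExtensions forces the length attribute to +// zero) and any parsed extensions; when the extensions fail to parse, +// everything after the first four body octets is returned as data and +// no error is reported.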
+func parseMultipartMessageBody(proto int, typ Type, b []byte) ([]byte, []Extension, error) { + var l int + switch proto { + case iana.ProtocolICMP: + l = 4 * int(b[1]) + case iana.ProtocolIPv6ICMP: + l = 8 * int(b[0]) + } + if len(b) == 4 { + return nil, nil, nil + } + exts, l, err := parseExtensions(typ, b[4:], l) + if err != nil { + l = len(b) - 4 + } + var data []byte + if l > 0 { + data = make([]byte, l) + copy(data, b[4:]) + } + return data, exts, nil +} diff --git a/vendor/golang.org/x/net/icmp/multipart_test.go b/vendor/golang.org/x/net/icmp/multipart_test.go new file mode 100644 index 0000000..7440882 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/multipart_test.go @@ -0,0 +1,575 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp_test + +import ( + "errors" + "fmt" + "net" + "reflect" + "testing" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +func TestMarshalAndParseMultipartMessage(t *testing.T) { + fn := func(t *testing.T, proto int, tm icmp.Message) error { + b, err := tm.Marshal(nil) + if err != nil { + return err + } + switch tm.Type { + case ipv4.ICMPTypeExtendedEchoRequest, ipv6.ICMPTypeExtendedEchoRequest: + default: + switch proto { + case iana.ProtocolICMP: + if b[5] != 32 { + return fmt.Errorf("got %d; want 32", b[5]) + } + case iana.ProtocolIPv6ICMP: + if b[4] != 16 { + return fmt.Errorf("got %d; want 16", b[4]) + } + default: + return fmt.Errorf("unknown protocol: %d", proto) + } + } + m, err := icmp.ParseMessage(proto, b) + if err != nil { + return err + } + if m.Type != tm.Type || m.Code != tm.Code { + return fmt.Errorf("got %v; want %v", m, &tm) + } + switch m.Type { + case ipv4.ICMPTypeExtendedEchoRequest, ipv6.ICMPTypeExtendedEchoRequest: + got, want := m.Body.(*icmp.ExtendedEchoRequest), tm.Body.(*icmp.ExtendedEchoRequest) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + return errors.New(dumpExtensions(got.Extensions, want.Extensions)) + } + case ipv4.ICMPTypeDestinationUnreachable: + got, want := m.Body.(*icmp.DstUnreach), tm.Body.(*icmp.DstUnreach) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + return errors.New(dumpExtensions(got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + return fmt.Errorf("got %d; want 128", len(got.Data)) + } + case ipv4.ICMPTypeTimeExceeded: + got, want := m.Body.(*icmp.TimeExceeded), tm.Body.(*icmp.TimeExceeded) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + return errors.New(dumpExtensions(got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + return fmt.Errorf("got %d; want 128", len(got.Data)) + } + case ipv4.ICMPTypeParameterProblem: + got, want := m.Body.(*icmp.ParamProb), tm.Body.(*icmp.ParamProb) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + return errors.New(dumpExtensions(got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + return fmt.Errorf("got %d; want 128", len(got.Data)) + } + case ipv6.ICMPTypeDestinationUnreachable: + got, want := m.Body.(*icmp.DstUnreach), tm.Body.(*icmp.DstUnreach) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + return errors.New(dumpExtensions(got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + return fmt.Errorf("got %d; want 128", len(got.Data)) + } + case ipv6.ICMPTypeTimeExceeded: + got, want := m.Body.(*icmp.TimeExceeded), tm.Body.(*icmp.TimeExceeded) + if 
!reflect.DeepEqual(got.Extensions, want.Extensions) { + return errors.New(dumpExtensions(got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + return fmt.Errorf("got %d; want 128", len(got.Data)) + } + default: + return fmt.Errorf("unknown message type: %v", m.Type) + } + return nil + } + + t.Run("IPv4", func(t *testing.T) { + for i, tm := range []icmp.Message{ + { + Type: ipv4.ICMPTypeDestinationUnreachable, Code: 15, + Body: &icmp.DstUnreach{ + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.IPv4(192, 168, 0, 1).To4(), + }, + }, + }, + }, + }, + { + Type: ipv4.ICMPTypeTimeExceeded, Code: 1, + Body: &icmp.TimeExceeded{ + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.IPv4(192, 168, 0, 1).To4(), + }, + }, + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + }, + }, + }, + { + Type: ipv4.ICMPTypeParameterProblem, Code: 2, + Body: &icmp.ParamProb{ + Pointer: 8, + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.IPv4(192, 168, 0, 1).To4(), + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x2f, + Interface: &net.Interface{ + Index: 16, + Name: "en102", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.IPv4(192, 168, 0, 2).To4(), + }, + }, + }, + }, + }, + { + Type: ipv4.ICMPTypeExtendedEchoRequest, Code: 0, + Body: &icmp.ExtendedEchoRequest{ + ID: 1, Seq: 2, Local: true, + Extensions: []icmp.Extension{ + &icmp.InterfaceIdent{ + Class: 3, + Type: 1, + Name: "en101", + }, + }, + }, + }, + { + Type: ipv4.ICMPTypeExtendedEchoRequest, Code: 0, + Body: &icmp.ExtendedEchoRequest{ + ID: 1, Seq: 2, Local: true, + Extensions: []icmp.Extension{ + &icmp.InterfaceIdent{ + Class: 3, + Type: 2, + Index: 911, + }, + &icmp.InterfaceIdent{ + Class: 3, + Type: 1, + Name: "en101", + }, + }, + }, + }, + { + Type: ipv4.ICMPTypeExtendedEchoRequest, Code: 0, + Body: &icmp.ExtendedEchoRequest{ + ID: 1, Seq: 2, + Extensions: []icmp.Extension{ + &icmp.InterfaceIdent{ + Class: 3, + Type: 3, + AFI: iana.AddrFamily48bitMAC, + Addr: []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab}, + }, + }, + }, + }, + } { + if err := fn(t, iana.ProtocolICMP, tm); err != nil { + t.Errorf("#%d: %v", i, err) + } + } + }) + t.Run("IPv6", func(t *testing.T) { + for i, tm := range []icmp.Message{ + { + Type: ipv6.ICMPTypeDestinationUnreachable, Code: 6, + Body: &icmp.DstUnreach{ + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: 
&net.IPAddr{ + IP: net.ParseIP("fe80::1"), + Zone: "en101", + }, + }, + }, + }, + }, + { + Type: ipv6.ICMPTypeTimeExceeded, Code: 1, + Body: &icmp.TimeExceeded{ + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.ParseIP("fe80::1"), + Zone: "en101", + }, + }, + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x2f, + Interface: &net.Interface{ + Index: 16, + Name: "en102", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.ParseIP("fe80::1"), + Zone: "en102", + }, + }, + }, + }, + }, + { + Type: ipv6.ICMPTypeExtendedEchoRequest, Code: 0, + Body: &icmp.ExtendedEchoRequest{ + ID: 1, Seq: 2, Local: true, + Extensions: []icmp.Extension{ + &icmp.InterfaceIdent{ + Class: 3, + Type: 1, + Name: "en101", + }, + }, + }, + }, + { + Type: ipv6.ICMPTypeExtendedEchoRequest, Code: 0, + Body: &icmp.ExtendedEchoRequest{ + ID: 1, Seq: 2, Local: true, + Extensions: []icmp.Extension{ + &icmp.InterfaceIdent{ + Class: 3, + Type: 1, + Name: "en101", + }, + &icmp.InterfaceIdent{ + Class: 3, + Type: 2, + Index: 911, + }, + }, + }, + }, + { + Type: ipv6.ICMPTypeExtendedEchoRequest, Code: 0, + Body: &icmp.ExtendedEchoRequest{ + ID: 1, Seq: 2, + Extensions: []icmp.Extension{ + &icmp.InterfaceIdent{ + Class: 3, + Type: 3, + AFI: iana.AddrFamilyIPv4, + Addr: []byte{192, 0, 2, 1}, + }, + }, + }, + }, + } { + if err := fn(t, iana.ProtocolIPv6ICMP, tm); err != nil { + t.Errorf("#%d: %v", i, err) + } + } + }) +} + +func dumpExtensions(gotExts, wantExts []icmp.Extension) string { + var s string + for i, got := range gotExts { + switch got := got.(type) { + case *icmp.MPLSLabelStack: + want := wantExts[i].(*icmp.MPLSLabelStack) + if !reflect.DeepEqual(got, want) { + s += fmt.Sprintf("#%d: got %#v; want %#v\n", i, got, want) + } + case *icmp.InterfaceInfo: + want := wantExts[i].(*icmp.InterfaceInfo) + if !reflect.DeepEqual(got, want) { + s += fmt.Sprintf("#%d: got %#v, %#v, %#v; want %#v, %#v, %#v\n", i, got, got.Interface, got.Addr, want, want.Interface, want.Addr) + } + case *icmp.InterfaceIdent: + want := wantExts[i].(*icmp.InterfaceIdent) + if !reflect.DeepEqual(got, want) { + s += fmt.Sprintf("#%d: got %#v; want %#v\n", i, got, want) + } + } + } + if len(s) == 0 { + return "" + } + return s[:len(s)-1] +} + +func TestMultipartMessageBodyLen(t *testing.T) { + for i, tt := range []struct { + proto int + in icmp.MessageBody + out int + }{ + { + iana.ProtocolICMP, + &icmp.DstUnreach{ + Data: make([]byte, ipv4.HeaderLen), + }, + 4 + ipv4.HeaderLen, // unused and original datagram + }, + { + iana.ProtocolICMP, + &icmp.TimeExceeded{ + Data: make([]byte, ipv4.HeaderLen), + }, + 4 + ipv4.HeaderLen, // unused and original datagram + }, + { + iana.ProtocolICMP, + &icmp.ParamProb{ + Data: make([]byte, ipv4.HeaderLen), + }, + 4 + ipv4.HeaderLen, // [pointer, unused] and original datagram + }, + + { + iana.ProtocolICMP, + &icmp.ParamProb{ + Data: make([]byte, ipv4.HeaderLen), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 128, // [pointer, length, unused], extension header, object header, object payload, original datagram + }, + { + iana.ProtocolICMP, + &icmp.ParamProb{ + Data: make([]byte, 128), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 128, // 
[pointer, length, unused], extension header, object header, object payload and original datagram + }, + { + iana.ProtocolICMP, + &icmp.ParamProb{ + Data: make([]byte, 129), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 132, // [pointer, length, unused], extension header, object header, object payload and original datagram + }, + + { + iana.ProtocolIPv6ICMP, + &icmp.DstUnreach{ + Data: make([]byte, ipv6.HeaderLen), + }, + 4 + ipv6.HeaderLen, // unused and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.PacketTooBig{ + Data: make([]byte, ipv6.HeaderLen), + }, + 4 + ipv6.HeaderLen, // mtu and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.TimeExceeded{ + Data: make([]byte, ipv6.HeaderLen), + }, + 4 + ipv6.HeaderLen, // unused and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.ParamProb{ + Data: make([]byte, ipv6.HeaderLen), + }, + 4 + ipv6.HeaderLen, // pointer and original datagram + }, + + { + iana.ProtocolIPv6ICMP, + &icmp.DstUnreach{ + Data: make([]byte, 127), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 128, // [length, unused], extension header, object header, object payload and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.DstUnreach{ + Data: make([]byte, 128), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 128, // [length, unused], extension header, object header, object payload and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.DstUnreach{ + Data: make([]byte, 129), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 136, // [length, unused], extension header, object header, object payload and original datagram + }, + + { + iana.ProtocolICMP, + &icmp.ExtendedEchoRequest{}, + 4, // [id, seq, l-bit] + }, + { + iana.ProtocolICMP, + &icmp.ExtendedEchoRequest{ + Extensions: []icmp.Extension{ + &icmp.InterfaceIdent{}, + }, + }, + 4 + 4 + 4, // [id, seq, l-bit], extension header, object header + }, + { + iana.ProtocolIPv6ICMP, + &icmp.ExtendedEchoRequest{ + Extensions: []icmp.Extension{ + &icmp.InterfaceIdent{ + Type: 3, + AFI: iana.AddrFamilyNSAP, + Addr: []byte{0x49, 0x00, 0x01, 0xaa, 0xaa, 0xbb, 0xbb, 0xcc, 0xcc, 0x00}, + }, + }, + }, + 4 + 4 + 4 + 16, // [id, seq, l-bit], extension header, object header, object payload + }, + } { + if out := tt.in.Len(tt.proto); out != tt.out { + t.Errorf("#%d: got %d; want %d", i, out, tt.out) + } + } +} diff --git a/vendor/golang.org/x/net/icmp/packettoobig.go b/vendor/golang.org/x/net/icmp/packettoobig.go new file mode 100644 index 0000000..afbf24f --- /dev/null +++ b/vendor/golang.org/x/net/icmp/packettoobig.go @@ -0,0 +1,43 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "encoding/binary" + +// A PacketTooBig represents an ICMP packet too big message body. +type PacketTooBig struct { + MTU int // maximum transmission unit of the nexthop link + Data []byte // data, known as original datagram field +} + +// Len implements the Len method of MessageBody interface. +func (p *PacketTooBig) Len(proto int) int { + if p == nil { + return 0 + } + return 4 + len(p.Data) +} + +// Marshal implements the Marshal method of MessageBody interface. 
+func (p *PacketTooBig) Marshal(proto int) ([]byte, error) { + b := make([]byte, 4+len(p.Data)) + binary.BigEndian.PutUint32(b[:4], uint32(p.MTU)) + copy(b[4:], p.Data) + return b, nil +} + +// parsePacketTooBig parses b as an ICMP packet too big message body. +func parsePacketTooBig(proto int, _ Type, b []byte) (MessageBody, error) { + bodyLen := len(b) + if bodyLen < 4 { + return nil, errMessageTooShort + } + p := &PacketTooBig{MTU: int(binary.BigEndian.Uint32(b[:4]))} + if bodyLen > 4 { + p.Data = make([]byte, bodyLen-4) + copy(p.Data, b[4:]) + } + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/paramprob.go b/vendor/golang.org/x/net/icmp/paramprob.go new file mode 100644 index 0000000..8587255 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/paramprob.go @@ -0,0 +1,63 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "encoding/binary" + "golang.org/x/net/internal/iana" +) + +// A ParamProb represents an ICMP parameter problem message body. +type ParamProb struct { + Pointer uintptr // offset within the data where the error was detected + Data []byte // data, known as original datagram field + Extensions []Extension // extensions +} + +// Len implements the Len method of MessageBody interface. +func (p *ParamProb) Len(proto int) int { + if p == nil { + return 0 + } + l, _ := multipartMessageBodyDataLen(proto, true, p.Data, p.Extensions) + return 4 + l +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *ParamProb) Marshal(proto int) ([]byte, error) { + if proto == iana.ProtocolIPv6ICMP { + b := make([]byte, p.Len(proto)) + binary.BigEndian.PutUint32(b[:4], uint32(p.Pointer)) + copy(b[4:], p.Data) + return b, nil + } + b, err := marshalMultipartMessageBody(proto, true, p.Data, p.Extensions) + if err != nil { + return nil, err + } + b[0] = byte(p.Pointer) + return b, nil +} + +// parseParamProb parses b as an ICMP parameter problem message body. +func parseParamProb(proto int, typ Type, b []byte) (MessageBody, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + p := &ParamProb{} + if proto == iana.ProtocolIPv6ICMP { + p.Pointer = uintptr(binary.BigEndian.Uint32(b[:4])) + p.Data = make([]byte, len(b)-4) + copy(p.Data, b[4:]) + return p, nil + } + p.Pointer = uintptr(b[0]) + var err error + p.Data, p.Extensions, err = parseMultipartMessageBody(proto, typ, b) + if err != nil { + return nil, err + } + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/sys_freebsd.go b/vendor/golang.org/x/net/icmp/sys_freebsd.go new file mode 100644 index 0000000..c75f3dd --- /dev/null +++ b/vendor/golang.org/x/net/icmp/sys_freebsd.go @@ -0,0 +1,11 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "syscall" + +func init() { + freebsdVersion, _ = syscall.SysctlUint32("kern.osreldate") +} diff --git a/vendor/golang.org/x/net/icmp/timeexceeded.go b/vendor/golang.org/x/net/icmp/timeexceeded.go new file mode 100644 index 0000000..14e9e23 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/timeexceeded.go @@ -0,0 +1,39 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +// A TimeExceeded represents an ICMP time exceeded message body. 
+type TimeExceeded struct {
+	Data       []byte      // data, known as original datagram field
+	Extensions []Extension // extensions
+}
+
+// Len implements the Len method of MessageBody interface.
+func (p *TimeExceeded) Len(proto int) int {
+	if p == nil {
+		return 0
+	}
+	l, _ := multipartMessageBodyDataLen(proto, true, p.Data, p.Extensions)
+	return 4 + l
+}
+
+// Marshal implements the Marshal method of MessageBody interface.
+func (p *TimeExceeded) Marshal(proto int) ([]byte, error) {
+	return marshalMultipartMessageBody(proto, true, p.Data, p.Extensions)
+}
+
+// parseTimeExceeded parses b as an ICMP time exceeded message body.
+func parseTimeExceeded(proto int, typ Type, b []byte) (MessageBody, error) {
+	if len(b) < 4 {
+		return nil, errMessageTooShort
+	}
+	p := &TimeExceeded{}
+	var err error
+	p.Data, p.Extensions, err = parseMultipartMessageBody(proto, typ, b)
+	if err != nil {
+		return nil, err
+	}
+	return p, nil
+}
diff --git a/vendor/golang.org/x/net/idna/example_test.go b/vendor/golang.org/x/net/idna/example_test.go
new file mode 100644
index 0000000..948f6eb
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/example_test.go
@@ -0,0 +1,70 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package idna_test
+
+import (
+	"fmt"
+
+	"golang.org/x/net/idna"
+)
+
+func ExampleProfile() {
+	// Raw Punycode has no restrictions and does no mappings.
+	fmt.Println(idna.ToASCII(""))
+	fmt.Println(idna.ToASCII("*.faß.com"))
+	fmt.Println(idna.Punycode.ToASCII("*.faß.com"))
+
+	// Rewrite IDN for lookup. This (currently) uses transitional mappings to
+	// find a balance between IDNA2003 and IDNA2008 compatibility.
+	fmt.Println(idna.Lookup.ToASCII(""))
+	fmt.Println(idna.Lookup.ToASCII("www.faß.com"))
+
+	// Convert an IDN to ASCII for registration purposes. This changes the
+	// encoding, but reports an error if the input was ill-formed.
+	fmt.Println(idna.Registration.ToASCII(""))
+	fmt.Println(idna.Registration.ToASCII("www.faß.com"))
+
+	// Output:
+	//  <nil>
+	// *.xn--fa-hia.com <nil>
+	// *.xn--fa-hia.com <nil>
+	//  <nil>
+	// www.fass.com <nil>
+	//  idna: invalid label ""
+	// www.xn--fa-hia.com <nil>
+}
+
+func ExampleNew() {
+	var p *idna.Profile
+
+	// Raw Punycode has no restrictions and does no mappings.
+	p = idna.New()
+	fmt.Println(p.ToASCII("*.faß.com"))
+
+	// Do mappings. Note that star is not allowed in a DNS lookup.
+	p = idna.New(
+		idna.MapForLookup(),
+		idna.Transitional(true)) // Map ß -> ss
+	fmt.Println(p.ToASCII("*.faß.com"))
+
+	// Lookup for registration. Also does not allow '*'.
+	p = idna.New(idna.ValidateForRegistration())
+	fmt.Println(p.ToUnicode("*.faß.com"))
+
+	// Set up a profile that maps for lookup, but allows wild cards.
+	p = idna.New(
+		idna.MapForLookup(),
+		idna.Transitional(true),      // Map ß -> ss
+		idna.StrictDomainName(false)) // Set more permissive ASCII rules.
+	fmt.Println(p.ToASCII("*.faß.com"))
+
+	// Output:
+	// *.xn--fa-hia.com <nil>
+	// *.fass.com idna: disallowed rune U+002A
+	// *.faß.com idna: disallowed rune U+002A
+	// *.fass.com <nil>
+}
diff --git a/vendor/golang.org/x/net/idna/idna.go b/vendor/golang.org/x/net/idna/idna.go
new file mode 100644
index 0000000..346fe44
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/idna.go
@@ -0,0 +1,732 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package idna implements IDNA2008 using the compatibility processing
+// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
+// deal with the transition from IDNA2003.
+//
+// IDNA2008 (Internationalized Domain Names for Applications) is defined in RFC
+// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894.
+// UTS #46 is defined in http://www.unicode.org/reports/tr46.
+// See http://unicode.org/cldr/utility/idna.jsp for a visualization of the
+// differences between these two standards.
+package idna // import "golang.org/x/net/idna"
+
+import (
+	"fmt"
+	"strings"
+	"unicode/utf8"
+
+	"golang.org/x/text/secure/bidirule"
+	"golang.org/x/text/unicode/bidi"
+	"golang.org/x/text/unicode/norm"
+)
+
+// NOTE: Unlike common practice in Go APIs, the functions will return a
+// sanitized domain name in case of errors. Browsers sometimes use a partially
+// evaluated string for lookup.
+// TODO: the current error handling is, in my opinion, the least opinionated.
+// Other strategies are also viable, though:
+// Option 1) Return an empty string in case of error, but allow the user to
+// specify explicitly which errors to ignore.
+// Option 2) Return the partially evaluated string if it is itself a valid
+// string, otherwise return the empty string in case of error.
+// Option 3) Option 1 and 2.
+// Option 4) Always return an empty string for now and implement Option 1 as
+// needed, and document that the return string may not be empty in case of
+// error in the future.
+// I think Option 1 is best, but it is quite opinionated.
+
+// ToASCII is a wrapper for Punycode.ToASCII.
+func ToASCII(s string) (string, error) {
+	return Punycode.process(s, true)
+}
+
+// ToUnicode is a wrapper for Punycode.ToUnicode.
+func ToUnicode(s string) (string, error) {
+	return Punycode.process(s, false)
+}
+
+// An Option configures a Profile at creation time.
+type Option func(*options)
+
+// Transitional sets a Profile to use the Transitional mapping as defined in UTS
+// #46. This will cause, for example, "ß" to be mapped to "ss". Using the
+// transitional mapping provides a compromise between IDNA2003 and IDNA2008
+// compatibility. It is used by most browsers when resolving domain names. This
+// option is only meaningful if combined with MapForLookup.
+func Transitional(transitional bool) Option {
+	return func(o *options) { o.transitional = transitional }
+}
+
+// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
+// are longer than allowed by the RFC.
+func VerifyDNSLength(verify bool) Option {
+	return func(o *options) { o.verifyDNSLength = verify }
+}
+
+// RemoveLeadingDots removes leading label separators. Leading runes that map to
+// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well.
+//
+// This is the behavior suggested by UTS #46 and is adopted by some
+// browsers.
+func RemoveLeadingDots(remove bool) Option {
+	return func(o *options) { o.removeLeadingDots = remove }
+}
+
+// ValidateLabels sets whether to check the mandatory label validation criteria
+// as defined in Section 5.4 of RFC 5891. This includes testing for correct use
+// of hyphens ('-'), normalization, validity of runes, and the context rules.
+func ValidateLabels(enable bool) Option {
+	return func(o *options) {
+		// Don't override existing mappings, but set one that at least checks
+		// normalization if it is not set.
+		if o.mapping == nil && enable {
+			o.mapping = normalize
+		}
+		o.trie = trie
+		o.validateLabels = enable
+		o.fromPuny = validateFromPunycode
+	}
+}
+
+// StrictDomainName limits the set of permissible ASCII characters to those
+// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
+// hyphen). This is set by default for MapForLookup and ValidateForRegistration.
+//
+// This option is useful, for instance, for browsers that allow characters
+// outside this range, for example a '_' (U+005F LOW LINE). See
+// http://www.rfc-editor.org/std/std3.txt for more details. This option
+// corresponds to the UseSTD3ASCIIRules option in UTS #46.
+func StrictDomainName(use bool) Option {
+	return func(o *options) {
+		o.trie = trie
+		o.useSTD3Rules = use
+		o.fromPuny = validateFromPunycode
+	}
+}
+
+// NOTE: the following options pull in tables. The tables should not be linked
+// in as long as the options are not used.
+
+// BidiRule enables the Bidi rule as defined in RFC 5893. Any application
+// that relies on proper validation of labels should include this rule.
+func BidiRule() Option {
+	return func(o *options) { o.bidirule = bidirule.ValidString }
+}
+
+// ValidateForRegistration sets validation options to verify that a given IDN is
+// properly formatted for registration as defined by Section 4 of RFC 5891.
+func ValidateForRegistration() Option {
+	return func(o *options) {
+		o.mapping = validateRegistration
+		StrictDomainName(true)(o)
+		ValidateLabels(true)(o)
+		VerifyDNSLength(true)(o)
+		BidiRule()(o)
+	}
+}
+
+// MapForLookup sets validation and mapping options such that a given IDN is
+// transformed for domain name lookup according to the requirements set out in
+// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894,
+// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option
+// to add this check.
+//
+// The mappings include normalization and mapping case, width and other
+// compatibility mappings.
+func MapForLookup() Option {
+	return func(o *options) {
+		o.mapping = validateAndMap
+		StrictDomainName(true)(o)
+		ValidateLabels(true)(o)
+	}
+}
+
+type options struct {
+	transitional      bool
+	useSTD3Rules      bool
+	validateLabels    bool
+	verifyDNSLength   bool
+	removeLeadingDots bool
+
+	trie *idnaTrie
+
+	// fromPuny calls validation rules when converting A-labels to U-labels.
+	fromPuny func(p *Profile, s string) error
+
+	// mapping implements a validation and mapping step as defined in RFC 5895
+	// or UTS 46, tailored to, for example, domain registration or lookup.
+	mapping func(p *Profile, s string) (mapped string, isBidi bool, err error)
+
+	// bidirule, if specified, checks whether s conforms to the Bidi Rule
+	// defined in RFC 5893.
+	bidirule func(s string) bool
+}
+
+// A Profile defines the configuration of an IDNA mapper.
+type Profile struct {
+	options
+}
+
+func apply(o *options, opts []Option) {
+	for _, f := range opts {
+		f(o)
+	}
+}
+
+// New creates a new Profile.
+//
+// With no options, the returned Profile is the most permissive and equals the
+// Punycode Profile. Options can be passed to further restrict the Profile. The
+// MapForLookup and ValidateForRegistration options set a collection of options,
+// for lookup and registration purposes respectively, which can be tailored by
+// adding more fine-grained options, where later options override earlier
+// options.
+func New(o ...Option) *Profile { + p := &Profile{} + apply(&p.options, o) + return p +} + +// ToASCII converts a domain or domain label to its ASCII form. For example, +// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and +// ToASCII("golang") is "golang". If an error is encountered it will return +// an error and a (partially) processed result. +func (p *Profile) ToASCII(s string) (string, error) { + return p.process(s, true) +} + +// ToUnicode converts a domain or domain label to its Unicode form. For example, +// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and +// ToUnicode("golang") is "golang". If an error is encountered it will return +// an error and a (partially) processed result. +func (p *Profile) ToUnicode(s string) (string, error) { + pp := *p + pp.transitional = false + return pp.process(s, false) +} + +// String reports a string with a description of the profile for debugging +// purposes. The string format may change with different versions. +func (p *Profile) String() string { + s := "" + if p.transitional { + s = "Transitional" + } else { + s = "NonTransitional" + } + if p.useSTD3Rules { + s += ":UseSTD3Rules" + } + if p.validateLabels { + s += ":ValidateLabels" + } + if p.verifyDNSLength { + s += ":VerifyDNSLength" + } + return s +} + +var ( + // Punycode is a Profile that does raw punycode processing with a minimum + // of validation. + Punycode *Profile = punycode + + // Lookup is the recommended profile for looking up domain names, according + // to Section 5 of RFC 5891. The exact configuration of this profile may + // change over time. + Lookup *Profile = lookup + + // Display is the recommended profile for displaying domain names. + // The configuration of this profile may change over time. + Display *Profile = display + + // Registration is the recommended profile for checking whether a given + // IDN is valid for registration, according to Section 4 of RFC 5891. + Registration *Profile = registration + + punycode = &Profile{} + lookup = &Profile{options{ + transitional: true, + useSTD3Rules: true, + validateLabels: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, + }} + display = &Profile{options{ + useSTD3Rules: true, + validateLabels: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, + }} + registration = &Profile{options{ + useSTD3Rules: true, + validateLabels: true, + verifyDNSLength: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateRegistration, + bidirule: bidirule.ValidString, + }} + + // TODO: profiles + // Register: recommended for approving domain names: don't do any mappings + // but rather reject on invalid input. Bundle or block deviation characters. +) + +type labelError struct{ label, code_ string } + +func (e labelError) code() string { return e.code_ } +func (e labelError) Error() string { + return fmt.Sprintf("idna: invalid label %q", e.label) +} + +type runeError rune + +func (e runeError) code() string { return "P1" } +func (e runeError) Error() string { + return fmt.Sprintf("idna: disallowed rune %U", e) +} + +// process implements the algorithm described in section 4 of UTS #46, +// see http://www.unicode.org/reports/tr46. +func (p *Profile) process(s string, toASCII bool) (string, error) { + var err error + var isBidi bool + if p.mapping != nil { + s, isBidi, err = p.mapping(p, s) + } + // Remove leading empty labels. 
+	if p.removeLeadingDots {
+		for ; len(s) > 0 && s[0] == '.'; s = s[1:] {
+		}
+	}
+	// TODO: allow for a quick check of the tables data.
+	// It seems like we should only create this error on ToASCII, but the
+	// UTS 46 conformance tests suggest we should always check this.
+	if err == nil && p.verifyDNSLength && s == "" {
+		err = &labelError{s, "A4"}
+	}
+	labels := labelIter{orig: s}
+	for ; !labels.done(); labels.next() {
+		label := labels.label()
+		if label == "" {
+			// Empty labels are not okay. The label iterator skips the last
+			// label if it is empty.
+			if err == nil && p.verifyDNSLength {
+				err = &labelError{s, "A4"}
+			}
+			continue
+		}
+		if strings.HasPrefix(label, acePrefix) {
+			u, err2 := decode(label[len(acePrefix):])
+			if err2 != nil {
+				if err == nil {
+					err = err2
+				}
+				// Spec says keep the old label.
+				continue
+			}
+			isBidi = isBidi || bidirule.DirectionString(u) != bidi.LeftToRight
+			labels.set(u)
+			if err == nil && p.validateLabels {
+				err = p.fromPuny(p, u)
+			}
+			if err == nil {
+				// This should be called on NonTransitional, according to the
+				// spec, but that currently does not have any effect. Use the
+				// original profile to preserve options.
+				err = p.validateLabel(u)
+			}
+		} else if err == nil {
+			err = p.validateLabel(label)
+		}
+	}
+	if isBidi && p.bidirule != nil && err == nil {
+		for labels.reset(); !labels.done(); labels.next() {
+			if !p.bidirule(labels.label()) {
+				err = &labelError{s, "B"}
+				break
+			}
+		}
+	}
+	if toASCII {
+		for labels.reset(); !labels.done(); labels.next() {
+			label := labels.label()
+			if !ascii(label) {
+				a, err2 := encode(acePrefix, label)
+				if err == nil {
+					err = err2
+				}
+				label = a
+				labels.set(a)
+			}
+			n := len(label)
+			if p.verifyDNSLength && err == nil && (n == 0 || n > 63) {
+				err = &labelError{label, "A4"}
+			}
+		}
+	}
+	s = labels.result()
+	if toASCII && p.verifyDNSLength && err == nil {
+		// Compute the length of the domain name minus the root label and its dot.
+		n := len(s)
+		if n > 0 && s[n-1] == '.' {
+			n--
+		}
+		if len(s) < 1 || n > 253 {
+			err = &labelError{s, "A4"}
+		}
+	}
+	return s, err
+}
+
+func normalize(p *Profile, s string) (mapped string, isBidi bool, err error) {
+	// TODO: consider first doing a quick check to see if any of these checks
+	// need to be done. This will make it slower in the general case, but
+	// faster in the common case.
+	mapped = norm.NFC.String(s)
+	isBidi = bidirule.DirectionString(mapped) == bidi.RightToLeft
+	return mapped, isBidi, nil
+}
+
+func validateRegistration(p *Profile, s string) (idem string, bidi bool, err error) {
+	// TODO: filter need for normalization in loop below.
+	if !norm.NFC.IsNormalString(s) {
+		return s, false, &labelError{s, "V1"}
+	}
+	for i := 0; i < len(s); {
+		v, sz := trie.lookupString(s[i:])
+		if sz == 0 {
+			return s, bidi, runeError(utf8.RuneError)
+		}
+		bidi = bidi || info(v).isBidi(s[i:])
+		// Copy bytes not copied so far.
+		switch p.simplify(info(v).category()) {
+		// TODO: handle the NV8 defined in the Unicode idna data set to allow
+		// for strict conformance to IDNA2008.
+		case valid, deviation:
+		case disallowed, mapped, unknown, ignored:
+			r, _ := utf8.DecodeRuneInString(s[i:])
+			return s, bidi, runeError(r)
+		}
+		i += sz
+	}
+	return s, bidi, nil
+}
+
+func (c info) isBidi(s string) bool {
+	if !c.isMapped() {
+		return c&attributesMask == rtl
+	}
+	// TODO: also store bidi info for mapped data. This is possible, but a bit
+	// cumbersome and not for the common case.
+ p, _ := bidi.LookupString(s) + switch p.Class() { + case bidi.R, bidi.AL, bidi.AN: + return true + } + return false +} + +func validateAndMap(p *Profile, s string) (vm string, bidi bool, err error) { + var ( + b []byte + k int + ) + // combinedInfoBits contains the or-ed bits of all runes. We use this + // to derive the mayNeedNorm bit later. This may trigger normalization + // overeagerly, but it will not do so in the common case. The end result + // is another 10% saving on BenchmarkProfile for the common case. + var combinedInfoBits info + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + if sz == 0 { + b = append(b, s[k:i]...) + b = append(b, "\ufffd"...) + k = len(s) + if err == nil { + err = runeError(utf8.RuneError) + } + break + } + combinedInfoBits |= info(v) + bidi = bidi || info(v).isBidi(s[i:]) + start := i + i += sz + // Copy bytes not copied so far. + switch p.simplify(info(v).category()) { + case valid: + continue + case disallowed: + if err == nil { + r, _ := utf8.DecodeRuneInString(s[start:]) + err = runeError(r) + } + continue + case mapped, deviation: + b = append(b, s[k:start]...) + b = info(v).appendMapping(b, s[start:i]) + case ignored: + b = append(b, s[k:start]...) + // drop the rune + case unknown: + b = append(b, s[k:start]...) + b = append(b, "\ufffd"...) + } + k = i + } + if k == 0 { + // No changes so far. + if combinedInfoBits&mayNeedNorm != 0 { + s = norm.NFC.String(s) + } + } else { + b = append(b, s[k:]...) + if norm.NFC.QuickSpan(b) != len(b) { + b = norm.NFC.Bytes(b) + } + // TODO: the punycode converters require strings as input. + s = string(b) + } + return s, bidi, err +} + +// A labelIter allows iterating over domain name labels. +type labelIter struct { + orig string + slice []string + curStart int + curEnd int + i int +} + +func (l *labelIter) reset() { + l.curStart = 0 + l.curEnd = 0 + l.i = 0 +} + +func (l *labelIter) done() bool { + return l.curStart >= len(l.orig) +} + +func (l *labelIter) result() string { + if l.slice != nil { + return strings.Join(l.slice, ".") + } + return l.orig +} + +func (l *labelIter) label() string { + if l.slice != nil { + return l.slice[l.i] + } + p := strings.IndexByte(l.orig[l.curStart:], '.') + l.curEnd = l.curStart + p + if p == -1 { + l.curEnd = len(l.orig) + } + return l.orig[l.curStart:l.curEnd] +} + +// next sets the value to the next label. It skips the last label if it is empty. +func (l *labelIter) next() { + l.i++ + if l.slice != nil { + if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" { + l.curStart = len(l.orig) + } + } else { + l.curStart = l.curEnd + 1 + if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' { + l.curStart = len(l.orig) + } + } +} + +func (l *labelIter) set(s string) { + if l.slice == nil { + l.slice = strings.Split(l.orig, ".") + } + l.slice[l.i] = s +} + +// acePrefix is the ASCII Compatible Encoding prefix. 
+const acePrefix = "xn--" + +func (p *Profile) simplify(cat category) category { + switch cat { + case disallowedSTD3Mapped: + if p.useSTD3Rules { + cat = disallowed + } else { + cat = mapped + } + case disallowedSTD3Valid: + if p.useSTD3Rules { + cat = disallowed + } else { + cat = valid + } + case deviation: + if !p.transitional { + cat = valid + } + case validNV8, validXV8: + // TODO: handle V2008 + cat = valid + } + return cat +} + +func validateFromPunycode(p *Profile, s string) error { + if !norm.NFC.IsNormalString(s) { + return &labelError{s, "V1"} + } + // TODO: detect whether string may have to be normalized in the following + // loop. + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + if sz == 0 { + return runeError(utf8.RuneError) + } + if c := p.simplify(info(v).category()); c != valid && c != deviation { + return &labelError{s, "V6"} + } + i += sz + } + return nil +} + +const ( + zwnj = "\u200c" + zwj = "\u200d" +) + +type joinState int8 + +const ( + stateStart joinState = iota + stateVirama + stateBefore + stateBeforeVirama + stateAfter + stateFAIL +) + +var joinStates = [][numJoinTypes]joinState{ + stateStart: { + joiningL: stateBefore, + joiningD: stateBefore, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateVirama, + }, + stateVirama: { + joiningL: stateBefore, + joiningD: stateBefore, + }, + stateBefore: { + joiningL: stateBefore, + joiningD: stateBefore, + joiningT: stateBefore, + joinZWNJ: stateAfter, + joinZWJ: stateFAIL, + joinVirama: stateBeforeVirama, + }, + stateBeforeVirama: { + joiningL: stateBefore, + joiningD: stateBefore, + joiningT: stateBefore, + }, + stateAfter: { + joiningL: stateFAIL, + joiningD: stateBefore, + joiningT: stateAfter, + joiningR: stateStart, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateAfter, // no-op as we can't accept joiners here + }, + stateFAIL: { + 0: stateFAIL, + joiningL: stateFAIL, + joiningD: stateFAIL, + joiningT: stateFAIL, + joiningR: stateFAIL, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateFAIL, + }, +} + +// validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are +// already implicitly satisfied by the overall implementation. +func (p *Profile) validateLabel(s string) (err error) { + if s == "" { + if p.verifyDNSLength { + return &labelError{s, "A4"} + } + return nil + } + if !p.validateLabels { + return nil + } + trie := p.trie // p.validateLabels is only set if trie is set. + if len(s) > 4 && s[2] == '-' && s[3] == '-' { + return &labelError{s, "V2"} + } + if s[0] == '-' || s[len(s)-1] == '-' { + return &labelError{s, "V3"} + } + // TODO: merge the use of this in the trie. + v, sz := trie.lookupString(s) + x := info(v) + if x.isModifier() { + return &labelError{s, "V5"} + } + // Quickly return in the absence of zero-width (non) joiners. 
+	if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 {
+		return nil
+	}
+	st := stateStart
+	for i := 0; ; {
+		jt := x.joinType()
+		if s[i:i+sz] == zwj {
+			jt = joinZWJ
+		} else if s[i:i+sz] == zwnj {
+			jt = joinZWNJ
+		}
+		st = joinStates[st][jt]
+		if x.isViramaModifier() {
+			st = joinStates[st][joinVirama]
+		}
+		if i += sz; i == len(s) {
+			break
+		}
+		v, sz = trie.lookupString(s[i:])
+		x = info(v)
+	}
+	if st == stateFAIL || st == stateAfter {
+		return &labelError{s, "C"}
+	}
+	return nil
+}
+
+func ascii(s string) bool {
+	for i := 0; i < len(s); i++ {
+		if s[i] >= utf8.RuneSelf {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/golang.org/x/net/idna/idna_test.go b/vendor/golang.org/x/net/idna/idna_test.go
new file mode 100644
index 0000000..0b067ca
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/idna_test.go
@@ -0,0 +1,108 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package idna
+
+import (
+	"testing"
+)
+
+var idnaTestCases = [...]struct {
+	ascii, unicode string
+}{
+	// Labels.
+	{"books", "books"},
+	{"xn--bcher-kva", "bücher"},
+
+	// Domains.
+	{"foo--xn--bar.org", "foo--xn--bar.org"},
+	{"golang.org", "golang.org"},
+	{"example.xn--p1ai", "example.рф"},
+	{"xn--czrw28b.tw", "商業.tw"},
+	{"www.xn--mller-kva.de", "www.müller.de"},
+}
+
+func TestIDNA(t *testing.T) {
+	for _, tc := range idnaTestCases {
+		if a, err := ToASCII(tc.unicode); err != nil {
+			t.Errorf("ToASCII(%q): %v", tc.unicode, err)
+		} else if a != tc.ascii {
+			t.Errorf("ToASCII(%q): got %q, want %q", tc.unicode, a, tc.ascii)
+		}
+
+		if u, err := ToUnicode(tc.ascii); err != nil {
+			t.Errorf("ToUnicode(%q): %v", tc.ascii, err)
+		} else if u != tc.unicode {
+			t.Errorf("ToUnicode(%q): got %q, want %q", tc.ascii, u, tc.unicode)
+		}
+	}
+}
+
+func TestIDNASeparators(t *testing.T) {
+	type subCase struct {
+		unicode   string
+		wantASCII string
+		wantErr   bool
+	}
+
+	testCases := []struct {
+		name     string
+		profile  *Profile
+		subCases []subCase
+	}{
+		{
+			name: "Punycode", profile: Punycode,
+			subCases: []subCase{
+				{"example\u3002jp", "xn--examplejp-ck3h", false},
+				{"東京\uFF0Ejp", "xn--jp-l92cn98g071o", false},
+				{"大阪\uFF61jp", "xn--jp-ku9cz72u463f", false},
+			},
+		},
+		{
+			name: "Lookup", profile: Lookup,
+			subCases: []subCase{
+				{"example\u3002jp", "example.jp", false},
+				{"東京\uFF0Ejp", "xn--1lqs71d.jp", false},
+				{"大阪\uFF61jp", "xn--pssu33l.jp", false},
+			},
+		},
+		{
+			name: "Display", profile: Display,
+			subCases: []subCase{
+				{"example\u3002jp", "example.jp", false},
+				{"東京\uFF0Ejp", "xn--1lqs71d.jp", false},
+				{"大阪\uFF61jp", "xn--pssu33l.jp", false},
+			},
+		},
+		{
+			name: "Registration", profile: Registration,
+			subCases: []subCase{
+				{"example\u3002jp", "", true},
+				{"東京\uFF0Ejp", "", true},
+				{"大阪\uFF61jp", "", true},
+			},
+		},
+	}
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			for _, c := range tc.subCases {
+				gotA, err := tc.profile.ToASCII(c.unicode)
+				if c.wantErr {
+					if err == nil {
+						t.Errorf("ToASCII(%q): got no error, but an error expected", c.unicode)
+					}
+				} else {
+					if err != nil {
+						t.Errorf("ToASCII(%q): got err=%v, but no error expected", c.unicode, err)
+					} else if gotA != c.wantASCII {
+						t.Errorf("ToASCII(%q): got %q, want %q", c.unicode, gotA, c.wantASCII)
+					}
+				}
+			}
+		})
+	}
+}
+
+// TODO(nigeltao): test errors, once we've specified when ToASCII and ToUnicode
+// return errors.
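Editorial aside, not part of the vendored patch: TestIDNASeparators above pins down how the four exported profiles treat label separators. The sketch below shows the same behavior through the package's public API. It is a minimal, hypothetical program (the package main wrapper is ours; the import path is taken from the vendor directory), and the expected results in the comments are copied from the test table rather than re-derived.

package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	// Punycode does raw encoding only: U+3002 IDEOGRAPHIC FULL STOP is not
	// mapped to '.', so the whole input is encoded as one label.
	fmt.Println(idna.Punycode.ToASCII("example\u3002jp")) // xn--examplejp-ck3h <nil>

	// Lookup maps separator variants (and case, width, and so on) before
	// encoding, so U+FF0E splits the labels.
	fmt.Println(idna.Lookup.ToASCII("東京\uFF0Ejp")) // xn--1lqs71d.jp <nil>

	// Registration validates rather than maps, and rejects such input
	// (the test above only asserts that the error is non-nil).
	fmt.Println(idna.Registration.ToASCII("example\u3002jp"))
}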
diff --git a/vendor/golang.org/x/net/idna/punycode.go b/vendor/golang.org/x/net/idna/punycode.go new file mode 100644 index 0000000..02c7d59 --- /dev/null +++ b/vendor/golang.org/x/net/idna/punycode.go @@ -0,0 +1,203 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +// This file implements the Punycode algorithm from RFC 3492. + +import ( + "math" + "strings" + "unicode/utf8" +) + +// These parameter values are specified in section 5. +// +// All computation is done with int32s, so that overflow behavior is identical +// regardless of whether int is 32-bit or 64-bit. +const ( + base int32 = 36 + damp int32 = 700 + initialBias int32 = 72 + initialN int32 = 128 + skew int32 = 38 + tmax int32 = 26 + tmin int32 = 1 +) + +func punyError(s string) error { return &labelError{s, "A3"} } + +// decode decodes a string as specified in section 6.2. +func decode(encoded string) (string, error) { + if encoded == "" { + return "", nil + } + pos := 1 + strings.LastIndex(encoded, "-") + if pos == 1 { + return "", punyError(encoded) + } + if pos == len(encoded) { + return encoded[:len(encoded)-1], nil + } + output := make([]rune, 0, len(encoded)) + if pos != 0 { + for _, r := range encoded[:pos-1] { + output = append(output, r) + } + } + i, n, bias := int32(0), initialN, initialBias + for pos < len(encoded) { + oldI, w := i, int32(1) + for k := base; ; k += base { + if pos == len(encoded) { + return "", punyError(encoded) + } + digit, ok := decodeDigit(encoded[pos]) + if !ok { + return "", punyError(encoded) + } + pos++ + i += digit * w + if i < 0 { + return "", punyError(encoded) + } + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if digit < t { + break + } + w *= base - t + if w >= math.MaxInt32/base { + return "", punyError(encoded) + } + } + x := int32(len(output) + 1) + bias = adapt(i-oldI, x, oldI == 0) + n += i / x + i %= x + if n > utf8.MaxRune || len(output) >= 1024 { + return "", punyError(encoded) + } + output = append(output, 0) + copy(output[i+1:], output[i:]) + output[i] = n + i++ + } + return string(output), nil +} + +// encode encodes a string as specified in section 6.3 and prepends prefix to +// the result. +// +// The "while h < length(input)" line in the specification becomes "for +// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes. 
+func encode(prefix, s string) (string, error) {
+	output := make([]byte, len(prefix), len(prefix)+1+2*len(s))
+	copy(output, prefix)
+	delta, n, bias := int32(0), initialN, initialBias
+	b, remaining := int32(0), int32(0)
+	for _, r := range s {
+		if r < 0x80 {
+			b++
+			output = append(output, byte(r))
+		} else {
+			remaining++
+		}
+	}
+	h := b
+	if b > 0 {
+		output = append(output, '-')
+	}
+	for remaining != 0 {
+		m := int32(0x7fffffff)
+		for _, r := range s {
+			if m > r && r >= n {
+				m = r
+			}
+		}
+		delta += (m - n) * (h + 1)
+		if delta < 0 {
+			return "", punyError(s)
+		}
+		n = m
+		for _, r := range s {
+			if r < n {
+				delta++
+				if delta < 0 {
+					return "", punyError(s)
+				}
+				continue
+			}
+			if r > n {
+				continue
+			}
+			q := delta
+			for k := base; ; k += base {
+				t := k - bias
+				if t < tmin {
+					t = tmin
+				} else if t > tmax {
+					t = tmax
+				}
+				if q < t {
+					break
+				}
+				output = append(output, encodeDigit(t+(q-t)%(base-t)))
+				q = (q - t) / (base - t)
+			}
+			output = append(output, encodeDigit(q))
+			bias = adapt(delta, h+1, h == b)
+			delta = 0
+			h++
+			remaining--
+		}
+		delta++
+		n++
+	}
+	return string(output), nil
+}
+
+func decodeDigit(x byte) (digit int32, ok bool) {
+	switch {
+	case '0' <= x && x <= '9':
+		return int32(x - ('0' - 26)), true
+	case 'A' <= x && x <= 'Z':
+		return int32(x - 'A'), true
+	case 'a' <= x && x <= 'z':
+		return int32(x - 'a'), true
+	}
+	return 0, false
+}
+
+func encodeDigit(digit int32) byte {
+	switch {
+	case 0 <= digit && digit < 26:
+		return byte(digit + 'a')
+	case 26 <= digit && digit < 36:
+		return byte(digit + ('0' - 26))
+	}
+	panic("idna: internal error in punycode encoding")
+}
+
+// adapt is the bias adaptation function specified in section 6.1.
+func adapt(delta, numPoints int32, firstTime bool) int32 {
+	if firstTime {
+		delta /= damp
+	} else {
+		delta /= 2
+	}
+	delta += delta / numPoints
+	k := int32(0)
+	for delta > ((base-tmin)*tmax)/2 {
+		delta /= base - tmin
+		k += base
+	}
+	return k + (base-tmin+1)*delta/(delta+skew)
+}
diff --git a/vendor/golang.org/x/net/idna/punycode_test.go b/vendor/golang.org/x/net/idna/punycode_test.go
new file mode 100644
index 0000000..bfec81d
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/punycode_test.go
@@ -0,0 +1,198 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package idna
+
+import (
+	"strings"
+	"testing"
+)
+
+var punycodeTestCases = [...]struct {
+	s, encoded string
+}{
+	{"", ""},
+	{"-", "--"},
+	{"-a", "-a-"},
+	{"-a-", "-a--"},
+	{"a", "a-"},
+	{"a-", "a--"},
+	{"a-b", "a-b-"},
+	{"books", "books-"},
+	{"bücher", "bcher-kva"},
+	{"Hello世界", "Hello-ck1hg65u"},
+	{"ü", "tda"},
+	{"üý", "tdac"},
+
+	// The test cases below come from RFC 3492 section 7.1 with Errata 3026.
+	{
+		// (A) Arabic (Egyptian).
+		"\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644" +
+			"\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F",
+		"egbpdaj6bu4bxfgehfvwxn",
+	},
+	{
+		// (B) Chinese (simplified).
+		"\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587",
+		"ihqwcrb4cv8a8dqg056pqjye",
+	},
+	{
+		// (C) Chinese (traditional).
+		"\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587",
+		"ihqwctvzc91f659drss3x8bo0yb",
+	},
+	{
+		// (D) Czech.
+		"\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074" +
+			"\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D" +
+			"\u0065\u0073\u006B\u0079",
+		"Proprostnemluvesky-uyb24dma41a",
+	},
+	{
+		// (E) Hebrew.
+ "\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8" + + "\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2" + + "\u05D1\u05E8\u05D9\u05EA", + "4dbcagdahymbxekheh6e0a7fei0b", + }, + { + // (F) Hindi (Devanagari). + "\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D" + + "\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939" + + "\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947" + + "\u0939\u0948\u0902", + "i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd", + }, + { + // (G) Japanese (kanji and hiragana). + "\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092" + + "\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B", + "n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa", + }, + { + // (H) Korean (Hangul syllables). + "\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774" + + "\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74" + + "\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C", + "989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j" + + "psd879ccm6fea98c", + }, + { + // (I) Russian (Cyrillic). + "\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E" + + "\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440" + + "\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A" + + "\u0438", + "b1abfaaepdrnnbgefbadotcwatmq2g4l", + }, + { + // (J) Spanish. + "\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070" + + "\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070" + + "\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061" + + "\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070" + + "\u0061\u00F1\u006F\u006C", + "PorqunopuedensimplementehablarenEspaol-fmd56a", + }, + { + // (K) Vietnamese. + "\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B" + + "\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068" + + "\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067" + + "\u0056\u0069\u1EC7\u0074", + "TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g", + }, + { + // (L) 3B. + "\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F", + "3B-ww4c5e180e575a65lsy2b", + }, + { + // (M) -with-SUPER-MONKEYS. + "\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074" + + "\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D" + + "\u004F\u004E\u004B\u0045\u0059\u0053", + "-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n", + }, + { + // (N) Hello-Another-Way-. + "\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F" + + "\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D" + + "\u305D\u308C\u305E\u308C\u306E\u5834\u6240", + "Hello-Another-Way--fc4qua05auwb3674vfr0b", + }, + { + // (O) 2. 
+ "\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032", + "2-u9tlzr9756bt3uc0v", + }, + { + // (P) MajiKoi5 + "\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059" + + "\u308B\u0035\u79D2\u524D", + "MajiKoi5-783gue6qz075azm5e", + }, + { + // (Q) de + "\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0", + "de-jg4avhby1noc0d", + }, + { + // (R) + "\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067", + "d9juau41awczczp", + }, + { + // (S) -> $1.00 <- + "\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020" + + "\u003C\u002D", + "-> $1.00 <--", + }, +} + +func TestPunycode(t *testing.T) { + for _, tc := range punycodeTestCases { + if got, err := decode(tc.encoded); err != nil { + t.Errorf("decode(%q): %v", tc.encoded, err) + } else if got != tc.s { + t.Errorf("decode(%q): got %q, want %q", tc.encoded, got, tc.s) + } + + if got, err := encode("", tc.s); err != nil { + t.Errorf(`encode("", %q): %v`, tc.s, err) + } else if got != tc.encoded { + t.Errorf(`encode("", %q): got %q, want %q`, tc.s, got, tc.encoded) + } + } +} + +var punycodeErrorTestCases = [...]string{ + "decode -", // A sole '-' is invalid. + "decode foo\x00bar", // '\x00' is not in [0-9A-Za-z]. + "decode foo#bar", // '#' is not in [0-9A-Za-z]. + "decode foo\u00A3bar", // '\u00A3' is not in [0-9A-Za-z]. + "decode 9", // "9a" decodes to codepoint \u00A3; "9" is truncated. + "decode 99999a", // "99999a" decodes to codepoint \U0048A3C1, which is > \U0010FFFF. + "decode 9999999999a", // "9999999999a" overflows the int32 calculation. + + "encode " + strings.Repeat("x", 65536) + "\uff00", // int32 overflow. +} + +func TestPunycodeErrors(t *testing.T) { + for _, tc := range punycodeErrorTestCases { + var err error + switch { + case strings.HasPrefix(tc, "decode "): + _, err = decode(tc[7:]) + case strings.HasPrefix(tc, "encode "): + _, err = encode("", tc[7:]) + } + if err == nil { + if len(tc) > 256 { + tc = tc[:100] + "..." + tc[len(tc)-100:] + } + t.Errorf("no error for %s", tc) + } + } +} diff --git a/vendor/golang.org/x/net/idna/tables.go b/vendor/golang.org/x/net/idna/tables.go new file mode 100644 index 0000000..f910b26 --- /dev/null +++ b/vendor/golang.org/x/net/idna/tables.go @@ -0,0 +1,4557 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package idna + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. 
+const UnicodeVersion = "10.0.0" + +var mappings string = "" + // Size: 8176 bytes + "\x00\x01 \x03 ̈\x01a\x03 Ì„\x012\x013\x03 Ì\x03 ̧\x011\x01o\x051â„4\x051â„2" + + "\x053â„4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03â±¥\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + + "\x03 ̆\x03 ̇\x03 ÌŠ\x03 ̨\x03 ̃\x03 Ì‹\x01l\x01x\x04̈Ì\x03 ι\x01;\x05 ̈Ì" + + "\x04Õ¥Ö‚\x04اٴ\x04وٴ\x04Û‡Ù´\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" + + "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" + + "\x06à¹à¸²\x06à»àº²\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" + + "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06à¾à¾µ\x02" + + "в\x02д\x02о\x02Ñ\x02Ñ‚\x02ÑŠ\x02Ñ£\x02æ\x01b\x01d\x01e\x02Ç\x01g\x01i\x01k" + + "\x01m\x01n\x02È£\x01p\x01t\x01u\x02É\x02É‘\x02É™\x02É›\x02Éœ\x02Å‹\x02É”\x02ɯ" + + "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02Ï\x02н\x02É’\x01c\x02É•\x02ð\x01f\x02ÉŸ" + + "\x02É¡\x02É¥\x02ɨ\x02É©\x02ɪ\x02Ê\x02É­\x02ÊŸ\x02ɱ\x02É°\x02ɲ\x02ɳ\x02É´\x02ɵ" + + "\x02ɸ\x02Ê‚\x02ʃ\x02Æ«\x02ʉ\x02ÊŠ\x02Ê‹\x02ÊŒ\x01z\x02Ê\x02Ê‘\x02Ê’\x02θ\x02ss" + + "\x02ά\x02έ\x02ή\x02ί\x02ÏŒ\x02Ï\x02ÏŽ\x05ἀι\x05á¼Î¹\x05ἂι\x05ἃι\x05ἄι\x05ἅι" + + "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" + + "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" + + "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 Ì“Ì€\x05 Ì“Ì\x05 Ì“Í‚\x02Î\x05 ̔̀\x05 Ì”Ì\x05 ̔͂" + + "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" + + "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" + + "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02Ã¥\x02×\x02ב\x02×’" + + "\x02ד\x02Ï€\x051â„7\x051â„9\x061â„10\x051â„3\x052â„3\x051â„5\x052â„5\x053â„5\x054" + + "â„5\x051â„6\x055â„6\x051â„8\x053â„8\x055â„8\x057â„8\x041â„\x02ii\x02iv\x02vi" + + "\x04viii\x02ix\x02xi\x050â„3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" + + "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" + + "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" + + "\x02==\x05â«Ì¸\x02É«\x02ɽ\x02È¿\x02É€\x01.\x04 ã‚™\x04 ã‚š\x06より\x06コト\x05(á„€)\x05" + + "(á„‚)\x05(ᄃ)\x05(á„…)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(á„‹)\x05(á„Œ)\x05(á„Ž)\x05(á„)\x05(á„" + + ")\x05(á„‘)\x05(á„’)\x05(ê°€)\x05(나)\x05(다)\x05(ë¼)\x05(마)\x05(ë°”)\x05(사)\x05(ì•„)" + + "\x05(ìž)\x05(ì°¨)\x05(ì¹´)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" + + "\x05(二)\x05(三)\x05(å››)\x05(五)\x05(å…­)\x05(七)\x05(å…«)\x05(ä¹)\x05(å)\x05(月)" + + "\x05(ç«)\x05(æ°´)\x05(木)\x05(金)\x05(土)\x05(æ—¥)\x05(æ ª)\x05(有)\x05(社)\x05(å)" + + "\x05(特)\x05(財)\x05(ç¥)\x05(労)\x05(代)\x05(呼)\x05(å­¦)\x05(監)\x05(ä¼)\x05(資)" + + "\x05(å”)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" + + "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주ì˜\x0236" + + "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" + + "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" + + "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" + + "インãƒ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" + + "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" + + "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローãƒ\x09ケース\x09コルナ\x09コーãƒ\x0cサイクル\x0fサンãƒ" + + "ーム\x0cシリング\x09センãƒ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ãƒã‚¤ãƒ„" + + "\x0fパーセント\x09パーツ\x0cãƒãƒ¼ãƒ¬ãƒ«\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" + + "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cãƒã‚¤" + + 
"ント\x09ボルト\x06ホン\x09ãƒãƒ³ãƒ‰\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッãƒ\x09マルク\x0fマ" + + "ンション\x0cミクロン\x06ミリ\x0fミリãƒãƒ¼ãƒ«\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" + + "\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" + + "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" + + "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" + + "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06å¹³æˆ\x06昭和\x06大正\x06明治\x0cæ ª" + + "å¼ä¼šç¤¾\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" + + "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" + + "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" + + "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" + + "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" + + "wb\x05v∕m\x05a∕m\x041æ—¥\x042æ—¥\x043æ—¥\x044æ—¥\x045æ—¥\x046æ—¥\x047æ—¥\x048æ—¥\x049æ—¥" + + "\x0510æ—¥\x0511æ—¥\x0512æ—¥\x0513æ—¥\x0514æ—¥\x0515æ—¥\x0516æ—¥\x0517æ—¥\x0518æ—¥\x0519æ—¥" + + "\x0520æ—¥\x0521æ—¥\x0522æ—¥\x0523æ—¥\x0524æ—¥\x0525æ—¥\x0526æ—¥\x0527æ—¥\x0528æ—¥\x0529æ—¥" + + "\x0530æ—¥\x0531æ—¥\x02ÑŒ\x02ɦ\x02ɬ\x02Êž\x02ʇ\x02Å“\x04𤋮\x04𢡊\x04𢡄\x04ð£•\x04𥉉" + + "\x04ð¥³\x04𧻓\x02ff\x02fi\x02fl\x02st\x04Õ´Õ¶\x04Õ´Õ¥\x04Õ´Õ«\x04Õ¾Õ¶\x04Õ´Õ­\x04×™Ö´" + + "\x04ײַ\x02×¢\x02×”\x02×›\x02ל\x02×\x02ר\x02ת\x04ש×\x04שׂ\x06שּ×\x06שּׂ\x04×" + + "Ö·\x04×Ö¸\x04×Ö¼\x04בּ\x04×’Ö¼\x04דּ\x04×”Ö¼\x04וּ\x04×–Ö¼\x04טּ\x04×™Ö¼\x04ךּ\x04" + + "×›Ö¼\x04לּ\x04מּ\x04× Ö¼\x04סּ\x04×£Ö¼\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" + + "\x04וֹ\x04בֿ\x04×›Ö¿\x04פֿ\x04×ל\x02Ù±\x02Ù»\x02Ù¾\x02Ú€\x02Ùº\x02Ù¿\x02Ù¹\x02Ú¤" + + "\x02Ú¦\x02Ú„\x02Úƒ\x02Ú†\x02Ú‡\x02Ú\x02ÚŒ\x02ÚŽ\x02Úˆ\x02Ú˜\x02Ú‘\x02Ú©\x02Ú¯\x02Ú³" + + "\x02Ú±\x02Úº\x02Ú»\x02Û€\x02Û\x02Ú¾\x02Û’\x02Û“\x02Ú­\x02Û‡\x02Û†\x02Ûˆ\x02Û‹\x02Û…" + + "\x02Û‰\x02Û\x02Ù‰\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئÛ\x04ئى\x02ÛŒ\x04" + + "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" + + "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" + + "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" + + "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04Ùج\x04ÙØ­\x04ÙØ®\x04ÙÙ…" + + "\x04ÙÙ‰\x04ÙÙŠ\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" + + "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" + + "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" + + "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ÙÙ‘\x05" + + " ÙŽÙ‘\x05 ÙÙ‘\x05 ÙÙ‘\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" + + "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" + + "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" + + "\x06Ù€ÙŽÙ‘\x06Ù€ÙÙ‘\x06Ù€ÙÙ‘\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" + + "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" + + "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" + + "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" + + "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" + + "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" + + "\x06Ùخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" + + "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" + + "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" + + 
"\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" + + "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" + + "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06Ùمي\x06بحي\x06سخي" + + "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" + + "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل جلاله\x08ریال\x01,\x01:\x01!" + + "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" + + "\x01%\x01@\x04ـً\x04Ù€ÙŽ\x04Ù€Ù\x04Ù€Ù\x04ـّ\x04ـْ\x02Ø¡\x02Ø¢\x02Ø£\x02ؤ\x02Ø¥" + + "\x02ئ\x02ا\x02ب\x02Ø©\x02ت\x02Ø«\x02ج\x02Ø­\x02Ø®\x02د\x02Ø°\x02ر\x02ز\x02س" + + "\x02Ø´\x02ص\x02ض\x02Ø·\x02ظ\x02ع\x02غ\x02Ù\x02Ù‚\x02Ùƒ\x02Ù„\x02Ù…\x02Ù†\x02Ù‡" + + "\x02Ùˆ\x02ÙŠ\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" + + "\x02£\x02¬\x02¦\x02Â¥\x08ð…—ð…¥\x08ð…˜ð…¥\x0cð…˜ð…¥ð…®\x0cð…˜ð…¥ð…¯\x0cð…˜ð…¥ð…°\x0cð…˜ð…¥ð…±\x0cð…˜ð…¥ð…²\x08ð†¹" + + "ð…¥\x08ð†ºð…¥\x0cð†¹ð…¥ð…®\x0cð†ºð…¥ð…®\x0cð†¹ð…¥ð…¯\x0cð†ºð…¥ð…¯\x02ı\x02È·\x02α\x02ε\x02ζ\x02η\x02" + + "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02Ï„\x02Ï…\x02ψ\x03∇\x03∂\x02Ï\x02Ù®\x02Ú¡" + + "\x02Ù¯\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" + + "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" + + "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" + + "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" + + "c\x02mc\x02md\x02dj\x06ã»ã‹\x06ココ\x03サ\x03手\x03å­—\x03åŒ\x03デ\x03二\x03多\x03解" + + "\x03天\x03交\x03映\x03ç„¡\x03æ–™\x03å‰\x03後\x03å†\x03æ–°\x03åˆ\x03終\x03生\x03販\x03声" + + "\x03å¹\x03æ¼”\x03投\x03æ•\x03一\x03三\x03éŠ\x03å·¦\x03中\x03å³\x03指\x03èµ°\x03打\x03ç¦" + + "\x03空\x03åˆ\x03満\x03有\x03月\x03申\x03割\x03å–¶\x03é…\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" + + "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔å‹ã€•\x09〔敗〕\x03å¾—\x03å¯\x03丽\x03丸\x03ä¹\x03ä½ \x03" + + "ä¾®\x03ä¾»\x03倂\x03åº\x03å‚™\x03僧\x03åƒ\x03ã’ž\x03å…\x03å…”\x03å…¤\x03å…·\x03ã’¹\x03å…§\x03" + + "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03ã“Ÿ\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" + + "勤\x03勺\x03包\x03匆\x03北\x03å‰\x03å‘\x03åš\x03å³\x03å½\x03å¿\x03ç°\x03åŠ\x03åŸ\x03" + + "å«\x03å±\x03å†\x03å’ž\x03å¸\x03呈\x03周\x03å’¢\x03哶\x03å”\x03å•“\x03å•£\x03å–„\x03å–™\x03" + + "å–«\x03å–³\x03å—‚\x03圖\x03嘆\x03圗\x03噑\x03å™´\x03切\x03壮\x03城\x03埴\x03å \x03åž‹\x03" + + "å ²\x03å ±\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03ã›®\x03" + + "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03å°†\x03å°¢\x03ãž\x03å± \x03å±®\x03å³€\x03å²\x03" + + "嵃\x03åµ®\x03嵫\x03åµ¼\x03å·¡\x03å·¢\x03ã ¯\x03å·½\x03帨\x03帽\x03幩\x03ã¡¢\x03㡼\x03庰\x03" + + "庳\x03庶\x03廊\x03廾\x03èˆ\x03å¼¢\x03㣇\x03å½¢\x03彫\x03㣣\x03徚\x03å¿\x03å¿—\x03忹\x03" + + "æ‚\x03㤺\x03㤜\x03æ‚”\x03惇\x03æ…ˆ\x03æ…Œ\x03æ…Ž\x03æ…º\x03憎\x03憲\x03憤\x03憯\x03懞\x03" + + "懲\x03懶\x03æˆ\x03戛\x03æ‰\x03抱\x03æ‹”\x03æ\x03挽\x03拼\x03æ¨\x03掃\x03æ¤\x03æ¢\x03" + + "æ…\x03掩\x03㨮\x03æ‘©\x03摾\x03æ’\x03æ‘·\x03㩬\x03æ•\x03敬\x03æ—£\x03書\x03晉\x03㬙\x03" + + "æš‘\x03㬈\x03㫤\x03冒\x03冕\x03最\x03æšœ\x03è‚­\x03ä™\x03朗\x03望\x03朡\x03æž\x03æ“\x03" + + "ã­‰\x03柺\x03æž…\x03æ¡’\x03梅\x03梎\x03æ Ÿ\x03椔\x03ã®\x03楂\x03榣\x03槪\x03檨\x03æ«›\x03" + + "ã°˜\x03次\x03æ­”\x03㱎\x03æ­²\x03殟\x03殺\x03æ®»\x03汎\x03沿\x03æ³\x03汧\x03æ´–\x03æ´¾\x03" + + "æµ·\x03æµ\x03浩\x03浸\x03涅\x03æ´´\x03港\x03æ¹®\x03ã´³\x03滋\x03滇\x03æ·¹\x03æ½®\x03濆\x03" + + "瀹\x03瀞\x03瀛\x03㶖\x03çŠ\x03ç½\x03ç·\x03ç‚­\x03ç……\x03熜\x03爨\x03爵\x03ç‰\x03犀\x03" + + "犕\x03çº\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03ç‘œ\x03瑱\x03ç’…\x03ç“Š\x03ã¼›\x03甤\x03甾\x03" + + "ç•°\x03ç˜\x03㿼\x03䀈\x03ç›´\x03眞\x03真\x03çŠ\x03䀹\x03çž‹\x03ä†\x03ä‚–\x03ç¡Ž\x03碌\x03" + + 
"磌\x03䃣\x03祖\x03ç¦\x03秫\x03䄯\x03ç©€\x03ç©Š\x03ç©\x03䈂\x03篆\x03築\x03䈧\x03ç³’\x03" + + "䊠\x03糨\x03ç³£\x03ç´€\x03çµ£\x03äŒ\x03ç·‡\x03縂\x03ç¹…\x03䌴\x03ä™\x03罺\x03羕\x03翺\x03" + + "者\x03è \x03è°\x03ä•\x03育\x03脃\x03ä‹\x03脾\x03媵\x03舄\x03辞\x03ä‘«\x03芑\x03芋\x03" + + "èŠ\x03劳\x03花\x03芳\x03芽\x03苦\x03è‹¥\x03èŒ\x03è£\x03莭\x03茣\x03莽\x03è§\x03è‘—\x03" + + "è“\x03èŠ\x03èŒ\x03èœ\x03䔫\x03蓱\x03蓳\x03è”–\x03蕤\x03ä•\x03ä•¡\x03ä•«\x03è™\x03虜\x03" + + "虧\x03虩\x03èš©\x03蚈\x03蜎\x03蛢\x03è¹\x03蜨\x03è«\x03螆\x03蟡\x03è \x03ä—¹\x03è¡ \x03" + + "è¡£\x03裗\x03裞\x03䘵\x03裺\x03ã’»\x03äš¾\x03䛇\x03誠\x03è«­\x03變\x03豕\x03貫\x03è³\x03" + + "è´›\x03èµ·\x03è·‹\x03趼\x03è·°\x03è»”\x03輸\x03é‚”\x03郱\x03é„‘\x03é„›\x03鈸\x03é‹—\x03鋘\x03" + + "鉼\x03é¹\x03é•\x03é–‹\x03䦕\x03é–·\x03䧦\x03雃\x03嶲\x03霣\x03ä©®\x03䩶\x03韠\x03䪲\x03" + + "é ‹\x03é ©\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03é±€\x03é³½\x03䳎\x03ä³­\x03" + + "鵧\x03䳸\x03麻\x03äµ–\x03黹\x03黾\x03é¼…\x03é¼\x03é¼–\x03é¼»" + +var xorData string = "" + // Size: 4855 bytes + "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + + "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" + + "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" + + "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" + + "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" + + "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" + + "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" + + "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" + + "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" + + "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" + + "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" + + "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" + + "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" + + "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" + + "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" + + "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" + + "\x01\x0c#\x03Ê \x9d\x03Ê£\x9c\x03Ê¢\x9f\x03Ê¥\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" + + "\x03Ê©\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" + + "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" + + "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" + + "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" + + "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" + + "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" + + "\x9c\x03Ø“\x89\x03ß”\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" + + "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" + + "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" + + "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" + + "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" + + "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" + + "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" + + "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" + + "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" + + "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" + + 
"\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" + + "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" + + "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" + + "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " + + "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" + + "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" + + "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" + + "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" + + "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" + + ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" + + "\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" + + "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" + + "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" + + "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" + + "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" + + "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" + + "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" + + "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" + + "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" + + "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" + + "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" + + "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" + + "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" + + "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" + + "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" + + "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" + + "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" + + "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" + + "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" + + "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" + + "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" + + "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" + + "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" + + "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" + + "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" + + "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" + + "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" + + "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" + + "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" + + "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" + + "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" + + "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" + + "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" + + "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" + + "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" + + 
"\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" + + "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" + + "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" + + "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" + + "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" + + "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" + + "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" + + "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" + + "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" + + "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" + + "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," + + "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" + + "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" + + "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" + + "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" + + ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" + + "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" + + "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" + + "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" + + "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" + + "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" + + "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" + + "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" + + "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" + + "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" + + "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" + + "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" + + "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" + + "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" + + "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" + + "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" + + "\x08\x1a\x0a\x03\x07\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" + + "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" + + "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" + + "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" + + "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" + + "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" + + "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" + + "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" + + "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" + + "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" + + "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" + + "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" + + "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" + + "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" + + 
"\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" + + "\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" + + "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" + + "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." + + "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" + + "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" + + "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " + + "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" + + "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" + + "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" + + "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" + + "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" + + "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" + + "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," + + "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" + + "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" + + "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" + + "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" + + "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" + + "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" + + "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" + + "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" + + "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" + + "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" + + "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" + + "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" + + "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" + + "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" + + "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" + + "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" + + "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" + + "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" + + "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" + + "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" + + "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" + + "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" + + "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" + + "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" + + "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" + + "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" + + "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" + + "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" + + "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" + + "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" + + "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" + + 
"\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" + + "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" + + "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" + + "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" + + "\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" + + "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" + + "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" + + "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," + + "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" + + "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" + + "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" + + "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" + + "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" + + "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" + + "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" + + "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" + + "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" + + "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" + + "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" + + "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" + + "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" + + "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" + + "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" + + "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" + + "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" + + "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" + + "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" + + "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" + + "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" + + "\x04\x03\x0c?\x05\x03\x0c" + + "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" + + "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" + + "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" + + "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" + + "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" + + "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" + + "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" + + "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" + + "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" + + "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" + + "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" + + "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" + + "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" + + "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" + + "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" 
+ + "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" + + "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" + + "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" + + "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" + + "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" + + "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" + + "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" + + "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" + + "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" + + "\x05\x22\x05\x03\x050\x1d" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. 
+ case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// idnaTrie. Total size: 29052 bytes (28.37 KiB). Checksum: ef06e7ecc26f36dd. +type idnaTrie struct{} + +func newIdnaTrie(i int) *idnaTrie { + return &idnaTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 125: + return uint16(idnaValues[n<<6+uint32(b)]) + default: + n -= 125 + return uint16(idnaSparse.lookup(n, b)) + } +} + +// idnaValues: 127 blocks, 8128 entries, 16256 bytes +// The third block is the zero block. 
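+// A worked example of the indexing scheme (a sketch derived from the
+// definitions above, not generated data): ASCII bypasses the trie entirely,
+// so a lookup of "A" returns idnaValues[0x41] == 0xe105 with width 1 (see
+// Block 0x1 below). For multi-byte UTF-8, lookupValue(n, b) reads 64-entry
+// blocks as idnaValues[n<<6+uint32(b)], and block numbers 125 and up are
+// delegated to idnaSparse:
+//
+//	t := newIdnaTrie(0)
+//	v, sz := t.lookupString("A") // v == 0xe105, sz == 1
+//	_, _ = v, sz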
+var idnaValues = [8128]uint16{ + // Block 0x0, offset 0x0 + 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, + 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, + 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080, + 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080, + 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080, + 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080, + 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080, + 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080, + 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008, + 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080, + 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080, + // Block 0x1, offset 0x40 + 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105, + 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105, + 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105, + 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105, + 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080, + 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008, + 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008, + 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008, + 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008, + 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080, + 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, + 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, + 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, + 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, + 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, + 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, + 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018, + 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a, + 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005, + 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018, + 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018, + // Block 0x4, offset 0x100 + 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, + 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, + 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008, + 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008, + 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008, + 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, + 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 
0x129: 0x0008, + 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, + 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, + 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, + 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199, + // Block 0x5, offset 0x140 + 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, + 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008, + 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, + 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, + 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, + 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008, + 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008, + 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, + 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, + 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, + 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9, + // Block 0x6, offset 0x180 + 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, + 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, + 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d, + 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d, + 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155, + 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008, + 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d, + 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd, + 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d, + 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, + 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9, + 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, + 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, + 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, + 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, + 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008, + 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008, + 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008, + 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008, + 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008, + 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008, + // Block 0x8, offset 0x200 + 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008, + 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 
0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008, + 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008, + 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008, + 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008, + 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008, + 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, + 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, + 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, + 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d, + 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008, + // Block 0x9, offset 0x240 + 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, + 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, + 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, + 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, + 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a, + 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369, + 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, + 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, + 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, + 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, + 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, + // Block 0xa, offset 0x280 + 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, + 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308, + 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308, + 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308, + 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308, + 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308, + 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308, + 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308, + 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, + 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, + 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2, + 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, + 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, + 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105, + 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105, + 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d, + 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d, + 0x2ea: 0x049d, 
0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008, + 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008, + 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008, + 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008, + // Block 0xc, offset 0x300 + 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008, + 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008, + 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd, + 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008, + 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008, + 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008, + 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008, + 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008, + 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd, + 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008, + 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d, + // Block 0xd, offset 0x340 + 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008, + 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008, + 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008, + 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008, + 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008, + 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008, + 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008, + 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008, + 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008, + 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, + 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, + // Block 0xe, offset 0x380 + 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308, + 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008, + 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, + 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, + 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, + 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008, + 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008, + 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008, + 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008, + 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008, + 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d, + 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 
0x3cb: 0xe03d, + 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008, + 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008, + 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008, + 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008, + 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008, + 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008, + 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008, + 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008, + 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008, + // Block 0x10, offset 0x400 + 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008, + 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008, + 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008, + 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008, + 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008, + 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008, + 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008, + 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008, + 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5, + 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, + 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, + // Block 0x11, offset 0x440 + 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840, + 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818, + 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308, + 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308, + 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040, + 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08, + 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08, + 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08, + 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08, + 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08, + 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08, + // Block 0x12, offset 0x480 + 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08, + 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308, + 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308, + 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308, + 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308, + 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808, + 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808, + 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 
0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08, + 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429, + 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, + 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08, + 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08, + 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08, + 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308, + 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840, + 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308, + 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018, + 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08, + 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008, + 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08, + 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08, + // Block 0x14, offset 0x500 + 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818, + 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818, + 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308, + 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08, + 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08, + 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08, + 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08, + 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08, + 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308, + 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308, + 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308, + // Block 0x15, offset 0x540 + 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08, + 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08, + 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08, + 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0808, 0x557: 0x0808, + 0x558: 0x0808, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040, + 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08, + 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08, + 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040, + 0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040, + 0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 0x0040, 0x57b: 0x0040, + 0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040, + // Block 0x16, offset 0x580 + 0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 0x3308, 0x584: 0x3308, 0x585: 0x3308, + 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008, + 
0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308, + 0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308, + 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1, + 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308, + 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008, + 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008, + 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008, + 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008, + 0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008, + 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008, + 0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040, + 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008, + 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008, + 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008, + 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040, + 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, + 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040, + 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040, + 0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008, + // Block 0x18, offset 0x600 + 0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040, + 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008, + 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040, + 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008, + 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1, + 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308, + 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008, + 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, + 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018, + 0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018, + 0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x0040, 0x63f: 0x0040, + // Block 0x19, offset 0x640 + 0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008, + 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040, + 0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040, + 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008, + 0x658: 0x0008, 0x659: 0x0008, 0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008, + 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008, + 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040, + 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 
0x0008, 0x66e: 0x0008, 0x66f: 0x0008, + 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008, + 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, + 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008, + // Block 0x1a, offset 0x680 + 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040, + 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308, + 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308, + 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040, + 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040, + 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040, + 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008, + 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, + 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308, + 0x6b6: 0x0040, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040, + 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008, + 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008, + 0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008, + 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008, + 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008, + 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008, + 0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040, + 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, + 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008, + 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040, + 0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008, + // Block 0x1c, offset 0x700 + 0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308, + 0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008, + 0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040, + 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040, + 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040, + 0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308, + 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008, + 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, + 0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040, + 0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 0x3308, + 0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308, + // Block 0x1d, offset 0x740 + 0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008, + 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008, + 0x74c: 0x0008, 
0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040, + 0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008, + 0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008, + 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008, + 0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040, + 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008, + 0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008, + 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040, + 0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308, + // Block 0x1e, offset 0x780 + 0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040, + 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008, + 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040, + 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x0040, 0x796: 0x3308, 0x797: 0x3008, + 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9, + 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308, + 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008, + 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008, + 0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018, + 0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040, + 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008, + 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040, + 0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040, + 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040, + 0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040, + 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008, + 0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008, + 0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008, + 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 0x7f5: 0x0008, + 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040, + 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008, + // Block 0x20, offset 0x800 + 0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040, + 0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308, + 0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040, + 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040, + 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040, + 0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308, + 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008, + 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 
0x0008, 0x82f: 0x0008, + 0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040, + 0x836: 0x0040, 0x837: 0x0040, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 0x0018, + 0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018, + // Block 0x21, offset 0x840 + 0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0040, 0x845: 0x0008, + 0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008, + 0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040, + 0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008, + 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008, + 0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008, + 0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040, + 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, + 0x870: 0x0008, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008, + 0x876: 0x0008, 0x877: 0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040, + 0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308, + // Block 0x22, offset 0x880 + 0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040, + 0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008, + 0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040, + 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040, + 0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040, + 0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308, + 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008, + 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, + 0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040, + 0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040, + 0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040, + 0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008, + 0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040, + 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008, + 0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018, + 0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308, + 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008, + 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008, + 0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018, + 0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008, + 0x8fc: 0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008, + // Block 0x24, offset 0x900 + 0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040, + 0x906: 0x0040, 0x907: 0x0008, 0x908: 0x0008, 0x909: 0x0040, 0x90a: 0x0008, 0x90b: 0x0040, + 0x90c: 0x0040, 0x90d: 0x0008, 
0x90e: 0x0040, 0x90f: 0x0040, 0x910: 0x0040, 0x911: 0x0040, + 0x912: 0x0040, 0x913: 0x0040, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008, + 0x918: 0x0040, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008, + 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0040, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008, + 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0040, 0x929: 0x0040, + 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0040, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008, + 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308, + 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x0040, 0x93b: 0x3308, + 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040, + // Block 0x25, offset 0x940 + 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008, + 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, + 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, + 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79, + 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008, + 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008, + 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9, + 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040, + 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59, + 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308, + 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008, + // Block 0x26, offset 0x980 + 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018, + 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008, + 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308, + 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308, + 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11, + 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308, + 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308, + 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308, + 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308, + 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308, + 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008, + 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008, + 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008, + 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008, + 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 0x9dc: 0x0008, 0x9dd: 0x0008, + 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008, + 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008, + 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 
0x0008, + 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41, + 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008, + 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269, + // Block 0x28, offset 0xa00 + 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1, + 0xa06: 0x059d, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011, + 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041, + 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05b5, 0xa15: 0x05b5, 0xa16: 0x0f99, 0xa17: 0x0fa9, + 0xa18: 0x0fb9, 0xa19: 0x059d, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05cd, 0xa1d: 0x1099, + 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269, + 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1, + 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008, + 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008, + 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008, + 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008, + // Block 0x29, offset 0xa40 + 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008, + 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008, + 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008, + 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008, + 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169, + 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9, + 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05e5, 0xa68: 0x1239, 0xa69: 0x1251, + 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9, + 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359, + 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x05fd, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1, + 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429, + // Block 0x2a, offset 0xa80 + 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008, + 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008, + 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008, + 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008, + 0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008, + 0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008, + 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008, + 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008, + 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008, + 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008, + 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008, + // Block 0x2b, offset 0xac0 + 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008, + 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008, + 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 
0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008, + 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008, + 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x0615, 0xadb: 0x0635, 0xadc: 0x0008, 0xadd: 0x0008, + 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008, + 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008, + 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008, + 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008, + 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008, + 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008, + // Block 0x2c, offset 0xb00 + 0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008, + 0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045, + 0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008, + 0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008, + 0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045, + 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008, + 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045, + 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045, + 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489, + 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1, + 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040, + // Block 0x2d, offset 0xb40 + 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1, + 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591, + 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1, + 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1, + 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771, + 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 0xb63: 0x1891, + 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831, + 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951, + 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040, + 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x064d, 0xb7b: 0x1459, + 0xb7c: 0x19b1, 0xb7d: 0x0666, 0xb7e: 0x1a31, 0xb7f: 0x0686, + // Block 0x2e, offset 0xb80 + 0xb80: 0x06a6, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040, + 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06c5, 0xb89: 0x1471, 0xb8a: 0x06dd, 0xb8b: 0x1489, + 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008, + 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008, + 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x06f5, 0xb9b: 0x14a1, 0xb9c: 0x0040, 0xb9d: 0x1bd2, + 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61, + 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045, + 0xbaa: 0x070d, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa, + 0xbb0: 
0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040, + 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x0725, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9, + 0xbbc: 0x1ce9, 0xbbd: 0x073e, 0xbbe: 0x075e, 0xbbf: 0x0040, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a, + 0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0, + 0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d, + 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x077e, + 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018, + 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018, + 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040, + 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a, + 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018, + 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, + 0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x079e, 0xbff: 0x0018, + // Block 0x30, offset 0xc00 + 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018, + 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018, + 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018, + 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9, + 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018, + 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340, + 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040, + 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340, + 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61, + 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07bd, + 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71, + // Block 0x31, offset 0xc40 + 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61, + 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07d5, + 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09, + 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359, + 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040, + 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018, + 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018, + 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018, + 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018, + 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018, + 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 0x0018, 0xc7f: 0x0018, + // Block 0x32, offset 0xc80 + 0xc80: 0x07ee, 0xc81: 0x080e, 0xc82: 0x1159, 0xc83: 0x082d, 0xc84: 0x0018, 0xc85: 0x084e, + 0xc86: 0x086e, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x088d, 0xc8a: 0x0f31, 0xc8b: 0x0249, + 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 
0xc90: 0x0f41, 0xc91: 0x0f41, + 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018, + 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269, + 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08ad, 0xca2: 0x2061, 0xca3: 0x0018, + 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018, + 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09, + 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9, + 0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08cd, + 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x08ed, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9, + 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018, + 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151, + 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279, + 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399, + 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x0905, 0xce3: 0x2439, + 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x0925, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369, + 0xcea: 0x24a9, 0xceb: 0x0945, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61, + 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x0965, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451, + 0xcf6: 0x0985, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09a5, + 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61, + // Block 0x34, offset 0xd00 + 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018, + 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040, + 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040, + 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040, + 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040, + 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51, + 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601, + 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691, + 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 0xd34: 0x0a06, 0xd35: 0x0a26, + 0xd36: 0x0a46, 0xd37: 0x0a66, 0xd38: 0x0a86, 0xd39: 0x0aa6, 0xd3a: 0x0ac6, 0xd3b: 0x0ae6, + 0xd3c: 0x0b06, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a, + // Block 0x35, offset 0xd40 + 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a, + 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040, + 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040, + 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040, + 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b26, 0xd5d: 0x0b46, + 0xd5e: 0x0b66, 0xd5f: 0x0b86, 0xd60: 0x0ba6, 0xd61: 0x0bc6, 0xd62: 0x0be6, 0xd63: 0x0c06, + 0xd64: 0x0c26, 0xd65: 0x0c46, 0xd66: 0x0c66, 0xd67: 0x0c86, 0xd68: 0x0ca6, 0xd69: 0x0cc6, + 0xd6a: 0x0ce6, 0xd6b: 0x0d06, 0xd6c: 0x0d26, 0xd6d: 0x0d46, 0xd6e: 0x0d66, 0xd6f: 0x0d86, + 0xd70: 0x0da6, 0xd71: 
0x0dc6, 0xd72: 0x0de6, 0xd73: 0x0e06, 0xd74: 0x0e26, 0xd75: 0x0e46, + 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199, + 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259, + // Block 0x36, offset 0xd80 + 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99, + 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089, + 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9, + 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249, + 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 0xd9c: 0x0f61, 0xd9d: 0x0f71, + 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9, + 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1, + 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018, + 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018, + 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018, + 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008, + 0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008, + 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008, + 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008, + 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008, + 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ebd, + 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d, + 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9, + 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d, + 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008, + 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9, + // Block 0x38, offset 0xe00 + 0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008, + 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008, + 0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008, + 0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008, + 0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008, + 0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008, + 0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018, + 0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308, + 0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040, + 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018, + 0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018, + // Block 0x39, offset 0xe40 + 0xe40: 0x26fd, 0xe41: 0x271d, 0xe42: 0x273d, 0xe43: 0x275d, 0xe44: 0x277d, 0xe45: 0x279d, + 0xe46: 0x27bd, 0xe47: 0x27dd, 0xe48: 0x27fd, 0xe49: 0x281d, 0xe4a: 0x283d, 0xe4b: 0x285d, + 0xe4c: 0x287d, 0xe4d: 0x289d, 0xe4e: 0x28bd, 0xe4f: 0x28dd, 0xe50: 0x28fd, 
0xe51: 0x291d, + 0xe52: 0x293d, 0xe53: 0x295d, 0xe54: 0x297d, 0xe55: 0x299d, 0xe56: 0x0040, 0xe57: 0x0040, + 0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040, + 0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040, + 0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040, + 0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040, + 0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040, + 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040, + 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040, + // Block 0x3a, offset 0xe80 + 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008, + 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018, + 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018, + 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018, + 0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018, + 0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018, + 0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018, + 0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018, + 0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018, + 0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29bd, 0xeb9: 0x29dd, 0xeba: 0x29fd, 0xebb: 0x0018, + 0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018, + // Block 0x3b, offset 0xec0 + 0xec0: 0x2b3d, 0xec1: 0x2b5d, 0xec2: 0x2b7d, 0xec3: 0x2b9d, 0xec4: 0x2bbd, 0xec5: 0x2bdd, + 0xec6: 0x2bdd, 0xec7: 0x2bdd, 0xec8: 0x2bfd, 0xec9: 0x2bfd, 0xeca: 0x2bfd, 0xecb: 0x2bfd, + 0xecc: 0x2c1d, 0xecd: 0x2c1d, 0xece: 0x2c1d, 0xecf: 0x2c3d, 0xed0: 0x2c5d, 0xed1: 0x2c5d, + 0xed2: 0x2a7d, 0xed3: 0x2a7d, 0xed4: 0x2c5d, 0xed5: 0x2c5d, 0xed6: 0x2c7d, 0xed7: 0x2c7d, + 0xed8: 0x2c5d, 0xed9: 0x2c5d, 0xeda: 0x2a7d, 0xedb: 0x2a7d, 0xedc: 0x2c5d, 0xedd: 0x2c5d, + 0xede: 0x2c3d, 0xedf: 0x2c3d, 0xee0: 0x2c9d, 0xee1: 0x2c9d, 0xee2: 0x2cbd, 0xee3: 0x2cbd, + 0xee4: 0x0040, 0xee5: 0x2cdd, 0xee6: 0x2cfd, 0xee7: 0x2d1d, 0xee8: 0x2d1d, 0xee9: 0x2d3d, + 0xeea: 0x2d5d, 0xeeb: 0x2d7d, 0xeec: 0x2d9d, 0xeed: 0x2dbd, 0xeee: 0x2ddd, 0xeef: 0x2dfd, + 0xef0: 0x2e1d, 0xef1: 0x2e3d, 0xef2: 0x2e3d, 0xef3: 0x2e5d, 0xef4: 0x2e7d, 0xef5: 0x2e7d, + 0xef6: 0x2e9d, 0xef7: 0x2ebd, 0xef8: 0x2e5d, 0xef9: 0x2edd, 0xefa: 0x2efd, 0xefb: 0x2edd, + 0xefc: 0x2e5d, 0xefd: 0x2f1d, 0xefe: 0x2f3d, 0xeff: 0x2f5d, + // Block 0x3c, offset 0xf00 + 0xf00: 0x2f7d, 0xf01: 0x2f9d, 0xf02: 0x2cfd, 0xf03: 0x2cdd, 0xf04: 0x2fbd, 0xf05: 0x2fdd, + 0xf06: 0x2ffd, 0xf07: 0x301d, 0xf08: 0x303d, 0xf09: 0x305d, 0xf0a: 0x307d, 0xf0b: 0x309d, + 0xf0c: 0x30bd, 0xf0d: 0x30dd, 0xf0e: 0x30fd, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018, + 0xf12: 0x311d, 0xf13: 0x313d, 0xf14: 0x315d, 0xf15: 0x317d, 0xf16: 0x319d, 0xf17: 0x31bd, + 0xf18: 0x31dd, 0xf19: 0x31fd, 0xf1a: 0x321d, 0xf1b: 0x323d, 0xf1c: 0x315d, 0xf1d: 0x325d, + 0xf1e: 0x327d, 0xf1f: 0x329d, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008, + 0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008, + 0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008, + 0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 
0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008, + 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0040, + 0xf3c: 0x0040, 0xf3d: 0x0040, 0xf3e: 0x0040, 0xf3f: 0x0040, + // Block 0x3d, offset 0xf40 + 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32bd, 0xf45: 0x32dd, + 0xf46: 0x32fd, 0xf47: 0x331d, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018, + 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x333d, 0xf51: 0x3761, + 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1, + 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881, + 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x335d, 0xf61: 0x337d, 0xf62: 0x339d, 0xf63: 0x33bd, + 0xf64: 0x33dd, 0xf65: 0x33dd, 0xf66: 0x33fd, 0xf67: 0x341d, 0xf68: 0x343d, 0xf69: 0x345d, + 0xf6a: 0x347d, 0xf6b: 0x349d, 0xf6c: 0x34bd, 0xf6d: 0x34dd, 0xf6e: 0x34fd, 0xf6f: 0x351d, + 0xf70: 0x353d, 0xf71: 0x355d, 0xf72: 0x357d, 0xf73: 0x359d, 0xf74: 0x35bd, 0xf75: 0x35dd, + 0xf76: 0x35fd, 0xf77: 0x361d, 0xf78: 0x363d, 0xf79: 0x365d, 0xf7a: 0x367d, 0xf7b: 0x369d, + 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36bd, 0xf7f: 0x0018, + // Block 0x3e, offset 0xf80 + 0xf80: 0x36dd, 0xf81: 0x36fd, 0xf82: 0x371d, 0xf83: 0x373d, 0xf84: 0x375d, 0xf85: 0x377d, + 0xf86: 0x379d, 0xf87: 0x37bd, 0xf88: 0x37dd, 0xf89: 0x37fd, 0xf8a: 0x381d, 0xf8b: 0x383d, + 0xf8c: 0x385d, 0xf8d: 0x387d, 0xf8e: 0x389d, 0xf8f: 0x38bd, 0xf90: 0x38dd, 0xf91: 0x38fd, + 0xf92: 0x391d, 0xf93: 0x393d, 0xf94: 0x395d, 0xf95: 0x397d, 0xf96: 0x399d, 0xf97: 0x39bd, + 0xf98: 0x39dd, 0xf99: 0x39fd, 0xf9a: 0x3a1d, 0xf9b: 0x3a3d, 0xf9c: 0x3a5d, 0xf9d: 0x3a7d, + 0xf9e: 0x3a9d, 0xf9f: 0x3abd, 0xfa0: 0x3add, 0xfa1: 0x3afd, 0xfa2: 0x3b1d, 0xfa3: 0x3b3d, + 0xfa4: 0x3b5d, 0xfa5: 0x3b7d, 0xfa6: 0x127d, 0xfa7: 0x3b9d, 0xfa8: 0x3bbd, 0xfa9: 0x3bdd, + 0xfaa: 0x3bfd, 0xfab: 0x3c1d, 0xfac: 0x3c3d, 0xfad: 0x3c5d, 0xfae: 0x239d, 0xfaf: 0x3c7d, + 0xfb0: 0x3c9d, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999, + 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29, + 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69, + 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69, + 0xfcc: 0x3c99, 0xfcd: 0x3cbd, 0xfce: 0x3cb1, 0xfcf: 0x3cdd, 0xfd0: 0x3cfd, 0xfd1: 0x3d15, + 0xfd2: 0x3d2d, 0xfd3: 0x3d45, 0xfd4: 0x3d5d, 0xfd5: 0x3d5d, 0xfd6: 0x3d45, 0xfd7: 0x3d75, + 0xfd8: 0x07bd, 0xfd9: 0x3d8d, 0xfda: 0x3da5, 0xfdb: 0x3dbd, 0xfdc: 0x3dd5, 0xfdd: 0x3ded, + 0xfde: 0x3e05, 0xfdf: 0x3e1d, 0xfe0: 0x3e35, 0xfe1: 0x3e4d, 0xfe2: 0x3e65, 0xfe3: 0x3e7d, + 0xfe4: 0x3e95, 0xfe5: 0x3e95, 0xfe6: 0x3ead, 0xfe7: 0x3ead, 0xfe8: 0x3ec5, 0xfe9: 0x3ec5, + 0xfea: 0x3edd, 0xfeb: 0x3ef5, 0xfec: 0x3f0d, 0xfed: 0x3f25, 0xfee: 0x3f3d, 0xfef: 0x3f3d, + 0xff0: 0x3f55, 0xff1: 0x3f55, 0xff2: 0x3f55, 0xff3: 0x3f6d, 0xff4: 0x3f85, 0xff5: 0x3f9d, + 0xff6: 0x3fb5, 0xff7: 0x3f9d, 0xff8: 0x3fcd, 0xff9: 0x3fe5, 0xffa: 0x3f6d, 0xffb: 0x3ffd, + 0xffc: 0x4015, 0xffd: 0x4015, 0xffe: 0x4015, 0xfff: 0x0040, + // Block 0x40, offset 0x1000 + 0x1000: 0x3cc9, 0x1001: 0x3d31, 0x1002: 0x3d99, 0x1003: 0x3e01, 0x1004: 0x3e51, 0x1005: 0x3eb9, + 0x1006: 0x3f09, 0x1007: 0x3f59, 0x1008: 0x3fd9, 0x1009: 0x4041, 0x100a: 0x4091, 0x100b: 0x40e1, + 0x100c: 0x4131, 0x100d: 0x4199, 0x100e: 0x4201, 0x100f: 0x4251, 0x1010: 0x42a1, 
0x1011: 0x42d9, + 0x1012: 0x4329, 0x1013: 0x4391, 0x1014: 0x43f9, 0x1015: 0x4431, 0x1016: 0x44b1, 0x1017: 0x4549, + 0x1018: 0x45c9, 0x1019: 0x4619, 0x101a: 0x4699, 0x101b: 0x4719, 0x101c: 0x4781, 0x101d: 0x47d1, + 0x101e: 0x4821, 0x101f: 0x4871, 0x1020: 0x48d9, 0x1021: 0x4959, 0x1022: 0x49c1, 0x1023: 0x4a11, + 0x1024: 0x4a61, 0x1025: 0x4ab1, 0x1026: 0x4ae9, 0x1027: 0x4b21, 0x1028: 0x4b59, 0x1029: 0x4b91, + 0x102a: 0x4be1, 0x102b: 0x4c31, 0x102c: 0x4cb1, 0x102d: 0x4d01, 0x102e: 0x4d69, 0x102f: 0x4de9, + 0x1030: 0x4e39, 0x1031: 0x4e71, 0x1032: 0x4ea9, 0x1033: 0x4f29, 0x1034: 0x4f91, 0x1035: 0x5011, + 0x1036: 0x5061, 0x1037: 0x50e1, 0x1038: 0x5119, 0x1039: 0x5169, 0x103a: 0x51b9, 0x103b: 0x5209, + 0x103c: 0x5259, 0x103d: 0x52a9, 0x103e: 0x5311, 0x103f: 0x5361, + // Block 0x41, offset 0x1040 + 0x1040: 0x5399, 0x1041: 0x53e9, 0x1042: 0x5439, 0x1043: 0x5489, 0x1044: 0x54f1, 0x1045: 0x5541, + 0x1046: 0x5591, 0x1047: 0x55e1, 0x1048: 0x5661, 0x1049: 0x56c9, 0x104a: 0x5701, 0x104b: 0x5781, + 0x104c: 0x57b9, 0x104d: 0x5821, 0x104e: 0x5889, 0x104f: 0x58d9, 0x1050: 0x5929, 0x1051: 0x5979, + 0x1052: 0x59e1, 0x1053: 0x5a19, 0x1054: 0x5a69, 0x1055: 0x5ad1, 0x1056: 0x5b09, 0x1057: 0x5b89, + 0x1058: 0x5bd9, 0x1059: 0x5c01, 0x105a: 0x5c29, 0x105b: 0x5c51, 0x105c: 0x5c79, 0x105d: 0x5ca1, + 0x105e: 0x5cc9, 0x105f: 0x5cf1, 0x1060: 0x5d19, 0x1061: 0x5d41, 0x1062: 0x5d69, 0x1063: 0x5d99, + 0x1064: 0x5dc9, 0x1065: 0x5df9, 0x1066: 0x5e29, 0x1067: 0x5e59, 0x1068: 0x5e89, 0x1069: 0x5eb9, + 0x106a: 0x5ee9, 0x106b: 0x5f19, 0x106c: 0x5f49, 0x106d: 0x5f79, 0x106e: 0x5fa9, 0x106f: 0x5fd9, + 0x1070: 0x6009, 0x1071: 0x402d, 0x1072: 0x6039, 0x1073: 0x6051, 0x1074: 0x404d, 0x1075: 0x6069, + 0x1076: 0x6081, 0x1077: 0x6099, 0x1078: 0x406d, 0x1079: 0x406d, 0x107a: 0x60b1, 0x107b: 0x60c9, + 0x107c: 0x6101, 0x107d: 0x6139, 0x107e: 0x6171, 0x107f: 0x61a9, + // Block 0x42, offset 0x1080 + 0x1080: 0x6211, 0x1081: 0x6229, 0x1082: 0x408d, 0x1083: 0x6241, 0x1084: 0x6259, 0x1085: 0x6271, + 0x1086: 0x6289, 0x1087: 0x62a1, 0x1088: 0x40ad, 0x1089: 0x62b9, 0x108a: 0x62e1, 0x108b: 0x62f9, + 0x108c: 0x40cd, 0x108d: 0x40cd, 0x108e: 0x6311, 0x108f: 0x6329, 0x1090: 0x6341, 0x1091: 0x40ed, + 0x1092: 0x410d, 0x1093: 0x412d, 0x1094: 0x414d, 0x1095: 0x416d, 0x1096: 0x6359, 0x1097: 0x6371, + 0x1098: 0x6389, 0x1099: 0x63a1, 0x109a: 0x63b9, 0x109b: 0x418d, 0x109c: 0x63d1, 0x109d: 0x63e9, + 0x109e: 0x6401, 0x109f: 0x41ad, 0x10a0: 0x41cd, 0x10a1: 0x6419, 0x10a2: 0x41ed, 0x10a3: 0x420d, + 0x10a4: 0x422d, 0x10a5: 0x6431, 0x10a6: 0x424d, 0x10a7: 0x6449, 0x10a8: 0x6479, 0x10a9: 0x6211, + 0x10aa: 0x426d, 0x10ab: 0x428d, 0x10ac: 0x42ad, 0x10ad: 0x42cd, 0x10ae: 0x64b1, 0x10af: 0x64f1, + 0x10b0: 0x6539, 0x10b1: 0x6551, 0x10b2: 0x42ed, 0x10b3: 0x6569, 0x10b4: 0x6581, 0x10b5: 0x6599, + 0x10b6: 0x430d, 0x10b7: 0x65b1, 0x10b8: 0x65c9, 0x10b9: 0x65b1, 0x10ba: 0x65e1, 0x10bb: 0x65f9, + 0x10bc: 0x432d, 0x10bd: 0x6611, 0x10be: 0x6629, 0x10bf: 0x6611, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x434d, 0x10c1: 0x436d, 0x10c2: 0x0040, 0x10c3: 0x6641, 0x10c4: 0x6659, 0x10c5: 0x6671, + 0x10c6: 0x6689, 0x10c7: 0x0040, 0x10c8: 0x66c1, 0x10c9: 0x66d9, 0x10ca: 0x66f1, 0x10cb: 0x6709, + 0x10cc: 0x6721, 0x10cd: 0x6739, 0x10ce: 0x6401, 0x10cf: 0x6751, 0x10d0: 0x6769, 0x10d1: 0x6781, + 0x10d2: 0x438d, 0x10d3: 0x6799, 0x10d4: 0x6289, 0x10d5: 0x43ad, 0x10d6: 0x43cd, 0x10d7: 0x67b1, + 0x10d8: 0x0040, 0x10d9: 0x43ed, 0x10da: 0x67c9, 0x10db: 0x67e1, 0x10dc: 0x67f9, 0x10dd: 0x6811, + 0x10de: 0x6829, 0x10df: 0x6859, 0x10e0: 0x6889, 0x10e1: 0x68b1, 0x10e2: 0x68d9, 0x10e3: 0x6901, + 0x10e4: 
0x6929, 0x10e5: 0x6951, 0x10e6: 0x6979, 0x10e7: 0x69a1, 0x10e8: 0x69c9, 0x10e9: 0x69f1, + 0x10ea: 0x6a21, 0x10eb: 0x6a51, 0x10ec: 0x6a81, 0x10ed: 0x6ab1, 0x10ee: 0x6ae1, 0x10ef: 0x6b11, + 0x10f0: 0x6b41, 0x10f1: 0x6b71, 0x10f2: 0x6ba1, 0x10f3: 0x6bd1, 0x10f4: 0x6c01, 0x10f5: 0x6c31, + 0x10f6: 0x6c61, 0x10f7: 0x6c91, 0x10f8: 0x6cc1, 0x10f9: 0x6cf1, 0x10fa: 0x6d21, 0x10fb: 0x6d51, + 0x10fc: 0x6d81, 0x10fd: 0x6db1, 0x10fe: 0x6de1, 0x10ff: 0x440d, + // Block 0x44, offset 0x1100 + 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008, + 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008, + 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008, + 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008, + 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008, + 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008, + 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008, + 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308, + 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308, + 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308, + 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008, + // Block 0x45, offset 0x1140 + 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008, + 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008, + 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008, + 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008, + 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e11, + 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008, + 0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008, + 0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008, + 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008, + 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008, + 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008, + // Block 0x46, offset 0x1180 + 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018, + 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018, + 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018, + 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008, + 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008, + 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008, + 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, + 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008, + 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008, + 0x11b6: 0xe00d, 0x11b7: 0x0008, 
0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008, + 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008, + // Block 0x47, offset 0x11c0 + 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008, + 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008, + 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008, + 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008, + 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008, + 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008, + 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008, + 0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008, + 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008, + 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d, + 0x11fc: 0x0008, 0x11fd: 0x442d, 0x11fe: 0xe00d, 0x11ff: 0x0008, + // Block 0x48, offset 0x1200 + 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008, + 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d, + 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008, + 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008, + 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008, + 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008, + 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008, + 0x122a: 0x6e29, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e41, 0x122e: 0x1221, 0x122f: 0x0040, + 0x1230: 0x6e59, 0x1231: 0x6e71, 0x1232: 0x1239, 0x1233: 0x444d, 0x1234: 0xe00d, 0x1235: 0x0008, + 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0x0040, 0x1239: 0x0040, 0x123a: 0x0040, 0x123b: 0x0040, + 0x123c: 0x0040, 0x123d: 0x0040, 0x123e: 0x0040, 0x123f: 0x0040, + // Block 0x49, offset 0x1240 + 0x1240: 0x64d5, 0x1241: 0x64f5, 0x1242: 0x6515, 0x1243: 0x6535, 0x1244: 0x6555, 0x1245: 0x6575, + 0x1246: 0x6595, 0x1247: 0x65b5, 0x1248: 0x65d5, 0x1249: 0x65f5, 0x124a: 0x6615, 0x124b: 0x6635, + 0x124c: 0x6655, 0x124d: 0x6675, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x6695, 0x1251: 0x0008, + 0x1252: 0x66b5, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x66d5, 0x1256: 0x66f5, 0x1257: 0x6715, + 0x1258: 0x6735, 0x1259: 0x6755, 0x125a: 0x6775, 0x125b: 0x6795, 0x125c: 0x67b5, 0x125d: 0x67d5, + 0x125e: 0x67f5, 0x125f: 0x0008, 0x1260: 0x6815, 0x1261: 0x0008, 0x1262: 0x6835, 0x1263: 0x0008, + 0x1264: 0x0008, 0x1265: 0x6855, 0x1266: 0x6875, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008, + 0x126a: 0x6895, 0x126b: 0x68b5, 0x126c: 0x68d5, 0x126d: 0x68f5, 0x126e: 0x6915, 0x126f: 0x6935, + 0x1270: 0x6955, 0x1271: 0x6975, 0x1272: 0x6995, 0x1273: 0x69b5, 0x1274: 0x69d5, 0x1275: 0x69f5, + 0x1276: 0x6a15, 0x1277: 0x6a35, 0x1278: 0x6a55, 0x1279: 0x6a75, 0x127a: 0x6a95, 0x127b: 0x6ab5, + 0x127c: 0x6ad5, 0x127d: 0x6af5, 0x127e: 0x6b15, 0x127f: 0x6b35, + // Block 0x4a, offset 0x1280 + 0x1280: 0x7a95, 0x1281: 0x7ab5, 0x1282: 0x7ad5, 0x1283: 0x7af5, 0x1284: 0x7b15, 0x1285: 0x7b35, + 0x1286: 0x7b55, 0x1287: 0x7b75, 0x1288: 0x7b95, 0x1289: 
0x7bb5, 0x128a: 0x7bd5, 0x128b: 0x7bf5, + 0x128c: 0x7c15, 0x128d: 0x7c35, 0x128e: 0x7c55, 0x128f: 0x6ec9, 0x1290: 0x6ef1, 0x1291: 0x6f19, + 0x1292: 0x7c75, 0x1293: 0x7c95, 0x1294: 0x7cb5, 0x1295: 0x6f41, 0x1296: 0x6f69, 0x1297: 0x6f91, + 0x1298: 0x7cd5, 0x1299: 0x7cf5, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040, + 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040, + 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040, + 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040, + 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040, + 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040, + 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x6fb9, 0x12c1: 0x6fd1, 0x12c2: 0x6fe9, 0x12c3: 0x7d15, 0x12c4: 0x7d35, 0x12c5: 0x7001, + 0x12c6: 0x7001, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040, + 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040, + 0x12d2: 0x0040, 0x12d3: 0x7019, 0x12d4: 0x7041, 0x12d5: 0x7069, 0x12d6: 0x7091, 0x12d7: 0x70b9, + 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x70e1, + 0x12de: 0x3308, 0x12df: 0x7109, 0x12e0: 0x7131, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7149, + 0x12e4: 0x7161, 0x12e5: 0x7179, 0x12e6: 0x7191, 0x12e7: 0x71a9, 0x12e8: 0x71c1, 0x12e9: 0x1fb2, + 0x12ea: 0x71d9, 0x12eb: 0x7201, 0x12ec: 0x7229, 0x12ed: 0x7261, 0x12ee: 0x7299, 0x12ef: 0x72c1, + 0x12f0: 0x72e9, 0x12f1: 0x7311, 0x12f2: 0x7339, 0x12f3: 0x7361, 0x12f4: 0x7389, 0x12f5: 0x73b1, + 0x12f6: 0x73d9, 0x12f7: 0x0040, 0x12f8: 0x7401, 0x12f9: 0x7429, 0x12fa: 0x7451, 0x12fb: 0x7479, + 0x12fc: 0x74a1, 0x12fd: 0x0040, 0x12fe: 0x74c9, 0x12ff: 0x0040, + // Block 0x4c, offset 0x1300 + 0x1300: 0x74f1, 0x1301: 0x7519, 0x1302: 0x0040, 0x1303: 0x7541, 0x1304: 0x7569, 0x1305: 0x0040, + 0x1306: 0x7591, 0x1307: 0x75b9, 0x1308: 0x75e1, 0x1309: 0x7609, 0x130a: 0x7631, 0x130b: 0x7659, + 0x130c: 0x7681, 0x130d: 0x76a9, 0x130e: 0x76d1, 0x130f: 0x76f9, 0x1310: 0x7721, 0x1311: 0x7721, + 0x1312: 0x7739, 0x1313: 0x7739, 0x1314: 0x7739, 0x1315: 0x7739, 0x1316: 0x7751, 0x1317: 0x7751, + 0x1318: 0x7751, 0x1319: 0x7751, 0x131a: 0x7769, 0x131b: 0x7769, 0x131c: 0x7769, 0x131d: 0x7769, + 0x131e: 0x7781, 0x131f: 0x7781, 0x1320: 0x7781, 0x1321: 0x7781, 0x1322: 0x7799, 0x1323: 0x7799, + 0x1324: 0x7799, 0x1325: 0x7799, 0x1326: 0x77b1, 0x1327: 0x77b1, 0x1328: 0x77b1, 0x1329: 0x77b1, + 0x132a: 0x77c9, 0x132b: 0x77c9, 0x132c: 0x77c9, 0x132d: 0x77c9, 0x132e: 0x77e1, 0x132f: 0x77e1, + 0x1330: 0x77e1, 0x1331: 0x77e1, 0x1332: 0x77f9, 0x1333: 0x77f9, 0x1334: 0x77f9, 0x1335: 0x77f9, + 0x1336: 0x7811, 0x1337: 0x7811, 0x1338: 0x7811, 0x1339: 0x7811, 0x133a: 0x7829, 0x133b: 0x7829, + 0x133c: 0x7829, 0x133d: 0x7829, 0x133e: 0x7841, 0x133f: 0x7841, + // Block 0x4d, offset 0x1340 + 0x1340: 0x7841, 0x1341: 0x7841, 0x1342: 0x7859, 0x1343: 0x7859, 0x1344: 0x7871, 0x1345: 0x7871, + 0x1346: 0x7889, 0x1347: 0x7889, 0x1348: 0x78a1, 0x1349: 0x78a1, 0x134a: 0x78b9, 0x134b: 0x78b9, + 0x134c: 0x78d1, 0x134d: 0x78d1, 0x134e: 0x78e9, 0x134f: 0x78e9, 0x1350: 0x78e9, 0x1351: 0x78e9, + 0x1352: 0x7901, 0x1353: 0x7901, 0x1354: 0x7901, 0x1355: 0x7901, 0x1356: 0x7919, 0x1357: 0x7919, + 0x1358: 0x7919, 0x1359: 0x7919, 0x135a: 0x7931, 0x135b: 0x7931, 0x135c: 0x7931, 
0x135d: 0x7931, + 0x135e: 0x7949, 0x135f: 0x7949, 0x1360: 0x7961, 0x1361: 0x7961, 0x1362: 0x7961, 0x1363: 0x7961, + 0x1364: 0x7979, 0x1365: 0x7979, 0x1366: 0x7991, 0x1367: 0x7991, 0x1368: 0x7991, 0x1369: 0x7991, + 0x136a: 0x79a9, 0x136b: 0x79a9, 0x136c: 0x79a9, 0x136d: 0x79a9, 0x136e: 0x79c1, 0x136f: 0x79c1, + 0x1370: 0x79d9, 0x1371: 0x79d9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818, + 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818, + 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818, + // Block 0x4e, offset 0x1380 + 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040, + 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040, + 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040, + 0x1392: 0x0040, 0x1393: 0x79f1, 0x1394: 0x79f1, 0x1395: 0x79f1, 0x1396: 0x79f1, 0x1397: 0x7a09, + 0x1398: 0x7a09, 0x1399: 0x7a21, 0x139a: 0x7a21, 0x139b: 0x7a39, 0x139c: 0x7a39, 0x139d: 0x0479, + 0x139e: 0x7a51, 0x139f: 0x7a51, 0x13a0: 0x7a69, 0x13a1: 0x7a69, 0x13a2: 0x7a81, 0x13a3: 0x7a81, + 0x13a4: 0x7a99, 0x13a5: 0x7a99, 0x13a6: 0x7a99, 0x13a7: 0x7a99, 0x13a8: 0x7ab1, 0x13a9: 0x7ab1, + 0x13aa: 0x7ac9, 0x13ab: 0x7ac9, 0x13ac: 0x7af1, 0x13ad: 0x7af1, 0x13ae: 0x7b19, 0x13af: 0x7b19, + 0x13b0: 0x7b41, 0x13b1: 0x7b41, 0x13b2: 0x7b69, 0x13b3: 0x7b69, 0x13b4: 0x7b91, 0x13b5: 0x7b91, + 0x13b6: 0x7bb9, 0x13b7: 0x7bb9, 0x13b8: 0x7bb9, 0x13b9: 0x7be1, 0x13ba: 0x7be1, 0x13bb: 0x7be1, + 0x13bc: 0x7c09, 0x13bd: 0x7c09, 0x13be: 0x7c09, 0x13bf: 0x7c09, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x85f9, 0x13c1: 0x8621, 0x13c2: 0x8649, 0x13c3: 0x8671, 0x13c4: 0x8699, 0x13c5: 0x86c1, + 0x13c6: 0x86e9, 0x13c7: 0x8711, 0x13c8: 0x8739, 0x13c9: 0x8761, 0x13ca: 0x8789, 0x13cb: 0x87b1, + 0x13cc: 0x87d9, 0x13cd: 0x8801, 0x13ce: 0x8829, 0x13cf: 0x8851, 0x13d0: 0x8879, 0x13d1: 0x88a1, + 0x13d2: 0x88c9, 0x13d3: 0x88f1, 0x13d4: 0x8919, 0x13d5: 0x8941, 0x13d6: 0x8969, 0x13d7: 0x8991, + 0x13d8: 0x89b9, 0x13d9: 0x89e1, 0x13da: 0x8a09, 0x13db: 0x8a31, 0x13dc: 0x8a59, 0x13dd: 0x8a81, + 0x13de: 0x8aaa, 0x13df: 0x8ada, 0x13e0: 0x8b0a, 0x13e1: 0x8b3a, 0x13e2: 0x8b6a, 0x13e3: 0x8b9a, + 0x13e4: 0x8bc9, 0x13e5: 0x8bf1, 0x13e6: 0x7c71, 0x13e7: 0x8c19, 0x13e8: 0x7be1, 0x13e9: 0x7c99, + 0x13ea: 0x8c41, 0x13eb: 0x8c69, 0x13ec: 0x7d39, 0x13ed: 0x8c91, 0x13ee: 0x7d61, 0x13ef: 0x7d89, + 0x13f0: 0x8cb9, 0x13f1: 0x8ce1, 0x13f2: 0x7e29, 0x13f3: 0x8d09, 0x13f4: 0x7e51, 0x13f5: 0x7e79, + 0x13f6: 0x8d31, 0x13f7: 0x8d59, 0x13f8: 0x7ec9, 0x13f9: 0x8d81, 0x13fa: 0x7ef1, 0x13fb: 0x7f19, + 0x13fc: 0x83a1, 0x13fd: 0x83c9, 0x13fe: 0x8441, 0x13ff: 0x8469, + // Block 0x50, offset 0x1400 + 0x1400: 0x8491, 0x1401: 0x8531, 0x1402: 0x8559, 0x1403: 0x8581, 0x1404: 0x85a9, 0x1405: 0x8649, + 0x1406: 0x8671, 0x1407: 0x8699, 0x1408: 0x8da9, 0x1409: 0x8739, 0x140a: 0x8dd1, 0x140b: 0x8df9, + 0x140c: 0x8829, 0x140d: 0x8e21, 0x140e: 0x8851, 0x140f: 0x8879, 0x1410: 0x8a81, 0x1411: 0x8e49, + 0x1412: 0x8e71, 0x1413: 0x89b9, 0x1414: 0x8e99, 0x1415: 0x89e1, 0x1416: 0x8a09, 0x1417: 0x7c21, + 0x1418: 0x7c49, 0x1419: 0x8ec1, 0x141a: 0x7c71, 0x141b: 0x8ee9, 0x141c: 0x7cc1, 0x141d: 0x7ce9, + 0x141e: 0x7d11, 0x141f: 0x7d39, 0x1420: 0x8f11, 0x1421: 0x7db1, 0x1422: 0x7dd9, 0x1423: 0x7e01, + 0x1424: 0x7e29, 0x1425: 0x8f39, 0x1426: 0x7ec9, 0x1427: 0x7f41, 0x1428: 0x7f69, 0x1429: 0x7f91, + 0x142a: 0x7fb9, 0x142b: 0x7fe1, 0x142c: 0x8031, 0x142d: 0x8059, 0x142e: 0x8081, 0x142f: 0x80a9, + 0x1430: 
0x80d1, 0x1431: 0x80f9, 0x1432: 0x8f61, 0x1433: 0x8121, 0x1434: 0x8149, 0x1435: 0x8171, + 0x1436: 0x8199, 0x1437: 0x81c1, 0x1438: 0x81e9, 0x1439: 0x8239, 0x143a: 0x8261, 0x143b: 0x8289, + 0x143c: 0x82b1, 0x143d: 0x82d9, 0x143e: 0x8301, 0x143f: 0x8329, + // Block 0x51, offset 0x1440 + 0x1440: 0x8351, 0x1441: 0x8379, 0x1442: 0x83f1, 0x1443: 0x8419, 0x1444: 0x84b9, 0x1445: 0x84e1, + 0x1446: 0x8509, 0x1447: 0x8531, 0x1448: 0x8559, 0x1449: 0x85d1, 0x144a: 0x85f9, 0x144b: 0x8621, + 0x144c: 0x8649, 0x144d: 0x8f89, 0x144e: 0x86c1, 0x144f: 0x86e9, 0x1450: 0x8711, 0x1451: 0x8739, + 0x1452: 0x87b1, 0x1453: 0x87d9, 0x1454: 0x8801, 0x1455: 0x8829, 0x1456: 0x8fb1, 0x1457: 0x88a1, + 0x1458: 0x88c9, 0x1459: 0x8fd9, 0x145a: 0x8941, 0x145b: 0x8969, 0x145c: 0x8991, 0x145d: 0x89b9, + 0x145e: 0x9001, 0x145f: 0x7c71, 0x1460: 0x8ee9, 0x1461: 0x7d39, 0x1462: 0x8f11, 0x1463: 0x7e29, + 0x1464: 0x8f39, 0x1465: 0x7ec9, 0x1466: 0x9029, 0x1467: 0x80d1, 0x1468: 0x9051, 0x1469: 0x9079, + 0x146a: 0x90a1, 0x146b: 0x8531, 0x146c: 0x8559, 0x146d: 0x8649, 0x146e: 0x8829, 0x146f: 0x8fb1, + 0x1470: 0x89b9, 0x1471: 0x9001, 0x1472: 0x90c9, 0x1473: 0x9101, 0x1474: 0x9139, 0x1475: 0x9171, + 0x1476: 0x9199, 0x1477: 0x91c1, 0x1478: 0x91e9, 0x1479: 0x9211, 0x147a: 0x9239, 0x147b: 0x9261, + 0x147c: 0x9289, 0x147d: 0x92b1, 0x147e: 0x92d9, 0x147f: 0x9301, + // Block 0x52, offset 0x1480 + 0x1480: 0x9329, 0x1481: 0x9351, 0x1482: 0x9379, 0x1483: 0x93a1, 0x1484: 0x93c9, 0x1485: 0x93f1, + 0x1486: 0x9419, 0x1487: 0x9441, 0x1488: 0x9469, 0x1489: 0x9491, 0x148a: 0x94b9, 0x148b: 0x94e1, + 0x148c: 0x9079, 0x148d: 0x9509, 0x148e: 0x9531, 0x148f: 0x9559, 0x1490: 0x9581, 0x1491: 0x9171, + 0x1492: 0x9199, 0x1493: 0x91c1, 0x1494: 0x91e9, 0x1495: 0x9211, 0x1496: 0x9239, 0x1497: 0x9261, + 0x1498: 0x9289, 0x1499: 0x92b1, 0x149a: 0x92d9, 0x149b: 0x9301, 0x149c: 0x9329, 0x149d: 0x9351, + 0x149e: 0x9379, 0x149f: 0x93a1, 0x14a0: 0x93c9, 0x14a1: 0x93f1, 0x14a2: 0x9419, 0x14a3: 0x9441, + 0x14a4: 0x9469, 0x14a5: 0x9491, 0x14a6: 0x94b9, 0x14a7: 0x94e1, 0x14a8: 0x9079, 0x14a9: 0x9509, + 0x14aa: 0x9531, 0x14ab: 0x9559, 0x14ac: 0x9581, 0x14ad: 0x9491, 0x14ae: 0x94b9, 0x14af: 0x94e1, + 0x14b0: 0x9079, 0x14b1: 0x9051, 0x14b2: 0x90a1, 0x14b3: 0x8211, 0x14b4: 0x8059, 0x14b5: 0x8081, + 0x14b6: 0x80a9, 0x14b7: 0x9491, 0x14b8: 0x94b9, 0x14b9: 0x94e1, 0x14ba: 0x8211, 0x14bb: 0x8239, + 0x14bc: 0x95a9, 0x14bd: 0x95a9, 0x14be: 0x0018, 0x14bf: 0x0018, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040, + 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040, + 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x95d1, 0x14d1: 0x9609, + 0x14d2: 0x9609, 0x14d3: 0x9641, 0x14d4: 0x9679, 0x14d5: 0x96b1, 0x14d6: 0x96e9, 0x14d7: 0x9721, + 0x14d8: 0x9759, 0x14d9: 0x9759, 0x14da: 0x9791, 0x14db: 0x97c9, 0x14dc: 0x9801, 0x14dd: 0x9839, + 0x14de: 0x9871, 0x14df: 0x98a9, 0x14e0: 0x98a9, 0x14e1: 0x98e1, 0x14e2: 0x9919, 0x14e3: 0x9919, + 0x14e4: 0x9951, 0x14e5: 0x9951, 0x14e6: 0x9989, 0x14e7: 0x99c1, 0x14e8: 0x99c1, 0x14e9: 0x99f9, + 0x14ea: 0x9a31, 0x14eb: 0x9a31, 0x14ec: 0x9a69, 0x14ed: 0x9a69, 0x14ee: 0x9aa1, 0x14ef: 0x9ad9, + 0x14f0: 0x9ad9, 0x14f1: 0x9b11, 0x14f2: 0x9b11, 0x14f3: 0x9b49, 0x14f4: 0x9b81, 0x14f5: 0x9bb9, + 0x14f6: 0x9bf1, 0x14f7: 0x9bf1, 0x14f8: 0x9c29, 0x14f9: 0x9c61, 0x14fa: 0x9c99, 0x14fb: 0x9cd1, + 0x14fc: 0x9d09, 0x14fd: 0x9d09, 0x14fe: 0x9d41, 0x14ff: 0x9d79, + // Block 0x54, offset 0x1500 + 0x1500: 0xa949, 0x1501: 0xa981, 
0x1502: 0xa9b9, 0x1503: 0xa8a1, 0x1504: 0x9bb9, 0x1505: 0x9989, + 0x1506: 0xa9f1, 0x1507: 0xaa29, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040, + 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040, + 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040, + 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040, + 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040, + 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040, + 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, + 0x1530: 0xaa61, 0x1531: 0xaa99, 0x1532: 0xaad1, 0x1533: 0xab19, 0x1534: 0xab61, 0x1535: 0xaba9, + 0x1536: 0xabf1, 0x1537: 0xac39, 0x1538: 0xac81, 0x1539: 0xacc9, 0x153a: 0xad02, 0x153b: 0xae12, + 0x153c: 0xae91, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040, + // Block 0x55, offset 0x1540 + 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0, + 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0, + 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaeda, 0x1551: 0x7d55, + 0x1552: 0x0040, 0x1553: 0xaeea, 0x1554: 0x03c2, 0x1555: 0xaefa, 0x1556: 0xaf0a, 0x1557: 0x7d75, + 0x1558: 0x7d95, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040, + 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308, + 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308, + 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308, + 0x1570: 0x0040, 0x1571: 0x7db5, 0x1572: 0x7dd5, 0x1573: 0xaf1a, 0x1574: 0xaf1a, 0x1575: 0x1fd2, + 0x1576: 0x1fe2, 0x1577: 0xaf2a, 0x1578: 0xaf3a, 0x1579: 0x7df5, 0x157a: 0x7e15, 0x157b: 0x7e35, + 0x157c: 0x7df5, 0x157d: 0x7e55, 0x157e: 0x7e75, 0x157f: 0x7e55, + // Block 0x56, offset 0x1580 + 0x1580: 0x7e95, 0x1581: 0x7eb5, 0x1582: 0x7ed5, 0x1583: 0x7eb5, 0x1584: 0x7ef5, 0x1585: 0x0018, + 0x1586: 0x0018, 0x1587: 0xaf4a, 0x1588: 0xaf5a, 0x1589: 0x7f16, 0x158a: 0x7f36, 0x158b: 0x7f56, + 0x158c: 0x7f76, 0x158d: 0xaf1a, 0x158e: 0xaf1a, 0x158f: 0xaf1a, 0x1590: 0xaeda, 0x1591: 0x7f95, + 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaeea, 0x1596: 0xaf0a, 0x1597: 0xaefa, + 0x1598: 0x7fb5, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf2a, 0x159c: 0xaf3a, 0x159d: 0x7e95, + 0x159e: 0x7ef5, 0x159f: 0xaf6a, 0x15a0: 0xaf7a, 0x15a1: 0xaf8a, 0x15a2: 0x1fb2, 0x15a3: 0xaf99, + 0x15a4: 0xafaa, 0x15a5: 0xafba, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xafca, 0x15a9: 0xafda, + 0x15aa: 0xafea, 0x15ab: 0xaffa, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040, + 0x15b0: 0x7fd6, 0x15b1: 0xb009, 0x15b2: 0x7ff6, 0x15b3: 0x0808, 0x15b4: 0x8016, 0x15b5: 0x0040, + 0x15b6: 0x8036, 0x15b7: 0xb031, 0x15b8: 0x8056, 0x15b9: 0xb059, 0x15ba: 0x8076, 0x15bb: 0xb081, + 0x15bc: 0x8096, 0x15bd: 0xb0a9, 0x15be: 0x80b6, 0x15bf: 0xb0d1, + // Block 0x57, offset 0x15c0 + 0x15c0: 0xb0f9, 0x15c1: 0xb111, 0x15c2: 0xb111, 0x15c3: 0xb129, 0x15c4: 0xb129, 0x15c5: 0xb141, + 0x15c6: 0xb141, 0x15c7: 0xb159, 0x15c8: 0xb159, 0x15c9: 0xb171, 0x15ca: 0xb171, 0x15cb: 0xb171, + 0x15cc: 0xb171, 0x15cd: 0xb189, 0x15ce: 0xb189, 0x15cf: 0xb1a1, 0x15d0: 0xb1a1, 0x15d1: 0xb1a1, + 0x15d2: 0xb1a1, 0x15d3: 0xb1b9, 0x15d4: 0xb1b9, 0x15d5: 
0xb1d1, 0x15d6: 0xb1d1, 0x15d7: 0xb1d1, + 0x15d8: 0xb1d1, 0x15d9: 0xb1e9, 0x15da: 0xb1e9, 0x15db: 0xb1e9, 0x15dc: 0xb1e9, 0x15dd: 0xb201, + 0x15de: 0xb201, 0x15df: 0xb201, 0x15e0: 0xb201, 0x15e1: 0xb219, 0x15e2: 0xb219, 0x15e3: 0xb219, + 0x15e4: 0xb219, 0x15e5: 0xb231, 0x15e6: 0xb231, 0x15e7: 0xb231, 0x15e8: 0xb231, 0x15e9: 0xb249, + 0x15ea: 0xb249, 0x15eb: 0xb261, 0x15ec: 0xb261, 0x15ed: 0xb279, 0x15ee: 0xb279, 0x15ef: 0xb291, + 0x15f0: 0xb291, 0x15f1: 0xb2a9, 0x15f2: 0xb2a9, 0x15f3: 0xb2a9, 0x15f4: 0xb2a9, 0x15f5: 0xb2c1, + 0x15f6: 0xb2c1, 0x15f7: 0xb2c1, 0x15f8: 0xb2c1, 0x15f9: 0xb2d9, 0x15fa: 0xb2d9, 0x15fb: 0xb2d9, + 0x15fc: 0xb2d9, 0x15fd: 0xb2f1, 0x15fe: 0xb2f1, 0x15ff: 0xb2f1, + // Block 0x58, offset 0x1600 + 0x1600: 0xb2f1, 0x1601: 0xb309, 0x1602: 0xb309, 0x1603: 0xb309, 0x1604: 0xb309, 0x1605: 0xb321, + 0x1606: 0xb321, 0x1607: 0xb321, 0x1608: 0xb321, 0x1609: 0xb339, 0x160a: 0xb339, 0x160b: 0xb339, + 0x160c: 0xb339, 0x160d: 0xb351, 0x160e: 0xb351, 0x160f: 0xb351, 0x1610: 0xb351, 0x1611: 0xb369, + 0x1612: 0xb369, 0x1613: 0xb369, 0x1614: 0xb369, 0x1615: 0xb381, 0x1616: 0xb381, 0x1617: 0xb381, + 0x1618: 0xb381, 0x1619: 0xb399, 0x161a: 0xb399, 0x161b: 0xb399, 0x161c: 0xb399, 0x161d: 0xb3b1, + 0x161e: 0xb3b1, 0x161f: 0xb3b1, 0x1620: 0xb3b1, 0x1621: 0xb3c9, 0x1622: 0xb3c9, 0x1623: 0xb3c9, + 0x1624: 0xb3c9, 0x1625: 0xb3e1, 0x1626: 0xb3e1, 0x1627: 0xb3e1, 0x1628: 0xb3e1, 0x1629: 0xb3f9, + 0x162a: 0xb3f9, 0x162b: 0xb3f9, 0x162c: 0xb3f9, 0x162d: 0xb411, 0x162e: 0xb411, 0x162f: 0x7ab1, + 0x1630: 0x7ab1, 0x1631: 0xb429, 0x1632: 0xb429, 0x1633: 0xb429, 0x1634: 0xb429, 0x1635: 0xb441, + 0x1636: 0xb441, 0x1637: 0xb469, 0x1638: 0xb469, 0x1639: 0xb491, 0x163a: 0xb491, 0x163b: 0xb4b9, + 0x163c: 0xb4b9, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0, + // Block 0x59, offset 0x1640 + 0x1640: 0x0040, 0x1641: 0xaefa, 0x1642: 0xb4e2, 0x1643: 0xaf6a, 0x1644: 0xafda, 0x1645: 0xafea, + 0x1646: 0xaf7a, 0x1647: 0xb4f2, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xaf8a, 0x164b: 0x1fb2, + 0x164c: 0xaeda, 0x164d: 0xaf99, 0x164e: 0x29d1, 0x164f: 0xb502, 0x1650: 0x1f41, 0x1651: 0x00c9, + 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81, + 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaeea, 0x165b: 0x03c2, 0x165c: 0xafaa, 0x165d: 0x1fc2, + 0x165e: 0xafba, 0x165f: 0xaf0a, 0x1660: 0xaffa, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159, + 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41, + 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9, + 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9, + 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf4a, + 0x167c: 0xafca, 0x167d: 0xaf5a, 0x167e: 0xb512, 0x167f: 0xaf1a, + // Block 0x5a, offset 0x1680 + 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09, + 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51, + 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039, + 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 0x1697: 0x0279, + 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf2a, 0x169c: 0xb522, 0x169d: 0xaf3a, + 0x169e: 0xb532, 0x169f: 0x80d5, 0x16a0: 0x80f5, 0x16a1: 0x29d1, 0x16a2: 0x8115, 0x16a3: 0x8115, + 0x16a4: 0x8135, 0x16a5: 0x8155, 0x16a6: 0x8175, 0x16a7: 0x8195, 0x16a8: 0x81b5, 
0x16a9: 0x81d5, + 0x16aa: 0x81f5, 0x16ab: 0x8215, 0x16ac: 0x8235, 0x16ad: 0x8255, 0x16ae: 0x8275, 0x16af: 0x8295, + 0x16b0: 0x82b5, 0x16b1: 0x82d5, 0x16b2: 0x82f5, 0x16b3: 0x8315, 0x16b4: 0x8335, 0x16b5: 0x8355, + 0x16b6: 0x8375, 0x16b7: 0x8395, 0x16b8: 0x83b5, 0x16b9: 0x83d5, 0x16ba: 0x83f5, 0x16bb: 0x8415, + 0x16bc: 0x81b5, 0x16bd: 0x8435, 0x16be: 0x8455, 0x16bf: 0x8215, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x8475, 0x16c1: 0x8495, 0x16c2: 0x84b5, 0x16c3: 0x84d5, 0x16c4: 0x84f5, 0x16c5: 0x8515, + 0x16c6: 0x8535, 0x16c7: 0x8555, 0x16c8: 0x84d5, 0x16c9: 0x8575, 0x16ca: 0x84d5, 0x16cb: 0x8595, + 0x16cc: 0x8595, 0x16cd: 0x85b5, 0x16ce: 0x85b5, 0x16cf: 0x85d5, 0x16d0: 0x8515, 0x16d1: 0x85f5, + 0x16d2: 0x8615, 0x16d3: 0x85f5, 0x16d4: 0x8635, 0x16d5: 0x8615, 0x16d6: 0x8655, 0x16d7: 0x8655, + 0x16d8: 0x8675, 0x16d9: 0x8675, 0x16da: 0x8695, 0x16db: 0x8695, 0x16dc: 0x8615, 0x16dd: 0x8115, + 0x16de: 0x86b5, 0x16df: 0x86d5, 0x16e0: 0x0040, 0x16e1: 0x86f5, 0x16e2: 0x8715, 0x16e3: 0x8735, + 0x16e4: 0x8755, 0x16e5: 0x8735, 0x16e6: 0x8775, 0x16e7: 0x8795, 0x16e8: 0x87b5, 0x16e9: 0x87b5, + 0x16ea: 0x87d5, 0x16eb: 0x87d5, 0x16ec: 0x87f5, 0x16ed: 0x87f5, 0x16ee: 0x87d5, 0x16ef: 0x87d5, + 0x16f0: 0x8815, 0x16f1: 0x8835, 0x16f2: 0x8855, 0x16f3: 0x8875, 0x16f4: 0x8895, 0x16f5: 0x88b5, + 0x16f6: 0x88b5, 0x16f7: 0x88b5, 0x16f8: 0x88d5, 0x16f9: 0x88d5, 0x16fa: 0x88d5, 0x16fb: 0x88d5, + 0x16fc: 0x87b5, 0x16fd: 0x87b5, 0x16fe: 0x87b5, 0x16ff: 0x0040, + // Block 0x5c, offset 0x1700 + 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x8715, 0x1703: 0x86f5, 0x1704: 0x88f5, 0x1705: 0x86f5, + 0x1706: 0x8715, 0x1707: 0x86f5, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x8915, 0x170b: 0x8715, + 0x170c: 0x8935, 0x170d: 0x88f5, 0x170e: 0x8935, 0x170f: 0x8715, 0x1710: 0x0040, 0x1711: 0x0040, + 0x1712: 0x8955, 0x1713: 0x8975, 0x1714: 0x8875, 0x1715: 0x8935, 0x1716: 0x88f5, 0x1717: 0x8935, + 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x8995, 0x171b: 0x89b5, 0x171c: 0x8995, 0x171d: 0x0040, + 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb541, 0x1721: 0xb559, 0x1722: 0xb571, 0x1723: 0x89d6, + 0x1724: 0xb589, 0x1725: 0xb5a1, 0x1726: 0x89f5, 0x1727: 0x0040, 0x1728: 0x8a15, 0x1729: 0x8a35, + 0x172a: 0x8a55, 0x172b: 0x8a35, 0x172c: 0x8a75, 0x172d: 0x8a95, 0x172e: 0x8ab5, 0x172f: 0x0040, + 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040, + 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340, + 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, + // Block 0x5d, offset 0x1740 + 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08, + 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808, + 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08, + 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908, + 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08, + 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808, + 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040, + 0x176a: 0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18, + 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818, + 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040, + 0x177c: 
0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040, + // Block 0x5e, offset 0x1780 + 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08, + 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08, + 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08, + 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040, + 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040, + 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040, + 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18, + 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818, + 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040, + 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040, + 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008, + 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008, + 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040, + 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008, + 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008, + 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008, + 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040, + 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008, + 0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008, + 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x0040, + 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008, + // Block 0x60, offset 0x1800 + 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040, + 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008, + 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040, + 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008, + 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008, + 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008, + 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308, + 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040, + 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040, + 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040, + 0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040, + // Block 0x61, offset 0x1840 + 0x1840: 0x0039, 0x1841: 0x0ee9, 0x1842: 0x1159, 0x1843: 0x0ef9, 0x1844: 0x0f09, 0x1845: 0x1199, + 0x1846: 0x0f31, 0x1847: 0x0249, 0x1848: 0x0f41, 0x1849: 0x0259, 0x184a: 0x0f51, 0x184b: 0x0359, + 0x184c: 0x0f61, 0x184d: 0x0f71, 
0x184e: 0x00d9, 0x184f: 0x0f99, 0x1850: 0x2039, 0x1851: 0x0269, + 0x1852: 0x01d9, 0x1853: 0x0fa9, 0x1854: 0x0fb9, 0x1855: 0x1089, 0x1856: 0x0279, 0x1857: 0x0369, + 0x1858: 0x0289, 0x1859: 0x13d1, 0x185a: 0x0039, 0x185b: 0x0ee9, 0x185c: 0x1159, 0x185d: 0x0ef9, + 0x185e: 0x0f09, 0x185f: 0x1199, 0x1860: 0x0f31, 0x1861: 0x0249, 0x1862: 0x0f41, 0x1863: 0x0259, + 0x1864: 0x0f51, 0x1865: 0x0359, 0x1866: 0x0f61, 0x1867: 0x0f71, 0x1868: 0x00d9, 0x1869: 0x0f99, + 0x186a: 0x2039, 0x186b: 0x0269, 0x186c: 0x01d9, 0x186d: 0x0fa9, 0x186e: 0x0fb9, 0x186f: 0x1089, + 0x1870: 0x0279, 0x1871: 0x0369, 0x1872: 0x0289, 0x1873: 0x13d1, 0x1874: 0x0039, 0x1875: 0x0ee9, + 0x1876: 0x1159, 0x1877: 0x0ef9, 0x1878: 0x0f09, 0x1879: 0x1199, 0x187a: 0x0f31, 0x187b: 0x0249, + 0x187c: 0x0f41, 0x187d: 0x0259, 0x187e: 0x0f51, 0x187f: 0x0359, + // Block 0x62, offset 0x1880 + 0x1880: 0x0f61, 0x1881: 0x0f71, 0x1882: 0x00d9, 0x1883: 0x0f99, 0x1884: 0x2039, 0x1885: 0x0269, + 0x1886: 0x01d9, 0x1887: 0x0fa9, 0x1888: 0x0fb9, 0x1889: 0x1089, 0x188a: 0x0279, 0x188b: 0x0369, + 0x188c: 0x0289, 0x188d: 0x13d1, 0x188e: 0x0039, 0x188f: 0x0ee9, 0x1890: 0x1159, 0x1891: 0x0ef9, + 0x1892: 0x0f09, 0x1893: 0x1199, 0x1894: 0x0f31, 0x1895: 0x0040, 0x1896: 0x0f41, 0x1897: 0x0259, + 0x1898: 0x0f51, 0x1899: 0x0359, 0x189a: 0x0f61, 0x189b: 0x0f71, 0x189c: 0x00d9, 0x189d: 0x0f99, + 0x189e: 0x2039, 0x189f: 0x0269, 0x18a0: 0x01d9, 0x18a1: 0x0fa9, 0x18a2: 0x0fb9, 0x18a3: 0x1089, + 0x18a4: 0x0279, 0x18a5: 0x0369, 0x18a6: 0x0289, 0x18a7: 0x13d1, 0x18a8: 0x0039, 0x18a9: 0x0ee9, + 0x18aa: 0x1159, 0x18ab: 0x0ef9, 0x18ac: 0x0f09, 0x18ad: 0x1199, 0x18ae: 0x0f31, 0x18af: 0x0249, + 0x18b0: 0x0f41, 0x18b1: 0x0259, 0x18b2: 0x0f51, 0x18b3: 0x0359, 0x18b4: 0x0f61, 0x18b5: 0x0f71, + 0x18b6: 0x00d9, 0x18b7: 0x0f99, 0x18b8: 0x2039, 0x18b9: 0x0269, 0x18ba: 0x01d9, 0x18bb: 0x0fa9, + 0x18bc: 0x0fb9, 0x18bd: 0x1089, 0x18be: 0x0279, 0x18bf: 0x0369, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x0289, 0x18c1: 0x13d1, 0x18c2: 0x0039, 0x18c3: 0x0ee9, 0x18c4: 0x1159, 0x18c5: 0x0ef9, + 0x18c6: 0x0f09, 0x18c7: 0x1199, 0x18c8: 0x0f31, 0x18c9: 0x0249, 0x18ca: 0x0f41, 0x18cb: 0x0259, + 0x18cc: 0x0f51, 0x18cd: 0x0359, 0x18ce: 0x0f61, 0x18cf: 0x0f71, 0x18d0: 0x00d9, 0x18d1: 0x0f99, + 0x18d2: 0x2039, 0x18d3: 0x0269, 0x18d4: 0x01d9, 0x18d5: 0x0fa9, 0x18d6: 0x0fb9, 0x18d7: 0x1089, + 0x18d8: 0x0279, 0x18d9: 0x0369, 0x18da: 0x0289, 0x18db: 0x13d1, 0x18dc: 0x0039, 0x18dd: 0x0040, + 0x18de: 0x1159, 0x18df: 0x0ef9, 0x18e0: 0x0040, 0x18e1: 0x0040, 0x18e2: 0x0f31, 0x18e3: 0x0040, + 0x18e4: 0x0040, 0x18e5: 0x0259, 0x18e6: 0x0f51, 0x18e7: 0x0040, 0x18e8: 0x0040, 0x18e9: 0x0f71, + 0x18ea: 0x00d9, 0x18eb: 0x0f99, 0x18ec: 0x2039, 0x18ed: 0x0040, 0x18ee: 0x01d9, 0x18ef: 0x0fa9, + 0x18f0: 0x0fb9, 0x18f1: 0x1089, 0x18f2: 0x0279, 0x18f3: 0x0369, 0x18f4: 0x0289, 0x18f5: 0x13d1, + 0x18f6: 0x0039, 0x18f7: 0x0ee9, 0x18f8: 0x1159, 0x18f9: 0x0ef9, 0x18fa: 0x0040, 0x18fb: 0x1199, + 0x18fc: 0x0040, 0x18fd: 0x0249, 0x18fe: 0x0f41, 0x18ff: 0x0259, + // Block 0x64, offset 0x1900 + 0x1900: 0x0f51, 0x1901: 0x0359, 0x1902: 0x0f61, 0x1903: 0x0f71, 0x1904: 0x0040, 0x1905: 0x0f99, + 0x1906: 0x2039, 0x1907: 0x0269, 0x1908: 0x01d9, 0x1909: 0x0fa9, 0x190a: 0x0fb9, 0x190b: 0x1089, + 0x190c: 0x0279, 0x190d: 0x0369, 0x190e: 0x0289, 0x190f: 0x13d1, 0x1910: 0x0039, 0x1911: 0x0ee9, + 0x1912: 0x1159, 0x1913: 0x0ef9, 0x1914: 0x0f09, 0x1915: 0x1199, 0x1916: 0x0f31, 0x1917: 0x0249, + 0x1918: 0x0f41, 0x1919: 0x0259, 0x191a: 0x0f51, 0x191b: 0x0359, 0x191c: 0x0f61, 0x191d: 0x0f71, + 0x191e: 0x00d9, 0x191f: 0x0f99, 0x1920: 0x2039, 0x1921: 
0x0269, 0x1922: 0x01d9, 0x1923: 0x0fa9, + 0x1924: 0x0fb9, 0x1925: 0x1089, 0x1926: 0x0279, 0x1927: 0x0369, 0x1928: 0x0289, 0x1929: 0x13d1, + 0x192a: 0x0039, 0x192b: 0x0ee9, 0x192c: 0x1159, 0x192d: 0x0ef9, 0x192e: 0x0f09, 0x192f: 0x1199, + 0x1930: 0x0f31, 0x1931: 0x0249, 0x1932: 0x0f41, 0x1933: 0x0259, 0x1934: 0x0f51, 0x1935: 0x0359, + 0x1936: 0x0f61, 0x1937: 0x0f71, 0x1938: 0x00d9, 0x1939: 0x0f99, 0x193a: 0x2039, 0x193b: 0x0269, + 0x193c: 0x01d9, 0x193d: 0x0fa9, 0x193e: 0x0fb9, 0x193f: 0x1089, + // Block 0x65, offset 0x1940 + 0x1940: 0x0279, 0x1941: 0x0369, 0x1942: 0x0289, 0x1943: 0x13d1, 0x1944: 0x0039, 0x1945: 0x0ee9, + 0x1946: 0x0040, 0x1947: 0x0ef9, 0x1948: 0x0f09, 0x1949: 0x1199, 0x194a: 0x0f31, 0x194b: 0x0040, + 0x194c: 0x0040, 0x194d: 0x0259, 0x194e: 0x0f51, 0x194f: 0x0359, 0x1950: 0x0f61, 0x1951: 0x0f71, + 0x1952: 0x00d9, 0x1953: 0x0f99, 0x1954: 0x2039, 0x1955: 0x0040, 0x1956: 0x01d9, 0x1957: 0x0fa9, + 0x1958: 0x0fb9, 0x1959: 0x1089, 0x195a: 0x0279, 0x195b: 0x0369, 0x195c: 0x0289, 0x195d: 0x0040, + 0x195e: 0x0039, 0x195f: 0x0ee9, 0x1960: 0x1159, 0x1961: 0x0ef9, 0x1962: 0x0f09, 0x1963: 0x1199, + 0x1964: 0x0f31, 0x1965: 0x0249, 0x1966: 0x0f41, 0x1967: 0x0259, 0x1968: 0x0f51, 0x1969: 0x0359, + 0x196a: 0x0f61, 0x196b: 0x0f71, 0x196c: 0x00d9, 0x196d: 0x0f99, 0x196e: 0x2039, 0x196f: 0x0269, + 0x1970: 0x01d9, 0x1971: 0x0fa9, 0x1972: 0x0fb9, 0x1973: 0x1089, 0x1974: 0x0279, 0x1975: 0x0369, + 0x1976: 0x0289, 0x1977: 0x13d1, 0x1978: 0x0039, 0x1979: 0x0ee9, 0x197a: 0x0040, 0x197b: 0x0ef9, + 0x197c: 0x0f09, 0x197d: 0x1199, 0x197e: 0x0f31, 0x197f: 0x0040, + // Block 0x66, offset 0x1980 + 0x1980: 0x0f41, 0x1981: 0x0259, 0x1982: 0x0f51, 0x1983: 0x0359, 0x1984: 0x0f61, 0x1985: 0x0040, + 0x1986: 0x00d9, 0x1987: 0x0040, 0x1988: 0x0040, 0x1989: 0x0040, 0x198a: 0x01d9, 0x198b: 0x0fa9, + 0x198c: 0x0fb9, 0x198d: 0x1089, 0x198e: 0x0279, 0x198f: 0x0369, 0x1990: 0x0289, 0x1991: 0x0040, + 0x1992: 0x0039, 0x1993: 0x0ee9, 0x1994: 0x1159, 0x1995: 0x0ef9, 0x1996: 0x0f09, 0x1997: 0x1199, + 0x1998: 0x0f31, 0x1999: 0x0249, 0x199a: 0x0f41, 0x199b: 0x0259, 0x199c: 0x0f51, 0x199d: 0x0359, + 0x199e: 0x0f61, 0x199f: 0x0f71, 0x19a0: 0x00d9, 0x19a1: 0x0f99, 0x19a2: 0x2039, 0x19a3: 0x0269, + 0x19a4: 0x01d9, 0x19a5: 0x0fa9, 0x19a6: 0x0fb9, 0x19a7: 0x1089, 0x19a8: 0x0279, 0x19a9: 0x0369, + 0x19aa: 0x0289, 0x19ab: 0x13d1, 0x19ac: 0x0039, 0x19ad: 0x0ee9, 0x19ae: 0x1159, 0x19af: 0x0ef9, + 0x19b0: 0x0f09, 0x19b1: 0x1199, 0x19b2: 0x0f31, 0x19b3: 0x0249, 0x19b4: 0x0f41, 0x19b5: 0x0259, + 0x19b6: 0x0f51, 0x19b7: 0x0359, 0x19b8: 0x0f61, 0x19b9: 0x0f71, 0x19ba: 0x00d9, 0x19bb: 0x0f99, + 0x19bc: 0x2039, 0x19bd: 0x0269, 0x19be: 0x01d9, 0x19bf: 0x0fa9, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x0fb9, 0x19c1: 0x1089, 0x19c2: 0x0279, 0x19c3: 0x0369, 0x19c4: 0x0289, 0x19c5: 0x13d1, + 0x19c6: 0x0039, 0x19c7: 0x0ee9, 0x19c8: 0x1159, 0x19c9: 0x0ef9, 0x19ca: 0x0f09, 0x19cb: 0x1199, + 0x19cc: 0x0f31, 0x19cd: 0x0249, 0x19ce: 0x0f41, 0x19cf: 0x0259, 0x19d0: 0x0f51, 0x19d1: 0x0359, + 0x19d2: 0x0f61, 0x19d3: 0x0f71, 0x19d4: 0x00d9, 0x19d5: 0x0f99, 0x19d6: 0x2039, 0x19d7: 0x0269, + 0x19d8: 0x01d9, 0x19d9: 0x0fa9, 0x19da: 0x0fb9, 0x19db: 0x1089, 0x19dc: 0x0279, 0x19dd: 0x0369, + 0x19de: 0x0289, 0x19df: 0x13d1, 0x19e0: 0x0039, 0x19e1: 0x0ee9, 0x19e2: 0x1159, 0x19e3: 0x0ef9, + 0x19e4: 0x0f09, 0x19e5: 0x1199, 0x19e6: 0x0f31, 0x19e7: 0x0249, 0x19e8: 0x0f41, 0x19e9: 0x0259, + 0x19ea: 0x0f51, 0x19eb: 0x0359, 0x19ec: 0x0f61, 0x19ed: 0x0f71, 0x19ee: 0x00d9, 0x19ef: 0x0f99, + 0x19f0: 0x2039, 0x19f1: 0x0269, 0x19f2: 0x01d9, 0x19f3: 0x0fa9, 0x19f4: 0x0fb9, 
0x19f5: 0x1089, + 0x19f6: 0x0279, 0x19f7: 0x0369, 0x19f8: 0x0289, 0x19f9: 0x13d1, 0x19fa: 0x0039, 0x19fb: 0x0ee9, + 0x19fc: 0x1159, 0x19fd: 0x0ef9, 0x19fe: 0x0f09, 0x19ff: 0x1199, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x0f31, 0x1a01: 0x0249, 0x1a02: 0x0f41, 0x1a03: 0x0259, 0x1a04: 0x0f51, 0x1a05: 0x0359, + 0x1a06: 0x0f61, 0x1a07: 0x0f71, 0x1a08: 0x00d9, 0x1a09: 0x0f99, 0x1a0a: 0x2039, 0x1a0b: 0x0269, + 0x1a0c: 0x01d9, 0x1a0d: 0x0fa9, 0x1a0e: 0x0fb9, 0x1a0f: 0x1089, 0x1a10: 0x0279, 0x1a11: 0x0369, + 0x1a12: 0x0289, 0x1a13: 0x13d1, 0x1a14: 0x0039, 0x1a15: 0x0ee9, 0x1a16: 0x1159, 0x1a17: 0x0ef9, + 0x1a18: 0x0f09, 0x1a19: 0x1199, 0x1a1a: 0x0f31, 0x1a1b: 0x0249, 0x1a1c: 0x0f41, 0x1a1d: 0x0259, + 0x1a1e: 0x0f51, 0x1a1f: 0x0359, 0x1a20: 0x0f61, 0x1a21: 0x0f71, 0x1a22: 0x00d9, 0x1a23: 0x0f99, + 0x1a24: 0x2039, 0x1a25: 0x0269, 0x1a26: 0x01d9, 0x1a27: 0x0fa9, 0x1a28: 0x0fb9, 0x1a29: 0x1089, + 0x1a2a: 0x0279, 0x1a2b: 0x0369, 0x1a2c: 0x0289, 0x1a2d: 0x13d1, 0x1a2e: 0x0039, 0x1a2f: 0x0ee9, + 0x1a30: 0x1159, 0x1a31: 0x0ef9, 0x1a32: 0x0f09, 0x1a33: 0x1199, 0x1a34: 0x0f31, 0x1a35: 0x0249, + 0x1a36: 0x0f41, 0x1a37: 0x0259, 0x1a38: 0x0f51, 0x1a39: 0x0359, 0x1a3a: 0x0f61, 0x1a3b: 0x0f71, + 0x1a3c: 0x00d9, 0x1a3d: 0x0f99, 0x1a3e: 0x2039, 0x1a3f: 0x0269, + // Block 0x69, offset 0x1a40 + 0x1a40: 0x01d9, 0x1a41: 0x0fa9, 0x1a42: 0x0fb9, 0x1a43: 0x1089, 0x1a44: 0x0279, 0x1a45: 0x0369, + 0x1a46: 0x0289, 0x1a47: 0x13d1, 0x1a48: 0x0039, 0x1a49: 0x0ee9, 0x1a4a: 0x1159, 0x1a4b: 0x0ef9, + 0x1a4c: 0x0f09, 0x1a4d: 0x1199, 0x1a4e: 0x0f31, 0x1a4f: 0x0249, 0x1a50: 0x0f41, 0x1a51: 0x0259, + 0x1a52: 0x0f51, 0x1a53: 0x0359, 0x1a54: 0x0f61, 0x1a55: 0x0f71, 0x1a56: 0x00d9, 0x1a57: 0x0f99, + 0x1a58: 0x2039, 0x1a59: 0x0269, 0x1a5a: 0x01d9, 0x1a5b: 0x0fa9, 0x1a5c: 0x0fb9, 0x1a5d: 0x1089, + 0x1a5e: 0x0279, 0x1a5f: 0x0369, 0x1a60: 0x0289, 0x1a61: 0x13d1, 0x1a62: 0x0039, 0x1a63: 0x0ee9, + 0x1a64: 0x1159, 0x1a65: 0x0ef9, 0x1a66: 0x0f09, 0x1a67: 0x1199, 0x1a68: 0x0f31, 0x1a69: 0x0249, + 0x1a6a: 0x0f41, 0x1a6b: 0x0259, 0x1a6c: 0x0f51, 0x1a6d: 0x0359, 0x1a6e: 0x0f61, 0x1a6f: 0x0f71, + 0x1a70: 0x00d9, 0x1a71: 0x0f99, 0x1a72: 0x2039, 0x1a73: 0x0269, 0x1a74: 0x01d9, 0x1a75: 0x0fa9, + 0x1a76: 0x0fb9, 0x1a77: 0x1089, 0x1a78: 0x0279, 0x1a79: 0x0369, 0x1a7a: 0x0289, 0x1a7b: 0x13d1, + 0x1a7c: 0x0039, 0x1a7d: 0x0ee9, 0x1a7e: 0x1159, 0x1a7f: 0x0ef9, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x0f09, 0x1a81: 0x1199, 0x1a82: 0x0f31, 0x1a83: 0x0249, 0x1a84: 0x0f41, 0x1a85: 0x0259, + 0x1a86: 0x0f51, 0x1a87: 0x0359, 0x1a88: 0x0f61, 0x1a89: 0x0f71, 0x1a8a: 0x00d9, 0x1a8b: 0x0f99, + 0x1a8c: 0x2039, 0x1a8d: 0x0269, 0x1a8e: 0x01d9, 0x1a8f: 0x0fa9, 0x1a90: 0x0fb9, 0x1a91: 0x1089, + 0x1a92: 0x0279, 0x1a93: 0x0369, 0x1a94: 0x0289, 0x1a95: 0x13d1, 0x1a96: 0x0039, 0x1a97: 0x0ee9, + 0x1a98: 0x1159, 0x1a99: 0x0ef9, 0x1a9a: 0x0f09, 0x1a9b: 0x1199, 0x1a9c: 0x0f31, 0x1a9d: 0x0249, + 0x1a9e: 0x0f41, 0x1a9f: 0x0259, 0x1aa0: 0x0f51, 0x1aa1: 0x0359, 0x1aa2: 0x0f61, 0x1aa3: 0x0f71, + 0x1aa4: 0x00d9, 0x1aa5: 0x0f99, 0x1aa6: 0x2039, 0x1aa7: 0x0269, 0x1aa8: 0x01d9, 0x1aa9: 0x0fa9, + 0x1aaa: 0x0fb9, 0x1aab: 0x1089, 0x1aac: 0x0279, 0x1aad: 0x0369, 0x1aae: 0x0289, 0x1aaf: 0x13d1, + 0x1ab0: 0x0039, 0x1ab1: 0x0ee9, 0x1ab2: 0x1159, 0x1ab3: 0x0ef9, 0x1ab4: 0x0f09, 0x1ab5: 0x1199, + 0x1ab6: 0x0f31, 0x1ab7: 0x0249, 0x1ab8: 0x0f41, 0x1ab9: 0x0259, 0x1aba: 0x0f51, 0x1abb: 0x0359, + 0x1abc: 0x0f61, 0x1abd: 0x0f71, 0x1abe: 0x00d9, 0x1abf: 0x0f99, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x2039, 0x1ac1: 0x0269, 0x1ac2: 0x01d9, 0x1ac3: 0x0fa9, 0x1ac4: 0x0fb9, 0x1ac5: 0x1089, + 0x1ac6: 
0x0279, 0x1ac7: 0x0369, 0x1ac8: 0x0289, 0x1ac9: 0x13d1, 0x1aca: 0x0039, 0x1acb: 0x0ee9, + 0x1acc: 0x1159, 0x1acd: 0x0ef9, 0x1ace: 0x0f09, 0x1acf: 0x1199, 0x1ad0: 0x0f31, 0x1ad1: 0x0249, + 0x1ad2: 0x0f41, 0x1ad3: 0x0259, 0x1ad4: 0x0f51, 0x1ad5: 0x0359, 0x1ad6: 0x0f61, 0x1ad7: 0x0f71, + 0x1ad8: 0x00d9, 0x1ad9: 0x0f99, 0x1ada: 0x2039, 0x1adb: 0x0269, 0x1adc: 0x01d9, 0x1add: 0x0fa9, + 0x1ade: 0x0fb9, 0x1adf: 0x1089, 0x1ae0: 0x0279, 0x1ae1: 0x0369, 0x1ae2: 0x0289, 0x1ae3: 0x13d1, + 0x1ae4: 0xba81, 0x1ae5: 0xba99, 0x1ae6: 0x0040, 0x1ae7: 0x0040, 0x1ae8: 0xbab1, 0x1ae9: 0x1099, + 0x1aea: 0x10b1, 0x1aeb: 0x10c9, 0x1aec: 0xbac9, 0x1aed: 0xbae1, 0x1aee: 0xbaf9, 0x1aef: 0x1429, + 0x1af0: 0x1a31, 0x1af1: 0xbb11, 0x1af2: 0xbb29, 0x1af3: 0xbb41, 0x1af4: 0xbb59, 0x1af5: 0xbb71, + 0x1af6: 0xbb89, 0x1af7: 0x2109, 0x1af8: 0x1111, 0x1af9: 0x1429, 0x1afa: 0xbba1, 0x1afb: 0xbbb9, + 0x1afc: 0xbbd1, 0x1afd: 0x10e1, 0x1afe: 0x10f9, 0x1aff: 0xbbe9, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x2079, 0x1b01: 0xbc01, 0x1b02: 0xbab1, 0x1b03: 0x1099, 0x1b04: 0x10b1, 0x1b05: 0x10c9, + 0x1b06: 0xbac9, 0x1b07: 0xbae1, 0x1b08: 0xbaf9, 0x1b09: 0x1429, 0x1b0a: 0x1a31, 0x1b0b: 0xbb11, + 0x1b0c: 0xbb29, 0x1b0d: 0xbb41, 0x1b0e: 0xbb59, 0x1b0f: 0xbb71, 0x1b10: 0xbb89, 0x1b11: 0x2109, + 0x1b12: 0x1111, 0x1b13: 0xbba1, 0x1b14: 0xbba1, 0x1b15: 0xbbb9, 0x1b16: 0xbbd1, 0x1b17: 0x10e1, + 0x1b18: 0x10f9, 0x1b19: 0xbbe9, 0x1b1a: 0x2079, 0x1b1b: 0xbc21, 0x1b1c: 0xbac9, 0x1b1d: 0x1429, + 0x1b1e: 0xbb11, 0x1b1f: 0x10e1, 0x1b20: 0x1111, 0x1b21: 0x2109, 0x1b22: 0xbab1, 0x1b23: 0x1099, + 0x1b24: 0x10b1, 0x1b25: 0x10c9, 0x1b26: 0xbac9, 0x1b27: 0xbae1, 0x1b28: 0xbaf9, 0x1b29: 0x1429, + 0x1b2a: 0x1a31, 0x1b2b: 0xbb11, 0x1b2c: 0xbb29, 0x1b2d: 0xbb41, 0x1b2e: 0xbb59, 0x1b2f: 0xbb71, + 0x1b30: 0xbb89, 0x1b31: 0x2109, 0x1b32: 0x1111, 0x1b33: 0x1429, 0x1b34: 0xbba1, 0x1b35: 0xbbb9, + 0x1b36: 0xbbd1, 0x1b37: 0x10e1, 0x1b38: 0x10f9, 0x1b39: 0xbbe9, 0x1b3a: 0x2079, 0x1b3b: 0xbc01, + 0x1b3c: 0xbab1, 0x1b3d: 0x1099, 0x1b3e: 0x10b1, 0x1b3f: 0x10c9, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0xbac9, 0x1b41: 0xbae1, 0x1b42: 0xbaf9, 0x1b43: 0x1429, 0x1b44: 0x1a31, 0x1b45: 0xbb11, + 0x1b46: 0xbb29, 0x1b47: 0xbb41, 0x1b48: 0xbb59, 0x1b49: 0xbb71, 0x1b4a: 0xbb89, 0x1b4b: 0x2109, + 0x1b4c: 0x1111, 0x1b4d: 0xbba1, 0x1b4e: 0xbba1, 0x1b4f: 0xbbb9, 0x1b50: 0xbbd1, 0x1b51: 0x10e1, + 0x1b52: 0x10f9, 0x1b53: 0xbbe9, 0x1b54: 0x2079, 0x1b55: 0xbc21, 0x1b56: 0xbac9, 0x1b57: 0x1429, + 0x1b58: 0xbb11, 0x1b59: 0x10e1, 0x1b5a: 0x1111, 0x1b5b: 0x2109, 0x1b5c: 0xbab1, 0x1b5d: 0x1099, + 0x1b5e: 0x10b1, 0x1b5f: 0x10c9, 0x1b60: 0xbac9, 0x1b61: 0xbae1, 0x1b62: 0xbaf9, 0x1b63: 0x1429, + 0x1b64: 0x1a31, 0x1b65: 0xbb11, 0x1b66: 0xbb29, 0x1b67: 0xbb41, 0x1b68: 0xbb59, 0x1b69: 0xbb71, + 0x1b6a: 0xbb89, 0x1b6b: 0x2109, 0x1b6c: 0x1111, 0x1b6d: 0x1429, 0x1b6e: 0xbba1, 0x1b6f: 0xbbb9, + 0x1b70: 0xbbd1, 0x1b71: 0x10e1, 0x1b72: 0x10f9, 0x1b73: 0xbbe9, 0x1b74: 0x2079, 0x1b75: 0xbc01, + 0x1b76: 0xbab1, 0x1b77: 0x1099, 0x1b78: 0x10b1, 0x1b79: 0x10c9, 0x1b7a: 0xbac9, 0x1b7b: 0xbae1, + 0x1b7c: 0xbaf9, 0x1b7d: 0x1429, 0x1b7e: 0x1a31, 0x1b7f: 0xbb11, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0xbb29, 0x1b81: 0xbb41, 0x1b82: 0xbb59, 0x1b83: 0xbb71, 0x1b84: 0xbb89, 0x1b85: 0x2109, + 0x1b86: 0x1111, 0x1b87: 0xbba1, 0x1b88: 0xbba1, 0x1b89: 0xbbb9, 0x1b8a: 0xbbd1, 0x1b8b: 0x10e1, + 0x1b8c: 0x10f9, 0x1b8d: 0xbbe9, 0x1b8e: 0x2079, 0x1b8f: 0xbc21, 0x1b90: 0xbac9, 0x1b91: 0x1429, + 0x1b92: 0xbb11, 0x1b93: 0x10e1, 0x1b94: 0x1111, 0x1b95: 0x2109, 0x1b96: 0xbab1, 0x1b97: 0x1099, + 0x1b98: 0x10b1, 0x1b99: 0x10c9, 
0x1b9a: 0xbac9, 0x1b9b: 0xbae1, 0x1b9c: 0xbaf9, 0x1b9d: 0x1429, + 0x1b9e: 0x1a31, 0x1b9f: 0xbb11, 0x1ba0: 0xbb29, 0x1ba1: 0xbb41, 0x1ba2: 0xbb59, 0x1ba3: 0xbb71, + 0x1ba4: 0xbb89, 0x1ba5: 0x2109, 0x1ba6: 0x1111, 0x1ba7: 0x1429, 0x1ba8: 0xbba1, 0x1ba9: 0xbbb9, + 0x1baa: 0xbbd1, 0x1bab: 0x10e1, 0x1bac: 0x10f9, 0x1bad: 0xbbe9, 0x1bae: 0x2079, 0x1baf: 0xbc01, + 0x1bb0: 0xbab1, 0x1bb1: 0x1099, 0x1bb2: 0x10b1, 0x1bb3: 0x10c9, 0x1bb4: 0xbac9, 0x1bb5: 0xbae1, + 0x1bb6: 0xbaf9, 0x1bb7: 0x1429, 0x1bb8: 0x1a31, 0x1bb9: 0xbb11, 0x1bba: 0xbb29, 0x1bbb: 0xbb41, + 0x1bbc: 0xbb59, 0x1bbd: 0xbb71, 0x1bbe: 0xbb89, 0x1bbf: 0x2109, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x1111, 0x1bc1: 0xbba1, 0x1bc2: 0xbba1, 0x1bc3: 0xbbb9, 0x1bc4: 0xbbd1, 0x1bc5: 0x10e1, + 0x1bc6: 0x10f9, 0x1bc7: 0xbbe9, 0x1bc8: 0x2079, 0x1bc9: 0xbc21, 0x1bca: 0xbac9, 0x1bcb: 0x1429, + 0x1bcc: 0xbb11, 0x1bcd: 0x10e1, 0x1bce: 0x1111, 0x1bcf: 0x2109, 0x1bd0: 0xbab1, 0x1bd1: 0x1099, + 0x1bd2: 0x10b1, 0x1bd3: 0x10c9, 0x1bd4: 0xbac9, 0x1bd5: 0xbae1, 0x1bd6: 0xbaf9, 0x1bd7: 0x1429, + 0x1bd8: 0x1a31, 0x1bd9: 0xbb11, 0x1bda: 0xbb29, 0x1bdb: 0xbb41, 0x1bdc: 0xbb59, 0x1bdd: 0xbb71, + 0x1bde: 0xbb89, 0x1bdf: 0x2109, 0x1be0: 0x1111, 0x1be1: 0x1429, 0x1be2: 0xbba1, 0x1be3: 0xbbb9, + 0x1be4: 0xbbd1, 0x1be5: 0x10e1, 0x1be6: 0x10f9, 0x1be7: 0xbbe9, 0x1be8: 0x2079, 0x1be9: 0xbc01, + 0x1bea: 0xbab1, 0x1beb: 0x1099, 0x1bec: 0x10b1, 0x1bed: 0x10c9, 0x1bee: 0xbac9, 0x1bef: 0xbae1, + 0x1bf0: 0xbaf9, 0x1bf1: 0x1429, 0x1bf2: 0x1a31, 0x1bf3: 0xbb11, 0x1bf4: 0xbb29, 0x1bf5: 0xbb41, + 0x1bf6: 0xbb59, 0x1bf7: 0xbb71, 0x1bf8: 0xbb89, 0x1bf9: 0x2109, 0x1bfa: 0x1111, 0x1bfb: 0xbba1, + 0x1bfc: 0xbba1, 0x1bfd: 0xbbb9, 0x1bfe: 0xbbd1, 0x1bff: 0x10e1, + // Block 0x70, offset 0x1c00 + 0x1c00: 0x10f9, 0x1c01: 0xbbe9, 0x1c02: 0x2079, 0x1c03: 0xbc21, 0x1c04: 0xbac9, 0x1c05: 0x1429, + 0x1c06: 0xbb11, 0x1c07: 0x10e1, 0x1c08: 0x1111, 0x1c09: 0x2109, 0x1c0a: 0xbc41, 0x1c0b: 0xbc41, + 0x1c0c: 0x0040, 0x1c0d: 0x0040, 0x1c0e: 0x1f41, 0x1c0f: 0x00c9, 0x1c10: 0x0069, 0x1c11: 0x0079, + 0x1c12: 0x1f51, 0x1c13: 0x1f61, 0x1c14: 0x1f71, 0x1c15: 0x1f81, 0x1c16: 0x1f91, 0x1c17: 0x1fa1, + 0x1c18: 0x1f41, 0x1c19: 0x00c9, 0x1c1a: 0x0069, 0x1c1b: 0x0079, 0x1c1c: 0x1f51, 0x1c1d: 0x1f61, + 0x1c1e: 0x1f71, 0x1c1f: 0x1f81, 0x1c20: 0x1f91, 0x1c21: 0x1fa1, 0x1c22: 0x1f41, 0x1c23: 0x00c9, + 0x1c24: 0x0069, 0x1c25: 0x0079, 0x1c26: 0x1f51, 0x1c27: 0x1f61, 0x1c28: 0x1f71, 0x1c29: 0x1f81, + 0x1c2a: 0x1f91, 0x1c2b: 0x1fa1, 0x1c2c: 0x1f41, 0x1c2d: 0x00c9, 0x1c2e: 0x0069, 0x1c2f: 0x0079, + 0x1c30: 0x1f51, 0x1c31: 0x1f61, 0x1c32: 0x1f71, 0x1c33: 0x1f81, 0x1c34: 0x1f91, 0x1c35: 0x1fa1, + 0x1c36: 0x1f41, 0x1c37: 0x00c9, 0x1c38: 0x0069, 0x1c39: 0x0079, 0x1c3a: 0x1f51, 0x1c3b: 0x1f61, + 0x1c3c: 0x1f71, 0x1c3d: 0x1f81, 0x1c3e: 0x1f91, 0x1c3f: 0x1fa1, + // Block 0x71, offset 0x1c40 + 0x1c40: 0xe115, 0x1c41: 0xe115, 0x1c42: 0xe135, 0x1c43: 0xe135, 0x1c44: 0xe115, 0x1c45: 0xe115, + 0x1c46: 0xe175, 0x1c47: 0xe175, 0x1c48: 0xe115, 0x1c49: 0xe115, 0x1c4a: 0xe135, 0x1c4b: 0xe135, + 0x1c4c: 0xe115, 0x1c4d: 0xe115, 0x1c4e: 0xe1f5, 0x1c4f: 0xe1f5, 0x1c50: 0xe115, 0x1c51: 0xe115, + 0x1c52: 0xe135, 0x1c53: 0xe135, 0x1c54: 0xe115, 0x1c55: 0xe115, 0x1c56: 0xe175, 0x1c57: 0xe175, + 0x1c58: 0xe115, 0x1c59: 0xe115, 0x1c5a: 0xe135, 0x1c5b: 0xe135, 0x1c5c: 0xe115, 0x1c5d: 0xe115, + 0x1c5e: 0x8b05, 0x1c5f: 0x8b05, 0x1c60: 0x04b5, 0x1c61: 0x04b5, 0x1c62: 0x0a08, 0x1c63: 0x0a08, + 0x1c64: 0x0a08, 0x1c65: 0x0a08, 0x1c66: 0x0a08, 0x1c67: 0x0a08, 0x1c68: 0x0a08, 0x1c69: 0x0a08, + 0x1c6a: 0x0a08, 0x1c6b: 0x0a08, 0x1c6c: 0x0a08, 0x1c6d: 
0x0a08, 0x1c6e: 0x0a08, 0x1c6f: 0x0a08, + 0x1c70: 0x0a08, 0x1c71: 0x0a08, 0x1c72: 0x0a08, 0x1c73: 0x0a08, 0x1c74: 0x0a08, 0x1c75: 0x0a08, + 0x1c76: 0x0a08, 0x1c77: 0x0a08, 0x1c78: 0x0a08, 0x1c79: 0x0a08, 0x1c7a: 0x0a08, 0x1c7b: 0x0a08, + 0x1c7c: 0x0a08, 0x1c7d: 0x0a08, 0x1c7e: 0x0a08, 0x1c7f: 0x0a08, + // Block 0x72, offset 0x1c80 + 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0x0040, 0x1c85: 0xb411, + 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0xb399, 0x1c8b: 0xb3b1, + 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9, + 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231, + 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0xbc59, 0x1c9d: 0x7949, + 0x1c9e: 0xbc71, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040, + 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0x0040, 0x1ca9: 0xb429, + 0x1caa: 0xb399, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, + 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, + 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0x0040, 0x1cbb: 0xb351, + 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0x0040, 0x1cc1: 0x0040, 0x1cc2: 0xb201, 0x1cc3: 0x0040, 0x1cc4: 0x0040, 0x1cc5: 0x0040, + 0x1cc6: 0x0040, 0x1cc7: 0xb219, 0x1cc8: 0x0040, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1, + 0x1ccc: 0x0040, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0x0040, 0x1cd1: 0xb2d9, + 0x1cd2: 0xb381, 0x1cd3: 0x0040, 0x1cd4: 0xb2c1, 0x1cd5: 0x0040, 0x1cd6: 0x0040, 0x1cd7: 0xb231, + 0x1cd8: 0x0040, 0x1cd9: 0xb2f1, 0x1cda: 0x0040, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x7949, + 0x1cde: 0x0040, 0x1cdf: 0xbc89, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0x0040, + 0x1ce4: 0xb3f9, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429, + 0x1cea: 0xb399, 0x1ceb: 0x0040, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339, + 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0x0040, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1, + 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0x0040, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351, + 0x1cfc: 0xbc59, 0x1cfd: 0x0040, 0x1cfe: 0xbc71, 0x1cff: 0x0040, + // Block 0x74, offset 0x1d00 + 0x1d00: 0xb189, 0x1d01: 0xb1a1, 0x1d02: 0xb201, 0x1d03: 0xb249, 0x1d04: 0xb3f9, 0x1d05: 0xb411, + 0x1d06: 0xb291, 0x1d07: 0xb219, 0x1d08: 0xb309, 0x1d09: 0xb429, 0x1d0a: 0x0040, 0x1d0b: 0xb3b1, + 0x1d0c: 0xb3c9, 0x1d0d: 0xb3e1, 0x1d0e: 0xb2a9, 0x1d0f: 0xb339, 0x1d10: 0xb369, 0x1d11: 0xb2d9, + 0x1d12: 0xb381, 0x1d13: 0xb279, 0x1d14: 0xb2c1, 0x1d15: 0xb1d1, 0x1d16: 0xb1e9, 0x1d17: 0xb231, + 0x1d18: 0xb261, 0x1d19: 0xb2f1, 0x1d1a: 0xb321, 0x1d1b: 0xb351, 0x1d1c: 0x0040, 0x1d1d: 0x0040, + 0x1d1e: 0x0040, 0x1d1f: 0x0040, 0x1d20: 0x0040, 0x1d21: 0xb1a1, 0x1d22: 0xb201, 0x1d23: 0xb249, + 0x1d24: 0x0040, 0x1d25: 0xb411, 0x1d26: 0xb291, 0x1d27: 0xb219, 0x1d28: 0xb309, 0x1d29: 0xb429, + 0x1d2a: 0x0040, 0x1d2b: 0xb3b1, 0x1d2c: 0xb3c9, 0x1d2d: 0xb3e1, 0x1d2e: 0xb2a9, 0x1d2f: 0xb339, + 0x1d30: 0xb369, 0x1d31: 0xb2d9, 0x1d32: 0xb381, 0x1d33: 0xb279, 0x1d34: 0xb2c1, 0x1d35: 0xb1d1, + 0x1d36: 0xb1e9, 0x1d37: 0xb231, 0x1d38: 0xb261, 0x1d39: 0xb2f1, 0x1d3a: 0xb321, 0x1d3b: 0xb351, + 0x1d3c: 0x0040, 0x1d3d: 0x0040, 0x1d3e: 0x0040, 0x1d3f: 0x0040, + // Block 0x75, 
offset 0x1d40 + 0x1d40: 0x0040, 0x1d41: 0xbca2, 0x1d42: 0xbcba, 0x1d43: 0xbcd2, 0x1d44: 0xbcea, 0x1d45: 0xbd02, + 0x1d46: 0xbd1a, 0x1d47: 0xbd32, 0x1d48: 0xbd4a, 0x1d49: 0xbd62, 0x1d4a: 0xbd7a, 0x1d4b: 0x0018, + 0x1d4c: 0x0018, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xbd92, 0x1d51: 0xbdb2, + 0x1d52: 0xbdd2, 0x1d53: 0xbdf2, 0x1d54: 0xbe12, 0x1d55: 0xbe32, 0x1d56: 0xbe52, 0x1d57: 0xbe72, + 0x1d58: 0xbe92, 0x1d59: 0xbeb2, 0x1d5a: 0xbed2, 0x1d5b: 0xbef2, 0x1d5c: 0xbf12, 0x1d5d: 0xbf32, + 0x1d5e: 0xbf52, 0x1d5f: 0xbf72, 0x1d60: 0xbf92, 0x1d61: 0xbfb2, 0x1d62: 0xbfd2, 0x1d63: 0xbff2, + 0x1d64: 0xc012, 0x1d65: 0xc032, 0x1d66: 0xc052, 0x1d67: 0xc072, 0x1d68: 0xc092, 0x1d69: 0xc0b2, + 0x1d6a: 0xc0d1, 0x1d6b: 0x1159, 0x1d6c: 0x0269, 0x1d6d: 0x6671, 0x1d6e: 0xc111, 0x1d6f: 0x0040, + 0x1d70: 0x0039, 0x1d71: 0x0ee9, 0x1d72: 0x1159, 0x1d73: 0x0ef9, 0x1d74: 0x0f09, 0x1d75: 0x1199, + 0x1d76: 0x0f31, 0x1d77: 0x0249, 0x1d78: 0x0f41, 0x1d79: 0x0259, 0x1d7a: 0x0f51, 0x1d7b: 0x0359, + 0x1d7c: 0x0f61, 0x1d7d: 0x0f71, 0x1d7e: 0x00d9, 0x1d7f: 0x0f99, + // Block 0x76, offset 0x1d80 + 0x1d80: 0x2039, 0x1d81: 0x0269, 0x1d82: 0x01d9, 0x1d83: 0x0fa9, 0x1d84: 0x0fb9, 0x1d85: 0x1089, + 0x1d86: 0x0279, 0x1d87: 0x0369, 0x1d88: 0x0289, 0x1d89: 0x13d1, 0x1d8a: 0xc129, 0x1d8b: 0x65b1, + 0x1d8c: 0xc141, 0x1d8d: 0x1441, 0x1d8e: 0xc159, 0x1d8f: 0xc179, 0x1d90: 0x0018, 0x1d91: 0x0018, + 0x1d92: 0x0018, 0x1d93: 0x0018, 0x1d94: 0x0018, 0x1d95: 0x0018, 0x1d96: 0x0018, 0x1d97: 0x0018, + 0x1d98: 0x0018, 0x1d99: 0x0018, 0x1d9a: 0x0018, 0x1d9b: 0x0018, 0x1d9c: 0x0018, 0x1d9d: 0x0018, + 0x1d9e: 0x0018, 0x1d9f: 0x0018, 0x1da0: 0x0018, 0x1da1: 0x0018, 0x1da2: 0x0018, 0x1da3: 0x0018, + 0x1da4: 0x0018, 0x1da5: 0x0018, 0x1da6: 0x0018, 0x1da7: 0x0018, 0x1da8: 0x0018, 0x1da9: 0x0018, + 0x1daa: 0xc191, 0x1dab: 0xc1a9, 0x1dac: 0x0040, 0x1dad: 0x0040, 0x1dae: 0x0040, 0x1daf: 0x0040, + 0x1db0: 0x0018, 0x1db1: 0x0018, 0x1db2: 0x0018, 0x1db3: 0x0018, 0x1db4: 0x0018, 0x1db5: 0x0018, + 0x1db6: 0x0018, 0x1db7: 0x0018, 0x1db8: 0x0018, 0x1db9: 0x0018, 0x1dba: 0x0018, 0x1dbb: 0x0018, + 0x1dbc: 0x0018, 0x1dbd: 0x0018, 0x1dbe: 0x0018, 0x1dbf: 0x0018, + // Block 0x77, offset 0x1dc0 + 0x1dc0: 0xc1d9, 0x1dc1: 0xc211, 0x1dc2: 0xc249, 0x1dc3: 0x0040, 0x1dc4: 0x0040, 0x1dc5: 0x0040, + 0x1dc6: 0x0040, 0x1dc7: 0x0040, 0x1dc8: 0x0040, 0x1dc9: 0x0040, 0x1dca: 0x0040, 0x1dcb: 0x0040, + 0x1dcc: 0x0040, 0x1dcd: 0x0040, 0x1dce: 0x0040, 0x1dcf: 0x0040, 0x1dd0: 0xc269, 0x1dd1: 0xc289, + 0x1dd2: 0xc2a9, 0x1dd3: 0xc2c9, 0x1dd4: 0xc2e9, 0x1dd5: 0xc309, 0x1dd6: 0xc329, 0x1dd7: 0xc349, + 0x1dd8: 0xc369, 0x1dd9: 0xc389, 0x1dda: 0xc3a9, 0x1ddb: 0xc3c9, 0x1ddc: 0xc3e9, 0x1ddd: 0xc409, + 0x1dde: 0xc429, 0x1ddf: 0xc449, 0x1de0: 0xc469, 0x1de1: 0xc489, 0x1de2: 0xc4a9, 0x1de3: 0xc4c9, + 0x1de4: 0xc4e9, 0x1de5: 0xc509, 0x1de6: 0xc529, 0x1de7: 0xc549, 0x1de8: 0xc569, 0x1de9: 0xc589, + 0x1dea: 0xc5a9, 0x1deb: 0xc5c9, 0x1dec: 0xc5e9, 0x1ded: 0xc609, 0x1dee: 0xc629, 0x1def: 0xc649, + 0x1df0: 0xc669, 0x1df1: 0xc689, 0x1df2: 0xc6a9, 0x1df3: 0xc6c9, 0x1df4: 0xc6e9, 0x1df5: 0xc709, + 0x1df6: 0xc729, 0x1df7: 0xc749, 0x1df8: 0xc769, 0x1df9: 0xc789, 0x1dfa: 0xc7a9, 0x1dfb: 0xc7c9, + 0x1dfc: 0x0040, 0x1dfd: 0x0040, 0x1dfe: 0x0040, 0x1dff: 0x0040, + // Block 0x78, offset 0x1e00 + 0x1e00: 0xcaf9, 0x1e01: 0xcb19, 0x1e02: 0xcb39, 0x1e03: 0x8b1d, 0x1e04: 0xcb59, 0x1e05: 0xcb79, + 0x1e06: 0xcb99, 0x1e07: 0xcbb9, 0x1e08: 0xcbd9, 0x1e09: 0xcbf9, 0x1e0a: 0xcc19, 0x1e0b: 0xcc39, + 0x1e0c: 0xcc59, 0x1e0d: 0x8b3d, 0x1e0e: 0xcc79, 0x1e0f: 0xcc99, 0x1e10: 0xccb9, 0x1e11: 0xccd9, + 0x1e12: 
0x8b5d, 0x1e13: 0xccf9, 0x1e14: 0xcd19, 0x1e15: 0xc429, 0x1e16: 0x8b7d, 0x1e17: 0xcd39, + 0x1e18: 0xcd59, 0x1e19: 0xcd79, 0x1e1a: 0xcd99, 0x1e1b: 0xcdb9, 0x1e1c: 0x8b9d, 0x1e1d: 0xcdd9, + 0x1e1e: 0xcdf9, 0x1e1f: 0xce19, 0x1e20: 0xce39, 0x1e21: 0xce59, 0x1e22: 0xc789, 0x1e23: 0xce79, + 0x1e24: 0xce99, 0x1e25: 0xceb9, 0x1e26: 0xced9, 0x1e27: 0xcef9, 0x1e28: 0xcf19, 0x1e29: 0xcf39, + 0x1e2a: 0xcf59, 0x1e2b: 0xcf79, 0x1e2c: 0xcf99, 0x1e2d: 0xcfb9, 0x1e2e: 0xcfd9, 0x1e2f: 0xcff9, + 0x1e30: 0xd019, 0x1e31: 0xd039, 0x1e32: 0xd039, 0x1e33: 0xd039, 0x1e34: 0x8bbd, 0x1e35: 0xd059, + 0x1e36: 0xd079, 0x1e37: 0xd099, 0x1e38: 0x8bdd, 0x1e39: 0xd0b9, 0x1e3a: 0xd0d9, 0x1e3b: 0xd0f9, + 0x1e3c: 0xd119, 0x1e3d: 0xd139, 0x1e3e: 0xd159, 0x1e3f: 0xd179, + // Block 0x79, offset 0x1e40 + 0x1e40: 0xd199, 0x1e41: 0xd1b9, 0x1e42: 0xd1d9, 0x1e43: 0xd1f9, 0x1e44: 0xd219, 0x1e45: 0xd239, + 0x1e46: 0xd239, 0x1e47: 0xd259, 0x1e48: 0xd279, 0x1e49: 0xd299, 0x1e4a: 0xd2b9, 0x1e4b: 0xd2d9, + 0x1e4c: 0xd2f9, 0x1e4d: 0xd319, 0x1e4e: 0xd339, 0x1e4f: 0xd359, 0x1e50: 0xd379, 0x1e51: 0xd399, + 0x1e52: 0xd3b9, 0x1e53: 0xd3d9, 0x1e54: 0xd3f9, 0x1e55: 0xd419, 0x1e56: 0xd439, 0x1e57: 0xd459, + 0x1e58: 0xd479, 0x1e59: 0x8bfd, 0x1e5a: 0xd499, 0x1e5b: 0xd4b9, 0x1e5c: 0xd4d9, 0x1e5d: 0xc309, + 0x1e5e: 0xd4f9, 0x1e5f: 0xd519, 0x1e60: 0x8c1d, 0x1e61: 0x8c3d, 0x1e62: 0xd539, 0x1e63: 0xd559, + 0x1e64: 0xd579, 0x1e65: 0xd599, 0x1e66: 0xd5b9, 0x1e67: 0xd5d9, 0x1e68: 0x2040, 0x1e69: 0xd5f9, + 0x1e6a: 0xd619, 0x1e6b: 0xd619, 0x1e6c: 0x8c5d, 0x1e6d: 0xd639, 0x1e6e: 0xd659, 0x1e6f: 0xd679, + 0x1e70: 0xd699, 0x1e71: 0x8c7d, 0x1e72: 0xd6b9, 0x1e73: 0xd6d9, 0x1e74: 0x2040, 0x1e75: 0xd6f9, + 0x1e76: 0xd719, 0x1e77: 0xd739, 0x1e78: 0xd759, 0x1e79: 0xd779, 0x1e7a: 0xd799, 0x1e7b: 0x8c9d, + 0x1e7c: 0xd7b9, 0x1e7d: 0x8cbd, 0x1e7e: 0xd7d9, 0x1e7f: 0xd7f9, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0xd819, 0x1e81: 0xd839, 0x1e82: 0xd859, 0x1e83: 0xd879, 0x1e84: 0xd899, 0x1e85: 0xd8b9, + 0x1e86: 0xd8d9, 0x1e87: 0xd8f9, 0x1e88: 0xd919, 0x1e89: 0x8cdd, 0x1e8a: 0xd939, 0x1e8b: 0xd959, + 0x1e8c: 0xd979, 0x1e8d: 0xd999, 0x1e8e: 0xd9b9, 0x1e8f: 0x8cfd, 0x1e90: 0xd9d9, 0x1e91: 0x8d1d, + 0x1e92: 0x8d3d, 0x1e93: 0xd9f9, 0x1e94: 0xda19, 0x1e95: 0xda19, 0x1e96: 0xda39, 0x1e97: 0x8d5d, + 0x1e98: 0x8d7d, 0x1e99: 0xda59, 0x1e9a: 0xda79, 0x1e9b: 0xda99, 0x1e9c: 0xdab9, 0x1e9d: 0xdad9, + 0x1e9e: 0xdaf9, 0x1e9f: 0xdb19, 0x1ea0: 0xdb39, 0x1ea1: 0xdb59, 0x1ea2: 0xdb79, 0x1ea3: 0xdb99, + 0x1ea4: 0x8d9d, 0x1ea5: 0xdbb9, 0x1ea6: 0xdbd9, 0x1ea7: 0xdbf9, 0x1ea8: 0xdc19, 0x1ea9: 0xdbf9, + 0x1eaa: 0xdc39, 0x1eab: 0xdc59, 0x1eac: 0xdc79, 0x1ead: 0xdc99, 0x1eae: 0xdcb9, 0x1eaf: 0xdcd9, + 0x1eb0: 0xdcf9, 0x1eb1: 0xdd19, 0x1eb2: 0xdd39, 0x1eb3: 0xdd59, 0x1eb4: 0xdd79, 0x1eb5: 0xdd99, + 0x1eb6: 0xddb9, 0x1eb7: 0xddd9, 0x1eb8: 0x8dbd, 0x1eb9: 0xddf9, 0x1eba: 0xde19, 0x1ebb: 0xde39, + 0x1ebc: 0xde59, 0x1ebd: 0xde79, 0x1ebe: 0x8ddd, 0x1ebf: 0xde99, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0xe599, 0x1ec1: 0xe5b9, 0x1ec2: 0xe5d9, 0x1ec3: 0xe5f9, 0x1ec4: 0xe619, 0x1ec5: 0xe639, + 0x1ec6: 0x8efd, 0x1ec7: 0xe659, 0x1ec8: 0xe679, 0x1ec9: 0xe699, 0x1eca: 0xe6b9, 0x1ecb: 0xe6d9, + 0x1ecc: 0xe6f9, 0x1ecd: 0x8f1d, 0x1ece: 0xe719, 0x1ecf: 0xe739, 0x1ed0: 0x8f3d, 0x1ed1: 0x8f5d, + 0x1ed2: 0xe759, 0x1ed3: 0xe779, 0x1ed4: 0xe799, 0x1ed5: 0xe7b9, 0x1ed6: 0xe7d9, 0x1ed7: 0xe7f9, + 0x1ed8: 0xe819, 0x1ed9: 0xe839, 0x1eda: 0xe859, 0x1edb: 0x8f7d, 0x1edc: 0xe879, 0x1edd: 0x8f9d, + 0x1ede: 0xe899, 0x1edf: 0x2040, 0x1ee0: 0xe8b9, 0x1ee1: 0xe8d9, 0x1ee2: 0xe8f9, 0x1ee3: 0x8fbd, + 0x1ee4: 0xe919, 0x1ee5: 0xe939, 
0x1ee6: 0x8fdd, 0x1ee7: 0x8ffd, 0x1ee8: 0xe959, 0x1ee9: 0xe979, + 0x1eea: 0xe999, 0x1eeb: 0xe9b9, 0x1eec: 0xe9d9, 0x1eed: 0xe9d9, 0x1eee: 0xe9f9, 0x1eef: 0xea19, + 0x1ef0: 0xea39, 0x1ef1: 0xea59, 0x1ef2: 0xea79, 0x1ef3: 0xea99, 0x1ef4: 0xeab9, 0x1ef5: 0x901d, + 0x1ef6: 0xead9, 0x1ef7: 0x903d, 0x1ef8: 0xeaf9, 0x1ef9: 0x905d, 0x1efa: 0xeb19, 0x1efb: 0x907d, + 0x1efc: 0x909d, 0x1efd: 0x90bd, 0x1efe: 0xeb39, 0x1eff: 0xeb59, + // Block 0x7c, offset 0x1f00 + 0x1f00: 0xeb79, 0x1f01: 0x90dd, 0x1f02: 0x90fd, 0x1f03: 0x911d, 0x1f04: 0x913d, 0x1f05: 0xeb99, + 0x1f06: 0xebb9, 0x1f07: 0xebb9, 0x1f08: 0xebd9, 0x1f09: 0xebf9, 0x1f0a: 0xec19, 0x1f0b: 0xec39, + 0x1f0c: 0xec59, 0x1f0d: 0x915d, 0x1f0e: 0xec79, 0x1f0f: 0xec99, 0x1f10: 0xecb9, 0x1f11: 0xecd9, + 0x1f12: 0x917d, 0x1f13: 0xecf9, 0x1f14: 0x919d, 0x1f15: 0x91bd, 0x1f16: 0xed19, 0x1f17: 0xed39, + 0x1f18: 0xed59, 0x1f19: 0xed79, 0x1f1a: 0xed99, 0x1f1b: 0xedb9, 0x1f1c: 0x91dd, 0x1f1d: 0x91fd, + 0x1f1e: 0x921d, 0x1f1f: 0x2040, 0x1f20: 0xedd9, 0x1f21: 0x923d, 0x1f22: 0xedf9, 0x1f23: 0xee19, + 0x1f24: 0xee39, 0x1f25: 0x925d, 0x1f26: 0xee59, 0x1f27: 0xee79, 0x1f28: 0xee99, 0x1f29: 0xeeb9, + 0x1f2a: 0xeed9, 0x1f2b: 0x927d, 0x1f2c: 0xeef9, 0x1f2d: 0xef19, 0x1f2e: 0xef39, 0x1f2f: 0xef59, + 0x1f30: 0xef79, 0x1f31: 0xef99, 0x1f32: 0x929d, 0x1f33: 0x92bd, 0x1f34: 0xefb9, 0x1f35: 0x92dd, + 0x1f36: 0xefd9, 0x1f37: 0x92fd, 0x1f38: 0xeff9, 0x1f39: 0xf019, 0x1f3a: 0xf039, 0x1f3b: 0x931d, + 0x1f3c: 0x933d, 0x1f3d: 0xf059, 0x1f3e: 0x935d, 0x1f3f: 0xf079, + // Block 0x7d, offset 0x1f40 + 0x1f40: 0xf6b9, 0x1f41: 0xf6d9, 0x1f42: 0xf6f9, 0x1f43: 0xf719, 0x1f44: 0xf739, 0x1f45: 0x951d, + 0x1f46: 0xf759, 0x1f47: 0xf779, 0x1f48: 0xf799, 0x1f49: 0xf7b9, 0x1f4a: 0xf7d9, 0x1f4b: 0x953d, + 0x1f4c: 0x955d, 0x1f4d: 0xf7f9, 0x1f4e: 0xf819, 0x1f4f: 0xf839, 0x1f50: 0xf859, 0x1f51: 0xf879, + 0x1f52: 0xf899, 0x1f53: 0x957d, 0x1f54: 0xf8b9, 0x1f55: 0xf8d9, 0x1f56: 0xf8f9, 0x1f57: 0xf919, + 0x1f58: 0x959d, 0x1f59: 0x95bd, 0x1f5a: 0xf939, 0x1f5b: 0xf959, 0x1f5c: 0xf979, 0x1f5d: 0x95dd, + 0x1f5e: 0xf999, 0x1f5f: 0xf9b9, 0x1f60: 0x6815, 0x1f61: 0x95fd, 0x1f62: 0xf9d9, 0x1f63: 0xf9f9, + 0x1f64: 0xfa19, 0x1f65: 0x961d, 0x1f66: 0xfa39, 0x1f67: 0xfa59, 0x1f68: 0xfa79, 0x1f69: 0xfa99, + 0x1f6a: 0xfab9, 0x1f6b: 0xfad9, 0x1f6c: 0xfaf9, 0x1f6d: 0x963d, 0x1f6e: 0xfb19, 0x1f6f: 0xfb39, + 0x1f70: 0xfb59, 0x1f71: 0x965d, 0x1f72: 0xfb79, 0x1f73: 0xfb99, 0x1f74: 0xfbb9, 0x1f75: 0xfbd9, + 0x1f76: 0x7b35, 0x1f77: 0x967d, 0x1f78: 0xfbf9, 0x1f79: 0xfc19, 0x1f7a: 0xfc39, 0x1f7b: 0x969d, + 0x1f7c: 0xfc59, 0x1f7d: 0x96bd, 0x1f7e: 0xfc79, 0x1f7f: 0xfc79, + // Block 0x7e, offset 0x1f80 + 0x1f80: 0xfc99, 0x1f81: 0x96dd, 0x1f82: 0xfcb9, 0x1f83: 0xfcd9, 0x1f84: 0xfcf9, 0x1f85: 0xfd19, + 0x1f86: 0xfd39, 0x1f87: 0xfd59, 0x1f88: 0xfd79, 0x1f89: 0x96fd, 0x1f8a: 0xfd99, 0x1f8b: 0xfdb9, + 0x1f8c: 0xfdd9, 0x1f8d: 0xfdf9, 0x1f8e: 0xfe19, 0x1f8f: 0xfe39, 0x1f90: 0x971d, 0x1f91: 0xfe59, + 0x1f92: 0x973d, 0x1f93: 0x975d, 0x1f94: 0x977d, 0x1f95: 0xfe79, 0x1f96: 0xfe99, 0x1f97: 0xfeb9, + 0x1f98: 0xfed9, 0x1f99: 0xfef9, 0x1f9a: 0xff19, 0x1f9b: 0xff39, 0x1f9c: 0xff59, 0x1f9d: 0x979d, + 0x1f9e: 0x0040, 0x1f9f: 0x0040, 0x1fa0: 0x0040, 0x1fa1: 0x0040, 0x1fa2: 0x0040, 0x1fa3: 0x0040, + 0x1fa4: 0x0040, 0x1fa5: 0x0040, 0x1fa6: 0x0040, 0x1fa7: 0x0040, 0x1fa8: 0x0040, 0x1fa9: 0x0040, + 0x1faa: 0x0040, 0x1fab: 0x0040, 0x1fac: 0x0040, 0x1fad: 0x0040, 0x1fae: 0x0040, 0x1faf: 0x0040, + 0x1fb0: 0x0040, 0x1fb1: 0x0040, 0x1fb2: 0x0040, 0x1fb3: 0x0040, 0x1fb4: 0x0040, 0x1fb5: 0x0040, + 0x1fb6: 0x0040, 0x1fb7: 0x0040, 0x1fb8: 0x0040, 0x1fb9: 
0x0040, 0x1fba: 0x0040, 0x1fbb: 0x0040,
+ 0x1fbc: 0x0040, 0x1fbd: 0x0040, 0x1fbe: 0x0040, 0x1fbf: 0x0040,
+}
+
+// idnaIndex: 36 blocks, 2304 entries, 4608 bytes
+// Block 0 is the zero block.
+var idnaIndex = [2304]uint16{
+ // Block 0x0, offset 0x0
+ // Block 0x1, offset 0x40
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc2: 0x01, 0xc3: 0x7d, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05,
+ 0xc8: 0x06, 0xc9: 0x7e, 0xca: 0x7f, 0xcb: 0x07, 0xcc: 0x80, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a,
+ 0xd0: 0x81, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x82, 0xd6: 0x83, 0xd7: 0x84,
+ 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x85, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x86, 0xde: 0x87, 0xdf: 0x88,
+ 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07,
+ 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c,
+ 0xf0: 0x1d, 0xf1: 0x1e, 0xf2: 0x1e, 0xf3: 0x20, 0xf4: 0x21,
+ // Block 0x4, offset 0x100
+ 0x120: 0x89, 0x121: 0x13, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16,
+ 0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8d,
+ 0x130: 0x8e, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x8f, 0x135: 0x21, 0x136: 0x90, 0x137: 0x91,
+ 0x138: 0x92, 0x139: 0x93, 0x13a: 0x22, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x96,
+ // Block 0x5, offset 0x140
+ 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e,
+ 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6,
+ 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f,
+ 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae,
+ 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6,
+ 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe,
+ 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 0xc3,
+ 0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc4, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c,
+ // Block 0x6, offset 0x180
+ 0x180: 0x2d, 0x181: 0x2e, 0x182: 0x2f, 0x183: 0xc5, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc6, 0x187: 0x9b,
+ 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0x9b,
+ 0x190: 0xca, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b,
+ 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b,
+ 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b,
+ 0x1a8: 0xcb, 0x1a9: 0xcc, 0x1aa: 0x9b, 0x1ab: 0xcd, 0x1ac: 0x9b, 0x1ad: 0xce, 0x1ae: 0xcf, 0x1af: 0xd0,
+ 0x1b0: 0xd1, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd2, 0x1b5: 0xd3, 0x1b6: 0xd4, 0x1b7: 0xd5,
+ 0x1b8: 0xd6, 0x1b9: 0xd7, 0x1ba: 0xd8, 0x1bb: 0xd9, 0x1bc: 0xda, 0x1bd: 0xdb, 0x1be: 0xdc, 0x1bf: 0x37,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x38, 0x1c1: 0xdd, 0x1c2: 0xde, 0x1c3: 0xdf, 0x1c4: 0xe0, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe1,
+ 0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41,
+ 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f,
+ 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da:
0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f, + 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f, + 0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f, + 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f, + 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f, + // Block 0x8, offset 0x200 + 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f, + 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f, + 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f, + 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f, + 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f, + 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f, + 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b, + 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f, + // Block 0x9, offset 0x240 + 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f, + 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f, + 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f, + 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f, + 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f, + 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f, + 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f, + 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f, + // Block 0xa, offset 0x280 + 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f, + 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f, + 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f, + 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f, + 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f, + 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f, + 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f, + 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe3, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f, + 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f, + 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe4, 0x2d3: 0xe5, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f, + 0x2d8: 0xe6, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe7, 0x2dc: 0x44, 0x2dd: 0x45, 
0x2de: 0x46, 0x2df: 0xe8, + 0x2e0: 0xe9, 0x2e1: 0xea, 0x2e2: 0xeb, 0x2e3: 0xec, 0x2e4: 0xed, 0x2e5: 0xee, 0x2e6: 0xef, 0x2e7: 0xf0, + 0x2e8: 0xf1, 0x2e9: 0xf2, 0x2ea: 0xf3, 0x2eb: 0xf4, 0x2ec: 0xf5, 0x2ed: 0xf6, 0x2ee: 0xf7, 0x2ef: 0xf8, + 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f, + 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f, + // Block 0xc, offset 0x300 + 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f, + 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f, + 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f, + 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xf9, 0x31f: 0xfa, + // Block 0xd, offset 0x340 + 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba, + 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba, + 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba, + 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba, + 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba, + 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba, + 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba, + 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba, + // Block 0xe, offset 0x380 + 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba, + 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba, + 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba, + 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba, + 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfb, 0x3a5: 0xfc, 0x3a6: 0xfd, 0x3a7: 0xfe, + 0x3a8: 0x47, 0x3a9: 0xff, 0x3aa: 0x100, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c, + 0x3b0: 0x101, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x102, 0x3b7: 0x52, + 0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x103, 0x3c1: 0x104, 0x3c2: 0x9f, 0x3c3: 0x105, 0x3c4: 0x106, 0x3c5: 0x9b, 0x3c6: 0x107, 0x3c7: 0x108, + 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x109, 0x3cb: 0x10a, 0x3cc: 0x10b, 0x3cd: 0x10c, 0x3ce: 0x10d, 0x3cf: 0x10e, + 0x3d0: 0x10f, 0x3d1: 0x9f, 0x3d2: 0x110, 0x3d3: 0x111, 0x3d4: 0x112, 0x3d5: 0x113, 0x3d6: 0xba, 0x3d7: 0xba, + 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x114, 0x3dd: 0x115, 0x3de: 0xba, 0x3df: 0xba, + 0x3e0: 0x116, 0x3e1: 0x117, 0x3e2: 0x118, 0x3e3: 0x119, 0x3e4: 0x11a, 0x3e5: 0xba, 0x3e6: 0x11b, 0x3e7: 0x11c, + 0x3e8: 0x11d, 0x3e9: 0x11e, 0x3ea: 0x11f, 0x3eb: 0x5b, 0x3ec: 0x120, 0x3ed: 0x121, 0x3ee: 0x5c, 0x3ef: 0xba, + 0x3f0: 0x122, 0x3f1: 0x123, 0x3f2: 0x124, 0x3f3: 0x125, 0x3f4: 0xba, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba, + 0x3f8: 0xba, 0x3f9: 0x126, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0xba, 0x3fd: 0xba, 0x3fe: 
0xba, 0x3ff: 0xba, + // Block 0x10, offset 0x400 + 0x400: 0x127, 0x401: 0x128, 0x402: 0x129, 0x403: 0x12a, 0x404: 0x12b, 0x405: 0x12c, 0x406: 0x12d, 0x407: 0x12e, + 0x408: 0x12f, 0x409: 0xba, 0x40a: 0x130, 0x40b: 0x131, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xba, 0x40f: 0xba, + 0x410: 0x132, 0x411: 0x133, 0x412: 0x134, 0x413: 0x135, 0x414: 0xba, 0x415: 0xba, 0x416: 0x136, 0x417: 0x137, + 0x418: 0x138, 0x419: 0x139, 0x41a: 0x13a, 0x41b: 0x13b, 0x41c: 0x13c, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba, + 0x420: 0xba, 0x421: 0xba, 0x422: 0x13d, 0x423: 0x13e, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba, + 0x428: 0x13f, 0x429: 0x140, 0x42a: 0x141, 0x42b: 0x142, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba, + 0x430: 0x143, 0x431: 0x144, 0x432: 0x145, 0x433: 0xba, 0x434: 0x146, 0x435: 0x147, 0x436: 0xba, 0x437: 0xba, + 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0xba, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba, + // Block 0x11, offset 0x440 + 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f, + 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x148, 0x44f: 0xba, + 0x450: 0x9b, 0x451: 0x149, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x14a, 0x456: 0xba, 0x457: 0xba, + 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba, + 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba, + 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba, + 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba, + 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba, + // Block 0x12, offset 0x480 + 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f, + 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f, + 0x490: 0x14b, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba, + 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba, + 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba, + 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba, + 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba, + 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba, + // Block 0x13, offset 0x4c0 + 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba, + 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba, + 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f, + 0x4d8: 0x9f, 0x4d9: 0x14c, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba, + 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba, + 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba, + 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba, + 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 0x4fe: 
0xba, 0x4ff: 0xba, + // Block 0x14, offset 0x500 + 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba, + 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba, + 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba, + 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba, + 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f, + 0x528: 0x142, 0x529: 0x14d, 0x52a: 0xba, 0x52b: 0x14e, 0x52c: 0x14f, 0x52d: 0x150, 0x52e: 0x151, 0x52f: 0xba, + 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba, + 0x538: 0xba, 0x539: 0xba, 0x53a: 0xba, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x152, 0x53e: 0x153, 0x53f: 0x154, + // Block 0x15, offset 0x540 + 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f, + 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f, + 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f, + 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x155, + 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f, + 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x156, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba, + 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba, + 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba, + // Block 0x16, offset 0x580 + 0x580: 0x9f, 0x581: 0x9f, 0x582: 0x9f, 0x583: 0x9f, 0x584: 0x157, 0x585: 0x158, 0x586: 0x9f, 0x587: 0x9f, + 0x588: 0x9f, 0x589: 0x9f, 0x58a: 0x9f, 0x58b: 0x159, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba, + 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba, + 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba, + 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba, + 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba, + 0x5b0: 0x9f, 0x5b1: 0x15a, 0x5b2: 0x15b, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba, + 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x15c, 0x5c4: 0x15d, 0x5c5: 0x15e, 0x5c6: 0x15f, 0x5c7: 0x160, + 0x5c8: 0x9b, 0x5c9: 0x161, 0x5ca: 0xba, 0x5cb: 0xba, 0x5cc: 0x9b, 0x5cd: 0x162, 0x5ce: 0xba, 0x5cf: 0xba, + 0x5d0: 0x5f, 0x5d1: 0x60, 0x5d2: 0x61, 0x5d3: 0x62, 0x5d4: 0x63, 0x5d5: 0x64, 0x5d6: 0x65, 0x5d7: 0x66, + 0x5d8: 0x67, 0x5d9: 0x68, 0x5da: 0x69, 0x5db: 0x6a, 0x5dc: 0x6b, 0x5dd: 0x6c, 0x5de: 0x6d, 0x5df: 0x6e, + 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b, + 0x5e8: 0x163, 0x5e9: 0x164, 0x5ea: 0x165, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba, + 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba, + 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 0x5ff: 
0xba, + // Block 0x18, offset 0x600 + 0x600: 0x166, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba, + 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba, + 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba, + 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba, + 0x620: 0x122, 0x621: 0x122, 0x622: 0x122, 0x623: 0x167, 0x624: 0x6f, 0x625: 0x168, 0x626: 0xba, 0x627: 0xba, + 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba, + 0x630: 0xba, 0x631: 0xba, 0x632: 0xba, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba, + 0x638: 0x70, 0x639: 0x71, 0x63a: 0x72, 0x63b: 0x169, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba, + // Block 0x19, offset 0x640 + 0x640: 0x16a, 0x641: 0x9b, 0x642: 0x16b, 0x643: 0x16c, 0x644: 0x73, 0x645: 0x74, 0x646: 0x16d, 0x647: 0x16e, + 0x648: 0x75, 0x649: 0x16f, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b, + 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b, + 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x170, 0x65c: 0x9b, 0x65d: 0x171, 0x65e: 0x9b, 0x65f: 0x172, + 0x660: 0x173, 0x661: 0x174, 0x662: 0x175, 0x663: 0xba, 0x664: 0x176, 0x665: 0x177, 0x666: 0x178, 0x667: 0x179, + 0x668: 0xba, 0x669: 0xba, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba, + 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba, + 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba, + // Block 0x1a, offset 0x680 + 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f, + 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f, + 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f, + 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x17a, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f, + 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f, + 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f, + 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f, + 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f, + 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f, + 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f, + 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x17b, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f, + 0x6e0: 0x17c, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f, + 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f, + 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f, + 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f, + // Block 
0x1c, offset 0x700 + 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f, + 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f, + 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f, + 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f, + 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f, + 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f, + 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f, + 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x17d, 0x73b: 0x9f, 0x73c: 0x9f, 0x73d: 0x9f, 0x73e: 0x9f, 0x73f: 0x9f, + // Block 0x1d, offset 0x740 + 0x740: 0x9f, 0x741: 0x9f, 0x742: 0x9f, 0x743: 0x9f, 0x744: 0x9f, 0x745: 0x9f, 0x746: 0x9f, 0x747: 0x9f, + 0x748: 0x9f, 0x749: 0x9f, 0x74a: 0x9f, 0x74b: 0x9f, 0x74c: 0x9f, 0x74d: 0x9f, 0x74e: 0x9f, 0x74f: 0x9f, + 0x750: 0x9f, 0x751: 0x9f, 0x752: 0x9f, 0x753: 0x9f, 0x754: 0x9f, 0x755: 0x9f, 0x756: 0x9f, 0x757: 0x9f, + 0x758: 0x9f, 0x759: 0x9f, 0x75a: 0x9f, 0x75b: 0x9f, 0x75c: 0x9f, 0x75d: 0x9f, 0x75e: 0x9f, 0x75f: 0x9f, + 0x760: 0x9f, 0x761: 0x9f, 0x762: 0x9f, 0x763: 0x9f, 0x764: 0x9f, 0x765: 0x9f, 0x766: 0x9f, 0x767: 0x9f, + 0x768: 0x9f, 0x769: 0x9f, 0x76a: 0x9f, 0x76b: 0x9f, 0x76c: 0x9f, 0x76d: 0x9f, 0x76e: 0x9f, 0x76f: 0x17e, + 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba, + 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba, + // Block 0x1e, offset 0x780 + 0x780: 0xba, 0x781: 0xba, 0x782: 0xba, 0x783: 0xba, 0x784: 0xba, 0x785: 0xba, 0x786: 0xba, 0x787: 0xba, + 0x788: 0xba, 0x789: 0xba, 0x78a: 0xba, 0x78b: 0xba, 0x78c: 0xba, 0x78d: 0xba, 0x78e: 0xba, 0x78f: 0xba, + 0x790: 0xba, 0x791: 0xba, 0x792: 0xba, 0x793: 0xba, 0x794: 0xba, 0x795: 0xba, 0x796: 0xba, 0x797: 0xba, + 0x798: 0xba, 0x799: 0xba, 0x79a: 0xba, 0x79b: 0xba, 0x79c: 0xba, 0x79d: 0xba, 0x79e: 0xba, 0x79f: 0xba, + 0x7a0: 0x76, 0x7a1: 0x77, 0x7a2: 0x78, 0x7a3: 0x17f, 0x7a4: 0x79, 0x7a5: 0x7a, 0x7a6: 0x180, 0x7a7: 0x7b, + 0x7a8: 0x7c, 0x7a9: 0xba, 0x7aa: 0xba, 0x7ab: 0xba, 0x7ac: 0xba, 0x7ad: 0xba, 0x7ae: 0xba, 0x7af: 0xba, + 0x7b0: 0xba, 0x7b1: 0xba, 0x7b2: 0xba, 0x7b3: 0xba, 0x7b4: 0xba, 0x7b5: 0xba, 0x7b6: 0xba, 0x7b7: 0xba, + 0x7b8: 0xba, 0x7b9: 0xba, 0x7ba: 0xba, 0x7bb: 0xba, 0x7bc: 0xba, 0x7bd: 0xba, 0x7be: 0xba, 0x7bf: 0xba, + // Block 0x1f, offset 0x7c0 + 0x7d0: 0x0d, 0x7d1: 0x0e, 0x7d2: 0x0f, 0x7d3: 0x10, 0x7d4: 0x11, 0x7d5: 0x0b, 0x7d6: 0x12, 0x7d7: 0x07, + 0x7d8: 0x13, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x14, 0x7dc: 0x0b, 0x7dd: 0x15, 0x7de: 0x16, 0x7df: 0x17, + 0x7e0: 0x07, 0x7e1: 0x07, 0x7e2: 0x07, 0x7e3: 0x07, 0x7e4: 0x07, 0x7e5: 0x07, 0x7e6: 0x07, 0x7e7: 0x07, + 0x7e8: 0x07, 0x7e9: 0x07, 0x7ea: 0x18, 0x7eb: 0x19, 0x7ec: 0x1a, 0x7ed: 0x07, 0x7ee: 0x1b, 0x7ef: 0x1c, + 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b, + 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b, + // Block 0x20, offset 0x800 + 0x800: 0x0b, 0x801: 0x0b, 0x802: 0x0b, 0x803: 0x0b, 0x804: 0x0b, 0x805: 0x0b, 0x806: 0x0b, 0x807: 0x0b, + 0x808: 0x0b, 0x809: 0x0b, 0x80a: 0x0b, 0x80b: 0x0b, 0x80c: 0x0b, 0x80d: 0x0b, 0x80e: 0x0b, 0x80f: 0x0b, + 0x810: 0x0b, 
0x811: 0x0b, 0x812: 0x0b, 0x813: 0x0b, 0x814: 0x0b, 0x815: 0x0b, 0x816: 0x0b, 0x817: 0x0b, + 0x818: 0x0b, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x0b, 0x81c: 0x0b, 0x81d: 0x0b, 0x81e: 0x0b, 0x81f: 0x0b, + 0x820: 0x0b, 0x821: 0x0b, 0x822: 0x0b, 0x823: 0x0b, 0x824: 0x0b, 0x825: 0x0b, 0x826: 0x0b, 0x827: 0x0b, + 0x828: 0x0b, 0x829: 0x0b, 0x82a: 0x0b, 0x82b: 0x0b, 0x82c: 0x0b, 0x82d: 0x0b, 0x82e: 0x0b, 0x82f: 0x0b, + 0x830: 0x0b, 0x831: 0x0b, 0x832: 0x0b, 0x833: 0x0b, 0x834: 0x0b, 0x835: 0x0b, 0x836: 0x0b, 0x837: 0x0b, + 0x838: 0x0b, 0x839: 0x0b, 0x83a: 0x0b, 0x83b: 0x0b, 0x83c: 0x0b, 0x83d: 0x0b, 0x83e: 0x0b, 0x83f: 0x0b, + // Block 0x21, offset 0x840 + 0x840: 0x181, 0x841: 0x182, 0x842: 0xba, 0x843: 0xba, 0x844: 0x183, 0x845: 0x183, 0x846: 0x183, 0x847: 0x184, + 0x848: 0xba, 0x849: 0xba, 0x84a: 0xba, 0x84b: 0xba, 0x84c: 0xba, 0x84d: 0xba, 0x84e: 0xba, 0x84f: 0xba, + 0x850: 0xba, 0x851: 0xba, 0x852: 0xba, 0x853: 0xba, 0x854: 0xba, 0x855: 0xba, 0x856: 0xba, 0x857: 0xba, + 0x858: 0xba, 0x859: 0xba, 0x85a: 0xba, 0x85b: 0xba, 0x85c: 0xba, 0x85d: 0xba, 0x85e: 0xba, 0x85f: 0xba, + 0x860: 0xba, 0x861: 0xba, 0x862: 0xba, 0x863: 0xba, 0x864: 0xba, 0x865: 0xba, 0x866: 0xba, 0x867: 0xba, + 0x868: 0xba, 0x869: 0xba, 0x86a: 0xba, 0x86b: 0xba, 0x86c: 0xba, 0x86d: 0xba, 0x86e: 0xba, 0x86f: 0xba, + 0x870: 0xba, 0x871: 0xba, 0x872: 0xba, 0x873: 0xba, 0x874: 0xba, 0x875: 0xba, 0x876: 0xba, 0x877: 0xba, + 0x878: 0xba, 0x879: 0xba, 0x87a: 0xba, 0x87b: 0xba, 0x87c: 0xba, 0x87d: 0xba, 0x87e: 0xba, 0x87f: 0xba, + // Block 0x22, offset 0x880 + 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b, + 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b, + 0x890: 0x0b, 0x891: 0x0b, 0x892: 0x0b, 0x893: 0x0b, 0x894: 0x0b, 0x895: 0x0b, 0x896: 0x0b, 0x897: 0x0b, + 0x898: 0x0b, 0x899: 0x0b, 0x89a: 0x0b, 0x89b: 0x0b, 0x89c: 0x0b, 0x89d: 0x0b, 0x89e: 0x0b, 0x89f: 0x0b, + 0x8a0: 0x1f, 0x8a1: 0x0b, 0x8a2: 0x0b, 0x8a3: 0x0b, 0x8a4: 0x0b, 0x8a5: 0x0b, 0x8a6: 0x0b, 0x8a7: 0x0b, + 0x8a8: 0x0b, 0x8a9: 0x0b, 0x8aa: 0x0b, 0x8ab: 0x0b, 0x8ac: 0x0b, 0x8ad: 0x0b, 0x8ae: 0x0b, 0x8af: 0x0b, + 0x8b0: 0x0b, 0x8b1: 0x0b, 0x8b2: 0x0b, 0x8b3: 0x0b, 0x8b4: 0x0b, 0x8b5: 0x0b, 0x8b6: 0x0b, 0x8b7: 0x0b, + 0x8b8: 0x0b, 0x8b9: 0x0b, 0x8ba: 0x0b, 0x8bb: 0x0b, 0x8bc: 0x0b, 0x8bd: 0x0b, 0x8be: 0x0b, 0x8bf: 0x0b, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b, + 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b, +} + +// idnaSparseOffset: 264 entries, 528 bytes +var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x34, 0x3f, 0x4b, 0x4f, 0x5e, 0x63, 0x6b, 0x77, 0x85, 0x8a, 0x93, 0xa3, 0xb1, 0xbd, 0xc9, 0xda, 0xe4, 0xeb, 0xf8, 0x109, 0x110, 0x11b, 0x12a, 0x138, 0x142, 0x144, 0x149, 0x14c, 0x14f, 0x151, 0x15d, 0x168, 0x170, 0x176, 0x17c, 0x181, 0x186, 0x189, 0x18d, 0x193, 0x198, 0x1a4, 0x1ae, 0x1b4, 0x1c5, 0x1cf, 0x1d2, 0x1da, 0x1dd, 0x1ea, 0x1f2, 0x1f6, 0x1fd, 0x205, 0x215, 0x221, 0x223, 0x22d, 0x239, 0x245, 0x251, 0x259, 0x25e, 0x268, 0x279, 0x27d, 0x288, 0x28c, 0x295, 0x29d, 0x2a3, 0x2a8, 0x2ab, 0x2af, 0x2b5, 0x2b9, 0x2bd, 0x2c3, 0x2ca, 0x2d0, 0x2d8, 0x2df, 0x2ea, 0x2f4, 0x2f8, 0x2fb, 0x301, 0x305, 0x307, 0x30a, 0x30c, 0x30f, 0x319, 0x31c, 0x32b, 0x32f, 0x334, 0x337, 0x33b, 0x340, 0x345, 0x34b, 0x351, 0x360, 0x366, 0x36a, 0x379, 0x37e, 0x386, 0x390, 0x39b, 0x3a3, 0x3b4, 0x3bd, 0x3cd, 0x3da, 0x3e4, 0x3e9, 
0x3f6, 0x3fa, 0x3ff, 0x401, 0x405, 0x407, 0x40b, 0x414, 0x41a, 0x41e, 0x42e, 0x438, 0x43d, 0x440, 0x446, 0x44d, 0x452, 0x456, 0x45c, 0x461, 0x46a, 0x46f, 0x475, 0x47c, 0x483, 0x48a, 0x48e, 0x493, 0x496, 0x49b, 0x4a7, 0x4ad, 0x4b2, 0x4b9, 0x4c1, 0x4c6, 0x4ca, 0x4da, 0x4e1, 0x4e5, 0x4e9, 0x4f0, 0x4f2, 0x4f5, 0x4f8, 0x4fc, 0x500, 0x506, 0x50f, 0x51b, 0x522, 0x52b, 0x533, 0x53a, 0x548, 0x555, 0x562, 0x56b, 0x56f, 0x57d, 0x585, 0x590, 0x599, 0x59f, 0x5a7, 0x5b0, 0x5ba, 0x5bd, 0x5c9, 0x5cc, 0x5d1, 0x5de, 0x5e7, 0x5f3, 0x5f6, 0x600, 0x609, 0x615, 0x622, 0x62a, 0x62d, 0x632, 0x635, 0x638, 0x63b, 0x642, 0x649, 0x64d, 0x658, 0x65b, 0x661, 0x666, 0x66a, 0x66d, 0x670, 0x673, 0x676, 0x679, 0x67e, 0x688, 0x68b, 0x68f, 0x69e, 0x6aa, 0x6ae, 0x6b3, 0x6b8, 0x6bc, 0x6c1, 0x6ca, 0x6d5, 0x6db, 0x6e3, 0x6e7, 0x6eb, 0x6f1, 0x6f7, 0x6fc, 0x6ff, 0x70f, 0x716, 0x719, 0x71c, 0x720, 0x726, 0x72b, 0x730, 0x735, 0x738, 0x73d, 0x740, 0x743, 0x747, 0x74b, 0x74e, 0x75e, 0x76f, 0x774, 0x776, 0x778} + +// idnaSparseValues: 1915 entries, 7660 bytes +var idnaSparseValues = [1915]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x07}, + {value: 0xe105, lo: 0x80, hi: 0x96}, + {value: 0x0018, lo: 0x97, hi: 0x97}, + {value: 0xe105, lo: 0x98, hi: 0x9e}, + {value: 0x001f, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbf}, + // Block 0x1, offset 0x8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0xe01d, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0335, lo: 0x83, hi: 0x83}, + {value: 0x034d, lo: 0x84, hi: 0x84}, + {value: 0x0365, lo: 0x85, hi: 0x85}, + {value: 0xe00d, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0xe00d, lo: 0x88, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x89}, + {value: 0xe00d, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe00d, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0x8d}, + {value: 0xe00d, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0xbf}, + // Block 0x2, offset 0x19 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x0249, lo: 0xb0, hi: 0xb0}, + {value: 0x037d, lo: 0xb1, hi: 0xb1}, + {value: 0x0259, lo: 0xb2, hi: 0xb2}, + {value: 0x0269, lo: 0xb3, hi: 0xb3}, + {value: 0x034d, lo: 0xb4, hi: 0xb4}, + {value: 0x0395, lo: 0xb5, hi: 0xb5}, + {value: 0xe1bd, lo: 0xb6, hi: 0xb6}, + {value: 0x0279, lo: 0xb7, hi: 0xb7}, + {value: 0x0289, lo: 0xb8, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbf}, + // Block 0x3, offset 0x25 + {value: 0x0000, lo: 0x01}, + {value: 0x3308, lo: 0x80, hi: 0xbf}, + // Block 0x4, offset 0x27 + {value: 0x0000, lo: 0x04}, + {value: 0x03f5, lo: 0x80, hi: 0x8f}, + {value: 0xe105, lo: 0x90, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x5, offset 0x2c + {value: 0x0000, lo: 0x07}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x0545, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x0008, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x6, offset 0x34 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0401, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x88}, + {value: 0x0018, lo: 0x89, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x3308, 
lo: 0x91, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x7, offset 0x3f + {value: 0x0000, lo: 0x0b}, + {value: 0x0818, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x82}, + {value: 0x0818, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x85}, + {value: 0x0818, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0808, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x8, offset 0x4b + {value: 0x0000, lo: 0x03}, + {value: 0x0a08, lo: 0x80, hi: 0x87}, + {value: 0x0c08, lo: 0x88, hi: 0x99}, + {value: 0x0a08, lo: 0x9a, hi: 0xbf}, + // Block 0x9, offset 0x4f + {value: 0x0000, lo: 0x0e}, + {value: 0x3308, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0c08, lo: 0x8d, hi: 0x8d}, + {value: 0x0a08, lo: 0x8e, hi: 0x98}, + {value: 0x0c08, lo: 0x99, hi: 0x9b}, + {value: 0x0a08, lo: 0x9c, hi: 0xaa}, + {value: 0x0c08, lo: 0xab, hi: 0xac}, + {value: 0x0a08, lo: 0xad, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb1}, + {value: 0x0a08, lo: 0xb2, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb4}, + {value: 0x0a08, lo: 0xb5, hi: 0xb7}, + {value: 0x0c08, lo: 0xb8, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbf}, + // Block 0xa, offset 0x5e + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xb0}, + {value: 0x0808, lo: 0xb1, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xb, offset 0x63 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x89}, + {value: 0x0a08, lo: 0x8a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0xc, offset 0x6b + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x99}, + {value: 0x0808, lo: 0x9a, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa3}, + {value: 0x0808, lo: 0xa4, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa7}, + {value: 0x0808, lo: 0xa8, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0818, lo: 0xb0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd, offset 0x77 + {value: 0x0000, lo: 0x0d}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0a08, lo: 0xa0, hi: 0xa9}, + {value: 0x0c08, lo: 0xaa, hi: 0xac}, + {value: 0x0808, lo: 0xad, hi: 0xad}, + {value: 0x0c08, lo: 0xae, hi: 0xae}, + {value: 0x0a08, lo: 0xaf, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb2}, + {value: 0x0a08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0a08, lo: 0xb6, hi: 0xb8}, + {value: 0x0c08, lo: 0xb9, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0xe, offset 0x85 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa1}, + {value: 0x0840, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xbf}, + // Block 0xf, offset 0x8a + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x10, 
offset 0x93 + {value: 0x0000, lo: 0x0f}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x85}, + {value: 0x3008, lo: 0x86, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x3008, lo: 0x8a, hi: 0x8c}, + {value: 0x3b08, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x11, offset 0xa3 + {value: 0x0000, lo: 0x0d}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x12, offset 0xb1 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xba}, + {value: 0x3b08, lo: 0xbb, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x13, offset 0xbd + {value: 0x0000, lo: 0x0b}, + {value: 0x0040, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x14, offset 0xc9 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x89}, + {value: 0x3b08, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x3008, lo: 0x98, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x15, offset 0xda + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb2}, + {value: 0x08f1, lo: 0xb3, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb9}, + {value: 0x3b08, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0x16, offset 0xe4 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x8e}, + {value: 0x0018, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0xbf}, + // 
Block 0x17, offset 0xeb + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0961, lo: 0x9c, hi: 0x9c}, + {value: 0x0999, lo: 0x9d, hi: 0x9d}, + {value: 0x0008, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x18, offset 0xf8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe03d, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x19, offset 0x109 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0x1a, offset 0x110 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x1b, offset 0x11b + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x3008, lo: 0x96, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x3308, lo: 0x9e, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xa1}, + {value: 0x3008, lo: 0xa2, hi: 0xa4}, + {value: 0x0008, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xbf}, + // Block 0x1c, offset 0x12a + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x8c}, + {value: 0x3308, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x3008, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x1d, offset 0x138 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x86}, + {value: 0x055d, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8c}, + {value: 0x055d, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbb}, + {value: 0xe105, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0x1e, offset 
0x142 + {value: 0x0000, lo: 0x01}, + {value: 0x0018, lo: 0x80, hi: 0xbf}, + // Block 0x1f, offset 0x144 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa0}, + {value: 0x2018, lo: 0xa1, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x20, offset 0x149 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa7}, + {value: 0x2018, lo: 0xa8, hi: 0xbf}, + // Block 0x21, offset 0x14c + {value: 0x0000, lo: 0x02}, + {value: 0x2018, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0xbf}, + // Block 0x22, offset 0x14f + {value: 0x0000, lo: 0x01}, + {value: 0x0008, lo: 0x80, hi: 0xbf}, + // Block 0x23, offset 0x151 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x24, offset 0x15d + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x25, offset 0x168 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x26, offset 0x170 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x27, offset 0x176 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x28, offset 0x17c + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x29, offset 0x181 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x2a, offset 0x186 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x2b, offset 0x189 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xbf}, + // Block 0x2c, offset 0x18d + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x2d, offset 0x193 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0018, lo: 
0xab, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x2e, offset 0x198 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x3b08, lo: 0x94, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x2f, offset 0x1a4 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x30, offset 0x1ae + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xb3}, + {value: 0x3340, lo: 0xb4, hi: 0xb5}, + {value: 0x3008, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x31, offset 0x1b4 + {value: 0x0000, lo: 0x10}, + {value: 0x3008, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x91}, + {value: 0x3b08, lo: 0x92, hi: 0x92}, + {value: 0x3308, lo: 0x93, hi: 0x93}, + {value: 0x0018, lo: 0x94, hi: 0x96}, + {value: 0x0008, lo: 0x97, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x32, offset 0x1c5 + {value: 0x0000, lo: 0x09}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x86}, + {value: 0x0218, lo: 0x87, hi: 0x87}, + {value: 0x0018, lo: 0x88, hi: 0x8a}, + {value: 0x33c0, lo: 0x8b, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0208, lo: 0xa0, hi: 0xbf}, + // Block 0x33, offset 0x1cf + {value: 0x0000, lo: 0x02}, + {value: 0x0208, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x34, offset 0x1d2 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0208, lo: 0x87, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xa9}, + {value: 0x0208, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x35, offset 0x1da + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0x36, offset 0x1dd + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x37, offset 0x1ea + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 
0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x38, offset 0x1f2 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x39, offset 0x1f6 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0028, lo: 0x9a, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xbf}, + // Block 0x3a, offset 0x1fd + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x3308, lo: 0x97, hi: 0x98}, + {value: 0x3008, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x3b, offset 0x205 + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x94}, + {value: 0x3008, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3b08, lo: 0xa0, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xac}, + {value: 0x3008, lo: 0xad, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x3c, offset 0x215 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xbd}, + {value: 0x3318, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x3d, offset 0x221 + {value: 0x0000, lo: 0x01}, + {value: 0x0040, lo: 0x80, hi: 0xbf}, + // Block 0x3e, offset 0x223 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3008, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x3f, offset 0x22d + {value: 0x0000, lo: 0x0b}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x3808, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x40, offset 0x239 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3808, lo: 0xaa, hi: 0xaa}, + 
{value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xbf}, + // Block 0x41, offset 0x245 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3008, lo: 0xaa, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3808, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbf}, + // Block 0x42, offset 0x251 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x3008, lo: 0xa4, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbf}, + // Block 0x43, offset 0x259 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x44, offset 0x25e + {value: 0x0000, lo: 0x09}, + {value: 0x0e29, lo: 0x80, hi: 0x80}, + {value: 0x0e41, lo: 0x81, hi: 0x81}, + {value: 0x0e59, lo: 0x82, hi: 0x82}, + {value: 0x0e71, lo: 0x83, hi: 0x83}, + {value: 0x0e89, lo: 0x84, hi: 0x85}, + {value: 0x0ea1, lo: 0x86, hi: 0x86}, + {value: 0x0eb9, lo: 0x87, hi: 0x87}, + {value: 0x057d, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0x45, offset 0x268 + {value: 0x0000, lo: 0x10}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x92}, + {value: 0x0018, lo: 0x93, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa8}, + {value: 0x0008, lo: 0xa9, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x46, offset 0x279 + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0x47, offset 0x27d + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x87}, + {value: 0xe045, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0xe045, lo: 0x98, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0xe045, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbf}, + // Block 0x48, offset 0x288 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x3318, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbf}, + // Block 0x49, offset 0x28c + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x24c1, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x4a, offset 0x295 + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x24f1, lo: 0xac, hi: 0xac}, + 
{value: 0x2529, lo: 0xad, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xae}, + {value: 0x2579, lo: 0xaf, hi: 0xaf}, + {value: 0x25b1, lo: 0xb0, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0x4b, offset 0x29d + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x9f}, + {value: 0x0080, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xad}, + {value: 0x0080, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x4c, offset 0x2a3 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xa8}, + {value: 0x09c5, lo: 0xa9, hi: 0xa9}, + {value: 0x09e5, lo: 0xaa, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xbf}, + // Block 0x4d, offset 0x2a8 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0x4e, offset 0x2ab + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x28c1, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0xbf}, + // Block 0x4f, offset 0x2af + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0e66, lo: 0xb4, hi: 0xb4}, + {value: 0x292a, lo: 0xb5, hi: 0xb5}, + {value: 0x0e86, lo: 0xb6, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x50, offset 0x2b5 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x9b}, + {value: 0x2941, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0xbf}, + // Block 0x51, offset 0x2b9 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x52, offset 0x2bd + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0018, lo: 0xbd, hi: 0xbf}, + // Block 0x53, offset 0x2c3 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x92}, + {value: 0x0040, lo: 0x93, hi: 0xab}, + {value: 0x0018, lo: 0xac, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x54, offset 0x2ca + {value: 0x0000, lo: 0x05}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x03f5, lo: 0x90, hi: 0x9f}, + {value: 0x0ea5, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x55, offset 0x2d0 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x56, offset 0x2d8 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xae}, + {value: 0xe075, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0x57, offset 0x2df + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x58, offset 0x2ea + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x86}, 
+ {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xbf}, + // Block 0x59, offset 0x2f4 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x5a, offset 0x2f8 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0x5b, offset 0x2fb + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9e}, + {value: 0x0edd, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x5c, offset 0x301 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb2}, + {value: 0x0efd, lo: 0xb3, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x5d, offset 0x305 + {value: 0x0020, lo: 0x01}, + {value: 0x0f1d, lo: 0x80, hi: 0xbf}, + // Block 0x5e, offset 0x307 + {value: 0x0020, lo: 0x02}, + {value: 0x171d, lo: 0x80, hi: 0x8f}, + {value: 0x18fd, lo: 0x90, hi: 0xbf}, + // Block 0x5f, offset 0x30a + {value: 0x0020, lo: 0x01}, + {value: 0x1efd, lo: 0x80, hi: 0xbf}, + // Block 0x60, offset 0x30c + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x61, offset 0x30f + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9a}, + {value: 0x29e2, lo: 0x9b, hi: 0x9b}, + {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9e}, + {value: 0x2a31, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x62, offset 0x319 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbe}, + {value: 0x2a69, lo: 0xbf, hi: 0xbf}, + // Block 0x63, offset 0x31c + {value: 0x0000, lo: 0x0e}, + {value: 0x0040, lo: 0x80, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x2a1d, lo: 0xb1, hi: 0xb1}, + {value: 0x2a3d, lo: 0xb2, hi: 0xb2}, + {value: 0x2a5d, lo: 0xb3, hi: 0xb3}, + {value: 0x2a7d, lo: 0xb4, hi: 0xb4}, + {value: 0x2a5d, lo: 0xb5, hi: 0xb5}, + {value: 0x2a9d, lo: 0xb6, hi: 0xb6}, + {value: 0x2abd, lo: 0xb7, hi: 0xb7}, + {value: 0x2add, lo: 0xb8, hi: 0xb9}, + {value: 0x2afd, lo: 0xba, hi: 0xbb}, + {value: 0x2b1d, lo: 0xbc, hi: 0xbd}, + {value: 0x2afd, lo: 0xbe, hi: 0xbf}, + // Block 0x64, offset 0x32b + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x65, offset 0x32f + {value: 0x0030, lo: 0x04}, + {value: 0x2aa2, lo: 0x80, hi: 0x9d}, + {value: 0x305a, lo: 0x9e, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x30a2, lo: 0xa0, hi: 0xbf}, + // Block 0x66, offset 0x334 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0x67, offset 0x337 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x68, offset 0x33b + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 
0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x69, offset 0x340 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0x6a, offset 0x345 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb1}, + {value: 0x0018, lo: 0xb2, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6b, offset 0x34b + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0xb6}, + {value: 0x0008, lo: 0xb7, hi: 0xb7}, + {value: 0x2009, lo: 0xb8, hi: 0xb8}, + {value: 0x6e89, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xbf}, + // Block 0x6c, offset 0x351 + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x3308, lo: 0x8b, hi: 0x8b}, + {value: 0x0008, lo: 0x8c, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x6d, offset 0x360 + {value: 0x0000, lo: 0x05}, + {value: 0x0208, lo: 0x80, hi: 0xb1}, + {value: 0x0108, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6e, offset 0x366 + {value: 0x0000, lo: 0x03}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xbf}, + // Block 0x6f, offset 0x36a + {value: 0x0000, lo: 0x0e}, + {value: 0x3008, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xba}, + {value: 0x0008, lo: 0xbb, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x70, offset 0x379 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x71, offset 0x37e + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x91}, + {value: 0x3008, lo: 0x92, hi: 0x92}, + {value: 0x3808, lo: 0x93, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x72, offset 0x386 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb9}, + {value: 0x3008, lo: 0xba, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x73, offset 0x390 + {value: 0x0000, lo: 0x0a}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 
0x0008, lo: 0x8f, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x74, offset 0x39b + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x75, offset 0x3a3 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8c}, + {value: 0x3008, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbd}, + {value: 0x0008, lo: 0xbe, hi: 0xbf}, + // Block 0x76, offset 0x3b4 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x77, offset 0x3bd + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x9a}, + {value: 0x0008, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3b08, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x78, offset 0x3cd + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x90}, + {value: 0x0008, lo: 0x91, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x79, offset 0x3da + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x4465, lo: 0x9c, hi: 0x9c}, + {value: 0x447d, lo: 0x9d, hi: 0x9d}, + {value: 0x2971, lo: 0x9e, hi: 0x9e}, + {value: 0xe06d, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xaf}, + {value: 0x4495, lo: 0xb0, hi: 0xbf}, + // Block 0x7a, offset 0x3e4 + {value: 0x0000, lo: 0x04}, + {value: 0x44b5, lo: 0x80, hi: 0x8f}, + {value: 0x44d5, lo: 0x90, hi: 0x9f}, + {value: 0x44f5, lo: 0xa0, hi: 0xaf}, + {value: 0x44d5, lo: 0xb0, hi: 0xbf}, + // Block 0x7b, offset 0x3e9 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0xa2}, + {value: 
0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3b08, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x7c, offset 0x3f6 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x7d, offset 0x3fa + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x7e, offset 0x3ff + {value: 0x0020, lo: 0x01}, + {value: 0x4515, lo: 0x80, hi: 0xbf}, + // Block 0x7f, offset 0x401 + {value: 0x0020, lo: 0x03}, + {value: 0x4d15, lo: 0x80, hi: 0x94}, + {value: 0x4ad5, lo: 0x95, hi: 0x95}, + {value: 0x4fb5, lo: 0x96, hi: 0xbf}, + // Block 0x80, offset 0x405 + {value: 0x0020, lo: 0x01}, + {value: 0x54f5, lo: 0x80, hi: 0xbf}, + // Block 0x81, offset 0x407 + {value: 0x0020, lo: 0x03}, + {value: 0x5cf5, lo: 0x80, hi: 0x84}, + {value: 0x5655, lo: 0x85, hi: 0x85}, + {value: 0x5d95, lo: 0x86, hi: 0xbf}, + // Block 0x82, offset 0x40b + {value: 0x0020, lo: 0x08}, + {value: 0x6b55, lo: 0x80, hi: 0x8f}, + {value: 0x6d15, lo: 0x90, hi: 0x90}, + {value: 0x6d55, lo: 0x91, hi: 0xab}, + {value: 0x6ea1, lo: 0xac, hi: 0xac}, + {value: 0x70b5, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x70d5, lo: 0xb0, hi: 0xbf}, + // Block 0x83, offset 0x414 + {value: 0x0020, lo: 0x05}, + {value: 0x72d5, lo: 0x80, hi: 0xad}, + {value: 0x6535, lo: 0xae, hi: 0xae}, + {value: 0x7895, lo: 0xaf, hi: 0xb5}, + {value: 0x6f55, lo: 0xb6, hi: 0xb6}, + {value: 0x7975, lo: 0xb7, hi: 0xbf}, + // Block 0x84, offset 0x41a + {value: 0x0028, lo: 0x03}, + {value: 0x7c21, lo: 0x80, hi: 0x82}, + {value: 0x7be1, lo: 0x83, hi: 0x83}, + {value: 0x7c99, lo: 0x84, hi: 0xbf}, + // Block 0x85, offset 0x41e + {value: 0x0038, lo: 0x0f}, + {value: 0x9db1, lo: 0x80, hi: 0x83}, + {value: 0x9e59, lo: 0x84, hi: 0x85}, + {value: 0x9e91, lo: 0x86, hi: 0x87}, + {value: 0x9ec9, lo: 0x88, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0xa089, lo: 0x92, hi: 0x97}, + {value: 0xa1a1, lo: 0x98, hi: 0x9c}, + {value: 0xa281, lo: 0x9d, hi: 0xb3}, + {value: 0x9d41, lo: 0xb4, hi: 0xb4}, + {value: 0x9db1, lo: 0xb5, hi: 0xb5}, + {value: 0xa789, lo: 0xb6, hi: 0xbb}, + {value: 0xa869, lo: 0xbc, hi: 0xbc}, + {value: 0xa7f9, lo: 0xbd, hi: 0xbd}, + {value: 0xa8d9, lo: 0xbe, hi: 0xbf}, + // Block 0x86, offset 0x42e + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x0008, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x87, offset 0x438 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0x88, offset 0x43d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xba}, + {value: 0x0040, lo: 0xbb, 
hi: 0xbf}, + // Block 0x89, offset 0x440 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x8a, offset 0x446 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x8b, offset 0x44d + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x8c, offset 0x452 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x8d, offset 0x456 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x8e, offset 0x45c + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xbf}, + // Block 0x8f, offset 0x461 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x90, offset 0x46a + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x91, offset 0x46f + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0x92, offset 0x475 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x97}, + {value: 0x8ad5, lo: 0x98, hi: 0x9f}, + {value: 0x8aed, lo: 0xa0, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xbf}, + // Block 0x93, offset 0x47c + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x8aed, lo: 0xb0, hi: 0xb7}, + {value: 0x8ad5, lo: 0xb8, hi: 0xbf}, + // Block 0x94, offset 0x483 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x95, offset 0x48a + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x96, offset 0x48e + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xae}, + {value: 0x0018, lo: 0xaf, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x97, offset 
0x493 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x98, offset 0x496 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xbf}, + // Block 0x99, offset 0x49b + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0808, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0808, lo: 0x8a, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb6}, + {value: 0x0808, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbb}, + {value: 0x0808, lo: 0xbc, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x0808, lo: 0xbf, hi: 0xbf}, + // Block 0x9a, offset 0x4a7 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x96}, + {value: 0x0818, lo: 0x97, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb6}, + {value: 0x0818, lo: 0xb7, hi: 0xbf}, + // Block 0x9b, offset 0x4ad + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa6}, + {value: 0x0818, lo: 0xa7, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x9c, offset 0x4b2 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xba}, + {value: 0x0818, lo: 0xbb, hi: 0xbf}, + // Block 0x9d, offset 0x4b9 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0818, lo: 0x96, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbe}, + {value: 0x0818, lo: 0xbf, hi: 0xbf}, + // Block 0x9e, offset 0x4c1 + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbb}, + {value: 0x0818, lo: 0xbc, hi: 0xbd}, + {value: 0x0808, lo: 0xbe, hi: 0xbf}, + // Block 0x9f, offset 0x4c6 + {value: 0x0000, lo: 0x03}, + {value: 0x0818, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x0818, lo: 0x92, hi: 0xbf}, + // Block 0xa0, offset 0x4ca + {value: 0x0000, lo: 0x0f}, + {value: 0x0808, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x94}, + {value: 0x0808, lo: 0x95, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x98}, + {value: 0x0808, lo: 0x99, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xa1, offset 0x4da + {value: 0x0000, lo: 0x06}, + {value: 0x0818, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0818, lo: 0x90, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xbc}, + {value: 0x0818, lo: 0xbd, hi: 0xbf}, + // Block 0xa2, offset 0x4e1 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xa3, offset 0x4e5 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb8}, + 
{value: 0x0018, lo: 0xb9, hi: 0xbf}, + // Block 0xa4, offset 0x4e9 + {value: 0x0000, lo: 0x06}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0818, lo: 0x98, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb7}, + {value: 0x0818, lo: 0xb8, hi: 0xbf}, + // Block 0xa5, offset 0x4f0 + {value: 0x0000, lo: 0x01}, + {value: 0x0808, lo: 0x80, hi: 0xbf}, + // Block 0xa6, offset 0x4f2 + {value: 0x0000, lo: 0x02}, + {value: 0x0808, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0xa7, offset 0x4f5 + {value: 0x0000, lo: 0x02}, + {value: 0x03dd, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xa8, offset 0x4f8 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xbf}, + // Block 0xa9, offset 0x4fc + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0818, lo: 0xa0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xaa, offset 0x500 + {value: 0x0000, lo: 0x05}, + {value: 0x3008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xab, offset 0x506 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x91}, + {value: 0x0018, lo: 0x92, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xac, offset 0x50f + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbc}, + {value: 0x0340, lo: 0xbd, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0xad, offset 0x51b + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xae, offset 0x522 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb2}, + {value: 0x3b08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xbf}, + // Block 0xaf, offset 0x52b + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xb0, offset 0x533 + {value: 0x0000, lo: 0x06}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xbe}, + {value: 0x3008, lo: 0xbf, hi: 0xbf}, + // Block 0xb1, offset 0x53a + {value: 0x0000, lo: 0x0d}, + {value: 0x3808, lo: 0x80, hi: 0x80}, 
+ {value: 0x0008, lo: 0x81, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xb2, offset 0x548 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3808, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xb3, offset 0x555 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0008, lo: 0x9f, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xb4, offset 0x562 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x3308, lo: 0x9f, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa9}, + {value: 0x3b08, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb5, offset 0x56b + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xb6, offset 0x56f + {value: 0x0000, lo: 0x0d}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xb7, offset 0x57d + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xb8, offset 0x585 + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x85}, + {value: 0x0018, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xb9, offset 0x590 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb1}, + 
{value: 0x3308, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xba, offset 0x599 + {value: 0x0000, lo: 0x05}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9b}, + {value: 0x3308, lo: 0x9c, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xbb, offset 0x59f + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbc, offset 0x5a7 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xbd, offset 0x5b0 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb5}, + {value: 0x3808, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0xbe, offset 0x5ba + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0xbf, offset 0x5bd + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbf}, + // Block 0xc0, offset 0x5c9 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xbf}, + // Block 0xc1, offset 0x5cc + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0xc2, offset 0x5d1 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0xc3, offset 0x5de + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x3b08, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0xbf}, + // Block 0xc4, offset 0x5e7 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 
0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x98}, + {value: 0x3b08, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xa2}, + {value: 0x0040, lo: 0xa3, hi: 0xbf}, + // Block 0xc5, offset 0x5f3 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xc6, offset 0x5f6 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xc7, offset 0x600 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xbf}, + // Block 0xc8, offset 0x609 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xa9}, + {value: 0x3308, lo: 0xaa, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xc9, offset 0x615 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xca, offset 0x622 + {value: 0x0000, lo: 0x07}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xcb, offset 0x62a + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xcc, offset 0x62d + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xcd, offset 0x632 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0xbf}, + // Block 0xce, offset 0x635 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xbf}, + // Block 0xcf, offset 0x638 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0xbf}, + // Block 0xd0, offset 0x63b + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 
0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xd1, offset 0x642 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xd2, offset 0x649 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0xd3, offset 0x64d + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0xd4, offset 0x658 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0xd5, offset 0x65b + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3008, lo: 0x91, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd6, offset 0x661 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xd7, offset 0x666 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xbf}, + // Block 0xd8, offset 0x66a + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xd9, offset 0x66d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xda, offset 0x670 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xdb, offset 0x673 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xdc, offset 0x676 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0xdd, offset 0x679 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0xde, offset 0x67e + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x03c0, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xbf}, + // Block 0xdf, offset 0x688 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xe0, offset 0x68b + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xbf}, + // Block 0xe1, offset 0x68f + {value: 0x0000, lo: 0x0e}, 
+ {value: 0x0018, lo: 0x80, hi: 0x9d}, + {value: 0xb5b9, lo: 0x9e, hi: 0x9e}, + {value: 0xb601, lo: 0x9f, hi: 0x9f}, + {value: 0xb649, lo: 0xa0, hi: 0xa0}, + {value: 0xb6b1, lo: 0xa1, hi: 0xa1}, + {value: 0xb719, lo: 0xa2, hi: 0xa2}, + {value: 0xb781, lo: 0xa3, hi: 0xa3}, + {value: 0xb7e9, lo: 0xa4, hi: 0xa4}, + {value: 0x3018, lo: 0xa5, hi: 0xa6}, + {value: 0x3318, lo: 0xa7, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xac}, + {value: 0x3018, lo: 0xad, hi: 0xb2}, + {value: 0x0340, lo: 0xb3, hi: 0xba}, + {value: 0x3318, lo: 0xbb, hi: 0xbf}, + // Block 0xe2, offset 0x69e + {value: 0x0000, lo: 0x0b}, + {value: 0x3318, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0x84}, + {value: 0x3318, lo: 0x85, hi: 0x8b}, + {value: 0x0018, lo: 0x8c, hi: 0xa9}, + {value: 0x3318, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xba}, + {value: 0xb851, lo: 0xbb, hi: 0xbb}, + {value: 0xb899, lo: 0xbc, hi: 0xbc}, + {value: 0xb8e1, lo: 0xbd, hi: 0xbd}, + {value: 0xb949, lo: 0xbe, hi: 0xbe}, + {value: 0xb9b1, lo: 0xbf, hi: 0xbf}, + // Block 0xe3, offset 0x6aa + {value: 0x0000, lo: 0x03}, + {value: 0xba19, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xbf}, + // Block 0xe4, offset 0x6ae + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x3318, lo: 0x82, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0xbf}, + // Block 0xe5, offset 0x6b3 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xe6, offset 0x6b8 + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0xe7, offset 0x6bc + {value: 0x0000, lo: 0x04}, + {value: 0x3308, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0xe8, offset 0x6c1 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x3308, lo: 0xa1, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xe9, offset 0x6ca + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0xea, offset 0x6d5 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x86}, + {value: 0x0818, lo: 0x87, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xeb, offset 0x6db + {value: 0x0000, lo: 0x07}, + {value: 0x0a08, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0818, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xec, offset 0x6e3 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0018, 
lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xed, offset 0x6e7 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0xee, offset 0x6eb + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0xef, offset 0x6f1 + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xf0, offset 0x6f7 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8f}, + {value: 0xc1c1, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xf1, offset 0x6fc + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xbf}, + // Block 0xf2, offset 0x6ff + {value: 0x0000, lo: 0x0f}, + {value: 0xc7e9, lo: 0x80, hi: 0x80}, + {value: 0xc839, lo: 0x81, hi: 0x81}, + {value: 0xc889, lo: 0x82, hi: 0x82}, + {value: 0xc8d9, lo: 0x83, hi: 0x83}, + {value: 0xc929, lo: 0x84, hi: 0x84}, + {value: 0xc979, lo: 0x85, hi: 0x85}, + {value: 0xc9c9, lo: 0x86, hi: 0x86}, + {value: 0xca19, lo: 0x87, hi: 0x87}, + {value: 0xca69, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0xcab9, lo: 0x90, hi: 0x90}, + {value: 0xcad9, lo: 0x91, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xbf}, + // Block 0xf3, offset 0x70f + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xf4, offset 0x716 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0xf5, offset 0x719 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0xbf}, + // Block 0xf6, offset 0x71c + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0xf7, offset 0x720 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0xf8, offset 0x726 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xbf}, + // Block 0xf9, offset 0x72b + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xfa, offset 0x730 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0xfb, offset 0x735 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xbf}, + // Block 0xfc, offset 0x738 + 
{value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0xfd, offset 0x73d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xfe, offset 0x740 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xff, offset 0x743 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x100, offset 0x747 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x101, offset 0x74b + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x102, offset 0x74e + {value: 0x0020, lo: 0x0f}, + {value: 0xdeb9, lo: 0x80, hi: 0x89}, + {value: 0x8dfd, lo: 0x8a, hi: 0x8a}, + {value: 0xdff9, lo: 0x8b, hi: 0x9c}, + {value: 0x8e1d, lo: 0x9d, hi: 0x9d}, + {value: 0xe239, lo: 0x9e, hi: 0xa2}, + {value: 0x8e3d, lo: 0xa3, hi: 0xa3}, + {value: 0xe2d9, lo: 0xa4, hi: 0xab}, + {value: 0x7ed5, lo: 0xac, hi: 0xac}, + {value: 0xe3d9, lo: 0xad, hi: 0xaf}, + {value: 0x8e5d, lo: 0xb0, hi: 0xb0}, + {value: 0xe439, lo: 0xb1, hi: 0xb6}, + {value: 0x8e7d, lo: 0xb7, hi: 0xb9}, + {value: 0xe4f9, lo: 0xba, hi: 0xba}, + {value: 0x8edd, lo: 0xbb, hi: 0xbb}, + {value: 0xe519, lo: 0xbc, hi: 0xbf}, + // Block 0x103, offset 0x75e + {value: 0x0020, lo: 0x10}, + {value: 0x937d, lo: 0x80, hi: 0x80}, + {value: 0xf099, lo: 0x81, hi: 0x86}, + {value: 0x939d, lo: 0x87, hi: 0x8a}, + {value: 0xd9f9, lo: 0x8b, hi: 0x8b}, + {value: 0xf159, lo: 0x8c, hi: 0x96}, + {value: 0x941d, lo: 0x97, hi: 0x97}, + {value: 0xf2b9, lo: 0x98, hi: 0xa3}, + {value: 0x943d, lo: 0xa4, hi: 0xa6}, + {value: 0xf439, lo: 0xa7, hi: 0xaa}, + {value: 0x949d, lo: 0xab, hi: 0xab}, + {value: 0xf4b9, lo: 0xac, hi: 0xac}, + {value: 0x94bd, lo: 0xad, hi: 0xad}, + {value: 0xf4d9, lo: 0xae, hi: 0xaf}, + {value: 0x94dd, lo: 0xb0, hi: 0xb1}, + {value: 0xf519, lo: 0xb2, hi: 0xbe}, + {value: 0x2040, lo: 0xbf, hi: 0xbf}, + // Block 0x104, offset 0x76f + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0340, lo: 0x81, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x9f}, + {value: 0x0340, lo: 0xa0, hi: 0xbf}, + // Block 0x105, offset 0x774 + {value: 0x0000, lo: 0x01}, + {value: 0x0340, lo: 0x80, hi: 0xbf}, + // Block 0x106, offset 0x776 + {value: 0x0000, lo: 0x01}, + {value: 0x33c0, lo: 0x80, hi: 0xbf}, + // Block 0x107, offset 0x778 + {value: 0x0000, lo: 0x02}, + {value: 0x33c0, lo: 0x80, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, +} + +// Total table size 42115 bytes (41KiB); checksum: F4A1FA4E diff --git a/vendor/golang.org/x/net/idna/trie.go b/vendor/golang.org/x/net/idna/trie.go new file mode 100644 index 0000000..c4ef847 --- /dev/null +++ b/vendor/golang.org/x/net/idna/trie.go @@ -0,0 +1,72 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +// appendMapping appends the mapping for the respective rune. isMapped must be +// true. A mapping is a categorization of a rune as defined in UTS #46. 
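+// Editor's note (illustrative annotation, not part of the vendored file):
+// each block in the idnaSparseValues table above is a header entry followed
+// by rows. In the header, value holds the stride and lo the number of rows;
+// each row then maps trail bytes in [lo, hi] to r.value + (b-r.lo)*stride,
+// as computed by the lookup function below. For example, Block 0x7e is the
+// header {value: 0x0020, lo: 0x01} plus the row
+// {value: 0x4515, lo: 0x80, hi: 0xbf}, so byte 0x82 resolves to
+// 0x4515 + (0x82-0x80)*0x20 = 0x4555.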
+func (c info) appendMapping(b []byte, s string) []byte { + index := int(c >> indexShift) + if c&xorBit == 0 { + s := mappings[index:] + return append(b, s[1:s[0]+1]...) + } + b = append(b, s...) + if c&inlineXOR == inlineXOR { + // TODO: support and handle two-byte inline masks + b[len(b)-1] ^= byte(index) + } else { + for p := len(b) - int(xorData[index]); p < len(b); p++ { + index++ + b[p] ^= xorData[index] + } + } + return b +} + +// Sparse block handling code. + +type valueRange struct { + value uint16 // header: value:stride + lo, hi byte // header: lo:n +} + +type sparseBlocks struct { + values []valueRange + offset []uint16 +} + +var idnaSparse = sparseBlocks{ + values: idnaSparseValues[:], + offset: idnaSparseOffset[:], +} + +// Don't use newIdnaTrie to avoid unconditional linking in of the table. +var trie = &idnaTrie{} + +// lookup determines the type of block n and looks up the value for b. +// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block +// is a list of ranges with an accompanying value. Given a matching range r, +// the value for b is r.value + (b - r.lo) * stride. +func (t *sparseBlocks) lookup(n uint32, b byte) uint16 { + offset := t.offset[n] + header := t.values[offset] + lo := offset + 1 + hi := lo + uint16(header.lo) + for lo < hi { + m := lo + (hi-lo)/2 + r := t.values[m] + if r.lo <= b && b <= r.hi { + return r.value + uint16(b-r.lo)*header.value + } + if b < r.lo { + hi = m + } else { + lo = m + 1 + } + } + return 0 +} diff --git a/vendor/golang.org/x/net/idna/trieval.go b/vendor/golang.org/x/net/idna/trieval.go new file mode 100644 index 0000000..7a8cf88 --- /dev/null +++ b/vendor/golang.org/x/net/idna/trieval.go @@ -0,0 +1,119 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package idna + +// This file contains definitions for interpreting the trie value of the idna +// trie generated by "go run gen*.go". It is shared by both the generator +// program and the resultant package. Sharing is achieved by the generator +// copying gen_trieval.go to trieval.go and changing what's above this comment. + +// info holds information from the IDNA mapping table for a single rune. It is +// the value returned by a trie lookup. In most cases, all information fits in +// a 16-bit value. For mappings, this value may contain an index into a slice +// with the mapped string. Such mappings can consist of the actual mapped value +// or an XOR pattern to be applied to the bytes of the UTF8 encoding of the +// input rune. This technique is used by the cases packages and reduces the +// table size significantly. +// +// The per-rune values have the following format: +// +// if mapped { +// if inlinedXOR { +// 15..13 inline XOR marker +// 12..11 unused +// 10..3 inline XOR mask +// } else { +// 15..3 index into xor or mapping table +// } +// } else { +// 15..14 unused +// 13 mayNeedNorm +// 12..11 attributes +// 10..8 joining type +// 7..3 category type +// } +// 2 use xor pattern +// 1..0 mapped category +// +// See the definitions below for a more detailed description of the various +// bits. +type info uint16 + +const ( + catSmallMask = 0x3 + catBigMask = 0xF8 + indexShift = 3 + xorBit = 0x4 // interpret the index as an xor pattern + inlineXOR = 0xE000 // These bits are set if the XOR pattern is inlined.
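+ // Editor's note (illustrative, not part of the generated file): these
+ // masks decode the raw values stored in the sparse tables. For example,
+ // info(0x0008) has its low two bits clear (not mapped), and
+ // 0x0008&catBigMask == 0x08, the "valid" category defined below, while
+ // info(0x0040) likewise decodes to 0x40, "disallowed".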
+ + joinShift = 8 + joinMask = 0x07 + + // Attributes + attributesMask = 0x1800 + viramaModifier = 0x1800 + modifier = 0x1000 + rtl = 0x0800 + + mayNeedNorm = 0x2000 +) + +// A category corresponds to a category defined in the IDNA mapping table. +type category uint16 + +const ( + unknown category = 0 // not currently defined in unicode. + mapped category = 1 + disallowedSTD3Mapped category = 2 + deviation category = 3 +) + +const ( + valid category = 0x08 + validNV8 category = 0x18 + validXV8 category = 0x28 + disallowed category = 0x40 + disallowedSTD3Valid category = 0x80 + ignored category = 0xC0 +) + +// join types and additional rune information +const ( + joiningL = (iota + 1) + joiningD + joiningT + joiningR + + //the following types are derived during processing + joinZWJ + joinZWNJ + joinVirama + numJoinTypes +) + +func (c info) isMapped() bool { + return c&0x3 != 0 +} + +func (c info) category() category { + small := c & catSmallMask + if small != 0 { + return category(small) + } + return category(c & catBigMask) +} + +func (c info) joinType() info { + if c.isMapped() { + return 0 + } + return (c >> joinShift) & joinMask +} + +func (c info) isModifier() bool { + return c&(modifier|catSmallMask) == modifier +} + +func (c info) isViramaModifier() bool { + return c&(attributesMask|catSmallMask) == viramaModifier +} diff --git a/vendor/golang.org/x/net/internal/iana/const.go b/vendor/golang.org/x/net/internal/iana/const.go new file mode 100644 index 0000000..826633e --- /dev/null +++ b/vendor/golang.org/x/net/internal/iana/const.go @@ -0,0 +1,227 @@ +// go generate gen.go +// Code generated by the command above; DO NOT EDIT. + +// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA). +package iana // import "golang.org/x/net/internal/iana" + +// Differentiated Services Field Codepoints (DSCP), Updated: 2017-05-12 +const ( + DiffServCS0 = 0x0 // CS0 + DiffServCS1 = 0x20 // CS1 + DiffServCS2 = 0x40 // CS2 + DiffServCS3 = 0x60 // CS3 + DiffServCS4 = 0x80 // CS4 + DiffServCS5 = 0xa0 // CS5 + DiffServCS6 = 0xc0 // CS6 + DiffServCS7 = 0xe0 // CS7 + DiffServAF11 = 0x28 // AF11 + DiffServAF12 = 0x30 // AF12 + DiffServAF13 = 0x38 // AF13 + DiffServAF21 = 0x48 // AF21 + DiffServAF22 = 0x50 // AF22 + DiffServAF23 = 0x58 // AF23 + DiffServAF31 = 0x68 // AF31 + DiffServAF32 = 0x70 // AF32 + DiffServAF33 = 0x78 // AF33 + DiffServAF41 = 0x88 // AF41 + DiffServAF42 = 0x90 // AF42 + DiffServAF43 = 0x98 // AF43 + DiffServEF = 0xb8 // EF + DiffServVOICEADMIT = 0xb0 // VOICE-ADMIT +) + +// IPv4 TOS Byte and IPv6 Traffic Class Octet, Updated: 2001-09-06 +const ( + NotECNTransport = 0x0 // Not-ECT (Not ECN-Capable Transport) + ECNTransport1 = 0x1 // ECT(1) (ECN-Capable Transport(1)) + ECNTransport0 = 0x2 // ECT(0) (ECN-Capable Transport(0)) + CongestionExperienced = 0x3 // CE (Congestion Experienced) +) + +// Protocol Numbers, Updated: 2017-10-13 +const ( + ProtocolIP = 0 // IPv4 encapsulation, pseudo protocol number + ProtocolHOPOPT = 0 // IPv6 Hop-by-Hop Option + ProtocolICMP = 1 // Internet Control Message + ProtocolIGMP = 2 // Internet Group Management + ProtocolGGP = 3 // Gateway-to-Gateway + ProtocolIPv4 = 4 // IPv4 encapsulation + ProtocolST = 5 // Stream + ProtocolTCP = 6 // Transmission Control + ProtocolCBT = 7 // CBT + ProtocolEGP = 8 // Exterior Gateway Protocol + ProtocolIGP = 9 // any private interior gateway (used by Cisco for their IGRP) + ProtocolBBNRCCMON = 10 // BBN RCC Monitoring + ProtocolNVPII = 11 // Network Voice Protocol + 
ProtocolPUP = 12 // PUP + ProtocolEMCON = 14 // EMCON + ProtocolXNET = 15 // Cross Net Debugger + ProtocolCHAOS = 16 // Chaos + ProtocolUDP = 17 // User Datagram + ProtocolMUX = 18 // Multiplexing + ProtocolDCNMEAS = 19 // DCN Measurement Subsystems + ProtocolHMP = 20 // Host Monitoring + ProtocolPRM = 21 // Packet Radio Measurement + ProtocolXNSIDP = 22 // XEROX NS IDP + ProtocolTRUNK1 = 23 // Trunk-1 + ProtocolTRUNK2 = 24 // Trunk-2 + ProtocolLEAF1 = 25 // Leaf-1 + ProtocolLEAF2 = 26 // Leaf-2 + ProtocolRDP = 27 // Reliable Data Protocol + ProtocolIRTP = 28 // Internet Reliable Transaction + ProtocolISOTP4 = 29 // ISO Transport Protocol Class 4 + ProtocolNETBLT = 30 // Bulk Data Transfer Protocol + ProtocolMFENSP = 31 // MFE Network Services Protocol + ProtocolMERITINP = 32 // MERIT Internodal Protocol + ProtocolDCCP = 33 // Datagram Congestion Control Protocol + Protocol3PC = 34 // Third Party Connect Protocol + ProtocolIDPR = 35 // Inter-Domain Policy Routing Protocol + ProtocolXTP = 36 // XTP + ProtocolDDP = 37 // Datagram Delivery Protocol + ProtocolIDPRCMTP = 38 // IDPR Control Message Transport Proto + ProtocolTPPP = 39 // TP++ Transport Protocol + ProtocolIL = 40 // IL Transport Protocol + ProtocolIPv6 = 41 // IPv6 encapsulation + ProtocolSDRP = 42 // Source Demand Routing Protocol + ProtocolIPv6Route = 43 // Routing Header for IPv6 + ProtocolIPv6Frag = 44 // Fragment Header for IPv6 + ProtocolIDRP = 45 // Inter-Domain Routing Protocol + ProtocolRSVP = 46 // Reservation Protocol + ProtocolGRE = 47 // Generic Routing Encapsulation + ProtocolDSR = 48 // Dynamic Source Routing Protocol + ProtocolBNA = 49 // BNA + ProtocolESP = 50 // Encap Security Payload + ProtocolAH = 51 // Authentication Header + ProtocolINLSP = 52 // Integrated Net Layer Security TUBA + ProtocolNARP = 54 // NBMA Address Resolution Protocol + ProtocolMOBILE = 55 // IP Mobility + ProtocolTLSP = 56 // Transport Layer Security Protocol using Kryptonet key management + ProtocolSKIP = 57 // SKIP + ProtocolIPv6ICMP = 58 // ICMP for IPv6 + ProtocolIPv6NoNxt = 59 // No Next Header for IPv6 + ProtocolIPv6Opts = 60 // Destination Options for IPv6 + ProtocolCFTP = 62 // CFTP + ProtocolSATEXPAK = 64 // SATNET and Backroom EXPAK + ProtocolKRYPTOLAN = 65 // Kryptolan + ProtocolRVD = 66 // MIT Remote Virtual Disk Protocol + ProtocolIPPC = 67 // Internet Pluribus Packet Core + ProtocolSATMON = 69 // SATNET Monitoring + ProtocolVISA = 70 // VISA Protocol + ProtocolIPCV = 71 // Internet Packet Core Utility + ProtocolCPNX = 72 // Computer Protocol Network Executive + ProtocolCPHB = 73 // Computer Protocol Heart Beat + ProtocolWSN = 74 // Wang Span Network + ProtocolPVP = 75 // Packet Video Protocol + ProtocolBRSATMON = 76 // Backroom SATNET Monitoring + ProtocolSUNND = 77 // SUN ND PROTOCOL-Temporary + ProtocolWBMON = 78 // WIDEBAND Monitoring + ProtocolWBEXPAK = 79 // WIDEBAND EXPAK + ProtocolISOIP = 80 // ISO Internet Protocol + ProtocolVMTP = 81 // VMTP + ProtocolSECUREVMTP = 82 // SECURE-VMTP + ProtocolVINES = 83 // VINES + ProtocolTTP = 84 // Transaction Transport Protocol + ProtocolIPTM = 84 // Internet Protocol Traffic Manager + ProtocolNSFNETIGP = 85 // NSFNET-IGP + ProtocolDGP = 86 // Dissimilar Gateway Protocol + ProtocolTCF = 87 // TCF + ProtocolEIGRP = 88 // EIGRP + ProtocolOSPFIGP = 89 // OSPFIGP + ProtocolSpriteRPC = 90 // Sprite RPC Protocol + ProtocolLARP = 91 // Locus Address Resolution Protocol + ProtocolMTP = 92 // Multicast Transport Protocol + ProtocolAX25 = 93 // AX.25 Frames + ProtocolIPIP = 94 // IP-within-IP 
Encapsulation Protocol + ProtocolSCCSP = 96 // Semaphore Communications Sec. Pro. + ProtocolETHERIP = 97 // Ethernet-within-IP Encapsulation + ProtocolENCAP = 98 // Encapsulation Header + ProtocolGMTP = 100 // GMTP + ProtocolIFMP = 101 // Ipsilon Flow Management Protocol + ProtocolPNNI = 102 // PNNI over IP + ProtocolPIM = 103 // Protocol Independent Multicast + ProtocolARIS = 104 // ARIS + ProtocolSCPS = 105 // SCPS + ProtocolQNX = 106 // QNX + ProtocolAN = 107 // Active Networks + ProtocolIPComp = 108 // IP Payload Compression Protocol + ProtocolSNP = 109 // Sitara Networks Protocol + ProtocolCompaqPeer = 110 // Compaq Peer Protocol + ProtocolIPXinIP = 111 // IPX in IP + ProtocolVRRP = 112 // Virtual Router Redundancy Protocol + ProtocolPGM = 113 // PGM Reliable Transport Protocol + ProtocolL2TP = 115 // Layer Two Tunneling Protocol + ProtocolDDX = 116 // D-II Data Exchange (DDX) + ProtocolIATP = 117 // Interactive Agent Transfer Protocol + ProtocolSTP = 118 // Schedule Transfer Protocol + ProtocolSRP = 119 // SpectraLink Radio Protocol + ProtocolUTI = 120 // UTI + ProtocolSMP = 121 // Simple Message Protocol + ProtocolPTP = 123 // Performance Transparency Protocol + ProtocolISIS = 124 // ISIS over IPv4 + ProtocolFIRE = 125 // FIRE + ProtocolCRTP = 126 // Combat Radio Transport Protocol + ProtocolCRUDP = 127 // Combat Radio User Datagram + ProtocolSSCOPMCE = 128 // SSCOPMCE + ProtocolIPLT = 129 // IPLT + ProtocolSPS = 130 // Secure Packet Shield + ProtocolPIPE = 131 // Private IP Encapsulation within IP + ProtocolSCTP = 132 // Stream Control Transmission Protocol + ProtocolFC = 133 // Fibre Channel + ProtocolRSVPE2EIGNORE = 134 // RSVP-E2E-IGNORE + ProtocolMobilityHeader = 135 // Mobility Header + ProtocolUDPLite = 136 // UDPLite + ProtocolMPLSinIP = 137 // MPLS-in-IP + ProtocolMANET = 138 // MANET Protocols + ProtocolHIP = 139 // Host Identity Protocol + ProtocolShim6 = 140 // Shim6 Protocol + ProtocolWESP = 141 // Wrapped Encapsulating Security Payload + ProtocolROHC = 142 // Robust Header Compression + ProtocolReserved = 255 // Reserved +) + +// Address Family Numbers, Updated: 2016-10-25 +const ( + AddrFamilyIPv4 = 1 // IP (IP version 4) + AddrFamilyIPv6 = 2 // IP6 (IP version 6) + AddrFamilyNSAP = 3 // NSAP + AddrFamilyHDLC = 4 // HDLC (8-bit multidrop) + AddrFamilyBBN1822 = 5 // BBN 1822 + AddrFamily802 = 6 // 802 (includes all 802 media plus Ethernet "canonical format") + AddrFamilyE163 = 7 // E.163 + AddrFamilyE164 = 8 // E.164 (SMDS, Frame Relay, ATM) + AddrFamilyF69 = 9 // F.69 (Telex) + AddrFamilyX121 = 10 // X.121 (X.25, Frame Relay) + AddrFamilyIPX = 11 // IPX + AddrFamilyAppletalk = 12 // Appletalk + AddrFamilyDecnetIV = 13 // Decnet IV + AddrFamilyBanyanVines = 14 // Banyan Vines + AddrFamilyE164withSubaddress = 15 // E.164 with NSAP format subaddress + AddrFamilyDNS = 16 // DNS (Domain Name System) + AddrFamilyDistinguishedName = 17 // Distinguished Name + AddrFamilyASNumber = 18 // AS Number + AddrFamilyXTPoverIPv4 = 19 // XTP over IP version 4 + AddrFamilyXTPoverIPv6 = 20 // XTP over IP version 6 + AddrFamilyXTPnativemodeXTP = 21 // XTP native mode XTP + AddrFamilyFibreChannelWorldWidePortName = 22 // Fibre Channel World-Wide Port Name + AddrFamilyFibreChannelWorldWideNodeName = 23 // Fibre Channel World-Wide Node Name + AddrFamilyGWID = 24 // GWID + AddrFamilyL2VPN = 25 // AFI for L2VPN information + AddrFamilyMPLSTPSectionEndpointID = 26 // MPLS-TP Section Endpoint Identifier + AddrFamilyMPLSTPLSPEndpointID = 27 // MPLS-TP LSP Endpoint Identifier + 
AddrFamilyMPLSTPPseudowireEndpointID = 28 // MPLS-TP Pseudowire Endpoint Identifier + AddrFamilyMTIPv4 = 29 // MT IP: Multi-Topology IP version 4 + AddrFamilyMTIPv6 = 30 // MT IPv6: Multi-Topology IP version 6 + AddrFamilyEIGRPCommonServiceFamily = 16384 // EIGRP Common Service Family + AddrFamilyEIGRPIPv4ServiceFamily = 16385 // EIGRP IPv4 Service Family + AddrFamilyEIGRPIPv6ServiceFamily = 16386 // EIGRP IPv6 Service Family + AddrFamilyLISPCanonicalAddressFormat = 16387 // LISP Canonical Address Format (LCAF) + AddrFamilyBGPLS = 16388 // BGP-LS + AddrFamily48bitMAC = 16389 // 48-bit MAC + AddrFamily64bitMAC = 16390 // 64-bit MAC + AddrFamilyOUI = 16391 // OUI + AddrFamilyMACFinal24bits = 16392 // MAC/24 + AddrFamilyMACFinal40bits = 16393 // MAC/40 + AddrFamilyIPv6Initial64bits = 16394 // IPv6/64 + AddrFamilyRBridgePortID = 16395 // RBridge Port ID + AddrFamilyTRILLNickname = 16396 // TRILL Nickname +) diff --git a/vendor/golang.org/x/net/internal/iana/gen.go b/vendor/golang.org/x/net/internal/iana/gen.go new file mode 100644 index 0000000..2227e09 --- /dev/null +++ b/vendor/golang.org/x/net/internal/iana/gen.go @@ -0,0 +1,387 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +//go:generate go run gen.go + +// This program generates internet protocol constants and tables by +// reading IANA protocol registries. +package main + +import ( + "bytes" + "encoding/xml" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "strconv" + "strings" +) + +var registries = []struct { + url string + parse func(io.Writer, io.Reader) error +}{ + { + "https://www.iana.org/assignments/dscp-registry/dscp-registry.xml", + parseDSCPRegistry, + }, + { + "https://www.iana.org/assignments/ipv4-tos-byte/ipv4-tos-byte.xml", + parseTOSTCByte, + }, + { + "https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xml", + parseProtocolNumbers, + }, + { + "http://www.iana.org/assignments/address-family-numbers/address-family-numbers.xml", + parseAddrFamilyNumbers, + }, +} + +func main() { + var bb bytes.Buffer + fmt.Fprintf(&bb, "// go generate gen.go\n") + fmt.Fprintf(&bb, "// Code generated by the command above; DO NOT EDIT.\n\n") + fmt.Fprintf(&bb, "// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).\n") + fmt.Fprintf(&bb, `package iana // import "golang.org/x/net/internal/iana"`+"\n\n") + for _, r := range registries { + resp, err := http.Get(r.url) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + fmt.Fprintf(os.Stderr, "got HTTP status code %v for %v\n", resp.StatusCode, r.url) + os.Exit(1) + } + if err := r.parse(&bb, resp.Body); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + fmt.Fprintf(&bb, "\n") + } + b, err := format.Source(bb.Bytes()) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := ioutil.WriteFile("const.go", b, 0644); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func parseDSCPRegistry(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var dr dscpRegistry + if err := dec.Decode(&dr); err != nil { + return err + } + drs := dr.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", dr.Title, dr.Updated) + fmt.Fprintf(w, "const (\n") + for _, dr := range drs { + fmt.Fprintf(w, "DiffServ%s = %#x", dr.Name, dr.Value) + fmt.Fprintf(w, "// %s\n", 
dr.OrigName) + } + fmt.Fprintf(w, ")\n") + return nil +} + +type dscpRegistry struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Note string `xml:"note"` + RegTitle string `xml:"registry>title"` + PoolRecords []struct { + Name string `xml:"name"` + Space string `xml:"space"` + } `xml:"registry>record"` + Records []struct { + Name string `xml:"name"` + Space string `xml:"space"` + } `xml:"registry>registry>record"` +} + +type canonDSCPRecord struct { + OrigName string + Name string + Value int +} + +func (drr *dscpRegistry) escape() []canonDSCPRecord { + drs := make([]canonDSCPRecord, len(drr.Records)) + sr := strings.NewReplacer( + "+", "", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, dr := range drr.Records { + s := strings.TrimSpace(dr.Name) + drs[i].OrigName = s + drs[i].Name = sr.Replace(s) + n, err := strconv.ParseUint(dr.Space, 2, 8) + if err != nil { + continue + } + drs[i].Value = int(n) << 2 + } + return drs +} + +func parseTOSTCByte(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var ttb tosTCByte + if err := dec.Decode(&ttb); err != nil { + return err + } + trs := ttb.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", ttb.Title, ttb.Updated) + fmt.Fprintf(w, "const (\n") + for _, tr := range trs { + fmt.Fprintf(w, "%s = %#x", tr.Keyword, tr.Value) + fmt.Fprintf(w, "// %s\n", tr.OrigKeyword) + } + fmt.Fprintf(w, ")\n") + return nil +} + +type tosTCByte struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Note string `xml:"note"` + RegTitle string `xml:"registry>title"` + Records []struct { + Binary string `xml:"binary"` + Keyword string `xml:"keyword"` + } `xml:"registry>record"` +} + +type canonTOSTCByteRecord struct { + OrigKeyword string + Keyword string + Value int +} + +func (ttb *tosTCByte) escape() []canonTOSTCByteRecord { + trs := make([]canonTOSTCByteRecord, len(ttb.Records)) + sr := strings.NewReplacer( + "Capable", "", + "(", "", + ")", "", + "+", "", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, tr := range ttb.Records { + s := strings.TrimSpace(tr.Keyword) + trs[i].OrigKeyword = s + ss := strings.Split(s, " ") + if len(ss) > 1 { + trs[i].Keyword = strings.Join(ss[1:], " ") + } else { + trs[i].Keyword = ss[0] + } + trs[i].Keyword = sr.Replace(trs[i].Keyword) + n, err := strconv.ParseUint(tr.Binary, 2, 8) + if err != nil { + continue + } + trs[i].Value = int(n) + } + return trs +} + +func parseProtocolNumbers(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var pn protocolNumbers + if err := dec.Decode(&pn); err != nil { + return err + } + prs := pn.escape() + prs = append([]canonProtocolRecord{{ + Name: "IP", + Descr: "IPv4 encapsulation, pseudo protocol number", + Value: 0, + }}, prs...) 
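+ // Editor's note (clarifying comment, not in the upstream generator):
+ // the IANA registry assigns value 0 to HOPOPT, so this legacy "IP"
+ // pseudo-protocol record is prepended by hand; both ProtocolIP and
+ // ProtocolHOPOPT therefore come out as 0 in the generated const.go.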
+ fmt.Fprintf(w, "// %s, Updated: %s\n", pn.Title, pn.Updated) + fmt.Fprintf(w, "const (\n") + for _, pr := range prs { + if pr.Name == "" { + continue + } + fmt.Fprintf(w, "Protocol%s = %d", pr.Name, pr.Value) + s := pr.Descr + if s == "" { + s = pr.OrigName + } + fmt.Fprintf(w, "// %s\n", s) + } + fmt.Fprintf(w, ")\n") + return nil +} + +type protocolNumbers struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + RegTitle string `xml:"registry>title"` + Note string `xml:"registry>note"` + Records []struct { + Value string `xml:"value"` + Name string `xml:"name"` + Descr string `xml:"description"` + } `xml:"registry>record"` +} + +type canonProtocolRecord struct { + OrigName string + Name string + Descr string + Value int +} + +func (pn *protocolNumbers) escape() []canonProtocolRecord { + prs := make([]canonProtocolRecord, len(pn.Records)) + sr := strings.NewReplacer( + "-in-", "in", + "-within-", "within", + "-over-", "over", + "+", "P", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, pr := range pn.Records { + if strings.Contains(pr.Name, "Deprecated") || + strings.Contains(pr.Name, "deprecated") { + continue + } + prs[i].OrigName = pr.Name + s := strings.TrimSpace(pr.Name) + switch pr.Name { + case "ISIS over IPv4": + prs[i].Name = "ISIS" + case "manet": + prs[i].Name = "MANET" + default: + prs[i].Name = sr.Replace(s) + } + ss := strings.Split(pr.Descr, "\n") + for i := range ss { + ss[i] = strings.TrimSpace(ss[i]) + } + if len(ss) > 1 { + prs[i].Descr = strings.Join(ss, " ") + } else { + prs[i].Descr = ss[0] + } + prs[i].Value, _ = strconv.Atoi(pr.Value) + } + return prs +} + +func parseAddrFamilyNumbers(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var afn addrFamilylNumbers + if err := dec.Decode(&afn); err != nil { + return err + } + afrs := afn.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", afn.Title, afn.Updated) + fmt.Fprintf(w, "const (\n") + for _, afr := range afrs { + if afr.Name == "" { + continue + } + fmt.Fprintf(w, "AddrFamily%s = %d", afr.Name, afr.Value) + fmt.Fprintf(w, "// %s\n", afr.Descr) + } + fmt.Fprintf(w, ")\n") + return nil +} + +type addrFamilylNumbers struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + RegTitle string `xml:"registry>title"` + Note string `xml:"registry>note"` + Records []struct { + Value string `xml:"value"` + Descr string `xml:"description"` + } `xml:"registry>record"` +} + +type canonAddrFamilyRecord struct { + Name string + Descr string + Value int +} + +func (afn *addrFamilylNumbers) escape() []canonAddrFamilyRecord { + afrs := make([]canonAddrFamilyRecord, len(afn.Records)) + sr := strings.NewReplacer( + "IP version 4", "IPv4", + "IP version 6", "IPv6", + "Identifier", "ID", + "-", "", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, afr := range afn.Records { + if strings.Contains(afr.Descr, "Unassigned") || + strings.Contains(afr.Descr, "Reserved") { + continue + } + afrs[i].Descr = afr.Descr + s := strings.TrimSpace(afr.Descr) + switch s { + case "IP (IP version 4)": + afrs[i].Name = "IPv4" + case "IP6 (IP version 6)": + afrs[i].Name = "IPv6" + case "AFI for L2VPN information": + afrs[i].Name = "L2VPN" + case "E.164 with NSAP format subaddress": + afrs[i].Name = "E164withSubaddress" + case "MT IP: Multi-Topology IP version 4": + afrs[i].Name = "MTIPv4" + case "MAC/24": + afrs[i].Name = "MACFinal24bits" + case "MAC/40": + afrs[i].Name = "MACFinal40bits" + case "IPv6/64": + afrs[i].Name = 
"IPv6Initial64bits" + default: + n := strings.Index(s, "(") + if n > 0 { + s = s[:n] + } + n = strings.Index(s, ":") + if n > 0 { + s = s[:n] + } + afrs[i].Name = sr.Replace(s) + } + afrs[i].Value, _ = strconv.Atoi(afr.Value) + } + return afrs +} diff --git a/vendor/golang.org/x/net/internal/nettest/helper_bsd.go b/vendor/golang.org/x/net/internal/nettest/helper_bsd.go new file mode 100644 index 0000000..a6e433b --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/helper_bsd.go @@ -0,0 +1,53 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package nettest + +import ( + "runtime" + "strconv" + "strings" + "syscall" +) + +var darwinVersion int + +func init() { + if runtime.GOOS == "darwin" { + // See http://support.apple.com/kb/HT1633. + s, err := syscall.Sysctl("kern.osrelease") + if err != nil { + return + } + ss := strings.Split(s, ".") + if len(ss) == 0 { + return + } + darwinVersion, _ = strconv.Atoi(ss[0]) + } +} + +func supportsIPv6MulticastDeliveryOnLoopback() bool { + switch runtime.GOOS { + case "freebsd": + // See http://www.freebsd.org/cgi/query-pr.cgi?pr=180065. + // Even after the fix, it looks like the latest + // kernels don't deliver link-local scoped multicast + // packets correctly. + return false + case "darwin": + return !causesIPv6Crash() + default: + return true + } +} + +func causesIPv6Crash() bool { + // We see some kernel crash when running IPv6 with IP-level + // options on Darwin kernel version 12 or below. + // See golang.org/issues/17015. + return darwinVersion < 13 +} diff --git a/vendor/golang.org/x/net/internal/nettest/helper_nobsd.go b/vendor/golang.org/x/net/internal/nettest/helper_nobsd.go new file mode 100644 index 0000000..bc7da5e --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/helper_nobsd.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux solaris + +package nettest + +func supportsIPv6MulticastDeliveryOnLoopback() bool { + return true +} + +func causesIPv6Crash() bool { + return false +} diff --git a/vendor/golang.org/x/net/internal/nettest/helper_posix.go b/vendor/golang.org/x/net/internal/nettest/helper_posix.go new file mode 100644 index 0000000..963ed99 --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/helper_posix.go @@ -0,0 +1,31 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package nettest + +import ( + "os" + "syscall" +) + +func protocolNotSupported(err error) bool { + switch err := err.(type) { + case syscall.Errno: + switch err { + case syscall.EPROTONOSUPPORT, syscall.ENOPROTOOPT: + return true + } + case *os.SyscallError: + switch err := err.Err.(type) { + case syscall.Errno: + switch err { + case syscall.EPROTONOSUPPORT, syscall.ENOPROTOOPT: + return true + } + } + } + return false +} diff --git a/vendor/golang.org/x/net/internal/nettest/helper_stub.go b/vendor/golang.org/x/net/internal/nettest/helper_stub.go new file mode 100644 index 0000000..ea61b6f --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/helper_stub.go @@ -0,0 +1,32 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 + +package nettest + +import ( + "fmt" + "runtime" +) + +func maxOpenFiles() int { + return defaultMaxOpenFiles +} + +func supportsRawIPSocket() (string, bool) { + return fmt.Sprintf("not supported on %s", runtime.GOOS), false +} + +func supportsIPv6MulticastDeliveryOnLoopback() bool { + return false +} + +func causesIPv6Crash() bool { + return false +} + +func protocolNotSupported(err error) bool { + return false +} diff --git a/vendor/golang.org/x/net/internal/nettest/helper_unix.go b/vendor/golang.org/x/net/internal/nettest/helper_unix.go new file mode 100644 index 0000000..ed13e44 --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/helper_unix.go @@ -0,0 +1,29 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package nettest + +import ( + "fmt" + "os" + "runtime" + "syscall" +) + +func maxOpenFiles() int { + var rlim syscall.Rlimit + if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlim); err != nil { + return defaultMaxOpenFiles + } + return int(rlim.Cur) +} + +func supportsRawIPSocket() (string, bool) { + if os.Getuid() != 0 { + return fmt.Sprintf("must be root on %s", runtime.GOOS), false + } + return "", true +} diff --git a/vendor/golang.org/x/net/internal/nettest/helper_windows.go b/vendor/golang.org/x/net/internal/nettest/helper_windows.go new file mode 100644 index 0000000..3dcb727 --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/helper_windows.go @@ -0,0 +1,42 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package nettest + +import ( + "fmt" + "runtime" + "syscall" +) + +func maxOpenFiles() int { + return 4 * defaultMaxOpenFiles /* actually it's 16581375 */ +} + +func supportsRawIPSocket() (string, bool) { + // From http://msdn.microsoft.com/en-us/library/windows/desktop/ms740548.aspx: + // Note: To use a socket of type SOCK_RAW requires administrative privileges. + // Users running Winsock applications that use raw sockets must be a member of + // the Administrators group on the local computer, otherwise raw socket calls + // will fail with an error code of WSAEACCES. On Windows Vista and later, access + // for raw sockets is enforced at socket creation. In earlier versions of Windows, + // access for raw sockets is enforced during other socket operations. + s, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_RAW, 0) + if err == syscall.WSAEACCES { + return fmt.Sprintf("no access to raw socket allowed on %s", runtime.GOOS), false + } + if err != nil { + return err.Error(), false + } + syscall.Closesocket(s) + return "", true +} + +func supportsIPv6MulticastDeliveryOnLoopback() bool { + return true +} + +func causesIPv6Crash() bool { + return false +} diff --git a/vendor/golang.org/x/net/internal/nettest/interface.go b/vendor/golang.org/x/net/internal/nettest/interface.go new file mode 100644 index 0000000..8e6333a --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/interface.go @@ -0,0 +1,94 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
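Together these helpers let a test skip, with a reason, rather than fail on platforms or privilege levels that cannot run it. A hedged usage sketch (the test name is illustrative; the exported SupportsRawIPSocket wrapper is defined in stack.go below, and the internal package is importable only within the x/net tree):

    package nettest_test

    import (
        "testing"

        "golang.org/x/net/internal/nettest"
    )

    func TestSomethingOverRawIP(t *testing.T) {
        // SupportsRawIPSocket reports a human-readable reason when raw
        // sockets are unavailable, e.g. "must be root on linux".
        if msg, ok := nettest.SupportsRawIPSocket(); !ok {
            t.Skip(msg)
        }
        // ... open a raw IP socket and run the actual test ...
    }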
+ +package nettest + +import "net" + +// IsMulticastCapable reports whether ifi is an IP multicast-capable +// network interface. Network must be "ip", "ip4" or "ip6". +func IsMulticastCapable(network string, ifi *net.Interface) (net.IP, bool) { + switch network { + case "ip", "ip4", "ip6": + default: + return nil, false + } + if ifi == nil || ifi.Flags&net.FlagUp == 0 || ifi.Flags&net.FlagMulticast == 0 { + return nil, false + } + return hasRoutableIP(network, ifi) +} + +// RoutedInterface returns a network interface that can route IP +// traffic and satisfies flags. It returns nil when an appropriate +// network interface is not found. Network must be "ip", "ip4" or +// "ip6". +func RoutedInterface(network string, flags net.Flags) *net.Interface { + switch network { + case "ip", "ip4", "ip6": + default: + return nil + } + ift, err := net.Interfaces() + if err != nil { + return nil + } + for _, ifi := range ift { + if ifi.Flags&flags != flags { + continue + } + if _, ok := hasRoutableIP(network, &ifi); !ok { + continue + } + return &ifi + } + return nil +} + +func hasRoutableIP(network string, ifi *net.Interface) (net.IP, bool) { + ifat, err := ifi.Addrs() + if err != nil { + return nil, false + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip := routableIP(network, ifa.IP); ip != nil { + return ip, true + } + case *net.IPNet: + if ip := routableIP(network, ifa.IP); ip != nil { + return ip, true + } + } + } + return nil, false +} + +func routableIP(network string, ip net.IP) net.IP { + if !ip.IsLoopback() && !ip.IsLinkLocalUnicast() && !ip.IsGlobalUnicast() { + return nil + } + switch network { + case "ip4": + if ip := ip.To4(); ip != nil { + return ip + } + case "ip6": + if ip.IsLoopback() { // addressing scope of the loopback address depends on each implementation + return nil + } + if ip := ip.To16(); ip != nil && ip.To4() == nil { + return ip + } + default: + if ip := ip.To4(); ip != nil { + return ip + } + if ip := ip.To16(); ip != nil { + return ip + } + } + return nil +} diff --git a/vendor/golang.org/x/net/internal/nettest/rlimit.go b/vendor/golang.org/x/net/internal/nettest/rlimit.go new file mode 100644 index 0000000..bb34aec --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/rlimit.go @@ -0,0 +1,11 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package nettest + +const defaultMaxOpenFiles = 256 + +// MaxOpenFiles returns the maximum number of open files for the +// caller's process. +func MaxOpenFiles() int { return maxOpenFiles() } diff --git a/vendor/golang.org/x/net/internal/nettest/stack.go b/vendor/golang.org/x/net/internal/nettest/stack.go new file mode 100644 index 0000000..06f4e09 --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/stack.go @@ -0,0 +1,152 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package nettest provides utilities for network testing. 
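RoutedInterface and IsMulticastCapable above are typically used together to pick a viable interface before a multicast test, skipping when the host has none. A sketch of that pattern (a fragment from a hypothetical test function):

    // Pick an interface that can route IPv4 traffic and do multicast;
    // skip rather than fail on hosts without one.
    ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast)
    if ifi == nil {
        t.Skip("no routable ip4 multicast interface")
    }
    if ip, ok := nettest.IsMulticastCapable("ip4", ifi); ok {
        t.Logf("using %v on %s", ip, ifi.Name)
    }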
+package nettest // import "golang.org/x/net/internal/nettest" + +import ( + "fmt" + "io/ioutil" + "net" + "os" + "runtime" +) + +var ( + supportsIPv4 bool + supportsIPv6 bool +) + +func init() { + if ln, err := net.Listen("tcp4", "127.0.0.1:0"); err == nil { + ln.Close() + supportsIPv4 = true + } + if ln, err := net.Listen("tcp6", "[::1]:0"); err == nil { + ln.Close() + supportsIPv6 = true + } +} + +// SupportsIPv4 reports whether the platform supports IPv4 networking +// functionality. +func SupportsIPv4() bool { return supportsIPv4 } + +// SupportsIPv6 reports whether the platform supports IPv6 networking +// functionality. +func SupportsIPv6() bool { return supportsIPv6 } + +// SupportsRawIPSocket reports whether the platform supports raw IP +// sockets. +func SupportsRawIPSocket() (string, bool) { + return supportsRawIPSocket() +} + +// SupportsIPv6MulticastDeliveryOnLoopback reports whether the +// platform supports IPv6 multicast packet delivery on software +// loopback interface. +func SupportsIPv6MulticastDeliveryOnLoopback() bool { + return supportsIPv6MulticastDeliveryOnLoopback() +} + +// ProtocolNotSupported reports whether err is a protocol not +// supported error. +func ProtocolNotSupported(err error) bool { + return protocolNotSupported(err) +} + +// TestableNetwork reports whether network is testable on the current +// platform configuration. +func TestableNetwork(network string) bool { + // This is based on logic from standard library's + // net/platform_test.go. + switch network { + case "unix", "unixgram": + switch runtime.GOOS { + case "android", "nacl", "plan9", "windows": + return false + } + if runtime.GOOS == "darwin" && (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64") { + return false + } + case "unixpacket": + switch runtime.GOOS { + case "android", "darwin", "freebsd", "nacl", "plan9", "windows": + return false + case "netbsd": + // It passes on amd64 at least. 386 fails (Issue 22927). arm is unknown. + if runtime.GOARCH == "386" { + return false + } + } + } + return true +} + +// NewLocalListener returns a listener which listens to a loopback IP +// address or local file system path. +// Network must be "tcp", "tcp4", "tcp6", "unix" or "unixpacket". +func NewLocalListener(network string) (net.Listener, error) { + switch network { + case "tcp": + if supportsIPv4 { + if ln, err := net.Listen("tcp4", "127.0.0.1:0"); err == nil { + return ln, nil + } + } + if supportsIPv6 { + return net.Listen("tcp6", "[::1]:0") + } + case "tcp4": + if supportsIPv4 { + return net.Listen("tcp4", "127.0.0.1:0") + } + case "tcp6": + if supportsIPv6 { + return net.Listen("tcp6", "[::1]:0") + } + case "unix", "unixpacket": + return net.Listen(network, localPath()) + } + return nil, fmt.Errorf("%s is not supported", network) +} + +// NewLocalPacketListener returns a packet listener which listens to a +// loopback IP address or local file system path. +// Network must be "udp", "udp4", "udp6" or "unixgram". 
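As a sketch of how the guards and listener constructors compose in a test (name illustrative, assuming the usual testing, runtime and nettest imports): TestableNetwork filters out unusable platform/arch combinations up front, and NewLocalListener then binds whichever loopback stack is actually present. NewLocalPacketListener, just below, follows the same shape for datagram sockets.

    func TestOverLocalListener(t *testing.T) {
        const network = "unixpacket"
        if !nettest.TestableNetwork(network) {
            t.Skipf("%s not testable on %s/%s", network, runtime.GOOS, runtime.GOARCH)
        }
        ln, err := nettest.NewLocalListener(network)
        if err != nil {
            t.Fatal(err)
        }
        defer ln.Close()
        // ... dial ln.Addr() and exercise the connection ...
    }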
+func NewLocalPacketListener(network string) (net.PacketConn, error) { + switch network { + case "udp": + if supportsIPv4 { + if c, err := net.ListenPacket("udp4", "127.0.0.1:0"); err == nil { + return c, nil + } + } + if supportsIPv6 { + return net.ListenPacket("udp6", "[::1]:0") + } + case "udp4": + if supportsIPv4 { + return net.ListenPacket("udp4", "127.0.0.1:0") + } + case "udp6": + if supportsIPv6 { + return net.ListenPacket("udp6", "[::1]:0") + } + case "unixgram": + return net.ListenPacket(network, localPath()) + } + return nil, fmt.Errorf("%s is not supported", network) +} + +func localPath() string { + f, err := ioutil.TempFile("", "nettest") + if err != nil { + panic(err) + } + path := f.Name() + f.Close() + os.Remove(path) + return path +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr.go b/vendor/golang.org/x/net/internal/socket/cmsghdr.go new file mode 100644 index 0000000..1eb07d2 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr.go @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package socket + +func (h *cmsghdr) len() int { return int(h.Len) } +func (h *cmsghdr) lvl() int { return int(h.Level) } +func (h *cmsghdr) typ() int { return int(h.Type) } diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go new file mode 100644 index 0000000..d1d0c2d --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go @@ -0,0 +1,13 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint32(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go new file mode 100644 index 0000000..bac6681 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm mips mipsle 386 +// +build linux + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint32(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go new file mode 100644 index 0000000..63f0534 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x +// +build linux + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint64(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go new file mode 100644 index 0000000..7dedd43 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64 +// +build solaris + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint32(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go new file mode 100644 index 0000000..a4e7122 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package socket + +type cmsghdr struct{} + +const sizeofCmsghdr = 0 + +func (h *cmsghdr) len() int { return 0 } +func (h *cmsghdr) lvl() int { return 0 } +func (h *cmsghdr) typ() int { return 0 } + +func (h *cmsghdr) set(l, lvl, typ int) {} diff --git a/vendor/golang.org/x/net/internal/socket/defs_darwin.go b/vendor/golang.org/x/net/internal/socket/defs_darwin.go new file mode 100644 index 0000000..14e28c0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_darwin.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go b/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go new file mode 100644 index 0000000..14e28c0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_freebsd.go b/vendor/golang.org/x/net/internal/socket/defs_freebsd.go new file mode 100644 index 0000000..14e28c0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_freebsd.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_linux.go b/vendor/golang.org/x/net/internal/socket/defs_linux.go new file mode 100644 index 0000000..ce9ec2f --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_linux.go @@ -0,0 +1,49 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include +#include + +#define _GNU_SOURCE +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type mmsghdr C.struct_mmsghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofMmsghdr = C.sizeof_struct_mmsghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_netbsd.go b/vendor/golang.org/x/net/internal/socket/defs_netbsd.go new file mode 100644 index 0000000..3f84335 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_netbsd.go @@ -0,0 +1,47 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
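These defs_*.go files are never compiled into the package (note the `+build ignore` constraint); they are cgo templates. The convention in the x/net tree is to regenerate the checked-in, Go-only mirror files from them with cgo's godefs mode, along the lines of `go tool cgo -godefs defs_linux.go > zsys_linux_amd64.go` (output name illustrative), so the iovec, msghdr and sockaddr structs match each GOOS/GOARCH kernel ABI without requiring cgo at build time.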
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type mmsghdr C.struct_mmsghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofMmsghdr = C.sizeof_struct_mmsghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_openbsd.go b/vendor/golang.org/x/net/internal/socket/defs_openbsd.go new file mode 100644 index 0000000..14e28c0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_openbsd.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_solaris.go b/vendor/golang.org/x/net/internal/socket/defs_solaris.go new file mode 100644 index 0000000..14e28c0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_solaris.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/error_unix.go b/vendor/golang.org/x/net/internal/socket/error_unix.go new file mode 100644 index 0000000..93dff91 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/error_unix.go @@ -0,0 +1,31 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package socket + +import "syscall" + +var ( + errEAGAIN error = syscall.EAGAIN + errEINVAL error = syscall.EINVAL + errENOENT error = syscall.ENOENT +) + +// errnoErr returns common boxed Errno values, to prevent allocations +// at runtime. +func errnoErr(errno syscall.Errno) error { + switch errno { + case 0: + return nil + case syscall.EAGAIN: + return errEAGAIN + case syscall.EINVAL: + return errEINVAL + case syscall.ENOENT: + return errENOENT + } + return errno +} diff --git a/vendor/golang.org/x/net/internal/socket/error_windows.go b/vendor/golang.org/x/net/internal/socket/error_windows.go new file mode 100644 index 0000000..6a6379a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/error_windows.go @@ -0,0 +1,26 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import "syscall" + +var ( + errERROR_IO_PENDING error = syscall.ERROR_IO_PENDING + errEINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent allocations +// at runtime. +func errnoErr(errno syscall.Errno) error { + switch errno { + case 0: + return nil + case syscall.ERROR_IO_PENDING: + return errERROR_IO_PENDING + case syscall.EINVAL: + return errEINVAL + } + return errno +} diff --git a/vendor/golang.org/x/net/internal/socket/iovec_32bit.go b/vendor/golang.org/x/net/internal/socket/iovec_32bit.go new file mode 100644 index 0000000..05d6082 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/iovec_32bit.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm mips mipsle 386 +// +build darwin dragonfly freebsd linux netbsd openbsd + +package socket + +import "unsafe" + +func (v *iovec) set(b []byte) { + l := len(b) + if l == 0 { + return + } + v.Base = (*byte)(unsafe.Pointer(&b[0])) + v.Len = uint32(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/iovec_64bit.go b/vendor/golang.org/x/net/internal/socket/iovec_64bit.go new file mode 100644 index 0000000..afb34ad --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/iovec_64bit.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x +// +build darwin dragonfly freebsd linux netbsd openbsd + +package socket + +import "unsafe" + +func (v *iovec) set(b []byte) { + l := len(b) + if l == 0 { + return + } + v.Base = (*byte)(unsafe.Pointer(&b[0])) + v.Len = uint64(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go new file mode 100644 index 0000000..8d17a40 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
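One detail worth noting in these iovec setters: the zero-length early return is load-bearing, because taking &b[0] on an empty slice panics at run time. A self-contained sketch of the same guard, using a local illustrative type with the 64-bit layout assumed:

    package main

    import (
        "fmt"
        "unsafe"
    )

    type iovec struct {
        Base *byte
        Len  uint64
    }

    // set mirrors the 64-bit setter above: an empty buffer leaves the
    // iovec zeroed instead of panicking on &b[0].
    func (v *iovec) set(b []byte) {
        if len(b) == 0 {
            return
        }
        v.Base = (*byte)(unsafe.Pointer(&b[0]))
        v.Len = uint64(len(b))
    }

    func main() {
        var v iovec
        v.set(nil) // safe: stays zeroed
        v.set([]byte("hi"))
        fmt.Println(v.Len) // 2
    }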
+ +// +build amd64 +// +build solaris + +package socket + +import "unsafe" + +func (v *iovec) set(b []byte) { + l := len(b) + if l == 0 { + return + } + v.Base = (*int8)(unsafe.Pointer(&b[0])) + v.Len = uint64(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/iovec_stub.go b/vendor/golang.org/x/net/internal/socket/iovec_stub.go new file mode 100644 index 0000000..c87d2a9 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/iovec_stub.go @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package socket + +type iovec struct{} + +func (v *iovec) set(b []byte) {} diff --git a/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go b/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go new file mode 100644 index 0000000..2e80a9c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux,!netbsd + +package socket + +import "net" + +type mmsghdr struct{} + +type mmsghdrs []mmsghdr + +func (hs mmsghdrs) pack(ms []Message, parseFn func([]byte, string) (net.Addr, error), marshalFn func(net.Addr) []byte) error { + return nil +} + +func (hs mmsghdrs) unpack(ms []Message, parseFn func([]byte, string) (net.Addr, error), hint string) error { + return nil +} diff --git a/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go b/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go new file mode 100644 index 0000000..3c42ea7 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go @@ -0,0 +1,42 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux netbsd + +package socket + +import "net" + +type mmsghdrs []mmsghdr + +func (hs mmsghdrs) pack(ms []Message, parseFn func([]byte, string) (net.Addr, error), marshalFn func(net.Addr) []byte) error { + for i := range hs { + vs := make([]iovec, len(ms[i].Buffers)) + var sa []byte + if parseFn != nil { + sa = make([]byte, sizeofSockaddrInet6) + } + if marshalFn != nil { + sa = marshalFn(ms[i].Addr) + } + hs[i].Hdr.pack(vs, ms[i].Buffers, ms[i].OOB, sa) + } + return nil +} + +func (hs mmsghdrs) unpack(ms []Message, parseFn func([]byte, string) (net.Addr, error), hint string) error { + for i := range hs { + ms[i].N = int(hs[i].Len) + ms[i].NN = hs[i].Hdr.controllen() + ms[i].Flags = hs[i].Hdr.flags() + if parseFn != nil { + var err error + ms[i].Addr, err = parseFn(hs[i].Hdr.name(), hint) + if err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go b/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go new file mode 100644 index 0000000..5567afc --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go @@ -0,0 +1,39 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd netbsd openbsd + +package socket + +import "unsafe" + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) { + for i := range vs { + vs[i].set(bs[i]) + } + h.setIov(vs) + if len(oob) > 0 { + h.Control = (*byte)(unsafe.Pointer(&oob[0])) + h.Controllen = uint32(len(oob)) + } + if sa != nil { + h.Name = (*byte)(unsafe.Pointer(&sa[0])) + h.Namelen = uint32(len(sa)) + } +} + +func (h *msghdr) name() []byte { + if h.Name != nil && h.Namelen > 0 { + return (*[sizeofSockaddrInet6]byte)(unsafe.Pointer(h.Name))[:h.Namelen] + } + return nil +} + +func (h *msghdr) controllen() int { + return int(h.Controllen) +} + +func (h *msghdr) flags() int { + return int(h.Flags) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go b/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go new file mode 100644 index 0000000..b8c87b7 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd + +package socket + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = int32(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux.go new file mode 100644 index 0000000..5a38798 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux.go @@ -0,0 +1,36 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import "unsafe" + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) { + for i := range vs { + vs[i].set(bs[i]) + } + h.setIov(vs) + if len(oob) > 0 { + h.setControl(oob) + } + if sa != nil { + h.Name = (*byte)(unsafe.Pointer(&sa[0])) + h.Namelen = uint32(len(sa)) + } +} + +func (h *msghdr) name() []byte { + if h.Name != nil && h.Namelen > 0 { + return (*[sizeofSockaddrInet6]byte)(unsafe.Pointer(h.Name))[:h.Namelen] + } + return nil +} + +func (h *msghdr) controllen() int { + return int(h.Controllen) +} + +func (h *msghdr) flags() int { + return int(h.Flags) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go new file mode 100644 index 0000000..a7a5987 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go @@ -0,0 +1,24 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm mips mipsle 386 +// +build linux + +package socket + +import "unsafe" + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = uint32(l) +} + +func (h *msghdr) setControl(b []byte) { + h.Control = (*byte)(unsafe.Pointer(&b[0])) + h.Controllen = uint32(len(b)) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go new file mode 100644 index 0000000..610fc4f --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go @@ -0,0 +1,24 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x +// +build linux + +package socket + +import "unsafe" + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = uint64(l) +} + +func (h *msghdr) setControl(b []byte) { + h.Control = (*byte)(unsafe.Pointer(&b[0])) + h.Controllen = uint64(len(b)) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go b/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go new file mode 100644 index 0000000..71a69e2 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = uint32(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go new file mode 100644 index 0000000..6465b20 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go @@ -0,0 +1,36 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64 +// +build solaris + +package socket + +import "unsafe" + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) { + for i := range vs { + vs[i].set(bs[i]) + } + if len(vs) > 0 { + h.Iov = &vs[0] + h.Iovlen = int32(len(vs)) + } + if len(oob) > 0 { + h.Accrights = (*int8)(unsafe.Pointer(&oob[0])) + h.Accrightslen = int32(len(oob)) + } + if sa != nil { + h.Name = (*byte)(unsafe.Pointer(&sa[0])) + h.Namelen = uint32(len(sa)) + } +} + +func (h *msghdr) controllen() int { + return int(h.Accrightslen) +} + +func (h *msghdr) flags() int { + return int(NativeEndian.Uint32(h.Pad_cgo_2[:])) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_stub.go b/vendor/golang.org/x/net/internal/socket/msghdr_stub.go new file mode 100644 index 0000000..64e8173 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_stub.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package socket + +type msghdr struct{} + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) {} +func (h *msghdr) name() []byte { return nil } +func (h *msghdr) controllen() int { return 0 } +func (h *msghdr) flags() int { return 0 } diff --git a/vendor/golang.org/x/net/internal/socket/rawconn.go b/vendor/golang.org/x/net/internal/socket/rawconn.go new file mode 100644 index 0000000..d6871d5 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn.go @@ -0,0 +1,66 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package socket + +import ( + "errors" + "net" + "os" + "syscall" +) + +// A Conn represents a raw connection. +type Conn struct { + network string + c syscall.RawConn +} + +// NewConn returns a new raw connection. 
+func NewConn(c net.Conn) (*Conn, error) { + var err error + var cc Conn + switch c := c.(type) { + case *net.TCPConn: + cc.network = "tcp" + cc.c, err = c.SyscallConn() + case *net.UDPConn: + cc.network = "udp" + cc.c, err = c.SyscallConn() + case *net.IPConn: + cc.network = "ip" + cc.c, err = c.SyscallConn() + default: + return nil, errors.New("unknown connection type") + } + if err != nil { + return nil, err + } + return &cc, nil +} + +func (o *Option) get(c *Conn, b []byte) (int, error) { + var operr error + var n int + fn := func(s uintptr) { + n, operr = getsockopt(s, o.Level, o.Name, b) + } + if err := c.c.Control(fn); err != nil { + return 0, err + } + return n, os.NewSyscallError("getsockopt", operr) +} + +func (o *Option) set(c *Conn, b []byte) error { + var operr error + fn := func(s uintptr) { + operr = setsockopt(s, o.Level, o.Name, b) + } + if err := c.c.Control(fn); err != nil { + return err + } + return os.NewSyscallError("setsockopt", operr) +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go new file mode 100644 index 0000000..499164a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go @@ -0,0 +1,74 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build linux + +package socket + +import ( + "net" + "os" + "syscall" +) + +func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { + hs := make(mmsghdrs, len(ms)) + var parseFn func([]byte, string) (net.Addr, error) + if c.network != "tcp" { + parseFn = parseInetAddr + } + if err := hs.pack(ms, parseFn, nil); err != nil { + return 0, err + } + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = recvmmsg(s, hs, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Read(fn); err != nil { + return n, err + } + if operr != nil { + return n, os.NewSyscallError("recvmmsg", operr) + } + if err := hs[:n].unpack(ms[:n], parseFn, c.network); err != nil { + return n, err + } + return n, nil +} + +func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) { + hs := make(mmsghdrs, len(ms)) + var marshalFn func(net.Addr) []byte + if c.network != "tcp" { + marshalFn = marshalInetAddr + } + if err := hs.pack(ms, nil, marshalFn); err != nil { + return 0, err + } + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = sendmmsg(s, hs, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Write(fn); err != nil { + return n, err + } + if operr != nil { + return n, os.NewSyscallError("sendmmsg", operr) + } + if err := hs[:n].unpack(ms[:n], nil, ""); err != nil { + return n, err + } + return n, nil +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_msg.go b/vendor/golang.org/x/net/internal/socket/rawconn_msg.go new file mode 100644 index 0000000..b21d2e6 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_msg.go @@ -0,0 +1,77 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
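Putting Conn and Option together, here is a hedged end-to-end sketch of reading and writing a socket option through the go1.9 path (the address and buffer size are illustrative; this mirrors what the package's own socket_test.go does below):

    package main

    import (
        "log"
        "net"
        "syscall"

        "golang.org/x/net/internal/socket"
    )

    func main() {
        c, err := net.Dial("udp", "127.0.0.1:4242") // no handshake for UDP
        if err != nil {
            log.Fatal(err)
        }
        defer c.Close()
        cc, err := socket.NewConn(c)
        if err != nil {
            log.Fatal(err)
        }
        // SO_RCVBUF is a 4-byte integer option on the supported platforms.
        so := socket.Option{Level: syscall.SOL_SOCKET, Name: syscall.SO_RCVBUF, Len: 4}
        if err := so.SetInt(cc, 64<<10); err != nil {
            log.Fatal(err)
        }
        n, err := so.GetInt(cc)
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("SO_RCVBUF = %d", n)
    }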
+ +// +build go1.9 +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package socket + +import ( + "os" + "syscall" +) + +func (c *Conn) recvMsg(m *Message, flags int) error { + var h msghdr + vs := make([]iovec, len(m.Buffers)) + var sa []byte + if c.network != "tcp" { + sa = make([]byte, sizeofSockaddrInet6) + } + h.pack(vs, m.Buffers, m.OOB, sa) + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = recvmsg(s, &h, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Read(fn); err != nil { + return err + } + if operr != nil { + return os.NewSyscallError("recvmsg", operr) + } + if c.network != "tcp" { + var err error + m.Addr, err = parseInetAddr(sa[:], c.network) + if err != nil { + return err + } + } + m.N = n + m.NN = h.controllen() + m.Flags = h.flags() + return nil +} + +func (c *Conn) sendMsg(m *Message, flags int) error { + var h msghdr + vs := make([]iovec, len(m.Buffers)) + var sa []byte + if m.Addr != nil { + sa = marshalInetAddr(m.Addr) + } + h.pack(vs, m.Buffers, m.OOB, sa) + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = sendmsg(s, &h, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Write(fn); err != nil { + return err + } + if operr != nil { + return os.NewSyscallError("sendmsg", operr) + } + m.N = n + m.NN = len(m.OOB) + return nil +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go new file mode 100644 index 0000000..f78832a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go @@ -0,0 +1,18 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build !linux + +package socket + +import "errors" + +func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go new file mode 100644 index 0000000..96733cb --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go @@ -0,0 +1,18 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package socket + +import "errors" + +func (c *Conn) recvMsg(m *Message, flags int) error { + return errors.New("not implemented") +} + +func (c *Conn) sendMsg(m *Message, flags int) error { + return errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_stub.go b/vendor/golang.org/x/net/internal/socket/rawconn_stub.go new file mode 100644 index 0000000..d2add1a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_stub.go @@ -0,0 +1,25 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
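The callback convention used by recvMsg/sendMsg above comes from syscall.RawConn: when the closure returns false, the runtime parks the goroutine on its network poller until the descriptor is ready again and then re-runs the closure, which is why EAGAIN maps to `return false`. A Unix-only sketch of the same pattern with a plain read (helper name illustrative):

    package rawconnexample

    import "syscall"

    // readWithPoller blocks, without spinning, until the descriptor is
    // readable, by handing EAGAIN back to the runtime poller.
    func readWithPoller(rc syscall.RawConn, buf []byte) (int, error) {
        var n int
        var operr error
        err := rc.Read(func(fd uintptr) bool {
            n, operr = syscall.Read(int(fd), buf)
            return operr != syscall.EAGAIN // false => wait for readability, retry
        })
        if err != nil {
            return 0, err
        }
        return n, operr
    }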
+ +// +build !go1.9 + +package socket + +import "errors" + +func (c *Conn) recvMsg(m *Message, flags int) error { + return errors.New("not implemented") +} + +func (c *Conn) sendMsg(m *Message, flags int) error { + return errors.New("not implemented") +} + +func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/reflect.go b/vendor/golang.org/x/net/internal/socket/reflect.go new file mode 100644 index 0000000..bb179f1 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/reflect.go @@ -0,0 +1,62 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package socket + +import ( + "errors" + "net" + "os" + "reflect" + "runtime" +) + +// A Conn represents a raw connection. +type Conn struct { + c net.Conn +} + +// NewConn returns a new raw connection. +func NewConn(c net.Conn) (*Conn, error) { + return &Conn{c: c}, nil +} + +func (o *Option) get(c *Conn, b []byte) (int, error) { + s, err := socketOf(c.c) + if err != nil { + return 0, err + } + n, err := getsockopt(s, o.Level, o.Name, b) + return n, os.NewSyscallError("getsockopt", err) +} + +func (o *Option) set(c *Conn, b []byte) error { + s, err := socketOf(c.c) + if err != nil { + return err + } + return os.NewSyscallError("setsockopt", setsockopt(s, o.Level, o.Name, b)) +} + +func socketOf(c net.Conn) (uintptr, error) { + switch c.(type) { + case *net.TCPConn, *net.UDPConn, *net.IPConn: + v := reflect.ValueOf(c) + switch e := v.Elem(); e.Kind() { + case reflect.Struct: + fd := e.FieldByName("conn").FieldByName("fd") + switch e := fd.Elem(); e.Kind() { + case reflect.Struct: + sysfd := e.FieldByName("sysfd") + if runtime.GOOS == "windows" { + return uintptr(sysfd.Uint()), nil + } + return uintptr(sysfd.Int()), nil + } + } + } + return 0, errors.New("invalid type") +} diff --git a/vendor/golang.org/x/net/internal/socket/socket.go b/vendor/golang.org/x/net/internal/socket/socket.go new file mode 100644 index 0000000..5f9730e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/socket.go @@ -0,0 +1,285 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package socket provides a portable interface for socket system +// calls. +package socket // import "golang.org/x/net/internal/socket" + +import ( + "errors" + "net" + "unsafe" +) + +// An Option represents a sticky socket option. +type Option struct { + Level int // level + Name int // name; must be equal or greater than 1 + Len int // length of value in bytes; must be equal or greater than 1 +} + +// Get reads a value for the option from the kernel. +// It returns the number of bytes written into b. +func (o *Option) Get(c *Conn, b []byte) (int, error) { + if o.Name < 1 || o.Len < 1 { + return 0, errors.New("invalid option") + } + if len(b) < o.Len { + return 0, errors.New("short buffer") + } + return o.get(c, b) +} + +// GetInt returns an integer value for the option. +// +// The Len field of Option must be either 1 or 4. 
+func (o *Option) GetInt(c *Conn) (int, error) { + if o.Len != 1 && o.Len != 4 { + return 0, errors.New("invalid option") + } + var b []byte + var bb [4]byte + if o.Len == 1 { + b = bb[:1] + } else { + b = bb[:4] + } + n, err := o.get(c, b) + if err != nil { + return 0, err + } + if n != o.Len { + return 0, errors.New("invalid option length") + } + if o.Len == 1 { + return int(b[0]), nil + } + return int(NativeEndian.Uint32(b[:4])), nil +} + +// Set writes the option and value to the kernel. +func (o *Option) Set(c *Conn, b []byte) error { + if o.Name < 1 || o.Len < 1 { + return errors.New("invalid option") + } + if len(b) < o.Len { + return errors.New("short buffer") + } + return o.set(c, b) +} + +// SetInt writes the option and value to the kernel. +// +// The Len field of Option must be either 1 or 4. +func (o *Option) SetInt(c *Conn, v int) error { + if o.Len != 1 && o.Len != 4 { + return errors.New("invalid option") + } + var b []byte + if o.Len == 1 { + b = []byte{byte(v)} + } else { + var bb [4]byte + NativeEndian.PutUint32(bb[:o.Len], uint32(v)) + b = bb[:4] + } + return o.set(c, b) +} + +func controlHeaderLen() int { + return roundup(sizeofCmsghdr) +} + +func controlMessageLen(dataLen int) int { + return roundup(sizeofCmsghdr) + dataLen +} + +// ControlMessageSpace returns the whole length of control message. +func ControlMessageSpace(dataLen int) int { + return roundup(sizeofCmsghdr) + roundup(dataLen) +} + +// A ControlMessage represents the head message in a stream of control +// messages. +// +// A control message consists of a header, data and a few padding +// fields to conform to the interface to the kernel. +// +// See RFC 3542 for further information. +type ControlMessage []byte + +// Data returns the data field of the control message at the head on +// m. +func (m ControlMessage) Data(dataLen int) []byte { + l := controlHeaderLen() + if len(m) < l || len(m) < l+dataLen { + return nil + } + return m[l : l+dataLen] +} + +// Next returns the control message at the next on m. +// +// Next works only for standard control messages. +func (m ControlMessage) Next(dataLen int) ControlMessage { + l := ControlMessageSpace(dataLen) + if len(m) < l { + return nil + } + return m[l:] +} + +// MarshalHeader marshals the header fields of the control message at +// the head on m. +func (m ControlMessage) MarshalHeader(lvl, typ, dataLen int) error { + if len(m) < controlHeaderLen() { + return errors.New("short message") + } + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + h.set(controlMessageLen(dataLen), lvl, typ) + return nil +} + +// ParseHeader parses and returns the header fields of the control +// message at the head on m. +func (m ControlMessage) ParseHeader() (lvl, typ, dataLen int, err error) { + l := controlHeaderLen() + if len(m) < l { + return 0, 0, 0, errors.New("short message") + } + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + return h.lvl(), h.typ(), int(uint64(h.len()) - uint64(l)), nil +} + +// Marshal marshals the control message at the head on m, and returns +// the next control message. +func (m ControlMessage) Marshal(lvl, typ int, data []byte) (ControlMessage, error) { + l := len(data) + if len(m) < ControlMessageSpace(l) { + return nil, errors.New("short message") + } + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + h.set(controlMessageLen(l), lvl, typ) + if l > 0 { + copy(m.Data(l), data) + } + return m.Next(l), nil +} + +// Parse parses m as a single or multiple control messages. +// +// Parse works for both standard and compatible messages.
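To make the length helpers concrete, assume the 64-bit Linux values: sizeofCmsghdr is 16 and kernelAlign is 8. Then controlHeaderLen() = 16, controlMessageLen(4) = 20, and ControlMessageSpace(4) = 16 + roundup(4) = 24, matching the kernel's CMSG_SPACE(4). A worked sketch under those assumptions:

    package main

    import "fmt"

    // Assumed 64-bit Linux values: 16-byte cmsghdr, 8-byte alignment.
    const sizeofCmsghdr, kernelAlign = 16, 8

    func roundup(l int) int { return (l + kernelAlign - 1) &^ (kernelAlign - 1) }

    func main() {
        fmt.Println(roundup(sizeofCmsghdr))              // 16 = controlHeaderLen()
        fmt.Println(roundup(sizeofCmsghdr) + 4)          // 20 = controlMessageLen(4)
        fmt.Println(roundup(sizeofCmsghdr) + roundup(4)) // 24 = ControlMessageSpace(4)
    }

With those sizes in mind, Parse below walks a received stream header by header: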
+func (m ControlMessage) Parse() ([]ControlMessage, error) { + var ms []ControlMessage + for len(m) >= controlHeaderLen() { + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + l := h.len() + if l <= 0 { + return nil, errors.New("invalid header length") + } + if uint64(l) < uint64(controlHeaderLen()) { + return nil, errors.New("invalid message length") + } + if uint64(l) > uint64(len(m)) { + return nil, errors.New("short buffer") + } + // On message reception: + // + // |<- ControlMessageSpace --------------->| + // |<- controlMessageLen ---------->| | + // |<- controlHeaderLen ->| | | + // +---------------+------+---------+------+ + // | Header | PadH | Data | PadD | + // +---------------+------+---------+------+ + // + // On compatible message reception: + // + // | ... |<- controlMessageLen ----------->| + // | ... |<- controlHeaderLen ->| | + // +-----+---------------+------+----------+ + // | ... | Header | PadH | Data | + // +-----+---------------+------+----------+ + ms = append(ms, ControlMessage(m[:l])) + ll := l - controlHeaderLen() + if len(m) >= ControlMessageSpace(ll) { + m = m[ControlMessageSpace(ll):] + } else { + m = m[controlMessageLen(ll):] + } + } + return ms, nil +} + +// NewControlMessage returns a new stream of control messages. +func NewControlMessage(dataLen []int) ControlMessage { + var l int + for i := range dataLen { + l += ControlMessageSpace(dataLen[i]) + } + return make([]byte, l) +} + +// A Message represents an IO message. +type Message struct { + // When writing, the Buffers field must contain at least one + // byte to write. + // When reading, the Buffers field will always contain a byte + // to read. + Buffers [][]byte + + // OOB contains protocol-specific control or miscellaneous + // ancillary data known as out-of-band data. + OOB []byte + + // Addr specifies a destination address when writing. + // It can be nil when the underlying protocol of the raw + // connection uses connection-oriented communication. + // After a successful read, it may contain the source address + // on the received packet. + Addr net.Addr + + N int // # of bytes read or written from/to Buffers + NN int // # of bytes read or written from/to OOB + Flags int // protocol-specific information on the received message +} + +// RecvMsg wraps recvmsg system call. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +func (c *Conn) RecvMsg(m *Message, flags int) error { + return c.recvMsg(m, flags) +} + +// SendMsg wraps sendmsg system call. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +func (c *Conn) SendMsg(m *Message, flags int) error { + return c.sendMsg(m, flags) +} + +// RecvMsgs wraps recvmmsg system call. +// +// It returns the number of processed messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// Only Linux supports this. +func (c *Conn) RecvMsgs(ms []Message, flags int) (int, error) { + return c.recvMsgs(ms, flags) +} + +// SendMsgs wraps sendmmsg system call. +// +// It returns the number of processed messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// Only Linux supports this. 
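As a concrete round trip through the exported helpers (the level/type values 1 and 2 are arbitrary placeholders, and the internal import works only inside the x/net tree): allocate space for one message carrying a 4-byte datum, marshal it, then parse it back.

    package main

    import (
        "fmt"
        "log"

        "golang.org/x/net/internal/socket"
    )

    func main() {
        b := socket.NewControlMessage([]int{4}) // ControlMessageSpace(4) bytes
        if _, err := b.Marshal(1, 2, []byte{0xde, 0xad, 0xbe, 0xef}); err != nil {
            log.Fatal(err)
        }
        ms, err := b.Parse()
        if err != nil {
            log.Fatal(err)
        }
        for _, m := range ms {
            lvl, typ, l, _ := m.ParseHeader()
            fmt.Println(lvl, typ, l, m.Data(l)) // 1 2 4 [222 173 190 239]
        }
    }

The wrappers that follow, RecvMsg through SendMsgs, are exercised end to end by the test file below: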
+func (c *Conn) SendMsgs(ms []Message, flags int) (int, error) { + return c.sendMsgs(ms, flags) +} diff --git a/vendor/golang.org/x/net/internal/socket/socket_go1_9_test.go b/vendor/golang.org/x/net/internal/socket/socket_go1_9_test.go new file mode 100644 index 0000000..c4edd4a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/socket_go1_9_test.go @@ -0,0 +1,259 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package socket_test + +import ( + "bytes" + "fmt" + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/internal/socket" +) + +type mockControl struct { + Level int + Type int + Data []byte +} + +func TestControlMessage(t *testing.T) { + for _, tt := range []struct { + cs []mockControl + }{ + { + []mockControl{ + {Level: 1, Type: 1}, + }, + }, + { + []mockControl{ + {Level: 2, Type: 2, Data: []byte{0xfe}}, + }, + }, + { + []mockControl{ + {Level: 3, Type: 3, Data: []byte{0xfe, 0xff, 0xff, 0xfe}}, + }, + }, + { + []mockControl{ + {Level: 4, Type: 4, Data: []byte{0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfe}}, + }, + }, + { + []mockControl{ + {Level: 4, Type: 4, Data: []byte{0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfe}}, + {Level: 2, Type: 2, Data: []byte{0xfe}}, + }, + }, + } { + var w []byte + var tailPadLen int + mm := socket.NewControlMessage([]int{0}) + for i, c := range tt.cs { + m := socket.NewControlMessage([]int{len(c.Data)}) + l := len(m) - len(mm) + if i == len(tt.cs)-1 && l > len(c.Data) { + tailPadLen = l - len(c.Data) + } + w = append(w, m...) + } + + var err error + ww := make([]byte, len(w)) + copy(ww, w) + m := socket.ControlMessage(ww) + for _, c := range tt.cs { + if err = m.MarshalHeader(c.Level, c.Type, len(c.Data)); err != nil { + t.Fatalf("(%v).MarshalHeader() = %v", tt.cs, err) + } + copy(m.Data(len(c.Data)), c.Data) + m = m.Next(len(c.Data)) + } + m = socket.ControlMessage(w) + for _, c := range tt.cs { + m, err = m.Marshal(c.Level, c.Type, c.Data) + if err != nil { + t.Fatalf("(%v).Marshal() = %v", tt.cs, err) + } + } + if !bytes.Equal(ww, w) { + t.Fatalf("got %#v; want %#v", ww, w) + } + + ws := [][]byte{w} + if tailPadLen > 0 { + // Test a message with no tail padding. + nopad := w[:len(w)-tailPadLen] + ws = append(ws, [][]byte{nopad}...) 
+ } + for _, w := range ws { + ms, err := socket.ControlMessage(w).Parse() + if err != nil { + t.Fatalf("(%v).Parse() = %v", tt.cs, err) + } + for i, m := range ms { + lvl, typ, dataLen, err := m.ParseHeader() + if err != nil { + t.Fatalf("(%v).ParseHeader() = %v", tt.cs, err) + } + if lvl != tt.cs[i].Level || typ != tt.cs[i].Type || dataLen != len(tt.cs[i].Data) { + t.Fatalf("%v: got %d, %d, %d; want %d, %d, %d", tt.cs[i], lvl, typ, dataLen, tt.cs[i].Level, tt.cs[i].Type, len(tt.cs[i].Data)) + } + } + } + } +} + +func TestUDP(t *testing.T) { + c, err := nettest.NewLocalPacketListener("udp") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + cc, err := socket.NewConn(c.(net.Conn)) + if err != nil { + t.Fatal(err) + } + + t.Run("Message", func(t *testing.T) { + data := []byte("HELLO-R-U-THERE") + wm := socket.Message{ + Buffers: bytes.SplitAfter(data, []byte("-")), + Addr: c.LocalAddr(), + } + if err := cc.SendMsg(&wm, 0); err != nil { + t.Fatal(err) + } + b := make([]byte, 32) + rm := socket.Message{ + Buffers: [][]byte{b[:1], b[1:3], b[3:7], b[7:11], b[11:]}, + } + if err := cc.RecvMsg(&rm, 0); err != nil { + t.Fatal(err) + } + if !bytes.Equal(b[:rm.N], data) { + t.Fatalf("got %#v; want %#v", b[:rm.N], data) + } + }) + switch runtime.GOOS { + case "android", "linux": + t.Run("Messages", func(t *testing.T) { + data := []byte("HELLO-R-U-THERE") + wmbs := bytes.SplitAfter(data, []byte("-")) + wms := []socket.Message{ + {Buffers: wmbs[:1], Addr: c.LocalAddr()}, + {Buffers: wmbs[1:], Addr: c.LocalAddr()}, + } + n, err := cc.SendMsgs(wms, 0) + if err != nil { + t.Fatal(err) + } + if n != len(wms) { + t.Fatalf("got %d; want %d", n, len(wms)) + } + b := make([]byte, 32) + rmbs := [][][]byte{{b[:len(wmbs[0])]}, {b[len(wmbs[0]):]}} + rms := []socket.Message{ + {Buffers: rmbs[0]}, + {Buffers: rmbs[1]}, + } + n, err = cc.RecvMsgs(rms, 0) + if err != nil { + t.Fatal(err) + } + if n != len(rms) { + t.Fatalf("got %d; want %d", n, len(rms)) + } + nn := 0 + for i := 0; i < n; i++ { + nn += rms[i].N + } + if !bytes.Equal(b[:nn], data) { + t.Fatalf("got %#v; want %#v", b[:nn], data) + } + }) + } + + // The behavior of transmission for zero byte payload depends + // on each platform implementation. Some may transmit only + // protocol header and options, others may transmit nothing. + // We test only that SendMsg and SendMsgs will not crash with + // empty buffers.
+ wm := socket.Message{ + Buffers: [][]byte{{}}, + Addr: c.LocalAddr(), + } + cc.SendMsg(&wm, 0) + wms := []socket.Message{ + {Buffers: [][]byte{{}}, Addr: c.LocalAddr()}, + } + cc.SendMsgs(wms, 0) +} + +func BenchmarkUDP(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + cc, err := socket.NewConn(c.(net.Conn)) + if err != nil { + b.Fatal(err) + } + data := []byte("HELLO-R-U-THERE") + wm := socket.Message{ + Buffers: [][]byte{data}, + Addr: c.LocalAddr(), + } + rm := socket.Message{ + Buffers: [][]byte{make([]byte, 128)}, + OOB: make([]byte, 128), + } + + for M := 1; M <= 1<<9; M = M << 1 { + b.Run(fmt.Sprintf("Iter-%d", M), func(b *testing.B) { + for i := 0; i < b.N; i++ { + for j := 0; j < M; j++ { + if err := cc.SendMsg(&wm, 0); err != nil { + b.Fatal(err) + } + if err := cc.RecvMsg(&rm, 0); err != nil { + b.Fatal(err) + } + } + } + }) + switch runtime.GOOS { + case "android", "linux": + wms := make([]socket.Message, M) + for i := range wms { + wms[i].Buffers = [][]byte{data} + wms[i].Addr = c.LocalAddr() + } + rms := make([]socket.Message, M) + for i := range rms { + rms[i].Buffers = [][]byte{make([]byte, 128)} + rms[i].OOB = make([]byte, 128) + } + b.Run(fmt.Sprintf("Batch-%d", M), func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := cc.SendMsgs(wms, 0); err != nil { + b.Fatal(err) + } + if _, err := cc.RecvMsgs(rms, 0); err != nil { + b.Fatal(err) + } + } + }) + } + } +} diff --git a/vendor/golang.org/x/net/internal/socket/socket_test.go b/vendor/golang.org/x/net/internal/socket/socket_test.go new file mode 100644 index 0000000..bf3751b --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/socket_test.go @@ -0,0 +1,46 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package socket_test + +import ( + "net" + "runtime" + "syscall" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/internal/socket" +) + +func TestSocket(t *testing.T) { + t.Run("Option", func(t *testing.T) { + testSocketOption(t, &socket.Option{Level: syscall.SOL_SOCKET, Name: syscall.SO_RCVBUF, Len: 4}) + }) +} + +func testSocketOption(t *testing.T, so *socket.Option) { + c, err := nettest.NewLocalPacketListener("udp") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + cc, err := socket.NewConn(c.(net.Conn)) + if err != nil { + t.Fatal(err) + } + const N = 2048 + if err := so.SetInt(cc, N); err != nil { + t.Fatal(err) + } + n, err := so.GetInt(cc) + if err != nil { + t.Fatal(err) + } + if n < N { + t.Fatalf("got %d; want greater than or equal to %d", n, N) + } +} diff --git a/vendor/golang.org/x/net/internal/socket/sys.go b/vendor/golang.org/x/net/internal/socket/sys.go new file mode 100644 index 0000000..4f0eead --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys.go @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "encoding/binary" + "unsafe" +) + +var ( + // NativeEndian is the machine native endian implementation of + // ByteOrder. 
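+ // + // For example, sys_posix.go later in this patch uses it to + // write the address family field of a raw sockaddr in host + // byte order: NativeEndian.PutUint16(b[:2], uint16(sysAF_INET)).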
+ NativeEndian binary.ByteOrder + + kernelAlign int +) + +func init() { + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + NativeEndian = binary.LittleEndian + } else { + NativeEndian = binary.BigEndian + } + kernelAlign = probeProtocolStack() +} + +func roundup(l int) int { + return (l + kernelAlign - 1) & ^(kernelAlign - 1) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_bsd.go b/vendor/golang.org/x/net/internal/socket/sys_bsd.go new file mode 100644 index 0000000..f13e14f --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_bsd.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd openbsd + +package socket + +import "errors" + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go b/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go new file mode 100644 index 0000000..f723fa3 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd netbsd openbsd + +package socket + +import "unsafe" + +func probeProtocolStack() int { + var p uintptr + return int(unsafe.Sizeof(p)) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_darwin.go b/vendor/golang.org/x/net/internal/socket/sys_darwin.go new file mode 100644 index 0000000..b17d223 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_darwin.go @@ -0,0 +1,7 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +func probeProtocolStack() int { return 4 } diff --git a/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go b/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go new file mode 100644 index 0000000..b17d223 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go @@ -0,0 +1,7 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +func probeProtocolStack() int { return 4 } diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux.go b/vendor/golang.org/x/net/internal/socket/sys_linux.go new file mode 100644 index 0000000..1559521 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux.go @@ -0,0 +1,27 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build linux,!s390x,!386 + +package socket + +import ( + "syscall" + "unsafe" +) + +func probeProtocolStack() int { + var p uintptr + return int(unsafe.Sizeof(p)) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_386.go b/vendor/golang.org/x/net/internal/socket/sys_linux_386.go new file mode 100644 index 0000000..235b2cc --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_386.go @@ -0,0 +1,55 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "syscall" + "unsafe" +) + +func probeProtocolStack() int { return 4 } + +const ( + sysSETSOCKOPT = 0xe + sysGETSOCKOPT = 0xf + sysSENDMSG = 0x10 + sysRECVMSG = 0x11 + sysRECVMMSG = 0x13 + sysSENDMMSG = 0x14 +) + +func socketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) +func rawsocketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, errno := socketcall(sysGETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, errno := socketcall(sysSETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_386.s b/vendor/golang.org/x/net/internal/socket/sys_linux_386.s new file mode 100644 index 0000000..93e7d75 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_386.s @@ -0,0 +1,11 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include "textflag.h" + +TEXT ·socketcall(SB),NOSPLIT,$0-36 + JMP syscall·socketcall(SB) + +TEXT ·rawsocketcall(SB),NOSPLIT,$0-36 + JMP syscall·rawsocketcall(SB) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go new file mode 100644 index 0000000..9decee2 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x12b + sysSENDMMSG = 0x133 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go b/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go new file mode 100644 index 0000000..d753b43 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x16d + sysSENDMMSG = 0x176 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go new file mode 100644 index 0000000..b670894 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0xf3 + sysSENDMMSG = 0x10d +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go new file mode 100644 index 0000000..9c0d740 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x10ef + sysSENDMMSG = 0x10f7 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go new file mode 100644 index 0000000..071a4ab --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x14ae + sysSENDMMSG = 0x14b6 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go new file mode 100644 index 0000000..071a4ab --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x14ae + sysSENDMMSG = 0x14b6 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go new file mode 100644 index 0000000..9c0d740 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x10ef + sysSENDMMSG = 0x10f7 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go new file mode 100644 index 0000000..21c1e3f --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x157 + sysSENDMMSG = 0x15d +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go new file mode 100644 index 0000000..21c1e3f --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x157 + sysSENDMMSG = 0x15d +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go new file mode 100644 index 0000000..327979e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go @@ -0,0 +1,55 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "syscall" + "unsafe" +) + +func probeProtocolStack() int { return 8 } + +const ( + sysSETSOCKOPT = 0xe + sysGETSOCKOPT = 0xf + sysSENDMSG = 0x10 + sysRECVMSG = 0x11 + sysRECVMMSG = 0x13 + sysSENDMMSG = 0x14 +) + +func socketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) +func rawsocketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, errno := socketcall(sysGETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, errno := socketcall(sysSETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s new file mode 100644 index 
0000000..06d7562 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·socketcall(SB),NOSPLIT,$0-72 + JMP syscall·socketcall(SB) + +TEXT ·rawsocketcall(SB),NOSPLIT,$0-72 + JMP syscall·rawsocketcall(SB) diff --git a/vendor/golang.org/x/net/internal/socket/sys_netbsd.go b/vendor/golang.org/x/net/internal/socket/sys_netbsd.go new file mode 100644 index 0000000..431851c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_netbsd.go @@ -0,0 +1,25 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "syscall" + "unsafe" +) + +const ( + sysRECVMMSG = 0x1db + sysSENDMMSG = 0x1dc +) + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_posix.go b/vendor/golang.org/x/net/internal/socket/sys_posix.go new file mode 100644 index 0000000..dc130c2 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_posix.go @@ -0,0 +1,168 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.9 +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package socket + +import ( + "encoding/binary" + "errors" + "net" + "runtime" + "strconv" + "sync" + "time" +) + +func marshalInetAddr(a net.Addr) []byte { + switch a := a.(type) { + case *net.TCPAddr: + return marshalSockaddr(a.IP, a.Port, a.Zone) + case *net.UDPAddr: + return marshalSockaddr(a.IP, a.Port, a.Zone) + case *net.IPAddr: + return marshalSockaddr(a.IP, 0, a.Zone) + default: + return nil + } +} + +func marshalSockaddr(ip net.IP, port int, zone string) []byte { + if ip4 := ip.To4(); ip4 != nil { + b := make([]byte, sizeofSockaddrInet) + switch runtime.GOOS { + case "android", "linux", "solaris", "windows": + NativeEndian.PutUint16(b[:2], uint16(sysAF_INET)) + default: + b[0] = sizeofSockaddrInet + b[1] = sysAF_INET + } + binary.BigEndian.PutUint16(b[2:4], uint16(port)) + copy(b[4:8], ip4) + return b + } + if ip6 := ip.To16(); ip6 != nil && ip.To4() == nil { + b := make([]byte, sizeofSockaddrInet6) + switch runtime.GOOS { + case "android", "linux", "solaris", "windows": + NativeEndian.PutUint16(b[:2], uint16(sysAF_INET6)) + default: + b[0] = sizeofSockaddrInet6 + b[1] = sysAF_INET6 + } + binary.BigEndian.PutUint16(b[2:4], uint16(port)) + copy(b[8:24], ip6) + if zone != "" { + NativeEndian.PutUint32(b[24:28], uint32(zoneCache.index(zone))) + } + return b + } + return nil +} + +func parseInetAddr(b []byte, network string) (net.Addr, error) { + if len(b) < 2 { + return nil, errors.New("invalid address") + } + var af int + switch runtime.GOOS { + case "android", "linux", "solaris", "windows": + af = int(NativeEndian.Uint16(b[:2])) + default: + af = int(b[1]) + } + var ip net.IP + var zone string + if af == sysAF_INET { + if len(b) < sizeofSockaddrInet { + return nil, errors.New("short address") + } + ip = make(net.IP, net.IPv4len) + copy(ip, b[4:8]) + } + if af == sysAF_INET6 { + if len(b) < sizeofSockaddrInet6 { + return nil, errors.New("short address") + } + ip = make(net.IP, net.IPv6len) + copy(ip, b[8:24]) + if id := int(NativeEndian.Uint32(b[24:28])); id > 0 { + zone = zoneCache.name(id) + } + } + switch network { + case "tcp", "tcp4", "tcp6": + return &net.TCPAddr{IP: ip, Port: int(binary.BigEndian.Uint16(b[2:4])), Zone: zone}, nil + case "udp", "udp4", "udp6": + return &net.UDPAddr{IP: ip, Port: int(binary.BigEndian.Uint16(b[2:4])), Zone: zone}, nil + default: + return &net.IPAddr{IP: ip, Zone: zone}, nil + } +} + +// An ipv6ZoneCache represents a cache holding partial network +// interface information. It is used to reduce the cost of IPv6 +// addressing scope zone resolution. +// +// Multiple names sharing an index are managed on a first-come, +// first-served basis for consistency.
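+// +// For instance, dialing a link-local literal such as +// "fe80::1%eth0" needs the interface index for the zone "eth0"; +// the cache answers that lookup without re-fetching the interface +// table on every call ("eth0" is only an illustrative name).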
+type ipv6ZoneCache struct { + sync.RWMutex // guard the following + lastFetched time.Time // last time routing information was fetched + toIndex map[string]int // interface name to its index + toName map[int]string // interface index to its name +} + +var zoneCache = ipv6ZoneCache{ + toIndex: make(map[string]int), + toName: make(map[int]string), +} + +func (zc *ipv6ZoneCache) update(ift []net.Interface) { + zc.Lock() + defer zc.Unlock() + now := time.Now() + if zc.lastFetched.After(now.Add(-60 * time.Second)) { + return + } + zc.lastFetched = now + if len(ift) == 0 { + var err error + if ift, err = net.Interfaces(); err != nil { + return + } + } + zc.toIndex = make(map[string]int, len(ift)) + zc.toName = make(map[int]string, len(ift)) + for _, ifi := range ift { + zc.toIndex[ifi.Name] = ifi.Index + if _, ok := zc.toName[ifi.Index]; !ok { + zc.toName[ifi.Index] = ifi.Name + } + } +} + +func (zc *ipv6ZoneCache) name(zone int) string { + zoneCache.update(nil) + zoneCache.RLock() + defer zoneCache.RUnlock() + name, ok := zoneCache.toName[zone] + if !ok { + name = strconv.Itoa(zone) + } + return name +} + +func (zc *ipv6ZoneCache) index(zone string) int { + zoneCache.update(nil) + zoneCache.RLock() + defer zoneCache.RUnlock() + index, ok := zoneCache.toIndex[zone] + if !ok { + index, _ = strconv.Atoi(zone) + } + return index +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_solaris.go b/vendor/golang.org/x/net/internal/socket/sys_solaris.go new file mode 100644 index 0000000..cced74e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_solaris.go @@ -0,0 +1,71 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "errors" + "runtime" + "syscall" + "unsafe" +) + +func probeProtocolStack() int { + switch runtime.GOARCH { + case "amd64": + return 4 + default: + var p uintptr + return int(unsafe.Sizeof(p)) + } +} + +//go:cgo_import_dynamic libc___xnet_getsockopt __xnet_getsockopt "libsocket.so" +//go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" +//go:cgo_import_dynamic libc___xnet_recvmsg __xnet_recvmsg "libsocket.so" +//go:cgo_import_dynamic libc___xnet_sendmsg __xnet_sendmsg "libsocket.so" + +//go:linkname procGetsockopt libc___xnet_getsockopt +//go:linkname procSetsockopt libc_setsockopt +//go:linkname procRecvmsg libc___xnet_recvmsg +//go:linkname procSendmsg libc___xnet_sendmsg + +var ( + procGetsockopt uintptr + procSetsockopt uintptr + procRecvmsg uintptr + procSendmsg uintptr +) + +func sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno) +func rawSysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procGetsockopt)), 5, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procSetsockopt)), 5, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procRecvmsg)), 3, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return 
int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procSendmsg)), 3, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s b/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s new file mode 100644 index 0000000..a18ac5e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s @@ -0,0 +1,11 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·sysvicall6(SB),NOSPLIT,$0-88 + JMP syscall·sysvicall6(SB) + +TEXT ·rawSysvicall6(SB),NOSPLIT,$0-88 + JMP syscall·rawSysvicall6(SB) diff --git a/vendor/golang.org/x/net/internal/socket/sys_stub.go b/vendor/golang.org/x/net/internal/socket/sys_stub.go new file mode 100644 index 0000000..d9f06d0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_stub.go @@ -0,0 +1,64 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package socket + +import ( + "errors" + "net" + "runtime" + "unsafe" +) + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +func probeProtocolStack() int { + switch runtime.GOARCH { + case "amd64p32", "mips64p32": + return 4 + default: + var p uintptr + return int(unsafe.Sizeof(p)) + } +} + +func marshalInetAddr(ip net.IP, port int, zone string) []byte { + return nil +} + +func parseInetAddr(b []byte, network string) (net.Addr, error) { + return nil, errors.New("not implemented") +} + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + return 0, errors.New("not implemented") +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + return errors.New("not implemented") +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_unix.go b/vendor/golang.org/x/net/internal/socket/sys_unix.go new file mode 100644 index 0000000..18eba30 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_unix.go @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux,!s390x,!386 netbsd openbsd + +package socket + +import ( + "syscall" + "unsafe" +) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, _, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall(syscall.SYS_RECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall(syscall.SYS_SENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_windows.go b/vendor/golang.org/x/net/internal/socket/sys_windows.go new file mode 100644 index 0000000..54a470e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_windows.go @@ -0,0 +1,70 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "errors" + "syscall" + "unsafe" +) + +func probeProtocolStack() int { + var p uintptr + return int(unsafe.Sizeof(p)) +} + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x17 + + sysSOCK_RAW = 0x3 +) + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + err := syscall.Getsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(unsafe.Pointer(&b[0])), (*int32)(unsafe.Pointer(&l))) + return int(l), err +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + return syscall.Setsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(unsafe.Pointer(&b[0])), int32(len(b))) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go new file mode 100644 index 0000000..26f8fef --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + 
Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go new file mode 100644 index 0000000..e2987f7 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go new file mode 100644 index 0000000..26f8fef --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go new file mode 100644 index 0000000..e2987f7 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 
+ Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go new file mode 100644 index 0000000..c582abd --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_dragonfly.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go new file mode 100644 index 0000000..04a2488 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go new file mode 100644 index 0000000..35c7cb9 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct 
{ + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go new file mode 100644 index 0000000..04a2488 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go new file mode 100644 index 0000000..4302069 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go @@ -0,0 +1,63 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go new file mode 100644 index 0000000..1502f6c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + 
Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go new file mode 100644 index 0000000..4302069 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go @@ -0,0 +1,63 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go new file mode 100644 index 0000000..1502f6c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go new file mode 100644 index 0000000..4302069 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go @@ -0,0 +1,63 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + 
Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go new file mode 100644 index 0000000..1502f6c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go new file mode 100644 index 0000000..1502f6c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go new file mode 100644 index 0000000..4302069 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go @@ -0,0 +1,63 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr 
[4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go new file mode 100644 index 0000000..1502f6c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go new file mode 100644 index 0000000..1502f6c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go new file mode 100644 index 0000000..1502f6c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + 
Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go new file mode 100644 index 0000000..db60491 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go @@ -0,0 +1,65 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go new file mode 100644 index 0000000..2a1a799 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go @@ -0,0 +1,68 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go new file mode 100644 index 0000000..db60491 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go @@ -0,0 +1,65 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + 
Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go new file mode 100644 index 0000000..1c83636 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go new file mode 100644 index 0000000..a6c0bf4 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go new file mode 100644 index 0000000..1c83636 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + 
Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go new file mode 100644 index 0000000..327c632 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go @@ -0,0 +1,60 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_solaris.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1a + + sysSOCK_RAW = 0x4 +) + +type iovec struct { + Base *int8 + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Accrights *int8 + Accrightslen int32 + Pad_cgo_2 [4]byte +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 + X__sin6_src_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x20 +) diff --git a/vendor/golang.org/x/net/internal/socks/client.go b/vendor/golang.org/x/net/internal/socks/client.go new file mode 100644 index 0000000..3d6f516 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socks/client.go @@ -0,0 +1,168 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
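The zsys_*.go files above are generated by cgo -godefs (per their headers) and pin each platform's kernel struct layouts next to matching sizeof constants. As a hypothetical illustration, not part of this patch, a build on the matching GOOS/GOARCH could assert that the two stay in sync:

package socket

import "unsafe"

// Hypothetical guard: the generated sizeof constants must agree with
// the generated struct layouts on the platform this file is built for.
func init() {
	if unsafe.Sizeof(iovec{}) != sizeofIovec ||
		unsafe.Sizeof(msghdr{}) != sizeofMsghdr ||
		unsafe.Sizeof(cmsghdr{}) != sizeofCmsghdr {
		panic("socket: zsys constants out of sync with struct layouts")
	}
}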
+ +package socks + +import ( + "context" + "errors" + "io" + "net" + "strconv" + "time" +) + +var ( + noDeadline = time.Time{} + aLongTimeAgo = time.Unix(1, 0) +) + +func (d *Dialer) connect(ctx context.Context, c net.Conn, address string) (_ net.Addr, ctxErr error) { + host, port, err := splitHostPort(address) + if err != nil { + return nil, err + } + if deadline, ok := ctx.Deadline(); ok && !deadline.IsZero() { + c.SetDeadline(deadline) + defer c.SetDeadline(noDeadline) + } + if ctx != context.Background() { + errCh := make(chan error, 1) + done := make(chan struct{}) + defer func() { + close(done) + if ctxErr == nil { + ctxErr = <-errCh + } + }() + go func() { + select { + case <-ctx.Done(): + c.SetDeadline(aLongTimeAgo) + errCh <- ctx.Err() + case <-done: + errCh <- nil + } + }() + } + + b := make([]byte, 0, 6+len(host)) // the size here is just an estimate + b = append(b, Version5) + if len(d.AuthMethods) == 0 || d.Authenticate == nil { + b = append(b, 1, byte(AuthMethodNotRequired)) + } else { + ams := d.AuthMethods + if len(ams) > 255 { + return nil, errors.New("too many authentication methods") + } + b = append(b, byte(len(ams))) + for _, am := range ams { + b = append(b, byte(am)) + } + } + if _, ctxErr = c.Write(b); ctxErr != nil { + return + } + + if _, ctxErr = io.ReadFull(c, b[:2]); ctxErr != nil { + return + } + if b[0] != Version5 { + return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) + } + am := AuthMethod(b[1]) + if am == AuthMethodNoAcceptableMethods { + return nil, errors.New("no acceptable authentication methods") + } + if d.Authenticate != nil { + if ctxErr = d.Authenticate(ctx, c, am); ctxErr != nil { + return + } + } + + b = b[:0] + b = append(b, Version5, byte(d.cmd), 0) + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + b = append(b, AddrTypeIPv4) + b = append(b, ip4...) + } else if ip6 := ip.To16(); ip6 != nil { + b = append(b, AddrTypeIPv6) + b = append(b, ip6...) + } else { + return nil, errors.New("unknown address type") + } + } else { + if len(host) > 255 { + return nil, errors.New("FQDN too long") + } + b = append(b, AddrTypeFQDN) + b = append(b, byte(len(host))) + b = append(b, host...) 
+ } + b = append(b, byte(port>>8), byte(port)) + if _, ctxErr = c.Write(b); ctxErr != nil { + return + } + + if _, ctxErr = io.ReadFull(c, b[:4]); ctxErr != nil { + return + } + if b[0] != Version5 { + return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) + } + if cmdErr := Reply(b[1]); cmdErr != StatusSucceeded { + return nil, errors.New("unknown error " + cmdErr.String()) + } + if b[2] != 0 { + return nil, errors.New("non-zero reserved field") + } + l := 2 + var a Addr + switch b[3] { + case AddrTypeIPv4: + l += net.IPv4len + a.IP = make(net.IP, net.IPv4len) + case AddrTypeIPv6: + l += net.IPv6len + a.IP = make(net.IP, net.IPv6len) + case AddrTypeFQDN: + if _, err := io.ReadFull(c, b[:1]); err != nil { + return nil, err + } + l += int(b[0]) + default: + return nil, errors.New("unknown address type " + strconv.Itoa(int(b[3]))) + } + if cap(b) < l { + b = make([]byte, l) + } else { + b = b[:l] + } + if _, ctxErr = io.ReadFull(c, b); ctxErr != nil { + return + } + if a.IP != nil { + copy(a.IP, b) + } else { + a.Name = string(b[:len(b)-2]) + } + a.Port = int(b[len(b)-2])<<8 | int(b[len(b)-1]) + return &a, nil +} + +func splitHostPort(address string) (string, int, error) { + host, port, err := net.SplitHostPort(address) + if err != nil { + return "", 0, err + } + portnum, err := strconv.Atoi(port) + if err != nil { + return "", 0, err + } + if 1 > portnum || portnum > 0xffff { + return "", 0, errors.New("port number out of range " + port) + } + return host, portnum, nil +} diff --git a/vendor/golang.org/x/net/internal/socks/dial_test.go b/vendor/golang.org/x/net/internal/socks/dial_test.go new file mode 100644 index 0000000..93101a6 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socks/dial_test.go @@ -0,0 +1,158 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
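For reference, the requests that connect writes above have a fixed layout under RFC 1928. A sketch of the exact bytes sent for a CONNECT to 192.0.2.1:5963 with no authentication (the same target address and port the tests below use):

var (
	// Method selection: VER, NMETHODS, METHODS...
	greeting = []byte{0x05, 0x01, 0x00} // Version5, one method, AuthMethodNotRequired

	// Command request: VER, CMD, RSV, ATYP, DST.ADDR, DST.PORT.
	request = []byte{
		0x05,         // Version5
		0x01,         // CmdConnect
		0x00,         // reserved, must be zero
		0x01,         // AddrTypeIPv4
		192, 0, 2, 1, // destination address
		0x17, 0x4b,   // destination port 5963, big-endian
	}
)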
+ +package socks_test + +import ( + "context" + "io" + "math/rand" + "net" + "os" + "testing" + "time" + + "golang.org/x/net/internal/socks" + "golang.org/x/net/internal/sockstest" +) + +const ( + targetNetwork = "tcp6" + targetHostname = "fqdn.doesnotexist" + targetHostIP = "2001:db8::1" + targetPort = "5963" +) + +func TestDial(t *testing.T) { + t.Run("Connect", func(t *testing.T) { + ss, err := sockstest.NewServer(sockstest.NoAuthRequired, sockstest.NoProxyRequired) + if err != nil { + t.Error(err) + return + } + defer ss.Close() + d := socks.NewDialer(ss.Addr().Network(), ss.Addr().String()) + d.AuthMethods = []socks.AuthMethod{ + socks.AuthMethodNotRequired, + socks.AuthMethodUsernamePassword, + } + d.Authenticate = (&socks.UsernamePassword{ + Username: "username", + Password: "password", + }).Authenticate + c, err := d.Dial(targetNetwork, net.JoinHostPort(targetHostIP, targetPort)) + if err == nil { + c.(*socks.Conn).BoundAddr() + c.Close() + } + if err != nil { + t.Error(err) + return + } + }) + t.Run("Cancel", func(t *testing.T) { + ss, err := sockstest.NewServer(sockstest.NoAuthRequired, blackholeCmdFunc) + if err != nil { + t.Error(err) + return + } + defer ss.Close() + d := socks.NewDialer(ss.Addr().Network(), ss.Addr().String()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + dialErr := make(chan error) + go func() { + c, err := d.DialContext(ctx, ss.TargetAddr().Network(), net.JoinHostPort(targetHostname, targetPort)) + if err == nil { + c.Close() + } + dialErr <- err + }() + time.Sleep(100 * time.Millisecond) + cancel() + err = <-dialErr + if perr, nerr := parseDialError(err); perr != context.Canceled && nerr == nil { + t.Errorf("got %v; want context.Canceled or equivalent", err) + return + } + }) + t.Run("Deadline", func(t *testing.T) { + ss, err := sockstest.NewServer(sockstest.NoAuthRequired, blackholeCmdFunc) + if err != nil { + t.Error(err) + return + } + defer ss.Close() + d := socks.NewDialer(ss.Addr().Network(), ss.Addr().String()) + ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(100*time.Millisecond)) + defer cancel() + c, err := d.DialContext(ctx, ss.TargetAddr().Network(), net.JoinHostPort(targetHostname, targetPort)) + if err == nil { + c.Close() + } + if perr, nerr := parseDialError(err); perr != context.DeadlineExceeded && nerr == nil { + t.Errorf("got %v; want context.DeadlineExceeded or equivalent", err) + return + } + }) + t.Run("WithRogueServer", func(t *testing.T) { + ss, err := sockstest.NewServer(sockstest.NoAuthRequired, rogueCmdFunc) + if err != nil { + t.Error(err) + return + } + defer ss.Close() + d := socks.NewDialer(ss.Addr().Network(), ss.Addr().String()) + for i := 0; i < 2*len(rogueCmdList); i++ { + ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(100*time.Millisecond)) + defer cancel() + c, err := d.DialContext(ctx, targetNetwork, net.JoinHostPort(targetHostIP, targetPort)) + if err == nil { + t.Log(c.(*socks.Conn).BoundAddr()) + c.Close() + t.Error("should fail") + } + } + }) +} + +func blackholeCmdFunc(rw io.ReadWriter, b []byte) error { + if _, err := sockstest.ParseCmdRequest(b); err != nil { + return err + } + var bb [1]byte + for { + if _, err := rw.Read(bb[:]); err != nil { + return err + } + } +} + +func rogueCmdFunc(rw io.ReadWriter, b []byte) error { + if _, err := sockstest.ParseCmdRequest(b); err != nil { + return err + } + rw.Write(rogueCmdList[rand.Intn(len(rogueCmdList))]) + return nil +} + +var rogueCmdList = [][]byte{ + {0x05}, + {0x06, 0x00, 0x00, 
0x01, 192, 0, 2, 1, 0x17, 0x4b}, + {0x05, 0x00, 0xff, 0x01, 192, 0, 2, 2, 0x17, 0x4b}, + {0x05, 0x00, 0x00, 0x01, 192, 0, 2, 3}, + {0x05, 0x00, 0x00, 0x03, 0x04, 'F', 'Q', 'D', 'N'}, +} + +func parseDialError(err error) (perr, nerr error) { + if e, ok := err.(*net.OpError); ok { + err = e.Err + nerr = e + } + if e, ok := err.(*os.SyscallError); ok { + err = e.Err + } + perr = err + return +} diff --git a/vendor/golang.org/x/net/internal/socks/socks.go b/vendor/golang.org/x/net/internal/socks/socks.go new file mode 100644 index 0000000..9158595 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socks/socks.go @@ -0,0 +1,265 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package socks provides a SOCKS version 5 client implementation. +// +// SOCKS protocol version 5 is defined in RFC 1928. +// Username/Password authentication for SOCKS version 5 is defined in +// RFC 1929. +package socks + +import ( + "context" + "errors" + "io" + "net" + "strconv" +) + +// A Command represents a SOCKS command. +type Command int + +func (cmd Command) String() string { + switch cmd { + case CmdConnect: + return "socks connect" + case cmdBind: + return "socks bind" + default: + return "socks " + strconv.Itoa(int(cmd)) + } +} + +// An AuthMethod represents a SOCKS authentication method. +type AuthMethod int + +// A Reply represents a SOCKS command reply code. +type Reply int + +func (code Reply) String() string { + switch code { + case StatusSucceeded: + return "succeeded" + case 0x01: + return "general SOCKS server failure" + case 0x02: + return "connection not allowed by ruleset" + case 0x03: + return "network unreachable" + case 0x04: + return "host unreachable" + case 0x05: + return "connection refused" + case 0x06: + return "TTL expired" + case 0x07: + return "command not supported" + case 0x08: + return "address type not supported" + default: + return "unknown code: " + strconv.Itoa(int(code)) + } +} + +// Wire protocol constants. +const ( + Version5 = 0x05 + + AddrTypeIPv4 = 0x01 + AddrTypeFQDN = 0x03 + AddrTypeIPv6 = 0x04 + + CmdConnect Command = 0x01 // establishes an active-open forward proxy connection + cmdBind Command = 0x02 // establishes a passive-open forward proxy connection + + AuthMethodNotRequired AuthMethod = 0x00 // no authentication required + AuthMethodUsernamePassword AuthMethod = 0x02 // use username/password + AuthMethodNoAcceptableMethods AuthMethod = 0xff // no acceptable authentication methods + + StatusSucceeded Reply = 0x00 +) + +// An Addr represents a SOCKS-specific address. +// Either Name or IP is used exclusively. +type Addr struct { + Name string // fully-qualified domain name + IP net.IP + Port int +} + +func (a *Addr) Network() string { return "socks" } + +func (a *Addr) String() string { + if a == nil { + return "" + } + port := strconv.Itoa(a.Port) + if a.IP == nil { + return net.JoinHostPort(a.Name, port) + } + return net.JoinHostPort(a.IP.String(), port) +} + +// A Conn represents a forward proxy connection. +type Conn struct { + net.Conn + + boundAddr net.Addr +} + +// BoundAddr returns the address assigned by the proxy server for +// connecting to the command target address from the proxy server. +func (c *Conn) BoundAddr() net.Addr { + if c == nil { + return nil + } + return c.boundAddr +} + +// A Dialer holds SOCKS-specific options.
+type Dialer struct { + cmd Command // either CmdConnect or cmdBind + proxyNetwork string // network between a proxy server and a client + proxyAddress string // proxy server address + + // ProxyDial specifies the optional dial function for + // establishing the transport connection. + ProxyDial func(context.Context, string, string) (net.Conn, error) + + // AuthMethods specifies the list of request authentication + // methods. + // If empty, SOCKS client requests only AuthMethodNotRequired. + AuthMethods []AuthMethod + + // Authenticate specifies the optional authentication + // function. It must be non-nil when AuthMethods is not empty. + // It must return an error when authentication fails. + Authenticate func(context.Context, io.ReadWriter, AuthMethod) error +} + +// DialContext connects to the provided address on the provided +// network. +// +// The returned error value may be a net.OpError. When the Op field of +// net.OpError contains "socks", the Source field contains a proxy +// server address and the Addr field contains a command target +// address. +// +// See func Dial of the net package of standard library for a +// description of the network and address parameters. +func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { + switch network { + case "tcp", "tcp6", "tcp4": + default: + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("network not implemented")} + } + switch d.cmd { + case CmdConnect, cmdBind: + default: + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("command not implemented")} + } + if ctx == nil { + ctx = context.Background() + } + var err error + var c net.Conn + if d.ProxyDial != nil { + c, err = d.ProxyDial(ctx, d.proxyNetwork, d.proxyAddress) + } else { + var dd net.Dialer + c, err = dd.DialContext(ctx, d.proxyNetwork, d.proxyAddress) + } + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + a, err := d.connect(ctx, c, address) + if err != nil { + c.Close() + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + return &Conn{Conn: c, boundAddr: a}, nil +} + +// Dial connects to the provided address on the provided network. +// +// Deprecated: Use DialContext instead. +func (d *Dialer) Dial(network, address string) (net.Conn, error) { + return d.DialContext(context.Background(), network, address) +} + +func (d *Dialer) pathAddrs(address string) (proxy, dst net.Addr, err error) { + for i, s := range []string{d.proxyAddress, address} { + host, port, err := splitHostPort(s) + if err != nil { + return nil, nil, err + } + a := &Addr{Port: port} + a.IP = net.ParseIP(host) + if a.IP == nil { + a.Name = host + } + if i == 0 { + proxy = a + } else { + dst = a + } + } + return +} + +// NewDialer returns a new Dialer that dials through the provided +// proxy server's network and address. +func NewDialer(network, address string) *Dialer { + return &Dialer{proxyNetwork: network, proxyAddress: address, cmd: CmdConnect} +} + +const ( + authUsernamePasswordVersion = 0x01 + authStatusSucceeded = 0x00 +) + +// UsernamePassword are the credentials for the username/password +// authentication method.
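Putting the pieces together, a client drives the Dialer above roughly as follows; a minimal sketch, assuming a placeholder proxy at 127.0.0.1:1080 and using the UsernamePassword helper defined next:

d := socks.NewDialer("tcp", "127.0.0.1:1080") // placeholder proxy address
d.AuthMethods = []socks.AuthMethod{
	socks.AuthMethodNotRequired,
	socks.AuthMethodUsernamePassword,
}
d.Authenticate = (&socks.UsernamePassword{
	Username: "username",
	Password: "password",
}).Authenticate
c, err := d.DialContext(context.Background(), "tcp", "192.0.2.1:5963")
if err != nil {
	// handle error
}
defer c.Close()
_ = c.(*socks.Conn).BoundAddr() // address the proxy bound for this connection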
+type UsernamePassword struct { + Username string + Password string +} + +// Authenticate authenticates a pair of username and password with the +// proxy server. +func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter, auth AuthMethod) error { + switch auth { + case AuthMethodNotRequired: + return nil + case AuthMethodUsernamePassword: + if len(up.Username) == 0 || len(up.Username) > 255 || len(up.Password) == 0 || len(up.Password) > 255 { + return errors.New("invalid username/password") + } + b := []byte{authUsernamePasswordVersion} + b = append(b, byte(len(up.Username))) + b = append(b, up.Username...) + b = append(b, byte(len(up.Password))) + b = append(b, up.Password...) + // TODO(mikio): handle IO deadlines and cancelation if + // necessary + if _, err := rw.Write(b); err != nil { + return err + } + if _, err := io.ReadFull(rw, b[:2]); err != nil { + return err + } + if b[0] != authUsernamePasswordVersion { + return errors.New("invalid username/password version") + } + if b[1] != authStatusSucceeded { + return errors.New("username/password authentication failed") + } + return nil + } + return errors.New("unsupported authentication method " + strconv.Itoa(int(auth))) +} diff --git a/vendor/golang.org/x/net/internal/sockstest/server.go b/vendor/golang.org/x/net/internal/sockstest/server.go new file mode 100644 index 0000000..3c6e9e9 --- /dev/null +++ b/vendor/golang.org/x/net/internal/sockstest/server.go @@ -0,0 +1,241 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sockstest provides utilities for SOCKS testing. +package sockstest + +import ( + "errors" + "io" + "net" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/internal/socks" +) + +// An AuthRequest represents an authentication request. +type AuthRequest struct { + Version int + Methods []socks.AuthMethod +} + +// ParseAuthRequest parses an authentication request. +func ParseAuthRequest(b []byte) (*AuthRequest, error) { + if len(b) < 2 { + return nil, errors.New("short auth request") + } + if b[0] != socks.Version5 { + return nil, errors.New("unexpected protocol version") + } + if len(b)-2 < int(b[1]) { + return nil, errors.New("short auth request") + } + req := &AuthRequest{Version: int(b[0])} + if b[1] > 0 { + req.Methods = make([]socks.AuthMethod, b[1]) + for i, m := range b[2 : 2+b[1]] { + req.Methods[i] = socks.AuthMethod(m) + } + } + return req, nil +} + +// MarshalAuthReply returns an authentication reply in wire format. +func MarshalAuthReply(ver int, m socks.AuthMethod) ([]byte, error) { + return []byte{byte(ver), byte(m)}, nil +} + +// A CmdRequest represents a command request. +type CmdRequest struct { + Version int + Cmd socks.Command + Addr socks.Addr +} + +// ParseCmdRequest parses a command request.
+func ParseCmdRequest(b []byte) (*CmdRequest, error) { + if len(b) < 7 { + return nil, errors.New("short cmd request") + } + if b[0] != socks.Version5 { + return nil, errors.New("unexpected protocol version") + } + if socks.Command(b[1]) != socks.CmdConnect { + return nil, errors.New("unexpected command") + } + if b[2] != 0 { + return nil, errors.New("non-zero reserved field") + } + req := &CmdRequest{Version: int(b[0]), Cmd: socks.Command(b[1])} + l := 2 + off := 4 + switch b[3] { + case socks.AddrTypeIPv4: + l += net.IPv4len + req.Addr.IP = make(net.IP, net.IPv4len) + case socks.AddrTypeIPv6: + l += net.IPv6len + req.Addr.IP = make(net.IP, net.IPv6len) + case socks.AddrTypeFQDN: + l += int(b[4]) + off = 5 + default: + return nil, errors.New("unknown address type") + } + if len(b[off:]) < l { + return nil, errors.New("short cmd request") + } + if req.Addr.IP != nil { + copy(req.Addr.IP, b[off:]) + } else { + req.Addr.Name = string(b[off : off+l-2]) + } + req.Addr.Port = int(b[off+l-2])<<8 | int(b[off+l-1]) + return req, nil +} + +// MarshalCmdReply returns a command reply in wire format. +func MarshalCmdReply(ver int, reply socks.Reply, a *socks.Addr) ([]byte, error) { + b := make([]byte, 4) + b[0] = byte(ver) + b[1] = byte(reply) + if a.Name != "" { + if len(a.Name) > 255 { + return nil, errors.New("fqdn too long") + } + b[3] = socks.AddrTypeFQDN + b = append(b, byte(len(a.Name))) + b = append(b, a.Name...) + } else if ip4 := a.IP.To4(); ip4 != nil { + b[3] = socks.AddrTypeIPv4 + b = append(b, ip4...) + } else if ip6 := a.IP.To16(); ip6 != nil { + b[3] = socks.AddrTypeIPv6 + b = append(b, ip6...) + } else { + return nil, errors.New("unknown address type") + } + b = append(b, byte(a.Port>>8), byte(a.Port)) + return b, nil +} + +// A Server represents a server for handshake testing. +type Server struct { + ln net.Listener +} + +// Addr returns a server address. +func (s *Server) Addr() net.Addr { + return s.ln.Addr() +} + +// TargetAddr returns a fake final destination address. +// +// The returned address is only valid for testing with Server. +func (s *Server) TargetAddr() net.Addr { + a := s.ln.Addr() + switch a := a.(type) { + case *net.TCPAddr: + if a.IP.To4() != nil { + return &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 5963} + } + if a.IP.To16() != nil && a.IP.To4() == nil { + return &net.TCPAddr{IP: net.IPv6loopback, Port: 5963} + } + } + return nil +} + +// Close closes the server. +func (s *Server) Close() error { + return s.ln.Close() +} + +func (s *Server) serve(authFunc, cmdFunc func(io.ReadWriter, []byte) error) { + c, err := s.ln.Accept() + if err != nil { + return + } + defer c.Close() + go s.serve(authFunc, cmdFunc) + b := make([]byte, 512) + n, err := c.Read(b) + if err != nil { + return + } + if err := authFunc(c, b[:n]); err != nil { + return + } + n, err = c.Read(b) + if err != nil { + return + } + if err := cmdFunc(c, b[:n]); err != nil { + return + } +} + +// NewServer returns a new server. +// +// The provided authFunc and cmdFunc must parse requests and return +// appropriate replies to clients. +func NewServer(authFunc, cmdFunc func(io.ReadWriter, []byte) error) (*Server, error) { + var err error + s := new(Server) + s.ln, err = nettest.NewLocalListener("tcp") + if err != nil { + return nil, err + } + go s.serve(authFunc, cmdFunc) + return s, nil +} + +// NoAuthRequired handles no-authentication-required signaling.
+func NoAuthRequired(rw io.ReadWriter, b []byte) error { + req, err := ParseAuthRequest(b) + if err != nil { + return err + } + b, err = MarshalAuthReply(req.Version, socks.AuthMethodNotRequired) + if err != nil { + return err + } + n, err := rw.Write(b) + if err != nil { + return err + } + if n != len(b) { + return errors.New("short write") + } + return nil +} + +// NoProxyRequired handles a command signaling without constructing a +// proxy connection to the final destination. +func NoProxyRequired(rw io.ReadWriter, b []byte) error { + req, err := ParseCmdRequest(b) + if err != nil { + return err + } + req.Addr.Port += 1 + if req.Addr.Name != "" { + req.Addr.Name = "boundaddr.doesnotexist" + } else if req.Addr.IP.To4() != nil { + req.Addr.IP = net.IPv4(127, 0, 0, 1) + } else { + req.Addr.IP = net.IPv6loopback + } + b, err = MarshalCmdReply(socks.Version5, socks.StatusSucceeded, &req.Addr) + if err != nil { + return err + } + n, err := rw.Write(b) + if err != nil { + return err + } + if n != len(b) { + return errors.New("short write") + } + return nil +} diff --git a/vendor/golang.org/x/net/internal/sockstest/server_test.go b/vendor/golang.org/x/net/internal/sockstest/server_test.go new file mode 100644 index 0000000..2b02d81 --- /dev/null +++ b/vendor/golang.org/x/net/internal/sockstest/server_test.go @@ -0,0 +1,103 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sockstest + +import ( + "net" + "reflect" + "testing" + + "golang.org/x/net/internal/socks" +) + +func TestParseAuthRequest(t *testing.T) { + for i, tt := range []struct { + wire []byte + req *AuthRequest + }{ + { + []byte{0x05, 0x00}, + &AuthRequest{ + socks.Version5, + nil, + }, + }, + { + []byte{0x05, 0x01, 0xff}, + &AuthRequest{ + socks.Version5, + []socks.AuthMethod{ + socks.AuthMethodNoAcceptableMethods, + }, + }, + }, + { + []byte{0x05, 0x02, 0x00, 0xff}, + &AuthRequest{ + socks.Version5, + []socks.AuthMethod{ + socks.AuthMethodNotRequired, + socks.AuthMethodNoAcceptableMethods, + }, + }, + }, + + // corrupted requests + {nil, nil}, + {[]byte{0x00, 0x01}, nil}, + {[]byte{0x06, 0x00}, nil}, + {[]byte{0x05, 0x02, 0x00}, nil}, + } { + req, err := ParseAuthRequest(tt.wire) + if !reflect.DeepEqual(req, tt.req) { + t.Errorf("#%d: got %v, %v; want %v", i, req, err, tt.req) + continue + } + } +} + +func TestParseCmdRequest(t *testing.T) { + for i, tt := range []struct { + wire []byte + req *CmdRequest + }{ + { + []byte{0x05, 0x01, 0x00, 0x01, 192, 0, 2, 1, 0x17, 0x4b}, + &CmdRequest{ + socks.Version5, + socks.CmdConnect, + socks.Addr{ + IP: net.IP{192, 0, 2, 1}, + Port: 5963, + }, + }, + }, + { + []byte{0x05, 0x01, 0x00, 0x03, 0x04, 'F', 'Q', 'D', 'N', 0x17, 0x4b}, + &CmdRequest{ + socks.Version5, + socks.CmdConnect, + socks.Addr{ + Name: "FQDN", + Port: 5963, + }, + }, + }, + + // corrupted requests + {nil, nil}, + {[]byte{0x05}, nil}, + {[]byte{0x06, 0x01, 0x00, 0x01, 192, 0, 2, 2, 0x17, 0x4b}, nil}, + {[]byte{0x05, 0x01, 0xff, 0x01, 192, 0, 2, 3}, nil}, + {[]byte{0x05, 0x01, 0x00, 0x01, 192, 0, 2, 4}, nil}, + {[]byte{0x05, 0x01, 0x00, 0x03, 0x04, 'F', 'Q', 'D', 'N'}, nil}, + } { + req, err := ParseCmdRequest(tt.wire) + if !reflect.DeepEqual(req, tt.req) { + t.Errorf("#%d: got %v, %v; want %v", i, req, err, tt.req) + continue + } + } +} diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go new file mode 100644 index 
0000000..685f0e7 --- /dev/null +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -0,0 +1,525 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeseries implements a time series structure for stats collection. +package timeseries // import "golang.org/x/net/internal/timeseries" + +import ( + "fmt" + "log" + "time" +) + +const ( + timeSeriesNumBuckets = 64 + minuteHourSeriesNumBuckets = 60 +) + +var timeSeriesResolutions = []time.Duration{ + 1 * time.Second, + 10 * time.Second, + 1 * time.Minute, + 10 * time.Minute, + 1 * time.Hour, + 6 * time.Hour, + 24 * time.Hour, // 1 day + 7 * 24 * time.Hour, // 1 week + 4 * 7 * 24 * time.Hour, // 4 weeks + 16 * 7 * 24 * time.Hour, // 16 weeks +} + +var minuteHourSeriesResolutions = []time.Duration{ + 1 * time.Second, + 1 * time.Minute, +} + +// An Observable is a kind of data that can be aggregated in a time series. +type Observable interface { + Multiply(ratio float64) // Multiplies the data in self by a given ratio + Add(other Observable) // Adds the data from a different observation to self + Clear() // Clears the observation so it can be reused. + CopyFrom(other Observable) // Copies the contents of a given observation to self +} + +// Float attaches the methods of Observable to a float64. +type Float float64 + +// NewFloat returns a Float. +func NewFloat() Observable { + f := Float(0) + return &f +} + +// String returns the float as a string. +func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } + +// Value returns the float's value. +func (f *Float) Value() float64 { return float64(*f) } + +func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } + +func (f *Float) Add(other Observable) { + o := other.(*Float) + *f += *o +} + +func (f *Float) Clear() { *f = 0 } + +func (f *Float) CopyFrom(other Observable) { + o := other.(*Float) + *f = *o +} + +// A Clock tells the current time. +type Clock interface { + Time() time.Time +} + +type defaultClock int + +var defaultClockInstance defaultClock + +func (defaultClock) Time() time.Time { return time.Now() } + +// Information kept per level. Each level consists of a circular list of +// observations. The start of the level may be derived from end and +// len(buckets) * size. +type tsLevel struct { + oldest int // index to oldest bucketed Observable + newest int // index to newest bucketed Observable + end time.Time // end timestamp for this level + size time.Duration // duration of the bucketed Observable + buckets []Observable // collections of observations + provider func() Observable // used for creating new Observable +} + +func (l *tsLevel) Clear() { + l.oldest = 0 + l.newest = len(l.buckets) - 1 + l.end = time.Time{} + for i := range l.buckets { + if l.buckets[i] != nil { + l.buckets[i].Clear() + l.buckets[i] = nil + } + } +} + +func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { + l.size = size + l.provider = f + l.buckets = make([]Observable, numBuckets) +} + +// Keeps a sequence of levels. Each level is responsible for storing data at +// a given resolution. For example, the first level stores data at a one +// minute resolution while the second level stores data at a one hour +// resolution. + +// Each level is represented by a sequence of buckets. Each bucket spans an +// interval equal to the resolution of the level. New observations are added +// to the last bucket.
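Concretely, each level retains len(buckets) * size of history, so with timeSeriesNumBuckets = 64 the resolutions above give spans from about a minute up to roughly 19.6 years; a quick sketch of that arithmetic:

// Span covered by each level: numBuckets * bucket resolution.
for _, res := range timeSeriesResolutions {
	span := time.Duration(timeSeriesNumBuckets) * res
	fmt.Println(res, "buckets cover", span)
	// 1s -> 64s, 1m -> 64m, ..., 16 weeks -> 1024 weeks (~19.6 years)
}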
+type timeSeries struct { + provider func() Observable // make more Observable + numBuckets int // number of buckets in each level + levels []*tsLevel // levels of bucketed Observable + lastAdd time.Time // time of last Observable tracked + total Observable // convenient aggregation of all Observable + clock Clock // Clock for getting current time + pending Observable // observations not yet bucketed + pendingTime time.Time // what time are we keeping in pending + dirty bool // if there are pending observations +} + +// init initializes a level according to the supplied criteria. +func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { + ts.provider = f + ts.numBuckets = numBuckets + ts.clock = clock + ts.levels = make([]*tsLevel, len(resolutions)) + + for i := range resolutions { + if i > 0 && resolutions[i-1] >= resolutions[i] { + log.Print("timeseries: resolutions must be monotonically increasing") + break + } + newLevel := new(tsLevel) + newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) + ts.levels[i] = newLevel + } + + ts.Clear() +} + +// Clear removes all observations from the time series. +func (ts *timeSeries) Clear() { + ts.lastAdd = time.Time{} + ts.total = ts.resetObservation(ts.total) + ts.pending = ts.resetObservation(ts.pending) + ts.pendingTime = time.Time{} + ts.dirty = false + + for i := range ts.levels { + ts.levels[i].Clear() + } +} + +// Add records an observation at the current time. +func (ts *timeSeries) Add(observation Observable) { + ts.AddWithTime(observation, ts.clock.Time()) +} + +// AddWithTime records an observation at the specified time. +func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { + + smallBucketDuration := ts.levels[0].size + + if t.After(ts.lastAdd) { + ts.lastAdd = t + } + + if t.After(ts.pendingTime) { + ts.advance(t) + ts.mergePendingUpdates() + ts.pendingTime = ts.levels[0].end + ts.pending.CopyFrom(observation) + ts.dirty = true + } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { + // The observation is close enough to go into the pending bucket. + // This compensates for clock skewing and small scheduling delays + // by letting the update stay in the fast path. + ts.pending.Add(observation) + ts.dirty = true + } else { + ts.mergeValue(observation, t) + } +} + +// mergeValue inserts the observation at the specified time in the past into all levels. +func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { + for _, level := range ts.levels { + index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) + if 0 <= index && index < ts.numBuckets { + bucketNumber := (level.oldest + index) % ts.numBuckets + if level.buckets[bucketNumber] == nil { + level.buckets[bucketNumber] = level.provider() + } + level.buckets[bucketNumber].Add(observation) + } + } + ts.total.Add(observation) +} + +// mergePendingUpdates applies the pending updates into all levels. +func (ts *timeSeries) mergePendingUpdates() { + if ts.dirty { + ts.mergeValue(ts.pending, ts.pendingTime) + ts.pending = ts.resetObservation(ts.pending) + ts.dirty = false + } +} + +// advance cycles the buckets at each level until the latest bucket in +// each level can hold the time specified. +func (ts *timeSeries) advance(t time.Time) { + if !t.After(ts.levels[0].end) { + return + } + for i := 0; i < len(ts.levels); i++ { + level := ts.levels[i] + if !level.end.Before(t) { + break + } + + // If the time is sufficiently far, just clear the level and advance + // directly. 
+ if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { + for _, b := range level.buckets { + ts.resetObservation(b) + } + level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) + } + + for t.After(level.end) { + level.end = level.end.Add(level.size) + level.newest = level.oldest + level.oldest = (level.oldest + 1) % ts.numBuckets + ts.resetObservation(level.buckets[level.newest]) + } + + t = level.end + } +} + +// Latest returns the sum of the num latest buckets from the level. +func (ts *timeSeries) Latest(level, num int) Observable { + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + result := ts.provider() + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + if l.buckets[index] != nil { + result.Add(l.buckets[index]) + } + if index == 0 { + index = ts.numBuckets + } + index-- + } + + return result +} + +// LatestBuckets returns a copy of the num latest buckets from level. +func (ts *timeSeries) LatestBuckets(level, num int) []Observable { + if level < 0 || level > len(ts.levels) { + log.Print("timeseries: bad level argument: ", level) + return nil + } + if num < 0 || num >= ts.numBuckets { + log.Print("timeseries: bad num argument: ", num) + return nil + } + + results := make([]Observable, num) + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + result := ts.provider() + results[i] = result + if l.buckets[index] != nil { + result.CopyFrom(l.buckets[index]) + } + + if index == 0 { + index = ts.numBuckets + } + index -= 1 + } + return results +} + +// ScaleBy updates observations by scaling by factor. +func (ts *timeSeries) ScaleBy(factor float64) { + for _, l := range ts.levels { + for i := 0; i < ts.numBuckets; i++ { + l.buckets[i].Multiply(factor) + } + } + + ts.total.Multiply(factor) + ts.pending.Multiply(factor) +} + +// Range returns the sum of observations added over the specified time range. +// If start or finish times don't fall on bucket boundaries of the same +// level, then return values are approximate answers. +func (ts *timeSeries) Range(start, finish time.Time) Observable { + return ts.ComputeRange(start, finish, 1)[0] +} + +// Recent returns the sum of observations from the last delta. +func (ts *timeSeries) Recent(delta time.Duration) Observable { + now := ts.clock.Time() + return ts.Range(now.Add(-delta), now) +} + +// Total returns the total of all observations. +func (ts *timeSeries) Total() Observable { + ts.mergePendingUpdates() + return ts.total +} + +// ComputeRange computes a specified number of values into a slice using +// the observations recorded over the specified time period. The return +// values are approximate if the start or finish times don't fall on the +// bucket boundaries at the same level or if the number of buckets spanning +// the range is not an integral multiple of num. 
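For example, if the chosen level has 10-second buckets and one bucket holding the value 30 overlaps a requested output slot by only 5 of its 10 seconds, extract (below) copies the bucket, scales it by the overlap fraction 5/10 = 0.5, and adds 15 to that slot; this is where the approximation comes from.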
+func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { + if start.After(finish) { + log.Printf("timeseries: start > finish, %v>%v", start, finish) + return nil + } + + if num < 0 { + log.Printf("timeseries: num < 0, %v", num) + return nil + } + + results := make([]Observable, num) + + for _, l := range ts.levels { + if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { + ts.extract(l, start, finish, num, results) + return results + } + } + + // Failed to find a level that covers the desired range. So just + // extract from the last level, even if it doesn't cover the entire + // desired range. + ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) + + return results +} + +// RecentList returns the specified number of values in slice over the most +// recent time period of the specified range. +func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { + if delta < 0 { + return nil + } + now := ts.clock.Time() + return ts.ComputeRange(now.Add(-delta), now, num) +} + +// extract returns a slice of specified number of observations from a given +// level over a given range. +func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { + ts.mergePendingUpdates() + + srcInterval := l.size + dstInterval := finish.Sub(start) / time.Duration(num) + dstStart := start + srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) + + srcIndex := 0 + + // Where should scanning start? + if dstStart.After(srcStart) { + advance := dstStart.Sub(srcStart) / srcInterval + srcIndex += int(advance) + srcStart = srcStart.Add(advance * srcInterval) + } + + // The i'th value is computed as shown below. + // interval = (finish - start)/num + // i'th value = sum of observation in range + // [ start + i * interval, + // start + (i + 1) * interval ) + for i := 0; i < num; i++ { + results[i] = ts.resetObservation(results[i]) + dstEnd := dstStart.Add(dstInterval) + for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { + srcEnd := srcStart.Add(srcInterval) + if srcEnd.After(ts.lastAdd) { + srcEnd = ts.lastAdd + } + + if !srcEnd.Before(dstStart) { + srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] + if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { + // dst completely contains src. + if srcValue != nil { + results[i].Add(srcValue) + } + } else { + // dst partially overlaps src. + overlapStart := maxTime(srcStart, dstStart) + overlapEnd := minTime(srcEnd, dstEnd) + base := srcEnd.Sub(srcStart) + fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() + + used := ts.provider() + if srcValue != nil { + used.CopyFrom(srcValue) + } + used.Multiply(fraction) + results[i].Add(used) + } + + if srcEnd.After(dstEnd) { + break + } + } + srcIndex++ + srcStart = srcStart.Add(srcInterval) + } + dstStart = dstStart.Add(dstInterval) + } +} + +// resetObservation clears the content so the struct may be reused. +func (ts *timeSeries) resetObservation(observation Observable) Observable { + if observation == nil { + observation = ts.provider() + } else { + observation.Clear() + } + return observation +} + +// TimeSeries tracks data at granularities from 1 second to 16 weeks. +type TimeSeries struct { + timeSeries +} + +// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable.
+func NewTimeSeries(f func() Observable) *TimeSeries { + return NewTimeSeriesWithClock(f, defaultClockInstance) +} + +// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { + ts := new(TimeSeries) + ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) + return ts +} + +// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. +type MinuteHourSeries struct { + timeSeries +} + +// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. +func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { + return NewMinuteHourSeriesWithClock(f, defaultClockInstance) +} + +// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { + ts := new(MinuteHourSeries) + ts.timeSeries.init(minuteHourSeriesResolutions, f, + minuteHourSeriesNumBuckets, clock) + return ts +} + +func (ts *MinuteHourSeries) Minute() Observable { + return ts.timeSeries.Latest(0, 60) +} + +func (ts *MinuteHourSeries) Hour() Observable { + return ts.timeSeries.Latest(1, 60) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries_test.go b/vendor/golang.org/x/net/internal/timeseries/timeseries_test.go new file mode 100644 index 0000000..66325a9 --- /dev/null +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries_test.go @@ -0,0 +1,170 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
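Before the tests, a usage sketch of the MinuteHourSeries API defined above (within this internal package; the type assertion to *Float matches the NewFloat provider):

s := NewMinuteHourSeries(NewFloat)
v := Float(1)
s.Add(&v) // record one observation at the current time
minute := s.Minute().(*Float).Value() // sum over the 60 one-second buckets
hour := s.Hour().(*Float).Value()     // sum over the 60 one-minute buckets
fmt.Println(minute, hour)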
+ +package timeseries + +import ( + "math" + "testing" + "time" +) + +func isNear(x *Float, y float64, tolerance float64) bool { + return math.Abs(x.Value()-y) < tolerance +} + +func isApproximate(x *Float, y float64) bool { + return isNear(x, y, 1e-2) +} + +func checkApproximate(t *testing.T, o Observable, y float64) { + x := o.(*Float) + if !isApproximate(x, y) { + t.Errorf("Wanted %g, got %g", y, x.Value()) + } +} + +func checkNear(t *testing.T, o Observable, y, tolerance float64) { + x := o.(*Float) + if !isNear(x, y, tolerance) { + t.Errorf("Wanted %g +- %g, got %g", y, tolerance, x.Value()) + } +} + +var baseTime = time.Date(2013, 1, 1, 0, 0, 0, 0, time.UTC) + +func tu(s int64) time.Time { + return baseTime.Add(time.Duration(s) * time.Second) +} + +func tu2(s int64, ns int64) time.Time { + return baseTime.Add(time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond) +} + +func TestBasicTimeSeries(t *testing.T) { + ts := NewTimeSeries(NewFloat) + fo := new(Float) + *fo = Float(10) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + checkApproximate(t, ts.Range(tu(0), tu(1)), 40) + checkApproximate(t, ts.Total(), 40) + ts.AddWithTime(fo, tu(3)) + ts.AddWithTime(fo, tu(3)) + ts.AddWithTime(fo, tu(3)) + checkApproximate(t, ts.Range(tu(0), tu(2)), 40) + checkApproximate(t, ts.Range(tu(2), tu(4)), 30) + checkApproximate(t, ts.Total(), 70) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + checkApproximate(t, ts.Range(tu(0), tu(2)), 60) + checkApproximate(t, ts.Range(tu(2), tu(4)), 30) + checkApproximate(t, ts.Total(), 90) + *fo = Float(100) + ts.AddWithTime(fo, tu(100)) + checkApproximate(t, ts.Range(tu(99), tu(100)), 100) + checkApproximate(t, ts.Range(tu(0), tu(4)), 36) + checkApproximate(t, ts.Total(), 190) + *fo = Float(10) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + checkApproximate(t, ts.Range(tu(0), tu(4)), 44) + checkApproximate(t, ts.Range(tu(37), tu2(100, 100e6)), 100) + checkApproximate(t, ts.Range(tu(50), tu2(100, 100e6)), 100) + checkApproximate(t, ts.Range(tu(99), tu2(100, 100e6)), 100) + checkApproximate(t, ts.Total(), 210) + + for i, l := range ts.ComputeRange(tu(36), tu(100), 64) { + if i == 63 { + checkApproximate(t, l, 100) + } else { + checkApproximate(t, l, 0) + } + } + + checkApproximate(t, ts.Range(tu(0), tu(100)), 210) + checkApproximate(t, ts.Range(tu(10), tu(100)), 100) + + for i, l := range ts.ComputeRange(tu(0), tu(100), 100) { + if i < 10 { + checkApproximate(t, l, 11) + } else if i >= 90 { + checkApproximate(t, l, 10) + } else { + checkApproximate(t, l, 0) + } + } +} + +func TestFloat(t *testing.T) { + f := Float(1) + if g, w := f.String(), "1"; g != w { + t.Errorf("Float(1).String = %q; want %q", g, w) + } + f2 := Float(2) + var o Observable = &f2 + f.Add(o) + if g, w := f.Value(), 3.0; g != w { + t.Errorf("Float post-add = %v; want %v", g, w) + } + f.Multiply(2) + if g, w := f.Value(), 6.0; g != w { + t.Errorf("Float post-multiply = %v; want %v", g, w) + } + f.Clear() + if g, w := f.Value(), 0.0; g != w { + t.Errorf("Float post-clear = %v; want %v", g, w) + } + f.CopyFrom(&f2) + if g, w := f.Value(), 2.0; g != w { + t.Errorf("Float post-CopyFrom = %v; want %v", g, w) + } +} + +type mockClock struct { + time time.Time +} + +func (m *mockClock) Time() time.Time { return m.time } +func (m *mockClock) Set(t time.Time) { m.time = t } + +const buckets = 6 + +var testResolutions = []time.Duration{ + 10 * time.Second, // level holds one minute of observations + 100 * 
time.Second, // level holds ten minutes of observations + 10 * time.Minute, // level holds one hour of observations +} + +// TestTimeSeries uses a small number of buckets to force a higher +// error rate on approximations from the timeseries. +type TestTimeSeries struct { + timeSeries +} + +func TestExpectedErrorRate(t *testing.T) { + ts := new(TestTimeSeries) + fake := new(mockClock) + fake.Set(time.Now()) + ts.timeSeries.init(testResolutions, NewFloat, buckets, fake) + for i := 1; i <= 61*61; i++ { + fake.Set(fake.Time().Add(1 * time.Second)) + ob := Float(1) + ts.AddWithTime(&ob, fake.Time()) + + // The results should be accurate within one missing bucket (1/6) of the observations recorded. + checkNear(t, ts.Latest(0, buckets), min(float64(i), 60), 10) + checkNear(t, ts.Latest(1, buckets), min(float64(i), 600), 100) + checkNear(t, ts.Latest(2, buckets), min(float64(i), 3600), 600) + } +} + +func min(a, b float64) float64 { + if a < b { + return a + } + return b +} diff --git a/vendor/golang.org/x/net/ipv4/batch.go b/vendor/golang.org/x/net/ipv4/batch.go new file mode 100644 index 0000000..b445499 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/batch.go @@ -0,0 +1,191 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package ipv4 + +import ( + "net" + "runtime" + "syscall" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of +// PacketConn are not implemented. + +// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of +// RawConn are not implemented. + +// A Message represents an IO message. +// +// type Message struct { +// Buffers [][]byte +// OOB []byte +// Addr net.Addr +// N int +// NN int +// Flags int +// } +// +// The Buffers fields represents a list of contiguous buffers, which +// can be used for vectored IO, for example, putting a header and a +// payload in each slice. +// When writing, the Buffers field must contain at least one byte to +// write. +// When reading, the Buffers field will always contain a byte to read. +// +// The OOB field contains protocol-specific control or miscellaneous +// ancillary data known as out-of-band data. +// It can be nil when not required. +// +// The Addr field specifies a destination address when writing. +// It can be nil when the underlying protocol of the endpoint uses +// connection-oriented communication. +// After a successful read, it may contain the source address on the +// received packet. +// +// The N field indicates the number of bytes read or written from/to +// Buffers. +// +// The NN field indicates the number of bytes read or written from/to +// OOB. +// +// The Flags field contains protocol-specific information on the +// received message. +type Message = socket.Message + +// ReadBatch reads a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// On a successful read it returns the number of messages received, up +// to len(ms). +// +// On Linux, a batch read will be optimized. +// On other platforms, this method will read only a single message. +// +// Unlike the ReadFrom method, it doesn't strip the IPv4 header +// followed by option headers from the received IPv4 datagram when the +// underlying transport is net.IPConn. Each Buffers field of Message +// must be large enough to accommodate an IPv4 header and option +// headers. 
+func (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.RecvMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.RecvMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// WriteBatch writes a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// It returns the number of messages written on a successful write. +// +// On Linux, a batch write will be optimized. +// On other platforms, this method will write only a single message. +func (c *payloadHandler) WriteBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.SendMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.SendMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// ReadBatch reads a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// On a successful read it returns the number of messages received, up +// to len(ms). +// +// On Linux, a batch read will be optimized. +// On other platforms, this method will read only a single message. +func (c *packetHandler) ReadBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.RecvMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.RecvMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// WriteBatch writes a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// It returns the number of messages written on a successful write. +// +// On Linux, a batch write will be optimized. +// On other platforms, this method will write only a single message. 
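In practice both batch methods are driven the same way from user code; a hedged sketch (error handling abbreviated, loopback UDP endpoint as a placeholder, and on non-Linux platforms each call moves a single message, as the comments above note):

c, err := net.ListenPacket("udp4", "127.0.0.1:0") // placeholder address
if err != nil {
	// handle error
}
defer c.Close()
p := ipv4.NewPacketConn(c)
ms := []ipv4.Message{
	{Buffers: [][]byte{make([]byte, 1500)}},
	{Buffers: [][]byte{make([]byte, 1500)}},
}
n, err := p.ReadBatch(ms, 0) // fills in ms[i].N and ms[i].Addr for the first n messages
for i := 0; i < n; i++ {
	payload := ms[i].Buffers[0][:ms[i].N]
	_ = payload // process the i'th datagram
}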
+func (c *packetHandler) WriteBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.SendMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.SendMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + } +} diff --git a/vendor/golang.org/x/net/ipv4/bpf_test.go b/vendor/golang.org/x/net/ipv4/bpf_test.go new file mode 100644 index 0000000..b44da90 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/bpf_test.go @@ -0,0 +1,93 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "net" + "runtime" + "testing" + "time" + + "golang.org/x/net/bpf" + "golang.org/x/net/ipv4" +) + +func TestBPF(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skipf("not supported on %s", runtime.GOOS) + } + + l, err := net.ListenPacket("udp4", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer l.Close() + + p := ipv4.NewPacketConn(l) + + // This filter accepts UDP packets whose first payload byte is + // even. + prog, err := bpf.Assemble([]bpf.Instruction{ + // Load the first byte of the payload (skipping UDP header). + bpf.LoadAbsolute{Off: 8, Size: 1}, + // Select LSB of the byte. + bpf.ALUOpConstant{Op: bpf.ALUOpAnd, Val: 1}, + // Byte is even? + bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0, SkipFalse: 1}, + // Accept. + bpf.RetConstant{Val: 4096}, + // Ignore. + bpf.RetConstant{Val: 0}, + }) + if err != nil { + t.Fatalf("compiling BPF: %s", err) + } + + if err = p.SetBPF(prog); err != nil { + t.Fatalf("attaching filter to Conn: %s", err) + } + + s, err := net.Dial("udp4", l.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + defer s.Close() + go func() { + for i := byte(0); i < 10; i++ { + s.Write([]byte{i}) + } + }() + + l.SetDeadline(time.Now().Add(2 * time.Second)) + seen := make([]bool, 5) + for { + var b [512]byte + n, _, err := l.ReadFrom(b[:]) + if err != nil { + t.Fatalf("reading from listener: %s", err) + } + if n != 1 { + t.Fatalf("unexpected packet length, want 1, got %d", n) + } + if b[0] >= 10 { + t.Fatalf("unexpected byte, want 0-9, got %d", b[0]) + } + if b[0]%2 != 0 { + t.Fatalf("got odd byte %d, wanted only even bytes", b[0]) + } + seen[b[0]/2] = true + + seenAll := true + for _, v := range seen { + if !v { + seenAll = false + break + } + } + if seenAll { + break + } + } +} diff --git a/vendor/golang.org/x/net/ipv4/control.go b/vendor/golang.org/x/net/ipv4/control.go new file mode 100644 index 0000000..a2b02ca --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control.go @@ -0,0 +1,144 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4 + +import ( + "fmt" + "net" + "sync" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +type rawOpt struct { + sync.RWMutex + cflags ControlFlags +} + +func (c *rawOpt) set(f ControlFlags) { c.cflags |= f } +func (c *rawOpt) clear(f ControlFlags) { c.cflags &^= f } +func (c *rawOpt) isset(f ControlFlags) bool { return c.cflags&f != 0 } + +type ControlFlags uint + +const ( + FlagTTL ControlFlags = 1 << iota // pass the TTL on the received packet + FlagSrc // pass the source address on the received packet + FlagDst // pass the destination address on the received packet + FlagInterface // pass the interface index on the received packet +) + +// A ControlMessage represents per-packet IP-level socket options. +type ControlMessage struct { + // Receiving socket options: SetControlMessage allows the + // application to receive the options from the protocol stack + // using the ReadFrom method of PacketConn or RawConn. + // + // Specifying socket options: passing a ControlMessage to the + // WriteTo method of PacketConn or RawConn allows the + // application to send the options to the protocol stack. + // + TTL int // time-to-live, receiving only + Src net.IP // source address, specifying only + Dst net.IP // destination address, receiving only + IfIndex int // interface index, must be 1 <= value when specifying +} + +func (cm *ControlMessage) String() string { + if cm == nil { + return "" + } + return fmt.Sprintf("ttl=%d src=%v dst=%v ifindex=%d", cm.TTL, cm.Src, cm.Dst, cm.IfIndex) +} + +// Marshal returns the binary encoding of cm. +func (cm *ControlMessage) Marshal() []byte { + if cm == nil { + return nil + } + var m socket.ControlMessage + if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To4() != nil || cm.IfIndex > 0) { + m = socket.NewControlMessage([]int{ctlOpts[ctlPacketInfo].length}) + } + if len(m) > 0 { + ctlOpts[ctlPacketInfo].marshal(m, cm) + } + return m +} + +// Parse parses b as a control message and stores the result in cm. +func (cm *ControlMessage) Parse(b []byte) error { + ms, err := socket.ControlMessage(b).Parse() + if err != nil { + return err + } + for _, m := range ms { + lvl, typ, l, err := m.ParseHeader() + if err != nil { + return err + } + if lvl != iana.ProtocolIP { + continue + } + switch { + case typ == ctlOpts[ctlTTL].name && l >= ctlOpts[ctlTTL].length: + ctlOpts[ctlTTL].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlDst].name && l >= ctlOpts[ctlDst].length: + ctlOpts[ctlDst].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlInterface].name && l >= ctlOpts[ctlInterface].length: + ctlOpts[ctlInterface].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlPacketInfo].name && l >= ctlOpts[ctlPacketInfo].length: + ctlOpts[ctlPacketInfo].parse(cm, m.Data(l)) + } + } + return nil +} + +// NewControlMessage returns a new control message. +// +// The returned message is large enough for the options specified by cf.
+func NewControlMessage(cf ControlFlags) []byte { + opt := rawOpt{cflags: cf} + var l int + if opt.isset(FlagTTL) && ctlOpts[ctlTTL].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlTTL].length) + } + if ctlOpts[ctlPacketInfo].name > 0 { + if opt.isset(FlagSrc | FlagDst | FlagInterface) { + l += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length) + } + } else { + if opt.isset(FlagDst) && ctlOpts[ctlDst].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlDst].length) + } + if opt.isset(FlagInterface) && ctlOpts[ctlInterface].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlInterface].length) + } + } + var b []byte + if l > 0 { + b = make([]byte, l) + } + return b +} + +// Ancillary data socket options +const ( + ctlTTL = iota // header field + ctlSrc // header field + ctlDst // header field + ctlInterface // inbound or outbound interface + ctlPacketInfo // inbound or outbound packet path + ctlMax +) + +// A ctlOpt represents a binding for ancillary data socket option. +type ctlOpt struct { + name int // option name, must be equal or greater than 1 + length int // option length + marshal func([]byte, *ControlMessage) []byte + parse func(*ControlMessage, []byte) +} diff --git a/vendor/golang.org/x/net/ipv4/control_bsd.go b/vendor/golang.org/x/net/ipv4/control_bsd.go new file mode 100644 index 0000000..77e7ad5 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_bsd.go @@ -0,0 +1,40 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package ipv4 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshalDst(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_RECVDSTADDR, net.IPv4len) + return m.Next(net.IPv4len) +} + +func parseDst(cm *ControlMessage, b []byte) { + if len(cm.Dst) < net.IPv4len { + cm.Dst = make(net.IP, net.IPv4len) + } + copy(cm.Dst, b[:net.IPv4len]) +} + +func marshalInterface(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_RECVIF, syscall.SizeofSockaddrDatalink) + return m.Next(syscall.SizeofSockaddrDatalink) +} + +func parseInterface(cm *ControlMessage, b []byte) { + sadl := (*syscall.SockaddrDatalink)(unsafe.Pointer(&b[0])) + cm.IfIndex = int(sadl.Index) +} diff --git a/vendor/golang.org/x/net/ipv4/control_pktinfo.go b/vendor/golang.org/x/net/ipv4/control_pktinfo.go new file mode 100644 index 0000000..425338f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_pktinfo.go @@ -0,0 +1,39 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
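Putting the pieces of control.go together at runtime: SetControlMessage turns on reception of the requested metadata, and each ReadFrom then yields a *ControlMessage decoded from the kernel's ancillary data by Parse above. A sketch (addresses and port are illustrative):

package main

import (
    "log"
    "net"

    "golang.org/x/net/ipv4"
)

func main() {
    c, err := net.ListenPacket("udp4", "0.0.0.0:1024")
    if err != nil {
        log.Fatal(err)
    }
    defer c.Close()
    p := ipv4.NewPacketConn(c)

    // Ask the stack to attach TTL, destination and interface
    // metadata to every received datagram.
    if err := p.SetControlMessage(ipv4.FlagTTL|ipv4.FlagDst|ipv4.FlagInterface, true); err != nil {
        log.Fatal(err)
    }
    b := make([]byte, 1500)
    n, cm, src, err := p.ReadFrom(b)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("%d bytes from %v: %v", n, src, cm) // cm prints ttl/src/dst/ifindex
}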
+ +// +build darwin linux solaris + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_PKTINFO, sizeofInetPktinfo) + if cm != nil { + pi := (*inetPktinfo)(unsafe.Pointer(&m.Data(sizeofInetPktinfo)[0])) + if ip := cm.Src.To4(); ip != nil { + copy(pi.Spec_dst[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return m.Next(sizeofInetPktinfo) +} + +func parsePacketInfo(cm *ControlMessage, b []byte) { + pi := (*inetPktinfo)(unsafe.Pointer(&b[0])) + cm.IfIndex = int(pi.Ifindex) + if len(cm.Dst) < net.IPv4len { + cm.Dst = make(net.IP, net.IPv4len) + } + copy(cm.Dst, pi.Addr[:]) +} diff --git a/vendor/golang.org/x/net/ipv4/control_stub.go b/vendor/golang.org/x/net/ipv4/control_stub.go new file mode 100644 index 0000000..5a2f7d8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_stub.go @@ -0,0 +1,13 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/control_test.go b/vendor/golang.org/x/net/ipv4/control_test.go new file mode 100644 index 0000000..f87fe12 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_test.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "testing" + + "golang.org/x/net/ipv4" +) + +func TestControlMessageParseWithFuzz(t *testing.T) { + var cm ipv4.ControlMessage + for _, fuzz := range []string{ + "\f\x00\x00\x00\x00\x00\x00\x00\x14\x00\x00\x00", + "\f\x00\x00\x00\x00\x00\x00\x00\x1a\x00\x00\x00", + } { + cm.Parse([]byte(fuzz)) + } +} diff --git a/vendor/golang.org/x/net/ipv4/control_unix.go b/vendor/golang.org/x/net/ipv4/control_unix.go new file mode 100644 index 0000000..e1ae816 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_unix.go @@ -0,0 +1,73 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
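On the platforms covered by control_pktinfo.go (darwin, linux, solaris), IP_PKTINFO also works in the send direction: a ControlMessage passed to WriteTo can pin the source address and egress interface per packet, via marshalPacketInfo above. A sketch, under the assumption that an interface named en0 exists and that 192.168.0.1 is assigned to it (destination is illustrative):

package main

import (
    "log"
    "net"

    "golang.org/x/net/ipv4"
)

func main() {
    c, err := net.ListenPacket("udp4", "0.0.0.0:0")
    if err != nil {
        log.Fatal(err)
    }
    defer c.Close()
    p := ipv4.NewPacketConn(c)

    en0, err := net.InterfaceByName("en0") // interface name assumed
    if err != nil {
        log.Fatal(err)
    }
    dst := &net.UDPAddr{IP: net.IPv4(192, 0, 2, 1), Port: 1024} // illustrative
    cm := &ipv4.ControlMessage{
        Src:     net.IPv4(192, 168, 0, 1), // assumed to be assigned to en0
        IfIndex: en0.Index,
    }
    if _, err := p.WriteTo([]byte("HELLO-R-U-THERE"), cm, dst); err != nil {
        log.Fatal(err)
    }
}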
+ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package ipv4 + +import ( + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + opt.Lock() + defer opt.Unlock() + if so, ok := sockOpts[ssoReceiveTTL]; ok && cf&FlagTTL != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagTTL) + } else { + opt.clear(FlagTTL) + } + } + if so, ok := sockOpts[ssoPacketInfo]; ok { + if cf&(FlagSrc|FlagDst|FlagInterface) != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(cf & (FlagSrc | FlagDst | FlagInterface)) + } else { + opt.clear(cf & (FlagSrc | FlagDst | FlagInterface)) + } + } + } else { + if so, ok := sockOpts[ssoReceiveDst]; ok && cf&FlagDst != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagDst) + } else { + opt.clear(FlagDst) + } + } + if so, ok := sockOpts[ssoReceiveInterface]; ok && cf&FlagInterface != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagInterface) + } else { + opt.clear(FlagInterface) + } + } + } + return nil +} + +func marshalTTL(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_RECVTTL, 1) + return m.Next(1) +} + +func parseTTL(cm *ControlMessage, b []byte) { + cm.TTL = int(*(*byte)(unsafe.Pointer(&b[:1][0]))) +} diff --git a/vendor/golang.org/x/net/ipv4/control_windows.go b/vendor/golang.org/x/net/ipv4/control_windows.go new file mode 100644 index 0000000..ce55c66 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_windows.go @@ -0,0 +1,16 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "syscall" + + "golang.org/x/net/internal/socket" +) + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + // TODO(mikio): implement this + return syscall.EWINDOWS +} diff --git a/vendor/golang.org/x/net/ipv4/defs_darwin.go b/vendor/golang.org/x/net/ipv4/defs_darwin.go new file mode 100644 index 0000000..c8f2e05 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_darwin.go @@ -0,0 +1,77 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
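Since control_windows.go above is a stub returning syscall.EWINDOWS, and control_stub.go returns errOpNoSupport on other unlisted platforms, portable callers may want to treat a SetControlMessage failure as a degraded mode rather than a fatal error. One possible pattern, not the package's prescription:

package main

import (
    "log"
    "net"

    "golang.org/x/net/ipv4"
)

func main() {
    c, err := net.ListenPacket("udp4", "0.0.0.0:0")
    if err != nil {
        log.Fatal(err)
    }
    defer c.Close()
    p := ipv4.NewPacketConn(c)
    // Illustrative policy: fall back to reading payloads without
    // per-packet metadata on platforms that cannot deliver it.
    if err := p.SetControlMessage(ipv4.FlagTTL|ipv4.FlagDst, true); err != nil {
        log.Printf("per-packet metadata unavailable: %v", err)
    }
}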
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include <sys/socket.h> + +#include <netinet/in.h> +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_STRIPHDR = C.IP_STRIPHDR + sysIP_RECVTTL = C.IP_RECVTTL + sysIP_BOUND_IF = C.IP_BOUND_IF + sysIP_PKTINFO = C.IP_PKTINFO + sysIP_RECVPKTINFO = C.IP_RECVPKTINFO + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF + sysIP_MULTICAST_IFINDEX = C.IP_MULTICAST_IFINDEX + sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP + sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP + sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE + sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofInetPktinfo = C.sizeof_struct_in_pktinfo + + sizeofIPMreq = C.sizeof_struct_ip_mreq + sizeofIPMreqn = C.sizeof_struct_ip_mreqn + sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet C.struct_sockaddr_in + +type inetPktinfo C.struct_in_pktinfo + +type ipMreq C.struct_ip_mreq + +type ipMreqn C.struct_ip_mreqn + +type ipMreqSource C.struct_ip_mreq_source + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv4/defs_dragonfly.go b/vendor/golang.org/x/net/ipv4/defs_dragonfly.go new file mode 100644 index 0000000..f30544e --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_dragonfly.go @@ -0,0 +1,38 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include <netinet/in.h> +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_RECVTTL = C.IP_RECVTTL + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + + sizeofIPMreq = C.sizeof_struct_ip_mreq +) + +type ipMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_freebsd.go b/vendor/golang.org/x/net/ipv4/defs_freebsd.go new file mode 100644 index 0000000..4dd57d8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_freebsd.go @@ -0,0 +1,75 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include <sys/socket.h> + +#include <netinet/in.h> +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_SENDSRCADDR = C.IP_SENDSRCADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_ONESBCAST = C.IP_ONESBCAST + sysIP_BINDANY = C.IP_BINDANY + sysIP_RECVTTL = C.IP_RECVTTL + sysIP_MINTTL = C.IP_MINTTL + sysIP_DONTFRAG = C.IP_DONTFRAG + sysIP_RECVTOS = C.IP_RECVTOS + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF + sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP + sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP + sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE + sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + + sizeofIPMreq = C.sizeof_struct_ip_mreq + sizeofIPMreqn = C.sizeof_struct_ip_mreqn + sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet C.struct_sockaddr_in + +type ipMreq C.struct_ip_mreq + +type ipMreqn C.struct_ip_mreqn + +type ipMreqSource C.struct_ip_mreq_source + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv4/defs_linux.go b/vendor/golang.org/x/net/ipv4/defs_linux.go new file mode 100644 index 0000000..beb1107 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_linux.go @@ -0,0 +1,122 @@ +// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include <time.h> + +#include <linux/errqueue.h> +#include <linux/icmp.h> +#include <linux/in.h> +#include <linux/filter.h> +#include <sys/socket.h> +*/ +import "C" + +const ( + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_ROUTER_ALERT = C.IP_ROUTER_ALERT + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_PKTINFO = C.IP_PKTINFO + sysIP_PKTOPTIONS = C.IP_PKTOPTIONS + sysIP_MTU_DISCOVER = C.IP_MTU_DISCOVER + sysIP_RECVERR = C.IP_RECVERR + sysIP_RECVTTL = C.IP_RECVTTL + sysIP_RECVTOS = C.IP_RECVTOS + sysIP_MTU = C.IP_MTU + sysIP_FREEBIND = C.IP_FREEBIND + sysIP_TRANSPARENT = C.IP_TRANSPARENT + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_ORIGDSTADDR = C.IP_ORIGDSTADDR + sysIP_RECVORIGDSTADDR = C.IP_RECVORIGDSTADDR + sysIP_MINTTL = C.IP_MINTTL + sysIP_NODEFRAG = C.IP_NODEFRAG + sysIP_UNICAST_IF = C.IP_UNICAST_IF + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE + sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE + sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP + sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP + sysIP_MSFILTER = C.IP_MSFILTER + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + sysMCAST_MSFILTER = C.MCAST_MSFILTER + sysIP_MULTICAST_ALL = C.IP_MULTICAST_ALL + + //sysIP_PMTUDISC_DONT = C.IP_PMTUDISC_DONT + //sysIP_PMTUDISC_WANT = C.IP_PMTUDISC_WANT + //sysIP_PMTUDISC_DO = C.IP_PMTUDISC_DO + //sysIP_PMTUDISC_PROBE = C.IP_PMTUDISC_PROBE + //sysIP_PMTUDISC_INTERFACE = C.IP_PMTUDISC_INTERFACE + //sysIP_PMTUDISC_OMIT = C.IP_PMTUDISC_OMIT + + sysICMP_FILTER = C.ICMP_FILTER + + sysSO_EE_ORIGIN_NONE = C.SO_EE_ORIGIN_NONE + sysSO_EE_ORIGIN_LOCAL = C.SO_EE_ORIGIN_LOCAL + sysSO_EE_ORIGIN_ICMP = C.SO_EE_ORIGIN_ICMP + sysSO_EE_ORIGIN_ICMP6 = C.SO_EE_ORIGIN_ICMP6 + sysSO_EE_ORIGIN_TXSTATUS = C.SO_EE_ORIGIN_TXSTATUS + sysSO_EE_ORIGIN_TIMESTAMPING = C.SO_EE_ORIGIN_TIMESTAMPING + + sysSOL_SOCKET = C.SOL_SOCKET + sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER + + sizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofInetPktinfo = C.sizeof_struct_in_pktinfo + sizeofSockExtendedErr = C.sizeof_struct_sock_extended_err + + sizeofIPMreq = C.sizeof_struct_ip_mreq + sizeofIPMreqn = C.sizeof_struct_ip_mreqn + sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sizeofICMPFilter = C.sizeof_struct_icmp_filter + + sizeofSockFprog = C.sizeof_struct_sock_fprog +) + +type kernelSockaddrStorage C.struct___kernel_sockaddr_storage + +type sockaddrInet C.struct_sockaddr_in + +type inetPktinfo C.struct_in_pktinfo + +type sockExtendedErr C.struct_sock_extended_err + +type ipMreq C.struct_ip_mreq + +type ipMreqn C.struct_ip_mreqn + +type ipMreqSource C.struct_ip_mreq_source + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req + +type icmpFilter C.struct_icmp_filter + +type sockFProg C.struct_sock_fprog + +type sockFilter C.struct_sock_filter diff --git a/vendor/golang.org/x/net/ipv4/defs_netbsd.go b/vendor/golang.org/x/net/ipv4/defs_netbsd.go new file mode 100644 index 0000000..8f8af1b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_netbsd.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include <netinet/in.h> +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_RECVTTL = C.IP_RECVTTL + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + + sizeofIPMreq = C.sizeof_struct_ip_mreq +) + +type ipMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_openbsd.go b/vendor/golang.org/x/net/ipv4/defs_openbsd.go new file mode 100644 index 0000000..8f8af1b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_openbsd.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include <netinet/in.h> +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_RECVTTL = C.IP_RECVTTL + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + + sizeofIPMreq = C.sizeof_struct_ip_mreq +) + +type ipMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_solaris.go b/vendor/golang.org/x/net/ipv4/defs_solaris.go new file mode 100644 index 0000000..aeb33e9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_solaris.go @@ -0,0 +1,84 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
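A note on these defs_*.go files: they carry a +build ignore tag and are never compiled into the package. They are templates for cgo's -godefs mode, which resolves each C.sizeof_*/C.struct_* reference against the platform headers and emits the checked-in zsys_*.go files. The gen.go program later in this patch automates exactly that; a condensed sketch of the step it runs (file names assumed for illustration):

package main

import (
    "go/format"
    "io/ioutil"
    "log"
    "os/exec"
)

func main() {
    // Same steps as genzsys in gen.go below.
    out, err := exec.Command("go", "tool", "cgo", "-godefs", "defs_linux.go").Output()
    if err != nil {
        log.Fatal(err)
    }
    src, err := format.Source(out) // gofmt the generated code
    if err != nil {
        log.Fatal(err)
    }
    if err := ioutil.WriteFile("zsys_linux_amd64.go", src, 0644); err != nil {
        log.Fatal(err)
    }
}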
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include <sys/socket.h> + +#include <netinet/in.h> +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_RECVSLLA = C.IP_RECVSLLA + sysIP_RECVTTL = C.IP_RECVTTL + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE + sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE + sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP + sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP + sysIP_NEXTHOP = C.IP_NEXTHOP + + sysIP_PKTINFO = C.IP_PKTINFO + sysIP_RECVPKTINFO = C.IP_RECVPKTINFO + sysIP_DONTFRAG = C.IP_DONTFRAG + + sysIP_BOUND_IF = C.IP_BOUND_IF + sysIP_UNSPEC_SRC = C.IP_UNSPEC_SRC + sysIP_BROADCAST_TTL = C.IP_BROADCAST_TTL + sysIP_DHCPINIT_IF = C.IP_DHCPINIT_IF + + sysIP_REUSEADDR = C.IP_REUSEADDR + sysIP_DONTROUTE = C.IP_DONTROUTE + sysIP_BROADCAST = C.IP_BROADCAST + + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofInetPktinfo = C.sizeof_struct_in_pktinfo + + sizeofIPMreq = C.sizeof_struct_ip_mreq + sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet C.struct_sockaddr_in + +type inetPktinfo C.struct_in_pktinfo + +type ipMreq C.struct_ip_mreq + +type ipMreqSource C.struct_ip_mreq_source + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv4/dgramopt.go b/vendor/golang.org/x/net/ipv4/dgramopt.go new file mode 100644 index 0000000..54d77d5 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/dgramopt.go @@ -0,0 +1,265 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/bpf" +) + +// MulticastTTL returns the time-to-live field value for outgoing +// multicast packets. +func (c *dgramOpt) MulticastTTL() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastTTL] + if !ok { + return 0, errOpNoSupport + } + return so.GetInt(c.Conn) +} + +// SetMulticastTTL sets the time-to-live field value for future +// outgoing multicast packets. +func (c *dgramOpt) SetMulticastTTL(ttl int) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastTTL] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, ttl) +} + +// MulticastInterface returns the default interface for multicast +// packet transmissions.
+func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { + if !c.ok() { + return nil, syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return nil, errOpNoSupport + } + return so.getMulticastInterface(c.Conn) +} + +// SetMulticastInterface sets the default interface for future +// multicast packet transmissions. +func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return errOpNoSupport + } + return so.setMulticastInterface(c.Conn, ifi) +} + +// MulticastLoopback reports whether transmitted multicast packets +// should be copied and sent back to the originator. +func (c *dgramOpt) MulticastLoopback() (bool, error) { + if !c.ok() { + return false, syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return false, errOpNoSupport + } + on, err := so.GetInt(c.Conn) + if err != nil { + return false, err + } + return on == 1, nil +} + +// SetMulticastLoopback sets whether transmitted multicast packets +// should be copied and sent back to the originator. +func (c *dgramOpt) SetMulticastLoopback(on bool) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, boolint(on)) +} + +// JoinGroup joins the group address group on the interface ifi. +// By default all sources that can cast data to group are accepted. +// It's possible to mute and unmute data transmission from a specific +// source by using ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup. +// JoinGroup uses the system-assigned multicast interface when ifi is +// nil, although this is not recommended because the assignment +// depends on platforms and sometimes it might require routing +// configuration. +func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoJoinGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + return so.setGroup(c.Conn, ifi, grp) +} + +// LeaveGroup leaves the group address group on the interface ifi +// regardless of whether the group is an any-source group or a +// source-specific group. +func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoLeaveGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + return so.setGroup(c.Conn, ifi, grp) +} + +// JoinSourceSpecificGroup joins the source-specific group comprising +// group and source on the interface ifi. +// JoinSourceSpecificGroup uses the system-assigned multicast +// interface when ifi is nil, although this is not recommended because +// the assignment depends on platforms and sometimes it might require +// routing configuration. +func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoJoinSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// LeaveSourceSpecificGroup leaves the source-specific group on the +// interface ifi.
+func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoLeaveSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// ExcludeSourceSpecificGroup excludes the source-specific group from +// the any-source groups already joined by JoinGroup on the interface +// ifi. +func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoBlockSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// IncludeSourceSpecificGroup re-includes a source-specific group +// previously excluded by ExcludeSourceSpecificGroup on the interface +// ifi. +func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoUnblockSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// ICMPFilter returns an ICMP filter. +// Currently only Linux supports this. +func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { + if !c.ok() { + return nil, syscall.EINVAL + } + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return nil, errOpNoSupport + } + return so.getICMPFilter(c.Conn) +} + +// SetICMPFilter deploys the ICMP filter. +// Currently only Linux supports this. +func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return errOpNoSupport + } + return so.setICMPFilter(c.Conn, f) +} + +// SetBPF attaches a BPF program to the connection. +// +// Only supported on Linux. +func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoAttachFilter] + if !ok { + return errOpNoSupport + } + return so.setBPF(c.Conn, filter) +} diff --git a/vendor/golang.org/x/net/ipv4/doc.go b/vendor/golang.org/x/net/ipv4/doc.go new file mode 100644 index 0000000..b43935a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/doc.go @@ -0,0 +1,244 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ipv4 implements IP-level socket options for the Internet +// Protocol version 4. +// +// The package provides IP-level socket options that allow +// manipulation of IPv4 facilities. +// +// The IPv4 protocol and basic host requirements for IPv4 are defined +// in RFC 791 and RFC 1122. +// Host extensions for multicasting and socket interface extensions +// for multicast source filters are defined in RFC 1112 and RFC 3678. +// IGMPv1, IGMPv2 and IGMPv3 are defined in RFC 1112, RFC 2236 and RFC +// 3376. +// Source-specific multicast is defined in RFC 4607.
+// +// +// Unicasting +// +// The options for unicasting are available for net.TCPConn, +// net.UDPConn and net.IPConn, which are created as network connections +// that use the IPv4 transport. When a single TCP connection carrying +// a data flow of multiple packets needs to indicate the flow is +// important, Conn is used to set the type-of-service field on the +// IPv4 header for each packet. +// +// ln, err := net.Listen("tcp4", "0.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer ln.Close() +// for { +// c, err := ln.Accept() +// if err != nil { +// // error handling +// } +// go func(c net.Conn) { +// defer c.Close() +// +// The outgoing packets will be labeled DiffServ assured forwarding +// class 1 low drop precedence, known as AF11 packets. +// +// if err := ipv4.NewConn(c).SetTOS(0x28); err != nil { +// // error handling +// } +// if _, err := c.Write(data); err != nil { +// // error handling +// } +// }(c) +// } +// +// +// Multicasting +// +// The options for multicasting are available for net.UDPConn and +// net.IPConn, which are created as network connections that use the +// IPv4 transport. A few network facilities must be prepared before +// you begin multicasting, at a minimum joining network interfaces and +// multicast groups. +// +// en0, err := net.InterfaceByName("en0") +// if err != nil { +// // error handling +// } +// en1, err := net.InterfaceByIndex(911) +// if err != nil { +// // error handling +// } +// group := net.IPv4(224, 0, 0, 250) +// +// First, an application listens to an appropriate address with an +// appropriate service port. +// +// c, err := net.ListenPacket("udp4", "0.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// +// Second, the application joins multicast groups and starts listening +// to the groups on the specified network interfaces. Note that the +// transport-layer service port does not matter for this operation, as +// joining groups affects only network- and link-layer protocols such +// as IPv4 and Ethernet. +// +// p := ipv4.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// +// The application might enable per-packet control message exchanges +// with the protocol stack within the kernel. When the application +// needs a destination address on an incoming packet, +// SetControlMessage of PacketConn is used to enable control message +// transmissions. +// +// if err := p.SetControlMessage(ipv4.FlagDst, true); err != nil { +// // error handling +// } +// +// The application can identify whether the received packets are +// of interest by using the control message that contains the +// destination address of the received packet. +// +// b := make([]byte, 1500) +// for { +// n, cm, src, err := p.ReadFrom(b) +// if err != nil { +// // error handling +// } +// if cm.Dst.IsMulticast() { +// if cm.Dst.Equal(group) { +// // joined group, do something +// } else { +// // unknown group, discard +// continue +// } +// } +// +// The application can also send both unicast and multicast packets.
+// +// p.SetTOS(0x0) +// p.SetTTL(16) +// if _, err := p.WriteTo(data, nil, src); err != nil { +// // error handling +// } +// dst := &net.UDPAddr{IP: group, Port: 1024} +// for _, ifi := range []*net.Interface{en0, en1} { +// if err := p.SetMulticastInterface(ifi); err != nil { +// // error handling +// } +// p.SetMulticastTTL(2) +// if _, err := p.WriteTo(data, nil, dst); err != nil { +// // error handling +// } +// } +// } +// +// +// More multicasting +// +// An application that uses PacketConn or RawConn may join multiple +// multicast groups. For example, a UDP listener with port 1024 might +// join two different groups across two different network +// interfaces by using: +// +// c, err := net.ListenPacket("udp4", "0.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// p := ipv4.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}); err != nil { +// // error handling +// } +// +// It is possible for multiple UDP listeners that listen on the same +// UDP port to join the same multicast group. The net package will +// provide a socket that listens to a wildcard address with a reusable +// UDP port when an appropriate multicast address prefix is passed to +// net.ListenPacket or net.ListenUDP. +// +// c1, err := net.ListenPacket("udp4", "224.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c1.Close() +// c2, err := net.ListenPacket("udp4", "224.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c2.Close() +// p1 := ipv4.NewPacketConn(c1) +// if err := p1.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// p2 := ipv4.NewPacketConn(c2) +// if err := p2.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// +// It is also possible for the application to leave or rejoin a +// multicast group on the network interface. +// +// if err := p.LeaveGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 250)}); err != nil { +// // error handling +// } +// +// +// Source-specific multicasting +// +// An application that uses PacketConn or RawConn on an IGMPv3-supported +// platform is able to join source-specific multicast groups. +// The application may use JoinSourceSpecificGroup and +// LeaveSourceSpecificGroup for the operation known as "include" mode, +// +// ssmgroup := net.UDPAddr{IP: net.IPv4(232, 7, 8, 9)} +// ssmsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 1)} +// if err := p.JoinSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// if err := p.LeaveSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// +// or JoinGroup, ExcludeSourceSpecificGroup, +// IncludeSourceSpecificGroup and LeaveGroup for the operation known +// as "exclude" mode.
+// +// exclsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 254)} +// if err := p.JoinGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// if err := p.ExcludeSourceSpecificGroup(en0, &ssmgroup, &exclsource); err != nil { +// // error handling +// } +// if err := p.LeaveGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// +// Note that what happens when an application running on a platform +// without IGMPv3 support uses JoinSourceSpecificGroup and +// LeaveSourceSpecificGroup depends on the platform implementation. +// In general the platform tries to fall back to IGMPv1 or IGMPv2 +// and starts listening to multicast traffic. +// In the fallback case, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup may return an error. +package ipv4 // import "golang.org/x/net/ipv4" + +// BUG(mikio): This package is not implemented on NaCl and Plan 9. diff --git a/vendor/golang.org/x/net/ipv4/endpoint.go b/vendor/golang.org/x/net/ipv4/endpoint.go new file mode 100644 index 0000000..2ab8773 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/endpoint.go @@ -0,0 +1,187 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + "time" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the JoinSourceSpecificGroup, +// LeaveSourceSpecificGroup, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup methods of PacketConn and RawConn are +// not implemented. + +// A Conn represents a network endpoint that uses the IPv4 transport. +// It is used to control basic IP-level socket options such as TOS and +// TTL. +type Conn struct { + genericOpt +} + +type genericOpt struct { + *socket.Conn +} + +func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil } + +// NewConn returns a new Conn. +func NewConn(c net.Conn) *Conn { + cc, _ := socket.NewConn(c) + return &Conn{ + genericOpt: genericOpt{Conn: cc}, + } +} + +// A PacketConn represents a packet network endpoint that uses the +// IPv4 transport. It is used to control several IP-level socket +// options including multicasting. It also provides datagram-based +// network I/O methods specific to IPv4 and higher-layer protocols +// such as UDP. +type PacketConn struct { + genericOpt + dgramOpt + payloadHandler +} + +type dgramOpt struct { + *socket.Conn +} + +func (c *dgramOpt) ok() bool { return c != nil && c.Conn != nil } + +// SetControlMessage sets the per-packet IP-level socket options. +func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return setControlMessage(c.dgramOpt.Conn, &c.payloadHandler.rawOpt, cf, on) +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *PacketConn) SetDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.PacketConn.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *PacketConn) SetReadDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.PacketConn.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint.
+func (c *PacketConn) SetWriteDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.PacketConn.SetWriteDeadline(t) +} + +// Close closes the endpoint. +func (c *PacketConn) Close() error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.PacketConn.Close() +} + +// NewPacketConn returns a new PacketConn using c as its underlying +// transport. +func NewPacketConn(c net.PacketConn) *PacketConn { + cc, _ := socket.NewConn(c.(net.Conn)) + p := &PacketConn{ + genericOpt: genericOpt{Conn: cc}, + dgramOpt: dgramOpt{Conn: cc}, + payloadHandler: payloadHandler{PacketConn: c, Conn: cc}, + } + return p +} + +// A RawConn represents a packet network endpoint that uses the IPv4 +// transport. It is used to control several IP-level socket options +// including IPv4 header manipulation. It also provides datagram-based +// network I/O methods specific to IPv4 and higher-layer protocols +// that handle IPv4 datagrams directly, such as OSPF and GRE. +type RawConn struct { + genericOpt + dgramOpt + packetHandler +} + +// SetControlMessage sets the per-packet IP-level socket options. +func (c *RawConn) SetControlMessage(cf ControlFlags, on bool) error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return setControlMessage(c.dgramOpt.Conn, &c.packetHandler.rawOpt, cf, on) +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *RawConn) SetDeadline(t time.Time) error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return c.packetHandler.IPConn.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *RawConn) SetReadDeadline(t time.Time) error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return c.packetHandler.IPConn.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. +func (c *RawConn) SetWriteDeadline(t time.Time) error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return c.packetHandler.IPConn.SetWriteDeadline(t) +} + +// Close closes the endpoint. +func (c *RawConn) Close() error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return c.packetHandler.IPConn.Close() +} + +// NewRawConn returns a new RawConn using c as its underlying +// transport. +func NewRawConn(c net.PacketConn) (*RawConn, error) { + cc, err := socket.NewConn(c.(net.Conn)) + if err != nil { + return nil, err + } + r := &RawConn{ + genericOpt: genericOpt{Conn: cc}, + dgramOpt: dgramOpt{Conn: cc}, + packetHandler: packetHandler{IPConn: c.(*net.IPConn), Conn: cc}, + } + so, ok := sockOpts[ssoHeaderPrepend] + if !ok { + return nil, errOpNoSupport + } + if err := so.SetInt(r.dgramOpt.Conn, boolint(true)); err != nil { + return nil, err + } + return r, nil +} diff --git a/vendor/golang.org/x/net/ipv4/example_test.go b/vendor/golang.org/x/net/ipv4/example_test.go new file mode 100644 index 0000000..ddc7577 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/example_test.go @@ -0,0 +1,224 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
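endpoint.go above gives the package its three entry points, one per kind of socket. A sketch of choosing among them (addresses are illustrative; NewRawConn additionally requires raw-socket privileges and enables header prepending via ssoHeaderPrepend):

package main

import (
    "log"
    "net"

    "golang.org/x/net/ipv4"
)

func main() {
    // Connected TCP socket: TOS/TTL knobs via Conn.
    tc, err := net.Dial("tcp4", "192.0.2.1:80")
    if err != nil {
        log.Fatal(err)
    }
    defer tc.Close()
    _ = ipv4.NewConn(tc)

    // UDP socket: multicast and per-packet control via PacketConn.
    uc, err := net.ListenPacket("udp4", "0.0.0.0:1024")
    if err != nil {
        log.Fatal(err)
    }
    defer uc.Close()
    _ = ipv4.NewPacketConn(uc)

    // Raw IP socket: IPv4 header-level I/O via RawConn.
    rc, err := net.ListenPacket("ip4:89", "0.0.0.0")
    if err != nil {
        log.Fatal(err)
    }
    defer rc.Close()
    if _, err := ipv4.NewRawConn(rc); err != nil {
        log.Fatal(err)
    }
}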
+ +package ipv4_test + +import ( + "fmt" + "log" + "net" + "os" + "runtime" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/ipv4" +) + +func ExampleConn_markingTCP() { + ln, err := net.Listen("tcp", "0.0.0.0:1024") + if err != nil { + log.Fatal(err) + } + defer ln.Close() + + for { + c, err := ln.Accept() + if err != nil { + log.Fatal(err) + } + go func(c net.Conn) { + defer c.Close() + if c.RemoteAddr().(*net.TCPAddr).IP.To4() != nil { + p := ipv4.NewConn(c) + if err := p.SetTOS(0x28); err != nil { // DSCP AF11 + log.Fatal(err) + } + if err := p.SetTTL(128); err != nil { + log.Fatal(err) + } + } + if _, err := c.Write([]byte("HELLO-R-U-THERE-ACK")); err != nil { + log.Fatal(err) + } + }(c) + } +} + +func ExamplePacketConn_servingOneShotMulticastDNS() { + c, err := net.ListenPacket("udp4", "0.0.0.0:5353") // mDNS over UDP + if err != nil { + log.Fatal(err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + + en0, err := net.InterfaceByName("en0") + if err != nil { + log.Fatal(err) + } + mDNSLinkLocal := net.UDPAddr{IP: net.IPv4(224, 0, 0, 251)} + if err := p.JoinGroup(en0, &mDNSLinkLocal); err != nil { + log.Fatal(err) + } + defer p.LeaveGroup(en0, &mDNSLinkLocal) + if err := p.SetControlMessage(ipv4.FlagDst, true); err != nil { + log.Fatal(err) + } + + b := make([]byte, 1500) + for { + _, cm, peer, err := p.ReadFrom(b) + if err != nil { + log.Fatal(err) + } + if !cm.Dst.IsMulticast() || !cm.Dst.Equal(mDNSLinkLocal.IP) { + continue + } + answers := []byte("FAKE-MDNS-ANSWERS") // fake mDNS answers, you need to implement this + if _, err := p.WriteTo(answers, nil, peer); err != nil { + log.Fatal(err) + } + } +} + +func ExamplePacketConn_tracingIPPacketRoute() { + // Tracing an IP packet route to www.google.com. + + const host = "www.google.com" + ips, err := net.LookupIP(host) + if err != nil { + log.Fatal(err) + } + var dst net.IPAddr + for _, ip := range ips { + if ip.To4() != nil { + dst.IP = ip + fmt.Printf("using %v for tracing an IP packet route to %s\n", dst.IP, host) + break + } + } + if dst.IP == nil { + log.Fatal("no A record found") + } + + c, err := net.ListenPacket("ip4:1", "0.0.0.0") // ICMP for IPv4 + if err != nil { + log.Fatal(err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + + if err := p.SetControlMessage(ipv4.FlagTTL|ipv4.FlagSrc|ipv4.FlagDst|ipv4.FlagInterface, true); err != nil { + log.Fatal(err) + } + wm := icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, + Data: []byte("HELLO-R-U-THERE"), + }, + } + + rb := make([]byte, 1500) + for i := 1; i <= 64; i++ { // up to 64 hops + wm.Body.(*icmp.Echo).Seq = i + wb, err := wm.Marshal(nil) + if err != nil { + log.Fatal(err) + } + if err := p.SetTTL(i); err != nil { + log.Fatal(err) + } + + // In the real world there are usually multiple + // traffic-engineered paths for each hop. + // You may need to probe each hop a few times.
+ begin := time.Now() + if _, err := p.WriteTo(wb, nil, &dst); err != nil { + log.Fatal(err) + } + if err := p.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil { + log.Fatal(err) + } + n, cm, peer, err := p.ReadFrom(rb) + if err != nil { + if err, ok := err.(net.Error); ok && err.Timeout() { + fmt.Printf("%v\t*\n", i) + continue + } + log.Fatal(err) + } + rm, err := icmp.ParseMessage(1, rb[:n]) + if err != nil { + log.Fatal(err) + } + rtt := time.Since(begin) + + // In the real world you need to determine whether the + // received message is yours using ControlMessage.Src, + // ControlMessage.Dst, icmp.Echo.ID and icmp.Echo.Seq. + switch rm.Type { + case ipv4.ICMPTypeTimeExceeded: + names, _ := net.LookupAddr(peer.String()) + fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, cm) + case ipv4.ICMPTypeEchoReply: + names, _ := net.LookupAddr(peer.String()) + fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, cm) + return + default: + log.Printf("unknown ICMP message: %+v\n", rm) + } + } +} + +func ExampleRawConn_advertisingOSPFHello() { + c, err := net.ListenPacket("ip4:89", "0.0.0.0") // OSPF for IPv4 + if err != nil { + log.Fatal(err) + } + defer c.Close() + r, err := ipv4.NewRawConn(c) + if err != nil { + log.Fatal(err) + } + + en0, err := net.InterfaceByName("en0") + if err != nil { + log.Fatal(err) + } + allSPFRouters := net.IPAddr{IP: net.IPv4(224, 0, 0, 5)} + if err := r.JoinGroup(en0, &allSPFRouters); err != nil { + log.Fatal(err) + } + defer r.LeaveGroup(en0, &allSPFRouters) + + hello := make([]byte, 24) // fake hello data, you need to implement this + ospf := make([]byte, 24) // fake ospf header, you need to implement this + ospf[0] = 2 // version 2 + ospf[1] = 1 // hello packet + ospf = append(ospf, hello...) + iph := &ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TOS: 0xc0, // DSCP CS6 + TotalLen: ipv4.HeaderLen + len(ospf), + TTL: 1, + Protocol: 89, + Dst: allSPFRouters.IP.To4(), + } + + var cm *ipv4.ControlMessage + switch runtime.GOOS { + case "darwin", "linux": + cm = &ipv4.ControlMessage{IfIndex: en0.Index} + default: + if err := r.SetMulticastInterface(en0); err != nil { + log.Fatal(err) + } + } + if err := r.WriteTo(iph, ospf, cm); err != nil { + log.Fatal(err) + } +} diff --git a/vendor/golang.org/x/net/ipv4/gen.go b/vendor/golang.org/x/net/ipv4/gen.go new file mode 100644 index 0000000..1bb1737 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/gen.go @@ -0,0 +1,199 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +//go:generate go run gen.go + +// This program generates system adaptation constants and types, +// internet protocol constants and tables by reading template files +// and IANA protocol registries. 
+package main + +import ( + "bytes" + "encoding/xml" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + "runtime" + "strconv" + "strings" +) + +func main() { + if err := genzsys(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := geniana(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func genzsys() error { + defs := "defs_" + runtime.GOOS + ".go" + f, err := os.Open(defs) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + f.Close() + cmd := exec.Command("go", "tool", "cgo", "-godefs", defs) + b, err := cmd.Output() + if err != nil { + return err + } + b, err = format.Source(b) + if err != nil { + return err + } + zsys := "zsys_" + runtime.GOOS + ".go" + switch runtime.GOOS { + case "freebsd", "linux": + zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go" + } + if err := ioutil.WriteFile(zsys, b, 0644); err != nil { + return err + } + return nil +} + +var registries = []struct { + url string + parse func(io.Writer, io.Reader) error +}{ + { + "https://www.iana.org/assignments/icmp-parameters/icmp-parameters.xml", + parseICMPv4Parameters, + }, +} + +func geniana() error { + var bb bytes.Buffer + fmt.Fprintf(&bb, "// go generate gen.go\n") + fmt.Fprintf(&bb, "// Code generated by the command above; DO NOT EDIT.\n\n") + fmt.Fprintf(&bb, "package ipv4\n\n") + for _, r := range registries { + resp, err := http.Get(r.url) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("got HTTP status code %v for %v\n", resp.StatusCode, r.url) + } + if err := r.parse(&bb, resp.Body); err != nil { + return err + } + fmt.Fprintf(&bb, "\n") + } + b, err := format.Source(bb.Bytes()) + if err != nil { + return err + } + if err := ioutil.WriteFile("iana.go", b, 0644); err != nil { + return err + } + return nil +} + +func parseICMPv4Parameters(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var icp icmpv4Parameters + if err := dec.Decode(&icp); err != nil { + return err + } + prs := icp.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + fmt.Fprintf(w, "const (\n") + for _, pr := range prs { + if pr.Descr == "" { + continue + } + fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Descr, pr.Value) + fmt.Fprintf(w, "// %s\n", pr.OrigDescr) + } + fmt.Fprintf(w, ")\n\n") + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n") + for _, pr := range prs { + if pr.Descr == "" { + continue + } + fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigDescr)) + } + fmt.Fprintf(w, "}\n") + return nil +} + +type icmpv4Parameters struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Registries []struct { + Title string `xml:"title"` + Records []struct { + Value string `xml:"value"` + Descr string `xml:"description"` + } `xml:"record"` + } `xml:"registry"` +} + +type canonICMPv4ParamRecord struct { + OrigDescr string + Descr string + Value int +} + +func (icp *icmpv4Parameters) escape() []canonICMPv4ParamRecord { + id := -1 + for i, r := range icp.Registries { + if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") { + id = i + break + } + } + if id < 0 { + return nil + } + prs := make([]canonICMPv4ParamRecord, len(icp.Registries[id].Records)) + sr := strings.NewReplacer( + "Messages", "", + "Message", "", + "ICMP", "", + "+", "P", + "-", "", + "/", "", + ".", "", + " ", "", 
+ ) + for i, pr := range icp.Registries[id].Records { + if strings.Contains(pr.Descr, "Reserved") || + strings.Contains(pr.Descr, "Unassigned") || + strings.Contains(pr.Descr, "Deprecated") || + strings.Contains(pr.Descr, "Experiment") || + strings.Contains(pr.Descr, "experiment") { + continue + } + ss := strings.Split(pr.Descr, "\n") + if len(ss) > 1 { + prs[i].Descr = strings.Join(ss, " ") + } else { + prs[i].Descr = ss[0] + } + s := strings.TrimSpace(prs[i].Descr) + prs[i].OrigDescr = s + prs[i].Descr = sr.Replace(s) + prs[i].Value, _ = strconv.Atoi(pr.Value) + } + return prs +} diff --git a/vendor/golang.org/x/net/ipv4/genericopt.go b/vendor/golang.org/x/net/ipv4/genericopt.go new file mode 100644 index 0000000..119bf84 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/genericopt.go @@ -0,0 +1,57 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import "syscall" + +// TOS returns the type-of-service field value for outgoing packets. +func (c *genericOpt) TOS() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoTOS] + if !ok { + return 0, errOpNoSupport + } + return so.GetInt(c.Conn) +} + +// SetTOS sets the type-of-service field value for future outgoing +// packets. +func (c *genericOpt) SetTOS(tos int) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoTOS] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, tos) +} + +// TTL returns the time-to-live field value for outgoing packets. +func (c *genericOpt) TTL() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoTTL] + if !ok { + return 0, errOpNoSupport + } + return so.GetInt(c.Conn) +} + +// SetTTL sets the time-to-live field value for future outgoing +// packets. +func (c *genericOpt) SetTTL(ttl int) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoTTL] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, ttl) +} diff --git a/vendor/golang.org/x/net/ipv4/header.go b/vendor/golang.org/x/net/ipv4/header.go new file mode 100644 index 0000000..8bb0f0f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/header.go @@ -0,0 +1,159 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "encoding/binary" + "fmt" + "net" + "runtime" + "syscall" + + "golang.org/x/net/internal/socket" +) + +const ( + Version = 4 // protocol version + HeaderLen = 20 // header length without extension headers + maxHeaderLen = 60 // sensible default, revisit if later RFCs define new usage of version and header length fields +) + +type HeaderFlags int + +const ( + MoreFragments HeaderFlags = 1 << iota // more fragments flag + DontFragment // don't fragment flag +) + +// A Header represents an IPv4 header. 
+type Header struct { + Version int // protocol version + Len int // header length + TOS int // type-of-service + TotalLen int // packet total length + ID int // identification + Flags HeaderFlags // flags + FragOff int // fragment offset + TTL int // time-to-live + Protocol int // next protocol + Checksum int // checksum + Src net.IP // source address + Dst net.IP // destination address + Options []byte // options, extension headers +} + +func (h *Header) String() string { + if h == nil { + return "" + } + return fmt.Sprintf("ver=%d hdrlen=%d tos=%#x totallen=%d id=%#x flags=%#x fragoff=%#x ttl=%d proto=%d cksum=%#x src=%v dst=%v", h.Version, h.Len, h.TOS, h.TotalLen, h.ID, h.Flags, h.FragOff, h.TTL, h.Protocol, h.Checksum, h.Src, h.Dst) +} + +// Marshal returns the binary encoding of h. +func (h *Header) Marshal() ([]byte, error) { + if h == nil { + return nil, syscall.EINVAL + } + if h.Len < HeaderLen { + return nil, errHeaderTooShort + } + hdrlen := HeaderLen + len(h.Options) + b := make([]byte, hdrlen) + b[0] = byte(Version<<4 | (hdrlen >> 2 & 0x0f)) + b[1] = byte(h.TOS) + flagsAndFragOff := (h.FragOff & 0x1fff) | int(h.Flags<<13) + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + socket.NativeEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + socket.NativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + case "freebsd": + if freebsdVersion < 1100000 { + socket.NativeEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + socket.NativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + } else { + binary.BigEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + binary.BigEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + } + default: + binary.BigEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + binary.BigEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + } + binary.BigEndian.PutUint16(b[4:6], uint16(h.ID)) + b[8] = byte(h.TTL) + b[9] = byte(h.Protocol) + binary.BigEndian.PutUint16(b[10:12], uint16(h.Checksum)) + if ip := h.Src.To4(); ip != nil { + copy(b[12:16], ip[:net.IPv4len]) + } + if ip := h.Dst.To4(); ip != nil { + copy(b[16:20], ip[:net.IPv4len]) + } else { + return nil, errMissingAddress + } + if len(h.Options) > 0 { + copy(b[HeaderLen:], h.Options) + } + return b, nil +} + +// Parse parses b as an IPv4 header and stores the result in h.
+func (h *Header) Parse(b []byte) error { + if h == nil || len(b) < HeaderLen { + return errHeaderTooShort + } + hdrlen := int(b[0]&0x0f) << 2 + if hdrlen > len(b) { + return errBufferTooShort + } + h.Version = int(b[0] >> 4) + h.Len = hdrlen + h.TOS = int(b[1]) + h.ID = int(binary.BigEndian.Uint16(b[4:6])) + h.TTL = int(b[8]) + h.Protocol = int(b[9]) + h.Checksum = int(binary.BigEndian.Uint16(b[10:12])) + h.Src = net.IPv4(b[12], b[13], b[14], b[15]) + h.Dst = net.IPv4(b[16], b[17], b[18], b[19]) + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + hdrlen + h.FragOff = int(socket.NativeEndian.Uint16(b[6:8])) + case "freebsd": + if freebsdVersion < 1100000 { + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + if freebsdVersion < 1000000 { + h.TotalLen += hdrlen + } + h.FragOff = int(socket.NativeEndian.Uint16(b[6:8])) + } else { + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + h.FragOff = int(binary.BigEndian.Uint16(b[6:8])) + } + default: + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + h.FragOff = int(binary.BigEndian.Uint16(b[6:8])) + } + h.Flags = HeaderFlags(h.FragOff&0xe000) >> 13 + h.FragOff = h.FragOff & 0x1fff + optlen := hdrlen - HeaderLen + if optlen > 0 && len(b) >= hdrlen { + if cap(h.Options) < optlen { + h.Options = make([]byte, optlen) + } else { + h.Options = h.Options[:optlen] + } + copy(h.Options, b[HeaderLen:hdrlen]) + } + return nil +} + +// ParseHeader parses b as an IPv4 header. +func ParseHeader(b []byte) (*Header, error) { + h := new(Header) + if err := h.Parse(b); err != nil { + return nil, err + } + return h, nil +} diff --git a/vendor/golang.org/x/net/ipv4/header_test.go b/vendor/golang.org/x/net/ipv4/header_test.go new file mode 100644 index 0000000..a246aee --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/header_test.go @@ -0,0 +1,228 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "bytes" + "encoding/binary" + "net" + "reflect" + "runtime" + "strings" + "testing" + + "golang.org/x/net/internal/socket" +) + +type headerTest struct { + wireHeaderFromKernel []byte + wireHeaderToKernel []byte + wireHeaderFromTradBSDKernel []byte + wireHeaderToTradBSDKernel []byte + wireHeaderFromFreeBSD10Kernel []byte + wireHeaderToFreeBSD10Kernel []byte + *Header +} + +var headerLittleEndianTests = []headerTest{ + // TODO(mikio): Add platform dependent wire header formats when + // we support new platforms. 
+ { + wireHeaderFromKernel: []byte{ + 0x45, 0x01, 0xbe, 0xef, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderToKernel: []byte{ + 0x45, 0x01, 0xbe, 0xef, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderFromTradBSDKernel: []byte{ + 0x45, 0x01, 0xdb, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderToTradBSDKernel: []byte{ + 0x45, 0x01, 0xef, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderFromFreeBSD10Kernel: []byte{ + 0x45, 0x01, 0xef, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderToFreeBSD10Kernel: []byte{ + 0x45, 0x01, 0xef, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + Header: &Header{ + Version: Version, + Len: HeaderLen, + TOS: 1, + TotalLen: 0xbeef, + ID: 0xcafe, + Flags: DontFragment, + FragOff: 1500, + TTL: 255, + Protocol: 1, + Checksum: 0xdead, + Src: net.IPv4(172, 16, 254, 254), + Dst: net.IPv4(192, 168, 0, 1), + }, + }, + + // with option headers + { + wireHeaderFromKernel: []byte{ + 0x46, 0x01, 0xbe, 0xf3, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + 0xff, 0xfe, 0xfe, 0xff, + }, + wireHeaderToKernel: []byte{ + 0x46, 0x01, 0xbe, 0xf3, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + 0xff, 0xfe, 0xfe, 0xff, + }, + wireHeaderFromTradBSDKernel: []byte{ + 0x46, 0x01, 0xdb, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + 0xff, 0xfe, 0xfe, 0xff, + }, + wireHeaderToTradBSDKernel: []byte{ + 0x46, 0x01, 0xf3, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + 0xff, 0xfe, 0xfe, 0xff, + }, + wireHeaderFromFreeBSD10Kernel: []byte{ + 0x46, 0x01, 0xf3, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + 0xff, 0xfe, 0xfe, 0xff, + }, + wireHeaderToFreeBSD10Kernel: []byte{ + 0x46, 0x01, 0xf3, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + 0xff, 0xfe, 0xfe, 0xff, + }, + Header: &Header{ + Version: Version, + Len: HeaderLen + 4, + TOS: 1, + TotalLen: 0xbef3, + ID: 0xcafe, + Flags: DontFragment, + FragOff: 1500, + TTL: 255, + Protocol: 1, + Checksum: 0xdead, + Src: net.IPv4(172, 16, 254, 254), + Dst: net.IPv4(192, 168, 0, 1), + Options: []byte{0xff, 0xfe, 0xfe, 0xff}, + }, + }, +} + +func TestMarshalHeader(t *testing.T) { + if socket.NativeEndian != binary.LittleEndian { + t.Skip("no test for non-little endian machine yet") + } + + for _, tt := range headerLittleEndianTests { + b, err := tt.Header.Marshal() + if err != nil { + t.Fatal(err) + } + var wh []byte + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + wh = tt.wireHeaderToTradBSDKernel + case "freebsd": + switch { + case freebsdVersion < 1000000: + wh = tt.wireHeaderToTradBSDKernel + case 1000000 <= freebsdVersion && freebsdVersion < 1100000: + wh = tt.wireHeaderToFreeBSD10Kernel + default: + wh = tt.wireHeaderToKernel + } + default: + wh = tt.wireHeaderToKernel + } + if !bytes.Equal(b, wh) { + t.Fatalf("got %#v; want %#v", b, wh) + } + } +} + +func TestParseHeader(t *testing.T) { + if socket.NativeEndian != binary.LittleEndian { + t.Skip("no test for big endian machine 
yet") + } + + for _, tt := range headerLittleEndianTests { + var wh []byte + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + wh = tt.wireHeaderFromTradBSDKernel + case "freebsd": + switch { + case freebsdVersion < 1000000: + wh = tt.wireHeaderFromTradBSDKernel + case 1000000 <= freebsdVersion && freebsdVersion < 1100000: + wh = tt.wireHeaderFromFreeBSD10Kernel + default: + wh = tt.wireHeaderFromKernel + } + default: + wh = tt.wireHeaderFromKernel + } + h, err := ParseHeader(wh) + if err != nil { + t.Fatal(err) + } + if err := h.Parse(wh); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(h, tt.Header) { + t.Fatalf("got %#v; want %#v", h, tt.Header) + } + s := h.String() + if strings.Contains(s, ",") { + t.Fatalf("should be space-separated values: %s", s) + } + } +} diff --git a/vendor/golang.org/x/net/ipv4/helper.go b/vendor/golang.org/x/net/ipv4/helper.go new file mode 100644 index 0000000..a5052e3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/helper.go @@ -0,0 +1,63 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "errors" + "net" +) + +var ( + errMissingAddress = errors.New("missing address") + errMissingHeader = errors.New("missing header") + errHeaderTooShort = errors.New("header too short") + errBufferTooShort = errors.New("buffer too short") + errInvalidConnType = errors.New("invalid conn type") + errOpNoSupport = errors.New("operation not supported") + errNoSuchInterface = errors.New("no such interface") + errNoSuchMulticastInterface = errors.New("no such multicast interface") + + // See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html. + freebsdVersion uint32 +) + +func boolint(b bool) int { + if b { + return 1 + } + return 0 +} + +func netAddrToIP4(a net.Addr) net.IP { + switch v := a.(type) { + case *net.UDPAddr: + if ip := v.IP.To4(); ip != nil { + return ip + } + case *net.IPAddr: + if ip := v.IP.To4(); ip != nil { + return ip + } + } + return nil +} + +func opAddr(a net.Addr) net.Addr { + switch a.(type) { + case *net.TCPAddr: + if a == nil { + return nil + } + case *net.UDPAddr: + if a == nil { + return nil + } + case *net.IPAddr: + if a == nil { + return nil + } + } + return a +} diff --git a/vendor/golang.org/x/net/ipv4/iana.go b/vendor/golang.org/x/net/ipv4/iana.go new file mode 100644 index 0000000..4375b40 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/iana.go @@ -0,0 +1,38 @@ +// go generate gen.go +// Code generated by the command above; DO NOT EDIT. 
+
+package ipv4
+
+// Internet Control Message Protocol (ICMP) Parameters, Updated: 2018-02-26
+const (
+	ICMPTypeEchoReply              ICMPType = 0  // Echo Reply
+	ICMPTypeDestinationUnreachable ICMPType = 3  // Destination Unreachable
+	ICMPTypeRedirect               ICMPType = 5  // Redirect
+	ICMPTypeEcho                   ICMPType = 8  // Echo
+	ICMPTypeRouterAdvertisement    ICMPType = 9  // Router Advertisement
+	ICMPTypeRouterSolicitation     ICMPType = 10 // Router Solicitation
+	ICMPTypeTimeExceeded           ICMPType = 11 // Time Exceeded
+	ICMPTypeParameterProblem       ICMPType = 12 // Parameter Problem
+	ICMPTypeTimestamp              ICMPType = 13 // Timestamp
+	ICMPTypeTimestampReply         ICMPType = 14 // Timestamp Reply
+	ICMPTypePhoturis               ICMPType = 40 // Photuris
+	ICMPTypeExtendedEchoRequest    ICMPType = 42 // Extended Echo Request
+	ICMPTypeExtendedEchoReply      ICMPType = 43 // Extended Echo Reply
+)
+
+// Internet Control Message Protocol (ICMP) Parameters, Updated: 2018-02-26
+var icmpTypes = map[ICMPType]string{
+	0:  "echo reply",
+	3:  "destination unreachable",
+	5:  "redirect",
+	8:  "echo",
+	9:  "router advertisement",
+	10: "router solicitation",
+	11: "time exceeded",
+	12: "parameter problem",
+	13: "timestamp",
+	14: "timestamp reply",
+	40: "photuris",
+	42: "extended echo request",
+	43: "extended echo reply",
+}
diff --git a/vendor/golang.org/x/net/ipv4/icmp.go b/vendor/golang.org/x/net/ipv4/icmp.go
new file mode 100644
index 0000000..9902bb3
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/icmp.go
@@ -0,0 +1,57 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import "golang.org/x/net/internal/iana"
+
+// An ICMPType represents a type of ICMP message.
+type ICMPType int
+
+func (typ ICMPType) String() string {
+	s, ok := icmpTypes[typ]
+	if !ok {
+		return "<nil>"
+	}
+	return s
+}
+
+// Protocol returns the ICMPv4 protocol number.
+func (typ ICMPType) Protocol() int {
+	return iana.ProtocolICMP
+}
+
+// An ICMPFilter represents an ICMP message filter for incoming
+// packets. The filter belongs to a packet delivery path on a host and
+// it cannot interact with forwarding packets or tunnel-outer packets.
+//
+// Note: RFC 8200 defines a reasonable role model and it applies not
+// only to IPv6 but also to IPv4. A node means a device that implements
+// IP. A router means a node that forwards IP packets not explicitly
+// addressed to itself, and a host means a node that is not a router.
+type ICMPFilter struct {
+	icmpFilter
+}
+
+// Accept accepts incoming ICMP packets including the type field value
+// typ.
+func (f *ICMPFilter) Accept(typ ICMPType) {
+	f.accept(typ)
+}
+
+// Block blocks incoming ICMP packets including the type field value
+// typ.
+func (f *ICMPFilter) Block(typ ICMPType) {
+	f.block(typ)
+}
+
+// SetAll sets the filter action to the filter.
+func (f *ICMPFilter) SetAll(block bool) {
+	f.setAll(block)
+}
+
+// WillBlock reports whether the ICMP type will be blocked.
+func (f *ICMPFilter) WillBlock(typ ICMPType) bool {
+	return f.willBlock(typ)
+}
diff --git a/vendor/golang.org/x/net/ipv4/icmp_linux.go b/vendor/golang.org/x/net/ipv4/icmp_linux.go
new file mode 100644
index 0000000..6e1c5c8
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/icmp_linux.go
@@ -0,0 +1,25 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
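+
+// The Linux implementation below backs the exported ipv4.ICMPFilter. A
+// minimal sketch of the intended use, assuming an *ipv4.PacketConn p on
+// a raw ICMP socket:
+//
+//	var f ipv4.ICMPFilter
+//	f.SetAll(true)                   // block everything by default
+//	f.Accept(ipv4.ICMPTypeEchoReply) // then whitelist echo replies
+//	if err := p.SetICMPFilter(&f); err != nil {
+//		// handle error
+//	}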
+
+package ipv4
+
+func (f *icmpFilter) accept(typ ICMPType) {
+	f.Data &^= 1 << (uint32(typ) & 31)
+}
+
+func (f *icmpFilter) block(typ ICMPType) {
+	f.Data |= 1 << (uint32(typ) & 31)
+}
+
+func (f *icmpFilter) setAll(block bool) {
+	if block {
+		f.Data = 1<<32 - 1
+	} else {
+		f.Data = 0
+	}
+}
+
+func (f *icmpFilter) willBlock(typ ICMPType) bool {
+	return f.Data&(1<<(uint32(typ)&31)) != 0
+}
diff --git a/vendor/golang.org/x/net/ipv4/icmp_stub.go b/vendor/golang.org/x/net/ipv4/icmp_stub.go
new file mode 100644
index 0000000..21bb29a
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/icmp_stub.go
@@ -0,0 +1,25 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !linux
+
+package ipv4
+
+const sizeofICMPFilter = 0x0
+
+type icmpFilter struct {
+}
+
+func (f *icmpFilter) accept(typ ICMPType) {
+}
+
+func (f *icmpFilter) block(typ ICMPType) {
+}
+
+func (f *icmpFilter) setAll(block bool) {
+}
+
+func (f *icmpFilter) willBlock(typ ICMPType) bool {
+	return false
+}
diff --git a/vendor/golang.org/x/net/ipv4/icmp_test.go b/vendor/golang.org/x/net/ipv4/icmp_test.go
new file mode 100644
index 0000000..3324b54
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/icmp_test.go
@@ -0,0 +1,95 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4_test
+
+import (
+	"net"
+	"reflect"
+	"runtime"
+	"testing"
+
+	"golang.org/x/net/internal/nettest"
+	"golang.org/x/net/ipv4"
+)
+
+var icmpStringTests = []struct {
+	in  ipv4.ICMPType
+	out string
+}{
+	{ipv4.ICMPTypeDestinationUnreachable, "destination unreachable"},
+
+	{256, "<nil>"},
+}
+
+func TestICMPString(t *testing.T) {
+	for _, tt := range icmpStringTests {
+		s := tt.in.String()
+		if s != tt.out {
+			t.Errorf("got %s; want %s", s, tt.out)
+		}
+	}
+}
+
+func TestICMPFilter(t *testing.T) {
+	switch runtime.GOOS {
+	case "linux":
+	default:
+		t.Skipf("not supported on %s", runtime.GOOS)
+	}
+
+	var f ipv4.ICMPFilter
+	for _, toggle := range []bool{false, true} {
+		f.SetAll(toggle)
+		for _, typ := range []ipv4.ICMPType{
+			ipv4.ICMPTypeDestinationUnreachable,
+			ipv4.ICMPTypeEchoReply,
+			ipv4.ICMPTypeTimeExceeded,
+			ipv4.ICMPTypeParameterProblem,
+		} {
+			f.Accept(typ)
+			if f.WillBlock(typ) {
+				t.Errorf("ipv4.ICMPFilter.Set(%v, false) failed", typ)
+			}
+			f.Block(typ)
+			if !f.WillBlock(typ) {
+				t.Errorf("ipv4.ICMPFilter.Set(%v, true) failed", typ)
+			}
+		}
+	}
+}
+
+func TestSetICMPFilter(t *testing.T) {
+	switch runtime.GOOS {
+	case "linux":
+	default:
+		t.Skipf("not supported on %s", runtime.GOOS)
+	}
+	if m, ok := nettest.SupportsRawIPSocket(); !ok {
+		t.Skip(m)
+	}
+
+	c, err := net.ListenPacket("ip4:icmp", "127.0.0.1")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer c.Close()
+
+	p := ipv4.NewPacketConn(c)
+
+	var f ipv4.ICMPFilter
+	f.SetAll(true)
+	f.Accept(ipv4.ICMPTypeEcho)
+	f.Accept(ipv4.ICMPTypeEchoReply)
+	if err := p.SetICMPFilter(&f); err != nil {
+		t.Fatal(err)
+	}
+	kf, err := p.ICMPFilter()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !reflect.DeepEqual(kf, &f) {
+		t.Fatalf("got %#v; want %#v", kf, f)
+	}
+}
diff --git a/vendor/golang.org/x/net/ipv4/multicast_test.go b/vendor/golang.org/x/net/ipv4/multicast_test.go
new file mode 100644
index 0000000..bcf4973
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/multicast_test.go
@@ -0,0 +1,334 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "bytes" + "net" + "os" + "runtime" + "testing" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +var packetConnReadWriteMulticastUDPTests = []struct { + addr string + grp, src *net.UDPAddr +}{ + {"224.0.0.0:0", &net.UDPAddr{IP: net.IPv4(224, 0, 0, 254)}, nil}, // see RFC 4727 + + {"232.0.1.0:0", &net.UDPAddr{IP: net.IPv4(232, 0, 1, 254)}, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 +} + +func TestPacketConnReadWriteMulticastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range packetConnReadWriteMulticastUDPTests { + c, err := net.ListenPacket("udp4", tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + grp := *tt.grp + grp.Port = c.LocalAddr().(*net.UDPAddr).Port + p := ipv4.NewPacketConn(c) + defer p.Close() + if tt.src == nil { + if err := p.JoinGroup(ifi, &grp); err != nil { + t.Fatal(err) + } + defer p.LeaveGroup(ifi, &grp) + } else { + if err := p.JoinSourceSpecificGroup(ifi, &grp, tt.src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support IGMPv2/3 fail here + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + defer p.LeaveSourceSpecificGroup(ifi, &grp, tt.src) + } + if err := p.SetMulticastInterface(ifi); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastInterface(); err != nil { + t.Fatal(err) + } + if err := p.SetMulticastLoopback(true); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastLoopback(); err != nil { + t.Fatal(err) + } + cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface + wb := []byte("HELLO-R-U-THERE") + + for i, toggle := range []bool{true, false, true} { + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { + t.Fatal(err) + } + p.SetMulticastTTL(i + 1) + if n, err := p.WriteTo(wb, nil, &grp); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if n, _, _, err := p.ReadFrom(rb); err != nil { + t.Fatal(err) + } else if !bytes.Equal(rb[:n], wb) { + t.Fatalf("got %v; want %v", rb[:n], wb) + } + } + } +} + +var packetConnReadWriteMulticastICMPTests = []struct { + grp, src *net.IPAddr +}{ + {&net.IPAddr{IP: net.IPv4(224, 0, 0, 254)}, nil}, // see RFC 4727 + + {&net.IPAddr{IP: net.IPv4(232, 0, 1, 254)}, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 +} + +func TestPacketConnReadWriteMulticastICMP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range packetConnReadWriteMulticastICMPTests { + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") 
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer c.Close()
+
+		p := ipv4.NewPacketConn(c)
+		defer p.Close()
+		if tt.src == nil {
+			if err := p.JoinGroup(ifi, tt.grp); err != nil {
+				t.Fatal(err)
+			}
+			defer p.LeaveGroup(ifi, tt.grp)
+		} else {
+			if err := p.JoinSourceSpecificGroup(ifi, tt.grp, tt.src); err != nil {
+				switch runtime.GOOS {
+				case "freebsd", "linux":
+				default: // platforms that don't support IGMPv2/3 fail here
+					t.Logf("not supported on %s", runtime.GOOS)
+					continue
+				}
+				t.Fatal(err)
+			}
+			defer p.LeaveSourceSpecificGroup(ifi, tt.grp, tt.src)
+		}
+		if err := p.SetMulticastInterface(ifi); err != nil {
+			t.Fatal(err)
+		}
+		if _, err := p.MulticastInterface(); err != nil {
+			t.Fatal(err)
+		}
+		if err := p.SetMulticastLoopback(true); err != nil {
+			t.Fatal(err)
+		}
+		if _, err := p.MulticastLoopback(); err != nil {
+			t.Fatal(err)
+		}
+		cf := ipv4.FlagDst | ipv4.FlagInterface
+		if runtime.GOOS != "solaris" {
+			// Solaris never allows ICMP properties to be modified.
+			cf |= ipv4.FlagTTL
+		}
+
+		for i, toggle := range []bool{true, false, true} {
+			wb, err := (&icmp.Message{
+				Type: ipv4.ICMPTypeEcho, Code: 0,
+				Body: &icmp.Echo{
+					ID: os.Getpid() & 0xffff, Seq: i + 1,
+					Data: []byte("HELLO-R-U-THERE"),
+				},
+			}).Marshal(nil)
+			if err != nil {
+				t.Fatal(err)
+			}
+			if err := p.SetControlMessage(cf, toggle); err != nil {
+				if nettest.ProtocolNotSupported(err) {
+					t.Logf("not supported on %s", runtime.GOOS)
+					continue
+				}
+				t.Fatal(err)
+			}
+			if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil {
+				t.Fatal(err)
+			}
+			p.SetMulticastTTL(i + 1)
+			if n, err := p.WriteTo(wb, nil, tt.grp); err != nil {
+				t.Fatal(err)
+			} else if n != len(wb) {
+				t.Fatalf("got %v; want %v", n, len(wb))
+			}
+			rb := make([]byte, 128)
+			if n, _, _, err := p.ReadFrom(rb); err != nil {
+				t.Fatal(err)
+			} else {
+				m, err := icmp.ParseMessage(iana.ProtocolICMP, rb[:n])
+				if err != nil {
+					t.Fatal(err)
+				}
+				switch {
+				case m.Type == ipv4.ICMPTypeEchoReply && m.Code == 0: // net.inet.icmp.bmcastecho=1
+				case m.Type == ipv4.ICMPTypeEcho && m.Code == 0: // net.inet.icmp.bmcastecho=0
+				default:
+					t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0)
+				}
+			}
+		}
+	}
+}
+
+var rawConnReadWriteMulticastICMPTests = []struct {
+	grp, src *net.IPAddr
+}{
+	{&net.IPAddr{IP: net.IPv4(224, 0, 0, 254)}, nil}, // see RFC 4727
+
+	{&net.IPAddr{IP: net.IPv4(232, 0, 1, 254)}, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771
+}
+
+func TestRawConnReadWriteMulticastICMP(t *testing.T) {
+	if testing.Short() {
+		t.Skip("to avoid external network")
+	}
+	if m, ok := nettest.SupportsRawIPSocket(); !ok {
+		t.Skip(m)
+	}
+	ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback)
+	if ifi == nil {
+		t.Skipf("not available on %s", runtime.GOOS)
+	}
+
+	for _, tt := range rawConnReadWriteMulticastICMPTests {
+		c, err := net.ListenPacket("ip4:icmp", "0.0.0.0")
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer c.Close()
+
+		r, err := ipv4.NewRawConn(c)
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer r.Close()
+		if tt.src == nil {
+			if err := r.JoinGroup(ifi, tt.grp); err != nil {
+				t.Fatal(err)
+			}
+			defer r.LeaveGroup(ifi, tt.grp)
+		} else {
+			if err := r.JoinSourceSpecificGroup(ifi, tt.grp, tt.src); err != nil {
+				switch runtime.GOOS {
+				case "freebsd", "linux":
+				default: // platforms that don't support IGMPv2/3 fail here
+					t.Logf("not supported on %s", runtime.GOOS)
+					continue
+				}
+				t.Fatal(err)
+			}
+			defer r.LeaveSourceSpecificGroup(ifi, tt.grp, tt.src)
+ } + if err := r.SetMulticastInterface(ifi); err != nil { + t.Fatal(err) + } + if _, err := r.MulticastInterface(); err != nil { + t.Fatal(err) + } + if err := r.SetMulticastLoopback(true); err != nil { + t.Fatal(err) + } + if _, err := r.MulticastLoopback(); err != nil { + t.Fatal(err) + } + cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface + + for i, toggle := range []bool{true, false, true} { + wb, err := (&icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(nil) + if err != nil { + t.Fatal(err) + } + wh := &ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TOS: i + 1, + TotalLen: ipv4.HeaderLen + len(wb), + Protocol: 1, + Dst: tt.grp.IP, + } + if err := r.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := r.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { + t.Fatal(err) + } + r.SetMulticastTTL(i + 1) + if err := r.WriteTo(wh, wb, nil); err != nil { + t.Fatal(err) + } + rb := make([]byte, ipv4.HeaderLen+128) + if rh, b, _, err := r.ReadFrom(rb); err != nil { + t.Fatal(err) + } else { + m, err := icmp.ParseMessage(iana.ProtocolICMP, b) + if err != nil { + t.Fatal(err) + } + switch { + case (rh.Dst.IsLoopback() || rh.Dst.IsLinkLocalUnicast() || rh.Dst.IsGlobalUnicast()) && m.Type == ipv4.ICMPTypeEchoReply && m.Code == 0: // net.inet.icmp.bmcastecho=1 + case rh.Dst.IsMulticast() && m.Type == ipv4.ICMPTypeEcho && m.Code == 0: // net.inet.icmp.bmcastecho=0 + default: + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0) + } + } + } + } +} diff --git a/vendor/golang.org/x/net/ipv4/multicastlistener_test.go b/vendor/golang.org/x/net/ipv4/multicastlistener_test.go new file mode 100644 index 0000000..e43fbbe --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/multicastlistener_test.go @@ -0,0 +1,265 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
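+
+// The tests below exercise group membership bookkeeping across multiple
+// listeners. A minimal sketch of the pattern under test, assuming a UDP
+// PacketConn c and an RFC 4727 test group:
+//
+//	p := ipv4.NewPacketConn(c)
+//	grp := &net.UDPAddr{IP: net.IPv4(224, 0, 0, 254)}
+//	ift, _ := net.Interfaces()
+//	for i := range ift {
+//		if err := p.JoinGroup(&ift[i], grp); err != nil {
+//			continue // interface is not multicast capable
+//		}
+//		defer p.LeaveGroup(&ift[i], grp)
+//	}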
+
+package ipv4_test
+
+import (
+	"net"
+	"runtime"
+	"testing"
+
+	"golang.org/x/net/internal/nettest"
+	"golang.org/x/net/ipv4"
+)
+
+var udpMultipleGroupListenerTests = []net.Addr{
+	&net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}, // see RFC 4727
+	&net.UDPAddr{IP: net.IPv4(224, 0, 0, 250)},
+	&net.UDPAddr{IP: net.IPv4(224, 0, 0, 254)},
+}
+
+func TestUDPSinglePacketConnWithMultipleGroupListeners(t *testing.T) {
+	switch runtime.GOOS {
+	case "nacl", "plan9", "windows":
+		t.Skipf("not supported on %s", runtime.GOOS)
+	}
+	if testing.Short() {
+		t.Skip("to avoid external network")
+	}
+
+	for _, gaddr := range udpMultipleGroupListenerTests {
+		c, err := net.ListenPacket("udp4", "0.0.0.0:0") // wildcard address with no reusable port
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer c.Close()
+
+		p := ipv4.NewPacketConn(c)
+		var mift []*net.Interface
+
+		ift, err := net.Interfaces()
+		if err != nil {
+			t.Fatal(err)
+		}
+		for i, ifi := range ift {
+			if _, ok := nettest.IsMulticastCapable("ip4", &ifi); !ok {
+				continue
+			}
+			if err := p.JoinGroup(&ifi, gaddr); err != nil {
+				t.Fatal(err)
+			}
+			mift = append(mift, &ift[i])
+		}
+		for _, ifi := range mift {
+			if err := p.LeaveGroup(ifi, gaddr); err != nil {
+				t.Fatal(err)
+			}
+		}
+	}
+}
+
+func TestUDPMultiplePacketConnWithMultipleGroupListeners(t *testing.T) {
+	switch runtime.GOOS {
+	case "nacl", "plan9", "windows":
+		t.Skipf("not supported on %s", runtime.GOOS)
+	}
+	if testing.Short() {
+		t.Skip("to avoid external network")
+	}
+
+	for _, gaddr := range udpMultipleGroupListenerTests {
+		c1, err := net.ListenPacket("udp4", "224.0.0.0:0") // wildcard address with reusable port
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer c1.Close()
+		_, port, err := net.SplitHostPort(c1.LocalAddr().String())
+		if err != nil {
+			t.Fatal(err)
+		}
+		c2, err := net.ListenPacket("udp4", net.JoinHostPort("224.0.0.0", port)) // wildcard address with reusable port
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer c2.Close()
+
+		var ps [2]*ipv4.PacketConn
+		ps[0] = ipv4.NewPacketConn(c1)
+		ps[1] = ipv4.NewPacketConn(c2)
+		var mift []*net.Interface
+
+		ift, err := net.Interfaces()
+		if err != nil {
+			t.Fatal(err)
+		}
+		for i, ifi := range ift {
+			if _, ok := nettest.IsMulticastCapable("ip4", &ifi); !ok {
+				continue
+			}
+			for _, p := range ps {
+				if err := p.JoinGroup(&ifi, gaddr); err != nil {
+					t.Fatal(err)
+				}
+			}
+			mift = append(mift, &ift[i])
+		}
+		for _, ifi := range mift {
+			for _, p := range ps {
+				if err := p.LeaveGroup(ifi, gaddr); err != nil {
+					t.Fatal(err)
+				}
+			}
+		}
+	}
+}
+
+func TestUDPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) {
+	switch runtime.GOOS {
+	case "nacl", "plan9", "windows":
+		t.Skipf("not supported on %s", runtime.GOOS)
+	}
+	if testing.Short() {
+		t.Skip("to avoid external network")
+	}
+
+	gaddr := net.IPAddr{IP: net.IPv4(224, 0, 0, 254)} // see RFC 4727
+	type ml struct {
+		c   *ipv4.PacketConn
+		ifi *net.Interface
+	}
+	var mlt []*ml
+
+	ift, err := net.Interfaces()
+	if err != nil {
+		t.Fatal(err)
+	}
+	port := "0"
+	for i, ifi := range ift {
+		ip, ok := nettest.IsMulticastCapable("ip4", &ifi)
+		if !ok {
+			continue
+		}
+		c, err := net.ListenPacket("udp4", net.JoinHostPort(ip.String(), port)) // unicast address with non-reusable port
+		if err != nil {
+			// The listen may fail when the service is
+			// already in use, but it's fine because the
+			// purpose of this is not to test the
+			// bookkeeping of IP control block inside the
+			// kernel.
+ t.Log(err) + continue + } + defer c.Close() + if port == "0" { + _, port, err = net.SplitHostPort(c.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + } + p := ipv4.NewPacketConn(c) + if err := p.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mlt = append(mlt, &ml{p, &ift[i]}) + } + for _, m := range mlt { + if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} + +func TestIPSingleRawConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if testing.Short() { + t.Skip("to avoid external network") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") // wildcard address + if err != nil { + t.Fatal(err) + } + defer c.Close() + + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + gaddr := net.IPAddr{IP: net.IPv4(224, 0, 0, 254)} // see RFC 4727 + var mift []*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip4", &ifi); !ok { + continue + } + if err := r.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + if err := r.LeaveGroup(ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} + +func TestIPPerInterfaceSingleRawConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if testing.Short() { + t.Skip("to avoid external network") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + gaddr := net.IPAddr{IP: net.IPv4(224, 0, 0, 254)} // see RFC 4727 + type ml struct { + c *ipv4.RawConn + ifi *net.Interface + } + var mlt []*ml + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + ip, ok := nettest.IsMulticastCapable("ip4", &ifi) + if !ok { + continue + } + c, err := net.ListenPacket("ip4:253", ip.String()) // unicast address + if err != nil { + t.Fatal(err) + } + defer c.Close() + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + if err := r.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mlt = append(mlt, &ml{r, &ift[i]}) + } + for _, m := range mlt { + if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/golang.org/x/net/ipv4/multicastsockopt_test.go b/vendor/golang.org/x/net/ipv4/multicastsockopt_test.go new file mode 100644 index 0000000..f7efac2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/multicastsockopt_test.go @@ -0,0 +1,195 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
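+
+// A minimal sketch of the source-specific membership sequence the tests
+// below walk through, assuming an *ipv4.PacketConn p, an interface ifi,
+// and RFC 5771 test addresses:
+//
+//	grp := &net.UDPAddr{IP: net.IPv4(232, 0, 1, 249)}
+//	src := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}
+//	if err := p.JoinSourceSpecificGroup(ifi, grp, src); err != nil {
+//		// expected to fail on platforms without IGMPv3 support
+//	}
+//	defer p.LeaveSourceSpecificGroup(ifi, grp, src)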
+ +package ipv4_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +var packetConnMulticastSocketOptionTests = []struct { + net, proto, addr string + grp, src net.Addr +}{ + {"udp4", "", "224.0.0.0:0", &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}, nil}, // see RFC 4727 + {"ip4", ":icmp", "0.0.0.0", &net.IPAddr{IP: net.IPv4(224, 0, 0, 250)}, nil}, // see RFC 4727 + + {"udp4", "", "232.0.0.0:0", &net.UDPAddr{IP: net.IPv4(232, 0, 1, 249)}, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 + {"ip4", ":icmp", "0.0.0.0", &net.IPAddr{IP: net.IPv4(232, 0, 1, 250)}, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 +} + +func TestPacketConnMulticastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + m, ok := nettest.SupportsRawIPSocket() + for _, tt := range packetConnMulticastSocketOptionTests { + if tt.net == "ip4" && !ok { + t.Log(m) + continue + } + c, err := net.ListenPacket(tt.net+tt.proto, tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + defer p.Close() + + if tt.src == nil { + testMulticastSocketOptions(t, p, ifi, tt.grp) + } else { + testSourceSpecificMulticastSocketOptions(t, p, ifi, tt.grp, tt.src) + } + } +} + +var rawConnMulticastSocketOptionTests = []struct { + grp, src net.Addr +}{ + {&net.IPAddr{IP: net.IPv4(224, 0, 0, 250)}, nil}, // see RFC 4727 + + {&net.IPAddr{IP: net.IPv4(232, 0, 1, 250)}, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 +} + +func TestRawConnMulticastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range rawConnMulticastSocketOptionTests { + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + defer r.Close() + + if tt.src == nil { + testMulticastSocketOptions(t, r, ifi, tt.grp) + } else { + testSourceSpecificMulticastSocketOptions(t, r, ifi, tt.grp, tt.src) + } + } +} + +type testIPv4MulticastConn interface { + MulticastTTL() (int, error) + SetMulticastTTL(ttl int) error + MulticastLoopback() (bool, error) + SetMulticastLoopback(bool) error + JoinGroup(*net.Interface, net.Addr) error + LeaveGroup(*net.Interface, net.Addr) error + JoinSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + LeaveSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + ExcludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + IncludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error +} + +func testMulticastSocketOptions(t *testing.T, c testIPv4MulticastConn, ifi *net.Interface, grp net.Addr) { + const ttl = 255 + if err := c.SetMulticastTTL(ttl); err != nil { + t.Error(err) + return + } + if v, err := c.MulticastTTL(); err != nil { + t.Error(err) + return + } else if v != ttl { + t.Errorf("got %v; want %v", v, ttl) + return + } + + for _, toggle := range []bool{true, false} { + if err := c.SetMulticastLoopback(toggle); err != nil { + 
t.Error(err) + return + } + if v, err := c.MulticastLoopback(); err != nil { + t.Error(err) + return + } else if v != toggle { + t.Errorf("got %v; want %v", v, toggle) + return + } + } + + if err := c.JoinGroup(ifi, grp); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } +} + +func testSourceSpecificMulticastSocketOptions(t *testing.T, c testIPv4MulticastConn, ifi *net.Interface, grp, src net.Addr) { + // MCAST_JOIN_GROUP -> MCAST_BLOCK_SOURCE -> MCAST_UNBLOCK_SOURCE -> MCAST_LEAVE_GROUP + if err := c.JoinGroup(ifi, grp); err != nil { + t.Error(err) + return + } + if err := c.ExcludeSourceSpecificGroup(ifi, grp, src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support IGMPv2/3 fail here + t.Logf("not supported on %s", runtime.GOOS) + return + } + t.Error(err) + return + } + if err := c.IncludeSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } + + // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_SOURCE_GROUP + if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + + // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_GROUP + if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } +} diff --git a/vendor/golang.org/x/net/ipv4/packet.go b/vendor/golang.org/x/net/ipv4/packet.go new file mode 100644 index 0000000..f00f5b0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/packet.go @@ -0,0 +1,69 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ReadFrom and WriteTo methods of RawConn +// are not implemented. + +// A packetHandler represents the IPv4 datagram handler. +type packetHandler struct { + *net.IPConn + *socket.Conn + rawOpt +} + +func (c *packetHandler) ok() bool { return c != nil && c.IPConn != nil && c.Conn != nil } + +// ReadFrom reads an IPv4 datagram from the endpoint c, copying the +// datagram into b. It returns the received datagram as the IPv4 +// header h, the payload p and the control message cm. +func (c *packetHandler) ReadFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { + if !c.ok() { + return nil, nil, nil, syscall.EINVAL + } + return c.readFrom(b) +} + +func slicePacket(b []byte) (h, p []byte, err error) { + if len(b) < HeaderLen { + return nil, nil, errHeaderTooShort + } + hdrlen := int(b[0]&0x0f) << 2 + return b[:hdrlen], b[hdrlen:], nil +} + +// WriteTo writes an IPv4 datagram through the endpoint c, copying the +// datagram from the IPv4 header h and the payload p. The control +// message cm allows the datagram path and the outgoing interface to be +// specified. Currently only Darwin and Linux support this. The cm +// may be nil if control of the outgoing datagram is not required. 
+//
+// The IPv4 header h must contain appropriate fields that include:
+//
+//	Version       = <must be specified>
+//	Len           = <must be specified>
+//	TOS           = <must be specified>
+//	TotalLen      = <must be specified>
+//	ID            = platform sets an appropriate value if ID is zero
+//	FragOff       = <must be specified>
+//	TTL           = <must be specified>
+//	Protocol      = <must be specified>
+//	Checksum      = platform sets an appropriate value if Checksum is zero
+//	Src           = platform sets an appropriate value if Src is nil
+//	Dst           = <must be specified>
+//	Options       = optional
+func (c *packetHandler) WriteTo(h *Header, p []byte, cm *ControlMessage) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	return c.writeTo(h, p, cm)
+}
diff --git a/vendor/golang.org/x/net/ipv4/packet_go1_8.go b/vendor/golang.org/x/net/ipv4/packet_go1_8.go
new file mode 100644
index 0000000..b47d186
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/packet_go1_8.go
@@ -0,0 +1,56 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.9
+
+package ipv4
+
+import "net"
+
+func (c *packetHandler) readFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) {
+	c.rawOpt.RLock()
+	oob := NewControlMessage(c.rawOpt.cflags)
+	c.rawOpt.RUnlock()
+	n, nn, _, src, err := c.ReadMsgIP(b, oob)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+	var hs []byte
+	if hs, p, err = slicePacket(b[:n]); err != nil {
+		return nil, nil, nil, err
+	}
+	if h, err = ParseHeader(hs); err != nil {
+		return nil, nil, nil, err
+	}
+	if nn > 0 {
+		cm = new(ControlMessage)
+		if err := cm.Parse(oob[:nn]); err != nil {
+			return nil, nil, nil, err
+		}
+	}
+	if src != nil && cm != nil {
+		cm.Src = src.IP
+	}
+	return
+}
+
+func (c *packetHandler) writeTo(h *Header, p []byte, cm *ControlMessage) error {
+	oob := cm.Marshal()
+	wh, err := h.Marshal()
+	if err != nil {
+		return err
+	}
+	dst := new(net.IPAddr)
+	if cm != nil {
+		if ip := cm.Dst.To4(); ip != nil {
+			dst.IP = ip
+		}
+	}
+	if dst.IP == nil {
+		dst.IP = h.Dst
+	}
+	wh = append(wh, p...)
+	_, _, err = c.WriteMsgIP(wh, oob, dst)
+	return err
+}
diff --git a/vendor/golang.org/x/net/ipv4/packet_go1_9.go b/vendor/golang.org/x/net/ipv4/packet_go1_9.go
new file mode 100644
index 0000000..082c36d
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/packet_go1_9.go
@@ -0,0 +1,67 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
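+
+// A minimal sketch of the WriteTo contract documented in packet.go,
+// assuming a RawConn r, a payload p, and a destination IP dst:
+//
+//	h := &ipv4.Header{
+//		Version:  ipv4.Version,
+//		Len:      ipv4.HeaderLen,
+//		TotalLen: ipv4.HeaderLen + len(p),
+//		TTL:      64,
+//		Protocol: 1,   // ICMP
+//		Dst:      dst, // ID, Checksum and Src are filled in by the platform
+//	}
+//	if err := r.WriteTo(h, p, nil); err != nil {
+//		// handle error
+//	}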
+ +// +build go1.9 + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (c *packetHandler) readFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { + c.rawOpt.RLock() + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + if err := c.RecvMsg(&m, 0); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + var hs []byte + if hs, p, err = slicePacket(b[:m.N]); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + if h, err = ParseHeader(hs); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + if m.NN > 0 { + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + } + if src, ok := m.Addr.(*net.IPAddr); ok && cm != nil { + cm.Src = src.IP + } + return +} + +func (c *packetHandler) writeTo(h *Header, p []byte, cm *ControlMessage) error { + m := socket.Message{ + OOB: cm.Marshal(), + } + wh, err := h.Marshal() + if err != nil { + return err + } + m.Buffers = [][]byte{wh, p} + dst := new(net.IPAddr) + if cm != nil { + if ip := cm.Dst.To4(); ip != nil { + dst.IP = ip + } + } + if dst.IP == nil { + dst.IP = h.Dst + } + m.Addr = dst + if err := c.SendMsg(&m, 0); err != nil { + return &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return nil +} diff --git a/vendor/golang.org/x/net/ipv4/payload.go b/vendor/golang.org/x/net/ipv4/payload.go new file mode 100644 index 0000000..f95f811 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload.go @@ -0,0 +1,23 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ControlMessage for ReadFrom and WriteTo +// methods of PacketConn is not implemented. + +// A payloadHandler represents the IPv4 datagram payload handler. +type payloadHandler struct { + net.PacketConn + *socket.Conn + rawOpt +} + +func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil && c.Conn != nil } diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg.go b/vendor/golang.org/x/net/ipv4/payload_cmsg.go new file mode 100644 index 0000000..3f06d76 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg.go @@ -0,0 +1,36 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !nacl,!plan9,!windows + +package ipv4 + +import ( + "net" + "syscall" +) + +// ReadFrom reads a payload of the received IPv4 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. 
+func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, syscall.EINVAL + } + return c.readFrom(b) +} + +// WriteTo writes a payload of the IPv4 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the datagram path and the outgoing interface to be specified. +// Currently only Darwin and Linux support this. The cm may be nil if +// control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, syscall.EINVAL + } + return c.writeTo(b, cm, dst) +} diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go b/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go new file mode 100644 index 0000000..d26ccd9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go @@ -0,0 +1,59 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 +// +build !nacl,!plan9,!windows + +package ipv4 + +import "net" + +func (c *payloadHandler) readFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + c.rawOpt.RLock() + oob := NewControlMessage(c.rawOpt.cflags) + c.rawOpt.RUnlock() + var nn int + switch c := c.PacketConn.(type) { + case *net.UDPConn: + if n, nn, _, src, err = c.ReadMsgUDP(b, oob); err != nil { + return 0, nil, nil, err + } + case *net.IPConn: + nb := make([]byte, maxHeaderLen+len(b)) + if n, nn, _, src, err = c.ReadMsgIP(nb, oob); err != nil { + return 0, nil, nil, err + } + hdrlen := int(nb[0]&0x0f) << 2 + copy(b, nb[hdrlen:]) + n -= hdrlen + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Err: errInvalidConnType} + } + if nn > 0 { + cm = new(ControlMessage) + if err = cm.Parse(oob[:nn]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + } + if cm != nil { + cm.Src = netAddrToIP4(src) + } + return +} + +func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + oob := cm.Marshal() + if dst == nil { + return 0, &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errMissingAddress} + } + switch c := c.PacketConn.(type) { + case *net.UDPConn: + n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr)) + case *net.IPConn: + n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr)) + default: + return 0, &net.OpError{Op: "write", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Addr: opAddr(dst), Err: errInvalidConnType} + } + return +} diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go b/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go new file mode 100644 index 0000000..2f19311 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go @@ -0,0 +1,67 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
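+
+// A minimal sketch of the payload path implemented below, assuming an
+// *ipv4.PacketConn p with FlagTTL and FlagDst enabled via SetControlMessage:
+//
+//	b := make([]byte, 1500)
+//	n, cm, src, err := p.ReadFrom(b)
+//	if err != nil {
+//		// handle error
+//	}
+//	if cm != nil { // cm is nil when no control messages were requested
+//		fmt.Println(n, src, cm.TTL, cm.Dst)
+//	}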
+ +// +build go1.9 +// +build !nacl,!plan9,!windows + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (c *payloadHandler) readFrom(b []byte) (int, *ControlMessage, net.Addr, error) { + c.rawOpt.RLock() + m := socket.Message{ + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + switch c.PacketConn.(type) { + case *net.UDPConn: + m.Buffers = [][]byte{b} + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + case *net.IPConn: + h := make([]byte, HeaderLen) + m.Buffers = [][]byte{h, b} + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + hdrlen := int(h[0]&0x0f) << 2 + if hdrlen > len(h) { + d := hdrlen - len(h) + copy(b, b[d:]) + m.N -= d + } else { + m.N -= hdrlen + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType} + } + var cm *ControlMessage + if m.NN > 0 { + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + cm.Src = netAddrToIP4(m.Addr) + } + return m.N, cm, m.Addr, nil +} + +func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (int, error) { + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: cm.Marshal(), + Addr: dst, + } + err := c.SendMsg(&m, 0) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return m.N, err +} diff --git a/vendor/golang.org/x/net/ipv4/payload_nocmsg.go b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go new file mode 100644 index 0000000..3926de7 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go @@ -0,0 +1,42 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 windows + +package ipv4 + +import ( + "net" + "syscall" +) + +// ReadFrom reads a payload of the received IPv4 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, syscall.EINVAL + } + if n, src, err = c.PacketConn.ReadFrom(b); err != nil { + return 0, nil, nil, err + } + return +} + +// WriteTo writes a payload of the IPv4 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the datagram path and the outgoing interface to be specified. +// Currently only Darwin and Linux support this. The cm may be nil if +// control of the outgoing datagram is not required. 
+func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, syscall.EINVAL + } + if dst == nil { + return 0, errMissingAddress + } + return c.PacketConn.WriteTo(b, dst) +} diff --git a/vendor/golang.org/x/net/ipv4/readwrite_go1_8_test.go b/vendor/golang.org/x/net/ipv4/readwrite_go1_8_test.go new file mode 100644 index 0000000..1cd926e --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/readwrite_go1_8_test.go @@ -0,0 +1,248 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package ipv4_test + +import ( + "bytes" + "fmt" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +func BenchmarkPacketConnReadWriteUnicast(b *testing.B) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + b.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph, err := (&ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TotalLen: ipv4.HeaderLen + len(payload), + TTL: 1, + Protocol: iana.ProtocolReserved, + Src: net.IPv4(192, 0, 2, 1), + Dst: net.IPv4(192, 0, 2, 254), + }).Marshal() + if err != nil { + b.Fatal(err) + } + greh := []byte{0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + bb := make([]byte, 128) + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback); ifi != nil { + cm.IfIndex = ifi.Index + } + + b.Run("UDP", func(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv4.FlagTTL | ipv4.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(payload, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(payload, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + }) + b.Run("IP", func(b *testing.B) { + switch runtime.GOOS { + case "netbsd": + b.Skip("need to configure gre on netbsd") + case "openbsd": + b.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip4:%d", iana.ProtocolGRE), "127.0.0.1") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv4.FlagTTL | ipv4.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(datagram, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(datagram, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + }) +} + +func 
TestPacketConnConcurrentReadWriteUnicast(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph, err := (&ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TotalLen: ipv4.HeaderLen + len(payload), + TTL: 1, + Protocol: iana.ProtocolReserved, + Src: net.IPv4(192, 0, 2, 1), + Dst: net.IPv4(192, 0, 2, 254), + }).Marshal() + if err != nil { + t.Fatal(err) + } + greh := []byte{0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + + t.Run("UDP", func(t *testing.T) { + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr()) + }) + }) + t.Run("IP", func(t *testing.T) { + switch runtime.GOOS { + case "netbsd": + t.Skip("need to configure gre on netbsd") + case "openbsd": + t.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip4:%d", iana.ProtocolGRE), "127.0.0.1") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr()) + }) + }) +} + +func testPacketConnConcurrentReadWriteUnicast(t *testing.T, p *ipv4.PacketConn, data []byte, dst net.Addr) { + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + cf := ipv4.FlagTTL | ipv4.FlagSrc | ipv4.FlagDst | ipv4.FlagInterface + + if err := p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + b := make([]byte, 128) + n, cm, _, err := p.ReadFrom(b) + if err != nil { + t.Error(err) + return + } + if !bytes.Equal(b[:n], data) { + t.Errorf("got %#v; want %#v", b[:n], data) + return + } + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + return + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + n, err := p.WriteTo(data, &cm, dst) + if err != nil { + t.Error(err) + return + } + if n != len(data) { + t.Errorf("got %d; want %d", n, len(data)) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + go writer(i%2 != 0) + + } + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/ipv4/readwrite_go1_9_test.go b/vendor/golang.org/x/net/ipv4/readwrite_go1_9_test.go new file mode 100644 index 0000000..365de02 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/readwrite_go1_9_test.go @@ -0,0 +1,388 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
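+
+// The go1.9 variant below additionally covers the batch API. A minimal
+// sketch, assuming an *ipv4.PacketConn p and a destination dst:
+//
+//	wms := []ipv4.Message{{Buffers: [][]byte{payload}, Addr: dst}}
+//	if _, err := p.WriteBatch(wms, 0); err != nil {
+//		// handle error
+//	}
+//	rms := []ipv4.Message{{Buffers: [][]byte{make([]byte, 128)}}}
+//	if n, err := p.ReadBatch(rms, 0); err == nil && n == 1 {
+//		// rms[0].N holds the length of the received payload
+//	}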
+ +// +build go1.9 + +package ipv4_test + +import ( + "bytes" + "fmt" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +func BenchmarkPacketConnReadWriteUnicast(b *testing.B) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + b.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph, err := (&ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TotalLen: ipv4.HeaderLen + len(payload), + TTL: 1, + Protocol: iana.ProtocolReserved, + Src: net.IPv4(192, 0, 2, 1), + Dst: net.IPv4(192, 0, 2, 254), + }).Marshal() + if err != nil { + b.Fatal(err) + } + greh := []byte{0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + bb := make([]byte, 128) + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback); ifi != nil { + cm.IfIndex = ifi.Index + } + + b.Run("UDP", func(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv4.FlagTTL | ipv4.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + wms := []ipv4.Message{ + { + Buffers: [][]byte{payload}, + Addr: dst, + OOB: cm.Marshal(), + }, + } + rms := []ipv4.Message{ + { + Buffers: [][]byte{bb}, + OOB: ipv4.NewControlMessage(cf), + }, + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(payload, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(payload, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("Batch", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteBatch(wms, 0); err != nil { + b.Fatal(err) + } + if _, err := p.ReadBatch(rms, 0); err != nil { + b.Fatal(err) + } + } + }) + }) + b.Run("IP", func(b *testing.B) { + switch runtime.GOOS { + case "netbsd": + b.Skip("need to configure gre on netbsd") + case "openbsd": + b.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip4:%d", iana.ProtocolGRE), "127.0.0.1") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv4.FlagTTL | ipv4.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + wms := []ipv4.Message{ + { + Buffers: [][]byte{datagram}, + Addr: dst, + OOB: cm.Marshal(), + }, + } + rms := []ipv4.Message{ + { + Buffers: [][]byte{bb}, + OOB: ipv4.NewControlMessage(cf), + }, + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(datagram, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(datagram, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("Batch", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err 
:= p.WriteBatch(wms, 0); err != nil { + b.Fatal(err) + } + if _, err := p.ReadBatch(rms, 0); err != nil { + b.Fatal(err) + } + } + }) + }) +} + +func TestPacketConnConcurrentReadWriteUnicast(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph, err := (&ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TotalLen: ipv4.HeaderLen + len(payload), + TTL: 1, + Protocol: iana.ProtocolReserved, + Src: net.IPv4(192, 0, 2, 1), + Dst: net.IPv4(192, 0, 2, 254), + }).Marshal() + if err != nil { + t.Fatal(err) + } + greh := []byte{0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + + t.Run("UDP", func(t *testing.T) { + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr(), false) + }) + t.Run("Batch", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr(), true) + }) + }) + t.Run("IP", func(t *testing.T) { + switch runtime.GOOS { + case "netbsd": + t.Skip("need to configure gre on netbsd") + case "openbsd": + t.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip4:%d", iana.ProtocolGRE), "127.0.0.1") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr(), false) + }) + t.Run("Batch", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr(), true) + }) + }) +} + +func testPacketConnConcurrentReadWriteUnicast(t *testing.T, p *ipv4.PacketConn, data []byte, dst net.Addr, batch bool) { + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + cf := ipv4.FlagTTL | ipv4.FlagSrc | ipv4.FlagDst | ipv4.FlagInterface + + if err := p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + b := make([]byte, 128) + n, cm, _, err := p.ReadFrom(b) + if err != nil { + t.Error(err) + return + } + if !bytes.Equal(b[:n], data) { + t.Errorf("got %#v; want %#v", b[:n], data) + return + } + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + return + } + } + batchReader := func() { + defer wg.Done() + ms := []ipv4.Message{ + { + Buffers: [][]byte{make([]byte, 128)}, + OOB: ipv4.NewControlMessage(cf), + }, + } + n, err := p.ReadBatch(ms, 0) + if err != nil { + t.Error(err) + return + } + if n != len(ms) { + t.Errorf("got %d; want %d", n, len(ms)) + return + } + var cm ipv4.ControlMessage + if err := cm.Parse(ms[0].OOB[:ms[0].NN]); err != nil { + t.Error(err) + return + } + var b []byte + if _, ok := dst.(*net.IPAddr); ok { + var h ipv4.Header + if err := h.Parse(ms[0].Buffers[0][:ms[0].N]); err != nil { + t.Error(err) + return + } + b = ms[0].Buffers[0][h.Len:ms[0].N] + } else { + b = ms[0].Buffers[0][:ms[0].N] + } + if !bytes.Equal(b, data) { + t.Errorf("got %#v; want %#v", b, data) + return + } + s := cm.String() + if 
strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + return + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + n, err := p.WriteTo(data, &cm, dst) + if err != nil { + t.Error(err) + return + } + if n != len(data) { + t.Errorf("got %d; want %d", n, len(data)) + return + } + } + batchWriter := func(toggle bool) { + defer wg.Done() + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + ms := []ipv4.Message{ + { + Buffers: [][]byte{data}, + OOB: cm.Marshal(), + Addr: dst, + }, + } + n, err := p.WriteBatch(ms, 0) + if err != nil { + t.Error(err) + return + } + if n != len(ms) { + t.Errorf("got %d; want %d", n, len(ms)) + return + } + if ms[0].N != len(data) { + t.Errorf("got %d; want %d", ms[0].N, len(data)) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + if batch { + go batchReader() + } else { + go reader() + } + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + if batch { + go batchWriter(i%2 != 0) + } else { + go writer(i%2 != 0) + } + + } + wg.Add(N) + for i := 0; i < N; i++ { + if batch { + go batchReader() + } else { + go reader() + } + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/ipv4/readwrite_test.go b/vendor/golang.org/x/net/ipv4/readwrite_test.go new file mode 100644 index 0000000..3896a8a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/readwrite_test.go @@ -0,0 +1,140 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
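The go1.9 file above exercises the batch I/O path (ReadBatch/WriteBatch over a []ipv4.Message slice) alongside the single-message calls. A minimal sketch of that API outside the test harness might look like the following; the loopback listener and payload are illustrative, and on platforms without sendmmsg/recvmmsg the batch calls fall back to one message per call:

    package main

    import (
    	"log"
    	"net"

    	"golang.org/x/net/ipv4"
    )

    func main() {
    	c, err := net.ListenPacket("udp4", "127.0.0.1:0")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer c.Close()
    	p := ipv4.NewPacketConn(c)

    	// One ipv4.Message per datagram; Buffers allows scatter/gather.
    	wms := []ipv4.Message{{Buffers: [][]byte{[]byte("ping")}, Addr: c.LocalAddr()}}
    	if _, err := p.WriteBatch(wms, 0); err != nil {
    		log.Fatal(err)
    	}
    	rms := []ipv4.Message{{Buffers: [][]byte{make([]byte, 64)}}}
    	n, err := p.ReadBatch(rms, 0)
    	if err != nil {
    		log.Fatal(err)
    	}
    	// rms[0].N is the payload length of the first received message.
    	log.Printf("%d message(s); payload %q", n, rms[0].Buffers[0][:rms[0].N])
    }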
+ +package ipv4_test + +import ( + "bytes" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +func BenchmarkReadWriteUnicast(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + + dst := c.LocalAddr() + wb, rb := []byte("HELLO-R-U-THERE"), make([]byte, 128) + + b.Run("NetUDP", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(wb, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(rb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("IPv4UDP", func(b *testing.B) { + p := ipv4.NewPacketConn(c) + cf := ipv4.FlagTTL | ipv4.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + cm := ipv4.ControlMessage{TTL: 1} + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi != nil { + cm.IfIndex = ifi.Index + } + + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(wb, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(rb); err != nil { + b.Fatal(err) + } + } + }) +} + +func TestPacketConnConcurrentReadWriteUnicastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + defer p.Close() + + dst := c.LocalAddr() + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + cf := ipv4.FlagTTL | ipv4.FlagSrc | ipv4.FlagDst | ipv4.FlagInterface + wb := []byte("HELLO-R-U-THERE") + + if err := p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + rb := make([]byte, 128) + if n, cm, _, err := p.ReadFrom(rb); err != nil { + t.Error(err) + return + } else if !bytes.Equal(rb[:n], wb) { + t.Errorf("got %v; want %v", rb[:n], wb) + return + } else { + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + } + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + if n, err := p.WriteTo(wb, &cm, dst); err != nil { + t.Error(err) + return + } else if n != len(wb) { + t.Errorf("got %d; want %d", n, len(wb)) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + go writer(i%2 != 0) + } + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt.go b/vendor/golang.org/x/net/ipv4/sockopt.go new file mode 100644 index 0000000..22e90c0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt.go @@ -0,0 +1,44 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
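The read/write tests above all follow the same pattern: enable per-packet metadata with SetControlMessage, then receive it through the *ControlMessage result of ReadFrom. A stripped-down sketch of that flow, with a loopback address as an illustrative placeholder (cm may be nil where the platform lacks support):

    package main

    import (
    	"log"
    	"net"

    	"golang.org/x/net/ipv4"
    )

    func main() {
    	c, err := net.ListenPacket("udp4", "127.0.0.1:0")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer c.Close()
    	p := ipv4.NewPacketConn(c)

    	// Ask the kernel to deliver TTL, destination and interface metadata.
    	if err := p.SetControlMessage(ipv4.FlagTTL|ipv4.FlagDst|ipv4.FlagInterface, true); err != nil {
    		log.Fatal(err) // not supported on every platform
    	}
    	if _, err := c.WriteTo([]byte("hi"), c.LocalAddr()); err != nil {
    		log.Fatal(err)
    	}
    	b := make([]byte, 64)
    	n, cm, src, err := p.ReadFrom(b)
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Printf("%d bytes from %v, cm=%v", n, src, cm)
    }

The sockopt.go table that follows defines the sticky options these flags ultimately toggle.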
+ +package ipv4 + +import "golang.org/x/net/internal/socket" + +// Sticky socket options +const ( + ssoTOS = iota // header field for unicast packet + ssoTTL // header field for unicast packet + ssoMulticastTTL // header field for multicast packet + ssoMulticastInterface // outbound interface for multicast packet + ssoMulticastLoopback // loopback for multicast packet + ssoReceiveTTL // header field on received packet + ssoReceiveDst // header field on received packet + ssoReceiveInterface // inbound interface on received packet + ssoPacketInfo // inbound or outbound packet path + ssoHeaderPrepend // ipv4 header prepend + ssoStripHeader // strip ipv4 header + ssoICMPFilter // icmp filter + ssoJoinGroup // any-source multicast + ssoLeaveGroup // any-source multicast + ssoJoinSourceGroup // source-specific multicast + ssoLeaveSourceGroup // source-specific multicast + ssoBlockSourceGroup // any-source or source-specific multicast + ssoUnblockSourceGroup // any-source or source-specific multicast + ssoAttachFilter // attach BPF for filtering inbound traffic +) + +// Sticky socket option value types +const ( + ssoTypeIPMreq = iota + 1 + ssoTypeIPMreqn + ssoTypeGroupReq + ssoTypeGroupSourceReq +) + +// A sockOpt represents a binding for sticky socket option. +type sockOpt struct { + socket.Option + typ int // hint for option value type; optional +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_posix.go b/vendor/golang.org/x/net/ipv4/sockopt_posix.go new file mode 100644 index 0000000..e96955b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_posix.go @@ -0,0 +1,71 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + switch so.typ { + case ssoTypeIPMreqn: + return so.getIPMreqn(c) + default: + return so.getMulticastIf(c) + } +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + switch so.typ { + case ssoTypeIPMreqn: + return so.setIPMreqn(c, ifi, nil) + default: + return so.setMulticastIf(c, ifi) + } +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + b := make([]byte, so.Len) + n, err := so.Get(c, b) + if err != nil { + return nil, err + } + if n != sizeofICMPFilter { + return nil, errOpNoSupport + } + return (*ICMPFilter)(unsafe.Pointer(&b[0])), nil +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + b := (*[sizeofICMPFilter]byte)(unsafe.Pointer(f))[:sizeofICMPFilter] + return so.Set(c, b) +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + switch so.typ { + case ssoTypeIPMreq: + return so.setIPMreq(c, ifi, grp) + case ssoTypeIPMreqn: + return so.setIPMreqn(c, ifi, grp) + case ssoTypeGroupReq: + return so.setGroupReq(c, ifi, grp) + default: + return errOpNoSupport + } +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return so.setGroupSourceReq(c, ifi, grp, src) +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return so.setAttachFilter(c, f) +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_stub.go b/vendor/golang.org/x/net/ipv4/sockopt_stub.go new file mode 100644 index
0000000..23249b7 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_stub.go @@ -0,0 +1,42 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +import ( + "net" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + return errOpNoSupport +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + return errOpNoSupport +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreq.go b/vendor/golang.org/x/net/ipv4/sys_asmreq.go new file mode 100644 index 0000000..0388cba --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreq.go @@ -0,0 +1,119 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd solaris windows + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + mreq := ipMreq{Multiaddr: [4]byte{grp[0], grp[1], grp[2], grp[3]}} + if err := setIPMreqInterface(&mreq, ifi); err != nil { + return err + } + b := (*[sizeofIPMreq]byte)(unsafe.Pointer(&mreq))[:sizeofIPMreq] + return so.Set(c, b) +} + +func (so *sockOpt) getMulticastIf(c *socket.Conn) (*net.Interface, error) { + var b [4]byte + if _, err := so.Get(c, b[:]); err != nil { + return nil, err + } + ifi, err := netIP4ToInterface(net.IPv4(b[0], b[1], b[2], b[3])) + if err != nil { + return nil, err + } + return ifi, nil +} + +func (so *sockOpt) setMulticastIf(c *socket.Conn, ifi *net.Interface) error { + ip, err := netInterfaceToIP4(ifi) + if err != nil { + return err + } + var b [4]byte + copy(b[:], ip) + return so.Set(c, b[:]) +} + +func setIPMreqInterface(mreq *ipMreq, ifi *net.Interface) error { + if ifi == nil { + return nil + } + ifat, err := ifi.Addrs() + if err != nil { + return err + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip := ifa.IP.To4(); ip != nil { + copy(mreq.Interface[:], ip) + return nil + } + case *net.IPNet: + if ip := ifa.IP.To4(); ip != nil { + copy(mreq.Interface[:], ip) + return nil + } + } + } + return errNoSuchInterface +} + +func netIP4ToInterface(ip net.IP) (*net.Interface, error) { + ift, err := net.Interfaces() + if err != nil { + return nil, err + } + for _, ifi := range ift { + ifat, err := ifi.Addrs() + if err != nil { + return nil, err + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip.Equal(ifa.IP) { + return &ifi, nil + } + case *net.IPNet: + if ip.Equal(ifa.IP) { + return &ifi, nil + } + } + } + } + return nil, errNoSuchInterface +} + +func 
netInterfaceToIP4(ifi *net.Interface) (net.IP, error) { + if ifi == nil { + return net.IPv4zero.To4(), nil + } + ifat, err := ifi.Addrs() + if err != nil { + return nil, err + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip := ifa.IP.To4(); ip != nil { + return ip, nil + } + case *net.IPNet: + if ip := ifa.IP.To4(); ip != nil { + return ip, nil + } + } + } + return nil, errNoSuchInterface +} diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go b/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go new file mode 100644 index 0000000..f391920 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go @@ -0,0 +1,25 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) getMulticastIf(c *socket.Conn) (*net.Interface, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setMulticastIf(c *socket.Conn, ifi *net.Interface) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreqn.go b/vendor/golang.org/x/net/ipv4/sys_asmreqn.go new file mode 100644 index 0000000..1f24f69 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreqn.go @@ -0,0 +1,42 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd linux + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getIPMreqn(c *socket.Conn) (*net.Interface, error) { + b := make([]byte, so.Len) + if _, err := so.Get(c, b); err != nil { + return nil, err + } + mreqn := (*ipMreqn)(unsafe.Pointer(&b[0])) + if mreqn.Ifindex == 0 { + return nil, nil + } + ifi, err := net.InterfaceByIndex(int(mreqn.Ifindex)) + if err != nil { + return nil, err + } + return ifi, nil +} + +func (so *sockOpt) setIPMreqn(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var mreqn ipMreqn + if ifi != nil { + mreqn.Ifindex = int32(ifi.Index) + } + if grp != nil { + mreqn.Multiaddr = [4]byte{grp[0], grp[1], grp[2], grp[3]} + } + b := (*[sizeofIPMreqn]byte)(unsafe.Pointer(&mreqn))[:sizeofIPMreqn] + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go b/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go new file mode 100644 index 0000000..0711d3d --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!freebsd,!linux + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getIPMreqn(c *socket.Conn) (*net.Interface, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setIPMreqn(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sys_bpf.go b/vendor/golang.org/x/net/ipv4/sys_bpf.go new file mode 100644 index 0000000..9f30b73 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_bpf.go @@ -0,0 +1,23 @@ +// Copyright 2017 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package ipv4 + +import ( + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + prog := sockFProg{ + Len: uint16(len(f)), + Filter: (*sockFilter)(unsafe.Pointer(&f[0])), + } + b := (*[sizeofSockFprog]byte)(unsafe.Pointer(&prog))[:sizeofSockFprog] + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go b/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go new file mode 100644 index 0000000..9a21320 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux + +package ipv4 + +import ( + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sys_bsd.go b/vendor/golang.org/x/net/ipv4/sys_bsd.go new file mode 100644 index 0000000..58256dd --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_bsd.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build netbsd openbsd + +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 1}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) diff --git a/vendor/golang.org/x/net/ipv4/sys_darwin.go b/vendor/golang.org/x/net/ipv4/sys_darwin.go new file mode 100644 index 0000000..e8fb191 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_darwin.go @@ -0,0 +1,93 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
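On NetBSD/OpenBSD the table above binds ssoJoinGroup to IP_ADD_MEMBERSHIP with an ipMreq, while other platforms below use MCAST_JOIN_GROUP with a groupReq; either way callers go through PacketConn.JoinGroup. A sketch, where the interface name, port, and group address are assumptions:

    package main

    import (
    	"log"
    	"net"

    	"golang.org/x/net/ipv4"
    )

    func main() {
    	c, err := net.ListenPacket("udp4", "0.0.0.0:1024")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer c.Close()
    	p := ipv4.NewPacketConn(c)

    	ifi, err := net.InterfaceByName("eth0") // assumed interface name
    	if err != nil {
    		log.Fatal(err)
    	}
    	group := &net.UDPAddr{IP: net.IPv4(224, 0, 0, 250)}
    	// Any-source multicast join; the sockOpt table picks the wire format.
    	if err := p.JoinGroup(ifi, group); err != nil {
    		log.Fatal(err)
    	}
    	defer p.LeaveGroup(ifi, group)
    }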
+ +package ipv4 + +import ( + "net" + "strconv" + "strings" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoStripHeader: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_STRIPHDR, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) + +func init() { + // Seems like kern.osreldate is veiled on latest OS X. We use + // kern.osrelease instead. + s, err := syscall.Sysctl("kern.osrelease") + if err != nil { + return + } + ss := strings.Split(s, ".") + if len(ss) == 0 { + return + } + // The IP_PKTINFO and protocol-independent multicast API were + // introduced in OS X 10.7 (Darwin 11). But it looks like + // those features require OS X 10.8 (Darwin 12) or above. + // See http://support.apple.com/kb/HT1633. 
+ if mjver, err := strconv.Atoi(ss[0]); err != nil || mjver < 12 { + return + } + ctlOpts[ctlPacketInfo].name = sysIP_PKTINFO + ctlOpts[ctlPacketInfo].length = sizeofInetPktinfo + ctlOpts[ctlPacketInfo].marshal = marshalPacketInfo + ctlOpts[ctlPacketInfo].parse = parsePacketInfo + sockOpts[ssoPacketInfo] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVPKTINFO, Len: 4}} + sockOpts[ssoMulticastInterface] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn} + sockOpts[ssoJoinGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq} + sockOpts[ssoLeaveGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq} + sockOpts[ssoJoinSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoLeaveSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoBlockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoUnblockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} +} + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 132)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_dragonfly.go b/vendor/golang.org/x/net/ipv4/sys_dragonfly.go new file mode 100644 index 0000000..859764f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_dragonfly.go @@ -0,0 +1,35 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) diff --git a/vendor/golang.org/x/net/ipv4/sys_freebsd.go b/vendor/golang.org/x/net/ipv4/sys_freebsd.go new file mode 100644 index 0000000..b800324 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_freebsd.go @@ -0,0 +1,76 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
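The tables also differ in how ssoMulticastInterface is encoded: Dragonfly above takes a 4-byte interface address, while FreeBSD (version-gated in the init below) and Linux use an ip_mreqn carrying an interface index. Both shapes sit behind PacketConn.SetMulticastInterface; a sketch with an assumed interface name:

    package main

    import (
    	"log"
    	"net"

    	"golang.org/x/net/ipv4"
    )

    func main() {
    	c, err := net.ListenPacket("udp4", "0.0.0.0:1024")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer c.Close()
    	p := ipv4.NewPacketConn(c)

    	ifi, err := net.InterfaceByName("eth0") // assumed interface name
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Selects the outbound interface for subsequent multicast sends;
    	// the platform table decides between ip_mreqn and a raw address.
    	if err := p.SetMulticastInterface(ifi); err != nil {
    		log.Fatal(err)
    	}
    	if err := p.SetMulticastTTL(16); err != nil {
    		log.Fatal(err)
    	}
    }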
+ +package ipv4 + +import ( + "net" + "runtime" + "strings" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func init() { + freebsdVersion, _ = syscall.SysctlUint32("kern.osreldate") + if freebsdVersion >= 1000000 { + sockOpts[ssoMulticastInterface] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn} + } + if runtime.GOOS == "freebsd" && runtime.GOARCH == "386" { + archs, _ := syscall.Sysctl("kern.supported_archs") + for _, s := range strings.Fields(archs) { + if s == "amd64" { + freebsd32o64 = true + break + } + } + } +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gr.Group)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gsr.Group)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(&gsr.Source)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_linux.go b/vendor/golang.org/x/net/ipv4/sys_linux.go new file mode 100644 index 0000000..60defe1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_linux.go @@ -0,0 +1,59 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_TTL, 1, marshalTTL, parseTTL}, + ctlPacketInfo: {sysIP_PKTINFO, sizeofInetPktinfo, marshalPacketInfo, parsePacketInfo}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoPacketInfo: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_PKTINFO, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolReserved, Name: sysICMP_FILTER, Len: sizeofICMPFilter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoAttachFilter: {Option: socket.Option{Level: sysSOL_SOCKET, Name: sysSO_ATTACH_FILTER, Len: sizeofSockFprog}}, + } +) + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gr.Group)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gsr.Group)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(&gsr.Source)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_solaris.go b/vendor/golang.org/x/net/ipv4/sys_solaris.go new file mode 100644 index 0000000..832fef1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_solaris.go @@ -0,0 +1,57 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
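The Linux table above is the only one that wires ssoAttachFilter (SO_ATTACH_FILTER), which backs PacketConn.SetBPF. A sketch attaching a trivial classic-BPF program that drops everything; the raw ICMP listener needs privileges and is illustrative only:

    package main

    import (
    	"log"
    	"net"

    	"golang.org/x/net/bpf"
    	"golang.org/x/net/ipv4"
    )

    func main() {
    	c, err := net.ListenPacket("ip4:icmp", "0.0.0.0")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer c.Close()
    	p := ipv4.NewPacketConn(c)

    	// "Return 0 bytes" rejects every packet; a real filter would inspect
    	// the payload with Load/Jump instructions before deciding.
    	prog, err := bpf.Assemble([]bpf.Instruction{
    		bpf.RetConstant{Val: 0},
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	if err := p.SetBPF(prog); err != nil {
    		log.Fatal(err) // non-Linux platforms report no support
    	}
    }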
+ +package ipv4 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 4, marshalTTL, parseTTL}, + ctlPacketInfo: {sysIP_PKTINFO, sizeofInetPktinfo, marshalPacketInfo, parsePacketInfo}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 1}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoPacketInfo: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVPKTINFO, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 260)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_ssmreq.go b/vendor/golang.org/x/net/ipv4/sys_ssmreq.go new file mode 100644 index 0000000..ae5704e --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_ssmreq.go @@ -0,0 +1,54 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
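sys_ssmreq.go, opening here, marshals groupReq and groupSourceReq for the MCAST_* options (including the freebsd32o64 repadding for 32-bit binaries on 64-bit FreeBSD kernels). These structures back the source-specific multicast methods; a sketch using JoinSourceSpecificGroup, with the interface name, group (232/8 is the SSM block), and source address all assumptions:

    package main

    import (
    	"log"
    	"net"

    	"golang.org/x/net/ipv4"
    )

    func main() {
    	c, err := net.ListenPacket("udp4", "0.0.0.0:1024")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer c.Close()
    	p := ipv4.NewPacketConn(c)

    	ifi, err := net.InterfaceByName("eth0") // assumed interface name
    	if err != nil {
    		log.Fatal(err)
    	}
    	group := &net.UDPAddr{IP: net.IPv4(232, 0, 1, 1)}  // SSM range
    	source := &net.UDPAddr{IP: net.IPv4(192, 0, 2, 1)} // assumed sender
    	// Maps to MCAST_JOIN_SOURCE_GROUP via setGroupSourceReq.
    	if err := p.JoinSourceSpecificGroup(ifi, group, source); err != nil {
    		log.Fatal(err)
    	}
    	defer p.LeaveSourceSpecificGroup(ifi, group, source)
    }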
+ +// +build darwin freebsd linux solaris + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +var freebsd32o64 bool + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var gr groupReq + if ifi != nil { + gr.Interface = uint32(ifi.Index) + } + gr.setGroup(grp) + var b []byte + if freebsd32o64 { + var d [sizeofGroupReq + 4]byte + s := (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr))[:sizeofGroupReq] + } + return so.Set(c, b) +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + var gsr groupSourceReq + if ifi != nil { + gsr.Interface = uint32(ifi.Index) + } + gsr.setSourceGroup(grp, src) + var b []byte + if freebsd32o64 { + var d [sizeofGroupSourceReq + 4]byte + s := (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr))[:sizeofGroupSourceReq] + } + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go b/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go new file mode 100644 index 0000000..e6b7623 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!freebsd,!linux,!solaris + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sys_stub.go b/vendor/golang.org/x/net/ipv4/sys_stub.go new file mode 100644 index 0000000..4f07647 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_stub.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{} +) diff --git a/vendor/golang.org/x/net/ipv4/sys_windows.go b/vendor/golang.org/x/net/ipv4/sys_windows.go new file mode 100644 index 0000000..b0913d5 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_windows.go @@ -0,0 +1,67 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +const ( + // See ws2tcpip.h. 
+ sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_DONTFRAGMENT = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0xf + sysIP_DROP_SOURCE_MEMBERSHIP = 0x10 + sysIP_PKTINFO = 0x13 + + sizeofInetPktinfo = 0x8 + sizeofIPMreq = 0x8 + sizeofIPMreqSource = 0xc +) + +type inetPktinfo struct { + Addr [4]byte + Ifindex int32 +} + +type ipMreq struct { + Multiaddr [4]byte + Interface [4]byte +} + +type ipMreqSource struct { + Multiaddr [4]byte + Sourceaddr [4]byte + Interface [4]byte +} + +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms738586(v=vs.85).aspx +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} diff --git a/vendor/golang.org/x/net/ipv4/unicast_test.go b/vendor/golang.org/x/net/ipv4/unicast_test.go new file mode 100644 index 0000000..02c089f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/unicast_test.go @@ -0,0 +1,247 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
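The unicast tests that follow drive the TOS and TTL accessors through ipv4.Conn (for stream sockets) and ipv4.PacketConn. Outside a test, the same knobs on a dialed TCP connection look like this; the dial target and DSCP value are placeholders:

    package main

    import (
    	"log"
    	"net"

    	"golang.org/x/net/ipv4"
    )

    func main() {
    	c, err := net.Dial("tcp4", "192.0.2.1:80") // placeholder address
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer c.Close()

    	cc := ipv4.NewConn(c)
    	if err := cc.SetTOS(0x28); err != nil { // example DSCP/ECN byte
    		log.Fatal(err)
    	}
    	if err := cc.SetTTL(64); err != nil {
    		log.Fatal(err)
    	}
    	tos, _ := cc.TOS()
    	ttl, _ := cc.TTL()
    	log.Printf("TOS=%#x TTL=%d", tos, ttl)
    }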
+ +package ipv4_test + +import ( + "bytes" + "net" + "os" + "runtime" + "testing" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +func TestPacketConnReadWriteUnicastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + defer p.Close() + + dst := c.LocalAddr() + cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface + wb := []byte("HELLO-R-U-THERE") + + for i, toggle := range []bool{true, false, true} { + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + p.SetTTL(i + 1) + if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, err := p.WriteTo(wb, nil, dst); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, _, _, err := p.ReadFrom(rb); err != nil { + t.Fatal(err) + } else if !bytes.Equal(rb[:n], wb) { + t.Fatalf("got %v; want %v", rb[:n], wb) + } + } +} + +func TestPacketConnReadWriteUnicastICMP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + dst, err := net.ResolveIPAddr("ip4", "127.0.0.1") + if err != nil { + t.Fatal(err) + } + p := ipv4.NewPacketConn(c) + defer p.Close() + cf := ipv4.FlagDst | ipv4.FlagInterface + if runtime.GOOS != "solaris" { + // Solaris never allows modifying ICMP properties.
+ cf |= ipv4.FlagTTL + } + + for i, toggle := range []bool{true, false, true} { + wb, err := (&icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(nil) + if err != nil { + t.Fatal(err) + } + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + p.SetTTL(i + 1) + if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, err := p.WriteTo(wb, nil, dst); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + loop: + if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, _, _, err := p.ReadFrom(rb); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels have some limitation on receiving icmp packet through raw socket + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } else { + m, err := icmp.ParseMessage(iana.ProtocolICMP, rb[:n]) + if err != nil { + t.Fatal(err) + } + if runtime.GOOS == "linux" && m.Type == ipv4.ICMPTypeEcho { + // On Linux we must handle own sent packets. + goto loop + } + if m.Type != ipv4.ICMPTypeEchoReply || m.Code != 0 { + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0) + } + } + } +} + +func TestRawConnReadWriteUnicastICMP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + dst, err := net.ResolveIPAddr("ip4", "127.0.0.1") + if err != nil { + t.Fatal(err) + } + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + defer r.Close() + cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface + + for i, toggle := range []bool{true, false, true} { + wb, err := (&icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(nil) + if err != nil { + t.Fatal(err) + } + wh := &ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TOS: i + 1, + TotalLen: ipv4.HeaderLen + len(wb), + TTL: i + 1, + Protocol: 1, + Dst: dst.IP, + } + if err := r.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := r.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if err := r.WriteTo(wh, wb, nil); err != nil { + t.Fatal(err) + } + rb := make([]byte, ipv4.HeaderLen+128) + loop: + if err := r.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if _, b, _, err := r.ReadFrom(rb); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels have some limitation on receiving icmp packet through raw socket + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } else { + m, err := icmp.ParseMessage(iana.ProtocolICMP, b) + if err != nil { + t.Fatal(err) + } 
+ if runtime.GOOS == "linux" && m.Type == ipv4.ICMPTypeEcho { + // On Linux we must handle own sent packets. + goto loop + } + if m.Type != ipv4.ICMPTypeEchoReply || m.Code != 0 { + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0) + } + } + } +} diff --git a/vendor/golang.org/x/net/ipv4/unicastsockopt_test.go b/vendor/golang.org/x/net/ipv4/unicastsockopt_test.go new file mode 100644 index 0000000..db5213b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/unicastsockopt_test.go @@ -0,0 +1,148 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +func TestConnUnicastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + ln, err := net.Listen("tcp4", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + errc := make(chan error, 1) + go func() { + c, err := ln.Accept() + if err != nil { + errc <- err + return + } + errc <- c.Close() + }() + + c, err := net.Dial("tcp4", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + testUnicastSocketOptions(t, ipv4.NewConn(c)) + + if err := <-errc; err != nil { + t.Errorf("server: %v", err) + } +} + +var packetConnUnicastSocketOptionTests = []struct { + net, proto, addr string +}{ + {"udp4", "", "127.0.0.1:0"}, + {"ip4", ":icmp", "127.0.0.1"}, +} + +func TestPacketConnUnicastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + m, ok := nettest.SupportsRawIPSocket() + for _, tt := range packetConnUnicastSocketOptionTests { + if tt.net == "ip4" && !ok { + t.Log(m) + continue + } + c, err := net.ListenPacket(tt.net+tt.proto, tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + testUnicastSocketOptions(t, ipv4.NewPacketConn(c)) + } +} + +func TestRawConnUnicastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + c, err := net.ListenPacket("ip4:icmp", "127.0.0.1") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + + testUnicastSocketOptions(t, r) +} + +type testIPv4UnicastConn interface { + TOS() (int, error) + SetTOS(int) error + TTL() (int, error) + SetTTL(int) error +} + +func testUnicastSocketOptions(t *testing.T, c testIPv4UnicastConn) { + tos := iana.DiffServCS0 | iana.NotECNTransport + switch runtime.GOOS { + case "windows": + // IP_TOS option is supported on Windows 8 and beyond. 
+ t.Skipf("not supported on %s", runtime.GOOS) + } + + if err := c.SetTOS(tos); err != nil { + t.Fatal(err) + } + if v, err := c.TOS(); err != nil { + t.Fatal(err) + } else if v != tos { + t.Fatalf("got %v; want %v", v, tos) + } + const ttl = 255 + if err := c.SetTTL(ttl); err != nil { + t.Fatal(err) + } + if v, err := c.TTL(); err != nil { + t.Fatal(err) + } else if v != ttl { + t.Fatalf("got %v; want %v", v, ttl) + } +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_darwin.go b/vendor/golang.org/x/net/ipv4/zsys_darwin.go new file mode 100644 index 0000000..c07cc88 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_darwin.go @@ -0,0 +1,99 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_STRIPHDR = 0x17 + sysIP_RECVTTL = 0x18 + sysIP_BOUND_IF = 0x19 + sysIP_PKTINFO = 0x1a + sysIP_RECVPKTINFO = 0x1a + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_MULTICAST_IFINDEX = 0x42 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type inetPktinfo struct { + Ifindex uint32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [128]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [128]byte + Pad_cgo_1 [128]byte +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go b/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go new file mode 100644 index 0000000..c4365e9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go @@ -0,0 +1,31 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_dragonfly.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_RECVTTL = 0x41 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sizeofIPMreq = 0x8 +) + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff 
--git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go new file mode 100644 index 0000000..8c4aec9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go @@ -0,0 +1,93 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go new file mode 100644 index 0000000..4b10b7c --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go @@ -0,0 +1,95 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 
0x108 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go new file mode 100644 index 0000000..4b10b7c --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go @@ -0,0 +1,95 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_386.go b/vendor/golang.org/x/net/ipv4/zsys_linux_386.go new file mode 100644 index 0000000..c0260f0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_386.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + 
sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go new file mode 100644 index 0000000..9c967ea --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + 
sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go b/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go new file mode 100644 index 0000000..c0260f0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER 
= 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go new file mode 100644 index 0000000..9c967ea --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + 
sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go new file mode 100644 index 0000000..c0260f0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + 
Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go new file mode 100644 index 0000000..9c967ea --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type 
ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go new file mode 100644 index 0000000..9c967ea --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct 
{ + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go new file mode 100644 index 0000000..c0260f0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go new file mode 100644 index 0000000..f65bd9a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs 
defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]uint8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go new file mode 100644 index 0000000..9c967ea --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + 
sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go new file mode 100644 index 0000000..9c967ea --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + 
sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go b/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go new file mode 100644 index 0000000..9c967ea --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 
0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_netbsd.go b/vendor/golang.org/x/net/ipv4/zsys_netbsd.go new file mode 100644 index 0000000..fd3624d --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_netbsd.go @@ -0,0 +1,30 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_RECVTTL = 0x17 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sizeofIPMreq = 0x8 +) + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_openbsd.go b/vendor/golang.org/x/net/ipv4/zsys_openbsd.go new file mode 100644 index 0000000..12f36be --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_openbsd.go @@ -0,0 +1,30 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x1e + sysIP_RECVTTL = 0x1f + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sizeofIPMreq = 0x8 +) + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_solaris.go b/vendor/golang.org/x/net/ipv4/zsys_solaris.go new file mode 100644 index 0000000..0a3875c --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_solaris.go @@ -0,0 +1,100 @@ +// Created by cgo -godefs - 
DO NOT EDIT +// cgo -godefs defs_solaris.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x9 + sysIP_RECVSLLA = 0xa + sysIP_RECVTTL = 0xb + + sysIP_MULTICAST_IF = 0x10 + sysIP_MULTICAST_TTL = 0x11 + sysIP_MULTICAST_LOOP = 0x12 + sysIP_ADD_MEMBERSHIP = 0x13 + sysIP_DROP_MEMBERSHIP = 0x14 + sysIP_BLOCK_SOURCE = 0x15 + sysIP_UNBLOCK_SOURCE = 0x16 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x17 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x18 + sysIP_NEXTHOP = 0x19 + + sysIP_PKTINFO = 0x1a + sysIP_RECVPKTINFO = 0x1a + sysIP_DONTFRAG = 0x1b + + sysIP_BOUND_IF = 0x41 + sysIP_UNSPEC_SRC = 0x42 + sysIP_BROADCAST_TTL = 0x43 + sysIP_DHCPINIT_IF = 0x45 + + sysIP_REUSEADDR = 0x104 + sysIP_DONTROUTE = 0x105 + sysIP_BROADCAST = 0x106 + + sysMCAST_JOIN_GROUP = 0x29 + sysMCAST_LEAVE_GROUP = 0x2a + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_JOIN_SOURCE_GROUP = 0x2d + sysMCAST_LEAVE_SOURCE_GROUP = 0x2e + + sizeofSockaddrStorage = 0x100 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + + sizeofIPMreq = 0x8 + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x104 + sizeofGroupSourceReq = 0x204 +) + +type sockaddrStorage struct { + Family uint16 + X_ss_pad1 [6]int8 + X_ss_align float64 + X_ss_pad2 [240]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type inetPktinfo struct { + Ifindex uint32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [256]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [256]byte + Pad_cgo_1 [256]byte +} diff --git a/vendor/golang.org/x/net/ipv6/batch.go b/vendor/golang.org/x/net/ipv6/batch.go new file mode 100644 index 0000000..4f5fe68 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/batch.go @@ -0,0 +1,119 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package ipv6 + +import ( + "net" + "runtime" + "syscall" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of +// PacketConn are not implemented. + +// A Message represents an IO message. +// +// type Message struct { +// Buffers [][]byte +// OOB []byte +// Addr net.Addr +// N int +// NN int +// Flags int +// } +// +// The Buffers fields represents a list of contiguous buffers, which +// can be used for vectored IO, for example, putting a header and a +// payload in each slice. +// When writing, the Buffers field must contain at least one byte to +// write. +// When reading, the Buffers field will always contain a byte to read. +// +// The OOB field contains protocol-specific control or miscellaneous +// ancillary data known as out-of-band data. +// It can be nil when not required. +// +// The Addr field specifies a destination address when writing. +// It can be nil when the underlying protocol of the endpoint uses +// connection-oriented communication. +// After a successful read, it may contain the source address on the +// received packet. 
+// +// The N field indicates the number of bytes read or written from/to +// Buffers. +// +// The NN field indicates the number of bytes read or written from/to +// OOB. +// +// The Flags field contains protocol-specific information on the +// received message. +type Message = socket.Message + +// ReadBatch reads a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// On a successful read it returns the number of messages received, up +// to len(ms). +// +// On Linux, a batch read will be optimized. +// On other platforms, this method will read only a single message. +func (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.RecvMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.RecvMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// WriteBatch writes a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// It returns the number of messages written on a successful write. +// +// On Linux, a batch write will be optimized. +// On other platforms, this method will write only a single message. +func (c *payloadHandler) WriteBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.SendMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.SendMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} diff --git a/vendor/golang.org/x/net/ipv6/bpf_test.go b/vendor/golang.org/x/net/ipv6/bpf_test.go new file mode 100644 index 0000000..8253e1f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/bpf_test.go @@ -0,0 +1,96 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "net" + "runtime" + "testing" + "time" + + "golang.org/x/net/bpf" + "golang.org/x/net/ipv6" +) + +func TestBPF(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + l, err := net.ListenPacket("udp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + defer l.Close() + + p := ipv6.NewPacketConn(l) + + // This filter accepts UDP packets whose first payload byte is + // even. + prog, err := bpf.Assemble([]bpf.Instruction{ + // Load the first byte of the payload (skipping UDP header). + bpf.LoadAbsolute{Off: 8, Size: 1}, + // Select LSB of the byte. + bpf.ALUOpConstant{Op: bpf.ALUOpAnd, Val: 1}, + // Byte is even? + bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0, SkipFalse: 1}, + // Accept. + bpf.RetConstant{Val: 4096}, + // Ignore. 
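// (Editor's note: a classic BPF program's return value is the number
// of bytes of the packet to keep, so the RetConstant{Val: 0} below
// drops the packet entirely, while the 4096 above accepts it in full.)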
+ bpf.RetConstant{Val: 0}, + }) + if err != nil { + t.Fatalf("compiling BPF: %s", err) + } + + if err = p.SetBPF(prog); err != nil { + t.Fatalf("attaching filter to Conn: %s", err) + } + + s, err := net.Dial("udp6", l.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + defer s.Close() + go func() { + for i := byte(0); i < 10; i++ { + s.Write([]byte{i}) + } + }() + + l.SetDeadline(time.Now().Add(2 * time.Second)) + seen := make([]bool, 5) + for { + var b [512]byte + n, _, err := l.ReadFrom(b[:]) + if err != nil { + t.Fatalf("reading from listener: %s", err) + } + if n != 1 { + t.Fatalf("unexpected packet length, want 1, got %d", n) + } + if b[0] >= 10 { + t.Fatalf("unexpected byte, want 0-9, got %d", b[0]) + } + if b[0]%2 != 0 { + t.Fatalf("got odd byte %d, wanted only even bytes", b[0]) + } + seen[b[0]/2] = true + + seenAll := true + for _, v := range seen { + if !v { + seenAll = false + break + } + } + if seenAll { + break + } + } +} diff --git a/vendor/golang.org/x/net/ipv6/control.go b/vendor/golang.org/x/net/ipv6/control.go new file mode 100644 index 0000000..2da6444 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control.go @@ -0,0 +1,187 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "fmt" + "net" + "sync" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +// Note that RFC 3542 obsoletes RFC 2292 but OS X Snow Leopard and the +// former still support RFC 2292 only. Please be aware that almost +// all protocol implementations prohibit using a combination of RFC +// 2292 and RFC 3542 for some practical reasons. + +type rawOpt struct { + sync.RWMutex + cflags ControlFlags +} + +func (c *rawOpt) set(f ControlFlags) { c.cflags |= f } +func (c *rawOpt) clear(f ControlFlags) { c.cflags &^= f } +func (c *rawOpt) isset(f ControlFlags) bool { return c.cflags&f != 0 } + +// A ControlFlags represents per packet basis IP-level socket option +// control flags. +type ControlFlags uint + +const ( + FlagTrafficClass ControlFlags = 1 << iota // pass the traffic class on the received packet + FlagHopLimit // pass the hop limit on the received packet + FlagSrc // pass the source address on the received packet + FlagDst // pass the destination address on the received packet + FlagInterface // pass the interface index on the received packet + FlagPathMTU // pass the path MTU on the received packet path +) + +const flagPacketInfo = FlagDst | FlagInterface + +// A ControlMessage represents per packet basis IP-level socket +// options. +type ControlMessage struct { + // Receiving socket options: SetControlMessage allows to + // receive the options from the protocol stack using ReadFrom + // method of PacketConn. + // + // Specifying socket options: ControlMessage for WriteTo + // method of PacketConn allows to send the options to the + // protocol stack. 
+ // + TrafficClass int // traffic class, must be 1 <= value <= 255 when specifying + HopLimit int // hop limit, must be 1 <= value <= 255 when specifying + Src net.IP // source address, specifying only + Dst net.IP // destination address, receiving only + IfIndex int // interface index, must be 1 <= value when specifying + NextHop net.IP // next hop address, specifying only + MTU int // path MTU, receiving only +} + +func (cm *ControlMessage) String() string { + if cm == nil { + return "" + } + return fmt.Sprintf("tclass=%#x hoplim=%d src=%v dst=%v ifindex=%d nexthop=%v mtu=%d", cm.TrafficClass, cm.HopLimit, cm.Src, cm.Dst, cm.IfIndex, cm.NextHop, cm.MTU) +} + +// Marshal returns the binary encoding of cm. +func (cm *ControlMessage) Marshal() []byte { + if cm == nil { + return nil + } + var l int + tclass := false + if ctlOpts[ctlTrafficClass].name > 0 && cm.TrafficClass > 0 { + tclass = true + l += socket.ControlMessageSpace(ctlOpts[ctlTrafficClass].length) + } + hoplimit := false + if ctlOpts[ctlHopLimit].name > 0 && cm.HopLimit > 0 { + hoplimit = true + l += socket.ControlMessageSpace(ctlOpts[ctlHopLimit].length) + } + pktinfo := false + if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To16() != nil && cm.Src.To4() == nil || cm.IfIndex > 0) { + pktinfo = true + l += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length) + } + nexthop := false + if ctlOpts[ctlNextHop].name > 0 && cm.NextHop.To16() != nil && cm.NextHop.To4() == nil { + nexthop = true + l += socket.ControlMessageSpace(ctlOpts[ctlNextHop].length) + } + var b []byte + if l > 0 { + b = make([]byte, l) + bb := b + if tclass { + bb = ctlOpts[ctlTrafficClass].marshal(bb, cm) + } + if hoplimit { + bb = ctlOpts[ctlHopLimit].marshal(bb, cm) + } + if pktinfo { + bb = ctlOpts[ctlPacketInfo].marshal(bb, cm) + } + if nexthop { + bb = ctlOpts[ctlNextHop].marshal(bb, cm) + } + } + return b +} + +// Parse parses b as a control message and stores the result in cm. +func (cm *ControlMessage) Parse(b []byte) error { + ms, err := socket.ControlMessage(b).Parse() + if err != nil { + return err + } + for _, m := range ms { + lvl, typ, l, err := m.ParseHeader() + if err != nil { + return err + } + if lvl != iana.ProtocolIPv6 { + continue + } + switch { + case typ == ctlOpts[ctlTrafficClass].name && l >= ctlOpts[ctlTrafficClass].length: + ctlOpts[ctlTrafficClass].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlHopLimit].name && l >= ctlOpts[ctlHopLimit].length: + ctlOpts[ctlHopLimit].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlPacketInfo].name && l >= ctlOpts[ctlPacketInfo].length: + ctlOpts[ctlPacketInfo].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlPathMTU].name && l >= ctlOpts[ctlPathMTU].length: + ctlOpts[ctlPathMTU].parse(cm, m.Data(l)) + } + } + return nil +} + +// NewControlMessage returns a new control message. +// +// The returned message is large enough for options specified by cf. 
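+//
+// The returned buffer can be used, for example, as the out-of-band
+// buffer for a lower-level read such as (*net.UDPConn).ReadMsgUDP and
+// parsed afterwards with ControlMessage.Parse:
+//
+//	oob := ipv6.NewControlMessage(ipv6.FlagHopLimit | ipv6.FlagDst)
+//	// pass oob to the read call, then Parse the bytes it fills in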
+func NewControlMessage(cf ControlFlags) []byte { + opt := rawOpt{cflags: cf} + var l int + if opt.isset(FlagTrafficClass) && ctlOpts[ctlTrafficClass].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlTrafficClass].length) + } + if opt.isset(FlagHopLimit) && ctlOpts[ctlHopLimit].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlHopLimit].length) + } + if opt.isset(flagPacketInfo) && ctlOpts[ctlPacketInfo].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length) + } + if opt.isset(FlagPathMTU) && ctlOpts[ctlPathMTU].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlPathMTU].length) + } + var b []byte + if l > 0 { + b = make([]byte, l) + } + return b +} + +// Ancillary data socket options +const ( + ctlTrafficClass = iota // header field + ctlHopLimit // header field + ctlPacketInfo // inbound or outbound packet path + ctlNextHop // nexthop + ctlPathMTU // path mtu + ctlMax +) + +// A ctlOpt represents a binding for ancillary data socket option. +type ctlOpt struct { + name int // option name, must be equal or greater than 1 + length int // option length + marshal func([]byte, *ControlMessage) []byte + parse func(*ControlMessage, []byte) +} diff --git a/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go b/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go new file mode 100644 index 0000000..9fd9eb1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go @@ -0,0 +1,48 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin + +package ipv6 + +import ( + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshal2292HopLimit(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292HOPLIMIT, 4) + if cm != nil { + socket.NativeEndian.PutUint32(m.Data(4), uint32(cm.HopLimit)) + } + return m.Next(4) +} + +func marshal2292PacketInfo(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292PKTINFO, sizeofInet6Pktinfo) + if cm != nil { + pi := (*inet6Pktinfo)(unsafe.Pointer(&m.Data(sizeofInet6Pktinfo)[0])) + if ip := cm.Src.To16(); ip != nil && ip.To4() == nil { + copy(pi.Addr[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return m.Next(sizeofInet6Pktinfo) +} + +func marshal2292NextHop(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292NEXTHOP, sizeofSockaddrInet6) + if cm != nil { + sa := (*sockaddrInet6)(unsafe.Pointer(&m.Data(sizeofSockaddrInet6)[0])) + sa.setSockaddr(cm.NextHop, cm.IfIndex) + } + return m.Next(sizeofSockaddrInet6) +} diff --git a/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go new file mode 100644 index 0000000..eec529c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go @@ -0,0 +1,94 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshalTrafficClass(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_TCLASS, 4) + if cm != nil { + socket.NativeEndian.PutUint32(m.Data(4), uint32(cm.TrafficClass)) + } + return m.Next(4) +} + +func parseTrafficClass(cm *ControlMessage, b []byte) { + cm.TrafficClass = int(socket.NativeEndian.Uint32(b[:4])) +} + +func marshalHopLimit(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_HOPLIMIT, 4) + if cm != nil { + socket.NativeEndian.PutUint32(m.Data(4), uint32(cm.HopLimit)) + } + return m.Next(4) +} + +func parseHopLimit(cm *ControlMessage, b []byte) { + cm.HopLimit = int(socket.NativeEndian.Uint32(b[:4])) +} + +func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_PKTINFO, sizeofInet6Pktinfo) + if cm != nil { + pi := (*inet6Pktinfo)(unsafe.Pointer(&m.Data(sizeofInet6Pktinfo)[0])) + if ip := cm.Src.To16(); ip != nil && ip.To4() == nil { + copy(pi.Addr[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return m.Next(sizeofInet6Pktinfo) +} + +func parsePacketInfo(cm *ControlMessage, b []byte) { + pi := (*inet6Pktinfo)(unsafe.Pointer(&b[0])) + if len(cm.Dst) < net.IPv6len { + cm.Dst = make(net.IP, net.IPv6len) + } + copy(cm.Dst, pi.Addr[:]) + cm.IfIndex = int(pi.Ifindex) +} + +func marshalNextHop(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_NEXTHOP, sizeofSockaddrInet6) + if cm != nil { + sa := (*sockaddrInet6)(unsafe.Pointer(&m.Data(sizeofSockaddrInet6)[0])) + sa.setSockaddr(cm.NextHop, cm.IfIndex) + } + return m.Next(sizeofSockaddrInet6) +} + +func parseNextHop(cm *ControlMessage, b []byte) { +} + +func marshalPathMTU(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_PATHMTU, sizeofIPv6Mtuinfo) + return m.Next(sizeofIPv6Mtuinfo) +} + +func parsePathMTU(cm *ControlMessage, b []byte) { + mi := (*ipv6Mtuinfo)(unsafe.Pointer(&b[0])) + if len(cm.Dst) < net.IPv6len { + cm.Dst = make(net.IP, net.IPv6len) + } + copy(cm.Dst, mi.Addr.Addr[:]) + cm.IfIndex = int(mi.Addr.Scope_id) + cm.MTU = int(mi.Mtu) +} diff --git a/vendor/golang.org/x/net/ipv6/control_stub.go b/vendor/golang.org/x/net/ipv6/control_stub.go new file mode 100644 index 0000000..a045f28 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_stub.go @@ -0,0 +1,13 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/control_test.go b/vendor/golang.org/x/net/ipv6/control_test.go new file mode 100644 index 0000000..c186ca9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_test.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6_test + +import ( + "testing" + + "golang.org/x/net/ipv6" +) + +func TestControlMessageParseWithFuzz(t *testing.T) { + var cm ipv6.ControlMessage + for _, fuzz := range []string{ + "\f\x00\x00\x00)\x00\x00\x00.\x00\x00\x00", + "\f\x00\x00\x00)\x00\x00\x00,\x00\x00\x00", + } { + cm.Parse([]byte(fuzz)) + } +} diff --git a/vendor/golang.org/x/net/ipv6/control_unix.go b/vendor/golang.org/x/net/ipv6/control_unix.go new file mode 100644 index 0000000..6651506 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_unix.go @@ -0,0 +1,55 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package ipv6 + +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + opt.Lock() + defer opt.Unlock() + if so, ok := sockOpts[ssoReceiveTrafficClass]; ok && cf&FlagTrafficClass != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagTrafficClass) + } else { + opt.clear(FlagTrafficClass) + } + } + if so, ok := sockOpts[ssoReceiveHopLimit]; ok && cf&FlagHopLimit != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagHopLimit) + } else { + opt.clear(FlagHopLimit) + } + } + if so, ok := sockOpts[ssoReceivePacketInfo]; ok && cf&flagPacketInfo != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(cf & flagPacketInfo) + } else { + opt.clear(cf & flagPacketInfo) + } + } + if so, ok := sockOpts[ssoReceivePathMTU]; ok && cf&FlagPathMTU != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagPathMTU) + } else { + opt.clear(FlagPathMTU) + } + } + return nil +} diff --git a/vendor/golang.org/x/net/ipv6/control_windows.go b/vendor/golang.org/x/net/ipv6/control_windows.go new file mode 100644 index 0000000..ef2563b --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_windows.go @@ -0,0 +1,16 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "syscall" + + "golang.org/x/net/internal/socket" +) + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + // TODO(mikio): implement this + return syscall.EWINDOWS +} diff --git a/vendor/golang.org/x/net/ipv6/defs_darwin.go b/vendor/golang.org/x/net/ipv6/defs_darwin.go new file mode 100644 index 0000000..55ddc11 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_darwin.go @@ -0,0 +1,112 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
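+//
+// This file and the other defs_*.go files are input templates for
+// "go tool cgo -godefs" (run by gen.go); the "ignore" build tag
+// below keeps them out of ordinary builds.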
+
+// +build ignore
+
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package ipv6
+
+/*
+#define __APPLE_USE_RFC_3542
+#include <netinet/in.h>
+#include <netinet/icmp6.h>
+*/
+import "C"
+
+const (
+	sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS
+	sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF
+	sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
+	sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
+	sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP
+	sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP
+
+	sysIPV6_PORTRANGE = C.IPV6_PORTRANGE
+	sysICMP6_FILTER = C.ICMP6_FILTER
+	sysIPV6_2292PKTINFO = C.IPV6_2292PKTINFO
+	sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT
+	sysIPV6_2292NEXTHOP = C.IPV6_2292NEXTHOP
+	sysIPV6_2292HOPOPTS = C.IPV6_2292HOPOPTS
+	sysIPV6_2292DSTOPTS = C.IPV6_2292DSTOPTS
+	sysIPV6_2292RTHDR = C.IPV6_2292RTHDR
+
+	sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS
+
+	sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
+	sysIPV6_V6ONLY = C.IPV6_V6ONLY
+
+	sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY
+
+	sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
+	sysIPV6_TCLASS = C.IPV6_TCLASS
+
+	sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
+
+	sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO
+
+	sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
+	sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR
+	sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS
+	sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS
+
+	sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU
+	sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
+
+	sysIPV6_PATHMTU = C.IPV6_PATHMTU
+
+	sysIPV6_PKTINFO = C.IPV6_PKTINFO
+	sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
+	sysIPV6_NEXTHOP = C.IPV6_NEXTHOP
+	sysIPV6_HOPOPTS = C.IPV6_HOPOPTS
+	sysIPV6_DSTOPTS = C.IPV6_DSTOPTS
+	sysIPV6_RTHDR = C.IPV6_RTHDR
+
+	sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL
+
+	sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
+
+	sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR
+
+	sysIPV6_MSFILTER = C.IPV6_MSFILTER
+	sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP
+	sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP
+	sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP
+	sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
+	sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE
+	sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE
+
+	sysIPV6_BOUND_IF = C.IPV6_BOUND_IF
+
+	sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT
+	sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH
+	sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW
+
+	sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
+	sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+	sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
+	sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo
+
+	sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
+	sizeofGroupReq = C.sizeof_struct_group_req
+	sizeofGroupSourceReq = C.sizeof_struct_group_source_req
+
+	sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+)
+
+type sockaddrStorage C.struct_sockaddr_storage
+
+type sockaddrInet6 C.struct_sockaddr_in6
+
+type inet6Pktinfo C.struct_in6_pktinfo
+
+type ipv6Mtuinfo C.struct_ip6_mtuinfo
+
+type ipv6Mreq C.struct_ipv6_mreq
+
+type icmpv6Filter C.struct_icmp6_filter
+
+type groupReq C.struct_group_req
+
+type groupSourceReq C.struct_group_source_req
diff --git a/vendor/golang.org/x/net/ipv6/defs_dragonfly.go b/vendor/golang.org/x/net/ipv6/defs_dragonfly.go
new file mode 100644
index 0000000..a4c383a
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/defs_dragonfly.go
@@ -0,0 +1,84 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package ipv6
+
+/*
+#include <sys/param.h>
+#include <sys/socket.h>
+
+#include <netinet/in.h>
+#include <netinet/icmp6.h>
+*/
+import "C"
+
+const (
+	sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS
+	sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF
+	sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
+	sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
+	sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP
+	sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP
+	sysIPV6_PORTRANGE = C.IPV6_PORTRANGE
+	sysICMP6_FILTER = C.ICMP6_FILTER
+
+	sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
+	sysIPV6_V6ONLY = C.IPV6_V6ONLY
+
+	sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY
+
+	sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
+	sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO
+	sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
+	sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR
+	sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS
+	sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS
+
+	sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU
+	sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
+
+	sysIPV6_PATHMTU = C.IPV6_PATHMTU
+
+	sysIPV6_PKTINFO = C.IPV6_PKTINFO
+	sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
+	sysIPV6_NEXTHOP = C.IPV6_NEXTHOP
+	sysIPV6_HOPOPTS = C.IPV6_HOPOPTS
+	sysIPV6_DSTOPTS = C.IPV6_DSTOPTS
+	sysIPV6_RTHDR = C.IPV6_RTHDR
+
+	sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
+
+	sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL
+
+	sysIPV6_TCLASS = C.IPV6_TCLASS
+	sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
+
+	sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR
+
+	sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT
+	sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH
+	sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW
+
+	sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+	sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
+	sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo
+
+	sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
+
+	sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+)
+
+type sockaddrInet6 C.struct_sockaddr_in6
+
+type inet6Pktinfo C.struct_in6_pktinfo
+
+type ipv6Mtuinfo C.struct_ip6_mtuinfo
+
+type ipv6Mreq C.struct_ipv6_mreq
+
+type icmpv6Filter C.struct_icmp6_filter
diff --git a/vendor/golang.org/x/net/ipv6/defs_freebsd.go b/vendor/golang.org/x/net/ipv6/defs_freebsd.go
new file mode 100644
index 0000000..53e6253
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/defs_freebsd.go
@@ -0,0 +1,105 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package ipv6
+
+/*
+#include <sys/param.h>
+#include <sys/socket.h>
+
+#include <netinet/in.h>
+#include <netinet/icmp6.h>
+*/
+import "C"
+
+const (
+	sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS
+	sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF
+	sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
+	sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
+	sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP
+	sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP
+	sysIPV6_PORTRANGE = C.IPV6_PORTRANGE
+	sysICMP6_FILTER = C.ICMP6_FILTER
+
+	sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
+	sysIPV6_V6ONLY = C.IPV6_V6ONLY
+
+	sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY
+
+	sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
+
+	sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO
+	sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
+	sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR
+	sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS
+	sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS
+
+	sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU
+	sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
+
+	sysIPV6_PATHMTU = C.IPV6_PATHMTU
+
+	sysIPV6_PKTINFO = C.IPV6_PKTINFO
+	sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
+	sysIPV6_NEXTHOP = C.IPV6_NEXTHOP
+	sysIPV6_HOPOPTS = C.IPV6_HOPOPTS
+	sysIPV6_DSTOPTS = C.IPV6_DSTOPTS
+	sysIPV6_RTHDR = C.IPV6_RTHDR
+
+	sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
+
+	sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL
+
+	sysIPV6_TCLASS = C.IPV6_TCLASS
+	sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
+
+	sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR
+
+	sysIPV6_BINDANY = C.IPV6_BINDANY
+
+	sysIPV6_MSFILTER = C.IPV6_MSFILTER
+
+	sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP
+	sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP
+	sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP
+	sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
+	sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE
+	sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE
+
+	sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT
+	sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH
+	sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW
+
+	sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
+	sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+	sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
+	sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo
+
+	sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
+	sizeofGroupReq = C.sizeof_struct_group_req
+	sizeofGroupSourceReq = C.sizeof_struct_group_source_req
+
+	sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+)
+
+type sockaddrStorage C.struct_sockaddr_storage
+
+type sockaddrInet6 C.struct_sockaddr_in6
+
+type inet6Pktinfo C.struct_in6_pktinfo
+
+type ipv6Mtuinfo C.struct_ip6_mtuinfo
+
+type ipv6Mreq C.struct_ipv6_mreq
+
+type groupReq C.struct_group_req
+
+type groupSourceReq C.struct_group_source_req
+
+type icmpv6Filter C.struct_icmp6_filter
diff --git a/vendor/golang.org/x/net/ipv6/defs_linux.go b/vendor/golang.org/x/net/ipv6/defs_linux.go
new file mode 100644
index 0000000..3308cb2
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/defs_linux.go
@@ -0,0 +1,147 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package ipv6
+
+/*
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/ipv6.h>
+#include <linux/icmpv6.h>
+#include <linux/filter.h>
+#include <sys/socket.h>
+*/
+import "C"
+
+const (
+	sysIPV6_ADDRFORM = C.IPV6_ADDRFORM
+	sysIPV6_2292PKTINFO = C.IPV6_2292PKTINFO
+	sysIPV6_2292HOPOPTS = C.IPV6_2292HOPOPTS
+	sysIPV6_2292DSTOPTS = C.IPV6_2292DSTOPTS
+	sysIPV6_2292RTHDR = C.IPV6_2292RTHDR
+	sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS
+	sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
+	sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT
+	sysIPV6_NEXTHOP = C.IPV6_NEXTHOP
+	sysIPV6_FLOWINFO = C.IPV6_FLOWINFO
+
+	sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS
+	sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF
+	sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
+	sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
+	sysIPV6_ADD_MEMBERSHIP = C.IPV6_ADD_MEMBERSHIP
+	sysIPV6_DROP_MEMBERSHIP = C.IPV6_DROP_MEMBERSHIP
+	sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP
+	sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP
+	sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP
+	sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
+	sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE
+	sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE
+	sysMCAST_MSFILTER = C.MCAST_MSFILTER
+	sysIPV6_ROUTER_ALERT = C.IPV6_ROUTER_ALERT
+	sysIPV6_MTU_DISCOVER = C.IPV6_MTU_DISCOVER
+	sysIPV6_MTU = C.IPV6_MTU
+	sysIPV6_RECVERR = C.IPV6_RECVERR
+	sysIPV6_V6ONLY = C.IPV6_V6ONLY
+	sysIPV6_JOIN_ANYCAST = C.IPV6_JOIN_ANYCAST
+	sysIPV6_LEAVE_ANYCAST = C.IPV6_LEAVE_ANYCAST
+
+	//sysIPV6_PMTUDISC_DONT = C.IPV6_PMTUDISC_DONT
+	//sysIPV6_PMTUDISC_WANT = C.IPV6_PMTUDISC_WANT
+	//sysIPV6_PMTUDISC_DO = C.IPV6_PMTUDISC_DO
+	//sysIPV6_PMTUDISC_PROBE = C.IPV6_PMTUDISC_PROBE
+	//sysIPV6_PMTUDISC_INTERFACE = C.IPV6_PMTUDISC_INTERFACE
+	//sysIPV6_PMTUDISC_OMIT = C.IPV6_PMTUDISC_OMIT
+
+	sysIPV6_FLOWLABEL_MGR = C.IPV6_FLOWLABEL_MGR
+	sysIPV6_FLOWINFO_SEND = C.IPV6_FLOWINFO_SEND
+
+	sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY
+	sysIPV6_XFRM_POLICY = C.IPV6_XFRM_POLICY
+
+	sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO
+	sysIPV6_PKTINFO = C.IPV6_PKTINFO
+	sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
+	sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
+	sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS
+	sysIPV6_HOPOPTS = C.IPV6_HOPOPTS
+	sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
+	sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR
+	sysIPV6_RTHDR = C.IPV6_RTHDR
+	sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS
+	sysIPV6_DSTOPTS = C.IPV6_DSTOPTS
+	sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
+	sysIPV6_PATHMTU = C.IPV6_PATHMTU
+	sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
+
+	sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
+	sysIPV6_TCLASS = C.IPV6_TCLASS
+
+	sysIPV6_ADDR_PREFERENCES = C.IPV6_ADDR_PREFERENCES
+
+	sysIPV6_PREFER_SRC_TMP = C.IPV6_PREFER_SRC_TMP
+	sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC
+	sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = C.IPV6_PREFER_SRC_PUBTMP_DEFAULT
+	sysIPV6_PREFER_SRC_COA = C.IPV6_PREFER_SRC_COA
+	sysIPV6_PREFER_SRC_HOME = C.IPV6_PREFER_SRC_HOME
+	sysIPV6_PREFER_SRC_CGA = C.IPV6_PREFER_SRC_CGA
+	sysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA
+
+	sysIPV6_MINHOPCOUNT = C.IPV6_MINHOPCOUNT
+
+	sysIPV6_ORIGDSTADDR = C.IPV6_ORIGDSTADDR
+	sysIPV6_RECVORIGDSTADDR = C.IPV6_RECVORIGDSTADDR
+	sysIPV6_TRANSPARENT = C.IPV6_TRANSPARENT
+	sysIPV6_UNICAST_IF = C.IPV6_UNICAST_IF
+
+	sysICMPV6_FILTER = C.ICMPV6_FILTER
+
+	sysICMPV6_FILTER_BLOCK = C.ICMPV6_FILTER_BLOCK
+	sysICMPV6_FILTER_PASS = C.ICMPV6_FILTER_PASS
+	sysICMPV6_FILTER_BLOCKOTHERS = C.ICMPV6_FILTER_BLOCKOTHERS
+	sysICMPV6_FILTER_PASSONLY = C.ICMPV6_FILTER_PASSONLY
+
+	sysSOL_SOCKET = C.SOL_SOCKET
+	sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER
+
+	sizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage
+	sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+	sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
+	sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo
+	sizeofIPv6FlowlabelReq = C.sizeof_struct_in6_flowlabel_req
+
+	sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
+	sizeofGroupReq = C.sizeof_struct_group_req
+	sizeofGroupSourceReq = C.sizeof_struct_group_source_req
+
+	sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+
+	sizeofSockFprog = C.sizeof_struct_sock_fprog
+)
+
+type kernelSockaddrStorage C.struct___kernel_sockaddr_storage
+
+type sockaddrInet6 C.struct_sockaddr_in6
+
+type inet6Pktinfo C.struct_in6_pktinfo
+
+type ipv6Mtuinfo C.struct_ip6_mtuinfo
+
+type ipv6FlowlabelReq C.struct_in6_flowlabel_req
+
+type ipv6Mreq C.struct_ipv6_mreq
+
+type groupReq C.struct_group_req
+
+type groupSourceReq C.struct_group_source_req
+
+type icmpv6Filter C.struct_icmp6_filter
+
+type sockFProg C.struct_sock_fprog
+
+type sockFilter C.struct_sock_filter
diff --git a/vendor/golang.org/x/net/ipv6/defs_netbsd.go b/vendor/golang.org/x/net/ipv6/defs_netbsd.go
new file mode 100644
index 0000000..be9ceb9
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/defs_netbsd.go
@@ -0,0 +1,80 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package ipv6
+
+/*
+#include <sys/param.h>
+#include <sys/socket.h>
+
+#include <netinet/in.h>
+#include <netinet/icmp6.h>
+*/
+import "C"
+
+const (
+	sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS
+	sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF
+	sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
+	sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
+	sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP
+	sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP
+	sysIPV6_PORTRANGE = C.IPV6_PORTRANGE
+	sysICMP6_FILTER = C.ICMP6_FILTER
+
+	sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
+	sysIPV6_V6ONLY = C.IPV6_V6ONLY
+
+	sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY
+
+	sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
+
+	sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO
+	sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
+	sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR
+	sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS
+	sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS
+
+	sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU
+	sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
+	sysIPV6_PATHMTU = C.IPV6_PATHMTU
+
+	sysIPV6_PKTINFO = C.IPV6_PKTINFO
+	sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
+	sysIPV6_NEXTHOP = C.IPV6_NEXTHOP
+	sysIPV6_HOPOPTS = C.IPV6_HOPOPTS
+	sysIPV6_DSTOPTS = C.IPV6_DSTOPTS
+	sysIPV6_RTHDR = C.IPV6_RTHDR
+
+	sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
+
+	sysIPV6_TCLASS = C.IPV6_TCLASS
+	sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
+
+	sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT
+	sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH
+	sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW
+
+	sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+	sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
+	sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo
+
+	sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
+
+	sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+)
+
+type sockaddrInet6 C.struct_sockaddr_in6
+
+type inet6Pktinfo C.struct_in6_pktinfo
+
+type ipv6Mtuinfo C.struct_ip6_mtuinfo
+
+type ipv6Mreq C.struct_ipv6_mreq
+
+type icmpv6Filter C.struct_icmp6_filter
diff --git a/vendor/golang.org/x/net/ipv6/defs_openbsd.go b/vendor/golang.org/x/net/ipv6/defs_openbsd.go
new file mode 100644
index 0000000..177ddf8
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/defs_openbsd.go
@@ -0,0 +1,89 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package ipv6
+
+/*
+#include <sys/param.h>
+#include <sys/socket.h>
+
+#include <netinet/in.h>
+#include <netinet/icmp6.h>
+*/
+import "C"
+
+const (
+	sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS
+	sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF
+	sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
+	sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
+	sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP
+	sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP
+	sysIPV6_PORTRANGE = C.IPV6_PORTRANGE
+	sysICMP6_FILTER = C.ICMP6_FILTER
+
+	sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
+	sysIPV6_V6ONLY = C.IPV6_V6ONLY
+
+	sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
+
+	sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO
+	sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
+	sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR
+	sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS
+	sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS
+
+	sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU
+	sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
+
+	sysIPV6_PATHMTU = C.IPV6_PATHMTU
+
+	sysIPV6_PKTINFO = C.IPV6_PKTINFO
+	sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
+	sysIPV6_NEXTHOP = C.IPV6_NEXTHOP
+	sysIPV6_HOPOPTS = C.IPV6_HOPOPTS
+	sysIPV6_DSTOPTS = C.IPV6_DSTOPTS
+	sysIPV6_RTHDR = C.IPV6_RTHDR
+
+	sysIPV6_AUTH_LEVEL = C.IPV6_AUTH_LEVEL
+	sysIPV6_ESP_TRANS_LEVEL = C.IPV6_ESP_TRANS_LEVEL
+	sysIPV6_ESP_NETWORK_LEVEL = C.IPV6_ESP_NETWORK_LEVEL
+	sysIPSEC6_OUTSA = C.IPSEC6_OUTSA
+	sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
+
+	sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL
+	sysIPV6_IPCOMP_LEVEL = C.IPV6_IPCOMP_LEVEL
+
+	sysIPV6_TCLASS = C.IPV6_TCLASS
+	sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
+	sysIPV6_PIPEX = C.IPV6_PIPEX
+
+	sysIPV6_RTABLE = C.IPV6_RTABLE
+
+	sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT
+	sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH
+	sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW
+
+	sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+	sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
+	sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo
+
+	sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
+
+	sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+)
+
+type sockaddrInet6 C.struct_sockaddr_in6
+
+type inet6Pktinfo C.struct_in6_pktinfo
+
+type ipv6Mtuinfo C.struct_ip6_mtuinfo
+
+type ipv6Mreq C.struct_ipv6_mreq
+
+type icmpv6Filter C.struct_icmp6_filter
diff --git a/vendor/golang.org/x/net/ipv6/defs_solaris.go b/vendor/golang.org/x/net/ipv6/defs_solaris.go
new file mode 100644
index 0000000..0f8ce2b
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/defs_solaris.go
@@ -0,0 +1,114 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package ipv6
+
+/*
+#include <sys/socket.h>
+
+#include <netinet/in.h>
+#include <netinet/icmp6.h>
+*/
+import "C"
+
+const (
+	sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS
+	sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF
+	sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
+	sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
+	sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP
+	sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP
+
+	sysIPV6_PKTINFO = C.IPV6_PKTINFO
+
+	sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
+	sysIPV6_NEXTHOP = C.IPV6_NEXTHOP
+	sysIPV6_HOPOPTS = C.IPV6_HOPOPTS
+	sysIPV6_DSTOPTS = C.IPV6_DSTOPTS
+
+	sysIPV6_RTHDR = C.IPV6_RTHDR
+	sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
+
+	sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO
+	sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
+	sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS
+
+	sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR
+
+	sysIPV6_RECVRTHDRDSTOPTS = C.IPV6_RECVRTHDRDSTOPTS
+
+	sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
+	sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
+	sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU
+	sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
+	sysIPV6_SEC_OPT = C.IPV6_SEC_OPT
+	sysIPV6_SRC_PREFERENCES = C.IPV6_SRC_PREFERENCES
+	sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
+	sysIPV6_PATHMTU = C.IPV6_PATHMTU
+	sysIPV6_TCLASS = C.IPV6_TCLASS
+	sysIPV6_V6ONLY = C.IPV6_V6ONLY
+
+	sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS
+
+	sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP
+	sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP
+	sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE
+	sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE
+	sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP
+	sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
+
+	sysIPV6_PREFER_SRC_HOME = C.IPV6_PREFER_SRC_HOME
+	sysIPV6_PREFER_SRC_COA = C.IPV6_PREFER_SRC_COA
+	sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC
+	sysIPV6_PREFER_SRC_TMP = C.IPV6_PREFER_SRC_TMP
+	sysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA
+	sysIPV6_PREFER_SRC_CGA = C.IPV6_PREFER_SRC_CGA
+
+	sysIPV6_PREFER_SRC_MIPMASK = C.IPV6_PREFER_SRC_MIPMASK
+	sysIPV6_PREFER_SRC_MIPDEFAULT = C.IPV6_PREFER_SRC_MIPDEFAULT
+	sysIPV6_PREFER_SRC_TMPMASK = C.IPV6_PREFER_SRC_TMPMASK
+	sysIPV6_PREFER_SRC_TMPDEFAULT = C.IPV6_PREFER_SRC_TMPDEFAULT
+	sysIPV6_PREFER_SRC_CGAMASK = C.IPV6_PREFER_SRC_CGAMASK
+	sysIPV6_PREFER_SRC_CGADEFAULT = C.IPV6_PREFER_SRC_CGADEFAULT
+
+	sysIPV6_PREFER_SRC_MASK = C.IPV6_PREFER_SRC_MASK
+
+	sysIPV6_PREFER_SRC_DEFAULT = C.IPV6_PREFER_SRC_DEFAULT
+
+	sysIPV6_BOUND_IF = C.IPV6_BOUND_IF
+	sysIPV6_UNSPEC_SRC = C.IPV6_UNSPEC_SRC
+
+	sysICMP6_FILTER = C.ICMP6_FILTER
+
+	sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
+	sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+	sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
+	sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo
+
+	sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
+	sizeofGroupReq = C.sizeof_struct_group_req
+	sizeofGroupSourceReq = C.sizeof_struct_group_source_req
+
+	sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+)
+
+type sockaddrStorage C.struct_sockaddr_storage
+
+type sockaddrInet6 C.struct_sockaddr_in6
+
+type inet6Pktinfo C.struct_in6_pktinfo
+
+type ipv6Mtuinfo C.struct_ip6_mtuinfo
+
+type ipv6Mreq C.struct_ipv6_mreq
+
+type groupReq C.struct_group_req
+
+type groupSourceReq C.struct_group_source_req
+
+type icmpv6Filter C.struct_icmp6_filter
diff --git a/vendor/golang.org/x/net/ipv6/dgramopt.go b/vendor/golang.org/x/net/ipv6/dgramopt.go
new file mode 100644
index 0000000..703dafe
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/dgramopt.go
@@ -0,0 +1,302 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import (
+	"net"
+	"syscall"
+
+	"golang.org/x/net/bpf"
+)
+
+// MulticastHopLimit returns the hop limit field value for outgoing
+// multicast packets.
+func (c *dgramOpt) MulticastHopLimit() (int, error) {
+	if !c.ok() {
+		return 0, syscall.EINVAL
+	}
+	so, ok := sockOpts[ssoMulticastHopLimit]
+	if !ok {
+		return 0, errOpNoSupport
+	}
+	return so.GetInt(c.Conn)
+}
+
+// SetMulticastHopLimit sets the hop limit field value for future
+// outgoing multicast packets.
+func (c *dgramOpt) SetMulticastHopLimit(hoplim int) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	so, ok := sockOpts[ssoMulticastHopLimit]
+	if !ok {
+		return errOpNoSupport
+	}
+	return so.SetInt(c.Conn, hoplim)
+}
+
+// MulticastInterface returns the default interface for multicast
+// packet transmissions.
+func (c *dgramOpt) MulticastInterface() (*net.Interface, error) {
+	if !c.ok() {
+		return nil, syscall.EINVAL
+	}
+	so, ok := sockOpts[ssoMulticastInterface]
+	if !ok {
+		return nil, errOpNoSupport
+	}
+	return so.getMulticastInterface(c.Conn)
+}
+
+// SetMulticastInterface sets the default interface for future
+// multicast packet transmissions.
+func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	so, ok := sockOpts[ssoMulticastInterface]
+	if !ok {
+		return errOpNoSupport
+	}
+	return so.setMulticastInterface(c.Conn, ifi)
+}
+
+// MulticastLoopback reports whether transmitted multicast packets
+// should be copied and sent back to the originator.
+func (c *dgramOpt) MulticastLoopback() (bool, error) {
+	if !c.ok() {
+		return false, syscall.EINVAL
+	}
+	so, ok := sockOpts[ssoMulticastLoopback]
+	if !ok {
+		return false, errOpNoSupport
+	}
+	on, err := so.GetInt(c.Conn)
+	if err != nil {
+		return false, err
+	}
+	return on == 1, nil
+}
+
+// SetMulticastLoopback sets whether transmitted multicast packets
+// should be copied and sent back to the originator.
+func (c *dgramOpt) SetMulticastLoopback(on bool) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	so, ok := sockOpts[ssoMulticastLoopback]
+	if !ok {
+		return errOpNoSupport
+	}
+	return so.SetInt(c.Conn, boolint(on))
+}
+
+// JoinGroup joins the group address group on the interface ifi.
+// By default all sources that can cast data to group are accepted.
+// It's possible to mute and unmute data transmission from a specific
+// source by using ExcludeSourceSpecificGroup and
+// IncludeSourceSpecificGroup.
+// JoinGroup uses the system assigned multicast interface when ifi is
+// nil, although this is not recommended because the assignment
+// depends on platforms and sometimes it might require routing
+// configuration.
+func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	so, ok := sockOpts[ssoJoinGroup]
+	if !ok {
+		return errOpNoSupport
+	}
+	grp := netAddrToIP16(group)
+	if grp == nil {
+		return errMissingAddress
+	}
+	return so.setGroup(c.Conn, ifi, grp)
+}
+
+// LeaveGroup leaves the group address group on the interface ifi
+// regardless of whether the group is any-source group or
+// source-specific group.
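+//
+// A typical pattern (as in the package examples below) is to join on
+// a specific interface and leave before closing the connection:
+//
+//	p := ipv6.NewPacketConn(c)
+//	if err := p.JoinGroup(en0, &net.UDPAddr{IP: group}); err != nil {
+//		// error handling
+//	}
+//	defer p.LeaveGroup(en0, &net.UDPAddr{IP: group})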
+func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	so, ok := sockOpts[ssoLeaveGroup]
+	if !ok {
+		return errOpNoSupport
+	}
+	grp := netAddrToIP16(group)
+	if grp == nil {
+		return errMissingAddress
+	}
+	return so.setGroup(c.Conn, ifi, grp)
+}
+
+// JoinSourceSpecificGroup joins the source-specific group comprising
+// group and source on the interface ifi.
+// JoinSourceSpecificGroup uses the system assigned multicast
+// interface when ifi is nil, although this is not recommended because
+// the assignment depends on platforms and sometimes it might require
+// routing configuration.
+func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	so, ok := sockOpts[ssoJoinSourceGroup]
+	if !ok {
+		return errOpNoSupport
+	}
+	grp := netAddrToIP16(group)
+	if grp == nil {
+		return errMissingAddress
+	}
+	src := netAddrToIP16(source)
+	if src == nil {
+		return errMissingAddress
+	}
+	return so.setSourceGroup(c.Conn, ifi, grp, src)
+}
+
+// LeaveSourceSpecificGroup leaves the source-specific group on the
+// interface ifi.
+func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	so, ok := sockOpts[ssoLeaveSourceGroup]
+	if !ok {
+		return errOpNoSupport
+	}
+	grp := netAddrToIP16(group)
+	if grp == nil {
+		return errMissingAddress
+	}
+	src := netAddrToIP16(source)
+	if src == nil {
+		return errMissingAddress
+	}
+	return so.setSourceGroup(c.Conn, ifi, grp, src)
+}
+
+// ExcludeSourceSpecificGroup excludes the source-specific group from
+// the already joined any-source groups by JoinGroup on the interface
+// ifi.
+func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	so, ok := sockOpts[ssoBlockSourceGroup]
+	if !ok {
+		return errOpNoSupport
+	}
+	grp := netAddrToIP16(group)
+	if grp == nil {
+		return errMissingAddress
+	}
+	src := netAddrToIP16(source)
+	if src == nil {
+		return errMissingAddress
+	}
+	return so.setSourceGroup(c.Conn, ifi, grp, src)
+}
+
+// IncludeSourceSpecificGroup includes the excluded source-specific
+// group by ExcludeSourceSpecificGroup again on the interface ifi.
+func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	so, ok := sockOpts[ssoUnblockSourceGroup]
+	if !ok {
+		return errOpNoSupport
+	}
+	grp := netAddrToIP16(group)
+	if grp == nil {
+		return errMissingAddress
+	}
+	src := netAddrToIP16(source)
+	if src == nil {
+		return errMissingAddress
+	}
+	return so.setSourceGroup(c.Conn, ifi, grp, src)
+}
+
+// Checksum reports whether the kernel will compute, store or verify a
+// checksum for both incoming and outgoing packets. If on is true, it
+// returns an offset in bytes into the data of where the checksum
+// field is located.
+func (c *dgramOpt) Checksum() (on bool, offset int, err error) {
+	if !c.ok() {
+		return false, 0, syscall.EINVAL
+	}
+	so, ok := sockOpts[ssoChecksum]
+	if !ok {
+		return false, 0, errOpNoSupport
+	}
+	offset, err = so.GetInt(c.Conn)
+	if err != nil {
+		return false, 0, err
+	}
+	if offset < 0 {
+		return false, 0, nil
+	}
+	return true, offset, nil
+}
+
+// SetChecksum enables the kernel checksum processing. If on is true,
+// the offset should be an offset in bytes into the data of where the
+// checksum field is located.
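+// For example, OSPFv3 keeps its checksum at offset 12 of the OSPF
+// header, so the OSPF hello example in this package calls
+// SetChecksum(true, 12).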
+func (c *dgramOpt) SetChecksum(on bool, offset int) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	so, ok := sockOpts[ssoChecksum]
+	if !ok {
+		return errOpNoSupport
+	}
+	if !on {
+		offset = -1
+	}
+	return so.SetInt(c.Conn, offset)
+}
+
+// ICMPFilter returns an ICMP filter.
+func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) {
+	if !c.ok() {
+		return nil, syscall.EINVAL
+	}
+	so, ok := sockOpts[ssoICMPFilter]
+	if !ok {
+		return nil, errOpNoSupport
+	}
+	return so.getICMPFilter(c.Conn)
+}
+
+// SetICMPFilter deploys the ICMP filter.
+func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	so, ok := sockOpts[ssoICMPFilter]
+	if !ok {
+		return errOpNoSupport
+	}
+	return so.setICMPFilter(c.Conn, f)
+}
+
+// SetBPF attaches a BPF program to the connection.
+//
+// Only supported on Linux.
+func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	so, ok := sockOpts[ssoAttachFilter]
+	if !ok {
+		return errOpNoSupport
+	}
+	return so.setBPF(c.Conn, filter)
+}
diff --git a/vendor/golang.org/x/net/ipv6/doc.go b/vendor/golang.org/x/net/ipv6/doc.go
new file mode 100644
index 0000000..664a97d
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/doc.go
@@ -0,0 +1,243 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ipv6 implements IP-level socket options for the Internet
+// Protocol version 6.
+//
+// The package provides IP-level socket options that allow
+// manipulation of IPv6 facilities.
+//
+// The IPv6 protocol is defined in RFC 8200.
+// Socket interface extensions are defined in RFC 3493, RFC 3542 and
+// RFC 3678.
+// MLDv1 and MLDv2 are defined in RFC 2710 and RFC 3810.
+// Source-specific multicast is defined in RFC 4607.
+//
+// On Darwin, this package requires OS X Mavericks version 10.9 or
+// above, or equivalent.
+//
+//
+// Unicasting
+//
+// The options for unicasting are available for net.TCPConn,
+// net.UDPConn and net.IPConn which are created as network connections
+// that use the IPv6 transport. When a single TCP connection carrying
+// a data flow of multiple packets needs to indicate the flow is
+// important, Conn is used to set the traffic class field on the IPv6
+// header for each packet.
+//
+//	ln, err := net.Listen("tcp6", "[::]:1024")
+//	if err != nil {
+//		// error handling
+//	}
+//	defer ln.Close()
+//	for {
+//		c, err := ln.Accept()
+//		if err != nil {
+//			// error handling
+//		}
+//		go func(c net.Conn) {
+//			defer c.Close()
+//
+// The outgoing packets will be labeled DiffServ assured forwarding
+// class 1 low drop precedence, known as AF11 packets.
+//
+//			if err := ipv6.NewConn(c).SetTrafficClass(0x28); err != nil {
+//				// error handling
+//			}
+//			if _, err := c.Write(data); err != nil {
+//				// error handling
+//			}
+//		}(c)
+//	}
+//
+//
+// Multicasting
+//
+// The options for multicasting are available for net.UDPConn and
+// net.IPConn which are created as network connections that use the
+// IPv6 transport. A few network facilities must be prepared before
+// you begin multicasting, at a minimum joining network interfaces and
+// multicast groups.
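+// (The interface names and the interface index used in what follows
+// are illustrative; pick interfaces that actually exist on the host.)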
+// +// en0, err := net.InterfaceByName("en0") +// if err != nil { +// // error handling +// } +// en1, err := net.InterfaceByIndex(911) +// if err != nil { +// // error handling +// } +// group := net.ParseIP("ff02::114") +// +// First, an application listens to an appropriate address with an +// appropriate service port. +// +// c, err := net.ListenPacket("udp6", "[::]:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// +// Second, the application joins multicast groups, starts listening to +// the groups on the specified network interfaces. Note that the +// service port for transport layer protocol does not matter with this +// operation as joining groups affects only network and link layer +// protocols, such as IPv6 and Ethernet. +// +// p := ipv6.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// +// The application might set per packet control message transmissions +// between the protocol stack within the kernel. When the application +// needs a destination address on an incoming packet, +// SetControlMessage of PacketConn is used to enable control message +// transmissions. +// +// if err := p.SetControlMessage(ipv6.FlagDst, true); err != nil { +// // error handling +// } +// +// The application could identify whether the received packets are +// of interest by using the control message that contains the +// destination address of the received packet. +// +// b := make([]byte, 1500) +// for { +// n, rcm, src, err := p.ReadFrom(b) +// if err != nil { +// // error handling +// } +// if rcm.Dst.IsMulticast() { +// if rcm.Dst.Equal(group) { +// // joined group, do something +// } else { +// // unknown group, discard +// continue +// } +// } +// +// The application can also send both unicast and multicast packets. +// +// p.SetTrafficClass(0x0) +// p.SetHopLimit(16) +// if _, err := p.WriteTo(data[:n], nil, src); err != nil { +// // error handling +// } +// dst := &net.UDPAddr{IP: group, Port: 1024} +// wcm := ipv6.ControlMessage{TrafficClass: 0xe0, HopLimit: 1} +// for _, ifi := range []*net.Interface{en0, en1} { +// wcm.IfIndex = ifi.Index +// if _, err := p.WriteTo(data[:n], &wcm, dst); err != nil { +// // error handling +// } +// } +// } +// +// +// More multicasting +// +// An application that uses PacketConn may join multiple multicast +// groups. For example, a UDP listener with port 1024 might join two +// different groups across over two different network interfaces by +// using: +// +// c, err := net.ListenPacket("udp6", "[::]:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// p := ipv6.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::1:114")}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}); err != nil { +// // error handling +// } +// +// It is possible for multiple UDP listeners that listen on the same +// UDP port to join the same multicast group. The net package will +// provide a socket that listens to a wildcard address with reusable +// UDP port when an appropriate multicast address prefix is passed to +// the net.ListenPacket or net.ListenUDP. 
+// +// c1, err := net.ListenPacket("udp6", "[ff02::]:1024") +// if err != nil { +// // error handling +// } +// defer c1.Close() +// c2, err := net.ListenPacket("udp6", "[ff02::]:1024") +// if err != nil { +// // error handling +// } +// defer c2.Close() +// p1 := ipv6.NewPacketConn(c1) +// if err := p1.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil { +// // error handling +// } +// p2 := ipv6.NewPacketConn(c2) +// if err := p2.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil { +// // error handling +// } +// +// Also it is possible for the application to leave or rejoin a +// multicast group on the network interface. +// +// if err := p.LeaveGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff01::114")}); err != nil { +// // error handling +// } +// +// +// Source-specific multicasting +// +// An application that uses PacketConn on MLDv2 supported platform is +// able to join source-specific multicast groups. +// The application may use JoinSourceSpecificGroup and +// LeaveSourceSpecificGroup for the operation known as "include" mode, +// +// ssmgroup := net.UDPAddr{IP: net.ParseIP("ff32::8000:9")} +// ssmsource := net.UDPAddr{IP: net.ParseIP("fe80::cafe")} +// if err := p.JoinSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// if err := p.LeaveSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// +// or JoinGroup, ExcludeSourceSpecificGroup, +// IncludeSourceSpecificGroup and LeaveGroup for the operation known +// as "exclude" mode. +// +// exclsource := net.UDPAddr{IP: net.ParseIP("fe80::dead")} +// if err := p.JoinGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// if err := p.ExcludeSourceSpecificGroup(en0, &ssmgroup, &exclsource); err != nil { +// // error handling +// } +// if err := p.LeaveGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// +// Note that it depends on each platform implementation what happens +// when an application which runs on MLDv2 unsupported platform uses +// JoinSourceSpecificGroup and LeaveSourceSpecificGroup. +// In general the platform tries to fall back to conversations using +// MLDv1 and starts to listen to multicast traffic. +// In the fallback case, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup may return an error. +package ipv6 // import "golang.org/x/net/ipv6" + +// BUG(mikio): This package is not implemented on NaCl and Plan 9. diff --git a/vendor/golang.org/x/net/ipv6/endpoint.go b/vendor/golang.org/x/net/ipv6/endpoint.go new file mode 100644 index 0000000..0624c17 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/endpoint.go @@ -0,0 +1,128 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + "time" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the JoinSourceSpecificGroup, +// LeaveSourceSpecificGroup, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup methods of PacketConn are not +// implemented. + +// A Conn represents a network endpoint that uses IPv6 transport. +// It allows to set basic IP-level socket options such as traffic +// class and hop limit. 
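+// A Conn is created by wrapping an existing connection with NewConn,
+// as the markingTCP example in example_test.go does.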
+type Conn struct { + genericOpt +} + +type genericOpt struct { + *socket.Conn +} + +func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil } + +// PathMTU returns a path MTU value for the destination associated +// with the endpoint. +func (c *Conn) PathMTU() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoPathMTU] + if !ok { + return 0, errOpNoSupport + } + _, mtu, err := so.getMTUInfo(c.Conn) + if err != nil { + return 0, err + } + return mtu, nil +} + +// NewConn returns a new Conn. +func NewConn(c net.Conn) *Conn { + cc, _ := socket.NewConn(c) + return &Conn{ + genericOpt: genericOpt{Conn: cc}, + } +} + +// A PacketConn represents a packet network endpoint that uses IPv6 +// transport. It is used to control several IP-level socket options +// including IPv6 header manipulation. It also provides datagram +// based network I/O methods specific to the IPv6 and higher layer +// protocols such as OSPF, GRE, and UDP. +type PacketConn struct { + genericOpt + dgramOpt + payloadHandler +} + +type dgramOpt struct { + *socket.Conn +} + +func (c *dgramOpt) ok() bool { return c != nil && c.Conn != nil } + +// SetControlMessage allows to receive the per packet basis IP-level +// socket options. +func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return setControlMessage(c.dgramOpt.Conn, &c.payloadHandler.rawOpt, cf, on) +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *PacketConn) SetDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *PacketConn) SetReadDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. +func (c *PacketConn) SetWriteDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.SetWriteDeadline(t) +} + +// Close closes the endpoint. +func (c *PacketConn) Close() error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.Close() +} + +// NewPacketConn returns a new PacketConn using c as its underlying +// transport. +func NewPacketConn(c net.PacketConn) *PacketConn { + cc, _ := socket.NewConn(c.(net.Conn)) + return &PacketConn{ + genericOpt: genericOpt{Conn: cc}, + dgramOpt: dgramOpt{Conn: cc}, + payloadHandler: payloadHandler{PacketConn: c, Conn: cc}, + } +} diff --git a/vendor/golang.org/x/net/ipv6/example_test.go b/vendor/golang.org/x/net/ipv6/example_test.go new file mode 100644 index 0000000..e761aa2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/example_test.go @@ -0,0 +1,216 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6_test + +import ( + "fmt" + "log" + "net" + "os" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/ipv6" +) + +func ExampleConn_markingTCP() { + ln, err := net.Listen("tcp", "[::]:1024") + if err != nil { + log.Fatal(err) + } + defer ln.Close() + + for { + c, err := ln.Accept() + if err != nil { + log.Fatal(err) + } + go func(c net.Conn) { + defer c.Close() + if c.RemoteAddr().(*net.TCPAddr).IP.To16() != nil && c.RemoteAddr().(*net.TCPAddr).IP.To4() == nil { + p := ipv6.NewConn(c) + if err := p.SetTrafficClass(0x28); err != nil { // DSCP AF11 + log.Fatal(err) + } + if err := p.SetHopLimit(128); err != nil { + log.Fatal(err) + } + } + if _, err := c.Write([]byte("HELLO-R-U-THERE-ACK")); err != nil { + log.Fatal(err) + } + }(c) + } +} + +func ExamplePacketConn_servingOneShotMulticastDNS() { + c, err := net.ListenPacket("udp6", "[::]:5353") // mDNS over UDP + if err != nil { + log.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + + en0, err := net.InterfaceByName("en0") + if err != nil { + log.Fatal(err) + } + mDNSLinkLocal := net.UDPAddr{IP: net.ParseIP("ff02::fb")} + if err := p.JoinGroup(en0, &mDNSLinkLocal); err != nil { + log.Fatal(err) + } + defer p.LeaveGroup(en0, &mDNSLinkLocal) + if err := p.SetControlMessage(ipv6.FlagDst|ipv6.FlagInterface, true); err != nil { + log.Fatal(err) + } + + var wcm ipv6.ControlMessage + b := make([]byte, 1500) + for { + _, rcm, peer, err := p.ReadFrom(b) + if err != nil { + log.Fatal(err) + } + if !rcm.Dst.IsMulticast() || !rcm.Dst.Equal(mDNSLinkLocal.IP) { + continue + } + wcm.IfIndex = rcm.IfIndex + answers := []byte("FAKE-MDNS-ANSWERS") // fake mDNS answers, you need to implement this + if _, err := p.WriteTo(answers, &wcm, peer); err != nil { + log.Fatal(err) + } + } +} + +func ExamplePacketConn_tracingIPPacketRoute() { + // Tracing an IP packet route to www.google.com. + + const host = "www.google.com" + ips, err := net.LookupIP(host) + if err != nil { + log.Fatal(err) + } + var dst net.IPAddr + for _, ip := range ips { + if ip.To16() != nil && ip.To4() == nil { + dst.IP = ip + fmt.Printf("using %v for tracing an IP packet route to %s\n", dst.IP, host) + break + } + } + if dst.IP == nil { + log.Fatal("no AAAA record found") + } + + c, err := net.ListenPacket("ip6:58", "::") // ICMP for IPv6 + if err != nil { + log.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + + if err := p.SetControlMessage(ipv6.FlagHopLimit|ipv6.FlagSrc|ipv6.FlagDst|ipv6.FlagInterface, true); err != nil { + log.Fatal(err) + } + wm := icmp.Message{ + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, + Data: []byte("HELLO-R-U-THERE"), + }, + } + var f ipv6.ICMPFilter + f.SetAll(true) + f.Accept(ipv6.ICMPTypeTimeExceeded) + f.Accept(ipv6.ICMPTypeEchoReply) + if err := p.SetICMPFilter(&f); err != nil { + log.Fatal(err) + } + + var wcm ipv6.ControlMessage + rb := make([]byte, 1500) + for i := 1; i <= 64; i++ { // up to 64 hops + wm.Body.(*icmp.Echo).Seq = i + wb, err := wm.Marshal(nil) + if err != nil { + log.Fatal(err) + } + + // In the real world usually there are several + // multiple traffic-engineered paths for each hop. + // You may need to probe a few times to each hop. 
+		begin := time.Now()
+		wcm.HopLimit = i
+		if _, err := p.WriteTo(wb, &wcm, &dst); err != nil {
+			log.Fatal(err)
+		}
+		if err := p.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil {
+			log.Fatal(err)
+		}
+		n, rcm, peer, err := p.ReadFrom(rb)
+		if err != nil {
+			if err, ok := err.(net.Error); ok && err.Timeout() {
+				fmt.Printf("%v\t*\n", i)
+				continue
+			}
+			log.Fatal(err)
+		}
+		rm, err := icmp.ParseMessage(58, rb[:n])
+		if err != nil {
+			log.Fatal(err)
+		}
+		rtt := time.Since(begin)
+
+		// In the real world you need to determine whether the
+		// received message is yours using ControlMessage.Src,
+		// ControlMessage.Dst, icmp.Echo.ID and icmp.Echo.Seq.
+		switch rm.Type {
+		case ipv6.ICMPTypeTimeExceeded:
+			names, _ := net.LookupAddr(peer.String())
+			fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, rcm)
+		case ipv6.ICMPTypeEchoReply:
+			names, _ := net.LookupAddr(peer.String())
+			fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, rcm)
+			return
+		}
+	}
+}
+
+func ExamplePacketConn_advertisingOSPFHello() {
+	c, err := net.ListenPacket("ip6:89", "::") // OSPF for IPv6
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer c.Close()
+	p := ipv6.NewPacketConn(c)
+
+	en0, err := net.InterfaceByName("en0")
+	if err != nil {
+		log.Fatal(err)
+	}
+	allSPFRouters := net.IPAddr{IP: net.ParseIP("ff02::5")}
+	if err := p.JoinGroup(en0, &allSPFRouters); err != nil {
+		log.Fatal(err)
+	}
+	defer p.LeaveGroup(en0, &allSPFRouters)
+
+	hello := make([]byte, 24) // fake hello data, you need to implement this
+	ospf := make([]byte, 16)  // fake ospf header, you need to implement this
+	ospf[0] = 3               // version 3
+	ospf[1] = 1               // hello packet
+	ospf = append(ospf, hello...)
+	if err := p.SetChecksum(true, 12); err != nil {
+		log.Fatal(err)
+	}
+
+	cm := ipv6.ControlMessage{
+		TrafficClass: 0xc0, // DSCP CS6
+		HopLimit:     1,
+		IfIndex:      en0.Index,
+	}
+	if _, err := p.WriteTo(ospf, &cm, &allSPFRouters); err != nil {
+		log.Fatal(err)
+	}
+}
diff --git a/vendor/golang.org/x/net/ipv6/gen.go b/vendor/golang.org/x/net/ipv6/gen.go
new file mode 100644
index 0000000..5885664
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/gen.go
@@ -0,0 +1,199 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+//go:generate go run gen.go
+
+// This program generates system adaptation constants and types,
+// internet protocol constants and tables by reading template files
+// and IANA protocol registries.
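// [Editorial sketch, not part of the vendored patch] The generator below
// canonicalizes IANA record names into Go identifier suffixes with a
// strings.Replacer; this standalone fragment reproduces that step so the
// mapping from registry names to the ICMPType constant names in iana.go
// is easy to see.
package main

import (
	"fmt"
	"strings"
)

func main() {
	sr := strings.NewReplacer(
		"Messages", "", "Message", "", "ICMP", "",
		"+", "P", "-", "", "/", "", ".", "", " ", "",
	)
	fmt.Println(sr.Replace("Packet Too Big"))              // PacketTooBig
	fmt.Println(sr.Replace("ICMP Node Information Query")) // NodeInformationQuery
}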
+package main + +import ( + "bytes" + "encoding/xml" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + "runtime" + "strconv" + "strings" +) + +func main() { + if err := genzsys(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := geniana(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func genzsys() error { + defs := "defs_" + runtime.GOOS + ".go" + f, err := os.Open(defs) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + f.Close() + cmd := exec.Command("go", "tool", "cgo", "-godefs", defs) + b, err := cmd.Output() + if err != nil { + return err + } + b, err = format.Source(b) + if err != nil { + return err + } + zsys := "zsys_" + runtime.GOOS + ".go" + switch runtime.GOOS { + case "freebsd", "linux": + zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go" + } + if err := ioutil.WriteFile(zsys, b, 0644); err != nil { + return err + } + return nil +} + +var registries = []struct { + url string + parse func(io.Writer, io.Reader) error +}{ + { + "https://www.iana.org/assignments/icmpv6-parameters/icmpv6-parameters.xml", + parseICMPv6Parameters, + }, +} + +func geniana() error { + var bb bytes.Buffer + fmt.Fprintf(&bb, "// go generate gen.go\n") + fmt.Fprintf(&bb, "// Code generated by the command above; DO NOT EDIT.\n\n") + fmt.Fprintf(&bb, "package ipv6\n\n") + for _, r := range registries { + resp, err := http.Get(r.url) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("got HTTP status code %v for %v\n", resp.StatusCode, r.url) + } + if err := r.parse(&bb, resp.Body); err != nil { + return err + } + fmt.Fprintf(&bb, "\n") + } + b, err := format.Source(bb.Bytes()) + if err != nil { + return err + } + if err := ioutil.WriteFile("iana.go", b, 0644); err != nil { + return err + } + return nil +} + +func parseICMPv6Parameters(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var icp icmpv6Parameters + if err := dec.Decode(&icp); err != nil { + return err + } + prs := icp.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + fmt.Fprintf(w, "const (\n") + for _, pr := range prs { + if pr.Name == "" { + continue + } + fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Name, pr.Value) + fmt.Fprintf(w, "// %s\n", pr.OrigName) + } + fmt.Fprintf(w, ")\n\n") + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n") + for _, pr := range prs { + if pr.Name == "" { + continue + } + fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigName)) + } + fmt.Fprintf(w, "}\n") + return nil +} + +type icmpv6Parameters struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Registries []struct { + Title string `xml:"title"` + Records []struct { + Value string `xml:"value"` + Name string `xml:"name"` + } `xml:"record"` + } `xml:"registry"` +} + +type canonICMPv6ParamRecord struct { + OrigName string + Name string + Value int +} + +func (icp *icmpv6Parameters) escape() []canonICMPv6ParamRecord { + id := -1 + for i, r := range icp.Registries { + if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") { + id = i + break + } + } + if id < 0 { + return nil + } + prs := make([]canonICMPv6ParamRecord, len(icp.Registries[id].Records)) + sr := strings.NewReplacer( + "Messages", "", + "Message", "", + "ICMP", "", + "+", "P", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for 
i, pr := range icp.Registries[id].Records {
+		if strings.Contains(pr.Name, "Reserved") ||
+			strings.Contains(pr.Name, "Unassigned") ||
+			strings.Contains(pr.Name, "Deprecated") ||
+			strings.Contains(pr.Name, "Experiment") ||
+			strings.Contains(pr.Name, "experiment") {
+			continue
+		}
+		ss := strings.Split(pr.Name, "\n")
+		if len(ss) > 1 {
+			prs[i].Name = strings.Join(ss, " ")
+		} else {
+			prs[i].Name = ss[0]
+		}
+		s := strings.TrimSpace(prs[i].Name)
+		prs[i].OrigName = s
+		prs[i].Name = sr.Replace(s)
+		prs[i].Value, _ = strconv.Atoi(pr.Value)
+	}
+	return prs
+}
diff --git a/vendor/golang.org/x/net/ipv6/genericopt.go b/vendor/golang.org/x/net/ipv6/genericopt.go
new file mode 100644
index 0000000..e9dbc2e
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/genericopt.go
@@ -0,0 +1,58 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import "syscall"
+
+// TrafficClass returns the traffic class field value for outgoing
+// packets.
+func (c *genericOpt) TrafficClass() (int, error) {
+	if !c.ok() {
+		return 0, syscall.EINVAL
+	}
+	so, ok := sockOpts[ssoTrafficClass]
+	if !ok {
+		return 0, errOpNoSupport
+	}
+	return so.GetInt(c.Conn)
+}
+
+// SetTrafficClass sets the traffic class field value for future
+// outgoing packets.
+func (c *genericOpt) SetTrafficClass(tclass int) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	so, ok := sockOpts[ssoTrafficClass]
+	if !ok {
+		return errOpNoSupport
+	}
+	return so.SetInt(c.Conn, tclass)
+}
+
+// HopLimit returns the hop limit field value for outgoing packets.
+func (c *genericOpt) HopLimit() (int, error) {
+	if !c.ok() {
+		return 0, syscall.EINVAL
+	}
+	so, ok := sockOpts[ssoHopLimit]
+	if !ok {
+		return 0, errOpNoSupport
+	}
+	return so.GetInt(c.Conn)
+}
+
+// SetHopLimit sets the hop limit field value for future outgoing
+// packets.
+func (c *genericOpt) SetHopLimit(hoplim int) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	so, ok := sockOpts[ssoHopLimit]
+	if !ok {
+		return errOpNoSupport
+	}
+	return so.SetInt(c.Conn, hoplim)
+}
diff --git a/vendor/golang.org/x/net/ipv6/header.go b/vendor/golang.org/x/net/ipv6/header.go
new file mode 100644
index 0000000..e05cb08
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/header.go
@@ -0,0 +1,55 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import (
+	"encoding/binary"
+	"fmt"
+	"net"
+)
+
+const (
+	Version   = 6  // protocol version
+	HeaderLen = 40 // header length
+)
+
+// A Header represents an IPv6 base header.
+type Header struct {
+	Version      int    // protocol version
+	TrafficClass int    // traffic class
+	FlowLabel    int    // flow label
+	PayloadLen   int    // payload length
+	NextHeader   int    // next header
+	HopLimit     int    // hop limit
+	Src          net.IP // source address
+	Dst          net.IP // destination address
+}
+
+func (h *Header) String() string {
+	if h == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("ver=%d tclass=%#x flowlbl=%#x payloadlen=%d nxthdr=%d hoplim=%d src=%v dst=%v", h.Version, h.TrafficClass, h.FlowLabel, h.PayloadLen, h.NextHeader, h.HopLimit, h.Src, h.Dst)
+}
+
+// ParseHeader parses b as an IPv6 base header.
+func ParseHeader(b []byte) (*Header, error) { + if len(b) < HeaderLen { + return nil, errHeaderTooShort + } + h := &Header{ + Version: int(b[0]) >> 4, + TrafficClass: int(b[0]&0x0f)<<4 | int(b[1])>>4, + FlowLabel: int(b[1]&0x0f)<<16 | int(b[2])<<8 | int(b[3]), + PayloadLen: int(binary.BigEndian.Uint16(b[4:6])), + NextHeader: int(b[6]), + HopLimit: int(b[7]), + } + h.Src = make(net.IP, net.IPv6len) + copy(h.Src, b[8:24]) + h.Dst = make(net.IP, net.IPv6len) + copy(h.Dst, b[24:40]) + return h, nil +} diff --git a/vendor/golang.org/x/net/ipv6/header_test.go b/vendor/golang.org/x/net/ipv6/header_test.go new file mode 100644 index 0000000..ca11dc2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/header_test.go @@ -0,0 +1,55 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "net" + "reflect" + "strings" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv6" +) + +var ( + wireHeaderFromKernel = [ipv6.HeaderLen]byte{ + 0x69, 0x8b, 0xee, 0xf1, + 0xca, 0xfe, 0x2c, 0x01, + 0x20, 0x01, 0x0d, 0xb8, + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, + 0x20, 0x01, 0x0d, 0xb8, + 0x00, 0x02, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, + } + + testHeader = &ipv6.Header{ + Version: ipv6.Version, + TrafficClass: iana.DiffServAF43, + FlowLabel: 0xbeef1, + PayloadLen: 0xcafe, + NextHeader: iana.ProtocolIPv6Frag, + HopLimit: 1, + Src: net.ParseIP("2001:db8:1::1"), + Dst: net.ParseIP("2001:db8:2::1"), + } +) + +func TestParseHeader(t *testing.T) { + h, err := ipv6.ParseHeader(wireHeaderFromKernel[:]) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(h, testHeader) { + t.Fatalf("got %#v; want %#v", h, testHeader) + } + s := h.String() + if strings.Contains(s, ",") { + t.Fatalf("should be space-separated values: %s", s) + } +} diff --git a/vendor/golang.org/x/net/ipv6/helper.go b/vendor/golang.org/x/net/ipv6/helper.go new file mode 100644 index 0000000..2597401 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/helper.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "errors" + "net" +) + +var ( + errMissingAddress = errors.New("missing address") + errHeaderTooShort = errors.New("header too short") + errInvalidConnType = errors.New("invalid conn type") + errOpNoSupport = errors.New("operation not supported") + errNoSuchInterface = errors.New("no such interface") +) + +func boolint(b bool) int { + if b { + return 1 + } + return 0 +} + +func netAddrToIP16(a net.Addr) net.IP { + switch v := a.(type) { + case *net.UDPAddr: + if ip := v.IP.To16(); ip != nil && ip.To4() == nil { + return ip + } + case *net.IPAddr: + if ip := v.IP.To16(); ip != nil && ip.To4() == nil { + return ip + } + } + return nil +} + +func opAddr(a net.Addr) net.Addr { + switch a.(type) { + case *net.TCPAddr: + if a == nil { + return nil + } + case *net.UDPAddr: + if a == nil { + return nil + } + case *net.IPAddr: + if a == nil { + return nil + } + } + return a +} diff --git a/vendor/golang.org/x/net/ipv6/iana.go b/vendor/golang.org/x/net/ipv6/iana.go new file mode 100644 index 0000000..32db1aa --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/iana.go @@ -0,0 +1,86 @@ +// go generate gen.go +// Code generated by the command above; DO NOT EDIT. 
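// [Editorial sketch, not part of the vendored patch] A round trip through
// ParseHeader above: a zeroed 40-byte buffer with only the version nibble
// and hop limit set decodes to a Header whose addresses are the
// unspecified address "::".
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/ipv6"
)

func main() {
	b := make([]byte, ipv6.HeaderLen)
	b[0] = ipv6.Version << 4 // version 6, traffic class 0
	b[7] = 64                // hop limit
	h, err := ipv6.ParseHeader(b)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(h) // ver=6 tclass=0x0 flowlbl=0x0 payloadlen=0 nxthdr=0 hoplim=64 src=:: dst=::
}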
+ +package ipv6 + +// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2018-03-09 +const ( + ICMPTypeDestinationUnreachable ICMPType = 1 // Destination Unreachable + ICMPTypePacketTooBig ICMPType = 2 // Packet Too Big + ICMPTypeTimeExceeded ICMPType = 3 // Time Exceeded + ICMPTypeParameterProblem ICMPType = 4 // Parameter Problem + ICMPTypeEchoRequest ICMPType = 128 // Echo Request + ICMPTypeEchoReply ICMPType = 129 // Echo Reply + ICMPTypeMulticastListenerQuery ICMPType = 130 // Multicast Listener Query + ICMPTypeMulticastListenerReport ICMPType = 131 // Multicast Listener Report + ICMPTypeMulticastListenerDone ICMPType = 132 // Multicast Listener Done + ICMPTypeRouterSolicitation ICMPType = 133 // Router Solicitation + ICMPTypeRouterAdvertisement ICMPType = 134 // Router Advertisement + ICMPTypeNeighborSolicitation ICMPType = 135 // Neighbor Solicitation + ICMPTypeNeighborAdvertisement ICMPType = 136 // Neighbor Advertisement + ICMPTypeRedirect ICMPType = 137 // Redirect Message + ICMPTypeRouterRenumbering ICMPType = 138 // Router Renumbering + ICMPTypeNodeInformationQuery ICMPType = 139 // ICMP Node Information Query + ICMPTypeNodeInformationResponse ICMPType = 140 // ICMP Node Information Response + ICMPTypeInverseNeighborDiscoverySolicitation ICMPType = 141 // Inverse Neighbor Discovery Solicitation Message + ICMPTypeInverseNeighborDiscoveryAdvertisement ICMPType = 142 // Inverse Neighbor Discovery Advertisement Message + ICMPTypeVersion2MulticastListenerReport ICMPType = 143 // Version 2 Multicast Listener Report + ICMPTypeHomeAgentAddressDiscoveryRequest ICMPType = 144 // Home Agent Address Discovery Request Message + ICMPTypeHomeAgentAddressDiscoveryReply ICMPType = 145 // Home Agent Address Discovery Reply Message + ICMPTypeMobilePrefixSolicitation ICMPType = 146 // Mobile Prefix Solicitation + ICMPTypeMobilePrefixAdvertisement ICMPType = 147 // Mobile Prefix Advertisement + ICMPTypeCertificationPathSolicitation ICMPType = 148 // Certification Path Solicitation Message + ICMPTypeCertificationPathAdvertisement ICMPType = 149 // Certification Path Advertisement Message + ICMPTypeMulticastRouterAdvertisement ICMPType = 151 // Multicast Router Advertisement + ICMPTypeMulticastRouterSolicitation ICMPType = 152 // Multicast Router Solicitation + ICMPTypeMulticastRouterTermination ICMPType = 153 // Multicast Router Termination + ICMPTypeFMIPv6 ICMPType = 154 // FMIPv6 Messages + ICMPTypeRPLControl ICMPType = 155 // RPL Control Message + ICMPTypeILNPv6LocatorUpdate ICMPType = 156 // ILNPv6 Locator Update Message + ICMPTypeDuplicateAddressRequest ICMPType = 157 // Duplicate Address Request + ICMPTypeDuplicateAddressConfirmation ICMPType = 158 // Duplicate Address Confirmation + ICMPTypeMPLControl ICMPType = 159 // MPL Control Message + ICMPTypeExtendedEchoRequest ICMPType = 160 // Extended Echo Request + ICMPTypeExtendedEchoReply ICMPType = 161 // Extended Echo Reply +) + +// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2018-03-09 +var icmpTypes = map[ICMPType]string{ + 1: "destination unreachable", + 2: "packet too big", + 3: "time exceeded", + 4: "parameter problem", + 128: "echo request", + 129: "echo reply", + 130: "multicast listener query", + 131: "multicast listener report", + 132: "multicast listener done", + 133: "router solicitation", + 134: "router advertisement", + 135: "neighbor solicitation", + 136: "neighbor advertisement", + 137: "redirect message", + 138: "router renumbering", + 139: "icmp node information query", + 
140: "icmp node information response",
+	141: "inverse neighbor discovery solicitation message",
+	142: "inverse neighbor discovery advertisement message",
+	143: "version 2 multicast listener report",
+	144: "home agent address discovery request message",
+	145: "home agent address discovery reply message",
+	146: "mobile prefix solicitation",
+	147: "mobile prefix advertisement",
+	148: "certification path solicitation message",
+	149: "certification path advertisement message",
+	151: "multicast router advertisement",
+	152: "multicast router solicitation",
+	153: "multicast router termination",
+	154: "fmipv6 messages",
+	155: "rpl control message",
+	156: "ilnpv6 locator update message",
+	157: "duplicate address request",
+	158: "duplicate address confirmation",
+	159: "mpl control message",
+	160: "extended echo request",
+	161: "extended echo reply",
+}
diff --git a/vendor/golang.org/x/net/ipv6/icmp.go b/vendor/golang.org/x/net/ipv6/icmp.go
new file mode 100644
index 0000000..b7f48e2
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/icmp.go
@@ -0,0 +1,60 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import "golang.org/x/net/internal/iana"
+
+// BUG(mikio): On Windows, methods related to ICMPFilter are not
+// implemented.
+
+// An ICMPType represents a type of ICMP message.
+type ICMPType int
+
+func (typ ICMPType) String() string {
+	s, ok := icmpTypes[typ]
+	if !ok {
+		return "<nil>"
+	}
+	return s
+}
+
+// Protocol returns the ICMPv6 protocol number.
+func (typ ICMPType) Protocol() int {
+	return iana.ProtocolIPv6ICMP
+}
+
+// An ICMPFilter represents an ICMP message filter for incoming
+// packets. The filter belongs to a packet delivery path on a host and
+// it cannot interact with forwarding packets or tunnel-outer packets.
+//
+// Note: RFC 8200 defines a reasonable role model. A node means a
+// device that implements IP. A router means a node that forwards IP
+// packets not explicitly addressed to itself, and a host means a node
+// that is not a router.
+type ICMPFilter struct {
+	icmpv6Filter
+}
+
+// Accept accepts incoming ICMP packets whose type field value is
+// typ.
+func (f *ICMPFilter) Accept(typ ICMPType) {
+	f.accept(typ)
+}
+
+// Block blocks incoming ICMP packets whose type field value is typ.
+func (f *ICMPFilter) Block(typ ICMPType) {
+	f.block(typ)
+}
+
+// SetAll sets the same filter action, block or accept, for all ICMP
+// message types.
+func (f *ICMPFilter) SetAll(block bool) {
+	f.setAll(block)
+}
+
+// WillBlock reports whether the ICMP type will be blocked.
+func (f *ICMPFilter) WillBlock(typ ICMPType) bool {
+	return f.willBlock(typ)
+}
diff --git a/vendor/golang.org/x/net/ipv6/icmp_bsd.go b/vendor/golang.org/x/net/ipv6/icmp_bsd.go
new file mode 100644
index 0000000..e1a791d
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/icmp_bsd.go
@@ -0,0 +1,29 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
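// [Editorial sketch, not part of the vendored patch] Typical use of the
// ICMPFilter API defined above: block everything by default, then accept
// only the types of interest. Installing the filter with
// PacketConn.SetICMPFilter needs a raw ICMPv6 socket, which usually
// requires elevated privileges.
package main

import (
	"fmt"

	"golang.org/x/net/ipv6"
)

func main() {
	var f ipv6.ICMPFilter
	f.SetAll(true) // block all ICMP types
	f.Accept(ipv6.ICMPTypeTimeExceeded)
	f.Accept(ipv6.ICMPTypeEchoReply)
	fmt.Println(f.WillBlock(ipv6.ICMPTypeEchoRequest)) // true: still blocked
}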
+ +// +build darwin dragonfly freebsd netbsd openbsd + +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + f.Filt[typ>>5] |= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) block(typ ICMPType) { + f.Filt[typ>>5] &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) setAll(block bool) { + for i := range f.Filt { + if block { + f.Filt[i] = 0 + } else { + f.Filt[i] = 1<<32 - 1 + } + } +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return f.Filt[typ>>5]&(1<<(uint32(typ)&31)) == 0 +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_linux.go b/vendor/golang.org/x/net/ipv6/icmp_linux.go new file mode 100644 index 0000000..647f6b4 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_linux.go @@ -0,0 +1,27 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + f.Data[typ>>5] &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) block(typ ICMPType) { + f.Data[typ>>5] |= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) setAll(block bool) { + for i := range f.Data { + if block { + f.Data[i] = 1<<32 - 1 + } else { + f.Data[i] = 0 + } + } +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return f.Data[typ>>5]&(1<<(uint32(typ)&31)) != 0 +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_solaris.go b/vendor/golang.org/x/net/ipv6/icmp_solaris.go new file mode 100644 index 0000000..7c23bb1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_solaris.go @@ -0,0 +1,27 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + f.X__icmp6_filt[typ>>5] |= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) block(typ ICMPType) { + f.X__icmp6_filt[typ>>5] &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) setAll(block bool) { + for i := range f.X__icmp6_filt { + if block { + f.X__icmp6_filt[i] = 0 + } else { + f.X__icmp6_filt[i] = 1<<32 - 1 + } + } +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return f.X__icmp6_filt[typ>>5]&(1<<(uint32(typ)&31)) == 0 +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_stub.go b/vendor/golang.org/x/net/ipv6/icmp_stub.go new file mode 100644 index 0000000..c4b9be6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_stub.go @@ -0,0 +1,23 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +type icmpv6Filter struct { +} + +func (f *icmpv6Filter) accept(typ ICMPType) { +} + +func (f *icmpv6Filter) block(typ ICMPType) { +} + +func (f *icmpv6Filter) setAll(block bool) { +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return false +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_test.go b/vendor/golang.org/x/net/ipv6/icmp_test.go new file mode 100644 index 0000000..d8e9675 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_test.go @@ -0,0 +1,96 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
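// [Editorial sketch, not part of the vendored patch] The bit arithmetic
// shared by the per-OS filter implementations above: ICMP type t selects
// word t>>5 and bit t&31 of a 256-bit set. BSD kernels set a bit to pass
// a type while Linux sets a bit to block one, which is why accept and
// block are mirrored between the files.
package main

import "fmt"

func main() {
	var set [8]uint32
	t := 129                           // ICMPv6 echo reply
	set[t>>5] |= 1 << (uint32(t) & 31) // mark type 129 in the set
	fmt.Println(t>>5, t&31)            // 4 1: word 4, bit 1
	fmt.Println(set[t>>5]&(1<<(uint32(t)&31)) != 0) // true: bit is set
}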
+
+package ipv6_test
+
+import (
+	"net"
+	"reflect"
+	"runtime"
+	"testing"
+
+	"golang.org/x/net/internal/nettest"
+	"golang.org/x/net/ipv6"
+)
+
+var icmpStringTests = []struct {
+	in  ipv6.ICMPType
+	out string
+}{
+	{ipv6.ICMPTypeDestinationUnreachable, "destination unreachable"},
+
+	{256, "<nil>"},
+}
+
+func TestICMPString(t *testing.T) {
+	for _, tt := range icmpStringTests {
+		s := tt.in.String()
+		if s != tt.out {
+			t.Errorf("got %s; want %s", s, tt.out)
+		}
+	}
+}
+
+func TestICMPFilter(t *testing.T) {
+	switch runtime.GOOS {
+	case "nacl", "plan9", "windows":
+		t.Skipf("not supported on %s", runtime.GOOS)
+	}
+
+	var f ipv6.ICMPFilter
+	for _, toggle := range []bool{false, true} {
+		f.SetAll(toggle)
+		for _, typ := range []ipv6.ICMPType{
+			ipv6.ICMPTypeDestinationUnreachable,
+			ipv6.ICMPTypeEchoReply,
+			ipv6.ICMPTypeNeighborSolicitation,
+			ipv6.ICMPTypeDuplicateAddressConfirmation,
+		} {
+			f.Accept(typ)
+			if f.WillBlock(typ) {
+				t.Errorf("ipv6.ICMPFilter.Set(%v, false) failed", typ)
+			}
+			f.Block(typ)
+			if !f.WillBlock(typ) {
+				t.Errorf("ipv6.ICMPFilter.Set(%v, true) failed", typ)
+			}
+		}
+	}
+}
+
+func TestSetICMPFilter(t *testing.T) {
+	switch runtime.GOOS {
+	case "nacl", "plan9", "windows":
+		t.Skipf("not supported on %s", runtime.GOOS)
+	}
+	if !supportsIPv6 {
+		t.Skip("ipv6 is not supported")
+	}
+	if m, ok := nettest.SupportsRawIPSocket(); !ok {
+		t.Skip(m)
+	}
+
+	c, err := net.ListenPacket("ip6:ipv6-icmp", "::1")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer c.Close()
+
+	p := ipv6.NewPacketConn(c)
+
+	var f ipv6.ICMPFilter
+	f.SetAll(true)
+	f.Accept(ipv6.ICMPTypeEchoRequest)
+	f.Accept(ipv6.ICMPTypeEchoReply)
+	if err := p.SetICMPFilter(&f); err != nil {
+		t.Fatal(err)
+	}
+	kf, err := p.ICMPFilter()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !reflect.DeepEqual(kf, &f) {
+		t.Fatalf("got %#v; want %#v", kf, f)
+	}
+}
diff --git a/vendor/golang.org/x/net/ipv6/icmp_windows.go b/vendor/golang.org/x/net/ipv6/icmp_windows.go
new file mode 100644
index 0000000..443cd07
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/icmp_windows.go
@@ -0,0 +1,22 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+func (f *icmpv6Filter) accept(typ ICMPType) {
+	// TODO(mikio): implement this
+}
+
+func (f *icmpv6Filter) block(typ ICMPType) {
+	// TODO(mikio): implement this
+}
+
+func (f *icmpv6Filter) setAll(block bool) {
+	// TODO(mikio): implement this
+}
+
+func (f *icmpv6Filter) willBlock(typ ICMPType) bool {
+	// TODO(mikio): implement this
+	return false
+}
diff --git a/vendor/golang.org/x/net/ipv6/mocktransponder_test.go b/vendor/golang.org/x/net/ipv6/mocktransponder_test.go
new file mode 100644
index 0000000..6efe56c
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/mocktransponder_test.go
@@ -0,0 +1,32 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
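// [Editorial sketch, not part of the vendored patch] The stringer
// behavior exercised by TestICMPString above: known types map through the
// generated icmpTypes table, unknown values yield "<nil>", and Protocol
// always reports 58, the IANA protocol number of ICMPv6.
package main

import (
	"fmt"

	"golang.org/x/net/ipv6"
)

func main() {
	fmt.Println(ipv6.ICMPTypeDestinationUnreachable) // destination unreachable
	fmt.Println(ipv6.ICMPType(256))                  // <nil>
	fmt.Println(ipv6.ICMPTypeEchoReply.Protocol())   // 58
}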
+ +package ipv6_test + +import ( + "net" + "testing" +) + +func connector(t *testing.T, network, addr string, done chan<- bool) { + defer func() { done <- true }() + + c, err := net.Dial(network, addr) + if err != nil { + t.Error(err) + return + } + c.Close() +} + +func acceptor(t *testing.T, ln net.Listener, done chan<- bool) { + defer func() { done <- true }() + + c, err := ln.Accept() + if err != nil { + t.Error(err) + return + } + c.Close() +} diff --git a/vendor/golang.org/x/net/ipv6/multicast_test.go b/vendor/golang.org/x/net/ipv6/multicast_test.go new file mode 100644 index 0000000..69a21cd --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/multicast_test.go @@ -0,0 +1,264 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "bytes" + "net" + "os" + "runtime" + "testing" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +var packetConnReadWriteMulticastUDPTests = []struct { + addr string + grp, src *net.UDPAddr +}{ + {"[ff02::]:0", &net.UDPAddr{IP: net.ParseIP("ff02::114")}, nil}, // see RFC 4727 + + {"[ff30::8000:0]:0", &net.UDPAddr{IP: net.ParseIP("ff30::8000:1")}, &net.UDPAddr{IP: net.IPv6loopback}}, // see RFC 5771 +} + +func TestPacketConnReadWriteMulticastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if !nettest.SupportsIPv6MulticastDeliveryOnLoopback() { + t.Skipf("multicast delivery doesn't work correctly on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range packetConnReadWriteMulticastUDPTests { + c, err := net.ListenPacket("udp6", tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + grp := *tt.grp + grp.Port = c.LocalAddr().(*net.UDPAddr).Port + p := ipv6.NewPacketConn(c) + defer p.Close() + if tt.src == nil { + if err := p.JoinGroup(ifi, &grp); err != nil { + t.Fatal(err) + } + defer p.LeaveGroup(ifi, &grp) + } else { + if err := p.JoinSourceSpecificGroup(ifi, &grp, tt.src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support MLDv2 fail here + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + defer p.LeaveSourceSpecificGroup(ifi, &grp, tt.src) + } + if err := p.SetMulticastInterface(ifi); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastInterface(); err != nil { + t.Fatal(err) + } + if err := p.SetMulticastLoopback(true); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastLoopback(); err != nil { + t.Fatal(err) + } + + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + Src: net.IPv6loopback, + IfIndex: ifi.Index, + } + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + wb := []byte("HELLO-R-U-THERE") + + for i, toggle := range []bool{true, false, true} { + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { + t.Fatal(err) + } 
+			cm.HopLimit = i + 1
+			if n, err := p.WriteTo(wb, &cm, &grp); err != nil {
+				t.Fatal(err)
+			} else if n != len(wb) {
+				t.Fatalf("got %v; want %v", n, len(wb))
+			}
+			rb := make([]byte, 128)
+			if n, _, _, err := p.ReadFrom(rb); err != nil {
+				t.Fatal(err)
+			} else if !bytes.Equal(rb[:n], wb) {
+				t.Fatalf("got %v; want %v", rb[:n], wb)
+			}
+		}
+	}
+}
+
+var packetConnReadWriteMulticastICMPTests = []struct {
+	grp, src *net.IPAddr
+}{
+	{&net.IPAddr{IP: net.ParseIP("ff02::114")}, nil}, // see RFC 4727
+
+	{&net.IPAddr{IP: net.ParseIP("ff30::8000:1")}, &net.IPAddr{IP: net.IPv6loopback}}, // see RFC 5771
+}
+
+func TestPacketConnReadWriteMulticastICMP(t *testing.T) {
+	switch runtime.GOOS {
+	case "nacl", "plan9", "windows":
+		t.Skipf("not supported on %s", runtime.GOOS)
+	}
+	if !supportsIPv6 {
+		t.Skip("ipv6 is not supported")
+	}
+	if !nettest.SupportsIPv6MulticastDeliveryOnLoopback() {
+		t.Skipf("multicast delivery doesn't work correctly on %s", runtime.GOOS)
+	}
+	if m, ok := nettest.SupportsRawIPSocket(); !ok {
+		t.Skip(m)
+	}
+	ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagMulticast|net.FlagLoopback)
+	if ifi == nil {
+		t.Skipf("not available on %s", runtime.GOOS)
+	}
+
+	for _, tt := range packetConnReadWriteMulticastICMPTests {
+		c, err := net.ListenPacket("ip6:ipv6-icmp", "::")
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer c.Close()
+
+		pshicmp := icmp.IPv6PseudoHeader(c.LocalAddr().(*net.IPAddr).IP, tt.grp.IP)
+		p := ipv6.NewPacketConn(c)
+		defer p.Close()
+		if tt.src == nil {
+			if err := p.JoinGroup(ifi, tt.grp); err != nil {
+				t.Fatal(err)
+			}
+			defer p.LeaveGroup(ifi, tt.grp)
+		} else {
+			if err := p.JoinSourceSpecificGroup(ifi, tt.grp, tt.src); err != nil {
+				switch runtime.GOOS {
+				case "freebsd", "linux":
+				default: // platforms that don't support MLDv2 fail here
+					t.Logf("not supported on %s", runtime.GOOS)
+					continue
+				}
+				t.Fatal(err)
+			}
+			defer p.LeaveSourceSpecificGroup(ifi, tt.grp, tt.src)
+		}
+		if err := p.SetMulticastInterface(ifi); err != nil {
+			t.Fatal(err)
+		}
+		if _, err := p.MulticastInterface(); err != nil {
+			t.Fatal(err)
+		}
+		if err := p.SetMulticastLoopback(true); err != nil {
+			t.Fatal(err)
+		}
+		if _, err := p.MulticastLoopback(); err != nil {
+			t.Fatal(err)
+		}
+
+		cm := ipv6.ControlMessage{
+			TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced,
+			Src:          net.IPv6loopback,
+			IfIndex:      ifi.Index,
+		}
+		cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU
+
+		var f ipv6.ICMPFilter
+		f.SetAll(true)
+		f.Accept(ipv6.ICMPTypeEchoReply)
+		if err := p.SetICMPFilter(&f); err != nil {
+			t.Fatal(err)
+		}
+
+		var psh []byte
+		for i, toggle := range []bool{true, false, true} {
+			if toggle {
+				psh = nil
+				if err := p.SetChecksum(true, 2); err != nil {
+					// Solaris never allows ICMP
+					// properties to be modified.
+					if runtime.GOOS != "solaris" {
+						t.Fatal(err)
+					}
+				}
+			} else {
+				psh = pshicmp
+				// Some platforms never allow the
+				// kernel checksum processing to be
+				// disabled.
+ p.SetChecksum(false, -1) + } + wb, err := (&icmp.Message{ + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(psh) + if err != nil { + t.Fatal(err) + } + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { + t.Fatal(err) + } + cm.HopLimit = i + 1 + if n, err := p.WriteTo(wb, &cm, tt.grp); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if n, _, _, err := p.ReadFrom(rb); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels have some limitation on receiving icmp packet through raw socket + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } else { + if m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, rb[:n]); err != nil { + t.Fatal(err) + } else if m.Type != ipv6.ICMPTypeEchoReply || m.Code != 0 { + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv6.ICMPTypeEchoReply, 0) + } + } + } + } +} diff --git a/vendor/golang.org/x/net/ipv6/multicastlistener_test.go b/vendor/golang.org/x/net/ipv6/multicastlistener_test.go new file mode 100644 index 0000000..b27713e --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/multicastlistener_test.go @@ -0,0 +1,261 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +var udpMultipleGroupListenerTests = []net.Addr{ + &net.UDPAddr{IP: net.ParseIP("ff02::114")}, // see RFC 4727 + &net.UDPAddr{IP: net.ParseIP("ff02::1:114")}, + &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}, +} + +func TestUDPSinglePacketConnWithMultipleGroupListeners(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + for _, gaddr := range udpMultipleGroupListenerTests { + c, err := net.ListenPacket("udp6", "[::]:0") // wildcard address with non-reusable port + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv6.NewPacketConn(c) + var mift []*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip6", &ifi); !ok { + continue + } + if err := p.JoinGroup(&ifi, gaddr); err != nil { + t.Fatal(err) + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + if err := p.LeaveGroup(ifi, gaddr); err != nil { + t.Fatal(err) + } + } + } +} + +func TestUDPMultiplePacketConnWithMultipleGroupListeners(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + for _, gaddr := range udpMultipleGroupListenerTests { + c1, err := net.ListenPacket("udp6", "[ff02::]:0") // wildcard address with reusable port + if err != nil { + t.Fatal(err) + } + defer c1.Close() + _, port, err := net.SplitHostPort(c1.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + c2, err := net.ListenPacket("udp6", net.JoinHostPort("ff02::", port)) 
// wildcard address with reusable port
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer c2.Close()
+
+		var ps [2]*ipv6.PacketConn
+		ps[0] = ipv6.NewPacketConn(c1)
+		ps[1] = ipv6.NewPacketConn(c2)
+		var mift []*net.Interface
+
+		ift, err := net.Interfaces()
+		if err != nil {
+			t.Fatal(err)
+		}
+		for i, ifi := range ift {
+			if _, ok := nettest.IsMulticastCapable("ip6", &ifi); !ok {
+				continue
+			}
+			for _, p := range ps {
+				if err := p.JoinGroup(&ifi, gaddr); err != nil {
+					t.Fatal(err)
+				}
+			}
+			mift = append(mift, &ift[i])
+		}
+		for _, ifi := range mift {
+			for _, p := range ps {
+				if err := p.LeaveGroup(ifi, gaddr); err != nil {
+					t.Fatal(err)
+				}
+			}
+		}
+	}
+}
+
+func TestUDPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) {
+	switch runtime.GOOS {
+	case "nacl", "plan9", "windows":
+		t.Skipf("not supported on %s", runtime.GOOS)
+	}
+	if !supportsIPv6 {
+		t.Skip("ipv6 is not supported")
+	}
+
+	gaddr := net.IPAddr{IP: net.ParseIP("ff02::114")} // see RFC 4727
+	type ml struct {
+		c   *ipv6.PacketConn
+		ifi *net.Interface
+	}
+	var mlt []*ml
+
+	ift, err := net.Interfaces()
+	if err != nil {
+		t.Fatal(err)
+	}
+	port := "0"
+	for i, ifi := range ift {
+		ip, ok := nettest.IsMulticastCapable("ip6", &ifi)
+		if !ok {
+			continue
+		}
+		c, err := net.ListenPacket("udp6", net.JoinHostPort(ip.String()+"%"+ifi.Name, port)) // unicast address with non-reusable port
+		if err != nil {
+			// The listen may fail when the service is
+			// already in use, but it's fine because the
+			// purpose of this is not to test the
+			// bookkeeping of the IP control block inside
+			// the kernel.
+			t.Log(err)
+			continue
+		}
+		defer c.Close()
+		if port == "0" {
+			_, port, err = net.SplitHostPort(c.LocalAddr().String())
+			if err != nil {
+				t.Fatal(err)
+			}
+		}
+		p := ipv6.NewPacketConn(c)
+		if err := p.JoinGroup(&ifi, &gaddr); err != nil {
+			t.Fatal(err)
+		}
+		mlt = append(mlt, &ml{p, &ift[i]})
+	}
+	for _, m := range mlt {
+		if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil {
+			t.Fatal(err)
+		}
+	}
+}
+
+func TestIPSinglePacketConnWithSingleGroupListener(t *testing.T) {
+	switch runtime.GOOS {
+	case "nacl", "plan9", "windows":
+		t.Skipf("not supported on %s", runtime.GOOS)
+	}
+	if !supportsIPv6 {
+		t.Skip("ipv6 is not supported")
+	}
+	if m, ok := nettest.SupportsRawIPSocket(); !ok {
+		t.Skip(m)
+	}
+
+	c, err := net.ListenPacket("ip6:ipv6-icmp", "::") // wildcard address
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer c.Close()
+
+	p := ipv6.NewPacketConn(c)
+	gaddr := net.IPAddr{IP: net.ParseIP("ff02::114")} // see RFC 4727
+	var mift []*net.Interface
+
+	ift, err := net.Interfaces()
+	if err != nil {
+		t.Fatal(err)
+	}
+	for i, ifi := range ift {
+		if _, ok := nettest.IsMulticastCapable("ip6", &ifi); !ok {
+			continue
+		}
+		if err := p.JoinGroup(&ifi, &gaddr); err != nil {
+			t.Fatal(err)
+		}
+		mift = append(mift, &ift[i])
+	}
+	for _, ifi := range mift {
+		if err := p.LeaveGroup(ifi, &gaddr); err != nil {
+			t.Fatal(err)
+		}
+	}
+}
+
+func TestIPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) {
+	switch runtime.GOOS {
+	case "darwin", "dragonfly", "openbsd": // platforms that return fe80::1%lo0: bind: can't assign requested address
+		t.Skipf("not supported on %s", runtime.GOOS)
+	case "nacl", "plan9", "windows":
+		t.Skipf("not supported on %s", runtime.GOOS)
+	}
+	if !supportsIPv6 {
+		t.Skip("ipv6 is not supported")
+	}
+	if m, ok := nettest.SupportsRawIPSocket(); !ok {
+		t.Skip(m)
+	}
+
+	gaddr := net.IPAddr{IP: net.ParseIP("ff02::114")} // see RFC 4727
+	type ml struct {
+		c   *ipv6.PacketConn
+ ifi *net.Interface + } + var mlt []*ml + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + ip, ok := nettest.IsMulticastCapable("ip6", &ifi) + if !ok { + continue + } + c, err := net.ListenPacket("ip6:ipv6-icmp", ip.String()+"%"+ifi.Name) // unicast address + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + if err := p.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mlt = append(mlt, &ml{p, &ift[i]}) + } + for _, m := range mlt { + if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/golang.org/x/net/ipv6/multicastsockopt_test.go b/vendor/golang.org/x/net/ipv6/multicastsockopt_test.go new file mode 100644 index 0000000..9e6b902 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/multicastsockopt_test.go @@ -0,0 +1,157 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +var packetConnMulticastSocketOptionTests = []struct { + net, proto, addr string + grp, src net.Addr +}{ + {"udp6", "", "[ff02::]:0", &net.UDPAddr{IP: net.ParseIP("ff02::114")}, nil}, // see RFC 4727 + {"ip6", ":ipv6-icmp", "::", &net.IPAddr{IP: net.ParseIP("ff02::115")}, nil}, // see RFC 4727 + + {"udp6", "", "[ff30::8000:0]:0", &net.UDPAddr{IP: net.ParseIP("ff30::8000:1")}, &net.UDPAddr{IP: net.IPv6loopback}}, // see RFC 5771 + {"ip6", ":ipv6-icmp", "::", &net.IPAddr{IP: net.ParseIP("ff30::8000:2")}, &net.IPAddr{IP: net.IPv6loopback}}, // see RFC 5771 +} + +func TestPacketConnMulticastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + m, ok := nettest.SupportsRawIPSocket() + for _, tt := range packetConnMulticastSocketOptionTests { + if tt.net == "ip6" && !ok { + t.Log(m) + continue + } + c, err := net.ListenPacket(tt.net+tt.proto, tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + defer p.Close() + + if tt.src == nil { + testMulticastSocketOptions(t, p, ifi, tt.grp) + } else { + testSourceSpecificMulticastSocketOptions(t, p, ifi, tt.grp, tt.src) + } + } +} + +type testIPv6MulticastConn interface { + MulticastHopLimit() (int, error) + SetMulticastHopLimit(ttl int) error + MulticastLoopback() (bool, error) + SetMulticastLoopback(bool) error + JoinGroup(*net.Interface, net.Addr) error + LeaveGroup(*net.Interface, net.Addr) error + JoinSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + LeaveSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + ExcludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + IncludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error +} + +func testMulticastSocketOptions(t *testing.T, c testIPv6MulticastConn, ifi *net.Interface, grp net.Addr) { + const hoplim = 255 + if err := c.SetMulticastHopLimit(hoplim); err != nil { + t.Error(err) + return + } + if v, err := c.MulticastHopLimit(); err != nil { + t.Error(err) + return + } else if v != hoplim { + t.Errorf("got %v; want %v", v, hoplim) + return + } + + for _, toggle := range 
[]bool{true, false} { + if err := c.SetMulticastLoopback(toggle); err != nil { + t.Error(err) + return + } + if v, err := c.MulticastLoopback(); err != nil { + t.Error(err) + return + } else if v != toggle { + t.Errorf("got %v; want %v", v, toggle) + return + } + } + + if err := c.JoinGroup(ifi, grp); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } +} + +func testSourceSpecificMulticastSocketOptions(t *testing.T, c testIPv6MulticastConn, ifi *net.Interface, grp, src net.Addr) { + // MCAST_JOIN_GROUP -> MCAST_BLOCK_SOURCE -> MCAST_UNBLOCK_SOURCE -> MCAST_LEAVE_GROUP + if err := c.JoinGroup(ifi, grp); err != nil { + t.Error(err) + return + } + if err := c.ExcludeSourceSpecificGroup(ifi, grp, src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support MLDv2 fail here + t.Logf("not supported on %s", runtime.GOOS) + return + } + t.Error(err) + return + } + if err := c.IncludeSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } + + // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_SOURCE_GROUP + if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + + // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_GROUP + if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } +} diff --git a/vendor/golang.org/x/net/ipv6/payload.go b/vendor/golang.org/x/net/ipv6/payload.go new file mode 100644 index 0000000..a8197f1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload.go @@ -0,0 +1,23 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ControlMessage for ReadFrom and WriteTo +// methods of PacketConn is not implemented. + +// A payloadHandler represents the IPv6 datagram payload handler. +type payloadHandler struct { + net.PacketConn + *socket.Conn + rawOpt +} + +func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil && c.Conn != nil } diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg.go b/vendor/golang.org/x/net/ipv6/payload_cmsg.go new file mode 100644 index 0000000..4ee4b06 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg.go @@ -0,0 +1,35 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !nacl,!plan9,!windows + +package ipv6 + +import ( + "net" + "syscall" +) + +// ReadFrom reads a payload of the received IPv6 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, syscall.EINVAL + } + return c.readFrom(b) +} + +// WriteTo writes a payload of the IPv6 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. 
The control message cm allows +// the IPv6 header fields and the datagram path to be specified. The +// cm may be nil if control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, syscall.EINVAL + } + return c.writeTo(b, cm, dst) +} diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go b/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go new file mode 100644 index 0000000..fdc6c39 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go @@ -0,0 +1,55 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 +// +build !nacl,!plan9,!windows + +package ipv6 + +import "net" + +func (c *payloadHandler) readFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + c.rawOpt.RLock() + oob := NewControlMessage(c.rawOpt.cflags) + c.rawOpt.RUnlock() + var nn int + switch c := c.PacketConn.(type) { + case *net.UDPConn: + if n, nn, _, src, err = c.ReadMsgUDP(b, oob); err != nil { + return 0, nil, nil, err + } + case *net.IPConn: + if n, nn, _, src, err = c.ReadMsgIP(b, oob); err != nil { + return 0, nil, nil, err + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Err: errInvalidConnType} + } + if nn > 0 { + cm = new(ControlMessage) + if err = cm.Parse(oob[:nn]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + } + if cm != nil { + cm.Src = netAddrToIP16(src) + } + return +} + +func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + oob := cm.Marshal() + if dst == nil { + return 0, &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errMissingAddress} + } + switch c := c.PacketConn.(type) { + case *net.UDPConn: + n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr)) + case *net.IPConn: + n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr)) + default: + return 0, &net.OpError{Op: "write", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Addr: opAddr(dst), Err: errInvalidConnType} + } + return +} diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go b/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go new file mode 100644 index 0000000..8f6d02e --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go @@ -0,0 +1,57 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
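// [Editorial sketch, not part of the vendored patch] The ReadFrom/WriteTo
// contract documented above, exercised end to end over a loopback UDP
// socket that sends to itself. The control message returned by ReadFrom
// carries whichever per-packet fields were enabled with
// SetControlMessage; cm may be nil on write when no header control is
// needed.
package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv6"
)

func main() {
	c, err := net.ListenPacket("udp6", "[::1]:0")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	p := ipv6.NewPacketConn(c)
	if err := p.SetControlMessage(ipv6.FlagHopLimit|ipv6.FlagDst, true); err != nil {
		log.Print(err) // not supported on every platform
	}
	if _, err := p.WriteTo([]byte("ping"), nil, c.LocalAddr()); err != nil {
		log.Fatal(err)
	}
	b := make([]byte, 128)
	n, cm, src, err := p.ReadFrom(b)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d bytes from %v: %v", n, src, cm)
}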
+ +// +build go1.9 +// +build !nacl,!plan9,!windows + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (c *payloadHandler) readFrom(b []byte) (int, *ControlMessage, net.Addr, error) { + c.rawOpt.RLock() + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + switch c.PacketConn.(type) { + case *net.UDPConn: + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + case *net.IPConn: + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType} + } + var cm *ControlMessage + if m.NN > 0 { + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + cm.Src = netAddrToIP16(m.Addr) + } + return m.N, cm, m.Addr, nil +} + +func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (int, error) { + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: cm.Marshal(), + Addr: dst, + } + err := c.SendMsg(&m, 0) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return m.N, err +} diff --git a/vendor/golang.org/x/net/ipv6/payload_nocmsg.go b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go new file mode 100644 index 0000000..99a4354 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go @@ -0,0 +1,41 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 windows + +package ipv6 + +import ( + "net" + "syscall" +) + +// ReadFrom reads a payload of the received IPv6 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, syscall.EINVAL + } + if n, src, err = c.PacketConn.ReadFrom(b); err != nil { + return 0, nil, nil, err + } + return +} + +// WriteTo writes a payload of the IPv6 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the IPv6 header fields and the datagram path to be specified. The +// cm may be nil if control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, syscall.EINVAL + } + if dst == nil { + return 0, errMissingAddress + } + return c.PacketConn.WriteTo(b, dst) +} diff --git a/vendor/golang.org/x/net/ipv6/readwrite_go1_8_test.go b/vendor/golang.org/x/net/ipv6/readwrite_go1_8_test.go new file mode 100644 index 0000000..c11d92a --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/readwrite_go1_8_test.go @@ -0,0 +1,242 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package ipv6_test + +import ( + "bytes" + "fmt" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +func BenchmarkPacketConnReadWriteUnicast(b *testing.B) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + b.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph := []byte{ + 0x69, 0x8b, 0xee, 0xf1, 0xca, 0xfe, 0xff, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x02, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + } + greh := []byte{0x00, 0x00, 0x86, 0xdd, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + bb := make([]byte, 128) + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + Src: net.IPv6loopback, + } + if ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback); ifi != nil { + cm.IfIndex = ifi.Index + } + + b.Run("UDP", func(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv6.FlagHopLimit | ipv6.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(payload, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(payload, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + }) + b.Run("IP", func(b *testing.B) { + switch runtime.GOOS { + case "netbsd": + b.Skip("need to configure gre on netbsd") + case "openbsd": + b.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolGRE), "::1") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(datagram, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(datagram, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + }) +} + +func TestPacketConnConcurrentReadWriteUnicast(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph := []byte{ + 0x69, 0x8b, 0xee, 0xf1, 0xca, 0xfe, 0xff, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x02, 0x00, 0x00, + 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + } + greh := []byte{0x00, 0x00, 0x86, 0xdd, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + + t.Run("UDP", func(t *testing.T) { + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr()) + }) + }) + t.Run("IP", func(t *testing.T) { + switch runtime.GOOS { + case "netbsd": + t.Skip("need to configure gre on netbsd") + case "openbsd": + t.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolGRE), "::1") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr()) + }) + }) +} + +func testPacketConnConcurrentReadWriteUnicast(t *testing.T, p *ipv6.PacketConn, data []byte, dst net.Addr) { + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + + if err := p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + b := make([]byte, 128) + n, cm, _, err := p.ReadFrom(b) + if err != nil { + t.Error(err) + return + } + if !bytes.Equal(b[:n], data) { + t.Errorf("got %#v; want %#v", b[:n], data) + return + } + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + return + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + Src: net.IPv6loopback, + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + n, err := p.WriteTo(data, &cm, dst) + if err != nil { + t.Error(err) + return + } + if n != len(data) { + t.Errorf("got %d; want %d", n, len(data)) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + go writer(i%2 != 0) + + } + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/ipv6/readwrite_go1_9_test.go b/vendor/golang.org/x/net/ipv6/readwrite_go1_9_test.go new file mode 100644 index 0000000..e2fd733 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/readwrite_go1_9_test.go @@ -0,0 +1,373 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
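// [Editorial sketch, not part of the vendored patch] The batch I/O shape
// the go1.9 benchmark below exercises: ipv6.Message pairs data buffers
// with OOB space, and ReadBatch/WriteBatch move several messages per
// system call where the platform supports it (e.g. recvmmsg/sendmmsg on
// Linux), falling back to one message at a time elsewhere.
package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv6"
)

func main() {
	c, err := net.ListenPacket("udp6", "[::1]:0")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	p := ipv6.NewPacketConn(c)
	wms := []ipv6.Message{{Buffers: [][]byte{[]byte("one")}, Addr: c.LocalAddr()}}
	if _, err := p.WriteBatch(wms, 0); err != nil {
		log.Fatal(err)
	}
	rms := []ipv6.Message{{Buffers: [][]byte{make([]byte, 128)}}}
	n, err := p.ReadBatch(rms, 0)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("messages received:", n, "bytes in first:", rms[0].N)
}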
+ +// +build go1.9 + +package ipv6_test + +import ( + "bytes" + "fmt" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +func BenchmarkPacketConnReadWriteUnicast(b *testing.B) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + b.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph := []byte{ + 0x69, 0x8b, 0xee, 0xf1, 0xca, 0xfe, 0xff, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x02, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + } + greh := []byte{0x00, 0x00, 0x86, 0xdd, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + bb := make([]byte, 128) + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + Src: net.IPv6loopback, + } + if ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback); ifi != nil { + cm.IfIndex = ifi.Index + } + + b.Run("UDP", func(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv6.FlagHopLimit | ipv6.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + wms := []ipv6.Message{ + { + Buffers: [][]byte{payload}, + Addr: dst, + OOB: cm.Marshal(), + }, + } + rms := []ipv6.Message{ + { + Buffers: [][]byte{bb}, + OOB: ipv6.NewControlMessage(cf), + }, + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(payload, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(payload, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("Batch", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteBatch(wms, 0); err != nil { + b.Fatal(err) + } + if _, err := p.ReadBatch(rms, 0); err != nil { + b.Fatal(err) + } + } + }) + }) + b.Run("IP", func(b *testing.B) { + switch runtime.GOOS { + case "netbsd": + b.Skip("need to configure gre on netbsd") + case "openbsd": + b.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolGRE), "::1") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + wms := []ipv6.Message{ + { + Buffers: [][]byte{datagram}, + Addr: dst, + OOB: cm.Marshal(), + }, + } + rms := []ipv6.Message{ + { + Buffers: [][]byte{bb}, + OOB: ipv6.NewControlMessage(cf), + }, + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(datagram, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(datagram, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err 
:= p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("Batch", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteBatch(wms, 0); err != nil { + b.Fatal(err) + } + if _, err := p.ReadBatch(rms, 0); err != nil { + b.Fatal(err) + } + } + }) + }) +} + +func TestPacketConnConcurrentReadWriteUnicast(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph := []byte{ + 0x69, 0x8b, 0xee, 0xf1, 0xca, 0xfe, 0xff, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x02, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + } + greh := []byte{0x00, 0x00, 0x86, 0xdd, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + + t.Run("UDP", func(t *testing.T) { + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr(), false) + }) + t.Run("Batch", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr(), true) + }) + }) + t.Run("IP", func(t *testing.T) { + switch runtime.GOOS { + case "netbsd": + t.Skip("need to configure gre on netbsd") + case "openbsd": + t.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolGRE), "::1") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr(), false) + }) + t.Run("Batch", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr(), true) + }) + }) +} + +func testPacketConnConcurrentReadWriteUnicast(t *testing.T, p *ipv6.PacketConn, data []byte, dst net.Addr, batch bool) { + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + + if err := p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + b := make([]byte, 128) + n, cm, _, err := p.ReadFrom(b) + if err != nil { + t.Error(err) + return + } + if !bytes.Equal(b[:n], data) { + t.Errorf("got %#v; want %#v", b[:n], data) + return + } + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + return + } + } + batchReader := func() { + defer wg.Done() + ms := []ipv6.Message{ + { + Buffers: [][]byte{make([]byte, 128)}, + OOB: ipv6.NewControlMessage(cf), + }, + } + n, err := p.ReadBatch(ms, 0) + if err != nil { + t.Error(err) + return + } + if n != len(ms) { + t.Errorf("got %d; want %d", n, len(ms)) + return + } + var cm ipv6.ControlMessage + if err := cm.Parse(ms[0].OOB[:ms[0].NN]); err != nil { + t.Error(err) + return + } + b := ms[0].Buffers[0][:ms[0].N] + if !bytes.Equal(b, data) { + t.Errorf("got %#v; want %#v", b, data) + return + } + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be 
space-separated values: %s", s) + return + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + Src: net.IPv6loopback, + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + n, err := p.WriteTo(data, &cm, dst) + if err != nil { + t.Error(err) + return + } + if n != len(data) { + t.Errorf("got %d; want %d", n, len(data)) + return + } + } + batchWriter := func(toggle bool) { + defer wg.Done() + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + Src: net.IPv6loopback, + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + ms := []ipv6.Message{ + { + Buffers: [][]byte{data}, + OOB: cm.Marshal(), + Addr: dst, + }, + } + n, err := p.WriteBatch(ms, 0) + if err != nil { + t.Error(err) + return + } + if n != len(ms) { + t.Errorf("got %d; want %d", n, len(ms)) + return + } + if ms[0].N != len(data) { + t.Errorf("got %d; want %d", ms[0].N, len(data)) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + if batch { + go batchReader() + } else { + go reader() + } + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + if batch { + go batchWriter(i%2 != 0) + } else { + go writer(i%2 != 0) + } + } + wg.Add(N) + for i := 0; i < N; i++ { + if batch { + go batchReader() + } else { + go reader() + } + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/ipv6/readwrite_test.go b/vendor/golang.org/x/net/ipv6/readwrite_test.go new file mode 100644 index 0000000..206b915 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/readwrite_test.go @@ -0,0 +1,148 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6_test + +import ( + "bytes" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +func BenchmarkReadWriteUnicast(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + + dst := c.LocalAddr() + wb, rb := []byte("HELLO-R-U-THERE"), make([]byte, 128) + + b.Run("NetUDP", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(wb, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(rb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("IPv6UDP", func(b *testing.B) { + p := ipv6.NewPacketConn(c) + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + } + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + if ifi != nil { + cm.IfIndex = ifi.Index + } + + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(wb, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(rb); err != nil { + b.Fatal(err) + } + } + }) +} + +func TestPacketConnConcurrentReadWriteUnicastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + defer p.Close() + + dst := c.LocalAddr() + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + wb := []byte("HELLO-R-U-THERE") + + if err := p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + rb := make([]byte, 128) + if n, cm, _, err := p.ReadFrom(rb); err != nil { + t.Error(err) + return + } else if !bytes.Equal(rb[:n], wb) { + t.Errorf("got %v; want %v", rb[:n], wb) + return + } else { + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + } + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + Src: net.IPv6loopback, + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + if n, err := p.WriteTo(wb, &cm, dst); err != nil { + t.Error(err) + return + } else if n != len(wb) { + t.Errorf("got %d; want %d", n, len(wb)) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + go writer(i%2 != 0) + } + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt.go b/vendor/golang.org/x/net/ipv6/sockopt.go new file mode 100644 index 0000000..cc3907d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt.go @@ -0,0 +1,43 @@ +// Copyright 2014 The Go Authors. 
All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import "golang.org/x/net/internal/socket"
+
+// Sticky socket options
+const (
+	ssoTrafficClass        = iota // header field for unicast packet, RFC 3542
+	ssoHopLimit                   // header field for unicast packet, RFC 3493
+	ssoMulticastInterface         // outbound interface for multicast packet, RFC 3493
+	ssoMulticastHopLimit          // header field for multicast packet, RFC 3493
+	ssoMulticastLoopback          // loopback for multicast packet, RFC 3493
+	ssoReceiveTrafficClass        // header field on received packet, RFC 3542
+	ssoReceiveHopLimit            // header field on received packet, RFC 2292 or 3542
+	ssoReceivePacketInfo          // inbound or outbound packet path, RFC 2292 or 3542
+	ssoReceivePathMTU             // path mtu, RFC 3542
+	ssoPathMTU                    // path mtu, RFC 3542
+	ssoChecksum                   // packet checksum, RFC 2292 or 3542
+	ssoICMPFilter                 // icmp filter, RFC 2292 or 3542
+	ssoJoinGroup                  // any-source multicast, RFC 3493
+	ssoLeaveGroup                 // any-source multicast, RFC 3493
+	ssoJoinSourceGroup            // source-specific multicast
+	ssoLeaveSourceGroup           // source-specific multicast
+	ssoBlockSourceGroup           // any-source or source-specific multicast
+	ssoUnblockSourceGroup         // any-source or source-specific multicast
+	ssoAttachFilter               // attach BPF for filtering inbound traffic
+)
+
+// Sticky socket option value types
+const (
+	ssoTypeIPMreq = iota + 1
+	ssoTypeGroupReq
+	ssoTypeGroupSourceReq
+)
+
+// A sockOpt represents a binding for a sticky socket option.
+type sockOpt struct {
+	socket.Option
+	typ int // hint for option value type; optional
+}
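The sso* constants above name the "sticky" options: settings applied once to a socket, as opposed to the per-packet values carried in control messages. They are bound to platform socket options in the sys_*.go files below and surface through methods on ipv6.Conn and ipv6.PacketConn. A minimal sketch of setting two of them on a UDP socket (the loopback listener and the chosen values are illustrative, not from the patch):

	package main

	import (
		"log"
		"net"

		"golang.org/x/net/ipv6"
	)

	func main() {
		c, err := net.ListenPacket("udp6", "[::1]:0")
		if err != nil {
			log.Fatal(err)
		}
		defer c.Close()

		p := ipv6.NewPacketConn(c)
		// Backed by the ssoHopLimit and ssoTrafficClass bindings.
		if err := p.SetHopLimit(64); err != nil {
			log.Fatal(err)
		}
		if err := p.SetTrafficClass(0x28); err != nil { // 0x28 == iana.DiffServAF11
			log.Fatal(err)
		}
	}

diff --git a/vendor/golang.org/x/net/ipv6/sockopt_posix.go b/vendor/golang.org/x/net/ipv6/sockopt_posix.go
new file mode 100644
index 0000000..0eac86e
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/sockopt_posix.go
@@ -0,0 +1,87 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.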
+ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + n, err := so.GetInt(c) + if err != nil { + return nil, err + } + return net.InterfaceByIndex(n) +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + var n int + if ifi != nil { + n = ifi.Index + } + return so.SetInt(c, n) +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + b := make([]byte, so.Len) + n, err := so.Get(c, b) + if err != nil { + return nil, err + } + if n != sizeofICMPv6Filter { + return nil, errOpNoSupport + } + return (*ICMPFilter)(unsafe.Pointer(&b[0])), nil +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + b := (*[sizeofICMPv6Filter]byte)(unsafe.Pointer(f))[:sizeofICMPv6Filter] + return so.Set(c, b) +} + +func (so *sockOpt) getMTUInfo(c *socket.Conn) (*net.Interface, int, error) { + b := make([]byte, so.Len) + n, err := so.Get(c, b) + if err != nil { + return nil, 0, err + } + if n != sizeofIPv6Mtuinfo { + return nil, 0, errOpNoSupport + } + mi := (*ipv6Mtuinfo)(unsafe.Pointer(&b[0])) + if mi.Addr.Scope_id == 0 { + return nil, int(mi.Mtu), nil + } + ifi, err := net.InterfaceByIndex(int(mi.Addr.Scope_id)) + if err != nil { + return nil, 0, err + } + return ifi, int(mi.Mtu), nil +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + switch so.typ { + case ssoTypeIPMreq: + return so.setIPMreq(c, ifi, grp) + case ssoTypeGroupReq: + return so.setGroupReq(c, ifi, grp) + default: + return errOpNoSupport + } +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return so.setGroupSourceReq(c, ifi, grp, src) +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return so.setAttachFilter(c, f) +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_stub.go b/vendor/golang.org/x/net/ipv6/sockopt_stub.go new file mode 100644 index 0000000..1f4a273 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_stub.go @@ -0,0 +1,46 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +import ( + "net" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + return errOpNoSupport +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + return errOpNoSupport +} + +func (so *sockOpt) getMTUInfo(c *socket.Conn) (*net.Interface, int, error) { + return nil, 0, errOpNoSupport +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_test.go b/vendor/golang.org/x/net/ipv6/sockopt_test.go new file mode 100644 index 0000000..774338d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_test.go @@ -0,0 +1,133 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "fmt" + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +var supportsIPv6 bool = nettest.SupportsIPv6() + +func TestConnInitiatorPathMTU(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + ln, err := net.Listen("tcp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + done := make(chan bool) + go acceptor(t, ln, done) + + c, err := net.Dial("tcp6", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + if pmtu, err := ipv6.NewConn(c).PathMTU(); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels don't support IPV6_PATHMTU option + t.Logf("not supported on %s", runtime.GOOS) + default: + t.Fatal(err) + } + } else { + t.Logf("path mtu for %v: %v", c.RemoteAddr(), pmtu) + } + + <-done +} + +func TestConnResponderPathMTU(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + ln, err := net.Listen("tcp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + done := make(chan bool) + go connector(t, "tcp6", ln.Addr().String(), done) + + c, err := ln.Accept() + if err != nil { + t.Fatal(err) + } + defer c.Close() + + if pmtu, err := ipv6.NewConn(c).PathMTU(); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels don't support IPV6_PATHMTU option + t.Logf("not supported on %s", runtime.GOOS) + default: + t.Fatal(err) + } + } else { + t.Logf("path mtu for %v: %v", c.RemoteAddr(), pmtu) + } + + <-done +} + +func TestPacketConnChecksum(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if m, ok := 
nettest.SupportsRawIPSocket(); !ok {
+		t.Skip(m)
+	}
+
+	c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolOSPFIGP), "::") // OSPF for IPv6
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer c.Close()
+
+	p := ipv6.NewPacketConn(c)
+	offset := 12 // see RFC 5340
+
+	for _, toggle := range []bool{false, true} {
+		if err := p.SetChecksum(toggle, offset); err != nil {
+			if toggle {
+				t.Fatalf("ipv6.PacketConn.SetChecksum(%v, %v) failed: %v", toggle, offset, err)
+			} else {
+				// Some platforms never allow disabling the kernel
+				// checksum processing.
+				t.Logf("ipv6.PacketConn.SetChecksum(%v, %v) failed: %v", toggle, offset, err)
+			}
+		}
+		if on, offset, err := p.Checksum(); err != nil {
+			t.Fatal(err)
+		} else {
+			t.Logf("kernel checksum processing enabled=%v, offset=%v", on, offset)
+		}
+	}
+}
diff --git a/vendor/golang.org/x/net/ipv6/sys_asmreq.go b/vendor/golang.org/x/net/ipv6/sys_asmreq.go
new file mode 100644
index 0000000..b0510c0
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/sys_asmreq.go
@@ -0,0 +1,24 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows
+
+package ipv6
+
+import (
+	"net"
+	"unsafe"
+
+	"golang.org/x/net/internal/socket"
+)
+
+func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error {
+	var mreq ipv6Mreq
+	copy(mreq.Multiaddr[:], grp)
+	if ifi != nil {
+		mreq.setIfindex(ifi.Index)
+	}
+	b := (*[sizeofIPv6Mreq]byte)(unsafe.Pointer(&mreq))[:sizeofIPv6Mreq]
+	return so.Set(c, b)
+}
diff --git a/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go b/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go
new file mode 100644
index 0000000..eece961
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go
@@ -0,0 +1,17 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows
+
+package ipv6
+
+import (
+	"net"
+
+	"golang.org/x/net/internal/socket"
+)
+
+func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error {
+	return errOpNoSupport
+}
diff --git a/vendor/golang.org/x/net/ipv6/sys_bpf.go b/vendor/golang.org/x/net/ipv6/sys_bpf.go
new file mode 100644
index 0000000..b2dbcb2
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/sys_bpf.go
@@ -0,0 +1,23 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package ipv6
+
+import (
+	"unsafe"
+
+	"golang.org/x/net/bpf"
+	"golang.org/x/net/internal/socket"
+)
+
+func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error {
+	prog := sockFProg{
+		Len:    uint16(len(f)),
+		Filter: (*sockFilter)(unsafe.Pointer(&f[0])),
+	}
+	b := (*[sizeofSockFprog]byte)(unsafe.Pointer(&prog))[:sizeofSockFprog]
+	return so.Set(c, b)
+}
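setAttachFilter above marshals an already-assembled classic BPF program into a sockFProg for the SO_ATTACH_FILTER socket option; it backs ipv6.PacketConn.SetBPF and, per the build tag, is compiled on Linux only. A minimal sketch of attaching a trivial accept-all filter (the udp6 listener is illustrative, not from the patch):

	package main

	import (
		"log"
		"net"

		"golang.org/x/net/bpf"
		"golang.org/x/net/ipv6"
	)

	func main() {
		c, err := net.ListenPacket("udp6", "[::1]:0")
		if err != nil {
			log.Fatal(err)
		}
		defer c.Close()

		// A one-instruction program that accepts every packet.
		prog, err := bpf.Assemble([]bpf.Instruction{
			bpf.RetConstant{Val: 4096}, // pass up to 4096 bytes of each packet
		})
		if err != nil {
			log.Fatal(err)
		}
		if err := ipv6.NewPacketConn(c).SetBPF(prog); err != nil {
			log.Fatal(err) // expected to fail on non-Linux platforms
		}
	}

diff --git a/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go b/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go
new file mode 100644
index 0000000..676bea5
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go
@@ -0,0 +1,16 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.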
+ +// +build !linux + +package ipv6 + +import ( + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/sys_bsd.go b/vendor/golang.org/x/net/ipv6/sys_bsd.go new file mode 100644 index 0000000..e416eaa --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_bsd.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build dragonfly netbsd openbsd + +package ipv6 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_darwin.go b/vendor/golang.org/x/net/ipv6/sys_darwin.go new file mode 100644 index 0000000..e3d0443 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_darwin.go @@ -0,0 +1,106 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +import ( + "net" + "strconv" + "strings" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlHopLimit: {sysIPV6_2292HOPLIMIT, 4, marshal2292HopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_2292PKTINFO, sizeofInet6Pktinfo, marshal2292PacketInfo, parsePacketInfo}, + } + + sockOpts = map[int]*sockOpt{ + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_2292HOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_2292PKTINFO, Len: 4}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + } +) + +func init() { + // Seems like kern.osreldate is veiled on latest OS X. We use + // kern.osrelease instead. + s, err := syscall.Sysctl("kern.osrelease") + if err != nil { + return + } + ss := strings.Split(s, ".") + if len(ss) == 0 { + return + } + // The IP_PKTINFO and protocol-independent multicast API were + // introduced in OS X 10.7 (Darwin 11). But it looks like + // those features require OS X 10.8 (Darwin 12) or above. + // See http://support.apple.com/kb/HT1633. 
+ if mjver, err := strconv.Atoi(ss[0]); err != nil || mjver < 12 { + return + } + ctlOpts[ctlTrafficClass] = ctlOpt{sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass} + ctlOpts[ctlHopLimit] = ctlOpt{sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit} + ctlOpts[ctlPacketInfo] = ctlOpt{sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo} + ctlOpts[ctlNextHop] = ctlOpt{sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop} + ctlOpts[ctlPathMTU] = ctlOpt{sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU} + sockOpts[ssoTrafficClass] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}} + sockOpts[ssoReceiveTrafficClass] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}} + sockOpts[ssoReceiveHopLimit] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}} + sockOpts[ssoReceivePacketInfo] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}} + sockOpts[ssoReceivePathMTU] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}} + sockOpts[ssoPathMTU] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}} + sockOpts[ssoJoinGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq} + sockOpts[ssoLeaveGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq} + sockOpts[ssoJoinSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoLeaveSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoBlockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoUnblockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} +} + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 132)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_freebsd.go b/vendor/golang.org/x/net/ipv6/sys_freebsd.go new file mode 100644 index 0000000..e9349dc --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_freebsd.go @@ -0,0 +1,92 @@ +// Copyright 2013 The Go Authors. 
All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import (
+	"net"
+	"runtime"
+	"strings"
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/net/internal/iana"
+	"golang.org/x/net/internal/socket"
+)
+
+var (
+	ctlOpts = [ctlMax]ctlOpt{
+		ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass},
+		ctlHopLimit:     {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit},
+		ctlPacketInfo:   {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo},
+		ctlNextHop:      {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop},
+		ctlPathMTU:      {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU},
+	}
+
+	sockOpts = map[int]*sockOpt{
+		ssoTrafficClass:        {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}},
+		ssoHopLimit:            {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}},
+		ssoMulticastInterface:  {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}},
+		ssoMulticastHopLimit:   {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}},
+		ssoMulticastLoopback:   {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}},
+		ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}},
+		ssoReceiveHopLimit:     {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}},
+		ssoReceivePacketInfo:   {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}},
+		ssoReceivePathMTU:      {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}},
+		ssoPathMTU:             {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}},
+		ssoChecksum:            {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}},
+		ssoICMPFilter:          {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}},
+		ssoJoinGroup:           {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq},
+		ssoLeaveGroup:          {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq},
+		ssoJoinSourceGroup:     {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},
+		ssoLeaveSourceGroup:    {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},
+		ssoBlockSourceGroup:    {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},
+		ssoUnblockSourceGroup:  {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},
+	}
+)
+
+func init() {
+	if runtime.GOOS == "freebsd" && runtime.GOARCH == "386" {
+		archs, _ := syscall.Sysctl("kern.supported_archs")
+		for _, s := range strings.Fields(archs) {
+			if s == "amd64" {
+				freebsd32o64 = true
+				break
+			}
+		}
+	}
+}
+
+func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) {
+	sa.Len = sizeofSockaddrInet6
+	sa.Family = syscall.AF_INET6
+	copy(sa.Addr[:], ip)
+	sa.Scope_id = uint32(i)
+}
+
+func (pi *inet6Pktinfo) setIfindex(i int) {
+	pi.Ifindex = uint32(i)
+}
+
+func (mreq *ipv6Mreq) setIfindex(i int) {
+	mreq.Interface
= uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gr.Group)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gsr.Group)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(&gsr.Source)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_linux.go b/vendor/golang.org/x/net/ipv6/sys_linux.go new file mode 100644 index 0000000..bc21810 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_linux.go @@ -0,0 +1,74 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolReserved, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMPV6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + 
ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoAttachFilter: {Option: socket.Option{Level: sysSOL_SOCKET, Name: sysSO_ATTACH_FILTER, Len: sizeofSockFprog}}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Ifindex = int32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gr.Group)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gsr.Group)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(&gsr.Source)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_solaris.go b/vendor/golang.org/x/net/ipv6/sys_solaris.go new file mode 100644 index 0000000..d348b5f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_solaris.go @@ -0,0 +1,74 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: 
sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 260)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_ssmreq.go b/vendor/golang.org/x/net/ipv6/sys_ssmreq.go new file mode 100644 index 0000000..add8ccc --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_ssmreq.go @@ -0,0 +1,54 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd linux solaris + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +var freebsd32o64 bool + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var gr groupReq + if ifi != nil { + gr.Interface = uint32(ifi.Index) + } + gr.setGroup(grp) + var b []byte + if freebsd32o64 { + var d [sizeofGroupReq + 4]byte + s := (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr))[:sizeofGroupReq] + } + return so.Set(c, b) +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + var gsr groupSourceReq + if ifi != nil { + gsr.Interface = uint32(ifi.Index) + } + gsr.setSourceGroup(grp, src) + var b []byte + if freebsd32o64 { + var d [sizeofGroupSourceReq + 4]byte + s := (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr))[:sizeofGroupSourceReq] + } + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go b/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go new file mode 100644 index 0000000..581ee49 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !darwin,!freebsd,!linux,!solaris + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/sys_stub.go b/vendor/golang.org/x/net/ipv6/sys_stub.go new file mode 100644 index 0000000..b845388 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_stub.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{} +) diff --git a/vendor/golang.org/x/net/ipv6/sys_windows.go b/vendor/golang.org/x/net/ipv6/sys_windows.go new file mode 100644 index 0000000..fc36b01 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_windows.go @@ -0,0 +1,75 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +const ( + // See ws2tcpip.h. + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PKTINFO = 0x13 + + sizeofSockaddrInet6 = 0x1c + + sizeofIPv6Mreq = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofICMPv6Filter = 0 +) + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type icmpv6Filter struct { + // TODO(mikio): implement this +} + +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{ + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} diff --git a/vendor/golang.org/x/net/ipv6/unicast_test.go b/vendor/golang.org/x/net/ipv6/unicast_test.go new file mode 100644 index 0000000..a0b7d95 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/unicast_test.go @@ -0,0 +1,184 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package ipv6_test
+
+import (
+	"bytes"
+	"net"
+	"os"
+	"runtime"
+	"testing"
+	"time"
+
+	"golang.org/x/net/icmp"
+	"golang.org/x/net/internal/iana"
+	"golang.org/x/net/internal/nettest"
+	"golang.org/x/net/ipv6"
+)
+
+func TestPacketConnReadWriteUnicastUDP(t *testing.T) {
+	switch runtime.GOOS {
+	case "nacl", "plan9", "windows":
+		t.Skipf("not supported on %s", runtime.GOOS)
+	}
+	if !supportsIPv6 {
+		t.Skip("ipv6 is not supported")
+	}
+
+	c, err := nettest.NewLocalPacketListener("udp6")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer c.Close()
+	p := ipv6.NewPacketConn(c)
+	defer p.Close()
+
+	dst := c.LocalAddr()
+	cm := ipv6.ControlMessage{
+		TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced,
+		Src:          net.IPv6loopback,
+	}
+	cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU
+	ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback)
+	if ifi != nil {
+		cm.IfIndex = ifi.Index
+	}
+	wb := []byte("HELLO-R-U-THERE")
+
+	for i, toggle := range []bool{true, false, true} {
+		if err := p.SetControlMessage(cf, toggle); err != nil {
+			if nettest.ProtocolNotSupported(err) {
+				t.Logf("not supported on %s", runtime.GOOS)
+				continue
+			}
+			t.Fatal(err)
+		}
+		cm.HopLimit = i + 1
+		if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil {
+			t.Fatal(err)
+		}
+		if n, err := p.WriteTo(wb, &cm, dst); err != nil {
+			t.Fatal(err)
+		} else if n != len(wb) {
+			t.Fatalf("got %v; want %v", n, len(wb))
+		}
+		rb := make([]byte, 128)
+		if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil {
+			t.Fatal(err)
+		}
+		if n, _, _, err := p.ReadFrom(rb); err != nil {
+			t.Fatal(err)
+		} else if !bytes.Equal(rb[:n], wb) {
+			t.Fatalf("got %v; want %v", rb[:n], wb)
+		}
+	}
+}
+
+func TestPacketConnReadWriteUnicastICMP(t *testing.T) {
+	switch runtime.GOOS {
+	case "nacl", "plan9", "windows":
+		t.Skipf("not supported on %s", runtime.GOOS)
+	}
+	if !supportsIPv6 {
+		t.Skip("ipv6 is not supported")
+	}
+	if m, ok := nettest.SupportsRawIPSocket(); !ok {
+		t.Skip(m)
+	}
+
+	c, err := net.ListenPacket("ip6:ipv6-icmp", "::1")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer c.Close()
+	p := ipv6.NewPacketConn(c)
+	defer p.Close()
+
+	dst, err := net.ResolveIPAddr("ip6", "::1")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	pshicmp := icmp.IPv6PseudoHeader(c.LocalAddr().(*net.IPAddr).IP, dst.IP)
+	cm := ipv6.ControlMessage{
+		TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced,
+		Src:          net.IPv6loopback,
+	}
+	cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU
+	ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback)
+	if ifi != nil {
+		cm.IfIndex = ifi.Index
+	}
+
+	var f ipv6.ICMPFilter
+	f.SetAll(true)
+	f.Accept(ipv6.ICMPTypeEchoReply)
+	if err := p.SetICMPFilter(&f); err != nil {
+		t.Fatal(err)
+	}
+
+	var psh []byte
+	for i, toggle := range []bool{true, false, true} {
+		if toggle {
+			psh = nil
+			if err := p.SetChecksum(true, 2); err != nil {
+				// Solaris never allows modifying
+				// ICMP properties.
+				if runtime.GOOS != "solaris" {
+					t.Fatal(err)
+				}
+			}
+		} else {
+			psh = pshicmp
+			// Some platforms never allow disabling the
+			// kernel checksum processing.
+ p.SetChecksum(false, -1) + } + wb, err := (&icmp.Message{ + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(psh) + if err != nil { + t.Fatal(err) + } + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + cm.HopLimit = i + 1 + if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, err := p.WriteTo(wb, &cm, dst); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, _, _, err := p.ReadFrom(rb); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels have some limitation on receiving icmp packet through raw socket + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } else { + if m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, rb[:n]); err != nil { + t.Fatal(err) + } else if m.Type != ipv6.ICMPTypeEchoReply || m.Code != 0 { + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv6.ICMPTypeEchoReply, 0) + } + } + } +} diff --git a/vendor/golang.org/x/net/ipv6/unicastsockopt_test.go b/vendor/golang.org/x/net/ipv6/unicastsockopt_test.go new file mode 100644 index 0000000..e175dcc --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/unicastsockopt_test.go @@ -0,0 +1,120 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +func TestConnUnicastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + ln, err := net.Listen("tcp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + errc := make(chan error, 1) + go func() { + c, err := ln.Accept() + if err != nil { + errc <- err + return + } + errc <- c.Close() + }() + + c, err := net.Dial("tcp6", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + testUnicastSocketOptions(t, ipv6.NewConn(c)) + + if err := <-errc; err != nil { + t.Errorf("server: %v", err) + } +} + +var packetConnUnicastSocketOptionTests = []struct { + net, proto, addr string +}{ + {"udp6", "", "[::1]:0"}, + {"ip6", ":ipv6-icmp", "::1"}, +} + +func TestPacketConnUnicastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + m, ok := nettest.SupportsRawIPSocket() + for _, tt := range packetConnUnicastSocketOptionTests { + if tt.net == "ip6" && !ok { + t.Log(m) + continue + } + c, err := net.ListenPacket(tt.net+tt.proto, tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + testUnicastSocketOptions(t, ipv6.NewPacketConn(c)) + } +} + +type testIPv6UnicastConn interface { + TrafficClass() (int, error) + SetTrafficClass(int) error + HopLimit() (int, error) + SetHopLimit(int) error +} + +func testUnicastSocketOptions(t *testing.T, c testIPv6UnicastConn) { + tclass := iana.DiffServCS0 | iana.NotECNTransport + if err := c.SetTrafficClass(tclass); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels don't support IPV6_TCLASS option + t.Logf("not supported on %s", runtime.GOOS) + goto next + } + t.Fatal(err) + } + if v, err := c.TrafficClass(); err != nil { + t.Fatal(err) + } else if v != tclass { + t.Fatalf("got %v; want %v", v, tclass) + } + +next: + hoplim := 255 + if err := c.SetHopLimit(hoplim); err != nil { + t.Fatal(err) + } + if v, err := c.HopLimit(); err != nil { + t.Fatal(err) + } else if v != hoplim { + t.Fatalf("got %v; want %v", v, hoplim) + } +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_darwin.go b/vendor/golang.org/x/net/ipv6/zsys_darwin.go new file mode 100644 index 0000000..6aab1df --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_darwin.go @@ -0,0 +1,131 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + sysIPV6_2292PKTINFO = 0x13 + sysIPV6_2292HOPLIMIT = 0x14 + sysIPV6_2292NEXTHOP = 0x15 + sysIPV6_2292HOPOPTS = 0x16 + sysIPV6_2292DSTOPTS = 0x17 + sysIPV6_2292RTHDR = 0x18 + + sysIPV6_2292PKTOPTIONS = 0x19 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RECVTCLASS = 0x23 + sysIPV6_TCLASS = 0x24 + + sysIPV6_RTHDRDSTOPTS = 0x39 + + sysIPV6_RECVPKTINFO = 0x3d + + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + 
sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_MSFILTER = 0x4a + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_BOUND_IF = 0x7d + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [128]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [128]byte + Pad_cgo_1 [128]byte +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go b/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go new file mode 100644 index 0000000..d2de804 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go @@ -0,0 +1,88 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_dragonfly.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go 
b/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go new file mode 100644 index 0000000..919e572 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go @@ -0,0 +1,122 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group sockaddrStorage + Source sockaddrStorage +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go new file mode 100644 index 0000000..cb8141f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go @@ -0,0 +1,124 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + 
sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go new file mode 100644 index 0000000..cb8141f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go @@ -0,0 +1,124 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id 
uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_386.go b/vendor/golang.org/x/net/ipv6/zsys_linux_386.go new file mode 100644 index 0000000..73aa8c6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_386.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 
+ Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go new file mode 100644 index 0000000..b64f015 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 
+ Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go b/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go new file mode 100644 index 0000000..73aa8c6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + 
+ sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go new file mode 100644 index 0000000..b64f015 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + 
sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go new file mode 100644 index 0000000..73aa8c6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + 
sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go new file mode 100644 index 0000000..b64f015 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 
+ + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go new file mode 100644 index 0000000..b64f015 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + 
sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go new file mode 100644 index 0000000..73aa8c6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + 
sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go new file mode 100644 index 0000000..c9bf6a8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + 
sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go new file mode 100644 index 0000000..b64f015 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + 
+ sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go new file mode 100644 index 0000000..b64f015 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE 
= 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go b/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go new file mode 100644 index 0000000..b64f015 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 
+ sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_netbsd.go b/vendor/golang.org/x/net/ipv6/zsys_netbsd.go new file mode 100644 index 0000000..bcada13 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_netbsd.go @@ -0,0 +1,84 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 
0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_openbsd.go b/vendor/golang.org/x/net/ipv6/zsys_openbsd.go new file mode 100644 index 0000000..86cf3c6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_openbsd.go @@ -0,0 +1,93 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_AUTH_LEVEL = 0x35 + sysIPV6_ESP_TRANS_LEVEL = 0x36 + sysIPV6_ESP_NETWORK_LEVEL = 0x37 + sysIPSEC6_OUTSA = 0x38 + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + sysIPV6_IPCOMP_LEVEL = 0x3c + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + sysIPV6_PIPEX = 0x3f + + sysIPV6_RTABLE = 0x1021 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_solaris.go b/vendor/golang.org/x/net/ipv6/zsys_solaris.go new file mode 100644 index 0000000..cf1837d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_solaris.go @@ -0,0 +1,131 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs 
defs_solaris.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x5 + sysIPV6_MULTICAST_IF = 0x6 + sysIPV6_MULTICAST_HOPS = 0x7 + sysIPV6_MULTICAST_LOOP = 0x8 + sysIPV6_JOIN_GROUP = 0x9 + sysIPV6_LEAVE_GROUP = 0xa + + sysIPV6_PKTINFO = 0xb + + sysIPV6_HOPLIMIT = 0xc + sysIPV6_NEXTHOP = 0xd + sysIPV6_HOPOPTS = 0xe + sysIPV6_DSTOPTS = 0xf + + sysIPV6_RTHDR = 0x10 + sysIPV6_RTHDRDSTOPTS = 0x11 + + sysIPV6_RECVPKTINFO = 0x12 + sysIPV6_RECVHOPLIMIT = 0x13 + sysIPV6_RECVHOPOPTS = 0x14 + + sysIPV6_RECVRTHDR = 0x16 + + sysIPV6_RECVRTHDRDSTOPTS = 0x17 + + sysIPV6_CHECKSUM = 0x18 + sysIPV6_RECVTCLASS = 0x19 + sysIPV6_USE_MIN_MTU = 0x20 + sysIPV6_DONTFRAG = 0x21 + sysIPV6_SEC_OPT = 0x22 + sysIPV6_SRC_PREFERENCES = 0x23 + sysIPV6_RECVPATHMTU = 0x24 + sysIPV6_PATHMTU = 0x25 + sysIPV6_TCLASS = 0x26 + sysIPV6_V6ONLY = 0x27 + + sysIPV6_RECVDSTOPTS = 0x28 + + sysMCAST_JOIN_GROUP = 0x29 + sysMCAST_LEAVE_GROUP = 0x2a + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_JOIN_SOURCE_GROUP = 0x2d + sysMCAST_LEAVE_SOURCE_GROUP = 0x2e + + sysIPV6_PREFER_SRC_HOME = 0x1 + sysIPV6_PREFER_SRC_COA = 0x2 + sysIPV6_PREFER_SRC_PUBLIC = 0x4 + sysIPV6_PREFER_SRC_TMP = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x10 + sysIPV6_PREFER_SRC_CGA = 0x20 + + sysIPV6_PREFER_SRC_MIPMASK = 0x3 + sysIPV6_PREFER_SRC_MIPDEFAULT = 0x1 + sysIPV6_PREFER_SRC_TMPMASK = 0xc + sysIPV6_PREFER_SRC_TMPDEFAULT = 0x4 + sysIPV6_PREFER_SRC_CGAMASK = 0x30 + sysIPV6_PREFER_SRC_CGADEFAULT = 0x10 + + sysIPV6_PREFER_SRC_MASK = 0x3f + + sysIPV6_PREFER_SRC_DEFAULT = 0x15 + + sysIPV6_BOUND_IF = 0x41 + sysIPV6_UNSPEC_SRC = 0x42 + + sysICMP6_FILTER = 0x1 + + sizeofSockaddrStorage = 0x100 + sizeofSockaddrInet6 = 0x20 + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x24 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x104 + sizeofGroupSourceReq = 0x204 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Family uint16 + X_ss_pad1 [6]int8 + X_ss_align float64 + X_ss_pad2 [240]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 + X__sin6_src_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [256]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [256]byte + Pad_cgo_1 [256]byte +} + +type icmpv6Filter struct { + X__icmp6_filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/lex/httplex/httplex.go b/vendor/golang.org/x/net/lex/httplex/httplex.go new file mode 100644 index 0000000..20f2b89 --- /dev/null +++ b/vendor/golang.org/x/net/lex/httplex/httplex.go @@ -0,0 +1,351 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package httplex contains rules around lexical matters of various +// HTTP-related specifications. +// +// This package is shared by the standard library (which vendors it) +// and x/net/http2. It comes with no API stability promise. 
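+//
+// A rough usage sketch (the values are illustrative):
+//
+//   if !ValidHeaderFieldName("X-Request-Id") {
+//           // reject the malformed header name
+//   }
+//   ok := HeaderValuesContainsToken([]string{"gzip, br"}, "BR") // true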
+package httplex + +import ( + "net" + "strings" + "unicode/utf8" + + "golang.org/x/net/idna" +) + +var isTokenTable = [127]bool{ + '!': true, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, +} + +func IsTokenRune(r rune) bool { + i := int(r) + return i < len(isTokenTable) && isTokenTable[i] +} + +func isNotToken(r rune) bool { + return !IsTokenRune(r) +} + +// HeaderValuesContainsToken reports whether any string in values +// contains the provided token, ASCII case-insensitively. +func HeaderValuesContainsToken(values []string, token string) bool { + for _, v := range values { + if headerValueContainsToken(v, token) { + return true + } + } + return false +} + +// isOWS reports whether b is an optional whitespace byte, as defined +// by RFC 7230 section 3.2.3. +func isOWS(b byte) bool { return b == ' ' || b == '\t' } + +// trimOWS returns x with all optional whitespace removes from the +// beginning and end. +func trimOWS(x string) string { + // TODO: consider using strings.Trim(x, " \t") instead, + // if and when it's fast enough. See issue 10292. + // But this ASCII-only code will probably always beat UTF-8 + // aware code. + for len(x) > 0 && isOWS(x[0]) { + x = x[1:] + } + for len(x) > 0 && isOWS(x[len(x)-1]) { + x = x[:len(x)-1] + } + return x +} + +// headerValueContainsToken reports whether v (assumed to be a +// 0#element, in the ABNF extension described in RFC 7230 section 7) +// contains token amongst its comma-separated tokens, ASCII +// case-insensitively. +func headerValueContainsToken(v string, token string) bool { + v = trimOWS(v) + if comma := strings.IndexByte(v, ','); comma != -1 { + return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token) + } + return tokenEqual(v, token) +} + +// lowerASCII returns the ASCII lowercase version of b. +func lowerASCII(b byte) byte { + if 'A' <= b && b <= 'Z' { + return b + ('a' - 'A') + } + return b +} + +// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively. +func tokenEqual(t1, t2 string) bool { + if len(t1) != len(t2) { + return false + } + for i, b := range t1 { + if b >= utf8.RuneSelf { + // No UTF-8 or non-ASCII allowed in tokens. 
+ return false + } + if lowerASCII(byte(b)) != lowerASCII(t2[i]) { + return false + } + } + return true +} + +// isLWS reports whether b is linear white space, according +// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 +// LWS = [CRLF] 1*( SP | HT ) +func isLWS(b byte) bool { return b == ' ' || b == '\t' } + +// isCTL reports whether b is a control byte, according +// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 +// CTL = +func isCTL(b byte) bool { + const del = 0x7f // a CTL + return b < ' ' || b == del +} + +// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name. +// HTTP/2 imposes the additional restriction that uppercase ASCII +// letters are not allowed. +// +// RFC 7230 says: +// header-field = field-name ":" OWS field-value OWS +// field-name = token +// token = 1*tchar +// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / +// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA +func ValidHeaderFieldName(v string) bool { + if len(v) == 0 { + return false + } + for _, r := range v { + if !IsTokenRune(r) { + return false + } + } + return true +} + +// ValidHostHeader reports whether h is a valid host header. +func ValidHostHeader(h string) bool { + // The latest spec is actually this: + // + // http://tools.ietf.org/html/rfc7230#section-5.4 + // Host = uri-host [ ":" port ] + // + // Where uri-host is: + // http://tools.ietf.org/html/rfc3986#section-3.2.2 + // + // But we're going to be much more lenient for now and just + // search for any byte that's not a valid byte in any of those + // expressions. + for i := 0; i < len(h); i++ { + if !validHostByte[h[i]] { + return false + } + } + return true +} + +// See the validHostHeader comment. +var validHostByte = [256]bool{ + '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true, + '8': true, '9': true, + + 'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true, + 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true, + 'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true, + 'y': true, 'z': true, + + 'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true, + 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true, + 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true, + 'Y': true, 'Z': true, + + '!': true, // sub-delims + '$': true, // sub-delims + '%': true, // pct-encoded (and used in IPv6 zones) + '&': true, // sub-delims + '(': true, // sub-delims + ')': true, // sub-delims + '*': true, // sub-delims + '+': true, // sub-delims + ',': true, // sub-delims + '-': true, // unreserved + '.': true, // unreserved + ':': true, // IPv6address + Host expression's optional port + ';': true, // sub-delims + '=': true, // sub-delims + '[': true, + '\'': true, // sub-delims + ']': true, + '_': true, // unreserved + '~': true, // unreserved +} + +// ValidHeaderFieldValue reports whether v is a valid "field-value" according to +// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 : +// +// message-header = field-name ":" [ field-value ] +// field-value = *( field-content | LWS ) +// field-content = +// +// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 : +// +// TEXT = +// LWS = [CRLF] 1*( SP | HT ) +// CTL = +// +// RFC 7230 says: +// field-value = *( field-content / obs-fold ) +// obj-fold = N/A to http2, and deprecated +// 
field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] +// field-vchar = VCHAR / obs-text +// obs-text = %x80-FF +// VCHAR = "any visible [USASCII] character" +// +// http2 further says: "Similarly, HTTP/2 allows header field values +// that are not valid. While most of the values that can be encoded +// will not alter header field parsing, carriage return (CR, ASCII +// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII +// 0x0) might be exploited by an attacker if they are translated +// verbatim. Any request or response that contains a character not +// permitted in a header field value MUST be treated as malformed +// (Section 8.1.2.6). Valid characters are defined by the +// field-content ABNF rule in Section 3.2 of [RFC7230]." +// +// This function does not (yet?) properly handle the rejection of +// strings that begin or end with SP or HTAB. +func ValidHeaderFieldValue(v string) bool { + for i := 0; i < len(v); i++ { + b := v[i] + if isCTL(b) && !isLWS(b) { + return false + } + } + return true +} + +func isASCII(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} + +// PunycodeHostPort returns the IDNA Punycode version +// of the provided "host" or "host:port" string. +func PunycodeHostPort(v string) (string, error) { + if isASCII(v) { + return v, nil + } + + host, port, err := net.SplitHostPort(v) + if err != nil { + // The input 'v' argument was just a "host" argument, + // without a port. This error should not be returned + // to the caller. + host = v + port = "" + } + host, err = idna.ToASCII(host) + if err != nil { + // Non-UTF-8? Not representable in Punycode, in any + // case. + return "", err + } + if port == "" { + return host, nil + } + return net.JoinHostPort(host, port), nil +} diff --git a/vendor/golang.org/x/net/lex/httplex/httplex_test.go b/vendor/golang.org/x/net/lex/httplex/httplex_test.go new file mode 100644 index 0000000..f47adc9 --- /dev/null +++ b/vendor/golang.org/x/net/lex/httplex/httplex_test.go @@ -0,0 +1,119 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
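+
+// The tests below pin down two behaviors of the helpers above: token
+// matching is ASCII case-insensitive and trims optional whitespace, and
+// PunycodeHostPort IDNA-encodes only the host component, so a port or a
+// bracketed IPv6 literal such as "[1::6]:8080" passes through unchanged.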
+ +package httplex + +import ( + "testing" +) + +func isChar(c rune) bool { return c <= 127 } + +func isCtl(c rune) bool { return c <= 31 || c == 127 } + +func isSeparator(c rune) bool { + switch c { + case '(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']', '?', '=', '{', '}', ' ', '\t': + return true + } + return false +} + +func TestIsToken(t *testing.T) { + for i := 0; i <= 130; i++ { + r := rune(i) + expected := isChar(r) && !isCtl(r) && !isSeparator(r) + if IsTokenRune(r) != expected { + t.Errorf("isToken(0x%x) = %v", r, !expected) + } + } +} + +func TestHeaderValuesContainsToken(t *testing.T) { + tests := []struct { + vals []string + token string + want bool + }{ + { + vals: []string{"foo"}, + token: "foo", + want: true, + }, + { + vals: []string{"bar", "foo"}, + token: "foo", + want: true, + }, + { + vals: []string{"foo"}, + token: "FOO", + want: true, + }, + { + vals: []string{"foo"}, + token: "bar", + want: false, + }, + { + vals: []string{" foo "}, + token: "FOO", + want: true, + }, + { + vals: []string{"foo,bar"}, + token: "FOO", + want: true, + }, + { + vals: []string{"bar,foo,bar"}, + token: "FOO", + want: true, + }, + { + vals: []string{"bar , foo"}, + token: "FOO", + want: true, + }, + { + vals: []string{"foo ,bar "}, + token: "FOO", + want: true, + }, + { + vals: []string{"bar, foo ,bar"}, + token: "FOO", + want: true, + }, + { + vals: []string{"bar , foo"}, + token: "FOO", + want: true, + }, + } + for _, tt := range tests { + got := HeaderValuesContainsToken(tt.vals, tt.token) + if got != tt.want { + t.Errorf("headerValuesContainsToken(%q, %q) = %v; want %v", tt.vals, tt.token, got, tt.want) + } + } +} + +func TestPunycodeHostPort(t *testing.T) { + tests := []struct { + in, want string + }{ + {"www.google.com", "www.google.com"}, + {"гофер.рф", "xn--c1ae0ajs.xn--p1ai"}, + {"bücher.de", "xn--bcher-kva.de"}, + {"bücher.de:8080", "xn--bcher-kva.de:8080"}, + {"[1::6]:8080", "[1::6]:8080"}, + } + for _, tt := range tests { + got, err := PunycodeHostPort(tt.in) + if tt.want != got || err != nil { + t.Errorf("PunycodeHostPort(%q) = %q, %v, want %q, nil", tt.in, got, err, tt.want) + } + } +} diff --git a/vendor/golang.org/x/net/lif/address.go b/vendor/golang.org/x/net/lif/address.go new file mode 100644 index 0000000..afb957f --- /dev/null +++ b/vendor/golang.org/x/net/lif/address.go @@ -0,0 +1,105 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package lif + +import ( + "errors" + "unsafe" +) + +// An Addr represents an address associated with packet routing. +type Addr interface { + // Family returns an address family. + Family() int +} + +// An Inet4Addr represents an internet address for IPv4. +type Inet4Addr struct { + IP [4]byte // IP address + PrefixLen int // address prefix length +} + +// Family implements the Family method of Addr interface. +func (a *Inet4Addr) Family() int { return sysAF_INET } + +// An Inet6Addr represents an internet address for IPv6. +type Inet6Addr struct { + IP [16]byte // IP address + PrefixLen int // address prefix length + ZoneID int // zone identifier +} + +// Family implements the Family method of Addr interface. +func (a *Inet6Addr) Family() int { return sysAF_INET6 } + +// Addrs returns a list of interface addresses. +// +// The provided af must be an address family and name must be a data +// link name. The zero value of af or name means a wildcard. 
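+//
+// A minimal sketch of a call site; 0 (sysAF_UNSPEC) selects every
+// supported address family:
+//
+//   as, err := lif.Addrs(0, "")
+//   if err != nil {
+//           // handle the error
+//   }
+//   for _, a := range as {
+//           switch a := a.(type) {
+//           case *lif.Inet4Addr:
+//                   _ = a.PrefixLen // 4-byte a.IP
+//           case *lif.Inet6Addr:
+//                   _ = a.ZoneID // 16-byte a.IP, a.PrefixLen
+//           }
+//   }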
+func Addrs(af int, name string) ([]Addr, error) { + eps, err := newEndpoints(af) + if len(eps) == 0 { + return nil, err + } + defer func() { + for _, ep := range eps { + ep.close() + } + }() + lls, err := links(eps, name) + if len(lls) == 0 { + return nil, err + } + var as []Addr + for _, ll := range lls { + var lifr lifreq + for i := 0; i < len(ll.Name); i++ { + lifr.Name[i] = int8(ll.Name[i]) + } + for _, ep := range eps { + ioc := int64(sysSIOCGLIFADDR) + err := ioctl(ep.s, uintptr(ioc), unsafe.Pointer(&lifr)) + if err != nil { + continue + } + sa := (*sockaddrStorage)(unsafe.Pointer(&lifr.Lifru[0])) + l := int(nativeEndian.Uint32(lifr.Lifru1[:4])) + if l == 0 { + continue + } + switch sa.Family { + case sysAF_INET: + a := &Inet4Addr{PrefixLen: l} + copy(a.IP[:], lifr.Lifru[4:8]) + as = append(as, a) + case sysAF_INET6: + a := &Inet6Addr{PrefixLen: l, ZoneID: int(nativeEndian.Uint32(lifr.Lifru[24:28]))} + copy(a.IP[:], lifr.Lifru[8:24]) + as = append(as, a) + } + } + } + return as, nil +} + +func parseLinkAddr(b []byte) ([]byte, error) { + nlen, alen, slen := int(b[1]), int(b[2]), int(b[3]) + l := 4 + nlen + alen + slen + if len(b) < l { + return nil, errors.New("invalid address") + } + b = b[4:] + var addr []byte + if nlen > 0 { + b = b[nlen:] + } + if alen > 0 { + addr = make([]byte, alen) + copy(addr, b[:alen]) + } + return addr, nil +} diff --git a/vendor/golang.org/x/net/lif/address_test.go b/vendor/golang.org/x/net/lif/address_test.go new file mode 100644 index 0000000..a25f10b --- /dev/null +++ b/vendor/golang.org/x/net/lif/address_test.go @@ -0,0 +1,123 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package lif + +import ( + "fmt" + "testing" +) + +type addrFamily int + +func (af addrFamily) String() string { + switch af { + case sysAF_UNSPEC: + return "unspec" + case sysAF_INET: + return "inet4" + case sysAF_INET6: + return "inet6" + default: + return fmt.Sprintf("%d", af) + } +} + +const hexDigit = "0123456789abcdef" + +type llAddr []byte + +func (a llAddr) String() string { + if len(a) == 0 { + return "" + } + buf := make([]byte, 0, len(a)*3-1) + for i, b := range a { + if i > 0 { + buf = append(buf, ':') + } + buf = append(buf, hexDigit[b>>4]) + buf = append(buf, hexDigit[b&0xF]) + } + return string(buf) +} + +type ipAddr []byte + +func (a ipAddr) String() string { + if len(a) == 0 { + return "" + } + if len(a) == 4 { + return fmt.Sprintf("%d.%d.%d.%d", a[0], a[1], a[2], a[3]) + } + if len(a) == 16 { + return fmt.Sprintf("%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x", a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14], a[15]) + } + s := make([]byte, len(a)*2) + for i, tn := range a { + s[i*2], s[i*2+1] = hexDigit[tn>>4], hexDigit[tn&0xf] + } + return string(s) +} + +func (a *Inet4Addr) String() string { + return fmt.Sprintf("(%s %s %d)", addrFamily(a.Family()), ipAddr(a.IP[:]), a.PrefixLen) +} + +func (a *Inet6Addr) String() string { + return fmt.Sprintf("(%s %s %d %d)", addrFamily(a.Family()), ipAddr(a.IP[:]), a.PrefixLen, a.ZoneID) +} + +type addrPack struct { + af int + as []Addr +} + +func addrPacks() ([]addrPack, error) { + var lastErr error + var aps []addrPack + for _, af := range [...]int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + as, err := Addrs(af, "") + if err != nil { + lastErr = err + continue + } + aps = append(aps, addrPack{af: af, as: as}) + } + 
return aps, lastErr +} + +func TestAddrs(t *testing.T) { + aps, err := addrPacks() + if len(aps) == 0 && err != nil { + t.Fatal(err) + } + lps, err := linkPacks() + if len(lps) == 0 && err != nil { + t.Fatal(err) + } + for _, lp := range lps { + n := 0 + for _, ll := range lp.lls { + as, err := Addrs(lp.af, ll.Name) + if err != nil { + t.Fatal(lp.af, ll.Name, err) + } + t.Logf("af=%s name=%s %v", addrFamily(lp.af), ll.Name, as) + n += len(as) + } + for _, ap := range aps { + if ap.af != lp.af { + continue + } + if n != len(ap.as) { + t.Errorf("af=%s got %d; want %d", addrFamily(lp.af), n, len(ap.as)) + continue + } + } + } +} diff --git a/vendor/golang.org/x/net/lif/binary.go b/vendor/golang.org/x/net/lif/binary.go new file mode 100644 index 0000000..738a94f --- /dev/null +++ b/vendor/golang.org/x/net/lif/binary.go @@ -0,0 +1,115 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package lif + +// This file contains duplicates of encoding/binary package. +// +// This package is supposed to be used by the net package of standard +// library. Therefore the package set used in the package must be the +// same as net package. + +var ( + littleEndian binaryLittleEndian + bigEndian binaryBigEndian +) + +type binaryByteOrder interface { + Uint16([]byte) uint16 + Uint32([]byte) uint32 + Uint64([]byte) uint64 + PutUint16([]byte, uint16) + PutUint32([]byte, uint32) + PutUint64([]byte, uint64) +} + +type binaryLittleEndian struct{} + +func (binaryLittleEndian) Uint16(b []byte) uint16 { + _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808 + return uint16(b[0]) | uint16(b[1])<<8 +} + +func (binaryLittleEndian) PutUint16(b []byte, v uint16) { + _ = b[1] // early bounds check to guarantee safety of writes below + b[0] = byte(v) + b[1] = byte(v >> 8) +} + +func (binaryLittleEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func (binaryLittleEndian) PutUint32(b []byte, v uint32) { + _ = b[3] // early bounds check to guarantee safety of writes below + b[0] = byte(v) + b[1] = byte(v >> 8) + b[2] = byte(v >> 16) + b[3] = byte(v >> 24) +} + +func (binaryLittleEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func (binaryLittleEndian) PutUint64(b []byte, v uint64) { + _ = b[7] // early bounds check to guarantee safety of writes below + b[0] = byte(v) + b[1] = byte(v >> 8) + b[2] = byte(v >> 16) + b[3] = byte(v >> 24) + b[4] = byte(v >> 32) + b[5] = byte(v >> 40) + b[6] = byte(v >> 48) + b[7] = byte(v >> 56) +} + +type binaryBigEndian struct{} + +func (binaryBigEndian) Uint16(b []byte) uint16 { + _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808 + return uint16(b[1]) | uint16(b[0])<<8 +} + +func (binaryBigEndian) PutUint16(b []byte, v uint16) { + _ = b[1] // early bounds check to guarantee safety of writes below + b[0] = byte(v >> 8) + b[1] = byte(v) +} + +func (binaryBigEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 +} + +func (binaryBigEndian) 
PutUint32(b []byte, v uint32) { + _ = b[3] // early bounds check to guarantee safety of writes below + b[0] = byte(v >> 24) + b[1] = byte(v >> 16) + b[2] = byte(v >> 8) + b[3] = byte(v) +} + +func (binaryBigEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 +} + +func (binaryBigEndian) PutUint64(b []byte, v uint64) { + _ = b[7] // early bounds check to guarantee safety of writes below + b[0] = byte(v >> 56) + b[1] = byte(v >> 48) + b[2] = byte(v >> 40) + b[3] = byte(v >> 32) + b[4] = byte(v >> 24) + b[5] = byte(v >> 16) + b[6] = byte(v >> 8) + b[7] = byte(v) +} diff --git a/vendor/golang.org/x/net/lif/defs_solaris.go b/vendor/golang.org/x/net/lif/defs_solaris.go new file mode 100644 index 0000000..02c1998 --- /dev/null +++ b/vendor/golang.org/x/net/lif/defs_solaris.go @@ -0,0 +1,90 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package lif + +/* +#include +#include + +#include +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_DGRAM = C.SOCK_DGRAM +) + +type sockaddrStorage C.struct_sockaddr_storage + +const ( + sysLIFC_NOXMIT = C.LIFC_NOXMIT + sysLIFC_EXTERNAL_SOURCE = C.LIFC_EXTERNAL_SOURCE + sysLIFC_TEMPORARY = C.LIFC_TEMPORARY + sysLIFC_ALLZONES = C.LIFC_ALLZONES + sysLIFC_UNDER_IPMP = C.LIFC_UNDER_IPMP + sysLIFC_ENABLED = C.LIFC_ENABLED + + sysSIOCGLIFADDR = C.SIOCGLIFADDR + sysSIOCGLIFDSTADDR = C.SIOCGLIFDSTADDR + sysSIOCGLIFFLAGS = C.SIOCGLIFFLAGS + sysSIOCGLIFMTU = C.SIOCGLIFMTU + sysSIOCGLIFNETMASK = C.SIOCGLIFNETMASK + sysSIOCGLIFMETRIC = C.SIOCGLIFMETRIC + sysSIOCGLIFNUM = C.SIOCGLIFNUM + sysSIOCGLIFINDEX = C.SIOCGLIFINDEX + sysSIOCGLIFSUBNET = C.SIOCGLIFSUBNET + sysSIOCGLIFLNKINFO = C.SIOCGLIFLNKINFO + sysSIOCGLIFCONF = C.SIOCGLIFCONF + sysSIOCGLIFHWADDR = C.SIOCGLIFHWADDR +) + +const ( + sysIFF_UP = C.IFF_UP + sysIFF_BROADCAST = C.IFF_BROADCAST + sysIFF_DEBUG = C.IFF_DEBUG + sysIFF_LOOPBACK = C.IFF_LOOPBACK + sysIFF_POINTOPOINT = C.IFF_POINTOPOINT + sysIFF_NOTRAILERS = C.IFF_NOTRAILERS + sysIFF_RUNNING = C.IFF_RUNNING + sysIFF_NOARP = C.IFF_NOARP + sysIFF_PROMISC = C.IFF_PROMISC + sysIFF_ALLMULTI = C.IFF_ALLMULTI + sysIFF_INTELLIGENT = C.IFF_INTELLIGENT + sysIFF_MULTICAST = C.IFF_MULTICAST + sysIFF_MULTI_BCAST = C.IFF_MULTI_BCAST + sysIFF_UNNUMBERED = C.IFF_UNNUMBERED + sysIFF_PRIVATE = C.IFF_PRIVATE +) + +const ( + sizeofLifnum = C.sizeof_struct_lifnum + sizeofLifreq = C.sizeof_struct_lifreq + sizeofLifconf = C.sizeof_struct_lifconf + sizeofLifIfinfoReq = C.sizeof_struct_lif_ifinfo_req +) + +type lifnum C.struct_lifnum + +type lifreq C.struct_lifreq + +type lifconf C.struct_lifconf + +type lifIfinfoReq C.struct_lif_ifinfo_req + +const ( + sysIFT_IPV4 = C.IFT_IPV4 + sysIFT_IPV6 = C.IFT_IPV6 + sysIFT_6TO4 = C.IFT_6TO4 +) diff --git a/vendor/golang.org/x/net/lif/lif.go b/vendor/golang.org/x/net/lif/lif.go new file mode 100644 index 0000000..6e81f81 --- /dev/null +++ b/vendor/golang.org/x/net/lif/lif.go @@ -0,0 +1,43 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +// Package lif provides basic functions for the manipulation of +// logical network interfaces and interface addresses on Solaris. +// +// The package supports Solaris 11 or above. +package lif + +import "syscall" + +type endpoint struct { + af int + s uintptr +} + +func (ep *endpoint) close() error { + return syscall.Close(int(ep.s)) +} + +func newEndpoints(af int) ([]endpoint, error) { + var lastErr error + var eps []endpoint + afs := []int{sysAF_INET, sysAF_INET6} + if af != sysAF_UNSPEC { + afs = []int{af} + } + for _, af := range afs { + s, err := syscall.Socket(af, sysSOCK_DGRAM, 0) + if err != nil { + lastErr = err + continue + } + eps = append(eps, endpoint{af: af, s: uintptr(s)}) + } + if len(eps) == 0 { + return nil, lastErr + } + return eps, nil +} diff --git a/vendor/golang.org/x/net/lif/link.go b/vendor/golang.org/x/net/lif/link.go new file mode 100644 index 0000000..913a53e --- /dev/null +++ b/vendor/golang.org/x/net/lif/link.go @@ -0,0 +1,126 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package lif + +import "unsafe" + +// A Link represents logical data link information. +// +// It also represents base information for logical network interface. +// On Solaris, each logical network interface represents network layer +// adjacency information and the interface has a only single network +// address or address pair for tunneling. It's usual that multiple +// logical network interfaces share the same logical data link. +type Link struct { + Name string // name, equivalent to IP interface name + Index int // index, equivalent to IP interface index + Type int // type + Flags int // flags + MTU int // maximum transmission unit, basically link MTU but may differ between IP address families + Addr []byte // address +} + +func (ll *Link) fetch(s uintptr) { + var lifr lifreq + for i := 0; i < len(ll.Name); i++ { + lifr.Name[i] = int8(ll.Name[i]) + } + ioc := int64(sysSIOCGLIFINDEX) + if err := ioctl(s, uintptr(ioc), unsafe.Pointer(&lifr)); err == nil { + ll.Index = int(nativeEndian.Uint32(lifr.Lifru[:4])) + } + ioc = int64(sysSIOCGLIFFLAGS) + if err := ioctl(s, uintptr(ioc), unsafe.Pointer(&lifr)); err == nil { + ll.Flags = int(nativeEndian.Uint64(lifr.Lifru[:8])) + } + ioc = int64(sysSIOCGLIFMTU) + if err := ioctl(s, uintptr(ioc), unsafe.Pointer(&lifr)); err == nil { + ll.MTU = int(nativeEndian.Uint32(lifr.Lifru[:4])) + } + switch ll.Type { + case sysIFT_IPV4, sysIFT_IPV6, sysIFT_6TO4: + default: + ioc = int64(sysSIOCGLIFHWADDR) + if err := ioctl(s, uintptr(ioc), unsafe.Pointer(&lifr)); err == nil { + ll.Addr, _ = parseLinkAddr(lifr.Lifru[4:]) + } + } +} + +// Links returns a list of logical data links. +// +// The provided af must be an address family and name must be a data +// link name. The zero value of af or name means a wildcard. 
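+//
+// A minimal sketch of a call site (0 and "" are the wildcards
+// described above):
+//
+//   lls, err := lif.Links(0, "")
+//   if err != nil {
+//           // handle the error
+//   }
+//   for _, ll := range lls {
+//           _ = ll // ll.Name, ll.Index, ll.Type, ll.Flags, ll.MTU, ll.Addr
+//   }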
+func Links(af int, name string) ([]Link, error) { + eps, err := newEndpoints(af) + if len(eps) == 0 { + return nil, err + } + defer func() { + for _, ep := range eps { + ep.close() + } + }() + return links(eps, name) +} + +func links(eps []endpoint, name string) ([]Link, error) { + var lls []Link + lifn := lifnum{Flags: sysLIFC_NOXMIT | sysLIFC_TEMPORARY | sysLIFC_ALLZONES | sysLIFC_UNDER_IPMP} + lifc := lifconf{Flags: sysLIFC_NOXMIT | sysLIFC_TEMPORARY | sysLIFC_ALLZONES | sysLIFC_UNDER_IPMP} + for _, ep := range eps { + lifn.Family = uint16(ep.af) + ioc := int64(sysSIOCGLIFNUM) + if err := ioctl(ep.s, uintptr(ioc), unsafe.Pointer(&lifn)); err != nil { + continue + } + if lifn.Count == 0 { + continue + } + b := make([]byte, lifn.Count*sizeofLifreq) + lifc.Family = uint16(ep.af) + lifc.Len = lifn.Count * sizeofLifreq + if len(lifc.Lifcu) == 8 { + nativeEndian.PutUint64(lifc.Lifcu[:], uint64(uintptr(unsafe.Pointer(&b[0])))) + } else { + nativeEndian.PutUint32(lifc.Lifcu[:], uint32(uintptr(unsafe.Pointer(&b[0])))) + } + ioc = int64(sysSIOCGLIFCONF) + if err := ioctl(ep.s, uintptr(ioc), unsafe.Pointer(&lifc)); err != nil { + continue + } + nb := make([]byte, 32) // see LIFNAMSIZ in net/if.h + for i := 0; i < int(lifn.Count); i++ { + lifr := (*lifreq)(unsafe.Pointer(&b[i*sizeofLifreq])) + for i := 0; i < 32; i++ { + if lifr.Name[i] == 0 { + nb = nb[:i] + break + } + nb[i] = byte(lifr.Name[i]) + } + llname := string(nb) + nb = nb[:32] + if isDupLink(lls, llname) || name != "" && name != llname { + continue + } + ll := Link{Name: llname, Type: int(lifr.Type)} + ll.fetch(ep.s) + lls = append(lls, ll) + } + } + return lls, nil +} + +func isDupLink(lls []Link, name string) bool { + for _, ll := range lls { + if ll.Name == name { + return true + } + } + return false +} diff --git a/vendor/golang.org/x/net/lif/link_test.go b/vendor/golang.org/x/net/lif/link_test.go new file mode 100644 index 0000000..0cb9b95 --- /dev/null +++ b/vendor/golang.org/x/net/lif/link_test.go @@ -0,0 +1,63 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build solaris + +package lif + +import ( + "fmt" + "testing" +) + +func (ll *Link) String() string { + return fmt.Sprintf("name=%s index=%d type=%d flags=%#x mtu=%d addr=%v", ll.Name, ll.Index, ll.Type, ll.Flags, ll.MTU, llAddr(ll.Addr)) +} + +type linkPack struct { + af int + lls []Link +} + +func linkPacks() ([]linkPack, error) { + var lastErr error + var lps []linkPack + for _, af := range [...]int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + lls, err := Links(af, "") + if err != nil { + lastErr = err + continue + } + lps = append(lps, linkPack{af: af, lls: lls}) + } + return lps, lastErr +} + +func TestLinks(t *testing.T) { + lps, err := linkPacks() + if len(lps) == 0 && err != nil { + t.Fatal(err) + } + for _, lp := range lps { + n := 0 + for _, sll := range lp.lls { + lls, err := Links(lp.af, sll.Name) + if err != nil { + t.Fatal(lp.af, sll.Name, err) + } + for _, ll := range lls { + if ll.Name != sll.Name || ll.Index != sll.Index { + t.Errorf("af=%s got %v; want %v", addrFamily(lp.af), &ll, &sll) + continue + } + t.Logf("af=%s name=%s %v", addrFamily(lp.af), sll.Name, &ll) + n++ + } + } + if n != len(lp.lls) { + t.Errorf("af=%s got %d; want %d", addrFamily(lp.af), n, len(lp.lls)) + continue + } + } +} diff --git a/vendor/golang.org/x/net/lif/sys.go b/vendor/golang.org/x/net/lif/sys.go new file mode 100644 index 0000000..c896041 --- /dev/null +++ b/vendor/golang.org/x/net/lif/sys.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package lif + +import "unsafe" + +var nativeEndian binaryByteOrder + +func init() { + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + nativeEndian = littleEndian + } else { + nativeEndian = bigEndian + } +} diff --git a/vendor/golang.org/x/net/lif/sys_solaris_amd64.s b/vendor/golang.org/x/net/lif/sys_solaris_amd64.s new file mode 100644 index 0000000..39d76af --- /dev/null +++ b/vendor/golang.org/x/net/lif/sys_solaris_amd64.s @@ -0,0 +1,8 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·sysvicall6(SB),NOSPLIT,$0-88 + JMP syscall·sysvicall6(SB) diff --git a/vendor/golang.org/x/net/lif/syscall.go b/vendor/golang.org/x/net/lif/syscall.go new file mode 100644 index 0000000..aadab2e --- /dev/null +++ b/vendor/golang.org/x/net/lif/syscall.go @@ -0,0 +1,28 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build solaris + +package lif + +import ( + "syscall" + "unsafe" +) + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +//go:linkname procIoctl libc_ioctl + +var procIoctl uintptr + +func sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno) + +func ioctl(s, ioc uintptr, arg unsafe.Pointer) error { + _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procIoctl)), 3, s, ioc, uintptr(arg), 0, 0, 0) + if errno != 0 { + return error(errno) + } + return nil +} diff --git a/vendor/golang.org/x/net/lif/zsys_solaris_amd64.go b/vendor/golang.org/x/net/lif/zsys_solaris_amd64.go new file mode 100644 index 0000000..b5e999b --- /dev/null +++ b/vendor/golang.org/x/net/lif/zsys_solaris_amd64.go @@ -0,0 +1,103 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_solaris.go + +package lif + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1a + + sysSOCK_DGRAM = 0x1 +) + +type sockaddrStorage struct { + Family uint16 + X_ss_pad1 [6]int8 + X_ss_align float64 + X_ss_pad2 [240]int8 +} + +const ( + sysLIFC_NOXMIT = 0x1 + sysLIFC_EXTERNAL_SOURCE = 0x2 + sysLIFC_TEMPORARY = 0x4 + sysLIFC_ALLZONES = 0x8 + sysLIFC_UNDER_IPMP = 0x10 + sysLIFC_ENABLED = 0x20 + + sysSIOCGLIFADDR = -0x3f87968f + sysSIOCGLIFDSTADDR = -0x3f87968d + sysSIOCGLIFFLAGS = -0x3f87968b + sysSIOCGLIFMTU = -0x3f879686 + sysSIOCGLIFNETMASK = -0x3f879683 + sysSIOCGLIFMETRIC = -0x3f879681 + sysSIOCGLIFNUM = -0x3ff3967e + sysSIOCGLIFINDEX = -0x3f87967b + sysSIOCGLIFSUBNET = -0x3f879676 + sysSIOCGLIFLNKINFO = -0x3f879674 + sysSIOCGLIFCONF = -0x3fef965b + sysSIOCGLIFHWADDR = -0x3f879640 +) + +const ( + sysIFF_UP = 0x1 + sysIFF_BROADCAST = 0x2 + sysIFF_DEBUG = 0x4 + sysIFF_LOOPBACK = 0x8 + sysIFF_POINTOPOINT = 0x10 + sysIFF_NOTRAILERS = 0x20 + sysIFF_RUNNING = 0x40 + sysIFF_NOARP = 0x80 + sysIFF_PROMISC = 0x100 + sysIFF_ALLMULTI = 0x200 + sysIFF_INTELLIGENT = 0x400 + sysIFF_MULTICAST = 0x800 + sysIFF_MULTI_BCAST = 0x1000 + sysIFF_UNNUMBERED = 0x2000 + sysIFF_PRIVATE = 0x8000 +) + +const ( + sizeofLifnum = 0xc + sizeofLifreq = 0x178 + sizeofLifconf = 0x18 + sizeofLifIfinfoReq = 0x10 +) + +type lifnum struct { + Family uint16 + Pad_cgo_0 [2]byte + Flags int32 + Count int32 +} + +type lifreq struct { + Name [32]int8 + Lifru1 [4]byte + Type uint32 + Lifru [336]byte +} + +type lifconf struct { + Family uint16 + Pad_cgo_0 [2]byte + Flags int32 + Len int32 + Pad_cgo_1 [4]byte + Lifcu [8]byte +} + +type lifIfinfoReq struct { + Maxhops uint8 + Pad_cgo_0 [3]byte + Reachtime uint32 + Reachretrans uint32 + Maxmtu uint32 +} + +const ( + sysIFT_IPV4 = 0xc8 + sysIFT_IPV6 = 0xc9 + sysIFT_6TO4 = 0xca +) diff --git a/vendor/golang.org/x/net/nettest/conntest.go b/vendor/golang.org/x/net/nettest/conntest.go new file mode 100644 index 0000000..5bd3a8c --- /dev/null +++ b/vendor/golang.org/x/net/nettest/conntest.go @@ -0,0 +1,456 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package nettest provides utilities for network testing. +package nettest + +import ( + "bytes" + "encoding/binary" + "io" + "io/ioutil" + "math/rand" + "net" + "runtime" + "sync" + "testing" + "time" +) + +var ( + aLongTimeAgo = time.Unix(233431200, 0) + neverTimeout = time.Time{} +) + +// MakePipe creates a connection between two endpoints and returns the pair +// as c1 and c2, such that anything written to c1 is read by c2 and vice-versa. 
+// The stop function closes all resources, including c1, c2, and the underlying +// net.Listener (if there is one), and should not be nil. +type MakePipe func() (c1, c2 net.Conn, stop func(), err error) + +// TestConn tests that a net.Conn implementation properly satisfies the interface. +// The tests should not produce any false positives, but may experience +// false negatives. Thus, some issues may only be detected when the test is +// run multiple times. For maximal effectiveness, run the tests under the +// race detector. +func TestConn(t *testing.T, mp MakePipe) { + testConn(t, mp) +} + +type connTester func(t *testing.T, c1, c2 net.Conn) + +func timeoutWrapper(t *testing.T, mp MakePipe, f connTester) { + c1, c2, stop, err := mp() + if err != nil { + t.Fatalf("unable to make pipe: %v", err) + } + var once sync.Once + defer once.Do(func() { stop() }) + timer := time.AfterFunc(time.Minute, func() { + once.Do(func() { + t.Error("test timed out; terminating pipe") + stop() + }) + }) + defer timer.Stop() + f(t, c1, c2) +} + +// testBasicIO tests that the data sent on c1 is properly received on c2. +func testBasicIO(t *testing.T, c1, c2 net.Conn) { + want := make([]byte, 1<<20) + rand.New(rand.NewSource(0)).Read(want) + + dataCh := make(chan []byte) + go func() { + rd := bytes.NewReader(want) + if err := chunkedCopy(c1, rd); err != nil { + t.Errorf("unexpected c1.Write error: %v", err) + } + if err := c1.Close(); err != nil { + t.Errorf("unexpected c1.Close error: %v", err) + } + }() + + go func() { + wr := new(bytes.Buffer) + if err := chunkedCopy(wr, c2); err != nil { + t.Errorf("unexpected c2.Read error: %v", err) + } + if err := c2.Close(); err != nil { + t.Errorf("unexpected c2.Close error: %v", err) + } + dataCh <- wr.Bytes() + }() + + if got := <-dataCh; !bytes.Equal(got, want) { + t.Errorf("transmitted data differs") + } +} + +// testPingPong tests that the two endpoints can synchronously send data to +// each other in a typical request-response pattern. +func testPingPong(t *testing.T, c1, c2 net.Conn) { + var wg sync.WaitGroup + defer wg.Wait() + + pingPonger := func(c net.Conn) { + defer wg.Done() + buf := make([]byte, 8) + var prev uint64 + for { + if _, err := io.ReadFull(c, buf); err != nil { + if err == io.EOF { + break + } + t.Errorf("unexpected Read error: %v", err) + } + + v := binary.LittleEndian.Uint64(buf) + binary.LittleEndian.PutUint64(buf, v+1) + if prev != 0 && prev+2 != v { + t.Errorf("mismatching value: got %d, want %d", v, prev+2) + } + prev = v + if v == 1000 { + break + } + + if _, err := c.Write(buf); err != nil { + t.Errorf("unexpected Write error: %v", err) + break + } + } + if err := c.Close(); err != nil { + t.Errorf("unexpected Close error: %v", err) + } + } + + wg.Add(2) + go pingPonger(c1) + go pingPonger(c2) + + // Start off the chain reaction. + if _, err := c1.Write(make([]byte, 8)); err != nil { + t.Errorf("unexpected c1.Write error: %v", err) + } +} + +// testRacyRead tests that it is safe to mutate the input Read buffer +// immediately after cancelation has occurred. 
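+// The deliberate copy into the read buffer races with any
+// implementation that keeps a reference to the caller's slice after
+// Read has returned, which the race detector then reports.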
+func testRacyRead(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(c2, rand.New(rand.NewSource(0))) + + var wg sync.WaitGroup + defer wg.Wait() + + c1.SetReadDeadline(time.Now().Add(time.Millisecond)) + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + b1 := make([]byte, 1024) + b2 := make([]byte, 1024) + for j := 0; j < 100; j++ { + _, err := c1.Read(b1) + copy(b1, b2) // Mutate b1 to trigger potential race + if err != nil { + checkForTimeoutError(t, err) + c1.SetReadDeadline(time.Now().Add(time.Millisecond)) + } + } + }() + } +} + +// testRacyWrite tests that it is safe to mutate the input Write buffer +// immediately after cancelation has occurred. +func testRacyWrite(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(ioutil.Discard, c2) + + var wg sync.WaitGroup + defer wg.Wait() + + c1.SetWriteDeadline(time.Now().Add(time.Millisecond)) + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + b1 := make([]byte, 1024) + b2 := make([]byte, 1024) + for j := 0; j < 100; j++ { + _, err := c1.Write(b1) + copy(b1, b2) // Mutate b1 to trigger potential race + if err != nil { + checkForTimeoutError(t, err) + c1.SetWriteDeadline(time.Now().Add(time.Millisecond)) + } + } + }() + } +} + +// testReadTimeout tests that Read timeouts do not affect Write. +func testReadTimeout(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(ioutil.Discard, c2) + + c1.SetReadDeadline(aLongTimeAgo) + _, err := c1.Read(make([]byte, 1024)) + checkForTimeoutError(t, err) + if _, err := c1.Write(make([]byte, 1024)); err != nil { + t.Errorf("unexpected Write error: %v", err) + } +} + +// testWriteTimeout tests that Write timeouts do not affect Read. +func testWriteTimeout(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(c2, rand.New(rand.NewSource(0))) + + c1.SetWriteDeadline(aLongTimeAgo) + _, err := c1.Write(make([]byte, 1024)) + checkForTimeoutError(t, err) + if _, err := c1.Read(make([]byte, 1024)); err != nil { + t.Errorf("unexpected Read error: %v", err) + } +} + +// testPastTimeout tests that a deadline set in the past immediately times out +// Read and Write requests. +func testPastTimeout(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(c2, c2) + + testRoundtrip(t, c1) + + c1.SetDeadline(aLongTimeAgo) + n, err := c1.Write(make([]byte, 1024)) + if n != 0 { + t.Errorf("unexpected Write count: got %d, want 0", n) + } + checkForTimeoutError(t, err) + n, err = c1.Read(make([]byte, 1024)) + if n != 0 { + t.Errorf("unexpected Read count: got %d, want 0", n) + } + checkForTimeoutError(t, err) + + testRoundtrip(t, c1) +} + +// testPresentTimeout tests that a deadline set while there are pending +// Read and Write operations immediately times out those operations. 
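+// The buffered deadlineSet channel records that the deadline was set
+// before a blocked Read or Write returned, so an operation that times
+// out spuriously, before any deadline exists, is flagged as a failure.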
+func testPresentTimeout(t *testing.T, c1, c2 net.Conn) { + var wg sync.WaitGroup + defer wg.Wait() + wg.Add(3) + + deadlineSet := make(chan bool, 1) + go func() { + defer wg.Done() + time.Sleep(100 * time.Millisecond) + deadlineSet <- true + c1.SetReadDeadline(aLongTimeAgo) + c1.SetWriteDeadline(aLongTimeAgo) + }() + go func() { + defer wg.Done() + n, err := c1.Read(make([]byte, 1024)) + if n != 0 { + t.Errorf("unexpected Read count: got %d, want 0", n) + } + checkForTimeoutError(t, err) + if len(deadlineSet) == 0 { + t.Error("Read timed out before deadline is set") + } + }() + go func() { + defer wg.Done() + var err error + for err == nil { + _, err = c1.Write(make([]byte, 1024)) + } + checkForTimeoutError(t, err) + if len(deadlineSet) == 0 { + t.Error("Write timed out before deadline is set") + } + }() +} + +// testFutureTimeout tests that a future deadline will eventually time out +// Read and Write operations. +func testFutureTimeout(t *testing.T, c1, c2 net.Conn) { + var wg sync.WaitGroup + wg.Add(2) + + c1.SetDeadline(time.Now().Add(100 * time.Millisecond)) + go func() { + defer wg.Done() + _, err := c1.Read(make([]byte, 1024)) + checkForTimeoutError(t, err) + }() + go func() { + defer wg.Done() + var err error + for err == nil { + _, err = c1.Write(make([]byte, 1024)) + } + checkForTimeoutError(t, err) + }() + wg.Wait() + + go chunkedCopy(c2, c2) + resyncConn(t, c1) + testRoundtrip(t, c1) +} + +// testCloseTimeout tests that calling Close immediately times out pending +// Read and Write operations. +func testCloseTimeout(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(c2, c2) + + var wg sync.WaitGroup + defer wg.Wait() + wg.Add(3) + + // Test for cancelation upon connection closure. + c1.SetDeadline(neverTimeout) + go func() { + defer wg.Done() + time.Sleep(100 * time.Millisecond) + c1.Close() + }() + go func() { + defer wg.Done() + var err error + buf := make([]byte, 1024) + for err == nil { + _, err = c1.Read(buf) + } + }() + go func() { + defer wg.Done() + var err error + buf := make([]byte, 1024) + for err == nil { + _, err = c1.Write(buf) + } + }() +} + +// testConcurrentMethods tests that the methods of net.Conn can safely +// be called concurrently. +func testConcurrentMethods(t *testing.T, c1, c2 net.Conn) { + if runtime.GOOS == "plan9" { + t.Skip("skipping on plan9; see https://golang.org/issue/20489") + } + go chunkedCopy(c2, c2) + + // The results of the calls may be nonsensical, but this should + // not trigger a race detector warning. + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(7) + go func() { + defer wg.Done() + c1.Read(make([]byte, 1024)) + }() + go func() { + defer wg.Done() + c1.Write(make([]byte, 1024)) + }() + go func() { + defer wg.Done() + c1.SetDeadline(time.Now().Add(10 * time.Millisecond)) + }() + go func() { + defer wg.Done() + c1.SetReadDeadline(aLongTimeAgo) + }() + go func() { + defer wg.Done() + c1.SetWriteDeadline(aLongTimeAgo) + }() + go func() { + defer wg.Done() + c1.LocalAddr() + }() + go func() { + defer wg.Done() + c1.RemoteAddr() + }() + } + wg.Wait() // At worst, the deadline is set 10ms into the future + + resyncConn(t, c1) + testRoundtrip(t, c1) +} + +// checkForTimeoutError checks that the error satisfies the Error interface +// and that Timeout returns true. 
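+// Any error that does not implement net.Error, including a nil error,
+// is reported as a test failure.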
+func checkForTimeoutError(t *testing.T, err error) { + if nerr, ok := err.(net.Error); ok { + if !nerr.Timeout() { + t.Errorf("err.Timeout() = false, want true") + } + } else { + t.Errorf("got %T, want net.Error", err) + } +} + +// testRoundtrip writes something into c and reads it back. +// It assumes that everything written into c is echoed back to itself. +func testRoundtrip(t *testing.T, c net.Conn) { + if err := c.SetDeadline(neverTimeout); err != nil { + t.Errorf("roundtrip SetDeadline error: %v", err) + } + + const s = "Hello, world!" + buf := []byte(s) + if _, err := c.Write(buf); err != nil { + t.Errorf("roundtrip Write error: %v", err) + } + if _, err := io.ReadFull(c, buf); err != nil { + t.Errorf("roundtrip Read error: %v", err) + } + if string(buf) != s { + t.Errorf("roundtrip data mismatch: got %q, want %q", buf, s) + } +} + +// resyncConn resynchronizes the connection into a sane state. +// It assumes that everything written into c is echoed back to itself. +// It assumes that 0xff is not currently on the wire or in the read buffer. +func resyncConn(t *testing.T, c net.Conn) { + c.SetDeadline(neverTimeout) + errCh := make(chan error) + go func() { + _, err := c.Write([]byte{0xff}) + errCh <- err + }() + buf := make([]byte, 1024) + for { + n, err := c.Read(buf) + if n > 0 && bytes.IndexByte(buf[:n], 0xff) == n-1 { + break + } + if err != nil { + t.Errorf("unexpected Read error: %v", err) + break + } + } + if err := <-errCh; err != nil { + t.Errorf("unexpected Write error: %v", err) + } +} + +// chunkedCopy copies from r to w in fixed-width chunks to avoid +// causing a Write that exceeds the maximum packet size for packet-based +// connections like "unixpacket". +// We assume that the maximum packet size is at least 1024. +func chunkedCopy(w io.Writer, r io.Reader) error { + b := make([]byte, 1024) + _, err := io.CopyBuffer(struct{ io.Writer }{w}, struct{ io.Reader }{r}, b) + return err +} diff --git a/vendor/golang.org/x/net/nettest/conntest_go16.go b/vendor/golang.org/x/net/nettest/conntest_go16.go new file mode 100644 index 0000000..4cbf48e --- /dev/null +++ b/vendor/golang.org/x/net/nettest/conntest_go16.go @@ -0,0 +1,24 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package nettest + +import "testing" + +func testConn(t *testing.T, mp MakePipe) { + // Avoid using subtests on Go 1.6 and below. + timeoutWrapper(t, mp, testBasicIO) + timeoutWrapper(t, mp, testPingPong) + timeoutWrapper(t, mp, testRacyRead) + timeoutWrapper(t, mp, testRacyWrite) + timeoutWrapper(t, mp, testReadTimeout) + timeoutWrapper(t, mp, testWriteTimeout) + timeoutWrapper(t, mp, testPastTimeout) + timeoutWrapper(t, mp, testPresentTimeout) + timeoutWrapper(t, mp, testFutureTimeout) + timeoutWrapper(t, mp, testCloseTimeout) + timeoutWrapper(t, mp, testConcurrentMethods) +} diff --git a/vendor/golang.org/x/net/nettest/conntest_go17.go b/vendor/golang.org/x/net/nettest/conntest_go17.go new file mode 100644 index 0000000..fa039f0 --- /dev/null +++ b/vendor/golang.org/x/net/nettest/conntest_go17.go @@ -0,0 +1,24 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package nettest + +import "testing" + +func testConn(t *testing.T, mp MakePipe) { + // Use subtests on Go 1.7 and above since it is better organized. 
+ t.Run("BasicIO", func(t *testing.T) { timeoutWrapper(t, mp, testBasicIO) }) + t.Run("PingPong", func(t *testing.T) { timeoutWrapper(t, mp, testPingPong) }) + t.Run("RacyRead", func(t *testing.T) { timeoutWrapper(t, mp, testRacyRead) }) + t.Run("RacyWrite", func(t *testing.T) { timeoutWrapper(t, mp, testRacyWrite) }) + t.Run("ReadTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testReadTimeout) }) + t.Run("WriteTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testWriteTimeout) }) + t.Run("PastTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testPastTimeout) }) + t.Run("PresentTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testPresentTimeout) }) + t.Run("FutureTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testFutureTimeout) }) + t.Run("CloseTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testCloseTimeout) }) + t.Run("ConcurrentMethods", func(t *testing.T) { timeoutWrapper(t, mp, testConcurrentMethods) }) +} diff --git a/vendor/golang.org/x/net/nettest/conntest_test.go b/vendor/golang.org/x/net/nettest/conntest_test.go new file mode 100644 index 0000000..9f9453f --- /dev/null +++ b/vendor/golang.org/x/net/nettest/conntest_test.go @@ -0,0 +1,76 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package nettest + +import ( + "net" + "os" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" +) + +func TestTestConn(t *testing.T) { + tests := []struct{ name, network string }{ + {"TCP", "tcp"}, + {"UnixPipe", "unix"}, + {"UnixPacketPipe", "unixpacket"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if !nettest.TestableNetwork(tt.network) { + t.Skipf("not supported on %s", runtime.GOOS) + } + + mp := func() (c1, c2 net.Conn, stop func(), err error) { + ln, err := nettest.NewLocalListener(tt.network) + if err != nil { + return nil, nil, nil, err + } + + // Start a connection between two endpoints. + var err1, err2 error + done := make(chan bool) + go func() { + c2, err2 = ln.Accept() + close(done) + }() + c1, err1 = net.Dial(ln.Addr().Network(), ln.Addr().String()) + <-done + + stop = func() { + if err1 == nil { + c1.Close() + } + if err2 == nil { + c2.Close() + } + ln.Close() + switch tt.network { + case "unix", "unixpacket": + os.Remove(ln.Addr().String()) + } + } + + switch { + case err1 != nil: + stop() + return nil, nil, nil, err1 + case err2 != nil: + stop() + return nil, nil, nil, err2 + default: + return c1, c2, stop, nil + } + } + + TestConn(t, mp) + }) + } +} diff --git a/vendor/golang.org/x/net/netutil/listen.go b/vendor/golang.org/x/net/netutil/listen.go new file mode 100644 index 0000000..cee46e3 --- /dev/null +++ b/vendor/golang.org/x/net/netutil/listen.go @@ -0,0 +1,74 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package netutil provides network utility functions, complementing the more +// common ones in the net package. +package netutil // import "golang.org/x/net/netutil" + +import ( + "net" + "sync" +) + +// LimitListener returns a Listener that accepts at most n simultaneous +// connections from the provided Listener. 
+func LimitListener(l net.Listener, n int) net.Listener { + return &limitListener{ + Listener: l, + sem: make(chan struct{}, n), + done: make(chan struct{}), + } +} + +type limitListener struct { + net.Listener + sem chan struct{} + closeOnce sync.Once // ensures the done chan is only closed once + done chan struct{} // no values sent; closed when Close is called +} + +// acquire acquires the limiting semaphore. Returns true if successfully +// accquired, false if the listener is closed and the semaphore is not +// acquired. +func (l *limitListener) acquire() bool { + select { + case <-l.done: + return false + case l.sem <- struct{}{}: + return true + } +} +func (l *limitListener) release() { <-l.sem } + +func (l *limitListener) Accept() (net.Conn, error) { + acquired := l.acquire() + // If the semaphore isn't acquired because the listener was closed, expect + // that this call to accept won't block, but immediately return an error. + c, err := l.Listener.Accept() + if err != nil { + if acquired { + l.release() + } + return nil, err + } + return &limitListenerConn{Conn: c, release: l.release}, nil +} + +func (l *limitListener) Close() error { + err := l.Listener.Close() + l.closeOnce.Do(func() { close(l.done) }) + return err +} + +type limitListenerConn struct { + net.Conn + releaseOnce sync.Once + release func() +} + +func (l *limitListenerConn) Close() error { + err := l.Conn.Close() + l.releaseOnce.Do(l.release) + return err +} diff --git a/vendor/golang.org/x/net/netutil/listen_test.go b/vendor/golang.org/x/net/netutil/listen_test.go new file mode 100644 index 0000000..f40c9aa --- /dev/null +++ b/vendor/golang.org/x/net/netutil/listen_test.go @@ -0,0 +1,147 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package netutil + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "sync" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/internal/nettest" +) + +func TestLimitListener(t *testing.T) { + const max = 5 + attempts := (nettest.MaxOpenFiles() - max) / 2 + if attempts > 256 { // maximum length of accept queue is 128 by default + attempts = 256 + } + + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer l.Close() + l = LimitListener(l, max) + + var open int32 + go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if n := atomic.AddInt32(&open, 1); n > max { + t.Errorf("%d open connections, want <= %d", n, max) + } + defer atomic.AddInt32(&open, -1) + time.Sleep(10 * time.Millisecond) + fmt.Fprint(w, "some body") + })) + + var wg sync.WaitGroup + var failed int32 + for i := 0; i < attempts; i++ { + wg.Add(1) + go func() { + defer wg.Done() + c := http.Client{Timeout: 3 * time.Second} + r, err := c.Get("http://" + l.Addr().String()) + if err != nil { + t.Log(err) + atomic.AddInt32(&failed, 1) + return + } + defer r.Body.Close() + io.Copy(ioutil.Discard, r.Body) + }() + } + wg.Wait() + + // We expect some Gets to fail as the kernel's accept queue is filled, + // but most should succeed. + if int(failed) >= attempts/2 { + t.Errorf("%d requests failed within %d attempts", failed, attempts) + } +} + +type errorListener struct { + net.Listener +} + +func (errorListener) Accept() (net.Conn, error) { + return nil, errFake +} + +var errFake = errors.New("fake error from errorListener") + +// This used to hang. 
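+// With a limit of n, the n+1st Accept would block forever if an
+// Accept that fails did not release its semaphore slot.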
+func TestLimitListenerError(t *testing.T) { + donec := make(chan bool, 1) + go func() { + const n = 2 + ll := LimitListener(errorListener{}, n) + for i := 0; i < n+1; i++ { + _, err := ll.Accept() + if err != errFake { + t.Fatalf("Accept error = %v; want errFake", err) + } + } + donec <- true + }() + select { + case <-donec: + case <-time.After(5 * time.Second): + t.Fatal("timeout. deadlock?") + } +} + +func TestLimitListenerClose(t *testing.T) { + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + ln = LimitListener(ln, 1) + + doneCh := make(chan struct{}) + defer close(doneCh) + go func() { + c, err := net.Dial("tcp", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + <-doneCh + }() + + c, err := ln.Accept() + if err != nil { + t.Fatal(err) + } + defer c.Close() + + acceptDone := make(chan struct{}) + go func() { + c, err := ln.Accept() + if err == nil { + c.Close() + t.Errorf("Unexpected successful Accept()") + } + close(acceptDone) + }() + + // Wait a tiny bit to ensure the Accept() is blocking. + time.Sleep(10 * time.Millisecond) + ln.Close() + + select { + case <-acceptDone: + case <-time.After(5 * time.Second): + t.Fatalf("Accept() still blocking") + } +} diff --git a/vendor/golang.org/x/net/proxy/direct.go b/vendor/golang.org/x/net/proxy/direct.go new file mode 100644 index 0000000..4c5ad88 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/direct.go @@ -0,0 +1,18 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "net" +) + +type direct struct{} + +// Direct is a direct proxy: one that makes network connections directly. +var Direct = direct{} + +func (direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go new file mode 100644 index 0000000..0689bb6 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/per_host.go @@ -0,0 +1,140 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "net" + "strings" +) + +// A PerHost directs connections to a default Dialer unless the host name +// requested matches one of a number of exceptions. +type PerHost struct { + def, bypass Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. +func NewPerHost(defaultDialer, bypass Dialer) *PerHost { + return &PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. 
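+//
+// A minimal sketch of typical use (proxyDialer and the rule string are
+// illustrative assumptions): hosts matching a rule are dialed through the
+// bypass dialer, everything else through the default one.
+//
+//	p := NewPerHost(proxyDialer, Direct)
+//	p.AddFromString("localhost,10.0.0.0/8,*.internal.example")
+//	c, err := p.Dial("tcp", "db.internal.example:5432") // matches *.internal.example, uses Direct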
+func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +func (p *PerHost) dialerForRequest(host string) Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone ".example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a host name +// (localhost). A best effort is made to parse the string and errors are +// ignored. +func (p *PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match an IP. +func (p *PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match. +func (p *PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *PerHost) AddZone(zone string) { + if strings.HasSuffix(zone, ".") { + zone = zone[:len(zone)-1] + } + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a host name that will use the bypass proxy. +func (p *PerHost) AddHost(host string) { + if strings.HasSuffix(host, ".") { + host = host[:len(host)-1] + } + p.bypassHosts = append(p.bypassHosts, host) +} diff --git a/vendor/golang.org/x/net/proxy/per_host_test.go b/vendor/golang.org/x/net/proxy/per_host_test.go new file mode 100644 index 0000000..a7d8095 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/per_host_test.go @@ -0,0 +1,55 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package proxy + +import ( + "errors" + "net" + "reflect" + "testing" +) + +type recordingProxy struct { + addrs []string +} + +func (r *recordingProxy) Dial(network, addr string) (net.Conn, error) { + r.addrs = append(r.addrs, addr) + return nil, errors.New("recordingProxy") +} + +func TestPerHost(t *testing.T) { + var def, bypass recordingProxy + perHost := NewPerHost(&def, &bypass) + perHost.AddFromString("localhost,*.zone,127.0.0.1,10.0.0.1/8,1000::/16") + + expectedDef := []string{ + "example.com:123", + "1.2.3.4:123", + "[1001::]:123", + } + expectedBypass := []string{ + "localhost:123", + "zone:123", + "foo.zone:123", + "127.0.0.1:123", + "10.1.2.3:123", + "[1000::]:123", + } + + for _, addr := range expectedDef { + perHost.Dial("tcp", addr) + } + for _, addr := range expectedBypass { + perHost.Dial("tcp", addr) + } + + if !reflect.DeepEqual(expectedDef, def.addrs) { + t.Errorf("Hosts which went to the default proxy didn't match. Got %v, want %v", def.addrs, expectedDef) + } + if !reflect.DeepEqual(expectedBypass, bypass.addrs) { + t.Errorf("Hosts which went to the bypass proxy didn't match. Got %v, want %v", bypass.addrs, expectedBypass) + } +} diff --git a/vendor/golang.org/x/net/proxy/proxy.go b/vendor/golang.org/x/net/proxy/proxy.go new file mode 100644 index 0000000..553ead7 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/proxy.go @@ -0,0 +1,134 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proxy provides support for a variety of protocols to proxy network +// data. +package proxy // import "golang.org/x/net/proxy" + +import ( + "errors" + "net" + "net/url" + "os" + "sync" +) + +// A Dialer is a means to establish a connection. +type Dialer interface { + // Dial connects to the given address via the proxy. + Dial(network, addr string) (c net.Conn, err error) +} + +// Auth contains authentication parameters that specific Dialers may require. +type Auth struct { + User, Password string +} + +// FromEnvironment returns the dialer specified by the proxy related variables in +// the environment. +func FromEnvironment() Dialer { + allProxy := allProxyEnv.Get() + if len(allProxy) == 0 { + return Direct + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return Direct + } + proxy, err := FromURL(proxyURL, Direct) + if err != nil { + return Direct + } + + noProxy := noProxyEnv.Get() + if len(noProxy) == 0 { + return proxy + } + + perHost := NewPerHost(proxy, Direct) + perHost.AddFromString(noProxy) + return perHost +} + +// proxySchemes is a map from URL schemes to a function that creates a Dialer +// from a URL with such a scheme. +var proxySchemes map[string]func(*url.URL, Dialer) (Dialer, error) + +// RegisterDialerType takes a URL scheme and a function to generate Dialers from +// a URL with that scheme and a forwarding Dialer. Registered schemes are used +// by FromURL. +func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) { + if proxySchemes == nil { + proxySchemes = make(map[string]func(*url.URL, Dialer) (Dialer, error)) + } + proxySchemes[scheme] = f +} + +// FromURL returns a Dialer given a URL specification and an underlying +// Dialer for it to make network requests. 
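+//
+// A minimal sketch (the proxy URL is an illustrative assumption):
+//
+//	u, err := url.Parse("socks5://user:password@proxy.example.com:1080")
+//	if err != nil {
+//		// handle error
+//	}
+//	d, err := FromURL(u, Direct)
+//	if err != nil {
+//		// handle error
+//	}
+//	c, err := d.Dial("tcp", "example.com:80")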
+func FromURL(u *url.URL, forward Dialer) (Dialer, error) { + var auth *Auth + if u.User != nil { + auth = new(Auth) + auth.User = u.User.Username() + if p, ok := u.User.Password(); ok { + auth.Password = p + } + } + + switch u.Scheme { + case "socks5": + return SOCKS5("tcp", u.Host, auth, forward) + } + + // If the scheme doesn't match any of the built-in schemes, see if it + // was registered by another package. + if proxySchemes != nil { + if f, ok := proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} + +var ( + allProxyEnv = &envOnce{ + names: []string{"ALL_PROXY", "all_proxy"}, + } + noProxyEnv = &envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). +// (Borrowed from net/http/transport.go) +type envOnce struct { + names []string + once sync.Once + val string +} + +func (e *envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// reset is used by tests +func (e *envOnce) reset() { + e.once = sync.Once{} + e.val = "" +} diff --git a/vendor/golang.org/x/net/proxy/proxy_test.go b/vendor/golang.org/x/net/proxy/proxy_test.go new file mode 100644 index 0000000..0be1b42 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/proxy_test.go @@ -0,0 +1,123 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "bytes" + "fmt" + "net/url" + "os" + "strings" + "testing" + + "golang.org/x/net/internal/sockstest" +) + +type proxyFromEnvTest struct { + allProxyEnv string + noProxyEnv string + wantTypeOf Dialer +} + +func (t proxyFromEnvTest) String() string { + var buf bytes.Buffer + space := func() { + if buf.Len() > 0 { + buf.WriteByte(' ') + } + } + if t.allProxyEnv != "" { + fmt.Fprintf(&buf, "all_proxy=%q", t.allProxyEnv) + } + if t.noProxyEnv != "" { + space() + fmt.Fprintf(&buf, "no_proxy=%q", t.noProxyEnv) + } + return strings.TrimSpace(buf.String()) +} + +func TestFromEnvironment(t *testing.T) { + ResetProxyEnv() + + type dummyDialer struct { + direct + } + + RegisterDialerType("irc", func(_ *url.URL, _ Dialer) (Dialer, error) { + return dummyDialer{}, nil + }) + + proxyFromEnvTests := []proxyFromEnvTest{ + {allProxyEnv: "127.0.0.1:8080", noProxyEnv: "localhost, 127.0.0.1", wantTypeOf: direct{}}, + {allProxyEnv: "ftp://example.com:8000", noProxyEnv: "localhost, 127.0.0.1", wantTypeOf: direct{}}, + {allProxyEnv: "socks5://example.com:8080", noProxyEnv: "localhost, 127.0.0.1", wantTypeOf: &PerHost{}}, + {allProxyEnv: "irc://example.com:8000", wantTypeOf: dummyDialer{}}, + {noProxyEnv: "localhost, 127.0.0.1", wantTypeOf: direct{}}, + {wantTypeOf: direct{}}, + } + + for _, tt := range proxyFromEnvTests { + os.Setenv("ALL_PROXY", tt.allProxyEnv) + os.Setenv("NO_PROXY", tt.noProxyEnv) + ResetCachedEnvironment() + + d := FromEnvironment() + if got, want := fmt.Sprintf("%T", d), fmt.Sprintf("%T", tt.wantTypeOf); got != want { + t.Errorf("%v: got type = %T, want %T", tt, d, tt.wantTypeOf) + } + } +} + +func TestFromURL(t *testing.T) { + ss, err := sockstest.NewServer(sockstest.NoAuthRequired, sockstest.NoProxyRequired) + if err != nil { + t.Fatal(err) + } + defer ss.Close() + url, err := 
url.Parse("socks5://user:password@" + ss.Addr().String()) + if err != nil { + t.Fatal(err) + } + proxy, err := FromURL(url, nil) + if err != nil { + t.Fatal(err) + } + c, err := proxy.Dial("tcp", "fqdn.doesnotexist:5963") + if err != nil { + t.Fatal(err) + } + c.Close() +} + +func TestSOCKS5(t *testing.T) { + ss, err := sockstest.NewServer(sockstest.NoAuthRequired, sockstest.NoProxyRequired) + if err != nil { + t.Fatal(err) + } + defer ss.Close() + proxy, err := SOCKS5("tcp", ss.Addr().String(), nil, nil) + if err != nil { + t.Fatal(err) + } + c, err := proxy.Dial("tcp", ss.TargetAddr().String()) + if err != nil { + t.Fatal(err) + } + c.Close() +} + +func ResetProxyEnv() { + for _, env := range []*envOnce{allProxyEnv, noProxyEnv} { + for _, v := range env.names { + os.Setenv(v, "") + } + } + ResetCachedEnvironment() +} + +func ResetCachedEnvironment() { + allProxyEnv.reset() + noProxyEnv.reset() +} diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go new file mode 100644 index 0000000..56345ec --- /dev/null +++ b/vendor/golang.org/x/net/proxy/socks5.go @@ -0,0 +1,36 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "context" + "net" + + "golang.org/x/net/internal/socks" +) + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given +// address with an optional username and password. +// See RFC 1928 and RFC 1929. +func SOCKS5(network, address string, auth *Auth, forward Dialer) (Dialer, error) { + d := socks.NewDialer(network, address) + if forward != nil { + d.ProxyDial = func(_ context.Context, network string, address string) (net.Conn, error) { + return forward.Dial(network, address) + } + } + if auth != nil { + up := socks.UsernamePassword{ + Username: auth.User, + Password: auth.Password, + } + d.AuthMethods = []socks.AuthMethod{ + socks.AuthMethodNotRequired, + socks.AuthMethodUsernamePassword, + } + d.Authenticate = up.Authenticate + } + return d, nil +} diff --git a/vendor/golang.org/x/net/publicsuffix/gen.go b/vendor/golang.org/x/net/publicsuffix/gen.go new file mode 100644 index 0000000..f85a3c3 --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/gen.go @@ -0,0 +1,713 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +// This program generates table.go and table_test.go based on the authoritative +// public suffix list at https://publicsuffix.org/list/effective_tld_names.dat +// +// The version is derived from +// https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat +// and a human-readable form is at +// https://github.com/publicsuffix/list/commits/master/public_suffix_list.dat +// +// To fetch a particular git revision, such as 5c70ccd250, pass +// -url "https://raw.githubusercontent.com/publicsuffix/list/5c70ccd250/public_suffix_list.dat" +// and -version "an explicit version string". + +import ( + "bufio" + "bytes" + "flag" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "regexp" + "sort" + "strings" + + "golang.org/x/net/idna" +) + +const ( + // These sum of these four values must be no greater than 32. + nodesBitsChildren = 10 + nodesBitsICANN = 1 + nodesBitsTextOffset = 15 + nodesBitsTextLength = 6 + + // These sum of these four values must be no greater than 32. 
+	childrenBitsWildcard = 1
+	childrenBitsNodeType = 2
+	childrenBitsHi       = 14
+	childrenBitsLo       = 14
+)
+
+var (
+	maxChildren   int
+	maxTextOffset int
+	maxTextLength int
+	maxHi         uint32
+	maxLo         uint32
+)
+
+func max(a, b int) int {
+	if a < b {
+		return b
+	}
+	return a
+}
+
+func u32max(a, b uint32) uint32 {
+	if a < b {
+		return b
+	}
+	return a
+}
+
+const (
+	nodeTypeNormal     = 0
+	nodeTypeException  = 1
+	nodeTypeParentOnly = 2
+	numNodeType        = 3
+)
+
+func nodeTypeStr(n int) string {
+	switch n {
+	case nodeTypeNormal:
+		return "+"
+	case nodeTypeException:
+		return "!"
+	case nodeTypeParentOnly:
+		return "o"
+	}
+	panic("unreachable")
+}
+
+const (
+	defaultURL   = "https://publicsuffix.org/list/effective_tld_names.dat"
+	gitCommitURL = "https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat"
+)
+
+var (
+	labelEncoding = map[string]uint32{}
+	labelsList    = []string{}
+	labelsMap     = map[string]bool{}
+	rules         = []string{}
+
+	// validSuffixRE is used to check that the entries in the public suffix
+	// list are in canonical form (after Punycode encoding). Specifically,
+	// capital letters are not allowed.
+	validSuffixRE = regexp.MustCompile(`^[a-z0-9_\!\*\-\.]+$`)
+
+	shaRE  = regexp.MustCompile(`"sha":"([^"]+)"`)
+	dateRE = regexp.MustCompile(`"committer":{[^{]+"date":"([^"]+)"`)
+
+	comments = flag.Bool("comments", false, "generate table.go comments, for debugging")
+	subset   = flag.Bool("subset", false, "generate only a subset of the full table, for debugging")
+	url      = flag.String("url", defaultURL, "URL of the publicsuffix.org list. If empty, stdin is read instead")
+	v        = flag.Bool("v", false, "verbose output (to stderr)")
+	version  = flag.String("version", "", "the effective_tld_names.dat version")
+)
+
+func main() {
+	if err := main1(); err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+}
+
+func main1() error {
+	flag.Parse()
+	if nodesBitsTextLength+nodesBitsTextOffset+nodesBitsICANN+nodesBitsChildren > 32 {
+		return fmt.Errorf("not enough bits to encode the nodes table")
+	}
+	if childrenBitsLo+childrenBitsHi+childrenBitsNodeType+childrenBitsWildcard > 32 {
+		return fmt.Errorf("not enough bits to encode the children table")
+	}
+	if *version == "" {
+		if *url != defaultURL {
+			return fmt.Errorf("-version was not specified, and the -url is not the default one")
+		}
+		sha, date, err := gitCommit()
+		if err != nil {
+			return err
+		}
+		*version = fmt.Sprintf("publicsuffix.org's public_suffix_list.dat, git revision %s (%s)", sha, date)
+	}
+	var r io.Reader = os.Stdin
+	if *url != "" {
+		res, err := http.Get(*url)
+		if err != nil {
+			return err
+		}
+		if res.StatusCode != http.StatusOK {
+			return fmt.Errorf("bad GET status for %s: %s", *url, res.Status)
+		}
+		r = res.Body
+		defer res.Body.Close()
+	}
+
+	var root node
+	icann := false
+	br := bufio.NewReader(r)
+	for {
+		s, err := br.ReadString('\n')
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+		s = strings.TrimSpace(s)
+		if strings.Contains(s, "BEGIN ICANN DOMAINS") {
+			icann = true
+			continue
+		}
+		if strings.Contains(s, "END ICANN DOMAINS") {
+			icann = false
+			continue
+		}
+		if s == "" || strings.HasPrefix(s, "//") {
+			continue
+		}
+		s, err = idna.ToASCII(s)
+		if err != nil {
+			return err
+		}
+		if !validSuffixRE.MatchString(s) {
+			return fmt.Errorf("bad publicsuffix.org list data: %q", s)
+		}
+
+		if *subset {
+			switch {
+			case s == "ac.jp" || strings.HasSuffix(s, ".ac.jp"):
+			case s == "ak.us" || strings.HasSuffix(s, ".ak.us"):
+			case s == "ao" || strings.HasSuffix(s, ".ao"):
+ case s == "ar" || strings.HasSuffix(s, ".ar"): + case s == "arpa" || strings.HasSuffix(s, ".arpa"): + case s == "cy" || strings.HasSuffix(s, ".cy"): + case s == "dyndns.org" || strings.HasSuffix(s, ".dyndns.org"): + case s == "jp": + case s == "kobe.jp" || strings.HasSuffix(s, ".kobe.jp"): + case s == "kyoto.jp" || strings.HasSuffix(s, ".kyoto.jp"): + case s == "om" || strings.HasSuffix(s, ".om"): + case s == "uk" || strings.HasSuffix(s, ".uk"): + case s == "uk.com" || strings.HasSuffix(s, ".uk.com"): + case s == "tw" || strings.HasSuffix(s, ".tw"): + case s == "zw" || strings.HasSuffix(s, ".zw"): + case s == "xn--p1ai" || strings.HasSuffix(s, ".xn--p1ai"): + // xn--p1ai is Russian-Cyrillic "рф". + default: + continue + } + } + + rules = append(rules, s) + + nt, wildcard := nodeTypeNormal, false + switch { + case strings.HasPrefix(s, "*."): + s, nt = s[2:], nodeTypeParentOnly + wildcard = true + case strings.HasPrefix(s, "!"): + s, nt = s[1:], nodeTypeException + } + labels := strings.Split(s, ".") + for n, i := &root, len(labels)-1; i >= 0; i-- { + label := labels[i] + n = n.child(label) + if i == 0 { + if nt != nodeTypeParentOnly && n.nodeType == nodeTypeParentOnly { + n.nodeType = nt + } + n.icann = n.icann && icann + n.wildcard = n.wildcard || wildcard + } + labelsMap[label] = true + } + } + labelsList = make([]string, 0, len(labelsMap)) + for label := range labelsMap { + labelsList = append(labelsList, label) + } + sort.Strings(labelsList) + + if err := generate(printReal, &root, "table.go"); err != nil { + return err + } + if err := generate(printTest, &root, "table_test.go"); err != nil { + return err + } + return nil +} + +func generate(p func(io.Writer, *node) error, root *node, filename string) error { + buf := new(bytes.Buffer) + if err := p(buf, root); err != nil { + return err + } + b, err := format.Source(buf.Bytes()) + if err != nil { + return err + } + return ioutil.WriteFile(filename, b, 0644) +} + +func gitCommit() (sha, date string, retErr error) { + res, err := http.Get(gitCommitURL) + if err != nil { + return "", "", err + } + if res.StatusCode != http.StatusOK { + return "", "", fmt.Errorf("bad GET status for %s: %d", gitCommitURL, res.Status) + } + defer res.Body.Close() + b, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + if m := shaRE.FindSubmatch(b); m != nil { + sha = string(m[1]) + } + if m := dateRE.FindSubmatch(b); m != nil { + date = string(m[1]) + } + if sha == "" || date == "" { + retErr = fmt.Errorf("could not find commit SHA and date in %s", gitCommitURL) + } + return sha, date, retErr +} + +func printTest(w io.Writer, n *node) error { + fmt.Fprintf(w, "// generated by go run gen.go; DO NOT EDIT\n\n") + fmt.Fprintf(w, "package publicsuffix\n\nvar rules = [...]string{\n") + for _, rule := range rules { + fmt.Fprintf(w, "%q,\n", rule) + } + fmt.Fprintf(w, "}\n\nvar nodeLabels = [...]string{\n") + if err := n.walk(w, printNodeLabel); err != nil { + return err + } + fmt.Fprintf(w, "}\n") + return nil +} + +func printReal(w io.Writer, n *node) error { + const header = `// generated by go run gen.go; DO NOT EDIT + +package publicsuffix + +const version = %q + +const ( + nodesBitsChildren = %d + nodesBitsICANN = %d + nodesBitsTextOffset = %d + nodesBitsTextLength = %d + + childrenBitsWildcard = %d + childrenBitsNodeType = %d + childrenBitsHi = %d + childrenBitsLo = %d +) + +const ( + nodeTypeNormal = %d + nodeTypeException = %d + nodeTypeParentOnly = %d +) + +// numTLD is the number of top level domains. 
+const numTLD = %d
+
+`
+	fmt.Fprintf(w, header, *version,
+		nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength,
+		childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo,
+		nodeTypeNormal, nodeTypeException, nodeTypeParentOnly, len(n.children))
+
+	text := combineText(labelsList)
+	if text == "" {
+		return fmt.Errorf("internal error: makeText returned no text")
+	}
+	for _, label := range labelsList {
+		offset, length := strings.Index(text, label), len(label)
+		if offset < 0 {
+			return fmt.Errorf("internal error: could not find %q in text %q", label, text)
+		}
+		maxTextOffset, maxTextLength = max(maxTextOffset, offset), max(maxTextLength, length)
+		if offset >= 1<<nodesBitsTextOffset {
+			return fmt.Errorf("text offset %d is too large, or nodeBitsTextOffset is too small", offset)
+		}
+		if length >= 1<<nodesBitsTextLength {
+			return fmt.Errorf("text length %d is too large, or nodeBitsTextLength is too small", length)
+		}
+		labelEncoding[label] = uint32(offset)<<nodesBitsTextLength | uint32(length)
+	}
+	fmt.Fprintf(w, "// Text is the combined text of all labels.\nconst text = ")
+	for len(text) > 0 {
+		n, plus := len(text), ""
+		if n > 64 {
+			n, plus = 64, " +"
+		}
+		fmt.Fprintf(w, "%q%s\n", text[:n], plus)
+		text = text[n:]
+	}
+
+	if err := n.walk(w, assignIndexes); err != nil {
+		return err
+	}
+
+	fmt.Fprintf(w, `
+
+// nodes is the list of nodes. Each node is represented as a uint32, which
+// encodes the node's children, wildcard bit and node type (as an index into
+// the children array), ICANN bit and text.
+//
+// If the table was generated with the -comments flag, there is a //-comment
+// after each node's data. In it is the nodes-array indexes of the children,
+// formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The
+// nodeType is printed as + for normal, ! for exception, and o for parent-only
+// nodes that have children but don't match a domain label in their own right.
+// An I denotes an ICANN domain.
+//
+// The layout within the uint32, from MSB to LSB, is:
+//	[%2d bits] unused
+//	[%2d bits] children index
+//	[%2d bits] ICANN bit
+//	[%2d bits] text index
+//	[%2d bits] text length
+var nodes = [...]uint32{
+`,
+		32-nodesBitsChildren-nodesBitsICANN-nodesBitsTextOffset-nodesBitsTextLength,
+		nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength)
+	if err := n.walk(w, printNode); err != nil {
+		return err
+	}
+	fmt.Fprintf(w, `}
+
+// children is the list of nodes' children, the parent's wildcard bit and the
+// parent's node type. If a node has no children then their children index
+// will be in the range [0, 6), depending on the wildcard bit and node type.
+//
+// The layout within the uint32, from MSB to LSB, is:
+//	[%2d bits] unused
+//	[%2d bits] wildcard bit
+//	[%2d bits] node type
+//	[%2d bits] high nodes index (exclusive) of children
+//	[%2d bits] low nodes index (inclusive) of children
+var children = [...]uint32{
+`,
+		32-childrenBitsWildcard-childrenBitsNodeType-childrenBitsHi-childrenBitsLo,
+		childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo)
+	for i, c := range childrenEncoding {
+		s := "---------------"
+		lo := c & (1<<childrenBitsLo - 1)
+		hi := (c >> childrenBitsLo) & (1<<childrenBitsHi - 1)
+		if lo != 0 || hi != 0 {
+			s = fmt.Sprintf("n0x%04x-n0x%04x", lo, hi)
+		}
+		nodeType := int(c>>(childrenBitsLo+childrenBitsHi)) & (1<<childrenBitsNodeType - 1)
+		wildcard := c>>(childrenBitsLo+childrenBitsHi+childrenBitsNodeType) != 0
+		if *comments {
+			fmt.Fprintf(w, "0x%08x, // c0x%04x (%s)%s %s\n",
+				c, i, s, wildcardStr(wildcard), nodeTypeStr(nodeType))
+		} else {
+			fmt.Fprintf(w, "0x%x,\n", c)
+		}
+	}
+	fmt.Fprintf(w, "}\n\n")
+	fmt.Fprintf(w, "// max children %d (capacity %d)\n", maxChildren, 1<<nodesBitsChildren-1)
+	fmt.Fprintf(w, "// max text offset %d (capacity %d)\n", maxTextOffset, 1<<nodesBitsTextOffset-1)
+	fmt.Fprintf(w, "// max text length %d (capacity %d)\n", maxTextLength, 1<<nodesBitsTextLength-1)
+	fmt.Fprintf(w, "// max hi %d (capacity %d)\n", maxHi, 1<<childrenBitsHi-1)
+	fmt.Fprintf(w, "// max lo %d (capacity %d)\n", maxLo, 1<<childrenBitsLo-1)
+	return nil
+}
+
+type byLength []string
+
+func (s byLength) Len() int           { return len(s) }
+func (s byLength) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s byLength) Less(i, j int) bool { return len(s[i]) < len(s[j]) }
+
+// removeSubstrings returns a copy of its input with any strings removed
+// that are substrings of other provided strings.
+func removeSubstrings(input []string) []string {
+	// Make a copy of input.
+	ss := append(make([]string, 0, len(input)), input...)
+	sort.Sort(sort.Reverse(byLength(ss)))
+
+	for i, shortString := range ss {
+		// For each string, only consider strings higher than it in sort order,
+		// i.e. of equal length or greater.
+		for _, longString := range ss[:i] {
+			if strings.Contains(longString, shortString) {
+				ss[i] = ""
+				break
+			}
+		}
+	}
+
+	// Remove the empty strings.
+	sort.Strings(ss)
+	for len(ss) > 0 && ss[0] == "" {
+		ss = ss[1:]
+	}
+	return ss
+}
+
+// crush combines a list of strings, taking advantage of overlaps. It returns a
+// single string that contains each input string as a substring.
+func crush(ss []string) string {
+	maxLabelLen := 0
+	for _, s := range ss {
+		if maxLabelLen < len(s) {
+			maxLabelLen = len(s)
+		}
+	}
+
+	for prefixLen := maxLabelLen; prefixLen > 0; prefixLen-- {
+		prefixes := makePrefixMap(ss, prefixLen)
+		for i, s := range ss {
+			if len(s) <= prefixLen {
+				continue
+			}
+			mergeLabel(ss, i, prefixLen, prefixes)
+		}
+	}
+
+	return strings.Join(ss, "")
+}
+
+// mergeLabel merges the label at ss[i] with the first available matching label
+// in prefixMap, where the last "prefixLen" characters in ss[i] match the first
+// "prefixLen" characters in the matching label.
+// It will merge ss[i] repeatedly until no more matches are available.
+// All matching labels merged into ss[i] are replaced by "".
+func mergeLabel(ss []string, i, prefixLen int, prefixes prefixMap) {
+	s := ss[i]
+	suffix := s[len(s)-prefixLen:]
+	for _, j := range prefixes[suffix] {
+		// Empty strings mean "already used." Also avoid merging with self.
+		if ss[j] == "" || i == j {
+			continue
+		}
+		if *v {
+			fmt.Fprintf(os.Stderr, "%d-length overlap at (%4d,%4d): %q and %q share %q\n",
+				prefixLen, i, j, ss[i], ss[j], suffix)
+		}
+		ss[i] += ss[j][prefixLen:]
+		ss[j] = ""
+		// ss[i] has a new suffix, so merge again if possible.
+		// Note: we only have to merge again at the same prefix length. Shorter
+		// prefix lengths will be handled in the next iteration of crush's for loop.
+		// Can there be matches for longer prefix lengths, introduced by the merge?
+		// I believe that any such matches would by necessity have been eliminated
+		// during substring removal or merged at a higher prefix length. For
+		// instance, in crush("abc", "cde", "bcdef"), combining "abc" and "cde"
+		// would yield "abcde", which could be merged with "bcdef." However, in
+		// practice "cde" would already have been eliminated by removeSubstrings.
+		mergeLabel(ss, i, prefixLen, prefixes)
+		return
+	}
+}
+
+// prefixMap maps from a prefix to a list of strings containing that prefix. The
+// list of strings is represented as indexes into a slice of strings stored
+// elsewhere.
+type prefixMap map[string][]int
+
+// makePrefixMap constructs a prefixMap from a slice of strings.
+func makePrefixMap(ss []string, prefixLen int) prefixMap {
+	prefixes := make(prefixMap)
+	for i, s := range ss {
+		// We use < rather than <= because if a label matches on a prefix equal to
+		// its full length, that's actually a substring match handled by
+		// removeSubstrings.
+		if prefixLen < len(s) {
+			prefix := s[:prefixLen]
+			prefixes[prefix] = append(prefixes[prefix], i)
+		}
+	}
+
+	return prefixes
+}
diff --git a/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/golang.org/x/net/publicsuffix/list.go
new file mode 100644
index 0000000..8bbf3bc
--- /dev/null
+++ b/vendor/golang.org/x/net/publicsuffix/list.go
@@ -0,0 +1,135 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run gen.go
+
+// Package publicsuffix provides a public suffix list based on data from
+// http://publicsuffix.org/. A public suffix is one under which Internet users
+// can directly register names.
+package publicsuffix // import "golang.org/x/net/publicsuffix"
+
+// TODO: specify case sensitivity and leading/trailing dot behavior for
+// func PublicSuffix and func EffectiveTLDPlusOne.
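+
+// A minimal sketch of typical use (the domains are illustrative; the results
+// follow the doc comments on PublicSuffix and EffectiveTLDPlusOne below):
+//
+//	ps, icann := publicsuffix.PublicSuffix("foo.blogspot.co.uk")
+//	// ps == "blogspot.co.uk", icann == false (privately managed)
+//
+//	etld1, _ := publicsuffix.EffectiveTLDPlusOne("foo.bar.golang.org")
+//	// etld1 == "golang.org"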
+
+import (
+	"fmt"
+	"net/http/cookiejar"
+	"strings"
+)
+
+// List implements the cookiejar.PublicSuffixList interface by calling the
+// PublicSuffix function.
+var List cookiejar.PublicSuffixList = list{}
+
+type list struct{}
+
+func (list) PublicSuffix(domain string) string {
+	ps, _ := PublicSuffix(domain)
+	return ps
+}
+
+func (list) String() string {
+	return version
+}
+
+// PublicSuffix returns the public suffix of the domain using a copy of the
+// publicsuffix.org database compiled into the library.
+//
+// icann is whether the public suffix is managed by the Internet Corporation
+// for Assigned Names and Numbers. If not, the public suffix is privately
+// managed. For example, foo.org and foo.co.uk are ICANN domains,
+// foo.dyndns.org and foo.blogspot.co.uk are private domains.
+//
+// Use cases for distinguishing ICANN domains like foo.com from private
+// domains like foo.appspot.com can be found at
+// https://wiki.mozilla.org/Public_Suffix_List/Use_Cases
+func PublicSuffix(domain string) (publicSuffix string, icann bool) {
+	lo, hi := uint32(0), uint32(numTLD)
+	s, suffix, wildcard := domain, len(domain), false
+loop:
+	for {
+		dot := strings.LastIndex(s, ".")
+		if wildcard {
+			suffix = 1 + dot
+		}
+		if lo == hi {
+			break
+		}
+		f := find(s[1+dot:], lo, hi)
+		if f == notFound {
+			break
+		}
+
+		u := nodes[f] >> (nodesBitsTextOffset + nodesBitsTextLength)
+		icann = u&(1<<nodesBitsICANN-1) != 0
+		u >>= nodesBitsICANN
+		u = children[u&(1<<nodesBitsChildren-1)]
+		lo = u & (1<<childrenBitsLo - 1)
+		u >>= childrenBitsLo
+		hi = u & (1<<childrenBitsHi - 1)
+		u >>= childrenBitsHi
+		switch u & (1<<childrenBitsNodeType - 1) {
+		case nodeTypeNormal:
+			suffix = 1 + dot
+		case nodeTypeException:
+			suffix = 1 + len(s)
+			break loop
+		}
+		u >>= childrenBitsNodeType
+		wildcard = u&(1<<childrenBitsWildcard-1) != 0
+
+		if dot == -1 {
+			break
+		}
+		s = s[:dot]
+	}
+	if suffix == len(domain) {
+		// If no rules match, the prevailing rule is "*".
+		return domain[1+strings.LastIndex(domain, "."):], icann
+	}
+	return domain[suffix:], icann
+}
+
+const notFound uint32 = 1<<32 - 1
+
+// find returns the index of the node in the range [lo, hi) whose label equals
+// label, or notFound if there is no such node.
+func find(label string, lo, hi uint32) uint32 {
+	for lo < hi {
+		mid := lo + (hi-lo)/2
+		s := nodeLabel(mid)
+		if s < label {
+			lo = mid + 1
+		} else if s > label {
+			hi = mid
+		} else {
+			return mid
+		}
+	}
+	return notFound
+}
+
+// nodeLabel returns the label for the i'th node.
+func nodeLabel(i uint32) string {
+	x := nodes[i]
+	length := x & (1<<nodesBitsTextLength - 1)
+	x >>= nodesBitsTextLength
+	offset := x & (1<<nodesBitsTextOffset - 1)
+	return text[offset : offset+length]
+}
+
+// EffectiveTLDPlusOne returns the effective top level domain plus one more
+// label. For example, the eTLD+1 for "foo.bar.golang.org" is "golang.org".
+func EffectiveTLDPlusOne(domain string) (string, error) {
+	suffix, _ := PublicSuffix(domain)
+	if len(domain) <= len(suffix) {
+		return "", fmt.Errorf("publicsuffix: cannot derive eTLD+1 for domain %q", domain)
+	}
+	i := len(domain) - len(suffix) - 1
+	if domain[i] != '.' {
+		return "", fmt.Errorf("publicsuffix: invalid public suffix %q for domain %q", suffix, domain)
+	}
+	return domain[:i], nil
+}
diff --git a/vendor/golang.org/x/net/publicsuffix/list_test.go b/vendor/golang.org/x/net/publicsuffix/list_test.go
new file mode 100644
--- /dev/null
+++ b/vendor/golang.org/x/net/publicsuffix/list_test.go
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package publicsuffix
+
+// eTLDPlusOneTestCases come from
+// https://github.com/publicsuffix/list/blob/master/tests/test_psl.txt
+var eTLDPlusOneTestCases = []struct {
+	domain, want string
+}{
+	// Empty input.
+	{"", ""},
+	// Unlisted TLD.
+	{"example", ""},
+	{"example.example", "example.example"},
+	{"b.example.example", "example.example"},
+	{"a.b.example.example", "example.example"},
+	// TLD with only 1 rule.
+	{"biz", ""},
+	{"domain.biz", "domain.biz"},
+	{"b.domain.biz", "domain.biz"},
+	{"a.b.domain.biz", "domain.biz"},
+	// TLD with some 2-level rules.
+	{"com", ""},
+	{"example.com", "example.com"},
+	{"b.example.com", "example.com"},
+	{"a.b.example.com", "example.com"},
+	{"uk.com", ""},
+	{"example.uk.com", "example.uk.com"},
+	{"b.example.uk.com", "example.uk.com"},
+	{"a.b.example.uk.com", "example.uk.com"},
+	{"test.ac", "test.ac"},
+	// TLD with only 1 (wildcard) rule.
+	{"mm", ""},
+	{"c.mm", ""},
+	{"b.c.mm", "b.c.mm"},
+	{"a.b.c.mm", "b.c.mm"},
+	// More complex TLD.
+	{"jp", ""},
+	{"test.jp", "test.jp"},
+	{"www.test.jp", "test.jp"},
+	{"ac.jp", ""},
+	{"test.ac.jp", "test.ac.jp"},
+	{"www.test.ac.jp", "test.ac.jp"},
+	{"kyoto.jp", ""},
+	{"test.kyoto.jp", "test.kyoto.jp"},
+	{"ide.kyoto.jp", ""},
+	{"b.ide.kyoto.jp", "b.ide.kyoto.jp"},
+	{"a.b.ide.kyoto.jp", "b.ide.kyoto.jp"},
+	{"c.kobe.jp", ""},
+	{"b.c.kobe.jp", "b.c.kobe.jp"},
+	{"a.b.c.kobe.jp", "b.c.kobe.jp"},
+	{"city.kobe.jp", "city.kobe.jp"},
+	{"www.city.kobe.jp", "city.kobe.jp"},
+	// TLD with a wildcard rule and exceptions.
+	{"ck", ""},
+	{"test.ck", ""},
+	{"b.test.ck", "b.test.ck"},
+	{"a.b.test.ck", "b.test.ck"},
+	{"www.ck", "www.ck"},
+	{"www.www.ck", "www.ck"},
+	// US K12.
+ {"us", ""}, + {"test.us", "test.us"}, + {"www.test.us", "test.us"}, + {"ak.us", ""}, + {"test.ak.us", "test.ak.us"}, + {"www.test.ak.us", "test.ak.us"}, + {"k12.ak.us", ""}, + {"test.k12.ak.us", "test.k12.ak.us"}, + {"www.test.k12.ak.us", "test.k12.ak.us"}, + // Punycoded IDN labels + {"xn--85x722f.com.cn", "xn--85x722f.com.cn"}, + {"xn--85x722f.xn--55qx5d.cn", "xn--85x722f.xn--55qx5d.cn"}, + {"www.xn--85x722f.xn--55qx5d.cn", "xn--85x722f.xn--55qx5d.cn"}, + {"shishi.xn--55qx5d.cn", "shishi.xn--55qx5d.cn"}, + {"xn--55qx5d.cn", ""}, + {"xn--85x722f.xn--fiqs8s", "xn--85x722f.xn--fiqs8s"}, + {"www.xn--85x722f.xn--fiqs8s", "xn--85x722f.xn--fiqs8s"}, + {"shishi.xn--fiqs8s", "shishi.xn--fiqs8s"}, + {"xn--fiqs8s", ""}, +} + +func TestEffectiveTLDPlusOne(t *testing.T) { + for _, tc := range eTLDPlusOneTestCases { + got, _ := EffectiveTLDPlusOne(tc.domain) + if got != tc.want { + t.Errorf("%q: got %q, want %q", tc.domain, got, tc.want) + } + } +} diff --git a/vendor/golang.org/x/net/publicsuffix/table.go b/vendor/golang.org/x/net/publicsuffix/table.go new file mode 100644 index 0000000..a870b36 --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/table.go @@ -0,0 +1,9534 @@ +// generated by go run gen.go; DO NOT EDIT + +package publicsuffix + +const version = "publicsuffix.org's public_suffix_list.dat, git revision 0f3b07d9aab6d6c9fe74990af98316468d40f488 (2018-01-25T09:22:16Z)" + +const ( + nodesBitsChildren = 10 + nodesBitsICANN = 1 + nodesBitsTextOffset = 15 + nodesBitsTextLength = 6 + + childrenBitsWildcard = 1 + childrenBitsNodeType = 2 + childrenBitsHi = 14 + childrenBitsLo = 14 +) + +const ( + nodeTypeNormal = 0 + nodeTypeException = 1 + nodeTypeParentOnly = 2 +) + +// numTLD is the number of top level domains. +const numTLD = 1551 + +// Text is the combined text of all labels. 
+const text = "0emmafann-arboretumbriamallamaceiobihirosakikamijimatsuzaki234li" + + "ma-cityeatselinogradult3l3p0rtargets-itargivestbytomaritimekeepi" + + "ng120009guacuiababia-goracleaningroks-theatreeastcoastaldefencea" + + "tonsbergjemnes3-ap-northeast-1337bilbaogashimadachicagoboats3-we" + + "bsite-us-east-1billustrationikonanporovnopocznoppdalindesnes3-we" + + "bsite-us-west-1biobirdartcenterprisesakimobetsuitainairforcechir" + + "ealminamiechizeninohekinannestadiybirkenesoddtangenovaranzanpach" + + "igasakievennodesaarlandnpanasonicateringebuilderschmidtre-gaulda" + + "livornobirthplacebitballooningladefinimakanegasakinkobayashikaoi" + + "rminamifuranobjarkoybjerkreimbananarepublicasadelamonedatingjesd" + + "alimitediscountysvardolls3-eu-west-3utilitiesquare7bjugninomiyak" + + "onojorpelandrangedalombardiamonds3-website-us-west-2blancomedica" + + "ltanissettaipeiheijinuyamashinatsukigatakasagotpantheonsitebloom" + + "bergbauernuorochesterbloxcms5ybluedancebmoattachmentsakyotanabel" + + "lunord-aurdalvdalcesalangenirasakinvestmentsalondonetskarmoybmsa" + + "ltdalombardynamisches-dnsaludray-dnsupdaternopilawawebspacebmwed" + + "dinglassassinationalheritagebnpparibaselburgleezebnrwedeployboml" + + "oansalvadordalibabalsanagochihayaakasakawaharaholtalenvironmenta" + + "lconservationishiazainzais-a-candidatebondrayddnsfreebox-osascol" + + "i-picenordre-landraydnsalzburgliwicebonnishigobookinglobalashovh" + + "achinohedmarkarpaczeladzparaglidingloboavistaprintelligencebooml" + + "adbrokesamegawabootsamnangerboschaefflerdalwaysdatabaseballangen" + + "oamishirasatochigiessensiositelekommunikationishiharabostikaruiz" + + "awabostonakijinsekikogentinglogowegroweibolognagasukebotanicalga" + + "rdenishiizunazukis-a-catererbotanicgardenishikatakayamatsushigeb" + + "otanybouncemerckmsdnipropetrovskjervoyagebounty-fullensakerrypro" + + "pertiesampagespeedmobilizeroboutiquebecatholicaxiascolipicenodum" + + "inamiiselectjomemorialomzaporizhzheguris-a-celticsfanishikatsura" + + "git-repostfoldnavybozentsujiiebplacedekagaminord-odalondrinaples" + + "amsclubindalorenskogloppenzaolbia-tempio-olbiatempioolbialystokk" + + "embuchikumagayagawakuyabukihokumakogenglandrivelandrobaknoluokta" + + "chikawakkanaibetsubamericanfamilydscloudcontrolappspotagerbrandy" + + "winevalleybrasiliabrindisibenikebristoloseyouripirangapartmentsa" + + "msungmbhartiffanybritishcolumbialowiezachpomorskienishikawazukam" + + "itsuebroadcastlefrakkestadrudunsandvikcoromantovalle-d-aostathel" + + "lebroadwaybroke-itjxjavald-aostaplesanfranciscofreakunemurorange" + + "iseiyoichippubetsubetsugarugbyengerdalaskanittedallasalleasingle" + + "surancertmgretagajobojis-a-chefarmsteadupontariodejaneirodoybrok" + + "erbronnoysundurbanamexnetlifyis-a-conservativefsnillfjordurhambu" + + "rgminakamichiharabrothermesaverdeatnurembergmodellingmxn--0trq7p" + + "7nnishimerabrowsersafetymarketsangobrumunddalotenkawabrunelastic" + + "beanstalkarumaifarsundyndns-at-workinggrouparisor-fronishinomiya" + + "shironobrusselsanjotkmaxxn--11b4c3dyndns-blogdnsannanishinoomote" + + "gobruxellesannohelplfinancialottebryanskleppgafanquannefrankfurt" + + "ksatxn--12c1fe0bradescorporationishinoshimatsuurabrynewjerseybus" + + "kerudinewportlligatmparliamentoyosatoyonakagyokutoyokawabuzenish" + + "iokoppegardyndns-freeboxoslodingenishitosashimizunaminamibosognd" + + "alottokorozawabuzzweirbwfashionishiwakis-a-cpadualstackspace-to-" + + "rentalstomakomaibarabzhitomirumalatvuopmicrolightingrimstadyndns" + + 
"-homednsanokasaokaminokawanishiaizubangecommunitysnesardegnaroyc" + + "omobaracomparemarkerryhotelsardiniacompute-1computerhistoryofsci" + + "ence-fictioncomsecuritytacticsarlutskashiwazakiyosemitecondoshic" + + "hinohealth-carereformitakeharaconferenceconstructionconsuladohar" + + "uovatrani-andria-barletta-trani-andriaconsultanthropologyconsult" + + "ingvolluxembourgruecontactraniandriabarlettatraniandriacontagema" + + "tsubaracontemporaryarteducationalchikugojomedio-campidano-medioc" + + "ampidanomediocontractorskenconventureshinodearthdfcbankasukabedz" + + "in-the-bandaioiraseeklogest-mon-blogueurovisionionjukudoyamainte" + + "nancebetsuikidsmynasushiobarackmazerbaijan-mayenebakkeshibechamb" + + "agriculturennebudapest-a-la-masionthewifiat-band-campaniacooking" + + "channelsdvrdnsdojoetsuwanouchikujogaszczytnordlandyndns-weberlin" + + "colncoolkuszkolahppiacenzagancooperativano-frankivskodjeffersonc" + + "openhagencyclopedichernivtsiciliacorsicagliaribeiraokinawashiros" + + "atochiokinoshimaizuruhrcorvettemasekasumigaurawa-mazowszextraspa" + + "cekitagatajirissagamiharacosenzakopanerairguardiannakadomarinebr" + + "askaunjargalsaceocosidnsfor-better-thanawatchesarpsborguitarsaru" + + "futsunomiyawakasaikaitakoelncostumedizinhistorischesasayamacouch" + + "potatofriesasebofagecounciluxurycouponsaskatchewancoursesassaris" + + "-a-doctoraycq-acranbrookuwanalyticsaudacreditcardyndns-wikiracre" + + "ditunioncremonashgabadaddjaguarqhachiojiyahoooshikamaishimodatec" + + "rewhoswhokksundyndns-workisboringujoinvillewismillercricketrzync" + + "rimeast-kazakhstanangercrotonexus-3crownprovidercrsvparsauherady" + + "ndns1cruisesavannahgacryptonomichigangwoncuisinellair-traffic-co" + + "ntrolleyculturalcentertainmentranoycuneocupcakecuritibaghdadynns" + + "aves-the-whalessandria-trani-barletta-andriatranibarlettaandriac" + + "xn--12cfi8ixb8luzerncyberlevagangaviikanonjis-a-financialadvisor" + + "-aurdalvivanovodkamisatokashikiwakunigamiharufcfancymrussiacyona" + + "barulsandoycyoutheworkpccwiiheyakagefgushikamifuranorth-kazakhst" + + "anfhvalerfidonnakanotoddenfieldynvpnchernovtsykkylvenetogakushim" + + "otoganewyorkshirecipesaro-urbino-pesarourbinopesaromasvuotnakaiw" + + "amizawassamukawataricohdatsunanjoburgriwataraidyndns-iparmattele" + + "fonicapitalonewspaperfigueresinstagingxn--1ctwolominamatakkokami" + + "noyamaxunusualpersonfilateliafilegearfilminamimakis-a-geekaszuby" + + "finalfinancefineartscholarshipschoolfinlandyroyrvikingulenfinnoy" + + "firebaseappartis-a-greenfirenzefirestonefirmdaleirvikatowicefish" + + "ingolffanschulefitjarfitnessettlementransurlfjalerflesbergflickr" + + "agerotikakamigaharaflightschwarzgwangjuniperflirflogintohmalvika" + + "tsushikabeeldengeluidfloraflorencefloridavvesiidazaifudaigokasel" + + "jordfloripaderbornfloristanohatakahamamurogawaflorogerschweizflo" + + "wersciencecentersciencehistoryflynnhosting-clusterflynnhubarclay" + + "s3-sa-east-1fndfor-ourfor-someeresistancefor-theaterforexrothach" + + "irogatakamoriokalmykiaforgotdnscientistockholmestrandforli-cesen" + + "a-forlicesenaforlikescandynamic-dnscjohnsonforsaleitungsenforsan" + + "dasuoloftrapaniizafortalfortmissoulancashireggio-calabriafortwor" + + "thadanorthwesternmutualforuminamiminowafosnescotlandfotaruis-a-g" + + "urufoxfordebianfozorafredrikstadtvscrapper-sitefreeddnsgeekgalax" + + "yfreemasonryfreesitevadsochildrensgardenfreetlscrappingfreiburgf" + + "reightravelchannelfreseniuscountryestateofdelawarezzoologyfribou" + + "rgfriuli-v-giuliafriuli-ve-giuliafriuli-vegiuliafriuli-venezia-g" + + 
"iuliafriuli-veneziagiuliafriuli-vgiuliafriuliv-giuliafriulive-gi" + + "uliafriulivegiuliafriulivenezia-giuliafriuliveneziagiuliafriuliv" + + "giuliafrlfroganscrysechirurgiens-dentistes-en-francefrognfroland" + + "from-akrehamnfrom-alfrom-arfrom-azfrom-canonoichinomiyakefrom-co" + + "dynaliasdaburfrom-ctravelersinsurancefrom-dchiryukyuragifuchungb" + + "ukharafrom-dedyn-ip24from-flanderservegame-serversicherungfrom-g" + + "ausdalfrom-higashiagatsumagoianiafrom-iafrom-idfrom-ilfrom-inche" + + "onfrom-kservehalflifestylefrom-kyowariasahikawafrom-lancasterfro" + + "m-mangonohejis-a-hard-workerfrom-mdfrom-meethnologyfrom-mifunefr" + + "om-mnfrom-modalenfrom-mservehttpartnerservehumourfrom-mtnfrom-nc" + + "hitachinakagawatchandclockashibatakashimarumorimachidafrom-ndfro" + + "m-nefrom-nh-servebbserveirchitosetogitsuliguriafrom-njaworznotog" + + "awafrom-nminamiogunicomcastresindeviceserveminecraftrdfrom-nv-in" + + "foodnetworkshoppingfrom-nyfrom-ohtawaramotoineppuboliviajessheim" + + "periafrom-oketohnoshooguyfrom-orfrom-padovaksdalfrom-pratohobby-" + + "sitexashorokanaiefrom-rivnefrom-schoenbrunnfrom-sdfrom-tnfrom-tx" + + "n--1lqs03nfrom-utazuerichardlillehammerfeste-ipartservemp3from-v" + + "al-daostavalleyfrom-vtrentino-a-adigefrom-wafrom-wielunnerfrom-w" + + "valled-aostatoilfrom-wyfrosinonefrostalowa-wolawafroyahikobeardu" + + "baiduckdnservep2partyfstavernfujiiderafujikawaguchikonefujiminok" + + "amoenairtelecitychyattorneyagawakeisenbahnfujinomiyadafujiokayam" + + "angyshlakasamatsudontexistmein-vigorgefujisatoshonairtrafficplex" + + "us-1fujisawafujishiroishidakabiratoridefensells-for-lesservepics" + + "ervequakefujitsurugashimaringatlantakaharufujixeroxn--1lqs71dfuj" + + "iyoshidafukayabeatservesarcasmatartanddesignfukuchiyamadafukudom" + + "inichocolatelevisionissedalouvreisenisshingugefukuis-a-hunterfuk" + + "umitsubishigakirovogradoyfukuokazakiryuohadselfipasadenaritakura" + + "shikis-a-knightpointtokamachintaifun-dnsaliasiafukuroishikarikat" + + "urindalfukusakisarazurewebsiteshikagamiishibukawafukuyamagatakah" + + "ashimamakishiwadafunabashiriuchinadafunagatakahatakaishimogosenf" + + "unahashikamiamakusatsumasendaisennangoodyearfundaciofuoiskujukur" + + "iyamaniwakuratextileksvikatsuyamarylandfuosskoczowildlifedorainf" + + "racloudcontrolledogawarabikomaezakirunore-og-uvdalfurnitureggio-" + + "emilia-romagnakatombetsumitakagiizefurubirafurudonostiaarpassage" + + "nservicesettsurgeonshalloffameloyalistjordalshalsenfurukawais-a-" + + "landscaperfusodegaurafussaikisofukushimannorfolkebiblelveruminam" + + "isanrikubetsupportrentino-aadigefutabayamaguchinomigawafutboldly" + + "goingnowhere-for-morenakatsugawafuttsurugiminamitanefuturecmseva" + + "stopolefuturehostingfuturemailingfvgfylkesbiblackfridayfyresdalh" + + "angoutsystemscloudfunctionsevenassisicilyhannanmokuizumodenakaya" + + "mapassenger-associationhannosegawahanyuzenhapmirhareidsbergenhar" + + "stadharvestcelebrationhasamarburghasaminami-alpssells-itrentino-" + + "altoadigehashbanghasudahasura-appatriahasvikazohatogayaitakanabe" + + "autysfjordhatoyamazakitakamiizumisanofidelityhatsukaichikaiseis-" + + "a-linux-useranishiaritabashijonawatehattfjelldalhayashimamotobun" + + "gotakadapliernewmexicoalhazuminobusellsyourhomegoodsewilliamhill" + + "hbodoes-itvedestrandhelsinkitakatakanezawahembygdsforbundhemnesh" + + "aris-a-llamarriottrentino-s-tirollagrigentomologyeonggiehtavuoat" + + "nagaivuotnagaokakyotambabydgoszczecinemaceratabusebastopologyeon" + + "gnamegawakayamadridhemsedalhepforgeherokussldheroyhgtvalledaosta" + + 
"vangerhigashichichibunkyonanaoshimageandsoundandvisionhigashihir" + + "oshimanehigashiizumozakitakyushuaiahigashikagawahigashikagurasoe" + + "dahigashikawakitaaikitamihamadahigashikurumeguromskoghigashimats" + + "ushimarcheapaviancargodaddyn-vpnplus-2higashimatsuyamakitaakitad" + + "aitoigawahigashimurayamamotorcyclesharpfizerhigashinarusembokuki" + + "tamotosumy-routerhigashinehigashiomihachimanaustdalhigashiosakas" + + "ayamanakakogawahigashishirakawamatakaokaluganskydivinghigashisum" + + "iyoshikawaminamiaikitanakagusukumodernhigashitsunoshiroomurahiga" + + "shiurausukitashiobarahigashiyamatokoriyamanashifteditchyouripgfo" + + "ggiahigashiyodogawahigashiyoshinogaris-a-musicianhiraizumisatoka" + + "izukamakurazakitaurayasudahirakatashinagawahiranais-a-nascarfanh" + + "irarahiratsukagawahirayaizuwakamatsubushikusakadogawahistorichou" + + "seshawaiijimaritimoduminamiyamashirokawanabelembetsukubankazunow" + + "tvallee-aosteroyhitachiomiyagildeskaliszhitachiotagoperauniteroi" + + "zumizakisosakitagawahitraeumtgeradellogliastradinghjartdalhjelme" + + "landholeckobierzyceholidayhomeipharmacienshellaspeziahomelinkddi" + + "elddanuorrikuzentakataiwanairlinedre-eikerhomelinuxn--1qqw23ahom" + + "eofficehomesecuritymacaparecidahomesecuritypchofunatoriginsurecr" + + "eationiyodogawahomesenseminehomeunixn--2m4a15ehondahoneywellbein" + + "gzonehongotembaixadahonjyoitakarazukameokameyamatotakadahorninda" + + "lhorseoullensvanguardhortendofinternet-dnshimojis-a-nurservebeer" + + "hospitalhoteleshimokawahotmailhoyangerhoylandetroitskypehumaniti" + + "eshimokitayamahurdalhurumajis-a-painteractivegarsheis-a-patsfanh" + + "yllestadhyogoris-a-personaltrainerhyugawarahyundaiwafunejewelryj" + + "ewishartgalleryjfkharkovanylvenicejgorajlcube-serverrankoshigaya" + + "kumoldelmenhorstalbanshinichinanjlljmphilatelyjnjcphiladelphiaar" + + "eadmyblogsitejoyentrentino-sued-tiroljoyokaichibalatinoipifonymi" + + "nanojpmorganjpnjprshinjournalismailillesandefjordjurkoshunantank" + + "hmelnitskiyamarylhurstjohnkosugekotohiradomainshinjukumanokotour" + + "akouhokutamakis-a-techietis-a-photographerokuappharmacyshimonita" + + "yanagithubusercontentrentino-stirolkounosupplieshinkamigotoyohas" + + "himotottoris-a-therapistoiakouyamashikekouzushimashikis-an-accou" + + "ntantshimonosekikawakozagawakozakis-an-actorkozowinbarrel-of-kno" + + "wledgeologyonagoyaustrheimatunduhrennesoyolasitebizenakasatsunai" + + "rportland-4-salernoboribetsucks3-eu-central-1kpnkppspdnshinshino" + + "tsurgerykrasnodarkredstonekristiansandcatshinshirokristiansundkr" + + "odsheradkrokstadelvaldaostarnbergkrymincommbankhmelnytskyivaokum" + + "atorinokumejimasoykumenantokonamegatakatoris-an-actresshimosuwal" + + "kis-a-playerkunisakis-an-anarchistoricalsocietykunitachiarailway" + + "kunitomigusukumamotoyamashikokuchuokunneppugliakunstsammlungkuns" + + "tunddesignkuokgrouphoenixn--30rr7ykurehabmerkurgankurobelaudible" + + "borkangerkurogiminamiashigarakuroisoftwarendalenugkuromatsunais-" + + "an-artisteinkjerusalembroiderykurotakikawasakis-an-engineeringku" + + "shirogawakustanais-an-entertainerkusupplykutchanelkutnokuzumakis" + + "-bykvafjordkvalsundkvamfamberkeleykvanangenkvinesdalkvinnheradkv" + + "iteseidskogkvitsoykwpspiegelkzmitoyoakemiuramiyazumiyotamanomjon" + + "dalenmlbfanmonstermontrealestatefarmequipmentrentinoa-adigemonza" + + "-brianzaporizhzhiamonza-e-della-brianzapposhintomikasaharamonzab" + + "rianzaptokyotangotsukitahatakamatsukawamonzaebrianzaramonzaedell" + + "abrianzamoonscalemoparachutingmordoviamoriyamatsumotofukemoriyos" + + 
"himinamiawajikis-into-animeiwamarshallstatebankfhappoumormonmout" + + "hagakhanamigawamoroyamatsunomortgagemoscowindmillmoseushistorymo" + + "sjoenmoskeneshinyoshitomiokamogawamosshiojirishirifujiedamosvikn" + + "x-serveronamsskoganeis-a-rockstarachowicemoteginowaniihamatamaka" + + "wajimansionshioyanaizumoviemovimientolgamovistargardmtpchoyodoba" + + "shichikashukujitawaramtranbymuenstermuginozawaonsenmuikamisunaga" + + "wamukodairamulhouserveblogspotrentinoaadigemunakatanemuncienciam" + + "uosattemuphonefosshirahamatonbetsurnadalmurmanskolobrzegersundmu" + + "rotorcraftrentinoalto-adigemusashimurayamatsusakahoginankokubunj" + + "is-into-carshimotsukemusashinoharamuseetrentinoaltoadigemuseumve" + + "renigingmusicarbonia-iglesias-carboniaiglesiascarboniamutsuzawam" + + "y-vigorlicemy-wanggouvicenzamyactivedirectorymyasustor-elvdalmyc" + + "dn77-securecifedexhibitionmyddnskingmydissentrentinos-tirolmydro" + + "boehringerikemydshirakofuefukihaborokunohealthcareershiranukanag" + + "awamyeffectrentinostirolmyfirewallonieruchomoscienceandindustryn" + + "myfritzmyftpaccesshiraois-into-cartoonshimotsumamyhome-serversai" + + "lleshiraokananiimihoboleslawiechristiansburgrondarmykolaivaporcl" + + "oudmymailermymediapchristmasakinderoymyokohamamatsudamypephotogr" + + "aphysiomypetshiratakahagitlabormyphotoshibalestrandabergamoareke" + + "ymachinewhampshirebungoonombresciamypsxn--32vp30hagebostadmysecu" + + "ritycamerakermyshopblockshishikuis-into-gamessinazawamytis-a-boo" + + "kkeeperugiamytuleapiagetmyipictetrentinosud-tirolmyvnchromedicin" + + "akamagayachtsantabarbaramywireitrentinosudtirolpinkomaganepionee" + + "rpippulawypiszpittsburghofauskedsmokorsetagayasells-for-unzenpiw" + + "atepixolinopizzapkomakiyosunndalplanetariuminnesotaketakatsukis-" + + "certifieducatorahimeshimamateramobilyplantationplantshitaramapla" + + "tformshangrilanshizukuishimofusaitamatsukuris-lostre-toteneis-a-" + + "republicancerresearchaeologicaliforniaplaystationplazaplchungnam" + + "dalseidfjordyndns-mailucaniaplumbingoplurinacionalpmnpodzonepohl" + + "poivronpokerpokrovskomatsushimasfjordenpoliticarrierpolitiendapo" + + "lkowicepoltavalle-aostarostwodzislawindowshizuokanazawapomorzesz" + + "owinnershoujis-not-certifiedunetbankhakassiapordenonepornporsang" + + "erporsanguidell-ogliastraderporsgrunnanyokoshibahikariwanumatake" + + "tomisatoshimapoznanpraxis-a-bruinsfanprdpreservationpresidioprgm" + + "rprimeldalprincipeprivatizehealthinsuranceprochowiceproductionsh" + + "owaprofesionalprogressivegaskvolloabathsbchurchaseljeepsongdalen" + + "viknaharimalopolskanlandyndns-office-on-the-webcampinashikiminoh" + + "kurapromombetsurfbsbxn--12co0c3b4evalleaostaticsavonarusawaprope" + + "rtyprotectionprotonetrentinosued-tirolprudentialpruszkowioshowti" + + "memergencyahabahcavuotnagarahkkeravjuegoshikikonaikawachinaganoh" + + "aramcoachampionshiphoptobishimagentositecnologiaprzeworskogptplu" + + "sgardenpupictureshisognepvhaibarakitahiroshimaoris-a-lawyerpvtre" + + "ntinosuedtirolpwciprianiigataishinomakindlegnicafederationpzqldq" + + "ponqslgbtrentoyonezawaquicksyteshriramlidlugolekafjordquipelemen" + + "tsienarutomobellevuelosangelesjabbottrevisohughesigdalqvcirclego" + + "doesntexisteingeekashiharasrtroandinosaurepaircraftrogstadsrvare" + + "servecounterstrikestoragestordalstoregontrailroadstorfjordstorjd" + + "evcloudfrontdoorstpetersburgstreamsterdamnserverbaniastudiostudy" + + "ndns-at-homedepotenzamamidsundstuff-4-salestufftoread-booksnesir" + + "dalstuttgartromsakakinokiasusakis-savedsusonosuzakaniepcesuzukan" + + 
"makiwiensuzukis-slickharkivalleeaosteigensvalbardunloppacificirc" + + "ustomersveiosvelvikomvuxn--2scrj9choshibuyachiyodavvenjargaulard" + + "alowiczest-le-patronsvizzerasvn-reposjcbnlswedenswidnicartoonart" + + "decologiaswiebodzindianapolis-a-bloggerswiftcoverswinoujsciencea" + + "ndhistoryswisshikis-uberleetrentino-sud-tirolsynology-dslingtush" + + "uissier-justicetuvalle-daostatic-accessnoasaitotaltuxfamilytwmai" + + "lvenneslaskerrylogisticsokaneyamazoevestfoldvestnesokndalvestre-" + + "slidrepbodynathomebuiltrusteevestre-totennishiawakuravestvagoyve" + + "velstadvibo-valentiavibovalentiavideovillasnesoddenmarkhangelskj" + + "akdnepropetrovskiervaapsteiermarkongsvingervinnicasacamdvrcampin" + + "agrandebugattipschlesischesolarssonvinnytsiavipsinaappiemontevir" + + "giniavirtualvirtueeldomeindianmarketingvirtuelvisakegawaviterbok" + + "nowsitallvivoldavixn--3bst00misakis-foundationvlaanderenvladikav" + + "kazimierz-dolnyvladimirvlogoipilotshisuifuelblagdenesnaaseraling" + + "enkainanaejrietisalatinabenonichryslervolkswagentsolognevologdan" + + "skoninjambylvolvolkenkundenvolyngdalvossevangenvotevotingvotoyon" + + "owiwatsukiyonoticiaskimitsubatamibudejjuedischesapeakebayernrtrv" + + "arggatromsojamisonwloclawekonsulatrobeepilepsydneywmflabsolundbe" + + "ckommuneworldworse-thandawowitdkonskowolayangrouphilipsynology-d" + + "iskstationwpdevcloudwritesthisblogsytewroclawithgoogleapisa-hock" + + "eynutsiracusakatakinouewtcmisasaguris-gonewtfbx-ostrowwlkpmgunma" + + "nxn--1ck2e1barclaycards3-fips-us-gov-west-1wuozuwwwithyoutubenev" + + "entoeidsvollwzmiuwajimaxn--42c2d9axn--45br5cylxn--45brj9citadeli" + + "veryxn--45q11citichernigovernmentoyotaris-a-cubicle-slavellinota" + + "irestaurantoyotomiyazakis-a-democratoyotsukaidoxn--4gbriminingxn" + + "--4it168dxn--4it797kooris-a-soxfanxn--4pvxs4allxn--54b7fta0ccivi" + + "laviationxn--55qw42gxn--55qx5dxn--5js045dxn--5rtp49civilisationx" + + "n--5rtq34kopervikhersonxn--5su34j936bgsgxn--5tzm5gxn--6btw5axn--" + + "6frz82gxn--6orx2rxn--6qq986b3xlxn--7t0a264civilizationxn--80adxh" + + "ksolutionsilkomforbargainstitutelemarkarateu-1xn--80ao21axn--80a" + + "qecdr1axn--80asehdbarsyonlinewhollandiscoveryonaguniversityoriik" + + "aratsuginamikatagamilitaryoshiokaracoldwarmiastageu-2xn--80aswgx" + + "n--80audnedalnxn--8ltr62koryokamikawanehonbetsurutaharaxn--8pvr4" + + "uxn--8y0a063axn--90a3academiamicaaarborteaches-yogasawaracingxn-" + + "-90aeroportalaheadjudaicable-modemocraciaxn--90aishobarakawagoex" + + "n--90azhytomyrxn--9dbhblg6dietcimdbashkiriauthordalandeportenrig" + + "htathomeftpalmaseratibigawastronomy-gatewayokosukanzakiyosatokig" + + "awagrocerybnikahokutobamagazineat-url-o-g-i-natuurwetenschappena" + + "umburgjerdrumeteorappalermomahachijolstereportarumizusawaetnagah" + + "amaroygardendoftheinternetflixilovecollegefantasyleaguernseybolt" + + "arnobrzegyptianaturhistorisches3-ap-northeast-2ixboxenapponazure" + + "-mobile12hpaleobirabogadocscbgdyniabruzzoologicalvinklein-addram" + + "menuernberggfarmerseine164xn--9dbq2axn--9et52uxn--9krt00axn--and" + + "y-iraxn--aroport-byandexn--3ds443gxn--asky-iraxn--aurskog-hland-" + + "jnbasilicataniautomotiveconomiasakuchinotsuchiurakawalmartataran" + + "toyakokonoehimejibmdgcahcesuolocalhostrodawaraumalborkdalaziocea" + + "nographics3-eu-west-1xn--avery-yuasakuhokkaidoomdnsiskinkyotobet" + + "sumidatlanticivilwarmanagementoyouraxn--b-5gaxn--b4w605ferdxn--b" + + "ck1b9a5dre4claimsantacruzsantafedjejuifminamiizukamishihoronobea" + + "uxartsandcraftsantamariakexn--bdddj-mrabdxn--bearalvhki-y4axn--b" + + 
"erlevg-jxaxn--bhcavuotna-s4axn--bhccavuotna-k7axn--bidr-5nachika" + + "tsuuraxn--bievt-0qa2xn--bjarky-fyaotsurreyxn--bjddar-ptamayufuet" + + "tertdasnetzxn--blt-elabourxn--bmlo-graingerxn--bod-2natalxn--brn" + + "ny-wuacademy-firewall-gatewayxn--brnnysund-m8accident-investigat" + + "ion-aptibleaseating-organicbcn-north-1xn--brum-voagatrysiljanxn-" + + "-btsfjord-9zaxn--c1avgxn--c2br7gxn--c3s14misawaxn--cck2b3basketb" + + "allyngenhktatsunoddautoscanadaejeonbukarasjohkamikoaniikappueblo" + + "ckbustermezgoraugustowadaegubambleclerc66xn--cg4bkis-very-badajo" + + "zxn--ciqpnxn--clchc0ea0b2g2a9gcdn77-sslattumisconfusedxn--comuni" + + "caes-v6a2oxn--correios-e-telecomunicaes-ghc29axn--czr694batodayu" + + "kindustriaveroykeniwaizumiotsukumiyamazonawsadodgemologicallilly" + + "ombolzanord-frontiereviewskrakowebhostingjerstadotsuruokakegawau" + + "kraanghkepnogifts3-ap-southeast-2xn--czrs0tulanxesslupskommunalf" + + "orbundxn--czru2dxn--czrw28batsfjordishakotanhlfanhs3-us-gov-west" + + "-1xn--d1acj3bauhausposts-and-telecommunicationsncfdisrechtranaka" + + "muratajimidoriopretogoldpoint2thisamitsukeu-3xn--d1alfaromeoxn--" + + "d1atuneslzxn--d5qv7z876clanbibaidarmeniaxn--davvenjrga-y4axn--dj" + + "rs72d6uyxn--djty4kosaigawaxn--dnna-grajewolterskluwerxn--drbak-w" + + "uaxn--dyry-iraxn--e1a4cldmailuccapetownnews-stagingrongaxn--eckv" + + "dtc9dxn--efvn9somaxn--efvy88hair-surveillancexn--ehqz56nxn--elqq" + + "16hakatanortonxn--estv75gxn--eveni-0qa01gaxn--f6qx53axn--fct429k" + + "osakaerodromegallupinbarreauctionflfanfshostrowiecaseihichisobet" + + "suldalimoliserniaustraliaisondriobranconagawalesundemoneyokozebi" + + "nordreisa-geekaragandamusementashkentatamotors3-ap-southeast-1pa" + + "sswordd-dnshome-webservercellikes-piedmonticellocus-4xn--fhbeiar" + + "nxn--finny-yuaxn--fiq228c5hsomnarviikamitondabayashiogamagorizia" + + "xn--fiq64bbcasertairavennagatorockartuzyukuhashimoichinosekigaha" + + "ravocatanzarowebredirectmetacentrumetlifeinsurancempresashibetsu" + + "kuiitatebayashiibajddarchitecturealtydalipayomitanoceanographiqu" + + "emrevistanbulminamidaitomandalimanowarudaurskog-holandroverhalla" + + "-speziajudygarlanddnss3-ap-south-1kappchizippodhaleangaviikadena" + + "amesjevuemielno-ip6xn--fiqs8sooxn--fiqz9sopotritonxn--fjord-lrax" + + "n--fjq720axn--fl-ziaxn--flor-jraxn--flw351exn--fpcrj9c3dxn--frde" + + "-grandrapidsor-odalxn--frna-woaraisaijosoyrorosor-varangerxn--fr" + + "ya-hraxn--fzc2c9e2clickashiwaraxn--fzys8d69uvgmailxn--g2xx48clin" + + "ichernihivguccieszynissandnessjoenissayokkaichiropracticheltenha" + + "m-radio-opencraftrainingripescaravantaaxn--gckr3f0fbxosaxoxn--ge" + + "crj9cliniquenoharaxn--ggaviika-8ya47hakodatexn--gildeskl-g0axn--" + + "givuotna-8yasakaiminatoyookannamilanotteroyxn--gjvik-wuaxn--gk3a" + + "t1exn--gls-elacaixaxn--gmq050is-very-evillagexn--gmqw5axn--h-2fa" + + "ilxn--h1aeghakonexn--h2breg3evenesorfoldxn--h2brj9c8clintonoshoe" + + "santoandreamhostersanukis-a-designerimarnardalucernexn--h3cuzk1d" + + "igitalxn--hbmer-xqaxn--hcesuolo-7ya35bbtattoolsztynsettlers3-us-" + + "west-1xn--hery-iraxn--hgebostad-g3axn--hmmrfeasta-s4accident-pre" + + "vention-webhopenairbusantiquest-a-la-maisondre-landroidvagsoyeri" + + "cssonyoursidealerimo-i-ranadexeterxn--hnefoss-q1axn--hobl-iraxn-" + + "-holtlen-hxaxn--hpmir-xqaxn--hxt814exn--hyanger-q1axn--hylandet-" + + "54axn--i1b6b1a6a2exn--imr513nxn--indery-fyasugivingxn--io0a7is-v" + + "ery-goodhandsonxn--j1aefedorapeopleikangerxn--j1amhakubahccavuot" + + "nagareyamakeupowiathletajimabaridagawalbrzycharternidxn--j6w193g" + + 
"xn--jlq61u9w7bbvacationswatch-and-clockerhcloudns3-us-west-2xn--" + + "jlster-byasuokanraxn--jrpeland-54axn--jvr189mishimasudaxn--k7yn9" + + "5exn--karmy-yuaxn--kbrq7oxn--kcrx77d1x4axn--kfjord-iuaxn--klbu-w" + + "oaxn--klt787dxn--kltp7dxn--kltx9axn--klty5xn--3e0b707exn--koluok" + + "ta-7ya57hakuis-a-liberalxn--kprw13dxn--kpry57dxn--kpu716fedorapr" + + "ojectransportexn--kput3is-very-nicexn--krager-gyatomitamamuraxn-" + + "-kranghke-b0axn--krdsherad-m8axn--krehamn-dxaxn--krjohka-hwab49j" + + "dfastlylbarcelonagasakikuchikuseikarugamvikarasjokarasuyamarugam" + + "e-hostrolekamiminers3-external-1xn--ksnes-uuaxn--kvfjord-nxaxn--" + + "kvitsy-fyatsukanumazuryxn--kvnangen-k0axn--l-1fairwindsorocabals" + + "fjordxn--l1accentureklamborghinikis-very-sweetpepperxn--laheadju" + + "-7yatsushiroxn--langevg-jxaxn--lcvr32dxn--ldingen-q1axn--leagavi" + + "ika-52bentleyurihonjournalistgoryusuharavoues3-eu-west-2xn--lesu" + + "nd-huaxn--lgbbat1ad8jelenia-goraxn--lgrd-poacctunkongsbergxn--lh" + + "ppi-xqaxn--linds-pramericanarturystykanoyaltakasakiyokawaraxn--l" + + "ns-qlapyatigorskoseis-a-studentalxn--loabt-0qaxn--lrdal-sraxn--l" + + "renskog-54axn--lt-liaclothingdustkagoshimalselvendrellukowhaling" + + "rossetouchijiwadegreexn--lten-granexn--lury-iraxn--m3ch0j3axn--m" + + "ely-iraxn--merker-kuaxn--mgb2ddesorreisahayakawakamiichikawamisa" + + "toursimple-urlxn--mgb9awbfeiraquarellebesbyglandynulvikasuyanaga" + + "waxn--mgba3a3ejtuscanyxn--mgba3a4f16axn--mgba3a4franamizuholding" + + "smilevangerxn--mgba7c0bbn0axn--mgbaakc7dvfermochizukirkenesbscho" + + "koladenxn--mgbaam7a8hakusandiegooglecodespotrentino-alto-adigexn" + + "--mgbab2bdxn--mgbai9a5eva00beppublishproxyzjampagefrontappalmspr" + + "ingsakerxn--mgbai9azgqp6jeonnamerikawauexn--mgbayh7gpalacexn--mg" + + "bb9fbpobanazawaxn--mgbbh1a71exn--mgbc0a9azcgxn--mgbca7dzdoxn--mg" + + "berp4a5d4a87gxn--mgberp4a5d4arxn--mgbgu82axn--mgbi4ecexposedxn--" + + "mgbpl2fhskoleirfjordxn--mgbqly7c0a67fbcngroundhandlingroznyxn--m" + + "gbqly7cvafranziskanerdpolicexn--mgbt3dhdxn--mgbtf8flatangerxn--m" + + "gbtx2beskidyn-o-saurlandes3-website-ap-northeast-1xn--mgbx4cd0ab" + + "bvieeexn--mix082ferraraxn--mix891ferrarittoguraxn--mjndalen-64ax" + + "n--mk0axindigenaklodzkochikushinonsenergyxn--mk1bu44cnsaobernard" + + "ownloadyndns-picsaogoncartierxn--mkru45is-with-thebandovre-eiker" + + "xn--mlatvuopmi-s4axn--mli-tlaquilanciaxn--mlselv-iuaxn--moreke-j" + + "uaxn--mori-qsakuragawaxn--mosjen-eyawaraxn--mot-tlarvikosherbroo" + + "kegawaxn--mre-og-romsdal-qqbestbuyshouses3-website-ap-southeast-" + + "1xn--msy-ula0haldenxn--mtta-vrjjat-k7afamilycompanycntoystre-sli" + + "drettozawaxn--muost-0qaxn--mxtq1missilezajsklabudhabikinokawabar" + + "thaebaruminamiuonumassa-carrara-massacarraramassabusinessebykleg" + + "allocalhistoryggeelvinckaufenxn--ngbc5azdxn--ngbe9e0axn--ngbrxn-" + + "-3hcrj9cistrondheimmobilienxn--nit225koshimizumakizunokunimimata" + + "kasugais-a-teacherkassymantechnologyxn--nmesjevuemie-tcbaltimore" + + "-og-romsdalpha-myqnapcloudaccesscambridgestoneuesortlandxn--nnx3" + + "88axn--nodessakuraisleofmanchesterxn--nqv7fs00emaxn--nry-yla5gxn" + + "--ntso0iqx3axn--ntsq17gxn--nttery-byaeserveexchangexn--nvuotna-h" + + "waxn--nyqy26axn--o1achattanooganordkappimientakazakis-leetnedalx" + + "n--o3cw4halsaintlouis-a-anarchistoireggiocalabriaxn--o3cyx2axn--" + + "od0algxn--od0aq3betainaboxfusejnynysagaeroclubmedecincinnationwi" + + "dealstahaugesunderseaportsinfolldalabamagasakishimabaraogakibich" + + "uomutashinaindustriesteambulanceu-4xn--ogbpf8flekkefjordxn--oppe" + + 
"grd-ixaxn--ostery-fyawatahamaxn--osyro-wuaxn--p1acferreroticampo" + + "bassociatestinguovdageaidnuslivinghistoryxn--p1aissmarterthanyou" + + "xn--pbt977coguchikuzenxn--pgbs0dhlxn--porsgu-sta26fetsundynv6xn-" + + "-pssu33lxn--pssy2uxn--q9jyb4collectionxn--qcka1pmckinseyxn--qqqt" + + "11misugitokuyamatsumaebashikshacknetrentino-suedtirolxn--qxamune" + + "ustarhubsoruminternationalfirearmshintokushimaxn--rady-iraxn--rd" + + "al-poaxn--rde-ulavagiskexn--rdy-0nabariwchonanbuildingroks-thisa" + + "yamanobeokakudamatsuexn--rennesy-v1axn--rhkkervju-01aflakstadaok" + + "agakicks-assedicolognextdirectozsdeloittemp-dnsaotomelhusdecorat" + + "iveartsapodlasiellaktyubinskiptveterinairealtorlandyndns-remotew" + + "dyndns-serverdaluroyxn--rholt-mragowoodsideltaitogliattiresouthc" + + "arolinarvikomonoxn--rhqv96gxn--rht27zxn--rht3dxn--rht61exn--risa" + + "-5nativeamericanantiquesouthwestfalenxn--risr-iraxn--rland-uuaxn" + + "--rlingen-mxaxn--rmskog-byaxn--rny31hammarfeastafricapebretonami" + + "crosoftbankautokeinowruzhgorodeoxn--rovu88bhzcasinorddalindaskoy" + + "abearalvahkijobserverisignieznogataijinfinitintuitaxihuanikkoebe" + + "nhavnikolaevents3-website-ap-southeast-2xn--rros-granvindafjordx" + + "n--rskog-uuaxn--rst-0naturalhistorymuseumcenterxn--rsta-francais" + + "eharaxn--rvc1e0am3exn--ryken-vuaxn--ryrvik-byaxn--s-1faithruhere" + + "dumbrellajollamericanexpressexyxn--s9brj9colonialwilliamsburgrpa" + + "rocherkasyno-dsapporoxn--sandnessjen-ogbizxn--sandy-yuaxn--seral" + + "-lraxn--ses554gxn--sgne-gratangenxn--skierv-utazassnasabaerobati" + + "cketsowaxn--skjervy-v1axn--skjk-soaxn--sknit-yqaxn--sknland-fxax" + + "n--slat-5naturalsciencesnaturellespjelkavikomorotsukamiokamikita" + + "yamatsuris-a-socialistcgrouphdxn--slt-elabcgxn--smla-hraxn--smna" + + "-gratis-a-bulls-fanxn--snase-nraxn--sndre-land-0cbremangerxn--sn" + + "es-poaxn--snsa-roaxn--sr-aurdal-l8axn--sr-fron-q1axn--sr-odal-q1" + + "axn--sr-varanger-ggbieigersundivtasvuodnakaniikawatanaguraxauste" + + "vollavangenaval-d-aosta-valleyokotebinagisoccertificationavigati" + + "onavoibestadds3-ca-central-1xn--srfold-byaxn--srreisa-q1axn--sru" + + "m-grazxn--stfold-9xaxn--stjrdal-s1axn--stjrdalshalsen-sqbielawal" + + "terxn--stre-toten-zcbspreadbettingxn--t60b56axn--tckweatherchann" + + "elxn--tiq49xqyjetztrentino-sudtirolxn--tjme-hraxn--tn0agrinet-fr" + + "eakspydebergxn--tnsberg-q1axn--tor131oxn--trany-yuaxn--trgstad-r" + + "1axn--trna-woaxn--troms-zuaxn--tysvr-vraxn--uc0atvaroyxn--uc0ay4" + + "axn--uist22hamurakamigoris-a-libertarianxn--uisz3gxn--unjrga-rta" + + "obaomoriguchiharagusartsrlxn--unup4yxn--uuwu58axn--vads-jraxn--v" + + "ard-jraxn--vegrshei-c0axn--vermgensberater-ctbiellaakesvuemielec" + + "ceverbankareliancevje-og-hornnes3-website-eu-west-1xn--vermgensb" + + "eratung-pwbieszczadygeyachimataikikugawarszawashingtondclkariyam" + + "elbournexn--vestvgy-ixa6oxn--vg-yiabkhaziaxn--vgan-qoaxn--vgsy-q" + + "oa0jevnakershuscultureggioemiliaromagnamsosnowiechoseiroumuenche" + + "nxn--vgu402coloradoplateaudioxn--vhquvbarrell-of-knowledgeometre" + + "-experts-comptables3-us-east-2xn--vler-qoaxn--vre-eiker-k8axn--v" + + "rggt-xqadxn--vry-yla5gxn--vuq861bievatmallorcadaques3-website-sa" + + "-east-1xn--w4r85el8fhu5dnraxn--w4rs40lxn--wcvs22dxn--wgbh1columb" + + "usheyxn--wgbl6axn--xhq521bifukagawashtenawdev-myqnapcloudapplebt" + + "imnetzlgjovikarlsoyusuisserveftpanamatta-varjjatjeldsundivttasvu" + + "otnakanojohanamakinoharaxn--xkc2al3hye2axn--xkc2dl3a5ee0hangglid" + + "ingxn--y9a3aquariumitourismolangevagrarchaeologyeongbukmpspbaref" + + 
"ootballfinanzgorzeleccoffeedbackplaneapplinziiyamanouchikuhokury" + + "ugasakitchenayorovigovtateshinanomachimkentateyamaustinnavuotnar" + + "ashinobninsk12xn--yer-znaturbruksgymnxn--yfro4i67oxn--ygarden-p1" + + "axn--ygbi2ammxn--3oq18vl8pn36axn--ystre-slidre-ujbihorologyuucon" + + "nectjmaxxxfinityuzawaxn--zbx025dxn--zf0ao64axn--zf0avxn--3pxu8ko" + + "nyvelolxn--zfr164bikedagestangeorgeorgiaxperiaxz" + +// nodes is the list of nodes. Each node is represented as a uint32, which +// encodes the node's children, wildcard bit and node type (as an index into +// the children array), ICANN bit and text. +// +// If the table was generated with the -comments flag, there is a //-comment +// after each node's data. In it is the nodes-array indexes of the children, +// formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The +// nodeType is printed as + for normal, ! for exception, and o for parent-only +// nodes that have children but don't match a domain label in their own right. +// An I denotes an ICANN domain. +// +// The layout within the uint32, from MSB to LSB, is: +// [ 0 bits] unused +// [10 bits] children index +// [ 1 bits] ICANN bit +// [15 bits] text index +// [ 6 bits] text length +var nodes = [...]uint32{ + 0x31a803, + 0x284d84, + 0x382f06, + 0x2f37c3, + 0x2f37c6, + 0x37af86, + 0x3a7a03, + 0x31b604, + 0x322487, + 0x382b48, + 0x1a00742, + 0x32e147, + 0x3672c9, + 0x2b4eca, + 0x2b4ecb, + 0x232183, + 0x2ab9c6, + 0x238485, + 0x1e01482, + 0x203b44, + 0x260543, + 0x201485, + 0x2215842, + 0x332603, + 0x271b0c4, + 0x31fe05, + 0x2a00102, + 0x38194e, + 0x256483, + 0x39cbc6, + 0x2e03d02, + 0x2c8047, + 0x23e146, + 0x3205c42, + 0x257dc3, + 0x257dc4, + 0x357406, + 0x205d08, + 0x277146, + 0x302004, + 0x3600602, + 0x33acc9, + 0x211307, + 0x347986, + 0x3c1109, + 0x2c78c8, + 0x331004, + 0x241286, + 0x230106, + 0x3a00582, + 0x3a234f, + 0x21f4ce, + 0x226484, + 0x2c1545, + 0x31a705, + 0x2f6809, + 0x244689, + 0x357c07, + 0x22bbc6, + 0x206dc3, + 0x3e03942, + 0x21d6c3, + 0x220d4a, + 0x21fbc3, + 0x3bde45, + 0x2f2542, + 0x370749, + 0x4200282, + 0x216c84, + 0x2ef006, + 0x2bb6c5, + 0x2d7c04, + 0x4a14344, + 0x205583, + 0x2374c4, + 0x4e02b82, + 0x267184, + 0x527eac4, + 0x39004a, + 0x5600cc2, + 0x35c447, + 0x2774c8, + 0x6207ec2, + 0x340687, + 0x2bde44, + 0x2bde47, + 0x3b9605, + 0x339407, + 0x31ca86, + 0x325384, + 0x3314c5, + 0x298307, + 0x720fc02, + 0x335a43, + 0x21ab82, + 0x3aae43, + 0x7612442, + 0x27f485, + 0x7a023c2, + 0x293584, + 0x276005, + 0x2263c7, + 0x20974e, + 0x2391c4, + 0x238cc4, + 0x20b583, + 0x364209, + 0x30e2cb, + 0x259e48, + 0x3c0ec8, + 0x316488, + 0x215cc8, + 0x330e4a, + 0x339307, + 0x309d86, + 0x7e6e442, + 0x345243, + 0x355943, + 0x35d344, + 0x3a7a43, + 0x32f6c3, + 0x172a782, + 0x8203102, + 0x27b385, + 0x28df86, + 0x2a9f04, + 0x369187, + 0x23ce86, + 0x3806c4, + 0x3806c7, + 0x205a83, + 0x86c31c2, + 0x8b14902, + 0x8e21182, + 0x221186, + 0x9200882, + 0x286c45, + 0x32bcc3, + 0x3c6444, + 0x2e3804, + 0x2e3805, + 0x2053c3, + 0x96b6c03, + 0x9a09342, + 0x289b05, + 0x289b0b, + 0x20bd06, + 0x331f4b, + 0x22aa44, + 0x20cec9, + 0x20d784, + 0x9e0d9c2, + 0x20ef03, + 0x20fec3, + 0x1610702, + 0x2fb9c3, + 0x21070a, + 0xa200302, + 0x203dc5, + 0x2d400a, + 0x243384, + 0x210f03, + 0x212984, + 0x213b83, + 0x213b84, + 0x213b87, + 0x2153c5, + 0x215705, + 0x216d46, + 0x2170c6, + 0x217d43, + 0x21a708, + 0x212d43, + 0xa6004c2, + 0x22c3c8, + 0x3878cb, + 0x223088, + 0x225f06, + 0x227447, + 0x22a1c8, + 0xb604002, + 0xbaf21c2, + 0x23b388, + 0x3031c7, + 0x207a45, + 0x207a48, + 0x383c48, + 0x2fa9c3, + 0x22f384, + 
0x35d382, + 0xbe2f582, + 0xc201bc2, + 0xca30502, + 0x230503, + 0xce03cc2, + 0x31b5c3, + 0x2f1b84, + 0x20bf83, + 0x335e04, + 0x322b8b, + 0x237c03, + 0x2db106, + 0x237c04, + 0x2e21ce, + 0x2669c5, + 0x33d7c8, + 0x251107, + 0x25110a, + 0x2342c3, + 0x34f747, + 0x30e485, + 0x2342c4, + 0x2d4b86, + 0x2d4b87, + 0x2d0204, + 0x37d587, + 0x209a84, + 0x340c44, + 0x340c46, + 0x25d944, + 0x39db46, + 0x207803, + 0x207808, + 0x21a988, + 0x238c83, + 0x2fb983, + 0x3a8c04, + 0x3ae4c3, + 0xd24d5c2, + 0xd6d2fc2, + 0x2083c3, + 0x205646, + 0x241383, + 0x354bc4, + 0xda4b182, + 0x24cb83, + 0x339c03, + 0x218882, + 0xde03c02, + 0x2c0b06, + 0x23c007, + 0x2eab45, + 0x38a504, + 0x2981c5, + 0x27e687, + 0x2d84c9, + 0x2dcd46, + 0x307788, + 0x2eaa46, + 0xe2010c2, + 0x2f1408, + 0x2f3e06, + 0x223a85, + 0x30fe07, + 0x310344, + 0x310345, + 0x2010c4, + 0x2010c8, + 0xe619382, + 0xea02642, + 0x3292c6, + 0x202648, + 0x34d485, + 0x34df06, + 0x350108, + 0x36d548, + 0xee1f8c5, + 0xf21d0c4, + 0x38ca87, + 0xf60d642, + 0xfaefa02, + 0x10e02c42, + 0x2ef105, + 0x373905, + 0x3c1546, + 0x3208c7, + 0x3973c7, + 0x1160be03, + 0x26f507, + 0x2b99c8, + 0x231a09, + 0x381b07, + 0x2321c7, + 0x232b08, + 0x233306, + 0x233dc6, + 0x234a0c, + 0x235e4a, + 0x2364c7, + 0x23834b, + 0x23be47, + 0x23be4e, + 0x1a23d104, + 0x23d744, + 0x23e847, + 0x2616c7, + 0x243806, + 0x243807, + 0x243c87, + 0x1a630a42, + 0x2449c6, + 0x2449ca, + 0x244f4b, + 0x246d07, + 0x2478c5, + 0x247c03, + 0x248146, + 0x248147, + 0x322643, + 0x1aa022c2, + 0x248a4a, + 0x1af68802, + 0x1b24d602, + 0x1b64afc2, + 0x1ba3e242, + 0x24cc85, + 0x24d2c4, + 0x1c204ac2, + 0x267205, + 0x245543, + 0x20d885, + 0x215bc4, + 0x20f984, + 0x209d86, + 0x2505c6, + 0x289d03, + 0x3b6d84, + 0x3ac2c3, + 0x1ca02e02, + 0x3582c4, + 0x3582c6, + 0x38d005, + 0x36e3c6, + 0x30ff08, + 0x227b84, + 0x397848, + 0x399a45, + 0x311708, + 0x36c6c6, + 0x265847, + 0x27b984, + 0x27b986, + 0x26f803, + 0x3917c3, + 0x20b648, + 0x31c684, + 0x354fc7, + 0x2d2906, + 0x2d2909, + 0x20a1c8, + 0x317908, + 0x338884, + 0x2067c3, + 0x23dd42, + 0x1da4c3c2, + 0x1de14202, + 0x207583, + 0x1e20a502, + 0x3225c4, + 0x2440c6, + 0x335b45, + 0x283403, + 0x234ec4, + 0x2b1a07, + 0x336bc3, + 0x37cfc8, + 0x21ea85, + 0x25f7c3, + 0x275f85, + 0x2760c4, + 0x2f9c06, + 0x222704, + 0x225986, + 0x226306, + 0x357d84, + 0x23c203, + 0x1e614582, + 0x238ac5, + 0x2011c3, + 0x1ea05ec2, + 0x2319c3, + 0x21c8c5, + 0x237583, + 0x237589, + 0x1ee01f02, + 0x1f608ac2, + 0x289645, + 0x219286, + 0x37c8c6, + 0x2bfcc8, + 0x2bfccb, + 0x20568b, + 0x21c145, + 0x2ead45, + 0x2c3909, + 0x1603142, + 0x357f48, + 0x23e504, + 0x1fe01b02, + 0x20aac3, + 0x20661886, + 0x224fc8, + 0x20a003c2, + 0x307348, + 0x20e0a6c2, + 0x23994a, + 0x212c8d03, + 0x39f286, + 0x3b5048, + 0x389ac8, + 0x3ba046, + 0x377d47, + 0x3a2547, + 0x23fe0a, + 0x243404, + 0x352f84, + 0x366b89, + 0x21ba1d45, + 0x21f6c6, + 0x200143, + 0x255184, + 0x21e25784, + 0x323307, + 0x22f607, + 0x364044, + 0x2d3345, + 0x3c1608, + 0x37b847, + 0x38fc87, + 0x22208882, + 0x23b9c4, + 0x28e948, + 0x24e244, + 0x252944, + 0x253005, + 0x253147, + 0x22b509, + 0x254004, + 0x2547c9, + 0x254a08, + 0x254f04, + 0x254f07, + 0x226553c3, + 0x255547, + 0x1626d02, + 0x16ad402, + 0x255e86, + 0x2564c7, + 0x256b04, + 0x258487, + 0x258f47, + 0x259783, + 0x329982, + 0x205dc2, + 0x270003, + 0x270004, + 0x27000b, + 0x3c0fc8, + 0x25f184, + 0x25ad05, + 0x25cac7, + 0x25e5c5, + 0x30590a, + 0x25f0c3, + 0x22a12c42, + 0x212c44, + 0x261489, + 0x265183, + 0x265247, + 0x2f61c9, + 0x336308, + 0x25d1c3, + 0x27a247, + 0x27aa89, + 0x26be83, + 0x281b04, + 0x283c89, + 0x287dc6, + 0x2266c3, + 
0x2039c2, + 0x241243, + 0x2ad207, + 0x383fc5, + 0x340346, + 0x268984, + 0x2dba05, + 0x220d03, + 0x217f86, + 0x20d0c2, + 0x3a3984, + 0x22e2ab02, + 0x22ab03, + 0x23201802, + 0x252843, + 0x217544, + 0x217547, + 0x3c6746, + 0x255e42, + 0x23629942, + 0x384384, + 0x23a30b82, + 0x23e01a42, + 0x337304, + 0x337305, + 0x201a45, + 0x35ab46, + 0x24208742, + 0x208745, + 0x2100c5, + 0x210ac3, + 0x213d06, + 0x214885, + 0x221102, + 0x34db45, + 0x221104, + 0x227ac3, + 0x227d03, + 0x2460ad82, + 0x298507, + 0x33a504, + 0x33a509, + 0x255084, + 0x281903, + 0x35b109, + 0x281908, + 0x24b0cc04, + 0x30cc06, + 0x2a2c83, + 0x20cb03, + 0x30e843, + 0x24eefe82, + 0x375502, + 0x25201402, + 0x32d8c8, + 0x327088, + 0x3a8046, + 0x2544c5, + 0x34f5c5, + 0x31e0c7, + 0x229985, + 0x25cd82, + 0x25694cc2, + 0x1602202, + 0x240a88, + 0x34e285, + 0x27ca84, + 0x2e7205, + 0x241d87, + 0x25efc4, + 0x248942, + 0x25a2dac2, + 0x33e704, + 0x226ec7, + 0x289fc7, + 0x3393c4, + 0x291003, + 0x238bc4, + 0x238bc8, + 0x234106, + 0x2d4a0a, + 0x22b3c4, + 0x291508, + 0x288204, + 0x227546, + 0x294c84, + 0x2ef406, + 0x33a7c9, + 0x26d007, + 0x34e1c3, + 0x25eebfc2, + 0x331203, + 0x207c82, + 0x2625c982, + 0x30cf06, + 0x371e48, + 0x2a44c7, + 0x2f7209, + 0x290ac9, + 0x2a61c5, + 0x2a73c9, + 0x2a7b85, + 0x2a7cc9, + 0x2a9045, + 0x2aa008, + 0x266598c4, + 0x26a598c7, + 0x232583, + 0x2aa207, + 0x232586, + 0x2aa5c7, + 0x2a0f45, + 0x2ca8c3, + 0x26e35c02, + 0x2ea984, + 0x27230bc2, + 0x276552c2, + 0x2f3ac6, + 0x277445, + 0x2acac7, + 0x326403, + 0x32f644, + 0x2130c3, + 0x23b0c3, + 0x27a07d02, + 0x28206202, + 0x37b084, + 0x329943, + 0x24b905, + 0x28603882, + 0x28e00c42, + 0x2e0586, + 0x31c7c4, + 0x385444, + 0x38544a, + 0x29601342, + 0x38e2ca, + 0x39e948, + 0x29a6ff84, + 0x201fc3, + 0x208c43, + 0x3165c9, + 0x267709, + 0x2a6e06, + 0x29e14bc3, + 0x214bc5, + 0x39434d, + 0x39eb06, + 0x20e84b, + 0x2a200802, + 0x220b88, + 0x2ca1a802, + 0x2ce00942, + 0x2c9a85, + 0x2d205842, + 0x21b147, + 0x2b0747, + 0x214a43, + 0x348148, + 0x2d601102, + 0x29f384, + 0x291203, + 0x325545, + 0x395983, + 0x245646, + 0x223504, + 0x2fb943, + 0x2aec03, + 0x2da03202, + 0x2eacc4, + 0x3af385, + 0x2ace07, + 0x277e03, + 0x2ad9c3, + 0x2ae803, + 0x16ae8c2, + 0x2ae8c3, + 0x2aeb83, + 0x2de0b0c2, + 0x39e304, + 0x2507c6, + 0x22a443, + 0x2af343, + 0x2e2b0102, + 0x2b0108, + 0x2b03c4, + 0x2ee8c6, + 0x256947, + 0x3845c6, + 0x2a4f04, + 0x3be01ec2, + 0x23244b, + 0x2ff28e, + 0x219e0f, + 0x2c7b83, + 0x3c65fe82, + 0x1647302, + 0x3ca00a82, + 0x25b4c3, + 0x205983, + 0x2d8746, + 0x2f1946, + 0x3c2147, + 0x2f9084, + 0x3ce193c2, + 0x3d21edc2, + 0x2425c5, + 0x2e44c7, + 0x37fd86, + 0x3d64d542, + 0x30de04, + 0x2b7b43, + 0x3da09602, + 0x3df63443, + 0x2b8444, + 0x2bd289, + 0x16c2482, + 0x3e20dd82, + 0x327e05, + 0x3e6c2702, + 0x3ea00682, + 0x352307, + 0x214fc9, + 0x36754b, + 0x3a2305, + 0x26ad09, + 0x37e806, + 0x20bd47, + 0x3ee074c4, + 0x348c89, + 0x337b07, + 0x224c87, + 0x230803, + 0x2afc46, + 0x30a7c7, + 0x20fbc3, + 0x2f0d46, + 0x3f6038c2, + 0x3fa0e402, + 0x3bec83, + 0x32f245, + 0x332807, + 0x222386, + 0x383f45, + 0x2f3f04, + 0x278f45, + 0x2f2144, + 0x3fe00f82, + 0x341587, + 0x2f2984, + 0x26a444, + 0x34694d, + 0x26a449, + 0x230b08, + 0x25c404, + 0x335ec5, + 0x20a047, + 0x341144, + 0x23cf47, + 0x204cc5, + 0x402a4e44, + 0x30bcc5, + 0x263e44, + 0x390706, + 0x3206c5, + 0x406291c2, + 0x210fc4, + 0x210fc5, + 0x35d8c6, + 0x343b85, + 0x25d144, + 0x3c6103, + 0x20eb46, + 0x22b705, + 0x22f045, + 0x3207c4, + 0x22b443, + 0x22b44c, + 0x40aacf02, + 0x40e0a5c2, + 0x41201542, + 0x20f003, + 0x20f004, + 0x41604482, + 0x30ae88, + 0x340405, + 
0x236184, + 0x243686, + 0x41a0e302, + 0x41e1de42, + 0x422000c2, + 0x2b2cc5, + 0x294346, + 0x229304, + 0x357946, + 0x35c206, + 0x222a83, + 0x4272850a, + 0x26b085, + 0x28b003, + 0x228606, + 0x304789, + 0x228607, + 0x292288, + 0x2c7789, + 0x31d348, + 0x250e46, + 0x209703, + 0x42a6f582, + 0x392c08, + 0x42e54ac2, + 0x43201e42, + 0x20be83, + 0x2d8345, + 0x26ba04, + 0x3b6fc9, + 0x2ee004, + 0x21b388, + 0x20dc03, + 0x323004, + 0x2a5fc3, + 0x2192c8, + 0x346887, + 0x43a25242, + 0x290ec2, + 0x31a685, + 0x39cf89, + 0x21f743, + 0x27bfc4, + 0x394304, + 0x20a0c3, + 0x27d04a, + 0x43f7c0c2, + 0x44210f82, + 0x2c3143, + 0x37ea83, + 0x1600082, + 0x200083, + 0x44603282, + 0x44a05a02, + 0x44e1a484, + 0x322046, + 0x2e07c6, + 0x245e44, + 0x277043, + 0x345c03, + 0x2ec1c3, + 0x2452c6, + 0x341d05, + 0x2c32c7, + 0x2c6445, + 0x2c7d86, + 0x2c8708, + 0x2c8906, + 0x24efc4, + 0x29960b, + 0x2cb583, + 0x2cb585, + 0x2cba08, + 0x21a202, + 0x352602, + 0x4524cd02, + 0x4560d682, + 0x219403, + 0x45a6cd82, + 0x26cd83, + 0x2cbd04, + 0x2cc543, + 0x462168c2, + 0x466d0e06, + 0x25e446, + 0x46ad0f42, + 0x46e0ff02, + 0x47227d42, + 0x4763a3c2, + 0x47a1b5c2, + 0x47e047c2, + 0x20dec3, + 0x358645, + 0x2b6306, + 0x48226444, + 0x38ce0a, + 0x3a0546, + 0x21c384, + 0x277943, + 0x48e02f02, + 0x2032c2, + 0x26fb43, + 0x4920ec83, + 0x2e6747, + 0x3205c7, + 0x4aa70107, + 0x393f87, + 0x22cd03, + 0x3176ca, + 0x251304, + 0x397504, + 0x39750a, + 0x247705, + 0x4ae1f682, + 0x258443, + 0x4b202002, + 0x228803, + 0x3311c3, + 0x4ba02742, + 0x26f484, + 0x220704, + 0x2046c5, + 0x3080c5, + 0x34e4c6, + 0x34e846, + 0x4be53982, + 0x4c201382, + 0x2f8545, + 0x25e152, + 0x33f206, + 0x25e8c3, + 0x39d486, + 0x2a1f45, + 0x1604842, + 0x54610c82, + 0x35e3c3, + 0x210c83, + 0x27e483, + 0x54a0c502, + 0x381c43, + 0x54e06e02, + 0x200843, + 0x39e348, + 0x285603, + 0x2a6046, + 0x23ecc7, + 0x30b986, + 0x30b98b, + 0x21c2c7, + 0x2ea784, + 0x55601c82, + 0x340285, + 0x55a09cc3, + 0x292c83, + 0x239b45, + 0x3175c3, + 0x55f175c6, + 0x3580ca, + 0x245ac3, + 0x23e004, + 0x202586, + 0x223e86, + 0x56241d03, + 0x32f507, + 0x2a6d07, + 0x29ae85, + 0x311986, + 0x22b743, + 0x58e13f43, + 0x59206f02, + 0x21a244, + 0x207609, + 0x240887, + 0x229a85, + 0x247d04, + 0x26c7c8, + 0x273b85, + 0x59676405, + 0x284e49, + 0x347a43, + 0x24d584, + 0x59a0b182, + 0x219603, + 0x59e94742, + 0x299986, + 0x162bac2, + 0x5a2a47c2, + 0x2b2bc8, + 0x3a76c3, + 0x30bc07, + 0x2ce245, + 0x2b2785, + 0x2d8e4b, + 0x2d9846, + 0x2d9046, + 0x2dc486, + 0x279f04, + 0x2dc6c6, + 0x5a6f0248, + 0x237cc3, + 0x201f83, + 0x201f84, + 0x2ddbc4, + 0x2dde87, + 0x2df2c5, + 0x5aadf402, + 0x5ae08302, + 0x208305, + 0x2bb184, + 0x2e298b, + 0x2e3708, + 0x298804, + 0x230982, + 0x5b64e882, + 0x24e883, + 0x2e3f04, + 0x2e41c5, + 0x2e4d07, + 0x2e6d44, + 0x21c184, + 0x5ba057c2, + 0x36b449, + 0x2e84c5, + 0x3a25c5, + 0x2e9045, + 0x5be19543, + 0x2e9d04, + 0x2e9d0b, + 0x2ea0c4, + 0x2ea38b, + 0x2ec105, + 0x219f4a, + 0x2ecec8, + 0x2ed0ca, + 0x2ed983, + 0x2ed98a, + 0x5c21fc42, + 0x5c648602, + 0x209943, + 0x5caf1382, + 0x2f1383, + 0x5cf6c182, + 0x5d32c442, + 0x2f1fc4, + 0x21a846, + 0x357685, + 0x2f3d83, + 0x31adc6, + 0x34f085, + 0x250ac4, + 0x5d600382, + 0x2aefc4, + 0x2c358a, + 0x398a07, + 0x3477c6, + 0x24f3c7, + 0x244a03, + 0x2b8488, + 0x3a1f8b, + 0x2bd905, + 0x27c785, + 0x27c786, + 0x2dd704, + 0x3b5288, + 0x21d343, + 0x230004, + 0x230007, + 0x2f4a06, + 0x31fa06, + 0x2e200a, + 0x254844, + 0x31104a, + 0x5db364c6, + 0x3364c7, + 0x25ad87, + 0x273544, + 0x273549, + 0x250485, + 0x31cf4b, + 0x2e1283, + 0x225b43, + 0x5de20b43, + 0x2344c4, + 0x5e200982, + 0x3a3006, + 
0x5e6ca645, + 0x39d6c5, + 0x258c46, + 0x29cd44, + 0x5ea07bc2, + 0x247c44, + 0x5ee16f02, + 0x224645, + 0x23f4c4, + 0x228d83, + 0x5f610cc2, + 0x210cc3, + 0x267b86, + 0x5fa00a02, + 0x2073c8, + 0x228484, + 0x228486, + 0x37f306, + 0x25cb84, + 0x20eac5, + 0x21cfc8, + 0x220f87, + 0x2227c7, + 0x2227cf, + 0x28e846, + 0x309bc3, + 0x398184, + 0x233744, + 0x2101c3, + 0x227684, + 0x34fd84, + 0x5fe030c2, + 0x289a43, + 0x372e83, + 0x60207c02, + 0x25c503, + 0x322683, + 0x21578a, + 0x207c07, + 0x2534cc, + 0x253786, + 0x255246, + 0x256647, + 0x60632f47, + 0x25c889, + 0x22c504, + 0x25eb04, + 0x60a09f82, + 0x60e01282, + 0x2e23c6, + 0x32f304, + 0x2d3146, + 0x2333c8, + 0x239444, + 0x21b186, + 0x37c885, + 0x285048, + 0x205883, + 0x28a685, + 0x290cc3, + 0x3a26c3, + 0x3a26c4, + 0x212c03, + 0x6125fd82, + 0x61603a42, + 0x2e1149, + 0x299885, + 0x2a1084, + 0x2a4a45, + 0x212384, + 0x393607, + 0x353f05, + 0x2702c4, + 0x2702c8, + 0x2e61c6, + 0x2e9f84, + 0x2ede88, + 0x2f27c7, + 0x61a04042, + 0x316244, + 0x210284, + 0x224e87, + 0x61e04044, + 0x2c9002, + 0x6220ed42, + 0x221b83, + 0x2d37c4, + 0x29bb43, + 0x2aacc5, + 0x6262e642, + 0x2fddc5, + 0x23a382, + 0x390c85, + 0x372005, + 0x62a04e02, + 0x339b84, + 0x62e06a42, + 0x3aba46, + 0x3a7346, + 0x39d0c8, + 0x2be888, + 0x2f3a44, + 0x303685, + 0x316049, + 0x2eadc4, + 0x358084, + 0x2b6983, + 0x6320fd85, + 0x2c2547, + 0x21fc84, + 0x3ae54d, + 0x2f4682, + 0x3b35c3, + 0x2f4683, + 0x63601b42, + 0x396e45, + 0x223747, + 0x2b9604, + 0x394047, + 0x2c7989, + 0x2c36c9, + 0x275247, + 0x202bc3, + 0x3a7508, + 0x25b949, + 0x2f5487, + 0x2f5805, + 0x2f6706, + 0x2f6d46, + 0x2f6ec5, + 0x26a545, + 0x63a00d42, + 0x2b7685, + 0x2b3a08, + 0x2c08c6, + 0x63e872c7, + 0x2ec344, + 0x2b8047, + 0x2f9206, + 0x6420a402, + 0x35d5c6, + 0x2fc9ca, + 0x2fd245, + 0x646da942, + 0x64a4eb02, + 0x30ab06, + 0x386548, + 0x64e8a187, + 0x6523c902, + 0x215c43, + 0x20c246, + 0x229144, + 0x3b2f86, + 0x201746, + 0x34290a, + 0x325e45, + 0x3559c6, + 0x39ec43, + 0x39ec44, + 0x202c02, + 0x31c743, + 0x6560f042, + 0x30b743, + 0x38e544, + 0x2b2484, + 0x38668a, + 0x214c43, + 0x277208, + 0x250f0a, + 0x23f747, + 0x300986, + 0x260484, + 0x21c242, + 0x2a3702, + 0x65a02982, + 0x238b83, + 0x25ab47, + 0x202987, + 0x284d04, + 0x3a4f87, + 0x2e4e06, + 0x221287, + 0x303304, + 0x399d85, + 0x292705, + 0x65e1b2c2, + 0x3c50c6, + 0x223443, + 0x22a4c2, + 0x22a4c6, + 0x66222342, + 0x6660e982, + 0x3bb945, + 0x66a27882, + 0x66e01c42, + 0x334e85, + 0x2c51c5, + 0x355a85, + 0x289043, + 0x244185, + 0x2d9907, + 0x2feb45, + 0x370005, + 0x33d8c4, + 0x310806, + 0x381d44, + 0x67202a82, + 0x67ee7585, + 0x2a3ac7, + 0x34f408, + 0x261146, + 0x26114d, + 0x2674c9, + 0x2674d2, + 0x300585, + 0x309f43, + 0x6820c202, + 0x30ee44, + 0x39eb83, + 0x33b0c5, + 0x2fdf05, + 0x68630882, + 0x25f803, + 0x68a51b02, + 0x692d6142, + 0x69602242, + 0x2a1d45, + 0x394183, + 0x3c4f08, + 0x69a0ad42, + 0x69e0c842, + 0x26f446, + 0x317c4a, + 0x20e043, + 0x25d0c3, + 0x337d03, + 0x6aa04182, + 0x78e0c542, + 0x79600d82, + 0x206d02, + 0x35d3c9, + 0x2c18c4, + 0x2a9348, + 0x79af3dc2, + 0x79e01ac2, + 0x2ea5c5, + 0x238788, + 0x39e488, + 0x268b8c, + 0x23f683, + 0x7a263802, + 0x7a611d82, + 0x270d46, + 0x301805, + 0x2787c3, + 0x253c06, + 0x301946, + 0x29bc83, + 0x303b03, + 0x303f46, + 0x304b84, + 0x239a46, + 0x214a05, + 0x214a0a, + 0x24c1c4, + 0x305244, + 0x305b8a, + 0x7aa04982, + 0x24c345, + 0x30798a, + 0x308305, + 0x308bc4, + 0x308cc6, + 0x308e44, + 0x2198c6, + 0x7ae308c2, + 0x2f3446, + 0x341ac5, + 0x325cc7, + 0x3adf46, + 0x256844, + 0x2d2387, + 0x328446, + 0x241a05, + 0x241a07, + 0x3aed07, + 0x3aed0e, + 
0x2ebb06, + 0x23ce05, + 0x203f87, + 0x20ff43, + 0x20ff47, + 0x217945, + 0x22f484, + 0x22f5c2, + 0x246087, + 0x2f9104, + 0x244dc4, + 0x290d4b, + 0x220003, + 0x2e58c7, + 0x220004, + 0x2e6047, + 0x2903c3, + 0x33ca0d, + 0x3998c8, + 0x2297c4, + 0x2701c5, + 0x30b205, + 0x30b643, + 0x7b228382, + 0x30d5c3, + 0x30da83, + 0x321c04, + 0x27ab85, + 0x3c5247, + 0x39ecc6, + 0x37c1c3, + 0x22a60b, + 0x30e04b, + 0x2a5c8b, + 0x2fa5cb, + 0x2bd60a, + 0x30548b, + 0x3245cb, + 0x360a8c, + 0x384f4b, + 0x3c4351, + 0x3c5d4a, + 0x30f5cb, + 0x30f88c, + 0x30fb8b, + 0x31010a, + 0x311bca, + 0x312bce, + 0x31324b, + 0x31350a, + 0x3145d1, + 0x314a0a, + 0x314f0b, + 0x31544e, + 0x315d8c, + 0x316b8b, + 0x316e4e, + 0x3171cc, + 0x318d4a, + 0x31a04c, + 0x7b71a34a, + 0x31af48, + 0x31ba49, + 0x32368a, + 0x32390a, + 0x323b8b, + 0x328b4e, + 0x328ed1, + 0x330349, + 0x33058a, + 0x330bcb, + 0x332a4a, + 0x333296, + 0x334b8b, + 0x33784a, + 0x33818a, + 0x33908b, + 0x33ab49, + 0x33d389, + 0x33de0d, + 0x33e48b, + 0x33f38b, + 0x33fd4b, + 0x343d49, + 0x34438e, + 0x34500a, + 0x34a4ca, + 0x34a7ca, + 0x34afcb, + 0x34b80b, + 0x34bacd, + 0x34d18d, + 0x34d7d0, + 0x34dc8b, + 0x34f9cc, + 0x34fe8b, + 0x351e0b, + 0x35344e, + 0x353a0b, + 0x353a0d, + 0x35964b, + 0x35a0cf, + 0x35a48b, + 0x35acca, + 0x35b3c9, + 0x35ba89, + 0x35cd4b, + 0x35d00e, + 0x35e88b, + 0x35f64f, + 0x36160b, + 0x3618cb, + 0x361b8b, + 0x36238a, + 0x367149, + 0x36a18f, + 0x36f54c, + 0x37038c, + 0x37108e, + 0x37158f, + 0x37194e, + 0x3722d0, + 0x3726cf, + 0x3731ce, + 0x373f8c, + 0x374292, + 0x375211, + 0x375a0e, + 0x375e8e, + 0x3763ce, + 0x37674f, + 0x376b0e, + 0x376e93, + 0x377351, + 0x37778c, + 0x377a8e, + 0x377f0c, + 0x378513, + 0x378ed0, + 0x37970c, + 0x379a0c, + 0x379ecb, + 0x37ac8e, + 0x37b18b, + 0x37b5cb, + 0x37ca4c, + 0x3825ca, + 0x38474c, + 0x384a4c, + 0x384d49, + 0x387e0b, + 0x3880c8, + 0x388889, + 0x38888f, + 0x38a08b, + 0x7bb8afca, + 0x38e8cc, + 0x38fa89, + 0x390a48, + 0x39100b, + 0x39158b, + 0x39220a, + 0x39248b, + 0x39298c, + 0x393d48, + 0x39a40b, + 0x39d80b, + 0x3a114e, + 0x3a27cb, + 0x3a410b, + 0x3ae88b, + 0x3aeb49, + 0x3af08d, + 0x3b368a, + 0x3b45d7, + 0x3b5cd8, + 0x3b9749, + 0x3bb58b, + 0x3bc1d4, + 0x3bc6cb, + 0x3bcc4a, + 0x3bd14a, + 0x3bd3cb, + 0x3bf610, + 0x3bfa11, + 0x3c00ca, + 0x3c394d, + 0x3c404d, + 0x3c61cb, + 0x3c6a06, + 0x3c51c3, + 0x7bf74a03, + 0x2dd1c6, + 0x245a05, + 0x252087, + 0x324486, + 0x1601182, + 0x2cbe89, + 0x31abc4, + 0x2d8988, + 0x220a83, + 0x30ed87, + 0x201c02, + 0x2acb03, + 0x7c200dc2, + 0x2c4946, + 0x2c5c84, + 0x21a604, + 0x349a43, + 0x349a45, + 0x7cac2742, + 0x7cea8044, + 0x273487, + 0x7d22f442, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x20ae43, + 0x200742, + 0xcd588, + 0x202c42, + 0x30e843, + 0x21f743, + 0x20ec83, + 0xae43, + 0x241d03, + 0x207c03, + 0x32eb56, + 0x356d13, + 0x3a4e09, + 0x38c988, + 0x340109, + 0x307b06, + 0x33e750, + 0x248c93, + 0x2f4ac8, + 0x2a5687, + 0x2b6f87, + 0x278c8a, + 0x38e5c9, + 0x342549, + 0x28b30b, + 0x31ca86, + 0x20850a, + 0x225f06, + 0x31a7c3, + 0x298445, + 0x207808, + 0x3abb0d, + 0x2ef1cc, + 0x35cac7, + 0x312f0d, + 0x21d0c4, + 0x23478a, + 0x23598a, + 0x235e4a, + 0x21fa07, + 0x243507, + 0x245fc4, + 0x27b986, + 0x3264c4, + 0x2e01c8, + 0x2ee049, + 0x2bfcc6, + 0x2bfcc8, + 0x24944d, + 0x2c3909, + 0x389ac8, + 0x3a2547, + 0x2f1c0a, + 0x2564c6, + 0x260fc7, + 0x306a04, + 0x214707, + 0x3105ca, + 0x378a0e, + 0x229985, + 0x3bfe0b, + 0x300389, + 0x267709, + 0x2b0587, + 0x3694ca, + 0x224dc7, + 0x2ff3c9, + 0x31e5c8, + 0x239e8b, + 0x2d8345, + 0x2309ca, + 0x227b09, + 0x3abe0a, + 0x2c64cb, + 0x21460b, + 
0x28b095, + 0x306745, + 0x3a25c5, + 0x2e9d0a, + 0x2a6f0a, + 0x300107, + 0x2388c3, + 0x2e2348, + 0x2cf00a, + 0x228486, + 0x25b789, + 0x285048, + 0x2e9f84, + 0x29bb49, + 0x2be888, + 0x36c607, + 0x2e7586, + 0x2a3ac7, + 0x2ac6c7, + 0x2450c5, + 0x2297cc, + 0x2701c5, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x202c42, + 0x20be03, + 0x20ec83, + 0x20ae43, + 0x241d03, + 0x20be03, + 0x20ec83, + 0xae43, + 0x285603, + 0x241d03, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0xae43, + 0x241d03, + 0xcd588, + 0x202c42, + 0x209d42, + 0x236082, + 0x201102, + 0x2013c2, + 0x2db482, + 0x460be03, + 0x237583, + 0x203d43, + 0x30e843, + 0x214bc3, + 0x21f743, + 0x2d1206, + 0x20ec83, + 0x241d03, + 0x238843, + 0xcd588, + 0x323584, + 0x322dc7, + 0x34a403, + 0x2402c4, + 0x21b903, + 0x283cc3, + 0x30e843, + 0x15da87, + 0x1221c4, + 0x121b83, + 0xf45, + 0x200742, + 0xb6c03, + 0x5a02c42, + 0x1488d09, + 0x891cd, + 0x8950d, + 0x236082, + 0x6ff84, + 0xf89, + 0x200342, + 0x5f8d588, + 0xe9484, + 0xcd588, + 0x1426502, + 0x1508546, + 0x233603, + 0x2b8283, + 0x660be03, + 0x234784, + 0x6a37583, + 0x6f0e843, + 0x207d02, + 0x26ff84, + 0x20ec83, + 0x2fbbc3, + 0x2056c2, + 0x241d03, + 0x21c4c2, + 0x2f1f03, + 0x200a02, + 0x29d2c3, + 0x26f883, + 0x20fc42, + 0xcd588, + 0x233603, + 0x2fbbc3, + 0x2056c2, + 0x2f1f03, + 0x200a02, + 0x29d2c3, + 0x26f883, + 0x20fc42, + 0x2f1f03, + 0x200a02, + 0x29d2c3, + 0x26f883, + 0x20fc42, + 0x20be03, + 0x2b6c03, + 0x20be03, + 0x237583, + 0x30e843, + 0x26ff84, + 0x214bc3, + 0x21f743, + 0x226444, + 0x20ec83, + 0x241d03, + 0x204bc2, + 0x219543, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x2b6c03, + 0x202c42, + 0x20be03, + 0x237583, + 0x30e843, + 0x26ff84, + 0x20ec83, + 0x241d03, + 0x2f5805, + 0x230882, + 0x200742, + 0xcd588, + 0x1455908, + 0x1367ca, + 0x30e843, + 0x200001, + 0x202081, + 0x200ec1, + 0x200f01, + 0x200f41, + 0x20d701, + 0x312181, + 0x203801, + 0x24b241, + 0x2021c1, + 0x200101, + 0x200301, + 0x117485, + 0xcd588, + 0x200781, + 0x2014c1, + 0x200041, + 0x200141, + 0x201401, + 0x200901, + 0x200541, + 0x200c01, + 0x200a81, + 0x200641, + 0x200081, + 0x2001c1, + 0x200341, + 0x201681, + 0x20ab41, + 0x2002c1, + 0x200a01, + 0x200401, + 0x200441, + 0x201ac1, + 0x203f81, + 0x20d601, + 0x201181, + 0x200dc1, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x202c42, + 0x20be03, + 0x237583, + 0x200342, + 0x241d03, + 0x15da87, + 0x1f847, + 0x29546, + 0x4160a, + 0x88348, + 0x5a588, + 0x5aa47, + 0x86, + 0xd61c5, + 0x14a345, + 0x7dac6, + 0x157206, + 0x28b304, + 0x340547, + 0xcd588, + 0x2d2484, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x31a548, + 0x31e084, + 0x2374c4, + 0x22aa44, + 0x270c47, + 0x2cde07, + 0x20be03, + 0x23d74b, + 0x31b7ca, + 0x31cd47, + 0x2fc048, + 0x3255c8, + 0x237583, + 0x346c47, + 0x203d43, + 0x37c208, + 0x335049, + 0x26ff84, + 0x214bc3, + 0x2dce48, + 0x21f743, + 0x2cb6ca, + 0x2d1206, + 0x3a0547, + 0x20ec83, + 0x2da606, + 0x309308, + 0x241d03, + 0x28d806, + 0x2e394d, + 0x2e49c8, + 0x2ea0cb, + 0x331e86, + 0x348087, + 0x20f605, + 0x2ef98a, + 0x22bfc5, + 0x36210a, + 0x230882, + 0x203f83, + 0x244dc4, + 0x2021c6, + 0x3a7a03, + 0x2af043, + 0x24be43, + 0x23b003, + 0x349183, + 0x200582, + 0x2d7285, + 0x2a6589, + 0x245743, + 0x205583, + 0x202fc3, + 0x200301, + 0x2a1a85, + 0x39da83, + 0x2053c3, + 0x22aa44, + 0x326443, + 0x214948, + 0x2ec443, + 0x302e8d, + 0x2ebbc8, + 0x21ab46, + 0x31c783, + 0x378983, + 0x381cc3, + 0xaa0be03, + 0x236dc8, + 0x23d744, + 0x246d03, + 0x2022c6, + 0x249bc8, + 
0x202e03, + 0x2ef9c3, + 0x2319c3, + 0x237583, + 0x21d8c3, + 0x21e903, + 0x21a303, + 0x31c703, + 0x2b25c3, + 0x225783, + 0x370645, + 0x256c04, + 0x258107, + 0x329982, + 0x25a303, + 0x25d486, + 0x25ed03, + 0x25f3c3, + 0x276543, + 0x202043, + 0x323283, + 0x269687, + 0xaf0e843, + 0x2363c3, + 0x2096c3, + 0x204d03, + 0x26ff83, + 0x2f3783, + 0x374ac5, + 0x363fc3, + 0x246889, + 0x20b0c3, + 0x2fe203, + 0xb2527c3, + 0x286d03, + 0x21cd08, + 0x2a64c6, + 0x200706, + 0x29aa46, + 0x27a5c7, + 0x200c83, + 0x20be83, + 0x21f743, + 0x288446, + 0x21a202, + 0x29ea43, + 0x32dd05, + 0x20ec83, + 0x2a2e47, + 0x160ae43, + 0x24e483, + 0x21fa83, + 0x225e03, + 0x241d03, + 0x212e46, + 0x31d286, + 0x36aa43, + 0x22ba83, + 0x219543, + 0x253743, + 0x303b83, + 0x2f0603, + 0x2f20c3, + 0x34f085, + 0x24f3c3, + 0x2d3246, + 0x23eb08, + 0x225b43, + 0x341789, + 0x33a308, + 0x2110c8, + 0x21a185, + 0x32a38a, + 0x35400a, + 0x37cd8b, + 0x37d408, + 0x2fb903, + 0x2f2103, + 0x33b1c3, + 0x366d88, + 0x2f4e83, + 0x39ec44, + 0x261983, + 0x202983, + 0x22d483, + 0x26fcc3, + 0x238843, + 0x230882, + 0x22d0c3, + 0x23f683, + 0x305403, + 0x3065c4, + 0x244dc4, + 0x3be143, + 0xcd588, + 0x200742, + 0x200602, + 0x200582, + 0x203402, + 0x2023c2, + 0x200782, + 0x238c02, + 0x201b02, + 0x202542, + 0x2000c2, + 0x225242, + 0x20d682, + 0x26cd82, + 0x206f02, + 0x2db482, + 0x20b182, + 0x201f82, + 0x2057c2, + 0x2f5f42, + 0x208102, + 0x200982, + 0x219e82, + 0x207bc2, + 0x207c02, + 0x201282, + 0x20fd82, + 0x201c42, + 0x742, + 0x602, + 0x582, + 0x3402, + 0x23c2, + 0x782, + 0x38c02, + 0x1b02, + 0x2542, + 0xc2, + 0x25242, + 0xd682, + 0x6cd82, + 0x6f02, + 0xdb482, + 0xb182, + 0x1f82, + 0x57c2, + 0xf5f42, + 0x8102, + 0x982, + 0x19e82, + 0x7bc2, + 0x7c02, + 0x1282, + 0xfd82, + 0x1c42, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x3f82, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x202c42, + 0x241d03, + 0xc60be03, + 0x30e843, + 0x21f743, + 0xaff03, + 0x223b82, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0xaff03, + 0x241d03, + 0xdc2, + 0x142f49, + 0x202382, + 0x15bda05, + 0x2eaa02, + 0xcd588, + 0x2c42, + 0x23bfc2, + 0x200482, + 0x244482, + 0x21f682, + 0x253982, + 0x14a345, + 0x203082, + 0x2056c2, + 0x20c502, + 0x203042, + 0x20b182, + 0x392a82, + 0x20ed42, + 0x24eb42, + 0x15da87, + 0x120a8d, + 0xd6249, + 0x6898b, + 0xd97c8, + 0x60b89, + 0xfeec6, + 0x30e843, + 0xcd588, + 0x1221c4, + 0x121b83, + 0xf45, + 0xcd588, + 0x5b646, + 0xf89, + 0xab07, + 0x200742, + 0x28b304, + 0x202c42, + 0x20be03, + 0x209d42, + 0x237583, + 0x202542, + 0x2d2484, + 0x214bc3, + 0x254ac2, + 0x20ec83, + 0x200342, + 0x241d03, + 0x3a25c6, + 0x32414f, + 0x70ec03, + 0xcd588, + 0x202c42, + 0x203d43, + 0x30e843, + 0x21f743, + 0xae43, + 0x14ef74b, + 0x141650a, + 0x14eca47, + 0x78d4b, + 0xd7e45, + 0x15da87, + 0x202c42, + 0x20be03, + 0x30e843, + 0x20ec83, + 0x200742, + 0x211a42, + 0x209342, + 0xfe0be03, + 0x2442c2, + 0x237583, + 0x226d02, + 0x22ab02, + 0x30e843, + 0x25cd82, + 0x251942, + 0x2a8002, + 0x211742, + 0x28d302, + 0x2029c2, + 0x200902, + 0x2ebfc2, + 0x278142, + 0x25c982, + 0x2ad9c2, + 0x2fcdc2, + 0x223482, + 0x23d082, + 0x21f743, + 0x205a02, + 0x20ec83, + 0x211e82, + 0x2c9fc2, + 0x241d03, + 0x2457c2, + 0x207c02, + 0x209f82, + 0x203a42, + 0x204e02, + 0x2da942, + 0x21b2c2, + 0x251b02, + 0x2234c2, + 0x31350a, + 0x35acca, + 0x38bf0a, + 0x3c6b82, + 0x20f2c2, + 0x374a82, + 0x103358c9, + 0x1072f70a, + 0x14328c7, + 0x10a03fc2, + 0x1410983, + 0x3342, + 0x12f70a, + 0x253404, + 0x1120be03, + 0x237583, + 0x254a04, + 0x30e843, + 0x26ff84, + 0x214bc3, + 0x21f743, + 
0x20ec83, + 0x1aec5, + 0x20ae43, + 0x241d03, + 0x24f3c3, + 0x203f83, + 0xcd588, + 0x1400004, + 0x149845, + 0x142f49, + 0xa8ca, + 0x119fc2, + 0x19cbc6, + 0x187251, + 0x11b358c9, + 0x1498c8, + 0x1c1948, + 0x1fbc7, + 0x282, + 0x11748b, + 0x18c40a, + 0x844a, + 0x2aa47, + 0xcd588, + 0x10c788, + 0xd547, + 0x18419a4b, + 0x1c787, + 0x4c2, + 0x5e87, + 0x23a8a, + 0x1f8cf, + 0x8308f, + 0xefa02, + 0x2c42, + 0x173908, + 0xf698a, + 0x12b48, + 0x5fcc8, + 0xd3708, + 0x2e02, + 0x1bda8f, + 0x9dc8b, + 0x7e948, + 0x3c2c7, + 0x127c0a, + 0xf400b, + 0x78449, + 0x127b07, + 0x12a48, + 0x1541cc, + 0x3a347, + 0x17a28a, + 0x67008, + 0xf6f8e, + 0x2954e, + 0x2a88b, + 0x2e28b, + 0x30e8b, + 0x50b89, + 0xe32cb, + 0xeb5cd, + 0x17d18b, + 0x198c8d, + 0x19900d, + 0x3cc4a, + 0x44c0b, + 0x4638b, + 0x49ec5, + 0x18828e50, + 0x15770f, + 0x3b4cf, + 0xfb1cd, + 0x39610, + 0xa6c2, + 0x18e071c8, + 0x1f6c8, + 0x192ec405, + 0x5400b, + 0x12e350, + 0x59c88, + 0x12c4a, + 0x2e449, + 0x66007, + 0x66347, + 0x66507, + 0x66887, + 0x67347, + 0x67947, + 0x68187, + 0x68547, + 0x68e87, + 0x69187, + 0x69847, + 0x69a07, + 0x69bc7, + 0x69d87, + 0x6a087, + 0x6a687, + 0x6af47, + 0x6b707, + 0x6bcc7, + 0x6bf87, + 0x6c147, + 0x6c447, + 0x6cc47, + 0x6ce47, + 0x6dd87, + 0x6df47, + 0x6e107, + 0x6ebc7, + 0x6f0c7, + 0x6fd87, + 0x70687, + 0x71147, + 0x71647, + 0x71807, + 0x71c07, + 0x72447, + 0x726c7, + 0x72ac7, + 0x72c87, + 0x72e47, + 0x73287, + 0x73e87, + 0x743c7, + 0x74947, + 0x74b07, + 0x74e87, + 0x75407, + 0xd0c2, + 0x5fdca, + 0xdc547, + 0x84785, + 0xb3111, + 0x10ac6, + 0x10cc0a, + 0x17378a, + 0x5b646, + 0xcb0b, + 0x1402, + 0x34111, + 0xb29c9, + 0x948c9, + 0xebfc2, + 0x71e8a, + 0xa5a89, + 0xa61cf, + 0xa67ce, + 0xa7708, + 0x552c2, + 0x549, + 0x18b4ce, + 0xfc6cc, + 0xdbe0f, + 0x1a814e, + 0x1840c, + 0x25589, + 0x26751, + 0x2f988, + 0x1109d2, + 0x1115cd, + 0x1545cd, + 0x43f8b, + 0x4bad5, + 0x52c49, + 0x5438a, + 0x5ee89, + 0x6b310, + 0x7cc8b, + 0x85ecf, + 0xf0c0b, + 0x16130c, + 0x1b2610, + 0x9208a, + 0x9e90d, + 0x9fc4e, + 0xa9bca, + 0xab6cc, + 0xac394, + 0xb2651, + 0xbb04b, + 0xe1ecf, + 0xca50d, + 0x1a720e, + 0x16c4cc, + 0x18618c, + 0xb234b, + 0xb428e, + 0xb4d50, + 0xb584b, + 0xbaa8d, + 0xbb4cf, + 0xbef4c, + 0xbfb4e, + 0xc0411, + 0xdff4c, + 0x10d8c7, + 0xc738d, + 0xd000c, + 0xd65d0, + 0xdb80d, + 0x18acc7, + 0xe6310, + 0xf9348, + 0xfd44b, + 0x17d9cf, + 0x142188, + 0x10ce0d, + 0x190c10, + 0xf5f89, + 0x196af346, + 0xb0303, + 0xb5b05, + 0x9602, + 0x143709, + 0x5c40a, + 0x106606, + 0x2098a, + 0x1991f309, + 0x264c3, + 0xd2711, + 0xd2b49, + 0xd3ec7, + 0x1873cb, + 0xdae90, + 0xdb34c, + 0xdc2c8, + 0xdcc45, + 0x11e748, + 0x1afe8a, + 0x26587, + 0x140947, + 0x1382, + 0x12f04a, + 0x3b809, + 0x71505, + 0xa2cca, + 0x8a0cf, + 0x4794b, + 0x174b8c, + 0x1a252, + 0x9df05, + 0xdf0c8, + 0x13a60a, + 0x19ee8f05, + 0x17478c, + 0x12c443, + 0x192a82, + 0xf258a, + 0x14f2d8c, + 0x3a6c8, + 0x198e48, + 0x15da07, + 0x16f02, + 0xa02, + 0x49fd0, + 0x653c7, + 0x1282, + 0x333cf, + 0x7dac6, + 0x79a8e, + 0xdeb8b, + 0x6e308, + 0xa9dc9, + 0xf5012, + 0x18998d, + 0x1be608, + 0x68849, + 0x6a20d, + 0x6c5c9, + 0x6c98b, + 0x6e4c8, + 0x73c88, + 0x76248, + 0x79dc9, + 0x79fca, + 0x7b48c, + 0x17010a, + 0x103bc7, + 0x2fdcd, + 0xf7a8b, + 0x11a9cc, + 0x1979c8, + 0x4d3c9, + 0x13d8d0, + 0xc842, + 0x521cd, + 0x4182, + 0xc542, + 0x103b0a, + 0x10cb0a, + 0x10ec8b, + 0x4654c, + 0x10c28a, + 0x10c50e, + 0x121ccd, + 0xb6a08, + 0xdc2, + 0x11e0340e, + 0x1272184e, + 0x12f4960a, + 0x13742c0e, + 0x13f374ce, + 0x147ac40c, + 0x14328c7, + 0x14328c9, + 0x1410983, + 0x14eb784c, + 0x15727309, + 0x15f69bc9, + 0x1660a6c9, + 0x3342, + 
0x3351, + 0x121791, + 0x14954d, + 0x142b51, + 0x137411, + 0x1ac34f, + 0xb778f, + 0x12724c, + 0x169b0c, + 0xa60c, + 0x1654cd, + 0x10e595, + 0x5a00c, + 0x1ba48c, + 0x138c90, + 0x155e8c, + 0x15dc0c, + 0x17a659, + 0x180a19, + 0x19f3d9, + 0x1b57d4, + 0x1bbcd4, + 0x3ed4, + 0x4ed4, + 0xb814, + 0x16e5a0c9, + 0x17404189, + 0x17fba549, + 0x1222fb89, + 0x3342, + 0x12a2fb89, + 0x3342, + 0x3eca, + 0x3342, + 0x1322fb89, + 0x3342, + 0x3eca, + 0x3342, + 0x13a2fb89, + 0x3342, + 0x1422fb89, + 0x3342, + 0x14a2fb89, + 0x3342, + 0x3eca, + 0x3342, + 0x1522fb89, + 0x3342, + 0x3eca, + 0x3342, + 0x15a2fb89, + 0x3342, + 0x1622fb89, + 0x3342, + 0x3eca, + 0x3342, + 0x16a2fb89, + 0x3342, + 0x3eca, + 0x3342, + 0x1722fb89, + 0x3342, + 0x17a2fb89, + 0x3342, + 0x1822fb89, + 0x3342, + 0x3eca, + 0x3342, + 0x187245, + 0x18c404, + 0x340e, + 0x12184e, + 0x14960a, + 0x142c0e, + 0x1374ce, + 0x1ac40c, + 0xb784c, + 0x127309, + 0x169bc9, + 0xa6c9, + 0x5a0c9, + 0x4189, + 0x1ba549, + 0x10e78d, + 0x5189, + 0xbac9, + 0x116a84, + 0x118c44, + 0x13aa44, + 0x18e7c4, + 0x79004, + 0x98884, + 0x477c4, + 0x143c44, + 0x1fbc4, + 0x157cd03, + 0xa6c2, + 0x121cc3, + 0x2e02, + 0x200742, + 0x202c42, + 0x209d42, + 0x208882, + 0x202542, + 0x200342, + 0x200a02, + 0x20be03, + 0x237583, + 0x30e843, + 0x26ff83, + 0x20ec83, + 0x241d03, + 0xcd588, + 0x20be03, + 0x237583, + 0x20ec83, + 0x241d03, + 0x1a9c3, + 0x30e843, + 0x6ff84, + 0x200742, + 0x2b6c03, + 0x1be0be03, + 0x2394c7, + 0x30e843, + 0x20f003, + 0x226444, + 0x20ec83, + 0x241d03, + 0x22d50a, + 0x3a25c5, + 0x219543, + 0x20e982, + 0xcd588, + 0xcd588, + 0x2c42, + 0x129242, + 0x1c74660b, + 0x5fc5, + 0x1f8c5, + 0xf9fc6, + 0x1221c4, + 0x121b83, + 0xf45, + 0x117485, + 0xcd588, + 0x1c787, + 0xbe03, + 0x1ce41447, + 0x143146, + 0x1d149445, + 0x143207, + 0xf84a, + 0xf708, + 0x13407, + 0x68348, + 0x98647, + 0xf28f, + 0x47f87, + 0x4e786, + 0x12e350, + 0x12cf0f, + 0x1c009, + 0x106684, + 0x1d5432ce, + 0xa978c, + 0xf420a, + 0x785c7, + 0xd9f8a, + 0x11e909, + 0xada0c, + 0x1bdf0a, + 0x5cc0a, + 0xf89, + 0x106606, + 0x7868a, + 0x11d84a, + 0x9a209, + 0xd1fc8, + 0xd22c6, + 0xd6c0d, + 0xb7cc5, + 0xab07, + 0xfb709, + 0x1a3207, + 0x10bd94, + 0xfdb4b, + 0x7e78a, + 0xa358d, + 0xf283, + 0xf283, + 0x29546, + 0xf283, + 0xb6c03, + 0xcd588, + 0x2c42, + 0x54a04, + 0x5da83, + 0xf5805, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x205583, + 0x20be03, + 0x237583, + 0x203d43, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x294a83, + 0x203f83, + 0x205583, + 0x28b304, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x235cc3, + 0x20be03, + 0x237583, + 0x208883, + 0x203d43, + 0x30e843, + 0x26ff84, + 0x3c32c3, + 0x20be83, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x219543, + 0x20c283, + 0x1f20be03, + 0x237583, + 0x24e683, + 0x30e843, + 0x211343, + 0x20be83, + 0x241d03, + 0x2057c3, + 0x317f04, + 0xcd588, + 0x1fa0be03, + 0x237583, + 0x2a77c3, + 0x30e843, + 0x21f743, + 0x226444, + 0x20ec83, + 0x241d03, + 0x232f43, + 0xcd588, + 0x2020be03, + 0x237583, + 0x203d43, + 0x20ae43, + 0x241d03, + 0xcd588, + 0x14328c7, + 0x2b6c03, + 0x20be03, + 0x237583, + 0x30e843, + 0x26ff84, + 0x226444, + 0x20ec83, + 0x241d03, + 0x142f49, + 0x117485, + 0x15da87, + 0x10bfcb, + 0xd2f44, + 0xb7cc5, + 0x1455908, + 0xa7e0d, + 0x21676405, + 0x8f204, + 0x10ec3, + 0xf5e85, + 0x31cc45, + 0xcd588, + 0xf282, + 0x3a283, + 0xefec6, + 0x31b0c8, + 0x397247, + 0x28b304, + 0x346046, + 0x3699c6, + 0xcd588, + 0x312ec3, + 0x23aec9, + 0x265555, + 0x6555f, + 0x20be03, + 0x3ba052, + 0x10db06, + 0x14fc85, + 0x12c4a, + 0x2e449, + 0x3b9e0f, + 0x2d2484, + 0x225285, 
+ 0x2fdfd0, + 0x38cb87, + 0x20ae43, + 0x310f08, + 0x157146, + 0x2a47ca, + 0x22d244, + 0x2e8943, + 0x3a25c6, + 0x20e982, + 0x3987cb, + 0xae43, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x2f0ec3, + 0x202c42, + 0xee203, + 0x20ec83, + 0x241d03, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x241d03, + 0x20be03, + 0x237583, + 0x30e843, + 0x20f003, + 0x227b03, + 0x241d03, + 0x202c42, + 0x20be03, + 0x237583, + 0x20ec83, + 0xae43, + 0x241d03, + 0x200742, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x1f8c5, + 0x28b304, + 0x20be03, + 0x237583, + 0x21a484, + 0x20ec83, + 0x241d03, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0xaff03, + 0x241d03, + 0x20be03, + 0x237583, + 0x203d43, + 0x204d03, + 0x21f743, + 0x20ec83, + 0xae43, + 0x241d03, + 0x202c42, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x357d43, + 0x3cf83, + 0xf003, + 0x20ec83, + 0x241d03, + 0x31350a, + 0x333049, + 0x3524cb, + 0x352b4a, + 0x35acca, + 0x3686cb, + 0x37bfca, + 0x3825ca, + 0x38bf0a, + 0x38c18b, + 0x3afbc9, + 0x3b1a0a, + 0x3b1d8b, + 0x3bc98b, + 0x3c5b0a, + 0x20be03, + 0x237583, + 0x203d43, + 0x21f743, + 0x20ec83, + 0xae43, + 0x241d03, + 0x18754b, + 0x60308, + 0x14f209, + 0xcd588, + 0x20be03, + 0x266004, + 0x206302, + 0x226444, + 0x201485, + 0x205583, + 0x28b304, + 0x20be03, + 0x23d744, + 0x237583, + 0x254a04, + 0x2d2484, + 0x26ff84, + 0x20be83, + 0x20ec83, + 0x241d03, + 0x252385, + 0x235cc3, + 0x219543, + 0x2b5d83, + 0x2702c4, + 0x2020c4, + 0x3c0885, + 0xcd588, + 0x320f04, + 0x39db46, + 0x2010c4, + 0x202c42, + 0x38fd87, + 0x256087, + 0x252944, + 0x25e5c5, + 0x2dba05, + 0x232585, + 0x26ff84, + 0x27a688, + 0x23c806, + 0x3c5f88, + 0x278185, + 0x2d8345, + 0x251304, + 0x241d03, + 0x2e9484, + 0x367486, + 0x3a26c3, + 0x2702c4, + 0x362205, + 0x26e984, + 0x23fd84, + 0x20e982, + 0x397746, + 0x3a4b06, + 0x301805, + 0x200742, + 0x2b6c03, + 0x27e02c42, + 0x207344, + 0x202542, + 0x21f743, + 0x23a3c2, + 0x20ec83, + 0x200342, + 0x207c03, + 0x203f83, + 0xcd588, + 0xcd588, + 0x30e843, + 0x200742, + 0x28a02c42, + 0x30e843, + 0x2574c3, + 0x3c32c3, + 0x2168c4, + 0x20ec83, + 0x241d03, + 0xcd588, + 0x200742, + 0x29202c42, + 0x20be03, + 0x20ec83, + 0xae43, + 0x241d03, + 0x982, + 0x20c202, + 0x230882, + 0x20f003, + 0x2e2d83, + 0x200742, + 0x117485, + 0xcd588, + 0x15da87, + 0x202c42, + 0x237583, + 0x254a04, + 0x206c03, + 0x30e843, + 0x204d03, + 0x21f743, + 0x20ec83, + 0x207783, + 0x241d03, + 0x2388c3, + 0xb5cd3, + 0x1b9994, + 0x15da87, + 0x102dc6, + 0x5c60b, + 0x29546, + 0x5a3c7, + 0x2809, + 0x195d4a, + 0x8820d, + 0x12078c, + 0x104fca, + 0x14a345, + 0xf888, + 0x7dac6, + 0x6ff06, + 0x157206, + 0x20a6c2, + 0x1c170c, + 0x18c5c7, + 0x282d1, + 0x20be03, + 0x682c5, + 0x8808, + 0x22644, + 0x2a507646, + 0xb3106, + 0xd95c6, + 0x8d5ca, + 0x19dac3, + 0x2aa48c44, + 0x27c5, + 0x15cc83, + 0x2ae38a07, + 0x1aec5, + 0xcbcc, + 0xed348, + 0x6f6cb, + 0x2b25168c, + 0x140d6c3, + 0xb8888, + 0x9db09, + 0x3ff48, + 0x14208c6, + 0x2b76d609, + 0xd7e4a, + 0x10d08, + 0xf9fc8, + 0x1fbc4, + 0x118b45, + 0x6f807, + 0x2ba6f803, + 0x2bf39c86, + 0x2c2e9d04, + 0x2c790207, + 0xf9fc4, + 0xf9fc4, + 0xf9fc4, + 0xf9fc4, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x200742, + 0x202c42, + 0x30e843, + 0x207d02, + 0x20ec83, + 0x241d03, + 0x207c03, + 0x37158f, + 0x37194e, + 0xcd588, + 0x20be03, + 0x49a07, + 0x237583, + 0x30e843, + 0x214bc3, + 0x20ec83, + 0x241d03, + 0x220443, + 0x322887, + 0x203d02, + 0x292889, + 0x200602, + 0x24a2cb, + 
0x2cf44a, + 0x28d009, + 0x200182, + 0x3418c6, + 0x235295, + 0x24a415, + 0x236793, + 0x24a993, + 0x203942, + 0x222dc5, + 0x3ab48c, + 0x27410b, + 0x2a2205, + 0x203402, + 0x2f2542, + 0x37e706, + 0x200282, + 0x261bc6, + 0x212ecd, + 0x21ac4c, + 0x228ec4, + 0x200cc2, + 0x2149c2, + 0x310d88, + 0x2023c2, + 0x211446, + 0x35c704, + 0x235455, + 0x236913, + 0x2108c3, + 0x32508a, + 0x20df47, + 0x30eec9, + 0x2d9d07, + 0x314902, + 0x200882, + 0x3b4b46, + 0x2099c2, + 0xcd588, + 0x210702, + 0x200302, + 0x217a07, + 0x336087, + 0x21c485, + 0x2004c2, + 0x2da6c7, + 0x220488, + 0x204002, + 0x2f21c2, + 0x230502, + 0x203cc2, + 0x23e988, + 0x20bf83, + 0x25dc48, + 0x20bf8d, + 0x237c03, + 0x23bc48, + 0x237c0f, + 0x237fce, + 0x38feca, + 0x2d1311, + 0x2d1790, + 0x38360d, + 0x38394c, + 0x3452c7, + 0x325207, + 0x346109, + 0x228fc2, + 0x200782, + 0x25becc, + 0x25c1cb, + 0x203c02, + 0x2b2506, + 0x2010c2, + 0x202642, + 0x2efa02, + 0x202c42, + 0x231fc4, + 0x240647, + 0x230a42, + 0x245207, + 0x2475c7, + 0x21bc02, + 0x21b282, + 0x2498c5, + 0x204ac2, + 0x2e72ce, + 0x2a384d, + 0x237583, + 0x28400e, + 0x3b868d, + 0x348003, + 0x202ec2, + 0x2817c4, + 0x238c42, + 0x202e82, + 0x372a45, + 0x37b407, + 0x24d902, + 0x208882, + 0x254607, + 0x257688, + 0x329982, + 0x29df86, + 0x25bd4c, + 0x25c08b, + 0x212c42, + 0x26208f, + 0x262450, + 0x26284f, + 0x262c15, + 0x263154, + 0x26364e, + 0x2639ce, + 0x263d4f, + 0x26410e, + 0x264494, + 0x264993, + 0x264e4d, + 0x2755c9, + 0x289843, + 0x201802, + 0x215f45, + 0x206c06, + 0x202542, + 0x344e47, + 0x30e843, + 0x201402, + 0x36dfc8, + 0x2d1551, + 0x2d1990, + 0x200c42, + 0x270f87, + 0x205842, + 0x341287, + 0x209602, + 0x348f89, + 0x37e6c7, + 0x2a4b48, + 0x307486, + 0x2e2c83, + 0x326e05, + 0x20e402, + 0x202682, + 0x3b4f45, + 0x3c1485, + 0x200f82, + 0x214d03, + 0x26ea07, + 0x208007, + 0x2085c2, + 0x22e684, + 0x20b4c3, + 0x20b4c9, + 0x20f108, + 0x201542, + 0x204482, + 0x2e3547, + 0x33d705, + 0x293988, + 0x222a87, + 0x201cc3, + 0x298106, + 0x38348d, + 0x38380c, + 0x2e0646, + 0x200482, + 0x26f582, + 0x201e42, + 0x237a8f, + 0x237e8e, + 0x2dba87, + 0x200b82, + 0x3517c5, + 0x3517c6, + 0x203282, + 0x205a02, + 0x28ad86, + 0x292ac3, + 0x3411c6, + 0x2c3ec5, + 0x2c3ecd, + 0x2c4495, + 0x2c4e8c, + 0x2c59cd, + 0x2c5d92, + 0x20d682, + 0x26cd82, + 0x2047c2, + 0x21ce86, + 0x2fc586, + 0x201382, + 0x206c86, + 0x20c502, + 0x20d245, + 0x2013c2, + 0x2a3949, + 0x21d70c, + 0x21da4b, + 0x200342, + 0x258508, + 0x20cb42, + 0x206f02, + 0x271946, + 0x22fb05, + 0x31f507, + 0x250d85, + 0x2982c5, + 0x249a82, + 0x204c02, + 0x20b182, + 0x2dc107, + 0x24f4cd, + 0x24f84c, + 0x34f687, + 0x22bac2, + 0x201f82, + 0x23d488, + 0x343888, + 0x303d48, + 0x30cdc4, + 0x2b4507, + 0x2e3c83, + 0x24e882, + 0x204882, + 0x2e6b09, + 0x2f7387, + 0x2057c2, + 0x271d45, + 0x248602, + 0x209942, + 0x2bca43, + 0x2bca46, + 0x2f0602, + 0x2f1e82, + 0x201442, + 0x3b33c6, + 0x3454c7, + 0x205e42, + 0x200382, + 0x25da8f, + 0x283e4d, + 0x38b8ce, + 0x3b850c, + 0x2017c2, + 0x200502, + 0x3072c5, + 0x311d86, + 0x209002, + 0x208102, + 0x200982, + 0x222a04, + 0x2dcdc4, + 0x3c23c6, + 0x200a02, + 0x2b7307, + 0x231d03, + 0x231d08, + 0x2326c8, + 0x243e07, + 0x2ecbc6, + 0x204042, + 0x23e683, + 0x23e687, + 0x28a8c6, + 0x2f3045, + 0x30d148, + 0x206a42, + 0x341687, + 0x20fd82, + 0x2f4682, + 0x20c142, + 0x2f1149, + 0x20a402, + 0x201742, + 0x24adc3, + 0x325ec7, + 0x2040c2, + 0x21d88c, + 0x21db8b, + 0x2e06c6, + 0x35cbc5, + 0x227882, + 0x201c42, + 0x2ba046, + 0x22e983, + 0x331547, + 0x20cb82, + 0x202a82, + 0x235115, + 0x24a5d5, + 0x236653, + 0x24ab13, + 0x25d207, + 0x274548, + 0x274550, + 
0x28744f, + 0x373ad3, + 0x28cdd2, + 0x292450, + 0x2b350f, + 0x2fd6d2, + 0x3af491, + 0x2af493, + 0x3938d2, + 0x2c3b0f, + 0x2cd74e, + 0x2cf252, + 0x2d09d1, + 0x2d3b0f, + 0x2d528e, + 0x2dc811, + 0x2dd7d0, + 0x2ed512, + 0x2f0f51, + 0x2f2206, + 0x2f3907, + 0x38e407, + 0x200d02, + 0x27efc5, + 0x3713c7, + 0x230882, + 0x20f6c2, + 0x22d0c5, + 0x200443, + 0x200446, + 0x24f68d, + 0x24f9cc, + 0x206d02, + 0x3ab30b, + 0x273fca, + 0x22358a, + 0x2b9489, + 0x2e530b, + 0x222bcd, + 0x2fe44c, + 0x2ec88a, + 0x27500c, + 0x294d4b, + 0x2a204c, + 0x2f968b, + 0x2b9e83, + 0x2f4f06, + 0x3b9942, + 0x2f3dc2, + 0x20e343, + 0x201ac2, + 0x207203, + 0x24ec86, + 0x262dc7, + 0x2ad706, + 0x2f6b48, + 0x343588, + 0x2ca146, + 0x211d82, + 0x3011cd, + 0x30150c, + 0x2d2547, + 0x304e07, + 0x23c242, + 0x219742, + 0x23e602, + 0x257a42, + 0x202c42, + 0x20ec83, + 0x241d03, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x226444, + 0x20ec83, + 0x241d03, + 0x207c03, + 0x200742, + 0x201482, + 0x2e68ecc5, + 0x2ea8e4c5, + 0x2efb3086, + 0xcd588, + 0x2f2afb05, + 0x202c42, + 0x209d42, + 0x2f726285, + 0x2fa7cb85, + 0x2fe7d647, + 0x302867c9, + 0x30667d84, + 0x202542, + 0x201402, + 0x30b0dec5, + 0x30e95f49, + 0x31327988, + 0x316ac205, + 0x31af0707, + 0x31e21cc8, + 0x322def85, + 0x3266d246, + 0x32b6d849, + 0x32ed4ec8, + 0x332bf988, + 0x3369658a, + 0x33a75e04, + 0x33f7c545, + 0x342bc308, + 0x34727e05, + 0x217f42, + 0x34a061c3, + 0x34ea2606, + 0x35311408, + 0x356eee46, + 0x35b643c8, + 0x35eb6306, + 0x363c2f44, + 0x2032c2, + 0x366f1587, + 0x36aa8644, + 0x36e77e87, + 0x3723ecc7, + 0x200342, + 0x3769ae85, + 0x37a403c4, + 0x37ee1787, + 0x383a3387, + 0x38681606, + 0x38a7d205, + 0x38e96047, + 0x392e5a48, + 0x396162c7, + 0x39b94949, + 0x39ec51c5, + 0x3a2b4107, + 0x3a68e306, + 0x3aa941c8, + 0x227d8d, + 0x251989, + 0x272fcb, + 0x27ac8b, + 0x2a78cb, + 0x2da98b, + 0x311f8b, + 0x31224b, + 0x312889, + 0x31378b, + 0x313a4b, + 0x313fcb, + 0x314c8a, + 0x3151ca, + 0x3157cc, + 0x31938b, + 0x319dca, + 0x33080a, + 0x33b28e, + 0x33be8e, + 0x33c20a, + 0x33e14a, + 0x33eb4b, + 0x33ee0b, + 0x33fa8b, + 0x35edcb, + 0x35f3ca, + 0x36008b, + 0x36034a, + 0x3605ca, + 0x36084a, + 0x37d74b, + 0x3856cb, + 0x388f8e, + 0x38930b, + 0x391f4b, + 0x392e0b, + 0x39a6ca, + 0x39a949, + 0x39ab8a, + 0x39c6ca, + 0x3b06cb, + 0x3b204b, + 0x3b2a0a, + 0x3b390b, + 0x3b904b, + 0x3c554b, + 0x3ae7fd48, + 0x3b287989, + 0x3b69d989, + 0x3bad8988, + 0x34c805, + 0x200583, + 0x22a3c4, + 0x217c05, + 0x267ac6, + 0x26cfc5, + 0x286284, + 0x344d48, + 0x30b505, + 0x290604, + 0x2064c7, + 0x29cf0a, + 0x266b4a, + 0x2dbb87, + 0x20c4c7, + 0x2fd2c7, + 0x282187, + 0x2f8c45, + 0x3b6e46, + 0x386007, + 0x244e44, + 0x2df546, + 0x2df446, + 0x204745, + 0x3389c4, + 0x2975c6, + 0x29bfc7, + 0x22df06, + 0x27c8c7, + 0x250803, + 0x3912c6, + 0x234f05, + 0x27d747, + 0x26a84a, + 0x26e7c4, + 0x21bd88, + 0x2b8a49, + 0x2e0d07, + 0x319c46, + 0x258788, + 0x2ef589, + 0x30f084, + 0x33a484, + 0x29ef05, + 0x2ba648, + 0x2c2807, + 0x2b3e49, + 0x22dc08, + 0x2f2306, + 0x310806, + 0x297f88, + 0x362bc6, + 0x28e4c5, + 0x2816c6, + 0x278988, + 0x237986, + 0x25af0b, + 0x2c7c06, + 0x299b0d, + 0x369405, + 0x2a8506, + 0x21f085, + 0x331b49, + 0x3a6cc7, + 0x318308, + 0x2a1e46, + 0x298d89, + 0x33ffc6, + 0x26a7c5, + 0x24c486, + 0x288b86, + 0x2c6e49, + 0x31e2c6, + 0x29cc07, + 0x245e85, + 0x203983, + 0x25b085, + 0x299dc7, + 0x3ab746, + 0x369309, + 0x3b3086, + 0x26b146, + 0x213fc9, + 0x2810c9, + 0x29fac7, + 0x200908, + 0x2b2f49, + 0x27ec48, + 0x330a46, + 0x2d1d85, + 0x240c8a, + 0x26b1c6, + 0x239346, + 0x2cac05, + 0x2d4888, + 0x22b287, + 0x233f0a, + 0x254f86, + 
0x251dc5, + 0x3324c6, + 0x224507, + 0x319b07, + 0x2835c5, + 0x26a985, + 0x395a06, + 0x3b8c06, + 0x2fa846, + 0x2bc7c4, + 0x280449, + 0x288806, + 0x2c814a, + 0x227248, + 0x36fd08, + 0x266b4a, + 0x212505, + 0x29bf05, + 0x2dd048, + 0x2c9688, + 0x233907, + 0x2ba946, + 0x32bf88, + 0x309507, + 0x27f348, + 0x2b5706, + 0x281e48, + 0x295586, + 0x278307, + 0x33a206, + 0x2975c6, + 0x22ecca, + 0x232046, + 0x2d1d89, + 0x2ee586, + 0x35c00a, + 0x3c2f49, + 0x27dd86, + 0x2b8304, + 0x21600d, + 0x287c07, + 0x239c06, + 0x2bf845, + 0x340045, + 0x37f306, + 0x2e15c9, + 0x2d4407, + 0x279406, + 0x306886, + 0x286309, + 0x2a3204, + 0x242544, + 0x3c2a88, + 0x24f046, + 0x271348, + 0x2e8008, + 0x29f447, + 0x3b6589, + 0x2faa47, + 0x2af9ca, + 0x2e79cf, + 0x31194a, + 0x3070c5, + 0x278bc5, + 0x218b05, + 0x35c647, + 0x2240c3, + 0x200b08, + 0x21e646, + 0x21e749, + 0x2d8646, + 0x2c8547, + 0x298b49, + 0x318208, + 0x2cacc7, + 0x30eb43, + 0x34c885, + 0x224045, + 0x2bc60b, + 0x327ec4, + 0x2d6884, + 0x276bc6, + 0x30f407, + 0x38f4ca, + 0x206247, + 0x20c347, + 0x27cb85, + 0x3c6485, + 0x282609, + 0x2975c6, + 0x2060cd, + 0x31e505, + 0x2b18c3, + 0x20b003, + 0x3a4d45, + 0x351305, + 0x258788, + 0x27a347, + 0x2422c6, + 0x29d606, + 0x22de05, + 0x237847, + 0x3c1d47, + 0x23c6c7, + 0x37c5ca, + 0x391388, + 0x2bc7c4, + 0x257bc7, + 0x27bb07, + 0x33f086, + 0x2692c7, + 0x2a1808, + 0x395f08, + 0x329b06, + 0x20c708, + 0x2cfbc4, + 0x386006, + 0x370d86, + 0x36bd46, + 0x277806, + 0x29b244, + 0x282246, + 0x2be246, + 0x297986, + 0x2060c6, + 0x20aec6, + 0x2a1646, + 0x2421c8, + 0x385a88, + 0x2cdac8, + 0x26d1c8, + 0x2dcfc6, + 0x212305, + 0x39e746, + 0x2ac285, + 0x396f87, + 0x22dcc5, + 0x213c03, + 0x38e045, + 0x33dd04, + 0x20b005, + 0x247643, + 0x33c4c7, + 0x30d708, + 0x27c986, + 0x2c930d, + 0x278b86, + 0x296f45, + 0x222083, + 0x2bbcc9, + 0x2a3386, + 0x291706, + 0x271e04, + 0x3118c7, + 0x23a1c6, + 0x2d46c5, + 0x21af83, + 0x3be4c4, + 0x27bcc6, + 0x3b6f44, + 0x370e88, + 0x3459c9, + 0x2317c9, + 0x29ed0a, + 0x2a05cd, + 0x2118c7, + 0x2391c6, + 0x20f984, + 0x2867c9, + 0x284ac8, + 0x287806, + 0x241906, + 0x2692c7, + 0x2d9346, + 0x22a046, + 0x347086, + 0x23ed4a, + 0x221cc8, + 0x22f885, + 0x2a2fc9, + 0x27f84a, + 0x2ff648, + 0x29b6c8, + 0x291688, + 0x29d24c, + 0x3124c5, + 0x29d888, + 0x385d86, + 0x24c9c6, + 0x35eb07, + 0x206145, + 0x281845, + 0x231689, + 0x2139c7, + 0x21e705, + 0x22aec7, + 0x20b003, + 0x2c2d45, + 0x2151c8, + 0x280d47, + 0x29b589, + 0x2e9f85, + 0x33e384, + 0x2a0288, + 0x2f16c7, + 0x2cae88, + 0x3aac88, + 0x2e1dc5, + 0x21e546, + 0x29d706, + 0x3a7009, + 0x2cb3c7, + 0x2ac8c6, + 0x206e87, + 0x239fc3, + 0x267d84, + 0x2cfcc5, + 0x2f3f84, + 0x246804, + 0x27ffc7, + 0x340d87, + 0x26dc84, + 0x29b3d0, + 0x31d507, + 0x3c6485, + 0x2561cc, + 0x224a04, + 0x2c4c88, + 0x278209, + 0x375886, + 0x240088, + 0x21ca84, + 0x276ec8, + 0x234506, + 0x22eb48, + 0x29a086, + 0x28854b, + 0x38ddc5, + 0x2cfb48, + 0x2173c4, + 0x345e0a, + 0x29b589, + 0x33a106, + 0x218bc8, + 0x25ed85, + 0x31dec4, + 0x2c4b86, + 0x23c588, + 0x27fd48, + 0x32c806, + 0x3c2344, + 0x240c06, + 0x2faac7, + 0x277d87, + 0x2692cf, + 0x205847, + 0x27de47, + 0x351685, + 0x35e345, + 0x29f789, + 0x382e46, + 0x27d885, + 0x2813c7, + 0x3934c8, + 0x2c7645, + 0x33a206, + 0x227088, + 0x2eee4a, + 0x3bf088, + 0x28ab07, + 0x2e7e06, + 0x2a2f86, + 0x202583, + 0x20de03, + 0x27fa09, + 0x2b2dc9, + 0x2c4a86, + 0x2e9f85, + 0x36bac8, + 0x218bc8, + 0x362d48, + 0x34710b, + 0x2c9547, + 0x309149, + 0x269548, + 0x350284, + 0x318648, + 0x28c889, + 0x2acbc5, + 0x35c547, + 0x267e05, + 0x27fc48, + 0x28eb4b, + 0x295d90, + 0x2a8145, + 0x21730c, + 
0x242485, + 0x27cc03, + 0x2b1d06, + 0x2bd884, + 0x2404c6, + 0x29bfc7, + 0x227104, + 0x248688, + 0x2009cd, + 0x2dfc05, + 0x211904, + 0x28f244, + 0x28f249, + 0x2ae548, + 0x31bc47, + 0x234588, + 0x280508, + 0x279705, + 0x21f2c7, + 0x279687, + 0x23ac87, + 0x26a989, + 0x346dc9, + 0x272146, + 0x383b46, + 0x269506, + 0x33b6c5, + 0x3aa4c4, + 0x3bd646, + 0x3c4c46, + 0x279748, + 0x2241cb, + 0x26e687, + 0x20f984, + 0x23a106, + 0x2a1b47, + 0x335405, + 0x3583c5, + 0x223884, + 0x346d46, + 0x3bd6c8, + 0x2867c9, + 0x2091c6, + 0x2848c8, + 0x2d4786, + 0x350908, + 0x2ce58c, + 0x2795c6, + 0x296c0d, + 0x29708b, + 0x29ccc5, + 0x3c1e87, + 0x31e3c6, + 0x3199c8, + 0x2721c9, + 0x329dc8, + 0x3c6485, + 0x208947, + 0x27ed48, + 0x24ff89, + 0x2a5586, + 0x24da8a, + 0x319748, + 0x329c0b, + 0x2ccd8c, + 0x276fc8, + 0x27b286, + 0x21dfc8, + 0x2eeac7, + 0x205989, + 0x2f084d, + 0x2974c6, + 0x31dd48, + 0x385949, + 0x2bc8c8, + 0x281f48, + 0x2bec8c, + 0x2bff87, + 0x2c0a47, + 0x26a7c5, + 0x2b4807, + 0x393388, + 0x2c4c06, + 0x20904c, + 0x2ec1c8, + 0x2c8c48, + 0x250286, + 0x223dc7, + 0x272344, + 0x26d1c8, + 0x2b6d0c, + 0x28430c, + 0x307145, + 0x2047c7, + 0x3c22c6, + 0x223d46, + 0x331d08, + 0x367784, + 0x22df0b, + 0x2b744b, + 0x2e7e06, + 0x200847, + 0x322385, + 0x271285, + 0x22e046, + 0x25ed45, + 0x327e85, + 0x2c6c87, + 0x270a09, + 0x3b8dc4, + 0x25f405, + 0x2de045, + 0x2add08, + 0x2da405, + 0x287109, + 0x2c9ac7, + 0x2c9acb, + 0x24fbc6, + 0x241f09, + 0x338908, + 0x291f85, + 0x23ad88, + 0x346e08, + 0x2570c7, + 0x208e47, + 0x280049, + 0x22ea87, + 0x2aa389, + 0x2b7dcc, + 0x394848, + 0x2d4d09, + 0x2d6447, + 0x2805c9, + 0x340ec7, + 0x2cce88, + 0x3b6745, + 0x385f86, + 0x2bf888, + 0x30d3c8, + 0x27f709, + 0x327ec7, + 0x256d85, + 0x2301c9, + 0x201c46, + 0x28e304, + 0x326006, + 0x311288, + 0x328747, + 0x2243c8, + 0x20c7c9, + 0x325b87, + 0x29d0c6, + 0x3c1f44, + 0x38e0c9, + 0x21f148, + 0x250147, + 0x2adf86, + 0x224106, + 0x2392c4, + 0x26d846, + 0x20af83, + 0x38d949, + 0x38dd86, + 0x20ca45, + 0x29d606, + 0x2c7205, + 0x27f1c8, + 0x2ee987, + 0x2eb146, + 0x3262c6, + 0x36fd08, + 0x29f907, + 0x297505, + 0x29b1c8, + 0x3b2448, + 0x319748, + 0x242345, + 0x386006, + 0x231589, + 0x3a6e84, + 0x2c708b, + 0x229d4b, + 0x22f789, + 0x20b003, + 0x25cf45, + 0x22abc6, + 0x242cc8, + 0x34e904, + 0x27c986, + 0x37c709, + 0x2f0405, + 0x2c6bc6, + 0x2f16c6, + 0x20c984, + 0x2a86ca, + 0x20c988, + 0x30d3c6, + 0x2934c5, + 0x331287, + 0x351547, + 0x21e544, + 0x229f87, + 0x22dc84, + 0x22dc86, + 0x200b43, + 0x26a985, + 0x37dc85, + 0x364648, + 0x257d85, + 0x279309, + 0x26d007, + 0x26d00b, + 0x2a240c, + 0x2a2a0a, + 0x2f0707, + 0x205c83, + 0x2ebcc8, + 0x242505, + 0x2c76c5, + 0x34c944, + 0x2ccd86, + 0x278206, + 0x26d887, + 0x23f8cb, + 0x29b244, + 0x2d7404, + 0x2c2784, + 0x2c6986, + 0x227104, + 0x2ba748, + 0x34c745, + 0x258a45, + 0x362c87, + 0x3c1f89, + 0x351305, + 0x37f30a, + 0x245d89, + 0x2d698a, + 0x23ee89, + 0x3a5104, + 0x306945, + 0x2d9448, + 0x2e184b, + 0x29ef05, + 0x2f3206, + 0x247684, + 0x279846, + 0x325a09, + 0x2a1c47, + 0x3b3248, + 0x2a0946, + 0x2faa47, + 0x27fd48, + 0x37f886, + 0x334f84, + 0x371c87, + 0x361205, + 0x373507, + 0x21c984, + 0x31e346, + 0x2e5bc8, + 0x297248, + 0x2e44c7, + 0x24e388, + 0x295645, + 0x20ae44, + 0x266a48, + 0x24e484, + 0x208e45, + 0x2f8e44, + 0x309607, + 0x2888c7, + 0x280708, + 0x2cb006, + 0x257d05, + 0x279108, + 0x3bf288, + 0x29ec49, + 0x22a046, + 0x233f88, + 0x345c8a, + 0x335488, + 0x2def85, + 0x225446, + 0x245c48, + 0x208a0a, + 0x229207, + 0x285dc5, + 0x28e508, + 0x2cc2c4, + 0x2d4906, + 0x2c0dc8, + 0x20aec6, + 0x31fc48, + 0x25b247, + 0x2063c6, + 
0x2b8304, + 0x2a6b87, + 0x2b0d44, + 0x3259c7, + 0x2a524d, + 0x22f805, + 0x2e13cb, + 0x284586, + 0x258608, + 0x248644, + 0x2ee246, + 0x27bcc6, + 0x21e307, + 0x2968cd, + 0x24b947, + 0x2b1808, + 0x286985, + 0x364e48, + 0x2c2786, + 0x2956c8, + 0x354486, + 0x336b47, + 0x2c5689, + 0x353e07, + 0x287ac8, + 0x2733c5, + 0x21c508, + 0x223c85, + 0x2f7505, + 0x23f105, + 0x24c4c3, + 0x277884, + 0x28e705, + 0x36d849, + 0x36b906, + 0x2a1908, + 0x208c05, + 0x2b46c7, + 0x29f14a, + 0x2c6b09, + 0x288a8a, + 0x2cdb48, + 0x22ad0c, + 0x28144d, + 0x34a703, + 0x31fb48, + 0x3be485, + 0x2eec06, + 0x318086, + 0x2deac5, + 0x206f89, + 0x3ab885, + 0x279108, + 0x25e046, + 0x3532c6, + 0x2a0149, + 0x3a0f87, + 0x28ee06, + 0x29f0c8, + 0x36bc48, + 0x2d8b87, + 0x2be3ce, + 0x2c29c5, + 0x24fe85, + 0x20adc8, + 0x3269c7, + 0x208f82, + 0x2be804, + 0x2403ca, + 0x250208, + 0x346f46, + 0x298c88, + 0x29d706, + 0x31da88, + 0x2ac8c8, + 0x2f74c4, + 0x2b4a85, + 0x6010c4, + 0x6010c4, + 0x6010c4, + 0x200a43, + 0x223f86, + 0x2795c6, + 0x29c98c, + 0x201343, + 0x21c986, + 0x200b04, + 0x2a3308, + 0x37c545, + 0x2404c6, + 0x2bc408, + 0x2cef86, + 0x2eb0c6, + 0x339f08, + 0x2cfd47, + 0x22e849, + 0x2a714a, + 0x211644, + 0x22dcc5, + 0x2b3e05, + 0x2c5406, + 0x211906, + 0x29c706, + 0x2f8686, + 0x22e984, + 0x22e98b, + 0x22d744, + 0x242085, + 0x2ab5c5, + 0x29f506, + 0x369808, + 0x281307, + 0x38dd04, + 0x2076c3, + 0x2cbdc5, + 0x22dac7, + 0x28120b, + 0x364547, + 0x2bc308, + 0x2b4bc7, + 0x26be06, + 0x251c48, + 0x26f24b, + 0x217b46, + 0x216b09, + 0x26f3c5, + 0x30eb43, + 0x2c6bc6, + 0x25b148, + 0x20c843, + 0x22dbc3, + 0x27fd46, + 0x29d706, + 0x36808a, + 0x27b2c5, + 0x27bb0b, + 0x29d54b, + 0x247b03, + 0x220043, + 0x2af944, + 0x2a88c7, + 0x25b1c4, + 0x240084, + 0x385c04, + 0x335788, + 0x293408, + 0x20dd89, + 0x2c5248, + 0x23f387, + 0x2060c6, + 0x2a154f, + 0x2c2b06, + 0x2cd084, + 0x29324a, + 0x22d9c7, + 0x2b0e46, + 0x28e349, + 0x20dd05, + 0x364785, + 0x20de46, + 0x21c643, + 0x2cc309, + 0x221e46, + 0x20c589, + 0x38f4c6, + 0x26a985, + 0x307545, + 0x205843, + 0x2a8a08, + 0x31be07, + 0x21e644, + 0x2a3188, + 0x24c744, + 0x39a286, + 0x2b1d06, + 0x2445c6, + 0x2cfa09, + 0x2c7645, + 0x2975c6, + 0x21afc9, + 0x393086, + 0x2a1646, + 0x395846, + 0x203a45, + 0x2f8e46, + 0x336b44, + 0x3b6745, + 0x2bf884, + 0x2b2206, + 0x31e4c4, + 0x200d03, + 0x284b85, + 0x238888, + 0x2509c7, + 0x34e989, + 0x285cc8, + 0x297d51, + 0x2f174a, + 0x2e7d47, + 0x396246, + 0x200b04, + 0x2bf988, + 0x26d9c8, + 0x297f0a, + 0x286ecd, + 0x24c486, + 0x33a006, + 0x2a6c46, + 0x283447, + 0x2b18c5, + 0x341987, + 0x2009c5, + 0x2c9c04, + 0x2a7586, + 0x26d6c7, + 0x2cc00d, + 0x245b87, + 0x344c48, + 0x279409, + 0x225346, + 0x2a5505, + 0x2fa084, + 0x311386, + 0x21e446, + 0x250386, + 0x299508, + 0x21d683, + 0x208d83, + 0x341f45, + 0x257e46, + 0x2ac885, + 0x2a0b48, + 0x29c18a, + 0x39e284, + 0x2a3308, + 0x291688, + 0x29f347, + 0x208cc9, + 0x2bc008, + 0x286847, + 0x385e86, + 0x20aeca, + 0x311408, + 0x3a6b09, + 0x2ae608, + 0x228089, + 0x396107, + 0x2fea85, + 0x347306, + 0x2c4a88, + 0x27a888, + 0x28de08, + 0x38ab08, + 0x242085, + 0x203bc4, + 0x236ec8, + 0x209784, + 0x23ec84, + 0x26a985, + 0x290647, + 0x3c1d49, + 0x21e107, + 0x214045, + 0x276dc6, + 0x35bc86, + 0x211a84, + 0x2a0486, + 0x257b44, + 0x2a11c6, + 0x3c1b06, + 0x2181c6, + 0x3c6485, + 0x2a0a07, + 0x205c83, + 0x216e89, + 0x36fb08, + 0x2866c4, + 0x2866cd, + 0x297348, + 0x2ddc88, + 0x3a6a86, + 0x2c5789, + 0x2c6b09, + 0x325705, + 0x29c28a, + 0x252a0a, + 0x25e6cc, + 0x25e846, + 0x277c06, + 0x2c2c86, + 0x372b09, + 0x2eee46, + 0x29f946, + 0x3ab946, + 0x26d1c8, + 0x24e386, + 
0x2cca0b, + 0x2907c5, + 0x258a45, + 0x277e85, + 0x3c2806, + 0x20ae83, + 0x244546, + 0x245b07, + 0x2bf845, + 0x3108c5, + 0x340045, + 0x2f83c6, + 0x3257c4, + 0x327886, + 0x2bad89, + 0x3c268c, + 0x2c9948, + 0x23c504, + 0x2f8b46, + 0x284686, + 0x25b148, + 0x218bc8, + 0x3c2589, + 0x331287, + 0x24ed89, + 0x37ba46, + 0x230604, + 0x20d804, + 0x280344, + 0x27fd48, + 0x3c1b8a, + 0x351286, + 0x35e207, + 0x36e207, + 0x242005, + 0x2b3dc4, + 0x28c846, + 0x2b1906, + 0x23a283, + 0x36f947, + 0x3aab88, + 0x32584a, + 0x22ca48, + 0x3643c8, + 0x31e505, + 0x29cdc5, + 0x26e785, + 0x2423c6, + 0x243286, + 0x340cc5, + 0x38db89, + 0x2b3bcc, + 0x26e847, + 0x297f88, + 0x2dee05, + 0x6010c4, + 0x24d184, + 0x280e84, + 0x21b846, + 0x29e4ce, + 0x364807, + 0x283645, + 0x3a6e0c, + 0x2f8f47, + 0x26d647, + 0x2f4449, + 0x21be49, + 0x285dc5, + 0x36fb08, + 0x231589, + 0x319605, + 0x2bf788, + 0x221fc6, + 0x266cc6, + 0x3c2f44, + 0x28b688, + 0x225503, + 0x3875c4, + 0x2cbe45, + 0x388307, + 0x228785, + 0x345b49, + 0x2ab04d, + 0x2b0486, + 0x207704, + 0x2ba8c8, + 0x27084a, + 0x228b87, + 0x31ce85, + 0x23b3c3, + 0x29d70e, + 0x2a8b0c, + 0x2ff747, + 0x29e687, + 0x217b83, + 0x2eee85, + 0x280e85, + 0x299048, + 0x2963c9, + 0x23c406, + 0x25b1c4, + 0x2e7c86, + 0x23390b, + 0x38320c, + 0x33a8c7, + 0x2cccc5, + 0x3b2348, + 0x2d8945, + 0x293247, + 0x2f1587, + 0x245945, + 0x20ae83, + 0x335ac4, + 0x22a385, + 0x3b8cc5, + 0x3b8cc6, + 0x2b5308, + 0x26d6c7, + 0x318386, + 0x205c06, + 0x23f046, + 0x27e509, + 0x21f3c7, + 0x250646, + 0x383386, + 0x275d06, + 0x2a8605, + 0x3c53c6, + 0x3746c5, + 0x2da488, + 0x28ff4b, + 0x28c586, + 0x36e244, + 0x2e0409, + 0x26d004, + 0x221f48, + 0x326107, + 0x281e44, + 0x2bb308, + 0x2c0844, + 0x2a8644, + 0x286605, + 0x2dfc46, + 0x3356c7, + 0x27f283, + 0x29d185, + 0x32ce84, + 0x24fec6, + 0x325788, + 0x2b6c05, + 0x28fc09, + 0x2303c5, + 0x21c988, + 0x2312c7, + 0x38de88, + 0x2ba487, + 0x27df09, + 0x2820c6, + 0x305706, + 0x2b3084, + 0x2d7345, + 0x300a4c, + 0x277e87, + 0x278a87, + 0x36e0c8, + 0x2b0486, + 0x271484, + 0x30a244, + 0x27fec9, + 0x2c2d86, + 0x282687, + 0x277784, + 0x24d786, + 0x317bc5, + 0x2cab47, + 0x2cc986, + 0x24d949, + 0x383047, + 0x2692c7, + 0x29ffc6, + 0x24d6c5, + 0x27d1c8, + 0x221cc8, + 0x348546, + 0x2b6c45, + 0x349e86, + 0x206543, + 0x298ec9, + 0x29c48e, + 0x2ba1c8, + 0x24c848, + 0x34834b, + 0x28fe46, + 0x211584, + 0x281044, + 0x29c58a, + 0x217207, + 0x250705, + 0x216b09, + 0x2be305, + 0x23ecc7, + 0x24e304, + 0x2a9a47, + 0x2e7f08, + 0x2e0dc6, + 0x24c589, + 0x2bc10a, + 0x217186, + 0x296e86, + 0x2ab545, + 0x3898c5, + 0x347ac7, + 0x24cf08, + 0x317b08, + 0x2f74c6, + 0x3075c5, + 0x21168e, + 0x2bc7c4, + 0x298fc5, + 0x276749, + 0x382c48, + 0x28aa46, + 0x29accc, + 0x29bd90, + 0x29e10f, + 0x29f688, + 0x2f0707, + 0x3c6485, + 0x28e705, + 0x335549, + 0x28e709, + 0x240d06, + 0x29ef87, + 0x2d7245, + 0x34d589, + 0x33f106, + 0x2eec8d, + 0x280209, + 0x240084, + 0x2b9f48, + 0x236f89, + 0x351446, + 0x2ebec5, + 0x305706, + 0x3b3109, + 0x277608, + 0x212305, + 0x28b684, + 0x29ae8b, + 0x351305, + 0x242d46, + 0x281786, + 0x285206, + 0x28f64b, + 0x28fd09, + 0x205b45, + 0x396e87, + 0x2f16c6, + 0x240206, + 0x280c08, + 0x2dfd49, + 0x344a0c, + 0x22d8c8, + 0x308ec6, + 0x32c803, + 0x32a506, + 0x27ddc5, + 0x27be48, + 0x306fc6, + 0x2cad88, + 0x2062c5, + 0x27a585, + 0x365288, + 0x31dc07, + 0x317fc7, + 0x26d887, + 0x240088, + 0x2c5508, + 0x2b1206, + 0x2b2047, + 0x267c47, + 0x28f34a, + 0x256c83, + 0x3c2806, + 0x23c645, + 0x2403c4, + 0x279409, + 0x27de84, + 0x250a44, + 0x29a104, + 0x29e68b, + 0x31bd47, + 0x2118c5, + 0x295348, + 0x276dc6, + 0x276dc8, + 
0x27b206, + 0x28b5c5, + 0x28b885, + 0x28d446, + 0x28dbc8, + 0x28e288, + 0x2795c6, + 0x29518f, + 0x298990, + 0x369405, + 0x205c83, + 0x2306c5, + 0x309088, + 0x28e609, + 0x319748, + 0x24c408, + 0x238d88, + 0x31be07, + 0x276a89, + 0x2caf88, + 0x28dac4, + 0x299f88, + 0x2addc9, + 0x2b38c7, + 0x299f04, + 0x21e1c8, + 0x2a07ca, + 0x2aff86, + 0x24c486, + 0x229f09, + 0x29bfc7, + 0x2c83c8, + 0x345608, + 0x294048, + 0x25d345, + 0x38a705, + 0x258a45, + 0x280e45, + 0x37ffc7, + 0x20ae85, + 0x2bf845, + 0x206d86, + 0x319687, + 0x2e1787, + 0x2a0ac6, + 0x2ce085, + 0x242d46, + 0x24c685, + 0x2d70c8, + 0x2ff5c4, + 0x393106, + 0x334e84, + 0x31dec8, + 0x22f10a, + 0x27a34c, + 0x23fac5, + 0x283506, + 0x344bc6, + 0x341e06, + 0x308f44, + 0x317e85, + 0x27b047, + 0x29c049, + 0x2c6f47, + 0x6010c4, + 0x6010c4, + 0x31bbc5, + 0x2cb984, + 0x29a68a, + 0x276c46, + 0x251e84, + 0x204745, + 0x36c3c5, + 0x2b1804, + 0x2813c7, + 0x230347, + 0x2c6988, + 0x31fec8, + 0x212309, + 0x26eec8, + 0x29a84b, + 0x2b7fc4, + 0x37b985, + 0x27d905, + 0x26d809, + 0x2dfd49, + 0x2e0308, + 0x22d748, + 0x29f504, + 0x2846c5, + 0x200583, + 0x2c53c5, + 0x297646, + 0x29620c, + 0x21f046, + 0x2ebdc6, + 0x28acc5, + 0x2f8448, + 0x35ec46, + 0x3963c6, + 0x24c486, + 0x22c7cc, + 0x250544, + 0x23f18a, + 0x28ac08, + 0x296047, + 0x32cd86, + 0x23c4c7, + 0x2e7885, + 0x2adf86, + 0x35aa46, + 0x366207, + 0x250a84, + 0x309705, + 0x276744, + 0x2c9c87, + 0x276988, + 0x277a8a, + 0x27ebc7, + 0x2a8207, + 0x2f0687, + 0x2d8a89, + 0x29620a, + 0x22e943, + 0x250985, + 0x218203, + 0x385c49, + 0x336dc8, + 0x351687, + 0x319849, + 0x221dc6, + 0x3b6808, + 0x33c445, + 0x3bf38a, + 0x200c89, + 0x3299c9, + 0x35eb07, + 0x26dac9, + 0x2180c8, + 0x3663c6, + 0x2836c8, + 0x203a47, + 0x22ea87, + 0x245d87, + 0x2e5a48, + 0x2f89c6, + 0x2a0585, + 0x27b047, + 0x296988, + 0x334e04, + 0x2c8004, + 0x28ed07, + 0x2acc47, + 0x23140a, + 0x366346, + 0x364c4a, + 0x2be747, + 0x2bc587, + 0x3097c4, + 0x2aa444, + 0x2caa46, + 0x23a444, + 0x23a44c, + 0x39ee05, + 0x218a09, + 0x337284, + 0x2b18c5, + 0x2707c8, + 0x239dc5, + 0x37f306, + 0x2311c4, + 0x2d02ca, + 0x2cb2c6, + 0x29180a, + 0x2162c7, + 0x224505, + 0x21c645, + 0x24204a, + 0x28dd45, + 0x29ed06, + 0x209784, + 0x2afac6, + 0x347b85, + 0x307086, + 0x2e44cc, + 0x218d4a, + 0x252b04, + 0x2060c6, + 0x29bfc7, + 0x2cc904, + 0x26d1c8, + 0x2f3106, + 0x211509, + 0x2db609, + 0x394949, + 0x2c7246, + 0x203b46, + 0x283807, + 0x38dac8, + 0x203949, + 0x31bd47, + 0x2954c6, + 0x2faac7, + 0x2a6b05, + 0x2bc7c4, + 0x2833c7, + 0x267e05, + 0x286545, + 0x31f747, + 0x245808, + 0x3b22c6, + 0x2977cd, + 0x29924f, + 0x29d54d, + 0x214084, + 0x238986, + 0x2d0688, + 0x3ab905, + 0x28f508, + 0x256f8a, + 0x240084, + 0x233b46, + 0x2cd107, + 0x2ca387, + 0x2cfe09, + 0x283685, + 0x2b1804, + 0x2b49ca, + 0x2bbbc9, + 0x26dbc7, + 0x297a86, + 0x351446, + 0x284606, + 0x371d46, + 0x2cf6cf, + 0x2d0549, + 0x24e386, + 0x354846, + 0x31ac09, + 0x2b2147, + 0x20be43, + 0x22c946, + 0x20de03, + 0x2de988, + 0x2fa907, + 0x29f889, + 0x2b1b88, + 0x318108, + 0x328006, + 0x21ef89, + 0x399b05, + 0x2b2204, + 0x2e8187, + 0x372b85, + 0x214084, + 0x211988, + 0x2174c4, + 0x2b1e87, + 0x30d686, + 0x395ac5, + 0x2ae608, + 0x35130b, + 0x2b4107, + 0x2422c6, + 0x2c2b84, + 0x2b6286, + 0x26a985, + 0x267e05, + 0x27cf49, + 0x280fc9, + 0x22eac4, + 0x22eb05, + 0x206105, + 0x3bf206, + 0x36fc08, + 0x2bdc86, + 0x3aa9cb, + 0x37570a, + 0x2ba585, + 0x28b906, + 0x39df85, + 0x345405, + 0x29b847, + 0x3c2a88, + 0x24ed84, + 0x39ce86, + 0x28e306, + 0x218287, + 0x30eb04, + 0x27bcc6, + 0x35c745, + 0x35c749, + 0x203d44, + 0x2b3f49, + 0x2795c6, + 0x2c0048, + 
0x206105, + 0x36e305, + 0x307086, + 0x344909, + 0x21be49, + 0x2ebe46, + 0x382d48, + 0x2ab188, + 0x39df44, + 0x2b5504, + 0x2b5508, + 0x239d08, + 0x24ee89, + 0x2975c6, + 0x24c486, + 0x32be4d, + 0x27c986, + 0x2ce449, + 0x39e845, + 0x20de46, + 0x2941c8, + 0x3277c5, + 0x267c84, + 0x26a985, + 0x280908, + 0x29a449, + 0x276804, + 0x31e346, + 0x251f0a, + 0x2ff648, + 0x231589, + 0x25890a, + 0x3197c6, + 0x299408, + 0x293005, + 0x28ae88, + 0x2e7905, + 0x221c89, + 0x376189, + 0x23c442, + 0x26f3c5, + 0x2ebf86, + 0x279507, + 0x38c8c5, + 0x30d2c6, + 0x304c08, + 0x2b0486, + 0x2d9309, + 0x278b86, + 0x280a88, + 0x2a9605, + 0x382106, + 0x336c48, + 0x27fd48, + 0x396008, + 0x2f2388, + 0x3c53c4, + 0x21e583, + 0x2d9544, + 0x27edc6, + 0x2a6b44, + 0x24c787, + 0x3962c9, + 0x3c0485, + 0x345606, + 0x22c946, + 0x2b514b, + 0x2b0d86, + 0x293b06, + 0x393208, + 0x310806, + 0x224303, + 0x3c4e83, + 0x2bc7c4, + 0x233e85, + 0x2d45c7, + 0x276988, + 0x27698f, + 0x27af4b, + 0x36fa08, + 0x31e3c6, + 0x36fd0e, + 0x242483, + 0x2d4544, + 0x2b0d05, + 0x2b1686, + 0x28c94b, + 0x290706, + 0x227109, + 0x395ac5, + 0x2eccc8, + 0x20e688, + 0x21bd0c, + 0x29e6c6, + 0x2c5406, + 0x2e9f85, + 0x287888, + 0x27a345, + 0x350288, + 0x29b04a, + 0x29d989, + 0x6010c4, + 0x200742, + 0x3c202c42, + 0x202542, + 0x26ff84, + 0x201e42, + 0x21a484, + 0x2032c2, + 0x200342, + 0x207c02, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x2b6c03, + 0x20be03, + 0x237583, + 0x30e843, + 0x26ff84, + 0x20ec83, + 0x241d03, + 0x210143, + 0x28b304, + 0x20be03, + 0x23d744, + 0x237583, + 0x2d2484, + 0x30e843, + 0x38cb87, + 0x21f743, + 0x20ae43, + 0x310f08, + 0x241d03, + 0x2a47cb, + 0x2e8943, + 0x3a25c6, + 0x20e982, + 0x3987cb, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x20be03, + 0x237583, + 0x30e843, + 0x241d03, + 0x2098c3, + 0x204543, + 0x200742, + 0xcd588, + 0x3574c5, + 0x267e88, + 0x2e2e08, + 0x202c42, + 0x3325c5, + 0x331707, + 0x201b02, + 0x248887, + 0x202542, + 0x256807, + 0x3c0b89, + 0x292bc8, + 0x293ec9, + 0x247342, + 0x269ec7, + 0x329844, + 0x3317c7, + 0x375607, + 0x25fe82, + 0x21f743, + 0x20d682, + 0x2032c2, + 0x200342, + 0x20b182, + 0x200382, + 0x207c02, + 0x2a9105, + 0x24d0c5, + 0x2c42, + 0x37583, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0xaff03, + 0x241d03, + 0x10c43, + 0x781, + 0x20be03, + 0x237583, + 0x30e843, + 0x26ff84, + 0x214bc3, + 0x20ec83, + 0xaff03, + 0x241d03, + 0x21a003, + 0x3f0eca46, + 0x6f803, + 0x7f685, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x202c42, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x9482, + 0xcd588, + 0xae43, + 0xaff03, + 0x4cec4, + 0xd8d45, + 0x200742, + 0x3a4c04, + 0x20be03, + 0x237583, + 0x30e843, + 0x3a2f03, + 0x232585, + 0x214bc3, + 0x20f003, + 0x20ec83, + 0x228803, + 0x241d03, + 0x207c03, + 0x2605c3, + 0x203f83, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x202c42, + 0x241d03, + 0xcd588, + 0x30e843, + 0xaff03, + 0xcd588, + 0xaff03, + 0x2b8283, + 0x20be03, + 0x234784, + 0x237583, + 0x30e843, + 0x207d02, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x20be03, + 0x237583, + 0x30e843, + 0x207d02, + 0x20be83, + 0x20ec83, + 0x241d03, + 0x2e2d83, + 0x207c03, + 0x200742, + 0x202c42, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x3a25c5, + 0x9a2c6, + 0x28b304, + 0x20e982, + 0xcd588, + 0x200742, + 0x20288, + 0x132983, + 0x202c42, + 0x43490186, + 0x12b44, + 0x10bfcb, + 0x41546, + 0x1f847, + 0x237583, + 0x52748, + 0x30e843, + 0xef4c5, + 0xe84, + 0x222003, + 0x56c47, + 0xd4344, 
+ 0x20ec83, + 0xafd44, + 0xaff03, + 0x241d03, + 0x2e9484, + 0xfdb48, + 0x157206, + 0x10d08, + 0x135fc5, + 0x126749, + 0x202c42, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ae43, + 0x241d03, + 0x2e8943, + 0x20e982, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x26ff83, + 0x226444, + 0x20ec83, + 0xae43, + 0x241d03, + 0x20be03, + 0x237583, + 0x2d2484, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x3a25c6, + 0x237583, + 0x30e843, + 0x181c43, + 0x241d03, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x1f847, + 0xcd588, + 0x30e843, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x45e0be03, + 0x237583, + 0x20ec83, + 0x241d03, + 0xcd588, + 0x200742, + 0x202c42, + 0x20be03, + 0x30e843, + 0x20ec83, + 0x200342, + 0x241d03, + 0x32e147, + 0x23b00b, + 0x205d03, + 0x2409c8, + 0x38d847, + 0x224906, + 0x2c1605, + 0x38e5c9, + 0x21f4c8, + 0x36b4c9, + 0x39b210, + 0x36b4cb, + 0x2f6809, + 0x207503, + 0x22bcc9, + 0x235b46, + 0x235b4c, + 0x357588, + 0x3c02c8, + 0x200289, + 0x2e244e, + 0x3c094b, + 0x34754c, + 0x205583, + 0x27b80c, + 0x205589, + 0x2fbf47, + 0x2374cc, + 0x2ad14a, + 0x253404, + 0x32a08d, + 0x27b6c8, + 0x21014d, + 0x28a7c6, + 0x28b30b, + 0x31d689, + 0x27a747, + 0x3c2c46, + 0x341409, + 0x32538a, + 0x313048, + 0x2e8544, + 0x332e87, + 0x249d47, + 0x277984, + 0x2d9a44, + 0x386dc9, + 0x364209, + 0x215cc8, + 0x2108c5, + 0x2ea8c5, + 0x20d106, + 0x329f49, + 0x25720d, + 0x2f3308, + 0x20d007, + 0x2c1688, + 0x23ce86, + 0x37ce44, + 0x286c45, + 0x203846, + 0x2043c4, + 0x205487, + 0x2081ca, + 0x213904, + 0x2170c6, + 0x217d49, + 0x217d4f, + 0x21870d, + 0x218fc6, + 0x21fe90, + 0x220286, + 0x220807, + 0x221447, + 0x22144f, + 0x222149, + 0x228d46, + 0x22a1c7, + 0x22a1c8, + 0x22b089, + 0x395b88, + 0x2de4c7, + 0x22cf43, + 0x3bcfc6, + 0x3bbb08, + 0x2e270a, + 0x387809, + 0x212743, + 0x331606, + 0x39ccca, + 0x2e4b47, + 0x2fbd8a, + 0x209a8e, + 0x222286, + 0x26f5c7, + 0x21bac6, + 0x205646, + 0x38a50b, + 0x34edca, + 0x309b0d, + 0x203c07, + 0x260648, + 0x260649, + 0x26064f, + 0x34eb0c, + 0x27c0c9, + 0x2d778e, + 0x38cc8a, + 0x293886, + 0x2f9f06, + 0x313ccc, + 0x315a8c, + 0x328308, + 0x353d07, + 0x26d545, + 0x290504, + 0x202c4e, + 0x266f84, + 0x3188c7, + 0x39270a, + 0x3a2a54, + 0x3b92cf, + 0x221608, + 0x3bce88, + 0x33984d, + 0x33984e, + 0x231a09, + 0x232b08, + 0x232b0f, + 0x2371cc, + 0x2371cf, + 0x2386c7, + 0x23dfca, + 0x22c54b, + 0x23f5c8, + 0x242707, + 0x2616cd, + 0x20a286, + 0x32a246, + 0x2443c9, + 0x2a6f88, + 0x249208, + 0x24920e, + 0x23b107, + 0x24b505, + 0x24cc85, + 0x204c44, + 0x224bc6, + 0x215bc8, + 0x322ec3, + 0x397e4e, + 0x261a88, + 0x2ae14b, + 0x301c07, + 0x2f7305, + 0x27b986, + 0x2aab07, + 0x2f4848, + 0x317909, + 0x20a505, + 0x284bc8, + 0x226e06, + 0x39caca, + 0x202b49, + 0x237589, + 0x23758b, + 0x3211c8, + 0x277849, + 0x210986, + 0x36db0a, + 0x2bf50a, + 0x23e1cc, + 0x21e907, + 0x2929ca, + 0x211d0b, + 0x211d19, + 0x30a988, + 0x3a2645, + 0x261886, + 0x26ba89, + 0x358706, + 0x2d340a, + 0x21f6c6, + 0x225784, + 0x2c380d, + 0x323307, + 0x225789, + 0x24e685, + 0x251548, + 0x252509, + 0x252944, + 0x253307, + 0x253308, + 0x253907, + 0x268688, + 0x257887, + 0x205dc5, + 0x25d60c, + 0x25de49, + 0x30590a, + 0x3a0e09, + 0x22bdc9, + 0x37924c, + 0x26004b, + 0x260dc8, + 0x261e88, + 0x265244, + 0x281b08, + 0x283c89, + 0x2ad207, + 0x217f86, + 0x31d907, + 0x3843c9, + 0x335c0b, + 0x2b6107, + 0x3c6847, + 0x216407, + 0x2100c4, + 0x2100c5, + 0x278845, + 0x34c04b, + 0x3ad084, + 0x320d08, + 0x28550a, + 0x226ec7, + 0x35b207, + 0x28c112, + 0x2a10c6, + 0x234106, + 0x2b658e, + 0x2a4a86, + 0x291508, + 
0x291a8f, + 0x210508, + 0x38b748, + 0x2bb78a, + 0x2bb791, + 0x2a0d4e, + 0x242a0a, + 0x242a0c, + 0x232d07, + 0x232d10, + 0x3c4cc8, + 0x2a0f45, + 0x2aae0a, + 0x20440c, + 0x29580d, + 0x2fc446, + 0x2fc447, + 0x2fc44c, + 0x30460c, + 0x214bcc, + 0x2ab88b, + 0x3706c4, + 0x211dc4, + 0x388489, + 0x30a2c7, + 0x23dd89, + 0x2bf349, + 0x2ace07, + 0x2acfc6, + 0x2acfc9, + 0x2ad3c3, + 0x2b058a, + 0x31b487, + 0x3491cb, + 0x30998a, + 0x3298c4, + 0x316946, + 0x27ee49, + 0x23a2c4, + 0x39eeca, + 0x2425c5, + 0x2bcbc5, + 0x2bcbcd, + 0x2bcf0e, + 0x2d9685, + 0x32d506, + 0x3a21c7, + 0x25d88a, + 0x37a506, + 0x2e1304, + 0x303707, + 0x246a4b, + 0x23cf47, + 0x3c1a04, + 0x390706, + 0x39070d, + 0x38408c, + 0x20eb46, + 0x2f350a, + 0x27c686, + 0x285788, + 0x354b47, + 0x23618a, + 0x24b386, + 0x203b03, + 0x294346, + 0x3bb988, + 0x38860a, + 0x2cb107, + 0x2cb108, + 0x30df84, + 0x28c687, + 0x201cc8, + 0x2a12c8, + 0x2827c8, + 0x2b130a, + 0x2d8345, + 0x20be87, + 0x242853, + 0x25a706, + 0x21b388, + 0x227609, + 0x248748, + 0x32808b, + 0x318488, + 0x246b84, + 0x365386, + 0x311e06, + 0x2dfa89, + 0x382807, + 0x25d708, + 0x29c806, + 0x31f644, + 0x341d05, + 0x2c7e48, + 0x34398a, + 0x2c3488, + 0x2c8906, + 0x29960a, + 0x3b8e48, + 0x2cc708, + 0x2cd2c8, + 0x2cdd46, + 0x2d0886, + 0x3a08cc, + 0x2d0e10, + 0x29ea45, + 0x210308, + 0x394490, + 0x210310, + 0x39b08e, + 0x3a054e, + 0x3a0554, + 0x3a624f, + 0x3a6606, + 0x321391, + 0x31eb13, + 0x31ef88, + 0x3ab285, + 0x240f08, + 0x387b05, + 0x2da18c, + 0x22cd09, + 0x290349, + 0x22d187, + 0x251309, + 0x24f147, + 0x2f8cc6, + 0x286a47, + 0x2034c5, + 0x210c83, + 0x323089, + 0x2278c9, + 0x381c43, + 0x38c7c4, + 0x326f0d, + 0x347c8f, + 0x31f685, + 0x3175c6, + 0x225a47, + 0x357307, + 0x2f5886, + 0x2f588b, + 0x2a2bc5, + 0x25f106, + 0x2f9d87, + 0x258249, + 0x375d06, + 0x322285, + 0x374e4b, + 0x3be7c6, + 0x229a85, + 0x27dc08, + 0x2b2bc8, + 0x2aec8c, + 0x2aec90, + 0x2ae949, + 0x2bd487, + 0x2d8e4b, + 0x306746, + 0x2de38a, + 0x2df80b, + 0x2e094a, + 0x2e0bc6, + 0x2e2c45, + 0x31b1c6, + 0x2b7048, + 0x22d24a, + 0x3394dc, + 0x2e8a0c, + 0x2e8d08, + 0x3a25c5, + 0x361f87, + 0x209946, + 0x270bc5, + 0x21a846, + 0x2f5a48, + 0x2bbe47, + 0x2e2348, + 0x25a7ca, + 0x225b4c, + 0x20d309, + 0x345787, + 0x222a04, + 0x24cd46, + 0x38b2ca, + 0x2bf445, + 0x2110cc, + 0x213588, + 0x373608, + 0x2238cc, + 0x2dd30c, + 0x329409, + 0x329647, + 0x24398c, + 0x22c044, + 0x2482ca, + 0x3033cc, + 0x27280b, + 0x372f0b, + 0x253786, + 0x258d87, + 0x232f47, + 0x232f4f, + 0x2fce11, + 0x2d5b52, + 0x2590cd, + 0x2590ce, + 0x25940e, + 0x3a6408, + 0x3a6412, + 0x25eb08, + 0x38d487, + 0x2556ca, + 0x355cc8, + 0x2a4a45, + 0x37fe0a, + 0x220607, + 0x316244, + 0x221b83, + 0x378205, + 0x2bba07, + 0x307c47, + 0x295a0e, + 0x399e8d, + 0x39b5c9, + 0x20fd85, + 0x3b00c3, + 0x20ab06, + 0x25f705, + 0x2ae388, + 0x2b9609, + 0x2618c5, + 0x2618cf, + 0x2e2a87, + 0x38e505, + 0x3025ca, + 0x2b1506, + 0x25b949, + 0x2f640c, + 0x2f80c9, + 0x3be506, + 0x28530c, + 0x32c906, + 0x2fb508, + 0x2fba86, + 0x30ab06, + 0x2b0f04, + 0x30d603, + 0x38668a, + 0x216711, + 0x27c28a, + 0x272085, + 0x282347, + 0x25ab47, + 0x201dc4, + 0x201dcb, + 0x293d48, + 0x2ba046, + 0x36e145, + 0x33d8c4, + 0x362109, + 0x202a84, + 0x249047, + 0x300585, + 0x300587, + 0x2b67c5, + 0x3482c3, + 0x38d348, + 0x317c4a, + 0x27f283, + 0x35750a, + 0x3b3486, + 0x26164f, + 0x3b8349, + 0x397dd0, + 0x2effc8, + 0x2c8d49, + 0x296707, + 0x39068f, + 0x319c04, + 0x2d2504, + 0x220106, + 0x34f846, + 0x2d6e8a, + 0x253c06, + 0x352987, + 0x303f48, + 0x304147, + 0x3049c7, + 0x305b8a, + 0x3083cb, + 0x341ac5, + 0x2d5788, + 0x256703, + 0x3b6b0c, + 
0x35d60f, + 0x26d34d, + 0x25e287, + 0x39b709, + 0x36de47, + 0x282c48, + 0x3a2c4c, + 0x2c8a48, + 0x2701c8, + 0x31c38e, + 0x333d94, + 0x3342a4, + 0x35308a, + 0x36becb, + 0x24f204, + 0x24f209, + 0x233bc8, + 0x24d305, + 0x3229ca, + 0x261cc7, + 0x31b0c4, + 0x2b6c03, + 0x20be03, + 0x23d744, + 0x237583, + 0x30e843, + 0x26ff84, + 0x214bc3, + 0x21f743, + 0x2d0e06, + 0x226444, + 0x20ec83, + 0x241d03, + 0x219543, + 0x200742, + 0x2b6c03, + 0x202c42, + 0x20be03, + 0x23d744, + 0x237583, + 0x30e843, + 0x214bc3, + 0x2d0e06, + 0x20ec83, + 0x241d03, + 0xcd588, + 0x20be03, + 0x237583, + 0x203d43, + 0x20ec83, + 0xaff03, + 0x241d03, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x226444, + 0x20ec83, + 0x241d03, + 0x200742, + 0x24be43, + 0x202c42, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x203cc2, + 0x24d5c2, + 0x202c42, + 0x20be03, + 0x20b2c2, + 0x201342, + 0x26ff84, + 0x21a484, + 0x227d42, + 0x226444, + 0x200342, + 0x241d03, + 0x219543, + 0x253786, + 0x230882, + 0x204182, + 0x228382, + 0x48610503, + 0x48a32d03, + 0x5b586, + 0x5b586, + 0x28b304, + 0x20ae43, + 0x15a4a, + 0x3ba4c, + 0x121ecc, + 0x7f48d, + 0x117485, + 0x2aa47, + 0x14ec6, + 0x19148, + 0x1c787, + 0x23288, + 0x1807ca, + 0x102c07, + 0x496d2fc5, + 0x133789, + 0x3c00b, + 0x18754b, + 0x1bdd08, + 0xf608a, + 0x8a34e, + 0x144854b, + 0x12b44, + 0x5f246, + 0x8808, + 0x7e948, + 0x3c2c7, + 0x910c7, + 0x78449, + 0x3a347, + 0x67008, + 0x100249, + 0x170bc4, + 0x191e05, + 0x12f34e, + 0xa964d, + 0x1f6c8, + 0x49b64046, + 0x4a564048, + 0x739c8, + 0x12e350, + 0x5978c, + 0x666c7, + 0x66e47, + 0x6abc7, + 0x704c7, + 0xd0c2, + 0x1807, + 0x14ef8c, + 0x11d107, + 0xa4686, + 0xa5a89, + 0xa7708, + 0x552c2, + 0x1342, + 0x3900b, + 0xafdc7, + 0x25589, + 0x52c49, + 0x142188, + 0xb0102, + 0x1970c9, + 0xc9f8a, + 0xc6209, + 0xd3909, + 0xd50c8, + 0xd6007, + 0xd82c9, + 0xda885, + 0xdae90, + 0x138ac6, + 0x14a345, + 0xeb78d, + 0x2bac6, + 0xe3d47, + 0xe9498, + 0x3a6c8, + 0x14640a, + 0x16f02, + 0x5f88d, + 0x1282, + 0x7dac6, + 0x8cc08, + 0x6e308, + 0xcd449, + 0x1be608, + 0x6f98e, + 0xab07, + 0xfe68d, + 0xf26c5, + 0x1588, + 0x1a1e08, + 0xfeec6, + 0xc842, + 0x157206, + 0xdc2, + 0x2c1, + 0x60a07, + 0x8b003, + 0x49ee9d04, + 0x4a294a43, + 0x101, + 0x13d06, + 0x101, + 0x301, + 0x13d06, + 0x8b003, + 0x140e3c5, + 0x253404, + 0x20be03, + 0x254a04, + 0x26ff84, + 0x20ec83, + 0x2274c5, + 0x21a003, + 0x24f3c3, + 0x2f5805, + 0x203f83, + 0x4b60be03, + 0x237583, + 0x30e843, + 0x200541, + 0x21f743, + 0x21a484, + 0x226444, + 0x20ec83, + 0x241d03, + 0x207c03, + 0xcd588, + 0x200742, + 0x2b6c03, + 0x202c42, + 0x20be03, + 0x237583, + 0x203d43, + 0x201342, + 0x26ff84, + 0x214bc3, + 0x21f743, + 0x20ec83, + 0x20ae43, + 0x241d03, + 0x203f83, + 0xcd588, + 0x35d382, + 0x1851c7, + 0x2c42, + 0x141c85, + 0x598cf, + 0x1455908, + 0x10430e, + 0x4c607402, + 0x31a848, + 0x307206, + 0x2c1146, + 0x306b87, + 0x4ca11a42, + 0x4cfb81c8, + 0x21ed8a, + 0x266148, + 0x200602, + 0x31b2c9, + 0x341b07, + 0x217f06, + 0x38d089, + 0x20bfc4, + 0x20e2c6, + 0x2f2904, + 0x270984, + 0x25cf89, + 0x3030c6, + 0x24d185, + 0x2faf45, + 0x2322c7, + 0x2be9c7, + 0x354984, + 0x306dc6, + 0x2ff205, + 0x309485, + 0x39dec5, + 0x2ea687, + 0x301a45, + 0x319149, + 0x336f85, + 0x2f4984, + 0x37a447, + 0x348a0e, + 0x3aaec9, + 0x2b6449, + 0x335246, + 0x2454c8, + 0x2ee34b, + 0x35bd8c, + 0x33b746, + 0x347407, + 0x2afbc5, + 0x2d9a4a, + 0x215dc9, + 0x366f09, + 0x332706, + 0x2f9b45, + 0x383105, + 0x338689, + 0x39e04b, + 0x275e86, + 0x343ec6, + 0x203104, + 0x28bdc6, + 0x24b588, + 0x3bb806, + 0x21d186, + 0x206888, + 0x209347, + 0x209509, 
+ 0x20acc5, + 0xcd588, + 0x291044, + 0x304f44, + 0x210f05, + 0x3a8c49, + 0x226087, + 0x22608b, + 0x2288ca, + 0x22cc45, + 0x4d207a42, + 0x309847, + 0x4d62cf48, + 0x370a47, + 0x383e85, + 0x32654a, + 0x2c42, + 0x2fac0b, + 0x2579ca, + 0x2277c6, + 0x210d83, + 0x2a4f8d, + 0x3aa74c, + 0x3bedcd, + 0x24e2c5, + 0x37dd45, + 0x322f07, + 0x20b2c9, + 0x21ec86, + 0x253a85, + 0x2ced88, + 0x28bcc3, + 0x2e3108, + 0x28bcc8, + 0x2c2107, + 0x30f108, + 0x3aa549, + 0x286d47, + 0x23ab87, + 0x224788, + 0x38ae04, + 0x38ae07, + 0x28a6c8, + 0x353706, + 0x3b544f, + 0x2293c7, + 0x2de646, + 0x329785, + 0x228503, + 0x391c87, + 0x378183, + 0x253e86, + 0x2553c6, + 0x255b06, + 0x28fa05, + 0x268683, + 0x396d48, + 0x379c89, + 0x38eb4b, + 0x255c88, + 0x257545, + 0x258b85, + 0x4db29982, + 0x286b09, + 0x38d707, + 0x25f185, + 0x25ce87, + 0x25e9c6, + 0x371c05, + 0x25f54b, + 0x260dc4, + 0x265d05, + 0x265e47, + 0x275806, + 0x275c45, + 0x281d07, + 0x2829c7, + 0x2e1704, + 0x28990a, + 0x289dc8, + 0x293089, + 0x241245, + 0x364946, + 0x24b74a, + 0x2fae46, + 0x268fc7, + 0x292d4d, + 0x2a2709, + 0x3954c5, + 0x2031c7, + 0x31f148, + 0x336a08, + 0x209ec7, + 0x3be1c6, + 0x21d4c7, + 0x255083, + 0x303044, + 0x36e785, + 0x39fc47, + 0x3a4609, + 0x230cc8, + 0x33dc85, + 0x2363c4, + 0x253d45, + 0x39038d, + 0x211742, + 0x2bd986, + 0x27da06, + 0x2dac0a, + 0x381346, + 0x38b205, + 0x31ffc5, + 0x31ffc7, + 0x39c90c, + 0x27384a, + 0x28ba86, + 0x2c4d85, + 0x28bc06, + 0x28bf47, + 0x28d986, + 0x28f90c, + 0x38d1c9, + 0x4de14187, + 0x291e45, + 0x291e46, + 0x2944c8, + 0x247e85, + 0x2a3505, + 0x2a3c88, + 0x2a3e8a, + 0x4e278142, + 0x4e607c82, + 0x2d7485, + 0x2a6b43, + 0x2461c8, + 0x211b83, + 0x2a4104, + 0x25ba8b, + 0x211b88, + 0x2ce288, + 0x4eb1cb49, + 0x2a8e09, + 0x2a9546, + 0x2aa788, + 0x2aa989, + 0x2ab386, + 0x2ab505, + 0x24e0c6, + 0x2abfc9, + 0x3ac147, + 0x381fc6, + 0x2d8787, + 0x21eb07, + 0x34e204, + 0x4ee3a9c9, + 0x270e08, + 0x3b80c8, + 0x31f887, + 0x2c2f46, + 0x20b0c9, + 0x2f2bc7, + 0x33194a, + 0x364a88, + 0x3be307, + 0x20ed86, + 0x39d28a, + 0x372cc8, + 0x382ac5, + 0x22b9c5, + 0x30b047, + 0x36ac49, + 0x30280b, + 0x314248, + 0x337009, + 0x255f87, + 0x2b868c, + 0x2b8c8c, + 0x2b8f8a, + 0x2b920c, + 0x2c10c8, + 0x2c12c8, + 0x2c14c4, + 0x2c1889, + 0x2c1ac9, + 0x2c1d0a, + 0x2c1f89, + 0x2c22c7, + 0x3b4c4c, + 0x23d386, + 0x3c0708, + 0x2faf06, + 0x37fc46, + 0x3953c7, + 0x3ab0c8, + 0x349c4b, + 0x370907, + 0x35b849, + 0x3782c9, + 0x254b87, + 0x2f2b44, + 0x282487, + 0x2eaf46, + 0x215946, + 0x2f36c5, + 0x3720c8, + 0x290244, + 0x290246, + 0x27370b, + 0x2b0889, + 0x39ddc6, + 0x204cc9, + 0x2ea806, + 0x22e688, + 0x20b4c3, + 0x2f9cc5, + 0x21d2c9, + 0x228b05, + 0x30ae84, + 0x274d06, + 0x3993c5, + 0x259b06, + 0x308747, + 0x331086, + 0x23078b, + 0x36da07, + 0x256e46, + 0x348606, + 0x232386, + 0x354949, + 0x2e474a, + 0x2ba345, + 0x3be8cd, + 0x2a3f86, + 0x2e9106, + 0x397cc6, + 0x285705, + 0x2db187, + 0x2f75c7, + 0x207cce, + 0x21f743, + 0x2c2f09, + 0x358489, + 0x2d9e47, + 0x26c287, + 0x2a1445, + 0x2ae085, + 0x4f386f0f, + 0x2c8f87, + 0x2c9148, + 0x2c9884, + 0x2c9e46, + 0x4f64cd02, + 0x2cdfc6, + 0x2d0e06, + 0x349f8e, + 0x2e2f4a, + 0x3b8946, + 0x2ca24a, + 0x2065c9, + 0x231e85, + 0x344788, + 0x39a146, + 0x29aac8, + 0x3c2dc8, + 0x2a57cb, + 0x306c85, + 0x301ac8, + 0x2069cc, + 0x383d47, + 0x255606, + 0x27c4c8, + 0x224a88, + 0x4fa53982, + 0x20e08b, + 0x3361c9, + 0x21cb09, + 0x39dc47, + 0x38a7c8, + 0x4fe3ca88, + 0x21318b, + 0x342009, + 0x28394d, + 0x24e488, + 0x3518c8, + 0x502056c2, + 0x331404, + 0x50623b82, + 0x2f7e06, + 0x50a0a542, + 0x24fc8a, + 0x204b86, + 0x22e0c8, + 0x2be048, + 0x326cc6, 
+ 0x398b46, + 0x2efd46, + 0x2ae305, + 0x240684, + 0x50e2e604, + 0x34c986, + 0x2a2247, + 0x5121c1c7, + 0x2e1bcb, + 0x348dc9, + 0x37dd8a, + 0x357ec4, + 0x320108, + 0x381d8d, + 0x2e6e49, + 0x2e7088, + 0x2e7709, + 0x2e9484, + 0x22c404, + 0x27d505, + 0x2ee68b, + 0x211b06, + 0x34c7c5, + 0x222449, + 0x306e88, + 0x29fb44, + 0x2d9bc9, + 0x326b05, + 0x2bea08, + 0x23b247, + 0x2b6848, + 0x27f046, + 0x207907, + 0x2d4109, + 0x374fc9, + 0x229b05, + 0x240305, + 0x51607482, + 0x2f4744, + 0x225dc5, + 0x292786, + 0x2f8305, + 0x297b87, + 0x34ca85, + 0x275844, + 0x335306, + 0x253b07, + 0x234fc6, + 0x384305, + 0x20e4c8, + 0x307405, + 0x20ef87, + 0x2154c9, + 0x2b09ca, + 0x34e587, + 0x34e58c, + 0x24d146, + 0x241b89, + 0x244885, + 0x247dc8, + 0x201283, + 0x210945, + 0x2eac05, + 0x257f47, + 0x51a12c02, + 0x398347, + 0x2f3c06, + 0x32fdc6, + 0x2f7f46, + 0x2249c6, + 0x2eb408, + 0x241045, + 0x2de707, + 0x2de70d, + 0x221b83, + 0x221b85, + 0x302387, + 0x398688, + 0x301f45, + 0x219788, + 0x23dc86, + 0x333947, + 0x3c0645, + 0x306d06, + 0x3a4c85, + 0x226bca, + 0x2fe986, + 0x22eec7, + 0x2f04c5, + 0x2ffc87, + 0x303684, + 0x30ae06, + 0x3446c5, + 0x357a0b, + 0x2eadc9, + 0x24bf4a, + 0x229b88, + 0x34c2c8, + 0x34cb8c, + 0x353847, + 0x36f808, + 0x387c08, + 0x394205, + 0x3a684a, + 0x3b00c9, + 0x51e01b42, + 0x3c6646, + 0x222e44, + 0x222e49, + 0x294f49, + 0x276587, + 0x2f9887, + 0x2bf1c9, + 0x285908, + 0x28590f, + 0x21dec6, + 0x2d2c8b, + 0x2f5645, + 0x2f5647, + 0x2f5c49, + 0x25bbc6, + 0x2d9b47, + 0x2d5ec5, + 0x234dc4, + 0x341006, + 0x226244, + 0x2df647, + 0x2ce808, + 0x522f9a48, + 0x2fa1c5, + 0x2fa307, + 0x24eb09, + 0x20de44, + 0x2473c8, + 0x52716788, + 0x201dc4, + 0x235fc8, + 0x3c2d04, + 0x3bebc9, + 0x21b2c5, + 0x52a0e982, + 0x21df05, + 0x2cb8c5, + 0x203008, + 0x238507, + 0x52e02a82, + 0x339e45, + 0x2cc586, + 0x249746, + 0x2f4708, + 0x2f4c88, + 0x2f82c6, + 0x30a146, + 0x385289, + 0x32fd06, + 0x29124b, + 0x3478c5, + 0x355c06, + 0x28e088, + 0x231bc6, + 0x20a386, + 0x219c4a, + 0x2a91ca, + 0x370c85, + 0x241107, + 0x30d0c6, + 0x53206d02, + 0x3024c7, + 0x260505, + 0x24b6c4, + 0x24b6c5, + 0x357dc6, + 0x271a47, + 0x220105, + 0x295004, + 0x2ad5c8, + 0x20a445, + 0x309fc7, + 0x3b1c45, + 0x226b05, + 0x268904, + 0x2abac9, + 0x2ff048, + 0x399286, + 0x2adc46, + 0x201ac6, + 0x536ff908, + 0x2ffb07, + 0x2ffe4d, + 0x30074c, + 0x300d49, + 0x300f89, + 0x53b65c82, + 0x3b7e83, + 0x2228c3, + 0x2eb005, + 0x39fd4a, + 0x32fbc6, + 0x3052c5, + 0x308904, + 0x30890b, + 0x323e4c, + 0x32488c, + 0x324b95, + 0x32754d, + 0x32a68f, + 0x32aa52, + 0x32aecf, + 0x32b292, + 0x32b713, + 0x32bbcd, + 0x32c18d, + 0x32c50e, + 0x32ca8e, + 0x32d2cc, + 0x32d68c, + 0x32dacb, + 0x32de4e, + 0x32e752, + 0x32f98c, + 0x32ff50, + 0x33ba12, + 0x33c68c, + 0x33cd4d, + 0x33d08c, + 0x33f651, + 0x34404d, + 0x34ac8d, + 0x34b28a, + 0x34b50c, + 0x34be0c, + 0x34c4cc, + 0x34ce8c, + 0x350493, + 0x350b10, + 0x350f10, + 0x351acd, + 0x3520cc, + 0x352dc9, + 0x35518d, + 0x3554d3, + 0x356491, + 0x3568d3, + 0x35888f, + 0x358c4c, + 0x358f4f, + 0x35930d, + 0x35990f, + 0x359cd0, + 0x35a74e, + 0x35df0e, + 0x35e490, + 0x35f08d, + 0x35fa0e, + 0x35fd8c, + 0x360d93, + 0x3628ce, + 0x362f50, + 0x363351, + 0x36378f, + 0x363b53, + 0x36580d, + 0x365b4f, + 0x365f0e, + 0x3665d0, + 0x3669c9, + 0x367d10, + 0x36830f, + 0x36898f, + 0x368d52, + 0x369e0e, + 0x36a80d, + 0x36ae8d, + 0x36b1cd, + 0x36c84d, + 0x36cb8d, + 0x36ced0, + 0x36d2cb, + 0x36e54c, + 0x36e8cc, + 0x36eecc, + 0x36f1ce, + 0x37bbd0, + 0x37e012, + 0x37e48b, + 0x37e98e, + 0x37ed0e, + 0x37f58e, + 0x37fa0b, + 0x53f80196, + 0x38104d, + 0x3814d4, + 0x38228d, + 0x386915, 
+ 0x388c4d, + 0x3895cf, + 0x389ccf, + 0x38ee0f, + 0x38f1ce, + 0x38f74d, + 0x391891, + 0x394b8c, + 0x394e8c, + 0x39518b, + 0x39560c, + 0x39654f, + 0x396912, + 0x39950d, + 0x39ae0c, + 0x39b94c, + 0x39bc4d, + 0x39bf8f, + 0x39c34e, + 0x39fa0c, + 0x39ffcd, + 0x3a030b, + 0x3a0bcc, + 0x3a14cd, + 0x3a180e, + 0x3a1b89, + 0x3a3553, + 0x3a3a8d, + 0x3a3dcd, + 0x3a43cc, + 0x3a484e, + 0x3a520f, + 0x3a55cc, + 0x3a58cd, + 0x3a5c0f, + 0x3a5fcc, + 0x3a778c, + 0x3a7b0c, + 0x3a7e0c, + 0x3a84cd, + 0x3a8812, + 0x3a8e8c, + 0x3a918c, + 0x3a9491, + 0x3a98cf, + 0x3a9c8f, + 0x3aa053, + 0x3ac70e, + 0x3aca8f, + 0x3ace4c, + 0x543ad18e, + 0x3ad50f, + 0x3ad8d6, + 0x3ae0d2, + 0x3af8cc, + 0x3b030f, + 0x3b098d, + 0x3b0ccf, + 0x3b108c, + 0x3b138d, + 0x3b16cd, + 0x3b2c8e, + 0x3b3bcc, + 0x3b3ecc, + 0x3b41d0, + 0x3b7211, + 0x3b764b, + 0x3b7a8c, + 0x3b7d8e, + 0x3baa91, + 0x3baece, + 0x3bb24d, + 0x3c338b, + 0x3c3c8f, + 0x3c4794, + 0x25cd82, + 0x25cd82, + 0x2032c3, + 0x25cd82, + 0x2032c3, + 0x25cd82, + 0x2009c2, + 0x24e105, + 0x3ba78c, + 0x25cd82, + 0x25cd82, + 0x2009c2, + 0x25cd82, + 0x294b45, + 0x2b09c5, + 0x25cd82, + 0x25cd82, + 0x200302, + 0x294b45, + 0x328909, + 0x35618c, + 0x25cd82, + 0x25cd82, + 0x25cd82, + 0x25cd82, + 0x24e105, + 0x25cd82, + 0x25cd82, + 0x25cd82, + 0x25cd82, + 0x200302, + 0x328909, + 0x25cd82, + 0x25cd82, + 0x25cd82, + 0x2b09c5, + 0x25cd82, + 0x2b09c5, + 0x35618c, + 0x3ba78c, + 0x2b6c03, + 0x20be03, + 0x237583, + 0x30e843, + 0x26ff84, + 0x20ec83, + 0x241d03, + 0x1233c8, + 0x6fac4, + 0xae43, + 0x193708, + 0x200742, + 0x55202c42, + 0x246d03, + 0x252d84, + 0x206c03, + 0x3c24c4, + 0x234106, + 0x2137c3, + 0x2f9084, + 0x2f0b05, + 0x21f743, + 0x20ec83, + 0xaff03, + 0x241d03, + 0x22d50a, + 0x253786, + 0x37f08c, + 0xcd588, + 0x202c42, + 0x20be03, + 0x237583, + 0x30e843, + 0x20be83, + 0x2d0e06, + 0x20ec83, + 0x241d03, + 0x219543, + 0xa4d48, + 0x117485, + 0x187689, + 0x15842, + 0x56793ec5, + 0x2aa47, + 0xaf148, + 0xd9ce, + 0x87e92, + 0x11b5cb, + 0x102d06, + 0x56ad2fc5, + 0x56ed2fcc, + 0x25147, + 0x15da87, + 0x1208ca, + 0x42ed0, + 0x149445, + 0x10bfcb, + 0x7e948, + 0x3c2c7, + 0xf400b, + 0x78449, + 0x127b07, + 0x3a347, + 0x760c7, + 0x3c206, + 0x67008, + 0x57429546, + 0xa964d, + 0x120290, + 0x5780a6c2, + 0x1f6c8, + 0x82dd0, + 0x15b5cc, + 0x57f61e0d, + 0x5fbc8, + 0x6b8c7, + 0x164f49, + 0x5b646, + 0x946c8, + 0xebfc2, + 0x71e8a, + 0x31047, + 0x11d107, + 0xa5a89, + 0xa7708, + 0xef4c5, + 0xe85ce, + 0x1260e, + 0x1b98f, + 0x25589, + 0x52c49, + 0x7e10b, + 0x8ef4f, + 0xabccc, + 0x1125cb, + 0x105848, + 0xe1ac7, + 0xf87c8, + 0x132c8b, + 0x15274c, + 0x15af0c, + 0x1625cc, + 0x16784d, + 0x142188, + 0xfcdc2, + 0x1970c9, + 0x133acb, + 0xc3146, + 0x12e28b, + 0xd560a, + 0xd61c5, + 0xdae90, + 0xdd606, + 0x140806, + 0x14a345, + 0x18a988, + 0xe3d47, + 0xe4007, + 0x1fcc7, + 0xf7c4a, + 0xaefca, + 0x7dac6, + 0x9088d, + 0x6e308, + 0x1be608, + 0x68849, + 0xb7cc5, + 0xf778c, + 0x167a4b, + 0x16ab84, + 0xfec89, + 0xfeec6, + 0x4cb06, + 0x4182, + 0x157206, + 0x14634b, + 0x10ac87, + 0xdc2, + 0xc5105, + 0x16704, + 0x781, + 0x7bc3, + 0x572a6d06, + 0x94a43, + 0x2542, + 0x31044, + 0x602, + 0x8b304, + 0xcc2, + 0x7ec2, + 0x3102, + 0x114902, + 0x3cc2, + 0xd2fc2, + 0x3c02, + 0xefa02, + 0x3e242, + 0x4ac2, + 0x2e02, + 0x4c3c2, + 0x37583, + 0x1f02, + 0x1b02, + 0x8882, + 0x12c42, + 0x1402, + 0x35c02, + 0x552c2, + 0x6202, + 0x3882, + 0x1342, + 0x14bc3, + 0x5842, + 0x1102, + 0xb0102, + 0x9602, + 0x1542, + 0x4482, + 0xe302, + 0x6f582, + 0x1e42, + 0x17c0c2, + 0x6cd82, + 0x3a3c2, + 0xec83, + 0x2002, + 0x53982, + 0x1382, + 0x6e02, + 0x29a85, + 0x8302, + 0x48602, + 
0x44303, + 0x982, + 0x16f02, + 0x1282, + 0x4042, + 0xed42, + 0x2a82, + 0xc842, + 0x4182, + 0x1f8c5, + 0x582009c2, + 0x587696c3, + 0x1fbc3, + 0x58a009c2, + 0x1fbc3, + 0x179487, + 0x215383, + 0x200742, + 0x20be03, + 0x237583, + 0x203d43, + 0x201fc3, + 0x20be83, + 0x20ec83, + 0x20ae43, + 0x241d03, + 0x294a83, + 0x10ec3, + 0xcd588, + 0x20be03, + 0x237583, + 0x203d43, + 0x21f743, + 0x20ec83, + 0x20ae43, + 0xaff03, + 0x241d03, + 0x20be03, + 0x237583, + 0x241d03, + 0x20be03, + 0x237583, + 0x30e843, + 0x200541, + 0x21f743, + 0x20ec83, + 0x228803, + 0x241d03, + 0x3744, + 0x2b6c03, + 0x20be03, + 0x237583, + 0x21f6c3, + 0x203d43, + 0x257e43, + 0x26b143, + 0x2a2c83, + 0x280e83, + 0x30e843, + 0x26ff84, + 0x20ec83, + 0x241d03, + 0x203f83, + 0x31e084, + 0x250b03, + 0x5583, + 0x22d443, + 0x332388, + 0x325384, + 0x2023ca, + 0x238f06, + 0x10a904, + 0x37a147, + 0x22174a, + 0x21dd89, + 0x3ade07, + 0x3b628a, + 0x2b6c03, + 0x2d750b, + 0x293609, + 0x201bc5, + 0x34e347, + 0x2c42, + 0x20be03, + 0x214447, + 0x2fb145, + 0x2f2a09, + 0x237583, + 0x306a86, + 0x2c0c03, + 0xeae83, + 0x107f06, + 0x122746, + 0x13747, + 0x2176c6, + 0x227045, + 0x39e607, + 0x2d2107, + 0x5b30e843, + 0x33c8c7, + 0x371fc3, + 0x20fb85, + 0x26ff84, + 0x26ed48, + 0x36a50c, + 0x2ad885, + 0x2a2886, + 0x214307, + 0x345847, + 0x252e47, + 0x254d08, + 0x30600f, + 0x3371c5, + 0x246e07, + 0x37c407, + 0x2a424a, + 0x2cebc9, + 0x308045, + 0x30b7ca, + 0x136686, + 0x2c0c85, + 0x36c104, + 0x2bdf86, + 0x2f1a47, + 0x382947, + 0x348748, + 0x21b545, + 0x2fb046, + 0x21d105, + 0x36dd45, + 0x289684, + 0x326bc7, + 0x2eb24a, + 0x23fc48, + 0x366446, + 0xbe83, + 0x2d8345, + 0x318a86, + 0x3b4e86, + 0x34a246, + 0x21f743, + 0x399787, + 0x37c385, + 0x20ec83, + 0x2d58cd, + 0x20ae43, + 0x348848, + 0x38c844, + 0x275b05, + 0x2a4146, + 0x23d186, + 0x355b07, + 0x204a07, + 0x289085, + 0x241d03, + 0x3268c7, + 0x3650c9, + 0x340a49, + 0x30dc0a, + 0x249a82, + 0x20fb44, + 0x2de284, + 0x349b07, + 0x398208, + 0x2e4f89, + 0x221a49, + 0x2e5dc7, + 0x35c346, + 0xe8346, + 0x2e9484, + 0x2e9a8a, + 0x2edc08, + 0x2efc09, + 0x2de106, + 0x2b1985, + 0x23fb08, + 0x2c358a, + 0x2b5d83, + 0x31e206, + 0x2e5ec7, + 0x2311c5, + 0x38c705, + 0x3a26c3, + 0x2702c4, + 0x22b985, + 0x282ac7, + 0x2ff185, + 0x337c86, + 0x14aa05, + 0x2a3203, + 0x3b8a09, + 0x2758cc, + 0x2ca74c, + 0x2cbb08, + 0x2baec7, + 0x2fbc08, + 0x2fc24a, + 0x2fcc4b, + 0x293748, + 0x23c908, + 0x23d286, + 0x201985, + 0x320fca, + 0x369705, + 0x20e982, + 0x3c0507, + 0x261146, + 0x367405, + 0x36b749, + 0x277385, + 0x36d785, + 0x35c909, + 0x3189c6, + 0x3b6988, + 0x20fc43, + 0x217806, + 0x274c46, + 0x30a485, + 0x30a489, + 0x2e56c9, + 0x251b47, + 0x10c984, + 0x30c987, + 0x221949, + 0x23d605, + 0x40788, + 0x346245, + 0x332285, + 0x3c1309, + 0x203402, + 0x250904, + 0x203c82, + 0x205842, + 0x3c0d85, + 0x30a688, + 0x2b7c05, + 0x2c2483, + 0x2c2485, + 0x2ce1c3, + 0x20ff02, + 0x208f84, + 0x2c7d03, + 0x206f02, + 0x340484, + 0x2def43, + 0x204882, + 0x21fc43, + 0x28cb84, + 0x2f01c3, + 0x256784, + 0x200a02, + 0x219443, + 0x21d403, + 0x206a42, + 0x2f4682, + 0x2e5509, + 0x20ad42, + 0x2889c4, + 0x200442, + 0x23f984, + 0x35c304, + 0x287304, + 0x204182, + 0x23c542, + 0x3295c3, + 0x23b943, + 0x24d644, + 0x2b5c04, + 0x2eddc4, + 0x30b6c4, + 0x309043, + 0x335a83, + 0x336604, + 0x30eac4, + 0x30f306, + 0x2145c2, + 0x202c42, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x200742, + 0x2b6c03, + 0x20be03, + 0x237583, + 0x201b03, + 0x30e843, + 0x26ff84, + 0x2e57c4, + 0x226444, + 0x20ec83, + 0x241d03, + 0x219543, + 0x2ea0c4, + 0x31a803, + 0x2a6503, + 0x36aac4, + 0x346046, + 
0x20b583, + 0x15da87, + 0x22fac3, + 0x21e903, + 0x2b0c43, + 0x20fbc3, + 0x20be83, + 0x339d45, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x210e03, + 0x2333c3, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x214bc3, + 0x20ec83, + 0x238184, + 0xaff03, + 0x241d03, + 0x209944, + 0x2bdd85, + 0x15da87, + 0x202c42, + 0x209d42, + 0x202542, + 0x2032c2, + 0xae43, + 0x200342, + 0x20be03, + 0x23d744, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x226444, + 0x20ec83, + 0xae43, + 0x241d03, + 0x207c03, + 0x28b304, + 0xcd588, + 0x20be03, + 0x20ae43, + 0x10ec3, + 0x13b5c4, + 0x253404, + 0xcd588, + 0x20be03, + 0x254a04, + 0x26ff84, + 0x20ae43, + 0x2056c2, + 0x241d03, + 0x24f3c3, + 0x702c4, + 0x2f5805, + 0x20e982, + 0x30ec03, + 0xf89, + 0xd3686, + 0xfcc8, + 0x200742, + 0xcd588, + 0x202c42, + 0x237583, + 0x30e843, + 0x201342, + 0xae43, + 0x241d03, + 0x200742, + 0x1b6447, + 0x11c889, + 0x5483, + 0xcd588, + 0x1226c3, + 0x5f33d587, + 0xbe03, + 0x1c6548, + 0x237583, + 0x30e843, + 0x178d46, + 0x214bc3, + 0x5b388, + 0xc0248, + 0x40e06, + 0x21f743, + 0xc6788, + 0x97c03, + 0xdbd45, + 0x37787, + 0xec83, + 0x6c83, + 0x41d03, + 0x4bc2, + 0x16c18a, + 0x1c0e43, + 0x30c5c4, + 0x105e0b, + 0x1063c8, + 0x8d302, + 0x200742, + 0x202c42, + 0x20be03, + 0x237583, + 0x2d2484, + 0x30e843, + 0x214bc3, + 0x21f743, + 0x20ec83, + 0x20be03, + 0x237583, + 0x30e843, + 0x20be83, + 0x20ec83, + 0x241d03, + 0x209943, + 0x207c03, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x10ec3, + 0x20be03, + 0x237583, + 0x30e843, + 0x26ff84, + 0x20be83, + 0x20ec83, + 0x241d03, + 0x230882, + 0x200101, + 0x200742, + 0x200301, + 0x32a782, + 0xcd588, + 0x21fe85, + 0x200781, + 0xbe03, + 0x2014c1, + 0x200041, + 0x200141, + 0x24e082, + 0x378184, + 0x24e083, + 0x201401, + 0x200901, + 0x200541, + 0x200a81, + 0x316307, + 0x337dcf, + 0x2fa486, + 0x200641, + 0x33b606, + 0x200081, + 0x2001c1, + 0x3c35ce, + 0x200341, + 0x241d03, + 0x201681, + 0x254285, + 0x204bc2, + 0x3a25c5, + 0x2002c1, + 0x200a01, + 0x200401, + 0x20e982, + 0x200441, + 0x203f81, + 0x20d601, + 0x201181, + 0x200dc1, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x21a003, + 0x20be03, + 0x30e843, + 0x8d248, + 0x21f743, + 0x20ec83, + 0x5e8c3, + 0x241d03, + 0x14e0f48, + 0x10d08, + 0xcd588, + 0xae43, + 0x24704, + 0x4cec4, + 0x14e0f4a, + 0xcd588, + 0xaff03, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x205583, + 0xcd588, + 0x20be03, + 0x237583, + 0x2d2484, + 0x241d03, + 0x252385, + 0x317c44, + 0x20be03, + 0x20ec83, + 0x241d03, + 0x2fc8a, + 0xfd504, + 0x112a46, + 0x202c42, + 0x20be03, + 0x234d09, + 0x237583, + 0x2a82c9, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x2e9288, + 0x397b87, + 0x2f5805, + 0x1b7888, + 0x1b6447, + 0x19848a, + 0x101d0b, + 0x13b847, + 0x45388, + 0x3a80a, + 0x13dc8, + 0x11c889, + 0x2b847, + 0x67fc7, + 0x1c28c8, + 0x1c6548, + 0x470cf, + 0x26505, + 0x1c6847, + 0x178d46, + 0x4c207, + 0x108186, + 0x5b388, + 0x9b986, + 0x1187c7, + 0x142349, + 0x1b5207, + 0xe68c9, + 0xb8209, + 0xbdb06, + 0xc0248, + 0xbeb45, + 0x77fca, + 0xc6788, + 0x97c03, + 0xcea08, + 0x37787, + 0x1ac045, + 0x4dc90, + 0x6c83, + 0xaff03, + 0x1c3147, + 0x1d5c5, + 0xe4308, + 0x605c5, + 0x1c0e43, + 0x142748, + 0x132146, + 0x199bc9, + 0xaab87, + 0x124b, + 0x137a84, + 0xfe3c4, + 0x105e0b, + 0x1063c8, + 0x107e07, + 0x117485, + 0x20be03, + 0x237583, + 0x203d43, + 0x241d03, + 0x244a03, + 0x30e843, + 0xaff03, + 
0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x7e24b, + 0x200742, + 0x202c42, + 0x241d03, + 0xcd588, + 0x200742, + 0x202c42, + 0x202542, + 0x201342, + 0x200b82, + 0x20ec83, + 0x200342, + 0x200742, + 0x2b6c03, + 0x202c42, + 0x20be03, + 0x237583, + 0x202542, + 0x30e843, + 0x214bc3, + 0x21f743, + 0x226444, + 0x20ec83, + 0x207783, + 0x241d03, + 0x30c5c4, + 0x203f83, + 0x30e843, + 0x202c42, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x20ae43, + 0x241d03, + 0x3afd87, + 0x20be03, + 0x279947, + 0x2e6686, + 0x216543, + 0x208883, + 0x30e843, + 0x204d03, + 0x26ff84, + 0x38b344, + 0x2b9906, + 0x20c743, + 0x20ec83, + 0x241d03, + 0x252385, + 0x309e84, + 0x320dc3, + 0x20d203, + 0x3c0507, + 0x23b1c5, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x98747, + 0x2149c2, + 0x26e443, + 0x20df43, + 0x2b6c03, + 0x6760be03, + 0x20b2c2, + 0x237583, + 0x206c03, + 0x30e843, + 0x26ff84, + 0x3c32c3, + 0x3371c3, + 0x21f743, + 0x226444, + 0x67a02f02, + 0x20ec83, + 0x241d03, + 0x235cc3, + 0x214c43, + 0x230882, + 0x203f83, + 0xcd588, + 0x30e843, + 0x10ec3, + 0x31b0c4, + 0x2b6c03, + 0x202c42, + 0x20be03, + 0x23d744, + 0x237583, + 0x30e843, + 0x26ff84, + 0x214bc3, + 0x39e304, + 0x21a484, + 0x2d0e06, + 0x226444, + 0x20ec83, + 0x241d03, + 0x219543, + 0x261146, + 0x4170b, + 0x29546, + 0xeb94a, + 0x10b34a, + 0xcd588, + 0x21d0c4, + 0x68e0be03, + 0x2b6bc4, + 0x237583, + 0x268984, + 0x30e843, + 0x357d43, + 0x21f743, + 0x20ec83, + 0xaff03, + 0x241d03, + 0x55a43, + 0x33840b, + 0x3b1a0a, + 0x3c580c, + 0xd80c8, + 0x200742, + 0x202c42, + 0x202542, + 0x232585, + 0x26ff84, + 0x201e42, + 0x21f743, + 0x21a484, + 0x2032c2, + 0x200342, + 0x207c02, + 0x230882, + 0xb6c03, + 0x4d5c2, + 0x386389, + 0x3a3088, + 0x310449, + 0x34e049, + 0x23e48a, + 0x24e90a, + 0x219382, + 0x2efa02, + 0x2c42, + 0x20be03, + 0x230a42, + 0x246fc6, + 0x368802, + 0x207582, + 0x30208e, + 0x21948e, + 0x27bf47, + 0x20ec07, + 0x2e89c2, + 0x237583, + 0x30e843, + 0x209182, + 0x201342, + 0x6ff83, + 0x23d94f, + 0x247302, + 0x2f9507, + 0x2ad447, + 0x314407, + 0x2b0fcc, + 0x2b9b8c, + 0x207144, + 0x27d34a, + 0x2193c2, + 0x209602, + 0x2b9844, + 0x2028c2, + 0x2c10c2, + 0x2b9dc4, + 0x217f42, + 0x201542, + 0xf003, + 0x29ba07, + 0x233805, + 0x20e302, + 0x24c184, + 0x37c0c2, + 0x2d7c88, + 0x20ec83, + 0x39f108, + 0x206a82, + 0x207305, + 0x388206, + 0x241d03, + 0x208302, + 0x2e51c7, + 0x4bc2, + 0x272585, + 0x204905, + 0x212182, + 0x2030c2, + 0x293c0a, + 0x288f0a, + 0x23a382, + 0x29a184, + 0x2040c2, + 0x20fa08, + 0x200d82, + 0x39d588, + 0x302ac7, + 0x3038c9, + 0x204982, + 0x3086c5, + 0x36ba05, + 0x21b60b, + 0x2c418c, + 0x230548, + 0x31c188, + 0x2145c2, + 0x355bc2, + 0x200742, + 0xcd588, + 0x202c42, + 0x20be03, + 0x202542, + 0x2032c2, + 0xae43, + 0x200342, + 0x241d03, + 0x207c02, + 0x200742, + 0x6a202c42, + 0x6a70e843, + 0x20f003, + 0x201e42, + 0x20ec83, + 0x338c03, + 0x241d03, + 0x2e2d83, + 0x379586, + 0x1607c03, + 0xcd588, + 0x6e247, + 0x14a345, + 0xa7e0d, + 0xa5f4a, + 0x85047, + 0x6ae00a42, + 0x6b200602, + 0x6b600282, + 0x6ba02b82, + 0x6be12442, + 0x6c203cc2, + 0x15da87, + 0x6c602c42, + 0x6ca1b282, + 0x6ce1f9c2, + 0x6d202e02, + 0x219483, + 0x22644, + 0x282dc3, + 0x6d615902, + 0x6da039c2, + 0x55087, + 0x6de02202, + 0x6e200902, + 0x6e600542, + 0x6ea07d02, + 0x6ee03882, + 0x6f201342, + 0xc0f85, + 0x24c4c3, + 0x23a2c4, + 0x6f6028c2, + 0x6fa0dd82, + 0x6fe00682, + 0xb714b, + 0x702000c2, + 0x70a54ac2, + 0x70e01e42, + 0x71200b82, + 0x71603282, + 0x71a05a02, + 0x71e0d682, + 0x7226cd82, + 0x72602f02, + 0x72a04d42, + 
0x72e032c2, + 0x7323e0c2, + 0x7362a402, + 0x73a11e82, + 0xafd44, + 0x339b43, + 0x73e0e882, + 0x742190c2, + 0x74606482, + 0x74a02882, + 0x74e00342, + 0x75206f02, + 0x7e3c7, + 0x756057c2, + 0x75a00502, + 0x75e07c02, + 0x76209f82, + 0xf778c, + 0x76627882, + 0x76a2c0c2, + 0x76e0a902, + 0x77206d02, + 0x77611d82, + 0x77a3e602, + 0x77e0fc42, + 0x78213802, + 0x78674fc2, + 0x78a4f1c2, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x11343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x707c32c3, + 0x211343, + 0x339dc4, + 0x3a2f86, + 0x2f0ec3, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x200189, + 0x24d5c2, + 0x391283, + 0x2b8503, + 0x202f85, + 0x206c03, + 0x3c32c3, + 0x211343, + 0x29ea43, + 0x233d43, + 0x3bd849, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x7920be03, + 0x237583, + 0x332683, + 0x21f743, + 0x20ec83, + 0xae43, + 0x241d03, + 0xcd588, + 0x202c42, + 0x20be03, + 0x20ec83, + 0x241d03, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0xae43, + 0x241d03, + 0x253404, + 0x202c42, + 0x20be03, + 0x322183, + 0x237583, + 0x254a04, + 0x203d43, + 0x30e843, + 0x26ff84, + 0x214bc3, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x24f3c3, + 0x2f5805, + 0x233d43, + 0x203f83, + 0xae43, + 0x202c42, + 0x20be03, + 0x3c32c3, + 0x20ec83, + 0x241d03, + 0x200742, + 0x2b6c03, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x234106, + 0x26ff84, + 0x214bc3, + 0x226444, + 0x20ec83, + 0x241d03, + 0x219543, + 0x20be03, + 0x237583, + 0x20ec83, + 0x241d03, + 0x144be47, + 0x20be03, + 0x29546, + 0x237583, + 0x30e843, + 0xd91c6, + 0x20ec83, + 0x241d03, + 0x318fc8, + 0x31bfc9, + 0x330349, + 0x33af08, + 0x38a348, + 0x38a349, + 0x22c10d, + 0x24b00f, + 0x2ec510, + 0x354d0d, + 0x36ebcc, + 0x38bc4b, + 0xaf148, + 0xc7bc5, + 0x200742, + 0x23b005, + 0x20bfc3, + 0x7c602c42, + 0x237583, + 0x30e843, + 0x2d7ac7, + 0x20fbc3, + 0x21f743, + 0x20ec83, + 0x228803, + 0x20c0c3, + 0x20ae43, + 
0x241d03, + 0x253786, + 0x20e982, + 0x203f83, + 0xcd588, + 0x200742, + 0x2b6c03, + 0x202c42, + 0x20be03, + 0x237583, + 0x30e843, + 0x26ff84, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x207c03, + 0xf84, + 0x154ab06, + 0x200742, + 0x202c42, + 0x30e843, + 0x21f743, + 0x241d03, +} + +// children is the list of nodes' children, the parent's wildcard bit and the +// parent's node type. If a node has no children then their children index +// will be in the range [0, 6), depending on the wildcard bit and node type. +// +// The layout within the uint32, from MSB to LSB, is: +// [ 1 bits] unused +// [ 1 bits] wildcard bit +// [ 2 bits] node type +// [14 bits] high nodes index (exclusive) of children +// [14 bits] low nodes index (inclusive) of children +var children = [...]uint32{ + 0x0, + 0x10000000, + 0x20000000, + 0x40000000, + 0x50000000, + 0x60000000, + 0x185460f, + 0x1858615, + 0x187c616, + 0x19d861f, + 0x19ec676, + 0x1a0067b, + 0x1a14680, + 0x1a34685, + 0x1a3868d, + 0x1a5068e, + 0x1a78694, + 0x1a7c69e, + 0x1a9469f, + 0x1a986a5, + 0x1a9c6a6, + 0x1ad86a7, + 0x1adc6b6, + 0x21ae46b7, + 0x1b2c6b9, + 0x1b306cb, + 0x1b506cc, + 0x1b646d4, + 0x1b686d9, + 0x1b986da, + 0x1bb46e6, + 0x1bdc6ed, + 0x1bec6f7, + 0x1bf06fb, + 0x1c886fc, + 0x1c9c722, + 0x1cb0727, + 0x1ce072c, + 0x1cf0738, + 0x1d0473c, + 0x1da8741, + 0x1fa076a, + 0x1fa47e8, + 0x20107e9, + 0x207c804, + 0x209481f, + 0x20a8825, + 0x20b082a, + 0x20c482c, + 0x20c8831, + 0x20e4832, + 0x2134839, + 0x215084d, + 0x2154854, + 0x2158855, + 0x2174856, + 0x21b085d, + 0x621b486c, + 0x21cc86d, + 0x21e0873, + 0x21e4878, + 0x21f4879, + 0x22a487d, + 0x22a88a9, + 0x222b88aa, + 0x222bc8ae, + 0x222c08af, + 0x22f88b0, + 0x22fc8be, + 0x278c8bf, + 0x228349e3, + 0x22838a0d, + 0x2283ca0e, + 0x22848a0f, + 0x2284ca12, + 0x22858a13, + 0x2285ca16, + 0x22860a17, + 0x22864a18, + 0x22868a19, + 0x2286ca1a, + 0x22878a1b, + 0x2287ca1e, + 0x22888a1f, + 0x2288ca22, + 0x22890a23, + 0x22894a24, + 0x228a0a25, + 0x228a4a28, + 0x228b0a29, + 0x228b4a2c, + 0x228b8a2d, + 0x228bca2e, + 0x28c0a2f, + 0x228c4a30, + 0x228d0a31, + 0x228d4a34, + 0x28dca35, + 0x291ca37, + 0x2293ca47, + 0x22940a4f, + 0x22944a50, + 0x2948a51, + 0x2294ca52, + 0x2950a53, + 0x296ca54, + 0x2984a5b, + 0x2988a61, + 0x2998a62, + 0x29a4a66, + 0x29d8a69, + 0x29dca76, + 0x29f0a77, + 0x229f8a7c, + 0x2ab8a7e, + 0x22abcaae, + 0x2ac4aaf, + 0x2ac8ab1, + 0x2ae0ab2, + 0x2af4ab8, + 0x2b1cabd, + 0x2b3cac7, + 0x2b6cacf, + 0x2b94adb, + 0x2b98ae5, + 0x2bbcae6, + 0x2bc0aef, + 0x2bd4af0, + 0x2bd8af5, + 0x2bdcaf6, + 0x2bfcaf7, + 0x2c1caff, + 0x2c20b07, + 0x22c24b08, + 0x2c28b09, + 0x2c2cb0a, + 0x2c3cb0b, + 0x2c40b0f, + 0x2cb8b10, + 0x2cbcb2e, + 0x2cd8b2f, + 0x2ce8b36, + 0x2cfcb3a, + 0x2d14b3f, + 0x2d2cb45, + 0x2d44b4b, + 0x2d48b51, + 0x2d60b52, + 0x2d7cb58, + 0x2d9cb5f, + 0x2db4b67, + 0x2e14b6d, + 0x2e30b85, + 0x2e38b8c, + 0x2e3cb8e, + 0x2e50b8f, + 0x2e94b94, + 0x2f14ba5, + 0x2f40bc5, + 0x2f44bd0, + 0x2f4cbd1, + 0x2f6cbd3, + 0x2f70bdb, + 0x2f94bdc, + 0x2f9cbe5, + 0x2fd8be7, + 0x301cbf6, + 0x3020c07, + 0x3094c08, + 0x3098c25, + 0x2309cc26, + 0x230a0c27, + 0x230a4c28, + 0x230b4c29, + 0x230b8c2d, + 0x230bcc2e, + 0x230c0c2f, + 0x230c4c30, + 0x30dcc31, + 0x3100c37, + 0x3120c40, + 0x36e4c48, + 0x36f0db9, + 0x3710dbc, + 0x38ccdc4, + 0x399ce33, + 0x3a0ce67, + 0x3a64e83, + 0x3b4ce99, + 0x3ba4ed3, + 0x3be0ee9, + 0x3cdcef8, + 0x3da8f37, + 0x3e40f6a, + 0x3ed0f90, + 0x3f34fb4, + 0x416cfcd, + 0x422505b, + 0x42f1089, + 0x433d0bc, + 0x43c50cf, + 0x44010f1, + 0x4451100, + 0x44c9114, + 0x644cd132, + 0x644d1133, + 0x644d5134, + 0x4551135, + 0x45ad154, + 
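// A decoding sketch: given the layout documented above, each entry can be + // unpacked with plain shifts and masks. The names decodeChild, lo, hi, + // nodeType and wildcard below are illustrative assumptions, not identifiers + // defined in this package: + // + //	func decodeChild(u uint32) (lo, hi, nodeType uint32, wildcard bool) { + //		lo = u & 0x3fff            // [14 bits] low nodes index (inclusive) + //		hi = (u >> 14) & 0x3fff    // [14 bits] high nodes index (exclusive) + //		nodeType = (u >> 28) & 0x3 // [ 2 bits] node type + //		wildcard = (u>>30)&1 == 1  // [ 1 bit ] wildcard bit + //		return lo, hi, nodeType, wildcard + //	} + // + // For example, the sentinel entry 0x10000000 above decodes to lo == hi == 0 + // (no children) with node type 1, consistent with the childless index range + // [0, 6) noted in the comment. +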
0x462916b, + 0x46a118a, + 0x47211a8, + 0x478d1c8, + 0x48b91e3, + 0x491122e, + 0x64915244, + 0x49ad245, + 0x4a3526b, + 0x4a8128d, + 0x4ae92a0, + 0x4b912ba, + 0x4c592e4, + 0x4cc1316, + 0x4dd5330, + 0x64dd9375, + 0x64ddd376, + 0x4e39377, + 0x4e9538e, + 0x4f253a5, + 0x4fa13c9, + 0x4fe53e8, + 0x50c93f9, + 0x50fd432, + 0x515d43f, + 0x51d1457, + 0x5259474, + 0x5299496, + 0x53094a6, + 0x6530d4c2, + 0x53314c3, + 0x53354cc, + 0x534d4cd, + 0x53694d3, + 0x53ad4da, + 0x53bd4eb, + 0x53d54ef, + 0x544d4f5, + 0x5455513, + 0x5469515, + 0x548551a, + 0x54b1521, + 0x54b552c, + 0x54bd52d, + 0x54d152f, + 0x54ed534, + 0x54f953b, + 0x550153e, + 0x553d540, + 0x555154f, + 0x5559554, + 0x5565556, + 0x556d559, + 0x559155b, + 0x55b5564, + 0x55cd56d, + 0x55d1573, + 0x55d9574, + 0x55dd576, + 0x5645577, + 0x5649591, + 0x566d592, + 0x569159b, + 0x56ad5a4, + 0x56bd5ab, + 0x56d15af, + 0x56d55b4, + 0x56dd5b5, + 0x56f15b7, + 0x57015bc, + 0x57055c0, + 0x57215c1, + 0x5fb15c8, + 0x5fe97ec, + 0x60157fa, + 0x6031805, + 0x605180c, + 0x6071814, + 0x60b581c, + 0x60bd82d, + 0x260c182f, + 0x260c5830, + 0x60cd831, + 0x6245833, + 0x26249891, + 0x26259892, + 0x26261896, + 0x2626d898, + 0x627189b, + 0x627589c, + 0x629d89d, + 0x62c58a7, + 0x62c98b1, + 0x63018b2, + 0x63218c0, + 0x6e798c8, + 0x6e7db9e, + 0x6e81b9f, + 0x26e85ba0, + 0x6e89ba1, + 0x26e8dba2, + 0x6e91ba3, + 0x26e9dba4, + 0x6ea1ba7, + 0x6ea5ba8, + 0x26ea9ba9, + 0x6eadbaa, + 0x26eb5bab, + 0x6eb9bad, + 0x6ebdbae, + 0x26ecdbaf, + 0x6ed1bb3, + 0x6ed5bb4, + 0x6ed9bb5, + 0x6eddbb6, + 0x26ee1bb7, + 0x6ee5bb8, + 0x6ee9bb9, + 0x6eedbba, + 0x6ef1bbb, + 0x26ef9bbc, + 0x6efdbbe, + 0x6f01bbf, + 0x6f05bc0, + 0x26f09bc1, + 0x6f0dbc2, + 0x26f15bc3, + 0x26f19bc5, + 0x6f35bc6, + 0x6f45bcd, + 0x6f89bd1, + 0x6f8dbe2, + 0x6fb1be3, + 0x6fb5bec, + 0x6fb9bed, + 0x7145bee, + 0x27149c51, + 0x27151c52, + 0x27155c54, + 0x27159c55, + 0x7161c56, + 0x723dc58, + 0x27249c8f, + 0x2724dc92, + 0x27251c93, + 0x27255c94, + 0x7259c95, + 0x7285c96, + 0x7289ca1, + 0x72adca2, + 0x72b9cab, + 0x72d9cae, + 0x72ddcb6, + 0x7315cb7, + 0x75adcc5, + 0x7669d6b, + 0x767dd9a, + 0x76b1d9f, + 0x76e1dac, + 0x76fddb8, + 0x7725dbf, + 0x7745dc9, + 0x7761dd1, + 0x7789dd8, + 0x7799de2, + 0x779dde6, + 0x77a1de7, + 0x77d5de8, + 0x77e1df5, + 0x7801df8, + 0x7879e00, + 0x2787de1e, + 0x78a1e1f, + 0x78c1e28, + 0x78d5e30, + 0x78e9e35, + 0x78ede3a, + 0x790de3b, + 0x79b1e43, + 0x79cde6c, + 0x79f1e73, + 0x79f9e7c, + 0x7a05e7e, + 0x7a0de81, + 0x7a21e83, + 0x7a41e88, + 0x7a4de90, + 0x7a59e93, + 0x7a89e96, + 0x7b5dea2, + 0x7b61ed7, + 0x7b75ed8, + 0x7b7dedd, + 0x7b95edf, + 0x7b99ee5, + 0x7ba5ee6, + 0x7ba9ee9, + 0x7bc5eea, + 0x7c01ef1, + 0x7c05f00, + 0x7c25f01, + 0x7c75f09, + 0x7c91f1d, + 0x7ce5f24, + 0x7ce9f39, + 0x7cedf3a, + 0x7cf1f3b, + 0x7d35f3c, + 0x7d45f4d, + 0x7d85f51, + 0x7d89f61, + 0x7db9f62, + 0x7f01f6e, + 0x7f29fc0, + 0x7f55fca, + 0x7f65fd5, + 0x7f6dfd9, + 0x807dfdb, + 0x808a01f, + 0x8096022, + 0x80a2025, + 0x80ae028, + 0x80ba02b, + 0x80c602e, + 0x80d2031, + 0x80de034, + 0x80ea037, + 0x80f603a, + 0x810203d, + 0x810e040, + 0x811a043, + 0x8122046, + 0x812e048, + 0x813a04b, + 0x814604e, + 0x8152051, + 0x815e054, + 0x816a057, + 0x817605a, + 0x818205d, + 0x818e060, + 0x819a063, + 0x81a6066, + 0x81d2069, + 0x81de074, + 0x81ea077, + 0x81f607a, + 0x820207d, + 0x820e080, + 0x8216083, + 0x8222085, + 0x822e088, + 0x823a08b, + 0x824608e, + 0x8252091, + 0x825e094, + 0x826a097, + 0x827609a, + 0x828209d, + 0x828e0a0, + 0x829a0a3, + 0x82a60a6, + 0x82b20a9, + 0x82ba0ac, + 0x82c60ae, + 0x82d20b1, + 0x82de0b4, + 0x82ea0b7, + 0x82f60ba, + 0x83020bd, + 0x830e0c0, + 
0x831a0c3, + 0x831e0c6, + 0x832a0c7, + 0x83460ca, + 0x834a0d1, + 0x835a0d2, + 0x83760d6, + 0x83ba0dd, + 0x83be0ee, + 0x83d20ef, + 0x84060f4, + 0x8416101, + 0x8436105, + 0x844e10d, + 0x8466113, + 0x846e119, + 0x284b211b, + 0x84b612c, + 0x84e212d, + 0x84ea138, + 0x84fe13a, +} + +// max children 500 (capacity 1023) +// max text offset 29102 (capacity 32767) +// max text length 36 (capacity 63) +// max hi 8511 (capacity 16383) +// max lo 8506 (capacity 16383) diff --git a/vendor/golang.org/x/net/publicsuffix/table_test.go b/vendor/golang.org/x/net/publicsuffix/table_test.go new file mode 100644 index 0000000..228010c --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/table_test.go @@ -0,0 +1,16959 @@ +// generated by go run gen.go; DO NOT EDIT + +package publicsuffix + +var rules = [...]string{ + "ac", + "com.ac", + "edu.ac", + "gov.ac", + "net.ac", + "mil.ac", + "org.ac", + "ad", + "nom.ad", + "ae", + "co.ae", + "net.ae", + "org.ae", + "sch.ae", + "ac.ae", + "gov.ae", + "mil.ae", + "aero", + "accident-investigation.aero", + "accident-prevention.aero", + "aerobatic.aero", + "aeroclub.aero", + "aerodrome.aero", + "agents.aero", + "aircraft.aero", + "airline.aero", + "airport.aero", + "air-surveillance.aero", + "airtraffic.aero", + "air-traffic-control.aero", + "ambulance.aero", + "amusement.aero", + "association.aero", + "author.aero", + "ballooning.aero", + "broker.aero", + "caa.aero", + "cargo.aero", + "catering.aero", + "certification.aero", + "championship.aero", + "charter.aero", + "civilaviation.aero", + "club.aero", + "conference.aero", + "consultant.aero", + "consulting.aero", + "control.aero", + "council.aero", + "crew.aero", + "design.aero", + "dgca.aero", + "educator.aero", + "emergency.aero", + "engine.aero", + "engineer.aero", + "entertainment.aero", + "equipment.aero", + "exchange.aero", + "express.aero", + "federation.aero", + "flight.aero", + "freight.aero", + "fuel.aero", + "gliding.aero", + "government.aero", + "groundhandling.aero", + "group.aero", + "hanggliding.aero", + "homebuilt.aero", + "insurance.aero", + "journal.aero", + "journalist.aero", + "leasing.aero", + "logistics.aero", + "magazine.aero", + "maintenance.aero", + "media.aero", + "microlight.aero", + "modelling.aero", + "navigation.aero", + "parachuting.aero", + "paragliding.aero", + "passenger-association.aero", + "pilot.aero", + "press.aero", + "production.aero", + "recreation.aero", + "repbody.aero", + "res.aero", + "research.aero", + "rotorcraft.aero", + "safety.aero", + "scientist.aero", + "services.aero", + "show.aero", + "skydiving.aero", + "software.aero", + "student.aero", + "trader.aero", + "trading.aero", + "trainer.aero", + "union.aero", + "workinggroup.aero", + "works.aero", + "af", + "gov.af", + "com.af", + "org.af", + "net.af", + "edu.af", + "ag", + "com.ag", + "org.ag", + "net.ag", + "co.ag", + "nom.ag", + "ai", + "off.ai", + "com.ai", + "net.ai", + "org.ai", + "al", + "com.al", + "edu.al", + "gov.al", + "mil.al", + "net.al", + "org.al", + "am", + "ao", + "ed.ao", + "gv.ao", + "og.ao", + "co.ao", + "pb.ao", + "it.ao", + "aq", + "ar", + "com.ar", + "edu.ar", + "gob.ar", + "gov.ar", + "int.ar", + "mil.ar", + "musica.ar", + "net.ar", + "org.ar", + "tur.ar", + "arpa", + "e164.arpa", + "in-addr.arpa", + "ip6.arpa", + "iris.arpa", + "uri.arpa", + "urn.arpa", + "as", + "gov.as", + "asia", + "at", + "ac.at", + "co.at", + "gv.at", + "or.at", + "au", + "com.au", + "net.au", + "org.au", + "edu.au", + "gov.au", + "asn.au", + "id.au", + "info.au", + "conf.au", + "oz.au", + "act.au", + "nsw.au", + 
"nt.au", + "qld.au", + "sa.au", + "tas.au", + "vic.au", + "wa.au", + "act.edu.au", + "nsw.edu.au", + "nt.edu.au", + "qld.edu.au", + "sa.edu.au", + "tas.edu.au", + "vic.edu.au", + "wa.edu.au", + "qld.gov.au", + "sa.gov.au", + "tas.gov.au", + "vic.gov.au", + "wa.gov.au", + "aw", + "com.aw", + "ax", + "az", + "com.az", + "net.az", + "int.az", + "gov.az", + "org.az", + "edu.az", + "info.az", + "pp.az", + "mil.az", + "name.az", + "pro.az", + "biz.az", + "ba", + "com.ba", + "edu.ba", + "gov.ba", + "mil.ba", + "net.ba", + "org.ba", + "bb", + "biz.bb", + "co.bb", + "com.bb", + "edu.bb", + "gov.bb", + "info.bb", + "net.bb", + "org.bb", + "store.bb", + "tv.bb", + "*.bd", + "be", + "ac.be", + "bf", + "gov.bf", + "bg", + "a.bg", + "b.bg", + "c.bg", + "d.bg", + "e.bg", + "f.bg", + "g.bg", + "h.bg", + "i.bg", + "j.bg", + "k.bg", + "l.bg", + "m.bg", + "n.bg", + "o.bg", + "p.bg", + "q.bg", + "r.bg", + "s.bg", + "t.bg", + "u.bg", + "v.bg", + "w.bg", + "x.bg", + "y.bg", + "z.bg", + "0.bg", + "1.bg", + "2.bg", + "3.bg", + "4.bg", + "5.bg", + "6.bg", + "7.bg", + "8.bg", + "9.bg", + "bh", + "com.bh", + "edu.bh", + "net.bh", + "org.bh", + "gov.bh", + "bi", + "co.bi", + "com.bi", + "edu.bi", + "or.bi", + "org.bi", + "biz", + "bj", + "asso.bj", + "barreau.bj", + "gouv.bj", + "bm", + "com.bm", + "edu.bm", + "gov.bm", + "net.bm", + "org.bm", + "*.bn", + "bo", + "com.bo", + "edu.bo", + "gob.bo", + "int.bo", + "org.bo", + "net.bo", + "mil.bo", + "tv.bo", + "web.bo", + "academia.bo", + "agro.bo", + "arte.bo", + "blog.bo", + "bolivia.bo", + "ciencia.bo", + "cooperativa.bo", + "democracia.bo", + "deporte.bo", + "ecologia.bo", + "economia.bo", + "empresa.bo", + "indigena.bo", + "industria.bo", + "info.bo", + "medicina.bo", + "movimiento.bo", + "musica.bo", + "natural.bo", + "nombre.bo", + "noticias.bo", + "patria.bo", + "politica.bo", + "profesional.bo", + "plurinacional.bo", + "pueblo.bo", + "revista.bo", + "salud.bo", + "tecnologia.bo", + "tksat.bo", + "transporte.bo", + "wiki.bo", + "br", + "9guacu.br", + "abc.br", + "adm.br", + "adv.br", + "agr.br", + "aju.br", + "am.br", + "anani.br", + "aparecida.br", + "arq.br", + "art.br", + "ato.br", + "b.br", + "belem.br", + "bhz.br", + "bio.br", + "blog.br", + "bmd.br", + "boavista.br", + "bsb.br", + "campinagrande.br", + "campinas.br", + "caxias.br", + "cim.br", + "cng.br", + "cnt.br", + "com.br", + "contagem.br", + "coop.br", + "cri.br", + "cuiaba.br", + "curitiba.br", + "def.br", + "ecn.br", + "eco.br", + "edu.br", + "emp.br", + "eng.br", + "esp.br", + "etc.br", + "eti.br", + "far.br", + "feira.br", + "flog.br", + "floripa.br", + "fm.br", + "fnd.br", + "fortal.br", + "fot.br", + "foz.br", + "fst.br", + "g12.br", + "ggf.br", + "goiania.br", + "gov.br", + "ac.gov.br", + "al.gov.br", + "am.gov.br", + "ap.gov.br", + "ba.gov.br", + "ce.gov.br", + "df.gov.br", + "es.gov.br", + "go.gov.br", + "ma.gov.br", + "mg.gov.br", + "ms.gov.br", + "mt.gov.br", + "pa.gov.br", + "pb.gov.br", + "pe.gov.br", + "pi.gov.br", + "pr.gov.br", + "rj.gov.br", + "rn.gov.br", + "ro.gov.br", + "rr.gov.br", + "rs.gov.br", + "sc.gov.br", + "se.gov.br", + "sp.gov.br", + "to.gov.br", + "gru.br", + "imb.br", + "ind.br", + "inf.br", + "jab.br", + "jampa.br", + "jdf.br", + "joinville.br", + "jor.br", + "jus.br", + "leg.br", + "lel.br", + "londrina.br", + "macapa.br", + "maceio.br", + "manaus.br", + "maringa.br", + "mat.br", + "med.br", + "mil.br", + "morena.br", + "mp.br", + "mus.br", + "natal.br", + "net.br", + "niteroi.br", + "*.nom.br", + "not.br", + "ntr.br", + "odo.br", + "org.br", + "osasco.br", + 
"palmas.br", + "poa.br", + "ppg.br", + "pro.br", + "psc.br", + "psi.br", + "pvh.br", + "qsl.br", + "radio.br", + "rec.br", + "recife.br", + "ribeirao.br", + "rio.br", + "riobranco.br", + "riopreto.br", + "salvador.br", + "sampa.br", + "santamaria.br", + "santoandre.br", + "saobernardo.br", + "saogonca.br", + "sjc.br", + "slg.br", + "slz.br", + "sorocaba.br", + "srv.br", + "taxi.br", + "teo.br", + "the.br", + "tmp.br", + "trd.br", + "tur.br", + "tv.br", + "udi.br", + "vet.br", + "vix.br", + "vlog.br", + "wiki.br", + "zlg.br", + "bs", + "com.bs", + "net.bs", + "org.bs", + "edu.bs", + "gov.bs", + "bt", + "com.bt", + "edu.bt", + "gov.bt", + "net.bt", + "org.bt", + "bv", + "bw", + "co.bw", + "org.bw", + "by", + "gov.by", + "mil.by", + "com.by", + "of.by", + "bz", + "com.bz", + "net.bz", + "org.bz", + "edu.bz", + "gov.bz", + "ca", + "ab.ca", + "bc.ca", + "mb.ca", + "nb.ca", + "nf.ca", + "nl.ca", + "ns.ca", + "nt.ca", + "nu.ca", + "on.ca", + "pe.ca", + "qc.ca", + "sk.ca", + "yk.ca", + "gc.ca", + "cat", + "cc", + "cd", + "gov.cd", + "cf", + "cg", + "ch", + "ci", + "org.ci", + "or.ci", + "com.ci", + "co.ci", + "edu.ci", + "ed.ci", + "ac.ci", + "net.ci", + "go.ci", + "asso.ci", + "xn--aroport-bya.ci", + "int.ci", + "presse.ci", + "md.ci", + "gouv.ci", + "*.ck", + "!www.ck", + "cl", + "gov.cl", + "gob.cl", + "co.cl", + "mil.cl", + "cm", + "co.cm", + "com.cm", + "gov.cm", + "net.cm", + "cn", + "ac.cn", + "com.cn", + "edu.cn", + "gov.cn", + "net.cn", + "org.cn", + "mil.cn", + "xn--55qx5d.cn", + "xn--io0a7i.cn", + "xn--od0alg.cn", + "ah.cn", + "bj.cn", + "cq.cn", + "fj.cn", + "gd.cn", + "gs.cn", + "gz.cn", + "gx.cn", + "ha.cn", + "hb.cn", + "he.cn", + "hi.cn", + "hl.cn", + "hn.cn", + "jl.cn", + "js.cn", + "jx.cn", + "ln.cn", + "nm.cn", + "nx.cn", + "qh.cn", + "sc.cn", + "sd.cn", + "sh.cn", + "sn.cn", + "sx.cn", + "tj.cn", + "xj.cn", + "xz.cn", + "yn.cn", + "zj.cn", + "hk.cn", + "mo.cn", + "tw.cn", + "co", + "arts.co", + "com.co", + "edu.co", + "firm.co", + "gov.co", + "info.co", + "int.co", + "mil.co", + "net.co", + "nom.co", + "org.co", + "rec.co", + "web.co", + "com", + "coop", + "cr", + "ac.cr", + "co.cr", + "ed.cr", + "fi.cr", + "go.cr", + "or.cr", + "sa.cr", + "cu", + "com.cu", + "edu.cu", + "org.cu", + "net.cu", + "gov.cu", + "inf.cu", + "cv", + "cw", + "com.cw", + "edu.cw", + "net.cw", + "org.cw", + "cx", + "gov.cx", + "cy", + "ac.cy", + "biz.cy", + "com.cy", + "ekloges.cy", + "gov.cy", + "ltd.cy", + "name.cy", + "net.cy", + "org.cy", + "parliament.cy", + "press.cy", + "pro.cy", + "tm.cy", + "cz", + "de", + "dj", + "dk", + "dm", + "com.dm", + "net.dm", + "org.dm", + "edu.dm", + "gov.dm", + "do", + "art.do", + "com.do", + "edu.do", + "gob.do", + "gov.do", + "mil.do", + "net.do", + "org.do", + "sld.do", + "web.do", + "dz", + "com.dz", + "org.dz", + "net.dz", + "gov.dz", + "edu.dz", + "asso.dz", + "pol.dz", + "art.dz", + "ec", + "com.ec", + "info.ec", + "net.ec", + "fin.ec", + "k12.ec", + "med.ec", + "pro.ec", + "org.ec", + "edu.ec", + "gov.ec", + "gob.ec", + "mil.ec", + "edu", + "ee", + "edu.ee", + "gov.ee", + "riik.ee", + "lib.ee", + "med.ee", + "com.ee", + "pri.ee", + "aip.ee", + "org.ee", + "fie.ee", + "eg", + "com.eg", + "edu.eg", + "eun.eg", + "gov.eg", + "mil.eg", + "name.eg", + "net.eg", + "org.eg", + "sci.eg", + "*.er", + "es", + "com.es", + "nom.es", + "org.es", + "gob.es", + "edu.es", + "et", + "com.et", + "gov.et", + "org.et", + "edu.et", + "biz.et", + "name.et", + "info.et", + "net.et", + "eu", + "fi", + "aland.fi", + "*.fj", + "*.fk", + "fm", + "fo", + "fr", + "com.fr", + "asso.fr", + 
"nom.fr", + "prd.fr", + "presse.fr", + "tm.fr", + "aeroport.fr", + "assedic.fr", + "avocat.fr", + "avoues.fr", + "cci.fr", + "chambagri.fr", + "chirurgiens-dentistes.fr", + "experts-comptables.fr", + "geometre-expert.fr", + "gouv.fr", + "greta.fr", + "huissier-justice.fr", + "medecin.fr", + "notaires.fr", + "pharmacien.fr", + "port.fr", + "veterinaire.fr", + "ga", + "gb", + "gd", + "ge", + "com.ge", + "edu.ge", + "gov.ge", + "org.ge", + "mil.ge", + "net.ge", + "pvt.ge", + "gf", + "gg", + "co.gg", + "net.gg", + "org.gg", + "gh", + "com.gh", + "edu.gh", + "gov.gh", + "org.gh", + "mil.gh", + "gi", + "com.gi", + "ltd.gi", + "gov.gi", + "mod.gi", + "edu.gi", + "org.gi", + "gl", + "co.gl", + "com.gl", + "edu.gl", + "net.gl", + "org.gl", + "gm", + "gn", + "ac.gn", + "com.gn", + "edu.gn", + "gov.gn", + "org.gn", + "net.gn", + "gov", + "gp", + "com.gp", + "net.gp", + "mobi.gp", + "edu.gp", + "org.gp", + "asso.gp", + "gq", + "gr", + "com.gr", + "edu.gr", + "net.gr", + "org.gr", + "gov.gr", + "gs", + "gt", + "com.gt", + "edu.gt", + "gob.gt", + "ind.gt", + "mil.gt", + "net.gt", + "org.gt", + "*.gu", + "gw", + "gy", + "co.gy", + "com.gy", + "edu.gy", + "gov.gy", + "net.gy", + "org.gy", + "hk", + "com.hk", + "edu.hk", + "gov.hk", + "idv.hk", + "net.hk", + "org.hk", + "xn--55qx5d.hk", + "xn--wcvs22d.hk", + "xn--lcvr32d.hk", + "xn--mxtq1m.hk", + "xn--gmqw5a.hk", + "xn--ciqpn.hk", + "xn--gmq050i.hk", + "xn--zf0avx.hk", + "xn--io0a7i.hk", + "xn--mk0axi.hk", + "xn--od0alg.hk", + "xn--od0aq3b.hk", + "xn--tn0ag.hk", + "xn--uc0atv.hk", + "xn--uc0ay4a.hk", + "hm", + "hn", + "com.hn", + "edu.hn", + "org.hn", + "net.hn", + "mil.hn", + "gob.hn", + "hr", + "iz.hr", + "from.hr", + "name.hr", + "com.hr", + "ht", + "com.ht", + "shop.ht", + "firm.ht", + "info.ht", + "adult.ht", + "net.ht", + "pro.ht", + "org.ht", + "med.ht", + "art.ht", + "coop.ht", + "pol.ht", + "asso.ht", + "edu.ht", + "rel.ht", + "gouv.ht", + "perso.ht", + "hu", + "co.hu", + "info.hu", + "org.hu", + "priv.hu", + "sport.hu", + "tm.hu", + "2000.hu", + "agrar.hu", + "bolt.hu", + "casino.hu", + "city.hu", + "erotica.hu", + "erotika.hu", + "film.hu", + "forum.hu", + "games.hu", + "hotel.hu", + "ingatlan.hu", + "jogasz.hu", + "konyvelo.hu", + "lakas.hu", + "media.hu", + "news.hu", + "reklam.hu", + "sex.hu", + "shop.hu", + "suli.hu", + "szex.hu", + "tozsde.hu", + "utazas.hu", + "video.hu", + "id", + "ac.id", + "biz.id", + "co.id", + "desa.id", + "go.id", + "mil.id", + "my.id", + "net.id", + "or.id", + "sch.id", + "web.id", + "ie", + "gov.ie", + "il", + "ac.il", + "co.il", + "gov.il", + "idf.il", + "k12.il", + "muni.il", + "net.il", + "org.il", + "im", + "ac.im", + "co.im", + "com.im", + "ltd.co.im", + "net.im", + "org.im", + "plc.co.im", + "tt.im", + "tv.im", + "in", + "co.in", + "firm.in", + "net.in", + "org.in", + "gen.in", + "ind.in", + "nic.in", + "ac.in", + "edu.in", + "res.in", + "gov.in", + "mil.in", + "info", + "int", + "eu.int", + "io", + "com.io", + "iq", + "gov.iq", + "edu.iq", + "mil.iq", + "com.iq", + "org.iq", + "net.iq", + "ir", + "ac.ir", + "co.ir", + "gov.ir", + "id.ir", + "net.ir", + "org.ir", + "sch.ir", + "xn--mgba3a4f16a.ir", + "xn--mgba3a4fra.ir", + "is", + "net.is", + "com.is", + "edu.is", + "gov.is", + "org.is", + "int.is", + "it", + "gov.it", + "edu.it", + "abr.it", + "abruzzo.it", + "aosta-valley.it", + "aostavalley.it", + "bas.it", + "basilicata.it", + "cal.it", + "calabria.it", + "cam.it", + "campania.it", + "emilia-romagna.it", + "emiliaromagna.it", + "emr.it", + "friuli-v-giulia.it", + "friuli-ve-giulia.it", + 
"friuli-vegiulia.it", + "friuli-venezia-giulia.it", + "friuli-veneziagiulia.it", + "friuli-vgiulia.it", + "friuliv-giulia.it", + "friulive-giulia.it", + "friulivegiulia.it", + "friulivenezia-giulia.it", + "friuliveneziagiulia.it", + "friulivgiulia.it", + "fvg.it", + "laz.it", + "lazio.it", + "lig.it", + "liguria.it", + "lom.it", + "lombardia.it", + "lombardy.it", + "lucania.it", + "mar.it", + "marche.it", + "mol.it", + "molise.it", + "piedmont.it", + "piemonte.it", + "pmn.it", + "pug.it", + "puglia.it", + "sar.it", + "sardegna.it", + "sardinia.it", + "sic.it", + "sicilia.it", + "sicily.it", + "taa.it", + "tos.it", + "toscana.it", + "trentino-a-adige.it", + "trentino-aadige.it", + "trentino-alto-adige.it", + "trentino-altoadige.it", + "trentino-s-tirol.it", + "trentino-stirol.it", + "trentino-sud-tirol.it", + "trentino-sudtirol.it", + "trentino-sued-tirol.it", + "trentino-suedtirol.it", + "trentinoa-adige.it", + "trentinoaadige.it", + "trentinoalto-adige.it", + "trentinoaltoadige.it", + "trentinos-tirol.it", + "trentinostirol.it", + "trentinosud-tirol.it", + "trentinosudtirol.it", + "trentinosued-tirol.it", + "trentinosuedtirol.it", + "tuscany.it", + "umb.it", + "umbria.it", + "val-d-aosta.it", + "val-daosta.it", + "vald-aosta.it", + "valdaosta.it", + "valle-aosta.it", + "valle-d-aosta.it", + "valle-daosta.it", + "valleaosta.it", + "valled-aosta.it", + "valledaosta.it", + "vallee-aoste.it", + "valleeaoste.it", + "vao.it", + "vda.it", + "ven.it", + "veneto.it", + "ag.it", + "agrigento.it", + "al.it", + "alessandria.it", + "alto-adige.it", + "altoadige.it", + "an.it", + "ancona.it", + "andria-barletta-trani.it", + "andria-trani-barletta.it", + "andriabarlettatrani.it", + "andriatranibarletta.it", + "ao.it", + "aosta.it", + "aoste.it", + "ap.it", + "aq.it", + "aquila.it", + "ar.it", + "arezzo.it", + "ascoli-piceno.it", + "ascolipiceno.it", + "asti.it", + "at.it", + "av.it", + "avellino.it", + "ba.it", + "balsan.it", + "bari.it", + "barletta-trani-andria.it", + "barlettatraniandria.it", + "belluno.it", + "benevento.it", + "bergamo.it", + "bg.it", + "bi.it", + "biella.it", + "bl.it", + "bn.it", + "bo.it", + "bologna.it", + "bolzano.it", + "bozen.it", + "br.it", + "brescia.it", + "brindisi.it", + "bs.it", + "bt.it", + "bz.it", + "ca.it", + "cagliari.it", + "caltanissetta.it", + "campidano-medio.it", + "campidanomedio.it", + "campobasso.it", + "carbonia-iglesias.it", + "carboniaiglesias.it", + "carrara-massa.it", + "carraramassa.it", + "caserta.it", + "catania.it", + "catanzaro.it", + "cb.it", + "ce.it", + "cesena-forli.it", + "cesenaforli.it", + "ch.it", + "chieti.it", + "ci.it", + "cl.it", + "cn.it", + "co.it", + "como.it", + "cosenza.it", + "cr.it", + "cremona.it", + "crotone.it", + "cs.it", + "ct.it", + "cuneo.it", + "cz.it", + "dell-ogliastra.it", + "dellogliastra.it", + "en.it", + "enna.it", + "fc.it", + "fe.it", + "fermo.it", + "ferrara.it", + "fg.it", + "fi.it", + "firenze.it", + "florence.it", + "fm.it", + "foggia.it", + "forli-cesena.it", + "forlicesena.it", + "fr.it", + "frosinone.it", + "ge.it", + "genoa.it", + "genova.it", + "go.it", + "gorizia.it", + "gr.it", + "grosseto.it", + "iglesias-carbonia.it", + "iglesiascarbonia.it", + "im.it", + "imperia.it", + "is.it", + "isernia.it", + "kr.it", + "la-spezia.it", + "laquila.it", + "laspezia.it", + "latina.it", + "lc.it", + "le.it", + "lecce.it", + "lecco.it", + "li.it", + "livorno.it", + "lo.it", + "lodi.it", + "lt.it", + "lu.it", + "lucca.it", + "macerata.it", + "mantova.it", + "massa-carrara.it", + "massacarrara.it", + "matera.it", + 
"mb.it", + "mc.it", + "me.it", + "medio-campidano.it", + "mediocampidano.it", + "messina.it", + "mi.it", + "milan.it", + "milano.it", + "mn.it", + "mo.it", + "modena.it", + "monza-brianza.it", + "monza-e-della-brianza.it", + "monza.it", + "monzabrianza.it", + "monzaebrianza.it", + "monzaedellabrianza.it", + "ms.it", + "mt.it", + "na.it", + "naples.it", + "napoli.it", + "no.it", + "novara.it", + "nu.it", + "nuoro.it", + "og.it", + "ogliastra.it", + "olbia-tempio.it", + "olbiatempio.it", + "or.it", + "oristano.it", + "ot.it", + "pa.it", + "padova.it", + "padua.it", + "palermo.it", + "parma.it", + "pavia.it", + "pc.it", + "pd.it", + "pe.it", + "perugia.it", + "pesaro-urbino.it", + "pesarourbino.it", + "pescara.it", + "pg.it", + "pi.it", + "piacenza.it", + "pisa.it", + "pistoia.it", + "pn.it", + "po.it", + "pordenone.it", + "potenza.it", + "pr.it", + "prato.it", + "pt.it", + "pu.it", + "pv.it", + "pz.it", + "ra.it", + "ragusa.it", + "ravenna.it", + "rc.it", + "re.it", + "reggio-calabria.it", + "reggio-emilia.it", + "reggiocalabria.it", + "reggioemilia.it", + "rg.it", + "ri.it", + "rieti.it", + "rimini.it", + "rm.it", + "rn.it", + "ro.it", + "roma.it", + "rome.it", + "rovigo.it", + "sa.it", + "salerno.it", + "sassari.it", + "savona.it", + "si.it", + "siena.it", + "siracusa.it", + "so.it", + "sondrio.it", + "sp.it", + "sr.it", + "ss.it", + "suedtirol.it", + "sv.it", + "ta.it", + "taranto.it", + "te.it", + "tempio-olbia.it", + "tempioolbia.it", + "teramo.it", + "terni.it", + "tn.it", + "to.it", + "torino.it", + "tp.it", + "tr.it", + "trani-andria-barletta.it", + "trani-barletta-andria.it", + "traniandriabarletta.it", + "tranibarlettaandria.it", + "trapani.it", + "trentino.it", + "trento.it", + "treviso.it", + "trieste.it", + "ts.it", + "turin.it", + "tv.it", + "ud.it", + "udine.it", + "urbino-pesaro.it", + "urbinopesaro.it", + "va.it", + "varese.it", + "vb.it", + "vc.it", + "ve.it", + "venezia.it", + "venice.it", + "verbania.it", + "vercelli.it", + "verona.it", + "vi.it", + "vibo-valentia.it", + "vibovalentia.it", + "vicenza.it", + "viterbo.it", + "vr.it", + "vs.it", + "vt.it", + "vv.it", + "je", + "co.je", + "net.je", + "org.je", + "*.jm", + "jo", + "com.jo", + "org.jo", + "net.jo", + "edu.jo", + "sch.jo", + "gov.jo", + "mil.jo", + "name.jo", + "jobs", + "jp", + "ac.jp", + "ad.jp", + "co.jp", + "ed.jp", + "go.jp", + "gr.jp", + "lg.jp", + "ne.jp", + "or.jp", + "aichi.jp", + "akita.jp", + "aomori.jp", + "chiba.jp", + "ehime.jp", + "fukui.jp", + "fukuoka.jp", + "fukushima.jp", + "gifu.jp", + "gunma.jp", + "hiroshima.jp", + "hokkaido.jp", + "hyogo.jp", + "ibaraki.jp", + "ishikawa.jp", + "iwate.jp", + "kagawa.jp", + "kagoshima.jp", + "kanagawa.jp", + "kochi.jp", + "kumamoto.jp", + "kyoto.jp", + "mie.jp", + "miyagi.jp", + "miyazaki.jp", + "nagano.jp", + "nagasaki.jp", + "nara.jp", + "niigata.jp", + "oita.jp", + "okayama.jp", + "okinawa.jp", + "osaka.jp", + "saga.jp", + "saitama.jp", + "shiga.jp", + "shimane.jp", + "shizuoka.jp", + "tochigi.jp", + "tokushima.jp", + "tokyo.jp", + "tottori.jp", + "toyama.jp", + "wakayama.jp", + "yamagata.jp", + "yamaguchi.jp", + "yamanashi.jp", + "xn--4pvxs.jp", + "xn--vgu402c.jp", + "xn--c3s14m.jp", + "xn--f6qx53a.jp", + "xn--8pvr4u.jp", + "xn--uist22h.jp", + "xn--djrs72d6uy.jp", + "xn--mkru45i.jp", + "xn--0trq7p7nn.jp", + "xn--8ltr62k.jp", + "xn--2m4a15e.jp", + "xn--efvn9s.jp", + "xn--32vp30h.jp", + "xn--4it797k.jp", + "xn--1lqs71d.jp", + "xn--5rtp49c.jp", + "xn--5js045d.jp", + "xn--ehqz56n.jp", + "xn--1lqs03n.jp", + "xn--qqqt11m.jp", + "xn--kbrq7o.jp", + 
"xn--pssu33l.jp", + "xn--ntsq17g.jp", + "xn--uisz3g.jp", + "xn--6btw5a.jp", + "xn--1ctwo.jp", + "xn--6orx2r.jp", + "xn--rht61e.jp", + "xn--rht27z.jp", + "xn--djty4k.jp", + "xn--nit225k.jp", + "xn--rht3d.jp", + "xn--klty5x.jp", + "xn--kltx9a.jp", + "xn--kltp7d.jp", + "xn--uuwu58a.jp", + "xn--zbx025d.jp", + "xn--ntso0iqx3a.jp", + "xn--elqq16h.jp", + "xn--4it168d.jp", + "xn--klt787d.jp", + "xn--rny31h.jp", + "xn--7t0a264c.jp", + "xn--5rtq34k.jp", + "xn--k7yn95e.jp", + "xn--tor131o.jp", + "xn--d5qv7z876c.jp", + "*.kawasaki.jp", + "*.kitakyushu.jp", + "*.kobe.jp", + "*.nagoya.jp", + "*.sapporo.jp", + "*.sendai.jp", + "*.yokohama.jp", + "!city.kawasaki.jp", + "!city.kitakyushu.jp", + "!city.kobe.jp", + "!city.nagoya.jp", + "!city.sapporo.jp", + "!city.sendai.jp", + "!city.yokohama.jp", + "aisai.aichi.jp", + "ama.aichi.jp", + "anjo.aichi.jp", + "asuke.aichi.jp", + "chiryu.aichi.jp", + "chita.aichi.jp", + "fuso.aichi.jp", + "gamagori.aichi.jp", + "handa.aichi.jp", + "hazu.aichi.jp", + "hekinan.aichi.jp", + "higashiura.aichi.jp", + "ichinomiya.aichi.jp", + "inazawa.aichi.jp", + "inuyama.aichi.jp", + "isshiki.aichi.jp", + "iwakura.aichi.jp", + "kanie.aichi.jp", + "kariya.aichi.jp", + "kasugai.aichi.jp", + "kira.aichi.jp", + "kiyosu.aichi.jp", + "komaki.aichi.jp", + "konan.aichi.jp", + "kota.aichi.jp", + "mihama.aichi.jp", + "miyoshi.aichi.jp", + "nishio.aichi.jp", + "nisshin.aichi.jp", + "obu.aichi.jp", + "oguchi.aichi.jp", + "oharu.aichi.jp", + "okazaki.aichi.jp", + "owariasahi.aichi.jp", + "seto.aichi.jp", + "shikatsu.aichi.jp", + "shinshiro.aichi.jp", + "shitara.aichi.jp", + "tahara.aichi.jp", + "takahama.aichi.jp", + "tobishima.aichi.jp", + "toei.aichi.jp", + "togo.aichi.jp", + "tokai.aichi.jp", + "tokoname.aichi.jp", + "toyoake.aichi.jp", + "toyohashi.aichi.jp", + "toyokawa.aichi.jp", + "toyone.aichi.jp", + "toyota.aichi.jp", + "tsushima.aichi.jp", + "yatomi.aichi.jp", + "akita.akita.jp", + "daisen.akita.jp", + "fujisato.akita.jp", + "gojome.akita.jp", + "hachirogata.akita.jp", + "happou.akita.jp", + "higashinaruse.akita.jp", + "honjo.akita.jp", + "honjyo.akita.jp", + "ikawa.akita.jp", + "kamikoani.akita.jp", + "kamioka.akita.jp", + "katagami.akita.jp", + "kazuno.akita.jp", + "kitaakita.akita.jp", + "kosaka.akita.jp", + "kyowa.akita.jp", + "misato.akita.jp", + "mitane.akita.jp", + "moriyoshi.akita.jp", + "nikaho.akita.jp", + "noshiro.akita.jp", + "odate.akita.jp", + "oga.akita.jp", + "ogata.akita.jp", + "semboku.akita.jp", + "yokote.akita.jp", + "yurihonjo.akita.jp", + "aomori.aomori.jp", + "gonohe.aomori.jp", + "hachinohe.aomori.jp", + "hashikami.aomori.jp", + "hiranai.aomori.jp", + "hirosaki.aomori.jp", + "itayanagi.aomori.jp", + "kuroishi.aomori.jp", + "misawa.aomori.jp", + "mutsu.aomori.jp", + "nakadomari.aomori.jp", + "noheji.aomori.jp", + "oirase.aomori.jp", + "owani.aomori.jp", + "rokunohe.aomori.jp", + "sannohe.aomori.jp", + "shichinohe.aomori.jp", + "shingo.aomori.jp", + "takko.aomori.jp", + "towada.aomori.jp", + "tsugaru.aomori.jp", + "tsuruta.aomori.jp", + "abiko.chiba.jp", + "asahi.chiba.jp", + "chonan.chiba.jp", + "chosei.chiba.jp", + "choshi.chiba.jp", + "chuo.chiba.jp", + "funabashi.chiba.jp", + "futtsu.chiba.jp", + "hanamigawa.chiba.jp", + "ichihara.chiba.jp", + "ichikawa.chiba.jp", + "ichinomiya.chiba.jp", + "inzai.chiba.jp", + "isumi.chiba.jp", + "kamagaya.chiba.jp", + "kamogawa.chiba.jp", + "kashiwa.chiba.jp", + "katori.chiba.jp", + "katsuura.chiba.jp", + "kimitsu.chiba.jp", + "kisarazu.chiba.jp", + "kozaki.chiba.jp", + "kujukuri.chiba.jp", + "kyonan.chiba.jp", + 
"matsudo.chiba.jp", + "midori.chiba.jp", + "mihama.chiba.jp", + "minamiboso.chiba.jp", + "mobara.chiba.jp", + "mutsuzawa.chiba.jp", + "nagara.chiba.jp", + "nagareyama.chiba.jp", + "narashino.chiba.jp", + "narita.chiba.jp", + "noda.chiba.jp", + "oamishirasato.chiba.jp", + "omigawa.chiba.jp", + "onjuku.chiba.jp", + "otaki.chiba.jp", + "sakae.chiba.jp", + "sakura.chiba.jp", + "shimofusa.chiba.jp", + "shirako.chiba.jp", + "shiroi.chiba.jp", + "shisui.chiba.jp", + "sodegaura.chiba.jp", + "sosa.chiba.jp", + "tako.chiba.jp", + "tateyama.chiba.jp", + "togane.chiba.jp", + "tohnosho.chiba.jp", + "tomisato.chiba.jp", + "urayasu.chiba.jp", + "yachimata.chiba.jp", + "yachiyo.chiba.jp", + "yokaichiba.chiba.jp", + "yokoshibahikari.chiba.jp", + "yotsukaido.chiba.jp", + "ainan.ehime.jp", + "honai.ehime.jp", + "ikata.ehime.jp", + "imabari.ehime.jp", + "iyo.ehime.jp", + "kamijima.ehime.jp", + "kihoku.ehime.jp", + "kumakogen.ehime.jp", + "masaki.ehime.jp", + "matsuno.ehime.jp", + "matsuyama.ehime.jp", + "namikata.ehime.jp", + "niihama.ehime.jp", + "ozu.ehime.jp", + "saijo.ehime.jp", + "seiyo.ehime.jp", + "shikokuchuo.ehime.jp", + "tobe.ehime.jp", + "toon.ehime.jp", + "uchiko.ehime.jp", + "uwajima.ehime.jp", + "yawatahama.ehime.jp", + "echizen.fukui.jp", + "eiheiji.fukui.jp", + "fukui.fukui.jp", + "ikeda.fukui.jp", + "katsuyama.fukui.jp", + "mihama.fukui.jp", + "minamiechizen.fukui.jp", + "obama.fukui.jp", + "ohi.fukui.jp", + "ono.fukui.jp", + "sabae.fukui.jp", + "sakai.fukui.jp", + "takahama.fukui.jp", + "tsuruga.fukui.jp", + "wakasa.fukui.jp", + "ashiya.fukuoka.jp", + "buzen.fukuoka.jp", + "chikugo.fukuoka.jp", + "chikuho.fukuoka.jp", + "chikujo.fukuoka.jp", + "chikushino.fukuoka.jp", + "chikuzen.fukuoka.jp", + "chuo.fukuoka.jp", + "dazaifu.fukuoka.jp", + "fukuchi.fukuoka.jp", + "hakata.fukuoka.jp", + "higashi.fukuoka.jp", + "hirokawa.fukuoka.jp", + "hisayama.fukuoka.jp", + "iizuka.fukuoka.jp", + "inatsuki.fukuoka.jp", + "kaho.fukuoka.jp", + "kasuga.fukuoka.jp", + "kasuya.fukuoka.jp", + "kawara.fukuoka.jp", + "keisen.fukuoka.jp", + "koga.fukuoka.jp", + "kurate.fukuoka.jp", + "kurogi.fukuoka.jp", + "kurume.fukuoka.jp", + "minami.fukuoka.jp", + "miyako.fukuoka.jp", + "miyama.fukuoka.jp", + "miyawaka.fukuoka.jp", + "mizumaki.fukuoka.jp", + "munakata.fukuoka.jp", + "nakagawa.fukuoka.jp", + "nakama.fukuoka.jp", + "nishi.fukuoka.jp", + "nogata.fukuoka.jp", + "ogori.fukuoka.jp", + "okagaki.fukuoka.jp", + "okawa.fukuoka.jp", + "oki.fukuoka.jp", + "omuta.fukuoka.jp", + "onga.fukuoka.jp", + "onojo.fukuoka.jp", + "oto.fukuoka.jp", + "saigawa.fukuoka.jp", + "sasaguri.fukuoka.jp", + "shingu.fukuoka.jp", + "shinyoshitomi.fukuoka.jp", + "shonai.fukuoka.jp", + "soeda.fukuoka.jp", + "sue.fukuoka.jp", + "tachiarai.fukuoka.jp", + "tagawa.fukuoka.jp", + "takata.fukuoka.jp", + "toho.fukuoka.jp", + "toyotsu.fukuoka.jp", + "tsuiki.fukuoka.jp", + "ukiha.fukuoka.jp", + "umi.fukuoka.jp", + "usui.fukuoka.jp", + "yamada.fukuoka.jp", + "yame.fukuoka.jp", + "yanagawa.fukuoka.jp", + "yukuhashi.fukuoka.jp", + "aizubange.fukushima.jp", + "aizumisato.fukushima.jp", + "aizuwakamatsu.fukushima.jp", + "asakawa.fukushima.jp", + "bandai.fukushima.jp", + "date.fukushima.jp", + "fukushima.fukushima.jp", + "furudono.fukushima.jp", + "futaba.fukushima.jp", + "hanawa.fukushima.jp", + "higashi.fukushima.jp", + "hirata.fukushima.jp", + "hirono.fukushima.jp", + "iitate.fukushima.jp", + "inawashiro.fukushima.jp", + "ishikawa.fukushima.jp", + "iwaki.fukushima.jp", + "izumizaki.fukushima.jp", + "kagamiishi.fukushima.jp", + "kaneyama.fukushima.jp", + 
"kawamata.fukushima.jp", + "kitakata.fukushima.jp", + "kitashiobara.fukushima.jp", + "koori.fukushima.jp", + "koriyama.fukushima.jp", + "kunimi.fukushima.jp", + "miharu.fukushima.jp", + "mishima.fukushima.jp", + "namie.fukushima.jp", + "nango.fukushima.jp", + "nishiaizu.fukushima.jp", + "nishigo.fukushima.jp", + "okuma.fukushima.jp", + "omotego.fukushima.jp", + "ono.fukushima.jp", + "otama.fukushima.jp", + "samegawa.fukushima.jp", + "shimogo.fukushima.jp", + "shirakawa.fukushima.jp", + "showa.fukushima.jp", + "soma.fukushima.jp", + "sukagawa.fukushima.jp", + "taishin.fukushima.jp", + "tamakawa.fukushima.jp", + "tanagura.fukushima.jp", + "tenei.fukushima.jp", + "yabuki.fukushima.jp", + "yamato.fukushima.jp", + "yamatsuri.fukushima.jp", + "yanaizu.fukushima.jp", + "yugawa.fukushima.jp", + "anpachi.gifu.jp", + "ena.gifu.jp", + "gifu.gifu.jp", + "ginan.gifu.jp", + "godo.gifu.jp", + "gujo.gifu.jp", + "hashima.gifu.jp", + "hichiso.gifu.jp", + "hida.gifu.jp", + "higashishirakawa.gifu.jp", + "ibigawa.gifu.jp", + "ikeda.gifu.jp", + "kakamigahara.gifu.jp", + "kani.gifu.jp", + "kasahara.gifu.jp", + "kasamatsu.gifu.jp", + "kawaue.gifu.jp", + "kitagata.gifu.jp", + "mino.gifu.jp", + "minokamo.gifu.jp", + "mitake.gifu.jp", + "mizunami.gifu.jp", + "motosu.gifu.jp", + "nakatsugawa.gifu.jp", + "ogaki.gifu.jp", + "sakahogi.gifu.jp", + "seki.gifu.jp", + "sekigahara.gifu.jp", + "shirakawa.gifu.jp", + "tajimi.gifu.jp", + "takayama.gifu.jp", + "tarui.gifu.jp", + "toki.gifu.jp", + "tomika.gifu.jp", + "wanouchi.gifu.jp", + "yamagata.gifu.jp", + "yaotsu.gifu.jp", + "yoro.gifu.jp", + "annaka.gunma.jp", + "chiyoda.gunma.jp", + "fujioka.gunma.jp", + "higashiagatsuma.gunma.jp", + "isesaki.gunma.jp", + "itakura.gunma.jp", + "kanna.gunma.jp", + "kanra.gunma.jp", + "katashina.gunma.jp", + "kawaba.gunma.jp", + "kiryu.gunma.jp", + "kusatsu.gunma.jp", + "maebashi.gunma.jp", + "meiwa.gunma.jp", + "midori.gunma.jp", + "minakami.gunma.jp", + "naganohara.gunma.jp", + "nakanojo.gunma.jp", + "nanmoku.gunma.jp", + "numata.gunma.jp", + "oizumi.gunma.jp", + "ora.gunma.jp", + "ota.gunma.jp", + "shibukawa.gunma.jp", + "shimonita.gunma.jp", + "shinto.gunma.jp", + "showa.gunma.jp", + "takasaki.gunma.jp", + "takayama.gunma.jp", + "tamamura.gunma.jp", + "tatebayashi.gunma.jp", + "tomioka.gunma.jp", + "tsukiyono.gunma.jp", + "tsumagoi.gunma.jp", + "ueno.gunma.jp", + "yoshioka.gunma.jp", + "asaminami.hiroshima.jp", + "daiwa.hiroshima.jp", + "etajima.hiroshima.jp", + "fuchu.hiroshima.jp", + "fukuyama.hiroshima.jp", + "hatsukaichi.hiroshima.jp", + "higashihiroshima.hiroshima.jp", + "hongo.hiroshima.jp", + "jinsekikogen.hiroshima.jp", + "kaita.hiroshima.jp", + "kui.hiroshima.jp", + "kumano.hiroshima.jp", + "kure.hiroshima.jp", + "mihara.hiroshima.jp", + "miyoshi.hiroshima.jp", + "naka.hiroshima.jp", + "onomichi.hiroshima.jp", + "osakikamijima.hiroshima.jp", + "otake.hiroshima.jp", + "saka.hiroshima.jp", + "sera.hiroshima.jp", + "seranishi.hiroshima.jp", + "shinichi.hiroshima.jp", + "shobara.hiroshima.jp", + "takehara.hiroshima.jp", + "abashiri.hokkaido.jp", + "abira.hokkaido.jp", + "aibetsu.hokkaido.jp", + "akabira.hokkaido.jp", + "akkeshi.hokkaido.jp", + "asahikawa.hokkaido.jp", + "ashibetsu.hokkaido.jp", + "ashoro.hokkaido.jp", + "assabu.hokkaido.jp", + "atsuma.hokkaido.jp", + "bibai.hokkaido.jp", + "biei.hokkaido.jp", + "bifuka.hokkaido.jp", + "bihoro.hokkaido.jp", + "biratori.hokkaido.jp", + "chippubetsu.hokkaido.jp", + "chitose.hokkaido.jp", + "date.hokkaido.jp", + "ebetsu.hokkaido.jp", + "embetsu.hokkaido.jp", + "eniwa.hokkaido.jp", + 
"erimo.hokkaido.jp", + "esan.hokkaido.jp", + "esashi.hokkaido.jp", + "fukagawa.hokkaido.jp", + "fukushima.hokkaido.jp", + "furano.hokkaido.jp", + "furubira.hokkaido.jp", + "haboro.hokkaido.jp", + "hakodate.hokkaido.jp", + "hamatonbetsu.hokkaido.jp", + "hidaka.hokkaido.jp", + "higashikagura.hokkaido.jp", + "higashikawa.hokkaido.jp", + "hiroo.hokkaido.jp", + "hokuryu.hokkaido.jp", + "hokuto.hokkaido.jp", + "honbetsu.hokkaido.jp", + "horokanai.hokkaido.jp", + "horonobe.hokkaido.jp", + "ikeda.hokkaido.jp", + "imakane.hokkaido.jp", + "ishikari.hokkaido.jp", + "iwamizawa.hokkaido.jp", + "iwanai.hokkaido.jp", + "kamifurano.hokkaido.jp", + "kamikawa.hokkaido.jp", + "kamishihoro.hokkaido.jp", + "kamisunagawa.hokkaido.jp", + "kamoenai.hokkaido.jp", + "kayabe.hokkaido.jp", + "kembuchi.hokkaido.jp", + "kikonai.hokkaido.jp", + "kimobetsu.hokkaido.jp", + "kitahiroshima.hokkaido.jp", + "kitami.hokkaido.jp", + "kiyosato.hokkaido.jp", + "koshimizu.hokkaido.jp", + "kunneppu.hokkaido.jp", + "kuriyama.hokkaido.jp", + "kuromatsunai.hokkaido.jp", + "kushiro.hokkaido.jp", + "kutchan.hokkaido.jp", + "kyowa.hokkaido.jp", + "mashike.hokkaido.jp", + "matsumae.hokkaido.jp", + "mikasa.hokkaido.jp", + "minamifurano.hokkaido.jp", + "mombetsu.hokkaido.jp", + "moseushi.hokkaido.jp", + "mukawa.hokkaido.jp", + "muroran.hokkaido.jp", + "naie.hokkaido.jp", + "nakagawa.hokkaido.jp", + "nakasatsunai.hokkaido.jp", + "nakatombetsu.hokkaido.jp", + "nanae.hokkaido.jp", + "nanporo.hokkaido.jp", + "nayoro.hokkaido.jp", + "nemuro.hokkaido.jp", + "niikappu.hokkaido.jp", + "niki.hokkaido.jp", + "nishiokoppe.hokkaido.jp", + "noboribetsu.hokkaido.jp", + "numata.hokkaido.jp", + "obihiro.hokkaido.jp", + "obira.hokkaido.jp", + "oketo.hokkaido.jp", + "okoppe.hokkaido.jp", + "otaru.hokkaido.jp", + "otobe.hokkaido.jp", + "otofuke.hokkaido.jp", + "otoineppu.hokkaido.jp", + "oumu.hokkaido.jp", + "ozora.hokkaido.jp", + "pippu.hokkaido.jp", + "rankoshi.hokkaido.jp", + "rebun.hokkaido.jp", + "rikubetsu.hokkaido.jp", + "rishiri.hokkaido.jp", + "rishirifuji.hokkaido.jp", + "saroma.hokkaido.jp", + "sarufutsu.hokkaido.jp", + "shakotan.hokkaido.jp", + "shari.hokkaido.jp", + "shibecha.hokkaido.jp", + "shibetsu.hokkaido.jp", + "shikabe.hokkaido.jp", + "shikaoi.hokkaido.jp", + "shimamaki.hokkaido.jp", + "shimizu.hokkaido.jp", + "shimokawa.hokkaido.jp", + "shinshinotsu.hokkaido.jp", + "shintoku.hokkaido.jp", + "shiranuka.hokkaido.jp", + "shiraoi.hokkaido.jp", + "shiriuchi.hokkaido.jp", + "sobetsu.hokkaido.jp", + "sunagawa.hokkaido.jp", + "taiki.hokkaido.jp", + "takasu.hokkaido.jp", + "takikawa.hokkaido.jp", + "takinoue.hokkaido.jp", + "teshikaga.hokkaido.jp", + "tobetsu.hokkaido.jp", + "tohma.hokkaido.jp", + "tomakomai.hokkaido.jp", + "tomari.hokkaido.jp", + "toya.hokkaido.jp", + "toyako.hokkaido.jp", + "toyotomi.hokkaido.jp", + "toyoura.hokkaido.jp", + "tsubetsu.hokkaido.jp", + "tsukigata.hokkaido.jp", + "urakawa.hokkaido.jp", + "urausu.hokkaido.jp", + "uryu.hokkaido.jp", + "utashinai.hokkaido.jp", + "wakkanai.hokkaido.jp", + "wassamu.hokkaido.jp", + "yakumo.hokkaido.jp", + "yoichi.hokkaido.jp", + "aioi.hyogo.jp", + "akashi.hyogo.jp", + "ako.hyogo.jp", + "amagasaki.hyogo.jp", + "aogaki.hyogo.jp", + "asago.hyogo.jp", + "ashiya.hyogo.jp", + "awaji.hyogo.jp", + "fukusaki.hyogo.jp", + "goshiki.hyogo.jp", + "harima.hyogo.jp", + "himeji.hyogo.jp", + "ichikawa.hyogo.jp", + "inagawa.hyogo.jp", + "itami.hyogo.jp", + "kakogawa.hyogo.jp", + "kamigori.hyogo.jp", + "kamikawa.hyogo.jp", + "kasai.hyogo.jp", + "kasuga.hyogo.jp", + "kawanishi.hyogo.jp", + "miki.hyogo.jp", + 
"minamiawaji.hyogo.jp", + "nishinomiya.hyogo.jp", + "nishiwaki.hyogo.jp", + "ono.hyogo.jp", + "sanda.hyogo.jp", + "sannan.hyogo.jp", + "sasayama.hyogo.jp", + "sayo.hyogo.jp", + "shingu.hyogo.jp", + "shinonsen.hyogo.jp", + "shiso.hyogo.jp", + "sumoto.hyogo.jp", + "taishi.hyogo.jp", + "taka.hyogo.jp", + "takarazuka.hyogo.jp", + "takasago.hyogo.jp", + "takino.hyogo.jp", + "tamba.hyogo.jp", + "tatsuno.hyogo.jp", + "toyooka.hyogo.jp", + "yabu.hyogo.jp", + "yashiro.hyogo.jp", + "yoka.hyogo.jp", + "yokawa.hyogo.jp", + "ami.ibaraki.jp", + "asahi.ibaraki.jp", + "bando.ibaraki.jp", + "chikusei.ibaraki.jp", + "daigo.ibaraki.jp", + "fujishiro.ibaraki.jp", + "hitachi.ibaraki.jp", + "hitachinaka.ibaraki.jp", + "hitachiomiya.ibaraki.jp", + "hitachiota.ibaraki.jp", + "ibaraki.ibaraki.jp", + "ina.ibaraki.jp", + "inashiki.ibaraki.jp", + "itako.ibaraki.jp", + "iwama.ibaraki.jp", + "joso.ibaraki.jp", + "kamisu.ibaraki.jp", + "kasama.ibaraki.jp", + "kashima.ibaraki.jp", + "kasumigaura.ibaraki.jp", + "koga.ibaraki.jp", + "miho.ibaraki.jp", + "mito.ibaraki.jp", + "moriya.ibaraki.jp", + "naka.ibaraki.jp", + "namegata.ibaraki.jp", + "oarai.ibaraki.jp", + "ogawa.ibaraki.jp", + "omitama.ibaraki.jp", + "ryugasaki.ibaraki.jp", + "sakai.ibaraki.jp", + "sakuragawa.ibaraki.jp", + "shimodate.ibaraki.jp", + "shimotsuma.ibaraki.jp", + "shirosato.ibaraki.jp", + "sowa.ibaraki.jp", + "suifu.ibaraki.jp", + "takahagi.ibaraki.jp", + "tamatsukuri.ibaraki.jp", + "tokai.ibaraki.jp", + "tomobe.ibaraki.jp", + "tone.ibaraki.jp", + "toride.ibaraki.jp", + "tsuchiura.ibaraki.jp", + "tsukuba.ibaraki.jp", + "uchihara.ibaraki.jp", + "ushiku.ibaraki.jp", + "yachiyo.ibaraki.jp", + "yamagata.ibaraki.jp", + "yawara.ibaraki.jp", + "yuki.ibaraki.jp", + "anamizu.ishikawa.jp", + "hakui.ishikawa.jp", + "hakusan.ishikawa.jp", + "kaga.ishikawa.jp", + "kahoku.ishikawa.jp", + "kanazawa.ishikawa.jp", + "kawakita.ishikawa.jp", + "komatsu.ishikawa.jp", + "nakanoto.ishikawa.jp", + "nanao.ishikawa.jp", + "nomi.ishikawa.jp", + "nonoichi.ishikawa.jp", + "noto.ishikawa.jp", + "shika.ishikawa.jp", + "suzu.ishikawa.jp", + "tsubata.ishikawa.jp", + "tsurugi.ishikawa.jp", + "uchinada.ishikawa.jp", + "wajima.ishikawa.jp", + "fudai.iwate.jp", + "fujisawa.iwate.jp", + "hanamaki.iwate.jp", + "hiraizumi.iwate.jp", + "hirono.iwate.jp", + "ichinohe.iwate.jp", + "ichinoseki.iwate.jp", + "iwaizumi.iwate.jp", + "iwate.iwate.jp", + "joboji.iwate.jp", + "kamaishi.iwate.jp", + "kanegasaki.iwate.jp", + "karumai.iwate.jp", + "kawai.iwate.jp", + "kitakami.iwate.jp", + "kuji.iwate.jp", + "kunohe.iwate.jp", + "kuzumaki.iwate.jp", + "miyako.iwate.jp", + "mizusawa.iwate.jp", + "morioka.iwate.jp", + "ninohe.iwate.jp", + "noda.iwate.jp", + "ofunato.iwate.jp", + "oshu.iwate.jp", + "otsuchi.iwate.jp", + "rikuzentakata.iwate.jp", + "shiwa.iwate.jp", + "shizukuishi.iwate.jp", + "sumita.iwate.jp", + "tanohata.iwate.jp", + "tono.iwate.jp", + "yahaba.iwate.jp", + "yamada.iwate.jp", + "ayagawa.kagawa.jp", + "higashikagawa.kagawa.jp", + "kanonji.kagawa.jp", + "kotohira.kagawa.jp", + "manno.kagawa.jp", + "marugame.kagawa.jp", + "mitoyo.kagawa.jp", + "naoshima.kagawa.jp", + "sanuki.kagawa.jp", + "tadotsu.kagawa.jp", + "takamatsu.kagawa.jp", + "tonosho.kagawa.jp", + "uchinomi.kagawa.jp", + "utazu.kagawa.jp", + "zentsuji.kagawa.jp", + "akune.kagoshima.jp", + "amami.kagoshima.jp", + "hioki.kagoshima.jp", + "isa.kagoshima.jp", + "isen.kagoshima.jp", + "izumi.kagoshima.jp", + "kagoshima.kagoshima.jp", + "kanoya.kagoshima.jp", + "kawanabe.kagoshima.jp", + "kinko.kagoshima.jp", + "kouyama.kagoshima.jp", + 
"makurazaki.kagoshima.jp", + "matsumoto.kagoshima.jp", + "minamitane.kagoshima.jp", + "nakatane.kagoshima.jp", + "nishinoomote.kagoshima.jp", + "satsumasendai.kagoshima.jp", + "soo.kagoshima.jp", + "tarumizu.kagoshima.jp", + "yusui.kagoshima.jp", + "aikawa.kanagawa.jp", + "atsugi.kanagawa.jp", + "ayase.kanagawa.jp", + "chigasaki.kanagawa.jp", + "ebina.kanagawa.jp", + "fujisawa.kanagawa.jp", + "hadano.kanagawa.jp", + "hakone.kanagawa.jp", + "hiratsuka.kanagawa.jp", + "isehara.kanagawa.jp", + "kaisei.kanagawa.jp", + "kamakura.kanagawa.jp", + "kiyokawa.kanagawa.jp", + "matsuda.kanagawa.jp", + "minamiashigara.kanagawa.jp", + "miura.kanagawa.jp", + "nakai.kanagawa.jp", + "ninomiya.kanagawa.jp", + "odawara.kanagawa.jp", + "oi.kanagawa.jp", + "oiso.kanagawa.jp", + "sagamihara.kanagawa.jp", + "samukawa.kanagawa.jp", + "tsukui.kanagawa.jp", + "yamakita.kanagawa.jp", + "yamato.kanagawa.jp", + "yokosuka.kanagawa.jp", + "yugawara.kanagawa.jp", + "zama.kanagawa.jp", + "zushi.kanagawa.jp", + "aki.kochi.jp", + "geisei.kochi.jp", + "hidaka.kochi.jp", + "higashitsuno.kochi.jp", + "ino.kochi.jp", + "kagami.kochi.jp", + "kami.kochi.jp", + "kitagawa.kochi.jp", + "kochi.kochi.jp", + "mihara.kochi.jp", + "motoyama.kochi.jp", + "muroto.kochi.jp", + "nahari.kochi.jp", + "nakamura.kochi.jp", + "nankoku.kochi.jp", + "nishitosa.kochi.jp", + "niyodogawa.kochi.jp", + "ochi.kochi.jp", + "okawa.kochi.jp", + "otoyo.kochi.jp", + "otsuki.kochi.jp", + "sakawa.kochi.jp", + "sukumo.kochi.jp", + "susaki.kochi.jp", + "tosa.kochi.jp", + "tosashimizu.kochi.jp", + "toyo.kochi.jp", + "tsuno.kochi.jp", + "umaji.kochi.jp", + "yasuda.kochi.jp", + "yusuhara.kochi.jp", + "amakusa.kumamoto.jp", + "arao.kumamoto.jp", + "aso.kumamoto.jp", + "choyo.kumamoto.jp", + "gyokuto.kumamoto.jp", + "kamiamakusa.kumamoto.jp", + "kikuchi.kumamoto.jp", + "kumamoto.kumamoto.jp", + "mashiki.kumamoto.jp", + "mifune.kumamoto.jp", + "minamata.kumamoto.jp", + "minamioguni.kumamoto.jp", + "nagasu.kumamoto.jp", + "nishihara.kumamoto.jp", + "oguni.kumamoto.jp", + "ozu.kumamoto.jp", + "sumoto.kumamoto.jp", + "takamori.kumamoto.jp", + "uki.kumamoto.jp", + "uto.kumamoto.jp", + "yamaga.kumamoto.jp", + "yamato.kumamoto.jp", + "yatsushiro.kumamoto.jp", + "ayabe.kyoto.jp", + "fukuchiyama.kyoto.jp", + "higashiyama.kyoto.jp", + "ide.kyoto.jp", + "ine.kyoto.jp", + "joyo.kyoto.jp", + "kameoka.kyoto.jp", + "kamo.kyoto.jp", + "kita.kyoto.jp", + "kizu.kyoto.jp", + "kumiyama.kyoto.jp", + "kyotamba.kyoto.jp", + "kyotanabe.kyoto.jp", + "kyotango.kyoto.jp", + "maizuru.kyoto.jp", + "minami.kyoto.jp", + "minamiyamashiro.kyoto.jp", + "miyazu.kyoto.jp", + "muko.kyoto.jp", + "nagaokakyo.kyoto.jp", + "nakagyo.kyoto.jp", + "nantan.kyoto.jp", + "oyamazaki.kyoto.jp", + "sakyo.kyoto.jp", + "seika.kyoto.jp", + "tanabe.kyoto.jp", + "uji.kyoto.jp", + "ujitawara.kyoto.jp", + "wazuka.kyoto.jp", + "yamashina.kyoto.jp", + "yawata.kyoto.jp", + "asahi.mie.jp", + "inabe.mie.jp", + "ise.mie.jp", + "kameyama.mie.jp", + "kawagoe.mie.jp", + "kiho.mie.jp", + "kisosaki.mie.jp", + "kiwa.mie.jp", + "komono.mie.jp", + "kumano.mie.jp", + "kuwana.mie.jp", + "matsusaka.mie.jp", + "meiwa.mie.jp", + "mihama.mie.jp", + "minamiise.mie.jp", + "misugi.mie.jp", + "miyama.mie.jp", + "nabari.mie.jp", + "shima.mie.jp", + "suzuka.mie.jp", + "tado.mie.jp", + "taiki.mie.jp", + "taki.mie.jp", + "tamaki.mie.jp", + "toba.mie.jp", + "tsu.mie.jp", + "udono.mie.jp", + "ureshino.mie.jp", + "watarai.mie.jp", + "yokkaichi.mie.jp", + "furukawa.miyagi.jp", + "higashimatsushima.miyagi.jp", + "ishinomaki.miyagi.jp", + 
"iwanuma.miyagi.jp", + "kakuda.miyagi.jp", + "kami.miyagi.jp", + "kawasaki.miyagi.jp", + "marumori.miyagi.jp", + "matsushima.miyagi.jp", + "minamisanriku.miyagi.jp", + "misato.miyagi.jp", + "murata.miyagi.jp", + "natori.miyagi.jp", + "ogawara.miyagi.jp", + "ohira.miyagi.jp", + "onagawa.miyagi.jp", + "osaki.miyagi.jp", + "rifu.miyagi.jp", + "semine.miyagi.jp", + "shibata.miyagi.jp", + "shichikashuku.miyagi.jp", + "shikama.miyagi.jp", + "shiogama.miyagi.jp", + "shiroishi.miyagi.jp", + "tagajo.miyagi.jp", + "taiwa.miyagi.jp", + "tome.miyagi.jp", + "tomiya.miyagi.jp", + "wakuya.miyagi.jp", + "watari.miyagi.jp", + "yamamoto.miyagi.jp", + "zao.miyagi.jp", + "aya.miyazaki.jp", + "ebino.miyazaki.jp", + "gokase.miyazaki.jp", + "hyuga.miyazaki.jp", + "kadogawa.miyazaki.jp", + "kawaminami.miyazaki.jp", + "kijo.miyazaki.jp", + "kitagawa.miyazaki.jp", + "kitakata.miyazaki.jp", + "kitaura.miyazaki.jp", + "kobayashi.miyazaki.jp", + "kunitomi.miyazaki.jp", + "kushima.miyazaki.jp", + "mimata.miyazaki.jp", + "miyakonojo.miyazaki.jp", + "miyazaki.miyazaki.jp", + "morotsuka.miyazaki.jp", + "nichinan.miyazaki.jp", + "nishimera.miyazaki.jp", + "nobeoka.miyazaki.jp", + "saito.miyazaki.jp", + "shiiba.miyazaki.jp", + "shintomi.miyazaki.jp", + "takaharu.miyazaki.jp", + "takanabe.miyazaki.jp", + "takazaki.miyazaki.jp", + "tsuno.miyazaki.jp", + "achi.nagano.jp", + "agematsu.nagano.jp", + "anan.nagano.jp", + "aoki.nagano.jp", + "asahi.nagano.jp", + "azumino.nagano.jp", + "chikuhoku.nagano.jp", + "chikuma.nagano.jp", + "chino.nagano.jp", + "fujimi.nagano.jp", + "hakuba.nagano.jp", + "hara.nagano.jp", + "hiraya.nagano.jp", + "iida.nagano.jp", + "iijima.nagano.jp", + "iiyama.nagano.jp", + "iizuna.nagano.jp", + "ikeda.nagano.jp", + "ikusaka.nagano.jp", + "ina.nagano.jp", + "karuizawa.nagano.jp", + "kawakami.nagano.jp", + "kiso.nagano.jp", + "kisofukushima.nagano.jp", + "kitaaiki.nagano.jp", + "komagane.nagano.jp", + "komoro.nagano.jp", + "matsukawa.nagano.jp", + "matsumoto.nagano.jp", + "miasa.nagano.jp", + "minamiaiki.nagano.jp", + "minamimaki.nagano.jp", + "minamiminowa.nagano.jp", + "minowa.nagano.jp", + "miyada.nagano.jp", + "miyota.nagano.jp", + "mochizuki.nagano.jp", + "nagano.nagano.jp", + "nagawa.nagano.jp", + "nagiso.nagano.jp", + "nakagawa.nagano.jp", + "nakano.nagano.jp", + "nozawaonsen.nagano.jp", + "obuse.nagano.jp", + "ogawa.nagano.jp", + "okaya.nagano.jp", + "omachi.nagano.jp", + "omi.nagano.jp", + "ookuwa.nagano.jp", + "ooshika.nagano.jp", + "otaki.nagano.jp", + "otari.nagano.jp", + "sakae.nagano.jp", + "sakaki.nagano.jp", + "saku.nagano.jp", + "sakuho.nagano.jp", + "shimosuwa.nagano.jp", + "shinanomachi.nagano.jp", + "shiojiri.nagano.jp", + "suwa.nagano.jp", + "suzaka.nagano.jp", + "takagi.nagano.jp", + "takamori.nagano.jp", + "takayama.nagano.jp", + "tateshina.nagano.jp", + "tatsuno.nagano.jp", + "togakushi.nagano.jp", + "togura.nagano.jp", + "tomi.nagano.jp", + "ueda.nagano.jp", + "wada.nagano.jp", + "yamagata.nagano.jp", + "yamanouchi.nagano.jp", + "yasaka.nagano.jp", + "yasuoka.nagano.jp", + "chijiwa.nagasaki.jp", + "futsu.nagasaki.jp", + "goto.nagasaki.jp", + "hasami.nagasaki.jp", + "hirado.nagasaki.jp", + "iki.nagasaki.jp", + "isahaya.nagasaki.jp", + "kawatana.nagasaki.jp", + "kuchinotsu.nagasaki.jp", + "matsuura.nagasaki.jp", + "nagasaki.nagasaki.jp", + "obama.nagasaki.jp", + "omura.nagasaki.jp", + "oseto.nagasaki.jp", + "saikai.nagasaki.jp", + "sasebo.nagasaki.jp", + "seihi.nagasaki.jp", + "shimabara.nagasaki.jp", + "shinkamigoto.nagasaki.jp", + "togitsu.nagasaki.jp", + "tsushima.nagasaki.jp", + 
"unzen.nagasaki.jp", + "ando.nara.jp", + "gose.nara.jp", + "heguri.nara.jp", + "higashiyoshino.nara.jp", + "ikaruga.nara.jp", + "ikoma.nara.jp", + "kamikitayama.nara.jp", + "kanmaki.nara.jp", + "kashiba.nara.jp", + "kashihara.nara.jp", + "katsuragi.nara.jp", + "kawai.nara.jp", + "kawakami.nara.jp", + "kawanishi.nara.jp", + "koryo.nara.jp", + "kurotaki.nara.jp", + "mitsue.nara.jp", + "miyake.nara.jp", + "nara.nara.jp", + "nosegawa.nara.jp", + "oji.nara.jp", + "ouda.nara.jp", + "oyodo.nara.jp", + "sakurai.nara.jp", + "sango.nara.jp", + "shimoichi.nara.jp", + "shimokitayama.nara.jp", + "shinjo.nara.jp", + "soni.nara.jp", + "takatori.nara.jp", + "tawaramoto.nara.jp", + "tenkawa.nara.jp", + "tenri.nara.jp", + "uda.nara.jp", + "yamatokoriyama.nara.jp", + "yamatotakada.nara.jp", + "yamazoe.nara.jp", + "yoshino.nara.jp", + "aga.niigata.jp", + "agano.niigata.jp", + "gosen.niigata.jp", + "itoigawa.niigata.jp", + "izumozaki.niigata.jp", + "joetsu.niigata.jp", + "kamo.niigata.jp", + "kariwa.niigata.jp", + "kashiwazaki.niigata.jp", + "minamiuonuma.niigata.jp", + "mitsuke.niigata.jp", + "muika.niigata.jp", + "murakami.niigata.jp", + "myoko.niigata.jp", + "nagaoka.niigata.jp", + "niigata.niigata.jp", + "ojiya.niigata.jp", + "omi.niigata.jp", + "sado.niigata.jp", + "sanjo.niigata.jp", + "seiro.niigata.jp", + "seirou.niigata.jp", + "sekikawa.niigata.jp", + "shibata.niigata.jp", + "tagami.niigata.jp", + "tainai.niigata.jp", + "tochio.niigata.jp", + "tokamachi.niigata.jp", + "tsubame.niigata.jp", + "tsunan.niigata.jp", + "uonuma.niigata.jp", + "yahiko.niigata.jp", + "yoita.niigata.jp", + "yuzawa.niigata.jp", + "beppu.oita.jp", + "bungoono.oita.jp", + "bungotakada.oita.jp", + "hasama.oita.jp", + "hiji.oita.jp", + "himeshima.oita.jp", + "hita.oita.jp", + "kamitsue.oita.jp", + "kokonoe.oita.jp", + "kuju.oita.jp", + "kunisaki.oita.jp", + "kusu.oita.jp", + "oita.oita.jp", + "saiki.oita.jp", + "taketa.oita.jp", + "tsukumi.oita.jp", + "usa.oita.jp", + "usuki.oita.jp", + "yufu.oita.jp", + "akaiwa.okayama.jp", + "asakuchi.okayama.jp", + "bizen.okayama.jp", + "hayashima.okayama.jp", + "ibara.okayama.jp", + "kagamino.okayama.jp", + "kasaoka.okayama.jp", + "kibichuo.okayama.jp", + "kumenan.okayama.jp", + "kurashiki.okayama.jp", + "maniwa.okayama.jp", + "misaki.okayama.jp", + "nagi.okayama.jp", + "niimi.okayama.jp", + "nishiawakura.okayama.jp", + "okayama.okayama.jp", + "satosho.okayama.jp", + "setouchi.okayama.jp", + "shinjo.okayama.jp", + "shoo.okayama.jp", + "soja.okayama.jp", + "takahashi.okayama.jp", + "tamano.okayama.jp", + "tsuyama.okayama.jp", + "wake.okayama.jp", + "yakage.okayama.jp", + "aguni.okinawa.jp", + "ginowan.okinawa.jp", + "ginoza.okinawa.jp", + "gushikami.okinawa.jp", + "haebaru.okinawa.jp", + "higashi.okinawa.jp", + "hirara.okinawa.jp", + "iheya.okinawa.jp", + "ishigaki.okinawa.jp", + "ishikawa.okinawa.jp", + "itoman.okinawa.jp", + "izena.okinawa.jp", + "kadena.okinawa.jp", + "kin.okinawa.jp", + "kitadaito.okinawa.jp", + "kitanakagusuku.okinawa.jp", + "kumejima.okinawa.jp", + "kunigami.okinawa.jp", + "minamidaito.okinawa.jp", + "motobu.okinawa.jp", + "nago.okinawa.jp", + "naha.okinawa.jp", + "nakagusuku.okinawa.jp", + "nakijin.okinawa.jp", + "nanjo.okinawa.jp", + "nishihara.okinawa.jp", + "ogimi.okinawa.jp", + "okinawa.okinawa.jp", + "onna.okinawa.jp", + "shimoji.okinawa.jp", + "taketomi.okinawa.jp", + "tarama.okinawa.jp", + "tokashiki.okinawa.jp", + "tomigusuku.okinawa.jp", + "tonaki.okinawa.jp", + "urasoe.okinawa.jp", + "uruma.okinawa.jp", + "yaese.okinawa.jp", + "yomitan.okinawa.jp", + 
"yonabaru.okinawa.jp", + "yonaguni.okinawa.jp", + "zamami.okinawa.jp", + "abeno.osaka.jp", + "chihayaakasaka.osaka.jp", + "chuo.osaka.jp", + "daito.osaka.jp", + "fujiidera.osaka.jp", + "habikino.osaka.jp", + "hannan.osaka.jp", + "higashiosaka.osaka.jp", + "higashisumiyoshi.osaka.jp", + "higashiyodogawa.osaka.jp", + "hirakata.osaka.jp", + "ibaraki.osaka.jp", + "ikeda.osaka.jp", + "izumi.osaka.jp", + "izumiotsu.osaka.jp", + "izumisano.osaka.jp", + "kadoma.osaka.jp", + "kaizuka.osaka.jp", + "kanan.osaka.jp", + "kashiwara.osaka.jp", + "katano.osaka.jp", + "kawachinagano.osaka.jp", + "kishiwada.osaka.jp", + "kita.osaka.jp", + "kumatori.osaka.jp", + "matsubara.osaka.jp", + "minato.osaka.jp", + "minoh.osaka.jp", + "misaki.osaka.jp", + "moriguchi.osaka.jp", + "neyagawa.osaka.jp", + "nishi.osaka.jp", + "nose.osaka.jp", + "osakasayama.osaka.jp", + "sakai.osaka.jp", + "sayama.osaka.jp", + "sennan.osaka.jp", + "settsu.osaka.jp", + "shijonawate.osaka.jp", + "shimamoto.osaka.jp", + "suita.osaka.jp", + "tadaoka.osaka.jp", + "taishi.osaka.jp", + "tajiri.osaka.jp", + "takaishi.osaka.jp", + "takatsuki.osaka.jp", + "tondabayashi.osaka.jp", + "toyonaka.osaka.jp", + "toyono.osaka.jp", + "yao.osaka.jp", + "ariake.saga.jp", + "arita.saga.jp", + "fukudomi.saga.jp", + "genkai.saga.jp", + "hamatama.saga.jp", + "hizen.saga.jp", + "imari.saga.jp", + "kamimine.saga.jp", + "kanzaki.saga.jp", + "karatsu.saga.jp", + "kashima.saga.jp", + "kitagata.saga.jp", + "kitahata.saga.jp", + "kiyama.saga.jp", + "kouhoku.saga.jp", + "kyuragi.saga.jp", + "nishiarita.saga.jp", + "ogi.saga.jp", + "omachi.saga.jp", + "ouchi.saga.jp", + "saga.saga.jp", + "shiroishi.saga.jp", + "taku.saga.jp", + "tara.saga.jp", + "tosu.saga.jp", + "yoshinogari.saga.jp", + "arakawa.saitama.jp", + "asaka.saitama.jp", + "chichibu.saitama.jp", + "fujimi.saitama.jp", + "fujimino.saitama.jp", + "fukaya.saitama.jp", + "hanno.saitama.jp", + "hanyu.saitama.jp", + "hasuda.saitama.jp", + "hatogaya.saitama.jp", + "hatoyama.saitama.jp", + "hidaka.saitama.jp", + "higashichichibu.saitama.jp", + "higashimatsuyama.saitama.jp", + "honjo.saitama.jp", + "ina.saitama.jp", + "iruma.saitama.jp", + "iwatsuki.saitama.jp", + "kamiizumi.saitama.jp", + "kamikawa.saitama.jp", + "kamisato.saitama.jp", + "kasukabe.saitama.jp", + "kawagoe.saitama.jp", + "kawaguchi.saitama.jp", + "kawajima.saitama.jp", + "kazo.saitama.jp", + "kitamoto.saitama.jp", + "koshigaya.saitama.jp", + "kounosu.saitama.jp", + "kuki.saitama.jp", + "kumagaya.saitama.jp", + "matsubushi.saitama.jp", + "minano.saitama.jp", + "misato.saitama.jp", + "miyashiro.saitama.jp", + "miyoshi.saitama.jp", + "moroyama.saitama.jp", + "nagatoro.saitama.jp", + "namegawa.saitama.jp", + "niiza.saitama.jp", + "ogano.saitama.jp", + "ogawa.saitama.jp", + "ogose.saitama.jp", + "okegawa.saitama.jp", + "omiya.saitama.jp", + "otaki.saitama.jp", + "ranzan.saitama.jp", + "ryokami.saitama.jp", + "saitama.saitama.jp", + "sakado.saitama.jp", + "satte.saitama.jp", + "sayama.saitama.jp", + "shiki.saitama.jp", + "shiraoka.saitama.jp", + "soka.saitama.jp", + "sugito.saitama.jp", + "toda.saitama.jp", + "tokigawa.saitama.jp", + "tokorozawa.saitama.jp", + "tsurugashima.saitama.jp", + "urawa.saitama.jp", + "warabi.saitama.jp", + "yashio.saitama.jp", + "yokoze.saitama.jp", + "yono.saitama.jp", + "yorii.saitama.jp", + "yoshida.saitama.jp", + "yoshikawa.saitama.jp", + "yoshimi.saitama.jp", + "aisho.shiga.jp", + "gamo.shiga.jp", + "higashiomi.shiga.jp", + "hikone.shiga.jp", + "koka.shiga.jp", + "konan.shiga.jp", + "kosei.shiga.jp", + "koto.shiga.jp", + 
"kusatsu.shiga.jp", + "maibara.shiga.jp", + "moriyama.shiga.jp", + "nagahama.shiga.jp", + "nishiazai.shiga.jp", + "notogawa.shiga.jp", + "omihachiman.shiga.jp", + "otsu.shiga.jp", + "ritto.shiga.jp", + "ryuoh.shiga.jp", + "takashima.shiga.jp", + "takatsuki.shiga.jp", + "torahime.shiga.jp", + "toyosato.shiga.jp", + "yasu.shiga.jp", + "akagi.shimane.jp", + "ama.shimane.jp", + "gotsu.shimane.jp", + "hamada.shimane.jp", + "higashiizumo.shimane.jp", + "hikawa.shimane.jp", + "hikimi.shimane.jp", + "izumo.shimane.jp", + "kakinoki.shimane.jp", + "masuda.shimane.jp", + "matsue.shimane.jp", + "misato.shimane.jp", + "nishinoshima.shimane.jp", + "ohda.shimane.jp", + "okinoshima.shimane.jp", + "okuizumo.shimane.jp", + "shimane.shimane.jp", + "tamayu.shimane.jp", + "tsuwano.shimane.jp", + "unnan.shimane.jp", + "yakumo.shimane.jp", + "yasugi.shimane.jp", + "yatsuka.shimane.jp", + "arai.shizuoka.jp", + "atami.shizuoka.jp", + "fuji.shizuoka.jp", + "fujieda.shizuoka.jp", + "fujikawa.shizuoka.jp", + "fujinomiya.shizuoka.jp", + "fukuroi.shizuoka.jp", + "gotemba.shizuoka.jp", + "haibara.shizuoka.jp", + "hamamatsu.shizuoka.jp", + "higashiizu.shizuoka.jp", + "ito.shizuoka.jp", + "iwata.shizuoka.jp", + "izu.shizuoka.jp", + "izunokuni.shizuoka.jp", + "kakegawa.shizuoka.jp", + "kannami.shizuoka.jp", + "kawanehon.shizuoka.jp", + "kawazu.shizuoka.jp", + "kikugawa.shizuoka.jp", + "kosai.shizuoka.jp", + "makinohara.shizuoka.jp", + "matsuzaki.shizuoka.jp", + "minamiizu.shizuoka.jp", + "mishima.shizuoka.jp", + "morimachi.shizuoka.jp", + "nishiizu.shizuoka.jp", + "numazu.shizuoka.jp", + "omaezaki.shizuoka.jp", + "shimada.shizuoka.jp", + "shimizu.shizuoka.jp", + "shimoda.shizuoka.jp", + "shizuoka.shizuoka.jp", + "susono.shizuoka.jp", + "yaizu.shizuoka.jp", + "yoshida.shizuoka.jp", + "ashikaga.tochigi.jp", + "bato.tochigi.jp", + "haga.tochigi.jp", + "ichikai.tochigi.jp", + "iwafune.tochigi.jp", + "kaminokawa.tochigi.jp", + "kanuma.tochigi.jp", + "karasuyama.tochigi.jp", + "kuroiso.tochigi.jp", + "mashiko.tochigi.jp", + "mibu.tochigi.jp", + "moka.tochigi.jp", + "motegi.tochigi.jp", + "nasu.tochigi.jp", + "nasushiobara.tochigi.jp", + "nikko.tochigi.jp", + "nishikata.tochigi.jp", + "nogi.tochigi.jp", + "ohira.tochigi.jp", + "ohtawara.tochigi.jp", + "oyama.tochigi.jp", + "sakura.tochigi.jp", + "sano.tochigi.jp", + "shimotsuke.tochigi.jp", + "shioya.tochigi.jp", + "takanezawa.tochigi.jp", + "tochigi.tochigi.jp", + "tsuga.tochigi.jp", + "ujiie.tochigi.jp", + "utsunomiya.tochigi.jp", + "yaita.tochigi.jp", + "aizumi.tokushima.jp", + "anan.tokushima.jp", + "ichiba.tokushima.jp", + "itano.tokushima.jp", + "kainan.tokushima.jp", + "komatsushima.tokushima.jp", + "matsushige.tokushima.jp", + "mima.tokushima.jp", + "minami.tokushima.jp", + "miyoshi.tokushima.jp", + "mugi.tokushima.jp", + "nakagawa.tokushima.jp", + "naruto.tokushima.jp", + "sanagochi.tokushima.jp", + "shishikui.tokushima.jp", + "tokushima.tokushima.jp", + "wajiki.tokushima.jp", + "adachi.tokyo.jp", + "akiruno.tokyo.jp", + "akishima.tokyo.jp", + "aogashima.tokyo.jp", + "arakawa.tokyo.jp", + "bunkyo.tokyo.jp", + "chiyoda.tokyo.jp", + "chofu.tokyo.jp", + "chuo.tokyo.jp", + "edogawa.tokyo.jp", + "fuchu.tokyo.jp", + "fussa.tokyo.jp", + "hachijo.tokyo.jp", + "hachioji.tokyo.jp", + "hamura.tokyo.jp", + "higashikurume.tokyo.jp", + "higashimurayama.tokyo.jp", + "higashiyamato.tokyo.jp", + "hino.tokyo.jp", + "hinode.tokyo.jp", + "hinohara.tokyo.jp", + "inagi.tokyo.jp", + "itabashi.tokyo.jp", + "katsushika.tokyo.jp", + "kita.tokyo.jp", + "kiyose.tokyo.jp", + "kodaira.tokyo.jp", + 
"koganei.tokyo.jp", + "kokubunji.tokyo.jp", + "komae.tokyo.jp", + "koto.tokyo.jp", + "kouzushima.tokyo.jp", + "kunitachi.tokyo.jp", + "machida.tokyo.jp", + "meguro.tokyo.jp", + "minato.tokyo.jp", + "mitaka.tokyo.jp", + "mizuho.tokyo.jp", + "musashimurayama.tokyo.jp", + "musashino.tokyo.jp", + "nakano.tokyo.jp", + "nerima.tokyo.jp", + "ogasawara.tokyo.jp", + "okutama.tokyo.jp", + "ome.tokyo.jp", + "oshima.tokyo.jp", + "ota.tokyo.jp", + "setagaya.tokyo.jp", + "shibuya.tokyo.jp", + "shinagawa.tokyo.jp", + "shinjuku.tokyo.jp", + "suginami.tokyo.jp", + "sumida.tokyo.jp", + "tachikawa.tokyo.jp", + "taito.tokyo.jp", + "tama.tokyo.jp", + "toshima.tokyo.jp", + "chizu.tottori.jp", + "hino.tottori.jp", + "kawahara.tottori.jp", + "koge.tottori.jp", + "kotoura.tottori.jp", + "misasa.tottori.jp", + "nanbu.tottori.jp", + "nichinan.tottori.jp", + "sakaiminato.tottori.jp", + "tottori.tottori.jp", + "wakasa.tottori.jp", + "yazu.tottori.jp", + "yonago.tottori.jp", + "asahi.toyama.jp", + "fuchu.toyama.jp", + "fukumitsu.toyama.jp", + "funahashi.toyama.jp", + "himi.toyama.jp", + "imizu.toyama.jp", + "inami.toyama.jp", + "johana.toyama.jp", + "kamiichi.toyama.jp", + "kurobe.toyama.jp", + "nakaniikawa.toyama.jp", + "namerikawa.toyama.jp", + "nanto.toyama.jp", + "nyuzen.toyama.jp", + "oyabe.toyama.jp", + "taira.toyama.jp", + "takaoka.toyama.jp", + "tateyama.toyama.jp", + "toga.toyama.jp", + "tonami.toyama.jp", + "toyama.toyama.jp", + "unazuki.toyama.jp", + "uozu.toyama.jp", + "yamada.toyama.jp", + "arida.wakayama.jp", + "aridagawa.wakayama.jp", + "gobo.wakayama.jp", + "hashimoto.wakayama.jp", + "hidaka.wakayama.jp", + "hirogawa.wakayama.jp", + "inami.wakayama.jp", + "iwade.wakayama.jp", + "kainan.wakayama.jp", + "kamitonda.wakayama.jp", + "katsuragi.wakayama.jp", + "kimino.wakayama.jp", + "kinokawa.wakayama.jp", + "kitayama.wakayama.jp", + "koya.wakayama.jp", + "koza.wakayama.jp", + "kozagawa.wakayama.jp", + "kudoyama.wakayama.jp", + "kushimoto.wakayama.jp", + "mihama.wakayama.jp", + "misato.wakayama.jp", + "nachikatsuura.wakayama.jp", + "shingu.wakayama.jp", + "shirahama.wakayama.jp", + "taiji.wakayama.jp", + "tanabe.wakayama.jp", + "wakayama.wakayama.jp", + "yuasa.wakayama.jp", + "yura.wakayama.jp", + "asahi.yamagata.jp", + "funagata.yamagata.jp", + "higashine.yamagata.jp", + "iide.yamagata.jp", + "kahoku.yamagata.jp", + "kaminoyama.yamagata.jp", + "kaneyama.yamagata.jp", + "kawanishi.yamagata.jp", + "mamurogawa.yamagata.jp", + "mikawa.yamagata.jp", + "murayama.yamagata.jp", + "nagai.yamagata.jp", + "nakayama.yamagata.jp", + "nanyo.yamagata.jp", + "nishikawa.yamagata.jp", + "obanazawa.yamagata.jp", + "oe.yamagata.jp", + "oguni.yamagata.jp", + "ohkura.yamagata.jp", + "oishida.yamagata.jp", + "sagae.yamagata.jp", + "sakata.yamagata.jp", + "sakegawa.yamagata.jp", + "shinjo.yamagata.jp", + "shirataka.yamagata.jp", + "shonai.yamagata.jp", + "takahata.yamagata.jp", + "tendo.yamagata.jp", + "tozawa.yamagata.jp", + "tsuruoka.yamagata.jp", + "yamagata.yamagata.jp", + "yamanobe.yamagata.jp", + "yonezawa.yamagata.jp", + "yuza.yamagata.jp", + "abu.yamaguchi.jp", + "hagi.yamaguchi.jp", + "hikari.yamaguchi.jp", + "hofu.yamaguchi.jp", + "iwakuni.yamaguchi.jp", + "kudamatsu.yamaguchi.jp", + "mitou.yamaguchi.jp", + "nagato.yamaguchi.jp", + "oshima.yamaguchi.jp", + "shimonoseki.yamaguchi.jp", + "shunan.yamaguchi.jp", + "tabuse.yamaguchi.jp", + "tokuyama.yamaguchi.jp", + "toyota.yamaguchi.jp", + "ube.yamaguchi.jp", + "yuu.yamaguchi.jp", + "chuo.yamanashi.jp", + "doshi.yamanashi.jp", + "fuefuki.yamanashi.jp", + 
"fujikawa.yamanashi.jp", + "fujikawaguchiko.yamanashi.jp", + "fujiyoshida.yamanashi.jp", + "hayakawa.yamanashi.jp", + "hokuto.yamanashi.jp", + "ichikawamisato.yamanashi.jp", + "kai.yamanashi.jp", + "kofu.yamanashi.jp", + "koshu.yamanashi.jp", + "kosuge.yamanashi.jp", + "minami-alps.yamanashi.jp", + "minobu.yamanashi.jp", + "nakamichi.yamanashi.jp", + "nanbu.yamanashi.jp", + "narusawa.yamanashi.jp", + "nirasaki.yamanashi.jp", + "nishikatsura.yamanashi.jp", + "oshino.yamanashi.jp", + "otsuki.yamanashi.jp", + "showa.yamanashi.jp", + "tabayama.yamanashi.jp", + "tsuru.yamanashi.jp", + "uenohara.yamanashi.jp", + "yamanakako.yamanashi.jp", + "yamanashi.yamanashi.jp", + "ke", + "ac.ke", + "co.ke", + "go.ke", + "info.ke", + "me.ke", + "mobi.ke", + "ne.ke", + "or.ke", + "sc.ke", + "kg", + "org.kg", + "net.kg", + "com.kg", + "edu.kg", + "gov.kg", + "mil.kg", + "*.kh", + "ki", + "edu.ki", + "biz.ki", + "net.ki", + "org.ki", + "gov.ki", + "info.ki", + "com.ki", + "km", + "org.km", + "nom.km", + "gov.km", + "prd.km", + "tm.km", + "edu.km", + "mil.km", + "ass.km", + "com.km", + "coop.km", + "asso.km", + "presse.km", + "medecin.km", + "notaires.km", + "pharmaciens.km", + "veterinaire.km", + "gouv.km", + "kn", + "net.kn", + "org.kn", + "edu.kn", + "gov.kn", + "kp", + "com.kp", + "edu.kp", + "gov.kp", + "org.kp", + "rep.kp", + "tra.kp", + "kr", + "ac.kr", + "co.kr", + "es.kr", + "go.kr", + "hs.kr", + "kg.kr", + "mil.kr", + "ms.kr", + "ne.kr", + "or.kr", + "pe.kr", + "re.kr", + "sc.kr", + "busan.kr", + "chungbuk.kr", + "chungnam.kr", + "daegu.kr", + "daejeon.kr", + "gangwon.kr", + "gwangju.kr", + "gyeongbuk.kr", + "gyeonggi.kr", + "gyeongnam.kr", + "incheon.kr", + "jeju.kr", + "jeonbuk.kr", + "jeonnam.kr", + "seoul.kr", + "ulsan.kr", + "*.kw", + "ky", + "edu.ky", + "gov.ky", + "com.ky", + "org.ky", + "net.ky", + "kz", + "org.kz", + "edu.kz", + "net.kz", + "gov.kz", + "mil.kz", + "com.kz", + "la", + "int.la", + "net.la", + "info.la", + "edu.la", + "gov.la", + "per.la", + "com.la", + "org.la", + "lb", + "com.lb", + "edu.lb", + "gov.lb", + "net.lb", + "org.lb", + "lc", + "com.lc", + "net.lc", + "co.lc", + "org.lc", + "edu.lc", + "gov.lc", + "li", + "lk", + "gov.lk", + "sch.lk", + "net.lk", + "int.lk", + "com.lk", + "org.lk", + "edu.lk", + "ngo.lk", + "soc.lk", + "web.lk", + "ltd.lk", + "assn.lk", + "grp.lk", + "hotel.lk", + "ac.lk", + "lr", + "com.lr", + "edu.lr", + "gov.lr", + "org.lr", + "net.lr", + "ls", + "co.ls", + "org.ls", + "lt", + "gov.lt", + "lu", + "lv", + "com.lv", + "edu.lv", + "gov.lv", + "org.lv", + "mil.lv", + "id.lv", + "net.lv", + "asn.lv", + "conf.lv", + "ly", + "com.ly", + "net.ly", + "gov.ly", + "plc.ly", + "edu.ly", + "sch.ly", + "med.ly", + "org.ly", + "id.ly", + "ma", + "co.ma", + "net.ma", + "gov.ma", + "org.ma", + "ac.ma", + "press.ma", + "mc", + "tm.mc", + "asso.mc", + "md", + "me", + "co.me", + "net.me", + "org.me", + "edu.me", + "ac.me", + "gov.me", + "its.me", + "priv.me", + "mg", + "org.mg", + "nom.mg", + "gov.mg", + "prd.mg", + "tm.mg", + "edu.mg", + "mil.mg", + "com.mg", + "co.mg", + "mh", + "mil", + "mk", + "com.mk", + "org.mk", + "net.mk", + "edu.mk", + "gov.mk", + "inf.mk", + "name.mk", + "ml", + "com.ml", + "edu.ml", + "gouv.ml", + "gov.ml", + "net.ml", + "org.ml", + "presse.ml", + "*.mm", + "mn", + "gov.mn", + "edu.mn", + "org.mn", + "mo", + "com.mo", + "net.mo", + "org.mo", + "edu.mo", + "gov.mo", + "mobi", + "mp", + "mq", + "mr", + "gov.mr", + "ms", + "com.ms", + "edu.ms", + "gov.ms", + "net.ms", + "org.ms", + "mt", + "com.mt", + "edu.mt", + "net.mt", + "org.mt", + "mu", 
+ "com.mu", + "net.mu", + "org.mu", + "gov.mu", + "ac.mu", + "co.mu", + "or.mu", + "museum", + "academy.museum", + "agriculture.museum", + "air.museum", + "airguard.museum", + "alabama.museum", + "alaska.museum", + "amber.museum", + "ambulance.museum", + "american.museum", + "americana.museum", + "americanantiques.museum", + "americanart.museum", + "amsterdam.museum", + "and.museum", + "annefrank.museum", + "anthro.museum", + "anthropology.museum", + "antiques.museum", + "aquarium.museum", + "arboretum.museum", + "archaeological.museum", + "archaeology.museum", + "architecture.museum", + "art.museum", + "artanddesign.museum", + "artcenter.museum", + "artdeco.museum", + "arteducation.museum", + "artgallery.museum", + "arts.museum", + "artsandcrafts.museum", + "asmatart.museum", + "assassination.museum", + "assisi.museum", + "association.museum", + "astronomy.museum", + "atlanta.museum", + "austin.museum", + "australia.museum", + "automotive.museum", + "aviation.museum", + "axis.museum", + "badajoz.museum", + "baghdad.museum", + "bahn.museum", + "bale.museum", + "baltimore.museum", + "barcelona.museum", + "baseball.museum", + "basel.museum", + "baths.museum", + "bauern.museum", + "beauxarts.museum", + "beeldengeluid.museum", + "bellevue.museum", + "bergbau.museum", + "berkeley.museum", + "berlin.museum", + "bern.museum", + "bible.museum", + "bilbao.museum", + "bill.museum", + "birdart.museum", + "birthplace.museum", + "bonn.museum", + "boston.museum", + "botanical.museum", + "botanicalgarden.museum", + "botanicgarden.museum", + "botany.museum", + "brandywinevalley.museum", + "brasil.museum", + "bristol.museum", + "british.museum", + "britishcolumbia.museum", + "broadcast.museum", + "brunel.museum", + "brussel.museum", + "brussels.museum", + "bruxelles.museum", + "building.museum", + "burghof.museum", + "bus.museum", + "bushey.museum", + "cadaques.museum", + "california.museum", + "cambridge.museum", + "can.museum", + "canada.museum", + "capebreton.museum", + "carrier.museum", + "cartoonart.museum", + "casadelamoneda.museum", + "castle.museum", + "castres.museum", + "celtic.museum", + "center.museum", + "chattanooga.museum", + "cheltenham.museum", + "chesapeakebay.museum", + "chicago.museum", + "children.museum", + "childrens.museum", + "childrensgarden.museum", + "chiropractic.museum", + "chocolate.museum", + "christiansburg.museum", + "cincinnati.museum", + "cinema.museum", + "circus.museum", + "civilisation.museum", + "civilization.museum", + "civilwar.museum", + "clinton.museum", + "clock.museum", + "coal.museum", + "coastaldefence.museum", + "cody.museum", + "coldwar.museum", + "collection.museum", + "colonialwilliamsburg.museum", + "coloradoplateau.museum", + "columbia.museum", + "columbus.museum", + "communication.museum", + "communications.museum", + "community.museum", + "computer.museum", + "computerhistory.museum", + "xn--comunicaes-v6a2o.museum", + "contemporary.museum", + "contemporaryart.museum", + "convent.museum", + "copenhagen.museum", + "corporation.museum", + "xn--correios-e-telecomunicaes-ghc29a.museum", + "corvette.museum", + "costume.museum", + "countryestate.museum", + "county.museum", + "crafts.museum", + "cranbrook.museum", + "creation.museum", + "cultural.museum", + "culturalcenter.museum", + "culture.museum", + "cyber.museum", + "cymru.museum", + "dali.museum", + "dallas.museum", + "database.museum", + "ddr.museum", + "decorativearts.museum", + "delaware.museum", + "delmenhorst.museum", + "denmark.museum", + "depot.museum", + "design.museum", + "detroit.museum", + 
"dinosaur.museum", + "discovery.museum", + "dolls.museum", + "donostia.museum", + "durham.museum", + "eastafrica.museum", + "eastcoast.museum", + "education.museum", + "educational.museum", + "egyptian.museum", + "eisenbahn.museum", + "elburg.museum", + "elvendrell.museum", + "embroidery.museum", + "encyclopedic.museum", + "england.museum", + "entomology.museum", + "environment.museum", + "environmentalconservation.museum", + "epilepsy.museum", + "essex.museum", + "estate.museum", + "ethnology.museum", + "exeter.museum", + "exhibition.museum", + "family.museum", + "farm.museum", + "farmequipment.museum", + "farmers.museum", + "farmstead.museum", + "field.museum", + "figueres.museum", + "filatelia.museum", + "film.museum", + "fineart.museum", + "finearts.museum", + "finland.museum", + "flanders.museum", + "florida.museum", + "force.museum", + "fortmissoula.museum", + "fortworth.museum", + "foundation.museum", + "francaise.museum", + "frankfurt.museum", + "franziskaner.museum", + "freemasonry.museum", + "freiburg.museum", + "fribourg.museum", + "frog.museum", + "fundacio.museum", + "furniture.museum", + "gallery.museum", + "garden.museum", + "gateway.museum", + "geelvinck.museum", + "gemological.museum", + "geology.museum", + "georgia.museum", + "giessen.museum", + "glas.museum", + "glass.museum", + "gorge.museum", + "grandrapids.museum", + "graz.museum", + "guernsey.museum", + "halloffame.museum", + "hamburg.museum", + "handson.museum", + "harvestcelebration.museum", + "hawaii.museum", + "health.museum", + "heimatunduhren.museum", + "hellas.museum", + "helsinki.museum", + "hembygdsforbund.museum", + "heritage.museum", + "histoire.museum", + "historical.museum", + "historicalsociety.museum", + "historichouses.museum", + "historisch.museum", + "historisches.museum", + "history.museum", + "historyofscience.museum", + "horology.museum", + "house.museum", + "humanities.museum", + "illustration.museum", + "imageandsound.museum", + "indian.museum", + "indiana.museum", + "indianapolis.museum", + "indianmarket.museum", + "intelligence.museum", + "interactive.museum", + "iraq.museum", + "iron.museum", + "isleofman.museum", + "jamison.museum", + "jefferson.museum", + "jerusalem.museum", + "jewelry.museum", + "jewish.museum", + "jewishart.museum", + "jfk.museum", + "journalism.museum", + "judaica.museum", + "judygarland.museum", + "juedisches.museum", + "juif.museum", + "karate.museum", + "karikatur.museum", + "kids.museum", + "koebenhavn.museum", + "koeln.museum", + "kunst.museum", + "kunstsammlung.museum", + "kunstunddesign.museum", + "labor.museum", + "labour.museum", + "lajolla.museum", + "lancashire.museum", + "landes.museum", + "lans.museum", + "xn--lns-qla.museum", + "larsson.museum", + "lewismiller.museum", + "lincoln.museum", + "linz.museum", + "living.museum", + "livinghistory.museum", + "localhistory.museum", + "london.museum", + "losangeles.museum", + "louvre.museum", + "loyalist.museum", + "lucerne.museum", + "luxembourg.museum", + "luzern.museum", + "mad.museum", + "madrid.museum", + "mallorca.museum", + "manchester.museum", + "mansion.museum", + "mansions.museum", + "manx.museum", + "marburg.museum", + "maritime.museum", + "maritimo.museum", + "maryland.museum", + "marylhurst.museum", + "media.museum", + "medical.museum", + "medizinhistorisches.museum", + "meeres.museum", + "memorial.museum", + "mesaverde.museum", + "michigan.museum", + "midatlantic.museum", + "military.museum", + "mill.museum", + "miners.museum", + "mining.museum", + "minnesota.museum", + "missile.museum", + 
"missoula.museum", + "modern.museum", + "moma.museum", + "money.museum", + "monmouth.museum", + "monticello.museum", + "montreal.museum", + "moscow.museum", + "motorcycle.museum", + "muenchen.museum", + "muenster.museum", + "mulhouse.museum", + "muncie.museum", + "museet.museum", + "museumcenter.museum", + "museumvereniging.museum", + "music.museum", + "national.museum", + "nationalfirearms.museum", + "nationalheritage.museum", + "nativeamerican.museum", + "naturalhistory.museum", + "naturalhistorymuseum.museum", + "naturalsciences.museum", + "nature.museum", + "naturhistorisches.museum", + "natuurwetenschappen.museum", + "naumburg.museum", + "naval.museum", + "nebraska.museum", + "neues.museum", + "newhampshire.museum", + "newjersey.museum", + "newmexico.museum", + "newport.museum", + "newspaper.museum", + "newyork.museum", + "niepce.museum", + "norfolk.museum", + "north.museum", + "nrw.museum", + "nuernberg.museum", + "nuremberg.museum", + "nyc.museum", + "nyny.museum", + "oceanographic.museum", + "oceanographique.museum", + "omaha.museum", + "online.museum", + "ontario.museum", + "openair.museum", + "oregon.museum", + "oregontrail.museum", + "otago.museum", + "oxford.museum", + "pacific.museum", + "paderborn.museum", + "palace.museum", + "paleo.museum", + "palmsprings.museum", + "panama.museum", + "paris.museum", + "pasadena.museum", + "pharmacy.museum", + "philadelphia.museum", + "philadelphiaarea.museum", + "philately.museum", + "phoenix.museum", + "photography.museum", + "pilots.museum", + "pittsburgh.museum", + "planetarium.museum", + "plantation.museum", + "plants.museum", + "plaza.museum", + "portal.museum", + "portland.museum", + "portlligat.museum", + "posts-and-telecommunications.museum", + "preservation.museum", + "presidio.museum", + "press.museum", + "project.museum", + "public.museum", + "pubol.museum", + "quebec.museum", + "railroad.museum", + "railway.museum", + "research.museum", + "resistance.museum", + "riodejaneiro.museum", + "rochester.museum", + "rockart.museum", + "roma.museum", + "russia.museum", + "saintlouis.museum", + "salem.museum", + "salvadordali.museum", + "salzburg.museum", + "sandiego.museum", + "sanfrancisco.museum", + "santabarbara.museum", + "santacruz.museum", + "santafe.museum", + "saskatchewan.museum", + "satx.museum", + "savannahga.museum", + "schlesisches.museum", + "schoenbrunn.museum", + "schokoladen.museum", + "school.museum", + "schweiz.museum", + "science.museum", + "scienceandhistory.museum", + "scienceandindustry.museum", + "sciencecenter.museum", + "sciencecenters.museum", + "science-fiction.museum", + "sciencehistory.museum", + "sciences.museum", + "sciencesnaturelles.museum", + "scotland.museum", + "seaport.museum", + "settlement.museum", + "settlers.museum", + "shell.museum", + "sherbrooke.museum", + "sibenik.museum", + "silk.museum", + "ski.museum", + "skole.museum", + "society.museum", + "sologne.museum", + "soundandvision.museum", + "southcarolina.museum", + "southwest.museum", + "space.museum", + "spy.museum", + "square.museum", + "stadt.museum", + "stalbans.museum", + "starnberg.museum", + "state.museum", + "stateofdelaware.museum", + "station.museum", + "steam.museum", + "steiermark.museum", + "stjohn.museum", + "stockholm.museum", + "stpetersburg.museum", + "stuttgart.museum", + "suisse.museum", + "surgeonshall.museum", + "surrey.museum", + "svizzera.museum", + "sweden.museum", + "sydney.museum", + "tank.museum", + "tcm.museum", + "technology.museum", + "telekommunikation.museum", + "television.museum", + "texas.museum", + 
"textile.museum", + "theater.museum", + "time.museum", + "timekeeping.museum", + "topology.museum", + "torino.museum", + "touch.museum", + "town.museum", + "transport.museum", + "tree.museum", + "trolley.museum", + "trust.museum", + "trustee.museum", + "uhren.museum", + "ulm.museum", + "undersea.museum", + "university.museum", + "usa.museum", + "usantiques.museum", + "usarts.museum", + "uscountryestate.museum", + "usculture.museum", + "usdecorativearts.museum", + "usgarden.museum", + "ushistory.museum", + "ushuaia.museum", + "uslivinghistory.museum", + "utah.museum", + "uvic.museum", + "valley.museum", + "vantaa.museum", + "versailles.museum", + "viking.museum", + "village.museum", + "virginia.museum", + "virtual.museum", + "virtuel.museum", + "vlaanderen.museum", + "volkenkunde.museum", + "wales.museum", + "wallonie.museum", + "war.museum", + "washingtondc.museum", + "watchandclock.museum", + "watch-and-clock.museum", + "western.museum", + "westfalen.museum", + "whaling.museum", + "wildlife.museum", + "williamsburg.museum", + "windmill.museum", + "workshop.museum", + "york.museum", + "yorkshire.museum", + "yosemite.museum", + "youth.museum", + "zoological.museum", + "zoology.museum", + "xn--9dbhblg6di.museum", + "xn--h1aegh.museum", + "mv", + "aero.mv", + "biz.mv", + "com.mv", + "coop.mv", + "edu.mv", + "gov.mv", + "info.mv", + "int.mv", + "mil.mv", + "museum.mv", + "name.mv", + "net.mv", + "org.mv", + "pro.mv", + "mw", + "ac.mw", + "biz.mw", + "co.mw", + "com.mw", + "coop.mw", + "edu.mw", + "gov.mw", + "int.mw", + "museum.mw", + "net.mw", + "org.mw", + "mx", + "com.mx", + "org.mx", + "gob.mx", + "edu.mx", + "net.mx", + "my", + "com.my", + "net.my", + "org.my", + "gov.my", + "edu.my", + "mil.my", + "name.my", + "mz", + "ac.mz", + "adv.mz", + "co.mz", + "edu.mz", + "gov.mz", + "mil.mz", + "net.mz", + "org.mz", + "na", + "info.na", + "pro.na", + "name.na", + "school.na", + "or.na", + "dr.na", + "us.na", + "mx.na", + "ca.na", + "in.na", + "cc.na", + "tv.na", + "ws.na", + "mobi.na", + "co.na", + "com.na", + "org.na", + "name", + "nc", + "asso.nc", + "nom.nc", + "ne", + "net", + "nf", + "com.nf", + "net.nf", + "per.nf", + "rec.nf", + "web.nf", + "arts.nf", + "firm.nf", + "info.nf", + "other.nf", + "store.nf", + "ng", + "com.ng", + "edu.ng", + "gov.ng", + "i.ng", + "mil.ng", + "mobi.ng", + "name.ng", + "net.ng", + "org.ng", + "sch.ng", + "ni", + "ac.ni", + "biz.ni", + "co.ni", + "com.ni", + "edu.ni", + "gob.ni", + "in.ni", + "info.ni", + "int.ni", + "mil.ni", + "net.ni", + "nom.ni", + "org.ni", + "web.ni", + "nl", + "bv.nl", + "no", + "fhs.no", + "vgs.no", + "fylkesbibl.no", + "folkebibl.no", + "museum.no", + "idrett.no", + "priv.no", + "mil.no", + "stat.no", + "dep.no", + "kommune.no", + "herad.no", + "aa.no", + "ah.no", + "bu.no", + "fm.no", + "hl.no", + "hm.no", + "jan-mayen.no", + "mr.no", + "nl.no", + "nt.no", + "of.no", + "ol.no", + "oslo.no", + "rl.no", + "sf.no", + "st.no", + "svalbard.no", + "tm.no", + "tr.no", + "va.no", + "vf.no", + "gs.aa.no", + "gs.ah.no", + "gs.bu.no", + "gs.fm.no", + "gs.hl.no", + "gs.hm.no", + "gs.jan-mayen.no", + "gs.mr.no", + "gs.nl.no", + "gs.nt.no", + "gs.of.no", + "gs.ol.no", + "gs.oslo.no", + "gs.rl.no", + "gs.sf.no", + "gs.st.no", + "gs.svalbard.no", + "gs.tm.no", + "gs.tr.no", + "gs.va.no", + "gs.vf.no", + "akrehamn.no", + "xn--krehamn-dxa.no", + "algard.no", + "xn--lgrd-poac.no", + "arna.no", + "brumunddal.no", + "bryne.no", + "bronnoysund.no", + "xn--brnnysund-m8ac.no", + "drobak.no", + "xn--drbak-wua.no", + "egersund.no", + "fetsund.no", + "floro.no", 
+ "xn--flor-jra.no", + "fredrikstad.no", + "hokksund.no", + "honefoss.no", + "xn--hnefoss-q1a.no", + "jessheim.no", + "jorpeland.no", + "xn--jrpeland-54a.no", + "kirkenes.no", + "kopervik.no", + "krokstadelva.no", + "langevag.no", + "xn--langevg-jxa.no", + "leirvik.no", + "mjondalen.no", + "xn--mjndalen-64a.no", + "mo-i-rana.no", + "mosjoen.no", + "xn--mosjen-eya.no", + "nesoddtangen.no", + "orkanger.no", + "osoyro.no", + "xn--osyro-wua.no", + "raholt.no", + "xn--rholt-mra.no", + "sandnessjoen.no", + "xn--sandnessjen-ogb.no", + "skedsmokorset.no", + "slattum.no", + "spjelkavik.no", + "stathelle.no", + "stavern.no", + "stjordalshalsen.no", + "xn--stjrdalshalsen-sqb.no", + "tananger.no", + "tranby.no", + "vossevangen.no", + "afjord.no", + "xn--fjord-lra.no", + "agdenes.no", + "al.no", + "xn--l-1fa.no", + "alesund.no", + "xn--lesund-hua.no", + "alstahaug.no", + "alta.no", + "xn--lt-liac.no", + "alaheadju.no", + "xn--laheadju-7ya.no", + "alvdal.no", + "amli.no", + "xn--mli-tla.no", + "amot.no", + "xn--mot-tla.no", + "andebu.no", + "andoy.no", + "xn--andy-ira.no", + "andasuolo.no", + "ardal.no", + "xn--rdal-poa.no", + "aremark.no", + "arendal.no", + "xn--s-1fa.no", + "aseral.no", + "xn--seral-lra.no", + "asker.no", + "askim.no", + "askvoll.no", + "askoy.no", + "xn--asky-ira.no", + "asnes.no", + "xn--snes-poa.no", + "audnedaln.no", + "aukra.no", + "aure.no", + "aurland.no", + "aurskog-holand.no", + "xn--aurskog-hland-jnb.no", + "austevoll.no", + "austrheim.no", + "averoy.no", + "xn--avery-yua.no", + "balestrand.no", + "ballangen.no", + "balat.no", + "xn--blt-elab.no", + "balsfjord.no", + "bahccavuotna.no", + "xn--bhccavuotna-k7a.no", + "bamble.no", + "bardu.no", + "beardu.no", + "beiarn.no", + "bajddar.no", + "xn--bjddar-pta.no", + "baidar.no", + "xn--bidr-5nac.no", + "berg.no", + "bergen.no", + "berlevag.no", + "xn--berlevg-jxa.no", + "bearalvahki.no", + "xn--bearalvhki-y4a.no", + "bindal.no", + "birkenes.no", + "bjarkoy.no", + "xn--bjarky-fya.no", + "bjerkreim.no", + "bjugn.no", + "bodo.no", + "xn--bod-2na.no", + "badaddja.no", + "xn--bdddj-mrabd.no", + "budejju.no", + "bokn.no", + "bremanger.no", + "bronnoy.no", + "xn--brnny-wuac.no", + "bygland.no", + "bykle.no", + "barum.no", + "xn--brum-voa.no", + "bo.telemark.no", + "xn--b-5ga.telemark.no", + "bo.nordland.no", + "xn--b-5ga.nordland.no", + "bievat.no", + "xn--bievt-0qa.no", + "bomlo.no", + "xn--bmlo-gra.no", + "batsfjord.no", + "xn--btsfjord-9za.no", + "bahcavuotna.no", + "xn--bhcavuotna-s4a.no", + "dovre.no", + "drammen.no", + "drangedal.no", + "dyroy.no", + "xn--dyry-ira.no", + "donna.no", + "xn--dnna-gra.no", + "eid.no", + "eidfjord.no", + "eidsberg.no", + "eidskog.no", + "eidsvoll.no", + "eigersund.no", + "elverum.no", + "enebakk.no", + "engerdal.no", + "etne.no", + "etnedal.no", + "evenes.no", + "evenassi.no", + "xn--eveni-0qa01ga.no", + "evje-og-hornnes.no", + "farsund.no", + "fauske.no", + "fuossko.no", + "fuoisku.no", + "fedje.no", + "fet.no", + "finnoy.no", + "xn--finny-yua.no", + "fitjar.no", + "fjaler.no", + "fjell.no", + "flakstad.no", + "flatanger.no", + "flekkefjord.no", + "flesberg.no", + "flora.no", + "fla.no", + "xn--fl-zia.no", + "folldal.no", + "forsand.no", + "fosnes.no", + "frei.no", + "frogn.no", + "froland.no", + "frosta.no", + "frana.no", + "xn--frna-woa.no", + "froya.no", + "xn--frya-hra.no", + "fusa.no", + "fyresdal.no", + "forde.no", + "xn--frde-gra.no", + "gamvik.no", + "gangaviika.no", + "xn--ggaviika-8ya47h.no", + "gaular.no", + "gausdal.no", + "gildeskal.no", + "xn--gildeskl-g0a.no", + "giske.no", + 
"gjemnes.no", + "gjerdrum.no", + "gjerstad.no", + "gjesdal.no", + "gjovik.no", + "xn--gjvik-wua.no", + "gloppen.no", + "gol.no", + "gran.no", + "grane.no", + "granvin.no", + "gratangen.no", + "grimstad.no", + "grong.no", + "kraanghke.no", + "xn--kranghke-b0a.no", + "grue.no", + "gulen.no", + "hadsel.no", + "halden.no", + "halsa.no", + "hamar.no", + "hamaroy.no", + "habmer.no", + "xn--hbmer-xqa.no", + "hapmir.no", + "xn--hpmir-xqa.no", + "hammerfest.no", + "hammarfeasta.no", + "xn--hmmrfeasta-s4ac.no", + "haram.no", + "hareid.no", + "harstad.no", + "hasvik.no", + "aknoluokta.no", + "xn--koluokta-7ya57h.no", + "hattfjelldal.no", + "aarborte.no", + "haugesund.no", + "hemne.no", + "hemnes.no", + "hemsedal.no", + "heroy.more-og-romsdal.no", + "xn--hery-ira.xn--mre-og-romsdal-qqb.no", + "heroy.nordland.no", + "xn--hery-ira.nordland.no", + "hitra.no", + "hjartdal.no", + "hjelmeland.no", + "hobol.no", + "xn--hobl-ira.no", + "hof.no", + "hol.no", + "hole.no", + "holmestrand.no", + "holtalen.no", + "xn--holtlen-hxa.no", + "hornindal.no", + "horten.no", + "hurdal.no", + "hurum.no", + "hvaler.no", + "hyllestad.no", + "hagebostad.no", + "xn--hgebostad-g3a.no", + "hoyanger.no", + "xn--hyanger-q1a.no", + "hoylandet.no", + "xn--hylandet-54a.no", + "ha.no", + "xn--h-2fa.no", + "ibestad.no", + "inderoy.no", + "xn--indery-fya.no", + "iveland.no", + "jevnaker.no", + "jondal.no", + "jolster.no", + "xn--jlster-bya.no", + "karasjok.no", + "karasjohka.no", + "xn--krjohka-hwab49j.no", + "karlsoy.no", + "galsa.no", + "xn--gls-elac.no", + "karmoy.no", + "xn--karmy-yua.no", + "kautokeino.no", + "guovdageaidnu.no", + "klepp.no", + "klabu.no", + "xn--klbu-woa.no", + "kongsberg.no", + "kongsvinger.no", + "kragero.no", + "xn--krager-gya.no", + "kristiansand.no", + "kristiansund.no", + "krodsherad.no", + "xn--krdsherad-m8a.no", + "kvalsund.no", + "rahkkeravju.no", + "xn--rhkkervju-01af.no", + "kvam.no", + "kvinesdal.no", + "kvinnherad.no", + "kviteseid.no", + "kvitsoy.no", + "xn--kvitsy-fya.no", + "kvafjord.no", + "xn--kvfjord-nxa.no", + "giehtavuoatna.no", + "kvanangen.no", + "xn--kvnangen-k0a.no", + "navuotna.no", + "xn--nvuotna-hwa.no", + "kafjord.no", + "xn--kfjord-iua.no", + "gaivuotna.no", + "xn--givuotna-8ya.no", + "larvik.no", + "lavangen.no", + "lavagis.no", + "loabat.no", + "xn--loabt-0qa.no", + "lebesby.no", + "davvesiida.no", + "leikanger.no", + "leirfjord.no", + "leka.no", + "leksvik.no", + "lenvik.no", + "leangaviika.no", + "xn--leagaviika-52b.no", + "lesja.no", + "levanger.no", + "lier.no", + "lierne.no", + "lillehammer.no", + "lillesand.no", + "lindesnes.no", + "lindas.no", + "xn--linds-pra.no", + "lom.no", + "loppa.no", + "lahppi.no", + "xn--lhppi-xqa.no", + "lund.no", + "lunner.no", + "luroy.no", + "xn--lury-ira.no", + "luster.no", + "lyngdal.no", + "lyngen.no", + "ivgu.no", + "lardal.no", + "lerdal.no", + "xn--lrdal-sra.no", + "lodingen.no", + "xn--ldingen-q1a.no", + "lorenskog.no", + "xn--lrenskog-54a.no", + "loten.no", + "xn--lten-gra.no", + "malvik.no", + "masoy.no", + "xn--msy-ula0h.no", + "muosat.no", + "xn--muost-0qa.no", + "mandal.no", + "marker.no", + "marnardal.no", + "masfjorden.no", + "meland.no", + "meldal.no", + "melhus.no", + "meloy.no", + "xn--mely-ira.no", + "meraker.no", + "xn--merker-kua.no", + "moareke.no", + "xn--moreke-jua.no", + "midsund.no", + "midtre-gauldal.no", + "modalen.no", + "modum.no", + "molde.no", + "moskenes.no", + "moss.no", + "mosvik.no", + "malselv.no", + "xn--mlselv-iua.no", + "malatvuopmi.no", + "xn--mlatvuopmi-s4a.no", + "namdalseid.no", + "aejrie.no", + 
"namsos.no", + "namsskogan.no", + "naamesjevuemie.no", + "xn--nmesjevuemie-tcba.no", + "laakesvuemie.no", + "nannestad.no", + "narvik.no", + "narviika.no", + "naustdal.no", + "nedre-eiker.no", + "nes.akershus.no", + "nes.buskerud.no", + "nesna.no", + "nesodden.no", + "nesseby.no", + "unjarga.no", + "xn--unjrga-rta.no", + "nesset.no", + "nissedal.no", + "nittedal.no", + "nord-aurdal.no", + "nord-fron.no", + "nord-odal.no", + "norddal.no", + "nordkapp.no", + "davvenjarga.no", + "xn--davvenjrga-y4a.no", + "nordre-land.no", + "nordreisa.no", + "raisa.no", + "xn--risa-5na.no", + "nore-og-uvdal.no", + "notodden.no", + "naroy.no", + "xn--nry-yla5g.no", + "notteroy.no", + "xn--nttery-byae.no", + "odda.no", + "oksnes.no", + "xn--ksnes-uua.no", + "oppdal.no", + "oppegard.no", + "xn--oppegrd-ixa.no", + "orkdal.no", + "orland.no", + "xn--rland-uua.no", + "orskog.no", + "xn--rskog-uua.no", + "orsta.no", + "xn--rsta-fra.no", + "os.hedmark.no", + "os.hordaland.no", + "osen.no", + "osteroy.no", + "xn--ostery-fya.no", + "ostre-toten.no", + "xn--stre-toten-zcb.no", + "overhalla.no", + "ovre-eiker.no", + "xn--vre-eiker-k8a.no", + "oyer.no", + "xn--yer-zna.no", + "oygarden.no", + "xn--ygarden-p1a.no", + "oystre-slidre.no", + "xn--ystre-slidre-ujb.no", + "porsanger.no", + "porsangu.no", + "xn--porsgu-sta26f.no", + "porsgrunn.no", + "radoy.no", + "xn--rady-ira.no", + "rakkestad.no", + "rana.no", + "ruovat.no", + "randaberg.no", + "rauma.no", + "rendalen.no", + "rennebu.no", + "rennesoy.no", + "xn--rennesy-v1a.no", + "rindal.no", + "ringebu.no", + "ringerike.no", + "ringsaker.no", + "rissa.no", + "risor.no", + "xn--risr-ira.no", + "roan.no", + "rollag.no", + "rygge.no", + "ralingen.no", + "xn--rlingen-mxa.no", + "rodoy.no", + "xn--rdy-0nab.no", + "romskog.no", + "xn--rmskog-bya.no", + "roros.no", + "xn--rros-gra.no", + "rost.no", + "xn--rst-0na.no", + "royken.no", + "xn--ryken-vua.no", + "royrvik.no", + "xn--ryrvik-bya.no", + "rade.no", + "xn--rde-ula.no", + "salangen.no", + "siellak.no", + "saltdal.no", + "salat.no", + "xn--slt-elab.no", + "xn--slat-5na.no", + "samnanger.no", + "sande.more-og-romsdal.no", + "sande.xn--mre-og-romsdal-qqb.no", + "sande.vestfold.no", + "sandefjord.no", + "sandnes.no", + "sandoy.no", + "xn--sandy-yua.no", + "sarpsborg.no", + "sauda.no", + "sauherad.no", + "sel.no", + "selbu.no", + "selje.no", + "seljord.no", + "sigdal.no", + "siljan.no", + "sirdal.no", + "skaun.no", + "skedsmo.no", + "ski.no", + "skien.no", + "skiptvet.no", + "skjervoy.no", + "xn--skjervy-v1a.no", + "skierva.no", + "xn--skierv-uta.no", + "skjak.no", + "xn--skjk-soa.no", + "skodje.no", + "skanland.no", + "xn--sknland-fxa.no", + "skanit.no", + "xn--sknit-yqa.no", + "smola.no", + "xn--smla-hra.no", + "snillfjord.no", + "snasa.no", + "xn--snsa-roa.no", + "snoasa.no", + "snaase.no", + "xn--snase-nra.no", + "sogndal.no", + "sokndal.no", + "sola.no", + "solund.no", + "songdalen.no", + "sortland.no", + "spydeberg.no", + "stange.no", + "stavanger.no", + "steigen.no", + "steinkjer.no", + "stjordal.no", + "xn--stjrdal-s1a.no", + "stokke.no", + "stor-elvdal.no", + "stord.no", + "stordal.no", + "storfjord.no", + "omasvuotna.no", + "strand.no", + "stranda.no", + "stryn.no", + "sula.no", + "suldal.no", + "sund.no", + "sunndal.no", + "surnadal.no", + "sveio.no", + "svelvik.no", + "sykkylven.no", + "sogne.no", + "xn--sgne-gra.no", + "somna.no", + "xn--smna-gra.no", + "sondre-land.no", + "xn--sndre-land-0cb.no", + "sor-aurdal.no", + "xn--sr-aurdal-l8a.no", + "sor-fron.no", + "xn--sr-fron-q1a.no", + "sor-odal.no", + 
"xn--sr-odal-q1a.no", + "sor-varanger.no", + "xn--sr-varanger-ggb.no", + "matta-varjjat.no", + "xn--mtta-vrjjat-k7af.no", + "sorfold.no", + "xn--srfold-bya.no", + "sorreisa.no", + "xn--srreisa-q1a.no", + "sorum.no", + "xn--srum-gra.no", + "tana.no", + "deatnu.no", + "time.no", + "tingvoll.no", + "tinn.no", + "tjeldsund.no", + "dielddanuorri.no", + "tjome.no", + "xn--tjme-hra.no", + "tokke.no", + "tolga.no", + "torsken.no", + "tranoy.no", + "xn--trany-yua.no", + "tromso.no", + "xn--troms-zua.no", + "tromsa.no", + "romsa.no", + "trondheim.no", + "troandin.no", + "trysil.no", + "trana.no", + "xn--trna-woa.no", + "trogstad.no", + "xn--trgstad-r1a.no", + "tvedestrand.no", + "tydal.no", + "tynset.no", + "tysfjord.no", + "divtasvuodna.no", + "divttasvuotna.no", + "tysnes.no", + "tysvar.no", + "xn--tysvr-vra.no", + "tonsberg.no", + "xn--tnsberg-q1a.no", + "ullensaker.no", + "ullensvang.no", + "ulvik.no", + "utsira.no", + "vadso.no", + "xn--vads-jra.no", + "cahcesuolo.no", + "xn--hcesuolo-7ya35b.no", + "vaksdal.no", + "valle.no", + "vang.no", + "vanylven.no", + "vardo.no", + "xn--vard-jra.no", + "varggat.no", + "xn--vrggt-xqad.no", + "vefsn.no", + "vaapste.no", + "vega.no", + "vegarshei.no", + "xn--vegrshei-c0a.no", + "vennesla.no", + "verdal.no", + "verran.no", + "vestby.no", + "vestnes.no", + "vestre-slidre.no", + "vestre-toten.no", + "vestvagoy.no", + "xn--vestvgy-ixa6o.no", + "vevelstad.no", + "vik.no", + "vikna.no", + "vindafjord.no", + "volda.no", + "voss.no", + "varoy.no", + "xn--vry-yla5g.no", + "vagan.no", + "xn--vgan-qoa.no", + "voagat.no", + "vagsoy.no", + "xn--vgsy-qoa0j.no", + "vaga.no", + "xn--vg-yiab.no", + "valer.ostfold.no", + "xn--vler-qoa.xn--stfold-9xa.no", + "valer.hedmark.no", + "xn--vler-qoa.hedmark.no", + "*.np", + "nr", + "biz.nr", + "info.nr", + "gov.nr", + "edu.nr", + "org.nr", + "net.nr", + "com.nr", + "nu", + "nz", + "ac.nz", + "co.nz", + "cri.nz", + "geek.nz", + "gen.nz", + "govt.nz", + "health.nz", + "iwi.nz", + "kiwi.nz", + "maori.nz", + "mil.nz", + "xn--mori-qsa.nz", + "net.nz", + "org.nz", + "parliament.nz", + "school.nz", + "om", + "co.om", + "com.om", + "edu.om", + "gov.om", + "med.om", + "museum.om", + "net.om", + "org.om", + "pro.om", + "onion", + "org", + "pa", + "ac.pa", + "gob.pa", + "com.pa", + "org.pa", + "sld.pa", + "edu.pa", + "net.pa", + "ing.pa", + "abo.pa", + "med.pa", + "nom.pa", + "pe", + "edu.pe", + "gob.pe", + "nom.pe", + "mil.pe", + "org.pe", + "com.pe", + "net.pe", + "pf", + "com.pf", + "org.pf", + "edu.pf", + "*.pg", + "ph", + "com.ph", + "net.ph", + "org.ph", + "gov.ph", + "edu.ph", + "ngo.ph", + "mil.ph", + "i.ph", + "pk", + "com.pk", + "net.pk", + "edu.pk", + "org.pk", + "fam.pk", + "biz.pk", + "web.pk", + "gov.pk", + "gob.pk", + "gok.pk", + "gon.pk", + "gop.pk", + "gos.pk", + "info.pk", + "pl", + "com.pl", + "net.pl", + "org.pl", + "aid.pl", + "agro.pl", + "atm.pl", + "auto.pl", + "biz.pl", + "edu.pl", + "gmina.pl", + "gsm.pl", + "info.pl", + "mail.pl", + "miasta.pl", + "media.pl", + "mil.pl", + "nieruchomosci.pl", + "nom.pl", + "pc.pl", + "powiat.pl", + "priv.pl", + "realestate.pl", + "rel.pl", + "sex.pl", + "shop.pl", + "sklep.pl", + "sos.pl", + "szkola.pl", + "targi.pl", + "tm.pl", + "tourism.pl", + "travel.pl", + "turystyka.pl", + "gov.pl", + "ap.gov.pl", + "ic.gov.pl", + "is.gov.pl", + "us.gov.pl", + "kmpsp.gov.pl", + "kppsp.gov.pl", + "kwpsp.gov.pl", + "psp.gov.pl", + "wskr.gov.pl", + "kwp.gov.pl", + "mw.gov.pl", + "ug.gov.pl", + "um.gov.pl", + "umig.gov.pl", + "ugim.gov.pl", + "upow.gov.pl", + "uw.gov.pl", + "starostwo.gov.pl", + 
"pa.gov.pl", + "po.gov.pl", + "psse.gov.pl", + "pup.gov.pl", + "rzgw.gov.pl", + "sa.gov.pl", + "so.gov.pl", + "sr.gov.pl", + "wsa.gov.pl", + "sko.gov.pl", + "uzs.gov.pl", + "wiih.gov.pl", + "winb.gov.pl", + "pinb.gov.pl", + "wios.gov.pl", + "witd.gov.pl", + "wzmiuw.gov.pl", + "piw.gov.pl", + "wiw.gov.pl", + "griw.gov.pl", + "wif.gov.pl", + "oum.gov.pl", + "sdn.gov.pl", + "zp.gov.pl", + "uppo.gov.pl", + "mup.gov.pl", + "wuoz.gov.pl", + "konsulat.gov.pl", + "oirm.gov.pl", + "augustow.pl", + "babia-gora.pl", + "bedzin.pl", + "beskidy.pl", + "bialowieza.pl", + "bialystok.pl", + "bielawa.pl", + "bieszczady.pl", + "boleslawiec.pl", + "bydgoszcz.pl", + "bytom.pl", + "cieszyn.pl", + "czeladz.pl", + "czest.pl", + "dlugoleka.pl", + "elblag.pl", + "elk.pl", + "glogow.pl", + "gniezno.pl", + "gorlice.pl", + "grajewo.pl", + "ilawa.pl", + "jaworzno.pl", + "jelenia-gora.pl", + "jgora.pl", + "kalisz.pl", + "kazimierz-dolny.pl", + "karpacz.pl", + "kartuzy.pl", + "kaszuby.pl", + "katowice.pl", + "kepno.pl", + "ketrzyn.pl", + "klodzko.pl", + "kobierzyce.pl", + "kolobrzeg.pl", + "konin.pl", + "konskowola.pl", + "kutno.pl", + "lapy.pl", + "lebork.pl", + "legnica.pl", + "lezajsk.pl", + "limanowa.pl", + "lomza.pl", + "lowicz.pl", + "lubin.pl", + "lukow.pl", + "malbork.pl", + "malopolska.pl", + "mazowsze.pl", + "mazury.pl", + "mielec.pl", + "mielno.pl", + "mragowo.pl", + "naklo.pl", + "nowaruda.pl", + "nysa.pl", + "olawa.pl", + "olecko.pl", + "olkusz.pl", + "olsztyn.pl", + "opoczno.pl", + "opole.pl", + "ostroda.pl", + "ostroleka.pl", + "ostrowiec.pl", + "ostrowwlkp.pl", + "pila.pl", + "pisz.pl", + "podhale.pl", + "podlasie.pl", + "polkowice.pl", + "pomorze.pl", + "pomorskie.pl", + "prochowice.pl", + "pruszkow.pl", + "przeworsk.pl", + "pulawy.pl", + "radom.pl", + "rawa-maz.pl", + "rybnik.pl", + "rzeszow.pl", + "sanok.pl", + "sejny.pl", + "slask.pl", + "slupsk.pl", + "sosnowiec.pl", + "stalowa-wola.pl", + "skoczow.pl", + "starachowice.pl", + "stargard.pl", + "suwalki.pl", + "swidnica.pl", + "swiebodzin.pl", + "swinoujscie.pl", + "szczecin.pl", + "szczytno.pl", + "tarnobrzeg.pl", + "tgory.pl", + "turek.pl", + "tychy.pl", + "ustka.pl", + "walbrzych.pl", + "warmia.pl", + "warszawa.pl", + "waw.pl", + "wegrow.pl", + "wielun.pl", + "wlocl.pl", + "wloclawek.pl", + "wodzislaw.pl", + "wolomin.pl", + "wroclaw.pl", + "zachpomor.pl", + "zagan.pl", + "zarow.pl", + "zgora.pl", + "zgorzelec.pl", + "pm", + "pn", + "gov.pn", + "co.pn", + "org.pn", + "edu.pn", + "net.pn", + "post", + "pr", + "com.pr", + "net.pr", + "org.pr", + "gov.pr", + "edu.pr", + "isla.pr", + "pro.pr", + "biz.pr", + "info.pr", + "name.pr", + "est.pr", + "prof.pr", + "ac.pr", + "pro", + "aaa.pro", + "aca.pro", + "acct.pro", + "avocat.pro", + "bar.pro", + "cpa.pro", + "eng.pro", + "jur.pro", + "law.pro", + "med.pro", + "recht.pro", + "ps", + "edu.ps", + "gov.ps", + "sec.ps", + "plo.ps", + "com.ps", + "org.ps", + "net.ps", + "pt", + "net.pt", + "gov.pt", + "org.pt", + "edu.pt", + "int.pt", + "publ.pt", + "com.pt", + "nome.pt", + "pw", + "co.pw", + "ne.pw", + "or.pw", + "ed.pw", + "go.pw", + "belau.pw", + "py", + "com.py", + "coop.py", + "edu.py", + "gov.py", + "mil.py", + "net.py", + "org.py", + "qa", + "com.qa", + "edu.qa", + "gov.qa", + "mil.qa", + "name.qa", + "net.qa", + "org.qa", + "sch.qa", + "re", + "asso.re", + "com.re", + "nom.re", + "ro", + "arts.ro", + "com.ro", + "firm.ro", + "info.ro", + "nom.ro", + "nt.ro", + "org.ro", + "rec.ro", + "store.ro", + "tm.ro", + "www.ro", + "rs", + "ac.rs", + "co.rs", + "edu.rs", + "gov.rs", + "in.rs", + "org.rs", + "ru", + 
"ac.ru", + "edu.ru", + "gov.ru", + "int.ru", + "mil.ru", + "test.ru", + "rw", + "gov.rw", + "net.rw", + "edu.rw", + "ac.rw", + "com.rw", + "co.rw", + "int.rw", + "mil.rw", + "gouv.rw", + "sa", + "com.sa", + "net.sa", + "org.sa", + "gov.sa", + "med.sa", + "pub.sa", + "edu.sa", + "sch.sa", + "sb", + "com.sb", + "edu.sb", + "gov.sb", + "net.sb", + "org.sb", + "sc", + "com.sc", + "gov.sc", + "net.sc", + "org.sc", + "edu.sc", + "sd", + "com.sd", + "net.sd", + "org.sd", + "edu.sd", + "med.sd", + "tv.sd", + "gov.sd", + "info.sd", + "se", + "a.se", + "ac.se", + "b.se", + "bd.se", + "brand.se", + "c.se", + "d.se", + "e.se", + "f.se", + "fh.se", + "fhsk.se", + "fhv.se", + "g.se", + "h.se", + "i.se", + "k.se", + "komforb.se", + "kommunalforbund.se", + "komvux.se", + "l.se", + "lanbib.se", + "m.se", + "n.se", + "naturbruksgymn.se", + "o.se", + "org.se", + "p.se", + "parti.se", + "pp.se", + "press.se", + "r.se", + "s.se", + "t.se", + "tm.se", + "u.se", + "w.se", + "x.se", + "y.se", + "z.se", + "sg", + "com.sg", + "net.sg", + "org.sg", + "gov.sg", + "edu.sg", + "per.sg", + "sh", + "com.sh", + "net.sh", + "gov.sh", + "org.sh", + "mil.sh", + "si", + "sj", + "sk", + "sl", + "com.sl", + "net.sl", + "edu.sl", + "gov.sl", + "org.sl", + "sm", + "sn", + "art.sn", + "com.sn", + "edu.sn", + "gouv.sn", + "org.sn", + "perso.sn", + "univ.sn", + "so", + "com.so", + "net.so", + "org.so", + "sr", + "st", + "co.st", + "com.st", + "consulado.st", + "edu.st", + "embaixada.st", + "gov.st", + "mil.st", + "net.st", + "org.st", + "principe.st", + "saotome.st", + "store.st", + "su", + "sv", + "com.sv", + "edu.sv", + "gob.sv", + "org.sv", + "red.sv", + "sx", + "gov.sx", + "sy", + "edu.sy", + "gov.sy", + "net.sy", + "mil.sy", + "com.sy", + "org.sy", + "sz", + "co.sz", + "ac.sz", + "org.sz", + "tc", + "td", + "tel", + "tf", + "tg", + "th", + "ac.th", + "co.th", + "go.th", + "in.th", + "mi.th", + "net.th", + "or.th", + "tj", + "ac.tj", + "biz.tj", + "co.tj", + "com.tj", + "edu.tj", + "go.tj", + "gov.tj", + "int.tj", + "mil.tj", + "name.tj", + "net.tj", + "nic.tj", + "org.tj", + "test.tj", + "web.tj", + "tk", + "tl", + "gov.tl", + "tm", + "com.tm", + "co.tm", + "org.tm", + "net.tm", + "nom.tm", + "gov.tm", + "mil.tm", + "edu.tm", + "tn", + "com.tn", + "ens.tn", + "fin.tn", + "gov.tn", + "ind.tn", + "intl.tn", + "nat.tn", + "net.tn", + "org.tn", + "info.tn", + "perso.tn", + "tourism.tn", + "edunet.tn", + "rnrt.tn", + "rns.tn", + "rnu.tn", + "mincom.tn", + "agrinet.tn", + "defense.tn", + "turen.tn", + "to", + "com.to", + "gov.to", + "net.to", + "org.to", + "edu.to", + "mil.to", + "tr", + "com.tr", + "info.tr", + "biz.tr", + "net.tr", + "org.tr", + "web.tr", + "gen.tr", + "tv.tr", + "av.tr", + "dr.tr", + "bbs.tr", + "name.tr", + "tel.tr", + "gov.tr", + "bel.tr", + "pol.tr", + "mil.tr", + "k12.tr", + "edu.tr", + "kep.tr", + "nc.tr", + "gov.nc.tr", + "travel", + "tt", + "co.tt", + "com.tt", + "org.tt", + "net.tt", + "biz.tt", + "info.tt", + "pro.tt", + "int.tt", + "coop.tt", + "jobs.tt", + "mobi.tt", + "travel.tt", + "museum.tt", + "aero.tt", + "name.tt", + "gov.tt", + "edu.tt", + "tv", + "tw", + "edu.tw", + "gov.tw", + "mil.tw", + "com.tw", + "net.tw", + "org.tw", + "idv.tw", + "game.tw", + "ebiz.tw", + "club.tw", + "xn--zf0ao64a.tw", + "xn--uc0atv.tw", + "xn--czrw28b.tw", + "tz", + "ac.tz", + "co.tz", + "go.tz", + "hotel.tz", + "info.tz", + "me.tz", + "mil.tz", + "mobi.tz", + "ne.tz", + "or.tz", + "sc.tz", + "tv.tz", + "ua", + "com.ua", + "edu.ua", + "gov.ua", + "in.ua", + "net.ua", + "org.ua", + "cherkassy.ua", + "cherkasy.ua", + 
"chernigov.ua", + "chernihiv.ua", + "chernivtsi.ua", + "chernovtsy.ua", + "ck.ua", + "cn.ua", + "cr.ua", + "crimea.ua", + "cv.ua", + "dn.ua", + "dnepropetrovsk.ua", + "dnipropetrovsk.ua", + "dominic.ua", + "donetsk.ua", + "dp.ua", + "if.ua", + "ivano-frankivsk.ua", + "kh.ua", + "kharkiv.ua", + "kharkov.ua", + "kherson.ua", + "khmelnitskiy.ua", + "khmelnytskyi.ua", + "kiev.ua", + "kirovograd.ua", + "km.ua", + "kr.ua", + "krym.ua", + "ks.ua", + "kv.ua", + "kyiv.ua", + "lg.ua", + "lt.ua", + "lugansk.ua", + "lutsk.ua", + "lv.ua", + "lviv.ua", + "mk.ua", + "mykolaiv.ua", + "nikolaev.ua", + "od.ua", + "odesa.ua", + "odessa.ua", + "pl.ua", + "poltava.ua", + "rivne.ua", + "rovno.ua", + "rv.ua", + "sb.ua", + "sebastopol.ua", + "sevastopol.ua", + "sm.ua", + "sumy.ua", + "te.ua", + "ternopil.ua", + "uz.ua", + "uzhgorod.ua", + "vinnica.ua", + "vinnytsia.ua", + "vn.ua", + "volyn.ua", + "yalta.ua", + "zaporizhzhe.ua", + "zaporizhzhia.ua", + "zhitomir.ua", + "zhytomyr.ua", + "zp.ua", + "zt.ua", + "ug", + "co.ug", + "or.ug", + "ac.ug", + "sc.ug", + "go.ug", + "ne.ug", + "com.ug", + "org.ug", + "uk", + "ac.uk", + "co.uk", + "gov.uk", + "ltd.uk", + "me.uk", + "net.uk", + "nhs.uk", + "org.uk", + "plc.uk", + "police.uk", + "*.sch.uk", + "us", + "dni.us", + "fed.us", + "isa.us", + "kids.us", + "nsn.us", + "ak.us", + "al.us", + "ar.us", + "as.us", + "az.us", + "ca.us", + "co.us", + "ct.us", + "dc.us", + "de.us", + "fl.us", + "ga.us", + "gu.us", + "hi.us", + "ia.us", + "id.us", + "il.us", + "in.us", + "ks.us", + "ky.us", + "la.us", + "ma.us", + "md.us", + "me.us", + "mi.us", + "mn.us", + "mo.us", + "ms.us", + "mt.us", + "nc.us", + "nd.us", + "ne.us", + "nh.us", + "nj.us", + "nm.us", + "nv.us", + "ny.us", + "oh.us", + "ok.us", + "or.us", + "pa.us", + "pr.us", + "ri.us", + "sc.us", + "sd.us", + "tn.us", + "tx.us", + "ut.us", + "vi.us", + "vt.us", + "va.us", + "wa.us", + "wi.us", + "wv.us", + "wy.us", + "k12.ak.us", + "k12.al.us", + "k12.ar.us", + "k12.as.us", + "k12.az.us", + "k12.ca.us", + "k12.co.us", + "k12.ct.us", + "k12.dc.us", + "k12.de.us", + "k12.fl.us", + "k12.ga.us", + "k12.gu.us", + "k12.ia.us", + "k12.id.us", + "k12.il.us", + "k12.in.us", + "k12.ks.us", + "k12.ky.us", + "k12.la.us", + "k12.ma.us", + "k12.md.us", + "k12.me.us", + "k12.mi.us", + "k12.mn.us", + "k12.mo.us", + "k12.ms.us", + "k12.mt.us", + "k12.nc.us", + "k12.ne.us", + "k12.nh.us", + "k12.nj.us", + "k12.nm.us", + "k12.nv.us", + "k12.ny.us", + "k12.oh.us", + "k12.ok.us", + "k12.or.us", + "k12.pa.us", + "k12.pr.us", + "k12.ri.us", + "k12.sc.us", + "k12.tn.us", + "k12.tx.us", + "k12.ut.us", + "k12.vi.us", + "k12.vt.us", + "k12.va.us", + "k12.wa.us", + "k12.wi.us", + "k12.wy.us", + "cc.ak.us", + "cc.al.us", + "cc.ar.us", + "cc.as.us", + "cc.az.us", + "cc.ca.us", + "cc.co.us", + "cc.ct.us", + "cc.dc.us", + "cc.de.us", + "cc.fl.us", + "cc.ga.us", + "cc.gu.us", + "cc.hi.us", + "cc.ia.us", + "cc.id.us", + "cc.il.us", + "cc.in.us", + "cc.ks.us", + "cc.ky.us", + "cc.la.us", + "cc.ma.us", + "cc.md.us", + "cc.me.us", + "cc.mi.us", + "cc.mn.us", + "cc.mo.us", + "cc.ms.us", + "cc.mt.us", + "cc.nc.us", + "cc.nd.us", + "cc.ne.us", + "cc.nh.us", + "cc.nj.us", + "cc.nm.us", + "cc.nv.us", + "cc.ny.us", + "cc.oh.us", + "cc.ok.us", + "cc.or.us", + "cc.pa.us", + "cc.pr.us", + "cc.ri.us", + "cc.sc.us", + "cc.sd.us", + "cc.tn.us", + "cc.tx.us", + "cc.ut.us", + "cc.vi.us", + "cc.vt.us", + "cc.va.us", + "cc.wa.us", + "cc.wi.us", + "cc.wv.us", + "cc.wy.us", + "lib.ak.us", + "lib.al.us", + "lib.ar.us", + "lib.as.us", + "lib.az.us", + "lib.ca.us", + "lib.co.us", + 
"lib.ct.us", + "lib.dc.us", + "lib.fl.us", + "lib.ga.us", + "lib.gu.us", + "lib.hi.us", + "lib.ia.us", + "lib.id.us", + "lib.il.us", + "lib.in.us", + "lib.ks.us", + "lib.ky.us", + "lib.la.us", + "lib.ma.us", + "lib.md.us", + "lib.me.us", + "lib.mi.us", + "lib.mn.us", + "lib.mo.us", + "lib.ms.us", + "lib.mt.us", + "lib.nc.us", + "lib.nd.us", + "lib.ne.us", + "lib.nh.us", + "lib.nj.us", + "lib.nm.us", + "lib.nv.us", + "lib.ny.us", + "lib.oh.us", + "lib.ok.us", + "lib.or.us", + "lib.pa.us", + "lib.pr.us", + "lib.ri.us", + "lib.sc.us", + "lib.sd.us", + "lib.tn.us", + "lib.tx.us", + "lib.ut.us", + "lib.vi.us", + "lib.vt.us", + "lib.va.us", + "lib.wa.us", + "lib.wi.us", + "lib.wy.us", + "pvt.k12.ma.us", + "chtr.k12.ma.us", + "paroch.k12.ma.us", + "ann-arbor.mi.us", + "cog.mi.us", + "dst.mi.us", + "eaton.mi.us", + "gen.mi.us", + "mus.mi.us", + "tec.mi.us", + "washtenaw.mi.us", + "uy", + "com.uy", + "edu.uy", + "gub.uy", + "mil.uy", + "net.uy", + "org.uy", + "uz", + "co.uz", + "com.uz", + "net.uz", + "org.uz", + "va", + "vc", + "com.vc", + "net.vc", + "org.vc", + "gov.vc", + "mil.vc", + "edu.vc", + "ve", + "arts.ve", + "co.ve", + "com.ve", + "e12.ve", + "edu.ve", + "firm.ve", + "gob.ve", + "gov.ve", + "info.ve", + "int.ve", + "mil.ve", + "net.ve", + "org.ve", + "rec.ve", + "store.ve", + "tec.ve", + "web.ve", + "vg", + "vi", + "co.vi", + "com.vi", + "k12.vi", + "net.vi", + "org.vi", + "vn", + "com.vn", + "net.vn", + "org.vn", + "edu.vn", + "gov.vn", + "int.vn", + "ac.vn", + "biz.vn", + "info.vn", + "name.vn", + "pro.vn", + "health.vn", + "vu", + "com.vu", + "edu.vu", + "net.vu", + "org.vu", + "wf", + "ws", + "com.ws", + "net.ws", + "org.ws", + "gov.ws", + "edu.ws", + "yt", + "xn--mgbaam7a8h", + "xn--y9a3aq", + "xn--54b7fta0cc", + "xn--90ae", + "xn--90ais", + "xn--fiqs8s", + "xn--fiqz9s", + "xn--lgbbat1ad8j", + "xn--wgbh1c", + "xn--e1a4c", + "xn--node", + "xn--qxam", + "xn--j6w193g", + "xn--2scrj9c", + "xn--3hcrj9c", + "xn--45br5cyl", + "xn--h2breg3eve", + "xn--h2brj9c8c", + "xn--mgbgu82a", + "xn--rvc1e0am3e", + "xn--h2brj9c", + "xn--mgbbh1a71e", + "xn--fpcrj9c3d", + "xn--gecrj9c", + "xn--s9brj9c", + "xn--45brj9c", + "xn--xkc2dl3a5ee0h", + "xn--mgba3a4f16a", + "xn--mgba3a4fra", + "xn--mgbtx2b", + "xn--mgbayh7gpa", + "xn--3e0b707e", + "xn--80ao21a", + "xn--fzc2c9e2c", + "xn--xkc2al3hye2a", + "xn--mgbc0a9azcg", + "xn--d1alf", + "xn--l1acc", + "xn--mix891f", + "xn--mix082f", + "xn--mgbx4cd0ab", + "xn--mgb9awbf", + "xn--mgbai9azgqp6j", + "xn--mgbai9a5eva00b", + "xn--ygbi2ammx", + "xn--90a3ac", + "xn--o1ac.xn--90a3ac", + "xn--c1avg.xn--90a3ac", + "xn--90azh.xn--90a3ac", + "xn--d1at.xn--90a3ac", + "xn--o1ach.xn--90a3ac", + "xn--80au.xn--90a3ac", + "xn--p1ai", + "xn--wgbl6a", + "xn--mgberp4a5d4ar", + "xn--mgberp4a5d4a87g", + "xn--mgbqly7c0a67fbc", + "xn--mgbqly7cvafr", + "xn--mgbpl2fh", + "xn--yfro4i67o", + "xn--clchc0ea0b2g2a9gcd", + "xn--ogbpf8fl", + "xn--mgbtf8fl", + "xn--o3cw4h", + "xn--12c1fe0br.xn--o3cw4h", + "xn--12co0c3b4eva.xn--o3cw4h", + "xn--h3cuzk1di.xn--o3cw4h", + "xn--o3cyx2a.xn--o3cw4h", + "xn--m3ch0j3a.xn--o3cw4h", + "xn--12cfi8ixb8l.xn--o3cw4h", + "xn--pgbs0dh", + "xn--kpry57d", + "xn--kprw13d", + "xn--nnx388a", + "xn--j1amh", + "xn--mgb2ddes", + "xxx", + "*.ye", + "ac.za", + "agric.za", + "alt.za", + "co.za", + "edu.za", + "gov.za", + "grondar.za", + "law.za", + "mil.za", + "net.za", + "ngo.za", + "nis.za", + "nom.za", + "org.za", + "school.za", + "tm.za", + "web.za", + "zm", + "ac.zm", + "biz.zm", + "co.zm", + "com.zm", + "edu.zm", + "gov.zm", + "info.zm", + "mil.zm", + "net.zm", + 
"org.zm", + "sch.zm", + "zw", + "ac.zw", + "co.zw", + "gov.zw", + "mil.zw", + "org.zw", + "aaa", + "aarp", + "abarth", + "abb", + "abbott", + "abbvie", + "abc", + "able", + "abogado", + "abudhabi", + "academy", + "accenture", + "accountant", + "accountants", + "aco", + "active", + "actor", + "adac", + "ads", + "adult", + "aeg", + "aetna", + "afamilycompany", + "afl", + "africa", + "agakhan", + "agency", + "aig", + "aigo", + "airbus", + "airforce", + "airtel", + "akdn", + "alfaromeo", + "alibaba", + "alipay", + "allfinanz", + "allstate", + "ally", + "alsace", + "alstom", + "americanexpress", + "americanfamily", + "amex", + "amfam", + "amica", + "amsterdam", + "analytics", + "android", + "anquan", + "anz", + "aol", + "apartments", + "app", + "apple", + "aquarelle", + "arab", + "aramco", + "archi", + "army", + "art", + "arte", + "asda", + "associates", + "athleta", + "attorney", + "auction", + "audi", + "audible", + "audio", + "auspost", + "author", + "auto", + "autos", + "avianca", + "aws", + "axa", + "azure", + "baby", + "baidu", + "banamex", + "bananarepublic", + "band", + "bank", + "bar", + "barcelona", + "barclaycard", + "barclays", + "barefoot", + "bargains", + "baseball", + "basketball", + "bauhaus", + "bayern", + "bbc", + "bbt", + "bbva", + "bcg", + "bcn", + "beats", + "beauty", + "beer", + "bentley", + "berlin", + "best", + "bestbuy", + "bet", + "bharti", + "bible", + "bid", + "bike", + "bing", + "bingo", + "bio", + "black", + "blackfriday", + "blanco", + "blockbuster", + "blog", + "bloomberg", + "blue", + "bms", + "bmw", + "bnl", + "bnpparibas", + "boats", + "boehringer", + "bofa", + "bom", + "bond", + "boo", + "book", + "booking", + "boots", + "bosch", + "bostik", + "boston", + "bot", + "boutique", + "box", + "bradesco", + "bridgestone", + "broadway", + "broker", + "brother", + "brussels", + "budapest", + "bugatti", + "build", + "builders", + "business", + "buy", + "buzz", + "bzh", + "cab", + "cafe", + "cal", + "call", + "calvinklein", + "cam", + "camera", + "camp", + "cancerresearch", + "canon", + "capetown", + "capital", + "capitalone", + "car", + "caravan", + "cards", + "care", + "career", + "careers", + "cars", + "cartier", + "casa", + "case", + "caseih", + "cash", + "casino", + "catering", + "catholic", + "cba", + "cbn", + "cbre", + "cbs", + "ceb", + "center", + "ceo", + "cern", + "cfa", + "cfd", + "chanel", + "channel", + "chase", + "chat", + "cheap", + "chintai", + "christmas", + "chrome", + "chrysler", + "church", + "cipriani", + "circle", + "cisco", + "citadel", + "citi", + "citic", + "city", + "cityeats", + "claims", + "cleaning", + "click", + "clinic", + "clinique", + "clothing", + "cloud", + "club", + "clubmed", + "coach", + "codes", + "coffee", + "college", + "cologne", + "comcast", + "commbank", + "community", + "company", + "compare", + "computer", + "comsec", + "condos", + "construction", + "consulting", + "contact", + "contractors", + "cooking", + "cookingchannel", + "cool", + "corsica", + "country", + "coupon", + "coupons", + "courses", + "credit", + "creditcard", + "creditunion", + "cricket", + "crown", + "crs", + "cruise", + "cruises", + "csc", + "cuisinella", + "cymru", + "cyou", + "dabur", + "dad", + "dance", + "data", + "date", + "dating", + "datsun", + "day", + "dclk", + "dds", + "deal", + "dealer", + "deals", + "degree", + "delivery", + "dell", + "deloitte", + "delta", + "democrat", + "dental", + "dentist", + "desi", + "design", + "dev", + "dhl", + "diamonds", + "diet", + "digital", + "direct", + "directory", + "discount", + "discover", + "dish", + "diy", + 
"dnp", + "docs", + "doctor", + "dodge", + "dog", + "doha", + "domains", + "dot", + "download", + "drive", + "dtv", + "dubai", + "duck", + "dunlop", + "duns", + "dupont", + "durban", + "dvag", + "dvr", + "earth", + "eat", + "eco", + "edeka", + "education", + "email", + "emerck", + "energy", + "engineer", + "engineering", + "enterprises", + "epost", + "epson", + "equipment", + "ericsson", + "erni", + "esq", + "estate", + "esurance", + "etisalat", + "eurovision", + "eus", + "events", + "everbank", + "exchange", + "expert", + "exposed", + "express", + "extraspace", + "fage", + "fail", + "fairwinds", + "faith", + "family", + "fan", + "fans", + "farm", + "farmers", + "fashion", + "fast", + "fedex", + "feedback", + "ferrari", + "ferrero", + "fiat", + "fidelity", + "fido", + "film", + "final", + "finance", + "financial", + "fire", + "firestone", + "firmdale", + "fish", + "fishing", + "fit", + "fitness", + "flickr", + "flights", + "flir", + "florist", + "flowers", + "fly", + "foo", + "food", + "foodnetwork", + "football", + "ford", + "forex", + "forsale", + "forum", + "foundation", + "fox", + "free", + "fresenius", + "frl", + "frogans", + "frontdoor", + "frontier", + "ftr", + "fujitsu", + "fujixerox", + "fun", + "fund", + "furniture", + "futbol", + "fyi", + "gal", + "gallery", + "gallo", + "gallup", + "game", + "games", + "gap", + "garden", + "gbiz", + "gdn", + "gea", + "gent", + "genting", + "george", + "ggee", + "gift", + "gifts", + "gives", + "giving", + "glade", + "glass", + "gle", + "global", + "globo", + "gmail", + "gmbh", + "gmo", + "gmx", + "godaddy", + "gold", + "goldpoint", + "golf", + "goo", + "goodhands", + "goodyear", + "goog", + "google", + "gop", + "got", + "grainger", + "graphics", + "gratis", + "green", + "gripe", + "grocery", + "group", + "guardian", + "gucci", + "guge", + "guide", + "guitars", + "guru", + "hair", + "hamburg", + "hangout", + "haus", + "hbo", + "hdfc", + "hdfcbank", + "health", + "healthcare", + "help", + "helsinki", + "here", + "hermes", + "hgtv", + "hiphop", + "hisamitsu", + "hitachi", + "hiv", + "hkt", + "hockey", + "holdings", + "holiday", + "homedepot", + "homegoods", + "homes", + "homesense", + "honda", + "honeywell", + "horse", + "hospital", + "host", + "hosting", + "hot", + "hoteles", + "hotels", + "hotmail", + "house", + "how", + "hsbc", + "hughes", + "hyatt", + "hyundai", + "ibm", + "icbc", + "ice", + "icu", + "ieee", + "ifm", + "ikano", + "imamat", + "imdb", + "immo", + "immobilien", + "industries", + "infiniti", + "ing", + "ink", + "institute", + "insurance", + "insure", + "intel", + "international", + "intuit", + "investments", + "ipiranga", + "irish", + "iselect", + "ismaili", + "ist", + "istanbul", + "itau", + "itv", + "iveco", + "iwc", + "jaguar", + "java", + "jcb", + "jcp", + "jeep", + "jetzt", + "jewelry", + "jio", + "jlc", + "jll", + "jmp", + "jnj", + "joburg", + "jot", + "joy", + "jpmorgan", + "jprs", + "juegos", + "juniper", + "kaufen", + "kddi", + "kerryhotels", + "kerrylogistics", + "kerryproperties", + "kfh", + "kia", + "kim", + "kinder", + "kindle", + "kitchen", + "kiwi", + "koeln", + "komatsu", + "kosher", + "kpmg", + "kpn", + "krd", + "kred", + "kuokgroup", + "kyoto", + "lacaixa", + "ladbrokes", + "lamborghini", + "lamer", + "lancaster", + "lancia", + "lancome", + "land", + "landrover", + "lanxess", + "lasalle", + "lat", + "latino", + "latrobe", + "law", + "lawyer", + "lds", + "lease", + "leclerc", + "lefrak", + "legal", + "lego", + "lexus", + "lgbt", + "liaison", + "lidl", + "life", + "lifeinsurance", + "lifestyle", + "lighting", + 
"like", + "lilly", + "limited", + "limo", + "lincoln", + "linde", + "link", + "lipsy", + "live", + "living", + "lixil", + "loan", + "loans", + "locker", + "locus", + "loft", + "lol", + "london", + "lotte", + "lotto", + "love", + "lpl", + "lplfinancial", + "ltd", + "ltda", + "lundbeck", + "lupin", + "luxe", + "luxury", + "macys", + "madrid", + "maif", + "maison", + "makeup", + "man", + "management", + "mango", + "map", + "market", + "marketing", + "markets", + "marriott", + "marshalls", + "maserati", + "mattel", + "mba", + "mckinsey", + "med", + "media", + "meet", + "melbourne", + "meme", + "memorial", + "men", + "menu", + "meo", + "merckmsd", + "metlife", + "miami", + "microsoft", + "mini", + "mint", + "mit", + "mitsubishi", + "mlb", + "mls", + "mma", + "mobile", + "mobily", + "moda", + "moe", + "moi", + "mom", + "monash", + "money", + "monster", + "mopar", + "mormon", + "mortgage", + "moscow", + "moto", + "motorcycles", + "mov", + "movie", + "movistar", + "msd", + "mtn", + "mtpc", + "mtr", + "mutual", + "nab", + "nadex", + "nagoya", + "nationwide", + "natura", + "navy", + "nba", + "nec", + "netbank", + "netflix", + "network", + "neustar", + "new", + "newholland", + "news", + "next", + "nextdirect", + "nexus", + "nfl", + "ngo", + "nhk", + "nico", + "nike", + "nikon", + "ninja", + "nissan", + "nissay", + "nokia", + "northwesternmutual", + "norton", + "now", + "nowruz", + "nowtv", + "nra", + "nrw", + "ntt", + "nyc", + "obi", + "observer", + "off", + "office", + "okinawa", + "olayan", + "olayangroup", + "oldnavy", + "ollo", + "omega", + "one", + "ong", + "onl", + "online", + "onyourside", + "ooo", + "open", + "oracle", + "orange", + "organic", + "origins", + "osaka", + "otsuka", + "ott", + "ovh", + "page", + "panasonic", + "panerai", + "paris", + "pars", + "partners", + "parts", + "party", + "passagens", + "pay", + "pccw", + "pet", + "pfizer", + "pharmacy", + "phd", + "philips", + "phone", + "photo", + "photography", + "photos", + "physio", + "piaget", + "pics", + "pictet", + "pictures", + "pid", + "pin", + "ping", + "pink", + "pioneer", + "pizza", + "place", + "play", + "playstation", + "plumbing", + "plus", + "pnc", + "pohl", + "poker", + "politie", + "porn", + "pramerica", + "praxi", + "press", + "prime", + "prod", + "productions", + "prof", + "progressive", + "promo", + "properties", + "property", + "protection", + "pru", + "prudential", + "pub", + "pwc", + "qpon", + "quebec", + "quest", + "qvc", + "racing", + "radio", + "raid", + "read", + "realestate", + "realtor", + "realty", + "recipes", + "red", + "redstone", + "redumbrella", + "rehab", + "reise", + "reisen", + "reit", + "reliance", + "ren", + "rent", + "rentals", + "repair", + "report", + "republican", + "rest", + "restaurant", + "review", + "reviews", + "rexroth", + "rich", + "richardli", + "ricoh", + "rightathome", + "ril", + "rio", + "rip", + "rmit", + "rocher", + "rocks", + "rodeo", + "rogers", + "room", + "rsvp", + "rugby", + "ruhr", + "run", + "rwe", + "ryukyu", + "saarland", + "safe", + "safety", + "sakura", + "sale", + "salon", + "samsclub", + "samsung", + "sandvik", + "sandvikcoromant", + "sanofi", + "sap", + "sapo", + "sarl", + "sas", + "save", + "saxo", + "sbi", + "sbs", + "sca", + "scb", + "schaeffler", + "schmidt", + "scholarships", + "school", + "schule", + "schwarz", + "science", + "scjohnson", + "scor", + "scot", + "search", + "seat", + "secure", + "security", + "seek", + "select", + "sener", + "services", + "ses", + "seven", + "sew", + "sex", + "sexy", + "sfr", + "shangrila", + "sharp", + "shaw", + "shell", + 
"shia", + "shiksha", + "shoes", + "shop", + "shopping", + "shouji", + "show", + "showtime", + "shriram", + "silk", + "sina", + "singles", + "site", + "ski", + "skin", + "sky", + "skype", + "sling", + "smart", + "smile", + "sncf", + "soccer", + "social", + "softbank", + "software", + "sohu", + "solar", + "solutions", + "song", + "sony", + "soy", + "space", + "spiegel", + "spot", + "spreadbetting", + "srl", + "srt", + "stada", + "staples", + "star", + "starhub", + "statebank", + "statefarm", + "statoil", + "stc", + "stcgroup", + "stockholm", + "storage", + "store", + "stream", + "studio", + "study", + "style", + "sucks", + "supplies", + "supply", + "support", + "surf", + "surgery", + "suzuki", + "swatch", + "swiftcover", + "swiss", + "sydney", + "symantec", + "systems", + "tab", + "taipei", + "talk", + "taobao", + "target", + "tatamotors", + "tatar", + "tattoo", + "tax", + "taxi", + "tci", + "tdk", + "team", + "tech", + "technology", + "telecity", + "telefonica", + "temasek", + "tennis", + "teva", + "thd", + "theater", + "theatre", + "tiaa", + "tickets", + "tienda", + "tiffany", + "tips", + "tires", + "tirol", + "tjmaxx", + "tjx", + "tkmaxx", + "tmall", + "today", + "tokyo", + "tools", + "top", + "toray", + "toshiba", + "total", + "tours", + "town", + "toyota", + "toys", + "trade", + "trading", + "training", + "travelchannel", + "travelers", + "travelersinsurance", + "trust", + "trv", + "tube", + "tui", + "tunes", + "tushu", + "tvs", + "ubank", + "ubs", + "uconnect", + "unicom", + "university", + "uno", + "uol", + "ups", + "vacations", + "vana", + "vanguard", + "vegas", + "ventures", + "verisign", + "versicherung", + "vet", + "viajes", + "video", + "vig", + "viking", + "villas", + "vin", + "vip", + "virgin", + "visa", + "vision", + "vista", + "vistaprint", + "viva", + "vivo", + "vlaanderen", + "vodka", + "volkswagen", + "volvo", + "vote", + "voting", + "voto", + "voyage", + "vuelos", + "wales", + "walmart", + "walter", + "wang", + "wanggou", + "warman", + "watch", + "watches", + "weather", + "weatherchannel", + "webcam", + "weber", + "website", + "wed", + "wedding", + "weibo", + "weir", + "whoswho", + "wien", + "wiki", + "williamhill", + "win", + "windows", + "wine", + "winners", + "wme", + "wolterskluwer", + "woodside", + "work", + "works", + "world", + "wow", + "wtc", + "wtf", + "xbox", + "xerox", + "xfinity", + "xihuan", + "xin", + "xn--11b4c3d", + "xn--1ck2e1b", + "xn--1qqw23a", + "xn--30rr7y", + "xn--3bst00m", + "xn--3ds443g", + "xn--3oq18vl8pn36a", + "xn--3pxu8k", + "xn--42c2d9a", + "xn--45q11c", + "xn--4gbrim", + "xn--55qw42g", + "xn--55qx5d", + "xn--5su34j936bgsg", + "xn--5tzm5g", + "xn--6frz82g", + "xn--6qq986b3xl", + "xn--80adxhks", + "xn--80aqecdr1a", + "xn--80asehdb", + "xn--80aswg", + "xn--8y0a063a", + "xn--9dbq2a", + "xn--9et52u", + "xn--9krt00a", + "xn--b4w605ferd", + "xn--bck1b9a5dre4c", + "xn--c1avg", + "xn--c2br7g", + "xn--cck2b3b", + "xn--cg4bki", + "xn--czr694b", + "xn--czrs0t", + "xn--czru2d", + "xn--d1acj3b", + "xn--eckvdtc9d", + "xn--efvy88h", + "xn--estv75g", + "xn--fct429k", + "xn--fhbei", + "xn--fiq228c5hs", + "xn--fiq64b", + "xn--fjq720a", + "xn--flw351e", + "xn--fzys8d69uvgm", + "xn--g2xx48c", + "xn--gckr3f0f", + "xn--gk3at1e", + "xn--hxt814e", + "xn--i1b6b1a6a2e", + "xn--imr513n", + "xn--io0a7i", + "xn--j1aef", + "xn--jlq61u9w7b", + "xn--jvr189m", + "xn--kcrx77d1x4a", + "xn--kpu716f", + "xn--kput3i", + "xn--mgba3a3ejt", + "xn--mgba7c0bbn0a", + "xn--mgbaakc7dvf", + "xn--mgbab2bd", + "xn--mgbb9fbpob", + "xn--mgbca7dzdo", + "xn--mgbi4ecexp", + "xn--mgbt3dhd", + 
"xn--mk1bu44c", + "xn--mxtq1m", + "xn--ngbc5azd", + "xn--ngbe9e0a", + "xn--ngbrx", + "xn--nqv7f", + "xn--nqv7fs00ema", + "xn--nyqy26a", + "xn--p1acf", + "xn--pbt977c", + "xn--pssy2u", + "xn--q9jyb4c", + "xn--qcka1pmc", + "xn--rhqv96g", + "xn--rovu88b", + "xn--ses554g", + "xn--t60b56a", + "xn--tckwe", + "xn--tiq49xqyj", + "xn--unup4y", + "xn--vermgensberater-ctb", + "xn--vermgensberatung-pwb", + "xn--vhquv", + "xn--vuq861b", + "xn--w4r85el8fhu5dnra", + "xn--w4rs40l", + "xn--xhq521b", + "xn--zfr164b", + "xperia", + "xyz", + "yachts", + "yahoo", + "yamaxun", + "yandex", + "yodobashi", + "yoga", + "yokohama", + "you", + "youtube", + "yun", + "zappos", + "zara", + "zero", + "zip", + "zippo", + "zone", + "zuerich", + "cc.ua", + "inf.ua", + "ltd.ua", + "1password.ca", + "1password.com", + "1password.eu", + "beep.pl", + "*.compute.estate", + "*.alces.network", + "alwaysdata.net", + "cloudfront.net", + "*.compute.amazonaws.com", + "*.compute-1.amazonaws.com", + "*.compute.amazonaws.com.cn", + "us-east-1.amazonaws.com", + "cn-north-1.eb.amazonaws.com.cn", + "elasticbeanstalk.com", + "ap-northeast-1.elasticbeanstalk.com", + "ap-northeast-2.elasticbeanstalk.com", + "ap-south-1.elasticbeanstalk.com", + "ap-southeast-1.elasticbeanstalk.com", + "ap-southeast-2.elasticbeanstalk.com", + "ca-central-1.elasticbeanstalk.com", + "eu-central-1.elasticbeanstalk.com", + "eu-west-1.elasticbeanstalk.com", + "eu-west-2.elasticbeanstalk.com", + "eu-west-3.elasticbeanstalk.com", + "sa-east-1.elasticbeanstalk.com", + "us-east-1.elasticbeanstalk.com", + "us-east-2.elasticbeanstalk.com", + "us-gov-west-1.elasticbeanstalk.com", + "us-west-1.elasticbeanstalk.com", + "us-west-2.elasticbeanstalk.com", + "*.elb.amazonaws.com", + "*.elb.amazonaws.com.cn", + "s3.amazonaws.com", + "s3-ap-northeast-1.amazonaws.com", + "s3-ap-northeast-2.amazonaws.com", + "s3-ap-south-1.amazonaws.com", + "s3-ap-southeast-1.amazonaws.com", + "s3-ap-southeast-2.amazonaws.com", + "s3-ca-central-1.amazonaws.com", + "s3-eu-central-1.amazonaws.com", + "s3-eu-west-1.amazonaws.com", + "s3-eu-west-2.amazonaws.com", + "s3-eu-west-3.amazonaws.com", + "s3-external-1.amazonaws.com", + "s3-fips-us-gov-west-1.amazonaws.com", + "s3-sa-east-1.amazonaws.com", + "s3-us-gov-west-1.amazonaws.com", + "s3-us-east-2.amazonaws.com", + "s3-us-west-1.amazonaws.com", + "s3-us-west-2.amazonaws.com", + "s3.ap-northeast-2.amazonaws.com", + "s3.ap-south-1.amazonaws.com", + "s3.cn-north-1.amazonaws.com.cn", + "s3.ca-central-1.amazonaws.com", + "s3.eu-central-1.amazonaws.com", + "s3.eu-west-2.amazonaws.com", + "s3.eu-west-3.amazonaws.com", + "s3.us-east-2.amazonaws.com", + "s3.dualstack.ap-northeast-1.amazonaws.com", + "s3.dualstack.ap-northeast-2.amazonaws.com", + "s3.dualstack.ap-south-1.amazonaws.com", + "s3.dualstack.ap-southeast-1.amazonaws.com", + "s3.dualstack.ap-southeast-2.amazonaws.com", + "s3.dualstack.ca-central-1.amazonaws.com", + "s3.dualstack.eu-central-1.amazonaws.com", + "s3.dualstack.eu-west-1.amazonaws.com", + "s3.dualstack.eu-west-2.amazonaws.com", + "s3.dualstack.eu-west-3.amazonaws.com", + "s3.dualstack.sa-east-1.amazonaws.com", + "s3.dualstack.us-east-1.amazonaws.com", + "s3.dualstack.us-east-2.amazonaws.com", + "s3-website-us-east-1.amazonaws.com", + "s3-website-us-west-1.amazonaws.com", + "s3-website-us-west-2.amazonaws.com", + "s3-website-ap-northeast-1.amazonaws.com", + "s3-website-ap-southeast-1.amazonaws.com", + "s3-website-ap-southeast-2.amazonaws.com", + "s3-website-eu-west-1.amazonaws.com", + "s3-website-sa-east-1.amazonaws.com", + 
"s3-website.ap-northeast-2.amazonaws.com", + "s3-website.ap-south-1.amazonaws.com", + "s3-website.ca-central-1.amazonaws.com", + "s3-website.eu-central-1.amazonaws.com", + "s3-website.eu-west-2.amazonaws.com", + "s3-website.eu-west-3.amazonaws.com", + "s3-website.us-east-2.amazonaws.com", + "t3l3p0rt.net", + "tele.amune.org", + "on-aptible.com", + "user.party.eus", + "pimienta.org", + "poivron.org", + "potager.org", + "sweetpepper.org", + "myasustor.com", + "myfritz.net", + "*.awdev.ca", + "*.advisor.ws", + "backplaneapp.io", + "betainabox.com", + "bnr.la", + "boomla.net", + "boxfuse.io", + "square7.ch", + "bplaced.com", + "bplaced.de", + "square7.de", + "bplaced.net", + "square7.net", + "browsersafetymark.io", + "mycd.eu", + "ae.org", + "ar.com", + "br.com", + "cn.com", + "com.de", + "com.se", + "de.com", + "eu.com", + "gb.com", + "gb.net", + "hu.com", + "hu.net", + "jp.net", + "jpn.com", + "kr.com", + "mex.com", + "no.com", + "qc.com", + "ru.com", + "sa.com", + "se.com", + "se.net", + "uk.com", + "uk.net", + "us.com", + "uy.com", + "za.bz", + "za.com", + "africa.com", + "gr.com", + "in.net", + "us.org", + "co.com", + "c.la", + "certmgr.org", + "xenapponazure.com", + "virtueeldomein.nl", + "c66.me", + "jdevcloud.com", + "wpdevcloud.com", + "cloudaccess.host", + "freesite.host", + "cloudaccess.net", + "cloudcontrolled.com", + "cloudcontrolapp.com", + "co.ca", + "co.cz", + "c.cdn77.org", + "cdn77-ssl.net", + "r.cdn77.net", + "rsc.cdn77.org", + "ssl.origin.cdn77-secure.org", + "cloudns.asia", + "cloudns.biz", + "cloudns.club", + "cloudns.cc", + "cloudns.eu", + "cloudns.in", + "cloudns.info", + "cloudns.org", + "cloudns.pro", + "cloudns.pw", + "cloudns.us", + "co.nl", + "co.no", + "webhosting.be", + "hosting-cluster.nl", + "dyn.cosidns.de", + "dynamisches-dns.de", + "dnsupdater.de", + "internet-dns.de", + "l-o-g-i-n.de", + "dynamic-dns.info", + "feste-ip.net", + "knx-server.net", + "static-access.net", + "realm.cz", + "*.cryptonomic.net", + "cupcake.is", + "cyon.link", + "cyon.site", + "daplie.me", + "localhost.daplie.me", + "biz.dk", + "co.dk", + "firm.dk", + "reg.dk", + "store.dk", + "debian.net", + "dedyn.io", + "dnshome.de", + "drayddns.com", + "dreamhosters.com", + "mydrobo.com", + "drud.io", + "drud.us", + "duckdns.org", + "dy.fi", + "tunk.org", + "dyndns-at-home.com", + "dyndns-at-work.com", + "dyndns-blog.com", + "dyndns-free.com", + "dyndns-home.com", + "dyndns-ip.com", + "dyndns-mail.com", + "dyndns-office.com", + "dyndns-pics.com", + "dyndns-remote.com", + "dyndns-server.com", + "dyndns-web.com", + "dyndns-wiki.com", + "dyndns-work.com", + "dyndns.biz", + "dyndns.info", + "dyndns.org", + "dyndns.tv", + "at-band-camp.net", + "ath.cx", + "barrel-of-knowledge.info", + "barrell-of-knowledge.info", + "better-than.tv", + "blogdns.com", + "blogdns.net", + "blogdns.org", + "blogsite.org", + "boldlygoingnowhere.org", + "broke-it.net", + "buyshouses.net", + "cechire.com", + "dnsalias.com", + "dnsalias.net", + "dnsalias.org", + "dnsdojo.com", + "dnsdojo.net", + "dnsdojo.org", + "does-it.net", + "doesntexist.com", + "doesntexist.org", + "dontexist.com", + "dontexist.net", + "dontexist.org", + "doomdns.com", + "doomdns.org", + "dvrdns.org", + "dyn-o-saur.com", + "dynalias.com", + "dynalias.net", + "dynalias.org", + "dynathome.net", + "dyndns.ws", + "endofinternet.net", + "endofinternet.org", + "endoftheinternet.org", + "est-a-la-maison.com", + "est-a-la-masion.com", + "est-le-patron.com", + "est-mon-blogueur.com", + "for-better.biz", + "for-more.biz", + "for-our.info", + "for-some.biz", + 
"for-the.biz", + "forgot.her.name", + "forgot.his.name", + "from-ak.com", + "from-al.com", + "from-ar.com", + "from-az.net", + "from-ca.com", + "from-co.net", + "from-ct.com", + "from-dc.com", + "from-de.com", + "from-fl.com", + "from-ga.com", + "from-hi.com", + "from-ia.com", + "from-id.com", + "from-il.com", + "from-in.com", + "from-ks.com", + "from-ky.com", + "from-la.net", + "from-ma.com", + "from-md.com", + "from-me.org", + "from-mi.com", + "from-mn.com", + "from-mo.com", + "from-ms.com", + "from-mt.com", + "from-nc.com", + "from-nd.com", + "from-ne.com", + "from-nh.com", + "from-nj.com", + "from-nm.com", + "from-nv.com", + "from-ny.net", + "from-oh.com", + "from-ok.com", + "from-or.com", + "from-pa.com", + "from-pr.com", + "from-ri.com", + "from-sc.com", + "from-sd.com", + "from-tn.com", + "from-tx.com", + "from-ut.com", + "from-va.com", + "from-vt.com", + "from-wa.com", + "from-wi.com", + "from-wv.com", + "from-wy.com", + "ftpaccess.cc", + "fuettertdasnetz.de", + "game-host.org", + "game-server.cc", + "getmyip.com", + "gets-it.net", + "go.dyndns.org", + "gotdns.com", + "gotdns.org", + "groks-the.info", + "groks-this.info", + "ham-radio-op.net", + "here-for-more.info", + "hobby-site.com", + "hobby-site.org", + "home.dyndns.org", + "homedns.org", + "homeftp.net", + "homeftp.org", + "homeip.net", + "homelinux.com", + "homelinux.net", + "homelinux.org", + "homeunix.com", + "homeunix.net", + "homeunix.org", + "iamallama.com", + "in-the-band.net", + "is-a-anarchist.com", + "is-a-blogger.com", + "is-a-bookkeeper.com", + "is-a-bruinsfan.org", + "is-a-bulls-fan.com", + "is-a-candidate.org", + "is-a-caterer.com", + "is-a-celticsfan.org", + "is-a-chef.com", + "is-a-chef.net", + "is-a-chef.org", + "is-a-conservative.com", + "is-a-cpa.com", + "is-a-cubicle-slave.com", + "is-a-democrat.com", + "is-a-designer.com", + "is-a-doctor.com", + "is-a-financialadvisor.com", + "is-a-geek.com", + "is-a-geek.net", + "is-a-geek.org", + "is-a-green.com", + "is-a-guru.com", + "is-a-hard-worker.com", + "is-a-hunter.com", + "is-a-knight.org", + "is-a-landscaper.com", + "is-a-lawyer.com", + "is-a-liberal.com", + "is-a-libertarian.com", + "is-a-linux-user.org", + "is-a-llama.com", + "is-a-musician.com", + "is-a-nascarfan.com", + "is-a-nurse.com", + "is-a-painter.com", + "is-a-patsfan.org", + "is-a-personaltrainer.com", + "is-a-photographer.com", + "is-a-player.com", + "is-a-republican.com", + "is-a-rockstar.com", + "is-a-socialist.com", + "is-a-soxfan.org", + "is-a-student.com", + "is-a-teacher.com", + "is-a-techie.com", + "is-a-therapist.com", + "is-an-accountant.com", + "is-an-actor.com", + "is-an-actress.com", + "is-an-anarchist.com", + "is-an-artist.com", + "is-an-engineer.com", + "is-an-entertainer.com", + "is-by.us", + "is-certified.com", + "is-found.org", + "is-gone.com", + "is-into-anime.com", + "is-into-cars.com", + "is-into-cartoons.com", + "is-into-games.com", + "is-leet.com", + "is-lost.org", + "is-not-certified.com", + "is-saved.org", + "is-slick.com", + "is-uberleet.com", + "is-very-bad.org", + "is-very-evil.org", + "is-very-good.org", + "is-very-nice.org", + "is-very-sweet.org", + "is-with-theband.com", + "isa-geek.com", + "isa-geek.net", + "isa-geek.org", + "isa-hockeynut.com", + "issmarterthanyou.com", + "isteingeek.de", + "istmein.de", + "kicks-ass.net", + "kicks-ass.org", + "knowsitall.info", + "land-4-sale.us", + "lebtimnetz.de", + "leitungsen.de", + "likes-pie.com", + "likescandy.com", + "merseine.nu", + "mine.nu", + "misconfused.org", + "mypets.ws", + "myphotos.cc", + "neat-url.com", + 
"office-on-the.net", + "on-the-web.tv", + "podzone.net", + "podzone.org", + "readmyblog.org", + "saves-the-whales.com", + "scrapper-site.net", + "scrapping.cc", + "selfip.biz", + "selfip.com", + "selfip.info", + "selfip.net", + "selfip.org", + "sells-for-less.com", + "sells-for-u.com", + "sells-it.net", + "sellsyourhome.org", + "servebbs.com", + "servebbs.net", + "servebbs.org", + "serveftp.net", + "serveftp.org", + "servegame.org", + "shacknet.nu", + "simple-url.com", + "space-to-rent.com", + "stuff-4-sale.org", + "stuff-4-sale.us", + "teaches-yoga.com", + "thruhere.net", + "traeumtgerade.de", + "webhop.biz", + "webhop.info", + "webhop.net", + "webhop.org", + "worse-than.tv", + "writesthisblog.com", + "ddnss.de", + "dyn.ddnss.de", + "dyndns.ddnss.de", + "dyndns1.de", + "dyn-ip24.de", + "home-webserver.de", + "dyn.home-webserver.de", + "myhome-server.de", + "ddnss.org", + "definima.net", + "definima.io", + "ddnsfree.com", + "ddnsgeek.com", + "giize.com", + "gleeze.com", + "kozow.com", + "loseyourip.com", + "ooguy.com", + "theworkpc.com", + "casacam.net", + "dynu.net", + "accesscam.org", + "camdvr.org", + "freeddns.org", + "mywire.org", + "webredirect.org", + "myddns.rocks", + "blogsite.xyz", + "dynv6.net", + "e4.cz", + "mytuleap.com", + "enonic.io", + "customer.enonic.io", + "eu.org", + "al.eu.org", + "asso.eu.org", + "at.eu.org", + "au.eu.org", + "be.eu.org", + "bg.eu.org", + "ca.eu.org", + "cd.eu.org", + "ch.eu.org", + "cn.eu.org", + "cy.eu.org", + "cz.eu.org", + "de.eu.org", + "dk.eu.org", + "edu.eu.org", + "ee.eu.org", + "es.eu.org", + "fi.eu.org", + "fr.eu.org", + "gr.eu.org", + "hr.eu.org", + "hu.eu.org", + "ie.eu.org", + "il.eu.org", + "in.eu.org", + "int.eu.org", + "is.eu.org", + "it.eu.org", + "jp.eu.org", + "kr.eu.org", + "lt.eu.org", + "lu.eu.org", + "lv.eu.org", + "mc.eu.org", + "me.eu.org", + "mk.eu.org", + "mt.eu.org", + "my.eu.org", + "net.eu.org", + "ng.eu.org", + "nl.eu.org", + "no.eu.org", + "nz.eu.org", + "paris.eu.org", + "pl.eu.org", + "pt.eu.org", + "q-a.eu.org", + "ro.eu.org", + "ru.eu.org", + "se.eu.org", + "si.eu.org", + "sk.eu.org", + "tr.eu.org", + "uk.eu.org", + "us.eu.org", + "eu-1.evennode.com", + "eu-2.evennode.com", + "eu-3.evennode.com", + "eu-4.evennode.com", + "us-1.evennode.com", + "us-2.evennode.com", + "us-3.evennode.com", + "us-4.evennode.com", + "twmail.cc", + "twmail.net", + "twmail.org", + "mymailer.com.tw", + "url.tw", + "apps.fbsbx.com", + "ru.net", + "adygeya.ru", + "bashkiria.ru", + "bir.ru", + "cbg.ru", + "com.ru", + "dagestan.ru", + "grozny.ru", + "kalmykia.ru", + "kustanai.ru", + "marine.ru", + "mordovia.ru", + "msk.ru", + "mytis.ru", + "nalchik.ru", + "nov.ru", + "pyatigorsk.ru", + "spb.ru", + "vladikavkaz.ru", + "vladimir.ru", + "abkhazia.su", + "adygeya.su", + "aktyubinsk.su", + "arkhangelsk.su", + "armenia.su", + "ashgabad.su", + "azerbaijan.su", + "balashov.su", + "bashkiria.su", + "bryansk.su", + "bukhara.su", + "chimkent.su", + "dagestan.su", + "east-kazakhstan.su", + "exnet.su", + "georgia.su", + "grozny.su", + "ivanovo.su", + "jambyl.su", + "kalmykia.su", + "kaluga.su", + "karacol.su", + "karaganda.su", + "karelia.su", + "khakassia.su", + "krasnodar.su", + "kurgan.su", + "kustanai.su", + "lenug.su", + "mangyshlak.su", + "mordovia.su", + "msk.su", + "murmansk.su", + "nalchik.su", + "navoi.su", + "north-kazakhstan.su", + "nov.su", + "obninsk.su", + "penza.su", + "pokrovsk.su", + "sochi.su", + "spb.su", + "tashkent.su", + "termez.su", + "togliatti.su", + "troitsk.su", + "tselinograd.su", + "tula.su", + "tuva.su", + "vladikavkaz.su", + 
"vladimir.su", + "vologda.su", + "channelsdvr.net", + "fastlylb.net", + "map.fastlylb.net", + "freetls.fastly.net", + "map.fastly.net", + "a.prod.fastly.net", + "global.prod.fastly.net", + "a.ssl.fastly.net", + "b.ssl.fastly.net", + "global.ssl.fastly.net", + "fhapp.xyz", + "fedorainfracloud.org", + "fedorapeople.org", + "cloud.fedoraproject.org", + "app.os.fedoraproject.org", + "app.os.stg.fedoraproject.org", + "filegear.me", + "firebaseapp.com", + "flynnhub.com", + "flynnhosting.net", + "freebox-os.com", + "freeboxos.com", + "fbx-os.fr", + "fbxos.fr", + "freebox-os.fr", + "freeboxos.fr", + "*.futurecms.at", + "futurehosting.at", + "futuremailing.at", + "*.ex.ortsinfo.at", + "*.kunden.ortsinfo.at", + "*.statics.cloud", + "service.gov.uk", + "github.io", + "githubusercontent.com", + "gitlab.io", + "homeoffice.gov.uk", + "ro.im", + "shop.ro", + "goip.de", + "*.0emm.com", + "appspot.com", + "blogspot.ae", + "blogspot.al", + "blogspot.am", + "blogspot.ba", + "blogspot.be", + "blogspot.bg", + "blogspot.bj", + "blogspot.ca", + "blogspot.cf", + "blogspot.ch", + "blogspot.cl", + "blogspot.co.at", + "blogspot.co.id", + "blogspot.co.il", + "blogspot.co.ke", + "blogspot.co.nz", + "blogspot.co.uk", + "blogspot.co.za", + "blogspot.com", + "blogspot.com.ar", + "blogspot.com.au", + "blogspot.com.br", + "blogspot.com.by", + "blogspot.com.co", + "blogspot.com.cy", + "blogspot.com.ee", + "blogspot.com.eg", + "blogspot.com.es", + "blogspot.com.mt", + "blogspot.com.ng", + "blogspot.com.tr", + "blogspot.com.uy", + "blogspot.cv", + "blogspot.cz", + "blogspot.de", + "blogspot.dk", + "blogspot.fi", + "blogspot.fr", + "blogspot.gr", + "blogspot.hk", + "blogspot.hr", + "blogspot.hu", + "blogspot.ie", + "blogspot.in", + "blogspot.is", + "blogspot.it", + "blogspot.jp", + "blogspot.kr", + "blogspot.li", + "blogspot.lt", + "blogspot.lu", + "blogspot.md", + "blogspot.mk", + "blogspot.mr", + "blogspot.mx", + "blogspot.my", + "blogspot.nl", + "blogspot.no", + "blogspot.pe", + "blogspot.pt", + "blogspot.qa", + "blogspot.re", + "blogspot.ro", + "blogspot.rs", + "blogspot.ru", + "blogspot.se", + "blogspot.sg", + "blogspot.si", + "blogspot.sk", + "blogspot.sn", + "blogspot.td", + "blogspot.tw", + "blogspot.ug", + "blogspot.vn", + "cloudfunctions.net", + "cloud.goog", + "codespot.com", + "googleapis.com", + "googlecode.com", + "pagespeedmobilizer.com", + "publishproxy.com", + "withgoogle.com", + "withyoutube.com", + "hashbang.sh", + "hasura-app.io", + "hepforge.org", + "herokuapp.com", + "herokussl.com", + "moonscale.net", + "iki.fi", + "biz.at", + "info.at", + "info.cx", + "ac.leg.br", + "al.leg.br", + "am.leg.br", + "ap.leg.br", + "ba.leg.br", + "ce.leg.br", + "df.leg.br", + "es.leg.br", + "go.leg.br", + "ma.leg.br", + "mg.leg.br", + "ms.leg.br", + "mt.leg.br", + "pa.leg.br", + "pb.leg.br", + "pe.leg.br", + "pi.leg.br", + "pr.leg.br", + "rj.leg.br", + "rn.leg.br", + "ro.leg.br", + "rr.leg.br", + "rs.leg.br", + "sc.leg.br", + "se.leg.br", + "sp.leg.br", + "to.leg.br", + "pixolino.com", + "ipifony.net", + "*.triton.zone", + "*.cns.joyent.com", + "js.org", + "keymachine.de", + "knightpoint.systems", + "co.krd", + "edu.krd", + "git-repos.de", + "lcube-server.de", + "svn-repos.de", + "we.bs", + "barsy.bg", + "barsyonline.com", + "barsy.de", + "barsy.eu", + "barsy.in", + "barsy.net", + "barsy.online", + "barsy.support", + "*.magentosite.cloud", + "hb.cldmail.ru", + "cloud.metacentrum.cz", + "custom.metacentrum.cz", + "meteorapp.com", + "eu.meteorapp.com", + "co.pl", + "azurewebsites.net", + "azure-mobile.net", + "cloudapp.net", + 
"bmoattachments.org", + "net.ru", + "org.ru", + "pp.ru", + "bitballoon.com", + "netlify.com", + "4u.com", + "ngrok.io", + "nh-serv.co.uk", + "nfshost.com", + "nsupdate.info", + "nerdpol.ovh", + "blogsyte.com", + "brasilia.me", + "cable-modem.org", + "ciscofreak.com", + "collegefan.org", + "couchpotatofries.org", + "damnserver.com", + "ddns.me", + "ditchyourip.com", + "dnsfor.me", + "dnsiskinky.com", + "dvrcam.info", + "dynns.com", + "eating-organic.net", + "fantasyleague.cc", + "geekgalaxy.com", + "golffan.us", + "health-carereform.com", + "homesecuritymac.com", + "homesecuritypc.com", + "hopto.me", + "ilovecollege.info", + "loginto.me", + "mlbfan.org", + "mmafan.biz", + "myactivedirectory.com", + "mydissent.net", + "myeffect.net", + "mymediapc.net", + "mypsx.net", + "mysecuritycamera.com", + "mysecuritycamera.net", + "mysecuritycamera.org", + "net-freaks.com", + "nflfan.org", + "nhlfan.net", + "no-ip.ca", + "no-ip.co.uk", + "no-ip.net", + "noip.us", + "onthewifi.com", + "pgafan.net", + "point2this.com", + "pointto.us", + "privatizehealthinsurance.net", + "quicksytes.com", + "read-books.org", + "securitytactics.com", + "serveexchange.com", + "servehumour.com", + "servep2p.com", + "servesarcasm.com", + "stufftoread.com", + "ufcfan.org", + "unusualperson.com", + "workisboring.com", + "3utilities.com", + "bounceme.net", + "ddns.net", + "ddnsking.com", + "gotdns.ch", + "hopto.org", + "myftp.biz", + "myftp.org", + "myvnc.com", + "no-ip.biz", + "no-ip.info", + "no-ip.org", + "noip.me", + "redirectme.net", + "servebeer.com", + "serveblog.net", + "servecounterstrike.com", + "serveftp.com", + "servegame.com", + "servehalflife.com", + "servehttp.com", + "serveirc.com", + "serveminecraft.net", + "servemp3.com", + "servepics.com", + "servequake.com", + "sytes.net", + "webhop.me", + "zapto.org", + "stage.nodeart.io", + "nodum.co", + "nodum.io", + "nyc.mn", + "nom.ae", + "nom.ai", + "nom.al", + "nym.by", + "nym.bz", + "nom.cl", + "nom.gd", + "nom.gl", + "nym.gr", + "nom.gt", + "nom.hn", + "nom.im", + "nym.kz", + "nym.la", + "nom.li", + "nym.li", + "nym.lt", + "nym.lu", + "nym.me", + "nom.mk", + "nym.mx", + "nom.nu", + "nym.nz", + "nym.pe", + "nym.pt", + "nom.pw", + "nom.qa", + "nom.rs", + "nom.si", + "nym.sk", + "nym.su", + "nym.sx", + "nym.tw", + "nom.ug", + "nom.uy", + "nom.vc", + "nom.vg", + "cya.gg", + "nid.io", + "opencraft.hosting", + "operaunite.com", + "outsystemscloud.com", + "ownprovider.com", + "oy.lc", + "pgfog.com", + "pagefrontapp.com", + "art.pl", + "gliwice.pl", + "krakow.pl", + "poznan.pl", + "wroc.pl", + "zakopane.pl", + "pantheonsite.io", + "gotpantheon.com", + "mypep.link", + "on-web.fr", + "*.platform.sh", + "*.platformsh.site", + "xen.prgmr.com", + "priv.at", + "protonet.io", + "chirurgiens-dentistes-en-france.fr", + "byen.site", + "qa2.com", + "dev-myqnapcloud.com", + "alpha-myqnapcloud.com", + "myqnapcloud.com", + "*.quipelements.com", + "vapor.cloud", + "vaporcloud.io", + "rackmaze.com", + "rackmaze.net", + "rhcloud.com", + "resindevice.io", + "devices.resinstaging.io", + "hzc.io", + "wellbeingzone.eu", + "ptplus.fit", + "wellbeingzone.co.uk", + "sandcats.io", + "logoip.de", + "logoip.com", + "scrysec.com", + "firewall-gateway.com", + "firewall-gateway.de", + "my-gateway.de", + "my-router.de", + "spdns.de", + "spdns.eu", + "firewall-gateway.net", + "my-firewall.org", + "myfirewall.org", + "spdns.org", + "*.s5y.io", + "*.sensiosite.cloud", + "biz.ua", + "co.ua", + "pp.ua", + "shiftedit.io", + "myshopblocks.com", + "1kapp.com", + "appchizi.com", + "applinzi.com", + "sinaapp.com", 
+ "vipsinaapp.com", + "bounty-full.com", + "alpha.bounty-full.com", + "beta.bounty-full.com", + "static.land", + "dev.static.land", + "sites.static.land", + "apps.lair.io", + "*.stolos.io", + "spacekit.io", + "stackspace.space", + "storj.farm", + "temp-dns.com", + "diskstation.me", + "dscloud.biz", + "dscloud.me", + "dscloud.mobi", + "dsmynas.com", + "dsmynas.net", + "dsmynas.org", + "familyds.com", + "familyds.net", + "familyds.org", + "i234.me", + "myds.me", + "synology.me", + "vpnplus.to", + "taifun-dns.de", + "gda.pl", + "gdansk.pl", + "gdynia.pl", + "med.pl", + "sopot.pl", + "cust.dev.thingdust.io", + "cust.disrec.thingdust.io", + "cust.prod.thingdust.io", + "cust.testing.thingdust.io", + "bloxcms.com", + "townnews-staging.com", + "12hp.at", + "2ix.at", + "4lima.at", + "lima-city.at", + "12hp.ch", + "2ix.ch", + "4lima.ch", + "lima-city.ch", + "trafficplex.cloud", + "de.cool", + "12hp.de", + "2ix.de", + "4lima.de", + "lima-city.de", + "1337.pictures", + "clan.rip", + "lima-city.rocks", + "webspace.rocks", + "lima.zone", + "*.transurl.be", + "*.transurl.eu", + "*.transurl.nl", + "tuxfamily.org", + "dd-dns.de", + "diskstation.eu", + "diskstation.org", + "dray-dns.de", + "draydns.de", + "dyn-vpn.de", + "dynvpn.de", + "mein-vigor.de", + "my-vigor.de", + "my-wan.de", + "syno-ds.de", + "synology-diskstation.de", + "synology-ds.de", + "uber.space", + "hk.com", + "hk.org", + "ltd.hk", + "inc.hk", + "lib.de.us", + "router.management", + "v-info.info", + "wedeploy.io", + "wedeploy.me", + "wedeploy.sh", + "remotewd.com", + "wmflabs.org", + "cistron.nl", + "demon.nl", + "xs4all.space", + "yolasite.com", + "ybo.faith", + "yombo.me", + "homelink.one", + "ybo.party", + "ybo.review", + "ybo.science", + "ybo.trade", + "za.net", + "za.org", + "now.sh", +} + +var nodeLabels = [...]string{ + "aaa", + "aarp", + "abarth", + "abb", + "abbott", + "abbvie", + "abc", + "able", + "abogado", + "abudhabi", + "ac", + "academy", + "accenture", + "accountant", + "accountants", + "aco", + "active", + "actor", + "ad", + "adac", + "ads", + "adult", + "ae", + "aeg", + "aero", + "aetna", + "af", + "afamilycompany", + "afl", + "africa", + "ag", + "agakhan", + "agency", + "ai", + "aig", + "aigo", + "airbus", + "airforce", + "airtel", + "akdn", + "al", + "alfaromeo", + "alibaba", + "alipay", + "allfinanz", + "allstate", + "ally", + "alsace", + "alstom", + "am", + "americanexpress", + "americanfamily", + "amex", + "amfam", + "amica", + "amsterdam", + "analytics", + "android", + "anquan", + "anz", + "ao", + "aol", + "apartments", + "app", + "apple", + "aq", + "aquarelle", + "ar", + "arab", + "aramco", + "archi", + "army", + "arpa", + "art", + "arte", + "as", + "asda", + "asia", + "associates", + "at", + "athleta", + "attorney", + "au", + "auction", + "audi", + "audible", + "audio", + "auspost", + "author", + "auto", + "autos", + "avianca", + "aw", + "aws", + "ax", + "axa", + "az", + "azure", + "ba", + "baby", + "baidu", + "banamex", + "bananarepublic", + "band", + "bank", + "bar", + "barcelona", + "barclaycard", + "barclays", + "barefoot", + "bargains", + "baseball", + "basketball", + "bauhaus", + "bayern", + "bb", + "bbc", + "bbt", + "bbva", + "bcg", + "bcn", + "bd", + "be", + "beats", + "beauty", + "beer", + "bentley", + "berlin", + "best", + "bestbuy", + "bet", + "bf", + "bg", + "bh", + "bharti", + "bi", + "bible", + "bid", + "bike", + "bing", + "bingo", + "bio", + "biz", + "bj", + "black", + "blackfriday", + "blanco", + "blockbuster", + "blog", + "bloomberg", + "blue", + "bm", + "bms", + "bmw", + "bn", + "bnl", + 
"bnpparibas", + "bo", + "boats", + "boehringer", + "bofa", + "bom", + "bond", + "boo", + "book", + "booking", + "boots", + "bosch", + "bostik", + "boston", + "bot", + "boutique", + "box", + "br", + "bradesco", + "bridgestone", + "broadway", + "broker", + "brother", + "brussels", + "bs", + "bt", + "budapest", + "bugatti", + "build", + "builders", + "business", + "buy", + "buzz", + "bv", + "bw", + "by", + "bz", + "bzh", + "ca", + "cab", + "cafe", + "cal", + "call", + "calvinklein", + "cam", + "camera", + "camp", + "cancerresearch", + "canon", + "capetown", + "capital", + "capitalone", + "car", + "caravan", + "cards", + "care", + "career", + "careers", + "cars", + "cartier", + "casa", + "case", + "caseih", + "cash", + "casino", + "cat", + "catering", + "catholic", + "cba", + "cbn", + "cbre", + "cbs", + "cc", + "cd", + "ceb", + "center", + "ceo", + "cern", + "cf", + "cfa", + "cfd", + "cg", + "ch", + "chanel", + "channel", + "chase", + "chat", + "cheap", + "chintai", + "christmas", + "chrome", + "chrysler", + "church", + "ci", + "cipriani", + "circle", + "cisco", + "citadel", + "citi", + "citic", + "city", + "cityeats", + "ck", + "cl", + "claims", + "cleaning", + "click", + "clinic", + "clinique", + "clothing", + "cloud", + "club", + "clubmed", + "cm", + "cn", + "co", + "coach", + "codes", + "coffee", + "college", + "cologne", + "com", + "comcast", + "commbank", + "community", + "company", + "compare", + "computer", + "comsec", + "condos", + "construction", + "consulting", + "contact", + "contractors", + "cooking", + "cookingchannel", + "cool", + "coop", + "corsica", + "country", + "coupon", + "coupons", + "courses", + "cr", + "credit", + "creditcard", + "creditunion", + "cricket", + "crown", + "crs", + "cruise", + "cruises", + "csc", + "cu", + "cuisinella", + "cv", + "cw", + "cx", + "cy", + "cymru", + "cyou", + "cz", + "dabur", + "dad", + "dance", + "data", + "date", + "dating", + "datsun", + "day", + "dclk", + "dds", + "de", + "deal", + "dealer", + "deals", + "degree", + "delivery", + "dell", + "deloitte", + "delta", + "democrat", + "dental", + "dentist", + "desi", + "design", + "dev", + "dhl", + "diamonds", + "diet", + "digital", + "direct", + "directory", + "discount", + "discover", + "dish", + "diy", + "dj", + "dk", + "dm", + "dnp", + "do", + "docs", + "doctor", + "dodge", + "dog", + "doha", + "domains", + "dot", + "download", + "drive", + "dtv", + "dubai", + "duck", + "dunlop", + "duns", + "dupont", + "durban", + "dvag", + "dvr", + "dz", + "earth", + "eat", + "ec", + "eco", + "edeka", + "edu", + "education", + "ee", + "eg", + "email", + "emerck", + "energy", + "engineer", + "engineering", + "enterprises", + "epost", + "epson", + "equipment", + "er", + "ericsson", + "erni", + "es", + "esq", + "estate", + "esurance", + "et", + "etisalat", + "eu", + "eurovision", + "eus", + "events", + "everbank", + "exchange", + "expert", + "exposed", + "express", + "extraspace", + "fage", + "fail", + "fairwinds", + "faith", + "family", + "fan", + "fans", + "farm", + "farmers", + "fashion", + "fast", + "fedex", + "feedback", + "ferrari", + "ferrero", + "fi", + "fiat", + "fidelity", + "fido", + "film", + "final", + "finance", + "financial", + "fire", + "firestone", + "firmdale", + "fish", + "fishing", + "fit", + "fitness", + "fj", + "fk", + "flickr", + "flights", + "flir", + "florist", + "flowers", + "fly", + "fm", + "fo", + "foo", + "food", + "foodnetwork", + "football", + "ford", + "forex", + "forsale", + "forum", + "foundation", + "fox", + "fr", + "free", + "fresenius", + "frl", + "frogans", + 
"frontdoor", + "frontier", + "ftr", + "fujitsu", + "fujixerox", + "fun", + "fund", + "furniture", + "futbol", + "fyi", + "ga", + "gal", + "gallery", + "gallo", + "gallup", + "game", + "games", + "gap", + "garden", + "gb", + "gbiz", + "gd", + "gdn", + "ge", + "gea", + "gent", + "genting", + "george", + "gf", + "gg", + "ggee", + "gh", + "gi", + "gift", + "gifts", + "gives", + "giving", + "gl", + "glade", + "glass", + "gle", + "global", + "globo", + "gm", + "gmail", + "gmbh", + "gmo", + "gmx", + "gn", + "godaddy", + "gold", + "goldpoint", + "golf", + "goo", + "goodhands", + "goodyear", + "goog", + "google", + "gop", + "got", + "gov", + "gp", + "gq", + "gr", + "grainger", + "graphics", + "gratis", + "green", + "gripe", + "grocery", + "group", + "gs", + "gt", + "gu", + "guardian", + "gucci", + "guge", + "guide", + "guitars", + "guru", + "gw", + "gy", + "hair", + "hamburg", + "hangout", + "haus", + "hbo", + "hdfc", + "hdfcbank", + "health", + "healthcare", + "help", + "helsinki", + "here", + "hermes", + "hgtv", + "hiphop", + "hisamitsu", + "hitachi", + "hiv", + "hk", + "hkt", + "hm", + "hn", + "hockey", + "holdings", + "holiday", + "homedepot", + "homegoods", + "homes", + "homesense", + "honda", + "honeywell", + "horse", + "hospital", + "host", + "hosting", + "hot", + "hoteles", + "hotels", + "hotmail", + "house", + "how", + "hr", + "hsbc", + "ht", + "hu", + "hughes", + "hyatt", + "hyundai", + "ibm", + "icbc", + "ice", + "icu", + "id", + "ie", + "ieee", + "ifm", + "ikano", + "il", + "im", + "imamat", + "imdb", + "immo", + "immobilien", + "in", + "industries", + "infiniti", + "info", + "ing", + "ink", + "institute", + "insurance", + "insure", + "int", + "intel", + "international", + "intuit", + "investments", + "io", + "ipiranga", + "iq", + "ir", + "irish", + "is", + "iselect", + "ismaili", + "ist", + "istanbul", + "it", + "itau", + "itv", + "iveco", + "iwc", + "jaguar", + "java", + "jcb", + "jcp", + "je", + "jeep", + "jetzt", + "jewelry", + "jio", + "jlc", + "jll", + "jm", + "jmp", + "jnj", + "jo", + "jobs", + "joburg", + "jot", + "joy", + "jp", + "jpmorgan", + "jprs", + "juegos", + "juniper", + "kaufen", + "kddi", + "ke", + "kerryhotels", + "kerrylogistics", + "kerryproperties", + "kfh", + "kg", + "kh", + "ki", + "kia", + "kim", + "kinder", + "kindle", + "kitchen", + "kiwi", + "km", + "kn", + "koeln", + "komatsu", + "kosher", + "kp", + "kpmg", + "kpn", + "kr", + "krd", + "kred", + "kuokgroup", + "kw", + "ky", + "kyoto", + "kz", + "la", + "lacaixa", + "ladbrokes", + "lamborghini", + "lamer", + "lancaster", + "lancia", + "lancome", + "land", + "landrover", + "lanxess", + "lasalle", + "lat", + "latino", + "latrobe", + "law", + "lawyer", + "lb", + "lc", + "lds", + "lease", + "leclerc", + "lefrak", + "legal", + "lego", + "lexus", + "lgbt", + "li", + "liaison", + "lidl", + "life", + "lifeinsurance", + "lifestyle", + "lighting", + "like", + "lilly", + "limited", + "limo", + "lincoln", + "linde", + "link", + "lipsy", + "live", + "living", + "lixil", + "lk", + "loan", + "loans", + "locker", + "locus", + "loft", + "lol", + "london", + "lotte", + "lotto", + "love", + "lpl", + "lplfinancial", + "lr", + "ls", + "lt", + "ltd", + "ltda", + "lu", + "lundbeck", + "lupin", + "luxe", + "luxury", + "lv", + "ly", + "ma", + "macys", + "madrid", + "maif", + "maison", + "makeup", + "man", + "management", + "mango", + "map", + "market", + "marketing", + "markets", + "marriott", + "marshalls", + "maserati", + "mattel", + "mba", + "mc", + "mckinsey", + "md", + "me", + "med", + "media", + "meet", + "melbourne", + "meme", 
+ "memorial", + "men", + "menu", + "meo", + "merckmsd", + "metlife", + "mg", + "mh", + "miami", + "microsoft", + "mil", + "mini", + "mint", + "mit", + "mitsubishi", + "mk", + "ml", + "mlb", + "mls", + "mm", + "mma", + "mn", + "mo", + "mobi", + "mobile", + "mobily", + "moda", + "moe", + "moi", + "mom", + "monash", + "money", + "monster", + "mopar", + "mormon", + "mortgage", + "moscow", + "moto", + "motorcycles", + "mov", + "movie", + "movistar", + "mp", + "mq", + "mr", + "ms", + "msd", + "mt", + "mtn", + "mtpc", + "mtr", + "mu", + "museum", + "mutual", + "mv", + "mw", + "mx", + "my", + "mz", + "na", + "nab", + "nadex", + "nagoya", + "name", + "nationwide", + "natura", + "navy", + "nba", + "nc", + "ne", + "nec", + "net", + "netbank", + "netflix", + "network", + "neustar", + "new", + "newholland", + "news", + "next", + "nextdirect", + "nexus", + "nf", + "nfl", + "ng", + "ngo", + "nhk", + "ni", + "nico", + "nike", + "nikon", + "ninja", + "nissan", + "nissay", + "nl", + "no", + "nokia", + "northwesternmutual", + "norton", + "now", + "nowruz", + "nowtv", + "np", + "nr", + "nra", + "nrw", + "ntt", + "nu", + "nyc", + "nz", + "obi", + "observer", + "off", + "office", + "okinawa", + "olayan", + "olayangroup", + "oldnavy", + "ollo", + "om", + "omega", + "one", + "ong", + "onion", + "onl", + "online", + "onyourside", + "ooo", + "open", + "oracle", + "orange", + "org", + "organic", + "origins", + "osaka", + "otsuka", + "ott", + "ovh", + "pa", + "page", + "panasonic", + "panerai", + "paris", + "pars", + "partners", + "parts", + "party", + "passagens", + "pay", + "pccw", + "pe", + "pet", + "pf", + "pfizer", + "pg", + "ph", + "pharmacy", + "phd", + "philips", + "phone", + "photo", + "photography", + "photos", + "physio", + "piaget", + "pics", + "pictet", + "pictures", + "pid", + "pin", + "ping", + "pink", + "pioneer", + "pizza", + "pk", + "pl", + "place", + "play", + "playstation", + "plumbing", + "plus", + "pm", + "pn", + "pnc", + "pohl", + "poker", + "politie", + "porn", + "post", + "pr", + "pramerica", + "praxi", + "press", + "prime", + "pro", + "prod", + "productions", + "prof", + "progressive", + "promo", + "properties", + "property", + "protection", + "pru", + "prudential", + "ps", + "pt", + "pub", + "pw", + "pwc", + "py", + "qa", + "qpon", + "quebec", + "quest", + "qvc", + "racing", + "radio", + "raid", + "re", + "read", + "realestate", + "realtor", + "realty", + "recipes", + "red", + "redstone", + "redumbrella", + "rehab", + "reise", + "reisen", + "reit", + "reliance", + "ren", + "rent", + "rentals", + "repair", + "report", + "republican", + "rest", + "restaurant", + "review", + "reviews", + "rexroth", + "rich", + "richardli", + "ricoh", + "rightathome", + "ril", + "rio", + "rip", + "rmit", + "ro", + "rocher", + "rocks", + "rodeo", + "rogers", + "room", + "rs", + "rsvp", + "ru", + "rugby", + "ruhr", + "run", + "rw", + "rwe", + "ryukyu", + "sa", + "saarland", + "safe", + "safety", + "sakura", + "sale", + "salon", + "samsclub", + "samsung", + "sandvik", + "sandvikcoromant", + "sanofi", + "sap", + "sapo", + "sarl", + "sas", + "save", + "saxo", + "sb", + "sbi", + "sbs", + "sc", + "sca", + "scb", + "schaeffler", + "schmidt", + "scholarships", + "school", + "schule", + "schwarz", + "science", + "scjohnson", + "scor", + "scot", + "sd", + "se", + "search", + "seat", + "secure", + "security", + "seek", + "select", + "sener", + "services", + "ses", + "seven", + "sew", + "sex", + "sexy", + "sfr", + "sg", + "sh", + "shangrila", + "sharp", + "shaw", + "shell", + "shia", + "shiksha", + "shoes", + "shop", + 
"shopping", + "shouji", + "show", + "showtime", + "shriram", + "si", + "silk", + "sina", + "singles", + "site", + "sj", + "sk", + "ski", + "skin", + "sky", + "skype", + "sl", + "sling", + "sm", + "smart", + "smile", + "sn", + "sncf", + "so", + "soccer", + "social", + "softbank", + "software", + "sohu", + "solar", + "solutions", + "song", + "sony", + "soy", + "space", + "spiegel", + "spot", + "spreadbetting", + "sr", + "srl", + "srt", + "st", + "stada", + "staples", + "star", + "starhub", + "statebank", + "statefarm", + "statoil", + "stc", + "stcgroup", + "stockholm", + "storage", + "store", + "stream", + "studio", + "study", + "style", + "su", + "sucks", + "supplies", + "supply", + "support", + "surf", + "surgery", + "suzuki", + "sv", + "swatch", + "swiftcover", + "swiss", + "sx", + "sy", + "sydney", + "symantec", + "systems", + "sz", + "tab", + "taipei", + "talk", + "taobao", + "target", + "tatamotors", + "tatar", + "tattoo", + "tax", + "taxi", + "tc", + "tci", + "td", + "tdk", + "team", + "tech", + "technology", + "tel", + "telecity", + "telefonica", + "temasek", + "tennis", + "teva", + "tf", + "tg", + "th", + "thd", + "theater", + "theatre", + "tiaa", + "tickets", + "tienda", + "tiffany", + "tips", + "tires", + "tirol", + "tj", + "tjmaxx", + "tjx", + "tk", + "tkmaxx", + "tl", + "tm", + "tmall", + "tn", + "to", + "today", + "tokyo", + "tools", + "top", + "toray", + "toshiba", + "total", + "tours", + "town", + "toyota", + "toys", + "tr", + "trade", + "trading", + "training", + "travel", + "travelchannel", + "travelers", + "travelersinsurance", + "trust", + "trv", + "tt", + "tube", + "tui", + "tunes", + "tushu", + "tv", + "tvs", + "tw", + "tz", + "ua", + "ubank", + "ubs", + "uconnect", + "ug", + "uk", + "unicom", + "university", + "uno", + "uol", + "ups", + "us", + "uy", + "uz", + "va", + "vacations", + "vana", + "vanguard", + "vc", + "ve", + "vegas", + "ventures", + "verisign", + "versicherung", + "vet", + "vg", + "vi", + "viajes", + "video", + "vig", + "viking", + "villas", + "vin", + "vip", + "virgin", + "visa", + "vision", + "vista", + "vistaprint", + "viva", + "vivo", + "vlaanderen", + "vn", + "vodka", + "volkswagen", + "volvo", + "vote", + "voting", + "voto", + "voyage", + "vu", + "vuelos", + "wales", + "walmart", + "walter", + "wang", + "wanggou", + "warman", + "watch", + "watches", + "weather", + "weatherchannel", + "webcam", + "weber", + "website", + "wed", + "wedding", + "weibo", + "weir", + "wf", + "whoswho", + "wien", + "wiki", + "williamhill", + "win", + "windows", + "wine", + "winners", + "wme", + "wolterskluwer", + "woodside", + "work", + "works", + "world", + "wow", + "ws", + "wtc", + "wtf", + "xbox", + "xerox", + "xfinity", + "xihuan", + "xin", + "xn--11b4c3d", + "xn--1ck2e1b", + "xn--1qqw23a", + "xn--2scrj9c", + "xn--30rr7y", + "xn--3bst00m", + "xn--3ds443g", + "xn--3e0b707e", + "xn--3hcrj9c", + "xn--3oq18vl8pn36a", + "xn--3pxu8k", + "xn--42c2d9a", + "xn--45br5cyl", + "xn--45brj9c", + "xn--45q11c", + "xn--4gbrim", + "xn--54b7fta0cc", + "xn--55qw42g", + "xn--55qx5d", + "xn--5su34j936bgsg", + "xn--5tzm5g", + "xn--6frz82g", + "xn--6qq986b3xl", + "xn--80adxhks", + "xn--80ao21a", + "xn--80aqecdr1a", + "xn--80asehdb", + "xn--80aswg", + "xn--8y0a063a", + "xn--90a3ac", + "xn--90ae", + "xn--90ais", + "xn--9dbq2a", + "xn--9et52u", + "xn--9krt00a", + "xn--b4w605ferd", + "xn--bck1b9a5dre4c", + "xn--c1avg", + "xn--c2br7g", + "xn--cck2b3b", + "xn--cg4bki", + "xn--clchc0ea0b2g2a9gcd", + "xn--czr694b", + "xn--czrs0t", + "xn--czru2d", + "xn--d1acj3b", + "xn--d1alf", + "xn--e1a4c", + 
"xn--eckvdtc9d", + "xn--efvy88h", + "xn--estv75g", + "xn--fct429k", + "xn--fhbei", + "xn--fiq228c5hs", + "xn--fiq64b", + "xn--fiqs8s", + "xn--fiqz9s", + "xn--fjq720a", + "xn--flw351e", + "xn--fpcrj9c3d", + "xn--fzc2c9e2c", + "xn--fzys8d69uvgm", + "xn--g2xx48c", + "xn--gckr3f0f", + "xn--gecrj9c", + "xn--gk3at1e", + "xn--h2breg3eve", + "xn--h2brj9c", + "xn--h2brj9c8c", + "xn--hxt814e", + "xn--i1b6b1a6a2e", + "xn--imr513n", + "xn--io0a7i", + "xn--j1aef", + "xn--j1amh", + "xn--j6w193g", + "xn--jlq61u9w7b", + "xn--jvr189m", + "xn--kcrx77d1x4a", + "xn--kprw13d", + "xn--kpry57d", + "xn--kpu716f", + "xn--kput3i", + "xn--l1acc", + "xn--lgbbat1ad8j", + "xn--mgb2ddes", + "xn--mgb9awbf", + "xn--mgba3a3ejt", + "xn--mgba3a4f16a", + "xn--mgba3a4fra", + "xn--mgba7c0bbn0a", + "xn--mgbaakc7dvf", + "xn--mgbaam7a8h", + "xn--mgbab2bd", + "xn--mgbai9a5eva00b", + "xn--mgbai9azgqp6j", + "xn--mgbayh7gpa", + "xn--mgbb9fbpob", + "xn--mgbbh1a71e", + "xn--mgbc0a9azcg", + "xn--mgbca7dzdo", + "xn--mgberp4a5d4a87g", + "xn--mgberp4a5d4ar", + "xn--mgbgu82a", + "xn--mgbi4ecexp", + "xn--mgbpl2fh", + "xn--mgbqly7c0a67fbc", + "xn--mgbqly7cvafr", + "xn--mgbt3dhd", + "xn--mgbtf8fl", + "xn--mgbtx2b", + "xn--mgbx4cd0ab", + "xn--mix082f", + "xn--mix891f", + "xn--mk1bu44c", + "xn--mxtq1m", + "xn--ngbc5azd", + "xn--ngbe9e0a", + "xn--ngbrx", + "xn--nnx388a", + "xn--node", + "xn--nqv7f", + "xn--nqv7fs00ema", + "xn--nyqy26a", + "xn--o3cw4h", + "xn--ogbpf8fl", + "xn--p1acf", + "xn--p1ai", + "xn--pbt977c", + "xn--pgbs0dh", + "xn--pssy2u", + "xn--q9jyb4c", + "xn--qcka1pmc", + "xn--qxam", + "xn--rhqv96g", + "xn--rovu88b", + "xn--rvc1e0am3e", + "xn--s9brj9c", + "xn--ses554g", + "xn--t60b56a", + "xn--tckwe", + "xn--tiq49xqyj", + "xn--unup4y", + "xn--vermgensberater-ctb", + "xn--vermgensberatung-pwb", + "xn--vhquv", + "xn--vuq861b", + "xn--w4r85el8fhu5dnra", + "xn--w4rs40l", + "xn--wgbh1c", + "xn--wgbl6a", + "xn--xhq521b", + "xn--xkc2al3hye2a", + "xn--xkc2dl3a5ee0h", + "xn--y9a3aq", + "xn--yfro4i67o", + "xn--ygbi2ammx", + "xn--zfr164b", + "xperia", + "xxx", + "xyz", + "yachts", + "yahoo", + "yamaxun", + "yandex", + "ye", + "yodobashi", + "yoga", + "yokohama", + "you", + "youtube", + "yt", + "yun", + "za", + "zappos", + "zara", + "zero", + "zip", + "zippo", + "zm", + "zone", + "zuerich", + "zw", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "nom", + "ac", + "blogspot", + "co", + "gov", + "mil", + "net", + "nom", + "org", + "sch", + "accident-investigation", + "accident-prevention", + "aerobatic", + "aeroclub", + "aerodrome", + "agents", + "air-surveillance", + "air-traffic-control", + "aircraft", + "airline", + "airport", + "airtraffic", + "ambulance", + "amusement", + "association", + "author", + "ballooning", + "broker", + "caa", + "cargo", + "catering", + "certification", + "championship", + "charter", + "civilaviation", + "club", + "conference", + "consultant", + "consulting", + "control", + "council", + "crew", + "design", + "dgca", + "educator", + "emergency", + "engine", + "engineer", + "entertainment", + "equipment", + "exchange", + "express", + "federation", + "flight", + "freight", + "fuel", + "gliding", + "government", + "groundhandling", + "group", + "hanggliding", + "homebuilt", + "insurance", + "journal", + "journalist", + "leasing", + "logistics", + "magazine", + "maintenance", + "media", + "microlight", + "modelling", + "navigation", + "parachuting", + "paragliding", + "passenger-association", + "pilot", + "press", + "production", + "recreation", + "repbody", + "res", + "research", + "rotorcraft", + "safety", + 
"scientist", + "services", + "show", + "skydiving", + "software", + "student", + "trader", + "trading", + "trainer", + "union", + "workinggroup", + "works", + "com", + "edu", + "gov", + "net", + "org", + "co", + "com", + "net", + "nom", + "org", + "com", + "net", + "nom", + "off", + "org", + "blogspot", + "com", + "edu", + "gov", + "mil", + "net", + "nom", + "org", + "blogspot", + "co", + "ed", + "gv", + "it", + "og", + "pb", + "com", + "edu", + "gob", + "gov", + "int", + "mil", + "musica", + "net", + "org", + "tur", + "blogspot", + "e164", + "in-addr", + "ip6", + "iris", + "uri", + "urn", + "gov", + "cloudns", + "12hp", + "2ix", + "4lima", + "ac", + "biz", + "co", + "futurecms", + "futurehosting", + "futuremailing", + "gv", + "info", + "lima-city", + "or", + "ortsinfo", + "priv", + "blogspot", + "ex", + "kunden", + "act", + "asn", + "com", + "conf", + "edu", + "gov", + "id", + "info", + "net", + "nsw", + "nt", + "org", + "oz", + "qld", + "sa", + "tas", + "vic", + "wa", + "blogspot", + "act", + "nsw", + "nt", + "qld", + "sa", + "tas", + "vic", + "wa", + "qld", + "sa", + "tas", + "vic", + "wa", + "com", + "biz", + "com", + "edu", + "gov", + "info", + "int", + "mil", + "name", + "net", + "org", + "pp", + "pro", + "blogspot", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "biz", + "co", + "com", + "edu", + "gov", + "info", + "net", + "org", + "store", + "tv", + "ac", + "blogspot", + "transurl", + "webhosting", + "gov", + "0", + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "a", + "b", + "barsy", + "blogspot", + "c", + "d", + "e", + "f", + "g", + "h", + "i", + "j", + "k", + "l", + "m", + "n", + "o", + "p", + "q", + "r", + "s", + "t", + "u", + "v", + "w", + "x", + "y", + "z", + "com", + "edu", + "gov", + "net", + "org", + "co", + "com", + "edu", + "or", + "org", + "cloudns", + "dscloud", + "dyndns", + "for-better", + "for-more", + "for-some", + "for-the", + "mmafan", + "myftp", + "no-ip", + "selfip", + "webhop", + "asso", + "barreau", + "blogspot", + "gouv", + "com", + "edu", + "gov", + "net", + "org", + "academia", + "agro", + "arte", + "blog", + "bolivia", + "ciencia", + "com", + "cooperativa", + "democracia", + "deporte", + "ecologia", + "economia", + "edu", + "empresa", + "gob", + "indigena", + "industria", + "info", + "int", + "medicina", + "mil", + "movimiento", + "musica", + "natural", + "net", + "nombre", + "noticias", + "org", + "patria", + "plurinacional", + "politica", + "profesional", + "pueblo", + "revista", + "salud", + "tecnologia", + "tksat", + "transporte", + "tv", + "web", + "wiki", + "9guacu", + "abc", + "adm", + "adv", + "agr", + "aju", + "am", + "anani", + "aparecida", + "arq", + "art", + "ato", + "b", + "belem", + "bhz", + "bio", + "blog", + "bmd", + "boavista", + "bsb", + "campinagrande", + "campinas", + "caxias", + "cim", + "cng", + "cnt", + "com", + "contagem", + "coop", + "cri", + "cuiaba", + "curitiba", + "def", + "ecn", + "eco", + "edu", + "emp", + "eng", + "esp", + "etc", + "eti", + "far", + "feira", + "flog", + "floripa", + "fm", + "fnd", + "fortal", + "fot", + "foz", + "fst", + "g12", + "ggf", + "goiania", + "gov", + "gru", + "imb", + "ind", + "inf", + "jab", + "jampa", + "jdf", + "joinville", + "jor", + "jus", + "leg", + "lel", + "londrina", + "macapa", + "maceio", + "manaus", + "maringa", + "mat", + "med", + "mil", + "morena", + "mp", + "mus", + "natal", + "net", + "niteroi", + "nom", + "not", + "ntr", + "odo", + "org", + "osasco", + "palmas", + "poa", + "ppg", + "pro", + "psc", + "psi", + "pvh", + "qsl", + "radio", + "rec", + "recife", 
+ "ribeirao", + "rio", + "riobranco", + "riopreto", + "salvador", + "sampa", + "santamaria", + "santoandre", + "saobernardo", + "saogonca", + "sjc", + "slg", + "slz", + "sorocaba", + "srv", + "taxi", + "teo", + "the", + "tmp", + "trd", + "tur", + "tv", + "udi", + "vet", + "vix", + "vlog", + "wiki", + "zlg", + "blogspot", + "ac", + "al", + "am", + "ap", + "ba", + "ce", + "df", + "es", + "go", + "ma", + "mg", + "ms", + "mt", + "pa", + "pb", + "pe", + "pi", + "pr", + "rj", + "rn", + "ro", + "rr", + "rs", + "sc", + "se", + "sp", + "to", + "ac", + "al", + "am", + "ap", + "ba", + "ce", + "df", + "es", + "go", + "ma", + "mg", + "ms", + "mt", + "pa", + "pb", + "pe", + "pi", + "pr", + "rj", + "rn", + "ro", + "rr", + "rs", + "sc", + "se", + "sp", + "to", + "com", + "edu", + "gov", + "net", + "org", + "we", + "com", + "edu", + "gov", + "net", + "org", + "co", + "org", + "com", + "gov", + "mil", + "nym", + "of", + "blogspot", + "com", + "edu", + "gov", + "net", + "nym", + "org", + "za", + "1password", + "ab", + "awdev", + "bc", + "blogspot", + "co", + "gc", + "mb", + "nb", + "nf", + "nl", + "no-ip", + "ns", + "nt", + "nu", + "on", + "pe", + "qc", + "sk", + "yk", + "cloudns", + "fantasyleague", + "ftpaccess", + "game-server", + "myphotos", + "scrapping", + "twmail", + "gov", + "blogspot", + "12hp", + "2ix", + "4lima", + "blogspot", + "gotdns", + "lima-city", + "square7", + "ac", + "asso", + "co", + "com", + "ed", + "edu", + "go", + "gouv", + "int", + "md", + "net", + "or", + "org", + "presse", + "xn--aroport-bya", + "www", + "blogspot", + "co", + "gob", + "gov", + "mil", + "nom", + "magentosite", + "sensiosite", + "statics", + "trafficplex", + "vapor", + "cloudns", + "co", + "com", + "gov", + "net", + "ac", + "ah", + "bj", + "com", + "cq", + "edu", + "fj", + "gd", + "gov", + "gs", + "gx", + "gz", + "ha", + "hb", + "he", + "hi", + "hk", + "hl", + "hn", + "jl", + "js", + "jx", + "ln", + "mil", + "mo", + "net", + "nm", + "nx", + "org", + "qh", + "sc", + "sd", + "sh", + "sn", + "sx", + "tj", + "tw", + "xj", + "xn--55qx5d", + "xn--io0a7i", + "xn--od0alg", + "xz", + "yn", + "zj", + "amazonaws", + "cn-north-1", + "compute", + "eb", + "elb", + "s3", + "cn-north-1", + "arts", + "com", + "edu", + "firm", + "gov", + "info", + "int", + "mil", + "net", + "nodum", + "nom", + "org", + "rec", + "web", + "blogspot", + "0emm", + "1kapp", + "1password", + "3utilities", + "4u", + "africa", + "alpha-myqnapcloud", + "amazonaws", + "appchizi", + "applinzi", + "appspot", + "ar", + "barsyonline", + "betainabox", + "bitballoon", + "blogdns", + "blogspot", + "blogsyte", + "bloxcms", + "bounty-full", + "bplaced", + "br", + "cechire", + "ciscofreak", + "cloudcontrolapp", + "cloudcontrolled", + "cn", + "co", + "codespot", + "damnserver", + "ddnsfree", + "ddnsgeek", + "ddnsking", + "de", + "dev-myqnapcloud", + "ditchyourip", + "dnsalias", + "dnsdojo", + "dnsiskinky", + "doesntexist", + "dontexist", + "doomdns", + "drayddns", + "dreamhosters", + "dsmynas", + "dyn-o-saur", + "dynalias", + "dyndns-at-home", + "dyndns-at-work", + "dyndns-blog", + "dyndns-free", + "dyndns-home", + "dyndns-ip", + "dyndns-mail", + "dyndns-office", + "dyndns-pics", + "dyndns-remote", + "dyndns-server", + "dyndns-web", + "dyndns-wiki", + "dyndns-work", + "dynns", + "elasticbeanstalk", + "est-a-la-maison", + "est-a-la-masion", + "est-le-patron", + "est-mon-blogueur", + "eu", + "evennode", + "familyds", + "fbsbx", + "firebaseapp", + "firewall-gateway", + "flynnhub", + "freebox-os", + "freeboxos", + "from-ak", + "from-al", + "from-ar", + "from-ca", + "from-ct", 
+ "from-dc", + "from-de", + "from-fl", + "from-ga", + "from-hi", + "from-ia", + "from-id", + "from-il", + "from-in", + "from-ks", + "from-ky", + "from-ma", + "from-md", + "from-mi", + "from-mn", + "from-mo", + "from-ms", + "from-mt", + "from-nc", + "from-nd", + "from-ne", + "from-nh", + "from-nj", + "from-nm", + "from-nv", + "from-oh", + "from-ok", + "from-or", + "from-pa", + "from-pr", + "from-ri", + "from-sc", + "from-sd", + "from-tn", + "from-tx", + "from-ut", + "from-va", + "from-vt", + "from-wa", + "from-wi", + "from-wv", + "from-wy", + "gb", + "geekgalaxy", + "getmyip", + "giize", + "githubusercontent", + "gleeze", + "googleapis", + "googlecode", + "gotdns", + "gotpantheon", + "gr", + "health-carereform", + "herokuapp", + "herokussl", + "hk", + "hobby-site", + "homelinux", + "homesecuritymac", + "homesecuritypc", + "homeunix", + "hu", + "iamallama", + "is-a-anarchist", + "is-a-blogger", + "is-a-bookkeeper", + "is-a-bulls-fan", + "is-a-caterer", + "is-a-chef", + "is-a-conservative", + "is-a-cpa", + "is-a-cubicle-slave", + "is-a-democrat", + "is-a-designer", + "is-a-doctor", + "is-a-financialadvisor", + "is-a-geek", + "is-a-green", + "is-a-guru", + "is-a-hard-worker", + "is-a-hunter", + "is-a-landscaper", + "is-a-lawyer", + "is-a-liberal", + "is-a-libertarian", + "is-a-llama", + "is-a-musician", + "is-a-nascarfan", + "is-a-nurse", + "is-a-painter", + "is-a-personaltrainer", + "is-a-photographer", + "is-a-player", + "is-a-republican", + "is-a-rockstar", + "is-a-socialist", + "is-a-student", + "is-a-teacher", + "is-a-techie", + "is-a-therapist", + "is-an-accountant", + "is-an-actor", + "is-an-actress", + "is-an-anarchist", + "is-an-artist", + "is-an-engineer", + "is-an-entertainer", + "is-certified", + "is-gone", + "is-into-anime", + "is-into-cars", + "is-into-cartoons", + "is-into-games", + "is-leet", + "is-not-certified", + "is-slick", + "is-uberleet", + "is-with-theband", + "isa-geek", + "isa-hockeynut", + "issmarterthanyou", + "jdevcloud", + "joyent", + "jpn", + "kozow", + "kr", + "likes-pie", + "likescandy", + "logoip", + "loseyourip", + "meteorapp", + "mex", + "myactivedirectory", + "myasustor", + "mydrobo", + "myqnapcloud", + "mysecuritycamera", + "myshopblocks", + "mytuleap", + "myvnc", + "neat-url", + "net-freaks", + "netlify", + "nfshost", + "no", + "on-aptible", + "onthewifi", + "ooguy", + "operaunite", + "outsystemscloud", + "ownprovider", + "pagefrontapp", + "pagespeedmobilizer", + "pgfog", + "pixolino", + "point2this", + "prgmr", + "publishproxy", + "qa2", + "qc", + "quicksytes", + "quipelements", + "rackmaze", + "remotewd", + "rhcloud", + "ru", + "sa", + "saves-the-whales", + "scrysec", + "se", + "securitytactics", + "selfip", + "sells-for-less", + "sells-for-u", + "servebbs", + "servebeer", + "servecounterstrike", + "serveexchange", + "serveftp", + "servegame", + "servehalflife", + "servehttp", + "servehumour", + "serveirc", + "servemp3", + "servep2p", + "servepics", + "servequake", + "servesarcasm", + "simple-url", + "sinaapp", + "space-to-rent", + "stufftoread", + "teaches-yoga", + "temp-dns", + "theworkpc", + "townnews-staging", + "uk", + "unusualperson", + "us", + "uy", + "vipsinaapp", + "withgoogle", + "withyoutube", + "workisboring", + "wpdevcloud", + "writesthisblog", + "xenapponazure", + "yolasite", + "za", + "ap-northeast-1", + "ap-northeast-2", + "ap-south-1", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", + "compute", + "compute-1", + "elb", + "eu-central-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "s3", + "s3-ap-northeast-1", + 
"s3-ap-northeast-2", + "s3-ap-south-1", + "s3-ap-southeast-1", + "s3-ap-southeast-2", + "s3-ca-central-1", + "s3-eu-central-1", + "s3-eu-west-1", + "s3-eu-west-2", + "s3-eu-west-3", + "s3-external-1", + "s3-fips-us-gov-west-1", + "s3-sa-east-1", + "s3-us-east-2", + "s3-us-gov-west-1", + "s3-us-west-1", + "s3-us-west-2", + "s3-website-ap-northeast-1", + "s3-website-ap-southeast-1", + "s3-website-ap-southeast-2", + "s3-website-eu-west-1", + "s3-website-sa-east-1", + "s3-website-us-east-1", + "s3-website-us-west-1", + "s3-website-us-west-2", + "sa-east-1", + "us-east-1", + "us-east-2", + "dualstack", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + "dualstack", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + "dualstack", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "alpha", + "beta", + "ap-northeast-1", + "ap-northeast-2", + "ap-south-1", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", + "eu-central-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-gov-west-1", + "us-west-1", + "us-west-2", + "eu-1", + "eu-2", + "eu-3", + "eu-4", + "us-1", + "us-2", + "us-3", + "us-4", + "apps", + "cns", + "eu", + "xen", + "de", + "ac", + "co", + "ed", + "fi", + "go", + "or", + "sa", + "com", + "edu", + "gov", + "inf", + "net", + "org", + "blogspot", + "com", + "edu", + "net", + "org", + "ath", + "gov", + "info", + "ac", + "biz", + "com", + "ekloges", + "gov", + "ltd", + "name", + "net", + "org", + "parliament", + "press", + "pro", + "tm", + "blogspot", + "blogspot", + "co", + "e4", + "metacentrum", + "realm", + "cloud", + "custom", + "12hp", + "2ix", + "4lima", + "barsy", + "blogspot", + "bplaced", + "com", + "cosidns", + "dd-dns", + "ddnss", + "dnshome", + "dnsupdater", + "dray-dns", + "draydns", + "dyn-ip24", + "dyn-vpn", + "dynamisches-dns", + "dyndns1", + "dynvpn", + "firewall-gateway", + "fuettertdasnetz", + "git-repos", + "goip", + "home-webserver", + "internet-dns", + "isteingeek", + "istmein", + "keymachine", + "l-o-g-i-n", + "lcube-server", + "lebtimnetz", + "leitungsen", + "lima-city", + "logoip", + "mein-vigor", + "my-gateway", + "my-router", + "my-vigor", + "my-wan", + "myhome-server", + "spdns", + "square7", + "svn-repos", + "syno-ds", + "synology-diskstation", + "synology-ds", + "taifun-dns", + "traeumtgerade", + "dyn", + "dyn", + "dyndns", + "dyn", + "biz", + "blogspot", + "co", + "firm", + "reg", + "store", + "com", + "edu", + "gov", + "net", + "org", + "art", + "com", + "edu", + "gob", + "gov", + "mil", + "net", + "org", + "sld", + "web", + "art", + "asso", + "com", + "edu", + "gov", + "net", + "org", + "pol", + "com", + "edu", + "fin", + "gob", + "gov", + "info", + "k12", + "med", + "mil", + "net", + "org", + "pro", + "aip", + "com", + "edu", + "fie", + "gov", + "lib", + "med", + "org", + "pri", + "riik", + "blogspot", + "com", + "edu", + "eun", + "gov", + "mil", + "name", + "net", + "org", + "sci", + "blogspot", + "com", + "edu", + "gob", + "nom", + "org", + "blogspot", + "compute", + "biz", + "com", + "edu", + "gov", + "info", + "name", + "net", + "org", + "1password", + "barsy", + "cloudns", + "diskstation", + "mycd", + "spdns", + "transurl", + "wellbeingzone", + "party", + "user", + "ybo", + "storj", + "aland", + "blogspot", + "dy", + "iki", + 
"ptplus", + "aeroport", + "assedic", + "asso", + "avocat", + "avoues", + "blogspot", + "cci", + "chambagri", + "chirurgiens-dentistes", + "chirurgiens-dentistes-en-france", + "com", + "experts-comptables", + "fbx-os", + "fbxos", + "freebox-os", + "freeboxos", + "geometre-expert", + "gouv", + "greta", + "huissier-justice", + "medecin", + "nom", + "notaires", + "on-web", + "pharmacien", + "port", + "prd", + "presse", + "tm", + "veterinaire", + "nom", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "pvt", + "co", + "cya", + "net", + "org", + "com", + "edu", + "gov", + "mil", + "org", + "com", + "edu", + "gov", + "ltd", + "mod", + "org", + "co", + "com", + "edu", + "net", + "nom", + "org", + "ac", + "com", + "edu", + "gov", + "net", + "org", + "cloud", + "asso", + "com", + "edu", + "mobi", + "net", + "org", + "blogspot", + "com", + "edu", + "gov", + "net", + "nym", + "org", + "com", + "edu", + "gob", + "ind", + "mil", + "net", + "nom", + "org", + "co", + "com", + "edu", + "gov", + "net", + "org", + "blogspot", + "com", + "edu", + "gov", + "idv", + "inc", + "ltd", + "net", + "org", + "xn--55qx5d", + "xn--ciqpn", + "xn--gmq050i", + "xn--gmqw5a", + "xn--io0a7i", + "xn--lcvr32d", + "xn--mk0axi", + "xn--mxtq1m", + "xn--od0alg", + "xn--od0aq3b", + "xn--tn0ag", + "xn--uc0atv", + "xn--uc0ay4a", + "xn--wcvs22d", + "xn--zf0avx", + "com", + "edu", + "gob", + "mil", + "net", + "nom", + "org", + "cloudaccess", + "freesite", + "opencraft", + "blogspot", + "com", + "from", + "iz", + "name", + "adult", + "art", + "asso", + "com", + "coop", + "edu", + "firm", + "gouv", + "info", + "med", + "net", + "org", + "perso", + "pol", + "pro", + "rel", + "shop", + "2000", + "agrar", + "blogspot", + "bolt", + "casino", + "city", + "co", + "erotica", + "erotika", + "film", + "forum", + "games", + "hotel", + "info", + "ingatlan", + "jogasz", + "konyvelo", + "lakas", + "media", + "news", + "org", + "priv", + "reklam", + "sex", + "shop", + "sport", + "suli", + "szex", + "tm", + "tozsde", + "utazas", + "video", + "ac", + "biz", + "co", + "desa", + "go", + "mil", + "my", + "net", + "or", + "sch", + "web", + "blogspot", + "blogspot", + "gov", + "ac", + "co", + "gov", + "idf", + "k12", + "muni", + "net", + "org", + "blogspot", + "ac", + "co", + "com", + "net", + "nom", + "org", + "ro", + "tt", + "tv", + "ltd", + "plc", + "ac", + "barsy", + "blogspot", + "cloudns", + "co", + "edu", + "firm", + "gen", + "gov", + "ind", + "mil", + "net", + "nic", + "org", + "res", + "barrel-of-knowledge", + "barrell-of-knowledge", + "cloudns", + "dvrcam", + "dynamic-dns", + "dyndns", + "for-our", + "groks-the", + "groks-this", + "here-for-more", + "ilovecollege", + "knowsitall", + "no-ip", + "nsupdate", + "selfip", + "v-info", + "webhop", + "eu", + "backplaneapp", + "boxfuse", + "browsersafetymark", + "com", + "dedyn", + "definima", + "drud", + "enonic", + "github", + "gitlab", + "hasura-app", + "hzc", + "lair", + "ngrok", + "nid", + "nodeart", + "nodum", + "pantheonsite", + "protonet", + "resindevice", + "resinstaging", + "s5y", + "sandcats", + "shiftedit", + "spacekit", + "stolos", + "thingdust", + "vaporcloud", + "wedeploy", + "customer", + "apps", + "stage", + "devices", + "dev", + "disrec", + "prod", + "testing", + "cust", + "cust", + "cust", + "cust", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "ac", + "co", + "gov", + "id", + "net", + "org", + "sch", + "xn--mgba3a4f16a", + "xn--mgba3a4fra", + "blogspot", + "com", + "cupcake", + "edu", + "gov", + "int", + "net", + "org", + "abr", + "abruzzo", + "ag", + "agrigento", + "al", 
+ "alessandria", + "alto-adige", + "altoadige", + "an", + "ancona", + "andria-barletta-trani", + "andria-trani-barletta", + "andriabarlettatrani", + "andriatranibarletta", + "ao", + "aosta", + "aosta-valley", + "aostavalley", + "aoste", + "ap", + "aq", + "aquila", + "ar", + "arezzo", + "ascoli-piceno", + "ascolipiceno", + "asti", + "at", + "av", + "avellino", + "ba", + "balsan", + "bari", + "barletta-trani-andria", + "barlettatraniandria", + "bas", + "basilicata", + "belluno", + "benevento", + "bergamo", + "bg", + "bi", + "biella", + "bl", + "blogspot", + "bn", + "bo", + "bologna", + "bolzano", + "bozen", + "br", + "brescia", + "brindisi", + "bs", + "bt", + "bz", + "ca", + "cagliari", + "cal", + "calabria", + "caltanissetta", + "cam", + "campania", + "campidano-medio", + "campidanomedio", + "campobasso", + "carbonia-iglesias", + "carboniaiglesias", + "carrara-massa", + "carraramassa", + "caserta", + "catania", + "catanzaro", + "cb", + "ce", + "cesena-forli", + "cesenaforli", + "ch", + "chieti", + "ci", + "cl", + "cn", + "co", + "como", + "cosenza", + "cr", + "cremona", + "crotone", + "cs", + "ct", + "cuneo", + "cz", + "dell-ogliastra", + "dellogliastra", + "edu", + "emilia-romagna", + "emiliaromagna", + "emr", + "en", + "enna", + "fc", + "fe", + "fermo", + "ferrara", + "fg", + "fi", + "firenze", + "florence", + "fm", + "foggia", + "forli-cesena", + "forlicesena", + "fr", + "friuli-v-giulia", + "friuli-ve-giulia", + "friuli-vegiulia", + "friuli-venezia-giulia", + "friuli-veneziagiulia", + "friuli-vgiulia", + "friuliv-giulia", + "friulive-giulia", + "friulivegiulia", + "friulivenezia-giulia", + "friuliveneziagiulia", + "friulivgiulia", + "frosinone", + "fvg", + "ge", + "genoa", + "genova", + "go", + "gorizia", + "gov", + "gr", + "grosseto", + "iglesias-carbonia", + "iglesiascarbonia", + "im", + "imperia", + "is", + "isernia", + "kr", + "la-spezia", + "laquila", + "laspezia", + "latina", + "laz", + "lazio", + "lc", + "le", + "lecce", + "lecco", + "li", + "lig", + "liguria", + "livorno", + "lo", + "lodi", + "lom", + "lombardia", + "lombardy", + "lt", + "lu", + "lucania", + "lucca", + "macerata", + "mantova", + "mar", + "marche", + "massa-carrara", + "massacarrara", + "matera", + "mb", + "mc", + "me", + "medio-campidano", + "mediocampidano", + "messina", + "mi", + "milan", + "milano", + "mn", + "mo", + "modena", + "mol", + "molise", + "monza", + "monza-brianza", + "monza-e-della-brianza", + "monzabrianza", + "monzaebrianza", + "monzaedellabrianza", + "ms", + "mt", + "na", + "naples", + "napoli", + "no", + "novara", + "nu", + "nuoro", + "og", + "ogliastra", + "olbia-tempio", + "olbiatempio", + "or", + "oristano", + "ot", + "pa", + "padova", + "padua", + "palermo", + "parma", + "pavia", + "pc", + "pd", + "pe", + "perugia", + "pesaro-urbino", + "pesarourbino", + "pescara", + "pg", + "pi", + "piacenza", + "piedmont", + "piemonte", + "pisa", + "pistoia", + "pmn", + "pn", + "po", + "pordenone", + "potenza", + "pr", + "prato", + "pt", + "pu", + "pug", + "puglia", + "pv", + "pz", + "ra", + "ragusa", + "ravenna", + "rc", + "re", + "reggio-calabria", + "reggio-emilia", + "reggiocalabria", + "reggioemilia", + "rg", + "ri", + "rieti", + "rimini", + "rm", + "rn", + "ro", + "roma", + "rome", + "rovigo", + "sa", + "salerno", + "sar", + "sardegna", + "sardinia", + "sassari", + "savona", + "si", + "sic", + "sicilia", + "sicily", + "siena", + "siracusa", + "so", + "sondrio", + "sp", + "sr", + "ss", + "suedtirol", + "sv", + "ta", + "taa", + "taranto", + "te", + "tempio-olbia", + "tempioolbia", + "teramo", + 
"terni", + "tn", + "to", + "torino", + "tos", + "toscana", + "tp", + "tr", + "trani-andria-barletta", + "trani-barletta-andria", + "traniandriabarletta", + "tranibarlettaandria", + "trapani", + "trentino", + "trentino-a-adige", + "trentino-aadige", + "trentino-alto-adige", + "trentino-altoadige", + "trentino-s-tirol", + "trentino-stirol", + "trentino-sud-tirol", + "trentino-sudtirol", + "trentino-sued-tirol", + "trentino-suedtirol", + "trentinoa-adige", + "trentinoaadige", + "trentinoalto-adige", + "trentinoaltoadige", + "trentinos-tirol", + "trentinostirol", + "trentinosud-tirol", + "trentinosudtirol", + "trentinosued-tirol", + "trentinosuedtirol", + "trento", + "treviso", + "trieste", + "ts", + "turin", + "tuscany", + "tv", + "ud", + "udine", + "umb", + "umbria", + "urbino-pesaro", + "urbinopesaro", + "va", + "val-d-aosta", + "val-daosta", + "vald-aosta", + "valdaosta", + "valle-aosta", + "valle-d-aosta", + "valle-daosta", + "valleaosta", + "valled-aosta", + "valledaosta", + "vallee-aoste", + "valleeaoste", + "vao", + "varese", + "vb", + "vc", + "vda", + "ve", + "ven", + "veneto", + "venezia", + "venice", + "verbania", + "vercelli", + "verona", + "vi", + "vibo-valentia", + "vibovalentia", + "vicenza", + "viterbo", + "vr", + "vs", + "vt", + "vv", + "co", + "net", + "org", + "com", + "edu", + "gov", + "mil", + "name", + "net", + "org", + "sch", + "ac", + "ad", + "aichi", + "akita", + "aomori", + "blogspot", + "chiba", + "co", + "ed", + "ehime", + "fukui", + "fukuoka", + "fukushima", + "gifu", + "go", + "gr", + "gunma", + "hiroshima", + "hokkaido", + "hyogo", + "ibaraki", + "ishikawa", + "iwate", + "kagawa", + "kagoshima", + "kanagawa", + "kawasaki", + "kitakyushu", + "kobe", + "kochi", + "kumamoto", + "kyoto", + "lg", + "mie", + "miyagi", + "miyazaki", + "nagano", + "nagasaki", + "nagoya", + "nara", + "ne", + "niigata", + "oita", + "okayama", + "okinawa", + "or", + "osaka", + "saga", + "saitama", + "sapporo", + "sendai", + "shiga", + "shimane", + "shizuoka", + "tochigi", + "tokushima", + "tokyo", + "tottori", + "toyama", + "wakayama", + "xn--0trq7p7nn", + "xn--1ctwo", + "xn--1lqs03n", + "xn--1lqs71d", + "xn--2m4a15e", + "xn--32vp30h", + "xn--4it168d", + "xn--4it797k", + "xn--4pvxs", + "xn--5js045d", + "xn--5rtp49c", + "xn--5rtq34k", + "xn--6btw5a", + "xn--6orx2r", + "xn--7t0a264c", + "xn--8ltr62k", + "xn--8pvr4u", + "xn--c3s14m", + "xn--d5qv7z876c", + "xn--djrs72d6uy", + "xn--djty4k", + "xn--efvn9s", + "xn--ehqz56n", + "xn--elqq16h", + "xn--f6qx53a", + "xn--k7yn95e", + "xn--kbrq7o", + "xn--klt787d", + "xn--kltp7d", + "xn--kltx9a", + "xn--klty5x", + "xn--mkru45i", + "xn--nit225k", + "xn--ntso0iqx3a", + "xn--ntsq17g", + "xn--pssu33l", + "xn--qqqt11m", + "xn--rht27z", + "xn--rht3d", + "xn--rht61e", + "xn--rny31h", + "xn--tor131o", + "xn--uist22h", + "xn--uisz3g", + "xn--uuwu58a", + "xn--vgu402c", + "xn--zbx025d", + "yamagata", + "yamaguchi", + "yamanashi", + "yokohama", + "aisai", + "ama", + "anjo", + "asuke", + "chiryu", + "chita", + "fuso", + "gamagori", + "handa", + "hazu", + "hekinan", + "higashiura", + "ichinomiya", + "inazawa", + "inuyama", + "isshiki", + "iwakura", + "kanie", + "kariya", + "kasugai", + "kira", + "kiyosu", + "komaki", + "konan", + "kota", + "mihama", + "miyoshi", + "nishio", + "nisshin", + "obu", + "oguchi", + "oharu", + "okazaki", + "owariasahi", + "seto", + "shikatsu", + "shinshiro", + "shitara", + "tahara", + "takahama", + "tobishima", + "toei", + "togo", + "tokai", + "tokoname", + "toyoake", + "toyohashi", + "toyokawa", + "toyone", + "toyota", + "tsushima", + 
"yatomi", + "akita", + "daisen", + "fujisato", + "gojome", + "hachirogata", + "happou", + "higashinaruse", + "honjo", + "honjyo", + "ikawa", + "kamikoani", + "kamioka", + "katagami", + "kazuno", + "kitaakita", + "kosaka", + "kyowa", + "misato", + "mitane", + "moriyoshi", + "nikaho", + "noshiro", + "odate", + "oga", + "ogata", + "semboku", + "yokote", + "yurihonjo", + "aomori", + "gonohe", + "hachinohe", + "hashikami", + "hiranai", + "hirosaki", + "itayanagi", + "kuroishi", + "misawa", + "mutsu", + "nakadomari", + "noheji", + "oirase", + "owani", + "rokunohe", + "sannohe", + "shichinohe", + "shingo", + "takko", + "towada", + "tsugaru", + "tsuruta", + "abiko", + "asahi", + "chonan", + "chosei", + "choshi", + "chuo", + "funabashi", + "futtsu", + "hanamigawa", + "ichihara", + "ichikawa", + "ichinomiya", + "inzai", + "isumi", + "kamagaya", + "kamogawa", + "kashiwa", + "katori", + "katsuura", + "kimitsu", + "kisarazu", + "kozaki", + "kujukuri", + "kyonan", + "matsudo", + "midori", + "mihama", + "minamiboso", + "mobara", + "mutsuzawa", + "nagara", + "nagareyama", + "narashino", + "narita", + "noda", + "oamishirasato", + "omigawa", + "onjuku", + "otaki", + "sakae", + "sakura", + "shimofusa", + "shirako", + "shiroi", + "shisui", + "sodegaura", + "sosa", + "tako", + "tateyama", + "togane", + "tohnosho", + "tomisato", + "urayasu", + "yachimata", + "yachiyo", + "yokaichiba", + "yokoshibahikari", + "yotsukaido", + "ainan", + "honai", + "ikata", + "imabari", + "iyo", + "kamijima", + "kihoku", + "kumakogen", + "masaki", + "matsuno", + "matsuyama", + "namikata", + "niihama", + "ozu", + "saijo", + "seiyo", + "shikokuchuo", + "tobe", + "toon", + "uchiko", + "uwajima", + "yawatahama", + "echizen", + "eiheiji", + "fukui", + "ikeda", + "katsuyama", + "mihama", + "minamiechizen", + "obama", + "ohi", + "ono", + "sabae", + "sakai", + "takahama", + "tsuruga", + "wakasa", + "ashiya", + "buzen", + "chikugo", + "chikuho", + "chikujo", + "chikushino", + "chikuzen", + "chuo", + "dazaifu", + "fukuchi", + "hakata", + "higashi", + "hirokawa", + "hisayama", + "iizuka", + "inatsuki", + "kaho", + "kasuga", + "kasuya", + "kawara", + "keisen", + "koga", + "kurate", + "kurogi", + "kurume", + "minami", + "miyako", + "miyama", + "miyawaka", + "mizumaki", + "munakata", + "nakagawa", + "nakama", + "nishi", + "nogata", + "ogori", + "okagaki", + "okawa", + "oki", + "omuta", + "onga", + "onojo", + "oto", + "saigawa", + "sasaguri", + "shingu", + "shinyoshitomi", + "shonai", + "soeda", + "sue", + "tachiarai", + "tagawa", + "takata", + "toho", + "toyotsu", + "tsuiki", + "ukiha", + "umi", + "usui", + "yamada", + "yame", + "yanagawa", + "yukuhashi", + "aizubange", + "aizumisato", + "aizuwakamatsu", + "asakawa", + "bandai", + "date", + "fukushima", + "furudono", + "futaba", + "hanawa", + "higashi", + "hirata", + "hirono", + "iitate", + "inawashiro", + "ishikawa", + "iwaki", + "izumizaki", + "kagamiishi", + "kaneyama", + "kawamata", + "kitakata", + "kitashiobara", + "koori", + "koriyama", + "kunimi", + "miharu", + "mishima", + "namie", + "nango", + "nishiaizu", + "nishigo", + "okuma", + "omotego", + "ono", + "otama", + "samegawa", + "shimogo", + "shirakawa", + "showa", + "soma", + "sukagawa", + "taishin", + "tamakawa", + "tanagura", + "tenei", + "yabuki", + "yamato", + "yamatsuri", + "yanaizu", + "yugawa", + "anpachi", + "ena", + "gifu", + "ginan", + "godo", + "gujo", + "hashima", + "hichiso", + "hida", + "higashishirakawa", + "ibigawa", + "ikeda", + "kakamigahara", + "kani", + "kasahara", + "kasamatsu", + "kawaue", + "kitagata", + "mino", + 
"minokamo", + "mitake", + "mizunami", + "motosu", + "nakatsugawa", + "ogaki", + "sakahogi", + "seki", + "sekigahara", + "shirakawa", + "tajimi", + "takayama", + "tarui", + "toki", + "tomika", + "wanouchi", + "yamagata", + "yaotsu", + "yoro", + "annaka", + "chiyoda", + "fujioka", + "higashiagatsuma", + "isesaki", + "itakura", + "kanna", + "kanra", + "katashina", + "kawaba", + "kiryu", + "kusatsu", + "maebashi", + "meiwa", + "midori", + "minakami", + "naganohara", + "nakanojo", + "nanmoku", + "numata", + "oizumi", + "ora", + "ota", + "shibukawa", + "shimonita", + "shinto", + "showa", + "takasaki", + "takayama", + "tamamura", + "tatebayashi", + "tomioka", + "tsukiyono", + "tsumagoi", + "ueno", + "yoshioka", + "asaminami", + "daiwa", + "etajima", + "fuchu", + "fukuyama", + "hatsukaichi", + "higashihiroshima", + "hongo", + "jinsekikogen", + "kaita", + "kui", + "kumano", + "kure", + "mihara", + "miyoshi", + "naka", + "onomichi", + "osakikamijima", + "otake", + "saka", + "sera", + "seranishi", + "shinichi", + "shobara", + "takehara", + "abashiri", + "abira", + "aibetsu", + "akabira", + "akkeshi", + "asahikawa", + "ashibetsu", + "ashoro", + "assabu", + "atsuma", + "bibai", + "biei", + "bifuka", + "bihoro", + "biratori", + "chippubetsu", + "chitose", + "date", + "ebetsu", + "embetsu", + "eniwa", + "erimo", + "esan", + "esashi", + "fukagawa", + "fukushima", + "furano", + "furubira", + "haboro", + "hakodate", + "hamatonbetsu", + "hidaka", + "higashikagura", + "higashikawa", + "hiroo", + "hokuryu", + "hokuto", + "honbetsu", + "horokanai", + "horonobe", + "ikeda", + "imakane", + "ishikari", + "iwamizawa", + "iwanai", + "kamifurano", + "kamikawa", + "kamishihoro", + "kamisunagawa", + "kamoenai", + "kayabe", + "kembuchi", + "kikonai", + "kimobetsu", + "kitahiroshima", + "kitami", + "kiyosato", + "koshimizu", + "kunneppu", + "kuriyama", + "kuromatsunai", + "kushiro", + "kutchan", + "kyowa", + "mashike", + "matsumae", + "mikasa", + "minamifurano", + "mombetsu", + "moseushi", + "mukawa", + "muroran", + "naie", + "nakagawa", + "nakasatsunai", + "nakatombetsu", + "nanae", + "nanporo", + "nayoro", + "nemuro", + "niikappu", + "niki", + "nishiokoppe", + "noboribetsu", + "numata", + "obihiro", + "obira", + "oketo", + "okoppe", + "otaru", + "otobe", + "otofuke", + "otoineppu", + "oumu", + "ozora", + "pippu", + "rankoshi", + "rebun", + "rikubetsu", + "rishiri", + "rishirifuji", + "saroma", + "sarufutsu", + "shakotan", + "shari", + "shibecha", + "shibetsu", + "shikabe", + "shikaoi", + "shimamaki", + "shimizu", + "shimokawa", + "shinshinotsu", + "shintoku", + "shiranuka", + "shiraoi", + "shiriuchi", + "sobetsu", + "sunagawa", + "taiki", + "takasu", + "takikawa", + "takinoue", + "teshikaga", + "tobetsu", + "tohma", + "tomakomai", + "tomari", + "toya", + "toyako", + "toyotomi", + "toyoura", + "tsubetsu", + "tsukigata", + "urakawa", + "urausu", + "uryu", + "utashinai", + "wakkanai", + "wassamu", + "yakumo", + "yoichi", + "aioi", + "akashi", + "ako", + "amagasaki", + "aogaki", + "asago", + "ashiya", + "awaji", + "fukusaki", + "goshiki", + "harima", + "himeji", + "ichikawa", + "inagawa", + "itami", + "kakogawa", + "kamigori", + "kamikawa", + "kasai", + "kasuga", + "kawanishi", + "miki", + "minamiawaji", + "nishinomiya", + "nishiwaki", + "ono", + "sanda", + "sannan", + "sasayama", + "sayo", + "shingu", + "shinonsen", + "shiso", + "sumoto", + "taishi", + "taka", + "takarazuka", + "takasago", + "takino", + "tamba", + "tatsuno", + "toyooka", + "yabu", + "yashiro", + "yoka", + "yokawa", + "ami", + "asahi", + "bando", + 
"chikusei", + "daigo", + "fujishiro", + "hitachi", + "hitachinaka", + "hitachiomiya", + "hitachiota", + "ibaraki", + "ina", + "inashiki", + "itako", + "iwama", + "joso", + "kamisu", + "kasama", + "kashima", + "kasumigaura", + "koga", + "miho", + "mito", + "moriya", + "naka", + "namegata", + "oarai", + "ogawa", + "omitama", + "ryugasaki", + "sakai", + "sakuragawa", + "shimodate", + "shimotsuma", + "shirosato", + "sowa", + "suifu", + "takahagi", + "tamatsukuri", + "tokai", + "tomobe", + "tone", + "toride", + "tsuchiura", + "tsukuba", + "uchihara", + "ushiku", + "yachiyo", + "yamagata", + "yawara", + "yuki", + "anamizu", + "hakui", + "hakusan", + "kaga", + "kahoku", + "kanazawa", + "kawakita", + "komatsu", + "nakanoto", + "nanao", + "nomi", + "nonoichi", + "noto", + "shika", + "suzu", + "tsubata", + "tsurugi", + "uchinada", + "wajima", + "fudai", + "fujisawa", + "hanamaki", + "hiraizumi", + "hirono", + "ichinohe", + "ichinoseki", + "iwaizumi", + "iwate", + "joboji", + "kamaishi", + "kanegasaki", + "karumai", + "kawai", + "kitakami", + "kuji", + "kunohe", + "kuzumaki", + "miyako", + "mizusawa", + "morioka", + "ninohe", + "noda", + "ofunato", + "oshu", + "otsuchi", + "rikuzentakata", + "shiwa", + "shizukuishi", + "sumita", + "tanohata", + "tono", + "yahaba", + "yamada", + "ayagawa", + "higashikagawa", + "kanonji", + "kotohira", + "manno", + "marugame", + "mitoyo", + "naoshima", + "sanuki", + "tadotsu", + "takamatsu", + "tonosho", + "uchinomi", + "utazu", + "zentsuji", + "akune", + "amami", + "hioki", + "isa", + "isen", + "izumi", + "kagoshima", + "kanoya", + "kawanabe", + "kinko", + "kouyama", + "makurazaki", + "matsumoto", + "minamitane", + "nakatane", + "nishinoomote", + "satsumasendai", + "soo", + "tarumizu", + "yusui", + "aikawa", + "atsugi", + "ayase", + "chigasaki", + "ebina", + "fujisawa", + "hadano", + "hakone", + "hiratsuka", + "isehara", + "kaisei", + "kamakura", + "kiyokawa", + "matsuda", + "minamiashigara", + "miura", + "nakai", + "ninomiya", + "odawara", + "oi", + "oiso", + "sagamihara", + "samukawa", + "tsukui", + "yamakita", + "yamato", + "yokosuka", + "yugawara", + "zama", + "zushi", + "city", + "city", + "city", + "aki", + "geisei", + "hidaka", + "higashitsuno", + "ino", + "kagami", + "kami", + "kitagawa", + "kochi", + "mihara", + "motoyama", + "muroto", + "nahari", + "nakamura", + "nankoku", + "nishitosa", + "niyodogawa", + "ochi", + "okawa", + "otoyo", + "otsuki", + "sakawa", + "sukumo", + "susaki", + "tosa", + "tosashimizu", + "toyo", + "tsuno", + "umaji", + "yasuda", + "yusuhara", + "amakusa", + "arao", + "aso", + "choyo", + "gyokuto", + "kamiamakusa", + "kikuchi", + "kumamoto", + "mashiki", + "mifune", + "minamata", + "minamioguni", + "nagasu", + "nishihara", + "oguni", + "ozu", + "sumoto", + "takamori", + "uki", + "uto", + "yamaga", + "yamato", + "yatsushiro", + "ayabe", + "fukuchiyama", + "higashiyama", + "ide", + "ine", + "joyo", + "kameoka", + "kamo", + "kita", + "kizu", + "kumiyama", + "kyotamba", + "kyotanabe", + "kyotango", + "maizuru", + "minami", + "minamiyamashiro", + "miyazu", + "muko", + "nagaokakyo", + "nakagyo", + "nantan", + "oyamazaki", + "sakyo", + "seika", + "tanabe", + "uji", + "ujitawara", + "wazuka", + "yamashina", + "yawata", + "asahi", + "inabe", + "ise", + "kameyama", + "kawagoe", + "kiho", + "kisosaki", + "kiwa", + "komono", + "kumano", + "kuwana", + "matsusaka", + "meiwa", + "mihama", + "minamiise", + "misugi", + "miyama", + "nabari", + "shima", + "suzuka", + "tado", + "taiki", + "taki", + "tamaki", + "toba", + "tsu", + "udono", + "ureshino", + 
"watarai", + "yokkaichi", + "furukawa", + "higashimatsushima", + "ishinomaki", + "iwanuma", + "kakuda", + "kami", + "kawasaki", + "marumori", + "matsushima", + "minamisanriku", + "misato", + "murata", + "natori", + "ogawara", + "ohira", + "onagawa", + "osaki", + "rifu", + "semine", + "shibata", + "shichikashuku", + "shikama", + "shiogama", + "shiroishi", + "tagajo", + "taiwa", + "tome", + "tomiya", + "wakuya", + "watari", + "yamamoto", + "zao", + "aya", + "ebino", + "gokase", + "hyuga", + "kadogawa", + "kawaminami", + "kijo", + "kitagawa", + "kitakata", + "kitaura", + "kobayashi", + "kunitomi", + "kushima", + "mimata", + "miyakonojo", + "miyazaki", + "morotsuka", + "nichinan", + "nishimera", + "nobeoka", + "saito", + "shiiba", + "shintomi", + "takaharu", + "takanabe", + "takazaki", + "tsuno", + "achi", + "agematsu", + "anan", + "aoki", + "asahi", + "azumino", + "chikuhoku", + "chikuma", + "chino", + "fujimi", + "hakuba", + "hara", + "hiraya", + "iida", + "iijima", + "iiyama", + "iizuna", + "ikeda", + "ikusaka", + "ina", + "karuizawa", + "kawakami", + "kiso", + "kisofukushima", + "kitaaiki", + "komagane", + "komoro", + "matsukawa", + "matsumoto", + "miasa", + "minamiaiki", + "minamimaki", + "minamiminowa", + "minowa", + "miyada", + "miyota", + "mochizuki", + "nagano", + "nagawa", + "nagiso", + "nakagawa", + "nakano", + "nozawaonsen", + "obuse", + "ogawa", + "okaya", + "omachi", + "omi", + "ookuwa", + "ooshika", + "otaki", + "otari", + "sakae", + "sakaki", + "saku", + "sakuho", + "shimosuwa", + "shinanomachi", + "shiojiri", + "suwa", + "suzaka", + "takagi", + "takamori", + "takayama", + "tateshina", + "tatsuno", + "togakushi", + "togura", + "tomi", + "ueda", + "wada", + "yamagata", + "yamanouchi", + "yasaka", + "yasuoka", + "chijiwa", + "futsu", + "goto", + "hasami", + "hirado", + "iki", + "isahaya", + "kawatana", + "kuchinotsu", + "matsuura", + "nagasaki", + "obama", + "omura", + "oseto", + "saikai", + "sasebo", + "seihi", + "shimabara", + "shinkamigoto", + "togitsu", + "tsushima", + "unzen", + "city", + "ando", + "gose", + "heguri", + "higashiyoshino", + "ikaruga", + "ikoma", + "kamikitayama", + "kanmaki", + "kashiba", + "kashihara", + "katsuragi", + "kawai", + "kawakami", + "kawanishi", + "koryo", + "kurotaki", + "mitsue", + "miyake", + "nara", + "nosegawa", + "oji", + "ouda", + "oyodo", + "sakurai", + "sango", + "shimoichi", + "shimokitayama", + "shinjo", + "soni", + "takatori", + "tawaramoto", + "tenkawa", + "tenri", + "uda", + "yamatokoriyama", + "yamatotakada", + "yamazoe", + "yoshino", + "aga", + "agano", + "gosen", + "itoigawa", + "izumozaki", + "joetsu", + "kamo", + "kariwa", + "kashiwazaki", + "minamiuonuma", + "mitsuke", + "muika", + "murakami", + "myoko", + "nagaoka", + "niigata", + "ojiya", + "omi", + "sado", + "sanjo", + "seiro", + "seirou", + "sekikawa", + "shibata", + "tagami", + "tainai", + "tochio", + "tokamachi", + "tsubame", + "tsunan", + "uonuma", + "yahiko", + "yoita", + "yuzawa", + "beppu", + "bungoono", + "bungotakada", + "hasama", + "hiji", + "himeshima", + "hita", + "kamitsue", + "kokonoe", + "kuju", + "kunisaki", + "kusu", + "oita", + "saiki", + "taketa", + "tsukumi", + "usa", + "usuki", + "yufu", + "akaiwa", + "asakuchi", + "bizen", + "hayashima", + "ibara", + "kagamino", + "kasaoka", + "kibichuo", + "kumenan", + "kurashiki", + "maniwa", + "misaki", + "nagi", + "niimi", + "nishiawakura", + "okayama", + "satosho", + "setouchi", + "shinjo", + "shoo", + "soja", + "takahashi", + "tamano", + "tsuyama", + "wake", + "yakage", + "aguni", + "ginowan", + "ginoza", + 
"gushikami", + "haebaru", + "higashi", + "hirara", + "iheya", + "ishigaki", + "ishikawa", + "itoman", + "izena", + "kadena", + "kin", + "kitadaito", + "kitanakagusuku", + "kumejima", + "kunigami", + "minamidaito", + "motobu", + "nago", + "naha", + "nakagusuku", + "nakijin", + "nanjo", + "nishihara", + "ogimi", + "okinawa", + "onna", + "shimoji", + "taketomi", + "tarama", + "tokashiki", + "tomigusuku", + "tonaki", + "urasoe", + "uruma", + "yaese", + "yomitan", + "yonabaru", + "yonaguni", + "zamami", + "abeno", + "chihayaakasaka", + "chuo", + "daito", + "fujiidera", + "habikino", + "hannan", + "higashiosaka", + "higashisumiyoshi", + "higashiyodogawa", + "hirakata", + "ibaraki", + "ikeda", + "izumi", + "izumiotsu", + "izumisano", + "kadoma", + "kaizuka", + "kanan", + "kashiwara", + "katano", + "kawachinagano", + "kishiwada", + "kita", + "kumatori", + "matsubara", + "minato", + "minoh", + "misaki", + "moriguchi", + "neyagawa", + "nishi", + "nose", + "osakasayama", + "sakai", + "sayama", + "sennan", + "settsu", + "shijonawate", + "shimamoto", + "suita", + "tadaoka", + "taishi", + "tajiri", + "takaishi", + "takatsuki", + "tondabayashi", + "toyonaka", + "toyono", + "yao", + "ariake", + "arita", + "fukudomi", + "genkai", + "hamatama", + "hizen", + "imari", + "kamimine", + "kanzaki", + "karatsu", + "kashima", + "kitagata", + "kitahata", + "kiyama", + "kouhoku", + "kyuragi", + "nishiarita", + "ogi", + "omachi", + "ouchi", + "saga", + "shiroishi", + "taku", + "tara", + "tosu", + "yoshinogari", + "arakawa", + "asaka", + "chichibu", + "fujimi", + "fujimino", + "fukaya", + "hanno", + "hanyu", + "hasuda", + "hatogaya", + "hatoyama", + "hidaka", + "higashichichibu", + "higashimatsuyama", + "honjo", + "ina", + "iruma", + "iwatsuki", + "kamiizumi", + "kamikawa", + "kamisato", + "kasukabe", + "kawagoe", + "kawaguchi", + "kawajima", + "kazo", + "kitamoto", + "koshigaya", + "kounosu", + "kuki", + "kumagaya", + "matsubushi", + "minano", + "misato", + "miyashiro", + "miyoshi", + "moroyama", + "nagatoro", + "namegawa", + "niiza", + "ogano", + "ogawa", + "ogose", + "okegawa", + "omiya", + "otaki", + "ranzan", + "ryokami", + "saitama", + "sakado", + "satte", + "sayama", + "shiki", + "shiraoka", + "soka", + "sugito", + "toda", + "tokigawa", + "tokorozawa", + "tsurugashima", + "urawa", + "warabi", + "yashio", + "yokoze", + "yono", + "yorii", + "yoshida", + "yoshikawa", + "yoshimi", + "city", + "city", + "aisho", + "gamo", + "higashiomi", + "hikone", + "koka", + "konan", + "kosei", + "koto", + "kusatsu", + "maibara", + "moriyama", + "nagahama", + "nishiazai", + "notogawa", + "omihachiman", + "otsu", + "ritto", + "ryuoh", + "takashima", + "takatsuki", + "torahime", + "toyosato", + "yasu", + "akagi", + "ama", + "gotsu", + "hamada", + "higashiizumo", + "hikawa", + "hikimi", + "izumo", + "kakinoki", + "masuda", + "matsue", + "misato", + "nishinoshima", + "ohda", + "okinoshima", + "okuizumo", + "shimane", + "tamayu", + "tsuwano", + "unnan", + "yakumo", + "yasugi", + "yatsuka", + "arai", + "atami", + "fuji", + "fujieda", + "fujikawa", + "fujinomiya", + "fukuroi", + "gotemba", + "haibara", + "hamamatsu", + "higashiizu", + "ito", + "iwata", + "izu", + "izunokuni", + "kakegawa", + "kannami", + "kawanehon", + "kawazu", + "kikugawa", + "kosai", + "makinohara", + "matsuzaki", + "minamiizu", + "mishima", + "morimachi", + "nishiizu", + "numazu", + "omaezaki", + "shimada", + "shimizu", + "shimoda", + "shizuoka", + "susono", + "yaizu", + "yoshida", + "ashikaga", + "bato", + "haga", + "ichikai", + "iwafune", + "kaminokawa", + 
"kanuma", + "karasuyama", + "kuroiso", + "mashiko", + "mibu", + "moka", + "motegi", + "nasu", + "nasushiobara", + "nikko", + "nishikata", + "nogi", + "ohira", + "ohtawara", + "oyama", + "sakura", + "sano", + "shimotsuke", + "shioya", + "takanezawa", + "tochigi", + "tsuga", + "ujiie", + "utsunomiya", + "yaita", + "aizumi", + "anan", + "ichiba", + "itano", + "kainan", + "komatsushima", + "matsushige", + "mima", + "minami", + "miyoshi", + "mugi", + "nakagawa", + "naruto", + "sanagochi", + "shishikui", + "tokushima", + "wajiki", + "adachi", + "akiruno", + "akishima", + "aogashima", + "arakawa", + "bunkyo", + "chiyoda", + "chofu", + "chuo", + "edogawa", + "fuchu", + "fussa", + "hachijo", + "hachioji", + "hamura", + "higashikurume", + "higashimurayama", + "higashiyamato", + "hino", + "hinode", + "hinohara", + "inagi", + "itabashi", + "katsushika", + "kita", + "kiyose", + "kodaira", + "koganei", + "kokubunji", + "komae", + "koto", + "kouzushima", + "kunitachi", + "machida", + "meguro", + "minato", + "mitaka", + "mizuho", + "musashimurayama", + "musashino", + "nakano", + "nerima", + "ogasawara", + "okutama", + "ome", + "oshima", + "ota", + "setagaya", + "shibuya", + "shinagawa", + "shinjuku", + "suginami", + "sumida", + "tachikawa", + "taito", + "tama", + "toshima", + "chizu", + "hino", + "kawahara", + "koge", + "kotoura", + "misasa", + "nanbu", + "nichinan", + "sakaiminato", + "tottori", + "wakasa", + "yazu", + "yonago", + "asahi", + "fuchu", + "fukumitsu", + "funahashi", + "himi", + "imizu", + "inami", + "johana", + "kamiichi", + "kurobe", + "nakaniikawa", + "namerikawa", + "nanto", + "nyuzen", + "oyabe", + "taira", + "takaoka", + "tateyama", + "toga", + "tonami", + "toyama", + "unazuki", + "uozu", + "yamada", + "arida", + "aridagawa", + "gobo", + "hashimoto", + "hidaka", + "hirogawa", + "inami", + "iwade", + "kainan", + "kamitonda", + "katsuragi", + "kimino", + "kinokawa", + "kitayama", + "koya", + "koza", + "kozagawa", + "kudoyama", + "kushimoto", + "mihama", + "misato", + "nachikatsuura", + "shingu", + "shirahama", + "taiji", + "tanabe", + "wakayama", + "yuasa", + "yura", + "asahi", + "funagata", + "higashine", + "iide", + "kahoku", + "kaminoyama", + "kaneyama", + "kawanishi", + "mamurogawa", + "mikawa", + "murayama", + "nagai", + "nakayama", + "nanyo", + "nishikawa", + "obanazawa", + "oe", + "oguni", + "ohkura", + "oishida", + "sagae", + "sakata", + "sakegawa", + "shinjo", + "shirataka", + "shonai", + "takahata", + "tendo", + "tozawa", + "tsuruoka", + "yamagata", + "yamanobe", + "yonezawa", + "yuza", + "abu", + "hagi", + "hikari", + "hofu", + "iwakuni", + "kudamatsu", + "mitou", + "nagato", + "oshima", + "shimonoseki", + "shunan", + "tabuse", + "tokuyama", + "toyota", + "ube", + "yuu", + "chuo", + "doshi", + "fuefuki", + "fujikawa", + "fujikawaguchiko", + "fujiyoshida", + "hayakawa", + "hokuto", + "ichikawamisato", + "kai", + "kofu", + "koshu", + "kosuge", + "minami-alps", + "minobu", + "nakamichi", + "nanbu", + "narusawa", + "nirasaki", + "nishikatsura", + "oshino", + "otsuki", + "showa", + "tabayama", + "tsuru", + "uenohara", + "yamanakako", + "yamanashi", + "city", + "ac", + "co", + "go", + "info", + "me", + "mobi", + "ne", + "or", + "sc", + "blogspot", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "biz", + "com", + "edu", + "gov", + "info", + "net", + "org", + "ass", + "asso", + "com", + "coop", + "edu", + "gouv", + "gov", + "medecin", + "mil", + "nom", + "notaires", + "org", + "pharmaciens", + "prd", + "presse", + "tm", + "veterinaire", + "edu", + "gov", + "net", + "org", + 
"com", + "edu", + "gov", + "org", + "rep", + "tra", + "ac", + "blogspot", + "busan", + "chungbuk", + "chungnam", + "co", + "daegu", + "daejeon", + "es", + "gangwon", + "go", + "gwangju", + "gyeongbuk", + "gyeonggi", + "gyeongnam", + "hs", + "incheon", + "jeju", + "jeonbuk", + "jeonnam", + "kg", + "mil", + "ms", + "ne", + "or", + "pe", + "re", + "sc", + "seoul", + "ulsan", + "co", + "edu", + "com", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "gov", + "mil", + "net", + "nym", + "org", + "bnr", + "c", + "com", + "edu", + "gov", + "info", + "int", + "net", + "nym", + "org", + "per", + "static", + "dev", + "sites", + "com", + "edu", + "gov", + "net", + "org", + "co", + "com", + "edu", + "gov", + "net", + "org", + "oy", + "blogspot", + "nom", + "nym", + "cyon", + "mypep", + "ac", + "assn", + "com", + "edu", + "gov", + "grp", + "hotel", + "int", + "ltd", + "net", + "ngo", + "org", + "sch", + "soc", + "web", + "com", + "edu", + "gov", + "net", + "org", + "co", + "org", + "blogspot", + "gov", + "nym", + "blogspot", + "nym", + "asn", + "com", + "conf", + "edu", + "gov", + "id", + "mil", + "net", + "org", + "com", + "edu", + "gov", + "id", + "med", + "net", + "org", + "plc", + "sch", + "ac", + "co", + "gov", + "net", + "org", + "press", + "router", + "asso", + "tm", + "blogspot", + "ac", + "brasilia", + "c66", + "co", + "daplie", + "ddns", + "diskstation", + "dnsfor", + "dscloud", + "edu", + "filegear", + "gov", + "hopto", + "i234", + "its", + "loginto", + "myds", + "net", + "noip", + "nym", + "org", + "priv", + "synology", + "webhop", + "wedeploy", + "yombo", + "localhost", + "co", + "com", + "edu", + "gov", + "mil", + "nom", + "org", + "prd", + "tm", + "blogspot", + "com", + "edu", + "gov", + "inf", + "name", + "net", + "nom", + "org", + "com", + "edu", + "gouv", + "gov", + "net", + "org", + "presse", + "edu", + "gov", + "nyc", + "org", + "com", + "edu", + "gov", + "net", + "org", + "dscloud", + "blogspot", + "gov", + "com", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "net", + "org", + "blogspot", + "ac", + "co", + "com", + "gov", + "net", + "or", + "org", + "academy", + "agriculture", + "air", + "airguard", + "alabama", + "alaska", + "amber", + "ambulance", + "american", + "americana", + "americanantiques", + "americanart", + "amsterdam", + "and", + "annefrank", + "anthro", + "anthropology", + "antiques", + "aquarium", + "arboretum", + "archaeological", + "archaeology", + "architecture", + "art", + "artanddesign", + "artcenter", + "artdeco", + "arteducation", + "artgallery", + "arts", + "artsandcrafts", + "asmatart", + "assassination", + "assisi", + "association", + "astronomy", + "atlanta", + "austin", + "australia", + "automotive", + "aviation", + "axis", + "badajoz", + "baghdad", + "bahn", + "bale", + "baltimore", + "barcelona", + "baseball", + "basel", + "baths", + "bauern", + "beauxarts", + "beeldengeluid", + "bellevue", + "bergbau", + "berkeley", + "berlin", + "bern", + "bible", + "bilbao", + "bill", + "birdart", + "birthplace", + "bonn", + "boston", + "botanical", + "botanicalgarden", + "botanicgarden", + "botany", + "brandywinevalley", + "brasil", + "bristol", + "british", + "britishcolumbia", + "broadcast", + "brunel", + "brussel", + "brussels", + "bruxelles", + "building", + "burghof", + "bus", + "bushey", + "cadaques", + "california", + "cambridge", + "can", + "canada", + "capebreton", + "carrier", + "cartoonart", + "casadelamoneda", + "castle", + "castres", + "celtic", + "center", + "chattanooga", + "cheltenham", + "chesapeakebay", + "chicago", + "children", + 
"childrens", + "childrensgarden", + "chiropractic", + "chocolate", + "christiansburg", + "cincinnati", + "cinema", + "circus", + "civilisation", + "civilization", + "civilwar", + "clinton", + "clock", + "coal", + "coastaldefence", + "cody", + "coldwar", + "collection", + "colonialwilliamsburg", + "coloradoplateau", + "columbia", + "columbus", + "communication", + "communications", + "community", + "computer", + "computerhistory", + "contemporary", + "contemporaryart", + "convent", + "copenhagen", + "corporation", + "corvette", + "costume", + "countryestate", + "county", + "crafts", + "cranbrook", + "creation", + "cultural", + "culturalcenter", + "culture", + "cyber", + "cymru", + "dali", + "dallas", + "database", + "ddr", + "decorativearts", + "delaware", + "delmenhorst", + "denmark", + "depot", + "design", + "detroit", + "dinosaur", + "discovery", + "dolls", + "donostia", + "durham", + "eastafrica", + "eastcoast", + "education", + "educational", + "egyptian", + "eisenbahn", + "elburg", + "elvendrell", + "embroidery", + "encyclopedic", + "england", + "entomology", + "environment", + "environmentalconservation", + "epilepsy", + "essex", + "estate", + "ethnology", + "exeter", + "exhibition", + "family", + "farm", + "farmequipment", + "farmers", + "farmstead", + "field", + "figueres", + "filatelia", + "film", + "fineart", + "finearts", + "finland", + "flanders", + "florida", + "force", + "fortmissoula", + "fortworth", + "foundation", + "francaise", + "frankfurt", + "franziskaner", + "freemasonry", + "freiburg", + "fribourg", + "frog", + "fundacio", + "furniture", + "gallery", + "garden", + "gateway", + "geelvinck", + "gemological", + "geology", + "georgia", + "giessen", + "glas", + "glass", + "gorge", + "grandrapids", + "graz", + "guernsey", + "halloffame", + "hamburg", + "handson", + "harvestcelebration", + "hawaii", + "health", + "heimatunduhren", + "hellas", + "helsinki", + "hembygdsforbund", + "heritage", + "histoire", + "historical", + "historicalsociety", + "historichouses", + "historisch", + "historisches", + "history", + "historyofscience", + "horology", + "house", + "humanities", + "illustration", + "imageandsound", + "indian", + "indiana", + "indianapolis", + "indianmarket", + "intelligence", + "interactive", + "iraq", + "iron", + "isleofman", + "jamison", + "jefferson", + "jerusalem", + "jewelry", + "jewish", + "jewishart", + "jfk", + "journalism", + "judaica", + "judygarland", + "juedisches", + "juif", + "karate", + "karikatur", + "kids", + "koebenhavn", + "koeln", + "kunst", + "kunstsammlung", + "kunstunddesign", + "labor", + "labour", + "lajolla", + "lancashire", + "landes", + "lans", + "larsson", + "lewismiller", + "lincoln", + "linz", + "living", + "livinghistory", + "localhistory", + "london", + "losangeles", + "louvre", + "loyalist", + "lucerne", + "luxembourg", + "luzern", + "mad", + "madrid", + "mallorca", + "manchester", + "mansion", + "mansions", + "manx", + "marburg", + "maritime", + "maritimo", + "maryland", + "marylhurst", + "media", + "medical", + "medizinhistorisches", + "meeres", + "memorial", + "mesaverde", + "michigan", + "midatlantic", + "military", + "mill", + "miners", + "mining", + "minnesota", + "missile", + "missoula", + "modern", + "moma", + "money", + "monmouth", + "monticello", + "montreal", + "moscow", + "motorcycle", + "muenchen", + "muenster", + "mulhouse", + "muncie", + "museet", + "museumcenter", + "museumvereniging", + "music", + "national", + "nationalfirearms", + "nationalheritage", + "nativeamerican", + "naturalhistory", + 
"naturalhistorymuseum", + "naturalsciences", + "nature", + "naturhistorisches", + "natuurwetenschappen", + "naumburg", + "naval", + "nebraska", + "neues", + "newhampshire", + "newjersey", + "newmexico", + "newport", + "newspaper", + "newyork", + "niepce", + "norfolk", + "north", + "nrw", + "nuernberg", + "nuremberg", + "nyc", + "nyny", + "oceanographic", + "oceanographique", + "omaha", + "online", + "ontario", + "openair", + "oregon", + "oregontrail", + "otago", + "oxford", + "pacific", + "paderborn", + "palace", + "paleo", + "palmsprings", + "panama", + "paris", + "pasadena", + "pharmacy", + "philadelphia", + "philadelphiaarea", + "philately", + "phoenix", + "photography", + "pilots", + "pittsburgh", + "planetarium", + "plantation", + "plants", + "plaza", + "portal", + "portland", + "portlligat", + "posts-and-telecommunications", + "preservation", + "presidio", + "press", + "project", + "public", + "pubol", + "quebec", + "railroad", + "railway", + "research", + "resistance", + "riodejaneiro", + "rochester", + "rockart", + "roma", + "russia", + "saintlouis", + "salem", + "salvadordali", + "salzburg", + "sandiego", + "sanfrancisco", + "santabarbara", + "santacruz", + "santafe", + "saskatchewan", + "satx", + "savannahga", + "schlesisches", + "schoenbrunn", + "schokoladen", + "school", + "schweiz", + "science", + "science-fiction", + "scienceandhistory", + "scienceandindustry", + "sciencecenter", + "sciencecenters", + "sciencehistory", + "sciences", + "sciencesnaturelles", + "scotland", + "seaport", + "settlement", + "settlers", + "shell", + "sherbrooke", + "sibenik", + "silk", + "ski", + "skole", + "society", + "sologne", + "soundandvision", + "southcarolina", + "southwest", + "space", + "spy", + "square", + "stadt", + "stalbans", + "starnberg", + "state", + "stateofdelaware", + "station", + "steam", + "steiermark", + "stjohn", + "stockholm", + "stpetersburg", + "stuttgart", + "suisse", + "surgeonshall", + "surrey", + "svizzera", + "sweden", + "sydney", + "tank", + "tcm", + "technology", + "telekommunikation", + "television", + "texas", + "textile", + "theater", + "time", + "timekeeping", + "topology", + "torino", + "touch", + "town", + "transport", + "tree", + "trolley", + "trust", + "trustee", + "uhren", + "ulm", + "undersea", + "university", + "usa", + "usantiques", + "usarts", + "uscountryestate", + "usculture", + "usdecorativearts", + "usgarden", + "ushistory", + "ushuaia", + "uslivinghistory", + "utah", + "uvic", + "valley", + "vantaa", + "versailles", + "viking", + "village", + "virginia", + "virtual", + "virtuel", + "vlaanderen", + "volkenkunde", + "wales", + "wallonie", + "war", + "washingtondc", + "watch-and-clock", + "watchandclock", + "western", + "westfalen", + "whaling", + "wildlife", + "williamsburg", + "windmill", + "workshop", + "xn--9dbhblg6di", + "xn--comunicaes-v6a2o", + "xn--correios-e-telecomunicaes-ghc29a", + "xn--h1aegh", + "xn--lns-qla", + "york", + "yorkshire", + "yosemite", + "youth", + "zoological", + "zoology", + "aero", + "biz", + "com", + "coop", + "edu", + "gov", + "info", + "int", + "mil", + "museum", + "name", + "net", + "org", + "pro", + "ac", + "biz", + "co", + "com", + "coop", + "edu", + "gov", + "int", + "museum", + "net", + "org", + "blogspot", + "com", + "edu", + "gob", + "net", + "nym", + "org", + "blogspot", + "com", + "edu", + "gov", + "mil", + "name", + "net", + "org", + "ac", + "adv", + "co", + "edu", + "gov", + "mil", + "net", + "org", + "ca", + "cc", + "co", + "com", + "dr", + "in", + "info", + "mobi", + "mx", + "name", + "or", + "org", + "pro", 
+ "school", + "tv", + "us", + "ws", + "her", + "his", + "forgot", + "forgot", + "asso", + "nom", + "alwaysdata", + "at-band-camp", + "azure-mobile", + "azurewebsites", + "barsy", + "blogdns", + "boomla", + "bounceme", + "bplaced", + "broke-it", + "buyshouses", + "casacam", + "cdn77", + "cdn77-ssl", + "channelsdvr", + "cloudaccess", + "cloudapp", + "cloudfront", + "cloudfunctions", + "cryptonomic", + "ddns", + "debian", + "definima", + "dnsalias", + "dnsdojo", + "does-it", + "dontexist", + "dsmynas", + "dynalias", + "dynathome", + "dynu", + "dynv6", + "eating-organic", + "endofinternet", + "familyds", + "fastly", + "fastlylb", + "feste-ip", + "firewall-gateway", + "flynnhosting", + "from-az", + "from-co", + "from-la", + "from-ny", + "gb", + "gets-it", + "ham-radio-op", + "homeftp", + "homeip", + "homelinux", + "homeunix", + "hu", + "in", + "in-the-band", + "ipifony", + "is-a-chef", + "is-a-geek", + "isa-geek", + "jp", + "kicks-ass", + "knx-server", + "moonscale", + "mydissent", + "myeffect", + "myfritz", + "mymediapc", + "mypsx", + "mysecuritycamera", + "nhlfan", + "no-ip", + "office-on-the", + "pgafan", + "podzone", + "privatizehealthinsurance", + "rackmaze", + "redirectme", + "ru", + "scrapper-site", + "se", + "selfip", + "sells-it", + "servebbs", + "serveblog", + "serveftp", + "serveminecraft", + "square7", + "static-access", + "sytes", + "t3l3p0rt", + "thruhere", + "twmail", + "uk", + "webhop", + "za", + "r", + "freetls", + "map", + "prod", + "ssl", + "a", + "global", + "a", + "b", + "global", + "map", + "alces", + "arts", + "com", + "firm", + "info", + "net", + "other", + "per", + "rec", + "store", + "web", + "com", + "edu", + "gov", + "i", + "mil", + "mobi", + "name", + "net", + "org", + "sch", + "blogspot", + "ac", + "biz", + "co", + "com", + "edu", + "gob", + "in", + "info", + "int", + "mil", + "net", + "nom", + "org", + "web", + "blogspot", + "bv", + "cistron", + "co", + "demon", + "hosting-cluster", + "transurl", + "virtueeldomein", + "aa", + "aarborte", + "aejrie", + "afjord", + "agdenes", + "ah", + "akershus", + "aknoluokta", + "akrehamn", + "al", + "alaheadju", + "alesund", + "algard", + "alstahaug", + "alta", + "alvdal", + "amli", + "amot", + "andasuolo", + "andebu", + "andoy", + "ardal", + "aremark", + "arendal", + "arna", + "aseral", + "asker", + "askim", + "askoy", + "askvoll", + "asnes", + "audnedaln", + "aukra", + "aure", + "aurland", + "aurskog-holand", + "austevoll", + "austrheim", + "averoy", + "badaddja", + "bahcavuotna", + "bahccavuotna", + "baidar", + "bajddar", + "balat", + "balestrand", + "ballangen", + "balsfjord", + "bamble", + "bardu", + "barum", + "batsfjord", + "bearalvahki", + "beardu", + "beiarn", + "berg", + "bergen", + "berlevag", + "bievat", + "bindal", + "birkenes", + "bjarkoy", + "bjerkreim", + "bjugn", + "blogspot", + "bodo", + "bokn", + "bomlo", + "bremanger", + "bronnoy", + "bronnoysund", + "brumunddal", + "bryne", + "bu", + "budejju", + "buskerud", + "bygland", + "bykle", + "cahcesuolo", + "co", + "davvenjarga", + "davvesiida", + "deatnu", + "dep", + "dielddanuorri", + "divtasvuodna", + "divttasvuotna", + "donna", + "dovre", + "drammen", + "drangedal", + "drobak", + "dyroy", + "egersund", + "eid", + "eidfjord", + "eidsberg", + "eidskog", + "eidsvoll", + "eigersund", + "elverum", + "enebakk", + "engerdal", + "etne", + "etnedal", + "evenassi", + "evenes", + "evje-og-hornnes", + "farsund", + "fauske", + "fedje", + "fet", + "fetsund", + "fhs", + "finnoy", + "fitjar", + "fjaler", + "fjell", + "fla", + "flakstad", + "flatanger", + "flekkefjord", + 
"flesberg", + "flora", + "floro", + "fm", + "folkebibl", + "folldal", + "forde", + "forsand", + "fosnes", + "frana", + "fredrikstad", + "frei", + "frogn", + "froland", + "frosta", + "froya", + "fuoisku", + "fuossko", + "fusa", + "fylkesbibl", + "fyresdal", + "gaivuotna", + "galsa", + "gamvik", + "gangaviika", + "gaular", + "gausdal", + "giehtavuoatna", + "gildeskal", + "giske", + "gjemnes", + "gjerdrum", + "gjerstad", + "gjesdal", + "gjovik", + "gloppen", + "gol", + "gran", + "grane", + "granvin", + "gratangen", + "grimstad", + "grong", + "grue", + "gulen", + "guovdageaidnu", + "ha", + "habmer", + "hadsel", + "hagebostad", + "halden", + "halsa", + "hamar", + "hamaroy", + "hammarfeasta", + "hammerfest", + "hapmir", + "haram", + "hareid", + "harstad", + "hasvik", + "hattfjelldal", + "haugesund", + "hedmark", + "hemne", + "hemnes", + "hemsedal", + "herad", + "hitra", + "hjartdal", + "hjelmeland", + "hl", + "hm", + "hobol", + "hof", + "hokksund", + "hol", + "hole", + "holmestrand", + "holtalen", + "honefoss", + "hordaland", + "hornindal", + "horten", + "hoyanger", + "hoylandet", + "hurdal", + "hurum", + "hvaler", + "hyllestad", + "ibestad", + "idrett", + "inderoy", + "iveland", + "ivgu", + "jan-mayen", + "jessheim", + "jevnaker", + "jolster", + "jondal", + "jorpeland", + "kafjord", + "karasjohka", + "karasjok", + "karlsoy", + "karmoy", + "kautokeino", + "kirkenes", + "klabu", + "klepp", + "kommune", + "kongsberg", + "kongsvinger", + "kopervik", + "kraanghke", + "kragero", + "kristiansand", + "kristiansund", + "krodsherad", + "krokstadelva", + "kvafjord", + "kvalsund", + "kvam", + "kvanangen", + "kvinesdal", + "kvinnherad", + "kviteseid", + "kvitsoy", + "laakesvuemie", + "lahppi", + "langevag", + "lardal", + "larvik", + "lavagis", + "lavangen", + "leangaviika", + "lebesby", + "leikanger", + "leirfjord", + "leirvik", + "leka", + "leksvik", + "lenvik", + "lerdal", + "lesja", + "levanger", + "lier", + "lierne", + "lillehammer", + "lillesand", + "lindas", + "lindesnes", + "loabat", + "lodingen", + "lom", + "loppa", + "lorenskog", + "loten", + "lund", + "lunner", + "luroy", + "luster", + "lyngdal", + "lyngen", + "malatvuopmi", + "malselv", + "malvik", + "mandal", + "marker", + "marnardal", + "masfjorden", + "masoy", + "matta-varjjat", + "meland", + "meldal", + "melhus", + "meloy", + "meraker", + "midsund", + "midtre-gauldal", + "mil", + "mjondalen", + "mo-i-rana", + "moareke", + "modalen", + "modum", + "molde", + "more-og-romsdal", + "mosjoen", + "moskenes", + "moss", + "mosvik", + "mr", + "muosat", + "museum", + "naamesjevuemie", + "namdalseid", + "namsos", + "namsskogan", + "nannestad", + "naroy", + "narviika", + "narvik", + "naustdal", + "navuotna", + "nedre-eiker", + "nesna", + "nesodden", + "nesoddtangen", + "nesseby", + "nesset", + "nissedal", + "nittedal", + "nl", + "nord-aurdal", + "nord-fron", + "nord-odal", + "norddal", + "nordkapp", + "nordland", + "nordre-land", + "nordreisa", + "nore-og-uvdal", + "notodden", + "notteroy", + "nt", + "odda", + "of", + "oksnes", + "ol", + "omasvuotna", + "oppdal", + "oppegard", + "orkanger", + "orkdal", + "orland", + "orskog", + "orsta", + "osen", + "oslo", + "osoyro", + "osteroy", + "ostfold", + "ostre-toten", + "overhalla", + "ovre-eiker", + "oyer", + "oygarden", + "oystre-slidre", + "porsanger", + "porsangu", + "porsgrunn", + "priv", + "rade", + "radoy", + "rahkkeravju", + "raholt", + "raisa", + "rakkestad", + "ralingen", + "rana", + "randaberg", + "rauma", + "rendalen", + "rennebu", + "rennesoy", + "rindal", + "ringebu", + "ringerike", + "ringsaker", + 
"risor", + "rissa", + "rl", + "roan", + "rodoy", + "rollag", + "romsa", + "romskog", + "roros", + "rost", + "royken", + "royrvik", + "ruovat", + "rygge", + "salangen", + "salat", + "saltdal", + "samnanger", + "sandefjord", + "sandnes", + "sandnessjoen", + "sandoy", + "sarpsborg", + "sauda", + "sauherad", + "sel", + "selbu", + "selje", + "seljord", + "sf", + "siellak", + "sigdal", + "siljan", + "sirdal", + "skanit", + "skanland", + "skaun", + "skedsmo", + "skedsmokorset", + "ski", + "skien", + "skierva", + "skiptvet", + "skjak", + "skjervoy", + "skodje", + "slattum", + "smola", + "snaase", + "snasa", + "snillfjord", + "snoasa", + "sogndal", + "sogne", + "sokndal", + "sola", + "solund", + "somna", + "sondre-land", + "songdalen", + "sor-aurdal", + "sor-fron", + "sor-odal", + "sor-varanger", + "sorfold", + "sorreisa", + "sortland", + "sorum", + "spjelkavik", + "spydeberg", + "st", + "stange", + "stat", + "stathelle", + "stavanger", + "stavern", + "steigen", + "steinkjer", + "stjordal", + "stjordalshalsen", + "stokke", + "stor-elvdal", + "stord", + "stordal", + "storfjord", + "strand", + "stranda", + "stryn", + "sula", + "suldal", + "sund", + "sunndal", + "surnadal", + "svalbard", + "sveio", + "svelvik", + "sykkylven", + "tana", + "tananger", + "telemark", + "time", + "tingvoll", + "tinn", + "tjeldsund", + "tjome", + "tm", + "tokke", + "tolga", + "tonsberg", + "torsken", + "tr", + "trana", + "tranby", + "tranoy", + "troandin", + "trogstad", + "tromsa", + "tromso", + "trondheim", + "trysil", + "tvedestrand", + "tydal", + "tynset", + "tysfjord", + "tysnes", + "tysvar", + "ullensaker", + "ullensvang", + "ulvik", + "unjarga", + "utsira", + "va", + "vaapste", + "vadso", + "vaga", + "vagan", + "vagsoy", + "vaksdal", + "valle", + "vang", + "vanylven", + "vardo", + "varggat", + "varoy", + "vefsn", + "vega", + "vegarshei", + "vennesla", + "verdal", + "verran", + "vestby", + "vestfold", + "vestnes", + "vestre-slidre", + "vestre-toten", + "vestvagoy", + "vevelstad", + "vf", + "vgs", + "vik", + "vikna", + "vindafjord", + "voagat", + "volda", + "voss", + "vossevangen", + "xn--andy-ira", + "xn--asky-ira", + "xn--aurskog-hland-jnb", + "xn--avery-yua", + "xn--bdddj-mrabd", + "xn--bearalvhki-y4a", + "xn--berlevg-jxa", + "xn--bhcavuotna-s4a", + "xn--bhccavuotna-k7a", + "xn--bidr-5nac", + "xn--bievt-0qa", + "xn--bjarky-fya", + "xn--bjddar-pta", + "xn--blt-elab", + "xn--bmlo-gra", + "xn--bod-2na", + "xn--brnny-wuac", + "xn--brnnysund-m8ac", + "xn--brum-voa", + "xn--btsfjord-9za", + "xn--davvenjrga-y4a", + "xn--dnna-gra", + "xn--drbak-wua", + "xn--dyry-ira", + "xn--eveni-0qa01ga", + "xn--finny-yua", + "xn--fjord-lra", + "xn--fl-zia", + "xn--flor-jra", + "xn--frde-gra", + "xn--frna-woa", + "xn--frya-hra", + "xn--ggaviika-8ya47h", + "xn--gildeskl-g0a", + "xn--givuotna-8ya", + "xn--gjvik-wua", + "xn--gls-elac", + "xn--h-2fa", + "xn--hbmer-xqa", + "xn--hcesuolo-7ya35b", + "xn--hgebostad-g3a", + "xn--hmmrfeasta-s4ac", + "xn--hnefoss-q1a", + "xn--hobl-ira", + "xn--holtlen-hxa", + "xn--hpmir-xqa", + "xn--hyanger-q1a", + "xn--hylandet-54a", + "xn--indery-fya", + "xn--jlster-bya", + "xn--jrpeland-54a", + "xn--karmy-yua", + "xn--kfjord-iua", + "xn--klbu-woa", + "xn--koluokta-7ya57h", + "xn--krager-gya", + "xn--kranghke-b0a", + "xn--krdsherad-m8a", + "xn--krehamn-dxa", + "xn--krjohka-hwab49j", + "xn--ksnes-uua", + "xn--kvfjord-nxa", + "xn--kvitsy-fya", + "xn--kvnangen-k0a", + "xn--l-1fa", + "xn--laheadju-7ya", + "xn--langevg-jxa", + "xn--ldingen-q1a", + "xn--leagaviika-52b", + "xn--lesund-hua", + "xn--lgrd-poac", + 
"xn--lhppi-xqa", + "xn--linds-pra", + "xn--loabt-0qa", + "xn--lrdal-sra", + "xn--lrenskog-54a", + "xn--lt-liac", + "xn--lten-gra", + "xn--lury-ira", + "xn--mely-ira", + "xn--merker-kua", + "xn--mjndalen-64a", + "xn--mlatvuopmi-s4a", + "xn--mli-tla", + "xn--mlselv-iua", + "xn--moreke-jua", + "xn--mosjen-eya", + "xn--mot-tla", + "xn--mre-og-romsdal-qqb", + "xn--msy-ula0h", + "xn--mtta-vrjjat-k7af", + "xn--muost-0qa", + "xn--nmesjevuemie-tcba", + "xn--nry-yla5g", + "xn--nttery-byae", + "xn--nvuotna-hwa", + "xn--oppegrd-ixa", + "xn--ostery-fya", + "xn--osyro-wua", + "xn--porsgu-sta26f", + "xn--rady-ira", + "xn--rdal-poa", + "xn--rde-ula", + "xn--rdy-0nab", + "xn--rennesy-v1a", + "xn--rhkkervju-01af", + "xn--rholt-mra", + "xn--risa-5na", + "xn--risr-ira", + "xn--rland-uua", + "xn--rlingen-mxa", + "xn--rmskog-bya", + "xn--rros-gra", + "xn--rskog-uua", + "xn--rst-0na", + "xn--rsta-fra", + "xn--ryken-vua", + "xn--ryrvik-bya", + "xn--s-1fa", + "xn--sandnessjen-ogb", + "xn--sandy-yua", + "xn--seral-lra", + "xn--sgne-gra", + "xn--skierv-uta", + "xn--skjervy-v1a", + "xn--skjk-soa", + "xn--sknit-yqa", + "xn--sknland-fxa", + "xn--slat-5na", + "xn--slt-elab", + "xn--smla-hra", + "xn--smna-gra", + "xn--snase-nra", + "xn--sndre-land-0cb", + "xn--snes-poa", + "xn--snsa-roa", + "xn--sr-aurdal-l8a", + "xn--sr-fron-q1a", + "xn--sr-odal-q1a", + "xn--sr-varanger-ggb", + "xn--srfold-bya", + "xn--srreisa-q1a", + "xn--srum-gra", + "xn--stfold-9xa", + "xn--stjrdal-s1a", + "xn--stjrdalshalsen-sqb", + "xn--stre-toten-zcb", + "xn--tjme-hra", + "xn--tnsberg-q1a", + "xn--trany-yua", + "xn--trgstad-r1a", + "xn--trna-woa", + "xn--troms-zua", + "xn--tysvr-vra", + "xn--unjrga-rta", + "xn--vads-jra", + "xn--vard-jra", + "xn--vegrshei-c0a", + "xn--vestvgy-ixa6o", + "xn--vg-yiab", + "xn--vgan-qoa", + "xn--vgsy-qoa0j", + "xn--vre-eiker-k8a", + "xn--vrggt-xqad", + "xn--vry-yla5g", + "xn--yer-zna", + "xn--ygarden-p1a", + "xn--ystre-slidre-ujb", + "gs", + "gs", + "nes", + "gs", + "nes", + "gs", + "os", + "valer", + "xn--vler-qoa", + "gs", + "gs", + "os", + "gs", + "heroy", + "sande", + "gs", + "gs", + "bo", + "heroy", + "xn--b-5ga", + "xn--hery-ira", + "gs", + "gs", + "gs", + "gs", + "valer", + "gs", + "gs", + "gs", + "gs", + "bo", + "xn--b-5ga", + "gs", + "gs", + "gs", + "sande", + "gs", + "sande", + "xn--hery-ira", + "xn--vler-qoa", + "biz", + "com", + "edu", + "gov", + "info", + "net", + "org", + "merseine", + "mine", + "nom", + "shacknet", + "ac", + "co", + "cri", + "geek", + "gen", + "govt", + "health", + "iwi", + "kiwi", + "maori", + "mil", + "net", + "nym", + "org", + "parliament", + "school", + "xn--mori-qsa", + "blogspot", + "co", + "com", + "edu", + "gov", + "med", + "museum", + "net", + "org", + "pro", + "homelink", + "barsy", + "accesscam", + "ae", + "amune", + "blogdns", + "blogsite", + "bmoattachments", + "boldlygoingnowhere", + "cable-modem", + "camdvr", + "cdn77", + "cdn77-secure", + "certmgr", + "cloudns", + "collegefan", + "couchpotatofries", + "ddnss", + "diskstation", + "dnsalias", + "dnsdojo", + "doesntexist", + "dontexist", + "doomdns", + "dsmynas", + "duckdns", + "dvrdns", + "dynalias", + "dyndns", + "endofinternet", + "endoftheinternet", + "eu", + "familyds", + "fedorainfracloud", + "fedorapeople", + "fedoraproject", + "freeddns", + "from-me", + "game-host", + "gotdns", + "hepforge", + "hk", + "hobby-site", + "homedns", + "homeftp", + "homelinux", + "homeunix", + "hopto", + "is-a-bruinsfan", + "is-a-candidate", + "is-a-celticsfan", + "is-a-chef", + "is-a-geek", + "is-a-knight", + "is-a-linux-user", + 
"is-a-patsfan", + "is-a-soxfan", + "is-found", + "is-lost", + "is-saved", + "is-very-bad", + "is-very-evil", + "is-very-good", + "is-very-nice", + "is-very-sweet", + "isa-geek", + "js", + "kicks-ass", + "misconfused", + "mlbfan", + "my-firewall", + "myfirewall", + "myftp", + "mysecuritycamera", + "mywire", + "nflfan", + "no-ip", + "pimienta", + "podzone", + "poivron", + "potager", + "read-books", + "readmyblog", + "selfip", + "sellsyourhome", + "servebbs", + "serveftp", + "servegame", + "spdns", + "stuff-4-sale", + "sweetpepper", + "tunk", + "tuxfamily", + "twmail", + "ufcfan", + "us", + "webhop", + "webredirect", + "wmflabs", + "za", + "zapto", + "tele", + "c", + "rsc", + "origin", + "ssl", + "go", + "home", + "al", + "asso", + "at", + "au", + "be", + "bg", + "ca", + "cd", + "ch", + "cn", + "cy", + "cz", + "de", + "dk", + "edu", + "ee", + "es", + "fi", + "fr", + "gr", + "hr", + "hu", + "ie", + "il", + "in", + "int", + "is", + "it", + "jp", + "kr", + "lt", + "lu", + "lv", + "mc", + "me", + "mk", + "mt", + "my", + "net", + "ng", + "nl", + "no", + "nz", + "paris", + "pl", + "pt", + "q-a", + "ro", + "ru", + "se", + "si", + "sk", + "tr", + "uk", + "us", + "cloud", + "os", + "stg", + "app", + "os", + "app", + "nerdpol", + "abo", + "ac", + "com", + "edu", + "gob", + "ing", + "med", + "net", + "nom", + "org", + "sld", + "ybo", + "blogspot", + "com", + "edu", + "gob", + "mil", + "net", + "nom", + "nym", + "org", + "com", + "edu", + "org", + "com", + "edu", + "gov", + "i", + "mil", + "net", + "ngo", + "org", + "1337", + "biz", + "com", + "edu", + "fam", + "gob", + "gok", + "gon", + "gop", + "gos", + "gov", + "info", + "net", + "org", + "web", + "agro", + "aid", + "art", + "atm", + "augustow", + "auto", + "babia-gora", + "bedzin", + "beep", + "beskidy", + "bialowieza", + "bialystok", + "bielawa", + "bieszczady", + "biz", + "boleslawiec", + "bydgoszcz", + "bytom", + "cieszyn", + "co", + "com", + "czeladz", + "czest", + "dlugoleka", + "edu", + "elblag", + "elk", + "gda", + "gdansk", + "gdynia", + "gliwice", + "glogow", + "gmina", + "gniezno", + "gorlice", + "gov", + "grajewo", + "gsm", + "ilawa", + "info", + "jaworzno", + "jelenia-gora", + "jgora", + "kalisz", + "karpacz", + "kartuzy", + "kaszuby", + "katowice", + "kazimierz-dolny", + "kepno", + "ketrzyn", + "klodzko", + "kobierzyce", + "kolobrzeg", + "konin", + "konskowola", + "krakow", + "kutno", + "lapy", + "lebork", + "legnica", + "lezajsk", + "limanowa", + "lomza", + "lowicz", + "lubin", + "lukow", + "mail", + "malbork", + "malopolska", + "mazowsze", + "mazury", + "med", + "media", + "miasta", + "mielec", + "mielno", + "mil", + "mragowo", + "naklo", + "net", + "nieruchomosci", + "nom", + "nowaruda", + "nysa", + "olawa", + "olecko", + "olkusz", + "olsztyn", + "opoczno", + "opole", + "org", + "ostroda", + "ostroleka", + "ostrowiec", + "ostrowwlkp", + "pc", + "pila", + "pisz", + "podhale", + "podlasie", + "polkowice", + "pomorskie", + "pomorze", + "powiat", + "poznan", + "priv", + "prochowice", + "pruszkow", + "przeworsk", + "pulawy", + "radom", + "rawa-maz", + "realestate", + "rel", + "rybnik", + "rzeszow", + "sanok", + "sejny", + "sex", + "shop", + "sklep", + "skoczow", + "slask", + "slupsk", + "sopot", + "sos", + "sosnowiec", + "stalowa-wola", + "starachowice", + "stargard", + "suwalki", + "swidnica", + "swiebodzin", + "swinoujscie", + "szczecin", + "szczytno", + "szkola", + "targi", + "tarnobrzeg", + "tgory", + "tm", + "tourism", + "travel", + "turek", + "turystyka", + "tychy", + "ustka", + "walbrzych", + "warmia", + "warszawa", + "waw", + 
"wegrow", + "wielun", + "wlocl", + "wloclawek", + "wodzislaw", + "wolomin", + "wroc", + "wroclaw", + "zachpomor", + "zagan", + "zakopane", + "zarow", + "zgora", + "zgorzelec", + "ap", + "griw", + "ic", + "is", + "kmpsp", + "konsulat", + "kppsp", + "kwp", + "kwpsp", + "mup", + "mw", + "oirm", + "oum", + "pa", + "pinb", + "piw", + "po", + "psp", + "psse", + "pup", + "rzgw", + "sa", + "sdn", + "sko", + "so", + "sr", + "starostwo", + "ug", + "ugim", + "um", + "umig", + "upow", + "uppo", + "us", + "uw", + "uzs", + "wif", + "wiih", + "winb", + "wios", + "witd", + "wiw", + "wsa", + "wskr", + "wuoz", + "wzmiuw", + "zp", + "co", + "edu", + "gov", + "net", + "org", + "ac", + "biz", + "com", + "edu", + "est", + "gov", + "info", + "isla", + "name", + "net", + "org", + "pro", + "prof", + "aaa", + "aca", + "acct", + "avocat", + "bar", + "cloudns", + "cpa", + "eng", + "jur", + "law", + "med", + "recht", + "com", + "edu", + "gov", + "net", + "org", + "plo", + "sec", + "blogspot", + "com", + "edu", + "gov", + "int", + "net", + "nome", + "nym", + "org", + "publ", + "belau", + "cloudns", + "co", + "ed", + "go", + "ne", + "nom", + "or", + "com", + "coop", + "edu", + "gov", + "mil", + "net", + "org", + "blogspot", + "com", + "edu", + "gov", + "mil", + "name", + "net", + "nom", + "org", + "sch", + "asso", + "blogspot", + "com", + "nom", + "ybo", + "clan", + "arts", + "blogspot", + "com", + "firm", + "info", + "nom", + "nt", + "org", + "rec", + "shop", + "store", + "tm", + "www", + "lima-city", + "myddns", + "webspace", + "ac", + "blogspot", + "co", + "edu", + "gov", + "in", + "nom", + "org", + "ac", + "adygeya", + "bashkiria", + "bir", + "blogspot", + "cbg", + "cldmail", + "com", + "dagestan", + "edu", + "gov", + "grozny", + "int", + "kalmykia", + "kustanai", + "marine", + "mil", + "mordovia", + "msk", + "mytis", + "nalchik", + "net", + "nov", + "org", + "pp", + "pyatigorsk", + "spb", + "test", + "vladikavkaz", + "vladimir", + "hb", + "ac", + "co", + "com", + "edu", + "gouv", + "gov", + "int", + "mil", + "net", + "com", + "edu", + "gov", + "med", + "net", + "org", + "pub", + "sch", + "com", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "gov", + "net", + "org", + "ybo", + "com", + "edu", + "gov", + "info", + "med", + "net", + "org", + "tv", + "a", + "ac", + "b", + "bd", + "blogspot", + "brand", + "c", + "com", + "d", + "e", + "f", + "fh", + "fhsk", + "fhv", + "g", + "h", + "i", + "k", + "komforb", + "kommunalforbund", + "komvux", + "l", + "lanbib", + "m", + "n", + "naturbruksgymn", + "o", + "org", + "p", + "parti", + "pp", + "press", + "r", + "s", + "t", + "tm", + "u", + "w", + "x", + "y", + "z", + "blogspot", + "com", + "edu", + "gov", + "net", + "org", + "per", + "com", + "gov", + "hashbang", + "mil", + "net", + "now", + "org", + "platform", + "wedeploy", + "blogspot", + "nom", + "byen", + "cyon", + "platformsh", + "blogspot", + "nym", + "com", + "edu", + "gov", + "net", + "org", + "art", + "blogspot", + "com", + "edu", + "gouv", + "org", + "perso", + "univ", + "com", + "net", + "org", + "stackspace", + "uber", + "xs4all", + "co", + "com", + "consulado", + "edu", + "embaixada", + "gov", + "mil", + "net", + "org", + "principe", + "saotome", + "store", + "abkhazia", + "adygeya", + "aktyubinsk", + "arkhangelsk", + "armenia", + "ashgabad", + "azerbaijan", + "balashov", + "bashkiria", + "bryansk", + "bukhara", + "chimkent", + "dagestan", + "east-kazakhstan", + "exnet", + "georgia", + "grozny", + "ivanovo", + "jambyl", + "kalmykia", + "kaluga", + "karacol", + "karaganda", + "karelia", + "khakassia", + 
"krasnodar", + "kurgan", + "kustanai", + "lenug", + "mangyshlak", + "mordovia", + "msk", + "murmansk", + "nalchik", + "navoi", + "north-kazakhstan", + "nov", + "nym", + "obninsk", + "penza", + "pokrovsk", + "sochi", + "spb", + "tashkent", + "termez", + "togliatti", + "troitsk", + "tselinograd", + "tula", + "tuva", + "vladikavkaz", + "vladimir", + "vologda", + "barsy", + "com", + "edu", + "gob", + "org", + "red", + "gov", + "nym", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "knightpoint", + "ac", + "co", + "org", + "blogspot", + "ac", + "co", + "go", + "in", + "mi", + "net", + "or", + "ac", + "biz", + "co", + "com", + "edu", + "go", + "gov", + "int", + "mil", + "name", + "net", + "nic", + "org", + "test", + "web", + "gov", + "co", + "com", + "edu", + "gov", + "mil", + "net", + "nom", + "org", + "agrinet", + "com", + "defense", + "edunet", + "ens", + "fin", + "gov", + "ind", + "info", + "intl", + "mincom", + "nat", + "net", + "org", + "perso", + "rnrt", + "rns", + "rnu", + "tourism", + "turen", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "vpnplus", + "av", + "bbs", + "bel", + "biz", + "com", + "dr", + "edu", + "gen", + "gov", + "info", + "k12", + "kep", + "mil", + "name", + "nc", + "net", + "org", + "pol", + "tel", + "tv", + "web", + "blogspot", + "gov", + "ybo", + "aero", + "biz", + "co", + "com", + "coop", + "edu", + "gov", + "info", + "int", + "jobs", + "mobi", + "museum", + "name", + "net", + "org", + "pro", + "travel", + "better-than", + "dyndns", + "on-the-web", + "worse-than", + "blogspot", + "club", + "com", + "ebiz", + "edu", + "game", + "gov", + "idv", + "mil", + "net", + "nym", + "org", + "url", + "xn--czrw28b", + "xn--uc0atv", + "xn--zf0ao64a", + "mymailer", + "ac", + "co", + "go", + "hotel", + "info", + "me", + "mil", + "mobi", + "ne", + "or", + "sc", + "tv", + "biz", + "cc", + "cherkassy", + "cherkasy", + "chernigov", + "chernihiv", + "chernivtsi", + "chernovtsy", + "ck", + "cn", + "co", + "com", + "cr", + "crimea", + "cv", + "dn", + "dnepropetrovsk", + "dnipropetrovsk", + "dominic", + "donetsk", + "dp", + "edu", + "gov", + "if", + "in", + "inf", + "ivano-frankivsk", + "kh", + "kharkiv", + "kharkov", + "kherson", + "khmelnitskiy", + "khmelnytskyi", + "kiev", + "kirovograd", + "km", + "kr", + "krym", + "ks", + "kv", + "kyiv", + "lg", + "lt", + "ltd", + "lugansk", + "lutsk", + "lv", + "lviv", + "mk", + "mykolaiv", + "net", + "nikolaev", + "od", + "odesa", + "odessa", + "org", + "pl", + "poltava", + "pp", + "rivne", + "rovno", + "rv", + "sb", + "sebastopol", + "sevastopol", + "sm", + "sumy", + "te", + "ternopil", + "uz", + "uzhgorod", + "vinnica", + "vinnytsia", + "vn", + "volyn", + "yalta", + "zaporizhzhe", + "zaporizhzhia", + "zhitomir", + "zhytomyr", + "zp", + "zt", + "ac", + "blogspot", + "co", + "com", + "go", + "ne", + "nom", + "or", + "org", + "sc", + "ac", + "co", + "gov", + "ltd", + "me", + "net", + "nhs", + "org", + "plc", + "police", + "sch", + "blogspot", + "nh-serv", + "no-ip", + "wellbeingzone", + "homeoffice", + "service", + "ak", + "al", + "ar", + "as", + "az", + "ca", + "cloudns", + "co", + "ct", + "dc", + "de", + "dni", + "drud", + "fed", + "fl", + "ga", + "golffan", + "gu", + "hi", + "ia", + "id", + "il", + "in", + "is-by", + "isa", + "kids", + "ks", + "ky", + "la", + "land-4-sale", + "ma", + "md", + "me", + "mi", + "mn", + "mo", + "ms", + "mt", + "nc", + "nd", + "ne", + "nh", + "nj", + "nm", + "noip", + "nsn", + "nv", + "ny", + "oh", + "ok", + "or", + "pa", + "pointto", + "pr", + "ri", + "sc", + "sd", + "stuff-4-sale", + "tn", + "tx", + 
"ut", + "va", + "vi", + "vt", + "wa", + "wi", + "wv", + "wy", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "chtr", + "paroch", + "pvt", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "ann-arbor", + "cc", + "cog", + "dst", + "eaton", + "gen", + "k12", + "lib", + "mus", + "tec", + "washtenaw", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "cc", + "k12", + "lib", + "com", + "edu", + "gub", + "mil", + "net", + "nom", + "org", + "blogspot", + "co", + "com", + "net", + "org", + "com", + "edu", + "gov", + "mil", + "net", + "nom", + "org", + "arts", + "co", + "com", + "e12", + "edu", + "firm", + "gob", + "gov", + "info", + "int", + "mil", + "net", + "org", + "rec", + "store", + "tec", + "web", + "nom", + "co", + "com", + "k12", + "net", + "org", + "ac", + "biz", + "blogspot", + "com", + "edu", + "gov", + "health", + "info", + "int", + "name", + "net", + "org", + "pro", + "com", + "edu", + "net", + "org", + "advisor", + "com", + "dyndns", + "edu", + "gov", + "mypets", + "net", + "org", + "xn--80au", + "xn--90azh", + "xn--c1avg", + "xn--d1at", + "xn--o1ac", + "xn--o1ach", + "xn--12c1fe0br", + "xn--12cfi8ixb8l", + "xn--12co0c3b4eva", + "xn--h3cuzk1di", + "xn--m3ch0j3a", + "xn--o3cyx2a", + "blogsite", + "fhapp", + "ac", + "agric", + "alt", + "co", + "edu", + "gov", + "grondar", + "law", + "mil", + "net", + "ngo", + "nis", + "nom", + "org", + "school", + "tm", + "web", + "blogspot", + "ac", + "biz", + "co", + "com", + "edu", + "gov", + "info", + "mil", + "net", + "org", + "sch", + "lima", + "triton", + "ac", + "co", + "gov", + "mil", + "org", +} diff --git a/vendor/golang.org/x/net/route/address.go b/vendor/golang.org/x/net/route/address.go new file mode 100644 index 0000000..e6bfa39 --- /dev/null +++ b/vendor/golang.org/x/net/route/address.go @@ -0,0 +1,425 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +import "runtime" + +// An Addr represents an address associated with packet routing. +type Addr interface { + // Family returns an address family. + Family() int +} + +// A LinkAddr represents a link-layer address. 
+type LinkAddr struct {
+	Index int    // interface index when attached
+	Name  string // interface name when attached
+	Addr  []byte // link-layer address when attached
+}
+
+// Family implements the Family method of Addr interface.
+func (a *LinkAddr) Family() int { return sysAF_LINK }
+
+func (a *LinkAddr) lenAndSpace() (int, int) {
+	l := 8 + len(a.Name) + len(a.Addr)
+	return l, roundup(l)
+}
+
+func (a *LinkAddr) marshal(b []byte) (int, error) {
+	l, ll := a.lenAndSpace()
+	if len(b) < ll {
+		return 0, errShortBuffer
+	}
+	nlen, alen := len(a.Name), len(a.Addr)
+	if nlen > 255 || alen > 255 {
+		return 0, errInvalidAddr
+	}
+	b[0] = byte(l)
+	b[1] = sysAF_LINK
+	if a.Index > 0 {
+		nativeEndian.PutUint16(b[2:4], uint16(a.Index))
+	}
+	data := b[8:]
+	if nlen > 0 {
+		b[5] = byte(nlen)
+		copy(data[:nlen], a.Name) // the name precedes the address in the data section
+		data = data[nlen:]
+	}
+	if alen > 0 {
+		b[6] = byte(alen)
+		copy(data[:alen], a.Addr)
+		data = data[alen:]
+	}
+	return ll, nil
+}
+
+func parseLinkAddr(b []byte) (Addr, error) {
+	if len(b) < 8 {
+		return nil, errInvalidAddr
+	}
+	_, a, err := parseKernelLinkAddr(sysAF_LINK, b[4:])
+	if err != nil {
+		return nil, err
+	}
+	a.(*LinkAddr).Index = int(nativeEndian.Uint16(b[2:4]))
+	return a, nil
+}
+
+// parseKernelLinkAddr parses b as a link-layer address in
+// conventional BSD kernel form.
+func parseKernelLinkAddr(_ int, b []byte) (int, Addr, error) {
+	// The encoding looks like the following:
+	// +----------------------------+
+	// | Type             (1 octet) |
+	// +----------------------------+
+	// | Name length      (1 octet) |
+	// +----------------------------+
+	// | Address length   (1 octet) |
+	// +----------------------------+
+	// | Selector length  (1 octet) |
+	// +----------------------------+
+	// | Data            (variable) |
+	// +----------------------------+
+	//
+	// On some platforms, an all-ones length field means "don't
+	// care".
+	nlen, alen, slen := int(b[1]), int(b[2]), int(b[3])
+	if nlen == 0xff {
+		nlen = 0
+	}
+	if alen == 0xff {
+		alen = 0
+	}
+	if slen == 0xff {
+		slen = 0
+	}
+	l := 4 + nlen + alen + slen
+	if len(b) < l {
+		return 0, nil, errInvalidAddr
+	}
+	data := b[4:]
+	var name string
+	var addr []byte
+	if nlen > 0 {
+		name = string(data[:nlen])
+		data = data[nlen:]
+	}
+	if alen > 0 {
+		addr = data[:alen]
+		data = data[alen:]
+	}
+	return l, &LinkAddr{Name: name, Addr: addr}, nil
+}
+
+// An Inet4Addr represents an internet address for IPv4.
+type Inet4Addr struct {
+	IP [4]byte // IP address
+}
+
+// Family implements the Family method of Addr interface.
+func (a *Inet4Addr) Family() int { return sysAF_INET }
+
+func (a *Inet4Addr) lenAndSpace() (int, int) {
+	return sizeofSockaddrInet, roundup(sizeofSockaddrInet)
+}
+
+func (a *Inet4Addr) marshal(b []byte) (int, error) {
+	l, ll := a.lenAndSpace()
+	if len(b) < ll {
+		return 0, errShortBuffer
+	}
+	b[0] = byte(l)
+	b[1] = sysAF_INET
+	copy(b[4:8], a.IP[:])
+	return ll, nil
+}
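+
+// NOTE(editor, not in upstream x/net/route): a sketch of the layout
+// assumed by the marshal methods above and below. They write classic
+// BSD sockaddr bytes, e.g. for sockaddr_in:
+//
+//	b[0]   total length of the socket address
+//	b[1]   address family (sysAF_INET)
+//	b[2:4] port number, unused by the routing socket
+//	b[4:8] IPv4 address
+//
+// which is why Inet4Addr.marshal stores the address at b[4:8], and why
+// Inet6Addr.marshal below stores it at b[8:24]: sockaddr_in6 carries a
+// 4-byte flowinfo field before the address and the scope ID at b[24:28].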
+
+// An Inet6Addr represents an internet address for IPv6.
+type Inet6Addr struct {
+	IP     [16]byte // IP address
+	ZoneID int      // zone identifier
+}
+
+// Family implements the Family method of Addr interface.
+func (a *Inet6Addr) Family() int { return sysAF_INET6 }
+
+func (a *Inet6Addr) lenAndSpace() (int, int) {
+	return sizeofSockaddrInet6, roundup(sizeofSockaddrInet6)
+}
+
+func (a *Inet6Addr) marshal(b []byte) (int, error) {
+	l, ll := a.lenAndSpace()
+	if len(b) < ll {
+		return 0, errShortBuffer
+	}
+	b[0] = byte(l)
+	b[1] = sysAF_INET6
+	copy(b[8:24], a.IP[:])
+	if a.ZoneID > 0 {
+		nativeEndian.PutUint32(b[24:28], uint32(a.ZoneID))
+	}
+	return ll, nil
+}
+
+// parseInetAddr parses b as an internet address for IPv4 or IPv6.
+func parseInetAddr(af int, b []byte) (Addr, error) {
+	switch af {
+	case sysAF_INET:
+		if len(b) < sizeofSockaddrInet {
+			return nil, errInvalidAddr
+		}
+		a := &Inet4Addr{}
+		copy(a.IP[:], b[4:8])
+		return a, nil
+	case sysAF_INET6:
+		if len(b) < sizeofSockaddrInet6 {
+			return nil, errInvalidAddr
+		}
+		a := &Inet6Addr{ZoneID: int(nativeEndian.Uint32(b[24:28]))}
+		copy(a.IP[:], b[8:24])
+		if a.IP[0] == 0xfe && a.IP[1]&0xc0 == 0x80 || a.IP[0] == 0xff && (a.IP[1]&0x0f == 0x01 || a.IP[1]&0x0f == 0x02) {
+			// KAME based IPv6 protocol stack usually
+			// embeds the interface index in the
+			// interface-local or link-local address as
+			// the kernel-internal form.
+			id := int(bigEndian.Uint16(a.IP[2:4]))
+			if id != 0 {
+				a.ZoneID = id
+				a.IP[2], a.IP[3] = 0, 0
+			}
+		}
+		return a, nil
+	default:
+		return nil, errInvalidAddr
+	}
+}
+
+// parseKernelInetAddr parses b as an internet address in conventional
+// BSD kernel form.
+func parseKernelInetAddr(af int, b []byte) (int, Addr, error) {
+	// The encoding looks similar to the NLRI encoding.
+	// +----------------------------+
+	// | Length           (1 octet) |
+	// +----------------------------+
+	// | Address prefix  (variable) |
+	// +----------------------------+
+	//
+	// The differences between the kernel form and the NLRI
+	// encoding are:
+	//
+	// - The length field of the kernel form indicates the prefix
+	//   length in bytes, not in bits
+	//
+	// - In the kernel form, zero value of the length field
+	//   doesn't mean 0.0.0.0/0 or ::/0
+	//
+	// - The kernel form appends leading bytes to the prefix field
+	//   to make the tuple conform to the routing message boundary
+	l := int(b[0])
+	if runtime.GOOS == "darwin" {
+		// On Darwin, an address in the kernel form is also
+		// used as a message filler.
+		if l == 0 || len(b) > roundup(l) {
+			l = roundup(l)
+		}
+	} else {
+		l = roundup(l)
+	}
+	if len(b) < l {
+		return 0, nil, errInvalidAddr
+	}
+	// Don't reorder case expressions.
+	// The case expressions for IPv6 must come first.
+	const (
+		off4 = 4 // offset of in_addr
+		off6 = 8 // offset of in6_addr
+	)
+	switch {
+	case b[0] == sizeofSockaddrInet6:
+		a := &Inet6Addr{}
+		copy(a.IP[:], b[off6:off6+16])
+		return int(b[0]), a, nil
+	case af == sysAF_INET6:
+		a := &Inet6Addr{}
+		if l-1 < off6 {
+			copy(a.IP[:], b[1:l])
+		} else {
+			copy(a.IP[:], b[l-off6:l])
+		}
+		return int(b[0]), a, nil
+	case b[0] == sizeofSockaddrInet:
+		a := &Inet4Addr{}
+		copy(a.IP[:], b[off4:off4+4])
+		return int(b[0]), a, nil
+	default: // an old-fashioned form; AF_UNSPEC or unknown means AF_INET
+		a := &Inet4Addr{}
+		if l-1 < off4 {
+			copy(a.IP[:], b[1:l])
+		} else {
+			copy(a.IP[:], b[l-off4:l])
+		}
+		return int(b[0]), a, nil
+	}
+}
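+
+// NOTE(editor, not in upstream x/net/route): a worked example of the
+// KAME zone handling in parseInetAddr above. For a link-local address
+// read off a routing socket, say fe80::1 on interface index 1, the
+// kernel hands back the bytes
+//
+//	fe 80 00 01 00 ... 00 01
+//	      ^^^^^ embedded interface index (big endian)
+//
+// so parseInetAddr reports IP fe80::1 with ZoneID 1, clearing bytes 2
+// and 3 to recover the canonical address.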
+
+// A DefaultAddr represents an address of various operating
+// system-specific features.
+type DefaultAddr struct {
+	af  int
+	Raw []byte // raw format of address
+}
+
+// Family implements the Family method of Addr interface.
+func (a *DefaultAddr) Family() int { return a.af }
+
+func (a *DefaultAddr) lenAndSpace() (int, int) {
+	l := len(a.Raw)
+	return l, roundup(l)
+}
+
+func (a *DefaultAddr) marshal(b []byte) (int, error) {
+	l, ll := a.lenAndSpace()
+	if len(b) < ll {
+		return 0, errShortBuffer
+	}
+	if l > 255 {
+		return 0, errInvalidAddr
+	}
+	b[1] = byte(l)
+	copy(b[:l], a.Raw)
+	return ll, nil
+}
+
+func parseDefaultAddr(b []byte) (Addr, error) {
+	if len(b) < 2 || len(b) < int(b[0]) {
+		return nil, errInvalidAddr
+	}
+	a := &DefaultAddr{af: int(b[1]), Raw: b[:b[0]]}
+	return a, nil
+}
+
+func addrsSpace(as []Addr) int {
+	var l int
+	for _, a := range as {
+		switch a := a.(type) {
+		case *LinkAddr:
+			_, ll := a.lenAndSpace()
+			l += ll
+		case *Inet4Addr:
+			_, ll := a.lenAndSpace()
+			l += ll
+		case *Inet6Addr:
+			_, ll := a.lenAndSpace()
+			l += ll
+		case *DefaultAddr:
+			_, ll := a.lenAndSpace()
+			l += ll
+		}
+	}
+	return l
+}
+
+// marshalAddrs marshals as and returns a bitmap indicating which
+// address is stored in b.
+func marshalAddrs(b []byte, as []Addr) (uint, error) {
+	var attrs uint
+	for i, a := range as {
+		switch a := a.(type) {
+		case *LinkAddr:
+			l, err := a.marshal(b)
+			if err != nil {
+				return 0, err
+			}
+			b = b[l:]
+			attrs |= 1 << uint(i)
+		case *Inet4Addr:
+			l, err := a.marshal(b)
+			if err != nil {
+				return 0, err
+			}
+			b = b[l:]
+			attrs |= 1 << uint(i)
+		case *Inet6Addr:
+			l, err := a.marshal(b)
+			if err != nil {
+				return 0, err
+			}
+			b = b[l:]
+			attrs |= 1 << uint(i)
+		case *DefaultAddr:
+			l, err := a.marshal(b)
+			if err != nil {
+				return 0, err
+			}
+			b = b[l:]
+			attrs |= 1 << uint(i)
+		}
+	}
+	return attrs, nil
+}
+
+func parseAddrs(attrs uint, fn func(int, []byte) (int, Addr, error), b []byte) ([]Addr, error) {
+	var as [sysRTAX_MAX]Addr
+	af := int(sysAF_UNSPEC)
+	for i := uint(0); i < sysRTAX_MAX && len(b) >= roundup(0); i++ {
+		if attrs&(1<<i) != 0 {
+			if i <= sysRTAX_BRD {
+				switch b[0] {
+				case sysAF_LINK:
+					a, err := parseLinkAddr(b)
+					if err != nil {
+						return nil, err
+					}
+					as[i] = a
+					l := roundup(int(b[0]))
+					if len(b) < l {
+						return nil, errMessageTooShort
+					}
+					b = b[l:]
+				case sysAF_INET, sysAF_INET6:
+					af = int(b[1])
+					a, err := parseInetAddr(af, b)
+					if err != nil {
+						return nil, err
+					}
+					as[i] = a
+					l := roundup(int(b[0]))
+					if len(b) < l {
+						return nil, errMessageTooShort
+					}
+					b = b[l:]
+				default:
+					l, a, err := fn(af, b)
+					if err != nil {
+						return nil, err
+					}
+					as[i] = a
+					ll := roundup(l)
+					if len(b) < ll {
+						b = b[l:]
+					} else {
+						b = b[ll:]
+					}
+				}
+			} else {
+				a, err := parseDefaultAddr(b)
+				if err != nil {
+					return nil, err
+				}
+				as[i] = a
+				l := roundup(int(b[0]))
+				if len(b) < l {
+					return nil, errMessageTooShort
+				}
+				b = b[l:]
+			}
+		}
+	}
+	return as[:], nil
+}
diff --git a/vendor/golang.org/x/net/route/binary.go b/vendor/golang.org/x/net/route/binary.go
new file mode 100644
--- /dev/null
+++ b/vendor/golang.org/x/net/route/binary.go
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package route
+
+// This file contains duplicates of encoding/binary package.
+//
+// This package is supposed to be used by the net package of standard
+// library. Therefore the package set used in the package must be the
+// same as net package.
+
+var (
+	littleEndian binaryLittleEndian
+	bigEndian    binaryBigEndian
+)
+
+type binaryByteOrder interface {
+	Uint16([]byte) uint16
+	Uint32([]byte) uint32
+	Uint64([]byte) uint64
+	PutUint16([]byte, uint16)
+	PutUint32([]byte, uint32)
+}
+
+type binaryLittleEndian struct{}
+
+func (binaryLittleEndian) Uint16(b []byte) uint16 {
+	_ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
+	return uint16(b[0]) | uint16(b[1])<<8
+}
+
+func (binaryLittleEndian) PutUint16(b []byte, v uint16) {
+	_ = b[1] // early bounds check to guarantee safety of writes below
+	b[0] = byte(v)
+	b[1] = byte(v >> 8)
+}
+
+func (binaryLittleEndian) Uint32(b []byte) uint32 {
+	_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func (binaryLittleEndian) PutUint32(b []byte, v uint32) {
+	_ = b[3] // early bounds check to guarantee safety of writes below
+	b[0] = byte(v)
+	b[1] = byte(v >> 8)
+	b[2] = byte(v >> 16)
+	b[3] = byte(v >> 24)
+}
+
+func (binaryLittleEndian) Uint64(b []byte) uint64 {
+	_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
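+
+// NOTE(editor, not in upstream binary.go): nativeEndian, used by all
+// of the message parsers, is bound to one of the two implementations
+// in this file at package init time (in sys.go) with a probe along
+// these lines, shown here only as an illustrative sketch:
+//
+//	i := uint32(1)
+//	b := (*[4]byte)(unsafe.Pointer(&i))
+//	if b[0] == 1 {
+//		nativeEndian = littleEndian
+//	} else {
+//		nativeEndian = bigEndian
+//	}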
+
+type binaryBigEndian struct{}
+
+func (binaryBigEndian) Uint16(b []byte) uint16 {
+	_ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
+	return uint16(b[1]) | uint16(b[0])<<8
+}
+
+func (binaryBigEndian) PutUint16(b []byte, v uint16) {
+	_ = b[1] // early bounds check to guarantee safety of writes below
+	b[0] = byte(v >> 8)
+	b[1] = byte(v)
+}
+
+func (binaryBigEndian) Uint32(b []byte) uint32 {
+	_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+	return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
+}
+
+func (binaryBigEndian) PutUint32(b []byte, v uint32) {
+	_ = b[3] // early bounds check to guarantee safety of writes below
+	b[0] = byte(v >> 24)
+	b[1] = byte(v >> 16)
+	b[2] = byte(v >> 8)
+	b[3] = byte(v)
+}
+
+func (binaryBigEndian) Uint64(b []byte) uint64 {
+	_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+	return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
+		uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
+}
diff --git a/vendor/golang.org/x/net/route/defs_darwin.go b/vendor/golang.org/x/net/route/defs_darwin.go
new file mode 100644
index 0000000..e771644
--- /dev/null
+++ b/vendor/golang.org/x/net/route/defs_darwin.go
@@ -0,0 +1,114 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package route
+
+/*
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/route.h>
+
+#include <netinet/in.h>
+*/
+import "C"
+
+const (
+	sysAF_UNSPEC = C.AF_UNSPEC
+	sysAF_INET   = C.AF_INET
+	sysAF_ROUTE  = C.AF_ROUTE
+	sysAF_LINK   = C.AF_LINK
+	sysAF_INET6  = C.AF_INET6
+
+	sysSOCK_RAW = C.SOCK_RAW
+
+	sysNET_RT_DUMP    = C.NET_RT_DUMP
+	sysNET_RT_FLAGS   = C.NET_RT_FLAGS
+	sysNET_RT_IFLIST  = C.NET_RT_IFLIST
+	sysNET_RT_STAT    = C.NET_RT_STAT
+	sysNET_RT_TRASH   = C.NET_RT_TRASH
+	sysNET_RT_IFLIST2 = C.NET_RT_IFLIST2
+	sysNET_RT_DUMP2   = C.NET_RT_DUMP2
+	sysNET_RT_MAXID   = C.NET_RT_MAXID
+)
+
+const (
+	sysCTL_MAXNAME = C.CTL_MAXNAME
+
+	sysCTL_UNSPEC  = C.CTL_UNSPEC
+	sysCTL_KERN    = C.CTL_KERN
+	sysCTL_VM      = C.CTL_VM
+	sysCTL_VFS     = C.CTL_VFS
+	sysCTL_NET     = C.CTL_NET
+	sysCTL_DEBUG   = C.CTL_DEBUG
+	sysCTL_HW      = C.CTL_HW
+	sysCTL_MACHDEP = C.CTL_MACHDEP
+	sysCTL_USER    = C.CTL_USER
+	sysCTL_MAXID   = C.CTL_MAXID
+)
+
+const (
+	sysRTM_VERSION = C.RTM_VERSION
+
+	sysRTM_ADD       = C.RTM_ADD
+	sysRTM_DELETE    = C.RTM_DELETE
+	sysRTM_CHANGE    = C.RTM_CHANGE
+	sysRTM_GET       = C.RTM_GET
+	sysRTM_LOSING    = C.RTM_LOSING
+	sysRTM_REDIRECT  = C.RTM_REDIRECT
+	sysRTM_MISS      = C.RTM_MISS
+	sysRTM_LOCK      = C.RTM_LOCK
+	sysRTM_OLDADD    = C.RTM_OLDADD
+	sysRTM_OLDDEL    = C.RTM_OLDDEL
+	sysRTM_RESOLVE   = C.RTM_RESOLVE
+	sysRTM_NEWADDR   = C.RTM_NEWADDR
+	sysRTM_DELADDR   = C.RTM_DELADDR
+	sysRTM_IFINFO    = C.RTM_IFINFO
+	sysRTM_NEWMADDR  = C.RTM_NEWMADDR
+	sysRTM_DELMADDR  = C.RTM_DELMADDR
+	sysRTM_IFINFO2   = C.RTM_IFINFO2
+	sysRTM_NEWMADDR2 = C.RTM_NEWMADDR2
+	sysRTM_GET2      = C.RTM_GET2
+
+	sysRTA_DST     = C.RTA_DST
+	sysRTA_GATEWAY = C.RTA_GATEWAY
+	sysRTA_NETMASK = C.RTA_NETMASK
+	sysRTA_GENMASK = C.RTA_GENMASK
+	sysRTA_IFP     = C.RTA_IFP
+	sysRTA_IFA     = C.RTA_IFA
+	sysRTA_AUTHOR  = C.RTA_AUTHOR
+	sysRTA_BRD     = C.RTA_BRD
+
+	sysRTAX_DST     = C.RTAX_DST
+	sysRTAX_GATEWAY = C.RTAX_GATEWAY
+	sysRTAX_NETMASK = C.RTAX_NETMASK
+	sysRTAX_GENMASK = C.RTAX_GENMASK
+	sysRTAX_IFP     = C.RTAX_IFP
+	sysRTAX_IFA     = C.RTAX_IFA
+	sysRTAX_AUTHOR  = C.RTAX_AUTHOR
+	sysRTAX_BRD     = C.RTAX_BRD
+	sysRTAX_MAX     = C.RTAX_MAX
+)
+
+const (
+	sizeofIfMsghdrDarwin15    = C.sizeof_struct_if_msghdr
+	sizeofIfaMsghdrDarwin15   = C.sizeof_struct_ifa_msghdr
+	sizeofIfmaMsghdrDarwin15  = C.sizeof_struct_ifma_msghdr
+	sizeofIfMsghdr2Darwin15   = C.sizeof_struct_if_msghdr2
+	sizeofIfmaMsghdr2Darwin15 = C.sizeof_struct_ifma_msghdr2
+	sizeofIfDataDarwin15      = C.sizeof_struct_if_data
+	sizeofIfData64Darwin15    = C.sizeof_struct_if_data64
+
+	sizeofRtMsghdrDarwin15  = C.sizeof_struct_rt_msghdr
+	sizeofRtMsghdr2Darwin15 = C.sizeof_struct_rt_msghdr2
+	sizeofRtMetricsDarwin15 = C.sizeof_struct_rt_metrics
+
+	sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
+	sizeofSockaddrInet    = C.sizeof_struct_sockaddr_in
+	sizeofSockaddrInet6   = C.sizeof_struct_sockaddr_in6
+)
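NOTE(editor): the defs_*.go files in this package carry a "+build ignore"
constraint; they are cgo inputs rather than compiled sources. The checked-in
zsys_*.go files that mirror these constants are regenerated with a command
along these lines (illustrative only; assumes a matching C toolchain on the
target BSD):

	go tool cgo -godefs defs_darwin.go > zsys_darwin.go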
diff --git a/vendor/golang.org/x/net/route/defs_dragonfly.go b/vendor/golang.org/x/net/route/defs_dragonfly.go
new file mode 100644
index 0000000..dd31de2
--- /dev/null
+++ b/vendor/golang.org/x/net/route/defs_dragonfly.go
@@ -0,0 +1,113 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package route
+
+/*
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/route.h>
+
+#include <netinet/in.h>
+*/
+import "C"
+
+const (
+	sysAF_UNSPEC = C.AF_UNSPEC
+	sysAF_INET   = C.AF_INET
+	sysAF_ROUTE  = C.AF_ROUTE
+	sysAF_LINK   = C.AF_LINK
+	sysAF_INET6  = C.AF_INET6
+
+	sysSOCK_RAW = C.SOCK_RAW
+
+	sysNET_RT_DUMP   = C.NET_RT_DUMP
+	sysNET_RT_FLAGS  = C.NET_RT_FLAGS
+	sysNET_RT_IFLIST = C.NET_RT_IFLIST
+	sysNET_RT_MAXID  = C.NET_RT_MAXID
+)
+
+const (
+	sysCTL_MAXNAME = C.CTL_MAXNAME
+
+	sysCTL_UNSPEC   = C.CTL_UNSPEC
+	sysCTL_KERN     = C.CTL_KERN
+	sysCTL_VM       = C.CTL_VM
+	sysCTL_VFS      = C.CTL_VFS
+	sysCTL_NET      = C.CTL_NET
+	sysCTL_DEBUG    = C.CTL_DEBUG
+	sysCTL_HW       = C.CTL_HW
+	sysCTL_MACHDEP  = C.CTL_MACHDEP
+	sysCTL_USER     = C.CTL_USER
+	sysCTL_P1003_1B = C.CTL_P1003_1B
+	sysCTL_LWKT     = C.CTL_LWKT
+	sysCTL_MAXID    = C.CTL_MAXID
+)
+
+const (
+	sysRTM_VERSION = C.RTM_VERSION
+
+	sysRTM_ADD        = C.RTM_ADD
+	sysRTM_DELETE     = C.RTM_DELETE
+	sysRTM_CHANGE     = C.RTM_CHANGE
+	sysRTM_GET        = C.RTM_GET
+	sysRTM_LOSING     = C.RTM_LOSING
+	sysRTM_REDIRECT   = C.RTM_REDIRECT
+	sysRTM_MISS       = C.RTM_MISS
+	sysRTM_LOCK       = C.RTM_LOCK
+	sysRTM_OLDADD     = C.RTM_OLDADD
+	sysRTM_OLDDEL     = C.RTM_OLDDEL
+	sysRTM_RESOLVE    = C.RTM_RESOLVE
+	sysRTM_NEWADDR    = C.RTM_NEWADDR
+	sysRTM_DELADDR    = C.RTM_DELADDR
+	sysRTM_IFINFO     = C.RTM_IFINFO
+	sysRTM_NEWMADDR   = C.RTM_NEWMADDR
+	sysRTM_DELMADDR   = C.RTM_DELMADDR
+	sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE
+	sysRTM_IEEE80211  = C.RTM_IEEE80211
+
+	sysRTA_DST     = C.RTA_DST
+	sysRTA_GATEWAY = C.RTA_GATEWAY
+	sysRTA_NETMASK = C.RTA_NETMASK
+	sysRTA_GENMASK = C.RTA_GENMASK
+	sysRTA_IFP     = C.RTA_IFP
+	sysRTA_IFA     = C.RTA_IFA
+	sysRTA_AUTHOR  = C.RTA_AUTHOR
+	sysRTA_BRD     = C.RTA_BRD
+	sysRTA_MPLS1   = C.RTA_MPLS1
+	sysRTA_MPLS2   = C.RTA_MPLS2
+	sysRTA_MPLS3   = C.RTA_MPLS3
+
+	sysRTAX_DST     = C.RTAX_DST
+	sysRTAX_GATEWAY = C.RTAX_GATEWAY
+	sysRTAX_NETMASK = C.RTAX_NETMASK
+	sysRTAX_GENMASK = C.RTAX_GENMASK
+	sysRTAX_IFP     = C.RTAX_IFP
+	sysRTAX_IFA     = C.RTAX_IFA
+	sysRTAX_AUTHOR  = C.RTAX_AUTHOR
+	sysRTAX_BRD     = C.RTAX_BRD
+	sysRTAX_MPLS1   = C.RTAX_MPLS1
+	sysRTAX_MPLS2   = C.RTAX_MPLS2
+	sysRTAX_MPLS3   = C.RTAX_MPLS3
+	sysRTAX_MAX     = C.RTAX_MAX
+)
+
+const (
+	sizeofIfMsghdrDragonFlyBSD4         = C.sizeof_struct_if_msghdr
+	sizeofIfaMsghdrDragonFlyBSD4        = C.sizeof_struct_ifa_msghdr
+	sizeofIfmaMsghdrDragonFlyBSD4       = C.sizeof_struct_ifma_msghdr
+	sizeofIfAnnouncemsghdrDragonFlyBSD4 = C.sizeof_struct_if_announcemsghdr
+
+	sizeofRtMsghdrDragonFlyBSD4  = C.sizeof_struct_rt_msghdr
+	sizeofRtMetricsDragonFlyBSD4 = C.sizeof_struct_rt_metrics
+
+	sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
+	sizeofSockaddrInet    = C.sizeof_struct_sockaddr_in
+	sizeofSockaddrInet6   = C.sizeof_struct_sockaddr_in6
+)
diff --git a/vendor/golang.org/x/net/route/defs_freebsd.go b/vendor/golang.org/x/net/route/defs_freebsd.go
new file mode 100644
index 0000000..d95594d
--- /dev/null
+++ b/vendor/golang.org/x/net/route/defs_freebsd.go
@@ -0,0 +1,337 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package route
+
+/*
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/route.h>
+
+#include <netinet/in.h>
+
+struct if_data_freebsd7 {
+	u_char ifi_type;
+	u_char ifi_physical;
+	u_char ifi_addrlen;
+	u_char ifi_hdrlen;
+	u_char ifi_link_state;
+	u_char ifi_spare_char1;
+	u_char ifi_spare_char2;
+	u_char ifi_datalen;
+	u_long ifi_mtu;
+	u_long ifi_metric;
+	u_long ifi_baudrate;
+	u_long ifi_ipackets;
+	u_long ifi_ierrors;
+	u_long ifi_opackets;
+	u_long ifi_oerrors;
+	u_long ifi_collisions;
+	u_long ifi_ibytes;
+	u_long ifi_obytes;
+	u_long ifi_imcasts;
+	u_long ifi_omcasts;
+	u_long ifi_iqdrops;
+	u_long ifi_noproto;
+	u_long ifi_hwassist;
+	time_t __ifi_epoch;
+	struct timeval __ifi_lastchange;
+};
+
+struct if_data_freebsd8 {
+	u_char ifi_type;
+	u_char ifi_physical;
+	u_char ifi_addrlen;
+	u_char ifi_hdrlen;
+	u_char ifi_link_state;
+	u_char ifi_spare_char1;
+	u_char ifi_spare_char2;
+	u_char ifi_datalen;
+	u_long ifi_mtu;
+	u_long ifi_metric;
+	u_long ifi_baudrate;
+	u_long ifi_ipackets;
+	u_long ifi_ierrors;
+	u_long ifi_opackets;
+	u_long ifi_oerrors;
+	u_long ifi_collisions;
+	u_long ifi_ibytes;
+	u_long ifi_obytes;
+	u_long ifi_imcasts;
+	u_long ifi_omcasts;
+	u_long ifi_iqdrops;
+	u_long ifi_noproto;
+	u_long ifi_hwassist;
+	time_t __ifi_epoch;
+	struct timeval __ifi_lastchange;
+};
+
+struct if_data_freebsd9 {
+	u_char ifi_type;
+	u_char ifi_physical;
+	u_char ifi_addrlen;
+	u_char ifi_hdrlen;
+	u_char ifi_link_state;
+	u_char ifi_spare_char1;
+	u_char ifi_spare_char2;
+	u_char ifi_datalen;
+	u_long ifi_mtu;
+	u_long ifi_metric;
+	u_long ifi_baudrate;
+	u_long ifi_ipackets;
+	u_long ifi_ierrors;
+	u_long ifi_opackets;
+	u_long ifi_oerrors;
+	u_long ifi_collisions;
+	u_long ifi_ibytes;
+	u_long ifi_obytes;
+	u_long ifi_imcasts;
+	u_long ifi_omcasts;
+	u_long ifi_iqdrops;
+	u_long ifi_noproto;
+	u_long ifi_hwassist;
+	time_t __ifi_epoch;
+	struct timeval __ifi_lastchange;
+};
+
+struct if_data_freebsd10 {
+	u_char ifi_type;
+	u_char ifi_physical;
+	u_char ifi_addrlen;
+	u_char ifi_hdrlen;
+	u_char ifi_link_state;
+	u_char ifi_vhid;
+	u_char ifi_baudrate_pf;
+	u_char ifi_datalen;
+	u_long ifi_mtu;
+	u_long ifi_metric;
+	u_long ifi_baudrate;
+	u_long ifi_ipackets;
+	u_long ifi_ierrors;
+	u_long ifi_opackets;
+	u_long ifi_oerrors;
+	u_long ifi_collisions;
+	u_long ifi_ibytes;
+	u_long ifi_obytes;
+	u_long ifi_imcasts;
+	u_long ifi_omcasts;
+	u_long ifi_iqdrops;
+	u_long ifi_noproto;
+	uint64_t ifi_hwassist;
+	time_t __ifi_epoch;
+	struct timeval __ifi_lastchange;
+};
+
+struct if_data_freebsd11 {
+	uint8_t ifi_type;
+	uint8_t ifi_physical;
+	uint8_t ifi_addrlen;
+	uint8_t ifi_hdrlen;
+	uint8_t ifi_link_state;
+	uint8_t ifi_vhid;
+	uint16_t ifi_datalen;
+	uint32_t ifi_mtu;
+	uint32_t ifi_metric;
+	uint64_t ifi_baudrate;
+	uint64_t ifi_ipackets;
+	uint64_t ifi_ierrors;
+	uint64_t ifi_opackets;
+	uint64_t ifi_oerrors;
+	uint64_t ifi_collisions;
+	uint64_t ifi_ibytes;
+	uint64_t ifi_obytes;
+	uint64_t ifi_imcasts;
+	uint64_t ifi_omcasts;
+	uint64_t ifi_iqdrops;
+	uint64_t ifi_oqdrops;
+	uint64_t ifi_noproto;
+	uint64_t ifi_hwassist;
+	union {
+		time_t tt;
+		uint64_t ph;
+	} __ifi_epoch;
+	union {
+		struct timeval tv;
+		struct {
+			uint64_t ph1;
+			uint64_t ph2;
+		} ph;
+	} __ifi_lastchange;
+};
+
+struct if_msghdr_freebsd7 {
+	u_short ifm_msglen;
+	u_char ifm_version;
+	u_char ifm_type;
+	int ifm_addrs;
+	int ifm_flags;
+	u_short ifm_index;
+	struct if_data_freebsd7 ifm_data;
+};
+
+struct if_msghdr_freebsd8 {
+	u_short ifm_msglen;
+	u_char ifm_version;
+	u_char ifm_type;
+	int ifm_addrs;
+ int ifm_flags; + u_short ifm_index; + struct if_data_freebsd8 ifm_data; +}; + +struct if_msghdr_freebsd9 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; + int ifm_flags; + u_short ifm_index; + struct if_data_freebsd9 ifm_data; +}; + +struct if_msghdr_freebsd10 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; + int ifm_flags; + u_short ifm_index; + struct if_data_freebsd10 ifm_data; +}; + +struct if_msghdr_freebsd11 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; + int ifm_flags; + u_short ifm_index; + struct if_data_freebsd11 ifm_data; +}; +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_ROUTE = C.AF_ROUTE + sysAF_LINK = C.AF_LINK + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW + + sysNET_RT_DUMP = C.NET_RT_DUMP + sysNET_RT_FLAGS = C.NET_RT_FLAGS + sysNET_RT_IFLIST = C.NET_RT_IFLIST + sysNET_RT_IFMALIST = C.NET_RT_IFMALIST + sysNET_RT_IFLISTL = C.NET_RT_IFLISTL +) + +const ( + sysCTL_MAXNAME = C.CTL_MAXNAME + + sysCTL_UNSPEC = C.CTL_UNSPEC + sysCTL_KERN = C.CTL_KERN + sysCTL_VM = C.CTL_VM + sysCTL_VFS = C.CTL_VFS + sysCTL_NET = C.CTL_NET + sysCTL_DEBUG = C.CTL_DEBUG + sysCTL_HW = C.CTL_HW + sysCTL_MACHDEP = C.CTL_MACHDEP + sysCTL_USER = C.CTL_USER + sysCTL_P1003_1B = C.CTL_P1003_1B +) + +const ( + sysRTM_VERSION = C.RTM_VERSION + + sysRTM_ADD = C.RTM_ADD + sysRTM_DELETE = C.RTM_DELETE + sysRTM_CHANGE = C.RTM_CHANGE + sysRTM_GET = C.RTM_GET + sysRTM_LOSING = C.RTM_LOSING + sysRTM_REDIRECT = C.RTM_REDIRECT + sysRTM_MISS = C.RTM_MISS + sysRTM_LOCK = C.RTM_LOCK + sysRTM_RESOLVE = C.RTM_RESOLVE + sysRTM_NEWADDR = C.RTM_NEWADDR + sysRTM_DELADDR = C.RTM_DELADDR + sysRTM_IFINFO = C.RTM_IFINFO + sysRTM_NEWMADDR = C.RTM_NEWMADDR + sysRTM_DELMADDR = C.RTM_DELMADDR + sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE + sysRTM_IEEE80211 = C.RTM_IEEE80211 + + sysRTA_DST = C.RTA_DST + sysRTA_GATEWAY = C.RTA_GATEWAY + sysRTA_NETMASK = C.RTA_NETMASK + sysRTA_GENMASK = C.RTA_GENMASK + sysRTA_IFP = C.RTA_IFP + sysRTA_IFA = C.RTA_IFA + sysRTA_AUTHOR = C.RTA_AUTHOR + sysRTA_BRD = C.RTA_BRD + + sysRTAX_DST = C.RTAX_DST + sysRTAX_GATEWAY = C.RTAX_GATEWAY + sysRTAX_NETMASK = C.RTAX_NETMASK + sysRTAX_GENMASK = C.RTAX_GENMASK + sysRTAX_IFP = C.RTAX_IFP + sysRTAX_IFA = C.RTAX_IFA + sysRTAX_AUTHOR = C.RTAX_AUTHOR + sysRTAX_BRD = C.RTAX_BRD + sysRTAX_MAX = C.RTAX_MAX +) + +const ( + sizeofIfMsghdrlFreeBSD10 = C.sizeof_struct_if_msghdrl + sizeofIfaMsghdrFreeBSD10 = C.sizeof_struct_ifa_msghdr + sizeofIfaMsghdrlFreeBSD10 = C.sizeof_struct_ifa_msghdrl + sizeofIfmaMsghdrFreeBSD10 = C.sizeof_struct_ifma_msghdr + sizeofIfAnnouncemsghdrFreeBSD10 = C.sizeof_struct_if_announcemsghdr + + sizeofRtMsghdrFreeBSD10 = C.sizeof_struct_rt_msghdr + sizeofRtMetricsFreeBSD10 = C.sizeof_struct_rt_metrics + + sizeofIfMsghdrFreeBSD7 = C.sizeof_struct_if_msghdr_freebsd7 + sizeofIfMsghdrFreeBSD8 = C.sizeof_struct_if_msghdr_freebsd8 + sizeofIfMsghdrFreeBSD9 = C.sizeof_struct_if_msghdr_freebsd9 + sizeofIfMsghdrFreeBSD10 = C.sizeof_struct_if_msghdr_freebsd10 + sizeofIfMsghdrFreeBSD11 = C.sizeof_struct_if_msghdr_freebsd11 + + sizeofIfDataFreeBSD7 = C.sizeof_struct_if_data_freebsd7 + sizeofIfDataFreeBSD8 = C.sizeof_struct_if_data_freebsd8 + sizeofIfDataFreeBSD9 = C.sizeof_struct_if_data_freebsd9 + sizeofIfDataFreeBSD10 = C.sizeof_struct_if_data_freebsd10 + sizeofIfDataFreeBSD11 = C.sizeof_struct_if_data_freebsd11 + + sizeofIfMsghdrlFreeBSD10Emu = C.sizeof_struct_if_msghdrl + sizeofIfaMsghdrFreeBSD10Emu = 
C.sizeof_struct_ifa_msghdr
+	sizeofIfaMsghdrlFreeBSD10Emu       = C.sizeof_struct_ifa_msghdrl
+	sizeofIfmaMsghdrFreeBSD10Emu       = C.sizeof_struct_ifma_msghdr
+	sizeofIfAnnouncemsghdrFreeBSD10Emu = C.sizeof_struct_if_announcemsghdr
+
+	sizeofRtMsghdrFreeBSD10Emu  = C.sizeof_struct_rt_msghdr
+	sizeofRtMetricsFreeBSD10Emu = C.sizeof_struct_rt_metrics
+
+	sizeofIfMsghdrFreeBSD7Emu  = C.sizeof_struct_if_msghdr_freebsd7
+	sizeofIfMsghdrFreeBSD8Emu  = C.sizeof_struct_if_msghdr_freebsd8
+	sizeofIfMsghdrFreeBSD9Emu  = C.sizeof_struct_if_msghdr_freebsd9
+	sizeofIfMsghdrFreeBSD10Emu = C.sizeof_struct_if_msghdr_freebsd10
+	sizeofIfMsghdrFreeBSD11Emu = C.sizeof_struct_if_msghdr_freebsd11
+
+	sizeofIfDataFreeBSD7Emu  = C.sizeof_struct_if_data_freebsd7
+	sizeofIfDataFreeBSD8Emu  = C.sizeof_struct_if_data_freebsd8
+	sizeofIfDataFreeBSD9Emu  = C.sizeof_struct_if_data_freebsd9
+	sizeofIfDataFreeBSD10Emu = C.sizeof_struct_if_data_freebsd10
+	sizeofIfDataFreeBSD11Emu = C.sizeof_struct_if_data_freebsd11
+
+	sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
+	sizeofSockaddrInet    = C.sizeof_struct_sockaddr_in
+	sizeofSockaddrInet6   = C.sizeof_struct_sockaddr_in6
+)
diff --git a/vendor/golang.org/x/net/route/defs_netbsd.go b/vendor/golang.org/x/net/route/defs_netbsd.go
new file mode 100644
index 0000000..b0abd54
--- /dev/null
+++ b/vendor/golang.org/x/net/route/defs_netbsd.go
@@ -0,0 +1,112 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package route
+
+/*
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/route.h>
+
+#include <netinet/in.h>
+*/
+import "C"
+
+const (
+	sysAF_UNSPEC = C.AF_UNSPEC
+	sysAF_INET   = C.AF_INET
+	sysAF_ROUTE  = C.AF_ROUTE
+	sysAF_LINK   = C.AF_LINK
+	sysAF_INET6  = C.AF_INET6
+
+	sysSOCK_RAW = C.SOCK_RAW
+
+	sysNET_RT_DUMP   = C.NET_RT_DUMP
+	sysNET_RT_FLAGS  = C.NET_RT_FLAGS
+	sysNET_RT_IFLIST = C.NET_RT_IFLIST
+	sysNET_RT_MAXID  = C.NET_RT_MAXID
+)
+
+const (
+	sysCTL_MAXNAME = C.CTL_MAXNAME
+
+	sysCTL_UNSPEC   = C.CTL_UNSPEC
+	sysCTL_KERN     = C.CTL_KERN
+	sysCTL_VM       = C.CTL_VM
+	sysCTL_VFS      = C.CTL_VFS
+	sysCTL_NET      = C.CTL_NET
+	sysCTL_DEBUG    = C.CTL_DEBUG
+	sysCTL_HW       = C.CTL_HW
+	sysCTL_MACHDEP  = C.CTL_MACHDEP
+	sysCTL_USER     = C.CTL_USER
+	sysCTL_DDB      = C.CTL_DDB
+	sysCTL_PROC     = C.CTL_PROC
+	sysCTL_VENDOR   = C.CTL_VENDOR
+	sysCTL_EMUL     = C.CTL_EMUL
+	sysCTL_SECURITY = C.CTL_SECURITY
+	sysCTL_MAXID    = C.CTL_MAXID
+)
+
+const (
+	sysRTM_VERSION = C.RTM_VERSION
+
+	sysRTM_ADD        = C.RTM_ADD
+	sysRTM_DELETE     = C.RTM_DELETE
+	sysRTM_CHANGE     = C.RTM_CHANGE
+	sysRTM_GET        = C.RTM_GET
+	sysRTM_LOSING     = C.RTM_LOSING
+	sysRTM_REDIRECT   = C.RTM_REDIRECT
+	sysRTM_MISS       = C.RTM_MISS
+	sysRTM_LOCK       = C.RTM_LOCK
+	sysRTM_OLDADD     = C.RTM_OLDADD
+	sysRTM_OLDDEL     = C.RTM_OLDDEL
+	sysRTM_RESOLVE    = C.RTM_RESOLVE
+	sysRTM_NEWADDR    = C.RTM_NEWADDR
+	sysRTM_DELADDR    = C.RTM_DELADDR
+	sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE
+	sysRTM_IEEE80211  = C.RTM_IEEE80211
+	sysRTM_SETGATE    = C.RTM_SETGATE
+	sysRTM_LLINFO_UPD = C.RTM_LLINFO_UPD
+	sysRTM_IFINFO     = C.RTM_IFINFO
+	sysRTM_CHGADDR    = C.RTM_CHGADDR
+
+	sysRTA_DST     = C.RTA_DST
+	sysRTA_GATEWAY = C.RTA_GATEWAY
+	sysRTA_NETMASK = C.RTA_NETMASK
+	sysRTA_GENMASK = C.RTA_GENMASK
+	sysRTA_IFP     = C.RTA_IFP
+	sysRTA_IFA     = C.RTA_IFA
+	sysRTA_AUTHOR  = C.RTA_AUTHOR
+	sysRTA_BRD     = C.RTA_BRD
+	sysRTA_TAG     = C.RTA_TAG
+
+	sysRTAX_DST     = C.RTAX_DST
+	sysRTAX_GATEWAY = C.RTAX_GATEWAY
+	sysRTAX_NETMASK = C.RTAX_NETMASK
+	sysRTAX_GENMASK = C.RTAX_GENMASK
+	sysRTAX_IFP     = C.RTAX_IFP
+	sysRTAX_IFA     = C.RTAX_IFA
+	sysRTAX_AUTHOR  = C.RTAX_AUTHOR
+	sysRTAX_BRD     = C.RTAX_BRD
+	sysRTAX_TAG     = C.RTAX_TAG
+	sysRTAX_MAX     = C.RTAX_MAX
+)
+
+const (
+	sizeofIfMsghdrNetBSD7         = C.sizeof_struct_if_msghdr
+	sizeofIfaMsghdrNetBSD7        = C.sizeof_struct_ifa_msghdr
+	sizeofIfAnnouncemsghdrNetBSD7 = C.sizeof_struct_if_announcemsghdr
+
+	sizeofRtMsghdrNetBSD7  = C.sizeof_struct_rt_msghdr
+	sizeofRtMetricsNetBSD7 = C.sizeof_struct_rt_metrics
+
+	sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
+	sizeofSockaddrInet    = C.sizeof_struct_sockaddr_in
+	sizeofSockaddrInet6   = C.sizeof_struct_sockaddr_in6
+)
diff --git a/vendor/golang.org/x/net/route/defs_openbsd.go b/vendor/golang.org/x/net/route/defs_openbsd.go
new file mode 100644
index 0000000..173bb5d
--- /dev/null
+++ b/vendor/golang.org/x/net/route/defs_openbsd.go
@@ -0,0 +1,116 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package route
+
+/*
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/route.h>
+
+#include <netinet/in.h>
+*/
+import "C"
+
+const (
+	sysAF_UNSPEC = C.AF_UNSPEC
+	sysAF_INET   = C.AF_INET
+	sysAF_ROUTE  = C.AF_ROUTE
+	sysAF_LINK   = C.AF_LINK
+	sysAF_INET6  = C.AF_INET6
+
+	sysSOCK_RAW = C.SOCK_RAW
+
+	sysNET_RT_DUMP    = C.NET_RT_DUMP
+	sysNET_RT_FLAGS   = C.NET_RT_FLAGS
+	sysNET_RT_IFLIST  = C.NET_RT_IFLIST
+	sysNET_RT_STATS   = C.NET_RT_STATS
+	sysNET_RT_TABLE   = C.NET_RT_TABLE
+	sysNET_RT_IFNAMES = C.NET_RT_IFNAMES
+	sysNET_RT_MAXID   = C.NET_RT_MAXID
+)
+
+const (
+	sysCTL_MAXNAME = C.CTL_MAXNAME
+
+	sysCTL_UNSPEC  = C.CTL_UNSPEC
+	sysCTL_KERN    = C.CTL_KERN
+	sysCTL_VM      = C.CTL_VM
+	sysCTL_FS      = C.CTL_FS
+	sysCTL_NET     = C.CTL_NET
+	sysCTL_DEBUG   = C.CTL_DEBUG
+	sysCTL_HW      = C.CTL_HW
+	sysCTL_MACHDEP = C.CTL_MACHDEP
+	sysCTL_DDB     = C.CTL_DDB
+	sysCTL_VFS     = C.CTL_VFS
+	sysCTL_MAXID   = C.CTL_MAXID
+)
+
+const (
+	sysRTM_VERSION = C.RTM_VERSION
+
+	sysRTM_ADD        = C.RTM_ADD
+	sysRTM_DELETE     = C.RTM_DELETE
+	sysRTM_CHANGE     = C.RTM_CHANGE
+	sysRTM_GET        = C.RTM_GET
+	sysRTM_LOSING     = C.RTM_LOSING
+	sysRTM_REDIRECT   = C.RTM_REDIRECT
+	sysRTM_MISS       = C.RTM_MISS
+	sysRTM_LOCK       = C.RTM_LOCK
+	sysRTM_RESOLVE    = C.RTM_RESOLVE
+	sysRTM_NEWADDR    = C.RTM_NEWADDR
+	sysRTM_DELADDR    = C.RTM_DELADDR
+	sysRTM_IFINFO     = C.RTM_IFINFO
+	sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE
+	sysRTM_DESYNC     = C.RTM_DESYNC
+	sysRTM_INVALIDATE = C.RTM_INVALIDATE
+	sysRTM_BFD        = C.RTM_BFD
+	sysRTM_PROPOSAL   = C.RTM_PROPOSAL
+
+	sysRTA_DST     = C.RTA_DST
+	sysRTA_GATEWAY = C.RTA_GATEWAY
+	sysRTA_NETMASK = C.RTA_NETMASK
+	sysRTA_GENMASK = C.RTA_GENMASK
+	sysRTA_IFP     = C.RTA_IFP
+	sysRTA_IFA     = C.RTA_IFA
+	sysRTA_AUTHOR  = C.RTA_AUTHOR
+	sysRTA_BRD     = C.RTA_BRD
+	sysRTA_SRC     = C.RTA_SRC
+	sysRTA_SRCMASK = C.RTA_SRCMASK
+	sysRTA_LABEL   = C.RTA_LABEL
+	sysRTA_BFD     = C.RTA_BFD
+	sysRTA_DNS     = C.RTA_DNS
+	sysRTA_STATIC  = C.RTA_STATIC
+	sysRTA_SEARCH  = C.RTA_SEARCH
+
+	sysRTAX_DST     = C.RTAX_DST
+	sysRTAX_GATEWAY = C.RTAX_GATEWAY
+	sysRTAX_NETMASK = C.RTAX_NETMASK
+	sysRTAX_GENMASK = C.RTAX_GENMASK
+	sysRTAX_IFP     = C.RTAX_IFP
+	sysRTAX_IFA     = C.RTAX_IFA
+	sysRTAX_AUTHOR  = C.RTAX_AUTHOR
+	sysRTAX_BRD     = C.RTAX_BRD
+	sysRTAX_SRC     = C.RTAX_SRC
+	sysRTAX_SRCMASK = C.RTAX_SRCMASK
+	sysRTAX_LABEL   = C.RTAX_LABEL
+	sysRTAX_BFD     = C.RTAX_BFD
+	sysRTAX_DNS     = C.RTAX_DNS
+	sysRTAX_STATIC  = C.RTAX_STATIC
+	sysRTAX_SEARCH  = C.RTAX_SEARCH
+	sysRTAX_MAX     = C.RTAX_MAX
+)
+
+const (
+	sizeofRtMsghdr = C.sizeof_struct_rt_msghdr
+
+	sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
+	sizeofSockaddrInet    = C.sizeof_struct_sockaddr_in
+	sizeofSockaddrInet6   = C.sizeof_struct_sockaddr_in6
+)
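NOTE(editor): the sizeof* constants generated from the defs files above
parameterize per-message-type wire formats; each parser in the files that
follow slices a message at a body offset taken from them. An illustrative
sketch only, since the real table is built in the sys_*.go files of this
package:

	ifm := &wireFormat{extOff: sizeofIfMsghdrDarwin15, bodyOff: sizeofIfMsghdrDarwin15}
	ifm.parse = ifm.parseInterfaceMessage
	wireFormats[sysRTM_IFINFO] = ifm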
diff --git a/vendor/golang.org/x/net/route/interface.go b/vendor/golang.org/x/net/route/interface.go
new file mode 100644
index 0000000..854906d
--- /dev/null
+++ b/vendor/golang.org/x/net/route/interface.go
@@ -0,0 +1,64 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package route
+
+// An InterfaceMessage represents an interface message.
+type InterfaceMessage struct {
+	Version int    // message version
+	Type    int    // message type
+	Flags   int    // interface flags
+	Index   int    // interface index
+	Name    string // interface name
+	Addrs   []Addr // addresses
+
+	extOff int    // offset of header extension
+	raw    []byte // raw message
+}
+
+// An InterfaceAddrMessage represents an interface address message.
+type InterfaceAddrMessage struct {
+	Version int    // message version
+	Type    int    // message type
+	Flags   int    // interface flags
+	Index   int    // interface index
+	Addrs   []Addr // addresses
+
+	raw []byte // raw message
+}
+
+// Sys implements the Sys method of Message interface.
+func (m *InterfaceAddrMessage) Sys() []Sys { return nil }
+
+// An InterfaceMulticastAddrMessage represents an interface multicast
+// address message.
+type InterfaceMulticastAddrMessage struct {
+	Version int    // message version
+	Type    int    // message type
+	Flags   int    // interface flags
+	Index   int    // interface index
+	Addrs   []Addr // addresses
+
+	raw []byte // raw message
+}
+
+// Sys implements the Sys method of Message interface.
+func (m *InterfaceMulticastAddrMessage) Sys() []Sys { return nil }
+
+// An InterfaceAnnounceMessage represents an interface announcement
+// message.
+type InterfaceAnnounceMessage struct {
+	Version int    // message version
+	Type    int    // message type
+	Index   int    // interface index
+	Name    string // interface name
+	What    int    // what type of announcement
+
+	raw []byte // raw message
+}
+
+// Sys implements the Sys method of Message interface.
+func (m *InterfaceAnnounceMessage) Sys() []Sys { return nil }
diff --git a/vendor/golang.org/x/net/route/interface_announce.go b/vendor/golang.org/x/net/route/interface_announce.go
new file mode 100644
index 0000000..520d657
--- /dev/null
+++ b/vendor/golang.org/x/net/route/interface_announce.go
@@ -0,0 +1,32 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build dragonfly freebsd netbsd
+
+package route
+
+func (w *wireFormat) parseInterfaceAnnounceMessage(_ RIBType, b []byte) (Message, error) {
+	if len(b) < w.bodyOff {
+		return nil, errMessageTooShort
+	}
+	l := int(nativeEndian.Uint16(b[:2]))
+	if len(b) < l {
+		return nil, errInvalidMessage
+	}
+	m := &InterfaceAnnounceMessage{
+		Version: int(b[2]),
+		Type:    int(b[3]),
+		Index:   int(nativeEndian.Uint16(b[4:6])),
+		What:    int(nativeEndian.Uint16(b[22:24])),
+		raw:     b[:l],
+	}
+	for i := 0; i < 16; i++ {
+		if b[6+i] != 0 {
+			continue
+		}
+		m.Name = string(b[6 : 6+i])
+		break
+	}
+	return m, nil
+}
diff --git a/vendor/golang.org/x/net/route/interface_classic.go b/vendor/golang.org/x/net/route/interface_classic.go
new file mode 100644
index 0000000..ac4e7a6
--- /dev/null
+++ b/vendor/golang.org/x/net/route/interface_classic.go
@@ -0,0 +1,66 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly netbsd + +package route + +import "runtime" + +func (w *wireFormat) parseInterfaceMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < w.bodyOff { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + attrs := uint(nativeEndian.Uint32(b[4:8])) + if attrs&sysRTA_IFP == 0 { + return nil, nil + } + m := &InterfaceMessage{ + Version: int(b[2]), + Type: int(b[3]), + Addrs: make([]Addr, sysRTAX_MAX), + Flags: int(nativeEndian.Uint32(b[8:12])), + Index: int(nativeEndian.Uint16(b[12:14])), + extOff: w.extOff, + raw: b[:l], + } + a, err := parseLinkAddr(b[w.bodyOff:]) + if err != nil { + return nil, err + } + m.Addrs[sysRTAX_IFP] = a + m.Name = a.(*LinkAddr).Name + return m, nil +} + +func (w *wireFormat) parseInterfaceAddrMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < w.bodyOff { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &InterfaceAddrMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[8:12])), + raw: b[:l], + } + if runtime.GOOS == "netbsd" { + m.Index = int(nativeEndian.Uint16(b[16:18])) + } else { + m.Index = int(nativeEndian.Uint16(b[12:14])) + } + var err error + m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[4:8])), parseKernelInetAddr, b[w.bodyOff:]) + if err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/golang.org/x/net/route/interface_freebsd.go b/vendor/golang.org/x/net/route/interface_freebsd.go new file mode 100644 index 0000000..9f6f50c --- /dev/null +++ b/vendor/golang.org/x/net/route/interface_freebsd.go @@ -0,0 +1,78 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package route + +func (w *wireFormat) parseInterfaceMessage(typ RIBType, b []byte) (Message, error) { + var extOff, bodyOff int + if typ == sysNET_RT_IFLISTL { + if len(b) < 20 { + return nil, errMessageTooShort + } + extOff = int(nativeEndian.Uint16(b[18:20])) + bodyOff = int(nativeEndian.Uint16(b[16:18])) + } else { + extOff = w.extOff + bodyOff = w.bodyOff + } + if len(b) < extOff || len(b) < bodyOff { + return nil, errInvalidMessage + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + attrs := uint(nativeEndian.Uint32(b[4:8])) + if attrs&sysRTA_IFP == 0 { + return nil, nil + } + m := &InterfaceMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[8:12])), + Index: int(nativeEndian.Uint16(b[12:14])), + Addrs: make([]Addr, sysRTAX_MAX), + extOff: extOff, + raw: b[:l], + } + a, err := parseLinkAddr(b[bodyOff:]) + if err != nil { + return nil, err + } + m.Addrs[sysRTAX_IFP] = a + m.Name = a.(*LinkAddr).Name + return m, nil +} + +func (w *wireFormat) parseInterfaceAddrMessage(typ RIBType, b []byte) (Message, error) { + var bodyOff int + if typ == sysNET_RT_IFLISTL { + if len(b) < 24 { + return nil, errMessageTooShort + } + bodyOff = int(nativeEndian.Uint16(b[16:18])) + } else { + bodyOff = w.bodyOff + } + if len(b) < bodyOff { + return nil, errInvalidMessage + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &InterfaceAddrMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[8:12])), + Index: int(nativeEndian.Uint16(b[12:14])), + raw: b[:l], + } + var err error + m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[4:8])), parseKernelInetAddr, b[bodyOff:]) + if err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/golang.org/x/net/route/interface_multicast.go b/vendor/golang.org/x/net/route/interface_multicast.go new file mode 100644 index 0000000..1e99a9c --- /dev/null +++ b/vendor/golang.org/x/net/route/interface_multicast.go @@ -0,0 +1,30 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd + +package route + +func (w *wireFormat) parseInterfaceMulticastAddrMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < w.bodyOff { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &InterfaceMulticastAddrMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[8:12])), + Index: int(nativeEndian.Uint16(b[12:14])), + raw: b[:l], + } + var err error + m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[4:8])), parseKernelInetAddr, b[w.bodyOff:]) + if err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/golang.org/x/net/route/interface_openbsd.go b/vendor/golang.org/x/net/route/interface_openbsd.go new file mode 100644 index 0000000..e4a143c --- /dev/null +++ b/vendor/golang.org/x/net/route/interface_openbsd.go @@ -0,0 +1,90 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package route
+
+func (*wireFormat) parseInterfaceMessage(_ RIBType, b []byte) (Message, error) {
+	if len(b) < 32 {
+		return nil, errMessageTooShort
+	}
+	l := int(nativeEndian.Uint16(b[:2]))
+	if len(b) < l {
+		return nil, errInvalidMessage
+	}
+	attrs := uint(nativeEndian.Uint32(b[12:16]))
+	if attrs&sysRTA_IFP == 0 {
+		return nil, nil
+	}
+	m := &InterfaceMessage{
+		Version: int(b[2]),
+		Type:    int(b[3]),
+		Flags:   int(nativeEndian.Uint32(b[16:20])),
+		Index:   int(nativeEndian.Uint16(b[6:8])),
+		Addrs:   make([]Addr, sysRTAX_MAX),
+		raw:     b[:l],
+	}
+	ll := int(nativeEndian.Uint16(b[4:6]))
+	if len(b) < ll {
+		return nil, errInvalidMessage
+	}
+	a, err := parseLinkAddr(b[ll:])
+	if err != nil {
+		return nil, err
+	}
+	m.Addrs[sysRTAX_IFP] = a
+	m.Name = a.(*LinkAddr).Name
+	return m, nil
+}
+
+func (*wireFormat) parseInterfaceAddrMessage(_ RIBType, b []byte) (Message, error) {
+	if len(b) < 24 {
+		return nil, errMessageTooShort
+	}
+	l := int(nativeEndian.Uint16(b[:2]))
+	if len(b) < l {
+		return nil, errInvalidMessage
+	}
+	bodyOff := int(nativeEndian.Uint16(b[4:6]))
+	if len(b) < bodyOff {
+		return nil, errInvalidMessage
+	}
+	m := &InterfaceAddrMessage{
+		Version: int(b[2]),
+		Type:    int(b[3]),
+		Flags:   int(nativeEndian.Uint32(b[12:16])),
+		Index:   int(nativeEndian.Uint16(b[6:8])),
+		raw:     b[:l],
+	}
+	var err error
+	m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[bodyOff:])
+	if err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func (*wireFormat) parseInterfaceAnnounceMessage(_ RIBType, b []byte) (Message, error) {
+	if len(b) < 26 {
+		return nil, errMessageTooShort
+	}
+	l := int(nativeEndian.Uint16(b[:2]))
+	if len(b) < l {
+		return nil, errInvalidMessage
+	}
+	m := &InterfaceAnnounceMessage{
+		Version: int(b[2]),
+		Type:    int(b[3]),
+		Index:   int(nativeEndian.Uint16(b[6:8])),
+		What:    int(nativeEndian.Uint16(b[8:10])),
+		raw:     b[:l],
+	}
+	for i := 0; i < 16; i++ {
+		if b[10+i] != 0 {
+			continue
+		}
+		m.Name = string(b[10 : 10+i])
+		break
+	}
+	return m, nil
+}
diff --git a/vendor/golang.org/x/net/route/message.go b/vendor/golang.org/x/net/route/message.go
new file mode 100644
index 0000000..0fa7e09
--- /dev/null
+++ b/vendor/golang.org/x/net/route/message.go
@@ -0,0 +1,72 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package route
+
+// A Message represents a routing message.
+type Message interface {
+	// Sys returns operating system-specific information.
+	Sys() []Sys
+}
+
+// A Sys represents operating system-specific information.
+type Sys interface {
+	// SysType returns a type of operating system-specific
+	// information.
+	SysType() SysType
+}
+
+// A SysType represents a type of operating system-specific
+// information.
+type SysType int
+
+const (
+	SysMetrics SysType = iota
+	SysStats
+)
+
+// ParseRIB parses b as a routing information base and returns a list
+// of routing messages.
+func ParseRIB(typ RIBType, b []byte) ([]Message, error) { + if !typ.parseable() { + return nil, errUnsupportedMessage + } + var msgs []Message + nmsgs, nskips := 0, 0 + for len(b) > 4 { + nmsgs++ + l := int(nativeEndian.Uint16(b[:2])) + if l == 0 { + return nil, errInvalidMessage + } + if len(b) < l { + return nil, errMessageTooShort + } + if b[2] != sysRTM_VERSION { + b = b[l:] + continue + } + if w, ok := wireFormats[int(b[3])]; !ok { + nskips++ + } else { + m, err := w.parse(typ, b) + if err != nil { + return nil, err + } + if m == nil { + nskips++ + } else { + msgs = append(msgs, m) + } + } + b = b[l:] + } + // We failed to parse any of the messages - version mismatch? + if nmsgs != len(msgs)+nskips { + return nil, errMessageMismatch + } + return msgs, nil +} diff --git a/vendor/golang.org/x/net/route/message_darwin_test.go b/vendor/golang.org/x/net/route/message_darwin_test.go new file mode 100644 index 0000000..316aa75 --- /dev/null +++ b/vendor/golang.org/x/net/route/message_darwin_test.go @@ -0,0 +1,34 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +import "testing" + +func TestFetchAndParseRIBOnDarwin(t *testing.T) { + for _, typ := range []RIBType{sysNET_RT_FLAGS, sysNET_RT_DUMP2, sysNET_RT_IFLIST2} { + var lastErr error + var ms []Message + for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + rs, err := fetchAndParseRIB(af, typ) + if err != nil { + lastErr = err + continue + } + ms = append(ms, rs...) + } + if len(ms) == 0 && lastErr != nil { + t.Error(typ, lastErr) + continue + } + ss, err := msgs(ms).validate() + if err != nil { + t.Error(typ, err) + continue + } + for _, s := range ss { + t.Log(s) + } + } +} diff --git a/vendor/golang.org/x/net/route/message_freebsd_test.go b/vendor/golang.org/x/net/route/message_freebsd_test.go new file mode 100644 index 0000000..db4b567 --- /dev/null +++ b/vendor/golang.org/x/net/route/message_freebsd_test.go @@ -0,0 +1,92 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +import ( + "testing" + "unsafe" +) + +func TestFetchAndParseRIBOnFreeBSD(t *testing.T) { + for _, typ := range []RIBType{sysNET_RT_IFMALIST} { + var lastErr error + var ms []Message + for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + rs, err := fetchAndParseRIB(af, typ) + if err != nil { + lastErr = err + continue + } + ms = append(ms, rs...) + } + if len(ms) == 0 && lastErr != nil { + t.Error(typ, lastErr) + continue + } + ss, err := msgs(ms).validate() + if err != nil { + t.Error(typ, err) + continue + } + for _, s := range ss { + t.Log(s) + } + } +} + +func TestFetchAndParseRIBOnFreeBSD10AndAbove(t *testing.T) { + if _, err := FetchRIB(sysAF_UNSPEC, sysNET_RT_IFLISTL, 0); err != nil { + t.Skip("NET_RT_IFLISTL not supported") + } + var p uintptr + if kernelAlign != int(unsafe.Sizeof(p)) { + t.Skip("NET_RT_IFLIST vs. 
NET_RT_IFLISTL doesn't work for 386 emulation on amd64") + } + + var tests = [2]struct { + typ RIBType + b []byte + msgs []Message + ss []string + }{ + {typ: sysNET_RT_IFLIST}, + {typ: sysNET_RT_IFLISTL}, + } + for i := range tests { + var lastErr error + for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + rs, err := fetchAndParseRIB(af, tests[i].typ) + if err != nil { + lastErr = err + continue + } + tests[i].msgs = append(tests[i].msgs, rs...) + } + if len(tests[i].msgs) == 0 && lastErr != nil { + t.Error(tests[i].typ, lastErr) + continue + } + tests[i].ss, lastErr = msgs(tests[i].msgs).validate() + if lastErr != nil { + t.Error(tests[i].typ, lastErr) + continue + } + for _, s := range tests[i].ss { + t.Log(s) + } + } + for i := len(tests) - 1; i > 0; i-- { + if len(tests[i].ss) != len(tests[i-1].ss) { + t.Errorf("got %v; want %v", tests[i].ss, tests[i-1].ss) + continue + } + for j, s1 := range tests[i].ss { + s0 := tests[i-1].ss[j] + if s1 != s0 { + t.Errorf("got %s; want %s", s1, s0) + } + } + } +} diff --git a/vendor/golang.org/x/net/route/message_test.go b/vendor/golang.org/x/net/route/message_test.go new file mode 100644 index 0000000..e848dab --- /dev/null +++ b/vendor/golang.org/x/net/route/message_test.go @@ -0,0 +1,239 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +import ( + "os" + "syscall" + "testing" + "time" +) + +func TestFetchAndParseRIB(t *testing.T) { + for _, typ := range []RIBType{sysNET_RT_DUMP, sysNET_RT_IFLIST} { + var lastErr error + var ms []Message + for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + rs, err := fetchAndParseRIB(af, typ) + if err != nil { + lastErr = err + continue + } + ms = append(ms, rs...) + } + if len(ms) == 0 && lastErr != nil { + t.Error(typ, lastErr) + continue + } + ss, err := msgs(ms).validate() + if err != nil { + t.Error(typ, err) + continue + } + for _, s := range ss { + t.Log(typ, s) + } + } +} + +var ( + rtmonSock int + rtmonErr error +) + +func init() { + // We need to keep rtmonSock alive to avoid treading on + // recycled socket descriptors. + rtmonSock, rtmonErr = syscall.Socket(sysAF_ROUTE, sysSOCK_RAW, sysAF_UNSPEC) +} + +// TestMonitorAndParseRIB leaks a worker goroutine and a socket +// descriptor but that's intentional. +func TestMonitorAndParseRIB(t *testing.T) { + if testing.Short() || os.Getuid() != 0 { + t.Skip("must be root") + } + + if rtmonErr != nil { + t.Fatal(rtmonErr) + } + + // We suppose that using an IPv4 link-local address and the + // dot1Q ID for Token Ring and FDDI doesn't harm anyone. + pv := &propVirtual{addr: "169.254.0.1", mask: "255.255.255.0"} + if err := pv.configure(1002); err != nil { + t.Skip(err) + } + if err := pv.setup(); err != nil { + t.Skip(err) + } + pv.teardown() + + go func() { + b := make([]byte, os.Getpagesize()) + for { + // There's no easy way to unblock this read + // call because the routing message exchange + // over routing socket is a connectionless + // message-oriented protocol, no control plane + // for signaling connectivity, and we cannot + // use the net package of standard library due + // to the lack of support for routing socket + // and circular dependency. 
+ n, err := syscall.Read(rtmonSock, b) + if err != nil { + return + } + ms, err := ParseRIB(0, b[:n]) + if err != nil { + t.Error(err) + return + } + ss, err := msgs(ms).validate() + if err != nil { + t.Error(err) + return + } + for _, s := range ss { + t.Log(s) + } + } + }() + + for _, vid := range []int{1002, 1003, 1004, 1005} { + pv := &propVirtual{addr: "169.254.0.1", mask: "255.255.255.0"} + if err := pv.configure(vid); err != nil { + t.Fatal(err) + } + if err := pv.setup(); err != nil { + t.Fatal(err) + } + time.Sleep(200 * time.Millisecond) + if err := pv.teardown(); err != nil { + t.Fatal(err) + } + time.Sleep(200 * time.Millisecond) + } +} + +func TestParseRIBWithFuzz(t *testing.T) { + for _, fuzz := range []string{ + "0\x00\x05\x050000000000000000" + + "00000000000000000000" + + "00000000000000000000" + + "00000000000000000000" + + "0000000000000\x02000000" + + "00000000", + "\x02\x00\x05\f0000000000000000" + + "0\x0200000000000000", + "\x02\x00\x05\x100000000000000\x1200" + + "0\x00\xff\x00", + "\x02\x00\x05\f0000000000000000" + + "0\x12000\x00\x02\x0000", + "\x00\x00\x00\x01\x00", + "00000", + } { + for typ := RIBType(0); typ < 256; typ++ { + ParseRIB(typ, []byte(fuzz)) + } + } +} + +func TestRouteMessage(t *testing.T) { + s, err := syscall.Socket(sysAF_ROUTE, sysSOCK_RAW, sysAF_UNSPEC) + if err != nil { + t.Fatal(err) + } + defer syscall.Close(s) + + var ms []RouteMessage + for _, af := range []int{sysAF_INET, sysAF_INET6} { + if _, err := fetchAndParseRIB(af, sysNET_RT_DUMP); err != nil { + t.Log(err) + continue + } + switch af { + case sysAF_INET: + ms = append(ms, []RouteMessage{ + { + Type: sysRTM_GET, + Addrs: []Addr{ + &Inet4Addr{IP: [4]byte{127, 0, 0, 1}}, + nil, + nil, + nil, + &LinkAddr{}, + &Inet4Addr{}, + nil, + &Inet4Addr{}, + }, + }, + { + Type: sysRTM_GET, + Addrs: []Addr{ + &Inet4Addr{IP: [4]byte{127, 0, 0, 1}}, + }, + }, + }...) + case sysAF_INET6: + ms = append(ms, []RouteMessage{ + { + Type: sysRTM_GET, + Addrs: []Addr{ + &Inet6Addr{IP: [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}}, + nil, + nil, + nil, + &LinkAddr{}, + &Inet6Addr{}, + nil, + &Inet6Addr{}, + }, + }, + { + Type: sysRTM_GET, + Addrs: []Addr{ + &Inet6Addr{IP: [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}}, + }, + }, + }...) + } + } + for i, m := range ms { + m.ID = uintptr(os.Getpid()) + m.Seq = i + 1 + wb, err := m.Marshal() + if err != nil { + t.Fatalf("%v: %v", m, err) + } + if _, err := syscall.Write(s, wb); err != nil { + t.Fatalf("%v: %v", m, err) + } + rb := make([]byte, os.Getpagesize()) + n, err := syscall.Read(s, rb) + if err != nil { + t.Fatalf("%v: %v", m, err) + } + rms, err := ParseRIB(0, rb[:n]) + if err != nil { + t.Fatalf("%v: %v", m, err) + } + for _, rm := range rms { + err := rm.(*RouteMessage).Err + if err != nil { + t.Errorf("%v: %v", m, err) + } + } + ss, err := msgs(rms).validate() + if err != nil { + t.Fatalf("%v: %v", m, err) + } + for _, s := range ss { + t.Log(s) + } + } +} diff --git a/vendor/golang.org/x/net/route/route.go b/vendor/golang.org/x/net/route/route.go new file mode 100644 index 0000000..081da0d --- /dev/null +++ b/vendor/golang.org/x/net/route/route.go @@ -0,0 +1,123 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +// Package route provides basic functions for the manipulation of +// packet routing facilities on BSD variants. 
+//
+// The package supports any version of Darwin, any version of
+// DragonFly BSD, FreeBSD 7 through 11, NetBSD 6 and above, and
+// OpenBSD 5.6 and above.
+package route
+
+import (
+	"errors"
+	"os"
+	"syscall"
+)
+
+var (
+	errUnsupportedMessage = errors.New("unsupported message")
+	errMessageMismatch    = errors.New("message mismatch")
+	errMessageTooShort    = errors.New("message too short")
+	errInvalidMessage     = errors.New("invalid message")
+	errInvalidAddr        = errors.New("invalid address")
+	errShortBuffer        = errors.New("short buffer")
+)
+
+// A RouteMessage represents a message conveying an address prefix, a
+// nexthop address and an output interface.
+//
+// Unlike other messages, this message can be used to query adjacency
+// information for the given address prefix, to add a new route, and
+// to delete or modify the existing route from the routing information
+// base inside the kernel by writing and reading route messages on a
+// routing socket.
+//
+// For the manipulation of routing information, the route message must
+// contain appropriate fields that include:
+//
+//	Version = <must be specified>
+//	Type    = <must be specified>
+//	Flags   = <might be specified>
+//	Index   = <might be specified>
+//	ID      = <must be specified>
+//	Seq     = <must be specified>
+//	Addrs   = <must be specified>
+//
+// The Type field specifies a type of manipulation, the Flags field
+// specifies a class of target information and the Addrs field
+// specifies target information like the following:
+//
+//	route.RouteMessage{
+//		Version: RTM_VERSION,
+//		Type: RTM_GET,
+//		Flags: RTF_UP | RTF_HOST,
+//		ID: uintptr(os.Getpid()),
+//		Seq: 1,
+//		Addrs: []route.Addr{
+//			RTAX_DST: &route.Inet4Addr{ ... },
+//			RTAX_IFP: &route.LinkAddr{ ... },
+//			RTAX_BRD: &route.Inet4Addr{ ... },
+//		},
+//	}
+//
+// The values for the above fields depend on the implementation of
+// each operating system.
+//
+// The Err field on a response message contains an error value on the
+// requested operation. If non-nil, the requested operation failed.
+type RouteMessage struct {
+	Version int     // message version
+	Type    int     // message type
+	Flags   int     // route flags
+	Index   int     // interface index when attached
+	ID      uintptr // sender's identifier; usually process ID
+	Seq     int     // sequence number
+	Err     error   // error on requested operation
+	Addrs   []Addr  // addresses
+
+	extOff int    // offset of header extension
+	raw    []byte // raw message
+}
+
+// Marshal returns the binary encoding of m.
+func (m *RouteMessage) Marshal() ([]byte, error) {
+	return m.marshal()
+}
+
+// A RIBType represents a type of routing information base.
+type RIBType int
+
+const (
+	RIBTypeRoute     RIBType = syscall.NET_RT_DUMP
+	RIBTypeInterface RIBType = syscall.NET_RT_IFLIST
+)
+
+// FetchRIB fetches a routing information base from the operating
+// system.
+//
+// The provided af must be an address family.
+//
+// The provided arg must be a RIBType-specific argument.
+// When RIBType is related to routes, arg might be a set of route
+// flags. When RIBType is related to network interfaces, arg might be
+// an interface index or a set of interface flags. In most cases, zero
+// means a wildcard.
+func FetchRIB(af int, typ RIBType, arg int) ([]byte, error) { + mib := [6]int32{sysCTL_NET, sysAF_ROUTE, 0, int32(af), int32(typ), int32(arg)} + n := uintptr(0) + if err := sysctl(mib[:], nil, &n, nil, 0); err != nil { + return nil, os.NewSyscallError("sysctl", err) + } + if n == 0 { + return nil, nil + } + b := make([]byte, n) + if err := sysctl(mib[:], &b[0], &n, nil, 0); err != nil { + return nil, os.NewSyscallError("sysctl", err) + } + return b[:n], nil +} diff --git a/vendor/golang.org/x/net/route/route_classic.go b/vendor/golang.org/x/net/route/route_classic.go new file mode 100644 index 0000000..02fa688 --- /dev/null +++ b/vendor/golang.org/x/net/route/route_classic.go @@ -0,0 +1,75 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd + +package route + +import ( + "runtime" + "syscall" +) + +func (m *RouteMessage) marshal() ([]byte, error) { + w, ok := wireFormats[m.Type] + if !ok { + return nil, errUnsupportedMessage + } + l := w.bodyOff + addrsSpace(m.Addrs) + if runtime.GOOS == "darwin" { + // Fix stray pointer writes on macOS. + // See golang.org/issue/22456. + l += 1024 + } + b := make([]byte, l) + nativeEndian.PutUint16(b[:2], uint16(l)) + if m.Version == 0 { + b[2] = sysRTM_VERSION + } else { + b[2] = byte(m.Version) + } + b[3] = byte(m.Type) + nativeEndian.PutUint32(b[8:12], uint32(m.Flags)) + nativeEndian.PutUint16(b[4:6], uint16(m.Index)) + nativeEndian.PutUint32(b[16:20], uint32(m.ID)) + nativeEndian.PutUint32(b[20:24], uint32(m.Seq)) + attrs, err := marshalAddrs(b[w.bodyOff:], m.Addrs) + if err != nil { + return nil, err + } + if attrs > 0 { + nativeEndian.PutUint32(b[12:16], uint32(attrs)) + } + return b, nil +} + +func (w *wireFormat) parseRouteMessage(typ RIBType, b []byte) (Message, error) { + if len(b) < w.bodyOff { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &RouteMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[8:12])), + Index: int(nativeEndian.Uint16(b[4:6])), + ID: uintptr(nativeEndian.Uint32(b[16:20])), + Seq: int(nativeEndian.Uint32(b[20:24])), + extOff: w.extOff, + raw: b[:l], + } + errno := syscall.Errno(nativeEndian.Uint32(b[28:32])) + if errno != 0 { + m.Err = errno + } + var err error + m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[w.bodyOff:]) + if err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/golang.org/x/net/route/route_openbsd.go b/vendor/golang.org/x/net/route/route_openbsd.go new file mode 100644 index 0000000..daf2e90 --- /dev/null +++ b/vendor/golang.org/x/net/route/route_openbsd.go @@ -0,0 +1,65 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package route + +import "syscall" + +func (m *RouteMessage) marshal() ([]byte, error) { + l := sizeofRtMsghdr + addrsSpace(m.Addrs) + b := make([]byte, l) + nativeEndian.PutUint16(b[:2], uint16(l)) + if m.Version == 0 { + b[2] = sysRTM_VERSION + } else { + b[2] = byte(m.Version) + } + b[3] = byte(m.Type) + nativeEndian.PutUint16(b[4:6], uint16(sizeofRtMsghdr)) + nativeEndian.PutUint32(b[16:20], uint32(m.Flags)) + nativeEndian.PutUint16(b[6:8], uint16(m.Index)) + nativeEndian.PutUint32(b[24:28], uint32(m.ID)) + nativeEndian.PutUint32(b[28:32], uint32(m.Seq)) + attrs, err := marshalAddrs(b[sizeofRtMsghdr:], m.Addrs) + if err != nil { + return nil, err + } + if attrs > 0 { + nativeEndian.PutUint32(b[12:16], uint32(attrs)) + } + return b, nil +} + +func (*wireFormat) parseRouteMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < sizeofRtMsghdr { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &RouteMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[16:20])), + Index: int(nativeEndian.Uint16(b[6:8])), + ID: uintptr(nativeEndian.Uint32(b[24:28])), + Seq: int(nativeEndian.Uint32(b[28:32])), + raw: b[:l], + } + ll := int(nativeEndian.Uint16(b[4:6])) + if len(b) < ll { + return nil, errInvalidMessage + } + errno := syscall.Errno(nativeEndian.Uint32(b[32:36])) + if errno != 0 { + m.Err = errno + } + as, err := parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[ll:]) + if err != nil { + return nil, err + } + m.Addrs = as + return m, nil +} diff --git a/vendor/golang.org/x/net/route/route_test.go b/vendor/golang.org/x/net/route/route_test.go new file mode 100644 index 0000000..61bd174 --- /dev/null +++ b/vendor/golang.org/x/net/route/route_test.go @@ -0,0 +1,390 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package route
+
+import (
+	"fmt"
+	"os/exec"
+	"runtime"
+	"time"
+)
+
+func (m *RouteMessage) String() string {
+	return fmt.Sprintf("%s", addrAttrs(nativeEndian.Uint32(m.raw[12:16])))
+}
+
+func (m *InterfaceMessage) String() string {
+	var attrs addrAttrs
+	if runtime.GOOS == "openbsd" {
+		attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16]))
+	} else {
+		attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8]))
+	}
+	return fmt.Sprintf("%s", attrs)
+}
+
+func (m *InterfaceAddrMessage) String() string {
+	var attrs addrAttrs
+	if runtime.GOOS == "openbsd" {
+		attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16]))
+	} else {
+		attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8]))
+	}
+	return fmt.Sprintf("%s", attrs)
+}
+
+func (m *InterfaceMulticastAddrMessage) String() string {
+	return fmt.Sprintf("%s", addrAttrs(nativeEndian.Uint32(m.raw[4:8])))
+}
+
+func (m *InterfaceAnnounceMessage) String() string {
+	what := "<nil>"
+	switch m.What {
+	case 0:
+		what = "arrival"
+	case 1:
+		what = "departure"
+	}
+	return fmt.Sprintf("(%d %s %s)", m.Index, m.Name, what)
+}
+
+func (m *InterfaceMetrics) String() string {
+	return fmt.Sprintf("(type=%d mtu=%d)", m.Type, m.MTU)
+}
+
+func (m *RouteMetrics) String() string {
+	return fmt.Sprintf("(pmtu=%d)", m.PathMTU)
+}
+
+type addrAttrs uint
+
+var addrAttrNames = [...]string{
+	"dst",
+	"gateway",
+	"netmask",
+	"genmask",
+	"ifp",
+	"ifa",
+	"author",
+	"brd",
+	"df:mpls1-n:tag-o:src", // mpls1 for dragonfly, tag for netbsd, src for openbsd
+	"df:mpls2-o:srcmask",   // mpls2 for dragonfly, srcmask for openbsd
+	"df:mpls3-o:label",     // mpls3 for dragonfly, label for openbsd
+	"o:bfd",                // bfd for openbsd
+	"o:dns",                // dns for openbsd
+	"o:static",             // static for openbsd
+	"o:search",             // search for openbsd
+}
+
+func (attrs addrAttrs) String() string {
+	var s string
+	for i, name := range addrAttrNames {
+		if attrs&(1<<uint(i)) != 0 {
+			if s != "" {
+				s += "|"
+			}
+			s += name
+		}
+	}
+	if s == "" {
+		return "<nil>"
+	}
+	return s
+}
+
+type msgs []Message
+
+func (ms msgs) validate() ([]string, error) {
+	var ss []string
+	for _, m := range ms {
+		switch m := m.(type) {
+		case *RouteMessage:
+			if err := addrs(m.Addrs).match(addrAttrs(nativeEndian.Uint32(m.raw[12:16]))); err != nil {
+				return nil, err
+			}
+			sys := m.Sys()
+			if sys == nil {
+				return nil, fmt.Errorf("no sys for %s", m.String())
+			}
+			ss = append(ss, m.String()+" "+syss(sys).String()+" "+addrs(m.Addrs).String())
+		case *InterfaceMessage:
+			var attrs addrAttrs
+			if runtime.GOOS == "openbsd" {
+				attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16]))
+			} else {
+				attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8]))
+			}
+			if err := addrs(m.Addrs).match(attrs); err != nil {
+				return nil, err
+			}
+			sys := m.Sys()
+			if sys == nil {
+				return nil, fmt.Errorf("no sys for %s", m.String())
+			}
+			ss = append(ss, m.String()+" "+syss(sys).String()+" "+addrs(m.Addrs).String())
+		case *InterfaceAddrMessage:
+			var attrs addrAttrs
+			if runtime.GOOS == "openbsd" {
+				attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16]))
+			} else {
+				attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8]))
+			}
+			if err := addrs(m.Addrs).match(attrs); err != nil {
+				return nil, err
+			}
+			ss = append(ss, m.String()+" "+addrs(m.Addrs).String())
+		case *InterfaceMulticastAddrMessage:
+			if err := addrs(m.Addrs).match(addrAttrs(nativeEndian.Uint32(m.raw[4:8]))); err != nil {
+				return nil, err
+			}
+			ss = append(ss, m.String()+" "+addrs(m.Addrs).String())
+		case *InterfaceAnnounceMessage:
+			ss = append(ss, m.String())
+		default:
+			ss = append(ss, fmt.Sprintf("%+v", m))
+		}
+	}
+	return ss, nil
+}
+
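+// Illustrative sketch (not part of the upstream file): fetchAndParseRIB
+// and msgs.validate, both defined later in this file, are typically
+// driven together like this:
+//
+//	ms, err := fetchAndParseRIB(sysAF_INET, sysNET_RT_DUMP)
+//	if err != nil {
+//		// ...
+//	}
+//	ss, err := msgs(ms).validate()
+//	if err != nil {
+//		// ...
+//	}
+//	for _, s := range ss {
+//		fmt.Println(s) // one formatted line per parsed message
+//	}
+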
+type syss []Sys
+
+func (sys syss) String() string {
+	var s string
+	for _, sy := range sys {
+		switch sy := sy.(type) {
+		case *InterfaceMetrics:
+			if len(s) > 0 {
+				s += " "
+			}
+			s += sy.String()
+		case *RouteMetrics:
+			if len(s) > 0 {
+				s += " "
+			}
+			s += sy.String()
+		}
+	}
+	return s
+}
+
+type addrFamily int
+
+func (af addrFamily) String() string {
+	switch af {
+	case sysAF_UNSPEC:
+		return "unspec"
+	case sysAF_LINK:
+		return "link"
+	case sysAF_INET:
+		return "inet4"
+	case sysAF_INET6:
+		return "inet6"
+	default:
+		return fmt.Sprintf("%d", af)
+	}
+}
+
+const hexDigit = "0123456789abcdef"
+
+type llAddr []byte
+
+func (a llAddr) String() string {
+	if len(a) == 0 {
+		return ""
+	}
+	buf := make([]byte, 0, len(a)*3-1)
+	for i, b := range a {
+		if i > 0 {
+			buf = append(buf, ':')
+		}
+		buf = append(buf, hexDigit[b>>4])
+		buf = append(buf, hexDigit[b&0xF])
+	}
+	return string(buf)
+}
+
+type ipAddr []byte
+
+func (a ipAddr) String() string {
+	if len(a) == 0 {
+		return ""
+	}
+	if len(a) == 4 {
+		return fmt.Sprintf("%d.%d.%d.%d", a[0], a[1], a[2], a[3])
+	}
+	if len(a) == 16 {
+		return fmt.Sprintf("%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x", a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14], a[15])
+	}
+	s := make([]byte, len(a)*2)
+	for i, tn := range a {
+		s[i*2], s[i*2+1] = hexDigit[tn>>4], hexDigit[tn&0xf]
+	}
+	return string(s)
+}
+
+func (a *LinkAddr) String() string {
+	name := a.Name
+	if name == "" {
+		name = "<nil>"
+	}
+	lla := llAddr(a.Addr).String()
+	if lla == "" {
+		lla = "<nil>"
+	}
+	return fmt.Sprintf("(%v %d %s %s)", addrFamily(a.Family()), a.Index, name, lla)
+}
+
+func (a *Inet4Addr) String() string {
+	return fmt.Sprintf("(%v %v)", addrFamily(a.Family()), ipAddr(a.IP[:]))
+}
+
+func (a *Inet6Addr) String() string {
+	return fmt.Sprintf("(%v %v %d)", addrFamily(a.Family()), ipAddr(a.IP[:]), a.ZoneID)
+}
+
+func (a *DefaultAddr) String() string {
+	return fmt.Sprintf("(%v %s)", addrFamily(a.Family()), ipAddr(a.Raw[2:]).String())
+}
+
+type addrs []Addr
+
+func (as addrs) String() string {
+	var s string
+	for _, a := range as {
+		if a == nil {
+			continue
+		}
+		if len(s) > 0 {
+			s += " "
+		}
+		switch a := a.(type) {
+		case *LinkAddr:
+			s += a.String()
+		case *Inet4Addr:
+			s += a.String()
+		case *Inet6Addr:
+			s += a.String()
+		case *DefaultAddr:
+			s += a.String()
+		}
+	}
+	if s == "" {
+		return "<nil>"
+	}
+	return s
+}
+
+func (as addrs) match(attrs addrAttrs) error {
+	var ts addrAttrs
+	af := sysAF_UNSPEC
+	for i := range as {
+		if as[i] != nil {
+			ts |= 1 << uint(i)
+		}
+		switch as[i].(type) {
+		case *Inet4Addr:
+			if af == sysAF_UNSPEC {
+				af = sysAF_INET
+			}
+			if af != sysAF_INET {
+				return fmt.Errorf("got %v; want %v", addrs(as), addrFamily(af))
+			}
+		case *Inet6Addr:
+			if af == sysAF_UNSPEC {
+				af = sysAF_INET6
+			}
+			if af != sysAF_INET6 {
+				return fmt.Errorf("got %v; want %v", addrs(as), addrFamily(af))
+			}
+		}
+	}
+	if ts != attrs && ts > attrs {
+		return fmt.Errorf("%v not included in %v", ts, attrs)
+	}
+	return nil
+}
+
+func fetchAndParseRIB(af int, typ RIBType) ([]Message, error) {
+	var err error
+	var b []byte
+	for i := 0; i < 3; i++ {
+		if b, err = FetchRIB(af, typ, 0); err != nil {
+			time.Sleep(10 * time.Millisecond)
+			continue
+		}
+		break
+	}
+	if err != nil {
+		return nil, fmt.Errorf("%v %d %v", addrFamily(af), typ, err)
+	}
+	ms, err := ParseRIB(typ, b)
+	if err != nil {
+		return nil, fmt.Errorf("%v %d %v", addrFamily(af), typ, err)
+	}
+	return ms, nil
+}
+
+// propVirtual is a
proprietary virtual network interface. +type propVirtual struct { + name string + addr, mask string + setupCmds []*exec.Cmd + teardownCmds []*exec.Cmd +} + +func (pv *propVirtual) setup() error { + for _, cmd := range pv.setupCmds { + if err := cmd.Run(); err != nil { + pv.teardown() + return err + } + } + return nil +} + +func (pv *propVirtual) teardown() error { + for _, cmd := range pv.teardownCmds { + if err := cmd.Run(); err != nil { + return err + } + } + return nil +} + +func (pv *propVirtual) configure(suffix int) error { + if runtime.GOOS == "openbsd" { + pv.name = fmt.Sprintf("vether%d", suffix) + } else { + pv.name = fmt.Sprintf("vlan%d", suffix) + } + xname, err := exec.LookPath("ifconfig") + if err != nil { + return err + } + pv.setupCmds = append(pv.setupCmds, &exec.Cmd{ + Path: xname, + Args: []string{"ifconfig", pv.name, "create"}, + }) + if runtime.GOOS == "netbsd" { + // NetBSD requires an underlying dot1Q-capable network + // interface. + pv.setupCmds = append(pv.setupCmds, &exec.Cmd{ + Path: xname, + Args: []string{"ifconfig", pv.name, "vlan", fmt.Sprintf("%d", suffix&0xfff), "vlanif", "wm0"}, + }) + } + pv.setupCmds = append(pv.setupCmds, &exec.Cmd{ + Path: xname, + Args: []string{"ifconfig", pv.name, "inet", pv.addr, "netmask", pv.mask}, + }) + pv.teardownCmds = append(pv.teardownCmds, &exec.Cmd{ + Path: xname, + Args: []string{"ifconfig", pv.name, "destroy"}, + }) + return nil +} diff --git a/vendor/golang.org/x/net/route/sys.go b/vendor/golang.org/x/net/route/sys.go new file mode 100644 index 0000000..3d0ee9b --- /dev/null +++ b/vendor/golang.org/x/net/route/sys.go @@ -0,0 +1,39 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +import "unsafe" + +var ( + nativeEndian binaryByteOrder + kernelAlign int + wireFormats map[int]*wireFormat +) + +func init() { + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + nativeEndian = littleEndian + } else { + nativeEndian = bigEndian + } + kernelAlign, wireFormats = probeRoutingStack() +} + +func roundup(l int) int { + if l == 0 { + return kernelAlign + } + return (l + kernelAlign - 1) & ^(kernelAlign - 1) +} + +type wireFormat struct { + extOff int // offset of header extension + bodyOff int // offset of message body + parse func(RIBType, []byte) (Message, error) +} diff --git a/vendor/golang.org/x/net/route/sys_darwin.go b/vendor/golang.org/x/net/route/sys_darwin.go new file mode 100644 index 0000000..d2daf5c --- /dev/null +++ b/vendor/golang.org/x/net/route/sys_darwin.go @@ -0,0 +1,87 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +func (typ RIBType) parseable() bool { + switch typ { + case sysNET_RT_STAT, sysNET_RT_TRASH: + return false + default: + return true + } +} + +// RouteMetrics represents route metrics. +type RouteMetrics struct { + PathMTU int // path maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (rmx *RouteMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *RouteMessage) Sys() []Sys { + return []Sys{ + &RouteMetrics{ + PathMTU: int(nativeEndian.Uint32(m.raw[m.extOff+4 : m.extOff+8])), + }, + } +} + +// InterfaceMetrics represents interface metrics. 
+type InterfaceMetrics struct { + Type int // interface type + MTU int // maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *InterfaceMessage) Sys() []Sys { + return []Sys{ + &InterfaceMetrics{ + Type: int(m.raw[m.extOff]), + MTU: int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])), + }, + } +} + +func probeRoutingStack() (int, map[int]*wireFormat) { + rtm := &wireFormat{extOff: 36, bodyOff: sizeofRtMsghdrDarwin15} + rtm.parse = rtm.parseRouteMessage + rtm2 := &wireFormat{extOff: 36, bodyOff: sizeofRtMsghdr2Darwin15} + rtm2.parse = rtm2.parseRouteMessage + ifm := &wireFormat{extOff: 16, bodyOff: sizeofIfMsghdrDarwin15} + ifm.parse = ifm.parseInterfaceMessage + ifm2 := &wireFormat{extOff: 32, bodyOff: sizeofIfMsghdr2Darwin15} + ifm2.parse = ifm2.parseInterfaceMessage + ifam := &wireFormat{extOff: sizeofIfaMsghdrDarwin15, bodyOff: sizeofIfaMsghdrDarwin15} + ifam.parse = ifam.parseInterfaceAddrMessage + ifmam := &wireFormat{extOff: sizeofIfmaMsghdrDarwin15, bodyOff: sizeofIfmaMsghdrDarwin15} + ifmam.parse = ifmam.parseInterfaceMulticastAddrMessage + ifmam2 := &wireFormat{extOff: sizeofIfmaMsghdr2Darwin15, bodyOff: sizeofIfmaMsghdr2Darwin15} + ifmam2.parse = ifmam2.parseInterfaceMulticastAddrMessage + // Darwin kernels require 32-bit aligned access to routing facilities. + return 4, map[int]*wireFormat{ + sysRTM_ADD: rtm, + sysRTM_DELETE: rtm, + sysRTM_CHANGE: rtm, + sysRTM_GET: rtm, + sysRTM_LOSING: rtm, + sysRTM_REDIRECT: rtm, + sysRTM_MISS: rtm, + sysRTM_LOCK: rtm, + sysRTM_RESOLVE: rtm, + sysRTM_NEWADDR: ifam, + sysRTM_DELADDR: ifam, + sysRTM_IFINFO: ifm, + sysRTM_NEWMADDR: ifmam, + sysRTM_DELMADDR: ifmam, + sysRTM_IFINFO2: ifm2, + sysRTM_NEWMADDR2: ifmam2, + sysRTM_GET2: rtm2, + } +} diff --git a/vendor/golang.org/x/net/route/sys_dragonfly.go b/vendor/golang.org/x/net/route/sys_dragonfly.go new file mode 100644 index 0000000..0c14bc2 --- /dev/null +++ b/vendor/golang.org/x/net/route/sys_dragonfly.go @@ -0,0 +1,76 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +import "unsafe" + +func (typ RIBType) parseable() bool { return true } + +// RouteMetrics represents route metrics. +type RouteMetrics struct { + PathMTU int // path maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (rmx *RouteMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *RouteMessage) Sys() []Sys { + return []Sys{ + &RouteMetrics{ + PathMTU: int(nativeEndian.Uint64(m.raw[m.extOff+8 : m.extOff+16])), + }, + } +} + +// InterfaceMetrics represents interface metrics. +type InterfaceMetrics struct { + Type int // interface type + MTU int // maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. 
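+// The interface type and MTU below are read out of the raw message at
+// the header-extension offset (extOff) recorded by this message's wire
+// format.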
+func (m *InterfaceMessage) Sys() []Sys {
+	return []Sys{
+		&InterfaceMetrics{
+			Type: int(m.raw[m.extOff]),
+			MTU:  int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])),
+		},
+	}
+}
+
+func probeRoutingStack() (int, map[int]*wireFormat) {
+	var p uintptr
+	rtm := &wireFormat{extOff: 40, bodyOff: sizeofRtMsghdrDragonFlyBSD4}
+	rtm.parse = rtm.parseRouteMessage
+	ifm := &wireFormat{extOff: 16, bodyOff: sizeofIfMsghdrDragonFlyBSD4}
+	ifm.parse = ifm.parseInterfaceMessage
+	ifam := &wireFormat{extOff: sizeofIfaMsghdrDragonFlyBSD4, bodyOff: sizeofIfaMsghdrDragonFlyBSD4}
+	ifam.parse = ifam.parseInterfaceAddrMessage
+	ifmam := &wireFormat{extOff: sizeofIfmaMsghdrDragonFlyBSD4, bodyOff: sizeofIfmaMsghdrDragonFlyBSD4}
+	ifmam.parse = ifmam.parseInterfaceMulticastAddrMessage
+	ifanm := &wireFormat{extOff: sizeofIfAnnouncemsghdrDragonFlyBSD4, bodyOff: sizeofIfAnnouncemsghdrDragonFlyBSD4}
+	ifanm.parse = ifanm.parseInterfaceAnnounceMessage
+	return int(unsafe.Sizeof(p)), map[int]*wireFormat{
+		sysRTM_ADD:        rtm,
+		sysRTM_DELETE:     rtm,
+		sysRTM_CHANGE:     rtm,
+		sysRTM_GET:        rtm,
+		sysRTM_LOSING:     rtm,
+		sysRTM_REDIRECT:   rtm,
+		sysRTM_MISS:       rtm,
+		sysRTM_LOCK:       rtm,
+		sysRTM_RESOLVE:    rtm,
+		sysRTM_NEWADDR:    ifam,
+		sysRTM_DELADDR:    ifam,
+		sysRTM_IFINFO:     ifm,
+		sysRTM_NEWMADDR:   ifmam,
+		sysRTM_DELMADDR:   ifmam,
+		sysRTM_IFANNOUNCE: ifanm,
+	}
+}
diff --git a/vendor/golang.org/x/net/route/sys_freebsd.go b/vendor/golang.org/x/net/route/sys_freebsd.go
new file mode 100644
index 0000000..89ba1c4
--- /dev/null
+++ b/vendor/golang.org/x/net/route/sys_freebsd.go
@@ -0,0 +1,155 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package route
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+func (typ RIBType) parseable() bool { return true }
+
+// RouteMetrics represents route metrics.
+type RouteMetrics struct {
+	PathMTU int // path maximum transmission unit
+}
+
+// SysType implements the SysType method of Sys interface.
+func (rmx *RouteMetrics) SysType() SysType { return SysMetrics }
+
+// Sys implements the Sys method of Message interface.
+func (m *RouteMessage) Sys() []Sys {
+	if kernelAlign == 8 {
+		return []Sys{
+			&RouteMetrics{
+				PathMTU: int(nativeEndian.Uint64(m.raw[m.extOff+8 : m.extOff+16])),
+			},
+		}
+	}
+	return []Sys{
+		&RouteMetrics{
+			PathMTU: int(nativeEndian.Uint32(m.raw[m.extOff+4 : m.extOff+8])),
+		},
+	}
+}
+
+// InterfaceMetrics represents interface metrics.
+type InterfaceMetrics struct {
+	Type int // interface type
+	MTU  int // maximum transmission unit
+}
+
+// SysType implements the SysType method of Sys interface.
+func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics }
+
+// Sys implements the Sys method of Message interface.
+func (m *InterfaceMessage) Sys() []Sys {
+	return []Sys{
+		&InterfaceMetrics{
+			Type: int(m.raw[m.extOff]),
+			MTU:  int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])),
+		},
+	}
+}
+
+func probeRoutingStack() (int, map[int]*wireFormat) {
+	var p uintptr
+	wordSize := int(unsafe.Sizeof(p))
+	align := int(unsafe.Sizeof(p))
+	// In the case of kern.supported_archs="amd64 i386", we need
+	// to know the underlying kernel's architecture because the
+	// alignment for routing facilities is set at the build time
+	// of the kernel.
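+	// For example, an amd64 kernel's configuration text contains a
+	// line such as
+	//
+	//	machine	amd64
+	//
+	// and the scan below switches the alignment to 8 when it finds
+	// one. (Illustrative; the exact text depends on the kernel build.)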
+ conf, _ := syscall.Sysctl("kern.conftxt") + for i, j := 0, 0; j < len(conf); j++ { + if conf[j] != '\n' { + continue + } + s := conf[i:j] + i = j + 1 + if len(s) > len("machine") && s[:len("machine")] == "machine" { + s = s[len("machine"):] + for k := 0; k < len(s); k++ { + if s[k] == ' ' || s[k] == '\t' { + s = s[1:] + } + break + } + if s == "amd64" { + align = 8 + } + break + } + } + var rtm, ifm, ifam, ifmam, ifanm *wireFormat + if align != wordSize { // 386 emulation on amd64 + rtm = &wireFormat{extOff: sizeofRtMsghdrFreeBSD10Emu - sizeofRtMetricsFreeBSD10Emu, bodyOff: sizeofRtMsghdrFreeBSD10Emu} + ifm = &wireFormat{extOff: 16} + ifam = &wireFormat{extOff: sizeofIfaMsghdrFreeBSD10Emu, bodyOff: sizeofIfaMsghdrFreeBSD10Emu} + ifmam = &wireFormat{extOff: sizeofIfmaMsghdrFreeBSD10Emu, bodyOff: sizeofIfmaMsghdrFreeBSD10Emu} + ifanm = &wireFormat{extOff: sizeofIfAnnouncemsghdrFreeBSD10Emu, bodyOff: sizeofIfAnnouncemsghdrFreeBSD10Emu} + } else { + rtm = &wireFormat{extOff: sizeofRtMsghdrFreeBSD10 - sizeofRtMetricsFreeBSD10, bodyOff: sizeofRtMsghdrFreeBSD10} + ifm = &wireFormat{extOff: 16} + ifam = &wireFormat{extOff: sizeofIfaMsghdrFreeBSD10, bodyOff: sizeofIfaMsghdrFreeBSD10} + ifmam = &wireFormat{extOff: sizeofIfmaMsghdrFreeBSD10, bodyOff: sizeofIfmaMsghdrFreeBSD10} + ifanm = &wireFormat{extOff: sizeofIfAnnouncemsghdrFreeBSD10, bodyOff: sizeofIfAnnouncemsghdrFreeBSD10} + } + rel, _ := syscall.SysctlUint32("kern.osreldate") + switch { + case rel < 800000: + if align != wordSize { // 386 emulation on amd64 + ifm.bodyOff = sizeofIfMsghdrFreeBSD7Emu + } else { + ifm.bodyOff = sizeofIfMsghdrFreeBSD7 + } + case 800000 <= rel && rel < 900000: + if align != wordSize { // 386 emulation on amd64 + ifm.bodyOff = sizeofIfMsghdrFreeBSD8Emu + } else { + ifm.bodyOff = sizeofIfMsghdrFreeBSD8 + } + case 900000 <= rel && rel < 1000000: + if align != wordSize { // 386 emulation on amd64 + ifm.bodyOff = sizeofIfMsghdrFreeBSD9Emu + } else { + ifm.bodyOff = sizeofIfMsghdrFreeBSD9 + } + case 1000000 <= rel && rel < 1100000: + if align != wordSize { // 386 emulation on amd64 + ifm.bodyOff = sizeofIfMsghdrFreeBSD10Emu + } else { + ifm.bodyOff = sizeofIfMsghdrFreeBSD10 + } + default: + if align != wordSize { // 386 emulation on amd64 + ifm.bodyOff = sizeofIfMsghdrFreeBSD11Emu + } else { + ifm.bodyOff = sizeofIfMsghdrFreeBSD11 + } + } + rtm.parse = rtm.parseRouteMessage + ifm.parse = ifm.parseInterfaceMessage + ifam.parse = ifam.parseInterfaceAddrMessage + ifmam.parse = ifmam.parseInterfaceMulticastAddrMessage + ifanm.parse = ifanm.parseInterfaceAnnounceMessage + return align, map[int]*wireFormat{ + sysRTM_ADD: rtm, + sysRTM_DELETE: rtm, + sysRTM_CHANGE: rtm, + sysRTM_GET: rtm, + sysRTM_LOSING: rtm, + sysRTM_REDIRECT: rtm, + sysRTM_MISS: rtm, + sysRTM_LOCK: rtm, + sysRTM_RESOLVE: rtm, + sysRTM_NEWADDR: ifam, + sysRTM_DELADDR: ifam, + sysRTM_IFINFO: ifm, + sysRTM_NEWMADDR: ifmam, + sysRTM_DELMADDR: ifmam, + sysRTM_IFANNOUNCE: ifanm, + } +} diff --git a/vendor/golang.org/x/net/route/sys_netbsd.go b/vendor/golang.org/x/net/route/sys_netbsd.go new file mode 100644 index 0000000..02f71d5 --- /dev/null +++ b/vendor/golang.org/x/net/route/sys_netbsd.go @@ -0,0 +1,71 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +func (typ RIBType) parseable() bool { return true } + +// RouteMetrics represents route metrics. 
+type RouteMetrics struct {
+	PathMTU int // path maximum transmission unit
+}
+
+// SysType implements the SysType method of Sys interface.
+func (rmx *RouteMetrics) SysType() SysType { return SysMetrics }
+
+// Sys implements the Sys method of Message interface.
+func (m *RouteMessage) Sys() []Sys {
+	return []Sys{
+		&RouteMetrics{
+			PathMTU: int(nativeEndian.Uint64(m.raw[m.extOff+8 : m.extOff+16])),
+		},
+	}
+}
+
+// InterfaceMetrics represents interface metrics.
+type InterfaceMetrics struct {
+	Type int // interface type
+	MTU  int // maximum transmission unit
+}
+
+// SysType implements the SysType method of Sys interface.
+func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics }
+
+// Sys implements the Sys method of Message interface.
+func (m *InterfaceMessage) Sys() []Sys {
+	return []Sys{
+		&InterfaceMetrics{
+			Type: int(m.raw[m.extOff]),
+			MTU:  int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])),
+		},
+	}
+}
+
+func probeRoutingStack() (int, map[int]*wireFormat) {
+	rtm := &wireFormat{extOff: 40, bodyOff: sizeofRtMsghdrNetBSD7}
+	rtm.parse = rtm.parseRouteMessage
+	ifm := &wireFormat{extOff: 16, bodyOff: sizeofIfMsghdrNetBSD7}
+	ifm.parse = ifm.parseInterfaceMessage
+	ifam := &wireFormat{extOff: sizeofIfaMsghdrNetBSD7, bodyOff: sizeofIfaMsghdrNetBSD7}
+	ifam.parse = ifam.parseInterfaceAddrMessage
+	ifanm := &wireFormat{extOff: sizeofIfAnnouncemsghdrNetBSD7, bodyOff: sizeofIfAnnouncemsghdrNetBSD7}
+	ifanm.parse = ifanm.parseInterfaceAnnounceMessage
+	// NetBSD 6 and above kernels require 64-bit aligned access to
+	// routing facilities.
+	return 8, map[int]*wireFormat{
+		sysRTM_ADD:        rtm,
+		sysRTM_DELETE:     rtm,
+		sysRTM_CHANGE:     rtm,
+		sysRTM_GET:        rtm,
+		sysRTM_LOSING:     rtm,
+		sysRTM_REDIRECT:   rtm,
+		sysRTM_MISS:       rtm,
+		sysRTM_LOCK:       rtm,
+		sysRTM_RESOLVE:    rtm,
+		sysRTM_NEWADDR:    ifam,
+		sysRTM_DELADDR:    ifam,
+		sysRTM_IFANNOUNCE: ifanm,
+		sysRTM_IFINFO:     ifm,
+	}
+}
diff --git a/vendor/golang.org/x/net/route/sys_openbsd.go b/vendor/golang.org/x/net/route/sys_openbsd.go
new file mode 100644
index 0000000..c5674e8
--- /dev/null
+++ b/vendor/golang.org/x/net/route/sys_openbsd.go
@@ -0,0 +1,80 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package route
+
+import "unsafe"
+
+func (typ RIBType) parseable() bool {
+	switch typ {
+	case sysNET_RT_STATS, sysNET_RT_TABLE:
+		return false
+	default:
+		return true
+	}
+}
+
+// RouteMetrics represents route metrics.
+type RouteMetrics struct {
+	PathMTU int // path maximum transmission unit
+}
+
+// SysType implements the SysType method of Sys interface.
+func (rmx *RouteMetrics) SysType() SysType { return SysMetrics }
+
+// Sys implements the Sys method of Message interface.
+func (m *RouteMessage) Sys() []Sys {
+	return []Sys{
+		&RouteMetrics{
+			PathMTU: int(nativeEndian.Uint32(m.raw[60:64])),
+		},
+	}
+}
+
+// InterfaceMetrics represents interface metrics.
+type InterfaceMetrics struct {
+	Type int // interface type
+	MTU  int // maximum transmission unit
+}
+
+// SysType implements the SysType method of Sys interface.
+func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics }
+
+// Sys implements the Sys method of Message interface.
+func (m *InterfaceMessage) Sys() []Sys { + return []Sys{ + &InterfaceMetrics{ + Type: int(m.raw[24]), + MTU: int(nativeEndian.Uint32(m.raw[28:32])), + }, + } +} + +func probeRoutingStack() (int, map[int]*wireFormat) { + var p uintptr + rtm := &wireFormat{extOff: -1, bodyOff: -1} + rtm.parse = rtm.parseRouteMessage + ifm := &wireFormat{extOff: -1, bodyOff: -1} + ifm.parse = ifm.parseInterfaceMessage + ifam := &wireFormat{extOff: -1, bodyOff: -1} + ifam.parse = ifam.parseInterfaceAddrMessage + ifanm := &wireFormat{extOff: -1, bodyOff: -1} + ifanm.parse = ifanm.parseInterfaceAnnounceMessage + return int(unsafe.Sizeof(p)), map[int]*wireFormat{ + sysRTM_ADD: rtm, + sysRTM_DELETE: rtm, + sysRTM_CHANGE: rtm, + sysRTM_GET: rtm, + sysRTM_LOSING: rtm, + sysRTM_REDIRECT: rtm, + sysRTM_MISS: rtm, + sysRTM_LOCK: rtm, + sysRTM_RESOLVE: rtm, + sysRTM_NEWADDR: ifam, + sysRTM_DELADDR: ifam, + sysRTM_IFINFO: ifm, + sysRTM_IFANNOUNCE: ifanm, + sysRTM_DESYNC: rtm, + } +} diff --git a/vendor/golang.org/x/net/route/syscall.go b/vendor/golang.org/x/net/route/syscall.go new file mode 100644 index 0000000..5f69ea6 --- /dev/null +++ b/vendor/golang.org/x/net/route/syscall.go @@ -0,0 +1,28 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +import ( + "syscall" + "unsafe" +) + +var zero uintptr + +func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + var p unsafe.Pointer + if len(mib) > 0 { + p = unsafe.Pointer(&mib[0]) + } else { + p = unsafe.Pointer(&zero) + } + _, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(p), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), newlen) + if errno != 0 { + return error(errno) + } + return nil +} diff --git a/vendor/golang.org/x/net/route/zsys_darwin.go b/vendor/golang.org/x/net/route/zsys_darwin.go new file mode 100644 index 0000000..4e2e1ab --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_darwin.go @@ -0,0 +1,99 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_STAT = 0x4 + sysNET_RT_TRASH = 0x5 + sysNET_RT_IFLIST2 = 0x6 + sysNET_RT_DUMP2 = 0x7 + sysNET_RT_MAXID = 0xa +) + +const ( + sysCTL_MAXNAME = 0xc + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_MAXID = 0x9 +) + +const ( + sysRTM_VERSION = 0x5 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_OLDADD = 0x9 + sysRTM_OLDDEL = 0xa + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_NEWMADDR = 0xf + sysRTM_DELMADDR = 0x10 + sysRTM_IFINFO2 = 0x12 + sysRTM_NEWMADDR2 = 0x13 + sysRTM_GET2 = 0x14 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + 
sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_MAX = 0x8 +) + +const ( + sizeofIfMsghdrDarwin15 = 0x70 + sizeofIfaMsghdrDarwin15 = 0x14 + sizeofIfmaMsghdrDarwin15 = 0x10 + sizeofIfMsghdr2Darwin15 = 0xa0 + sizeofIfmaMsghdr2Darwin15 = 0x14 + sizeofIfDataDarwin15 = 0x60 + sizeofIfData64Darwin15 = 0x80 + + sizeofRtMsghdrDarwin15 = 0x5c + sizeofRtMsghdr2Darwin15 = 0x5c + sizeofRtMetricsDarwin15 = 0x38 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/route/zsys_dragonfly.go b/vendor/golang.org/x/net/route/zsys_dragonfly.go new file mode 100644 index 0000000..719c88d --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_dragonfly.go @@ -0,0 +1,98 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_dragonfly.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_MAXID = 0x4 +) + +const ( + sysCTL_MAXNAME = 0xc + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_P1003_1B = 0x9 + sysCTL_LWKT = 0xa + sysCTL_MAXID = 0xb +) + +const ( + sysRTM_VERSION = 0x6 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_OLDADD = 0x9 + sysRTM_OLDDEL = 0xa + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_NEWMADDR = 0xf + sysRTM_DELMADDR = 0x10 + sysRTM_IFANNOUNCE = 0x11 + sysRTM_IEEE80211 = 0x12 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + sysRTA_MPLS1 = 0x100 + sysRTA_MPLS2 = 0x200 + sysRTA_MPLS3 = 0x400 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_MPLS1 = 0x8 + sysRTAX_MPLS2 = 0x9 + sysRTAX_MPLS3 = 0xa + sysRTAX_MAX = 0xb +) + +const ( + sizeofIfMsghdrDragonFlyBSD4 = 0xb0 + sizeofIfaMsghdrDragonFlyBSD4 = 0x14 + sizeofIfmaMsghdrDragonFlyBSD4 = 0x10 + sizeofIfAnnouncemsghdrDragonFlyBSD4 = 0x18 + + sizeofRtMsghdrDragonFlyBSD4 = 0x98 + sizeofRtMetricsDragonFlyBSD4 = 0x70 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/route/zsys_freebsd_386.go b/vendor/golang.org/x/net/route/zsys_freebsd_386.go new file mode 100644 index 0000000..b03bc01 --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_freebsd_386.go @@ -0,0 +1,126 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_IFMALIST = 0x4 + sysNET_RT_IFLISTL = 0x5 +) + +const ( + sysCTL_MAXNAME = 0x18 + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_P1003_1B = 0x9 +) + +const ( + sysRTM_VERSION = 
0x5 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_NEWMADDR = 0xf + sysRTM_DELMADDR = 0x10 + sysRTM_IFANNOUNCE = 0x11 + sysRTM_IEEE80211 = 0x12 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_MAX = 0x8 +) + +const ( + sizeofIfMsghdrlFreeBSD10 = 0x68 + sizeofIfaMsghdrFreeBSD10 = 0x14 + sizeofIfaMsghdrlFreeBSD10 = 0x6c + sizeofIfmaMsghdrFreeBSD10 = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10 = 0x18 + + sizeofRtMsghdrFreeBSD10 = 0x5c + sizeofRtMetricsFreeBSD10 = 0x38 + + sizeofIfMsghdrFreeBSD7 = 0x60 + sizeofIfMsghdrFreeBSD8 = 0x60 + sizeofIfMsghdrFreeBSD9 = 0x60 + sizeofIfMsghdrFreeBSD10 = 0x64 + sizeofIfMsghdrFreeBSD11 = 0xa8 + + sizeofIfDataFreeBSD7 = 0x50 + sizeofIfDataFreeBSD8 = 0x50 + sizeofIfDataFreeBSD9 = 0x50 + sizeofIfDataFreeBSD10 = 0x54 + sizeofIfDataFreeBSD11 = 0x98 + + // MODIFIED BY HAND FOR 386 EMULATION ON AMD64 + // 386 EMULATION USES THE UNDERLYING RAW DATA LAYOUT + + sizeofIfMsghdrlFreeBSD10Emu = 0xb0 + sizeofIfaMsghdrFreeBSD10Emu = 0x14 + sizeofIfaMsghdrlFreeBSD10Emu = 0xb0 + sizeofIfmaMsghdrFreeBSD10Emu = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18 + + sizeofRtMsghdrFreeBSD10Emu = 0x98 + sizeofRtMetricsFreeBSD10Emu = 0x70 + + sizeofIfMsghdrFreeBSD7Emu = 0xa8 + sizeofIfMsghdrFreeBSD8Emu = 0xa8 + sizeofIfMsghdrFreeBSD9Emu = 0xa8 + sizeofIfMsghdrFreeBSD10Emu = 0xa8 + sizeofIfMsghdrFreeBSD11Emu = 0xa8 + + sizeofIfDataFreeBSD7Emu = 0x98 + sizeofIfDataFreeBSD8Emu = 0x98 + sizeofIfDataFreeBSD9Emu = 0x98 + sizeofIfDataFreeBSD10Emu = 0x98 + sizeofIfDataFreeBSD11Emu = 0x98 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/route/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/route/zsys_freebsd_amd64.go new file mode 100644 index 0000000..0b675b3 --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_freebsd_amd64.go @@ -0,0 +1,123 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_IFMALIST = 0x4 + sysNET_RT_IFLISTL = 0x5 +) + +const ( + sysCTL_MAXNAME = 0x18 + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_P1003_1B = 0x9 +) + +const ( + sysRTM_VERSION = 0x5 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_NEWMADDR = 0xf + sysRTM_DELMADDR = 0x10 + sysRTM_IFANNOUNCE = 0x11 + sysRTM_IEEE80211 = 0x12 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 
0x80 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_MAX = 0x8 +) + +const ( + sizeofIfMsghdrlFreeBSD10 = 0xb0 + sizeofIfaMsghdrFreeBSD10 = 0x14 + sizeofIfaMsghdrlFreeBSD10 = 0xb0 + sizeofIfmaMsghdrFreeBSD10 = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10 = 0x18 + + sizeofRtMsghdrFreeBSD10 = 0x98 + sizeofRtMetricsFreeBSD10 = 0x70 + + sizeofIfMsghdrFreeBSD7 = 0xa8 + sizeofIfMsghdrFreeBSD8 = 0xa8 + sizeofIfMsghdrFreeBSD9 = 0xa8 + sizeofIfMsghdrFreeBSD10 = 0xa8 + sizeofIfMsghdrFreeBSD11 = 0xa8 + + sizeofIfDataFreeBSD7 = 0x98 + sizeofIfDataFreeBSD8 = 0x98 + sizeofIfDataFreeBSD9 = 0x98 + sizeofIfDataFreeBSD10 = 0x98 + sizeofIfDataFreeBSD11 = 0x98 + + sizeofIfMsghdrlFreeBSD10Emu = 0xb0 + sizeofIfaMsghdrFreeBSD10Emu = 0x14 + sizeofIfaMsghdrlFreeBSD10Emu = 0xb0 + sizeofIfmaMsghdrFreeBSD10Emu = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18 + + sizeofRtMsghdrFreeBSD10Emu = 0x98 + sizeofRtMetricsFreeBSD10Emu = 0x70 + + sizeofIfMsghdrFreeBSD7Emu = 0xa8 + sizeofIfMsghdrFreeBSD8Emu = 0xa8 + sizeofIfMsghdrFreeBSD9Emu = 0xa8 + sizeofIfMsghdrFreeBSD10Emu = 0xa8 + sizeofIfMsghdrFreeBSD11Emu = 0xa8 + + sizeofIfDataFreeBSD7Emu = 0x98 + sizeofIfDataFreeBSD8Emu = 0x98 + sizeofIfDataFreeBSD9Emu = 0x98 + sizeofIfDataFreeBSD10Emu = 0x98 + sizeofIfDataFreeBSD11Emu = 0x98 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/route/zsys_freebsd_arm.go b/vendor/golang.org/x/net/route/zsys_freebsd_arm.go new file mode 100644 index 0000000..58f8ea1 --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_freebsd_arm.go @@ -0,0 +1,123 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_IFMALIST = 0x4 + sysNET_RT_IFLISTL = 0x5 +) + +const ( + sysCTL_MAXNAME = 0x18 + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_P1003_1B = 0x9 +) + +const ( + sysRTM_VERSION = 0x5 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_NEWMADDR = 0xf + sysRTM_DELMADDR = 0x10 + sysRTM_IFANNOUNCE = 0x11 + sysRTM_IEEE80211 = 0x12 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_MAX = 0x8 +) + +const ( + sizeofIfMsghdrlFreeBSD10 = 0x68 + sizeofIfaMsghdrFreeBSD10 = 0x14 + sizeofIfaMsghdrlFreeBSD10 = 0x6c + sizeofIfmaMsghdrFreeBSD10 = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10 = 0x18 + + sizeofRtMsghdrFreeBSD10 = 0x5c + sizeofRtMetricsFreeBSD10 = 0x38 + + sizeofIfMsghdrFreeBSD7 = 0x70 + sizeofIfMsghdrFreeBSD8 = 0x70 + sizeofIfMsghdrFreeBSD9 = 0x70 + sizeofIfMsghdrFreeBSD10 = 0x70 + sizeofIfMsghdrFreeBSD11 = 0xa8 + + 
sizeofIfDataFreeBSD7 = 0x60 + sizeofIfDataFreeBSD8 = 0x60 + sizeofIfDataFreeBSD9 = 0x60 + sizeofIfDataFreeBSD10 = 0x60 + sizeofIfDataFreeBSD11 = 0x98 + + sizeofIfMsghdrlFreeBSD10Emu = 0x68 + sizeofIfaMsghdrFreeBSD10Emu = 0x14 + sizeofIfaMsghdrlFreeBSD10Emu = 0x6c + sizeofIfmaMsghdrFreeBSD10Emu = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18 + + sizeofRtMsghdrFreeBSD10Emu = 0x5c + sizeofRtMetricsFreeBSD10Emu = 0x38 + + sizeofIfMsghdrFreeBSD7Emu = 0x70 + sizeofIfMsghdrFreeBSD8Emu = 0x70 + sizeofIfMsghdrFreeBSD9Emu = 0x70 + sizeofIfMsghdrFreeBSD10Emu = 0x70 + sizeofIfMsghdrFreeBSD11Emu = 0xa8 + + sizeofIfDataFreeBSD7Emu = 0x60 + sizeofIfDataFreeBSD8Emu = 0x60 + sizeofIfDataFreeBSD9Emu = 0x60 + sizeofIfDataFreeBSD10Emu = 0x60 + sizeofIfDataFreeBSD11Emu = 0x98 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/route/zsys_netbsd.go b/vendor/golang.org/x/net/route/zsys_netbsd.go new file mode 100644 index 0000000..e0df45e --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_netbsd.go @@ -0,0 +1,97 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x22 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x5 + sysNET_RT_MAXID = 0x6 +) + +const ( + sysCTL_MAXNAME = 0xc + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_DDB = 0x9 + sysCTL_PROC = 0xa + sysCTL_VENDOR = 0xb + sysCTL_EMUL = 0xc + sysCTL_SECURITY = 0xd + sysCTL_MAXID = 0xe +) + +const ( + sysRTM_VERSION = 0x4 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_OLDADD = 0x9 + sysRTM_OLDDEL = 0xa + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFANNOUNCE = 0x10 + sysRTM_IEEE80211 = 0x11 + sysRTM_SETGATE = 0x12 + sysRTM_LLINFO_UPD = 0x13 + sysRTM_IFINFO = 0x14 + sysRTM_CHGADDR = 0x15 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + sysRTA_TAG = 0x100 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_TAG = 0x8 + sysRTAX_MAX = 0x9 +) + +const ( + sizeofIfMsghdrNetBSD7 = 0x98 + sizeofIfaMsghdrNetBSD7 = 0x18 + sizeofIfAnnouncemsghdrNetBSD7 = 0x18 + + sizeofRtMsghdrNetBSD7 = 0x78 + sizeofRtMetricsNetBSD7 = 0x50 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/route/zsys_openbsd.go b/vendor/golang.org/x/net/route/zsys_openbsd.go new file mode 100644 index 0000000..db8c8ef --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_openbsd.go @@ -0,0 +1,101 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_STATS = 0x4 + sysNET_RT_TABLE = 0x5 + sysNET_RT_IFNAMES = 0x6 + sysNET_RT_MAXID = 0x7 +) + 
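+// Note on the sysRTA_*/sysRTAX_* constants further down in this file:
+// each sysRTA_x bit in a message's address bitmask corresponds to the
+// index sysRTAX_x that the parsed address occupies in an Addrs slice,
+// i.e. sysRTA_x == 1<<sysRTAX_x. A minimal membership check
+// (hypothetical helper, not part of this file):
+//
+//	func hasAddr(attrs uint, rtax int) bool {
+//		return attrs&(1<<uint(rtax)) != 0
+//	}
+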
+const ( + sysCTL_MAXNAME = 0xc + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_FS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_DDB = 0x9 + sysCTL_VFS = 0xa + sysCTL_MAXID = 0xb +) + +const ( + sysRTM_VERSION = 0x5 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_IFANNOUNCE = 0xf + sysRTM_DESYNC = 0x10 + sysRTM_INVALIDATE = 0x11 + sysRTM_BFD = 0x12 + sysRTM_PROPOSAL = 0x13 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + sysRTA_SRC = 0x100 + sysRTA_SRCMASK = 0x200 + sysRTA_LABEL = 0x400 + sysRTA_BFD = 0x800 + sysRTA_DNS = 0x1000 + sysRTA_STATIC = 0x2000 + sysRTA_SEARCH = 0x4000 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_SRC = 0x8 + sysRTAX_SRCMASK = 0x9 + sysRTAX_LABEL = 0xa + sysRTAX_BFD = 0xb + sysRTAX_DNS = 0xc + sysRTAX_STATIC = 0xd + sysRTAX_SEARCH = 0xe + sysRTAX_MAX = 0xf +) + +const ( + sizeofRtMsghdr = 0x60 + + sizeofSockaddrStorage = 0x100 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go new file mode 100644 index 0000000..c646a69 --- /dev/null +++ b/vendor/golang.org/x/net/trace/events.go @@ -0,0 +1,532 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "text/tabwriter" + "time" +) + +const maxEventsPerLog = 100 + +type bucket struct { + MaxErrAge time.Duration + String string +} + +var buckets = []bucket{ + {0, "total"}, + {10 * time.Second, "errs<10s"}, + {1 * time.Minute, "errs<1m"}, + {10 * time.Minute, "errs<10m"}, + {1 * time.Hour, "errs<1h"}, + {10 * time.Hour, "errs<10h"}, + {24000 * time.Hour, "errors"}, +} + +// RenderEvents renders the HTML page typically served at /debug/events. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Events handler. +func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { + now := time.Now() + data := &struct { + Families []string // family names + Buckets []bucket + Counts [][]int // eventLog count per family/bucket + + // Set when a bucket has been selected. + Family string + Bucket int + EventLogs eventLogs + Expanded bool + }{ + Buckets: buckets, + } + + data.Families = make([]string, 0, len(families)) + famMu.RLock() + for name := range families { + data.Families = append(data.Families, name) + } + famMu.RUnlock() + sort.Strings(data.Families) + + // Count the number of eventLogs in each family for each error age. + data.Counts = make([][]int, len(data.Families)) + for i, name := range data.Families { + // TODO(sameer): move this loop under the family lock. 
+ f := getEventFamily(name) + data.Counts[i] = make([]int, len(data.Buckets)) + for j, b := range data.Buckets { + data.Counts[i][j] = f.Count(now, b.MaxErrAge) + } + } + + if req != nil { + var ok bool + data.Family, data.Bucket, ok = parseEventsArgs(req) + if !ok { + // No-op + } else { + data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) + } + if data.EventLogs != nil { + defer data.EventLogs.Free() + sort.Sort(data.EventLogs) + } + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + } + + famMu.RLock() + defer famMu.RUnlock() + if err := eventsTmpl().Execute(w, data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < 0 || b >= len(buckets) { + return "", 0, false + } + return fam, b, true +} + +// An EventLog provides a log of events associated with a specific object. +type EventLog interface { + // Printf formats its arguments with fmt.Sprintf and adds the + // result to the event log. + Printf(format string, a ...interface{}) + + // Errorf is like Printf, but it marks this event as an error. + Errorf(format string, a ...interface{}) + + // Finish declares that this event log is complete. + // The event log should not be used after calling this method. + Finish() +} + +// NewEventLog returns a new EventLog with the specified family name +// and title. +func NewEventLog(family, title string) EventLog { + el := newEventLog() + el.ref() + el.Family, el.Title = family, title + el.Start = time.Now() + el.events = make([]logEntry, 0, maxEventsPerLog) + el.stack = make([]uintptr, 32) + n := runtime.Callers(2, el.stack) + el.stack = el.stack[:n] + + getEventFamily(family).add(el) + return el +} + +func (el *eventLog) Finish() { + getEventFamily(el.Family).remove(el) + el.unref() // matches ref in New +} + +var ( + famMu sync.RWMutex + families = make(map[string]*eventFamily) // family name => family +) + +func getEventFamily(fam string) *eventFamily { + famMu.Lock() + defer famMu.Unlock() + f := families[fam] + if f == nil { + f = &eventFamily{} + families[fam] = f + } + return f +} + +type eventFamily struct { + mu sync.RWMutex + eventLogs eventLogs +} + +func (f *eventFamily) add(el *eventLog) { + f.mu.Lock() + f.eventLogs = append(f.eventLogs, el) + f.mu.Unlock() +} + +func (f *eventFamily) remove(el *eventLog) { + f.mu.Lock() + defer f.mu.Unlock() + for i, el0 := range f.eventLogs { + if el == el0 { + copy(f.eventLogs[i:], f.eventLogs[i+1:]) + f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] + return + } + } +} + +func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { + f.mu.RLock() + defer f.mu.RUnlock() + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + n++ + } + } + return +} + +func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { + f.mu.RLock() + defer f.mu.RUnlock() + els = make(eventLogs, 0, len(f.eventLogs)) + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + el.ref() + els = append(els, el) + } + } + return +} + +type eventLogs []*eventLog + +// Free calls unref on each element of the list. +func (els eventLogs) Free() { + for _, el := range els { + el.unref() + } +} + +// eventLogs may be sorted in reverse chronological order. 
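+// Less compares Start times with time.Time.After, so sort.Sort places
+// the most recently started event logs first.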
+func (els eventLogs) Len() int { return len(els) } +func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } +func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } + +// A logEntry is a timestamped log entry in an event log. +type logEntry struct { + When time.Time + Elapsed time.Duration // since previous event in log + NewDay bool // whether this event is on a different day to the previous event + What string + IsErr bool +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e logEntry) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// An eventLog represents an active event log. +type eventLog struct { + // Family is the top-level grouping of event logs to which this belongs. + Family string + + // Title is the title of this event log. + Title string + + // Timing information. + Start time.Time + + // Call stack where this event log was created. + stack []uintptr + + // Append-only sequence of events. + // + // TODO(sameer): change this to a ring buffer to avoid the array copy + // when we hit maxEventsPerLog. + mu sync.RWMutex + events []logEntry + LastErrorTime time.Time + discarded int + + refs int32 // how many buckets this is in +} + +func (el *eventLog) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + el.Family = "" + el.Title = "" + el.Start = time.Time{} + el.stack = nil + el.events = nil + el.LastErrorTime = time.Time{} + el.discarded = 0 + el.refs = 0 +} + +func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { + if maxErrAge == 0 { + return true + } + el.mu.RLock() + defer el.mu.RUnlock() + return now.Sub(el.LastErrorTime) < maxErrAge +} + +// delta returns the elapsed time since the last event or the log start, +// and whether it spans midnight. +// L >= el.mu +func (el *eventLog) delta(t time.Time) (time.Duration, bool) { + if len(el.events) == 0 { + return t.Sub(el.Start), false + } + prev := el.events[len(el.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() + +} + +func (el *eventLog) Printf(format string, a ...interface{}) { + el.printf(false, format, a...) +} + +func (el *eventLog) Errorf(format string, a ...interface{}) { + el.printf(true, format, a...) +} + +func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { + e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} + el.mu.Lock() + e.Elapsed, e.NewDay = el.delta(e.When) + if len(el.events) < maxEventsPerLog { + el.events = append(el.events, e) + } else { + // Discard the oldest event. + if el.discarded == 0 { + // el.discarded starts at two to count for the event it + // is replacing, plus the next one that we are about to + // drop. + el.discarded = 2 + } else { + el.discarded++ + } + // TODO(sameer): if this causes allocations on a critical path, + // change eventLog.What to be a fmt.Stringer, as in trace.go. + el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. 
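+		// For example, on the first overflow el.discarded becomes 2,
+		// covering the overwritten slot plus the incoming event, and
+		// events[0] reads "(2 events discarded)"; every later overflow
+		// increments the count by one.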
+		el.events[0].When = el.events[1].When
+		copy(el.events[1:], el.events[2:])
+		el.events[maxEventsPerLog-1] = e
+	}
+	if e.IsErr {
+		el.LastErrorTime = e.When
+	}
+	el.mu.Unlock()
+}
+
+func (el *eventLog) ref() {
+	atomic.AddInt32(&el.refs, 1)
+}
+
+func (el *eventLog) unref() {
+	if atomic.AddInt32(&el.refs, -1) == 0 {
+		freeEventLog(el)
+	}
+}
+
+func (el *eventLog) When() string {
+	return el.Start.Format("2006/01/02 15:04:05.000000")
+}
+
+func (el *eventLog) ElapsedTime() string {
+	elapsed := time.Since(el.Start)
+	return fmt.Sprintf("%.6f", elapsed.Seconds())
+}
+
+func (el *eventLog) Stack() string {
+	buf := new(bytes.Buffer)
+	tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0)
+	printStackRecord(tw, el.stack)
+	tw.Flush()
+	return buf.String()
+}
+
+// printStackRecord prints the function + source line information
+// for a single stack trace.
+// Adapted from runtime/pprof/pprof.go.
+func printStackRecord(w io.Writer, stk []uintptr) {
+	for _, pc := range stk {
+		f := runtime.FuncForPC(pc)
+		if f == nil {
+			continue
+		}
+		file, line := f.FileLine(pc)
+		name := f.Name()
+		// Hide runtime.goexit and any runtime functions at the beginning.
+		if strings.HasPrefix(name, "runtime.") {
+			continue
+		}
+		fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line)
+	}
+}
+
+func (el *eventLog) Events() []logEntry {
+	el.mu.RLock()
+	defer el.mu.RUnlock()
+	return el.events
+}
+
+// freeEventLogs is a freelist of *eventLog
+var freeEventLogs = make(chan *eventLog, 1000)
+
+// newEventLog returns an event log ready to use.
+func newEventLog() *eventLog {
+	select {
+	case el := <-freeEventLogs:
+		return el
+	default:
+		return new(eventLog)
+	}
+}
+
+// freeEventLog adds el to freeEventLogs if there's room.
+// This is non-blocking.
+func freeEventLog(el *eventLog) {
+	el.reset()
+	select {
+	case freeEventLogs <- el:
+	default:
+	}
+}
+
+var eventsTmplCache *template.Template
+var eventsTmplOnce sync.Once
+
+func eventsTmpl() *template.Template {
+	eventsTmplOnce.Do(func() {
+		eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{
+			"elapsed":   elapsed,
+			"trimSpace": strings.TrimSpace,
+		}).Parse(eventsHTML))
+	})
+	return eventsTmplCache
+}
+
+const eventsHTML = `
+<html>
+	<head>
+	<title>events</title>
+	</head>
+	<body>
+
+<h1>/debug/events</h1>
+
+<table id="req-status">
+	{{range $i, $fam := .Families}}
+	<tr>
+		<td class="family">{{$fam}}</td>
+
+		{{range $j, $bucket := $.Buckets}}
+		{{$n := index $.Counts $i $j}}
+		<td class="{{if not $n}}empty{{end}}">
+			{{if $n}}<a href="?fam={{$fam}}&b={{$j}}">{{end}}
+			[{{$n}} {{$bucket.String}}]
+			{{if $n}}</a>{{end}}
+		</td>
+		{{end}}
+
+	</tr>
+	{{end}}
+</table>
+
+{{if $.EventLogs}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}">{{end}}
+[Summary]{{if $.Expanded}}</a>{{end}}
+
+{{if not $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">{{end}}
+[Expanded]{{if not $.Expanded}}</a>{{end}}
+
+<table id="reqs">
+	<tr><th>When</th><th>Elapsed</th></tr>
+	{{range $el := $.EventLogs}}
+	<tr class="first">
+		<td class="when">{{$el.When}}</td>
+		<td class="elapsed">{{$el.ElapsedTime}}</td>
+		<td>{{$el.Title}}</td>
+	</tr>
+	{{if $.Expanded}}
+	<tr>
+		<td class="when"></td>
+		<td class="elapsed"></td>
+		<td><pre>{{$el.Stack|trimSpace}}</pre></td>
+	</tr>
+	{{range $el.Events}}
+	<tr>
+		<td class="when">{{.WhenString}}</td>
+		<td class="elapsed">{{elapsed .Elapsed}}</td>
+		<td>.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}</td>
+	</tr>
+	{{end}}
+	{{end}}
+	{{end}}
+</table>
    +{{end}} + + +` diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go new file mode 100644 index 0000000..9bf4286 --- /dev/null +++ b/vendor/golang.org/x/net/trace/histogram.go @@ -0,0 +1,365 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +// This file implements histogramming for RPC statistics collection. + +import ( + "bytes" + "fmt" + "html/template" + "log" + "math" + "sync" + + "golang.org/x/net/internal/timeseries" +) + +const ( + bucketCount = 38 +) + +// histogram keeps counts of values in buckets that are spaced +// out in powers of 2: 0-1, 2-3, 4-7... +// histogram implements timeseries.Observable +type histogram struct { + sum int64 // running total of measurements + sumOfSquares float64 // square of running total + buckets []int64 // bucketed values for histogram + value int // holds a single value as an optimization + valueCount int64 // number of values recorded for single value +} + +// AddMeasurement records a value measurement observation to the histogram. +func (h *histogram) addMeasurement(value int64) { + // TODO: assert invariant + h.sum += value + h.sumOfSquares += float64(value) * float64(value) + + bucketIndex := getBucket(value) + + if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { + h.value = bucketIndex + h.valueCount++ + } else { + h.allocateBuckets() + h.buckets[bucketIndex]++ + } +} + +func (h *histogram) allocateBuckets() { + if h.buckets == nil { + h.buckets = make([]int64, bucketCount) + h.buckets[h.value] = h.valueCount + h.value = 0 + h.valueCount = -1 + } +} + +func log2(i int64) int { + n := 0 + for ; i >= 0x100; i >>= 8 { + n += 8 + } + for ; i > 0; i >>= 1 { + n += 1 + } + return n +} + +func getBucket(i int64) (index int) { + index = log2(i) - 1 + if index < 0 { + index = 0 + } + if index >= bucketCount { + index = bucketCount - 1 + } + return +} + +// Total returns the number of recorded observations. +func (h *histogram) total() (total int64) { + if h.valueCount >= 0 { + total = h.valueCount + } + for _, val := range h.buckets { + total += int64(val) + } + return +} + +// Average returns the average value of recorded observations. +func (h *histogram) average() float64 { + t := h.total() + if t == 0 { + return 0 + } + return float64(h.sum) / float64(t) +} + +// Variance returns the variance of recorded observations. +func (h *histogram) variance() float64 { + t := float64(h.total()) + if t == 0 { + return 0 + } + s := float64(h.sum) / t + return h.sumOfSquares/t - s*s +} + +// StandardDeviation returns the standard deviation of recorded observations. +func (h *histogram) standardDeviation() float64 { + return math.Sqrt(h.variance()) +} + +// PercentileBoundary estimates the value that the given fraction of recorded +// observations are less than. +func (h *histogram) percentileBoundary(percentile float64) int64 { + total := h.total() + + // Corner cases (make sure result is strictly less than Total()) + if total == 0 { + return 0 + } else if total == 1 { + return int64(h.average()) + } + + percentOfTotal := round(float64(total) * percentile) + var runningTotal int64 + + for i := range h.buckets { + value := h.buckets[i] + runningTotal += value + if runningTotal == percentOfTotal { + // We hit an exact bucket boundary. If the next bucket has data, it is a + // good estimate of the value. 
If the bucket is empty, we interpolate the + // midpoint between the next bucket's boundary and the next non-zero + // bucket. If the remaining buckets are all empty, then we use the + // boundary for the next bucket as the estimate. + j := uint8(i + 1) + min := bucketBoundary(j) + if runningTotal < total { + for h.buckets[j] == 0 { + j++ + } + } + max := bucketBoundary(j) + return min + round(float64(max-min)/2) + } else if runningTotal > percentOfTotal { + // The value is in this bucket. Interpolate the value. + delta := runningTotal - percentOfTotal + percentBucket := float64(value-delta) / float64(value) + bucketMin := bucketBoundary(uint8(i)) + nextBucketMin := bucketBoundary(uint8(i + 1)) + bucketSize := nextBucketMin - bucketMin + return bucketMin + round(percentBucket*float64(bucketSize)) + } + } + return bucketBoundary(bucketCount - 1) +} + +// Median returns the estimated median of the observed values. +func (h *histogram) median() int64 { + return h.percentileBoundary(0.5) +} + +// Add adds other to h. +func (h *histogram) Add(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == 0 { + // Other histogram is empty + } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { + // Both have a single bucketed value, aggregate them + h.valueCount += o.valueCount + } else { + // Two different values necessitate buckets in this histogram + h.allocateBuckets() + if o.valueCount >= 0 { + h.buckets[o.value] += o.valueCount + } else { + for i := range h.buckets { + h.buckets[i] += o.buckets[i] + } + } + } + h.sumOfSquares += o.sumOfSquares + h.sum += o.sum +} + +// Clear resets the histogram to an empty state, removing all observed values. +func (h *histogram) Clear() { + h.buckets = nil + h.value = 0 + h.valueCount = 0 + h.sum = 0 + h.sumOfSquares = 0 +} + +// CopyFrom copies from other, which must be a *histogram, into h. +func (h *histogram) CopyFrom(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == -1 { + h.allocateBuckets() + copy(h.buckets, o.buckets) + } + h.sum = o.sum + h.sumOfSquares = o.sumOfSquares + h.value = o.value + h.valueCount = o.valueCount +} + +// Multiply scales the histogram by the specified ratio. +func (h *histogram) Multiply(ratio float64) { + if h.valueCount == -1 { + for i := range h.buckets { + h.buckets[i] = int64(float64(h.buckets[i]) * ratio) + } + } else { + h.valueCount = int64(float64(h.valueCount) * ratio) + } + h.sum = int64(float64(h.sum) * ratio) + h.sumOfSquares = h.sumOfSquares * ratio +} + +// New creates a new histogram. +func (h *histogram) New() timeseries.Observable { + r := new(histogram) + r.Clear() + return r +} + +func (h *histogram) String() string { + return fmt.Sprintf("%d, %f, %d, %d, %v", + h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) +} + +// round returns the closest int64 to the argument +func round(in float64) int64 { + return int64(math.Floor(in + 0.5)) +} + +// bucketBoundary returns the first value in the bucket. +func bucketBoundary(bucket uint8) int64 { + if bucket == 0 { + return 0 + } + return 1 << bucket +} + +// bucketData holds data about a specific bucket for use in distTmpl. +type bucketData struct { + Lower, Upper int64 + N int64 + Pct, CumulativePct float64 + GraphWidth int +} + +// data holds data about a Distribution for use in distTmpl. +type data struct { + Buckets []*bucketData + Count, Median int64 + Mean, StandardDeviation float64 +} + +// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
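+// Bars are scaled so the fullest bucket renders at this width:
+// GraphWidth = count * (maxHTMLBarWidth / largest count). With counts
+// {7, 14}, for example, the bars are 175 and 350 pixels wide.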
+const maxHTMLBarWidth = 350.0
+
+// newData returns data representing h for use in distTmpl.
+func (h *histogram) newData() *data {
+	// Force the allocation of buckets to simplify the rendering implementation
+	h.allocateBuckets()
+	// We scale the bars on the right so that the largest bar is
+	// maxHTMLBarWidth pixels in width.
+	maxBucket := int64(0)
+	for _, n := range h.buckets {
+		if n > maxBucket {
+			maxBucket = n
+		}
+	}
+	total := h.total()
+	barsizeMult := maxHTMLBarWidth / float64(maxBucket)
+	var pctMult float64
+	if total == 0 {
+		pctMult = 1.0
+	} else {
+		pctMult = 100.0 / float64(total)
+	}
+
+	buckets := make([]*bucketData, len(h.buckets))
+	runningTotal := int64(0)
+	for i, n := range h.buckets {
+		if n == 0 {
+			continue
+		}
+		runningTotal += n
+		var upperBound int64
+		if i < bucketCount-1 {
+			upperBound = bucketBoundary(uint8(i + 1))
+		} else {
+			upperBound = math.MaxInt64
+		}
+		buckets[i] = &bucketData{
+			Lower:         bucketBoundary(uint8(i)),
+			Upper:         upperBound,
+			N:             n,
+			Pct:           float64(n) * pctMult,
+			CumulativePct: float64(runningTotal) * pctMult,
+			GraphWidth:    int(float64(n) * barsizeMult),
+		}
+	}
+	return &data{
+		Buckets:           buckets,
+		Count:             total,
+		Median:            h.median(),
+		Mean:              h.average(),
+		StandardDeviation: h.standardDeviation(),
+	}
+}
+
+func (h *histogram) html() template.HTML {
+	buf := new(bytes.Buffer)
+	if err := distTmpl().Execute(buf, h.newData()); err != nil {
+		buf.Reset()
+		log.Printf("net/trace: couldn't execute template: %v", err)
+	}
+	return template.HTML(buf.String())
+}
+
+var distTmplCache *template.Template
+var distTmplOnce sync.Once
+
+func distTmpl() *template.Template {
+	distTmplOnce.Do(func() {
+		// Input: data
+		distTmplCache = template.Must(template.New("distTmpl").Parse(`
+<table>
+<tr>
+	<td style="padding:0.25em">Count: {{.Count}}</td>
+	<td style="padding:0.25em">Mean: {{printf "%.0f" .Mean}}</td>
+	<td style="padding:0.25em">StdDev: {{printf "%.0f" .StandardDeviation}}</td>
+	<td style="padding:0.25em">Median: {{.Median}}</td>
+</tr>
+</table>
+<hr>
+<table>
+{{range $b := .Buckets}}
+{{if $b}}
+  <tr>
+    <td style="padding:0 0 0 0.25em">[</td>
+    <td style="text-align:right;padding:0 0.25em">{{.Lower}},</td>
+    <td style="text-align:right;padding:0 0.25em">{{.Upper}})</td>
+    <td style="text-align:right;padding:0 0.25em">{{.N}}</td>
+    <td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .Pct}}%</td>
+    <td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .CumulativePct}}%</td>
+    <td><div style="background-color: blue; height: 1em; width: {{.GraphWidth}}px;"></div></td>
+  </tr>
+{{end}}
+{{end}}
+</table>
    +`)) + }) + return distTmplCache +} diff --git a/vendor/golang.org/x/net/trace/histogram_test.go b/vendor/golang.org/x/net/trace/histogram_test.go new file mode 100644 index 0000000..d384b93 --- /dev/null +++ b/vendor/golang.org/x/net/trace/histogram_test.go @@ -0,0 +1,325 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "math" + "testing" +) + +type sumTest struct { + value int64 + sum int64 + sumOfSquares float64 + total int64 +} + +var sumTests = []sumTest{ + {100, 100, 10000, 1}, + {50, 150, 12500, 2}, + {50, 200, 15000, 3}, + {50, 250, 17500, 4}, +} + +type bucketingTest struct { + in int64 + log int + bucket int +} + +var bucketingTests = []bucketingTest{ + {0, 0, 0}, + {1, 1, 0}, + {2, 2, 1}, + {3, 2, 1}, + {4, 3, 2}, + {1000, 10, 9}, + {1023, 10, 9}, + {1024, 11, 10}, + {1000000, 20, 19}, +} + +type multiplyTest struct { + in int64 + ratio float64 + expectedSum int64 + expectedTotal int64 + expectedSumOfSquares float64 +} + +var multiplyTests = []multiplyTest{ + {15, 2.5, 37, 2, 562.5}, + {128, 4.6, 758, 13, 77953.9}, +} + +type percentileTest struct { + fraction float64 + expected int64 +} + +var percentileTests = []percentileTest{ + {0.25, 48}, + {0.5, 96}, + {0.6, 109}, + {0.75, 128}, + {0.90, 205}, + {0.95, 230}, + {0.99, 256}, +} + +func TestSum(t *testing.T) { + var h histogram + + for _, test := range sumTests { + h.addMeasurement(test.value) + sum := h.sum + if sum != test.sum { + t.Errorf("h.Sum = %v WANT: %v", sum, test.sum) + } + + sumOfSquares := h.sumOfSquares + if sumOfSquares != test.sumOfSquares { + t.Errorf("h.SumOfSquares = %v WANT: %v", sumOfSquares, test.sumOfSquares) + } + + total := h.total() + if total != test.total { + t.Errorf("h.Total = %v WANT: %v", total, test.total) + } + } +} + +func TestMultiply(t *testing.T) { + var h histogram + for i, test := range multiplyTests { + h.addMeasurement(test.in) + h.Multiply(test.ratio) + if h.sum != test.expectedSum { + t.Errorf("#%v: h.sum = %v WANT: %v", i, h.sum, test.expectedSum) + } + if h.total() != test.expectedTotal { + t.Errorf("#%v: h.total = %v WANT: %v", i, h.total(), test.expectedTotal) + } + if h.sumOfSquares != test.expectedSumOfSquares { + t.Errorf("#%v: h.SumOfSquares = %v WANT: %v", i, test.expectedSumOfSquares, h.sumOfSquares) + } + } +} + +func TestBucketingFunctions(t *testing.T) { + for _, test := range bucketingTests { + log := log2(test.in) + if log != test.log { + t.Errorf("log2 = %v WANT: %v", log, test.log) + } + + bucket := getBucket(test.in) + if bucket != test.bucket { + t.Errorf("getBucket = %v WANT: %v", bucket, test.bucket) + } + } +} + +func TestAverage(t *testing.T) { + a := new(histogram) + average := a.average() + if average != 0 { + t.Errorf("Average of empty histogram was %v WANT: 0", average) + } + + a.addMeasurement(1) + a.addMeasurement(1) + a.addMeasurement(3) + const expected = float64(5) / float64(3) + average = a.average() + + if !isApproximate(average, expected) { + t.Errorf("Average = %g WANT: %v", average, expected) + } +} + +func TestStandardDeviation(t *testing.T) { + a := new(histogram) + add(a, 10, 1<<4) + add(a, 10, 1<<5) + add(a, 10, 1<<6) + stdDev := a.standardDeviation() + const expected = 19.95 + + if !isApproximate(stdDev, expected) { + t.Errorf("StandardDeviation = %v WANT: %v", stdDev, expected) + } + + // No values + a = new(histogram) + stdDev = a.standardDeviation() + + if !isApproximate(stdDev, 0) { + 
t.Errorf("StandardDeviation = %v WANT: 0", stdDev) + } + + add(a, 1, 1<<4) + if !isApproximate(stdDev, 0) { + t.Errorf("StandardDeviation = %v WANT: 0", stdDev) + } + + add(a, 10, 1<<4) + if !isApproximate(stdDev, 0) { + t.Errorf("StandardDeviation = %v WANT: 0", stdDev) + } +} + +func TestPercentileBoundary(t *testing.T) { + a := new(histogram) + add(a, 5, 1<<4) + add(a, 10, 1<<6) + add(a, 5, 1<<7) + + for _, test := range percentileTests { + percentile := a.percentileBoundary(test.fraction) + if percentile != test.expected { + t.Errorf("h.PercentileBoundary (fraction=%v) = %v WANT: %v", test.fraction, percentile, test.expected) + } + } +} + +func TestCopyFrom(t *testing.T) { + a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} + b := histogram{6, 36, []int64{2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39}, 5, -1} + + a.CopyFrom(&b) + + if a.String() != b.String() { + t.Errorf("a.String = %s WANT: %s", a.String(), b.String()) + } +} + +func TestClear(t *testing.T) { + a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} + + a.Clear() + + expected := "0, 0.000000, 0, 0, []" + if a.String() != expected { + t.Errorf("a.String = %s WANT %s", a.String(), expected) + } +} + +func TestNew(t *testing.T) { + a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} + b := a.New() + + expected := "0, 0.000000, 0, 0, []" + if b.(*histogram).String() != expected { + t.Errorf("b.(*histogram).String = %s WANT: %s", b.(*histogram).String(), expected) + } +} + +func TestAdd(t *testing.T) { + // The tests here depend on the associativity of addMeasurement and Add. 
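+	// That is, measuring values into two histograms and merging them
+	// with Add must leave the same state as measuring all the values
+	// into one histogram; each case below checks that via String().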
+ // Add empty observation + a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} + b := a.New() + + expected := a.String() + a.Add(b) + if a.String() != expected { + t.Errorf("a.String = %s WANT: %s", a.String(), expected) + } + + // Add same bucketed value, no new buckets + c := new(histogram) + d := new(histogram) + e := new(histogram) + c.addMeasurement(12) + d.addMeasurement(11) + e.addMeasurement(12) + e.addMeasurement(11) + c.Add(d) + if c.String() != e.String() { + t.Errorf("c.String = %s WANT: %s", c.String(), e.String()) + } + + // Add bucketed values + f := new(histogram) + g := new(histogram) + h := new(histogram) + f.addMeasurement(4) + f.addMeasurement(12) + f.addMeasurement(100) + g.addMeasurement(18) + g.addMeasurement(36) + g.addMeasurement(255) + h.addMeasurement(4) + h.addMeasurement(12) + h.addMeasurement(100) + h.addMeasurement(18) + h.addMeasurement(36) + h.addMeasurement(255) + f.Add(g) + if f.String() != h.String() { + t.Errorf("f.String = %q WANT: %q", f.String(), h.String()) + } + + // add buckets to no buckets + i := new(histogram) + j := new(histogram) + k := new(histogram) + j.addMeasurement(18) + j.addMeasurement(36) + j.addMeasurement(255) + k.addMeasurement(18) + k.addMeasurement(36) + k.addMeasurement(255) + i.Add(j) + if i.String() != k.String() { + t.Errorf("i.String = %q WANT: %q", i.String(), k.String()) + } + + // add buckets to single value (no overlap) + l := new(histogram) + m := new(histogram) + n := new(histogram) + l.addMeasurement(0) + m.addMeasurement(18) + m.addMeasurement(36) + m.addMeasurement(255) + n.addMeasurement(0) + n.addMeasurement(18) + n.addMeasurement(36) + n.addMeasurement(255) + l.Add(m) + if l.String() != n.String() { + t.Errorf("l.String = %q WANT: %q", l.String(), n.String()) + } + + // mixed order + o := new(histogram) + p := new(histogram) + o.addMeasurement(0) + o.addMeasurement(2) + o.addMeasurement(0) + p.addMeasurement(0) + p.addMeasurement(0) + p.addMeasurement(2) + if o.String() != p.String() { + t.Errorf("o.String = %q WANT: %q", o.String(), p.String()) + } +} + +func add(h *histogram, times int, val int64) { + for i := 0; i < times; i++ { + h.addMeasurement(val) + } +} + +func isApproximate(x, y float64) bool { + return math.Abs(x-y) < 1e-2 +} diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go new file mode 100644 index 0000000..a46ee0e --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace.go @@ -0,0 +1,1103 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package trace implements tracing of requests and long-lived objects. +It exports HTTP interfaces on /debug/requests and /debug/events. + +A trace.Trace provides tracing for short-lived objects, usually requests. +A request handler might be implemented like this: + + func fooHandler(w http.ResponseWriter, req *http.Request) { + tr := trace.New("mypkg.Foo", req.URL.Path) + defer tr.Finish() + ... + tr.LazyPrintf("some event %q happened", str) + ... + if err := somethingImportant(); err != nil { + tr.LazyPrintf("somethingImportant failed: %v", err) + tr.SetError() + } + } + +The /debug/requests HTTP endpoint organizes the traces by family, +errors, and duration. It also provides histogram of request duration +for each family. 
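+
+On Go 1.7 and later, the trace can be carried to downstream code via the
+request context (a sketch; NewContext and FromContext are provided by
+this package):
+
+	ctx := trace.NewContext(req.Context(), tr)
+	...
+	if tr, ok := trace.FromContext(ctx); ok {
+		tr.LazyPrintf("reached the storage layer")
+	}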
+ +A trace.EventLog provides tracing for long-lived objects, such as RPC +connections. + + // A Fetcher fetches URL paths for a single domain. + type Fetcher struct { + domain string + events trace.EventLog + } + + func NewFetcher(domain string) *Fetcher { + return &Fetcher{ + domain, + trace.NewEventLog("mypkg.Fetcher", domain), + } + } + + func (f *Fetcher) Fetch(path string) (string, error) { + resp, err := http.Get("http://" + f.domain + "/" + path) + if err != nil { + f.events.Errorf("Get(%q) = %v", path, err) + return "", err + } + f.events.Printf("Get(%q) = %s", path, resp.Status) + ... + } + + func (f *Fetcher) Close() error { + f.events.Finish() + return nil + } + +The /debug/events HTTP endpoint organizes the event logs by family and +by time since the last error. The expanded view displays recent log +entries and the log's call stack. +*/ +package trace // import "golang.org/x/net/trace" + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net" + "net/http" + "runtime" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/internal/timeseries" +) + +// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. +// FOR DEBUGGING ONLY. This will slow down the program. +var DebugUseAfterFinish = false + +// AuthRequest determines whether a specific request is permitted to load the +// /debug/requests or /debug/events pages. +// +// It returns two bools; the first indicates whether the page may be viewed at all, +// and the second indicates whether sensitive events will be shown. +// +// AuthRequest may be replaced by a program to customize its authorization requirements. +// +// The default AuthRequest function returns (true, true) if and only if the request +// comes from localhost/127.0.0.1/[::1]. +var AuthRequest = func(req *http.Request) (any, sensitive bool) { + // RemoteAddr is commonly in the form "IP" or "IP:port". + // If it is in the form "IP:port", split off the port. + host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + host = req.RemoteAddr + } + switch host { + case "localhost", "127.0.0.1", "::1": + return true, true + default: + return false, false + } +} + +func init() { + // TODO(jbd): Serve Traces from /debug/traces in the future? + // There is no requirement for a request to be present to have traces. + http.HandleFunc("/debug/requests", Traces) + http.HandleFunc("/debug/events", Events) +} + +// Traces responds with traces from the program. +// The package initialization registers it in http.DefaultServeMux +// at /debug/requests. +// +// It performs authorization by running AuthRequest. +func Traces(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + Render(w, req, sensitive) +} + +// Events responds with a page of events collected by EventLogs. +// The package initialization registers it in http.DefaultServeMux +// at /debug/events. +// +// It performs authorization by running AuthRequest. +func Events(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + RenderEvents(w, req, sensitive) +} + +// Render renders the HTML page typically served at /debug/requests. +// It does not do any auth checking. The request may be nil. 
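+// A program that mounts the page on its own mux (or applies its own
+// auth) can call Render directly, e.g.
+//
+//	mux.HandleFunc("/internal/requests", func(w http.ResponseWriter, r *http.Request) {
+//		trace.Render(w, r, false) // sensitive events hidden
+//	})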
+// +// Most users will use the Traces handler. +func Render(w io.Writer, req *http.Request, sensitive bool) { + data := &struct { + Families []string + ActiveTraceCount map[string]int + CompletedTraces map[string]*family + + // Set when a bucket has been selected. + Traces traceList + Family string + Bucket int + Expanded bool + Traced bool + Active bool + ShowSensitive bool // whether to show sensitive events + + Histogram template.HTML + HistogramWindow string // e.g. "last minute", "last hour", "all time" + + // If non-zero, the set of traces is a partial set, + // and this is the total number. + Total int + }{ + CompletedTraces: completedTraces, + } + + data.ShowSensitive = sensitive + if req != nil { + // Allow show_sensitive=0 to force hiding of sensitive data for testing. + // This only goes one way; you can't use show_sensitive=1 to see things. + if req.FormValue("show_sensitive") == "0" { + data.ShowSensitive = false + } + + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { + data.Traced = exp + } + } + + completedMu.RLock() + data.Families = make([]string, 0, len(completedTraces)) + for fam := range completedTraces { + data.Families = append(data.Families, fam) + } + completedMu.RUnlock() + sort.Strings(data.Families) + + // We are careful here to minimize the time spent locking activeMu, + // since that lock is required every time an RPC starts and finishes. + data.ActiveTraceCount = make(map[string]int, len(data.Families)) + activeMu.RLock() + for fam, s := range activeTraces { + data.ActiveTraceCount[fam] = s.Len() + } + activeMu.RUnlock() + + var ok bool + data.Family, data.Bucket, ok = parseArgs(req) + switch { + case !ok: + // No-op + case data.Bucket == -1: + data.Active = true + n := data.ActiveTraceCount[data.Family] + data.Traces = getActiveTraces(data.Family) + if len(data.Traces) < n { + data.Total = n + } + case data.Bucket < bucketsPerFamily: + if b := lookupBucket(data.Family, data.Bucket); b != nil { + data.Traces = b.Copy(data.Traced) + } + default: + if f := getFamily(data.Family, false); f != nil { + var obs timeseries.Observable + f.LatencyMu.RLock() + switch o := data.Bucket - bucketsPerFamily; o { + case 0: + obs = f.Latency.Minute() + data.HistogramWindow = "last minute" + case 1: + obs = f.Latency.Hour() + data.HistogramWindow = "last hour" + case 2: + obs = f.Latency.Total() + data.HistogramWindow = "all time" + } + f.LatencyMu.RUnlock() + if obs != nil { + data.Histogram = obs.(*histogram).html() + } + } + } + + if data.Traces != nil { + defer data.Traces.Free() + sort.Sort(data.Traces) + } + + completedMu.RLock() + defer completedMu.RUnlock() + if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseArgs(req *http.Request) (fam string, b int, ok bool) { + if req == nil { + return "", 0, false + } + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < -1 { + return "", 0, false + } + + return fam, b, true +} + +func lookupBucket(fam string, b int) *traceBucket { + f := getFamily(fam, false) + if f == nil || b < 0 || b >= len(f.Buckets) { + return nil + } + return f.Buckets[b] +} + +type contextKeyT string + +var contextKey = contextKeyT("golang.org/x/net/trace.Trace") + +// Trace represents an active request. 
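+// The typical lifetime is: create with New, log with LazyPrintf or
+// LazyLog, mark failures with SetError, and always end with Finish:
+//
+//	tr := trace.New("mypkg.Fetch", url)
+//	defer tr.Finish()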
+type Trace interface { + // LazyLog adds x to the event log. It will be evaluated each time the + // /debug/requests page is rendered. Any memory referenced by x will be + // pinned until the trace is finished and later discarded. + LazyLog(x fmt.Stringer, sensitive bool) + + // LazyPrintf evaluates its arguments with fmt.Sprintf each time the + // /debug/requests page is rendered. Any memory referenced by a will be + // pinned until the trace is finished and later discarded. + LazyPrintf(format string, a ...interface{}) + + // SetError declares that this trace resulted in an error. + SetError() + + // SetRecycler sets a recycler for the trace. + // f will be called for each event passed to LazyLog at a time when + // it is no longer required, whether while the trace is still active + // and the event is discarded, or when a completed trace is discarded. + SetRecycler(f func(interface{})) + + // SetTraceInfo sets the trace info for the trace. + // This is currently unused. + SetTraceInfo(traceID, spanID uint64) + + // SetMaxEvents sets the maximum number of events that will be stored + // in the trace. This has no effect if any events have already been + // added to the trace. + SetMaxEvents(m int) + + // Finish declares that this trace is complete. + // The trace should not be used after calling this method. + Finish() +} + +type lazySprintf struct { + format string + a []interface{} +} + +func (l *lazySprintf) String() string { + return fmt.Sprintf(l.format, l.a...) +} + +// New returns a new Trace with the specified family and title. +func New(family, title string) Trace { + tr := newTrace() + tr.ref() + tr.Family, tr.Title = family, title + tr.Start = time.Now() + tr.maxEvents = maxEventsPerTrace + tr.events = tr.eventsBuf[:0] + + activeMu.RLock() + s := activeTraces[tr.Family] + activeMu.RUnlock() + if s == nil { + activeMu.Lock() + s = activeTraces[tr.Family] // check again + if s == nil { + s = new(traceSet) + activeTraces[tr.Family] = s + } + activeMu.Unlock() + } + s.Add(tr) + + // Trigger allocation of the completed trace structure for this family. + // This will cause the family to be present in the request page during + // the first trace of this family. We don't care about the return value, + // nor is there any need for this to run inline, so we execute it in its + // own goroutine, but only if the family isn't allocated yet. + completedMu.RLock() + if _, ok := completedTraces[tr.Family]; !ok { + go allocFamily(tr.Family) + } + completedMu.RUnlock() + + return tr +} + +func (tr *trace) Finish() { + elapsed := time.Now().Sub(tr.Start) + tr.mu.Lock() + tr.Elapsed = elapsed + tr.mu.Unlock() + + if DebugUseAfterFinish { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + tr.finishStack = buf[:n] + } + + activeMu.RLock() + m := activeTraces[tr.Family] + activeMu.RUnlock() + m.Remove(tr) + + f := getFamily(tr.Family, true) + tr.mu.RLock() // protects tr fields in Cond.match calls + for _, b := range f.Buckets { + if b.Cond.match(tr) { + b.Add(tr) + } + } + tr.mu.RUnlock() + + // Add a sample of elapsed time as microseconds to the family's timeseries + h := new(histogram) + h.addMeasurement(elapsed.Nanoseconds() / 1e3) + f.LatencyMu.Lock() + f.Latency.Add(h) + f.LatencyMu.Unlock() + + tr.unref() // matches ref in New +} + +const ( + bucketsPerFamily = 9 + tracesPerBucket = 10 + maxActiveTraces = 20 // Maximum number of active traces to show. + maxEventsPerTrace = 10 + numHistogramBuckets = 38 +) + +var ( + // The active traces. 
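+	// activeMu is read-locked on every trace start and finish, so the
+	// /debug/requests rendering path keeps its critical sections short.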
+ activeMu sync.RWMutex + activeTraces = make(map[string]*traceSet) // family -> traces + + // Families of completed traces. + completedMu sync.RWMutex + completedTraces = make(map[string]*family) // family -> traces +) + +type traceSet struct { + mu sync.RWMutex + m map[*trace]bool + + // We could avoid the entire map scan in FirstN by having a slice of all the traces + // ordered by start time, and an index into that from the trace struct, with a periodic + // repack of the slice after enough traces finish; we could also use a skip list or similar. + // However, that would shift some of the expense from /debug/requests time to RPC time, + // which is probably the wrong trade-off. +} + +func (ts *traceSet) Len() int { + ts.mu.RLock() + defer ts.mu.RUnlock() + return len(ts.m) +} + +func (ts *traceSet) Add(tr *trace) { + ts.mu.Lock() + if ts.m == nil { + ts.m = make(map[*trace]bool) + } + ts.m[tr] = true + ts.mu.Unlock() +} + +func (ts *traceSet) Remove(tr *trace) { + ts.mu.Lock() + delete(ts.m, tr) + ts.mu.Unlock() +} + +// FirstN returns the first n traces ordered by time. +func (ts *traceSet) FirstN(n int) traceList { + ts.mu.RLock() + defer ts.mu.RUnlock() + + if n > len(ts.m) { + n = len(ts.m) + } + trl := make(traceList, 0, n) + + // Fast path for when no selectivity is needed. + if n == len(ts.m) { + for tr := range ts.m { + tr.ref() + trl = append(trl, tr) + } + sort.Sort(trl) + return trl + } + + // Pick the oldest n traces. + // This is inefficient. See the comment in the traceSet struct. + for tr := range ts.m { + // Put the first n traces into trl in the order they occur. + // When we have n, sort trl, and thereafter maintain its order. + if len(trl) < n { + tr.ref() + trl = append(trl, tr) + if len(trl) == n { + // This is guaranteed to happen exactly once during this loop. + sort.Sort(trl) + } + continue + } + if tr.Start.After(trl[n-1].Start) { + continue + } + + // Find where to insert this one. + tr.ref() + i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) + trl[n-1].unref() + copy(trl[i+1:], trl[i:]) + trl[i] = tr + } + + return trl +} + +func getActiveTraces(fam string) traceList { + activeMu.RLock() + s := activeTraces[fam] + activeMu.RUnlock() + if s == nil { + return nil + } + return s.FirstN(maxActiveTraces) +} + +func getFamily(fam string, allocNew bool) *family { + completedMu.RLock() + f := completedTraces[fam] + completedMu.RUnlock() + if f == nil && allocNew { + f = allocFamily(fam) + } + return f +} + +func allocFamily(fam string) *family { + completedMu.Lock() + defer completedMu.Unlock() + f := completedTraces[fam] + if f == nil { + f = newFamily() + completedTraces[fam] = f + } + return f +} + +// family represents a set of trace buckets and associated latency information. +type family struct { + // traces may occur in multiple buckets. 
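+	// (Finish adds a trace to every bucket whose condition matches, so
+	// a slow, failed trace appears under several latency buckets and
+	// under "errors".)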
+ Buckets [bucketsPerFamily]*traceBucket + + // latency time series + LatencyMu sync.RWMutex + Latency *timeseries.MinuteHourSeries +} + +func newFamily() *family { + return &family{ + Buckets: [bucketsPerFamily]*traceBucket{ + {Cond: minCond(0)}, + {Cond: minCond(50 * time.Millisecond)}, + {Cond: minCond(100 * time.Millisecond)}, + {Cond: minCond(200 * time.Millisecond)}, + {Cond: minCond(500 * time.Millisecond)}, + {Cond: minCond(1 * time.Second)}, + {Cond: minCond(10 * time.Second)}, + {Cond: minCond(100 * time.Second)}, + {Cond: errorCond{}}, + }, + Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), + } +} + +// traceBucket represents a size-capped bucket of historic traces, +// along with a condition for a trace to belong to the bucket. +type traceBucket struct { + Cond cond + + // Ring buffer implementation of a fixed-size FIFO queue. + mu sync.RWMutex + buf [tracesPerBucket]*trace + start int // < tracesPerBucket + length int // <= tracesPerBucket +} + +func (b *traceBucket) Add(tr *trace) { + b.mu.Lock() + defer b.mu.Unlock() + + i := b.start + b.length + if i >= tracesPerBucket { + i -= tracesPerBucket + } + if b.length == tracesPerBucket { + // "Remove" an element from the bucket. + b.buf[i].unref() + b.start++ + if b.start == tracesPerBucket { + b.start = 0 + } + } + b.buf[i] = tr + if b.length < tracesPerBucket { + b.length++ + } + tr.ref() +} + +// Copy returns a copy of the traces in the bucket. +// If tracedOnly is true, only the traces with trace information will be returned. +// The logs will be ref'd before returning; the caller should call +// the Free method when it is done with them. +// TODO(dsymonds): keep track of traced requests in separate buckets. +func (b *traceBucket) Copy(tracedOnly bool) traceList { + b.mu.RLock() + defer b.mu.RUnlock() + + trl := make(traceList, 0, b.length) + for i, x := 0, b.start; i < b.length; i++ { + tr := b.buf[x] + if !tracedOnly || tr.spanID != 0 { + tr.ref() + trl = append(trl, tr) + } + x++ + if x == b.length { + x = 0 + } + } + return trl +} + +func (b *traceBucket) Empty() bool { + b.mu.RLock() + defer b.mu.RUnlock() + return b.length == 0 +} + +// cond represents a condition on a trace. +type cond interface { + match(t *trace) bool + String() string +} + +type minCond time.Duration + +func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } +func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } + +type errorCond struct{} + +func (e errorCond) match(t *trace) bool { return t.IsError } +func (e errorCond) String() string { return "errors" } + +type traceList []*trace + +// Free calls unref on each element of the list. +func (trl traceList) Free() { + for _, t := range trl { + t.unref() + } +} + +// traceList may be sorted in reverse chronological order. +func (trl traceList) Len() int { return len(trl) } +func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } +func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } + +// An event is a timestamped log entry in a trace. 
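+// Recyclable marks What values that arrived via LazyLog; when such an
+// event is discarded, the trace's recycler (if set) gets What back.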
+type event struct { + When time.Time + Elapsed time.Duration // since previous event in trace + NewDay bool // whether this event is on a different day to the previous event + Recyclable bool // whether this event was passed via LazyLog + Sensitive bool // whether this event contains sensitive information + What interface{} // string or fmt.Stringer +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e event) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// discarded represents a number of discarded events. +// It is stored as *discarded to make it easier to update in-place. +type discarded int + +func (d *discarded) String() string { + return fmt.Sprintf("(%d events discarded)", int(*d)) +} + +// trace represents an active or complete request, +// either sent or received by this program. +type trace struct { + // Family is the top-level grouping of traces to which this belongs. + Family string + + // Title is the title of this trace. + Title string + + // Start time of the this trace. + Start time.Time + + mu sync.RWMutex + events []event // Append-only sequence of events (modulo discards). + maxEvents int + recycler func(interface{}) + IsError bool // Whether this trace resulted in an error. + Elapsed time.Duration // Elapsed time for this trace, zero while active. + traceID uint64 // Trace information if non-zero. + spanID uint64 + + refs int32 // how many buckets this is in + disc discarded // scratch space to avoid allocation + + finishStack []byte // where finish was called, if DebugUseAfterFinish is set + + eventsBuf [4]event // preallocated buffer in case we only log a few events +} + +func (tr *trace) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + tr.Family = "" + tr.Title = "" + tr.Start = time.Time{} + + tr.mu.Lock() + tr.Elapsed = 0 + tr.traceID = 0 + tr.spanID = 0 + tr.IsError = false + tr.maxEvents = 0 + tr.events = nil + tr.recycler = nil + tr.mu.Unlock() + + tr.refs = 0 + tr.disc = 0 + tr.finishStack = nil + for i := range tr.eventsBuf { + tr.eventsBuf[i] = event{} + } +} + +// delta returns the elapsed time since the last event or the trace start, +// and whether it spans midnight. +// L >= tr.mu +func (tr *trace) delta(t time.Time) (time.Duration, bool) { + if len(tr.events) == 0 { + return t.Sub(tr.Start), false + } + prev := tr.events[len(tr.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() +} + +func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { + if DebugUseAfterFinish && tr.finishStack != nil { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) + } + + /* + NOTE TO DEBUGGERS + + If you are here because your program panicked in this code, + it is almost definitely the fault of code using this package, + and very unlikely to be the fault of this code. + + The most likely scenario is that some code elsewhere is using + a trace.Trace after its Finish method is called. + You can temporarily set the DebugUseAfterFinish var + to help discover where that is; do not leave that var set, + since it makes this package much less efficient. 
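+
+	   Because finished traces are recycled through a freelist (see
+	   newTrace and freeTrace below), a stale reference may later
+	   observe the events of a completely different request.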
+ */ + + e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} + tr.mu.Lock() + e.Elapsed, e.NewDay = tr.delta(e.When) + if len(tr.events) < tr.maxEvents { + tr.events = append(tr.events, e) + } else { + // Discard the middle events. + di := int((tr.maxEvents - 1) / 2) + if d, ok := tr.events[di].What.(*discarded); ok { + (*d)++ + } else { + // disc starts at two to count for the event it is replacing, + // plus the next one that we are about to drop. + tr.disc = 2 + if tr.recycler != nil && tr.events[di].Recyclable { + go tr.recycler(tr.events[di].What) + } + tr.events[di].What = &tr.disc + } + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + tr.events[di].When = tr.events[di+1].When + + if tr.recycler != nil && tr.events[di+1].Recyclable { + go tr.recycler(tr.events[di+1].What) + } + copy(tr.events[di+1:], tr.events[di+2:]) + tr.events[tr.maxEvents-1] = e + } + tr.mu.Unlock() +} + +func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { + tr.addEvent(x, true, sensitive) +} + +func (tr *trace) LazyPrintf(format string, a ...interface{}) { + tr.addEvent(&lazySprintf{format, a}, false, false) +} + +func (tr *trace) SetError() { + tr.mu.Lock() + tr.IsError = true + tr.mu.Unlock() +} + +func (tr *trace) SetRecycler(f func(interface{})) { + tr.mu.Lock() + tr.recycler = f + tr.mu.Unlock() +} + +func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.mu.Lock() + tr.traceID, tr.spanID = traceID, spanID + tr.mu.Unlock() +} + +func (tr *trace) SetMaxEvents(m int) { + tr.mu.Lock() + // Always keep at least three events: first, discarded count, last. + if len(tr.events) == 0 && m > 3 { + tr.maxEvents = m + } + tr.mu.Unlock() +} + +func (tr *trace) ref() { + atomic.AddInt32(&tr.refs, 1) +} + +func (tr *trace) unref() { + if atomic.AddInt32(&tr.refs, -1) == 0 { + tr.mu.RLock() + if tr.recycler != nil { + // freeTrace clears tr, so we hold tr.recycler and tr.events here. + go func(f func(interface{}), es []event) { + for _, e := range es { + if e.Recyclable { + f(e.What) + } + } + }(tr.recycler, tr.events) + } + tr.mu.RUnlock() + + freeTrace(tr) + } +} + +func (tr *trace) When() string { + return tr.Start.Format("2006/01/02 15:04:05.000000") +} + +func (tr *trace) ElapsedTime() string { + tr.mu.RLock() + t := tr.Elapsed + tr.mu.RUnlock() + + if t == 0 { + // Active trace. + t = time.Since(tr.Start) + } + return fmt.Sprintf("%.6f", t.Seconds()) +} + +func (tr *trace) Events() []event { + tr.mu.RLock() + defer tr.mu.RUnlock() + return tr.events +} + +var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? + +// newTrace returns a trace ready to use. +func newTrace() *trace { + select { + case tr := <-traceFreeList: + return tr + default: + return new(trace) + } +} + +// freeTrace adds tr to traceFreeList if there's room. +// This is non-blocking. +func freeTrace(tr *trace) { + if DebugUseAfterFinish { + return // never reuse + } + tr.reset() + select { + case traceFreeList <- tr: + default: + } +} + +func elapsed(d time.Duration) string { + b := []byte(fmt.Sprintf("%.6f", d.Seconds())) + + // For subsecond durations, blank all zeros before decimal point, + // and all zeros between the decimal point and the first non-zero digit. 
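+	// For example, 123µs is formatted as "0.000123" and rendered as
+	// " .   123", keeping columns aligned on the /debug pages.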
+	if d < time.Second {
+		dot := bytes.IndexByte(b, '.')
+		for i := 0; i < dot; i++ {
+			b[i] = ' '
+		}
+		for i := dot + 1; i < len(b); i++ {
+			if b[i] == '0' {
+				b[i] = ' '
+			} else {
+				break
+			}
+		}
+	}
+
+	return string(b)
+}
+
+var pageTmplCache *template.Template
+var pageTmplOnce sync.Once
+
+func pageTmpl() *template.Template {
+	pageTmplOnce.Do(func() {
+		pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{
+			"elapsed": elapsed,
+			"add":     func(a, b int) int { return a + b },
+		}).Parse(pageHTML))
+	})
+	return pageTmplCache
+}
+
+const pageHTML = `
+{{template "Prolog" .}}
+{{template "StatusTable" .}}
+{{template "Epilog" .}}
+
+{{define "Prolog"}}
+<html>
+	<head>
+	<title>/debug/requests</title>
+	</head>
+	<body>
+
+<h1>/debug/requests</h1>
+{{end}} {{/* end of Prolog */}}
+
+{{define "StatusTable"}}
+<table id="tr-status">
+	{{range $fam := .Families}}
+	<tr>
+		<td class="family">{{$fam}}</td>
+
+		{{$n := index $.ActiveTraceCount $fam}}
+		<td class="active{{if not $n}} empty{{end}}">
+			{{if $n}}<a href="?fam={{$fam}}&b=-1{{if $.Expanded}}&exp=1{{end}}">{{end}}
+			[{{$n}} active]
+			{{if $n}}</a>{{end}}
+		</td>
+
+		{{$f := index $.CompletedTraces $fam}}
+		{{range $i, $b := $f.Buckets}}
+		{{$empty := $b.Empty}}
+		<td class="{{if $empty}}empty{{end}}">
+			{{if not $empty}}<a href="?fam={{$fam}}&b={{$i}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
+			[{{.Cond}}]
+			{{if not $empty}}</a>{{end}}
+		</td>
+		{{end}}
+
+		{{$nb := len $f.Buckets}}
+		<td class="latency">
+			<a href="?fam={{$fam}}&b={{$nb}}">[minute]</a>
+		</td>
+		<td class="latency">
+			<a href="?fam={{$fam}}&b={{add $nb 1}}">[hour]</a>
+		</td>
+		<td class="latency">
+			<a href="?fam={{$fam}}&b={{add $nb 2}}">[total]</a>
+		</td>
+	</tr>
+	{{end}}
+</table>
+{{end}} {{/* end of StatusTable */}}
+
+{{define "Epilog"}}
+{{if $.Traces}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if or $.Expanded $.Traced}}
+  <a href="?fam={{$.Family}}&b={{$.Bucket}}">[Normal/Summary]</a>
+{{else}}
+  [Normal/Summary]
+{{end}}
+
+{{if or (not $.Expanded) $.Traced}}
+  <a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">[Normal/Expanded]</a>
+{{else}}
+  [Normal/Expanded]
+{{end}}
+
+{{if not $.Active}}
+	{{if or $.Expanded (not $.Traced)}}
+	<a href="?fam={{$.Family}}&b={{$.Bucket}}&rtraced=1">[Traced/Summary]</a>
+	{{else}}
+	[Traced/Summary]
+	{{end}}
+	{{if or (not $.Expanded) (not $.Traced)}}
+	<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1&rtraced=1">[Traced/Expanded]</a>
+	{{else}}
+	[Traced/Expanded]
+	{{end}}
+{{end}}
+
+{{if $.Total}}
+<p><em>Showing {{len $.Traces}} of {{$.Total}} traces.</em></p>
+{{end}}
+
+<table id="reqs">
+	<caption>
+		{{if $.Active}}Active{{else}}Completed{{end}} Requests
+	</caption>
+	<tr><th>When</th><th>Elapsed (s)</th></tr>
+	{{range $tr := $.Traces}}
+	<tr class="first">
+		<td class="when">{{$tr.When}}</td>
+		<td class="elapsed">{{$tr.ElapsedTime}}</td>
+		<td>{{$tr.Title}}</td>
+		{{/* TODO: include traceID/spanID */}}
+	</tr>
+	{{if $.Expanded}}
+	{{range $tr.Events}}
+	<tr>
+		<td class="when">{{.WhenString}}</td>
+		<td class="elapsed">{{elapsed .Elapsed}}</td>
+		<td>{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}<em>[redacted]</em>{{end}}</td>
+	</tr>
+	{{end}}
+	{{end}}
+	{{end}}
+</table>
+{{end}} {{/* if $.Traces */}}
+
+{{if $.Histogram}}
+<h4>Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}</h4>
    +{{$.Histogram}} +{{end}} {{/* if $.Histogram */}} + + + +{{end}} {{/* end of Epilog */}} +` diff --git a/vendor/golang.org/x/net/trace/trace_go16.go b/vendor/golang.org/x/net/trace/trace_go16.go new file mode 100644 index 0000000..d608191 --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace_go16.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package trace + +import "golang.org/x/net/context" + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} diff --git a/vendor/golang.org/x/net/trace/trace_go17.go b/vendor/golang.org/x/net/trace/trace_go17.go new file mode 100644 index 0000000..df6e1fb --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace_go17.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package trace + +import "context" + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} diff --git a/vendor/golang.org/x/net/trace/trace_test.go b/vendor/golang.org/x/net/trace/trace_test.go new file mode 100644 index 0000000..bfd9dfe --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace_test.go @@ -0,0 +1,178 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "net/http" + "reflect" + "testing" +) + +type s struct{} + +func (s) String() string { return "lazy string" } + +// TestReset checks whether all the fields are zeroed after reset. +func TestReset(t *testing.T) { + tr := New("foo", "bar") + tr.LazyLog(s{}, false) + tr.LazyPrintf("%d", 1) + tr.SetRecycler(func(_ interface{}) {}) + tr.SetTraceInfo(3, 4) + tr.SetMaxEvents(100) + tr.SetError() + tr.Finish() + + tr.(*trace).reset() + + if !reflect.DeepEqual(tr, new(trace)) { + t.Errorf("reset didn't clear all fields: %+v", tr) + } +} + +// TestResetLog checks whether all the fields are zeroed after reset. 
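+// (Comparing against new(eventLog) with reflect.DeepEqual catches any
+// field that a future change adds but reset forgets to clear.)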
+func TestResetLog(t *testing.T) { + el := NewEventLog("foo", "bar") + el.Printf("message") + el.Errorf("error") + el.Finish() + + el.(*eventLog).reset() + + if !reflect.DeepEqual(el, new(eventLog)) { + t.Errorf("reset didn't clear all fields: %+v", el) + } +} + +func TestAuthRequest(t *testing.T) { + testCases := []struct { + host string + want bool + }{ + {host: "192.168.23.1", want: false}, + {host: "192.168.23.1:8080", want: false}, + {host: "malformed remote addr", want: false}, + {host: "localhost", want: true}, + {host: "localhost:8080", want: true}, + {host: "127.0.0.1", want: true}, + {host: "127.0.0.1:8080", want: true}, + {host: "::1", want: true}, + {host: "[::1]:8080", want: true}, + } + for _, tt := range testCases { + req := &http.Request{RemoteAddr: tt.host} + any, sensitive := AuthRequest(req) + if any != tt.want || sensitive != tt.want { + t.Errorf("AuthRequest(%q) = %t, %t; want %t, %t", tt.host, any, sensitive, tt.want, tt.want) + } + } +} + +// TestParseTemplate checks that all templates used by this package are valid +// as they are parsed on first usage +func TestParseTemplate(t *testing.T) { + if tmpl := distTmpl(); tmpl == nil { + t.Error("invalid template returned from distTmpl()") + } + if tmpl := pageTmpl(); tmpl == nil { + t.Error("invalid template returned from pageTmpl()") + } + if tmpl := eventsTmpl(); tmpl == nil { + t.Error("invalid template returned from eventsTmpl()") + } +} + +func benchmarkTrace(b *testing.B, maxEvents, numEvents int) { + numSpans := (b.N + numEvents + 1) / numEvents + + for i := 0; i < numSpans; i++ { + tr := New("test", "test") + tr.SetMaxEvents(maxEvents) + for j := 0; j < numEvents; j++ { + tr.LazyPrintf("%d", j) + } + tr.Finish() + } +} + +func BenchmarkTrace_Default_2(b *testing.B) { + benchmarkTrace(b, 0, 2) +} + +func BenchmarkTrace_Default_10(b *testing.B) { + benchmarkTrace(b, 0, 10) +} + +func BenchmarkTrace_Default_100(b *testing.B) { + benchmarkTrace(b, 0, 100) +} + +func BenchmarkTrace_Default_1000(b *testing.B) { + benchmarkTrace(b, 0, 1000) +} + +func BenchmarkTrace_Default_10000(b *testing.B) { + benchmarkTrace(b, 0, 10000) +} + +func BenchmarkTrace_10_2(b *testing.B) { + benchmarkTrace(b, 10, 2) +} + +func BenchmarkTrace_10_10(b *testing.B) { + benchmarkTrace(b, 10, 10) +} + +func BenchmarkTrace_10_100(b *testing.B) { + benchmarkTrace(b, 10, 100) +} + +func BenchmarkTrace_10_1000(b *testing.B) { + benchmarkTrace(b, 10, 1000) +} + +func BenchmarkTrace_10_10000(b *testing.B) { + benchmarkTrace(b, 10, 10000) +} + +func BenchmarkTrace_100_2(b *testing.B) { + benchmarkTrace(b, 100, 2) +} + +func BenchmarkTrace_100_10(b *testing.B) { + benchmarkTrace(b, 100, 10) +} + +func BenchmarkTrace_100_100(b *testing.B) { + benchmarkTrace(b, 100, 100) +} + +func BenchmarkTrace_100_1000(b *testing.B) { + benchmarkTrace(b, 100, 1000) +} + +func BenchmarkTrace_100_10000(b *testing.B) { + benchmarkTrace(b, 100, 10000) +} + +func BenchmarkTrace_1000_2(b *testing.B) { + benchmarkTrace(b, 1000, 2) +} + +func BenchmarkTrace_1000_10(b *testing.B) { + benchmarkTrace(b, 1000, 10) +} + +func BenchmarkTrace_1000_100(b *testing.B) { + benchmarkTrace(b, 1000, 100) +} + +func BenchmarkTrace_1000_1000(b *testing.B) { + benchmarkTrace(b, 1000, 1000) +} + +func BenchmarkTrace_1000_10000(b *testing.B) { + benchmarkTrace(b, 1000, 10000) +} diff --git a/vendor/golang.org/x/net/webdav/file.go b/vendor/golang.org/x/net/webdav/file.go new file mode 100644 index 0000000..748118d --- /dev/null +++ b/vendor/golang.org/x/net/webdav/file.go @@ -0,0 +1,796 @@ +// 
Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "encoding/xml" + "io" + "net/http" + "os" + "path" + "path/filepath" + "strings" + "sync" + "time" + + "golang.org/x/net/context" +) + +// slashClean is equivalent to but slightly more efficient than +// path.Clean("/" + name). +func slashClean(name string) string { + if name == "" || name[0] != '/' { + name = "/" + name + } + return path.Clean(name) +} + +// A FileSystem implements access to a collection of named files. The elements +// in a file path are separated by slash ('/', U+002F) characters, regardless +// of host operating system convention. +// +// Each method has the same semantics as the os package's function of the same +// name. +// +// Note that the os.Rename documentation says that "OS-specific restrictions +// might apply". In particular, whether or not renaming a file or directory +// overwriting another existing file or directory is an error is OS-dependent. +type FileSystem interface { + Mkdir(ctx context.Context, name string, perm os.FileMode) error + OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) + RemoveAll(ctx context.Context, name string) error + Rename(ctx context.Context, oldName, newName string) error + Stat(ctx context.Context, name string) (os.FileInfo, error) +} + +// A File is returned by a FileSystem's OpenFile method and can be served by a +// Handler. +// +// A File may optionally implement the DeadPropsHolder interface, if it can +// load and save dead properties. +type File interface { + http.File + io.Writer +} + +// A Dir implements FileSystem using the native file system restricted to a +// specific directory tree. +// +// While the FileSystem.OpenFile method takes '/'-separated paths, a Dir's +// string value is a filename on the native file system, not a URL, so it is +// separated by filepath.Separator, which isn't necessarily '/'. +// +// An empty Dir is treated as ".". +type Dir string + +func (d Dir) resolve(name string) string { + // This implementation is based on Dir.Open's code in the standard net/http package. + if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || + strings.Contains(name, "\x00") { + return "" + } + dir := string(d) + if dir == "" { + dir = "." + } + return filepath.Join(dir, filepath.FromSlash(slashClean(name))) +} + +func (d Dir) Mkdir(ctx context.Context, name string, perm os.FileMode) error { + if name = d.resolve(name); name == "" { + return os.ErrNotExist + } + return os.Mkdir(name, perm) +} + +func (d Dir) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) { + if name = d.resolve(name); name == "" { + return nil, os.ErrNotExist + } + f, err := os.OpenFile(name, flag, perm) + if err != nil { + return nil, err + } + return f, nil +} + +func (d Dir) RemoveAll(ctx context.Context, name string) error { + if name = d.resolve(name); name == "" { + return os.ErrNotExist + } + if name == filepath.Clean(string(d)) { + // Prohibit removing the virtual root directory. 
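+		// (For example, with Dir("/srv/data") -- a hypothetical root --
+		// RemoveAll(ctx, "/") resolves to "/srv/data" itself, which
+		// must stay put.)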
+ return os.ErrInvalid + } + return os.RemoveAll(name) +} + +func (d Dir) Rename(ctx context.Context, oldName, newName string) error { + if oldName = d.resolve(oldName); oldName == "" { + return os.ErrNotExist + } + if newName = d.resolve(newName); newName == "" { + return os.ErrNotExist + } + if root := filepath.Clean(string(d)); root == oldName || root == newName { + // Prohibit renaming from or to the virtual root directory. + return os.ErrInvalid + } + return os.Rename(oldName, newName) +} + +func (d Dir) Stat(ctx context.Context, name string) (os.FileInfo, error) { + if name = d.resolve(name); name == "" { + return nil, os.ErrNotExist + } + return os.Stat(name) +} + +// NewMemFS returns a new in-memory FileSystem implementation. +func NewMemFS() FileSystem { + return &memFS{ + root: memFSNode{ + children: make(map[string]*memFSNode), + mode: 0660 | os.ModeDir, + modTime: time.Now(), + }, + } +} + +// A memFS implements FileSystem, storing all metadata and actual file data +// in-memory. No limits on filesystem size are used, so it is not recommended +// this be used where the clients are untrusted. +// +// Concurrent access is permitted. The tree structure is protected by a mutex, +// and each node's contents and metadata are protected by a per-node mutex. +// +// TODO: Enforce file permissions. +type memFS struct { + mu sync.Mutex + root memFSNode +} + +// TODO: clean up and rationalize the walk/find code. + +// walk walks the directory tree for the fullname, calling f at each step. If f +// returns an error, the walk will be aborted and return that same error. +// +// dir is the directory at that step, frag is the name fragment, and final is +// whether it is the final step. For example, walking "/foo/bar/x" will result +// in 3 calls to f: +// - "/", "foo", false +// - "/foo/", "bar", false +// - "/foo/bar/", "x", true +// The frag argument will be empty only if dir is the root node and the walk +// ends at that root node. +func (fs *memFS) walk(op, fullname string, f func(dir *memFSNode, frag string, final bool) error) error { + original := fullname + fullname = slashClean(fullname) + + // Strip any leading "/"s to make fullname a relative path, as the walk + // starts at fs.root. + if fullname[0] == '/' { + fullname = fullname[1:] + } + dir := &fs.root + + for { + frag, remaining := fullname, "" + i := strings.IndexRune(fullname, '/') + final := i < 0 + if !final { + frag, remaining = fullname[:i], fullname[i+1:] + } + if frag == "" && dir != &fs.root { + panic("webdav: empty path fragment for a clean path") + } + if err := f(dir, frag, final); err != nil { + return &os.PathError{ + Op: op, + Path: original, + Err: err, + } + } + if final { + break + } + child := dir.children[frag] + if child == nil { + return &os.PathError{ + Op: op, + Path: original, + Err: os.ErrNotExist, + } + } + if !child.mode.IsDir() { + return &os.PathError{ + Op: op, + Path: original, + Err: os.ErrInvalid, + } + } + dir, fullname = child, remaining + } + return nil +} + +// find returns the parent of the named node and the relative name fragment +// from the parent to the child. For example, if finding "/foo/bar/baz" then +// parent will be the node for "/foo/bar" and frag will be "baz". +// +// If the fullname names the root node, then parent, frag and err will be zero. +// +// find returns an error if the parent does not already exist or the parent +// isn't a directory, but it will not return an error per se if the child does +// not already exist. 
The error returned is either nil or an *os.PathError +// whose Op is op. +func (fs *memFS) find(op, fullname string) (parent *memFSNode, frag string, err error) { + err = fs.walk(op, fullname, func(parent0 *memFSNode, frag0 string, final bool) error { + if !final { + return nil + } + if frag0 != "" { + parent, frag = parent0, frag0 + } + return nil + }) + return parent, frag, err +} + +func (fs *memFS) Mkdir(ctx context.Context, name string, perm os.FileMode) error { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("mkdir", name) + if err != nil { + return err + } + if dir == nil { + // We can't create the root. + return os.ErrInvalid + } + if _, ok := dir.children[frag]; ok { + return os.ErrExist + } + dir.children[frag] = &memFSNode{ + children: make(map[string]*memFSNode), + mode: perm.Perm() | os.ModeDir, + modTime: time.Now(), + } + return nil +} + +func (fs *memFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("open", name) + if err != nil { + return nil, err + } + var n *memFSNode + if dir == nil { + // We're opening the root. + if flag&(os.O_WRONLY|os.O_RDWR) != 0 { + return nil, os.ErrPermission + } + n, frag = &fs.root, "/" + + } else { + n = dir.children[frag] + if flag&(os.O_SYNC|os.O_APPEND) != 0 { + // memFile doesn't support these flags yet. + return nil, os.ErrInvalid + } + if flag&os.O_CREATE != 0 { + if flag&os.O_EXCL != 0 && n != nil { + return nil, os.ErrExist + } + if n == nil { + n = &memFSNode{ + mode: perm.Perm(), + } + dir.children[frag] = n + } + } + if n == nil { + return nil, os.ErrNotExist + } + if flag&(os.O_WRONLY|os.O_RDWR) != 0 && flag&os.O_TRUNC != 0 { + n.mu.Lock() + n.data = nil + n.mu.Unlock() + } + } + + children := make([]os.FileInfo, 0, len(n.children)) + for cName, c := range n.children { + children = append(children, c.stat(cName)) + } + return &memFile{ + n: n, + nameSnapshot: frag, + childrenSnapshot: children, + }, nil +} + +func (fs *memFS) RemoveAll(ctx context.Context, name string) error { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("remove", name) + if err != nil { + return err + } + if dir == nil { + // We can't remove the root. + return os.ErrInvalid + } + delete(dir.children, frag) + return nil +} + +func (fs *memFS) Rename(ctx context.Context, oldName, newName string) error { + fs.mu.Lock() + defer fs.mu.Unlock() + + oldName = slashClean(oldName) + newName = slashClean(newName) + if oldName == newName { + return nil + } + if strings.HasPrefix(newName, oldName+"/") { + // We can't rename oldName to be a sub-directory of itself. + return os.ErrInvalid + } + + oDir, oFrag, err := fs.find("rename", oldName) + if err != nil { + return err + } + if oDir == nil { + // We can't rename from the root. + return os.ErrInvalid + } + + nDir, nFrag, err := fs.find("rename", newName) + if err != nil { + return err + } + if nDir == nil { + // We can't rename to the root. 
+ return os.ErrInvalid + } + + oNode, ok := oDir.children[oFrag] + if !ok { + return os.ErrNotExist + } + if oNode.children != nil { + if nNode, ok := nDir.children[nFrag]; ok { + if nNode.children == nil { + return errNotADirectory + } + if len(nNode.children) != 0 { + return errDirectoryNotEmpty + } + } + } + delete(oDir.children, oFrag) + nDir.children[nFrag] = oNode + return nil +} + +func (fs *memFS) Stat(ctx context.Context, name string) (os.FileInfo, error) { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("stat", name) + if err != nil { + return nil, err + } + if dir == nil { + // We're stat'ting the root. + return fs.root.stat("/"), nil + } + if n, ok := dir.children[frag]; ok { + return n.stat(path.Base(name)), nil + } + return nil, os.ErrNotExist +} + +// A memFSNode represents a single entry in the in-memory filesystem and also +// implements os.FileInfo. +type memFSNode struct { + // children is protected by memFS.mu. + children map[string]*memFSNode + + mu sync.Mutex + data []byte + mode os.FileMode + modTime time.Time + deadProps map[xml.Name]Property +} + +func (n *memFSNode) stat(name string) *memFileInfo { + n.mu.Lock() + defer n.mu.Unlock() + return &memFileInfo{ + name: name, + size: int64(len(n.data)), + mode: n.mode, + modTime: n.modTime, + } +} + +func (n *memFSNode) DeadProps() (map[xml.Name]Property, error) { + n.mu.Lock() + defer n.mu.Unlock() + if len(n.deadProps) == 0 { + return nil, nil + } + ret := make(map[xml.Name]Property, len(n.deadProps)) + for k, v := range n.deadProps { + ret[k] = v + } + return ret, nil +} + +func (n *memFSNode) Patch(patches []Proppatch) ([]Propstat, error) { + n.mu.Lock() + defer n.mu.Unlock() + pstat := Propstat{Status: http.StatusOK} + for _, patch := range patches { + for _, p := range patch.Props { + pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName}) + if patch.Remove { + delete(n.deadProps, p.XMLName) + continue + } + if n.deadProps == nil { + n.deadProps = map[xml.Name]Property{} + } + n.deadProps[p.XMLName] = p + } + } + return []Propstat{pstat}, nil +} + +type memFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (f *memFileInfo) Name() string { return f.name } +func (f *memFileInfo) Size() int64 { return f.size } +func (f *memFileInfo) Mode() os.FileMode { return f.mode } +func (f *memFileInfo) ModTime() time.Time { return f.modTime } +func (f *memFileInfo) IsDir() bool { return f.mode.IsDir() } +func (f *memFileInfo) Sys() interface{} { return nil } + +// A memFile is a File implementation for a memFSNode. It is a per-file (not +// per-node) read/write position, and a snapshot of the memFS' tree structure +// (a node's name and children) for that node. +type memFile struct { + n *memFSNode + nameSnapshot string + childrenSnapshot []os.FileInfo + // pos is protected by n.mu. + pos int +} + +// A *memFile implements the optional DeadPropsHolder interface. 
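+//
+// The blank-identifier declaration below is a compile-time assertion: the
+// package fails to build if *memFile ever stops satisfying DeadPropsHolder,
+// at no run-time cost and without introducing any package-level state.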
+var _ DeadPropsHolder = (*memFile)(nil) + +func (f *memFile) DeadProps() (map[xml.Name]Property, error) { return f.n.DeadProps() } +func (f *memFile) Patch(patches []Proppatch) ([]Propstat, error) { return f.n.Patch(patches) } + +func (f *memFile) Close() error { + return nil +} + +func (f *memFile) Read(p []byte) (int, error) { + f.n.mu.Lock() + defer f.n.mu.Unlock() + if f.n.mode.IsDir() { + return 0, os.ErrInvalid + } + if f.pos >= len(f.n.data) { + return 0, io.EOF + } + n := copy(p, f.n.data[f.pos:]) + f.pos += n + return n, nil +} + +func (f *memFile) Readdir(count int) ([]os.FileInfo, error) { + f.n.mu.Lock() + defer f.n.mu.Unlock() + if !f.n.mode.IsDir() { + return nil, os.ErrInvalid + } + old := f.pos + if old >= len(f.childrenSnapshot) { + // The os.File Readdir docs say that at the end of a directory, + // the error is io.EOF if count > 0 and nil if count <= 0. + if count > 0 { + return nil, io.EOF + } + return nil, nil + } + if count > 0 { + f.pos += count + if f.pos > len(f.childrenSnapshot) { + f.pos = len(f.childrenSnapshot) + } + } else { + f.pos = len(f.childrenSnapshot) + old = 0 + } + return f.childrenSnapshot[old:f.pos], nil +} + +func (f *memFile) Seek(offset int64, whence int) (int64, error) { + f.n.mu.Lock() + defer f.n.mu.Unlock() + npos := f.pos + // TODO: How to handle offsets greater than the size of system int? + switch whence { + case os.SEEK_SET: + npos = int(offset) + case os.SEEK_CUR: + npos += int(offset) + case os.SEEK_END: + npos = len(f.n.data) + int(offset) + default: + npos = -1 + } + if npos < 0 { + return 0, os.ErrInvalid + } + f.pos = npos + return int64(f.pos), nil +} + +func (f *memFile) Stat() (os.FileInfo, error) { + return f.n.stat(f.nameSnapshot), nil +} + +func (f *memFile) Write(p []byte) (int, error) { + lenp := len(p) + f.n.mu.Lock() + defer f.n.mu.Unlock() + + if f.n.mode.IsDir() { + return 0, os.ErrInvalid + } + if f.pos < len(f.n.data) { + n := copy(f.n.data[f.pos:], p) + f.pos += n + p = p[n:] + } else if f.pos > len(f.n.data) { + // Write permits the creation of holes, if we've seek'ed past the + // existing end of file. + if f.pos <= cap(f.n.data) { + oldLen := len(f.n.data) + f.n.data = f.n.data[:f.pos] + hole := f.n.data[oldLen:] + for i := range hole { + hole[i] = 0 + } + } else { + d := make([]byte, f.pos, f.pos+len(p)) + copy(d, f.n.data) + f.n.data = d + } + } + + if len(p) > 0 { + // We should only get here if f.pos == len(f.n.data). + f.n.data = append(f.n.data, p...) + f.pos = len(f.n.data) + } + f.n.modTime = time.Now() + return lenp, nil +} + +// moveFiles moves files and/or directories from src to dst. +// +// See section 9.9.4 for when various HTTP status codes apply. +func moveFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool) (status int, err error) { + created := false + if _, err := fs.Stat(ctx, dst); err != nil { + if !os.IsNotExist(err) { + return http.StatusForbidden, err + } + created = true + } else if overwrite { + // Section 9.9.3 says that "If a resource exists at the destination + // and the Overwrite header is "T", then prior to performing the move, + // the server must perform a DELETE with "Depth: infinity" on the + // destination resource. 
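+		// The RemoveAll call below performs that mandated delete; any
+		// failure is reported as a 403 Forbidden rather than as a
+		// partially completed move.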
+ if err := fs.RemoveAll(ctx, dst); err != nil { + return http.StatusForbidden, err + } + } else { + return http.StatusPreconditionFailed, os.ErrExist + } + if err := fs.Rename(ctx, src, dst); err != nil { + return http.StatusForbidden, err + } + if created { + return http.StatusCreated, nil + } + return http.StatusNoContent, nil +} + +func copyProps(dst, src File) error { + d, ok := dst.(DeadPropsHolder) + if !ok { + return nil + } + s, ok := src.(DeadPropsHolder) + if !ok { + return nil + } + m, err := s.DeadProps() + if err != nil { + return err + } + props := make([]Property, 0, len(m)) + for _, prop := range m { + props = append(props, prop) + } + _, err = d.Patch([]Proppatch{{Props: props}}) + return err +} + +// copyFiles copies files and/or directories from src to dst. +// +// See section 9.8.5 for when various HTTP status codes apply. +func copyFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool, depth int, recursion int) (status int, err error) { + if recursion == 1000 { + return http.StatusInternalServerError, errRecursionTooDeep + } + recursion++ + + // TODO: section 9.8.3 says that "Note that an infinite-depth COPY of /A/ + // into /A/B/ could lead to infinite recursion if not handled correctly." + + srcFile, err := fs.OpenFile(ctx, src, os.O_RDONLY, 0) + if err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusInternalServerError, err + } + defer srcFile.Close() + srcStat, err := srcFile.Stat() + if err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusInternalServerError, err + } + srcPerm := srcStat.Mode() & os.ModePerm + + created := false + if _, err := fs.Stat(ctx, dst); err != nil { + if os.IsNotExist(err) { + created = true + } else { + return http.StatusForbidden, err + } + } else { + if !overwrite { + return http.StatusPreconditionFailed, os.ErrExist + } + if err := fs.RemoveAll(ctx, dst); err != nil && !os.IsNotExist(err) { + return http.StatusForbidden, err + } + } + + if srcStat.IsDir() { + if err := fs.Mkdir(ctx, dst, srcPerm); err != nil { + return http.StatusForbidden, err + } + if depth == infiniteDepth { + children, err := srcFile.Readdir(-1) + if err != nil { + return http.StatusForbidden, err + } + for _, c := range children { + name := c.Name() + s := path.Join(src, name) + d := path.Join(dst, name) + cStatus, cErr := copyFiles(ctx, fs, s, d, overwrite, depth, recursion) + if cErr != nil { + // TODO: MultiStatus. + return cStatus, cErr + } + } + } + + } else { + dstFile, err := fs.OpenFile(ctx, dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, srcPerm) + if err != nil { + if os.IsNotExist(err) { + return http.StatusConflict, err + } + return http.StatusForbidden, err + + } + _, copyErr := io.Copy(dstFile, srcFile) + propsErr := copyProps(dstFile, srcFile) + closeErr := dstFile.Close() + if copyErr != nil { + return http.StatusInternalServerError, copyErr + } + if propsErr != nil { + return http.StatusInternalServerError, propsErr + } + if closeErr != nil { + return http.StatusInternalServerError, closeErr + } + } + + if created { + return http.StatusCreated, nil + } + return http.StatusNoContent, nil +} + +// walkFS traverses filesystem fs starting at name up to depth levels. +// +// Allowed values for depth are 0, 1 or infiniteDepth. For each visited node, +// walkFS calls walkFn. If a visited file system node is a directory and +// walkFn returns filepath.SkipDir, walkFS will skip traversal of this node. 
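+//
+// For example, a hypothetical sketch (not part of this patch) that prints
+// everything under /a to depth 1, skipping any directory named "tmp":
+//
+//	info, _ := fs.Stat(ctx, "/a")
+//	walkFS(ctx, fs, 1, "/a", info, func(name string, info os.FileInfo, err error) error {
+//		if err == nil && info.IsDir() && path.Base(name) == "tmp" {
+//			return filepath.SkipDir
+//		}
+//		fmt.Println(name)
+//		return err
+//	})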
+func walkFS(ctx context.Context, fs FileSystem, depth int, name string, info os.FileInfo, walkFn filepath.WalkFunc) error { + // This implementation is based on Walk's code in the standard path/filepath package. + err := walkFn(name, info, nil) + if err != nil { + if info.IsDir() && err == filepath.SkipDir { + return nil + } + return err + } + if !info.IsDir() || depth == 0 { + return nil + } + if depth == 1 { + depth = 0 + } + + // Read directory names. + f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) + if err != nil { + return walkFn(name, info, err) + } + fileInfos, err := f.Readdir(0) + f.Close() + if err != nil { + return walkFn(name, info, err) + } + + for _, fileInfo := range fileInfos { + filename := path.Join(name, fileInfo.Name()) + fileInfo, err := fs.Stat(ctx, filename) + if err != nil { + if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { + return err + } + } else { + err = walkFS(ctx, fs, depth, filename, fileInfo, walkFn) + if err != nil { + if !fileInfo.IsDir() || err != filepath.SkipDir { + return err + } + } + } + } + return nil +} diff --git a/vendor/golang.org/x/net/webdav/file_go1.6.go b/vendor/golang.org/x/net/webdav/file_go1.6.go new file mode 100644 index 0000000..fa38770 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/file_go1.6.go @@ -0,0 +1,17 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package webdav + +import ( + "net/http" + + "golang.org/x/net/context" +) + +func getContext(r *http.Request) context.Context { + return context.Background() +} diff --git a/vendor/golang.org/x/net/webdav/file_go1.7.go b/vendor/golang.org/x/net/webdav/file_go1.7.go new file mode 100644 index 0000000..d1c3de8 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/file_go1.7.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package webdav + +import ( + "context" + "net/http" +) + +func getContext(r *http.Request) context.Context { + return r.Context() +} diff --git a/vendor/golang.org/x/net/webdav/file_test.go b/vendor/golang.org/x/net/webdav/file_test.go new file mode 100644 index 0000000..bfd96e1 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/file_test.go @@ -0,0 +1,1184 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
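+//
+// The tests below cover slashClean, Dir.resolve, the in-memory FileSystem
+// (walk, find, and memFile's read/write/seek behavior), and the moveFiles,
+// copyFiles and walkFS helpers used by the MOVE, COPY and PROPFIND logic.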
+ +package webdav + +import ( + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestSlashClean(t *testing.T) { + testCases := []string{ + "", + ".", + "/", + "/./", + "//", + "//.", + "//a", + "/a", + "/a/b/c", + "/a//b/./../c/d/", + "a", + "a/b/c", + } + for _, tc := range testCases { + got := slashClean(tc) + want := path.Clean("/" + tc) + if got != want { + t.Errorf("tc=%q: got %q, want %q", tc, got, want) + } + } +} + +func TestDirResolve(t *testing.T) { + testCases := []struct { + dir, name, want string + }{ + {"/", "", "/"}, + {"/", "/", "/"}, + {"/", ".", "/"}, + {"/", "./a", "/a"}, + {"/", "..", "/"}, + {"/", "..", "/"}, + {"/", "../", "/"}, + {"/", "../.", "/"}, + {"/", "../a", "/a"}, + {"/", "../..", "/"}, + {"/", "../bar/a", "/bar/a"}, + {"/", "../baz/a", "/baz/a"}, + {"/", "...", "/..."}, + {"/", ".../a", "/.../a"}, + {"/", ".../..", "/"}, + {"/", "a", "/a"}, + {"/", "a/./b", "/a/b"}, + {"/", "a/../../b", "/b"}, + {"/", "a/../b", "/b"}, + {"/", "a/b", "/a/b"}, + {"/", "a/b/c/../../d", "/a/d"}, + {"/", "a/b/c/../../../d", "/d"}, + {"/", "a/b/c/../../../../d", "/d"}, + {"/", "a/b/c/d", "/a/b/c/d"}, + + {"/foo/bar", "", "/foo/bar"}, + {"/foo/bar", "/", "/foo/bar"}, + {"/foo/bar", ".", "/foo/bar"}, + {"/foo/bar", "./a", "/foo/bar/a"}, + {"/foo/bar", "..", "/foo/bar"}, + {"/foo/bar", "../", "/foo/bar"}, + {"/foo/bar", "../.", "/foo/bar"}, + {"/foo/bar", "../a", "/foo/bar/a"}, + {"/foo/bar", "../..", "/foo/bar"}, + {"/foo/bar", "../bar/a", "/foo/bar/bar/a"}, + {"/foo/bar", "../baz/a", "/foo/bar/baz/a"}, + {"/foo/bar", "...", "/foo/bar/..."}, + {"/foo/bar", ".../a", "/foo/bar/.../a"}, + {"/foo/bar", ".../..", "/foo/bar"}, + {"/foo/bar", "a", "/foo/bar/a"}, + {"/foo/bar", "a/./b", "/foo/bar/a/b"}, + {"/foo/bar", "a/../../b", "/foo/bar/b"}, + {"/foo/bar", "a/../b", "/foo/bar/b"}, + {"/foo/bar", "a/b", "/foo/bar/a/b"}, + {"/foo/bar", "a/b/c/../../d", "/foo/bar/a/d"}, + {"/foo/bar", "a/b/c/../../../d", "/foo/bar/d"}, + {"/foo/bar", "a/b/c/../../../../d", "/foo/bar/d"}, + {"/foo/bar", "a/b/c/d", "/foo/bar/a/b/c/d"}, + + {"/foo/bar/", "", "/foo/bar"}, + {"/foo/bar/", "/", "/foo/bar"}, + {"/foo/bar/", ".", "/foo/bar"}, + {"/foo/bar/", "./a", "/foo/bar/a"}, + {"/foo/bar/", "..", "/foo/bar"}, + + {"/foo//bar///", "", "/foo/bar"}, + {"/foo//bar///", "/", "/foo/bar"}, + {"/foo//bar///", ".", "/foo/bar"}, + {"/foo//bar///", "./a", "/foo/bar/a"}, + {"/foo//bar///", "..", "/foo/bar"}, + + {"/x/y/z", "ab/c\x00d/ef", ""}, + + {".", "", "."}, + {".", "/", "."}, + {".", ".", "."}, + {".", "./a", "a"}, + {".", "..", "."}, + {".", "..", "."}, + {".", "../", "."}, + {".", "../.", "."}, + {".", "../a", "a"}, + {".", "../..", "."}, + {".", "../bar/a", "bar/a"}, + {".", "../baz/a", "baz/a"}, + {".", "...", "..."}, + {".", ".../a", ".../a"}, + {".", ".../..", "."}, + {".", "a", "a"}, + {".", "a/./b", "a/b"}, + {".", "a/../../b", "b"}, + {".", "a/../b", "b"}, + {".", "a/b", "a/b"}, + {".", "a/b/c/../../d", "a/d"}, + {".", "a/b/c/../../../d", "d"}, + {".", "a/b/c/../../../../d", "d"}, + {".", "a/b/c/d", "a/b/c/d"}, + + {"", "", "."}, + {"", "/", "."}, + {"", ".", "."}, + {"", "./a", "a"}, + {"", "..", "."}, + } + + for _, tc := range testCases { + d := Dir(filepath.FromSlash(tc.dir)) + if got := filepath.ToSlash(d.resolve(tc.name)); got != tc.want { + t.Errorf("dir=%q, name=%q: got %q, want %q", tc.dir, tc.name, got, tc.want) + } + } +} + +func TestWalk(t *testing.T) { + 
type walkStep struct { + name, frag string + final bool + } + + testCases := []struct { + dir string + want []walkStep + }{ + {"", []walkStep{ + {"", "", true}, + }}, + {"/", []walkStep{ + {"", "", true}, + }}, + {"/a", []walkStep{ + {"", "a", true}, + }}, + {"/a/", []walkStep{ + {"", "a", true}, + }}, + {"/a/b", []walkStep{ + {"", "a", false}, + {"a", "b", true}, + }}, + {"/a/b/", []walkStep{ + {"", "a", false}, + {"a", "b", true}, + }}, + {"/a/b/c", []walkStep{ + {"", "a", false}, + {"a", "b", false}, + {"b", "c", true}, + }}, + // The following test case is the one mentioned explicitly + // in the method description. + {"/foo/bar/x", []walkStep{ + {"", "foo", false}, + {"foo", "bar", false}, + {"bar", "x", true}, + }}, + } + + ctx := context.Background() + + for _, tc := range testCases { + fs := NewMemFS().(*memFS) + + parts := strings.Split(tc.dir, "/") + for p := 2; p < len(parts); p++ { + d := strings.Join(parts[:p], "/") + if err := fs.Mkdir(ctx, d, 0666); err != nil { + t.Errorf("tc.dir=%q: mkdir: %q: %v", tc.dir, d, err) + } + } + + i, prevFrag := 0, "" + err := fs.walk("test", tc.dir, func(dir *memFSNode, frag string, final bool) error { + got := walkStep{ + name: prevFrag, + frag: frag, + final: final, + } + want := tc.want[i] + + if got != want { + return fmt.Errorf("got %+v, want %+v", got, want) + } + i, prevFrag = i+1, frag + return nil + }) + if err != nil { + t.Errorf("tc.dir=%q: %v", tc.dir, err) + } + } +} + +// find appends to ss the names of the named file and its children. It is +// analogous to the Unix find command. +// +// The returned strings are not guaranteed to be in any particular order. +func find(ctx context.Context, ss []string, fs FileSystem, name string) ([]string, error) { + stat, err := fs.Stat(ctx, name) + if err != nil { + return nil, err + } + ss = append(ss, name) + if stat.IsDir() { + f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) + if err != nil { + return nil, err + } + defer f.Close() + children, err := f.Readdir(-1) + if err != nil { + return nil, err + } + for _, c := range children { + ss, err = find(ctx, ss, fs, path.Join(name, c.Name())) + if err != nil { + return nil, err + } + } + } + return ss, nil +} + +func testFS(t *testing.T, fs FileSystem) { + errStr := func(err error) string { + switch { + case os.IsExist(err): + return "errExist" + case os.IsNotExist(err): + return "errNotExist" + case err != nil: + return "err" + } + return "ok" + } + + // The non-"find" non-"stat" test cases should change the file system state. The + // indentation of the "find"s and "stat"s helps distinguish such test cases. 
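+	//
+	// Each test case is one line. Most lines have the form
+	//	op args... want result
+	// where result is ok, err, errExist, errNotExist, a decimal file
+	// size, or dir. "find" lines instead list every name expected in the
+	// tree. On move__ and copy__, o=F/o=T is the Overwrite flag, and
+	// d=0/d=∞ on copy__ is the Depth of the copy.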
+ testCases := []string{ + " stat / want dir", + " stat /a want errNotExist", + " stat /d want errNotExist", + " stat /d/e want errNotExist", + "create /a A want ok", + " stat /a want 1", + "create /d/e EEE want errNotExist", + "mk-dir /a want errExist", + "mk-dir /d/m want errNotExist", + "mk-dir /d want ok", + " stat /d want dir", + "create /d/e EEE want ok", + " stat /d/e want 3", + " find / /a /d /d/e", + "create /d/f FFFF want ok", + "create /d/g GGGGGGG want ok", + "mk-dir /d/m want ok", + "mk-dir /d/m want errExist", + "create /d/m/p PPPPP want ok", + " stat /d/e want 3", + " stat /d/f want 4", + " stat /d/g want 7", + " stat /d/h want errNotExist", + " stat /d/m want dir", + " stat /d/m/p want 5", + " find / /a /d /d/e /d/f /d/g /d/m /d/m/p", + "rm-all /d want ok", + " stat /a want 1", + " stat /d want errNotExist", + " stat /d/e want errNotExist", + " stat /d/f want errNotExist", + " stat /d/g want errNotExist", + " stat /d/m want errNotExist", + " stat /d/m/p want errNotExist", + " find / /a", + "mk-dir /d/m want errNotExist", + "mk-dir /d want ok", + "create /d/f FFFF want ok", + "rm-all /d/f want ok", + "mk-dir /d/m want ok", + "rm-all /z want ok", + "rm-all / want err", + "create /b BB want ok", + " stat / want dir", + " stat /a want 1", + " stat /b want 2", + " stat /c want errNotExist", + " stat /d want dir", + " stat /d/m want dir", + " find / /a /b /d /d/m", + "move__ o=F /b /c want ok", + " stat /b want errNotExist", + " stat /c want 2", + " stat /d/m want dir", + " stat /d/n want errNotExist", + " find / /a /c /d /d/m", + "move__ o=F /d/m /d/n want ok", + "create /d/n/q QQQQ want ok", + " stat /d/m want errNotExist", + " stat /d/n want dir", + " stat /d/n/q want 4", + "move__ o=F /d /d/n/z want err", + "move__ o=T /c /d/n/q want ok", + " stat /c want errNotExist", + " stat /d/n/q want 2", + " find / /a /d /d/n /d/n/q", + "create /d/n/r RRRRR want ok", + "mk-dir /u want ok", + "mk-dir /u/v want ok", + "move__ o=F /d/n /u want errExist", + "create /t TTTTTT want ok", + "move__ o=F /d/n /t want errExist", + "rm-all /t want ok", + "move__ o=F /d/n /t want ok", + " stat /d want dir", + " stat /d/n want errNotExist", + " stat /d/n/r want errNotExist", + " stat /t want dir", + " stat /t/q want 2", + " stat /t/r want 5", + " find / /a /d /t /t/q /t/r /u /u/v", + "move__ o=F /t / want errExist", + "move__ o=T /t /u/v want ok", + " stat /u/v/r want 5", + "move__ o=F / /z want err", + " find / /a /d /u /u/v /u/v/q /u/v/r", + " stat /a want 1", + " stat /b want errNotExist", + " stat /c want errNotExist", + " stat /u/v/r want 5", + "copy__ o=F d=0 /a /b want ok", + "copy__ o=T d=0 /a /c want ok", + " stat /a want 1", + " stat /b want 1", + " stat /c want 1", + " stat /u/v/r want 5", + "copy__ o=F d=0 /u/v/r /b want errExist", + " stat /b want 1", + "copy__ o=T d=0 /u/v/r /b want ok", + " stat /a want 1", + " stat /b want 5", + " stat /u/v/r want 5", + "rm-all /a want ok", + "rm-all /b want ok", + "mk-dir /u/v/w want ok", + "create /u/v/w/s SSSSSSSS want ok", + " stat /d want dir", + " stat /d/x want errNotExist", + " stat /d/y want errNotExist", + " stat /u/v/r want 5", + " stat /u/v/w/s want 8", + " find / /c /d /u /u/v /u/v/q /u/v/r /u/v/w /u/v/w/s", + "copy__ o=T d=0 /u/v /d/x want ok", + "copy__ o=T d=∞ /u/v /d/y want ok", + "rm-all /u want ok", + " stat /d/x want dir", + " stat /d/x/q want errNotExist", + " stat /d/x/r want errNotExist", + " stat /d/x/w want errNotExist", + " stat /d/x/w/s want errNotExist", + " stat /d/y want dir", + " stat /d/y/q want 2", + " stat /d/y/r want 
5", + " stat /d/y/w want dir", + " stat /d/y/w/s want 8", + " stat /u want errNotExist", + " find / /c /d /d/x /d/y /d/y/q /d/y/r /d/y/w /d/y/w/s", + "copy__ o=F d=∞ /d/y /d/x want errExist", + } + + ctx := context.Background() + + for i, tc := range testCases { + tc = strings.TrimSpace(tc) + j := strings.IndexByte(tc, ' ') + if j < 0 { + t.Fatalf("test case #%d %q: invalid command", i, tc) + } + op, arg := tc[:j], tc[j+1:] + + switch op { + default: + t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op) + + case "create": + parts := strings.Split(arg, " ") + if len(parts) != 4 || parts[2] != "want" { + t.Fatalf("test case #%d %q: invalid write", i, tc) + } + f, opErr := fs.OpenFile(ctx, parts[0], os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if got := errStr(opErr); got != parts[3] { + t.Fatalf("test case #%d %q: OpenFile: got %q (%v), want %q", i, tc, got, opErr, parts[3]) + } + if f != nil { + if _, err := f.Write([]byte(parts[1])); err != nil { + t.Fatalf("test case #%d %q: Write: %v", i, tc, err) + } + if err := f.Close(); err != nil { + t.Fatalf("test case #%d %q: Close: %v", i, tc, err) + } + } + + case "find": + got, err := find(ctx, nil, fs, "/") + if err != nil { + t.Fatalf("test case #%d %q: find: %v", i, tc, err) + } + sort.Strings(got) + want := strings.Split(arg, " ") + if !reflect.DeepEqual(got, want) { + t.Fatalf("test case #%d %q:\ngot %s\nwant %s", i, tc, got, want) + } + + case "copy__", "mk-dir", "move__", "rm-all", "stat": + nParts := 3 + switch op { + case "copy__": + nParts = 6 + case "move__": + nParts = 5 + } + parts := strings.Split(arg, " ") + if len(parts) != nParts { + t.Fatalf("test case #%d %q: invalid %s", i, tc, op) + } + + got, opErr := "", error(nil) + switch op { + case "copy__": + depth := 0 + if parts[1] == "d=∞" { + depth = infiniteDepth + } + _, opErr = copyFiles(ctx, fs, parts[2], parts[3], parts[0] == "o=T", depth, 0) + case "mk-dir": + opErr = fs.Mkdir(ctx, parts[0], 0777) + case "move__": + _, opErr = moveFiles(ctx, fs, parts[1], parts[2], parts[0] == "o=T") + case "rm-all": + opErr = fs.RemoveAll(ctx, parts[0]) + case "stat": + var stat os.FileInfo + fileName := parts[0] + if stat, opErr = fs.Stat(ctx, fileName); opErr == nil { + if stat.IsDir() { + got = "dir" + } else { + got = strconv.Itoa(int(stat.Size())) + } + + if fileName == "/" { + // For a Dir FileSystem, the virtual file system root maps to a + // real file system name like "/tmp/webdav-test012345", which does + // not end with "/". We skip such cases. 
+ } else if statName := stat.Name(); path.Base(fileName) != statName { + t.Fatalf("test case #%d %q: file name %q inconsistent with stat name %q", + i, tc, fileName, statName) + } + } + } + if got == "" { + got = errStr(opErr) + } + + if parts[len(parts)-2] != "want" { + t.Fatalf("test case #%d %q: invalid %s", i, tc, op) + } + if want := parts[len(parts)-1]; got != want { + t.Fatalf("test case #%d %q: got %q (%v), want %q", i, tc, got, opErr, want) + } + } + } +} + +func TestDir(t *testing.T) { + switch runtime.GOOS { + case "nacl": + t.Skip("see golang.org/issue/12004") + case "plan9": + t.Skip("see golang.org/issue/11453") + } + + td, err := ioutil.TempDir("", "webdav-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(td) + testFS(t, Dir(td)) +} + +func TestMemFS(t *testing.T) { + testFS(t, NewMemFS()) +} + +func TestMemFSRoot(t *testing.T) { + ctx := context.Background() + fs := NewMemFS() + for i := 0; i < 5; i++ { + stat, err := fs.Stat(ctx, "/") + if err != nil { + t.Fatalf("i=%d: Stat: %v", i, err) + } + if !stat.IsDir() { + t.Fatalf("i=%d: Stat.IsDir is false, want true", i) + } + + f, err := fs.OpenFile(ctx, "/", os.O_RDONLY, 0) + if err != nil { + t.Fatalf("i=%d: OpenFile: %v", i, err) + } + defer f.Close() + children, err := f.Readdir(-1) + if err != nil { + t.Fatalf("i=%d: Readdir: %v", i, err) + } + if len(children) != i { + t.Fatalf("i=%d: got %d children, want %d", i, len(children), i) + } + + if _, err := f.Write(make([]byte, 1)); err == nil { + t.Fatalf("i=%d: Write: got nil error, want non-nil", i) + } + + if err := fs.Mkdir(ctx, fmt.Sprintf("/dir%d", i), 0777); err != nil { + t.Fatalf("i=%d: Mkdir: %v", i, err) + } + } +} + +func TestMemFileReaddir(t *testing.T) { + ctx := context.Background() + fs := NewMemFS() + if err := fs.Mkdir(ctx, "/foo", 0777); err != nil { + t.Fatalf("Mkdir: %v", err) + } + readdir := func(count int) ([]os.FileInfo, error) { + f, err := fs.OpenFile(ctx, "/foo", os.O_RDONLY, 0) + if err != nil { + t.Fatalf("OpenFile: %v", err) + } + defer f.Close() + return f.Readdir(count) + } + if got, err := readdir(-1); len(got) != 0 || err != nil { + t.Fatalf("readdir(-1): got %d fileInfos with err=%v, want 0, ", len(got), err) + } + if got, err := readdir(+1); len(got) != 0 || err != io.EOF { + t.Fatalf("readdir(+1): got %d fileInfos with err=%v, want 0, EOF", len(got), err) + } +} + +func TestMemFile(t *testing.T) { + testCases := []string{ + "wantData ", + "wantSize 0", + "write abc", + "wantData abc", + "write de", + "wantData abcde", + "wantSize 5", + "write 5*x", + "write 4*y+2*z", + "write 3*st", + "wantData abcdexxxxxyyyyzzststst", + "wantSize 22", + "seek set 4 want 4", + "write EFG", + "wantData abcdEFGxxxyyyyzzststst", + "wantSize 22", + "seek set 2 want 2", + "read cdEF", + "read Gx", + "seek cur 0 want 8", + "seek cur 2 want 10", + "seek cur -1 want 9", + "write J", + "wantData abcdEFGxxJyyyyzzststst", + "wantSize 22", + "seek cur -4 want 6", + "write ghijk", + "wantData abcdEFghijkyyyzzststst", + "wantSize 22", + "read yyyz", + "seek cur 0 want 15", + "write ", + "seek cur 0 want 15", + "read ", + "seek cur 0 want 15", + "seek end -3 want 19", + "write ZZ", + "wantData abcdEFghijkyyyzzstsZZt", + "wantSize 22", + "write 4*A", + "wantData abcdEFghijkyyyzzstsZZAAAA", + "wantSize 25", + "seek end 0 want 25", + "seek end -5 want 20", + "read Z+4*A", + "write 5*B", + "wantData abcdEFghijkyyyzzstsZZAAAABBBBB", + "wantSize 30", + "seek end 10 want 40", + "write C", + "wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........C", + "wantSize 41", 
+ "write D", + "wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........CD", + "wantSize 42", + "seek set 43 want 43", + "write E", + "wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........CD.E", + "wantSize 44", + "seek set 0 want 0", + "write 5*123456789_", + "wantData 123456789_123456789_123456789_123456789_123456789_", + "wantSize 50", + "seek cur 0 want 50", + "seek cur -99 want err", + } + + ctx := context.Background() + + const filename = "/foo" + fs := NewMemFS() + f, err := fs.OpenFile(ctx, filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + t.Fatalf("OpenFile: %v", err) + } + defer f.Close() + + for i, tc := range testCases { + j := strings.IndexByte(tc, ' ') + if j < 0 { + t.Fatalf("test case #%d %q: invalid command", i, tc) + } + op, arg := tc[:j], tc[j+1:] + + // Expand an arg like "3*a+2*b" to "aaabb". + parts := strings.Split(arg, "+") + for j, part := range parts { + if k := strings.IndexByte(part, '*'); k >= 0 { + repeatCount, repeatStr := part[:k], part[k+1:] + n, err := strconv.Atoi(repeatCount) + if err != nil { + t.Fatalf("test case #%d %q: invalid repeat count %q", i, tc, repeatCount) + } + parts[j] = strings.Repeat(repeatStr, n) + } + } + arg = strings.Join(parts, "") + + switch op { + default: + t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op) + + case "read": + buf := make([]byte, len(arg)) + if _, err := io.ReadFull(f, buf); err != nil { + t.Fatalf("test case #%d %q: ReadFull: %v", i, tc, err) + } + if got := string(buf); got != arg { + t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, arg) + } + + case "seek": + parts := strings.Split(arg, " ") + if len(parts) != 4 { + t.Fatalf("test case #%d %q: invalid seek", i, tc) + } + + whence := 0 + switch parts[0] { + default: + t.Fatalf("test case #%d %q: invalid seek whence", i, tc) + case "set": + whence = os.SEEK_SET + case "cur": + whence = os.SEEK_CUR + case "end": + whence = os.SEEK_END + } + offset, err := strconv.Atoi(parts[1]) + if err != nil { + t.Fatalf("test case #%d %q: invalid offset %q", i, tc, parts[1]) + } + + if parts[2] != "want" { + t.Fatalf("test case #%d %q: invalid seek", i, tc) + } + if parts[3] == "err" { + _, err := f.Seek(int64(offset), whence) + if err == nil { + t.Fatalf("test case #%d %q: Seek returned nil error, want non-nil", i, tc) + } + } else { + got, err := f.Seek(int64(offset), whence) + if err != nil { + t.Fatalf("test case #%d %q: Seek: %v", i, tc, err) + } + want, err := strconv.Atoi(parts[3]) + if err != nil { + t.Fatalf("test case #%d %q: invalid want %q", i, tc, parts[3]) + } + if got != int64(want) { + t.Fatalf("test case #%d %q: got %d, want %d", i, tc, got, want) + } + } + + case "write": + n, err := f.Write([]byte(arg)) + if err != nil { + t.Fatalf("test case #%d %q: write: %v", i, tc, err) + } + if n != len(arg) { + t.Fatalf("test case #%d %q: write returned %d bytes, want %d", i, tc, n, len(arg)) + } + + case "wantData": + g, err := fs.OpenFile(ctx, filename, os.O_RDONLY, 0666) + if err != nil { + t.Fatalf("test case #%d %q: OpenFile: %v", i, tc, err) + } + gotBytes, err := ioutil.ReadAll(g) + if err != nil { + t.Fatalf("test case #%d %q: ReadAll: %v", i, tc, err) + } + for i, c := range gotBytes { + if c == '\x00' { + gotBytes[i] = '.' 
+ } + } + got := string(gotBytes) + if got != arg { + t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, arg) + } + if err := g.Close(); err != nil { + t.Fatalf("test case #%d %q: Close: %v", i, tc, err) + } + + case "wantSize": + n, err := strconv.Atoi(arg) + if err != nil { + t.Fatalf("test case #%d %q: invalid size %q", i, tc, arg) + } + fi, err := fs.Stat(ctx, filename) + if err != nil { + t.Fatalf("test case #%d %q: Stat: %v", i, tc, err) + } + if got, want := fi.Size(), int64(n); got != want { + t.Fatalf("test case #%d %q: got %d, want %d", i, tc, got, want) + } + } + } +} + +// TestMemFileWriteAllocs tests that writing N consecutive 1KiB chunks to a +// memFile doesn't allocate a new buffer for each of those N times. Otherwise, +// calling io.Copy(aMemFile, src) is likely to have quadratic complexity. +func TestMemFileWriteAllocs(t *testing.T) { + if runtime.Compiler == "gccgo" { + t.Skip("gccgo allocates here") + } + ctx := context.Background() + fs := NewMemFS() + f, err := fs.OpenFile(ctx, "/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + t.Fatalf("OpenFile: %v", err) + } + defer f.Close() + + xxx := make([]byte, 1024) + for i := range xxx { + xxx[i] = 'x' + } + + a := testing.AllocsPerRun(100, func() { + f.Write(xxx) + }) + // AllocsPerRun returns an integral value, so we compare the rounded-down + // number to zero. + if a > 0 { + t.Fatalf("%v allocs per run, want 0", a) + } +} + +func BenchmarkMemFileWrite(b *testing.B) { + ctx := context.Background() + fs := NewMemFS() + xxx := make([]byte, 1024) + for i := range xxx { + xxx[i] = 'x' + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + f, err := fs.OpenFile(ctx, "/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + b.Fatalf("OpenFile: %v", err) + } + for j := 0; j < 100; j++ { + f.Write(xxx) + } + if err := f.Close(); err != nil { + b.Fatalf("Close: %v", err) + } + if err := fs.RemoveAll(ctx, "/xxx"); err != nil { + b.Fatalf("RemoveAll: %v", err) + } + } +} + +func TestCopyMoveProps(t *testing.T) { + ctx := context.Background() + fs := NewMemFS() + create := func(name string) error { + f, err := fs.OpenFile(ctx, name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return err + } + _, wErr := f.Write([]byte("contents")) + cErr := f.Close() + if wErr != nil { + return wErr + } + return cErr + } + patch := func(name string, patches ...Proppatch) error { + f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0666) + if err != nil { + return err + } + _, pErr := f.(DeadPropsHolder).Patch(patches) + cErr := f.Close() + if pErr != nil { + return pErr + } + return cErr + } + props := func(name string) (map[xml.Name]Property, error) { + f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0666) + if err != nil { + return nil, err + } + m, pErr := f.(DeadPropsHolder).DeadProps() + cErr := f.Close() + if pErr != nil { + return nil, pErr + } + if cErr != nil { + return nil, cErr + } + return m, nil + } + + p0 := Property{ + XMLName: xml.Name{Space: "x:", Local: "boat"}, + InnerXML: []byte("pea-green"), + } + p1 := Property{ + XMLName: xml.Name{Space: "x:", Local: "ring"}, + InnerXML: []byte("1 shilling"), + } + p2 := Property{ + XMLName: xml.Name{Space: "x:", Local: "spoon"}, + InnerXML: []byte("runcible"), + } + p3 := Property{ + XMLName: xml.Name{Space: "x:", Local: "moon"}, + InnerXML: []byte("light"), + } + + if err := create("/src"); err != nil { + t.Fatalf("create /src: %v", err) + } + if err := patch("/src", Proppatch{Props: []Property{p0, p1}}); err != nil { + t.Fatalf("patch /src +p0 +p1: %v", 
err) + } + if _, err := copyFiles(ctx, fs, "/src", "/tmp", true, infiniteDepth, 0); err != nil { + t.Fatalf("copyFiles /src /tmp: %v", err) + } + if _, err := moveFiles(ctx, fs, "/tmp", "/dst", true); err != nil { + t.Fatalf("moveFiles /tmp /dst: %v", err) + } + if err := patch("/src", Proppatch{Props: []Property{p0}, Remove: true}); err != nil { + t.Fatalf("patch /src -p0: %v", err) + } + if err := patch("/src", Proppatch{Props: []Property{p2}}); err != nil { + t.Fatalf("patch /src +p2: %v", err) + } + if err := patch("/dst", Proppatch{Props: []Property{p1}, Remove: true}); err != nil { + t.Fatalf("patch /dst -p1: %v", err) + } + if err := patch("/dst", Proppatch{Props: []Property{p3}}); err != nil { + t.Fatalf("patch /dst +p3: %v", err) + } + + gotSrc, err := props("/src") + if err != nil { + t.Fatalf("props /src: %v", err) + } + wantSrc := map[xml.Name]Property{ + p1.XMLName: p1, + p2.XMLName: p2, + } + if !reflect.DeepEqual(gotSrc, wantSrc) { + t.Fatalf("props /src:\ngot %v\nwant %v", gotSrc, wantSrc) + } + + gotDst, err := props("/dst") + if err != nil { + t.Fatalf("props /dst: %v", err) + } + wantDst := map[xml.Name]Property{ + p0.XMLName: p0, + p3.XMLName: p3, + } + if !reflect.DeepEqual(gotDst, wantDst) { + t.Fatalf("props /dst:\ngot %v\nwant %v", gotDst, wantDst) + } +} + +func TestWalkFS(t *testing.T) { + testCases := []struct { + desc string + buildfs []string + startAt string + depth int + walkFn filepath.WalkFunc + want []string + }{{ + "just root", + []string{}, + "/", + infiniteDepth, + nil, + []string{ + "/", + }, + }, { + "infinite walk from root", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/d", + "mkdir /e", + "touch /f", + }, + "/", + infiniteDepth, + nil, + []string{ + "/", + "/a", + "/a/b", + "/a/b/c", + "/a/d", + "/e", + "/f", + }, + }, { + "infinite walk from subdir", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/d", + "mkdir /e", + "touch /f", + }, + "/a", + infiniteDepth, + nil, + []string{ + "/a", + "/a/b", + "/a/b/c", + "/a/d", + }, + }, { + "depth 1 walk from root", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/d", + "mkdir /e", + "touch /f", + }, + "/", + 1, + nil, + []string{ + "/", + "/a", + "/e", + "/f", + }, + }, { + "depth 1 walk from subdir", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/b/g", + "mkdir /a/b/g/h", + "touch /a/b/g/i", + "touch /a/b/g/h/j", + }, + "/a/b", + 1, + nil, + []string{ + "/a/b", + "/a/b/c", + "/a/b/g", + }, + }, { + "depth 0 walk from subdir", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/b/g", + "mkdir /a/b/g/h", + "touch /a/b/g/i", + "touch /a/b/g/h/j", + }, + "/a/b", + 0, + nil, + []string{ + "/a/b", + }, + }, { + "infinite walk from file", + []string{ + "mkdir /a", + "touch /a/b", + "touch /a/c", + }, + "/a/b", + 0, + nil, + []string{ + "/a/b", + }, + }, { + "infinite walk with skipped subdir", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/b/g", + "mkdir /a/b/g/h", + "touch /a/b/g/i", + "touch /a/b/g/h/j", + "touch /a/b/z", + }, + "/", + infiniteDepth, + func(path string, info os.FileInfo, err error) error { + if path == "/a/b/g" { + return filepath.SkipDir + } + return nil + }, + []string{ + "/", + "/a", + "/a/b", + "/a/b/c", + "/a/b/z", + }, + }} + ctx := context.Background() + for _, tc := range testCases { + fs, err := buildTestFS(tc.buildfs) + if err != nil { + t.Fatalf("%s: cannot create test filesystem: %v", tc.desc, err) + } + var got []string + traceFn := 
func(path string, info os.FileInfo, err error) error { + if tc.walkFn != nil { + err = tc.walkFn(path, info, err) + if err != nil { + return err + } + } + got = append(got, path) + return nil + } + fi, err := fs.Stat(ctx, tc.startAt) + if err != nil { + t.Fatalf("%s: cannot stat: %v", tc.desc, err) + } + err = walkFS(ctx, fs, tc.depth, tc.startAt, fi, traceFn) + if err != nil { + t.Errorf("%s:\ngot error %v, want nil", tc.desc, err) + continue + } + sort.Strings(got) + sort.Strings(tc.want) + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("%s:\ngot %q\nwant %q", tc.desc, got, tc.want) + continue + } + } +} + +func buildTestFS(buildfs []string) (FileSystem, error) { + // TODO: Could this be merged with the build logic in TestFS? + + ctx := context.Background() + fs := NewMemFS() + for _, b := range buildfs { + op := strings.Split(b, " ") + switch op[0] { + case "mkdir": + err := fs.Mkdir(ctx, op[1], os.ModeDir|0777) + if err != nil { + return nil, err + } + case "touch": + f, err := fs.OpenFile(ctx, op[1], os.O_RDWR|os.O_CREATE, 0666) + if err != nil { + return nil, err + } + f.Close() + case "write": + f, err := fs.OpenFile(ctx, op[1], os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return nil, err + } + _, err = f.Write([]byte(op[2])) + f.Close() + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unknown file operation %q", op[0]) + } + } + return fs, nil +} diff --git a/vendor/golang.org/x/net/webdav/if.go b/vendor/golang.org/x/net/webdav/if.go new file mode 100644 index 0000000..416e81c --- /dev/null +++ b/vendor/golang.org/x/net/webdav/if.go @@ -0,0 +1,173 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +// The If header is covered by Section 10.4. +// http://www.webdav.org/specs/rfc4918.html#HEADER_If + +import ( + "strings" +) + +// ifHeader is a disjunction (OR) of ifLists. +type ifHeader struct { + lists []ifList +} + +// ifList is a conjunction (AND) of Conditions, and an optional resource tag. +type ifList struct { + resourceTag string + conditions []Condition +} + +// parseIfHeader parses the "If: foo bar" HTTP header. The httpHeader string +// should omit the "If:" prefix and have any "\r\n"s collapsed to a " ", as is +// returned by req.Header.Get("If") for a http.Request req. 
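+//
+// For example (a hypothetical call, not taken from the tests below):
+//
+//	h, ok := parseIfHeader(`</locked/> (<urn:uuid:1234> ["etag"])`)
+//
+// yields ok == true and one ifList whose resourceTag is "/locked/" and whose
+// two conditions are a Token "urn:uuid:1234" and an ETag `"etag"`.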
+func parseIfHeader(httpHeader string) (h ifHeader, ok bool) { + s := strings.TrimSpace(httpHeader) + switch tokenType, _, _ := lex(s); tokenType { + case '(': + return parseNoTagLists(s) + case angleTokenType: + return parseTaggedLists(s) + default: + return ifHeader{}, false + } +} + +func parseNoTagLists(s string) (h ifHeader, ok bool) { + for { + l, remaining, ok := parseList(s) + if !ok { + return ifHeader{}, false + } + h.lists = append(h.lists, l) + if remaining == "" { + return h, true + } + s = remaining + } +} + +func parseTaggedLists(s string) (h ifHeader, ok bool) { + resourceTag, n := "", 0 + for first := true; ; first = false { + tokenType, tokenStr, remaining := lex(s) + switch tokenType { + case angleTokenType: + if !first && n == 0 { + return ifHeader{}, false + } + resourceTag, n = tokenStr, 0 + s = remaining + case '(': + n++ + l, remaining, ok := parseList(s) + if !ok { + return ifHeader{}, false + } + l.resourceTag = resourceTag + h.lists = append(h.lists, l) + if remaining == "" { + return h, true + } + s = remaining + default: + return ifHeader{}, false + } + } +} + +func parseList(s string) (l ifList, remaining string, ok bool) { + tokenType, _, s := lex(s) + if tokenType != '(' { + return ifList{}, "", false + } + for { + tokenType, _, remaining = lex(s) + if tokenType == ')' { + if len(l.conditions) == 0 { + return ifList{}, "", false + } + return l, remaining, true + } + c, remaining, ok := parseCondition(s) + if !ok { + return ifList{}, "", false + } + l.conditions = append(l.conditions, c) + s = remaining + } +} + +func parseCondition(s string) (c Condition, remaining string, ok bool) { + tokenType, tokenStr, s := lex(s) + if tokenType == notTokenType { + c.Not = true + tokenType, tokenStr, s = lex(s) + } + switch tokenType { + case strTokenType, angleTokenType: + c.Token = tokenStr + case squareTokenType: + c.ETag = tokenStr + default: + return Condition{}, "", false + } + return c, s, true +} + +// Single-rune tokens like '(' or ')' have a token type equal to their rune. +// All other tokens have a negative token type. +const ( + errTokenType = rune(-1) + eofTokenType = rune(-2) + strTokenType = rune(-3) + notTokenType = rune(-4) + angleTokenType = rune(-5) + squareTokenType = rune(-6) +) + +func lex(s string) (tokenType rune, tokenStr string, remaining string) { + // The net/textproto Reader that parses the HTTP header will collapse + // Linear White Space that spans multiple "\r\n" lines to a single " ", + // so we don't need to look for '\r' or '\n'. + for len(s) > 0 && (s[0] == '\t' || s[0] == ' ') { + s = s[1:] + } + if len(s) == 0 { + return eofTokenType, "", "" + } + i := 0 +loop: + for ; i < len(s); i++ { + switch s[i] { + case '\t', ' ', '(', ')', '<', '>', '[', ']': + break loop + } + } + + if i != 0 { + tokenStr, remaining = s[:i], s[i:] + if tokenStr == "Not" { + return notTokenType, "", remaining + } + return strTokenType, tokenStr, remaining + } + + j := 0 + switch s[0] { + case '<': + j, tokenType = strings.IndexByte(s, '>'), angleTokenType + case '[': + j, tokenType = strings.IndexByte(s, ']'), squareTokenType + default: + return rune(s[0]), "", s[1:] + } + if j < 0 { + return errTokenType, "", "" + } + return tokenType, s[1:j], s[j+1:] +} diff --git a/vendor/golang.org/x/net/webdav/if_test.go b/vendor/golang.org/x/net/webdav/if_test.go new file mode 100644 index 0000000..aad61a4 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/if_test.go @@ -0,0 +1,322 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+import (
+	"reflect"
+	"strings"
+	"testing"
+)
+
+func TestParseIfHeader(t *testing.T) {
+	// The "section x.y.z" test cases come from section x.y.z of the spec at
+	// http://www.webdav.org/specs/rfc4918.html
+	testCases := []struct {
+		desc  string
+		input string
+		want  ifHeader
+	}{{
+		"bad: empty",
+		``,
+		ifHeader{},
+	}, {
+		"bad: no parens",
+		`foobar`,
+		ifHeader{},
+	}, {
+		"bad: empty list #1",
+		`()`,
+		ifHeader{},
+	}, {
+		"bad: empty list #2",
+		`(a) (b c) () (d)`,
+		ifHeader{},
+	}, {
+		"bad: no list after resource #1",
+		`<foo>`,
+		ifHeader{},
+	}, {
+		"bad: no list after resource #2",
+		`<foo> <bar> (a)`,
+		ifHeader{},
+	}, {
+		"bad: no list after resource #3",
+		`<foo> (a) (b) <bar>`,
+		ifHeader{},
+	}, {
+		"bad: no-tag-list followed by tagged-list",
+		`(a) (b) <foo> (c)`,
+		ifHeader{},
+	}, {
+		"bad: unfinished list",
+		`(a`,
+		ifHeader{},
+	}, {
+		"bad: unfinished ETag",
+		`([b`,
+		ifHeader{},
+	}, {
+		"bad: unfinished Notted list",
+		`(Not a`,
+		ifHeader{},
+	}, {
+		"bad: double Not",
+		`(Not Not a)`,
+		ifHeader{},
+	}, {
+		"good: one list with a Token",
+		`(a)`,
+		ifHeader{
+			lists: []ifList{{
+				conditions: []Condition{{
+					Token: `a`,
+				}},
+			}},
+		},
+	}, {
+		"good: one list with an ETag",
+		`([a])`,
+		ifHeader{
+			lists: []ifList{{
+				conditions: []Condition{{
+					ETag: `a`,
+				}},
+			}},
+		},
+	}, {
+		"good: one list with three Nots",
+		`(Not a Not b Not [d])`,
+		ifHeader{
+			lists: []ifList{{
+				conditions: []Condition{{
+					Not:   true,
+					Token: `a`,
+				}, {
+					Not:   true,
+					Token: `b`,
+				}, {
+					Not:  true,
+					ETag: `d`,
+				}},
+			}},
+		},
+	}, {
+		"good: two lists",
+		`(a) (b)`,
+		ifHeader{
+			lists: []ifList{{
+				conditions: []Condition{{
+					Token: `a`,
+				}},
+			}, {
+				conditions: []Condition{{
+					Token: `b`,
+				}},
+			}},
+		},
+	}, {
+		"good: two Notted lists",
+		`(Not a) (Not b)`,
+		ifHeader{
+			lists: []ifList{{
+				conditions: []Condition{{
+					Not:   true,
+					Token: `a`,
+				}},
+			}, {
+				conditions: []Condition{{
+					Not:   true,
+					Token: `b`,
+				}},
+			}},
+		},
+	}, {
+		"section 7.5.1",
+		`<http://www.example.com/users/f/fielding/index.html> 
+		 (<urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6>)`,
+		ifHeader{
+			lists: []ifList{{
+				resourceTag: `http://www.example.com/users/f/fielding/index.html`,
+				conditions: []Condition{{
+					Token: `urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6`,
+				}},
+			}},
+		},
+	}, {
+		"section 7.5.2 #1",
+		`(<urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf>)`,
+		ifHeader{
+			lists: []ifList{{
+				conditions: []Condition{{
+					Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`,
+				}},
+			}},
+		},
+	}, {
+		"section 7.5.2 #2",
+		`<http://example.com/locked/> 
+		 (<urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf>)`,
+		ifHeader{
+			lists: []ifList{{
+				resourceTag: `http://example.com/locked/`,
+				conditions: []Condition{{
+					Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`,
+				}},
+			}},
+		},
+	}, {
+		"section 7.5.2 #3",
+		`<http://example.com/locked/member> 
+		 (<urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf>)`,
+		ifHeader{
+			lists: []ifList{{
+				resourceTag: `http://example.com/locked/member`,
+				conditions: []Condition{{
+					Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`,
+				}},
+			}},
+		},
+	}, {
+		"section 9.9.6",
+		`(<urn:uuid:fe184f2e-6eec-41d0-c765-01adc56e6bb4>) 
+		 (<urn:uuid:e454f3f3-acdc-452a-56c7-00a5c91e4b77>)`,
+		ifHeader{
+			lists: []ifList{{
+				conditions: []Condition{{
+					Token: `urn:uuid:fe184f2e-6eec-41d0-c765-01adc56e6bb4`,
+				}},
+			}, {
+				conditions: []Condition{{
+					Token: `urn:uuid:e454f3f3-acdc-452a-56c7-00a5c91e4b77`,
+				}},
+			}},
+		},
+	}, {
+		"section 9.10.8",
+		`(<urn:uuid:e71d4fae-5dec-22d6-fea5-00a0c91e6be4>)`,
+		ifHeader{
+			lists: []ifList{{
+				conditions: []Condition{{
+					Token: `urn:uuid:e71d4fae-5dec-22d6-fea5-00a0c91e6be4`,
+				}},
+			}},
+		},
+	}, {
+		"section 10.4.6",
+		`(<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2> 
+		 ["I am an ETag"]) 
+		 (["I am another ETag"])`,
+		ifHeader{
+			lists: []ifList{{
+				conditions: []Condition{{
+					Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,
+				}, {
+					ETag: `"I am an ETag"`,
+				}},
+			}, {
+				conditions: []Condition{{
+					ETag: `"I am another ETag"`,
+				}},
+			}},
+		},
+	}, {
+		"section 10.4.7",
+		`(Not <urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2> 
+		 <urn:uuid:58f202ac-22cf-11d1-b12d-002035b29092>)`,
+		ifHeader{
+			lists: []ifList{{
+				conditions: []Condition{{
+					Not:   true,
+					Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,
+				}, {
+					Token: `urn:uuid:58f202ac-22cf-11d1-b12d-002035b29092`,
+				}},
+			}},
+		},
+	}, {
+		"section 10.4.8",
+		`(<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>) 
+		 (Not <DAV:no-lock>)`,
+		ifHeader{
+			lists: []ifList{{
+				conditions: []Condition{{
+					Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,
+				}},
+			}, {
+				conditions: []Condition{{
+					Not:   true,
+					Token: `DAV:no-lock`,
+				}},
+			}},
+		},
+	}, {
+		"section 10.4.9",
+		`</resource1> 
+		 (<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2> 
+		 [W/"A weak ETag"]) (["strong ETag"])`,
+		ifHeader{
+			lists: []ifList{{
+				resourceTag: `/resource1`,
+				conditions: []Condition{{
+					Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,
+				}, {
+					ETag: `W/"A weak ETag"`,
+				}},
+			}, {
+				resourceTag: `/resource1`,
+				conditions: []Condition{{
+					ETag: `"strong ETag"`,
+				}},
+			}},
+		},
+	}, {
+		"section 10.4.10",
+		`<http://www.example.com/specs/> 
+		 (<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>)`,
+		ifHeader{
+			lists: []ifList{{
+				resourceTag: `http://www.example.com/specs/`,
+				conditions: []Condition{{
+					Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,
+				}},
+			}},
+		},
+	}, {
+		"section 10.4.11 #1",
+		`</specs/rfc2518.doc> (["4217"])`,
+		ifHeader{
+			lists: []ifList{{
+				resourceTag: `/specs/rfc2518.doc`,
+				conditions: []Condition{{
+					ETag: `"4217"`,
+				}},
+			}},
+		},
+	}, {
+		"section 10.4.11 #2",
+		`</specs/rfc2518.doc> (Not ["4217"])`,
+		ifHeader{
+			lists: []ifList{{
+				resourceTag: `/specs/rfc2518.doc`,
+				conditions: []Condition{{
+					Not:  true,
+					ETag: `"4217"`,
+				}},
+			}},
+		},
+	}}
+
+	for _, tc := range testCases {
+		got, ok := parseIfHeader(strings.Replace(tc.input, "\n", "", -1))
+		if gotEmpty := reflect.DeepEqual(got, ifHeader{}); gotEmpty == ok {
+			t.Errorf("%s: should be different: empty header == %t, ok == %t", tc.desc, gotEmpty, ok)
+			continue
+		}
+		if !reflect.DeepEqual(got, tc.want) {
+			t.Errorf("%s:\ngot  %v\nwant %v", tc.desc, got, tc.want)
+			continue
+		}
+	}
+}
diff --git a/vendor/golang.org/x/net/webdav/internal/xml/README b/vendor/golang.org/x/net/webdav/internal/xml/README
new file mode 100644
index 0000000..89656f4
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/internal/xml/README
@@ -0,0 +1,11 @@
+This is a fork of the encoding/xml package at ca1d6c4, the last commit before
+https://go.googlesource.com/go/+/c0d6d33 "encoding/xml: restore Go 1.4 name
+space behavior" made late in the lead-up to the Go 1.5 release.
+
+The list of encoding/xml changes is at
+https://go.googlesource.com/go/+log/master/src/encoding/xml
+
+This fork is temporary, and I (nigeltao) expect to revert it after Go 1.6 is
+released.
+
+See http://golang.org/issue/11841
diff --git a/vendor/golang.org/x/net/webdav/internal/xml/atom_test.go b/vendor/golang.org/x/net/webdav/internal/xml/atom_test.go
new file mode 100644
index 0000000..a712843
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/internal/xml/atom_test.go
@@ -0,0 +1,56 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package xml + +import "time" + +var atomValue = &Feed{ + XMLName: Name{"http://www.w3.org/2005/Atom", "feed"}, + Title: "Example Feed", + Link: []Link{{Href: "http://example.org/"}}, + Updated: ParseTime("2003-12-13T18:30:02Z"), + Author: Person{Name: "John Doe"}, + Id: "urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6", + + Entry: []Entry{ + { + Title: "Atom-Powered Robots Run Amok", + Link: []Link{{Href: "http://example.org/2003/12/13/atom03"}}, + Id: "urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a", + Updated: ParseTime("2003-12-13T18:30:02Z"), + Summary: NewText("Some text."), + }, + }, +} + +var atomXml = `` + + `` + + `Example Feed` + + `urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6` + + `` + + `John Doe` + + `` + + `Atom-Powered Robots Run Amok` + + `urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a` + + `` + + `2003-12-13T18:30:02Z` + + `` + + `Some text.` + + `` + + `` + +func ParseTime(str string) time.Time { + t, err := time.Parse(time.RFC3339, str) + if err != nil { + panic(err) + } + return t +} + +func NewText(text string) Text { + return Text{ + Body: text, + } +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/example_test.go b/vendor/golang.org/x/net/webdav/internal/xml/example_test.go new file mode 100644 index 0000000..21b48de --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/example_test.go @@ -0,0 +1,151 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml_test + +import ( + "encoding/xml" + "fmt" + "os" +) + +func ExampleMarshalIndent() { + type Address struct { + City, State string + } + type Person struct { + XMLName xml.Name `xml:"person"` + Id int `xml:"id,attr"` + FirstName string `xml:"name>first"` + LastName string `xml:"name>last"` + Age int `xml:"age"` + Height float32 `xml:"height,omitempty"` + Married bool + Address + Comment string `xml:",comment"` + } + + v := &Person{Id: 13, FirstName: "John", LastName: "Doe", Age: 42} + v.Comment = " Need more details. " + v.Address = Address{"Hanga Roa", "Easter Island"} + + output, err := xml.MarshalIndent(v, " ", " ") + if err != nil { + fmt.Printf("error: %v\n", err) + } + + os.Stdout.Write(output) + // Output: + // + // + // John + // Doe + // + // 42 + // false + // Hanga Roa + // Easter Island + // + // +} + +func ExampleEncoder() { + type Address struct { + City, State string + } + type Person struct { + XMLName xml.Name `xml:"person"` + Id int `xml:"id,attr"` + FirstName string `xml:"name>first"` + LastName string `xml:"name>last"` + Age int `xml:"age"` + Height float32 `xml:"height,omitempty"` + Married bool + Address + Comment string `xml:",comment"` + } + + v := &Person{Id: 13, FirstName: "John", LastName: "Doe", Age: 42} + v.Comment = " Need more details. " + v.Address = Address{"Hanga Roa", "Easter Island"} + + enc := xml.NewEncoder(os.Stdout) + enc.Indent(" ", " ") + if err := enc.Encode(v); err != nil { + fmt.Printf("error: %v\n", err) + } + + // Output: + // + // + // John + // Doe + // + // 42 + // false + // Hanga Roa + // Easter Island + // + // +} + +// This example demonstrates unmarshaling an XML excerpt into a value with +// some preset fields. Note that the Phone field isn't modified and that +// the XML element is ignored. Also, the Groups field is assigned +// considering the element path provided in its tag. 
+func ExampleUnmarshal() {
+	type Email struct {
+		Where string `xml:"where,attr"`
+		Addr  string
+	}
+	type Address struct {
+		City, State string
+	}
+	type Result struct {
+		XMLName xml.Name `xml:"Person"`
+		Name    string   `xml:"FullName"`
+		Phone   string
+		Email   []Email
+		Groups  []string `xml:"Group>Value"`
+		Address
+	}
+	v := Result{Name: "none", Phone: "none"}
+
+	data := `
+		<Person>
+			<FullName>Grace R. Emlin</FullName>
+			<Company>Example Inc.</Company>
+			<Email where="home">
+				<Addr>gre@example.com</Addr>
+			</Email>
+			<Email where='work'>
+				<Addr>gre@work.com</Addr>
+			</Email>
+			<Group>
+				<Value>Friends</Value>
+				<Value>Squash</Value>
+			</Group>
+			<City>Hanga Roa</City>
+			<State>Easter Island</State>
+		</Person>
+	`
+	err := xml.Unmarshal([]byte(data), &v)
+	if err != nil {
+		fmt.Printf("error: %v", err)
+		return
+	}
+	fmt.Printf("XMLName: %#v\n", v.XMLName)
+	fmt.Printf("Name: %q\n", v.Name)
+	fmt.Printf("Phone: %q\n", v.Phone)
+	fmt.Printf("Email: %v\n", v.Email)
+	fmt.Printf("Groups: %v\n", v.Groups)
+	fmt.Printf("Address: %v\n", v.Address)
+	// Output:
+	// XMLName: xml.Name{Space:"", Local:"Person"}
+	// Name: "Grace R. Emlin"
+	// Phone: "none"
+	// Email: [{home gre@example.com} {work gre@work.com}]
+	// Groups: [Friends Squash]
+	// Address: {Hanga Roa Easter Island}
+}
diff --git a/vendor/golang.org/x/net/webdav/internal/xml/marshal.go b/vendor/golang.org/x/net/webdav/internal/xml/marshal.go
new file mode 100644
index 0000000..cb82ec2
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/internal/xml/marshal.go
@@ -0,0 +1,1223 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml
+
+import (
+	"bufio"
+	"bytes"
+	"encoding"
+	"fmt"
+	"io"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+const (
+	// A generic XML header suitable for use with the output of Marshal.
+	// This is not automatically added to any output of this package,
+	// it is provided as a convenience.
+	Header = `<?xml version="1.0" encoding="UTF-8"?>` + "\n"
+)
+
+// Marshal returns the XML encoding of v.
+//
+// Marshal handles an array or slice by marshalling each of the elements.
+// Marshal handles a pointer by marshalling the value it points at or, if the
+// pointer is nil, by writing nothing. Marshal handles an interface value by
+// marshalling the value it contains or, if the interface value is nil, by
+// writing nothing. Marshal handles all other data by writing one or more XML
+// elements containing the data.
+//
+// The name for the XML elements is taken from, in order of preference:
+//     - the tag on the XMLName field, if the data is a struct
+//     - the value of the XMLName field of type xml.Name
+//     - the tag of the struct field used to obtain the data
+//     - the name of the struct field used to obtain the data
+//     - the name of the marshalled type
+//
+// The XML element for a struct contains marshalled elements for each of the
+// exported fields of the struct, with these exceptions:
+//     - the XMLName field, described above, is omitted.
+//     - a field with tag "-" is omitted.
+//     - a field with tag "name,attr" becomes an attribute with
+//       the given name in the XML element.
+//     - a field with tag ",attr" becomes an attribute with the
+//       field name in the XML element.
+//     - a field with tag ",chardata" is written as character data,
+//       not as an XML element.
+//     - a field with tag ",innerxml" is written verbatim, not subject
+//       to the usual marshalling procedure.
+//     - a field with tag ",comment" is written as an XML comment, not
+//       subject to the usual marshalling procedure. It must not contain
+//       the "--" string within it.
+//     - a field with a tag including the "omitempty" option is omitted
+//       if the field value is empty.
The empty values are false, 0, any +// nil pointer or interface value, and any array, slice, map, or +// string of length zero. +// - an anonymous struct field is handled as if the fields of its +// value were part of the outer struct. +// +// If a field uses a tag "a>b>c", then the element c will be nested inside +// parent elements a and b. Fields that appear next to each other that name +// the same parent will be enclosed in one XML element. +// +// See MarshalIndent for an example. +// +// Marshal will return an error if asked to marshal a channel, function, or map. +func Marshal(v interface{}) ([]byte, error) { + var b bytes.Buffer + if err := NewEncoder(&b).Encode(v); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// Marshaler is the interface implemented by objects that can marshal +// themselves into valid XML elements. +// +// MarshalXML encodes the receiver as zero or more XML elements. +// By convention, arrays or slices are typically encoded as a sequence +// of elements, one per entry. +// Using start as the element tag is not required, but doing so +// will enable Unmarshal to match the XML elements to the correct +// struct field. +// One common implementation strategy is to construct a separate +// value with a layout corresponding to the desired XML and then +// to encode it using e.EncodeElement. +// Another common strategy is to use repeated calls to e.EncodeToken +// to generate the XML output one token at a time. +// The sequence of encoded tokens must make up zero or more valid +// XML elements. +type Marshaler interface { + MarshalXML(e *Encoder, start StartElement) error +} + +// MarshalerAttr is the interface implemented by objects that can marshal +// themselves into valid XML attributes. +// +// MarshalXMLAttr returns an XML attribute with the encoded value of the receiver. +// Using name as the attribute name is not required, but doing so +// will enable Unmarshal to match the attribute to the correct +// struct field. +// If MarshalXMLAttr returns the zero attribute Attr{}, no attribute +// will be generated in the output. +// MarshalXMLAttr is used only for struct fields with the +// "attr" option in the field tag. +type MarshalerAttr interface { + MarshalXMLAttr(name Name) (Attr, error) +} + +// MarshalIndent works like Marshal, but each XML element begins on a new +// indented line that starts with prefix and is followed by one or more +// copies of indent according to the nesting depth. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + var b bytes.Buffer + enc := NewEncoder(&b) + enc.Indent(prefix, indent) + if err := enc.Encode(v); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// An Encoder writes XML data to an output stream. +type Encoder struct { + p printer +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + e := &Encoder{printer{Writer: bufio.NewWriter(w)}} + e.p.encoder = e + return e +} + +// Indent sets the encoder to generate XML in which each element +// begins on a new indented line that starts with prefix and is followed by +// one or more copies of indent according to the nesting depth. +func (enc *Encoder) Indent(prefix, indent string) { + enc.p.prefix = prefix + enc.p.indent = indent +} + +// Encode writes the XML encoding of v to the stream. +// +// See the documentation for Marshal for details about the conversion +// of Go values to XML. +// +// Encode calls Flush before returning. 
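+//
+// A minimal usage sketch (editorial addition, not part of the upstream
+// file; the Pet type is hypothetical):
+//
+//	type Pet struct {
+//		Name string `xml:"name"`
+//	}
+//	enc := NewEncoder(os.Stdout)
+//	if err := enc.Encode(Pet{Name: "Rex"}); err != nil {
+//		// handle the error
+//	}
+//	// Output: <Pet><name>Rex</name></Pet>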
+func (enc *Encoder) Encode(v interface{}) error {
+	err := enc.p.marshalValue(reflect.ValueOf(v), nil, nil)
+	if err != nil {
+		return err
+	}
+	return enc.p.Flush()
+}
+
+// EncodeElement writes the XML encoding of v to the stream,
+// using start as the outermost tag in the encoding.
+//
+// See the documentation for Marshal for details about the conversion
+// of Go values to XML.
+//
+// EncodeElement calls Flush before returning.
+func (enc *Encoder) EncodeElement(v interface{}, start StartElement) error {
+	err := enc.p.marshalValue(reflect.ValueOf(v), nil, &start)
+	if err != nil {
+		return err
+	}
+	return enc.p.Flush()
+}
+
+var (
+	begComment   = []byte("<!--")
+	endComment   = []byte("-->")
+	endProcInst  = []byte("?>")
+	endDirective = []byte(">")
+)
+
+// EncodeToken writes the given XML token to the stream.
+// It returns an error if StartElement and EndElement tokens are not
+// properly matched.
+//
+// EncodeToken does not call Flush, because usually it is part of a
+// larger operation such as Encode or EncodeElement (or a custom
+// Marshaler's MarshalXML invoked during those), and those will call
+// Flush when finished. Callers that create an Encoder and then invoke
+// EncodeToken directly, without using Encode or EncodeElement, need to
+// call Flush when finished to ensure that the XML is written to the
+// underlying writer.
+//
+// EncodeToken allows writing a ProcInst with Target set to "xml" only
+// as the first token in the stream.
+//
+// When encoding a StartElement holding an XML namespace prefix
+// declaration for a prefix that is not already declared, contained
+// elements (including the StartElement itself) will use the declared
+// prefix when encoding names with matching namespace URIs.
+func (enc *Encoder) EncodeToken(t Token) error {
+	p := &enc.p
+	switch t := t.(type) {
+	case StartElement:
+		if err := p.writeStart(&t); err != nil {
+			return err
+		}
+	case EndElement:
+		if err := p.writeEnd(t.Name); err != nil {
+			return err
+		}
+	case CharData:
+		escapeText(p, t, false)
+	case Comment:
+		if bytes.Contains(t, endComment) {
+			return fmt.Errorf("xml: EncodeToken of Comment containing --> marker")
+		}
+		p.WriteString("<!--")
+		p.Write(t)
+		p.WriteString("-->")
+		return p.cachedWriteError()
+	case ProcInst:
+		// First token to be encoded which is also a ProcInst with target of xml
+		// is the xml declaration. The only ProcInst where target of xml is allowed.
+		if t.Target == "xml" && p.Buffered() != 0 {
+			return fmt.Errorf("xml: EncodeToken of ProcInst xml target only valid for xml declaration, first token encoded")
+		}
+		if !isNameString(t.Target) {
+			return fmt.Errorf("xml: EncodeToken of ProcInst with invalid Target")
+		}
+		if bytes.Contains(t.Inst, endProcInst) {
+			return fmt.Errorf("xml: EncodeToken of ProcInst containing ?> marker")
+		}
+		p.WriteString("<?")
+		p.WriteString(t.Target)
+		if len(t.Inst) > 0 {
+			p.WriteByte(' ')
+			p.Write(t.Inst)
+		}
+		p.WriteString("?>")
+	case Directive:
+		if !isValidDirective(t) {
+			return fmt.Errorf("xml: EncodeToken of Directive containing wrong < or > markers")
+		}
+		p.WriteString("<!")
+		p.Write(t)
+		p.WriteString(">")
+	default:
+		return fmt.Errorf("xml: EncodeToken of invalid token type")
+	}
+	return p.cachedWriteError()
+}
+
+// isValidDirective reports whether dir is a valid directive text,
+// meaning angle brackets are matched, ignoring comments and strings.
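+//
+// Editorial examples (not in the upstream file) of what this check accepts:
+//
+//	isValidDirective(Directive(`DOCTYPE doc [ <!ELEMENT doc (#PCDATA)> ]`)) // true: brackets balance
+//	isValidDirective(Directive(`DOCTYPE doc <`))                            // false: unmatched '<'
+//	isValidDirective(Directive(`x ">" y`))                                  // true: '>' inside quotes is ignored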
+func isValidDirective(dir Directive) bool { + var ( + depth int + inquote uint8 + incomment bool + ) + for i, c := range dir { + switch { + case incomment: + if c == '>' { + if n := 1 + i - len(endComment); n >= 0 && bytes.Equal(dir[n:i+1], endComment) { + incomment = false + } + } + // Just ignore anything in comment + case inquote != 0: + if c == inquote { + inquote = 0 + } + // Just ignore anything within quotes + case c == '\'' || c == '"': + inquote = c + case c == '<': + if i+len(begComment) < len(dir) && bytes.Equal(dir[i:i+len(begComment)], begComment) { + incomment = true + } else { + depth++ + } + case c == '>': + if depth == 0 { + return false + } + depth-- + } + } + return depth == 0 && inquote == 0 && !incomment +} + +// Flush flushes any buffered XML to the underlying writer. +// See the EncodeToken documentation for details about when it is necessary. +func (enc *Encoder) Flush() error { + return enc.p.Flush() +} + +type printer struct { + *bufio.Writer + encoder *Encoder + seq int + indent string + prefix string + depth int + indentedIn bool + putNewline bool + defaultNS string + attrNS map[string]string // map prefix -> name space + attrPrefix map[string]string // map name space -> prefix + prefixes []printerPrefix + tags []Name +} + +// printerPrefix holds a namespace undo record. +// When an element is popped, the prefix record +// is set back to the recorded URL. The empty +// prefix records the URL for the default name space. +// +// The start of an element is recorded with an element +// that has mark=true. +type printerPrefix struct { + prefix string + url string + mark bool +} + +func (p *printer) prefixForNS(url string, isAttr bool) string { + // The "http://www.w3.org/XML/1998/namespace" name space is predefined as "xml" + // and must be referred to that way. + // (The "http://www.w3.org/2000/xmlns/" name space is also predefined as "xmlns", + // but users should not be trying to use that one directly - that's our job.) + if url == xmlURL { + return "xml" + } + if !isAttr && url == p.defaultNS { + // We can use the default name space. + return "" + } + return p.attrPrefix[url] +} + +// defineNS pushes any namespace definition found in the given attribute. +// If ignoreNonEmptyDefault is true, an xmlns="nonempty" +// attribute will be ignored. +func (p *printer) defineNS(attr Attr, ignoreNonEmptyDefault bool) error { + var prefix string + if attr.Name.Local == "xmlns" { + if attr.Name.Space != "" && attr.Name.Space != "xml" && attr.Name.Space != xmlURL { + return fmt.Errorf("xml: cannot redefine xmlns attribute prefix") + } + } else if attr.Name.Space == "xmlns" && attr.Name.Local != "" { + prefix = attr.Name.Local + if attr.Value == "" { + // Technically, an empty XML namespace is allowed for an attribute. + // From http://www.w3.org/TR/xml-names11/#scoping-defaulting: + // + // The attribute value in a namespace declaration for a prefix may be + // empty. This has the effect, within the scope of the declaration, of removing + // any association of the prefix with a namespace name. + // + // However our namespace prefixes here are used only as hints. There's + // no need to respect the removal of a namespace prefix, so we ignore it. + return nil + } + } else { + // Ignore: it's not a namespace definition + return nil + } + if prefix == "" { + if attr.Value == p.defaultNS { + // No need for redefinition. + return nil + } + if attr.Value != "" && ignoreNonEmptyDefault { + // We have an xmlns="..." 
value but + // it can't define a name space in this context, + // probably because the element has an empty + // name space. In this case, we just ignore + // the name space declaration. + return nil + } + } else if _, ok := p.attrPrefix[attr.Value]; ok { + // There's already a prefix for the given name space, + // so use that. This prevents us from + // having two prefixes for the same name space + // so attrNS and attrPrefix can remain bijective. + return nil + } + p.pushPrefix(prefix, attr.Value) + return nil +} + +// createNSPrefix creates a name space prefix attribute +// to use for the given name space, defining a new prefix +// if necessary. +// If isAttr is true, the prefix is to be created for an attribute +// prefix, which means that the default name space cannot +// be used. +func (p *printer) createNSPrefix(url string, isAttr bool) { + if _, ok := p.attrPrefix[url]; ok { + // We already have a prefix for the given URL. + return + } + switch { + case !isAttr && url == p.defaultNS: + // We can use the default name space. + return + case url == "": + // The only way we can encode names in the empty + // name space is by using the default name space, + // so we must use that. + if p.defaultNS != "" { + // The default namespace is non-empty, so we + // need to set it to empty. + p.pushPrefix("", "") + } + return + case url == xmlURL: + return + } + // TODO If the URL is an existing prefix, we could + // use it as is. That would enable the + // marshaling of elements that had been unmarshaled + // and with a name space prefix that was not found. + // although technically it would be incorrect. + + // Pick a name. We try to use the final element of the path + // but fall back to _. + prefix := strings.TrimRight(url, "/") + if i := strings.LastIndex(prefix, "/"); i >= 0 { + prefix = prefix[i+1:] + } + if prefix == "" || !isName([]byte(prefix)) || strings.Contains(prefix, ":") { + prefix = "_" + } + if strings.HasPrefix(prefix, "xml") { + // xmlanything is reserved. + prefix = "_" + prefix + } + if p.attrNS[prefix] != "" { + // Name is taken. Find a better one. + for p.seq++; ; p.seq++ { + if id := prefix + "_" + strconv.Itoa(p.seq); p.attrNS[id] == "" { + prefix = id + break + } + } + } + + p.pushPrefix(prefix, url) +} + +// writeNamespaces writes xmlns attributes for all the +// namespace prefixes that have been defined in +// the current element. +func (p *printer) writeNamespaces() { + for i := len(p.prefixes) - 1; i >= 0; i-- { + prefix := p.prefixes[i] + if prefix.mark { + return + } + p.WriteString(" ") + if prefix.prefix == "" { + // Default name space. + p.WriteString(`xmlns="`) + } else { + p.WriteString("xmlns:") + p.WriteString(prefix.prefix) + p.WriteString(`="`) + } + EscapeText(p, []byte(p.nsForPrefix(prefix.prefix))) + p.WriteString(`"`) + } +} + +// pushPrefix pushes a new prefix on the prefix stack +// without checking to see if it is already defined. +func (p *printer) pushPrefix(prefix, url string) { + p.prefixes = append(p.prefixes, printerPrefix{ + prefix: prefix, + url: p.nsForPrefix(prefix), + }) + p.setAttrPrefix(prefix, url) +} + +// nsForPrefix returns the name space for the given +// prefix. Note that this is not valid for the +// empty attribute prefix, which always has an empty +// name space. +func (p *printer) nsForPrefix(prefix string) string { + if prefix == "" { + return p.defaultNS + } + return p.attrNS[prefix] +} + +// markPrefix marks the start of an element on the prefix +// stack. 
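+//
+// Editorial sketch (not in the upstream file) of the pairing with
+// pushPrefix and popPrefix around a single element:
+//
+//	p.markPrefix()              // entering <elem xmlns:x="urn:x">
+//	p.pushPrefix("x", "urn:x")  // record the declaration
+//	// ... children encoded here may use the "x" prefix ...
+//	p.popPrefix()               // leaving </elem>: unwinds back to the mark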
+func (p *printer) markPrefix() { + p.prefixes = append(p.prefixes, printerPrefix{ + mark: true, + }) +} + +// popPrefix pops all defined prefixes for the current +// element. +func (p *printer) popPrefix() { + for len(p.prefixes) > 0 { + prefix := p.prefixes[len(p.prefixes)-1] + p.prefixes = p.prefixes[:len(p.prefixes)-1] + if prefix.mark { + break + } + p.setAttrPrefix(prefix.prefix, prefix.url) + } +} + +// setAttrPrefix sets an attribute name space prefix. +// If url is empty, the attribute is removed. +// If prefix is empty, the default name space is set. +func (p *printer) setAttrPrefix(prefix, url string) { + if prefix == "" { + p.defaultNS = url + return + } + if url == "" { + delete(p.attrPrefix, p.attrNS[prefix]) + delete(p.attrNS, prefix) + return + } + if p.attrPrefix == nil { + // Need to define a new name space. + p.attrPrefix = make(map[string]string) + p.attrNS = make(map[string]string) + } + // Remove any old prefix value. This is OK because we maintain a + // strict one-to-one mapping between prefix and URL (see + // defineNS) + delete(p.attrPrefix, p.attrNS[prefix]) + p.attrPrefix[url] = prefix + p.attrNS[prefix] = url +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + marshalerAttrType = reflect.TypeOf((*MarshalerAttr)(nil)).Elem() + textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() +) + +// marshalValue writes one or more XML elements representing val. +// If val was obtained from a struct field, finfo must have its details. +func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplate *StartElement) error { + if startTemplate != nil && startTemplate.Name.Local == "" { + return fmt.Errorf("xml: EncodeElement of StartElement with missing name") + } + + if !val.IsValid() { + return nil + } + if finfo != nil && finfo.flags&fOmitEmpty != 0 && isEmptyValue(val) { + return nil + } + + // Drill into interfaces and pointers. + // This can turn into an infinite loop given a cyclic chain, + // but it matches the Go 1 behavior. + for val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr { + if val.IsNil() { + return nil + } + val = val.Elem() + } + + kind := val.Kind() + typ := val.Type() + + // Check for marshaler. + if val.CanInterface() && typ.Implements(marshalerType) { + return p.marshalInterface(val.Interface().(Marshaler), p.defaultStart(typ, finfo, startTemplate)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(marshalerType) { + return p.marshalInterface(pv.Interface().(Marshaler), p.defaultStart(pv.Type(), finfo, startTemplate)) + } + } + + // Check for text marshaler. + if val.CanInterface() && typ.Implements(textMarshalerType) { + return p.marshalTextInterface(val.Interface().(encoding.TextMarshaler), p.defaultStart(typ, finfo, startTemplate)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { + return p.marshalTextInterface(pv.Interface().(encoding.TextMarshaler), p.defaultStart(pv.Type(), finfo, startTemplate)) + } + } + + // Slices and arrays iterate over the elements. They do not have an enclosing tag. + if (kind == reflect.Slice || kind == reflect.Array) && typ.Elem().Kind() != reflect.Uint8 { + for i, n := 0, val.Len(); i < n; i++ { + if err := p.marshalValue(val.Index(i), finfo, startTemplate); err != nil { + return err + } + } + return nil + } + + tinfo, err := getTypeInfo(typ) + if err != nil { + return err + } + + // Create start element. 
+ // Precedence for the XML element name is: + // 0. startTemplate + // 1. XMLName field in underlying struct; + // 2. field name/tag in the struct field; and + // 3. type name + var start StartElement + + // explicitNS records whether the element's name space has been + // explicitly set (for example an XMLName field). + explicitNS := false + + if startTemplate != nil { + start.Name = startTemplate.Name + explicitNS = true + start.Attr = append(start.Attr, startTemplate.Attr...) + } else if tinfo.xmlname != nil { + xmlname := tinfo.xmlname + if xmlname.name != "" { + start.Name.Space, start.Name.Local = xmlname.xmlns, xmlname.name + } else if v, ok := xmlname.value(val).Interface().(Name); ok && v.Local != "" { + start.Name = v + } + explicitNS = true + } + if start.Name.Local == "" && finfo != nil { + start.Name.Local = finfo.name + if finfo.xmlns != "" { + start.Name.Space = finfo.xmlns + explicitNS = true + } + } + if start.Name.Local == "" { + name := typ.Name() + if name == "" { + return &UnsupportedTypeError{typ} + } + start.Name.Local = name + } + + // defaultNS records the default name space as set by a xmlns="..." + // attribute. We don't set p.defaultNS because we want to let + // the attribute writing code (in p.defineNS) be solely responsible + // for maintaining that. + defaultNS := p.defaultNS + + // Attributes + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + if finfo.flags&fAttr == 0 { + continue + } + attr, err := p.fieldAttr(finfo, val) + if err != nil { + return err + } + if attr.Name.Local == "" { + continue + } + start.Attr = append(start.Attr, attr) + if attr.Name.Space == "" && attr.Name.Local == "xmlns" { + defaultNS = attr.Value + } + } + if !explicitNS { + // Historic behavior: elements use the default name space + // they are contained in by default. + start.Name.Space = defaultNS + } + // Historic behaviour: an element that's in a namespace sets + // the default namespace for all elements contained within it. + start.setDefaultNamespace() + + if err := p.writeStart(&start); err != nil { + return err + } + + if val.Kind() == reflect.Struct { + err = p.marshalStruct(tinfo, val) + } else { + s, b, err1 := p.marshalSimple(typ, val) + if err1 != nil { + err = err1 + } else if b != nil { + EscapeText(p, b) + } else { + p.EscapeString(s) + } + } + if err != nil { + return err + } + + if err := p.writeEnd(start.Name); err != nil { + return err + } + + return p.cachedWriteError() +} + +// fieldAttr returns the attribute of the given field. +// If the returned attribute has an empty Name.Local, +// it should not be used. +// The given value holds the value containing the field. 
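+//
+// Editorial example (not in the upstream file): for a struct field
+// declared as
+//
+//	Port int `xml:"port,attr"`
+//
+// holding 8080, fieldAttr returns Attr{Name{"", "port"}, "8080"}; with the
+// omitempty option and a zero value it returns the zero Attr instead.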
+func (p *printer) fieldAttr(finfo *fieldInfo, val reflect.Value) (Attr, error) { + fv := finfo.value(val) + name := Name{Space: finfo.xmlns, Local: finfo.name} + if finfo.flags&fOmitEmpty != 0 && isEmptyValue(fv) { + return Attr{}, nil + } + if fv.Kind() == reflect.Interface && fv.IsNil() { + return Attr{}, nil + } + if fv.CanInterface() && fv.Type().Implements(marshalerAttrType) { + attr, err := fv.Interface().(MarshalerAttr).MarshalXMLAttr(name) + return attr, err + } + if fv.CanAddr() { + pv := fv.Addr() + if pv.CanInterface() && pv.Type().Implements(marshalerAttrType) { + attr, err := pv.Interface().(MarshalerAttr).MarshalXMLAttr(name) + return attr, err + } + } + if fv.CanInterface() && fv.Type().Implements(textMarshalerType) { + text, err := fv.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return Attr{}, err + } + return Attr{name, string(text)}, nil + } + if fv.CanAddr() { + pv := fv.Addr() + if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { + text, err := pv.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return Attr{}, err + } + return Attr{name, string(text)}, nil + } + } + // Dereference or skip nil pointer, interface values. + switch fv.Kind() { + case reflect.Ptr, reflect.Interface: + if fv.IsNil() { + return Attr{}, nil + } + fv = fv.Elem() + } + s, b, err := p.marshalSimple(fv.Type(), fv) + if err != nil { + return Attr{}, err + } + if b != nil { + s = string(b) + } + return Attr{name, s}, nil +} + +// defaultStart returns the default start element to use, +// given the reflect type, field info, and start template. +func (p *printer) defaultStart(typ reflect.Type, finfo *fieldInfo, startTemplate *StartElement) StartElement { + var start StartElement + // Precedence for the XML element name is as above, + // except that we do not look inside structs for the first field. + if startTemplate != nil { + start.Name = startTemplate.Name + start.Attr = append(start.Attr, startTemplate.Attr...) + } else if finfo != nil && finfo.name != "" { + start.Name.Local = finfo.name + start.Name.Space = finfo.xmlns + } else if typ.Name() != "" { + start.Name.Local = typ.Name() + } else { + // Must be a pointer to a named type, + // since it has the Marshaler methods. + start.Name.Local = typ.Elem().Name() + } + // Historic behaviour: elements use the name space of + // the element they are contained in by default. + if start.Name.Space == "" { + start.Name.Space = p.defaultNS + } + start.setDefaultNamespace() + return start +} + +// marshalInterface marshals a Marshaler interface value. +func (p *printer) marshalInterface(val Marshaler, start StartElement) error { + // Push a marker onto the tag stack so that MarshalXML + // cannot close the XML tags that it did not open. + p.tags = append(p.tags, Name{}) + n := len(p.tags) + + err := val.MarshalXML(p.encoder, start) + if err != nil { + return err + } + + // Make sure MarshalXML closed all its tags. p.tags[n-1] is the mark. + if len(p.tags) > n { + return fmt.Errorf("xml: %s.MarshalXML wrote invalid XML: <%s> not closed", receiverType(val), p.tags[len(p.tags)-1].Local) + } + p.tags = p.tags[:n-1] + return nil +} + +// marshalTextInterface marshals a TextMarshaler interface value. 
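+//
+// Editorial note (not in the upstream file): time.Time is the typical
+// case; it implements encoding.TextMarshaler, so a field such as
+//
+//	When time.Time `xml:"when"`
+//
+// is written as RFC 3339 character data between <when> and </when>.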
+func (p *printer) marshalTextInterface(val encoding.TextMarshaler, start StartElement) error { + if err := p.writeStart(&start); err != nil { + return err + } + text, err := val.MarshalText() + if err != nil { + return err + } + EscapeText(p, text) + return p.writeEnd(start.Name) +} + +// writeStart writes the given start element. +func (p *printer) writeStart(start *StartElement) error { + if start.Name.Local == "" { + return fmt.Errorf("xml: start tag with no name") + } + + p.tags = append(p.tags, start.Name) + p.markPrefix() + // Define any name spaces explicitly declared in the attributes. + // We do this as a separate pass so that explicitly declared prefixes + // will take precedence over implicitly declared prefixes + // regardless of the order of the attributes. + ignoreNonEmptyDefault := start.Name.Space == "" + for _, attr := range start.Attr { + if err := p.defineNS(attr, ignoreNonEmptyDefault); err != nil { + return err + } + } + // Define any new name spaces implied by the attributes. + for _, attr := range start.Attr { + name := attr.Name + // From http://www.w3.org/TR/xml-names11/#defaulting + // "Default namespace declarations do not apply directly + // to attribute names; the interpretation of unprefixed + // attributes is determined by the element on which they + // appear." + // This means we don't need to create a new namespace + // when an attribute name space is empty. + if name.Space != "" && !name.isNamespace() { + p.createNSPrefix(name.Space, true) + } + } + p.createNSPrefix(start.Name.Space, false) + + p.writeIndent(1) + p.WriteByte('<') + p.writeName(start.Name, false) + p.writeNamespaces() + for _, attr := range start.Attr { + name := attr.Name + if name.Local == "" || name.isNamespace() { + // Namespaces have already been written by writeNamespaces above. + continue + } + p.WriteByte(' ') + p.writeName(name, true) + p.WriteString(`="`) + p.EscapeString(attr.Value) + p.WriteByte('"') + } + p.WriteByte('>') + return nil +} + +// writeName writes the given name. It assumes +// that p.createNSPrefix(name) has already been called. 
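+//
+// Editorial example (not in the upstream file): with the prefix "a"
+// registered for the name space "urn:x",
+//
+//	p.writeName(Name{Space: "urn:x", Local: "b"}, false)
+//
+// emits "a:b"; if "urn:x" is the default name space and isAttr is false,
+// prefixForNS returns "" and just "b" is emitted.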
+func (p *printer) writeName(name Name, isAttr bool) {
+	if prefix := p.prefixForNS(name.Space, isAttr); prefix != "" {
+		p.WriteString(prefix)
+		p.WriteByte(':')
+	}
+	p.WriteString(name.Local)
+}
+
+func (p *printer) writeEnd(name Name) error {
+	if name.Local == "" {
+		return fmt.Errorf("xml: end tag with no name")
+	}
+	if len(p.tags) == 0 || p.tags[len(p.tags)-1].Local == "" {
+		return fmt.Errorf("xml: end tag </%s> without start tag", name.Local)
+	}
+	if top := p.tags[len(p.tags)-1]; top != name {
+		if top.Local != name.Local {
+			return fmt.Errorf("xml: end tag </%s> does not match start tag <%s>", name.Local, top.Local)
+		}
+		return fmt.Errorf("xml: end tag </%s> in namespace %s does not match start tag <%s> in namespace %s", name.Local, name.Space, top.Local, top.Space)
+	}
+	p.tags = p.tags[:len(p.tags)-1]
+
+	p.writeIndent(-1)
+	p.WriteByte('<')
+	p.WriteByte('/')
+	p.writeName(name, false)
+	p.WriteByte('>')
+	p.popPrefix()
+	return nil
+}
+
+func (p *printer) marshalSimple(typ reflect.Type, val reflect.Value) (string, []byte, error) {
+	switch val.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return strconv.FormatInt(val.Int(), 10), nil, nil
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return strconv.FormatUint(val.Uint(), 10), nil, nil
+	case reflect.Float32, reflect.Float64:
+		return strconv.FormatFloat(val.Float(), 'g', -1, val.Type().Bits()), nil, nil
+	case reflect.String:
+		return val.String(), nil, nil
+	case reflect.Bool:
+		return strconv.FormatBool(val.Bool()), nil, nil
+	case reflect.Array:
+		if typ.Elem().Kind() != reflect.Uint8 {
+			break
+		}
+		// [...]byte
+		var bytes []byte
+		if val.CanAddr() {
+			bytes = val.Slice(0, val.Len()).Bytes()
+		} else {
+			bytes = make([]byte, val.Len())
+			reflect.Copy(reflect.ValueOf(bytes), val)
+		}
+		return "", bytes, nil
+	case reflect.Slice:
+		if typ.Elem().Kind() != reflect.Uint8 {
+			break
+		}
+		// []byte
+		return "", val.Bytes(), nil
+	}
+	return "", nil, &UnsupportedTypeError{typ}
+}
+
+var ddBytes = []byte("--")
+
+func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error {
+	s := parentStack{p: p}
+	for i := range tinfo.fields {
+		finfo := &tinfo.fields[i]
+		if finfo.flags&fAttr != 0 {
+			continue
+		}
+		vf := finfo.value(val)
+
+		// Dereference or skip nil pointer, interface values.
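+		// (Editorial note, not in the upstream file: a non-nil *string
+		// pointing at "x" is marshaled below as if the field were the
+		// string "x"; a nil pointer is left as is and later marshals to
+		// nothing.)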
+		switch vf.Kind() {
+		case reflect.Ptr, reflect.Interface:
+			if !vf.IsNil() {
+				vf = vf.Elem()
+			}
+		}
+
+		switch finfo.flags & fMode {
+		case fCharData:
+			if err := s.setParents(&noField, reflect.Value{}); err != nil {
+				return err
+			}
+			if vf.CanInterface() && vf.Type().Implements(textMarshalerType) {
+				data, err := vf.Interface().(encoding.TextMarshaler).MarshalText()
+				if err != nil {
+					return err
+				}
+				Escape(p, data)
+				continue
+			}
+			if vf.CanAddr() {
+				pv := vf.Addr()
+				if pv.CanInterface() && pv.Type().Implements(textMarshalerType) {
+					data, err := pv.Interface().(encoding.TextMarshaler).MarshalText()
+					if err != nil {
+						return err
+					}
+					Escape(p, data)
+					continue
+				}
+			}
+			var scratch [64]byte
+			switch vf.Kind() {
+			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+				Escape(p, strconv.AppendInt(scratch[:0], vf.Int(), 10))
+			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+				Escape(p, strconv.AppendUint(scratch[:0], vf.Uint(), 10))
+			case reflect.Float32, reflect.Float64:
+				Escape(p, strconv.AppendFloat(scratch[:0], vf.Float(), 'g', -1, vf.Type().Bits()))
+			case reflect.Bool:
+				Escape(p, strconv.AppendBool(scratch[:0], vf.Bool()))
+			case reflect.String:
+				if err := EscapeText(p, []byte(vf.String())); err != nil {
+					return err
+				}
+			case reflect.Slice:
+				if elem, ok := vf.Interface().([]byte); ok {
+					if err := EscapeText(p, elem); err != nil {
+						return err
+					}
+				}
+			}
+			continue
+
+		case fComment:
+			if err := s.setParents(&noField, reflect.Value{}); err != nil {
+				return err
+			}
+			k := vf.Kind()
+			if !(k == reflect.String || k == reflect.Slice && vf.Type().Elem().Kind() == reflect.Uint8) {
+				return fmt.Errorf("xml: bad type for comment field of %s", val.Type())
+			}
+			if vf.Len() == 0 {
+				continue
+			}
+			p.writeIndent(0)
+			p.WriteString("<!--")
+			dashDash := false
+			dashLast := false
+			switch k {
+			case reflect.String:
+				s := vf.String()
+				dashDash = strings.Index(s, "--") >= 0
+				dashLast = s[len(s)-1] == '-'
+				if !dashDash {
+					p.WriteString(s)
+				}
+			case reflect.Slice:
+				b := vf.Bytes()
+				dashDash = bytes.Index(b, ddBytes) >= 0
+				dashLast = b[len(b)-1] == '-'
+				if !dashDash {
+					p.Write(b)
+				}
+			default:
+				panic("can't happen")
+			}
+			if dashDash {
+				return fmt.Errorf(`xml: comments must not contain "--"`)
+			}
+			if dashLast {
+				// "--->" is invalid grammar. Make it "- -->"
+				p.WriteByte(' ')
+			}
+			p.WriteString("-->")
+			continue
+
+		case fInnerXml:
+			iface := vf.Interface()
+			switch raw := iface.(type) {
+			case []byte:
+				p.Write(raw)
+				continue
+			case string:
+				p.WriteString(raw)
+				continue
+			}
+
+		case fElement, fElement | fAny:
+			if err := s.setParents(finfo, vf); err != nil {
+				return err
+			}
+		}
+		if err := p.marshalValue(vf, finfo, nil); err != nil {
+			return err
+		}
+	}
+	if err := s.setParents(&noField, reflect.Value{}); err != nil {
+		return err
+	}
+	return p.cachedWriteError()
+}
+
+var noField fieldInfo
+
+// return the bufio Writer's cached write error
+func (p *printer) cachedWriteError() error {
+	_, err := p.Write(nil)
+	return err
+}
+
+func (p *printer) writeIndent(depthDelta int) {
+	if len(p.prefix) == 0 && len(p.indent) == 0 {
+		return
+	}
+	if depthDelta < 0 {
+		p.depth--
+		if p.indentedIn {
+			p.indentedIn = false
+			return
+		}
+		p.indentedIn = false
+	}
+	if p.putNewline {
+		p.WriteByte('\n')
+	} else {
+		p.putNewline = true
+	}
+	if len(p.prefix) > 0 {
+		p.WriteString(p.prefix)
+	}
+	if len(p.indent) > 0 {
+		for i := 0; i < p.depth; i++ {
+			p.WriteString(p.indent)
+		}
+	}
+	if depthDelta > 0 {
+		p.depth++
+		p.indentedIn = true
+	}
+}
+
+type parentStack struct {
+	p       *printer
+	xmlns   string
+	parents []string
+}
+
+// setParents sets the stack of current parents to those found in finfo.
+// It only writes the start elements if vf holds a non-nil value.
+// If finfo is &noField, it pops all elements.
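+//
+// Editorial sketch (not in the upstream file): for consecutive fields
+// tagged `xml:"parent>a"` and `xml:"parent>b"`, the first call opens
+// <parent>, both fields are then written inside it, and a final
+// setParents(&noField, reflect.Value{}) closes </parent>.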
+func (s *parentStack) setParents(finfo *fieldInfo, vf reflect.Value) error {
+	xmlns := s.p.defaultNS
+	if finfo.xmlns != "" {
+		xmlns = finfo.xmlns
+	}
+	commonParents := 0
+	if xmlns == s.xmlns {
+		for ; commonParents < len(finfo.parents) && commonParents < len(s.parents); commonParents++ {
+			if finfo.parents[commonParents] != s.parents[commonParents] {
+				break
+			}
+		}
+	}
+	// Pop off any parents that aren't in common with the previous field.
+	for i := len(s.parents) - 1; i >= commonParents; i-- {
+		if err := s.p.writeEnd(Name{
+			Space: s.xmlns,
+			Local: s.parents[i],
+		}); err != nil {
+			return err
+		}
+	}
+	s.parents = finfo.parents
+	s.xmlns = xmlns
+	if commonParents >= len(s.parents) {
+		// No new elements to push.
+		return nil
+	}
+	if (vf.Kind() == reflect.Ptr || vf.Kind() == reflect.Interface) && vf.IsNil() {
+		// The element is nil, so no need for the start elements.
+		s.parents = s.parents[:commonParents]
+		return nil
+	}
+	// Push any new parents required.
+	for _, name := range s.parents[commonParents:] {
+		start := &StartElement{
+			Name: Name{
+				Space: s.xmlns,
+				Local: name,
+			},
+		}
+		// Set the default name space for parent elements
+		// to match what we do with other elements.
+		if s.xmlns != s.p.defaultNS {
+			start.setDefaultNamespace()
+		}
+		if err := s.p.writeStart(start); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// An UnsupportedTypeError is returned when Marshal encounters a type
+// that cannot be converted into XML.
+type UnsupportedTypeError struct {
+	Type reflect.Type
+}
+
+func (e *UnsupportedTypeError) Error() string {
+	return "xml: unsupported type: " + e.Type.String()
+}
+
+func isEmptyValue(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+	return false
+}
diff --git a/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go b/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go
new file mode 100644
index 0000000..226cfd0
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go
@@ -0,0 +1,1939 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package xml + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "sync" + "testing" + "time" +) + +type DriveType int + +const ( + HyperDrive DriveType = iota + ImprobabilityDrive +) + +type Passenger struct { + Name []string `xml:"name"` + Weight float32 `xml:"weight"` +} + +type Ship struct { + XMLName struct{} `xml:"spaceship"` + + Name string `xml:"name,attr"` + Pilot string `xml:"pilot,attr"` + Drive DriveType `xml:"drive"` + Age uint `xml:"age"` + Passenger []*Passenger `xml:"passenger"` + secret string +} + +type NamedType string + +type Port struct { + XMLName struct{} `xml:"port"` + Type string `xml:"type,attr,omitempty"` + Comment string `xml:",comment"` + Number string `xml:",chardata"` +} + +type Domain struct { + XMLName struct{} `xml:"domain"` + Country string `xml:",attr,omitempty"` + Name []byte `xml:",chardata"` + Comment []byte `xml:",comment"` +} + +type Book struct { + XMLName struct{} `xml:"book"` + Title string `xml:",chardata"` +} + +type Event struct { + XMLName struct{} `xml:"event"` + Year int `xml:",chardata"` +} + +type Movie struct { + XMLName struct{} `xml:"movie"` + Length uint `xml:",chardata"` +} + +type Pi struct { + XMLName struct{} `xml:"pi"` + Approximation float32 `xml:",chardata"` +} + +type Universe struct { + XMLName struct{} `xml:"universe"` + Visible float64 `xml:",chardata"` +} + +type Particle struct { + XMLName struct{} `xml:"particle"` + HasMass bool `xml:",chardata"` +} + +type Departure struct { + XMLName struct{} `xml:"departure"` + When time.Time `xml:",chardata"` +} + +type SecretAgent struct { + XMLName struct{} `xml:"agent"` + Handle string `xml:"handle,attr"` + Identity string + Obfuscate string `xml:",innerxml"` +} + +type NestedItems struct { + XMLName struct{} `xml:"result"` + Items []string `xml:">item"` + Item1 []string `xml:"Items>item1"` +} + +type NestedOrder struct { + XMLName struct{} `xml:"result"` + Field1 string `xml:"parent>c"` + Field2 string `xml:"parent>b"` + Field3 string `xml:"parent>a"` +} + +type MixedNested struct { + XMLName struct{} `xml:"result"` + A string `xml:"parent1>a"` + B string `xml:"b"` + C string `xml:"parent1>parent2>c"` + D string `xml:"parent1>d"` +} + +type NilTest struct { + A interface{} `xml:"parent1>parent2>a"` + B interface{} `xml:"parent1>b"` + C interface{} `xml:"parent1>parent2>c"` +} + +type Service struct { + XMLName struct{} `xml:"service"` + Domain *Domain `xml:"host>domain"` + Port *Port `xml:"host>port"` + Extra1 interface{} + Extra2 interface{} `xml:"host>extra2"` +} + +var nilStruct *Ship + +type EmbedA struct { + EmbedC + EmbedB EmbedB + FieldA string +} + +type EmbedB struct { + FieldB string + *EmbedC +} + +type EmbedC struct { + FieldA1 string `xml:"FieldA>A1"` + FieldA2 string `xml:"FieldA>A2"` + FieldB string + FieldC string +} + +type NameCasing struct { + XMLName struct{} `xml:"casing"` + Xy string + XY string + XyA string `xml:"Xy,attr"` + XYA string `xml:"XY,attr"` +} + +type NamePrecedence struct { + XMLName Name `xml:"Parent"` + FromTag XMLNameWithoutTag `xml:"InTag"` + FromNameVal XMLNameWithoutTag + FromNameTag XMLNameWithTag + InFieldName string +} + +type XMLNameWithTag struct { + XMLName Name `xml:"InXMLNameTag"` + Value string `xml:",chardata"` +} + +type XMLNameWithNSTag struct { + XMLName Name `xml:"ns InXMLNameWithNSTag"` + Value string `xml:",chardata"` +} + +type XMLNameWithoutTag struct { + XMLName Name + Value string `xml:",chardata"` +} + +type NameInField struct { + Foo Name `xml:"ns foo"` +} + +type AttrTest struct { + 
Int int `xml:",attr"` + Named int `xml:"int,attr"` + Float float64 `xml:",attr"` + Uint8 uint8 `xml:",attr"` + Bool bool `xml:",attr"` + Str string `xml:",attr"` + Bytes []byte `xml:",attr"` +} + +type OmitAttrTest struct { + Int int `xml:",attr,omitempty"` + Named int `xml:"int,attr,omitempty"` + Float float64 `xml:",attr,omitempty"` + Uint8 uint8 `xml:",attr,omitempty"` + Bool bool `xml:",attr,omitempty"` + Str string `xml:",attr,omitempty"` + Bytes []byte `xml:",attr,omitempty"` +} + +type OmitFieldTest struct { + Int int `xml:",omitempty"` + Named int `xml:"int,omitempty"` + Float float64 `xml:",omitempty"` + Uint8 uint8 `xml:",omitempty"` + Bool bool `xml:",omitempty"` + Str string `xml:",omitempty"` + Bytes []byte `xml:",omitempty"` + Ptr *PresenceTest `xml:",omitempty"` +} + +type AnyTest struct { + XMLName struct{} `xml:"a"` + Nested string `xml:"nested>value"` + AnyField AnyHolder `xml:",any"` +} + +type AnyOmitTest struct { + XMLName struct{} `xml:"a"` + Nested string `xml:"nested>value"` + AnyField *AnyHolder `xml:",any,omitempty"` +} + +type AnySliceTest struct { + XMLName struct{} `xml:"a"` + Nested string `xml:"nested>value"` + AnyField []AnyHolder `xml:",any"` +} + +type AnyHolder struct { + XMLName Name + XML string `xml:",innerxml"` +} + +type RecurseA struct { + A string + B *RecurseB +} + +type RecurseB struct { + A *RecurseA + B string +} + +type PresenceTest struct { + Exists *struct{} +} + +type IgnoreTest struct { + PublicSecret string `xml:"-"` +} + +type MyBytes []byte + +type Data struct { + Bytes []byte + Attr []byte `xml:",attr"` + Custom MyBytes +} + +type Plain struct { + V interface{} +} + +type MyInt int + +type EmbedInt struct { + MyInt +} + +type Strings struct { + X []string `xml:"A>B,omitempty"` +} + +type PointerFieldsTest struct { + XMLName Name `xml:"dummy"` + Name *string `xml:"name,attr"` + Age *uint `xml:"age,attr"` + Empty *string `xml:"empty,attr"` + Contents *string `xml:",chardata"` +} + +type ChardataEmptyTest struct { + XMLName Name `xml:"test"` + Contents *string `xml:",chardata"` +} + +type MyMarshalerTest struct { +} + +var _ Marshaler = (*MyMarshalerTest)(nil) + +func (m *MyMarshalerTest) MarshalXML(e *Encoder, start StartElement) error { + e.EncodeToken(start) + e.EncodeToken(CharData([]byte("hello world"))) + e.EncodeToken(EndElement{start.Name}) + return nil +} + +type MyMarshalerAttrTest struct{} + +var _ MarshalerAttr = (*MyMarshalerAttrTest)(nil) + +func (m *MyMarshalerAttrTest) MarshalXMLAttr(name Name) (Attr, error) { + return Attr{name, "hello world"}, nil +} + +type MyMarshalerValueAttrTest struct{} + +var _ MarshalerAttr = MyMarshalerValueAttrTest{} + +func (m MyMarshalerValueAttrTest) MarshalXMLAttr(name Name) (Attr, error) { + return Attr{name, "hello world"}, nil +} + +type MarshalerStruct struct { + Foo MyMarshalerAttrTest `xml:",attr"` +} + +type MarshalerValueStruct struct { + Foo MyMarshalerValueAttrTest `xml:",attr"` +} + +type InnerStruct struct { + XMLName Name `xml:"testns outer"` +} + +type OuterStruct struct { + InnerStruct + IntAttr int `xml:"int,attr"` +} + +type OuterNamedStruct struct { + InnerStruct + XMLName Name `xml:"outerns test"` + IntAttr int `xml:"int,attr"` +} + +type OuterNamedOrderedStruct struct { + XMLName Name `xml:"outerns test"` + InnerStruct + IntAttr int `xml:"int,attr"` +} + +type OuterOuterStruct struct { + OuterStruct +} + +type NestedAndChardata struct { + AB []string `xml:"A>B"` + Chardata string `xml:",chardata"` +} + +type NestedAndComment struct { + AB []string `xml:"A>B"` + Comment 
string `xml:",comment"` +} + +type XMLNSFieldStruct struct { + Ns string `xml:"xmlns,attr"` + Body string +} + +type NamedXMLNSFieldStruct struct { + XMLName struct{} `xml:"testns test"` + Ns string `xml:"xmlns,attr"` + Body string +} + +type XMLNSFieldStructWithOmitEmpty struct { + Ns string `xml:"xmlns,attr,omitempty"` + Body string +} + +type NamedXMLNSFieldStructWithEmptyNamespace struct { + XMLName struct{} `xml:"test"` + Ns string `xml:"xmlns,attr"` + Body string +} + +type RecursiveXMLNSFieldStruct struct { + Ns string `xml:"xmlns,attr"` + Body *RecursiveXMLNSFieldStruct `xml:",omitempty"` + Text string `xml:",omitempty"` +} + +func ifaceptr(x interface{}) interface{} { + return &x +} + +var ( + nameAttr = "Sarah" + ageAttr = uint(12) + contentsAttr = "lorem ipsum" +) + +// Unless explicitly stated as such (or *Plain), all of the +// tests below are two-way tests. When introducing new tests, +// please try to make them two-way as well to ensure that +// marshalling and unmarshalling are as symmetrical as feasible. +var marshalTests = []struct { + Value interface{} + ExpectXML string + MarshalOnly bool + UnmarshalOnly bool +}{ + // Test nil marshals to nothing + {Value: nil, ExpectXML: ``, MarshalOnly: true}, + {Value: nilStruct, ExpectXML: ``, MarshalOnly: true}, + + // Test value types + {Value: &Plain{true}, ExpectXML: `true`}, + {Value: &Plain{false}, ExpectXML: `false`}, + {Value: &Plain{int(42)}, ExpectXML: `42`}, + {Value: &Plain{int8(42)}, ExpectXML: `42`}, + {Value: &Plain{int16(42)}, ExpectXML: `42`}, + {Value: &Plain{int32(42)}, ExpectXML: `42`}, + {Value: &Plain{uint(42)}, ExpectXML: `42`}, + {Value: &Plain{uint8(42)}, ExpectXML: `42`}, + {Value: &Plain{uint16(42)}, ExpectXML: `42`}, + {Value: &Plain{uint32(42)}, ExpectXML: `42`}, + {Value: &Plain{float32(1.25)}, ExpectXML: `1.25`}, + {Value: &Plain{float64(1.25)}, ExpectXML: `1.25`}, + {Value: &Plain{uintptr(0xFFDD)}, ExpectXML: `65501`}, + {Value: &Plain{"gopher"}, ExpectXML: `gopher`}, + {Value: &Plain{[]byte("gopher")}, ExpectXML: `gopher`}, + {Value: &Plain{""}, ExpectXML: `</>`}, + {Value: &Plain{[]byte("")}, ExpectXML: `</>`}, + {Value: &Plain{[3]byte{'<', '/', '>'}}, ExpectXML: `</>`}, + {Value: &Plain{NamedType("potato")}, ExpectXML: `potato`}, + {Value: &Plain{[]int{1, 2, 3}}, ExpectXML: `123`}, + {Value: &Plain{[3]int{1, 2, 3}}, ExpectXML: `123`}, + {Value: ifaceptr(true), MarshalOnly: true, ExpectXML: `true`}, + + // Test time. + { + Value: &Plain{time.Unix(1e9, 123456789).UTC()}, + ExpectXML: `2001-09-09T01:46:40.123456789Z`, + }, + + // A pointer to struct{} may be used to test for an element's presence. + { + Value: &PresenceTest{new(struct{})}, + ExpectXML: ``, + }, + { + Value: &PresenceTest{}, + ExpectXML: ``, + }, + + // A pointer to struct{} may be used to test for an element's presence. + { + Value: &PresenceTest{new(struct{})}, + ExpectXML: ``, + }, + { + Value: &PresenceTest{}, + ExpectXML: ``, + }, + + // A []byte field is only nil if the element was not found. + { + Value: &Data{}, + ExpectXML: ``, + UnmarshalOnly: true, + }, + { + Value: &Data{Bytes: []byte{}, Custom: MyBytes{}, Attr: []byte{}}, + ExpectXML: ``, + UnmarshalOnly: true, + }, + + // Check that []byte works, including named []byte types. 
+ { + Value: &Data{Bytes: []byte("ab"), Custom: MyBytes("cd"), Attr: []byte{'v'}}, + ExpectXML: `abcd`, + }, + + // Test innerxml + { + Value: &SecretAgent{ + Handle: "007", + Identity: "James Bond", + Obfuscate: "", + }, + ExpectXML: `James Bond`, + MarshalOnly: true, + }, + { + Value: &SecretAgent{ + Handle: "007", + Identity: "James Bond", + Obfuscate: "James Bond", + }, + ExpectXML: `James Bond`, + UnmarshalOnly: true, + }, + + // Test structs + {Value: &Port{Type: "ssl", Number: "443"}, ExpectXML: `443`}, + {Value: &Port{Number: "443"}, ExpectXML: `443`}, + {Value: &Port{Type: ""}, ExpectXML: ``}, + {Value: &Port{Number: "443", Comment: "https"}, ExpectXML: `443`}, + {Value: &Port{Number: "443", Comment: "add space-"}, ExpectXML: `443`, MarshalOnly: true}, + {Value: &Domain{Name: []byte("google.com&friends")}, ExpectXML: `google.com&friends`}, + {Value: &Domain{Name: []byte("google.com"), Comment: []byte(" &friends ")}, ExpectXML: `google.com`}, + {Value: &Book{Title: "Pride & Prejudice"}, ExpectXML: `Pride & Prejudice`}, + {Value: &Event{Year: -3114}, ExpectXML: `-3114`}, + {Value: &Movie{Length: 13440}, ExpectXML: `13440`}, + {Value: &Pi{Approximation: 3.14159265}, ExpectXML: `3.1415927`}, + {Value: &Universe{Visible: 9.3e13}, ExpectXML: `9.3e+13`}, + {Value: &Particle{HasMass: true}, ExpectXML: `true`}, + {Value: &Departure{When: ParseTime("2013-01-09T00:15:00-09:00")}, ExpectXML: `2013-01-09T00:15:00-09:00`}, + {Value: atomValue, ExpectXML: atomXml}, + { + Value: &Ship{ + Name: "Heart of Gold", + Pilot: "Computer", + Age: 1, + Drive: ImprobabilityDrive, + Passenger: []*Passenger{ + { + Name: []string{"Zaphod", "Beeblebrox"}, + Weight: 7.25, + }, + { + Name: []string{"Trisha", "McMillen"}, + Weight: 5.5, + }, + { + Name: []string{"Ford", "Prefect"}, + Weight: 7, + }, + { + Name: []string{"Arthur", "Dent"}, + Weight: 6.75, + }, + }, + }, + ExpectXML: `` + + `` + strconv.Itoa(int(ImprobabilityDrive)) + `` + + `1` + + `` + + `Zaphod` + + `Beeblebrox` + + `7.25` + + `` + + `` + + `Trisha` + + `McMillen` + + `5.5` + + `` + + `` + + `Ford` + + `Prefect` + + `7` + + `` + + `` + + `Arthur` + + `Dent` + + `6.75` + + `` + + ``, + }, + + // Test a>b + { + Value: &NestedItems{Items: nil, Item1: nil}, + ExpectXML: `` + + `` + + `` + + ``, + }, + { + Value: &NestedItems{Items: []string{}, Item1: []string{}}, + ExpectXML: `` + + `` + + `` + + ``, + MarshalOnly: true, + }, + { + Value: &NestedItems{Items: nil, Item1: []string{"A"}}, + ExpectXML: `` + + `` + + `A` + + `` + + ``, + }, + { + Value: &NestedItems{Items: []string{"A", "B"}, Item1: nil}, + ExpectXML: `` + + `` + + `A` + + `B` + + `` + + ``, + }, + { + Value: &NestedItems{Items: []string{"A", "B"}, Item1: []string{"C"}}, + ExpectXML: `` + + `` + + `A` + + `B` + + `C` + + `` + + ``, + }, + { + Value: &NestedOrder{Field1: "C", Field2: "B", Field3: "A"}, + ExpectXML: `` + + `` + + `C` + + `B` + + `A` + + `` + + ``, + }, + { + Value: &NilTest{A: "A", B: nil, C: "C"}, + ExpectXML: `` + + `` + + `A` + + `C` + + `` + + ``, + MarshalOnly: true, // Uses interface{} + }, + { + Value: &MixedNested{A: "A", B: "B", C: "C", D: "D"}, + ExpectXML: `` + + `A` + + `B` + + `` + + `C` + + `D` + + `` + + ``, + }, + { + Value: &Service{Port: &Port{Number: "80"}}, + ExpectXML: `80`, + }, + { + Value: &Service{}, + ExpectXML: ``, + }, + { + Value: &Service{Port: &Port{Number: "80"}, Extra1: "A", Extra2: "B"}, + ExpectXML: `` + + `80` + + `A` + + `B` + + ``, + MarshalOnly: true, + }, + { + Value: &Service{Port: &Port{Number: "80"}, Extra2: "example"}, + 
ExpectXML: `` + + `80` + + `example` + + ``, + MarshalOnly: true, + }, + { + Value: &struct { + XMLName struct{} `xml:"space top"` + A string `xml:"x>a"` + B string `xml:"x>b"` + C string `xml:"space x>c"` + C1 string `xml:"space1 x>c"` + D1 string `xml:"space1 x>d"` + E1 string `xml:"x>e"` + }{ + A: "a", + B: "b", + C: "c", + C1: "c1", + D1: "d1", + E1: "e1", + }, + ExpectXML: `` + + `abc` + + `` + + `c1` + + `d1` + + `` + + `` + + `e1` + + `` + + ``, + }, + { + Value: &struct { + XMLName Name + A string `xml:"x>a"` + B string `xml:"x>b"` + C string `xml:"space x>c"` + C1 string `xml:"space1 x>c"` + D1 string `xml:"space1 x>d"` + }{ + XMLName: Name{ + Space: "space0", + Local: "top", + }, + A: "a", + B: "b", + C: "c", + C1: "c1", + D1: "d1", + }, + ExpectXML: `` + + `ab` + + `c` + + `` + + `c1` + + `d1` + + `` + + ``, + }, + { + Value: &struct { + XMLName struct{} `xml:"top"` + B string `xml:"space x>b"` + B1 string `xml:"space1 x>b"` + }{ + B: "b", + B1: "b1", + }, + ExpectXML: `` + + `b` + + `b1` + + ``, + }, + + // Test struct embedding + { + Value: &EmbedA{ + EmbedC: EmbedC{ + FieldA1: "", // Shadowed by A.A + FieldA2: "", // Shadowed by A.A + FieldB: "A.C.B", + FieldC: "A.C.C", + }, + EmbedB: EmbedB{ + FieldB: "A.B.B", + EmbedC: &EmbedC{ + FieldA1: "A.B.C.A1", + FieldA2: "A.B.C.A2", + FieldB: "", // Shadowed by A.B.B + FieldC: "A.B.C.C", + }, + }, + FieldA: "A.A", + }, + ExpectXML: `` + + `A.C.B` + + `A.C.C` + + `` + + `A.B.B` + + `` + + `A.B.C.A1` + + `A.B.C.A2` + + `` + + `A.B.C.C` + + `` + + `A.A` + + ``, + }, + + // Test that name casing matters + { + Value: &NameCasing{Xy: "mixed", XY: "upper", XyA: "mixedA", XYA: "upperA"}, + ExpectXML: `mixedupper`, + }, + + // Test the order in which the XML element name is chosen + { + Value: &NamePrecedence{ + FromTag: XMLNameWithoutTag{Value: "A"}, + FromNameVal: XMLNameWithoutTag{XMLName: Name{Local: "InXMLName"}, Value: "B"}, + FromNameTag: XMLNameWithTag{Value: "C"}, + InFieldName: "D", + }, + ExpectXML: `` + + `A` + + `B` + + `C` + + `D` + + ``, + MarshalOnly: true, + }, + { + Value: &NamePrecedence{ + XMLName: Name{Local: "Parent"}, + FromTag: XMLNameWithoutTag{XMLName: Name{Local: "InTag"}, Value: "A"}, + FromNameVal: XMLNameWithoutTag{XMLName: Name{Local: "FromNameVal"}, Value: "B"}, + FromNameTag: XMLNameWithTag{XMLName: Name{Local: "InXMLNameTag"}, Value: "C"}, + InFieldName: "D", + }, + ExpectXML: `` + + `A` + + `B` + + `C` + + `D` + + ``, + UnmarshalOnly: true, + }, + + // xml.Name works in a plain field as well. + { + Value: &NameInField{Name{Space: "ns", Local: "foo"}}, + ExpectXML: ``, + }, + { + Value: &NameInField{Name{Space: "ns", Local: "foo"}}, + ExpectXML: ``, + UnmarshalOnly: true, + }, + + // Marshaling zero xml.Name uses the tag or field name. 
+ { + Value: &NameInField{}, + ExpectXML: ``, + MarshalOnly: true, + }, + + // Test attributes + { + Value: &AttrTest{ + Int: 8, + Named: 9, + Float: 23.5, + Uint8: 255, + Bool: true, + Str: "str", + Bytes: []byte("byt"), + }, + ExpectXML: ``, + }, + { + Value: &AttrTest{Bytes: []byte{}}, + ExpectXML: ``, + }, + { + Value: &OmitAttrTest{ + Int: 8, + Named: 9, + Float: 23.5, + Uint8: 255, + Bool: true, + Str: "str", + Bytes: []byte("byt"), + }, + ExpectXML: ``, + }, + { + Value: &OmitAttrTest{}, + ExpectXML: ``, + }, + + // pointer fields + { + Value: &PointerFieldsTest{Name: &nameAttr, Age: &ageAttr, Contents: &contentsAttr}, + ExpectXML: `lorem ipsum`, + MarshalOnly: true, + }, + + // empty chardata pointer field + { + Value: &ChardataEmptyTest{}, + ExpectXML: ``, + MarshalOnly: true, + }, + + // omitempty on fields + { + Value: &OmitFieldTest{ + Int: 8, + Named: 9, + Float: 23.5, + Uint8: 255, + Bool: true, + Str: "str", + Bytes: []byte("byt"), + Ptr: &PresenceTest{}, + }, + ExpectXML: `` + + `8` + + `9` + + `23.5` + + `255` + + `true` + + `str` + + `byt` + + `` + + ``, + }, + { + Value: &OmitFieldTest{}, + ExpectXML: ``, + }, + + // Test ",any" + { + ExpectXML: `knownunknown`, + Value: &AnyTest{ + Nested: "known", + AnyField: AnyHolder{ + XMLName: Name{Local: "other"}, + XML: "unknown", + }, + }, + }, + { + Value: &AnyTest{Nested: "known", + AnyField: AnyHolder{ + XML: "", + XMLName: Name{Local: "AnyField"}, + }, + }, + ExpectXML: `known`, + }, + { + ExpectXML: `b`, + Value: &AnyOmitTest{ + Nested: "b", + }, + }, + { + ExpectXML: `bei`, + Value: &AnySliceTest{ + Nested: "b", + AnyField: []AnyHolder{ + { + XMLName: Name{Local: "c"}, + XML: "e", + }, + { + XMLName: Name{Space: "f", Local: "g"}, + XML: "i", + }, + }, + }, + }, + { + ExpectXML: `b`, + Value: &AnySliceTest{ + Nested: "b", + }, + }, + + // Test recursive types. + { + Value: &RecurseA{ + A: "a1", + B: &RecurseB{ + A: &RecurseA{"a2", nil}, + B: "b1", + }, + }, + ExpectXML: `a1a2b1`, + }, + + // Test ignoring fields via "-" tag + { + ExpectXML: ``, + Value: &IgnoreTest{}, + }, + { + ExpectXML: ``, + Value: &IgnoreTest{PublicSecret: "can't tell"}, + MarshalOnly: true, + }, + { + ExpectXML: `ignore me`, + Value: &IgnoreTest{}, + UnmarshalOnly: true, + }, + + // Test escaping. + { + ExpectXML: `dquote: "; squote: '; ampersand: &; less: <; greater: >;`, + Value: &AnyTest{ + Nested: `dquote: "; squote: '; ampersand: &; less: <; greater: >;`, + AnyField: AnyHolder{XMLName: Name{Local: "empty"}}, + }, + }, + { + ExpectXML: `newline: ; cr: ; tab: ;`, + Value: &AnyTest{ + Nested: "newline: \n; cr: \r; tab: \t;", + AnyField: AnyHolder{XMLName: Name{Local: "AnyField"}}, + }, + }, + { + ExpectXML: "1\r2\r\n3\n\r4\n5", + Value: &AnyTest{ + Nested: "1\n2\n3\n\n4\n5", + }, + UnmarshalOnly: true, + }, + { + ExpectXML: `42`, + Value: &EmbedInt{ + MyInt: 42, + }, + }, + // Test omitempty with parent chain; see golang.org/issue/4168. + { + ExpectXML: ``, + Value: &Strings{}, + }, + // Custom marshalers. 
+ { + ExpectXML: `hello world`, + Value: &MyMarshalerTest{}, + }, + { + ExpectXML: ``, + Value: &MarshalerStruct{}, + }, + { + ExpectXML: ``, + Value: &MarshalerValueStruct{}, + }, + { + ExpectXML: ``, + Value: &OuterStruct{IntAttr: 10}, + }, + { + ExpectXML: ``, + Value: &OuterNamedStruct{XMLName: Name{Space: "outerns", Local: "test"}, IntAttr: 10}, + }, + { + ExpectXML: ``, + Value: &OuterNamedOrderedStruct{XMLName: Name{Space: "outerns", Local: "test"}, IntAttr: 10}, + }, + { + ExpectXML: ``, + Value: &OuterOuterStruct{OuterStruct{IntAttr: 10}}, + }, + { + ExpectXML: `test`, + Value: &NestedAndChardata{AB: make([]string, 2), Chardata: "test"}, + }, + { + ExpectXML: ``, + Value: &NestedAndComment{AB: make([]string, 2), Comment: "test"}, + }, + { + ExpectXML: `hello world`, + Value: &XMLNSFieldStruct{Ns: "http://example.com/ns", Body: "hello world"}, + }, + { + ExpectXML: `hello world`, + Value: &NamedXMLNSFieldStruct{Ns: "http://example.com/ns", Body: "hello world"}, + }, + { + ExpectXML: `hello world`, + Value: &NamedXMLNSFieldStruct{Ns: "", Body: "hello world"}, + }, + { + ExpectXML: `hello world`, + Value: &XMLNSFieldStructWithOmitEmpty{Body: "hello world"}, + }, + { + // The xmlns attribute must be ignored because the + // element is in the empty namespace, so it's not possible + // to set the default namespace to something non-empty. + ExpectXML: `hello world`, + Value: &NamedXMLNSFieldStructWithEmptyNamespace{Ns: "foo", Body: "hello world"}, + MarshalOnly: true, + }, + { + ExpectXML: `hello world`, + Value: &RecursiveXMLNSFieldStruct{ + Ns: "foo", + Body: &RecursiveXMLNSFieldStruct{ + Text: "hello world", + }, + }, + }, +} + +func TestMarshal(t *testing.T) { + for idx, test := range marshalTests { + if test.UnmarshalOnly { + continue + } + data, err := Marshal(test.Value) + if err != nil { + t.Errorf("#%d: marshal(%#v): %s", idx, test.Value, err) + continue + } + if got, want := string(data), test.ExpectXML; got != want { + if strings.Contains(want, "\n") { + t.Errorf("#%d: marshal(%#v):\nHAVE:\n%s\nWANT:\n%s", idx, test.Value, got, want) + } else { + t.Errorf("#%d: marshal(%#v):\nhave %#q\nwant %#q", idx, test.Value, got, want) + } + } + } +} + +type AttrParent struct { + X string `xml:"X>Y,attr"` +} + +type BadAttr struct { + Name []string `xml:"name,attr"` +} + +var marshalErrorTests = []struct { + Value interface{} + Err string + Kind reflect.Kind +}{ + { + Value: make(chan bool), + Err: "xml: unsupported type: chan bool", + Kind: reflect.Chan, + }, + { + Value: map[string]string{ + "question": "What do you get when you multiply six by nine?", + "answer": "42", + }, + Err: "xml: unsupported type: map[string]string", + Kind: reflect.Map, + }, + { + Value: map[*Ship]bool{nil: false}, + Err: "xml: unsupported type: map[*xml.Ship]bool", + Kind: reflect.Map, + }, + { + Value: &Domain{Comment: []byte("f--bar")}, + Err: `xml: comments must not contain "--"`, + }, + // Reject parent chain with attr, never worked; see golang.org/issue/5033. 
+ { + Value: &AttrParent{}, + Err: `xml: X>Y chain not valid with attr flag`, + }, + { + Value: BadAttr{[]string{"X", "Y"}}, + Err: `xml: unsupported type: []string`, + }, +} + +var marshalIndentTests = []struct { + Value interface{} + Prefix string + Indent string + ExpectXML string +}{ + { + Value: &SecretAgent{ + Handle: "007", + Identity: "James Bond", + Obfuscate: "", + }, + Prefix: "", + Indent: "\t", + ExpectXML: fmt.Sprintf("\n\tJames Bond\n"), + }, +} + +func TestMarshalErrors(t *testing.T) { + for idx, test := range marshalErrorTests { + data, err := Marshal(test.Value) + if err == nil { + t.Errorf("#%d: marshal(%#v) = [success] %q, want error %v", idx, test.Value, data, test.Err) + continue + } + if err.Error() != test.Err { + t.Errorf("#%d: marshal(%#v) = [error] %v, want %v", idx, test.Value, err, test.Err) + } + if test.Kind != reflect.Invalid { + if kind := err.(*UnsupportedTypeError).Type.Kind(); kind != test.Kind { + t.Errorf("#%d: marshal(%#v) = [error kind] %s, want %s", idx, test.Value, kind, test.Kind) + } + } + } +} + +// Do invertibility testing on the various structures that we test +func TestUnmarshal(t *testing.T) { + for i, test := range marshalTests { + if test.MarshalOnly { + continue + } + if _, ok := test.Value.(*Plain); ok { + continue + } + vt := reflect.TypeOf(test.Value) + dest := reflect.New(vt.Elem()).Interface() + err := Unmarshal([]byte(test.ExpectXML), dest) + + switch fix := dest.(type) { + case *Feed: + fix.Author.InnerXML = "" + for i := range fix.Entry { + fix.Entry[i].Author.InnerXML = "" + } + } + + if err != nil { + t.Errorf("#%d: unexpected error: %#v", i, err) + } else if got, want := dest, test.Value; !reflect.DeepEqual(got, want) { + t.Errorf("#%d: unmarshal(%q):\nhave %#v\nwant %#v", i, test.ExpectXML, got, want) + } + } +} + +func TestMarshalIndent(t *testing.T) { + for i, test := range marshalIndentTests { + data, err := MarshalIndent(test.Value, test.Prefix, test.Indent) + if err != nil { + t.Errorf("#%d: Error: %s", i, err) + continue + } + if got, want := string(data), test.ExpectXML; got != want { + t.Errorf("#%d: MarshalIndent:\nGot:%s\nWant:\n%s", i, got, want) + } + } +} + +type limitedBytesWriter struct { + w io.Writer + remain int // until writes fail +} + +func (lw *limitedBytesWriter) Write(p []byte) (n int, err error) { + if lw.remain <= 0 { + println("error") + return 0, errors.New("write limit hit") + } + if len(p) > lw.remain { + p = p[:lw.remain] + n, _ = lw.w.Write(p) + lw.remain = 0 + return n, errors.New("write limit hit") + } + n, err = lw.w.Write(p) + lw.remain -= n + return n, err +} + +func TestMarshalWriteErrors(t *testing.T) { + var buf bytes.Buffer + const writeCap = 1024 + w := &limitedBytesWriter{&buf, writeCap} + enc := NewEncoder(w) + var err error + var i int + const n = 4000 + for i = 1; i <= n; i++ { + err = enc.Encode(&Passenger{ + Name: []string{"Alice", "Bob"}, + Weight: 5, + }) + if err != nil { + break + } + } + if err == nil { + t.Error("expected an error") + } + if i == n { + t.Errorf("expected to fail before the end") + } + if buf.Len() != writeCap { + t.Errorf("buf.Len() = %d; want %d", buf.Len(), writeCap) + } +} + +func TestMarshalWriteIOErrors(t *testing.T) { + enc := NewEncoder(errWriter{}) + + expectErr := "unwritable" + err := enc.Encode(&Passenger{}) + if err == nil || err.Error() != expectErr { + t.Errorf("EscapeTest = [error] %v, want %v", err, expectErr) + } +} + +func TestMarshalFlush(t *testing.T) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + if err := 
enc.EncodeToken(CharData("hello world")); err != nil { + t.Fatalf("enc.EncodeToken: %v", err) + } + if buf.Len() > 0 { + t.Fatalf("enc.EncodeToken caused actual write: %q", buf.Bytes()) + } + if err := enc.Flush(); err != nil { + t.Fatalf("enc.Flush: %v", err) + } + if buf.String() != "hello world" { + t.Fatalf("after enc.Flush, buf.String() = %q, want %q", buf.String(), "hello world") + } +} + +var encodeElementTests = []struct { + desc string + value interface{} + start StartElement + expectXML string +}{{ + desc: "simple string", + value: "hello", + start: StartElement{ + Name: Name{Local: "a"}, + }, + expectXML: `hello`, +}, { + desc: "string with added attributes", + value: "hello", + start: StartElement{ + Name: Name{Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "x"}, + Value: "y", + }, { + Name: Name{Local: "foo"}, + Value: "bar", + }}, + }, + expectXML: `hello`, +}, { + desc: "start element with default name space", + value: struct { + Foo XMLNameWithNSTag + }{ + Foo: XMLNameWithNSTag{ + Value: "hello", + }, + }, + start: StartElement{ + Name: Name{Space: "ns", Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "xmlns"}, + // "ns" is the name space defined in XMLNameWithNSTag + Value: "ns", + }}, + }, + expectXML: `hello`, +}, { + desc: "start element in name space with different default name space", + value: struct { + Foo XMLNameWithNSTag + }{ + Foo: XMLNameWithNSTag{ + Value: "hello", + }, + }, + start: StartElement{ + Name: Name{Space: "ns2", Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "xmlns"}, + // "ns" is the name space defined in XMLNameWithNSTag + Value: "ns", + }}, + }, + expectXML: `hello`, +}, { + desc: "XMLMarshaler with start element with default name space", + value: &MyMarshalerTest{}, + start: StartElement{ + Name: Name{Space: "ns2", Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "xmlns"}, + // "ns" is the name space defined in XMLNameWithNSTag + Value: "ns", + }}, + }, + expectXML: `hello world`, +}} + +func TestEncodeElement(t *testing.T) { + for idx, test := range encodeElementTests { + var buf bytes.Buffer + enc := NewEncoder(&buf) + err := enc.EncodeElement(test.value, test.start) + if err != nil { + t.Fatalf("enc.EncodeElement: %v", err) + } + err = enc.Flush() + if err != nil { + t.Fatalf("enc.Flush: %v", err) + } + if got, want := buf.String(), test.expectXML; got != want { + t.Errorf("#%d(%s): EncodeElement(%#v, %#v):\nhave %#q\nwant %#q", idx, test.desc, test.value, test.start, got, want) + } + } +} + +func BenchmarkMarshal(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + Marshal(atomValue) + } +} + +func BenchmarkUnmarshal(b *testing.B) { + b.ReportAllocs() + xml := []byte(atomXml) + for i := 0; i < b.N; i++ { + Unmarshal(xml, &Feed{}) + } +} + +// golang.org/issue/6556 +func TestStructPointerMarshal(t *testing.T) { + type A struct { + XMLName string `xml:"a"` + B []interface{} + } + type C struct { + XMLName Name + Value string `xml:"value"` + } + + a := new(A) + a.B = append(a.B, &C{ + XMLName: Name{Local: "c"}, + Value: "x", + }) + + b, err := Marshal(a) + if err != nil { + t.Fatal(err) + } + if x := string(b); x != "x" { + t.Fatal(x) + } + var v A + err = Unmarshal(b, &v) + if err != nil { + t.Fatal(err) + } +} + +var encodeTokenTests = []struct { + desc string + toks []Token + want string + err string +}{{ + desc: "start element with name space", + toks: []Token{ + StartElement{Name{"space", "local"}, nil}, + }, + want: ``, +}, { + desc: "start element with no name", + toks: []Token{ + StartElement{Name{"space", ""}, 
nil}, + }, + err: "xml: start tag with no name", +}, { + desc: "end element with no name", + toks: []Token{ + EndElement{Name{"space", ""}}, + }, + err: "xml: end tag with no name", +}, { + desc: "char data", + toks: []Token{ + CharData("foo"), + }, + want: `foo`, +}, { + desc: "char data with escaped chars", + toks: []Token{ + CharData(" \t\n"), + }, + want: " \n", +}, { + desc: "comment", + toks: []Token{ + Comment("foo"), + }, + want: ``, +}, { + desc: "comment with invalid content", + toks: []Token{ + Comment("foo-->"), + }, + err: "xml: EncodeToken of Comment containing --> marker", +}, { + desc: "proc instruction", + toks: []Token{ + ProcInst{"Target", []byte("Instruction")}, + }, + want: ``, +}, { + desc: "proc instruction with empty target", + toks: []Token{ + ProcInst{"", []byte("Instruction")}, + }, + err: "xml: EncodeToken of ProcInst with invalid Target", +}, { + desc: "proc instruction with bad content", + toks: []Token{ + ProcInst{"", []byte("Instruction?>")}, + }, + err: "xml: EncodeToken of ProcInst with invalid Target", +}, { + desc: "directive", + toks: []Token{ + Directive("foo"), + }, + want: ``, +}, { + desc: "more complex directive", + toks: []Token{ + Directive("DOCTYPE doc [ '> ]"), + }, + want: `'> ]>`, +}, { + desc: "directive instruction with bad name", + toks: []Token{ + Directive("foo>"), + }, + err: "xml: EncodeToken of Directive containing wrong < or > markers", +}, { + desc: "end tag without start tag", + toks: []Token{ + EndElement{Name{"foo", "bar"}}, + }, + err: "xml: end tag without start tag", +}, { + desc: "mismatching end tag local name", + toks: []Token{ + StartElement{Name{"", "foo"}, nil}, + EndElement{Name{"", "bar"}}, + }, + err: "xml: end tag does not match start tag ", + want: ``, +}, { + desc: "mismatching end tag namespace", + toks: []Token{ + StartElement{Name{"space", "foo"}, nil}, + EndElement{Name{"another", "foo"}}, + }, + err: "xml: end tag in namespace another does not match start tag in namespace space", + want: ``, +}, { + desc: "start element with explicit namespace", + toks: []Token{ + StartElement{Name{"space", "local"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + {Name{"space", "foo"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "start element with explicit namespace and colliding prefix", + toks: []Token{ + StartElement{Name{"space", "local"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + {Name{"space", "foo"}, "value"}, + {Name{"x", "bar"}, "other"}, + }}, + }, + want: ``, +}, { + desc: "start element using previously defined namespace", + toks: []Token{ + StartElement{Name{"", "local"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"space", "x"}, "y"}, + }}, + }, + want: ``, +}, { + desc: "nested name space with same prefix", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space1"}, + }}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space2"}, + }}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"space1", "a"}, "space1 value"}, + {Name{"space2", "b"}, "space2 value"}, + }}, + EndElement{Name{"", "foo"}}, + EndElement{Name{"", "foo"}}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"space1", "a"}, "space1 value"}, + {Name{"space2", "b"}, "space2 value"}, + }}, + }, + want: ``, +}, { + desc: "start element defining several prefixes for the same name space", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xmlns", "a"}, "space"}, + {Name{"xmlns", "b"}, "space"}, + {Name{"space", "x"}, "value"}, + }}, + 
}, + want: ``, +}, { + desc: "nested element redefines name space", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xmlns", "y"}, "space"}, + {Name{"space", "a"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "nested element creates alias for default name space", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xmlns", "y"}, "space"}, + {Name{"space", "a"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "nested element defines default name space with existing prefix", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + {Name{"space", "a"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "nested element uses empty attribute name space when default ns defined", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "attr"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "redefine xmlns", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"foo", "xmlns"}, "space"}, + }}, + }, + err: `xml: cannot redefine xmlns attribute prefix`, +}, { + desc: "xmlns with explicit name space #1", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xml", "xmlns"}, "space"}, + }}, + }, + want: ``, +}, { + desc: "xmlns with explicit name space #2", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{xmlURL, "xmlns"}, "space"}, + }}, + }, + want: ``, +}, { + desc: "empty name space declaration is ignored", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "foo"}, ""}, + }}, + }, + want: ``, +}, { + desc: "attribute with no name is ignored", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"", ""}, "value"}, + }}, + }, + want: ``, +}, { + desc: "namespace URL with non-valid name", + toks: []Token{ + StartElement{Name{"/34", "foo"}, []Attr{ + {Name{"/34", "x"}, "value"}, + }}, + }, + want: `<_:foo xmlns:_="/34" _:x="value">`, +}, { + desc: "nested element resets default namespace to empty", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"", "xmlns"}, ""}, + {Name{"", "x"}, "value"}, + {Name{"space", "x"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "nested element requires empty default name space", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"", "foo"}, nil}, + }, + want: ``, +}, { + desc: "attribute uses name space from xmlns", + toks: []Token{ + StartElement{Name{"some/space", "foo"}, []Attr{ + {Name{"", "attr"}, "value"}, + {Name{"some/space", "other"}, "other value"}, + }}, + }, + want: ``, +}, { + desc: "default name space should not be used by attributes", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + {Name{"xmlns", "bar"}, "space"}, + {Name{"space", "baz"}, "foo"}, + }}, + StartElement{Name{"space", "baz"}, nil}, + EndElement{Name{"space", "baz"}}, + EndElement{Name{"space", "foo"}}, + }, + want: ``, +}, { + desc: "default name space not used by attributes, not explicitly defined", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, 
"space"}, + {Name{"space", "baz"}, "foo"}, + }}, + StartElement{Name{"space", "baz"}, nil}, + EndElement{Name{"space", "baz"}}, + EndElement{Name{"space", "foo"}}, + }, + want: ``, +}, { + desc: "impossible xmlns declaration", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"space", "bar"}, []Attr{ + {Name{"space", "attr"}, "value"}, + }}, + }, + want: ``, +}} + +func TestEncodeToken(t *testing.T) { +loop: + for i, tt := range encodeTokenTests { + var buf bytes.Buffer + enc := NewEncoder(&buf) + var err error + for j, tok := range tt.toks { + err = enc.EncodeToken(tok) + if err != nil && j < len(tt.toks)-1 { + t.Errorf("#%d %s token #%d: %v", i, tt.desc, j, err) + continue loop + } + } + errorf := func(f string, a ...interface{}) { + t.Errorf("#%d %s token #%d:%s", i, tt.desc, len(tt.toks)-1, fmt.Sprintf(f, a...)) + } + switch { + case tt.err != "" && err == nil: + errorf(" expected error; got none") + continue + case tt.err == "" && err != nil: + errorf(" got error: %v", err) + continue + case tt.err != "" && err != nil && tt.err != err.Error(): + errorf(" error mismatch; got %v, want %v", err, tt.err) + continue + } + if err := enc.Flush(); err != nil { + errorf(" %v", err) + continue + } + if got := buf.String(); got != tt.want { + errorf("\ngot %v\nwant %v", got, tt.want) + continue + } + } +} + +func TestProcInstEncodeToken(t *testing.T) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + + if err := enc.EncodeToken(ProcInst{"xml", []byte("Instruction")}); err != nil { + t.Fatalf("enc.EncodeToken: expected to be able to encode xml target ProcInst as first token, %s", err) + } + + if err := enc.EncodeToken(ProcInst{"Target", []byte("Instruction")}); err != nil { + t.Fatalf("enc.EncodeToken: expected to be able to add non-xml target ProcInst") + } + + if err := enc.EncodeToken(ProcInst{"xml", []byte("Instruction")}); err == nil { + t.Fatalf("enc.EncodeToken: expected to not be allowed to encode xml target ProcInst when not first token") + } +} + +func TestDecodeEncode(t *testing.T) { + var in, out bytes.Buffer + in.WriteString(` + + + +`) + dec := NewDecoder(&in) + enc := NewEncoder(&out) + for tok, err := dec.Token(); err == nil; tok, err = dec.Token() { + err = enc.EncodeToken(tok) + if err != nil { + t.Fatalf("enc.EncodeToken: Unable to encode token (%#v), %v", tok, err) + } + } +} + +// Issue 9796. Used to fail with GORACE="halt_on_error=1" -race. +func TestRace9796(t *testing.T) { + type A struct{} + type B struct { + C []A `xml:"X>Y"` + } + var wg sync.WaitGroup + for i := 0; i < 2; i++ { + wg.Add(1) + go func() { + Marshal(B{[]A{{}}}) + wg.Done() + }() + } + wg.Wait() +} + +func TestIsValidDirective(t *testing.T) { + testOK := []string{ + "<>", + "< < > >", + "' '>' >", + " ]>", + " '<' ' doc ANY> ]>", + ">>> a < comment --> [ ] >", + } + testKO := []string{ + "<", + ">", + "", + "< > > < < >", + " -->", + "", + "'", + "", + } + for _, s := range testOK { + if !isValidDirective(Directive(s)) { + t.Errorf("Directive %q is expected to be valid", s) + } + } + for _, s := range testKO { + if isValidDirective(Directive(s)) { + t.Errorf("Directive %q is expected to be invalid", s) + } + } +} + +// Issue 11719. EncodeToken used to silently eat tokens with an invalid type. 
+func TestSimpleUseOfEncodeToken(t *testing.T) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + if err := enc.EncodeToken(&StartElement{Name: Name{"", "object1"}}); err == nil { + t.Errorf("enc.EncodeToken: pointer type should be rejected") + } + if err := enc.EncodeToken(&EndElement{Name: Name{"", "object1"}}); err == nil { + t.Errorf("enc.EncodeToken: pointer type should be rejected") + } + if err := enc.EncodeToken(StartElement{Name: Name{"", "object2"}}); err != nil { + t.Errorf("enc.EncodeToken: StartElement %s", err) + } + if err := enc.EncodeToken(EndElement{Name: Name{"", "object2"}}); err != nil { + t.Errorf("enc.EncodeToken: EndElement %s", err) + } + if err := enc.EncodeToken(Universe{}); err == nil { + t.Errorf("enc.EncodeToken: invalid type not caught") + } + if err := enc.Flush(); err != nil { + t.Errorf("enc.Flush: %s", err) + } + if buf.Len() == 0 { + t.Errorf("enc.EncodeToken: empty buffer") + } + want := "" + if buf.String() != want { + t.Errorf("enc.EncodeToken: expected %q; got %q", want, buf.String()) + } +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/read.go b/vendor/golang.org/x/net/webdav/internal/xml/read.go new file mode 100644 index 0000000..4089056 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/read.go @@ -0,0 +1,692 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" +) + +// BUG(rsc): Mapping between XML elements and data structures is inherently flawed: +// an XML element is an order-dependent collection of anonymous +// values, while a data structure is an order-independent collection +// of named values. +// See package json for a textual representation more suitable +// to data structures. + +// Unmarshal parses the XML-encoded data and stores the result in +// the value pointed to by v, which must be an arbitrary struct, +// slice, or string. Well-formed data that does not fit into v is +// discarded. +// +// Because Unmarshal uses the reflect package, it can only assign +// to exported (upper case) fields. Unmarshal uses a case-sensitive +// comparison to match XML element names to tag values and struct +// field names. +// +// Unmarshal maps an XML element to a struct using the following rules. +// In the rules, the tag of a field refers to the value associated with the +// key 'xml' in the struct field's tag (see the example above). +// +// * If the struct has a field of type []byte or string with tag +// ",innerxml", Unmarshal accumulates the raw XML nested inside the +// element in that field. The rest of the rules still apply. +// +// * If the struct has a field named XMLName of type xml.Name, +// Unmarshal records the element name in that field. +// +// * If the XMLName field has an associated tag of the form +// "name" or "namespace-URL name", the XML element must have +// the given name (and, optionally, name space) or else Unmarshal +// returns an error. +// +// * If the XML element has an attribute whose name matches a +// struct field name with an associated tag containing ",attr" or +// the explicit name in a struct field tag of the form "name,attr", +// Unmarshal records the attribute value in that field. +// +// * If the XML element contains character data, that data is +// accumulated in the first struct field that has tag ",chardata". +// The struct field may have type []byte or string. 
+// If there is no such field, the character data is discarded. +// +// * If the XML element contains comments, they are accumulated in +// the first struct field that has tag ",comment". The struct +// field may have type []byte or string. If there is no such +// field, the comments are discarded. +// +// * If the XML element contains a sub-element whose name matches +// the prefix of a tag formatted as "a" or "a>b>c", unmarshal +// will descend into the XML structure looking for elements with the +// given names, and will map the innermost elements to that struct +// field. A tag starting with ">" is equivalent to one starting +// with the field name followed by ">". +// +// * If the XML element contains a sub-element whose name matches +// a struct field's XMLName tag and the struct field has no +// explicit name tag as per the previous rule, unmarshal maps +// the sub-element to that struct field. +// +// * If the XML element contains a sub-element whose name matches a +// field without any mode flags (",attr", ",chardata", etc), Unmarshal +// maps the sub-element to that struct field. +// +// * If the XML element contains a sub-element that hasn't matched any +// of the above rules and the struct has a field with tag ",any", +// unmarshal maps the sub-element to that struct field. +// +// * An anonymous struct field is handled as if the fields of its +// value were part of the outer struct. +// +// * A struct field with tag "-" is never unmarshalled into. +// +// Unmarshal maps an XML element to a string or []byte by saving the +// concatenation of that element's character data in the string or +// []byte. The saved []byte is never nil. +// +// Unmarshal maps an attribute value to a string or []byte by saving +// the value in the string or slice. +// +// Unmarshal maps an XML element to a slice by extending the length of +// the slice and mapping the element to the newly created value. +// +// Unmarshal maps an XML element or attribute value to a bool by +// setting it to the boolean value represented by the string. +// +// Unmarshal maps an XML element or attribute value to an integer or +// floating-point field by setting the field to the result of +// interpreting the string value in decimal. There is no check for +// overflow. +// +// Unmarshal maps an XML element to an xml.Name by recording the +// element name. +// +// Unmarshal maps an XML element to a pointer by setting the pointer +// to a freshly allocated value and then mapping the element to that value. +// +func Unmarshal(data []byte, v interface{}) error { + return NewDecoder(bytes.NewReader(data)).Decode(v) +} + +// Decode works like xml.Unmarshal, except it reads the decoder +// stream to find the start element. +func (d *Decoder) Decode(v interface{}) error { + return d.DecodeElement(v, nil) +} + +// DecodeElement works like xml.Unmarshal except that it takes +// a pointer to the start XML element to decode into v. +// It is useful when a client reads some raw XML tokens itself +// but also wants to defer to Unmarshal for some elements. +func (d *Decoder) DecodeElement(v interface{}, start *StartElement) error { + val := reflect.ValueOf(v) + if val.Kind() != reflect.Ptr { + return errors.New("non-pointer passed to Unmarshal") + } + return d.unmarshal(val.Elem(), start) +} + +// An UnmarshalError represents an error in the unmarshalling process. 
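+// For example (an illustrative sketch, not part of this file), a mismatched
+// element name is reported as an UnmarshalError:
+//
+//	var v struct {
+//		XMLName Name `xml:"a"`
+//	}
+//	err := Unmarshal([]byte("<b></b>"), &v)
+//	// err.Error() == "expected element type <a> but have <b>"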
+type UnmarshalError string + +func (e UnmarshalError) Error() string { return string(e) } + +// Unmarshaler is the interface implemented by objects that can unmarshal +// an XML element description of themselves. +// +// UnmarshalXML decodes a single XML element +// beginning with the given start element. +// If it returns an error, the outer call to Unmarshal stops and +// returns that error. +// UnmarshalXML must consume exactly one XML element. +// One common implementation strategy is to unmarshal into +// a separate value with a layout matching the expected XML +// using d.DecodeElement, and then to copy the data from +// that value into the receiver. +// Another common strategy is to use d.Token to process the +// XML object one token at a time. +// UnmarshalXML may not use d.RawToken. +type Unmarshaler interface { + UnmarshalXML(d *Decoder, start StartElement) error +} + +// UnmarshalerAttr is the interface implemented by objects that can unmarshal +// an XML attribute description of themselves. +// +// UnmarshalXMLAttr decodes a single XML attribute. +// If it returns an error, the outer call to Unmarshal stops and +// returns that error. +// UnmarshalXMLAttr is used only for struct fields with the +// "attr" option in the field tag. +type UnmarshalerAttr interface { + UnmarshalXMLAttr(attr Attr) error +} + +// receiverType returns the receiver type to use in an expression like "%s.MethodName". +func receiverType(val interface{}) string { + t := reflect.TypeOf(val) + if t.Name() != "" { + return t.String() + } + return "(" + t.String() + ")" +} + +// unmarshalInterface unmarshals a single XML element into val. +// start is the opening tag of the element. +func (p *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error { + // Record that decoder must stop at end tag corresponding to start. + p.pushEOF() + + p.unmarshalDepth++ + err := val.UnmarshalXML(p, *start) + p.unmarshalDepth-- + if err != nil { + p.popEOF() + return err + } + + if !p.popEOF() { + return fmt.Errorf("xml: %s.UnmarshalXML did not consume entire <%s> element", receiverType(val), start.Name.Local) + } + + return nil +} + +// unmarshalTextInterface unmarshals a single XML element into val. +// The chardata contained in the element (but not its children) +// is passed to the text unmarshaler. +func (p *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler, start *StartElement) error { + var buf []byte + depth := 1 + for depth > 0 { + t, err := p.Token() + if err != nil { + return err + } + switch t := t.(type) { + case CharData: + if depth == 1 { + buf = append(buf, t...) + } + case StartElement: + depth++ + case EndElement: + depth-- + } + } + return val.UnmarshalText(buf) +} + +// unmarshalAttr unmarshals a single XML attribute into val. +func (p *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error { + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + val = val.Elem() + } + + if val.CanInterface() && val.Type().Implements(unmarshalerAttrType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return val.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(unmarshalerAttrType) { + return pv.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) + } + } + + // Not an UnmarshalerAttr; try encoding.TextUnmarshaler. 
+ if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return val.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + return pv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) + } + } + + copyValue(val, []byte(attr.Value)) + return nil +} + +var ( + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + unmarshalerAttrType = reflect.TypeOf((*UnmarshalerAttr)(nil)).Elem() + textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +) + +// Unmarshal a single XML element into val. +func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error { + // Find start element if we need it. + if start == nil { + for { + tok, err := p.Token() + if err != nil { + return err + } + if t, ok := tok.(StartElement); ok { + start = &t + break + } + } + } + + // Load value from interface, but only if the result will be + // usefully addressable. + if val.Kind() == reflect.Interface && !val.IsNil() { + e := val.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() { + val = e + } + } + + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + val = val.Elem() + } + + if val.CanInterface() && val.Type().Implements(unmarshalerType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return p.unmarshalInterface(val.Interface().(Unmarshaler), start) + } + + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(unmarshalerType) { + return p.unmarshalInterface(pv.Interface().(Unmarshaler), start) + } + } + + if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { + return p.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler), start) + } + + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + return p.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler), start) + } + } + + var ( + data []byte + saveData reflect.Value + comment []byte + saveComment reflect.Value + saveXML reflect.Value + saveXMLIndex int + saveXMLData []byte + saveAny reflect.Value + sv reflect.Value + tinfo *typeInfo + err error + ) + + switch v := val; v.Kind() { + default: + return errors.New("unknown type " + v.Type().String()) + + case reflect.Interface: + // TODO: For now, simply ignore the field. In the near + // future we may choose to unmarshal the start + // element on it, if not nil. + return p.Skip() + + case reflect.Slice: + typ := v.Type() + if typ.Elem().Kind() == reflect.Uint8 { + // []byte + saveData = v + break + } + + // Slice of element values. + // Grow slice. + n := v.Len() + if n >= v.Cap() { + ncap := 2 * n + if ncap < 4 { + ncap = 4 + } + new := reflect.MakeSlice(typ, n, ncap) + reflect.Copy(new, v) + v.Set(new) + } + v.SetLen(n + 1) + + // Recur to read element into slice. 
+ if err := p.unmarshal(v.Index(n), start); err != nil { + v.SetLen(n) + return err + } + return nil + + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String: + saveData = v + + case reflect.Struct: + typ := v.Type() + if typ == nameType { + v.Set(reflect.ValueOf(start.Name)) + break + } + + sv = v + tinfo, err = getTypeInfo(typ) + if err != nil { + return err + } + + // Validate and assign element name. + if tinfo.xmlname != nil { + finfo := tinfo.xmlname + if finfo.name != "" && finfo.name != start.Name.Local { + return UnmarshalError("expected element type <" + finfo.name + "> but have <" + start.Name.Local + ">") + } + if finfo.xmlns != "" && finfo.xmlns != start.Name.Space { + e := "expected element <" + finfo.name + "> in name space " + finfo.xmlns + " but have " + if start.Name.Space == "" { + e += "no name space" + } else { + e += start.Name.Space + } + return UnmarshalError(e) + } + fv := finfo.value(sv) + if _, ok := fv.Interface().(Name); ok { + fv.Set(reflect.ValueOf(start.Name)) + } + } + + // Assign attributes. + // Also, determine whether we need to save character data or comments. + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + switch finfo.flags & fMode { + case fAttr: + strv := finfo.value(sv) + // Look for attribute. + for _, a := range start.Attr { + if a.Name.Local == finfo.name && (finfo.xmlns == "" || finfo.xmlns == a.Name.Space) { + if err := p.unmarshalAttr(strv, a); err != nil { + return err + } + break + } + } + + case fCharData: + if !saveData.IsValid() { + saveData = finfo.value(sv) + } + + case fComment: + if !saveComment.IsValid() { + saveComment = finfo.value(sv) + } + + case fAny, fAny | fElement: + if !saveAny.IsValid() { + saveAny = finfo.value(sv) + } + + case fInnerXml: + if !saveXML.IsValid() { + saveXML = finfo.value(sv) + if p.saved == nil { + saveXMLIndex = 0 + p.saved = new(bytes.Buffer) + } else { + saveXMLIndex = p.savedOffset() + } + } + } + } + } + + // Find end element. + // Process sub-elements along the way. +Loop: + for { + var savedOffset int + if saveXML.IsValid() { + savedOffset = p.savedOffset() + } + tok, err := p.Token() + if err != nil { + return err + } + switch t := tok.(type) { + case StartElement: + consumed := false + if sv.IsValid() { + consumed, err = p.unmarshalPath(tinfo, sv, nil, &t) + if err != nil { + return err + } + if !consumed && saveAny.IsValid() { + consumed = true + if err := p.unmarshal(saveAny, &t); err != nil { + return err + } + } + } + if !consumed { + if err := p.Skip(); err != nil { + return err + } + } + + case EndElement: + if saveXML.IsValid() { + saveXMLData = p.saved.Bytes()[saveXMLIndex:savedOffset] + if saveXMLIndex == 0 { + p.saved = nil + } + } + break Loop + + case CharData: + if saveData.IsValid() { + data = append(data, t...) + } + + case Comment: + if saveComment.IsValid() { + comment = append(comment, t...) 
+ } + } + } + + if saveData.IsValid() && saveData.CanInterface() && saveData.Type().Implements(textUnmarshalerType) { + if err := saveData.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { + return err + } + saveData = reflect.Value{} + } + + if saveData.IsValid() && saveData.CanAddr() { + pv := saveData.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + if err := pv.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { + return err + } + saveData = reflect.Value{} + } + } + + if err := copyValue(saveData, data); err != nil { + return err + } + + switch t := saveComment; t.Kind() { + case reflect.String: + t.SetString(string(comment)) + case reflect.Slice: + t.Set(reflect.ValueOf(comment)) + } + + switch t := saveXML; t.Kind() { + case reflect.String: + t.SetString(string(saveXMLData)) + case reflect.Slice: + t.Set(reflect.ValueOf(saveXMLData)) + } + + return nil +} + +func copyValue(dst reflect.Value, src []byte) (err error) { + dst0 := dst + + if dst.Kind() == reflect.Ptr { + if dst.IsNil() { + dst.Set(reflect.New(dst.Type().Elem())) + } + dst = dst.Elem() + } + + // Save accumulated data. + switch dst.Kind() { + case reflect.Invalid: + // Probably a comment. + default: + return errors.New("cannot unmarshal into " + dst0.Type().String()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + itmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits()) + if err != nil { + return err + } + dst.SetInt(itmp) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + utmp, err := strconv.ParseUint(string(src), 10, dst.Type().Bits()) + if err != nil { + return err + } + dst.SetUint(utmp) + case reflect.Float32, reflect.Float64: + ftmp, err := strconv.ParseFloat(string(src), dst.Type().Bits()) + if err != nil { + return err + } + dst.SetFloat(ftmp) + case reflect.Bool: + value, err := strconv.ParseBool(strings.TrimSpace(string(src))) + if err != nil { + return err + } + dst.SetBool(value) + case reflect.String: + dst.SetString(string(src)) + case reflect.Slice: + if len(src) == 0 { + // non-nil to flag presence + src = []byte{} + } + dst.SetBytes(src) + } + return nil +} + +// unmarshalPath walks down an XML structure looking for wanted +// paths, and calls unmarshal on them. +// The consumed result tells whether XML elements have been consumed +// from the Decoder until start's matching end element, or if it's +// still untouched because start is uninteresting for sv's fields. +func (p *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) { + recurse := false +Loop: + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + if finfo.flags&fElement == 0 || len(finfo.parents) < len(parents) || finfo.xmlns != "" && finfo.xmlns != start.Name.Space { + continue + } + for j := range parents { + if parents[j] != finfo.parents[j] { + continue Loop + } + } + if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local { + // It's a perfect match, unmarshal the field. + return true, p.unmarshal(finfo.value(sv), start) + } + if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local { + // It's a prefix for the field. Break and recurse + // since it's not ok for one field path to be itself + // the prefix for another field path. + recurse = true + + // We can reuse the same slice as long as we + // don't try to append to it. 
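+			// (Rebinding parents to a longer prefix of finfo.parents
+			// shares the backing array; nothing below writes to it.)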
+ parents = finfo.parents[:len(parents)+1] + break + } + } + if !recurse { + // We have no business with this element. + return false, nil + } + // The element is not a perfect match for any field, but one + // or more fields have the path to this element as a parent + // prefix. Recurse and attempt to match these. + for { + var tok Token + tok, err = p.Token() + if err != nil { + return true, err + } + switch t := tok.(type) { + case StartElement: + consumed2, err := p.unmarshalPath(tinfo, sv, parents, &t) + if err != nil { + return true, err + } + if !consumed2 { + if err := p.Skip(); err != nil { + return true, err + } + } + case EndElement: + return true, nil + } + } +} + +// Skip reads tokens until it has consumed the end element +// matching the most recent start element already consumed. +// It recurs if it encounters a start element, so it can be used to +// skip nested structures. +// It returns nil if it finds an end element matching the start +// element; otherwise it returns an error describing the problem. +func (d *Decoder) Skip() error { + for { + tok, err := d.Token() + if err != nil { + return err + } + switch tok.(type) { + case StartElement: + if err := d.Skip(); err != nil { + return err + } + case EndElement: + return nil + } + } +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/read_test.go b/vendor/golang.org/x/net/webdav/internal/xml/read_test.go new file mode 100644 index 0000000..02f1e10 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/read_test.go @@ -0,0 +1,744 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" + "testing" + "time" +) + +// Stripped down Atom feed data structures. + +func TestUnmarshalFeed(t *testing.T) { + var f Feed + if err := Unmarshal([]byte(atomFeedString), &f); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if !reflect.DeepEqual(f, atomFeed) { + t.Fatalf("have %#v\nwant %#v", f, atomFeed) + } +} + +// hget http://codereview.appspot.com/rss/mine/rsc +const atomFeedString = ` + +Code Review - My issueshttp://codereview.appspot.com/rietveld<>rietveld: an attempt at pubsubhubbub +2009-10-04T01:35:58+00:00email-address-removedurn:md5:134d9179c41f806be79b3a5f7877d19a + An attempt at adding pubsubhubbub support to Rietveld. +http://code.google.com/p/pubsubhubbub +http://code.google.com/p/rietveld/issues/detail?id=155 + +The server side of the protocol is trivial: + 1. add a &lt;link rel=&quot;hub&quot; href=&quot;hub-server&quot;&gt; tag to all + feeds that will be pubsubhubbubbed. + 2. every time one of those feeds changes, tell the hub + with a simple POST request. + +I have tested this by adding debug prints to a local hub +server and checking that the server got the right publish +requests. + +I can&#39;t quite get the server to work, but I think the bug +is not in my code. I think that the server expects to be +able to grab the feed and see the feed&#39;s actual URL in +the link rel=&quot;self&quot;, but the default value for that drops +the :port from the URL, and I cannot for the life of me +figure out how to get the Atom generator deep inside +django not to do that, or even where it is doing that, +or even what code is running to generate the Atom feed. +(I thought I knew but I added some assert False statements +and it kept running!) 
+ +Ignoring that particular problem, I would appreciate +feedback on the right way to get the two values at +the top of feeds.py marked NOTE(rsc). + + +rietveld: correct tab handling +2009-10-03T23:02:17+00:00email-address-removedurn:md5:0a2a4f19bb815101f0ba2904aed7c35a + This fixes the buggy tab rendering that can be seen at +http://codereview.appspot.com/116075/diff/1/2 + +The fundamental problem was that the tab code was +not being told what column the text began in, so it +didn&#39;t know where to put the tab stops. Another problem +was that some of the code assumed that string byte +offsets were the same as column offsets, which is only +true if there are no tabs. + +In the process of fixing this, I cleaned up the arguments +to Fold and ExpandTabs and renamed them Break and +_ExpandTabs so that I could be sure that I found all the +call sites. I also wanted to verify that ExpandTabs was +not being used from outside intra_region_diff.py. + + + ` + +type Feed struct { + XMLName Name `xml:"http://www.w3.org/2005/Atom feed"` + Title string `xml:"title"` + Id string `xml:"id"` + Link []Link `xml:"link"` + Updated time.Time `xml:"updated,attr"` + Author Person `xml:"author"` + Entry []Entry `xml:"entry"` +} + +type Entry struct { + Title string `xml:"title"` + Id string `xml:"id"` + Link []Link `xml:"link"` + Updated time.Time `xml:"updated"` + Author Person `xml:"author"` + Summary Text `xml:"summary"` +} + +type Link struct { + Rel string `xml:"rel,attr,omitempty"` + Href string `xml:"href,attr"` +} + +type Person struct { + Name string `xml:"name"` + URI string `xml:"uri"` + Email string `xml:"email"` + InnerXML string `xml:",innerxml"` +} + +type Text struct { + Type string `xml:"type,attr,omitempty"` + Body string `xml:",chardata"` +} + +var atomFeed = Feed{ + XMLName: Name{"http://www.w3.org/2005/Atom", "feed"}, + Title: "Code Review - My issues", + Link: []Link{ + {Rel: "alternate", Href: "http://codereview.appspot.com/"}, + {Rel: "self", Href: "http://codereview.appspot.com/rss/mine/rsc"}, + }, + Id: "http://codereview.appspot.com/", + Updated: ParseTime("2009-10-04T01:35:58+00:00"), + Author: Person{ + Name: "rietveld<>", + InnerXML: "rietveld<>", + }, + Entry: []Entry{ + { + Title: "rietveld: an attempt at pubsubhubbub\n", + Link: []Link{ + {Rel: "alternate", Href: "http://codereview.appspot.com/126085"}, + }, + Updated: ParseTime("2009-10-04T01:35:58+00:00"), + Author: Person{ + Name: "email-address-removed", + InnerXML: "email-address-removed", + }, + Id: "urn:md5:134d9179c41f806be79b3a5f7877d19a", + Summary: Text{ + Type: "html", + Body: ` + An attempt at adding pubsubhubbub support to Rietveld. +http://code.google.com/p/pubsubhubbub +http://code.google.com/p/rietveld/issues/detail?id=155 + +The server side of the protocol is trivial: + 1. add a <link rel="hub" href="hub-server"> tag to all + feeds that will be pubsubhubbubbed. + 2. every time one of those feeds changes, tell the hub + with a simple POST request. + +I have tested this by adding debug prints to a local hub +server and checking that the server got the right publish +requests. + +I can't quite get the server to work, but I think the bug +is not in my code. 
I think that the server expects to be +able to grab the feed and see the feed's actual URL in +the link rel="self", but the default value for that drops +the :port from the URL, and I cannot for the life of me +figure out how to get the Atom generator deep inside +django not to do that, or even where it is doing that, +or even what code is running to generate the Atom feed. +(I thought I knew but I added some assert False statements +and it kept running!) + +Ignoring that particular problem, I would appreciate +feedback on the right way to get the two values at +the top of feeds.py marked NOTE(rsc). + + +`, + }, + }, + { + Title: "rietveld: correct tab handling\n", + Link: []Link{ + {Rel: "alternate", Href: "http://codereview.appspot.com/124106"}, + }, + Updated: ParseTime("2009-10-03T23:02:17+00:00"), + Author: Person{ + Name: "email-address-removed", + InnerXML: "email-address-removed", + }, + Id: "urn:md5:0a2a4f19bb815101f0ba2904aed7c35a", + Summary: Text{ + Type: "html", + Body: ` + This fixes the buggy tab rendering that can be seen at +http://codereview.appspot.com/116075/diff/1/2 + +The fundamental problem was that the tab code was +not being told what column the text began in, so it +didn't know where to put the tab stops. Another problem +was that some of the code assumed that string byte +offsets were the same as column offsets, which is only +true if there are no tabs. + +In the process of fixing this, I cleaned up the arguments +to Fold and ExpandTabs and renamed them Break and +_ExpandTabs so that I could be sure that I found all the +call sites. I also wanted to verify that ExpandTabs was +not being used from outside intra_region_diff.py. + + +`, + }, + }, + }, +} + +const pathTestString = ` + + 1 + + + A + + + B + + + C + D + + <_> + E + + + 2 + +` + +type PathTestItem struct { + Value string +} + +type PathTestA struct { + Items []PathTestItem `xml:">Item1"` + Before, After string +} + +type PathTestB struct { + Other []PathTestItem `xml:"Items>Item1"` + Before, After string +} + +type PathTestC struct { + Values1 []string `xml:"Items>Item1>Value"` + Values2 []string `xml:"Items>Item2>Value"` + Before, After string +} + +type PathTestSet struct { + Item1 []PathTestItem +} + +type PathTestD struct { + Other PathTestSet `xml:"Items"` + Before, After string +} + +type PathTestE struct { + Underline string `xml:"Items>_>Value"` + Before, After string +} + +var pathTests = []interface{}{ + &PathTestA{Items: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"}, + &PathTestB{Other: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"}, + &PathTestC{Values1: []string{"A", "C", "D"}, Values2: []string{"B"}, Before: "1", After: "2"}, + &PathTestD{Other: PathTestSet{Item1: []PathTestItem{{"A"}, {"D"}}}, Before: "1", After: "2"}, + &PathTestE{Underline: "E", Before: "1", After: "2"}, +} + +func TestUnmarshalPaths(t *testing.T) { + for _, pt := range pathTests { + v := reflect.New(reflect.TypeOf(pt).Elem()).Interface() + if err := Unmarshal([]byte(pathTestString), v); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if !reflect.DeepEqual(v, pt) { + t.Fatalf("have %#v\nwant %#v", v, pt) + } + } +} + +type BadPathTestA struct { + First string `xml:"items>item1"` + Other string `xml:"items>item2"` + Second string `xml:"items"` +} + +type BadPathTestB struct { + Other string `xml:"items>item2>value"` + First string `xml:"items>item1"` + Second string `xml:"items>item1>value"` +} + +type BadPathTestC struct { + First string + Second string `xml:"First"` +} + +type BadPathTestD struct { 
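+	// Both embedded structs contribute a field named or tagged "First"
+	// at the same depth, which addFieldInfo reports as a TagPathError.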
+ BadPathEmbeddedA + BadPathEmbeddedB +} + +type BadPathEmbeddedA struct { + First string +} + +type BadPathEmbeddedB struct { + Second string `xml:"First"` +} + +var badPathTests = []struct { + v, e interface{} +}{ + {&BadPathTestA{}, &TagPathError{reflect.TypeOf(BadPathTestA{}), "First", "items>item1", "Second", "items"}}, + {&BadPathTestB{}, &TagPathError{reflect.TypeOf(BadPathTestB{}), "First", "items>item1", "Second", "items>item1>value"}}, + {&BadPathTestC{}, &TagPathError{reflect.TypeOf(BadPathTestC{}), "First", "", "Second", "First"}}, + {&BadPathTestD{}, &TagPathError{reflect.TypeOf(BadPathTestD{}), "First", "", "Second", "First"}}, +} + +func TestUnmarshalBadPaths(t *testing.T) { + for _, tt := range badPathTests { + err := Unmarshal([]byte(pathTestString), tt.v) + if !reflect.DeepEqual(err, tt.e) { + t.Fatalf("Unmarshal with %#v didn't fail properly:\nhave %#v,\nwant %#v", tt.v, err, tt.e) + } + } +} + +const OK = "OK" +const withoutNameTypeData = ` + +` + +type TestThree struct { + XMLName Name `xml:"Test3"` + Attr string `xml:",attr"` +} + +func TestUnmarshalWithoutNameType(t *testing.T) { + var x TestThree + if err := Unmarshal([]byte(withoutNameTypeData), &x); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if x.Attr != OK { + t.Fatalf("have %v\nwant %v", x.Attr, OK) + } +} + +func TestUnmarshalAttr(t *testing.T) { + type ParamVal struct { + Int int `xml:"int,attr"` + } + + type ParamPtr struct { + Int *int `xml:"int,attr"` + } + + type ParamStringPtr struct { + Int *string `xml:"int,attr"` + } + + x := []byte(``) + + p1 := &ParamPtr{} + if err := Unmarshal(x, p1); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if p1.Int == nil { + t.Fatalf("Unmarshal failed in to *int field") + } else if *p1.Int != 1 { + t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p1.Int, 1) + } + + p2 := &ParamVal{} + if err := Unmarshal(x, p2); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if p2.Int != 1 { + t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p2.Int, 1) + } + + p3 := &ParamStringPtr{} + if err := Unmarshal(x, p3); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if p3.Int == nil { + t.Fatalf("Unmarshal failed in to *string field") + } else if *p3.Int != "1" { + t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p3.Int, 1) + } +} + +type Tables struct { + HTable string `xml:"http://www.w3.org/TR/html4/ table"` + FTable string `xml:"http://www.w3schools.com/furniture table"` +} + +var tables = []struct { + xml string + tab Tables + ns string +}{ + { + xml: `` + + `hello
    ` + + `world
    ` + + `
    `, + tab: Tables{"hello", "world"}, + }, + { + xml: `` + + `world
    ` + + `hello
    ` + + `
    `, + tab: Tables{"hello", "world"}, + }, + { + xml: `` + + `world` + + `hello` + + ``, + tab: Tables{"hello", "world"}, + }, + { + xml: `` + + `bogus
    ` + + `
    `, + tab: Tables{}, + }, + { + xml: `` + + `only
    ` + + `
    `, + tab: Tables{HTable: "only"}, + ns: "http://www.w3.org/TR/html4/", + }, + { + xml: `` + + `only
    ` + + `
    `, + tab: Tables{FTable: "only"}, + ns: "http://www.w3schools.com/furniture", + }, + { + xml: `` + + `only
    ` + + `
    `, + tab: Tables{}, + ns: "something else entirely", + }, +} + +func TestUnmarshalNS(t *testing.T) { + for i, tt := range tables { + var dst Tables + var err error + if tt.ns != "" { + d := NewDecoder(strings.NewReader(tt.xml)) + d.DefaultSpace = tt.ns + err = d.Decode(&dst) + } else { + err = Unmarshal([]byte(tt.xml), &dst) + } + if err != nil { + t.Errorf("#%d: Unmarshal: %v", i, err) + continue + } + want := tt.tab + if dst != want { + t.Errorf("#%d: dst=%+v, want %+v", i, dst, want) + } + } +} + +func TestRoundTrip(t *testing.T) { + // From issue 7535 + const s = `` + in := bytes.NewBufferString(s) + for i := 0; i < 10; i++ { + out := &bytes.Buffer{} + d := NewDecoder(in) + e := NewEncoder(out) + + for { + t, err := d.Token() + if err == io.EOF { + break + } + if err != nil { + fmt.Println("failed:", err) + return + } + e.EncodeToken(t) + } + e.Flush() + in = out + } + if got := in.String(); got != s { + t.Errorf("have: %q\nwant: %q\n", got, s) + } +} + +func TestMarshalNS(t *testing.T) { + dst := Tables{"hello", "world"} + data, err := Marshal(&dst) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + want := `hello
    world
    ` + str := string(data) + if str != want { + t.Errorf("have: %q\nwant: %q\n", str, want) + } +} + +type TableAttrs struct { + TAttr TAttr +} + +type TAttr struct { + HTable string `xml:"http://www.w3.org/TR/html4/ table,attr"` + FTable string `xml:"http://www.w3schools.com/furniture table,attr"` + Lang string `xml:"http://www.w3.org/XML/1998/namespace lang,attr,omitempty"` + Other1 string `xml:"http://golang.org/xml/ other,attr,omitempty"` + Other2 string `xml:"http://golang.org/xmlfoo/ other,attr,omitempty"` + Other3 string `xml:"http://golang.org/json/ other,attr,omitempty"` + Other4 string `xml:"http://golang.org/2/json/ other,attr,omitempty"` +} + +var tableAttrs = []struct { + xml string + tab TableAttrs + ns string +}{ + { + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}}, + }, + { + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}}, + }, + { + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}}, + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: ""}}, + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "", FTable: "world"}}, + }, + { + xml: ``, + tab: TableAttrs{}, + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: ""}}, + ns: "http://www.w3schools.com/furniture", + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "", FTable: "world"}}, + ns: "http://www.w3.org/TR/html4/", + }, + { + xml: ``, + tab: TableAttrs{}, + ns: "something else entirely", + }, +} + +func TestUnmarshalNSAttr(t *testing.T) { + for i, tt := range tableAttrs { + var dst TableAttrs + var err error + if tt.ns != "" { + d := NewDecoder(strings.NewReader(tt.xml)) + d.DefaultSpace = tt.ns + err = d.Decode(&dst) + } else { + err = Unmarshal([]byte(tt.xml), &dst) + } + if err != nil { + t.Errorf("#%d: Unmarshal: %v", i, err) + continue + } + want := tt.tab + if dst != want { + t.Errorf("#%d: dst=%+v, want %+v", i, dst, want) + } + } +} + +func TestMarshalNSAttr(t *testing.T) { + src := TableAttrs{TAttr{"hello", "world", "en_US", "other1", "other2", "other3", "other4"}} + data, err := Marshal(&src) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + want := `` + str := string(data) + if str != want { + t.Errorf("Marshal:\nhave: %#q\nwant: %#q\n", str, want) + } + + var dst TableAttrs + if err := Unmarshal(data, &dst); err != nil { + t.Errorf("Unmarshal: %v", err) + } + + if dst != src { + t.Errorf("Unmarshal = %q, want %q", dst, src) + } +} + +type MyCharData struct { + body string +} + +func (m *MyCharData) UnmarshalXML(d *Decoder, start StartElement) error { + for { + t, err := d.Token() + if err == io.EOF { // found end of element + break + } + if err != nil { + return err + } + if char, ok := t.(CharData); ok { + m.body += string(char) + } + } + return nil +} + +var _ Unmarshaler = (*MyCharData)(nil) + +func (m *MyCharData) UnmarshalXMLAttr(attr Attr) error { + panic("must not call") +} + +type MyAttr struct { + attr string +} + +func (m *MyAttr) UnmarshalXMLAttr(attr Attr) error { + m.attr = attr.Value + return nil +} + +var _ UnmarshalerAttr = (*MyAttr)(nil) + +type MyStruct struct { + Data *MyCharData + Attr *MyAttr `xml:",attr"` + + Data2 MyCharData + Attr2 MyAttr `xml:",attr"` +} + +func TestUnmarshaler(t *testing.T) { + xml := ` + + hello world + howdy world + + ` + + var m MyStruct + if err := 
Unmarshal([]byte(xml), &m); err != nil { + t.Fatal(err) + } + + if m.Data == nil || m.Attr == nil || m.Data.body != "hello world" || m.Attr.attr != "attr1" || m.Data2.body != "howdy world" || m.Attr2.attr != "attr2" { + t.Errorf("m=%#+v\n", m) + } +} + +type Pea struct { + Cotelydon string +} + +type Pod struct { + Pea interface{} `xml:"Pea"` +} + +// https://golang.org/issue/6836 +func TestUnmarshalIntoInterface(t *testing.T) { + pod := new(Pod) + pod.Pea = new(Pea) + xml := `Green stuff` + err := Unmarshal([]byte(xml), pod) + if err != nil { + t.Fatalf("failed to unmarshal %q: %v", xml, err) + } + pea, ok := pod.Pea.(*Pea) + if !ok { + t.Fatalf("unmarshalled into wrong type: have %T want *Pea", pod.Pea) + } + have, want := pea.Cotelydon, "Green stuff" + if have != want { + t.Errorf("failed to unmarshal into interface, have %q want %q", have, want) + } +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go b/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go new file mode 100644 index 0000000..fdde288 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go @@ -0,0 +1,371 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "fmt" + "reflect" + "strings" + "sync" +) + +// typeInfo holds details for the xml representation of a type. +type typeInfo struct { + xmlname *fieldInfo + fields []fieldInfo +} + +// fieldInfo holds details for the xml representation of a single field. +type fieldInfo struct { + idx []int + name string + xmlns string + flags fieldFlags + parents []string +} + +type fieldFlags int + +const ( + fElement fieldFlags = 1 << iota + fAttr + fCharData + fInnerXml + fComment + fAny + + fOmitEmpty + + fMode = fElement | fAttr | fCharData | fInnerXml | fComment | fAny +) + +var tinfoMap = make(map[reflect.Type]*typeInfo) +var tinfoLock sync.RWMutex + +var nameType = reflect.TypeOf(Name{}) + +// getTypeInfo returns the typeInfo structure with details necessary +// for marshalling and unmarshalling typ. +func getTypeInfo(typ reflect.Type) (*typeInfo, error) { + tinfoLock.RLock() + tinfo, ok := tinfoMap[typ] + tinfoLock.RUnlock() + if ok { + return tinfo, nil + } + tinfo = &typeInfo{} + if typ.Kind() == reflect.Struct && typ != nameType { + n := typ.NumField() + for i := 0; i < n; i++ { + f := typ.Field(i) + if f.PkgPath != "" || f.Tag.Get("xml") == "-" { + continue // Private field + } + + // For embedded structs, embed its fields. + if f.Anonymous { + t := f.Type + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() == reflect.Struct { + inner, err := getTypeInfo(t) + if err != nil { + return nil, err + } + if tinfo.xmlname == nil { + tinfo.xmlname = inner.xmlname + } + for _, finfo := range inner.fields { + finfo.idx = append([]int{i}, finfo.idx...) + if err := addFieldInfo(typ, tinfo, &finfo); err != nil { + return nil, err + } + } + continue + } + } + + finfo, err := structFieldInfo(typ, &f) + if err != nil { + return nil, err + } + + if f.Name == "XMLName" { + tinfo.xmlname = finfo + continue + } + + // Add the field if it doesn't conflict with other fields. + if err := addFieldInfo(typ, tinfo, finfo); err != nil { + return nil, err + } + } + } + tinfoLock.Lock() + tinfoMap[typ] = tinfo + tinfoLock.Unlock() + return tinfo, nil +} + +// structFieldInfo builds and returns a fieldInfo for f. 
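+// For example (an illustrative sketch of the tag grammar handled below),
+// a field declared as
+//
+//	Field string `xml:"urn:x name,attr,omitempty"`
+//
+// yields xmlns "urn:x", name "name", and flags fAttr|fOmitEmpty.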
+func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) { + finfo := &fieldInfo{idx: f.Index} + + // Split the tag from the xml namespace if necessary. + tag := f.Tag.Get("xml") + if i := strings.Index(tag, " "); i >= 0 { + finfo.xmlns, tag = tag[:i], tag[i+1:] + } + + // Parse flags. + tokens := strings.Split(tag, ",") + if len(tokens) == 1 { + finfo.flags = fElement + } else { + tag = tokens[0] + for _, flag := range tokens[1:] { + switch flag { + case "attr": + finfo.flags |= fAttr + case "chardata": + finfo.flags |= fCharData + case "innerxml": + finfo.flags |= fInnerXml + case "comment": + finfo.flags |= fComment + case "any": + finfo.flags |= fAny + case "omitempty": + finfo.flags |= fOmitEmpty + } + } + + // Validate the flags used. + valid := true + switch mode := finfo.flags & fMode; mode { + case 0: + finfo.flags |= fElement + case fAttr, fCharData, fInnerXml, fComment, fAny: + if f.Name == "XMLName" || tag != "" && mode != fAttr { + valid = false + } + default: + // This will also catch multiple modes in a single field. + valid = false + } + if finfo.flags&fMode == fAny { + finfo.flags |= fElement + } + if finfo.flags&fOmitEmpty != 0 && finfo.flags&(fElement|fAttr) == 0 { + valid = false + } + if !valid { + return nil, fmt.Errorf("xml: invalid tag in field %s of type %s: %q", + f.Name, typ, f.Tag.Get("xml")) + } + } + + // Use of xmlns without a name is not allowed. + if finfo.xmlns != "" && tag == "" { + return nil, fmt.Errorf("xml: namespace without name in field %s of type %s: %q", + f.Name, typ, f.Tag.Get("xml")) + } + + if f.Name == "XMLName" { + // The XMLName field records the XML element name. Don't + // process it as usual because its name should default to + // empty rather than to the field name. + finfo.name = tag + return finfo, nil + } + + if tag == "" { + // If the name part of the tag is completely empty, get + // default from XMLName of underlying struct if feasible, + // or field name otherwise. + if xmlname := lookupXMLName(f.Type); xmlname != nil { + finfo.xmlns, finfo.name = xmlname.xmlns, xmlname.name + } else { + finfo.name = f.Name + } + return finfo, nil + } + + if finfo.xmlns == "" && finfo.flags&fAttr == 0 { + // If it's an element no namespace specified, get the default + // from the XMLName of enclosing struct if possible. + if xmlname := lookupXMLName(typ); xmlname != nil { + finfo.xmlns = xmlname.xmlns + } + } + + // Prepare field name and parents. + parents := strings.Split(tag, ">") + if parents[0] == "" { + parents[0] = f.Name + } + if parents[len(parents)-1] == "" { + return nil, fmt.Errorf("xml: trailing '>' in field %s of type %s", f.Name, typ) + } + finfo.name = parents[len(parents)-1] + if len(parents) > 1 { + if (finfo.flags & fElement) == 0 { + return nil, fmt.Errorf("xml: %s chain not valid with %s flag", tag, strings.Join(tokens[1:], ",")) + } + finfo.parents = parents[:len(parents)-1] + } + + // If the field type has an XMLName field, the names must match + // so that the behavior of both marshalling and unmarshalling + // is straightforward and unambiguous. + if finfo.flags&fElement != 0 { + ftyp := f.Type + xmlname := lookupXMLName(ftyp) + if xmlname != nil && xmlname.name != finfo.name { + return nil, fmt.Errorf("xml: name %q in tag of %s.%s conflicts with name %q in %s.XMLName", + finfo.name, typ, f.Name, xmlname.name, ftyp) + } + } + return finfo, nil +} + +// lookupXMLName returns the fieldInfo for typ's XMLName field +// in case it exists and has a valid xml field tag, otherwise +// it returns nil. 
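+//
+// Illustrative example (not from the original source): given
+//
+//	type Result struct {
+//		XMLName Name `xml:"space result"`
+//	}
+//
+// lookupXMLName(reflect.TypeOf(Result{})) returns a fieldInfo whose name is
+// "result" and whose xmlns is "space".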
+func lookupXMLName(typ reflect.Type) (xmlname *fieldInfo) { + for typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + if typ.Kind() != reflect.Struct { + return nil + } + for i, n := 0, typ.NumField(); i < n; i++ { + f := typ.Field(i) + if f.Name != "XMLName" { + continue + } + finfo, err := structFieldInfo(typ, &f) + if finfo.name != "" && err == nil { + return finfo + } + // Also consider errors as a non-existent field tag + // and let getTypeInfo itself report the error. + break + } + return nil +} + +func min(a, b int) int { + if a <= b { + return a + } + return b +} + +// addFieldInfo adds finfo to tinfo.fields if there are no +// conflicts, or if conflicts arise from previous fields that were +// obtained from deeper embedded structures than finfo. In the latter +// case, the conflicting entries are dropped. +// A conflict occurs when the path (parent + name) to a field is +// itself a prefix of another path, or when two paths match exactly. +// It is okay for field paths to share a common, shorter prefix. +func addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error { + var conflicts []int +Loop: + // First, figure all conflicts. Most working code will have none. + for i := range tinfo.fields { + oldf := &tinfo.fields[i] + if oldf.flags&fMode != newf.flags&fMode { + continue + } + if oldf.xmlns != "" && newf.xmlns != "" && oldf.xmlns != newf.xmlns { + continue + } + minl := min(len(newf.parents), len(oldf.parents)) + for p := 0; p < minl; p++ { + if oldf.parents[p] != newf.parents[p] { + continue Loop + } + } + if len(oldf.parents) > len(newf.parents) { + if oldf.parents[len(newf.parents)] == newf.name { + conflicts = append(conflicts, i) + } + } else if len(oldf.parents) < len(newf.parents) { + if newf.parents[len(oldf.parents)] == oldf.name { + conflicts = append(conflicts, i) + } + } else { + if newf.name == oldf.name { + conflicts = append(conflicts, i) + } + } + } + // Without conflicts, add the new field and return. + if conflicts == nil { + tinfo.fields = append(tinfo.fields, *newf) + return nil + } + + // If any conflict is shallower, ignore the new field. + // This matches the Go field resolution on embedding. + for _, i := range conflicts { + if len(tinfo.fields[i].idx) < len(newf.idx) { + return nil + } + } + + // Otherwise, if any of them is at the same depth level, it's an error. + for _, i := range conflicts { + oldf := &tinfo.fields[i] + if len(oldf.idx) == len(newf.idx) { + f1 := typ.FieldByIndex(oldf.idx) + f2 := typ.FieldByIndex(newf.idx) + return &TagPathError{typ, f1.Name, f1.Tag.Get("xml"), f2.Name, f2.Tag.Get("xml")} + } + } + + // Otherwise, the new field is shallower, and thus takes precedence, + // so drop the conflicting fields from tinfo and append the new one. + for c := len(conflicts) - 1; c >= 0; c-- { + i := conflicts[c] + copy(tinfo.fields[i:], tinfo.fields[i+1:]) + tinfo.fields = tinfo.fields[:len(tinfo.fields)-1] + } + tinfo.fields = append(tinfo.fields, *newf) + return nil +} + +// A TagPathError represents an error in the unmarshalling process +// caused by the use of field tags with conflicting paths. +type TagPathError struct { + Struct reflect.Type + Field1, Tag1 string + Field2, Tag2 string +} + +func (e *TagPathError) Error() string { + return fmt.Sprintf("%s field %q with tag %q conflicts with field %q with tag %q", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2) +} + +// value returns v's field value corresponding to finfo. 
+// It's equivalent to v.FieldByIndex(finfo.idx), but initializes +// and dereferences pointers as necessary. +func (finfo *fieldInfo) value(v reflect.Value) reflect.Value { + for i, x := range finfo.idx { + if i > 0 { + t := v.Type() + if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + } + v = v.Field(x) + } + return v +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/xml.go b/vendor/golang.org/x/net/webdav/internal/xml/xml.go new file mode 100644 index 0000000..5b79cbe --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/xml.go @@ -0,0 +1,1998 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xml implements a simple XML 1.0 parser that +// understands XML name spaces. +package xml + +// References: +// Annotated XML spec: http://www.xml.com/axml/testaxml.htm +// XML name spaces: http://www.w3.org/TR/REC-xml-names/ + +// TODO(rsc): +// Test error handling. + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// A SyntaxError represents a syntax error in the XML input stream. +type SyntaxError struct { + Msg string + Line int +} + +func (e *SyntaxError) Error() string { + return "XML syntax error on line " + strconv.Itoa(e.Line) + ": " + e.Msg +} + +// A Name represents an XML name (Local) annotated with a name space +// identifier (Space). In tokens returned by Decoder.Token, the Space +// identifier is given as a canonical URL, not the short prefix used in +// the document being parsed. +// +// As a special case, XML namespace declarations will use the literal +// string "xmlns" for the Space field instead of the fully resolved URL. +// See Encoder.EncodeToken for more information on namespace encoding +// behaviour. +type Name struct { + Space, Local string +} + +// isNamespace reports whether the name is a namespace-defining name. +func (name Name) isNamespace() bool { + return name.Local == "xmlns" || name.Space == "xmlns" +} + +// An Attr represents an attribute in an XML element (Name=Value). +type Attr struct { + Name Name + Value string +} + +// A Token is an interface holding one of the token types: +// StartElement, EndElement, CharData, Comment, ProcInst, or Directive. +type Token interface{} + +// A StartElement represents an XML start element. +type StartElement struct { + Name Name + Attr []Attr +} + +func (e StartElement) Copy() StartElement { + attrs := make([]Attr, len(e.Attr)) + copy(attrs, e.Attr) + e.Attr = attrs + return e +} + +// End returns the corresponding XML end element. +func (e StartElement) End() EndElement { + return EndElement{e.Name} +} + +// setDefaultNamespace sets the namespace of the element +// as the default for all elements contained within it. +func (e *StartElement) setDefaultNamespace() { + if e.Name.Space == "" { + // If there's no namespace on the element, don't + // set the default. Strictly speaking this might be wrong, as + // we can't tell if the element had no namespace set + // or was just using the default namespace. + return + } + // Don't add a default name space if there's already one set. 
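+	// (Illustrative note, not in the original source: a StartElement whose
+	// Name.Space is "ns" and which carries no explicit xmlns attribute gains
+	// Attr{Name{"", "xmlns"}, "ns"} below, so it encodes as <elem xmlns="ns">.)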
+	for _, attr := range e.Attr {
+		if attr.Name.Space == "" && attr.Name.Local == "xmlns" {
+			return
+		}
+	}
+	e.Attr = append(e.Attr, Attr{
+		Name: Name{
+			Local: "xmlns",
+		},
+		Value: e.Name.Space,
+	})
+}
+
+// An EndElement represents an XML end element.
+type EndElement struct {
+	Name Name
+}
+
+// A CharData represents XML character data (raw text),
+// in which XML escape sequences have been replaced by
+// the characters they represent.
+type CharData []byte
+
+func makeCopy(b []byte) []byte {
+	b1 := make([]byte, len(b))
+	copy(b1, b)
+	return b1
+}
+
+func (c CharData) Copy() CharData { return CharData(makeCopy(c)) }
+
+// A Comment represents an XML comment of the form <!--comment-->.
+// The bytes do not include the <!-- and --> comment markers.
+type Comment []byte
+
+func (c Comment) Copy() Comment { return Comment(makeCopy(c)) }
+
+// A ProcInst represents an XML processing instruction of the form <?target inst?>
+type ProcInst struct {
+	Target string
+	Inst   []byte
+}
+
+func (p ProcInst) Copy() ProcInst {
+	p.Inst = makeCopy(p.Inst)
+	return p
+}
+
+// A Directive represents an XML directive of the form <!text>.
+// The bytes do not include the <! and > markers.
+type Directive []byte
+
+func (d Directive) Copy() Directive { return Directive(makeCopy(d)) }
+
+// CopyToken returns a copy of a Token.
+func CopyToken(t Token) Token {
+	switch v := t.(type) {
+	case CharData:
+		return v.Copy()
+	case Comment:
+		return v.Copy()
+	case Directive:
+		return v.Copy()
+	case ProcInst:
+		return v.Copy()
+	case StartElement:
+		return v.Copy()
+	}
+	return t
+}
+
+// A Decoder represents an XML parser reading a particular input stream.
+// The parser assumes that its input is encoded in UTF-8.
+type Decoder struct {
+	// Strict defaults to true, enforcing the requirements
+	// of the XML specification.
+	// If set to false, the parser allows input containing common
+	// mistakes:
+	//	* If an element is missing an end tag, the parser invents
+	//	  end tags as necessary to keep the return values from Token
+	//	  properly balanced.
+	//	* In attribute values and character data, unknown or malformed
+	//	  character entities (sequences beginning with &) are left alone.
+	//
+	// Setting:
+	//
+	//	d.Strict = false;
+	//	d.AutoClose = HTMLAutoClose;
+	//	d.Entity = HTMLEntity
+	//
+	// creates a parser that can handle typical HTML.
+	//
+	// Strict mode does not enforce the requirements of the XML name spaces TR.
+	// In particular it does not reject name space tags using undefined prefixes.
+	// Such tags are recorded with the unknown prefix as the name space URL.
+	Strict bool
+
+	// When Strict == false, AutoClose indicates a set of elements to
+	// consider closed immediately after they are opened, regardless
+	// of whether an end element is present.
+	AutoClose []string
+
+	// Entity can be used to map non-standard entity names to string replacements.
+	// The parser behaves as if these standard mappings are present in the map,
+	// regardless of the actual map content:
+	//
+	//	"lt": "<",
+	//	"gt": ">",
+	//	"amp": "&",
+	//	"apos": "'",
+	//	"quot": `"`,
+	Entity map[string]string
+
+	// CharsetReader, if non-nil, defines a function to generate
+	// charset-conversion readers, converting from the provided
+	// non-UTF-8 charset into UTF-8. If CharsetReader is nil or
+	// returns an error, parsing stops with an error. One of
+	// the CharsetReader's result values must be non-nil.
+ CharsetReader func(charset string, input io.Reader) (io.Reader, error) + + // DefaultSpace sets the default name space used for unadorned tags, + // as if the entire XML stream were wrapped in an element containing + // the attribute xmlns="DefaultSpace". + DefaultSpace string + + r io.ByteReader + buf bytes.Buffer + saved *bytes.Buffer + stk *stack + free *stack + needClose bool + toClose Name + nextToken Token + nextByte int + ns map[string]string + err error + line int + offset int64 + unmarshalDepth int +} + +// NewDecoder creates a new XML parser reading from r. +// If r does not implement io.ByteReader, NewDecoder will +// do its own buffering. +func NewDecoder(r io.Reader) *Decoder { + d := &Decoder{ + ns: make(map[string]string), + nextByte: -1, + line: 1, + Strict: true, + } + d.switchToReader(r) + return d +} + +// Token returns the next XML token in the input stream. +// At the end of the input stream, Token returns nil, io.EOF. +// +// Slices of bytes in the returned token data refer to the +// parser's internal buffer and remain valid only until the next +// call to Token. To acquire a copy of the bytes, call CopyToken +// or the token's Copy method. +// +// Token expands self-closing elements such as
+// <br/>
    +// into separate start and end elements returned by successive calls. +// +// Token guarantees that the StartElement and EndElement +// tokens it returns are properly nested and matched: +// if Token encounters an unexpected end element, +// it will return an error. +// +// Token implements XML name spaces as described by +// http://www.w3.org/TR/REC-xml-names/. Each of the +// Name structures contained in the Token has the Space +// set to the URL identifying its name space when known. +// If Token encounters an unrecognized name space prefix, +// it uses the prefix as the Space rather than report an error. +func (d *Decoder) Token() (t Token, err error) { + if d.stk != nil && d.stk.kind == stkEOF { + err = io.EOF + return + } + if d.nextToken != nil { + t = d.nextToken + d.nextToken = nil + } else if t, err = d.rawToken(); err != nil { + return + } + + if !d.Strict { + if t1, ok := d.autoClose(t); ok { + d.nextToken = t + t = t1 + } + } + switch t1 := t.(type) { + case StartElement: + // In XML name spaces, the translations listed in the + // attributes apply to the element name and + // to the other attribute names, so process + // the translations first. + for _, a := range t1.Attr { + if a.Name.Space == "xmlns" { + v, ok := d.ns[a.Name.Local] + d.pushNs(a.Name.Local, v, ok) + d.ns[a.Name.Local] = a.Value + } + if a.Name.Space == "" && a.Name.Local == "xmlns" { + // Default space for untagged names + v, ok := d.ns[""] + d.pushNs("", v, ok) + d.ns[""] = a.Value + } + } + + d.translate(&t1.Name, true) + for i := range t1.Attr { + d.translate(&t1.Attr[i].Name, false) + } + d.pushElement(t1.Name) + t = t1 + + case EndElement: + d.translate(&t1.Name, true) + if !d.popElement(&t1) { + return nil, d.err + } + t = t1 + } + return +} + +const xmlURL = "http://www.w3.org/XML/1998/namespace" + +// Apply name space translation to name n. +// The default name space (for Space=="") +// applies only to element names, not to attribute names. +func (d *Decoder) translate(n *Name, isElementName bool) { + switch { + case n.Space == "xmlns": + return + case n.Space == "" && !isElementName: + return + case n.Space == "xml": + n.Space = xmlURL + case n.Space == "" && n.Local == "xmlns": + return + } + if v, ok := d.ns[n.Space]; ok { + n.Space = v + } else if n.Space == "" { + n.Space = d.DefaultSpace + } +} + +func (d *Decoder) switchToReader(r io.Reader) { + // Get efficient byte at a time reader. + // Assume that if reader has its own + // ReadByte, it's efficient enough. + // Otherwise, use bufio. + if rb, ok := r.(io.ByteReader); ok { + d.r = rb + } else { + d.r = bufio.NewReader(r) + } +} + +// Parsing state - stack holds old name space translations +// and the current set of open elements. The translations to pop when +// ending a given tag are *below* it on the stack, which is +// more work but forced on us by XML. +type stack struct { + next *stack + kind int + name Name + ok bool +} + +const ( + stkStart = iota + stkNs + stkEOF +) + +func (d *Decoder) push(kind int) *stack { + s := d.free + if s != nil { + d.free = s.next + } else { + s = new(stack) + } + s.next = d.stk + s.kind = kind + d.stk = s + return s +} + +func (d *Decoder) pop() *stack { + s := d.stk + if s != nil { + d.stk = s.next + s.next = d.free + d.free = s + } + return s +} + +// Record that after the current element is finished +// (that element is already pushed on the stack) +// Token should return EOF until popEOF is called. +func (d *Decoder) pushEOF() { + // Walk down stack to find Start. 
+ // It might not be the top, because there might be stkNs + // entries above it. + start := d.stk + for start.kind != stkStart { + start = start.next + } + // The stkNs entries below a start are associated with that + // element too; skip over them. + for start.next != nil && start.next.kind == stkNs { + start = start.next + } + s := d.free + if s != nil { + d.free = s.next + } else { + s = new(stack) + } + s.kind = stkEOF + s.next = start.next + start.next = s +} + +// Undo a pushEOF. +// The element must have been finished, so the EOF should be at the top of the stack. +func (d *Decoder) popEOF() bool { + if d.stk == nil || d.stk.kind != stkEOF { + return false + } + d.pop() + return true +} + +// Record that we are starting an element with the given name. +func (d *Decoder) pushElement(name Name) { + s := d.push(stkStart) + s.name = name +} + +// Record that we are changing the value of ns[local]. +// The old value is url, ok. +func (d *Decoder) pushNs(local string, url string, ok bool) { + s := d.push(stkNs) + s.name.Local = local + s.name.Space = url + s.ok = ok +} + +// Creates a SyntaxError with the current line number. +func (d *Decoder) syntaxError(msg string) error { + return &SyntaxError{Msg: msg, Line: d.line} +} + +// Record that we are ending an element with the given name. +// The name must match the record at the top of the stack, +// which must be a pushElement record. +// After popping the element, apply any undo records from +// the stack to restore the name translations that existed +// before we saw this element. +func (d *Decoder) popElement(t *EndElement) bool { + s := d.pop() + name := t.Name + switch { + case s == nil || s.kind != stkStart: + d.err = d.syntaxError("unexpected end element ") + return false + case s.name.Local != name.Local: + if !d.Strict { + d.needClose = true + d.toClose = t.Name + t.Name = s.name + return true + } + d.err = d.syntaxError("element <" + s.name.Local + "> closed by ") + return false + case s.name.Space != name.Space: + d.err = d.syntaxError("element <" + s.name.Local + "> in space " + s.name.Space + + "closed by in space " + name.Space) + return false + } + + // Pop stack until a Start or EOF is on the top, undoing the + // translations that were associated with the element we just closed. + for d.stk != nil && d.stk.kind != stkStart && d.stk.kind != stkEOF { + s := d.pop() + if s.ok { + d.ns[s.name.Local] = s.name.Space + } else { + delete(d.ns, s.name.Local) + } + } + + return true +} + +// If the top element on the stack is autoclosing and +// t is not the end tag, invent the end tag. +func (d *Decoder) autoClose(t Token) (Token, bool) { + if d.stk == nil || d.stk.kind != stkStart { + return nil, false + } + name := strings.ToLower(d.stk.name.Local) + for _, s := range d.AutoClose { + if strings.ToLower(s) == name { + // This one should be auto closed if t doesn't close it. + et, ok := t.(EndElement) + if !ok || et.Name.Local != name { + return EndElement{d.stk.name}, true + } + break + } + } + return nil, false +} + +var errRawToken = errors.New("xml: cannot use RawToken from UnmarshalXML method") + +// RawToken is like Token but does not verify that +// start and end elements match and does not translate +// name space prefixes to their corresponding URLs. 
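+//
+// Illustrative example (not part of the original file): for the input
+// <p:a/>, RawToken returns StartElement{Name{"p", "a"}, ...} with the
+// prefix "p" left untranslated, whereas Token would resolve "p" to its
+// declared URL when a declaration is in scope.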
+func (d *Decoder) RawToken() (Token, error) { + if d.unmarshalDepth > 0 { + return nil, errRawToken + } + return d.rawToken() +} + +func (d *Decoder) rawToken() (Token, error) { + if d.err != nil { + return nil, d.err + } + if d.needClose { + // The last element we read was self-closing and + // we returned just the StartElement half. + // Return the EndElement half now. + d.needClose = false + return EndElement{d.toClose}, nil + } + + b, ok := d.getc() + if !ok { + return nil, d.err + } + + if b != '<' { + // Text section. + d.ungetc(b) + data := d.text(-1, false) + if data == nil { + return nil, d.err + } + return CharData(data), nil + } + + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + switch b { + case '/': + // ' { + d.err = d.syntaxError("invalid characters between ") + return nil, d.err + } + return EndElement{name}, nil + + case '?': + // ' { + break + } + b0 = b + } + data := d.buf.Bytes() + data = data[0 : len(data)-2] // chop ?> + + if target == "xml" { + content := string(data) + ver := procInst("version", content) + if ver != "" && ver != "1.0" { + d.err = fmt.Errorf("xml: unsupported version %q; only version 1.0 is supported", ver) + return nil, d.err + } + enc := procInst("encoding", content) + if enc != "" && enc != "utf-8" && enc != "UTF-8" { + if d.CharsetReader == nil { + d.err = fmt.Errorf("xml: encoding %q declared but Decoder.CharsetReader is nil", enc) + return nil, d.err + } + newr, err := d.CharsetReader(enc, d.r.(io.Reader)) + if err != nil { + d.err = fmt.Errorf("xml: opening charset %q: %v", enc, err) + return nil, d.err + } + if newr == nil { + panic("CharsetReader returned a nil Reader for charset " + enc) + } + d.switchToReader(newr) + } + } + return ProcInst{target, data}, nil + + case '!': + // ' { + break + } + b0, b1 = b1, b + } + data := d.buf.Bytes() + data = data[0 : len(data)-3] // chop --> + return Comment(data), nil + + case '[': // . + data := d.text(-1, true) + if data == nil { + return nil, d.err + } + return CharData(data), nil + } + + // Probably a directive: , , etc. + // We don't care, but accumulate for caller. Quoted angle + // brackets do not count for nesting. 
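+	// (Illustrative note, not in the original source: an input like
+	// <!DOCTYPE doc [<!ENTITY v "1">]> comes back as a single Directive
+	// token; the inner <!ENTITY ...> raises the nesting depth instead of
+	// ending the directive at its first '>'.)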
+ d.buf.Reset() + d.buf.WriteByte(b) + inquote := uint8(0) + depth := 0 + for { + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + if inquote == 0 && b == '>' && depth == 0 { + break + } + HandleB: + d.buf.WriteByte(b) + switch { + case b == inquote: + inquote = 0 + + case inquote != 0: + // in quotes, no special action + + case b == '\'' || b == '"': + inquote = b + + case b == '>' && inquote == 0: + depth-- + + case b == '<' && inquote == 0: + // Look for ` + +var testEntity = map[string]string{"何": "What", "is-it": "is it?"} + +var rawTokens = []Token{ + CharData("\n"), + ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)}, + CharData("\n"), + Directive(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" + "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`), + CharData("\n"), + StartElement{Name{"", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}}, + CharData("\n "), + StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}}, + CharData("World <>'\" 白鵬翔"), + EndElement{Name{"", "hello"}}, + CharData("\n "), + StartElement{Name{"", "query"}, []Attr{}}, + CharData("What is it?"), + EndElement{Name{"", "query"}}, + CharData("\n "), + StartElement{Name{"", "goodbye"}, []Attr{}}, + EndElement{Name{"", "goodbye"}}, + CharData("\n "), + StartElement{Name{"", "outer"}, []Attr{{Name{"foo", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}}, + CharData("\n "), + StartElement{Name{"", "inner"}, []Attr{}}, + EndElement{Name{"", "inner"}}, + CharData("\n "), + EndElement{Name{"", "outer"}}, + CharData("\n "), + StartElement{Name{"tag", "name"}, []Attr{}}, + CharData("\n "), + CharData("Some text here."), + CharData("\n "), + EndElement{Name{"tag", "name"}}, + CharData("\n"), + EndElement{Name{"", "body"}}, + Comment(" missing final newline "), +} + +var cookedTokens = []Token{ + CharData("\n"), + ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)}, + CharData("\n"), + Directive(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" + "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`), + CharData("\n"), + StartElement{Name{"ns2", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}}, + CharData("\n "), + StartElement{Name{"ns2", "hello"}, []Attr{{Name{"", "lang"}, "en"}}}, + CharData("World <>'\" 白鵬翔"), + EndElement{Name{"ns2", "hello"}}, + CharData("\n "), + StartElement{Name{"ns2", "query"}, []Attr{}}, + CharData("What is it?"), + EndElement{Name{"ns2", "query"}}, + CharData("\n "), + StartElement{Name{"ns2", "goodbye"}, []Attr{}}, + EndElement{Name{"ns2", "goodbye"}}, + CharData("\n "), + StartElement{Name{"ns2", "outer"}, []Attr{{Name{"ns1", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}}, + CharData("\n "), + StartElement{Name{"ns2", "inner"}, []Attr{}}, + EndElement{Name{"ns2", "inner"}}, + CharData("\n "), + EndElement{Name{"ns2", "outer"}}, + CharData("\n "), + StartElement{Name{"ns3", "name"}, []Attr{}}, + CharData("\n "), + CharData("Some text here."), + CharData("\n "), + EndElement{Name{"ns3", "name"}}, + CharData("\n"), + EndElement{Name{"ns2", "body"}}, + Comment(" missing final newline "), +} + +const testInputAltEncoding = ` + +VALUE` + +var rawTokensAltEncoding = []Token{ + CharData("\n"), + ProcInst{"xml", []byte(`version="1.0" encoding="x-testing-uppercase"`)}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("value"), + EndElement{Name{"", "tag"}}, +} + +var xmlInput = []string{ + // 
unexpected EOF cases + "<", + "", + "", + "", + // "", // let the Token() caller handle + "", + "", + "", + "", + " c;", + "", + "", + "", + // "", // let the Token() caller handle + "", + "", + "cdata]]>", +} + +func TestRawToken(t *testing.T) { + d := NewDecoder(strings.NewReader(testInput)) + d.Entity = testEntity + testRawToken(t, d, testInput, rawTokens) +} + +const nonStrictInput = ` +non&entity +&unknown;entity +{ +&#zzz; +&ãªã¾ãˆ3; +<-gt; +&; +&0a; +` + +var nonStringEntity = map[string]string{"": "oops!", "0a": "oops!"} + +var nonStrictTokens = []Token{ + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("non&entity"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&unknown;entity"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("{"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&#zzz;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&ãªã¾ãˆ3;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("<-gt;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&0a;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), +} + +func TestNonStrictRawToken(t *testing.T) { + d := NewDecoder(strings.NewReader(nonStrictInput)) + d.Strict = false + testRawToken(t, d, nonStrictInput, nonStrictTokens) +} + +type downCaser struct { + t *testing.T + r io.ByteReader +} + +func (d *downCaser) ReadByte() (c byte, err error) { + c, err = d.r.ReadByte() + if c >= 'A' && c <= 'Z' { + c += 'a' - 'A' + } + return +} + +func (d *downCaser) Read(p []byte) (int, error) { + d.t.Fatalf("unexpected Read call on downCaser reader") + panic("unreachable") +} + +func TestRawTokenAltEncoding(t *testing.T) { + d := NewDecoder(strings.NewReader(testInputAltEncoding)) + d.CharsetReader = func(charset string, input io.Reader) (io.Reader, error) { + if charset != "x-testing-uppercase" { + t.Fatalf("unexpected charset %q", charset) + } + return &downCaser{t, input.(io.ByteReader)}, nil + } + testRawToken(t, d, testInputAltEncoding, rawTokensAltEncoding) +} + +func TestRawTokenAltEncodingNoConverter(t *testing.T) { + d := NewDecoder(strings.NewReader(testInputAltEncoding)) + token, err := d.RawToken() + if token == nil { + t.Fatalf("expected a token on first RawToken call") + } + if err != nil { + t.Fatal(err) + } + token, err = d.RawToken() + if token != nil { + t.Errorf("expected a nil token; got %#v", token) + } + if err == nil { + t.Fatalf("expected an error on second RawToken call") + } + const encoding = "x-testing-uppercase" + if !strings.Contains(err.Error(), encoding) { + t.Errorf("expected error to contain %q; got error: %v", + encoding, err) + } +} + +func testRawToken(t *testing.T, d *Decoder, raw string, rawTokens []Token) { + lastEnd := int64(0) + for i, want := range rawTokens { + start := d.InputOffset() + have, err := d.RawToken() + end := d.InputOffset() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + var shave, swant string + if _, ok := have.(CharData); ok { + shave = fmt.Sprintf("CharData(%q)", have) + } else { + shave = fmt.Sprintf("%#v", have) + } + if _, ok := want.(CharData); ok 
{ + swant = fmt.Sprintf("CharData(%q)", want) + } else { + swant = fmt.Sprintf("%#v", want) + } + t.Errorf("token %d = %s, want %s", i, shave, swant) + } + + // Check that InputOffset returned actual token. + switch { + case start < lastEnd: + t.Errorf("token %d: position [%d,%d) for %T is before previous token", i, start, end, have) + case start >= end: + // Special case: EndElement can be synthesized. + if start == end && end == lastEnd { + break + } + t.Errorf("token %d: position [%d,%d) for %T is empty", i, start, end, have) + case end > int64(len(raw)): + t.Errorf("token %d: position [%d,%d) for %T extends beyond input", i, start, end, have) + default: + text := raw[start:end] + if strings.ContainsAny(text, "<>") && (!strings.HasPrefix(text, "<") || !strings.HasSuffix(text, ">")) { + t.Errorf("token %d: misaligned raw token %#q for %T", i, text, have) + } + } + lastEnd = end + } +} + +// Ensure that directives (specifically !DOCTYPE) include the complete +// text of any nested directives, noting that < and > do not change +// nesting depth if they are in single or double quotes. + +var nestedDirectivesInput = ` +]> +">]> +]> +'>]> +]> +'>]> +]> +` + +var nestedDirectivesTokens = []Token{ + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE [">]`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE ['>]`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE ['>]`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), +} + +func TestNestedDirectives(t *testing.T) { + d := NewDecoder(strings.NewReader(nestedDirectivesInput)) + + for i, want := range nestedDirectivesTokens { + have, err := d.Token() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + t.Errorf("token %d = %#v want %#v", i, have, want) + } + } +} + +func TestToken(t *testing.T) { + d := NewDecoder(strings.NewReader(testInput)) + d.Entity = testEntity + + for i, want := range cookedTokens { + have, err := d.Token() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + t.Errorf("token %d = %#v want %#v", i, have, want) + } + } +} + +func TestSyntax(t *testing.T) { + for i := range xmlInput { + d := NewDecoder(strings.NewReader(xmlInput[i])) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + if _, ok := err.(*SyntaxError); !ok { + t.Fatalf(`xmlInput "%s": expected SyntaxError not received`, xmlInput[i]) + } + } +} + +type allScalars struct { + True1 bool + True2 bool + False1 bool + False2 bool + Int int + Int8 int8 + Int16 int16 + Int32 int32 + Int64 int64 + Uint int + Uint8 uint8 + Uint16 uint16 + Uint32 uint32 + Uint64 uint64 + Uintptr uintptr + Float32 float32 + Float64 float64 + String string + PtrString *string +} + +var all = allScalars{ + True1: true, + True2: true, + False1: false, + False2: false, + Int: 1, + Int8: -2, + Int16: 3, + Int32: -4, + Int64: 5, + Uint: 6, + Uint8: 7, + Uint16: 8, + Uint32: 9, + Uint64: 10, + Uintptr: 11, + Float32: 13.0, + Float64: 14.0, + String: "15", + PtrString: &sixteen, +} + +var sixteen = "16" + +const testScalarsInput = ` + true + 1 + false + 0 + 1 + -2 + 3 + -4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12.0 + 13.0 + 14.0 + 15 + 16 +` + +func TestAllScalars(t *testing.T) { + var a allScalars + err := Unmarshal([]byte(testScalarsInput), &a) + + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(a, all) { + t.Errorf("have %+v want 
%+v", a, all) + } +} + +type item struct { + Field_a string +} + +func TestIssue569(t *testing.T) { + data := `abcd` + var i item + err := Unmarshal([]byte(data), &i) + + if err != nil || i.Field_a != "abcd" { + t.Fatal("Expecting abcd") + } +} + +func TestUnquotedAttrs(t *testing.T) { + data := "" + d := NewDecoder(strings.NewReader(data)) + d.Strict = false + token, err := d.Token() + if _, ok := err.(*SyntaxError); ok { + t.Errorf("Unexpected error: %v", err) + } + if token.(StartElement).Name.Local != "tag" { + t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local) + } + attr := token.(StartElement).Attr[0] + if attr.Value != "azAZ09:-_" { + t.Errorf("Unexpected attribute value: %v", attr.Value) + } + if attr.Name.Local != "attr" { + t.Errorf("Unexpected attribute name: %v", attr.Name.Local) + } +} + +func TestValuelessAttrs(t *testing.T) { + tests := [][3]string{ + {"

    ", "p", "nowrap"}, + {"

    ", "p", "nowrap"}, + {"", "input", "checked"}, + {"", "input", "checked"}, + } + for _, test := range tests { + d := NewDecoder(strings.NewReader(test[0])) + d.Strict = false + token, err := d.Token() + if _, ok := err.(*SyntaxError); ok { + t.Errorf("Unexpected error: %v", err) + } + if token.(StartElement).Name.Local != test[1] { + t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local) + } + attr := token.(StartElement).Attr[0] + if attr.Value != test[2] { + t.Errorf("Unexpected attribute value: %v", attr.Value) + } + if attr.Name.Local != test[2] { + t.Errorf("Unexpected attribute name: %v", attr.Name.Local) + } + } +} + +func TestCopyTokenCharData(t *testing.T) { + data := []byte("same data") + var tok1 Token = CharData(data) + tok2 := CopyToken(tok1) + if !reflect.DeepEqual(tok1, tok2) { + t.Error("CopyToken(CharData) != CharData") + } + data[1] = 'o' + if reflect.DeepEqual(tok1, tok2) { + t.Error("CopyToken(CharData) uses same buffer.") + } +} + +func TestCopyTokenStartElement(t *testing.T) { + elt := StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}} + var tok1 Token = elt + tok2 := CopyToken(tok1) + if tok1.(StartElement).Attr[0].Value != "en" { + t.Error("CopyToken overwrote Attr[0]") + } + if !reflect.DeepEqual(tok1, tok2) { + t.Error("CopyToken(StartElement) != StartElement") + } + tok1.(StartElement).Attr[0] = Attr{Name{"", "lang"}, "de"} + if reflect.DeepEqual(tok1, tok2) { + t.Error("CopyToken(CharData) uses same buffer.") + } +} + +func TestSyntaxErrorLineNum(t *testing.T) { + testInput := "

    Foo

    \n\n

    Bar\n" + d := NewDecoder(strings.NewReader(testInput)) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + synerr, ok := err.(*SyntaxError) + if !ok { + t.Error("Expected SyntaxError.") + } + if synerr.Line != 3 { + t.Error("SyntaxError didn't have correct line number.") + } +} + +func TestTrailingRawToken(t *testing.T) { + input := ` ` + d := NewDecoder(strings.NewReader(input)) + var err error + for _, err = d.RawToken(); err == nil; _, err = d.RawToken() { + } + if err != io.EOF { + t.Fatalf("d.RawToken() = _, %v, want _, io.EOF", err) + } +} + +func TestTrailingToken(t *testing.T) { + input := ` ` + d := NewDecoder(strings.NewReader(input)) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + if err != io.EOF { + t.Fatalf("d.Token() = _, %v, want _, io.EOF", err) + } +} + +func TestEntityInsideCDATA(t *testing.T) { + input := `` + d := NewDecoder(strings.NewReader(input)) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + if err != io.EOF { + t.Fatalf("d.Token() = _, %v, want _, io.EOF", err) + } +} + +var characterTests = []struct { + in string + err string +}{ + {"\x12", "illegal character code U+0012"}, + {"\x0b", "illegal character code U+000B"}, + {"\xef\xbf\xbe", "illegal character code U+FFFE"}, + {"\r\n\x07", "illegal character code U+0007"}, + {"what's up", "expected attribute name in element"}, + {"&abc\x01;", "invalid character entity &abc (no semicolon)"}, + {"&\x01;", "invalid character entity & (no semicolon)"}, + {"&\xef\xbf\xbe;", "invalid character entity &\uFFFE;"}, + {"&hello;", "invalid character entity &hello;"}, +} + +func TestDisallowedCharacters(t *testing.T) { + + for i, tt := range characterTests { + d := NewDecoder(strings.NewReader(tt.in)) + var err error + + for err == nil { + _, err = d.Token() + } + synerr, ok := err.(*SyntaxError) + if !ok { + t.Fatalf("input %d d.Token() = _, %v, want _, *SyntaxError", i, err) + } + if synerr.Msg != tt.err { + t.Fatalf("input %d synerr.Msg wrong: want %q, got %q", i, tt.err, synerr.Msg) + } + } +} + +type procInstEncodingTest struct { + expect, got string +} + +var procInstTests = []struct { + input string + expect [2]string +}{ + {`version="1.0" encoding="utf-8"`, [2]string{"1.0", "utf-8"}}, + {`version="1.0" encoding='utf-8'`, [2]string{"1.0", "utf-8"}}, + {`version="1.0" encoding='utf-8' `, [2]string{"1.0", "utf-8"}}, + {`version="1.0" encoding=utf-8`, [2]string{"1.0", ""}}, + {`encoding="FOO" `, [2]string{"", "FOO"}}, +} + +func TestProcInstEncoding(t *testing.T) { + for _, test := range procInstTests { + if got := procInst("version", test.input); got != test.expect[0] { + t.Errorf("procInst(version, %q) = %q; want %q", test.input, got, test.expect[0]) + } + if got := procInst("encoding", test.input); got != test.expect[1] { + t.Errorf("procInst(encoding, %q) = %q; want %q", test.input, got, test.expect[1]) + } + } +} + +// Ensure that directives with comments include the complete +// text of any nested directives. 
+ +var directivesWithCommentsInput = ` +]> +]> + --> --> []> +` + +var directivesWithCommentsTokens = []Token{ + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), +} + +func TestDirectivesWithComments(t *testing.T) { + d := NewDecoder(strings.NewReader(directivesWithCommentsInput)) + + for i, want := range directivesWithCommentsTokens { + have, err := d.Token() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + t.Errorf("token %d = %#v want %#v", i, have, want) + } + } +} + +// Writer whose Write method always returns an error. +type errWriter struct{} + +func (errWriter) Write(p []byte) (n int, err error) { return 0, fmt.Errorf("unwritable") } + +func TestEscapeTextIOErrors(t *testing.T) { + expectErr := "unwritable" + err := EscapeText(errWriter{}, []byte{'A'}) + + if err == nil || err.Error() != expectErr { + t.Errorf("have %v, want %v", err, expectErr) + } +} + +func TestEscapeTextInvalidChar(t *testing.T) { + input := []byte("A \x00 terminated string.") + expected := "A \uFFFD terminated string." + + buff := new(bytes.Buffer) + if err := EscapeText(buff, input); err != nil { + t.Fatalf("have %v, want nil", err) + } + text := buff.String() + + if text != expected { + t.Errorf("have %v, want %v", text, expected) + } +} + +func TestIssue5880(t *testing.T) { + type T []byte + data, err := Marshal(T{192, 168, 0, 1}) + if err != nil { + t.Errorf("Marshal error: %v", err) + } + if !utf8.Valid(data) { + t.Errorf("Marshal generated invalid UTF-8: %x", data) + } +} diff --git a/vendor/golang.org/x/net/webdav/litmus_test_server.go b/vendor/golang.org/x/net/webdav/litmus_test_server.go new file mode 100644 index 0000000..514db5d --- /dev/null +++ b/vendor/golang.org/x/net/webdav/litmus_test_server.go @@ -0,0 +1,94 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +This program is a server for the WebDAV 'litmus' compliance test at +http://www.webdav.org/neon/litmus/ +To run the test: + +go run litmus_test_server.go + +and separately, from the downloaded litmus-xxx directory: + +make URL=http://localhost:9999/ check +*/ +package main + +import ( + "flag" + "fmt" + "log" + "net/http" + "net/url" + + "golang.org/x/net/webdav" +) + +var port = flag.Int("port", 9999, "server port") + +func main() { + flag.Parse() + log.SetFlags(0) + h := &webdav.Handler{ + FileSystem: webdav.NewMemFS(), + LockSystem: webdav.NewMemLS(), + Logger: func(r *http.Request, err error) { + litmus := r.Header.Get("X-Litmus") + if len(litmus) > 19 { + litmus = litmus[:16] + "..." + } + + switch r.Method { + case "COPY", "MOVE": + dst := "" + if u, err := url.Parse(r.Header.Get("Destination")); err == nil { + dst = u.Path + } + o := r.Header.Get("Overwrite") + log.Printf("%-20s%-10s%-30s%-30so=%-2s%v", litmus, r.Method, r.URL.Path, dst, o, err) + default: + log.Printf("%-20s%-10s%-30s%v", litmus, r.Method, r.URL.Path, err) + } + }, + } + + // The next line would normally be: + // http.Handle("/", h) + // but we wrap that HTTP handler h to cater for a special case. + // + // The propfind_invalid2 litmus test case expects an empty namespace prefix + // declaration to be an error. The FAQ in the webdav litmus test says: + // + // "What does the "propfind_invalid2" test check for?... 
+ // + // If a request was sent with an XML body which included an empty namespace + // prefix declaration (xmlns:ns1=""), then the server must reject that with + // a "400 Bad Request" response, as it is invalid according to the XML + // Namespace specification." + // + // On the other hand, the Go standard library's encoding/xml package + // accepts an empty xmlns namespace, as per the discussion at + // https://github.com/golang/go/issues/8068 + // + // Empty namespaces seem disallowed in the second (2006) edition of the XML + // standard, but allowed in a later edition. The grammar differs between + // http://www.w3.org/TR/2006/REC-xml-names-20060816/#ns-decl and + // http://www.w3.org/TR/REC-xml-names/#dt-prefix + // + // Thus, we assume that the propfind_invalid2 test is obsolete, and + // hard-code the 400 Bad Request response that the test expects. + http.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get("X-Litmus") == "props: 3 (propfind_invalid2)" { + http.Error(w, "400 Bad Request", http.StatusBadRequest) + return + } + h.ServeHTTP(w, r) + })) + + addr := fmt.Sprintf(":%d", *port) + log.Printf("Serving %v", addr) + log.Fatal(http.ListenAndServe(addr, nil)) +} diff --git a/vendor/golang.org/x/net/webdav/lock.go b/vendor/golang.org/x/net/webdav/lock.go new file mode 100644 index 0000000..344ac5c --- /dev/null +++ b/vendor/golang.org/x/net/webdav/lock.go @@ -0,0 +1,445 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "container/heap" + "errors" + "strconv" + "strings" + "sync" + "time" +) + +var ( + // ErrConfirmationFailed is returned by a LockSystem's Confirm method. + ErrConfirmationFailed = errors.New("webdav: confirmation failed") + // ErrForbidden is returned by a LockSystem's Unlock method. + ErrForbidden = errors.New("webdav: forbidden") + // ErrLocked is returned by a LockSystem's Create, Refresh and Unlock methods. + ErrLocked = errors.New("webdav: locked") + // ErrNoSuchLock is returned by a LockSystem's Refresh and Unlock methods. + ErrNoSuchLock = errors.New("webdav: no such lock") +) + +// Condition can match a WebDAV resource, based on a token or ETag. +// Exactly one of Token and ETag should be non-empty. +type Condition struct { + Not bool + Token string + ETag string +} + +// LockSystem manages access to a collection of named resources. The elements +// in a lock name are separated by slash ('/', U+002F) characters, regardless +// of host operating system convention. +type LockSystem interface { + // Confirm confirms that the caller can claim all of the locks specified by + // the given conditions, and that holding the union of all of those locks + // gives exclusive access to all of the named resources. Up to two resources + // can be named. Empty names are ignored. + // + // Exactly one of release and err will be non-nil. If release is non-nil, + // all of the requested locks are held until release is called. Calling + // release does not unlock the lock, in the WebDAV UNLOCK sense, but once + // Confirm has confirmed that a lock claim is valid, that lock cannot be + // Confirmed again until it has been released. + // + // If Confirm returns ErrConfirmationFailed then the Handler will continue + // to try any other set of locks presented (a WebDAV HTTP request can + // present more than one set of locks). 
If it returns any other non-nil + // error, the Handler will write a "500 Internal Server Error" HTTP status. + Confirm(now time.Time, name0, name1 string, conditions ...Condition) (release func(), err error) + + // Create creates a lock with the given depth, duration, owner and root + // (name). The depth will either be negative (meaning infinite) or zero. + // + // If Create returns ErrLocked then the Handler will write a "423 Locked" + // HTTP status. If it returns any other non-nil error, the Handler will + // write a "500 Internal Server Error" HTTP status. + // + // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for + // when to use each error. + // + // The token returned identifies the created lock. It should be an absolute + // URI as defined by RFC 3986, Section 4.3. In particular, it should not + // contain whitespace. + Create(now time.Time, details LockDetails) (token string, err error) + + // Refresh refreshes the lock with the given token. + // + // If Refresh returns ErrLocked then the Handler will write a "423 Locked" + // HTTP Status. If Refresh returns ErrNoSuchLock then the Handler will write + // a "412 Precondition Failed" HTTP Status. If it returns any other non-nil + // error, the Handler will write a "500 Internal Server Error" HTTP status. + // + // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for + // when to use each error. + Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error) + + // Unlock unlocks the lock with the given token. + // + // If Unlock returns ErrForbidden then the Handler will write a "403 + // Forbidden" HTTP Status. If Unlock returns ErrLocked then the Handler + // will write a "423 Locked" HTTP status. If Unlock returns ErrNoSuchLock + // then the Handler will write a "409 Conflict" HTTP Status. If it returns + // any other non-nil error, the Handler will write a "500 Internal Server + // Error" HTTP status. + // + // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.11.1 for + // when to use each error. + Unlock(now time.Time, token string) error +} + +// LockDetails are a lock's metadata. +type LockDetails struct { + // Root is the root resource name being locked. For a zero-depth lock, the + // root is the only resource being locked. + Root string + // Duration is the lock timeout. A negative duration means infinite. + Duration time.Duration + // OwnerXML is the verbatim XML given in a LOCK HTTP request. + // + // TODO: does the "verbatim" nature play well with XML namespaces? + // Does the OwnerXML field need to have more structure? See + // https://codereview.appspot.com/175140043/#msg2 + OwnerXML string + // ZeroDepth is whether the lock has zero depth. If it does not have zero + // depth, it has infinite depth. + ZeroDepth bool +} + +// NewMemLS returns a new in-memory LockSystem. +func NewMemLS() LockSystem { + return &memLS{ + byName: make(map[string]*memLSNode), + byToken: make(map[string]*memLSNode), + gen: uint64(time.Now().Unix()), + } +} + +type memLS struct { + mu sync.Mutex + byName map[string]*memLSNode + byToken map[string]*memLSNode + gen uint64 + // byExpiry only contains those nodes whose LockDetails have a finite + // Duration and are yet to expire. 
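+	// It is maintained as a min-heap ordered by expiry time; see the
+	// heap.Interface methods on the byExpiry type below.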
+ byExpiry byExpiry +} + +func (m *memLS) nextToken() string { + m.gen++ + return strconv.FormatUint(m.gen, 10) +} + +func (m *memLS) collectExpiredNodes(now time.Time) { + for len(m.byExpiry) > 0 { + if now.Before(m.byExpiry[0].expiry) { + break + } + m.remove(m.byExpiry[0]) + } +} + +func (m *memLS) Confirm(now time.Time, name0, name1 string, conditions ...Condition) (func(), error) { + m.mu.Lock() + defer m.mu.Unlock() + m.collectExpiredNodes(now) + + var n0, n1 *memLSNode + if name0 != "" { + if n0 = m.lookup(slashClean(name0), conditions...); n0 == nil { + return nil, ErrConfirmationFailed + } + } + if name1 != "" { + if n1 = m.lookup(slashClean(name1), conditions...); n1 == nil { + return nil, ErrConfirmationFailed + } + } + + // Don't hold the same node twice. + if n1 == n0 { + n1 = nil + } + + if n0 != nil { + m.hold(n0) + } + if n1 != nil { + m.hold(n1) + } + return func() { + m.mu.Lock() + defer m.mu.Unlock() + if n1 != nil { + m.unhold(n1) + } + if n0 != nil { + m.unhold(n0) + } + }, nil +} + +// lookup returns the node n that locks the named resource, provided that n +// matches at least one of the given conditions and that lock isn't held by +// another party. Otherwise, it returns nil. +// +// n may be a parent of the named resource, if n is an infinite depth lock. +func (m *memLS) lookup(name string, conditions ...Condition) (n *memLSNode) { + // TODO: support Condition.Not and Condition.ETag. + for _, c := range conditions { + n = m.byToken[c.Token] + if n == nil || n.held { + continue + } + if name == n.details.Root { + return n + } + if n.details.ZeroDepth { + continue + } + if n.details.Root == "/" || strings.HasPrefix(name, n.details.Root+"/") { + return n + } + } + return nil +} + +func (m *memLS) hold(n *memLSNode) { + if n.held { + panic("webdav: memLS inconsistent held state") + } + n.held = true + if n.details.Duration >= 0 && n.byExpiryIndex >= 0 { + heap.Remove(&m.byExpiry, n.byExpiryIndex) + } +} + +func (m *memLS) unhold(n *memLSNode) { + if !n.held { + panic("webdav: memLS inconsistent held state") + } + n.held = false + if n.details.Duration >= 0 { + heap.Push(&m.byExpiry, n) + } +} + +func (m *memLS) Create(now time.Time, details LockDetails) (string, error) { + m.mu.Lock() + defer m.mu.Unlock() + m.collectExpiredNodes(now) + details.Root = slashClean(details.Root) + + if !m.canCreate(details.Root, details.ZeroDepth) { + return "", ErrLocked + } + n := m.create(details.Root) + n.token = m.nextToken() + m.byToken[n.token] = n + n.details = details + if n.details.Duration >= 0 { + n.expiry = now.Add(n.details.Duration) + heap.Push(&m.byExpiry, n) + } + return n.token, nil +} + +func (m *memLS) Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error) { + m.mu.Lock() + defer m.mu.Unlock() + m.collectExpiredNodes(now) + + n := m.byToken[token] + if n == nil { + return LockDetails{}, ErrNoSuchLock + } + if n.held { + return LockDetails{}, ErrLocked + } + if n.byExpiryIndex >= 0 { + heap.Remove(&m.byExpiry, n.byExpiryIndex) + } + n.details.Duration = duration + if n.details.Duration >= 0 { + n.expiry = now.Add(n.details.Duration) + heap.Push(&m.byExpiry, n) + } + return n.details, nil +} + +func (m *memLS) Unlock(now time.Time, token string) error { + m.mu.Lock() + defer m.mu.Unlock() + m.collectExpiredNodes(now) + + n := m.byToken[token] + if n == nil { + return ErrNoSuchLock + } + if n.held { + return ErrLocked + } + m.remove(n) + return nil +} + +func (m *memLS) canCreate(name string, zeroDepth bool) bool { + return walkToRoot(name, 
func(name0 string, first bool) bool { + n := m.byName[name0] + if n == nil { + return true + } + if first { + if n.token != "" { + // The target node is already locked. + return false + } + if !zeroDepth { + // The requested lock depth is infinite, and the fact that n exists + // (n != nil) means that a descendent of the target node is locked. + return false + } + } else if n.token != "" && !n.details.ZeroDepth { + // An ancestor of the target node is locked with infinite depth. + return false + } + return true + }) +} + +func (m *memLS) create(name string) (ret *memLSNode) { + walkToRoot(name, func(name0 string, first bool) bool { + n := m.byName[name0] + if n == nil { + n = &memLSNode{ + details: LockDetails{ + Root: name0, + }, + byExpiryIndex: -1, + } + m.byName[name0] = n + } + n.refCount++ + if first { + ret = n + } + return true + }) + return ret +} + +func (m *memLS) remove(n *memLSNode) { + delete(m.byToken, n.token) + n.token = "" + walkToRoot(n.details.Root, func(name0 string, first bool) bool { + x := m.byName[name0] + x.refCount-- + if x.refCount == 0 { + delete(m.byName, name0) + } + return true + }) + if n.byExpiryIndex >= 0 { + heap.Remove(&m.byExpiry, n.byExpiryIndex) + } +} + +func walkToRoot(name string, f func(name0 string, first bool) bool) bool { + for first := true; ; first = false { + if !f(name, first) { + return false + } + if name == "/" { + break + } + name = name[:strings.LastIndex(name, "/")] + if name == "" { + name = "/" + } + } + return true +} + +type memLSNode struct { + // details are the lock metadata. Even if this node's name is not explicitly locked, + // details.Root will still equal the node's name. + details LockDetails + // token is the unique identifier for this node's lock. An empty token means that + // this node is not explicitly locked. + token string + // refCount is the number of self-or-descendent nodes that are explicitly locked. + refCount int + // expiry is when this node's lock expires. + expiry time.Time + // byExpiryIndex is the index of this node in memLS.byExpiry. It is -1 + // if this node does not expire, or has expired. + byExpiryIndex int + // held is whether this node's lock is actively held by a Confirm call. + held bool +} + +type byExpiry []*memLSNode + +func (b *byExpiry) Len() int { + return len(*b) +} + +func (b *byExpiry) Less(i, j int) bool { + return (*b)[i].expiry.Before((*b)[j].expiry) +} + +func (b *byExpiry) Swap(i, j int) { + (*b)[i], (*b)[j] = (*b)[j], (*b)[i] + (*b)[i].byExpiryIndex = i + (*b)[j].byExpiryIndex = j +} + +func (b *byExpiry) Push(x interface{}) { + n := x.(*memLSNode) + n.byExpiryIndex = len(*b) + *b = append(*b, n) +} + +func (b *byExpiry) Pop() interface{} { + i := len(*b) - 1 + n := (*b)[i] + (*b)[i] = nil + n.byExpiryIndex = -1 + *b = (*b)[:i] + return n +} + +const infiniteTimeout = -1 + +// parseTimeout parses the Timeout HTTP header, as per section 10.7. If s is +// empty, an infiniteTimeout is returned. 
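+//
+// Illustrative examples (not in the original source, but implied by the
+// parsing below): "Second-60" yields 60 * time.Second, "Infinite" yields
+// infiniteTimeout, and only the first comma-separated clause is used, so
+// "Second-5, Infinite" also yields 5 * time.Second.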
+func parseTimeout(s string) (time.Duration, error) { + if s == "" { + return infiniteTimeout, nil + } + if i := strings.IndexByte(s, ','); i >= 0 { + s = s[:i] + } + s = strings.TrimSpace(s) + if s == "Infinite" { + return infiniteTimeout, nil + } + const pre = "Second-" + if !strings.HasPrefix(s, pre) { + return 0, errInvalidTimeout + } + s = s[len(pre):] + if s == "" || s[0] < '0' || '9' < s[0] { + return 0, errInvalidTimeout + } + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || 1<<32-1 < n { + return 0, errInvalidTimeout + } + return time.Duration(n) * time.Second, nil +} diff --git a/vendor/golang.org/x/net/webdav/lock_test.go b/vendor/golang.org/x/net/webdav/lock_test.go new file mode 100644 index 0000000..5cf14cd --- /dev/null +++ b/vendor/golang.org/x/net/webdav/lock_test.go @@ -0,0 +1,731 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "fmt" + "math/rand" + "path" + "reflect" + "sort" + "strconv" + "strings" + "testing" + "time" +) + +func TestWalkToRoot(t *testing.T) { + testCases := []struct { + name string + want []string + }{{ + "/a/b/c/d", + []string{ + "/a/b/c/d", + "/a/b/c", + "/a/b", + "/a", + "/", + }, + }, { + "/a", + []string{ + "/a", + "/", + }, + }, { + "/", + []string{ + "/", + }, + }} + + for _, tc := range testCases { + var got []string + if !walkToRoot(tc.name, func(name0 string, first bool) bool { + if first != (len(got) == 0) { + t.Errorf("name=%q: first=%t but len(got)==%d", tc.name, first, len(got)) + return false + } + got = append(got, name0) + return true + }) { + continue + } + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("name=%q:\ngot %q\nwant %q", tc.name, got, tc.want) + } + } +} + +var lockTestDurations = []time.Duration{ + infiniteTimeout, // infiniteTimeout means to never expire. + 0, // A zero duration means to expire immediately. + 100 * time.Hour, // A very large duration will not expire in these tests. +} + +// lockTestNames are the names of a set of mutually compatible locks. For each +// name fragment: +// - _ means no explicit lock. +// - i means an infinite-depth lock, +// - z means a zero-depth lock, +var lockTestNames = []string{ + "/_/_/_/_/z", + "/_/_/i", + "/_/z", + "/_/z/i", + "/_/z/z", + "/_/z/_/i", + "/_/z/_/z", + "/i", + "/z", + "/z/_/i", + "/z/_/z", +} + +func lockTestZeroDepth(name string) bool { + switch name[len(name)-1] { + case 'i': + return false + case 'z': + return true + } + panic(fmt.Sprintf("lock name %q did not end with 'i' or 'z'", name)) +} + +func TestMemLSCanCreate(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + + for _, name := range lockTestNames { + _, err := m.Create(now, LockDetails{ + Root: name, + Duration: infiniteTimeout, + ZeroDepth: lockTestZeroDepth(name), + }) + if err != nil { + t.Fatalf("creating lock for %q: %v", name, err) + } + } + + wantCanCreate := func(name string, zeroDepth bool) bool { + for _, n := range lockTestNames { + switch { + case n == name: + // An existing lock has the same name as the proposed lock. + return false + case strings.HasPrefix(n, name): + // An existing lock would be a child of the proposed lock, + // which conflicts if the proposed lock has infinite depth. + if !zeroDepth { + return false + } + case strings.HasPrefix(name, n): + // An existing lock would be an ancestor of the proposed lock, + // which conflicts if the ancestor has infinite depth. 
+ if n[len(n)-1] == 'i' { + return false + } + } + } + return true + } + + var check func(int, string) + check = func(recursion int, name string) { + for _, zeroDepth := range []bool{false, true} { + got := m.canCreate(name, zeroDepth) + want := wantCanCreate(name, zeroDepth) + if got != want { + t.Errorf("canCreate name=%q zeroDepth=%t: got %t, want %t", name, zeroDepth, got, want) + } + } + if recursion == 6 { + return + } + if name != "/" { + name += "/" + } + for _, c := range "_iz" { + check(recursion+1, name+string(c)) + } + } + check(0, "/") +} + +func TestMemLSLookup(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + + badToken := m.nextToken() + t.Logf("badToken=%q", badToken) + + for _, name := range lockTestNames { + token, err := m.Create(now, LockDetails{ + Root: name, + Duration: infiniteTimeout, + ZeroDepth: lockTestZeroDepth(name), + }) + if err != nil { + t.Fatalf("creating lock for %q: %v", name, err) + } + t.Logf("%-15q -> node=%p token=%q", name, m.byName[name], token) + } + + baseNames := append([]string{"/a", "/b/c"}, lockTestNames...) + for _, baseName := range baseNames { + for _, suffix := range []string{"", "/0", "/1/2/3"} { + name := baseName + suffix + + goodToken := "" + base := m.byName[baseName] + if base != nil && (suffix == "" || !lockTestZeroDepth(baseName)) { + goodToken = base.token + } + + for _, token := range []string{badToken, goodToken} { + if token == "" { + continue + } + + got := m.lookup(name, Condition{Token: token}) + want := base + if token == badToken { + want = nil + } + if got != want { + t.Errorf("name=%-20qtoken=%q (bad=%t): got %p, want %p", + name, token, token == badToken, got, want) + } + } + } + } +} + +func TestMemLSConfirm(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + alice, err := m.Create(now, LockDetails{ + Root: "/alice", + Duration: infiniteTimeout, + ZeroDepth: false, + }) + tweedle, err := m.Create(now, LockDetails{ + Root: "/tweedle", + Duration: infiniteTimeout, + ZeroDepth: false, + }) + if err != nil { + t.Fatalf("Create: %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Create: inconsistent state: %v", err) + } + + // Test a mismatch between name and condition. + _, err = m.Confirm(now, "/tweedle/dee", "", Condition{Token: alice}) + if err != ErrConfirmationFailed { + t.Fatalf("Confirm (mismatch): got %v, want ErrConfirmationFailed", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (mismatch): inconsistent state: %v", err) + } + + // Test two names (that fall under the same lock) in the one Confirm call. + release, err := m.Confirm(now, "/tweedle/dee", "/tweedle/dum", Condition{Token: tweedle}) + if err != nil { + t.Fatalf("Confirm (twins): %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (twins): inconsistent state: %v", err) + } + release() + if err := m.consistent(); err != nil { + t.Fatalf("release (twins): inconsistent state: %v", err) + } + + // Test the same two names in overlapping Confirm / release calls. 
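// Editor's note (illustrative, not in the vendored file): both names below
// resolve to the single lock rooted at "/tweedle", so while the first
// confirmation is outstanding the lock is held and a second Confirm must
// fail; it succeeds again once releaseDee has been called.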
+ releaseDee, err := m.Confirm(now, "/tweedle/dee", "", Condition{Token: tweedle}) + if err != nil { + t.Fatalf("Confirm (sequence #0): %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (sequence #0): inconsistent state: %v", err) + } + + _, err = m.Confirm(now, "/tweedle/dum", "", Condition{Token: tweedle}) + if err != ErrConfirmationFailed { + t.Fatalf("Confirm (sequence #1): got %v, want ErrConfirmationFailed", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (sequence #1): inconsistent state: %v", err) + } + + releaseDee() + if err := m.consistent(); err != nil { + t.Fatalf("release (sequence #2): inconsistent state: %v", err) + } + + releaseDum, err := m.Confirm(now, "/tweedle/dum", "", Condition{Token: tweedle}) + if err != nil { + t.Fatalf("Confirm (sequence #3): %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (sequence #3): inconsistent state: %v", err) + } + + // Test that you can't unlock a held lock. + err = m.Unlock(now, tweedle) + if err != ErrLocked { + t.Fatalf("Unlock (sequence #4): got %v, want ErrLocked", err) + } + + releaseDum() + if err := m.consistent(); err != nil { + t.Fatalf("release (sequence #5): inconsistent state: %v", err) + } + + err = m.Unlock(now, tweedle) + if err != nil { + t.Fatalf("Unlock (sequence #6): %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Unlock (sequence #6): inconsistent state: %v", err) + } +} + +func TestMemLSNonCanonicalRoot(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + token, err := m.Create(now, LockDetails{ + Root: "/foo/./bar//", + Duration: 1 * time.Second, + }) + if err != nil { + t.Fatalf("Create: %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Create: inconsistent state: %v", err) + } + if err := m.Unlock(now, token); err != nil { + t.Fatalf("Unlock: %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Unlock: inconsistent state: %v", err) + } +} + +func TestMemLSExpiry(t *testing.T) { + m := NewMemLS().(*memLS) + testCases := []string{ + "setNow 0", + "create /a.5", + "want /a.5", + "create /c.6", + "want /a.5 /c.6", + "create /a/b.7", + "want /a.5 /a/b.7 /c.6", + "setNow 4", + "want /a.5 /a/b.7 /c.6", + "setNow 5", + "want /a/b.7 /c.6", + "setNow 6", + "want /a/b.7", + "setNow 7", + "want ", + "setNow 8", + "want ", + "create /a.12", + "create /b.13", + "create /c.15", + "create /a/d.16", + "want /a.12 /a/d.16 /b.13 /c.15", + "refresh /a.14", + "want /a.14 /a/d.16 /b.13 /c.15", + "setNow 12", + "want /a.14 /a/d.16 /b.13 /c.15", + "setNow 13", + "want /a.14 /a/d.16 /c.15", + "setNow 14", + "want /a/d.16 /c.15", + "refresh /a/d.20", + "refresh /c.20", + "want /a/d.20 /c.20", + "setNow 20", + "want ", + } + + tokens := map[string]string{} + zTime := time.Unix(0, 0) + now := zTime + for i, tc := range testCases { + j := strings.IndexByte(tc, ' ') + if j < 0 { + t.Fatalf("test case #%d %q: invalid command", i, tc) + } + op, arg := tc[:j], tc[j+1:] + switch op { + default: + t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op) + + case "create", "refresh": + parts := strings.Split(arg, ".") + if len(parts) != 2 { + t.Fatalf("test case #%d %q: invalid create", i, tc) + } + root := parts[0] + d, err := strconv.Atoi(parts[1]) + if err != nil { + t.Fatalf("test case #%d %q: invalid duration", i, tc) + } + dur := time.Unix(0, 0).Add(time.Duration(d) * time.Second).Sub(now) + + switch op { + case "create": + token, err := m.Create(now, LockDetails{ + Root: root, + Duration: dur, + 
ZeroDepth: true, + }) + if err != nil { + t.Fatalf("test case #%d %q: Create: %v", i, tc, err) + } + tokens[root] = token + + case "refresh": + token := tokens[root] + if token == "" { + t.Fatalf("test case #%d %q: no token for %q", i, tc, root) + } + got, err := m.Refresh(now, token, dur) + if err != nil { + t.Fatalf("test case #%d %q: Refresh: %v", i, tc, err) + } + want := LockDetails{ + Root: root, + Duration: dur, + ZeroDepth: true, + } + if got != want { + t.Fatalf("test case #%d %q:\ngot %v\nwant %v", i, tc, got, want) + } + } + + case "setNow": + d, err := strconv.Atoi(arg) + if err != nil { + t.Fatalf("test case #%d %q: invalid duration", i, tc) + } + now = time.Unix(0, 0).Add(time.Duration(d) * time.Second) + + case "want": + m.mu.Lock() + m.collectExpiredNodes(now) + got := make([]string, 0, len(m.byToken)) + for _, n := range m.byToken { + got = append(got, fmt.Sprintf("%s.%d", + n.details.Root, n.expiry.Sub(zTime)/time.Second)) + } + m.mu.Unlock() + sort.Strings(got) + want := []string{} + if arg != "" { + want = strings.Split(arg, " ") + } + if !reflect.DeepEqual(got, want) { + t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, want) + } + } + + if err := m.consistent(); err != nil { + t.Fatalf("test case #%d %q: inconsistent state: %v", i, tc, err) + } + } +} + +func TestMemLS(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + rng := rand.New(rand.NewSource(0)) + tokens := map[string]string{} + nConfirm, nCreate, nRefresh, nUnlock := 0, 0, 0, 0 + const N = 2000 + + for i := 0; i < N; i++ { + name := lockTestNames[rng.Intn(len(lockTestNames))] + duration := lockTestDurations[rng.Intn(len(lockTestDurations))] + confirmed, unlocked := false, false + + // If the name was already locked, we randomly confirm/release, refresh + // or unlock it. Otherwise, we create a lock. + token := tokens[name] + if token != "" { + switch rng.Intn(3) { + case 0: + confirmed = true + nConfirm++ + release, err := m.Confirm(now, name, "", Condition{Token: token}) + if err != nil { + t.Fatalf("iteration #%d: Confirm %q: %v", i, name, err) + } + if err := m.consistent(); err != nil { + t.Fatalf("iteration #%d: inconsistent state: %v", i, err) + } + release() + + case 1: + nRefresh++ + if _, err := m.Refresh(now, token, duration); err != nil { + t.Fatalf("iteration #%d: Refresh %q: %v", i, name, err) + } + + case 2: + unlocked = true + nUnlock++ + if err := m.Unlock(now, token); err != nil { + t.Fatalf("iteration #%d: Unlock %q: %v", i, name, err) + } + } + + } else { + nCreate++ + var err error + token, err = m.Create(now, LockDetails{ + Root: name, + Duration: duration, + ZeroDepth: lockTestZeroDepth(name), + }) + if err != nil { + t.Fatalf("iteration #%d: Create %q: %v", i, name, err) + } + } + + if !confirmed { + if duration == 0 || unlocked { + // A zero-duration lock should expire immediately and is + // effectively equivalent to being unlocked. 
+ tokens[name] = "" + } else { + tokens[name] = token + } + } + + if err := m.consistent(); err != nil { + t.Fatalf("iteration #%d: inconsistent state: %v", i, err) + } + } + + if nConfirm < N/10 { + t.Fatalf("too few Confirm calls: got %d, want >= %d", nConfirm, N/10) + } + if nCreate < N/10 { + t.Fatalf("too few Create calls: got %d, want >= %d", nCreate, N/10) + } + if nRefresh < N/10 { + t.Fatalf("too few Refresh calls: got %d, want >= %d", nRefresh, N/10) + } + if nUnlock < N/10 { + t.Fatalf("too few Unlock calls: got %d, want >= %d", nUnlock, N/10) + } +} + +func (m *memLS) consistent() error { + m.mu.Lock() + defer m.mu.Unlock() + + // If m.byName is non-empty, then it must contain an entry for the root "/", + // and its refCount should equal the number of locked nodes. + if len(m.byName) > 0 { + n := m.byName["/"] + if n == nil { + return fmt.Errorf(`non-empty m.byName does not contain the root "/"`) + } + if n.refCount != len(m.byToken) { + return fmt.Errorf("root node refCount=%d, differs from len(m.byToken)=%d", n.refCount, len(m.byToken)) + } + } + + for name, n := range m.byName { + // The map keys should be consistent with the node's copy of the key. + if n.details.Root != name { + return fmt.Errorf("node name %q != byName map key %q", n.details.Root, name) + } + + // A name must be clean, and start with a "/". + if len(name) == 0 || name[0] != '/' { + return fmt.Errorf(`node name %q does not start with "/"`, name) + } + if name != path.Clean(name) { + return fmt.Errorf(`node name %q is not clean`, name) + } + + // A node's refCount should be positive. + if n.refCount <= 0 { + return fmt.Errorf("non-positive refCount for node at name %q", name) + } + + // A node's refCount should be the number of self-or-descendents that + // are locked (i.e. have a non-empty token). + var list []string + for name0, n0 := range m.byName { + // All of lockTestNames' name fragments are one byte long: '_', 'i' or 'z', + // so strings.HasPrefix is equivalent to self-or-descendent name match. + // We don't have to worry about "/foo/bar" being a false positive match + // for "/foo/b". + if strings.HasPrefix(name0, name) && n0.token != "" { + list = append(list, name0) + } + } + if n.refCount != len(list) { + sort.Strings(list) + return fmt.Errorf("node at name %q has refCount %d but locked self-or-descendents are %q (len=%d)", + name, n.refCount, list, len(list)) + } + + // A node n is in m.byToken if it has a non-empty token. + if n.token != "" { + if _, ok := m.byToken[n.token]; !ok { + return fmt.Errorf("node at name %q has token %q but not in m.byToken", name, n.token) + } + } + + // A node n is in m.byExpiry if it has a non-negative byExpiryIndex. + if n.byExpiryIndex >= 0 { + if n.byExpiryIndex >= len(m.byExpiry) { + return fmt.Errorf("node at name %q has byExpiryIndex %d but m.byExpiry has length %d", name, n.byExpiryIndex, len(m.byExpiry)) + } + if n != m.byExpiry[n.byExpiryIndex] { + return fmt.Errorf("node at name %q has byExpiryIndex %d but that indexes a different node", name, n.byExpiryIndex) + } + } + } + + for token, n := range m.byToken { + // The map keys should be consistent with the node's copy of the key. + if n.token != token { + return fmt.Errorf("node token %q != byToken map key %q", n.token, token) + } + + // Every node in m.byToken is in m.byName. 
+ if _, ok := m.byName[n.details.Root]; !ok { + return fmt.Errorf("node at name %q in m.byToken but not in m.byName", n.details.Root) + } + } + + for i, n := range m.byExpiry { + // The slice indices should be consistent with the node's copy of the index. + if n.byExpiryIndex != i { + return fmt.Errorf("node byExpiryIndex %d != byExpiry slice index %d", n.byExpiryIndex, i) + } + + // Every node in m.byExpiry is in m.byName. + if _, ok := m.byName[n.details.Root]; !ok { + return fmt.Errorf("node at name %q in m.byExpiry but not in m.byName", n.details.Root) + } + + // No node in m.byExpiry should be held. + if n.held { + return fmt.Errorf("node at name %q in m.byExpiry is held", n.details.Root) + } + } + return nil +} + +func TestParseTimeout(t *testing.T) { + testCases := []struct { + s string + want time.Duration + wantErr error + }{{ + "", + infiniteTimeout, + nil, + }, { + "Infinite", + infiniteTimeout, + nil, + }, { + "Infinitesimal", + 0, + errInvalidTimeout, + }, { + "infinite", + 0, + errInvalidTimeout, + }, { + "Second-0", + 0 * time.Second, + nil, + }, { + "Second-123", + 123 * time.Second, + nil, + }, { + " Second-456 ", + 456 * time.Second, + nil, + }, { + "Second-4100000000", + 4100000000 * time.Second, + nil, + }, { + "junk", + 0, + errInvalidTimeout, + }, { + "Second-", + 0, + errInvalidTimeout, + }, { + "Second--1", + 0, + errInvalidTimeout, + }, { + "Second--123", + 0, + errInvalidTimeout, + }, { + "Second-+123", + 0, + errInvalidTimeout, + }, { + "Second-0x123", + 0, + errInvalidTimeout, + }, { + "second-123", + 0, + errInvalidTimeout, + }, { + "Second-4294967295", + 4294967295 * time.Second, + nil, + }, { + // Section 10.7 says that "The timeout value for TimeType "Second" + // must not be greater than 2^32-1." + "Second-4294967296", + 0, + errInvalidTimeout, + }, { + // This test case comes from section 9.10.9 of the spec. It says, + // + // "In this request, the client has specified that it desires an + // infinite-length lock, if available, otherwise a timeout of 4.1 + // billion seconds, if available." + // + // The Go WebDAV package always supports infinite length locks, + // and ignores the fallback after the comma. + "Infinite, Second-4100000000", + infiniteTimeout, + nil, + }} + + for _, tc := range testCases { + got, gotErr := parseTimeout(tc.s) + if got != tc.want || gotErr != tc.wantErr { + t.Errorf("parsing %q:\ngot %v, %v\nwant %v, %v", tc.s, got, gotErr, tc.want, tc.wantErr) + } + } +} diff --git a/vendor/golang.org/x/net/webdav/prop.go b/vendor/golang.org/x/net/webdav/prop.go new file mode 100644 index 0000000..e36a3b3 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/prop.go @@ -0,0 +1,418 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "mime" + "net/http" + "os" + "path/filepath" + "strconv" + + "golang.org/x/net/context" +) + +// Proppatch describes a property update instruction as defined in RFC 4918. +// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH +type Proppatch struct { + // Remove specifies whether this patch removes properties. If it does not + // remove them, it sets them. + Remove bool + // Props contains the properties to be set or removed. + Props []Property +} + +// Propstat describes a XML propstat element as defined in RFC 4918. 
+// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat +type Propstat struct { + // Props contains the properties for which Status applies. + Props []Property + + // Status defines the HTTP status code of the properties in Prop. + // Allowed values include, but are not limited to the WebDAV status + // code extensions for HTTP/1.1. + // http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11 + Status int + + // XMLError contains the XML representation of the optional error element. + // XML content within this field must not rely on any predefined + // namespace declarations or prefixes. If empty, the XML error element + // is omitted. + XMLError string + + // ResponseDescription contains the contents of the optional + // responsedescription field. If empty, the XML element is omitted. + ResponseDescription string +} + +// makePropstats returns a slice containing those of x and y whose Props slice +// is non-empty. If both are empty, it returns a slice containing an otherwise +// zero Propstat whose HTTP status code is 200 OK. +func makePropstats(x, y Propstat) []Propstat { + pstats := make([]Propstat, 0, 2) + if len(x.Props) != 0 { + pstats = append(pstats, x) + } + if len(y.Props) != 0 { + pstats = append(pstats, y) + } + if len(pstats) == 0 { + pstats = append(pstats, Propstat{ + Status: http.StatusOK, + }) + } + return pstats +} + +// DeadPropsHolder holds the dead properties of a resource. +// +// Dead properties are those properties that are explicitly defined. In +// comparison, live properties, such as DAV:getcontentlength, are implicitly +// defined by the underlying resource, and cannot be explicitly overridden or +// removed. See the Terminology section of +// http://www.webdav.org/specs/rfc4918.html#rfc.section.3 +// +// There is a whitelist of the names of live properties. This package handles +// all live properties, and will only pass non-whitelisted names to the Patch +// method of DeadPropsHolder implementations. +type DeadPropsHolder interface { + // DeadProps returns a copy of the dead properties held. + DeadProps() (map[xml.Name]Property, error) + + // Patch patches the dead properties held. + // + // Patching is atomic; either all or no patches succeed. It returns (nil, + // non-nil) if an internal server error occurred, otherwise the Propstats + // collectively contain one Property for each proposed patch Property. If + // all patches succeed, Patch returns a slice of length one and a Propstat + // element with a 200 OK HTTP status code. If none succeed, for reasons + // other than an internal server error, no Propstat has status 200 OK. + // + // For more details on when various HTTP status codes apply, see + // http://www.webdav.org/specs/rfc4918.html#PROPPATCH-status + Patch([]Proppatch) ([]Propstat, error) +} + +// liveProps contains all supported, protected DAV: properties. +var liveProps = map[xml.Name]struct { + // findFn implements the propfind function of this property. If nil, + // it indicates a hidden property. + findFn func(context.Context, FileSystem, LockSystem, string, os.FileInfo) (string, error) + // dir is true if the property applies to directories. 
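// Editor's note (illustrative, not in the vendored file): for example, the
// getcontentlength entry below has dir == false, so a PROPFIND for it on a
// directory reports 404 Not Found, while the same request on a regular file
// reports 200 OK with the file's size.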
+ dir bool +}{ + {Space: "DAV:", Local: "resourcetype"}: { + findFn: findResourceType, + dir: true, + }, + {Space: "DAV:", Local: "displayname"}: { + findFn: findDisplayName, + dir: true, + }, + {Space: "DAV:", Local: "getcontentlength"}: { + findFn: findContentLength, + dir: false, + }, + {Space: "DAV:", Local: "getlastmodified"}: { + findFn: findLastModified, + // http://webdav.org/specs/rfc4918.html#PROPERTY_getlastmodified + // suggests that getlastmodified should only apply to GETable + // resources, and this package does not support GET on directories. + // + // Nonetheless, some WebDAV clients expect child directories to be + // sortable by getlastmodified date, so this value is true, not false. + // See golang.org/issue/15334. + dir: true, + }, + {Space: "DAV:", Local: "creationdate"}: { + findFn: nil, + dir: false, + }, + {Space: "DAV:", Local: "getcontentlanguage"}: { + findFn: nil, + dir: false, + }, + {Space: "DAV:", Local: "getcontenttype"}: { + findFn: findContentType, + dir: false, + }, + {Space: "DAV:", Local: "getetag"}: { + findFn: findETag, + // findETag implements ETag as the concatenated hex values of a file's + // modification time and size. This is not a reliable synchronization + // mechanism for directories, so we do not advertise getetag for DAV + // collections. + dir: false, + }, + + // TODO: The lockdiscovery property requires LockSystem to list the + // active locks on a resource. + {Space: "DAV:", Local: "lockdiscovery"}: {}, + {Space: "DAV:", Local: "supportedlock"}: { + findFn: findSupportedLock, + dir: true, + }, +} + +// TODO(nigeltao) merge props and allprop? + +// Props returns the status of the properties named pnames for resource name. +// +// Each Propstat has a unique status and each property name will only be part +// of one Propstat element. +func props(ctx context.Context, fs FileSystem, ls LockSystem, name string, pnames []xml.Name) ([]Propstat, error) { + f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) + if err != nil { + return nil, err + } + defer f.Close() + fi, err := f.Stat() + if err != nil { + return nil, err + } + isDir := fi.IsDir() + + var deadProps map[xml.Name]Property + if dph, ok := f.(DeadPropsHolder); ok { + deadProps, err = dph.DeadProps() + if err != nil { + return nil, err + } + } + + pstatOK := Propstat{Status: http.StatusOK} + pstatNotFound := Propstat{Status: http.StatusNotFound} + for _, pn := range pnames { + // If this file has dead properties, check if they contain pn. + if dp, ok := deadProps[pn]; ok { + pstatOK.Props = append(pstatOK.Props, dp) + continue + } + // Otherwise, it must either be a live property or we don't know it. + if prop := liveProps[pn]; prop.findFn != nil && (prop.dir || !isDir) { + innerXML, err := prop.findFn(ctx, fs, ls, name, fi) + if err != nil { + return nil, err + } + pstatOK.Props = append(pstatOK.Props, Property{ + XMLName: pn, + InnerXML: []byte(innerXML), + }) + } else { + pstatNotFound.Props = append(pstatNotFound.Props, Property{ + XMLName: pn, + }) + } + } + return makePropstats(pstatOK, pstatNotFound), nil +} + +// Propnames returns the property names defined for resource name. 
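// Editor's note (illustrative, not in the vendored file): props above
// partitions its answer by status. Asking a file for DAV:getcontentlength and
// the unknown name foo:bar yields two Propstats: a 200 OK Propstat carrying
// getcontentlength's value, and a 404 Not Found Propstat naming foo:bar with
// no value.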
+func propnames(ctx context.Context, fs FileSystem, ls LockSystem, name string) ([]xml.Name, error) {
+	f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	fi, err := f.Stat()
+	if err != nil {
+		return nil, err
+	}
+	isDir := fi.IsDir()
+
+	var deadProps map[xml.Name]Property
+	if dph, ok := f.(DeadPropsHolder); ok {
+		deadProps, err = dph.DeadProps()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	pnames := make([]xml.Name, 0, len(liveProps)+len(deadProps))
+	for pn, prop := range liveProps {
+		if prop.findFn != nil && (prop.dir || !isDir) {
+			pnames = append(pnames, pn)
+		}
+	}
+	for pn := range deadProps {
+		pnames = append(pnames, pn)
+	}
+	return pnames, nil
+}
+
+// Allprop returns the properties defined for resource name and the properties
+// named in include.
+//
+// Note that RFC 4918 defines 'allprop' to return the DAV: properties defined
+// within the RFC plus dead properties. Other live properties should only be
+// returned if they are named in 'include'.
+//
+// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND
+func allprop(ctx context.Context, fs FileSystem, ls LockSystem, name string, include []xml.Name) ([]Propstat, error) {
+	pnames, err := propnames(ctx, fs, ls, name)
+	if err != nil {
+		return nil, err
+	}
+	// Add names from include if they are not already covered in pnames.
+	nameset := make(map[xml.Name]bool)
+	for _, pn := range pnames {
+		nameset[pn] = true
+	}
+	for _, pn := range include {
+		if !nameset[pn] {
+			pnames = append(pnames, pn)
+		}
+	}
+	return props(ctx, fs, ls, name, pnames)
+}
+
+// Patch patches the properties of resource name. The return values are
+// constrained in the same manner as DeadPropsHolder.Patch.
+func patch(ctx context.Context, fs FileSystem, ls LockSystem, name string, patches []Proppatch) ([]Propstat, error) {
+	conflict := false
+loop:
+	for _, patch := range patches {
+		for _, p := range patch.Props {
+			if _, ok := liveProps[p.XMLName]; ok {
+				conflict = true
+				break loop
+			}
+		}
+	}
+	if conflict {
+		pstatForbidden := Propstat{
+			Status:   http.StatusForbidden,
+			XMLError: `<D:cannot-modify-protected-property xmlns:D="DAV:"/>`,
+		}
+		pstatFailedDep := Propstat{
+			Status: StatusFailedDependency,
+		}
+		for _, patch := range patches {
+			for _, p := range patch.Props {
+				if _, ok := liveProps[p.XMLName]; ok {
+					pstatForbidden.Props = append(pstatForbidden.Props, Property{XMLName: p.XMLName})
+				} else {
+					pstatFailedDep.Props = append(pstatFailedDep.Props, Property{XMLName: p.XMLName})
+				}
+			}
+		}
+		return makePropstats(pstatForbidden, pstatFailedDep), nil
+	}
+
+	f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	if dph, ok := f.(DeadPropsHolder); ok {
+		ret, err := dph.Patch(patches)
+		if err != nil {
+			return nil, err
+		}
+		// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat says that
+		// "The contents of the prop XML element must only list the names of
+		// properties to which the result in the status element applies."
+		for _, pstat := range ret {
+			for i, p := range pstat.Props {
+				pstat.Props[i] = Property{XMLName: p.XMLName}
+			}
+		}
+		return ret, nil
+	}
+	// The file doesn't implement the optional DeadPropsHolder interface, so
+	// all patches are forbidden.
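// Editor's sketch (illustrative, not part of the vendored file): a File can
// opt in to dead-property storage by implementing DeadPropsHolder, as the
// package's in-memory file system does. The toy implementation below keeps
// properties in a plain map and accepts every patch, so it returns the single
// 200 OK Propstat that the interface contract describes. The name
// mapPropsFile is hypothetical, and a real implementation would also
// synchronize access to the map.
//
//	type mapPropsFile struct {
//		File
//		props map[xml.Name]Property
//	}
//
//	func (f *mapPropsFile) DeadProps() (map[xml.Name]Property, error) {
//		// Return a copy so that callers cannot mutate our state.
//		m := make(map[xml.Name]Property, len(f.props))
//		for k, v := range f.props {
//			m[k] = v
//		}
//		return m, nil
//	}
//
//	func (f *mapPropsFile) Patch(patches []Proppatch) ([]Propstat, error) {
//		pstat := Propstat{Status: http.StatusOK}
//		for _, patch := range patches {
//			for _, p := range patch.Props {
//				if patch.Remove {
//					delete(f.props, p.XMLName)
//				} else {
//					f.props[p.XMLName] = p
//				}
//				// Results name the patched property but never echo its value.
//				pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName})
//			}
//		}
//		return []Propstat{pstat}, nil
//	}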
+	pstat := Propstat{Status: http.StatusForbidden}
+	for _, patch := range patches {
+		for _, p := range patch.Props {
+			pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName})
+		}
+	}
+	return []Propstat{pstat}, nil
+}
+
+func escapeXML(s string) string {
+	for i := 0; i < len(s); i++ {
+		// As an optimization, if s contains only ASCII letters, digits or a
+		// few special characters, the escaped value is s itself and we don't
+		// need to allocate a buffer and convert between string and []byte.
+		switch c := s[i]; {
+		case c == ' ' || c == '_' ||
+			('+' <= c && c <= '9') || // Digits as well as + , - . and /
+			('A' <= c && c <= 'Z') ||
+			('a' <= c && c <= 'z'):
+			continue
+		}
+		// Otherwise, go through the full escaping process.
+		var buf bytes.Buffer
+		xml.EscapeText(&buf, []byte(s))
+		return buf.String()
+	}
+	return s
+}
+
+func findResourceType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+	if fi.IsDir() {
+		return `<D:collection xmlns:D="DAV:"/>`, nil
+	}
+	return "", nil
+}
+
+func findDisplayName(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+	if slashClean(name) == "/" {
+		// Hide the real name of a possibly prefixed root directory.
+		return "", nil
+	}
+	return escapeXML(fi.Name()), nil
+}
+
+func findContentLength(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+	return strconv.FormatInt(fi.Size(), 10), nil
+}
+
+func findLastModified(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+	return fi.ModTime().Format(http.TimeFormat), nil
+}
+
+func findContentType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+	f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+	// This implementation is based on serveContent's code in the standard net/http package.
+	ctype := mime.TypeByExtension(filepath.Ext(name))
+	if ctype != "" {
+		return ctype, nil
+	}
+	// Read a chunk to decide between utf-8 text and binary.
+	var buf [512]byte
+	n, err := io.ReadFull(f, buf[:])
+	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+		return "", err
+	}
+	ctype = http.DetectContentType(buf[:n])
+	// Rewind file.
+	_, err = f.Seek(0, os.SEEK_SET)
+	return ctype, err
+}
+
+func findETag(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+	// The Apache http 2.4 web server by default concatenates the
+	// modification time and size of a file. We replicate the heuristic
+	// with nanosecond granularity.
+	return fmt.Sprintf(`"%x%x"`, fi.ModTime().UnixNano(), fi.Size()), nil
+}
+
+func findSupportedLock(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+	return `` +
+		`<D:lockentry xmlns:D="DAV:">` +
+		`<D:lockscope><D:exclusive/></D:lockscope>` +
+		`<D:locktype><D:write/></D:locktype>` +
+		`</D:lockentry>`, nil
+}
diff --git a/vendor/golang.org/x/net/webdav/prop_test.go b/vendor/golang.org/x/net/webdav/prop_test.go
new file mode 100644
index 0000000..57d0e82
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/prop_test.go
@@ -0,0 +1,613 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+import (
+	"encoding/xml"
+	"fmt"
+	"net/http"
+	"os"
+	"reflect"
+	"sort"
+	"testing"
+
+	"golang.org/x/net/context"
+)
+
+func TestMemPS(t *testing.T) {
+	ctx := context.Background()
+	// calcProps calculates the getlastmodified and getetag DAV: property
+	// values in pstats for resource name in file-system fs.
+	calcProps := func(name string, fs FileSystem, ls LockSystem, pstats []Propstat) error {
+		fi, err := fs.Stat(ctx, name)
+		if err != nil {
+			return err
+		}
+		for _, pst := range pstats {
+			for i, p := range pst.Props {
+				switch p.XMLName {
+				case xml.Name{Space: "DAV:", Local: "getlastmodified"}:
+					p.InnerXML = []byte(fi.ModTime().Format(http.TimeFormat))
+					pst.Props[i] = p
+				case xml.Name{Space: "DAV:", Local: "getetag"}:
+					if fi.IsDir() {
+						continue
+					}
+					etag, err := findETag(ctx, fs, ls, name, fi)
+					if err != nil {
+						return err
+					}
+					p.InnerXML = []byte(etag)
+					pst.Props[i] = p
+				}
+			}
+		}
+		return nil
+	}
+
+	const (
+		lockEntry = `` +
+			`<D:lockentry xmlns:D="DAV:">` +
+			`<D:lockscope><D:exclusive/></D:lockscope>` +
+			`<D:locktype><D:write/></D:locktype>` +
+			`</D:lockentry>`
+		statForbiddenError = `<D:cannot-modify-protected-property xmlns:D="DAV:"/>`
+	)
+
+	type propOp struct {
+		op            string
+		name          string
+		pnames        []xml.Name
+		patches       []Proppatch
+		wantPnames    []xml.Name
+		wantPropstats []Propstat
+	}
+
+	testCases := []struct {
+		desc        string
+		noDeadProps bool
+		buildfs     []string
+		propOp      []propOp
+	}{{
+		desc:    "propname",
+		buildfs: []string{"mkdir /dir", "touch /file"},
+		propOp: []propOp{{
+			op:   "propname",
+			name: "/dir",
+			wantPnames: []xml.Name{
+				{Space: "DAV:", Local: "resourcetype"},
+				{Space: "DAV:", Local: "displayname"},
+				{Space: "DAV:", Local: "supportedlock"},
+				{Space: "DAV:", Local: "getlastmodified"},
+			},
+		}, {
+			op:   "propname",
+			name: "/file",
+			wantPnames: []xml.Name{
+				{Space: "DAV:", Local: "resourcetype"},
+				{Space: "DAV:", Local: "displayname"},
+				{Space: "DAV:", Local: "getcontentlength"},
+				{Space: "DAV:", Local: "getlastmodified"},
+				{Space: "DAV:", Local: "getcontenttype"},
+				{Space: "DAV:", Local: "getetag"},
+				{Space: "DAV:", Local: "supportedlock"},
+			},
+		}},
+	}, {
+		desc:    "allprop dir and file",
+		buildfs: []string{"mkdir /dir", "write /file foobarbaz"},
+		propOp: []propOp{{
+			op:   "allprop",
+			name: "/dir",
+			wantPropstats: []Propstat{{
+				Status: http.StatusOK,
+				Props: []Property{{
+					XMLName:  xml.Name{Space: "DAV:", Local: "resourcetype"},
+					InnerXML: []byte(`<D:collection xmlns:D="DAV:"/>`),
+				}, {
+					XMLName:  xml.Name{Space: "DAV:", Local: "displayname"},
+					InnerXML: []byte("dir"),
+				}, {
+					XMLName:  xml.Name{Space: "DAV:", Local: "getlastmodified"},
+					InnerXML: nil, // Calculated during test.
+				}, {
+					XMLName:  xml.Name{Space: "DAV:", Local: "supportedlock"},
+					InnerXML: []byte(lockEntry),
+				}},
+			}},
+		}, {
+			op:   "allprop",
+			name: "/file",
+			wantPropstats: []Propstat{{
+				Status: http.StatusOK,
+				Props: []Property{{
+					XMLName:  xml.Name{Space: "DAV:", Local: "resourcetype"},
+					InnerXML: []byte(""),
+				}, {
+					XMLName:  xml.Name{Space: "DAV:", Local: "displayname"},
+					InnerXML: []byte("file"),
+				}, {
+					XMLName:  xml.Name{Space: "DAV:", Local: "getcontentlength"},
+					InnerXML: []byte("9"),
+				}, {
+					XMLName:  xml.Name{Space: "DAV:", Local: "getlastmodified"},
+					InnerXML: nil, // Calculated during test.
+				}, {
+					XMLName:  xml.Name{Space: "DAV:", Local: "getcontenttype"},
+					InnerXML: []byte("text/plain; charset=utf-8"),
+				}, {
+					XMLName:  xml.Name{Space: "DAV:", Local: "getetag"},
+					InnerXML: nil, // Calculated during test.
+				}, {
+					XMLName:  xml.Name{Space: "DAV:", Local: "supportedlock"},
+					InnerXML: []byte(lockEntry),
+				}},
+			}},
+		}, {
+			op:   "allprop",
+			name: "/file",
+			pnames: []xml.Name{
+				{"DAV:", "resourcetype"},
+				{"foo", "bar"},
+			},
+			wantPropstats: []Propstat{{
+				Status: http.StatusOK,
+				Props: []Property{{
+					XMLName:  xml.Name{Space: "DAV:", Local: "resourcetype"},
+					InnerXML: []byte(""),
+				}, {
+					XMLName:  xml.Name{Space: "DAV:", Local: "displayname"},
+					InnerXML: []byte("file"),
+				}, {
+					XMLName:  xml.Name{Space: "DAV:", Local: "getcontentlength"},
+					InnerXML: []byte("9"),
+				}, {
+					XMLName:  xml.Name{Space: "DAV:", Local: "getlastmodified"},
+					InnerXML: nil, // Calculated during test.
+				}, {
+					XMLName:  xml.Name{Space: "DAV:", Local: "getcontenttype"},
+					InnerXML: []byte("text/plain; charset=utf-8"),
+				}, {
+					XMLName:  xml.Name{Space: "DAV:", Local: "getetag"},
+					InnerXML: nil, // Calculated during test.
+				}, {
+					XMLName:  xml.Name{Space: "DAV:", Local: "supportedlock"},
+					InnerXML: []byte(lockEntry),
+				}}}, {
+				Status: http.StatusNotFound,
+				Props: []Property{{
+					XMLName: xml.Name{Space: "foo", Local: "bar"},
+				}}},
+			},
+		}},
+	}, {
+		desc:    "propfind DAV:resourcetype",
+		buildfs: []string{"mkdir /dir", "touch /file"},
+		propOp: []propOp{{
+			op:     "propfind",
+			name:   "/dir",
+			pnames: []xml.Name{{"DAV:", "resourcetype"}},
+			wantPropstats: []Propstat{{
+				Status: http.StatusOK,
+				Props: []Property{{
+					XMLName:  xml.Name{Space: "DAV:", Local: "resourcetype"},
+					InnerXML: []byte(`<D:collection xmlns:D="DAV:"/>`),
+				}},
+			}},
+		}, {
+			op:     "propfind",
+			name:   "/file",
+			pnames: []xml.Name{{"DAV:", "resourcetype"}},
+			wantPropstats: []Propstat{{
+				Status: http.StatusOK,
+				Props: []Property{{
+					XMLName:  xml.Name{Space: "DAV:", Local: "resourcetype"},
+					InnerXML: []byte(""),
+				}},
+			}},
+		}},
+	}, {
+		desc:    "propfind unsupported DAV properties",
+		buildfs: []string{"mkdir /dir"},
+		propOp: []propOp{{
+			op:     "propfind",
+			name:   "/dir",
+			pnames: []xml.Name{{"DAV:", "getcontentlanguage"}},
+			wantPropstats: []Propstat{{
+				Status: http.StatusNotFound,
+				Props: []Property{{
+					XMLName: xml.Name{Space: "DAV:", Local: "getcontentlanguage"},
+				}},
+			}},
+		}, {
+			op:     "propfind",
+			name:   "/dir",
+			pnames: []xml.Name{{"DAV:", "creationdate"}},
+			wantPropstats: []Propstat{{
+				Status: http.StatusNotFound,
+				Props: []Property{{
+					XMLName: xml.Name{Space: "DAV:", Local: "creationdate"},
+				}},
+			}},
+		}},
+	}, {
+		desc:    "propfind getetag for files but not for directories",
+		buildfs: []string{"mkdir /dir", "touch /file"},
+		propOp: []propOp{{
+			op:     "propfind",
+			name:   "/dir",
+			pnames: []xml.Name{{"DAV:", "getetag"}},
+			wantPropstats: []Propstat{{
+				Status: http.StatusNotFound,
+				Props: []Property{{
+					XMLName: xml.Name{Space: "DAV:", Local: "getetag"},
+				}},
+			}},
+		}, {
+			op:     "propfind",
+			name:   "/file",
+			pnames: []xml.Name{{"DAV:", "getetag"}},
+			wantPropstats: []Propstat{{
+				Status: http.StatusOK,
+				Props: []Property{{
+					XMLName:  xml.Name{Space: "DAV:", Local: "getetag"},
+					InnerXML: nil, // Calculated during test.
+ }}, + }}, + }}, + }, { + desc: "proppatch property on no-dead-properties file system", + buildfs: []string{"mkdir /dir"}, + noDeadProps: true, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusForbidden, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusForbidden, + XMLError: statForbiddenError, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + }}, + }}, + }}, + }, { + desc: "proppatch dead property", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{{Space: "foo", Local: "bar"}}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }}, + }}, + }}, + }, { + desc: "proppatch dead property with failed dependency", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }}, + }, { + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, + InnerXML: []byte("xxx"), + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusForbidden, + XMLError: statForbiddenError, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, + }}, + }, { + Status: StatusFailedDependency, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{{Space: "foo", Local: "bar"}}, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }}, + }, { + desc: "proppatch remove dead property", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }, { + XMLName: xml.Name{Space: "spam", Local: "ham"}, + InnerXML: []byte("eggs"), + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }, { + XMLName: xml.Name{Space: "spam", Local: "ham"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{ + {Space: "foo", Local: "bar"}, + {Space: "spam", Local: "ham"}, + }, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }, { + XMLName: xml.Name{Space: "spam", Local: "ham"}, + InnerXML: []byte("eggs"), + }}, + }}, + }, { + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Remove: true, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + wantPropstats: 
[]Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{ + {Space: "foo", Local: "bar"}, + {Space: "spam", Local: "ham"}, + }, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }, { + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "spam", Local: "ham"}, + InnerXML: []byte("eggs"), + }}, + }}, + }}, + }, { + desc: "propname with dead property", + buildfs: []string{"touch /file"}, + propOp: []propOp{{ + op: "proppatch", + name: "/file", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "propname", + name: "/file", + wantPnames: []xml.Name{ + {Space: "DAV:", Local: "resourcetype"}, + {Space: "DAV:", Local: "displayname"}, + {Space: "DAV:", Local: "getcontentlength"}, + {Space: "DAV:", Local: "getlastmodified"}, + {Space: "DAV:", Local: "getcontenttype"}, + {Space: "DAV:", Local: "getetag"}, + {Space: "DAV:", Local: "supportedlock"}, + {Space: "foo", Local: "bar"}, + }, + }}, + }, { + desc: "proppatch remove unknown dead property", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Remove: true, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }}, + }, { + desc: "bad: propfind unknown property", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "propfind", + name: "/dir", + pnames: []xml.Name{{"foo:", "bar"}}, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "foo:", Local: "bar"}, + }}, + }}, + }}, + }} + + for _, tc := range testCases { + fs, err := buildTestFS(tc.buildfs) + if err != nil { + t.Fatalf("%s: cannot create test filesystem: %v", tc.desc, err) + } + if tc.noDeadProps { + fs = noDeadPropsFS{fs} + } + ls := NewMemLS() + for _, op := range tc.propOp { + desc := fmt.Sprintf("%s: %s %s", tc.desc, op.op, op.name) + if err = calcProps(op.name, fs, ls, op.wantPropstats); err != nil { + t.Fatalf("%s: calcProps: %v", desc, err) + } + + // Call property system. + var propstats []Propstat + switch op.op { + case "propname": + pnames, err := propnames(ctx, fs, ls, op.name) + if err != nil { + t.Errorf("%s: got error %v, want nil", desc, err) + continue + } + sort.Sort(byXMLName(pnames)) + sort.Sort(byXMLName(op.wantPnames)) + if !reflect.DeepEqual(pnames, op.wantPnames) { + t.Errorf("%s: pnames\ngot %q\nwant %q", desc, pnames, op.wantPnames) + } + continue + case "allprop": + propstats, err = allprop(ctx, fs, ls, op.name, op.pnames) + case "propfind": + propstats, err = props(ctx, fs, ls, op.name, op.pnames) + case "proppatch": + propstats, err = patch(ctx, fs, ls, op.name, op.patches) + default: + t.Fatalf("%s: %s not implemented", desc, op.op) + } + if err != nil { + t.Errorf("%s: got error %v, want nil", desc, err) + continue + } + // Compare return values from allprop, propfind or proppatch. 
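// Editor's note (illustrative, not in the vendored file): propnames and
// allprop iterate Go maps, so property order is nondeterministic; both the
// got and want values are therefore sorted into a canonical order before the
// DeepEqual comparisons below.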
+ for _, pst := range propstats { + sort.Sort(byPropname(pst.Props)) + } + for _, pst := range op.wantPropstats { + sort.Sort(byPropname(pst.Props)) + } + sort.Sort(byStatus(propstats)) + sort.Sort(byStatus(op.wantPropstats)) + if !reflect.DeepEqual(propstats, op.wantPropstats) { + t.Errorf("%s: propstat\ngot %q\nwant %q", desc, propstats, op.wantPropstats) + } + } + } +} + +func cmpXMLName(a, b xml.Name) bool { + if a.Space != b.Space { + return a.Space < b.Space + } + return a.Local < b.Local +} + +type byXMLName []xml.Name + +func (b byXMLName) Len() int { return len(b) } +func (b byXMLName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byXMLName) Less(i, j int) bool { return cmpXMLName(b[i], b[j]) } + +type byPropname []Property + +func (b byPropname) Len() int { return len(b) } +func (b byPropname) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byPropname) Less(i, j int) bool { return cmpXMLName(b[i].XMLName, b[j].XMLName) } + +type byStatus []Propstat + +func (b byStatus) Len() int { return len(b) } +func (b byStatus) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byStatus) Less(i, j int) bool { return b[i].Status < b[j].Status } + +type noDeadPropsFS struct { + FileSystem +} + +func (fs noDeadPropsFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) { + f, err := fs.FileSystem.OpenFile(ctx, name, flag, perm) + if err != nil { + return nil, err + } + return noDeadPropsFile{f}, nil +} + +// noDeadPropsFile wraps a File but strips any optional DeadPropsHolder methods +// provided by the underlying File implementation. +type noDeadPropsFile struct { + f File +} + +func (f noDeadPropsFile) Close() error { return f.f.Close() } +func (f noDeadPropsFile) Read(p []byte) (int, error) { return f.f.Read(p) } +func (f noDeadPropsFile) Readdir(count int) ([]os.FileInfo, error) { return f.f.Readdir(count) } +func (f noDeadPropsFile) Seek(off int64, whence int) (int64, error) { return f.f.Seek(off, whence) } +func (f noDeadPropsFile) Stat() (os.FileInfo, error) { return f.f.Stat() } +func (f noDeadPropsFile) Write(p []byte) (int, error) { return f.f.Write(p) } diff --git a/vendor/golang.org/x/net/webdav/webdav.go b/vendor/golang.org/x/net/webdav/webdav.go new file mode 100644 index 0000000..7b56687 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/webdav.go @@ -0,0 +1,702 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package webdav provides a WebDAV server implementation. +package webdav // import "golang.org/x/net/webdav" + +import ( + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path" + "strings" + "time" +) + +type Handler struct { + // Prefix is the URL path prefix to strip from WebDAV resource paths. + Prefix string + // FileSystem is the virtual file system. + FileSystem FileSystem + // LockSystem is the lock management system. + LockSystem LockSystem + // Logger is an optional error logger. If non-nil, it will be called + // for all HTTP requests. 
+ Logger func(*http.Request, error) +} + +func (h *Handler) stripPrefix(p string) (string, int, error) { + if h.Prefix == "" { + return p, http.StatusOK, nil + } + if r := strings.TrimPrefix(p, h.Prefix); len(r) < len(p) { + return r, http.StatusOK, nil + } + return p, http.StatusNotFound, errPrefixMismatch +} + +func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + status, err := http.StatusBadRequest, errUnsupportedMethod + if h.FileSystem == nil { + status, err = http.StatusInternalServerError, errNoFileSystem + } else if h.LockSystem == nil { + status, err = http.StatusInternalServerError, errNoLockSystem + } else { + switch r.Method { + case "OPTIONS": + status, err = h.handleOptions(w, r) + case "GET", "HEAD", "POST": + status, err = h.handleGetHeadPost(w, r) + case "DELETE": + status, err = h.handleDelete(w, r) + case "PUT": + status, err = h.handlePut(w, r) + case "MKCOL": + status, err = h.handleMkcol(w, r) + case "COPY", "MOVE": + status, err = h.handleCopyMove(w, r) + case "LOCK": + status, err = h.handleLock(w, r) + case "UNLOCK": + status, err = h.handleUnlock(w, r) + case "PROPFIND": + status, err = h.handlePropfind(w, r) + case "PROPPATCH": + status, err = h.handleProppatch(w, r) + } + } + + if status != 0 { + w.WriteHeader(status) + if status != http.StatusNoContent { + w.Write([]byte(StatusText(status))) + } + } + if h.Logger != nil { + h.Logger(r, err) + } +} + +func (h *Handler) lock(now time.Time, root string) (token string, status int, err error) { + token, err = h.LockSystem.Create(now, LockDetails{ + Root: root, + Duration: infiniteTimeout, + ZeroDepth: true, + }) + if err != nil { + if err == ErrLocked { + return "", StatusLocked, err + } + return "", http.StatusInternalServerError, err + } + return token, 0, nil +} + +func (h *Handler) confirmLocks(r *http.Request, src, dst string) (release func(), status int, err error) { + hdr := r.Header.Get("If") + if hdr == "" { + // An empty If header means that the client hasn't previously created locks. + // Even if this client doesn't care about locks, we still need to check that + // the resources aren't locked by another client, so we create temporary + // locks that would conflict with another client's locks. These temporary + // locks are unlocked at the end of the HTTP request. + now, srcToken, dstToken := time.Now(), "", "" + if src != "" { + srcToken, status, err = h.lock(now, src) + if err != nil { + return nil, status, err + } + } + if dst != "" { + dstToken, status, err = h.lock(now, dst) + if err != nil { + if srcToken != "" { + h.LockSystem.Unlock(now, srcToken) + } + return nil, status, err + } + } + + return func() { + if dstToken != "" { + h.LockSystem.Unlock(now, dstToken) + } + if srcToken != "" { + h.LockSystem.Unlock(now, srcToken) + } + }, 0, nil + } + + ih, ok := parseIfHeader(hdr) + if !ok { + return nil, http.StatusBadRequest, errInvalidIfHeader + } + // ih is a disjunction (OR) of ifLists, so any ifList will do. + for _, l := range ih.lists { + lsrc := l.resourceTag + if lsrc == "" { + lsrc = src + } else { + u, err := url.Parse(lsrc) + if err != nil { + continue + } + if u.Host != r.Host { + continue + } + lsrc, status, err = h.stripPrefix(u.Path) + if err != nil { + return nil, status, err + } + } + release, err = h.LockSystem.Confirm(time.Now(), lsrc, dst, l.conditions...) 
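// Editor's note (illustrative, not in the vendored file): a typical header
// reaching this point looks like
//
//	If: <http://example.com/dav/file.txt> (<opaquelocktoken:abc-123>)
//
// where the angle-bracketed resource tag is optional and the token value is
// hypothetical. parseIfHeader produced l.conditions from the parenthesised
// list, and Confirm succeeds only when those tokens actually hold the locks
// covering lsrc (and dst, when non-empty).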
+ if err == ErrConfirmationFailed { + continue + } + if err != nil { + return nil, http.StatusInternalServerError, err + } + return release, 0, nil + } + // Section 10.4.1 says that "If this header is evaluated and all state lists + // fail, then the request must fail with a 412 (Precondition Failed) status." + // We follow the spec even though the cond_put_corrupt_token test case from + // the litmus test warns on seeing a 412 instead of a 423 (Locked). + return nil, http.StatusPreconditionFailed, ErrLocked +} + +func (h *Handler) handleOptions(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + ctx := getContext(r) + allow := "OPTIONS, LOCK, PUT, MKCOL" + if fi, err := h.FileSystem.Stat(ctx, reqPath); err == nil { + if fi.IsDir() { + allow = "OPTIONS, LOCK, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND" + } else { + allow = "OPTIONS, LOCK, GET, HEAD, POST, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND, PUT" + } + } + w.Header().Set("Allow", allow) + // http://www.webdav.org/specs/rfc4918.html#dav.compliance.classes + w.Header().Set("DAV", "1, 2") + // http://msdn.microsoft.com/en-au/library/cc250217.aspx + w.Header().Set("MS-Author-Via", "DAV") + return 0, nil +} + +func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + // TODO: check locks for read-only access?? + ctx := getContext(r) + f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDONLY, 0) + if err != nil { + return http.StatusNotFound, err + } + defer f.Close() + fi, err := f.Stat() + if err != nil { + return http.StatusNotFound, err + } + if fi.IsDir() { + return http.StatusMethodNotAllowed, nil + } + etag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi) + if err != nil { + return http.StatusInternalServerError, err + } + w.Header().Set("ETag", etag) + // Let ServeContent determine the Content-Type header. + http.ServeContent(w, r, reqPath, fi.ModTime(), f) + return 0, nil +} + +func (h *Handler) handleDelete(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + release, status, err := h.confirmLocks(r, reqPath, "") + if err != nil { + return status, err + } + defer release() + + ctx := getContext(r) + + // TODO: return MultiStatus where appropriate. + + // "godoc os RemoveAll" says that "If the path does not exist, RemoveAll + // returns nil (no error)." WebDAV semantics are that it should return a + // "404 Not Found". We therefore have to Stat before we RemoveAll. + if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusMethodNotAllowed, err + } + if err := h.FileSystem.RemoveAll(ctx, reqPath); err != nil { + return http.StatusMethodNotAllowed, err + } + return http.StatusNoContent, nil +} + +func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + release, status, err := h.confirmLocks(r, reqPath, "") + if err != nil { + return status, err + } + defer release() + // TODO(rost): Support the If-Match, If-None-Match headers? See bradfitz' + // comments in http.checkEtag. 
+ ctx := getContext(r) + + f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return http.StatusNotFound, err + } + _, copyErr := io.Copy(f, r.Body) + fi, statErr := f.Stat() + closeErr := f.Close() + // TODO(rost): Returning 405 Method Not Allowed might not be appropriate. + if copyErr != nil { + return http.StatusMethodNotAllowed, copyErr + } + if statErr != nil { + return http.StatusMethodNotAllowed, statErr + } + if closeErr != nil { + return http.StatusMethodNotAllowed, closeErr + } + etag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi) + if err != nil { + return http.StatusInternalServerError, err + } + w.Header().Set("ETag", etag) + return http.StatusCreated, nil +} + +func (h *Handler) handleMkcol(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + release, status, err := h.confirmLocks(r, reqPath, "") + if err != nil { + return status, err + } + defer release() + + ctx := getContext(r) + + if r.ContentLength > 0 { + return http.StatusUnsupportedMediaType, nil + } + if err := h.FileSystem.Mkdir(ctx, reqPath, 0777); err != nil { + if os.IsNotExist(err) { + return http.StatusConflict, err + } + return http.StatusMethodNotAllowed, err + } + return http.StatusCreated, nil +} + +func (h *Handler) handleCopyMove(w http.ResponseWriter, r *http.Request) (status int, err error) { + hdr := r.Header.Get("Destination") + if hdr == "" { + return http.StatusBadRequest, errInvalidDestination + } + u, err := url.Parse(hdr) + if err != nil { + return http.StatusBadRequest, errInvalidDestination + } + if u.Host != r.Host { + return http.StatusBadGateway, errInvalidDestination + } + + src, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + + dst, status, err := h.stripPrefix(u.Path) + if err != nil { + return status, err + } + + if dst == "" { + return http.StatusBadGateway, errInvalidDestination + } + if dst == src { + return http.StatusForbidden, errDestinationEqualsSource + } + + ctx := getContext(r) + + if r.Method == "COPY" { + // Section 7.5.1 says that a COPY only needs to lock the destination, + // not both destination and source. Strictly speaking, this is racy, + // even though a COPY doesn't modify the source, if a concurrent + // operation modifies the source. However, the litmus test explicitly + // checks that COPYing a locked-by-another source is OK. + release, status, err := h.confirmLocks(r, "", dst) + if err != nil { + return status, err + } + defer release() + + // Section 9.8.3 says that "The COPY method on a collection without a Depth + // header must act as if a Depth header with value "infinity" was included". + depth := infiniteDepth + if hdr := r.Header.Get("Depth"); hdr != "" { + depth = parseDepth(hdr) + if depth != 0 && depth != infiniteDepth { + // Section 9.8.3 says that "A client may submit a Depth header on a + // COPY on a collection with a value of "0" or "infinity"." + return http.StatusBadRequest, errInvalidDepth + } + } + return copyFiles(ctx, h.FileSystem, src, dst, r.Header.Get("Overwrite") != "F", depth, 0) + } + + release, status, err := h.confirmLocks(r, src, dst) + if err != nil { + return status, err + } + defer release() + + // Section 9.9.2 says that "The MOVE method on a collection must act as if + // a "Depth: infinity" header was used on it. 
A client must not submit a + // Depth header on a MOVE on a collection with any value but "infinity"." + if hdr := r.Header.Get("Depth"); hdr != "" { + if parseDepth(hdr) != infiniteDepth { + return http.StatusBadRequest, errInvalidDepth + } + } + return moveFiles(ctx, h.FileSystem, src, dst, r.Header.Get("Overwrite") == "T") +} + +func (h *Handler) handleLock(w http.ResponseWriter, r *http.Request) (retStatus int, retErr error) { + duration, err := parseTimeout(r.Header.Get("Timeout")) + if err != nil { + return http.StatusBadRequest, err + } + li, status, err := readLockInfo(r.Body) + if err != nil { + return status, err + } + + ctx := getContext(r) + token, ld, now, created := "", LockDetails{}, time.Now(), false + if li == (lockInfo{}) { + // An empty lockInfo means to refresh the lock. + ih, ok := parseIfHeader(r.Header.Get("If")) + if !ok { + return http.StatusBadRequest, errInvalidIfHeader + } + if len(ih.lists) == 1 && len(ih.lists[0].conditions) == 1 { + token = ih.lists[0].conditions[0].Token + } + if token == "" { + return http.StatusBadRequest, errInvalidLockToken + } + ld, err = h.LockSystem.Refresh(now, token, duration) + if err != nil { + if err == ErrNoSuchLock { + return http.StatusPreconditionFailed, err + } + return http.StatusInternalServerError, err + } + + } else { + // Section 9.10.3 says that "If no Depth header is submitted on a LOCK request, + // then the request MUST act as if a "Depth:infinity" had been submitted." + depth := infiniteDepth + if hdr := r.Header.Get("Depth"); hdr != "" { + depth = parseDepth(hdr) + if depth != 0 && depth != infiniteDepth { + // Section 9.10.3 says that "Values other than 0 or infinity must not be + // used with the Depth header on a LOCK method". + return http.StatusBadRequest, errInvalidDepth + } + } + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + ld = LockDetails{ + Root: reqPath, + Duration: duration, + OwnerXML: li.Owner.InnerXML, + ZeroDepth: depth == 0, + } + token, err = h.LockSystem.Create(now, ld) + if err != nil { + if err == ErrLocked { + return StatusLocked, err + } + return http.StatusInternalServerError, err + } + defer func() { + if retErr != nil { + h.LockSystem.Unlock(now, token) + } + }() + + // Create the resource if it didn't previously exist. + if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil { + f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + // TODO: detect missing intermediate dirs and return http.StatusConflict? + return http.StatusInternalServerError, err + } + f.Close() + created = true + } + + // http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the + // Lock-Token value is a Coded-URL. We add angle brackets. + w.Header().Set("Lock-Token", "<"+token+">") + } + + w.Header().Set("Content-Type", "application/xml; charset=utf-8") + if created { + // This is "w.WriteHeader(http.StatusCreated)" and not "return + // http.StatusCreated, nil" because we write our own (XML) response to w + // and Handler.ServeHTTP would otherwise write "Created". + w.WriteHeader(http.StatusCreated) + } + writeLockInfo(w, token, ld) + return 0, nil +} + +func (h *Handler) handleUnlock(w http.ResponseWriter, r *http.Request) (status int, err error) { + // http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the + // Lock-Token value is a Coded-URL. We strip its angle brackets. 
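// Editor's note (illustrative, not in the vendored file): for example
//
//	Lock-Token: <opaquelocktoken:abc-123>
//
// yields the token "opaquelocktoken:abc-123" once the brackets are removed;
// the token value shown is hypothetical.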
+ t := r.Header.Get("Lock-Token") + if len(t) < 2 || t[0] != '<' || t[len(t)-1] != '>' { + return http.StatusBadRequest, errInvalidLockToken + } + t = t[1 : len(t)-1] + + switch err = h.LockSystem.Unlock(time.Now(), t); err { + case nil: + return http.StatusNoContent, err + case ErrForbidden: + return http.StatusForbidden, err + case ErrLocked: + return StatusLocked, err + case ErrNoSuchLock: + return http.StatusConflict, err + default: + return http.StatusInternalServerError, err + } +} + +func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + ctx := getContext(r) + fi, err := h.FileSystem.Stat(ctx, reqPath) + if err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusMethodNotAllowed, err + } + depth := infiniteDepth + if hdr := r.Header.Get("Depth"); hdr != "" { + depth = parseDepth(hdr) + if depth == invalidDepth { + return http.StatusBadRequest, errInvalidDepth + } + } + pf, status, err := readPropfind(r.Body) + if err != nil { + return status, err + } + + mw := multistatusWriter{w: w} + + walkFn := func(reqPath string, info os.FileInfo, err error) error { + if err != nil { + return err + } + var pstats []Propstat + if pf.Propname != nil { + pnames, err := propnames(ctx, h.FileSystem, h.LockSystem, reqPath) + if err != nil { + return err + } + pstat := Propstat{Status: http.StatusOK} + for _, xmlname := range pnames { + pstat.Props = append(pstat.Props, Property{XMLName: xmlname}) + } + pstats = append(pstats, pstat) + } else if pf.Allprop != nil { + pstats, err = allprop(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop) + } else { + pstats, err = props(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop) + } + if err != nil { + return err + } + return mw.write(makePropstatResponse(path.Join(h.Prefix, reqPath), pstats)) + } + + walkErr := walkFS(ctx, h.FileSystem, depth, reqPath, fi, walkFn) + closeErr := mw.close() + if walkErr != nil { + return http.StatusInternalServerError, walkErr + } + if closeErr != nil { + return http.StatusInternalServerError, closeErr + } + return 0, nil +} + +func (h *Handler) handleProppatch(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + release, status, err := h.confirmLocks(r, reqPath, "") + if err != nil { + return status, err + } + defer release() + + ctx := getContext(r) + + if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusMethodNotAllowed, err + } + patches, status, err := readProppatch(r.Body) + if err != nil { + return status, err + } + pstats, err := patch(ctx, h.FileSystem, h.LockSystem, reqPath, patches) + if err != nil { + return http.StatusInternalServerError, err + } + mw := multistatusWriter{w: w} + writeErr := mw.write(makePropstatResponse(r.URL.Path, pstats)) + closeErr := mw.close() + if writeErr != nil { + return http.StatusInternalServerError, writeErr + } + if closeErr != nil { + return http.StatusInternalServerError, closeErr + } + return 0, nil +} + +func makePropstatResponse(href string, pstats []Propstat) *response { + resp := response{ + Href: []string{(&url.URL{Path: href}).EscapedPath()}, + Propstat: make([]propstat, 0, len(pstats)), + } + for _, p := range pstats { + var xmlErr *xmlError + if p.XMLError != "" { + xmlErr = &xmlError{InnerXML: 
[]byte(p.XMLError)} + } + resp.Propstat = append(resp.Propstat, propstat{ + Status: fmt.Sprintf("HTTP/1.1 %d %s", p.Status, StatusText(p.Status)), + Prop: p.Props, + ResponseDescription: p.ResponseDescription, + Error: xmlErr, + }) + } + return &resp +} + +const ( + infiniteDepth = -1 + invalidDepth = -2 +) + +// parseDepth maps the strings "0", "1" and "infinity" to 0, 1 and +// infiniteDepth. Parsing any other string returns invalidDepth. +// +// Different WebDAV methods have further constraints on valid depths: +// - PROPFIND has no further restrictions, as per section 9.1. +// - COPY accepts only "0" or "infinity", as per section 9.8.3. +// - MOVE accepts only "infinity", as per section 9.9.2. +// - LOCK accepts only "0" or "infinity", as per section 9.10.3. +// These constraints are enforced by the handleXxx methods. +func parseDepth(s string) int { + switch s { + case "0": + return 0 + case "1": + return 1 + case "infinity": + return infiniteDepth + } + return invalidDepth +} + +// http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11 +const ( + StatusMulti = 207 + StatusUnprocessableEntity = 422 + StatusLocked = 423 + StatusFailedDependency = 424 + StatusInsufficientStorage = 507 +) + +func StatusText(code int) string { + switch code { + case StatusMulti: + return "Multi-Status" + case StatusUnprocessableEntity: + return "Unprocessable Entity" + case StatusLocked: + return "Locked" + case StatusFailedDependency: + return "Failed Dependency" + case StatusInsufficientStorage: + return "Insufficient Storage" + } + return http.StatusText(code) +} + +var ( + errDestinationEqualsSource = errors.New("webdav: destination equals source") + errDirectoryNotEmpty = errors.New("webdav: directory not empty") + errInvalidDepth = errors.New("webdav: invalid depth") + errInvalidDestination = errors.New("webdav: invalid destination") + errInvalidIfHeader = errors.New("webdav: invalid If header") + errInvalidLockInfo = errors.New("webdav: invalid lock info") + errInvalidLockToken = errors.New("webdav: invalid lock token") + errInvalidPropfind = errors.New("webdav: invalid propfind") + errInvalidProppatch = errors.New("webdav: invalid proppatch") + errInvalidResponse = errors.New("webdav: invalid response") + errInvalidTimeout = errors.New("webdav: invalid timeout") + errNoFileSystem = errors.New("webdav: no file system") + errNoLockSystem = errors.New("webdav: no lock system") + errNotADirectory = errors.New("webdav: not a directory") + errPrefixMismatch = errors.New("webdav: prefix mismatch") + errRecursionTooDeep = errors.New("webdav: recursion too deep") + errUnsupportedLockInfo = errors.New("webdav: unsupported lock info") + errUnsupportedMethod = errors.New("webdav: unsupported method") +) diff --git a/vendor/golang.org/x/net/webdav/webdav_test.go b/vendor/golang.org/x/net/webdav/webdav_test.go new file mode 100644 index 0000000..25e0d54 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/webdav_test.go @@ -0,0 +1,344 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package webdav + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "reflect" + "regexp" + "sort" + "strings" + "testing" + + "golang.org/x/net/context" +) + +// TODO: add tests to check XML responses with the expected prefix path +func TestPrefix(t *testing.T) { + const dst, blah = "Destination", "blah blah blah" + + // createLockBody comes from the example in Section 9.10.7. + const createLockBody = ` + + + + + http://example.org/~ejw/contact.html + + + ` + + do := func(method, urlStr string, body string, wantStatusCode int, headers ...string) (http.Header, error) { + var bodyReader io.Reader + if body != "" { + bodyReader = strings.NewReader(body) + } + req, err := http.NewRequest(method, urlStr, bodyReader) + if err != nil { + return nil, err + } + for len(headers) >= 2 { + req.Header.Add(headers[0], headers[1]) + headers = headers[2:] + } + res, err := http.DefaultTransport.RoundTrip(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != wantStatusCode { + return nil, fmt.Errorf("got status code %d, want %d", res.StatusCode, wantStatusCode) + } + return res.Header, nil + } + + prefixes := []string{ + "/", + "/a/", + "/a/b/", + "/a/b/c/", + } + ctx := context.Background() + for _, prefix := range prefixes { + fs := NewMemFS() + h := &Handler{ + FileSystem: fs, + LockSystem: NewMemLS(), + } + mux := http.NewServeMux() + if prefix != "/" { + h.Prefix = prefix + } + mux.Handle(prefix, h) + srv := httptest.NewServer(mux) + defer srv.Close() + + // The script is: + // MKCOL /a + // MKCOL /a/b + // PUT /a/b/c + // COPY /a/b/c /a/b/d + // MKCOL /a/b/e + // MOVE /a/b/d /a/b/e/f + // LOCK /a/b/e/g + // PUT /a/b/e/g + // which should yield the (possibly stripped) filenames /a/b/c, + // /a/b/e/f and /a/b/e/g, plus their parent directories. 
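// A note on the want maps below: the http.StatusMovedPermanently entries
// come from http.ServeMux, not from the WebDAV Handler itself. Registering
// the subtree pattern "/a/" makes ServeMux answer a request for "/a" with
// a 301 redirect to "/a/" before the Handler ever runs.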
+
+		wantA := map[string]int{
+			"/":       http.StatusCreated,
+			"/a/":     http.StatusMovedPermanently,
+			"/a/b/":   http.StatusNotFound,
+			"/a/b/c/": http.StatusNotFound,
+		}[prefix]
+		if _, err := do("MKCOL", srv.URL+"/a", "", wantA); err != nil {
+			t.Errorf("prefix=%-9q MKCOL /a: %v", prefix, err)
+			continue
+		}
+
+		wantB := map[string]int{
+			"/":       http.StatusCreated,
+			"/a/":     http.StatusCreated,
+			"/a/b/":   http.StatusMovedPermanently,
+			"/a/b/c/": http.StatusNotFound,
+		}[prefix]
+		if _, err := do("MKCOL", srv.URL+"/a/b", "", wantB); err != nil {
+			t.Errorf("prefix=%-9q MKCOL /a/b: %v", prefix, err)
+			continue
+		}
+
+		wantC := map[string]int{
+			"/":       http.StatusCreated,
+			"/a/":     http.StatusCreated,
+			"/a/b/":   http.StatusCreated,
+			"/a/b/c/": http.StatusMovedPermanently,
+		}[prefix]
+		if _, err := do("PUT", srv.URL+"/a/b/c", blah, wantC); err != nil {
+			t.Errorf("prefix=%-9q PUT /a/b/c: %v", prefix, err)
+			continue
+		}
+
+		wantD := map[string]int{
+			"/":       http.StatusCreated,
+			"/a/":     http.StatusCreated,
+			"/a/b/":   http.StatusCreated,
+			"/a/b/c/": http.StatusMovedPermanently,
+		}[prefix]
+		if _, err := do("COPY", srv.URL+"/a/b/c", "", wantD, dst, srv.URL+"/a/b/d"); err != nil {
+			t.Errorf("prefix=%-9q COPY /a/b/c /a/b/d: %v", prefix, err)
+			continue
+		}
+
+		wantE := map[string]int{
+			"/":       http.StatusCreated,
+			"/a/":     http.StatusCreated,
+			"/a/b/":   http.StatusCreated,
+			"/a/b/c/": http.StatusNotFound,
+		}[prefix]
+		if _, err := do("MKCOL", srv.URL+"/a/b/e", "", wantE); err != nil {
+			t.Errorf("prefix=%-9q MKCOL /a/b/e: %v", prefix, err)
+			continue
+		}
+
+		wantF := map[string]int{
+			"/":       http.StatusCreated,
+			"/a/":     http.StatusCreated,
+			"/a/b/":   http.StatusCreated,
+			"/a/b/c/": http.StatusNotFound,
+		}[prefix]
+		if _, err := do("MOVE", srv.URL+"/a/b/d", "", wantF, dst, srv.URL+"/a/b/e/f"); err != nil {
+			t.Errorf("prefix=%-9q MOVE /a/b/d /a/b/e/f: %v", prefix, err)
+			continue
+		}
+
+		var lockToken string
+		wantG := map[string]int{
+			"/":       http.StatusCreated,
+			"/a/":     http.StatusCreated,
+			"/a/b/":   http.StatusCreated,
+			"/a/b/c/": http.StatusNotFound,
+		}[prefix]
+		if h, err := do("LOCK", srv.URL+"/a/b/e/g", createLockBody, wantG); err != nil {
+			t.Errorf("prefix=%-9q LOCK /a/b/e/g: %v", prefix, err)
+			continue
+		} else {
+			lockToken = h.Get("Lock-Token")
+		}
+
+		ifHeader := fmt.Sprintf("<%s/a/b/e/g> (%s)", srv.URL, lockToken)
+		wantH := map[string]int{
+			"/":       http.StatusCreated,
+			"/a/":     http.StatusCreated,
+			"/a/b/":   http.StatusCreated,
+			"/a/b/c/": http.StatusNotFound,
+		}[prefix]
+		if _, err := do("PUT", srv.URL+"/a/b/e/g", blah, wantH, "If", ifHeader); err != nil {
+			t.Errorf("prefix=%-9q PUT /a/b/e/g: %v", prefix, err)
+			continue
+		}
+
+		got, err := find(ctx, nil, fs, "/")
+		if err != nil {
+			t.Errorf("prefix=%-9q find: %v", prefix, err)
+			continue
+		}
+		sort.Strings(got)
+		want := map[string][]string{
+			"/":       {"/", "/a", "/a/b", "/a/b/c", "/a/b/e", "/a/b/e/f", "/a/b/e/g"},
+			"/a/":     {"/", "/b", "/b/c", "/b/e", "/b/e/f", "/b/e/g"},
+			"/a/b/":   {"/", "/c", "/e", "/e/f", "/e/g"},
+			"/a/b/c/": {"/"},
+		}[prefix]
+		if !reflect.DeepEqual(got, want) {
+			t.Errorf("prefix=%-9q find:\ngot %v\nwant %v", prefix, got, want)
+			continue
+		}
+	}
+}
+
+func TestEscapeXML(t *testing.T) {
+	// These test cases aren't exhaustive, and there is more than one way to
+	// escape e.g. a quot (as "&#34;" or "&quot;") or an apos. We presume that
+	// the encoding/xml package tests xml.EscapeText more thoroughly. This test
+	// here is just a sanity check for this package's escapeXML function, and
+	// its attempt to provide a fast path (and avoid a bytes.Buffer allocation)
+	// when escaping filenames is obviously a no-op.
+	testCases := map[string]string{
+		"":              "",
+		" ":             " ",
+		"&":             "&amp;",
+		"*":             "*",
+		"+":             "+",
+		",":             ",",
+		"-":             "-",
+		".":             ".",
+		"/":             "/",
+		"0":             "0",
+		"9":             "9",
+		":":             ":",
+		"<":             "&lt;",
+		">":             "&gt;",
+		"A":             "A",
+		"_":             "_",
+		"a":             "a",
+		"~":             "~",
+		"\u0201":        "\u0201",
+		"&amp;":         "&amp;amp;",
+		"foo&<b/ar>baz": "foo&amp;&lt;b/ar&gt;baz",
+	}
+
+	for in, want := range testCases {
+		if got := escapeXML(in); got != want {
+			t.Errorf("in=%q: got %q, want %q", in, got, want)
+		}
+	}
+}
+
+func TestFilenameEscape(t *testing.T) {
+	hrefRe := regexp.MustCompile(`<D:href>([^<]*)</D:href>`)
+	displayNameRe := regexp.MustCompile(`<D:displayname>([^<]*)</D:displayname>`)
+	do := func(method, urlStr string) (string, string, error) {
+		req, err := http.NewRequest(method, urlStr, nil)
+		if err != nil {
+			return "", "", err
+		}
+		res, err := http.DefaultClient.Do(req)
+		if err != nil {
+			return "", "", err
+		}
+		defer res.Body.Close()
+
+		b, err := ioutil.ReadAll(res.Body)
+		if err != nil {
+			return "", "", err
+		}
+		hrefMatch := hrefRe.FindStringSubmatch(string(b))
+		if len(hrefMatch) != 2 {
+			return "", "", errors.New("D:href not found")
+		}
+		displayNameMatch := displayNameRe.FindStringSubmatch(string(b))
+		if len(displayNameMatch) != 2 {
+			return "", "", errors.New("D:displayname not found")
+		}
+
+		return hrefMatch[1], displayNameMatch[1], nil
+	}
+
+	testCases := []struct {
+		name, wantHref, wantDisplayName string
+	}{{
+		name:            `/foo%bar`,
+		wantHref:        `/foo%25bar`,
+		wantDisplayName: `foo%bar`,
+	}, {
+		name:            `/こんにちわ世界`,
+		wantHref:        `/%E3%81%93%E3%82%93%E3%81%AB%E3%81%A1%E3%82%8F%E4%B8%96%E7%95%8C`,
+		wantDisplayName: `こんにちわ世界`,
+	}, {
+		name:            `/Program Files/`,
+		wantHref:        `/Program%20Files`,
+		wantDisplayName: `Program Files`,
+	}, {
+		name:            `/go+lang`,
+		wantHref:        `/go+lang`,
+		wantDisplayName: `go+lang`,
+	}, {
+		name:            `/go&lang`,
+		wantHref:        `/go&amp;lang`,
+		wantDisplayName: `go&amp;lang`,
+	}, {
+		name:            `/go<lang`,
+		wantHref:        `/go%3Clang`,
+		wantDisplayName: `go&lt;lang`,
+	}}
+
+	ctx := context.Background()
+	fs := NewMemFS()
+	for _, tc := range testCases {
+		if strings.HasSuffix(tc.name, "/") {
+			if err := fs.Mkdir(ctx, tc.name, 0755); err != nil {
+				t.Fatalf("name=%q: Mkdir: %v", tc.name, err)
+			}
+		} else {
+			f, err := fs.OpenFile(ctx, tc.name, os.O_CREATE, 0644)
+			if err != nil {
+				t.Fatalf("name=%q: OpenFile: %v", tc.name, err)
+			}
+			f.Close()
+		}
+	}
+
+	srv := httptest.NewServer(&Handler{
+		FileSystem: fs,
+		LockSystem: NewMemLS(),
+	})
+	defer srv.Close()
+
+	u, err := url.Parse(srv.URL)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for _, tc := range testCases {
+		u.Path = tc.name
+		gotHref, gotDisplayName, err := do("PROPFIND", u.String())
+		if err != nil {
+			t.Errorf("name=%q: PROPFIND: %v", tc.name, err)
+			continue
+		}
+		if gotHref != tc.wantHref {
+			t.Errorf("name=%q: got href %q, want %q", tc.name, gotHref, tc.wantHref)
+		}
+		if gotDisplayName != tc.wantDisplayName {
+			t.Errorf("name=%q: got displayname %q, want %q", tc.name, gotDisplayName, tc.wantDisplayName)
+		}
+	}
+}
diff --git a/vendor/golang.org/x/net/webdav/xml.go b/vendor/golang.org/x/net/webdav/xml.go
new file mode 100644
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/xml.go
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+// The XML encoding is covered by Section 14.
+// http://www.webdav.org/specs/rfc4918.html#xml.element.definitions
+
+import (
+	"bytes"
+	"encoding/xml"
+	"fmt"
+	"io"
+	"net/http"
+	"time"
+
+	ixml "golang.org/x/net/webdav/internal/xml"
+)
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_lockinfo
+type lockInfo struct {
+	XMLName   ixml.Name `xml:"lockinfo"`
+	Exclusive *struct{} `xml:"lockscope>exclusive"`
+	Shared    *struct{} `xml:"lockscope>shared"`
+	Write     *struct{} `xml:"locktype>write"`
+	Owner     owner     `xml:"owner"`
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_owner
+type owner struct {
+	InnerXML string `xml:",innerxml"`
+}
+
+func readLockInfo(r io.Reader) (li lockInfo, status int, err error) {
+	c := &countingReader{r: r}
+	if err = ixml.NewDecoder(c).Decode(&li); err != nil {
+		if err == io.EOF {
+			if c.n == 0 {
+				// An empty body means to refresh the lock.
+				// http://www.webdav.org/specs/rfc4918.html#refreshing-locks
+				return lockInfo{}, 0, nil
+			}
+			err = errInvalidLockInfo
+		}
+		return lockInfo{}, http.StatusBadRequest, err
+	}
+	// We only support exclusive (non-shared) write locks. In practice, these are
+	// the only types of locks that seem to matter.
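// A sketch of what the check below accepts and rejects:
//
//	<lockscope><exclusive/></lockscope> + <locktype><write/></locktype>  ->  accepted
//	<lockscope><shared/></lockscope>    + <locktype><write/></locktype>  ->  rejected
//
// Anything other than an exclusive write lock request yields
// http.StatusNotImplemented and errUnsupportedLockInfo.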
+	if li.Exclusive == nil || li.Shared != nil || li.Write == nil {
+		return lockInfo{}, http.StatusNotImplemented, errUnsupportedLockInfo
+	}
+	return li, 0, nil
+}
+
+type countingReader struct {
+	n int
+	r io.Reader
+}
+
+func (c *countingReader) Read(p []byte) (int, error) {
+	n, err := c.r.Read(p)
+	c.n += n
+	return n, err
+}
+
+func writeLockInfo(w io.Writer, token string, ld LockDetails) (int, error) {
+	depth := "infinity"
+	if ld.ZeroDepth {
+		depth = "0"
+	}
+	timeout := ld.Duration / time.Second
+	return fmt.Fprintf(w, "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n"+
+		"<D:prop xmlns:D=\"DAV:\"><D:lockdiscovery><D:activelock>\n"+
+		"	<D:locktype><D:write/></D:locktype>\n"+
+		"	<D:lockscope><D:exclusive/></D:lockscope>\n"+
+		"	<D:depth>%s</D:depth>\n"+
+		"	<D:owner>%s</D:owner>\n"+
+		"	<D:timeout>Second-%d</D:timeout>\n"+
+		"	<D:locktoken><D:href>%s</D:href></D:locktoken>\n"+
+		"	<D:lockroot><D:href>%s</D:href></D:lockroot>\n"+
+		"</D:activelock></D:lockdiscovery></D:prop>",
+		depth, ld.OwnerXML, timeout, escape(token), escape(ld.Root),
+	)
+}
+
+func escape(s string) string {
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '"', '&', '\'', '<', '>':
+			b := bytes.NewBuffer(nil)
+			ixml.EscapeText(b, []byte(s))
+			return b.String()
+		}
+	}
+	return s
+}
+
+// Next returns the next token, if any, in the XML stream of d.
+// RFC 4918 requires that comments, processing instructions
+// and directives be ignored.
+// http://www.webdav.org/specs/rfc4918.html#property_values
+// http://www.webdav.org/specs/rfc4918.html#xml-extensibility
+func next(d *ixml.Decoder) (ixml.Token, error) {
+	for {
+		t, err := d.Token()
+		if err != nil {
+			return t, err
+		}
+		switch t.(type) {
+		case ixml.Comment, ixml.Directive, ixml.ProcInst:
+			continue
+		default:
+			return t, nil
+		}
+	}
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for propfind)
+type propfindProps []xml.Name
+
+// UnmarshalXML appends the property names enclosed within start to pn.
+//
+// It returns an error if start does not contain any properties or if
+// properties contain values. Character data between properties is ignored.
+func (pn *propfindProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
+	for {
+		t, err := next(d)
+		if err != nil {
+			return err
+		}
+		switch t.(type) {
+		case ixml.EndElement:
+			if len(*pn) == 0 {
+				return fmt.Errorf("%s must not be empty", start.Name.Local)
+			}
+			return nil
+		case ixml.StartElement:
+			name := t.(ixml.StartElement).Name
+			t, err = next(d)
+			if err != nil {
+				return err
+			}
+			if _, ok := t.(ixml.EndElement); !ok {
+				return fmt.Errorf("unexpected token %T", t)
+			}
+			*pn = append(*pn, xml.Name(name))
+		}
+	}
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propfind
+type propfind struct {
+	XMLName  ixml.Name     `xml:"DAV: propfind"`
+	Allprop  *struct{}     `xml:"DAV: allprop"`
+	Propname *struct{}     `xml:"DAV: propname"`
+	Prop     propfindProps `xml:"DAV: prop"`
+	Include  propfindProps `xml:"DAV: include"`
+}
+
+func readPropfind(r io.Reader) (pf propfind, status int, err error) {
+	c := countingReader{r: r}
+	if err = ixml.NewDecoder(&c).Decode(&pf); err != nil {
+		if err == io.EOF {
+			if c.n == 0 {
+				// An empty body means to propfind allprop.
+ // http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND + return propfind{Allprop: new(struct{})}, 0, nil + } + err = errInvalidPropfind + } + return propfind{}, http.StatusBadRequest, err + } + + if pf.Allprop == nil && pf.Include != nil { + return propfind{}, http.StatusBadRequest, errInvalidPropfind + } + if pf.Allprop != nil && (pf.Prop != nil || pf.Propname != nil) { + return propfind{}, http.StatusBadRequest, errInvalidPropfind + } + if pf.Prop != nil && pf.Propname != nil { + return propfind{}, http.StatusBadRequest, errInvalidPropfind + } + if pf.Propname == nil && pf.Allprop == nil && pf.Prop == nil { + return propfind{}, http.StatusBadRequest, errInvalidPropfind + } + return pf, 0, nil +} + +// Property represents a single DAV resource property as defined in RFC 4918. +// See http://www.webdav.org/specs/rfc4918.html#data.model.for.resource.properties +type Property struct { + // XMLName is the fully qualified name that identifies this property. + XMLName xml.Name + + // Lang is an optional xml:lang attribute. + Lang string `xml:"xml:lang,attr,omitempty"` + + // InnerXML contains the XML representation of the property value. + // See http://www.webdav.org/specs/rfc4918.html#property_values + // + // Property values of complex type or mixed-content must have fully + // expanded XML namespaces or be self-contained with according + // XML namespace declarations. They must not rely on any XML + // namespace declarations within the scope of the XML document, + // even including the DAV: namespace. + InnerXML []byte `xml:",innerxml"` +} + +// ixmlProperty is the same as the Property type except it holds an ixml.Name +// instead of an xml.Name. +type ixmlProperty struct { + XMLName ixml.Name + Lang string `xml:"xml:lang,attr,omitempty"` + InnerXML []byte `xml:",innerxml"` +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_error +// See multistatusWriter for the "D:" namespace prefix. +type xmlError struct { + XMLName ixml.Name `xml:"D:error"` + InnerXML []byte `xml:",innerxml"` +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat +// See multistatusWriter for the "D:" namespace prefix. +type propstat struct { + Prop []Property `xml:"D:prop>_ignored_"` + Status string `xml:"D:status"` + Error *xmlError `xml:"D:error"` + ResponseDescription string `xml:"D:responsedescription,omitempty"` +} + +// ixmlPropstat is the same as the propstat type except it holds an ixml.Name +// instead of an xml.Name. +type ixmlPropstat struct { + Prop []ixmlProperty `xml:"D:prop>_ignored_"` + Status string `xml:"D:status"` + Error *xmlError `xml:"D:error"` + ResponseDescription string `xml:"D:responsedescription,omitempty"` +} + +// MarshalXML prepends the "D:" namespace prefix on properties in the DAV: namespace +// before encoding. See multistatusWriter. +func (ps propstat) MarshalXML(e *ixml.Encoder, start ixml.StartElement) error { + // Convert from a propstat to an ixmlPropstat. + ixmlPs := ixmlPropstat{ + Prop: make([]ixmlProperty, len(ps.Prop)), + Status: ps.Status, + Error: ps.Error, + ResponseDescription: ps.ResponseDescription, + } + for k, prop := range ps.Prop { + ixmlPs.Prop[k] = ixmlProperty{ + XMLName: ixml.Name(prop.XMLName), + Lang: prop.Lang, + InnerXML: prop.InnerXML, + } + } + + for k, prop := range ixmlPs.Prop { + if prop.XMLName.Space == "DAV:" { + prop.XMLName = ixml.Name{Space: "", Local: "D:" + prop.XMLName.Local} + ixmlPs.Prop[k] = prop + } + } + // Distinct type to avoid infinite recursion of MarshalXML. 
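// (How the trick below works: a defined type in Go does not inherit the
// method set of its underlying type, so encoding the newpropstat value
// cannot invoke a MarshalXML method and recurse back into this function.)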
+	type newpropstat ixmlPropstat
+	return e.EncodeElement(newpropstat(ixmlPs), start)
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_response
+// See multistatusWriter for the "D:" namespace prefix.
+type response struct {
+	XMLName             ixml.Name  `xml:"D:response"`
+	Href                []string   `xml:"D:href"`
+	Propstat            []propstat `xml:"D:propstat"`
+	Status              string     `xml:"D:status,omitempty"`
+	Error               *xmlError  `xml:"D:error"`
+	ResponseDescription string     `xml:"D:responsedescription,omitempty"`
+}
+
+// MultistatusWriter marshals one or more Responses into an XML
+// multistatus response.
+// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_multistatus
+// TODO(rsto, mpl): As a workaround, the "D:" namespace prefix, defined as
+// "DAV:" on this element, is prepended on the nested response, as well as on all
+// its nested elements. All property names in the DAV: namespace are prefixed as
+// well. This is because some versions of Mini-Redirector (on windows 7) ignore
+// elements with a default namespace (no prefixed namespace). A less intrusive fix
+// should be possible after golang.org/cl/11074. See https://golang.org/issue/11177
+type multistatusWriter struct {
+	// responseDescription contains the optional responsedescription
+	// of the multistatus XML element. Only the latest content before
+	// close will be emitted. Empty response descriptions are not
+	// written.
+	responseDescription string
+
+	w   http.ResponseWriter
+	enc *ixml.Encoder
+}
+
+// Write validates and emits a DAV response as part of a multistatus response
+// element.
+//
+// It sets the HTTP status code of its underlying http.ResponseWriter to 207
+// (Multi-Status) and populates the Content-Type header. If r is the
+// first, valid response to be written, Write prepends the XML representation
+// of r with a multistatus tag. Callers must call close after the last response
+// has been written.
+func (w *multistatusWriter) write(r *response) error {
+	switch len(r.Href) {
+	case 0:
+		return errInvalidResponse
+	case 1:
+		if len(r.Propstat) > 0 != (r.Status == "") {
+			return errInvalidResponse
+		}
+	default:
+		if len(r.Propstat) > 0 || r.Status == "" {
+			return errInvalidResponse
+		}
+	}
+	err := w.writeHeader()
+	if err != nil {
+		return err
+	}
+	return w.enc.Encode(r)
+}
+
+// writeHeader writes an XML multistatus start element on w's underlying
+// http.ResponseWriter and returns the result of the write operation.
+// After the first write attempt, writeHeader becomes a no-op.
+func (w *multistatusWriter) writeHeader() error {
+	if w.enc != nil {
+		return nil
+	}
+	w.w.Header().Add("Content-Type", "text/xml; charset=utf-8")
+	w.w.WriteHeader(StatusMulti)
+	_, err := fmt.Fprintf(w.w, `<?xml version="1.0" encoding="UTF-8"?>`)
+	if err != nil {
+		return err
+	}
+	w.enc = ixml.NewEncoder(w.w)
+	return w.enc.EncodeToken(ixml.StartElement{
+		Name: ixml.Name{
+			Space: "DAV:",
+			Local: "multistatus",
+		},
+		Attr: []ixml.Attr{{
+			Name:  ixml.Name{Space: "xmlns", Local: "D"},
+			Value: "DAV:",
+		}},
+	})
+}
+
+// Close completes the marshalling of the multistatus response. It returns
+// an error if the multistatus response could not be completed. If both the
+// return value and field enc of w are nil, then no multistatus response has
+// been written.
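// A minimal usage sketch (illustrative only); the handlers in webdav.go
// drive this writer as
//
//	mw := multistatusWriter{w: w}
//	writeErr := mw.write(makePropstatResponse(href, pstats))
//	closeErr := mw.close()
//
// and a nil enc after close indicates that nothing was ever written.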
+func (w *multistatusWriter) close() error { + if w.enc == nil { + return nil + } + var end []ixml.Token + if w.responseDescription != "" { + name := ixml.Name{Space: "DAV:", Local: "responsedescription"} + end = append(end, + ixml.StartElement{Name: name}, + ixml.CharData(w.responseDescription), + ixml.EndElement{Name: name}, + ) + } + end = append(end, ixml.EndElement{ + Name: ixml.Name{Space: "DAV:", Local: "multistatus"}, + }) + for _, t := range end { + err := w.enc.EncodeToken(t) + if err != nil { + return err + } + } + return w.enc.Flush() +} + +var xmlLangName = ixml.Name{Space: "http://www.w3.org/XML/1998/namespace", Local: "lang"} + +func xmlLang(s ixml.StartElement, d string) string { + for _, attr := range s.Attr { + if attr.Name == xmlLangName { + return attr.Value + } + } + return d +} + +type xmlValue []byte + +func (v *xmlValue) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error { + // The XML value of a property can be arbitrary, mixed-content XML. + // To make sure that the unmarshalled value contains all required + // namespaces, we encode all the property value XML tokens into a + // buffer. This forces the encoder to redeclare any used namespaces. + var b bytes.Buffer + e := ixml.NewEncoder(&b) + for { + t, err := next(d) + if err != nil { + return err + } + if e, ok := t.(ixml.EndElement); ok && e.Name == start.Name { + break + } + if err = e.EncodeToken(t); err != nil { + return err + } + } + err := e.Flush() + if err != nil { + return err + } + *v = b.Bytes() + return nil +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for proppatch) +type proppatchProps []Property + +// UnmarshalXML appends the property names and values enclosed within start +// to ps. +// +// An xml:lang attribute that is defined either on the DAV:prop or property +// name XML element is propagated to the property's Lang field. +// +// UnmarshalXML returns an error if start does not contain any properties or if +// property values contain syntactically incorrect XML. +func (ps *proppatchProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error { + lang := xmlLang(start, "") + for { + t, err := next(d) + if err != nil { + return err + } + switch elem := t.(type) { + case ixml.EndElement: + if len(*ps) == 0 { + return fmt.Errorf("%s must not be empty", start.Name.Local) + } + return nil + case ixml.StartElement: + p := Property{ + XMLName: xml.Name(t.(ixml.StartElement).Name), + Lang: xmlLang(t.(ixml.StartElement), lang), + } + err = d.DecodeElement(((*xmlValue)(&p.InnerXML)), &elem) + if err != nil { + return err + } + *ps = append(*ps, p) + } + } +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_set +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_remove +type setRemove struct { + XMLName ixml.Name + Lang string `xml:"xml:lang,attr,omitempty"` + Prop proppatchProps `xml:"DAV: prop"` +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propertyupdate +type propertyupdate struct { + XMLName ixml.Name `xml:"DAV: propertyupdate"` + Lang string `xml:"xml:lang,attr,omitempty"` + SetRemove []setRemove `xml:",any"` +} + +func readProppatch(r io.Reader) (patches []Proppatch, status int, err error) { + var pu propertyupdate + if err = ixml.NewDecoder(r).Decode(&pu); err != nil { + return nil, http.StatusBadRequest, err + } + for _, op := range pu.SetRemove { + remove := false + switch op.XMLName { + case ixml.Name{Space: "DAV:", Local: "set"}: + // No-op. 
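// (A DAV:set instruction carries the new property values as-is, while the
// DAV:remove case below requires empty property elements: any inner XML on
// a remove is rejected as errInvalidProppatch.)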
+ case ixml.Name{Space: "DAV:", Local: "remove"}: + for _, p := range op.Prop { + if len(p.InnerXML) > 0 { + return nil, http.StatusBadRequest, errInvalidProppatch + } + } + remove = true + default: + return nil, http.StatusBadRequest, errInvalidProppatch + } + patches = append(patches, Proppatch{Remove: remove, Props: op.Prop}) + } + return patches, 0, nil +} diff --git a/vendor/golang.org/x/net/webdav/xml_test.go b/vendor/golang.org/x/net/webdav/xml_test.go new file mode 100644 index 0000000..a3d9e1e --- /dev/null +++ b/vendor/golang.org/x/net/webdav/xml_test.go @@ -0,0 +1,906 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "net/http" + "net/http/httptest" + "reflect" + "sort" + "strings" + "testing" + + ixml "golang.org/x/net/webdav/internal/xml" +) + +func TestReadLockInfo(t *testing.T) { + // The "section x.y.z" test cases come from section x.y.z of the spec at + // http://www.webdav.org/specs/rfc4918.html + testCases := []struct { + desc string + input string + wantLI lockInfo + wantStatus int + }{{ + "bad: junk", + "xxx", + lockInfo{}, + http.StatusBadRequest, + }, { + "bad: invalid owner XML", + "" + + "\n" + + " \n" + + " \n" + + " \n" + + " no end tag \n" + + " \n" + + "", + lockInfo{}, + http.StatusBadRequest, + }, { + "bad: invalid UTF-8", + "" + + "\n" + + " \n" + + " \n" + + " \n" + + " \xff \n" + + " \n" + + "", + lockInfo{}, + http.StatusBadRequest, + }, { + "bad: unfinished XML #1", + "" + + "\n" + + " \n" + + " \n", + lockInfo{}, + http.StatusBadRequest, + }, { + "bad: unfinished XML #2", + "" + + "\n" + + " \n" + + " \n" + + " \n", + lockInfo{}, + http.StatusBadRequest, + }, { + "good: empty", + "", + lockInfo{}, + 0, + }, { + "good: plain-text owner", + "" + + "\n" + + " \n" + + " \n" + + " gopher\n" + + "", + lockInfo{ + XMLName: ixml.Name{Space: "DAV:", Local: "lockinfo"}, + Exclusive: new(struct{}), + Write: new(struct{}), + Owner: owner{ + InnerXML: "gopher", + }, + }, + 0, + }, { + "section 9.10.7", + "" + + "\n" + + " \n" + + " \n" + + " \n" + + " http://example.org/~ejw/contact.html\n" + + " \n" + + "", + lockInfo{ + XMLName: ixml.Name{Space: "DAV:", Local: "lockinfo"}, + Exclusive: new(struct{}), + Write: new(struct{}), + Owner: owner{ + InnerXML: "\n http://example.org/~ejw/contact.html\n ", + }, + }, + 0, + }} + + for _, tc := range testCases { + li, status, err := readLockInfo(strings.NewReader(tc.input)) + if tc.wantStatus != 0 { + if err == nil { + t.Errorf("%s: got nil error, want non-nil", tc.desc) + continue + } + } else if err != nil { + t.Errorf("%s: %v", tc.desc, err) + continue + } + if !reflect.DeepEqual(li, tc.wantLI) || status != tc.wantStatus { + t.Errorf("%s:\ngot lockInfo=%v, status=%v\nwant lockInfo=%v, status=%v", + tc.desc, li, status, tc.wantLI, tc.wantStatus) + continue + } + } +} + +func TestReadPropfind(t *testing.T) { + testCases := []struct { + desc string + input string + wantPF propfind + wantStatus int + }{{ + desc: "propfind: propname", + input: "" + + "\n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Propname: new(struct{}), + }, + }, { + desc: "propfind: empty body means allprop", + input: "", + wantPF: propfind{ + Allprop: new(struct{}), + }, + }, { + desc: "propfind: allprop", + input: "" + + "\n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, 
+ Allprop: new(struct{}), + }, + }, { + desc: "propfind: allprop followed by include", + input: "" + + "\n" + + " \n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Allprop: new(struct{}), + Include: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: include followed by allprop", + input: "" + + "\n" + + " \n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Allprop: new(struct{}), + Include: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: propfind", + input: "" + + "\n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: prop with ignored comments", + input: "" + + "\n" + + " \n" + + " \n" + + " \n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: propfind with ignored whitespace", + input: "" + + "\n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: propfind with ignored mixed-content", + input: "" + + "\n" + + " foobar\n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: propname with ignored element (section A.4)", + input: "" + + "\n" + + " \n" + + " *boss*\n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Propname: new(struct{}), + }, + }, { + desc: "propfind: bad: junk", + input: "xxx", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: propname and allprop (section A.3)", + input: "" + + "\n" + + " " + + " " + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: propname and prop", + input: "" + + "\n" + + " \n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: allprop and prop", + input: "" + + "\n" + + " \n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: empty propfind with ignored element (section A.4)", + input: "" + + "\n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: empty prop", + input: "" + + "\n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: prop with just chardata", + input: "" + + "\n" + + " foo\n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "bad: interrupted prop", + input: "" + + "\n" + + " \n", + wantStatus: http.StatusBadRequest, + }, { + desc: "bad: malformed end element prop", + input: "" + + "\n" + + " \n", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: property with chardata value", + input: "" + + "\n" + + " bar\n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: property with whitespace value", + input: "" + + "\n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: include without allprop", + input: "" + + "\n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }} + + for _, tc := range testCases { + pf, status, err := readPropfind(strings.NewReader(tc.input)) + if tc.wantStatus != 0 { + if 
err == nil { + t.Errorf("%s: got nil error, want non-nil", tc.desc) + continue + } + } else if err != nil { + t.Errorf("%s: %v", tc.desc, err) + continue + } + if !reflect.DeepEqual(pf, tc.wantPF) || status != tc.wantStatus { + t.Errorf("%s:\ngot propfind=%v, status=%v\nwant propfind=%v, status=%v", + tc.desc, pf, status, tc.wantPF, tc.wantStatus) + continue + } + } +} + +func TestMultistatusWriter(t *testing.T) { + ///The "section x.y.z" test cases come from section x.y.z of the spec at + // http://www.webdav.org/specs/rfc4918.html + testCases := []struct { + desc string + responses []response + respdesc string + writeHeader bool + wantXML string + wantCode int + wantErr error + }{{ + desc: "section 9.2.2 (failed dependency)", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://ns.example.com/", + Local: "Authors", + }, + }}, + Status: "HTTP/1.1 424 Failed Dependency", + }, { + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://ns.example.com/", + Local: "Copyright-Owner", + }, + }}, + Status: "HTTP/1.1 409 Conflict", + }}, + ResponseDescription: "Copyright Owner cannot be deleted or altered.", + }}, + wantXML: `` + + `` + + `` + + ` ` + + ` http://example.com/foo` + + ` ` + + ` ` + + ` ` + + ` ` + + ` HTTP/1.1 424 Failed Dependency` + + ` ` + + ` ` + + ` ` + + ` ` + + ` ` + + ` HTTP/1.1 409 Conflict` + + ` ` + + ` Copyright Owner cannot be deleted or altered.` + + `` + + ``, + wantCode: StatusMulti, + }, { + desc: "section 9.6.2 (lock-token-submitted)", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + Status: "HTTP/1.1 423 Locked", + Error: &xmlError{ + InnerXML: []byte(``), + }, + }}, + wantXML: `` + + `` + + `` + + ` ` + + ` http://example.com/foo` + + ` HTTP/1.1 423 Locked` + + ` ` + + ` ` + + ``, + wantCode: StatusMulti, + }, { + desc: "section 9.1.3", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "bigbox"}, + InnerXML: []byte(`` + + `` + + `Box type A` + + ``), + }, { + XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "author"}, + InnerXML: []byte(`` + + `` + + `J.J. Johnson` + + ``), + }}, + Status: "HTTP/1.1 200 OK", + }, { + Prop: []Property{{ + XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "DingALing"}, + }, { + XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "Random"}, + }}, + Status: "HTTP/1.1 403 Forbidden", + ResponseDescription: "The user does not have access to the DingALing property.", + }}, + }}, + respdesc: "There has been an access violation error.", + wantXML: `` + + `` + + `` + + ` ` + + ` http://example.com/foo` + + ` ` + + ` ` + + ` Box type A` + + ` J.J. 
Johnson` + + ` ` + + ` HTTP/1.1 200 OK` + + ` ` + + ` ` + + ` ` + + ` ` + + ` ` + + ` ` + + ` HTTP/1.1 403 Forbidden` + + ` The user does not have access to the DingALing property.` + + ` ` + + ` ` + + ` There has been an access violation error.` + + ``, + wantCode: StatusMulti, + }, { + desc: "no response written", + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "no response written (with description)", + respdesc: "too bad", + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "empty multistatus with header", + writeHeader: true, + wantXML: ``, + wantCode: StatusMulti, + }, { + desc: "bad: no href", + responses: []response{{ + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://example.com/", + Local: "foo", + }, + }}, + Status: "HTTP/1.1 200 OK", + }}, + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "bad: multiple hrefs and no status", + responses: []response{{ + Href: []string{"http://example.com/foo", "http://example.com/bar"}, + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "bad: one href and no propstat", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "bad: status with one href and propstat", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://example.com/", + Local: "foo", + }, + }}, + Status: "HTTP/1.1 200 OK", + }}, + Status: "HTTP/1.1 200 OK", + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "bad: multiple hrefs and propstat", + responses: []response{{ + Href: []string{ + "http://example.com/foo", + "http://example.com/bar", + }, + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://example.com/", + Local: "foo", + }, + }}, + Status: "HTTP/1.1 200 OK", + }}, + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }} + + n := xmlNormalizer{omitWhitespace: true} +loop: + for _, tc := range testCases { + rec := httptest.NewRecorder() + w := multistatusWriter{w: rec, responseDescription: tc.respdesc} + if tc.writeHeader { + if err := w.writeHeader(); err != nil { + t.Errorf("%s: got writeHeader error %v, want nil", tc.desc, err) + continue + } + } + for _, r := range tc.responses { + if err := w.write(&r); err != nil { + if err != tc.wantErr { + t.Errorf("%s: got write error %v, want %v", + tc.desc, err, tc.wantErr) + } + continue loop + } + } + if err := w.close(); err != tc.wantErr { + t.Errorf("%s: got close error %v, want %v", + tc.desc, err, tc.wantErr) + continue + } + if rec.Code != tc.wantCode { + t.Errorf("%s: got HTTP status code %d, want %d\n", + tc.desc, rec.Code, tc.wantCode) + continue + } + gotXML := rec.Body.String() + eq, err := n.equalXML(strings.NewReader(gotXML), strings.NewReader(tc.wantXML)) + if err != nil { + t.Errorf("%s: equalXML: %v", tc.desc, err) + continue + } + if !eq { + t.Errorf("%s: XML body\ngot %s\nwant %s", tc.desc, gotXML, tc.wantXML) + } + } +} + +func TestReadProppatch(t *testing.T) { + ppStr := func(pps []Proppatch) string { + var outer []string + for _, pp := range pps { + var inner []string + for _, p := range pp.Props { + inner = append(inner, 
fmt.Sprintf("{XMLName: %q, Lang: %q, InnerXML: %q}", + p.XMLName, p.Lang, p.InnerXML)) + } + outer = append(outer, fmt.Sprintf("{Remove: %t, Props: [%s]}", + pp.Remove, strings.Join(inner, ", "))) + } + return "[" + strings.Join(outer, ", ") + "]" + } + + testCases := []struct { + desc string + input string + wantPP []Proppatch + wantStatus int + }{{ + desc: "proppatch: section 9.2 (with simple property value)", + input: `` + + `` + + `` + + ` ` + + ` somevalue` + + ` ` + + ` ` + + ` ` + + ` ` + + ``, + wantPP: []Proppatch{{ + Props: []Property{{ + xml.Name{Space: "http://ns.example.com/z/", Local: "Authors"}, + "", + []byte(`somevalue`), + }}, + }, { + Remove: true, + Props: []Property{{ + xml.Name{Space: "http://ns.example.com/z/", Local: "Copyright-Owner"}, + "", + nil, + }}, + }}, + }, { + desc: "proppatch: lang attribute on prop", + input: `` + + `` + + `` + + ` ` + + ` ` + + ` ` + + ` ` + + ` ` + + ``, + wantPP: []Proppatch{{ + Props: []Property{{ + xml.Name{Space: "http://example.com/ns", Local: "foo"}, + "en", + nil, + }}, + }}, + }, { + desc: "bad: remove with value", + input: `` + + `` + + `` + + ` ` + + ` ` + + ` ` + + ` Jim Whitehead` + + ` ` + + ` ` + + ` ` + + ``, + wantStatus: http.StatusBadRequest, + }, { + desc: "bad: empty propertyupdate", + input: `` + + `` + + ``, + wantStatus: http.StatusBadRequest, + }, { + desc: "bad: empty prop", + input: `` + + `` + + `` + + ` ` + + ` ` + + ` ` + + ``, + wantStatus: http.StatusBadRequest, + }} + + for _, tc := range testCases { + pp, status, err := readProppatch(strings.NewReader(tc.input)) + if tc.wantStatus != 0 { + if err == nil { + t.Errorf("%s: got nil error, want non-nil", tc.desc) + continue + } + } else if err != nil { + t.Errorf("%s: %v", tc.desc, err) + continue + } + if status != tc.wantStatus { + t.Errorf("%s: got status %d, want %d", tc.desc, status, tc.wantStatus) + continue + } + if !reflect.DeepEqual(pp, tc.wantPP) || status != tc.wantStatus { + t.Errorf("%s: proppatch\ngot %v\nwant %v", tc.desc, ppStr(pp), ppStr(tc.wantPP)) + } + } +} + +func TestUnmarshalXMLValue(t *testing.T) { + testCases := []struct { + desc string + input string + wantVal string + }{{ + desc: "simple char data", + input: "foo", + wantVal: "foo", + }, { + desc: "empty element", + input: "", + wantVal: "", + }, { + desc: "preserve namespace", + input: ``, + wantVal: ``, + }, { + desc: "preserve root element namespace", + input: ``, + wantVal: ``, + }, { + desc: "preserve whitespace", + input: " \t ", + wantVal: " \t ", + }, { + desc: "preserve mixed content", + input: ` a `, + wantVal: ` a `, + }, { + desc: "section 9.2", + input: `` + + `` + + ` Jim Whitehead` + + ` Roy Fielding` + + ``, + wantVal: `` + + ` Jim Whitehead` + + ` Roy Fielding`, + }, { + desc: "section 4.3.1 (mixed content)", + input: `` + + `` + + ` Jane Doe` + + ` ` + + ` mailto:jane.doe@example.com` + + ` http://www.example.com` + + ` ` + + ` Jane has been working way too long on the` + + ` long-awaited revision of ]]>.` + + ` ` + + ``, + wantVal: `` + + ` Jane Doe` + + ` ` + + ` mailto:jane.doe@example.com` + + ` http://www.example.com` + + ` ` + + ` Jane has been working way too long on the` + + ` long-awaited revision of <RFC2518>.` + + ` `, + }} + + var n xmlNormalizer + for _, tc := range testCases { + d := ixml.NewDecoder(strings.NewReader(tc.input)) + var v xmlValue + if err := d.Decode(&v); err != nil { + t.Errorf("%s: got error %v, want nil", tc.desc, err) + continue + } + eq, err := n.equalXML(bytes.NewReader(v), strings.NewReader(tc.wantVal)) + if err != nil { + 
t.Errorf("%s: equalXML: %v", tc.desc, err) + continue + } + if !eq { + t.Errorf("%s:\ngot %s\nwant %s", tc.desc, string(v), tc.wantVal) + } + } +} + +// xmlNormalizer normalizes XML. +type xmlNormalizer struct { + // omitWhitespace instructs to ignore whitespace between element tags. + omitWhitespace bool + // omitComments instructs to ignore XML comments. + omitComments bool +} + +// normalize writes the normalized XML content of r to w. It applies the +// following rules +// +// * Rename namespace prefixes according to an internal heuristic. +// * Remove unnecessary namespace declarations. +// * Sort attributes in XML start elements in lexical order of their +// fully qualified name. +// * Remove XML directives and processing instructions. +// * Remove CDATA between XML tags that only contains whitespace, if +// instructed to do so. +// * Remove comments, if instructed to do so. +// +func (n *xmlNormalizer) normalize(w io.Writer, r io.Reader) error { + d := ixml.NewDecoder(r) + e := ixml.NewEncoder(w) + for { + t, err := d.Token() + if err != nil { + if t == nil && err == io.EOF { + break + } + return err + } + switch val := t.(type) { + case ixml.Directive, ixml.ProcInst: + continue + case ixml.Comment: + if n.omitComments { + continue + } + case ixml.CharData: + if n.omitWhitespace && len(bytes.TrimSpace(val)) == 0 { + continue + } + case ixml.StartElement: + start, _ := ixml.CopyToken(val).(ixml.StartElement) + attr := start.Attr[:0] + for _, a := range start.Attr { + if a.Name.Space == "xmlns" || a.Name.Local == "xmlns" { + continue + } + attr = append(attr, a) + } + sort.Sort(byName(attr)) + start.Attr = attr + t = start + } + err = e.EncodeToken(t) + if err != nil { + return err + } + } + return e.Flush() +} + +// equalXML tests for equality of the normalized XML contents of a and b. +func (n *xmlNormalizer) equalXML(a, b io.Reader) (bool, error) { + var buf bytes.Buffer + if err := n.normalize(&buf, a); err != nil { + return false, err + } + normA := buf.String() + buf.Reset() + if err := n.normalize(&buf, b); err != nil { + return false, err + } + normB := buf.String() + return normA == normB, nil +} + +type byName []ixml.Attr + +func (a byName) Len() int { return len(a) } +func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byName) Less(i, j int) bool { + if a[i].Name.Space != a[j].Name.Space { + return a[i].Name.Space < a[j].Name.Space + } + return a[i].Name.Local < a[j].Name.Local +} diff --git a/vendor/golang.org/x/net/websocket/client.go b/vendor/golang.org/x/net/websocket/client.go new file mode 100644 index 0000000..69a4ac7 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/client.go @@ -0,0 +1,106 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "io" + "net" + "net/http" + "net/url" +) + +// DialError is an error that occurs while dialling a websocket server. +type DialError struct { + *Config + Err error +} + +func (e *DialError) Error() string { + return "websocket.Dial " + e.Config.Location.String() + ": " + e.Err.Error() +} + +// NewConfig creates a new WebSocket config for client connection. 
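// An illustrative call (hypothetical values):
//
//	config, err := NewConfig("ws://example.com/echo", "http://example.com/")
//
// where server is a "ws" or "wss" URL and origin identifies the page
// initiating the connection.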
+func NewConfig(server, origin string) (config *Config, err error) { + config = new(Config) + config.Version = ProtocolVersionHybi13 + config.Location, err = url.ParseRequestURI(server) + if err != nil { + return + } + config.Origin, err = url.ParseRequestURI(origin) + if err != nil { + return + } + config.Header = http.Header(make(map[string][]string)) + return +} + +// NewClient creates a new WebSocket client connection over rwc. +func NewClient(config *Config, rwc io.ReadWriteCloser) (ws *Conn, err error) { + br := bufio.NewReader(rwc) + bw := bufio.NewWriter(rwc) + err = hybiClientHandshake(config, br, bw) + if err != nil { + return + } + buf := bufio.NewReadWriter(br, bw) + ws = newHybiClientConn(config, buf, rwc) + return +} + +// Dial opens a new client connection to a WebSocket. +func Dial(url_, protocol, origin string) (ws *Conn, err error) { + config, err := NewConfig(url_, origin) + if err != nil { + return nil, err + } + if protocol != "" { + config.Protocol = []string{protocol} + } + return DialConfig(config) +} + +var portMap = map[string]string{ + "ws": "80", + "wss": "443", +} + +func parseAuthority(location *url.URL) string { + if _, ok := portMap[location.Scheme]; ok { + if _, _, err := net.SplitHostPort(location.Host); err != nil { + return net.JoinHostPort(location.Host, portMap[location.Scheme]) + } + } + return location.Host +} + +// DialConfig opens a new client connection to a WebSocket with a config. +func DialConfig(config *Config) (ws *Conn, err error) { + var client net.Conn + if config.Location == nil { + return nil, &DialError{config, ErrBadWebSocketLocation} + } + if config.Origin == nil { + return nil, &DialError{config, ErrBadWebSocketOrigin} + } + dialer := config.Dialer + if dialer == nil { + dialer = &net.Dialer{} + } + client, err = dialWithDialer(dialer, config) + if err != nil { + goto Error + } + ws, err = NewClient(config, client) + if err != nil { + client.Close() + goto Error + } + return + +Error: + return nil, &DialError{config, err} +} diff --git a/vendor/golang.org/x/net/websocket/dial.go b/vendor/golang.org/x/net/websocket/dial.go new file mode 100644 index 0000000..2dab943 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/dial.go @@ -0,0 +1,24 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "crypto/tls" + "net" +) + +func dialWithDialer(dialer *net.Dialer, config *Config) (conn net.Conn, err error) { + switch config.Location.Scheme { + case "ws": + conn, err = dialer.Dial("tcp", parseAuthority(config.Location)) + + case "wss": + conn, err = tls.DialWithDialer(dialer, "tcp", parseAuthority(config.Location), config.TlsConfig) + + default: + err = ErrBadScheme + } + return +} diff --git a/vendor/golang.org/x/net/websocket/dial_test.go b/vendor/golang.org/x/net/websocket/dial_test.go new file mode 100644 index 0000000..aa03e30 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/dial_test.go @@ -0,0 +1,43 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "crypto/tls" + "fmt" + "log" + "net" + "net/http/httptest" + "testing" + "time" +) + +// This test depend on Go 1.3+ because in earlier versions the Dialer won't be +// used in TLS connections and a timeout won't be triggered. 
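// (The test forces the failure by putting the Dialer's Deadline in the
// past, so the dial fails immediately with a timeout instead of waiting:
//
//	config.Dialer = &net.Dialer{Deadline: time.Now().Add(-time.Minute)}
//
// DialConfig should then surface a *DialError wrapping a timed-out
// *net.OpError.)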
+func TestDialConfigTLSWithDialer(t *testing.T) { + tlsServer := httptest.NewTLSServer(nil) + tlsServerAddr := tlsServer.Listener.Addr().String() + log.Print("Test TLS WebSocket server listening on ", tlsServerAddr) + defer tlsServer.Close() + config, _ := NewConfig(fmt.Sprintf("wss://%s/echo", tlsServerAddr), "http://localhost") + config.Dialer = &net.Dialer{ + Deadline: time.Now().Add(-time.Minute), + } + config.TlsConfig = &tls.Config{ + InsecureSkipVerify: true, + } + _, err := DialConfig(config) + dialerr, ok := err.(*DialError) + if !ok { + t.Fatalf("DialError expected, got %#v", err) + } + neterr, ok := dialerr.Err.(*net.OpError) + if !ok { + t.Fatalf("net.OpError error expected, got %#v", dialerr.Err) + } + if !neterr.Timeout() { + t.Fatalf("expected timeout error, got %#v", neterr) + } +} diff --git a/vendor/golang.org/x/net/websocket/exampledial_test.go b/vendor/golang.org/x/net/websocket/exampledial_test.go new file mode 100644 index 0000000..72bb9d4 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/exampledial_test.go @@ -0,0 +1,31 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket_test + +import ( + "fmt" + "log" + + "golang.org/x/net/websocket" +) + +// This example demonstrates a trivial client. +func ExampleDial() { + origin := "http://localhost/" + url := "ws://localhost:12345/ws" + ws, err := websocket.Dial(url, "", origin) + if err != nil { + log.Fatal(err) + } + if _, err := ws.Write([]byte("hello, world!\n")); err != nil { + log.Fatal(err) + } + var msg = make([]byte, 512) + var n int + if n, err = ws.Read(msg); err != nil { + log.Fatal(err) + } + fmt.Printf("Received: %s.\n", msg[:n]) +} diff --git a/vendor/golang.org/x/net/websocket/examplehandler_test.go b/vendor/golang.org/x/net/websocket/examplehandler_test.go new file mode 100644 index 0000000..f22a98f --- /dev/null +++ b/vendor/golang.org/x/net/websocket/examplehandler_test.go @@ -0,0 +1,26 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket_test + +import ( + "io" + "net/http" + + "golang.org/x/net/websocket" +) + +// Echo the data received on the WebSocket. +func EchoServer(ws *websocket.Conn) { + io.Copy(ws, ws) +} + +// This example demonstrates a trivial echo server. +func ExampleHandler() { + http.Handle("/echo", websocket.Handler(EchoServer)) + err := http.ListenAndServe(":12345", nil) + if err != nil { + panic("ListenAndServe: " + err.Error()) + } +} diff --git a/vendor/golang.org/x/net/websocket/hybi.go b/vendor/golang.org/x/net/websocket/hybi.go new file mode 100644 index 0000000..8cffdd1 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/hybi.go @@ -0,0 +1,583 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +// This file implements a protocol of hybi draft. 
+// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17 + +import ( + "bufio" + "bytes" + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" +) + +const ( + websocketGUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" + + closeStatusNormal = 1000 + closeStatusGoingAway = 1001 + closeStatusProtocolError = 1002 + closeStatusUnsupportedData = 1003 + closeStatusFrameTooLarge = 1004 + closeStatusNoStatusRcvd = 1005 + closeStatusAbnormalClosure = 1006 + closeStatusBadMessageData = 1007 + closeStatusPolicyViolation = 1008 + closeStatusTooBigData = 1009 + closeStatusExtensionMismatch = 1010 + + maxControlFramePayloadLength = 125 +) + +var ( + ErrBadMaskingKey = &ProtocolError{"bad masking key"} + ErrBadPongMessage = &ProtocolError{"bad pong message"} + ErrBadClosingStatus = &ProtocolError{"bad closing status"} + ErrUnsupportedExtensions = &ProtocolError{"unsupported extensions"} + ErrNotImplemented = &ProtocolError{"not implemented"} + + handshakeHeader = map[string]bool{ + "Host": true, + "Upgrade": true, + "Connection": true, + "Sec-Websocket-Key": true, + "Sec-Websocket-Origin": true, + "Sec-Websocket-Version": true, + "Sec-Websocket-Protocol": true, + "Sec-Websocket-Accept": true, + } +) + +// A hybiFrameHeader is a frame header as defined in hybi draft. +type hybiFrameHeader struct { + Fin bool + Rsv [3]bool + OpCode byte + Length int64 + MaskingKey []byte + + data *bytes.Buffer +} + +// A hybiFrameReader is a reader for hybi frame. +type hybiFrameReader struct { + reader io.Reader + + header hybiFrameHeader + pos int64 + length int +} + +func (frame *hybiFrameReader) Read(msg []byte) (n int, err error) { + n, err = frame.reader.Read(msg) + if frame.header.MaskingKey != nil { + for i := 0; i < n; i++ { + msg[i] = msg[i] ^ frame.header.MaskingKey[frame.pos%4] + frame.pos++ + } + } + return n, err +} + +func (frame *hybiFrameReader) PayloadType() byte { return frame.header.OpCode } + +func (frame *hybiFrameReader) HeaderReader() io.Reader { + if frame.header.data == nil { + return nil + } + if frame.header.data.Len() == 0 { + return nil + } + return frame.header.data +} + +func (frame *hybiFrameReader) TrailerReader() io.Reader { return nil } + +func (frame *hybiFrameReader) Len() (n int) { return frame.length } + +// A hybiFrameReaderFactory creates new frame reader based on its frame type. +type hybiFrameReaderFactory struct { + *bufio.Reader +} + +// NewFrameReader reads a frame header from the connection, and creates new reader for the frame. +// See Section 5.2 Base Framing protocol for detail. +// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17#section-5.2 +func (buf hybiFrameReaderFactory) NewFrameReader() (frame frameReader, err error) { + hybiFrame := new(hybiFrameReader) + frame = hybiFrame + var header []byte + var b byte + // First byte. FIN/RSV1/RSV2/RSV3/OpCode(4bits) + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + hybiFrame.header.Fin = ((header[0] >> 7) & 1) != 0 + for i := 0; i < 3; i++ { + j := uint(6 - i) + hybiFrame.header.Rsv[i] = ((header[0] >> j) & 1) != 0 + } + hybiFrame.header.OpCode = header[0] & 0x0f + + // Second byte. Mask/Payload len(7bits) + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + mask := (b & 0x80) != 0 + b &= 0x7f + lengthFields := 0 + switch { + case b <= 125: // Payload length 7bits. 
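// (Per RFC 6455 section 5.2, a 7-bit value of 126 means the next 2 bytes
// carry a 16-bit payload length, and 127 means the next 8 bytes carry a
// 64-bit length; for example, a 300-byte payload is encoded as 126
// followed by the bytes 0x01 0x2C.)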
+ hybiFrame.header.Length = int64(b) + case b == 126: // Payload length 7+16bits + lengthFields = 2 + case b == 127: // Payload length 7+64bits + lengthFields = 8 + } + for i := 0; i < lengthFields; i++ { + b, err = buf.ReadByte() + if err != nil { + return + } + if lengthFields == 8 && i == 0 { // MSB must be zero when 7+64 bits + b &= 0x7f + } + header = append(header, b) + hybiFrame.header.Length = hybiFrame.header.Length*256 + int64(b) + } + if mask { + // Masking key. 4 bytes. + for i := 0; i < 4; i++ { + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + hybiFrame.header.MaskingKey = append(hybiFrame.header.MaskingKey, b) + } + } + hybiFrame.reader = io.LimitReader(buf.Reader, hybiFrame.header.Length) + hybiFrame.header.data = bytes.NewBuffer(header) + hybiFrame.length = len(header) + int(hybiFrame.header.Length) + return +} + +// A HybiFrameWriter is a writer for hybi frame. +type hybiFrameWriter struct { + writer *bufio.Writer + + header *hybiFrameHeader +} + +func (frame *hybiFrameWriter) Write(msg []byte) (n int, err error) { + var header []byte + var b byte + if frame.header.Fin { + b |= 0x80 + } + for i := 0; i < 3; i++ { + if frame.header.Rsv[i] { + j := uint(6 - i) + b |= 1 << j + } + } + b |= frame.header.OpCode + header = append(header, b) + if frame.header.MaskingKey != nil { + b = 0x80 + } else { + b = 0 + } + lengthFields := 0 + length := len(msg) + switch { + case length <= 125: + b |= byte(length) + case length < 65536: + b |= 126 + lengthFields = 2 + default: + b |= 127 + lengthFields = 8 + } + header = append(header, b) + for i := 0; i < lengthFields; i++ { + j := uint((lengthFields - i - 1) * 8) + b = byte((length >> j) & 0xff) + header = append(header, b) + } + if frame.header.MaskingKey != nil { + if len(frame.header.MaskingKey) != 4 { + return 0, ErrBadMaskingKey + } + header = append(header, frame.header.MaskingKey...) + frame.writer.Write(header) + data := make([]byte, length) + for i := range data { + data[i] = msg[i] ^ frame.header.MaskingKey[i%4] + } + frame.writer.Write(data) + err = frame.writer.Flush() + return length, err + } + frame.writer.Write(header) + frame.writer.Write(msg) + err = frame.writer.Flush() + return length, err +} + +func (frame *hybiFrameWriter) Close() error { return nil } + +type hybiFrameWriterFactory struct { + *bufio.Writer + needMaskingKey bool +} + +func (buf hybiFrameWriterFactory) NewFrameWriter(payloadType byte) (frame frameWriter, err error) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: payloadType} + if buf.needMaskingKey { + frameHeader.MaskingKey, err = generateMaskingKey() + if err != nil { + return nil, err + } + } + return &hybiFrameWriter{writer: buf.Writer, header: frameHeader}, nil +} + +type hybiFrameHandler struct { + conn *Conn + payloadType byte +} + +func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, error) { + if handler.conn.IsServerConn() { + // The client MUST mask all frames sent to the server. + if frame.(*hybiFrameReader).header.MaskingKey == nil { + handler.WriteClose(closeStatusProtocolError) + return nil, io.EOF + } + } else { + // The server MUST NOT mask all frames. 
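+		// (RFC 6455, section 5.1: a server must not mask any frame that it
+		// sends to the client.)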
+		if frame.(*hybiFrameReader).header.MaskingKey != nil {
+			handler.WriteClose(closeStatusProtocolError)
+			return nil, io.EOF
+		}
+	}
+	if header := frame.HeaderReader(); header != nil {
+		io.Copy(ioutil.Discard, header)
+	}
+	switch frame.PayloadType() {
+	case ContinuationFrame:
+		frame.(*hybiFrameReader).header.OpCode = handler.payloadType
+	case TextFrame, BinaryFrame:
+		handler.payloadType = frame.PayloadType()
+	case CloseFrame:
+		return nil, io.EOF
+	case PingFrame, PongFrame:
+		b := make([]byte, maxControlFramePayloadLength)
+		n, err := io.ReadFull(frame, b)
+		if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+			return nil, err
+		}
+		io.Copy(ioutil.Discard, frame)
+		if frame.PayloadType() == PingFrame {
+			if _, err := handler.WritePong(b[:n]); err != nil {
+				return nil, err
+			}
+		}
+		return nil, nil
+	}
+	return frame, nil
+}
+
+func (handler *hybiFrameHandler) WriteClose(status int) (err error) {
+	handler.conn.wio.Lock()
+	defer handler.conn.wio.Unlock()
+	w, err := handler.conn.frameWriterFactory.NewFrameWriter(CloseFrame)
+	if err != nil {
+		return err
+	}
+	msg := make([]byte, 2)
+	binary.BigEndian.PutUint16(msg, uint16(status))
+	_, err = w.Write(msg)
+	w.Close()
+	return err
+}
+
+func (handler *hybiFrameHandler) WritePong(msg []byte) (n int, err error) {
+	handler.conn.wio.Lock()
+	defer handler.conn.wio.Unlock()
+	w, err := handler.conn.frameWriterFactory.NewFrameWriter(PongFrame)
+	if err != nil {
+		return 0, err
+	}
+	n, err = w.Write(msg)
+	w.Close()
+	return n, err
+}
+
+// newHybiConn creates a new WebSocket connection speaking hybi draft protocol.
+func newHybiConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
+	if buf == nil {
+		br := bufio.NewReader(rwc)
+		bw := bufio.NewWriter(rwc)
+		buf = bufio.NewReadWriter(br, bw)
+	}
+	ws := &Conn{config: config, request: request, buf: buf, rwc: rwc,
+		frameReaderFactory: hybiFrameReaderFactory{buf.Reader},
+		frameWriterFactory: hybiFrameWriterFactory{
+			buf.Writer, request == nil},
+		PayloadType:        TextFrame,
+		defaultCloseStatus: closeStatusNormal}
+	ws.frameHandler = &hybiFrameHandler{conn: ws}
+	return ws
+}
+
+// generateMaskingKey generates a masking key for a frame.
+func generateMaskingKey() (maskingKey []byte, err error) {
+	maskingKey = make([]byte, 4)
+	if _, err = io.ReadFull(rand.Reader, maskingKey); err != nil {
+		return
+	}
+	return
+}
+
+// generateNonce generates a nonce consisting of a randomly selected 16-byte
+// value that has been base64-encoded.
+func generateNonce() (nonce []byte) {
+	key := make([]byte, 16)
+	if _, err := io.ReadFull(rand.Reader, key); err != nil {
+		panic(err)
+	}
+	nonce = make([]byte, 24)
+	base64.StdEncoding.Encode(nonce, key)
+	return
+}
+
+// removeZone removes the IPv6 zone identifier from host.
+// E.g., "[fe80::1%en0]:8080" to "[fe80::1]:8080"
+func removeZone(host string) string {
+	if !strings.HasPrefix(host, "[") {
+		return host
+	}
+	i := strings.LastIndex(host, "]")
+	if i < 0 {
+		return host
+	}
+	j := strings.LastIndex(host[:i], "%")
+	if j < 0 {
+		return host
+	}
+	return host[:j] + host[i:]
+}
+
+// getNonceAccept computes the base64-encoded SHA-1 of the concatenation of
+// the nonce ("Sec-WebSocket-Key" value) with the websocket GUID string.
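+// For example, the sample nonce "dGhlIHNhbXBsZSBub25jZQ==" yields
+// "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=", the values used in TestSecWebSocketAccept.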
+func getNonceAccept(nonce []byte) (expected []byte, err error) { + h := sha1.New() + if _, err = h.Write(nonce); err != nil { + return + } + if _, err = h.Write([]byte(websocketGUID)); err != nil { + return + } + expected = make([]byte, 28) + base64.StdEncoding.Encode(expected, h.Sum(nil)) + return +} + +// Client handshake described in draft-ietf-hybi-thewebsocket-protocol-17 +func hybiClientHandshake(config *Config, br *bufio.Reader, bw *bufio.Writer) (err error) { + bw.WriteString("GET " + config.Location.RequestURI() + " HTTP/1.1\r\n") + + // According to RFC 6874, an HTTP client, proxy, or other + // intermediary must remove any IPv6 zone identifier attached + // to an outgoing URI. + bw.WriteString("Host: " + removeZone(config.Location.Host) + "\r\n") + bw.WriteString("Upgrade: websocket\r\n") + bw.WriteString("Connection: Upgrade\r\n") + nonce := generateNonce() + if config.handshakeData != nil { + nonce = []byte(config.handshakeData["key"]) + } + bw.WriteString("Sec-WebSocket-Key: " + string(nonce) + "\r\n") + bw.WriteString("Origin: " + strings.ToLower(config.Origin.String()) + "\r\n") + + if config.Version != ProtocolVersionHybi13 { + return ErrBadProtocolVersion + } + + bw.WriteString("Sec-WebSocket-Version: " + fmt.Sprintf("%d", config.Version) + "\r\n") + if len(config.Protocol) > 0 { + bw.WriteString("Sec-WebSocket-Protocol: " + strings.Join(config.Protocol, ", ") + "\r\n") + } + // TODO(ukai): send Sec-WebSocket-Extensions. + err = config.Header.WriteSubset(bw, handshakeHeader) + if err != nil { + return err + } + + bw.WriteString("\r\n") + if err = bw.Flush(); err != nil { + return err + } + + resp, err := http.ReadResponse(br, &http.Request{Method: "GET"}) + if err != nil { + return err + } + if resp.StatusCode != 101 { + return ErrBadStatus + } + if strings.ToLower(resp.Header.Get("Upgrade")) != "websocket" || + strings.ToLower(resp.Header.Get("Connection")) != "upgrade" { + return ErrBadUpgrade + } + expectedAccept, err := getNonceAccept(nonce) + if err != nil { + return err + } + if resp.Header.Get("Sec-WebSocket-Accept") != string(expectedAccept) { + return ErrChallengeResponse + } + if resp.Header.Get("Sec-WebSocket-Extensions") != "" { + return ErrUnsupportedExtensions + } + offeredProtocol := resp.Header.Get("Sec-WebSocket-Protocol") + if offeredProtocol != "" { + protocolMatched := false + for i := 0; i < len(config.Protocol); i++ { + if config.Protocol[i] == offeredProtocol { + protocolMatched = true + break + } + } + if !protocolMatched { + return ErrBadWebSocketProtocol + } + config.Protocol = []string{offeredProtocol} + } + + return nil +} + +// newHybiClientConn creates a client WebSocket connection after handshake. +func newHybiClientConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser) *Conn { + return newHybiConn(config, buf, rwc, nil) +} + +// A HybiServerHandshaker performs a server handshake using hybi draft protocol. +type hybiServerHandshaker struct { + *Config + accept []byte +} + +func (c *hybiServerHandshaker) ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) { + c.Version = ProtocolVersionHybi13 + if req.Method != "GET" { + return http.StatusMethodNotAllowed, ErrBadRequestMethod + } + // HTTP version can be safely ignored. 
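+	// (RFC 6455, section 4.1 requires HTTP/1.1 or higher; the request line
+	// has already been validated by the net/http request parser.)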
+ + if strings.ToLower(req.Header.Get("Upgrade")) != "websocket" || + !strings.Contains(strings.ToLower(req.Header.Get("Connection")), "upgrade") { + return http.StatusBadRequest, ErrNotWebSocket + } + + key := req.Header.Get("Sec-Websocket-Key") + if key == "" { + return http.StatusBadRequest, ErrChallengeResponse + } + version := req.Header.Get("Sec-Websocket-Version") + switch version { + case "13": + c.Version = ProtocolVersionHybi13 + default: + return http.StatusBadRequest, ErrBadWebSocketVersion + } + var scheme string + if req.TLS != nil { + scheme = "wss" + } else { + scheme = "ws" + } + c.Location, err = url.ParseRequestURI(scheme + "://" + req.Host + req.URL.RequestURI()) + if err != nil { + return http.StatusBadRequest, err + } + protocol := strings.TrimSpace(req.Header.Get("Sec-Websocket-Protocol")) + if protocol != "" { + protocols := strings.Split(protocol, ",") + for i := 0; i < len(protocols); i++ { + c.Protocol = append(c.Protocol, strings.TrimSpace(protocols[i])) + } + } + c.accept, err = getNonceAccept([]byte(key)) + if err != nil { + return http.StatusInternalServerError, err + } + return http.StatusSwitchingProtocols, nil +} + +// Origin parses the Origin header in req. +// If the Origin header is not set, it returns nil and nil. +func Origin(config *Config, req *http.Request) (*url.URL, error) { + var origin string + switch config.Version { + case ProtocolVersionHybi13: + origin = req.Header.Get("Origin") + } + if origin == "" { + return nil, nil + } + return url.ParseRequestURI(origin) +} + +func (c *hybiServerHandshaker) AcceptHandshake(buf *bufio.Writer) (err error) { + if len(c.Protocol) > 0 { + if len(c.Protocol) != 1 { + // You need choose a Protocol in Handshake func in Server. + return ErrBadWebSocketProtocol + } + } + buf.WriteString("HTTP/1.1 101 Switching Protocols\r\n") + buf.WriteString("Upgrade: websocket\r\n") + buf.WriteString("Connection: Upgrade\r\n") + buf.WriteString("Sec-WebSocket-Accept: " + string(c.accept) + "\r\n") + if len(c.Protocol) > 0 { + buf.WriteString("Sec-WebSocket-Protocol: " + c.Protocol[0] + "\r\n") + } + // TODO(ukai): send Sec-WebSocket-Extensions. + if c.Header != nil { + err := c.Header.WriteSubset(buf, handshakeHeader) + if err != nil { + return err + } + } + buf.WriteString("\r\n") + return buf.Flush() +} + +func (c *hybiServerHandshaker) NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { + return newHybiServerConn(c.Config, buf, rwc, request) +} + +// newHybiServerConn returns a new WebSocket connection speaking hybi draft protocol. +func newHybiServerConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { + return newHybiConn(config, buf, rwc, request) +} diff --git a/vendor/golang.org/x/net/websocket/hybi_test.go b/vendor/golang.org/x/net/websocket/hybi_test.go new file mode 100644 index 0000000..9504aa2 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/hybi_test.go @@ -0,0 +1,608 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "bufio" + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "testing" +) + +// Test the getNonceAccept function with values in +// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17 +func TestSecWebSocketAccept(t *testing.T) { + nonce := []byte("dGhlIHNhbXBsZSBub25jZQ==") + expected := []byte("s3pPLMBiTxaQ9kYGzzhZRbK+xOo=") + accept, err := getNonceAccept(nonce) + if err != nil { + t.Errorf("getNonceAccept: returned error %v", err) + return + } + if !bytes.Equal(expected, accept) { + t.Errorf("getNonceAccept: expected %q got %q", expected, accept) + } +} + +func TestHybiClientHandshake(t *testing.T) { + type test struct { + url, host string + } + tests := []test{ + {"ws://server.example.com/chat", "server.example.com"}, + {"ws://127.0.0.1/chat", "127.0.0.1"}, + } + if _, err := url.ParseRequestURI("http://[fe80::1%25lo0]"); err == nil { + tests = append(tests, test{"ws://[fe80::1%25lo0]/chat", "[fe80::1]"}) + } + + for _, tt := range tests { + var b bytes.Buffer + bw := bufio.NewWriter(&b) + br := bufio.NewReader(strings.NewReader(`HTTP/1.1 101 Switching Protocols +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo= +Sec-WebSocket-Protocol: chat + +`)) + var err error + var config Config + config.Location, err = url.ParseRequestURI(tt.url) + if err != nil { + t.Fatal("location url", err) + } + config.Origin, err = url.ParseRequestURI("http://example.com") + if err != nil { + t.Fatal("origin url", err) + } + config.Protocol = append(config.Protocol, "chat") + config.Protocol = append(config.Protocol, "superchat") + config.Version = ProtocolVersionHybi13 + config.handshakeData = map[string]string{ + "key": "dGhlIHNhbXBsZSBub25jZQ==", + } + if err := hybiClientHandshake(&config, br, bw); err != nil { + t.Fatal("handshake", err) + } + req, err := http.ReadRequest(bufio.NewReader(&b)) + if err != nil { + t.Fatal("read request", err) + } + if req.Method != "GET" { + t.Errorf("request method expected GET, but got %s", req.Method) + } + if req.URL.Path != "/chat" { + t.Errorf("request path expected /chat, but got %s", req.URL.Path) + } + if req.Proto != "HTTP/1.1" { + t.Errorf("request proto expected HTTP/1.1, but got %s", req.Proto) + } + if req.Host != tt.host { + t.Errorf("request host expected %s, but got %s", tt.host, req.Host) + } + var expectedHeader = map[string]string{ + "Connection": "Upgrade", + "Upgrade": "websocket", + "Sec-Websocket-Key": config.handshakeData["key"], + "Origin": config.Origin.String(), + "Sec-Websocket-Protocol": "chat, superchat", + "Sec-Websocket-Version": fmt.Sprintf("%d", ProtocolVersionHybi13), + } + for k, v := range expectedHeader { + if req.Header.Get(k) != v { + t.Errorf("%s expected %s, but got %v", k, v, req.Header.Get(k)) + } + } + } +} + +func TestHybiClientHandshakeWithHeader(t *testing.T) { + b := bytes.NewBuffer([]byte{}) + bw := bufio.NewWriter(b) + br := bufio.NewReader(strings.NewReader(`HTTP/1.1 101 Switching Protocols +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo= +Sec-WebSocket-Protocol: chat + +`)) + var err error + config := new(Config) + config.Location, err = url.ParseRequestURI("ws://server.example.com/chat") + if err != nil { + t.Fatal("location url", err) + } + config.Origin, err = url.ParseRequestURI("http://example.com") + if err != nil { + t.Fatal("origin url", err) + } + config.Protocol = append(config.Protocol, "chat") + config.Protocol = append(config.Protocol, "superchat") + 
config.Version = ProtocolVersionHybi13 + config.Header = http.Header(make(map[string][]string)) + config.Header.Add("User-Agent", "test") + + config.handshakeData = map[string]string{ + "key": "dGhlIHNhbXBsZSBub25jZQ==", + } + err = hybiClientHandshake(config, br, bw) + if err != nil { + t.Errorf("handshake failed: %v", err) + } + req, err := http.ReadRequest(bufio.NewReader(b)) + if err != nil { + t.Fatalf("read request: %v", err) + } + if req.Method != "GET" { + t.Errorf("request method expected GET, but got %q", req.Method) + } + if req.URL.Path != "/chat" { + t.Errorf("request path expected /chat, but got %q", req.URL.Path) + } + if req.Proto != "HTTP/1.1" { + t.Errorf("request proto expected HTTP/1.1, but got %q", req.Proto) + } + if req.Host != "server.example.com" { + t.Errorf("request Host expected server.example.com, but got %v", req.Host) + } + var expectedHeader = map[string]string{ + "Connection": "Upgrade", + "Upgrade": "websocket", + "Sec-Websocket-Key": config.handshakeData["key"], + "Origin": config.Origin.String(), + "Sec-Websocket-Protocol": "chat, superchat", + "Sec-Websocket-Version": fmt.Sprintf("%d", ProtocolVersionHybi13), + "User-Agent": "test", + } + for k, v := range expectedHeader { + if req.Header.Get(k) != v { + t.Errorf(fmt.Sprintf("%s expected %q but got %q", k, v, req.Header.Get(k))) + } + } +} + +func TestHybiServerHandshake(t *testing.T) { + config := new(Config) + handshaker := &hybiServerHandshaker{Config: config} + br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1 +Host: server.example.com +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== +Origin: http://example.com +Sec-WebSocket-Protocol: chat, superchat +Sec-WebSocket-Version: 13 + +`)) + req, err := http.ReadRequest(br) + if err != nil { + t.Fatal("request", err) + } + code, err := handshaker.ReadHandshake(br, req) + if err != nil { + t.Errorf("handshake failed: %v", err) + } + if code != http.StatusSwitchingProtocols { + t.Errorf("status expected %q but got %q", http.StatusSwitchingProtocols, code) + } + expectedProtocols := []string{"chat", "superchat"} + if fmt.Sprintf("%v", config.Protocol) != fmt.Sprintf("%v", expectedProtocols) { + t.Errorf("protocol expected %q but got %q", expectedProtocols, config.Protocol) + } + b := bytes.NewBuffer([]byte{}) + bw := bufio.NewWriter(b) + + config.Protocol = config.Protocol[:1] + + err = handshaker.AcceptHandshake(bw) + if err != nil { + t.Errorf("handshake response failed: %v", err) + } + expectedResponse := strings.Join([]string{ + "HTTP/1.1 101 Switching Protocols", + "Upgrade: websocket", + "Connection: Upgrade", + "Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=", + "Sec-WebSocket-Protocol: chat", + "", ""}, "\r\n") + + if b.String() != expectedResponse { + t.Errorf("handshake expected %q but got %q", expectedResponse, b.String()) + } +} + +func TestHybiServerHandshakeNoSubProtocol(t *testing.T) { + config := new(Config) + handshaker := &hybiServerHandshaker{Config: config} + br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1 +Host: server.example.com +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== +Origin: http://example.com +Sec-WebSocket-Version: 13 + +`)) + req, err := http.ReadRequest(br) + if err != nil { + t.Fatal("request", err) + } + code, err := handshaker.ReadHandshake(br, req) + if err != nil { + t.Errorf("handshake failed: %v", err) + } + if code != http.StatusSwitchingProtocols { + t.Errorf("status expected %q but got %q", 
http.StatusSwitchingProtocols, code) + } + if len(config.Protocol) != 0 { + t.Errorf("len(config.Protocol) expected 0, but got %q", len(config.Protocol)) + } + b := bytes.NewBuffer([]byte{}) + bw := bufio.NewWriter(b) + + err = handshaker.AcceptHandshake(bw) + if err != nil { + t.Errorf("handshake response failed: %v", err) + } + expectedResponse := strings.Join([]string{ + "HTTP/1.1 101 Switching Protocols", + "Upgrade: websocket", + "Connection: Upgrade", + "Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=", + "", ""}, "\r\n") + + if b.String() != expectedResponse { + t.Errorf("handshake expected %q but got %q", expectedResponse, b.String()) + } +} + +func TestHybiServerHandshakeHybiBadVersion(t *testing.T) { + config := new(Config) + handshaker := &hybiServerHandshaker{Config: config} + br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1 +Host: server.example.com +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== +Sec-WebSocket-Origin: http://example.com +Sec-WebSocket-Protocol: chat, superchat +Sec-WebSocket-Version: 9 + +`)) + req, err := http.ReadRequest(br) + if err != nil { + t.Fatal("request", err) + } + code, err := handshaker.ReadHandshake(br, req) + if err != ErrBadWebSocketVersion { + t.Errorf("handshake expected err %q but got %q", ErrBadWebSocketVersion, err) + } + if code != http.StatusBadRequest { + t.Errorf("status expected %q but got %q", http.StatusBadRequest, code) + } +} + +func testHybiFrame(t *testing.T, testHeader, testPayload, testMaskedPayload []byte, frameHeader *hybiFrameHeader) { + b := bytes.NewBuffer([]byte{}) + frameWriterFactory := &hybiFrameWriterFactory{bufio.NewWriter(b), false} + w, _ := frameWriterFactory.NewFrameWriter(TextFrame) + w.(*hybiFrameWriter).header = frameHeader + _, err := w.Write(testPayload) + w.Close() + if err != nil { + t.Errorf("Write error %q", err) + } + var expectedFrame []byte + expectedFrame = append(expectedFrame, testHeader...) + expectedFrame = append(expectedFrame, testMaskedPayload...) 
+ if !bytes.Equal(expectedFrame, b.Bytes()) { + t.Errorf("frame expected %q got %q", expectedFrame, b.Bytes()) + } + frameReaderFactory := &hybiFrameReaderFactory{bufio.NewReader(b)} + r, err := frameReaderFactory.NewFrameReader() + if err != nil { + t.Errorf("Read error %q", err) + } + if header := r.HeaderReader(); header == nil { + t.Errorf("no header") + } else { + actualHeader := make([]byte, r.Len()) + n, err := header.Read(actualHeader) + if err != nil { + t.Errorf("Read header error %q", err) + } else { + if n < len(testHeader) { + t.Errorf("header too short %q got %q", testHeader, actualHeader[:n]) + } + if !bytes.Equal(testHeader, actualHeader[:n]) { + t.Errorf("header expected %q got %q", testHeader, actualHeader[:n]) + } + } + } + if trailer := r.TrailerReader(); trailer != nil { + t.Errorf("unexpected trailer %q", trailer) + } + frame := r.(*hybiFrameReader) + if frameHeader.Fin != frame.header.Fin || + frameHeader.OpCode != frame.header.OpCode || + len(testPayload) != int(frame.header.Length) { + t.Errorf("mismatch %v (%d) vs %v", frameHeader, len(testPayload), frame) + } + payload := make([]byte, len(testPayload)) + _, err = r.Read(payload) + if err != nil && err != io.EOF { + t.Errorf("read %v", err) + } + if !bytes.Equal(testPayload, payload) { + t.Errorf("payload %q vs %q", testPayload, payload) + } +} + +func TestHybiShortTextFrame(t *testing.T) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: TextFrame} + payload := []byte("hello") + testHybiFrame(t, []byte{0x81, 0x05}, payload, payload, frameHeader) + + payload = make([]byte, 125) + testHybiFrame(t, []byte{0x81, 125}, payload, payload, frameHeader) +} + +func TestHybiShortMaskedTextFrame(t *testing.T) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: TextFrame, + MaskingKey: []byte{0xcc, 0x55, 0x80, 0x20}} + payload := []byte("hello") + maskedPayload := []byte{0xa4, 0x30, 0xec, 0x4c, 0xa3} + header := []byte{0x81, 0x85} + header = append(header, frameHeader.MaskingKey...) 
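+	// maskedPayload is "hello" XORed byte-wise with the repeating masking key:
+	// 'h'^0xcc=0xa4, 'e'^0x55=0x30, 'l'^0x80=0xec, 'l'^0x20=0x4c, 'o'^0xcc=0xa3.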
+ testHybiFrame(t, header, payload, maskedPayload, frameHeader) +} + +func TestHybiShortBinaryFrame(t *testing.T) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: BinaryFrame} + payload := []byte("hello") + testHybiFrame(t, []byte{0x82, 0x05}, payload, payload, frameHeader) + + payload = make([]byte, 125) + testHybiFrame(t, []byte{0x82, 125}, payload, payload, frameHeader) +} + +func TestHybiControlFrame(t *testing.T) { + payload := []byte("hello") + + frameHeader := &hybiFrameHeader{Fin: true, OpCode: PingFrame} + testHybiFrame(t, []byte{0x89, 0x05}, payload, payload, frameHeader) + + frameHeader = &hybiFrameHeader{Fin: true, OpCode: PingFrame} + testHybiFrame(t, []byte{0x89, 0x00}, nil, nil, frameHeader) + + frameHeader = &hybiFrameHeader{Fin: true, OpCode: PongFrame} + testHybiFrame(t, []byte{0x8A, 0x05}, payload, payload, frameHeader) + + frameHeader = &hybiFrameHeader{Fin: true, OpCode: PongFrame} + testHybiFrame(t, []byte{0x8A, 0x00}, nil, nil, frameHeader) + + frameHeader = &hybiFrameHeader{Fin: true, OpCode: CloseFrame} + payload = []byte{0x03, 0xe8} // 1000 + testHybiFrame(t, []byte{0x88, 0x02}, payload, payload, frameHeader) +} + +func TestHybiLongFrame(t *testing.T) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: TextFrame} + payload := make([]byte, 126) + testHybiFrame(t, []byte{0x81, 126, 0x00, 126}, payload, payload, frameHeader) + + payload = make([]byte, 65535) + testHybiFrame(t, []byte{0x81, 126, 0xff, 0xff}, payload, payload, frameHeader) + + payload = make([]byte, 65536) + testHybiFrame(t, []byte{0x81, 127, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00}, payload, payload, frameHeader) +} + +func TestHybiClientRead(t *testing.T) { + wireData := []byte{0x81, 0x05, 'h', 'e', 'l', 'l', 'o', + 0x89, 0x05, 'h', 'e', 'l', 'l', 'o', // ping + 0x81, 0x05, 'w', 'o', 'r', 'l', 'd'} + br := bufio.NewReader(bytes.NewBuffer(wireData)) + bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) + conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, nil) + + msg := make([]byte, 512) + n, err := conn.Read(msg) + if err != nil { + t.Errorf("read 1st frame, error %q", err) + } + if n != 5 { + t.Errorf("read 1st frame, expect 5, got %d", n) + } + if !bytes.Equal(wireData[2:7], msg[:n]) { + t.Errorf("read 1st frame %v, got %v", wireData[2:7], msg[:n]) + } + n, err = conn.Read(msg) + if err != nil { + t.Errorf("read 2nd frame, error %q", err) + } + if n != 5 { + t.Errorf("read 2nd frame, expect 5, got %d", n) + } + if !bytes.Equal(wireData[16:21], msg[:n]) { + t.Errorf("read 2nd frame %v, got %v", wireData[16:21], msg[:n]) + } + n, err = conn.Read(msg) + if err == nil { + t.Errorf("read not EOF") + } + if n != 0 { + t.Errorf("expect read 0, got %d", n) + } +} + +func TestHybiShortRead(t *testing.T) { + wireData := []byte{0x81, 0x05, 'h', 'e', 'l', 'l', 'o', + 0x89, 0x05, 'h', 'e', 'l', 'l', 'o', // ping + 0x81, 0x05, 'w', 'o', 'r', 'l', 'd'} + br := bufio.NewReader(bytes.NewBuffer(wireData)) + bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) + conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, nil) + + step := 0 + pos := 0 + expectedPos := []int{2, 5, 16, 19} + expectedLen := []int{3, 2, 3, 2} + for { + msg := make([]byte, 3) + n, err := conn.Read(msg) + if step >= len(expectedPos) { + if err == nil { + t.Errorf("read not EOF") + } + if n != 0 { + t.Errorf("expect read 0, got %d", n) + } + return + } + pos = expectedPos[step] + endPos := pos + expectedLen[step] + if err != nil { + t.Errorf("read from %d, got error %q", pos, err) + return + } + if 
n != endPos-pos { + t.Errorf("read from %d, expect %d, got %d", pos, endPos-pos, n) + } + if !bytes.Equal(wireData[pos:endPos], msg[:n]) { + t.Errorf("read from %d, frame %v, got %v", pos, wireData[pos:endPos], msg[:n]) + } + step++ + } +} + +func TestHybiServerRead(t *testing.T) { + wireData := []byte{0x81, 0x85, 0xcc, 0x55, 0x80, 0x20, + 0xa4, 0x30, 0xec, 0x4c, 0xa3, // hello + 0x89, 0x85, 0xcc, 0x55, 0x80, 0x20, + 0xa4, 0x30, 0xec, 0x4c, 0xa3, // ping: hello + 0x81, 0x85, 0xed, 0x83, 0xb4, 0x24, + 0x9a, 0xec, 0xc6, 0x48, 0x89, // world + } + br := bufio.NewReader(bytes.NewBuffer(wireData)) + bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) + conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, new(http.Request)) + + expected := [][]byte{[]byte("hello"), []byte("world")} + + msg := make([]byte, 512) + n, err := conn.Read(msg) + if err != nil { + t.Errorf("read 1st frame, error %q", err) + } + if n != 5 { + t.Errorf("read 1st frame, expect 5, got %d", n) + } + if !bytes.Equal(expected[0], msg[:n]) { + t.Errorf("read 1st frame %q, got %q", expected[0], msg[:n]) + } + + n, err = conn.Read(msg) + if err != nil { + t.Errorf("read 2nd frame, error %q", err) + } + if n != 5 { + t.Errorf("read 2nd frame, expect 5, got %d", n) + } + if !bytes.Equal(expected[1], msg[:n]) { + t.Errorf("read 2nd frame %q, got %q", expected[1], msg[:n]) + } + + n, err = conn.Read(msg) + if err == nil { + t.Errorf("read not EOF") + } + if n != 0 { + t.Errorf("expect read 0, got %d", n) + } +} + +func TestHybiServerReadWithoutMasking(t *testing.T) { + wireData := []byte{0x81, 0x05, 'h', 'e', 'l', 'l', 'o'} + br := bufio.NewReader(bytes.NewBuffer(wireData)) + bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) + conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, new(http.Request)) + // server MUST close the connection upon receiving a non-masked frame. + msg := make([]byte, 512) + _, err := conn.Read(msg) + if err != io.EOF { + t.Errorf("read 1st frame, expect %q, but got %q", io.EOF, err) + } +} + +func TestHybiClientReadWithMasking(t *testing.T) { + wireData := []byte{0x81, 0x85, 0xcc, 0x55, 0x80, 0x20, + 0xa4, 0x30, 0xec, 0x4c, 0xa3, // hello + } + br := bufio.NewReader(bytes.NewBuffer(wireData)) + bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) + conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, nil) + + // client MUST close the connection upon receiving a masked frame. 
+	msg := make([]byte, 512)
+	_, err := conn.Read(msg)
+	if err != io.EOF {
+		t.Errorf("read 1st frame, expect %q, but got %q", io.EOF, err)
+	}
+}
+
+// Test that the hybiServerHandshaker supports the Firefox implementation,
+// which sends a Connection request header that includes (but is not
+// necessarily equal to) "upgrade".
+func TestHybiServerFirefoxHandshake(t *testing.T) {
+	config := new(Config)
+	handshaker := &hybiServerHandshaker{Config: config}
+	br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1
+Host: server.example.com
+Upgrade: websocket
+Connection: keep-alive, upgrade
+Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==
+Origin: http://example.com
+Sec-WebSocket-Protocol: chat, superchat
+Sec-WebSocket-Version: 13
+
+`))
+	req, err := http.ReadRequest(br)
+	if err != nil {
+		t.Fatal("request", err)
+	}
+	code, err := handshaker.ReadHandshake(br, req)
+	if err != nil {
+		t.Errorf("handshake failed: %v", err)
+	}
+	if code != http.StatusSwitchingProtocols {
+		t.Errorf("status expected %q but got %q", http.StatusSwitchingProtocols, code)
+	}
+	b := bytes.NewBuffer([]byte{})
+	bw := bufio.NewWriter(b)
+
+	config.Protocol = []string{"chat"}
+
+	err = handshaker.AcceptHandshake(bw)
+	if err != nil {
+		t.Errorf("handshake response failed: %v", err)
+	}
+	expectedResponse := strings.Join([]string{
+		"HTTP/1.1 101 Switching Protocols",
+		"Upgrade: websocket",
+		"Connection: Upgrade",
+		"Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=",
+		"Sec-WebSocket-Protocol: chat",
+		"", ""}, "\r\n")
+
+	if b.String() != expectedResponse {
+		t.Errorf("handshake expected %q but got %q", expectedResponse, b.String())
+	}
+}
diff --git a/vendor/golang.org/x/net/websocket/server.go b/vendor/golang.org/x/net/websocket/server.go
new file mode 100644
index 0000000..0895dea
--- /dev/null
+++ b/vendor/golang.org/x/net/websocket/server.go
@@ -0,0 +1,113 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"net/http"
+)
+
+func newServerConn(rwc io.ReadWriteCloser, buf *bufio.ReadWriter, req *http.Request, config *Config, handshake func(*Config, *http.Request) error) (conn *Conn, err error) {
+	var hs serverHandshaker = &hybiServerHandshaker{Config: config}
+	code, err := hs.ReadHandshake(buf.Reader, req)
+	if err == ErrBadWebSocketVersion {
+		fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
+		fmt.Fprintf(buf, "Sec-WebSocket-Version: %s\r\n", SupportedProtocolVersion)
+		buf.WriteString("\r\n")
+		buf.WriteString(err.Error())
+		buf.Flush()
+		return
+	}
+	if err != nil {
+		fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
+		buf.WriteString("\r\n")
+		buf.WriteString(err.Error())
+		buf.Flush()
+		return
+	}
+	if handshake != nil {
+		err = handshake(config, req)
+		if err != nil {
+			code = http.StatusForbidden
+			fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
+			buf.WriteString("\r\n")
+			buf.Flush()
+			return
+		}
+	}
+	err = hs.AcceptHandshake(buf.Writer)
+	if err != nil {
+		code = http.StatusBadRequest
+		fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
+		buf.WriteString("\r\n")
+		buf.Flush()
+		return
+	}
+	conn = hs.NewServerConn(buf, rwc, req)
+	return
+}
+
+// Server represents a server of a WebSocket.
+type Server struct {
+	// Config is a WebSocket configuration for new WebSocket connections.
+	Config
+
+	// Handshake is an optional function called during the WebSocket handshake.
+	// For example, you can check the Origin header and reject the request,
+	// or choose not to check it at all. You can also select one of
+	// config.Protocol here.
+	Handshake func(*Config, *http.Request) error
+
+	// Handler handles a WebSocket connection.
+	Handler
+}
+
+// ServeHTTP implements the http.Handler interface for a WebSocket.
+func (s Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	s.serveWebSocket(w, req)
+}
+
+func (s Server) serveWebSocket(w http.ResponseWriter, req *http.Request) {
+	rwc, buf, err := w.(http.Hijacker).Hijack()
+	if err != nil {
+		panic("Hijack failed: " + err.Error())
+	}
+	// The server should abort the WebSocket connection if it finds that
+	// the client did not send a handshake that matches the protocol
+	// specification.
+	defer rwc.Close()
+	conn, err := newServerConn(rwc, buf, req, &s.Config, s.Handshake)
+	if err != nil {
+		return
+	}
+	if conn == nil {
+		panic("unexpected nil conn")
+	}
+	s.Handler(conn)
+}
+
+// Handler is a simple interface to a WebSocket browser client.
+// By default it checks that the Origin header is a valid URL; you may also
+// want to verify websocket.Conn.Config().Origin in the func.
+// If you use Server instead of Handler, you can call websocket.Origin and
+// check the origin in your Handshake func. To accept non-browser clients,
+// which do not send an Origin header, set a Server.Handshake that does not
+// check the origin.
+type Handler func(*Conn)
+
+func checkOrigin(config *Config, req *http.Request) (err error) {
+	config.Origin, err = Origin(config, req)
+	if err == nil && config.Origin == nil {
+		return fmt.Errorf("null origin")
+	}
+	return err
+}
+
+// ServeHTTP implements the http.Handler interface for a WebSocket.
+func (h Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	s := Server{Handler: h, Handshake: checkOrigin}
+	s.serveWebSocket(w, req)
+}
diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go
new file mode 100644
index 0000000..e242c89
--- /dev/null
+++ b/vendor/golang.org/x/net/websocket/websocket.go
@@ -0,0 +1,448 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package websocket implements a client and server for the WebSocket protocol
+// as specified in RFC 6455.
+//
+// This package currently lacks some features found in an alternative
+// and more actively maintained WebSocket package:
+//
+//     https://godoc.org/github.com/gorilla/websocket
+//
+package websocket // import "golang.org/x/net/websocket"
+
+import (
+	"bufio"
+	"crypto/tls"
+	"encoding/json"
+	"errors"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"sync"
+	"time"
+)
+
+const (
+	ProtocolVersionHybi13    = 13
+	ProtocolVersionHybi      = ProtocolVersionHybi13
+	SupportedProtocolVersion = "13"
+
+	ContinuationFrame = 0
+	TextFrame         = 1
+	BinaryFrame       = 2
+	CloseFrame        = 8
+	PingFrame         = 9
+	PongFrame         = 10
+	UnknownFrame      = 255
+
+	DefaultMaxPayloadBytes = 32 << 20 // 32MB
+)
+
+// ProtocolError represents WebSocket protocol errors.
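+// Handshake and framing failures, such as ErrBadStatus or ErrBadMaskingKey,
+// are reported as values of this type, so callers can test for them:
+//
+//	if _, ok := err.(*ProtocolError); ok {
+//		// the peer violated the WebSocket protocol
+//	}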
+type ProtocolError struct {
+	ErrorString string
+}
+
+func (err *ProtocolError) Error() string { return err.ErrorString }
+
+var (
+	ErrBadProtocolVersion   = &ProtocolError{"bad protocol version"}
+	ErrBadScheme            = &ProtocolError{"bad scheme"}
+	ErrBadStatus            = &ProtocolError{"bad status"}
+	ErrBadUpgrade           = &ProtocolError{"missing or bad upgrade"}
+	ErrBadWebSocketOrigin   = &ProtocolError{"missing or bad WebSocket-Origin"}
+	ErrBadWebSocketLocation = &ProtocolError{"missing or bad WebSocket-Location"}
+	ErrBadWebSocketProtocol = &ProtocolError{"missing or bad WebSocket-Protocol"}
+	ErrBadWebSocketVersion  = &ProtocolError{"missing or bad WebSocket Version"}
+	ErrChallengeResponse    = &ProtocolError{"mismatch challenge/response"}
+	ErrBadFrame             = &ProtocolError{"bad frame"}
+	ErrBadFrameBoundary     = &ProtocolError{"not on frame boundary"}
+	ErrNotWebSocket         = &ProtocolError{"not websocket protocol"}
+	ErrBadRequestMethod     = &ProtocolError{"bad method"}
+	ErrNotSupported         = &ProtocolError{"not supported"}
+)
+
+// ErrFrameTooLarge is returned by Codec's Receive method if the payload size
+// exceeds the limit set by Conn.MaxPayloadBytes.
+var ErrFrameTooLarge = errors.New("websocket: frame payload size exceeds limit")
+
+// Addr is an implementation of net.Addr for WebSocket.
+type Addr struct {
+	*url.URL
+}
+
+// Network returns the network type for a WebSocket, "websocket".
+func (addr *Addr) Network() string { return "websocket" }
+
+// Config is a WebSocket configuration.
+type Config struct {
+	// A WebSocket server address.
+	Location *url.URL
+
+	// A WebSocket client origin.
+	Origin *url.URL
+
+	// WebSocket subprotocols.
+	Protocol []string
+
+	// WebSocket protocol version.
+	Version int
+
+	// TLS config for secure WebSocket (wss).
+	TlsConfig *tls.Config
+
+	// Additional header fields to be sent in WebSocket opening handshake.
+	Header http.Header
+
+	// Dialer used when opening websocket connections.
+	Dialer *net.Dialer
+
+	handshakeData map[string]string
+}
+
+// serverHandshaker is an interface to handle the WebSocket server-side handshake.
+type serverHandshaker interface {
+	// ReadHandshake reads the handshake request message from the client.
+	// Returns an http response code and error, if any.
+	ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error)
+
+	// AcceptHandshake accepts the client handshake request and sends the
+	// handshake response back to the client.
+	AcceptHandshake(buf *bufio.Writer) (err error)
+
+	// NewServerConn creates a new WebSocket connection.
+	NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) (conn *Conn)
+}
+
+// frameReader is an interface to read a WebSocket frame.
+type frameReader interface {
+	// Reader is to read the payload of the frame.
+	io.Reader
+
+	// PayloadType returns the payload type.
+	PayloadType() byte
+
+	// HeaderReader returns a reader to read the header of the frame.
+	HeaderReader() io.Reader
+
+	// TrailerReader returns a reader to read the trailer of the frame.
+	// If it returns nil, there is no trailer in the frame.
+	TrailerReader() io.Reader
+
+	// Len returns the total length of the frame, including header and trailer.
+	Len() int
+}
+
+// frameReaderFactory is an interface to create a new frame reader.
+type frameReaderFactory interface {
+	NewFrameReader() (r frameReader, err error)
+}
+
+// frameWriter is an interface to write a WebSocket frame.
+type frameWriter interface {
+	// Writer is to write the payload of the frame.
+	io.WriteCloser
+}
+
+// frameWriterFactory is an interface to create a new frame writer.
+type frameWriterFactory interface { + NewFrameWriter(payloadType byte) (w frameWriter, err error) +} + +type frameHandler interface { + HandleFrame(frame frameReader) (r frameReader, err error) + WriteClose(status int) (err error) +} + +// Conn represents a WebSocket connection. +// +// Multiple goroutines may invoke methods on a Conn simultaneously. +type Conn struct { + config *Config + request *http.Request + + buf *bufio.ReadWriter + rwc io.ReadWriteCloser + + rio sync.Mutex + frameReaderFactory + frameReader + + wio sync.Mutex + frameWriterFactory + + frameHandler + PayloadType byte + defaultCloseStatus int + + // MaxPayloadBytes limits the size of frame payload received over Conn + // by Codec's Receive method. If zero, DefaultMaxPayloadBytes is used. + MaxPayloadBytes int +} + +// Read implements the io.Reader interface: +// it reads data of a frame from the WebSocket connection. +// if msg is not large enough for the frame data, it fills the msg and next Read +// will read the rest of the frame data. +// it reads Text frame or Binary frame. +func (ws *Conn) Read(msg []byte) (n int, err error) { + ws.rio.Lock() + defer ws.rio.Unlock() +again: + if ws.frameReader == nil { + frame, err := ws.frameReaderFactory.NewFrameReader() + if err != nil { + return 0, err + } + ws.frameReader, err = ws.frameHandler.HandleFrame(frame) + if err != nil { + return 0, err + } + if ws.frameReader == nil { + goto again + } + } + n, err = ws.frameReader.Read(msg) + if err == io.EOF { + if trailer := ws.frameReader.TrailerReader(); trailer != nil { + io.Copy(ioutil.Discard, trailer) + } + ws.frameReader = nil + goto again + } + return n, err +} + +// Write implements the io.Writer interface: +// it writes data as a frame to the WebSocket connection. +func (ws *Conn) Write(msg []byte) (n int, err error) { + ws.wio.Lock() + defer ws.wio.Unlock() + w, err := ws.frameWriterFactory.NewFrameWriter(ws.PayloadType) + if err != nil { + return 0, err + } + n, err = w.Write(msg) + w.Close() + return n, err +} + +// Close implements the io.Closer interface. +func (ws *Conn) Close() error { + err := ws.frameHandler.WriteClose(ws.defaultCloseStatus) + err1 := ws.rwc.Close() + if err != nil { + return err + } + return err1 +} + +func (ws *Conn) IsClientConn() bool { return ws.request == nil } +func (ws *Conn) IsServerConn() bool { return ws.request != nil } + +// LocalAddr returns the WebSocket Origin for the connection for client, or +// the WebSocket location for server. +func (ws *Conn) LocalAddr() net.Addr { + if ws.IsClientConn() { + return &Addr{ws.config.Origin} + } + return &Addr{ws.config.Location} +} + +// RemoteAddr returns the WebSocket location for the connection for client, or +// the Websocket Origin for server. +func (ws *Conn) RemoteAddr() net.Addr { + if ws.IsClientConn() { + return &Addr{ws.config.Location} + } + return &Addr{ws.config.Origin} +} + +var errSetDeadline = errors.New("websocket: cannot set deadline: not using a net.Conn") + +// SetDeadline sets the connection's network read & write deadlines. +func (ws *Conn) SetDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetDeadline(t) + } + return errSetDeadline +} + +// SetReadDeadline sets the connection's network read deadline. +func (ws *Conn) SetReadDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetReadDeadline(t) + } + return errSetDeadline +} + +// SetWriteDeadline sets the connection's network write deadline. 
+func (ws *Conn) SetWriteDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetWriteDeadline(t) + } + return errSetDeadline +} + +// Config returns the WebSocket config. +func (ws *Conn) Config() *Config { return ws.config } + +// Request returns the http request upgraded to the WebSocket. +// It is nil for client side. +func (ws *Conn) Request() *http.Request { return ws.request } + +// Codec represents a symmetric pair of functions that implement a codec. +type Codec struct { + Marshal func(v interface{}) (data []byte, payloadType byte, err error) + Unmarshal func(data []byte, payloadType byte, v interface{}) (err error) +} + +// Send sends v marshaled by cd.Marshal as single frame to ws. +func (cd Codec) Send(ws *Conn, v interface{}) (err error) { + data, payloadType, err := cd.Marshal(v) + if err != nil { + return err + } + ws.wio.Lock() + defer ws.wio.Unlock() + w, err := ws.frameWriterFactory.NewFrameWriter(payloadType) + if err != nil { + return err + } + _, err = w.Write(data) + w.Close() + return err +} + +// Receive receives single frame from ws, unmarshaled by cd.Unmarshal and stores +// in v. The whole frame payload is read to an in-memory buffer; max size of +// payload is defined by ws.MaxPayloadBytes. If frame payload size exceeds +// limit, ErrFrameTooLarge is returned; in this case frame is not read off wire +// completely. The next call to Receive would read and discard leftover data of +// previous oversized frame before processing next frame. +func (cd Codec) Receive(ws *Conn, v interface{}) (err error) { + ws.rio.Lock() + defer ws.rio.Unlock() + if ws.frameReader != nil { + _, err = io.Copy(ioutil.Discard, ws.frameReader) + if err != nil { + return err + } + ws.frameReader = nil + } +again: + frame, err := ws.frameReaderFactory.NewFrameReader() + if err != nil { + return err + } + frame, err = ws.frameHandler.HandleFrame(frame) + if err != nil { + return err + } + if frame == nil { + goto again + } + maxPayloadBytes := ws.MaxPayloadBytes + if maxPayloadBytes == 0 { + maxPayloadBytes = DefaultMaxPayloadBytes + } + if hf, ok := frame.(*hybiFrameReader); ok && hf.header.Length > int64(maxPayloadBytes) { + // payload size exceeds limit, no need to call Unmarshal + // + // set frameReader to current oversized frame so that + // the next call to this function can drain leftover + // data before processing the next frame + ws.frameReader = frame + return ErrFrameTooLarge + } + payloadType := frame.PayloadType() + data, err := ioutil.ReadAll(frame) + if err != nil { + return err + } + return cd.Unmarshal(data, payloadType, v) +} + +func marshal(v interface{}) (msg []byte, payloadType byte, err error) { + switch data := v.(type) { + case string: + return []byte(data), TextFrame, nil + case []byte: + return data, BinaryFrame, nil + } + return nil, UnknownFrame, ErrNotSupported +} + +func unmarshal(msg []byte, payloadType byte, v interface{}) (err error) { + switch data := v.(type) { + case *string: + *data = string(msg) + return nil + case *[]byte: + *data = msg + return nil + } + return ErrNotSupported +} + +/* +Message is a codec to send/receive text/binary data in a frame on WebSocket connection. +To send/receive text frame, use string type. +To send/receive binary frame, use []byte type. 
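+
+A frame is read fully into memory, so Receive returns ErrFrameTooLarge when a
+payload exceeds Conn.MaxPayloadBytes (DefaultMaxPayloadBytes if zero).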
+ +Trivial usage: + + import "websocket" + + // receive text frame + var message string + websocket.Message.Receive(ws, &message) + + // send text frame + message = "hello" + websocket.Message.Send(ws, message) + + // receive binary frame + var data []byte + websocket.Message.Receive(ws, &data) + + // send binary frame + data = []byte{0, 1, 2} + websocket.Message.Send(ws, data) + +*/ +var Message = Codec{marshal, unmarshal} + +func jsonMarshal(v interface{}) (msg []byte, payloadType byte, err error) { + msg, err = json.Marshal(v) + return msg, TextFrame, err +} + +func jsonUnmarshal(msg []byte, payloadType byte, v interface{}) (err error) { + return json.Unmarshal(msg, v) +} + +/* +JSON is a codec to send/receive JSON data in a frame from a WebSocket connection. + +Trivial usage: + + import "websocket" + + type T struct { + Msg string + Count int + } + + // receive JSON type T + var data T + websocket.JSON.Receive(ws, &data) + + // send JSON type T + websocket.JSON.Send(ws, data) +*/ +var JSON = Codec{jsonMarshal, jsonUnmarshal} diff --git a/vendor/golang.org/x/net/websocket/websocket_test.go b/vendor/golang.org/x/net/websocket/websocket_test.go new file mode 100644 index 0000000..2054ce8 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/websocket_test.go @@ -0,0 +1,665 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "crypto/rand" + "fmt" + "io" + "log" + "net" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "runtime" + "strings" + "sync" + "testing" + "time" +) + +var serverAddr string +var once sync.Once + +func echoServer(ws *Conn) { + defer ws.Close() + io.Copy(ws, ws) +} + +type Count struct { + S string + N int +} + +func countServer(ws *Conn) { + defer ws.Close() + for { + var count Count + err := JSON.Receive(ws, &count) + if err != nil { + return + } + count.N++ + count.S = strings.Repeat(count.S, count.N) + err = JSON.Send(ws, count) + if err != nil { + return + } + } +} + +type testCtrlAndDataHandler struct { + hybiFrameHandler +} + +func (h *testCtrlAndDataHandler) WritePing(b []byte) (int, error) { + h.hybiFrameHandler.conn.wio.Lock() + defer h.hybiFrameHandler.conn.wio.Unlock() + w, err := h.hybiFrameHandler.conn.frameWriterFactory.NewFrameWriter(PingFrame) + if err != nil { + return 0, err + } + n, err := w.Write(b) + w.Close() + return n, err +} + +func ctrlAndDataServer(ws *Conn) { + defer ws.Close() + h := &testCtrlAndDataHandler{hybiFrameHandler: hybiFrameHandler{conn: ws}} + ws.frameHandler = h + + go func() { + for i := 0; ; i++ { + var b []byte + if i%2 != 0 { // with or without payload + b = []byte(fmt.Sprintf("#%d-CONTROL-FRAME-FROM-SERVER", i)) + } + if _, err := h.WritePing(b); err != nil { + break + } + if _, err := h.WritePong(b); err != nil { // unsolicited pong + break + } + time.Sleep(10 * time.Millisecond) + } + }() + + b := make([]byte, 128) + for { + n, err := ws.Read(b) + if err != nil { + break + } + if _, err := ws.Write(b[:n]); err != nil { + break + } + } +} + +func subProtocolHandshake(config *Config, req *http.Request) error { + for _, proto := range config.Protocol { + if proto == "chat" { + config.Protocol = []string{proto} + return nil + } + } + return ErrBadWebSocketProtocol +} + +func subProtoServer(ws *Conn) { + for _, proto := range ws.Config().Protocol { + io.WriteString(ws, proto) + } +} + +func startServer() { + http.Handle("/echo", Handler(echoServer)) + 
http.Handle("/count", Handler(countServer)) + http.Handle("/ctrldata", Handler(ctrlAndDataServer)) + subproto := Server{ + Handshake: subProtocolHandshake, + Handler: Handler(subProtoServer), + } + http.Handle("/subproto", subproto) + server := httptest.NewServer(nil) + serverAddr = server.Listener.Addr().String() + log.Print("Test WebSocket server listening on ", serverAddr) +} + +func newConfig(t *testing.T, path string) *Config { + config, _ := NewConfig(fmt.Sprintf("ws://%s%s", serverAddr, path), "http://localhost") + return config +} + +func TestEcho(t *testing.T) { + once.Do(startServer) + + // websocket.Dial() + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + conn, err := NewClient(newConfig(t, "/echo"), client) + if err != nil { + t.Errorf("WebSocket handshake error: %v", err) + return + } + + msg := []byte("hello, world\n") + if _, err := conn.Write(msg); err != nil { + t.Errorf("Write: %v", err) + } + var actual_msg = make([]byte, 512) + n, err := conn.Read(actual_msg) + if err != nil { + t.Errorf("Read: %v", err) + } + actual_msg = actual_msg[0:n] + if !bytes.Equal(msg, actual_msg) { + t.Errorf("Echo: expected %q got %q", msg, actual_msg) + } + conn.Close() +} + +func TestAddr(t *testing.T) { + once.Do(startServer) + + // websocket.Dial() + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + conn, err := NewClient(newConfig(t, "/echo"), client) + if err != nil { + t.Errorf("WebSocket handshake error: %v", err) + return + } + + ra := conn.RemoteAddr().String() + if !strings.HasPrefix(ra, "ws://") || !strings.HasSuffix(ra, "/echo") { + t.Errorf("Bad remote addr: %v", ra) + } + la := conn.LocalAddr().String() + if !strings.HasPrefix(la, "http://") { + t.Errorf("Bad local addr: %v", la) + } + conn.Close() +} + +func TestCount(t *testing.T) { + once.Do(startServer) + + // websocket.Dial() + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + conn, err := NewClient(newConfig(t, "/count"), client) + if err != nil { + t.Errorf("WebSocket handshake error: %v", err) + return + } + + var count Count + count.S = "hello" + if err := JSON.Send(conn, count); err != nil { + t.Errorf("Write: %v", err) + } + if err := JSON.Receive(conn, &count); err != nil { + t.Errorf("Read: %v", err) + } + if count.N != 1 { + t.Errorf("count: expected %d got %d", 1, count.N) + } + if count.S != "hello" { + t.Errorf("count: expected %q got %q", "hello", count.S) + } + if err := JSON.Send(conn, count); err != nil { + t.Errorf("Write: %v", err) + } + if err := JSON.Receive(conn, &count); err != nil { + t.Errorf("Read: %v", err) + } + if count.N != 2 { + t.Errorf("count: expected %d got %d", 2, count.N) + } + if count.S != "hellohello" { + t.Errorf("count: expected %q got %q", "hellohello", count.S) + } + conn.Close() +} + +func TestWithQuery(t *testing.T) { + once.Do(startServer) + + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + + config := newConfig(t, "/echo") + config.Location, err = url.ParseRequestURI(fmt.Sprintf("ws://%s/echo?q=v", serverAddr)) + if err != nil { + t.Fatal("location url", err) + } + + ws, err := NewClient(config, client) + if err != nil { + t.Errorf("WebSocket handshake: %v", err) + return + } + ws.Close() +} + +func testWithProtocol(t *testing.T, subproto []string) (string, error) { + once.Do(startServer) + + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + + config := 
newConfig(t, "/subproto") + config.Protocol = subproto + + ws, err := NewClient(config, client) + if err != nil { + return "", err + } + msg := make([]byte, 16) + n, err := ws.Read(msg) + if err != nil { + return "", err + } + ws.Close() + return string(msg[:n]), nil +} + +func TestWithProtocol(t *testing.T) { + proto, err := testWithProtocol(t, []string{"chat"}) + if err != nil { + t.Errorf("SubProto: unexpected error: %v", err) + } + if proto != "chat" { + t.Errorf("SubProto: expected %q, got %q", "chat", proto) + } +} + +func TestWithTwoProtocol(t *testing.T) { + proto, err := testWithProtocol(t, []string{"test", "chat"}) + if err != nil { + t.Errorf("SubProto: unexpected error: %v", err) + } + if proto != "chat" { + t.Errorf("SubProto: expected %q, got %q", "chat", proto) + } +} + +func TestWithBadProtocol(t *testing.T) { + _, err := testWithProtocol(t, []string{"test"}) + if err != ErrBadStatus { + t.Errorf("SubProto: expected %v, got %v", ErrBadStatus, err) + } +} + +func TestHTTP(t *testing.T) { + once.Do(startServer) + + // If the client did not send a handshake that matches the protocol + // specification, the server MUST return an HTTP response with an + // appropriate error code (such as 400 Bad Request) + resp, err := http.Get(fmt.Sprintf("http://%s/echo", serverAddr)) + if err != nil { + t.Errorf("Get: error %#v", err) + return + } + if resp == nil { + t.Error("Get: resp is null") + return + } + if resp.StatusCode != http.StatusBadRequest { + t.Errorf("Get: expected %q got %q", http.StatusBadRequest, resp.StatusCode) + } +} + +func TestTrailingSpaces(t *testing.T) { + // http://code.google.com/p/go/issues/detail?id=955 + // The last runs of this create keys with trailing spaces that should not be + // generated by the client. + once.Do(startServer) + config := newConfig(t, "/echo") + for i := 0; i < 30; i++ { + // body + ws, err := DialConfig(config) + if err != nil { + t.Errorf("Dial #%d failed: %v", i, err) + break + } + ws.Close() + } +} + +func TestDialConfigBadVersion(t *testing.T) { + once.Do(startServer) + config := newConfig(t, "/echo") + config.Version = 1234 + + _, err := DialConfig(config) + + if dialerr, ok := err.(*DialError); ok { + if dialerr.Err != ErrBadProtocolVersion { + t.Errorf("dial expected err %q but got %q", ErrBadProtocolVersion, dialerr.Err) + } + } +} + +func TestDialConfigWithDialer(t *testing.T) { + once.Do(startServer) + config := newConfig(t, "/echo") + config.Dialer = &net.Dialer{ + Deadline: time.Now().Add(-time.Minute), + } + _, err := DialConfig(config) + dialerr, ok := err.(*DialError) + if !ok { + t.Fatalf("DialError expected, got %#v", err) + } + neterr, ok := dialerr.Err.(*net.OpError) + if !ok { + t.Fatalf("net.OpError error expected, got %#v", dialerr.Err) + } + if !neterr.Timeout() { + t.Fatalf("expected timeout error, got %#v", neterr) + } +} + +func TestSmallBuffer(t *testing.T) { + // http://code.google.com/p/go/issues/detail?id=1145 + // Read should be able to handle reading a fragment of a frame. 
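+	// Conn.Read keeps the current frameReader across calls, so a partially
+	// read frame is resumed by the next Read until the frame is drained.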
+ once.Do(startServer) + + // websocket.Dial() + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + conn, err := NewClient(newConfig(t, "/echo"), client) + if err != nil { + t.Errorf("WebSocket handshake error: %v", err) + return + } + + msg := []byte("hello, world\n") + if _, err := conn.Write(msg); err != nil { + t.Errorf("Write: %v", err) + } + var small_msg = make([]byte, 8) + n, err := conn.Read(small_msg) + if err != nil { + t.Errorf("Read: %v", err) + } + if !bytes.Equal(msg[:len(small_msg)], small_msg) { + t.Errorf("Echo: expected %q got %q", msg[:len(small_msg)], small_msg) + } + var second_msg = make([]byte, len(msg)) + n, err = conn.Read(second_msg) + if err != nil { + t.Errorf("Read: %v", err) + } + second_msg = second_msg[0:n] + if !bytes.Equal(msg[len(small_msg):], second_msg) { + t.Errorf("Echo: expected %q got %q", msg[len(small_msg):], second_msg) + } + conn.Close() +} + +var parseAuthorityTests = []struct { + in *url.URL + out string +}{ + { + &url.URL{ + Scheme: "ws", + Host: "www.google.com", + }, + "www.google.com:80", + }, + { + &url.URL{ + Scheme: "wss", + Host: "www.google.com", + }, + "www.google.com:443", + }, + { + &url.URL{ + Scheme: "ws", + Host: "www.google.com:80", + }, + "www.google.com:80", + }, + { + &url.URL{ + Scheme: "wss", + Host: "www.google.com:443", + }, + "www.google.com:443", + }, + // some invalid ones for parseAuthority. parseAuthority doesn't + // concern itself with the scheme unless it actually knows about it + { + &url.URL{ + Scheme: "http", + Host: "www.google.com", + }, + "www.google.com", + }, + { + &url.URL{ + Scheme: "http", + Host: "www.google.com:80", + }, + "www.google.com:80", + }, + { + &url.URL{ + Scheme: "asdf", + Host: "127.0.0.1", + }, + "127.0.0.1", + }, + { + &url.URL{ + Scheme: "asdf", + Host: "www.google.com", + }, + "www.google.com", + }, +} + +func TestParseAuthority(t *testing.T) { + for _, tt := range parseAuthorityTests { + out := parseAuthority(tt.in) + if out != tt.out { + t.Errorf("got %v; want %v", out, tt.out) + } + } +} + +type closerConn struct { + net.Conn + closed int // count of the number of times Close was called +} + +func (c *closerConn) Close() error { + c.closed++ + return c.Conn.Close() +} + +func TestClose(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("see golang.org/issue/11454") + } + + once.Do(startServer) + + conn, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + + cc := closerConn{Conn: conn} + + client, err := NewClient(newConfig(t, "/echo"), &cc) + if err != nil { + t.Fatalf("WebSocket handshake: %v", err) + } + + // set the deadline to ten minutes ago, which will have expired by the time + // client.Close sends the close status frame. 
+ conn.SetDeadline(time.Now().Add(-10 * time.Minute)) + + if err := client.Close(); err == nil { + t.Errorf("ws.Close(): expected error, got %v", err) + } + if cc.closed < 1 { + t.Fatalf("ws.Close(): expected underlying ws.rwc.Close to be called > 0 times, got: %v", cc.closed) + } +} + +var originTests = []struct { + req *http.Request + origin *url.URL +}{ + { + req: &http.Request{ + Header: http.Header{ + "Origin": []string{"http://www.example.com"}, + }, + }, + origin: &url.URL{ + Scheme: "http", + Host: "www.example.com", + }, + }, + { + req: &http.Request{}, + }, +} + +func TestOrigin(t *testing.T) { + conf := newConfig(t, "/echo") + conf.Version = ProtocolVersionHybi13 + for i, tt := range originTests { + origin, err := Origin(conf, tt.req) + if err != nil { + t.Error(err) + continue + } + if !reflect.DeepEqual(origin, tt.origin) { + t.Errorf("#%d: got origin %v; want %v", i, origin, tt.origin) + continue + } + } +} + +func TestCtrlAndData(t *testing.T) { + once.Do(startServer) + + c, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal(err) + } + ws, err := NewClient(newConfig(t, "/ctrldata"), c) + if err != nil { + t.Fatal(err) + } + defer ws.Close() + + h := &testCtrlAndDataHandler{hybiFrameHandler: hybiFrameHandler{conn: ws}} + ws.frameHandler = h + + b := make([]byte, 128) + for i := 0; i < 2; i++ { + data := []byte(fmt.Sprintf("#%d-DATA-FRAME-FROM-CLIENT", i)) + if _, err := ws.Write(data); err != nil { + t.Fatalf("#%d: %v", i, err) + } + var ctrl []byte + if i%2 != 0 { // with or without payload + ctrl = []byte(fmt.Sprintf("#%d-CONTROL-FRAME-FROM-CLIENT", i)) + } + if _, err := h.WritePing(ctrl); err != nil { + t.Fatalf("#%d: %v", i, err) + } + n, err := ws.Read(b) + if err != nil { + t.Fatalf("#%d: %v", i, err) + } + if !bytes.Equal(b[:n], data) { + t.Fatalf("#%d: got %v; want %v", i, b[:n], data) + } + } +} + +func TestCodec_ReceiveLimited(t *testing.T) { + const limit = 2048 + var payloads [][]byte + for _, size := range []int{ + 1024, + 2048, + 4096, // receive of this message would be interrupted due to limit + 2048, // this one is to make sure next receive recovers discarding leftovers + } { + b := make([]byte, size) + rand.Read(b) + payloads = append(payloads, b) + } + handlerDone := make(chan struct{}) + limitedHandler := func(ws *Conn) { + defer close(handlerDone) + ws.MaxPayloadBytes = limit + defer ws.Close() + for i, p := range payloads { + t.Logf("payload #%d (size %d, exceeds limit: %v)", i, len(p), len(p) > limit) + var recv []byte + err := Message.Receive(ws, &recv) + switch err { + case nil: + case ErrFrameTooLarge: + if len(p) <= limit { + t.Fatalf("unexpected frame size limit: expected %d bytes of payload having limit at %d", len(p), limit) + } + continue + default: + t.Fatalf("unexpected error: %v (want either nil or ErrFrameTooLarge)", err) + } + if len(recv) > limit { + t.Fatalf("received %d bytes of payload having limit at %d", len(recv), limit) + } + if !bytes.Equal(p, recv) { + t.Fatalf("received payload differs:\ngot:\t%v\nwant:\t%v", recv, p) + } + } + } + server := httptest.NewServer(Handler(limitedHandler)) + defer server.CloseClientConnections() + defer server.Close() + addr := server.Listener.Addr().String() + ws, err := Dial("ws://"+addr+"/", "", "http://localhost/") + if err != nil { + t.Fatal(err) + } + defer ws.Close() + for i, p := range payloads { + if err := Message.Send(ws, p); err != nil { + t.Fatalf("payload #%d (size %d): %v", i, len(p), err) + } + } + <-handlerDone +} diff --git a/vendor/golang.org/x/net/xsrftoken/xsrf.go 
b/vendor/golang.org/x/net/xsrftoken/xsrf.go new file mode 100644 index 0000000..bc861e1 --- /dev/null +++ b/vendor/golang.org/x/net/xsrftoken/xsrf.go @@ -0,0 +1,94 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xsrftoken provides methods for generating and validating secure XSRF tokens. +package xsrftoken // import "golang.org/x/net/xsrftoken" + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/subtle" + "encoding/base64" + "fmt" + "strconv" + "strings" + "time" +) + +// Timeout is the duration for which XSRF tokens are valid. +// It is exported so clients may set cookie timeouts that match generated tokens. +const Timeout = 24 * time.Hour + +// clean sanitizes a string for inclusion in a token by replacing all ":"s. +func clean(s string) string { + return strings.Replace(s, ":", "_", -1) +} + +// Generate returns a URL-safe secure XSRF token that expires in 24 hours. +// +// key is a secret key for your application; it must be non-empty. +// userID is an optional unique identifier for the user. +// actionID is an optional action the user is taking (e.g. POSTing to a particular path). +func Generate(key, userID, actionID string) string { + return generateTokenAtTime(key, userID, actionID, time.Now()) +} + +// generateTokenAtTime is like Generate, but the token expires 24 hours after the given time. +func generateTokenAtTime(key, userID, actionID string, now time.Time) string { + if len(key) == 0 { + panic("zero length xsrf secret key") + } + // Round time up and convert to milliseconds. + milliTime := (now.UnixNano() + 1e6 - 1) / 1e6 + + h := hmac.New(sha1.New, []byte(key)) + fmt.Fprintf(h, "%s:%s:%d", clean(userID), clean(actionID), milliTime) + + // Get the padded base64 string, then remove the padding. + tok := string(h.Sum(nil)) + tok = base64.URLEncoding.EncodeToString([]byte(tok)) + tok = strings.TrimRight(tok, "=") + + return fmt.Sprintf("%s:%d", tok, milliTime) +} + +// Valid reports whether a token is a valid, unexpired token returned by Generate. +func Valid(token, key, userID, actionID string) bool { + return validTokenAtTime(token, key, userID, actionID, time.Now()) +} + +// validTokenAtTime reports whether a token is valid at the given time. +func validTokenAtTime(token, key, userID, actionID string, now time.Time) bool { + if len(key) == 0 { + panic("zero length xsrf secret key") + } + // Extract the issue time of the token. + sep := strings.LastIndex(token, ":") + if sep < 0 { + return false + } + millis, err := strconv.ParseInt(token[sep+1:], 10, 64) + if err != nil { + return false + } + issueTime := time.Unix(0, millis*1e6) + + // Check that the token is not expired. + if now.Sub(issueTime) >= Timeout { + return false + } + + // Check that the token is not from the future. + // Allow 1 minute grace period in case the token is being verified on a + // machine whose clock is behind the machine that issued the token. + if issueTime.After(now.Add(1 * time.Minute)) { + return false + } + + expected := generateTokenAtTime(key, userID, actionID, issueTime) + + // Check that the token matches the expected value. + // Use constant time comparison to avoid timing attacks.
+ return subtle.ConstantTimeCompare([]byte(token), []byte(expected)) == 1 +} diff --git a/vendor/golang.org/x/net/xsrftoken/xsrf_test.go b/vendor/golang.org/x/net/xsrftoken/xsrf_test.go new file mode 100644 index 0000000..6c8e7d9 --- /dev/null +++ b/vendor/golang.org/x/net/xsrftoken/xsrf_test.go @@ -0,0 +1,83 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xsrftoken + +import ( + "encoding/base64" + "testing" + "time" +) + +const ( + key = "quay" + userID = "12345678" + actionID = "POST /form" +) + +var ( + now = time.Now() + oneMinuteFromNow = now.Add(1 * time.Minute) +) + +func TestValidToken(t *testing.T) { + tok := generateTokenAtTime(key, userID, actionID, now) + if !validTokenAtTime(tok, key, userID, actionID, oneMinuteFromNow) { + t.Error("One minute later: Expected token to be valid") + } + if !validTokenAtTime(tok, key, userID, actionID, now.Add(Timeout-1*time.Nanosecond)) { + t.Error("Just before timeout: Expected token to be valid") + } + if !validTokenAtTime(tok, key, userID, actionID, now.Add(-1*time.Minute+1*time.Millisecond)) { + t.Error("One minute in the past: Expected token to be valid") + } +} + +// TestSeparatorReplacement tests that separators are being correctly substituted. +func TestSeparatorReplacement(t *testing.T) { + tok := generateTokenAtTime("foo:bar", "baz", "wah", now) + tok2 := generateTokenAtTime("foo", "bar:baz", "wah", now) + if tok == tok2 { + t.Errorf("Expected generated tokens to be different") + } +} + +func TestInvalidToken(t *testing.T) { + invalidTokenTests := []struct { + name, key, userID, actionID string + t time.Time + }{ + {"Bad key", "foobar", userID, actionID, oneMinuteFromNow}, + {"Bad userID", key, "foobar", actionID, oneMinuteFromNow}, + {"Bad actionID", key, userID, "foobar", oneMinuteFromNow}, + {"Expired", key, userID, actionID, now.Add(Timeout + 1*time.Millisecond)}, + {"More than 1 minute from the future", key, userID, actionID, now.Add(-1*time.Nanosecond - 1*time.Minute)}, + } + + tok := generateTokenAtTime(key, userID, actionID, now) + for _, itt := range invalidTokenTests { + if validTokenAtTime(tok, itt.key, itt.userID, itt.actionID, itt.t) { + t.Errorf("%v: Expected token to be invalid", itt.name) + } + } +} + +// TestValidateBadData primarily tests that no unexpected panics are triggered +// during parsing. +func TestValidateBadData(t *testing.T) { + badDataTests := []struct { + name, tok string + }{ + {"Invalid Base64", "ASDab24(@)$*=="}, + {"No delimiter", base64.URLEncoding.EncodeToString([]byte("foobar12345678"))}, + {"Invalid time", base64.URLEncoding.EncodeToString([]byte("foobar:foobar"))}, + {"Wrong length", "1234" + generateTokenAtTime(key, userID, actionID, now)}, + } + + for _, bdt := range badDataTests { + if validTokenAtTime(bdt.tok, key, userID, actionID, oneMinuteFromNow) { + t.Errorf("%v: Expected token to be invalid", bdt.name) + } + } +} diff --git a/vendor/golang.org/x/oauth2/.travis.yml b/vendor/golang.org/x/oauth2/.travis.yml new file mode 100644 index 0000000..fa139db --- /dev/null +++ b/vendor/golang.org/x/oauth2/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - tip + +install: + - export GOPATH="$HOME/gopath" + - mkdir -p "$GOPATH/src/golang.org/x" + - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2" + - go get -v -t -d golang.org/x/oauth2/... + +script: + - go test -v golang.org/x/oauth2/...
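The vendored xsrftoken package above reduces to two calls, Generate and Valid. As a rough usage sketch of the round trip (the key, user, and action strings here are made-up placeholders, not values from this patch):

```go
package main

import (
	"fmt"

	"golang.org/x/net/xsrftoken"
)

func main() {
	const key = "app-secret" // placeholder; Generate panics on an empty key
	// Generate embeds a millisecond issue time and an HMAC-SHA1 over
	// key, userID, actionID, and that timestamp.
	tok := xsrftoken.Generate(key, "user-42", "POST /form")

	// Valid recomputes the expected token for the embedded issue time and
	// compares in constant time; tokens older than xsrftoken.Timeout fail.
	fmt.Println(xsrftoken.Valid(tok, key, "user-42", "POST /form")) // true
	fmt.Println(xsrftoken.Valid(tok, key, "user-43", "POST /form")) // false: different user
}
```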
diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/oauth2/AUTHORS new file mode 100644 index 0000000..15167cd --- /dev/null +++ b/vendor/golang.org/x/oauth2/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md new file mode 100644 index 0000000..dfbed62 --- /dev/null +++ b/vendor/golang.org/x/oauth2/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + +## Filing issues + +When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/oauth2/CONTRIBUTORS new file mode 100644 index 0000000..1c4577e --- /dev/null +++ b/vendor/golang.org/x/oauth2/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/golang.org/x/oauth2/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md new file mode 100644 index 0000000..eb8dcee --- /dev/null +++ b/vendor/golang.org/x/oauth2/README.md @@ -0,0 +1,79 @@ +# OAuth2 for Go + +[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2) +[![GoDoc](https://godoc.org/golang.org/x/oauth2?status.svg)](https://godoc.org/golang.org/x/oauth2) + +The oauth2 package contains a client implementation for the OAuth 2.0 spec. + +## Installation + +~~~~ +go get golang.org/x/oauth2 +~~~~ + +Or you can manually git clone the repository to +`$(go env GOPATH)/src/golang.org/x/oauth2`. + +See godoc for further documentation and examples. + +* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2) +* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google) + + +## App Engine + +In change 96e89be (March 2015), we removed the `oauth2.Context2` type in favor +of the [`context.Context`](https://golang.org/x/net/context#Context) type from +the `golang.org/x/net/context` package. + +This means it's no longer possible to use the "Classic App Engine" +`appengine.Context` type with the `oauth2` package. (You're using +Classic App Engine if you import the package `"appengine"`.) + +To work around this, you may use the new `"google.golang.org/appengine"` +package. This package has almost the same API as the `"appengine"` package, +but it can be fetched with `go get` and used on "Managed VMs" as well as +Classic App Engine. + +See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app) +for information on updating your app. + +If you don't want to update your entire app to use the new App Engine packages, +you may use both sets of packages in parallel, using only the new packages +with the `oauth2` package. + +```go +import ( + "net/http" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + newappengine "google.golang.org/appengine" + newurlfetch "google.golang.org/appengine/urlfetch" + + "appengine" +) + +func handler(w http.ResponseWriter, r *http.Request) { + var c appengine.Context = appengine.NewContext(r) + c.Infof("Logging a message with the old package") + + var ctx context.Context = newappengine.NewContext(r) + client := &http.Client{ + Transport: &oauth2.Transport{ + Source: google.AppEngineTokenSource(ctx, "scope"), + Base: &newurlfetch.Transport{Context: ctx}, + }, + } + client.Get("...") +} +``` + +## Report Issues / Send Patches + +This repository uses Gerrit for code changes. To learn how to submit changes to +this repository, see https://golang.org/doc/contribute.html. + +The main issue tracker for the oauth2 repository is located at +https://github.com/golang/oauth2/issues.
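The provider packages vendored next (amazon, bitbucket, and later facebook, fitbit, foursquare, github) each export a single oauth2.Endpoint holding that provider's auth and token URLs. A minimal sketch of dropping one into the standard three-legged flow; the client ID, secret, and scope below are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/github"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "YOUR_CLIENT_ID",     // placeholder
		ClientSecret: "YOUR_CLIENT_SECRET", // placeholder
		Scopes:       []string{"repo"},     // placeholder scope
		Endpoint:     github.Endpoint,      // AuthURL/TokenURL supplied by the provider package
	}

	// Send the user to the consent page, then trade the returned code for
	// a token, exactly as in the generic oauth2 flow.
	fmt.Println("Visit:", conf.AuthCodeURL("state"))

	var code string
	if _, err := fmt.Scan(&code); err != nil {
		log.Fatal(err)
	}
	tok, err := conf.Exchange(context.Background(), code)
	if err != nil {
		log.Fatal(err)
	}
	_ = conf.Client(context.Background(), tok) // authenticated *http.Client
}
```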
diff --git a/vendor/golang.org/x/oauth2/amazon/amazon.go b/vendor/golang.org/x/oauth2/amazon/amazon.go new file mode 100644 index 0000000..d21da11 --- /dev/null +++ b/vendor/golang.org/x/oauth2/amazon/amazon.go @@ -0,0 +1,16 @@ +// Copyright 2017 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package amazon provides constants for using OAuth2 to access Amazon. +package amazon + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Amazon's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://www.amazon.com/ap/oa", + TokenURL: "https://api.amazon.com/auth/o2/token", +} diff --git a/vendor/golang.org/x/oauth2/bitbucket/bitbucket.go b/vendor/golang.org/x/oauth2/bitbucket/bitbucket.go new file mode 100644 index 0000000..44af1f1 --- /dev/null +++ b/vendor/golang.org/x/oauth2/bitbucket/bitbucket.go @@ -0,0 +1,16 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bitbucket provides constants for using OAuth2 to access Bitbucket. +package bitbucket + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Bitbucket's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://bitbucket.org/site/oauth2/authorize", + TokenURL: "https://bitbucket.org/site/oauth2/access_token", +} diff --git a/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go new file mode 100644 index 0000000..c4e840d --- /dev/null +++ b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go @@ -0,0 +1,109 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package clientcredentials implements the OAuth 2.0 "client credentials" token flow, +// also known as "two-legged OAuth 2.0". +// +// This should be used when the client is acting on its own behalf or when the client +// is the resource owner. It may also be used when requesting access to protected +// resources based on an authorization previously arranged with the authorization +// server. +// +// See https://tools.ietf.org/html/rfc6749#section-4.4 +package clientcredentials // import "golang.org/x/oauth2/clientcredentials" + +import ( + "fmt" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" +) + +// Config describes a 2-legged OAuth2 flow, with both the +// client application information and the server's endpoint URLs. +type Config struct { + // ClientID is the application's ID. + ClientID string + + // ClientSecret is the application's secret. + ClientSecret string + + // TokenURL is the resource server's token endpoint + // URL. This is a constant specific to each server. + TokenURL string + + // Scopes specifies optional requested permissions. + Scopes []string + + // EndpointParams specifies additional parameters for requests to the token endpoint. + EndpointParams url.Values +} + +// Token uses client credentials to retrieve a token. +// The HTTP client to use is derived from the context. +// If nil, http.DefaultClient is used. +func (c *Config) Token(ctx context.Context) (*oauth2.Token, error) { + return c.TokenSource(ctx).Token() +} + +// Client returns an HTTP client that authorizes its requests with tokens from the Config's TokenSource.
+// Tokens will auto-refresh as necessary. The underlying +// HTTP transport will be obtained using the provided context. +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context) *http.Client { + return oauth2.NewClient(ctx, c.TokenSource(ctx)) +} + +// TokenSource returns a TokenSource that caches the most recent token until it +// expires, automatically fetching a new one as necessary using the provided +// context and the client ID and client secret. +// +// Most users will use Config.Client instead. +func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { + source := &tokenSource{ + ctx: ctx, + conf: c, + } + return oauth2.ReuseTokenSource(nil, source) +} + +type tokenSource struct { + ctx context.Context + conf *Config +} + +// Token refreshes the token by making a new client credentials request. +// Tokens received this way do not include a refresh token. +func (c *tokenSource) Token() (*oauth2.Token, error) { + v := url.Values{ + "grant_type": {"client_credentials"}, + } + if len(c.conf.Scopes) > 0 { + v.Set("scope", strings.Join(c.conf.Scopes, " ")) + } + for k, p := range c.conf.EndpointParams { + if _, ok := v[k]; ok { + return nil, fmt.Errorf("oauth2: cannot overwrite parameter %q", k) + } + v[k] = p + } + tk, err := internal.RetrieveToken(c.ctx, c.conf.ClientID, c.conf.ClientSecret, c.conf.TokenURL, v) + if err != nil { + if rErr, ok := err.(*internal.RetrieveError); ok { + return nil, (*oauth2.RetrieveError)(rErr) + } + return nil, err + } + t := &oauth2.Token{ + AccessToken: tk.AccessToken, + TokenType: tk.TokenType, + RefreshToken: tk.RefreshToken, + Expiry: tk.Expiry, + } + return t.WithExtra(tk.Raw), nil +} diff --git a/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go new file mode 100644 index 0000000..108520c --- /dev/null +++ b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go @@ -0,0 +1,95 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +package clientcredentials + +import ( + "context" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "testing" +) + +func newConf(serverURL string) *Config { + return &Config{ + ClientID: "CLIENT_ID", + ClientSecret: "CLIENT_SECRET", + Scopes: []string{"scope1", "scope2"}, + TokenURL: serverURL + "/token", + EndpointParams: url.Values{"audience": {"audience1"}}, + } +} + +type mockTransport struct { + rt func(req *http.Request) (resp *http.Response, err error) +} + +func (t *mockTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + return t.rt(req) +} + +func TestTokenRequest(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/token" { + t.Errorf("authenticate client request URL = %q; want %q", r.URL, "/token") + } + headerAuth := r.Header.Get("Authorization") + if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" { + t.Errorf("Unexpected authorization header, %v is found.", headerAuth) + } + if got, want := r.Header.Get("Content-Type"), "application/x-www-form-urlencoded"; got != want { + t.Errorf("Content-Type header = %q; want %q", got, want) + } + body, err := ioutil.ReadAll(r.Body) + if err != nil { + r.Body.Close() + t.Errorf("failed reading request body: %s.", err) + } + if string(body) != "audience=audience1&grant_type=client_credentials&scope=scope1+scope2" { + t.Errorf("payload = %q; want %q", string(body), "audience=audience1&grant_type=client_credentials&scope=scope1+scope2") + } + w.Header().Set("Content-Type", "application/x-www-form-urlencoded") + w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&token_type=bearer")) + })) + defer ts.Close() + conf := newConf(ts.URL) + tok, err := conf.Token(context.Background()) + if err != nil { + t.Error(err) + } + if !tok.Valid() { + t.Fatalf("token invalid. got: %#v", tok) + } + if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" { + t.Errorf("Access token = %q; want %q", tok.AccessToken, "90d64460d14870c08c81352a05dedd3465940a7c") + } + if tok.TokenType != "bearer" { + t.Errorf("token type = %q; want %q", tok.TokenType, "bearer") + } +} + +func TestTokenRefreshRequest(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() == "/somethingelse" { + return + } + if r.URL.String() != "/token" { + t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL) + } + headerContentType := r.Header.Get("Content-Type") + if headerContentType != "application/x-www-form-urlencoded" { + t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) + } + body, _ := ioutil.ReadAll(r.Body) + if string(body) != "audience=audience1&grant_type=client_credentials&scope=scope1+scope2" { + t.Errorf("Unexpected refresh token payload, %v is found.", string(body)) + } + })) + defer ts.Close() + conf := newConf(ts.URL) + c := conf.Client(context.Background()) + c.Get(ts.URL + "/somethingelse") +} diff --git a/vendor/golang.org/x/oauth2/example_test.go b/vendor/golang.org/x/oauth2/example_test.go new file mode 100644 index 0000000..fc2f793 --- /dev/null +++ b/vendor/golang.org/x/oauth2/example_test.go @@ -0,0 +1,89 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +package oauth2_test + +import ( + "context" + "fmt" + "log" + "net/http" + "time" + + "golang.org/x/oauth2" +) + +func ExampleConfig() { + ctx := context.Background() + conf := &oauth2.Config{ + ClientID: "YOUR_CLIENT_ID", + ClientSecret: "YOUR_CLIENT_SECRET", + Scopes: []string{"SCOPE1", "SCOPE2"}, + Endpoint: oauth2.Endpoint{ + AuthURL: "https://provider.com/o/oauth2/auth", + TokenURL: "https://provider.com/o/oauth2/token", + }, + } + + // Redirect user to consent page to ask for permission + // for the scopes specified above. + url := conf.AuthCodeURL("state", oauth2.AccessTypeOffline) + fmt.Printf("Visit the URL for the auth dialog: %v", url) + + // Use the authorization code that is pushed to the redirect + // URL. Exchange will do the handshake to retrieve the + // initial access token. The HTTP Client returned by + // conf.Client will refresh the token as necessary. + var code string + if _, err := fmt.Scan(&code); err != nil { + log.Fatal(err) + } + tok, err := conf.Exchange(ctx, code) + if err != nil { + log.Fatal(err) + } + + client := conf.Client(ctx, tok) + client.Get("...") +} + +func ExampleConfig_customHTTP() { + ctx := context.Background() + + conf := &oauth2.Config{ + ClientID: "YOUR_CLIENT_ID", + ClientSecret: "YOUR_CLIENT_SECRET", + Scopes: []string{"SCOPE1", "SCOPE2"}, + Endpoint: oauth2.Endpoint{ + TokenURL: "https://provider.com/o/oauth2/token", + AuthURL: "https://provider.com/o/oauth2/auth", + }, + } + + // Redirect user to consent page to ask for permission + // for the scopes specified above. + url := conf.AuthCodeURL("state", oauth2.AccessTypeOffline) + fmt.Printf("Visit the URL for the auth dialog: %v", url) + + // Use the authorization code that is pushed to the redirect + // URL. Exchange will do the handshake to retrieve the + // initial access token. The HTTP Client returned by + // conf.Client will refresh the token as necessary. + var code string + if _, err := fmt.Scan(&code); err != nil { + log.Fatal(err) + } + + // Use the custom HTTP client when requesting a token. + httpClient := &http.Client{Timeout: 2 * time.Second} + ctx = context.WithValue(ctx, oauth2.HTTPClient, httpClient) + + tok, err := conf.Exchange(ctx, code) + if err != nil { + log.Fatal(err) + } + + client := conf.Client(ctx, tok) + _ = client +} diff --git a/vendor/golang.org/x/oauth2/facebook/facebook.go b/vendor/golang.org/x/oauth2/facebook/facebook.go new file mode 100644 index 0000000..14c801a --- /dev/null +++ b/vendor/golang.org/x/oauth2/facebook/facebook.go @@ -0,0 +1,16 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package facebook provides constants for using OAuth2 to access Facebook. +package facebook // import "golang.org/x/oauth2/facebook" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Facebook's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://www.facebook.com/dialog/oauth", + TokenURL: "https://graph.facebook.com/oauth/access_token", +} diff --git a/vendor/golang.org/x/oauth2/fitbit/fitbit.go b/vendor/golang.org/x/oauth2/fitbit/fitbit.go new file mode 100644 index 0000000..b31b82a --- /dev/null +++ b/vendor/golang.org/x/oauth2/fitbit/fitbit.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fitbit provides constants for using OAuth2 to access the Fitbit API. 
+package fitbit // import "golang.org/x/oauth2/fitbit" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is the Fitbit API's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://www.fitbit.com/oauth2/authorize", + TokenURL: "https://api.fitbit.com/oauth2/token", +} diff --git a/vendor/golang.org/x/oauth2/foursquare/foursquare.go b/vendor/golang.org/x/oauth2/foursquare/foursquare.go new file mode 100644 index 0000000..d2fa099 --- /dev/null +++ b/vendor/golang.org/x/oauth2/foursquare/foursquare.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package foursquare provides constants for using OAuth2 to access Foursquare. +package foursquare // import "golang.org/x/oauth2/foursquare" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Foursquare's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://foursquare.com/oauth2/authorize", + TokenURL: "https://foursquare.com/oauth2/access_token", +} diff --git a/vendor/golang.org/x/oauth2/github/github.go b/vendor/golang.org/x/oauth2/github/github.go new file mode 100644 index 0000000..f297801 --- /dev/null +++ b/vendor/golang.org/x/oauth2/github/github.go @@ -0,0 +1,16 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package github provides constants for using OAuth2 to access Github. +package github // import "golang.org/x/oauth2/github" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Github's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://github.com/login/oauth/authorize", + TokenURL: "https://github.com/login/oauth/access_token", +} diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go new file mode 100644 index 0000000..50d918b --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengine.go @@ -0,0 +1,89 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "sort" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" +) + +// appengineFlex is set at init time by appengineflex_hook.go. If true, we are on App Engine Flex. +var appengineFlex bool + +// Set at init time by appengine_hook.go. If nil, we're not on App Engine. +var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) + +// Set at init time by appengine_hook.go. If nil, we're not on App Engine. +var appengineAppIDFunc func(c context.Context) string + +// AppEngineTokenSource returns a token source that fetches tokens +// issued to the current App Engine application's service account. +// If you are implementing a 3-legged OAuth 2.0 flow on App Engine +// that involves user accounts, see oauth2.Config instead. +// +// The provided context must have come from appengine.NewContext. +func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { + if appengineTokenFunc == nil { + panic("google: AppEngineTokenSource can only be used on App Engine.") + } + scopes := append([]string{}, scope...) 
+ sort.Strings(scopes) + return &appEngineTokenSource{ + ctx: ctx, + scopes: scopes, + key: strings.Join(scopes, " "), + } +} + +// aeTokens helps the fetched tokens to be reused until their expiration. +var ( + aeTokensMu sync.Mutex + aeTokens = make(map[string]*tokenLock) // key is space-separated scopes +) + +type tokenLock struct { + mu sync.Mutex // guards t; held while fetching or updating t + t *oauth2.Token +} + +type appEngineTokenSource struct { + ctx context.Context + scopes []string + key string // to aeTokens map; space-separated scopes +} + +func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) { + if appengineTokenFunc == nil { + panic("google: AppEngineTokenSource can only be used on App Engine.") + } + + aeTokensMu.Lock() + tok, ok := aeTokens[ts.key] + if !ok { + tok = &tokenLock{} + aeTokens[ts.key] = tok + } + aeTokensMu.Unlock() + + tok.mu.Lock() + defer tok.mu.Unlock() + if tok.t.Valid() { + return tok.t, nil + } + access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) + if err != nil { + return nil, err + } + tok.t = &oauth2.Token{ + AccessToken: access, + Expiry: exp, + } + return tok.t, nil +} diff --git a/vendor/golang.org/x/oauth2/google/appengine_hook.go b/vendor/golang.org/x/oauth2/google/appengine_hook.go new file mode 100644 index 0000000..56669ea --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengine_hook.go @@ -0,0 +1,14 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine appenginevm + +package google + +import "google.golang.org/appengine" + +func init() { + appengineTokenFunc = appengine.AccessToken + appengineAppIDFunc = appengine.AppID +} diff --git a/vendor/golang.org/x/oauth2/google/appengineflex_hook.go b/vendor/golang.org/x/oauth2/google/appengineflex_hook.go new file mode 100644 index 0000000..5d0231a --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengineflex_hook.go @@ -0,0 +1,11 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appenginevm + +package google + +func init() { + appengineFlex = true // Flex doesn't support appengine.AccessToken; depend on metadata server. +} diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go new file mode 100644 index 0000000..a316074 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -0,0 +1,115 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "runtime" + + "cloud.google.com/go/compute/metadata" + "golang.org/x/net/context" + "golang.org/x/oauth2" +) + +// DefaultClient returns an HTTP Client that uses the +// DefaultTokenSource to obtain authentication credentials. +func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { + ts, err := DefaultTokenSource(ctx, scope...) + if err != nil { + return nil, err + } + return oauth2.NewClient(ctx, ts), nil +} + +// DefaultTokenSource returns the token source for +// "Application Default Credentials". +// It is a shortcut for FindDefaultCredentials(ctx, scope).TokenSource. 
+func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) { + creds, err := FindDefaultCredentials(ctx, scope...) + if err != nil { + return nil, err + } + return creds.TokenSource, nil +} + +// Common implementation for FindDefaultCredentials. +func findDefaultCredentials(ctx context.Context, scopes []string) (*DefaultCredentials, error) { + // First, try the environment variable. + const envVar = "GOOGLE_APPLICATION_CREDENTIALS" + if filename := os.Getenv(envVar); filename != "" { + creds, err := readCredentialsFile(ctx, filename, scopes) + if err != nil { + return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) + } + return creds, nil + } + + // Second, try a well-known file. + filename := wellKnownFile() + if creds, err := readCredentialsFile(ctx, filename, scopes); err == nil { + return creds, nil + } else if !os.IsNotExist(err) { + return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) + } + + // Third, if we're on Google App Engine use those credentials. + if appengineTokenFunc != nil && !appengineFlex { + return &DefaultCredentials{ + ProjectID: appengineAppIDFunc(ctx), + TokenSource: AppEngineTokenSource(ctx, scopes...), + }, nil + } + + // Fourth, if we're on Google Compute Engine use the metadata server. + if metadata.OnGCE() { + id, _ := metadata.ProjectID() + return &DefaultCredentials{ + ProjectID: id, + TokenSource: ComputeTokenSource(""), + }, nil + } + + // None are found; return helpful error. + const url = "https://developers.google.com/accounts/docs/application-default-credentials" + return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url) +} + +// Common implementation for CredentialsFromJSON. +func credentialsFromJSON(ctx context.Context, jsonData []byte, scopes []string) (*DefaultCredentials, error) { + var f credentialsFile + if err := json.Unmarshal(jsonData, &f); err != nil { + return nil, err + } + ts, err := f.tokenSource(ctx, append([]string(nil), scopes...)) + if err != nil { + return nil, err + } + return &DefaultCredentials{ + ProjectID: f.ProjectID, + TokenSource: ts, + JSON: jsonData, + }, nil +} + +func wellKnownFile() string { + const f = "application_default_credentials.json" + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud", f) + } + return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) +} + +func readCredentialsFile(ctx context.Context, filename string, scopes []string) (*DefaultCredentials, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + return CredentialsFromJSON(ctx, b, scopes...) +} diff --git a/vendor/golang.org/x/oauth2/google/doc_go19.go b/vendor/golang.org/x/oauth2/google/doc_go19.go new file mode 100644 index 0000000..2a86325 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/doc_go19.go @@ -0,0 +1,42 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +// Package google provides support for making OAuth2 authorized and authenticated +// HTTP requests to Google APIs. It supports the Web server flow, client-side +// credentials, service accounts, Google Compute Engine service accounts, and Google +// App Engine service accounts. +// +// A brief overview of the package follows. 
For more information, please read +// https://developers.google.com/accounts/docs/OAuth2 +// and +// https://developers.google.com/accounts/docs/application-default-credentials. +// +// OAuth2 Configs +// +// Two functions in this package return golang.org/x/oauth2.Config values from Google credential +// data. Google supports two JSON formats for OAuth2 credentials: one is handled by ConfigFromJSON, +// the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or +// create an http.Client. +// +// +// Credentials +// +// The Credentials type represents Google credentials, including Application Default +// Credentials. +// +// Use FindDefaultCredentials to obtain Application Default Credentials. +// FindDefaultCredentials looks in some well-known places for a credentials file, and +// will call AppEngineTokenSource or ComputeTokenSource as needed. +// +// DefaultClient and DefaultTokenSource are convenience methods. They first call FindDefaultCredentials, +// then use the credentials to construct an http.Client or an oauth2.TokenSource. +// +// Use CredentialsFromJSON to obtain credentials from either of the two JSON formats +// described in OAuth2 Configs, above. The TokenSource in the returned value is the +// same as the one obtained from the oauth2.Config returned from ConfigFromJSON or +// JWTConfigFromJSON, but the Credentials may contain additional information +// that is useful in some circumstances. +package google // import "golang.org/x/oauth2/google" diff --git a/vendor/golang.org/x/oauth2/google/doc_not_go19.go b/vendor/golang.org/x/oauth2/google/doc_not_go19.go new file mode 100644 index 0000000..5c3c6e1 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/doc_not_go19.go @@ -0,0 +1,43 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +// Package google provides support for making OAuth2 authorized and authenticated +// HTTP requests to Google APIs. It supports the Web server flow, client-side +// credentials, service accounts, Google Compute Engine service accounts, and Google +// App Engine service accounts. +// +// A brief overview of the package follows. For more information, please read +// https://developers.google.com/accounts/docs/OAuth2 +// and +// https://developers.google.com/accounts/docs/application-default-credentials. +// +// OAuth2 Configs +// +// Two functions in this package return golang.org/x/oauth2.Config values from Google credential +// data. Google supports two JSON formats for OAuth2 credentials: one is handled by ConfigFromJSON, +// the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or +// create an http.Client. +// +// +// Credentials +// +// The DefaultCredentials type represents Google Application Default Credentials, as +// well as other forms of credential. +// +// Use FindDefaultCredentials to obtain Application Default Credentials. +// FindDefaultCredentials looks in some well-known places for a credentials file, and +// will call AppEngineTokenSource or ComputeTokenSource as needed. +// +// DefaultClient and DefaultTokenSource are convenience methods. They first call FindDefaultCredentials, +// then use the credentials to construct an http.Client or an oauth2.TokenSource. +// +// Use CredentialsFromJSON to obtain credentials from either of the two JSON +// formats described in OAuth2 Configs, above.
(The DefaultCredentials returned may +// not be "Application Default Credentials".) The TokenSource in the returned value +// is the same as the one obtained from the oauth2.Config returned from +// ConfigFromJSON or JWTConfigFromJSON, but the DefaultCredentials may contain +// additional information that is useful in some circumstances. +package google // import "golang.org/x/oauth2/google" diff --git a/vendor/golang.org/x/oauth2/google/example_test.go b/vendor/golang.org/x/oauth2/google/example_test.go new file mode 100644 index 0000000..643f507 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/example_test.go @@ -0,0 +1,162 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google_test + +import ( + "fmt" + "io/ioutil" + "log" + "net/http" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "golang.org/x/oauth2/jwt" + "google.golang.org/appengine" + "google.golang.org/appengine/urlfetch" +) + +func ExampleDefaultClient() { + client, err := google.DefaultClient(oauth2.NoContext, + "https://www.googleapis.com/auth/devstorage.full_control") + if err != nil { + log.Fatal(err) + } + client.Get("...") +} + +func Example_webServer() { + // Your credentials should be obtained from the Google + // Developer Console (https://console.developers.google.com). + conf := &oauth2.Config{ + ClientID: "YOUR_CLIENT_ID", + ClientSecret: "YOUR_CLIENT_SECRET", + RedirectURL: "YOUR_REDIRECT_URL", + Scopes: []string{ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/blogger", + }, + Endpoint: google.Endpoint, + } + // Redirect user to Google's consent page to ask for permission + // for the scopes specified above. + url := conf.AuthCodeURL("state") + fmt.Printf("Visit the URL for the auth dialog: %v", url) + + // Handle the exchange code to initiate a transport. + tok, err := conf.Exchange(oauth2.NoContext, "authorization-code") + if err != nil { + log.Fatal(err) + } + client := conf.Client(oauth2.NoContext, tok) + client.Get("...") +} + +func ExampleJWTConfigFromJSON() { + // Your credentials should be obtained from the Google + // Developer Console (https://console.developers.google.com). + // Navigate to your project, then see the "Credentials" page + // under "APIs & Auth". + // To create a service account client, click "Create new Client ID", + // select "Service Account", and click "Create Client ID". A JSON + // key file will then be downloaded to your computer. + data, err := ioutil.ReadFile("/path/to/your-project-key.json") + if err != nil { + log.Fatal(err) + } + conf, err := google.JWTConfigFromJSON(data, "https://www.googleapis.com/auth/bigquery") + if err != nil { + log.Fatal(err) + } + // Initiate an http.Client. The following GET request will be + // authorized and authenticated on behalf of + // your service account. + client := conf.Client(oauth2.NoContext) + client.Get("...") +} + +func ExampleSDKConfig() { + // The credentials will be obtained from the first account that + // has been authorized with `gcloud auth login`. + conf, err := google.NewSDKConfig("") + if err != nil { + log.Fatal(err) + } + // Initiate an http.Client. The following GET request will be + // authorized and authenticated on behalf of the SDK user.
+ client := conf.Client(oauth2.NoContext) + client.Get("...") +} + +func Example_serviceAccount() { + // Your credentials should be obtained from the Google + // Developer Console (https://console.developers.google.com). + conf := &jwt.Config{ + Email: "xxx@developer.gserviceaccount.com", + // The contents of your RSA private key or your PEM file + // that contains a private key. + // If you have a p12 file instead, you + // can use `openssl` to export the private key into a pem file. + // + // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes + // + // The field only supports PEM containers with no passphrase. + // The openssl command will convert p12 keys to passphrase-less PEM containers. + PrivateKey: []byte("-----BEGIN RSA PRIVATE KEY-----..."), + Scopes: []string{ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/blogger", + }, + TokenURL: google.JWTTokenURL, + // If you would like to impersonate a user, you can + // create a transport with a subject. The following GET + // request will be made on behalf of user@example.com. + // Optional. + Subject: "user@example.com", + } + // Initiate an http.Client; the following GET request will be + // authorized and authenticated on behalf of user@example.com. + client := conf.Client(oauth2.NoContext) + client.Get("...") +} + +func ExampleAppEngineTokenSource() { + var req *http.Request // from the ServeHTTP handler + ctx := appengine.NewContext(req) + client := &http.Client{ + Transport: &oauth2.Transport{ + Source: google.AppEngineTokenSource(ctx, "https://www.googleapis.com/auth/bigquery"), + Base: &urlfetch.Transport{ + Context: ctx, + }, + }, + } + client.Get("...") +} + +func ExampleComputeTokenSource() { + client := &http.Client{ + Transport: &oauth2.Transport{ + // Fetch from Google Compute Engine's metadata server to retrieve + // an access token for the provided account. + // If no account is specified, "default" is used. + Source: google.ComputeTokenSource(""), + }, + } + client.Get("...") +} + +func ExampleCredentialsFromJSON() { + ctx := context.Background() + data, err := ioutil.ReadFile("/path/to/key-file.json") + if err != nil { + log.Fatal(err) + } + creds, err := google.CredentialsFromJSON(ctx, data, "https://www.googleapis.com/auth/bigquery") + if err != nil { + log.Fatal(err) + } + _ = creds // TODO: Use creds. +} diff --git a/vendor/golang.org/x/oauth2/google/go19.go b/vendor/golang.org/x/oauth2/google/go19.go new file mode 100644 index 0000000..4d0318b --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/go19.go @@ -0,0 +1,57 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package google + +import ( + "golang.org/x/net/context" + "golang.org/x/oauth2" +) + +// Credentials holds Google credentials, including "Application Default Credentials". +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +type Credentials struct { + ProjectID string // may be empty + TokenSource oauth2.TokenSource + + // JSON contains the raw bytes from a JSON credentials file. + // This field may be nil if authentication is provided by the + // environment and not with a credentials file, e.g. when code is + // running on Google Cloud Platform. + JSON []byte +} + +// DefaultCredentials is the old name of Credentials. +// +// Deprecated: use Credentials instead.
+type DefaultCredentials = Credentials + +// FindDefaultCredentials searches for "Application Default Credentials". +// +// It looks for credentials in the following places, +// preferring the first location found: +// +// 1. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// 2. A JSON file in a location known to the gcloud command-line tool. +// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. +// On other systems, $HOME/.config/gcloud/application_default_credentials.json. +// 3. On Google App Engine it uses the appengine.AccessToken function. +// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches +// credentials from the metadata server. +// (In this final case any provided scopes are ignored.) +func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials, error) { + return findDefaultCredentials(ctx, scopes) +} + +// CredentialsFromJSON obtains Google credentials from a JSON value. The JSON can +// represent either a Google Developers Console client_credentials.json file (as in +// ConfigFromJSON) or a Google Developers service account key file (as in +// JWTConfigFromJSON). +func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) { + return credentialsFromJSON(ctx, jsonData, scopes) +} diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go new file mode 100644 index 0000000..f7481fb --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -0,0 +1,192 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + "cloud.google.com/go/compute/metadata" + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/jwt" +) + +// Endpoint is Google's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://accounts.google.com/o/oauth2/auth", + TokenURL: "https://accounts.google.com/o/oauth2/token", +} + +// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. +const JWTTokenURL = "https://accounts.google.com/o/oauth2/token" + +// ConfigFromJSON uses a Google Developers Console client_credentials.json +// file to construct a config. +// client_credentials.json can be downloaded from +// https://console.developers.google.com, under "Credentials". Download the Web +// application credentials in the JSON format and provide the contents of the +// file as jsonKey. 
+func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) { + type cred struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + RedirectURIs []string `json:"redirect_uris"` + AuthURI string `json:"auth_uri"` + TokenURI string `json:"token_uri"` + } + var j struct { + Web *cred `json:"web"` + Installed *cred `json:"installed"` + } + if err := json.Unmarshal(jsonKey, &j); err != nil { + return nil, err + } + var c *cred + switch { + case j.Web != nil: + c = j.Web + case j.Installed != nil: + c = j.Installed + default: + return nil, fmt.Errorf("oauth2/google: no credentials found") + } + if len(c.RedirectURIs) < 1 { + return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json") + } + return &oauth2.Config{ + ClientID: c.ClientID, + ClientSecret: c.ClientSecret, + RedirectURL: c.RedirectURIs[0], + Scopes: scope, + Endpoint: oauth2.Endpoint{ + AuthURL: c.AuthURI, + TokenURL: c.TokenURI, + }, + }, nil +} + +// JWTConfigFromJSON uses a Google Developers service account JSON key file to read +// the credentials that authorize and authenticate the requests. +// Create a service account on "Credentials" for your project at +// https://console.developers.google.com to download a JSON key file. +func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { + var f credentialsFile + if err := json.Unmarshal(jsonKey, &f); err != nil { + return nil, err + } + if f.Type != serviceAccountKey { + return nil, fmt.Errorf("google: read JWT from JSON credentials: 'type' field is %q (expected %q)", f.Type, serviceAccountKey) + } + scope = append([]string(nil), scope...) // copy + return f.jwtConfig(scope), nil +} + +// JSON key file types. +const ( + serviceAccountKey = "service_account" + userCredentialsKey = "authorized_user" +) + +// credentialsFile is the unmarshalled representation of a credentials file. +type credentialsFile struct { + Type string `json:"type"` // serviceAccountKey or userCredentialsKey + + // Service Account fields + ClientEmail string `json:"client_email"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + TokenURL string `json:"token_uri"` + ProjectID string `json:"project_id"` + + // User Credential fields + // (These typically come from gcloud auth.) + ClientSecret string `json:"client_secret"` + ClientID string `json:"client_id"` + RefreshToken string `json:"refresh_token"` +} + +func (f *credentialsFile) jwtConfig(scopes []string) *jwt.Config { + cfg := &jwt.Config{ + Email: f.ClientEmail, + PrivateKey: []byte(f.PrivateKey), + PrivateKeyID: f.PrivateKeyID, + Scopes: scopes, + TokenURL: f.TokenURL, + } + if cfg.TokenURL == "" { + cfg.TokenURL = JWTTokenURL + } + return cfg +} + +func (f *credentialsFile) tokenSource(ctx context.Context, scopes []string) (oauth2.TokenSource, error) { + switch f.Type { + case serviceAccountKey: + cfg := f.jwtConfig(scopes) + return cfg.TokenSource(ctx), nil + case userCredentialsKey: + cfg := &oauth2.Config{ + ClientID: f.ClientID, + ClientSecret: f.ClientSecret, + Scopes: scopes, + Endpoint: Endpoint, + } + tok := &oauth2.Token{RefreshToken: f.RefreshToken} + return cfg.TokenSource(ctx, tok), nil + case "": + return nil, errors.New("missing 'type' field in credentials") + default: + return nil, fmt.Errorf("unknown credential type: %q", f.Type) + } +} + +// ComputeTokenSource returns a token source that fetches access tokens +// from Google Compute Engine (GCE)'s metadata server. 
It's only valid to use +// this token source if your program is running on a GCE instance. +// If no account is specified, "default" is used. +// Further information about retrieving access tokens from the GCE metadata +// server can be found at https://cloud.google.com/compute/docs/authentication. +func ComputeTokenSource(account string) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, computeSource{account: account}) +} + +type computeSource struct { + account string +} + +func (cs computeSource) Token() (*oauth2.Token, error) { + if !metadata.OnGCE() { + return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE") + } + acct := cs.account + if acct == "" { + acct = "default" + } + tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token") + if err != nil { + return nil, err + } + var res struct { + AccessToken string `json:"access_token"` + ExpiresInSec int `json:"expires_in"` + TokenType string `json:"token_type"` + } + err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res) + if err != nil { + return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err) + } + if res.ExpiresInSec == 0 || res.AccessToken == "" { + return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata") + } + return &oauth2.Token{ + AccessToken: res.AccessToken, + TokenType: res.TokenType, + Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), + }, nil +} diff --git a/vendor/golang.org/x/oauth2/google/google_test.go b/vendor/golang.org/x/oauth2/google/google_test.go new file mode 100644 index 0000000..287c699 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/google_test.go @@ -0,0 +1,116 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
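Before the tests, a hedged sketch of ComputeTokenSource in use on a GCE instance. The request URL is illustrative; off GCE the metadata fetch fails by design:

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	// "" selects the instance's "default" service account.
	ts := google.ComputeTokenSource("")
	client := oauth2.NewClient(context.Background(), ts)
	resp, err := client.Get("https://www.googleapis.com/storage/v1/b?project=my-project") // illustrative call
	if err != nil {
		log.Fatal(err) // off GCE this fails: the metadata server is unreachable
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}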
+
+package google
+
+import (
+	"strings"
+	"testing"
+)
+
+var webJSONKey = []byte(`
+{
+	"web": {
+		"auth_uri": "https://google.com/o/oauth2/auth",
+		"client_secret": "3Oknc4jS_wA2r9i",
+		"token_uri": "https://google.com/o/oauth2/token",
+		"client_email": "222-nprqovg5k43uum874cs9osjt2koe97g8@developer.gserviceaccount.com",
+		"redirect_uris": ["https://www.example.com/oauth2callback"],
+		"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/222-nprqovg5k43uum874cs9osjt2koe97g8@developer.gserviceaccount.com",
+		"client_id": "222-nprqovg5k43uum874cs9osjt2koe97g8.apps.googleusercontent.com",
+		"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
+		"javascript_origins": ["https://www.example.com"]
+	}
+}`)
+
+var installedJSONKey = []byte(`{
+	"installed": {
+		"client_id": "222-installed.apps.googleusercontent.com",
+		"redirect_uris": ["https://www.example.com/oauth2callback"]
+	}
+}`)
+
+var jwtJSONKey = []byte(`{
+	"private_key_id": "268f54e43a1af97cfc71731688434f45aca15c8b",
+	"private_key": "super secret key",
+	"client_email": "gopher@developer.gserviceaccount.com",
+	"client_id": "gopher.apps.googleusercontent.com",
+	"token_uri": "https://accounts.google.com/o/gophers/token",
+	"type": "service_account"
+}`)
+
+var jwtJSONKeyNoTokenURL = []byte(`{
+	"private_key_id": "268f54e43a1af97cfc71731688434f45aca15c8b",
+	"private_key": "super secret key",
+	"client_email": "gopher@developer.gserviceaccount.com",
+	"client_id": "gopher.apps.googleusercontent.com",
+	"type": "service_account"
+}`)
+
+func TestConfigFromJSON(t *testing.T) {
+	conf, err := ConfigFromJSON(webJSONKey, "scope1", "scope2")
+	if err != nil {
+		t.Error(err)
+	}
+	if got, want := conf.ClientID, "222-nprqovg5k43uum874cs9osjt2koe97g8.apps.googleusercontent.com"; got != want {
+		t.Errorf("ClientID = %q; want %q", got, want)
+	}
+	if got, want := conf.ClientSecret, "3Oknc4jS_wA2r9i"; got != want {
+		t.Errorf("ClientSecret = %q; want %q", got, want)
+	}
+	if got, want := conf.RedirectURL, "https://www.example.com/oauth2callback"; got != want {
+		t.Errorf("RedirectURL = %q; want %q", got, want)
+	}
+	if got, want := strings.Join(conf.Scopes, ","), "scope1,scope2"; got != want {
+		t.Errorf("Scopes = %q; want %q", got, want)
+	}
+	if got, want := conf.Endpoint.AuthURL, "https://google.com/o/oauth2/auth"; got != want {
+		t.Errorf("AuthURL = %q; want %q", got, want)
+	}
+	if got, want := conf.Endpoint.TokenURL, "https://google.com/o/oauth2/token"; got != want {
+		t.Errorf("TokenURL = %q; want %q", got, want)
+	}
+}
+
+func TestConfigFromJSON_Installed(t *testing.T) {
+	conf, err := ConfigFromJSON(installedJSONKey)
+	if err != nil {
+		t.Error(err)
+	}
+	if got, want := conf.ClientID, "222-installed.apps.googleusercontent.com"; got != want {
+		t.Errorf("ClientID = %q; want %q", got, want)
+	}
+}
+
+func TestJWTConfigFromJSON(t *testing.T) {
+	conf, err := JWTConfigFromJSON(jwtJSONKey, "scope1", "scope2")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got, want := conf.Email, "gopher@developer.gserviceaccount.com"; got != want {
+		t.Errorf("Email = %q, want %q", got, want)
+	}
+	if got, want := string(conf.PrivateKey), "super secret key"; got != want {
+		t.Errorf("PrivateKey = %q, want %q", got, want)
+	}
+	if got, want := conf.PrivateKeyID, "268f54e43a1af97cfc71731688434f45aca15c8b"; got != want {
+		t.Errorf("PrivateKeyID = %q, want %q", got, want)
+	}
+	if got, want := strings.Join(conf.Scopes, ","), "scope1,scope2"; got != want {
+		t.Errorf("Scopes = %q; want %q", got, want)
+	}
+	if got, want
:= conf.TokenURL, "https://accounts.google.com/o/gophers/token"; got != want { + t.Errorf("TokenURL = %q; want %q", got, want) + } +} + +func TestJWTConfigFromJSONNoTokenURL(t *testing.T) { + conf, err := JWTConfigFromJSON(jwtJSONKeyNoTokenURL, "scope1", "scope2") + if err != nil { + t.Fatal(err) + } + if got, want := conf.TokenURL, "https://accounts.google.com/o/oauth2/token"; got != want { + t.Errorf("TokenURL = %q; want %q", got, want) + } +} diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go new file mode 100644 index 0000000..b0fdb3a --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/jwt.go @@ -0,0 +1,74 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "crypto/rsa" + "fmt" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON +// key file to read the credentials that authorize and authenticate the +// requests, and returns a TokenSource that does not use any OAuth2 flow but +// instead creates a JWT and sends that as the access token. +// The audience is typically a URL that specifies the scope of the credentials. +// +// Note that this is not a standard OAuth flow, but rather an +// optimization supported by a few Google services. +// Unless you know otherwise, you should use JWTConfigFromJSON instead. +func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) { + cfg, err := JWTConfigFromJSON(jsonKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse JSON key: %v", err) + } + pk, err := internal.ParseKey(cfg.PrivateKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse key: %v", err) + } + ts := &jwtAccessTokenSource{ + email: cfg.Email, + audience: audience, + pk: pk, + pkID: cfg.PrivateKeyID, + } + tok, err := ts.Token() + if err != nil { + return nil, err + } + return oauth2.ReuseTokenSource(tok, ts), nil +} + +type jwtAccessTokenSource struct { + email, audience string + pk *rsa.PrivateKey + pkID string +} + +func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) { + iat := time.Now() + exp := iat.Add(time.Hour) + cs := &jws.ClaimSet{ + Iss: ts.email, + Sub: ts.email, + Aud: ts.audience, + Iat: iat.Unix(), + Exp: exp.Unix(), + } + hdr := &jws.Header{ + Algorithm: "RS256", + Typ: "JWT", + KeyID: string(ts.pkID), + } + msg, err := jws.Encode(hdr, cs, ts.pk) + if err != nil { + return nil, fmt.Errorf("google: could not encode JWT: %v", err) + } + return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil +} diff --git a/vendor/golang.org/x/oauth2/google/jwt_test.go b/vendor/golang.org/x/oauth2/google/jwt_test.go new file mode 100644 index 0000000..f844436 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/jwt_test.go @@ -0,0 +1,91 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "bytes" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/json" + "encoding/pem" + "strings" + "testing" + "time" + + "golang.org/x/oauth2/jws" +) + +func TestJWTAccessTokenSourceFromJSON(t *testing.T) { + // Generate a key we can use in the test data. 
+ privateKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatal(err) + } + + // Encode the key and substitute into our example JSON. + enc := pem.EncodeToMemory(&pem.Block{ + Type: "PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(privateKey), + }) + enc, err = json.Marshal(string(enc)) + if err != nil { + t.Fatalf("json.Marshal: %v", err) + } + jsonKey := bytes.Replace(jwtJSONKey, []byte(`"super secret key"`), enc, 1) + + ts, err := JWTAccessTokenSourceFromJSON(jsonKey, "audience") + if err != nil { + t.Fatalf("JWTAccessTokenSourceFromJSON: %v\nJSON: %s", err, string(jsonKey)) + } + + tok, err := ts.Token() + if err != nil { + t.Fatalf("Token: %v", err) + } + + if got, want := tok.TokenType, "Bearer"; got != want { + t.Errorf("TokenType = %q, want %q", got, want) + } + if got := tok.Expiry; tok.Expiry.Before(time.Now()) { + t.Errorf("Expiry = %v, should not be expired", got) + } + + err = jws.Verify(tok.AccessToken, &privateKey.PublicKey) + if err != nil { + t.Errorf("jws.Verify on AccessToken: %v", err) + } + + claim, err := jws.Decode(tok.AccessToken) + if err != nil { + t.Fatalf("jws.Decode on AccessToken: %v", err) + } + + if got, want := claim.Iss, "gopher@developer.gserviceaccount.com"; got != want { + t.Errorf("Iss = %q, want %q", got, want) + } + if got, want := claim.Sub, "gopher@developer.gserviceaccount.com"; got != want { + t.Errorf("Sub = %q, want %q", got, want) + } + if got, want := claim.Aud, "audience"; got != want { + t.Errorf("Aud = %q, want %q", got, want) + } + + // Finally, check the header private key. + parts := strings.Split(tok.AccessToken, ".") + hdrJSON, err := base64.RawURLEncoding.DecodeString(parts[0]) + if err != nil { + t.Fatalf("base64 DecodeString: %v\nString: %q", err, parts[0]) + } + var hdr jws.Header + if err := json.Unmarshal([]byte(hdrJSON), &hdr); err != nil { + t.Fatalf("json.Unmarshal: %v (%q)", err, hdrJSON) + } + + if got, want := hdr.KeyID, "268f54e43a1af97cfc71731688434f45aca15c8b"; got != want { + t.Errorf("Header KeyID = %q, want %q", got, want) + } +} diff --git a/vendor/golang.org/x/oauth2/google/not_go19.go b/vendor/golang.org/x/oauth2/google/not_go19.go new file mode 100644 index 0000000..544e406 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/not_go19.go @@ -0,0 +1,54 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package google + +import ( + "golang.org/x/net/context" + "golang.org/x/oauth2" +) + +// DefaultCredentials holds Google credentials, including "Application Default Credentials". +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +type DefaultCredentials struct { + ProjectID string // may be empty + TokenSource oauth2.TokenSource + + // JSON contains the raw bytes from a JSON credentials file. + // This field may be nil if authentication is provided by the + // environment and not with a credentials file, e.g. when code is + // running on Google Cloud Platform. + JSON []byte +} + +// FindDefaultCredentials searches for "Application Default Credentials". +// +// It looks for credentials in the following places, +// preferring the first location found: +// +// 1. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// 2. A JSON file in a location known to the gcloud command-line tool. 
+// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. +// On other systems, $HOME/.config/gcloud/application_default_credentials.json. +// 3. On Google App Engine it uses the appengine.AccessToken function. +// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches +// credentials from the metadata server. +// (In this final case any provided scopes are ignored.) +func FindDefaultCredentials(ctx context.Context, scopes ...string) (*DefaultCredentials, error) { + return findDefaultCredentials(ctx, scopes) +} + +// CredentialsFromJSON obtains Google credentials from a JSON value. The JSON can +// represent either a Google Developers Console client_credentials.json file (as in +// ConfigFromJSON) or a Google Developers service account key file (as in +// JWTConfigFromJSON). +// +// Note: despite the name, the returned credentials may not be Application Default Credentials. +func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*DefaultCredentials, error) { + return credentialsFromJSON(ctx, jsonData, scopes) +} diff --git a/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/golang.org/x/oauth2/google/sdk.go new file mode 100644 index 0000000..b9660ca --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/sdk.go @@ -0,0 +1,201 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "bufio" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "os/user" + "path/filepath" + "runtime" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" +) + +type sdkCredentials struct { + Data []struct { + Credential struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + TokenExpiry *time.Time `json:"token_expiry"` + } `json:"credential"` + Key struct { + Account string `json:"account"` + Scope string `json:"scope"` + } `json:"key"` + } +} + +// An SDKConfig provides access to tokens from an account already +// authorized via the Google Cloud SDK. +type SDKConfig struct { + conf oauth2.Config + initialToken *oauth2.Token +} + +// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK +// account. If account is empty, the account currently active in +// Google Cloud SDK properties is used. +// Google Cloud SDK credentials must be created by running `gcloud auth` +// before using this function. +// The Google Cloud SDK is available at https://cloud.google.com/sdk/. 
+func NewSDKConfig(account string) (*SDKConfig, error) {
+	configPath, err := sdkConfigPath()
+	if err != nil {
+		return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err)
+	}
+	credentialsPath := filepath.Join(configPath, "credentials")
+	f, err := os.Open(credentialsPath)
+	if err != nil {
+		return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err)
+	}
+	defer f.Close()
+
+	var c sdkCredentials
+	if err := json.NewDecoder(f).Decode(&c); err != nil {
+		return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err)
+	}
+	if len(c.Data) == 0 {
+		return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath)
+	}
+	if account == "" {
+		propertiesPath := filepath.Join(configPath, "properties")
+		f, err := os.Open(propertiesPath)
+		if err != nil {
+			return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err)
+		}
+		defer f.Close()
+		ini, err := parseINI(f)
+		if err != nil {
+			return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err)
+		}
+		core, ok := ini["core"]
+		if !ok {
+			return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini)
+		}
+		active, ok := core["account"]
+		if !ok {
+			return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core)
+		}
+		account = active
+	}
+
+	for _, d := range c.Data {
+		if account == "" || d.Key.Account == account {
+			if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" {
+				return nil, fmt.Errorf("oauth2/google: no token available for account %q", account)
+			}
+			var expiry time.Time
+			if d.Credential.TokenExpiry != nil {
+				expiry = *d.Credential.TokenExpiry
+			}
+			return &SDKConfig{
+				conf: oauth2.Config{
+					ClientID:     d.Credential.ClientID,
+					ClientSecret: d.Credential.ClientSecret,
+					Scopes:       strings.Split(d.Key.Scope, " "),
+					Endpoint:     Endpoint,
+					RedirectURL:  "oob",
+				},
+				initialToken: &oauth2.Token{
+					AccessToken:  d.Credential.AccessToken,
+					RefreshToken: d.Credential.RefreshToken,
+					Expiry:       expiry,
+				},
+			}, nil
+		}
+	}
+	return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account)
+}
+
+// Client returns an HTTP client using Google Cloud SDK credentials to
+// authorize requests. The token will auto-refresh as necessary. The
+// underlying http.RoundTripper will be obtained using the provided
+// context. The returned client and its Transport should not be
+// modified.
+func (c *SDKConfig) Client(ctx context.Context) *http.Client {
+	return &http.Client{
+		Transport: &oauth2.Transport{
+			Source: c.TokenSource(ctx),
+		},
+	}
+}
+
+// TokenSource returns an oauth2.TokenSource that retrieves tokens from
+// Google Cloud SDK credentials using the provided context.
+// It will return the current access token stored in the credentials,
+// and refresh it when it expires, but it won't update the credentials
+// with the new access token.
+func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource {
+	return c.conf.TokenSource(ctx, c.initialToken)
+}
+
+// Scopes are the OAuth 2.0 scopes the current account is authorized for.
+func (c *SDKConfig) Scopes() []string { + return c.conf.Scopes +} + +func parseINI(ini io.Reader) (map[string]map[string]string, error) { + result := map[string]map[string]string{ + "": {}, // root section + } + scanner := bufio.NewScanner(ini) + currentSection := "" + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if strings.HasPrefix(line, ";") { + // comment. + continue + } + if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") { + currentSection = strings.TrimSpace(line[1 : len(line)-1]) + result[currentSection] = map[string]string{} + continue + } + parts := strings.SplitN(line, "=", 2) + if len(parts) == 2 && parts[0] != "" { + result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1]) + } + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error scanning ini: %v", err) + } + return result, nil +} + +// sdkConfigPath tries to guess where the gcloud config is located. +// It can be overridden during tests. +var sdkConfigPath = func() (string, error) { + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil + } + homeDir := guessUnixHomeDir() + if homeDir == "" { + return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty") + } + return filepath.Join(homeDir, ".config", "gcloud"), nil +} + +func guessUnixHomeDir() string { + // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 + if v := os.Getenv("HOME"); v != "" { + return v + } + // Else, fall back to user.Current: + if u, err := user.Current(); err == nil { + return u.HomeDir + } + return "" +} diff --git a/vendor/golang.org/x/oauth2/google/sdk_test.go b/vendor/golang.org/x/oauth2/google/sdk_test.go new file mode 100644 index 0000000..52b8eca --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/sdk_test.go @@ -0,0 +1,107 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
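Before the tests, a hedged sketch of typical SDKConfig usage. The userinfo endpoint is illustrative, and the call assumes `gcloud auth login` has been run:

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	// An empty account name picks the account currently active in gcloud.
	conf, err := google.NewSDKConfig("")
	if err != nil {
		log.Fatal(err) // e.g. gcloud missing, or `gcloud auth login` never run
	}
	log.Println("authorized scopes:", conf.Scopes())
	client := conf.Client(context.Background())
	resp, err := client.Get("https://www.googleapis.com/oauth2/v3/userinfo") // illustrative endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}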
+ +package google + +import ( + "reflect" + "strings" + "testing" +) + +func TestSDKConfig(t *testing.T) { + sdkConfigPath = func() (string, error) { + return "testdata/gcloud", nil + } + + tests := []struct { + account string + accessToken string + err bool + }{ + {"", "bar_access_token", false}, + {"foo@example.com", "foo_access_token", false}, + {"bar@example.com", "bar_access_token", false}, + {"baz@serviceaccount.example.com", "", true}, + } + for _, tt := range tests { + c, err := NewSDKConfig(tt.account) + if got, want := err != nil, tt.err; got != want { + if !tt.err { + t.Errorf("got %v, want nil", err) + } else { + t.Errorf("got nil, want error") + } + continue + } + if err != nil { + continue + } + tok := c.initialToken + if tok == nil { + t.Errorf("got nil, want %q", tt.accessToken) + continue + } + if tok.AccessToken != tt.accessToken { + t.Errorf("got %q, want %q", tok.AccessToken, tt.accessToken) + } + } +} + +func TestParseINI(t *testing.T) { + tests := []struct { + ini string + want map[string]map[string]string + }{ + { + `root = toor +[foo] +bar = hop +ini = nin +`, + map[string]map[string]string{ + "": {"root": "toor"}, + "foo": {"bar": "hop", "ini": "nin"}, + }, + }, + { + "\t extra \t = whitespace \t\r\n \t [everywhere] \t \r\n here \t = \t there \t \r\n", + map[string]map[string]string{ + "": {"extra": "whitespace"}, + "everywhere": {"here": "there"}, + }, + }, + { + `[empty] +[section] +empty= +`, + map[string]map[string]string{ + "": {}, + "empty": {}, + "section": {"empty": ""}, + }, + }, + { + `ignore +[invalid +=stuff +;comment=true +`, + map[string]map[string]string{ + "": {}, + }, + }, + } + for _, tt := range tests { + result, err := parseINI(strings.NewReader(tt.ini)) + if err != nil { + t.Errorf("parseINI(%q) error %v, want: no error", tt.ini, err) + continue + } + if !reflect.DeepEqual(result, tt.want) { + t.Errorf("parseINI(%q) = %#v, want: %#v", tt.ini, result, tt.want) + } + } +} diff --git a/vendor/golang.org/x/oauth2/google/testdata/gcloud/credentials b/vendor/golang.org/x/oauth2/google/testdata/gcloud/credentials new file mode 100644 index 0000000..ff5eefb --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/testdata/gcloud/credentials @@ -0,0 +1,122 @@ +{ + "data": [ + { + "credential": { + "_class": "OAuth2Credentials", + "_module": "oauth2client.client", + "access_token": "foo_access_token", + "client_id": "foo_client_id", + "client_secret": "foo_client_secret", + "id_token": { + "at_hash": "foo_at_hash", + "aud": "foo_aud", + "azp": "foo_azp", + "cid": "foo_cid", + "email": "foo@example.com", + "email_verified": true, + "exp": 1420573614, + "iat": 1420569714, + "id": "1337", + "iss": "accounts.google.com", + "sub": "1337", + "token_hash": "foo_token_hash", + "verified_email": true + }, + "invalid": false, + "refresh_token": "foo_refresh_token", + "revoke_uri": "https://accounts.google.com/o/oauth2/revoke", + "token_expiry": "2015-01-09T00:51:51Z", + "token_response": { + "access_token": "foo_access_token", + "expires_in": 3600, + "id_token": "foo_id_token", + "token_type": "Bearer" + }, + "token_uri": "https://accounts.google.com/o/oauth2/token", + "user_agent": "Cloud SDK Command Line Tool" + }, + "key": { + "account": "foo@example.com", + "clientId": "foo_client_id", + "scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email 
https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting", + "type": "google-cloud-sdk" + } + }, + { + "credential": { + "_class": "OAuth2Credentials", + "_module": "oauth2client.client", + "access_token": "bar_access_token", + "client_id": "bar_client_id", + "client_secret": "bar_client_secret", + "id_token": { + "at_hash": "bar_at_hash", + "aud": "bar_aud", + "azp": "bar_azp", + "cid": "bar_cid", + "email": "bar@example.com", + "email_verified": true, + "exp": 1420573614, + "iat": 1420569714, + "id": "1337", + "iss": "accounts.google.com", + "sub": "1337", + "token_hash": "bar_token_hash", + "verified_email": true + }, + "invalid": false, + "refresh_token": "bar_refresh_token", + "revoke_uri": "https://accounts.google.com/o/oauth2/revoke", + "token_expiry": "2015-01-09T00:51:51Z", + "token_response": { + "access_token": "bar_access_token", + "expires_in": 3600, + "id_token": "bar_id_token", + "token_type": "Bearer" + }, + "token_uri": "https://accounts.google.com/o/oauth2/token", + "user_agent": "Cloud SDK Command Line Tool" + }, + "key": { + "account": "bar@example.com", + "clientId": "bar_client_id", + "scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting", + "type": "google-cloud-sdk" + } + }, + { + "credential": { + "_class": "ServiceAccountCredentials", + "_kwargs": {}, + "_module": "oauth2client.client", + "_private_key_id": "00000000000000000000000000000000", + "_private_key_pkcs8_text": "-----BEGIN RSA PRIVATE KEY-----\nMIICWwIBAAKBgQCt3fpiynPSaUhWSIKMGV331zudwJ6GkGmvQtwsoK2S2LbvnSwU\nNxgj4fp08kIDR5p26wF4+t/HrKydMwzftXBfZ9UmLVJgRdSswmS5SmChCrfDS5OE\nvFFcN5+6w1w8/Nu657PF/dse8T0bV95YrqyoR0Osy8WHrUOMSIIbC3hRuwIDAQAB\nAoGAJrGE/KFjn0sQ7yrZ6sXmdLawrM3mObo/2uI9T60+k7SpGbBX0/Pi6nFrJMWZ\nTVONG7P3Mu5aCPzzuVRYJB0j8aldSfzABTY3HKoWCczqw1OztJiEseXGiYz4QOyr\nYU3qDyEpdhS6q6wcoLKGH+hqRmz6pcSEsc8XzOOu7s4xW8kCQQDkc75HjhbarCnd\nJJGMe3U76+6UGmdK67ltZj6k6xoB5WbTNChY9TAyI2JC+ppYV89zv3ssj4L+02u3\nHIHFGxsHAkEAwtU1qYb1tScpchPobnYUFiVKJ7KA8EZaHVaJJODW/cghTCV7BxcJ\nbgVvlmk4lFKn3lPKAgWw7PdQsBTVBUcCrQJATPwoIirizrv3u5soJUQxZIkENAqV\nxmybZx9uetrzP7JTrVbFRf0SScMcyN90hdLJiQL8+i4+gaszgFht7sNMnwJAAbfj\nq0UXcauQwALQ7/h2oONfTg5S+MuGC/AxcXPSMZbMRGGoPh3D5YaCv27aIuS/ukQ+\n6dmm/9AGlCb64fsIWQJAPaokbjIifo+LwC5gyK73Mc4t8nAOSZDenzd/2f6TCq76\nS1dcnKiPxaED7W/y6LJiuBT2rbZiQ2L93NJpFZD/UA==\n-----END RSA PRIVATE KEY-----\n", + "_revoke_uri": "https://accounts.google.com/o/oauth2/revoke", + "_scopes": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting", + "_service_account_email": "baz@serviceaccount.example.com", + "_service_account_id": 
"baz.serviceaccount.example.com", + "_token_uri": "https://accounts.google.com/o/oauth2/token", + "_user_agent": "Cloud SDK Command Line Tool", + "access_token": null, + "assertion_type": null, + "client_id": null, + "client_secret": null, + "id_token": null, + "invalid": false, + "refresh_token": null, + "revoke_uri": "https://accounts.google.com/o/oauth2/revoke", + "service_account_name": "baz@serviceaccount.example.com", + "token_expiry": null, + "token_response": null, + "user_agent": "Cloud SDK Command Line Tool" + }, + "key": { + "account": "baz@serviceaccount.example.com", + "clientId": "baz_client_id", + "scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting", + "type": "google-cloud-sdk" + } + } + ], + "file_version": 1 +} diff --git a/vendor/golang.org/x/oauth2/google/testdata/gcloud/properties b/vendor/golang.org/x/oauth2/google/testdata/gcloud/properties new file mode 100644 index 0000000..025de88 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/testdata/gcloud/properties @@ -0,0 +1,2 @@ +[core] +account = bar@example.com \ No newline at end of file diff --git a/vendor/golang.org/x/oauth2/heroku/heroku.go b/vendor/golang.org/x/oauth2/heroku/heroku.go new file mode 100644 index 0000000..5b4fdb8 --- /dev/null +++ b/vendor/golang.org/x/oauth2/heroku/heroku.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package heroku provides constants for using OAuth2 to access Heroku. +package heroku // import "golang.org/x/oauth2/heroku" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Heroku's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://id.heroku.com/oauth/authorize", + TokenURL: "https://id.heroku.com/oauth/token", +} diff --git a/vendor/golang.org/x/oauth2/hipchat/hipchat.go b/vendor/golang.org/x/oauth2/hipchat/hipchat.go new file mode 100644 index 0000000..594fe07 --- /dev/null +++ b/vendor/golang.org/x/oauth2/hipchat/hipchat.go @@ -0,0 +1,60 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package hipchat provides constants for using OAuth2 to access HipChat. +package hipchat // import "golang.org/x/oauth2/hipchat" + +import ( + "encoding/json" + "errors" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/clientcredentials" +) + +// Endpoint is HipChat's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://www.hipchat.com/users/authorize", + TokenURL: "https://api.hipchat.com/v2/oauth/token", +} + +// ServerEndpoint returns a new oauth2.Endpoint for a HipChat Server instance +// running on the given domain or host. +func ServerEndpoint(host string) oauth2.Endpoint { + return oauth2.Endpoint{ + AuthURL: "https://" + host + "/users/authorize", + TokenURL: "https://" + host + "/v2/oauth/token", + } +} + +// ClientCredentialsConfigFromCaps generates a Config from a HipChat API +// capabilities descriptor. 
It does not verify the scopes against the
+// capabilities document at this time.
+//
+// For more information see: https://www.hipchat.com/docs/apiv2/method/get_capabilities
+func ClientCredentialsConfigFromCaps(capsJSON []byte, clientID, clientSecret string, scopes ...string) (*clientcredentials.Config, error) {
+	var caps struct {
+		Caps struct {
+			Endpoint struct {
+				TokenURL string `json:"tokenUrl"`
+			} `json:"oauth2Provider"`
+		} `json:"capabilities"`
+	}
+
+	if err := json.Unmarshal(capsJSON, &caps); err != nil {
+		return nil, err
+	}
+
+	// Verify required fields.
+	if caps.Caps.Endpoint.TokenURL == "" {
+		return nil, errors.New("oauth2/hipchat: missing OAuth2 token URL in the capabilities descriptor JSON")
+	}
+
+	return &clientcredentials.Config{
+		ClientID:     clientID,
+		ClientSecret: clientSecret,
+		Scopes:       scopes,
+		TokenURL:     caps.Caps.Endpoint.TokenURL,
+	}, nil
+}
diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go
new file mode 100644
index 0000000..7434871
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/client_appengine.go
@@ -0,0 +1,13 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import "google.golang.org/appengine/urlfetch"
+
+func init() {
+	appengineClientHook = urlfetch.Client
+}
diff --git a/vendor/golang.org/x/oauth2/internal/doc.go b/vendor/golang.org/x/oauth2/internal/doc.go
new file mode 100644
index 0000000..03265e8
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/doc.go
@@ -0,0 +1,6 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for the oauth2 package.
+package internal
diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go
new file mode 100644
index 0000000..fc63fca
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/oauth2.go
@@ -0,0 +1,37 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+	"fmt"
+)
+
+// ParseKey converts the binary contents of a private key file
+// to an *rsa.PrivateKey. It detects whether the private key is in a
+// PEM container or not. If so, it extracts the private key
+// from the PEM container before conversion. It only supports PEM
+// containers with no passphrase.
+func ParseKey(key []byte) (*rsa.PrivateKey, error) {
+	block, _ := pem.Decode(key)
+	if block != nil {
+		key = block.Bytes
+	}
+	parsedKey, err := x509.ParsePKCS8PrivateKey(key)
+	if err != nil {
+		parsedKey, err = x509.ParsePKCS1PrivateKey(key)
+		if err != nil {
+			return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err)
+		}
+	}
+	parsed, ok := parsedKey.(*rsa.PrivateKey)
+	if !ok {
+		return nil, errors.New("private key is invalid")
+	}
+	return parsed, nil
+}
diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go
new file mode 100644
index 0000000..ce3f27e
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/token.go
@@ -0,0 +1,266 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "mime" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" +) + +// Token represents the credentials used to authorize +// the requests to access protected resources on the OAuth 2.0 +// provider's backend. +// +// This type is a mirror of oauth2.Token and exists to break +// an otherwise-circular dependency. Other internal packages +// should convert this Token into an oauth2.Token before use. +type Token struct { + // AccessToken is the token that authorizes and authenticates + // the requests. + AccessToken string + + // TokenType is the type of token. + // The Type method returns either this or "Bearer", the default. + TokenType string + + // RefreshToken is a token that's used by the application + // (as opposed to the user) to refresh the access token + // if it expires. + RefreshToken string + + // Expiry is the optional expiration time of the access token. + // + // If zero, TokenSource implementations will reuse the same + // token forever and RefreshToken or equivalent + // mechanisms for that TokenSource will not be used. + Expiry time.Time + + // Raw optionally contains extra metadata from the server + // when updating a token. + Raw interface{} +} + +// tokenJSON is the struct representing the HTTP response from OAuth2 +// providers returning a token in JSON form. +type tokenJSON struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + RefreshToken string `json:"refresh_token"` + ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number + Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in +} + +func (e *tokenJSON) expiry() (t time.Time) { + if v := e.ExpiresIn; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + if v := e.Expires; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + return +} + +type expirationTime int32 + +func (e *expirationTime) UnmarshalJSON(b []byte) error { + var n json.Number + err := json.Unmarshal(b, &n) + if err != nil { + return err + } + i, err := n.Int64() + if err != nil { + return err + } + *e = expirationTime(i) + return nil +} + +var brokenAuthHeaderProviders = []string{ + "https://accounts.google.com/", + "https://api.codeswholesale.com/oauth/token", + "https://api.dropbox.com/", + "https://api.dropboxapi.com/", + "https://api.instagram.com/", + "https://api.netatmo.net/", + "https://api.odnoklassniki.ru/", + "https://api.pushbullet.com/", + "https://api.soundcloud.com/", + "https://api.twitch.tv/", + "https://app.box.com/", + "https://connect.stripe.com/", + "https://login.mailchimp.com/", + "https://login.microsoftonline.com/", + "https://login.salesforce.com/", + "https://login.windows.net", + "https://login.live.com/", + "https://oauth.sandbox.trainingpeaks.com/", + "https://oauth.trainingpeaks.com/", + "https://oauth.vk.com/", + "https://openapi.baidu.com/", + "https://slack.com/", + "https://test-sandbox.auth.corp.google.com", + "https://test.salesforce.com/", + "https://user.gini.net/", + "https://www.douban.com/", + "https://www.googleapis.com/", + "https://www.linkedin.com/", + "https://www.strava.com/oauth/", + "https://www.wunderlist.com/oauth/", + "https://api.patreon.com/", + 
"https://sandbox.codeswholesale.com/oauth/token", + "https://api.sipgate.com/v1/authorization/oauth", + "https://api.medium.com/v1/tokens", + "https://log.finalsurge.com/oauth/token", +} + +// brokenAuthHeaderDomains lists broken providers that issue dynamic endpoints. +var brokenAuthHeaderDomains = []string{ + ".auth0.com", + ".force.com", + ".myshopify.com", + ".okta.com", + ".oktapreview.com", +} + +func RegisterBrokenAuthHeaderProvider(tokenURL string) { + brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL) +} + +// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL +// implements the OAuth2 spec correctly +// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. +// In summary: +// - Reddit only accepts client secret in the Authorization header +// - Dropbox accepts either it in URL param or Auth header, but not both. +// - Google only accepts URL param (not spec compliant?), not Auth header +// - Stripe only accepts client secret in Auth header with Bearer method, not Basic +func providerAuthHeaderWorks(tokenURL string) bool { + for _, s := range brokenAuthHeaderProviders { + if strings.HasPrefix(tokenURL, s) { + // Some sites fail to implement the OAuth2 spec fully. + return false + } + } + + if u, err := url.Parse(tokenURL); err == nil { + for _, s := range brokenAuthHeaderDomains { + if strings.HasSuffix(u.Host, s) { + return false + } + } + } + + // Assume the provider implements the spec properly + // otherwise. We can add more exceptions as they're + // discovered. We will _not_ be adding configurable hooks + // to this package to let users select server bugs. + return true +} + +func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values) (*Token, error) { + bustedAuth := !providerAuthHeaderWorks(tokenURL) + if bustedAuth { + if clientID != "" { + v.Set("client_id", clientID) + } + if clientSecret != "" { + v.Set("client_secret", clientSecret) + } + } + req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + if !bustedAuth { + req.SetBasicAuth(url.QueryEscape(clientID), url.QueryEscape(clientSecret)) + } + r, err := ctxhttp.Do(ctx, ContextClient(ctx), req) + if err != nil { + return nil, err + } + defer r.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, &RetrieveError{ + Response: r, + Body: body, + } + } + + var token *Token + content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": + vals, err := url.ParseQuery(string(body)) + if err != nil { + return nil, err + } + token = &Token{ + AccessToken: vals.Get("access_token"), + TokenType: vals.Get("token_type"), + RefreshToken: vals.Get("refresh_token"), + Raw: vals, + } + e := vals.Get("expires_in") + if e == "" { + // TODO(jbd): Facebook's OAuth2 implementation is broken and + // returns expires_in field in expires. Remove the fallback to expires, + // when Facebook fixes their implementation. 
+			e = vals.Get("expires")
+		}
+		expires, _ := strconv.Atoi(e)
+		if expires != 0 {
+			token.Expiry = time.Now().Add(time.Duration(expires) * time.Second)
+		}
+	default:
+		var tj tokenJSON
+		if err = json.Unmarshal(body, &tj); err != nil {
+			return nil, err
+		}
+		token = &Token{
+			AccessToken:  tj.AccessToken,
+			TokenType:    tj.TokenType,
+			RefreshToken: tj.RefreshToken,
+			Expiry:       tj.expiry(),
+			Raw:          make(map[string]interface{}),
+		}
+		json.Unmarshal(body, &token.Raw) // no error checks for optional fields
+	}
+	// Don't overwrite `RefreshToken` with an empty value
+	// if this was a token refreshing request.
+	if token.RefreshToken == "" {
+		token.RefreshToken = v.Get("refresh_token")
+	}
+	if token.AccessToken == "" {
+		return token, errors.New("oauth2: server response missing access_token")
+	}
+	return token, nil
+}
+
+type RetrieveError struct {
+	Response *http.Response
+	Body     []byte
+}
+
+func (r *RetrieveError) Error() string {
+	return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body)
+}
diff --git a/vendor/golang.org/x/oauth2/internal/token_test.go b/vendor/golang.org/x/oauth2/internal/token_test.go
new file mode 100644
index 0000000..7b52e51
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/token_test.go
@@ -0,0 +1,112 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"testing"
+
+	"golang.org/x/net/context"
+)
+
+func TestRegisterBrokenAuthHeaderProvider(t *testing.T) {
+	RegisterBrokenAuthHeaderProvider("https://aaa.com/")
+	tokenURL := "https://aaa.com/token"
+	if providerAuthHeaderWorks(tokenURL) {
+		t.Errorf("got %q as unbroken; want broken", tokenURL)
+	}
+}
+
+func TestRetrieveTokenBustedNoSecret(t *testing.T) {
+	const clientID = "client-id"
+
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if got, want := r.FormValue("client_id"), clientID; got != want {
+			t.Errorf("client_id = %q; want %q", got, want)
+		}
+		if got, want := r.FormValue("client_secret"), ""; got != want {
+			t.Errorf("client_secret = %q; want empty", got)
+		}
+		w.Header().Set("Content-Type", "application/json")
+		io.WriteString(w, `{"access_token": "ACCESS_TOKEN", "token_type": "bearer"}`)
+	}))
+	defer ts.Close()
+
+	RegisterBrokenAuthHeaderProvider(ts.URL)
+	_, err := RetrieveToken(context.Background(), clientID, "", ts.URL, url.Values{})
+	if err != nil {
+		t.Errorf("RetrieveToken = %v; want no error", err)
+	}
+}
+
+func Test_providerAuthHeaderWorks(t *testing.T) {
+	for _, p := range brokenAuthHeaderProviders {
+		if providerAuthHeaderWorks(p) {
+			t.Errorf("got %q as unbroken; want broken", p)
+		}
+		p := fmt.Sprintf("%ssomesuffix", p)
+		if providerAuthHeaderWorks(p) {
+			t.Errorf("got %q as unbroken; want broken", p)
+		}
+	}
+	p := "https://api.not-in-the-list-example.com/"
+	if !providerAuthHeaderWorks(p) {
+		t.Errorf("got %q as broken; want unbroken", p)
+	}
+}
+
+func TestProviderAuthHeaderWorksDomain(t *testing.T) {
+	tests := []struct {
+		tokenURL  string
+		wantWorks bool
+	}{
+		{"https://dev-12345.okta.com/token-url", false},
+		{"https://dev-12345.oktapreview.com/token-url", false},
+		{"https://dev-12345.okta.org/token-url", true},
+		{"https://foo.bar.force.com/token-url", false},
+		{"https://foo.force.com/token-url", false},
+		{"https://force.com/token-url", true},
+	}
+
+	for _, test := range tests {
+		got
:= providerAuthHeaderWorks(test.tokenURL) + if got != test.wantWorks { + t.Errorf("providerAuthHeaderWorks(%q) = %v; want %v", test.tokenURL, got, test.wantWorks) + } + } +} + +func TestRetrieveTokenWithContexts(t *testing.T) { + const clientID = "client-id" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + io.WriteString(w, `{"access_token": "ACCESS_TOKEN", "token_type": "bearer"}`) + })) + defer ts.Close() + + _, err := RetrieveToken(context.Background(), clientID, "", ts.URL, url.Values{}) + if err != nil { + t.Errorf("RetrieveToken (with background context) = %v; want no error", err) + } + + retrieved := make(chan struct{}) + cancellingts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + <-retrieved + })) + defer cancellingts.Close() + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + _, err = RetrieveToken(ctx, clientID, "", cancellingts.URL, url.Values{}) + close(retrieved) + if err == nil { + t.Errorf("RetrieveToken (with cancelled context) = nil; want error") + } +} diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go new file mode 100644 index 0000000..d16f9ae --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/transport.go @@ -0,0 +1,34 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "net/http" + + "golang.org/x/net/context" +) + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient ContextKey + +// ContextKey is just an empty struct. It exists so HTTPClient can be +// an immutable public variable with a unique type. It's immutable +// because nobody else can create a ContextKey, being unexported. +type ContextKey struct{} + +var appengineClientHook func(context.Context) *http.Client + +func ContextClient(ctx context.Context) *http.Client { + if ctx != nil { + if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { + return hc + } + } + if appengineClientHook != nil { + return appengineClientHook(ctx) + } + return http.DefaultClient +} diff --git a/vendor/golang.org/x/oauth2/jira/jira.go b/vendor/golang.org/x/oauth2/jira/jira.go new file mode 100644 index 0000000..e8021e9 --- /dev/null +++ b/vendor/golang.org/x/oauth2/jira/jira.go @@ -0,0 +1,167 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jira provides claims and JWT signing for OAuth2 to access JIRA/Confluence. 
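The ContextClient hook shown above in internal/transport.go is reached through the exported oauth2.HTTPClient key. A hedged sketch of overriding the transport used for token exchanges; the credentials and token URL are placeholders:

package main

import (
	"net/http"
	"time"

	"golang.org/x/net/context"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/clientcredentials"
)

func main() {
	// Any *http.Client stored under oauth2.HTTPClient is what
	// ContextClient returns for the token request.
	hc := &http.Client{Timeout: 10 * time.Second}
	ctx := context.WithValue(context.Background(), oauth2.HTTPClient, hc)

	conf := &clientcredentials.Config{
		ClientID:     "id",     // placeholder
		ClientSecret: "secret", // placeholder
		TokenURL:     "https://provider.example.com/token",
	}
	_ = conf.Client(ctx) // token fetches now go through hc
}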
+package jira
+
+import (
+	"context"
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+
+	"golang.org/x/oauth2"
+)
+
+// ClaimSet contains information about the JWT signature according
+// to Atlassian's documentation
+// https://developer.atlassian.com/cloud/jira/software/oauth-2-jwt-bearer-token-authorization-grant-type/
+type ClaimSet struct {
+	Issuer       string `json:"iss"`
+	Subject      string `json:"sub"`
+	InstalledURL string `json:"tnt"` // URL of installed app
+	AuthURL      string `json:"aud"` // URL of auth server
+	ExpiresIn    int64  `json:"exp"` // Must be no later than 60 seconds in the future
+	IssuedAt     int64  `json:"iat"`
+}
+
+var (
+	defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer"
+	defaultHeader    = map[string]string{
+		"typ": "JWT",
+		"alg": "HS256",
+	}
+)
+
+// Config is the configuration for using JWT to fetch tokens,
+// commonly known as "two-legged OAuth 2.0".
+type Config struct {
+	// BaseURL for your app
+	BaseURL string
+
+	// Subject is the userkey as defined by Atlassian.
+	// It is different from the username (ex: /rest/api/2/user?username=alex).
+	Subject string
+
+	oauth2.Config
+}
+
+// TokenSource returns a JWT TokenSource using the configuration
+// in c and the HTTP client from the provided context.
+func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource {
+	return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c})
+}
+
+// Client returns an HTTP client wrapping the context's
+// HTTP transport and adding Authorization headers with tokens
+// obtained from c.
+//
+// The returned client and its Transport should not be modified.
+func (c *Config) Client(ctx context.Context) *http.Client {
+	return oauth2.NewClient(ctx, c.TokenSource(ctx))
+}
+
+// jwtSource is a source that always does a signed JWT request for a token.
+// It should typically be wrapped with a reuseTokenSource.
+type jwtSource struct {
+	ctx  context.Context
+	conf *Config
+}
+
+func (js jwtSource) Token() (*oauth2.Token, error) {
+	exp := time.Duration(59) * time.Second
+	claimSet := &ClaimSet{
+		Issuer:       fmt.Sprintf("urn:atlassian:connect:clientid:%s", js.conf.ClientID),
+		Subject:      fmt.Sprintf("urn:atlassian:connect:userkey:%s", js.conf.Subject),
+		InstalledURL: js.conf.BaseURL,
+		AuthURL:      js.conf.Endpoint.AuthURL,
+		IssuedAt:     time.Now().Unix(),
+		ExpiresIn:    time.Now().Add(exp).Unix(),
+	}
+
+	v := url.Values{}
+	v.Set("grant_type", defaultGrantType)
+
+	// Add scopes if they exist; if not, the token defaults to the app's scopes.
+	if scopes := js.conf.Scopes; scopes != nil {
+		upperScopes := make([]string, 0, len(scopes))
+		for _, k := range scopes {
+			upperScopes = append(upperScopes, strings.ToUpper(k))
+		}
+		v.Set("scope", strings.Join(upperScopes, "+"))
+	}
+
+	// Sign claims for assertion
+	assertion, err := sign(js.conf.ClientSecret, claimSet)
+	if err != nil {
+		return nil, err
+	}
+	v.Set("assertion", assertion)
+
+	// Fetch access token from auth server
+	hc := oauth2.NewClient(js.ctx, nil)
+	resp, err := hc.PostForm(js.conf.Endpoint.TokenURL, v)
+	if err != nil {
+		return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+	}
+	defer resp.Body.Close()
+	body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20))
+	if err != nil {
+		return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+	}
+	if c := resp.StatusCode; c < 200 || c > 299 {
+		return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", resp.Status, body)
+	}
+
+	// tokenRes is the JSON response body.
+ var tokenRes struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + ExpiresIn int64 `json:"expires_in"` // relative seconds from now + } + if err := json.Unmarshal(body, &tokenRes); err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + token := &oauth2.Token{ + AccessToken: tokenRes.AccessToken, + TokenType: tokenRes.TokenType, + } + + if secs := tokenRes.ExpiresIn; secs > 0 { + token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) + } + return token, nil +} + +// Sign the claim set with the shared secret +// Result to be sent as assertion +func sign(key string, claims *ClaimSet) (string, error) { + b, err := json.Marshal(defaultHeader) + if err != nil { + return "", err + } + header := base64.RawURLEncoding.EncodeToString(b) + + jsonClaims, err := json.Marshal(claims) + if err != nil { + return "", err + } + encodedClaims := strings.TrimRight(base64.URLEncoding.EncodeToString(jsonClaims), "=") + + ss := fmt.Sprintf("%s.%s", header, encodedClaims) + + mac := hmac.New(sha256.New, []byte(key)) + mac.Write([]byte(ss)) + signature := mac.Sum(nil) + + return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(signature)), nil +} diff --git a/vendor/golang.org/x/oauth2/jira/jira_test.go b/vendor/golang.org/x/oauth2/jira/jira_test.go new file mode 100644 index 0000000..c49940d --- /dev/null +++ b/vendor/golang.org/x/oauth2/jira/jira_test.go @@ -0,0 +1,185 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jira + +import ( + "context" + "encoding/base64" + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/jws" +) + +func TestJWTFetch_JSONResponse(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{ + "access_token": "90d64460d14870c08c81352a05dedd3465940a7c", + "token_type": "Bearer", + "expires_in": 3600 + }`)) + })) + defer ts.Close() + + conf := &Config{ + BaseURL: "https://my.app.com", + Subject: "userkey", + Config: oauth2.Config{ + ClientID: "super_secret_client_id", + ClientSecret: "super_shared_secret", + Scopes: []string{"read", "write"}, + Endpoint: oauth2.Endpoint{ + AuthURL: "https://example.com", + TokenURL: ts.URL, + }, + }, + } + + tok, err := conf.TokenSource(context.Background()).Token() + if err != nil { + t.Fatal(err) + } + if !tok.Valid() { + t.Errorf("got invalid token: %v", tok) + } + if got, want := tok.AccessToken, "90d64460d14870c08c81352a05dedd3465940a7c"; got != want { + t.Errorf("access token = %q; want %q", got, want) + } + if got, want := tok.TokenType, "Bearer"; got != want { + t.Errorf("token type = %q; want %q", got, want) + } + if got := tok.Expiry.IsZero(); got { + t.Errorf("token expiry = %v, want none", got) + } +} + +func TestJWTFetch_BadResponse(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"token_type": "Bearer"}`)) + })) + defer ts.Close() + + conf := &Config{ + BaseURL: "https://my.app.com", + Subject: "userkey", + Config: oauth2.Config{ + ClientID: "super_secret_client_id", + ClientSecret: "super_shared_secret", + Scopes: []string{"read", "write"}, + Endpoint: oauth2.Endpoint{ + AuthURL: "https://example.com", + 
TokenURL: ts.URL, + }, + }, + } + + tok, err := conf.TokenSource(context.Background()).Token() + if err != nil { + t.Fatal(err) + } + if tok == nil { + t.Fatalf("got nil token; want token") + } + if tok.Valid() { + t.Errorf("got invalid token: %v", tok) + } + if got, want := tok.AccessToken, ""; got != want { + t.Errorf("access token = %q; want %q", got, want) + } + if got, want := tok.TokenType, "Bearer"; got != want { + t.Errorf("token type = %q; want %q", got, want) + } +} + +func TestJWTFetch_BadResponseType(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"access_token":123, "token_type": "Bearer"}`)) + })) + defer ts.Close() + + conf := &Config{ + BaseURL: "https://my.app.com", + Subject: "userkey", + Config: oauth2.Config{ + ClientID: "super_secret_client_id", + ClientSecret: "super_shared_secret", + Endpoint: oauth2.Endpoint{ + AuthURL: "https://example.com", + TokenURL: ts.URL, + }, + }, + } + + tok, err := conf.TokenSource(context.Background()).Token() + if err == nil { + t.Error("got a token; expected error") + if got, want := tok.AccessToken, ""; got != want { + t.Errorf("access token = %q; want %q", got, want) + } + } +} + +func TestJWTFetch_Assertion(t *testing.T) { + var assertion string + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + r.ParseForm() + assertion = r.Form.Get("assertion") + + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{ + "access_token": "90d64460d14870c08c81352a05dedd3465940a7c", + "token_type": "Bearer", + "expires_in": 3600 + }`)) + })) + defer ts.Close() + + conf := &Config{ + BaseURL: "https://my.app.com", + Subject: "userkey", + Config: oauth2.Config{ + ClientID: "super_secret_client_id", + ClientSecret: "super_shared_secret", + Endpoint: oauth2.Endpoint{ + AuthURL: "https://example.com", + TokenURL: ts.URL, + }, + }, + } + + _, err := conf.TokenSource(context.Background()).Token() + if err != nil { + t.Fatalf("Failed to fetch token: %v", err) + } + + parts := strings.Split(assertion, ".") + if len(parts) != 3 { + t.Fatalf("assertion = %q; want 3 parts", assertion) + } + gotjson, err := base64.RawURLEncoding.DecodeString(parts[0]) + if err != nil { + t.Fatalf("invalid token header; err = %v", err) + } + + got := jws.Header{} + if err := json.Unmarshal(gotjson, &got); err != nil { + t.Errorf("failed to unmarshal json token header = %q; err = %v", gotjson, err) + } + + want := jws.Header{ + Algorithm: "HS256", + Typ: "JWT", + } + if got != want { + t.Errorf("access token header = %q; want %q", got, want) + } +} diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go new file mode 100644 index 0000000..683d2d2 --- /dev/null +++ b/vendor/golang.org/x/oauth2/jws/jws.go @@ -0,0 +1,182 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jws provides a partial implementation +// of JSON Web Signature encoding and decoding. +// It exists to support the golang.org/x/oauth2 package. +// +// See RFC 7515. +// +// Deprecated: this package is not intended for public use and might be +// removed in the future. It exists for internal use only. +// Please switch to another JWS package or copy this package into your own +// source tree. 
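Even though the package is deprecated for public use, its round trip is worth seeing once. A self-contained sketch with a throwaway key; the issuer and audience are placeholders:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"log"

	"golang.org/x/oauth2/jws"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	header := &jws.Header{Algorithm: "RS256", Typ: "JWT"}
	claims := &jws.ClaimSet{
		Iss: "issuer@example.com",           // placeholder issuer
		Aud: "https://audience.example.com", // placeholder audience
	}
	token, err := jws.Encode(header, claims, key) // RS256-signs header.claims
	if err != nil {
		log.Fatal(err)
	}
	if err := jws.Verify(token, &key.PublicKey); err != nil {
		log.Fatal(err)
	}
	fmt.Println("round trip OK")
}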
+package jws // import "golang.org/x/oauth2/jws"
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+)
+
+// ClaimSet contains information about the JWT signature including the
+// permissions being requested (scopes), the target of the token, the issuer,
+// the time the token was issued, and the lifetime of the token.
+type ClaimSet struct {
+	Iss   string `json:"iss"`             // email address of the client_id of the application making the access token request
+	Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests
+	Aud   string `json:"aud"`             // descriptor of the intended target of the assertion (Optional).
+	Exp   int64  `json:"exp"`             // the expiration time of the assertion (seconds since Unix epoch)
+	Iat   int64  `json:"iat"`             // the time the assertion was issued (seconds since Unix epoch)
+	Typ   string `json:"typ,omitempty"`   // token type (Optional).
+
+	// Email for which the application is requesting delegated access (Optional).
+	Sub string `json:"sub,omitempty"`
+
+	// The old name of Sub. Client keeps setting Prn to be
+	// compliant with legacy OAuth 2.0 providers. (Optional)
+	Prn string `json:"prn,omitempty"`
+
+	// See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3
+	// This map is marshalled using custom code (see (c *ClaimSet) encode()).
+	PrivateClaims map[string]interface{} `json:"-"`
+}
+
+func (c *ClaimSet) encode() (string, error) {
+	// Reverting time back for machines whose time is not perfectly in sync.
+	// If client machine's time is in the future according
+	// to Google servers, an access token will not be issued.
+	now := time.Now().Add(-10 * time.Second)
+	if c.Iat == 0 {
+		c.Iat = now.Unix()
+	}
+	if c.Exp == 0 {
+		c.Exp = now.Add(time.Hour).Unix()
+	}
+	if c.Exp < c.Iat {
+		return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat)
+	}
+
+	b, err := json.Marshal(c)
+	if err != nil {
+		return "", err
+	}
+
+	if len(c.PrivateClaims) == 0 {
+		return base64.RawURLEncoding.EncodeToString(b), nil
+	}
+
+	// Marshal private claim set and then append it to b.
+	prv, err := json.Marshal(c.PrivateClaims)
+	if err != nil {
+		return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims)
+	}
+
+	// Concatenate public and private claim JSON objects.
+	if !bytes.HasSuffix(b, []byte{'}'}) {
+		return "", fmt.Errorf("jws: invalid JSON %s", b)
+	}
+	if !bytes.HasPrefix(prv, []byte{'{'}) {
+		return "", fmt.Errorf("jws: invalid JSON %s", prv)
+	}
+	b[len(b)-1] = ','         // Replace closing curly brace with a comma.
+	b = append(b, prv[1:]...) // Append private claims.
+	return base64.RawURLEncoding.EncodeToString(b), nil
+}
+
+// Header represents the header for the signed JWS payloads.
+type Header struct {
+	// The algorithm used for signature.
+	Algorithm string `json:"alg"`
+
+	// Represents the token type.
+	Typ string `json:"typ"`
+
+	// The optional hint of which key is being used.
+	KeyID string `json:"kid,omitempty"`
+}
+
+func (h *Header) encode() (string, error) {
+	b, err := json.Marshal(h)
+	if err != nil {
+		return "", err
+	}
+	return base64.RawURLEncoding.EncodeToString(b), nil
+}
+
+// Decode decodes a claim set from a JWS payload.
+func Decode(payload string) (*ClaimSet, error) {
+	// decode returned id token to get expiry
+	s := strings.Split(payload, ".")
+	if len(s) < 2 {
+		// TODO(jbd): Provide more context about the error.
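+		// (A compact JWS serialization is header.payload.signature; only
+		// the payload, which holds the claim set, is needed here.)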
+ return nil, errors.New("jws: invalid token received") + } + decoded, err := base64.RawURLEncoding.DecodeString(s[1]) + if err != nil { + return nil, err + } + c := &ClaimSet{} + err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c) + return c, err +} + +// Signer returns a signature for the given data. +type Signer func(data []byte) (sig []byte, err error) + +// EncodeWithSigner encodes a header and claim set with the provided signer. +func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) { + head, err := header.encode() + if err != nil { + return "", err + } + cs, err := c.encode() + if err != nil { + return "", err + } + ss := fmt.Sprintf("%s.%s", head, cs) + sig, err := sg([]byte(ss)) + if err != nil { + return "", err + } + return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil +} + +// Encode encodes a signed JWS with provided header and claim set. +// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key. +func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) { + sg := func(data []byte) (sig []byte, err error) { + h := sha256.New() + h.Write(data) + return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) + } + return EncodeWithSigner(header, c, sg) +} + +// Verify tests whether the provided JWT token's signature was produced by the private key +// associated with the supplied public key. +func Verify(token string, key *rsa.PublicKey) error { + parts := strings.Split(token, ".") + if len(parts) != 3 { + return errors.New("jws: invalid token received, token must have 3 parts") + } + + signedContent := parts[0] + "." + parts[1] + signatureString, err := base64.RawURLEncoding.DecodeString(parts[2]) + if err != nil { + return err + } + + h := sha256.New() + h.Write([]byte(signedContent)) + return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), []byte(signatureString)) +} diff --git a/vendor/golang.org/x/oauth2/jws/jws_test.go b/vendor/golang.org/x/oauth2/jws/jws_test.go new file mode 100644 index 0000000..39a136a --- /dev/null +++ b/vendor/golang.org/x/oauth2/jws/jws_test.go @@ -0,0 +1,46 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jws + +import ( + "crypto/rand" + "crypto/rsa" + "testing" +) + +func TestSignAndVerify(t *testing.T) { + header := &Header{ + Algorithm: "RS256", + Typ: "JWT", + } + payload := &ClaimSet{ + Iss: "http://google.com/", + Aud: "", + Exp: 3610, + Iat: 10, + } + + privateKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatal(err) + } + + token, err := Encode(header, payload, privateKey) + if err != nil { + t.Fatal(err) + } + + err = Verify(token, &privateKey.PublicKey) + if err != nil { + t.Fatal(err) + } +} + +func TestVerifyFailsOnMalformedClaim(t *testing.T) { + err := Verify("abc.def", nil) + if err == nil { + t.Error("got no errors; want improperly formed JWT not to be verified") + } +} diff --git a/vendor/golang.org/x/oauth2/jwt/example_test.go b/vendor/golang.org/x/oauth2/jwt/example_test.go new file mode 100644 index 0000000..58503d8 --- /dev/null +++ b/vendor/golang.org/x/oauth2/jwt/example_test.go @@ -0,0 +1,33 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
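A quick illustration of the jws API above: EncodeWithSigner accepts any Signer, so the HMAC-based signing that the jira package does by hand earlier in this patch can be expressed directly against it. A minimal, self-contained sketch; the secret and claim values are placeholders, not anything from the patch:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"fmt"
	"log"

	"golang.org/x/oauth2/jws"
)

func main() {
	header := &jws.Header{Algorithm: "HS256", Typ: "JWT"}
	claims := &jws.ClaimSet{Iss: "client-id", Aud: "https://provider.example.com/token"}

	// HMAC-SHA256 signer over the "header.claims" signing input,
	// mirroring what the jira package's sign function does manually.
	signer := func(data []byte) ([]byte, error) {
		mac := hmac.New(sha256.New, []byte("shared-secret"))
		mac.Write(data)
		return mac.Sum(nil), nil
	}

	token, err := jws.EncodeWithSigner(header, claims, signer)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(token) // header.claims.signature, each segment base64url-encoded
}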
+
+package jwt_test
+
+import (
+	"context"
+
+	"golang.org/x/oauth2/jwt"
+)
+
+func ExampleJWTConfig() {
+	ctx := context.Background()
+	conf := &jwt.Config{
+		Email: "xxx@developer.com",
+		// The contents of your RSA private key or your PEM file
+		// that contains a private key.
+		// If you have a p12 file instead, you
+		// can use `openssl` to export the private key into a pem file.
+		//
+		//    $ openssl pkcs12 -in key.p12 -out key.pem -nodes
+		//
+		// It only supports PEM containers with no passphrase.
+		PrivateKey: []byte("-----BEGIN RSA PRIVATE KEY-----..."),
+		Subject:    "user@example.com",
+		TokenURL:   "https://provider.com/o/oauth2/token",
+	}
+	// Create an http.Client; the following GET request will be
+	// authorized and authenticated on behalf of
+	// user@example.com.
+	client := conf.Client(ctx)
+	client.Get("...")
+}
diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go
new file mode 100644
index 0000000..e08f315
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/jwt/jwt.go
@@ -0,0 +1,162 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly
+// known as "two-legged OAuth 2.0".
+//
+// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12
+package jwt
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/internal"
+	"golang.org/x/oauth2/jws"
+)
+
+var (
+	defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer"
+	defaultHeader    = &jws.Header{Algorithm: "RS256", Typ: "JWT"}
+)
+
+// Config is the configuration for using JWT to fetch tokens,
+// commonly known as "two-legged OAuth 2.0".
+type Config struct {
+	// Email is the OAuth client identifier used when communicating with
+	// the configured OAuth provider.
+	Email string
+
+	// PrivateKey contains the contents of an RSA private key or the
+	// contents of a PEM file that contains a private key. The provided
+	// private key is used to sign JWT payloads.
+	// PEM containers with a passphrase are not supported.
+	// Use the following command to convert a PKCS 12 file into a PEM.
+	//
+	//    $ openssl pkcs12 -in key.p12 -out key.pem -nodes
+	//
+	PrivateKey []byte
+
+	// PrivateKeyID contains an optional hint indicating which key is being
+	// used.
+	PrivateKeyID string
+
+	// Subject is the optional user to impersonate.
+	Subject string
+
+	// Scopes optionally specifies a list of requested permission scopes.
+	Scopes []string
+
+	// TokenURL is the endpoint required to complete the 2-legged JWT flow.
+	TokenURL string
+
+	// Expires optionally specifies how long the token is valid for.
+	Expires time.Duration
+}
+
+// TokenSource returns a JWT TokenSource using the configuration
+// in c and the HTTP client from the provided context.
+func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource {
+	return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c})
+}
+
+// Client returns an HTTP client wrapping the context's
+// HTTP transport and adding Authorization headers with tokens
+// obtained from c.
+//
+// The returned client and its Transport should not be modified.
+func (c *Config) Client(ctx context.Context) *http.Client {
+	return oauth2.NewClient(ctx, c.TokenSource(ctx))
+}
+
+// jwtSource is a source that always does a signed JWT request for a token.
+// It should typically be wrapped with a reuseTokenSource. +type jwtSource struct { + ctx context.Context + conf *Config +} + +func (js jwtSource) Token() (*oauth2.Token, error) { + pk, err := internal.ParseKey(js.conf.PrivateKey) + if err != nil { + return nil, err + } + hc := oauth2.NewClient(js.ctx, nil) + claimSet := &jws.ClaimSet{ + Iss: js.conf.Email, + Scope: strings.Join(js.conf.Scopes, " "), + Aud: js.conf.TokenURL, + } + if subject := js.conf.Subject; subject != "" { + claimSet.Sub = subject + // prn is the old name of sub. Keep setting it + // to be compatible with legacy OAuth 2.0 providers. + claimSet.Prn = subject + } + if t := js.conf.Expires; t > 0 { + claimSet.Exp = time.Now().Add(t).Unix() + } + h := *defaultHeader + h.KeyID = js.conf.PrivateKeyID + payload, err := jws.Encode(&h, claimSet, pk) + if err != nil { + return nil, err + } + v := url.Values{} + v.Set("grant_type", defaultGrantType) + v.Set("assertion", payload) + resp, err := hc.PostForm(js.conf.TokenURL, v) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, &oauth2.RetrieveError{ + Response: resp, + Body: body, + } + } + // tokenRes is the JSON response body. + var tokenRes struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + IDToken string `json:"id_token"` + ExpiresIn int64 `json:"expires_in"` // relative seconds from now + } + if err := json.Unmarshal(body, &tokenRes); err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + token := &oauth2.Token{ + AccessToken: tokenRes.AccessToken, + TokenType: tokenRes.TokenType, + } + raw := make(map[string]interface{}) + json.Unmarshal(body, &raw) // no error checks for optional fields + token = token.WithExtra(raw) + + if secs := tokenRes.ExpiresIn; secs > 0 { + token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) + } + if v := tokenRes.IDToken; v != "" { + // decode returned id token to get expiry + claimSet, err := jws.Decode(v) + if err != nil { + return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err) + } + token.Expiry = time.Unix(claimSet.Exp, 0) + } + return token, nil +} diff --git a/vendor/golang.org/x/oauth2/jwt/jwt_test.go b/vendor/golang.org/x/oauth2/jwt/jwt_test.go new file mode 100644 index 0000000..1fbb9aa --- /dev/null +++ b/vendor/golang.org/x/oauth2/jwt/jwt_test.go @@ -0,0 +1,221 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
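For orientation before the tests: the other side of jwtSource.Token's round trip is a token endpoint that reads grant_type and a signed assertion out of the POST form. A hypothetical server sketch (not part of the patch) that uses jws.Decode from above to inspect the claims:

package main

import (
	"fmt"
	"log"
	"net/http"

	"golang.org/x/oauth2/jws"
)

func main() {
	http.HandleFunc("/token", func(w http.ResponseWriter, r *http.Request) {
		r.ParseForm()
		// The jwt flow POSTs grant_type=...jwt-bearer plus the signed assertion.
		claims, err := jws.Decode(r.Form.Get("assertion"))
		if err != nil {
			http.Error(w, "bad assertion", http.StatusBadRequest)
			return
		}
		fmt.Printf("iss=%s scope=%s aud=%s\n", claims.Iss, claims.Scope, claims.Aud)
		// A real server would verify the signature before issuing anything.
		w.Header().Set("Content-Type", "application/json")
		w.Write([]byte(`{"access_token":"t","token_type":"Bearer","expires_in":3600}`))
	})
	log.Fatal(http.ListenAndServe("localhost:8080", nil))
}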
+ +package jwt + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/jws" +) + +var dummyPrivateKey = []byte(`-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAx4fm7dngEmOULNmAs1IGZ9Apfzh+BkaQ1dzkmbUgpcoghucE +DZRnAGd2aPyB6skGMXUytWQvNYav0WTR00wFtX1ohWTfv68HGXJ8QXCpyoSKSSFY +fuP9X36wBSkSX9J5DVgiuzD5VBdzUISSmapjKm+DcbRALjz6OUIPEWi1Tjl6p5RK +1w41qdbmt7E5/kGhKLDuT7+M83g4VWhgIvaAXtnhklDAggilPPa8ZJ1IFe31lNlr +k4DRk38nc6sEutdf3RL7QoH7FBusI7uXV03DC6dwN1kP4GE7bjJhcRb/7jYt7CQ9 +/E9Exz3c0yAp0yrTg0Fwh+qxfH9dKwN52S7SBwIDAQABAoIBAQCaCs26K07WY5Jt +3a2Cw3y2gPrIgTCqX6hJs7O5ByEhXZ8nBwsWANBUe4vrGaajQHdLj5OKfsIDrOvn +2NI1MqflqeAbu/kR32q3tq8/Rl+PPiwUsW3E6Pcf1orGMSNCXxeducF2iySySzh3 +nSIhCG5uwJDWI7a4+9KiieFgK1pt/Iv30q1SQS8IEntTfXYwANQrfKUVMmVF9aIK +6/WZE2yd5+q3wVVIJ6jsmTzoDCX6QQkkJICIYwCkglmVy5AeTckOVwcXL0jqw5Kf +5/soZJQwLEyBoQq7Kbpa26QHq+CJONetPP8Ssy8MJJXBT+u/bSseMb3Zsr5cr43e +DJOhwsThAoGBAPY6rPKl2NT/K7XfRCGm1sbWjUQyDShscwuWJ5+kD0yudnT/ZEJ1 +M3+KS/iOOAoHDdEDi9crRvMl0UfNa8MAcDKHflzxg2jg/QI+fTBjPP5GOX0lkZ9g +z6VePoVoQw2gpPFVNPPTxKfk27tEzbaffvOLGBEih0Kb7HTINkW8rIlzAoGBAM9y +1yr+jvfS1cGFtNU+Gotoihw2eMKtIqR03Yn3n0PK1nVCDKqwdUqCypz4+ml6cxRK +J8+Pfdh7D+ZJd4LEG6Y4QRDLuv5OA700tUoSHxMSNn3q9As4+T3MUyYxWKvTeu3U +f2NWP9ePU0lV8ttk7YlpVRaPQmc1qwooBA/z/8AdAoGAW9x0HWqmRICWTBnpjyxx +QGlW9rQ9mHEtUotIaRSJ6K/F3cxSGUEkX1a3FRnp6kPLcckC6NlqdNgNBd6rb2rA +cPl/uSkZP42Als+9YMoFPU/xrrDPbUhu72EDrj3Bllnyb168jKLa4VBOccUvggxr +Dm08I1hgYgdN5huzs7y6GeUCgYEAj+AZJSOJ6o1aXS6rfV3mMRve9bQ9yt8jcKXw +5HhOCEmMtaSKfnOF1Ziih34Sxsb7O2428DiX0mV/YHtBnPsAJidL0SdLWIapBzeg +KHArByIRkwE6IvJvwpGMdaex1PIGhx5i/3VZL9qiq/ElT05PhIb+UXgoWMabCp84 +OgxDK20CgYAeaFo8BdQ7FmVX2+EEejF+8xSge6WVLtkaon8bqcn6P0O8lLypoOhd +mJAYH8WU+UAy9pecUnDZj14LAGNVmYcse8HFX71MoshnvCTFEPVo4rZxIAGwMpeJ +5jgQ3slYLpqrGlcbLgUXBUgzEO684Wk/UV9DFPlHALVqCfXQ9dpJPg== +-----END RSA PRIVATE KEY-----`) + +func TestJWTFetch_JSONResponse(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{ + "access_token": "90d64460d14870c08c81352a05dedd3465940a7c", + "scope": "user", + "token_type": "bearer", + "expires_in": 3600 + }`)) + })) + defer ts.Close() + + conf := &Config{ + Email: "aaa@xxx.com", + PrivateKey: dummyPrivateKey, + TokenURL: ts.URL, + } + tok, err := conf.TokenSource(context.Background()).Token() + if err != nil { + t.Fatal(err) + } + if !tok.Valid() { + t.Errorf("got invalid token: %v", tok) + } + if got, want := tok.AccessToken, "90d64460d14870c08c81352a05dedd3465940a7c"; got != want { + t.Errorf("access token = %q; want %q", got, want) + } + if got, want := tok.TokenType, "bearer"; got != want { + t.Errorf("token type = %q; want %q", got, want) + } + if got := tok.Expiry.IsZero(); got { + t.Errorf("token expiry = %v, want none", got) + } + scope := tok.Extra("scope") + if got, want := scope, "user"; got != want { + t.Errorf("scope = %q; want %q", got, want) + } +} + +func TestJWTFetch_BadResponse(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"scope": "user", "token_type": "bearer"}`)) + })) + defer ts.Close() + + conf := &Config{ + Email: "aaa@xxx.com", + PrivateKey: dummyPrivateKey, + TokenURL: ts.URL, + } + tok, err := conf.TokenSource(context.Background()).Token() + if err != nil { + t.Fatal(err) + } + if tok == nil { 
+ t.Fatalf("got nil token; want token") + } + if tok.Valid() { + t.Errorf("got invalid token: %v", tok) + } + if got, want := tok.AccessToken, ""; got != want { + t.Errorf("access token = %q; want %q", got, want) + } + if got, want := tok.TokenType, "bearer"; got != want { + t.Errorf("token type = %q; want %q", got, want) + } + scope := tok.Extra("scope") + if got, want := scope, "user"; got != want { + t.Errorf("token scope = %q; want %q", got, want) + } +} + +func TestJWTFetch_BadResponseType(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"access_token":123, "scope": "user", "token_type": "bearer"}`)) + })) + defer ts.Close() + conf := &Config{ + Email: "aaa@xxx.com", + PrivateKey: dummyPrivateKey, + TokenURL: ts.URL, + } + tok, err := conf.TokenSource(context.Background()).Token() + if err == nil { + t.Error("got a token; expected error") + if got, want := tok.AccessToken, ""; got != want { + t.Errorf("access token = %q; want %q", got, want) + } + } +} + +func TestJWTFetch_Assertion(t *testing.T) { + var assertion string + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + r.ParseForm() + assertion = r.Form.Get("assertion") + + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{ + "access_token": "90d64460d14870c08c81352a05dedd3465940a7c", + "scope": "user", + "token_type": "bearer", + "expires_in": 3600 + }`)) + })) + defer ts.Close() + + conf := &Config{ + Email: "aaa@xxx.com", + PrivateKey: dummyPrivateKey, + PrivateKeyID: "ABCDEFGHIJKLMNOPQRSTUVWXYZ", + TokenURL: ts.URL, + } + + _, err := conf.TokenSource(context.Background()).Token() + if err != nil { + t.Fatalf("Failed to fetch token: %v", err) + } + + parts := strings.Split(assertion, ".") + if len(parts) != 3 { + t.Fatalf("assertion = %q; want 3 parts", assertion) + } + gotjson, err := base64.RawURLEncoding.DecodeString(parts[0]) + if err != nil { + t.Fatalf("invalid token header; err = %v", err) + } + + got := jws.Header{} + if err := json.Unmarshal(gotjson, &got); err != nil { + t.Errorf("failed to unmarshal json token header = %q; err = %v", gotjson, err) + } + + want := jws.Header{ + Algorithm: "RS256", + Typ: "JWT", + KeyID: "ABCDEFGHIJKLMNOPQRSTUVWXYZ", + } + if got != want { + t.Errorf("access token header = %q; want %q", got, want) + } +} + +func TestTokenRetrieveError(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-type", "application/json") + w.WriteHeader(http.StatusBadRequest) + w.Write([]byte(`{"error": "invalid_grant"}`)) + })) + defer ts.Close() + + conf := &Config{ + Email: "aaa@xxx.com", + PrivateKey: dummyPrivateKey, + TokenURL: ts.URL, + } + + _, err := conf.TokenSource(context.Background()).Token() + if err == nil { + t.Fatalf("got no error, expected one") + } + _, ok := err.(*oauth2.RetrieveError) + if !ok { + t.Fatalf("got %T error, expected *RetrieveError", err) + } + // Test error string for backwards compatibility + expected := fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", "400 Bad Request", `{"error": "invalid_grant"}`) + if errStr := err.Error(); errStr != expected { + t.Fatalf("got %#v, expected %#v", errStr, expected) + } +} diff --git a/vendor/golang.org/x/oauth2/linkedin/linkedin.go b/vendor/golang.org/x/oauth2/linkedin/linkedin.go new file mode 100644 index 0000000..b619f93 --- /dev/null +++ 
b/vendor/golang.org/x/oauth2/linkedin/linkedin.go @@ -0,0 +1,16 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package linkedin provides constants for using OAuth2 to access LinkedIn. +package linkedin // import "golang.org/x/oauth2/linkedin" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is LinkedIn's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://www.linkedin.com/uas/oauth2/authorization", + TokenURL: "https://www.linkedin.com/uas/oauth2/accessToken", +} diff --git a/vendor/golang.org/x/oauth2/mailchimp/mailchimp.go b/vendor/golang.org/x/oauth2/mailchimp/mailchimp.go new file mode 100644 index 0000000..647787e --- /dev/null +++ b/vendor/golang.org/x/oauth2/mailchimp/mailchimp.go @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package mailchimp provides constants for using OAuth2 to access MailChimp. +package mailchimp // import "golang.org/x/oauth2/mailchimp" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is MailChimp's OAuth 2.0 endpoint. +// See http://developer.mailchimp.com/documentation/mailchimp/guides/how-to-use-oauth2/ +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://login.mailchimp.com/oauth2/authorize", + TokenURL: "https://login.mailchimp.com/oauth2/token", +} diff --git a/vendor/golang.org/x/oauth2/mailru/mailru.go b/vendor/golang.org/x/oauth2/mailru/mailru.go new file mode 100644 index 0000000..dddd9dd --- /dev/null +++ b/vendor/golang.org/x/oauth2/mailru/mailru.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package mailru provides constants for using OAuth2 to access Mail.Ru. +package mailru // import "golang.org/x/oauth2/mailru" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Mail.Ru's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://o2.mail.ru/login", + TokenURL: "https://o2.mail.ru/token", +} diff --git a/vendor/golang.org/x/oauth2/mediamath/mediamath.go b/vendor/golang.org/x/oauth2/mediamath/mediamath.go new file mode 100644 index 0000000..3ebce5d --- /dev/null +++ b/vendor/golang.org/x/oauth2/mediamath/mediamath.go @@ -0,0 +1,22 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package mediamath provides constants for using OAuth2 to access MediaMath. +package mediamath // import "golang.org/x/oauth2/mediamath" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is MediaMath's OAuth 2.0 endpoint for production. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://api.mediamath.com/oauth2/v1.0/authorize", + TokenURL: "https://api.mediamath.com/oauth2/v1.0/token", +} + +// SandboxEndpoint is MediaMath's OAuth 2.0 endpoint for sandbox. 
+var SandboxEndpoint = oauth2.Endpoint{ + AuthURL: "https://t1sandbox.mediamath.com/oauth2/v1.0/authorize", + TokenURL: "https://t1sandbox.mediamath.com/oauth2/v1.0/token", +} diff --git a/vendor/golang.org/x/oauth2/microsoft/microsoft.go b/vendor/golang.org/x/oauth2/microsoft/microsoft.go new file mode 100644 index 0000000..3ffbc57 --- /dev/null +++ b/vendor/golang.org/x/oauth2/microsoft/microsoft.go @@ -0,0 +1,31 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package microsoft provides constants for using OAuth2 to access Windows Live ID. +package microsoft // import "golang.org/x/oauth2/microsoft" + +import ( + "golang.org/x/oauth2" +) + +// LiveConnectEndpoint is Windows's Live ID OAuth 2.0 endpoint. +var LiveConnectEndpoint = oauth2.Endpoint{ + AuthURL: "https://login.live.com/oauth20_authorize.srf", + TokenURL: "https://login.live.com/oauth20_token.srf", +} + +// AzureADEndpoint returns a new oauth2.Endpoint for the given tenant at Azure Active Directory. +// If tenant is empty, it uses the tenant called `common`. +// +// For more information see: +// https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-v2-protocols#endpoints +func AzureADEndpoint(tenant string) oauth2.Endpoint { + if tenant == "" { + tenant = "common" + } + return oauth2.Endpoint{ + AuthURL: "https://login.microsoftonline.com/" + tenant + "/oauth2/v2.0/authorize", + TokenURL: "https://login.microsoftonline.com/" + tenant + "/oauth2/v2.0/token", + } +} diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go new file mode 100644 index 0000000..a047a5f --- /dev/null +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -0,0 +1,353 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package oauth2 provides support for making +// OAuth2 authorized and authenticated HTTP requests. +// It can additionally grant authorization with Bearer JWT. +package oauth2 // import "golang.org/x/oauth2" + +import ( + "bytes" + "errors" + "net/http" + "net/url" + "strings" + "sync" + + "golang.org/x/net/context" + "golang.org/x/oauth2/internal" +) + +// NoContext is the default context you should supply if not using +// your own context.Context (see https://golang.org/x/net/context). +// +// Deprecated: Use context.Background() or context.TODO() instead. +var NoContext = context.TODO() + +// RegisterBrokenAuthHeaderProvider registers an OAuth2 server +// identified by the tokenURL prefix as an OAuth2 implementation +// which doesn't support the HTTP Basic authentication +// scheme to authenticate with the authorization server. +// Once a server is registered, credentials (client_id and client_secret) +// will be passed as query parameters rather than being present +// in the Authorization header. +// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. +func RegisterBrokenAuthHeaderProvider(tokenURL string) { + internal.RegisterBrokenAuthHeaderProvider(tokenURL) +} + +// Config describes a typical 3-legged OAuth2 flow, with both the +// client application information and the server's endpoint URLs. +// For the client credentials 2-legged OAuth2 flow, see the clientcredentials +// package (https://golang.org/x/oauth2/clientcredentials). +type Config struct { + // ClientID is the application's ID. 
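+	// It is the public identifier issued to the application when it
+	// is registered with the provider.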
+	ClientID string
+
+	// ClientSecret is the application's secret.
+	ClientSecret string
+
+	// Endpoint contains the resource server's token endpoint
+	// URLs. These are constants specific to each server and are
+	// often available via site-specific packages, such as
+	// google.Endpoint or github.Endpoint.
+	Endpoint Endpoint
+
+	// RedirectURL is the URL to redirect users to after they
+	// have completed the OAuth flow with the provider.
+	RedirectURL string
+
+	// Scopes specifies optional requested permissions.
+	Scopes []string
+}
+
+// A TokenSource is anything that can return a token.
+type TokenSource interface {
+	// Token returns a token or an error.
+	// Token must be safe for concurrent use by multiple goroutines.
+	// The returned Token must not be modified.
+	Token() (*Token, error)
+}
+
+// Endpoint contains the OAuth 2.0 provider's authorization and token
+// endpoint URLs.
+type Endpoint struct {
+	AuthURL  string
+	TokenURL string
+}
+
+var (
+	// AccessTypeOnline and AccessTypeOffline are options passed
+	// to the Options.AuthCodeURL method. They modify the
+	// "access_type" field that gets sent in the URL returned by
+	// AuthCodeURL.
+	//
+	// Online is the default if neither is specified. If your
+	// application needs to refresh access tokens when the user
+	// is not present at the browser, then use offline. This will
+	// result in your application obtaining a refresh token the
+	// first time your application exchanges an authorization
+	// code for a user.
+	AccessTypeOnline  AuthCodeOption = SetAuthURLParam("access_type", "online")
+	AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline")
+
+	// ApprovalForce forces the users to view the consent dialog
+	// and confirm the permissions request at the URL returned
+	// from AuthCodeURL, even if they've already done so.
+	ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force")
+)
+
+// An AuthCodeOption is passed to Config.AuthCodeURL.
+type AuthCodeOption interface {
+	setValue(url.Values)
+}
+
+type setParam struct{ k, v string }
+
+func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }
+
+// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters
+// to a provider's authorization endpoint.
+func SetAuthURLParam(key, value string) AuthCodeOption {
+	return setParam{key, value}
+}
+
+// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page
+// that asks for permissions for the required scopes explicitly.
+//
+// State is a token to protect the user from CSRF attacks. You must
+// always provide a non-empty string and validate that it matches
+// the state query parameter on your redirect callback.
+// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
+//
+// Opts may include AccessTypeOnline or AccessTypeOffline, as well
+// as ApprovalForce.
+func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
+	var buf bytes.Buffer
+	buf.WriteString(c.Endpoint.AuthURL)
+	v := url.Values{
+		"response_type": {"code"},
+		"client_id":     {c.ClientID},
+	}
+	if c.RedirectURL != "" {
+		v.Set("redirect_uri", c.RedirectURL)
+	}
+	if len(c.Scopes) > 0 {
+		v.Set("scope", strings.Join(c.Scopes, " "))
+	}
+	if state != "" {
+		// TODO(light): Docs say never to omit state; don't allow empty.
+		v.Set("state", state)
+	}
+	for _, opt := range opts {
+		opt.setValue(v)
+	}
+	if strings.Contains(c.Endpoint.AuthURL, "?") {
+		buf.WriteByte('&')
+	} else {
+		buf.WriteByte('?')
+	}
+	buf.WriteString(v.Encode())
+	return buf.String()
+}
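AuthCodeURL above, together with Exchange and Client just below, forms the standard three-legged flow. A compact sketch of how the three chain together; every identifier and URL here is a placeholder:

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2"
)

func main() {
	ctx := context.Background()
	conf := &oauth2.Config{
		ClientID:     "CLIENT_ID",
		ClientSecret: "CLIENT_SECRET",
		RedirectURL:  "https://app.example.com/callback",
		Scopes:       []string{"scope1"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://provider.example.com/auth",
			TokenURL: "https://provider.example.com/token",
		},
	}

	// 1. Send the user to the consent page. The state value must be
	// checked on the redirect back to protect against CSRF.
	fmt.Println("Visit:", conf.AuthCodeURL("random-state", oauth2.AccessTypeOffline))

	// 2. The provider redirects to RedirectURL with ?code=...&state=...;
	// after validating state, exchange the code for a token.
	tok, err := conf.Exchange(ctx, "code-from-redirect")
	if err != nil {
		log.Fatal(err)
	}

	// 3. The returned client attaches the token to every request and
	// refreshes it when it expires.
	client := conf.Client(ctx, tok)
	client.Get("https://provider.example.com/api/resource")
}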
+ v.Set("state", state) + } + for _, opt := range opts { + opt.setValue(v) + } + if strings.Contains(c.Endpoint.AuthURL, "?") { + buf.WriteByte('&') + } else { + buf.WriteByte('?') + } + buf.WriteString(v.Encode()) + return buf.String() +} + +// PasswordCredentialsToken converts a resource owner username and password +// pair into a token. +// +// Per the RFC, this grant type should only be used "when there is a high +// degree of trust between the resource owner and the client (e.g., the client +// is part of the device operating system or a highly privileged application), +// and when other authorization grant types are not available." +// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. +// +// The HTTP client to use is derived from the context. +// If nil, http.DefaultClient is used. +func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { + v := url.Values{ + "grant_type": {"password"}, + "username": {username}, + "password": {password}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + return retrieveToken(ctx, c, v) +} + +// Exchange converts an authorization code into a token. +// +// It is used after a resource provider redirects the user back +// to the Redirect URI (the URL obtained from AuthCodeURL). +// +// The HTTP client to use is derived from the context. +// If a client is not provided via the context, http.DefaultClient is used. +// +// The code will be in the *http.Request.FormValue("code"). Before +// calling Exchange, be sure to validate FormValue("state"). +func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) { + v := url.Values{ + "grant_type": {"authorization_code"}, + "code": {code}, + } + if c.RedirectURL != "" { + v.Set("redirect_uri", c.RedirectURL) + } + return retrieveToken(ctx, c, v) +} + +// Client returns an HTTP client using the provided token. +// The token will auto-refresh as necessary. The underlying +// HTTP transport will be obtained using the provided context. +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context, t *Token) *http.Client { + return NewClient(ctx, c.TokenSource(ctx, t)) +} + +// TokenSource returns a TokenSource that returns t until t expires, +// automatically refreshing it as necessary using the provided context. +// +// Most users will use Config.Client instead. +func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { + tkr := &tokenRefresher{ + ctx: ctx, + conf: c, + } + if t != nil { + tkr.refreshToken = t.RefreshToken + } + return &reuseTokenSource{ + t: t, + new: tkr, + } +} + +// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" +// HTTP requests to renew a token using a RefreshToken. +type tokenRefresher struct { + ctx context.Context // used to get HTTP requests + conf *Config + refreshToken string +} + +// WARNING: Token is not safe for concurrent access, as it +// updates the tokenRefresher's refreshToken field. +// Within this package, it is used by reuseTokenSource which +// synchronizes calls to this method with its own mutex. 
+func (tf *tokenRefresher) Token() (*Token, error) {
+	if tf.refreshToken == "" {
+		return nil, errors.New("oauth2: token expired and refresh token is not set")
+	}
+
+	tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{
+		"grant_type":    {"refresh_token"},
+		"refresh_token": {tf.refreshToken},
+	})
+
+	if err != nil {
+		return nil, err
+	}
+	if tf.refreshToken != tk.RefreshToken {
+		tf.refreshToken = tk.RefreshToken
+	}
+	return tk, err
+}
+
+// reuseTokenSource is a TokenSource that holds a single token in memory
+// and validates its expiry before each call to retrieve it with
+// Token. If it's expired, it will be auto-refreshed using the
+// new TokenSource.
+type reuseTokenSource struct {
+	new TokenSource // called when t is expired.
+
+	mu sync.Mutex // guards t
+	t  *Token
+}
+
+// Token returns the current token if it's still valid, else will
+// refresh the current token (using the new TokenSource) and
+// return the new one.
+func (s *reuseTokenSource) Token() (*Token, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.t.Valid() {
+		return s.t, nil
+	}
+	t, err := s.new.Token()
+	if err != nil {
+		return nil, err
+	}
+	s.t = t
+	return t, nil
+}
+
+// StaticTokenSource returns a TokenSource that always returns the same token.
+// Because the provided token t is never refreshed, StaticTokenSource is only
+// useful for tokens that never expire.
+func StaticTokenSource(t *Token) TokenSource {
+	return staticTokenSource{t}
+}
+
+// staticTokenSource is a TokenSource that always returns the same Token.
+type staticTokenSource struct {
+	t *Token
+}
+
+func (s staticTokenSource) Token() (*Token, error) {
+	return s.t, nil
+}
+
+// HTTPClient is the context key to use with golang.org/x/net/context's
+// WithValue function to associate an *http.Client value with a context.
+var HTTPClient internal.ContextKey
+
+// NewClient creates an *http.Client from a Context and TokenSource.
+// The returned client is not valid beyond the lifetime of the context.
+//
+// Note that if a custom *http.Client is provided via the Context it
+// is used only for token acquisition and is not used to configure the
+// *http.Client returned from NewClient.
+//
+// As a special case, if src is nil, a non-OAuth2 client is returned
+// using the provided context. This exists to support related OAuth2
+// packages.
+func NewClient(ctx context.Context, src TokenSource) *http.Client {
+	if src == nil {
+		return internal.ContextClient(ctx)
+	}
+	return &http.Client{
+		Transport: &Transport{
+			Base:   internal.ContextClient(ctx).Transport,
+			Source: ReuseTokenSource(nil, src),
+		},
+	}
+}
+
+// ReuseTokenSource returns a TokenSource which repeatedly returns the
+// same token as long as it's valid, starting with t.
+// When its cached token is invalid, a new token is obtained from src.
+//
+// ReuseTokenSource is typically used to reuse tokens from a cache
+// (such as a file on disk) between runs of a program, rather than
+// obtaining new tokens unnecessarily.
+//
+// The initial token t may be nil, in which case the TokenSource is
+// wrapped in a caching version if it isn't one already. This also
+// means it's always safe to wrap ReuseTokenSource around any other
+// TokenSource without adverse effects.
+func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
+	// Don't wrap a reuseTokenSource in itself. That would work,
+	// but cause an unnecessary number of mutex operations.
+	// Just build the equivalent one.
+ if rt, ok := src.(*reuseTokenSource); ok { + if t == nil { + // Just use it directly. + return rt + } + src = rt.new + } + return &reuseTokenSource{ + t: t, + new: src, + } +} diff --git a/vendor/golang.org/x/oauth2/oauth2_test.go b/vendor/golang.org/x/oauth2/oauth2_test.go new file mode 100644 index 0000000..847160f --- /dev/null +++ b/vendor/golang.org/x/oauth2/oauth2_test.go @@ -0,0 +1,505 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package oauth2 + +import ( + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "testing" + "time" + + "golang.org/x/net/context" +) + +type mockTransport struct { + rt func(req *http.Request) (resp *http.Response, err error) +} + +func (t *mockTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + return t.rt(req) +} + +func newConf(url string) *Config { + return &Config{ + ClientID: "CLIENT_ID", + ClientSecret: "CLIENT_SECRET", + RedirectURL: "REDIRECT_URL", + Scopes: []string{"scope1", "scope2"}, + Endpoint: Endpoint{ + AuthURL: url + "/auth", + TokenURL: url + "/token", + }, + } +} + +func TestAuthCodeURL(t *testing.T) { + conf := newConf("server") + url := conf.AuthCodeURL("foo", AccessTypeOffline, ApprovalForce) + const want = "server/auth?access_type=offline&approval_prompt=force&client_id=CLIENT_ID&redirect_uri=REDIRECT_URL&response_type=code&scope=scope1+scope2&state=foo" + if got := url; got != want { + t.Errorf("got auth code URL = %q; want %q", got, want) + } +} + +func TestAuthCodeURL_CustomParam(t *testing.T) { + conf := newConf("server") + param := SetAuthURLParam("foo", "bar") + url := conf.AuthCodeURL("baz", param) + const want = "server/auth?client_id=CLIENT_ID&foo=bar&redirect_uri=REDIRECT_URL&response_type=code&scope=scope1+scope2&state=baz" + if got := url; got != want { + t.Errorf("got auth code = %q; want %q", got, want) + } +} + +func TestAuthCodeURL_Optional(t *testing.T) { + conf := &Config{ + ClientID: "CLIENT_ID", + Endpoint: Endpoint{ + AuthURL: "/auth-url", + TokenURL: "/token-url", + }, + } + url := conf.AuthCodeURL("") + const want = "/auth-url?client_id=CLIENT_ID&response_type=code" + if got := url; got != want { + t.Fatalf("got auth code = %q; want %q", got, want) + } +} + +func TestURLUnsafeClientConfig(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if got, want := r.Header.Get("Authorization"), "Basic Q0xJRU5UX0lEJTNGJTNGOkNMSUVOVF9TRUNSRVQlM0YlM0Y="; got != want { + t.Errorf("Authorization header = %q; want %q", got, want) + } + + w.Header().Set("Content-Type", "application/x-www-form-urlencoded") + w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer")) + })) + defer ts.Close() + conf := newConf(ts.URL) + conf.ClientID = "CLIENT_ID??" + conf.ClientSecret = "CLIENT_SECRET??" 
+ _, err := conf.Exchange(context.Background(), "exchange-code") + if err != nil { + t.Error(err) + } +} + +func TestExchangeRequest(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/token" { + t.Errorf("Unexpected exchange request URL, %v is found.", r.URL) + } + headerAuth := r.Header.Get("Authorization") + if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" { + t.Errorf("Unexpected authorization header, %v is found.", headerAuth) + } + headerContentType := r.Header.Get("Content-Type") + if headerContentType != "application/x-www-form-urlencoded" { + t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) + } + body, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Errorf("Failed reading request body: %s.", err) + } + if string(body) != "code=exchange-code&grant_type=authorization_code&redirect_uri=REDIRECT_URL" { + t.Errorf("Unexpected exchange payload, %v is found.", string(body)) + } + w.Header().Set("Content-Type", "application/x-www-form-urlencoded") + w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer")) + })) + defer ts.Close() + conf := newConf(ts.URL) + tok, err := conf.Exchange(context.Background(), "exchange-code") + if err != nil { + t.Error(err) + } + if !tok.Valid() { + t.Fatalf("Token invalid. Got: %#v", tok) + } + if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" { + t.Errorf("Unexpected access token, %#v.", tok.AccessToken) + } + if tok.TokenType != "bearer" { + t.Errorf("Unexpected token type, %#v.", tok.TokenType) + } + scope := tok.Extra("scope") + if scope != "user" { + t.Errorf("Unexpected value for scope: %v", scope) + } +} + +func TestExchangeRequest_JSONResponse(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/token" { + t.Errorf("Unexpected exchange request URL, %v is found.", r.URL) + } + headerAuth := r.Header.Get("Authorization") + if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" { + t.Errorf("Unexpected authorization header, %v is found.", headerAuth) + } + headerContentType := r.Header.Get("Content-Type") + if headerContentType != "application/x-www-form-urlencoded" { + t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) + } + body, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Errorf("Failed reading request body: %s.", err) + } + if string(body) != "code=exchange-code&grant_type=authorization_code&redirect_uri=REDIRECT_URL" { + t.Errorf("Unexpected exchange payload, %v is found.", string(body)) + } + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"access_token": "90d64460d14870c08c81352a05dedd3465940a7c", "scope": "user", "token_type": "bearer", "expires_in": 86400}`)) + })) + defer ts.Close() + conf := newConf(ts.URL) + tok, err := conf.Exchange(context.Background(), "exchange-code") + if err != nil { + t.Error(err) + } + if !tok.Valid() { + t.Fatalf("Token invalid. 
Got: %#v", tok) + } + if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" { + t.Errorf("Unexpected access token, %#v.", tok.AccessToken) + } + if tok.TokenType != "bearer" { + t.Errorf("Unexpected token type, %#v.", tok.TokenType) + } + scope := tok.Extra("scope") + if scope != "user" { + t.Errorf("Unexpected value for scope: %v", scope) + } + expiresIn := tok.Extra("expires_in") + if expiresIn != float64(86400) { + t.Errorf("Unexpected non-numeric value for expires_in: %v", expiresIn) + } +} + +func TestExtraValueRetrieval(t *testing.T) { + values := url.Values{} + kvmap := map[string]string{ + "scope": "user", "token_type": "bearer", "expires_in": "86400.92", + "server_time": "1443571905.5606415", "referer_ip": "10.0.0.1", + "etag": "\"afZYj912P4alikMz_P11982\"", "request_id": "86400", + "untrimmed": " untrimmed ", + } + for key, value := range kvmap { + values.Set(key, value) + } + + tok := Token{raw: values} + scope := tok.Extra("scope") + if got, want := scope, "user"; got != want { + t.Errorf("got scope = %q; want %q", got, want) + } + serverTime := tok.Extra("server_time") + if got, want := serverTime, 1443571905.5606415; got != want { + t.Errorf("got server_time value = %v; want %v", got, want) + } + refererIP := tok.Extra("referer_ip") + if got, want := refererIP, "10.0.0.1"; got != want { + t.Errorf("got referer_ip value = %v, want %v", got, want) + } + expiresIn := tok.Extra("expires_in") + if got, want := expiresIn, 86400.92; got != want { + t.Errorf("got expires_in value = %v, want %v", got, want) + } + requestID := tok.Extra("request_id") + if got, want := requestID, int64(86400); got != want { + t.Errorf("got request_id value = %v, want %v", got, want) + } + untrimmed := tok.Extra("untrimmed") + if got, want := untrimmed, " untrimmed "; got != want { + t.Errorf("got untrimmed = %q; want %q", got, want) + } +} + +const day = 24 * time.Hour + +func TestExchangeRequest_JSONResponse_Expiry(t *testing.T) { + seconds := int32(day.Seconds()) + for _, c := range []struct { + expires string + want bool + }{ + {fmt.Sprintf(`"expires_in": %d`, seconds), true}, + {fmt.Sprintf(`"expires_in": "%d"`, seconds), true}, // PayPal case + {fmt.Sprintf(`"expires": %d`, seconds), true}, // Facebook case + {`"expires": false`, false}, // wrong type + {`"expires": {}`, false}, // wrong type + {`"expires": "zzz"`, false}, // wrong value + } { + testExchangeRequest_JSONResponse_expiry(t, c.expires, c.want) + } +} + +func testExchangeRequest_JSONResponse_expiry(t *testing.T, exp string, want bool) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(fmt.Sprintf(`{"access_token": "90d", "scope": "user", "token_type": "bearer", %s}`, exp))) + })) + defer ts.Close() + conf := newConf(ts.URL) + t1 := time.Now().Add(day) + tok, err := conf.Exchange(context.Background(), "exchange-code") + t2 := time.Now().Add(day) + + if got := (err == nil); got != want { + if want { + t.Errorf("unexpected error: got %v", err) + } else { + t.Errorf("unexpected success") + } + } + if !want { + return + } + if !tok.Valid() { + t.Fatalf("Token invalid. 
Got: %#v", tok)
+	}
+	expiry := tok.Expiry
+	if expiry.Before(t1) || expiry.After(t2) {
+		t.Errorf("Unexpected value for Expiry: %v (should be between %v and %v)", expiry, t1, t2)
+	}
+}
+
+func TestExchangeRequest_BadResponse(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/json")
+		w.Write([]byte(`{"scope": "user", "token_type": "bearer"}`))
+	}))
+	defer ts.Close()
+	conf := newConf(ts.URL)
+	_, err := conf.Exchange(context.Background(), "code")
+	if err == nil {
+		t.Error("expected error from missing access_token")
+	}
+}
+
+func TestExchangeRequest_BadResponseType(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/json")
+		w.Write([]byte(`{"access_token":123, "scope": "user", "token_type": "bearer"}`))
+	}))
+	defer ts.Close()
+	conf := newConf(ts.URL)
+	_, err := conf.Exchange(context.Background(), "exchange-code")
+	if err == nil {
+		t.Error("expected error from non-string access_token")
+	}
+}
+
+func TestExchangeRequest_NonBasicAuth(t *testing.T) {
+	tr := &mockTransport{
+		rt: func(r *http.Request) (w *http.Response, err error) {
+			headerAuth := r.Header.Get("Authorization")
+			if headerAuth != "" {
+				t.Errorf("Unexpected authorization header, %v is found.", headerAuth)
+			}
+			return nil, errors.New("no response")
+		},
+	}
+	c := &http.Client{Transport: tr}
+	conf := &Config{
+		ClientID: "CLIENT_ID",
+		Endpoint: Endpoint{
+			AuthURL:  "https://accounts.google.com/auth",
+			TokenURL: "https://accounts.google.com/token",
+		},
+	}
+
+	ctx := context.WithValue(context.Background(), HTTPClient, c)
+	conf.Exchange(ctx, "code")
+}
+
+func TestPasswordCredentialsTokenRequest(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		defer r.Body.Close()
+		expected := "/token"
+		if r.URL.String() != expected {
+			t.Errorf("URL = %q; want %q", r.URL, expected)
+		}
+		headerAuth := r.Header.Get("Authorization")
+		expected = "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ="
+		if headerAuth != expected {
+			t.Errorf("Authorization header = %q; want %q", headerAuth, expected)
+		}
+		headerContentType := r.Header.Get("Content-Type")
+		expected = "application/x-www-form-urlencoded"
+		if headerContentType != expected {
+			t.Errorf("Content-Type header = %q; want %q", headerContentType, expected)
+		}
+		body, err := ioutil.ReadAll(r.Body)
+		if err != nil {
+			t.Errorf("Failed reading request body: %s.", err)
+		}
+		expected = "grant_type=password&password=password1&scope=scope1+scope2&username=user1"
+		if string(body) != expected {
+			t.Errorf("res.Body = %q; want %q", string(body), expected)
+		}
+		w.Header().Set("Content-Type", "application/x-www-form-urlencoded")
+		w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer"))
+	}))
+	defer ts.Close()
+	conf := newConf(ts.URL)
+	tok, err := conf.PasswordCredentialsToken(context.Background(), "user1", "password1")
+	if err != nil {
+		t.Error(err)
+	}
+	if !tok.Valid() {
+		t.Fatalf("Token invalid. 
Got: %#v", tok) + } + expected := "90d64460d14870c08c81352a05dedd3465940a7c" + if tok.AccessToken != expected { + t.Errorf("AccessToken = %q; want %q", tok.AccessToken, expected) + } + expected = "bearer" + if tok.TokenType != expected { + t.Errorf("TokenType = %q; want %q", tok.TokenType, expected) + } +} + +func TestTokenRefreshRequest(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() == "/somethingelse" { + return + } + if r.URL.String() != "/token" { + t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL) + } + headerContentType := r.Header.Get("Content-Type") + if headerContentType != "application/x-www-form-urlencoded" { + t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) + } + body, _ := ioutil.ReadAll(r.Body) + if string(body) != "grant_type=refresh_token&refresh_token=REFRESH_TOKEN" { + t.Errorf("Unexpected refresh token payload, %v is found.", string(body)) + } + })) + defer ts.Close() + conf := newConf(ts.URL) + c := conf.Client(context.Background(), &Token{RefreshToken: "REFRESH_TOKEN"}) + c.Get(ts.URL + "/somethingelse") +} + +func TestFetchWithNoRefreshToken(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() == "/somethingelse" { + return + } + if r.URL.String() != "/token" { + t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL) + } + headerContentType := r.Header.Get("Content-Type") + if headerContentType != "application/x-www-form-urlencoded" { + t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) + } + body, _ := ioutil.ReadAll(r.Body) + if string(body) != "client_id=CLIENT_ID&grant_type=refresh_token&refresh_token=REFRESH_TOKEN" { + t.Errorf("Unexpected refresh token payload, %v is found.", string(body)) + } + })) + defer ts.Close() + conf := newConf(ts.URL) + c := conf.Client(context.Background(), nil) + _, err := c.Get(ts.URL + "/somethingelse") + if err == nil { + t.Errorf("Fetch should return an error if no refresh token is set") + } +} + +func TestTokenRetrieveError(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/token" { + t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL) + } + w.Header().Set("Content-type", "application/json") + w.WriteHeader(http.StatusBadRequest) + w.Write([]byte(`{"error": "invalid_grant"}`)) + })) + defer ts.Close() + conf := newConf(ts.URL) + _, err := conf.Exchange(context.Background(), "exchange-code") + if err == nil { + t.Fatalf("got no error, expected one") + } + _, ok := err.(*RetrieveError) + if !ok { + t.Fatalf("got %T error, expected *RetrieveError", err) + } + // Test error string for backwards compatibility + expected := fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", "400 Bad Request", `{"error": "invalid_grant"}`) + if errStr := err.Error(); errStr != expected { + t.Fatalf("got %#v, expected %#v", errStr, expected) + } +} + +func TestRefreshToken_RefreshTokenReplacement(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"access_token":"ACCESS_TOKEN", "scope": "user", "token_type": "bearer", "refresh_token": "NEW_REFRESH_TOKEN"}`)) + return + })) + defer ts.Close() + conf := newConf(ts.URL) + tkr := conf.TokenSource(context.Background(), &Token{RefreshToken: 
"OLD_REFRESH_TOKEN"}) + tk, err := tkr.Token() + if err != nil { + t.Errorf("got err = %v; want none", err) + return + } + if want := "NEW_REFRESH_TOKEN"; tk.RefreshToken != want { + t.Errorf("RefreshToken = %q; want %q", tk.RefreshToken, want) + } +} + +func TestRefreshToken_RefreshTokenPreservation(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"access_token":"ACCESS_TOKEN", "scope": "user", "token_type": "bearer"}`)) + return + })) + defer ts.Close() + conf := newConf(ts.URL) + const oldRefreshToken = "OLD_REFRESH_TOKEN" + tkr := conf.TokenSource(context.Background(), &Token{RefreshToken: oldRefreshToken}) + tk, err := tkr.Token() + if err != nil { + t.Fatalf("got err = %v; want none", err) + } + if tk.RefreshToken != oldRefreshToken { + t.Errorf("RefreshToken = %q; want %q", tk.RefreshToken, oldRefreshToken) + } +} + +func TestConfigClientWithToken(t *testing.T) { + tok := &Token{ + AccessToken: "abc123", + } + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if got, want := r.Header.Get("Authorization"), fmt.Sprintf("Bearer %s", tok.AccessToken); got != want { + t.Errorf("Authorization header = %q; want %q", got, want) + } + return + })) + defer ts.Close() + conf := newConf(ts.URL) + + c := conf.Client(context.Background(), tok) + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Error(err) + } + _, err = c.Do(req) + if err != nil { + t.Error(err) + } +} diff --git a/vendor/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go b/vendor/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go new file mode 100644 index 0000000..c0d093c --- /dev/null +++ b/vendor/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go @@ -0,0 +1,16 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package odnoklassniki provides constants for using OAuth2 to access Odnoklassniki. +package odnoklassniki // import "golang.org/x/oauth2/odnoklassniki" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Odnoklassniki's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://www.odnoklassniki.ru/oauth/authorize", + TokenURL: "https://api.odnoklassniki.ru/oauth/token.do", +} diff --git a/vendor/golang.org/x/oauth2/paypal/paypal.go b/vendor/golang.org/x/oauth2/paypal/paypal.go new file mode 100644 index 0000000..2e713c5 --- /dev/null +++ b/vendor/golang.org/x/oauth2/paypal/paypal.go @@ -0,0 +1,22 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package paypal provides constants for using OAuth2 to access PayPal. +package paypal // import "golang.org/x/oauth2/paypal" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is PayPal's OAuth 2.0 endpoint in live (production) environment. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://www.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize", + TokenURL: "https://api.paypal.com/v1/identity/openidconnect/tokenservice", +} + +// SandboxEndpoint is PayPal's OAuth 2.0 endpoint in sandbox (testing) environment. 
+var SandboxEndpoint = oauth2.Endpoint{
+	AuthURL:  "https://www.sandbox.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize",
+	TokenURL: "https://api.sandbox.paypal.com/v1/identity/openidconnect/tokenservice",
+}
diff --git a/vendor/golang.org/x/oauth2/slack/slack.go b/vendor/golang.org/x/oauth2/slack/slack.go
new file mode 100644
index 0000000..593d2f6
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/slack/slack.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package slack provides constants for using OAuth2 to access Slack.
+package slack // import "golang.org/x/oauth2/slack"
+
+import (
+	"golang.org/x/oauth2"
+)
+
+// Endpoint is Slack's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+	AuthURL:  "https://slack.com/oauth/authorize",
+	TokenURL: "https://slack.com/api/oauth.access",
+}
diff --git a/vendor/golang.org/x/oauth2/spotify/spotify.go b/vendor/golang.org/x/oauth2/spotify/spotify.go
new file mode 100644
index 0000000..c75416c
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/spotify/spotify.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package spotify provides constants for using OAuth2 to access Spotify.
+package spotify // import "golang.org/x/oauth2/spotify"
+
+import (
+	"golang.org/x/oauth2"
+)
+
+// Endpoint is Spotify's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+	AuthURL:  "https://accounts.spotify.com/authorize",
+	TokenURL: "https://accounts.spotify.com/api/token",
+}
diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go
new file mode 100644
index 0000000..34db8cd
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/token.go
@@ -0,0 +1,175 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2/internal"
+)
+
+// expiryDelta determines how much earlier a token should be considered
+// expired than its actual expiration time. It is used to avoid late
+// expirations due to client-server time mismatches.
+const expiryDelta = 10 * time.Second
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// Most users of this package should not access fields of Token
+// directly. They're exported mostly for use by related packages
+// implementing derivative OAuth2 flows.
+type Token struct {
+	// AccessToken is the token that authorizes and authenticates
+	// the requests.
+	AccessToken string `json:"access_token"`
+
+	// TokenType is the type of token.
+	// The Type method returns either this or "Bearer", the default.
+	TokenType string `json:"token_type,omitempty"`
+
+	// RefreshToken is a token that's used by the application
+	// (as opposed to the user) to refresh the access token
+	// if it expires.
+	RefreshToken string `json:"refresh_token,omitempty"`
+
+	// Expiry is the optional expiration time of the access token.
+	//
+	// If zero, TokenSource implementations will reuse the same
+	// token forever and RefreshToken or equivalent
+	// mechanisms for that TokenSource will not be used.
+ Expiry time.Time `json:"expiry,omitempty"` + + // raw optionally contains extra metadata from the server + // when updating a token. + raw interface{} +} + +// Type returns t.TokenType if non-empty, else "Bearer". +func (t *Token) Type() string { + if strings.EqualFold(t.TokenType, "bearer") { + return "Bearer" + } + if strings.EqualFold(t.TokenType, "mac") { + return "MAC" + } + if strings.EqualFold(t.TokenType, "basic") { + return "Basic" + } + if t.TokenType != "" { + return t.TokenType + } + return "Bearer" +} + +// SetAuthHeader sets the Authorization header to r using the access +// token in t. +// +// This method is unnecessary when using Transport or an HTTP Client +// returned by this package. +func (t *Token) SetAuthHeader(r *http.Request) { + r.Header.Set("Authorization", t.Type()+" "+t.AccessToken) +} + +// WithExtra returns a new Token that's a clone of t, but using the +// provided raw extra map. This is only intended for use by packages +// implementing derivative OAuth2 flows. +func (t *Token) WithExtra(extra interface{}) *Token { + t2 := new(Token) + *t2 = *t + t2.raw = extra + return t2 +} + +// Extra returns an extra field. +// Extra fields are key-value pairs returned by the server as a +// part of the token retrieval response. +func (t *Token) Extra(key string) interface{} { + if raw, ok := t.raw.(map[string]interface{}); ok { + return raw[key] + } + + vals, ok := t.raw.(url.Values) + if !ok { + return nil + } + + v := vals.Get(key) + switch s := strings.TrimSpace(v); strings.Count(s, ".") { + case 0: // Contains no "."; try to parse as int + if i, err := strconv.ParseInt(s, 10, 64); err == nil { + return i + } + case 1: // Contains a single "."; try to parse as float + if f, err := strconv.ParseFloat(s, 64); err == nil { + return f + } + } + + return v +} + +// expired reports whether the token is expired. +// t must be non-nil. +func (t *Token) expired() bool { + if t.Expiry.IsZero() { + return false + } + return t.Expiry.Round(0).Add(-expiryDelta).Before(time.Now()) +} + +// Valid reports whether t is non-nil, has an AccessToken, and is not expired. +func (t *Token) Valid() bool { + return t != nil && t.AccessToken != "" && !t.expired() +} + +// tokenFromInternal maps an *internal.Token struct into +// a *Token struct. +func tokenFromInternal(t *internal.Token) *Token { + if t == nil { + return nil + } + return &Token{ + AccessToken: t.AccessToken, + TokenType: t.TokenType, + RefreshToken: t.RefreshToken, + Expiry: t.Expiry, + raw: t.Raw, + } +} + +// retrieveToken takes a *Config and uses that to retrieve an *internal.Token. +// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along +// with an error.. +func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { + tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v) + if err != nil { + if rErr, ok := err.(*internal.RetrieveError); ok { + return nil, (*RetrieveError)(rErr) + } + return nil, err + } + return tokenFromInternal(tk), nil +} + +// RetrieveError is the error returned when the token endpoint returns a +// non-2XX HTTP status code. +type RetrieveError struct { + Response *http.Response + // Body is the body that was consumed by reading Response.Body. + // It may be truncated. 
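+	//
+	// Callers can recover the response with a type assertion; a
+	// sketch (the logging is illustrative, not part of this package):
+	//
+	//	if rErr, ok := err.(*oauth2.RetrieveError); ok {
+	//		log.Printf("token endpoint returned %s: %s", rErr.Response.Status, rErr.Body)
+	//	}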
+ Body []byte +} + +func (r *RetrieveError) Error() string { + return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) +} diff --git a/vendor/golang.org/x/oauth2/token_test.go b/vendor/golang.org/x/oauth2/token_test.go new file mode 100644 index 0000000..80db83c --- /dev/null +++ b/vendor/golang.org/x/oauth2/token_test.go @@ -0,0 +1,72 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package oauth2 + +import ( + "testing" + "time" +) + +func TestTokenExtra(t *testing.T) { + type testCase struct { + key string + val interface{} + want interface{} + } + const key = "extra-key" + cases := []testCase{ + {key: key, val: "abc", want: "abc"}, + {key: key, val: 123, want: 123}, + {key: key, val: "", want: ""}, + {key: "other-key", val: "def", want: nil}, + } + for _, tc := range cases { + extra := make(map[string]interface{}) + extra[tc.key] = tc.val + tok := &Token{raw: extra} + if got, want := tok.Extra(key), tc.want; got != want { + t.Errorf("Extra(%q) = %q; want %q", key, got, want) + } + } +} + +func TestTokenExpiry(t *testing.T) { + now := time.Now() + cases := []struct { + name string + tok *Token + want bool + }{ + {name: "12 seconds", tok: &Token{Expiry: now.Add(12 * time.Second)}, want: false}, + {name: "10 seconds", tok: &Token{Expiry: now.Add(expiryDelta)}, want: true}, + {name: "-1 hour", tok: &Token{Expiry: now.Add(-1 * time.Hour)}, want: true}, + } + for _, tc := range cases { + if got, want := tc.tok.expired(), tc.want; got != want { + t.Errorf("expired (%q) = %v; want %v", tc.name, got, want) + } + } +} + +func TestTokenTypeMethod(t *testing.T) { + cases := []struct { + name string + tok *Token + want string + }{ + {name: "bearer-mixed_case", tok: &Token{TokenType: "beAREr"}, want: "Bearer"}, + {name: "default-bearer", tok: &Token{}, want: "Bearer"}, + {name: "basic", tok: &Token{TokenType: "basic"}, want: "Basic"}, + {name: "basic-capitalized", tok: &Token{TokenType: "Basic"}, want: "Basic"}, + {name: "mac", tok: &Token{TokenType: "mac"}, want: "MAC"}, + {name: "mac-caps", tok: &Token{TokenType: "MAC"}, want: "MAC"}, + {name: "mac-mixed_case", tok: &Token{TokenType: "mAc"}, want: "MAC"}, + } + for _, tc := range cases { + if got, want := tc.tok.Type(), tc.want; got != want { + t.Errorf("TokenType(%q) = %v; want %v", tc.name, got, want) + } + } +} diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go new file mode 100644 index 0000000..92ac7e2 --- /dev/null +++ b/vendor/golang.org/x/oauth2/transport.go @@ -0,0 +1,132 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package oauth2 + +import ( + "errors" + "io" + "net/http" + "sync" +) + +// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests, +// wrapping a base RoundTripper and adding an Authorization header +// with a token from the supplied Sources. +// +// Transport is a low-level mechanism. Most code will use the +// higher-level Config.Client method instead. +type Transport struct { + // Source supplies the token to add to outgoing requests' + // Authorization headers. + Source TokenSource + + // Base is the base RoundTripper used to make HTTP requests. + // If nil, http.DefaultTransport is used. 
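+	//
+	// A minimal construction sketch (conf, ctx and tok are assumed to
+	// exist; Config.Client builds an equivalent client for you):
+	//
+	//	t := &oauth2.Transport{
+	//		Source: conf.TokenSource(ctx, tok),
+	//		Base:   &http.Transport{MaxIdleConnsPerHost: 4},
+	//	}
+	//	client := &http.Client{Transport: t}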
+	Base http.RoundTripper
+
+	mu     sync.Mutex                      // guards modReq
+	modReq map[*http.Request]*http.Request // original -> modified
+}
+
+// RoundTrip authorizes and authenticates the request with an
+// access token. If no token exists or the token is expired, it
+// tries to refresh or fetch a new token.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+	if t.Source == nil {
+		return nil, errors.New("oauth2: Transport's Source is nil")
+	}
+	token, err := t.Source.Token()
+	if err != nil {
+		return nil, err
+	}
+
+	req2 := cloneRequest(req) // per RoundTripper contract
+	token.SetAuthHeader(req2)
+	t.setModReq(req, req2)
+	res, err := t.base().RoundTrip(req2)
+	if err != nil {
+		t.setModReq(req, nil)
+		return nil, err
+	}
+	res.Body = &onEOFReader{
+		rc: res.Body,
+		fn: func() { t.setModReq(req, nil) },
+	}
+	return res, nil
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+func (t *Transport) CancelRequest(req *http.Request) {
+	type canceler interface {
+		CancelRequest(*http.Request)
+	}
+	if cr, ok := t.base().(canceler); ok {
+		t.mu.Lock()
+		modReq := t.modReq[req]
+		delete(t.modReq, req)
+		t.mu.Unlock()
+		cr.CancelRequest(modReq)
+	}
+}
+
+func (t *Transport) base() http.RoundTripper {
+	if t.Base != nil {
+		return t.Base
+	}
+	return http.DefaultTransport
+}
+
+func (t *Transport) setModReq(orig, mod *http.Request) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.modReq == nil {
+		t.modReq = make(map[*http.Request]*http.Request)
+	}
+	if mod == nil {
+		delete(t.modReq, orig)
+	} else {
+		t.modReq[orig] = mod
+	}
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+	// shallow copy of the struct
+	r2 := new(http.Request)
+	*r2 = *r
+	// deep copy of the Header
+	r2.Header = make(http.Header, len(r.Header))
+	for k, s := range r.Header {
+		r2.Header[k] = append([]string(nil), s...)
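+		// (appending to a nil []string allocates a fresh slice, so the
+		// clone's header values never share backing arrays with r's)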
+ } + return r2 +} + +type onEOFReader struct { + rc io.ReadCloser + fn func() +} + +func (r *onEOFReader) Read(p []byte) (n int, err error) { + n, err = r.rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +func (r *onEOFReader) Close() error { + err := r.rc.Close() + r.runFunc() + return err +} + +func (r *onEOFReader) runFunc() { + if fn := r.fn; fn != nil { + fn() + r.fn = nil + } +} diff --git a/vendor/golang.org/x/oauth2/transport_test.go b/vendor/golang.org/x/oauth2/transport_test.go new file mode 100644 index 0000000..d6e8087 --- /dev/null +++ b/vendor/golang.org/x/oauth2/transport_test.go @@ -0,0 +1,108 @@ +package oauth2 + +import ( + "net/http" + "net/http/httptest" + "testing" + "time" +) + +type tokenSource struct{ token *Token } + +func (t *tokenSource) Token() (*Token, error) { + return t.token, nil +} + +func TestTransportNilTokenSource(t *testing.T) { + tr := &Transport{} + server := newMockServer(func(w http.ResponseWriter, r *http.Request) {}) + defer server.Close() + client := &http.Client{Transport: tr} + resp, err := client.Get(server.URL) + if err == nil { + t.Errorf("got no errors, want an error with nil token source") + } + if resp != nil { + t.Errorf("Response = %v; want nil", resp) + } +} + +func TestTransportTokenSource(t *testing.T) { + ts := &tokenSource{ + token: &Token{ + AccessToken: "abc", + }, + } + tr := &Transport{ + Source: ts, + } + server := newMockServer(func(w http.ResponseWriter, r *http.Request) { + if got, want := r.Header.Get("Authorization"), "Bearer abc"; got != want { + t.Errorf("Authorization header = %q; want %q", got, want) + } + }) + defer server.Close() + client := &http.Client{Transport: tr} + res, err := client.Get(server.URL) + if err != nil { + t.Fatal(err) + } + res.Body.Close() +} + +// Test for case-sensitive token types, per https://github.com/golang/oauth2/issues/113 +func TestTransportTokenSourceTypes(t *testing.T) { + const val = "abc" + tests := []struct { + key string + val string + want string + }{ + {key: "bearer", val: val, want: "Bearer abc"}, + {key: "mac", val: val, want: "MAC abc"}, + {key: "basic", val: val, want: "Basic abc"}, + } + for _, tc := range tests { + ts := &tokenSource{ + token: &Token{ + AccessToken: tc.val, + TokenType: tc.key, + }, + } + tr := &Transport{ + Source: ts, + } + server := newMockServer(func(w http.ResponseWriter, r *http.Request) { + if got, want := r.Header.Get("Authorization"), tc.want; got != want { + t.Errorf("Authorization header (%q) = %q; want %q", val, got, want) + } + }) + defer server.Close() + client := &http.Client{Transport: tr} + res, err := client.Get(server.URL) + if err != nil { + t.Fatal(err) + } + res.Body.Close() + } +} + +func TestTokenValidNoAccessToken(t *testing.T) { + token := &Token{} + if token.Valid() { + t.Errorf("got valid with no access token; want invalid") + } +} + +func TestExpiredWithExpiry(t *testing.T) { + token := &Token{ + Expiry: time.Now().Add(-5 * time.Hour), + } + if token.Valid() { + t.Errorf("got valid with expired token; want invalid") + } +} + +func newMockServer(handler func(w http.ResponseWriter, r *http.Request)) *httptest.Server { + return httptest.NewServer(http.HandlerFunc(handler)) +} diff --git a/vendor/golang.org/x/oauth2/twitch/twitch.go b/vendor/golang.org/x/oauth2/twitch/twitch.go new file mode 100644 index 0000000..8c5f06a --- /dev/null +++ b/vendor/golang.org/x/oauth2/twitch/twitch.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package twitch provides constants for using OAuth2 to access Twitch. +package twitch // import "golang.org/x/oauth2/twitch" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Twitch's OAuth 2.0 endpoint. +// +// For more information see: +// https://dev.twitch.tv/docs/authentication +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://api.twitch.tv/kraken/oauth2/authorize", + TokenURL: "https://api.twitch.tv/kraken/oauth2/token", +} diff --git a/vendor/golang.org/x/oauth2/uber/uber.go b/vendor/golang.org/x/oauth2/uber/uber.go new file mode 100644 index 0000000..5520a64 --- /dev/null +++ b/vendor/golang.org/x/oauth2/uber/uber.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uber provides constants for using OAuth2 to access Uber. +package uber // import "golang.org/x/oauth2/uber" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Uber's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://login.uber.com/oauth/v2/authorize", + TokenURL: "https://login.uber.com/oauth/v2/token", +} diff --git a/vendor/golang.org/x/oauth2/vk/vk.go b/vendor/golang.org/x/oauth2/vk/vk.go new file mode 100644 index 0000000..bd8e159 --- /dev/null +++ b/vendor/golang.org/x/oauth2/vk/vk.go @@ -0,0 +1,16 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package vk provides constants for using OAuth2 to access VK.com. +package vk // import "golang.org/x/oauth2/vk" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is VK's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://oauth.vk.com/authorize", + TokenURL: "https://oauth.vk.com/access_token", +} diff --git a/vendor/golang.org/x/oauth2/yahoo/yahoo.go b/vendor/golang.org/x/oauth2/yahoo/yahoo.go new file mode 100644 index 0000000..9fa78a2 --- /dev/null +++ b/vendor/golang.org/x/oauth2/yahoo/yahoo.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package yahoo provides constants for using OAuth2 to access Yahoo. +package yahoo // import "golang.org/x/oauth2/yahoo" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Yahoo's OAuth 2.0 endpoint. +// See https://developer.yahoo.com/oauth2/guide/ +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://api.login.yahoo.com/oauth2/request_auth", + TokenURL: "https://api.login.yahoo.com/oauth2/get_token", +} diff --git a/vendor/golang.org/x/oauth2/yandex/yandex.go b/vendor/golang.org/x/oauth2/yandex/yandex.go new file mode 100644 index 0000000..5ebf666 --- /dev/null +++ b/vendor/golang.org/x/oauth2/yandex/yandex.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package yandex provides constants for using OAuth2 to access Yandex APIs. +package yandex // import "golang.org/x/oauth2/yandex" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is the Yandex OAuth 2.0 endpoint. 
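+//
+// A minimal configuration sketch (the client credentials are
+// placeholders, not real values):
+//
+//	conf := &oauth2.Config{
+//		ClientID:     "YOUR_CLIENT_ID",
+//		ClientSecret: "YOUR_CLIENT_SECRET",
+//		Endpoint:     yandex.Endpoint,
+//	}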
+var Endpoint = oauth2.Endpoint{ + AuthURL: "https://oauth.yandex.com/authorize", + TokenURL: "https://oauth.yandex.com/token", +} diff --git a/vendor/google.golang.org/appengine/.travis.yml b/vendor/google.golang.org/appengine/.travis.yml new file mode 100644 index 0000000..0762cb9 --- /dev/null +++ b/vendor/google.golang.org/appengine/.travis.yml @@ -0,0 +1,18 @@ +language: go + +go: + - 1.6.3 + - 1.7.1 + +install: + - go get -v -t -d google.golang.org/appengine/... + - mkdir sdk + - curl -o sdk.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.40.zip" + - unzip -q sdk.zip -d sdk + - export APPENGINE_DEV_APPSERVER=$(pwd)/sdk/go_appengine/dev_appserver.py + +script: + - go version + - go test -v google.golang.org/appengine/... + - go test -v -race google.golang.org/appengine/... + - sdk/go_appengine/goapp test -v google.golang.org/appengine/... diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/google.golang.org/appengine/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/vendor/google.golang.org/appengine/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md new file mode 100644 index 0000000..b6b11d9 --- /dev/null +++ b/vendor/google.golang.org/appengine/README.md @@ -0,0 +1,73 @@ +# Go App Engine packages + +[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine) + +This repository supports the Go runtime on App Engine, +including both the standard App Engine and the +"App Engine flexible environment" (formerly known as "Managed VMs"). +It provides APIs for interacting with App Engine services. +Its canonical import path is `google.golang.org/appengine`. + +See https://cloud.google.com/appengine/docs/go/ +for more information. + +File issue reports and feature requests on the [Google App Engine issue +tracker](https://code.google.com/p/googleappengine/issues/entry?template=Go%20defect). + +## Directory structure +The top level directory of this repository is the `appengine` package. 
It
+contains the basic APIs (e.g. `appengine.NewContext`) that apply across APIs.
+Specific API packages are in subdirectories (e.g. `datastore`).
+
+There is an `internal` subdirectory that contains service protocol buffers,
+plus packages required for connectivity to make API calls. App Engine apps
+should not directly import any package under `internal`.
+
+## Updating a Go App Engine app
+
+This section describes how to update an older Go App Engine app to use
+these packages. A provided tool, `aefix`, can help automate steps 2 and 3
+(run `go get google.golang.org/appengine/cmd/aefix` to install it), but
+read the details below since `aefix` can't perform all the changes.
+
+### 1. Update YAML files (App Engine flexible environment / Managed VMs only)
+
+The `app.yaml` file (and YAML files for modules) should have this new line added:
+```
+vm: true
+```
+See https://cloud.google.com/appengine/docs/go/modules/#Go_Instance_scaling_and_class for details.
+
+### 2. Update import paths
+
+The import paths for App Engine packages are now fully qualified, based at `google.golang.org/appengine`.
+You will need to update your code to use import paths starting with that; for instance,
+code importing `appengine/datastore` will now need to import `google.golang.org/appengine/datastore`.
+
+### 3. Update code using deprecated, removed or modified APIs
+
+Most App Engine services are available with exactly the same API.
+A few APIs were cleaned up, and some are not available yet.
+This list summarises the differences:
+
+* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`.
+* Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`.
+* `appengine.Timeout` has been removed. Use `context.WithTimeout` instead.
+* `appengine.Datacenter` now takes a `context.Context` argument.
+* `datastore.PropertyLoadSaver` has been simplified to use slices in place of channels.
+* `delay.Call` now returns an error.
+* `search.FieldLoadSaver` now handles document metadata.
+* `urlfetch.Transport` no longer has a Deadline field; set a deadline on the
+  `context.Context` instead.
+* `aetest` no longer declares its own Context type, and uses the standard one instead.
+* `taskqueue.QueueStats` no longer takes a maxTasks argument. That argument has been
+  deprecated and unused for a long time.
+* `appengine.BackendHostname` and `appengine.BackendInstance` were for the deprecated backends feature.
+  Use `appengine.ModuleHostname` and `appengine.ModuleName` instead.
+* Most of `appengine/file` and parts of `appengine/blobstore` are deprecated.
+  Use [Google Cloud Storage](https://godoc.org/cloud.google.com/go/storage) if the
+  feature you require is not present in the new
+  [blobstore package](https://google.golang.org/appengine/blobstore).
+* `appengine/socket` is not required on App Engine flexible environment / Managed VMs.
+  Use the standard `net` package instead.
diff --git a/vendor/google.golang.org/appengine/aetest/doc.go b/vendor/google.golang.org/appengine/aetest/doc.go
new file mode 100644
index 0000000..86ce8c2
--- /dev/null
+++ b/vendor/google.golang.org/appengine/aetest/doc.go
@@ -0,0 +1,42 @@
+/*
+Package aetest provides an API for running dev_appserver for use in tests.
+ +An example test file: + + package foo_test + + import ( + "testing" + + "google.golang.org/appengine/memcache" + "google.golang.org/appengine/aetest" + ) + + func TestFoo(t *testing.T) { + ctx, done, err := aetest.NewContext() + if err != nil { + t.Fatal(err) + } + defer done() + + it := &memcache.Item{ + Key: "some-key", + Value: []byte("some-value"), + } + err = memcache.Set(ctx, it) + if err != nil { + t.Fatalf("Set err: %v", err) + } + it, err = memcache.Get(ctx, "some-key") + if err != nil { + t.Fatalf("Get err: %v; want no error", err) + } + if g, w := string(it.Value), "some-value" ; g != w { + t.Errorf("retrieved Item.Value = %q, want %q", g, w) + } + } + +The environment variable APPENGINE_DEV_APPSERVER specifies the location of the +dev_appserver.py executable to use. If unset, the system PATH is consulted. +*/ +package aetest diff --git a/vendor/google.golang.org/appengine/aetest/instance.go b/vendor/google.golang.org/appengine/aetest/instance.go new file mode 100644 index 0000000..a8f99d8 --- /dev/null +++ b/vendor/google.golang.org/appengine/aetest/instance.go @@ -0,0 +1,51 @@ +package aetest + +import ( + "io" + "net/http" + + "golang.org/x/net/context" + "google.golang.org/appengine" +) + +// Instance represents a running instance of the development API Server. +type Instance interface { + // Close kills the child api_server.py process, releasing its resources. + io.Closer + // NewRequest returns an *http.Request associated with this instance. + NewRequest(method, urlStr string, body io.Reader) (*http.Request, error) +} + +// Options is used to specify options when creating an Instance. +type Options struct { + // AppID specifies the App ID to use during tests. + // By default, "testapp". + AppID string + // StronglyConsistentDatastore is whether the local datastore should be + // strongly consistent. This will diverge from production behaviour. + StronglyConsistentDatastore bool +} + +// NewContext starts an instance of the development API server, and returns +// a context that will route all API calls to that server, as well as a +// closure that must be called when the Context is no longer required. +func NewContext() (context.Context, func(), error) { + inst, err := NewInstance(nil) + if err != nil { + return nil, nil, err + } + req, err := inst.NewRequest("GET", "/", nil) + if err != nil { + inst.Close() + return nil, nil, err + } + ctx := appengine.NewContext(req) + return ctx, func() { + inst.Close() + }, nil +} + +// PrepareDevAppserver is a hook which, if set, will be called before the +// dev_appserver.py is started, each time it is started. If aetest.NewContext +// is invoked from the goapp test tool, this hook is unnecessary. +var PrepareDevAppserver func() error diff --git a/vendor/google.golang.org/appengine/aetest/instance_classic.go b/vendor/google.golang.org/appengine/aetest/instance_classic.go new file mode 100644 index 0000000..fbceaa5 --- /dev/null +++ b/vendor/google.golang.org/appengine/aetest/instance_classic.go @@ -0,0 +1,21 @@ +// +build appengine + +package aetest + +import "appengine/aetest" + +// NewInstance launches a running instance of api_server.py which can be used +// for multiple test Contexts that delegate all App Engine API calls to that +// instance. +// If opts is nil the default values are used. 
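+//
+// A typical use in a test, mirroring aetest/instance_test.go:
+//
+//	inst, err := aetest.NewInstance(nil)
+//	if err != nil {
+//		t.Fatalf("NewInstance: %v", err)
+//	}
+//	defer inst.Close()
+//
+//	req, err := inst.NewRequest("GET", "http://example.com/page", nil)
+//	if err != nil {
+//		t.Fatalf("NewRequest: %v", err)
+//	}
+//	ctx := appengine.NewContext(req)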
+func NewInstance(opts *Options) (Instance, error) { + aetest.PrepareDevAppserver = PrepareDevAppserver + var aeOpts *aetest.Options + if opts != nil { + aeOpts = &aetest.Options{ + AppID: opts.AppID, + StronglyConsistentDatastore: opts.StronglyConsistentDatastore, + } + } + return aetest.NewInstance(aeOpts) +} diff --git a/vendor/google.golang.org/appengine/aetest/instance_test.go b/vendor/google.golang.org/appengine/aetest/instance_test.go new file mode 100644 index 0000000..edc3ecd --- /dev/null +++ b/vendor/google.golang.org/appengine/aetest/instance_test.go @@ -0,0 +1,116 @@ +package aetest + +import ( + "os" + "testing" + + "google.golang.org/appengine" + "google.golang.org/appengine/datastore" + "google.golang.org/appengine/memcache" + "google.golang.org/appengine/user" +) + +func TestBasicAPICalls(t *testing.T) { + // Only run the test if APPENGINE_DEV_APPSERVER is explicitly set. + if os.Getenv("APPENGINE_DEV_APPSERVER") == "" { + t.Skip("APPENGINE_DEV_APPSERVER not set") + } + + inst, err := NewInstance(nil) + if err != nil { + t.Fatalf("NewInstance: %v", err) + } + defer inst.Close() + + req, err := inst.NewRequest("GET", "http://example.com/page", nil) + if err != nil { + t.Fatalf("NewRequest: %v", err) + } + ctx := appengine.NewContext(req) + + it := &memcache.Item{ + Key: "some-key", + Value: []byte("some-value"), + } + err = memcache.Set(ctx, it) + if err != nil { + t.Fatalf("Set err: %v", err) + } + it, err = memcache.Get(ctx, "some-key") + if err != nil { + t.Fatalf("Get err: %v; want no error", err) + } + if g, w := string(it.Value), "some-value"; g != w { + t.Errorf("retrieved Item.Value = %q, want %q", g, w) + } + + type Entity struct{ Value string } + e := &Entity{Value: "foo"} + k := datastore.NewIncompleteKey(ctx, "Entity", nil) + k, err = datastore.Put(ctx, k, e) + if err != nil { + t.Fatalf("datastore.Put: %v", err) + } + e = new(Entity) + if err := datastore.Get(ctx, k, e); err != nil { + t.Fatalf("datastore.Get: %v", err) + } + if g, w := e.Value, "foo"; g != w { + t.Errorf("retrieved Entity.Value = %q, want %q", g, w) + } +} + +func TestContext(t *testing.T) { + // Only run the test if APPENGINE_DEV_APPSERVER is explicitly set. + if os.Getenv("APPENGINE_DEV_APPSERVER") == "" { + t.Skip("APPENGINE_DEV_APPSERVER not set") + } + + // Check that the context methods work. + _, done, err := NewContext() + if err != nil { + t.Fatalf("NewContext: %v", err) + } + done() +} + +func TestUsers(t *testing.T) { + // Only run the test if APPENGINE_DEV_APPSERVER is explicitly set. 
+ if os.Getenv("APPENGINE_DEV_APPSERVER") == "" { + t.Skip("APPENGINE_DEV_APPSERVER not set") + } + + inst, err := NewInstance(nil) + if err != nil { + t.Fatalf("NewInstance: %v", err) + } + defer inst.Close() + + req, err := inst.NewRequest("GET", "http://example.com/page", nil) + if err != nil { + t.Fatalf("NewRequest: %v", err) + } + ctx := appengine.NewContext(req) + + if user := user.Current(ctx); user != nil { + t.Errorf("user.Current initially %v, want nil", user) + } + + u := &user.User{ + Email: "gopher@example.com", + Admin: true, + } + Login(u, req) + + if got := user.Current(ctx); got.Email != u.Email { + t.Errorf("user.Current: %v, want %v", got, u) + } + if admin := user.IsAdmin(ctx); !admin { + t.Errorf("user.IsAdmin: %t, want true", admin) + } + + Logout(req) + if user := user.Current(ctx); user != nil { + t.Errorf("user.Current after logout %v, want nil", user) + } +} diff --git a/vendor/google.golang.org/appengine/aetest/instance_vm.go b/vendor/google.golang.org/appengine/aetest/instance_vm.go new file mode 100644 index 0000000..ee81480 --- /dev/null +++ b/vendor/google.golang.org/appengine/aetest/instance_vm.go @@ -0,0 +1,276 @@ +// +build !appengine + +package aetest + +import ( + "bufio" + "crypto/rand" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "os/exec" + "path/filepath" + "regexp" + "time" + + "golang.org/x/net/context" + "google.golang.org/appengine/internal" +) + +// NewInstance launches a running instance of api_server.py which can be used +// for multiple test Contexts that delegate all App Engine API calls to that +// instance. +// If opts is nil the default values are used. +func NewInstance(opts *Options) (Instance, error) { + i := &instance{ + opts: opts, + appID: "testapp", + } + if opts != nil && opts.AppID != "" { + i.appID = opts.AppID + } + if err := i.startChild(); err != nil { + return nil, err + } + return i, nil +} + +func newSessionID() string { + var buf [16]byte + io.ReadFull(rand.Reader, buf[:]) + return fmt.Sprintf("%x", buf[:]) +} + +// instance implements the Instance interface. +type instance struct { + opts *Options + child *exec.Cmd + apiURL *url.URL // base URL of API HTTP server + adminURL string // base URL of admin HTTP server + appDir string + appID string + relFuncs []func() // funcs to release any associated contexts +} + +// NewRequest returns an *http.Request associated with this instance. +func (i *instance) NewRequest(method, urlStr string, body io.Reader) (*http.Request, error) { + req, err := http.NewRequest(method, urlStr, body) + if err != nil { + return nil, err + } + + // Associate this request. + release := internal.RegisterTestRequest(req, i.apiURL, func(ctx context.Context) context.Context { + ctx = internal.WithAppIDOverride(ctx, "dev~"+i.appID) + return ctx + }) + i.relFuncs = append(i.relFuncs, release) + + return req, nil +} + +// Close kills the child api_server.py process, releasing its resources. +func (i *instance) Close() (err error) { + for _, rel := range i.relFuncs { + rel() + } + i.relFuncs = nil + if i.child == nil { + return nil + } + defer func() { + i.child = nil + err1 := os.RemoveAll(i.appDir) + if err == nil { + err = err1 + } + }() + + if p := i.child.Process; p != nil { + errc := make(chan error, 1) + go func() { + errc <- i.child.Wait() + }() + + // Call the quit handler on the admin server. 
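+		// (try a graceful shutdown first; if the request fails, or the
+		// child has not exited within the 15-second timeout below, fall
+		// back to killing the process)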
+ res, err := http.Get(i.adminURL + "/quit") + if err != nil { + p.Kill() + return fmt.Errorf("unable to call /quit handler: %v", err) + } + res.Body.Close() + + select { + case <-time.After(15 * time.Second): + p.Kill() + return errors.New("timeout killing child process") + case err = <-errc: + // Do nothing. + } + } + return +} + +func fileExists(path string) bool { + _, err := os.Stat(path) + return err == nil +} + +func findPython() (path string, err error) { + for _, name := range []string{"python2.7", "python"} { + path, err = exec.LookPath(name) + if err == nil { + return + } + } + return +} + +func findDevAppserver() (string, error) { + if p := os.Getenv("APPENGINE_DEV_APPSERVER"); p != "" { + if fileExists(p) { + return p, nil + } + return "", fmt.Errorf("invalid APPENGINE_DEV_APPSERVER environment variable; path %q doesn't exist", p) + } + return exec.LookPath("dev_appserver.py") +} + +var apiServerAddrRE = regexp.MustCompile(`Starting API server at: (\S+)`) +var adminServerAddrRE = regexp.MustCompile(`Starting admin server at: (\S+)`) + +func (i *instance) startChild() (err error) { + if PrepareDevAppserver != nil { + if err := PrepareDevAppserver(); err != nil { + return err + } + } + python, err := findPython() + if err != nil { + return fmt.Errorf("Could not find python interpreter: %v", err) + } + devAppserver, err := findDevAppserver() + if err != nil { + return fmt.Errorf("Could not find dev_appserver.py: %v", err) + } + + i.appDir, err = ioutil.TempDir("", "appengine-aetest") + if err != nil { + return err + } + defer func() { + if err != nil { + os.RemoveAll(i.appDir) + } + }() + err = os.Mkdir(filepath.Join(i.appDir, "app"), 0755) + if err != nil { + return err + } + err = ioutil.WriteFile(filepath.Join(i.appDir, "app", "app.yaml"), []byte(i.appYAML()), 0644) + if err != nil { + return err + } + err = ioutil.WriteFile(filepath.Join(i.appDir, "app", "stubapp.go"), []byte(appSource), 0644) + if err != nil { + return err + } + + appserverArgs := []string{ + devAppserver, + "--port=0", + "--api_port=0", + "--admin_port=0", + "--automatic_restart=false", + "--skip_sdk_update_check=true", + "--clear_datastore=true", + "--clear_search_indexes=true", + "--datastore_path", filepath.Join(i.appDir, "datastore"), + } + if i.opts != nil && i.opts.StronglyConsistentDatastore { + appserverArgs = append(appserverArgs, "--datastore_consistency_policy=consistent") + } + appserverArgs = append(appserverArgs, filepath.Join(i.appDir, "app")) + + i.child = exec.Command(python, + appserverArgs..., + ) + i.child.Stdout = os.Stdout + var stderr io.Reader + stderr, err = i.child.StderrPipe() + if err != nil { + return err + } + stderr = io.TeeReader(stderr, os.Stderr) + if err = i.child.Start(); err != nil { + return err + } + + // Read stderr until we have read the URLs of the API server and admin interface. 
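+	// dev_appserver.py announces them on stderr with lines of the form
+	//	Starting API server at: <URL>
+	//	Starting admin server at: <URL>
+	// which apiServerAddrRE and adminServerAddrRE above match.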
+ errc := make(chan error, 1) + go func() { + s := bufio.NewScanner(stderr) + for s.Scan() { + if match := apiServerAddrRE.FindStringSubmatch(s.Text()); match != nil { + u, err := url.Parse(match[1]) + if err != nil { + errc <- fmt.Errorf("failed to parse API URL %q: %v", match[1], err) + return + } + i.apiURL = u + } + if match := adminServerAddrRE.FindStringSubmatch(s.Text()); match != nil { + i.adminURL = match[1] + } + if i.adminURL != "" && i.apiURL != nil { + break + } + } + errc <- s.Err() + }() + + select { + case <-time.After(15 * time.Second): + if p := i.child.Process; p != nil { + p.Kill() + } + return errors.New("timeout starting child process") + case err := <-errc: + if err != nil { + return fmt.Errorf("error reading child process stderr: %v", err) + } + } + if i.adminURL == "" { + return errors.New("unable to find admin server URL") + } + if i.apiURL == nil { + return errors.New("unable to find API server URL") + } + return nil +} + +func (i *instance) appYAML() string { + return fmt.Sprintf(appYAMLTemplate, i.appID) +} + +const appYAMLTemplate = ` +application: %s +version: 1 +runtime: go +api_version: go1 +vm: true + +handlers: +- url: /.* + script: _go_app +` + +const appSource = ` +package main +import "google.golang.org/appengine" +func main() { appengine.Main() } +` diff --git a/vendor/google.golang.org/appengine/aetest/user.go b/vendor/google.golang.org/appengine/aetest/user.go new file mode 100644 index 0000000..bf9266f --- /dev/null +++ b/vendor/google.golang.org/appengine/aetest/user.go @@ -0,0 +1,36 @@ +package aetest + +import ( + "hash/crc32" + "net/http" + "strconv" + + "google.golang.org/appengine/user" +) + +// Login causes the provided Request to act as though issued by the given user. +func Login(u *user.User, req *http.Request) { + req.Header.Set("X-AppEngine-User-Email", u.Email) + id := u.ID + if id == "" { + id = strconv.Itoa(int(crc32.Checksum([]byte(u.Email), crc32.IEEETable))) + } + req.Header.Set("X-AppEngine-User-Id", id) + req.Header.Set("X-AppEngine-User-Federated-Identity", u.Email) + req.Header.Set("X-AppEngine-User-Federated-Provider", u.FederatedProvider) + if u.Admin { + req.Header.Set("X-AppEngine-User-Is-Admin", "1") + } else { + req.Header.Set("X-AppEngine-User-Is-Admin", "0") + } +} + +// Logout causes the provided Request to act as though issued by a logged-out +// user. +func Logout(req *http.Request) { + req.Header.Del("X-AppEngine-User-Email") + req.Header.Del("X-AppEngine-User-Id") + req.Header.Del("X-AppEngine-User-Is-Admin") + req.Header.Del("X-AppEngine-User-Federated-Identity") + req.Header.Del("X-AppEngine-User-Federated-Provider") +} diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go new file mode 100644 index 0000000..475cf2e --- /dev/null +++ b/vendor/google.golang.org/appengine/appengine.go @@ -0,0 +1,112 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package appengine provides basic functionality for Google App Engine. +// +// For more information on how to write Go apps for Google App Engine, see: +// https://cloud.google.com/appengine/docs/go/ +package appengine // import "google.golang.org/appengine" + +import ( + "net/http" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" +) + +// The gophers party all night; the rabbits provide the beats. 
+ +// Main is the principal entry point for an app running in App Engine. +// +// On App Engine Flexible it installs a trivial health checker if one isn't +// already registered, and starts listening on port 8080 (overridden by the +// $PORT environment variable). +// +// See https://cloud.google.com/appengine/docs/flexible/custom-runtimes#health_check_requests +// for details on how to do your own health checking. +// +// Main is not yet supported on App Engine Standard. +// +// Main never returns. +// +// Main is designed so that the app's main package looks like this: +// +// package main +// +// import ( +// "google.golang.org/appengine" +// +// _ "myapp/package0" +// _ "myapp/package1" +// ) +// +// func main() { +// appengine.Main() +// } +// +// The "myapp/packageX" packages are expected to register HTTP handlers +// in their init functions. +func Main() { + internal.Main() +} + +// IsDevAppServer reports whether the App Engine app is running in the +// development App Server. +func IsDevAppServer() bool { + return internal.IsDevAppServer() +} + +// NewContext returns a context for an in-flight HTTP request. +// This function is cheap. +func NewContext(req *http.Request) context.Context { + return WithContext(context.Background(), req) +} + +// WithContext returns a copy of the parent context +// and associates it with an in-flight HTTP request. +// This function is cheap. +func WithContext(parent context.Context, req *http.Request) context.Context { + return internal.WithContext(parent, req) +} + +// TODO(dsymonds): Add a Call function here? Otherwise other packages can't access internal.Call. + +// BlobKey is a key for a blobstore blob. +// +// Conceptually, this type belongs in the blobstore package, but it lives in +// the appengine package to avoid a circular dependency: blobstore depends on +// datastore, and datastore needs to refer to the BlobKey type. +type BlobKey string + +// GeoPoint represents a location as latitude/longitude in degrees. +type GeoPoint struct { + Lat, Lng float64 +} + +// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude. +func (g GeoPoint) Valid() bool { + return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180 +} + +// APICallFunc defines a function type for handling an API call. +// See WithCallOverride. +type APICallFunc func(ctx context.Context, service, method string, in, out proto.Message) error + +// WithAPICallFunc returns a copy of the parent context +// that will cause API calls to invoke f instead of their normal operation. +// +// This is intended for advanced users only. +func WithAPICallFunc(ctx context.Context, f APICallFunc) context.Context { + return internal.WithCallOverride(ctx, internal.CallOverrideFunc(f)) +} + +// APICall performs an API call. +// +// This is not intended for general use; it is exported for use in conjunction +// with WithAPICallFunc. +func APICall(ctx context.Context, service, method string, in, out proto.Message) error { + return internal.Call(ctx, service, method, in, out) +} diff --git a/vendor/google.golang.org/appengine/appengine_test.go b/vendor/google.golang.org/appengine/appengine_test.go new file mode 100644 index 0000000..f1cf0a1 --- /dev/null +++ b/vendor/google.golang.org/appengine/appengine_test.go @@ -0,0 +1,49 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
+
+package appengine
+
+import (
+	"testing"
+)
+
+func TestValidGeoPoint(t *testing.T) {
+	testCases := []struct {
+		desc string
+		pt   GeoPoint
+		want bool
+	}{
+		{
+			"valid",
+			GeoPoint{67.21, 13.37},
+			true,
+		},
+		{
+			"low lat",
+			GeoPoint{-90.01, 13.37},
+			false,
+		},
+		{
+			"high lat",
+			GeoPoint{90.01, 13.37},
+			false,
+		},
+		{
+			"high lng",
+			GeoPoint{67.21, 182},
+			false,
+		},
+		{
+			"low lng",
+			GeoPoint{67.21, -181},
+			false,
+		},
+	}
+
+	for _, tc := range testCases {
+		if got := tc.pt.Valid(); got != tc.want {
+			t.Errorf("%s: got %v, want %v", tc.desc, got, tc.want)
+		}
+	}
+}
diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go
new file mode 100644
index 0000000..f4b645a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/appengine_vm.go
@@ -0,0 +1,20 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package appengine
+
+import (
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine/internal"
+)
+
+// BackgroundContext returns a context not associated with a request.
+// This should only be used when not servicing a request.
+// This only works in App Engine "flexible environment".
+func BackgroundContext() context.Context {
+	return internal.BackgroundContext()
+}
diff --git a/vendor/google.golang.org/appengine/blobstore/blobstore.go b/vendor/google.golang.org/appengine/blobstore/blobstore.go
new file mode 100644
index 0000000..1c8087b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/blobstore/blobstore.go
@@ -0,0 +1,276 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package blobstore provides a client for App Engine's persistent blob
+// storage service.
+package blobstore // import "google.golang.org/appengine/blobstore"
+
+import (
+	"bufio"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"mime"
+	"mime/multipart"
+	"net/http"
+	"net/textproto"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine"
+	"google.golang.org/appengine/datastore"
+	"google.golang.org/appengine/internal"
+
+	basepb "google.golang.org/appengine/internal/base"
+	blobpb "google.golang.org/appengine/internal/blobstore"
+)
+
+const (
+	blobInfoKind      = "__BlobInfo__"
+	blobFileIndexKind = "__BlobFileIndex__"
+	zeroKey           = appengine.BlobKey("")
+)
+
+// BlobInfo is the blob metadata that is stored in the datastore.
+// Filename may be empty.
+type BlobInfo struct {
+	BlobKey      appengine.BlobKey
+	ContentType  string    `datastore:"content_type"`
+	CreationTime time.Time `datastore:"creation"`
+	Filename     string    `datastore:"filename"`
+	Size         int64     `datastore:"size"`
+	MD5          string    `datastore:"md5_hash"`
+
+	// ObjectName is the Google Cloud Storage name for this blob.
+	ObjectName string `datastore:"gs_object_name"`
+}
+
+// isErrFieldMismatch returns whether err is a datastore.ErrFieldMismatch.
+//
+// The blobstore stores blob metadata in the datastore. When loading that
+// metadata, it may contain fields that we don't care about. datastore.Get will
+// return datastore.ErrFieldMismatch in that case, so we ignore that specific
+// error.
+func isErrFieldMismatch(err error) bool { + _, ok := err.(*datastore.ErrFieldMismatch) + return ok +} + +// Stat returns the BlobInfo for a provided blobKey. If no blob was found for +// that key, Stat returns datastore.ErrNoSuchEntity. +func Stat(c context.Context, blobKey appengine.BlobKey) (*BlobInfo, error) { + c, _ = appengine.Namespace(c, "") // Blobstore is always in the empty string namespace + dskey := datastore.NewKey(c, blobInfoKind, string(blobKey), 0, nil) + bi := &BlobInfo{ + BlobKey: blobKey, + } + if err := datastore.Get(c, dskey, bi); err != nil && !isErrFieldMismatch(err) { + return nil, err + } + return bi, nil +} + +// Send sets the headers on response to instruct App Engine to send a blob as +// the response body. This is more efficient than reading and writing it out +// manually and isn't subject to normal response size limits. +func Send(response http.ResponseWriter, blobKey appengine.BlobKey) { + hdr := response.Header() + hdr.Set("X-AppEngine-BlobKey", string(blobKey)) + + if hdr.Get("Content-Type") == "" { + // This value is known to dev_appserver to mean automatic. + // In production this is remapped to the empty value which + // means automatic. + hdr.Set("Content-Type", "application/vnd.google.appengine.auto") + } +} + +// UploadURL creates an upload URL for the form that the user will +// fill out, passing the application path to load when the POST of the +// form is completed. These URLs expire and should not be reused. The +// opts parameter may be nil. +func UploadURL(c context.Context, successPath string, opts *UploadURLOptions) (*url.URL, error) { + req := &blobpb.CreateUploadURLRequest{ + SuccessPath: proto.String(successPath), + } + if opts != nil { + if n := opts.MaxUploadBytes; n != 0 { + req.MaxUploadSizeBytes = &n + } + if n := opts.MaxUploadBytesPerBlob; n != 0 { + req.MaxUploadSizePerBlobBytes = &n + } + if s := opts.StorageBucket; s != "" { + req.GsBucketName = &s + } + } + res := &blobpb.CreateUploadURLResponse{} + if err := internal.Call(c, "blobstore", "CreateUploadURL", req, res); err != nil { + return nil, err + } + return url.Parse(*res.Url) +} + +// UploadURLOptions are the options to create an upload URL. +type UploadURLOptions struct { + MaxUploadBytes int64 // optional + MaxUploadBytesPerBlob int64 // optional + + // StorageBucket specifies the Google Cloud Storage bucket in which + // to store the blob. + // This is required if you use Cloud Storage instead of Blobstore. + // Your application must have permission to write to the bucket. + // You may optionally specify a bucket name and path in the format + // "bucket_name/path", in which case the included path will be the + // prefix of the uploaded object's name. + StorageBucket string +} + +// Delete deletes a blob. +func Delete(c context.Context, blobKey appengine.BlobKey) error { + return DeleteMulti(c, []appengine.BlobKey{blobKey}) +} + +// DeleteMulti deletes multiple blobs. +func DeleteMulti(c context.Context, blobKey []appengine.BlobKey) error { + s := make([]string, len(blobKey)) + for i, b := range blobKey { + s[i] = string(b) + } + req := &blobpb.DeleteBlobRequest{ + BlobKey: s, + } + res := &basepb.VoidProto{} + if err := internal.Call(c, "blobstore", "DeleteBlob", req, res); err != nil { + return err + } + return nil +} + +func errorf(format string, args ...interface{}) error { + return fmt.Errorf("blobstore: "+format, args...) +} + +// ParseUpload parses the synthetic POST request that your app gets from +// App Engine after a user's successful upload of blobs. 
Given the request, +// ParseUpload returns a map of the blobs received (keyed by HTML form +// element name) and other non-blob POST parameters. +func ParseUpload(req *http.Request) (blobs map[string][]*BlobInfo, other url.Values, err error) { + _, params, err := mime.ParseMediaType(req.Header.Get("Content-Type")) + if err != nil { + return nil, nil, err + } + boundary := params["boundary"] + if boundary == "" { + return nil, nil, errorf("did not find MIME multipart boundary") + } + + blobs = make(map[string][]*BlobInfo) + other = make(url.Values) + + mreader := multipart.NewReader(io.MultiReader(req.Body, strings.NewReader("\r\n\r\n")), boundary) + for { + part, perr := mreader.NextPart() + if perr == io.EOF { + break + } + if perr != nil { + return nil, nil, errorf("error reading next mime part with boundary %q (len=%d): %v", + boundary, len(boundary), perr) + } + + bi := &BlobInfo{} + ctype, params, err := mime.ParseMediaType(part.Header.Get("Content-Disposition")) + if err != nil { + return nil, nil, err + } + bi.Filename = params["filename"] + formKey := params["name"] + + ctype, params, err = mime.ParseMediaType(part.Header.Get("Content-Type")) + if err != nil { + return nil, nil, err + } + bi.BlobKey = appengine.BlobKey(params["blob-key"]) + if ctype != "message/external-body" || bi.BlobKey == "" { + if formKey != "" { + slurp, serr := ioutil.ReadAll(part) + if serr != nil { + return nil, nil, errorf("error reading %q MIME part", formKey) + } + other[formKey] = append(other[formKey], string(slurp)) + } + continue + } + + // App Engine sends a MIME header as the body of each MIME part. + tp := textproto.NewReader(bufio.NewReader(part)) + header, mimeerr := tp.ReadMIMEHeader() + if mimeerr != nil { + return nil, nil, mimeerr + } + bi.Size, err = strconv.ParseInt(header.Get("Content-Length"), 10, 64) + if err != nil { + return nil, nil, err + } + bi.ContentType = header.Get("Content-Type") + + // Parse the time from the MIME header like: + // X-AppEngine-Upload-Creation: 2011-03-15 21:38:34.712136 + createDate := header.Get("X-AppEngine-Upload-Creation") + if createDate == "" { + return nil, nil, errorf("expected to find an X-AppEngine-Upload-Creation header") + } + bi.CreationTime, err = time.Parse("2006-01-02 15:04:05.000000", createDate) + if err != nil { + return nil, nil, errorf("error parsing X-AppEngine-Upload-Creation: %s", err) + } + + if hdr := header.Get("Content-MD5"); hdr != "" { + md5, err := base64.URLEncoding.DecodeString(hdr) + if err != nil { + return nil, nil, errorf("bad Content-MD5 %q: %v", hdr, err) + } + bi.MD5 = string(md5) + } + + // If the GCS object name was provided, record it. + bi.ObjectName = header.Get("X-AppEngine-Cloud-Storage-Object") + + blobs[formKey] = append(blobs[formKey], bi) + } + return +} + +// Reader is a blob reader. +type Reader interface { + io.Reader + io.ReaderAt + io.Seeker +} + +// NewReader returns a reader for a blob. It always succeeds; if the blob does +// not exist then an error will be reported upon first read. +func NewReader(c context.Context, blobKey appengine.BlobKey) Reader { + return openBlob(c, blobKey) +} + +// BlobKeyForFile returns a BlobKey for a Google Storage file. +// The filename should be of the form "/gs/bucket_name/object_name". 
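+//
+// For example (bucket and object names are illustrative):
+//
+//	blobKey, err := blobstore.BlobKeyForFile(c, "/gs/my-bucket/photos/a.jpg")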
+func BlobKeyForFile(c context.Context, filename string) (appengine.BlobKey, error) { + req := &blobpb.CreateEncodedGoogleStorageKeyRequest{ + Filename: &filename, + } + res := &blobpb.CreateEncodedGoogleStorageKeyResponse{} + if err := internal.Call(c, "blobstore", "CreateEncodedGoogleStorageKey", req, res); err != nil { + return "", err + } + return appengine.BlobKey(*res.BlobKey), nil +} diff --git a/vendor/google.golang.org/appengine/blobstore/blobstore_test.go b/vendor/google.golang.org/appengine/blobstore/blobstore_test.go new file mode 100644 index 0000000..c2be7ef --- /dev/null +++ b/vendor/google.golang.org/appengine/blobstore/blobstore_test.go @@ -0,0 +1,183 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package blobstore + +import ( + "io" + "os" + "strconv" + "strings" + "testing" + + "google.golang.org/appengine" + "google.golang.org/appengine/internal/aetesting" + + pb "google.golang.org/appengine/internal/blobstore" +) + +const rbs = readBufferSize + +func min(x, y int) int { + if x < y { + return x + } + return y +} + +func fakeFetchData(req *pb.FetchDataRequest, res *pb.FetchDataResponse) error { + i0 := int(*req.StartIndex) + i1 := int(*req.EndIndex + 1) // Blobstore's end-indices are inclusive; Go's are exclusive. + bk := *req.BlobKey + if i := strings.Index(bk, "."); i != -1 { + // Strip everything past the ".". + bk = bk[:i] + } + switch bk { + case "a14p": + const s = "abcdefghijklmnop" + i0 := min(len(s), i0) + i1 := min(len(s), i1) + res.Data = []byte(s[i0:i1]) + case "longBlob": + res.Data = make([]byte, i1-i0) + for i := range res.Data { + res.Data[i] = 'A' + uint8(i0/rbs) + i0++ + } + } + return nil +} + +// step is one step of a readerTest. +// It consists of a Reader method to call, the method arguments +// (lenp, offset, whence) and the expected results. +type step struct { + method string + lenp int + offset int64 + whence int + want string + wantErr error +} + +var readerTest = []struct { + blobKey string + step []step +}{ + {"noSuchBlobKey", []step{ + {"Read", 8, 0, 0, "", io.EOF}, + }}, + {"a14p.0", []step{ + // Test basic reads. + {"Read", 1, 0, 0, "a", nil}, + {"Read", 3, 0, 0, "bcd", nil}, + {"Read", 1, 0, 0, "e", nil}, + {"Read", 2, 0, 0, "fg", nil}, + // Test Seek. + {"Seek", 0, 2, os.SEEK_SET, "2", nil}, + {"Read", 5, 0, 0, "cdefg", nil}, + {"Seek", 0, 2, os.SEEK_CUR, "9", nil}, + {"Read", 1, 0, 0, "j", nil}, + // Test reads up to and past EOF. + {"Read", 5, 0, 0, "klmno", nil}, + {"Read", 5, 0, 0, "p", nil}, + {"Read", 5, 0, 0, "", io.EOF}, + // Test ReadAt. + {"ReadAt", 4, 0, 0, "abcd", nil}, + {"ReadAt", 4, 3, 0, "defg", nil}, + {"ReadAt", 4, 12, 0, "mnop", nil}, + {"ReadAt", 4, 13, 0, "nop", io.EOF}, + {"ReadAt", 4, 99, 0, "", io.EOF}, + }}, + {"a14p.1", []step{ + // Test Seek before any reads. + {"Seek", 0, 2, os.SEEK_SET, "2", nil}, + {"Read", 1, 0, 0, "c", nil}, + // Test that ReadAt doesn't affect the Read offset. + {"ReadAt", 3, 9, 0, "jkl", nil}, + {"Read", 3, 0, 0, "def", nil}, + }}, + {"a14p.2", []step{ + // Test ReadAt before any reads or seeks. + {"ReadAt", 2, 14, 0, "op", nil}, + }}, + {"longBlob.0", []step{ + // Test basic read. + {"Read", 1, 0, 0, "A", nil}, + // Test that Read returns early when the buffer is exhausted. + {"Seek", 0, rbs - 2, os.SEEK_SET, strconv.Itoa(rbs - 2), nil}, + {"Read", 5, 0, 0, "AA", nil}, + {"Read", 3, 0, 0, "BBB", nil}, + // Test that what we just read is still in the buffer. 
+ {"Seek", 0, rbs - 2, os.SEEK_SET, strconv.Itoa(rbs - 2), nil}, + {"Read", 5, 0, 0, "AABBB", nil}, + // Test ReadAt. + {"ReadAt", 3, rbs - 4, 0, "AAA", nil}, + {"ReadAt", 6, rbs - 4, 0, "AAAABB", nil}, + {"ReadAt", 8, rbs - 4, 0, "AAAABBBB", nil}, + {"ReadAt", 5, rbs - 4, 0, "AAAAB", nil}, + {"ReadAt", 2, rbs - 4, 0, "AA", nil}, + // Test seeking backwards from the Read offset. + {"Seek", 0, 2*rbs - 8, os.SEEK_SET, strconv.Itoa(2*rbs - 8), nil}, + {"Read", 1, 0, 0, "B", nil}, + {"Read", 1, 0, 0, "B", nil}, + {"Read", 1, 0, 0, "B", nil}, + {"Read", 1, 0, 0, "B", nil}, + {"Read", 8, 0, 0, "BBBBCCCC", nil}, + }}, + {"longBlob.1", []step{ + // Test ReadAt with a slice larger than the buffer size. + {"LargeReadAt", 2*rbs - 2, 0, 0, strconv.Itoa(2*rbs - 2), nil}, + {"LargeReadAt", 2*rbs - 1, 0, 0, strconv.Itoa(2*rbs - 1), nil}, + {"LargeReadAt", 2*rbs + 0, 0, 0, strconv.Itoa(2*rbs + 0), nil}, + {"LargeReadAt", 2*rbs + 1, 0, 0, strconv.Itoa(2*rbs + 1), nil}, + {"LargeReadAt", 2*rbs + 2, 0, 0, strconv.Itoa(2*rbs + 2), nil}, + {"LargeReadAt", 2*rbs - 2, 1, 0, strconv.Itoa(2*rbs - 2), nil}, + {"LargeReadAt", 2*rbs - 1, 1, 0, strconv.Itoa(2*rbs - 1), nil}, + {"LargeReadAt", 2*rbs + 0, 1, 0, strconv.Itoa(2*rbs + 0), nil}, + {"LargeReadAt", 2*rbs + 1, 1, 0, strconv.Itoa(2*rbs + 1), nil}, + {"LargeReadAt", 2*rbs + 2, 1, 0, strconv.Itoa(2*rbs + 2), nil}, + }}, +} + +func TestReader(t *testing.T) { + for _, rt := range readerTest { + c := aetesting.FakeSingleContext(t, "blobstore", "FetchData", fakeFetchData) + r := NewReader(c, appengine.BlobKey(rt.blobKey)) + for i, step := range rt.step { + var ( + got string + gotErr error + n int + offset int64 + ) + switch step.method { + case "LargeReadAt": + p := make([]byte, step.lenp) + n, gotErr = r.ReadAt(p, step.offset) + got = strconv.Itoa(n) + case "Read": + p := make([]byte, step.lenp) + n, gotErr = r.Read(p) + got = string(p[:n]) + case "ReadAt": + p := make([]byte, step.lenp) + n, gotErr = r.ReadAt(p, step.offset) + got = string(p[:n]) + case "Seek": + offset, gotErr = r.Seek(step.offset, step.whence) + got = strconv.FormatInt(offset, 10) + default: + t.Fatalf("unknown method: %s", step.method) + } + if gotErr != step.wantErr { + t.Fatalf("%s step %d: got error %v want %v", rt.blobKey, i, gotErr, step.wantErr) + } + if got != step.want { + t.Fatalf("%s step %d: got %q want %q", rt.blobKey, i, got, step.want) + } + } + } +} diff --git a/vendor/google.golang.org/appengine/blobstore/read.go b/vendor/google.golang.org/appengine/blobstore/read.go new file mode 100644 index 0000000..578b1f5 --- /dev/null +++ b/vendor/google.golang.org/appengine/blobstore/read.go @@ -0,0 +1,160 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package blobstore + +import ( + "errors" + "fmt" + "io" + "os" + "sync" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine" + "google.golang.org/appengine/internal" + + blobpb "google.golang.org/appengine/internal/blobstore" +) + +// openBlob returns a reader for a blob. It always succeeds; if the blob does +// not exist then an error will be reported upon first read. +func openBlob(c context.Context, blobKey appengine.BlobKey) Reader { + return &reader{ + c: c, + blobKey: blobKey, + } +} + +const readBufferSize = 256 * 1024 + +// reader is a blob reader. It implements the Reader interface. 
+type reader struct { + c context.Context + + // Either blobKey or filename is set: + blobKey appengine.BlobKey + filename string + + closeFunc func() // is nil if unavailable or already closed. + + // buf is the read buffer. r is how much of buf has been read. + // off is the offset of buf[0] relative to the start of the blob. + // An invariant is 0 <= r && r <= len(buf). + // Reads that don't require an RPC call will increment r but not off. + // Seeks may modify r without discarding the buffer, but only if the + // invariant can be maintained. + mu sync.Mutex + buf []byte + r int + off int64 +} + +func (r *reader) Close() error { + if f := r.closeFunc; f != nil { + f() + } + r.closeFunc = nil + return nil +} + +func (r *reader) Read(p []byte) (int, error) { + if len(p) == 0 { + return 0, nil + } + r.mu.Lock() + defer r.mu.Unlock() + if r.r == len(r.buf) { + if err := r.fetch(r.off + int64(r.r)); err != nil { + return 0, err + } + } + n := copy(p, r.buf[r.r:]) + r.r += n + return n, nil +} + +func (r *reader) ReadAt(p []byte, off int64) (int, error) { + if len(p) == 0 { + return 0, nil + } + r.mu.Lock() + defer r.mu.Unlock() + // Convert relative offsets to absolute offsets. + ab0 := r.off + int64(r.r) + ab1 := r.off + int64(len(r.buf)) + ap0 := off + ap1 := off + int64(len(p)) + // Check if we can satisfy the read entirely out of the existing buffer. + if r.off <= ap0 && ap1 <= ab1 { + // Convert off from an absolute offset to a relative offset. + rp0 := int(ap0 - r.off) + return copy(p, r.buf[rp0:]), nil + } + // Restore the original Read/Seek offset after ReadAt completes. + defer r.seek(ab0) + // Repeatedly fetch and copy until we have filled p. + n := 0 + for len(p) > 0 { + if err := r.fetch(off + int64(n)); err != nil { + return n, err + } + r.r = copy(p, r.buf) + n += r.r + p = p[r.r:] + } + return n, nil +} + +func (r *reader) Seek(offset int64, whence int) (ret int64, err error) { + r.mu.Lock() + defer r.mu.Unlock() + switch whence { + case os.SEEK_SET: + ret = offset + case os.SEEK_CUR: + ret = r.off + int64(r.r) + offset + case os.SEEK_END: + return 0, errors.New("seeking relative to the end of a blob isn't supported") + default: + return 0, fmt.Errorf("invalid Seek whence value: %d", whence) + } + if ret < 0 { + return 0, errors.New("negative Seek offset") + } + return r.seek(ret) +} + +// fetch fetches readBufferSize bytes starting at the given offset. On success, +// the data is saved as r.buf. +func (r *reader) fetch(off int64) error { + req := &blobpb.FetchDataRequest{ + BlobKey: proto.String(string(r.blobKey)), + StartIndex: proto.Int64(off), + EndIndex: proto.Int64(off + readBufferSize - 1), // EndIndex is inclusive. + } + res := &blobpb.FetchDataResponse{} + if err := internal.Call(r.c, "blobstore", "FetchData", req, res); err != nil { + return err + } + if len(res.Data) == 0 { + return io.EOF + } + r.buf, r.r, r.off = res.Data, 0, off + return nil +} + +// seek seeks to the given offset with an effective whence equal to SEEK_SET. +// It discards the read buffer if the invariant cannot be maintained. 
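+//
+// For example (offsets illustrative): with r.off == 100 and a full buffer,
+// seek(150) simply sets r.r to 50, while seek(50) must discard the buffer
+// because the target offset precedes the buffered range.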
+func (r *reader) seek(off int64) (int64, error) { + delta := off - r.off + if delta >= 0 && delta < int64(len(r.buf)) { + r.r = int(delta) + return off, nil + } + r.buf, r.r, r.off = nil, 0, off + return off, nil +} diff --git a/vendor/google.golang.org/appengine/capability/capability.go b/vendor/google.golang.org/appengine/capability/capability.go new file mode 100644 index 0000000..3a60bd5 --- /dev/null +++ b/vendor/google.golang.org/appengine/capability/capability.go @@ -0,0 +1,52 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +/* +Package capability exposes information about outages and scheduled downtime +for specific API capabilities. + +This package does not work in App Engine "flexible environment". + +Example: + if !capability.Enabled(c, "datastore_v3", "write") { + // show user a different page + } +*/ +package capability // import "google.golang.org/appengine/capability" + +import ( + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + "google.golang.org/appengine/log" + + pb "google.golang.org/appengine/internal/capability" +) + +// Enabled returns whether an API's capabilities are enabled. +// The wildcard "*" capability matches every capability of an API. +// If the underlying RPC fails (if the package is unknown, for example), +// false is returned and information is written to the application log. +func Enabled(ctx context.Context, api, capability string) bool { + req := &pb.IsEnabledRequest{ + Package: &api, + Capability: []string{capability}, + } + res := &pb.IsEnabledResponse{} + if err := internal.Call(ctx, "capability_service", "IsEnabled", req, res); err != nil { + log.Warningf(ctx, "capability.Enabled: RPC failed: %v", err) + return false + } + switch *res.SummaryStatus { + case pb.IsEnabledResponse_ENABLED, + pb.IsEnabledResponse_SCHEDULED_FUTURE, + pb.IsEnabledResponse_SCHEDULED_NOW: + return true + case pb.IsEnabledResponse_UNKNOWN: + log.Errorf(ctx, "capability.Enabled: unknown API capability %s/%s", api, capability) + return false + default: + return false + } +} diff --git a/vendor/google.golang.org/appengine/channel/channel.go b/vendor/google.golang.org/appengine/channel/channel.go new file mode 100644 index 0000000..dfe0a3f --- /dev/null +++ b/vendor/google.golang.org/appengine/channel/channel.go @@ -0,0 +1,83 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +/* +Package channel implements the server side of App Engine's Channel API. + +Create creates a new channel associated with the given clientID, +which must be unique to the client that will use the returned token. + + token, err := channel.Create(c, "player1") + if err != nil { + // handle error + } + // return token to the client in an HTTP response + +Send sends a message to the client over the channel identified by clientID. + + channel.Send(c, "player1", "Game over!") +*/ +package channel // import "google.golang.org/appengine/channel" + +import ( + "encoding/json" + + "golang.org/x/net/context" + + "google.golang.org/appengine" + "google.golang.org/appengine/internal" + basepb "google.golang.org/appengine/internal/base" + pb "google.golang.org/appengine/internal/channel" +) + +// Create creates a channel and returns a token for use by the client. +// The clientID is an application-provided string used to identify the client. 
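+// The token should be returned to the client, which uses it to open a
+// socket with the client-side Channel API.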
+func Create(c context.Context, clientID string) (token string, err error) { + req := &pb.CreateChannelRequest{ + ApplicationKey: &clientID, + } + resp := &pb.CreateChannelResponse{} + err = internal.Call(c, service, "CreateChannel", req, resp) + token = resp.GetToken() + return token, remapError(err) +} + +// Send sends a message on the channel associated with clientID. +func Send(c context.Context, clientID, message string) error { + req := &pb.SendMessageRequest{ + ApplicationKey: &clientID, + Message: &message, + } + resp := &basepb.VoidProto{} + return remapError(internal.Call(c, service, "SendChannelMessage", req, resp)) +} + +// SendJSON is a helper function that sends a JSON-encoded value +// on the channel associated with clientID. +func SendJSON(c context.Context, clientID string, value interface{}) error { + m, err := json.Marshal(value) + if err != nil { + return err + } + return Send(c, clientID, string(m)) +} + +// remapError fixes any APIError referencing "xmpp" into one referencing "channel". +func remapError(err error) error { + if e, ok := err.(*internal.APIError); ok { + if e.Service == "xmpp" { + e.Service = "channel" + } + } + return err +} + +var service = "xmpp" // prod + +func init() { + if appengine.IsDevAppServer() { + service = "channel" // dev + } + internal.RegisterErrorCodeMap("channel", pb.ChannelServiceError_ErrorCode_name) +} diff --git a/vendor/google.golang.org/appengine/channel/channel_test.go b/vendor/google.golang.org/appengine/channel/channel_test.go new file mode 100644 index 0000000..c7498eb --- /dev/null +++ b/vendor/google.golang.org/appengine/channel/channel_test.go @@ -0,0 +1,21 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package channel + +import ( + "testing" + + "google.golang.org/appengine/internal" +) + +func TestRemapError(t *testing.T) { + err := &internal.APIError{ + Service: "xmpp", + } + err = remapError(err).(*internal.APIError) + if err.Service != "channel" { + t.Errorf("err.Service = %q, want %q", err.Service, "channel") + } +} diff --git a/vendor/google.golang.org/appengine/cloudsql/cloudsql.go b/vendor/google.golang.org/appengine/cloudsql/cloudsql.go new file mode 100644 index 0000000..7b27e6b --- /dev/null +++ b/vendor/google.golang.org/appengine/cloudsql/cloudsql.go @@ -0,0 +1,62 @@ +// Copyright 2013 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +/* +Package cloudsql exposes access to Google Cloud SQL databases. + +This package does not work in App Engine "flexible environment". + +This package is intended for MySQL drivers to make App Engine-specific +connections. Applications should use this package through database/sql: +Select a pure Go MySQL driver that supports this package, and use sql.Open +with protocol "cloudsql" and an address of the Cloud SQL instance. + +A Go MySQL driver that has been tested to work well with Cloud SQL +is the go-sql-driver: + import "database/sql" + import _ "github.com/go-sql-driver/mysql" + + db, err := sql.Open("mysql", "user@cloudsql(project-id:instance-name)/dbname") + + +Another driver that works well with Cloud SQL is the mymysql driver: + import "database/sql" + import _ "github.com/ziutek/mymysql/godrv" + + db, err := sql.Open("mymysql", "cloudsql:instance-name*dbname/user/password") + + +Using either of these drivers, you can perform a standard SQL query. 
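+(In the example below, ctx and log are assumed to be an App Engine request
+context and the google.golang.org/appengine/log package.)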
+This example assumes there is a table named 'users' with +columns 'first_name' and 'last_name': + + rows, err := db.Query("SELECT first_name, last_name FROM users") + if err != nil { + log.Errorf(ctx, "db.Query: %v", err) + } + defer rows.Close() + + for rows.Next() { + var firstName string + var lastName string + if err := rows.Scan(&firstName, &lastName); err != nil { + log.Errorf(ctx, "rows.Scan: %v", err) + continue + } + log.Infof(ctx, "First: %v - Last: %v", firstName, lastName) + } + if err := rows.Err(); err != nil { + log.Errorf(ctx, "Row error: %v", err) + } +*/ +package cloudsql + +import ( + "net" +) + +// Dial connects to the named Cloud SQL instance. +func Dial(instance string) (net.Conn, error) { + return connect(instance) +} diff --git a/vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go b/vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go new file mode 100644 index 0000000..af62dba --- /dev/null +++ b/vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go @@ -0,0 +1,17 @@ +// Copyright 2013 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build appengine + +package cloudsql + +import ( + "net" + + "appengine/cloudsql" +) + +func connect(instance string) (net.Conn, error) { + return cloudsql.Dial(instance) +} diff --git a/vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go b/vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go new file mode 100644 index 0000000..90fa7b3 --- /dev/null +++ b/vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go @@ -0,0 +1,16 @@ +// Copyright 2013 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package cloudsql + +import ( + "errors" + "net" +) + +func connect(instance string) (net.Conn, error) { + return nil, errors.New(`cloudsql: not supported in App Engine "flexible environment"`) +} diff --git a/vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go b/vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go new file mode 100644 index 0000000..e317cdd --- /dev/null +++ b/vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go @@ -0,0 +1,342 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Program aebundler turns a Go app into a fully self-contained tar file. +// The app and its subdirectories (if any) are placed under "." +// and the dependencies from $GOPATH are placed under ./_gopath/src. +// A main func is synthesized if one does not exist. +// +// A sample Dockerfile to be used with this bundler could look like this: +// FROM gcr.io/google_appengine/go-compat +// ADD . 
/app +// RUN GOPATH=/app/_gopath go build -tags appenginevm -o /app/_ah/exe +package main + +import ( + "archive/tar" + "flag" + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" +) + +var ( + output = flag.String("o", "", "name of output tar file or '-' for stdout") + rootDir = flag.String("root", ".", "directory name of application root") + vm = flag.Bool("vm", true, `bundle an app for App Engine "flexible environment"`) + + skipFiles = map[string]bool{ + ".git": true, + ".gitconfig": true, + ".hg": true, + ".travis.yml": true, + } +) + +const ( + newMain = `package main +import "google.golang.org/appengine" +func main() { + appengine.Main() +} +` +) + +func usage() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + fmt.Fprintf(os.Stderr, "\t%s -o \tBundle app to named tar file or stdout\n", os.Args[0]) + fmt.Fprintf(os.Stderr, "\noptional arguments:\n") + flag.PrintDefaults() +} + +func main() { + flag.Usage = usage + flag.Parse() + + var tags []string + if *vm { + tags = append(tags, "appenginevm") + } else { + tags = append(tags, "appengine") + } + + tarFile := *output + if tarFile == "" { + usage() + errorf("Required -o flag not specified.") + } + + app, err := analyze(tags) + if err != nil { + errorf("Error analyzing app: %v", err) + } + if err := app.bundle(tarFile); err != nil { + errorf("Unable to bundle app: %v", err) + } +} + +// errorf prints the error message and exits. +func errorf(format string, a ...interface{}) { + fmt.Fprintf(os.Stderr, "aebundler: "+format+"\n", a...) + os.Exit(1) +} + +type app struct { + hasMain bool + appFiles []string + imports map[string]string +} + +// analyze checks the app for building with the given build tags and returns hasMain, +// app files, and a map of full directory import names to original import names. +func analyze(tags []string) (*app, error) { + ctxt := buildContext(tags) + hasMain, appFiles, err := checkMain(ctxt) + if err != nil { + return nil, err + } + gopath := filepath.SplitList(ctxt.GOPATH) + im, err := imports(ctxt, *rootDir, gopath) + return &app{ + hasMain: hasMain, + appFiles: appFiles, + imports: im, + }, err +} + +// buildContext returns the context for building the source. +func buildContext(tags []string) *build.Context { + return &build.Context{ + GOARCH: build.Default.GOARCH, + GOOS: build.Default.GOOS, + GOROOT: build.Default.GOROOT, + GOPATH: build.Default.GOPATH, + Compiler: build.Default.Compiler, + BuildTags: append(build.Default.BuildTags, tags...), + } +} + +// bundle bundles the app into the named tarFile ("-"==stdout). 
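+// The deferred Close below propagates an error from closing the output file
+// through the named return value, so a failed flush is not silently lost.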
+func (s *app) bundle(tarFile string) (err error) { + var out io.Writer + if tarFile == "-" { + out = os.Stdout + } else { + f, err := os.Create(tarFile) + if err != nil { + return err + } + defer func() { + if cerr := f.Close(); err == nil { + err = cerr + } + }() + out = f + } + tw := tar.NewWriter(out) + + for srcDir, importName := range s.imports { + dstDir := "_gopath/src/" + importName + if err = copyTree(tw, dstDir, srcDir); err != nil { + return fmt.Errorf("unable to copy directory %v to %v: %v", srcDir, dstDir, err) + } + } + if err := copyTree(tw, ".", *rootDir); err != nil { + return fmt.Errorf("unable to copy root directory to /app: %v", err) + } + if !s.hasMain { + if err := synthesizeMain(tw, s.appFiles); err != nil { + return fmt.Errorf("unable to synthesize new main func: %v", err) + } + } + + if err := tw.Close(); err != nil { + return fmt.Errorf("unable to close tar file %v: %v", tarFile, err) + } + return nil +} + +// synthesizeMain generates a new main func and writes it to the tarball. +func synthesizeMain(tw *tar.Writer, appFiles []string) error { + appMap := make(map[string]bool) + for _, f := range appFiles { + appMap[f] = true + } + var f string + for i := 0; i < 100; i++ { + f = fmt.Sprintf("app_main%d.go", i) + if !appMap[filepath.Join(*rootDir, f)] { + break + } + } + if appMap[filepath.Join(*rootDir, f)] { + return fmt.Errorf("unable to find unique name for %v", f) + } + hdr := &tar.Header{ + Name: f, + Mode: 0644, + Size: int64(len(newMain)), + } + if err := tw.WriteHeader(hdr); err != nil { + return fmt.Errorf("unable to write header for %v: %v", f, err) + } + if _, err := tw.Write([]byte(newMain)); err != nil { + return fmt.Errorf("unable to write %v to tar file: %v", f, err) + } + return nil +} + +// imports returns a map of all import directories (recursively) used by the app. +// The return value maps full directory names to original import names. +func imports(ctxt *build.Context, srcDir string, gopath []string) (map[string]string, error) { + pkg, err := ctxt.ImportDir(srcDir, 0) + if err != nil { + return nil, fmt.Errorf("unable to analyze source: %v", err) + } + + // Resolve all non-standard-library imports + result := make(map[string]string) + for _, v := range pkg.Imports { + if !strings.Contains(v, ".") { + continue + } + src, err := findInGopath(v, gopath) + if err != nil { + return nil, fmt.Errorf("unable to find import %v in gopath %v: %v", v, gopath, err) + } + result[src] = v + im, err := imports(ctxt, src, gopath) + if err != nil { + return nil, fmt.Errorf("unable to parse package %v: %v", src, err) + } + for k, v := range im { + result[k] = v + } + } + return result, nil +} + +// findInGopath searches the gopath for the named import directory. +func findInGopath(dir string, gopath []string) (string, error) { + for _, v := range gopath { + dst := filepath.Join(v, "src", dir) + if _, err := os.Stat(dst); err == nil { + return dst, nil + } + } + return "", fmt.Errorf("unable to find package %v in gopath %v", dir, gopath) +} + +// copyTree copies srcDir to tar file dstDir, ignoring skipFiles. 
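+// Subdirectories are copied recursively; entries named in skipFiles
+// (such as .git) are skipped at every level of the tree.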
+func copyTree(tw *tar.Writer, dstDir, srcDir string) error { + entries, err := ioutil.ReadDir(srcDir) + if err != nil { + return fmt.Errorf("unable to read dir %v: %v", srcDir, err) + } + for _, entry := range entries { + n := entry.Name() + if skipFiles[n] { + continue + } + s := filepath.Join(srcDir, n) + d := filepath.Join(dstDir, n) + if entry.IsDir() { + if err := copyTree(tw, d, s); err != nil { + return fmt.Errorf("unable to copy dir %v to %v: %v", s, d, err) + } + continue + } + if err := copyFile(tw, d, s); err != nil { + return fmt.Errorf("unable to copy dir %v to %v: %v", s, d, err) + } + } + return nil +} + +// copyFile copies src to tar file dst. +func copyFile(tw *tar.Writer, dst, src string) error { + s, err := os.Open(src) + if err != nil { + return fmt.Errorf("unable to open %v: %v", src, err) + } + defer s.Close() + fi, err := s.Stat() + if err != nil { + return fmt.Errorf("unable to stat %v: %v", src, err) + } + + hdr, err := tar.FileInfoHeader(fi, dst) + if err != nil { + return fmt.Errorf("unable to create tar header for %v: %v", dst, err) + } + hdr.Name = dst + if err := tw.WriteHeader(hdr); err != nil { + return fmt.Errorf("unable to write header for %v: %v", dst, err) + } + _, err = io.Copy(tw, s) + if err != nil { + return fmt.Errorf("unable to copy %v to %v: %v", src, dst, err) + } + return nil +} + +// checkMain verifies that there is a single "main" function. +// It also returns a list of all Go source files in the app. +func checkMain(ctxt *build.Context) (bool, []string, error) { + pkg, err := ctxt.ImportDir(*rootDir, 0) + if err != nil { + return false, nil, fmt.Errorf("unable to analyze source: %v", err) + } + if !pkg.IsCommand() { + errorf("Your app's package needs to be changed from %q to \"main\".\n", pkg.Name) + } + // Search for a "func main" + var hasMain bool + var appFiles []string + for _, f := range pkg.GoFiles { + n := filepath.Join(*rootDir, f) + appFiles = append(appFiles, n) + if hasMain, err = readFile(n); err != nil { + return false, nil, fmt.Errorf("error parsing %q: %v", n, err) + } + } + return hasMain, appFiles, nil +} + +// isMain returns whether the given function declaration is a main function. +// Such a function must be called "main", not have a receiver, and have no arguments or return types. +func isMain(f *ast.FuncDecl) bool { + ft := f.Type + return f.Name.Name == "main" && f.Recv == nil && ft.Params.NumFields() == 0 && ft.Results.NumFields() == 0 +} + +// readFile reads and parses the Go source code file and returns whether it has a main function. +func readFile(filename string) (hasMain bool, err error) { + var src []byte + src, err = ioutil.ReadFile(filename) + if err != nil { + return + } + fset := token.NewFileSet() + file, err := parser.ParseFile(fset, filename, src, 0) + for _, decl := range file.Decls { + funcDecl, ok := decl.(*ast.FuncDecl) + if !ok { + continue + } + if !isMain(funcDecl) { + continue + } + hasMain = true + break + } + return +} diff --git a/vendor/google.golang.org/appengine/cmd/aedeploy/aedeploy.go b/vendor/google.golang.org/appengine/cmd/aedeploy/aedeploy.go new file mode 100644 index 0000000..155fc1c --- /dev/null +++ b/vendor/google.golang.org/appengine/cmd/aedeploy/aedeploy.go @@ -0,0 +1,268 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Program aedeploy assists with deploying App Engine "flexible environment" Go apps to production. 
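+// For example (flags are illustrative):
+//
+//	aedeploy gcloud preview app deploy ./app.yaml
+//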
+// A temporary directory is created; the app, its subdirectories, and all its +// dependencies from $GOPATH are copied into the directory; then the app +// is deployed to production with the provided command. +// +// The app must be in "package main". +// +// This command must be issued from within the root directory of the app +// (where the app.yaml file is located). +package main + +import ( + "flag" + "fmt" + "go/build" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" +) + +var ( + skipFiles = map[string]bool{ + ".git": true, + ".gitconfig": true, + ".hg": true, + ".travis.yml": true, + } + + gopathCache = map[string]string{} +) + +func usage() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + fmt.Fprintf(os.Stderr, "\t%s gcloud --verbosity debug preview app deploy --version myversion ./app.yaml\tDeploy app to production\n", os.Args[0]) +} + +func main() { + flag.Usage = usage + flag.Parse() + if flag.NArg() < 1 { + usage() + os.Exit(1) + } + + if err := aedeploy(); err != nil { + fmt.Fprintf(os.Stderr, os.Args[0]+": Error: %v\n", err) + os.Exit(1) + } +} + +func aedeploy() error { + tags := []string{"appenginevm"} + app, err := analyze(tags) + if err != nil { + return err + } + + tmpDir, err := app.bundle() + if tmpDir != "" { + defer os.RemoveAll(tmpDir) + } + if err != nil { + return err + } + + if err := os.Chdir(tmpDir); err != nil { + return fmt.Errorf("unable to chdir to %v: %v", tmpDir, err) + } + return deploy() +} + +// deploy calls the provided command to deploy the app from the temporary directory. +func deploy() error { + cmd := exec.Command(flag.Arg(0), flag.Args()[1:]...) + cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("unable to run %q: %v", strings.Join(flag.Args(), " "), err) + } + return nil +} + +type app struct { + appFiles []string + imports map[string]string +} + +// analyze checks the app for building with the given build tags and returns +// app files, and a map of full directory import names to original import names. +func analyze(tags []string) (*app, error) { + ctxt := buildContext(tags) + appFiles, err := appFiles(ctxt) + if err != nil { + return nil, err + } + gopath := filepath.SplitList(ctxt.GOPATH) + im, err := imports(ctxt, ".", gopath) + return &app{ + appFiles: appFiles, + imports: im, + }, err +} + +// buildContext returns the context for building the source. +func buildContext(tags []string) *build.Context { + return &build.Context{ + GOARCH: "amd64", + GOOS: "linux", + GOROOT: build.Default.GOROOT, + GOPATH: build.Default.GOPATH, + Compiler: build.Default.Compiler, + BuildTags: append(defaultBuildTags, tags...), + } +} + +// All build tags except go1.7, since Go 1.6 is the runtime version. +var defaultBuildTags = []string{ + "go1.1", "go1.2", "go1.3", "go1.4", "go1.5", "go1.6"} + +// bundle bundles the app into a temporary directory. 
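+// The directory is returned even on error so that the caller (see the
+// deferred os.RemoveAll in aedeploy) can remove it.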
+func (s *app) bundle() (tmpdir string, err error) { + workDir, err := ioutil.TempDir("", "aedeploy") + if err != nil { + return "", fmt.Errorf("unable to create tmpdir: %v", err) + } + + for srcDir, importName := range s.imports { + dstDir := "_gopath/src/" + importName + if err := copyTree(workDir, dstDir, srcDir); err != nil { + return workDir, fmt.Errorf("unable to copy directory %v to %v: %v", srcDir, dstDir, err) + } + } + if err := copyTree(workDir, ".", "."); err != nil { + return workDir, fmt.Errorf("unable to copy root directory to /app: %v", err) + } + return workDir, nil +} + +// imports returns a map of all import directories (recursively) used by the app. +// The return value maps full directory names to original import names. +func imports(ctxt *build.Context, srcDir string, gopath []string) (map[string]string, error) { + pkg, err := ctxt.ImportDir(srcDir, 0) + if err != nil { + return nil, err + } + + // Resolve all non-standard-library imports + result := make(map[string]string) + for _, v := range pkg.Imports { + if !strings.Contains(v, ".") { + continue + } + src, err := findInGopath(v, gopath) + if err != nil { + return nil, fmt.Errorf("unable to find import %v in gopath %v: %v", v, gopath, err) + } + if _, ok := result[src]; ok { // Already processed + continue + } + result[src] = v + im, err := imports(ctxt, src, gopath) + if err != nil { + return nil, fmt.Errorf("unable to parse package %v: %v", src, err) + } + for k, v := range im { + result[k] = v + } + } + return result, nil +} + +// findInGopath searches the gopath for the named import directory. +func findInGopath(dir string, gopath []string) (string, error) { + if v, ok := gopathCache[dir]; ok { + return v, nil + } + for _, v := range gopath { + dst := filepath.Join(v, "src", dir) + if _, err := os.Stat(dst); err == nil { + gopathCache[dir] = dst + return dst, nil + } + } + return "", fmt.Errorf("unable to find package %v in gopath %v", dir, gopath) +} + +// copyTree copies srcDir to dstDir relative to dstRoot, ignoring skipFiles. +func copyTree(dstRoot, dstDir, srcDir string) error { + d := filepath.Join(dstRoot, dstDir) + if err := os.MkdirAll(d, 0755); err != nil { + return fmt.Errorf("unable to create directory %q: %v", d, err) + } + + entries, err := ioutil.ReadDir(srcDir) + if err != nil { + return fmt.Errorf("unable to read dir %q: %v", srcDir, err) + } + for _, entry := range entries { + n := entry.Name() + if skipFiles[n] { + continue + } + s := filepath.Join(srcDir, n) + if entry.Mode()&os.ModeSymlink == os.ModeSymlink { + if entry, err = os.Stat(s); err != nil { + return fmt.Errorf("unable to stat %v: %v", s, err) + } + } + d := filepath.Join(dstDir, n) + if entry.IsDir() { + if err := copyTree(dstRoot, d, s); err != nil { + return fmt.Errorf("unable to copy dir %q to %q: %v", s, d, err) + } + continue + } + if err := copyFile(dstRoot, d, s); err != nil { + return fmt.Errorf("unable to copy dir %q to %q: %v", s, d, err) + } + } + return nil +} + +// copyFile copies src to dst relative to dstRoot. +func copyFile(dstRoot, dst, src string) error { + s, err := os.Open(src) + if err != nil { + return fmt.Errorf("unable to open %q: %v", src, err) + } + defer s.Close() + + dst = filepath.Join(dstRoot, dst) + d, err := os.Create(dst) + if err != nil { + return fmt.Errorf("unable to create %q: %v", dst, err) + } + _, err = io.Copy(d, s) + if err != nil { + d.Close() // ignore error, copy already failed. 
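+		// Surface the copy error; it is the more informative failure.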
+ return fmt.Errorf("unable to copy %q to %q: %v", src, dst, err) + } + if err := d.Close(); err != nil { + return fmt.Errorf("unable to close %q: %v", dst, err) + } + return nil +} + +// appFiles returns a list of all Go source files in the app. +func appFiles(ctxt *build.Context) ([]string, error) { + pkg, err := ctxt.ImportDir(".", 0) + if err != nil { + return nil, err + } + if !pkg.IsCommand() { + return nil, fmt.Errorf(`the root of your app needs to be package "main" (currently %q). Please see https://cloud.google.com/appengine/docs/flexible/go/ for more details on structuring your app.`, pkg.Name) + } + var appFiles []string + for _, f := range pkg.GoFiles { + n := filepath.Join(".", f) + appFiles = append(appFiles, n) + } + return appFiles, nil +} diff --git a/vendor/google.golang.org/appengine/cmd/aefix/ae.go b/vendor/google.golang.org/appengine/cmd/aefix/ae.go new file mode 100644 index 0000000..0fe2d4a --- /dev/null +++ b/vendor/google.golang.org/appengine/cmd/aefix/ae.go @@ -0,0 +1,185 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package main + +import ( + "go/ast" + "path" + "strconv" + "strings" +) + +const ( + ctxPackage = "golang.org/x/net/context" + + newPackageBase = "google.golang.org/" + stutterPackage = false +) + +func init() { + register(fix{ + "ae", + "2016-04-15", + aeFn, + `Update old App Engine APIs to new App Engine APIs`, + }) +} + +// logMethod is the set of methods on appengine.Context used for logging. +var logMethod = map[string]bool{ + "Debugf": true, + "Infof": true, + "Warningf": true, + "Errorf": true, + "Criticalf": true, +} + +// mapPackage turns "appengine" into "google.golang.org/appengine", etc. +func mapPackage(s string) string { + if stutterPackage { + s += "/" + path.Base(s) + } + return newPackageBase + s +} + +func aeFn(f *ast.File) bool { + // During the walk, we track the last thing seen that looks like + // an appengine.Context, and reset it once the walk leaves a func. + var lastContext *ast.Ident + + fixed := false + + // Update imports. + mainImp := "appengine" + for _, imp := range f.Imports { + pth, _ := strconv.Unquote(imp.Path.Value) + if pth == "appengine" || strings.HasPrefix(pth, "appengine/") { + newPth := mapPackage(pth) + imp.Path.Value = strconv.Quote(newPth) + fixed = true + + if pth == "appengine" { + mainImp = newPth + } + } + } + + // Update any API changes. + walk(f, func(n interface{}) { + if ft, ok := n.(*ast.FuncType); ok && ft.Params != nil { + // See if this func has an `appengine.Context arg`. + // If so, remember its identifier. + for _, param := range ft.Params.List { + if !isPkgDot(param.Type, "appengine", "Context") { + continue + } + if len(param.Names) == 1 { + lastContext = param.Names[0] + break + } + } + return + } + + if as, ok := n.(*ast.AssignStmt); ok { + if len(as.Lhs) == 1 && len(as.Rhs) == 1 { + // If this node is an assignment from an appengine.NewContext invocation, + // remember the identifier on the LHS. + if isCall(as.Rhs[0], "appengine", "NewContext") { + if ident, ok := as.Lhs[0].(*ast.Ident); ok { + lastContext = ident + return + } + } + // x (=|:=) appengine.Timeout(y, z) + // should become + // x, _ (=|:=) context.WithTimeout(y, z) + if isCall(as.Rhs[0], "appengine", "Timeout") { + addImport(f, ctxPackage) + as.Lhs = append(as.Lhs, ast.NewIdent("_")) + // isCall already did the type checking. 
+ sel := as.Rhs[0].(*ast.CallExpr).Fun.(*ast.SelectorExpr) + sel.X = ast.NewIdent("context") + sel.Sel = ast.NewIdent("WithTimeout") + fixed = true + return + } + } + return + } + + // If this node is a FuncDecl, we've finished the function, so reset lastContext. + if _, ok := n.(*ast.FuncDecl); ok { + lastContext = nil + return + } + + if call, ok := n.(*ast.CallExpr); ok { + if isPkgDot(call.Fun, "appengine", "Datacenter") && len(call.Args) == 0 { + insertContext(f, call, lastContext) + fixed = true + return + } + if isPkgDot(call.Fun, "taskqueue", "QueueStats") && len(call.Args) == 3 { + call.Args = call.Args[:2] // drop last arg + fixed = true + return + } + + sel, ok := call.Fun.(*ast.SelectorExpr) + if !ok { + return + } + if lastContext != nil && refersTo(sel.X, lastContext) && logMethod[sel.Sel.Name] { + // c.Errorf(...) + // should become + // log.Errorf(c, ...) + addImport(f, mapPackage("appengine/log")) + sel.X = &ast.Ident{ // ast.NewIdent doesn't preserve the position. + NamePos: sel.X.Pos(), + Name: "log", + } + insertContext(f, call, lastContext) + fixed = true + return + } + } + }) + + // Change any `appengine.Context` to `context.Context`. + // Do this in a separate walk because the previous walk + // wants to identify "appengine.Context". + walk(f, func(n interface{}) { + expr, ok := n.(ast.Expr) + if ok && isPkgDot(expr, "appengine", "Context") { + addImport(f, ctxPackage) + // isPkgDot did the type checking. + n.(*ast.SelectorExpr).X.(*ast.Ident).Name = "context" + fixed = true + return + } + }) + + // The changes above might remove the need to import "appengine". + // Check if it's used, and drop it if it isn't. + if fixed && !usesImport(f, mainImp) { + deleteImport(f, mainImp) + } + + return fixed +} + +// ctx may be nil. +func insertContext(f *ast.File, call *ast.CallExpr, ctx *ast.Ident) { + if ctx == nil { + // context is unknown, so use a plain "ctx". + ctx = ast.NewIdent("ctx") + } else { + // Create a fresh *ast.Ident so we drop the position information. + ctx = ast.NewIdent(ctx.Name) + } + + call.Args = append([]ast.Expr{ctx}, call.Args...) +} diff --git a/vendor/google.golang.org/appengine/cmd/aefix/ae_test.go b/vendor/google.golang.org/appengine/cmd/aefix/ae_test.go new file mode 100644 index 0000000..21f5695 --- /dev/null +++ b/vendor/google.golang.org/appengine/cmd/aefix/ae_test.go @@ -0,0 +1,144 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
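+
+// Each testCase below pairs an input source file (In) with the output (Out)
+// expected after the "ae" fix has rewritten it.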
+ +package main + +func init() { + addTestCases(aeTests, nil) +} + +var aeTests = []testCase{ + // Collection of fixes: + // - imports + // - appengine.Timeout -> context.WithTimeout + // - add ctx arg to appengine.Datacenter + // - logging API + { + Name: "ae.0", + In: `package foo + +import ( + "net/http" + "time" + + "appengine" + "appengine/datastore" +) + +func f(w http.ResponseWriter, r *http.Request) { + c := appengine.NewContext(r) + + c = appengine.Timeout(c, 5*time.Second) + err := datastore.ErrNoSuchEntity + c.Errorf("Something interesting happened: %v", err) + _ = appengine.Datacenter() +} +`, + Out: `package foo + +import ( + "net/http" + "time" + + "golang.org/x/net/context" + "google.golang.org/appengine" + "google.golang.org/appengine/datastore" + "google.golang.org/appengine/log" +) + +func f(w http.ResponseWriter, r *http.Request) { + c := appengine.NewContext(r) + + c, _ = context.WithTimeout(c, 5*time.Second) + err := datastore.ErrNoSuchEntity + log.Errorf(c, "Something interesting happened: %v", err) + _ = appengine.Datacenter(c) +} +`, + }, + + // Updating a function that takes an appengine.Context arg. + { + Name: "ae.1", + In: `package foo + +import ( + "appengine" +) + +func LogSomething(c2 appengine.Context) { + c2.Warningf("Stand back! I'm going to try science!") +} +`, + Out: `package foo + +import ( + "golang.org/x/net/context" + "google.golang.org/appengine/log" +) + +func LogSomething(c2 context.Context) { + log.Warningf(c2, "Stand back! I'm going to try science!") +} +`, + }, + + // Less widely used API changes: + // - drop maxTasks arg to taskqueue.QueueStats + { + Name: "ae.2", + In: `package foo + +import ( + "appengine" + "appengine/taskqueue" +) + +func f(ctx appengine.Context) { + stats, err := taskqueue.QueueStats(ctx, []string{"one", "two"}, 0) +} +`, + Out: `package foo + +import ( + "golang.org/x/net/context" + "google.golang.org/appengine/taskqueue" +) + +func f(ctx context.Context) { + stats, err := taskqueue.QueueStats(ctx, []string{"one", "two"}) +} +`, + }, + + // Check that the main "appengine" import will not be dropped + // if an appengine.Context -> context.Context change happens + // but the appengine package is still referenced. + { + Name: "ae.3", + In: `package foo + +import ( + "appengine" + "io" +) + +func f(ctx appengine.Context, w io.Writer) { + _ = appengine.IsDevAppServer() +} +`, + Out: `package foo + +import ( + "golang.org/x/net/context" + "google.golang.org/appengine" + "io" +) + +func f(ctx context.Context, w io.Writer) { + _ = appengine.IsDevAppServer() +} +`, + }, +} diff --git a/vendor/google.golang.org/appengine/cmd/aefix/fix.go b/vendor/google.golang.org/appengine/cmd/aefix/fix.go new file mode 100644 index 0000000..a100be7 --- /dev/null +++ b/vendor/google.golang.org/appengine/cmd/aefix/fix.go @@ -0,0 +1,848 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "os" + "path" + "reflect" + "strconv" + "strings" +) + +type fix struct { + name string + date string // date that fix was introduced, in YYYY-MM-DD format + f func(*ast.File) bool + desc string +} + +// main runs sort.Sort(byName(fixes)) before printing list of fixes. 
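+// byName orders fixes alphabetically by name.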
+type byName []fix + +func (f byName) Len() int { return len(f) } +func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] } +func (f byName) Less(i, j int) bool { return f[i].name < f[j].name } + +// main runs sort.Sort(byDate(fixes)) before applying fixes. +type byDate []fix + +func (f byDate) Len() int { return len(f) } +func (f byDate) Swap(i, j int) { f[i], f[j] = f[j], f[i] } +func (f byDate) Less(i, j int) bool { return f[i].date < f[j].date } + +var fixes []fix + +func register(f fix) { + fixes = append(fixes, f) +} + +// walk traverses the AST x, calling visit(y) for each node y in the tree but +// also with a pointer to each ast.Expr, ast.Stmt, and *ast.BlockStmt, +// in a bottom-up traversal. +func walk(x interface{}, visit func(interface{})) { + walkBeforeAfter(x, nop, visit) +} + +func nop(interface{}) {} + +// walkBeforeAfter is like walk but calls before(x) before traversing +// x's children and after(x) afterward. +func walkBeforeAfter(x interface{}, before, after func(interface{})) { + before(x) + + switch n := x.(type) { + default: + panic(fmt.Errorf("unexpected type %T in walkBeforeAfter", x)) + + case nil: + + // pointers to interfaces + case *ast.Decl: + walkBeforeAfter(*n, before, after) + case *ast.Expr: + walkBeforeAfter(*n, before, after) + case *ast.Spec: + walkBeforeAfter(*n, before, after) + case *ast.Stmt: + walkBeforeAfter(*n, before, after) + + // pointers to struct pointers + case **ast.BlockStmt: + walkBeforeAfter(*n, before, after) + case **ast.CallExpr: + walkBeforeAfter(*n, before, after) + case **ast.FieldList: + walkBeforeAfter(*n, before, after) + case **ast.FuncType: + walkBeforeAfter(*n, before, after) + case **ast.Ident: + walkBeforeAfter(*n, before, after) + case **ast.BasicLit: + walkBeforeAfter(*n, before, after) + + // pointers to slices + case *[]ast.Decl: + walkBeforeAfter(*n, before, after) + case *[]ast.Expr: + walkBeforeAfter(*n, before, after) + case *[]*ast.File: + walkBeforeAfter(*n, before, after) + case *[]*ast.Ident: + walkBeforeAfter(*n, before, after) + case *[]ast.Spec: + walkBeforeAfter(*n, before, after) + case *[]ast.Stmt: + walkBeforeAfter(*n, before, after) + + // These are ordered and grouped to match ../../pkg/go/ast/ast.go + case *ast.Field: + walkBeforeAfter(&n.Names, before, after) + walkBeforeAfter(&n.Type, before, after) + walkBeforeAfter(&n.Tag, before, after) + case *ast.FieldList: + for _, field := range n.List { + walkBeforeAfter(field, before, after) + } + case *ast.BadExpr: + case *ast.Ident: + case *ast.Ellipsis: + walkBeforeAfter(&n.Elt, before, after) + case *ast.BasicLit: + case *ast.FuncLit: + walkBeforeAfter(&n.Type, before, after) + walkBeforeAfter(&n.Body, before, after) + case *ast.CompositeLit: + walkBeforeAfter(&n.Type, before, after) + walkBeforeAfter(&n.Elts, before, after) + case *ast.ParenExpr: + walkBeforeAfter(&n.X, before, after) + case *ast.SelectorExpr: + walkBeforeAfter(&n.X, before, after) + case *ast.IndexExpr: + walkBeforeAfter(&n.X, before, after) + walkBeforeAfter(&n.Index, before, after) + case *ast.SliceExpr: + walkBeforeAfter(&n.X, before, after) + if n.Low != nil { + walkBeforeAfter(&n.Low, before, after) + } + if n.High != nil { + walkBeforeAfter(&n.High, before, after) + } + case *ast.TypeAssertExpr: + walkBeforeAfter(&n.X, before, after) + walkBeforeAfter(&n.Type, before, after) + case *ast.CallExpr: + walkBeforeAfter(&n.Fun, before, after) + walkBeforeAfter(&n.Args, before, after) + case *ast.StarExpr: + walkBeforeAfter(&n.X, before, after) + case *ast.UnaryExpr: + 
walkBeforeAfter(&n.X, before, after) + case *ast.BinaryExpr: + walkBeforeAfter(&n.X, before, after) + walkBeforeAfter(&n.Y, before, after) + case *ast.KeyValueExpr: + walkBeforeAfter(&n.Key, before, after) + walkBeforeAfter(&n.Value, before, after) + + case *ast.ArrayType: + walkBeforeAfter(&n.Len, before, after) + walkBeforeAfter(&n.Elt, before, after) + case *ast.StructType: + walkBeforeAfter(&n.Fields, before, after) + case *ast.FuncType: + walkBeforeAfter(&n.Params, before, after) + if n.Results != nil { + walkBeforeAfter(&n.Results, before, after) + } + case *ast.InterfaceType: + walkBeforeAfter(&n.Methods, before, after) + case *ast.MapType: + walkBeforeAfter(&n.Key, before, after) + walkBeforeAfter(&n.Value, before, after) + case *ast.ChanType: + walkBeforeAfter(&n.Value, before, after) + + case *ast.BadStmt: + case *ast.DeclStmt: + walkBeforeAfter(&n.Decl, before, after) + case *ast.EmptyStmt: + case *ast.LabeledStmt: + walkBeforeAfter(&n.Stmt, before, after) + case *ast.ExprStmt: + walkBeforeAfter(&n.X, before, after) + case *ast.SendStmt: + walkBeforeAfter(&n.Chan, before, after) + walkBeforeAfter(&n.Value, before, after) + case *ast.IncDecStmt: + walkBeforeAfter(&n.X, before, after) + case *ast.AssignStmt: + walkBeforeAfter(&n.Lhs, before, after) + walkBeforeAfter(&n.Rhs, before, after) + case *ast.GoStmt: + walkBeforeAfter(&n.Call, before, after) + case *ast.DeferStmt: + walkBeforeAfter(&n.Call, before, after) + case *ast.ReturnStmt: + walkBeforeAfter(&n.Results, before, after) + case *ast.BranchStmt: + case *ast.BlockStmt: + walkBeforeAfter(&n.List, before, after) + case *ast.IfStmt: + walkBeforeAfter(&n.Init, before, after) + walkBeforeAfter(&n.Cond, before, after) + walkBeforeAfter(&n.Body, before, after) + walkBeforeAfter(&n.Else, before, after) + case *ast.CaseClause: + walkBeforeAfter(&n.List, before, after) + walkBeforeAfter(&n.Body, before, after) + case *ast.SwitchStmt: + walkBeforeAfter(&n.Init, before, after) + walkBeforeAfter(&n.Tag, before, after) + walkBeforeAfter(&n.Body, before, after) + case *ast.TypeSwitchStmt: + walkBeforeAfter(&n.Init, before, after) + walkBeforeAfter(&n.Assign, before, after) + walkBeforeAfter(&n.Body, before, after) + case *ast.CommClause: + walkBeforeAfter(&n.Comm, before, after) + walkBeforeAfter(&n.Body, before, after) + case *ast.SelectStmt: + walkBeforeAfter(&n.Body, before, after) + case *ast.ForStmt: + walkBeforeAfter(&n.Init, before, after) + walkBeforeAfter(&n.Cond, before, after) + walkBeforeAfter(&n.Post, before, after) + walkBeforeAfter(&n.Body, before, after) + case *ast.RangeStmt: + walkBeforeAfter(&n.Key, before, after) + walkBeforeAfter(&n.Value, before, after) + walkBeforeAfter(&n.X, before, after) + walkBeforeAfter(&n.Body, before, after) + + case *ast.ImportSpec: + case *ast.ValueSpec: + walkBeforeAfter(&n.Type, before, after) + walkBeforeAfter(&n.Values, before, after) + walkBeforeAfter(&n.Names, before, after) + case *ast.TypeSpec: + walkBeforeAfter(&n.Type, before, after) + + case *ast.BadDecl: + case *ast.GenDecl: + walkBeforeAfter(&n.Specs, before, after) + case *ast.FuncDecl: + if n.Recv != nil { + walkBeforeAfter(&n.Recv, before, after) + } + walkBeforeAfter(&n.Type, before, after) + if n.Body != nil { + walkBeforeAfter(&n.Body, before, after) + } + + case *ast.File: + walkBeforeAfter(&n.Decls, before, after) + + case *ast.Package: + walkBeforeAfter(&n.Files, before, after) + + case []*ast.File: + for i := range n { + walkBeforeAfter(&n[i], before, after) + } + case []ast.Decl: + for i := range n { + 
walkBeforeAfter(&n[i], before, after) + } + case []ast.Expr: + for i := range n { + walkBeforeAfter(&n[i], before, after) + } + case []*ast.Ident: + for i := range n { + walkBeforeAfter(&n[i], before, after) + } + case []ast.Stmt: + for i := range n { + walkBeforeAfter(&n[i], before, after) + } + case []ast.Spec: + for i := range n { + walkBeforeAfter(&n[i], before, after) + } + } + after(x) +} + +// imports returns true if f imports path. +func imports(f *ast.File, path string) bool { + return importSpec(f, path) != nil +} + +// importSpec returns the import spec if f imports path, +// or nil otherwise. +func importSpec(f *ast.File, path string) *ast.ImportSpec { + for _, s := range f.Imports { + if importPath(s) == path { + return s + } + } + return nil +} + +// importPath returns the unquoted import path of s, +// or "" if the path is not properly quoted. +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err == nil { + return t + } + return "" +} + +// declImports reports whether gen contains an import of path. +func declImports(gen *ast.GenDecl, path string) bool { + if gen.Tok != token.IMPORT { + return false + } + for _, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + if importPath(impspec) == path { + return true + } + } + return false +} + +// isPkgDot returns true if t is the expression "pkg.name" +// where pkg is an imported identifier. +func isPkgDot(t ast.Expr, pkg, name string) bool { + sel, ok := t.(*ast.SelectorExpr) + return ok && isTopName(sel.X, pkg) && sel.Sel.String() == name +} + +// isPtrPkgDot returns true if f is the expression "*pkg.name" +// where pkg is an imported identifier. +func isPtrPkgDot(t ast.Expr, pkg, name string) bool { + ptr, ok := t.(*ast.StarExpr) + return ok && isPkgDot(ptr.X, pkg, name) +} + +// isTopName returns true if n is a top-level unresolved identifier with the given name. +func isTopName(n ast.Expr, name string) bool { + id, ok := n.(*ast.Ident) + return ok && id.Name == name && id.Obj == nil +} + +// isName returns true if n is an identifier with the given name. +func isName(n ast.Expr, name string) bool { + id, ok := n.(*ast.Ident) + return ok && id.String() == name +} + +// isCall returns true if t is a call to pkg.name. +func isCall(t ast.Expr, pkg, name string) bool { + call, ok := t.(*ast.CallExpr) + return ok && isPkgDot(call.Fun, pkg, name) +} + +// If n is an *ast.Ident, isIdent returns it; otherwise isIdent returns nil. +func isIdent(n interface{}) *ast.Ident { + id, _ := n.(*ast.Ident) + return id +} + +// refersTo returns true if n is a reference to the same object as x. +func refersTo(n ast.Node, x *ast.Ident) bool { + id, ok := n.(*ast.Ident) + // The test of id.Name == x.Name handles top-level unresolved + // identifiers, which all have Obj == nil. + return ok && id.Obj == x.Obj && id.Name == x.Name +} + +// isBlank returns true if n is the blank identifier. +func isBlank(n ast.Expr) bool { + return isName(n, "_") +} + +// isEmptyString returns true if n is an empty string literal. +func isEmptyString(n ast.Expr) bool { + lit, ok := n.(*ast.BasicLit) + return ok && lit.Kind == token.STRING && len(lit.Value) == 2 +} + +func warn(pos token.Pos, msg string, args ...interface{}) { + if pos.IsValid() { + msg = "%s: " + msg + arg1 := []interface{}{fset.Position(pos).String()} + args = append(arg1, args...) + } + fmt.Fprintf(os.Stderr, msg+"\n", args...) +} + +// countUses returns the number of uses of the identifier x in scope. 
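+//
+// For example (names illustrative), countUses(c, fn.Body.List) reports how
+// many times the identifier c appears in the body of fn.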
+func countUses(x *ast.Ident, scope []ast.Stmt) int { + count := 0 + ff := func(n interface{}) { + if n, ok := n.(ast.Node); ok && refersTo(n, x) { + count++ + } + } + for _, n := range scope { + walk(n, ff) + } + return count +} + +// rewriteUses replaces all uses of the identifier x and !x in scope +// with f(x.Pos()) and fnot(x.Pos()). +func rewriteUses(x *ast.Ident, f, fnot func(token.Pos) ast.Expr, scope []ast.Stmt) { + var lastF ast.Expr + ff := func(n interface{}) { + ptr, ok := n.(*ast.Expr) + if !ok { + return + } + nn := *ptr + + // The child node was just walked and possibly replaced. + // If it was replaced and this is a negation, replace with fnot(p). + not, ok := nn.(*ast.UnaryExpr) + if ok && not.Op == token.NOT && not.X == lastF { + *ptr = fnot(nn.Pos()) + return + } + if refersTo(nn, x) { + lastF = f(nn.Pos()) + *ptr = lastF + } + } + for _, n := range scope { + walk(n, ff) + } +} + +// assignsTo returns true if any of the code in scope assigns to or takes the address of x. +func assignsTo(x *ast.Ident, scope []ast.Stmt) bool { + assigned := false + ff := func(n interface{}) { + if assigned { + return + } + switch n := n.(type) { + case *ast.UnaryExpr: + // use of &x + if n.Op == token.AND && refersTo(n.X, x) { + assigned = true + return + } + case *ast.AssignStmt: + for _, l := range n.Lhs { + if refersTo(l, x) { + assigned = true + return + } + } + } + } + for _, n := range scope { + if assigned { + break + } + walk(n, ff) + } + return assigned +} + +// newPkgDot returns an ast.Expr referring to "pkg.name" at position pos. +func newPkgDot(pos token.Pos, pkg, name string) ast.Expr { + return &ast.SelectorExpr{ + X: &ast.Ident{ + NamePos: pos, + Name: pkg, + }, + Sel: &ast.Ident{ + NamePos: pos, + Name: name, + }, + } +} + +// renameTop renames all references to the top-level name old. +// It returns true if it makes any changes. +func renameTop(f *ast.File, old, new string) bool { + var fixed bool + + // Rename any conflicting imports + // (assuming package name is last element of path). + for _, s := range f.Imports { + if s.Name != nil { + if s.Name.Name == old { + s.Name.Name = new + fixed = true + } + } else { + _, thisName := path.Split(importPath(s)) + if thisName == old { + s.Name = ast.NewIdent(new) + fixed = true + } + } + } + + // Rename any top-level declarations. + for _, d := range f.Decls { + switch d := d.(type) { + case *ast.FuncDecl: + if d.Recv == nil && d.Name.Name == old { + d.Name.Name = new + d.Name.Obj.Name = new + fixed = true + } + case *ast.GenDecl: + for _, s := range d.Specs { + switch s := s.(type) { + case *ast.TypeSpec: + if s.Name.Name == old { + s.Name.Name = new + s.Name.Obj.Name = new + fixed = true + } + case *ast.ValueSpec: + for _, n := range s.Names { + if n.Name == old { + n.Name = new + n.Obj.Name = new + fixed = true + } + } + } + } + } + } + + // Rename top-level old to new, both unresolved names + // (probably defined in another file) and names that resolve + // to a declaration we renamed. + walk(f, func(n interface{}) { + id, ok := n.(*ast.Ident) + if ok && isTopName(id, old) { + id.Name = new + fixed = true + } + if ok && id.Obj != nil && id.Name == old && id.Obj.Name == new { + id.Name = id.Obj.Name + fixed = true + } + }) + + return fixed +} + +// matchLen returns the length of the longest prefix shared by x and y. +func matchLen(x, y string) int { + i := 0 + for i < len(x) && i < len(y) && x[i] == y[i] { + i++ + } + return i +} + +// addImport adds the import path to the file f, if absent. 
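+// It reports whether the file changed, so a fix can note it, e.g.:
+//	fixed = addImport(f, "google.golang.org/appengine") || fixed
+// Conflicting top-level references to the import's base name are
+// first renamed with a trailing underscore (see renameTop above).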
+func addImport(f *ast.File, ipath string) (added bool) { + if imports(f, ipath) { + return false + } + + // Determine name of import. + // Assume added imports follow convention of using last element. + _, name := path.Split(ipath) + + // Rename any conflicting top-level references from name to name_. + renameTop(f, name, name+"_") + + newImport := &ast.ImportSpec{ + Path: &ast.BasicLit{ + Kind: token.STRING, + Value: strconv.Quote(ipath), + }, + } + + // Find an import decl to add to. + var ( + bestMatch = -1 + lastImport = -1 + impDecl *ast.GenDecl + impIndex = -1 + ) + for i, decl := range f.Decls { + gen, ok := decl.(*ast.GenDecl) + if ok && gen.Tok == token.IMPORT { + lastImport = i + // Do not add to import "C", to avoid disrupting the + // association with its doc comment, breaking cgo. + if declImports(gen, "C") { + continue + } + + // Compute longest shared prefix with imports in this block. + for j, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + n := matchLen(importPath(impspec), ipath) + if n > bestMatch { + bestMatch = n + impDecl = gen + impIndex = j + } + } + } + } + + // If no import decl found, add one after the last import. + if impDecl == nil { + impDecl = &ast.GenDecl{ + Tok: token.IMPORT, + } + f.Decls = append(f.Decls, nil) + copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:]) + f.Decls[lastImport+1] = impDecl + } + + // Ensure the import decl has parentheses, if needed. + if len(impDecl.Specs) > 0 && !impDecl.Lparen.IsValid() { + impDecl.Lparen = impDecl.Pos() + } + + insertAt := impIndex + 1 + if insertAt == 0 { + insertAt = len(impDecl.Specs) + } + impDecl.Specs = append(impDecl.Specs, nil) + copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:]) + impDecl.Specs[insertAt] = newImport + if insertAt > 0 { + // Assign same position as the previous import, + // so that the sorter sees it as being in the same block. + prev := impDecl.Specs[insertAt-1] + newImport.Path.ValuePos = prev.Pos() + newImport.EndPos = prev.Pos() + } + + f.Imports = append(f.Imports, newImport) + return true +} + +// deleteImport deletes the import path from the file f, if present. +func deleteImport(f *ast.File, path string) (deleted bool) { + oldImport := importSpec(f, path) + + // Find the import node that imports path, if any. + for i, decl := range f.Decls { + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT { + continue + } + for j, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + if oldImport != impspec { + continue + } + + // We found an import spec that imports path. + // Delete it. + deleted = true + copy(gen.Specs[j:], gen.Specs[j+1:]) + gen.Specs = gen.Specs[:len(gen.Specs)-1] + + // If this was the last import spec in this decl, + // delete the decl, too. + if len(gen.Specs) == 0 { + copy(f.Decls[i:], f.Decls[i+1:]) + f.Decls = f.Decls[:len(f.Decls)-1] + } else if len(gen.Specs) == 1 { + gen.Lparen = token.NoPos // drop parens + } + if j > 0 { + // We deleted an entry but now there will be + // a blank line-sized hole where the import was. + // Close the hole by making the previous + // import appear to "end" where this one did. + gen.Specs[j-1].(*ast.ImportSpec).EndPos = impspec.End() + } + break + } + } + + // Delete it from f.Imports. + for i, imp := range f.Imports { + if imp == oldImport { + copy(f.Imports[i:], f.Imports[i+1:]) + f.Imports = f.Imports[:len(f.Imports)-1] + break + } + } + + return +} + +// rewriteImport rewrites any import of path oldPath to path newPath. 
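+// It reports whether any import matched oldPath, e.g.
+// rewriteImport(f, "appengine", "google.golang.org/appengine"); the
+// spec's end position is preserved so the printed layout stays stable.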
+func rewriteImport(f *ast.File, oldPath, newPath string) (rewrote bool) { + for _, imp := range f.Imports { + if importPath(imp) == oldPath { + rewrote = true + // record old End, because the default is to compute + // it using the length of imp.Path.Value. + imp.EndPos = imp.End() + imp.Path.Value = strconv.Quote(newPath) + } + } + return +} + +func usesImport(f *ast.File, path string) (used bool) { + spec := importSpec(f, path) + if spec == nil { + return + } + + name := spec.Name.String() + switch name { + case "": + // If the package name is not explicitly specified, + // make an educated guess. This is not guaranteed to be correct. + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 { + name = path + } else { + name = path[lastSlash+1:] + } + case "_", ".": + // Not sure if this import is used - err on the side of caution. + return true + } + + walk(f, func(n interface{}) { + sel, ok := n.(*ast.SelectorExpr) + if ok && isTopName(sel.X, name) { + used = true + } + }) + + return +} + +func expr(s string) ast.Expr { + x, err := parser.ParseExpr(s) + if err != nil { + panic("parsing " + s + ": " + err.Error()) + } + // Remove position information to avoid spurious newlines. + killPos(reflect.ValueOf(x)) + return x +} + +var posType = reflect.TypeOf(token.Pos(0)) + +func killPos(v reflect.Value) { + switch v.Kind() { + case reflect.Ptr, reflect.Interface: + if !v.IsNil() { + killPos(v.Elem()) + } + case reflect.Slice: + n := v.Len() + for i := 0; i < n; i++ { + killPos(v.Index(i)) + } + case reflect.Struct: + n := v.NumField() + for i := 0; i < n; i++ { + f := v.Field(i) + if f.Type() == posType { + f.SetInt(0) + continue + } + killPos(f) + } + } +} + +// A Rename describes a single renaming. +type rename struct { + OldImport string // only apply rename if this import is present + NewImport string // add this import during rewrite + Old string // old name: p.T or *p.T + New string // new name: p.T or *p.T +} + +func renameFix(tab []rename) func(*ast.File) bool { + return func(f *ast.File) bool { + return renameFixTab(f, tab) + } +} + +func parseName(s string) (ptr bool, pkg, nam string) { + i := strings.Index(s, ".") + if i < 0 { + panic("parseName: invalid name " + s) + } + if strings.HasPrefix(s, "*") { + ptr = true + s = s[1:] + i-- + } + pkg = s[:i] + nam = s[i+1:] + return +} + +func renameFixTab(f *ast.File, tab []rename) bool { + fixed := false + added := map[string]bool{} + check := map[string]bool{} + for _, t := range tab { + if !imports(f, t.OldImport) { + continue + } + optr, opkg, onam := parseName(t.Old) + walk(f, func(n interface{}) { + np, ok := n.(*ast.Expr) + if !ok { + return + } + x := *np + if optr { + p, ok := x.(*ast.StarExpr) + if !ok { + return + } + x = p.X + } + if !isPkgDot(x, opkg, onam) { + return + } + if t.NewImport != "" && !added[t.NewImport] { + addImport(f, t.NewImport) + added[t.NewImport] = true + } + *np = expr(t.New) + check[t.OldImport] = true + fixed = true + }) + } + + for ipath := range check { + if !usesImport(f, ipath) { + deleteImport(f, ipath) + } + } + return fixed +} diff --git a/vendor/google.golang.org/appengine/cmd/aefix/main.go b/vendor/google.golang.org/appengine/cmd/aefix/main.go new file mode 100644 index 0000000..8e193a6 --- /dev/null +++ b/vendor/google.golang.org/appengine/cmd/aefix/main.go @@ -0,0 +1,258 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "bytes" + "flag" + "fmt" + "go/ast" + "go/format" + "go/parser" + "go/scanner" + "go/token" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "sort" + "strings" +) + +var ( + fset = token.NewFileSet() + exitCode = 0 +) + +var allowedRewrites = flag.String("r", "", + "restrict the rewrites to this comma-separated list") + +var forceRewrites = flag.String("force", "", + "force these fixes to run even if the code looks updated") + +var allowed, force map[string]bool + +var doDiff = flag.Bool("diff", false, "display diffs instead of rewriting files") + +// enable for debugging fix failures +const debug = false // display incorrectly reformatted source and exit + +func usage() { + fmt.Fprintf(os.Stderr, "usage: aefix [-diff] [-r fixname,...] [-force fixname,...] [path ...]\n") + flag.PrintDefaults() + fmt.Fprintf(os.Stderr, "\nAvailable rewrites are:\n") + sort.Sort(byName(fixes)) + for _, f := range fixes { + fmt.Fprintf(os.Stderr, "\n%s\n", f.name) + desc := strings.TrimSpace(f.desc) + desc = strings.Replace(desc, "\n", "\n\t", -1) + fmt.Fprintf(os.Stderr, "\t%s\n", desc) + } + os.Exit(2) +} + +func main() { + flag.Usage = usage + flag.Parse() + + sort.Sort(byDate(fixes)) + + if *allowedRewrites != "" { + allowed = make(map[string]bool) + for _, f := range strings.Split(*allowedRewrites, ",") { + allowed[f] = true + } + } + + if *forceRewrites != "" { + force = make(map[string]bool) + for _, f := range strings.Split(*forceRewrites, ",") { + force[f] = true + } + } + + if flag.NArg() == 0 { + if err := processFile("standard input", true); err != nil { + report(err) + } + os.Exit(exitCode) + } + + for i := 0; i < flag.NArg(); i++ { + path := flag.Arg(i) + switch dir, err := os.Stat(path); { + case err != nil: + report(err) + case dir.IsDir(): + walkDir(path) + default: + if err := processFile(path, false); err != nil { + report(err) + } + } + } + + os.Exit(exitCode) +} + +const parserMode = parser.ParseComments + +func gofmtFile(f *ast.File) ([]byte, error) { + var buf bytes.Buffer + if err := format.Node(&buf, fset, f); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func processFile(filename string, useStdin bool) error { + var f *os.File + var err error + var fixlog bytes.Buffer + + if useStdin { + f = os.Stdin + } else { + f, err = os.Open(filename) + if err != nil { + return err + } + defer f.Close() + } + + src, err := ioutil.ReadAll(f) + if err != nil { + return err + } + + file, err := parser.ParseFile(fset, filename, src, parserMode) + if err != nil { + return err + } + + // Apply all fixes to file. + newFile := file + fixed := false + for _, fix := range fixes { + if allowed != nil && !allowed[fix.name] { + continue + } + if fix.f(newFile) { + fixed = true + fmt.Fprintf(&fixlog, " %s", fix.name) + + // AST changed. + // Print and parse, to update any missing scoping + // or position information for subsequent fixers. + newSrc, err := gofmtFile(newFile) + if err != nil { + return err + } + newFile, err = parser.ParseFile(fset, filename, newSrc, parserMode) + if err != nil { + if debug { + fmt.Printf("%s", newSrc) + report(err) + os.Exit(exitCode) + } + return err + } + } + } + if !fixed { + return nil + } + fmt.Fprintf(os.Stderr, "%s: fixed %s\n", filename, fixlog.String()[1:]) + + // Print AST. We did that after each fix, so this appears + // redundant, but it is necessary to generate gofmt-compatible + // source code in a few cases. 
The official gofmt style is the + // output of the printer run on a standard AST generated by the parser, + // but the source we generated inside the loop above is the + // output of the printer run on a mangled AST generated by a fixer. + newSrc, err := gofmtFile(newFile) + if err != nil { + return err + } + + if *doDiff { + data, err := diff(src, newSrc) + if err != nil { + return fmt.Errorf("computing diff: %s", err) + } + fmt.Printf("diff %s fixed/%s\n", filename, filename) + os.Stdout.Write(data) + return nil + } + + if useStdin { + os.Stdout.Write(newSrc) + return nil + } + + return ioutil.WriteFile(f.Name(), newSrc, 0) +} + +var gofmtBuf bytes.Buffer + +func gofmt(n interface{}) string { + gofmtBuf.Reset() + if err := format.Node(&gofmtBuf, fset, n); err != nil { + return "<" + err.Error() + ">" + } + return gofmtBuf.String() +} + +func report(err error) { + scanner.PrintError(os.Stderr, err) + exitCode = 2 +} + +func walkDir(path string) { + filepath.Walk(path, visitFile) +} + +func visitFile(path string, f os.FileInfo, err error) error { + if err == nil && isGoFile(f) { + err = processFile(path, false) + } + if err != nil { + report(err) + } + return nil +} + +func isGoFile(f os.FileInfo) bool { + // ignore non-Go files + name := f.Name() + return !f.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go") +} + +func diff(b1, b2 []byte) (data []byte, err error) { + f1, err := ioutil.TempFile("", "go-fix") + if err != nil { + return nil, err + } + defer os.Remove(f1.Name()) + defer f1.Close() + + f2, err := ioutil.TempFile("", "go-fix") + if err != nil { + return nil, err + } + defer os.Remove(f2.Name()) + defer f2.Close() + + f1.Write(b1) + f2.Write(b2) + + data, err = exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput() + if len(data) > 0 { + // diff exits with a non-zero status when the files don't match. + // Ignore that failure as long as we get output. + err = nil + } + return +} diff --git a/vendor/google.golang.org/appengine/cmd/aefix/main_test.go b/vendor/google.golang.org/appengine/cmd/aefix/main_test.go new file mode 100644 index 0000000..2151bf2 --- /dev/null +++ b/vendor/google.golang.org/appengine/cmd/aefix/main_test.go @@ -0,0 +1,129 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "go/ast" + "go/parser" + "strings" + "testing" +) + +type testCase struct { + Name string + Fn func(*ast.File) bool + In string + Out string +} + +var testCases []testCase + +func addTestCases(t []testCase, fn func(*ast.File) bool) { + // Fill in fn to avoid repetition in definitions. + if fn != nil { + for i := range t { + if t[i].Fn == nil { + t[i].Fn = fn + } + } + } + testCases = append(testCases, t...) 
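+	// (testCases is the package-level slice that TestRewrite ranges over)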
+} + +func fnop(*ast.File) bool { return false } + +func parseFixPrint(t *testing.T, fn func(*ast.File) bool, desc, in string, mustBeGofmt bool) (out string, fixed, ok bool) { + file, err := parser.ParseFile(fset, desc, in, parserMode) + if err != nil { + t.Errorf("%s: parsing: %v", desc, err) + return + } + + outb, err := gofmtFile(file) + if err != nil { + t.Errorf("%s: printing: %v", desc, err) + return + } + if s := string(outb); in != s && mustBeGofmt { + t.Errorf("%s: not gofmt-formatted.\n--- %s\n%s\n--- %s | gofmt\n%s", + desc, desc, in, desc, s) + tdiff(t, in, s) + return + } + + if fn == nil { + for _, fix := range fixes { + if fix.f(file) { + fixed = true + } + } + } else { + fixed = fn(file) + } + + outb, err = gofmtFile(file) + if err != nil { + t.Errorf("%s: printing: %v", desc, err) + return + } + + return string(outb), fixed, true +} + +func TestRewrite(t *testing.T) { + for _, tt := range testCases { + // Apply fix: should get tt.Out. + out, fixed, ok := parseFixPrint(t, tt.Fn, tt.Name, tt.In, true) + if !ok { + continue + } + + // reformat to get printing right + out, _, ok = parseFixPrint(t, fnop, tt.Name, out, false) + if !ok { + continue + } + + if out != tt.Out { + t.Errorf("%s: incorrect output.\n", tt.Name) + if !strings.HasPrefix(tt.Name, "testdata/") { + t.Errorf("--- have\n%s\n--- want\n%s", out, tt.Out) + } + tdiff(t, out, tt.Out) + continue + } + + if changed := out != tt.In; changed != fixed { + t.Errorf("%s: changed=%v != fixed=%v", tt.Name, changed, fixed) + continue + } + + // Should not change if run again. + out2, fixed2, ok := parseFixPrint(t, tt.Fn, tt.Name+" output", out, true) + if !ok { + continue + } + + if fixed2 { + t.Errorf("%s: applied fixes during second round", tt.Name) + continue + } + + if out2 != out { + t.Errorf("%s: changed output after second round of fixes.\n--- output after first round\n%s\n--- output after second round\n%s", + tt.Name, out, out2) + tdiff(t, out, out2) + } + } +} + +func tdiff(t *testing.T, a, b string) { + data, err := diff([]byte(a), []byte(b)) + if err != nil { + t.Error(err) + return + } + t.Error(string(data)) +} diff --git a/vendor/google.golang.org/appengine/cmd/aefix/typecheck.go b/vendor/google.golang.org/appengine/cmd/aefix/typecheck.go new file mode 100644 index 0000000..d54d375 --- /dev/null +++ b/vendor/google.golang.org/appengine/cmd/aefix/typecheck.go @@ -0,0 +1,673 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "go/ast" + "go/token" + "os" + "reflect" + "strings" +) + +// Partial type checker. +// +// The fact that it is partial is very important: the input is +// an AST and a description of some type information to +// assume about one or more packages, but not all the +// packages that the program imports. The checker is +// expected to do as much as it can with what it has been +// given. There is not enough information supplied to do +// a full type check, but the type checker is expected to +// apply information that can be derived from variable +// declarations, function and method returns, and type switches +// as far as it can, so that the caller can still tell the types +// of expression relevant to a particular fix. +// +// TODO(rsc,gri): Replace with go/typechecker. +// Doing that could be an interesting test case for go/typechecker: +// the constraints about working with partial information will +// likely exercise it in interesting ways. 
The ideal interface would +// be to pass typecheck a map from importpath to package API text +// (Go source code), but for now we use data structures (TypeConfig, Type). +// +// The strings mostly use gofmt form. +// +// A Field or FieldList has as its type a comma-separated list +// of the types of the fields. For example, the field list +// x, y, z int +// has type "int, int, int". + +// The prefix "type " is the type of a type. +// For example, given +// var x int +// type T int +// x's type is "int" but T's type is "type int". +// mkType inserts the "type " prefix. +// getType removes it. +// isType tests for it. + +func mkType(t string) string { + return "type " + t +} + +func getType(t string) string { + if !isType(t) { + return "" + } + return t[len("type "):] +} + +func isType(t string) bool { + return strings.HasPrefix(t, "type ") +} + +// TypeConfig describes the universe of relevant types. +// For ease of creation, the types are all referred to by string +// name (e.g., "reflect.Value"). TypeByName is the only place +// where the strings are resolved. + +type TypeConfig struct { + Type map[string]*Type + Var map[string]string + Func map[string]string +} + +// typeof returns the type of the given name, which may be of +// the form "x" or "p.X". +func (cfg *TypeConfig) typeof(name string) string { + if cfg.Var != nil { + if t := cfg.Var[name]; t != "" { + return t + } + } + if cfg.Func != nil { + if t := cfg.Func[name]; t != "" { + return "func()" + t + } + } + return "" +} + +// Type describes the Fields and Methods of a type. +// If the field or method cannot be found there, it is next +// looked for in the Embed list. +type Type struct { + Field map[string]string // map field name to type + Method map[string]string // map method name to comma-separated return types (should start with "func ") + Embed []string // list of types this type embeds (for extra methods) + Def string // definition of named type +} + +// dot returns the type of "typ.name", making its decision +// using the type information in cfg. +func (typ *Type) dot(cfg *TypeConfig, name string) string { + if typ.Field != nil { + if t := typ.Field[name]; t != "" { + return t + } + } + if typ.Method != nil { + if t := typ.Method[name]; t != "" { + return t + } + } + + for _, e := range typ.Embed { + etyp := cfg.Type[e] + if etyp != nil { + if t := etyp.dot(cfg, name); t != "" { + return t + } + } + } + + return "" +} + +// typecheck type checks the AST f assuming the information in cfg. +// It returns two maps with type information: +// typeof maps AST nodes to type information in gofmt string form. +// assign maps type strings to lists of expressions that were assigned +// to values of another type that were assigned to that type. +func typecheck(cfg *TypeConfig, f *ast.File) (typeof map[interface{}]string, assign map[string][]interface{}) { + typeof = make(map[interface{}]string) + assign = make(map[string][]interface{}) + cfg1 := &TypeConfig{} + *cfg1 = *cfg // make copy so we can add locally + copied := false + + // gather function declarations + for _, decl := range f.Decls { + fn, ok := decl.(*ast.FuncDecl) + if !ok { + continue + } + typecheck1(cfg, fn.Type, typeof, assign) + t := typeof[fn.Type] + if fn.Recv != nil { + // The receiver must be a type. 
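+ // If no type was recorded for it, synthesize one from the receiver
+ // field's syntax; the method's type is then filed under "T.Name",
+ // with any leading * stripped from the receiver type T.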
+ rcvr := typeof[fn.Recv] + if !isType(rcvr) { + if len(fn.Recv.List) != 1 { + continue + } + rcvr = mkType(gofmt(fn.Recv.List[0].Type)) + typeof[fn.Recv.List[0].Type] = rcvr + } + rcvr = getType(rcvr) + if rcvr != "" && rcvr[0] == '*' { + rcvr = rcvr[1:] + } + typeof[rcvr+"."+fn.Name.Name] = t + } else { + if isType(t) { + t = getType(t) + } else { + t = gofmt(fn.Type) + } + typeof[fn.Name] = t + + // Record typeof[fn.Name.Obj] for future references to fn.Name. + typeof[fn.Name.Obj] = t + } + } + + // gather struct declarations + for _, decl := range f.Decls { + d, ok := decl.(*ast.GenDecl) + if ok { + for _, s := range d.Specs { + switch s := s.(type) { + case *ast.TypeSpec: + if cfg1.Type[s.Name.Name] != nil { + break + } + if !copied { + copied = true + // Copy map lazily: it's time. + cfg1.Type = make(map[string]*Type) + for k, v := range cfg.Type { + cfg1.Type[k] = v + } + } + t := &Type{Field: map[string]string{}} + cfg1.Type[s.Name.Name] = t + switch st := s.Type.(type) { + case *ast.StructType: + for _, f := range st.Fields.List { + for _, n := range f.Names { + t.Field[n.Name] = gofmt(f.Type) + } + } + case *ast.ArrayType, *ast.StarExpr, *ast.MapType: + t.Def = gofmt(st) + } + } + } + } + } + + typecheck1(cfg1, f, typeof, assign) + return typeof, assign +} + +func makeExprList(a []*ast.Ident) []ast.Expr { + var b []ast.Expr + for _, x := range a { + b = append(b, x) + } + return b +} + +// Typecheck1 is the recursive form of typecheck. +// It is like typecheck but adds to the information in typeof +// instead of allocating a new map. +func typecheck1(cfg *TypeConfig, f interface{}, typeof map[interface{}]string, assign map[string][]interface{}) { + // set sets the type of n to typ. + // If isDecl is true, n is being declared. + set := func(n ast.Expr, typ string, isDecl bool) { + if typeof[n] != "" || typ == "" { + if typeof[n] != typ { + assign[typ] = append(assign[typ], n) + } + return + } + typeof[n] = typ + + // If we obtained typ from the declaration of x + // propagate the type to all the uses. + // The !isDecl case is a cheat here, but it makes + // up in some cases for not paying attention to + // struct fields. The real type checker will be + // more accurate so we won't need the cheat. + if id, ok := n.(*ast.Ident); ok && id.Obj != nil && (isDecl || typeof[id.Obj] == "") { + typeof[id.Obj] = typ + } + } + + // Type-check an assignment lhs = rhs. + // If isDecl is true, this is := so we can update + // the types of the objects that lhs refers to. + typecheckAssign := func(lhs, rhs []ast.Expr, isDecl bool) { + if len(lhs) > 1 && len(rhs) == 1 { + if _, ok := rhs[0].(*ast.CallExpr); ok { + t := split(typeof[rhs[0]]) + // Lists should have same length but may not; pair what can be paired. + for i := 0; i < len(lhs) && i < len(t); i++ { + set(lhs[i], t[i], isDecl) + } + return + } + } + if len(lhs) == 1 && len(rhs) == 2 { + // x = y, ok + rhs = rhs[:1] + } else if len(lhs) == 2 && len(rhs) == 1 { + // x, ok = y + lhs = lhs[:1] + } + + // Match as much as we can. + for i := 0; i < len(lhs) && i < len(rhs); i++ { + x, y := lhs[i], rhs[i] + if typeof[y] != "" { + set(x, typeof[y], isDecl) + } else { + set(y, typeof[x], false) + } + } + } + + expand := func(s string) string { + typ := cfg.Type[s] + if typ != nil && typ.Def != "" { + return typ.Def + } + return s + } + + // The main type check is a recursive algorithm implemented + // by walkBeforeAfter(n, before, after). 
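+ // (before runs as the walk descends, after as it returns).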
+ // Most of it is bottom-up, but in a few places we need + // to know the type of the function we are checking. + // The before function records that information on + // the curfn stack. + var curfn []*ast.FuncType + + before := func(n interface{}) { + // push function type on stack + switch n := n.(type) { + case *ast.FuncDecl: + curfn = append(curfn, n.Type) + case *ast.FuncLit: + curfn = append(curfn, n.Type) + } + } + + // After is the real type checker. + after := func(n interface{}) { + if n == nil { + return + } + if false && reflect.TypeOf(n).Kind() == reflect.Ptr { // debugging trace + defer func() { + if t := typeof[n]; t != "" { + pos := fset.Position(n.(ast.Node).Pos()) + fmt.Fprintf(os.Stderr, "%s: typeof[%s] = %s\n", pos, gofmt(n), t) + } + }() + } + + switch n := n.(type) { + case *ast.FuncDecl, *ast.FuncLit: + // pop function type off stack + curfn = curfn[:len(curfn)-1] + + case *ast.FuncType: + typeof[n] = mkType(joinFunc(split(typeof[n.Params]), split(typeof[n.Results]))) + + case *ast.FieldList: + // Field list is concatenation of sub-lists. + t := "" + for _, field := range n.List { + if t != "" { + t += ", " + } + t += typeof[field] + } + typeof[n] = t + + case *ast.Field: + // Field is one instance of the type per name. + all := "" + t := typeof[n.Type] + if !isType(t) { + // Create a type, because it is typically *T or *p.T + // and we might care about that type. + t = mkType(gofmt(n.Type)) + typeof[n.Type] = t + } + t = getType(t) + if len(n.Names) == 0 { + all = t + } else { + for _, id := range n.Names { + if all != "" { + all += ", " + } + all += t + typeof[id.Obj] = t + typeof[id] = t + } + } + typeof[n] = all + + case *ast.ValueSpec: + // var declaration. Use type if present. + if n.Type != nil { + t := typeof[n.Type] + if !isType(t) { + t = mkType(gofmt(n.Type)) + typeof[n.Type] = t + } + t = getType(t) + for _, id := range n.Names { + set(id, t, true) + } + } + // Now treat same as assignment. + typecheckAssign(makeExprList(n.Names), n.Values, true) + + case *ast.AssignStmt: + typecheckAssign(n.Lhs, n.Rhs, n.Tok == token.DEFINE) + + case *ast.Ident: + // Identifier can take its type from underlying object. + if t := typeof[n.Obj]; t != "" { + typeof[n] = t + } + + case *ast.SelectorExpr: + // Field or method. + name := n.Sel.Name + if t := typeof[n.X]; t != "" { + if strings.HasPrefix(t, "*") { + t = t[1:] // implicit * + } + if typ := cfg.Type[t]; typ != nil { + if t := typ.dot(cfg, name); t != "" { + typeof[n] = t + return + } + } + tt := typeof[t+"."+name] + if isType(tt) { + typeof[n] = getType(tt) + return + } + } + // Package selector. + if x, ok := n.X.(*ast.Ident); ok && x.Obj == nil { + str := x.Name + "." + name + if cfg.Type[str] != nil { + typeof[n] = mkType(str) + return + } + if t := cfg.typeof(x.Name + "." + name); t != "" { + typeof[n] = t + return + } + } + + case *ast.CallExpr: + // make(T) has type T. + if isTopName(n.Fun, "make") && len(n.Args) >= 1 { + typeof[n] = gofmt(n.Args[0]) + return + } + // new(T) has type *T + if isTopName(n.Fun, "new") && len(n.Args) == 1 { + typeof[n] = "*" + gofmt(n.Args[0]) + return + } + // Otherwise, use type of function to determine arguments. + t := typeof[n.Fun] + in, out := splitFunc(t) + if in == nil && out == nil { + return + } + typeof[n] = join(out) + for i, arg := range n.Args { + if i >= len(in) { + break + } + if typeof[arg] == "" { + typeof[arg] = in[i] + } + } + + case *ast.TypeAssertExpr: + // x.(type) has type of x. 
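+ // A nil Type is how go/ast represents the x.(type) form used in
+ // type switch guards.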
+ if n.Type == nil { + typeof[n] = typeof[n.X] + return + } + // x.(T) has type T. + if t := typeof[n.Type]; isType(t) { + typeof[n] = getType(t) + } else { + typeof[n] = gofmt(n.Type) + } + + case *ast.SliceExpr: + // x[i:j] has type of x. + typeof[n] = typeof[n.X] + + case *ast.IndexExpr: + // x[i] has key type of x's type. + t := expand(typeof[n.X]) + if strings.HasPrefix(t, "[") || strings.HasPrefix(t, "map[") { + // Lazy: assume there are no nested [] in the array + // length or map key type. + if i := strings.Index(t, "]"); i >= 0 { + typeof[n] = t[i+1:] + } + } + + case *ast.StarExpr: + // *x for x of type *T has type T when x is an expr. + // We don't use the result when *x is a type, but + // compute it anyway. + t := expand(typeof[n.X]) + if isType(t) { + typeof[n] = "type *" + getType(t) + } else if strings.HasPrefix(t, "*") { + typeof[n] = t[len("*"):] + } + + case *ast.UnaryExpr: + // &x for x of type T has type *T. + t := typeof[n.X] + if t != "" && n.Op == token.AND { + typeof[n] = "*" + t + } + + case *ast.CompositeLit: + // T{...} has type T. + typeof[n] = gofmt(n.Type) + + case *ast.ParenExpr: + // (x) has type of x. + typeof[n] = typeof[n.X] + + case *ast.RangeStmt: + t := expand(typeof[n.X]) + if t == "" { + return + } + var key, value string + if t == "string" { + key, value = "int", "rune" + } else if strings.HasPrefix(t, "[") { + key = "int" + if i := strings.Index(t, "]"); i >= 0 { + value = t[i+1:] + } + } else if strings.HasPrefix(t, "map[") { + if i := strings.Index(t, "]"); i >= 0 { + key, value = t[4:i], t[i+1:] + } + } + changed := false + if n.Key != nil && key != "" { + changed = true + set(n.Key, key, n.Tok == token.DEFINE) + } + if n.Value != nil && value != "" { + changed = true + set(n.Value, value, n.Tok == token.DEFINE) + } + // Ugly failure of vision: already type-checked body. + // Do it again now that we have that type info. + if changed { + typecheck1(cfg, n.Body, typeof, assign) + } + + case *ast.TypeSwitchStmt: + // Type of variable changes for each case in type switch, + // but go/parser generates just one variable. + // Repeat type check for each case with more precise + // type information. + as, ok := n.Assign.(*ast.AssignStmt) + if !ok { + return + } + varx, ok := as.Lhs[0].(*ast.Ident) + if !ok { + return + } + t := typeof[varx] + for _, cas := range n.Body.List { + cas := cas.(*ast.CaseClause) + if len(cas.List) == 1 { + // Variable has specific type only when there is + // exactly one type in the case list. + if tt := typeof[cas.List[0]]; isType(tt) { + tt = getType(tt) + typeof[varx] = tt + typeof[varx.Obj] = tt + typecheck1(cfg, cas.Body, typeof, assign) + } + } + } + // Restore t. + typeof[varx] = t + typeof[varx.Obj] = t + + case *ast.ReturnStmt: + if len(curfn) == 0 { + // Probably can't happen. + return + } + f := curfn[len(curfn)-1] + res := n.Results + if f.Results != nil { + t := split(typeof[f.Results]) + for i := 0; i < len(res) && i < len(t); i++ { + set(res[i], t[i], false) + } + } + } + } + walkBeforeAfter(f, before, after) +} + +// Convert between function type strings and lists of types. +// Using strings makes this a little harder, but it makes +// a lot of the rest of the code easier. This will all go away +// when we can use go/typechecker directly. + +// splitFunc splits "func(x,y,z) (a,b,c)" into ["x", "y", "z"] and ["a", "b", "c"]. 
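+// For example, splitFunc("func(int, string) (bool, error)") returns
+// in = ["int", "string"] and out = ["bool", "error"]; a string that
+// does not begin with "func(" yields nil, nil.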
+func splitFunc(s string) (in, out []string) { + if !strings.HasPrefix(s, "func(") { + return nil, nil + } + + i := len("func(") // index of beginning of 'in' arguments + nparen := 0 + for j := i; j < len(s); j++ { + switch s[j] { + case '(': + nparen++ + case ')': + nparen-- + if nparen < 0 { + // found end of parameter list + out := strings.TrimSpace(s[j+1:]) + if len(out) >= 2 && out[0] == '(' && out[len(out)-1] == ')' { + out = out[1 : len(out)-1] + } + return split(s[i:j]), split(out) + } + } + } + return nil, nil +} + +// joinFunc is the inverse of splitFunc. +func joinFunc(in, out []string) string { + outs := "" + if len(out) == 1 { + outs = " " + out[0] + } else if len(out) > 1 { + outs = " (" + join(out) + ")" + } + return "func(" + join(in) + ")" + outs +} + +// split splits "int, float" into ["int", "float"] and splits "" into []. +func split(s string) []string { + out := []string{} + i := 0 // current type being scanned is s[i:j]. + nparen := 0 + for j := 0; j < len(s); j++ { + switch s[j] { + case ' ': + if i == j { + i++ + } + case '(': + nparen++ + case ')': + nparen-- + if nparen < 0 { + // probably can't happen + return nil + } + case ',': + if nparen == 0 { + if i < j { + out = append(out, s[i:j]) + } + i = j + 1 + } + } + } + if nparen != 0 { + // probably can't happen + return nil + } + if i < len(s) { + out = append(out, s[i:]) + } + return out +} + +// join is the inverse of split. +func join(x []string) string { + return strings.Join(x, ", ") +} diff --git a/vendor/google.golang.org/appengine/datastore/datastore.go b/vendor/google.golang.org/appengine/datastore/datastore.go new file mode 100644 index 0000000..9422e41 --- /dev/null +++ b/vendor/google.golang.org/appengine/datastore/datastore.go @@ -0,0 +1,406 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package datastore + +import ( + "errors" + "fmt" + "reflect" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine" + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/datastore" +) + +var ( + // ErrInvalidEntityType is returned when functions like Get or Next are + // passed a dst or src argument of invalid type. + ErrInvalidEntityType = errors.New("datastore: invalid entity type") + // ErrInvalidKey is returned when an invalid key is presented. + ErrInvalidKey = errors.New("datastore: invalid key") + // ErrNoSuchEntity is returned when no entity was found for a given key. + ErrNoSuchEntity = errors.New("datastore: no such entity") +) + +// ErrFieldMismatch is returned when a field is to be loaded into a different +// type than the one it was stored from, or when a field is missing or +// unexported in the destination struct. +// StructType is the type of the struct pointed to by the destination argument +// passed to Get or to Iterator.Next. +type ErrFieldMismatch struct { + StructType reflect.Type + FieldName string + Reason string +} + +func (e *ErrFieldMismatch) Error() string { + return fmt.Sprintf("datastore: cannot load field %q into a %q: %s", + e.FieldName, e.StructType, e.Reason) +} + +// protoToKey converts a Reference proto to a *Key. 
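+// Path elements are visited in order, so each element's key has the
+// key built so far as its parent; an element that fails Key.valid
+// reports ErrInvalidKey.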
+func protoToKey(r *pb.Reference) (k *Key, err error) { + appID := r.GetApp() + namespace := r.GetNameSpace() + for _, e := range r.Path.Element { + k = &Key{ + kind: e.GetType(), + stringID: e.GetName(), + intID: e.GetId(), + parent: k, + appID: appID, + namespace: namespace, + } + if !k.valid() { + return nil, ErrInvalidKey + } + } + return +} + +// keyToProto converts a *Key to a Reference proto. +func keyToProto(defaultAppID string, k *Key) *pb.Reference { + appID := k.appID + if appID == "" { + appID = defaultAppID + } + n := 0 + for i := k; i != nil; i = i.parent { + n++ + } + e := make([]*pb.Path_Element, n) + for i := k; i != nil; i = i.parent { + n-- + e[n] = &pb.Path_Element{ + Type: &i.kind, + } + // At most one of {Name,Id} should be set. + // Neither will be set for incomplete keys. + if i.stringID != "" { + e[n].Name = &i.stringID + } else if i.intID != 0 { + e[n].Id = &i.intID + } + } + var namespace *string + if k.namespace != "" { + namespace = proto.String(k.namespace) + } + return &pb.Reference{ + App: proto.String(appID), + NameSpace: namespace, + Path: &pb.Path{ + Element: e, + }, + } +} + +// multiKeyToProto is a batch version of keyToProto. +func multiKeyToProto(appID string, key []*Key) []*pb.Reference { + ret := make([]*pb.Reference, len(key)) + for i, k := range key { + ret[i] = keyToProto(appID, k) + } + return ret +} + +// multiValid is a batch version of Key.valid. It returns an error, not a +// []bool. +func multiValid(key []*Key) error { + invalid := false + for _, k := range key { + if !k.valid() { + invalid = true + break + } + } + if !invalid { + return nil + } + err := make(appengine.MultiError, len(key)) + for i, k := range key { + if !k.valid() { + err[i] = ErrInvalidKey + } + } + return err +} + +// It's unfortunate that the two semantically equivalent concepts pb.Reference +// and pb.PropertyValue_ReferenceValue aren't the same type. For example, the +// two have different protobuf field numbers. + +// referenceValueToKey is the same as protoToKey except the input is a +// PropertyValue_ReferenceValue instead of a Reference. +func referenceValueToKey(r *pb.PropertyValue_ReferenceValue) (k *Key, err error) { + appID := r.GetApp() + namespace := r.GetNameSpace() + for _, e := range r.Pathelement { + k = &Key{ + kind: e.GetType(), + stringID: e.GetName(), + intID: e.GetId(), + parent: k, + appID: appID, + namespace: namespace, + } + if !k.valid() { + return nil, ErrInvalidKey + } + } + return +} + +// keyToReferenceValue is the same as keyToProto except the output is a +// PropertyValue_ReferenceValue instead of a Reference. +func keyToReferenceValue(defaultAppID string, k *Key) *pb.PropertyValue_ReferenceValue { + ref := keyToProto(defaultAppID, k) + pe := make([]*pb.PropertyValue_ReferenceValue_PathElement, len(ref.Path.Element)) + for i, e := range ref.Path.Element { + pe[i] = &pb.PropertyValue_ReferenceValue_PathElement{ + Type: e.Type, + Id: e.Id, + Name: e.Name, + } + } + return &pb.PropertyValue_ReferenceValue{ + App: ref.App, + NameSpace: ref.NameSpace, + Pathelement: pe, + } +} + +type multiArgType int + +const ( + multiArgTypeInvalid multiArgType = iota + multiArgTypePropertyLoadSaver + multiArgTypeStruct + multiArgTypeStructPtr + multiArgTypeInterface +) + +// checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct +// type S, for some interface type I, or some non-interface non-pointer type P +// such that P or *P implements PropertyLoadSaver. 
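+// (So []int is rejected, while []S and []*S are accepted for any
+// struct type S.)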
+// +// It returns what category the slice's elements are, and the reflect.Type +// that represents S, I or P. +// +// As a special case, PropertyList is an invalid type for v. +func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) { + if v.Kind() != reflect.Slice { + return multiArgTypeInvalid, nil + } + if v.Type() == typeOfPropertyList { + return multiArgTypeInvalid, nil + } + elemType = v.Type().Elem() + if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) { + return multiArgTypePropertyLoadSaver, elemType + } + switch elemType.Kind() { + case reflect.Struct: + return multiArgTypeStruct, elemType + case reflect.Interface: + return multiArgTypeInterface, elemType + case reflect.Ptr: + elemType = elemType.Elem() + if elemType.Kind() == reflect.Struct { + return multiArgTypeStructPtr, elemType + } + } + return multiArgTypeInvalid, nil +} + +// Get loads the entity stored for k into dst, which must be a struct pointer +// or implement PropertyLoadSaver. If there is no such entity for the key, Get +// returns ErrNoSuchEntity. +// +// The values of dst's unmatched struct fields are not modified, and matching +// slice-typed fields are not reset before appending to them. In particular, it +// is recommended to pass a pointer to a zero valued struct on each Get call. +// +// ErrFieldMismatch is returned when a field is to be loaded into a different +// type than the one it was stored from, or when a field is missing or +// unexported in the destination struct. ErrFieldMismatch is only returned if +// dst is a struct pointer. +func Get(c context.Context, key *Key, dst interface{}) error { + if dst == nil { // GetMulti catches nil interface; we need to catch nil ptr here + return ErrInvalidEntityType + } + err := GetMulti(c, []*Key{key}, []interface{}{dst}) + if me, ok := err.(appengine.MultiError); ok { + return me[0] + } + return err +} + +// GetMulti is a batch version of Get. +// +// dst must be a []S, []*S, []I or []P, for some struct type S, some interface +// type I, or some non-interface non-pointer type P such that P or *P +// implements PropertyLoadSaver. If an []I, each element must be a valid dst +// for Get: it must be a struct pointer or implement PropertyLoadSaver. +// +// As a special case, PropertyList is an invalid type for dst, even though a +// PropertyList is a slice of structs. It is treated as invalid to avoid being +// mistakenly passed when []PropertyList was intended. 
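+//
+// A typical call, with Item a struct type and keys a []*Key (both
+// hypothetical):
+//	dst := make([]*Item, len(keys))
+//	err := GetMulti(c, keys, dst)
+// On partial failure the returned error is an appengine.MultiError
+// whose i'th element corresponds to key[i].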
+func GetMulti(c context.Context, key []*Key, dst interface{}) error { + v := reflect.ValueOf(dst) + multiArgType, _ := checkMultiArg(v) + if multiArgType == multiArgTypeInvalid { + return errors.New("datastore: dst has invalid type") + } + if len(key) != v.Len() { + return errors.New("datastore: key and dst slices have different length") + } + if len(key) == 0 { + return nil + } + if err := multiValid(key); err != nil { + return err + } + req := &pb.GetRequest{ + Key: multiKeyToProto(internal.FullyQualifiedAppID(c), key), + } + res := &pb.GetResponse{} + if err := internal.Call(c, "datastore_v3", "Get", req, res); err != nil { + return err + } + if len(key) != len(res.Entity) { + return errors.New("datastore: internal error: server returned the wrong number of entities") + } + multiErr, any := make(appengine.MultiError, len(key)), false + for i, e := range res.Entity { + if e.Entity == nil { + multiErr[i] = ErrNoSuchEntity + } else { + elem := v.Index(i) + if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct { + elem = elem.Addr() + } + if multiArgType == multiArgTypeStructPtr && elem.IsNil() { + elem.Set(reflect.New(elem.Type().Elem())) + } + multiErr[i] = loadEntity(elem.Interface(), e.Entity) + } + if multiErr[i] != nil { + any = true + } + } + if any { + return multiErr + } + return nil +} + +// Put saves the entity src into the datastore with key k. src must be a struct +// pointer or implement PropertyLoadSaver; if a struct pointer then any +// unexported fields of that struct will be skipped. If k is an incomplete key, +// the returned key will be a unique key generated by the datastore. +func Put(c context.Context, key *Key, src interface{}) (*Key, error) { + k, err := PutMulti(c, []*Key{key}, []interface{}{src}) + if err != nil { + if me, ok := err.(appengine.MultiError); ok { + return nil, me[0] + } + return nil, err + } + return k[0], nil +} + +// PutMulti is a batch version of Put. +// +// src must satisfy the same conditions as the dst argument to GetMulti. +func PutMulti(c context.Context, key []*Key, src interface{}) ([]*Key, error) { + v := reflect.ValueOf(src) + multiArgType, _ := checkMultiArg(v) + if multiArgType == multiArgTypeInvalid { + return nil, errors.New("datastore: src has invalid type") + } + if len(key) != v.Len() { + return nil, errors.New("datastore: key and src slices have different length") + } + if len(key) == 0 { + return nil, nil + } + appID := internal.FullyQualifiedAppID(c) + if err := multiValid(key); err != nil { + return nil, err + } + req := &pb.PutRequest{} + for i := range key { + elem := v.Index(i) + if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct { + elem = elem.Addr() + } + sProto, err := saveEntity(appID, key[i], elem.Interface()) + if err != nil { + return nil, err + } + req.Entity = append(req.Entity, sProto) + } + res := &pb.PutResponse{} + if err := internal.Call(c, "datastore_v3", "Put", req, res); err != nil { + return nil, err + } + if len(key) != len(res.Key) { + return nil, errors.New("datastore: internal error: server returned the wrong number of keys") + } + ret := make([]*Key, len(key)) + for i := range ret { + var err error + ret[i], err = protoToKey(res.Key[i]) + if err != nil || ret[i].Incomplete() { + return nil, errors.New("datastore: internal error: server returned an invalid key") + } + } + return ret, nil +} + +// Delete deletes the entity for the given key. 
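+// It is implemented in terms of DeleteMulti, unwrapping the
+// single-element MultiError that a one-key batch can return.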
+func Delete(c context.Context, key *Key) error { + err := DeleteMulti(c, []*Key{key}) + if me, ok := err.(appengine.MultiError); ok { + return me[0] + } + return err +} + +// DeleteMulti is a batch version of Delete. +func DeleteMulti(c context.Context, key []*Key) error { + if len(key) == 0 { + return nil + } + if err := multiValid(key); err != nil { + return err + } + req := &pb.DeleteRequest{ + Key: multiKeyToProto(internal.FullyQualifiedAppID(c), key), + } + res := &pb.DeleteResponse{} + return internal.Call(c, "datastore_v3", "Delete", req, res) +} + +func namespaceMod(m proto.Message, namespace string) { + // pb.Query is the only type that has a name_space field. + // All other namespace support in datastore is in the keys. + switch m := m.(type) { + case *pb.Query: + if m.NameSpace == nil { + m.NameSpace = &namespace + } + } +} + +func init() { + internal.NamespaceMods["datastore_v3"] = namespaceMod + internal.RegisterErrorCodeMap("datastore_v3", pb.Error_ErrorCode_name) + internal.RegisterTimeoutErrorCode("datastore_v3", int32(pb.Error_TIMEOUT)) +} diff --git a/vendor/google.golang.org/appengine/datastore/datastore_test.go b/vendor/google.golang.org/appengine/datastore/datastore_test.go new file mode 100644 index 0000000..b2856a9 --- /dev/null +++ b/vendor/google.golang.org/appengine/datastore/datastore_test.go @@ -0,0 +1,1567 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package datastore + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "reflect" + "strings" + "testing" + "time" + + "google.golang.org/appengine" + "google.golang.org/appengine/internal/aetesting" + pb "google.golang.org/appengine/internal/datastore" +) + +const testAppID = "testApp" + +type ( + myBlob []byte + myByte byte + myString string +) + +func makeMyByteSlice(n int) []myByte { + b := make([]myByte, n) + for i := range b { + b[i] = myByte(i) + } + return b +} + +func makeInt8Slice(n int) []int8 { + b := make([]int8, n) + for i := range b { + b[i] = int8(i) + } + return b +} + +func makeUint8Slice(n int) []uint8 { + b := make([]uint8, n) + for i := range b { + b[i] = uint8(i) + } + return b +} + +func newKey(stringID string, parent *Key) *Key { + return &Key{ + kind: "kind", + stringID: stringID, + intID: 0, + parent: parent, + appID: testAppID, + } +} + +var ( + testKey0 = newKey("name0", nil) + testKey1a = newKey("name1", nil) + testKey1b = newKey("name1", nil) + testKey2a = newKey("name2", testKey0) + testKey2b = newKey("name2", testKey0) + testGeoPt0 = appengine.GeoPoint{Lat: 1.2, Lng: 3.4} + testGeoPt1 = appengine.GeoPoint{Lat: 5, Lng: 10} + testBadGeoPt = appengine.GeoPoint{Lat: 1000, Lng: 34} +) + +type B0 struct { + B []byte +} + +type B1 struct { + B []int8 +} + +type B2 struct { + B myBlob +} + +type B3 struct { + B []myByte +} + +type B4 struct { + B [][]byte +} + +type B5 struct { + B ByteString +} + +type C0 struct { + I int + C chan int +} + +type C1 struct { + I int + C *chan int +} + +type C2 struct { + I int + C []chan int +} + +type C3 struct { + C string +} + +type E struct{} + +type G0 struct { + G appengine.GeoPoint +} + +type G1 struct { + G []appengine.GeoPoint +} + +type K0 struct { + K *Key +} + +type K1 struct { + K []*Key +} + +type N0 struct { + X0 + Nonymous X0 + Ignore string `datastore:"-"` + Other string +} + +type N1 struct { + X0 + Nonymous []X0 + Ignore string `datastore:"-"` + Other string +} + +type N2 struct { + N1 `datastore:"red"` + Green N1 
`datastore:"green"` + Blue N1 + White N1 `datastore:"-"` +} + +type O0 struct { + I int64 +} + +type O1 struct { + I int32 +} + +type U0 struct { + U uint +} + +type U1 struct { + U string +} + +type T struct { + T time.Time +} + +type X0 struct { + S string + I int + i int +} + +type X1 struct { + S myString + I int32 + J int64 +} + +type X2 struct { + Z string + i int +} + +type X3 struct { + S bool + I int +} + +type Y0 struct { + B bool + F []float64 + G []float64 +} + +type Y1 struct { + B bool + F float64 +} + +type Y2 struct { + B bool + F []int64 +} + +type Tagged struct { + A int `datastore:"a,noindex"` + B []int `datastore:"b"` + C int `datastore:",noindex"` + D int `datastore:""` + E int + // The "flatten" option is parsed but ignored for now. + F int `datastore:",noindex,flatten"` + G int `datastore:",flatten"` + I int `datastore:"-"` + J int `datastore:",noindex" json:"j"` + + Y0 `datastore:"-"` + Z chan int `datastore:"-,"` +} + +type InvalidTagged1 struct { + I int `datastore:"\t"` +} + +type InvalidTagged2 struct { + I int + J int `datastore:"I"` +} + +type Inner1 struct { + W int32 + X string +} + +type Inner2 struct { + Y float64 +} + +type Inner3 struct { + Z bool +} + +type Outer struct { + A int16 + I []Inner1 + J Inner2 + Inner3 +} + +type OuterEquivalent struct { + A int16 + IDotW []int32 `datastore:"I.W"` + IDotX []string `datastore:"I.X"` + JDotY float64 `datastore:"J.Y"` + Z bool +} + +type Dotted struct { + A DottedA `datastore:"A0.A1.A2"` +} + +type DottedA struct { + B DottedB `datastore:"B3"` +} + +type DottedB struct { + C int `datastore:"C4.C5"` +} + +type SliceOfSlices struct { + I int + S []struct { + J int + F []float64 + } +} + +type Recursive struct { + I int + R []Recursive +} + +type MutuallyRecursive0 struct { + I int + R []MutuallyRecursive1 +} + +type MutuallyRecursive1 struct { + I int + R []MutuallyRecursive0 +} + +type Doubler struct { + S string + I int64 + B bool +} + +func (d *Doubler) Load(props []Property) error { + return LoadStruct(d, props) +} + +func (d *Doubler) Save() ([]Property, error) { + // Save the default Property slice to an in-memory buffer (a PropertyList). + props, err := SaveStruct(d) + if err != nil { + return nil, err + } + var list PropertyList + if err := list.Load(props); err != nil { + return nil, err + } + + // Edit that PropertyList, and send it on. + for i := range list { + switch v := list[i].Value.(type) { + case string: + // + means string concatenation. + list[i].Value = v + v + case int64: + // + means integer addition. + list[i].Value = v + v + } + } + return list.Save() +} + +var _ PropertyLoadSaver = (*Doubler)(nil) + +type Deriver struct { + S, Derived, Ignored string +} + +func (e *Deriver) Load(props []Property) error { + for _, p := range props { + if p.Name != "S" { + continue + } + e.S = p.Value.(string) + e.Derived = "derived+" + e.S + } + return nil +} + +func (e *Deriver) Save() ([]Property, error) { + return []Property{ + { + Name: "S", + Value: e.S, + }, + }, nil +} + +var _ PropertyLoadSaver = (*Deriver)(nil) + +type BadMultiPropEntity struct{} + +func (e *BadMultiPropEntity) Load(props []Property) error { + return errors.New("unimplemented") +} + +func (e *BadMultiPropEntity) Save() ([]Property, error) { + // Write multiple properties with the same name "I", but Multiple is false. 
+ var props []Property + for i := 0; i < 3; i++ { + props = append(props, Property{ + Name: "I", + Value: int64(i), + }) + } + return props, nil +} + +var _ PropertyLoadSaver = (*BadMultiPropEntity)(nil) + +type BK struct { + Key appengine.BlobKey +} + +type testCase struct { + desc string + src interface{} + want interface{} + putErr string + getErr string +} + +var testCases = []testCase{ + { + "chan save fails", + &C0{I: -1}, + &E{}, + "unsupported struct field", + "", + }, + { + "*chan save fails", + &C1{I: -1}, + &E{}, + "unsupported struct field", + "", + }, + { + "[]chan save fails", + &C2{I: -1, C: make([]chan int, 8)}, + &E{}, + "unsupported struct field", + "", + }, + { + "chan load fails", + &C3{C: "not a chan"}, + &C0{}, + "", + "type mismatch", + }, + { + "*chan load fails", + &C3{C: "not a *chan"}, + &C1{}, + "", + "type mismatch", + }, + { + "[]chan load fails", + &C3{C: "not a []chan"}, + &C2{}, + "", + "type mismatch", + }, + { + "empty struct", + &E{}, + &E{}, + "", + "", + }, + { + "geopoint", + &G0{G: testGeoPt0}, + &G0{G: testGeoPt0}, + "", + "", + }, + { + "geopoint invalid", + &G0{G: testBadGeoPt}, + &G0{}, + "invalid GeoPoint value", + "", + }, + { + "geopoint as props", + &G0{G: testGeoPt0}, + &PropertyList{ + Property{Name: "G", Value: testGeoPt0, NoIndex: false, Multiple: false}, + }, + "", + "", + }, + { + "geopoint slice", + &G1{G: []appengine.GeoPoint{testGeoPt0, testGeoPt1}}, + &G1{G: []appengine.GeoPoint{testGeoPt0, testGeoPt1}}, + "", + "", + }, + { + "key", + &K0{K: testKey1a}, + &K0{K: testKey1b}, + "", + "", + }, + { + "key with parent", + &K0{K: testKey2a}, + &K0{K: testKey2b}, + "", + "", + }, + { + "nil key", + &K0{}, + &K0{}, + "", + "", + }, + { + "all nil keys in slice", + &K1{[]*Key{nil, nil}}, + &K1{[]*Key{nil, nil}}, + "", + "", + }, + { + "some nil keys in slice", + &K1{[]*Key{testKey1a, nil, testKey2a}}, + &K1{[]*Key{testKey1b, nil, testKey2b}}, + "", + "", + }, + { + "overflow", + &O0{I: 1 << 48}, + &O1{}, + "", + "overflow", + }, + { + "time", + &T{T: time.Unix(1e9, 0)}, + &T{T: time.Unix(1e9, 0)}, + "", + "", + }, + { + "time as props", + &T{T: time.Unix(1e9, 0)}, + &PropertyList{ + Property{Name: "T", Value: time.Unix(1e9, 0).UTC(), NoIndex: false, Multiple: false}, + }, + "", + "", + }, + { + "uint save", + &U0{U: 1}, + &U0{}, + "unsupported struct field", + "", + }, + { + "uint load", + &U1{U: "not a uint"}, + &U0{}, + "", + "type mismatch", + }, + { + "zero", + &X0{}, + &X0{}, + "", + "", + }, + { + "basic", + &X0{S: "one", I: 2, i: 3}, + &X0{S: "one", I: 2}, + "", + "", + }, + { + "save string/int load myString/int32", + &X0{S: "one", I: 2, i: 3}, + &X1{S: "one", I: 2}, + "", + "", + }, + { + "missing fields", + &X0{S: "one", I: 2, i: 3}, + &X2{}, + "", + "no such struct field", + }, + { + "save string load bool", + &X0{S: "one", I: 2, i: 3}, + &X3{I: 2}, + "", + "type mismatch", + }, + { + "basic slice", + &Y0{B: true, F: []float64{7, 8, 9}}, + &Y0{B: true, F: []float64{7, 8, 9}}, + "", + "", + }, + { + "save []float64 load float64", + &Y0{B: true, F: []float64{7, 8, 9}}, + &Y1{B: true}, + "", + "requires a slice", + }, + { + "save []float64 load []int64", + &Y0{B: true, F: []float64{7, 8, 9}}, + &Y2{B: true}, + "", + "type mismatch", + }, + { + "single slice is too long", + &Y0{F: make([]float64, maxIndexedProperties+1)}, + &Y0{}, + "too many indexed properties", + "", + }, + { + "two slices are too long", + &Y0{F: make([]float64, maxIndexedProperties), G: make([]float64, maxIndexedProperties)}, + &Y0{}, + "too many indexed 
properties", + "", + }, + { + "one slice and one scalar are too long", + &Y0{F: make([]float64, maxIndexedProperties), B: true}, + &Y0{}, + "too many indexed properties", + "", + }, + { + "long blob", + &B0{B: makeUint8Slice(maxIndexedProperties + 1)}, + &B0{B: makeUint8Slice(maxIndexedProperties + 1)}, + "", + "", + }, + { + "long []int8 is too long", + &B1{B: makeInt8Slice(maxIndexedProperties + 1)}, + &B1{}, + "too many indexed properties", + "", + }, + { + "short []int8", + &B1{B: makeInt8Slice(3)}, + &B1{B: makeInt8Slice(3)}, + "", + "", + }, + { + "long myBlob", + &B2{B: makeUint8Slice(maxIndexedProperties + 1)}, + &B2{B: makeUint8Slice(maxIndexedProperties + 1)}, + "", + "", + }, + { + "short myBlob", + &B2{B: makeUint8Slice(3)}, + &B2{B: makeUint8Slice(3)}, + "", + "", + }, + { + "long []myByte", + &B3{B: makeMyByteSlice(maxIndexedProperties + 1)}, + &B3{B: makeMyByteSlice(maxIndexedProperties + 1)}, + "", + "", + }, + { + "short []myByte", + &B3{B: makeMyByteSlice(3)}, + &B3{B: makeMyByteSlice(3)}, + "", + "", + }, + { + "slice of blobs", + &B4{B: [][]byte{ + makeUint8Slice(3), + makeUint8Slice(4), + makeUint8Slice(5), + }}, + &B4{B: [][]byte{ + makeUint8Slice(3), + makeUint8Slice(4), + makeUint8Slice(5), + }}, + "", + "", + }, + { + "short ByteString", + &B5{B: ByteString(makeUint8Slice(3))}, + &B5{B: ByteString(makeUint8Slice(3))}, + "", + "", + }, + { + "short ByteString as props", + &B5{B: ByteString(makeUint8Slice(3))}, + &PropertyList{ + Property{Name: "B", Value: ByteString(makeUint8Slice(3)), NoIndex: false, Multiple: false}, + }, + "", + "", + }, + { + "short ByteString into string", + &B5{B: ByteString("legacy")}, + &struct{ B string }{"legacy"}, + "", + "", + }, + { + "[]byte must be noindex", + &PropertyList{ + Property{Name: "B", Value: makeUint8Slice(3), NoIndex: false}, + }, + nil, + "cannot index a []byte valued Property", + "", + }, + { + "save tagged load props", + &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, F: 6, G: 7, I: 8, J: 9}, + &PropertyList{ + // A and B are renamed to a and b; A and C are noindex, I is ignored. + // Indexed properties are loaded before raw properties. Thus, the + // result is: b, b, b, D, E, a, c. 
+ Property{Name: "b", Value: int64(21), NoIndex: false, Multiple: true}, + Property{Name: "b", Value: int64(22), NoIndex: false, Multiple: true}, + Property{Name: "b", Value: int64(23), NoIndex: false, Multiple: true}, + Property{Name: "D", Value: int64(4), NoIndex: false, Multiple: false}, + Property{Name: "E", Value: int64(5), NoIndex: false, Multiple: false}, + Property{Name: "G", Value: int64(7), NoIndex: false, Multiple: false}, + Property{Name: "a", Value: int64(1), NoIndex: true, Multiple: false}, + Property{Name: "C", Value: int64(3), NoIndex: true, Multiple: false}, + Property{Name: "F", Value: int64(6), NoIndex: true, Multiple: false}, + Property{Name: "J", Value: int64(9), NoIndex: true, Multiple: false}, + }, + "", + "", + }, + { + "save tagged load tagged", + &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, I: 6, J: 7}, + &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, J: 7}, + "", + "", + }, + { + "save props load tagged", + &PropertyList{ + Property{Name: "A", Value: int64(11), NoIndex: true, Multiple: false}, + Property{Name: "a", Value: int64(12), NoIndex: true, Multiple: false}, + }, + &Tagged{A: 12}, + "", + `cannot load field "A"`, + }, + { + "invalid tagged1", + &InvalidTagged1{I: 1}, + &InvalidTagged1{}, + "struct tag has invalid property name", + "", + }, + { + "invalid tagged2", + &InvalidTagged2{I: 1, J: 2}, + &InvalidTagged2{}, + "struct tag has repeated property name", + "", + }, + { + "doubler", + &Doubler{S: "s", I: 1, B: true}, + &Doubler{S: "ss", I: 2, B: true}, + "", + "", + }, + { + "save struct load props", + &X0{S: "s", I: 1}, + &PropertyList{ + Property{Name: "S", Value: "s", NoIndex: false, Multiple: false}, + Property{Name: "I", Value: int64(1), NoIndex: false, Multiple: false}, + }, + "", + "", + }, + { + "save props load struct", + &PropertyList{ + Property{Name: "S", Value: "s", NoIndex: false, Multiple: false}, + Property{Name: "I", Value: int64(1), NoIndex: false, Multiple: false}, + }, + &X0{S: "s", I: 1}, + "", + "", + }, + { + "nil-value props", + &PropertyList{ + Property{Name: "I", Value: nil, NoIndex: false, Multiple: false}, + Property{Name: "B", Value: nil, NoIndex: false, Multiple: false}, + Property{Name: "S", Value: nil, NoIndex: false, Multiple: false}, + Property{Name: "F", Value: nil, NoIndex: false, Multiple: false}, + Property{Name: "K", Value: nil, NoIndex: false, Multiple: false}, + Property{Name: "T", Value: nil, NoIndex: false, Multiple: false}, + Property{Name: "J", Value: nil, NoIndex: false, Multiple: true}, + Property{Name: "J", Value: int64(7), NoIndex: false, Multiple: true}, + Property{Name: "J", Value: nil, NoIndex: false, Multiple: true}, + }, + &struct { + I int64 + B bool + S string + F float64 + K *Key + T time.Time + J []int64 + }{ + J: []int64{0, 7, 0}, + }, + "", + "", + }, + { + "save outer load props", + &Outer{ + A: 1, + I: []Inner1{ + {10, "ten"}, + {20, "twenty"}, + {30, "thirty"}, + }, + J: Inner2{ + Y: 3.14, + }, + Inner3: Inner3{ + Z: true, + }, + }, + &PropertyList{ + Property{Name: "A", Value: int64(1), NoIndex: false, Multiple: false}, + Property{Name: "I.W", Value: int64(10), NoIndex: false, Multiple: true}, + Property{Name: "I.X", Value: "ten", NoIndex: false, Multiple: true}, + Property{Name: "I.W", Value: int64(20), NoIndex: false, Multiple: true}, + Property{Name: "I.X", Value: "twenty", NoIndex: false, Multiple: true}, + Property{Name: "I.W", Value: int64(30), NoIndex: false, Multiple: true}, + Property{Name: "I.X", Value: "thirty", NoIndex: false, Multiple: true}, + Property{Name: 
"J.Y", Value: float64(3.14), NoIndex: false, Multiple: false}, + Property{Name: "Z", Value: true, NoIndex: false, Multiple: false}, + }, + "", + "", + }, + { + "save props load outer-equivalent", + &PropertyList{ + Property{Name: "A", Value: int64(1), NoIndex: false, Multiple: false}, + Property{Name: "I.W", Value: int64(10), NoIndex: false, Multiple: true}, + Property{Name: "I.X", Value: "ten", NoIndex: false, Multiple: true}, + Property{Name: "I.W", Value: int64(20), NoIndex: false, Multiple: true}, + Property{Name: "I.X", Value: "twenty", NoIndex: false, Multiple: true}, + Property{Name: "I.W", Value: int64(30), NoIndex: false, Multiple: true}, + Property{Name: "I.X", Value: "thirty", NoIndex: false, Multiple: true}, + Property{Name: "J.Y", Value: float64(3.14), NoIndex: false, Multiple: false}, + Property{Name: "Z", Value: true, NoIndex: false, Multiple: false}, + }, + &OuterEquivalent{ + A: 1, + IDotW: []int32{10, 20, 30}, + IDotX: []string{"ten", "twenty", "thirty"}, + JDotY: 3.14, + Z: true, + }, + "", + "", + }, + { + "save outer-equivalent load outer", + &OuterEquivalent{ + A: 1, + IDotW: []int32{10, 20, 30}, + IDotX: []string{"ten", "twenty", "thirty"}, + JDotY: 3.14, + Z: true, + }, + &Outer{ + A: 1, + I: []Inner1{ + {10, "ten"}, + {20, "twenty"}, + {30, "thirty"}, + }, + J: Inner2{ + Y: 3.14, + }, + Inner3: Inner3{ + Z: true, + }, + }, + "", + "", + }, + { + "dotted names save", + &Dotted{A: DottedA{B: DottedB{C: 88}}}, + &PropertyList{ + Property{Name: "A0.A1.A2.B3.C4.C5", Value: int64(88), NoIndex: false, Multiple: false}, + }, + "", + "", + }, + { + "dotted names load", + &PropertyList{ + Property{Name: "A0.A1.A2.B3.C4.C5", Value: int64(99), NoIndex: false, Multiple: false}, + }, + &Dotted{A: DottedA{B: DottedB{C: 99}}}, + "", + "", + }, + { + "save struct load deriver", + &X0{S: "s", I: 1}, + &Deriver{S: "s", Derived: "derived+s"}, + "", + "", + }, + { + "save deriver load struct", + &Deriver{S: "s", Derived: "derived+s", Ignored: "ignored"}, + &X0{S: "s"}, + "", + "", + }, + { + "bad multi-prop entity", + &BadMultiPropEntity{}, + &BadMultiPropEntity{}, + "Multiple is false", + "", + }, + // Regression: CL 25062824 broke handling of appengine.BlobKey fields. 
+ { + "appengine.BlobKey", + &BK{Key: "blah"}, + &BK{Key: "blah"}, + "", + "", + }, + { + "zero time.Time", + &T{T: time.Time{}}, + &T{T: time.Time{}}, + "", + "", + }, + { + "time.Time near Unix zero time", + &T{T: time.Unix(0, 4e3)}, + &T{T: time.Unix(0, 4e3)}, + "", + "", + }, + { + "time.Time, far in the future", + &T{T: time.Date(99999, 1, 1, 0, 0, 0, 0, time.UTC)}, + &T{T: time.Date(99999, 1, 1, 0, 0, 0, 0, time.UTC)}, + "", + "", + }, + { + "time.Time, very far in the past", + &T{T: time.Date(-300000, 1, 1, 0, 0, 0, 0, time.UTC)}, + &T{}, + "time value out of range", + "", + }, + { + "time.Time, very far in the future", + &T{T: time.Date(294248, 1, 1, 0, 0, 0, 0, time.UTC)}, + &T{}, + "time value out of range", + "", + }, + { + "structs", + &N0{ + X0: X0{S: "one", I: 2, i: 3}, + Nonymous: X0{S: "four", I: 5, i: 6}, + Ignore: "ignore", + Other: "other", + }, + &N0{ + X0: X0{S: "one", I: 2}, + Nonymous: X0{S: "four", I: 5}, + Other: "other", + }, + "", + "", + }, + { + "slice of structs", + &N1{ + X0: X0{S: "one", I: 2, i: 3}, + Nonymous: []X0{ + {S: "four", I: 5, i: 6}, + {S: "seven", I: 8, i: 9}, + {S: "ten", I: 11, i: 12}, + {S: "thirteen", I: 14, i: 15}, + }, + Ignore: "ignore", + Other: "other", + }, + &N1{ + X0: X0{S: "one", I: 2}, + Nonymous: []X0{ + {S: "four", I: 5}, + {S: "seven", I: 8}, + {S: "ten", I: 11}, + {S: "thirteen", I: 14}, + }, + Other: "other", + }, + "", + "", + }, + { + "structs with slices of structs", + &N2{ + N1: N1{ + X0: X0{S: "rouge"}, + Nonymous: []X0{ + {S: "rosso0"}, + {S: "rosso1"}, + }, + }, + Green: N1{ + X0: X0{S: "vert"}, + Nonymous: []X0{ + {S: "verde0"}, + {S: "verde1"}, + {S: "verde2"}, + }, + }, + Blue: N1{ + X0: X0{S: "bleu"}, + Nonymous: []X0{ + {S: "blu0"}, + {S: "blu1"}, + {S: "blu2"}, + {S: "blu3"}, + }, + }, + }, + &N2{ + N1: N1{ + X0: X0{S: "rouge"}, + Nonymous: []X0{ + {S: "rosso0"}, + {S: "rosso1"}, + }, + }, + Green: N1{ + X0: X0{S: "vert"}, + Nonymous: []X0{ + {S: "verde0"}, + {S: "verde1"}, + {S: "verde2"}, + }, + }, + Blue: N1{ + X0: X0{S: "bleu"}, + Nonymous: []X0{ + {S: "blu0"}, + {S: "blu1"}, + {S: "blu2"}, + {S: "blu3"}, + }, + }, + }, + "", + "", + }, + { + "save structs load props", + &N2{ + N1: N1{ + X0: X0{S: "rouge"}, + Nonymous: []X0{ + {S: "rosso0"}, + {S: "rosso1"}, + }, + }, + Green: N1{ + X0: X0{S: "vert"}, + Nonymous: []X0{ + {S: "verde0"}, + {S: "verde1"}, + {S: "verde2"}, + }, + }, + Blue: N1{ + X0: X0{S: "bleu"}, + Nonymous: []X0{ + {S: "blu0"}, + {S: "blu1"}, + {S: "blu2"}, + {S: "blu3"}, + }, + }, + }, + &PropertyList{ + Property{Name: "red.S", Value: "rouge", NoIndex: false, Multiple: false}, + Property{Name: "red.I", Value: int64(0), NoIndex: false, Multiple: false}, + Property{Name: "red.Nonymous.S", Value: "rosso0", NoIndex: false, Multiple: true}, + Property{Name: "red.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, + Property{Name: "red.Nonymous.S", Value: "rosso1", NoIndex: false, Multiple: true}, + Property{Name: "red.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, + Property{Name: "red.Other", Value: "", NoIndex: false, Multiple: false}, + Property{Name: "green.S", Value: "vert", NoIndex: false, Multiple: false}, + Property{Name: "green.I", Value: int64(0), NoIndex: false, Multiple: false}, + Property{Name: "green.Nonymous.S", Value: "verde0", NoIndex: false, Multiple: true}, + Property{Name: "green.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, + Property{Name: "green.Nonymous.S", Value: "verde1", NoIndex: false, Multiple: true}, + Property{Name: 
"green.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, + Property{Name: "green.Nonymous.S", Value: "verde2", NoIndex: false, Multiple: true}, + Property{Name: "green.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, + Property{Name: "green.Other", Value: "", NoIndex: false, Multiple: false}, + Property{Name: "Blue.S", Value: "bleu", NoIndex: false, Multiple: false}, + Property{Name: "Blue.I", Value: int64(0), NoIndex: false, Multiple: false}, + Property{Name: "Blue.Nonymous.S", Value: "blu0", NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.S", Value: "blu1", NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.S", Value: "blu2", NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.S", Value: "blu3", NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, + Property{Name: "Blue.Other", Value: "", NoIndex: false, Multiple: false}, + }, + "", + "", + }, + { + "save props load structs with ragged fields", + &PropertyList{ + Property{Name: "red.S", Value: "rot", NoIndex: false, Multiple: false}, + Property{Name: "green.Nonymous.I", Value: int64(10), NoIndex: false, Multiple: true}, + Property{Name: "green.Nonymous.I", Value: int64(11), NoIndex: false, Multiple: true}, + Property{Name: "green.Nonymous.I", Value: int64(12), NoIndex: false, Multiple: true}, + Property{Name: "green.Nonymous.I", Value: int64(13), NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.S", Value: "blau0", NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.I", Value: int64(20), NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.S", Value: "blau1", NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.I", Value: int64(21), NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.S", Value: "blau2", NoIndex: false, Multiple: true}, + }, + &N2{ + N1: N1{ + X0: X0{S: "rot"}, + }, + Green: N1{ + Nonymous: []X0{ + {I: 10}, + {I: 11}, + {I: 12}, + {I: 13}, + }, + }, + Blue: N1{ + Nonymous: []X0{ + {S: "blau0", I: 20}, + {S: "blau1", I: 21}, + {S: "blau2"}, + }, + }, + }, + "", + "", + }, + { + "save structs with noindex tags", + &struct { + A struct { + X string `datastore:",noindex"` + Y string + } `datastore:",noindex"` + B struct { + X string `datastore:",noindex"` + Y string + } + }{}, + &PropertyList{ + Property{Name: "B.Y", Value: "", NoIndex: false, Multiple: false}, + Property{Name: "A.X", Value: "", NoIndex: true, Multiple: false}, + Property{Name: "A.Y", Value: "", NoIndex: true, Multiple: false}, + Property{Name: "B.X", Value: "", NoIndex: true, Multiple: false}, + }, + "", + "", + }, + { + "embedded struct with name override", + &struct { + Inner1 `datastore:"foo"` + }{}, + &PropertyList{ + Property{Name: "foo.W", Value: int64(0), NoIndex: false, Multiple: false}, + Property{Name: "foo.X", Value: "", NoIndex: false, Multiple: false}, + }, + "", + "", + }, + { + "slice of slices", + &SliceOfSlices{}, + nil, + "flattening nested structs leads to a slice of slices", + "", + }, + { + "recursive struct", + &Recursive{}, + nil, + "recursive struct", + "", + }, + { + "mutually recursive struct", + &MutuallyRecursive0{}, + nil, + "recursive struct", + "", + }, + { + "non-exported 
struct fields", + &struct { + i, J int64 + }{i: 1, J: 2}, + &PropertyList{ + Property{Name: "J", Value: int64(2), NoIndex: false, Multiple: false}, + }, + "", + "", + }, + { + "json.RawMessage", + &struct { + J json.RawMessage + }{ + J: json.RawMessage("rawr"), + }, + &PropertyList{ + Property{Name: "J", Value: []byte("rawr"), NoIndex: true, Multiple: false}, + }, + "", + "", + }, + { + "json.RawMessage to myBlob", + &struct { + B json.RawMessage + }{ + B: json.RawMessage("rawr"), + }, + &B2{B: myBlob("rawr")}, + "", + "", + }, +} + +// checkErr returns the empty string if either both want and err are zero, +// or if want is a non-empty substring of err's string representation. +func checkErr(want string, err error) string { + if err != nil { + got := err.Error() + if want == "" || strings.Index(got, want) == -1 { + return got + } + } else if want != "" { + return fmt.Sprintf("want error %q", want) + } + return "" +} + +func TestRoundTrip(t *testing.T) { + for _, tc := range testCases { + p, err := saveEntity(testAppID, testKey0, tc.src) + if s := checkErr(tc.putErr, err); s != "" { + t.Errorf("%s: save: %s", tc.desc, s) + continue + } + if p == nil { + continue + } + var got interface{} + if _, ok := tc.want.(*PropertyList); ok { + got = new(PropertyList) + } else { + got = reflect.New(reflect.TypeOf(tc.want).Elem()).Interface() + } + err = loadEntity(got, p) + if s := checkErr(tc.getErr, err); s != "" { + t.Errorf("%s: load: %s", tc.desc, s) + continue + } + equal := false + if gotT, ok := got.(*T); ok { + // Round tripping a time.Time can result in a different time.Location: Local instead of UTC. + // We therefore test equality explicitly, instead of relying on reflect.DeepEqual. + equal = gotT.T.Equal(tc.want.(*T).T) + } else { + equal = reflect.DeepEqual(got, tc.want) + } + if !equal { + t.Errorf("%s: compare: got %v want %v", tc.desc, got, tc.want) + continue + } + } +} + +func TestQueryConstruction(t *testing.T) { + tests := []struct { + q, exp *Query + err string + }{ + { + q: NewQuery("Foo"), + exp: &Query{ + kind: "Foo", + limit: -1, + }, + }, + { + // Regular filtered query with standard spacing. + q: NewQuery("Foo").Filter("foo >", 7), + exp: &Query{ + kind: "Foo", + filter: []filter{ + { + FieldName: "foo", + Op: greaterThan, + Value: 7, + }, + }, + limit: -1, + }, + }, + { + // Filtered query with no spacing. + q: NewQuery("Foo").Filter("foo=", 6), + exp: &Query{ + kind: "Foo", + filter: []filter{ + { + FieldName: "foo", + Op: equal, + Value: 6, + }, + }, + limit: -1, + }, + }, + { + // Filtered query with funky spacing. + q: NewQuery("Foo").Filter(" foo< ", 8), + exp: &Query{ + kind: "Foo", + filter: []filter{ + { + FieldName: "foo", + Op: lessThan, + Value: 8, + }, + }, + limit: -1, + }, + }, + { + // Filtered query with multicharacter op. + q: NewQuery("Foo").Filter("foo >=", 9), + exp: &Query{ + kind: "Foo", + filter: []filter{ + { + FieldName: "foo", + Op: greaterEq, + Value: 9, + }, + }, + limit: -1, + }, + }, + { + // Query with ordering. + q: NewQuery("Foo").Order("bar"), + exp: &Query{ + kind: "Foo", + order: []order{ + { + FieldName: "bar", + Direction: ascending, + }, + }, + limit: -1, + }, + }, + { + // Query with reverse ordering, and funky spacing. + q: NewQuery("Foo").Order(" - bar"), + exp: &Query{ + kind: "Foo", + order: []order{ + { + FieldName: "bar", + Direction: descending, + }, + }, + limit: -1, + }, + }, + { + // Query with an empty ordering. + q: NewQuery("Foo").Order(""), + err: "empty order", + }, + { + // Query with a + ordering. 
+			q:   NewQuery("Foo").Order("+bar"),
+			err: "invalid order",
+		},
+	}
+	for i, test := range tests {
+		if test.q.err != nil {
+			got := test.q.err.Error()
+			if !strings.Contains(got, test.err) {
+				t.Errorf("%d: error mismatch: got %q want something containing %q", i, got, test.err)
+			}
+			continue
+		}
+		if !reflect.DeepEqual(test.q, test.exp) {
+			t.Errorf("%d: mismatch: got %v want %v", i, test.q, test.exp)
+		}
+	}
+}
+
+func TestStringMeaning(t *testing.T) {
+	var xx [4]interface{}
+	xx[0] = &struct {
+		X string
+	}{"xx0"}
+	xx[1] = &struct {
+		X string `datastore:",noindex"`
+	}{"xx1"}
+	xx[2] = &struct {
+		X []byte
+	}{[]byte("xx2")}
+	xx[3] = &struct {
+		X []byte `datastore:",noindex"`
+	}{[]byte("xx3")}
+
+	indexed := [4]bool{
+		true,
+		false,
+		false, // A []byte is always no-index.
+		false,
+	}
+	want := [4]pb.Property_Meaning{
+		pb.Property_NO_MEANING,
+		pb.Property_TEXT,
+		pb.Property_BLOB,
+		pb.Property_BLOB,
+	}
+
+	for i, x := range xx {
+		props, err := SaveStruct(x)
+		if err != nil {
+			t.Errorf("i=%d: SaveStruct: %v", i, err)
+			continue
+		}
+		e, err := propertiesToProto("appID", testKey0, props)
+		if err != nil {
+			t.Errorf("i=%d: propertiesToProto: %v", i, err)
+			continue
+		}
+		var p *pb.Property
+		switch {
+		case indexed[i] && len(e.Property) == 1:
+			p = e.Property[0]
+		case !indexed[i] && len(e.RawProperty) == 1:
+			p = e.RawProperty[0]
+		default:
+			t.Errorf("i=%d: EntityProto did not have expected property slice", i)
+			continue
+		}
+		if got := p.GetMeaning(); got != want[i] {
+			t.Errorf("i=%d: meaning: got %v, want %v", i, got, want[i])
+			continue
+		}
+	}
+}
+
+func TestNamespaceResetting(t *testing.T) {
+	// These environment variables are necessary because *Query.Run will
+	// call internal.FullyQualifiedAppID which checks these variables or falls
+	// back to the Metadata service that is not available in tests.
+	environ := []struct {
+		key, value string
+	}{
+		{"GAE_LONG_APP_ID", "my-app-id"},
+		{"GAE_PARTITION", "1"},
+	}
+	for i, v := range environ {
+		old := os.Getenv(v.key)
+		os.Setenv(v.key, v.value)
+		environ[i].value = old
+	}
+	defer func() { // Restore old environment after the test completes.
+		for _, v := range environ {
+			if v.value == "" {
+				os.Unsetenv(v.key)
+				continue
+			}
+			os.Setenv(v.key, v.value)
+		}
+	}()
+
+	namec := make(chan *string, 1)
+	c0 := aetesting.FakeSingleContext(t, "datastore_v3", "RunQuery", func(req *pb.Query, res *pb.QueryResult) error {
+		namec <- req.NameSpace
+		return fmt.Errorf("RPC error")
+	})
+
+	// Check that wrapping c0 in a namespace twice works correctly.
+	c1, err := appengine.Namespace(c0, "A")
+	if err != nil {
+		t.Fatalf("appengine.Namespace: %v", err)
+	}
+	c2, err := appengine.Namespace(c1, "") // should act as the original context
+	if err != nil {
+		t.Fatalf("appengine.Namespace: %v", err)
+	}
+
+	q := NewQuery("SomeKind")
+
+	q.Run(c0)
+	if ns := <-namec; ns != nil {
+		t.Errorf(`RunQuery with c0: ns = %q, want nil`, *ns)
+	}
+
+	q.Run(c1)
+	if ns := <-namec; ns == nil {
+		t.Error(`RunQuery with c1: ns = nil, want "A"`)
+	} else if *ns != "A" {
+		t.Errorf(`RunQuery with c1: ns = %q, want "A"`, *ns)
+	}
+
+	q.Run(c2)
+	if ns := <-namec; ns != nil {
+		t.Errorf(`RunQuery with c2: ns = %q, want nil`, *ns)
+	}
+}
diff --git a/vendor/google.golang.org/appengine/datastore/doc.go b/vendor/google.golang.org/appengine/datastore/doc.go
new file mode 100644
index 0000000..92ffe6d
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/doc.go
@@ -0,0 +1,351 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +/* +Package datastore provides a client for App Engine's datastore service. + + +Basic Operations + +Entities are the unit of storage and are associated with a key. A key +consists of an optional parent key, a string application ID, a string kind +(also known as an entity type), and either a StringID or an IntID. A +StringID is also known as an entity name or key name. + +It is valid to create a key with a zero StringID and a zero IntID; this is +called an incomplete key, and does not refer to any saved entity. Putting an +entity into the datastore under an incomplete key will cause a unique key +to be generated for that entity, with a non-zero IntID. + +An entity's contents are a mapping from case-sensitive field names to values. +Valid value types are: + - signed integers (int, int8, int16, int32 and int64), + - bool, + - string, + - float32 and float64, + - []byte (up to 1 megabyte in length), + - any type whose underlying type is one of the above predeclared types, + - ByteString, + - *Key, + - time.Time (stored with microsecond precision), + - appengine.BlobKey, + - appengine.GeoPoint, + - structs whose fields are all valid value types, + - slices of any of the above. + +Slices of structs are valid, as are structs that contain slices. However, if +one struct contains another, then at most one of those can be repeated. This +disqualifies recursively defined struct types: any struct T that (directly or +indirectly) contains a []T. + +The Get and Put functions load and save an entity's contents. An entity's +contents are typically represented by a struct pointer. + +Example code: + + type Entity struct { + Value string + } + + func handle(w http.ResponseWriter, r *http.Request) { + ctx := appengine.NewContext(r) + + k := datastore.NewKey(ctx, "Entity", "stringID", 0, nil) + e := new(Entity) + if err := datastore.Get(ctx, k, e); err != nil { + http.Error(w, err.Error(), 500) + return + } + + old := e.Value + e.Value = r.URL.Path + + if _, err := datastore.Put(ctx, k, e); err != nil { + http.Error(w, err.Error(), 500) + return + } + + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + fmt.Fprintf(w, "old=%q\nnew=%q\n", old, e.Value) + } + +GetMulti, PutMulti and DeleteMulti are batch versions of the Get, Put and +Delete functions. They take a []*Key instead of a *Key, and may return an +appengine.MultiError when encountering partial failure. + + +Properties + +An entity's contents can be represented by a variety of types. These are +typically struct pointers, but can also be any type that implements the +PropertyLoadSaver interface. If using a struct pointer, you do not have to +explicitly implement the PropertyLoadSaver interface; the datastore will +automatically convert via reflection. If a struct pointer does implement that +interface then those methods will be used in preference to the default +behavior for struct pointers. Struct pointers are more strongly typed and are +easier to use; PropertyLoadSavers are more flexible. + +The actual types passed do not have to match between Get and Put calls or even +across different App Engine requests. It is valid to put a *PropertyList and +get that same entity as a *myStruct, or put a *myStruct0 and get a *myStruct1. +Conceptually, any entity is saved as a sequence of properties, and is loaded +into the destination value on a property-by-property basis. 
+When loading into a struct pointer, an entity that cannot be completely
+represented (such as a missing field) will result in an ErrFieldMismatch
+error but it is up to the caller whether this error is fatal, recoverable
+or ignorable.
+
+By default, for struct pointers, all properties are potentially indexed, and
+the property name is the same as the field name (and hence must start with an
+upper case letter). Fields may have a `datastore:"name,options"` tag. The tag
+name is the property name, which must be one or more valid Go identifiers
+joined by ".", but may start with a lower case letter. An empty tag name means
+to just use the field name. A "-" tag name means that the datastore will
+ignore that field. If options is "noindex" then the field will not be indexed.
+If options is "" then the comma may be omitted. There are no other
+recognized options.
+
+Fields (except for []byte) are indexed by default. Strings longer than 1500
+bytes cannot be indexed; fields used to store long strings should be
+tagged with "noindex". Similarly, ByteStrings longer than 1500 bytes cannot be
+indexed.
+
+Example code:
+
+	// A and B are renamed to a and b.
+	// A, C and J are not indexed.
+	// D's tag is equivalent to having no tag at all (E).
+	// I is ignored entirely by the datastore.
+	// J has tag information for both the datastore and json packages.
+	type TaggedStruct struct {
+		A int `datastore:"a,noindex"`
+		B int `datastore:"b"`
+		C int `datastore:",noindex"`
+		D int `datastore:""`
+		E int
+		I int `datastore:"-"`
+		J int `datastore:",noindex" json:"j"`
+	}
+
+
+Structured Properties
+
+If the struct pointed to contains other structs, then the nested or embedded
+structs are flattened. For example, given these definitions:
+
+	type Inner1 struct {
+		W int32
+		X string
+	}
+
+	type Inner2 struct {
+		Y float64
+	}
+
+	type Inner3 struct {
+		Z bool
+	}
+
+	type Outer struct {
+		A int16
+		I []Inner1
+		J Inner2
+		Inner3
+	}
+
+then an Outer's properties would be equivalent to those of:
+
+	type OuterEquivalent struct {
+		A     int16
+		IDotW []int32  `datastore:"I.W"`
+		IDotX []string `datastore:"I.X"`
+		JDotY float64  `datastore:"J.Y"`
+		Z     bool
+	}
+
+If Outer's embedded Inner3 field was tagged as `datastore:"Foo"` then the
+equivalent field would instead be: FooDotZ bool `datastore:"Foo.Z"`.
+
+If an outer struct is tagged "noindex" then all of its implicit flattened
+fields are effectively "noindex".
+
+
+The PropertyLoadSaver Interface
+
+An entity's contents can also be represented by any type that implements the
+PropertyLoadSaver interface. This type may be a struct pointer, but it does
+not have to be. The datastore package will call Load when getting the entity's
+contents, and Save when putting the entity's contents.
+Possible uses include deriving non-stored fields, verifying fields, or indexing
+a field only if its value is positive.
+
+Example code:
+
+	type CustomPropsExample struct {
+		I, J int
+		// Sum is not stored, but should always be equal to I + J.
+		Sum int `datastore:"-"`
+	}
+
+	func (x *CustomPropsExample) Load(ps []datastore.Property) error {
+		// Load I and J as usual.
+		if err := datastore.LoadStruct(x, ps); err != nil {
+			return err
+		}
+		// Derive the Sum field.
+		x.Sum = x.I + x.J
+		return nil
+	}
+
+	func (x *CustomPropsExample) Save() ([]datastore.Property, error) {
+		// Validate the Sum field.
+		if x.Sum != x.I+x.J {
+			return nil, errors.New("CustomPropsExample has inconsistent sum")
+		}
+		// Save I and J as usual. The code below is equivalent to calling
+		// "return datastore.SaveStruct(x)", but is done manually for
+		// demonstration purposes.
+		return []datastore.Property{
+			{
+				Name:  "I",
+				Value: int64(x.I),
+			},
+			{
+				Name:  "J",
+				Value: int64(x.J),
+			},
+		}, nil
+	}
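+
+As a further illustration (a sketch, not one of the package's own examples),
+a PropertyLoadSaver can stamp an update time on every save. This assumes a
+hypothetical Stamped type:
+
+	type Stamped struct {
+		Data    string
+		Updated time.Time
+	}
+
+	func (s *Stamped) Load(ps []datastore.Property) error {
+		return datastore.LoadStruct(s, ps)
+	}
+
+	func (s *Stamped) Save() ([]datastore.Property, error) {
+		// Refresh the timestamp before the entity is written.
+		s.Updated = time.Now()
+		return datastore.SaveStruct(s)
+	}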
+
+The *PropertyList type implements PropertyLoadSaver, and can therefore hold an
+arbitrary entity's contents.
+
+
+Queries
+
+Queries retrieve entities based on their properties or key's ancestry. Running
+a query yields an iterator of results: either keys or (key, entity) pairs.
+Queries are re-usable and it is safe to call Query.Run from concurrent
+goroutines. Iterators are not safe for concurrent use.
+
+Queries are immutable, and are either created by calling NewQuery, or derived
+from an existing query by calling a method like Filter or Order that returns a
+new query value. A query is typically constructed by calling NewQuery followed
+by a chain of zero or more such methods. These methods are:
+ - Ancestor and Filter constrain the entities returned by running a query.
+ - Order affects the order in which they are returned.
+ - Project constrains the fields returned.
+ - Distinct de-duplicates projected entities.
+ - KeysOnly makes the iterator return only keys, not (key, entity) pairs.
+ - Start, End, Offset and Limit define which sub-sequence of matching entities
+   to return. Start and End take cursors, Offset and Limit take integers. Start
+   and Offset affect the first result, End and Limit affect the last result.
+   If both Start and Offset are set, then the offset is relative to Start.
+   If both End and Limit are set, then the earliest constraint wins. Limit is
+   relative to Start+Offset, not relative to End. As a special case, a
+   negative limit means unlimited.
+
+Example code:
+
+	type Widget struct {
+		Description string
+		Price       int
+	}
+
+	func handle(w http.ResponseWriter, r *http.Request) {
+		ctx := appengine.NewContext(r)
+		q := datastore.NewQuery("Widget").
+			Filter("Price <", 1000).
+			Order("-Price")
+		b := new(bytes.Buffer)
+		for t := q.Run(ctx); ; {
+			var x Widget
+			key, err := t.Next(&x)
+			if err == datastore.Done {
+				break
+			}
+			if err != nil {
+				serveError(ctx, w, err)
+				return
+			}
+			fmt.Fprintf(b, "Key=%v\nWidget=%#v\n\n", key, x)
+		}
+		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+		io.Copy(w, b)
+	}
+
+
+Transactions
+
+RunInTransaction runs a function in a transaction.
+
+Example code:
+
+	type Counter struct {
+		Count int
+	}
+
+	func inc(ctx context.Context, key *datastore.Key) (int, error) {
+		var x Counter
+		if err := datastore.Get(ctx, key, &x); err != nil && err != datastore.ErrNoSuchEntity {
+			return 0, err
+		}
+		x.Count++
+		if _, err := datastore.Put(ctx, key, &x); err != nil {
+			return 0, err
+		}
+		return x.Count, nil
+	}
+
+	func handle(w http.ResponseWriter, r *http.Request) {
+		ctx := appengine.NewContext(r)
+		var count int
+		err := datastore.RunInTransaction(ctx, func(ctx context.Context) error {
+			var err1 error
+			count, err1 = inc(ctx, datastore.NewKey(ctx, "Counter", "singleton", 0, nil))
+			return err1
+		}, nil)
+		if err != nil {
+			serveError(ctx, w, err)
+			return
+		}
+		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+		fmt.Fprintf(w, "Count=%d", count)
+	}
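+
+If the transaction is contested, the function passed to RunInTransaction may
+be invoked more than once before the commit succeeds, so it should avoid
+side effects outside the datastore. As an illustrative sketch (not part of
+the example above), accumulating into a captured variable is unsafe:
+
+	total := 0
+	err := datastore.RunInTransaction(ctx, func(ctx context.Context) error {
+		total++ // Wrong: re-runs of the function would over-count.
+		return nil
+	}, nil)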
+
+
+Metadata
+
+The datastore package provides access to some of App Engine's datastore
+metadata. This metadata includes information about the entity groups,
+namespaces, entity kinds, and properties in the datastore, as well as the
+property representations for each property.
+
+Example code:
+
+	func handle(w http.ResponseWriter, r *http.Request) {
+		// Print all the kinds in the datastore, with all the indexed
+		// properties (and their representations) for each.
+		ctx := appengine.NewContext(r)
+
+		kinds, err := datastore.Kinds(ctx)
+		if err != nil {
+			serveError(ctx, w, err)
+			return
+		}
+
+		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+		for _, kind := range kinds {
+			fmt.Fprintf(w, "%s:\n", kind)
+			props, err := datastore.KindProperties(ctx, kind)
+			if err != nil {
+				fmt.Fprintln(w, "\t(unable to retrieve properties)")
+				continue
+			}
+			for p, rep := range props {
+				fmt.Fprintf(w, "\t-%s (%s)\n", p, strings.Join(rep, ", "))
+			}
+		}
+	}
+*/
+package datastore // import "google.golang.org/appengine/datastore"
diff --git a/vendor/google.golang.org/appengine/datastore/key.go b/vendor/google.golang.org/appengine/datastore/key.go
new file mode 100644
index 0000000..ac1f002
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/key.go
@@ -0,0 +1,309 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/gob"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine/internal"
+	pb "google.golang.org/appengine/internal/datastore"
+)
+
+// Key represents the datastore key for a stored entity, and is immutable.
+type Key struct {
+	kind      string
+	stringID  string
+	intID     int64
+	parent    *Key
+	appID     string
+	namespace string
+}
+
+// Kind returns the key's kind (also known as entity type).
+func (k *Key) Kind() string {
+	return k.kind
+}
+
+// StringID returns the key's string ID (also known as an entity name or key
+// name), which may be "".
+func (k *Key) StringID() string {
+	return k.stringID
+}
+
+// IntID returns the key's integer ID, which may be 0.
+func (k *Key) IntID() int64 {
+	return k.intID
+}
+
+// Parent returns the key's parent key, which may be nil.
+func (k *Key) Parent() *Key {
+	return k.parent
+}
+
+// AppID returns the key's application ID.
+func (k *Key) AppID() string {
+	return k.appID
+}
+
+// Namespace returns the key's namespace.
+func (k *Key) Namespace() string {
+	return k.namespace
+}
+
+// Incomplete returns whether the key does not refer to a stored entity.
+// In particular, whether the key has a zero StringID and a zero IntID.
+func (k *Key) Incomplete() bool {
+	return k.stringID == "" && k.intID == 0
+}
+
+// valid returns whether the key is valid.
+func (k *Key) valid() bool {
+	if k == nil {
+		return false
+	}
+	for ; k != nil; k = k.parent {
+		if k.kind == "" || k.appID == "" {
+			return false
+		}
+		if k.stringID != "" && k.intID != 0 {
+			return false
+		}
+		if k.parent != nil {
+			if k.parent.Incomplete() {
+				return false
+			}
+			if k.parent.appID != k.appID || k.parent.namespace != k.namespace {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// Equal returns whether two keys are equal.
+func (k *Key) Equal(o *Key) bool {
+	for k != nil && o != nil {
+		if k.kind != o.kind || k.stringID != o.stringID || k.intID != o.intID || k.appID != o.appID || k.namespace != o.namespace {
+			return false
+		}
+		k, o = k.parent, o.parent
+	}
+	return k == o
+}
+
+// root returns the furthest ancestor of a key, which may be itself.
+func (k *Key) root() *Key { + for k.parent != nil { + k = k.parent + } + return k +} + +// marshal marshals the key's string representation to the buffer. +func (k *Key) marshal(b *bytes.Buffer) { + if k.parent != nil { + k.parent.marshal(b) + } + b.WriteByte('/') + b.WriteString(k.kind) + b.WriteByte(',') + if k.stringID != "" { + b.WriteString(k.stringID) + } else { + b.WriteString(strconv.FormatInt(k.intID, 10)) + } +} + +// String returns a string representation of the key. +func (k *Key) String() string { + if k == nil { + return "" + } + b := bytes.NewBuffer(make([]byte, 0, 512)) + k.marshal(b) + return b.String() +} + +type gobKey struct { + Kind string + StringID string + IntID int64 + Parent *gobKey + AppID string + Namespace string +} + +func keyToGobKey(k *Key) *gobKey { + if k == nil { + return nil + } + return &gobKey{ + Kind: k.kind, + StringID: k.stringID, + IntID: k.intID, + Parent: keyToGobKey(k.parent), + AppID: k.appID, + Namespace: k.namespace, + } +} + +func gobKeyToKey(gk *gobKey) *Key { + if gk == nil { + return nil + } + return &Key{ + kind: gk.Kind, + stringID: gk.StringID, + intID: gk.IntID, + parent: gobKeyToKey(gk.Parent), + appID: gk.AppID, + namespace: gk.Namespace, + } +} + +func (k *Key) GobEncode() ([]byte, error) { + buf := new(bytes.Buffer) + if err := gob.NewEncoder(buf).Encode(keyToGobKey(k)); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func (k *Key) GobDecode(buf []byte) error { + gk := new(gobKey) + if err := gob.NewDecoder(bytes.NewBuffer(buf)).Decode(gk); err != nil { + return err + } + *k = *gobKeyToKey(gk) + return nil +} + +func (k *Key) MarshalJSON() ([]byte, error) { + return []byte(`"` + k.Encode() + `"`), nil +} + +func (k *Key) UnmarshalJSON(buf []byte) error { + if len(buf) < 2 || buf[0] != '"' || buf[len(buf)-1] != '"' { + return errors.New("datastore: bad JSON key") + } + k2, err := DecodeKey(string(buf[1 : len(buf)-1])) + if err != nil { + return err + } + *k = *k2 + return nil +} + +// Encode returns an opaque representation of the key +// suitable for use in HTML and URLs. +// This is compatible with the Python and Java runtimes. +func (k *Key) Encode() string { + ref := keyToProto("", k) + + b, err := proto.Marshal(ref) + if err != nil { + panic(err) + } + + // Trailing padding is stripped. + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// DecodeKey decodes a key from the opaque representation returned by Encode. +func DecodeKey(encoded string) (*Key, error) { + // Re-add padding. + if m := len(encoded) % 4; m != 0 { + encoded += strings.Repeat("=", 4-m) + } + + b, err := base64.URLEncoding.DecodeString(encoded) + if err != nil { + return nil, err + } + + ref := new(pb.Reference) + if err := proto.Unmarshal(b, ref); err != nil { + return nil, err + } + + return protoToKey(ref) +} + +// NewIncompleteKey creates a new incomplete key. +// kind cannot be empty. +func NewIncompleteKey(c context.Context, kind string, parent *Key) *Key { + return NewKey(c, kind, "", 0, parent) +} + +// NewKey creates a new key. +// kind cannot be empty. +// Either one or both of stringID and intID must be zero. If both are zero, +// the key returned is incomplete. +// parent must either be a complete key or nil. +func NewKey(c context.Context, kind, stringID string, intID int64, parent *Key) *Key { + // If there's a parent key, use its namespace. + // Otherwise, use any namespace attached to the context. 
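+	// A parent's namespace always takes precedence so that a key and its
+	// ancestors stay in one namespace, matching the check in valid().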
+ var namespace string + if parent != nil { + namespace = parent.namespace + } else { + namespace = internal.NamespaceFromContext(c) + } + + return &Key{ + kind: kind, + stringID: stringID, + intID: intID, + parent: parent, + appID: internal.FullyQualifiedAppID(c), + namespace: namespace, + } +} + +// AllocateIDs returns a range of n integer IDs with the given kind and parent +// combination. kind cannot be empty; parent may be nil. The IDs in the range +// returned will not be used by the datastore's automatic ID sequence generator +// and may be used with NewKey without conflict. +// +// The range is inclusive at the low end and exclusive at the high end. In +// other words, valid intIDs x satisfy low <= x && x < high. +// +// If no error is returned, low + n == high. +func AllocateIDs(c context.Context, kind string, parent *Key, n int) (low, high int64, err error) { + if kind == "" { + return 0, 0, errors.New("datastore: AllocateIDs given an empty kind") + } + if n < 0 { + return 0, 0, fmt.Errorf("datastore: AllocateIDs given a negative count: %d", n) + } + if n == 0 { + return 0, 0, nil + } + req := &pb.AllocateIdsRequest{ + ModelKey: keyToProto("", NewIncompleteKey(c, kind, parent)), + Size: proto.Int64(int64(n)), + } + res := &pb.AllocateIdsResponse{} + if err := internal.Call(c, "datastore_v3", "AllocateIds", req, res); err != nil { + return 0, 0, err + } + // The protobuf is inclusive at both ends. Idiomatic Go (e.g. slices, for loops) + // is inclusive at the low end and exclusive at the high end, so we add 1. + low = res.GetStart() + high = res.GetEnd() + 1 + if low+int64(n) != high { + return 0, 0, fmt.Errorf("datastore: internal error: could not allocate %d IDs", n) + } + return low, high, nil +} diff --git a/vendor/google.golang.org/appengine/datastore/key_test.go b/vendor/google.golang.org/appengine/datastore/key_test.go new file mode 100644 index 0000000..1fb3e97 --- /dev/null +++ b/vendor/google.golang.org/appengine/datastore/key_test.go @@ -0,0 +1,204 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
+ +package datastore + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "testing" + + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" +) + +func TestKeyEncoding(t *testing.T) { + testCases := []struct { + desc string + key *Key + exp string + }{ + { + desc: "A simple key with an int ID", + key: &Key{ + kind: "Person", + intID: 1, + appID: "glibrary", + }, + exp: "aghnbGlicmFyeXIMCxIGUGVyc29uGAEM", + }, + { + desc: "A simple key with a string ID", + key: &Key{ + kind: "Graph", + stringID: "graph:7-day-active", + appID: "glibrary", + }, + exp: "aghnbGlicmFyeXIdCxIFR3JhcGgiEmdyYXBoOjctZGF5LWFjdGl2ZQw", + }, + { + desc: "A key with a parent", + key: &Key{ + kind: "WordIndex", + intID: 1033, + parent: &Key{ + kind: "WordIndex", + intID: 1020032, + appID: "glibrary", + }, + appID: "glibrary", + }, + exp: "aghnbGlicmFyeXIhCxIJV29yZEluZGV4GIChPgwLEglXb3JkSW5kZXgYiQgM", + }, + } + for _, tc := range testCases { + enc := tc.key.Encode() + if enc != tc.exp { + t.Errorf("%s: got %q, want %q", tc.desc, enc, tc.exp) + } + + key, err := DecodeKey(tc.exp) + if err != nil { + t.Errorf("%s: failed decoding key: %v", tc.desc, err) + continue + } + if !key.Equal(tc.key) { + t.Errorf("%s: decoded key %v, want %v", tc.desc, key, tc.key) + } + } +} + +func TestKeyGob(t *testing.T) { + k := &Key{ + kind: "Gopher", + intID: 3, + parent: &Key{ + kind: "Mom", + stringID: "narwhal", + appID: "gopher-con", + }, + appID: "gopher-con", + } + + buf := new(bytes.Buffer) + if err := gob.NewEncoder(buf).Encode(k); err != nil { + t.Fatalf("gob encode failed: %v", err) + } + + k2 := new(Key) + if err := gob.NewDecoder(buf).Decode(k2); err != nil { + t.Fatalf("gob decode failed: %v", err) + } + if !k2.Equal(k) { + t.Errorf("gob round trip of %v produced %v", k, k2) + } +} + +func TestNilKeyGob(t *testing.T) { + type S struct { + Key *Key + } + s1 := new(S) + + buf := new(bytes.Buffer) + if err := gob.NewEncoder(buf).Encode(s1); err != nil { + t.Fatalf("gob encode failed: %v", err) + } + + s2 := new(S) + if err := gob.NewDecoder(buf).Decode(s2); err != nil { + t.Fatalf("gob decode failed: %v", err) + } + if s2.Key != nil { + t.Errorf("gob round trip of nil key produced %v", s2.Key) + } +} + +func TestKeyJSON(t *testing.T) { + k := &Key{ + kind: "Gopher", + intID: 2, + parent: &Key{ + kind: "Mom", + stringID: "narwhal", + appID: "gopher-con", + }, + appID: "gopher-con", + } + exp := `"` + k.Encode() + `"` + + buf, err := json.Marshal(k) + if err != nil { + t.Fatalf("json.Marshal failed: %v", err) + } + if s := string(buf); s != exp { + t.Errorf("JSON encoding of key %v: got %q, want %q", k, s, exp) + } + + k2 := new(Key) + if err := json.Unmarshal(buf, k2); err != nil { + t.Fatalf("json.Unmarshal failed: %v", err) + } + if !k2.Equal(k) { + t.Errorf("JSON round trip of %v produced %v", k, k2) + } +} + +func TestNilKeyJSON(t *testing.T) { + type S struct { + Key *Key + } + s1 := new(S) + + buf, err := json.Marshal(s1) + if err != nil { + t.Fatalf("json.Marshal failed: %v", err) + } + + s2 := new(S) + if err := json.Unmarshal(buf, s2); err != nil { + t.Fatalf("json.Unmarshal failed: %v", err) + } + if s2.Key != nil { + t.Errorf("JSON round trip of nil key produced %v", s2.Key) + } +} + +func TestIncompleteKeyWithParent(t *testing.T) { + c := internal.WithAppIDOverride(context.Background(), "s~some-app") + + // fadduh is a complete key. + fadduh := NewKey(c, "Person", "", 1, nil) + if fadduh.Incomplete() { + t.Fatalf("fadduh is incomplete") + } + + // robert is an incomplete key with fadduh as a parent. 
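+	// NewIncompleteKey leaves both StringID and IntID zero; the datastore
+	// only assigns a real IntID when the entity is first Put.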
+ robert := NewIncompleteKey(c, "Person", fadduh) + if !robert.Incomplete() { + t.Fatalf("robert is complete") + } + + // Both should be valid keys. + if !fadduh.valid() { + t.Errorf("fadduh is invalid: %v", fadduh) + } + if !robert.valid() { + t.Errorf("robert is invalid: %v", robert) + } +} + +func TestNamespace(t *testing.T) { + key := &Key{ + kind: "Person", + intID: 1, + appID: "s~some-app", + namespace: "mynamespace", + } + if g, w := key.Namespace(), "mynamespace"; g != w { + t.Errorf("key.Namespace() = %q, want %q", g, w) + } +} diff --git a/vendor/google.golang.org/appengine/datastore/load.go b/vendor/google.golang.org/appengine/datastore/load.go new file mode 100644 index 0000000..3f3c80c --- /dev/null +++ b/vendor/google.golang.org/appengine/datastore/load.go @@ -0,0 +1,334 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package datastore + +import ( + "fmt" + "reflect" + "time" + + "google.golang.org/appengine" + pb "google.golang.org/appengine/internal/datastore" +) + +var ( + typeOfBlobKey = reflect.TypeOf(appengine.BlobKey("")) + typeOfByteSlice = reflect.TypeOf([]byte(nil)) + typeOfByteString = reflect.TypeOf(ByteString(nil)) + typeOfGeoPoint = reflect.TypeOf(appengine.GeoPoint{}) + typeOfTime = reflect.TypeOf(time.Time{}) +) + +// typeMismatchReason returns a string explaining why the property p could not +// be stored in an entity field of type v.Type(). +func typeMismatchReason(p Property, v reflect.Value) string { + entityType := "empty" + switch p.Value.(type) { + case int64: + entityType = "int" + case bool: + entityType = "bool" + case string: + entityType = "string" + case float64: + entityType = "float" + case *Key: + entityType = "*datastore.Key" + case time.Time: + entityType = "time.Time" + case appengine.BlobKey: + entityType = "appengine.BlobKey" + case appengine.GeoPoint: + entityType = "appengine.GeoPoint" + case ByteString: + entityType = "datastore.ByteString" + case []byte: + entityType = "[]byte" + } + return fmt.Sprintf("type mismatch: %s versus %v", entityType, v.Type()) +} + +type propertyLoader struct { + // m holds the number of times a substruct field like "Foo.Bar.Baz" has + // been seen so far. The map is constructed lazily. + m map[string]int +} + +func (l *propertyLoader) load(codec *structCodec, structValue reflect.Value, p Property, requireSlice bool) string { + var v reflect.Value + // Traverse a struct's struct-typed fields. + for name := p.Name; ; { + decoder, ok := codec.byName[name] + if !ok { + return "no such struct field" + } + v = structValue.Field(decoder.index) + if !v.IsValid() { + return "no such struct field" + } + if !v.CanSet() { + return "cannot set struct field" + } + + if decoder.substructCodec == nil { + break + } + + if v.Kind() == reflect.Slice { + if l.m == nil { + l.m = make(map[string]int) + } + index := l.m[p.Name] + l.m[p.Name] = index + 1 + for v.Len() <= index { + v.Set(reflect.Append(v, reflect.New(v.Type().Elem()).Elem())) + } + structValue = v.Index(index) + requireSlice = false + } else { + structValue = v + } + // Strip the "I." from "I.X". 
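+		// (getStructCodecLocked stores substruct field names with a trailing
+		// ".", so slicing off the stored name leaves "X" for the inner codec.)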
+ name = name[len(codec.byIndex[decoder.index].name):] + codec = decoder.substructCodec + } + + var slice reflect.Value + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { + slice = v + v = reflect.New(v.Type().Elem()).Elem() + } else if requireSlice { + return "multiple-valued property requires a slice field type" + } + + // Convert indexValues to a Go value with a meaning derived from the + // destination type. + pValue := p.Value + if iv, ok := pValue.(indexValue); ok { + meaning := pb.Property_NO_MEANING + switch v.Type() { + case typeOfBlobKey: + meaning = pb.Property_BLOBKEY + case typeOfByteSlice: + meaning = pb.Property_BLOB + case typeOfByteString: + meaning = pb.Property_BYTESTRING + case typeOfGeoPoint: + meaning = pb.Property_GEORSS_POINT + case typeOfTime: + meaning = pb.Property_GD_WHEN + } + var err error + pValue, err = propValue(iv.value, meaning) + if err != nil { + return err.Error() + } + } + + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x, ok := pValue.(int64) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + if v.OverflowInt(x) { + return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type()) + } + v.SetInt(x) + case reflect.Bool: + x, ok := pValue.(bool) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + v.SetBool(x) + case reflect.String: + switch x := pValue.(type) { + case appengine.BlobKey: + v.SetString(string(x)) + case ByteString: + v.SetString(string(x)) + case string: + v.SetString(x) + default: + if pValue != nil { + return typeMismatchReason(p, v) + } + } + case reflect.Float32, reflect.Float64: + x, ok := pValue.(float64) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + if v.OverflowFloat(x) { + return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type()) + } + v.SetFloat(x) + case reflect.Ptr: + x, ok := pValue.(*Key) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + if _, ok := v.Interface().(*Key); !ok { + return typeMismatchReason(p, v) + } + v.Set(reflect.ValueOf(x)) + case reflect.Struct: + switch v.Type() { + case typeOfTime: + x, ok := pValue.(time.Time) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + v.Set(reflect.ValueOf(x)) + case typeOfGeoPoint: + x, ok := pValue.(appengine.GeoPoint) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + v.Set(reflect.ValueOf(x)) + default: + return typeMismatchReason(p, v) + } + case reflect.Slice: + x, ok := pValue.([]byte) + if !ok { + if y, yok := pValue.(ByteString); yok { + x, ok = []byte(y), true + } + } + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + if v.Type().Elem().Kind() != reflect.Uint8 { + return typeMismatchReason(p, v) + } + v.SetBytes(x) + default: + return typeMismatchReason(p, v) + } + if slice.IsValid() { + slice.Set(reflect.Append(slice, v)) + } + return "" +} + +// loadEntity loads an EntityProto into PropertyLoadSaver or struct pointer. 
+func loadEntity(dst interface{}, src *pb.EntityProto) (err error) { + props, err := protoToProperties(src) + if err != nil { + return err + } + if e, ok := dst.(PropertyLoadSaver); ok { + return e.Load(props) + } + return LoadStruct(dst, props) +} + +func (s structPLS) Load(props []Property) error { + var fieldName, reason string + var l propertyLoader + for _, p := range props { + if errStr := l.load(s.codec, s.v, p, p.Multiple); errStr != "" { + // We don't return early, as we try to load as many properties as possible. + // It is valid to load an entity into a struct that cannot fully represent it. + // That case returns an error, but the caller is free to ignore it. + fieldName, reason = p.Name, errStr + } + } + if reason != "" { + return &ErrFieldMismatch{ + StructType: s.v.Type(), + FieldName: fieldName, + Reason: reason, + } + } + return nil +} + +func protoToProperties(src *pb.EntityProto) ([]Property, error) { + props, rawProps := src.Property, src.RawProperty + out := make([]Property, 0, len(props)+len(rawProps)) + for { + var ( + x *pb.Property + noIndex bool + ) + if len(props) > 0 { + x, props = props[0], props[1:] + } else if len(rawProps) > 0 { + x, rawProps = rawProps[0], rawProps[1:] + noIndex = true + } else { + break + } + + var value interface{} + if x.Meaning != nil && *x.Meaning == pb.Property_INDEX_VALUE { + value = indexValue{x.Value} + } else { + var err error + value, err = propValue(x.Value, x.GetMeaning()) + if err != nil { + return nil, err + } + } + out = append(out, Property{ + Name: x.GetName(), + Value: value, + NoIndex: noIndex, + Multiple: x.GetMultiple(), + }) + } + return out, nil +} + +// propValue returns a Go value that combines the raw PropertyValue with a +// meaning. For example, an Int64Value with GD_WHEN becomes a time.Time. +func propValue(v *pb.PropertyValue, m pb.Property_Meaning) (interface{}, error) { + switch { + case v.Int64Value != nil: + if m == pb.Property_GD_WHEN { + return fromUnixMicro(*v.Int64Value), nil + } else { + return *v.Int64Value, nil + } + case v.BooleanValue != nil: + return *v.BooleanValue, nil + case v.StringValue != nil: + if m == pb.Property_BLOB { + return []byte(*v.StringValue), nil + } else if m == pb.Property_BLOBKEY { + return appengine.BlobKey(*v.StringValue), nil + } else if m == pb.Property_BYTESTRING { + return ByteString(*v.StringValue), nil + } else { + return *v.StringValue, nil + } + case v.DoubleValue != nil: + return *v.DoubleValue, nil + case v.Referencevalue != nil: + key, err := referenceValueToKey(v.Referencevalue) + if err != nil { + return nil, err + } + return key, nil + case v.Pointvalue != nil: + // NOTE: Strangely, latitude maps to X, longitude to Y. + return appengine.GeoPoint{Lat: v.Pointvalue.GetX(), Lng: v.Pointvalue.GetY()}, nil + } + return nil, nil +} + +// indexValue is a Property value that is created when entities are loaded from +// an index, such as from a projection query. +// +// Such Property values do not contain all of the metadata required to be +// faithfully represented as a Go value, and are instead represented as an +// opaque indexValue. Load the properties into a concrete struct type (e.g. by +// passing a struct pointer to Iterator.Next) to reconstruct actual Go values +// of type int, string, time.Time, etc. 
+type indexValue struct {
+	value *pb.PropertyValue
+}
diff --git a/vendor/google.golang.org/appengine/datastore/metadata.go b/vendor/google.golang.org/appengine/datastore/metadata.go
new file mode 100644
index 0000000..67995f9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/metadata.go
@@ -0,0 +1,78 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import "golang.org/x/net/context"
+
+// Datastore kinds for the metadata entities.
+const (
+	namespaceKind = "__namespace__"
+	kindKind      = "__kind__"
+	propertyKind  = "__property__"
+)
+
+// Namespaces returns all the datastore namespaces.
+func Namespaces(ctx context.Context) ([]string, error) {
+	// TODO(djd): Support range queries.
+	q := NewQuery(namespaceKind).KeysOnly()
+	keys, err := q.GetAll(ctx, nil)
+	if err != nil {
+		return nil, err
+	}
+	// The empty namespace key uses a numeric ID (==1), but luckily
+	// the string ID defaults to "" for numeric IDs anyway.
+	return keyNames(keys), nil
+}
+
+// Kinds returns the names of all the kinds in the current namespace.
+func Kinds(ctx context.Context) ([]string, error) {
+	// TODO(djd): Support range queries.
+	q := NewQuery(kindKind).KeysOnly()
+	keys, err := q.GetAll(ctx, nil)
+	if err != nil {
+		return nil, err
+	}
+	return keyNames(keys), nil
+}
+
+// keyNames returns a slice of the provided keys' names (string IDs).
+func keyNames(keys []*Key) []string {
+	n := make([]string, 0, len(keys))
+	for _, k := range keys {
+		n = append(n, k.StringID())
+	}
+	return n
+}
+
+// KindProperties returns all the indexed properties for the given kind.
+// The properties are returned as a map of property names to a slice of the
+// representation types. The representation types for the supported Go property
+// types are:
+//   "INT64":     signed integers and time.Time
+//   "DOUBLE":    float32 and float64
+//   "BOOLEAN":   bool
+//   "STRING":    string, []byte and ByteString
+//   "POINT":     appengine.GeoPoint
+//   "REFERENCE": *Key
+//   "USER":      (not used in the Go runtime)
+func KindProperties(ctx context.Context, kind string) (map[string][]string, error) {
+	// TODO(djd): Support range queries.
+	kindKey := NewKey(ctx, kindKind, kind, 0, nil)
+	q := NewQuery(propertyKind).Ancestor(kindKey)
+
+	propMap := map[string][]string{}
+	props := []struct {
+		Repr []string `datastore:"property_representation"`
+	}{}
+
+	keys, err := q.GetAll(ctx, &props)
+	if err != nil {
+		return nil, err
+	}
+	for i, p := range props {
+		propMap[keys[i].StringID()] = p.Repr
+	}
+	return propMap, nil
+}
diff --git a/vendor/google.golang.org/appengine/datastore/prop.go b/vendor/google.golang.org/appengine/datastore/prop.go
new file mode 100644
index 0000000..1f50ac0
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/prop.go
@@ -0,0 +1,296 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"unicode"
+)
+
+// Entities with more than this many indexed properties will not be saved.
+const maxIndexedProperties = 20000
+
+// []byte fields more than 1 megabyte long will not be loaded or saved.
+const maxBlobLen = 1 << 20
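+
+// Note: a slice-valued field contributes one indexed property per element,
+// which is why an entity holding a []float64 of length maxIndexedProperties+1
+// is rejected with "too many indexed properties".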
+// have multiple Properties with the same name, provided that p.Multiple is
+// true on all of that entity's Properties with that name.
+type Property struct {
+	// Name is the property name.
+	Name string
+	// Value is the property value. The valid types are:
+	//	- int64
+	//	- bool
+	//	- string
+	//	- float64
+	//	- ByteString
+	//	- *Key
+	//	- time.Time
+	//	- appengine.BlobKey
+	//	- appengine.GeoPoint
+	//	- []byte (up to 1 megabyte in length)
+	// This set is smaller than the set of valid struct field types that the
+	// datastore can load and save. A Property Value cannot be a slice (apart
+	// from []byte); use multiple Properties instead. Also, a Value's type
+	// must be explicitly on the list above; it is not sufficient for the
+	// underlying type to be on that list. For example, a Value of "type
+	// myInt64 int64" is invalid. Smaller-width integers and floats are also
+	// invalid. Again, this is more restrictive than the set of valid struct
+	// field types.
+	//
+	// A Value will have an opaque type when loading entities from an index,
+	// such as via a projection query. Load entities into a struct instead
+	// of a PropertyLoadSaver when using a projection query.
+	//
+	// A Value may also be the nil interface value; this is equivalent to
+	// Python's None but not directly representable by a Go struct. Loading
+	// a nil-valued property into a struct will set that field to the zero
+	// value.
+	Value interface{}
+	// NoIndex is whether the datastore cannot index this property.
+	NoIndex bool
+	// Multiple is whether the entity can have multiple properties with
+	// the same name. Even if a particular instance only has one property with
+	// a certain name, Multiple should be true if a struct would best represent
+	// it as a field of type []T instead of type T.
+	Multiple bool
+}
+
+// ByteString is a short byte slice (up to 1500 bytes) that can be indexed.
+type ByteString []byte
+
+// PropertyLoadSaver can be converted from and to a slice of Properties.
+type PropertyLoadSaver interface {
+	Load([]Property) error
+	Save() ([]Property, error)
+}
+
+// PropertyList converts a []Property to implement PropertyLoadSaver.
+type PropertyList []Property
+
+var (
+	typeOfPropertyLoadSaver = reflect.TypeOf((*PropertyLoadSaver)(nil)).Elem()
+	typeOfPropertyList      = reflect.TypeOf(PropertyList(nil))
+)
+
+// Load loads all of the provided properties into l.
+// It does not first reset *l to an empty slice.
+func (l *PropertyList) Load(p []Property) error {
+	*l = append(*l, p...)
+	return nil
+}
+
+// Save saves all of l's properties as a slice of Properties.
+func (l *PropertyList) Save() ([]Property, error) {
+	return *l, nil
+}
+
+// validPropertyName returns whether name consists of one or more valid Go
+// identifiers joined by ".".
+func validPropertyName(name string) bool {
+	if name == "" {
+		return false
+	}
+	for _, s := range strings.Split(name, ".") {
+		if s == "" {
+			return false
+		}
+		first := true
+		for _, c := range s {
+			if first {
+				first = false
+				if c != '_' && !unicode.IsLetter(c) {
+					return false
+				}
+			} else {
+				if c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+					return false
+				}
+			}
+		}
+	}
+	return true
+}
+
+// structTag is the parsed `datastore:"name,options"` tag of a struct field.
+// If a field has no tag, or the tag has an empty name, then the structTag's
+// name is just the field name. A "-" name means that the datastore ignores
+// that field.
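+//
+// For example, the following field declarations (illustrative only) parse to
+// the names and options noted in the trailing comments:
+//
+//	A int `datastore:"a,noindex"` // name "a", noIndex true
+//	B int `datastore:""`          // name "B" (the field name), indexed
+//	C int `datastore:"-"`         // ignored by the datastore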
+type structTag struct { + name string + noIndex bool +} + +// structCodec describes how to convert a struct to and from a sequence of +// properties. +type structCodec struct { + // byIndex gives the structTag for the i'th field. + byIndex []structTag + // byName gives the field codec for the structTag with the given name. + byName map[string]fieldCodec + // hasSlice is whether a struct or any of its nested or embedded structs + // has a slice-typed field (other than []byte). + hasSlice bool + // complete is whether the structCodec is complete. An incomplete + // structCodec may be encountered when walking a recursive struct. + complete bool +} + +// fieldCodec is a struct field's index and, if that struct field's type is +// itself a struct, that substruct's structCodec. +type fieldCodec struct { + index int + substructCodec *structCodec +} + +// structCodecs collects the structCodecs that have already been calculated. +var ( + structCodecsMutex sync.Mutex + structCodecs = make(map[reflect.Type]*structCodec) +) + +// getStructCodec returns the structCodec for the given struct type. +func getStructCodec(t reflect.Type) (*structCodec, error) { + structCodecsMutex.Lock() + defer structCodecsMutex.Unlock() + return getStructCodecLocked(t) +} + +// getStructCodecLocked implements getStructCodec. The structCodecsMutex must +// be held when calling this function. +func getStructCodecLocked(t reflect.Type) (ret *structCodec, retErr error) { + c, ok := structCodecs[t] + if ok { + return c, nil + } + c = &structCodec{ + byIndex: make([]structTag, t.NumField()), + byName: make(map[string]fieldCodec), + } + + // Add c to the structCodecs map before we are sure it is good. If t is + // a recursive type, it needs to find the incomplete entry for itself in + // the map. + structCodecs[t] = c + defer func() { + if retErr != nil { + delete(structCodecs, t) + } + }() + + for i := range c.byIndex { + f := t.Field(i) + tags := strings.Split(f.Tag.Get("datastore"), ",") + name := tags[0] + opts := make(map[string]bool) + for _, t := range tags[1:] { + opts[t] = true + } + if name == "" { + if !f.Anonymous { + name = f.Name + } + } else if name == "-" { + c.byIndex[i] = structTag{name: name} + continue + } else if !validPropertyName(name) { + return nil, fmt.Errorf("datastore: struct tag has invalid property name: %q", name) + } + + substructType, fIsSlice := reflect.Type(nil), false + switch f.Type.Kind() { + case reflect.Struct: + substructType = f.Type + case reflect.Slice: + if f.Type.Elem().Kind() == reflect.Struct { + substructType = f.Type.Elem() + } + fIsSlice = f.Type != typeOfByteSlice + c.hasSlice = c.hasSlice || fIsSlice + } + + if substructType != nil && substructType != typeOfTime && substructType != typeOfGeoPoint { + if name != "" { + name = name + "." 
+ } + sub, err := getStructCodecLocked(substructType) + if err != nil { + return nil, err + } + if !sub.complete { + return nil, fmt.Errorf("datastore: recursive struct: field %q", f.Name) + } + if fIsSlice && sub.hasSlice { + return nil, fmt.Errorf( + "datastore: flattening nested structs leads to a slice of slices: field %q", f.Name) + } + c.hasSlice = c.hasSlice || sub.hasSlice + for relName := range sub.byName { + absName := name + relName + if _, ok := c.byName[absName]; ok { + return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", absName) + } + c.byName[absName] = fieldCodec{index: i, substructCodec: sub} + } + } else { + if _, ok := c.byName[name]; ok { + return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", name) + } + c.byName[name] = fieldCodec{index: i} + } + + c.byIndex[i] = structTag{ + name: name, + noIndex: opts["noindex"], + } + } + c.complete = true + return c, nil +} + +// structPLS adapts a struct to be a PropertyLoadSaver. +type structPLS struct { + v reflect.Value + codec *structCodec +} + +// newStructPLS returns a PropertyLoadSaver for the struct pointer p. +func newStructPLS(p interface{}) (PropertyLoadSaver, error) { + v := reflect.ValueOf(p) + if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { + return nil, ErrInvalidEntityType + } + v = v.Elem() + codec, err := getStructCodec(v.Type()) + if err != nil { + return nil, err + } + return structPLS{v, codec}, nil +} + +// LoadStruct loads the properties from p to dst. +// dst must be a struct pointer. +func LoadStruct(dst interface{}, p []Property) error { + x, err := newStructPLS(dst) + if err != nil { + return err + } + return x.Load(p) +} + +// SaveStruct returns the properties from src as a slice of Properties. +// src must be a struct pointer. +func SaveStruct(src interface{}) ([]Property, error) { + x, err := newStructPLS(src) + if err != nil { + return nil, err + } + return x.Save() +} diff --git a/vendor/google.golang.org/appengine/datastore/prop_test.go b/vendor/google.golang.org/appengine/datastore/prop_test.go new file mode 100644 index 0000000..6889521 --- /dev/null +++ b/vendor/google.golang.org/appengine/datastore/prop_test.go @@ -0,0 +1,604 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package datastore + +import ( + "reflect" + "testing" + "time" + + "google.golang.org/appengine" +) + +func TestValidPropertyName(t *testing.T) { + testCases := []struct { + name string + want bool + }{ + // Invalid names. + {"", false}, + {"'", false}, + {".", false}, + {"..", false}, + {".foo", false}, + {"0", false}, + {"00", false}, + {"X.X.4.X.X", false}, + {"\n", false}, + {"\x00", false}, + {"abc\xffz", false}, + {"foo.", false}, + {"foo..", false}, + {"foo..bar", false}, + {"☃", false}, + {`"`, false}, + // Valid names. 
+ {"AB", true}, + {"Abc", true}, + {"X.X.X.X.X", true}, + {"_", true}, + {"_0", true}, + {"a", true}, + {"a_B", true}, + {"f00", true}, + {"f0o", true}, + {"fo0", true}, + {"foo", true}, + {"foo.bar", true}, + {"foo.bar.baz", true}, + {"世界", true}, + } + for _, tc := range testCases { + got := validPropertyName(tc.name) + if got != tc.want { + t.Errorf("%q: got %v, want %v", tc.name, got, tc.want) + } + } +} + +func TestStructCodec(t *testing.T) { + type oStruct struct { + O int + } + type pStruct struct { + P int + Q int + } + type rStruct struct { + R int + S pStruct + T oStruct + oStruct + } + type uStruct struct { + U int + v int + } + type vStruct struct { + V string `datastore:",noindex"` + } + oStructCodec := &structCodec{ + byIndex: []structTag{ + {name: "O"}, + }, + byName: map[string]fieldCodec{ + "O": {index: 0}, + }, + complete: true, + } + pStructCodec := &structCodec{ + byIndex: []structTag{ + {name: "P"}, + {name: "Q"}, + }, + byName: map[string]fieldCodec{ + "P": {index: 0}, + "Q": {index: 1}, + }, + complete: true, + } + rStructCodec := &structCodec{ + byIndex: []structTag{ + {name: "R"}, + {name: "S."}, + {name: "T."}, + {name: ""}, + }, + byName: map[string]fieldCodec{ + "R": {index: 0}, + "S.P": {index: 1, substructCodec: pStructCodec}, + "S.Q": {index: 1, substructCodec: pStructCodec}, + "T.O": {index: 2, substructCodec: oStructCodec}, + "O": {index: 3, substructCodec: oStructCodec}, + }, + complete: true, + } + uStructCodec := &structCodec{ + byIndex: []structTag{ + {name: "U"}, + {name: "v"}, + }, + byName: map[string]fieldCodec{ + "U": {index: 0}, + "v": {index: 1}, + }, + complete: true, + } + vStructCodec := &structCodec{ + byIndex: []structTag{ + {name: "V", noIndex: true}, + }, + byName: map[string]fieldCodec{ + "V": {index: 0}, + }, + complete: true, + } + + testCases := []struct { + desc string + structValue interface{} + want *structCodec + }{ + { + "oStruct", + oStruct{}, + oStructCodec, + }, + { + "pStruct", + pStruct{}, + pStructCodec, + }, + { + "rStruct", + rStruct{}, + rStructCodec, + }, + { + "uStruct", + uStruct{}, + uStructCodec, + }, + { + "non-basic fields", + struct { + B appengine.BlobKey + K *Key + T time.Time + }{}, + &structCodec{ + byIndex: []structTag{ + {name: "B"}, + {name: "K"}, + {name: "T"}, + }, + byName: map[string]fieldCodec{ + "B": {index: 0}, + "K": {index: 1}, + "T": {index: 2}, + }, + complete: true, + }, + }, + { + "struct tags with ignored embed", + struct { + A int `datastore:"a,noindex"` + B int `datastore:"b"` + C int `datastore:",noindex"` + D int `datastore:""` + E int + I int `datastore:"-"` + J int `datastore:",noindex" json:"j"` + oStruct `datastore:"-"` + }{}, + &structCodec{ + byIndex: []structTag{ + {name: "a", noIndex: true}, + {name: "b", noIndex: false}, + {name: "C", noIndex: true}, + {name: "D", noIndex: false}, + {name: "E", noIndex: false}, + {name: "-", noIndex: false}, + {name: "J", noIndex: true}, + {name: "-", noIndex: false}, + }, + byName: map[string]fieldCodec{ + "a": {index: 0}, + "b": {index: 1}, + "C": {index: 2}, + "D": {index: 3}, + "E": {index: 4}, + "J": {index: 6}, + }, + complete: true, + }, + }, + { + "unexported fields", + struct { + A int + b int + C int `datastore:"x"` + d int `datastore:"Y"` + }{}, + &structCodec{ + byIndex: []structTag{ + {name: "A"}, + {name: "b"}, + {name: "x"}, + {name: "Y"}, + }, + byName: map[string]fieldCodec{ + "A": {index: 0}, + "b": {index: 1}, + "x": {index: 2}, + "Y": {index: 3}, + }, + complete: true, + }, + }, + { + "nested and embedded structs", + struct { + 
A int + B int + CC oStruct + DDD rStruct + oStruct + }{}, + &structCodec{ + byIndex: []structTag{ + {name: "A"}, + {name: "B"}, + {name: "CC."}, + {name: "DDD."}, + {name: ""}, + }, + byName: map[string]fieldCodec{ + "A": {index: 0}, + "B": {index: 1}, + "CC.O": {index: 2, substructCodec: oStructCodec}, + "DDD.R": {index: 3, substructCodec: rStructCodec}, + "DDD.S.P": {index: 3, substructCodec: rStructCodec}, + "DDD.S.Q": {index: 3, substructCodec: rStructCodec}, + "DDD.T.O": {index: 3, substructCodec: rStructCodec}, + "DDD.O": {index: 3, substructCodec: rStructCodec}, + "O": {index: 4, substructCodec: oStructCodec}, + }, + complete: true, + }, + }, + { + "struct tags with nested and embedded structs", + struct { + A int `datastore:"-"` + B int `datastore:"w"` + C oStruct `datastore:"xx"` + D rStruct `datastore:"y"` + oStruct `datastore:"z"` + }{}, + &structCodec{ + byIndex: []structTag{ + {name: "-"}, + {name: "w"}, + {name: "xx."}, + {name: "y."}, + {name: "z."}, + }, + byName: map[string]fieldCodec{ + "w": {index: 1}, + "xx.O": {index: 2, substructCodec: oStructCodec}, + "y.R": {index: 3, substructCodec: rStructCodec}, + "y.S.P": {index: 3, substructCodec: rStructCodec}, + "y.S.Q": {index: 3, substructCodec: rStructCodec}, + "y.T.O": {index: 3, substructCodec: rStructCodec}, + "y.O": {index: 3, substructCodec: rStructCodec}, + "z.O": {index: 4, substructCodec: oStructCodec}, + }, + complete: true, + }, + }, + { + "unexported nested and embedded structs", + struct { + a int + B int + c uStruct + D uStruct + uStruct + }{}, + &structCodec{ + byIndex: []structTag{ + {name: "a"}, + {name: "B"}, + {name: "c."}, + {name: "D."}, + {name: ""}, + }, + byName: map[string]fieldCodec{ + "a": {index: 0}, + "B": {index: 1}, + "c.U": {index: 2, substructCodec: uStructCodec}, + "c.v": {index: 2, substructCodec: uStructCodec}, + "D.U": {index: 3, substructCodec: uStructCodec}, + "D.v": {index: 3, substructCodec: uStructCodec}, + "U": {index: 4, substructCodec: uStructCodec}, + "v": {index: 4, substructCodec: uStructCodec}, + }, + complete: true, + }, + }, + { + "noindex nested struct", + struct { + A oStruct `datastore:",noindex"` + }{}, + &structCodec{ + byIndex: []structTag{ + {name: "A.", noIndex: true}, + }, + byName: map[string]fieldCodec{ + "A.O": {index: 0, substructCodec: oStructCodec}, + }, + complete: true, + }, + }, + { + "noindex slice", + struct { + A []string `datastore:",noindex"` + }{}, + &structCodec{ + byIndex: []structTag{ + {name: "A", noIndex: true}, + }, + byName: map[string]fieldCodec{ + "A": {index: 0}, + }, + hasSlice: true, + complete: true, + }, + }, + { + "noindex embedded struct slice", + struct { + // vStruct has a single field, V, also with noindex. 
+ A []vStruct `datastore:",noindex"` + }{}, + &structCodec{ + byIndex: []structTag{ + {name: "A.", noIndex: true}, + }, + byName: map[string]fieldCodec{ + "A.V": {index: 0, substructCodec: vStructCodec}, + }, + hasSlice: true, + complete: true, + }, + }, + } + + for _, tc := range testCases { + got, err := getStructCodec(reflect.TypeOf(tc.structValue)) + if err != nil { + t.Errorf("%s: getStructCodec: %v", tc.desc, err) + continue + } + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("%s\ngot %+v\nwant %+v\n", tc.desc, got, tc.want) + continue + } + } +} + +func TestRepeatedPropertyName(t *testing.T) { + good := []interface{}{ + struct { + A int `datastore:"-"` + }{}, + struct { + A int `datastore:"b"` + B int + }{}, + struct { + A int + B int `datastore:"B"` + }{}, + struct { + A int `datastore:"B"` + B int `datastore:"-"` + }{}, + struct { + A int `datastore:"-"` + B int `datastore:"A"` + }{}, + struct { + A int `datastore:"B"` + B int `datastore:"A"` + }{}, + struct { + A int `datastore:"B"` + B int `datastore:"C"` + C int `datastore:"A"` + }{}, + struct { + A int `datastore:"B"` + B int `datastore:"C"` + C int `datastore:"D"` + }{}, + } + bad := []interface{}{ + struct { + A int `datastore:"B"` + B int + }{}, + struct { + A int + B int `datastore:"A"` + }{}, + struct { + A int `datastore:"C"` + B int `datastore:"C"` + }{}, + struct { + A int `datastore:"B"` + B int `datastore:"C"` + C int `datastore:"B"` + }{}, + } + testGetStructCodec(t, good, bad) +} + +func TestFlatteningNestedStructs(t *testing.T) { + type deepGood struct { + A struct { + B []struct { + C struct { + D int + } + } + } + } + type deepBad struct { + A struct { + B []struct { + C struct { + D []int + } + } + } + } + type iSay struct { + Tomato int + } + type youSay struct { + Tomato int + } + type tweedledee struct { + Dee int `datastore:"D"` + } + type tweedledum struct { + Dum int `datastore:"D"` + } + + good := []interface{}{ + struct { + X []struct { + Y string + } + }{}, + struct { + X []struct { + Y []byte + } + }{}, + struct { + P []int + X struct { + Y []int + } + }{}, + struct { + X struct { + Y []int + } + Q []int + }{}, + struct { + P []int + X struct { + Y []int + } + Q []int + }{}, + struct { + deepGood + }{}, + struct { + DG deepGood + }{}, + struct { + Foo struct { + Z int `datastore:"X"` + } `datastore:"A"` + Bar struct { + Z int `datastore:"Y"` + } `datastore:"A"` + }{}, + } + bad := []interface{}{ + struct { + X []struct { + Y []string + } + }{}, + struct { + X []struct { + Y []int + } + }{}, + struct { + deepBad + }{}, + struct { + DB deepBad + }{}, + struct { + iSay + youSay + }{}, + struct { + tweedledee + tweedledum + }{}, + struct { + Foo struct { + Z int + } `datastore:"A"` + Bar struct { + Z int + } `datastore:"A"` + }{}, + } + testGetStructCodec(t, good, bad) +} + +func testGetStructCodec(t *testing.T, good []interface{}, bad []interface{}) { + for _, x := range good { + if _, err := getStructCodec(reflect.TypeOf(x)); err != nil { + t.Errorf("type %T: got non-nil error (%s), want nil", x, err) + } + } + for _, x := range bad { + if _, err := getStructCodec(reflect.TypeOf(x)); err == nil { + t.Errorf("type %T: got nil error, want non-nil", x) + } + } +} + +func TestNilKeyIsStored(t *testing.T) { + x := struct { + K *Key + I int + }{} + p := PropertyList{} + // Save x as properties. + p1, _ := SaveStruct(&x) + p.Load(p1) + // Set x's fields to non-zero. + x.K = &Key{} + x.I = 2 + // Load x from properties. + p2, _ := p.Save() + LoadStruct(&x, p2) + // Check that x's fields were set to zero. 
+ if x.K != nil { + t.Errorf("K field was not zero") + } + if x.I != 0 { + t.Errorf("I field was not zero") + } +} diff --git a/vendor/google.golang.org/appengine/datastore/query.go b/vendor/google.golang.org/appengine/datastore/query.go new file mode 100644 index 0000000..3847b0f --- /dev/null +++ b/vendor/google.golang.org/appengine/datastore/query.go @@ -0,0 +1,724 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package datastore + +import ( + "encoding/base64" + "errors" + "fmt" + "math" + "reflect" + "strings" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/datastore" +) + +type operator int + +const ( + lessThan operator = iota + lessEq + equal + greaterEq + greaterThan +) + +var operatorToProto = map[operator]*pb.Query_Filter_Operator{ + lessThan: pb.Query_Filter_LESS_THAN.Enum(), + lessEq: pb.Query_Filter_LESS_THAN_OR_EQUAL.Enum(), + equal: pb.Query_Filter_EQUAL.Enum(), + greaterEq: pb.Query_Filter_GREATER_THAN_OR_EQUAL.Enum(), + greaterThan: pb.Query_Filter_GREATER_THAN.Enum(), +} + +// filter is a conditional filter on query results. +type filter struct { + FieldName string + Op operator + Value interface{} +} + +type sortDirection int + +const ( + ascending sortDirection = iota + descending +) + +var sortDirectionToProto = map[sortDirection]*pb.Query_Order_Direction{ + ascending: pb.Query_Order_ASCENDING.Enum(), + descending: pb.Query_Order_DESCENDING.Enum(), +} + +// order is a sort order on query results. +type order struct { + FieldName string + Direction sortDirection +} + +// NewQuery creates a new Query for a specific entity kind. +// +// An empty kind means to return all entities, including entities created and +// managed by other App Engine features, and is called a kindless query. +// Kindless queries cannot include filters or sort orders on property values. +func NewQuery(kind string) *Query { + return &Query{ + kind: kind, + limit: -1, + } +} + +// Query represents a datastore query. +type Query struct { + kind string + ancestor *Key + filter []filter + order []order + projection []string + + distinct bool + keysOnly bool + eventual bool + limit int32 + offset int32 + start *pb.CompiledCursor + end *pb.CompiledCursor + + err error +} + +func (q *Query) clone() *Query { + x := *q + // Copy the contents of the slice-typed fields to a new backing store. + if len(q.filter) > 0 { + x.filter = make([]filter, len(q.filter)) + copy(x.filter, q.filter) + } + if len(q.order) > 0 { + x.order = make([]order, len(q.order)) + copy(x.order, q.order) + } + return &x +} + +// Ancestor returns a derivative query with an ancestor filter. +// The ancestor should not be nil. +func (q *Query) Ancestor(ancestor *Key) *Query { + q = q.clone() + if ancestor == nil { + q.err = errors.New("datastore: nil query ancestor") + return q + } + q.ancestor = ancestor + return q +} + +// EventualConsistency returns a derivative query that returns eventually +// consistent results. +// It only has an effect on ancestor queries. +func (q *Query) EventualConsistency() *Query { + q = q.clone() + q.eventual = true + return q +} + +// Filter returns a derivative query with a field-based filter. +// The filterStr argument must be a field name followed by optional space, +// followed by an operator, one of ">", "<", ">=", "<=", or "=". 
+// Fields are compared against the provided value using the operator. +// Multiple filters are AND'ed together. +func (q *Query) Filter(filterStr string, value interface{}) *Query { + q = q.clone() + filterStr = strings.TrimSpace(filterStr) + if len(filterStr) < 1 { + q.err = errors.New("datastore: invalid filter: " + filterStr) + return q + } + f := filter{ + FieldName: strings.TrimRight(filterStr, " ><=!"), + Value: value, + } + switch op := strings.TrimSpace(filterStr[len(f.FieldName):]); op { + case "<=": + f.Op = lessEq + case ">=": + f.Op = greaterEq + case "<": + f.Op = lessThan + case ">": + f.Op = greaterThan + case "=": + f.Op = equal + default: + q.err = fmt.Errorf("datastore: invalid operator %q in filter %q", op, filterStr) + return q + } + q.filter = append(q.filter, f) + return q +} + +// Order returns a derivative query with a field-based sort order. Orders are +// applied in the order they are added. The default order is ascending; to sort +// in descending order prefix the fieldName with a minus sign (-). +func (q *Query) Order(fieldName string) *Query { + q = q.clone() + fieldName = strings.TrimSpace(fieldName) + o := order{ + Direction: ascending, + FieldName: fieldName, + } + if strings.HasPrefix(fieldName, "-") { + o.Direction = descending + o.FieldName = strings.TrimSpace(fieldName[1:]) + } else if strings.HasPrefix(fieldName, "+") { + q.err = fmt.Errorf("datastore: invalid order: %q", fieldName) + return q + } + if len(o.FieldName) == 0 { + q.err = errors.New("datastore: empty order") + return q + } + q.order = append(q.order, o) + return q +} + +// Project returns a derivative query that yields only the given fields. It +// cannot be used with KeysOnly. +func (q *Query) Project(fieldNames ...string) *Query { + q = q.clone() + q.projection = append([]string(nil), fieldNames...) + return q +} + +// Distinct returns a derivative query that yields de-duplicated entities with +// respect to the set of projected fields. It is only used for projection +// queries. +func (q *Query) Distinct() *Query { + q = q.clone() + q.distinct = true + return q +} + +// KeysOnly returns a derivative query that yields only keys, not keys and +// entities. It cannot be used with projection queries. +func (q *Query) KeysOnly() *Query { + q = q.clone() + q.keysOnly = true + return q +} + +// Limit returns a derivative query that has a limit on the number of results +// returned. A negative value means unlimited. +func (q *Query) Limit(limit int) *Query { + q = q.clone() + if limit < math.MinInt32 || limit > math.MaxInt32 { + q.err = errors.New("datastore: query limit overflow") + return q + } + q.limit = int32(limit) + return q +} + +// Offset returns a derivative query that has an offset of how many keys to +// skip over before returning results. A negative value is invalid. +func (q *Query) Offset(offset int) *Query { + q = q.clone() + if offset < 0 { + q.err = errors.New("datastore: negative query offset") + return q + } + if offset > math.MaxInt32 { + q.err = errors.New("datastore: query offset overflow") + return q + } + q.offset = int32(offset) + return q +} + +// Start returns a derivative query with the given start point. +func (q *Query) Start(c Cursor) *Query { + q = q.clone() + if c.cc == nil { + q.err = errors.New("datastore: invalid cursor") + return q + } + q.start = c.cc + return q +} + +// End returns a derivative query with the given end point. 
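+//
+// For instance, a cursor saved from an earlier request can bound a later
+// query (a sketch only; the saved cursor string s and the context ctx are
+// assumed to exist):
+//
+//	cursor, err := datastore.DecodeCursor(s)
+//	if err == nil {
+//		q := datastore.NewQuery("Gopher").End(cursor)
+//		// Running q yields only results positioned before the cursor.
+//	}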
+func (q *Query) End(c Cursor) *Query { + q = q.clone() + if c.cc == nil { + q.err = errors.New("datastore: invalid cursor") + return q + } + q.end = c.cc + return q +} + +// toProto converts the query to a protocol buffer. +func (q *Query) toProto(dst *pb.Query, appID string) error { + if len(q.projection) != 0 && q.keysOnly { + return errors.New("datastore: query cannot both project and be keys-only") + } + dst.Reset() + dst.App = proto.String(appID) + if q.kind != "" { + dst.Kind = proto.String(q.kind) + } + if q.ancestor != nil { + dst.Ancestor = keyToProto(appID, q.ancestor) + if q.eventual { + dst.Strong = proto.Bool(false) + } + } + if q.projection != nil { + dst.PropertyName = q.projection + if q.distinct { + dst.GroupByPropertyName = q.projection + } + } + if q.keysOnly { + dst.KeysOnly = proto.Bool(true) + dst.RequirePerfectPlan = proto.Bool(true) + } + for _, qf := range q.filter { + if qf.FieldName == "" { + return errors.New("datastore: empty query filter field name") + } + p, errStr := valueToProto(appID, qf.FieldName, reflect.ValueOf(qf.Value), false) + if errStr != "" { + return errors.New("datastore: bad query filter value type: " + errStr) + } + xf := &pb.Query_Filter{ + Op: operatorToProto[qf.Op], + Property: []*pb.Property{p}, + } + if xf.Op == nil { + return errors.New("datastore: unknown query filter operator") + } + dst.Filter = append(dst.Filter, xf) + } + for _, qo := range q.order { + if qo.FieldName == "" { + return errors.New("datastore: empty query order field name") + } + xo := &pb.Query_Order{ + Property: proto.String(qo.FieldName), + Direction: sortDirectionToProto[qo.Direction], + } + if xo.Direction == nil { + return errors.New("datastore: unknown query order direction") + } + dst.Order = append(dst.Order, xo) + } + if q.limit >= 0 { + dst.Limit = proto.Int32(q.limit) + } + if q.offset != 0 { + dst.Offset = proto.Int32(q.offset) + } + dst.CompiledCursor = q.start + dst.EndCompiledCursor = q.end + dst.Compile = proto.Bool(true) + return nil +} + +// Count returns the number of results for the query. +// +// The running time and number of API calls made by Count scale linearly with +// the sum of the query's offset and limit. Unless the result count is +// expected to be small, it is best to specify a limit; otherwise Count will +// continue until it finishes counting or the provided context expires. +func (q *Query) Count(c context.Context) (int, error) { + // Check that the query is well-formed. + if q.err != nil { + return 0, q.err + } + + // Run a copy of the query, with keysOnly true (if we're not a projection, + // since the two are incompatible), and an adjusted offset. We also set the + // limit to zero, as we don't want any actual entity data, just the number + // of skipped results. + newQ := q.clone() + newQ.keysOnly = len(newQ.projection) == 0 + newQ.limit = 0 + if q.limit < 0 { + // If the original query was unlimited, set the new query's offset to maximum. + newQ.offset = math.MaxInt32 + } else { + newQ.offset = q.offset + q.limit + if newQ.offset < 0 { + // Do the best we can, in the presence of overflow. + newQ.offset = math.MaxInt32 + } + } + req := &pb.Query{} + if err := newQ.toProto(req, internal.FullyQualifiedAppID(c)); err != nil { + return 0, err + } + res := &pb.QueryResult{} + if err := internal.Call(c, "datastore_v3", "RunQuery", req, res); err != nil { + return 0, err + } + + // n is the count we will return. 
For example, suppose that our original
+	// query had an offset of 4 and a limit of 2008: the count will be 2008,
+	// provided that there are at least 2012 matching entities. However, the
+	// RPCs will only skip 1000 results at a time. The RPC sequence is:
+	//	call RunQuery with (offset, limit) = (2012, 0)  // 2012 == newQ.offset
+	//	response has (skippedResults, moreResults) = (1000, true)
+	//	n += 1000  // n == 1000
+	//	call Next with (offset, limit) = (1012, 0)  // 1012 == newQ.offset - n
+	//	response has (skippedResults, moreResults) = (1000, true)
+	//	n += 1000  // n == 2000
+	//	call Next with (offset, limit) = (12, 0)  // 12 == newQ.offset - n
+	//	response has (skippedResults, moreResults) = (12, false)
+	//	n += 12  // n == 2012
+	//	// exit the loop
+	//	n -= 4  // n == 2008
+	var n int32
+	for {
+		// The QueryResult should have no actual entity data, just skipped results.
+		if len(res.Result) != 0 {
+			return 0, errors.New("datastore: internal error: Count request returned too much data")
+		}
+		n += res.GetSkippedResults()
+		if !res.GetMoreResults() {
+			break
+		}
+		if err := callNext(c, res, newQ.offset-n, 0); err != nil {
+			return 0, err
+		}
+	}
+	n -= q.offset
+	if n < 0 {
+		// If the offset was greater than the number of matching entities,
+		// return 0 instead of negative.
+		n = 0
+	}
+	return int(n), nil
+}
+
+// callNext issues a datastore_v3/Next RPC to advance a cursor, such as that
+// returned by a query with more results.
+func callNext(c context.Context, res *pb.QueryResult, offset, limit int32) error {
+	if res.Cursor == nil {
+		return errors.New("datastore: internal error: server did not return a cursor")
+	}
+	req := &pb.NextRequest{
+		Cursor: res.Cursor,
+	}
+	if limit >= 0 {
+		req.Count = proto.Int32(limit)
+	}
+	if offset != 0 {
+		req.Offset = proto.Int32(offset)
+	}
+	if res.CompiledCursor != nil {
+		req.Compile = proto.Bool(true)
+	}
+	res.Reset()
+	return internal.Call(c, "datastore_v3", "Next", req, res)
+}
+
+// GetAll runs the query in the given context and returns all keys that match
+// that query, as well as appending the values to dst.
+//
+// dst must have type *[]S or *[]*S or *[]P, for some struct type S or some
+// non-interface, non-pointer type P such that P or *P implements
+// PropertyLoadSaver.
+//
+// As a special case, *PropertyList is an invalid type for dst, even though a
+// PropertyList is a slice of structs. It is treated as invalid to avoid being
+// mistakenly passed when *[]PropertyList was intended.
+//
+// The keys returned by GetAll will be in a 1-1 correspondence with the entities
+// added to dst.
+//
+// If q is a ``keys-only'' query, GetAll ignores dst and only returns the keys.
+//
+// The running time and number of API calls made by GetAll scale linearly with
+// the sum of the query's offset and limit. Unless the result count is
+// expected to be small, it is best to specify a limit; otherwise GetAll will
+// continue until it finishes collecting results or the provided context
+// expires.
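+//
+// A sketch of typical use (the Gopher struct, its Name field and the context
+// ctx are illustrative):
+//
+//	var gophers []Gopher
+//	q := datastore.NewQuery("Gopher").Filter("Name =", "George").Limit(10)
+//	keys, err := q.GetAll(ctx, &gophers)
+//	// On success, keys[i] is the key of the entity loaded into gophers[i].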
+func (q *Query) GetAll(c context.Context, dst interface{}) ([]*Key, error) { + var ( + dv reflect.Value + mat multiArgType + elemType reflect.Type + errFieldMismatch error + ) + if !q.keysOnly { + dv = reflect.ValueOf(dst) + if dv.Kind() != reflect.Ptr || dv.IsNil() { + return nil, ErrInvalidEntityType + } + dv = dv.Elem() + mat, elemType = checkMultiArg(dv) + if mat == multiArgTypeInvalid || mat == multiArgTypeInterface { + return nil, ErrInvalidEntityType + } + } + + var keys []*Key + for t := q.Run(c); ; { + k, e, err := t.next() + if err == Done { + break + } + if err != nil { + return keys, err + } + if !q.keysOnly { + ev := reflect.New(elemType) + if elemType.Kind() == reflect.Map { + // This is a special case. The zero values of a map type are + // not immediately useful; they have to be make'd. + // + // Funcs and channels are similar, in that a zero value is not useful, + // but even a freshly make'd channel isn't useful: there's no fixed + // channel buffer size that is always going to be large enough, and + // there's no goroutine to drain the other end. Theoretically, these + // types could be supported, for example by sniffing for a constructor + // method or requiring prior registration, but for now it's not a + // frequent enough concern to be worth it. Programmers can work around + // it by explicitly using Iterator.Next instead of the Query.GetAll + // convenience method. + x := reflect.MakeMap(elemType) + ev.Elem().Set(x) + } + if err = loadEntity(ev.Interface(), e); err != nil { + if _, ok := err.(*ErrFieldMismatch); ok { + // We continue loading entities even in the face of field mismatch errors. + // If we encounter any other error, that other error is returned. Otherwise, + // an ErrFieldMismatch is returned. + errFieldMismatch = err + } else { + return keys, err + } + } + if mat != multiArgTypeStructPtr { + ev = ev.Elem() + } + dv.Set(reflect.Append(dv, ev)) + } + keys = append(keys, k) + } + return keys, errFieldMismatch +} + +// Run runs the query in the given context. +func (q *Query) Run(c context.Context) *Iterator { + if q.err != nil { + return &Iterator{err: q.err} + } + t := &Iterator{ + c: c, + limit: q.limit, + q: q, + prevCC: q.start, + } + var req pb.Query + if err := q.toProto(&req, internal.FullyQualifiedAppID(c)); err != nil { + t.err = err + return t + } + if err := internal.Call(c, "datastore_v3", "RunQuery", &req, &t.res); err != nil { + t.err = err + return t + } + offset := q.offset - t.res.GetSkippedResults() + for offset > 0 && t.res.GetMoreResults() { + t.prevCC = t.res.CompiledCursor + if err := callNext(t.c, &t.res, offset, t.limit); err != nil { + t.err = err + break + } + skip := t.res.GetSkippedResults() + if skip < 0 { + t.err = errors.New("datastore: internal error: negative number of skipped_results") + break + } + offset -= skip + } + if offset < 0 { + t.err = errors.New("datastore: internal error: query offset was overshot") + } + return t +} + +// Iterator is the result of running a query. +type Iterator struct { + c context.Context + err error + // res is the result of the most recent RunQuery or Next API call. + res pb.QueryResult + // i is how many elements of res.Result we have iterated over. + i int + // limit is the limit on the number of results this iterator should return. + // A negative value means unlimited. + limit int32 + // q is the original query which yielded this iterator. + q *Query + // prevCC is the compiled cursor that marks the end of the previous batch + // of results. 
+ prevCC *pb.CompiledCursor +} + +// Done is returned when a query iteration has completed. +var Done = errors.New("datastore: query has no more results") + +// Next returns the key of the next result. When there are no more results, +// Done is returned as the error. +// +// If the query is not keys only and dst is non-nil, it also loads the entity +// stored for that key into the struct pointer or PropertyLoadSaver dst, with +// the same semantics and possible errors as for the Get function. +func (t *Iterator) Next(dst interface{}) (*Key, error) { + k, e, err := t.next() + if err != nil { + return nil, err + } + if dst != nil && !t.q.keysOnly { + err = loadEntity(dst, e) + } + return k, err +} + +func (t *Iterator) next() (*Key, *pb.EntityProto, error) { + if t.err != nil { + return nil, nil, t.err + } + + // Issue datastore_v3/Next RPCs as necessary. + for t.i == len(t.res.Result) { + if !t.res.GetMoreResults() { + t.err = Done + return nil, nil, t.err + } + t.prevCC = t.res.CompiledCursor + if err := callNext(t.c, &t.res, 0, t.limit); err != nil { + t.err = err + return nil, nil, t.err + } + if t.res.GetSkippedResults() != 0 { + t.err = errors.New("datastore: internal error: iterator has skipped results") + return nil, nil, t.err + } + t.i = 0 + if t.limit >= 0 { + t.limit -= int32(len(t.res.Result)) + if t.limit < 0 { + t.err = errors.New("datastore: internal error: query returned more results than the limit") + return nil, nil, t.err + } + } + } + + // Extract the key from the t.i'th element of t.res.Result. + e := t.res.Result[t.i] + t.i++ + if e.Key == nil { + return nil, nil, errors.New("datastore: internal error: server did not return a key") + } + k, err := protoToKey(e.Key) + if err != nil || k.Incomplete() { + return nil, nil, errors.New("datastore: internal error: server returned an invalid key") + } + return k, e, nil +} + +// Cursor returns a cursor for the iterator's current location. +func (t *Iterator) Cursor() (Cursor, error) { + if t.err != nil && t.err != Done { + return Cursor{}, t.err + } + // If we are at either end of the current batch of results, + // return the compiled cursor at that end. + skipped := t.res.GetSkippedResults() + if t.i == 0 && skipped == 0 { + if t.prevCC == nil { + // A nil pointer (of type *pb.CompiledCursor) means no constraint: + // passing it as the end cursor of a new query means unlimited results + // (glossing over the integer limit parameter for now). + // A non-nil pointer to an empty pb.CompiledCursor means the start: + // passing it as the end cursor of a new query means 0 results. + // If prevCC was nil, then the original query had no start cursor, but + // Iterator.Cursor should return "the start" instead of unlimited. + return Cursor{&zeroCC}, nil + } + return Cursor{t.prevCC}, nil + } + if t.i == len(t.res.Result) { + return Cursor{t.res.CompiledCursor}, nil + } + // Otherwise, re-run the query offset to this iterator's position, starting from + // the most recent compiled cursor. This is done on a best-effort basis, as it + // is racy; if a concurrent process has added or removed entities, then the + // cursor returned may be inconsistent. 
+	q := t.q.clone()
+	q.start = t.prevCC
+	q.offset = skipped + int32(t.i)
+	q.limit = 0
+	q.keysOnly = len(q.projection) == 0
+	t1 := q.Run(t.c)
+	_, _, err := t1.next()
+	if err != Done {
+		if err == nil {
+			err = fmt.Errorf("datastore: internal error: zero-limit query did not have zero results")
+		}
+		return Cursor{}, err
+	}
+	return Cursor{t1.res.CompiledCursor}, nil
+}
+
+var zeroCC pb.CompiledCursor
+
+// Cursor is an iterator's position. It can be converted to and from an opaque
+// string. A cursor can be used from different HTTP requests, but only with a
+// query with the same kind, ancestor, filter and order constraints.
+type Cursor struct {
+	cc *pb.CompiledCursor
+}
+
+// String returns a base-64 string representation of a cursor.
+func (c Cursor) String() string {
+	if c.cc == nil {
+		return ""
+	}
+	b, err := proto.Marshal(c.cc)
+	if err != nil {
+		// The only way to construct a Cursor with a non-nil cc field is to
+		// unmarshal from the byte representation. We panic if the unmarshal
+		// succeeds but the marshaling of the unchanged protobuf value fails.
+		panic(fmt.Sprintf("datastore: internal error: malformed cursor: %v", err))
+	}
+	return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
+}
+
+// DecodeCursor decodes a cursor from its base-64 string representation.
+func DecodeCursor(s string) (Cursor, error) {
+	if s == "" {
+		return Cursor{&zeroCC}, nil
+	}
+	if n := len(s) % 4; n != 0 {
+		s += strings.Repeat("=", 4-n)
+	}
+	b, err := base64.URLEncoding.DecodeString(s)
+	if err != nil {
+		return Cursor{}, err
+	}
+	cc := &pb.CompiledCursor{}
+	if err := proto.Unmarshal(b, cc); err != nil {
+		return Cursor{}, err
+	}
+	return Cursor{cc}, nil
+}
diff --git a/vendor/google.golang.org/appengine/datastore/query_test.go b/vendor/google.golang.org/appengine/datastore/query_test.go
new file mode 100644
index 0000000..f1b9de8
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/query_test.go
@@ -0,0 +1,583 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+ +package datastore + +import ( + "errors" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + + "google.golang.org/appengine/internal" + "google.golang.org/appengine/internal/aetesting" + pb "google.golang.org/appengine/internal/datastore" +) + +var ( + path1 = &pb.Path{ + Element: []*pb.Path_Element{ + { + Type: proto.String("Gopher"), + Id: proto.Int64(6), + }, + }, + } + path2 = &pb.Path{ + Element: []*pb.Path_Element{ + { + Type: proto.String("Gopher"), + Id: proto.Int64(6), + }, + { + Type: proto.String("Gopher"), + Id: proto.Int64(8), + }, + }, + } +) + +func fakeRunQuery(in *pb.Query, out *pb.QueryResult) error { + expectedIn := &pb.Query{ + App: proto.String("dev~fake-app"), + Kind: proto.String("Gopher"), + Compile: proto.Bool(true), + } + if !proto.Equal(in, expectedIn) { + return fmt.Errorf("unsupported argument: got %v want %v", in, expectedIn) + } + *out = pb.QueryResult{ + Result: []*pb.EntityProto{ + { + Key: &pb.Reference{ + App: proto.String("s~test-app"), + Path: path1, + }, + EntityGroup: path1, + Property: []*pb.Property{ + { + Meaning: pb.Property_TEXT.Enum(), + Name: proto.String("Name"), + Value: &pb.PropertyValue{ + StringValue: proto.String("George"), + }, + }, + { + Name: proto.String("Height"), + Value: &pb.PropertyValue{ + Int64Value: proto.Int64(32), + }, + }, + }, + }, + { + Key: &pb.Reference{ + App: proto.String("s~test-app"), + Path: path2, + }, + EntityGroup: path1, // ancestor is George + Property: []*pb.Property{ + { + Meaning: pb.Property_TEXT.Enum(), + Name: proto.String("Name"), + Value: &pb.PropertyValue{ + StringValue: proto.String("Rufus"), + }, + }, + // No height for Rufus. + }, + }, + }, + MoreResults: proto.Bool(false), + } + return nil +} + +type StructThatImplementsPLS struct{} + +func (StructThatImplementsPLS) Load(p []Property) error { return nil } +func (StructThatImplementsPLS) Save() ([]Property, error) { return nil, nil } + +var _ PropertyLoadSaver = StructThatImplementsPLS{} + +type StructPtrThatImplementsPLS struct{} + +func (*StructPtrThatImplementsPLS) Load(p []Property) error { return nil } +func (*StructPtrThatImplementsPLS) Save() ([]Property, error) { return nil, nil } + +var _ PropertyLoadSaver = &StructPtrThatImplementsPLS{} + +type PropertyMap map[string]Property + +func (m PropertyMap) Load(props []Property) error { + for _, p := range props { + if p.Multiple { + return errors.New("PropertyMap does not support multiple properties") + } + m[p.Name] = p + } + return nil +} + +func (m PropertyMap) Save() ([]Property, error) { + props := make([]Property, 0, len(m)) + for _, p := range m { + if p.Multiple { + return nil, errors.New("PropertyMap does not support multiple properties") + } + props = append(props, p) + } + return props, nil +} + +var _ PropertyLoadSaver = PropertyMap{} + +type Gopher struct { + Name string + Height int +} + +// typeOfEmptyInterface is the type of interface{}, but we can't use +// reflect.TypeOf((interface{})(nil)) directly because TypeOf takes an +// interface{}. +var typeOfEmptyInterface = reflect.TypeOf((*interface{})(nil)).Elem() + +func TestCheckMultiArg(t *testing.T) { + testCases := []struct { + v interface{} + mat multiArgType + elemType reflect.Type + }{ + // Invalid cases. + {nil, multiArgTypeInvalid, nil}, + {Gopher{}, multiArgTypeInvalid, nil}, + {&Gopher{}, multiArgTypeInvalid, nil}, + {PropertyList{}, multiArgTypeInvalid, nil}, // This is a special case. 
+ {PropertyMap{}, multiArgTypeInvalid, nil}, + {[]*PropertyList(nil), multiArgTypeInvalid, nil}, + {[]*PropertyMap(nil), multiArgTypeInvalid, nil}, + {[]**Gopher(nil), multiArgTypeInvalid, nil}, + {[]*interface{}(nil), multiArgTypeInvalid, nil}, + // Valid cases. + { + []PropertyList(nil), + multiArgTypePropertyLoadSaver, + reflect.TypeOf(PropertyList{}), + }, + { + []PropertyMap(nil), + multiArgTypePropertyLoadSaver, + reflect.TypeOf(PropertyMap{}), + }, + { + []StructThatImplementsPLS(nil), + multiArgTypePropertyLoadSaver, + reflect.TypeOf(StructThatImplementsPLS{}), + }, + { + []StructPtrThatImplementsPLS(nil), + multiArgTypePropertyLoadSaver, + reflect.TypeOf(StructPtrThatImplementsPLS{}), + }, + { + []Gopher(nil), + multiArgTypeStruct, + reflect.TypeOf(Gopher{}), + }, + { + []*Gopher(nil), + multiArgTypeStructPtr, + reflect.TypeOf(Gopher{}), + }, + { + []interface{}(nil), + multiArgTypeInterface, + typeOfEmptyInterface, + }, + } + for _, tc := range testCases { + mat, elemType := checkMultiArg(reflect.ValueOf(tc.v)) + if mat != tc.mat || elemType != tc.elemType { + t.Errorf("checkMultiArg(%T): got %v, %v want %v, %v", + tc.v, mat, elemType, tc.mat, tc.elemType) + } + } +} + +func TestSimpleQuery(t *testing.T) { + struct1 := Gopher{Name: "George", Height: 32} + struct2 := Gopher{Name: "Rufus"} + pList1 := PropertyList{ + { + Name: "Name", + Value: "George", + }, + { + Name: "Height", + Value: int64(32), + }, + } + pList2 := PropertyList{ + { + Name: "Name", + Value: "Rufus", + }, + } + pMap1 := PropertyMap{ + "Name": Property{ + Name: "Name", + Value: "George", + }, + "Height": Property{ + Name: "Height", + Value: int64(32), + }, + } + pMap2 := PropertyMap{ + "Name": Property{ + Name: "Name", + Value: "Rufus", + }, + } + + testCases := []struct { + dst interface{} + want interface{} + }{ + // The destination must have type *[]P, *[]S or *[]*S, for some non-interface + // type P such that *P implements PropertyLoadSaver, or for some struct type S. + {new([]Gopher), &[]Gopher{struct1, struct2}}, + {new([]*Gopher), &[]*Gopher{&struct1, &struct2}}, + {new([]PropertyList), &[]PropertyList{pList1, pList2}}, + {new([]PropertyMap), &[]PropertyMap{pMap1, pMap2}}, + + // Any other destination type is invalid. + {0, nil}, + {Gopher{}, nil}, + {PropertyList{}, nil}, + {PropertyMap{}, nil}, + {[]int{}, nil}, + {[]Gopher{}, nil}, + {[]PropertyList{}, nil}, + {new(int), nil}, + {new(Gopher), nil}, + {new(PropertyList), nil}, // This is a special case. 
+		{new(PropertyMap), nil},
+		{new([]int), nil},
+		{new([]map[int]int), nil},
+		{new([]map[string]Property), nil},
+		{new([]map[string]interface{}), nil},
+		{new([]*int), nil},
+		{new([]*map[int]int), nil},
+		{new([]*map[string]Property), nil},
+		{new([]*map[string]interface{}), nil},
+		{new([]**Gopher), nil},
+		{new([]*PropertyList), nil},
+		{new([]*PropertyMap), nil},
+	}
+	for _, tc := range testCases {
+		nCall := 0
+		c := aetesting.FakeSingleContext(t, "datastore_v3", "RunQuery", func(in *pb.Query, out *pb.QueryResult) error {
+			nCall++
+			return fakeRunQuery(in, out)
+		})
+		c = internal.WithAppIDOverride(c, "dev~fake-app")
+
+		var (
+			expectedErr   error
+			expectedNCall int
+		)
+		if tc.want == nil {
+			expectedErr = ErrInvalidEntityType
+		} else {
+			expectedNCall = 1
+		}
+		keys, err := NewQuery("Gopher").GetAll(c, tc.dst)
+		if err != expectedErr {
+			t.Errorf("dst type %T: got error [%v], want [%v]", tc.dst, err, expectedErr)
+			continue
+		}
+		if nCall != expectedNCall {
+			t.Errorf("dst type %T: Context.Call was called an incorrect number of times: got %d want %d", tc.dst, nCall, expectedNCall)
+			continue
+		}
+		if err != nil {
+			continue
+		}
+
+		key1 := NewKey(c, "Gopher", "", 6, nil)
+		expectedKeys := []*Key{
+			key1,
+			NewKey(c, "Gopher", "", 8, key1),
+		}
+		if l1, l2 := len(keys), len(expectedKeys); l1 != l2 {
+			t.Errorf("dst type %T: got %d keys, want %d keys", tc.dst, l1, l2)
+			continue
+		}
+		for i, key := range keys {
+			if key.AppID() != "s~test-app" {
+				t.Errorf(`dst type %T: Key #%d's AppID = %q, want "s~test-app"`, tc.dst, i, key.AppID())
+				continue
+			}
+			if !keysEqual(key, expectedKeys[i]) {
+				t.Errorf("dst type %T: got key #%d %v, want %v", tc.dst, i, key, expectedKeys[i])
+				continue
+			}
+		}
+
+		if !reflect.DeepEqual(tc.dst, tc.want) {
+			t.Errorf("dst type %T: Entities got %+v, want %+v", tc.dst, tc.dst, tc.want)
+			continue
+		}
+	}
+}
+
+// keysEqual is like (*Key).Equal, but ignores the App ID.
+func keysEqual(a, b *Key) bool {
+	for a != nil && b != nil {
+		if a.Kind() != b.Kind() || a.StringID() != b.StringID() || a.IntID() != b.IntID() {
+			return false
+		}
+		a, b = a.Parent(), b.Parent()
+	}
+	return a == b
+}
+
+func TestQueriesAreImmutable(t *testing.T) {
+	// Test that deriving q2 from q1 does not modify q1.
+	q0 := NewQuery("foo")
+	q1 := NewQuery("foo")
+	q2 := q1.Offset(2)
+	if !reflect.DeepEqual(q0, q1) {
+		t.Errorf("q0 and q1 were not equal")
+	}
+	if reflect.DeepEqual(q1, q2) {
+		t.Errorf("q1 and q2 were equal")
+	}
+
+	// Test that deriving from q4 twice does not conflict, even though
+	// q4 has a long list of order clauses. This tests that the arrays
+	// backing a query's slice of orders are not shared.
+	f := func() *Query {
+		q := NewQuery("bar")
+		// 47 is an ugly number that is unlikely to be near a re-allocation
+		// point in repeated append calls. For example, it's not near a power
+		// of 2 or a multiple of 10.
+		for i := 0; i < 47; i++ {
+			q = q.Order(fmt.Sprintf("x%d", i))
+		}
+		return q
+	}
+	q3 := f().Order("y")
+	q4 := f()
+	q5 := q4.Order("y")
+	q6 := q4.Order("z")
+	if !reflect.DeepEqual(q3, q5) {
+		t.Errorf("q3 and q5 were not equal")
+	}
+	if reflect.DeepEqual(q5, q6) {
+		t.Errorf("q5 and q6 were equal")
+	}
+}
+
+func TestFilterParser(t *testing.T) {
+	testCases := []struct {
+		filterStr     string
+		wantOK        bool
+		wantFieldName string
+		wantOp        operator
+	}{
+		// Supported ops.
+ {"x<", true, "x", lessThan}, + {"x <", true, "x", lessThan}, + {"x <", true, "x", lessThan}, + {" x < ", true, "x", lessThan}, + {"x <=", true, "x", lessEq}, + {"x =", true, "x", equal}, + {"x >=", true, "x", greaterEq}, + {"x >", true, "x", greaterThan}, + {"in >", true, "in", greaterThan}, + {"in>", true, "in", greaterThan}, + // Valid but (currently) unsupported ops. + {"x!=", false, "", 0}, + {"x !=", false, "", 0}, + {" x != ", false, "", 0}, + {"x IN", false, "", 0}, + {"x in", false, "", 0}, + // Invalid ops. + {"x EQ", false, "", 0}, + {"x lt", false, "", 0}, + {"x <>", false, "", 0}, + {"x >>", false, "", 0}, + {"x ==", false, "", 0}, + {"x =<", false, "", 0}, + {"x =>", false, "", 0}, + {"x !", false, "", 0}, + {"x ", false, "", 0}, + {"x", false, "", 0}, + } + for _, tc := range testCases { + q := NewQuery("foo").Filter(tc.filterStr, 42) + if ok := q.err == nil; ok != tc.wantOK { + t.Errorf("%q: ok=%t, want %t", tc.filterStr, ok, tc.wantOK) + continue + } + if !tc.wantOK { + continue + } + if len(q.filter) != 1 { + t.Errorf("%q: len=%d, want %d", tc.filterStr, len(q.filter), 1) + continue + } + got, want := q.filter[0], filter{tc.wantFieldName, tc.wantOp, 42} + if got != want { + t.Errorf("%q: got %v, want %v", tc.filterStr, got, want) + continue + } + } +} + +func TestQueryToProto(t *testing.T) { + // The context is required to make Keys for the test cases. + var got *pb.Query + NoErr := errors.New("No error") + c := aetesting.FakeSingleContext(t, "datastore_v3", "RunQuery", func(in *pb.Query, out *pb.QueryResult) error { + got = in + return NoErr // return a non-nil error so Run doesn't keep going. + }) + c = internal.WithAppIDOverride(c, "dev~fake-app") + + testCases := []struct { + desc string + query *Query + want *pb.Query + err string + }{ + { + desc: "empty", + query: NewQuery(""), + want: &pb.Query{}, + }, + { + desc: "standard query", + query: NewQuery("kind").Order("-I").Filter("I >", 17).Filter("U =", "Dave").Limit(7).Offset(42), + want: &pb.Query{ + Kind: proto.String("kind"), + Filter: []*pb.Query_Filter{ + { + Op: pb.Query_Filter_GREATER_THAN.Enum(), + Property: []*pb.Property{ + { + Name: proto.String("I"), + Value: &pb.PropertyValue{Int64Value: proto.Int64(17)}, + Multiple: proto.Bool(false), + }, + }, + }, + { + Op: pb.Query_Filter_EQUAL.Enum(), + Property: []*pb.Property{ + { + Name: proto.String("U"), + Value: &pb.PropertyValue{StringValue: proto.String("Dave")}, + Multiple: proto.Bool(false), + }, + }, + }, + }, + Order: []*pb.Query_Order{ + { + Property: proto.String("I"), + Direction: pb.Query_Order_DESCENDING.Enum(), + }, + }, + Limit: proto.Int32(7), + Offset: proto.Int32(42), + }, + }, + { + desc: "ancestor", + query: NewQuery("").Ancestor(NewKey(c, "kind", "Mummy", 0, nil)), + want: &pb.Query{ + Ancestor: &pb.Reference{ + App: proto.String("dev~fake-app"), + Path: &pb.Path{ + Element: []*pb.Path_Element{{Type: proto.String("kind"), Name: proto.String("Mummy")}}, + }, + }, + }, + }, + { + desc: "projection", + query: NewQuery("").Project("A", "B"), + want: &pb.Query{ + PropertyName: []string{"A", "B"}, + }, + }, + { + desc: "projection with distinct", + query: NewQuery("").Project("A", "B").Distinct(), + want: &pb.Query{ + PropertyName: []string{"A", "B"}, + GroupByPropertyName: []string{"A", "B"}, + }, + }, + { + desc: "keys only", + query: NewQuery("").KeysOnly(), + want: &pb.Query{ + KeysOnly: proto.Bool(true), + RequirePerfectPlan: proto.Bool(true), + }, + }, + { + desc: "empty filter", + query: NewQuery("kind").Filter("=", 17), + err: "empty 
query filter field name",
+		},
+		{
+			desc:  "bad filter type",
+			query: NewQuery("kind").Filter("M =", map[string]bool{}),
+			err:   "bad query filter value type",
+		},
+		{
+			desc:  "bad filter operator",
+			query: NewQuery("kind").Filter("I <<=", 17),
+			err:   `invalid operator "<<=" in filter "I <<="`,
+		},
+		{
+			desc:  "empty order",
+			query: NewQuery("kind").Order(""),
+			err:   "empty order",
+		},
+		{
+			desc:  "bad order direction",
+			query: NewQuery("kind").Order("+I"),
+			err:   `invalid order: "+I"`,
+		},
+	}
+
+	for _, tt := range testCases {
+		got = nil
+		if _, err := tt.query.Run(c).Next(nil); err != NoErr {
+			if tt.err == "" || !strings.Contains(err.Error(), tt.err) {
+				t.Errorf("%s: error %v, want %q", tt.desc, err, tt.err)
+			}
+			continue
+		}
+		if tt.err != "" {
+			t.Errorf("%s: no error, want %q", tt.desc, tt.err)
+			continue
+		}
+		// Fields that are common to all protos.
+		tt.want.App = proto.String("dev~fake-app")
+		tt.want.Compile = proto.Bool(true)
+		if !proto.Equal(got, tt.want) {
+			t.Errorf("%s:\ngot %v\nwant %v", tt.desc, got, tt.want)
+		}
+	}
+}
diff --git a/vendor/google.golang.org/appengine/datastore/save.go b/vendor/google.golang.org/appengine/datastore/save.go
new file mode 100644
index 0000000..b5f9592
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/save.go
@@ -0,0 +1,300 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"reflect"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+
+	"google.golang.org/appengine"
+	pb "google.golang.org/appengine/internal/datastore"
+)
+
+func toUnixMicro(t time.Time) int64 {
+	// We cannot use t.UnixNano() / 1e3 because we want to handle times more than
+	// 2^63 nanoseconds (which is about 292 years) away from 1970, and those cannot
+	// be represented in the numerator of a single int64 divide.
+	return t.Unix()*1e6 + int64(t.Nanosecond()/1e3)
+}
+
+func fromUnixMicro(t int64) time.Time {
+	return time.Unix(t/1e6, (t%1e6)*1e3).UTC()
+}
+
+var (
+	minTime = time.Unix(int64(math.MinInt64)/1e6, (int64(math.MinInt64)%1e6)*1e3)
+	maxTime = time.Unix(int64(math.MaxInt64)/1e6, (int64(math.MaxInt64)%1e6)*1e3)
+)
+
+// valueToProto converts a named value to a newly allocated Property.
+// The returned error string is empty on success.
+func valueToProto(defaultAppID, name string, v reflect.Value, multiple bool) (p *pb.Property, errStr string) {
+	var (
+		pv          pb.PropertyValue
+		unsupported bool
+	)
+	switch v.Kind() {
+	case reflect.Invalid:
+		// No-op.
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		pv.Int64Value = proto.Int64(v.Int())
+	case reflect.Bool:
+		pv.BooleanValue = proto.Bool(v.Bool())
+	case reflect.String:
+		pv.StringValue = proto.String(v.String())
+	case reflect.Float32, reflect.Float64:
+		pv.DoubleValue = proto.Float64(v.Float())
+	case reflect.Ptr:
+		if k, ok := v.Interface().(*Key); ok {
+			if k != nil {
+				pv.Referencevalue = keyToReferenceValue(defaultAppID, k)
+			}
+		} else {
+			unsupported = true
+		}
+	case reflect.Struct:
+		switch t := v.Interface().(type) {
+		case time.Time:
+			if t.Before(minTime) || t.After(maxTime) {
+				return nil, "time value out of range"
+			}
+			pv.Int64Value = proto.Int64(toUnixMicro(t))
+		case appengine.GeoPoint:
+			if !t.Valid() {
+				return nil, "invalid GeoPoint value"
+			}
+			// NOTE: Strangely, latitude maps to X, longitude to Y.
+ pv.Pointvalue = &pb.PropertyValue_PointValue{X: &t.Lat, Y: &t.Lng} + default: + unsupported = true + } + case reflect.Slice: + if b, ok := v.Interface().([]byte); ok { + pv.StringValue = proto.String(string(b)) + } else { + // nvToProto should already catch slice values. + // If we get here, we have a slice of slice values. + unsupported = true + } + default: + unsupported = true + } + if unsupported { + return nil, "unsupported datastore value type: " + v.Type().String() + } + p = &pb.Property{ + Name: proto.String(name), + Value: &pv, + Multiple: proto.Bool(multiple), + } + if v.IsValid() { + switch v.Interface().(type) { + case []byte: + p.Meaning = pb.Property_BLOB.Enum() + case ByteString: + p.Meaning = pb.Property_BYTESTRING.Enum() + case appengine.BlobKey: + p.Meaning = pb.Property_BLOBKEY.Enum() + case time.Time: + p.Meaning = pb.Property_GD_WHEN.Enum() + case appengine.GeoPoint: + p.Meaning = pb.Property_GEORSS_POINT.Enum() + } + } + return p, "" +} + +// saveEntity saves an EntityProto into a PropertyLoadSaver or struct pointer. +func saveEntity(defaultAppID string, key *Key, src interface{}) (*pb.EntityProto, error) { + var err error + var props []Property + if e, ok := src.(PropertyLoadSaver); ok { + props, err = e.Save() + } else { + props, err = SaveStruct(src) + } + if err != nil { + return nil, err + } + return propertiesToProto(defaultAppID, key, props) +} + +func saveStructProperty(props *[]Property, name string, noIndex, multiple bool, v reflect.Value) error { + p := Property{ + Name: name, + NoIndex: noIndex, + Multiple: multiple, + } + switch x := v.Interface().(type) { + case *Key: + p.Value = x + case time.Time: + p.Value = x + case appengine.BlobKey: + p.Value = x + case appengine.GeoPoint: + p.Value = x + case ByteString: + p.Value = x + default: + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p.Value = v.Int() + case reflect.Bool: + p.Value = v.Bool() + case reflect.String: + p.Value = v.String() + case reflect.Float32, reflect.Float64: + p.Value = v.Float() + case reflect.Slice: + if v.Type().Elem().Kind() == reflect.Uint8 { + p.NoIndex = true + p.Value = v.Bytes() + } + case reflect.Struct: + if !v.CanAddr() { + return fmt.Errorf("datastore: unsupported struct field: value is unaddressable") + } + sub, err := newStructPLS(v.Addr().Interface()) + if err != nil { + return fmt.Errorf("datastore: unsupported struct field: %v", err) + } + return sub.(structPLS).save(props, name, noIndex, multiple) + } + } + if p.Value == nil { + return fmt.Errorf("datastore: unsupported struct field type: %v", v.Type()) + } + *props = append(*props, p) + return nil +} + +func (s structPLS) Save() ([]Property, error) { + var props []Property + if err := s.save(&props, "", false, false); err != nil { + return nil, err + } + return props, nil +} + +func (s structPLS) save(props *[]Property, prefix string, noIndex, multiple bool) error { + for i, t := range s.codec.byIndex { + if t.name == "-" { + continue + } + name := t.name + if prefix != "" { + name = prefix + name + } + v := s.v.Field(i) + if !v.IsValid() || !v.CanSet() { + continue + } + noIndex1 := noIndex || t.noIndex + // For slice fields that aren't []byte, save each element. + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { + for j := 0; j < v.Len(); j++ { + if err := saveStructProperty(props, name, noIndex1, true, v.Index(j)); err != nil { + return err + } + } + continue + } + // Otherwise, save the field itself. 
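+		// (A hypothetical scalar field such as `N int` takes this path and
+		// yields a single Property{Name: "N", Multiple: false}; the slice
+		// branch above instead yields one Property per element, each with
+		// Multiple: true.)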
+ if err := saveStructProperty(props, name, noIndex1, multiple, v); err != nil { + return err + } + } + return nil +} + +func propertiesToProto(defaultAppID string, key *Key, props []Property) (*pb.EntityProto, error) { + e := &pb.EntityProto{ + Key: keyToProto(defaultAppID, key), + } + if key.parent == nil { + e.EntityGroup = &pb.Path{} + } else { + e.EntityGroup = keyToProto(defaultAppID, key.root()).Path + } + prevMultiple := make(map[string]bool) + + for _, p := range props { + if pm, ok := prevMultiple[p.Name]; ok { + if !pm || !p.Multiple { + return nil, fmt.Errorf("datastore: multiple Properties with Name %q, but Multiple is false", p.Name) + } + } else { + prevMultiple[p.Name] = p.Multiple + } + + x := &pb.Property{ + Name: proto.String(p.Name), + Value: new(pb.PropertyValue), + Multiple: proto.Bool(p.Multiple), + } + switch v := p.Value.(type) { + case int64: + x.Value.Int64Value = proto.Int64(v) + case bool: + x.Value.BooleanValue = proto.Bool(v) + case string: + x.Value.StringValue = proto.String(v) + if p.NoIndex { + x.Meaning = pb.Property_TEXT.Enum() + } + case float64: + x.Value.DoubleValue = proto.Float64(v) + case *Key: + if v != nil { + x.Value.Referencevalue = keyToReferenceValue(defaultAppID, v) + } + case time.Time: + if v.Before(minTime) || v.After(maxTime) { + return nil, fmt.Errorf("datastore: time value out of range") + } + x.Value.Int64Value = proto.Int64(toUnixMicro(v)) + x.Meaning = pb.Property_GD_WHEN.Enum() + case appengine.BlobKey: + x.Value.StringValue = proto.String(string(v)) + x.Meaning = pb.Property_BLOBKEY.Enum() + case appengine.GeoPoint: + if !v.Valid() { + return nil, fmt.Errorf("datastore: invalid GeoPoint value") + } + // NOTE: Strangely, latitude maps to X, longitude to Y. + x.Value.Pointvalue = &pb.PropertyValue_PointValue{X: &v.Lat, Y: &v.Lng} + x.Meaning = pb.Property_GEORSS_POINT.Enum() + case []byte: + x.Value.StringValue = proto.String(string(v)) + x.Meaning = pb.Property_BLOB.Enum() + if !p.NoIndex { + return nil, fmt.Errorf("datastore: cannot index a []byte valued Property with Name %q", p.Name) + } + case ByteString: + x.Value.StringValue = proto.String(string(v)) + x.Meaning = pb.Property_BYTESTRING.Enum() + default: + if p.Value != nil { + return nil, fmt.Errorf("datastore: invalid Value type for a Property with Name %q", p.Name) + } + } + + if p.NoIndex { + e.RawProperty = append(e.RawProperty, x) + } else { + e.Property = append(e.Property, x) + if len(e.Property) > maxIndexedProperties { + return nil, errors.New("datastore: too many indexed properties") + } + } + } + return e, nil +} diff --git a/vendor/google.golang.org/appengine/datastore/time_test.go b/vendor/google.golang.org/appengine/datastore/time_test.go new file mode 100644 index 0000000..ba74b44 --- /dev/null +++ b/vendor/google.golang.org/appengine/datastore/time_test.go @@ -0,0 +1,65 @@ +// Copyright 2012 Google Inc. All Rights Reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package datastore + +import ( + "testing" + "time" +) + +func TestUnixMicro(t *testing.T) { + // Test that all these time.Time values survive a round trip to unix micros. 
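+	// As a worked example of the arithmetic under test: time.Unix(1, 500) is
+	// 1s+500ns past the epoch, so toUnixMicro gives 1*1e6 + 500/1e3 = 1000000
+	// (integer division drops the 500ns), and fromUnixMicro(1000000) restores
+	// time.Unix(1, 0). The quantization check at the bottom of this test
+	// relies on exactly that truncation.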
+ testCases := []time.Time{ + {}, + time.Date(2, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(23, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(234, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1000, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1600, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1700, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1800, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC), + time.Unix(-1e6, -1000), + time.Unix(-1e6, 0), + time.Unix(-1e6, +1000), + time.Unix(-60, -1000), + time.Unix(-60, 0), + time.Unix(-60, +1000), + time.Unix(-1, -1000), + time.Unix(-1, 0), + time.Unix(-1, +1000), + time.Unix(0, -3000), + time.Unix(0, -2000), + time.Unix(0, -1000), + time.Unix(0, 0), + time.Unix(0, +1000), + time.Unix(0, +2000), + time.Unix(+60, -1000), + time.Unix(+60, 0), + time.Unix(+60, +1000), + time.Unix(+1e6, -1000), + time.Unix(+1e6, 0), + time.Unix(+1e6, +1000), + time.Date(1999, 12, 31, 23, 59, 59, 999000, time.UTC), + time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(2006, 1, 2, 15, 4, 5, 678000, time.UTC), + time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), + time.Date(3456, 1, 1, 0, 0, 0, 0, time.UTC), + } + for _, tc := range testCases { + got := fromUnixMicro(toUnixMicro(tc)) + if !got.Equal(tc) { + t.Errorf("got %q, want %q", got, tc) + } + } + + // Test that a time.Time that isn't an integral number of microseconds + // is not perfectly reconstructed after a round trip. + t0 := time.Unix(0, 123) + t1 := fromUnixMicro(toUnixMicro(t0)) + if t1.Nanosecond()%1000 != 0 || t0.Nanosecond()%1000 == 0 { + t.Errorf("quantization to µs: got %q with %d ns, started with %d ns", t1, t1.Nanosecond(), t0.Nanosecond()) + } +} diff --git a/vendor/google.golang.org/appengine/datastore/transaction.go b/vendor/google.golang.org/appengine/datastore/transaction.go new file mode 100644 index 0000000..a7f3f2b --- /dev/null +++ b/vendor/google.golang.org/appengine/datastore/transaction.go @@ -0,0 +1,87 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package datastore + +import ( + "errors" + + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/datastore" +) + +func init() { + internal.RegisterTransactionSetter(func(x *pb.Query, t *pb.Transaction) { + x.Transaction = t + }) + internal.RegisterTransactionSetter(func(x *pb.GetRequest, t *pb.Transaction) { + x.Transaction = t + }) + internal.RegisterTransactionSetter(func(x *pb.PutRequest, t *pb.Transaction) { + x.Transaction = t + }) + internal.RegisterTransactionSetter(func(x *pb.DeleteRequest, t *pb.Transaction) { + x.Transaction = t + }) +} + +// ErrConcurrentTransaction is returned when a transaction is rolled back due +// to a conflict with a concurrent transaction. +var ErrConcurrentTransaction = errors.New("datastore: concurrent transaction") + +// RunInTransaction runs f in a transaction. It calls f with a transaction +// context tc that f should use for all App Engine operations. +// +// If f returns nil, RunInTransaction attempts to commit the transaction, +// returning nil if it succeeds. If the commit fails due to a conflicting +// transaction, RunInTransaction retries f, each time with a new transaction +// context. It gives up and returns ErrConcurrentTransaction after three +// failed attempts. The number of attempts can be configured by specifying +// TransactionOptions.Attempts. 
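+//
+// A minimal usage sketch (Counter, key and c are hypothetical):
+//	err := datastore.RunInTransaction(c, func(tc context.Context) error {
+//		var count Counter
+//		if err := datastore.Get(tc, key, &count); err != nil && err != datastore.ErrNoSuchEntity {
+//			return err
+//		}
+//		count.N++
+//		_, err := datastore.Put(tc, key, &count)
+//		return err
+//	}, nil)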
+// +// If f returns non-nil, then any datastore changes will not be applied and +// RunInTransaction returns that same error. The function f is not retried. +// +// Note that when f returns, the transaction is not yet committed. Calling code +// must be careful not to assume that any of f's changes have been committed +// until RunInTransaction returns nil. +// +// Since f may be called multiple times, f should usually be idempotent. +// datastore.Get is not idempotent when unmarshaling slice fields. +// +// Nested transactions are not supported; c may not be a transaction context. +func RunInTransaction(c context.Context, f func(tc context.Context) error, opts *TransactionOptions) error { + xg := false + if opts != nil { + xg = opts.XG + } + attempts := 3 + if opts != nil && opts.Attempts > 0 { + attempts = opts.Attempts + } + for i := 0; i < attempts; i++ { + if err := internal.RunTransactionOnce(c, f, xg); err != internal.ErrConcurrentTransaction { + return err + } + } + return ErrConcurrentTransaction +} + +// TransactionOptions are the options for running a transaction. +type TransactionOptions struct { + // XG is whether the transaction can cross multiple entity groups. In + // comparison, a single group transaction is one where all datastore keys + // used have the same root key. Note that cross group transactions do not + // have the same behavior as single group transactions. In particular, it + // is much more likely to see partially applied transactions in different + // entity groups, in global queries. + // It is valid to set XG to true even if the transaction is within a + // single entity group. + XG bool + // Attempts controls the number of retries to perform when commits fail + // due to a conflicting transaction. If omitted, it defaults to 3. + Attempts int +} diff --git a/vendor/google.golang.org/appengine/delay/delay.go b/vendor/google.golang.org/appengine/delay/delay.go new file mode 100644 index 0000000..9e517ca --- /dev/null +++ b/vendor/google.golang.org/appengine/delay/delay.go @@ -0,0 +1,278 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +/* +Package delay provides a way to execute code outside the scope of a +user request by using the taskqueue API. + +To declare a function that may be executed later, call Func +in a top-level assignment context, passing it an arbitrary string key +and a function whose first argument is of type context.Context. + var laterFunc = delay.Func("key", myFunc) +It is also possible to use a function literal. + var laterFunc = delay.Func("key", func(c context.Context, x string) { + // ... + }) + +To call a function, invoke its Call method. + laterFunc.Call(c, "something") +A function may be called any number of times. If the function has any +return arguments, and the last one is of type error, the function may +return a non-nil error to signal that the function should be retried. + +The arguments to functions may be of any type that is encodable by the gob +package. If an argument is of interface type, it is the client's responsibility +to register with the gob package whatever concrete type may be passed for that +argument; see http://golang.org/pkg/gob/#Register for details. + +Any errors during initialization or execution of a function will be +logged to the application logs. Error logs that occur during initialization will +be associated with the request that invoked the Call method. 
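+
+For example, a function can request retries through a trailing error result
+(sendReport here is hypothetical):
+	var reportLater = delay.Func("send-report", func(c context.Context, id int64) error {
+		return sendReport(c, id) // a non-nil error re-enqueues the task
+	})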
+ +The state of a function invocation that has not yet successfully +executed is preserved by combining the file name in which it is declared +with the string key that was passed to the Func function. Updating an app +with pending function invocations is safe as long as the relevant +functions have the (filename, key) combination preserved. + +The delay package uses the Task Queue API to create tasks that call the +reserved application path "/_ah/queue/go/delay". +This path must not be marked as "login: required" in app.yaml; +it must be marked as "login: admin" or have no access restriction. +*/ +package delay // import "google.golang.org/appengine/delay" + +import ( + "bytes" + "encoding/gob" + "errors" + "fmt" + "net/http" + "reflect" + "runtime" + + "golang.org/x/net/context" + + "google.golang.org/appengine" + "google.golang.org/appengine/log" + "google.golang.org/appengine/taskqueue" +) + +// Function represents a function that may have a delayed invocation. +type Function struct { + fv reflect.Value // Kind() == reflect.Func + key string + err error // any error during initialization +} + +const ( + // The HTTP path for invocations. + path = "/_ah/queue/go/delay" + // Use the default queue. + queue = "" +) + +var ( + // registry of all delayed functions + funcs = make(map[string]*Function) + + // precomputed types + contextType = reflect.TypeOf((*context.Context)(nil)).Elem() + errorType = reflect.TypeOf((*error)(nil)).Elem() + + // errors + errFirstArg = errors.New("first argument must be context.Context") +) + +// Func declares a new Function. The second argument must be a function with a +// first argument of type context.Context. +// This function must be called at program initialization time. That means it +// must be called in a global variable declaration or from an init function. +// This restriction is necessary because the instance that delays a function +// call may not be the one that executes it. Only the code executed at program +// initialization time is guaranteed to have been run by an instance before it +// receives a request. +func Func(key string, i interface{}) *Function { + f := &Function{fv: reflect.ValueOf(i)} + + // Derive unique, somewhat stable key for this func. + _, file, _, _ := runtime.Caller(1) + f.key = file + ":" + key + + t := f.fv.Type() + if t.Kind() != reflect.Func { + f.err = errors.New("not a function") + return f + } + if t.NumIn() == 0 || t.In(0) != contextType { + f.err = errFirstArg + return f + } + + // Register the function's arguments with the gob package. + // This is required because they are marshaled inside a []interface{}. + // gob.Register only expects to be called during initialization; + // that's fine because this function expects the same. + for i := 0; i < t.NumIn(); i++ { + // Only concrete types may be registered. If the argument has + // interface type, the client is resposible for registering the + // concrete types it will hold. + if t.In(i).Kind() == reflect.Interface { + continue + } + gob.Register(reflect.Zero(t.In(i)).Interface()) + } + + if old := funcs[f.key]; old != nil { + old.err = fmt.Errorf("multiple functions registered for %s in %s", key, file) + } + funcs[f.key] = f + return f +} + +type invocation struct { + Key string + Args []interface{} +} + +// Call invokes a delayed function. +// err := f.Call(c, ...) +// is equivalent to +// t, _ := f.Task(...) +// _, err := taskqueue.Add(c, t, "") +func (f *Function) Call(c context.Context, args ...interface{}) error { + t, err := f.Task(args...) 
+ if err != nil { + return err + } + _, err = taskqueueAdder(c, t, queue) + return err +} + +// Task creates a Task that will invoke the function. +// Its parameters may be tweaked before adding it to a queue. +// Users should not modify the Path or Payload fields of the returned Task. +func (f *Function) Task(args ...interface{}) (*taskqueue.Task, error) { + if f.err != nil { + return nil, fmt.Errorf("delay: func is invalid: %v", f.err) + } + + nArgs := len(args) + 1 // +1 for the context.Context + ft := f.fv.Type() + minArgs := ft.NumIn() + if ft.IsVariadic() { + minArgs-- + } + if nArgs < minArgs { + return nil, fmt.Errorf("delay: too few arguments to func: %d < %d", nArgs, minArgs) + } + if !ft.IsVariadic() && nArgs > minArgs { + return nil, fmt.Errorf("delay: too many arguments to func: %d > %d", nArgs, minArgs) + } + + // Check arg types. + for i := 1; i < nArgs; i++ { + at := reflect.TypeOf(args[i-1]) + var dt reflect.Type + if i < minArgs { + // not a variadic arg + dt = ft.In(i) + } else { + // a variadic arg + dt = ft.In(minArgs).Elem() + } + // nil arguments won't have a type, so they need special handling. + if at == nil { + // nil interface + switch dt.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + continue // may be nil + } + return nil, fmt.Errorf("delay: argument %d has wrong type: %v is not nilable", i, dt) + } + switch at.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + av := reflect.ValueOf(args[i-1]) + if av.IsNil() { + // nil value in interface; not supported by gob, so we replace it + // with a nil interface value + args[i-1] = nil + } + } + if !at.AssignableTo(dt) { + return nil, fmt.Errorf("delay: argument %d has wrong type: %v is not assignable to %v", i, at, dt) + } + } + + inv := invocation{ + Key: f.key, + Args: args, + } + + buf := new(bytes.Buffer) + if err := gob.NewEncoder(buf).Encode(inv); err != nil { + return nil, fmt.Errorf("delay: gob encoding failed: %v", err) + } + + return &taskqueue.Task{ + Path: path, + Payload: buf.Bytes(), + }, nil +} + +var taskqueueAdder = taskqueue.Add // for testing + +func init() { + http.HandleFunc(path, func(w http.ResponseWriter, req *http.Request) { + runFunc(appengine.NewContext(req), w, req) + }) +} + +func runFunc(c context.Context, w http.ResponseWriter, req *http.Request) { + defer req.Body.Close() + + var inv invocation + if err := gob.NewDecoder(req.Body).Decode(&inv); err != nil { + log.Errorf(c, "delay: failed decoding task payload: %v", err) + log.Warningf(c, "delay: dropping task") + return + } + + f := funcs[inv.Key] + if f == nil { + log.Errorf(c, "delay: no func with key %q found", inv.Key) + log.Warningf(c, "delay: dropping task") + return + } + + ft := f.fv.Type() + in := []reflect.Value{reflect.ValueOf(c)} + for _, arg := range inv.Args { + var v reflect.Value + if arg != nil { + v = reflect.ValueOf(arg) + } else { + // Task was passed a nil argument, so we must construct + // the zero value for the argument here. 
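+			// (For instance, a task created by f.Call(c, nil) for a function
+			// whose second parameter is *CustomType decodes arg as nil, and v
+			// is rebuilt here as the typed zero value (*CustomType)(nil).)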
+ n := len(in) // we're constructing the nth argument + var at reflect.Type + if !ft.IsVariadic() || n < ft.NumIn()-1 { + at = ft.In(n) + } else { + at = ft.In(ft.NumIn() - 1).Elem() + } + v = reflect.Zero(at) + } + in = append(in, v) + } + out := f.fv.Call(in) + + if n := ft.NumOut(); n > 0 && ft.Out(n-1) == errorType { + if errv := out[n-1]; !errv.IsNil() { + log.Errorf(c, "delay: func failed (will retry): %v", errv.Interface()) + w.WriteHeader(http.StatusInternalServerError) + return + } + } +} diff --git a/vendor/google.golang.org/appengine/delay/delay_test.go b/vendor/google.golang.org/appengine/delay/delay_test.go new file mode 100644 index 0000000..1c37e79 --- /dev/null +++ b/vendor/google.golang.org/appengine/delay/delay_test.go @@ -0,0 +1,375 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package delay + +import ( + "bytes" + "encoding/gob" + "errors" + "fmt" + "net/http" + "net/http/httptest" + "reflect" + "testing" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + "google.golang.org/appengine/taskqueue" +) + +type CustomType struct { + N int +} + +type CustomInterface interface { + N() int +} + +type CustomImpl int + +func (c CustomImpl) N() int { return int(c) } + +// CustomImpl needs to be registered with gob. +func init() { + gob.Register(CustomImpl(0)) +} + +var ( + invalidFunc = Func("invalid", func() {}) + + regFuncRuns = 0 + regFuncMsg = "" + regFunc = Func("reg", func(c context.Context, arg string) { + regFuncRuns++ + regFuncMsg = arg + }) + + custFuncTally = 0 + custFunc = Func("cust", func(c context.Context, ct *CustomType, ci CustomInterface) { + a, b := 2, 3 + if ct != nil { + a = ct.N + } + if ci != nil { + b = ci.N() + } + custFuncTally += a + b + }) + + anotherCustFunc = Func("cust2", func(c context.Context, n int, ct *CustomType, ci CustomInterface) { + }) + + varFuncMsg = "" + varFunc = Func("variadic", func(c context.Context, format string, args ...int) { + // convert []int to []interface{} for fmt.Sprintf. + as := make([]interface{}, len(args)) + for i, a := range args { + as[i] = a + } + varFuncMsg = fmt.Sprintf(format, as...) 
+ }) + + errFuncRuns = 0 + errFuncErr = errors.New("error!") + errFunc = Func("err", func(c context.Context) error { + errFuncRuns++ + if errFuncRuns == 1 { + return nil + } + return errFuncErr + }) + + dupeWhich = 0 + dupe1Func = Func("dupe", func(c context.Context) { + if dupeWhich == 0 { + dupeWhich = 1 + } + }) + dupe2Func = Func("dupe", func(c context.Context) { + if dupeWhich == 0 { + dupeWhich = 2 + } + }) +) + +type fakeContext struct { + ctx context.Context + logging [][]interface{} +} + +func newFakeContext() *fakeContext { + f := new(fakeContext) + f.ctx = internal.WithCallOverride(context.Background(), f.call) + f.ctx = internal.WithLogOverride(f.ctx, f.logf) + return f +} + +func (f *fakeContext) call(ctx context.Context, service, method string, in, out proto.Message) error { + panic("should never be called") +} + +var logLevels = map[int64]string{1: "INFO", 3: "ERROR"} + +func (f *fakeContext) logf(level int64, format string, args ...interface{}) { + f.logging = append(f.logging, append([]interface{}{logLevels[level], format}, args...)) +} + +func TestInvalidFunction(t *testing.T) { + c := newFakeContext() + + if got, want := invalidFunc.Call(c.ctx), fmt.Errorf("delay: func is invalid: %s", errFirstArg); got.Error() != want.Error() { + t.Errorf("Incorrect error: got %q, want %q", got, want) + } +} + +func TestVariadicFunctionArguments(t *testing.T) { + // Check the argument type validation for variadic functions. + + c := newFakeContext() + + calls := 0 + taskqueueAdder = func(c context.Context, t *taskqueue.Task, _ string) (*taskqueue.Task, error) { + calls++ + return t, nil + } + + varFunc.Call(c.ctx, "hi") + varFunc.Call(c.ctx, "%d", 12) + varFunc.Call(c.ctx, "%d %d %d", 3, 1, 4) + if calls != 3 { + t.Errorf("Got %d calls to taskqueueAdder, want 3", calls) + } + + if got, want := varFunc.Call(c.ctx, "%d %s", 12, "a string is bad"), errors.New("delay: argument 3 has wrong type: string is not assignable to int"); got.Error() != want.Error() { + t.Errorf("Incorrect error: got %q, want %q", got, want) + } +} + +func TestBadArguments(t *testing.T) { + // Try running regFunc with different sets of inappropriate arguments. + + c := newFakeContext() + + tests := []struct { + args []interface{} // all except context + wantErr string + }{ + { + args: nil, + wantErr: "delay: too few arguments to func: 1 < 2", + }, + { + args: []interface{}{"lala", 53}, + wantErr: "delay: too many arguments to func: 3 > 2", + }, + { + args: []interface{}{53}, + wantErr: "delay: argument 1 has wrong type: int is not assignable to string", + }, + } + for i, tc := range tests { + got := regFunc.Call(c.ctx, tc.args...) + if got.Error() != tc.wantErr { + t.Errorf("Call %v: got %q, want %q", i, got, tc.wantErr) + } + } +} + +func TestRunningFunction(t *testing.T) { + c := newFakeContext() + + // Fake out the adding of a task. + var task *taskqueue.Task + taskqueueAdder = func(_ context.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) { + if queue != "" { + t.Errorf(`Got queue %q, expected ""`, queue) + } + task = tk + return tk, nil + } + + regFuncRuns, regFuncMsg = 0, "" // reset state + const msg = "Why, hello!" + regFunc.Call(c.ctx, msg) + + // Simulate the Task Queue service. 
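+	// (task.Payload is the gob-encoded invocation, so POSTing it back to the
+	// reserved path and calling runFunc directly stands in for the production
+	// Task Queue delivery.)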
+ req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload)) + if err != nil { + t.Fatalf("Failed making http.Request: %v", err) + } + rw := httptest.NewRecorder() + runFunc(c.ctx, rw, req) + + if regFuncRuns != 1 { + t.Errorf("regFuncRuns: got %d, want 1", regFuncRuns) + } + if regFuncMsg != msg { + t.Errorf("regFuncMsg: got %q, want %q", regFuncMsg, msg) + } +} + +func TestCustomType(t *testing.T) { + c := newFakeContext() + + // Fake out the adding of a task. + var task *taskqueue.Task + taskqueueAdder = func(_ context.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) { + if queue != "" { + t.Errorf(`Got queue %q, expected ""`, queue) + } + task = tk + return tk, nil + } + + custFuncTally = 0 // reset state + custFunc.Call(c.ctx, &CustomType{N: 11}, CustomImpl(13)) + + // Simulate the Task Queue service. + req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload)) + if err != nil { + t.Fatalf("Failed making http.Request: %v", err) + } + rw := httptest.NewRecorder() + runFunc(c.ctx, rw, req) + + if custFuncTally != 24 { + t.Errorf("custFuncTally = %d, want 24", custFuncTally) + } + + // Try the same, but with nil values; one is a nil pointer (and thus a non-nil interface value), + // and the other is a nil interface value. + custFuncTally = 0 // reset state + custFunc.Call(c.ctx, (*CustomType)(nil), nil) + + // Simulate the Task Queue service. + req, err = http.NewRequest("POST", path, bytes.NewBuffer(task.Payload)) + if err != nil { + t.Fatalf("Failed making http.Request: %v", err) + } + rw = httptest.NewRecorder() + runFunc(c.ctx, rw, req) + + if custFuncTally != 5 { + t.Errorf("custFuncTally = %d, want 5", custFuncTally) + } +} + +func TestRunningVariadic(t *testing.T) { + c := newFakeContext() + + // Fake out the adding of a task. + var task *taskqueue.Task + taskqueueAdder = func(_ context.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) { + if queue != "" { + t.Errorf(`Got queue %q, expected ""`, queue) + } + task = tk + return tk, nil + } + + varFuncMsg = "" // reset state + varFunc.Call(c.ctx, "Amiga %d has %d KB RAM", 500, 512) + + // Simulate the Task Queue service. + req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload)) + if err != nil { + t.Fatalf("Failed making http.Request: %v", err) + } + rw := httptest.NewRecorder() + runFunc(c.ctx, rw, req) + + const expected = "Amiga 500 has 512 KB RAM" + if varFuncMsg != expected { + t.Errorf("varFuncMsg = %q, want %q", varFuncMsg, expected) + } +} + +func TestErrorFunction(t *testing.T) { + c := newFakeContext() + + // Fake out the adding of a task. + var task *taskqueue.Task + taskqueueAdder = func(_ context.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) { + if queue != "" { + t.Errorf(`Got queue %q, expected ""`, queue) + } + task = tk + return tk, nil + } + + errFunc.Call(c.ctx) + + // Simulate the Task Queue service. + // The first call should succeed; the second call should fail. 
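+	// (errFunc returns nil on its first run and errFuncErr on every run after
+	// that, so replaying the same payload twice exercises both outcomes; on
+	// failure runFunc answers HTTP 500, which is the signal the real queue
+	// uses to retry the task.)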
+ { + req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload)) + if err != nil { + t.Fatalf("Failed making http.Request: %v", err) + } + rw := httptest.NewRecorder() + runFunc(c.ctx, rw, req) + } + { + req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload)) + if err != nil { + t.Fatalf("Failed making http.Request: %v", err) + } + rw := httptest.NewRecorder() + runFunc(c.ctx, rw, req) + if rw.Code != http.StatusInternalServerError { + t.Errorf("Got status code %d, want %d", rw.Code, http.StatusInternalServerError) + } + + wantLogging := [][]interface{}{ + {"ERROR", "delay: func failed (will retry): %v", errFuncErr}, + } + if !reflect.DeepEqual(c.logging, wantLogging) { + t.Errorf("Incorrect logging: got %+v, want %+v", c.logging, wantLogging) + } + } +} + +func TestDuplicateFunction(t *testing.T) { + c := newFakeContext() + + // Fake out the adding of a task. + var task *taskqueue.Task + taskqueueAdder = func(_ context.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) { + if queue != "" { + t.Errorf(`Got queue %q, expected ""`, queue) + } + task = tk + return tk, nil + } + + if err := dupe1Func.Call(c.ctx); err == nil { + t.Error("dupe1Func.Call did not return error") + } + if task != nil { + t.Error("dupe1Func.Call posted a task") + } + if err := dupe2Func.Call(c.ctx); err != nil { + t.Errorf("dupe2Func.Call error: %v", err) + } + if task == nil { + t.Fatalf("dupe2Func.Call did not post a task") + } + + // Simulate the Task Queue service. + req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload)) + if err != nil { + t.Fatalf("Failed making http.Request: %v", err) + } + rw := httptest.NewRecorder() + runFunc(c.ctx, rw, req) + + if dupeWhich == 1 { + t.Error("dupe2Func.Call used old registered function") + } else if dupeWhich != 2 { + t.Errorf("dupeWhich = %d; want 2", dupeWhich) + } +} diff --git a/vendor/google.golang.org/appengine/demos/guestbook/app.yaml b/vendor/google.golang.org/appengine/demos/guestbook/app.yaml new file mode 100644 index 0000000..3342503 --- /dev/null +++ b/vendor/google.golang.org/appengine/demos/guestbook/app.yaml @@ -0,0 +1,14 @@ +# Demo application for App Engine "flexible environment". +runtime: go +vm: true +api_version: go1 + +handlers: +# Favicon. Without this, the browser hits this once per page view. +- url: /favicon.ico + static_files: favicon.ico + upload: favicon.ico + +# Main app. All the real work is here. +- url: /.* + script: _go_app diff --git a/vendor/google.golang.org/appengine/demos/guestbook/favicon.ico b/vendor/google.golang.org/appengine/demos/guestbook/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..1a71ea772e972df2e955b36261ae5d7f53b9c9b1 GIT binary patch literal 1150 zcmd5)OKVd>6rNI{3l|0|#f50WO+XjL$3`~+!3T;Ix^p413yHRhmS9^&ywzgVMH)<- zCQV34A4!wjylP%GkDGUz=QT;NG>gb*8n4`ye3#{^zkce45EvUvW9N8Y#yV5-i2?n|gRoZc<%s zmh~rn+mM*?Ph4ge?;K&MO=5dH$Y(hhHh2y-K8|XULpI_@BFLhc^dYyZ;RQd6ULnX% zY7XBrdX%kq;dvp(g8Ue4lb2A6TCi0~Be~{)e`OwVpB?PH2D#WOBIv*k9@h8svMjN%LB8=hT3X!a(GF&~^uI=HQRRDv3$W^b7s@-uyV zh0r)6|MU>DZWSsYRM^NkQI4_jJUxMR7lX9x9lUlU?B*HdJ=56ZweCUP$ZoY9rFF+p zujNrIgppL7LdhyaA;coEVs7#ao|(V$&G-5wg`mF4|60vrXX_&(76p9^7qVeblj~)T zDEamE)_Ys!wZ}cExSr6rOJIAGMbZ`| + + + Guestbook Demo + + +

+    <p>
+      {{with .Email}}You are currently logged in as {{.}}.{{end}}
+      {{with .Login}}<a href="{{.}}">Sign in</a>{{end}}
+      {{with .Logout}}<a href="{{.}}">Sign out</a>{{end}}
+    </p>
+
+    {{range .Greetings }}
+    <p>
+      {{with .Author}}<b>{{.}}</b>{{else}}An anonymous person{{end}}
+      on <em>{{.Date.Format "3:04pm, Mon 2 Jan"}}</em>
+      wrote <blockquote>{{.Content}}</blockquote>
+    </p>
+    {{end}}
+
+    <form action="/sign" method="post">
+      <div><textarea name="content" rows="3" cols="60"></textarea></div>
+      <div><input type="submit" value="Sign Guestbook"></div>
+    </form>
+  </body>
+</html>
    + + diff --git a/vendor/google.golang.org/appengine/demos/helloworld/app.yaml b/vendor/google.golang.org/appengine/demos/helloworld/app.yaml new file mode 100644 index 0000000..1509119 --- /dev/null +++ b/vendor/google.golang.org/appengine/demos/helloworld/app.yaml @@ -0,0 +1,10 @@ +runtime: go +api_version: go1 +vm: true + +handlers: +- url: /favicon.ico + static_files: favicon.ico + upload: favicon.ico +- url: /.* + script: _go_app diff --git a/vendor/google.golang.org/appengine/demos/helloworld/favicon.ico b/vendor/google.golang.org/appengine/demos/helloworld/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..f19c04d270a3865384ce3db41412448692b8cba4 GIT binary patch literal 1150 zcmchVT}YE*6vvO#Ozpx+V3I*aL_(n#kx)ooMSh^NcA;pHT?E=LQZz`!FSHCRr@prN zwT3p)%=s-dmt~u}Ev?O|`zZYCq8qyiy0L=y-}5?0YR${e%Q?^Uob&&^@ADoGkso`+ zVq)QuG~pS#!VCV*}8%$~So~Xo7Z}fn#{=kyT1ep!Zb zv1b!}`L%0%gZ-u8{86F};i`UY4wfg*lK=n! literal 0 HcmV?d00001 diff --git a/vendor/google.golang.org/appengine/demos/helloworld/helloworld.go b/vendor/google.golang.org/appengine/demos/helloworld/helloworld.go new file mode 100644 index 0000000..fbe9f56 --- /dev/null +++ b/vendor/google.golang.org/appengine/demos/helloworld/helloworld.go @@ -0,0 +1,50 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// This example only works on App Engine "flexible environment". +// +build !appengine + +package main + +import ( + "html/template" + "net/http" + "time" + + "google.golang.org/appengine" + "google.golang.org/appengine/log" +) + +var initTime = time.Now() + +func main() { + http.HandleFunc("/", handle) + appengine.Main() +} + +func handle(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/" { + http.NotFound(w, r) + return + } + + ctx := appengine.NewContext(r) + log.Infof(ctx, "Serving the front page.") + + tmpl.Execute(w, time.Since(initTime)) +} + +var tmpl = template.Must(template.New("front").Parse(` + + +

+<p>Hello, World! 세상아 안녕!</p>
+
+<p>This instance has been running for <em>{{.}}</em>.</p>
    + + +`)) diff --git a/vendor/google.golang.org/appengine/errors.go b/vendor/google.golang.org/appengine/errors.go new file mode 100644 index 0000000..16d0772 --- /dev/null +++ b/vendor/google.golang.org/appengine/errors.go @@ -0,0 +1,46 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// This file provides error functions for common API failure modes. + +package appengine + +import ( + "fmt" + + "google.golang.org/appengine/internal" +) + +// IsOverQuota reports whether err represents an API call failure +// due to insufficient available quota. +func IsOverQuota(err error) bool { + callErr, ok := err.(*internal.CallError) + return ok && callErr.Code == 4 +} + +// MultiError is returned by batch operations when there are errors with +// particular elements. Errors will be in a one-to-one correspondence with +// the input elements; successful elements will have a nil entry. +type MultiError []error + +func (m MultiError) Error() string { + s, n := "", 0 + for _, e := range m { + if e != nil { + if n == 0 { + s = e.Error() + } + n++ + } + } + switch n { + case 0: + return "(0 errors)" + case 1: + return s + case 2: + return s + " (and 1 other error)" + } + return fmt.Sprintf("%s (and %d other errors)", s, n-1) +} diff --git a/vendor/google.golang.org/appengine/file/file.go b/vendor/google.golang.org/appengine/file/file.go new file mode 100644 index 0000000..c3cd58b --- /dev/null +++ b/vendor/google.golang.org/appengine/file/file.go @@ -0,0 +1,28 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package file provides helper functions for using Google Cloud Storage. +package file + +import ( + "fmt" + + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + aipb "google.golang.org/appengine/internal/app_identity" +) + +// DefaultBucketName returns the name of this application's +// default Google Cloud Storage bucket. +func DefaultBucketName(c context.Context) (string, error) { + req := &aipb.GetDefaultGcsBucketNameRequest{} + res := &aipb.GetDefaultGcsBucketNameResponse{} + + err := internal.Call(c, "app_identity_service", "GetDefaultGcsBucketName", req, res) + if err != nil { + return "", fmt.Errorf("file: no default bucket name returned in RPC response: %v", res) + } + return res.GetDefaultGcsBucketName(), nil +} diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go new file mode 100644 index 0000000..b8dcf8f --- /dev/null +++ b/vendor/google.golang.org/appengine/identity.go @@ -0,0 +1,142 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package appengine + +import ( + "time" + + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/app_identity" + modpb "google.golang.org/appengine/internal/modules" +) + +// AppID returns the application ID for the current application. +// The string will be a plain application ID (e.g. "appid"), with a +// domain prefix for custom domain deployments (e.g. "example.com:appid"). +func AppID(c context.Context) string { return internal.AppID(c) } + +// DefaultVersionHostname returns the standard hostname of the default version +// of the current application (e.g. 
"my-app.appspot.com"). This is suitable for +// use in constructing URLs. +func DefaultVersionHostname(c context.Context) string { + return internal.DefaultVersionHostname(c) +} + +// ModuleName returns the module name of the current instance. +func ModuleName(c context.Context) string { + return internal.ModuleName(c) +} + +// ModuleHostname returns a hostname of a module instance. +// If module is the empty string, it refers to the module of the current instance. +// If version is empty, it refers to the version of the current instance if valid, +// or the default version of the module of the current instance. +// If instance is empty, ModuleHostname returns the load-balancing hostname. +func ModuleHostname(c context.Context, module, version, instance string) (string, error) { + req := &modpb.GetHostnameRequest{} + if module != "" { + req.Module = &module + } + if version != "" { + req.Version = &version + } + if instance != "" { + req.Instance = &instance + } + res := &modpb.GetHostnameResponse{} + if err := internal.Call(c, "modules", "GetHostname", req, res); err != nil { + return "", err + } + return *res.Hostname, nil +} + +// VersionID returns the version ID for the current application. +// It will be of the form "X.Y", where X is specified in app.yaml, +// and Y is a number generated when each version of the app is uploaded. +// It does not include a module name. +func VersionID(c context.Context) string { return internal.VersionID(c) } + +// InstanceID returns a mostly-unique identifier for this instance. +func InstanceID() string { return internal.InstanceID() } + +// Datacenter returns an identifier for the datacenter that the instance is running in. +func Datacenter(c context.Context) string { return internal.Datacenter(c) } + +// ServerSoftware returns the App Engine release version. +// In production, it looks like "Google App Engine/X.Y.Z". +// In the development appserver, it looks like "Development/X.Y". +func ServerSoftware() string { return internal.ServerSoftware() } + +// RequestID returns a string that uniquely identifies the request. +func RequestID(c context.Context) string { return internal.RequestID(c) } + +// AccessToken generates an OAuth2 access token for the specified scopes on +// behalf of service account of this application. This token will expire after +// the returned time. +func AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) { + req := &pb.GetAccessTokenRequest{Scope: scopes} + res := &pb.GetAccessTokenResponse{} + + err = internal.Call(c, "app_identity_service", "GetAccessToken", req, res) + if err != nil { + return "", time.Time{}, err + } + return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil +} + +// Certificate represents a public certificate for the app. +type Certificate struct { + KeyName string + Data []byte // PEM-encoded X.509 certificate +} + +// PublicCertificates retrieves the public certificates for the app. +// They can be used to verify a signature returned by SignBytes. 
+func PublicCertificates(c context.Context) ([]Certificate, error) { + req := &pb.GetPublicCertificateForAppRequest{} + res := &pb.GetPublicCertificateForAppResponse{} + if err := internal.Call(c, "app_identity_service", "GetPublicCertificatesForApp", req, res); err != nil { + return nil, err + } + var cs []Certificate + for _, pc := range res.PublicCertificateList { + cs = append(cs, Certificate{ + KeyName: pc.GetKeyName(), + Data: []byte(pc.GetX509CertificatePem()), + }) + } + return cs, nil +} + +// ServiceAccount returns a string representing the service account name, in +// the form of an email address (typically app_id@appspot.gserviceaccount.com). +func ServiceAccount(c context.Context) (string, error) { + req := &pb.GetServiceAccountNameRequest{} + res := &pb.GetServiceAccountNameResponse{} + + err := internal.Call(c, "app_identity_service", "GetServiceAccountName", req, res) + if err != nil { + return "", err + } + return res.GetServiceAccountName(), err +} + +// SignBytes signs bytes using a private key unique to your application. +func SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) { + req := &pb.SignForAppRequest{BytesToSign: bytes} + res := &pb.SignForAppResponse{} + + if err := internal.Call(c, "app_identity_service", "SignForApp", req, res); err != nil { + return "", nil, err + } + return res.GetKeyName(), res.GetSignatureBytes(), nil +} + +func init() { + internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name) + internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name) +} diff --git a/vendor/google.golang.org/appengine/image/image.go b/vendor/google.golang.org/appengine/image/image.go new file mode 100644 index 0000000..027a41b --- /dev/null +++ b/vendor/google.golang.org/appengine/image/image.go @@ -0,0 +1,67 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package image provides image services. +package image // import "google.golang.org/appengine/image" + +import ( + "fmt" + "net/url" + + "golang.org/x/net/context" + + "google.golang.org/appengine" + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/image" +) + +type ServingURLOptions struct { + Secure bool // whether the URL should use HTTPS + + // Size must be between zero and 1600. + // If Size is non-zero, a resized version of the image is served, + // and Size is the served image's longest dimension. The aspect ratio is preserved. + // If Crop is true the image is cropped from the center instead of being resized. + Size int + Crop bool +} + +// ServingURL returns a URL that will serve an image from Blobstore. +func ServingURL(c context.Context, key appengine.BlobKey, opts *ServingURLOptions) (*url.URL, error) { + req := &pb.ImagesGetUrlBaseRequest{ + BlobKey: (*string)(&key), + } + if opts != nil && opts.Secure { + req.CreateSecureUrl = &opts.Secure + } + res := &pb.ImagesGetUrlBaseResponse{} + if err := internal.Call(c, "images", "GetUrlBase", req, res); err != nil { + return nil, err + } + + // The URL may have suffixes added to dynamically resize or crop: + // - adding "=s32" will serve the image resized to 32 pixels, preserving the aspect ratio. + // - adding "=s32-c" is the same as "=s32" except it will be cropped. 
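+	// For example, opts = &ServingURLOptions{Size: 400, Crop: true} turns a
+	// base URL like http://lh3.ggpht.com/someImageId (illustrative) into
+	// http://lh3.ggpht.com/someImageId=s400-c.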
+ u := *res.Url + if opts != nil && opts.Size > 0 { + u += fmt.Sprintf("=s%d", opts.Size) + if opts.Crop { + u += "-c" + } + } + return url.Parse(u) +} + +// DeleteServingURL deletes the serving URL for an image. +func DeleteServingURL(c context.Context, key appengine.BlobKey) error { + req := &pb.ImagesDeleteUrlBaseRequest{ + BlobKey: (*string)(&key), + } + res := &pb.ImagesDeleteUrlBaseResponse{} + return internal.Call(c, "images", "DeleteUrlBase", req, res) +} + +func init() { + internal.RegisterErrorCodeMap("images", pb.ImagesServiceError_ErrorCode_name) +} diff --git a/vendor/google.golang.org/appengine/internal/aetesting/fake.go b/vendor/google.golang.org/appengine/internal/aetesting/fake.go new file mode 100644 index 0000000..eb5b2c6 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/aetesting/fake.go @@ -0,0 +1,81 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package aetesting provides utilities for testing App Engine packages. +// This is not for testing user applications. +package aetesting + +import ( + "fmt" + "net/http" + "reflect" + "testing" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" +) + +// FakeSingleContext returns a context whose Call invocations will be serviced +// by f, which should be a function that has two arguments of the input and output +// protocol buffer type, and one error return. +func FakeSingleContext(t *testing.T, service, method string, f interface{}) context.Context { + fv := reflect.ValueOf(f) + if fv.Kind() != reflect.Func { + t.Fatal("not a function") + } + ft := fv.Type() + if ft.NumIn() != 2 || ft.NumOut() != 1 { + t.Fatalf("f has %d in and %d out, want 2 in and 1 out", ft.NumIn(), ft.NumOut()) + } + for i := 0; i < 2; i++ { + at := ft.In(i) + if !at.Implements(protoMessageType) { + t.Fatalf("arg %d does not implement proto.Message", i) + } + } + if ft.Out(0) != errorType { + t.Fatalf("f's return is %v, want error", ft.Out(0)) + } + s := &single{ + t: t, + service: service, + method: method, + f: fv, + } + return internal.WithCallOverride(internal.ContextForTesting(&http.Request{}), s.call) +} + +var ( + protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem() + errorType = reflect.TypeOf((*error)(nil)).Elem() +) + +type single struct { + t *testing.T + service, method string + f reflect.Value +} + +func (s *single) call(ctx context.Context, service, method string, in, out proto.Message) error { + if service == "__go__" { + if method == "GetNamespace" { + return nil // always yield an empty namespace + } + return fmt.Errorf("Unknown API call /%s.%s", service, method) + } + if service != s.service || method != s.method { + s.t.Fatalf("Unexpected call to /%s.%s", service, method) + } + ins := []reflect.Value{ + reflect.ValueOf(in), + reflect.ValueOf(out), + } + outs := s.f.Call(ins) + if outs[0].IsNil() { + return nil + } + return outs[0].Interface().(error) +} diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go new file mode 100644 index 0000000..ec5aa59 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/api.go @@ -0,0 +1,646 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
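+
+// This file implements the API bridge used outside the classic sandbox (note
+// the !appengine build tag below): each Call is marshaled as a remote_api
+// request and POSTed over HTTP to the service bridge at apiURL.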
+ +// +build !appengine + +package internal + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "log" + "net" + "net/http" + "net/url" + "os" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" + + basepb "google.golang.org/appengine/internal/base" + logpb "google.golang.org/appengine/internal/log" + remotepb "google.golang.org/appengine/internal/remote_api" +) + +const ( + apiPath = "/rpc_http" +) + +var ( + // Incoming headers. + ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket") + dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo") + traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context") + curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace") + userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP") + remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr") + + // Outgoing headers. + apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint") + apiEndpointHeaderValue = []string{"app-engine-apis"} + apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method") + apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"} + apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline") + apiContentType = http.CanonicalHeaderKey("Content-Type") + apiContentTypeValue = []string{"application/octet-stream"} + logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count") + + apiHTTPClient = &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: limitDial, + }, + } +) + +func apiURL() *url.URL { + host, port := "appengine.googleapis.internal", "10001" + if h := os.Getenv("API_HOST"); h != "" { + host = h + } + if p := os.Getenv("API_PORT"); p != "" { + port = p + } + return &url.URL{ + Scheme: "http", + Host: host + ":" + port, + Path: apiPath, + } +} + +func handleHTTP(w http.ResponseWriter, r *http.Request) { + c := &context{ + req: r, + outHeader: w.Header(), + apiURL: apiURL(), + } + stopFlushing := make(chan int) + + ctxs.Lock() + ctxs.m[r] = c + ctxs.Unlock() + defer func() { + ctxs.Lock() + delete(ctxs.m, r) + ctxs.Unlock() + }() + + // Patch up RemoteAddr so it looks reasonable. + if addr := r.Header.Get(userIPHeader); addr != "" { + r.RemoteAddr = addr + } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { + r.RemoteAddr = addr + } else { + // Should not normally reach here, but pick a sensible default anyway. + r.RemoteAddr = "127.0.0.1" + } + // The address in the headers will most likely be of these forms: + // 123.123.123.123 + // 2001:db8::1 + // net/http.Request.RemoteAddr is specified to be in "IP:port" form. + if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { + // Assume the remote address is only a host; add a default port. + r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") + } + + // Start goroutine responsible for flushing app logs. + // This is done after adding c to ctx.m (and stopped before removing it) + // because flushing logs requires making an API call. + go c.logFlusher(stopFlushing) + + executeRequestSafely(c, r) + c.outHeader = nil // make sure header changes aren't respected any more + + stopFlushing <- 1 // any logging beyond this point will be dropped + + // Flush any pending logs asynchronously. 
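+	// (The count reported via logFlushHeader below includes the final
+	// asynchronous flush kicked off here when lines are still pending.)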
+ c.pendingLogs.Lock() + flushes := c.pendingLogs.flushes + if len(c.pendingLogs.lines) > 0 { + flushes++ + } + c.pendingLogs.Unlock() + go c.flushLog(false) + w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + + // Avoid nil Write call if c.Write is never called. + if c.outCode != 0 { + w.WriteHeader(c.outCode) + } + if c.outBody != nil { + w.Write(c.outBody) + } +} + +func executeRequestSafely(c *context, r *http.Request) { + defer func() { + if x := recover(); x != nil { + logf(c, 4, "%s", renderPanic(x)) // 4 == critical + c.outCode = 500 + } + }() + + http.DefaultServeMux.ServeHTTP(c, r) +} + +func renderPanic(x interface{}) string { + buf := make([]byte, 16<<10) // 16 KB should be plenty + buf = buf[:runtime.Stack(buf, false)] + + // Remove the first few stack frames: + // this func + // the recover closure in the caller + // That will root the stack trace at the site of the panic. + const ( + skipStart = "internal.renderPanic" + skipFrames = 2 + ) + start := bytes.Index(buf, []byte(skipStart)) + p := start + for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ { + p = bytes.IndexByte(buf[p+1:], '\n') + p + 1 + if p < 0 { + break + } + } + if p >= 0 { + // buf[start:p+1] is the block to remove. + // Copy buf[p+1:] over buf[start:] and shrink buf. + copy(buf[start:], buf[p+1:]) + buf = buf[:len(buf)-(p+1-start)] + } + + // Add panic heading. + head := fmt.Sprintf("panic: %v\n\n", x) + if len(head) > len(buf) { + // Extremely unlikely to happen. + return head + } + copy(buf[len(head):], buf) + copy(buf, head) + + return string(buf) +} + +var ctxs = struct { + sync.Mutex + m map[*http.Request]*context + bg *context // background context, lazily initialized + // dec is used by tests to decorate the netcontext.Context returned + // for a given request. This allows tests to add overrides (such as + // WithAppIDOverride) to the context. The map is nil outside tests. + dec map[*http.Request]func(netcontext.Context) netcontext.Context +}{ + m: make(map[*http.Request]*context), +} + +// context represents the context of an in-flight HTTP request. +// It implements the appengine.Context and http.ResponseWriter interfaces. +type context struct { + req *http.Request + + outCode int + outHeader http.Header + outBody []byte + + pendingLogs struct { + sync.Mutex + lines []*logpb.UserAppLogLine + flushes int + } + + apiURL *url.URL +} + +var contextKey = "holds a *context" + +func fromContext(ctx netcontext.Context) *context { + c, _ := ctx.Value(&contextKey).(*context) + return c +} + +func withContext(parent netcontext.Context, c *context) netcontext.Context { + ctx := netcontext.WithValue(parent, &contextKey, c) + if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { + ctx = withNamespace(ctx, ns) + } + return ctx +} + +func toContext(c *context) netcontext.Context { + return withContext(netcontext.Background(), c) +} + +func IncomingHeaders(ctx netcontext.Context) http.Header { + if c := fromContext(ctx); c != nil { + return c.req.Header + } + return nil +} + +func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { + ctxs.Lock() + c := ctxs.m[req] + d := ctxs.dec[req] + ctxs.Unlock() + + if d != nil { + parent = d(parent) + } + + if c == nil { + // Someone passed in an http.Request that is not in-flight. + // We panic here rather than panicking at a later point + // so that stack traces will be more sensible. 
+ log.Panic("appengine: NewContext passed an unknown http.Request") + } + return withContext(parent, c) +} + +func BackgroundContext() netcontext.Context { + ctxs.Lock() + defer ctxs.Unlock() + + if ctxs.bg != nil { + return toContext(ctxs.bg) + } + + // Compute background security ticket. + appID := partitionlessAppID() + escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) + majVersion := VersionID(nil) + if i := strings.Index(majVersion, "."); i > 0 { + majVersion = majVersion[:i] + } + ticket := fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) + + ctxs.bg = &context{ + req: &http.Request{ + Header: http.Header{ + ticketHeader: []string{ticket}, + }, + }, + apiURL: apiURL(), + } + + // TODO(dsymonds): Wire up the shutdown handler to do a final flush. + go ctxs.bg.logFlusher(make(chan int)) + + return toContext(ctxs.bg) +} + +// RegisterTestRequest registers the HTTP request req for testing, such that +// any API calls are sent to the provided URL. It returns a closure to delete +// the registration. +// It should only be used by aetest package. +func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) func() { + c := &context{ + req: req, + apiURL: apiURL, + } + ctxs.Lock() + defer ctxs.Unlock() + if _, ok := ctxs.m[req]; ok { + log.Panic("req already associated with context") + } + if _, ok := ctxs.dec[req]; ok { + log.Panic("req already associated with context") + } + if ctxs.dec == nil { + ctxs.dec = make(map[*http.Request]func(netcontext.Context) netcontext.Context) + } + ctxs.m[req] = c + ctxs.dec[req] = decorate + + return func() { + ctxs.Lock() + delete(ctxs.m, req) + delete(ctxs.dec, req) + ctxs.Unlock() + } +} + +var errTimeout = &CallError{ + Detail: "Deadline exceeded", + Code: int32(remotepb.RpcError_CANCELLED), + Timeout: true, +} + +func (c *context) Header() http.Header { return c.outHeader } + +// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status +// codes do not permit a response body (nor response entity headers such as +// Content-Length, Content-Type, etc). +func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == 204: + return false + case status == 304: + return false + } + return true +} + +func (c *context) Write(b []byte) (int, error) { + if c.outCode == 0 { + c.WriteHeader(http.StatusOK) + } + if len(b) > 0 && !bodyAllowedForStatus(c.outCode) { + return 0, http.ErrBodyNotAllowed + } + c.outBody = append(c.outBody, b...) 
+ return len(b), nil +} + +func (c *context) WriteHeader(code int) { + if c.outCode != 0 { + logf(c, 3, "WriteHeader called multiple times on request.") // error level + return + } + c.outCode = code +} + +func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { + hreq := &http.Request{ + Method: "POST", + URL: c.apiURL, + Header: http.Header{ + apiEndpointHeader: apiEndpointHeaderValue, + apiMethodHeader: apiMethodHeaderValue, + apiContentType: apiContentTypeValue, + apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)}, + }, + Body: ioutil.NopCloser(bytes.NewReader(body)), + ContentLength: int64(len(body)), + Host: c.apiURL.Host, + } + if info := c.req.Header.Get(dapperHeader); info != "" { + hreq.Header.Set(dapperHeader, info) + } + if info := c.req.Header.Get(traceHeader); info != "" { + hreq.Header.Set(traceHeader, info) + } + + tr := apiHTTPClient.Transport.(*http.Transport) + + var timedOut int32 // atomic; set to 1 if timed out + t := time.AfterFunc(timeout, func() { + atomic.StoreInt32(&timedOut, 1) + tr.CancelRequest(hreq) + }) + defer t.Stop() + defer func() { + // Check if timeout was exceeded. + if atomic.LoadInt32(&timedOut) != 0 { + err = errTimeout + } + }() + + hresp, err := apiHTTPClient.Do(hreq) + if err != nil { + return nil, &CallError{ + Detail: fmt.Sprintf("service bridge HTTP failed: %v", err), + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + defer hresp.Body.Close() + hrespBody, err := ioutil.ReadAll(hresp.Body) + if hresp.StatusCode != 200 { + return nil, &CallError{ + Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody), + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + if err != nil { + return nil, &CallError{ + Detail: fmt.Sprintf("service bridge response bad: %v", err), + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + return hrespBody, nil +} + +func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { + if ns := NamespaceFromContext(ctx); ns != "" { + if fn, ok := NamespaceMods[service]; ok { + fn(in, ns) + } + } + + if f, ctx, ok := callOverrideFromContext(ctx); ok { + return f(ctx, service, method, in, out) + } + + // Handle already-done contexts quickly. + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + c := fromContext(ctx) + if c == nil { + // Give a good error message rather than a panic lower down. + return errors.New("not an App Engine context") + } + + // Apply transaction modifications if we're in a transaction. + if t := transactionFromContext(ctx); t != nil { + if t.finished { + return errors.New("transaction context has expired") + } + applyTransaction(in, &t.transaction) + } + + // Default RPC timeout is 60s. 
+ timeout := 60 * time.Second + if deadline, ok := ctx.Deadline(); ok { + timeout = deadline.Sub(time.Now()) + } + + data, err := proto.Marshal(in) + if err != nil { + return err + } + + ticket := c.req.Header.Get(ticketHeader) + req := &remotepb.Request{ + ServiceName: &service, + Method: &method, + Request: data, + RequestId: &ticket, + } + hreqBody, err := proto.Marshal(req) + if err != nil { + return err + } + + hrespBody, err := c.post(hreqBody, timeout) + if err != nil { + return err + } + + res := &remotepb.Response{} + if err := proto.Unmarshal(hrespBody, res); err != nil { + return err + } + if res.RpcError != nil { + ce := &CallError{ + Detail: res.RpcError.GetDetail(), + Code: *res.RpcError.Code, + } + switch remotepb.RpcError_ErrorCode(ce.Code) { + case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED: + ce.Timeout = true + } + return ce + } + if res.ApplicationError != nil { + return &APIError{ + Service: *req.ServiceName, + Detail: res.ApplicationError.GetDetail(), + Code: *res.ApplicationError.Code, + } + } + if res.Exception != nil || res.JavaException != nil { + // This shouldn't happen, but let's be defensive. + return &CallError{ + Detail: "service bridge returned exception", + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + return proto.Unmarshal(res.Response, out) +} + +func (c *context) Request() *http.Request { + return c.req +} + +func (c *context) addLogLine(ll *logpb.UserAppLogLine) { + // Truncate long log lines. + // TODO(dsymonds): Check if this is still necessary. + const lim = 8 << 10 + if len(*ll.Message) > lim { + suffix := fmt.Sprintf("...(length %d)", len(*ll.Message)) + ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix) + } + + c.pendingLogs.Lock() + c.pendingLogs.lines = append(c.pendingLogs.lines, ll) + c.pendingLogs.Unlock() +} + +var logLevelName = map[int64]string{ + 0: "DEBUG", + 1: "INFO", + 2: "WARNING", + 3: "ERROR", + 4: "CRITICAL", +} + +func logf(c *context, level int64, format string, args ...interface{}) { + s := fmt.Sprintf(format, args...) + s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. + c.addLogLine(&logpb.UserAppLogLine{ + TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), + Level: &level, + Message: &s, + }) + log.Print(logLevelName[level] + ": " + s) +} + +// flushLog attempts to flush any pending logs to the appserver. +// It should not be called concurrently. +func (c *context) flushLog(force bool) (flushed bool) { + c.pendingLogs.Lock() + // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. + n, rem := 0, 30<<20 + for ; n < len(c.pendingLogs.lines); n++ { + ll := c.pendingLogs.lines[n] + // Each log line will require about 3 bytes of overhead. + nb := proto.Size(ll) + 3 + if nb > rem { + break + } + rem -= nb + } + lines := c.pendingLogs.lines[:n] + c.pendingLogs.lines = c.pendingLogs.lines[n:] + c.pendingLogs.Unlock() + + if len(lines) == 0 && !force { + // Nothing to flush. + return false + } + + rescueLogs := false + defer func() { + if rescueLogs { + c.pendingLogs.Lock() + c.pendingLogs.lines = append(lines, c.pendingLogs.lines...) 
+ c.pendingLogs.Unlock() + } + }() + + buf, err := proto.Marshal(&logpb.UserAppLogGroup{ + LogLine: lines, + }) + if err != nil { + log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err) + rescueLogs = true + return false + } + + req := &logpb.FlushRequest{ + Logs: buf, + } + res := &basepb.VoidProto{} + c.pendingLogs.Lock() + c.pendingLogs.flushes++ + c.pendingLogs.Unlock() + if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil { + log.Printf("internal.flushLog: Flush RPC: %v", err) + rescueLogs = true + return false + } + return true +} + +const ( + // Log flushing parameters. + flushInterval = 1 * time.Second + forceFlushInterval = 60 * time.Second +) + +func (c *context) logFlusher(stop <-chan int) { + lastFlush := time.Now() + tick := time.NewTicker(flushInterval) + for { + select { + case <-stop: + // Request finished. + tick.Stop() + return + case <-tick.C: + force := time.Now().Sub(lastFlush) > forceFlushInterval + if c.flushLog(force) { + lastFlush = time.Now() + } + } + } +} + +func ContextForTesting(req *http.Request) netcontext.Context { + return toContext(&context{req: req}) +} diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go new file mode 100644 index 0000000..597f66e --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/api_classic.go @@ -0,0 +1,159 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build appengine + +package internal + +import ( + "errors" + "fmt" + "net/http" + "time" + + "appengine" + "appengine_internal" + basepb "appengine_internal/base" + + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" +) + +var contextKey = "holds an appengine.Context" + +func fromContext(ctx netcontext.Context) appengine.Context { + c, _ := ctx.Value(&contextKey).(appengine.Context) + return c +} + +// This is only for classic App Engine adapters. 
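+// It returns nil if ctx does not carry an appengine.Context.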
+func ClassicContextFromContext(ctx netcontext.Context) appengine.Context { + return fromContext(ctx) +} + +func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context { + ctx := netcontext.WithValue(parent, &contextKey, c) + + s := &basepb.StringProto{} + c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil) + if ns := s.GetValue(); ns != "" { + ctx = NamespacedContext(ctx, ns) + } + + return ctx +} + +func IncomingHeaders(ctx netcontext.Context) http.Header { + if c := fromContext(ctx); c != nil { + if req, ok := c.Request().(*http.Request); ok { + return req.Header + } + } + return nil +} + +func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { + c := appengine.NewContext(req) + return withContext(parent, c) +} + +type testingContext struct { + appengine.Context + + req *http.Request +} + +func (t *testingContext) FullyQualifiedAppID() string { return "dev~testcontext" } +func (t *testingContext) Call(service, method string, _, _ appengine_internal.ProtoMessage, _ *appengine_internal.CallOptions) error { + if service == "__go__" && method == "GetNamespace" { + return nil + } + return fmt.Errorf("testingContext: unsupported Call") +} +func (t *testingContext) Request() interface{} { return t.req } + +func ContextForTesting(req *http.Request) netcontext.Context { + return withContext(netcontext.Background(), &testingContext{req: req}) +} + +func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { + if ns := NamespaceFromContext(ctx); ns != "" { + if fn, ok := NamespaceMods[service]; ok { + fn(in, ns) + } + } + + if f, ctx, ok := callOverrideFromContext(ctx); ok { + return f(ctx, service, method, in, out) + } + + // Handle already-done contexts quickly. + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + c := fromContext(ctx) + if c == nil { + // Give a good error message rather than a panic lower down. + return errors.New("not an App Engine context") + } + + // Apply transaction modifications if we're in a transaction. + if t := transactionFromContext(ctx); t != nil { + if t.finished { + return errors.New("transaction context has expired") + } + applyTransaction(in, &t.transaction) + } + + var opts *appengine_internal.CallOptions + if d, ok := ctx.Deadline(); ok { + opts = &appengine_internal.CallOptions{ + Timeout: d.Sub(time.Now()), + } + } + + err := c.Call(service, method, in, out, opts) + switch v := err.(type) { + case *appengine_internal.APIError: + return &APIError{ + Service: v.Service, + Detail: v.Detail, + Code: v.Code, + } + case *appengine_internal.CallError: + return &CallError{ + Detail: v.Detail, + Code: v.Code, + Timeout: v.Timeout, + } + } + return err +} + +func handleHTTP(w http.ResponseWriter, r *http.Request) { + panic("handleHTTP called; this should be impossible") +} + +func logf(c appengine.Context, level int64, format string, args ...interface{}) { + var fn func(format string, args ...interface{}) + switch level { + case 0: + fn = c.Debugf + case 1: + fn = c.Infof + case 2: + fn = c.Warningf + case 3: + fn = c.Errorf + case 4: + fn = c.Criticalf + default: + // This shouldn't happen. + fn = c.Criticalf + } + fn(format, args...) +} diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go new file mode 100644 index 0000000..2db33a7 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/api_common.go @@ -0,0 +1,86 @@ +// Copyright 2015 Google Inc. All rights reserved. 
+// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +import ( + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" +) + +type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error + +var callOverrideKey = "holds []CallOverrideFunc" + +func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context { + // We avoid appending to any existing call override + // so we don't risk overwriting a popped stack below. + var cofs []CallOverrideFunc + if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok { + cofs = append(cofs, uf...) + } + cofs = append(cofs, f) + return netcontext.WithValue(ctx, &callOverrideKey, cofs) +} + +func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) { + cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc) + if len(cofs) == 0 { + return nil, nil, false + } + // We found a list of overrides; grab the last, and reconstitute a + // context that will hide it. + f := cofs[len(cofs)-1] + ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) + return f, ctx, true +} + +type logOverrideFunc func(level int64, format string, args ...interface{}) + +var logOverrideKey = "holds a logOverrideFunc" + +func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context { + return netcontext.WithValue(ctx, &logOverrideKey, f) +} + +var appIDOverrideKey = "holds a string, being the full app ID" + +func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context { + return netcontext.WithValue(ctx, &appIDOverrideKey, appID) +} + +var namespaceKey = "holds the namespace string" + +func withNamespace(ctx netcontext.Context, ns string) netcontext.Context { + return netcontext.WithValue(ctx, &namespaceKey, ns) +} + +func NamespaceFromContext(ctx netcontext.Context) string { + // If there's no namespace, return the empty string. + ns, _ := ctx.Value(&namespaceKey).(string) + return ns +} + +// FullyQualifiedAppID returns the fully-qualified application ID. +// This may contain a partition prefix (e.g. "s~" for High Replication apps), +// or a domain prefix (e.g. "example.com:"). +func FullyQualifiedAppID(ctx netcontext.Context) string { + if id, ok := ctx.Value(&appIDOverrideKey).(string); ok { + return id + } + return fullyQualifiedAppID(ctx) +} + +func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) { + if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok { + f(level, format, args...) + return + } + logf(fromContext(ctx), level, format, args...) +} + +// NamespacedContext wraps a Context to support namespaces. +func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context { + return withNamespace(ctx, namespace) +} diff --git a/vendor/google.golang.org/appengine/internal/api_race_test.go b/vendor/google.golang.org/appengine/internal/api_race_test.go new file mode 100644 index 0000000..6cfe906 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/api_race_test.go @@ -0,0 +1,9 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
+ +// +build race + +package internal + +func init() { raceDetector = true } diff --git a/vendor/google.golang.org/appengine/internal/api_test.go b/vendor/google.golang.org/appengine/internal/api_test.go new file mode 100644 index 0000000..386d7f6 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/api_test.go @@ -0,0 +1,467 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package internal + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "os/exec" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" + + basepb "google.golang.org/appengine/internal/base" + remotepb "google.golang.org/appengine/internal/remote_api" +) + +const testTicketHeader = "X-Magic-Ticket-Header" + +func init() { + ticketHeader = testTicketHeader +} + +type fakeAPIHandler struct { + hang chan int // used for RunSlowly RPC + + LogFlushes int32 // atomic +} + +func (f *fakeAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + writeResponse := func(res *remotepb.Response) { + hresBody, err := proto.Marshal(res) + if err != nil { + http.Error(w, fmt.Sprintf("Failed encoding API response: %v", err), 500) + return + } + w.Write(hresBody) + } + + if r.URL.Path != "/rpc_http" { + http.NotFound(w, r) + return + } + hreqBody, err := ioutil.ReadAll(r.Body) + if err != nil { + http.Error(w, fmt.Sprintf("Bad body: %v", err), 500) + return + } + apiReq := &remotepb.Request{} + if err := proto.Unmarshal(hreqBody, apiReq); err != nil { + http.Error(w, fmt.Sprintf("Bad encoded API request: %v", err), 500) + return + } + if *apiReq.RequestId != "s3cr3t" { + writeResponse(&remotepb.Response{ + RpcError: &remotepb.RpcError{ + Code: proto.Int32(int32(remotepb.RpcError_SECURITY_VIOLATION)), + Detail: proto.String("bad security ticket"), + }, + }) + return + } + if got, want := r.Header.Get(dapperHeader), "trace-001"; got != want { + writeResponse(&remotepb.Response{ + RpcError: &remotepb.RpcError{ + Code: proto.Int32(int32(remotepb.RpcError_BAD_REQUEST)), + Detail: proto.String(fmt.Sprintf("trace info = %q, want %q", got, want)), + }, + }) + return + } + + service, method := *apiReq.ServiceName, *apiReq.Method + var resOut proto.Message + if service == "actordb" && method == "LookupActor" { + req := &basepb.StringProto{} + res := &basepb.StringProto{} + if err := proto.Unmarshal(apiReq.Request, req); err != nil { + http.Error(w, fmt.Sprintf("Bad encoded request: %v", err), 500) + return + } + if *req.Value == "Doctor Who" { + res.Value = proto.String("David Tennant") + } + resOut = res + } + if service == "errors" { + switch method { + case "Non200": + http.Error(w, "I'm a little teapot.", 418) + return + case "ShortResponse": + w.Header().Set("Content-Length", "100") + w.Write([]byte("way too short")) + return + case "OverQuota": + writeResponse(&remotepb.Response{ + RpcError: &remotepb.RpcError{ + Code: proto.Int32(int32(remotepb.RpcError_OVER_QUOTA)), + Detail: proto.String("you are hogging the resources!"), + }, + }) + return + case "RunSlowly": + // TestAPICallRPCFailure creates f.hang, but does not strobe it + // until Call returns with remotepb.RpcError_CANCELLED. + // This is here to force a happens-before relationship between + // the httptest server handler and shutdown. 
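+ // Block here until the test strobes f.hang (see TestAPICallRPCFailure).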
+ <-f.hang + resOut = &basepb.VoidProto{} + } + } + if service == "logservice" && method == "Flush" { + // Pretend log flushing is slow. + time.Sleep(50 * time.Millisecond) + atomic.AddInt32(&f.LogFlushes, 1) + resOut = &basepb.VoidProto{} + } + + encOut, err := proto.Marshal(resOut) + if err != nil { + http.Error(w, fmt.Sprintf("Failed encoding response: %v", err), 500) + return + } + writeResponse(&remotepb.Response{ + Response: encOut, + }) +} + +func setup() (f *fakeAPIHandler, c *context, cleanup func()) { + f = &fakeAPIHandler{} + srv := httptest.NewServer(f) + u, err := url.Parse(srv.URL + apiPath) + if err != nil { + panic(fmt.Sprintf("url.Parse(%q): %v", srv.URL+apiPath, err)) + } + return f, &context{ + req: &http.Request{ + Header: http.Header{ + ticketHeader: []string{"s3cr3t"}, + dapperHeader: []string{"trace-001"}, + }, + }, + apiURL: u, + }, srv.Close +} + +func TestAPICall(t *testing.T) { + _, c, cleanup := setup() + defer cleanup() + + req := &basepb.StringProto{ + Value: proto.String("Doctor Who"), + } + res := &basepb.StringProto{} + err := Call(toContext(c), "actordb", "LookupActor", req, res) + if err != nil { + t.Fatalf("API call failed: %v", err) + } + if got, want := *res.Value, "David Tennant"; got != want { + t.Errorf("Response is %q, want %q", got, want) + } +} + +func TestAPICallRPCFailure(t *testing.T) { + f, c, cleanup := setup() + defer cleanup() + + testCases := []struct { + method string + code remotepb.RpcError_ErrorCode + }{ + {"Non200", remotepb.RpcError_UNKNOWN}, + {"ShortResponse", remotepb.RpcError_UNKNOWN}, + {"OverQuota", remotepb.RpcError_OVER_QUOTA}, + {"RunSlowly", remotepb.RpcError_CANCELLED}, + } + f.hang = make(chan int) // only for RunSlowly + for _, tc := range testCases { + ctx, _ := netcontext.WithTimeout(toContext(c), 100*time.Millisecond) + err := Call(ctx, "errors", tc.method, &basepb.VoidProto{}, &basepb.VoidProto{}) + ce, ok := err.(*CallError) + if !ok { + t.Errorf("%s: API call error is %T (%v), want *CallError", tc.method, err, err) + continue + } + if ce.Code != int32(tc.code) { + t.Errorf("%s: ce.Code = %d, want %d", tc.method, ce.Code, tc.code) + } + if tc.method == "RunSlowly" { + f.hang <- 1 // release the HTTP handler + } + } +} + +func TestAPICallDialFailure(t *testing.T) { + // See what happens if the API host is unresponsive. + // This should time out quickly, not hang forever. + _, c, cleanup := setup() + defer cleanup() + // Reset the URL to the production address so that dialing fails. + c.apiURL = apiURL() + + start := time.Now() + err := Call(toContext(c), "foo", "bar", &basepb.VoidProto{}, &basepb.VoidProto{}) + const max = 1 * time.Second + if taken := time.Since(start); taken > max { + t.Errorf("Dial hang took too long: %v > %v", taken, max) + } + if err == nil { + t.Error("Call did not fail") + } +} + +func TestDelayedLogFlushing(t *testing.T) { + f, c, cleanup := setup() + defer cleanup() + + http.HandleFunc("/quick_log", func(w http.ResponseWriter, r *http.Request) { + logC := WithContext(netcontext.Background(), r) + fromContext(logC).apiURL = c.apiURL // Otherwise it will try to use the default URL. + Logf(logC, 1, "It's a lovely day.") + w.WriteHeader(200) + w.Write(make([]byte, 100<<10)) // write 100 KB to force HTTP flush + }) + + r := &http.Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "http", + Path: "/quick_log", + }, + Header: c.req.Header, + Body: ioutil.NopCloser(bytes.NewReader(nil)), + } + w := httptest.NewRecorder() + + // Check that log flushing does not hold up the HTTP response. 
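+ // handleHTTP kicks off the flush on its own goroutine (go c.flushLog),
+ // so the response should return well before the fake API's 50ms
+ // Flush delay elapses.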
+ start := time.Now() + handleHTTP(w, r) + if d := time.Since(start); d > 10*time.Millisecond { + t.Errorf("handleHTTP took %v, want under 10ms", d) + } + const hdr = "X-AppEngine-Log-Flush-Count" + if h := w.HeaderMap.Get(hdr); h != "1" { + t.Errorf("%s header = %q, want %q", hdr, h, "1") + } + if f := atomic.LoadInt32(&f.LogFlushes); f != 0 { + t.Errorf("After HTTP response: f.LogFlushes = %d, want 0", f) + } + + // Check that the log flush eventually comes in. + time.Sleep(100 * time.Millisecond) + if f := atomic.LoadInt32(&f.LogFlushes); f != 1 { + t.Errorf("After 100ms: f.LogFlushes = %d, want 1", f) + } +} + +func TestRemoteAddr(t *testing.T) { + var addr string + http.HandleFunc("/remote_addr", func(w http.ResponseWriter, r *http.Request) { + addr = r.RemoteAddr + }) + + testCases := []struct { + headers http.Header + addr string + }{ + {http.Header{"X-Appengine-User-Ip": []string{"10.5.2.1"}}, "10.5.2.1:80"}, + {http.Header{"X-Appengine-Remote-Addr": []string{"1.2.3.4"}}, "1.2.3.4:80"}, + {http.Header{"X-Appengine-Remote-Addr": []string{"1.2.3.4:8080"}}, "1.2.3.4:8080"}, + { + http.Header{"X-Appengine-Remote-Addr": []string{"2401:fa00:9:1:7646:a0ff:fe90:ca66"}}, + "[2401:fa00:9:1:7646:a0ff:fe90:ca66]:80", + }, + { + http.Header{"X-Appengine-Remote-Addr": []string{"[::1]:http"}}, + "[::1]:http", + }, + {http.Header{}, "127.0.0.1:80"}, + } + + for _, tc := range testCases { + r := &http.Request{ + Method: "GET", + URL: &url.URL{Scheme: "http", Path: "/remote_addr"}, + Header: tc.headers, + Body: ioutil.NopCloser(bytes.NewReader(nil)), + } + handleHTTP(httptest.NewRecorder(), r) + if addr != tc.addr { + t.Errorf("Header %v, got %q, want %q", tc.headers, addr, tc.addr) + } + } +} + +func TestPanickingHandler(t *testing.T) { + http.HandleFunc("/panic", func(http.ResponseWriter, *http.Request) { + panic("whoops!") + }) + r := &http.Request{ + Method: "GET", + URL: &url.URL{Scheme: "http", Path: "/panic"}, + Body: ioutil.NopCloser(bytes.NewReader(nil)), + } + rec := httptest.NewRecorder() + handleHTTP(rec, r) + if rec.Code != 500 { + t.Errorf("Panicking handler returned HTTP %d, want HTTP %d", rec.Code, 500) + } +} + +var raceDetector = false + +func TestAPICallAllocations(t *testing.T) { + if raceDetector { + t.Skip("not running under race detector") + } + + // Run the test API server in a subprocess so we aren't counting its allocations. + u, cleanup := launchHelperProcess(t) + defer cleanup() + c := &context{ + req: &http.Request{ + Header: http.Header{ + ticketHeader: []string{"s3cr3t"}, + dapperHeader: []string{"trace-001"}, + }, + }, + apiURL: u, + } + + req := &basepb.StringProto{ + Value: proto.String("Doctor Who"), + } + res := &basepb.StringProto{} + var apiErr error + avg := testing.AllocsPerRun(100, func() { + ctx, _ := netcontext.WithTimeout(toContext(c), 100*time.Millisecond) + if err := Call(ctx, "actordb", "LookupActor", req, res); err != nil && apiErr == nil { + apiErr = err // get the first error only + } + }) + if apiErr != nil { + t.Errorf("API call failed: %v", apiErr) + } + + // Lots of room for improvement... + // TODO(djd): Reduce maximum to 85 once the App Engine SDK is based on 1.6. 
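+ // The bounds below are deliberately loose; the test only guards
+ // against gross regressions in per-call allocations.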
+ const min, max float64 = 70, 90
+ if avg < min || max < avg {
+ t.Errorf("Allocations per API call = %g, want in [%g,%g]", avg, min, max)
+ }
+}
+
+func launchHelperProcess(t *testing.T) (apiURL *url.URL, cleanup func()) {
+ cmd := exec.Command(os.Args[0], "-test.run=TestHelperProcess")
+ cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
+ stdin, err := cmd.StdinPipe()
+ if err != nil {
+ t.Fatalf("StdinPipe: %v", err)
+ }
+ stdout, err := cmd.StdoutPipe()
+ if err != nil {
+ t.Fatalf("StdoutPipe: %v", err)
+ }
+ if err := cmd.Start(); err != nil {
+ t.Fatalf("Starting helper process: %v", err)
+ }
+
+ scan := bufio.NewScanner(stdout)
+ var u *url.URL
+ for scan.Scan() {
+ line := scan.Text()
+ if hp := strings.TrimPrefix(line, helperProcessMagic); hp != line {
+ var err error
+ u, err = url.Parse(hp)
+ if err != nil {
+ t.Fatalf("Failed to parse %q: %v", hp, err)
+ }
+ break
+ }
+ }
+ if err := scan.Err(); err != nil {
+ t.Fatalf("Scanning helper process stdout: %v", err)
+ }
+ if u == nil {
+ t.Fatal("Helper process never reported")
+ }
+
+ return u, func() {
+ stdin.Close()
+ if err := cmd.Wait(); err != nil {
+ t.Errorf("Helper process did not exit cleanly: %v", err)
+ }
+ }
+}
+
+const helperProcessMagic = "A lovely helper process is listening at "
+
+// This isn't a real test. It's used as a helper process.
+func TestHelperProcess(*testing.T) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
+ return
+ }
+ defer os.Exit(0)
+
+ f := &fakeAPIHandler{}
+ srv := httptest.NewServer(f)
+ defer srv.Close()
+ fmt.Println(helperProcessMagic + srv.URL + apiPath)
+
+ // Wait for stdin to be closed.
+ io.Copy(ioutil.Discard, os.Stdin)
+}
+
+func TestBackgroundContext(t *testing.T) {
+ environ := []struct {
+ key, value string
+ }{
+ {"GAE_LONG_APP_ID", "my-app-id"},
+ {"GAE_MINOR_VERSION", "067924799508853122"},
+ {"GAE_MODULE_INSTANCE", "0"},
+ {"GAE_MODULE_NAME", "default"},
+ {"GAE_MODULE_VERSION", "20150612t184001"},
+ }
+ for i, v := range environ {
+ old := os.Getenv(v.key)
+ os.Setenv(v.key, v.value)
+ // v is a copy of the slice element; write the old value back
+ // through the slice so the deferred restore below sees it.
+ environ[i].value = old
+ }
+ defer func() { // Restore old environment after the test completes.
+ for _, v := range environ {
+ if v.value == "" {
+ os.Unsetenv(v.key)
+ continue
+ }
+ os.Setenv(v.key, v.value)
+ }
+ }()
+
+ ctx, key := fromContext(BackgroundContext()), "X-Magic-Ticket-Header"
+ if g, w := ctx.req.Header.Get(key), "my-app-id/default.20150612t184001.0"; g != w {
+ t.Errorf("%v = %q, want %q", key, g, w)
+ }
+
+ // Check that using the background context doesn't panic.
+ req := &basepb.StringProto{
+ Value: proto.String("Doctor Who"),
+ }
+ res := &basepb.StringProto{}
+ Call(BackgroundContext(), "actordb", "LookupActor", req, res) // expected to fail
+}
diff --git a/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/google.golang.org/appengine/internal/app_id.go
new file mode 100644
index 0000000..11df8c0
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_id.go
@@ -0,0 +1,28 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "strings"
+)
+
+func parseFullAppID(appid string) (partition, domain, displayID string) {
+ if i := strings.Index(appid, "~"); i != -1 {
+ partition, appid = appid[:i], appid[i+1:]
+ }
+ if i := strings.Index(appid, ":"); i != -1 {
+ domain, appid = appid[:i], appid[i+1:]
+ }
+ return partition, domain, appid
+}
+
+// appID returns "appid" or "domain.com:appid".
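+// For example, "part~domain.com:display" yields "domain.com:display",
+// and "s~simple-app" yields "simple-app".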
+func appID(fullAppID string) string { + _, dom, dis := parseFullAppID(fullAppID) + if dom != "" { + return dom + ":" + dis + } + return dis +} diff --git a/vendor/google.golang.org/appengine/internal/app_id_test.go b/vendor/google.golang.org/appengine/internal/app_id_test.go new file mode 100644 index 0000000..e69195c --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/app_id_test.go @@ -0,0 +1,34 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +import ( + "testing" +) + +func TestAppIDParsing(t *testing.T) { + testCases := []struct { + in string + partition, domain, displayID string + }{ + {"simple-app-id", "", "", "simple-app-id"}, + {"domain.com:domain-app-id", "", "domain.com", "domain-app-id"}, + {"part~partition-app-id", "part", "", "partition-app-id"}, + {"part~domain.com:display", "part", "domain.com", "display"}, + } + + for _, tc := range testCases { + part, dom, dis := parseFullAppID(tc.in) + if part != tc.partition { + t.Errorf("partition of %q: got %q, want %q", tc.in, part, tc.partition) + } + if dom != tc.domain { + t.Errorf("domain of %q: got %q, want %q", tc.in, dom, tc.domain) + } + if dis != tc.displayID { + t.Errorf("displayID of %q: got %q, want %q", tc.in, dis, tc.displayID) + } + } +} diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go new file mode 100644 index 0000000..87d9701 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go @@ -0,0 +1,296 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto +// DO NOT EDIT! + +/* +Package app_identity is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/app_identity/app_identity_service.proto + +It has these top-level messages: + AppIdentityServiceError + SignForAppRequest + SignForAppResponse + GetPublicCertificateForAppRequest + PublicCertificate + GetPublicCertificateForAppResponse + GetServiceAccountNameRequest + GetServiceAccountNameResponse + GetAccessTokenRequest + GetAccessTokenResponse + GetDefaultGcsBucketNameRequest + GetDefaultGcsBucketNameResponse +*/ +package app_identity + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
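+// NOTE: in proto2-generated Go code, optional scalar fields are
+// pointers, so the GetXxx accessors below nil-check the message and
+// the field before dereferencing and otherwise return the zero value.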
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type AppIdentityServiceError_ErrorCode int32 + +const ( + AppIdentityServiceError_SUCCESS AppIdentityServiceError_ErrorCode = 0 + AppIdentityServiceError_UNKNOWN_SCOPE AppIdentityServiceError_ErrorCode = 9 + AppIdentityServiceError_BLOB_TOO_LARGE AppIdentityServiceError_ErrorCode = 1000 + AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001 + AppIdentityServiceError_NOT_A_VALID_APP AppIdentityServiceError_ErrorCode = 1002 + AppIdentityServiceError_UNKNOWN_ERROR AppIdentityServiceError_ErrorCode = 1003 + AppIdentityServiceError_NOT_ALLOWED AppIdentityServiceError_ErrorCode = 1005 + AppIdentityServiceError_NOT_IMPLEMENTED AppIdentityServiceError_ErrorCode = 1006 +) + +var AppIdentityServiceError_ErrorCode_name = map[int32]string{ + 0: "SUCCESS", + 9: "UNKNOWN_SCOPE", + 1000: "BLOB_TOO_LARGE", + 1001: "DEADLINE_EXCEEDED", + 1002: "NOT_A_VALID_APP", + 1003: "UNKNOWN_ERROR", + 1005: "NOT_ALLOWED", + 1006: "NOT_IMPLEMENTED", +} +var AppIdentityServiceError_ErrorCode_value = map[string]int32{ + "SUCCESS": 0, + "UNKNOWN_SCOPE": 9, + "BLOB_TOO_LARGE": 1000, + "DEADLINE_EXCEEDED": 1001, + "NOT_A_VALID_APP": 1002, + "UNKNOWN_ERROR": 1003, + "NOT_ALLOWED": 1005, + "NOT_IMPLEMENTED": 1006, +} + +func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode { + p := new(AppIdentityServiceError_ErrorCode) + *p = x + return p +} +func (x AppIdentityServiceError_ErrorCode) String() string { + return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x)) +} +func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode") + if err != nil { + return err + } + *x = AppIdentityServiceError_ErrorCode(value) + return nil +} + +type AppIdentityServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *AppIdentityServiceError) Reset() { *m = AppIdentityServiceError{} } +func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) } +func (*AppIdentityServiceError) ProtoMessage() {} + +type SignForAppRequest struct { + BytesToSign []byte `protobuf:"bytes,1,opt,name=bytes_to_sign" json:"bytes_to_sign,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SignForAppRequest) Reset() { *m = SignForAppRequest{} } +func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) } +func (*SignForAppRequest) ProtoMessage() {} + +func (m *SignForAppRequest) GetBytesToSign() []byte { + if m != nil { + return m.BytesToSign + } + return nil +} + +type SignForAppResponse struct { + KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"` + SignatureBytes []byte `protobuf:"bytes,2,opt,name=signature_bytes" json:"signature_bytes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SignForAppResponse) Reset() { *m = SignForAppResponse{} } +func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) } +func (*SignForAppResponse) ProtoMessage() {} + +func (m *SignForAppResponse) GetKeyName() string { + if m != nil && m.KeyName != nil { + return *m.KeyName + } + return "" +} + +func (m *SignForAppResponse) GetSignatureBytes() []byte { + if m != nil { + return m.SignatureBytes + } + return nil +} + +type GetPublicCertificateForAppRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetPublicCertificateForAppRequest) Reset() { *m = 
GetPublicCertificateForAppRequest{} } +func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) } +func (*GetPublicCertificateForAppRequest) ProtoMessage() {} + +type PublicCertificate struct { + KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"` + X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem" json:"x509_certificate_pem,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PublicCertificate) Reset() { *m = PublicCertificate{} } +func (m *PublicCertificate) String() string { return proto.CompactTextString(m) } +func (*PublicCertificate) ProtoMessage() {} + +func (m *PublicCertificate) GetKeyName() string { + if m != nil && m.KeyName != nil { + return *m.KeyName + } + return "" +} + +func (m *PublicCertificate) GetX509CertificatePem() string { + if m != nil && m.X509CertificatePem != nil { + return *m.X509CertificatePem + } + return "" +} + +type GetPublicCertificateForAppResponse struct { + PublicCertificateList []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list" json:"public_certificate_list,omitempty"` + MaxClientCacheTimeInSecond *int64 `protobuf:"varint,2,opt,name=max_client_cache_time_in_second" json:"max_client_cache_time_in_second,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetPublicCertificateForAppResponse) Reset() { *m = GetPublicCertificateForAppResponse{} } +func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) } +func (*GetPublicCertificateForAppResponse) ProtoMessage() {} + +func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate { + if m != nil { + return m.PublicCertificateList + } + return nil +} + +func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 { + if m != nil && m.MaxClientCacheTimeInSecond != nil { + return *m.MaxClientCacheTimeInSecond + } + return 0 +} + +type GetServiceAccountNameRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} } +func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) } +func (*GetServiceAccountNameRequest) ProtoMessage() {} + +type GetServiceAccountNameResponse struct { + ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name" json:"service_account_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} } +func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) } +func (*GetServiceAccountNameResponse) ProtoMessage() {} + +func (m *GetServiceAccountNameResponse) GetServiceAccountName() string { + if m != nil && m.ServiceAccountName != nil { + return *m.ServiceAccountName + } + return "" +} + +type GetAccessTokenRequest struct { + Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"` + ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id" json:"service_account_id,omitempty"` + ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name" json:"service_account_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} } +func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) } +func (*GetAccessTokenRequest) ProtoMessage() {} + +func (m *GetAccessTokenRequest) 
GetScope() []string { + if m != nil { + return m.Scope + } + return nil +} + +func (m *GetAccessTokenRequest) GetServiceAccountId() int64 { + if m != nil && m.ServiceAccountId != nil { + return *m.ServiceAccountId + } + return 0 +} + +func (m *GetAccessTokenRequest) GetServiceAccountName() string { + if m != nil && m.ServiceAccountName != nil { + return *m.ServiceAccountName + } + return "" +} + +type GetAccessTokenResponse struct { + AccessToken *string `protobuf:"bytes,1,opt,name=access_token" json:"access_token,omitempty"` + ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time" json:"expiration_time,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} } +func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) } +func (*GetAccessTokenResponse) ProtoMessage() {} + +func (m *GetAccessTokenResponse) GetAccessToken() string { + if m != nil && m.AccessToken != nil { + return *m.AccessToken + } + return "" +} + +func (m *GetAccessTokenResponse) GetExpirationTime() int64 { + if m != nil && m.ExpirationTime != nil { + return *m.ExpirationTime + } + return 0 +} + +type GetDefaultGcsBucketNameRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} } +func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) } +func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {} + +type GetDefaultGcsBucketNameResponse struct { + DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name" json:"default_gcs_bucket_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetDefaultGcsBucketNameResponse) Reset() { *m = GetDefaultGcsBucketNameResponse{} } +func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) } +func (*GetDefaultGcsBucketNameResponse) ProtoMessage() {} + +func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string { + if m != nil && m.DefaultGcsBucketName != nil { + return *m.DefaultGcsBucketName + } + return "" +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto new file mode 100644 index 0000000..19610ca --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto @@ -0,0 +1,64 @@ +syntax = "proto2"; +option go_package = "app_identity"; + +package appengine; + +message AppIdentityServiceError { + enum ErrorCode { + SUCCESS = 0; + UNKNOWN_SCOPE = 9; + BLOB_TOO_LARGE = 1000; + DEADLINE_EXCEEDED = 1001; + NOT_A_VALID_APP = 1002; + UNKNOWN_ERROR = 1003; + NOT_ALLOWED = 1005; + NOT_IMPLEMENTED = 1006; + } +} + +message SignForAppRequest { + optional bytes bytes_to_sign = 1; +} + +message SignForAppResponse { + optional string key_name = 1; + optional bytes signature_bytes = 2; +} + +message GetPublicCertificateForAppRequest { +} + +message PublicCertificate { + optional string key_name = 1; + optional string x509_certificate_pem = 2; +} + +message GetPublicCertificateForAppResponse { + repeated PublicCertificate public_certificate_list = 1; + optional int64 max_client_cache_time_in_second = 2; +} + +message GetServiceAccountNameRequest { +} + +message GetServiceAccountNameResponse { + optional string service_account_name = 1; +} + +message GetAccessTokenRequest { + repeated 
string scope = 1; + optional int64 service_account_id = 2; + optional string service_account_name = 3; +} + +message GetAccessTokenResponse { + optional string access_token = 1; + optional int64 expiration_time = 2; +} + +message GetDefaultGcsBucketNameRequest { +} + +message GetDefaultGcsBucketNameResponse { + optional string default_gcs_bucket_name = 1; +} diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go new file mode 100644 index 0000000..36a1956 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go @@ -0,0 +1,133 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/base/api_base.proto +// DO NOT EDIT! + +/* +Package base is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/base/api_base.proto + +It has these top-level messages: + StringProto + Integer32Proto + Integer64Proto + BoolProto + DoubleProto + BytesProto + VoidProto +*/ +package base + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type StringProto struct { + Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StringProto) Reset() { *m = StringProto{} } +func (m *StringProto) String() string { return proto.CompactTextString(m) } +func (*StringProto) ProtoMessage() {} + +func (m *StringProto) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type Integer32Proto struct { + Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Integer32Proto) Reset() { *m = Integer32Proto{} } +func (m *Integer32Proto) String() string { return proto.CompactTextString(m) } +func (*Integer32Proto) ProtoMessage() {} + +func (m *Integer32Proto) GetValue() int32 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Integer64Proto struct { + Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Integer64Proto) Reset() { *m = Integer64Proto{} } +func (m *Integer64Proto) String() string { return proto.CompactTextString(m) } +func (*Integer64Proto) ProtoMessage() {} + +func (m *Integer64Proto) GetValue() int64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type BoolProto struct { + Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BoolProto) Reset() { *m = BoolProto{} } +func (m *BoolProto) String() string { return proto.CompactTextString(m) } +func (*BoolProto) ProtoMessage() {} + +func (m *BoolProto) GetValue() bool { + if m != nil && m.Value != nil { + return *m.Value + } + return false +} + +type DoubleProto struct { + Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DoubleProto) Reset() { *m = DoubleProto{} } +func (m *DoubleProto) String() string { return proto.CompactTextString(m) } +func (*DoubleProto) ProtoMessage() {} + +func (m *DoubleProto) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type BytesProto struct { + Value []byte 
`protobuf:"bytes,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BytesProto) Reset() { *m = BytesProto{} } +func (m *BytesProto) String() string { return proto.CompactTextString(m) } +func (*BytesProto) ProtoMessage() {} + +func (m *BytesProto) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type VoidProto struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *VoidProto) Reset() { *m = VoidProto{} } +func (m *VoidProto) String() string { return proto.CompactTextString(m) } +func (*VoidProto) ProtoMessage() {} diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.proto b/vendor/google.golang.org/appengine/internal/base/api_base.proto new file mode 100644 index 0000000..56cd7a3 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/base/api_base.proto @@ -0,0 +1,33 @@ +// Built-in base types for API calls. Primarily useful as return types. + +syntax = "proto2"; +option go_package = "base"; + +package appengine.base; + +message StringProto { + required string value = 1; +} + +message Integer32Proto { + required int32 value = 1; +} + +message Integer64Proto { + required int64 value = 1; +} + +message BoolProto { + required bool value = 1; +} + +message DoubleProto { + required double value = 1; +} + +message BytesProto { + required bytes value = 1 [ctype=CORD]; +} + +message VoidProto { +} diff --git a/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.pb.go b/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.pb.go new file mode 100644 index 0000000..8705ec3 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.pb.go @@ -0,0 +1,347 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/blobstore/blobstore_service.proto +// DO NOT EDIT! + +/* +Package blobstore is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/blobstore/blobstore_service.proto + +It has these top-level messages: + BlobstoreServiceError + CreateUploadURLRequest + CreateUploadURLResponse + DeleteBlobRequest + FetchDataRequest + FetchDataResponse + CloneBlobRequest + CloneBlobResponse + DecodeBlobKeyRequest + DecodeBlobKeyResponse + CreateEncodedGoogleStorageKeyRequest + CreateEncodedGoogleStorageKeyResponse +*/ +package blobstore + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type BlobstoreServiceError_ErrorCode int32 + +const ( + BlobstoreServiceError_OK BlobstoreServiceError_ErrorCode = 0 + BlobstoreServiceError_INTERNAL_ERROR BlobstoreServiceError_ErrorCode = 1 + BlobstoreServiceError_URL_TOO_LONG BlobstoreServiceError_ErrorCode = 2 + BlobstoreServiceError_PERMISSION_DENIED BlobstoreServiceError_ErrorCode = 3 + BlobstoreServiceError_BLOB_NOT_FOUND BlobstoreServiceError_ErrorCode = 4 + BlobstoreServiceError_DATA_INDEX_OUT_OF_RANGE BlobstoreServiceError_ErrorCode = 5 + BlobstoreServiceError_BLOB_FETCH_SIZE_TOO_LARGE BlobstoreServiceError_ErrorCode = 6 + BlobstoreServiceError_ARGUMENT_OUT_OF_RANGE BlobstoreServiceError_ErrorCode = 8 + BlobstoreServiceError_INVALID_BLOB_KEY BlobstoreServiceError_ErrorCode = 9 +) + +var BlobstoreServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INTERNAL_ERROR", + 2: "URL_TOO_LONG", + 3: "PERMISSION_DENIED", + 4: "BLOB_NOT_FOUND", + 5: "DATA_INDEX_OUT_OF_RANGE", + 6: "BLOB_FETCH_SIZE_TOO_LARGE", + 8: "ARGUMENT_OUT_OF_RANGE", + 9: "INVALID_BLOB_KEY", +} +var BlobstoreServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INTERNAL_ERROR": 1, + "URL_TOO_LONG": 2, + "PERMISSION_DENIED": 3, + "BLOB_NOT_FOUND": 4, + "DATA_INDEX_OUT_OF_RANGE": 5, + "BLOB_FETCH_SIZE_TOO_LARGE": 6, + "ARGUMENT_OUT_OF_RANGE": 8, + "INVALID_BLOB_KEY": 9, +} + +func (x BlobstoreServiceError_ErrorCode) Enum() *BlobstoreServiceError_ErrorCode { + p := new(BlobstoreServiceError_ErrorCode) + *p = x + return p +} +func (x BlobstoreServiceError_ErrorCode) String() string { + return proto.EnumName(BlobstoreServiceError_ErrorCode_name, int32(x)) +} +func (x *BlobstoreServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(BlobstoreServiceError_ErrorCode_value, data, "BlobstoreServiceError_ErrorCode") + if err != nil { + return err + } + *x = BlobstoreServiceError_ErrorCode(value) + return nil +} + +type BlobstoreServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *BlobstoreServiceError) Reset() { *m = BlobstoreServiceError{} } +func (m *BlobstoreServiceError) String() string { return proto.CompactTextString(m) } +func (*BlobstoreServiceError) ProtoMessage() {} + +type CreateUploadURLRequest struct { + SuccessPath *string `protobuf:"bytes,1,req,name=success_path" json:"success_path,omitempty"` + MaxUploadSizeBytes *int64 `protobuf:"varint,2,opt,name=max_upload_size_bytes" json:"max_upload_size_bytes,omitempty"` + MaxUploadSizePerBlobBytes *int64 `protobuf:"varint,3,opt,name=max_upload_size_per_blob_bytes" json:"max_upload_size_per_blob_bytes,omitempty"` + GsBucketName *string `protobuf:"bytes,4,opt,name=gs_bucket_name" json:"gs_bucket_name,omitempty"` + UrlExpiryTimeSeconds *int32 `protobuf:"varint,5,opt,name=url_expiry_time_seconds" json:"url_expiry_time_seconds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateUploadURLRequest) Reset() { *m = CreateUploadURLRequest{} } +func (m *CreateUploadURLRequest) String() string { return proto.CompactTextString(m) } +func (*CreateUploadURLRequest) ProtoMessage() {} + +func (m *CreateUploadURLRequest) GetSuccessPath() string { + if m != nil && m.SuccessPath != nil { + return *m.SuccessPath + } + return "" +} + +func (m *CreateUploadURLRequest) GetMaxUploadSizeBytes() int64 { + if m != nil && m.MaxUploadSizeBytes != nil { + return *m.MaxUploadSizeBytes + } + return 0 +} + +func (m *CreateUploadURLRequest) GetMaxUploadSizePerBlobBytes() int64 { + if m != nil && 
m.MaxUploadSizePerBlobBytes != nil { + return *m.MaxUploadSizePerBlobBytes + } + return 0 +} + +func (m *CreateUploadURLRequest) GetGsBucketName() string { + if m != nil && m.GsBucketName != nil { + return *m.GsBucketName + } + return "" +} + +func (m *CreateUploadURLRequest) GetUrlExpiryTimeSeconds() int32 { + if m != nil && m.UrlExpiryTimeSeconds != nil { + return *m.UrlExpiryTimeSeconds + } + return 0 +} + +type CreateUploadURLResponse struct { + Url *string `protobuf:"bytes,1,req,name=url" json:"url,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateUploadURLResponse) Reset() { *m = CreateUploadURLResponse{} } +func (m *CreateUploadURLResponse) String() string { return proto.CompactTextString(m) } +func (*CreateUploadURLResponse) ProtoMessage() {} + +func (m *CreateUploadURLResponse) GetUrl() string { + if m != nil && m.Url != nil { + return *m.Url + } + return "" +} + +type DeleteBlobRequest struct { + BlobKey []string `protobuf:"bytes,1,rep,name=blob_key" json:"blob_key,omitempty"` + Token *string `protobuf:"bytes,2,opt,name=token" json:"token,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteBlobRequest) Reset() { *m = DeleteBlobRequest{} } +func (m *DeleteBlobRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteBlobRequest) ProtoMessage() {} + +func (m *DeleteBlobRequest) GetBlobKey() []string { + if m != nil { + return m.BlobKey + } + return nil +} + +func (m *DeleteBlobRequest) GetToken() string { + if m != nil && m.Token != nil { + return *m.Token + } + return "" +} + +type FetchDataRequest struct { + BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"` + StartIndex *int64 `protobuf:"varint,2,req,name=start_index" json:"start_index,omitempty"` + EndIndex *int64 `protobuf:"varint,3,req,name=end_index" json:"end_index,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FetchDataRequest) Reset() { *m = FetchDataRequest{} } +func (m *FetchDataRequest) String() string { return proto.CompactTextString(m) } +func (*FetchDataRequest) ProtoMessage() {} + +func (m *FetchDataRequest) GetBlobKey() string { + if m != nil && m.BlobKey != nil { + return *m.BlobKey + } + return "" +} + +func (m *FetchDataRequest) GetStartIndex() int64 { + if m != nil && m.StartIndex != nil { + return *m.StartIndex + } + return 0 +} + +func (m *FetchDataRequest) GetEndIndex() int64 { + if m != nil && m.EndIndex != nil { + return *m.EndIndex + } + return 0 +} + +type FetchDataResponse struct { + Data []byte `protobuf:"bytes,1000,req,name=data" json:"data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FetchDataResponse) Reset() { *m = FetchDataResponse{} } +func (m *FetchDataResponse) String() string { return proto.CompactTextString(m) } +func (*FetchDataResponse) ProtoMessage() {} + +func (m *FetchDataResponse) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +type CloneBlobRequest struct { + BlobKey []byte `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"` + MimeType []byte `protobuf:"bytes,2,req,name=mime_type" json:"mime_type,omitempty"` + TargetAppId []byte `protobuf:"bytes,3,req,name=target_app_id" json:"target_app_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CloneBlobRequest) Reset() { *m = CloneBlobRequest{} } +func (m *CloneBlobRequest) String() string { return proto.CompactTextString(m) } +func (*CloneBlobRequest) ProtoMessage() {} + +func (m *CloneBlobRequest) GetBlobKey() []byte { + if m != nil { + return m.BlobKey + } + 
return nil +} + +func (m *CloneBlobRequest) GetMimeType() []byte { + if m != nil { + return m.MimeType + } + return nil +} + +func (m *CloneBlobRequest) GetTargetAppId() []byte { + if m != nil { + return m.TargetAppId + } + return nil +} + +type CloneBlobResponse struct { + BlobKey []byte `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CloneBlobResponse) Reset() { *m = CloneBlobResponse{} } +func (m *CloneBlobResponse) String() string { return proto.CompactTextString(m) } +func (*CloneBlobResponse) ProtoMessage() {} + +func (m *CloneBlobResponse) GetBlobKey() []byte { + if m != nil { + return m.BlobKey + } + return nil +} + +type DecodeBlobKeyRequest struct { + BlobKey []string `protobuf:"bytes,1,rep,name=blob_key" json:"blob_key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DecodeBlobKeyRequest) Reset() { *m = DecodeBlobKeyRequest{} } +func (m *DecodeBlobKeyRequest) String() string { return proto.CompactTextString(m) } +func (*DecodeBlobKeyRequest) ProtoMessage() {} + +func (m *DecodeBlobKeyRequest) GetBlobKey() []string { + if m != nil { + return m.BlobKey + } + return nil +} + +type DecodeBlobKeyResponse struct { + Decoded []string `protobuf:"bytes,1,rep,name=decoded" json:"decoded,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DecodeBlobKeyResponse) Reset() { *m = DecodeBlobKeyResponse{} } +func (m *DecodeBlobKeyResponse) String() string { return proto.CompactTextString(m) } +func (*DecodeBlobKeyResponse) ProtoMessage() {} + +func (m *DecodeBlobKeyResponse) GetDecoded() []string { + if m != nil { + return m.Decoded + } + return nil +} + +type CreateEncodedGoogleStorageKeyRequest struct { + Filename *string `protobuf:"bytes,1,req,name=filename" json:"filename,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateEncodedGoogleStorageKeyRequest) Reset() { *m = CreateEncodedGoogleStorageKeyRequest{} } +func (m *CreateEncodedGoogleStorageKeyRequest) String() string { return proto.CompactTextString(m) } +func (*CreateEncodedGoogleStorageKeyRequest) ProtoMessage() {} + +func (m *CreateEncodedGoogleStorageKeyRequest) GetFilename() string { + if m != nil && m.Filename != nil { + return *m.Filename + } + return "" +} + +type CreateEncodedGoogleStorageKeyResponse struct { + BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateEncodedGoogleStorageKeyResponse) Reset() { *m = CreateEncodedGoogleStorageKeyResponse{} } +func (m *CreateEncodedGoogleStorageKeyResponse) String() string { return proto.CompactTextString(m) } +func (*CreateEncodedGoogleStorageKeyResponse) ProtoMessage() {} + +func (m *CreateEncodedGoogleStorageKeyResponse) GetBlobKey() string { + if m != nil && m.BlobKey != nil { + return *m.BlobKey + } + return "" +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.proto b/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.proto new file mode 100644 index 0000000..33b2650 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.proto @@ -0,0 +1,71 @@ +syntax = "proto2"; +option go_package = "blobstore"; + +package appengine; + +message BlobstoreServiceError { + enum ErrorCode { + OK = 0; + INTERNAL_ERROR = 1; + URL_TOO_LONG = 2; + PERMISSION_DENIED = 3; + BLOB_NOT_FOUND = 4; + DATA_INDEX_OUT_OF_RANGE = 5; + BLOB_FETCH_SIZE_TOO_LARGE = 6; + ARGUMENT_OUT_OF_RANGE = 8; + 
INVALID_BLOB_KEY = 9; + } +} + +message CreateUploadURLRequest { + required string success_path = 1; + optional int64 max_upload_size_bytes = 2; + optional int64 max_upload_size_per_blob_bytes = 3; + optional string gs_bucket_name = 4; + optional int32 url_expiry_time_seconds = 5; +} + +message CreateUploadURLResponse { + required string url = 1; +} + +message DeleteBlobRequest { + repeated string blob_key = 1; + optional string token = 2; +} + +message FetchDataRequest { + required string blob_key = 1; + required int64 start_index = 2; + required int64 end_index = 3; +} + +message FetchDataResponse { + required bytes data = 1000 [ctype = CORD]; +} + +message CloneBlobRequest { + required bytes blob_key = 1; + required bytes mime_type = 2; + required bytes target_app_id = 3; +} + +message CloneBlobResponse { + required bytes blob_key = 1; +} + +message DecodeBlobKeyRequest { + repeated string blob_key = 1; +} + +message DecodeBlobKeyResponse { + repeated string decoded = 1; +} + +message CreateEncodedGoogleStorageKeyRequest { + required string filename = 1; +} + +message CreateEncodedGoogleStorageKeyResponse { + required string blob_key = 1; +} diff --git a/vendor/google.golang.org/appengine/internal/capability/capability_service.pb.go b/vendor/google.golang.org/appengine/internal/capability/capability_service.pb.go new file mode 100644 index 0000000..1736364 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/capability/capability_service.pb.go @@ -0,0 +1,125 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/capability/capability_service.proto +// DO NOT EDIT! + +/* +Package capability is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/capability/capability_service.proto + +It has these top-level messages: + IsEnabledRequest + IsEnabledResponse +*/ +package capability + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type IsEnabledResponse_SummaryStatus int32 + +const ( + IsEnabledResponse_DEFAULT IsEnabledResponse_SummaryStatus = 0 + IsEnabledResponse_ENABLED IsEnabledResponse_SummaryStatus = 1 + IsEnabledResponse_SCHEDULED_FUTURE IsEnabledResponse_SummaryStatus = 2 + IsEnabledResponse_SCHEDULED_NOW IsEnabledResponse_SummaryStatus = 3 + IsEnabledResponse_DISABLED IsEnabledResponse_SummaryStatus = 4 + IsEnabledResponse_UNKNOWN IsEnabledResponse_SummaryStatus = 5 +) + +var IsEnabledResponse_SummaryStatus_name = map[int32]string{ + 0: "DEFAULT", + 1: "ENABLED", + 2: "SCHEDULED_FUTURE", + 3: "SCHEDULED_NOW", + 4: "DISABLED", + 5: "UNKNOWN", +} +var IsEnabledResponse_SummaryStatus_value = map[string]int32{ + "DEFAULT": 0, + "ENABLED": 1, + "SCHEDULED_FUTURE": 2, + "SCHEDULED_NOW": 3, + "DISABLED": 4, + "UNKNOWN": 5, +} + +func (x IsEnabledResponse_SummaryStatus) Enum() *IsEnabledResponse_SummaryStatus { + p := new(IsEnabledResponse_SummaryStatus) + *p = x + return p +} +func (x IsEnabledResponse_SummaryStatus) String() string { + return proto.EnumName(IsEnabledResponse_SummaryStatus_name, int32(x)) +} +func (x *IsEnabledResponse_SummaryStatus) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(IsEnabledResponse_SummaryStatus_value, data, "IsEnabledResponse_SummaryStatus") + if err != nil { + return err + } + *x = IsEnabledResponse_SummaryStatus(value) + return nil +} + +type IsEnabledRequest struct { + Package *string `protobuf:"bytes,1,req,name=package" json:"package,omitempty"` + Capability []string `protobuf:"bytes,2,rep,name=capability" json:"capability,omitempty"` + Call []string `protobuf:"bytes,3,rep,name=call" json:"call,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IsEnabledRequest) Reset() { *m = IsEnabledRequest{} } +func (m *IsEnabledRequest) String() string { return proto.CompactTextString(m) } +func (*IsEnabledRequest) ProtoMessage() {} + +func (m *IsEnabledRequest) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *IsEnabledRequest) GetCapability() []string { + if m != nil { + return m.Capability + } + return nil +} + +func (m *IsEnabledRequest) GetCall() []string { + if m != nil { + return m.Call + } + return nil +} + +type IsEnabledResponse struct { + SummaryStatus *IsEnabledResponse_SummaryStatus `protobuf:"varint,1,opt,name=summary_status,enum=appengine.IsEnabledResponse_SummaryStatus" json:"summary_status,omitempty"` + TimeUntilScheduled *int64 `protobuf:"varint,2,opt,name=time_until_scheduled" json:"time_until_scheduled,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IsEnabledResponse) Reset() { *m = IsEnabledResponse{} } +func (m *IsEnabledResponse) String() string { return proto.CompactTextString(m) } +func (*IsEnabledResponse) ProtoMessage() {} + +func (m *IsEnabledResponse) GetSummaryStatus() IsEnabledResponse_SummaryStatus { + if m != nil && m.SummaryStatus != nil { + return *m.SummaryStatus + } + return IsEnabledResponse_DEFAULT +} + +func (m *IsEnabledResponse) GetTimeUntilScheduled() int64 { + if m != nil && m.TimeUntilScheduled != nil { + return *m.TimeUntilScheduled + } + return 0 +} diff --git a/vendor/google.golang.org/appengine/internal/capability/capability_service.proto b/vendor/google.golang.org/appengine/internal/capability/capability_service.proto new file mode 100644 index 0000000..5660ab6 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/capability/capability_service.proto @@ 
-0,0 +1,28 @@ +syntax = "proto2"; +option go_package = "capability"; + +package appengine; + +message IsEnabledRequest { + required string package = 1; + repeated string capability = 2; + repeated string call = 3; +} + +message IsEnabledResponse { + enum SummaryStatus { + DEFAULT = 0; + ENABLED = 1; + SCHEDULED_FUTURE = 2; + SCHEDULED_NOW = 3; + DISABLED = 4; + UNKNOWN = 5; + } + optional SummaryStatus summary_status = 1; + + optional int64 time_until_scheduled = 2; +} + +service CapabilityService { + rpc IsEnabled(IsEnabledRequest) returns (IsEnabledResponse) {}; +} diff --git a/vendor/google.golang.org/appengine/internal/channel/channel_service.pb.go b/vendor/google.golang.org/appengine/internal/channel/channel_service.pb.go new file mode 100644 index 0000000..7b8d00c --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/channel/channel_service.pb.go @@ -0,0 +1,154 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/channel/channel_service.proto +// DO NOT EDIT! + +/* +Package channel is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/channel/channel_service.proto + +It has these top-level messages: + ChannelServiceError + CreateChannelRequest + CreateChannelResponse + SendMessageRequest +*/ +package channel + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type ChannelServiceError_ErrorCode int32 + +const ( + ChannelServiceError_OK ChannelServiceError_ErrorCode = 0 + ChannelServiceError_INTERNAL_ERROR ChannelServiceError_ErrorCode = 1 + ChannelServiceError_INVALID_CHANNEL_KEY ChannelServiceError_ErrorCode = 2 + ChannelServiceError_BAD_MESSAGE ChannelServiceError_ErrorCode = 3 + ChannelServiceError_INVALID_CHANNEL_TOKEN_DURATION ChannelServiceError_ErrorCode = 4 + ChannelServiceError_APPID_ALIAS_REQUIRED ChannelServiceError_ErrorCode = 5 +) + +var ChannelServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INTERNAL_ERROR", + 2: "INVALID_CHANNEL_KEY", + 3: "BAD_MESSAGE", + 4: "INVALID_CHANNEL_TOKEN_DURATION", + 5: "APPID_ALIAS_REQUIRED", +} +var ChannelServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INTERNAL_ERROR": 1, + "INVALID_CHANNEL_KEY": 2, + "BAD_MESSAGE": 3, + "INVALID_CHANNEL_TOKEN_DURATION": 4, + "APPID_ALIAS_REQUIRED": 5, +} + +func (x ChannelServiceError_ErrorCode) Enum() *ChannelServiceError_ErrorCode { + p := new(ChannelServiceError_ErrorCode) + *p = x + return p +} +func (x ChannelServiceError_ErrorCode) String() string { + return proto.EnumName(ChannelServiceError_ErrorCode_name, int32(x)) +} +func (x *ChannelServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ChannelServiceError_ErrorCode_value, data, "ChannelServiceError_ErrorCode") + if err != nil { + return err + } + *x = ChannelServiceError_ErrorCode(value) + return nil +} + +type ChannelServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ChannelServiceError) Reset() { *m = ChannelServiceError{} } +func (m *ChannelServiceError) String() string { return proto.CompactTextString(m) } +func (*ChannelServiceError) ProtoMessage() {} + +type CreateChannelRequest struct { + ApplicationKey *string `protobuf:"bytes,1,req,name=application_key" json:"application_key,omitempty"` + DurationMinutes *int32 `protobuf:"varint,2,opt,name=duration_minutes" 
json:"duration_minutes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateChannelRequest) Reset() { *m = CreateChannelRequest{} } +func (m *CreateChannelRequest) String() string { return proto.CompactTextString(m) } +func (*CreateChannelRequest) ProtoMessage() {} + +func (m *CreateChannelRequest) GetApplicationKey() string { + if m != nil && m.ApplicationKey != nil { + return *m.ApplicationKey + } + return "" +} + +func (m *CreateChannelRequest) GetDurationMinutes() int32 { + if m != nil && m.DurationMinutes != nil { + return *m.DurationMinutes + } + return 0 +} + +type CreateChannelResponse struct { + Token *string `protobuf:"bytes,2,opt,name=token" json:"token,omitempty"` + DurationMinutes *int32 `protobuf:"varint,3,opt,name=duration_minutes" json:"duration_minutes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateChannelResponse) Reset() { *m = CreateChannelResponse{} } +func (m *CreateChannelResponse) String() string { return proto.CompactTextString(m) } +func (*CreateChannelResponse) ProtoMessage() {} + +func (m *CreateChannelResponse) GetToken() string { + if m != nil && m.Token != nil { + return *m.Token + } + return "" +} + +func (m *CreateChannelResponse) GetDurationMinutes() int32 { + if m != nil && m.DurationMinutes != nil { + return *m.DurationMinutes + } + return 0 +} + +type SendMessageRequest struct { + ApplicationKey *string `protobuf:"bytes,1,req,name=application_key" json:"application_key,omitempty"` + Message *string `protobuf:"bytes,2,req,name=message" json:"message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SendMessageRequest) Reset() { *m = SendMessageRequest{} } +func (m *SendMessageRequest) String() string { return proto.CompactTextString(m) } +func (*SendMessageRequest) ProtoMessage() {} + +func (m *SendMessageRequest) GetApplicationKey() string { + if m != nil && m.ApplicationKey != nil { + return *m.ApplicationKey + } + return "" +} + +func (m *SendMessageRequest) GetMessage() string { + if m != nil && m.Message != nil { + return *m.Message + } + return "" +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/channel/channel_service.proto b/vendor/google.golang.org/appengine/internal/channel/channel_service.proto new file mode 100644 index 0000000..2b5a918 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/channel/channel_service.proto @@ -0,0 +1,30 @@ +syntax = "proto2"; +option go_package = "channel"; + +package appengine; + +message ChannelServiceError { + enum ErrorCode { + OK = 0; + INTERNAL_ERROR = 1; + INVALID_CHANNEL_KEY = 2; + BAD_MESSAGE = 3; + INVALID_CHANNEL_TOKEN_DURATION = 4; + APPID_ALIAS_REQUIRED = 5; + } +} + +message CreateChannelRequest { + required string application_key = 1; + optional int32 duration_minutes = 2; +} + +message CreateChannelResponse { + optional string token = 2; + optional int32 duration_minutes = 3; +} + +message SendMessageRequest { + required string application_key = 1; + required string message = 2; +} diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go new file mode 100644 index 0000000..8613cb7 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go @@ -0,0 +1,2778 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto +// DO NOT EDIT! + +/* +Package datastore is a generated protocol buffer package. 
+ +It is generated from these files: + google.golang.org/appengine/internal/datastore/datastore_v3.proto + +It has these top-level messages: + Action + PropertyValue + Property + Path + Reference + User + EntityProto + CompositeProperty + Index + CompositeIndex + IndexPostfix + IndexPosition + Snapshot + InternalHeader + Transaction + Query + CompiledQuery + CompiledCursor + Cursor + Error + Cost + GetRequest + GetResponse + PutRequest + PutResponse + TouchRequest + TouchResponse + DeleteRequest + DeleteResponse + NextRequest + QueryResult + AllocateIdsRequest + AllocateIdsResponse + CompositeIndices + AddActionsRequest + AddActionsResponse + BeginTransactionRequest + CommitResponse +*/ +package datastore + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type Property_Meaning int32 + +const ( + Property_NO_MEANING Property_Meaning = 0 + Property_BLOB Property_Meaning = 14 + Property_TEXT Property_Meaning = 15 + Property_BYTESTRING Property_Meaning = 16 + Property_ATOM_CATEGORY Property_Meaning = 1 + Property_ATOM_LINK Property_Meaning = 2 + Property_ATOM_TITLE Property_Meaning = 3 + Property_ATOM_CONTENT Property_Meaning = 4 + Property_ATOM_SUMMARY Property_Meaning = 5 + Property_ATOM_AUTHOR Property_Meaning = 6 + Property_GD_WHEN Property_Meaning = 7 + Property_GD_EMAIL Property_Meaning = 8 + Property_GEORSS_POINT Property_Meaning = 9 + Property_GD_IM Property_Meaning = 10 + Property_GD_PHONENUMBER Property_Meaning = 11 + Property_GD_POSTALADDRESS Property_Meaning = 12 + Property_GD_RATING Property_Meaning = 13 + Property_BLOBKEY Property_Meaning = 17 + Property_ENTITY_PROTO Property_Meaning = 19 + Property_INDEX_VALUE Property_Meaning = 18 +) + +var Property_Meaning_name = map[int32]string{ + 0: "NO_MEANING", + 14: "BLOB", + 15: "TEXT", + 16: "BYTESTRING", + 1: "ATOM_CATEGORY", + 2: "ATOM_LINK", + 3: "ATOM_TITLE", + 4: "ATOM_CONTENT", + 5: "ATOM_SUMMARY", + 6: "ATOM_AUTHOR", + 7: "GD_WHEN", + 8: "GD_EMAIL", + 9: "GEORSS_POINT", + 10: "GD_IM", + 11: "GD_PHONENUMBER", + 12: "GD_POSTALADDRESS", + 13: "GD_RATING", + 17: "BLOBKEY", + 19: "ENTITY_PROTO", + 18: "INDEX_VALUE", +} +var Property_Meaning_value = map[string]int32{ + "NO_MEANING": 0, + "BLOB": 14, + "TEXT": 15, + "BYTESTRING": 16, + "ATOM_CATEGORY": 1, + "ATOM_LINK": 2, + "ATOM_TITLE": 3, + "ATOM_CONTENT": 4, + "ATOM_SUMMARY": 5, + "ATOM_AUTHOR": 6, + "GD_WHEN": 7, + "GD_EMAIL": 8, + "GEORSS_POINT": 9, + "GD_IM": 10, + "GD_PHONENUMBER": 11, + "GD_POSTALADDRESS": 12, + "GD_RATING": 13, + "BLOBKEY": 17, + "ENTITY_PROTO": 19, + "INDEX_VALUE": 18, +} + +func (x Property_Meaning) Enum() *Property_Meaning { + p := new(Property_Meaning) + *p = x + return p +} +func (x Property_Meaning) String() string { + return proto.EnumName(Property_Meaning_name, int32(x)) +} +func (x *Property_Meaning) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning") + if err != nil { + return err + } + *x = Property_Meaning(value) + return nil +} + +type Property_FtsTokenizationOption int32 + +const ( + Property_HTML Property_FtsTokenizationOption = 1 + Property_ATOM Property_FtsTokenizationOption = 2 +) + +var Property_FtsTokenizationOption_name = map[int32]string{ + 1: "HTML", + 2: "ATOM", +} +var Property_FtsTokenizationOption_value = map[string]int32{ + "HTML": 1, + "ATOM": 2, +} + +func (x 
Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption { + p := new(Property_FtsTokenizationOption) + *p = x + return p +} +func (x Property_FtsTokenizationOption) String() string { + return proto.EnumName(Property_FtsTokenizationOption_name, int32(x)) +} +func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption") + if err != nil { + return err + } + *x = Property_FtsTokenizationOption(value) + return nil +} + +type EntityProto_Kind int32 + +const ( + EntityProto_GD_CONTACT EntityProto_Kind = 1 + EntityProto_GD_EVENT EntityProto_Kind = 2 + EntityProto_GD_MESSAGE EntityProto_Kind = 3 +) + +var EntityProto_Kind_name = map[int32]string{ + 1: "GD_CONTACT", + 2: "GD_EVENT", + 3: "GD_MESSAGE", +} +var EntityProto_Kind_value = map[string]int32{ + "GD_CONTACT": 1, + "GD_EVENT": 2, + "GD_MESSAGE": 3, +} + +func (x EntityProto_Kind) Enum() *EntityProto_Kind { + p := new(EntityProto_Kind) + *p = x + return p +} +func (x EntityProto_Kind) String() string { + return proto.EnumName(EntityProto_Kind_name, int32(x)) +} +func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind") + if err != nil { + return err + } + *x = EntityProto_Kind(value) + return nil +} + +type Index_Property_Direction int32 + +const ( + Index_Property_ASCENDING Index_Property_Direction = 1 + Index_Property_DESCENDING Index_Property_Direction = 2 +) + +var Index_Property_Direction_name = map[int32]string{ + 1: "ASCENDING", + 2: "DESCENDING", +} +var Index_Property_Direction_value = map[string]int32{ + "ASCENDING": 1, + "DESCENDING": 2, +} + +func (x Index_Property_Direction) Enum() *Index_Property_Direction { + p := new(Index_Property_Direction) + *p = x + return p +} +func (x Index_Property_Direction) String() string { + return proto.EnumName(Index_Property_Direction_name, int32(x)) +} +func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction") + if err != nil { + return err + } + *x = Index_Property_Direction(value) + return nil +} + +type CompositeIndex_State int32 + +const ( + CompositeIndex_WRITE_ONLY CompositeIndex_State = 1 + CompositeIndex_READ_WRITE CompositeIndex_State = 2 + CompositeIndex_DELETED CompositeIndex_State = 3 + CompositeIndex_ERROR CompositeIndex_State = 4 +) + +var CompositeIndex_State_name = map[int32]string{ + 1: "WRITE_ONLY", + 2: "READ_WRITE", + 3: "DELETED", + 4: "ERROR", +} +var CompositeIndex_State_value = map[string]int32{ + "WRITE_ONLY": 1, + "READ_WRITE": 2, + "DELETED": 3, + "ERROR": 4, +} + +func (x CompositeIndex_State) Enum() *CompositeIndex_State { + p := new(CompositeIndex_State) + *p = x + return p +} +func (x CompositeIndex_State) String() string { + return proto.EnumName(CompositeIndex_State_name, int32(x)) +} +func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State") + if err != nil { + return err + } + *x = CompositeIndex_State(value) + return nil +} + +type Snapshot_Status int32 + +const ( + Snapshot_INACTIVE Snapshot_Status = 0 + Snapshot_ACTIVE Snapshot_Status = 1 +) + +var Snapshot_Status_name = map[int32]string{ + 0: "INACTIVE", + 1: "ACTIVE", +} +var Snapshot_Status_value = map[string]int32{ + "INACTIVE": 0, + "ACTIVE": 1, +} 
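
[Editor's note, not part of the generated file: the Enum/String/UnmarshalJSON trio repeated above is the standard proto2 enum surface emitted by protoc-gen-go. Enum() allocates a pointer, which is how optional enum fields are populated; String() resolves through the _name map; UnmarshalJSON resolves through the _value map. A minimal usage sketch, illustrative only and assuming it sits inside this generated package where the identifiers are visible:

	st := Snapshot_ACTIVE.Enum()       // *Snapshot_Status, the form optional enum fields take
	name := st.String()                // "ACTIVE", looked up via Snapshot_Status_name
	val := Snapshot_Status_value[name] // 1, the reverse lookup UnmarshalJSON performs
	_ = val
]
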
+ +func (x Snapshot_Status) Enum() *Snapshot_Status { + p := new(Snapshot_Status) + *p = x + return p +} +func (x Snapshot_Status) String() string { + return proto.EnumName(Snapshot_Status_name, int32(x)) +} +func (x *Snapshot_Status) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status") + if err != nil { + return err + } + *x = Snapshot_Status(value) + return nil +} + +type Query_Hint int32 + +const ( + Query_ORDER_FIRST Query_Hint = 1 + Query_ANCESTOR_FIRST Query_Hint = 2 + Query_FILTER_FIRST Query_Hint = 3 +) + +var Query_Hint_name = map[int32]string{ + 1: "ORDER_FIRST", + 2: "ANCESTOR_FIRST", + 3: "FILTER_FIRST", +} +var Query_Hint_value = map[string]int32{ + "ORDER_FIRST": 1, + "ANCESTOR_FIRST": 2, + "FILTER_FIRST": 3, +} + +func (x Query_Hint) Enum() *Query_Hint { + p := new(Query_Hint) + *p = x + return p +} +func (x Query_Hint) String() string { + return proto.EnumName(Query_Hint_name, int32(x)) +} +func (x *Query_Hint) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint") + if err != nil { + return err + } + *x = Query_Hint(value) + return nil +} + +type Query_Filter_Operator int32 + +const ( + Query_Filter_LESS_THAN Query_Filter_Operator = 1 + Query_Filter_LESS_THAN_OR_EQUAL Query_Filter_Operator = 2 + Query_Filter_GREATER_THAN Query_Filter_Operator = 3 + Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4 + Query_Filter_EQUAL Query_Filter_Operator = 5 + Query_Filter_IN Query_Filter_Operator = 6 + Query_Filter_EXISTS Query_Filter_Operator = 7 +) + +var Query_Filter_Operator_name = map[int32]string{ + 1: "LESS_THAN", + 2: "LESS_THAN_OR_EQUAL", + 3: "GREATER_THAN", + 4: "GREATER_THAN_OR_EQUAL", + 5: "EQUAL", + 6: "IN", + 7: "EXISTS", +} +var Query_Filter_Operator_value = map[string]int32{ + "LESS_THAN": 1, + "LESS_THAN_OR_EQUAL": 2, + "GREATER_THAN": 3, + "GREATER_THAN_OR_EQUAL": 4, + "EQUAL": 5, + "IN": 6, + "EXISTS": 7, +} + +func (x Query_Filter_Operator) Enum() *Query_Filter_Operator { + p := new(Query_Filter_Operator) + *p = x + return p +} +func (x Query_Filter_Operator) String() string { + return proto.EnumName(Query_Filter_Operator_name, int32(x)) +} +func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator") + if err != nil { + return err + } + *x = Query_Filter_Operator(value) + return nil +} + +type Query_Order_Direction int32 + +const ( + Query_Order_ASCENDING Query_Order_Direction = 1 + Query_Order_DESCENDING Query_Order_Direction = 2 +) + +var Query_Order_Direction_name = map[int32]string{ + 1: "ASCENDING", + 2: "DESCENDING", +} +var Query_Order_Direction_value = map[string]int32{ + "ASCENDING": 1, + "DESCENDING": 2, +} + +func (x Query_Order_Direction) Enum() *Query_Order_Direction { + p := new(Query_Order_Direction) + *p = x + return p +} +func (x Query_Order_Direction) String() string { + return proto.EnumName(Query_Order_Direction_name, int32(x)) +} +func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction") + if err != nil { + return err + } + *x = Query_Order_Direction(value) + return nil +} + +type Error_ErrorCode int32 + +const ( + Error_BAD_REQUEST Error_ErrorCode = 1 + Error_CONCURRENT_TRANSACTION Error_ErrorCode = 2 + Error_INTERNAL_ERROR Error_ErrorCode = 3 + Error_NEED_INDEX Error_ErrorCode = 4 + 
Error_TIMEOUT Error_ErrorCode = 5 + Error_PERMISSION_DENIED Error_ErrorCode = 6 + Error_BIGTABLE_ERROR Error_ErrorCode = 7 + Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8 + Error_CAPABILITY_DISABLED Error_ErrorCode = 9 + Error_TRY_ALTERNATE_BACKEND Error_ErrorCode = 10 + Error_SAFE_TIME_TOO_OLD Error_ErrorCode = 11 +) + +var Error_ErrorCode_name = map[int32]string{ + 1: "BAD_REQUEST", + 2: "CONCURRENT_TRANSACTION", + 3: "INTERNAL_ERROR", + 4: "NEED_INDEX", + 5: "TIMEOUT", + 6: "PERMISSION_DENIED", + 7: "BIGTABLE_ERROR", + 8: "COMMITTED_BUT_STILL_APPLYING", + 9: "CAPABILITY_DISABLED", + 10: "TRY_ALTERNATE_BACKEND", + 11: "SAFE_TIME_TOO_OLD", +} +var Error_ErrorCode_value = map[string]int32{ + "BAD_REQUEST": 1, + "CONCURRENT_TRANSACTION": 2, + "INTERNAL_ERROR": 3, + "NEED_INDEX": 4, + "TIMEOUT": 5, + "PERMISSION_DENIED": 6, + "BIGTABLE_ERROR": 7, + "COMMITTED_BUT_STILL_APPLYING": 8, + "CAPABILITY_DISABLED": 9, + "TRY_ALTERNATE_BACKEND": 10, + "SAFE_TIME_TOO_OLD": 11, +} + +func (x Error_ErrorCode) Enum() *Error_ErrorCode { + p := new(Error_ErrorCode) + *p = x + return p +} +func (x Error_ErrorCode) String() string { + return proto.EnumName(Error_ErrorCode_name, int32(x)) +} +func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode") + if err != nil { + return err + } + *x = Error_ErrorCode(value) + return nil +} + +type PutRequest_AutoIdPolicy int32 + +const ( + PutRequest_CURRENT PutRequest_AutoIdPolicy = 0 + PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1 +) + +var PutRequest_AutoIdPolicy_name = map[int32]string{ + 0: "CURRENT", + 1: "SEQUENTIAL", +} +var PutRequest_AutoIdPolicy_value = map[string]int32{ + "CURRENT": 0, + "SEQUENTIAL": 1, +} + +func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy { + p := new(PutRequest_AutoIdPolicy) + *p = x + return p +} +func (x PutRequest_AutoIdPolicy) String() string { + return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x)) +} +func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy") + if err != nil { + return err + } + *x = PutRequest_AutoIdPolicy(value) + return nil +} + +type Action struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Action) Reset() { *m = Action{} } +func (m *Action) String() string { return proto.CompactTextString(m) } +func (*Action) ProtoMessage() {} + +type PropertyValue struct { + Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"` + BooleanValue *bool `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"` + StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"` + Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue" json:"pointvalue,omitempty"` + Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue" json:"uservalue,omitempty"` + Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue" json:"referencevalue,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyValue) Reset() { *m = PropertyValue{} } +func (m *PropertyValue) String() string { return proto.CompactTextString(m) } +func (*PropertyValue) ProtoMessage() {} + +func (m *PropertyValue) GetInt64Value() int64 { + if m != nil && m.Int64Value != nil { + 
return *m.Int64Value + } + return 0 +} + +func (m *PropertyValue) GetBooleanValue() bool { + if m != nil && m.BooleanValue != nil { + return *m.BooleanValue + } + return false +} + +func (m *PropertyValue) GetStringValue() string { + if m != nil && m.StringValue != nil { + return *m.StringValue + } + return "" +} + +func (m *PropertyValue) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue { + if m != nil { + return m.Pointvalue + } + return nil +} + +func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue { + if m != nil { + return m.Uservalue + } + return nil +} + +func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue { + if m != nil { + return m.Referencevalue + } + return nil +} + +type PropertyValue_PointValue struct { + X *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"` + Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} } +func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) } +func (*PropertyValue_PointValue) ProtoMessage() {} + +func (m *PropertyValue_PointValue) GetX() float64 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +func (m *PropertyValue_PointValue) GetY() float64 { + if m != nil && m.Y != nil { + return *m.Y + } + return 0 +} + +type PropertyValue_UserValue struct { + Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"` + AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain" json:"auth_domain,omitempty"` + Nickname *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"` + FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity" json:"federated_identity,omitempty"` + FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider" json:"federated_provider,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} } +func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) } +func (*PropertyValue_UserValue) ProtoMessage() {} + +func (m *PropertyValue_UserValue) GetEmail() string { + if m != nil && m.Email != nil { + return *m.Email + } + return "" +} + +func (m *PropertyValue_UserValue) GetAuthDomain() string { + if m != nil && m.AuthDomain != nil { + return *m.AuthDomain + } + return "" +} + +func (m *PropertyValue_UserValue) GetNickname() string { + if m != nil && m.Nickname != nil { + return *m.Nickname + } + return "" +} + +func (m *PropertyValue_UserValue) GetFederatedIdentity() string { + if m != nil && m.FederatedIdentity != nil { + return *m.FederatedIdentity + } + return "" +} + +func (m *PropertyValue_UserValue) GetFederatedProvider() string { + if m != nil && m.FederatedProvider != nil { + return *m.FederatedProvider + } + return "" +} + +type PropertyValue_ReferenceValue struct { + App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` + NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"` + Pathelement []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement" json:"pathelement,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} } +func (m *PropertyValue_ReferenceValue) String() 
string { return proto.CompactTextString(m) } +func (*PropertyValue_ReferenceValue) ProtoMessage() {} + +func (m *PropertyValue_ReferenceValue) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *PropertyValue_ReferenceValue) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement { + if m != nil { + return m.Pathelement + } + return nil +} + +type PropertyValue_ReferenceValue_PathElement struct { + Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"` + Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"` + Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyValue_ReferenceValue_PathElement) Reset() { + *m = PropertyValue_ReferenceValue_PathElement{} +} +func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) } +func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {} + +func (m *PropertyValue_ReferenceValue_PathElement) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 { + if m != nil && m.Id != nil { + return *m.Id + } + return 0 +} + +func (m *PropertyValue_ReferenceValue_PathElement) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +type Property struct { + Meaning *Property_Meaning `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"` + MeaningUri *string `protobuf:"bytes,2,opt,name=meaning_uri" json:"meaning_uri,omitempty"` + Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` + Value *PropertyValue `protobuf:"bytes,5,req,name=value" json:"value,omitempty"` + Multiple *bool `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"` + Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"` + FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"` + Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Property) Reset() { *m = Property{} } +func (m *Property) String() string { return proto.CompactTextString(m) } +func (*Property) ProtoMessage() {} + +const Default_Property_Meaning Property_Meaning = Property_NO_MEANING +const Default_Property_Searchable bool = false +const Default_Property_Locale string = "en" + +func (m *Property) GetMeaning() Property_Meaning { + if m != nil && m.Meaning != nil { + return *m.Meaning + } + return Default_Property_Meaning +} + +func (m *Property) GetMeaningUri() string { + if m != nil && m.MeaningUri != nil { + return *m.MeaningUri + } + return "" +} + +func (m *Property) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Property) GetValue() *PropertyValue { + if m != nil { + return m.Value + } + return nil +} + +func (m *Property) GetMultiple() bool { + if m != nil && m.Multiple != nil { + return *m.Multiple + } + return false +} + +func (m *Property) GetSearchable() bool { + if m != nil && m.Searchable != nil { + return *m.Searchable + } + return Default_Property_Searchable +} + 
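
[Editor's note, not part of the generated file: every scalar getter above follows the same nil-safe pattern — it tolerates both a nil receiver and an unset pointer field, returning either the Go zero value or, where the .proto declares a default (def=0, def=en), the matching Default_* constant. A short sketch of what this buys callers, illustrative only; proto.String comes from github.com/golang/protobuf/proto, which this file already imports:

	var p *Property
	_ = p.GetSearchable() // false: nil receiver is safe, no panic
	_ = p.GetMeaning()    // Property_NO_MEANING, via Default_Property_Meaning

	q := &Property{Name: proto.String("color")}
	_ = q.GetName()       // "color": set fields dereference the pointer
]
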
+func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption { + if m != nil && m.FtsTokenizationOption != nil { + return *m.FtsTokenizationOption + } + return Property_HTML +} + +func (m *Property) GetLocale() string { + if m != nil && m.Locale != nil { + return *m.Locale + } + return Default_Property_Locale +} + +type Path struct { + Element []*Path_Element `protobuf:"group,1,rep,name=Element" json:"element,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Path) Reset() { *m = Path{} } +func (m *Path) String() string { return proto.CompactTextString(m) } +func (*Path) ProtoMessage() {} + +func (m *Path) GetElement() []*Path_Element { + if m != nil { + return m.Element + } + return nil +} + +type Path_Element struct { + Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"` + Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"` + Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Path_Element) Reset() { *m = Path_Element{} } +func (m *Path_Element) String() string { return proto.CompactTextString(m) } +func (*Path_Element) ProtoMessage() {} + +func (m *Path_Element) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +func (m *Path_Element) GetId() int64 { + if m != nil && m.Id != nil { + return *m.Id + } + return 0 +} + +func (m *Path_Element) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +type Reference struct { + App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` + NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"` + Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Reference) Reset() { *m = Reference{} } +func (m *Reference) String() string { return proto.CompactTextString(m) } +func (*Reference) ProtoMessage() {} + +func (m *Reference) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *Reference) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *Reference) GetPath() *Path { + if m != nil { + return m.Path + } + return nil +} + +type User struct { + Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"` + AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain" json:"auth_domain,omitempty"` + Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"` + FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity" json:"federated_identity,omitempty"` + FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider" json:"federated_provider,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *User) Reset() { *m = User{} } +func (m *User) String() string { return proto.CompactTextString(m) } +func (*User) ProtoMessage() {} + +func (m *User) GetEmail() string { + if m != nil && m.Email != nil { + return *m.Email + } + return "" +} + +func (m *User) GetAuthDomain() string { + if m != nil && m.AuthDomain != nil { + return *m.AuthDomain + } + return "" +} + +func (m *User) GetNickname() string { + if m != nil && m.Nickname != nil { + return *m.Nickname + } + return "" +} + +func (m *User) GetFederatedIdentity() string { + if m != nil && m.FederatedIdentity != nil { + return *m.FederatedIdentity + } + return "" +} + +func (m *User) GetFederatedProvider() string { + if m != nil && 
m.FederatedProvider != nil { + return *m.FederatedProvider + } + return "" +} + +type EntityProto struct { + Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"` + EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group" json:"entity_group,omitempty"` + Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"` + Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"` + KindUri *string `protobuf:"bytes,5,opt,name=kind_uri" json:"kind_uri,omitempty"` + Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"` + RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property" json:"raw_property,omitempty"` + Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EntityProto) Reset() { *m = EntityProto{} } +func (m *EntityProto) String() string { return proto.CompactTextString(m) } +func (*EntityProto) ProtoMessage() {} + +func (m *EntityProto) GetKey() *Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *EntityProto) GetEntityGroup() *Path { + if m != nil { + return m.EntityGroup + } + return nil +} + +func (m *EntityProto) GetOwner() *User { + if m != nil { + return m.Owner + } + return nil +} + +func (m *EntityProto) GetKind() EntityProto_Kind { + if m != nil && m.Kind != nil { + return *m.Kind + } + return EntityProto_GD_CONTACT +} + +func (m *EntityProto) GetKindUri() string { + if m != nil && m.KindUri != nil { + return *m.KindUri + } + return "" +} + +func (m *EntityProto) GetProperty() []*Property { + if m != nil { + return m.Property + } + return nil +} + +func (m *EntityProto) GetRawProperty() []*Property { + if m != nil { + return m.RawProperty + } + return nil +} + +func (m *EntityProto) GetRank() int32 { + if m != nil && m.Rank != nil { + return *m.Rank + } + return 0 +} + +type CompositeProperty struct { + IndexId *int64 `protobuf:"varint,1,req,name=index_id" json:"index_id,omitempty"` + Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompositeProperty) Reset() { *m = CompositeProperty{} } +func (m *CompositeProperty) String() string { return proto.CompactTextString(m) } +func (*CompositeProperty) ProtoMessage() {} + +func (m *CompositeProperty) GetIndexId() int64 { + if m != nil && m.IndexId != nil { + return *m.IndexId + } + return 0 +} + +func (m *CompositeProperty) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +type Index struct { + EntityType *string `protobuf:"bytes,1,req,name=entity_type" json:"entity_type,omitempty"` + Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"` + Property []*Index_Property `protobuf:"group,2,rep,name=Property" json:"property,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Index) Reset() { *m = Index{} } +func (m *Index) String() string { return proto.CompactTextString(m) } +func (*Index) ProtoMessage() {} + +func (m *Index) GetEntityType() string { + if m != nil && m.EntityType != nil { + return *m.EntityType + } + return "" +} + +func (m *Index) GetAncestor() bool { + if m != nil && m.Ancestor != nil { + return *m.Ancestor + } + return false +} + +func (m *Index) GetProperty() []*Index_Property { + if m != nil { + return m.Property + } + return nil +} + +type Index_Property struct { + Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` + Direction 
*Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Index_Property) Reset() { *m = Index_Property{} } +func (m *Index_Property) String() string { return proto.CompactTextString(m) } +func (*Index_Property) ProtoMessage() {} + +const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING + +func (m *Index_Property) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Index_Property) GetDirection() Index_Property_Direction { + if m != nil && m.Direction != nil { + return *m.Direction + } + return Default_Index_Property_Direction +} + +type CompositeIndex struct { + AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` + Id *int64 `protobuf:"varint,2,req,name=id" json:"id,omitempty"` + Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"` + State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"` + OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,def=0" json:"only_use_if_required,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompositeIndex) Reset() { *m = CompositeIndex{} } +func (m *CompositeIndex) String() string { return proto.CompactTextString(m) } +func (*CompositeIndex) ProtoMessage() {} + +const Default_CompositeIndex_OnlyUseIfRequired bool = false + +func (m *CompositeIndex) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *CompositeIndex) GetId() int64 { + if m != nil && m.Id != nil { + return *m.Id + } + return 0 +} + +func (m *CompositeIndex) GetDefinition() *Index { + if m != nil { + return m.Definition + } + return nil +} + +func (m *CompositeIndex) GetState() CompositeIndex_State { + if m != nil && m.State != nil { + return *m.State + } + return CompositeIndex_WRITE_ONLY +} + +func (m *CompositeIndex) GetOnlyUseIfRequired() bool { + if m != nil && m.OnlyUseIfRequired != nil { + return *m.OnlyUseIfRequired + } + return Default_CompositeIndex_OnlyUseIfRequired +} + +type IndexPostfix struct { + IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value" json:"index_value,omitempty"` + Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"` + Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexPostfix) Reset() { *m = IndexPostfix{} } +func (m *IndexPostfix) String() string { return proto.CompactTextString(m) } +func (*IndexPostfix) ProtoMessage() {} + +const Default_IndexPostfix_Before bool = true + +func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue { + if m != nil { + return m.IndexValue + } + return nil +} + +func (m *IndexPostfix) GetKey() *Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *IndexPostfix) GetBefore() bool { + if m != nil && m.Before != nil { + return *m.Before + } + return Default_IndexPostfix_Before +} + +type IndexPostfix_IndexValue struct { + PropertyName *string `protobuf:"bytes,1,req,name=property_name" json:"property_name,omitempty"` + Value *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexPostfix_IndexValue) Reset() { *m = IndexPostfix_IndexValue{} } +func (m *IndexPostfix_IndexValue) String() 
string { return proto.CompactTextString(m) } +func (*IndexPostfix_IndexValue) ProtoMessage() {} + +func (m *IndexPostfix_IndexValue) GetPropertyName() string { + if m != nil && m.PropertyName != nil { + return *m.PropertyName + } + return "" +} + +func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue { + if m != nil { + return m.Value + } + return nil +} + +type IndexPosition struct { + Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` + Before *bool `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexPosition) Reset() { *m = IndexPosition{} } +func (m *IndexPosition) String() string { return proto.CompactTextString(m) } +func (*IndexPosition) ProtoMessage() {} + +const Default_IndexPosition_Before bool = true + +func (m *IndexPosition) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *IndexPosition) GetBefore() bool { + if m != nil && m.Before != nil { + return *m.Before + } + return Default_IndexPosition_Before +} + +type Snapshot struct { + Ts *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} + +func (m *Snapshot) GetTs() int64 { + if m != nil && m.Ts != nil { + return *m.Ts + } + return 0 +} + +type InternalHeader struct { + Qos *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *InternalHeader) Reset() { *m = InternalHeader{} } +func (m *InternalHeader) String() string { return proto.CompactTextString(m) } +func (*InternalHeader) ProtoMessage() {} + +func (m *InternalHeader) GetQos() string { + if m != nil && m.Qos != nil { + return *m.Qos + } + return "" +} + +type Transaction struct { + Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"` + Handle *uint64 `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"` + App *string `protobuf:"bytes,2,req,name=app" json:"app,omitempty"` + MarkChanges *bool `protobuf:"varint,3,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Transaction) Reset() { *m = Transaction{} } +func (m *Transaction) String() string { return proto.CompactTextString(m) } +func (*Transaction) ProtoMessage() {} + +const Default_Transaction_MarkChanges bool = false + +func (m *Transaction) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *Transaction) GetHandle() uint64 { + if m != nil && m.Handle != nil { + return *m.Handle + } + return 0 +} + +func (m *Transaction) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *Transaction) GetMarkChanges() bool { + if m != nil && m.MarkChanges != nil { + return *m.MarkChanges + } + return Default_Transaction_MarkChanges +} + +type Query struct { + Header *InternalHeader `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"` + App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` + NameSpace *string `protobuf:"bytes,29,opt,name=name_space" json:"name_space,omitempty"` + Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"` + Ancestor *Reference `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"` + Filter []*Query_Filter `protobuf:"group,4,rep,name=Filter" json:"filter,omitempty"` + 
SearchQuery *string `protobuf:"bytes,8,opt,name=search_query" json:"search_query,omitempty"` + Order []*Query_Order `protobuf:"group,9,rep,name=Order" json:"order,omitempty"` + Hint *Query_Hint `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"` + Count *int32 `protobuf:"varint,23,opt,name=count" json:"count,omitempty"` + Offset *int32 `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"` + Limit *int32 `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"` + CompiledCursor *CompiledCursor `protobuf:"bytes,30,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"` + EndCompiledCursor *CompiledCursor `protobuf:"bytes,31,opt,name=end_compiled_cursor" json:"end_compiled_cursor,omitempty"` + CompositeIndex []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index" json:"composite_index,omitempty"` + RequirePerfectPlan *bool `protobuf:"varint,20,opt,name=require_perfect_plan,def=0" json:"require_perfect_plan,omitempty"` + KeysOnly *bool `protobuf:"varint,21,opt,name=keys_only,def=0" json:"keys_only,omitempty"` + Transaction *Transaction `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"` + Compile *bool `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"` + FailoverMs *int64 `protobuf:"varint,26,opt,name=failover_ms" json:"failover_ms,omitempty"` + Strong *bool `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"` + PropertyName []string `protobuf:"bytes,33,rep,name=property_name" json:"property_name,omitempty"` + GroupByPropertyName []string `protobuf:"bytes,34,rep,name=group_by_property_name" json:"group_by_property_name,omitempty"` + Distinct *bool `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"` + MinSafeTimeSeconds *int64 `protobuf:"varint,35,opt,name=min_safe_time_seconds" json:"min_safe_time_seconds,omitempty"` + SafeReplicaName []string `protobuf:"bytes,36,rep,name=safe_replica_name" json:"safe_replica_name,omitempty"` + PersistOffset *bool `protobuf:"varint,37,opt,name=persist_offset,def=0" json:"persist_offset,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Query) Reset() { *m = Query{} } +func (m *Query) String() string { return proto.CompactTextString(m) } +func (*Query) ProtoMessage() {} + +const Default_Query_Offset int32 = 0 +const Default_Query_RequirePerfectPlan bool = false +const Default_Query_KeysOnly bool = false +const Default_Query_Compile bool = false +const Default_Query_PersistOffset bool = false + +func (m *Query) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *Query) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *Query) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *Query) GetKind() string { + if m != nil && m.Kind != nil { + return *m.Kind + } + return "" +} + +func (m *Query) GetAncestor() *Reference { + if m != nil { + return m.Ancestor + } + return nil +} + +func (m *Query) GetFilter() []*Query_Filter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *Query) GetSearchQuery() string { + if m != nil && m.SearchQuery != nil { + return *m.SearchQuery + } + return "" +} + +func (m *Query) GetOrder() []*Query_Order { + if m != nil { + return m.Order + } + return nil +} + +func (m *Query) GetHint() Query_Hint { + if m != nil && m.Hint != nil { + return *m.Hint + } + return Query_ORDER_FIRST +} + +func (m *Query) GetCount() int32 { + if m != nil && 
m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *Query) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return Default_Query_Offset +} + +func (m *Query) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +func (m *Query) GetCompiledCursor() *CompiledCursor { + if m != nil { + return m.CompiledCursor + } + return nil +} + +func (m *Query) GetEndCompiledCursor() *CompiledCursor { + if m != nil { + return m.EndCompiledCursor + } + return nil +} + +func (m *Query) GetCompositeIndex() []*CompositeIndex { + if m != nil { + return m.CompositeIndex + } + return nil +} + +func (m *Query) GetRequirePerfectPlan() bool { + if m != nil && m.RequirePerfectPlan != nil { + return *m.RequirePerfectPlan + } + return Default_Query_RequirePerfectPlan +} + +func (m *Query) GetKeysOnly() bool { + if m != nil && m.KeysOnly != nil { + return *m.KeysOnly + } + return Default_Query_KeysOnly +} + +func (m *Query) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *Query) GetCompile() bool { + if m != nil && m.Compile != nil { + return *m.Compile + } + return Default_Query_Compile +} + +func (m *Query) GetFailoverMs() int64 { + if m != nil && m.FailoverMs != nil { + return *m.FailoverMs + } + return 0 +} + +func (m *Query) GetStrong() bool { + if m != nil && m.Strong != nil { + return *m.Strong + } + return false +} + +func (m *Query) GetPropertyName() []string { + if m != nil { + return m.PropertyName + } + return nil +} + +func (m *Query) GetGroupByPropertyName() []string { + if m != nil { + return m.GroupByPropertyName + } + return nil +} + +func (m *Query) GetDistinct() bool { + if m != nil && m.Distinct != nil { + return *m.Distinct + } + return false +} + +func (m *Query) GetMinSafeTimeSeconds() int64 { + if m != nil && m.MinSafeTimeSeconds != nil { + return *m.MinSafeTimeSeconds + } + return 0 +} + +func (m *Query) GetSafeReplicaName() []string { + if m != nil { + return m.SafeReplicaName + } + return nil +} + +func (m *Query) GetPersistOffset() bool { + if m != nil && m.PersistOffset != nil { + return *m.PersistOffset + } + return Default_Query_PersistOffset +} + +type Query_Filter struct { + Op *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"` + Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Query_Filter) Reset() { *m = Query_Filter{} } +func (m *Query_Filter) String() string { return proto.CompactTextString(m) } +func (*Query_Filter) ProtoMessage() {} + +func (m *Query_Filter) GetOp() Query_Filter_Operator { + if m != nil && m.Op != nil { + return *m.Op + } + return Query_Filter_LESS_THAN +} + +func (m *Query_Filter) GetProperty() []*Property { + if m != nil { + return m.Property + } + return nil +} + +type Query_Order struct { + Property *string `protobuf:"bytes,10,req,name=property" json:"property,omitempty"` + Direction *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Query_Order) Reset() { *m = Query_Order{} } +func (m *Query_Order) String() string { return proto.CompactTextString(m) } +func (*Query_Order) ProtoMessage() {} + +const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING + +func (m *Query_Order) GetProperty() string { + if m != nil && 
m.Property != nil { + return *m.Property + } + return "" +} + +func (m *Query_Order) GetDirection() Query_Order_Direction { + if m != nil && m.Direction != nil { + return *m.Direction + } + return Default_Query_Order_Direction +} + +type CompiledQuery struct { + Primaryscan *CompiledQuery_PrimaryScan `protobuf:"group,1,req,name=PrimaryScan" json:"primaryscan,omitempty"` + Mergejoinscan []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan" json:"mergejoinscan,omitempty"` + IndexDef *Index `protobuf:"bytes,21,opt,name=index_def" json:"index_def,omitempty"` + Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"` + Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"` + KeysOnly *bool `protobuf:"varint,12,req,name=keys_only" json:"keys_only,omitempty"` + PropertyName []string `protobuf:"bytes,24,rep,name=property_name" json:"property_name,omitempty"` + DistinctInfixSize *int32 `protobuf:"varint,25,opt,name=distinct_infix_size" json:"distinct_infix_size,omitempty"` + Entityfilter *CompiledQuery_EntityFilter `protobuf:"group,13,opt,name=EntityFilter" json:"entityfilter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledQuery) Reset() { *m = CompiledQuery{} } +func (m *CompiledQuery) String() string { return proto.CompactTextString(m) } +func (*CompiledQuery) ProtoMessage() {} + +const Default_CompiledQuery_Offset int32 = 0 + +func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan { + if m != nil { + return m.Primaryscan + } + return nil +} + +func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan { + if m != nil { + return m.Mergejoinscan + } + return nil +} + +func (m *CompiledQuery) GetIndexDef() *Index { + if m != nil { + return m.IndexDef + } + return nil +} + +func (m *CompiledQuery) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return Default_CompiledQuery_Offset +} + +func (m *CompiledQuery) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +func (m *CompiledQuery) GetKeysOnly() bool { + if m != nil && m.KeysOnly != nil { + return *m.KeysOnly + } + return false +} + +func (m *CompiledQuery) GetPropertyName() []string { + if m != nil { + return m.PropertyName + } + return nil +} + +func (m *CompiledQuery) GetDistinctInfixSize() int32 { + if m != nil && m.DistinctInfixSize != nil { + return *m.DistinctInfixSize + } + return 0 +} + +func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter { + if m != nil { + return m.Entityfilter + } + return nil +} + +type CompiledQuery_PrimaryScan struct { + IndexName *string `protobuf:"bytes,2,opt,name=index_name" json:"index_name,omitempty"` + StartKey *string `protobuf:"bytes,3,opt,name=start_key" json:"start_key,omitempty"` + StartInclusive *bool `protobuf:"varint,4,opt,name=start_inclusive" json:"start_inclusive,omitempty"` + EndKey *string `protobuf:"bytes,5,opt,name=end_key" json:"end_key,omitempty"` + EndInclusive *bool `protobuf:"varint,6,opt,name=end_inclusive" json:"end_inclusive,omitempty"` + StartPostfixValue []string `protobuf:"bytes,22,rep,name=start_postfix_value" json:"start_postfix_value,omitempty"` + EndPostfixValue []string `protobuf:"bytes,23,rep,name=end_postfix_value" json:"end_postfix_value,omitempty"` + EndUnappliedLogTimestampUs *int64 `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us" json:"end_unapplied_log_timestamp_us,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledQuery_PrimaryScan) 
Reset() { *m = CompiledQuery_PrimaryScan{} } +func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) } +func (*CompiledQuery_PrimaryScan) ProtoMessage() {} + +func (m *CompiledQuery_PrimaryScan) GetIndexName() string { + if m != nil && m.IndexName != nil { + return *m.IndexName + } + return "" +} + +func (m *CompiledQuery_PrimaryScan) GetStartKey() string { + if m != nil && m.StartKey != nil { + return *m.StartKey + } + return "" +} + +func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool { + if m != nil && m.StartInclusive != nil { + return *m.StartInclusive + } + return false +} + +func (m *CompiledQuery_PrimaryScan) GetEndKey() string { + if m != nil && m.EndKey != nil { + return *m.EndKey + } + return "" +} + +func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool { + if m != nil && m.EndInclusive != nil { + return *m.EndInclusive + } + return false +} + +func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string { + if m != nil { + return m.StartPostfixValue + } + return nil +} + +func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string { + if m != nil { + return m.EndPostfixValue + } + return nil +} + +func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 { + if m != nil && m.EndUnappliedLogTimestampUs != nil { + return *m.EndUnappliedLogTimestampUs + } + return 0 +} + +type CompiledQuery_MergeJoinScan struct { + IndexName *string `protobuf:"bytes,8,req,name=index_name" json:"index_name,omitempty"` + PrefixValue []string `protobuf:"bytes,9,rep,name=prefix_value" json:"prefix_value,omitempty"` + ValuePrefix *bool `protobuf:"varint,20,opt,name=value_prefix,def=0" json:"value_prefix,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledQuery_MergeJoinScan) Reset() { *m = CompiledQuery_MergeJoinScan{} } +func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) } +func (*CompiledQuery_MergeJoinScan) ProtoMessage() {} + +const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false + +func (m *CompiledQuery_MergeJoinScan) GetIndexName() string { + if m != nil && m.IndexName != nil { + return *m.IndexName + } + return "" +} + +func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string { + if m != nil { + return m.PrefixValue + } + return nil +} + +func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool { + if m != nil && m.ValuePrefix != nil { + return *m.ValuePrefix + } + return Default_CompiledQuery_MergeJoinScan_ValuePrefix +} + +type CompiledQuery_EntityFilter struct { + Distinct *bool `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"` + Kind *string `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"` + Ancestor *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledQuery_EntityFilter) Reset() { *m = CompiledQuery_EntityFilter{} } +func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) } +func (*CompiledQuery_EntityFilter) ProtoMessage() {} + +const Default_CompiledQuery_EntityFilter_Distinct bool = false + +func (m *CompiledQuery_EntityFilter) GetDistinct() bool { + if m != nil && m.Distinct != nil { + return *m.Distinct + } + return Default_CompiledQuery_EntityFilter_Distinct +} + +func (m *CompiledQuery_EntityFilter) GetKind() string { + if m != nil && m.Kind != nil { + return *m.Kind + } + return "" +} + +func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference { + if m != nil { + return 
m.Ancestor + } + return nil +} + +type CompiledCursor struct { + Position *CompiledCursor_Position `protobuf:"group,2,opt,name=Position" json:"position,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledCursor) Reset() { *m = CompiledCursor{} } +func (m *CompiledCursor) String() string { return proto.CompactTextString(m) } +func (*CompiledCursor) ProtoMessage() {} + +func (m *CompiledCursor) GetPosition() *CompiledCursor_Position { + if m != nil { + return m.Position + } + return nil +} + +type CompiledCursor_Position struct { + StartKey *string `protobuf:"bytes,27,opt,name=start_key" json:"start_key,omitempty"` + Indexvalue []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue" json:"indexvalue,omitempty"` + Key *Reference `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"` + StartInclusive *bool `protobuf:"varint,28,opt,name=start_inclusive,def=1" json:"start_inclusive,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledCursor_Position) Reset() { *m = CompiledCursor_Position{} } +func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) } +func (*CompiledCursor_Position) ProtoMessage() {} + +const Default_CompiledCursor_Position_StartInclusive bool = true + +func (m *CompiledCursor_Position) GetStartKey() string { + if m != nil && m.StartKey != nil { + return *m.StartKey + } + return "" +} + +func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue { + if m != nil { + return m.Indexvalue + } + return nil +} + +func (m *CompiledCursor_Position) GetKey() *Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *CompiledCursor_Position) GetStartInclusive() bool { + if m != nil && m.StartInclusive != nil { + return *m.StartInclusive + } + return Default_CompiledCursor_Position_StartInclusive +} + +type CompiledCursor_Position_IndexValue struct { + Property *string `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"` + Value *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledCursor_Position_IndexValue) Reset() { *m = CompiledCursor_Position_IndexValue{} } +func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) } +func (*CompiledCursor_Position_IndexValue) ProtoMessage() {} + +func (m *CompiledCursor_Position_IndexValue) GetProperty() string { + if m != nil && m.Property != nil { + return *m.Property + } + return "" +} + +func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue { + if m != nil { + return m.Value + } + return nil +} + +type Cursor struct { + Cursor *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"` + App *string `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Cursor) Reset() { *m = Cursor{} } +func (m *Cursor) String() string { return proto.CompactTextString(m) } +func (*Cursor) ProtoMessage() {} + +func (m *Cursor) GetCursor() uint64 { + if m != nil && m.Cursor != nil { + return *m.Cursor + } + return 0 +} + +func (m *Cursor) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +type Error struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Error) Reset() { *m = Error{} } +func (m *Error) String() string { return proto.CompactTextString(m) } +func (*Error) ProtoMessage() {} + +type Cost struct { + IndexWrites *int32 `protobuf:"varint,1,opt,name=index_writes" 
json:"index_writes,omitempty"` + IndexWriteBytes *int32 `protobuf:"varint,2,opt,name=index_write_bytes" json:"index_write_bytes,omitempty"` + EntityWrites *int32 `protobuf:"varint,3,opt,name=entity_writes" json:"entity_writes,omitempty"` + EntityWriteBytes *int32 `protobuf:"varint,4,opt,name=entity_write_bytes" json:"entity_write_bytes,omitempty"` + Commitcost *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost" json:"commitcost,omitempty"` + ApproximateStorageDelta *int32 `protobuf:"varint,8,opt,name=approximate_storage_delta" json:"approximate_storage_delta,omitempty"` + IdSequenceUpdates *int32 `protobuf:"varint,9,opt,name=id_sequence_updates" json:"id_sequence_updates,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Cost) Reset() { *m = Cost{} } +func (m *Cost) String() string { return proto.CompactTextString(m) } +func (*Cost) ProtoMessage() {} + +func (m *Cost) GetIndexWrites() int32 { + if m != nil && m.IndexWrites != nil { + return *m.IndexWrites + } + return 0 +} + +func (m *Cost) GetIndexWriteBytes() int32 { + if m != nil && m.IndexWriteBytes != nil { + return *m.IndexWriteBytes + } + return 0 +} + +func (m *Cost) GetEntityWrites() int32 { + if m != nil && m.EntityWrites != nil { + return *m.EntityWrites + } + return 0 +} + +func (m *Cost) GetEntityWriteBytes() int32 { + if m != nil && m.EntityWriteBytes != nil { + return *m.EntityWriteBytes + } + return 0 +} + +func (m *Cost) GetCommitcost() *Cost_CommitCost { + if m != nil { + return m.Commitcost + } + return nil +} + +func (m *Cost) GetApproximateStorageDelta() int32 { + if m != nil && m.ApproximateStorageDelta != nil { + return *m.ApproximateStorageDelta + } + return 0 +} + +func (m *Cost) GetIdSequenceUpdates() int32 { + if m != nil && m.IdSequenceUpdates != nil { + return *m.IdSequenceUpdates + } + return 0 +} + +type Cost_CommitCost struct { + RequestedEntityPuts *int32 `protobuf:"varint,6,opt,name=requested_entity_puts" json:"requested_entity_puts,omitempty"` + RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes" json:"requested_entity_deletes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Cost_CommitCost) Reset() { *m = Cost_CommitCost{} } +func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) } +func (*Cost_CommitCost) ProtoMessage() {} + +func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 { + if m != nil && m.RequestedEntityPuts != nil { + return *m.RequestedEntityPuts + } + return 0 +} + +func (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 { + if m != nil && m.RequestedEntityDeletes != nil { + return *m.RequestedEntityDeletes + } + return 0 +} + +type GetRequest struct { + Header *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"` + Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` + Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` + FailoverMs *int64 `protobuf:"varint,3,opt,name=failover_ms" json:"failover_ms,omitempty"` + Strong *bool `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"` + AllowDeferred *bool `protobuf:"varint,5,opt,name=allow_deferred,def=0" json:"allow_deferred,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetRequest) Reset() { *m = GetRequest{} } +func (m *GetRequest) String() string { return proto.CompactTextString(m) } +func (*GetRequest) ProtoMessage() {} + +const Default_GetRequest_AllowDeferred bool = false + +func (m *GetRequest) GetHeader() *InternalHeader { + if m != 
nil { + return m.Header + } + return nil +} + +func (m *GetRequest) GetKey() []*Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *GetRequest) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *GetRequest) GetFailoverMs() int64 { + if m != nil && m.FailoverMs != nil { + return *m.FailoverMs + } + return 0 +} + +func (m *GetRequest) GetStrong() bool { + if m != nil && m.Strong != nil { + return *m.Strong + } + return false +} + +func (m *GetRequest) GetAllowDeferred() bool { + if m != nil && m.AllowDeferred != nil { + return *m.AllowDeferred + } + return Default_GetRequest_AllowDeferred +} + +type GetResponse struct { + Entity []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity" json:"entity,omitempty"` + Deferred []*Reference `protobuf:"bytes,5,rep,name=deferred" json:"deferred,omitempty"` + InOrder *bool `protobuf:"varint,6,opt,name=in_order,def=1" json:"in_order,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetResponse) Reset() { *m = GetResponse{} } +func (m *GetResponse) String() string { return proto.CompactTextString(m) } +func (*GetResponse) ProtoMessage() {} + +const Default_GetResponse_InOrder bool = true + +func (m *GetResponse) GetEntity() []*GetResponse_Entity { + if m != nil { + return m.Entity + } + return nil +} + +func (m *GetResponse) GetDeferred() []*Reference { + if m != nil { + return m.Deferred + } + return nil +} + +func (m *GetResponse) GetInOrder() bool { + if m != nil && m.InOrder != nil { + return *m.InOrder + } + return Default_GetResponse_InOrder +} + +type GetResponse_Entity struct { + Entity *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"` + Key *Reference `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"` + Version *int64 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetResponse_Entity) Reset() { *m = GetResponse_Entity{} } +func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) } +func (*GetResponse_Entity) ProtoMessage() {} + +func (m *GetResponse_Entity) GetEntity() *EntityProto { + if m != nil { + return m.Entity + } + return nil +} + +func (m *GetResponse_Entity) GetKey() *Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *GetResponse_Entity) GetVersion() int64 { + if m != nil && m.Version != nil { + return *m.Version + } + return 0 +} + +type PutRequest struct { + Header *InternalHeader `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"` + Entity []*EntityProto `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"` + Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` + CompositeIndex []*CompositeIndex `protobuf:"bytes,3,rep,name=composite_index" json:"composite_index,omitempty"` + Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"` + Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"` + MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` + Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` + AutoIdPolicy *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PutRequest) Reset() { *m = PutRequest{} } +func (m *PutRequest) String() string { return 
proto.CompactTextString(m) } +func (*PutRequest) ProtoMessage() {} + +const Default_PutRequest_Trusted bool = false +const Default_PutRequest_Force bool = false +const Default_PutRequest_MarkChanges bool = false +const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT + +func (m *PutRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *PutRequest) GetEntity() []*EntityProto { + if m != nil { + return m.Entity + } + return nil +} + +func (m *PutRequest) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *PutRequest) GetCompositeIndex() []*CompositeIndex { + if m != nil { + return m.CompositeIndex + } + return nil +} + +func (m *PutRequest) GetTrusted() bool { + if m != nil && m.Trusted != nil { + return *m.Trusted + } + return Default_PutRequest_Trusted +} + +func (m *PutRequest) GetForce() bool { + if m != nil && m.Force != nil { + return *m.Force + } + return Default_PutRequest_Force +} + +func (m *PutRequest) GetMarkChanges() bool { + if m != nil && m.MarkChanges != nil { + return *m.MarkChanges + } + return Default_PutRequest_MarkChanges +} + +func (m *PutRequest) GetSnapshot() []*Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy { + if m != nil && m.AutoIdPolicy != nil { + return *m.AutoIdPolicy + } + return Default_PutRequest_AutoIdPolicy +} + +type PutResponse struct { + Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` + Cost *Cost `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"` + Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PutResponse) Reset() { *m = PutResponse{} } +func (m *PutResponse) String() string { return proto.CompactTextString(m) } +func (*PutResponse) ProtoMessage() {} + +func (m *PutResponse) GetKey() []*Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *PutResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +func (m *PutResponse) GetVersion() []int64 { + if m != nil { + return m.Version + } + return nil +} + +type TouchRequest struct { + Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"` + Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` + CompositeIndex []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index" json:"composite_index,omitempty"` + Force *bool `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"` + Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TouchRequest) Reset() { *m = TouchRequest{} } +func (m *TouchRequest) String() string { return proto.CompactTextString(m) } +func (*TouchRequest) ProtoMessage() {} + +const Default_TouchRequest_Force bool = false + +func (m *TouchRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *TouchRequest) GetKey() []*Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex { + if m != nil { + return m.CompositeIndex + } + return nil +} + +func (m *TouchRequest) GetForce() bool { + if m != nil && m.Force != nil { + return *m.Force + } + return Default_TouchRequest_Force +} + +func (m *TouchRequest) GetSnapshot() []*Snapshot { + if m != nil { + return m.Snapshot + } 
+ return nil +} + +type TouchResponse struct { + Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TouchResponse) Reset() { *m = TouchResponse{} } +func (m *TouchResponse) String() string { return proto.CompactTextString(m) } +func (*TouchResponse) ProtoMessage() {} + +func (m *TouchResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +type DeleteRequest struct { + Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"` + Key []*Reference `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"` + Transaction *Transaction `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"` + Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"` + Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"` + MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` + Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } +func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteRequest) ProtoMessage() {} + +const Default_DeleteRequest_Trusted bool = false +const Default_DeleteRequest_Force bool = false +const Default_DeleteRequest_MarkChanges bool = false + +func (m *DeleteRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *DeleteRequest) GetKey() []*Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *DeleteRequest) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *DeleteRequest) GetTrusted() bool { + if m != nil && m.Trusted != nil { + return *m.Trusted + } + return Default_DeleteRequest_Trusted +} + +func (m *DeleteRequest) GetForce() bool { + if m != nil && m.Force != nil { + return *m.Force + } + return Default_DeleteRequest_Force +} + +func (m *DeleteRequest) GetMarkChanges() bool { + if m != nil && m.MarkChanges != nil { + return *m.MarkChanges + } + return Default_DeleteRequest_MarkChanges +} + +func (m *DeleteRequest) GetSnapshot() []*Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +type DeleteResponse struct { + Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` + Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteResponse) Reset() { *m = DeleteResponse{} } +func (m *DeleteResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteResponse) ProtoMessage() {} + +func (m *DeleteResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +func (m *DeleteResponse) GetVersion() []int64 { + if m != nil { + return m.Version + } + return nil +} + +type NextRequest struct { + Header *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"` + Cursor *Cursor `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"` + Count *int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"` + Offset *int32 `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"` + Compile *bool `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NextRequest) Reset() { *m = NextRequest{} } +func (m *NextRequest) String() string { return 
proto.CompactTextString(m) } +func (*NextRequest) ProtoMessage() {} + +const Default_NextRequest_Offset int32 = 0 +const Default_NextRequest_Compile bool = false + +func (m *NextRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *NextRequest) GetCursor() *Cursor { + if m != nil { + return m.Cursor + } + return nil +} + +func (m *NextRequest) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *NextRequest) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return Default_NextRequest_Offset +} + +func (m *NextRequest) GetCompile() bool { + if m != nil && m.Compile != nil { + return *m.Compile + } + return Default_NextRequest_Compile +} + +type QueryResult struct { + Cursor *Cursor `protobuf:"bytes,1,opt,name=cursor" json:"cursor,omitempty"` + Result []*EntityProto `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"` + SkippedResults *int32 `protobuf:"varint,7,opt,name=skipped_results" json:"skipped_results,omitempty"` + MoreResults *bool `protobuf:"varint,3,req,name=more_results" json:"more_results,omitempty"` + KeysOnly *bool `protobuf:"varint,4,opt,name=keys_only" json:"keys_only,omitempty"` + IndexOnly *bool `protobuf:"varint,9,opt,name=index_only" json:"index_only,omitempty"` + SmallOps *bool `protobuf:"varint,10,opt,name=small_ops" json:"small_ops,omitempty"` + CompiledQuery *CompiledQuery `protobuf:"bytes,5,opt,name=compiled_query" json:"compiled_query,omitempty"` + CompiledCursor *CompiledCursor `protobuf:"bytes,6,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"` + Index []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"` + Version []int64 `protobuf:"varint,11,rep,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *QueryResult) Reset() { *m = QueryResult{} } +func (m *QueryResult) String() string { return proto.CompactTextString(m) } +func (*QueryResult) ProtoMessage() {} + +func (m *QueryResult) GetCursor() *Cursor { + if m != nil { + return m.Cursor + } + return nil +} + +func (m *QueryResult) GetResult() []*EntityProto { + if m != nil { + return m.Result + } + return nil +} + +func (m *QueryResult) GetSkippedResults() int32 { + if m != nil && m.SkippedResults != nil { + return *m.SkippedResults + } + return 0 +} + +func (m *QueryResult) GetMoreResults() bool { + if m != nil && m.MoreResults != nil { + return *m.MoreResults + } + return false +} + +func (m *QueryResult) GetKeysOnly() bool { + if m != nil && m.KeysOnly != nil { + return *m.KeysOnly + } + return false +} + +func (m *QueryResult) GetIndexOnly() bool { + if m != nil && m.IndexOnly != nil { + return *m.IndexOnly + } + return false +} + +func (m *QueryResult) GetSmallOps() bool { + if m != nil && m.SmallOps != nil { + return *m.SmallOps + } + return false +} + +func (m *QueryResult) GetCompiledQuery() *CompiledQuery { + if m != nil { + return m.CompiledQuery + } + return nil +} + +func (m *QueryResult) GetCompiledCursor() *CompiledCursor { + if m != nil { + return m.CompiledCursor + } + return nil +} + +func (m *QueryResult) GetIndex() []*CompositeIndex { + if m != nil { + return m.Index + } + return nil +} + +func (m *QueryResult) GetVersion() []int64 { + if m != nil { + return m.Version + } + return nil +} + +type AllocateIdsRequest struct { + Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"` + ModelKey *Reference `protobuf:"bytes,1,opt,name=model_key" 
json:"model_key,omitempty"` + Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"` + Max *int64 `protobuf:"varint,3,opt,name=max" json:"max,omitempty"` + Reserve []*Reference `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} } +func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) } +func (*AllocateIdsRequest) ProtoMessage() {} + +func (m *AllocateIdsRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AllocateIdsRequest) GetModelKey() *Reference { + if m != nil { + return m.ModelKey + } + return nil +} + +func (m *AllocateIdsRequest) GetSize() int64 { + if m != nil && m.Size != nil { + return *m.Size + } + return 0 +} + +func (m *AllocateIdsRequest) GetMax() int64 { + if m != nil && m.Max != nil { + return *m.Max + } + return 0 +} + +func (m *AllocateIdsRequest) GetReserve() []*Reference { + if m != nil { + return m.Reserve + } + return nil +} + +type AllocateIdsResponse struct { + Start *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"` + End *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"` + Cost *Cost `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} } +func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) } +func (*AllocateIdsResponse) ProtoMessage() {} + +func (m *AllocateIdsResponse) GetStart() int64 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *AllocateIdsResponse) GetEnd() int64 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *AllocateIdsResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +type CompositeIndices struct { + Index []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompositeIndices) Reset() { *m = CompositeIndices{} } +func (m *CompositeIndices) String() string { return proto.CompactTextString(m) } +func (*CompositeIndices) ProtoMessage() {} + +func (m *CompositeIndices) GetIndex() []*CompositeIndex { + if m != nil { + return m.Index + } + return nil +} + +type AddActionsRequest struct { + Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` + Transaction *Transaction `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"` + Action []*Action `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AddActionsRequest) Reset() { *m = AddActionsRequest{} } +func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) } +func (*AddActionsRequest) ProtoMessage() {} + +func (m *AddActionsRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AddActionsRequest) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *AddActionsRequest) GetAction() []*Action { + if m != nil { + return m.Action + } + return nil +} + +type AddActionsResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *AddActionsResponse) Reset() { *m = AddActionsResponse{} } +func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) } +func (*AddActionsResponse) ProtoMessage() {} + +type 
BeginTransactionRequest struct { + Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` + App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` + AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,def=0" json:"allow_multiple_eg,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } +func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } +func (*BeginTransactionRequest) ProtoMessage() {} + +const Default_BeginTransactionRequest_AllowMultipleEg bool = false + +func (m *BeginTransactionRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *BeginTransactionRequest) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *BeginTransactionRequest) GetAllowMultipleEg() bool { + if m != nil && m.AllowMultipleEg != nil { + return *m.AllowMultipleEg + } + return Default_BeginTransactionRequest_AllowMultipleEg +} + +type CommitResponse struct { + Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` + Version []*CommitResponse_Version `protobuf:"group,3,rep,name=Version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CommitResponse) Reset() { *m = CommitResponse{} } +func (m *CommitResponse) String() string { return proto.CompactTextString(m) } +func (*CommitResponse) ProtoMessage() {} + +func (m *CommitResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +func (m *CommitResponse) GetVersion() []*CommitResponse_Version { + if m != nil { + return m.Version + } + return nil +} + +type CommitResponse_Version struct { + RootEntityKey *Reference `protobuf:"bytes,4,req,name=root_entity_key" json:"root_entity_key,omitempty"` + Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} } +func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) } +func (*CommitResponse_Version) ProtoMessage() {} + +func (m *CommitResponse_Version) GetRootEntityKey() *Reference { + if m != nil { + return m.RootEntityKey + } + return nil +} + +func (m *CommitResponse_Version) GetVersion() int64 { + if m != nil && m.Version != nil { + return *m.Version + } + return 0 +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto new file mode 100755 index 0000000..e76f126 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto @@ -0,0 +1,541 @@ +syntax = "proto2"; +option go_package = "datastore"; + +package appengine; + +message Action{} + +message PropertyValue { + optional int64 int64Value = 1; + optional bool booleanValue = 2; + optional string stringValue = 3; + optional double doubleValue = 4; + + optional group PointValue = 5 { + required double x = 6; + required double y = 7; + } + + optional group UserValue = 8 { + required string email = 9; + required string auth_domain = 10; + optional string nickname = 11; + optional string federated_identity = 21; + optional string federated_provider = 22; + } + + optional group ReferenceValue = 12 { + required string app = 13; + optional string name_space = 20; + repeated group PathElement = 14 { + required string type = 15; + optional 
int64 id = 16; + optional string name = 17; + } + } +} + +message Property { + enum Meaning { + NO_MEANING = 0; + BLOB = 14; + TEXT = 15; + BYTESTRING = 16; + + ATOM_CATEGORY = 1; + ATOM_LINK = 2; + ATOM_TITLE = 3; + ATOM_CONTENT = 4; + ATOM_SUMMARY = 5; + ATOM_AUTHOR = 6; + + GD_WHEN = 7; + GD_EMAIL = 8; + GEORSS_POINT = 9; + GD_IM = 10; + + GD_PHONENUMBER = 11; + GD_POSTALADDRESS = 12; + + GD_RATING = 13; + + BLOBKEY = 17; + ENTITY_PROTO = 19; + + INDEX_VALUE = 18; + }; + + optional Meaning meaning = 1 [default = NO_MEANING]; + optional string meaning_uri = 2; + + required string name = 3; + + required PropertyValue value = 5; + + required bool multiple = 4; + + optional bool searchable = 6 [default=false]; + + enum FtsTokenizationOption { + HTML = 1; + ATOM = 2; + } + + optional FtsTokenizationOption fts_tokenization_option = 8; + + optional string locale = 9 [default = "en"]; +} + +message Path { + repeated group Element = 1 { + required string type = 2; + optional int64 id = 3; + optional string name = 4; + } +} + +message Reference { + required string app = 13; + optional string name_space = 20; + required Path path = 14; +} + +message User { + required string email = 1; + required string auth_domain = 2; + optional string nickname = 3; + optional string federated_identity = 6; + optional string federated_provider = 7; +} + +message EntityProto { + required Reference key = 13; + required Path entity_group = 16; + optional User owner = 17; + + enum Kind { + GD_CONTACT = 1; + GD_EVENT = 2; + GD_MESSAGE = 3; + } + optional Kind kind = 4; + optional string kind_uri = 5; + + repeated Property property = 14; + repeated Property raw_property = 15; + + optional int32 rank = 18; +} + +message CompositeProperty { + required int64 index_id = 1; + repeated string value = 2; +} + +message Index { + required string entity_type = 1; + required bool ancestor = 5; + repeated group Property = 2 { + required string name = 3; + enum Direction { + ASCENDING = 1; + DESCENDING = 2; + } + optional Direction direction = 4 [default = ASCENDING]; + } +} + +message CompositeIndex { + required string app_id = 1; + required int64 id = 2; + required Index definition = 3; + + enum State { + WRITE_ONLY = 1; + READ_WRITE = 2; + DELETED = 3; + ERROR = 4; + } + required State state = 4; + + optional bool only_use_if_required = 6 [default = false]; +} + +message IndexPostfix { + message IndexValue { + required string property_name = 1; + required PropertyValue value = 2; + } + + repeated IndexValue index_value = 1; + + optional Reference key = 2; + + optional bool before = 3 [default=true]; +} + +message IndexPosition { + optional string key = 1; + + optional bool before = 2 [default=true]; +} + +message Snapshot { + enum Status { + INACTIVE = 0; + ACTIVE = 1; + } + + required int64 ts = 1; +} + +message InternalHeader { + optional string qos = 1; +} + +message Transaction { + optional InternalHeader header = 4; + required fixed64 handle = 1; + required string app = 2; + optional bool mark_changes = 3 [default = false]; +} + +message Query { + optional InternalHeader header = 39; + + required string app = 1; + optional string name_space = 29; + + optional string kind = 3; + optional Reference ancestor = 17; + + repeated group Filter = 4 { + enum Operator { + LESS_THAN = 1; + LESS_THAN_OR_EQUAL = 2; + GREATER_THAN = 3; + GREATER_THAN_OR_EQUAL = 4; + EQUAL = 5; + IN = 6; + EXISTS = 7; + } + + required Operator op = 6; + repeated Property property = 14; + } + + optional string search_query = 8; + + repeated group Order = 9 
{ + enum Direction { + ASCENDING = 1; + DESCENDING = 2; + } + + required string property = 10; + optional Direction direction = 11 [default = ASCENDING]; + } + + enum Hint { + ORDER_FIRST = 1; + ANCESTOR_FIRST = 2; + FILTER_FIRST = 3; + } + optional Hint hint = 18; + + optional int32 count = 23; + + optional int32 offset = 12 [default = 0]; + + optional int32 limit = 16; + + optional CompiledCursor compiled_cursor = 30; + optional CompiledCursor end_compiled_cursor = 31; + + repeated CompositeIndex composite_index = 19; + + optional bool require_perfect_plan = 20 [default = false]; + + optional bool keys_only = 21 [default = false]; + + optional Transaction transaction = 22; + + optional bool compile = 25 [default = false]; + + optional int64 failover_ms = 26; + + optional bool strong = 32; + + repeated string property_name = 33; + + repeated string group_by_property_name = 34; + + optional bool distinct = 24; + + optional int64 min_safe_time_seconds = 35; + + repeated string safe_replica_name = 36; + + optional bool persist_offset = 37 [default=false]; +} + +message CompiledQuery { + required group PrimaryScan = 1 { + optional string index_name = 2; + + optional string start_key = 3; + optional bool start_inclusive = 4; + optional string end_key = 5; + optional bool end_inclusive = 6; + + repeated string start_postfix_value = 22; + repeated string end_postfix_value = 23; + + optional int64 end_unapplied_log_timestamp_us = 19; + } + + repeated group MergeJoinScan = 7 { + required string index_name = 8; + + repeated string prefix_value = 9; + + optional bool value_prefix = 20 [default=false]; + } + + optional Index index_def = 21; + + optional int32 offset = 10 [default = 0]; + + optional int32 limit = 11; + + required bool keys_only = 12; + + repeated string property_name = 24; + + optional int32 distinct_infix_size = 25; + + optional group EntityFilter = 13 { + optional bool distinct = 14 [default=false]; + + optional string kind = 17; + optional Reference ancestor = 18; + } +} + +message CompiledCursor { + optional group Position = 2 { + optional string start_key = 27; + + repeated group IndexValue = 29 { + optional string property = 30; + required PropertyValue value = 31; + } + + optional Reference key = 32; + + optional bool start_inclusive = 28 [default=true]; + } +} + +message Cursor { + required fixed64 cursor = 1; + + optional string app = 2; +} + +message Error { + enum ErrorCode { + BAD_REQUEST = 1; + CONCURRENT_TRANSACTION = 2; + INTERNAL_ERROR = 3; + NEED_INDEX = 4; + TIMEOUT = 5; + PERMISSION_DENIED = 6; + BIGTABLE_ERROR = 7; + COMMITTED_BUT_STILL_APPLYING = 8; + CAPABILITY_DISABLED = 9; + TRY_ALTERNATE_BACKEND = 10; + SAFE_TIME_TOO_OLD = 11; + } +} + +message Cost { + optional int32 index_writes = 1; + optional int32 index_write_bytes = 2; + optional int32 entity_writes = 3; + optional int32 entity_write_bytes = 4; + optional group CommitCost = 5 { + optional int32 requested_entity_puts = 6; + optional int32 requested_entity_deletes = 7; + }; + optional int32 approximate_storage_delta = 8; + optional int32 id_sequence_updates = 9; +} + +message GetRequest { + optional InternalHeader header = 6; + + repeated Reference key = 1; + optional Transaction transaction = 2; + + optional int64 failover_ms = 3; + + optional bool strong = 4; + + optional bool allow_deferred = 5 [default=false]; +} + +message GetResponse { + repeated group Entity = 1 { + optional EntityProto entity = 2; + optional Reference key = 4; + + optional int64 version = 3; + } + + repeated Reference deferred = 5; 
+
+  optional bool in_order = 6 [default=true];
+}
+
+message PutRequest {
+  optional InternalHeader header = 11;
+
+  repeated EntityProto entity = 1;
+  optional Transaction transaction = 2;
+  repeated CompositeIndex composite_index = 3;
+
+  optional bool trusted = 4 [default = false];
+
+  optional bool force = 7 [default = false];
+
+  optional bool mark_changes = 8 [default = false];
+  repeated Snapshot snapshot = 9;
+
+  enum AutoIdPolicy {
+    CURRENT = 0;
+    SEQUENTIAL = 1;
+  }
+  optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT];
+}
+
+message PutResponse {
+  repeated Reference key = 1;
+  optional Cost cost = 2;
+  repeated int64 version = 3;
+}
+
+message TouchRequest {
+  optional InternalHeader header = 10;
+
+  repeated Reference key = 1;
+  repeated CompositeIndex composite_index = 2;
+  optional bool force = 3 [default = false];
+  repeated Snapshot snapshot = 9;
+}
+
+message TouchResponse {
+  optional Cost cost = 1;
+}
+
+message DeleteRequest {
+  optional InternalHeader header = 10;
+
+  repeated Reference key = 6;
+  optional Transaction transaction = 5;
+
+  optional bool trusted = 4 [default = false];
+
+  optional bool force = 7 [default = false];
+
+  optional bool mark_changes = 8 [default = false];
+  repeated Snapshot snapshot = 9;
+}
+
+message DeleteResponse {
+  optional Cost cost = 1;
+  repeated int64 version = 3;
+}
+
+message NextRequest {
+  optional InternalHeader header = 5;
+
+  required Cursor cursor = 1;
+  optional int32 count = 2;
+
+  optional int32 offset = 4 [default = 0];
+
+  optional bool compile = 3 [default = false];
+}
+
+message QueryResult {
+  optional Cursor cursor = 1;
+
+  repeated EntityProto result = 2;
+
+  optional int32 skipped_results = 7;
+
+  required bool more_results = 3;
+
+  optional bool keys_only = 4;
+
+  optional bool index_only = 9;
+
+  optional bool small_ops = 10;
+
+  optional CompiledQuery compiled_query = 5;
+
+  optional CompiledCursor compiled_cursor = 6;
+
+  repeated CompositeIndex index = 8;
+
+  repeated int64 version = 11;
+}
+
+message AllocateIdsRequest {
+  optional InternalHeader header = 4;
+
+  optional Reference model_key = 1;
+
+  optional int64 size = 2;
+
+  optional int64 max = 3;
+
+  repeated Reference reserve = 5;
+}
+
+message AllocateIdsResponse {
+  required int64 start = 1;
+  required int64 end = 2;
+  optional Cost cost = 3;
+}
+
+message CompositeIndices {
+  repeated CompositeIndex index = 1;
+}
+
+message AddActionsRequest {
+  optional InternalHeader header = 3;
+
+  required Transaction transaction = 1;
+  repeated Action action = 2;
+}
+
+message AddActionsResponse {
+}
+
+message BeginTransactionRequest {
+  optional InternalHeader header = 3;
+
+  required string app = 1;
+  optional bool allow_multiple_eg = 2 [default = false];
+}
+
+message CommitResponse {
+  optional Cost cost = 1;
+
+  repeated group Version = 3 {
+    required Reference root_entity_key = 4;
+    required int64 version = 5;
+  }
+}
diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go
new file mode 100644
index 0000000..d538701
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity.go
@@ -0,0 +1,14 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import netcontext "golang.org/x/net/context"
+
+// These functions are implementations of the wrapper functions
+// in ../appengine/identity.go. See that file for commentary.
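The AppID wrapper that follows strips the partition prefix from a fully-qualified App Engine app ID. The appID helper it calls is defined elsewhere in this internal package and is not shown in this hunk; the sketch below is a minimal, hypothetical stand-in for what it is assumed to do, given that a fully-qualified ID such as "s~myapp" carries a partition prefix before the '~' and the plain app ID is everything after it. The helper name stripPartition is invented for illustration.

    // Sketch only, not the vendored implementation: stripPartition
    // approximates the assumed behavior of the internal appID helper.
    package main

    import (
        "fmt"
        "strings"
    )

    func stripPartition(fullyQualified string) string {
        // "s~myapp" -> "myapp"; an ID with no partition passes through.
        if i := strings.Index(fullyQualified, "~"); i != -1 {
            return fullyQualified[i+1:]
        }
        return fullyQualified
    }

    func main() {
        fmt.Println(stripPartition("s~myapp")) // myapp
        fmt.Println(stripPartition("myapp"))   // myapp
    }

The inverse direction appears in identity_vm.go further down, where fullyQualifiedAppID re-attaches the partition as part + "~" + appID.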
+
+func AppID(c netcontext.Context) string {
+	return appID(FullyQualifiedAppID(c))
+}
diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go
new file mode 100644
index 0000000..e6b9227
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity_classic.go
@@ -0,0 +1,27 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import (
+	"appengine"
+
+	netcontext "golang.org/x/net/context"
+)
+
+func DefaultVersionHostname(ctx netcontext.Context) string {
+	return appengine.DefaultVersionHostname(fromContext(ctx))
+}
+
+func RequestID(ctx netcontext.Context) string  { return appengine.RequestID(fromContext(ctx)) }
+func Datacenter(_ netcontext.Context) string   { return appengine.Datacenter() }
+func ServerSoftware() string                   { return appengine.ServerSoftware() }
+func ModuleName(ctx netcontext.Context) string { return appengine.ModuleName(fromContext(ctx)) }
+func VersionID(ctx netcontext.Context) string  { return appengine.VersionID(fromContext(ctx)) }
+func InstanceID() string                       { return appengine.InstanceID() }
+func IsDevAppServer() bool                     { return appengine.IsDevAppServer() }
+
+func fullyQualifiedAppID(ctx netcontext.Context) string { return fromContext(ctx).FullyQualifiedAppID() }
diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go
new file mode 100644
index 0000000..ebe68b7
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity_vm.go
@@ -0,0 +1,97 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+	"net/http"
+	"os"
+
+	netcontext "golang.org/x/net/context"
+)
+
+// These functions are implementations of the wrapper functions
+// in ../appengine/identity.go. See that file for commentary.
+
+const (
+	hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname"
+	hRequestLogId           = "X-AppEngine-Request-Log-Id"
+	hDatacenter             = "X-AppEngine-Datacenter"
+)
+
+func ctxHeaders(ctx netcontext.Context) http.Header {
+	return fromContext(ctx).Request().Header
+}
+
+func DefaultVersionHostname(ctx netcontext.Context) string {
+	return ctxHeaders(ctx).Get(hDefaultVersionHostname)
+}
+
+func RequestID(ctx netcontext.Context) string {
+	return ctxHeaders(ctx).Get(hRequestLogId)
+}
+
+func Datacenter(ctx netcontext.Context) string {
+	return ctxHeaders(ctx).Get(hDatacenter)
+}
+
+func ServerSoftware() string {
+	// TODO(dsymonds): Remove fallback when we've verified this.
+	if s := os.Getenv("SERVER_SOFTWARE"); s != "" {
+		return s
+	}
+	return "Google App Engine/1.x.x"
+}
+
+// TODO(dsymonds): Remove the metadata fetches.
+
+func ModuleName(_ netcontext.Context) string {
+	if s := os.Getenv("GAE_MODULE_NAME"); s != "" {
+		return s
+	}
+	return string(mustGetMetadata("instance/attributes/gae_backend_name"))
+}
+
+func VersionID(_ netcontext.Context) string {
+	if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" {
+		return s1 + "." + s2
+	}
+	return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version"))
+}
+
+func InstanceID() string {
+	if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" {
+		return s
+	}
+	return string(mustGetMetadata("instance/attributes/gae_backend_instance"))
+}
+
+func partitionlessAppID() string {
+	// gae_project has everything except the partition prefix.
+	appID := os.Getenv("GAE_LONG_APP_ID")
+	if appID == "" {
+		appID = string(mustGetMetadata("instance/attributes/gae_project"))
+	}
+	return appID
+}
+
+func fullyQualifiedAppID(_ netcontext.Context) string {
+	appID := partitionlessAppID()
+
+	part := os.Getenv("GAE_PARTITION")
+	if part == "" {
+		part = string(mustGetMetadata("instance/attributes/gae_partition"))
+	}
+
+	if part != "" {
+		appID = part + "~" + appID
+	}
+	return appID
+}
+
+func IsDevAppServer() bool {
+	return os.Getenv("RUN_WITH_DEVAPPSERVER") != ""
+}
diff --git a/vendor/google.golang.org/appengine/internal/image/images_service.pb.go b/vendor/google.golang.org/appengine/internal/image/images_service.pb.go
new file mode 100644
index 0000000..ba7c722
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/image/images_service.pb.go
@@ -0,0 +1,845 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/image/images_service.proto
+// DO NOT EDIT!
+
+/*
+Package image is a generated protocol buffer package.
+
+It is generated from these files:
+	google.golang.org/appengine/internal/image/images_service.proto
+
+It has these top-level messages:
+	ImagesServiceError
+	ImagesServiceTransform
+	Transform
+	ImageData
+	InputSettings
+	OutputSettings
+	ImagesTransformRequest
+	ImagesTransformResponse
+	CompositeImageOptions
+	ImagesCanvas
+	ImagesCompositeRequest
+	ImagesCompositeResponse
+	ImagesHistogramRequest
+	ImagesHistogram
+	ImagesHistogramResponse
+	ImagesGetUrlBaseRequest
+	ImagesGetUrlBaseResponse
+	ImagesDeleteUrlBaseRequest
+	ImagesDeleteUrlBaseResponse
+*/
+package image
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
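The blank-identifier assignments that follow are a standard guard in generated Go code: the compiler rejects unused imports, and the generator cannot know in advance whether the messages it emits will reference every imported package, so each import is marked as used once up front. A minimal sketch of the same idiom, independent of this package (the package name example is invented for illustration):

    // example demonstrates the blank-identifier guard used by generated
    // code: the imports stay valid even if no other declaration in the
    // file happens to reference them.
    package example

    import (
        "fmt"
        "math"
    )

    // Assigning to _ counts as a use, so the file compiles regardless of
    // what the rest of the generated code ends up referencing.
    var _ = fmt.Errorf
    var _ = math.Inf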
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type ImagesServiceError_ErrorCode int32 + +const ( + ImagesServiceError_UNSPECIFIED_ERROR ImagesServiceError_ErrorCode = 1 + ImagesServiceError_BAD_TRANSFORM_DATA ImagesServiceError_ErrorCode = 2 + ImagesServiceError_NOT_IMAGE ImagesServiceError_ErrorCode = 3 + ImagesServiceError_BAD_IMAGE_DATA ImagesServiceError_ErrorCode = 4 + ImagesServiceError_IMAGE_TOO_LARGE ImagesServiceError_ErrorCode = 5 + ImagesServiceError_INVALID_BLOB_KEY ImagesServiceError_ErrorCode = 6 + ImagesServiceError_ACCESS_DENIED ImagesServiceError_ErrorCode = 7 + ImagesServiceError_OBJECT_NOT_FOUND ImagesServiceError_ErrorCode = 8 +) + +var ImagesServiceError_ErrorCode_name = map[int32]string{ + 1: "UNSPECIFIED_ERROR", + 2: "BAD_TRANSFORM_DATA", + 3: "NOT_IMAGE", + 4: "BAD_IMAGE_DATA", + 5: "IMAGE_TOO_LARGE", + 6: "INVALID_BLOB_KEY", + 7: "ACCESS_DENIED", + 8: "OBJECT_NOT_FOUND", +} +var ImagesServiceError_ErrorCode_value = map[string]int32{ + "UNSPECIFIED_ERROR": 1, + "BAD_TRANSFORM_DATA": 2, + "NOT_IMAGE": 3, + "BAD_IMAGE_DATA": 4, + "IMAGE_TOO_LARGE": 5, + "INVALID_BLOB_KEY": 6, + "ACCESS_DENIED": 7, + "OBJECT_NOT_FOUND": 8, +} + +func (x ImagesServiceError_ErrorCode) Enum() *ImagesServiceError_ErrorCode { + p := new(ImagesServiceError_ErrorCode) + *p = x + return p +} +func (x ImagesServiceError_ErrorCode) String() string { + return proto.EnumName(ImagesServiceError_ErrorCode_name, int32(x)) +} +func (x *ImagesServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ImagesServiceError_ErrorCode_value, data, "ImagesServiceError_ErrorCode") + if err != nil { + return err + } + *x = ImagesServiceError_ErrorCode(value) + return nil +} + +type ImagesServiceTransform_Type int32 + +const ( + ImagesServiceTransform_RESIZE ImagesServiceTransform_Type = 1 + ImagesServiceTransform_ROTATE ImagesServiceTransform_Type = 2 + ImagesServiceTransform_HORIZONTAL_FLIP ImagesServiceTransform_Type = 3 + ImagesServiceTransform_VERTICAL_FLIP ImagesServiceTransform_Type = 4 + ImagesServiceTransform_CROP ImagesServiceTransform_Type = 5 + ImagesServiceTransform_IM_FEELING_LUCKY ImagesServiceTransform_Type = 6 +) + +var ImagesServiceTransform_Type_name = map[int32]string{ + 1: "RESIZE", + 2: "ROTATE", + 3: "HORIZONTAL_FLIP", + 4: "VERTICAL_FLIP", + 5: "CROP", + 6: "IM_FEELING_LUCKY", +} +var ImagesServiceTransform_Type_value = map[string]int32{ + "RESIZE": 1, + "ROTATE": 2, + "HORIZONTAL_FLIP": 3, + "VERTICAL_FLIP": 4, + "CROP": 5, + "IM_FEELING_LUCKY": 6, +} + +func (x ImagesServiceTransform_Type) Enum() *ImagesServiceTransform_Type { + p := new(ImagesServiceTransform_Type) + *p = x + return p +} +func (x ImagesServiceTransform_Type) String() string { + return proto.EnumName(ImagesServiceTransform_Type_name, int32(x)) +} +func (x *ImagesServiceTransform_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ImagesServiceTransform_Type_value, data, "ImagesServiceTransform_Type") + if err != nil { + return err + } + *x = ImagesServiceTransform_Type(value) + return nil +} + +type InputSettings_ORIENTATION_CORRECTION_TYPE int32 + +const ( + InputSettings_UNCHANGED_ORIENTATION InputSettings_ORIENTATION_CORRECTION_TYPE = 0 + InputSettings_CORRECT_ORIENTATION InputSettings_ORIENTATION_CORRECTION_TYPE = 1 +) + +var InputSettings_ORIENTATION_CORRECTION_TYPE_name = map[int32]string{ + 0: "UNCHANGED_ORIENTATION", + 1: "CORRECT_ORIENTATION", +} +var InputSettings_ORIENTATION_CORRECTION_TYPE_value = map[string]int32{ + 
"UNCHANGED_ORIENTATION": 0, + "CORRECT_ORIENTATION": 1, +} + +func (x InputSettings_ORIENTATION_CORRECTION_TYPE) Enum() *InputSettings_ORIENTATION_CORRECTION_TYPE { + p := new(InputSettings_ORIENTATION_CORRECTION_TYPE) + *p = x + return p +} +func (x InputSettings_ORIENTATION_CORRECTION_TYPE) String() string { + return proto.EnumName(InputSettings_ORIENTATION_CORRECTION_TYPE_name, int32(x)) +} +func (x *InputSettings_ORIENTATION_CORRECTION_TYPE) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(InputSettings_ORIENTATION_CORRECTION_TYPE_value, data, "InputSettings_ORIENTATION_CORRECTION_TYPE") + if err != nil { + return err + } + *x = InputSettings_ORIENTATION_CORRECTION_TYPE(value) + return nil +} + +type OutputSettings_MIME_TYPE int32 + +const ( + OutputSettings_PNG OutputSettings_MIME_TYPE = 0 + OutputSettings_JPEG OutputSettings_MIME_TYPE = 1 + OutputSettings_WEBP OutputSettings_MIME_TYPE = 2 +) + +var OutputSettings_MIME_TYPE_name = map[int32]string{ + 0: "PNG", + 1: "JPEG", + 2: "WEBP", +} +var OutputSettings_MIME_TYPE_value = map[string]int32{ + "PNG": 0, + "JPEG": 1, + "WEBP": 2, +} + +func (x OutputSettings_MIME_TYPE) Enum() *OutputSettings_MIME_TYPE { + p := new(OutputSettings_MIME_TYPE) + *p = x + return p +} +func (x OutputSettings_MIME_TYPE) String() string { + return proto.EnumName(OutputSettings_MIME_TYPE_name, int32(x)) +} +func (x *OutputSettings_MIME_TYPE) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(OutputSettings_MIME_TYPE_value, data, "OutputSettings_MIME_TYPE") + if err != nil { + return err + } + *x = OutputSettings_MIME_TYPE(value) + return nil +} + +type CompositeImageOptions_ANCHOR int32 + +const ( + CompositeImageOptions_TOP_LEFT CompositeImageOptions_ANCHOR = 0 + CompositeImageOptions_TOP CompositeImageOptions_ANCHOR = 1 + CompositeImageOptions_TOP_RIGHT CompositeImageOptions_ANCHOR = 2 + CompositeImageOptions_LEFT CompositeImageOptions_ANCHOR = 3 + CompositeImageOptions_CENTER CompositeImageOptions_ANCHOR = 4 + CompositeImageOptions_RIGHT CompositeImageOptions_ANCHOR = 5 + CompositeImageOptions_BOTTOM_LEFT CompositeImageOptions_ANCHOR = 6 + CompositeImageOptions_BOTTOM CompositeImageOptions_ANCHOR = 7 + CompositeImageOptions_BOTTOM_RIGHT CompositeImageOptions_ANCHOR = 8 +) + +var CompositeImageOptions_ANCHOR_name = map[int32]string{ + 0: "TOP_LEFT", + 1: "TOP", + 2: "TOP_RIGHT", + 3: "LEFT", + 4: "CENTER", + 5: "RIGHT", + 6: "BOTTOM_LEFT", + 7: "BOTTOM", + 8: "BOTTOM_RIGHT", +} +var CompositeImageOptions_ANCHOR_value = map[string]int32{ + "TOP_LEFT": 0, + "TOP": 1, + "TOP_RIGHT": 2, + "LEFT": 3, + "CENTER": 4, + "RIGHT": 5, + "BOTTOM_LEFT": 6, + "BOTTOM": 7, + "BOTTOM_RIGHT": 8, +} + +func (x CompositeImageOptions_ANCHOR) Enum() *CompositeImageOptions_ANCHOR { + p := new(CompositeImageOptions_ANCHOR) + *p = x + return p +} +func (x CompositeImageOptions_ANCHOR) String() string { + return proto.EnumName(CompositeImageOptions_ANCHOR_name, int32(x)) +} +func (x *CompositeImageOptions_ANCHOR) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CompositeImageOptions_ANCHOR_value, data, "CompositeImageOptions_ANCHOR") + if err != nil { + return err + } + *x = CompositeImageOptions_ANCHOR(value) + return nil +} + +type ImagesServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ImagesServiceError) Reset() { *m = ImagesServiceError{} } +func (m *ImagesServiceError) String() string { return proto.CompactTextString(m) } +func (*ImagesServiceError) ProtoMessage() {} + +type 
ImagesServiceTransform struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ImagesServiceTransform) Reset() { *m = ImagesServiceTransform{} } +func (m *ImagesServiceTransform) String() string { return proto.CompactTextString(m) } +func (*ImagesServiceTransform) ProtoMessage() {} + +type Transform struct { + Width *int32 `protobuf:"varint,1,opt,name=width" json:"width,omitempty"` + Height *int32 `protobuf:"varint,2,opt,name=height" json:"height,omitempty"` + CropToFit *bool `protobuf:"varint,11,opt,name=crop_to_fit,def=0" json:"crop_to_fit,omitempty"` + CropOffsetX *float32 `protobuf:"fixed32,12,opt,name=crop_offset_x,def=0.5" json:"crop_offset_x,omitempty"` + CropOffsetY *float32 `protobuf:"fixed32,13,opt,name=crop_offset_y,def=0.5" json:"crop_offset_y,omitempty"` + Rotate *int32 `protobuf:"varint,3,opt,name=rotate,def=0" json:"rotate,omitempty"` + HorizontalFlip *bool `protobuf:"varint,4,opt,name=horizontal_flip,def=0" json:"horizontal_flip,omitempty"` + VerticalFlip *bool `protobuf:"varint,5,opt,name=vertical_flip,def=0" json:"vertical_flip,omitempty"` + CropLeftX *float32 `protobuf:"fixed32,6,opt,name=crop_left_x,def=0" json:"crop_left_x,omitempty"` + CropTopY *float32 `protobuf:"fixed32,7,opt,name=crop_top_y,def=0" json:"crop_top_y,omitempty"` + CropRightX *float32 `protobuf:"fixed32,8,opt,name=crop_right_x,def=1" json:"crop_right_x,omitempty"` + CropBottomY *float32 `protobuf:"fixed32,9,opt,name=crop_bottom_y,def=1" json:"crop_bottom_y,omitempty"` + Autolevels *bool `protobuf:"varint,10,opt,name=autolevels,def=0" json:"autolevels,omitempty"` + AllowStretch *bool `protobuf:"varint,14,opt,name=allow_stretch,def=0" json:"allow_stretch,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Transform) Reset() { *m = Transform{} } +func (m *Transform) String() string { return proto.CompactTextString(m) } +func (*Transform) ProtoMessage() {} + +const Default_Transform_CropToFit bool = false +const Default_Transform_CropOffsetX float32 = 0.5 +const Default_Transform_CropOffsetY float32 = 0.5 +const Default_Transform_Rotate int32 = 0 +const Default_Transform_HorizontalFlip bool = false +const Default_Transform_VerticalFlip bool = false +const Default_Transform_CropLeftX float32 = 0 +const Default_Transform_CropTopY float32 = 0 +const Default_Transform_CropRightX float32 = 1 +const Default_Transform_CropBottomY float32 = 1 +const Default_Transform_Autolevels bool = false +const Default_Transform_AllowStretch bool = false + +func (m *Transform) GetWidth() int32 { + if m != nil && m.Width != nil { + return *m.Width + } + return 0 +} + +func (m *Transform) GetHeight() int32 { + if m != nil && m.Height != nil { + return *m.Height + } + return 0 +} + +func (m *Transform) GetCropToFit() bool { + if m != nil && m.CropToFit != nil { + return *m.CropToFit + } + return Default_Transform_CropToFit +} + +func (m *Transform) GetCropOffsetX() float32 { + if m != nil && m.CropOffsetX != nil { + return *m.CropOffsetX + } + return Default_Transform_CropOffsetX +} + +func (m *Transform) GetCropOffsetY() float32 { + if m != nil && m.CropOffsetY != nil { + return *m.CropOffsetY + } + return Default_Transform_CropOffsetY +} + +func (m *Transform) GetRotate() int32 { + if m != nil && m.Rotate != nil { + return *m.Rotate + } + return Default_Transform_Rotate +} + +func (m *Transform) GetHorizontalFlip() bool { + if m != nil && m.HorizontalFlip != nil { + return *m.HorizontalFlip + } + return Default_Transform_HorizontalFlip +} + +func (m *Transform) GetVerticalFlip() bool { + if m != nil && 
m.VerticalFlip != nil { + return *m.VerticalFlip + } + return Default_Transform_VerticalFlip +} + +func (m *Transform) GetCropLeftX() float32 { + if m != nil && m.CropLeftX != nil { + return *m.CropLeftX + } + return Default_Transform_CropLeftX +} + +func (m *Transform) GetCropTopY() float32 { + if m != nil && m.CropTopY != nil { + return *m.CropTopY + } + return Default_Transform_CropTopY +} + +func (m *Transform) GetCropRightX() float32 { + if m != nil && m.CropRightX != nil { + return *m.CropRightX + } + return Default_Transform_CropRightX +} + +func (m *Transform) GetCropBottomY() float32 { + if m != nil && m.CropBottomY != nil { + return *m.CropBottomY + } + return Default_Transform_CropBottomY +} + +func (m *Transform) GetAutolevels() bool { + if m != nil && m.Autolevels != nil { + return *m.Autolevels + } + return Default_Transform_Autolevels +} + +func (m *Transform) GetAllowStretch() bool { + if m != nil && m.AllowStretch != nil { + return *m.AllowStretch + } + return Default_Transform_AllowStretch +} + +type ImageData struct { + Content []byte `protobuf:"bytes,1,req,name=content" json:"content,omitempty"` + BlobKey *string `protobuf:"bytes,2,opt,name=blob_key" json:"blob_key,omitempty"` + Width *int32 `protobuf:"varint,3,opt,name=width" json:"width,omitempty"` + Height *int32 `protobuf:"varint,4,opt,name=height" json:"height,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ImageData) Reset() { *m = ImageData{} } +func (m *ImageData) String() string { return proto.CompactTextString(m) } +func (*ImageData) ProtoMessage() {} + +func (m *ImageData) GetContent() []byte { + if m != nil { + return m.Content + } + return nil +} + +func (m *ImageData) GetBlobKey() string { + if m != nil && m.BlobKey != nil { + return *m.BlobKey + } + return "" +} + +func (m *ImageData) GetWidth() int32 { + if m != nil && m.Width != nil { + return *m.Width + } + return 0 +} + +func (m *ImageData) GetHeight() int32 { + if m != nil && m.Height != nil { + return *m.Height + } + return 0 +} + +type InputSettings struct { + CorrectExifOrientation *InputSettings_ORIENTATION_CORRECTION_TYPE `protobuf:"varint,1,opt,name=correct_exif_orientation,enum=appengine.InputSettings_ORIENTATION_CORRECTION_TYPE,def=0" json:"correct_exif_orientation,omitempty"` + ParseMetadata *bool `protobuf:"varint,2,opt,name=parse_metadata,def=0" json:"parse_metadata,omitempty"` + TransparentSubstitutionRgb *int32 `protobuf:"varint,3,opt,name=transparent_substitution_rgb" json:"transparent_substitution_rgb,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *InputSettings) Reset() { *m = InputSettings{} } +func (m *InputSettings) String() string { return proto.CompactTextString(m) } +func (*InputSettings) ProtoMessage() {} + +const Default_InputSettings_CorrectExifOrientation InputSettings_ORIENTATION_CORRECTION_TYPE = InputSettings_UNCHANGED_ORIENTATION +const Default_InputSettings_ParseMetadata bool = false + +func (m *InputSettings) GetCorrectExifOrientation() InputSettings_ORIENTATION_CORRECTION_TYPE { + if m != nil && m.CorrectExifOrientation != nil { + return *m.CorrectExifOrientation + } + return Default_InputSettings_CorrectExifOrientation +} + +func (m *InputSettings) GetParseMetadata() bool { + if m != nil && m.ParseMetadata != nil { + return *m.ParseMetadata + } + return Default_InputSettings_ParseMetadata +} + +func (m *InputSettings) GetTransparentSubstitutionRgb() int32 { + if m != nil && m.TransparentSubstitutionRgb != nil { + return *m.TransparentSubstitutionRgb + } + return 0 +} + +type 
OutputSettings struct { + MimeType *OutputSettings_MIME_TYPE `protobuf:"varint,1,opt,name=mime_type,enum=appengine.OutputSettings_MIME_TYPE,def=0" json:"mime_type,omitempty"` + Quality *int32 `protobuf:"varint,2,opt,name=quality" json:"quality,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OutputSettings) Reset() { *m = OutputSettings{} } +func (m *OutputSettings) String() string { return proto.CompactTextString(m) } +func (*OutputSettings) ProtoMessage() {} + +const Default_OutputSettings_MimeType OutputSettings_MIME_TYPE = OutputSettings_PNG + +func (m *OutputSettings) GetMimeType() OutputSettings_MIME_TYPE { + if m != nil && m.MimeType != nil { + return *m.MimeType + } + return Default_OutputSettings_MimeType +} + +func (m *OutputSettings) GetQuality() int32 { + if m != nil && m.Quality != nil { + return *m.Quality + } + return 0 +} + +type ImagesTransformRequest struct { + Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"` + Transform []*Transform `protobuf:"bytes,2,rep,name=transform" json:"transform,omitempty"` + Output *OutputSettings `protobuf:"bytes,3,req,name=output" json:"output,omitempty"` + Input *InputSettings `protobuf:"bytes,4,opt,name=input" json:"input,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ImagesTransformRequest) Reset() { *m = ImagesTransformRequest{} } +func (m *ImagesTransformRequest) String() string { return proto.CompactTextString(m) } +func (*ImagesTransformRequest) ProtoMessage() {} + +func (m *ImagesTransformRequest) GetImage() *ImageData { + if m != nil { + return m.Image + } + return nil +} + +func (m *ImagesTransformRequest) GetTransform() []*Transform { + if m != nil { + return m.Transform + } + return nil +} + +func (m *ImagesTransformRequest) GetOutput() *OutputSettings { + if m != nil { + return m.Output + } + return nil +} + +func (m *ImagesTransformRequest) GetInput() *InputSettings { + if m != nil { + return m.Input + } + return nil +} + +type ImagesTransformResponse struct { + Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"` + SourceMetadata *string `protobuf:"bytes,2,opt,name=source_metadata" json:"source_metadata,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ImagesTransformResponse) Reset() { *m = ImagesTransformResponse{} } +func (m *ImagesTransformResponse) String() string { return proto.CompactTextString(m) } +func (*ImagesTransformResponse) ProtoMessage() {} + +func (m *ImagesTransformResponse) GetImage() *ImageData { + if m != nil { + return m.Image + } + return nil +} + +func (m *ImagesTransformResponse) GetSourceMetadata() string { + if m != nil && m.SourceMetadata != nil { + return *m.SourceMetadata + } + return "" +} + +type CompositeImageOptions struct { + SourceIndex *int32 `protobuf:"varint,1,req,name=source_index" json:"source_index,omitempty"` + XOffset *int32 `protobuf:"varint,2,req,name=x_offset" json:"x_offset,omitempty"` + YOffset *int32 `protobuf:"varint,3,req,name=y_offset" json:"y_offset,omitempty"` + Opacity *float32 `protobuf:"fixed32,4,req,name=opacity" json:"opacity,omitempty"` + Anchor *CompositeImageOptions_ANCHOR `protobuf:"varint,5,req,name=anchor,enum=appengine.CompositeImageOptions_ANCHOR" json:"anchor,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompositeImageOptions) Reset() { *m = CompositeImageOptions{} } +func (m *CompositeImageOptions) String() string { return proto.CompactTextString(m) } +func (*CompositeImageOptions) ProtoMessage() {} + +func (m *CompositeImageOptions) 
GetSourceIndex() int32 { + if m != nil && m.SourceIndex != nil { + return *m.SourceIndex + } + return 0 +} + +func (m *CompositeImageOptions) GetXOffset() int32 { + if m != nil && m.XOffset != nil { + return *m.XOffset + } + return 0 +} + +func (m *CompositeImageOptions) GetYOffset() int32 { + if m != nil && m.YOffset != nil { + return *m.YOffset + } + return 0 +} + +func (m *CompositeImageOptions) GetOpacity() float32 { + if m != nil && m.Opacity != nil { + return *m.Opacity + } + return 0 +} + +func (m *CompositeImageOptions) GetAnchor() CompositeImageOptions_ANCHOR { + if m != nil && m.Anchor != nil { + return *m.Anchor + } + return CompositeImageOptions_TOP_LEFT +} + +type ImagesCanvas struct { + Width *int32 `protobuf:"varint,1,req,name=width" json:"width,omitempty"` + Height *int32 `protobuf:"varint,2,req,name=height" json:"height,omitempty"` + Output *OutputSettings `protobuf:"bytes,3,req,name=output" json:"output,omitempty"` + Color *int32 `protobuf:"varint,4,opt,name=color,def=-1" json:"color,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ImagesCanvas) Reset() { *m = ImagesCanvas{} } +func (m *ImagesCanvas) String() string { return proto.CompactTextString(m) } +func (*ImagesCanvas) ProtoMessage() {} + +const Default_ImagesCanvas_Color int32 = -1 + +func (m *ImagesCanvas) GetWidth() int32 { + if m != nil && m.Width != nil { + return *m.Width + } + return 0 +} + +func (m *ImagesCanvas) GetHeight() int32 { + if m != nil && m.Height != nil { + return *m.Height + } + return 0 +} + +func (m *ImagesCanvas) GetOutput() *OutputSettings { + if m != nil { + return m.Output + } + return nil +} + +func (m *ImagesCanvas) GetColor() int32 { + if m != nil && m.Color != nil { + return *m.Color + } + return Default_ImagesCanvas_Color +} + +type ImagesCompositeRequest struct { + Image []*ImageData `protobuf:"bytes,1,rep,name=image" json:"image,omitempty"` + Options []*CompositeImageOptions `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` + Canvas *ImagesCanvas `protobuf:"bytes,3,req,name=canvas" json:"canvas,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ImagesCompositeRequest) Reset() { *m = ImagesCompositeRequest{} } +func (m *ImagesCompositeRequest) String() string { return proto.CompactTextString(m) } +func (*ImagesCompositeRequest) ProtoMessage() {} + +func (m *ImagesCompositeRequest) GetImage() []*ImageData { + if m != nil { + return m.Image + } + return nil +} + +func (m *ImagesCompositeRequest) GetOptions() []*CompositeImageOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *ImagesCompositeRequest) GetCanvas() *ImagesCanvas { + if m != nil { + return m.Canvas + } + return nil +} + +type ImagesCompositeResponse struct { + Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ImagesCompositeResponse) Reset() { *m = ImagesCompositeResponse{} } +func (m *ImagesCompositeResponse) String() string { return proto.CompactTextString(m) } +func (*ImagesCompositeResponse) ProtoMessage() {} + +func (m *ImagesCompositeResponse) GetImage() *ImageData { + if m != nil { + return m.Image + } + return nil +} + +type ImagesHistogramRequest struct { + Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ImagesHistogramRequest) Reset() { *m = ImagesHistogramRequest{} } +func (m *ImagesHistogramRequest) String() string { return proto.CompactTextString(m) } +func (*ImagesHistogramRequest) 
ProtoMessage() {} + +func (m *ImagesHistogramRequest) GetImage() *ImageData { + if m != nil { + return m.Image + } + return nil +} + +type ImagesHistogram struct { + Red []int32 `protobuf:"varint,1,rep,name=red" json:"red,omitempty"` + Green []int32 `protobuf:"varint,2,rep,name=green" json:"green,omitempty"` + Blue []int32 `protobuf:"varint,3,rep,name=blue" json:"blue,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ImagesHistogram) Reset() { *m = ImagesHistogram{} } +func (m *ImagesHistogram) String() string { return proto.CompactTextString(m) } +func (*ImagesHistogram) ProtoMessage() {} + +func (m *ImagesHistogram) GetRed() []int32 { + if m != nil { + return m.Red + } + return nil +} + +func (m *ImagesHistogram) GetGreen() []int32 { + if m != nil { + return m.Green + } + return nil +} + +func (m *ImagesHistogram) GetBlue() []int32 { + if m != nil { + return m.Blue + } + return nil +} + +type ImagesHistogramResponse struct { + Histogram *ImagesHistogram `protobuf:"bytes,1,req,name=histogram" json:"histogram,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ImagesHistogramResponse) Reset() { *m = ImagesHistogramResponse{} } +func (m *ImagesHistogramResponse) String() string { return proto.CompactTextString(m) } +func (*ImagesHistogramResponse) ProtoMessage() {} + +func (m *ImagesHistogramResponse) GetHistogram() *ImagesHistogram { + if m != nil { + return m.Histogram + } + return nil +} + +type ImagesGetUrlBaseRequest struct { + BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"` + CreateSecureUrl *bool `protobuf:"varint,2,opt,name=create_secure_url,def=0" json:"create_secure_url,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ImagesGetUrlBaseRequest) Reset() { *m = ImagesGetUrlBaseRequest{} } +func (m *ImagesGetUrlBaseRequest) String() string { return proto.CompactTextString(m) } +func (*ImagesGetUrlBaseRequest) ProtoMessage() {} + +const Default_ImagesGetUrlBaseRequest_CreateSecureUrl bool = false + +func (m *ImagesGetUrlBaseRequest) GetBlobKey() string { + if m != nil && m.BlobKey != nil { + return *m.BlobKey + } + return "" +} + +func (m *ImagesGetUrlBaseRequest) GetCreateSecureUrl() bool { + if m != nil && m.CreateSecureUrl != nil { + return *m.CreateSecureUrl + } + return Default_ImagesGetUrlBaseRequest_CreateSecureUrl +} + +type ImagesGetUrlBaseResponse struct { + Url *string `protobuf:"bytes,1,req,name=url" json:"url,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ImagesGetUrlBaseResponse) Reset() { *m = ImagesGetUrlBaseResponse{} } +func (m *ImagesGetUrlBaseResponse) String() string { return proto.CompactTextString(m) } +func (*ImagesGetUrlBaseResponse) ProtoMessage() {} + +func (m *ImagesGetUrlBaseResponse) GetUrl() string { + if m != nil && m.Url != nil { + return *m.Url + } + return "" +} + +type ImagesDeleteUrlBaseRequest struct { + BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ImagesDeleteUrlBaseRequest) Reset() { *m = ImagesDeleteUrlBaseRequest{} } +func (m *ImagesDeleteUrlBaseRequest) String() string { return proto.CompactTextString(m) } +func (*ImagesDeleteUrlBaseRequest) ProtoMessage() {} + +func (m *ImagesDeleteUrlBaseRequest) GetBlobKey() string { + if m != nil && m.BlobKey != nil { + return *m.BlobKey + } + return "" +} + +type ImagesDeleteUrlBaseResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ImagesDeleteUrlBaseResponse) Reset() { *m = ImagesDeleteUrlBaseResponse{} } +func 
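// A minimal sketch (editorial; the blob key is a hypothetical value) of asking
// for a serving URL with the get/delete request pair defined above, using the
// proto.String/proto.Bool pointer helpers from github.com/golang/protobuf/proto:
//
//	req := &ImagesGetUrlBaseRequest{
//		BlobKey:         proto.String("some-blob-key"),
//		CreateSecureUrl: proto.Bool(true),
//	}
//	_ = req.GetCreateSecureUrl() // true; falls back to the false default when unset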
(m *ImagesDeleteUrlBaseResponse) String() string { return proto.CompactTextString(m) } +func (*ImagesDeleteUrlBaseResponse) ProtoMessage() {} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/image/images_service.proto b/vendor/google.golang.org/appengine/internal/image/images_service.proto new file mode 100644 index 0000000..f0d2ed5 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/image/images_service.proto @@ -0,0 +1,162 @@ +syntax = "proto2"; +option go_package = "image"; + +package appengine; + +message ImagesServiceError { + enum ErrorCode { + UNSPECIFIED_ERROR = 1; + BAD_TRANSFORM_DATA = 2; + NOT_IMAGE = 3; + BAD_IMAGE_DATA = 4; + IMAGE_TOO_LARGE = 5; + INVALID_BLOB_KEY = 6; + ACCESS_DENIED = 7; + OBJECT_NOT_FOUND = 8; + } +} + +message ImagesServiceTransform { + enum Type { + RESIZE = 1; + ROTATE = 2; + HORIZONTAL_FLIP = 3; + VERTICAL_FLIP = 4; + CROP = 5; + IM_FEELING_LUCKY = 6; + } +} + +message Transform { + optional int32 width = 1; + optional int32 height = 2; + optional bool crop_to_fit = 11 [default = false]; + optional float crop_offset_x = 12 [default = 0.5]; + optional float crop_offset_y = 13 [default = 0.5]; + + optional int32 rotate = 3 [default = 0]; + + optional bool horizontal_flip = 4 [default = false]; + + optional bool vertical_flip = 5 [default = false]; + + optional float crop_left_x = 6 [default = 0.0]; + optional float crop_top_y = 7 [default = 0.0]; + optional float crop_right_x = 8 [default = 1.0]; + optional float crop_bottom_y = 9 [default = 1.0]; + + optional bool autolevels = 10 [default = false]; + + optional bool allow_stretch = 14 [default = false]; +} + +message ImageData { + required bytes content = 1 [ctype=CORD]; + optional string blob_key = 2; + + optional int32 width = 3; + optional int32 height = 4; +} + +message InputSettings { + enum ORIENTATION_CORRECTION_TYPE { + UNCHANGED_ORIENTATION = 0; + CORRECT_ORIENTATION = 1; + } + optional ORIENTATION_CORRECTION_TYPE correct_exif_orientation = 1 + [default=UNCHANGED_ORIENTATION]; + optional bool parse_metadata = 2 [default=false]; + optional int32 transparent_substitution_rgb = 3; +} + +message OutputSettings { + enum MIME_TYPE { + PNG = 0; + JPEG = 1; + WEBP = 2; + } + + optional MIME_TYPE mime_type = 1 [default=PNG]; + optional int32 quality = 2; +} + +message ImagesTransformRequest { + required ImageData image = 1; + repeated Transform transform = 2; + required OutputSettings output = 3; + optional InputSettings input = 4; +} + +message ImagesTransformResponse { + required ImageData image = 1; + optional string source_metadata = 2; +} + +message CompositeImageOptions { + required int32 source_index = 1; + required int32 x_offset = 2; + required int32 y_offset = 3; + required float opacity = 4; + + enum ANCHOR { + TOP_LEFT = 0; + TOP = 1; + TOP_RIGHT = 2; + LEFT = 3; + CENTER = 4; + RIGHT = 5; + BOTTOM_LEFT = 6; + BOTTOM = 7; + BOTTOM_RIGHT = 8; + } + + required ANCHOR anchor = 5; +} + +message ImagesCanvas { + required int32 width = 1; + required int32 height = 2; + required OutputSettings output = 3; + optional int32 color = 4 [default=-1]; +} + +message ImagesCompositeRequest { + repeated ImageData image = 1; + repeated CompositeImageOptions options = 2; + required ImagesCanvas canvas = 3; +} + +message ImagesCompositeResponse { + required ImageData image = 1; +} + +message ImagesHistogramRequest { + required ImageData image = 1; +} + +message ImagesHistogram { + repeated int32 red = 1; + repeated int32 green = 2; + repeated int32 blue = 3; +} + 
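// A minimal Go-side sketch (editorial; jpegBytes is a hypothetical byte slice)
// of assembling the transform request these messages describe, using the
// generated types from the .pb.go file above:
//
//	req := &ImagesTransformRequest{
//		Image: &ImageData{Content: jpegBytes},
//		Transform: []*Transform{{
//			Width:     proto.Int32(800),
//			CropToFit: proto.Bool(true),
//		}},
//		Output: &OutputSettings{
//			MimeType: OutputSettings_JPEG.Enum(),
//			Quality:  proto.Int32(85),
//		},
//	}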
+message ImagesHistogramResponse { + required ImagesHistogram histogram = 1; +} + +message ImagesGetUrlBaseRequest { + required string blob_key = 1; + + optional bool create_secure_url = 2 [default = false]; +} + +message ImagesGetUrlBaseResponse { + required string url = 1; +} + +message ImagesDeleteUrlBaseRequest { + required string blob_key = 1; +} + +message ImagesDeleteUrlBaseResponse { +} diff --git a/vendor/google.golang.org/appengine/internal/internal.go b/vendor/google.golang.org/appengine/internal/internal.go new file mode 100644 index 0000000..051ea39 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/internal.go @@ -0,0 +1,110 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package internal provides support for package appengine. +// +// Programs should not use this package directly. Its API is not stable. +// Use packages appengine and appengine/* instead. +package internal + +import ( + "fmt" + + "github.com/golang/protobuf/proto" + + remotepb "google.golang.org/appengine/internal/remote_api" +) + +// errorCodeMaps is a map of service name to the error code map for the service. +var errorCodeMaps = make(map[string]map[int32]string) + +// RegisterErrorCodeMap is called from API implementations to register their +// error code map. This should only be called from init functions. +func RegisterErrorCodeMap(service string, m map[int32]string) { + errorCodeMaps[service] = m +} + +type timeoutCodeKey struct { + service string + code int32 +} + +// timeoutCodes is the set of service+code pairs that represent timeouts. +var timeoutCodes = make(map[timeoutCodeKey]bool) + +func RegisterTimeoutErrorCode(service string, code int32) { + timeoutCodes[timeoutCodeKey{service, code}] = true +} + +// APIError is the type returned by appengine.Context's Call method +// when an API call fails in an API-specific way. This may be, for instance, +// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE. +type APIError struct { + Service string + Detail string + Code int32 // API-specific error code +} + +func (e *APIError) Error() string { + if e.Code == 0 { + if e.Detail == "" { + return "APIError " + } + return e.Detail + } + s := fmt.Sprintf("API error %d", e.Code) + if m, ok := errorCodeMaps[e.Service]; ok { + s += " (" + e.Service + ": " + m[e.Code] + ")" + } else { + // Shouldn't happen, but provide a bit more detail if it does. + s = e.Service + " " + s + } + if e.Detail != "" { + s += ": " + e.Detail + } + return s +} + +func (e *APIError) IsTimeout() bool { + return timeoutCodes[timeoutCodeKey{e.Service, e.Code}] +} + +// CallError is the type returned by appengine.Context's Call method when an +// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED. +type CallError struct { + Detail string + Code int32 + // TODO: Remove this if we get a distinguishable error code. 
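// A hypothetical caller-side check built on the two error types defined in
// this file; both expose IsTimeout, so retry logic can treat them uniformly:
//
//	func isRetryable(err error) bool {
//		if ae, ok := err.(*APIError); ok {
//			return ae.IsTimeout()
//		}
//		if ce, ok := err.(*CallError); ok {
//			return ce.IsTimeout()
//		}
//		return false
//	}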
+ Timeout bool +} + +func (e *CallError) Error() string { + var msg string + switch remotepb.RpcError_ErrorCode(e.Code) { + case remotepb.RpcError_UNKNOWN: + return e.Detail + case remotepb.RpcError_OVER_QUOTA: + msg = "Over quota" + case remotepb.RpcError_CAPABILITY_DISABLED: + msg = "Capability disabled" + case remotepb.RpcError_CANCELLED: + msg = "Canceled" + default: + msg = fmt.Sprintf("Call error %d", e.Code) + } + s := msg + ": " + e.Detail + if e.Timeout { + s += " (timeout)" + } + return s +} + +func (e *CallError) IsTimeout() bool { + return e.Timeout +} + +// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace. +// The function should be prepared to be called on the same message more than once; it should only modify the +// RPC request the first time. +var NamespaceMods = make(map[string]func(m proto.Message, namespace string)) diff --git a/vendor/google.golang.org/appengine/internal/internal_vm_test.go b/vendor/google.golang.org/appengine/internal/internal_vm_test.go new file mode 100644 index 0000000..f809761 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/internal_vm_test.go @@ -0,0 +1,60 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package internal + +import ( + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" +) + +func TestInstallingHealthChecker(t *testing.T) { + try := func(desc string, mux *http.ServeMux, wantCode int, wantBody string) { + installHealthChecker(mux) + srv := httptest.NewServer(mux) + defer srv.Close() + + resp, err := http.Get(srv.URL + "/_ah/health") + if err != nil { + t.Errorf("%s: http.Get: %v", desc, err) + return + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Errorf("%s: reading body: %v", desc, err) + return + } + + if resp.StatusCode != wantCode { + t.Errorf("%s: got HTTP %d, want %d", desc, resp.StatusCode, wantCode) + return + } + if wantBody != "" && string(body) != wantBody { + t.Errorf("%s: got HTTP body %q, want %q", desc, body, wantBody) + return + } + } + + // If there's no handlers, or only a root handler, a health checker should be installed. + try("empty mux", http.NewServeMux(), 200, "ok") + mux := http.NewServeMux() + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "root handler") + }) + try("mux with root handler", mux, 200, "ok") + + // If there's a custom health check handler, one should not be installed. + mux = http.NewServeMux() + mux.HandleFunc("/_ah/health", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(418) + io.WriteString(w, "I'm short and stout!") + }) + try("mux with custom health checker", mux, 418, "I'm short and stout!") +} diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go new file mode 100644 index 0000000..20c595b --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go @@ -0,0 +1,899 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/log/log_service.proto +// DO NOT EDIT! + +/* +Package log is a generated protocol buffer package. 
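A minimal sketch of the flush flow these messages support, assuming (as the
surrounding appengine runtime does elsewhere) that FlushRequest.Logs carries a
serialized UserAppLogGroup; ts and the level value are hypothetical:

	group := &UserAppLogGroup{LogLine: []*UserAppLogLine{{
		TimestampUsec: proto.Int64(ts),
		Level:         proto.Int64(3),
		Message:       proto.String("request handled"),
	}}}
	buf, _ := proto.Marshal(group)
	flush := &FlushRequest{Logs: buf}
	_ = flush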
+ +It is generated from these files: + google.golang.org/appengine/internal/log/log_service.proto + +It has these top-level messages: + LogServiceError + UserAppLogLine + UserAppLogGroup + FlushRequest + SetStatusRequest + LogOffset + LogLine + RequestLog + LogModuleVersion + LogReadRequest + LogReadResponse + LogUsageRecord + LogUsageRequest + LogUsageResponse +*/ +package log + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type LogServiceError_ErrorCode int32 + +const ( + LogServiceError_OK LogServiceError_ErrorCode = 0 + LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1 + LogServiceError_STORAGE_ERROR LogServiceError_ErrorCode = 2 +) + +var LogServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INVALID_REQUEST", + 2: "STORAGE_ERROR", +} +var LogServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INVALID_REQUEST": 1, + "STORAGE_ERROR": 2, +} + +func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode { + p := new(LogServiceError_ErrorCode) + *p = x + return p +} +func (x LogServiceError_ErrorCode) String() string { + return proto.EnumName(LogServiceError_ErrorCode_name, int32(x)) +} +func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode") + if err != nil { + return err + } + *x = LogServiceError_ErrorCode(value) + return nil +} + +type LogServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogServiceError) Reset() { *m = LogServiceError{} } +func (m *LogServiceError) String() string { return proto.CompactTextString(m) } +func (*LogServiceError) ProtoMessage() {} + +type UserAppLogLine struct { + TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec" json:"timestamp_usec,omitempty"` + Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` + Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} } +func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) } +func (*UserAppLogLine) ProtoMessage() {} + +func (m *UserAppLogLine) GetTimestampUsec() int64 { + if m != nil && m.TimestampUsec != nil { + return *m.TimestampUsec + } + return 0 +} + +func (m *UserAppLogLine) GetLevel() int64 { + if m != nil && m.Level != nil { + return *m.Level + } + return 0 +} + +func (m *UserAppLogLine) GetMessage() string { + if m != nil && m.Message != nil { + return *m.Message + } + return "" +} + +type UserAppLogGroup struct { + LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line" json:"log_line,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} } +func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) } +func (*UserAppLogGroup) ProtoMessage() {} + +func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine { + if m != nil { + return m.LogLine + } + return nil +} + +type FlushRequest struct { + Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FlushRequest) Reset() { *m = FlushRequest{} } +func (m *FlushRequest) String() string { return proto.CompactTextString(m) } +func (*FlushRequest) ProtoMessage() {} + +func 
(m *FlushRequest) GetLogs() []byte { + if m != nil { + return m.Logs + } + return nil +} + +type SetStatusRequest struct { + Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SetStatusRequest) Reset() { *m = SetStatusRequest{} } +func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) } +func (*SetStatusRequest) ProtoMessage() {} + +func (m *SetStatusRequest) GetStatus() string { + if m != nil && m.Status != nil { + return *m.Status + } + return "" +} + +type LogOffset struct { + RequestId []byte `protobuf:"bytes,1,opt,name=request_id" json:"request_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogOffset) Reset() { *m = LogOffset{} } +func (m *LogOffset) String() string { return proto.CompactTextString(m) } +func (*LogOffset) ProtoMessage() {} + +func (m *LogOffset) GetRequestId() []byte { + if m != nil { + return m.RequestId + } + return nil +} + +type LogLine struct { + Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"` + Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` + LogMessage *string `protobuf:"bytes,3,req,name=log_message" json:"log_message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogLine) Reset() { *m = LogLine{} } +func (m *LogLine) String() string { return proto.CompactTextString(m) } +func (*LogLine) ProtoMessage() {} + +func (m *LogLine) GetTime() int64 { + if m != nil && m.Time != nil { + return *m.Time + } + return 0 +} + +func (m *LogLine) GetLevel() int32 { + if m != nil && m.Level != nil { + return *m.Level + } + return 0 +} + +func (m *LogLine) GetLogMessage() string { + if m != nil && m.LogMessage != nil { + return *m.LogMessage + } + return "" +} + +type RequestLog struct { + AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` + ModuleId *string `protobuf:"bytes,37,opt,name=module_id,def=default" json:"module_id,omitempty"` + VersionId *string `protobuf:"bytes,2,req,name=version_id" json:"version_id,omitempty"` + RequestId []byte `protobuf:"bytes,3,req,name=request_id" json:"request_id,omitempty"` + Offset *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"` + Ip *string `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"` + Nickname *string `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"` + StartTime *int64 `protobuf:"varint,6,req,name=start_time" json:"start_time,omitempty"` + EndTime *int64 `protobuf:"varint,7,req,name=end_time" json:"end_time,omitempty"` + Latency *int64 `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"` + Mcycles *int64 `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"` + Method *string `protobuf:"bytes,10,req,name=method" json:"method,omitempty"` + Resource *string `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"` + HttpVersion *string `protobuf:"bytes,12,req,name=http_version" json:"http_version,omitempty"` + Status *int32 `protobuf:"varint,13,req,name=status" json:"status,omitempty"` + ResponseSize *int64 `protobuf:"varint,14,req,name=response_size" json:"response_size,omitempty"` + Referrer *string `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"` + UserAgent *string `protobuf:"bytes,16,opt,name=user_agent" json:"user_agent,omitempty"` + UrlMapEntry *string `protobuf:"bytes,17,req,name=url_map_entry" json:"url_map_entry,omitempty"` + Combined *string `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"` + ApiMcycles *int64 
`protobuf:"varint,19,opt,name=api_mcycles" json:"api_mcycles,omitempty"` + Host *string `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"` + Cost *float64 `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"` + TaskQueueName *string `protobuf:"bytes,22,opt,name=task_queue_name" json:"task_queue_name,omitempty"` + TaskName *string `protobuf:"bytes,23,opt,name=task_name" json:"task_name,omitempty"` + WasLoadingRequest *bool `protobuf:"varint,24,opt,name=was_loading_request" json:"was_loading_request,omitempty"` + PendingTime *int64 `protobuf:"varint,25,opt,name=pending_time" json:"pending_time,omitempty"` + ReplicaIndex *int32 `protobuf:"varint,26,opt,name=replica_index,def=-1" json:"replica_index,omitempty"` + Finished *bool `protobuf:"varint,27,opt,name=finished,def=1" json:"finished,omitempty"` + CloneKey []byte `protobuf:"bytes,28,opt,name=clone_key" json:"clone_key,omitempty"` + Line []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"` + LinesIncomplete *bool `protobuf:"varint,36,opt,name=lines_incomplete" json:"lines_incomplete,omitempty"` + AppEngineRelease []byte `protobuf:"bytes,38,opt,name=app_engine_release" json:"app_engine_release,omitempty"` + ExitReason *int32 `protobuf:"varint,30,opt,name=exit_reason" json:"exit_reason,omitempty"` + WasThrottledForTime *bool `protobuf:"varint,31,opt,name=was_throttled_for_time" json:"was_throttled_for_time,omitempty"` + WasThrottledForRequests *bool `protobuf:"varint,32,opt,name=was_throttled_for_requests" json:"was_throttled_for_requests,omitempty"` + ThrottledTime *int64 `protobuf:"varint,33,opt,name=throttled_time" json:"throttled_time,omitempty"` + ServerName []byte `protobuf:"bytes,34,opt,name=server_name" json:"server_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RequestLog) Reset() { *m = RequestLog{} } +func (m *RequestLog) String() string { return proto.CompactTextString(m) } +func (*RequestLog) ProtoMessage() {} + +const Default_RequestLog_ModuleId string = "default" +const Default_RequestLog_ReplicaIndex int32 = -1 +const Default_RequestLog_Finished bool = true + +func (m *RequestLog) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *RequestLog) GetModuleId() string { + if m != nil && m.ModuleId != nil { + return *m.ModuleId + } + return Default_RequestLog_ModuleId +} + +func (m *RequestLog) GetVersionId() string { + if m != nil && m.VersionId != nil { + return *m.VersionId + } + return "" +} + +func (m *RequestLog) GetRequestId() []byte { + if m != nil { + return m.RequestId + } + return nil +} + +func (m *RequestLog) GetOffset() *LogOffset { + if m != nil { + return m.Offset + } + return nil +} + +func (m *RequestLog) GetIp() string { + if m != nil && m.Ip != nil { + return *m.Ip + } + return "" +} + +func (m *RequestLog) GetNickname() string { + if m != nil && m.Nickname != nil { + return *m.Nickname + } + return "" +} + +func (m *RequestLog) GetStartTime() int64 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *RequestLog) GetEndTime() int64 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *RequestLog) GetLatency() int64 { + if m != nil && m.Latency != nil { + return *m.Latency + } + return 0 +} + +func (m *RequestLog) GetMcycles() int64 { + if m != nil && m.Mcycles != nil { + return *m.Mcycles + } + return 0 +} + +func (m *RequestLog) GetMethod() string { + if m != nil && m.Method != nil { + return *m.Method + } + return "" +} + +func (m 
*RequestLog) GetResource() string { + if m != nil && m.Resource != nil { + return *m.Resource + } + return "" +} + +func (m *RequestLog) GetHttpVersion() string { + if m != nil && m.HttpVersion != nil { + return *m.HttpVersion + } + return "" +} + +func (m *RequestLog) GetStatus() int32 { + if m != nil && m.Status != nil { + return *m.Status + } + return 0 +} + +func (m *RequestLog) GetResponseSize() int64 { + if m != nil && m.ResponseSize != nil { + return *m.ResponseSize + } + return 0 +} + +func (m *RequestLog) GetReferrer() string { + if m != nil && m.Referrer != nil { + return *m.Referrer + } + return "" +} + +func (m *RequestLog) GetUserAgent() string { + if m != nil && m.UserAgent != nil { + return *m.UserAgent + } + return "" +} + +func (m *RequestLog) GetUrlMapEntry() string { + if m != nil && m.UrlMapEntry != nil { + return *m.UrlMapEntry + } + return "" +} + +func (m *RequestLog) GetCombined() string { + if m != nil && m.Combined != nil { + return *m.Combined + } + return "" +} + +func (m *RequestLog) GetApiMcycles() int64 { + if m != nil && m.ApiMcycles != nil { + return *m.ApiMcycles + } + return 0 +} + +func (m *RequestLog) GetHost() string { + if m != nil && m.Host != nil { + return *m.Host + } + return "" +} + +func (m *RequestLog) GetCost() float64 { + if m != nil && m.Cost != nil { + return *m.Cost + } + return 0 +} + +func (m *RequestLog) GetTaskQueueName() string { + if m != nil && m.TaskQueueName != nil { + return *m.TaskQueueName + } + return "" +} + +func (m *RequestLog) GetTaskName() string { + if m != nil && m.TaskName != nil { + return *m.TaskName + } + return "" +} + +func (m *RequestLog) GetWasLoadingRequest() bool { + if m != nil && m.WasLoadingRequest != nil { + return *m.WasLoadingRequest + } + return false +} + +func (m *RequestLog) GetPendingTime() int64 { + if m != nil && m.PendingTime != nil { + return *m.PendingTime + } + return 0 +} + +func (m *RequestLog) GetReplicaIndex() int32 { + if m != nil && m.ReplicaIndex != nil { + return *m.ReplicaIndex + } + return Default_RequestLog_ReplicaIndex +} + +func (m *RequestLog) GetFinished() bool { + if m != nil && m.Finished != nil { + return *m.Finished + } + return Default_RequestLog_Finished +} + +func (m *RequestLog) GetCloneKey() []byte { + if m != nil { + return m.CloneKey + } + return nil +} + +func (m *RequestLog) GetLine() []*LogLine { + if m != nil { + return m.Line + } + return nil +} + +func (m *RequestLog) GetLinesIncomplete() bool { + if m != nil && m.LinesIncomplete != nil { + return *m.LinesIncomplete + } + return false +} + +func (m *RequestLog) GetAppEngineRelease() []byte { + if m != nil { + return m.AppEngineRelease + } + return nil +} + +func (m *RequestLog) GetExitReason() int32 { + if m != nil && m.ExitReason != nil { + return *m.ExitReason + } + return 0 +} + +func (m *RequestLog) GetWasThrottledForTime() bool { + if m != nil && m.WasThrottledForTime != nil { + return *m.WasThrottledForTime + } + return false +} + +func (m *RequestLog) GetWasThrottledForRequests() bool { + if m != nil && m.WasThrottledForRequests != nil { + return *m.WasThrottledForRequests + } + return false +} + +func (m *RequestLog) GetThrottledTime() int64 { + if m != nil && m.ThrottledTime != nil { + return *m.ThrottledTime + } + return 0 +} + +func (m *RequestLog) GetServerName() []byte { + if m != nil { + return m.ServerName + } + return nil +} + +type LogModuleVersion struct { + ModuleId *string `protobuf:"bytes,1,opt,name=module_id,def=default" json:"module_id,omitempty"` + VersionId *string 
`protobuf:"bytes,2,opt,name=version_id" json:"version_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} } +func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) } +func (*LogModuleVersion) ProtoMessage() {} + +const Default_LogModuleVersion_ModuleId string = "default" + +func (m *LogModuleVersion) GetModuleId() string { + if m != nil && m.ModuleId != nil { + return *m.ModuleId + } + return Default_LogModuleVersion_ModuleId +} + +func (m *LogModuleVersion) GetVersionId() string { + if m != nil && m.VersionId != nil { + return *m.VersionId + } + return "" +} + +type LogReadRequest struct { + AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` + VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"` + ModuleVersion []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version" json:"module_version,omitempty"` + StartTime *int64 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"` + EndTime *int64 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"` + Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"` + RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id" json:"request_id,omitempty"` + MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level" json:"minimum_log_level,omitempty"` + IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete" json:"include_incomplete,omitempty"` + Count *int64 `protobuf:"varint,9,opt,name=count" json:"count,omitempty"` + CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex" json:"combined_log_regex,omitempty"` + HostRegex *string `protobuf:"bytes,15,opt,name=host_regex" json:"host_regex,omitempty"` + ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index" json:"replica_index,omitempty"` + IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs" json:"include_app_logs,omitempty"` + AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request" json:"app_logs_per_request,omitempty"` + IncludeHost *bool `protobuf:"varint,11,opt,name=include_host" json:"include_host,omitempty"` + IncludeAll *bool `protobuf:"varint,12,opt,name=include_all" json:"include_all,omitempty"` + CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator" json:"cache_iterator,omitempty"` + NumShards *int32 `protobuf:"varint,18,opt,name=num_shards" json:"num_shards,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogReadRequest) Reset() { *m = LogReadRequest{} } +func (m *LogReadRequest) String() string { return proto.CompactTextString(m) } +func (*LogReadRequest) ProtoMessage() {} + +func (m *LogReadRequest) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *LogReadRequest) GetVersionId() []string { + if m != nil { + return m.VersionId + } + return nil +} + +func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion { + if m != nil { + return m.ModuleVersion + } + return nil +} + +func (m *LogReadRequest) GetStartTime() int64 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *LogReadRequest) GetEndTime() int64 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *LogReadRequest) GetOffset() *LogOffset { + if m != nil { + return m.Offset + } + return nil +} + +func (m *LogReadRequest) GetRequestId() [][]byte { + if m != nil { + return m.RequestId + } + 
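	// Generated getters like these are nil-receiver safe, so lookups can be
	// chained without intermediate checks; a hypothetical read of a response:
	//
	//	var resp *LogReadResponse              // possibly nil
	//	id := resp.GetOffset().GetRequestId()  // nil, with no panic at any step
	//	_ = id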
return nil +} + +func (m *LogReadRequest) GetMinimumLogLevel() int32 { + if m != nil && m.MinimumLogLevel != nil { + return *m.MinimumLogLevel + } + return 0 +} + +func (m *LogReadRequest) GetIncludeIncomplete() bool { + if m != nil && m.IncludeIncomplete != nil { + return *m.IncludeIncomplete + } + return false +} + +func (m *LogReadRequest) GetCount() int64 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *LogReadRequest) GetCombinedLogRegex() string { + if m != nil && m.CombinedLogRegex != nil { + return *m.CombinedLogRegex + } + return "" +} + +func (m *LogReadRequest) GetHostRegex() string { + if m != nil && m.HostRegex != nil { + return *m.HostRegex + } + return "" +} + +func (m *LogReadRequest) GetReplicaIndex() int32 { + if m != nil && m.ReplicaIndex != nil { + return *m.ReplicaIndex + } + return 0 +} + +func (m *LogReadRequest) GetIncludeAppLogs() bool { + if m != nil && m.IncludeAppLogs != nil { + return *m.IncludeAppLogs + } + return false +} + +func (m *LogReadRequest) GetAppLogsPerRequest() int32 { + if m != nil && m.AppLogsPerRequest != nil { + return *m.AppLogsPerRequest + } + return 0 +} + +func (m *LogReadRequest) GetIncludeHost() bool { + if m != nil && m.IncludeHost != nil { + return *m.IncludeHost + } + return false +} + +func (m *LogReadRequest) GetIncludeAll() bool { + if m != nil && m.IncludeAll != nil { + return *m.IncludeAll + } + return false +} + +func (m *LogReadRequest) GetCacheIterator() bool { + if m != nil && m.CacheIterator != nil { + return *m.CacheIterator + } + return false +} + +func (m *LogReadRequest) GetNumShards() int32 { + if m != nil && m.NumShards != nil { + return *m.NumShards + } + return 0 +} + +type LogReadResponse struct { + Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"` + Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"` + LastEndTime *int64 `protobuf:"varint,3,opt,name=last_end_time" json:"last_end_time,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogReadResponse) Reset() { *m = LogReadResponse{} } +func (m *LogReadResponse) String() string { return proto.CompactTextString(m) } +func (*LogReadResponse) ProtoMessage() {} + +func (m *LogReadResponse) GetLog() []*RequestLog { + if m != nil { + return m.Log + } + return nil +} + +func (m *LogReadResponse) GetOffset() *LogOffset { + if m != nil { + return m.Offset + } + return nil +} + +func (m *LogReadResponse) GetLastEndTime() int64 { + if m != nil && m.LastEndTime != nil { + return *m.LastEndTime + } + return 0 +} + +type LogUsageRecord struct { + VersionId *string `protobuf:"bytes,1,opt,name=version_id" json:"version_id,omitempty"` + StartTime *int32 `protobuf:"varint,2,opt,name=start_time" json:"start_time,omitempty"` + EndTime *int32 `protobuf:"varint,3,opt,name=end_time" json:"end_time,omitempty"` + Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"` + TotalSize *int64 `protobuf:"varint,5,opt,name=total_size" json:"total_size,omitempty"` + Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} } +func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) } +func (*LogUsageRecord) ProtoMessage() {} + +func (m *LogUsageRecord) GetVersionId() string { + if m != nil && m.VersionId != nil { + return *m.VersionId + } + return "" +} + +func (m *LogUsageRecord) GetStartTime() int32 { + if m != nil && m.StartTime != nil { + return 
*m.StartTime + } + return 0 +} + +func (m *LogUsageRecord) GetEndTime() int32 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *LogUsageRecord) GetCount() int64 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *LogUsageRecord) GetTotalSize() int64 { + if m != nil && m.TotalSize != nil { + return *m.TotalSize + } + return 0 +} + +func (m *LogUsageRecord) GetRecords() int32 { + if m != nil && m.Records != nil { + return *m.Records + } + return 0 +} + +type LogUsageRequest struct { + AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` + VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"` + StartTime *int32 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"` + EndTime *int32 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"` + ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,def=1" json:"resolution_hours,omitempty"` + CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions" json:"combine_versions,omitempty"` + UsageVersion *int32 `protobuf:"varint,7,opt,name=usage_version" json:"usage_version,omitempty"` + VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only" json:"versions_only,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} } +func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) } +func (*LogUsageRequest) ProtoMessage() {} + +const Default_LogUsageRequest_ResolutionHours uint32 = 1 + +func (m *LogUsageRequest) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *LogUsageRequest) GetVersionId() []string { + if m != nil { + return m.VersionId + } + return nil +} + +func (m *LogUsageRequest) GetStartTime() int32 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *LogUsageRequest) GetEndTime() int32 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *LogUsageRequest) GetResolutionHours() uint32 { + if m != nil && m.ResolutionHours != nil { + return *m.ResolutionHours + } + return Default_LogUsageRequest_ResolutionHours +} + +func (m *LogUsageRequest) GetCombineVersions() bool { + if m != nil && m.CombineVersions != nil { + return *m.CombineVersions + } + return false +} + +func (m *LogUsageRequest) GetUsageVersion() int32 { + if m != nil && m.UsageVersion != nil { + return *m.UsageVersion + } + return 0 +} + +func (m *LogUsageRequest) GetVersionsOnly() bool { + if m != nil && m.VersionsOnly != nil { + return *m.VersionsOnly + } + return false +} + +type LogUsageResponse struct { + Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"` + Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} } +func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) } +func (*LogUsageResponse) ProtoMessage() {} + +func (m *LogUsageResponse) GetUsage() []*LogUsageRecord { + if m != nil { + return m.Usage + } + return nil +} + +func (m *LogUsageResponse) GetSummary() *LogUsageRecord { + if m != nil { + return m.Summary + } + return nil +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.proto b/vendor/google.golang.org/appengine/internal/log/log_service.proto new file mode 
100644 index 0000000..8981dc4 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/log/log_service.proto @@ -0,0 +1,150 @@ +syntax = "proto2"; +option go_package = "log"; + +package appengine; + +message LogServiceError { + enum ErrorCode { + OK = 0; + INVALID_REQUEST = 1; + STORAGE_ERROR = 2; + } +} + +message UserAppLogLine { + required int64 timestamp_usec = 1; + required int64 level = 2; + required string message = 3; +} + +message UserAppLogGroup { + repeated UserAppLogLine log_line = 2; +} + +message FlushRequest { + optional bytes logs = 1; +} + +message SetStatusRequest { + required string status = 1; +} + + +message LogOffset { + optional bytes request_id = 1; +} + +message LogLine { + required int64 time = 1; + required int32 level = 2; + required string log_message = 3; +} + +message RequestLog { + required string app_id = 1; + optional string module_id = 37 [default="default"]; + required string version_id = 2; + required bytes request_id = 3; + optional LogOffset offset = 35; + required string ip = 4; + optional string nickname = 5; + required int64 start_time = 6; + required int64 end_time = 7; + required int64 latency = 8; + required int64 mcycles = 9; + required string method = 10; + required string resource = 11; + required string http_version = 12; + required int32 status = 13; + required int64 response_size = 14; + optional string referrer = 15; + optional string user_agent = 16; + required string url_map_entry = 17; + required string combined = 18; + optional int64 api_mcycles = 19; + optional string host = 20; + optional double cost = 21; + + optional string task_queue_name = 22; + optional string task_name = 23; + + optional bool was_loading_request = 24; + optional int64 pending_time = 25; + optional int32 replica_index = 26 [default = -1]; + optional bool finished = 27 [default = true]; + optional bytes clone_key = 28; + + repeated LogLine line = 29; + + optional bool lines_incomplete = 36; + optional bytes app_engine_release = 38; + + optional int32 exit_reason = 30; + optional bool was_throttled_for_time = 31; + optional bool was_throttled_for_requests = 32; + optional int64 throttled_time = 33; + + optional bytes server_name = 34; +} + +message LogModuleVersion { + optional string module_id = 1 [default="default"]; + optional string version_id = 2; +} + +message LogReadRequest { + required string app_id = 1; + repeated string version_id = 2; + repeated LogModuleVersion module_version = 19; + + optional int64 start_time = 3; + optional int64 end_time = 4; + optional LogOffset offset = 5; + repeated bytes request_id = 6; + + optional int32 minimum_log_level = 7; + optional bool include_incomplete = 8; + optional int64 count = 9; + + optional string combined_log_regex = 14; + optional string host_regex = 15; + optional int32 replica_index = 16; + + optional bool include_app_logs = 10; + optional int32 app_logs_per_request = 17; + optional bool include_host = 11; + optional bool include_all = 12; + optional bool cache_iterator = 13; + optional int32 num_shards = 18; +} + +message LogReadResponse { + repeated RequestLog log = 1; + optional LogOffset offset = 2; + optional int64 last_end_time = 3; +} + +message LogUsageRecord { + optional string version_id = 1; + optional int32 start_time = 2; + optional int32 end_time = 3; + optional int64 count = 4; + optional int64 total_size = 5; + optional int32 records = 6; +} + +message LogUsageRequest { + required string app_id = 1; + repeated string version_id = 2; + optional int32 start_time = 3; + optional int32 
end_time = 4; + optional uint32 resolution_hours = 5 [default = 1]; + optional bool combine_versions = 6; + optional int32 usage_version = 7; + optional bool versions_only = 8; +} + +message LogUsageResponse { + repeated LogUsageRecord usage = 1; + optional LogUsageRecord summary = 2; +} diff --git a/vendor/google.golang.org/appengine/internal/mail/mail_service.pb.go b/vendor/google.golang.org/appengine/internal/mail/mail_service.pb.go new file mode 100644 index 0000000..b8d5f03 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/mail/mail_service.pb.go @@ -0,0 +1,229 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/mail/mail_service.proto +// DO NOT EDIT! + +/* +Package mail is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/mail/mail_service.proto + +It has these top-level messages: + MailServiceError + MailAttachment + MailHeader + MailMessage +*/ +package mail + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type MailServiceError_ErrorCode int32 + +const ( + MailServiceError_OK MailServiceError_ErrorCode = 0 + MailServiceError_INTERNAL_ERROR MailServiceError_ErrorCode = 1 + MailServiceError_BAD_REQUEST MailServiceError_ErrorCode = 2 + MailServiceError_UNAUTHORIZED_SENDER MailServiceError_ErrorCode = 3 + MailServiceError_INVALID_ATTACHMENT_TYPE MailServiceError_ErrorCode = 4 + MailServiceError_INVALID_HEADER_NAME MailServiceError_ErrorCode = 5 + MailServiceError_INVALID_CONTENT_ID MailServiceError_ErrorCode = 6 +) + +var MailServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INTERNAL_ERROR", + 2: "BAD_REQUEST", + 3: "UNAUTHORIZED_SENDER", + 4: "INVALID_ATTACHMENT_TYPE", + 5: "INVALID_HEADER_NAME", + 6: "INVALID_CONTENT_ID", +} +var MailServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INTERNAL_ERROR": 1, + "BAD_REQUEST": 2, + "UNAUTHORIZED_SENDER": 3, + "INVALID_ATTACHMENT_TYPE": 4, + "INVALID_HEADER_NAME": 5, + "INVALID_CONTENT_ID": 6, +} + +func (x MailServiceError_ErrorCode) Enum() *MailServiceError_ErrorCode { + p := new(MailServiceError_ErrorCode) + *p = x + return p +} +func (x MailServiceError_ErrorCode) String() string { + return proto.EnumName(MailServiceError_ErrorCode_name, int32(x)) +} +func (x *MailServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MailServiceError_ErrorCode_value, data, "MailServiceError_ErrorCode") + if err != nil { + return err + } + *x = MailServiceError_ErrorCode(value) + return nil +} + +type MailServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *MailServiceError) Reset() { *m = MailServiceError{} } +func (m *MailServiceError) String() string { return proto.CompactTextString(m) } +func (*MailServiceError) ProtoMessage() {} + +type MailAttachment struct { + FileName *string `protobuf:"bytes,1,req,name=FileName" json:"FileName,omitempty"` + Data []byte `protobuf:"bytes,2,req,name=Data" json:"Data,omitempty"` + ContentID *string `protobuf:"bytes,3,opt,name=ContentID" json:"ContentID,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MailAttachment) Reset() { *m = MailAttachment{} } +func (m *MailAttachment) String() string { return proto.CompactTextString(m) } +func (*MailAttachment) ProtoMessage() {} + +func (m *MailAttachment) GetFileName() string { + 
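	// The message types in this file mirror mail_service.proto; a minimal,
	// hypothetical construction of an outbound message (addresses invented):
	//
	//	msg := &MailMessage{
	//		Sender:   proto.String("admin@example.com"),
	//		To:       []string{"user@example.com"},
	//		Subject:  proto.String("Hello"),
	//		TextBody: proto.String("Hi there."),
	//	}
	//	_ = msg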
if m != nil && m.FileName != nil { + return *m.FileName + } + return "" +} + +func (m *MailAttachment) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *MailAttachment) GetContentID() string { + if m != nil && m.ContentID != nil { + return *m.ContentID + } + return "" +} + +type MailHeader struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MailHeader) Reset() { *m = MailHeader{} } +func (m *MailHeader) String() string { return proto.CompactTextString(m) } +func (*MailHeader) ProtoMessage() {} + +func (m *MailHeader) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MailHeader) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type MailMessage struct { + Sender *string `protobuf:"bytes,1,req,name=Sender" json:"Sender,omitempty"` + ReplyTo *string `protobuf:"bytes,2,opt,name=ReplyTo" json:"ReplyTo,omitempty"` + To []string `protobuf:"bytes,3,rep,name=To" json:"To,omitempty"` + Cc []string `protobuf:"bytes,4,rep,name=Cc" json:"Cc,omitempty"` + Bcc []string `protobuf:"bytes,5,rep,name=Bcc" json:"Bcc,omitempty"` + Subject *string `protobuf:"bytes,6,req,name=Subject" json:"Subject,omitempty"` + TextBody *string `protobuf:"bytes,7,opt,name=TextBody" json:"TextBody,omitempty"` + HtmlBody *string `protobuf:"bytes,8,opt,name=HtmlBody" json:"HtmlBody,omitempty"` + Attachment []*MailAttachment `protobuf:"bytes,9,rep,name=Attachment" json:"Attachment,omitempty"` + Header []*MailHeader `protobuf:"bytes,10,rep,name=Header" json:"Header,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MailMessage) Reset() { *m = MailMessage{} } +func (m *MailMessage) String() string { return proto.CompactTextString(m) } +func (*MailMessage) ProtoMessage() {} + +func (m *MailMessage) GetSender() string { + if m != nil && m.Sender != nil { + return *m.Sender + } + return "" +} + +func (m *MailMessage) GetReplyTo() string { + if m != nil && m.ReplyTo != nil { + return *m.ReplyTo + } + return "" +} + +func (m *MailMessage) GetTo() []string { + if m != nil { + return m.To + } + return nil +} + +func (m *MailMessage) GetCc() []string { + if m != nil { + return m.Cc + } + return nil +} + +func (m *MailMessage) GetBcc() []string { + if m != nil { + return m.Bcc + } + return nil +} + +func (m *MailMessage) GetSubject() string { + if m != nil && m.Subject != nil { + return *m.Subject + } + return "" +} + +func (m *MailMessage) GetTextBody() string { + if m != nil && m.TextBody != nil { + return *m.TextBody + } + return "" +} + +func (m *MailMessage) GetHtmlBody() string { + if m != nil && m.HtmlBody != nil { + return *m.HtmlBody + } + return "" +} + +func (m *MailMessage) GetAttachment() []*MailAttachment { + if m != nil { + return m.Attachment + } + return nil +} + +func (m *MailMessage) GetHeader() []*MailHeader { + if m != nil { + return m.Header + } + return nil +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/mail/mail_service.proto b/vendor/google.golang.org/appengine/internal/mail/mail_service.proto new file mode 100644 index 0000000..4e57b7a --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/mail/mail_service.proto @@ -0,0 +1,45 @@ +syntax = "proto2"; +option go_package = "mail"; + +package appengine; + +message MailServiceError { + enum ErrorCode { + OK = 0; + INTERNAL_ERROR = 1; + 
BAD_REQUEST = 2; + UNAUTHORIZED_SENDER = 3; + INVALID_ATTACHMENT_TYPE = 4; + INVALID_HEADER_NAME = 5; + INVALID_CONTENT_ID = 6; + } +} + +message MailAttachment { + required string FileName = 1; + required bytes Data = 2; + optional string ContentID = 3; +} + +message MailHeader { + required string name = 1; + required string value = 2; +} + +message MailMessage { + required string Sender = 1; + optional string ReplyTo = 2; + + repeated string To = 3; + repeated string Cc = 4; + repeated string Bcc = 5; + + required string Subject = 6; + + optional string TextBody = 7; + optional string HtmlBody = 8; + + repeated MailAttachment Attachment = 9; + + repeated MailHeader Header = 10; +} diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go new file mode 100644 index 0000000..4903616 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/main.go @@ -0,0 +1,15 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build appengine + +package internal + +import ( + "appengine_internal" +) + +func Main() { + appengine_internal.Main() +} diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go new file mode 100644 index 0000000..57331ad --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/main_vm.go @@ -0,0 +1,44 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package internal + +import ( + "io" + "log" + "net/http" + "net/url" + "os" +) + +func Main() { + installHealthChecker(http.DefaultServeMux) + + port := "8080" + if s := os.Getenv("PORT"); s != "" { + port = s + } + + if err := http.ListenAndServe(":"+port, http.HandlerFunc(handleHTTP)); err != nil { + log.Fatalf("http.ListenAndServe: %v", err) + } +} + +func installHealthChecker(mux *http.ServeMux) { + // If no health check handler has been installed by this point, add a trivial one. + const healthPath = "/_ah/health" + hreq := &http.Request{ + Method: "GET", + URL: &url.URL{ + Path: healthPath, + }, + } + if _, pat := mux.Handler(hreq); pat != healthPath { + mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "ok") + }) + } +} diff --git a/vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go b/vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go new file mode 100644 index 0000000..252fef8 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go @@ -0,0 +1,938 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/memcache/memcache_service.proto +// DO NOT EDIT! + +/* +Package memcache is a generated protocol buffer package. 
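+
+A minimal construction sketch (illustrative only; it assumes nothing beyond the
+generated types in this file and the imported proto package):
+
+	req := &MemcacheGetRequest{Key: [][]byte{[]byte("greeting")}}
+	payload, err := proto.Marshal(req)
+	if err != nil {
+		// handle the marshalling error
+	}
+	_ = payload // handed to the runtime's RPC transport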
+ +It is generated from these files: + google.golang.org/appengine/internal/memcache/memcache_service.proto + +It has these top-level messages: + MemcacheServiceError + AppOverride + MemcacheGetRequest + MemcacheGetResponse + MemcacheSetRequest + MemcacheSetResponse + MemcacheDeleteRequest + MemcacheDeleteResponse + MemcacheIncrementRequest + MemcacheIncrementResponse + MemcacheBatchIncrementRequest + MemcacheBatchIncrementResponse + MemcacheFlushRequest + MemcacheFlushResponse + MemcacheStatsRequest + MergedNamespaceStats + MemcacheStatsResponse + MemcacheGrabTailRequest + MemcacheGrabTailResponse +*/ +package memcache + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type MemcacheServiceError_ErrorCode int32 + +const ( + MemcacheServiceError_OK MemcacheServiceError_ErrorCode = 0 + MemcacheServiceError_UNSPECIFIED_ERROR MemcacheServiceError_ErrorCode = 1 + MemcacheServiceError_NAMESPACE_NOT_SET MemcacheServiceError_ErrorCode = 2 + MemcacheServiceError_PERMISSION_DENIED MemcacheServiceError_ErrorCode = 3 + MemcacheServiceError_INVALID_VALUE MemcacheServiceError_ErrorCode = 6 +) + +var MemcacheServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "UNSPECIFIED_ERROR", + 2: "NAMESPACE_NOT_SET", + 3: "PERMISSION_DENIED", + 6: "INVALID_VALUE", +} +var MemcacheServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "UNSPECIFIED_ERROR": 1, + "NAMESPACE_NOT_SET": 2, + "PERMISSION_DENIED": 3, + "INVALID_VALUE": 6, +} + +func (x MemcacheServiceError_ErrorCode) Enum() *MemcacheServiceError_ErrorCode { + p := new(MemcacheServiceError_ErrorCode) + *p = x + return p +} +func (x MemcacheServiceError_ErrorCode) String() string { + return proto.EnumName(MemcacheServiceError_ErrorCode_name, int32(x)) +} +func (x *MemcacheServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MemcacheServiceError_ErrorCode_value, data, "MemcacheServiceError_ErrorCode") + if err != nil { + return err + } + *x = MemcacheServiceError_ErrorCode(value) + return nil +} + +type MemcacheSetRequest_SetPolicy int32 + +const ( + MemcacheSetRequest_SET MemcacheSetRequest_SetPolicy = 1 + MemcacheSetRequest_ADD MemcacheSetRequest_SetPolicy = 2 + MemcacheSetRequest_REPLACE MemcacheSetRequest_SetPolicy = 3 + MemcacheSetRequest_CAS MemcacheSetRequest_SetPolicy = 4 +) + +var MemcacheSetRequest_SetPolicy_name = map[int32]string{ + 1: "SET", + 2: "ADD", + 3: "REPLACE", + 4: "CAS", +} +var MemcacheSetRequest_SetPolicy_value = map[string]int32{ + "SET": 1, + "ADD": 2, + "REPLACE": 3, + "CAS": 4, +} + +func (x MemcacheSetRequest_SetPolicy) Enum() *MemcacheSetRequest_SetPolicy { + p := new(MemcacheSetRequest_SetPolicy) + *p = x + return p +} +func (x MemcacheSetRequest_SetPolicy) String() string { + return proto.EnumName(MemcacheSetRequest_SetPolicy_name, int32(x)) +} +func (x *MemcacheSetRequest_SetPolicy) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MemcacheSetRequest_SetPolicy_value, data, "MemcacheSetRequest_SetPolicy") + if err != nil { + return err + } + *x = MemcacheSetRequest_SetPolicy(value) + return nil +} + +type MemcacheSetResponse_SetStatusCode int32 + +const ( + MemcacheSetResponse_STORED MemcacheSetResponse_SetStatusCode = 1 + MemcacheSetResponse_NOT_STORED MemcacheSetResponse_SetStatusCode = 2 + MemcacheSetResponse_ERROR MemcacheSetResponse_SetStatusCode = 3 + 
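+	// EXISTS reports a CAS-policy set that was rejected because the item
+	// changed after it was read (memcached-style compare-and-swap semantics).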
MemcacheSetResponse_EXISTS MemcacheSetResponse_SetStatusCode = 4 +) + +var MemcacheSetResponse_SetStatusCode_name = map[int32]string{ + 1: "STORED", + 2: "NOT_STORED", + 3: "ERROR", + 4: "EXISTS", +} +var MemcacheSetResponse_SetStatusCode_value = map[string]int32{ + "STORED": 1, + "NOT_STORED": 2, + "ERROR": 3, + "EXISTS": 4, +} + +func (x MemcacheSetResponse_SetStatusCode) Enum() *MemcacheSetResponse_SetStatusCode { + p := new(MemcacheSetResponse_SetStatusCode) + *p = x + return p +} +func (x MemcacheSetResponse_SetStatusCode) String() string { + return proto.EnumName(MemcacheSetResponse_SetStatusCode_name, int32(x)) +} +func (x *MemcacheSetResponse_SetStatusCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MemcacheSetResponse_SetStatusCode_value, data, "MemcacheSetResponse_SetStatusCode") + if err != nil { + return err + } + *x = MemcacheSetResponse_SetStatusCode(value) + return nil +} + +type MemcacheDeleteResponse_DeleteStatusCode int32 + +const ( + MemcacheDeleteResponse_DELETED MemcacheDeleteResponse_DeleteStatusCode = 1 + MemcacheDeleteResponse_NOT_FOUND MemcacheDeleteResponse_DeleteStatusCode = 2 +) + +var MemcacheDeleteResponse_DeleteStatusCode_name = map[int32]string{ + 1: "DELETED", + 2: "NOT_FOUND", +} +var MemcacheDeleteResponse_DeleteStatusCode_value = map[string]int32{ + "DELETED": 1, + "NOT_FOUND": 2, +} + +func (x MemcacheDeleteResponse_DeleteStatusCode) Enum() *MemcacheDeleteResponse_DeleteStatusCode { + p := new(MemcacheDeleteResponse_DeleteStatusCode) + *p = x + return p +} +func (x MemcacheDeleteResponse_DeleteStatusCode) String() string { + return proto.EnumName(MemcacheDeleteResponse_DeleteStatusCode_name, int32(x)) +} +func (x *MemcacheDeleteResponse_DeleteStatusCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MemcacheDeleteResponse_DeleteStatusCode_value, data, "MemcacheDeleteResponse_DeleteStatusCode") + if err != nil { + return err + } + *x = MemcacheDeleteResponse_DeleteStatusCode(value) + return nil +} + +type MemcacheIncrementRequest_Direction int32 + +const ( + MemcacheIncrementRequest_INCREMENT MemcacheIncrementRequest_Direction = 1 + MemcacheIncrementRequest_DECREMENT MemcacheIncrementRequest_Direction = 2 +) + +var MemcacheIncrementRequest_Direction_name = map[int32]string{ + 1: "INCREMENT", + 2: "DECREMENT", +} +var MemcacheIncrementRequest_Direction_value = map[string]int32{ + "INCREMENT": 1, + "DECREMENT": 2, +} + +func (x MemcacheIncrementRequest_Direction) Enum() *MemcacheIncrementRequest_Direction { + p := new(MemcacheIncrementRequest_Direction) + *p = x + return p +} +func (x MemcacheIncrementRequest_Direction) String() string { + return proto.EnumName(MemcacheIncrementRequest_Direction_name, int32(x)) +} +func (x *MemcacheIncrementRequest_Direction) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MemcacheIncrementRequest_Direction_value, data, "MemcacheIncrementRequest_Direction") + if err != nil { + return err + } + *x = MemcacheIncrementRequest_Direction(value) + return nil +} + +type MemcacheIncrementResponse_IncrementStatusCode int32 + +const ( + MemcacheIncrementResponse_OK MemcacheIncrementResponse_IncrementStatusCode = 1 + MemcacheIncrementResponse_NOT_CHANGED MemcacheIncrementResponse_IncrementStatusCode = 2 + MemcacheIncrementResponse_ERROR MemcacheIncrementResponse_IncrementStatusCode = 3 +) + +var MemcacheIncrementResponse_IncrementStatusCode_name = map[int32]string{ + 1: "OK", + 2: "NOT_CHANGED", + 3: "ERROR", +} +var 
MemcacheIncrementResponse_IncrementStatusCode_value = map[string]int32{ + "OK": 1, + "NOT_CHANGED": 2, + "ERROR": 3, +} + +func (x MemcacheIncrementResponse_IncrementStatusCode) Enum() *MemcacheIncrementResponse_IncrementStatusCode { + p := new(MemcacheIncrementResponse_IncrementStatusCode) + *p = x + return p +} +func (x MemcacheIncrementResponse_IncrementStatusCode) String() string { + return proto.EnumName(MemcacheIncrementResponse_IncrementStatusCode_name, int32(x)) +} +func (x *MemcacheIncrementResponse_IncrementStatusCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MemcacheIncrementResponse_IncrementStatusCode_value, data, "MemcacheIncrementResponse_IncrementStatusCode") + if err != nil { + return err + } + *x = MemcacheIncrementResponse_IncrementStatusCode(value) + return nil +} + +type MemcacheServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheServiceError) Reset() { *m = MemcacheServiceError{} } +func (m *MemcacheServiceError) String() string { return proto.CompactTextString(m) } +func (*MemcacheServiceError) ProtoMessage() {} + +type AppOverride struct { + AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` + NumMemcachegBackends *int32 `protobuf:"varint,2,opt,name=num_memcacheg_backends" json:"num_memcacheg_backends,omitempty"` + IgnoreShardlock *bool `protobuf:"varint,3,opt,name=ignore_shardlock" json:"ignore_shardlock,omitempty"` + MemcachePoolHint *string `protobuf:"bytes,4,opt,name=memcache_pool_hint" json:"memcache_pool_hint,omitempty"` + MemcacheShardingStrategy []byte `protobuf:"bytes,5,opt,name=memcache_sharding_strategy" json:"memcache_sharding_strategy,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AppOverride) Reset() { *m = AppOverride{} } +func (m *AppOverride) String() string { return proto.CompactTextString(m) } +func (*AppOverride) ProtoMessage() {} + +func (m *AppOverride) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *AppOverride) GetNumMemcachegBackends() int32 { + if m != nil && m.NumMemcachegBackends != nil { + return *m.NumMemcachegBackends + } + return 0 +} + +func (m *AppOverride) GetIgnoreShardlock() bool { + if m != nil && m.IgnoreShardlock != nil { + return *m.IgnoreShardlock + } + return false +} + +func (m *AppOverride) GetMemcachePoolHint() string { + if m != nil && m.MemcachePoolHint != nil { + return *m.MemcachePoolHint + } + return "" +} + +func (m *AppOverride) GetMemcacheShardingStrategy() []byte { + if m != nil { + return m.MemcacheShardingStrategy + } + return nil +} + +type MemcacheGetRequest struct { + Key [][]byte `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` + NameSpace *string `protobuf:"bytes,2,opt,name=name_space,def=" json:"name_space,omitempty"` + ForCas *bool `protobuf:"varint,4,opt,name=for_cas" json:"for_cas,omitempty"` + Override *AppOverride `protobuf:"bytes,5,opt,name=override" json:"override,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheGetRequest) Reset() { *m = MemcacheGetRequest{} } +func (m *MemcacheGetRequest) String() string { return proto.CompactTextString(m) } +func (*MemcacheGetRequest) ProtoMessage() {} + +func (m *MemcacheGetRequest) GetKey() [][]byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *MemcacheGetRequest) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *MemcacheGetRequest) GetForCas() bool { + if m != nil && m.ForCas != nil { + 
return *m.ForCas + } + return false +} + +func (m *MemcacheGetRequest) GetOverride() *AppOverride { + if m != nil { + return m.Override + } + return nil +} + +type MemcacheGetResponse struct { + Item []*MemcacheGetResponse_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheGetResponse) Reset() { *m = MemcacheGetResponse{} } +func (m *MemcacheGetResponse) String() string { return proto.CompactTextString(m) } +func (*MemcacheGetResponse) ProtoMessage() {} + +func (m *MemcacheGetResponse) GetItem() []*MemcacheGetResponse_Item { + if m != nil { + return m.Item + } + return nil +} + +type MemcacheGetResponse_Item struct { + Key []byte `protobuf:"bytes,2,req,name=key" json:"key,omitempty"` + Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` + Flags *uint32 `protobuf:"fixed32,4,opt,name=flags" json:"flags,omitempty"` + CasId *uint64 `protobuf:"fixed64,5,opt,name=cas_id" json:"cas_id,omitempty"` + ExpiresInSeconds *int32 `protobuf:"varint,6,opt,name=expires_in_seconds" json:"expires_in_seconds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheGetResponse_Item) Reset() { *m = MemcacheGetResponse_Item{} } +func (m *MemcacheGetResponse_Item) String() string { return proto.CompactTextString(m) } +func (*MemcacheGetResponse_Item) ProtoMessage() {} + +func (m *MemcacheGetResponse_Item) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *MemcacheGetResponse_Item) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *MemcacheGetResponse_Item) GetFlags() uint32 { + if m != nil && m.Flags != nil { + return *m.Flags + } + return 0 +} + +func (m *MemcacheGetResponse_Item) GetCasId() uint64 { + if m != nil && m.CasId != nil { + return *m.CasId + } + return 0 +} + +func (m *MemcacheGetResponse_Item) GetExpiresInSeconds() int32 { + if m != nil && m.ExpiresInSeconds != nil { + return *m.ExpiresInSeconds + } + return 0 +} + +type MemcacheSetRequest struct { + Item []*MemcacheSetRequest_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"` + NameSpace *string `protobuf:"bytes,7,opt,name=name_space,def=" json:"name_space,omitempty"` + Override *AppOverride `protobuf:"bytes,10,opt,name=override" json:"override,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheSetRequest) Reset() { *m = MemcacheSetRequest{} } +func (m *MemcacheSetRequest) String() string { return proto.CompactTextString(m) } +func (*MemcacheSetRequest) ProtoMessage() {} + +func (m *MemcacheSetRequest) GetItem() []*MemcacheSetRequest_Item { + if m != nil { + return m.Item + } + return nil +} + +func (m *MemcacheSetRequest) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *MemcacheSetRequest) GetOverride() *AppOverride { + if m != nil { + return m.Override + } + return nil +} + +type MemcacheSetRequest_Item struct { + Key []byte `protobuf:"bytes,2,req,name=key" json:"key,omitempty"` + Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` + Flags *uint32 `protobuf:"fixed32,4,opt,name=flags" json:"flags,omitempty"` + SetPolicy *MemcacheSetRequest_SetPolicy `protobuf:"varint,5,opt,name=set_policy,enum=appengine.MemcacheSetRequest_SetPolicy,def=1" json:"set_policy,omitempty"` + ExpirationTime *uint32 `protobuf:"fixed32,6,opt,name=expiration_time,def=0" json:"expiration_time,omitempty"` + CasId *uint64 `protobuf:"fixed64,8,opt,name=cas_id" json:"cas_id,omitempty"` + ForCas 
*bool `protobuf:"varint,9,opt,name=for_cas" json:"for_cas,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheSetRequest_Item) Reset() { *m = MemcacheSetRequest_Item{} } +func (m *MemcacheSetRequest_Item) String() string { return proto.CompactTextString(m) } +func (*MemcacheSetRequest_Item) ProtoMessage() {} + +const Default_MemcacheSetRequest_Item_SetPolicy MemcacheSetRequest_SetPolicy = MemcacheSetRequest_SET +const Default_MemcacheSetRequest_Item_ExpirationTime uint32 = 0 + +func (m *MemcacheSetRequest_Item) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *MemcacheSetRequest_Item) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *MemcacheSetRequest_Item) GetFlags() uint32 { + if m != nil && m.Flags != nil { + return *m.Flags + } + return 0 +} + +func (m *MemcacheSetRequest_Item) GetSetPolicy() MemcacheSetRequest_SetPolicy { + if m != nil && m.SetPolicy != nil { + return *m.SetPolicy + } + return Default_MemcacheSetRequest_Item_SetPolicy +} + +func (m *MemcacheSetRequest_Item) GetExpirationTime() uint32 { + if m != nil && m.ExpirationTime != nil { + return *m.ExpirationTime + } + return Default_MemcacheSetRequest_Item_ExpirationTime +} + +func (m *MemcacheSetRequest_Item) GetCasId() uint64 { + if m != nil && m.CasId != nil { + return *m.CasId + } + return 0 +} + +func (m *MemcacheSetRequest_Item) GetForCas() bool { + if m != nil && m.ForCas != nil { + return *m.ForCas + } + return false +} + +type MemcacheSetResponse struct { + SetStatus []MemcacheSetResponse_SetStatusCode `protobuf:"varint,1,rep,name=set_status,enum=appengine.MemcacheSetResponse_SetStatusCode" json:"set_status,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheSetResponse) Reset() { *m = MemcacheSetResponse{} } +func (m *MemcacheSetResponse) String() string { return proto.CompactTextString(m) } +func (*MemcacheSetResponse) ProtoMessage() {} + +func (m *MemcacheSetResponse) GetSetStatus() []MemcacheSetResponse_SetStatusCode { + if m != nil { + return m.SetStatus + } + return nil +} + +type MemcacheDeleteRequest struct { + Item []*MemcacheDeleteRequest_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"` + NameSpace *string `protobuf:"bytes,4,opt,name=name_space,def=" json:"name_space,omitempty"` + Override *AppOverride `protobuf:"bytes,5,opt,name=override" json:"override,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheDeleteRequest) Reset() { *m = MemcacheDeleteRequest{} } +func (m *MemcacheDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*MemcacheDeleteRequest) ProtoMessage() {} + +func (m *MemcacheDeleteRequest) GetItem() []*MemcacheDeleteRequest_Item { + if m != nil { + return m.Item + } + return nil +} + +func (m *MemcacheDeleteRequest) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *MemcacheDeleteRequest) GetOverride() *AppOverride { + if m != nil { + return m.Override + } + return nil +} + +type MemcacheDeleteRequest_Item struct { + Key []byte `protobuf:"bytes,2,req,name=key" json:"key,omitempty"` + DeleteTime *uint32 `protobuf:"fixed32,3,opt,name=delete_time,def=0" json:"delete_time,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheDeleteRequest_Item) Reset() { *m = MemcacheDeleteRequest_Item{} } +func (m *MemcacheDeleteRequest_Item) String() string { return proto.CompactTextString(m) } +func (*MemcacheDeleteRequest_Item) ProtoMessage() {} + +const 
Default_MemcacheDeleteRequest_Item_DeleteTime uint32 = 0 + +func (m *MemcacheDeleteRequest_Item) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *MemcacheDeleteRequest_Item) GetDeleteTime() uint32 { + if m != nil && m.DeleteTime != nil { + return *m.DeleteTime + } + return Default_MemcacheDeleteRequest_Item_DeleteTime +} + +type MemcacheDeleteResponse struct { + DeleteStatus []MemcacheDeleteResponse_DeleteStatusCode `protobuf:"varint,1,rep,name=delete_status,enum=appengine.MemcacheDeleteResponse_DeleteStatusCode" json:"delete_status,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheDeleteResponse) Reset() { *m = MemcacheDeleteResponse{} } +func (m *MemcacheDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*MemcacheDeleteResponse) ProtoMessage() {} + +func (m *MemcacheDeleteResponse) GetDeleteStatus() []MemcacheDeleteResponse_DeleteStatusCode { + if m != nil { + return m.DeleteStatus + } + return nil +} + +type MemcacheIncrementRequest struct { + Key []byte `protobuf:"bytes,1,req,name=key" json:"key,omitempty"` + NameSpace *string `protobuf:"bytes,4,opt,name=name_space,def=" json:"name_space,omitempty"` + Delta *uint64 `protobuf:"varint,2,opt,name=delta,def=1" json:"delta,omitempty"` + Direction *MemcacheIncrementRequest_Direction `protobuf:"varint,3,opt,name=direction,enum=appengine.MemcacheIncrementRequest_Direction,def=1" json:"direction,omitempty"` + InitialValue *uint64 `protobuf:"varint,5,opt,name=initial_value" json:"initial_value,omitempty"` + InitialFlags *uint32 `protobuf:"fixed32,6,opt,name=initial_flags" json:"initial_flags,omitempty"` + Override *AppOverride `protobuf:"bytes,7,opt,name=override" json:"override,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheIncrementRequest) Reset() { *m = MemcacheIncrementRequest{} } +func (m *MemcacheIncrementRequest) String() string { return proto.CompactTextString(m) } +func (*MemcacheIncrementRequest) ProtoMessage() {} + +const Default_MemcacheIncrementRequest_Delta uint64 = 1 +const Default_MemcacheIncrementRequest_Direction MemcacheIncrementRequest_Direction = MemcacheIncrementRequest_INCREMENT + +func (m *MemcacheIncrementRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *MemcacheIncrementRequest) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *MemcacheIncrementRequest) GetDelta() uint64 { + if m != nil && m.Delta != nil { + return *m.Delta + } + return Default_MemcacheIncrementRequest_Delta +} + +func (m *MemcacheIncrementRequest) GetDirection() MemcacheIncrementRequest_Direction { + if m != nil && m.Direction != nil { + return *m.Direction + } + return Default_MemcacheIncrementRequest_Direction +} + +func (m *MemcacheIncrementRequest) GetInitialValue() uint64 { + if m != nil && m.InitialValue != nil { + return *m.InitialValue + } + return 0 +} + +func (m *MemcacheIncrementRequest) GetInitialFlags() uint32 { + if m != nil && m.InitialFlags != nil { + return *m.InitialFlags + } + return 0 +} + +func (m *MemcacheIncrementRequest) GetOverride() *AppOverride { + if m != nil { + return m.Override + } + return nil +} + +type MemcacheIncrementResponse struct { + NewValue *uint64 `protobuf:"varint,1,opt,name=new_value" json:"new_value,omitempty"` + IncrementStatus *MemcacheIncrementResponse_IncrementStatusCode `protobuf:"varint,2,opt,name=increment_status,enum=appengine.MemcacheIncrementResponse_IncrementStatusCode" 
json:"increment_status,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheIncrementResponse) Reset() { *m = MemcacheIncrementResponse{} } +func (m *MemcacheIncrementResponse) String() string { return proto.CompactTextString(m) } +func (*MemcacheIncrementResponse) ProtoMessage() {} + +func (m *MemcacheIncrementResponse) GetNewValue() uint64 { + if m != nil && m.NewValue != nil { + return *m.NewValue + } + return 0 +} + +func (m *MemcacheIncrementResponse) GetIncrementStatus() MemcacheIncrementResponse_IncrementStatusCode { + if m != nil && m.IncrementStatus != nil { + return *m.IncrementStatus + } + return MemcacheIncrementResponse_OK +} + +type MemcacheBatchIncrementRequest struct { + NameSpace *string `protobuf:"bytes,1,opt,name=name_space,def=" json:"name_space,omitempty"` + Item []*MemcacheIncrementRequest `protobuf:"bytes,2,rep,name=item" json:"item,omitempty"` + Override *AppOverride `protobuf:"bytes,3,opt,name=override" json:"override,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheBatchIncrementRequest) Reset() { *m = MemcacheBatchIncrementRequest{} } +func (m *MemcacheBatchIncrementRequest) String() string { return proto.CompactTextString(m) } +func (*MemcacheBatchIncrementRequest) ProtoMessage() {} + +func (m *MemcacheBatchIncrementRequest) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *MemcacheBatchIncrementRequest) GetItem() []*MemcacheIncrementRequest { + if m != nil { + return m.Item + } + return nil +} + +func (m *MemcacheBatchIncrementRequest) GetOverride() *AppOverride { + if m != nil { + return m.Override + } + return nil +} + +type MemcacheBatchIncrementResponse struct { + Item []*MemcacheIncrementResponse `protobuf:"bytes,1,rep,name=item" json:"item,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheBatchIncrementResponse) Reset() { *m = MemcacheBatchIncrementResponse{} } +func (m *MemcacheBatchIncrementResponse) String() string { return proto.CompactTextString(m) } +func (*MemcacheBatchIncrementResponse) ProtoMessage() {} + +func (m *MemcacheBatchIncrementResponse) GetItem() []*MemcacheIncrementResponse { + if m != nil { + return m.Item + } + return nil +} + +type MemcacheFlushRequest struct { + Override *AppOverride `protobuf:"bytes,1,opt,name=override" json:"override,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheFlushRequest) Reset() { *m = MemcacheFlushRequest{} } +func (m *MemcacheFlushRequest) String() string { return proto.CompactTextString(m) } +func (*MemcacheFlushRequest) ProtoMessage() {} + +func (m *MemcacheFlushRequest) GetOverride() *AppOverride { + if m != nil { + return m.Override + } + return nil +} + +type MemcacheFlushResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheFlushResponse) Reset() { *m = MemcacheFlushResponse{} } +func (m *MemcacheFlushResponse) String() string { return proto.CompactTextString(m) } +func (*MemcacheFlushResponse) ProtoMessage() {} + +type MemcacheStatsRequest struct { + Override *AppOverride `protobuf:"bytes,1,opt,name=override" json:"override,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheStatsRequest) Reset() { *m = MemcacheStatsRequest{} } +func (m *MemcacheStatsRequest) String() string { return proto.CompactTextString(m) } +func (*MemcacheStatsRequest) ProtoMessage() {} + +func (m *MemcacheStatsRequest) GetOverride() *AppOverride { + if m != nil { + return m.Override + } + return nil +} + +type MergedNamespaceStats 
struct { + Hits *uint64 `protobuf:"varint,1,req,name=hits" json:"hits,omitempty"` + Misses *uint64 `protobuf:"varint,2,req,name=misses" json:"misses,omitempty"` + ByteHits *uint64 `protobuf:"varint,3,req,name=byte_hits" json:"byte_hits,omitempty"` + Items *uint64 `protobuf:"varint,4,req,name=items" json:"items,omitempty"` + Bytes *uint64 `protobuf:"varint,5,req,name=bytes" json:"bytes,omitempty"` + OldestItemAge *uint32 `protobuf:"fixed32,6,req,name=oldest_item_age" json:"oldest_item_age,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MergedNamespaceStats) Reset() { *m = MergedNamespaceStats{} } +func (m *MergedNamespaceStats) String() string { return proto.CompactTextString(m) } +func (*MergedNamespaceStats) ProtoMessage() {} + +func (m *MergedNamespaceStats) GetHits() uint64 { + if m != nil && m.Hits != nil { + return *m.Hits + } + return 0 +} + +func (m *MergedNamespaceStats) GetMisses() uint64 { + if m != nil && m.Misses != nil { + return *m.Misses + } + return 0 +} + +func (m *MergedNamespaceStats) GetByteHits() uint64 { + if m != nil && m.ByteHits != nil { + return *m.ByteHits + } + return 0 +} + +func (m *MergedNamespaceStats) GetItems() uint64 { + if m != nil && m.Items != nil { + return *m.Items + } + return 0 +} + +func (m *MergedNamespaceStats) GetBytes() uint64 { + if m != nil && m.Bytes != nil { + return *m.Bytes + } + return 0 +} + +func (m *MergedNamespaceStats) GetOldestItemAge() uint32 { + if m != nil && m.OldestItemAge != nil { + return *m.OldestItemAge + } + return 0 +} + +type MemcacheStatsResponse struct { + Stats *MergedNamespaceStats `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheStatsResponse) Reset() { *m = MemcacheStatsResponse{} } +func (m *MemcacheStatsResponse) String() string { return proto.CompactTextString(m) } +func (*MemcacheStatsResponse) ProtoMessage() {} + +func (m *MemcacheStatsResponse) GetStats() *MergedNamespaceStats { + if m != nil { + return m.Stats + } + return nil +} + +type MemcacheGrabTailRequest struct { + ItemCount *int32 `protobuf:"varint,1,req,name=item_count" json:"item_count,omitempty"` + NameSpace *string `protobuf:"bytes,2,opt,name=name_space,def=" json:"name_space,omitempty"` + Override *AppOverride `protobuf:"bytes,3,opt,name=override" json:"override,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheGrabTailRequest) Reset() { *m = MemcacheGrabTailRequest{} } +func (m *MemcacheGrabTailRequest) String() string { return proto.CompactTextString(m) } +func (*MemcacheGrabTailRequest) ProtoMessage() {} + +func (m *MemcacheGrabTailRequest) GetItemCount() int32 { + if m != nil && m.ItemCount != nil { + return *m.ItemCount + } + return 0 +} + +func (m *MemcacheGrabTailRequest) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *MemcacheGrabTailRequest) GetOverride() *AppOverride { + if m != nil { + return m.Override + } + return nil +} + +type MemcacheGrabTailResponse struct { + Item []*MemcacheGrabTailResponse_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheGrabTailResponse) Reset() { *m = MemcacheGrabTailResponse{} } +func (m *MemcacheGrabTailResponse) String() string { return proto.CompactTextString(m) } +func (*MemcacheGrabTailResponse) ProtoMessage() {} + +func (m *MemcacheGrabTailResponse) GetItem() []*MemcacheGrabTailResponse_Item { + if m != nil { + return m.Item + } + return nil +} + +type 
MemcacheGrabTailResponse_Item struct { + Value []byte `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` + Flags *uint32 `protobuf:"fixed32,3,opt,name=flags" json:"flags,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheGrabTailResponse_Item) Reset() { *m = MemcacheGrabTailResponse_Item{} } +func (m *MemcacheGrabTailResponse_Item) String() string { return proto.CompactTextString(m) } +func (*MemcacheGrabTailResponse_Item) ProtoMessage() {} + +func (m *MemcacheGrabTailResponse_Item) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *MemcacheGrabTailResponse_Item) GetFlags() uint32 { + if m != nil && m.Flags != nil { + return *m.Flags + } + return 0 +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto b/vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto new file mode 100644 index 0000000..5f0edcd --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto @@ -0,0 +1,165 @@ +syntax = "proto2"; +option go_package = "memcache"; + +package appengine; + +message MemcacheServiceError { + enum ErrorCode { + OK = 0; + UNSPECIFIED_ERROR = 1; + NAMESPACE_NOT_SET = 2; + PERMISSION_DENIED = 3; + INVALID_VALUE = 6; + } +} + +message AppOverride { + required string app_id = 1; + + optional int32 num_memcacheg_backends = 2 [deprecated=true]; + optional bool ignore_shardlock = 3 [deprecated=true]; + optional string memcache_pool_hint = 4 [deprecated=true]; + optional bytes memcache_sharding_strategy = 5 [deprecated=true]; +} + +message MemcacheGetRequest { + repeated bytes key = 1; + optional string name_space = 2 [default = ""]; + optional bool for_cas = 4; + optional AppOverride override = 5; +} + +message MemcacheGetResponse { + repeated group Item = 1 { + required bytes key = 2; + required bytes value = 3; + optional fixed32 flags = 4; + optional fixed64 cas_id = 5; + optional int32 expires_in_seconds = 6; + } +} + +message MemcacheSetRequest { + enum SetPolicy { + SET = 1; + ADD = 2; + REPLACE = 3; + CAS = 4; + } + repeated group Item = 1 { + required bytes key = 2; + required bytes value = 3; + + optional fixed32 flags = 4; + optional SetPolicy set_policy = 5 [default = SET]; + optional fixed32 expiration_time = 6 [default = 0]; + + optional fixed64 cas_id = 8; + optional bool for_cas = 9; + } + optional string name_space = 7 [default = ""]; + optional AppOverride override = 10; +} + +message MemcacheSetResponse { + enum SetStatusCode { + STORED = 1; + NOT_STORED = 2; + ERROR = 3; + EXISTS = 4; + } + repeated SetStatusCode set_status = 1; +} + +message MemcacheDeleteRequest { + repeated group Item = 1 { + required bytes key = 2; + optional fixed32 delete_time = 3 [default = 0]; + } + optional string name_space = 4 [default = ""]; + optional AppOverride override = 5; +} + +message MemcacheDeleteResponse { + enum DeleteStatusCode { + DELETED = 1; + NOT_FOUND = 2; + } + repeated DeleteStatusCode delete_status = 1; +} + +message MemcacheIncrementRequest { + enum Direction { + INCREMENT = 1; + DECREMENT = 2; + } + required bytes key = 1; + optional string name_space = 4 [default = ""]; + + optional uint64 delta = 2 [default = 1]; + optional Direction direction = 3 [default = INCREMENT]; + + optional uint64 initial_value = 5; + optional fixed32 initial_flags = 6; + optional AppOverride override = 7; +} + +message MemcacheIncrementResponse { + enum IncrementStatusCode { + OK = 1; + NOT_CHANGED = 2; + ERROR = 3; + } + + optional 
uint64 new_value = 1; + optional IncrementStatusCode increment_status = 2; +} + +message MemcacheBatchIncrementRequest { + optional string name_space = 1 [default = ""]; + repeated MemcacheIncrementRequest item = 2; + optional AppOverride override = 3; +} + +message MemcacheBatchIncrementResponse { + repeated MemcacheIncrementResponse item = 1; +} + +message MemcacheFlushRequest { + optional AppOverride override = 1; +} + +message MemcacheFlushResponse { +} + +message MemcacheStatsRequest { + optional AppOverride override = 1; +} + +message MergedNamespaceStats { + required uint64 hits = 1; + required uint64 misses = 2; + required uint64 byte_hits = 3; + + required uint64 items = 4; + required uint64 bytes = 5; + + required fixed32 oldest_item_age = 6; +} + +message MemcacheStatsResponse { + optional MergedNamespaceStats stats = 1; +} + +message MemcacheGrabTailRequest { + required int32 item_count = 1; + optional string name_space = 2 [default = ""]; + optional AppOverride override = 3; +} + +message MemcacheGrabTailResponse { + repeated group Item = 1 { + required bytes value = 2; + optional fixed32 flags = 3; + } +} diff --git a/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/google.golang.org/appengine/internal/metadata.go new file mode 100644 index 0000000..9cc1f71 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/metadata.go @@ -0,0 +1,61 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +// This file has code for accessing metadata. +// +// References: +// https://cloud.google.com/compute/docs/metadata + +import ( + "fmt" + "io/ioutil" + "log" + "net/http" + "net/url" +) + +const ( + metadataHost = "metadata" + metadataPath = "/computeMetadata/v1/" +) + +var ( + metadataRequestHeaders = http.Header{ + "Metadata-Flavor": []string{"Google"}, + } +) + +// TODO(dsymonds): Do we need to support default values, like Python? +func mustGetMetadata(key string) []byte { + b, err := getMetadata(key) + if err != nil { + log.Fatalf("Metadata fetch failed: %v", err) + } + return b +} + +func getMetadata(key string) ([]byte, error) { + // TODO(dsymonds): May need to use url.Parse to support keys with query args. + req := &http.Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "http", + Host: metadataHost, + Path: metadataPath + key, + }, + Header: metadataRequestHeaders, + Host: metadataHost, + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != 200 { + return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode) + } + return ioutil.ReadAll(resp.Body) +} diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go new file mode 100644 index 0000000..a0145ed --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go @@ -0,0 +1,375 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/modules/modules_service.proto +// DO NOT EDIT! + +/* +Package modules is a generated protocol buffer package. 
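+
+A minimal construction sketch (illustrative only; proto.String is the standard
+golang/protobuf helper for pointer-valued string fields):
+
+	req := &StartModuleRequest{
+		Module:  proto.String("backend"),
+		Version: proto.String("v1"),
+	}
+	_ = req // marshalled with proto.Marshal and sent over the runtime's RPC layer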
+ +It is generated from these files: + google.golang.org/appengine/internal/modules/modules_service.proto + +It has these top-level messages: + ModulesServiceError + GetModulesRequest + GetModulesResponse + GetVersionsRequest + GetVersionsResponse + GetDefaultVersionRequest + GetDefaultVersionResponse + GetNumInstancesRequest + GetNumInstancesResponse + SetNumInstancesRequest + SetNumInstancesResponse + StartModuleRequest + StartModuleResponse + StopModuleRequest + StopModuleResponse + GetHostnameRequest + GetHostnameResponse +*/ +package modules + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type ModulesServiceError_ErrorCode int32 + +const ( + ModulesServiceError_OK ModulesServiceError_ErrorCode = 0 + ModulesServiceError_INVALID_MODULE ModulesServiceError_ErrorCode = 1 + ModulesServiceError_INVALID_VERSION ModulesServiceError_ErrorCode = 2 + ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3 + ModulesServiceError_TRANSIENT_ERROR ModulesServiceError_ErrorCode = 4 + ModulesServiceError_UNEXPECTED_STATE ModulesServiceError_ErrorCode = 5 +) + +var ModulesServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INVALID_MODULE", + 2: "INVALID_VERSION", + 3: "INVALID_INSTANCES", + 4: "TRANSIENT_ERROR", + 5: "UNEXPECTED_STATE", +} +var ModulesServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INVALID_MODULE": 1, + "INVALID_VERSION": 2, + "INVALID_INSTANCES": 3, + "TRANSIENT_ERROR": 4, + "UNEXPECTED_STATE": 5, +} + +func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode { + p := new(ModulesServiceError_ErrorCode) + *p = x + return p +} +func (x ModulesServiceError_ErrorCode) String() string { + return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x)) +} +func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode") + if err != nil { + return err + } + *x = ModulesServiceError_ErrorCode(value) + return nil +} + +type ModulesServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ModulesServiceError) Reset() { *m = ModulesServiceError{} } +func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) } +func (*ModulesServiceError) ProtoMessage() {} + +type GetModulesRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetModulesRequest) Reset() { *m = GetModulesRequest{} } +func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) } +func (*GetModulesRequest) ProtoMessage() {} + +type GetModulesResponse struct { + Module []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetModulesResponse) Reset() { *m = GetModulesResponse{} } +func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) } +func (*GetModulesResponse) ProtoMessage() {} + +func (m *GetModulesResponse) GetModule() []string { + if m != nil { + return m.Module + } + return nil +} + +type GetVersionsRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetVersionsRequest) Reset() { *m = GetVersionsRequest{} } +func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) } +func 
(*GetVersionsRequest) ProtoMessage() {} + +func (m *GetVersionsRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +type GetVersionsResponse struct { + Version []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetVersionsResponse) Reset() { *m = GetVersionsResponse{} } +func (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) } +func (*GetVersionsResponse) ProtoMessage() {} + +func (m *GetVersionsResponse) GetVersion() []string { + if m != nil { + return m.Version + } + return nil +} + +type GetDefaultVersionRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetDefaultVersionRequest) Reset() { *m = GetDefaultVersionRequest{} } +func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) } +func (*GetDefaultVersionRequest) ProtoMessage() {} + +func (m *GetDefaultVersionRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +type GetDefaultVersionResponse struct { + Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetDefaultVersionResponse) Reset() { *m = GetDefaultVersionResponse{} } +func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) } +func (*GetDefaultVersionResponse) ProtoMessage() {} + +func (m *GetDefaultVersionResponse) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type GetNumInstancesRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetNumInstancesRequest) Reset() { *m = GetNumInstancesRequest{} } +func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) } +func (*GetNumInstancesRequest) ProtoMessage() {} + +func (m *GetNumInstancesRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *GetNumInstancesRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type GetNumInstancesResponse struct { + Instances *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetNumInstancesResponse) Reset() { *m = GetNumInstancesResponse{} } +func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) } +func (*GetNumInstancesResponse) ProtoMessage() {} + +func (m *GetNumInstancesResponse) GetInstances() int64 { + if m != nil && m.Instances != nil { + return *m.Instances + } + return 0 +} + +type SetNumInstancesRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + Instances *int64 `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SetNumInstancesRequest) Reset() { *m = SetNumInstancesRequest{} } +func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) } +func (*SetNumInstancesRequest) ProtoMessage() {} + +func (m *SetNumInstancesRequest) GetModule() string { + if m != nil && m.Module 
!= nil { + return *m.Module + } + return "" +} + +func (m *SetNumInstancesRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +func (m *SetNumInstancesRequest) GetInstances() int64 { + if m != nil && m.Instances != nil { + return *m.Instances + } + return 0 +} + +type SetNumInstancesResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *SetNumInstancesResponse) Reset() { *m = SetNumInstancesResponse{} } +func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) } +func (*SetNumInstancesResponse) ProtoMessage() {} + +type StartModuleRequest struct { + Module *string `protobuf:"bytes,1,req,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StartModuleRequest) Reset() { *m = StartModuleRequest{} } +func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) } +func (*StartModuleRequest) ProtoMessage() {} + +func (m *StartModuleRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *StartModuleRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type StartModuleResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *StartModuleResponse) Reset() { *m = StartModuleResponse{} } +func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) } +func (*StartModuleResponse) ProtoMessage() {} + +type StopModuleRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StopModuleRequest) Reset() { *m = StopModuleRequest{} } +func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) } +func (*StopModuleRequest) ProtoMessage() {} + +func (m *StopModuleRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *StopModuleRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type StopModuleResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *StopModuleResponse) Reset() { *m = StopModuleResponse{} } +func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) } +func (*StopModuleResponse) ProtoMessage() {} + +type GetHostnameRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + Instance *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetHostnameRequest) Reset() { *m = GetHostnameRequest{} } +func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) } +func (*GetHostnameRequest) ProtoMessage() {} + +func (m *GetHostnameRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *GetHostnameRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +func (m *GetHostnameRequest) GetInstance() string { + if m != nil && m.Instance != nil { + return *m.Instance + } + return "" +} + +type GetHostnameResponse struct { + Hostname *string `protobuf:"bytes,1,req,name=hostname" 
json:"hostname,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetHostnameResponse) Reset() { *m = GetHostnameResponse{} } +func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) } +func (*GetHostnameResponse) ProtoMessage() {} + +func (m *GetHostnameResponse) GetHostname() string { + if m != nil && m.Hostname != nil { + return *m.Hostname + } + return "" +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.proto b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto new file mode 100644 index 0000000..d29f006 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto @@ -0,0 +1,80 @@ +syntax = "proto2"; +option go_package = "modules"; + +package appengine; + +message ModulesServiceError { + enum ErrorCode { + OK = 0; + INVALID_MODULE = 1; + INVALID_VERSION = 2; + INVALID_INSTANCES = 3; + TRANSIENT_ERROR = 4; + UNEXPECTED_STATE = 5; + } +} + +message GetModulesRequest { +} + +message GetModulesResponse { + repeated string module = 1; +} + +message GetVersionsRequest { + optional string module = 1; +} + +message GetVersionsResponse { + repeated string version = 1; +} + +message GetDefaultVersionRequest { + optional string module = 1; +} + +message GetDefaultVersionResponse { + required string version = 1; +} + +message GetNumInstancesRequest { + optional string module = 1; + optional string version = 2; +} + +message GetNumInstancesResponse { + required int64 instances = 1; +} + +message SetNumInstancesRequest { + optional string module = 1; + optional string version = 2; + required int64 instances = 3; +} + +message SetNumInstancesResponse {} + +message StartModuleRequest { + required string module = 1; + required string version = 2; +} + +message StartModuleResponse {} + +message StopModuleRequest { + optional string module = 1; + optional string version = 2; +} + +message StopModuleResponse {} + +message GetHostnameRequest { + optional string module = 1; + optional string version = 2; + optional string instance = 3; +} + +message GetHostnameResponse { + required string hostname = 1; +} + diff --git a/vendor/google.golang.org/appengine/internal/net.go b/vendor/google.golang.org/appengine/internal/net.go new file mode 100644 index 0000000..3b94cf0 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/net.go @@ -0,0 +1,56 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +// This file implements a network dialer that limits the number of concurrent connections. +// It is only used for API calls. + +import ( + "log" + "net" + "runtime" + "sync" + "time" +) + +var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable. + +func limitRelease() { + // non-blocking + select { + case <-limitSem: + default: + // This should not normally happen. + log.Print("appengine: unbalanced limitSem release!") + } +} + +func limitDial(network, addr string) (net.Conn, error) { + limitSem <- 1 + + // Dial with a timeout in case the API host is MIA. + // The connection should normally be very fast. 
+ conn, err := net.DialTimeout(network, addr, 500*time.Millisecond) + if err != nil { + limitRelease() + return nil, err + } + lc := &limitConn{Conn: conn} + runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required + return lc, nil +} + +type limitConn struct { + close sync.Once + net.Conn +} + +func (lc *limitConn) Close() error { + defer lc.close.Do(func() { + limitRelease() + runtime.SetFinalizer(lc, nil) + }) + return lc.Conn.Close() +} diff --git a/vendor/google.golang.org/appengine/internal/net_test.go b/vendor/google.golang.org/appengine/internal/net_test.go new file mode 100644 index 0000000..24da8bb --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/net_test.go @@ -0,0 +1,58 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package internal + +import ( + "sync" + "testing" + "time" + + netcontext "golang.org/x/net/context" + + basepb "google.golang.org/appengine/internal/base" +) + +func TestDialLimit(t *testing.T) { + // Fill up semaphore with false acquisitions to permit only two TCP connections at a time. + // We don't replace limitSem because that results in a data race when net/http lazily closes connections. + nFake := cap(limitSem) - 2 + for i := 0; i < nFake; i++ { + limitSem <- 1 + } + defer func() { + for i := 0; i < nFake; i++ { + <-limitSem + } + }() + + f, c, cleanup := setup() // setup is in api_test.go + defer cleanup() + f.hang = make(chan int) + + // If we make two RunSlowly RPCs (which will wait for f.hang to be strobed), + // then the simple Non200 RPC should hang. + var wg sync.WaitGroup + wg.Add(2) + for i := 0; i < 2; i++ { + go func() { + defer wg.Done() + Call(toContext(c), "errors", "RunSlowly", &basepb.VoidProto{}, &basepb.VoidProto{}) + }() + } + time.Sleep(50 * time.Millisecond) // let those two RPCs start + + ctx, _ := netcontext.WithTimeout(toContext(c), 50*time.Millisecond) + err := Call(ctx, "errors", "Non200", &basepb.VoidProto{}, &basepb.VoidProto{}) + if err != errTimeout { + t.Errorf("Non200 RPC returned with err %v, want errTimeout", err) + } + + // Drain the two RunSlowly calls. + f.hang <- 1 + f.hang <- 1 + wg.Wait() +} diff --git a/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/google.golang.org/appengine/internal/regen.sh new file mode 100755 index 0000000..2fdb546 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/regen.sh @@ -0,0 +1,40 @@ +#!/bin/bash -e +# +# This script rebuilds the generated code for the protocol buffers. +# To run this you will need protoc and goprotobuf installed; +# see https://github.com/golang/protobuf for instructions. + +PKG=google.golang.org/appengine + +function die() { + echo 1>&2 $* + exit 1 +} + +# Sanity check that the right tools are accessible. +for tool in go protoc protoc-gen-go; do + q=$(which $tool) || die "didn't find $tool" + echo 1>&2 "$tool: $q" +done + +echo -n 1>&2 "finding package dir... " +pkgdir=$(go list -f '{{.Dir}}' $PKG) +echo 1>&2 $pkgdir +base=$(echo $pkgdir | sed "s,/$PKG\$,,") +echo 1>&2 "base: $base" +cd $base + +# Run protoc once per package. +for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do + echo 1>&2 "* $dir" + protoc --go_out=. $dir/*.proto +done + +for f in $(find $PKG/internal -name '*.pb.go'); do + # Remove proto.RegisterEnum calls. + # These cause duplicate registration panics when these packages + # are used on classic App Engine. 
proto.RegisterEnum only affects + # parsing the text format; we don't care about that. + # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17 + sed -i '/proto.RegisterEnum/d' $f +done diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go new file mode 100644 index 0000000..526bd39 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go @@ -0,0 +1,231 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/remote_api/remote_api.proto +// DO NOT EDIT! + +/* +Package remote_api is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/remote_api/remote_api.proto + +It has these top-level messages: + Request + ApplicationError + RpcError + Response +*/ +package remote_api + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type RpcError_ErrorCode int32 + +const ( + RpcError_UNKNOWN RpcError_ErrorCode = 0 + RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1 + RpcError_PARSE_ERROR RpcError_ErrorCode = 2 + RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3 + RpcError_OVER_QUOTA RpcError_ErrorCode = 4 + RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5 + RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6 + RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7 + RpcError_BAD_REQUEST RpcError_ErrorCode = 8 + RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9 + RpcError_CANCELLED RpcError_ErrorCode = 10 + RpcError_REPLAY_ERROR RpcError_ErrorCode = 11 + RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12 +) + +var RpcError_ErrorCode_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CALL_NOT_FOUND", + 2: "PARSE_ERROR", + 3: "SECURITY_VIOLATION", + 4: "OVER_QUOTA", + 5: "REQUEST_TOO_LARGE", + 6: "CAPABILITY_DISABLED", + 7: "FEATURE_DISABLED", + 8: "BAD_REQUEST", + 9: "RESPONSE_TOO_LARGE", + 10: "CANCELLED", + 11: "REPLAY_ERROR", + 12: "DEADLINE_EXCEEDED", +} +var RpcError_ErrorCode_value = map[string]int32{ + "UNKNOWN": 0, + "CALL_NOT_FOUND": 1, + "PARSE_ERROR": 2, + "SECURITY_VIOLATION": 3, + "OVER_QUOTA": 4, + "REQUEST_TOO_LARGE": 5, + "CAPABILITY_DISABLED": 6, + "FEATURE_DISABLED": 7, + "BAD_REQUEST": 8, + "RESPONSE_TOO_LARGE": 9, + "CANCELLED": 10, + "REPLAY_ERROR": 11, + "DEADLINE_EXCEEDED": 12, +} + +func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode { + p := new(RpcError_ErrorCode) + *p = x + return p +} +func (x RpcError_ErrorCode) String() string { + return proto.EnumName(RpcError_ErrorCode_name, int32(x)) +} +func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode") + if err != nil { + return err + } + *x = RpcError_ErrorCode(value) + return nil +} + +type Request struct { + ServiceName *string `protobuf:"bytes,2,req,name=service_name" json:"service_name,omitempty"` + Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"` + Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"` + RequestId *string `protobuf:"bytes,5,opt,name=request_id" json:"request_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) 
ProtoMessage() {} + +func (m *Request) GetServiceName() string { + if m != nil && m.ServiceName != nil { + return *m.ServiceName + } + return "" +} + +func (m *Request) GetMethod() string { + if m != nil && m.Method != nil { + return *m.Method + } + return "" +} + +func (m *Request) GetRequest() []byte { + if m != nil { + return m.Request + } + return nil +} + +func (m *Request) GetRequestId() string { + if m != nil && m.RequestId != nil { + return *m.RequestId + } + return "" +} + +type ApplicationError struct { + Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` + Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ApplicationError) Reset() { *m = ApplicationError{} } +func (m *ApplicationError) String() string { return proto.CompactTextString(m) } +func (*ApplicationError) ProtoMessage() {} + +func (m *ApplicationError) GetCode() int32 { + if m != nil && m.Code != nil { + return *m.Code + } + return 0 +} + +func (m *ApplicationError) GetDetail() string { + if m != nil && m.Detail != nil { + return *m.Detail + } + return "" +} + +type RpcError struct { + Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` + Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RpcError) Reset() { *m = RpcError{} } +func (m *RpcError) String() string { return proto.CompactTextString(m) } +func (*RpcError) ProtoMessage() {} + +func (m *RpcError) GetCode() int32 { + if m != nil && m.Code != nil { + return *m.Code + } + return 0 +} + +func (m *RpcError) GetDetail() string { + if m != nil && m.Detail != nil { + return *m.Detail + } + return "" +} + +type Response struct { + Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"` + Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"` + ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error" json:"application_error,omitempty"` + JavaException []byte `protobuf:"bytes,4,opt,name=java_exception" json:"java_exception,omitempty"` + RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error" json:"rpc_error,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} + +func (m *Response) GetResponse() []byte { + if m != nil { + return m.Response + } + return nil +} + +func (m *Response) GetException() []byte { + if m != nil { + return m.Exception + } + return nil +} + +func (m *Response) GetApplicationError() *ApplicationError { + if m != nil { + return m.ApplicationError + } + return nil +} + +func (m *Response) GetJavaException() []byte { + if m != nil { + return m.JavaException + } + return nil +} + +func (m *Response) GetRpcError() *RpcError { + if m != nil { + return m.RpcError + } + return nil +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto new file mode 100644 index 0000000..f21763a --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto @@ -0,0 +1,44 @@ +syntax = "proto2"; +option go_package = "remote_api"; + +package remote_api; + +message Request { + required string service_name = 2; + required string method = 3; + required bytes request = 4; + optional string request_id = 5; +} 
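+
+// Editor's note (assumption, not part of the upstream file): Request is
+// an envelope for a single App Engine API call. service_name and method
+// select the backend handler, request carries the inner call's
+// already-serialized proto bytes, and request_id lets a caller match a
+// Response back to its Request. Response (below) mirrors this: at most
+// one of response, exception, application_error, java_exception or
+// rpc_error is expected to be set for a given call.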
+ +message ApplicationError { + required int32 code = 1; + required string detail = 2; +} + +message RpcError { + enum ErrorCode { + UNKNOWN = 0; + CALL_NOT_FOUND = 1; + PARSE_ERROR = 2; + SECURITY_VIOLATION = 3; + OVER_QUOTA = 4; + REQUEST_TOO_LARGE = 5; + CAPABILITY_DISABLED = 6; + FEATURE_DISABLED = 7; + BAD_REQUEST = 8; + RESPONSE_TOO_LARGE = 9; + CANCELLED = 10; + REPLAY_ERROR = 11; + DEADLINE_EXCEEDED = 12; + } + required int32 code = 1; + optional string detail = 2; +} + +message Response { + optional bytes response = 1; + optional bytes exception = 2; + optional ApplicationError application_error = 3; + optional bytes java_exception = 4; + optional RpcError rpc_error = 5; +} diff --git a/vendor/google.golang.org/appengine/internal/search/search.pb.go b/vendor/google.golang.org/appengine/internal/search/search.pb.go new file mode 100644 index 0000000..7d8d11d --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/search/search.pb.go @@ -0,0 +1,2127 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/search/search.proto +// DO NOT EDIT! + +/* +Package search is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/search/search.proto + +It has these top-level messages: + Scope + Entry + AccessControlList + FieldValue + Field + FieldTypes + IndexShardSettings + FacetValue + Facet + DocumentMetadata + Document + SearchServiceError + RequestStatus + IndexSpec + IndexMetadata + IndexDocumentParams + IndexDocumentRequest + IndexDocumentResponse + DeleteDocumentParams + DeleteDocumentRequest + DeleteDocumentResponse + ListDocumentsParams + ListDocumentsRequest + ListDocumentsResponse + ListIndexesParams + ListIndexesRequest + ListIndexesResponse + DeleteSchemaParams + DeleteSchemaRequest + DeleteSchemaResponse + SortSpec + ScorerSpec + FieldSpec + FacetRange + FacetRequestParam + FacetAutoDetectParam + FacetRequest + FacetRefinement + SearchParams + SearchRequest + FacetResultValue + FacetResult + SearchResult + SearchResponse +*/ +package search + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type Scope_Type int32 + +const ( + Scope_USER_BY_CANONICAL_ID Scope_Type = 1 + Scope_USER_BY_EMAIL Scope_Type = 2 + Scope_GROUP_BY_CANONICAL_ID Scope_Type = 3 + Scope_GROUP_BY_EMAIL Scope_Type = 4 + Scope_GROUP_BY_DOMAIN Scope_Type = 5 + Scope_ALL_USERS Scope_Type = 6 + Scope_ALL_AUTHENTICATED_USERS Scope_Type = 7 +) + +var Scope_Type_name = map[int32]string{ + 1: "USER_BY_CANONICAL_ID", + 2: "USER_BY_EMAIL", + 3: "GROUP_BY_CANONICAL_ID", + 4: "GROUP_BY_EMAIL", + 5: "GROUP_BY_DOMAIN", + 6: "ALL_USERS", + 7: "ALL_AUTHENTICATED_USERS", +} +var Scope_Type_value = map[string]int32{ + "USER_BY_CANONICAL_ID": 1, + "USER_BY_EMAIL": 2, + "GROUP_BY_CANONICAL_ID": 3, + "GROUP_BY_EMAIL": 4, + "GROUP_BY_DOMAIN": 5, + "ALL_USERS": 6, + "ALL_AUTHENTICATED_USERS": 7, +} + +func (x Scope_Type) Enum() *Scope_Type { + p := new(Scope_Type) + *p = x + return p +} +func (x Scope_Type) String() string { + return proto.EnumName(Scope_Type_name, int32(x)) +} +func (x *Scope_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Scope_Type_value, data, "Scope_Type") + if err != nil { + return err + } + *x = Scope_Type(value) + return nil +} + +type Entry_Permission int32 + +const ( + Entry_READ Entry_Permission = 1 + Entry_WRITE Entry_Permission = 2 + Entry_FULL_CONTROL Entry_Permission = 3 +) + +var Entry_Permission_name = map[int32]string{ + 1: "READ", + 2: "WRITE", + 3: "FULL_CONTROL", +} +var Entry_Permission_value = map[string]int32{ + "READ": 1, + "WRITE": 2, + "FULL_CONTROL": 3, +} + +func (x Entry_Permission) Enum() *Entry_Permission { + p := new(Entry_Permission) + *p = x + return p +} +func (x Entry_Permission) String() string { + return proto.EnumName(Entry_Permission_name, int32(x)) +} +func (x *Entry_Permission) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Entry_Permission_value, data, "Entry_Permission") + if err != nil { + return err + } + *x = Entry_Permission(value) + return nil +} + +type FieldValue_ContentType int32 + +const ( + FieldValue_TEXT FieldValue_ContentType = 0 + FieldValue_HTML FieldValue_ContentType = 1 + FieldValue_ATOM FieldValue_ContentType = 2 + FieldValue_DATE FieldValue_ContentType = 3 + FieldValue_NUMBER FieldValue_ContentType = 4 + FieldValue_GEO FieldValue_ContentType = 5 +) + +var FieldValue_ContentType_name = map[int32]string{ + 0: "TEXT", + 1: "HTML", + 2: "ATOM", + 3: "DATE", + 4: "NUMBER", + 5: "GEO", +} +var FieldValue_ContentType_value = map[string]int32{ + "TEXT": 0, + "HTML": 1, + "ATOM": 2, + "DATE": 3, + "NUMBER": 4, + "GEO": 5, +} + +func (x FieldValue_ContentType) Enum() *FieldValue_ContentType { + p := new(FieldValue_ContentType) + *p = x + return p +} +func (x FieldValue_ContentType) String() string { + return proto.EnumName(FieldValue_ContentType_name, int32(x)) +} +func (x *FieldValue_ContentType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldValue_ContentType_value, data, "FieldValue_ContentType") + if err != nil { + return err + } + *x = FieldValue_ContentType(value) + return nil +} + +type FacetValue_ContentType int32 + +const ( + FacetValue_ATOM FacetValue_ContentType = 2 + FacetValue_NUMBER FacetValue_ContentType = 4 +) + +var FacetValue_ContentType_name = map[int32]string{ + 2: "ATOM", + 4: "NUMBER", +} +var FacetValue_ContentType_value = map[string]int32{ + "ATOM": 2, + "NUMBER": 4, +} + +func (x FacetValue_ContentType) Enum() *FacetValue_ContentType { + p := new(FacetValue_ContentType) + *p = x + 
return p +} +func (x FacetValue_ContentType) String() string { + return proto.EnumName(FacetValue_ContentType_name, int32(x)) +} +func (x *FacetValue_ContentType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FacetValue_ContentType_value, data, "FacetValue_ContentType") + if err != nil { + return err + } + *x = FacetValue_ContentType(value) + return nil +} + +type Document_Storage int32 + +const ( + Document_DISK Document_Storage = 0 +) + +var Document_Storage_name = map[int32]string{ + 0: "DISK", +} +var Document_Storage_value = map[string]int32{ + "DISK": 0, +} + +func (x Document_Storage) Enum() *Document_Storage { + p := new(Document_Storage) + *p = x + return p +} +func (x Document_Storage) String() string { + return proto.EnumName(Document_Storage_name, int32(x)) +} +func (x *Document_Storage) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Document_Storage_value, data, "Document_Storage") + if err != nil { + return err + } + *x = Document_Storage(value) + return nil +} + +type SearchServiceError_ErrorCode int32 + +const ( + SearchServiceError_OK SearchServiceError_ErrorCode = 0 + SearchServiceError_INVALID_REQUEST SearchServiceError_ErrorCode = 1 + SearchServiceError_TRANSIENT_ERROR SearchServiceError_ErrorCode = 2 + SearchServiceError_INTERNAL_ERROR SearchServiceError_ErrorCode = 3 + SearchServiceError_PERMISSION_DENIED SearchServiceError_ErrorCode = 4 + SearchServiceError_TIMEOUT SearchServiceError_ErrorCode = 5 + SearchServiceError_CONCURRENT_TRANSACTION SearchServiceError_ErrorCode = 6 +) + +var SearchServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INVALID_REQUEST", + 2: "TRANSIENT_ERROR", + 3: "INTERNAL_ERROR", + 4: "PERMISSION_DENIED", + 5: "TIMEOUT", + 6: "CONCURRENT_TRANSACTION", +} +var SearchServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INVALID_REQUEST": 1, + "TRANSIENT_ERROR": 2, + "INTERNAL_ERROR": 3, + "PERMISSION_DENIED": 4, + "TIMEOUT": 5, + "CONCURRENT_TRANSACTION": 6, +} + +func (x SearchServiceError_ErrorCode) Enum() *SearchServiceError_ErrorCode { + p := new(SearchServiceError_ErrorCode) + *p = x + return p +} +func (x SearchServiceError_ErrorCode) String() string { + return proto.EnumName(SearchServiceError_ErrorCode_name, int32(x)) +} +func (x *SearchServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(SearchServiceError_ErrorCode_value, data, "SearchServiceError_ErrorCode") + if err != nil { + return err + } + *x = SearchServiceError_ErrorCode(value) + return nil +} + +type IndexSpec_Consistency int32 + +const ( + IndexSpec_GLOBAL IndexSpec_Consistency = 0 + IndexSpec_PER_DOCUMENT IndexSpec_Consistency = 1 +) + +var IndexSpec_Consistency_name = map[int32]string{ + 0: "GLOBAL", + 1: "PER_DOCUMENT", +} +var IndexSpec_Consistency_value = map[string]int32{ + "GLOBAL": 0, + "PER_DOCUMENT": 1, +} + +func (x IndexSpec_Consistency) Enum() *IndexSpec_Consistency { + p := new(IndexSpec_Consistency) + *p = x + return p +} +func (x IndexSpec_Consistency) String() string { + return proto.EnumName(IndexSpec_Consistency_name, int32(x)) +} +func (x *IndexSpec_Consistency) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(IndexSpec_Consistency_value, data, "IndexSpec_Consistency") + if err != nil { + return err + } + *x = IndexSpec_Consistency(value) + return nil +} + +type IndexSpec_Source int32 + +const ( + IndexSpec_SEARCH IndexSpec_Source = 0 + IndexSpec_DATASTORE IndexSpec_Source = 1 + IndexSpec_CLOUD_STORAGE 
IndexSpec_Source = 2 +) + +var IndexSpec_Source_name = map[int32]string{ + 0: "SEARCH", + 1: "DATASTORE", + 2: "CLOUD_STORAGE", +} +var IndexSpec_Source_value = map[string]int32{ + "SEARCH": 0, + "DATASTORE": 1, + "CLOUD_STORAGE": 2, +} + +func (x IndexSpec_Source) Enum() *IndexSpec_Source { + p := new(IndexSpec_Source) + *p = x + return p +} +func (x IndexSpec_Source) String() string { + return proto.EnumName(IndexSpec_Source_name, int32(x)) +} +func (x *IndexSpec_Source) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(IndexSpec_Source_value, data, "IndexSpec_Source") + if err != nil { + return err + } + *x = IndexSpec_Source(value) + return nil +} + +type IndexSpec_Mode int32 + +const ( + IndexSpec_PRIORITY IndexSpec_Mode = 0 + IndexSpec_BACKGROUND IndexSpec_Mode = 1 +) + +var IndexSpec_Mode_name = map[int32]string{ + 0: "PRIORITY", + 1: "BACKGROUND", +} +var IndexSpec_Mode_value = map[string]int32{ + "PRIORITY": 0, + "BACKGROUND": 1, +} + +func (x IndexSpec_Mode) Enum() *IndexSpec_Mode { + p := new(IndexSpec_Mode) + *p = x + return p +} +func (x IndexSpec_Mode) String() string { + return proto.EnumName(IndexSpec_Mode_name, int32(x)) +} +func (x *IndexSpec_Mode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(IndexSpec_Mode_value, data, "IndexSpec_Mode") + if err != nil { + return err + } + *x = IndexSpec_Mode(value) + return nil +} + +type IndexDocumentParams_Freshness int32 + +const ( + IndexDocumentParams_SYNCHRONOUSLY IndexDocumentParams_Freshness = 0 + IndexDocumentParams_WHEN_CONVENIENT IndexDocumentParams_Freshness = 1 +) + +var IndexDocumentParams_Freshness_name = map[int32]string{ + 0: "SYNCHRONOUSLY", + 1: "WHEN_CONVENIENT", +} +var IndexDocumentParams_Freshness_value = map[string]int32{ + "SYNCHRONOUSLY": 0, + "WHEN_CONVENIENT": 1, +} + +func (x IndexDocumentParams_Freshness) Enum() *IndexDocumentParams_Freshness { + p := new(IndexDocumentParams_Freshness) + *p = x + return p +} +func (x IndexDocumentParams_Freshness) String() string { + return proto.EnumName(IndexDocumentParams_Freshness_name, int32(x)) +} +func (x *IndexDocumentParams_Freshness) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(IndexDocumentParams_Freshness_value, data, "IndexDocumentParams_Freshness") + if err != nil { + return err + } + *x = IndexDocumentParams_Freshness(value) + return nil +} + +type ScorerSpec_Scorer int32 + +const ( + ScorerSpec_RESCORING_MATCH_SCORER ScorerSpec_Scorer = 0 + ScorerSpec_MATCH_SCORER ScorerSpec_Scorer = 2 +) + +var ScorerSpec_Scorer_name = map[int32]string{ + 0: "RESCORING_MATCH_SCORER", + 2: "MATCH_SCORER", +} +var ScorerSpec_Scorer_value = map[string]int32{ + "RESCORING_MATCH_SCORER": 0, + "MATCH_SCORER": 2, +} + +func (x ScorerSpec_Scorer) Enum() *ScorerSpec_Scorer { + p := new(ScorerSpec_Scorer) + *p = x + return p +} +func (x ScorerSpec_Scorer) String() string { + return proto.EnumName(ScorerSpec_Scorer_name, int32(x)) +} +func (x *ScorerSpec_Scorer) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ScorerSpec_Scorer_value, data, "ScorerSpec_Scorer") + if err != nil { + return err + } + *x = ScorerSpec_Scorer(value) + return nil +} + +type SearchParams_CursorType int32 + +const ( + SearchParams_NONE SearchParams_CursorType = 0 + SearchParams_SINGLE SearchParams_CursorType = 1 + SearchParams_PER_RESULT SearchParams_CursorType = 2 +) + +var SearchParams_CursorType_name = map[int32]string{ + 0: "NONE", + 1: "SINGLE", + 2: "PER_RESULT", +} +var 
SearchParams_CursorType_value = map[string]int32{ + "NONE": 0, + "SINGLE": 1, + "PER_RESULT": 2, +} + +func (x SearchParams_CursorType) Enum() *SearchParams_CursorType { + p := new(SearchParams_CursorType) + *p = x + return p +} +func (x SearchParams_CursorType) String() string { + return proto.EnumName(SearchParams_CursorType_name, int32(x)) +} +func (x *SearchParams_CursorType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(SearchParams_CursorType_value, data, "SearchParams_CursorType") + if err != nil { + return err + } + *x = SearchParams_CursorType(value) + return nil +} + +type SearchParams_ParsingMode int32 + +const ( + SearchParams_STRICT SearchParams_ParsingMode = 0 + SearchParams_RELAXED SearchParams_ParsingMode = 1 +) + +var SearchParams_ParsingMode_name = map[int32]string{ + 0: "STRICT", + 1: "RELAXED", +} +var SearchParams_ParsingMode_value = map[string]int32{ + "STRICT": 0, + "RELAXED": 1, +} + +func (x SearchParams_ParsingMode) Enum() *SearchParams_ParsingMode { + p := new(SearchParams_ParsingMode) + *p = x + return p +} +func (x SearchParams_ParsingMode) String() string { + return proto.EnumName(SearchParams_ParsingMode_name, int32(x)) +} +func (x *SearchParams_ParsingMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(SearchParams_ParsingMode_value, data, "SearchParams_ParsingMode") + if err != nil { + return err + } + *x = SearchParams_ParsingMode(value) + return nil +} + +type Scope struct { + Type *Scope_Type `protobuf:"varint,1,opt,name=type,enum=search.Scope_Type" json:"type,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Scope) Reset() { *m = Scope{} } +func (m *Scope) String() string { return proto.CompactTextString(m) } +func (*Scope) ProtoMessage() {} + +func (m *Scope) GetType() Scope_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return Scope_USER_BY_CANONICAL_ID +} + +func (m *Scope) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type Entry struct { + Scope *Scope `protobuf:"bytes,1,opt,name=scope" json:"scope,omitempty"` + Permission *Entry_Permission `protobuf:"varint,2,opt,name=permission,enum=search.Entry_Permission" json:"permission,omitempty"` + DisplayName *string `protobuf:"bytes,3,opt,name=display_name" json:"display_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Entry) Reset() { *m = Entry{} } +func (m *Entry) String() string { return proto.CompactTextString(m) } +func (*Entry) ProtoMessage() {} + +func (m *Entry) GetScope() *Scope { + if m != nil { + return m.Scope + } + return nil +} + +func (m *Entry) GetPermission() Entry_Permission { + if m != nil && m.Permission != nil { + return *m.Permission + } + return Entry_READ +} + +func (m *Entry) GetDisplayName() string { + if m != nil && m.DisplayName != nil { + return *m.DisplayName + } + return "" +} + +type AccessControlList struct { + Owner *string `protobuf:"bytes,1,opt,name=owner" json:"owner,omitempty"` + Entries []*Entry `protobuf:"bytes,2,rep,name=entries" json:"entries,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AccessControlList) Reset() { *m = AccessControlList{} } +func (m *AccessControlList) String() string { return proto.CompactTextString(m) } +func (*AccessControlList) ProtoMessage() {} + +func (m *AccessControlList) GetOwner() string { + if m != nil && m.Owner != nil { + return *m.Owner + } + return "" +} + +func (m *AccessControlList) 
GetEntries() []*Entry { + if m != nil { + return m.Entries + } + return nil +} + +type FieldValue struct { + Type *FieldValue_ContentType `protobuf:"varint,1,opt,name=type,enum=search.FieldValue_ContentType,def=0" json:"type,omitempty"` + Language *string `protobuf:"bytes,2,opt,name=language,def=en" json:"language,omitempty"` + StringValue *string `protobuf:"bytes,3,opt,name=string_value" json:"string_value,omitempty"` + Geo *FieldValue_Geo `protobuf:"group,4,opt,name=Geo" json:"geo,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldValue) Reset() { *m = FieldValue{} } +func (m *FieldValue) String() string { return proto.CompactTextString(m) } +func (*FieldValue) ProtoMessage() {} + +const Default_FieldValue_Type FieldValue_ContentType = FieldValue_TEXT +const Default_FieldValue_Language string = "en" + +func (m *FieldValue) GetType() FieldValue_ContentType { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_FieldValue_Type +} + +func (m *FieldValue) GetLanguage() string { + if m != nil && m.Language != nil { + return *m.Language + } + return Default_FieldValue_Language +} + +func (m *FieldValue) GetStringValue() string { + if m != nil && m.StringValue != nil { + return *m.StringValue + } + return "" +} + +func (m *FieldValue) GetGeo() *FieldValue_Geo { + if m != nil { + return m.Geo + } + return nil +} + +type FieldValue_Geo struct { + Lat *float64 `protobuf:"fixed64,5,req,name=lat" json:"lat,omitempty"` + Lng *float64 `protobuf:"fixed64,6,req,name=lng" json:"lng,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldValue_Geo) Reset() { *m = FieldValue_Geo{} } +func (m *FieldValue_Geo) String() string { return proto.CompactTextString(m) } +func (*FieldValue_Geo) ProtoMessage() {} + +func (m *FieldValue_Geo) GetLat() float64 { + if m != nil && m.Lat != nil { + return *m.Lat + } + return 0 +} + +func (m *FieldValue_Geo) GetLng() float64 { + if m != nil && m.Lng != nil { + return *m.Lng + } + return 0 +} + +type Field struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Value *FieldValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Field) Reset() { *m = Field{} } +func (m *Field) String() string { return proto.CompactTextString(m) } +func (*Field) ProtoMessage() {} + +func (m *Field) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Field) GetValue() *FieldValue { + if m != nil { + return m.Value + } + return nil +} + +type FieldTypes struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Type []FieldValue_ContentType `protobuf:"varint,2,rep,name=type,enum=search.FieldValue_ContentType" json:"type,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldTypes) Reset() { *m = FieldTypes{} } +func (m *FieldTypes) String() string { return proto.CompactTextString(m) } +func (*FieldTypes) ProtoMessage() {} + +func (m *FieldTypes) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldTypes) GetType() []FieldValue_ContentType { + if m != nil { + return m.Type + } + return nil +} + +type IndexShardSettings struct { + PrevNumShards []int32 `protobuf:"varint,1,rep,name=prev_num_shards" json:"prev_num_shards,omitempty"` + NumShards *int32 `protobuf:"varint,2,req,name=num_shards,def=1" json:"num_shards,omitempty"` + PrevNumShardsSearchFalse []int32 `protobuf:"varint,3,rep,name=prev_num_shards_search_false" 
json:"prev_num_shards_search_false,omitempty"` + LocalReplica *string `protobuf:"bytes,4,opt,name=local_replica,def=" json:"local_replica,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexShardSettings) Reset() { *m = IndexShardSettings{} } +func (m *IndexShardSettings) String() string { return proto.CompactTextString(m) } +func (*IndexShardSettings) ProtoMessage() {} + +const Default_IndexShardSettings_NumShards int32 = 1 + +func (m *IndexShardSettings) GetPrevNumShards() []int32 { + if m != nil { + return m.PrevNumShards + } + return nil +} + +func (m *IndexShardSettings) GetNumShards() int32 { + if m != nil && m.NumShards != nil { + return *m.NumShards + } + return Default_IndexShardSettings_NumShards +} + +func (m *IndexShardSettings) GetPrevNumShardsSearchFalse() []int32 { + if m != nil { + return m.PrevNumShardsSearchFalse + } + return nil +} + +func (m *IndexShardSettings) GetLocalReplica() string { + if m != nil && m.LocalReplica != nil { + return *m.LocalReplica + } + return "" +} + +type FacetValue struct { + Type *FacetValue_ContentType `protobuf:"varint,1,opt,name=type,enum=search.FacetValue_ContentType,def=2" json:"type,omitempty"` + StringValue *string `protobuf:"bytes,3,opt,name=string_value" json:"string_value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FacetValue) Reset() { *m = FacetValue{} } +func (m *FacetValue) String() string { return proto.CompactTextString(m) } +func (*FacetValue) ProtoMessage() {} + +const Default_FacetValue_Type FacetValue_ContentType = FacetValue_ATOM + +func (m *FacetValue) GetType() FacetValue_ContentType { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_FacetValue_Type +} + +func (m *FacetValue) GetStringValue() string { + if m != nil && m.StringValue != nil { + return *m.StringValue + } + return "" +} + +type Facet struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Value *FacetValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Facet) Reset() { *m = Facet{} } +func (m *Facet) String() string { return proto.CompactTextString(m) } +func (*Facet) ProtoMessage() {} + +func (m *Facet) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Facet) GetValue() *FacetValue { + if m != nil { + return m.Value + } + return nil +} + +type DocumentMetadata struct { + Version *int64 `protobuf:"varint,1,opt,name=version" json:"version,omitempty"` + CommittedStVersion *int64 `protobuf:"varint,2,opt,name=committed_st_version" json:"committed_st_version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DocumentMetadata) Reset() { *m = DocumentMetadata{} } +func (m *DocumentMetadata) String() string { return proto.CompactTextString(m) } +func (*DocumentMetadata) ProtoMessage() {} + +func (m *DocumentMetadata) GetVersion() int64 { + if m != nil && m.Version != nil { + return *m.Version + } + return 0 +} + +func (m *DocumentMetadata) GetCommittedStVersion() int64 { + if m != nil && m.CommittedStVersion != nil { + return *m.CommittedStVersion + } + return 0 +} + +type Document struct { + Id *string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Language *string `protobuf:"bytes,2,opt,name=language,def=en" json:"language,omitempty"` + Field []*Field `protobuf:"bytes,3,rep,name=field" json:"field,omitempty"` + OrderId *int32 `protobuf:"varint,4,opt,name=order_id" json:"order_id,omitempty"` + Storage *Document_Storage 
`protobuf:"varint,5,opt,name=storage,enum=search.Document_Storage,def=0" json:"storage,omitempty"` + Facet []*Facet `protobuf:"bytes,8,rep,name=facet" json:"facet,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Document) Reset() { *m = Document{} } +func (m *Document) String() string { return proto.CompactTextString(m) } +func (*Document) ProtoMessage() {} + +const Default_Document_Language string = "en" +const Default_Document_Storage Document_Storage = Document_DISK + +func (m *Document) GetId() string { + if m != nil && m.Id != nil { + return *m.Id + } + return "" +} + +func (m *Document) GetLanguage() string { + if m != nil && m.Language != nil { + return *m.Language + } + return Default_Document_Language +} + +func (m *Document) GetField() []*Field { + if m != nil { + return m.Field + } + return nil +} + +func (m *Document) GetOrderId() int32 { + if m != nil && m.OrderId != nil { + return *m.OrderId + } + return 0 +} + +func (m *Document) GetStorage() Document_Storage { + if m != nil && m.Storage != nil { + return *m.Storage + } + return Default_Document_Storage +} + +func (m *Document) GetFacet() []*Facet { + if m != nil { + return m.Facet + } + return nil +} + +type SearchServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *SearchServiceError) Reset() { *m = SearchServiceError{} } +func (m *SearchServiceError) String() string { return proto.CompactTextString(m) } +func (*SearchServiceError) ProtoMessage() {} + +type RequestStatus struct { + Code *SearchServiceError_ErrorCode `protobuf:"varint,1,req,name=code,enum=search.SearchServiceError_ErrorCode" json:"code,omitempty"` + ErrorDetail *string `protobuf:"bytes,2,opt,name=error_detail" json:"error_detail,omitempty"` + CanonicalCode *int32 `protobuf:"varint,3,opt,name=canonical_code" json:"canonical_code,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RequestStatus) Reset() { *m = RequestStatus{} } +func (m *RequestStatus) String() string { return proto.CompactTextString(m) } +func (*RequestStatus) ProtoMessage() {} + +func (m *RequestStatus) GetCode() SearchServiceError_ErrorCode { + if m != nil && m.Code != nil { + return *m.Code + } + return SearchServiceError_OK +} + +func (m *RequestStatus) GetErrorDetail() string { + if m != nil && m.ErrorDetail != nil { + return *m.ErrorDetail + } + return "" +} + +func (m *RequestStatus) GetCanonicalCode() int32 { + if m != nil && m.CanonicalCode != nil { + return *m.CanonicalCode + } + return 0 +} + +type IndexSpec struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Consistency *IndexSpec_Consistency `protobuf:"varint,2,opt,name=consistency,enum=search.IndexSpec_Consistency,def=1" json:"consistency,omitempty"` + Namespace *string `protobuf:"bytes,3,opt,name=namespace" json:"namespace,omitempty"` + Version *int32 `protobuf:"varint,4,opt,name=version" json:"version,omitempty"` + Source *IndexSpec_Source `protobuf:"varint,5,opt,name=source,enum=search.IndexSpec_Source,def=0" json:"source,omitempty"` + Mode *IndexSpec_Mode `protobuf:"varint,6,opt,name=mode,enum=search.IndexSpec_Mode,def=0" json:"mode,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexSpec) Reset() { *m = IndexSpec{} } +func (m *IndexSpec) String() string { return proto.CompactTextString(m) } +func (*IndexSpec) ProtoMessage() {} + +const Default_IndexSpec_Consistency IndexSpec_Consistency = IndexSpec_PER_DOCUMENT +const Default_IndexSpec_Source IndexSpec_Source = IndexSpec_SEARCH +const Default_IndexSpec_Mode IndexSpec_Mode = 
IndexSpec_PRIORITY + +func (m *IndexSpec) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *IndexSpec) GetConsistency() IndexSpec_Consistency { + if m != nil && m.Consistency != nil { + return *m.Consistency + } + return Default_IndexSpec_Consistency +} + +func (m *IndexSpec) GetNamespace() string { + if m != nil && m.Namespace != nil { + return *m.Namespace + } + return "" +} + +func (m *IndexSpec) GetVersion() int32 { + if m != nil && m.Version != nil { + return *m.Version + } + return 0 +} + +func (m *IndexSpec) GetSource() IndexSpec_Source { + if m != nil && m.Source != nil { + return *m.Source + } + return Default_IndexSpec_Source +} + +func (m *IndexSpec) GetMode() IndexSpec_Mode { + if m != nil && m.Mode != nil { + return *m.Mode + } + return Default_IndexSpec_Mode +} + +type IndexMetadata struct { + IndexSpec *IndexSpec `protobuf:"bytes,1,req,name=index_spec" json:"index_spec,omitempty"` + Field []*FieldTypes `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Storage *IndexMetadata_Storage `protobuf:"bytes,3,opt,name=storage" json:"storage,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexMetadata) Reset() { *m = IndexMetadata{} } +func (m *IndexMetadata) String() string { return proto.CompactTextString(m) } +func (*IndexMetadata) ProtoMessage() {} + +func (m *IndexMetadata) GetIndexSpec() *IndexSpec { + if m != nil { + return m.IndexSpec + } + return nil +} + +func (m *IndexMetadata) GetField() []*FieldTypes { + if m != nil { + return m.Field + } + return nil +} + +func (m *IndexMetadata) GetStorage() *IndexMetadata_Storage { + if m != nil { + return m.Storage + } + return nil +} + +type IndexMetadata_Storage struct { + AmountUsed *int64 `protobuf:"varint,1,opt,name=amount_used" json:"amount_used,omitempty"` + Limit *int64 `protobuf:"varint,2,opt,name=limit" json:"limit,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexMetadata_Storage) Reset() { *m = IndexMetadata_Storage{} } +func (m *IndexMetadata_Storage) String() string { return proto.CompactTextString(m) } +func (*IndexMetadata_Storage) ProtoMessage() {} + +func (m *IndexMetadata_Storage) GetAmountUsed() int64 { + if m != nil && m.AmountUsed != nil { + return *m.AmountUsed + } + return 0 +} + +func (m *IndexMetadata_Storage) GetLimit() int64 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +type IndexDocumentParams struct { + Document []*Document `protobuf:"bytes,1,rep,name=document" json:"document,omitempty"` + Freshness *IndexDocumentParams_Freshness `protobuf:"varint,2,opt,name=freshness,enum=search.IndexDocumentParams_Freshness,def=0" json:"freshness,omitempty"` + IndexSpec *IndexSpec `protobuf:"bytes,3,req,name=index_spec" json:"index_spec,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexDocumentParams) Reset() { *m = IndexDocumentParams{} } +func (m *IndexDocumentParams) String() string { return proto.CompactTextString(m) } +func (*IndexDocumentParams) ProtoMessage() {} + +const Default_IndexDocumentParams_Freshness IndexDocumentParams_Freshness = IndexDocumentParams_SYNCHRONOUSLY + +func (m *IndexDocumentParams) GetDocument() []*Document { + if m != nil { + return m.Document + } + return nil +} + +func (m *IndexDocumentParams) GetFreshness() IndexDocumentParams_Freshness { + if m != nil && m.Freshness != nil { + return *m.Freshness + } + return Default_IndexDocumentParams_Freshness +} + +func (m *IndexDocumentParams) GetIndexSpec() *IndexSpec { + if m != nil { + return 
m.IndexSpec + } + return nil +} + +type IndexDocumentRequest struct { + Params *IndexDocumentParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"` + AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexDocumentRequest) Reset() { *m = IndexDocumentRequest{} } +func (m *IndexDocumentRequest) String() string { return proto.CompactTextString(m) } +func (*IndexDocumentRequest) ProtoMessage() {} + +func (m *IndexDocumentRequest) GetParams() *IndexDocumentParams { + if m != nil { + return m.Params + } + return nil +} + +func (m *IndexDocumentRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +type IndexDocumentResponse struct { + Status []*RequestStatus `protobuf:"bytes,1,rep,name=status" json:"status,omitempty"` + DocId []string `protobuf:"bytes,2,rep,name=doc_id" json:"doc_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexDocumentResponse) Reset() { *m = IndexDocumentResponse{} } +func (m *IndexDocumentResponse) String() string { return proto.CompactTextString(m) } +func (*IndexDocumentResponse) ProtoMessage() {} + +func (m *IndexDocumentResponse) GetStatus() []*RequestStatus { + if m != nil { + return m.Status + } + return nil +} + +func (m *IndexDocumentResponse) GetDocId() []string { + if m != nil { + return m.DocId + } + return nil +} + +type DeleteDocumentParams struct { + DocId []string `protobuf:"bytes,1,rep,name=doc_id" json:"doc_id,omitempty"` + IndexSpec *IndexSpec `protobuf:"bytes,2,req,name=index_spec" json:"index_spec,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteDocumentParams) Reset() { *m = DeleteDocumentParams{} } +func (m *DeleteDocumentParams) String() string { return proto.CompactTextString(m) } +func (*DeleteDocumentParams) ProtoMessage() {} + +func (m *DeleteDocumentParams) GetDocId() []string { + if m != nil { + return m.DocId + } + return nil +} + +func (m *DeleteDocumentParams) GetIndexSpec() *IndexSpec { + if m != nil { + return m.IndexSpec + } + return nil +} + +type DeleteDocumentRequest struct { + Params *DeleteDocumentParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"` + AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteDocumentRequest) Reset() { *m = DeleteDocumentRequest{} } +func (m *DeleteDocumentRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteDocumentRequest) ProtoMessage() {} + +func (m *DeleteDocumentRequest) GetParams() *DeleteDocumentParams { + if m != nil { + return m.Params + } + return nil +} + +func (m *DeleteDocumentRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +type DeleteDocumentResponse struct { + Status []*RequestStatus `protobuf:"bytes,1,rep,name=status" json:"status,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteDocumentResponse) Reset() { *m = DeleteDocumentResponse{} } +func (m *DeleteDocumentResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteDocumentResponse) ProtoMessage() {} + +func (m *DeleteDocumentResponse) GetStatus() []*RequestStatus { + if m != nil { + return m.Status + } + return nil +} + +type ListDocumentsParams struct { + IndexSpec *IndexSpec `protobuf:"bytes,1,req,name=index_spec" json:"index_spec,omitempty"` + StartDocId *string `protobuf:"bytes,2,opt,name=start_doc_id" json:"start_doc_id,omitempty"` + IncludeStartDoc *bool 
`protobuf:"varint,3,opt,name=include_start_doc,def=1" json:"include_start_doc,omitempty"` + Limit *int32 `protobuf:"varint,4,opt,name=limit,def=100" json:"limit,omitempty"` + KeysOnly *bool `protobuf:"varint,5,opt,name=keys_only" json:"keys_only,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListDocumentsParams) Reset() { *m = ListDocumentsParams{} } +func (m *ListDocumentsParams) String() string { return proto.CompactTextString(m) } +func (*ListDocumentsParams) ProtoMessage() {} + +const Default_ListDocumentsParams_IncludeStartDoc bool = true +const Default_ListDocumentsParams_Limit int32 = 100 + +func (m *ListDocumentsParams) GetIndexSpec() *IndexSpec { + if m != nil { + return m.IndexSpec + } + return nil +} + +func (m *ListDocumentsParams) GetStartDocId() string { + if m != nil && m.StartDocId != nil { + return *m.StartDocId + } + return "" +} + +func (m *ListDocumentsParams) GetIncludeStartDoc() bool { + if m != nil && m.IncludeStartDoc != nil { + return *m.IncludeStartDoc + } + return Default_ListDocumentsParams_IncludeStartDoc +} + +func (m *ListDocumentsParams) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return Default_ListDocumentsParams_Limit +} + +func (m *ListDocumentsParams) GetKeysOnly() bool { + if m != nil && m.KeysOnly != nil { + return *m.KeysOnly + } + return false +} + +type ListDocumentsRequest struct { + Params *ListDocumentsParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"` + AppId []byte `protobuf:"bytes,2,opt,name=app_id" json:"app_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListDocumentsRequest) Reset() { *m = ListDocumentsRequest{} } +func (m *ListDocumentsRequest) String() string { return proto.CompactTextString(m) } +func (*ListDocumentsRequest) ProtoMessage() {} + +func (m *ListDocumentsRequest) GetParams() *ListDocumentsParams { + if m != nil { + return m.Params + } + return nil +} + +func (m *ListDocumentsRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +type ListDocumentsResponse struct { + Status *RequestStatus `protobuf:"bytes,1,req,name=status" json:"status,omitempty"` + Document []*Document `protobuf:"bytes,2,rep,name=document" json:"document,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListDocumentsResponse) Reset() { *m = ListDocumentsResponse{} } +func (m *ListDocumentsResponse) String() string { return proto.CompactTextString(m) } +func (*ListDocumentsResponse) ProtoMessage() {} + +func (m *ListDocumentsResponse) GetStatus() *RequestStatus { + if m != nil { + return m.Status + } + return nil +} + +func (m *ListDocumentsResponse) GetDocument() []*Document { + if m != nil { + return m.Document + } + return nil +} + +type ListIndexesParams struct { + FetchSchema *bool `protobuf:"varint,1,opt,name=fetch_schema" json:"fetch_schema,omitempty"` + Limit *int32 `protobuf:"varint,2,opt,name=limit,def=20" json:"limit,omitempty"` + Namespace *string `protobuf:"bytes,3,opt,name=namespace" json:"namespace,omitempty"` + StartIndexName *string `protobuf:"bytes,4,opt,name=start_index_name" json:"start_index_name,omitempty"` + IncludeStartIndex *bool `protobuf:"varint,5,opt,name=include_start_index,def=1" json:"include_start_index,omitempty"` + IndexNamePrefix *string `protobuf:"bytes,6,opt,name=index_name_prefix" json:"index_name_prefix,omitempty"` + Offset *int32 `protobuf:"varint,7,opt,name=offset" json:"offset,omitempty"` + Source *IndexSpec_Source `protobuf:"varint,8,opt,name=source,enum=search.IndexSpec_Source,def=0" 
json:"source,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListIndexesParams) Reset() { *m = ListIndexesParams{} } +func (m *ListIndexesParams) String() string { return proto.CompactTextString(m) } +func (*ListIndexesParams) ProtoMessage() {} + +const Default_ListIndexesParams_Limit int32 = 20 +const Default_ListIndexesParams_IncludeStartIndex bool = true +const Default_ListIndexesParams_Source IndexSpec_Source = IndexSpec_SEARCH + +func (m *ListIndexesParams) GetFetchSchema() bool { + if m != nil && m.FetchSchema != nil { + return *m.FetchSchema + } + return false +} + +func (m *ListIndexesParams) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return Default_ListIndexesParams_Limit +} + +func (m *ListIndexesParams) GetNamespace() string { + if m != nil && m.Namespace != nil { + return *m.Namespace + } + return "" +} + +func (m *ListIndexesParams) GetStartIndexName() string { + if m != nil && m.StartIndexName != nil { + return *m.StartIndexName + } + return "" +} + +func (m *ListIndexesParams) GetIncludeStartIndex() bool { + if m != nil && m.IncludeStartIndex != nil { + return *m.IncludeStartIndex + } + return Default_ListIndexesParams_IncludeStartIndex +} + +func (m *ListIndexesParams) GetIndexNamePrefix() string { + if m != nil && m.IndexNamePrefix != nil { + return *m.IndexNamePrefix + } + return "" +} + +func (m *ListIndexesParams) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return 0 +} + +func (m *ListIndexesParams) GetSource() IndexSpec_Source { + if m != nil && m.Source != nil { + return *m.Source + } + return Default_ListIndexesParams_Source +} + +type ListIndexesRequest struct { + Params *ListIndexesParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"` + AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListIndexesRequest) Reset() { *m = ListIndexesRequest{} } +func (m *ListIndexesRequest) String() string { return proto.CompactTextString(m) } +func (*ListIndexesRequest) ProtoMessage() {} + +func (m *ListIndexesRequest) GetParams() *ListIndexesParams { + if m != nil { + return m.Params + } + return nil +} + +func (m *ListIndexesRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +type ListIndexesResponse struct { + Status *RequestStatus `protobuf:"bytes,1,req,name=status" json:"status,omitempty"` + IndexMetadata []*IndexMetadata `protobuf:"bytes,2,rep,name=index_metadata" json:"index_metadata,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListIndexesResponse) Reset() { *m = ListIndexesResponse{} } +func (m *ListIndexesResponse) String() string { return proto.CompactTextString(m) } +func (*ListIndexesResponse) ProtoMessage() {} + +func (m *ListIndexesResponse) GetStatus() *RequestStatus { + if m != nil { + return m.Status + } + return nil +} + +func (m *ListIndexesResponse) GetIndexMetadata() []*IndexMetadata { + if m != nil { + return m.IndexMetadata + } + return nil +} + +type DeleteSchemaParams struct { + Source *IndexSpec_Source `protobuf:"varint,1,opt,name=source,enum=search.IndexSpec_Source,def=0" json:"source,omitempty"` + IndexSpec []*IndexSpec `protobuf:"bytes,2,rep,name=index_spec" json:"index_spec,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteSchemaParams) Reset() { *m = DeleteSchemaParams{} } +func (m *DeleteSchemaParams) String() string { return proto.CompactTextString(m) } +func (*DeleteSchemaParams) ProtoMessage() {} + +const 
Default_DeleteSchemaParams_Source IndexSpec_Source = IndexSpec_SEARCH + +func (m *DeleteSchemaParams) GetSource() IndexSpec_Source { + if m != nil && m.Source != nil { + return *m.Source + } + return Default_DeleteSchemaParams_Source +} + +func (m *DeleteSchemaParams) GetIndexSpec() []*IndexSpec { + if m != nil { + return m.IndexSpec + } + return nil +} + +type DeleteSchemaRequest struct { + Params *DeleteSchemaParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"` + AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteSchemaRequest) Reset() { *m = DeleteSchemaRequest{} } +func (m *DeleteSchemaRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteSchemaRequest) ProtoMessage() {} + +func (m *DeleteSchemaRequest) GetParams() *DeleteSchemaParams { + if m != nil { + return m.Params + } + return nil +} + +func (m *DeleteSchemaRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +type DeleteSchemaResponse struct { + Status []*RequestStatus `protobuf:"bytes,1,rep,name=status" json:"status,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteSchemaResponse) Reset() { *m = DeleteSchemaResponse{} } +func (m *DeleteSchemaResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteSchemaResponse) ProtoMessage() {} + +func (m *DeleteSchemaResponse) GetStatus() []*RequestStatus { + if m != nil { + return m.Status + } + return nil +} + +type SortSpec struct { + SortExpression *string `protobuf:"bytes,1,req,name=sort_expression" json:"sort_expression,omitempty"` + SortDescending *bool `protobuf:"varint,2,opt,name=sort_descending,def=1" json:"sort_descending,omitempty"` + DefaultValueText *string `protobuf:"bytes,4,opt,name=default_value_text" json:"default_value_text,omitempty"` + DefaultValueNumeric *float64 `protobuf:"fixed64,5,opt,name=default_value_numeric" json:"default_value_numeric,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SortSpec) Reset() { *m = SortSpec{} } +func (m *SortSpec) String() string { return proto.CompactTextString(m) } +func (*SortSpec) ProtoMessage() {} + +const Default_SortSpec_SortDescending bool = true + +func (m *SortSpec) GetSortExpression() string { + if m != nil && m.SortExpression != nil { + return *m.SortExpression + } + return "" +} + +func (m *SortSpec) GetSortDescending() bool { + if m != nil && m.SortDescending != nil { + return *m.SortDescending + } + return Default_SortSpec_SortDescending +} + +func (m *SortSpec) GetDefaultValueText() string { + if m != nil && m.DefaultValueText != nil { + return *m.DefaultValueText + } + return "" +} + +func (m *SortSpec) GetDefaultValueNumeric() float64 { + if m != nil && m.DefaultValueNumeric != nil { + return *m.DefaultValueNumeric + } + return 0 +} + +type ScorerSpec struct { + Scorer *ScorerSpec_Scorer `protobuf:"varint,1,opt,name=scorer,enum=search.ScorerSpec_Scorer,def=2" json:"scorer,omitempty"` + Limit *int32 `protobuf:"varint,2,opt,name=limit,def=1000" json:"limit,omitempty"` + MatchScorerParameters *string `protobuf:"bytes,9,opt,name=match_scorer_parameters" json:"match_scorer_parameters,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ScorerSpec) Reset() { *m = ScorerSpec{} } +func (m *ScorerSpec) String() string { return proto.CompactTextString(m) } +func (*ScorerSpec) ProtoMessage() {} + +const Default_ScorerSpec_Scorer ScorerSpec_Scorer = ScorerSpec_MATCH_SCORER +const Default_ScorerSpec_Limit int32 = 1000 
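+
+// Editor's note (illustrative comment, not generated output): every
+// accessor in this file follows the same proto2 pattern -- optional
+// fields are pointers, and the getter dereferences one only after a nil
+// check, otherwise returning the declared default. The nil check covers
+// the receiver as well, so even a nil *ScorerSpec yields usable values:
+//
+//	var s *ScorerSpec // nil message
+//	s.GetScorer()     // ScorerSpec_MATCH_SCORER (Default_ScorerSpec_Scorer)
+//	s.GetLimit()      // 1000 (Default_ScorerSpec_Limit)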
+ +func (m *ScorerSpec) GetScorer() ScorerSpec_Scorer { + if m != nil && m.Scorer != nil { + return *m.Scorer + } + return Default_ScorerSpec_Scorer +} + +func (m *ScorerSpec) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return Default_ScorerSpec_Limit +} + +func (m *ScorerSpec) GetMatchScorerParameters() string { + if m != nil && m.MatchScorerParameters != nil { + return *m.MatchScorerParameters + } + return "" +} + +type FieldSpec struct { + Name []string `protobuf:"bytes,1,rep,name=name" json:"name,omitempty"` + Expression []*FieldSpec_Expression `protobuf:"group,2,rep,name=Expression" json:"expression,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldSpec) Reset() { *m = FieldSpec{} } +func (m *FieldSpec) String() string { return proto.CompactTextString(m) } +func (*FieldSpec) ProtoMessage() {} + +func (m *FieldSpec) GetName() []string { + if m != nil { + return m.Name + } + return nil +} + +func (m *FieldSpec) GetExpression() []*FieldSpec_Expression { + if m != nil { + return m.Expression + } + return nil +} + +type FieldSpec_Expression struct { + Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` + Expression *string `protobuf:"bytes,4,req,name=expression" json:"expression,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldSpec_Expression) Reset() { *m = FieldSpec_Expression{} } +func (m *FieldSpec_Expression) String() string { return proto.CompactTextString(m) } +func (*FieldSpec_Expression) ProtoMessage() {} + +func (m *FieldSpec_Expression) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldSpec_Expression) GetExpression() string { + if m != nil && m.Expression != nil { + return *m.Expression + } + return "" +} + +type FacetRange struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Start *string `protobuf:"bytes,2,opt,name=start" json:"start,omitempty"` + End *string `protobuf:"bytes,3,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FacetRange) Reset() { *m = FacetRange{} } +func (m *FacetRange) String() string { return proto.CompactTextString(m) } +func (*FacetRange) ProtoMessage() {} + +func (m *FacetRange) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FacetRange) GetStart() string { + if m != nil && m.Start != nil { + return *m.Start + } + return "" +} + +func (m *FacetRange) GetEnd() string { + if m != nil && m.End != nil { + return *m.End + } + return "" +} + +type FacetRequestParam struct { + ValueLimit *int32 `protobuf:"varint,1,opt,name=value_limit" json:"value_limit,omitempty"` + Range []*FacetRange `protobuf:"bytes,2,rep,name=range" json:"range,omitempty"` + ValueConstraint []string `protobuf:"bytes,3,rep,name=value_constraint" json:"value_constraint,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FacetRequestParam) Reset() { *m = FacetRequestParam{} } +func (m *FacetRequestParam) String() string { return proto.CompactTextString(m) } +func (*FacetRequestParam) ProtoMessage() {} + +func (m *FacetRequestParam) GetValueLimit() int32 { + if m != nil && m.ValueLimit != nil { + return *m.ValueLimit + } + return 0 +} + +func (m *FacetRequestParam) GetRange() []*FacetRange { + if m != nil { + return m.Range + } + return nil +} + +func (m *FacetRequestParam) GetValueConstraint() []string { + if m != nil { + return m.ValueConstraint + } + return nil +} + +type FacetAutoDetectParam struct { + ValueLimit 
*int32 `protobuf:"varint,1,opt,name=value_limit,def=10" json:"value_limit,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FacetAutoDetectParam) Reset() { *m = FacetAutoDetectParam{} } +func (m *FacetAutoDetectParam) String() string { return proto.CompactTextString(m) } +func (*FacetAutoDetectParam) ProtoMessage() {} + +const Default_FacetAutoDetectParam_ValueLimit int32 = 10 + +func (m *FacetAutoDetectParam) GetValueLimit() int32 { + if m != nil && m.ValueLimit != nil { + return *m.ValueLimit + } + return Default_FacetAutoDetectParam_ValueLimit +} + +type FacetRequest struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Params *FacetRequestParam `protobuf:"bytes,2,opt,name=params" json:"params,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FacetRequest) Reset() { *m = FacetRequest{} } +func (m *FacetRequest) String() string { return proto.CompactTextString(m) } +func (*FacetRequest) ProtoMessage() {} + +func (m *FacetRequest) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FacetRequest) GetParams() *FacetRequestParam { + if m != nil { + return m.Params + } + return nil +} + +type FacetRefinement struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + Range *FacetRefinement_Range `protobuf:"bytes,3,opt,name=range" json:"range,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FacetRefinement) Reset() { *m = FacetRefinement{} } +func (m *FacetRefinement) String() string { return proto.CompactTextString(m) } +func (*FacetRefinement) ProtoMessage() {} + +func (m *FacetRefinement) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FacetRefinement) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +func (m *FacetRefinement) GetRange() *FacetRefinement_Range { + if m != nil { + return m.Range + } + return nil +} + +type FacetRefinement_Range struct { + Start *string `protobuf:"bytes,1,opt,name=start" json:"start,omitempty"` + End *string `protobuf:"bytes,2,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FacetRefinement_Range) Reset() { *m = FacetRefinement_Range{} } +func (m *FacetRefinement_Range) String() string { return proto.CompactTextString(m) } +func (*FacetRefinement_Range) ProtoMessage() {} + +func (m *FacetRefinement_Range) GetStart() string { + if m != nil && m.Start != nil { + return *m.Start + } + return "" +} + +func (m *FacetRefinement_Range) GetEnd() string { + if m != nil && m.End != nil { + return *m.End + } + return "" +} + +type SearchParams struct { + IndexSpec *IndexSpec `protobuf:"bytes,1,req,name=index_spec" json:"index_spec,omitempty"` + Query *string `protobuf:"bytes,2,req,name=query" json:"query,omitempty"` + Cursor *string `protobuf:"bytes,4,opt,name=cursor" json:"cursor,omitempty"` + Offset *int32 `protobuf:"varint,11,opt,name=offset" json:"offset,omitempty"` + CursorType *SearchParams_CursorType `protobuf:"varint,5,opt,name=cursor_type,enum=search.SearchParams_CursorType,def=0" json:"cursor_type,omitempty"` + Limit *int32 `protobuf:"varint,6,opt,name=limit,def=20" json:"limit,omitempty"` + MatchedCountAccuracy *int32 `protobuf:"varint,7,opt,name=matched_count_accuracy" json:"matched_count_accuracy,omitempty"` + SortSpec []*SortSpec `protobuf:"bytes,8,rep,name=sort_spec" json:"sort_spec,omitempty"` + ScorerSpec 
*ScorerSpec `protobuf:"bytes,9,opt,name=scorer_spec" json:"scorer_spec,omitempty"` + FieldSpec *FieldSpec `protobuf:"bytes,10,opt,name=field_spec" json:"field_spec,omitempty"` + KeysOnly *bool `protobuf:"varint,12,opt,name=keys_only" json:"keys_only,omitempty"` + ParsingMode *SearchParams_ParsingMode `protobuf:"varint,13,opt,name=parsing_mode,enum=search.SearchParams_ParsingMode,def=0" json:"parsing_mode,omitempty"` + AutoDiscoverFacetCount *int32 `protobuf:"varint,15,opt,name=auto_discover_facet_count,def=0" json:"auto_discover_facet_count,omitempty"` + IncludeFacet []*FacetRequest `protobuf:"bytes,16,rep,name=include_facet" json:"include_facet,omitempty"` + FacetRefinement []*FacetRefinement `protobuf:"bytes,17,rep,name=facet_refinement" json:"facet_refinement,omitempty"` + FacetAutoDetectParam *FacetAutoDetectParam `protobuf:"bytes,18,opt,name=facet_auto_detect_param" json:"facet_auto_detect_param,omitempty"` + FacetDepth *int32 `protobuf:"varint,19,opt,name=facet_depth,def=1000" json:"facet_depth,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SearchParams) Reset() { *m = SearchParams{} } +func (m *SearchParams) String() string { return proto.CompactTextString(m) } +func (*SearchParams) ProtoMessage() {} + +const Default_SearchParams_CursorType SearchParams_CursorType = SearchParams_NONE +const Default_SearchParams_Limit int32 = 20 +const Default_SearchParams_ParsingMode SearchParams_ParsingMode = SearchParams_STRICT +const Default_SearchParams_AutoDiscoverFacetCount int32 = 0 +const Default_SearchParams_FacetDepth int32 = 1000 + +func (m *SearchParams) GetIndexSpec() *IndexSpec { + if m != nil { + return m.IndexSpec + } + return nil +} + +func (m *SearchParams) GetQuery() string { + if m != nil && m.Query != nil { + return *m.Query + } + return "" +} + +func (m *SearchParams) GetCursor() string { + if m != nil && m.Cursor != nil { + return *m.Cursor + } + return "" +} + +func (m *SearchParams) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return 0 +} + +func (m *SearchParams) GetCursorType() SearchParams_CursorType { + if m != nil && m.CursorType != nil { + return *m.CursorType + } + return Default_SearchParams_CursorType +} + +func (m *SearchParams) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return Default_SearchParams_Limit +} + +func (m *SearchParams) GetMatchedCountAccuracy() int32 { + if m != nil && m.MatchedCountAccuracy != nil { + return *m.MatchedCountAccuracy + } + return 0 +} + +func (m *SearchParams) GetSortSpec() []*SortSpec { + if m != nil { + return m.SortSpec + } + return nil +} + +func (m *SearchParams) GetScorerSpec() *ScorerSpec { + if m != nil { + return m.ScorerSpec + } + return nil +} + +func (m *SearchParams) GetFieldSpec() *FieldSpec { + if m != nil { + return m.FieldSpec + } + return nil +} + +func (m *SearchParams) GetKeysOnly() bool { + if m != nil && m.KeysOnly != nil { + return *m.KeysOnly + } + return false +} + +func (m *SearchParams) GetParsingMode() SearchParams_ParsingMode { + if m != nil && m.ParsingMode != nil { + return *m.ParsingMode + } + return Default_SearchParams_ParsingMode +} + +func (m *SearchParams) GetAutoDiscoverFacetCount() int32 { + if m != nil && m.AutoDiscoverFacetCount != nil { + return *m.AutoDiscoverFacetCount + } + return Default_SearchParams_AutoDiscoverFacetCount +} + +func (m *SearchParams) GetIncludeFacet() []*FacetRequest { + if m != nil { + return m.IncludeFacet + } + return nil +} + +func (m *SearchParams) GetFacetRefinement() 
[]*FacetRefinement { + if m != nil { + return m.FacetRefinement + } + return nil +} + +func (m *SearchParams) GetFacetAutoDetectParam() *FacetAutoDetectParam { + if m != nil { + return m.FacetAutoDetectParam + } + return nil +} + +func (m *SearchParams) GetFacetDepth() int32 { + if m != nil && m.FacetDepth != nil { + return *m.FacetDepth + } + return Default_SearchParams_FacetDepth +} + +type SearchRequest struct { + Params *SearchParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"` + AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SearchRequest) Reset() { *m = SearchRequest{} } +func (m *SearchRequest) String() string { return proto.CompactTextString(m) } +func (*SearchRequest) ProtoMessage() {} + +func (m *SearchRequest) GetParams() *SearchParams { + if m != nil { + return m.Params + } + return nil +} + +func (m *SearchRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +type FacetResultValue struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Count *int32 `protobuf:"varint,2,req,name=count" json:"count,omitempty"` + Refinement *FacetRefinement `protobuf:"bytes,3,req,name=refinement" json:"refinement,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FacetResultValue) Reset() { *m = FacetResultValue{} } +func (m *FacetResultValue) String() string { return proto.CompactTextString(m) } +func (*FacetResultValue) ProtoMessage() {} + +func (m *FacetResultValue) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FacetResultValue) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *FacetResultValue) GetRefinement() *FacetRefinement { + if m != nil { + return m.Refinement + } + return nil +} + +type FacetResult struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Value []*FacetResultValue `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FacetResult) Reset() { *m = FacetResult{} } +func (m *FacetResult) String() string { return proto.CompactTextString(m) } +func (*FacetResult) ProtoMessage() {} + +func (m *FacetResult) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FacetResult) GetValue() []*FacetResultValue { + if m != nil { + return m.Value + } + return nil +} + +type SearchResult struct { + Document *Document `protobuf:"bytes,1,req,name=document" json:"document,omitempty"` + Expression []*Field `protobuf:"bytes,4,rep,name=expression" json:"expression,omitempty"` + Score []float64 `protobuf:"fixed64,2,rep,name=score" json:"score,omitempty"` + Cursor *string `protobuf:"bytes,3,opt,name=cursor" json:"cursor,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SearchResult) Reset() { *m = SearchResult{} } +func (m *SearchResult) String() string { return proto.CompactTextString(m) } +func (*SearchResult) ProtoMessage() {} + +func (m *SearchResult) GetDocument() *Document { + if m != nil { + return m.Document + } + return nil +} + +func (m *SearchResult) GetExpression() []*Field { + if m != nil { + return m.Expression + } + return nil +} + +func (m *SearchResult) GetScore() []float64 { + if m != nil { + return m.Score + } + return nil +} + +func (m *SearchResult) GetCursor() string { + if m != nil && m.Cursor != nil { + return *m.Cursor + } + return "" +} + +type SearchResponse struct 
{ + Result []*SearchResult `protobuf:"bytes,1,rep,name=result" json:"result,omitempty"` + MatchedCount *int64 `protobuf:"varint,2,req,name=matched_count" json:"matched_count,omitempty"` + Status *RequestStatus `protobuf:"bytes,3,req,name=status" json:"status,omitempty"` + Cursor *string `protobuf:"bytes,4,opt,name=cursor" json:"cursor,omitempty"` + FacetResult []*FacetResult `protobuf:"bytes,5,rep,name=facet_result" json:"facet_result,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SearchResponse) Reset() { *m = SearchResponse{} } +func (m *SearchResponse) String() string { return proto.CompactTextString(m) } +func (*SearchResponse) ProtoMessage() {} + +var extRange_SearchResponse = []proto.ExtensionRange{ + {1000, 9999}, +} + +func (*SearchResponse) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_SearchResponse +} +func (m *SearchResponse) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +func (m *SearchResponse) GetResult() []*SearchResult { + if m != nil { + return m.Result + } + return nil +} + +func (m *SearchResponse) GetMatchedCount() int64 { + if m != nil && m.MatchedCount != nil { + return *m.MatchedCount + } + return 0 +} + +func (m *SearchResponse) GetStatus() *RequestStatus { + if m != nil { + return m.Status + } + return nil +} + +func (m *SearchResponse) GetCursor() string { + if m != nil && m.Cursor != nil { + return *m.Cursor + } + return "" +} + +func (m *SearchResponse) GetFacetResult() []*FacetResult { + if m != nil { + return m.FacetResult + } + return nil +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/search/search.proto b/vendor/google.golang.org/appengine/internal/search/search.proto new file mode 100644 index 0000000..219f4c3 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/search/search.proto @@ -0,0 +1,388 @@ +syntax = "proto2"; +option go_package = "search"; + +package search; + +message Scope { + enum Type { + USER_BY_CANONICAL_ID = 1; + USER_BY_EMAIL = 2; + GROUP_BY_CANONICAL_ID = 3; + GROUP_BY_EMAIL = 4; + GROUP_BY_DOMAIN = 5; + ALL_USERS = 6; + ALL_AUTHENTICATED_USERS = 7; + } + + optional Type type = 1; + optional string value = 2; +} + +message Entry { + enum Permission { + READ = 1; + WRITE = 2; + FULL_CONTROL = 3; + } + + optional Scope scope = 1; + optional Permission permission = 2; + optional string display_name = 3; +} + +message AccessControlList { + optional string owner = 1; + repeated Entry entries = 2; +} + +message FieldValue { + enum ContentType { + TEXT = 0; + HTML = 1; + ATOM = 2; + DATE = 3; + NUMBER = 4; + GEO = 5; + } + + optional ContentType type = 1 [default = TEXT]; + + optional string language = 2 [default = "en"]; + + optional string string_value = 3; + + optional group Geo = 4 { + required double lat = 5; + required double lng = 6; + } +} + +message Field { + required string name = 1; + required FieldValue value = 2; +} + +message FieldTypes { + required string name = 1; + repeated FieldValue.ContentType type = 2; +} + +message IndexShardSettings { + repeated int32 prev_num_shards = 1; + required int32 num_shards = 2 [default=1]; + repeated int32 prev_num_shards_search_false = 3; + optional string local_replica = 4 [default = ""]; +} + +message FacetValue { + enum ContentType { + ATOM = 2; + NUMBER = 4; + } + + optional ContentType type = 1 [default = ATOM]; + optional string string_value = 
3; +} + +message Facet { + required string name = 1; + required FacetValue value = 2; +} + +message DocumentMetadata { + optional int64 version = 1; + optional int64 committed_st_version = 2; +} + +message Document { + optional string id = 1; + optional string language = 2 [default = "en"]; + repeated Field field = 3; + optional int32 order_id = 4; + + enum Storage { + DISK = 0; + } + + optional Storage storage = 5 [default = DISK]; + repeated Facet facet = 8; +} + +message SearchServiceError { + enum ErrorCode { + OK = 0; + INVALID_REQUEST = 1; + TRANSIENT_ERROR = 2; + INTERNAL_ERROR = 3; + PERMISSION_DENIED = 4; + TIMEOUT = 5; + CONCURRENT_TRANSACTION = 6; + } +} + +message RequestStatus { + required SearchServiceError.ErrorCode code = 1; + optional string error_detail = 2; + optional int32 canonical_code = 3; +} + +message IndexSpec { + required string name = 1; + + enum Consistency { + GLOBAL = 0; + PER_DOCUMENT = 1; + } + optional Consistency consistency = 2 [default = PER_DOCUMENT]; + + optional string namespace = 3; + optional int32 version = 4; + + enum Source { + SEARCH = 0; + DATASTORE = 1; + CLOUD_STORAGE = 2; + } + optional Source source = 5 [default = SEARCH]; + + enum Mode { + PRIORITY = 0; + BACKGROUND = 1; + } + optional Mode mode = 6 [default = PRIORITY]; +} + +message IndexMetadata { + required IndexSpec index_spec = 1; + + repeated FieldTypes field = 2; + + message Storage { + optional int64 amount_used = 1; + optional int64 limit = 2; + } + optional Storage storage = 3; +} + +message IndexDocumentParams { + repeated Document document = 1; + + enum Freshness { + SYNCHRONOUSLY = 0; + WHEN_CONVENIENT = 1; + } + optional Freshness freshness = 2 [default = SYNCHRONOUSLY, deprecated=true]; + + required IndexSpec index_spec = 3; +} + +message IndexDocumentRequest { + required IndexDocumentParams params = 1; + + optional bytes app_id = 3; +} + +message IndexDocumentResponse { + repeated RequestStatus status = 1; + + repeated string doc_id = 2; +} + +message DeleteDocumentParams { + repeated string doc_id = 1; + + required IndexSpec index_spec = 2; +} + +message DeleteDocumentRequest { + required DeleteDocumentParams params = 1; + + optional bytes app_id = 3; +} + +message DeleteDocumentResponse { + repeated RequestStatus status = 1; +} + +message ListDocumentsParams { + required IndexSpec index_spec = 1; + optional string start_doc_id = 2; + optional bool include_start_doc = 3 [default = true]; + optional int32 limit = 4 [default = 100]; + optional bool keys_only = 5; +} + +message ListDocumentsRequest { + required ListDocumentsParams params = 1; + + optional bytes app_id = 2; +} + +message ListDocumentsResponse { + required RequestStatus status = 1; + + repeated Document document = 2; +} + +message ListIndexesParams { + optional bool fetch_schema = 1; + optional int32 limit = 2 [default = 20]; + optional string namespace = 3; + optional string start_index_name = 4; + optional bool include_start_index = 5 [default = true]; + optional string index_name_prefix = 6; + optional int32 offset = 7; + optional IndexSpec.Source source = 8 [default = SEARCH]; +} + +message ListIndexesRequest { + required ListIndexesParams params = 1; + + optional bytes app_id = 3; +} + +message ListIndexesResponse { + required RequestStatus status = 1; + repeated IndexMetadata index_metadata = 2; +} + +message DeleteSchemaParams { + optional IndexSpec.Source source = 1 [default = SEARCH]; + repeated IndexSpec index_spec = 2; +} + +message DeleteSchemaRequest { + required DeleteSchemaParams params = 1; + 
+ optional bytes app_id = 3; +} + +message DeleteSchemaResponse { + repeated RequestStatus status = 1; +} + +message SortSpec { + required string sort_expression = 1; + optional bool sort_descending = 2 [default = true]; + optional string default_value_text = 4; + optional double default_value_numeric = 5; +} + +message ScorerSpec { + enum Scorer { + RESCORING_MATCH_SCORER = 0; + MATCH_SCORER = 2; + } + optional Scorer scorer = 1 [default = MATCH_SCORER]; + + optional int32 limit = 2 [default = 1000]; + optional string match_scorer_parameters = 9; +} + +message FieldSpec { + repeated string name = 1; + + repeated group Expression = 2 { + required string name = 3; + required string expression = 4; + } +} + +message FacetRange { + optional string name = 1; + optional string start = 2; + optional string end = 3; +} + +message FacetRequestParam { + optional int32 value_limit = 1; + repeated FacetRange range = 2; + repeated string value_constraint = 3; +} + +message FacetAutoDetectParam { + optional int32 value_limit = 1 [default = 10]; +} + +message FacetRequest { + required string name = 1; + optional FacetRequestParam params = 2; +} + +message FacetRefinement { + required string name = 1; + optional string value = 2; + + message Range { + optional string start = 1; + optional string end = 2; + } + optional Range range = 3; +} + +message SearchParams { + required IndexSpec index_spec = 1; + required string query = 2; + optional string cursor = 4; + optional int32 offset = 11; + + enum CursorType { + NONE = 0; + SINGLE = 1; + PER_RESULT = 2; + } + optional CursorType cursor_type = 5 [default = NONE]; + + optional int32 limit = 6 [default = 20]; + optional int32 matched_count_accuracy = 7; + repeated SortSpec sort_spec = 8; + optional ScorerSpec scorer_spec = 9; + optional FieldSpec field_spec = 10; + optional bool keys_only = 12; + + enum ParsingMode { + STRICT = 0; + RELAXED = 1; + } + optional ParsingMode parsing_mode = 13 [default = STRICT]; + + optional int32 auto_discover_facet_count = 15 [default = 0]; + repeated FacetRequest include_facet = 16; + repeated FacetRefinement facet_refinement = 17; + optional FacetAutoDetectParam facet_auto_detect_param = 18; + optional int32 facet_depth = 19 [default=1000]; +} + +message SearchRequest { + required SearchParams params = 1; + + optional bytes app_id = 3; +} + +message FacetResultValue { + required string name = 1; + required int32 count = 2; + required FacetRefinement refinement = 3; +} + +message FacetResult { + required string name = 1; + repeated FacetResultValue value = 2; +} + +message SearchResult { + required Document document = 1; + repeated Field expression = 4; + repeated double score = 2; + optional string cursor = 3; +} + +message SearchResponse { + repeated SearchResult result = 1; + required int64 matched_count = 2; + required RequestStatus status = 3; + optional string cursor = 4; + repeated FacetResult facet_result = 5; + + extensions 1000 to 9999; +} diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go b/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go new file mode 100644 index 0000000..60628ec --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go @@ -0,0 +1,1858 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/socket/socket_service.proto +// DO NOT EDIT! + +/* +Package socket is a generated protocol buffer package. 
+ +It is generated from these files: + google.golang.org/appengine/internal/socket/socket_service.proto + +It has these top-level messages: + RemoteSocketServiceError + AddressPort + CreateSocketRequest + CreateSocketReply + BindRequest + BindReply + GetSocketNameRequest + GetSocketNameReply + GetPeerNameRequest + GetPeerNameReply + SocketOption + SetSocketOptionsRequest + SetSocketOptionsReply + GetSocketOptionsRequest + GetSocketOptionsReply + ConnectRequest + ConnectReply + ListenRequest + ListenReply + AcceptRequest + AcceptReply + ShutDownRequest + ShutDownReply + CloseRequest + CloseReply + SendRequest + SendReply + ReceiveRequest + ReceiveReply + PollEvent + PollRequest + PollReply + ResolveRequest + ResolveReply +*/ +package socket + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type RemoteSocketServiceError_ErrorCode int32 + +const ( + RemoteSocketServiceError_SYSTEM_ERROR RemoteSocketServiceError_ErrorCode = 1 + RemoteSocketServiceError_GAI_ERROR RemoteSocketServiceError_ErrorCode = 2 + RemoteSocketServiceError_FAILURE RemoteSocketServiceError_ErrorCode = 4 + RemoteSocketServiceError_PERMISSION_DENIED RemoteSocketServiceError_ErrorCode = 5 + RemoteSocketServiceError_INVALID_REQUEST RemoteSocketServiceError_ErrorCode = 6 + RemoteSocketServiceError_SOCKET_CLOSED RemoteSocketServiceError_ErrorCode = 7 +) + +var RemoteSocketServiceError_ErrorCode_name = map[int32]string{ + 1: "SYSTEM_ERROR", + 2: "GAI_ERROR", + 4: "FAILURE", + 5: "PERMISSION_DENIED", + 6: "INVALID_REQUEST", + 7: "SOCKET_CLOSED", +} +var RemoteSocketServiceError_ErrorCode_value = map[string]int32{ + "SYSTEM_ERROR": 1, + "GAI_ERROR": 2, + "FAILURE": 4, + "PERMISSION_DENIED": 5, + "INVALID_REQUEST": 6, + "SOCKET_CLOSED": 7, +} + +func (x RemoteSocketServiceError_ErrorCode) Enum() *RemoteSocketServiceError_ErrorCode { + p := new(RemoteSocketServiceError_ErrorCode) + *p = x + return p +} +func (x RemoteSocketServiceError_ErrorCode) String() string { + return proto.EnumName(RemoteSocketServiceError_ErrorCode_name, int32(x)) +} +func (x *RemoteSocketServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_ErrorCode_value, data, "RemoteSocketServiceError_ErrorCode") + if err != nil { + return err + } + *x = RemoteSocketServiceError_ErrorCode(value) + return nil +} + +type RemoteSocketServiceError_SystemError int32 + +const ( + RemoteSocketServiceError_SYS_SUCCESS RemoteSocketServiceError_SystemError = 0 + RemoteSocketServiceError_SYS_EPERM RemoteSocketServiceError_SystemError = 1 + RemoteSocketServiceError_SYS_ENOENT RemoteSocketServiceError_SystemError = 2 + RemoteSocketServiceError_SYS_ESRCH RemoteSocketServiceError_SystemError = 3 + RemoteSocketServiceError_SYS_EINTR RemoteSocketServiceError_SystemError = 4 + RemoteSocketServiceError_SYS_EIO RemoteSocketServiceError_SystemError = 5 + RemoteSocketServiceError_SYS_ENXIO RemoteSocketServiceError_SystemError = 6 + RemoteSocketServiceError_SYS_E2BIG RemoteSocketServiceError_SystemError = 7 + RemoteSocketServiceError_SYS_ENOEXEC RemoteSocketServiceError_SystemError = 8 + RemoteSocketServiceError_SYS_EBADF RemoteSocketServiceError_SystemError = 9 + RemoteSocketServiceError_SYS_ECHILD RemoteSocketServiceError_SystemError = 10 + RemoteSocketServiceError_SYS_EAGAIN RemoteSocketServiceError_SystemError = 11 + 
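+	// Editorial note, not protoc-gen-go output: the next constant,
+	// SYS_EWOULDBLOCK, deliberately reuses the value 11 of SYS_EAGAIN
+	// above, mirroring the POSIX errno alias (just as SYS_EDEADLOCK later
+	// aliases SYS_EDEADLK = 35). Go permits repeated constant values, but
+	// a map literal may not repeat keys, so in the _name map below each
+	// aliased name survives only as a "// Duplicate value:" comment.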
RemoteSocketServiceError_SYS_EWOULDBLOCK RemoteSocketServiceError_SystemError = 11 + RemoteSocketServiceError_SYS_ENOMEM RemoteSocketServiceError_SystemError = 12 + RemoteSocketServiceError_SYS_EACCES RemoteSocketServiceError_SystemError = 13 + RemoteSocketServiceError_SYS_EFAULT RemoteSocketServiceError_SystemError = 14 + RemoteSocketServiceError_SYS_ENOTBLK RemoteSocketServiceError_SystemError = 15 + RemoteSocketServiceError_SYS_EBUSY RemoteSocketServiceError_SystemError = 16 + RemoteSocketServiceError_SYS_EEXIST RemoteSocketServiceError_SystemError = 17 + RemoteSocketServiceError_SYS_EXDEV RemoteSocketServiceError_SystemError = 18 + RemoteSocketServiceError_SYS_ENODEV RemoteSocketServiceError_SystemError = 19 + RemoteSocketServiceError_SYS_ENOTDIR RemoteSocketServiceError_SystemError = 20 + RemoteSocketServiceError_SYS_EISDIR RemoteSocketServiceError_SystemError = 21 + RemoteSocketServiceError_SYS_EINVAL RemoteSocketServiceError_SystemError = 22 + RemoteSocketServiceError_SYS_ENFILE RemoteSocketServiceError_SystemError = 23 + RemoteSocketServiceError_SYS_EMFILE RemoteSocketServiceError_SystemError = 24 + RemoteSocketServiceError_SYS_ENOTTY RemoteSocketServiceError_SystemError = 25 + RemoteSocketServiceError_SYS_ETXTBSY RemoteSocketServiceError_SystemError = 26 + RemoteSocketServiceError_SYS_EFBIG RemoteSocketServiceError_SystemError = 27 + RemoteSocketServiceError_SYS_ENOSPC RemoteSocketServiceError_SystemError = 28 + RemoteSocketServiceError_SYS_ESPIPE RemoteSocketServiceError_SystemError = 29 + RemoteSocketServiceError_SYS_EROFS RemoteSocketServiceError_SystemError = 30 + RemoteSocketServiceError_SYS_EMLINK RemoteSocketServiceError_SystemError = 31 + RemoteSocketServiceError_SYS_EPIPE RemoteSocketServiceError_SystemError = 32 + RemoteSocketServiceError_SYS_EDOM RemoteSocketServiceError_SystemError = 33 + RemoteSocketServiceError_SYS_ERANGE RemoteSocketServiceError_SystemError = 34 + RemoteSocketServiceError_SYS_EDEADLK RemoteSocketServiceError_SystemError = 35 + RemoteSocketServiceError_SYS_EDEADLOCK RemoteSocketServiceError_SystemError = 35 + RemoteSocketServiceError_SYS_ENAMETOOLONG RemoteSocketServiceError_SystemError = 36 + RemoteSocketServiceError_SYS_ENOLCK RemoteSocketServiceError_SystemError = 37 + RemoteSocketServiceError_SYS_ENOSYS RemoteSocketServiceError_SystemError = 38 + RemoteSocketServiceError_SYS_ENOTEMPTY RemoteSocketServiceError_SystemError = 39 + RemoteSocketServiceError_SYS_ELOOP RemoteSocketServiceError_SystemError = 40 + RemoteSocketServiceError_SYS_ENOMSG RemoteSocketServiceError_SystemError = 42 + RemoteSocketServiceError_SYS_EIDRM RemoteSocketServiceError_SystemError = 43 + RemoteSocketServiceError_SYS_ECHRNG RemoteSocketServiceError_SystemError = 44 + RemoteSocketServiceError_SYS_EL2NSYNC RemoteSocketServiceError_SystemError = 45 + RemoteSocketServiceError_SYS_EL3HLT RemoteSocketServiceError_SystemError = 46 + RemoteSocketServiceError_SYS_EL3RST RemoteSocketServiceError_SystemError = 47 + RemoteSocketServiceError_SYS_ELNRNG RemoteSocketServiceError_SystemError = 48 + RemoteSocketServiceError_SYS_EUNATCH RemoteSocketServiceError_SystemError = 49 + RemoteSocketServiceError_SYS_ENOCSI RemoteSocketServiceError_SystemError = 50 + RemoteSocketServiceError_SYS_EL2HLT RemoteSocketServiceError_SystemError = 51 + RemoteSocketServiceError_SYS_EBADE RemoteSocketServiceError_SystemError = 52 + RemoteSocketServiceError_SYS_EBADR RemoteSocketServiceError_SystemError = 53 + RemoteSocketServiceError_SYS_EXFULL RemoteSocketServiceError_SystemError = 54 + 
RemoteSocketServiceError_SYS_ENOANO RemoteSocketServiceError_SystemError = 55 + RemoteSocketServiceError_SYS_EBADRQC RemoteSocketServiceError_SystemError = 56 + RemoteSocketServiceError_SYS_EBADSLT RemoteSocketServiceError_SystemError = 57 + RemoteSocketServiceError_SYS_EBFONT RemoteSocketServiceError_SystemError = 59 + RemoteSocketServiceError_SYS_ENOSTR RemoteSocketServiceError_SystemError = 60 + RemoteSocketServiceError_SYS_ENODATA RemoteSocketServiceError_SystemError = 61 + RemoteSocketServiceError_SYS_ETIME RemoteSocketServiceError_SystemError = 62 + RemoteSocketServiceError_SYS_ENOSR RemoteSocketServiceError_SystemError = 63 + RemoteSocketServiceError_SYS_ENONET RemoteSocketServiceError_SystemError = 64 + RemoteSocketServiceError_SYS_ENOPKG RemoteSocketServiceError_SystemError = 65 + RemoteSocketServiceError_SYS_EREMOTE RemoteSocketServiceError_SystemError = 66 + RemoteSocketServiceError_SYS_ENOLINK RemoteSocketServiceError_SystemError = 67 + RemoteSocketServiceError_SYS_EADV RemoteSocketServiceError_SystemError = 68 + RemoteSocketServiceError_SYS_ESRMNT RemoteSocketServiceError_SystemError = 69 + RemoteSocketServiceError_SYS_ECOMM RemoteSocketServiceError_SystemError = 70 + RemoteSocketServiceError_SYS_EPROTO RemoteSocketServiceError_SystemError = 71 + RemoteSocketServiceError_SYS_EMULTIHOP RemoteSocketServiceError_SystemError = 72 + RemoteSocketServiceError_SYS_EDOTDOT RemoteSocketServiceError_SystemError = 73 + RemoteSocketServiceError_SYS_EBADMSG RemoteSocketServiceError_SystemError = 74 + RemoteSocketServiceError_SYS_EOVERFLOW RemoteSocketServiceError_SystemError = 75 + RemoteSocketServiceError_SYS_ENOTUNIQ RemoteSocketServiceError_SystemError = 76 + RemoteSocketServiceError_SYS_EBADFD RemoteSocketServiceError_SystemError = 77 + RemoteSocketServiceError_SYS_EREMCHG RemoteSocketServiceError_SystemError = 78 + RemoteSocketServiceError_SYS_ELIBACC RemoteSocketServiceError_SystemError = 79 + RemoteSocketServiceError_SYS_ELIBBAD RemoteSocketServiceError_SystemError = 80 + RemoteSocketServiceError_SYS_ELIBSCN RemoteSocketServiceError_SystemError = 81 + RemoteSocketServiceError_SYS_ELIBMAX RemoteSocketServiceError_SystemError = 82 + RemoteSocketServiceError_SYS_ELIBEXEC RemoteSocketServiceError_SystemError = 83 + RemoteSocketServiceError_SYS_EILSEQ RemoteSocketServiceError_SystemError = 84 + RemoteSocketServiceError_SYS_ERESTART RemoteSocketServiceError_SystemError = 85 + RemoteSocketServiceError_SYS_ESTRPIPE RemoteSocketServiceError_SystemError = 86 + RemoteSocketServiceError_SYS_EUSERS RemoteSocketServiceError_SystemError = 87 + RemoteSocketServiceError_SYS_ENOTSOCK RemoteSocketServiceError_SystemError = 88 + RemoteSocketServiceError_SYS_EDESTADDRREQ RemoteSocketServiceError_SystemError = 89 + RemoteSocketServiceError_SYS_EMSGSIZE RemoteSocketServiceError_SystemError = 90 + RemoteSocketServiceError_SYS_EPROTOTYPE RemoteSocketServiceError_SystemError = 91 + RemoteSocketServiceError_SYS_ENOPROTOOPT RemoteSocketServiceError_SystemError = 92 + RemoteSocketServiceError_SYS_EPROTONOSUPPORT RemoteSocketServiceError_SystemError = 93 + RemoteSocketServiceError_SYS_ESOCKTNOSUPPORT RemoteSocketServiceError_SystemError = 94 + RemoteSocketServiceError_SYS_EOPNOTSUPP RemoteSocketServiceError_SystemError = 95 + RemoteSocketServiceError_SYS_ENOTSUP RemoteSocketServiceError_SystemError = 95 + RemoteSocketServiceError_SYS_EPFNOSUPPORT RemoteSocketServiceError_SystemError = 96 + RemoteSocketServiceError_SYS_EAFNOSUPPORT RemoteSocketServiceError_SystemError = 97 + RemoteSocketServiceError_SYS_EADDRINUSE 
RemoteSocketServiceError_SystemError = 98 + RemoteSocketServiceError_SYS_EADDRNOTAVAIL RemoteSocketServiceError_SystemError = 99 + RemoteSocketServiceError_SYS_ENETDOWN RemoteSocketServiceError_SystemError = 100 + RemoteSocketServiceError_SYS_ENETUNREACH RemoteSocketServiceError_SystemError = 101 + RemoteSocketServiceError_SYS_ENETRESET RemoteSocketServiceError_SystemError = 102 + RemoteSocketServiceError_SYS_ECONNABORTED RemoteSocketServiceError_SystemError = 103 + RemoteSocketServiceError_SYS_ECONNRESET RemoteSocketServiceError_SystemError = 104 + RemoteSocketServiceError_SYS_ENOBUFS RemoteSocketServiceError_SystemError = 105 + RemoteSocketServiceError_SYS_EISCONN RemoteSocketServiceError_SystemError = 106 + RemoteSocketServiceError_SYS_ENOTCONN RemoteSocketServiceError_SystemError = 107 + RemoteSocketServiceError_SYS_ESHUTDOWN RemoteSocketServiceError_SystemError = 108 + RemoteSocketServiceError_SYS_ETOOMANYREFS RemoteSocketServiceError_SystemError = 109 + RemoteSocketServiceError_SYS_ETIMEDOUT RemoteSocketServiceError_SystemError = 110 + RemoteSocketServiceError_SYS_ECONNREFUSED RemoteSocketServiceError_SystemError = 111 + RemoteSocketServiceError_SYS_EHOSTDOWN RemoteSocketServiceError_SystemError = 112 + RemoteSocketServiceError_SYS_EHOSTUNREACH RemoteSocketServiceError_SystemError = 113 + RemoteSocketServiceError_SYS_EALREADY RemoteSocketServiceError_SystemError = 114 + RemoteSocketServiceError_SYS_EINPROGRESS RemoteSocketServiceError_SystemError = 115 + RemoteSocketServiceError_SYS_ESTALE RemoteSocketServiceError_SystemError = 116 + RemoteSocketServiceError_SYS_EUCLEAN RemoteSocketServiceError_SystemError = 117 + RemoteSocketServiceError_SYS_ENOTNAM RemoteSocketServiceError_SystemError = 118 + RemoteSocketServiceError_SYS_ENAVAIL RemoteSocketServiceError_SystemError = 119 + RemoteSocketServiceError_SYS_EISNAM RemoteSocketServiceError_SystemError = 120 + RemoteSocketServiceError_SYS_EREMOTEIO RemoteSocketServiceError_SystemError = 121 + RemoteSocketServiceError_SYS_EDQUOT RemoteSocketServiceError_SystemError = 122 + RemoteSocketServiceError_SYS_ENOMEDIUM RemoteSocketServiceError_SystemError = 123 + RemoteSocketServiceError_SYS_EMEDIUMTYPE RemoteSocketServiceError_SystemError = 124 + RemoteSocketServiceError_SYS_ECANCELED RemoteSocketServiceError_SystemError = 125 + RemoteSocketServiceError_SYS_ENOKEY RemoteSocketServiceError_SystemError = 126 + RemoteSocketServiceError_SYS_EKEYEXPIRED RemoteSocketServiceError_SystemError = 127 + RemoteSocketServiceError_SYS_EKEYREVOKED RemoteSocketServiceError_SystemError = 128 + RemoteSocketServiceError_SYS_EKEYREJECTED RemoteSocketServiceError_SystemError = 129 + RemoteSocketServiceError_SYS_EOWNERDEAD RemoteSocketServiceError_SystemError = 130 + RemoteSocketServiceError_SYS_ENOTRECOVERABLE RemoteSocketServiceError_SystemError = 131 + RemoteSocketServiceError_SYS_ERFKILL RemoteSocketServiceError_SystemError = 132 +) + +var RemoteSocketServiceError_SystemError_name = map[int32]string{ + 0: "SYS_SUCCESS", + 1: "SYS_EPERM", + 2: "SYS_ENOENT", + 3: "SYS_ESRCH", + 4: "SYS_EINTR", + 5: "SYS_EIO", + 6: "SYS_ENXIO", + 7: "SYS_E2BIG", + 8: "SYS_ENOEXEC", + 9: "SYS_EBADF", + 10: "SYS_ECHILD", + 11: "SYS_EAGAIN", + // Duplicate value: 11: "SYS_EWOULDBLOCK", + 12: "SYS_ENOMEM", + 13: "SYS_EACCES", + 14: "SYS_EFAULT", + 15: "SYS_ENOTBLK", + 16: "SYS_EBUSY", + 17: "SYS_EEXIST", + 18: "SYS_EXDEV", + 19: "SYS_ENODEV", + 20: "SYS_ENOTDIR", + 21: "SYS_EISDIR", + 22: "SYS_EINVAL", + 23: "SYS_ENFILE", + 24: "SYS_EMFILE", + 25: "SYS_ENOTTY", + 26: "SYS_ETXTBSY", + 27: 
"SYS_EFBIG", + 28: "SYS_ENOSPC", + 29: "SYS_ESPIPE", + 30: "SYS_EROFS", + 31: "SYS_EMLINK", + 32: "SYS_EPIPE", + 33: "SYS_EDOM", + 34: "SYS_ERANGE", + 35: "SYS_EDEADLK", + // Duplicate value: 35: "SYS_EDEADLOCK", + 36: "SYS_ENAMETOOLONG", + 37: "SYS_ENOLCK", + 38: "SYS_ENOSYS", + 39: "SYS_ENOTEMPTY", + 40: "SYS_ELOOP", + 42: "SYS_ENOMSG", + 43: "SYS_EIDRM", + 44: "SYS_ECHRNG", + 45: "SYS_EL2NSYNC", + 46: "SYS_EL3HLT", + 47: "SYS_EL3RST", + 48: "SYS_ELNRNG", + 49: "SYS_EUNATCH", + 50: "SYS_ENOCSI", + 51: "SYS_EL2HLT", + 52: "SYS_EBADE", + 53: "SYS_EBADR", + 54: "SYS_EXFULL", + 55: "SYS_ENOANO", + 56: "SYS_EBADRQC", + 57: "SYS_EBADSLT", + 59: "SYS_EBFONT", + 60: "SYS_ENOSTR", + 61: "SYS_ENODATA", + 62: "SYS_ETIME", + 63: "SYS_ENOSR", + 64: "SYS_ENONET", + 65: "SYS_ENOPKG", + 66: "SYS_EREMOTE", + 67: "SYS_ENOLINK", + 68: "SYS_EADV", + 69: "SYS_ESRMNT", + 70: "SYS_ECOMM", + 71: "SYS_EPROTO", + 72: "SYS_EMULTIHOP", + 73: "SYS_EDOTDOT", + 74: "SYS_EBADMSG", + 75: "SYS_EOVERFLOW", + 76: "SYS_ENOTUNIQ", + 77: "SYS_EBADFD", + 78: "SYS_EREMCHG", + 79: "SYS_ELIBACC", + 80: "SYS_ELIBBAD", + 81: "SYS_ELIBSCN", + 82: "SYS_ELIBMAX", + 83: "SYS_ELIBEXEC", + 84: "SYS_EILSEQ", + 85: "SYS_ERESTART", + 86: "SYS_ESTRPIPE", + 87: "SYS_EUSERS", + 88: "SYS_ENOTSOCK", + 89: "SYS_EDESTADDRREQ", + 90: "SYS_EMSGSIZE", + 91: "SYS_EPROTOTYPE", + 92: "SYS_ENOPROTOOPT", + 93: "SYS_EPROTONOSUPPORT", + 94: "SYS_ESOCKTNOSUPPORT", + 95: "SYS_EOPNOTSUPP", + // Duplicate value: 95: "SYS_ENOTSUP", + 96: "SYS_EPFNOSUPPORT", + 97: "SYS_EAFNOSUPPORT", + 98: "SYS_EADDRINUSE", + 99: "SYS_EADDRNOTAVAIL", + 100: "SYS_ENETDOWN", + 101: "SYS_ENETUNREACH", + 102: "SYS_ENETRESET", + 103: "SYS_ECONNABORTED", + 104: "SYS_ECONNRESET", + 105: "SYS_ENOBUFS", + 106: "SYS_EISCONN", + 107: "SYS_ENOTCONN", + 108: "SYS_ESHUTDOWN", + 109: "SYS_ETOOMANYREFS", + 110: "SYS_ETIMEDOUT", + 111: "SYS_ECONNREFUSED", + 112: "SYS_EHOSTDOWN", + 113: "SYS_EHOSTUNREACH", + 114: "SYS_EALREADY", + 115: "SYS_EINPROGRESS", + 116: "SYS_ESTALE", + 117: "SYS_EUCLEAN", + 118: "SYS_ENOTNAM", + 119: "SYS_ENAVAIL", + 120: "SYS_EISNAM", + 121: "SYS_EREMOTEIO", + 122: "SYS_EDQUOT", + 123: "SYS_ENOMEDIUM", + 124: "SYS_EMEDIUMTYPE", + 125: "SYS_ECANCELED", + 126: "SYS_ENOKEY", + 127: "SYS_EKEYEXPIRED", + 128: "SYS_EKEYREVOKED", + 129: "SYS_EKEYREJECTED", + 130: "SYS_EOWNERDEAD", + 131: "SYS_ENOTRECOVERABLE", + 132: "SYS_ERFKILL", +} +var RemoteSocketServiceError_SystemError_value = map[string]int32{ + "SYS_SUCCESS": 0, + "SYS_EPERM": 1, + "SYS_ENOENT": 2, + "SYS_ESRCH": 3, + "SYS_EINTR": 4, + "SYS_EIO": 5, + "SYS_ENXIO": 6, + "SYS_E2BIG": 7, + "SYS_ENOEXEC": 8, + "SYS_EBADF": 9, + "SYS_ECHILD": 10, + "SYS_EAGAIN": 11, + "SYS_EWOULDBLOCK": 11, + "SYS_ENOMEM": 12, + "SYS_EACCES": 13, + "SYS_EFAULT": 14, + "SYS_ENOTBLK": 15, + "SYS_EBUSY": 16, + "SYS_EEXIST": 17, + "SYS_EXDEV": 18, + "SYS_ENODEV": 19, + "SYS_ENOTDIR": 20, + "SYS_EISDIR": 21, + "SYS_EINVAL": 22, + "SYS_ENFILE": 23, + "SYS_EMFILE": 24, + "SYS_ENOTTY": 25, + "SYS_ETXTBSY": 26, + "SYS_EFBIG": 27, + "SYS_ENOSPC": 28, + "SYS_ESPIPE": 29, + "SYS_EROFS": 30, + "SYS_EMLINK": 31, + "SYS_EPIPE": 32, + "SYS_EDOM": 33, + "SYS_ERANGE": 34, + "SYS_EDEADLK": 35, + "SYS_EDEADLOCK": 35, + "SYS_ENAMETOOLONG": 36, + "SYS_ENOLCK": 37, + "SYS_ENOSYS": 38, + "SYS_ENOTEMPTY": 39, + "SYS_ELOOP": 40, + "SYS_ENOMSG": 42, + "SYS_EIDRM": 43, + "SYS_ECHRNG": 44, + "SYS_EL2NSYNC": 45, + "SYS_EL3HLT": 46, + "SYS_EL3RST": 47, + "SYS_ELNRNG": 48, + "SYS_EUNATCH": 49, + "SYS_ENOCSI": 50, + "SYS_EL2HLT": 51, + "SYS_EBADE": 52, + "SYS_EBADR": 53, + 
"SYS_EXFULL": 54, + "SYS_ENOANO": 55, + "SYS_EBADRQC": 56, + "SYS_EBADSLT": 57, + "SYS_EBFONT": 59, + "SYS_ENOSTR": 60, + "SYS_ENODATA": 61, + "SYS_ETIME": 62, + "SYS_ENOSR": 63, + "SYS_ENONET": 64, + "SYS_ENOPKG": 65, + "SYS_EREMOTE": 66, + "SYS_ENOLINK": 67, + "SYS_EADV": 68, + "SYS_ESRMNT": 69, + "SYS_ECOMM": 70, + "SYS_EPROTO": 71, + "SYS_EMULTIHOP": 72, + "SYS_EDOTDOT": 73, + "SYS_EBADMSG": 74, + "SYS_EOVERFLOW": 75, + "SYS_ENOTUNIQ": 76, + "SYS_EBADFD": 77, + "SYS_EREMCHG": 78, + "SYS_ELIBACC": 79, + "SYS_ELIBBAD": 80, + "SYS_ELIBSCN": 81, + "SYS_ELIBMAX": 82, + "SYS_ELIBEXEC": 83, + "SYS_EILSEQ": 84, + "SYS_ERESTART": 85, + "SYS_ESTRPIPE": 86, + "SYS_EUSERS": 87, + "SYS_ENOTSOCK": 88, + "SYS_EDESTADDRREQ": 89, + "SYS_EMSGSIZE": 90, + "SYS_EPROTOTYPE": 91, + "SYS_ENOPROTOOPT": 92, + "SYS_EPROTONOSUPPORT": 93, + "SYS_ESOCKTNOSUPPORT": 94, + "SYS_EOPNOTSUPP": 95, + "SYS_ENOTSUP": 95, + "SYS_EPFNOSUPPORT": 96, + "SYS_EAFNOSUPPORT": 97, + "SYS_EADDRINUSE": 98, + "SYS_EADDRNOTAVAIL": 99, + "SYS_ENETDOWN": 100, + "SYS_ENETUNREACH": 101, + "SYS_ENETRESET": 102, + "SYS_ECONNABORTED": 103, + "SYS_ECONNRESET": 104, + "SYS_ENOBUFS": 105, + "SYS_EISCONN": 106, + "SYS_ENOTCONN": 107, + "SYS_ESHUTDOWN": 108, + "SYS_ETOOMANYREFS": 109, + "SYS_ETIMEDOUT": 110, + "SYS_ECONNREFUSED": 111, + "SYS_EHOSTDOWN": 112, + "SYS_EHOSTUNREACH": 113, + "SYS_EALREADY": 114, + "SYS_EINPROGRESS": 115, + "SYS_ESTALE": 116, + "SYS_EUCLEAN": 117, + "SYS_ENOTNAM": 118, + "SYS_ENAVAIL": 119, + "SYS_EISNAM": 120, + "SYS_EREMOTEIO": 121, + "SYS_EDQUOT": 122, + "SYS_ENOMEDIUM": 123, + "SYS_EMEDIUMTYPE": 124, + "SYS_ECANCELED": 125, + "SYS_ENOKEY": 126, + "SYS_EKEYEXPIRED": 127, + "SYS_EKEYREVOKED": 128, + "SYS_EKEYREJECTED": 129, + "SYS_EOWNERDEAD": 130, + "SYS_ENOTRECOVERABLE": 131, + "SYS_ERFKILL": 132, +} + +func (x RemoteSocketServiceError_SystemError) Enum() *RemoteSocketServiceError_SystemError { + p := new(RemoteSocketServiceError_SystemError) + *p = x + return p +} +func (x RemoteSocketServiceError_SystemError) String() string { + return proto.EnumName(RemoteSocketServiceError_SystemError_name, int32(x)) +} +func (x *RemoteSocketServiceError_SystemError) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_SystemError_value, data, "RemoteSocketServiceError_SystemError") + if err != nil { + return err + } + *x = RemoteSocketServiceError_SystemError(value) + return nil +} + +type CreateSocketRequest_SocketFamily int32 + +const ( + CreateSocketRequest_IPv4 CreateSocketRequest_SocketFamily = 1 + CreateSocketRequest_IPv6 CreateSocketRequest_SocketFamily = 2 +) + +var CreateSocketRequest_SocketFamily_name = map[int32]string{ + 1: "IPv4", + 2: "IPv6", +} +var CreateSocketRequest_SocketFamily_value = map[string]int32{ + "IPv4": 1, + "IPv6": 2, +} + +func (x CreateSocketRequest_SocketFamily) Enum() *CreateSocketRequest_SocketFamily { + p := new(CreateSocketRequest_SocketFamily) + *p = x + return p +} +func (x CreateSocketRequest_SocketFamily) String() string { + return proto.EnumName(CreateSocketRequest_SocketFamily_name, int32(x)) +} +func (x *CreateSocketRequest_SocketFamily) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketFamily_value, data, "CreateSocketRequest_SocketFamily") + if err != nil { + return err + } + *x = CreateSocketRequest_SocketFamily(value) + return nil +} + +type CreateSocketRequest_SocketProtocol int32 + +const ( + CreateSocketRequest_TCP CreateSocketRequest_SocketProtocol = 1 + CreateSocketRequest_UDP 
CreateSocketRequest_SocketProtocol = 2 +) + +var CreateSocketRequest_SocketProtocol_name = map[int32]string{ + 1: "TCP", + 2: "UDP", +} +var CreateSocketRequest_SocketProtocol_value = map[string]int32{ + "TCP": 1, + "UDP": 2, +} + +func (x CreateSocketRequest_SocketProtocol) Enum() *CreateSocketRequest_SocketProtocol { + p := new(CreateSocketRequest_SocketProtocol) + *p = x + return p +} +func (x CreateSocketRequest_SocketProtocol) String() string { + return proto.EnumName(CreateSocketRequest_SocketProtocol_name, int32(x)) +} +func (x *CreateSocketRequest_SocketProtocol) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketProtocol_value, data, "CreateSocketRequest_SocketProtocol") + if err != nil { + return err + } + *x = CreateSocketRequest_SocketProtocol(value) + return nil +} + +type SocketOption_SocketOptionLevel int32 + +const ( + SocketOption_SOCKET_SOL_IP SocketOption_SocketOptionLevel = 0 + SocketOption_SOCKET_SOL_SOCKET SocketOption_SocketOptionLevel = 1 + SocketOption_SOCKET_SOL_TCP SocketOption_SocketOptionLevel = 6 + SocketOption_SOCKET_SOL_UDP SocketOption_SocketOptionLevel = 17 +) + +var SocketOption_SocketOptionLevel_name = map[int32]string{ + 0: "SOCKET_SOL_IP", + 1: "SOCKET_SOL_SOCKET", + 6: "SOCKET_SOL_TCP", + 17: "SOCKET_SOL_UDP", +} +var SocketOption_SocketOptionLevel_value = map[string]int32{ + "SOCKET_SOL_IP": 0, + "SOCKET_SOL_SOCKET": 1, + "SOCKET_SOL_TCP": 6, + "SOCKET_SOL_UDP": 17, +} + +func (x SocketOption_SocketOptionLevel) Enum() *SocketOption_SocketOptionLevel { + p := new(SocketOption_SocketOptionLevel) + *p = x + return p +} +func (x SocketOption_SocketOptionLevel) String() string { + return proto.EnumName(SocketOption_SocketOptionLevel_name, int32(x)) +} +func (x *SocketOption_SocketOptionLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionLevel_value, data, "SocketOption_SocketOptionLevel") + if err != nil { + return err + } + *x = SocketOption_SocketOptionLevel(value) + return nil +} + +type SocketOption_SocketOptionName int32 + +const ( + SocketOption_SOCKET_SO_DEBUG SocketOption_SocketOptionName = 1 + SocketOption_SOCKET_SO_REUSEADDR SocketOption_SocketOptionName = 2 + SocketOption_SOCKET_SO_TYPE SocketOption_SocketOptionName = 3 + SocketOption_SOCKET_SO_ERROR SocketOption_SocketOptionName = 4 + SocketOption_SOCKET_SO_DONTROUTE SocketOption_SocketOptionName = 5 + SocketOption_SOCKET_SO_BROADCAST SocketOption_SocketOptionName = 6 + SocketOption_SOCKET_SO_SNDBUF SocketOption_SocketOptionName = 7 + SocketOption_SOCKET_SO_RCVBUF SocketOption_SocketOptionName = 8 + SocketOption_SOCKET_SO_KEEPALIVE SocketOption_SocketOptionName = 9 + SocketOption_SOCKET_SO_OOBINLINE SocketOption_SocketOptionName = 10 + SocketOption_SOCKET_SO_LINGER SocketOption_SocketOptionName = 13 + SocketOption_SOCKET_SO_RCVTIMEO SocketOption_SocketOptionName = 20 + SocketOption_SOCKET_SO_SNDTIMEO SocketOption_SocketOptionName = 21 + SocketOption_SOCKET_IP_TOS SocketOption_SocketOptionName = 1 + SocketOption_SOCKET_IP_TTL SocketOption_SocketOptionName = 2 + SocketOption_SOCKET_IP_HDRINCL SocketOption_SocketOptionName = 3 + SocketOption_SOCKET_IP_OPTIONS SocketOption_SocketOptionName = 4 + SocketOption_SOCKET_TCP_NODELAY SocketOption_SocketOptionName = 1 + SocketOption_SOCKET_TCP_MAXSEG SocketOption_SocketOptionName = 2 + SocketOption_SOCKET_TCP_CORK SocketOption_SocketOptionName = 3 + SocketOption_SOCKET_TCP_KEEPIDLE SocketOption_SocketOptionName = 4 + SocketOption_SOCKET_TCP_KEEPINTVL 
SocketOption_SocketOptionName = 5 + SocketOption_SOCKET_TCP_KEEPCNT SocketOption_SocketOptionName = 6 + SocketOption_SOCKET_TCP_SYNCNT SocketOption_SocketOptionName = 7 + SocketOption_SOCKET_TCP_LINGER2 SocketOption_SocketOptionName = 8 + SocketOption_SOCKET_TCP_DEFER_ACCEPT SocketOption_SocketOptionName = 9 + SocketOption_SOCKET_TCP_WINDOW_CLAMP SocketOption_SocketOptionName = 10 + SocketOption_SOCKET_TCP_INFO SocketOption_SocketOptionName = 11 + SocketOption_SOCKET_TCP_QUICKACK SocketOption_SocketOptionName = 12 +) + +var SocketOption_SocketOptionName_name = map[int32]string{ + 1: "SOCKET_SO_DEBUG", + 2: "SOCKET_SO_REUSEADDR", + 3: "SOCKET_SO_TYPE", + 4: "SOCKET_SO_ERROR", + 5: "SOCKET_SO_DONTROUTE", + 6: "SOCKET_SO_BROADCAST", + 7: "SOCKET_SO_SNDBUF", + 8: "SOCKET_SO_RCVBUF", + 9: "SOCKET_SO_KEEPALIVE", + 10: "SOCKET_SO_OOBINLINE", + 13: "SOCKET_SO_LINGER", + 20: "SOCKET_SO_RCVTIMEO", + 21: "SOCKET_SO_SNDTIMEO", + // Duplicate value: 1: "SOCKET_IP_TOS", + // Duplicate value: 2: "SOCKET_IP_TTL", + // Duplicate value: 3: "SOCKET_IP_HDRINCL", + // Duplicate value: 4: "SOCKET_IP_OPTIONS", + // Duplicate value: 1: "SOCKET_TCP_NODELAY", + // Duplicate value: 2: "SOCKET_TCP_MAXSEG", + // Duplicate value: 3: "SOCKET_TCP_CORK", + // Duplicate value: 4: "SOCKET_TCP_KEEPIDLE", + // Duplicate value: 5: "SOCKET_TCP_KEEPINTVL", + // Duplicate value: 6: "SOCKET_TCP_KEEPCNT", + // Duplicate value: 7: "SOCKET_TCP_SYNCNT", + // Duplicate value: 8: "SOCKET_TCP_LINGER2", + // Duplicate value: 9: "SOCKET_TCP_DEFER_ACCEPT", + // Duplicate value: 10: "SOCKET_TCP_WINDOW_CLAMP", + 11: "SOCKET_TCP_INFO", + 12: "SOCKET_TCP_QUICKACK", +} +var SocketOption_SocketOptionName_value = map[string]int32{ + "SOCKET_SO_DEBUG": 1, + "SOCKET_SO_REUSEADDR": 2, + "SOCKET_SO_TYPE": 3, + "SOCKET_SO_ERROR": 4, + "SOCKET_SO_DONTROUTE": 5, + "SOCKET_SO_BROADCAST": 6, + "SOCKET_SO_SNDBUF": 7, + "SOCKET_SO_RCVBUF": 8, + "SOCKET_SO_KEEPALIVE": 9, + "SOCKET_SO_OOBINLINE": 10, + "SOCKET_SO_LINGER": 13, + "SOCKET_SO_RCVTIMEO": 20, + "SOCKET_SO_SNDTIMEO": 21, + "SOCKET_IP_TOS": 1, + "SOCKET_IP_TTL": 2, + "SOCKET_IP_HDRINCL": 3, + "SOCKET_IP_OPTIONS": 4, + "SOCKET_TCP_NODELAY": 1, + "SOCKET_TCP_MAXSEG": 2, + "SOCKET_TCP_CORK": 3, + "SOCKET_TCP_KEEPIDLE": 4, + "SOCKET_TCP_KEEPINTVL": 5, + "SOCKET_TCP_KEEPCNT": 6, + "SOCKET_TCP_SYNCNT": 7, + "SOCKET_TCP_LINGER2": 8, + "SOCKET_TCP_DEFER_ACCEPT": 9, + "SOCKET_TCP_WINDOW_CLAMP": 10, + "SOCKET_TCP_INFO": 11, + "SOCKET_TCP_QUICKACK": 12, +} + +func (x SocketOption_SocketOptionName) Enum() *SocketOption_SocketOptionName { + p := new(SocketOption_SocketOptionName) + *p = x + return p +} +func (x SocketOption_SocketOptionName) String() string { + return proto.EnumName(SocketOption_SocketOptionName_name, int32(x)) +} +func (x *SocketOption_SocketOptionName) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionName_value, data, "SocketOption_SocketOptionName") + if err != nil { + return err + } + *x = SocketOption_SocketOptionName(value) + return nil +} + +type ShutDownRequest_How int32 + +const ( + ShutDownRequest_SOCKET_SHUT_RD ShutDownRequest_How = 1 + ShutDownRequest_SOCKET_SHUT_WR ShutDownRequest_How = 2 + ShutDownRequest_SOCKET_SHUT_RDWR ShutDownRequest_How = 3 +) + +var ShutDownRequest_How_name = map[int32]string{ + 1: "SOCKET_SHUT_RD", + 2: "SOCKET_SHUT_WR", + 3: "SOCKET_SHUT_RDWR", +} +var ShutDownRequest_How_value = map[string]int32{ + "SOCKET_SHUT_RD": 1, + "SOCKET_SHUT_WR": 2, + "SOCKET_SHUT_RDWR": 3, +} + +func (x ShutDownRequest_How) Enum() 
*ShutDownRequest_How { + p := new(ShutDownRequest_How) + *p = x + return p +} +func (x ShutDownRequest_How) String() string { + return proto.EnumName(ShutDownRequest_How_name, int32(x)) +} +func (x *ShutDownRequest_How) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ShutDownRequest_How_value, data, "ShutDownRequest_How") + if err != nil { + return err + } + *x = ShutDownRequest_How(value) + return nil +} + +type ReceiveRequest_Flags int32 + +const ( + ReceiveRequest_MSG_OOB ReceiveRequest_Flags = 1 + ReceiveRequest_MSG_PEEK ReceiveRequest_Flags = 2 +) + +var ReceiveRequest_Flags_name = map[int32]string{ + 1: "MSG_OOB", + 2: "MSG_PEEK", +} +var ReceiveRequest_Flags_value = map[string]int32{ + "MSG_OOB": 1, + "MSG_PEEK": 2, +} + +func (x ReceiveRequest_Flags) Enum() *ReceiveRequest_Flags { + p := new(ReceiveRequest_Flags) + *p = x + return p +} +func (x ReceiveRequest_Flags) String() string { + return proto.EnumName(ReceiveRequest_Flags_name, int32(x)) +} +func (x *ReceiveRequest_Flags) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ReceiveRequest_Flags_value, data, "ReceiveRequest_Flags") + if err != nil { + return err + } + *x = ReceiveRequest_Flags(value) + return nil +} + +type PollEvent_PollEventFlag int32 + +const ( + PollEvent_SOCKET_POLLNONE PollEvent_PollEventFlag = 0 + PollEvent_SOCKET_POLLIN PollEvent_PollEventFlag = 1 + PollEvent_SOCKET_POLLPRI PollEvent_PollEventFlag = 2 + PollEvent_SOCKET_POLLOUT PollEvent_PollEventFlag = 4 + PollEvent_SOCKET_POLLERR PollEvent_PollEventFlag = 8 + PollEvent_SOCKET_POLLHUP PollEvent_PollEventFlag = 16 + PollEvent_SOCKET_POLLNVAL PollEvent_PollEventFlag = 32 + PollEvent_SOCKET_POLLRDNORM PollEvent_PollEventFlag = 64 + PollEvent_SOCKET_POLLRDBAND PollEvent_PollEventFlag = 128 + PollEvent_SOCKET_POLLWRNORM PollEvent_PollEventFlag = 256 + PollEvent_SOCKET_POLLWRBAND PollEvent_PollEventFlag = 512 + PollEvent_SOCKET_POLLMSG PollEvent_PollEventFlag = 1024 + PollEvent_SOCKET_POLLREMOVE PollEvent_PollEventFlag = 4096 + PollEvent_SOCKET_POLLRDHUP PollEvent_PollEventFlag = 8192 +) + +var PollEvent_PollEventFlag_name = map[int32]string{ + 0: "SOCKET_POLLNONE", + 1: "SOCKET_POLLIN", + 2: "SOCKET_POLLPRI", + 4: "SOCKET_POLLOUT", + 8: "SOCKET_POLLERR", + 16: "SOCKET_POLLHUP", + 32: "SOCKET_POLLNVAL", + 64: "SOCKET_POLLRDNORM", + 128: "SOCKET_POLLRDBAND", + 256: "SOCKET_POLLWRNORM", + 512: "SOCKET_POLLWRBAND", + 1024: "SOCKET_POLLMSG", + 4096: "SOCKET_POLLREMOVE", + 8192: "SOCKET_POLLRDHUP", +} +var PollEvent_PollEventFlag_value = map[string]int32{ + "SOCKET_POLLNONE": 0, + "SOCKET_POLLIN": 1, + "SOCKET_POLLPRI": 2, + "SOCKET_POLLOUT": 4, + "SOCKET_POLLERR": 8, + "SOCKET_POLLHUP": 16, + "SOCKET_POLLNVAL": 32, + "SOCKET_POLLRDNORM": 64, + "SOCKET_POLLRDBAND": 128, + "SOCKET_POLLWRNORM": 256, + "SOCKET_POLLWRBAND": 512, + "SOCKET_POLLMSG": 1024, + "SOCKET_POLLREMOVE": 4096, + "SOCKET_POLLRDHUP": 8192, +} + +func (x PollEvent_PollEventFlag) Enum() *PollEvent_PollEventFlag { + p := new(PollEvent_PollEventFlag) + *p = x + return p +} +func (x PollEvent_PollEventFlag) String() string { + return proto.EnumName(PollEvent_PollEventFlag_name, int32(x)) +} +func (x *PollEvent_PollEventFlag) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PollEvent_PollEventFlag_value, data, "PollEvent_PollEventFlag") + if err != nil { + return err + } + *x = PollEvent_PollEventFlag(value) + return nil +} + +type ResolveReply_ErrorCode int32 + +const ( + ResolveReply_SOCKET_EAI_ADDRFAMILY 
ResolveReply_ErrorCode = 1 + ResolveReply_SOCKET_EAI_AGAIN ResolveReply_ErrorCode = 2 + ResolveReply_SOCKET_EAI_BADFLAGS ResolveReply_ErrorCode = 3 + ResolveReply_SOCKET_EAI_FAIL ResolveReply_ErrorCode = 4 + ResolveReply_SOCKET_EAI_FAMILY ResolveReply_ErrorCode = 5 + ResolveReply_SOCKET_EAI_MEMORY ResolveReply_ErrorCode = 6 + ResolveReply_SOCKET_EAI_NODATA ResolveReply_ErrorCode = 7 + ResolveReply_SOCKET_EAI_NONAME ResolveReply_ErrorCode = 8 + ResolveReply_SOCKET_EAI_SERVICE ResolveReply_ErrorCode = 9 + ResolveReply_SOCKET_EAI_SOCKTYPE ResolveReply_ErrorCode = 10 + ResolveReply_SOCKET_EAI_SYSTEM ResolveReply_ErrorCode = 11 + ResolveReply_SOCKET_EAI_BADHINTS ResolveReply_ErrorCode = 12 + ResolveReply_SOCKET_EAI_PROTOCOL ResolveReply_ErrorCode = 13 + ResolveReply_SOCKET_EAI_OVERFLOW ResolveReply_ErrorCode = 14 + ResolveReply_SOCKET_EAI_MAX ResolveReply_ErrorCode = 15 +) + +var ResolveReply_ErrorCode_name = map[int32]string{ + 1: "SOCKET_EAI_ADDRFAMILY", + 2: "SOCKET_EAI_AGAIN", + 3: "SOCKET_EAI_BADFLAGS", + 4: "SOCKET_EAI_FAIL", + 5: "SOCKET_EAI_FAMILY", + 6: "SOCKET_EAI_MEMORY", + 7: "SOCKET_EAI_NODATA", + 8: "SOCKET_EAI_NONAME", + 9: "SOCKET_EAI_SERVICE", + 10: "SOCKET_EAI_SOCKTYPE", + 11: "SOCKET_EAI_SYSTEM", + 12: "SOCKET_EAI_BADHINTS", + 13: "SOCKET_EAI_PROTOCOL", + 14: "SOCKET_EAI_OVERFLOW", + 15: "SOCKET_EAI_MAX", +} +var ResolveReply_ErrorCode_value = map[string]int32{ + "SOCKET_EAI_ADDRFAMILY": 1, + "SOCKET_EAI_AGAIN": 2, + "SOCKET_EAI_BADFLAGS": 3, + "SOCKET_EAI_FAIL": 4, + "SOCKET_EAI_FAMILY": 5, + "SOCKET_EAI_MEMORY": 6, + "SOCKET_EAI_NODATA": 7, + "SOCKET_EAI_NONAME": 8, + "SOCKET_EAI_SERVICE": 9, + "SOCKET_EAI_SOCKTYPE": 10, + "SOCKET_EAI_SYSTEM": 11, + "SOCKET_EAI_BADHINTS": 12, + "SOCKET_EAI_PROTOCOL": 13, + "SOCKET_EAI_OVERFLOW": 14, + "SOCKET_EAI_MAX": 15, +} + +func (x ResolveReply_ErrorCode) Enum() *ResolveReply_ErrorCode { + p := new(ResolveReply_ErrorCode) + *p = x + return p +} +func (x ResolveReply_ErrorCode) String() string { + return proto.EnumName(ResolveReply_ErrorCode_name, int32(x)) +} +func (x *ResolveReply_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ResolveReply_ErrorCode_value, data, "ResolveReply_ErrorCode") + if err != nil { + return err + } + *x = ResolveReply_ErrorCode(value) + return nil +} + +type RemoteSocketServiceError struct { + SystemError *int32 `protobuf:"varint,1,opt,name=system_error,def=0" json:"system_error,omitempty"` + ErrorDetail *string `protobuf:"bytes,2,opt,name=error_detail" json:"error_detail,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RemoteSocketServiceError) Reset() { *m = RemoteSocketServiceError{} } +func (m *RemoteSocketServiceError) String() string { return proto.CompactTextString(m) } +func (*RemoteSocketServiceError) ProtoMessage() {} + +const Default_RemoteSocketServiceError_SystemError int32 = 0 + +func (m *RemoteSocketServiceError) GetSystemError() int32 { + if m != nil && m.SystemError != nil { + return *m.SystemError + } + return Default_RemoteSocketServiceError_SystemError +} + +func (m *RemoteSocketServiceError) GetErrorDetail() string { + if m != nil && m.ErrorDetail != nil { + return *m.ErrorDetail + } + return "" +} + +type AddressPort struct { + Port *int32 `protobuf:"varint,1,req,name=port" json:"port,omitempty"` + PackedAddress []byte `protobuf:"bytes,2,opt,name=packed_address" json:"packed_address,omitempty"` + HostnameHint *string `protobuf:"bytes,3,opt,name=hostname_hint" json:"hostname_hint,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + 
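+// Illustrative sketch, editorial rather than generated output: it shows the
+// nil-safe accessor pattern this file applies to every message. proto2
+// scalar fields are pointers, so values are set through the proto helper
+// constructors and read back through Get* methods that tolerate nil. The
+// function name below is hypothetical and not part of the generated API.
+func exampleAddressPortAccessors() {
+	ap := &AddressPort{
+		Port:         proto.Int32(8080),           // required field, set via pointer helper
+		HostnameHint: proto.String("example.com"), // optional field
+	}
+	_ = ap.GetPort()         // 8080
+	_ = ap.GetHostnameHint() // "example.com"
+
+	var unset *AddressPort
+	_ = unset.GetPort() // 0: the getters check m != nil before dereferencing
+}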
+func (m *AddressPort) Reset() { *m = AddressPort{} } +func (m *AddressPort) String() string { return proto.CompactTextString(m) } +func (*AddressPort) ProtoMessage() {} + +func (m *AddressPort) GetPort() int32 { + if m != nil && m.Port != nil { + return *m.Port + } + return 0 +} + +func (m *AddressPort) GetPackedAddress() []byte { + if m != nil { + return m.PackedAddress + } + return nil +} + +func (m *AddressPort) GetHostnameHint() string { + if m != nil && m.HostnameHint != nil { + return *m.HostnameHint + } + return "" +} + +type CreateSocketRequest struct { + Family *CreateSocketRequest_SocketFamily `protobuf:"varint,1,req,name=family,enum=appengine.CreateSocketRequest_SocketFamily" json:"family,omitempty"` + Protocol *CreateSocketRequest_SocketProtocol `protobuf:"varint,2,req,name=protocol,enum=appengine.CreateSocketRequest_SocketProtocol" json:"protocol,omitempty"` + SocketOptions []*SocketOption `protobuf:"bytes,3,rep,name=socket_options" json:"socket_options,omitempty"` + ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"` + ListenBacklog *int32 `protobuf:"varint,5,opt,name=listen_backlog,def=0" json:"listen_backlog,omitempty"` + RemoteIp *AddressPort `protobuf:"bytes,6,opt,name=remote_ip" json:"remote_ip,omitempty"` + AppId *string `protobuf:"bytes,9,opt,name=app_id" json:"app_id,omitempty"` + ProjectId *int64 `protobuf:"varint,10,opt,name=project_id" json:"project_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateSocketRequest) Reset() { *m = CreateSocketRequest{} } +func (m *CreateSocketRequest) String() string { return proto.CompactTextString(m) } +func (*CreateSocketRequest) ProtoMessage() {} + +const Default_CreateSocketRequest_ListenBacklog int32 = 0 + +func (m *CreateSocketRequest) GetFamily() CreateSocketRequest_SocketFamily { + if m != nil && m.Family != nil { + return *m.Family + } + return CreateSocketRequest_IPv4 +} + +func (m *CreateSocketRequest) GetProtocol() CreateSocketRequest_SocketProtocol { + if m != nil && m.Protocol != nil { + return *m.Protocol + } + return CreateSocketRequest_TCP +} + +func (m *CreateSocketRequest) GetSocketOptions() []*SocketOption { + if m != nil { + return m.SocketOptions + } + return nil +} + +func (m *CreateSocketRequest) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +func (m *CreateSocketRequest) GetListenBacklog() int32 { + if m != nil && m.ListenBacklog != nil { + return *m.ListenBacklog + } + return Default_CreateSocketRequest_ListenBacklog +} + +func (m *CreateSocketRequest) GetRemoteIp() *AddressPort { + if m != nil { + return m.RemoteIp + } + return nil +} + +func (m *CreateSocketRequest) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *CreateSocketRequest) GetProjectId() int64 { + if m != nil && m.ProjectId != nil { + return *m.ProjectId + } + return 0 +} + +type CreateSocketReply struct { + SocketDescriptor *string `protobuf:"bytes,1,opt,name=socket_descriptor" json:"socket_descriptor,omitempty"` + ServerAddress *AddressPort `protobuf:"bytes,3,opt,name=server_address" json:"server_address,omitempty"` + ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateSocketReply) Reset() { *m = CreateSocketReply{} } +func (m *CreateSocketReply) String() string { return 
proto.CompactTextString(m) } +func (*CreateSocketReply) ProtoMessage() {} + +var extRange_CreateSocketReply = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*CreateSocketReply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_CreateSocketReply +} +func (m *CreateSocketReply) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +func (m *CreateSocketReply) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *CreateSocketReply) GetServerAddress() *AddressPort { + if m != nil { + return m.ServerAddress + } + return nil +} + +func (m *CreateSocketReply) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type BindRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + ProxyExternalIp *AddressPort `protobuf:"bytes,2,req,name=proxy_external_ip" json:"proxy_external_ip,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BindRequest) Reset() { *m = BindRequest{} } +func (m *BindRequest) String() string { return proto.CompactTextString(m) } +func (*BindRequest) ProtoMessage() {} + +func (m *BindRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *BindRequest) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type BindReply struct { + ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BindReply) Reset() { *m = BindReply{} } +func (m *BindReply) String() string { return proto.CompactTextString(m) } +func (*BindReply) ProtoMessage() {} + +func (m *BindReply) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type GetSocketNameRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetSocketNameRequest) Reset() { *m = GetSocketNameRequest{} } +func (m *GetSocketNameRequest) String() string { return proto.CompactTextString(m) } +func (*GetSocketNameRequest) ProtoMessage() {} + +func (m *GetSocketNameRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +type GetSocketNameReply struct { + ProxyExternalIp *AddressPort `protobuf:"bytes,2,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetSocketNameReply) Reset() { *m = GetSocketNameReply{} } +func (m *GetSocketNameReply) String() string { return proto.CompactTextString(m) } +func (*GetSocketNameReply) ProtoMessage() {} + +func (m *GetSocketNameReply) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type GetPeerNameRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetPeerNameRequest) Reset() { *m = GetPeerNameRequest{} } +func (m *GetPeerNameRequest) String() string { return proto.CompactTextString(m) } +func (*GetPeerNameRequest) ProtoMessage() {} + +func (m 
*GetPeerNameRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +type GetPeerNameReply struct { + PeerIp *AddressPort `protobuf:"bytes,2,opt,name=peer_ip" json:"peer_ip,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetPeerNameReply) Reset() { *m = GetPeerNameReply{} } +func (m *GetPeerNameReply) String() string { return proto.CompactTextString(m) } +func (*GetPeerNameReply) ProtoMessage() {} + +func (m *GetPeerNameReply) GetPeerIp() *AddressPort { + if m != nil { + return m.PeerIp + } + return nil +} + +type SocketOption struct { + Level *SocketOption_SocketOptionLevel `protobuf:"varint,1,req,name=level,enum=appengine.SocketOption_SocketOptionLevel" json:"level,omitempty"` + Option *SocketOption_SocketOptionName `protobuf:"varint,2,req,name=option,enum=appengine.SocketOption_SocketOptionName" json:"option,omitempty"` + Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SocketOption) Reset() { *m = SocketOption{} } +func (m *SocketOption) String() string { return proto.CompactTextString(m) } +func (*SocketOption) ProtoMessage() {} + +func (m *SocketOption) GetLevel() SocketOption_SocketOptionLevel { + if m != nil && m.Level != nil { + return *m.Level + } + return SocketOption_SOCKET_SOL_IP +} + +func (m *SocketOption) GetOption() SocketOption_SocketOptionName { + if m != nil && m.Option != nil { + return *m.Option + } + return SocketOption_SOCKET_SO_DEBUG +} + +func (m *SocketOption) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type SetSocketOptionsRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SetSocketOptionsRequest) Reset() { *m = SetSocketOptionsRequest{} } +func (m *SetSocketOptionsRequest) String() string { return proto.CompactTextString(m) } +func (*SetSocketOptionsRequest) ProtoMessage() {} + +func (m *SetSocketOptionsRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *SetSocketOptionsRequest) GetOptions() []*SocketOption { + if m != nil { + return m.Options + } + return nil +} + +type SetSocketOptionsReply struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *SetSocketOptionsReply) Reset() { *m = SetSocketOptionsReply{} } +func (m *SetSocketOptionsReply) String() string { return proto.CompactTextString(m) } +func (*SetSocketOptionsReply) ProtoMessage() {} + +type GetSocketOptionsRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetSocketOptionsRequest) Reset() { *m = GetSocketOptionsRequest{} } +func (m *GetSocketOptionsRequest) String() string { return proto.CompactTextString(m) } +func (*GetSocketOptionsRequest) ProtoMessage() {} + +func (m *GetSocketOptionsRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *GetSocketOptionsRequest) GetOptions() []*SocketOption { + if m != nil { + return m.Options + } + return nil +} + +type GetSocketOptionsReply struct { + 
Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetSocketOptionsReply) Reset() { *m = GetSocketOptionsReply{} } +func (m *GetSocketOptionsReply) String() string { return proto.CompactTextString(m) } +func (*GetSocketOptionsReply) ProtoMessage() {} + +func (m *GetSocketOptionsReply) GetOptions() []*SocketOption { + if m != nil { + return m.Options + } + return nil +} + +type ConnectRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + RemoteIp *AddressPort `protobuf:"bytes,2,req,name=remote_ip" json:"remote_ip,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,3,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConnectRequest) Reset() { *m = ConnectRequest{} } +func (m *ConnectRequest) String() string { return proto.CompactTextString(m) } +func (*ConnectRequest) ProtoMessage() {} + +const Default_ConnectRequest_TimeoutSeconds float64 = -1 + +func (m *ConnectRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *ConnectRequest) GetRemoteIp() *AddressPort { + if m != nil { + return m.RemoteIp + } + return nil +} + +func (m *ConnectRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_ConnectRequest_TimeoutSeconds +} + +type ConnectReply struct { + ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConnectReply) Reset() { *m = ConnectReply{} } +func (m *ConnectReply) String() string { return proto.CompactTextString(m) } +func (*ConnectReply) ProtoMessage() {} + +var extRange_ConnectReply = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*ConnectReply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ConnectReply +} +func (m *ConnectReply) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +func (m *ConnectReply) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type ListenRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + Backlog *int32 `protobuf:"varint,2,req,name=backlog" json:"backlog,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListenRequest) Reset() { *m = ListenRequest{} } +func (m *ListenRequest) String() string { return proto.CompactTextString(m) } +func (*ListenRequest) ProtoMessage() {} + +func (m *ListenRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *ListenRequest) GetBacklog() int32 { + if m != nil && m.Backlog != nil { + return *m.Backlog + } + return 0 +} + +type ListenReply struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListenReply) Reset() { *m = ListenReply{} } +func (m *ListenReply) String() string { return proto.CompactTextString(m) } +func (*ListenReply) ProtoMessage() {} + +type AcceptRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + 
TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AcceptRequest) Reset() { *m = AcceptRequest{} } +func (m *AcceptRequest) String() string { return proto.CompactTextString(m) } +func (*AcceptRequest) ProtoMessage() {} + +const Default_AcceptRequest_TimeoutSeconds float64 = -1 + +func (m *AcceptRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *AcceptRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_AcceptRequest_TimeoutSeconds +} + +type AcceptReply struct { + NewSocketDescriptor []byte `protobuf:"bytes,2,opt,name=new_socket_descriptor" json:"new_socket_descriptor,omitempty"` + RemoteAddress *AddressPort `protobuf:"bytes,3,opt,name=remote_address" json:"remote_address,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AcceptReply) Reset() { *m = AcceptReply{} } +func (m *AcceptReply) String() string { return proto.CompactTextString(m) } +func (*AcceptReply) ProtoMessage() {} + +func (m *AcceptReply) GetNewSocketDescriptor() []byte { + if m != nil { + return m.NewSocketDescriptor + } + return nil +} + +func (m *AcceptReply) GetRemoteAddress() *AddressPort { + if m != nil { + return m.RemoteAddress + } + return nil +} + +type ShutDownRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + How *ShutDownRequest_How `protobuf:"varint,2,req,name=how,enum=appengine.ShutDownRequest_How" json:"how,omitempty"` + SendOffset *int64 `protobuf:"varint,3,req,name=send_offset" json:"send_offset,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ShutDownRequest) Reset() { *m = ShutDownRequest{} } +func (m *ShutDownRequest) String() string { return proto.CompactTextString(m) } +func (*ShutDownRequest) ProtoMessage() {} + +func (m *ShutDownRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *ShutDownRequest) GetHow() ShutDownRequest_How { + if m != nil && m.How != nil { + return *m.How + } + return ShutDownRequest_SOCKET_SHUT_RD +} + +func (m *ShutDownRequest) GetSendOffset() int64 { + if m != nil && m.SendOffset != nil { + return *m.SendOffset + } + return 0 +} + +type ShutDownReply struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ShutDownReply) Reset() { *m = ShutDownReply{} } +func (m *ShutDownReply) String() string { return proto.CompactTextString(m) } +func (*ShutDownReply) ProtoMessage() {} + +type CloseRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + SendOffset *int64 `protobuf:"varint,2,opt,name=send_offset,def=-1" json:"send_offset,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CloseRequest) Reset() { *m = CloseRequest{} } +func (m *CloseRequest) String() string { return proto.CompactTextString(m) } +func (*CloseRequest) ProtoMessage() {} + +const Default_CloseRequest_SendOffset int64 = -1 + +func (m *CloseRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *CloseRequest) GetSendOffset() int64 { + if m != nil && m.SendOffset != nil { + return *m.SendOffset + } + return Default_CloseRequest_SendOffset +} + +type CloseReply struct { + 
XXX_unrecognized []byte `json:"-"` +} + +func (m *CloseReply) Reset() { *m = CloseReply{} } +func (m *CloseReply) String() string { return proto.CompactTextString(m) } +func (*CloseReply) ProtoMessage() {} + +type SendRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + Data []byte `protobuf:"bytes,2,req,name=data" json:"data,omitempty"` + StreamOffset *int64 `protobuf:"varint,3,req,name=stream_offset" json:"stream_offset,omitempty"` + Flags *int32 `protobuf:"varint,4,opt,name=flags,def=0" json:"flags,omitempty"` + SendTo *AddressPort `protobuf:"bytes,5,opt,name=send_to" json:"send_to,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,6,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SendRequest) Reset() { *m = SendRequest{} } +func (m *SendRequest) String() string { return proto.CompactTextString(m) } +func (*SendRequest) ProtoMessage() {} + +const Default_SendRequest_Flags int32 = 0 +const Default_SendRequest_TimeoutSeconds float64 = -1 + +func (m *SendRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *SendRequest) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *SendRequest) GetStreamOffset() int64 { + if m != nil && m.StreamOffset != nil { + return *m.StreamOffset + } + return 0 +} + +func (m *SendRequest) GetFlags() int32 { + if m != nil && m.Flags != nil { + return *m.Flags + } + return Default_SendRequest_Flags +} + +func (m *SendRequest) GetSendTo() *AddressPort { + if m != nil { + return m.SendTo + } + return nil +} + +func (m *SendRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_SendRequest_TimeoutSeconds +} + +type SendReply struct { + DataSent *int32 `protobuf:"varint,1,opt,name=data_sent" json:"data_sent,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SendReply) Reset() { *m = SendReply{} } +func (m *SendReply) String() string { return proto.CompactTextString(m) } +func (*SendReply) ProtoMessage() {} + +func (m *SendReply) GetDataSent() int32 { + if m != nil && m.DataSent != nil { + return *m.DataSent + } + return 0 +} + +type ReceiveRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + DataSize *int32 `protobuf:"varint,2,req,name=data_size" json:"data_size,omitempty"` + Flags *int32 `protobuf:"varint,3,opt,name=flags,def=0" json:"flags,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,5,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ReceiveRequest) Reset() { *m = ReceiveRequest{} } +func (m *ReceiveRequest) String() string { return proto.CompactTextString(m) } +func (*ReceiveRequest) ProtoMessage() {} + +const Default_ReceiveRequest_Flags int32 = 0 +const Default_ReceiveRequest_TimeoutSeconds float64 = -1 + +func (m *ReceiveRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *ReceiveRequest) GetDataSize() int32 { + if m != nil && m.DataSize != nil { + return *m.DataSize + } + return 0 +} + +func (m *ReceiveRequest) GetFlags() int32 { + if m != nil && m.Flags != nil { + return *m.Flags + } + return Default_ReceiveRequest_Flags +} + +func (m *ReceiveRequest) 
GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_ReceiveRequest_TimeoutSeconds +} + +type ReceiveReply struct { + StreamOffset *int64 `protobuf:"varint,2,opt,name=stream_offset" json:"stream_offset,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"` + ReceivedFrom *AddressPort `protobuf:"bytes,4,opt,name=received_from" json:"received_from,omitempty"` + BufferSize *int32 `protobuf:"varint,5,opt,name=buffer_size" json:"buffer_size,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ReceiveReply) Reset() { *m = ReceiveReply{} } +func (m *ReceiveReply) String() string { return proto.CompactTextString(m) } +func (*ReceiveReply) ProtoMessage() {} + +func (m *ReceiveReply) GetStreamOffset() int64 { + if m != nil && m.StreamOffset != nil { + return *m.StreamOffset + } + return 0 +} + +func (m *ReceiveReply) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *ReceiveReply) GetReceivedFrom() *AddressPort { + if m != nil { + return m.ReceivedFrom + } + return nil +} + +func (m *ReceiveReply) GetBufferSize() int32 { + if m != nil && m.BufferSize != nil { + return *m.BufferSize + } + return 0 +} + +type PollEvent struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + RequestedEvents *int32 `protobuf:"varint,2,req,name=requested_events" json:"requested_events,omitempty"` + ObservedEvents *int32 `protobuf:"varint,3,req,name=observed_events" json:"observed_events,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PollEvent) Reset() { *m = PollEvent{} } +func (m *PollEvent) String() string { return proto.CompactTextString(m) } +func (*PollEvent) ProtoMessage() {} + +func (m *PollEvent) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *PollEvent) GetRequestedEvents() int32 { + if m != nil && m.RequestedEvents != nil { + return *m.RequestedEvents + } + return 0 +} + +func (m *PollEvent) GetObservedEvents() int32 { + if m != nil && m.ObservedEvents != nil { + return *m.ObservedEvents + } + return 0 +} + +type PollRequest struct { + Events []*PollEvent `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PollRequest) Reset() { *m = PollRequest{} } +func (m *PollRequest) String() string { return proto.CompactTextString(m) } +func (*PollRequest) ProtoMessage() {} + +const Default_PollRequest_TimeoutSeconds float64 = -1 + +func (m *PollRequest) GetEvents() []*PollEvent { + if m != nil { + return m.Events + } + return nil +} + +func (m *PollRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_PollRequest_TimeoutSeconds +} + +type PollReply struct { + Events []*PollEvent `protobuf:"bytes,2,rep,name=events" json:"events,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PollReply) Reset() { *m = PollReply{} } +func (m *PollReply) String() string { return proto.CompactTextString(m) } +func (*PollReply) ProtoMessage() {} + +func (m *PollReply) GetEvents() []*PollEvent { + if m != nil { + return m.Events + } + return nil +} + +type ResolveRequest struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + AddressFamilies 
[]CreateSocketRequest_SocketFamily `protobuf:"varint,2,rep,name=address_families,enum=appengine.CreateSocketRequest_SocketFamily" json:"address_families,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ResolveRequest) Reset() { *m = ResolveRequest{} } +func (m *ResolveRequest) String() string { return proto.CompactTextString(m) } +func (*ResolveRequest) ProtoMessage() {} + +func (m *ResolveRequest) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ResolveRequest) GetAddressFamilies() []CreateSocketRequest_SocketFamily { + if m != nil { + return m.AddressFamilies + } + return nil +} + +type ResolveReply struct { + PackedAddress [][]byte `protobuf:"bytes,2,rep,name=packed_address" json:"packed_address,omitempty"` + CanonicalName *string `protobuf:"bytes,3,opt,name=canonical_name" json:"canonical_name,omitempty"` + Aliases []string `protobuf:"bytes,4,rep,name=aliases" json:"aliases,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ResolveReply) Reset() { *m = ResolveReply{} } +func (m *ResolveReply) String() string { return proto.CompactTextString(m) } +func (*ResolveReply) ProtoMessage() {} + +func (m *ResolveReply) GetPackedAddress() [][]byte { + if m != nil { + return m.PackedAddress + } + return nil +} + +func (m *ResolveReply) GetCanonicalName() string { + if m != nil && m.CanonicalName != nil { + return *m.CanonicalName + } + return "" +} + +func (m *ResolveReply) GetAliases() []string { + if m != nil { + return m.Aliases + } + return nil +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.proto b/vendor/google.golang.org/appengine/internal/socket/socket_service.proto new file mode 100644 index 0000000..2fcc795 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/socket/socket_service.proto @@ -0,0 +1,460 @@ +syntax = "proto2"; +option go_package = "socket"; + +package appengine; + +message RemoteSocketServiceError { + enum ErrorCode { + SYSTEM_ERROR = 1; + GAI_ERROR = 2; + FAILURE = 4; + PERMISSION_DENIED = 5; + INVALID_REQUEST = 6; + SOCKET_CLOSED = 7; + } + + enum SystemError { + option allow_alias = true; + + SYS_SUCCESS = 0; + SYS_EPERM = 1; + SYS_ENOENT = 2; + SYS_ESRCH = 3; + SYS_EINTR = 4; + SYS_EIO = 5; + SYS_ENXIO = 6; + SYS_E2BIG = 7; + SYS_ENOEXEC = 8; + SYS_EBADF = 9; + SYS_ECHILD = 10; + SYS_EAGAIN = 11; + SYS_EWOULDBLOCK = 11; + SYS_ENOMEM = 12; + SYS_EACCES = 13; + SYS_EFAULT = 14; + SYS_ENOTBLK = 15; + SYS_EBUSY = 16; + SYS_EEXIST = 17; + SYS_EXDEV = 18; + SYS_ENODEV = 19; + SYS_ENOTDIR = 20; + SYS_EISDIR = 21; + SYS_EINVAL = 22; + SYS_ENFILE = 23; + SYS_EMFILE = 24; + SYS_ENOTTY = 25; + SYS_ETXTBSY = 26; + SYS_EFBIG = 27; + SYS_ENOSPC = 28; + SYS_ESPIPE = 29; + SYS_EROFS = 30; + SYS_EMLINK = 31; + SYS_EPIPE = 32; + SYS_EDOM = 33; + SYS_ERANGE = 34; + SYS_EDEADLK = 35; + SYS_EDEADLOCK = 35; + SYS_ENAMETOOLONG = 36; + SYS_ENOLCK = 37; + SYS_ENOSYS = 38; + SYS_ENOTEMPTY = 39; + SYS_ELOOP = 40; + SYS_ENOMSG = 42; + SYS_EIDRM = 43; + SYS_ECHRNG = 44; + SYS_EL2NSYNC = 45; + SYS_EL3HLT = 46; + SYS_EL3RST = 47; + SYS_ELNRNG = 48; + SYS_EUNATCH = 49; + SYS_ENOCSI = 50; + SYS_EL2HLT = 51; + SYS_EBADE = 52; + SYS_EBADR = 53; + SYS_EXFULL = 54; + SYS_ENOANO = 55; + SYS_EBADRQC = 56; + SYS_EBADSLT = 57; + SYS_EBFONT = 59; + SYS_ENOSTR = 60; + SYS_ENODATA = 61; + SYS_ETIME = 62; + SYS_ENOSR = 63; + SYS_ENONET = 64; + SYS_ENOPKG = 65; + SYS_EREMOTE = 66; + SYS_ENOLINK = 67; + SYS_EADV = 68; + SYS_ESRMNT = 69; + SYS_ECOMM = 70; + SYS_EPROTO = 71; 
+ SYS_EMULTIHOP = 72; + SYS_EDOTDOT = 73; + SYS_EBADMSG = 74; + SYS_EOVERFLOW = 75; + SYS_ENOTUNIQ = 76; + SYS_EBADFD = 77; + SYS_EREMCHG = 78; + SYS_ELIBACC = 79; + SYS_ELIBBAD = 80; + SYS_ELIBSCN = 81; + SYS_ELIBMAX = 82; + SYS_ELIBEXEC = 83; + SYS_EILSEQ = 84; + SYS_ERESTART = 85; + SYS_ESTRPIPE = 86; + SYS_EUSERS = 87; + SYS_ENOTSOCK = 88; + SYS_EDESTADDRREQ = 89; + SYS_EMSGSIZE = 90; + SYS_EPROTOTYPE = 91; + SYS_ENOPROTOOPT = 92; + SYS_EPROTONOSUPPORT = 93; + SYS_ESOCKTNOSUPPORT = 94; + SYS_EOPNOTSUPP = 95; + SYS_ENOTSUP = 95; + SYS_EPFNOSUPPORT = 96; + SYS_EAFNOSUPPORT = 97; + SYS_EADDRINUSE = 98; + SYS_EADDRNOTAVAIL = 99; + SYS_ENETDOWN = 100; + SYS_ENETUNREACH = 101; + SYS_ENETRESET = 102; + SYS_ECONNABORTED = 103; + SYS_ECONNRESET = 104; + SYS_ENOBUFS = 105; + SYS_EISCONN = 106; + SYS_ENOTCONN = 107; + SYS_ESHUTDOWN = 108; + SYS_ETOOMANYREFS = 109; + SYS_ETIMEDOUT = 110; + SYS_ECONNREFUSED = 111; + SYS_EHOSTDOWN = 112; + SYS_EHOSTUNREACH = 113; + SYS_EALREADY = 114; + SYS_EINPROGRESS = 115; + SYS_ESTALE = 116; + SYS_EUCLEAN = 117; + SYS_ENOTNAM = 118; + SYS_ENAVAIL = 119; + SYS_EISNAM = 120; + SYS_EREMOTEIO = 121; + SYS_EDQUOT = 122; + SYS_ENOMEDIUM = 123; + SYS_EMEDIUMTYPE = 124; + SYS_ECANCELED = 125; + SYS_ENOKEY = 126; + SYS_EKEYEXPIRED = 127; + SYS_EKEYREVOKED = 128; + SYS_EKEYREJECTED = 129; + SYS_EOWNERDEAD = 130; + SYS_ENOTRECOVERABLE = 131; + SYS_ERFKILL = 132; + } + + optional int32 system_error = 1 [default=0]; + optional string error_detail = 2; +} + +message AddressPort { + required int32 port = 1; + optional bytes packed_address = 2; + + optional string hostname_hint = 3; +} + + + +message CreateSocketRequest { + enum SocketFamily { + IPv4 = 1; + IPv6 = 2; + } + + enum SocketProtocol { + TCP = 1; + UDP = 2; + } + + required SocketFamily family = 1; + required SocketProtocol protocol = 2; + + repeated SocketOption socket_options = 3; + + optional AddressPort proxy_external_ip = 4; + + optional int32 listen_backlog = 5 [default=0]; + + optional AddressPort remote_ip = 6; + + optional string app_id = 9; + + optional int64 project_id = 10; +} + +message CreateSocketReply { + optional string socket_descriptor = 1; + + optional AddressPort server_address = 3; + + optional AddressPort proxy_external_ip = 4; + + extensions 1000 to max; +} + + + +message BindRequest { + required string socket_descriptor = 1; + required AddressPort proxy_external_ip = 2; +} + +message BindReply { + optional AddressPort proxy_external_ip = 1; +} + + + +message GetSocketNameRequest { + required string socket_descriptor = 1; +} + +message GetSocketNameReply { + optional AddressPort proxy_external_ip = 2; +} + + + +message GetPeerNameRequest { + required string socket_descriptor = 1; +} + +message GetPeerNameReply { + optional AddressPort peer_ip = 2; +} + + +message SocketOption { + + enum SocketOptionLevel { + SOCKET_SOL_IP = 0; + SOCKET_SOL_SOCKET = 1; + SOCKET_SOL_TCP = 6; + SOCKET_SOL_UDP = 17; + } + + enum SocketOptionName { + option allow_alias = true; + + SOCKET_SO_DEBUG = 1; + SOCKET_SO_REUSEADDR = 2; + SOCKET_SO_TYPE = 3; + SOCKET_SO_ERROR = 4; + SOCKET_SO_DONTROUTE = 5; + SOCKET_SO_BROADCAST = 6; + SOCKET_SO_SNDBUF = 7; + SOCKET_SO_RCVBUF = 8; + SOCKET_SO_KEEPALIVE = 9; + SOCKET_SO_OOBINLINE = 10; + SOCKET_SO_LINGER = 13; + SOCKET_SO_RCVTIMEO = 20; + SOCKET_SO_SNDTIMEO = 21; + + SOCKET_IP_TOS = 1; + SOCKET_IP_TTL = 2; + SOCKET_IP_HDRINCL = 3; + SOCKET_IP_OPTIONS = 4; + + SOCKET_TCP_NODELAY = 1; + SOCKET_TCP_MAXSEG = 2; + SOCKET_TCP_CORK = 3; + SOCKET_TCP_KEEPIDLE = 4; + 
SOCKET_TCP_KEEPINTVL = 5; + SOCKET_TCP_KEEPCNT = 6; + SOCKET_TCP_SYNCNT = 7; + SOCKET_TCP_LINGER2 = 8; + SOCKET_TCP_DEFER_ACCEPT = 9; + SOCKET_TCP_WINDOW_CLAMP = 10; + SOCKET_TCP_INFO = 11; + SOCKET_TCP_QUICKACK = 12; + } + + required SocketOptionLevel level = 1; + required SocketOptionName option = 2; + required bytes value = 3; +} + + +message SetSocketOptionsRequest { + required string socket_descriptor = 1; + repeated SocketOption options = 2; +} + +message SetSocketOptionsReply { +} + +message GetSocketOptionsRequest { + required string socket_descriptor = 1; + repeated SocketOption options = 2; +} + +message GetSocketOptionsReply { + repeated SocketOption options = 2; +} + + +message ConnectRequest { + required string socket_descriptor = 1; + required AddressPort remote_ip = 2; + optional double timeout_seconds = 3 [default=-1]; +} + +message ConnectReply { + optional AddressPort proxy_external_ip = 1; + + extensions 1000 to max; +} + + +message ListenRequest { + required string socket_descriptor = 1; + required int32 backlog = 2; +} + +message ListenReply { +} + + +message AcceptRequest { + required string socket_descriptor = 1; + optional double timeout_seconds = 2 [default=-1]; +} + +message AcceptReply { + optional bytes new_socket_descriptor = 2; + optional AddressPort remote_address = 3; +} + + + +message ShutDownRequest { + enum How { + SOCKET_SHUT_RD = 1; + SOCKET_SHUT_WR = 2; + SOCKET_SHUT_RDWR = 3; + } + required string socket_descriptor = 1; + required How how = 2; + required int64 send_offset = 3; +} + +message ShutDownReply { +} + + + +message CloseRequest { + required string socket_descriptor = 1; + optional int64 send_offset = 2 [default=-1]; +} + +message CloseReply { +} + + + +message SendRequest { + required string socket_descriptor = 1; + required bytes data = 2 [ctype=CORD]; + required int64 stream_offset = 3; + optional int32 flags = 4 [default=0]; + optional AddressPort send_to = 5; + optional double timeout_seconds = 6 [default=-1]; +} + +message SendReply { + optional int32 data_sent = 1; +} + + +message ReceiveRequest { + enum Flags { + MSG_OOB = 1; + MSG_PEEK = 2; + } + required string socket_descriptor = 1; + required int32 data_size = 2; + optional int32 flags = 3 [default=0]; + optional double timeout_seconds = 5 [default=-1]; +} + +message ReceiveReply { + optional int64 stream_offset = 2; + optional bytes data = 3 [ctype=CORD]; + optional AddressPort received_from = 4; + optional int32 buffer_size = 5; +} + + + +message PollEvent { + + enum PollEventFlag { + SOCKET_POLLNONE = 0; + SOCKET_POLLIN = 1; + SOCKET_POLLPRI = 2; + SOCKET_POLLOUT = 4; + SOCKET_POLLERR = 8; + SOCKET_POLLHUP = 16; + SOCKET_POLLNVAL = 32; + SOCKET_POLLRDNORM = 64; + SOCKET_POLLRDBAND = 128; + SOCKET_POLLWRNORM = 256; + SOCKET_POLLWRBAND = 512; + SOCKET_POLLMSG = 1024; + SOCKET_POLLREMOVE = 4096; + SOCKET_POLLRDHUP = 8192; + }; + + required string socket_descriptor = 1; + required int32 requested_events = 2; + required int32 observed_events = 3; +} + +message PollRequest { + repeated PollEvent events = 1; + optional double timeout_seconds = 2 [default=-1]; +} + +message PollReply { + repeated PollEvent events = 2; +} + +message ResolveRequest { + required string name = 1; + repeated CreateSocketRequest.SocketFamily address_families = 2; +} + +message ResolveReply { + enum ErrorCode { + SOCKET_EAI_ADDRFAMILY = 1; + SOCKET_EAI_AGAIN = 2; + SOCKET_EAI_BADFLAGS = 3; + SOCKET_EAI_FAIL = 4; + SOCKET_EAI_FAMILY = 5; + SOCKET_EAI_MEMORY = 6; + SOCKET_EAI_NODATA = 7; + SOCKET_EAI_NONAME = 8; 
+    SOCKET_EAI_SERVICE = 9;
+    SOCKET_EAI_SOCKTYPE = 10;
+    SOCKET_EAI_SYSTEM = 11;
+    SOCKET_EAI_BADHINTS = 12;
+    SOCKET_EAI_PROTOCOL = 13;
+    SOCKET_EAI_OVERFLOW = 14;
+    SOCKET_EAI_MAX = 15;
+  };
+
+  repeated bytes packed_address = 2;
+  optional string canonical_name = 3;
+  repeated string aliases = 4;
+}
diff --git a/vendor/google.golang.org/appengine/internal/system/system_service.pb.go b/vendor/google.golang.org/appengine/internal/system/system_service.pb.go
new file mode 100644
index 0000000..56cc3f8
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/system/system_service.pb.go
@@ -0,0 +1,198 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/system/system_service.proto
+// DO NOT EDIT!
+
+/*
+Package system is a generated protocol buffer package.
+
+It is generated from these files:
+	google.golang.org/appengine/internal/system/system_service.proto
+
+It has these top-level messages:
+	SystemServiceError
+	SystemStat
+	GetSystemStatsRequest
+	GetSystemStatsResponse
+	StartBackgroundRequestRequest
+	StartBackgroundRequestResponse
+*/
+package system
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type SystemServiceError_ErrorCode int32
+
+const (
+	SystemServiceError_OK SystemServiceError_ErrorCode = 0
+	SystemServiceError_INTERNAL_ERROR SystemServiceError_ErrorCode = 1
+	SystemServiceError_BACKEND_REQUIRED SystemServiceError_ErrorCode = 2
+	SystemServiceError_LIMIT_REACHED SystemServiceError_ErrorCode = 3
+)
+
+var SystemServiceError_ErrorCode_name = map[int32]string{
+	0: "OK",
+	1: "INTERNAL_ERROR",
+	2: "BACKEND_REQUIRED",
+	3: "LIMIT_REACHED",
+}
+var SystemServiceError_ErrorCode_value = map[string]int32{
+	"OK": 0,
+	"INTERNAL_ERROR": 1,
+	"BACKEND_REQUIRED": 2,
+	"LIMIT_REACHED": 3,
+}
+
+func (x SystemServiceError_ErrorCode) Enum() *SystemServiceError_ErrorCode {
+	p := new(SystemServiceError_ErrorCode)
+	*p = x
+	return p
+}
+func (x SystemServiceError_ErrorCode) String() string {
+	return proto.EnumName(SystemServiceError_ErrorCode_name, int32(x))
+}
+func (x *SystemServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(SystemServiceError_ErrorCode_value, data, "SystemServiceError_ErrorCode")
+	if err != nil {
+		return err
+	}
+	*x = SystemServiceError_ErrorCode(value)
+	return nil
+}
+
+type SystemServiceError struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SystemServiceError) Reset() { *m = SystemServiceError{} }
+func (m *SystemServiceError) String() string { return proto.CompactTextString(m) }
+func (*SystemServiceError) ProtoMessage() {}
+
+type SystemStat struct {
+	// Instantaneous value of this stat.
+	Current *float64 `protobuf:"fixed64,1,opt,name=current" json:"current,omitempty"`
+	// Average over time, if this stat has an instantaneous value.
+	Average1M *float64 `protobuf:"fixed64,3,opt,name=average1m" json:"average1m,omitempty"`
+	Average10M *float64 `protobuf:"fixed64,4,opt,name=average10m" json:"average10m,omitempty"`
+	// Total value, if the stat accumulates over time.
+	Total *float64 `protobuf:"fixed64,2,opt,name=total" json:"total,omitempty"`
+	// Rate over time, if this stat accumulates.
+ Rate1M *float64 `protobuf:"fixed64,5,opt,name=rate1m" json:"rate1m,omitempty"` + Rate10M *float64 `protobuf:"fixed64,6,opt,name=rate10m" json:"rate10m,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SystemStat) Reset() { *m = SystemStat{} } +func (m *SystemStat) String() string { return proto.CompactTextString(m) } +func (*SystemStat) ProtoMessage() {} + +func (m *SystemStat) GetCurrent() float64 { + if m != nil && m.Current != nil { + return *m.Current + } + return 0 +} + +func (m *SystemStat) GetAverage1M() float64 { + if m != nil && m.Average1M != nil { + return *m.Average1M + } + return 0 +} + +func (m *SystemStat) GetAverage10M() float64 { + if m != nil && m.Average10M != nil { + return *m.Average10M + } + return 0 +} + +func (m *SystemStat) GetTotal() float64 { + if m != nil && m.Total != nil { + return *m.Total + } + return 0 +} + +func (m *SystemStat) GetRate1M() float64 { + if m != nil && m.Rate1M != nil { + return *m.Rate1M + } + return 0 +} + +func (m *SystemStat) GetRate10M() float64 { + if m != nil && m.Rate10M != nil { + return *m.Rate10M + } + return 0 +} + +type GetSystemStatsRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetSystemStatsRequest) Reset() { *m = GetSystemStatsRequest{} } +func (m *GetSystemStatsRequest) String() string { return proto.CompactTextString(m) } +func (*GetSystemStatsRequest) ProtoMessage() {} + +type GetSystemStatsResponse struct { + // CPU used by this instance, in mcycles. + Cpu *SystemStat `protobuf:"bytes,1,opt,name=cpu" json:"cpu,omitempty"` + // Physical memory (RAM) used by this instance, in megabytes. + Memory *SystemStat `protobuf:"bytes,2,opt,name=memory" json:"memory,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetSystemStatsResponse) Reset() { *m = GetSystemStatsResponse{} } +func (m *GetSystemStatsResponse) String() string { return proto.CompactTextString(m) } +func (*GetSystemStatsResponse) ProtoMessage() {} + +func (m *GetSystemStatsResponse) GetCpu() *SystemStat { + if m != nil { + return m.Cpu + } + return nil +} + +func (m *GetSystemStatsResponse) GetMemory() *SystemStat { + if m != nil { + return m.Memory + } + return nil +} + +type StartBackgroundRequestRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *StartBackgroundRequestRequest) Reset() { *m = StartBackgroundRequestRequest{} } +func (m *StartBackgroundRequestRequest) String() string { return proto.CompactTextString(m) } +func (*StartBackgroundRequestRequest) ProtoMessage() {} + +type StartBackgroundRequestResponse struct { + // Every /_ah/background request will have an X-AppEngine-BackgroundRequest + // header, whose value will be equal to this parameter, the request_id. 
+	RequestId *string `protobuf:"bytes,1,opt,name=request_id" json:"request_id,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StartBackgroundRequestResponse) Reset() { *m = StartBackgroundRequestResponse{} }
+func (m *StartBackgroundRequestResponse) String() string { return proto.CompactTextString(m) }
+func (*StartBackgroundRequestResponse) ProtoMessage() {}
+
+func (m *StartBackgroundRequestResponse) GetRequestId() string {
+	if m != nil && m.RequestId != nil {
+		return *m.RequestId
+	}
+	return ""
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/system/system_service.proto b/vendor/google.golang.org/appengine/internal/system/system_service.proto
new file mode 100644
index 0000000..32c0bf8
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/system/system_service.proto
@@ -0,0 +1,49 @@
+syntax = "proto2";
+option go_package = "system";
+
+package appengine;
+
+message SystemServiceError {
+  enum ErrorCode {
+    OK = 0;
+    INTERNAL_ERROR = 1;
+    BACKEND_REQUIRED = 2;
+    LIMIT_REACHED = 3;
+  }
+}
+
+message SystemStat {
+  // Instantaneous value of this stat.
+  optional double current = 1;
+
+  // Average over time, if this stat has an instantaneous value.
+  optional double average1m = 3;
+  optional double average10m = 4;
+
+  // Total value, if the stat accumulates over time.
+  optional double total = 2;
+
+  // Rate over time, if this stat accumulates.
+  optional double rate1m = 5;
+  optional double rate10m = 6;
+}
+
+message GetSystemStatsRequest {
+}
+
+message GetSystemStatsResponse {
+  // CPU used by this instance, in mcycles.
+  optional SystemStat cpu = 1;
+
+  // Physical memory (RAM) used by this instance, in megabytes.
+  optional SystemStat memory = 2;
+}
+
+message StartBackgroundRequestRequest {
+}
+
+message StartBackgroundRequestResponse {
+  // Every /_ah/background request will have an X-AppEngine-BackgroundRequest
+  // header, whose value will be equal to this parameter, the request_id.
+  optional string request_id = 1;
+}
diff --git a/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.pb.go b/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.pb.go
new file mode 100644
index 0000000..c3d428e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.pb.go
@@ -0,0 +1,1888 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto
+// DO NOT EDIT!
+
+/*
+Package taskqueue is a generated protocol buffer package.
+ +It is generated from these files: + google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto + +It has these top-level messages: + TaskQueueServiceError + TaskPayload + TaskQueueRetryParameters + TaskQueueAcl + TaskQueueHttpHeader + TaskQueueMode + TaskQueueAddRequest + TaskQueueAddResponse + TaskQueueBulkAddRequest + TaskQueueBulkAddResponse + TaskQueueDeleteRequest + TaskQueueDeleteResponse + TaskQueueForceRunRequest + TaskQueueForceRunResponse + TaskQueueUpdateQueueRequest + TaskQueueUpdateQueueResponse + TaskQueueFetchQueuesRequest + TaskQueueFetchQueuesResponse + TaskQueueFetchQueueStatsRequest + TaskQueueScannerQueueInfo + TaskQueueFetchQueueStatsResponse + TaskQueuePauseQueueRequest + TaskQueuePauseQueueResponse + TaskQueuePurgeQueueRequest + TaskQueuePurgeQueueResponse + TaskQueueDeleteQueueRequest + TaskQueueDeleteQueueResponse + TaskQueueDeleteGroupRequest + TaskQueueDeleteGroupResponse + TaskQueueQueryTasksRequest + TaskQueueQueryTasksResponse + TaskQueueFetchTaskRequest + TaskQueueFetchTaskResponse + TaskQueueUpdateStorageLimitRequest + TaskQueueUpdateStorageLimitResponse + TaskQueueQueryAndOwnTasksRequest + TaskQueueQueryAndOwnTasksResponse + TaskQueueModifyTaskLeaseRequest + TaskQueueModifyTaskLeaseResponse +*/ +package taskqueue + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import appengine "google.golang.org/appengine/internal/datastore" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type TaskQueueServiceError_ErrorCode int32 + +const ( + TaskQueueServiceError_OK TaskQueueServiceError_ErrorCode = 0 + TaskQueueServiceError_UNKNOWN_QUEUE TaskQueueServiceError_ErrorCode = 1 + TaskQueueServiceError_TRANSIENT_ERROR TaskQueueServiceError_ErrorCode = 2 + TaskQueueServiceError_INTERNAL_ERROR TaskQueueServiceError_ErrorCode = 3 + TaskQueueServiceError_TASK_TOO_LARGE TaskQueueServiceError_ErrorCode = 4 + TaskQueueServiceError_INVALID_TASK_NAME TaskQueueServiceError_ErrorCode = 5 + TaskQueueServiceError_INVALID_QUEUE_NAME TaskQueueServiceError_ErrorCode = 6 + TaskQueueServiceError_INVALID_URL TaskQueueServiceError_ErrorCode = 7 + TaskQueueServiceError_INVALID_QUEUE_RATE TaskQueueServiceError_ErrorCode = 8 + TaskQueueServiceError_PERMISSION_DENIED TaskQueueServiceError_ErrorCode = 9 + TaskQueueServiceError_TASK_ALREADY_EXISTS TaskQueueServiceError_ErrorCode = 10 + TaskQueueServiceError_TOMBSTONED_TASK TaskQueueServiceError_ErrorCode = 11 + TaskQueueServiceError_INVALID_ETA TaskQueueServiceError_ErrorCode = 12 + TaskQueueServiceError_INVALID_REQUEST TaskQueueServiceError_ErrorCode = 13 + TaskQueueServiceError_UNKNOWN_TASK TaskQueueServiceError_ErrorCode = 14 + TaskQueueServiceError_TOMBSTONED_QUEUE TaskQueueServiceError_ErrorCode = 15 + TaskQueueServiceError_DUPLICATE_TASK_NAME TaskQueueServiceError_ErrorCode = 16 + TaskQueueServiceError_SKIPPED TaskQueueServiceError_ErrorCode = 17 + TaskQueueServiceError_TOO_MANY_TASKS TaskQueueServiceError_ErrorCode = 18 + TaskQueueServiceError_INVALID_PAYLOAD TaskQueueServiceError_ErrorCode = 19 + TaskQueueServiceError_INVALID_RETRY_PARAMETERS TaskQueueServiceError_ErrorCode = 20 + TaskQueueServiceError_INVALID_QUEUE_MODE TaskQueueServiceError_ErrorCode = 21 + TaskQueueServiceError_ACL_LOOKUP_ERROR TaskQueueServiceError_ErrorCode = 22 + TaskQueueServiceError_TRANSACTIONAL_REQUEST_TOO_LARGE TaskQueueServiceError_ErrorCode = 23 + TaskQueueServiceError_INCORRECT_CREATOR_NAME 
TaskQueueServiceError_ErrorCode = 24 + TaskQueueServiceError_TASK_LEASE_EXPIRED TaskQueueServiceError_ErrorCode = 25 + TaskQueueServiceError_QUEUE_PAUSED TaskQueueServiceError_ErrorCode = 26 + TaskQueueServiceError_INVALID_TAG TaskQueueServiceError_ErrorCode = 27 + // Reserved range for the Datastore error codes. + // Original Datastore error code is shifted by DATASTORE_ERROR offset. + TaskQueueServiceError_DATASTORE_ERROR TaskQueueServiceError_ErrorCode = 10000 +) + +var TaskQueueServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "UNKNOWN_QUEUE", + 2: "TRANSIENT_ERROR", + 3: "INTERNAL_ERROR", + 4: "TASK_TOO_LARGE", + 5: "INVALID_TASK_NAME", + 6: "INVALID_QUEUE_NAME", + 7: "INVALID_URL", + 8: "INVALID_QUEUE_RATE", + 9: "PERMISSION_DENIED", + 10: "TASK_ALREADY_EXISTS", + 11: "TOMBSTONED_TASK", + 12: "INVALID_ETA", + 13: "INVALID_REQUEST", + 14: "UNKNOWN_TASK", + 15: "TOMBSTONED_QUEUE", + 16: "DUPLICATE_TASK_NAME", + 17: "SKIPPED", + 18: "TOO_MANY_TASKS", + 19: "INVALID_PAYLOAD", + 20: "INVALID_RETRY_PARAMETERS", + 21: "INVALID_QUEUE_MODE", + 22: "ACL_LOOKUP_ERROR", + 23: "TRANSACTIONAL_REQUEST_TOO_LARGE", + 24: "INCORRECT_CREATOR_NAME", + 25: "TASK_LEASE_EXPIRED", + 26: "QUEUE_PAUSED", + 27: "INVALID_TAG", + 10000: "DATASTORE_ERROR", +} +var TaskQueueServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "UNKNOWN_QUEUE": 1, + "TRANSIENT_ERROR": 2, + "INTERNAL_ERROR": 3, + "TASK_TOO_LARGE": 4, + "INVALID_TASK_NAME": 5, + "INVALID_QUEUE_NAME": 6, + "INVALID_URL": 7, + "INVALID_QUEUE_RATE": 8, + "PERMISSION_DENIED": 9, + "TASK_ALREADY_EXISTS": 10, + "TOMBSTONED_TASK": 11, + "INVALID_ETA": 12, + "INVALID_REQUEST": 13, + "UNKNOWN_TASK": 14, + "TOMBSTONED_QUEUE": 15, + "DUPLICATE_TASK_NAME": 16, + "SKIPPED": 17, + "TOO_MANY_TASKS": 18, + "INVALID_PAYLOAD": 19, + "INVALID_RETRY_PARAMETERS": 20, + "INVALID_QUEUE_MODE": 21, + "ACL_LOOKUP_ERROR": 22, + "TRANSACTIONAL_REQUEST_TOO_LARGE": 23, + "INCORRECT_CREATOR_NAME": 24, + "TASK_LEASE_EXPIRED": 25, + "QUEUE_PAUSED": 26, + "INVALID_TAG": 27, + "DATASTORE_ERROR": 10000, +} + +func (x TaskQueueServiceError_ErrorCode) Enum() *TaskQueueServiceError_ErrorCode { + p := new(TaskQueueServiceError_ErrorCode) + *p = x + return p +} +func (x TaskQueueServiceError_ErrorCode) String() string { + return proto.EnumName(TaskQueueServiceError_ErrorCode_name, int32(x)) +} +func (x *TaskQueueServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(TaskQueueServiceError_ErrorCode_value, data, "TaskQueueServiceError_ErrorCode") + if err != nil { + return err + } + *x = TaskQueueServiceError_ErrorCode(value) + return nil +} + +type TaskQueueMode_Mode int32 + +const ( + TaskQueueMode_PUSH TaskQueueMode_Mode = 0 + TaskQueueMode_PULL TaskQueueMode_Mode = 1 +) + +var TaskQueueMode_Mode_name = map[int32]string{ + 0: "PUSH", + 1: "PULL", +} +var TaskQueueMode_Mode_value = map[string]int32{ + "PUSH": 0, + "PULL": 1, +} + +func (x TaskQueueMode_Mode) Enum() *TaskQueueMode_Mode { + p := new(TaskQueueMode_Mode) + *p = x + return p +} +func (x TaskQueueMode_Mode) String() string { + return proto.EnumName(TaskQueueMode_Mode_name, int32(x)) +} +func (x *TaskQueueMode_Mode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(TaskQueueMode_Mode_value, data, "TaskQueueMode_Mode") + if err != nil { + return err + } + *x = TaskQueueMode_Mode(value) + return nil +} + +type TaskQueueAddRequest_RequestMethod int32 + +const ( + TaskQueueAddRequest_GET TaskQueueAddRequest_RequestMethod = 1 + 
TaskQueueAddRequest_POST TaskQueueAddRequest_RequestMethod = 2 + TaskQueueAddRequest_HEAD TaskQueueAddRequest_RequestMethod = 3 + TaskQueueAddRequest_PUT TaskQueueAddRequest_RequestMethod = 4 + TaskQueueAddRequest_DELETE TaskQueueAddRequest_RequestMethod = 5 +) + +var TaskQueueAddRequest_RequestMethod_name = map[int32]string{ + 1: "GET", + 2: "POST", + 3: "HEAD", + 4: "PUT", + 5: "DELETE", +} +var TaskQueueAddRequest_RequestMethod_value = map[string]int32{ + "GET": 1, + "POST": 2, + "HEAD": 3, + "PUT": 4, + "DELETE": 5, +} + +func (x TaskQueueAddRequest_RequestMethod) Enum() *TaskQueueAddRequest_RequestMethod { + p := new(TaskQueueAddRequest_RequestMethod) + *p = x + return p +} +func (x TaskQueueAddRequest_RequestMethod) String() string { + return proto.EnumName(TaskQueueAddRequest_RequestMethod_name, int32(x)) +} +func (x *TaskQueueAddRequest_RequestMethod) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(TaskQueueAddRequest_RequestMethod_value, data, "TaskQueueAddRequest_RequestMethod") + if err != nil { + return err + } + *x = TaskQueueAddRequest_RequestMethod(value) + return nil +} + +type TaskQueueQueryTasksResponse_Task_RequestMethod int32 + +const ( + TaskQueueQueryTasksResponse_Task_GET TaskQueueQueryTasksResponse_Task_RequestMethod = 1 + TaskQueueQueryTasksResponse_Task_POST TaskQueueQueryTasksResponse_Task_RequestMethod = 2 + TaskQueueQueryTasksResponse_Task_HEAD TaskQueueQueryTasksResponse_Task_RequestMethod = 3 + TaskQueueQueryTasksResponse_Task_PUT TaskQueueQueryTasksResponse_Task_RequestMethod = 4 + TaskQueueQueryTasksResponse_Task_DELETE TaskQueueQueryTasksResponse_Task_RequestMethod = 5 +) + +var TaskQueueQueryTasksResponse_Task_RequestMethod_name = map[int32]string{ + 1: "GET", + 2: "POST", + 3: "HEAD", + 4: "PUT", + 5: "DELETE", +} +var TaskQueueQueryTasksResponse_Task_RequestMethod_value = map[string]int32{ + "GET": 1, + "POST": 2, + "HEAD": 3, + "PUT": 4, + "DELETE": 5, +} + +func (x TaskQueueQueryTasksResponse_Task_RequestMethod) Enum() *TaskQueueQueryTasksResponse_Task_RequestMethod { + p := new(TaskQueueQueryTasksResponse_Task_RequestMethod) + *p = x + return p +} +func (x TaskQueueQueryTasksResponse_Task_RequestMethod) String() string { + return proto.EnumName(TaskQueueQueryTasksResponse_Task_RequestMethod_name, int32(x)) +} +func (x *TaskQueueQueryTasksResponse_Task_RequestMethod) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(TaskQueueQueryTasksResponse_Task_RequestMethod_value, data, "TaskQueueQueryTasksResponse_Task_RequestMethod") + if err != nil { + return err + } + *x = TaskQueueQueryTasksResponse_Task_RequestMethod(value) + return nil +} + +type TaskQueueServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueServiceError) Reset() { *m = TaskQueueServiceError{} } +func (m *TaskQueueServiceError) String() string { return proto.CompactTextString(m) } +func (*TaskQueueServiceError) ProtoMessage() {} + +type TaskPayload struct { + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskPayload) Reset() { *m = TaskPayload{} } +func (m *TaskPayload) String() string { return proto.CompactTextString(m) } +func (*TaskPayload) ProtoMessage() {} + +func (m *TaskPayload) Marshal() ([]byte, error) { + return proto.MarshalMessageSet(m.ExtensionMap()) +} +func (m *TaskPayload) Unmarshal(buf []byte) error { + return proto.UnmarshalMessageSet(buf, m.ExtensionMap()) +} +func (m *TaskPayload) MarshalJSON() ([]byte, error) { + return 
proto.MarshalMessageSetJSON(m.XXX_extensions) +} +func (m *TaskPayload) UnmarshalJSON(buf []byte) error { + return proto.UnmarshalMessageSetJSON(buf, m.XXX_extensions) +} + +// ensure TaskPayload satisfies proto.Marshaler and proto.Unmarshaler +var _ proto.Marshaler = (*TaskPayload)(nil) +var _ proto.Unmarshaler = (*TaskPayload)(nil) + +var extRange_TaskPayload = []proto.ExtensionRange{ + {10, 2147483646}, +} + +func (*TaskPayload) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_TaskPayload +} +func (m *TaskPayload) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +type TaskQueueRetryParameters struct { + RetryLimit *int32 `protobuf:"varint,1,opt,name=retry_limit" json:"retry_limit,omitempty"` + AgeLimitSec *int64 `protobuf:"varint,2,opt,name=age_limit_sec" json:"age_limit_sec,omitempty"` + MinBackoffSec *float64 `protobuf:"fixed64,3,opt,name=min_backoff_sec,def=0.1" json:"min_backoff_sec,omitempty"` + MaxBackoffSec *float64 `protobuf:"fixed64,4,opt,name=max_backoff_sec,def=3600" json:"max_backoff_sec,omitempty"` + MaxDoublings *int32 `protobuf:"varint,5,opt,name=max_doublings,def=16" json:"max_doublings,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueRetryParameters) Reset() { *m = TaskQueueRetryParameters{} } +func (m *TaskQueueRetryParameters) String() string { return proto.CompactTextString(m) } +func (*TaskQueueRetryParameters) ProtoMessage() {} + +const Default_TaskQueueRetryParameters_MinBackoffSec float64 = 0.1 +const Default_TaskQueueRetryParameters_MaxBackoffSec float64 = 3600 +const Default_TaskQueueRetryParameters_MaxDoublings int32 = 16 + +func (m *TaskQueueRetryParameters) GetRetryLimit() int32 { + if m != nil && m.RetryLimit != nil { + return *m.RetryLimit + } + return 0 +} + +func (m *TaskQueueRetryParameters) GetAgeLimitSec() int64 { + if m != nil && m.AgeLimitSec != nil { + return *m.AgeLimitSec + } + return 0 +} + +func (m *TaskQueueRetryParameters) GetMinBackoffSec() float64 { + if m != nil && m.MinBackoffSec != nil { + return *m.MinBackoffSec + } + return Default_TaskQueueRetryParameters_MinBackoffSec +} + +func (m *TaskQueueRetryParameters) GetMaxBackoffSec() float64 { + if m != nil && m.MaxBackoffSec != nil { + return *m.MaxBackoffSec + } + return Default_TaskQueueRetryParameters_MaxBackoffSec +} + +func (m *TaskQueueRetryParameters) GetMaxDoublings() int32 { + if m != nil && m.MaxDoublings != nil { + return *m.MaxDoublings + } + return Default_TaskQueueRetryParameters_MaxDoublings +} + +type TaskQueueAcl struct { + UserEmail [][]byte `protobuf:"bytes,1,rep,name=user_email" json:"user_email,omitempty"` + WriterEmail [][]byte `protobuf:"bytes,2,rep,name=writer_email" json:"writer_email,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueAcl) Reset() { *m = TaskQueueAcl{} } +func (m *TaskQueueAcl) String() string { return proto.CompactTextString(m) } +func (*TaskQueueAcl) ProtoMessage() {} + +func (m *TaskQueueAcl) GetUserEmail() [][]byte { + if m != nil { + return m.UserEmail + } + return nil +} + +func (m *TaskQueueAcl) GetWriterEmail() [][]byte { + if m != nil { + return m.WriterEmail + } + return nil +} + +type TaskQueueHttpHeader struct { + Key []byte `protobuf:"bytes,1,req,name=key" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueHttpHeader) Reset() { *m = TaskQueueHttpHeader{} } 
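The generated accessors above follow the standard proto2 pattern: each getter is safe to call on a nil receiver and, when the field is unset, returns the field's declared default, surfaced as the Default_* constants. A minimal sketch of that contract, using only TaskQueueRetryParameters from this file and the proto helpers it already imports (retryParamsDefaults is a hypothetical name, not part of the generated code):

	func retryParamsDefaults() {
		p := &TaskQueueRetryParameters{RetryLimit: proto.Int32(3)}
		_ = p.GetRetryLimit()    // 3: the field was set explicitly
		_ = p.GetMinBackoffSec() // 0.1: unset, so Default_TaskQueueRetryParameters_MinBackoffSec applies
		var unset *TaskQueueRetryParameters
		_ = unset.GetMaxDoublings() // 16: getters tolerate a nil receiver
	}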
+func (m *TaskQueueHttpHeader) String() string { return proto.CompactTextString(m) } +func (*TaskQueueHttpHeader) ProtoMessage() {} + +func (m *TaskQueueHttpHeader) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *TaskQueueHttpHeader) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type TaskQueueMode struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueMode) Reset() { *m = TaskQueueMode{} } +func (m *TaskQueueMode) String() string { return proto.CompactTextString(m) } +func (*TaskQueueMode) ProtoMessage() {} + +type TaskQueueAddRequest struct { + QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"` + TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"` + EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"` + Method *TaskQueueAddRequest_RequestMethod `protobuf:"varint,5,opt,name=method,enum=appengine.TaskQueueAddRequest_RequestMethod,def=2" json:"method,omitempty"` + Url []byte `protobuf:"bytes,4,opt,name=url" json:"url,omitempty"` + Header []*TaskQueueAddRequest_Header `protobuf:"group,6,rep,name=Header" json:"header,omitempty"` + Body []byte `protobuf:"bytes,9,opt,name=body" json:"body,omitempty"` + Transaction *appengine.Transaction `protobuf:"bytes,10,opt,name=transaction" json:"transaction,omitempty"` + AppId []byte `protobuf:"bytes,11,opt,name=app_id" json:"app_id,omitempty"` + Crontimetable *TaskQueueAddRequest_CronTimetable `protobuf:"group,12,opt,name=CronTimetable" json:"crontimetable,omitempty"` + Description []byte `protobuf:"bytes,15,opt,name=description" json:"description,omitempty"` + Payload *TaskPayload `protobuf:"bytes,16,opt,name=payload" json:"payload,omitempty"` + RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,17,opt,name=retry_parameters" json:"retry_parameters,omitempty"` + Mode *TaskQueueMode_Mode `protobuf:"varint,18,opt,name=mode,enum=appengine.TaskQueueMode_Mode,def=0" json:"mode,omitempty"` + Tag []byte `protobuf:"bytes,19,opt,name=tag" json:"tag,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueAddRequest) Reset() { *m = TaskQueueAddRequest{} } +func (m *TaskQueueAddRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueueAddRequest) ProtoMessage() {} + +const Default_TaskQueueAddRequest_Method TaskQueueAddRequest_RequestMethod = TaskQueueAddRequest_POST +const Default_TaskQueueAddRequest_Mode TaskQueueMode_Mode = TaskQueueMode_PUSH + +func (m *TaskQueueAddRequest) GetQueueName() []byte { + if m != nil { + return m.QueueName + } + return nil +} + +func (m *TaskQueueAddRequest) GetTaskName() []byte { + if m != nil { + return m.TaskName + } + return nil +} + +func (m *TaskQueueAddRequest) GetEtaUsec() int64 { + if m != nil && m.EtaUsec != nil { + return *m.EtaUsec + } + return 0 +} + +func (m *TaskQueueAddRequest) GetMethod() TaskQueueAddRequest_RequestMethod { + if m != nil && m.Method != nil { + return *m.Method + } + return Default_TaskQueueAddRequest_Method +} + +func (m *TaskQueueAddRequest) GetUrl() []byte { + if m != nil { + return m.Url + } + return nil +} + +func (m *TaskQueueAddRequest) GetHeader() []*TaskQueueAddRequest_Header { + if m != nil { + return m.Header + } + return nil +} + +func (m *TaskQueueAddRequest) GetBody() []byte { + if m != nil { + return m.Body + } + return nil +} + +func (m *TaskQueueAddRequest) GetTransaction() *appengine.Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m 
*TaskQueueAddRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +func (m *TaskQueueAddRequest) GetCrontimetable() *TaskQueueAddRequest_CronTimetable { + if m != nil { + return m.Crontimetable + } + return nil +} + +func (m *TaskQueueAddRequest) GetDescription() []byte { + if m != nil { + return m.Description + } + return nil +} + +func (m *TaskQueueAddRequest) GetPayload() *TaskPayload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *TaskQueueAddRequest) GetRetryParameters() *TaskQueueRetryParameters { + if m != nil { + return m.RetryParameters + } + return nil +} + +func (m *TaskQueueAddRequest) GetMode() TaskQueueMode_Mode { + if m != nil && m.Mode != nil { + return *m.Mode + } + return Default_TaskQueueAddRequest_Mode +} + +func (m *TaskQueueAddRequest) GetTag() []byte { + if m != nil { + return m.Tag + } + return nil +} + +type TaskQueueAddRequest_Header struct { + Key []byte `protobuf:"bytes,7,req,name=key" json:"key,omitempty"` + Value []byte `protobuf:"bytes,8,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueAddRequest_Header) Reset() { *m = TaskQueueAddRequest_Header{} } +func (m *TaskQueueAddRequest_Header) String() string { return proto.CompactTextString(m) } +func (*TaskQueueAddRequest_Header) ProtoMessage() {} + +func (m *TaskQueueAddRequest_Header) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *TaskQueueAddRequest_Header) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type TaskQueueAddRequest_CronTimetable struct { + Schedule []byte `protobuf:"bytes,13,req,name=schedule" json:"schedule,omitempty"` + Timezone []byte `protobuf:"bytes,14,req,name=timezone" json:"timezone,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueAddRequest_CronTimetable) Reset() { *m = TaskQueueAddRequest_CronTimetable{} } +func (m *TaskQueueAddRequest_CronTimetable) String() string { return proto.CompactTextString(m) } +func (*TaskQueueAddRequest_CronTimetable) ProtoMessage() {} + +func (m *TaskQueueAddRequest_CronTimetable) GetSchedule() []byte { + if m != nil { + return m.Schedule + } + return nil +} + +func (m *TaskQueueAddRequest_CronTimetable) GetTimezone() []byte { + if m != nil { + return m.Timezone + } + return nil +} + +type TaskQueueAddResponse struct { + ChosenTaskName []byte `protobuf:"bytes,1,opt,name=chosen_task_name" json:"chosen_task_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueAddResponse) Reset() { *m = TaskQueueAddResponse{} } +func (m *TaskQueueAddResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueueAddResponse) ProtoMessage() {} + +func (m *TaskQueueAddResponse) GetChosenTaskName() []byte { + if m != nil { + return m.ChosenTaskName + } + return nil +} + +type TaskQueueBulkAddRequest struct { + AddRequest []*TaskQueueAddRequest `protobuf:"bytes,1,rep,name=add_request" json:"add_request,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueBulkAddRequest) Reset() { *m = TaskQueueBulkAddRequest{} } +func (m *TaskQueueBulkAddRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueueBulkAddRequest) ProtoMessage() {} + +func (m *TaskQueueBulkAddRequest) GetAddRequest() []*TaskQueueAddRequest { + if m != nil { + return m.AddRequest + } + return nil +} + +type TaskQueueBulkAddResponse struct { + Taskresult []*TaskQueueBulkAddResponse_TaskResult `protobuf:"group,1,rep,name=TaskResult" 
json:"taskresult,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueBulkAddResponse) Reset() { *m = TaskQueueBulkAddResponse{} } +func (m *TaskQueueBulkAddResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueueBulkAddResponse) ProtoMessage() {} + +func (m *TaskQueueBulkAddResponse) GetTaskresult() []*TaskQueueBulkAddResponse_TaskResult { + if m != nil { + return m.Taskresult + } + return nil +} + +type TaskQueueBulkAddResponse_TaskResult struct { + Result *TaskQueueServiceError_ErrorCode `protobuf:"varint,2,req,name=result,enum=appengine.TaskQueueServiceError_ErrorCode" json:"result,omitempty"` + ChosenTaskName []byte `protobuf:"bytes,3,opt,name=chosen_task_name" json:"chosen_task_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueBulkAddResponse_TaskResult) Reset() { *m = TaskQueueBulkAddResponse_TaskResult{} } +func (m *TaskQueueBulkAddResponse_TaskResult) String() string { return proto.CompactTextString(m) } +func (*TaskQueueBulkAddResponse_TaskResult) ProtoMessage() {} + +func (m *TaskQueueBulkAddResponse_TaskResult) GetResult() TaskQueueServiceError_ErrorCode { + if m != nil && m.Result != nil { + return *m.Result + } + return TaskQueueServiceError_OK +} + +func (m *TaskQueueBulkAddResponse_TaskResult) GetChosenTaskName() []byte { + if m != nil { + return m.ChosenTaskName + } + return nil +} + +type TaskQueueDeleteRequest struct { + QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"` + TaskName [][]byte `protobuf:"bytes,2,rep,name=task_name" json:"task_name,omitempty"` + AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueDeleteRequest) Reset() { *m = TaskQueueDeleteRequest{} } +func (m *TaskQueueDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueueDeleteRequest) ProtoMessage() {} + +func (m *TaskQueueDeleteRequest) GetQueueName() []byte { + if m != nil { + return m.QueueName + } + return nil +} + +func (m *TaskQueueDeleteRequest) GetTaskName() [][]byte { + if m != nil { + return m.TaskName + } + return nil +} + +func (m *TaskQueueDeleteRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +type TaskQueueDeleteResponse struct { + Result []TaskQueueServiceError_ErrorCode `protobuf:"varint,3,rep,name=result,enum=appengine.TaskQueueServiceError_ErrorCode" json:"result,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueDeleteResponse) Reset() { *m = TaskQueueDeleteResponse{} } +func (m *TaskQueueDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueueDeleteResponse) ProtoMessage() {} + +func (m *TaskQueueDeleteResponse) GetResult() []TaskQueueServiceError_ErrorCode { + if m != nil { + return m.Result + } + return nil +} + +type TaskQueueForceRunRequest struct { + AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"` + QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"` + TaskName []byte `protobuf:"bytes,3,req,name=task_name" json:"task_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueForceRunRequest) Reset() { *m = TaskQueueForceRunRequest{} } +func (m *TaskQueueForceRunRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueueForceRunRequest) ProtoMessage() {} + +func (m *TaskQueueForceRunRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +func (m 
*TaskQueueForceRunRequest) GetQueueName() []byte { + if m != nil { + return m.QueueName + } + return nil +} + +func (m *TaskQueueForceRunRequest) GetTaskName() []byte { + if m != nil { + return m.TaskName + } + return nil +} + +type TaskQueueForceRunResponse struct { + Result *TaskQueueServiceError_ErrorCode `protobuf:"varint,3,req,name=result,enum=appengine.TaskQueueServiceError_ErrorCode" json:"result,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueForceRunResponse) Reset() { *m = TaskQueueForceRunResponse{} } +func (m *TaskQueueForceRunResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueueForceRunResponse) ProtoMessage() {} + +func (m *TaskQueueForceRunResponse) GetResult() TaskQueueServiceError_ErrorCode { + if m != nil && m.Result != nil { + return *m.Result + } + return TaskQueueServiceError_OK +} + +type TaskQueueUpdateQueueRequest struct { + AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"` + QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"` + BucketRefillPerSecond *float64 `protobuf:"fixed64,3,req,name=bucket_refill_per_second" json:"bucket_refill_per_second,omitempty"` + BucketCapacity *int32 `protobuf:"varint,4,req,name=bucket_capacity" json:"bucket_capacity,omitempty"` + UserSpecifiedRate *string `protobuf:"bytes,5,opt,name=user_specified_rate" json:"user_specified_rate,omitempty"` + RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,6,opt,name=retry_parameters" json:"retry_parameters,omitempty"` + MaxConcurrentRequests *int32 `protobuf:"varint,7,opt,name=max_concurrent_requests" json:"max_concurrent_requests,omitempty"` + Mode *TaskQueueMode_Mode `protobuf:"varint,8,opt,name=mode,enum=appengine.TaskQueueMode_Mode,def=0" json:"mode,omitempty"` + Acl *TaskQueueAcl `protobuf:"bytes,9,opt,name=acl" json:"acl,omitempty"` + HeaderOverride []*TaskQueueHttpHeader `protobuf:"bytes,10,rep,name=header_override" json:"header_override,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueUpdateQueueRequest) Reset() { *m = TaskQueueUpdateQueueRequest{} } +func (m *TaskQueueUpdateQueueRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueueUpdateQueueRequest) ProtoMessage() {} + +const Default_TaskQueueUpdateQueueRequest_Mode TaskQueueMode_Mode = TaskQueueMode_PUSH + +func (m *TaskQueueUpdateQueueRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +func (m *TaskQueueUpdateQueueRequest) GetQueueName() []byte { + if m != nil { + return m.QueueName + } + return nil +} + +func (m *TaskQueueUpdateQueueRequest) GetBucketRefillPerSecond() float64 { + if m != nil && m.BucketRefillPerSecond != nil { + return *m.BucketRefillPerSecond + } + return 0 +} + +func (m *TaskQueueUpdateQueueRequest) GetBucketCapacity() int32 { + if m != nil && m.BucketCapacity != nil { + return *m.BucketCapacity + } + return 0 +} + +func (m *TaskQueueUpdateQueueRequest) GetUserSpecifiedRate() string { + if m != nil && m.UserSpecifiedRate != nil { + return *m.UserSpecifiedRate + } + return "" +} + +func (m *TaskQueueUpdateQueueRequest) GetRetryParameters() *TaskQueueRetryParameters { + if m != nil { + return m.RetryParameters + } + return nil +} + +func (m *TaskQueueUpdateQueueRequest) GetMaxConcurrentRequests() int32 { + if m != nil && m.MaxConcurrentRequests != nil { + return *m.MaxConcurrentRequests + } + return 0 +} + +func (m *TaskQueueUpdateQueueRequest) GetMode() TaskQueueMode_Mode { + if m != nil && m.Mode != nil { + return 
*m.Mode + } + return Default_TaskQueueUpdateQueueRequest_Mode +} + +func (m *TaskQueueUpdateQueueRequest) GetAcl() *TaskQueueAcl { + if m != nil { + return m.Acl + } + return nil +} + +func (m *TaskQueueUpdateQueueRequest) GetHeaderOverride() []*TaskQueueHttpHeader { + if m != nil { + return m.HeaderOverride + } + return nil +} + +type TaskQueueUpdateQueueResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueUpdateQueueResponse) Reset() { *m = TaskQueueUpdateQueueResponse{} } +func (m *TaskQueueUpdateQueueResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueueUpdateQueueResponse) ProtoMessage() {} + +type TaskQueueFetchQueuesRequest struct { + AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"` + MaxRows *int32 `protobuf:"varint,2,req,name=max_rows" json:"max_rows,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueFetchQueuesRequest) Reset() { *m = TaskQueueFetchQueuesRequest{} } +func (m *TaskQueueFetchQueuesRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueueFetchQueuesRequest) ProtoMessage() {} + +func (m *TaskQueueFetchQueuesRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +func (m *TaskQueueFetchQueuesRequest) GetMaxRows() int32 { + if m != nil && m.MaxRows != nil { + return *m.MaxRows + } + return 0 +} + +type TaskQueueFetchQueuesResponse struct { + Queue []*TaskQueueFetchQueuesResponse_Queue `protobuf:"group,1,rep,name=Queue" json:"queue,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueFetchQueuesResponse) Reset() { *m = TaskQueueFetchQueuesResponse{} } +func (m *TaskQueueFetchQueuesResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueueFetchQueuesResponse) ProtoMessage() {} + +func (m *TaskQueueFetchQueuesResponse) GetQueue() []*TaskQueueFetchQueuesResponse_Queue { + if m != nil { + return m.Queue + } + return nil +} + +type TaskQueueFetchQueuesResponse_Queue struct { + QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"` + BucketRefillPerSecond *float64 `protobuf:"fixed64,3,req,name=bucket_refill_per_second" json:"bucket_refill_per_second,omitempty"` + BucketCapacity *float64 `protobuf:"fixed64,4,req,name=bucket_capacity" json:"bucket_capacity,omitempty"` + UserSpecifiedRate *string `protobuf:"bytes,5,opt,name=user_specified_rate" json:"user_specified_rate,omitempty"` + Paused *bool `protobuf:"varint,6,req,name=paused,def=0" json:"paused,omitempty"` + RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,7,opt,name=retry_parameters" json:"retry_parameters,omitempty"` + MaxConcurrentRequests *int32 `protobuf:"varint,8,opt,name=max_concurrent_requests" json:"max_concurrent_requests,omitempty"` + Mode *TaskQueueMode_Mode `protobuf:"varint,9,opt,name=mode,enum=appengine.TaskQueueMode_Mode,def=0" json:"mode,omitempty"` + Acl *TaskQueueAcl `protobuf:"bytes,10,opt,name=acl" json:"acl,omitempty"` + HeaderOverride []*TaskQueueHttpHeader `protobuf:"bytes,11,rep,name=header_override" json:"header_override,omitempty"` + CreatorName *string `protobuf:"bytes,12,opt,name=creator_name,def=apphosting" json:"creator_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueFetchQueuesResponse_Queue) Reset() { *m = TaskQueueFetchQueuesResponse_Queue{} } +func (m *TaskQueueFetchQueuesResponse_Queue) String() string { return proto.CompactTextString(m) } +func (*TaskQueueFetchQueuesResponse_Queue) ProtoMessage() {} + +const 
Default_TaskQueueFetchQueuesResponse_Queue_Paused bool = false +const Default_TaskQueueFetchQueuesResponse_Queue_Mode TaskQueueMode_Mode = TaskQueueMode_PUSH +const Default_TaskQueueFetchQueuesResponse_Queue_CreatorName string = "apphosting" + +func (m *TaskQueueFetchQueuesResponse_Queue) GetQueueName() []byte { + if m != nil { + return m.QueueName + } + return nil +} + +func (m *TaskQueueFetchQueuesResponse_Queue) GetBucketRefillPerSecond() float64 { + if m != nil && m.BucketRefillPerSecond != nil { + return *m.BucketRefillPerSecond + } + return 0 +} + +func (m *TaskQueueFetchQueuesResponse_Queue) GetBucketCapacity() float64 { + if m != nil && m.BucketCapacity != nil { + return *m.BucketCapacity + } + return 0 +} + +func (m *TaskQueueFetchQueuesResponse_Queue) GetUserSpecifiedRate() string { + if m != nil && m.UserSpecifiedRate != nil { + return *m.UserSpecifiedRate + } + return "" +} + +func (m *TaskQueueFetchQueuesResponse_Queue) GetPaused() bool { + if m != nil && m.Paused != nil { + return *m.Paused + } + return Default_TaskQueueFetchQueuesResponse_Queue_Paused +} + +func (m *TaskQueueFetchQueuesResponse_Queue) GetRetryParameters() *TaskQueueRetryParameters { + if m != nil { + return m.RetryParameters + } + return nil +} + +func (m *TaskQueueFetchQueuesResponse_Queue) GetMaxConcurrentRequests() int32 { + if m != nil && m.MaxConcurrentRequests != nil { + return *m.MaxConcurrentRequests + } + return 0 +} + +func (m *TaskQueueFetchQueuesResponse_Queue) GetMode() TaskQueueMode_Mode { + if m != nil && m.Mode != nil { + return *m.Mode + } + return Default_TaskQueueFetchQueuesResponse_Queue_Mode +} + +func (m *TaskQueueFetchQueuesResponse_Queue) GetAcl() *TaskQueueAcl { + if m != nil { + return m.Acl + } + return nil +} + +func (m *TaskQueueFetchQueuesResponse_Queue) GetHeaderOverride() []*TaskQueueHttpHeader { + if m != nil { + return m.HeaderOverride + } + return nil +} + +func (m *TaskQueueFetchQueuesResponse_Queue) GetCreatorName() string { + if m != nil && m.CreatorName != nil { + return *m.CreatorName + } + return Default_TaskQueueFetchQueuesResponse_Queue_CreatorName +} + +type TaskQueueFetchQueueStatsRequest struct { + AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"` + QueueName [][]byte `protobuf:"bytes,2,rep,name=queue_name" json:"queue_name,omitempty"` + MaxNumTasks *int32 `protobuf:"varint,3,opt,name=max_num_tasks,def=0" json:"max_num_tasks,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueFetchQueueStatsRequest) Reset() { *m = TaskQueueFetchQueueStatsRequest{} } +func (m *TaskQueueFetchQueueStatsRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueueFetchQueueStatsRequest) ProtoMessage() {} + +const Default_TaskQueueFetchQueueStatsRequest_MaxNumTasks int32 = 0 + +func (m *TaskQueueFetchQueueStatsRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +func (m *TaskQueueFetchQueueStatsRequest) GetQueueName() [][]byte { + if m != nil { + return m.QueueName + } + return nil +} + +func (m *TaskQueueFetchQueueStatsRequest) GetMaxNumTasks() int32 { + if m != nil && m.MaxNumTasks != nil { + return *m.MaxNumTasks + } + return Default_TaskQueueFetchQueueStatsRequest_MaxNumTasks +} + +type TaskQueueScannerQueueInfo struct { + ExecutedLastMinute *int64 `protobuf:"varint,1,req,name=executed_last_minute" json:"executed_last_minute,omitempty"` + ExecutedLastHour *int64 `protobuf:"varint,2,req,name=executed_last_hour" json:"executed_last_hour,omitempty"` + SamplingDurationSeconds *float64 
`protobuf:"fixed64,3,req,name=sampling_duration_seconds" json:"sampling_duration_seconds,omitempty"` + RequestsInFlight *int32 `protobuf:"varint,4,opt,name=requests_in_flight" json:"requests_in_flight,omitempty"` + EnforcedRate *float64 `protobuf:"fixed64,5,opt,name=enforced_rate" json:"enforced_rate,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueScannerQueueInfo) Reset() { *m = TaskQueueScannerQueueInfo{} } +func (m *TaskQueueScannerQueueInfo) String() string { return proto.CompactTextString(m) } +func (*TaskQueueScannerQueueInfo) ProtoMessage() {} + +func (m *TaskQueueScannerQueueInfo) GetExecutedLastMinute() int64 { + if m != nil && m.ExecutedLastMinute != nil { + return *m.ExecutedLastMinute + } + return 0 +} + +func (m *TaskQueueScannerQueueInfo) GetExecutedLastHour() int64 { + if m != nil && m.ExecutedLastHour != nil { + return *m.ExecutedLastHour + } + return 0 +} + +func (m *TaskQueueScannerQueueInfo) GetSamplingDurationSeconds() float64 { + if m != nil && m.SamplingDurationSeconds != nil { + return *m.SamplingDurationSeconds + } + return 0 +} + +func (m *TaskQueueScannerQueueInfo) GetRequestsInFlight() int32 { + if m != nil && m.RequestsInFlight != nil { + return *m.RequestsInFlight + } + return 0 +} + +func (m *TaskQueueScannerQueueInfo) GetEnforcedRate() float64 { + if m != nil && m.EnforcedRate != nil { + return *m.EnforcedRate + } + return 0 +} + +type TaskQueueFetchQueueStatsResponse struct { + Queuestats []*TaskQueueFetchQueueStatsResponse_QueueStats `protobuf:"group,1,rep,name=QueueStats" json:"queuestats,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueFetchQueueStatsResponse) Reset() { *m = TaskQueueFetchQueueStatsResponse{} } +func (m *TaskQueueFetchQueueStatsResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueueFetchQueueStatsResponse) ProtoMessage() {} + +func (m *TaskQueueFetchQueueStatsResponse) GetQueuestats() []*TaskQueueFetchQueueStatsResponse_QueueStats { + if m != nil { + return m.Queuestats + } + return nil +} + +type TaskQueueFetchQueueStatsResponse_QueueStats struct { + NumTasks *int32 `protobuf:"varint,2,req,name=num_tasks" json:"num_tasks,omitempty"` + OldestEtaUsec *int64 `protobuf:"varint,3,req,name=oldest_eta_usec" json:"oldest_eta_usec,omitempty"` + ScannerInfo *TaskQueueScannerQueueInfo `protobuf:"bytes,4,opt,name=scanner_info" json:"scanner_info,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueFetchQueueStatsResponse_QueueStats) Reset() { + *m = TaskQueueFetchQueueStatsResponse_QueueStats{} +} +func (m *TaskQueueFetchQueueStatsResponse_QueueStats) String() string { + return proto.CompactTextString(m) +} +func (*TaskQueueFetchQueueStatsResponse_QueueStats) ProtoMessage() {} + +func (m *TaskQueueFetchQueueStatsResponse_QueueStats) GetNumTasks() int32 { + if m != nil && m.NumTasks != nil { + return *m.NumTasks + } + return 0 +} + +func (m *TaskQueueFetchQueueStatsResponse_QueueStats) GetOldestEtaUsec() int64 { + if m != nil && m.OldestEtaUsec != nil { + return *m.OldestEtaUsec + } + return 0 +} + +func (m *TaskQueueFetchQueueStatsResponse_QueueStats) GetScannerInfo() *TaskQueueScannerQueueInfo { + if m != nil { + return m.ScannerInfo + } + return nil +} + +type TaskQueuePauseQueueRequest struct { + AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` + QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"` + Pause *bool `protobuf:"varint,3,req,name=pause" json:"pause,omitempty"` + XXX_unrecognized []byte 
`json:"-"` +} + +func (m *TaskQueuePauseQueueRequest) Reset() { *m = TaskQueuePauseQueueRequest{} } +func (m *TaskQueuePauseQueueRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueuePauseQueueRequest) ProtoMessage() {} + +func (m *TaskQueuePauseQueueRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +func (m *TaskQueuePauseQueueRequest) GetQueueName() []byte { + if m != nil { + return m.QueueName + } + return nil +} + +func (m *TaskQueuePauseQueueRequest) GetPause() bool { + if m != nil && m.Pause != nil { + return *m.Pause + } + return false +} + +type TaskQueuePauseQueueResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueuePauseQueueResponse) Reset() { *m = TaskQueuePauseQueueResponse{} } +func (m *TaskQueuePauseQueueResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueuePauseQueueResponse) ProtoMessage() {} + +type TaskQueuePurgeQueueRequest struct { + AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"` + QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueuePurgeQueueRequest) Reset() { *m = TaskQueuePurgeQueueRequest{} } +func (m *TaskQueuePurgeQueueRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueuePurgeQueueRequest) ProtoMessage() {} + +func (m *TaskQueuePurgeQueueRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +func (m *TaskQueuePurgeQueueRequest) GetQueueName() []byte { + if m != nil { + return m.QueueName + } + return nil +} + +type TaskQueuePurgeQueueResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueuePurgeQueueResponse) Reset() { *m = TaskQueuePurgeQueueResponse{} } +func (m *TaskQueuePurgeQueueResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueuePurgeQueueResponse) ProtoMessage() {} + +type TaskQueueDeleteQueueRequest struct { + AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` + QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueDeleteQueueRequest) Reset() { *m = TaskQueueDeleteQueueRequest{} } +func (m *TaskQueueDeleteQueueRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueueDeleteQueueRequest) ProtoMessage() {} + +func (m *TaskQueueDeleteQueueRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +func (m *TaskQueueDeleteQueueRequest) GetQueueName() []byte { + if m != nil { + return m.QueueName + } + return nil +} + +type TaskQueueDeleteQueueResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueDeleteQueueResponse) Reset() { *m = TaskQueueDeleteQueueResponse{} } +func (m *TaskQueueDeleteQueueResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueueDeleteQueueResponse) ProtoMessage() {} + +type TaskQueueDeleteGroupRequest struct { + AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueDeleteGroupRequest) Reset() { *m = TaskQueueDeleteGroupRequest{} } +func (m *TaskQueueDeleteGroupRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueueDeleteGroupRequest) ProtoMessage() {} + +func (m *TaskQueueDeleteGroupRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +type 
TaskQueueDeleteGroupResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueDeleteGroupResponse) Reset() { *m = TaskQueueDeleteGroupResponse{} } +func (m *TaskQueueDeleteGroupResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueueDeleteGroupResponse) ProtoMessage() {} + +type TaskQueueQueryTasksRequest struct { + AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"` + QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"` + StartTaskName []byte `protobuf:"bytes,3,opt,name=start_task_name" json:"start_task_name,omitempty"` + StartEtaUsec *int64 `protobuf:"varint,4,opt,name=start_eta_usec" json:"start_eta_usec,omitempty"` + StartTag []byte `protobuf:"bytes,6,opt,name=start_tag" json:"start_tag,omitempty"` + MaxRows *int32 `protobuf:"varint,5,opt,name=max_rows,def=1" json:"max_rows,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueQueryTasksRequest) Reset() { *m = TaskQueueQueryTasksRequest{} } +func (m *TaskQueueQueryTasksRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueueQueryTasksRequest) ProtoMessage() {} + +const Default_TaskQueueQueryTasksRequest_MaxRows int32 = 1 + +func (m *TaskQueueQueryTasksRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +func (m *TaskQueueQueryTasksRequest) GetQueueName() []byte { + if m != nil { + return m.QueueName + } + return nil +} + +func (m *TaskQueueQueryTasksRequest) GetStartTaskName() []byte { + if m != nil { + return m.StartTaskName + } + return nil +} + +func (m *TaskQueueQueryTasksRequest) GetStartEtaUsec() int64 { + if m != nil && m.StartEtaUsec != nil { + return *m.StartEtaUsec + } + return 0 +} + +func (m *TaskQueueQueryTasksRequest) GetStartTag() []byte { + if m != nil { + return m.StartTag + } + return nil +} + +func (m *TaskQueueQueryTasksRequest) GetMaxRows() int32 { + if m != nil && m.MaxRows != nil { + return *m.MaxRows + } + return Default_TaskQueueQueryTasksRequest_MaxRows +} + +type TaskQueueQueryTasksResponse struct { + Task []*TaskQueueQueryTasksResponse_Task `protobuf:"group,1,rep,name=Task" json:"task,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueQueryTasksResponse) Reset() { *m = TaskQueueQueryTasksResponse{} } +func (m *TaskQueueQueryTasksResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueueQueryTasksResponse) ProtoMessage() {} + +func (m *TaskQueueQueryTasksResponse) GetTask() []*TaskQueueQueryTasksResponse_Task { + if m != nil { + return m.Task + } + return nil +} + +type TaskQueueQueryTasksResponse_Task struct { + TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"` + EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"` + Url []byte `protobuf:"bytes,4,opt,name=url" json:"url,omitempty"` + Method *TaskQueueQueryTasksResponse_Task_RequestMethod `protobuf:"varint,5,opt,name=method,enum=appengine.TaskQueueQueryTasksResponse_Task_RequestMethod" json:"method,omitempty"` + RetryCount *int32 `protobuf:"varint,6,opt,name=retry_count,def=0" json:"retry_count,omitempty"` + Header []*TaskQueueQueryTasksResponse_Task_Header `protobuf:"group,7,rep,name=Header" json:"header,omitempty"` + BodySize *int32 `protobuf:"varint,10,opt,name=body_size" json:"body_size,omitempty"` + Body []byte `protobuf:"bytes,11,opt,name=body" json:"body,omitempty"` + CreationTimeUsec *int64 `protobuf:"varint,12,req,name=creation_time_usec" 
json:"creation_time_usec,omitempty"` + Crontimetable *TaskQueueQueryTasksResponse_Task_CronTimetable `protobuf:"group,13,opt,name=CronTimetable" json:"crontimetable,omitempty"` + Runlog *TaskQueueQueryTasksResponse_Task_RunLog `protobuf:"group,16,opt,name=RunLog" json:"runlog,omitempty"` + Description []byte `protobuf:"bytes,21,opt,name=description" json:"description,omitempty"` + Payload *TaskPayload `protobuf:"bytes,22,opt,name=payload" json:"payload,omitempty"` + RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,23,opt,name=retry_parameters" json:"retry_parameters,omitempty"` + FirstTryUsec *int64 `protobuf:"varint,24,opt,name=first_try_usec" json:"first_try_usec,omitempty"` + Tag []byte `protobuf:"bytes,25,opt,name=tag" json:"tag,omitempty"` + ExecutionCount *int32 `protobuf:"varint,26,opt,name=execution_count,def=0" json:"execution_count,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueQueryTasksResponse_Task) Reset() { *m = TaskQueueQueryTasksResponse_Task{} } +func (m *TaskQueueQueryTasksResponse_Task) String() string { return proto.CompactTextString(m) } +func (*TaskQueueQueryTasksResponse_Task) ProtoMessage() {} + +const Default_TaskQueueQueryTasksResponse_Task_RetryCount int32 = 0 +const Default_TaskQueueQueryTasksResponse_Task_ExecutionCount int32 = 0 + +func (m *TaskQueueQueryTasksResponse_Task) GetTaskName() []byte { + if m != nil { + return m.TaskName + } + return nil +} + +func (m *TaskQueueQueryTasksResponse_Task) GetEtaUsec() int64 { + if m != nil && m.EtaUsec != nil { + return *m.EtaUsec + } + return 0 +} + +func (m *TaskQueueQueryTasksResponse_Task) GetUrl() []byte { + if m != nil { + return m.Url + } + return nil +} + +func (m *TaskQueueQueryTasksResponse_Task) GetMethod() TaskQueueQueryTasksResponse_Task_RequestMethod { + if m != nil && m.Method != nil { + return *m.Method + } + return TaskQueueQueryTasksResponse_Task_GET +} + +func (m *TaskQueueQueryTasksResponse_Task) GetRetryCount() int32 { + if m != nil && m.RetryCount != nil { + return *m.RetryCount + } + return Default_TaskQueueQueryTasksResponse_Task_RetryCount +} + +func (m *TaskQueueQueryTasksResponse_Task) GetHeader() []*TaskQueueQueryTasksResponse_Task_Header { + if m != nil { + return m.Header + } + return nil +} + +func (m *TaskQueueQueryTasksResponse_Task) GetBodySize() int32 { + if m != nil && m.BodySize != nil { + return *m.BodySize + } + return 0 +} + +func (m *TaskQueueQueryTasksResponse_Task) GetBody() []byte { + if m != nil { + return m.Body + } + return nil +} + +func (m *TaskQueueQueryTasksResponse_Task) GetCreationTimeUsec() int64 { + if m != nil && m.CreationTimeUsec != nil { + return *m.CreationTimeUsec + } + return 0 +} + +func (m *TaskQueueQueryTasksResponse_Task) GetCrontimetable() *TaskQueueQueryTasksResponse_Task_CronTimetable { + if m != nil { + return m.Crontimetable + } + return nil +} + +func (m *TaskQueueQueryTasksResponse_Task) GetRunlog() *TaskQueueQueryTasksResponse_Task_RunLog { + if m != nil { + return m.Runlog + } + return nil +} + +func (m *TaskQueueQueryTasksResponse_Task) GetDescription() []byte { + if m != nil { + return m.Description + } + return nil +} + +func (m *TaskQueueQueryTasksResponse_Task) GetPayload() *TaskPayload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *TaskQueueQueryTasksResponse_Task) GetRetryParameters() *TaskQueueRetryParameters { + if m != nil { + return m.RetryParameters + } + return nil +} + +func (m *TaskQueueQueryTasksResponse_Task) GetFirstTryUsec() int64 { + if m != nil && m.FirstTryUsec != 
nil { + return *m.FirstTryUsec + } + return 0 +} + +func (m *TaskQueueQueryTasksResponse_Task) GetTag() []byte { + if m != nil { + return m.Tag + } + return nil +} + +func (m *TaskQueueQueryTasksResponse_Task) GetExecutionCount() int32 { + if m != nil && m.ExecutionCount != nil { + return *m.ExecutionCount + } + return Default_TaskQueueQueryTasksResponse_Task_ExecutionCount +} + +type TaskQueueQueryTasksResponse_Task_Header struct { + Key []byte `protobuf:"bytes,8,req,name=key" json:"key,omitempty"` + Value []byte `protobuf:"bytes,9,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueQueryTasksResponse_Task_Header) Reset() { + *m = TaskQueueQueryTasksResponse_Task_Header{} +} +func (m *TaskQueueQueryTasksResponse_Task_Header) String() string { return proto.CompactTextString(m) } +func (*TaskQueueQueryTasksResponse_Task_Header) ProtoMessage() {} + +func (m *TaskQueueQueryTasksResponse_Task_Header) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *TaskQueueQueryTasksResponse_Task_Header) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type TaskQueueQueryTasksResponse_Task_CronTimetable struct { + Schedule []byte `protobuf:"bytes,14,req,name=schedule" json:"schedule,omitempty"` + Timezone []byte `protobuf:"bytes,15,req,name=timezone" json:"timezone,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) Reset() { + *m = TaskQueueQueryTasksResponse_Task_CronTimetable{} +} +func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) String() string { + return proto.CompactTextString(m) +} +func (*TaskQueueQueryTasksResponse_Task_CronTimetable) ProtoMessage() {} + +func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) GetSchedule() []byte { + if m != nil { + return m.Schedule + } + return nil +} + +func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) GetTimezone() []byte { + if m != nil { + return m.Timezone + } + return nil +} + +type TaskQueueQueryTasksResponse_Task_RunLog struct { + DispatchedUsec *int64 `protobuf:"varint,17,req,name=dispatched_usec" json:"dispatched_usec,omitempty"` + LagUsec *int64 `protobuf:"varint,18,req,name=lag_usec" json:"lag_usec,omitempty"` + ElapsedUsec *int64 `protobuf:"varint,19,req,name=elapsed_usec" json:"elapsed_usec,omitempty"` + ResponseCode *int64 `protobuf:"varint,20,opt,name=response_code" json:"response_code,omitempty"` + RetryReason *string `protobuf:"bytes,27,opt,name=retry_reason" json:"retry_reason,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueQueryTasksResponse_Task_RunLog) Reset() { + *m = TaskQueueQueryTasksResponse_Task_RunLog{} +} +func (m *TaskQueueQueryTasksResponse_Task_RunLog) String() string { return proto.CompactTextString(m) } +func (*TaskQueueQueryTasksResponse_Task_RunLog) ProtoMessage() {} + +func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetDispatchedUsec() int64 { + if m != nil && m.DispatchedUsec != nil { + return *m.DispatchedUsec + } + return 0 +} + +func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetLagUsec() int64 { + if m != nil && m.LagUsec != nil { + return *m.LagUsec + } + return 0 +} + +func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetElapsedUsec() int64 { + if m != nil && m.ElapsedUsec != nil { + return *m.ElapsedUsec + } + return 0 +} + +func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetResponseCode() int64 { + if m != nil && m.ResponseCode != nil { + return *m.ResponseCode + } + return 0 +} + +func (m 
*TaskQueueQueryTasksResponse_Task_RunLog) GetRetryReason() string { + if m != nil && m.RetryReason != nil { + return *m.RetryReason + } + return "" +} + +type TaskQueueFetchTaskRequest struct { + AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"` + QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"` + TaskName []byte `protobuf:"bytes,3,req,name=task_name" json:"task_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueFetchTaskRequest) Reset() { *m = TaskQueueFetchTaskRequest{} } +func (m *TaskQueueFetchTaskRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueueFetchTaskRequest) ProtoMessage() {} + +func (m *TaskQueueFetchTaskRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +func (m *TaskQueueFetchTaskRequest) GetQueueName() []byte { + if m != nil { + return m.QueueName + } + return nil +} + +func (m *TaskQueueFetchTaskRequest) GetTaskName() []byte { + if m != nil { + return m.TaskName + } + return nil +} + +type TaskQueueFetchTaskResponse struct { + Task *TaskQueueQueryTasksResponse `protobuf:"bytes,1,req,name=task" json:"task,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueFetchTaskResponse) Reset() { *m = TaskQueueFetchTaskResponse{} } +func (m *TaskQueueFetchTaskResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueueFetchTaskResponse) ProtoMessage() {} + +func (m *TaskQueueFetchTaskResponse) GetTask() *TaskQueueQueryTasksResponse { + if m != nil { + return m.Task + } + return nil +} + +type TaskQueueUpdateStorageLimitRequest struct { + AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` + Limit *int64 `protobuf:"varint,2,req,name=limit" json:"limit,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueUpdateStorageLimitRequest) Reset() { *m = TaskQueueUpdateStorageLimitRequest{} } +func (m *TaskQueueUpdateStorageLimitRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueueUpdateStorageLimitRequest) ProtoMessage() {} + +func (m *TaskQueueUpdateStorageLimitRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +func (m *TaskQueueUpdateStorageLimitRequest) GetLimit() int64 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +type TaskQueueUpdateStorageLimitResponse struct { + NewLimit *int64 `protobuf:"varint,1,req,name=new_limit" json:"new_limit,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueUpdateStorageLimitResponse) Reset() { *m = TaskQueueUpdateStorageLimitResponse{} } +func (m *TaskQueueUpdateStorageLimitResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueueUpdateStorageLimitResponse) ProtoMessage() {} + +func (m *TaskQueueUpdateStorageLimitResponse) GetNewLimit() int64 { + if m != nil && m.NewLimit != nil { + return *m.NewLimit + } + return 0 +} + +type TaskQueueQueryAndOwnTasksRequest struct { + QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"` + LeaseSeconds *float64 `protobuf:"fixed64,2,req,name=lease_seconds" json:"lease_seconds,omitempty"` + MaxTasks *int64 `protobuf:"varint,3,req,name=max_tasks" json:"max_tasks,omitempty"` + GroupByTag *bool `protobuf:"varint,4,opt,name=group_by_tag,def=0" json:"group_by_tag,omitempty"` + Tag []byte `protobuf:"bytes,5,opt,name=tag" json:"tag,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueQueryAndOwnTasksRequest) Reset() { *m = 
TaskQueueQueryAndOwnTasksRequest{} } +func (m *TaskQueueQueryAndOwnTasksRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueueQueryAndOwnTasksRequest) ProtoMessage() {} + +const Default_TaskQueueQueryAndOwnTasksRequest_GroupByTag bool = false + +func (m *TaskQueueQueryAndOwnTasksRequest) GetQueueName() []byte { + if m != nil { + return m.QueueName + } + return nil +} + +func (m *TaskQueueQueryAndOwnTasksRequest) GetLeaseSeconds() float64 { + if m != nil && m.LeaseSeconds != nil { + return *m.LeaseSeconds + } + return 0 +} + +func (m *TaskQueueQueryAndOwnTasksRequest) GetMaxTasks() int64 { + if m != nil && m.MaxTasks != nil { + return *m.MaxTasks + } + return 0 +} + +func (m *TaskQueueQueryAndOwnTasksRequest) GetGroupByTag() bool { + if m != nil && m.GroupByTag != nil { + return *m.GroupByTag + } + return Default_TaskQueueQueryAndOwnTasksRequest_GroupByTag +} + +func (m *TaskQueueQueryAndOwnTasksRequest) GetTag() []byte { + if m != nil { + return m.Tag + } + return nil +} + +type TaskQueueQueryAndOwnTasksResponse struct { + Task []*TaskQueueQueryAndOwnTasksResponse_Task `protobuf:"group,1,rep,name=Task" json:"task,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueQueryAndOwnTasksResponse) Reset() { *m = TaskQueueQueryAndOwnTasksResponse{} } +func (m *TaskQueueQueryAndOwnTasksResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueueQueryAndOwnTasksResponse) ProtoMessage() {} + +func (m *TaskQueueQueryAndOwnTasksResponse) GetTask() []*TaskQueueQueryAndOwnTasksResponse_Task { + if m != nil { + return m.Task + } + return nil +} + +type TaskQueueQueryAndOwnTasksResponse_Task struct { + TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"` + EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"` + RetryCount *int32 `protobuf:"varint,4,opt,name=retry_count,def=0" json:"retry_count,omitempty"` + Body []byte `protobuf:"bytes,5,opt,name=body" json:"body,omitempty"` + Tag []byte `protobuf:"bytes,6,opt,name=tag" json:"tag,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueQueryAndOwnTasksResponse_Task) Reset() { + *m = TaskQueueQueryAndOwnTasksResponse_Task{} +} +func (m *TaskQueueQueryAndOwnTasksResponse_Task) String() string { return proto.CompactTextString(m) } +func (*TaskQueueQueryAndOwnTasksResponse_Task) ProtoMessage() {} + +const Default_TaskQueueQueryAndOwnTasksResponse_Task_RetryCount int32 = 0 + +func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetTaskName() []byte { + if m != nil { + return m.TaskName + } + return nil +} + +func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetEtaUsec() int64 { + if m != nil && m.EtaUsec != nil { + return *m.EtaUsec + } + return 0 +} + +func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetRetryCount() int32 { + if m != nil && m.RetryCount != nil { + return *m.RetryCount + } + return Default_TaskQueueQueryAndOwnTasksResponse_Task_RetryCount +} + +func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetBody() []byte { + if m != nil { + return m.Body + } + return nil +} + +func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetTag() []byte { + if m != nil { + return m.Tag + } + return nil +} + +type TaskQueueModifyTaskLeaseRequest struct { + QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"` + TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"` + EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"` + LeaseSeconds *float64 
`protobuf:"fixed64,4,req,name=lease_seconds" json:"lease_seconds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueModifyTaskLeaseRequest) Reset() { *m = TaskQueueModifyTaskLeaseRequest{} } +func (m *TaskQueueModifyTaskLeaseRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueueModifyTaskLeaseRequest) ProtoMessage() {} + +func (m *TaskQueueModifyTaskLeaseRequest) GetQueueName() []byte { + if m != nil { + return m.QueueName + } + return nil +} + +func (m *TaskQueueModifyTaskLeaseRequest) GetTaskName() []byte { + if m != nil { + return m.TaskName + } + return nil +} + +func (m *TaskQueueModifyTaskLeaseRequest) GetEtaUsec() int64 { + if m != nil && m.EtaUsec != nil { + return *m.EtaUsec + } + return 0 +} + +func (m *TaskQueueModifyTaskLeaseRequest) GetLeaseSeconds() float64 { + if m != nil && m.LeaseSeconds != nil { + return *m.LeaseSeconds + } + return 0 +} + +type TaskQueueModifyTaskLeaseResponse struct { + UpdatedEtaUsec *int64 `protobuf:"varint,1,req,name=updated_eta_usec" json:"updated_eta_usec,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueModifyTaskLeaseResponse) Reset() { *m = TaskQueueModifyTaskLeaseResponse{} } +func (m *TaskQueueModifyTaskLeaseResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueueModifyTaskLeaseResponse) ProtoMessage() {} + +func (m *TaskQueueModifyTaskLeaseResponse) GetUpdatedEtaUsec() int64 { + if m != nil && m.UpdatedEtaUsec != nil { + return *m.UpdatedEtaUsec + } + return 0 +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto b/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto new file mode 100644 index 0000000..419aaf5 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto @@ -0,0 +1,342 @@ +syntax = "proto2"; +option go_package = "taskqueue"; + +import "google.golang.org/appengine/internal/datastore/datastore_v3.proto"; + +package appengine; + +message TaskQueueServiceError { + enum ErrorCode { + OK = 0; + UNKNOWN_QUEUE = 1; + TRANSIENT_ERROR = 2; + INTERNAL_ERROR = 3; + TASK_TOO_LARGE = 4; + INVALID_TASK_NAME = 5; + INVALID_QUEUE_NAME = 6; + INVALID_URL = 7; + INVALID_QUEUE_RATE = 8; + PERMISSION_DENIED = 9; + TASK_ALREADY_EXISTS = 10; + TOMBSTONED_TASK = 11; + INVALID_ETA = 12; + INVALID_REQUEST = 13; + UNKNOWN_TASK = 14; + TOMBSTONED_QUEUE = 15; + DUPLICATE_TASK_NAME = 16; + SKIPPED = 17; + TOO_MANY_TASKS = 18; + INVALID_PAYLOAD = 19; + INVALID_RETRY_PARAMETERS = 20; + INVALID_QUEUE_MODE = 21; + ACL_LOOKUP_ERROR = 22; + TRANSACTIONAL_REQUEST_TOO_LARGE = 23; + INCORRECT_CREATOR_NAME = 24; + TASK_LEASE_EXPIRED = 25; + QUEUE_PAUSED = 26; + INVALID_TAG = 27; + + // Reserved range for the Datastore error codes. + // Original Datastore error code is shifted by DATASTORE_ERROR offset. 
+ DATASTORE_ERROR = 10000; + } +} + +message TaskPayload { + extensions 10 to max; + option message_set_wire_format = true; +} + +message TaskQueueRetryParameters { + optional int32 retry_limit = 1; + optional int64 age_limit_sec = 2; + + optional double min_backoff_sec = 3 [default = 0.1]; + optional double max_backoff_sec = 4 [default = 3600]; + optional int32 max_doublings = 5 [default = 16]; +} + +message TaskQueueAcl { + repeated bytes user_email = 1; + repeated bytes writer_email = 2; +} + +message TaskQueueHttpHeader { + required bytes key = 1; + required bytes value = 2; +} + +message TaskQueueMode { + enum Mode { + PUSH = 0; + PULL = 1; + } +} + +message TaskQueueAddRequest { + required bytes queue_name = 1; + required bytes task_name = 2; + required int64 eta_usec = 3; + + enum RequestMethod { + GET = 1; + POST = 2; + HEAD = 3; + PUT = 4; + DELETE = 5; + } + optional RequestMethod method = 5 [default=POST]; + + optional bytes url = 4; + + repeated group Header = 6 { + required bytes key = 7; + required bytes value = 8; + } + + optional bytes body = 9 [ctype=CORD]; + optional Transaction transaction = 10; + optional bytes app_id = 11; + + optional group CronTimetable = 12 { + required bytes schedule = 13; + required bytes timezone = 14; + } + + optional bytes description = 15; + optional TaskPayload payload = 16; + optional TaskQueueRetryParameters retry_parameters = 17; + optional TaskQueueMode.Mode mode = 18 [default=PUSH]; + optional bytes tag = 19; +} + +message TaskQueueAddResponse { + optional bytes chosen_task_name = 1; +} + +message TaskQueueBulkAddRequest { + repeated TaskQueueAddRequest add_request = 1; +} + +message TaskQueueBulkAddResponse { + repeated group TaskResult = 1 { + required TaskQueueServiceError.ErrorCode result = 2; + optional bytes chosen_task_name = 3; + } +} + +message TaskQueueDeleteRequest { + required bytes queue_name = 1; + repeated bytes task_name = 2; + optional bytes app_id = 3; +} + +message TaskQueueDeleteResponse { + repeated TaskQueueServiceError.ErrorCode result = 3; +} + +message TaskQueueForceRunRequest { + optional bytes app_id = 1; + required bytes queue_name = 2; + required bytes task_name = 3; +} + +message TaskQueueForceRunResponse { + required TaskQueueServiceError.ErrorCode result = 3; +} + +message TaskQueueUpdateQueueRequest { + optional bytes app_id = 1; + required bytes queue_name = 2; + required double bucket_refill_per_second = 3; + required int32 bucket_capacity = 4; + optional string user_specified_rate = 5; + optional TaskQueueRetryParameters retry_parameters = 6; + optional int32 max_concurrent_requests = 7; + optional TaskQueueMode.Mode mode = 8 [default = PUSH]; + optional TaskQueueAcl acl = 9; + repeated TaskQueueHttpHeader header_override = 10; +} + +message TaskQueueUpdateQueueResponse { +} + +message TaskQueueFetchQueuesRequest { + optional bytes app_id = 1; + required int32 max_rows = 2; +} + +message TaskQueueFetchQueuesResponse { + repeated group Queue = 1 { + required bytes queue_name = 2; + required double bucket_refill_per_second = 3; + required double bucket_capacity = 4; + optional string user_specified_rate = 5; + required bool paused = 6 [default=false]; + optional TaskQueueRetryParameters retry_parameters = 7; + optional int32 max_concurrent_requests = 8; + optional TaskQueueMode.Mode mode = 9 [default = PUSH]; + optional TaskQueueAcl acl = 10; + repeated TaskQueueHttpHeader header_override = 11; + optional string creator_name = 12 [ctype=CORD, default="apphosting"]; + } +} + +message 
TaskQueueFetchQueueStatsRequest { + optional bytes app_id = 1; + repeated bytes queue_name = 2; + optional int32 max_num_tasks = 3 [default = 0]; +} + +message TaskQueueScannerQueueInfo { + required int64 executed_last_minute = 1; + required int64 executed_last_hour = 2; + required double sampling_duration_seconds = 3; + optional int32 requests_in_flight = 4; + optional double enforced_rate = 5; +} + +message TaskQueueFetchQueueStatsResponse { + repeated group QueueStats = 1 { + required int32 num_tasks = 2; + required int64 oldest_eta_usec = 3; + optional TaskQueueScannerQueueInfo scanner_info = 4; + } +} +message TaskQueuePauseQueueRequest { + required bytes app_id = 1; + required bytes queue_name = 2; + required bool pause = 3; +} + +message TaskQueuePauseQueueResponse { +} + +message TaskQueuePurgeQueueRequest { + optional bytes app_id = 1; + required bytes queue_name = 2; +} + +message TaskQueuePurgeQueueResponse { +} + +message TaskQueueDeleteQueueRequest { + required bytes app_id = 1; + required bytes queue_name = 2; +} + +message TaskQueueDeleteQueueResponse { +} + +message TaskQueueDeleteGroupRequest { + required bytes app_id = 1; +} + +message TaskQueueDeleteGroupResponse { +} + +message TaskQueueQueryTasksRequest { + optional bytes app_id = 1; + required bytes queue_name = 2; + + optional bytes start_task_name = 3; + optional int64 start_eta_usec = 4; + optional bytes start_tag = 6; + optional int32 max_rows = 5 [default = 1]; +} + +message TaskQueueQueryTasksResponse { + repeated group Task = 1 { + required bytes task_name = 2; + required int64 eta_usec = 3; + optional bytes url = 4; + + enum RequestMethod { + GET = 1; + POST = 2; + HEAD = 3; + PUT = 4; + DELETE = 5; + } + optional RequestMethod method = 5; + + optional int32 retry_count = 6 [default=0]; + + repeated group Header = 7 { + required bytes key = 8; + required bytes value = 9; + } + + optional int32 body_size = 10; + optional bytes body = 11 [ctype=CORD]; + required int64 creation_time_usec = 12; + + optional group CronTimetable = 13 { + required bytes schedule = 14; + required bytes timezone = 15; + } + + optional group RunLog = 16 { + required int64 dispatched_usec = 17; + required int64 lag_usec = 18; + required int64 elapsed_usec = 19; + optional int64 response_code = 20; + optional string retry_reason = 27; + } + + optional bytes description = 21; + optional TaskPayload payload = 22; + optional TaskQueueRetryParameters retry_parameters = 23; + optional int64 first_try_usec = 24; + optional bytes tag = 25; + optional int32 execution_count = 26 [default=0]; + } +} + +message TaskQueueFetchTaskRequest { + optional bytes app_id = 1; + required bytes queue_name = 2; + required bytes task_name = 3; +} + +message TaskQueueFetchTaskResponse { + required TaskQueueQueryTasksResponse task = 1; +} + +message TaskQueueUpdateStorageLimitRequest { + required bytes app_id = 1; + required int64 limit = 2; +} + +message TaskQueueUpdateStorageLimitResponse { + required int64 new_limit = 1; +} + +message TaskQueueQueryAndOwnTasksRequest { + required bytes queue_name = 1; + required double lease_seconds = 2; + required int64 max_tasks = 3; + optional bool group_by_tag = 4 [default=false]; + optional bytes tag = 5; +} + +message TaskQueueQueryAndOwnTasksResponse { + repeated group Task = 1 { + required bytes task_name = 2; + required int64 eta_usec = 3; + optional int32 retry_count = 4 [default=0]; + optional bytes body = 5 [ctype=CORD]; + optional bytes tag = 6; + } +} + +message TaskQueueModifyTaskLeaseRequest { + required bytes 
queue_name = 1; + required bytes task_name = 2; + required int64 eta_usec = 3; + required double lease_seconds = 4; +} + +message TaskQueueModifyTaskLeaseResponse { + required int64 updated_eta_usec = 1; +} diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go new file mode 100644 index 0000000..28a6d18 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/transaction.go @@ -0,0 +1,107 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +// This file implements hooks for applying datastore transactions. + +import ( + "errors" + "reflect" + + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" + + basepb "google.golang.org/appengine/internal/base" + pb "google.golang.org/appengine/internal/datastore" +) + +var transactionSetters = make(map[reflect.Type]reflect.Value) + +// RegisterTransactionSetter registers a function that sets transaction information +// in a protocol buffer message. f should be a function with two arguments, +// the first being a protocol buffer type, and the second being *datastore.Transaction. +func RegisterTransactionSetter(f interface{}) { + v := reflect.ValueOf(f) + transactionSetters[v.Type().In(0)] = v +} + +// applyTransaction applies the transaction t to message pb +// by using the relevant setter passed to RegisterTransactionSetter. +func applyTransaction(pb proto.Message, t *pb.Transaction) { + v := reflect.ValueOf(pb) + if f, ok := transactionSetters[v.Type()]; ok { + f.Call([]reflect.Value{v, reflect.ValueOf(t)}) + } +} + +var transactionKey = "used for *Transaction" + +func transactionFromContext(ctx netcontext.Context) *transaction { + t, _ := ctx.Value(&transactionKey).(*transaction) + return t +} + +func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context { + return netcontext.WithValue(ctx, &transactionKey, t) +} + +type transaction struct { + transaction pb.Transaction + finished bool +} + +var ErrConcurrentTransaction = errors.New("internal: concurrent transaction") + +func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool) error { + if transactionFromContext(c) != nil { + return errors.New("nested transactions are not supported") + } + + // Begin the transaction. + t := &transaction{} + req := &pb.BeginTransactionRequest{ + App: proto.String(FullyQualifiedAppID(c)), + } + if xg { + req.AllowMultipleEg = proto.Bool(true) + } + if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil { + return err + } + + // Call f, rolling back the transaction if f returns a non-nil error, or panics. + // The panic is not recovered. + defer func() { + if t.finished { + return + } + t.finished = true + // Ignore the error return value, since we are already returning a non-nil + // error (or we're panicking). + Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{}) + }() + if err := f(withTransaction(c, t)); err != nil { + return err + } + t.finished = true + + // Commit the transaction. 
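+ // A commit can fail if another request concurrently modified data in this
+ // transaction's entity group; both error shapes checked below are mapped to
+ // ErrConcurrentTransaction so the caller can detect the conflict and retry.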
+ res := &pb.CommitResponse{} + err := Call(c, "datastore_v3", "Commit", &t.transaction, res) + if ae, ok := err.(*APIError); ok { + /* TODO: restore this conditional + if appengine.IsDevAppServer() { + */ + // The Python Dev AppServer raises an ApplicationError with error code 2 (which is + // Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.". + if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." { + return ErrConcurrentTransaction + } + if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) { + return ErrConcurrentTransaction + } + } + return err +} diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go new file mode 100644 index 0000000..af463fb --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go @@ -0,0 +1,355 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto +// DO NOT EDIT! + +/* +Package urlfetch is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto + +It has these top-level messages: + URLFetchServiceError + URLFetchRequest + URLFetchResponse +*/ +package urlfetch + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type URLFetchServiceError_ErrorCode int32 + +const ( + URLFetchServiceError_OK URLFetchServiceError_ErrorCode = 0 + URLFetchServiceError_INVALID_URL URLFetchServiceError_ErrorCode = 1 + URLFetchServiceError_FETCH_ERROR URLFetchServiceError_ErrorCode = 2 + URLFetchServiceError_UNSPECIFIED_ERROR URLFetchServiceError_ErrorCode = 3 + URLFetchServiceError_RESPONSE_TOO_LARGE URLFetchServiceError_ErrorCode = 4 + URLFetchServiceError_DEADLINE_EXCEEDED URLFetchServiceError_ErrorCode = 5 + URLFetchServiceError_SSL_CERTIFICATE_ERROR URLFetchServiceError_ErrorCode = 6 + URLFetchServiceError_DNS_ERROR URLFetchServiceError_ErrorCode = 7 + URLFetchServiceError_CLOSED URLFetchServiceError_ErrorCode = 8 + URLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9 + URLFetchServiceError_TOO_MANY_REDIRECTS URLFetchServiceError_ErrorCode = 10 + URLFetchServiceError_MALFORMED_REPLY URLFetchServiceError_ErrorCode = 11 + URLFetchServiceError_CONNECTION_ERROR URLFetchServiceError_ErrorCode = 12 +) + +var URLFetchServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INVALID_URL", + 2: "FETCH_ERROR", + 3: "UNSPECIFIED_ERROR", + 4: "RESPONSE_TOO_LARGE", + 5: "DEADLINE_EXCEEDED", + 6: "SSL_CERTIFICATE_ERROR", + 7: "DNS_ERROR", + 8: "CLOSED", + 9: "INTERNAL_TRANSIENT_ERROR", + 10: "TOO_MANY_REDIRECTS", + 11: "MALFORMED_REPLY", + 12: "CONNECTION_ERROR", +} +var URLFetchServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INVALID_URL": 1, + "FETCH_ERROR": 2, + "UNSPECIFIED_ERROR": 3, + "RESPONSE_TOO_LARGE": 4, + "DEADLINE_EXCEEDED": 5, + "SSL_CERTIFICATE_ERROR": 6, + "DNS_ERROR": 7, + "CLOSED": 8, + "INTERNAL_TRANSIENT_ERROR": 9, + "TOO_MANY_REDIRECTS": 10, + "MALFORMED_REPLY": 11, + "CONNECTION_ERROR": 12, +} + +func (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode { + p := new(URLFetchServiceError_ErrorCode) + *p = x + return p +} +func (x URLFetchServiceError_ErrorCode) String() 
string { + return proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x)) +} +func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, "URLFetchServiceError_ErrorCode") + if err != nil { + return err + } + *x = URLFetchServiceError_ErrorCode(value) + return nil +} + +type URLFetchRequest_RequestMethod int32 + +const ( + URLFetchRequest_GET URLFetchRequest_RequestMethod = 1 + URLFetchRequest_POST URLFetchRequest_RequestMethod = 2 + URLFetchRequest_HEAD URLFetchRequest_RequestMethod = 3 + URLFetchRequest_PUT URLFetchRequest_RequestMethod = 4 + URLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5 + URLFetchRequest_PATCH URLFetchRequest_RequestMethod = 6 +) + +var URLFetchRequest_RequestMethod_name = map[int32]string{ + 1: "GET", + 2: "POST", + 3: "HEAD", + 4: "PUT", + 5: "DELETE", + 6: "PATCH", +} +var URLFetchRequest_RequestMethod_value = map[string]int32{ + "GET": 1, + "POST": 2, + "HEAD": 3, + "PUT": 4, + "DELETE": 5, + "PATCH": 6, +} + +func (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod { + p := new(URLFetchRequest_RequestMethod) + *p = x + return p +} +func (x URLFetchRequest_RequestMethod) String() string { + return proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x)) +} +func (x *URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, "URLFetchRequest_RequestMethod") + if err != nil { + return err + } + *x = URLFetchRequest_RequestMethod(value) + return nil +} + +type URLFetchServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *URLFetchServiceError) Reset() { *m = URLFetchServiceError{} } +func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) } +func (*URLFetchServiceError) ProtoMessage() {} + +type URLFetchRequest struct { + Method *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,name=Method,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"` + Url *string `protobuf:"bytes,2,req,name=Url" json:"Url,omitempty"` + Header []*URLFetchRequest_Header `protobuf:"group,3,rep,name=Header" json:"header,omitempty"` + Payload []byte `protobuf:"bytes,6,opt,name=Payload" json:"Payload,omitempty"` + FollowRedirects *bool `protobuf:"varint,7,opt,name=FollowRedirects,def=1" json:"FollowRedirects,omitempty"` + Deadline *float64 `protobuf:"fixed64,8,opt,name=Deadline" json:"Deadline,omitempty"` + MustValidateServerCertificate *bool `protobuf:"varint,9,opt,name=MustValidateServerCertificate,def=1" json:"MustValidateServerCertificate,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *URLFetchRequest) Reset() { *m = URLFetchRequest{} } +func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) } +func (*URLFetchRequest) ProtoMessage() {} + +const Default_URLFetchRequest_FollowRedirects bool = true +const Default_URLFetchRequest_MustValidateServerCertificate bool = true + +func (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod { + if m != nil && m.Method != nil { + return *m.Method + } + return URLFetchRequest_GET +} + +func (m *URLFetchRequest) GetUrl() string { + if m != nil && m.Url != nil { + return *m.Url + } + return "" +} + +func (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header { + if m != nil { + return m.Header + } + return nil +} + +func (m *URLFetchRequest) GetPayload() []byte { + if m != nil { + return m.Payload + } + return nil +} 
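A note on the getter pattern that dominates these generated files: proto2 optional fields are represented as pointers, and every Get* method is nil-safe, falling back to the declared default when the field (or the receiver itself) is unset. The sketch below mirrors that shape with a hypothetical local struct rather than importing the vendored package, so the names urlFetchRequest and defaultFollowRedirects are illustrative only:

package main

import "fmt"

// Mirrors the generated proto2 getter shape: a pointer field plus a
// nil-safe getter that substitutes the declared default when unset.
type urlFetchRequest struct {
	FollowRedirects *bool // optional, default=true in the proto
}

const defaultFollowRedirects = true

func (m *urlFetchRequest) GetFollowRedirects() bool {
	if m != nil && m.FollowRedirects != nil {
		return *m.FollowRedirects
	}
	return defaultFollowRedirects
}

func main() {
	off := false
	fmt.Println((&urlFetchRequest{}).GetFollowRedirects())                      // true: default applies when unset
	fmt.Println((&urlFetchRequest{FollowRedirects: &off}).GetFollowRedirects()) // false: explicit value wins
	fmt.Println((*urlFetchRequest)(nil).GetFollowRedirects())                   // true: nil receiver is safe
}

Because callers go through the getter instead of dereferencing the pointer field, they never need to distinguish "message is nil", "field is unset", and "field is set to the default" unless they inspect the pointer directly.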
+ +func (m *URLFetchRequest) GetFollowRedirects() bool { + if m != nil && m.FollowRedirects != nil { + return *m.FollowRedirects + } + return Default_URLFetchRequest_FollowRedirects +} + +func (m *URLFetchRequest) GetDeadline() float64 { + if m != nil && m.Deadline != nil { + return *m.Deadline + } + return 0 +} + +func (m *URLFetchRequest) GetMustValidateServerCertificate() bool { + if m != nil && m.MustValidateServerCertificate != nil { + return *m.MustValidateServerCertificate + } + return Default_URLFetchRequest_MustValidateServerCertificate +} + +type URLFetchRequest_Header struct { + Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"` + Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *URLFetchRequest_Header) Reset() { *m = URLFetchRequest_Header{} } +func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) } +func (*URLFetchRequest_Header) ProtoMessage() {} + +func (m *URLFetchRequest_Header) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *URLFetchRequest_Header) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type URLFetchResponse struct { + Content []byte `protobuf:"bytes,1,opt,name=Content" json:"Content,omitempty"` + StatusCode *int32 `protobuf:"varint,2,req,name=StatusCode" json:"StatusCode,omitempty"` + Header []*URLFetchResponse_Header `protobuf:"group,3,rep,name=Header" json:"header,omitempty"` + ContentWasTruncated *bool `protobuf:"varint,6,opt,name=ContentWasTruncated,def=0" json:"ContentWasTruncated,omitempty"` + ExternalBytesSent *int64 `protobuf:"varint,7,opt,name=ExternalBytesSent" json:"ExternalBytesSent,omitempty"` + ExternalBytesReceived *int64 `protobuf:"varint,8,opt,name=ExternalBytesReceived" json:"ExternalBytesReceived,omitempty"` + FinalUrl *string `protobuf:"bytes,9,opt,name=FinalUrl" json:"FinalUrl,omitempty"` + ApiCpuMilliseconds *int64 `protobuf:"varint,10,opt,name=ApiCpuMilliseconds,def=0" json:"ApiCpuMilliseconds,omitempty"` + ApiBytesSent *int64 `protobuf:"varint,11,opt,name=ApiBytesSent,def=0" json:"ApiBytesSent,omitempty"` + ApiBytesReceived *int64 `protobuf:"varint,12,opt,name=ApiBytesReceived,def=0" json:"ApiBytesReceived,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *URLFetchResponse) Reset() { *m = URLFetchResponse{} } +func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) } +func (*URLFetchResponse) ProtoMessage() {} + +const Default_URLFetchResponse_ContentWasTruncated bool = false +const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0 +const Default_URLFetchResponse_ApiBytesSent int64 = 0 +const Default_URLFetchResponse_ApiBytesReceived int64 = 0 + +func (m *URLFetchResponse) GetContent() []byte { + if m != nil { + return m.Content + } + return nil +} + +func (m *URLFetchResponse) GetStatusCode() int32 { + if m != nil && m.StatusCode != nil { + return *m.StatusCode + } + return 0 +} + +func (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header { + if m != nil { + return m.Header + } + return nil +} + +func (m *URLFetchResponse) GetContentWasTruncated() bool { + if m != nil && m.ContentWasTruncated != nil { + return *m.ContentWasTruncated + } + return Default_URLFetchResponse_ContentWasTruncated +} + +func (m *URLFetchResponse) GetExternalBytesSent() int64 { + if m != nil && m.ExternalBytesSent != nil { + return *m.ExternalBytesSent + } + return 0 +} + +func (m *URLFetchResponse) 
GetExternalBytesReceived() int64 { + if m != nil && m.ExternalBytesReceived != nil { + return *m.ExternalBytesReceived + } + return 0 +} + +func (m *URLFetchResponse) GetFinalUrl() string { + if m != nil && m.FinalUrl != nil { + return *m.FinalUrl + } + return "" +} + +func (m *URLFetchResponse) GetApiCpuMilliseconds() int64 { + if m != nil && m.ApiCpuMilliseconds != nil { + return *m.ApiCpuMilliseconds + } + return Default_URLFetchResponse_ApiCpuMilliseconds +} + +func (m *URLFetchResponse) GetApiBytesSent() int64 { + if m != nil && m.ApiBytesSent != nil { + return *m.ApiBytesSent + } + return Default_URLFetchResponse_ApiBytesSent +} + +func (m *URLFetchResponse) GetApiBytesReceived() int64 { + if m != nil && m.ApiBytesReceived != nil { + return *m.ApiBytesReceived + } + return Default_URLFetchResponse_ApiBytesReceived +} + +type URLFetchResponse_Header struct { + Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"` + Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *URLFetchResponse_Header) Reset() { *m = URLFetchResponse_Header{} } +func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) } +func (*URLFetchResponse_Header) ProtoMessage() {} + +func (m *URLFetchResponse_Header) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *URLFetchResponse_Header) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto new file mode 100644 index 0000000..f695edf --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto @@ -0,0 +1,64 @@ +syntax = "proto2"; +option go_package = "urlfetch"; + +package appengine; + +message URLFetchServiceError { + enum ErrorCode { + OK = 0; + INVALID_URL = 1; + FETCH_ERROR = 2; + UNSPECIFIED_ERROR = 3; + RESPONSE_TOO_LARGE = 4; + DEADLINE_EXCEEDED = 5; + SSL_CERTIFICATE_ERROR = 6; + DNS_ERROR = 7; + CLOSED = 8; + INTERNAL_TRANSIENT_ERROR = 9; + TOO_MANY_REDIRECTS = 10; + MALFORMED_REPLY = 11; + CONNECTION_ERROR = 12; + } +} + +message URLFetchRequest { + enum RequestMethod { + GET = 1; + POST = 2; + HEAD = 3; + PUT = 4; + DELETE = 5; + PATCH = 6; + } + required RequestMethod Method = 1; + required string Url = 2; + repeated group Header = 3 { + required string Key = 4; + required string Value = 5; + } + optional bytes Payload = 6 [ctype=CORD]; + + optional bool FollowRedirects = 7 [default=true]; + + optional double Deadline = 8; + + optional bool MustValidateServerCertificate = 9 [default=true]; +} + +message URLFetchResponse { + optional bytes Content = 1; + required int32 StatusCode = 2; + repeated group Header = 3 { + required string Key = 4; + required string Value = 5; + } + optional bool ContentWasTruncated = 6 [default=false]; + optional int64 ExternalBytesSent = 7; + optional int64 ExternalBytesReceived = 8; + + optional string FinalUrl = 9; + + optional int64 ApiCpuMilliseconds = 10 [default=0]; + optional int64 ApiBytesSent = 11 [default=0]; + optional int64 ApiBytesReceived = 12 [default=0]; +} diff --git a/vendor/google.golang.org/appengine/internal/user/user_service.pb.go b/vendor/google.golang.org/appengine/internal/user/user_service.pb.go new file mode 100644 index 0000000..6b52ffc --- /dev/null +++ 
b/vendor/google.golang.org/appengine/internal/user/user_service.pb.go @@ -0,0 +1,289 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/user/user_service.proto +// DO NOT EDIT! + +/* +Package user is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/user/user_service.proto + +It has these top-level messages: + UserServiceError + CreateLoginURLRequest + CreateLoginURLResponse + CreateLogoutURLRequest + CreateLogoutURLResponse + GetOAuthUserRequest + GetOAuthUserResponse + CheckOAuthSignatureRequest + CheckOAuthSignatureResponse +*/ +package user + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type UserServiceError_ErrorCode int32 + +const ( + UserServiceError_OK UserServiceError_ErrorCode = 0 + UserServiceError_REDIRECT_URL_TOO_LONG UserServiceError_ErrorCode = 1 + UserServiceError_NOT_ALLOWED UserServiceError_ErrorCode = 2 + UserServiceError_OAUTH_INVALID_TOKEN UserServiceError_ErrorCode = 3 + UserServiceError_OAUTH_INVALID_REQUEST UserServiceError_ErrorCode = 4 + UserServiceError_OAUTH_ERROR UserServiceError_ErrorCode = 5 +) + +var UserServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "REDIRECT_URL_TOO_LONG", + 2: "NOT_ALLOWED", + 3: "OAUTH_INVALID_TOKEN", + 4: "OAUTH_INVALID_REQUEST", + 5: "OAUTH_ERROR", +} +var UserServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "REDIRECT_URL_TOO_LONG": 1, + "NOT_ALLOWED": 2, + "OAUTH_INVALID_TOKEN": 3, + "OAUTH_INVALID_REQUEST": 4, + "OAUTH_ERROR": 5, +} + +func (x UserServiceError_ErrorCode) Enum() *UserServiceError_ErrorCode { + p := new(UserServiceError_ErrorCode) + *p = x + return p +} +func (x UserServiceError_ErrorCode) String() string { + return proto.EnumName(UserServiceError_ErrorCode_name, int32(x)) +} +func (x *UserServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(UserServiceError_ErrorCode_value, data, "UserServiceError_ErrorCode") + if err != nil { + return err + } + *x = UserServiceError_ErrorCode(value) + return nil +} + +type UserServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *UserServiceError) Reset() { *m = UserServiceError{} } +func (m *UserServiceError) String() string { return proto.CompactTextString(m) } +func (*UserServiceError) ProtoMessage() {} + +type CreateLoginURLRequest struct { + DestinationUrl *string `protobuf:"bytes,1,req,name=destination_url" json:"destination_url,omitempty"` + AuthDomain *string `protobuf:"bytes,2,opt,name=auth_domain" json:"auth_domain,omitempty"` + FederatedIdentity *string `protobuf:"bytes,3,opt,name=federated_identity,def=" json:"federated_identity,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateLoginURLRequest) Reset() { *m = CreateLoginURLRequest{} } +func (m *CreateLoginURLRequest) String() string { return proto.CompactTextString(m) } +func (*CreateLoginURLRequest) ProtoMessage() {} + +func (m *CreateLoginURLRequest) GetDestinationUrl() string { + if m != nil && m.DestinationUrl != nil { + return *m.DestinationUrl + } + return "" +} + +func (m *CreateLoginURLRequest) GetAuthDomain() string { + if m != nil && m.AuthDomain != nil { + return *m.AuthDomain + } + return "" +} + +func (m *CreateLoginURLRequest) GetFederatedIdentity() string { + if m != nil && m.FederatedIdentity != nil { + return 
*m.FederatedIdentity + } + return "" +} + +type CreateLoginURLResponse struct { + LoginUrl *string `protobuf:"bytes,1,req,name=login_url" json:"login_url,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateLoginURLResponse) Reset() { *m = CreateLoginURLResponse{} } +func (m *CreateLoginURLResponse) String() string { return proto.CompactTextString(m) } +func (*CreateLoginURLResponse) ProtoMessage() {} + +func (m *CreateLoginURLResponse) GetLoginUrl() string { + if m != nil && m.LoginUrl != nil { + return *m.LoginUrl + } + return "" +} + +type CreateLogoutURLRequest struct { + DestinationUrl *string `protobuf:"bytes,1,req,name=destination_url" json:"destination_url,omitempty"` + AuthDomain *string `protobuf:"bytes,2,opt,name=auth_domain" json:"auth_domain,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateLogoutURLRequest) Reset() { *m = CreateLogoutURLRequest{} } +func (m *CreateLogoutURLRequest) String() string { return proto.CompactTextString(m) } +func (*CreateLogoutURLRequest) ProtoMessage() {} + +func (m *CreateLogoutURLRequest) GetDestinationUrl() string { + if m != nil && m.DestinationUrl != nil { + return *m.DestinationUrl + } + return "" +} + +func (m *CreateLogoutURLRequest) GetAuthDomain() string { + if m != nil && m.AuthDomain != nil { + return *m.AuthDomain + } + return "" +} + +type CreateLogoutURLResponse struct { + LogoutUrl *string `protobuf:"bytes,1,req,name=logout_url" json:"logout_url,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateLogoutURLResponse) Reset() { *m = CreateLogoutURLResponse{} } +func (m *CreateLogoutURLResponse) String() string { return proto.CompactTextString(m) } +func (*CreateLogoutURLResponse) ProtoMessage() {} + +func (m *CreateLogoutURLResponse) GetLogoutUrl() string { + if m != nil && m.LogoutUrl != nil { + return *m.LogoutUrl + } + return "" +} + +type GetOAuthUserRequest struct { + Scope *string `protobuf:"bytes,1,opt,name=scope" json:"scope,omitempty"` + Scopes []string `protobuf:"bytes,2,rep,name=scopes" json:"scopes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetOAuthUserRequest) Reset() { *m = GetOAuthUserRequest{} } +func (m *GetOAuthUserRequest) String() string { return proto.CompactTextString(m) } +func (*GetOAuthUserRequest) ProtoMessage() {} + +func (m *GetOAuthUserRequest) GetScope() string { + if m != nil && m.Scope != nil { + return *m.Scope + } + return "" +} + +func (m *GetOAuthUserRequest) GetScopes() []string { + if m != nil { + return m.Scopes + } + return nil +} + +type GetOAuthUserResponse struct { + Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"` + UserId *string `protobuf:"bytes,2,req,name=user_id" json:"user_id,omitempty"` + AuthDomain *string `protobuf:"bytes,3,req,name=auth_domain" json:"auth_domain,omitempty"` + UserOrganization *string `protobuf:"bytes,4,opt,name=user_organization,def=" json:"user_organization,omitempty"` + IsAdmin *bool `protobuf:"varint,5,opt,name=is_admin,def=0" json:"is_admin,omitempty"` + ClientId *string `protobuf:"bytes,6,opt,name=client_id,def=" json:"client_id,omitempty"` + Scopes []string `protobuf:"bytes,7,rep,name=scopes" json:"scopes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetOAuthUserResponse) Reset() { *m = GetOAuthUserResponse{} } +func (m *GetOAuthUserResponse) String() string { return proto.CompactTextString(m) } +func (*GetOAuthUserResponse) ProtoMessage() {} + +const Default_GetOAuthUserResponse_IsAdmin bool = false + +func (m *GetOAuthUserResponse) GetEmail() 
string { + if m != nil && m.Email != nil { + return *m.Email + } + return "" +} + +func (m *GetOAuthUserResponse) GetUserId() string { + if m != nil && m.UserId != nil { + return *m.UserId + } + return "" +} + +func (m *GetOAuthUserResponse) GetAuthDomain() string { + if m != nil && m.AuthDomain != nil { + return *m.AuthDomain + } + return "" +} + +func (m *GetOAuthUserResponse) GetUserOrganization() string { + if m != nil && m.UserOrganization != nil { + return *m.UserOrganization + } + return "" +} + +func (m *GetOAuthUserResponse) GetIsAdmin() bool { + if m != nil && m.IsAdmin != nil { + return *m.IsAdmin + } + return Default_GetOAuthUserResponse_IsAdmin +} + +func (m *GetOAuthUserResponse) GetClientId() string { + if m != nil && m.ClientId != nil { + return *m.ClientId + } + return "" +} + +func (m *GetOAuthUserResponse) GetScopes() []string { + if m != nil { + return m.Scopes + } + return nil +} + +type CheckOAuthSignatureRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *CheckOAuthSignatureRequest) Reset() { *m = CheckOAuthSignatureRequest{} } +func (m *CheckOAuthSignatureRequest) String() string { return proto.CompactTextString(m) } +func (*CheckOAuthSignatureRequest) ProtoMessage() {} + +type CheckOAuthSignatureResponse struct { + OauthConsumerKey *string `protobuf:"bytes,1,req,name=oauth_consumer_key" json:"oauth_consumer_key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CheckOAuthSignatureResponse) Reset() { *m = CheckOAuthSignatureResponse{} } +func (m *CheckOAuthSignatureResponse) String() string { return proto.CompactTextString(m) } +func (*CheckOAuthSignatureResponse) ProtoMessage() {} + +func (m *CheckOAuthSignatureResponse) GetOauthConsumerKey() string { + if m != nil && m.OauthConsumerKey != nil { + return *m.OauthConsumerKey + } + return "" +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/user/user_service.proto b/vendor/google.golang.org/appengine/internal/user/user_service.proto new file mode 100644 index 0000000..f3e9693 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/user/user_service.proto @@ -0,0 +1,58 @@ +syntax = "proto2"; +option go_package = "user"; + +package appengine; + +message UserServiceError { + enum ErrorCode { + OK = 0; + REDIRECT_URL_TOO_LONG = 1; + NOT_ALLOWED = 2; + OAUTH_INVALID_TOKEN = 3; + OAUTH_INVALID_REQUEST = 4; + OAUTH_ERROR = 5; + } +} + +message CreateLoginURLRequest { + required string destination_url = 1; + optional string auth_domain = 2; + optional string federated_identity = 3 [default = ""]; +} + +message CreateLoginURLResponse { + required string login_url = 1; +} + +message CreateLogoutURLRequest { + required string destination_url = 1; + optional string auth_domain = 2; +} + +message CreateLogoutURLResponse { + required string logout_url = 1; +} + +message GetOAuthUserRequest { + optional string scope = 1; + + repeated string scopes = 2; +} + +message GetOAuthUserResponse { + required string email = 1; + required string user_id = 2; + required string auth_domain = 3; + optional string user_organization = 4 [default = ""]; + optional bool is_admin = 5 [default = false]; + optional string client_id = 6 [default = ""]; + + repeated string scopes = 7; +} + +message CheckOAuthSignatureRequest { +} + +message CheckOAuthSignatureResponse { + required string oauth_consumer_key = 1; +} diff --git a/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.pb.go b/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.pb.go new file mode 100644 
index 0000000..6d5b0ae --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.pb.go @@ -0,0 +1,427 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/xmpp/xmpp_service.proto +// DO NOT EDIT! + +/* +Package xmpp is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/xmpp/xmpp_service.proto + +It has these top-level messages: + XmppServiceError + PresenceRequest + PresenceResponse + BulkPresenceRequest + BulkPresenceResponse + XmppMessageRequest + XmppMessageResponse + XmppSendPresenceRequest + XmppSendPresenceResponse + XmppInviteRequest + XmppInviteResponse +*/ +package xmpp + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type XmppServiceError_ErrorCode int32 + +const ( + XmppServiceError_UNSPECIFIED_ERROR XmppServiceError_ErrorCode = 1 + XmppServiceError_INVALID_JID XmppServiceError_ErrorCode = 2 + XmppServiceError_NO_BODY XmppServiceError_ErrorCode = 3 + XmppServiceError_INVALID_XML XmppServiceError_ErrorCode = 4 + XmppServiceError_INVALID_TYPE XmppServiceError_ErrorCode = 5 + XmppServiceError_INVALID_SHOW XmppServiceError_ErrorCode = 6 + XmppServiceError_EXCEEDED_MAX_SIZE XmppServiceError_ErrorCode = 7 + XmppServiceError_APPID_ALIAS_REQUIRED XmppServiceError_ErrorCode = 8 + XmppServiceError_NONDEFAULT_MODULE XmppServiceError_ErrorCode = 9 +) + +var XmppServiceError_ErrorCode_name = map[int32]string{ + 1: "UNSPECIFIED_ERROR", + 2: "INVALID_JID", + 3: "NO_BODY", + 4: "INVALID_XML", + 5: "INVALID_TYPE", + 6: "INVALID_SHOW", + 7: "EXCEEDED_MAX_SIZE", + 8: "APPID_ALIAS_REQUIRED", + 9: "NONDEFAULT_MODULE", +} +var XmppServiceError_ErrorCode_value = map[string]int32{ + "UNSPECIFIED_ERROR": 1, + "INVALID_JID": 2, + "NO_BODY": 3, + "INVALID_XML": 4, + "INVALID_TYPE": 5, + "INVALID_SHOW": 6, + "EXCEEDED_MAX_SIZE": 7, + "APPID_ALIAS_REQUIRED": 8, + "NONDEFAULT_MODULE": 9, +} + +func (x XmppServiceError_ErrorCode) Enum() *XmppServiceError_ErrorCode { + p := new(XmppServiceError_ErrorCode) + *p = x + return p +} +func (x XmppServiceError_ErrorCode) String() string { + return proto.EnumName(XmppServiceError_ErrorCode_name, int32(x)) +} +func (x *XmppServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(XmppServiceError_ErrorCode_value, data, "XmppServiceError_ErrorCode") + if err != nil { + return err + } + *x = XmppServiceError_ErrorCode(value) + return nil +} + +type PresenceResponse_SHOW int32 + +const ( + PresenceResponse_NORMAL PresenceResponse_SHOW = 0 + PresenceResponse_AWAY PresenceResponse_SHOW = 1 + PresenceResponse_DO_NOT_DISTURB PresenceResponse_SHOW = 2 + PresenceResponse_CHAT PresenceResponse_SHOW = 3 + PresenceResponse_EXTENDED_AWAY PresenceResponse_SHOW = 4 +) + +var PresenceResponse_SHOW_name = map[int32]string{ + 0: "NORMAL", + 1: "AWAY", + 2: "DO_NOT_DISTURB", + 3: "CHAT", + 4: "EXTENDED_AWAY", +} +var PresenceResponse_SHOW_value = map[string]int32{ + "NORMAL": 0, + "AWAY": 1, + "DO_NOT_DISTURB": 2, + "CHAT": 3, + "EXTENDED_AWAY": 4, +} + +func (x PresenceResponse_SHOW) Enum() *PresenceResponse_SHOW { + p := new(PresenceResponse_SHOW) + *p = x + return p +} +func (x PresenceResponse_SHOW) String() string { + return proto.EnumName(PresenceResponse_SHOW_name, int32(x)) +} +func (x *PresenceResponse_SHOW) UnmarshalJSON(data []byte) error { 
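+ // proto.UnmarshalJSONEnum accepts either the quoted enum name (e.g. "AWAY") or its numeric value.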
+ value, err := proto.UnmarshalJSONEnum(PresenceResponse_SHOW_value, data, "PresenceResponse_SHOW") + if err != nil { + return err + } + *x = PresenceResponse_SHOW(value) + return nil +} + +type XmppMessageResponse_XmppMessageStatus int32 + +const ( + XmppMessageResponse_NO_ERROR XmppMessageResponse_XmppMessageStatus = 0 + XmppMessageResponse_INVALID_JID XmppMessageResponse_XmppMessageStatus = 1 + XmppMessageResponse_OTHER_ERROR XmppMessageResponse_XmppMessageStatus = 2 +) + +var XmppMessageResponse_XmppMessageStatus_name = map[int32]string{ + 0: "NO_ERROR", + 1: "INVALID_JID", + 2: "OTHER_ERROR", +} +var XmppMessageResponse_XmppMessageStatus_value = map[string]int32{ + "NO_ERROR": 0, + "INVALID_JID": 1, + "OTHER_ERROR": 2, +} + +func (x XmppMessageResponse_XmppMessageStatus) Enum() *XmppMessageResponse_XmppMessageStatus { + p := new(XmppMessageResponse_XmppMessageStatus) + *p = x + return p +} +func (x XmppMessageResponse_XmppMessageStatus) String() string { + return proto.EnumName(XmppMessageResponse_XmppMessageStatus_name, int32(x)) +} +func (x *XmppMessageResponse_XmppMessageStatus) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(XmppMessageResponse_XmppMessageStatus_value, data, "XmppMessageResponse_XmppMessageStatus") + if err != nil { + return err + } + *x = XmppMessageResponse_XmppMessageStatus(value) + return nil +} + +type XmppServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *XmppServiceError) Reset() { *m = XmppServiceError{} } +func (m *XmppServiceError) String() string { return proto.CompactTextString(m) } +func (*XmppServiceError) ProtoMessage() {} + +type PresenceRequest struct { + Jid *string `protobuf:"bytes,1,req,name=jid" json:"jid,omitempty"` + FromJid *string `protobuf:"bytes,2,opt,name=from_jid" json:"from_jid,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PresenceRequest) Reset() { *m = PresenceRequest{} } +func (m *PresenceRequest) String() string { return proto.CompactTextString(m) } +func (*PresenceRequest) ProtoMessage() {} + +func (m *PresenceRequest) GetJid() string { + if m != nil && m.Jid != nil { + return *m.Jid + } + return "" +} + +func (m *PresenceRequest) GetFromJid() string { + if m != nil && m.FromJid != nil { + return *m.FromJid + } + return "" +} + +type PresenceResponse struct { + IsAvailable *bool `protobuf:"varint,1,req,name=is_available" json:"is_available,omitempty"` + Presence *PresenceResponse_SHOW `protobuf:"varint,2,opt,name=presence,enum=appengine.PresenceResponse_SHOW" json:"presence,omitempty"` + Valid *bool `protobuf:"varint,3,opt,name=valid" json:"valid,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PresenceResponse) Reset() { *m = PresenceResponse{} } +func (m *PresenceResponse) String() string { return proto.CompactTextString(m) } +func (*PresenceResponse) ProtoMessage() {} + +func (m *PresenceResponse) GetIsAvailable() bool { + if m != nil && m.IsAvailable != nil { + return *m.IsAvailable + } + return false +} + +func (m *PresenceResponse) GetPresence() PresenceResponse_SHOW { + if m != nil && m.Presence != nil { + return *m.Presence + } + return PresenceResponse_NORMAL +} + +func (m *PresenceResponse) GetValid() bool { + if m != nil && m.Valid != nil { + return *m.Valid + } + return false +} + +type BulkPresenceRequest struct { + Jid []string `protobuf:"bytes,1,rep,name=jid" json:"jid,omitempty"` + FromJid *string `protobuf:"bytes,2,opt,name=from_jid" json:"from_jid,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BulkPresenceRequest) 
Reset() { *m = BulkPresenceRequest{} } +func (m *BulkPresenceRequest) String() string { return proto.CompactTextString(m) } +func (*BulkPresenceRequest) ProtoMessage() {} + +func (m *BulkPresenceRequest) GetJid() []string { + if m != nil { + return m.Jid + } + return nil +} + +func (m *BulkPresenceRequest) GetFromJid() string { + if m != nil && m.FromJid != nil { + return *m.FromJid + } + return "" +} + +type BulkPresenceResponse struct { + PresenceResponse []*PresenceResponse `protobuf:"bytes,1,rep,name=presence_response" json:"presence_response,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BulkPresenceResponse) Reset() { *m = BulkPresenceResponse{} } +func (m *BulkPresenceResponse) String() string { return proto.CompactTextString(m) } +func (*BulkPresenceResponse) ProtoMessage() {} + +func (m *BulkPresenceResponse) GetPresenceResponse() []*PresenceResponse { + if m != nil { + return m.PresenceResponse + } + return nil +} + +type XmppMessageRequest struct { + Jid []string `protobuf:"bytes,1,rep,name=jid" json:"jid,omitempty"` + Body *string `protobuf:"bytes,2,req,name=body" json:"body,omitempty"` + RawXml *bool `protobuf:"varint,3,opt,name=raw_xml,def=0" json:"raw_xml,omitempty"` + Type *string `protobuf:"bytes,4,opt,name=type,def=chat" json:"type,omitempty"` + FromJid *string `protobuf:"bytes,5,opt,name=from_jid" json:"from_jid,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *XmppMessageRequest) Reset() { *m = XmppMessageRequest{} } +func (m *XmppMessageRequest) String() string { return proto.CompactTextString(m) } +func (*XmppMessageRequest) ProtoMessage() {} + +const Default_XmppMessageRequest_RawXml bool = false +const Default_XmppMessageRequest_Type string = "chat" + +func (m *XmppMessageRequest) GetJid() []string { + if m != nil { + return m.Jid + } + return nil +} + +func (m *XmppMessageRequest) GetBody() string { + if m != nil && m.Body != nil { + return *m.Body + } + return "" +} + +func (m *XmppMessageRequest) GetRawXml() bool { + if m != nil && m.RawXml != nil { + return *m.RawXml + } + return Default_XmppMessageRequest_RawXml +} + +func (m *XmppMessageRequest) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_XmppMessageRequest_Type +} + +func (m *XmppMessageRequest) GetFromJid() string { + if m != nil && m.FromJid != nil { + return *m.FromJid + } + return "" +} + +type XmppMessageResponse struct { + Status []XmppMessageResponse_XmppMessageStatus `protobuf:"varint,1,rep,name=status,enum=appengine.XmppMessageResponse_XmppMessageStatus" json:"status,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *XmppMessageResponse) Reset() { *m = XmppMessageResponse{} } +func (m *XmppMessageResponse) String() string { return proto.CompactTextString(m) } +func (*XmppMessageResponse) ProtoMessage() {} + +func (m *XmppMessageResponse) GetStatus() []XmppMessageResponse_XmppMessageStatus { + if m != nil { + return m.Status + } + return nil +} + +type XmppSendPresenceRequest struct { + Jid *string `protobuf:"bytes,1,req,name=jid" json:"jid,omitempty"` + Type *string `protobuf:"bytes,2,opt,name=type" json:"type,omitempty"` + Show *string `protobuf:"bytes,3,opt,name=show" json:"show,omitempty"` + Status *string `protobuf:"bytes,4,opt,name=status" json:"status,omitempty"` + FromJid *string `protobuf:"bytes,5,opt,name=from_jid" json:"from_jid,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *XmppSendPresenceRequest) Reset() { *m = XmppSendPresenceRequest{} } +func (m *XmppSendPresenceRequest) String() 
string { return proto.CompactTextString(m) } +func (*XmppSendPresenceRequest) ProtoMessage() {} + +func (m *XmppSendPresenceRequest) GetJid() string { + if m != nil && m.Jid != nil { + return *m.Jid + } + return "" +} + +func (m *XmppSendPresenceRequest) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +func (m *XmppSendPresenceRequest) GetShow() string { + if m != nil && m.Show != nil { + return *m.Show + } + return "" +} + +func (m *XmppSendPresenceRequest) GetStatus() string { + if m != nil && m.Status != nil { + return *m.Status + } + return "" +} + +func (m *XmppSendPresenceRequest) GetFromJid() string { + if m != nil && m.FromJid != nil { + return *m.FromJid + } + return "" +} + +type XmppSendPresenceResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *XmppSendPresenceResponse) Reset() { *m = XmppSendPresenceResponse{} } +func (m *XmppSendPresenceResponse) String() string { return proto.CompactTextString(m) } +func (*XmppSendPresenceResponse) ProtoMessage() {} + +type XmppInviteRequest struct { + Jid *string `protobuf:"bytes,1,req,name=jid" json:"jid,omitempty"` + FromJid *string `protobuf:"bytes,2,opt,name=from_jid" json:"from_jid,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *XmppInviteRequest) Reset() { *m = XmppInviteRequest{} } +func (m *XmppInviteRequest) String() string { return proto.CompactTextString(m) } +func (*XmppInviteRequest) ProtoMessage() {} + +func (m *XmppInviteRequest) GetJid() string { + if m != nil && m.Jid != nil { + return *m.Jid + } + return "" +} + +func (m *XmppInviteRequest) GetFromJid() string { + if m != nil && m.FromJid != nil { + return *m.FromJid + } + return "" +} + +type XmppInviteResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *XmppInviteResponse) Reset() { *m = XmppInviteResponse{} } +func (m *XmppInviteResponse) String() string { return proto.CompactTextString(m) } +func (*XmppInviteResponse) ProtoMessage() {} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.proto b/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.proto new file mode 100644 index 0000000..472d52e --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.proto @@ -0,0 +1,83 @@ +syntax = "proto2"; +option go_package = "xmpp"; + +package appengine; + +message XmppServiceError { + enum ErrorCode { + UNSPECIFIED_ERROR = 1; + INVALID_JID = 2; + NO_BODY = 3; + INVALID_XML = 4; + INVALID_TYPE = 5; + INVALID_SHOW = 6; + EXCEEDED_MAX_SIZE = 7; + APPID_ALIAS_REQUIRED = 8; + NONDEFAULT_MODULE = 9; + } +} + +message PresenceRequest { + required string jid = 1; + optional string from_jid = 2; +} + +message PresenceResponse { + enum SHOW { + NORMAL = 0; + AWAY = 1; + DO_NOT_DISTURB = 2; + CHAT = 3; + EXTENDED_AWAY = 4; + } + + required bool is_available = 1; + optional SHOW presence = 2; + optional bool valid = 3; +} + +message BulkPresenceRequest { + repeated string jid = 1; + optional string from_jid = 2; +} + +message BulkPresenceResponse { + repeated PresenceResponse presence_response = 1; +} + +message XmppMessageRequest { + repeated string jid = 1; + required string body = 2; + optional bool raw_xml = 3 [ default = false ]; + optional string type = 4 [ default = "chat" ]; + optional string from_jid = 5; +} + +message XmppMessageResponse { + enum XmppMessageStatus { + NO_ERROR = 0; + INVALID_JID = 1; + OTHER_ERROR = 2; + } + + repeated XmppMessageStatus status = 1; +} + +message XmppSendPresenceRequest { + 
required string jid = 1; + optional string type = 2; + optional string show = 3; + optional string status = 4; + optional string from_jid = 5; +} + +message XmppSendPresenceResponse { +} + +message XmppInviteRequest { + required string jid = 1; + optional string from_jid = 2; +} + +message XmppInviteResponse { +} diff --git a/vendor/google.golang.org/appengine/log/api.go b/vendor/google.golang.org/appengine/log/api.go new file mode 100644 index 0000000..24d5860 --- /dev/null +++ b/vendor/google.golang.org/appengine/log/api.go @@ -0,0 +1,40 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package log + +// This file implements the logging API. + +import ( + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" +) + +// Debugf formats its arguments according to the format, analogous to fmt.Printf, +// and records the text as a log message at Debug level. The message will be associated +// with the request linked with the provided context. +func Debugf(ctx context.Context, format string, args ...interface{}) { + internal.Logf(ctx, 0, format, args...) +} + +// Infof is like Debugf, but at Info level. +func Infof(ctx context.Context, format string, args ...interface{}) { + internal.Logf(ctx, 1, format, args...) +} + +// Warningf is like Debugf, but at Warning level. +func Warningf(ctx context.Context, format string, args ...interface{}) { + internal.Logf(ctx, 2, format, args...) +} + +// Errorf is like Debugf, but at Error level. +func Errorf(ctx context.Context, format string, args ...interface{}) { + internal.Logf(ctx, 3, format, args...) +} + +// Criticalf is like Debugf, but at Critical level. +func Criticalf(ctx context.Context, format string, args ...interface{}) { + internal.Logf(ctx, 4, format, args...) +} diff --git a/vendor/google.golang.org/appengine/log/log.go b/vendor/google.golang.org/appengine/log/log.go new file mode 100644 index 0000000..b54fe47 --- /dev/null +++ b/vendor/google.golang.org/appengine/log/log.go @@ -0,0 +1,323 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +/* +Package log provides the means of querying an application's logs from +within an App Engine application. + +Example: + c := appengine.NewContext(r) + query := &log.Query{ + AppLogs: true, + Versions: []string{"1"}, + } + + for results := query.Run(c); ; { + record, err := results.Next() + if err == log.Done { + log.Infof(c, "Done processing results") + break + } + if err != nil { + log.Errorf(c, "Failed to retrieve next log: %v", err) + break + } + log.Infof(c, "Saw record %v", record) + } +*/ +package log // import "google.golang.org/appengine/log" + +import ( + "errors" + "fmt" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine" + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/log" +) + +// Query defines a logs query. +type Query struct { + // Start time specifies the earliest log to return (inclusive). + StartTime time.Time + + // End time specifies the latest log to return (exclusive). + EndTime time.Time + + // Offset specifies a position within the log stream to resume reading from, + // and should come from a previously returned Record's field of the same name. 
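+ // A short sketch of the cursor round-trip (results is a hypothetical
+ // *Result from an earlier query):
+ //
+ //	rec, _ := results.Next()
+ //	next := &Query{Offset: rec.Offset}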
+ Offset []byte + + // Incomplete controls whether active (incomplete) requests should be included. + Incomplete bool + + // AppLogs indicates if application-level logs should be included. + AppLogs bool + + // ApplyMinLevel indicates if MinLevel should be used to filter results. + ApplyMinLevel bool + + // If ApplyMinLevel is true, only logs for requests with at least one + // application log of MinLevel or higher will be returned. + MinLevel int + + // Versions is the major version IDs whose logs should be retrieved. + // Logs for specific modules can be retrieved by specifying versions + // in the form "module:version"; the default module is used if no module + // is specified. + Versions []string + + // A list of requests to search for instead of a time-based scan. Cannot be + // combined with filtering options such as StartTime, EndTime, Offset, + // Incomplete, ApplyMinLevel, or Versions. + RequestIDs []string +} + +// AppLog represents a single application-level log. +type AppLog struct { + Time time.Time + Level int + Message string +} + +// Record contains all the information for a single web request. +type Record struct { + AppID string + ModuleID string + VersionID string + RequestID []byte + IP string + Nickname string + AppEngineRelease string + + // The time when this request started. + StartTime time.Time + + // The time when this request finished. + EndTime time.Time + + // Opaque cursor into the result stream. + Offset []byte + + // The time required to process the request. + Latency time.Duration + MCycles int64 + Method string + Resource string + HTTPVersion string + Status int32 + + // The size of the response sent back to the client, in bytes. + ResponseSize int64 + Referrer string + UserAgent string + URLMapEntry string + Combined string + Host string + + // The estimated cost of this request, in dollars. + Cost float64 + TaskQueueName string + TaskName string + WasLoadingRequest bool + PendingTime time.Duration + Finished bool + AppLogs []AppLog + + // Mostly-unique identifier for the instance that handled the request, if available. + InstanceID string +} + +// Result represents the result of a query. +type Result struct { + logs []*Record + context context.Context + request *pb.LogReadRequest + resultsSeen bool + err error +} + +// Next returns the next log record, or Done as the error when no more results are available. +func (qr *Result) Next() (*Record, error) { + if qr.err != nil { + return nil, qr.err + } + if len(qr.logs) > 0 { + lr := qr.logs[0] + qr.logs = qr.logs[1:] + return lr, nil + } + + if qr.request.Offset == nil && qr.resultsSeen { + return nil, Done + } + + if err := qr.run(); err != nil { + // Errors here may be retried, so don't store the error. + return nil, err + } + + return qr.Next() +} + +// Done is returned when a query iteration has completed. +var Done = errors.New("log: query has no more results") + +// protoToAppLogs takes as input an array of pointers to LogLines, the internal +// Protocol Buffer representation of a single application-level log, +// and converts it to an array of AppLogs, the external representation +// of an application-level log.
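+// Times in the proto are expressed in microseconds since the Unix epoch and
+// are converted to time.Time values here.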
+func protoToAppLogs(logLines []*pb.LogLine) []AppLog { + appLogs := make([]AppLog, len(logLines)) + + for i, line := range logLines { + appLogs[i] = AppLog{ + Time: time.Unix(0, *line.Time*1e3), + Level: int(*line.Level), + Message: *line.LogMessage, + } + } + + return appLogs +} + +// protoToRecord converts a RequestLog, the internal Protocol Buffer +// representation of a single request-level log, to a Record, its +// corresponding external representation. +func protoToRecord(rl *pb.RequestLog) *Record { + offset, err := proto.Marshal(rl.Offset) + if err != nil { + offset = nil + } + return &Record{ + AppID: *rl.AppId, + ModuleID: rl.GetModuleId(), + VersionID: *rl.VersionId, + RequestID: rl.RequestId, + Offset: offset, + IP: *rl.Ip, + Nickname: rl.GetNickname(), + AppEngineRelease: string(rl.GetAppEngineRelease()), + StartTime: time.Unix(0, *rl.StartTime*1e3), + EndTime: time.Unix(0, *rl.EndTime*1e3), + Latency: time.Duration(*rl.Latency) * time.Microsecond, + MCycles: *rl.Mcycles, + Method: *rl.Method, + Resource: *rl.Resource, + HTTPVersion: *rl.HttpVersion, + Status: *rl.Status, + ResponseSize: *rl.ResponseSize, + Referrer: rl.GetReferrer(), + UserAgent: rl.GetUserAgent(), + URLMapEntry: *rl.UrlMapEntry, + Combined: *rl.Combined, + Host: rl.GetHost(), + Cost: rl.GetCost(), + TaskQueueName: rl.GetTaskQueueName(), + TaskName: rl.GetTaskName(), + WasLoadingRequest: rl.GetWasLoadingRequest(), + PendingTime: time.Duration(rl.GetPendingTime()) * time.Microsecond, + Finished: rl.GetFinished(), + AppLogs: protoToAppLogs(rl.Line), + InstanceID: string(rl.GetCloneKey()), + } +} + +// Run starts a query for log records, which contain request and application +// level log information. +func (params *Query) Run(c context.Context) *Result { + req, err := makeRequest(params, internal.FullyQualifiedAppID(c), appengine.VersionID(c)) + return &Result{ + context: c, + request: req, + err: err, + } +} + +func makeRequest(params *Query, appID, versionID string) (*pb.LogReadRequest, error) { + req := &pb.LogReadRequest{} + req.AppId = &appID + if !params.StartTime.IsZero() { + req.StartTime = proto.Int64(params.StartTime.UnixNano() / 1e3) + } + if !params.EndTime.IsZero() { + req.EndTime = proto.Int64(params.EndTime.UnixNano() / 1e3) + } + if len(params.Offset) > 0 { + var offset pb.LogOffset + if err := proto.Unmarshal(params.Offset, &offset); err != nil { + return nil, fmt.Errorf("bad Offset: %v", err) + } + req.Offset = &offset + } + if params.Incomplete { + req.IncludeIncomplete = ¶ms.Incomplete + } + if params.AppLogs { + req.IncludeAppLogs = ¶ms.AppLogs + } + if params.ApplyMinLevel { + req.MinimumLogLevel = proto.Int32(int32(params.MinLevel)) + } + if params.Versions == nil { + // If no versions were specified, default to the default module at + // the major version being used by this module. 
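+ // e.g. a hypothetical versionID "v12.20180410t000000" is truncated to "v12" below.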
+ if i := strings.Index(versionID, "."); i >= 0 { + versionID = versionID[:i] + } + req.VersionId = []string{versionID} + } else { + req.ModuleVersion = make([]*pb.LogModuleVersion, 0, len(params.Versions)) + for _, v := range params.Versions { + var m *string + if i := strings.Index(v, ":"); i >= 0 { + m, v = proto.String(v[:i]), v[i+1:] + } + req.ModuleVersion = append(req.ModuleVersion, &pb.LogModuleVersion{ + ModuleId: m, + VersionId: proto.String(v), + }) + } + } + if params.RequestIDs != nil { + ids := make([][]byte, len(params.RequestIDs)) + for i, v := range params.RequestIDs { + ids[i] = []byte(v) + } + req.RequestId = ids + } + + return req, nil +} + +// run takes the query Result produced by a call to Run and updates it with +// more Records. The updated Result contains a new set of logs as well as an +// offset to where more logs can be found. We also convert the items in the +// response from their internal representations to external versions of the +// same structs. +func (r *Result) run() error { + res := &pb.LogReadResponse{} + if err := internal.Call(r.context, "logservice", "Read", r.request, res); err != nil { + return err + } + + r.logs = make([]*Record, len(res.Log)) + r.request.Offset = res.Offset + r.resultsSeen = true + + for i, log := range res.Log { + r.logs[i] = protoToRecord(log) + } + + return nil +} + +func init() { + internal.RegisterErrorCodeMap("logservice", pb.LogServiceError_ErrorCode_name) +} diff --git a/vendor/google.golang.org/appengine/log/log_test.go b/vendor/google.golang.org/appengine/log/log_test.go new file mode 100644 index 0000000..726468e --- /dev/null +++ b/vendor/google.golang.org/appengine/log/log_test.go @@ -0,0 +1,112 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package log + +import ( + "reflect" + "testing" + "time" + + "github.com/golang/protobuf/proto" + + pb "google.golang.org/appengine/internal/log" +) + +func TestQueryToRequest(t *testing.T) { + testCases := []struct { + desc string + query *Query + want *pb.LogReadRequest + }{ + { + desc: "Empty", + query: &Query{}, + want: &pb.LogReadRequest{ + AppId: proto.String("s~fake"), + VersionId: []string{"v12"}, + }, + }, + { + desc: "Versions", + query: &Query{ + Versions: []string{"alpha", "backend:beta"}, + }, + want: &pb.LogReadRequest{ + AppId: proto.String("s~fake"), + ModuleVersion: []*pb.LogModuleVersion{ + { + VersionId: proto.String("alpha"), + }, { + ModuleId: proto.String("backend"), + VersionId: proto.String("beta"), + }, + }, + }, + }, + } + + for _, tt := range testCases { + req, err := makeRequest(tt.query, "s~fake", "v12") + + if err != nil { + t.Errorf("%s: got err %v, want nil", tt.desc, err) + continue + } + if !proto.Equal(req, tt.want) { + t.Errorf("%s request:\ngot %v\nwant %v", tt.desc, req, tt.want) + } + } +} + +func TestProtoToRecord(t *testing.T) { + // We deliberately leave ModuleId and other optional fields unset. 
+ p := &pb.RequestLog{ + AppId: proto.String("s~fake"), + VersionId: proto.String("1"), + RequestId: []byte("deadbeef"), + Ip: proto.String("127.0.0.1"), + StartTime: proto.Int64(431044244000000), + EndTime: proto.Int64(431044724000000), + Latency: proto.Int64(480000000), + Mcycles: proto.Int64(7), + Method: proto.String("GET"), + Resource: proto.String("/app"), + HttpVersion: proto.String("1.1"), + Status: proto.Int32(418), + ResponseSize: proto.Int64(1337), + UrlMapEntry: proto.String("_go_app"), + Combined: proto.String("apache log"), + } + // Sanity check that all required fields are set. + if _, err := proto.Marshal(p); err != nil { + t.Fatalf("proto.Marshal: %v", err) + } + want := &Record{ + AppID: "s~fake", + ModuleID: "default", + VersionID: "1", + RequestID: []byte("deadbeef"), + IP: "127.0.0.1", + StartTime: time.Date(1983, 8, 29, 22, 30, 44, 0, time.UTC), + EndTime: time.Date(1983, 8, 29, 22, 38, 44, 0, time.UTC), + Latency: 8 * time.Minute, + MCycles: 7, + Method: "GET", + Resource: "/app", + HTTPVersion: "1.1", + Status: 418, + ResponseSize: 1337, + URLMapEntry: "_go_app", + Combined: "apache log", + Finished: true, + AppLogs: []AppLog{}, + } + got := protoToRecord(p) + // Coerce locations to UTC since otherwise they will be in the local time zone. + got.StartTime, got.EndTime = got.StartTime.UTC(), got.EndTime.UTC() + if !reflect.DeepEqual(got, want) { + t.Errorf("protoToRecord:\ngot: %v\nwant: %v", got, want) + } +} diff --git a/vendor/google.golang.org/appengine/mail/mail.go b/vendor/google.golang.org/appengine/mail/mail.go new file mode 100644 index 0000000..f7955aa --- /dev/null +++ b/vendor/google.golang.org/appengine/mail/mail.go @@ -0,0 +1,123 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +/* +Package mail provides the means of sending email from an +App Engine application. + +Example: + msg := &mail.Message{ + Sender: "romeo@montague.com", + To: []string{"Juliet <juliet@capulet.org>"}, + Subject: "See you tonight", + Body: "Don't forget our plans. Hark, 'til later.", + } + if err := mail.Send(c, msg); err != nil { + log.Errorf(c, "Alas, my user, the email failed to sendeth: %v", err) + } +*/ +package mail // import "google.golang.org/appengine/mail" + +import ( + "net/mail" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + bpb "google.golang.org/appengine/internal/base" + pb "google.golang.org/appengine/internal/mail" +) + +// A Message represents an email message. +// Addresses may be of any form permitted by RFC 822. +type Message struct { + // Sender must be set, and must be either an application admin + // or the currently signed-in user. + Sender string + ReplyTo string // may be empty + + // At least one of these slices must have a non-zero length, + // except when calling SendToAdmins. + To, Cc, Bcc []string + + Subject string + + // At least one of Body or HTMLBody must be non-empty. + Body string + HTMLBody string + + Attachments []Attachment + + // Extra mail headers. + // See https://cloud.google.com/appengine/docs/go/mail/ + // for permissible headers. + Headers mail.Header +} + +// An Attachment represents an email attachment. +type Attachment struct { + // Name must be set to a valid file name. + Name string + Data []byte + ContentID string +} + +// Send sends an email message.
+func Send(c context.Context, msg *Message) error { + return send(c, "Send", msg) +} + +// SendToAdmins sends an email message to the application's administrators. +func SendToAdmins(c context.Context, msg *Message) error { + return send(c, "SendToAdmins", msg) +} + +func send(c context.Context, method string, msg *Message) error { + req := &pb.MailMessage{ + Sender: &msg.Sender, + To: msg.To, + Cc: msg.Cc, + Bcc: msg.Bcc, + Subject: &msg.Subject, + } + if msg.ReplyTo != "" { + req.ReplyTo = &msg.ReplyTo + } + if msg.Body != "" { + req.TextBody = &msg.Body + } + if msg.HTMLBody != "" { + req.HtmlBody = &msg.HTMLBody + } + if len(msg.Attachments) > 0 { + req.Attachment = make([]*pb.MailAttachment, len(msg.Attachments)) + for i, att := range msg.Attachments { + req.Attachment[i] = &pb.MailAttachment{ + FileName: proto.String(att.Name), + Data: att.Data, + } + if att.ContentID != "" { + req.Attachment[i].ContentID = proto.String(att.ContentID) + } + } + } + for key, vs := range msg.Headers { + for _, v := range vs { + req.Header = append(req.Header, &pb.MailHeader{ + Name: proto.String(key), + Value: proto.String(v), + }) + } + } + res := &bpb.VoidProto{} + if err := internal.Call(c, "mail", method, req, res); err != nil { + return err + } + return nil +} + +func init() { + internal.RegisterErrorCodeMap("mail", pb.MailServiceError_ErrorCode_name) +} diff --git a/vendor/google.golang.org/appengine/mail/mail_test.go b/vendor/google.golang.org/appengine/mail/mail_test.go new file mode 100644 index 0000000..7502c59 --- /dev/null +++ b/vendor/google.golang.org/appengine/mail/mail_test.go @@ -0,0 +1,65 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package mail + +import ( + "testing" + + "github.com/golang/protobuf/proto" + + "google.golang.org/appengine/internal/aetesting" + basepb "google.golang.org/appengine/internal/base" + pb "google.golang.org/appengine/internal/mail" +) + +func TestMessageConstruction(t *testing.T) { + var got *pb.MailMessage + c := aetesting.FakeSingleContext(t, "mail", "Send", func(in *pb.MailMessage, out *basepb.VoidProto) error { + got = in + return nil + }) + + msg := &Message{ + Sender: "dsymonds@example.com", + To: []string{"nigeltao@example.com"}, + Body: "Hey, lunch time?", + Attachments: []Attachment{ + // Regression test for a prod bug. The address of a range variable was used when + // constructing the outgoing proto, so multiple attachments used the same name. 
+ { + Name: "att1.txt", + Data: []byte("data1"), + ContentID: "<att1>", + }, + { + Name: "att2.txt", + Data: []byte("data2"), + }, + }, + } + if err := Send(c, msg); err != nil { + t.Fatalf("Send: %v", err) + } + want := &pb.MailMessage{ + Sender: proto.String("dsymonds@example.com"), + To: []string{"nigeltao@example.com"}, + Subject: proto.String(""), + TextBody: proto.String("Hey, lunch time?"), + Attachment: []*pb.MailAttachment{ + { + FileName: proto.String("att1.txt"), + Data: []byte("data1"), + ContentID: proto.String("<att1>"), + }, + { + FileName: proto.String("att2.txt"), + Data: []byte("data2"), + }, + }, + } + if !proto.Equal(got, want) { + t.Errorf("Bad proto for %+v\n got %v\nwant %v", msg, got, want) + } +} diff --git a/vendor/google.golang.org/appengine/memcache/memcache.go b/vendor/google.golang.org/appengine/memcache/memcache.go new file mode 100644 index 0000000..d8eed4b --- /dev/null +++ b/vendor/google.golang.org/appengine/memcache/memcache.go @@ -0,0 +1,526 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package memcache provides a client for App Engine's distributed in-memory +// key-value store for small chunks of arbitrary data. +// +// The fundamental operations get and set items, keyed by a string. +// +// item0, err := memcache.Get(c, "key") +// if err != nil && err != memcache.ErrCacheMiss { +// return err +// } +// if err == nil { +// fmt.Fprintf(w, "memcache hit: Key=%q Val=[% x]\n", item0.Key, item0.Value) +// } else { +// fmt.Fprintf(w, "memcache miss\n") +// } +// +// and +// +// item1 := &memcache.Item{ +// Key: "foo", +// Value: []byte("bar"), +// } +// if err := memcache.Set(c, item1); err != nil { +// return err +// } +package memcache // import "google.golang.org/appengine/memcache" + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "errors" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine" + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/memcache" +) + +var ( + // ErrCacheMiss means that an operation failed + // because the item wasn't present. + ErrCacheMiss = errors.New("memcache: cache miss") + // ErrCASConflict means that a CompareAndSwap call failed due to the + // cached value being modified between the Get and the CompareAndSwap. + // If the cached value was simply evicted rather than replaced, + // ErrNotStored will be returned instead. + ErrCASConflict = errors.New("memcache: compare-and-swap conflict") + // ErrNoStats means that no statistics were available. + ErrNoStats = errors.New("memcache: no statistics available") + // ErrNotStored means that a conditional write operation (i.e. Add or + // CompareAndSwap) failed because the condition was not satisfied. + ErrNotStored = errors.New("memcache: item not stored") + // ErrServerError means that a server error occurred. + ErrServerError = errors.New("memcache: server error") +) + +// Item is the unit of memcache gets and sets. +type Item struct { + // Key is the Item's key (250 bytes maximum). + Key string + // Value is the Item's value. + Value []byte + // Object is the Item's value for use with a Codec. + Object interface{} + // Flags are server-opaque flags whose semantics are entirely up to the + // App Engine app. + Flags uint32 + // Expiration is the maximum duration that the item will stay + // in the cache. + // The zero value means the Item has no expiration time.
+ // Subsecond precision is ignored. + // This is not set when getting items. + Expiration time.Duration + // casID is a client-opaque value used for compare-and-swap operations. + // Zero means that compare-and-swap is not used. + casID uint64 +} + +const ( + secondsIn30Years = 60 * 60 * 24 * 365 * 30 // from memcache server code + thirtyYears = time.Duration(secondsIn30Years) * time.Second +) + +// protoToItem converts a protocol buffer item to a Go struct. +func protoToItem(p *pb.MemcacheGetResponse_Item) *Item { + return &Item{ + Key: string(p.Key), + Value: p.Value, + Flags: p.GetFlags(), + casID: p.GetCasId(), + } +} + +// If err is an appengine.MultiError, return its first element. Otherwise, return err. +func singleError(err error) error { + if me, ok := err.(appengine.MultiError); ok { + return me[0] + } + return err +} + +// Get gets the item for the given key. ErrCacheMiss is returned for a memcache +// cache miss. The key must be at most 250 bytes in length. +func Get(c context.Context, key string) (*Item, error) { + m, err := GetMulti(c, []string{key}) + if err != nil { + return nil, err + } + if _, ok := m[key]; !ok { + return nil, ErrCacheMiss + } + return m[key], nil +} + +// GetMulti is a batch version of Get. The returned map from keys to items may +// have fewer elements than the input slice, due to memcache cache misses. +// Each key must be at most 250 bytes in length. +func GetMulti(c context.Context, key []string) (map[string]*Item, error) { + if len(key) == 0 { + return nil, nil + } + keyAsBytes := make([][]byte, len(key)) + for i, k := range key { + keyAsBytes[i] = []byte(k) + } + req := &pb.MemcacheGetRequest{ + Key: keyAsBytes, + ForCas: proto.Bool(true), + } + res := &pb.MemcacheGetResponse{} + if err := internal.Call(c, "memcache", "Get", req, res); err != nil { + return nil, err + } + m := make(map[string]*Item, len(res.Item)) + for _, p := range res.Item { + t := protoToItem(p) + m[t.Key] = t + } + return m, nil +} + +// Delete deletes the item for the given key. +// ErrCacheMiss is returned if the specified item can not be found. +// The key must be at most 250 bytes in length. +func Delete(c context.Context, key string) error { + return singleError(DeleteMulti(c, []string{key})) +} + +// DeleteMulti is a batch version of Delete. +// If any keys cannot be found, an appengine.MultiError is returned. +// Each key must be at most 250 bytes in length. +func DeleteMulti(c context.Context, key []string) error { + if len(key) == 0 { + return nil + } + req := &pb.MemcacheDeleteRequest{ + Item: make([]*pb.MemcacheDeleteRequest_Item, len(key)), + } + for i, k := range key { + req.Item[i] = &pb.MemcacheDeleteRequest_Item{Key: []byte(k)} + } + res := &pb.MemcacheDeleteResponse{} + if err := internal.Call(c, "memcache", "Delete", req, res); err != nil { + return err + } + if len(res.DeleteStatus) != len(key) { + return ErrServerError + } + me, any := make(appengine.MultiError, len(key)), false + for i, s := range res.DeleteStatus { + switch s { + case pb.MemcacheDeleteResponse_DELETED: + // OK + case pb.MemcacheDeleteResponse_NOT_FOUND: + me[i] = ErrCacheMiss + any = true + default: + me[i] = ErrServerError + any = true + } + } + if any { + return me + } + return nil +} + +// Increment atomically increments the decimal value in the given key +// by delta and returns the new value. The value must fit in a uint64. +// Overflow wraps around, and underflow is capped to zero. The +// provided delta may be negative. 
If the key doesn't exist in +// memcache, the provided initial value is used to atomically +// populate it before the delta is applied. +// The key must be at most 250 bytes in length. +func Increment(c context.Context, key string, delta int64, initialValue uint64) (newValue uint64, err error) { + return incr(c, key, delta, &initialValue) +} + +// IncrementExisting works like Increment but assumes that the key +// already exists in memcache and doesn't take an initial value. +// IncrementExisting can save work if calculating the initial value is +// expensive. +// An error is returned if the specified item can not be found. +func IncrementExisting(c context.Context, key string, delta int64) (newValue uint64, err error) { + return incr(c, key, delta, nil) +} + +func incr(c context.Context, key string, delta int64, initialValue *uint64) (newValue uint64, err error) { + req := &pb.MemcacheIncrementRequest{ + Key: []byte(key), + InitialValue: initialValue, + } + if delta >= 0 { + req.Delta = proto.Uint64(uint64(delta)) + } else { + req.Delta = proto.Uint64(uint64(-delta)) + req.Direction = pb.MemcacheIncrementRequest_DECREMENT.Enum() + } + res := &pb.MemcacheIncrementResponse{} + err = internal.Call(c, "memcache", "Increment", req, res) + if err != nil { + return + } + if res.NewValue == nil { + return 0, ErrCacheMiss + } + return *res.NewValue, nil +} + +// set sets the given items using the given conflict resolution policy. +// appengine.MultiError may be returned. +func set(c context.Context, item []*Item, value [][]byte, policy pb.MemcacheSetRequest_SetPolicy) error { + if len(item) == 0 { + return nil + } + req := &pb.MemcacheSetRequest{ + Item: make([]*pb.MemcacheSetRequest_Item, len(item)), + } + for i, t := range item { + p := &pb.MemcacheSetRequest_Item{ + Key: []byte(t.Key), + } + if value == nil { + p.Value = t.Value + } else { + p.Value = value[i] + } + if t.Flags != 0 { + p.Flags = proto.Uint32(t.Flags) + } + if t.Expiration != 0 { + // In the .proto file, MemcacheSetRequest_Item uses a fixed32 (i.e. unsigned) + // for expiration time, while MemcacheGetRequest_Item uses int32 (i.e. signed). + // Throughout this .go file, we use int32. + // Also, in the proto, the expiration value is either a duration (in seconds) + // or an absolute Unix timestamp (in seconds), depending on whether the + // value is less than or greater than or equal to 30 years, respectively. + if t.Expiration < time.Second { + // Because an Expiration of 0 means no expiration, we take + // care here to translate an item with an expiration + // Duration between 0-1 seconds as immediately expiring + // (saying it expired a few seconds ago), rather than + // rounding it down to 0 and making it live forever. 
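+ // e.g. an Expiration of 500*time.Millisecond takes this branch and is
+ // sent as an absolute time a few seconds in the past.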
+ p.ExpirationTime = proto.Uint32(uint32(time.Now().Unix()) - 5) + } else if t.Expiration >= thirtyYears { + p.ExpirationTime = proto.Uint32(uint32(time.Now().Unix()) + uint32(t.Expiration/time.Second)) + } else { + p.ExpirationTime = proto.Uint32(uint32(t.Expiration / time.Second)) + } + } + if t.casID != 0 { + p.CasId = proto.Uint64(t.casID) + p.ForCas = proto.Bool(true) + } + p.SetPolicy = policy.Enum() + req.Item[i] = p + } + res := &pb.MemcacheSetResponse{} + if err := internal.Call(c, "memcache", "Set", req, res); err != nil { + return err + } + if len(res.SetStatus) != len(item) { + return ErrServerError + } + me, any := make(appengine.MultiError, len(item)), false + for i, st := range res.SetStatus { + var err error + switch st { + case pb.MemcacheSetResponse_STORED: + // OK + case pb.MemcacheSetResponse_NOT_STORED: + err = ErrNotStored + case pb.MemcacheSetResponse_EXISTS: + err = ErrCASConflict + default: + err = ErrServerError + } + if err != nil { + me[i] = err + any = true + } + } + if any { + return me + } + return nil +} + +// Set writes the given item, unconditionally. +func Set(c context.Context, item *Item) error { + return singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_SET)) +} + +// SetMulti is a batch version of Set. +// appengine.MultiError may be returned. +func SetMulti(c context.Context, item []*Item) error { + return set(c, item, nil, pb.MemcacheSetRequest_SET) +} + +// Add writes the given item, if no value already exists for its key. +// ErrNotStored is returned if that condition is not met. +func Add(c context.Context, item *Item) error { + return singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_ADD)) +} + +// AddMulti is a batch version of Add. +// appengine.MultiError may be returned. +func AddMulti(c context.Context, item []*Item) error { + return set(c, item, nil, pb.MemcacheSetRequest_ADD) +} + +// CompareAndSwap writes the given item that was previously returned by Get, +// if the value was neither modified nor evicted between the Get and the +// CompareAndSwap calls. The item's Key should not change between calls but +// all other item fields may differ. +// ErrCASConflict is returned if the value was modified in between the calls. +// ErrNotStored is returned if the value was evicted in between the calls. +func CompareAndSwap(c context.Context, item *Item) error { + return singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_CAS)) +} + +// CompareAndSwapMulti is a batch version of CompareAndSwap. +// appengine.MultiError may be returned. +func CompareAndSwapMulti(c context.Context, item []*Item) error { + return set(c, item, nil, pb.MemcacheSetRequest_CAS) +} + +// Codec represents a symmetric pair of functions that implement a codec. +// Items stored into or retrieved from memcache using a Codec have their +// values marshaled or unmarshaled. +// +// All the methods provided for Codec behave analogously to the package-level +// function with the same name. +type Codec struct { + Marshal func(interface{}) ([]byte, error) + Unmarshal func([]byte, interface{}) error +} + +// Get gets the item for the given key and decodes the obtained value into v. +// ErrCacheMiss is returned for a memcache cache miss. +// The key must be at most 250 bytes in length.
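+//
+// A short sketch (assuming some gob-encodable Profile type):
+//
+//	var p Profile
+//	if _, err := memcache.Gob.Get(c, "profile:1", &p); err != nil && err != memcache.ErrCacheMiss {
+//		// handle the lookup failure
+//	}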
+// Get gets the item for the given key and decodes the obtained value into v.
+// ErrCacheMiss is returned for a memcache cache miss.
+// The key must be at most 250 bytes in length.
+func (cd Codec) Get(c context.Context, key string, v interface{}) (*Item, error) {
+	i, err := Get(c, key)
+	if err != nil {
+		return nil, err
+	}
+	if err := cd.Unmarshal(i.Value, v); err != nil {
+		return nil, err
+	}
+	return i, nil
+}
+
+func (cd Codec) set(c context.Context, items []*Item, policy pb.MemcacheSetRequest_SetPolicy) error {
+	var vs [][]byte
+	var me appengine.MultiError
+	for i, item := range items {
+		v, err := cd.Marshal(item.Object)
+		if err != nil {
+			if me == nil {
+				me = make(appengine.MultiError, len(items))
+			}
+			me[i] = err
+			continue
+		}
+		if me == nil {
+			vs = append(vs, v)
+		}
+	}
+	if me != nil {
+		return me
+	}
+
+	return set(c, items, vs, policy)
+}
+
+// Set writes the given item, unconditionally.
+func (cd Codec) Set(c context.Context, item *Item) error {
+	return singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_SET))
+}
+
+// SetMulti is a batch version of Set.
+// appengine.MultiError may be returned.
+func (cd Codec) SetMulti(c context.Context, items []*Item) error {
+	return cd.set(c, items, pb.MemcacheSetRequest_SET)
+}
+
+// Add writes the given item, if no value already exists for its key.
+// ErrNotStored is returned if that condition is not met.
+func (cd Codec) Add(c context.Context, item *Item) error {
+	return singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_ADD))
+}
+
+// AddMulti is a batch version of Add.
+// appengine.MultiError may be returned.
+func (cd Codec) AddMulti(c context.Context, items []*Item) error {
+	return cd.set(c, items, pb.MemcacheSetRequest_ADD)
+}
+
+// CompareAndSwap writes the given item that was previously returned by Get,
+// if the value was neither modified nor evicted between the Get and the
+// CompareAndSwap calls. The item's Key should not change between calls but
+// all other item fields may differ.
+// ErrCASConflict is returned if the value was modified in between the calls.
+// ErrNotStored is returned if the value was evicted in between the calls.
+func (cd Codec) CompareAndSwap(c context.Context, item *Item) error {
+	return singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_CAS))
+}
+
+// CompareAndSwapMulti is a batch version of CompareAndSwap.
+// appengine.MultiError may be returned.
+func (cd Codec) CompareAndSwapMulti(c context.Context, items []*Item) error {
+	return cd.set(c, items, pb.MemcacheSetRequest_CAS)
+}
+
+var (
+	// Gob is a Codec that uses the gob package.
+	Gob = Codec{gobMarshal, gobUnmarshal}
+	// JSON is a Codec that uses the json package.
+	JSON = Codec{json.Marshal, json.Unmarshal}
+)
+
+func gobMarshal(v interface{}) ([]byte, error) {
+	var buf bytes.Buffer
+	if err := gob.NewEncoder(&buf).Encode(v); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+func gobUnmarshal(data []byte, v interface{}) error {
+	return gob.NewDecoder(bytes.NewBuffer(data)).Decode(v)
+}
+
+// Statistics represents a set of statistics about the memcache cache.
+// This may include items that have expired but have not yet been removed from the cache.
+type Statistics struct {
+	Hits     uint64 // Counter of cache hits
+	Misses   uint64 // Counter of cache misses
+	ByteHits uint64 // Counter of bytes transferred for gets
+
+	Items uint64 // Items currently in the cache
+	Bytes uint64 // Size of all items currently in the cache
+
+	Oldest int64 // Age of access of the oldest item, in seconds
+}
+
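As a usage sketch for the Gob and JSON codecs above (the User type, the key scheme, and ctx are illustrative assumptions, not part of this package):

	import (
		"golang.org/x/net/context"

		"google.golang.org/appengine/memcache"
	)

	type User struct {
		Name string
		Age  int
	}

	func cacheUser(ctx context.Context, u *User) error {
		// With a Codec, the value to store goes in Item.Object, not Item.Value.
		return memcache.JSON.Set(ctx, &memcache.Item{Key: "user:" + u.Name, Object: u})
	}

	func lookupUser(ctx context.Context, name string) (*User, error) {
		var u User
		// Get unmarshals the cached bytes into u, or returns ErrCacheMiss.
		if _, err := memcache.JSON.Get(ctx, "user:"+name, &u); err != nil {
			return nil, err
		}
		return &u, nil
	}
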
+// Stats retrieves the current memcache statistics.
+func Stats(c context.Context) (*Statistics, error) {
+	req := &pb.MemcacheStatsRequest{}
+	res := &pb.MemcacheStatsResponse{}
+	if err := internal.Call(c, "memcache", "Stats", req, res); err != nil {
+		return nil, err
+	}
+	if res.Stats == nil {
+		return nil, ErrNoStats
+	}
+	return &Statistics{
+		Hits:     *res.Stats.Hits,
+		Misses:   *res.Stats.Misses,
+		ByteHits: *res.Stats.ByteHits,
+		Items:    *res.Stats.Items,
+		Bytes:    *res.Stats.Bytes,
+		Oldest:   int64(*res.Stats.OldestItemAge),
+	}, nil
+}
+
+// Flush flushes all items from memcache.
+func Flush(c context.Context) error {
+	req := &pb.MemcacheFlushRequest{}
+	res := &pb.MemcacheFlushResponse{}
+	return internal.Call(c, "memcache", "FlushAll", req, res)
+}
+
+func namespaceMod(m proto.Message, namespace string) {
+	switch m := m.(type) {
+	case *pb.MemcacheDeleteRequest:
+		if m.NameSpace == nil {
+			m.NameSpace = &namespace
+		}
+	case *pb.MemcacheGetRequest:
+		if m.NameSpace == nil {
+			m.NameSpace = &namespace
+		}
+	case *pb.MemcacheIncrementRequest:
+		if m.NameSpace == nil {
+			m.NameSpace = &namespace
+		}
+	case *pb.MemcacheSetRequest:
+		if m.NameSpace == nil {
+			m.NameSpace = &namespace
+		}
+		// MemcacheFlushRequest, MemcacheStatsRequest do not apply namespace.
+	}
+}
+
+func init() {
+	internal.RegisterErrorCodeMap("memcache", pb.MemcacheServiceError_ErrorCode_name)
+	internal.NamespaceMods["memcache"] = namespaceMod
+}
diff --git a/vendor/google.golang.org/appengine/memcache/memcache_test.go b/vendor/google.golang.org/appengine/memcache/memcache_test.go
new file mode 100644
index 0000000..1dc7da4
--- /dev/null
+++ b/vendor/google.golang.org/appengine/memcache/memcache_test.go
@@ -0,0 +1,263 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package memcache
+
+import (
+	"fmt"
+	"testing"
+
+	"google.golang.org/appengine"
+	"google.golang.org/appengine/internal/aetesting"
+	pb "google.golang.org/appengine/internal/memcache"
+)
+
+var errRPC = fmt.Errorf("RPC error")
+
+func TestGetRequest(t *testing.T) {
+	serviceCalled := false
+	apiKey := "lyric"
+
+	c := aetesting.FakeSingleContext(t, "memcache", "Get", func(req *pb.MemcacheGetRequest, _ *pb.MemcacheGetResponse) error {
+		// Test request.
+		if n := len(req.Key); n != 1 {
+			t.Errorf("got %d want 1", n)
+			return nil
+		}
+		if k := string(req.Key[0]); k != apiKey {
+			t.Errorf("got %q want %q", k, apiKey)
+		}
+
+		serviceCalled = true
+		return nil
+	})
+
+	// Test the "forward" path from the API call parameters to the
+	// protobuf request object. (The "backward" path from the
+	// protobuf response object to the API call response,
+	// including the error response, is handled in the next few
+	// tests).
+ Get(c, apiKey) + if !serviceCalled { + t.Error("Service was not called as expected") + } +} + +func TestGetResponseHit(t *testing.T) { + key := "lyric" + value := "Where the buffalo roam" + + c := aetesting.FakeSingleContext(t, "memcache", "Get", func(_ *pb.MemcacheGetRequest, res *pb.MemcacheGetResponse) error { + res.Item = []*pb.MemcacheGetResponse_Item{ + {Key: []byte(key), Value: []byte(value)}, + } + return nil + }) + apiItem, err := Get(c, key) + if apiItem == nil || apiItem.Key != key || string(apiItem.Value) != value { + t.Errorf("got %q, %q want {%q,%q}, nil", apiItem, err, key, value) + } +} + +func TestGetResponseMiss(t *testing.T) { + c := aetesting.FakeSingleContext(t, "memcache", "Get", func(_ *pb.MemcacheGetRequest, res *pb.MemcacheGetResponse) error { + // don't fill in any of the response + return nil + }) + _, err := Get(c, "something") + if err != ErrCacheMiss { + t.Errorf("got %v want ErrCacheMiss", err) + } +} + +func TestGetResponseRPCError(t *testing.T) { + c := aetesting.FakeSingleContext(t, "memcache", "Get", func(_ *pb.MemcacheGetRequest, res *pb.MemcacheGetResponse) error { + return errRPC + }) + + if _, err := Get(c, "something"); err != errRPC { + t.Errorf("got %v want errRPC", err) + } +} + +func TestAddRequest(t *testing.T) { + var apiItem = &Item{ + Key: "lyric", + Value: []byte("Oh, give me a home"), + } + + serviceCalled := false + + c := aetesting.FakeSingleContext(t, "memcache", "Set", func(req *pb.MemcacheSetRequest, _ *pb.MemcacheSetResponse) error { + // Test request. + pbItem := req.Item[0] + if k := string(pbItem.Key); k != apiItem.Key { + t.Errorf("got %q want %q", k, apiItem.Key) + } + if v := string(apiItem.Value); v != string(pbItem.Value) { + t.Errorf("got %q want %q", v, string(pbItem.Value)) + } + if p := *pbItem.SetPolicy; p != pb.MemcacheSetRequest_ADD { + t.Errorf("got %v want %v", p, pb.MemcacheSetRequest_ADD) + } + + serviceCalled = true + return nil + }) + + Add(c, apiItem) + if !serviceCalled { + t.Error("Service was not called as expected") + } +} + +func TestAddResponseStored(t *testing.T) { + c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error { + res.SetStatus = []pb.MemcacheSetResponse_SetStatusCode{pb.MemcacheSetResponse_STORED} + return nil + }) + + if err := Add(c, &Item{}); err != nil { + t.Errorf("got %v want nil", err) + } +} + +func TestAddResponseNotStored(t *testing.T) { + c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error { + res.SetStatus = []pb.MemcacheSetResponse_SetStatusCode{pb.MemcacheSetResponse_NOT_STORED} + return nil + }) + + if err := Add(c, &Item{}); err != ErrNotStored { + t.Errorf("got %v want ErrNotStored", err) + } +} + +func TestAddResponseError(t *testing.T) { + c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error { + res.SetStatus = []pb.MemcacheSetResponse_SetStatusCode{pb.MemcacheSetResponse_ERROR} + return nil + }) + + if err := Add(c, &Item{}); err != ErrServerError { + t.Errorf("got %v want ErrServerError", err) + } +} + +func TestAddResponseRPCError(t *testing.T) { + c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error { + return errRPC + }) + + if err := Add(c, &Item{}); err != errRPC { + t.Errorf("got %v want errRPC", err) + } +} + +func TestSetRequest(t *testing.T) { + var apiItem = &Item{ + Key: "lyric", + Value: 
[]byte("Where the buffalo roam"), + } + + serviceCalled := false + + c := aetesting.FakeSingleContext(t, "memcache", "Set", func(req *pb.MemcacheSetRequest, _ *pb.MemcacheSetResponse) error { + // Test request. + if n := len(req.Item); n != 1 { + t.Errorf("got %d want 1", n) + return nil + } + pbItem := req.Item[0] + if k := string(pbItem.Key); k != apiItem.Key { + t.Errorf("got %q want %q", k, apiItem.Key) + } + if v := string(pbItem.Value); v != string(apiItem.Value) { + t.Errorf("got %q want %q", v, string(apiItem.Value)) + } + if p := *pbItem.SetPolicy; p != pb.MemcacheSetRequest_SET { + t.Errorf("got %v want %v", p, pb.MemcacheSetRequest_SET) + } + + serviceCalled = true + return nil + }) + + Set(c, apiItem) + if !serviceCalled { + t.Error("Service was not called as expected") + } +} + +func TestSetResponse(t *testing.T) { + c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error { + res.SetStatus = []pb.MemcacheSetResponse_SetStatusCode{pb.MemcacheSetResponse_STORED} + return nil + }) + + if err := Set(c, &Item{}); err != nil { + t.Errorf("got %v want nil", err) + } +} + +func TestSetResponseError(t *testing.T) { + c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error { + res.SetStatus = []pb.MemcacheSetResponse_SetStatusCode{pb.MemcacheSetResponse_ERROR} + return nil + }) + + if err := Set(c, &Item{}); err != ErrServerError { + t.Errorf("got %v want ErrServerError", err) + } +} + +func TestNamespaceResetting(t *testing.T) { + namec := make(chan *string, 1) + c0 := aetesting.FakeSingleContext(t, "memcache", "Get", func(req *pb.MemcacheGetRequest, res *pb.MemcacheGetResponse) error { + namec <- req.NameSpace + return errRPC + }) + + // Check that wrapping c0 in a namespace twice works correctly. + c1, err := appengine.Namespace(c0, "A") + if err != nil { + t.Fatalf("appengine.Namespace: %v", err) + } + c2, err := appengine.Namespace(c1, "") // should act as the original context + if err != nil { + t.Fatalf("appengine.Namespace: %v", err) + } + + Get(c0, "key") + if ns := <-namec; ns != nil { + t.Errorf(`Get with c0: ns = %q, want nil`, *ns) + } + + Get(c1, "key") + if ns := <-namec; ns == nil { + t.Error(`Get with c1: ns = nil, want "A"`) + } else if *ns != "A" { + t.Errorf(`Get with c1: ns = %q, want "A"`, *ns) + } + + Get(c2, "key") + if ns := <-namec; ns != nil { + t.Errorf(`Get with c2: ns = %q, want nil`, *ns) + } +} + +func TestGetMultiEmpty(t *testing.T) { + serviceCalled := false + c := aetesting.FakeSingleContext(t, "memcache", "Get", func(req *pb.MemcacheGetRequest, _ *pb.MemcacheGetResponse) error { + serviceCalled = true + return nil + }) + + // Test that the Memcache service is not called when + // GetMulti is passed an empty slice of keys. + GetMulti(c, []string{}) + if serviceCalled { + t.Error("Service was called but should not have been") + } +} diff --git a/vendor/google.golang.org/appengine/module/module.go b/vendor/google.golang.org/appengine/module/module.go new file mode 100644 index 0000000..88e6629 --- /dev/null +++ b/vendor/google.golang.org/appengine/module/module.go @@ -0,0 +1,113 @@ +// Copyright 2013 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +/* +Package module provides functions for interacting with modules. + +The appengine package contains functions that report the identity of the app, +including the module name. 
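+
+Example code (a minimal sketch; c is a request context):
+
+	names, err := module.List(c)
+	if err != nil {
+		return err
+	}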
+*/
+package module // import "google.golang.org/appengine/module"
+
+import (
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine/internal"
+	pb "google.golang.org/appengine/internal/modules"
+)
+
+// List returns the names of modules belonging to this application.
+func List(c context.Context) ([]string, error) {
+	req := &pb.GetModulesRequest{}
+	res := &pb.GetModulesResponse{}
+	err := internal.Call(c, "modules", "GetModules", req, res)
+	return res.Module, err
+}
+
+// NumInstances returns the number of instances of the given module/version.
+// If either argument is the empty string it means the default.
+func NumInstances(c context.Context, module, version string) (int, error) {
+	req := &pb.GetNumInstancesRequest{}
+	if module != "" {
+		req.Module = &module
+	}
+	if version != "" {
+		req.Version = &version
+	}
+	res := &pb.GetNumInstancesResponse{}
+
+	if err := internal.Call(c, "modules", "GetNumInstances", req, res); err != nil {
+		return 0, err
+	}
+	return int(*res.Instances), nil
+}
+
+// SetNumInstances sets the number of instances of the given module.version to the
+// specified value. If either module or version is the empty string it means the
+// default.
+func SetNumInstances(c context.Context, module, version string, instances int) error {
+	req := &pb.SetNumInstancesRequest{}
+	if module != "" {
+		req.Module = &module
+	}
+	if version != "" {
+		req.Version = &version
+	}
+	req.Instances = proto.Int64(int64(instances))
+	res := &pb.SetNumInstancesResponse{}
+	return internal.Call(c, "modules", "SetNumInstances", req, res)
+}
+
+// Versions returns the names of the versions that belong to the specified module.
+// If module is the empty string, it means the default module.
+func Versions(c context.Context, module string) ([]string, error) {
+	req := &pb.GetVersionsRequest{}
+	if module != "" {
+		req.Module = &module
+	}
+	res := &pb.GetVersionsResponse{}
+	err := internal.Call(c, "modules", "GetVersions", req, res)
+	return res.GetVersion(), err
+}
+
+// DefaultVersion returns the default version of the specified module.
+// If module is the empty string, it means the default module.
+func DefaultVersion(c context.Context, module string) (string, error) {
+	req := &pb.GetDefaultVersionRequest{}
+	if module != "" {
+		req.Module = &module
+	}
+	res := &pb.GetDefaultVersionResponse{}
+	err := internal.Call(c, "modules", "GetDefaultVersion", req, res)
+	return res.GetVersion(), err
+}
+
+// Start starts the specified version of the specified module.
+// If either module or version is the empty string, it means the default.
+func Start(c context.Context, module, version string) error {
+	req := &pb.StartModuleRequest{}
+	if module != "" {
+		req.Module = &module
+	}
+	if version != "" {
+		req.Version = &version
+	}
+	res := &pb.StartModuleResponse{}
+	return internal.Call(c, "modules", "StartModule", req, res)
+}
+
+// Stop stops the specified version of the specified module.
+// If either module or version is the empty string, it means the default.
+func Stop(c context.Context, module, version string) error { + req := &pb.StopModuleRequest{} + if module != "" { + req.Module = &module + } + if version != "" { + req.Version = &version + } + res := &pb.StopModuleResponse{} + return internal.Call(c, "modules", "StopModule", req, res) +} diff --git a/vendor/google.golang.org/appengine/module/module_test.go b/vendor/google.golang.org/appengine/module/module_test.go new file mode 100644 index 0000000..73e8971 --- /dev/null +++ b/vendor/google.golang.org/appengine/module/module_test.go @@ -0,0 +1,124 @@ +// Copyright 2013 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package module + +import ( + "reflect" + "testing" + + "github.com/golang/protobuf/proto" + + "google.golang.org/appengine/internal/aetesting" + pb "google.golang.org/appengine/internal/modules" +) + +const version = "test-version" +const module = "test-module" +const instances = 3 + +func TestList(t *testing.T) { + c := aetesting.FakeSingleContext(t, "modules", "GetModules", func(req *pb.GetModulesRequest, res *pb.GetModulesResponse) error { + res.Module = []string{"default", "mod1"} + return nil + }) + got, err := List(c) + if err != nil { + t.Fatalf("List: %v", err) + } + want := []string{"default", "mod1"} + if !reflect.DeepEqual(got, want) { + t.Errorf("List = %v, want %v", got, want) + } +} + +func TestSetNumInstances(t *testing.T) { + c := aetesting.FakeSingleContext(t, "modules", "SetNumInstances", func(req *pb.SetNumInstancesRequest, res *pb.SetNumInstancesResponse) error { + if *req.Module != module { + t.Errorf("Module = %v, want %v", req.Module, module) + } + if *req.Version != version { + t.Errorf("Version = %v, want %v", req.Version, version) + } + if *req.Instances != instances { + t.Errorf("Instances = %v, want %d", req.Instances, instances) + } + return nil + }) + err := SetNumInstances(c, module, version, instances) + if err != nil { + t.Fatalf("SetNumInstances: %v", err) + } +} + +func TestVersions(t *testing.T) { + c := aetesting.FakeSingleContext(t, "modules", "GetVersions", func(req *pb.GetVersionsRequest, res *pb.GetVersionsResponse) error { + if *req.Module != module { + t.Errorf("Module = %v, want %v", req.Module, module) + } + res.Version = []string{"v1", "v2", "v3"} + return nil + }) + got, err := Versions(c, module) + if err != nil { + t.Fatalf("Versions: %v", err) + } + want := []string{"v1", "v2", "v3"} + if !reflect.DeepEqual(got, want) { + t.Errorf("Versions = %v, want %v", got, want) + } +} + +func TestDefaultVersion(t *testing.T) { + c := aetesting.FakeSingleContext(t, "modules", "GetDefaultVersion", func(req *pb.GetDefaultVersionRequest, res *pb.GetDefaultVersionResponse) error { + if *req.Module != module { + t.Errorf("Module = %v, want %v", req.Module, module) + } + res.Version = proto.String(version) + return nil + }) + got, err := DefaultVersion(c, module) + if err != nil { + t.Fatalf("DefaultVersion: %v", err) + } + if got != version { + t.Errorf("Version = %v, want %v", got, version) + } +} + +func TestStart(t *testing.T) { + c := aetesting.FakeSingleContext(t, "modules", "StartModule", func(req *pb.StartModuleRequest, res *pb.StartModuleResponse) error { + if *req.Module != module { + t.Errorf("Module = %v, want %v", req.Module, module) + } + if *req.Version != version { + t.Errorf("Version = %v, want %v", req.Version, version) + } + return nil + }) + + err := Start(c, module, version) + if err != nil { + t.Fatalf("Start: %v", err) + } +} + 
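The modules API these tests exercise is typically driven from a request context; a minimal sketch of pinning and then checking an instance count (the module name "worker" and the count 5 are assumptions; SetNumInstances applies to manually scaled modules):

	// Pin the worker module's default version at 5 instances, then verify.
	if err := module.SetNumInstances(c, "worker", "", 5); err != nil {
		return err
	}
	n, err := module.NumInstances(c, "worker", "")
	if err != nil {
		return err
	}
	// n should now report 5.
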
+func TestStop(t *testing.T) {
+	c := aetesting.FakeSingleContext(t, "modules", "StopModule", func(req *pb.StopModuleRequest, res *pb.StopModuleResponse) error {
+		if *req.Module != module {
+			t.Errorf("Module = %v, want %v", req.Module, module)
+		}
+		if *req.Version != version {
+			t.Errorf("Version = %v, want %v", req.Version, version)
+		}
+		return nil
+	})
+
+	err := Stop(c, module, version)
+	if err != nil {
+		t.Fatalf("Stop: %v", err)
+	}
+}
diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go
new file mode 100644
index 0000000..21860ca
--- /dev/null
+++ b/vendor/google.golang.org/appengine/namespace.go
@@ -0,0 +1,25 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import (
+	"fmt"
+	"regexp"
+
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine/internal"
+)
+
+// Namespace returns a replacement context that operates within the given namespace.
+func Namespace(c context.Context, namespace string) (context.Context, error) {
+	if !validNamespace.MatchString(namespace) {
+		return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace)
+	}
+	return internal.NamespacedContext(c, namespace), nil
+}
+
+// validNamespace matches valid namespace names.
+var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`)
diff --git a/vendor/google.golang.org/appengine/namespace_test.go b/vendor/google.golang.org/appengine/namespace_test.go
new file mode 100644
index 0000000..847f640
--- /dev/null
+++ b/vendor/google.golang.org/appengine/namespace_test.go
@@ -0,0 +1,39 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import (
+	"testing"
+
+	"golang.org/x/net/context"
+)
+
+func TestNamespaceValidity(t *testing.T) {
+	testCases := []struct {
+		namespace string
+		ok        bool
+	}{
+		// data from Python's namespace_manager_test.py
+		{"", true},
+		{"__a.namespace.123__", true},
+		{"-_A....NAMESPACE-_", true},
+		{"-", true},
+		{".", true},
+		{".-", true},
+
+		{"?", false},
+		{"+", false},
+		{"!", false},
+		{" ", false},
+	}
+	for _, tc := range testCases {
+		_, err := Namespace(context.Background(), tc.namespace)
+		if err == nil && !tc.ok {
+			t.Errorf("Namespace %q should be rejected, but wasn't", tc.namespace)
+		} else if err != nil && tc.ok {
+			t.Errorf("Namespace %q should be accepted, but wasn't", tc.namespace)
+		}
+	}
+}
diff --git a/vendor/google.golang.org/appengine/remote_api/client.go b/vendor/google.golang.org/appengine/remote_api/client.go
new file mode 100644
index 0000000..dbe219d
--- /dev/null
+++ b/vendor/google.golang.org/appengine/remote_api/client.go
@@ -0,0 +1,174 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package remote_api
+
+// This file provides the client for connecting remotely to a user's production
+// application.
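A hedged sketch of how this client is typically wired up; the host name and the pre-authenticated *http.Client are assumptions supplied by the caller:

	hc := &http.Client{ /* assumed: a transport that attaches admin credentials */ }
	ctx, err := remote_api.NewRemoteContext("myapp.appspot.com", hc)
	if err != nil {
		log.Fatalf("NewRemoteContext: %v", err)
	}
	// ctx now routes App Engine service calls (datastore, memcache, ...)
	// through the production app's /_ah/remote_api endpoint, e.g.:
	stats, err := memcache.Stats(ctx)
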
+ +import ( + "bytes" + "fmt" + "io/ioutil" + "log" + "math/rand" + "net/http" + "net/url" + "regexp" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/remote_api" +) + +// NewRemoteContext returns a context that gives access to the production +// APIs for the application at the given host. All communication will be +// performed over SSL unless the host is localhost. +func NewRemoteContext(host string, client *http.Client) (context.Context, error) { + // Add an appcfg header to outgoing requests. + t := client.Transport + if t == nil { + t = http.DefaultTransport + } + client.Transport = &headerAddingRoundTripper{t} + + url := url.URL{ + Scheme: "https", + Host: host, + Path: "/_ah/remote_api", + } + if host == "localhost" || strings.HasPrefix(host, "localhost:") { + url.Scheme = "http" + } + u := url.String() + appID, err := getAppID(client, u) + if err != nil { + return nil, fmt.Errorf("unable to contact server: %v", err) + } + rc := &remoteContext{ + client: client, + url: u, + } + ctx := internal.WithCallOverride(context.Background(), rc.call) + ctx = internal.WithLogOverride(ctx, rc.logf) + ctx = internal.WithAppIDOverride(ctx, appID) + return ctx, nil +} + +type remoteContext struct { + client *http.Client + url string +} + +var logLevels = map[int64]string{ + 0: "DEBUG", + 1: "INFO", + 2: "WARNING", + 3: "ERROR", + 4: "CRITICAL", +} + +func (c *remoteContext) logf(level int64, format string, args ...interface{}) { + log.Printf(logLevels[level]+": "+format, args...) +} + +func (c *remoteContext) call(ctx context.Context, service, method string, in, out proto.Message) error { + req, err := proto.Marshal(in) + if err != nil { + return fmt.Errorf("error marshalling request: %v", err) + } + + remReq := &pb.Request{ + ServiceName: proto.String(service), + Method: proto.String(method), + Request: req, + // NOTE(djd): RequestId is unused in the server. + } + + req, err = proto.Marshal(remReq) + if err != nil { + return fmt.Errorf("proto.Marshal: %v", err) + } + + // TODO(djd): Respect ctx.Deadline()? + resp, err := c.client.Post(c.url, "application/octet-stream", bytes.NewReader(req)) + if err != nil { + return fmt.Errorf("error sending request: %v", err) + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("bad response %d; body: %q", resp.StatusCode, body) + } + if err != nil { + return fmt.Errorf("failed reading response: %v", err) + } + remResp := &pb.Response{} + if err := proto.Unmarshal(body, remResp); err != nil { + return fmt.Errorf("error unmarshalling response: %v", err) + } + + if ae := remResp.GetApplicationError(); ae != nil { + return &internal.APIError{ + Code: ae.GetCode(), + Detail: ae.GetDetail(), + Service: service, + } + } + + if remResp.Response == nil { + return fmt.Errorf("unexpected response: %s", proto.MarshalTextString(remResp)) + } + + return proto.Unmarshal(remResp.Response, out) +} + +// This is a forgiving regexp designed to parse the app ID from YAML. +var appIDRE = regexp.MustCompile(`app_id["']?\s*:\s*['"]?([-a-z0-9.:~]+)`) + +func getAppID(client *http.Client, url string) (string, error) { + // Generate a pseudo-random token for handshaking. 
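+	// The server's YAML reply must echo this token back next to the app
+	// ID, e.g. {app_id: "s~my-app", rtok: "1234567890"} (values
+	// illustrative); verifying the echo below guards against reading a
+	// cached page or a response from the wrong endpoint.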
+ token := strconv.Itoa(rand.New(rand.NewSource(time.Now().UnixNano())).Int()) + + resp, err := client.Get(fmt.Sprintf("%s?rtok=%s", url, token)) + if err != nil { + return "", err + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("bad response %d; body: %q", resp.StatusCode, body) + } + if err != nil { + return "", fmt.Errorf("failed reading response: %v", err) + } + + // Check the token is present in response. + if !bytes.Contains(body, []byte(token)) { + return "", fmt.Errorf("token not found: want %q; body %q", token, body) + } + + match := appIDRE.FindSubmatch(body) + if match == nil { + return "", fmt.Errorf("app ID not found: body %q", body) + } + + return string(match[1]), nil +} + +type headerAddingRoundTripper struct { + Wrapped http.RoundTripper +} + +func (t *headerAddingRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { + r.Header.Set("X-Appcfg-Api-Version", "1") + return t.Wrapped.RoundTrip(r) +} diff --git a/vendor/google.golang.org/appengine/remote_api/client_test.go b/vendor/google.golang.org/appengine/remote_api/client_test.go new file mode 100644 index 0000000..2e892a0 --- /dev/null +++ b/vendor/google.golang.org/appengine/remote_api/client_test.go @@ -0,0 +1,24 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package remote_api + +import ( + "testing" +) + +func TestAppIDRE(t *testing.T) { + appID := "s~my-appid-539" + tests := []string{ + "{rtok: 8306111115908860449, app_id: s~my-appid-539}\n", + "{rtok: 8306111115908860449, app_id: 's~my-appid-539'}\n", + `{rtok: 8306111115908860449, app_id: "s~my-appid-539"}`, + `{rtok: 8306111115908860449, "app_id":"s~my-appid-539"}`, + } + for _, v := range tests { + if g := appIDRE.FindStringSubmatch(v); g == nil || g[1] != appID { + t.Errorf("appIDRE.FindStringSubmatch(%s) got %q, want %q", v, g, appID) + } + } +} diff --git a/vendor/google.golang.org/appengine/remote_api/remote_api.go b/vendor/google.golang.org/appengine/remote_api/remote_api.go new file mode 100644 index 0000000..68cd7d9 --- /dev/null +++ b/vendor/google.golang.org/appengine/remote_api/remote_api.go @@ -0,0 +1,152 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +/* +Package remote_api implements the /_ah/remote_api endpoint. +This endpoint is used by offline tools such as the bulk loader. 
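+
+Importing the package is enough to install the handler, for example:
+
+	import _ "google.golang.org/appengine/remote_api"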
+*/ +package remote_api // import "google.golang.org/appengine/remote_api" + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" + "strconv" + + "github.com/golang/protobuf/proto" + + "google.golang.org/appengine" + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/remote_api" + "google.golang.org/appengine/log" + "google.golang.org/appengine/user" +) + +func init() { + http.HandleFunc("/_ah/remote_api", handle) +} + +func handle(w http.ResponseWriter, req *http.Request) { + c := appengine.NewContext(req) + + u := user.Current(c) + if u == nil { + u, _ = user.CurrentOAuth(c, + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/appengine.apis", + ) + } + + if u == nil || !u.Admin { + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + w.WriteHeader(http.StatusUnauthorized) + io.WriteString(w, "You must be logged in as an administrator to access this.\n") + return + } + if req.Header.Get("X-Appcfg-Api-Version") == "" { + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + w.WriteHeader(http.StatusForbidden) + io.WriteString(w, "This request did not contain a necessary header.\n") + return + } + + if req.Method != "POST" { + // Response must be YAML. + rtok := req.FormValue("rtok") + if rtok == "" { + rtok = "0" + } + w.Header().Set("Content-Type", "text/yaml; charset=utf-8") + fmt.Fprintf(w, `{app_id: %q, rtok: %q}`, internal.FullyQualifiedAppID(c), rtok) + return + } + + defer req.Body.Close() + body, err := ioutil.ReadAll(req.Body) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + log.Errorf(c, "Failed reading body: %v", err) + return + } + remReq := &pb.Request{} + if err := proto.Unmarshal(body, remReq); err != nil { + w.WriteHeader(http.StatusBadRequest) + log.Errorf(c, "Bad body: %v", err) + return + } + + service, method := *remReq.ServiceName, *remReq.Method + if !requestSupported(service, method) { + w.WriteHeader(http.StatusBadRequest) + log.Errorf(c, "Unsupported RPC /%s.%s", service, method) + return + } + + rawReq := &rawMessage{remReq.Request} + rawRes := &rawMessage{} + err = internal.Call(c, service, method, rawReq, rawRes) + + remRes := &pb.Response{} + if err == nil { + remRes.Response = rawRes.buf + } else if ae, ok := err.(*internal.APIError); ok { + remRes.ApplicationError = &pb.ApplicationError{ + Code: &ae.Code, + Detail: &ae.Detail, + } + } else { + // This shouldn't normally happen. + log.Errorf(c, "appengine/remote_api: Unexpected error of type %T: %v", err, err) + remRes.ApplicationError = &pb.ApplicationError{ + Code: proto.Int32(0), + Detail: proto.String(err.Error()), + } + } + out, err := proto.Marshal(remRes) + if err != nil { + // This should not be possible. + w.WriteHeader(500) + log.Errorf(c, "proto.Marshal: %v", err) + return + } + + log.Infof(c, "Spooling %d bytes of response to /%s.%s", len(out), service, method) + w.Header().Set("Content-Type", "application/octet-stream") + w.Header().Set("Content-Length", strconv.Itoa(len(out))) + w.Write(out) +} + +// rawMessage is a protocol buffer type that is already serialised. +// This allows the remote_api code here to handle messages +// without having to know the real type. 
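+// The pass-through works because proto.Marshal and proto.Unmarshal defer
+// to a message's own Marshal/Unmarshal methods (the proto.Marshaler and
+// proto.Unmarshaler interfaces) when they are present.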
+type rawMessage struct {
+	buf []byte
+}
+
+func (rm *rawMessage) Marshal() ([]byte, error) {
+	return rm.buf, nil
+}
+
+func (rm *rawMessage) Unmarshal(buf []byte) error {
+	rm.buf = make([]byte, len(buf))
+	copy(rm.buf, buf)
+	return nil
+}
+
+func requestSupported(service, method string) bool {
+	// This list of supported services is taken from SERVICE_PB_MAP in remote_api_services.py
+	switch service {
+	case "app_identity_service", "blobstore", "capability_service", "channel", "datastore_v3",
+		"datastore_v4", "file", "images", "logservice", "mail", "matcher", "memcache", "remote_datastore",
+		"remote_socket", "search", "modules", "system", "taskqueue", "urlfetch", "user", "xmpp":
+		return true
+	}
+	return false
+}
+
+// Methods to satisfy proto.Message.
+func (rm *rawMessage) Reset()         { rm.buf = nil }
+func (rm *rawMessage) String() string { return strconv.Quote(string(rm.buf)) }
+func (*rawMessage) ProtoMessage()     {}
diff --git a/vendor/google.golang.org/appengine/runtime/runtime.go b/vendor/google.golang.org/appengine/runtime/runtime.go
new file mode 100644
index 0000000..fa6c12b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/runtime/runtime.go
@@ -0,0 +1,148 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package runtime exposes information about the resource usage of the application.
+It also provides a way to run code in a new background context of a module.
+
+This package does not work on App Engine "flexible environment".
+*/
+package runtime // import "google.golang.org/appengine/runtime"
+
+import (
+	"net/http"
+
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine"
+	"google.golang.org/appengine/internal"
+	pb "google.golang.org/appengine/internal/system"
+)
+
+// Statistics represents the system's statistics.
+type Statistics struct {
+	// CPU records the CPU consumed by this instance, in megacycles.
+	CPU struct {
+		Total   float64
+		Rate1M  float64 // consumption rate over one minute
+		Rate10M float64 // consumption rate over ten minutes
+	}
+	// RAM records the memory used by the instance, in megabytes.
+	RAM struct {
+		Current    float64
+		Average1M  float64 // average usage over one minute
+		Average10M float64 // average usage over ten minutes
+	}
+}
+
+func Stats(c context.Context) (*Statistics, error) {
+	req := &pb.GetSystemStatsRequest{}
+	res := &pb.GetSystemStatsResponse{}
+	if err := internal.Call(c, "system", "GetSystemStats", req, res); err != nil {
+		return nil, err
+	}
+	s := &Statistics{}
+	if res.Cpu != nil {
+		s.CPU.Total = res.Cpu.GetTotal()
+		s.CPU.Rate1M = res.Cpu.GetRate1M()
+		s.CPU.Rate10M = res.Cpu.GetRate10M()
+	}
+	if res.Memory != nil {
+		s.RAM.Current = res.Memory.GetCurrent()
+		s.RAM.Average1M = res.Memory.GetAverage1M()
+		s.RAM.Average10M = res.Memory.GetAverage10M()
+	}
+	return s, nil
+}
+
+/*
+RunInBackground makes an API call that triggers an /_ah/background request.
+
+There are two independent code paths that need to make contact:
+the RunInBackground code, and the /_ah/background handler. The matchmaker
+loop arranges for the two paths to meet. The RunInBackground code passes
+a send to the matchmaker, the /_ah/background handler passes a recv to the
+matchmaker, and the matchmaker hooks them up.
+*/ + +func init() { + http.HandleFunc("/_ah/background", handleBackground) + + sc := make(chan send) + rc := make(chan recv) + sendc, recvc = sc, rc + go matchmaker(sc, rc) +} + +var ( + sendc chan<- send // RunInBackground sends to this + recvc chan<- recv // handleBackground sends to this +) + +type send struct { + id string + f func(context.Context) +} + +type recv struct { + id string + ch chan<- func(context.Context) +} + +func matchmaker(sendc <-chan send, recvc <-chan recv) { + // When one side of the match arrives before the other + // it is inserted in the corresponding map. + waitSend := make(map[string]send) + waitRecv := make(map[string]recv) + + for { + select { + case s := <-sendc: + if r, ok := waitRecv[s.id]; ok { + // meet! + delete(waitRecv, s.id) + r.ch <- s.f + } else { + // waiting for r + waitSend[s.id] = s + } + case r := <-recvc: + if s, ok := waitSend[r.id]; ok { + // meet! + delete(waitSend, r.id) + r.ch <- s.f + } else { + // waiting for s + waitRecv[r.id] = r + } + } + } +} + +var newContext = appengine.NewContext // for testing + +func handleBackground(w http.ResponseWriter, req *http.Request) { + id := req.Header.Get("X-AppEngine-BackgroundRequest") + + ch := make(chan func(context.Context)) + recvc <- recv{id, ch} + (<-ch)(newContext(req)) +} + +// RunInBackground runs f in a background goroutine in this process. +// f is provided a context that may outlast the context provided to RunInBackground. +// This is only valid to invoke from a service set to basic or manual scaling. +func RunInBackground(c context.Context, f func(c context.Context)) error { + req := &pb.StartBackgroundRequestRequest{} + res := &pb.StartBackgroundRequestResponse{} + if err := internal.Call(c, "system", "StartBackgroundRequest", req, res); err != nil { + return err + } + sendc <- send{res.GetRequestId(), f} + return nil +} + +func init() { + internal.RegisterErrorCodeMap("system", pb.SystemServiceError_ErrorCode_name) +} diff --git a/vendor/google.golang.org/appengine/runtime/runtime_test.go b/vendor/google.golang.org/appengine/runtime/runtime_test.go new file mode 100644 index 0000000..8f3a124 --- /dev/null +++ b/vendor/google.golang.org/appengine/runtime/runtime_test.go @@ -0,0 +1,101 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
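A hedged usage sketch of the RunInBackground API above, as called from request-handler code; the work function is an assumption, and the service must use manual or basic scaling:

	err := runtime.RunInBackground(c, func(bgCtx context.Context) {
		// bgCtx belongs to the background request and may outlive c.
		drainTaskBacklog(bgCtx) // hypothetical helper
	})
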
+ +package runtime + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine/internal/aetesting" + pb "google.golang.org/appengine/internal/system" +) + +func TestRunInBackgroundSendFirst(t *testing.T) { testRunInBackground(t, true) } +func TestRunInBackgroundRecvFirst(t *testing.T) { testRunInBackground(t, false) } + +func testRunInBackground(t *testing.T, sendFirst bool) { + srv := httptest.NewServer(nil) + defer srv.Close() + + const id = "f00bar" + sendWait, recvWait := make(chan bool), make(chan bool) + sbr := make(chan bool) // strobed when system.StartBackgroundRequest has started + + calls := 0 + c := aetesting.FakeSingleContext(t, "system", "StartBackgroundRequest", func(req *pb.StartBackgroundRequestRequest, res *pb.StartBackgroundRequestResponse) error { + calls++ + if calls > 1 { + t.Errorf("Too many calls to system.StartBackgroundRequest") + } + sbr <- true + res.RequestId = proto.String(id) + <-sendWait + return nil + }) + + var c2 context.Context // a fake + newContext = func(*http.Request) context.Context { + return c2 + } + + var fRun int + f := func(c3 context.Context) { + fRun++ + if c3 != c2 { + t.Errorf("f got a different context than expected") + } + } + + ribErrc := make(chan error) + go func() { + ribErrc <- RunInBackground(c, f) + }() + + brErrc := make(chan error) + go func() { + <-sbr + req, err := http.NewRequest("GET", srv.URL+"/_ah/background", nil) + if err != nil { + brErrc <- fmt.Errorf("http.NewRequest: %v", err) + return + } + req.Header.Set("X-AppEngine-BackgroundRequest", id) + client := &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + }, + } + + <-recvWait + _, err = client.Do(req) + brErrc <- err + }() + + // Send and receive are both waiting at this point. + waits := [2]chan bool{sendWait, recvWait} + if !sendFirst { + waits[0], waits[1] = waits[1], waits[0] + } + waits[0] <- true + time.Sleep(100 * time.Millisecond) + waits[1] <- true + + if err := <-ribErrc; err != nil { + t.Fatalf("RunInBackground: %v", err) + } + if err := <-brErrc; err != nil { + t.Fatalf("background request: %v", err) + } + + if fRun != 1 { + t.Errorf("Got %d runs of f, want 1", fRun) + } +} diff --git a/vendor/google.golang.org/appengine/search/doc.go b/vendor/google.golang.org/appengine/search/doc.go new file mode 100644 index 0000000..da331ce --- /dev/null +++ b/vendor/google.golang.org/appengine/search/doc.go @@ -0,0 +1,209 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +/* +Package search provides a client for App Engine's search service. + + +Basic Operations + +Indexes contain documents. Each index is identified by its name: a +human-readable ASCII string. + +Within an index, documents are associated with an ID, which is also +a human-readable ASCII string. A document's contents are a mapping from +case-sensitive field names to values. Valid types for field values are: + - string, + - search.Atom, + - search.HTML, + - time.Time (stored with millisecond precision), + - float64 (value between -2,147,483,647 and 2,147,483,647 inclusive), + - appengine.GeoPoint. + +The Get and Put methods on an Index load and save a document. +A document's contents are typically represented by a struct pointer. 
+ +Example code: + + type Doc struct { + Author string + Comment string + Creation time.Time + } + + index, err := search.Open("comments") + if err != nil { + return err + } + newID, err := index.Put(ctx, "", &Doc{ + Author: "gopher", + Comment: "the truth of the matter", + Creation: time.Now(), + }) + if err != nil { + return err + } + +A single document can be retrieved by its ID. Pass a destination struct +to Get to hold the resulting document. + + var doc Doc + err := index.Get(ctx, id, &doc) + if err != nil { + return err + } + + +Search and Listing Documents + +Indexes have two methods for retrieving multiple documents at once: Search and +List. + +Searching an index for a query will result in an iterator. As with an iterator +from package datastore, pass a destination struct to Next to decode the next +result. Next will return Done when the iterator is exhausted. + + for t := index.Search(ctx, "Comment:truth", nil); ; { + var doc Doc + id, err := t.Next(&doc) + if err == search.Done { + break + } + if err != nil { + return err + } + fmt.Fprintf(w, "%s -> %#v\n", id, doc) + } + +Search takes a string query to determine which documents to return. The query +can be simple, such as a single word to match, or complex. The query +language is described at +https://cloud.google.com/appengine/docs/go/search/query_strings + +Search also takes an optional SearchOptions struct which gives much more +control over how results are calculated and returned. + +Call List to iterate over all documents in an index. + + for t := index.List(ctx, nil); ; { + var doc Doc + id, err := t.Next(&doc) + if err == search.Done { + break + } + if err != nil { + return err + } + fmt.Fprintf(w, "%s -> %#v\n", id, doc) + } + + +Fields and Facets + +A document's contents can be represented by a variety of types. These are +typically struct pointers, but they can also be represented by any type +implementing the FieldLoadSaver interface. The FieldLoadSaver allows metadata +to be set for the document with the DocumentMetadata type. Struct pointers are +more strongly typed and are easier to use; FieldLoadSavers are more flexible. + +A document's contents can be expressed in two ways: fields and facets. + +Fields are the most common way of providing content for documents. Fields can +store data in multiple types and can be matched in searches using query +strings. + +Facets provide a way to attach categorical information to a document. The only +valid types for facets are search.Atom and float64. Facets allow search +results to contain summaries of the categories matched in a search, and to +restrict searches to only match against specific categories. + +By default, for struct pointers, all of the struct fields are used as document +fields, and the field name used is the same as on the struct (and hence must +start with an upper case letter). Struct fields may have a +`search:"name,options"` tag. The name must start with a letter and be +composed only of word characters. A "-" tag name means that the field will be +ignored. If options is "facet" then the struct field will be used as a +document facet. If options is "" then the comma may be omitted. There are no +other recognized options. + +Example code: + + // A and B are renamed to a and b. + // A, C and I are facets. + // D's tag is equivalent to having no tag at all (E). + // F and G are ignored entirely by the search package. + // I has tag information for both the search and json packages. 
+	type TaggedStruct struct {
+		A float64 `search:"a,facet"`
+		B float64 `search:"b"`
+		C float64 `search:",facet"`
+		D float64 `search:""`
+		E float64
+		F float64 `search:"-"`
+		G float64 `search:"-,facet"`
+		I float64 `search:",facet" json:"i"`
+	}
+
+
+The FieldLoadSaver Interface
+
+A document's contents can also be represented by any type that implements the
+FieldLoadSaver interface. This type may be a struct pointer, but it
+does not have to be. The search package will call Load when loading the
+document's contents, and Save when saving them. In addition to a slice of
+Fields, the Load and Save methods also use the DocumentMetadata type to
+provide additional information about a document (such as its Rank, or set of
+Facets). Possible uses for this interface include deriving non-stored fields,
+verifying fields or setting specific languages for string and HTML fields.
+
+Example code:
+
+	type CustomFieldsExample struct {
+		// Item's title and which language it is in.
+		Title string
+		Lang  string
+		// Mass, in grams.
+		Mass int
+	}
+
+	func (x *CustomFieldsExample) Load(fields []search.Field, meta *search.DocumentMetadata) error {
+		// Load the title field, failing if any other field is found.
+		for _, f := range fields {
+			if f.Name != "title" {
+				return fmt.Errorf("unknown field %q", f.Name)
+			}
+			s, ok := f.Value.(string)
+			if !ok {
+				return fmt.Errorf("unsupported type %T for field %q", f.Value, f.Name)
+			}
+			x.Title = s
+			x.Lang = f.Language
+		}
+		// Load the mass facet, failing if any other facet is found.
+		for _, f := range meta.Facets {
+			if f.Name != "mass" {
+				return fmt.Errorf("unknown facet %q", f.Name)
+			}
+			m, ok := f.Value.(float64)
+			if !ok {
+				return fmt.Errorf("unsupported type %T for facet %q", f.Value, f.Name)
+			}
+			x.Mass = int(m)
+		}
+		return nil
+	}
+
+	func (x *CustomFieldsExample) Save() ([]search.Field, *search.DocumentMetadata, error) {
+		fields := []search.Field{
+			{Name: "title", Value: x.Title, Language: x.Lang},
+		}
+		meta := &search.DocumentMetadata{
+			Facets: []search.Facet{
+				{Name: "mass", Value: float64(x.Mass)},
+			},
+		}
+		return fields, meta, nil
+	}
+*/
+package search
diff --git a/vendor/google.golang.org/appengine/search/field.go b/vendor/google.golang.org/appengine/search/field.go
new file mode 100644
index 0000000..707c2d8
--- /dev/null
+++ b/vendor/google.golang.org/appengine/search/field.go
@@ -0,0 +1,82 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package search
+
+// Field is a name/value pair. A search index's document can be loaded and
+// saved as a sequence of Fields.
+type Field struct {
+	// Name is the field name. A valid field name matches /[A-Za-z][A-Za-z0-9_]*/.
+	Name string
+	// Value is the field value. The valid types are:
+	//  - string,
+	//  - search.Atom,
+	//  - search.HTML,
+	//  - time.Time (stored with millisecond precision),
+	//  - float64,
+	//  - GeoPoint.
+	Value interface{}
+	// Language is a two-letter ISO 639-1 code for the field's language,
+	// defaulting to "en" if nothing is specified. It may only be specified for
+	// fields of type string and search.HTML.
+	Language string
+	// Derived marks fields that were calculated as a result of a
+	// FieldExpression provided to Search. This field is ignored when saving a
+	// document.
+	Derived bool
+}
+
+// Facet is a name/value pair which is used to add categorical information to a
+// document.
+type Facet struct {
+	// Name is the facet name.
A valid facet name matches /[A-Za-z][A-Za-z0-9_]*/. + // A facet name cannot be longer than 500 characters. + Name string + // Value is the facet value. + // + // When being used in documents (for example, in + // DocumentMetadata.Facets), the valid types are: + // - search.Atom, + // - float64. + // + // When being used in SearchOptions.Refinements or being returned + // in FacetResult, the valid types are: + // - search.Atom, + // - search.Range. + Value interface{} +} + +// DocumentMetadata is a struct containing information describing a given document. +type DocumentMetadata struct { + // Rank is an integer specifying the order the document will be returned in + // search results. If zero, the rank will be set to the number of seconds since + // 2011-01-01 00:00:00 UTC when being Put into an index. + Rank int + // Facets is the set of facets for this document. + Facets []Facet +} + +// FieldLoadSaver can be converted from and to a slice of Fields +// with additional document metadata. +type FieldLoadSaver interface { + Load([]Field, *DocumentMetadata) error + Save() ([]Field, *DocumentMetadata, error) +} + +// FieldList converts a []Field to implement FieldLoadSaver. +type FieldList []Field + +// Load loads all of the provided fields into l. +// It does not first reset *l to an empty slice. +func (l *FieldList) Load(f []Field, _ *DocumentMetadata) error { + *l = append(*l, f...) + return nil +} + +// Save returns all of l's fields as a slice of Fields. +func (l *FieldList) Save() ([]Field, *DocumentMetadata, error) { + return *l, nil, nil +} + +var _ FieldLoadSaver = (*FieldList)(nil) diff --git a/vendor/google.golang.org/appengine/search/search.go b/vendor/google.golang.org/appengine/search/search.go new file mode 100644 index 0000000..774b051 --- /dev/null +++ b/vendor/google.golang.org/appengine/search/search.go @@ -0,0 +1,1121 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package search // import "google.golang.org/appengine/search" + +// TODO: let Put specify the document language: "en", "fr", etc. Also: order_id?? storage?? +// TODO: Index.GetAll (or Iterator.GetAll)? +// TODO: struct <-> protobuf tests. +// TODO: enforce Python's MIN_NUMBER_VALUE and MIN_DATE (which would disallow a zero +// time.Time)? _MAXIMUM_STRING_LENGTH? + +import ( + "errors" + "fmt" + "math" + "reflect" + "regexp" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine" + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/search" +) + +var ( + // ErrInvalidDocumentType is returned when methods like Put, Get or Next + // are passed a dst or src argument of invalid type. + ErrInvalidDocumentType = errors.New("search: invalid document type") + + // ErrNoSuchDocument is returned when no document was found for a given ID. + ErrNoSuchDocument = errors.New("search: no such document") +) + +// Atom is a document field whose contents are indexed as a single indivisible +// string. +type Atom string + +// HTML is a document field whose contents are indexed as HTML. Only text nodes +// are indexed: "foobar" will be treated as "foobar". +type HTML string + +// validIndexNameOrDocID is the Go equivalent of Python's +// _ValidateVisiblePrintableAsciiNotReserved. 
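To illustrate the FieldLoadSaver plumbing above: FieldList gives a schemaless document representation, and since it implements the interface on its pointer type, it is passed by address. A minimal sketch (the index name "products", the field values, and ctx are assumptions):

	doc := search.FieldList{
		{Name: "Title", Value: "wool socks"},
		{Name: "Mass", Value: 0.1},
	}
	index, err := search.Open("products") // hypothetical index
	if err != nil {
		return err
	}
	// &doc: FieldList's Load and Save have pointer receivers.
	id, err := index.Put(ctx, "", &doc)
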
+func validIndexNameOrDocID(s string) bool {
+	if strings.HasPrefix(s, "!") {
+		return false
+	}
+	for _, c := range s {
+		if c < 0x21 || 0x7f <= c {
+			return false
+		}
+	}
+	return true
+}
+
+var (
+	fieldNameRE = regexp.MustCompile(`^[A-Za-z][A-Za-z0-9_]*$`)
+	languageRE  = regexp.MustCompile(`^[a-z]{2}$`)
+)
+
+// validFieldName is the Go equivalent of Python's _CheckFieldName. It checks
+// the validity of both field and facet names.
+func validFieldName(s string) bool {
+	return len(s) <= 500 && fieldNameRE.MatchString(s)
+}
+
+// validDocRank checks that the rank is in the range [0, 2^31).
+func validDocRank(r int) bool {
+	return 0 <= r && r <= (1<<31-1)
+}
+
+// validLanguage checks that a language looks like ISO 639-1.
+func validLanguage(s string) bool {
+	return languageRE.MatchString(s)
+}
+
+// validFloat checks that f is in the range [-2147483647, 2147483647].
+func validFloat(f float64) bool {
+	return -(1<<31-1) <= f && f <= (1<<31-1)
+}
+
+// Index is an index of documents.
+type Index struct {
+	spec pb.IndexSpec
+}
+
+// orderIDEpoch forms the basis for populating OrderId on documents.
+var orderIDEpoch = time.Date(2011, 1, 1, 0, 0, 0, 0, time.UTC)
+
+// Open opens the index with the given name. The index is created if it does
+// not already exist.
+//
+// The name is a human-readable ASCII string. It must contain no whitespace
+// characters and not start with "!".
+func Open(name string) (*Index, error) {
+	if !validIndexNameOrDocID(name) {
+		return nil, fmt.Errorf("search: invalid index name %q", name)
+	}
+	return &Index{
+		spec: pb.IndexSpec{
+			Name: &name,
+		},
+	}, nil
+}
+
+// Put saves src to the index. If id is empty, a new ID is allocated by the
+// service and returned. If id is not empty, any existing index entry for that
+// ID is replaced.
+//
+// The ID is a human-readable ASCII string. It must contain no whitespace
+// characters and not start with "!".
+//
+// src must be a non-nil struct pointer or implement the FieldLoadSaver
+// interface.
+func (x *Index) Put(c context.Context, id string, src interface{}) (string, error) {
+	d, err := saveDoc(src)
+	if err != nil {
+		return "", err
+	}
+	if id != "" {
+		if !validIndexNameOrDocID(id) {
+			return "", fmt.Errorf("search: invalid ID %q", id)
+		}
+		d.Id = proto.String(id)
+	}
+	// spec is modified by Call when applying the current Namespace, so copy it to
+	// avoid retaining the namespace beyond the scope of the Call.
+	spec := x.spec
+	req := &pb.IndexDocumentRequest{
+		Params: &pb.IndexDocumentParams{
+			Document:  []*pb.Document{d},
+			IndexSpec: &spec,
+		},
+	}
+	res := &pb.IndexDocumentResponse{}
+	if err := internal.Call(c, "search", "IndexDocument", req, res); err != nil {
+		return "", err
+	}
+	if len(res.Status) > 0 {
+		if s := res.Status[0]; s.GetCode() != pb.SearchServiceError_OK {
+			return "", fmt.Errorf("search: %s: %s", s.GetCode(), s.GetErrorDetail())
+		}
+	}
+	if len(res.Status) != 1 || len(res.DocId) != 1 {
+		return "", fmt.Errorf("search: internal error: wrong number of results (%d Statuses, %d DocIDs)",
+			len(res.Status), len(res.DocId))
+	}
+	return res.DocId[0], nil
+}
+
+// Get loads the document with the given ID into dst.
+//
+// The ID is a human-readable ASCII string. It must be non-empty, contain no
+// whitespace characters and not start with "!".
+//
+// dst must be a non-nil struct pointer or implement the FieldLoadSaver
+// interface.
+// +// ErrFieldMismatch is returned when a field is to be loaded into a different +// type than the one it was stored from, or when a field is missing or +// unexported in the destination struct. ErrFieldMismatch is only returned if +// dst is a struct pointer. It is up to the callee to decide whether this error +// is fatal, recoverable, or ignorable. +func (x *Index) Get(c context.Context, id string, dst interface{}) error { + if id == "" || !validIndexNameOrDocID(id) { + return fmt.Errorf("search: invalid ID %q", id) + } + req := &pb.ListDocumentsRequest{ + Params: &pb.ListDocumentsParams{ + IndexSpec: &x.spec, + StartDocId: proto.String(id), + Limit: proto.Int32(1), + }, + } + res := &pb.ListDocumentsResponse{} + if err := internal.Call(c, "search", "ListDocuments", req, res); err != nil { + return err + } + if res.Status == nil || res.Status.GetCode() != pb.SearchServiceError_OK { + return fmt.Errorf("search: %s: %s", res.Status.GetCode(), res.Status.GetErrorDetail()) + } + if len(res.Document) != 1 || res.Document[0].GetId() != id { + return ErrNoSuchDocument + } + return loadDoc(dst, res.Document[0], nil) +} + +// Delete deletes a document from the index. +func (x *Index) Delete(c context.Context, id string) error { + req := &pb.DeleteDocumentRequest{ + Params: &pb.DeleteDocumentParams{ + DocId: []string{id}, + IndexSpec: &x.spec, + }, + } + res := &pb.DeleteDocumentResponse{} + if err := internal.Call(c, "search", "DeleteDocument", req, res); err != nil { + return err + } + if len(res.Status) != 1 { + return fmt.Errorf("search: internal error: wrong number of results (%d)", len(res.Status)) + } + if s := res.Status[0]; s.GetCode() != pb.SearchServiceError_OK { + return fmt.Errorf("search: %s: %s", s.GetCode(), s.GetErrorDetail()) + } + return nil +} + +// List lists all of the documents in an index. The documents are returned in +// increasing ID order. +func (x *Index) List(c context.Context, opts *ListOptions) *Iterator { + t := &Iterator{ + c: c, + index: x, + count: -1, + listInclusive: true, + more: moreList, + } + if opts != nil { + t.listStartID = opts.StartID + t.limit = opts.Limit + t.idsOnly = opts.IDsOnly + } + return t +} + +func moreList(t *Iterator) error { + req := &pb.ListDocumentsRequest{ + Params: &pb.ListDocumentsParams{ + IndexSpec: &t.index.spec, + }, + } + if t.listStartID != "" { + req.Params.StartDocId = &t.listStartID + req.Params.IncludeStartDoc = &t.listInclusive + } + if t.limit > 0 { + req.Params.Limit = proto.Int32(int32(t.limit)) + } + if t.idsOnly { + req.Params.KeysOnly = &t.idsOnly + } + + res := &pb.ListDocumentsResponse{} + if err := internal.Call(t.c, "search", "ListDocuments", req, res); err != nil { + return err + } + if res.Status == nil || res.Status.GetCode() != pb.SearchServiceError_OK { + return fmt.Errorf("search: %s: %s", res.Status.GetCode(), res.Status.GetErrorDetail()) + } + t.listRes = res.Document + t.listStartID, t.listInclusive, t.more = "", false, nil + if len(res.Document) != 0 && t.limit <= 0 { + if id := res.Document[len(res.Document)-1].GetId(); id != "" { + t.listStartID, t.more = id, moreList + } + } + return nil +} + +// ListOptions are the options for listing documents in an index. Passing a nil +// *ListOptions is equivalent to using the default values. +type ListOptions struct { + // StartID is the inclusive lower bound for the ID of the returned + // documents. The zero value means all documents will be returned. + StartID string + + // Limit is the maximum number of documents to return. 
The zero value + // indicates no limit. + Limit int + + // IDsOnly indicates that only document IDs should be returned for the list + // operation; no document fields are populated. + IDsOnly bool +} + +// Search searches the index for the given query. +func (x *Index) Search(c context.Context, query string, opts *SearchOptions) *Iterator { + t := &Iterator{ + c: c, + index: x, + searchQuery: query, + more: moreSearch, + } + if opts != nil { + if opts.Cursor != "" { + if opts.Offset != 0 { + return errIter("at most one of Cursor and Offset may be specified") + } + t.searchCursor = proto.String(string(opts.Cursor)) + } + t.limit = opts.Limit + t.fields = opts.Fields + t.idsOnly = opts.IDsOnly + t.sort = opts.Sort + t.exprs = opts.Expressions + t.refinements = opts.Refinements + t.facetOpts = opts.Facets + t.searchOffset = opts.Offset + t.countAccuracy = opts.CountAccuracy + } + return t +} + +func moreSearch(t *Iterator) error { + // We use per-result (rather than single/per-page) cursors since this + // lets us return a Cursor for every iterator document. The two cursor + // types are largely interchangeable: a page cursor is the same as the + // last per-result cursor in a given search response. + req := &pb.SearchRequest{ + Params: &pb.SearchParams{ + IndexSpec: &t.index.spec, + Query: &t.searchQuery, + Cursor: t.searchCursor, + CursorType: pb.SearchParams_PER_RESULT.Enum(), + FieldSpec: &pb.FieldSpec{ + Name: t.fields, + }, + }, + } + if t.limit > 0 { + req.Params.Limit = proto.Int32(int32(t.limit)) + } + if t.searchOffset > 0 { + req.Params.Offset = proto.Int32(int32(t.searchOffset)) + t.searchOffset = 0 + } + if t.countAccuracy > 0 { + req.Params.MatchedCountAccuracy = proto.Int32(int32(t.countAccuracy)) + } + if t.idsOnly { + req.Params.KeysOnly = &t.idsOnly + } + if t.sort != nil { + if err := sortToProto(t.sort, req.Params); err != nil { + return err + } + } + if t.refinements != nil { + if err := refinementsToProto(t.refinements, req.Params); err != nil { + return err + } + } + for _, e := range t.exprs { + req.Params.FieldSpec.Expression = append(req.Params.FieldSpec.Expression, &pb.FieldSpec_Expression{ + Name: proto.String(e.Name), + Expression: proto.String(e.Expr), + }) + } + for _, f := range t.facetOpts { + if err := f.setParams(req.Params); err != nil { + return fmt.Errorf("bad FacetSearchOption: %v", err) + } + } + // Don't repeat facet search. + t.facetOpts = nil + + res := &pb.SearchResponse{} + if err := internal.Call(t.c, "search", "Search", req, res); err != nil { + return err + } + if res.Status == nil || res.Status.GetCode() != pb.SearchServiceError_OK { + return fmt.Errorf("search: %s: %s", res.Status.GetCode(), res.Status.GetErrorDetail()) + } + t.searchRes = res.Result + if len(res.FacetResult) > 0 { + t.facetRes = res.FacetResult + } + t.count = int(*res.MatchedCount) + if t.limit > 0 { + t.more = nil + } else { + t.more = moreSearch + } + return nil +} + +// SearchOptions are the options for searching an index. Passing a nil +// *SearchOptions is equivalent to using the default values. +type SearchOptions struct { + // Limit is the maximum number of documents to return. The zero value + // indicates no limit. + Limit int + + // IDsOnly indicates that only document IDs should be returned for the search + // operation; no document fields are populated. + IDsOnly bool + + // Sort controls the ordering of search results. + Sort *SortOptions + + // Fields specifies which document fields to include in the results. If omitted, + // all document fields are returned. 
No more than 100 fields may be specified.
+	Fields []string
+
+	// Expressions specifies additional computed fields to add to each returned
+	// document.
+	Expressions []FieldExpression
+
+	// Facets controls what facet information is returned for these search results.
+	// If no options are specified, no facet results will be returned.
+	Facets []FacetSearchOption
+
+	// Refinements filters the returned documents by requiring them to contain facets
+	// with specific values. Refinements are applied in conjunction for facets with
+	// different names, and in disjunction otherwise.
+	Refinements []Facet
+
+	// Cursor causes the results to commence with the first document after
+	// the document associated with the cursor.
+	Cursor Cursor
+
+	// Offset specifies the number of documents to skip over before returning results.
+	// When specified, Cursor must be empty.
+	Offset int
+
+	// CountAccuracy specifies the maximum result count that can be expected to
+	// be accurate. If zero, the count accuracy defaults to 20.
+	CountAccuracy int
+}
+
+// Cursor represents an iterator's position.
+//
+// The string value of a cursor is web-safe. It can be saved and restored
+// for later use.
+type Cursor string
+
+// FieldExpression defines a custom expression to evaluate for each result.
+type FieldExpression struct {
+	// Name is the name to use for the computed field.
+	Name string
+
+	// Expr is evaluated to provide a custom content snippet for each document.
+	// See https://cloud.google.com/appengine/docs/go/search/options for
+	// the supported expression syntax.
+	Expr string
+}
+
+// FacetSearchOption controls what facet information is returned in search results.
+type FacetSearchOption interface {
+	setParams(*pb.SearchParams) error
+}
+
+// AutoFacetDiscovery returns a FacetSearchOption which enables automatic facet
+// discovery for the search. Automatic facet discovery looks for the facets
+// which appear the most often in the aggregate in the matched documents.
+//
+// The maximum number of facets returned is controlled by facetLimit, and the
+// maximum number of values per facet by valueLimit. A limit of zero indicates
+// a default limit should be used.
+func AutoFacetDiscovery(facetLimit, valueLimit int) FacetSearchOption {
+	return &autoFacetOpt{facetLimit, valueLimit}
+}
+
+type autoFacetOpt struct {
+	facetLimit, valueLimit int
+}
+
+const defaultAutoFacetLimit = 10 // As per python runtime search.py.
+
+func (o *autoFacetOpt) setParams(params *pb.SearchParams) error {
+	lim := int32(o.facetLimit)
+	if lim == 0 {
+		lim = defaultAutoFacetLimit
+	}
+	params.AutoDiscoverFacetCount = &lim
+	if o.valueLimit > 0 {
+		params.FacetAutoDetectParam = &pb.FacetAutoDetectParam{
+			ValueLimit: proto.Int32(int32(o.valueLimit)),
+		}
+	}
+	return nil
+}
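
Tying SearchOptions and the facet options together, a hedged sketch of a faceted query from a consuming package (the query string and variable names are illustrative; imports as in the first sketch):

func topMatches(ctx context.Context, index *search.Index) ([]string, []search.FacetResult, error) {
	it := index.Search(ctx, "gopher", &search.SearchOptions{
		Limit:   20, // one batch of at most 20 results
		IDsOnly: true,
		Facets:  []search.FacetSearchOption{search.AutoFacetDiscovery(0, 0)}, // default limits
	})
	// Facets must have been requested in SearchOptions for this to return anything.
	facets, err := it.Facets()
	if err != nil {
		return nil, nil, err
	}
	var all []search.FacetResult
	for _, group := range facets {
		all = append(all, group...) // one group per facet name
	}
	var ids []string
	for {
		id, err := it.Next(nil)
		if err == search.Done {
			break
		}
		if err != nil {
			return nil, nil, err
		}
		ids = append(ids, id)
	}
	return ids, all, nil
}
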
+// FacetDiscovery returns a FacetSearchOption which selects a facet to be
+// returned with the search results. By default, the most frequently
+// occurring values for that facet will be returned. However, you can also
+// specify a list of particular Atoms or specific Ranges to return.
+func FacetDiscovery(name string, value ...interface{}) FacetSearchOption {
+	return &facetOpt{name, value}
+}
+
+type facetOpt struct {
+	name   string
+	values []interface{}
+}
+
+func (o *facetOpt) setParams(params *pb.SearchParams) error {
+	req := &pb.FacetRequest{Name: &o.name}
+	params.IncludeFacet = append(params.IncludeFacet, req)
+	if len(o.values) == 0 {
+		return nil
+	}
+	vtype := reflect.TypeOf(o.values[0])
+	reqParam := &pb.FacetRequestParam{}
+	for _, v := range o.values {
+		if reflect.TypeOf(v) != vtype {
+			return errors.New("values must all be Atom, or must all be Range")
+		}
+		switch v := v.(type) {
+		case Atom:
+			reqParam.ValueConstraint = append(reqParam.ValueConstraint, string(v))
+		case Range:
+			rng, err := rangeToProto(v)
+			if err != nil {
+				return fmt.Errorf("invalid range: %v", err)
+			}
+			reqParam.Range = append(reqParam.Range, rng)
+		default:
+			return fmt.Errorf("unsupported value type %T", v)
+		}
+	}
+	req.Params = reqParam
+	return nil
+}
+
+// FacetDocumentDepth returns a FacetSearchOption which controls the number of
+// documents to be evaluated when preparing facet results.
+func FacetDocumentDepth(depth int) FacetSearchOption {
+	return facetDepthOpt(depth)
+}
+
+type facetDepthOpt int
+
+func (o facetDepthOpt) setParams(params *pb.SearchParams) error {
+	params.FacetDepth = proto.Int32(int32(o))
+	return nil
+}
+
+// FacetResult represents the number of times a particular facet and value
+// appeared in the documents matching a search request.
+type FacetResult struct {
+	Facet
+
+	// Count is the number of times this specific facet and value appeared in the
+	// matching documents.
+	Count int
+}
+
+// Range represents a numeric range with inclusive start and exclusive end.
+// Start may be specified as math.Inf(-1) to indicate there is no minimum
+// value, and End may similarly be specified as math.Inf(1); at least one of
+// Start or End must be a finite number.
+type Range struct {
+	Start, End float64
+}
+
+var (
+	negInf = math.Inf(-1)
+	posInf = math.Inf(1)
+)
+
+// AtLeast returns a Range matching any value greater than, or equal to, min.
+func AtLeast(min float64) Range {
+	return Range{Start: min, End: posInf}
+}
+
+// LessThan returns a Range matching any value less than max.
+func LessThan(max float64) Range {
+	return Range{Start: negInf, End: max}
+}
+
+// SortOptions control the ordering and scoring of search results.
+type SortOptions struct {
+	// Expressions is a slice of expressions representing a multi-dimensional
+	// sort.
+	Expressions []SortExpression
+
+	// Scorer, when specified, will cause the documents to be scored according to
+	// search term frequency.
+	Scorer Scorer
+
+	// Limit is the maximum number of objects to score and/or sort. Limit cannot
+	// be more than 10,000. The zero value indicates a default limit.
+	Limit int
+}
+
+// SortExpression defines a single dimension for sorting a document.
+type SortExpression struct {
+	// Expr is evaluated to provide a sorting value for each document.
+	// See https://cloud.google.com/appengine/docs/go/search/options for
+	// the supported expression syntax.
+	Expr string
+
+	// Reverse causes the documents to be sorted in ascending order.
+	Reverse bool
+
+	// The default value to use when no field is present or the expression
+	// cannot be calculated for a document. For text sorts, Default must
+	// be of type string; for numeric sorts, float64.
+	Default interface{}
+}
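
A short sketch of the sort options just defined. Descending is the default direction, and Default supplies a per-document fallback when the expression cannot be evaluated (field names illustrative; imports as in the first sketch):

func byScore(ctx context.Context, index *search.Index) *search.Iterator {
	return index.Search(ctx, "gopher", &search.SearchOptions{
		Sort: &search.SortOptions{
			Expressions: []search.SortExpression{
				// Highest Score first (descending is the default direction);
				// documents without a Score sort as 0. Numeric defaults
				// must be float64.
				{Expr: "Score", Default: float64(0)},
			},
			Scorer: search.MatchScorer, // also compute a term-frequency score
		},
	})
}
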
+// A Scorer defines how a document is scored.
+type Scorer interface {
+	toProto(*pb.ScorerSpec)
+}
+
+type enumScorer struct {
+	enum pb.ScorerSpec_Scorer
+}
+
+func (e enumScorer) toProto(spec *pb.ScorerSpec) {
+	spec.Scorer = e.enum.Enum()
+}
+
+var (
+	// MatchScorer assigns a score based on term frequency in a document.
+	MatchScorer Scorer = enumScorer{pb.ScorerSpec_MATCH_SCORER}
+
+	// RescoringMatchScorer assigns a score based on the quality of the query
+	// match. It is similar to a MatchScorer but uses a more complex scoring
+	// algorithm based on match term frequency and other factors like field type.
+	// Please be aware that this algorithm is continually refined and can change
+	// over time without notice. This means that the ordering of search results
+	// that use this scorer can also change without notice.
+	RescoringMatchScorer Scorer = enumScorer{pb.ScorerSpec_RESCORING_MATCH_SCORER}
+)
+
+func sortToProto(sort *SortOptions, params *pb.SearchParams) error {
+	for _, e := range sort.Expressions {
+		spec := &pb.SortSpec{
+			SortExpression: proto.String(e.Expr),
+		}
+		if e.Reverse {
+			spec.SortDescending = proto.Bool(false)
+		}
+		if e.Default != nil {
+			switch d := e.Default.(type) {
+			case float64:
+				spec.DefaultValueNumeric = &d
+			case string:
+				spec.DefaultValueText = &d
+			default:
+				return fmt.Errorf("search: invalid Default type %T for expression %q", d, e.Expr)
+			}
+		}
+		params.SortSpec = append(params.SortSpec, spec)
+	}
+
+	spec := &pb.ScorerSpec{}
+	if sort.Limit > 0 {
+		spec.Limit = proto.Int32(int32(sort.Limit))
+		params.ScorerSpec = spec
+	}
+	if sort.Scorer != nil {
+		sort.Scorer.toProto(spec)
+		params.ScorerSpec = spec
+	}
+
+	return nil
+}
+
+func refinementsToProto(refinements []Facet, params *pb.SearchParams) error {
+	for _, r := range refinements {
+		ref := &pb.FacetRefinement{
+			Name: proto.String(r.Name),
+		}
+		switch v := r.Value.(type) {
+		case Atom:
+			ref.Value = proto.String(string(v))
+		case Range:
+			rng, err := rangeToProto(v)
+			if err != nil {
+				return fmt.Errorf("search: refinement for facet %q: %v", r.Name, err)
+			}
+			// Unfortunately there are two identical messages for identifying Facet ranges.
+			ref.Range = &pb.FacetRefinement_Range{Start: rng.Start, End: rng.End}
+		default:
+			return fmt.Errorf("search: unsupported refinement for facet %q of type %T", r.Name, v)
+		}
+		params.FacetRefinement = append(params.FacetRefinement, ref)
+	}
+	return nil
+}
+
+func rangeToProto(r Range) (*pb.FacetRange, error) {
+	rng := &pb.FacetRange{}
+	if r.Start != negInf {
+		if !validFloat(r.Start) {
+			return nil, errors.New("invalid value for Start")
+		}
+		rng.Start = proto.String(strconv.FormatFloat(r.Start, 'e', -1, 64))
+	} else if r.End == posInf {
+		return nil, errors.New("either Start or End must be finite")
+	}
+	if r.End != posInf {
+		if !validFloat(r.End) {
+			return nil, errors.New("invalid value for End")
+		}
+		rng.End = proto.String(strconv.FormatFloat(r.End, 'e', -1, 64))
+	}
+	return rng, nil
+}
+
+// protoToRange converts a proto facet range; a bound that fails to parse
+// (typically because it was unset) is left infinite.
+func protoToRange(rng *pb.FacetRefinement_Range) Range {
+	r := Range{Start: negInf, End: posInf}
+	if x, err := strconv.ParseFloat(rng.GetStart(), 64); err == nil {
+		r.Start = x
+	}
+	if x, err := strconv.ParseFloat(rng.GetEnd(), 64); err == nil {
+		r.End = x
+	}
+	return r
+}
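
Because moreSearch requests per-result cursors, paging can be stateless: hand the iterator's final cursor to the client and pass it back on the next request. A sketch, with an illustrative page size (imports as in the first sketch):

func page(ctx context.Context, index *search.Index, cursor search.Cursor) ([]string, search.Cursor, error) {
	it := index.Search(ctx, "gopher", &search.SearchOptions{
		Limit:   10,     // one page
		Cursor:  cursor, // the zero value starts at the beginning
		IDsOnly: true,
	})
	var ids []string
	for {
		id, err := it.Next(nil)
		if err == search.Done {
			break
		}
		if err != nil {
			return nil, "", err
		}
		ids = append(ids, id)
	}
	return ids, it.Cursor(), nil // resume after the last document seen
}

+// Iterator is the result of searching an index for a query or listing an
+// index.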
+type Iterator struct { + c context.Context + index *Index + err error + + listRes []*pb.Document + listStartID string + listInclusive bool + + searchRes []*pb.SearchResult + facetRes []*pb.FacetResult + searchQuery string + searchCursor *string + searchOffset int + sort *SortOptions + + fields []string + exprs []FieldExpression + refinements []Facet + facetOpts []FacetSearchOption + + more func(*Iterator) error + + count int + countAccuracy int + limit int // items left to return; 0 for unlimited. + idsOnly bool +} + +// errIter returns an iterator that only returns the given error. +func errIter(err string) *Iterator { + return &Iterator{ + err: errors.New(err), + } +} + +// Done is returned when a query iteration has completed. +var Done = errors.New("search: query has no more results") + +// Count returns an approximation of the number of documents matched by the +// query. It is only valid to call for iterators returned by Search. +func (t *Iterator) Count() int { return t.count } + +// fetchMore retrieves more results, if there are no errors or pending results. +func (t *Iterator) fetchMore() { + if t.err == nil && len(t.listRes)+len(t.searchRes) == 0 && t.more != nil { + t.err = t.more(t) + } +} + +// Next returns the ID of the next result. When there are no more results, +// Done is returned as the error. +// +// dst must be a non-nil struct pointer, implement the FieldLoadSaver +// interface, or be a nil interface value. If a non-nil dst is provided, it +// will be filled with the indexed fields. dst is ignored if this iterator was +// created with an IDsOnly option. +func (t *Iterator) Next(dst interface{}) (string, error) { + t.fetchMore() + if t.err != nil { + return "", t.err + } + + var doc *pb.Document + var exprs []*pb.Field + switch { + case len(t.listRes) != 0: + doc = t.listRes[0] + t.listRes = t.listRes[1:] + case len(t.searchRes) != 0: + doc = t.searchRes[0].Document + exprs = t.searchRes[0].Expression + t.searchCursor = t.searchRes[0].Cursor + t.searchRes = t.searchRes[1:] + default: + return "", Done + } + if doc == nil { + return "", errors.New("search: internal error: no document returned") + } + if !t.idsOnly && dst != nil { + if err := loadDoc(dst, doc, exprs); err != nil { + return "", err + } + } + return doc.GetId(), nil +} + +// Cursor returns the cursor associated with the current document (that is, +// the document most recently returned by a call to Next). +// +// Passing this cursor in a future call to Search will cause those results +// to commence with the first document after the current document. +func (t *Iterator) Cursor() Cursor { + if t.searchCursor == nil { + return "" + } + return Cursor(*t.searchCursor) +} + +// Facets returns the facets found within the search results, if any facets +// were requested in the SearchOptions. +func (t *Iterator) Facets() ([][]FacetResult, error) { + t.fetchMore() + if t.err != nil && t.err != Done { + return nil, t.err + } + + var facets [][]FacetResult + for _, f := range t.facetRes { + fres := make([]FacetResult, 0, len(f.Value)) + for _, v := range f.Value { + ref := v.Refinement + facet := FacetResult{ + Facet: Facet{Name: ref.GetName()}, + Count: int(v.GetCount()), + } + if ref.Value != nil { + facet.Value = Atom(*ref.Value) + } else { + facet.Value = protoToRange(ref.Range) + } + fres = append(fres, facet) + } + facets = append(facets, fres) + } + return facets, nil +} + +// saveDoc converts from a struct pointer or +// FieldLoadSaver/FieldMetadataLoadSaver to the Document protobuf. 
+func saveDoc(src interface{}) (*pb.Document, error) { + var err error + var fields []Field + var meta *DocumentMetadata + switch x := src.(type) { + case FieldLoadSaver: + fields, meta, err = x.Save() + default: + fields, meta, err = saveStructWithMeta(src) + } + if err != nil { + return nil, err + } + + fieldsProto, err := fieldsToProto(fields) + if err != nil { + return nil, err + } + d := &pb.Document{ + Field: fieldsProto, + OrderId: proto.Int32(int32(time.Since(orderIDEpoch).Seconds())), + } + if meta != nil { + if meta.Rank != 0 { + if !validDocRank(meta.Rank) { + return nil, fmt.Errorf("search: invalid rank %d, must be [0, 2^31)", meta.Rank) + } + *d.OrderId = int32(meta.Rank) + } + if len(meta.Facets) > 0 { + facets, err := facetsToProto(meta.Facets) + if err != nil { + return nil, err + } + d.Facet = facets + } + } + return d, nil +} + +func fieldsToProto(src []Field) ([]*pb.Field, error) { + // Maps to catch duplicate time or numeric fields. + timeFields, numericFields := make(map[string]bool), make(map[string]bool) + dst := make([]*pb.Field, 0, len(src)) + for _, f := range src { + if !validFieldName(f.Name) { + return nil, fmt.Errorf("search: invalid field name %q", f.Name) + } + fieldValue := &pb.FieldValue{} + switch x := f.Value.(type) { + case string: + fieldValue.Type = pb.FieldValue_TEXT.Enum() + fieldValue.StringValue = proto.String(x) + case Atom: + fieldValue.Type = pb.FieldValue_ATOM.Enum() + fieldValue.StringValue = proto.String(string(x)) + case HTML: + fieldValue.Type = pb.FieldValue_HTML.Enum() + fieldValue.StringValue = proto.String(string(x)) + case time.Time: + if timeFields[f.Name] { + return nil, fmt.Errorf("search: duplicate time field %q", f.Name) + } + timeFields[f.Name] = true + fieldValue.Type = pb.FieldValue_DATE.Enum() + fieldValue.StringValue = proto.String(strconv.FormatInt(x.UnixNano()/1e6, 10)) + case float64: + if numericFields[f.Name] { + return nil, fmt.Errorf("search: duplicate numeric field %q", f.Name) + } + if !validFloat(x) { + return nil, fmt.Errorf("search: numeric field %q with invalid value %f", f.Name, x) + } + numericFields[f.Name] = true + fieldValue.Type = pb.FieldValue_NUMBER.Enum() + fieldValue.StringValue = proto.String(strconv.FormatFloat(x, 'e', -1, 64)) + case appengine.GeoPoint: + if !x.Valid() { + return nil, fmt.Errorf( + "search: GeoPoint field %q with invalid value %v", + f.Name, x) + } + fieldValue.Type = pb.FieldValue_GEO.Enum() + fieldValue.Geo = &pb.FieldValue_Geo{ + Lat: proto.Float64(x.Lat), + Lng: proto.Float64(x.Lng), + } + default: + return nil, fmt.Errorf("search: unsupported field type: %v", reflect.TypeOf(f.Value)) + } + if f.Language != "" { + switch f.Value.(type) { + case string, HTML: + if !validLanguage(f.Language) { + return nil, fmt.Errorf("search: invalid language for field %q: %q", f.Name, f.Language) + } + fieldValue.Language = proto.String(f.Language) + default: + return nil, fmt.Errorf("search: setting language not supported for field %q of type %T", f.Name, f.Value) + } + } + if p := fieldValue.StringValue; p != nil && !utf8.ValidString(*p) { + return nil, fmt.Errorf("search: %q field is invalid UTF-8: %q", f.Name, *p) + } + dst = append(dst, &pb.Field{ + Name: proto.String(f.Name), + Value: fieldValue, + }) + } + return dst, nil +} + +func facetsToProto(src []Facet) ([]*pb.Facet, error) { + dst := make([]*pb.Facet, 0, len(src)) + for _, f := range src { + if !validFieldName(f.Name) { + return nil, fmt.Errorf("search: invalid facet name %q", f.Name) + } + facetValue := &pb.FacetValue{} + switch x 
:= f.Value.(type) { + case Atom: + if !utf8.ValidString(string(x)) { + return nil, fmt.Errorf("search: %q facet is invalid UTF-8: %q", f.Name, x) + } + facetValue.Type = pb.FacetValue_ATOM.Enum() + facetValue.StringValue = proto.String(string(x)) + case float64: + if !validFloat(x) { + return nil, fmt.Errorf("search: numeric facet %q with invalid value %f", f.Name, x) + } + facetValue.Type = pb.FacetValue_NUMBER.Enum() + facetValue.StringValue = proto.String(strconv.FormatFloat(x, 'e', -1, 64)) + default: + return nil, fmt.Errorf("search: unsupported facet type: %v", reflect.TypeOf(f.Value)) + } + dst = append(dst, &pb.Facet{ + Name: proto.String(f.Name), + Value: facetValue, + }) + } + return dst, nil +} + +// loadDoc converts from protobufs to a struct pointer or +// FieldLoadSaver/FieldMetadataLoadSaver. The src param provides the document's +// stored fields and facets, and any document metadata. An additional slice of +// fields, exprs, may optionally be provided to contain any derived expressions +// requested by the developer. +func loadDoc(dst interface{}, src *pb.Document, exprs []*pb.Field) (err error) { + fields, err := protoToFields(src.Field) + if err != nil { + return err + } + facets, err := protoToFacets(src.Facet) + if err != nil { + return err + } + if len(exprs) > 0 { + exprFields, err := protoToFields(exprs) + if err != nil { + return err + } + // Mark each field as derived. + for i := range exprFields { + exprFields[i].Derived = true + } + fields = append(fields, exprFields...) + } + meta := &DocumentMetadata{ + Rank: int(src.GetOrderId()), + Facets: facets, + } + switch x := dst.(type) { + case FieldLoadSaver: + return x.Load(fields, meta) + default: + return loadStructWithMeta(dst, fields, meta) + } +} + +func protoToFields(fields []*pb.Field) ([]Field, error) { + dst := make([]Field, 0, len(fields)) + for _, field := range fields { + fieldValue := field.GetValue() + f := Field{ + Name: field.GetName(), + } + switch fieldValue.GetType() { + case pb.FieldValue_TEXT: + f.Value = fieldValue.GetStringValue() + f.Language = fieldValue.GetLanguage() + case pb.FieldValue_ATOM: + f.Value = Atom(fieldValue.GetStringValue()) + case pb.FieldValue_HTML: + f.Value = HTML(fieldValue.GetStringValue()) + f.Language = fieldValue.GetLanguage() + case pb.FieldValue_DATE: + sv := fieldValue.GetStringValue() + millis, err := strconv.ParseInt(sv, 10, 64) + if err != nil { + return nil, fmt.Errorf("search: internal error: bad time.Time encoding %q: %v", sv, err) + } + f.Value = time.Unix(0, millis*1e6) + case pb.FieldValue_NUMBER: + sv := fieldValue.GetStringValue() + x, err := strconv.ParseFloat(sv, 64) + if err != nil { + return nil, err + } + f.Value = x + case pb.FieldValue_GEO: + geoValue := fieldValue.GetGeo() + geoPoint := appengine.GeoPoint{geoValue.GetLat(), geoValue.GetLng()} + if !geoPoint.Valid() { + return nil, fmt.Errorf("search: internal error: invalid GeoPoint encoding: %v", geoPoint) + } + f.Value = geoPoint + default: + return nil, fmt.Errorf("search: internal error: unknown data type %s", fieldValue.GetType()) + } + dst = append(dst, f) + } + return dst, nil +} + +func protoToFacets(facets []*pb.Facet) ([]Facet, error) { + if len(facets) == 0 { + return nil, nil + } + dst := make([]Facet, 0, len(facets)) + for _, facet := range facets { + facetValue := facet.GetValue() + f := Facet{ + Name: facet.GetName(), + } + switch facetValue.GetType() { + case pb.FacetValue_ATOM: + f.Value = Atom(facetValue.GetStringValue()) + case pb.FacetValue_NUMBER: + sv := 
facetValue.GetStringValue() + x, err := strconv.ParseFloat(sv, 64) + if err != nil { + return nil, err + } + f.Value = x + default: + return nil, fmt.Errorf("search: internal error: unknown data type %s", facetValue.GetType()) + } + dst = append(dst, f) + } + return dst, nil +} + +func namespaceMod(m proto.Message, namespace string) { + set := func(s **string) { + if *s == nil { + *s = &namespace + } + } + switch m := m.(type) { + case *pb.IndexDocumentRequest: + set(&m.Params.IndexSpec.Namespace) + case *pb.ListDocumentsRequest: + set(&m.Params.IndexSpec.Namespace) + case *pb.DeleteDocumentRequest: + set(&m.Params.IndexSpec.Namespace) + case *pb.SearchRequest: + set(&m.Params.IndexSpec.Namespace) + } +} + +func init() { + internal.RegisterErrorCodeMap("search", pb.SearchServiceError_ErrorCode_name) + internal.NamespaceMods["search"] = namespaceMod +} diff --git a/vendor/google.golang.org/appengine/search/search_test.go b/vendor/google.golang.org/appengine/search/search_test.go new file mode 100644 index 0000000..f7c339b --- /dev/null +++ b/vendor/google.golang.org/appengine/search/search_test.go @@ -0,0 +1,1000 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package search + +import ( + "errors" + "fmt" + "reflect" + "strings" + "testing" + "time" + + "github.com/golang/protobuf/proto" + + "google.golang.org/appengine" + "google.golang.org/appengine/internal/aetesting" + pb "google.golang.org/appengine/internal/search" +) + +type TestDoc struct { + String string + Atom Atom + HTML HTML + Float float64 + Location appengine.GeoPoint + Time time.Time +} + +type FieldListWithMeta struct { + Fields FieldList + Meta *DocumentMetadata +} + +func (f *FieldListWithMeta) Load(fields []Field, meta *DocumentMetadata) error { + f.Meta = meta + return f.Fields.Load(fields, nil) +} + +func (f *FieldListWithMeta) Save() ([]Field, *DocumentMetadata, error) { + fields, _, err := f.Fields.Save() + return fields, f.Meta, err +} + +// Assert that FieldListWithMeta satisfies FieldLoadSaver +var _ FieldLoadSaver = &FieldListWithMeta{} + +var ( + float = 3.14159 + floatOut = "3.14159e+00" + latitude = 37.3894 + longitude = 122.0819 + testGeo = appengine.GeoPoint{latitude, longitude} + testString = "foobar" + testTime = time.Unix(1337324400, 0) + testTimeOut = "1337324400000" + searchMeta = &DocumentMetadata{ + Rank: 42, + } + searchDoc = TestDoc{ + String: testString, + Atom: Atom(testString), + HTML: HTML(testString), + Float: float, + Location: testGeo, + Time: testTime, + } + searchFields = FieldList{ + Field{Name: "String", Value: testString}, + Field{Name: "Atom", Value: Atom(testString)}, + Field{Name: "HTML", Value: HTML(testString)}, + Field{Name: "Float", Value: float}, + Field{Name: "Location", Value: testGeo}, + Field{Name: "Time", Value: testTime}, + } + // searchFieldsWithLang is a copy of the searchFields with the Language field + // set on text/HTML Fields. 
+	searchFieldsWithLang = FieldList{}
+	protoFields          = []*pb.Field{
+		newStringValueField("String", testString, pb.FieldValue_TEXT),
+		newStringValueField("Atom", testString, pb.FieldValue_ATOM),
+		newStringValueField("HTML", testString, pb.FieldValue_HTML),
+		newStringValueField("Float", floatOut, pb.FieldValue_NUMBER),
+		{
+			Name: proto.String("Location"),
+			Value: &pb.FieldValue{
+				Geo: &pb.FieldValue_Geo{
+					Lat: proto.Float64(latitude),
+					Lng: proto.Float64(longitude),
+				},
+				Type: pb.FieldValue_GEO.Enum(),
+			},
+		},
+		newStringValueField("Time", testTimeOut, pb.FieldValue_DATE),
+	}
+)
+
+func init() {
+	for _, f := range searchFields {
+		if f.Name == "String" || f.Name == "HTML" {
+			f.Language = "en"
+		}
+		searchFieldsWithLang = append(searchFieldsWithLang, f)
+	}
+}
+
+func newStringValueField(name, value string, valueType pb.FieldValue_ContentType) *pb.Field {
+	return &pb.Field{
+		Name: proto.String(name),
+		Value: &pb.FieldValue{
+			StringValue: proto.String(value),
+			Type:        valueType.Enum(),
+		},
+	}
+}
+
+func newFacet(name, value string, valueType pb.FacetValue_ContentType) *pb.Facet {
+	return &pb.Facet{
+		Name: proto.String(name),
+		Value: &pb.FacetValue{
+			StringValue: proto.String(value),
+			Type:        valueType.Enum(),
+		},
+	}
+}
+
+func TestValidIndexNameOrDocID(t *testing.T) {
+	testCases := []struct {
+		s    string
+		want bool
+	}{
+		{"", true},
+		{"!", false},
+		{"$", true},
+		{"!bad", false},
+		{"good!", true},
+		{"alsoGood", true},
+		{"has spaces", false},
+		{"is_inva\xffid_UTF-8", false},
+		{"is_non-ASCïI", false},
+		{"underscores_are_ok", true},
+	}
+	for _, tc := range testCases {
+		if got := validIndexNameOrDocID(tc.s); got != tc.want {
+			t.Errorf("%q: got %v, want %v", tc.s, got, tc.want)
+		}
+	}
+}
+
+func TestLoadDoc(t *testing.T) {
+	got, want := TestDoc{}, searchDoc
+	if err := loadDoc(&got, &pb.Document{Field: protoFields}, nil); err != nil {
+		t.Fatalf("loadDoc: %v", err)
+	}
+	if got != want {
+		t.Errorf("loadDoc: got %v, wanted %v", got, want)
+	}
+}
+
+func TestSaveDoc(t *testing.T) {
+	got, err := saveDoc(&searchDoc)
+	if err != nil {
+		t.Fatalf("saveDoc: %v", err)
+	}
+	want := protoFields
+	if !reflect.DeepEqual(got.Field, want) {
+		t.Errorf("\ngot %v\nwant %v", got, want)
+	}
+}
+
+func TestLoadFieldList(t *testing.T) {
+	var got FieldList
+	want := searchFieldsWithLang
+	if err := loadDoc(&got, &pb.Document{Field: protoFields}, nil); err != nil {
+		t.Fatalf("loadDoc: %v", err)
+	}
+	if !reflect.DeepEqual(got, want) {
+		t.Errorf("\ngot %v\nwant %v", got, want)
+	}
+}
+
+func TestLangFields(t *testing.T) {
+	fl := &FieldList{
+		{Name: "Foo", Value: "I am English", Language: "en"},
+		{Name: "Bar", Value: "私は日本人だ", Language: "jp"},
+	}
+	var got FieldList
+	doc, err := saveDoc(fl)
+	if err != nil {
+		t.Fatalf("saveDoc: %v", err)
+	}
+	if err := loadDoc(&got, doc, nil); err != nil {
+		t.Fatalf("loadDoc: %v", err)
+	}
+	if want := fl; !reflect.DeepEqual(&got, want) {
+		t.Errorf("got %v\nwant %v", got, want)
+	}
+}
+
+func TestSaveFieldList(t *testing.T) {
+	got, err := saveDoc(&searchFields)
+	if err != nil {
+		t.Fatalf("saveDoc: %v", err)
+	}
+	want := protoFields
+	if !reflect.DeepEqual(got.Field, want) {
+		t.Errorf("\ngot %v\nwant %v", got, want)
+	}
+}
+
+func TestLoadFieldAndExprList(t *testing.T) {
+	var got, want FieldList
+	for i, f := range searchFieldsWithLang {
+		f.Derived = (i >= 2) // First 2 elements are "fields", next are "expressions".
+ want = append(want, f) + } + doc, expr := &pb.Document{Field: protoFields[:2]}, protoFields[2:] + if err := loadDoc(&got, doc, expr); err != nil { + t.Fatalf("loadDoc: %v", err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("got %v\nwant %v", got, want) + } +} + +func TestLoadMeta(t *testing.T) { + var got FieldListWithMeta + want := FieldListWithMeta{ + Meta: searchMeta, + Fields: searchFieldsWithLang, + } + doc := &pb.Document{ + Field: protoFields, + OrderId: proto.Int32(42), + } + if err := loadDoc(&got, doc, nil); err != nil { + t.Fatalf("loadDoc: %v", err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("\ngot %v\nwant %v", got, want) + } +} + +func TestSaveMeta(t *testing.T) { + got, err := saveDoc(&FieldListWithMeta{ + Meta: searchMeta, + Fields: searchFields, + }) + if err != nil { + t.Fatalf("saveDoc: %v", err) + } + want := &pb.Document{ + Field: protoFields, + OrderId: proto.Int32(42), + } + if !proto.Equal(got, want) { + t.Errorf("\ngot %v\nwant %v", got, want) + } +} + +func TestLoadSaveWithStruct(t *testing.T) { + type gopher struct { + Name string + Info string `search:"about"` + Legs float64 `search:",facet"` + Fuzz Atom `search:"Fur,facet"` + } + + doc := gopher{"Gopher", "Likes slide rules.", 4, Atom("furry")} + pb := &pb.Document{ + Field: []*pb.Field{ + newStringValueField("Name", "Gopher", pb.FieldValue_TEXT), + newStringValueField("about", "Likes slide rules.", pb.FieldValue_TEXT), + }, + Facet: []*pb.Facet{ + newFacet("Legs", "4e+00", pb.FacetValue_NUMBER), + newFacet("Fur", "furry", pb.FacetValue_ATOM), + }, + } + + var gotDoc gopher + if err := loadDoc(&gotDoc, pb, nil); err != nil { + t.Fatalf("loadDoc: %v", err) + } + if !reflect.DeepEqual(gotDoc, doc) { + t.Errorf("loading doc\ngot %v\nwant %v", gotDoc, doc) + } + + gotPB, err := saveDoc(&doc) + if err != nil { + t.Fatalf("saveDoc: %v", err) + } + gotPB.OrderId = nil // Don't test: it's time dependent. 
+	if !proto.Equal(gotPB, pb) {
+		t.Errorf("saving doc\ngot %v\nwant %v", gotPB, pb)
+	}
+}
+
+func TestValidFieldNames(t *testing.T) {
+	testCases := []struct {
+		name  string
+		valid bool
+	}{
+		{"Normal", true},
+		{"Also_OK_123", true},
+		{"Not so great", false},
+		{"lower_case", true},
+		{"Exclaim!", false},
+		{"Hello세상아 안녕", false},
+		{"", false},
+		{"Hεllo", false},
+		{strings.Repeat("A", 500), true},
+		{strings.Repeat("A", 501), false},
+	}
+
+	for _, tc := range testCases {
+		_, err := saveDoc(&FieldList{
+			Field{Name: tc.name, Value: "val"},
+		})
+		if err != nil && !strings.Contains(err.Error(), "invalid field name") {
+			t.Errorf("unexpected err %q for field name %q", err, tc.name)
+		}
+		if (err == nil) != tc.valid {
+			t.Errorf("field %q: expected valid %t, received err %v", tc.name, tc.valid, err)
+		}
+	}
+}
+
+func TestValidLangs(t *testing.T) {
+	testCases := []struct {
+		field Field
+		valid bool
+	}{
+		{Field{Name: "Foo", Value: "String", Language: ""}, true},
+		{Field{Name: "Foo", Value: "String", Language: "en"}, true},
+		{Field{Name: "Foo", Value: "String", Language: "aussie"}, false},
+		{Field{Name: "Foo", Value: "String", Language: "12"}, false},
+		{Field{Name: "Foo", Value: HTML("String"), Language: "en"}, true},
+		{Field{Name: "Foo", Value: Atom("String"), Language: "en"}, false},
+		{Field{Name: "Foo", Value: 42, Language: "en"}, false},
+	}
+
+	for _, tt := range testCases {
+		_, err := saveDoc(&FieldList{tt.field})
+		if err == nil != tt.valid {
+			t.Errorf("Field %v, got error %v, wanted valid %t", tt.field, err, tt.valid)
+		}
+	}
+}
+
+func TestDuplicateFields(t *testing.T) {
+	testCases := []struct {
+		desc   string
+		fields FieldList
+		errMsg string // Non-empty if we expect an error
+	}{
+		{
+			desc:   "multi string",
+			fields: FieldList{{Name: "FieldA", Value: "val1"}, {Name: "FieldA", Value: "val2"}, {Name: "FieldA", Value: "val3"}},
+		},
+		{
+			desc:   "multi atom",
+			fields: FieldList{{Name: "FieldA", Value: Atom("val1")}, {Name: "FieldA", Value: Atom("val2")}, {Name: "FieldA", Value: Atom("val3")}},
+		},
+		{
+			desc:   "mixed",
+			fields: FieldList{{Name: "FieldA", Value: testString}, {Name: "FieldA", Value: testTime}, {Name: "FieldA", Value: float}},
+		},
+		{
+			desc:   "multi time",
+			fields: FieldList{{Name: "FieldA", Value: testTime}, {Name: "FieldA", Value: testTime}},
+			errMsg: `duplicate time field "FieldA"`,
+		},
+		{
+			desc:   "multi num",
+			fields: FieldList{{Name: "FieldA", Value: float}, {Name: "FieldA", Value: float}},
+			errMsg: `duplicate numeric field "FieldA"`,
+		},
+	}
+	for _, tc := range testCases {
+		_, err := saveDoc(&tc.fields)
+		if (err == nil) != (tc.errMsg == "") || (err != nil && !strings.Contains(err.Error(), tc.errMsg)) {
+			t.Errorf("%s: got err %v, wanted %q", tc.desc, err, tc.errMsg)
+		}
+	}
+}
+
+func TestLoadErrFieldMismatch(t *testing.T) {
+	testCases := []struct {
+		desc string
+		dst  interface{}
+		src  []*pb.Field
+		err  error
+	}{
+		{
+			desc: "missing",
+			dst:  &struct{ One string }{},
+			src:  []*pb.Field{newStringValueField("Two", "woop!", pb.FieldValue_TEXT)},
+			err: &ErrFieldMismatch{
+				FieldName: "Two",
+				Reason:    "no such struct field",
+			},
+		},
+		{
+			desc: "wrong type",
+			dst:  &struct{ Num float64 }{},
+			src:  []*pb.Field{newStringValueField("Num", "woop!", pb.FieldValue_TEXT)},
+			err: &ErrFieldMismatch{
+				FieldName: "Num",
+				Reason:    "type mismatch: float64 for string data",
+			},
+		},
+		{
+			desc: "unsettable",
+			dst:  &struct{ lower string }{},
+			src:  []*pb.Field{newStringValueField("lower", "woop!", pb.FieldValue_TEXT)},
+			err:
&ErrFieldMismatch{ + FieldName: "lower", + Reason: "cannot set struct field", + }, + }, + } + for _, tc := range testCases { + err := loadDoc(tc.dst, &pb.Document{Field: tc.src}, nil) + if !reflect.DeepEqual(err, tc.err) { + t.Errorf("%s, got err %v, wanted %v", tc.desc, err, tc.err) + } + } +} + +func TestLimit(t *testing.T) { + index, err := Open("Doc") + if err != nil { + t.Fatalf("err from Open: %v", err) + } + c := aetesting.FakeSingleContext(t, "search", "Search", func(req *pb.SearchRequest, res *pb.SearchResponse) error { + limit := 20 // Default per page. + if req.Params.Limit != nil { + limit = int(*req.Params.Limit) + } + res.Status = &pb.RequestStatus{Code: pb.SearchServiceError_OK.Enum()} + res.MatchedCount = proto.Int64(int64(limit)) + for i := 0; i < limit; i++ { + res.Result = append(res.Result, &pb.SearchResult{Document: &pb.Document{}}) + res.Cursor = proto.String("moreresults") + } + return nil + }) + + const maxDocs = 500 // Limit maximum number of docs. + testCases := []struct { + limit, want int + }{ + {limit: 0, want: maxDocs}, + {limit: 42, want: 42}, + {limit: 100, want: 100}, + {limit: 1000, want: maxDocs}, + } + + for _, tt := range testCases { + it := index.Search(c, "gopher", &SearchOptions{Limit: tt.limit, IDsOnly: true}) + count := 0 + for ; count < maxDocs; count++ { + _, err := it.Next(nil) + if err == Done { + break + } + if err != nil { + t.Fatalf("err after %d: %v", count, err) + } + } + if count != tt.want { + t.Errorf("got %d results, expected %d", count, tt.want) + } + } +} + +func TestPut(t *testing.T) { + index, err := Open("Doc") + if err != nil { + t.Fatalf("err from Open: %v", err) + } + + c := aetesting.FakeSingleContext(t, "search", "IndexDocument", func(in *pb.IndexDocumentRequest, out *pb.IndexDocumentResponse) error { + expectedIn := &pb.IndexDocumentRequest{ + Params: &pb.IndexDocumentParams{ + Document: []*pb.Document{ + {Field: protoFields, OrderId: proto.Int32(42)}, + }, + IndexSpec: &pb.IndexSpec{ + Name: proto.String("Doc"), + }, + }, + } + if !proto.Equal(in, expectedIn) { + return fmt.Errorf("unsupported argument:\ngot %v\nwant %v", in, expectedIn) + } + *out = pb.IndexDocumentResponse{ + Status: []*pb.RequestStatus{ + {Code: pb.SearchServiceError_OK.Enum()}, + }, + DocId: []string{ + "doc_id", + }, + } + return nil + }) + + id, err := index.Put(c, "", &FieldListWithMeta{ + Meta: searchMeta, + Fields: searchFields, + }) + if err != nil { + t.Fatal(err) + } + if want := "doc_id"; id != want { + t.Errorf("Got doc ID %q, want %q", id, want) + } +} + +func TestPutAutoOrderID(t *testing.T) { + index, err := Open("Doc") + if err != nil { + t.Fatalf("err from Open: %v", err) + } + + c := aetesting.FakeSingleContext(t, "search", "IndexDocument", func(in *pb.IndexDocumentRequest, out *pb.IndexDocumentResponse) error { + if len(in.Params.GetDocument()) < 1 { + return fmt.Errorf("expected at least one Document, got %v", in) + } + got, want := in.Params.Document[0].GetOrderId(), int32(time.Since(orderIDEpoch).Seconds()) + if d := got - want; -5 > d || d > 5 { + return fmt.Errorf("got OrderId %d, want near %d", got, want) + } + *out = pb.IndexDocumentResponse{ + Status: []*pb.RequestStatus{ + {Code: pb.SearchServiceError_OK.Enum()}, + }, + DocId: []string{ + "doc_id", + }, + } + return nil + }) + + if _, err := index.Put(c, "", &searchFields); err != nil { + t.Fatal(err) + } +} + +func TestPutBadStatus(t *testing.T) { + index, err := Open("Doc") + if err != nil { + t.Fatalf("err from Open: %v", err) + } + + c := aetesting.FakeSingleContext(t, 
"search", "IndexDocument", func(_ *pb.IndexDocumentRequest, out *pb.IndexDocumentResponse) error { + *out = pb.IndexDocumentResponse{ + Status: []*pb.RequestStatus{ + { + Code: pb.SearchServiceError_INVALID_REQUEST.Enum(), + ErrorDetail: proto.String("insufficient gophers"), + }, + }, + } + return nil + }) + + wantErr := "search: INVALID_REQUEST: insufficient gophers" + if _, err := index.Put(c, "", &searchFields); err == nil || err.Error() != wantErr { + t.Fatalf("Put: got %v error, want %q", err, wantErr) + } +} + +func TestSortOptions(t *testing.T) { + index, err := Open("Doc") + if err != nil { + t.Fatalf("err from Open: %v", err) + } + + noErr := errors.New("") // Sentinel err to return to prevent sending request. + + testCases := []struct { + desc string + sort *SortOptions + wantSort []*pb.SortSpec + wantScorer *pb.ScorerSpec + wantErr string + }{ + { + desc: "No SortOptions", + }, + { + desc: "Basic", + sort: &SortOptions{ + Expressions: []SortExpression{ + {Expr: "dog"}, + {Expr: "cat", Reverse: true}, + {Expr: "gopher", Default: "blue"}, + {Expr: "fish", Default: 2.0}, + }, + Limit: 42, + Scorer: MatchScorer, + }, + wantSort: []*pb.SortSpec{ + {SortExpression: proto.String("dog")}, + {SortExpression: proto.String("cat"), SortDescending: proto.Bool(false)}, + {SortExpression: proto.String("gopher"), DefaultValueText: proto.String("blue")}, + {SortExpression: proto.String("fish"), DefaultValueNumeric: proto.Float64(2)}, + }, + wantScorer: &pb.ScorerSpec{ + Limit: proto.Int32(42), + Scorer: pb.ScorerSpec_MATCH_SCORER.Enum(), + }, + }, + { + desc: "Bad expression default", + sort: &SortOptions{ + Expressions: []SortExpression{ + {Expr: "dog", Default: true}, + }, + }, + wantErr: `search: invalid Default type bool for expression "dog"`, + }, + { + desc: "RescoringMatchScorer", + sort: &SortOptions{Scorer: RescoringMatchScorer}, + wantScorer: &pb.ScorerSpec{Scorer: pb.ScorerSpec_RESCORING_MATCH_SCORER.Enum()}, + }, + } + + for _, tt := range testCases { + c := aetesting.FakeSingleContext(t, "search", "Search", func(req *pb.SearchRequest, _ *pb.SearchResponse) error { + params := req.Params + if !reflect.DeepEqual(params.SortSpec, tt.wantSort) { + t.Errorf("%s: params.SortSpec=%v; want %v", tt.desc, params.SortSpec, tt.wantSort) + } + if !reflect.DeepEqual(params.ScorerSpec, tt.wantScorer) { + t.Errorf("%s: params.ScorerSpec=%v; want %v", tt.desc, params.ScorerSpec, tt.wantScorer) + } + return noErr // Always return some error to prevent response parsing. + }) + + it := index.Search(c, "gopher", &SearchOptions{Sort: tt.sort}) + _, err := it.Next(nil) + if err == nil { + t.Fatalf("%s: err==nil; should not happen", tt.desc) + } + if err.Error() != tt.wantErr { + t.Errorf("%s: got error %q, want %q", tt.desc, err, tt.wantErr) + } + } +} + +func TestFieldSpec(t *testing.T) { + index, err := Open("Doc") + if err != nil { + t.Fatalf("err from Open: %v", err) + } + + errFoo := errors.New("foo") // sentinel error when there isn't one. 
+ + testCases := []struct { + desc string + opts *SearchOptions + want *pb.FieldSpec + }{ + { + desc: "No options", + want: &pb.FieldSpec{}, + }, + { + desc: "Fields", + opts: &SearchOptions{ + Fields: []string{"one", "two"}, + }, + want: &pb.FieldSpec{ + Name: []string{"one", "two"}, + }, + }, + { + desc: "Expressions", + opts: &SearchOptions{ + Expressions: []FieldExpression{ + {Name: "one", Expr: "price * quantity"}, + {Name: "two", Expr: "min(daily_use, 10) * rate"}, + }, + }, + want: &pb.FieldSpec{ + Expression: []*pb.FieldSpec_Expression{ + {Name: proto.String("one"), Expression: proto.String("price * quantity")}, + {Name: proto.String("two"), Expression: proto.String("min(daily_use, 10) * rate")}, + }, + }, + }, + } + + for _, tt := range testCases { + c := aetesting.FakeSingleContext(t, "search", "Search", func(req *pb.SearchRequest, _ *pb.SearchResponse) error { + params := req.Params + if !reflect.DeepEqual(params.FieldSpec, tt.want) { + t.Errorf("%s: params.FieldSpec=%v; want %v", tt.desc, params.FieldSpec, tt.want) + } + return errFoo // Always return some error to prevent response parsing. + }) + + it := index.Search(c, "gopher", tt.opts) + if _, err := it.Next(nil); err != errFoo { + t.Fatalf("%s: got error %v; want %v", tt.desc, err, errFoo) + } + } +} + +func TestBasicSearchOpts(t *testing.T) { + index, err := Open("Doc") + if err != nil { + t.Fatalf("err from Open: %v", err) + } + + noErr := errors.New("") // Sentinel err to return to prevent sending request. + + testCases := []struct { + desc string + facetOpts []FacetSearchOption + cursor Cursor + offset int + countAccuracy int + want *pb.SearchParams + wantErr string + }{ + { + desc: "No options", + want: &pb.SearchParams{}, + }, + { + desc: "Default auto discovery", + facetOpts: []FacetSearchOption{ + AutoFacetDiscovery(0, 0), + }, + want: &pb.SearchParams{ + AutoDiscoverFacetCount: proto.Int32(10), + }, + }, + { + desc: "Auto discovery", + facetOpts: []FacetSearchOption{ + AutoFacetDiscovery(7, 12), + }, + want: &pb.SearchParams{ + AutoDiscoverFacetCount: proto.Int32(7), + FacetAutoDetectParam: &pb.FacetAutoDetectParam{ + ValueLimit: proto.Int32(12), + }, + }, + }, + { + desc: "Param Depth", + facetOpts: []FacetSearchOption{ + AutoFacetDiscovery(7, 12), + }, + want: &pb.SearchParams{ + AutoDiscoverFacetCount: proto.Int32(7), + FacetAutoDetectParam: &pb.FacetAutoDetectParam{ + ValueLimit: proto.Int32(12), + }, + }, + }, + { + desc: "Doc depth", + facetOpts: []FacetSearchOption{ + FacetDocumentDepth(123), + }, + want: &pb.SearchParams{ + FacetDepth: proto.Int32(123), + }, + }, + { + desc: "Facet discovery", + facetOpts: []FacetSearchOption{ + FacetDiscovery("colour"), + FacetDiscovery("size", Atom("M"), Atom("L")), + FacetDiscovery("price", LessThan(7), Range{7, 14}, AtLeast(14)), + }, + want: &pb.SearchParams{ + IncludeFacet: []*pb.FacetRequest{ + {Name: proto.String("colour")}, + {Name: proto.String("size"), Params: &pb.FacetRequestParam{ + ValueConstraint: []string{"M", "L"}, + }}, + {Name: proto.String("price"), Params: &pb.FacetRequestParam{ + Range: []*pb.FacetRange{ + {End: proto.String("7e+00")}, + {Start: proto.String("7e+00"), End: proto.String("1.4e+01")}, + {Start: proto.String("1.4e+01")}, + }, + }}, + }, + }, + }, + { + desc: "Facet discovery - bad value", + facetOpts: []FacetSearchOption{ + FacetDiscovery("colour", true), + }, + wantErr: "bad FacetSearchOption: unsupported value type bool", + }, + { + desc: "Facet discovery - mix value types", + facetOpts: []FacetSearchOption{ + FacetDiscovery("colour", 
Atom("blue"), AtLeast(7)), + }, + wantErr: "bad FacetSearchOption: values must all be Atom, or must all be Range", + }, + { + desc: "Facet discovery - invalid range", + facetOpts: []FacetSearchOption{ + FacetDiscovery("colour", Range{negInf, posInf}), + }, + wantErr: "bad FacetSearchOption: invalid range: either Start or End must be finite", + }, + { + desc: "Cursor", + cursor: Cursor("mycursor"), + want: &pb.SearchParams{ + Cursor: proto.String("mycursor"), + }, + }, + { + desc: "Offset", + offset: 121, + want: &pb.SearchParams{ + Offset: proto.Int32(121), + }, + }, + { + desc: "Cursor and Offset set", + cursor: Cursor("mycursor"), + offset: 121, + wantErr: "at most one of Cursor and Offset may be specified", + }, + { + desc: "Count accuracy", + countAccuracy: 100, + want: &pb.SearchParams{ + MatchedCountAccuracy: proto.Int32(100), + }, + }, + } + + for _, tt := range testCases { + c := aetesting.FakeSingleContext(t, "search", "Search", func(req *pb.SearchRequest, _ *pb.SearchResponse) error { + if tt.want == nil { + t.Errorf("%s: expected call to fail", tt.desc) + return nil + } + // Set default fields. + tt.want.Query = proto.String("gopher") + tt.want.IndexSpec = &pb.IndexSpec{Name: proto.String("Doc")} + tt.want.CursorType = pb.SearchParams_PER_RESULT.Enum() + tt.want.FieldSpec = &pb.FieldSpec{} + if got := req.Params; !reflect.DeepEqual(got, tt.want) { + t.Errorf("%s: params=%v; want %v", tt.desc, got, tt.want) + } + return noErr // Always return some error to prevent response parsing. + }) + + it := index.Search(c, "gopher", &SearchOptions{ + Facets: tt.facetOpts, + Cursor: tt.cursor, + Offset: tt.offset, + CountAccuracy: tt.countAccuracy, + }) + _, err := it.Next(nil) + if err == nil { + t.Fatalf("%s: err==nil; should not happen", tt.desc) + } + if err.Error() != tt.wantErr { + t.Errorf("%s: got error %q, want %q", tt.desc, err, tt.wantErr) + } + } +} + +func TestFacetRefinements(t *testing.T) { + index, err := Open("Doc") + if err != nil { + t.Fatalf("err from Open: %v", err) + } + + noErr := errors.New("") // Sentinel err to return to prevent sending request. 
+ + testCases := []struct { + desc string + refine []Facet + want []*pb.FacetRefinement + wantErr string + }{ + { + desc: "No refinements", + }, + { + desc: "Basic", + refine: []Facet{ + {Name: "fur", Value: Atom("fluffy")}, + {Name: "age", Value: LessThan(123)}, + {Name: "age", Value: AtLeast(0)}, + {Name: "legs", Value: Range{Start: 3, End: 5}}, + }, + want: []*pb.FacetRefinement{ + {Name: proto.String("fur"), Value: proto.String("fluffy")}, + {Name: proto.String("age"), Range: &pb.FacetRefinement_Range{End: proto.String("1.23e+02")}}, + {Name: proto.String("age"), Range: &pb.FacetRefinement_Range{Start: proto.String("0e+00")}}, + {Name: proto.String("legs"), Range: &pb.FacetRefinement_Range{Start: proto.String("3e+00"), End: proto.String("5e+00")}}, + }, + }, + { + desc: "Infinite range", + refine: []Facet{ + {Name: "age", Value: Range{Start: negInf, End: posInf}}, + }, + wantErr: `search: refinement for facet "age": either Start or End must be finite`, + }, + { + desc: "Bad End value in range", + refine: []Facet{ + {Name: "age", Value: LessThan(2147483648)}, + }, + wantErr: `search: refinement for facet "age": invalid value for End`, + }, + { + desc: "Bad Start value in range", + refine: []Facet{ + {Name: "age", Value: AtLeast(-2147483649)}, + }, + wantErr: `search: refinement for facet "age": invalid value for Start`, + }, + { + desc: "Unknown value type", + refine: []Facet{ + {Name: "age", Value: "you can't use strings!"}, + }, + wantErr: `search: unsupported refinement for facet "age" of type string`, + }, + } + + for _, tt := range testCases { + c := aetesting.FakeSingleContext(t, "search", "Search", func(req *pb.SearchRequest, _ *pb.SearchResponse) error { + if got := req.Params.FacetRefinement; !reflect.DeepEqual(got, tt.want) { + t.Errorf("%s: params.FacetRefinement=%v; want %v", tt.desc, got, tt.want) + } + return noErr // Always return some error to prevent response parsing. + }) + + it := index.Search(c, "gopher", &SearchOptions{Refinements: tt.refine}) + _, err := it.Next(nil) + if err == nil { + t.Fatalf("%s: err==nil; should not happen", tt.desc) + } + if err.Error() != tt.wantErr { + t.Errorf("%s: got error %q, want %q", tt.desc, err, tt.wantErr) + } + } +} + +func TestNamespaceResetting(t *testing.T) { + namec := make(chan *string, 1) + c0 := aetesting.FakeSingleContext(t, "search", "IndexDocument", func(req *pb.IndexDocumentRequest, res *pb.IndexDocumentResponse) error { + namec <- req.Params.IndexSpec.Namespace + return fmt.Errorf("RPC error") + }) + + // Check that wrapping c0 in a namespace twice works correctly. 
+	c1, err := appengine.Namespace(c0, "A")
+	if err != nil {
+		t.Fatalf("appengine.Namespace: %v", err)
+	}
+	c2, err := appengine.Namespace(c1, "") // should act as the original context
+	if err != nil {
+		t.Fatalf("appengine.Namespace: %v", err)
+	}
+
+	i := (&Index{})
+
+	i.Put(c0, "something", &searchDoc)
+	if ns := <-namec; ns != nil {
+		t.Errorf(`Put with c0: ns = %q, want nil`, *ns)
+	}
+
+	i.Put(c1, "something", &searchDoc)
+	if ns := <-namec; ns == nil {
+		t.Error(`Put with c1: ns = nil, want "A"`)
+	} else if *ns != "A" {
+		t.Errorf(`Put with c1: ns = %q, want "A"`, *ns)
+	}
+
+	i.Put(c2, "something", &searchDoc)
+	if ns := <-namec; ns != nil {
+		t.Errorf(`Put with c2: ns = %q, want nil`, *ns)
+	}
+}
diff --git a/vendor/google.golang.org/appengine/search/struct.go b/vendor/google.golang.org/appengine/search/struct.go
new file mode 100644
index 0000000..e73d2f2
--- /dev/null
+++ b/vendor/google.golang.org/appengine/search/struct.go
@@ -0,0 +1,251 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package search
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+)
+
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct.
+type ErrFieldMismatch struct {
+	FieldName string
+	Reason    string
+}
+
+func (e *ErrFieldMismatch) Error() string {
+	return fmt.Sprintf("search: cannot load field %q: %s", e.FieldName, e.Reason)
+}
+
+// ErrFacetMismatch is returned when a facet is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct. StructType is the type of the struct
+// pointed to by the destination argument passed to Iterator.Next.
+type ErrFacetMismatch struct {
+	StructType reflect.Type
+	FacetName  string
+	Reason     string
+}
+
+func (e *ErrFacetMismatch) Error() string {
+	return fmt.Sprintf("search: cannot load facet %q into a %q: %s", e.FacetName, e.StructType, e.Reason)
+}
+
+// structCodec defines how to convert a given struct to/from a search document.
+type structCodec struct {
+	// byIndex returns the struct tag for the i'th struct field.
+	byIndex []structTag
+
+	// fieldByName returns the index of the struct field for the given field name.
+	fieldByName map[string]int
+
+	// facetByName returns the index of the struct field for the given facet name.
+	facetByName map[string]int
+}
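
To make the tag grammar concrete before the parsing code below, this is how a consuming package might annotate a struct (type and field names illustrative):

package example // hypothetical

import "google.golang.org/appengine/search"

type Product struct {
	Name   string                      // indexed as the field "Name"
	Blurb  string      `search:"about"`  // renamed to "about"
	Price  float64     `search:",facet"` // stored as a facet, not a field
	Brand  search.Atom `search:",facet"` // Atom facets hold discrete values
	Secret string      `search:"-"`      // ignored by both Load and Save
}
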
+// structTag holds a structured version of each struct field's parsed tag.
+type structTag struct {
+	name   string
+	facet  bool
+	ignore bool
+}
+
+var (
+	codecsMu sync.RWMutex
+	codecs   = map[reflect.Type]*structCodec{}
+)
+
+func loadCodec(t reflect.Type) (*structCodec, error) {
+	codecsMu.RLock()
+	codec, ok := codecs[t]
+	codecsMu.RUnlock()
+	if ok {
+		return codec, nil
+	}
+
+	codecsMu.Lock()
+	defer codecsMu.Unlock()
+	if codec, ok := codecs[t]; ok {
+		return codec, nil
+	}
+
+	codec = &structCodec{
+		fieldByName: make(map[string]int),
+		facetByName: make(map[string]int),
+	}
+
+	for i, I := 0, t.NumField(); i < I; i++ {
+		f := t.Field(i)
+		name, opts := f.Tag.Get("search"), ""
+		if i := strings.Index(name, ","); i != -1 {
+			name, opts = name[:i], name[i+1:]
+		}
+		ignore := false
+		if name == "-" {
+			ignore = true
+		} else if name == "" {
+			name = f.Name
+		} else if !validFieldName(name) {
+			return nil, fmt.Errorf("search: struct tag has invalid field name: %q", name)
+		}
+		facet := opts == "facet"
+		codec.byIndex = append(codec.byIndex, structTag{name: name, facet: facet, ignore: ignore})
+		if facet {
+			codec.facetByName[name] = i
+		} else {
+			codec.fieldByName[name] = i
+		}
+	}
+
+	codecs[t] = codec
+	return codec, nil
+}
+
+// structFLS adapts a struct to be a FieldLoadSaver.
+type structFLS struct {
+	v     reflect.Value
+	codec *structCodec
+}
+
+func (s structFLS) Load(fields []Field, meta *DocumentMetadata) error {
+	var err error
+	for _, field := range fields {
+		i, ok := s.codec.fieldByName[field.Name]
+		if !ok {
+			// Note the error, but keep going.
+			err = &ErrFieldMismatch{
+				FieldName: field.Name,
+				Reason:    "no such struct field",
+			}
+			continue
+		}
+		f := s.v.Field(i)
+		if !f.CanSet() {
+			// Note the error, but keep going.
+			err = &ErrFieldMismatch{
+				FieldName: field.Name,
+				Reason:    "cannot set struct field",
+			}
+			continue
+		}
+		v := reflect.ValueOf(field.Value)
+		if ft, vt := f.Type(), v.Type(); ft != vt {
+			err = &ErrFieldMismatch{
+				FieldName: field.Name,
+				Reason:    fmt.Sprintf("type mismatch: %v for %v data", ft, vt),
+			}
+			continue
+		}
+		f.Set(v)
+	}
+	if meta == nil {
+		return err
+	}
+	for _, facet := range meta.Facets {
+		i, ok := s.codec.facetByName[facet.Name]
+		if !ok {
+			// Note the error, but keep going.
+			if err == nil {
+				err = &ErrFacetMismatch{
+					StructType: s.v.Type(),
+					FacetName:  facet.Name,
+					Reason:     "no matching field found",
+				}
+			}
+			continue
+		}
+		f := s.v.Field(i)
+		if !f.CanSet() {
+			// Note the error, but keep going.
+			if err == nil {
+				err = &ErrFacetMismatch{
+					StructType: s.v.Type(),
+					FacetName:  facet.Name,
+					Reason:     "unable to set unexported field of struct",
+				}
+			}
+			continue
+		}
+		v := reflect.ValueOf(facet.Value)
+		if ft, vt := f.Type(), v.Type(); ft != vt {
+			// Note the error, but keep going; a mismatched facet must not
+			// reach f.Set below.
+			if err == nil {
+				err = &ErrFacetMismatch{
+					StructType: s.v.Type(),
+					FacetName:  facet.Name,
+					Reason:     fmt.Sprintf("type mismatch: %v for %v data", ft, vt),
+				}
+			}
+			continue
+		}
+		f.Set(v)
+	}
+	return err
+}
+
+func (s structFLS) Save() ([]Field, *DocumentMetadata, error) {
+	fields := make([]Field, 0, len(s.codec.fieldByName))
+	var facets []Facet
+	for i, tag := range s.codec.byIndex {
+		if tag.ignore {
+			continue
+		}
+		f := s.v.Field(i)
+		if !f.CanSet() {
+			continue
+		}
+		if tag.facet {
+			facets = append(facets, Facet{Name: tag.name, Value: f.Interface()})
+		} else {
+			fields = append(fields, Field{Name: tag.name, Value: f.Interface()})
+		}
+	}
+	return fields, &DocumentMetadata{Facets: facets}, nil
+}
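
A type can also bypass the struct codec entirely by implementing FieldLoadSaver itself. A sketch mirroring the FieldListWithMeta pattern from the tests, stamping a fixed Rank at save time (FieldList is assumed from this package's field.go; it already provides a compatible Load):

package example // hypothetical

import "google.golang.org/appengine/search"

// rankedDoc delegates field handling to the embedded FieldList, whose
// promoted Load method satisfies half of FieldLoadSaver, and overrides
// Save to attach document metadata.
type rankedDoc struct {
	search.FieldList
	Rank int
}

func (d *rankedDoc) Save() ([]search.Field, *search.DocumentMetadata, error) {
	fields, _, err := d.FieldList.Save()
	return fields, &search.DocumentMetadata{Rank: d.Rank}, err
}

var _ search.FieldLoadSaver = &rankedDoc{}

+// newStructFLS returns a FieldLoadSaver for the struct pointer p.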
+func newStructFLS(p interface{}) (FieldLoadSaver, error) { + v := reflect.ValueOf(p) + if v.Kind() != reflect.Ptr || v.IsNil() || v.Elem().Kind() != reflect.Struct { + return nil, ErrInvalidDocumentType + } + codec, err := loadCodec(v.Elem().Type()) + if err != nil { + return nil, err + } + return structFLS{v.Elem(), codec}, nil +} + +func loadStructWithMeta(dst interface{}, f []Field, meta *DocumentMetadata) error { + x, err := newStructFLS(dst) + if err != nil { + return err + } + return x.Load(f, meta) +} + +func saveStructWithMeta(src interface{}) ([]Field, *DocumentMetadata, error) { + x, err := newStructFLS(src) + if err != nil { + return nil, nil, err + } + return x.Save() +} + +// LoadStruct loads the fields from f to dst. dst must be a struct pointer. +func LoadStruct(dst interface{}, f []Field) error { + return loadStructWithMeta(dst, f, nil) +} + +// SaveStruct returns the fields from src as a slice of Field. +// src must be a struct pointer. +func SaveStruct(src interface{}) ([]Field, error) { + f, _, err := saveStructWithMeta(src) + return f, err +} diff --git a/vendor/google.golang.org/appengine/search/struct_test.go b/vendor/google.golang.org/appengine/search/struct_test.go new file mode 100644 index 0000000..4e5b5d1 --- /dev/null +++ b/vendor/google.golang.org/appengine/search/struct_test.go @@ -0,0 +1,213 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package search + +import ( + "reflect" + "testing" +) + +func TestLoadingStruct(t *testing.T) { + testCases := []struct { + desc string + fields []Field + meta *DocumentMetadata + want interface{} + wantErr bool + }{ + { + desc: "Basic struct", + fields: []Field{ + {Name: "Name", Value: "Gopher"}, + {Name: "Legs", Value: float64(4)}, + }, + want: &struct { + Name string + Legs float64 + }{"Gopher", 4}, + }, + { + desc: "Struct with tags", + fields: []Field{ + {Name: "Name", Value: "Gopher"}, + {Name: "about", Value: "Likes slide rules."}, + }, + meta: &DocumentMetadata{Facets: []Facet{ + {Name: "Legs", Value: float64(4)}, + {Name: "Fur", Value: Atom("furry")}, + }}, + want: &struct { + Name string + Info string `search:"about"` + Legs float64 `search:",facet"` + Fuzz Atom `search:"Fur,facet"` + }{"Gopher", "Likes slide rules.", 4, Atom("furry")}, + }, + { + desc: "Bad field from tag", + want: &struct { + AlphaBeta string `search:"αβ"` + }{}, + wantErr: true, + }, + { + desc: "Ignore missing field", + fields: []Field{ + {Name: "Meaning", Value: float64(42)}, + }, + want: &struct{}{}, + wantErr: true, + }, + { + desc: "Ignore unsettable field", + fields: []Field{ + {Name: "meaning", Value: float64(42)}, + }, + want: &struct{ meaning float64 }{}, // field not populated. 
+ wantErr: true, + }, + { + desc: "Error on missing facet", + meta: &DocumentMetadata{Facets: []Facet{ + {Name: "Set", Value: Atom("yes")}, + {Name: "Missing", Value: Atom("no")}, + }}, + want: &struct { + Set Atom `search:",facet"` + }{Atom("yes")}, + wantErr: true, + }, + { + desc: "Error on unsettable facet", + meta: &DocumentMetadata{Facets: []Facet{ + {Name: "Set", Value: Atom("yes")}, + {Name: "unset", Value: Atom("no")}, + }}, + want: &struct { + Set Atom `search:",facet"` + }{Atom("yes")}, + wantErr: true, + }, + { + desc: "Error setting ignored field", + fields: []Field{ + {Name: "Set", Value: "yes"}, + {Name: "Ignored", Value: "no"}, + }, + want: &struct { + Set string + Ignored string `search:"-"` + }{Set: "yes"}, + wantErr: true, + }, + { + desc: "Error setting ignored facet", + meta: &DocumentMetadata{Facets: []Facet{ + {Name: "Set", Value: Atom("yes")}, + {Name: "Ignored", Value: Atom("no")}, + }}, + want: &struct { + Set Atom `search:",facet"` + Ignored Atom `search:"-,facet"` + }{Set: Atom("yes")}, + wantErr: true, + }, + } + + for _, tt := range testCases { + // Make a pointer to an empty version of what want points to. + dst := reflect.New(reflect.TypeOf(tt.want).Elem()).Interface() + err := loadStructWithMeta(dst, tt.fields, tt.meta) + if err != nil != tt.wantErr { + t.Errorf("%s: got err %v; want err %t", tt.desc, err, tt.wantErr) + continue + } + if !reflect.DeepEqual(dst, tt.want) { + t.Errorf("%s: doesn't match\ngot: %v\nwant: %v", tt.desc, dst, tt.want) + } + } +} + +func TestSavingStruct(t *testing.T) { + testCases := []struct { + desc string + doc interface{} + wantFields []Field + wantFacets []Facet + }{ + { + desc: "Basic struct", + doc: &struct { + Name string + Legs float64 + }{"Gopher", 4}, + wantFields: []Field{ + {Name: "Name", Value: "Gopher"}, + {Name: "Legs", Value: float64(4)}, + }, + }, + { + desc: "Struct with tags", + doc: &struct { + Name string + Info string `search:"about"` + Legs float64 `search:",facet"` + Fuzz Atom `search:"Fur,facet"` + }{"Gopher", "Likes slide rules.", 4, Atom("furry")}, + wantFields: []Field{ + {Name: "Name", Value: "Gopher"}, + {Name: "about", Value: "Likes slide rules."}, + }, + wantFacets: []Facet{ + {Name: "Legs", Value: float64(4)}, + {Name: "Fur", Value: Atom("furry")}, + }, + }, + { + desc: "Ignore unexported struct fields", + doc: &struct { + Name string + info string + Legs float64 `search:",facet"` + fuzz Atom `search:",facet"` + }{"Gopher", "Likes slide rules.", 4, Atom("furry")}, + wantFields: []Field{ + {Name: "Name", Value: "Gopher"}, + }, + wantFacets: []Facet{ + {Name: "Legs", Value: float64(4)}, + }, + }, + { + desc: "Ignore fields marked -", + doc: &struct { + Name string + Info string `search:"-"` + Legs float64 `search:",facet"` + Fuzz Atom `search:"-,facet"` + }{"Gopher", "Likes slide rules.", 4, Atom("furry")}, + wantFields: []Field{ + {Name: "Name", Value: "Gopher"}, + }, + wantFacets: []Facet{ + {Name: "Legs", Value: float64(4)}, + }, + }, + } + + for _, tt := range testCases { + fields, meta, err := saveStructWithMeta(tt.doc) + if err != nil { + t.Errorf("%s: got err %v; want nil", tt.desc, err) + continue + } + if !reflect.DeepEqual(fields, tt.wantFields) { + t.Errorf("%s: fields don't match\ngot: %v\nwant: %v", tt.desc, fields, tt.wantFields) + } + if facets := meta.Facets; !reflect.DeepEqual(facets, tt.wantFacets) { + t.Errorf("%s: facets don't match\ngot: %v\nwant: %v", tt.desc, facets, tt.wantFacets) + } + } +} diff --git a/vendor/google.golang.org/appengine/socket/doc.go 
b/vendor/google.golang.org/appengine/socket/doc.go new file mode 100644 index 0000000..3de46df --- /dev/null +++ b/vendor/google.golang.org/appengine/socket/doc.go @@ -0,0 +1,10 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package socket provides outbound network sockets. +// +// This package is only required in the classic App Engine environment. +// Applications running only in App Engine "flexible environment" should +// use the standard library's net package. +package socket diff --git a/vendor/google.golang.org/appengine/socket/socket_classic.go b/vendor/google.golang.org/appengine/socket/socket_classic.go new file mode 100644 index 0000000..0ad50e2 --- /dev/null +++ b/vendor/google.golang.org/appengine/socket/socket_classic.go @@ -0,0 +1,290 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build appengine + +package socket + +import ( + "fmt" + "io" + "net" + "strconv" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "google.golang.org/appengine/internal" + + pb "google.golang.org/appengine/internal/socket" +) + +// Dial connects to the address addr on the network protocol. +// The address format is host:port, where host may be a hostname or an IP address. +// Known protocols are "tcp" and "udp". +// The returned connection satisfies net.Conn, and is valid while ctx is valid; +// if the connection is to be used after ctx becomes invalid, invoke SetContext +// with the new context. +func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { + return DialTimeout(ctx, protocol, addr, 0) +} + +var ipFamilies = []pb.CreateSocketRequest_SocketFamily{ + pb.CreateSocketRequest_IPv4, + pb.CreateSocketRequest_IPv6, +} + +// DialTimeout is like Dial but takes a timeout. +// The timeout includes name resolution, if required. +func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { + dialCtx := ctx // Used for dialing and name resolution, but not stored in the *Conn. 
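+	// Editor's note: a zero timeout leaves dialCtx as ctx; otherwise the
+	// derived context below bounds both name resolution and the
+	// CreateSocket RPC, and is cancelled when DialTimeout returns.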
+ if timeout > 0 { + var cancel context.CancelFunc + dialCtx, cancel = context.WithTimeout(ctx, timeout) + defer cancel() + } + + host, portStr, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + port, err := strconv.Atoi(portStr) + if err != nil { + return nil, fmt.Errorf("socket: bad port %q: %v", portStr, err) + } + + var prot pb.CreateSocketRequest_SocketProtocol + switch protocol { + case "tcp": + prot = pb.CreateSocketRequest_TCP + case "udp": + prot = pb.CreateSocketRequest_UDP + default: + return nil, fmt.Errorf("socket: unknown protocol %q", protocol) + } + + packedAddrs, resolved, err := resolve(dialCtx, ipFamilies, host) + if err != nil { + return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) + } + if len(packedAddrs) == 0 { + return nil, fmt.Errorf("no addresses for %q", host) + } + + packedAddr := packedAddrs[0] // use first address + fam := pb.CreateSocketRequest_IPv4 + if len(packedAddr) == net.IPv6len { + fam = pb.CreateSocketRequest_IPv6 + } + + req := &pb.CreateSocketRequest{ + Family: fam.Enum(), + Protocol: prot.Enum(), + RemoteIp: &pb.AddressPort{ + Port: proto.Int32(int32(port)), + PackedAddress: packedAddr, + }, + } + if resolved { + req.RemoteIp.HostnameHint = &host + } + res := &pb.CreateSocketReply{} + if err := internal.Call(dialCtx, "remote_socket", "CreateSocket", req, res); err != nil { + return nil, err + } + + return &Conn{ + ctx: ctx, + desc: res.GetSocketDescriptor(), + prot: prot, + local: res.ProxyExternalIp, + remote: req.RemoteIp, + }, nil +} + +// LookupIP returns the given host's IP addresses. +func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { + packedAddrs, _, err := resolve(ctx, ipFamilies, host) + if err != nil { + return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) + } + addrs = make([]net.IP, len(packedAddrs)) + for i, pa := range packedAddrs { + addrs[i] = net.IP(pa) + } + return addrs, nil +} + +func resolve(ctx context.Context, fams []pb.CreateSocketRequest_SocketFamily, host string) ([][]byte, bool, error) { + // Check if it's an IP address. + if ip := net.ParseIP(host); ip != nil { + if ip := ip.To4(); ip != nil { + return [][]byte{ip}, false, nil + } + return [][]byte{ip}, false, nil + } + + req := &pb.ResolveRequest{ + Name: &host, + AddressFamilies: fams, + } + res := &pb.ResolveReply{} + if err := internal.Call(ctx, "remote_socket", "Resolve", req, res); err != nil { + // XXX: need to map to pb.ResolveReply_ErrorCode? + return nil, false, err + } + return res.PackedAddress, true, nil +} + +// withDeadline is like context.WithDeadline, except it ignores the zero deadline. +func withDeadline(parent context.Context, deadline time.Time) (context.Context, context.CancelFunc) { + if deadline.IsZero() { + return parent, func() {} + } + return context.WithDeadline(parent, deadline) +} + +// Conn represents a socket connection. +// It implements net.Conn. +type Conn struct { + ctx context.Context + desc string + offset int64 + + prot pb.CreateSocketRequest_SocketProtocol + local, remote *pb.AddressPort + + readDeadline, writeDeadline time.Time // optional +} + +// SetContext sets the context that is used by this Conn. +// It is usually used only when using a Conn that was created in a different context, +// such as when a connection is created during a warmup request but used while +// servicing a user request. 
+func (cn *Conn) SetContext(ctx context.Context) { + cn.ctx = ctx +} + +func (cn *Conn) Read(b []byte) (n int, err error) { + const maxRead = 1 << 20 + if len(b) > maxRead { + b = b[:maxRead] + } + + req := &pb.ReceiveRequest{ + SocketDescriptor: &cn.desc, + DataSize: proto.Int32(int32(len(b))), + } + res := &pb.ReceiveReply{} + if !cn.readDeadline.IsZero() { + req.TimeoutSeconds = proto.Float64(cn.readDeadline.Sub(time.Now()).Seconds()) + } + ctx, cancel := withDeadline(cn.ctx, cn.readDeadline) + defer cancel() + if err := internal.Call(ctx, "remote_socket", "Receive", req, res); err != nil { + return 0, err + } + if len(res.Data) == 0 { + return 0, io.EOF + } + if len(res.Data) > len(b) { + return 0, fmt.Errorf("socket: internal error: read too much data: %d > %d", len(res.Data), len(b)) + } + return copy(b, res.Data), nil +} + +func (cn *Conn) Write(b []byte) (n int, err error) { + const lim = 1 << 20 // max per chunk + + for n < len(b) { + chunk := b[n:] + if len(chunk) > lim { + chunk = chunk[:lim] + } + + req := &pb.SendRequest{ + SocketDescriptor: &cn.desc, + Data: chunk, + StreamOffset: &cn.offset, + } + res := &pb.SendReply{} + if !cn.writeDeadline.IsZero() { + req.TimeoutSeconds = proto.Float64(cn.writeDeadline.Sub(time.Now()).Seconds()) + } + ctx, cancel := withDeadline(cn.ctx, cn.writeDeadline) + defer cancel() + if err = internal.Call(ctx, "remote_socket", "Send", req, res); err != nil { + // assume zero bytes were sent in this RPC + break + } + n += int(res.GetDataSent()) + cn.offset += int64(res.GetDataSent()) + } + + return +} + +func (cn *Conn) Close() error { + req := &pb.CloseRequest{ + SocketDescriptor: &cn.desc, + } + res := &pb.CloseReply{} + if err := internal.Call(cn.ctx, "remote_socket", "Close", req, res); err != nil { + return err + } + cn.desc = "CLOSED" + return nil +} + +func addr(prot pb.CreateSocketRequest_SocketProtocol, ap *pb.AddressPort) net.Addr { + if ap == nil { + return nil + } + switch prot { + case pb.CreateSocketRequest_TCP: + return &net.TCPAddr{ + IP: net.IP(ap.PackedAddress), + Port: int(*ap.Port), + } + case pb.CreateSocketRequest_UDP: + return &net.UDPAddr{ + IP: net.IP(ap.PackedAddress), + Port: int(*ap.Port), + } + } + panic("unknown protocol " + prot.String()) +} + +func (cn *Conn) LocalAddr() net.Addr { return addr(cn.prot, cn.local) } +func (cn *Conn) RemoteAddr() net.Addr { return addr(cn.prot, cn.remote) } + +func (cn *Conn) SetDeadline(t time.Time) error { + cn.readDeadline = t + cn.writeDeadline = t + return nil +} + +func (cn *Conn) SetReadDeadline(t time.Time) error { + cn.readDeadline = t + return nil +} + +func (cn *Conn) SetWriteDeadline(t time.Time) error { + cn.writeDeadline = t + return nil +} + +// KeepAlive signals that the connection is still in use. +// It may be called to prevent the socket being closed due to inactivity. +func (cn *Conn) KeepAlive() error { + req := &pb.GetSocketNameRequest{ + SocketDescriptor: &cn.desc, + } + res := &pb.GetSocketNameReply{} + return internal.Call(cn.ctx, "remote_socket", "GetSocketName", req, res) +} + +func init() { + internal.RegisterErrorCodeMap("remote_socket", pb.RemoteSocketServiceError_ErrorCode_name) +} diff --git a/vendor/google.golang.org/appengine/socket/socket_vm.go b/vendor/google.golang.org/appengine/socket/socket_vm.go new file mode 100644 index 0000000..c804169 --- /dev/null +++ b/vendor/google.golang.org/appengine/socket/socket_vm.go @@ -0,0 +1,64 @@ +// Copyright 2015 Google Inc. All rights reserved. 
+// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package socket + +import ( + "net" + "time" + + "golang.org/x/net/context" +) + +// Dial connects to the address addr on the network protocol. +// The address format is host:port, where host may be a hostname or an IP address. +// Known protocols are "tcp" and "udp". +// The returned connection satisfies net.Conn, and is valid while ctx is valid; +// if the connection is to be used after ctx becomes invalid, invoke SetContext +// with the new context. +func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { + conn, err := net.Dial(protocol, addr) + if err != nil { + return nil, err + } + return &Conn{conn}, nil +} + +// DialTimeout is like Dial but takes a timeout. +// The timeout includes name resolution, if required. +func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { + conn, err := net.DialTimeout(protocol, addr, timeout) + if err != nil { + return nil, err + } + return &Conn{conn}, nil +} + +// LookupIP returns the given host's IP addresses. +func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { + return net.LookupIP(host) +} + +// Conn represents a socket connection. +// It implements net.Conn. +type Conn struct { + net.Conn +} + +// SetContext sets the context that is used by this Conn. +// It is usually used only when using a Conn that was created in a different context, +// such as when a connection is created during a warmup request but used while +// servicing a user request. +func (cn *Conn) SetContext(ctx context.Context) { + // This function is not required in App Engine "flexible environment". +} + +// KeepAlive signals that the connection is still in use. +// It may be called to prevent the socket being closed due to inactivity. +func (cn *Conn) KeepAlive() error { + // This function is not required in App Engine "flexible environment". + return nil +} diff --git a/vendor/google.golang.org/appengine/taskqueue/taskqueue.go b/vendor/google.golang.org/appengine/taskqueue/taskqueue.go new file mode 100644 index 0000000..9b62fac --- /dev/null +++ b/vendor/google.golang.org/appengine/taskqueue/taskqueue.go @@ -0,0 +1,496 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +/* +Package taskqueue provides a client for App Engine's taskqueue service. +Using this service, applications may perform work outside a user's request. + +A Task may be constructed manually; alternatively, since the most common +taskqueue operation is to add a single POST task, NewPOSTTask makes it easy. + + t := taskqueue.NewPOSTTask("/worker", url.Values{ + "key": {key}, + }) + taskqueue.Add(c, t, "") // add t to the default queue +*/ +package taskqueue // import "google.golang.org/appengine/taskqueue" + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine" + "google.golang.org/appengine/internal" + dspb "google.golang.org/appengine/internal/datastore" + pb "google.golang.org/appengine/internal/taskqueue" +) + +var ( + // ErrTaskAlreadyAdded is the error returned by Add and AddMulti when a task has already been added with a particular name. 
+	ErrTaskAlreadyAdded = errors.New("taskqueue: task has already been added")
+)
+
+// RetryOptions let you control whether to retry a task and the backoff intervals between tries.
+type RetryOptions struct {
+	// Number of tries/leases after which the task fails permanently and is deleted.
+	// If AgeLimit is also set, both limits must be exceeded for the task to fail permanently.
+	RetryLimit int32
+
+	// Maximum time allowed since the task's first try before the task fails permanently and is deleted (only for push tasks).
+	// If RetryLimit is also set, both limits must be exceeded for the task to fail permanently.
+	AgeLimit time.Duration
+
+	// Minimum time between successive tries (only for push tasks).
+	MinBackoff time.Duration
+
+	// Maximum time between successive tries (only for push tasks).
+	MaxBackoff time.Duration
+
+	// Maximum number of times to double the interval between successive tries before the intervals increase linearly (only for push tasks).
+	MaxDoublings int32
+
+	// If MaxDoublings is zero, set ApplyZeroMaxDoublings to true to override the default non-zero value.
+	// Otherwise a zero MaxDoublings is ignored and the default is used.
+	ApplyZeroMaxDoublings bool
+}
+
+// toRetryParameters converts RetryOptions to pb.TaskQueueRetryParameters.
+func (opt *RetryOptions) toRetryParameters() *pb.TaskQueueRetryParameters {
+	params := &pb.TaskQueueRetryParameters{}
+	if opt.RetryLimit > 0 {
+		params.RetryLimit = proto.Int32(opt.RetryLimit)
+	}
+	if opt.AgeLimit > 0 {
+		params.AgeLimitSec = proto.Int64(int64(opt.AgeLimit.Seconds()))
+	}
+	if opt.MinBackoff > 0 {
+		params.MinBackoffSec = proto.Float64(opt.MinBackoff.Seconds())
+	}
+	if opt.MaxBackoff > 0 {
+		params.MaxBackoffSec = proto.Float64(opt.MaxBackoff.Seconds())
+	}
+	if opt.MaxDoublings > 0 || (opt.MaxDoublings == 0 && opt.ApplyZeroMaxDoublings) {
+		params.MaxDoublings = proto.Int32(opt.MaxDoublings)
+	}
+	return params
+}
+
+// A Task represents a task to be executed.
+type Task struct {
+	// Path is the worker URL for the task.
+	// If unset, it will default to /_ah/queue/<queue_name>.
+	Path string
+
+	// Payload is the data for the task.
+	// This will be delivered as the HTTP request body.
+	// It is only used when Method is POST, PUT or PULL.
+	// url.Values' Encode method may be used to generate this for POST requests.
+	Payload []byte
+
+	// Additional HTTP headers to pass at the task's execution time.
+	// To schedule the task to be run with an alternate app version
+	// or backend, set the "Host" header.
+	Header http.Header
+
+	// Method is the HTTP method for the task ("GET", "POST", etc.),
+	// or "PULL" if this task is destined for a pull-based queue.
+	// If empty, this defaults to "POST".
+	Method string
+
+	// A name for the task.
+	// If empty, a name will be chosen.
+	Name string
+
+	// Delay specifies the duration the task queue service must wait
+	// before executing the task.
+	// Either Delay or ETA may be set, but not both.
+	Delay time.Duration
+
+	// ETA specifies the earliest time a task may be executed (push queues)
+	// or leased (pull queues).
+	// Either Delay or ETA may be set, but not both.
+	ETA time.Time
+
+	// The number of times the task has been dispatched or leased.
+	RetryCount int32
+
+	// Tag for the task. Only used when Method is PULL.
+	Tag string
+
+	// Retry options for this task. May be nil.
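+	// For example (editor's illustration, not upstream documentation):
+	//
+	//	t.RetryOptions = &taskqueue.RetryOptions{
+	//		RetryLimit: 5,
+	//		MinBackoff: 10 * time.Second,
+	//		MaxBackoff: 5 * time.Minute,
+	//	}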
+ RetryOptions *RetryOptions +} + +func (t *Task) method() string { + if t.Method == "" { + return "POST" + } + return t.Method +} + +// NewPOSTTask creates a Task that will POST to a path with the given form data. +func NewPOSTTask(path string, params url.Values) *Task { + h := make(http.Header) + h.Set("Content-Type", "application/x-www-form-urlencoded") + return &Task{ + Path: path, + Payload: []byte(params.Encode()), + Header: h, + Method: "POST", + } +} + +var ( + currentNamespace = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace") + defaultNamespace = http.CanonicalHeaderKey("X-AppEngine-Default-Namespace") +) + +func getDefaultNamespace(ctx context.Context) string { + return internal.IncomingHeaders(ctx).Get(defaultNamespace) +} + +func newAddReq(c context.Context, task *Task, queueName string) (*pb.TaskQueueAddRequest, error) { + if queueName == "" { + queueName = "default" + } + path := task.Path + if path == "" { + path = "/_ah/queue/" + queueName + } + eta := task.ETA + if eta.IsZero() { + eta = time.Now().Add(task.Delay) + } else if task.Delay != 0 { + panic("taskqueue: both Delay and ETA are set") + } + req := &pb.TaskQueueAddRequest{ + QueueName: []byte(queueName), + TaskName: []byte(task.Name), + EtaUsec: proto.Int64(eta.UnixNano() / 1e3), + } + method := task.method() + if method == "PULL" { + // Pull-based task + req.Body = task.Payload + req.Mode = pb.TaskQueueMode_PULL.Enum() + if task.Tag != "" { + req.Tag = []byte(task.Tag) + } + } else { + // HTTP-based task + if v, ok := pb.TaskQueueAddRequest_RequestMethod_value[method]; ok { + req.Method = pb.TaskQueueAddRequest_RequestMethod(v).Enum() + } else { + return nil, fmt.Errorf("taskqueue: bad method %q", method) + } + req.Url = []byte(path) + for k, vs := range task.Header { + for _, v := range vs { + req.Header = append(req.Header, &pb.TaskQueueAddRequest_Header{ + Key: []byte(k), + Value: []byte(v), + }) + } + } + if method == "POST" || method == "PUT" { + req.Body = task.Payload + } + + // Namespace headers. + if _, ok := task.Header[currentNamespace]; !ok { + // Fetch the current namespace of this request. + ns := internal.NamespaceFromContext(c) + req.Header = append(req.Header, &pb.TaskQueueAddRequest_Header{ + Key: []byte(currentNamespace), + Value: []byte(ns), + }) + } + if _, ok := task.Header[defaultNamespace]; !ok { + // Fetch the X-AppEngine-Default-Namespace header of this request. + if ns := getDefaultNamespace(c); ns != "" { + req.Header = append(req.Header, &pb.TaskQueueAddRequest_Header{ + Key: []byte(defaultNamespace), + Value: []byte(ns), + }) + } + } + } + + if task.RetryOptions != nil { + req.RetryParameters = task.RetryOptions.toRetryParameters() + } + + return req, nil +} + +var alreadyAddedErrors = map[pb.TaskQueueServiceError_ErrorCode]bool{ + pb.TaskQueueServiceError_TASK_ALREADY_EXISTS: true, + pb.TaskQueueServiceError_TOMBSTONED_TASK: true, +} + +// Add adds the task to a named queue. +// An empty queue name means that the default queue will be used. +// Add returns an equivalent Task with defaults filled in, including setting +// the task's Name field to the chosen name if the original was empty. 
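+// For example (editor's illustration, mirroring the package example; "key"
+// is an assumed variable):
+//
+//	t := taskqueue.NewPOSTTask("/worker", url.Values{"key": {key}})
+//	t.Name = "task-" + key // named tasks are de-duplicated by the service
+//	if _, err := taskqueue.Add(c, t, ""); err == taskqueue.ErrTaskAlreadyAdded {
+//		// An identically named task was already enqueued; nothing to do.
+//	}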
+func Add(c context.Context, task *Task, queueName string) (*Task, error) { + req, err := newAddReq(c, task, queueName) + if err != nil { + return nil, err + } + res := &pb.TaskQueueAddResponse{} + if err := internal.Call(c, "taskqueue", "Add", req, res); err != nil { + apiErr, ok := err.(*internal.APIError) + if ok && alreadyAddedErrors[pb.TaskQueueServiceError_ErrorCode(apiErr.Code)] { + return nil, ErrTaskAlreadyAdded + } + return nil, err + } + resultTask := *task + resultTask.Method = task.method() + if task.Name == "" { + resultTask.Name = string(res.ChosenTaskName) + } + return &resultTask, nil +} + +// AddMulti adds multiple tasks to a named queue. +// An empty queue name means that the default queue will be used. +// AddMulti returns a slice of equivalent tasks with defaults filled in, including setting +// each task's Name field to the chosen name if the original was empty. +// If a given task is badly formed or could not be added, an appengine.MultiError is returned. +func AddMulti(c context.Context, tasks []*Task, queueName string) ([]*Task, error) { + req := &pb.TaskQueueBulkAddRequest{ + AddRequest: make([]*pb.TaskQueueAddRequest, len(tasks)), + } + me, any := make(appengine.MultiError, len(tasks)), false + for i, t := range tasks { + req.AddRequest[i], me[i] = newAddReq(c, t, queueName) + any = any || me[i] != nil + } + if any { + return nil, me + } + res := &pb.TaskQueueBulkAddResponse{} + if err := internal.Call(c, "taskqueue", "BulkAdd", req, res); err != nil { + return nil, err + } + if len(res.Taskresult) != len(tasks) { + return nil, errors.New("taskqueue: server error") + } + tasksOut := make([]*Task, len(tasks)) + for i, tr := range res.Taskresult { + tasksOut[i] = new(Task) + *tasksOut[i] = *tasks[i] + tasksOut[i].Method = tasksOut[i].method() + if tasksOut[i].Name == "" { + tasksOut[i].Name = string(tr.ChosenTaskName) + } + if *tr.Result != pb.TaskQueueServiceError_OK { + if alreadyAddedErrors[*tr.Result] { + me[i] = ErrTaskAlreadyAdded + } else { + me[i] = &internal.APIError{ + Service: "taskqueue", + Code: int32(*tr.Result), + } + } + any = true + } + } + if any { + return tasksOut, me + } + return tasksOut, nil +} + +// Delete deletes a task from a named queue. +func Delete(c context.Context, task *Task, queueName string) error { + err := DeleteMulti(c, []*Task{task}, queueName) + if me, ok := err.(appengine.MultiError); ok { + return me[0] + } + return err +} + +// DeleteMulti deletes multiple tasks from a named queue. +// If a given task could not be deleted, an appengine.MultiError is returned. 
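+// For example (editor's illustration), per-task failures can be inspected:
+//
+//	if err := taskqueue.DeleteMulti(c, tasks, ""); err != nil {
+//		if me, ok := err.(appengine.MultiError); ok {
+//			for i, e := range me {
+//				if e != nil {
+//					// me[i] corresponds to tasks[i].
+//				}
+//			}
+//		}
+//	}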
+func DeleteMulti(c context.Context, tasks []*Task, queueName string) error { + taskNames := make([][]byte, len(tasks)) + for i, t := range tasks { + taskNames[i] = []byte(t.Name) + } + if queueName == "" { + queueName = "default" + } + req := &pb.TaskQueueDeleteRequest{ + QueueName: []byte(queueName), + TaskName: taskNames, + } + res := &pb.TaskQueueDeleteResponse{} + if err := internal.Call(c, "taskqueue", "Delete", req, res); err != nil { + return err + } + if a, b := len(req.TaskName), len(res.Result); a != b { + return fmt.Errorf("taskqueue: internal error: requested deletion of %d tasks, got %d results", a, b) + } + me, any := make(appengine.MultiError, len(res.Result)), false + for i, ec := range res.Result { + if ec != pb.TaskQueueServiceError_OK { + me[i] = &internal.APIError{ + Service: "taskqueue", + Code: int32(ec), + } + any = true + } + } + if any { + return me + } + return nil +} + +func lease(c context.Context, maxTasks int, queueName string, leaseTime int, groupByTag bool, tag []byte) ([]*Task, error) { + if queueName == "" { + queueName = "default" + } + req := &pb.TaskQueueQueryAndOwnTasksRequest{ + QueueName: []byte(queueName), + LeaseSeconds: proto.Float64(float64(leaseTime)), + MaxTasks: proto.Int64(int64(maxTasks)), + GroupByTag: proto.Bool(groupByTag), + Tag: tag, + } + res := &pb.TaskQueueQueryAndOwnTasksResponse{} + if err := internal.Call(c, "taskqueue", "QueryAndOwnTasks", req, res); err != nil { + return nil, err + } + tasks := make([]*Task, len(res.Task)) + for i, t := range res.Task { + tasks[i] = &Task{ + Payload: t.Body, + Name: string(t.TaskName), + Method: "PULL", + ETA: time.Unix(0, *t.EtaUsec*1e3), + RetryCount: *t.RetryCount, + Tag: string(t.Tag), + } + } + return tasks, nil +} + +// Lease leases tasks from a queue. +// leaseTime is in seconds. +// The number of tasks fetched will be at most maxTasks. +func Lease(c context.Context, maxTasks int, queueName string, leaseTime int) ([]*Task, error) { + return lease(c, maxTasks, queueName, leaseTime, false, nil) +} + +// LeaseByTag leases tasks from a queue, grouped by tag. +// If tag is empty, then the returned tasks are grouped by the tag of the task with earliest ETA. +// leaseTime is in seconds. +// The number of tasks fetched will be at most maxTasks. +func LeaseByTag(c context.Context, maxTasks int, queueName string, leaseTime int, tag string) ([]*Task, error) { + return lease(c, maxTasks, queueName, leaseTime, true, []byte(tag)) +} + +// Purge removes all tasks from a queue. +func Purge(c context.Context, queueName string) error { + if queueName == "" { + queueName = "default" + } + req := &pb.TaskQueuePurgeQueueRequest{ + QueueName: []byte(queueName), + } + res := &pb.TaskQueuePurgeQueueResponse{} + return internal.Call(c, "taskqueue", "PurgeQueue", req, res) +} + +// ModifyLease modifies the lease of a task. +// Used to request more processing time, or to abandon processing. +// leaseTime is in seconds and must not be negative. +func ModifyLease(c context.Context, task *Task, queueName string, leaseTime int) error { + if queueName == "" { + queueName = "default" + } + req := &pb.TaskQueueModifyTaskLeaseRequest{ + QueueName: []byte(queueName), + TaskName: []byte(task.Name), + EtaUsec: proto.Int64(task.ETA.UnixNano() / 1e3), // Used to verify ownership. 
+ LeaseSeconds: proto.Float64(float64(leaseTime)), + } + res := &pb.TaskQueueModifyTaskLeaseResponse{} + if err := internal.Call(c, "taskqueue", "ModifyTaskLease", req, res); err != nil { + return err + } + task.ETA = time.Unix(0, *res.UpdatedEtaUsec*1e3) + return nil +} + +// QueueStatistics represents statistics about a single task queue. +type QueueStatistics struct { + Tasks int // may be an approximation + OldestETA time.Time // zero if there are no pending tasks + + Executed1Minute int // tasks executed in the last minute + InFlight int // tasks executing now + EnforcedRate float64 // requests per second +} + +// QueueStats retrieves statistics about queues. +func QueueStats(c context.Context, queueNames []string) ([]QueueStatistics, error) { + req := &pb.TaskQueueFetchQueueStatsRequest{ + QueueName: make([][]byte, len(queueNames)), + } + for i, q := range queueNames { + if q == "" { + q = "default" + } + req.QueueName[i] = []byte(q) + } + res := &pb.TaskQueueFetchQueueStatsResponse{} + if err := internal.Call(c, "taskqueue", "FetchQueueStats", req, res); err != nil { + return nil, err + } + qs := make([]QueueStatistics, len(res.Queuestats)) + for i, qsg := range res.Queuestats { + qs[i] = QueueStatistics{ + Tasks: int(*qsg.NumTasks), + } + if eta := *qsg.OldestEtaUsec; eta > -1 { + qs[i].OldestETA = time.Unix(0, eta*1e3) + } + if si := qsg.ScannerInfo; si != nil { + qs[i].Executed1Minute = int(*si.ExecutedLastMinute) + qs[i].InFlight = int(si.GetRequestsInFlight()) + qs[i].EnforcedRate = si.GetEnforcedRate() + } + } + return qs, nil +} + +func setTransaction(x *pb.TaskQueueAddRequest, t *dspb.Transaction) { + x.Transaction = t +} + +func init() { + internal.RegisterErrorCodeMap("taskqueue", pb.TaskQueueServiceError_ErrorCode_name) + + // Datastore error codes are shifted by DATASTORE_ERROR when presented through taskqueue. + dsCode := int32(pb.TaskQueueServiceError_DATASTORE_ERROR) + int32(dspb.Error_TIMEOUT) + internal.RegisterTimeoutErrorCode("taskqueue", dsCode) + + // Transaction registration. + internal.RegisterTransactionSetter(setTransaction) + internal.RegisterTransactionSetter(func(x *pb.TaskQueueBulkAddRequest, t *dspb.Transaction) { + for _, req := range x.AddRequest { + setTransaction(req, t) + } + }) +} diff --git a/vendor/google.golang.org/appengine/taskqueue/taskqueue_test.go b/vendor/google.golang.org/appengine/taskqueue/taskqueue_test.go new file mode 100644 index 0000000..0c14015 --- /dev/null +++ b/vendor/google.golang.org/appengine/taskqueue/taskqueue_test.go @@ -0,0 +1,116 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
+ +package taskqueue + +import ( + "errors" + "fmt" + "reflect" + "testing" + + "google.golang.org/appengine" + "google.golang.org/appengine/internal" + "google.golang.org/appengine/internal/aetesting" + pb "google.golang.org/appengine/internal/taskqueue" +) + +func TestAddErrors(t *testing.T) { + var tests = []struct { + err, want error + sameErr bool // if true, should return err exactly + }{ + { + err: &internal.APIError{ + Service: "taskqueue", + Code: int32(pb.TaskQueueServiceError_TASK_ALREADY_EXISTS), + }, + want: ErrTaskAlreadyAdded, + }, + { + err: &internal.APIError{ + Service: "taskqueue", + Code: int32(pb.TaskQueueServiceError_TOMBSTONED_TASK), + }, + want: ErrTaskAlreadyAdded, + }, + { + err: &internal.APIError{ + Service: "taskqueue", + Code: int32(pb.TaskQueueServiceError_UNKNOWN_QUEUE), + }, + want: errors.New("not used"), + sameErr: true, + }, + } + for _, tc := range tests { + c := aetesting.FakeSingleContext(t, "taskqueue", "Add", func(req *pb.TaskQueueAddRequest, res *pb.TaskQueueAddResponse) error { + // don't fill in any of the response + return tc.err + }) + task := &Task{Path: "/worker", Method: "PULL"} + _, err := Add(c, task, "a-queue") + want := tc.want + if tc.sameErr { + want = tc.err + } + if err != want { + t.Errorf("Add with tc.err = %v, got %#v, want = %#v", tc.err, err, want) + } + } +} + +func TestAddMulti(t *testing.T) { + c := aetesting.FakeSingleContext(t, "taskqueue", "BulkAdd", func(req *pb.TaskQueueBulkAddRequest, res *pb.TaskQueueBulkAddResponse) error { + res.Taskresult = []*pb.TaskQueueBulkAddResponse_TaskResult{ + { + Result: pb.TaskQueueServiceError_OK.Enum(), + }, + { + Result: pb.TaskQueueServiceError_TASK_ALREADY_EXISTS.Enum(), + }, + { + Result: pb.TaskQueueServiceError_TOMBSTONED_TASK.Enum(), + }, + { + Result: pb.TaskQueueServiceError_INTERNAL_ERROR.Enum(), + }, + } + return nil + }) + tasks := []*Task{ + {Path: "/worker", Method: "PULL"}, + {Path: "/worker", Method: "PULL"}, + {Path: "/worker", Method: "PULL"}, + {Path: "/worker", Method: "PULL"}, + } + r, err := AddMulti(c, tasks, "a-queue") + if len(r) != len(tasks) { + t.Fatalf("AddMulti returned %d tasks, want %d", len(r), len(tasks)) + } + want := appengine.MultiError{ + nil, + ErrTaskAlreadyAdded, + ErrTaskAlreadyAdded, + &internal.APIError{ + Service: "taskqueue", + Code: int32(pb.TaskQueueServiceError_INTERNAL_ERROR), + }, + } + if !reflect.DeepEqual(err, want) { + t.Errorf("AddMulti got %v, wanted %v", err, want) + } +} + +func TestAddWithEmptyPath(t *testing.T) { + c := aetesting.FakeSingleContext(t, "taskqueue", "Add", func(req *pb.TaskQueueAddRequest, res *pb.TaskQueueAddResponse) error { + if got, want := string(req.Url), "/_ah/queue/a-queue"; got != want { + return fmt.Errorf("req.Url = %q; want %q", got, want) + } + return nil + }) + if _, err := Add(c, &Task{}, "a-queue"); err != nil { + t.Fatalf("Add: %v", err) + } +} diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go new file mode 100644 index 0000000..05642a9 --- /dev/null +++ b/vendor/google.golang.org/appengine/timeout.go @@ -0,0 +1,20 @@ +// Copyright 2013 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package appengine + +import "golang.org/x/net/context" + +// IsTimeoutError reports whether err is a timeout error. 
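+// For example (editor's illustration; the datastore call is an assumption):
+//
+//	if err := datastore.Get(c, key, &entity); appengine.IsTimeoutError(err) {
+//		// The API call timed out; consider retrying.
+//	}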
+func IsTimeoutError(err error) bool { + if err == context.DeadlineExceeded { + return true + } + if t, ok := err.(interface { + IsTimeout() bool + }); ok { + return t.IsTimeout() + } + return false +} diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go new file mode 100644 index 0000000..6ffe1e6 --- /dev/null +++ b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go @@ -0,0 +1,210 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package urlfetch provides an http.RoundTripper implementation +// for fetching URLs via App Engine's urlfetch service. +package urlfetch // import "google.golang.org/appengine/urlfetch" + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/urlfetch" +) + +// Transport is an implementation of http.RoundTripper for +// App Engine. Users should generally create an http.Client using +// this transport and use the Client rather than using this transport +// directly. +type Transport struct { + Context context.Context + + // Controls whether the application checks the validity of SSL certificates + // over HTTPS connections. A value of false (the default) instructs the + // application to send a request to the server only if the certificate is + // valid and signed by a trusted certificate authority (CA), and also + // includes a hostname that matches the certificate. A value of true + // instructs the application to perform no certificate validation. + AllowInvalidServerCertificate bool +} + +// Verify statically that *Transport implements http.RoundTripper. +var _ http.RoundTripper = (*Transport)(nil) + +// Client returns an *http.Client using a default urlfetch Transport. This +// client will have the default deadline of 5 seconds, and will check the +// validity of SSL certificates. +// +// Any deadline of the provided context will be used for requests through this client; +// if the client does not have a deadline then a 5 second default is used. +func Client(ctx context.Context) *http.Client { + return &http.Client{ + Transport: &Transport{ + Context: ctx, + }, + } +} + +type bodyReader struct { + content []byte + truncated bool + closed bool +} + +// ErrTruncatedBody is the error returned after the final Read() from a +// response's Body if the body has been truncated by App Engine's proxy. +var ErrTruncatedBody = errors.New("urlfetch: truncated body") + +func statusCodeToText(code int) string { + if t := http.StatusText(code); t != "" { + return t + } + return strconv.Itoa(code) +} + +func (br *bodyReader) Read(p []byte) (n int, err error) { + if br.closed { + if br.truncated { + return 0, ErrTruncatedBody + } + return 0, io.EOF + } + n = copy(p, br.content) + if n > 0 { + br.content = br.content[n:] + return + } + if br.truncated { + br.closed = true + return 0, ErrTruncatedBody + } + return 0, io.EOF +} + +func (br *bodyReader) Close() error { + br.closed = true + br.content = nil + return nil +} + +// A map of the URL Fetch-accepted methods that take a request body. +var methodAcceptsRequestBody = map[string]bool{ + "POST": true, + "PUT": true, + "PATCH": true, +} + +// urlString returns a valid string given a URL. 
This function is necessary because +// the String method of URL doesn't correctly handle URLs with non-empty Opaque values. +// See http://code.google.com/p/go/issues/detail?id=4860. +func urlString(u *url.URL) string { + if u.Opaque == "" || strings.HasPrefix(u.Opaque, "//") { + return u.String() + } + aux := *u + aux.Opaque = "//" + aux.Host + aux.Opaque + return aux.String() +} + +// RoundTrip issues a single HTTP request and returns its response. Per the +// http.RoundTripper interface, RoundTrip only returns an error if there +// was an unsupported request or the URL Fetch proxy fails. +// Note that HTTP response codes such as 5xx, 403, 404, etc are not +// errors as far as the transport is concerned and will be returned +// with err set to nil. +func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) { + methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method] + if !ok { + return nil, fmt.Errorf("urlfetch: unsupported HTTP method %q", req.Method) + } + + method := pb.URLFetchRequest_RequestMethod(methNum) + + freq := &pb.URLFetchRequest{ + Method: &method, + Url: proto.String(urlString(req.URL)), + FollowRedirects: proto.Bool(false), // http.Client's responsibility + MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate), + } + if deadline, ok := t.Context.Deadline(); ok { + freq.Deadline = proto.Float64(deadline.Sub(time.Now()).Seconds()) + } + + for k, vals := range req.Header { + for _, val := range vals { + freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{ + Key: proto.String(k), + Value: proto.String(val), + }) + } + } + if methodAcceptsRequestBody[req.Method] && req.Body != nil { + // Avoid a []byte copy if req.Body has a Bytes method. + switch b := req.Body.(type) { + case interface { + Bytes() []byte + }: + freq.Payload = b.Bytes() + default: + freq.Payload, err = ioutil.ReadAll(req.Body) + if err != nil { + return nil, err + } + } + } + + fres := &pb.URLFetchResponse{} + if err := internal.Call(t.Context, "urlfetch", "Fetch", freq, fres); err != nil { + return nil, err + } + + res = &http.Response{} + res.StatusCode = int(*fres.StatusCode) + res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode)) + res.Header = make(http.Header) + res.Request = req + + // Faked: + res.ProtoMajor = 1 + res.ProtoMinor = 1 + res.Proto = "HTTP/1.1" + res.Close = true + + for _, h := range fres.Header { + hkey := http.CanonicalHeaderKey(*h.Key) + hval := *h.Value + if hkey == "Content-Length" { + // Will get filled in below for all but HEAD requests. + if req.Method == "HEAD" { + res.ContentLength, _ = strconv.ParseInt(hval, 10, 64) + } + continue + } + res.Header.Add(hkey, hval) + } + + if req.Method != "HEAD" { + res.ContentLength = int64(len(fres.Content)) + } + + truncated := fres.GetContentWasTruncated() + res.Body = &bodyReader{content: fres.Content, truncated: truncated} + return +} + +func init() { + internal.RegisterErrorCodeMap("urlfetch", pb.URLFetchServiceError_ErrorCode_name) + internal.RegisterTimeoutErrorCode("urlfetch", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED)) +} diff --git a/vendor/google.golang.org/appengine/user/oauth.go b/vendor/google.golang.org/appengine/user/oauth.go new file mode 100644 index 0000000..ffad571 --- /dev/null +++ b/vendor/google.golang.org/appengine/user/oauth.go @@ -0,0 +1,52 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
+
+package user
+
+import (
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine/internal"
+	pb "google.golang.org/appengine/internal/user"
+)
+
+// CurrentOAuth returns the user associated with the OAuth consumer making this
+// request. If the OAuth consumer did not make a valid OAuth request, or the
+// scopes list is non-empty and the current user does not have at least one of
+// the scopes, this method will return an error.
+func CurrentOAuth(c context.Context, scopes ...string) (*User, error) {
+	req := &pb.GetOAuthUserRequest{}
+	if len(scopes) != 1 || scopes[0] != "" {
+		// The signature for this function used to be CurrentOAuth(Context, string).
+		// Ignore the singular "" scope to preserve existing behavior.
+		req.Scopes = scopes
+	}
+
+	res := &pb.GetOAuthUserResponse{}
+
+	err := internal.Call(c, "user", "GetOAuthUser", req, res)
+	if err != nil {
+		return nil, err
+	}
+	return &User{
+		Email:      *res.Email,
+		AuthDomain: *res.AuthDomain,
+		Admin:      res.GetIsAdmin(),
+		ID:         *res.UserId,
+		ClientID:   res.GetClientId(),
+	}, nil
+}
+
+// OAuthConsumerKey returns the OAuth consumer key provided with the current
+// request. This method will return an error if the OAuth request was invalid.
+func OAuthConsumerKey(c context.Context) (string, error) {
+	req := &pb.CheckOAuthSignatureRequest{}
+	res := &pb.CheckOAuthSignatureResponse{}
+
+	err := internal.Call(c, "user", "CheckOAuthSignature", req, res)
+	if err != nil {
+		return "", err
+	}
+	return *res.OauthConsumerKey, err
+}
diff --git a/vendor/google.golang.org/appengine/user/user.go b/vendor/google.golang.org/appengine/user/user.go
new file mode 100644
index 0000000..eb76f59
--- /dev/null
+++ b/vendor/google.golang.org/appengine/user/user.go
@@ -0,0 +1,84 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package user provides a client for App Engine's user authentication service.
+package user // import "google.golang.org/appengine/user"
+
+import (
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine/internal"
+	pb "google.golang.org/appengine/internal/user"
+)
+
+// User represents a user of the application.
+type User struct {
+	Email      string
+	AuthDomain string
+	Admin      bool
+
+	// ID is the unique permanent ID of the user.
+	// It is populated if the Email is associated
+	// with a Google account, or empty otherwise.
+	ID string
+
+	// ClientID is the ID of the pre-registered client so its identity can be verified.
+	// See https://developers.google.com/console/help/#generatingoauth2 for more information.
+	ClientID string
+
+	FederatedIdentity string
+	FederatedProvider string
+}
+
+// String returns a displayable name for the user.
+func (u *User) String() string {
+	if u.AuthDomain != "" && strings.HasSuffix(u.Email, "@"+u.AuthDomain) {
+		return u.Email[:len(u.Email)-len("@"+u.AuthDomain)]
+	}
+	if u.FederatedIdentity != "" {
+		return u.FederatedIdentity
+	}
+	return u.Email
+}
+
+// LoginURL returns a URL that, when visited, prompts the user to sign in,
+// then redirects the user to the URL specified by dest.
+func LoginURL(c context.Context, dest string) (string, error) {
+	return LoginURLFederated(c, dest, "")
+}
+
+// LoginURLFederated is like LoginURL but accepts a user's OpenID identifier.
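+// For example (editor's illustration; w is an http.ResponseWriter assumed
+// to be in scope), the common case goes through LoginURL:
+//
+//	url, _ := user.LoginURL(c, "/home")
+//	fmt.Fprintf(w, `<a href="%s">Sign in</a>`, url)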
+func LoginURLFederated(c context.Context, dest, identity string) (string, error) { + req := &pb.CreateLoginURLRequest{ + DestinationUrl: proto.String(dest), + } + if identity != "" { + req.FederatedIdentity = proto.String(identity) + } + res := &pb.CreateLoginURLResponse{} + if err := internal.Call(c, "user", "CreateLoginURL", req, res); err != nil { + return "", err + } + return *res.LoginUrl, nil +} + +// LogoutURL returns a URL that, when visited, signs the user out, +// then redirects the user to the URL specified by dest. +func LogoutURL(c context.Context, dest string) (string, error) { + req := &pb.CreateLogoutURLRequest{ + DestinationUrl: proto.String(dest), + } + res := &pb.CreateLogoutURLResponse{} + if err := internal.Call(c, "user", "CreateLogoutURL", req, res); err != nil { + return "", err + } + return *res.LogoutUrl, nil +} + +func init() { + internal.RegisterErrorCodeMap("user", pb.UserServiceError_ErrorCode_name) +} diff --git a/vendor/google.golang.org/appengine/user/user_classic.go b/vendor/google.golang.org/appengine/user/user_classic.go new file mode 100644 index 0000000..a747ef3 --- /dev/null +++ b/vendor/google.golang.org/appengine/user/user_classic.go @@ -0,0 +1,35 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build appengine + +package user + +import ( + "appengine/user" + + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" +) + +func Current(ctx context.Context) *User { + u := user.Current(internal.ClassicContextFromContext(ctx)) + if u == nil { + return nil + } + // Map appengine/user.User to this package's User type. + return &User{ + Email: u.Email, + AuthDomain: u.AuthDomain, + Admin: u.Admin, + ID: u.ID, + FederatedIdentity: u.FederatedIdentity, + FederatedProvider: u.FederatedProvider, + } +} + +func IsAdmin(ctx context.Context) bool { + return user.IsAdmin(internal.ClassicContextFromContext(ctx)) +} diff --git a/vendor/google.golang.org/appengine/user/user_test.go b/vendor/google.golang.org/appengine/user/user_test.go new file mode 100644 index 0000000..5fc5957 --- /dev/null +++ b/vendor/google.golang.org/appengine/user/user_test.go @@ -0,0 +1,99 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
+ +// +build !appengine + +package user + +import ( + "fmt" + "net/http" + "testing" + + "github.com/golang/protobuf/proto" + + "google.golang.org/appengine/internal" + "google.golang.org/appengine/internal/aetesting" + pb "google.golang.org/appengine/internal/user" +) + +func baseReq() *http.Request { + return &http.Request{ + Header: http.Header{}, + } +} + +type basicUserTest struct { + nickname, email, authDomain, admin string + // expectations + isNil, isAdmin bool + displayName string +} + +var basicUserTests = []basicUserTest{ + {"", "", "", "0", true, false, ""}, + {"ken", "ken@example.com", "example.com", "0", false, false, "ken"}, + {"ken", "ken@example.com", "auth_domain.com", "1", false, true, "ken@example.com"}, +} + +func TestBasicUserAPI(t *testing.T) { + for i, tc := range basicUserTests { + req := baseReq() + req.Header.Set("X-AppEngine-User-Nickname", tc.nickname) + req.Header.Set("X-AppEngine-User-Email", tc.email) + req.Header.Set("X-AppEngine-Auth-Domain", tc.authDomain) + req.Header.Set("X-AppEngine-User-Is-Admin", tc.admin) + + c := internal.ContextForTesting(req) + + if ga := IsAdmin(c); ga != tc.isAdmin { + t.Errorf("test %d: expected IsAdmin(c) = %v, got %v", i, tc.isAdmin, ga) + } + + u := Current(c) + if tc.isNil { + if u != nil { + t.Errorf("test %d: expected u == nil, got %+v", i, u) + } + continue + } + if u == nil { + t.Errorf("test %d: expected u != nil, got nil", i) + continue + } + if u.Email != tc.email { + t.Errorf("test %d: expected u.Email = %q, got %q", i, tc.email, u.Email) + } + if gs := u.String(); gs != tc.displayName { + t.Errorf("test %d: expected u.String() = %q, got %q", i, tc.displayName, gs) + } + if u.Admin != tc.isAdmin { + t.Errorf("test %d: expected u.Admin = %v, got %v", i, tc.isAdmin, u.Admin) + } + } +} + +func TestLoginURL(t *testing.T) { + expectedQuery := &pb.CreateLoginURLRequest{ + DestinationUrl: proto.String("/destination"), + } + const expectedDest = "/redir/dest" + c := aetesting.FakeSingleContext(t, "user", "CreateLoginURL", func(req *pb.CreateLoginURLRequest, res *pb.CreateLoginURLResponse) error { + if !proto.Equal(req, expectedQuery) { + return fmt.Errorf("got %v, want %v", req, expectedQuery) + } + res.LoginUrl = proto.String(expectedDest) + return nil + }) + + url, err := LoginURL(c, "/destination") + if err != nil { + t.Fatalf("LoginURL failed: %v", err) + } + if url != expectedDest { + t.Errorf("got %v, want %v", url, expectedDest) + } +} + +// TODO(dsymonds): Add test for LogoutURL. diff --git a/vendor/google.golang.org/appengine/user/user_vm.go b/vendor/google.golang.org/appengine/user/user_vm.go new file mode 100644 index 0000000..8dc672e --- /dev/null +++ b/vendor/google.golang.org/appengine/user/user_vm.go @@ -0,0 +1,38 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package user + +import ( + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" +) + +// Current returns the currently logged-in user, +// or nil if the user is not signed in. 
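+// For example (editor's illustration), inside a request handler:
+//
+//	u := user.Current(c)
+//	if u == nil {
+//		// Not signed in; redirect via user.LoginURL.
+//	}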
+func Current(c context.Context) *User { + h := internal.IncomingHeaders(c) + u := &User{ + Email: h.Get("X-AppEngine-User-Email"), + AuthDomain: h.Get("X-AppEngine-Auth-Domain"), + ID: h.Get("X-AppEngine-User-Id"), + Admin: h.Get("X-AppEngine-User-Is-Admin") == "1", + FederatedIdentity: h.Get("X-AppEngine-Federated-Identity"), + FederatedProvider: h.Get("X-AppEngine-Federated-Provider"), + } + if u.Email == "" && u.FederatedIdentity == "" { + return nil + } + return u +} + +// IsAdmin returns true if the current user is signed in and +// is currently registered as an administrator of the application. +func IsAdmin(c context.Context) bool { + h := internal.IncomingHeaders(c) + return h.Get("X-AppEngine-User-Is-Admin") == "1" +} diff --git a/vendor/google.golang.org/appengine/xmpp/xmpp.go b/vendor/google.golang.org/appengine/xmpp/xmpp.go new file mode 100644 index 0000000..3a561fd --- /dev/null +++ b/vendor/google.golang.org/appengine/xmpp/xmpp.go @@ -0,0 +1,253 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +/* +Package xmpp provides the means to send and receive instant messages +to and from users of XMPP-compatible services. + +To send a message, + m := &xmpp.Message{ + To: []string{"kaylee@example.com"}, + Body: `Hi! How's the carrot?`, + } + err := m.Send(c) + +To receive messages, + func init() { + xmpp.Handle(handleChat) + } + + func handleChat(c context.Context, m *xmpp.Message) { + // ... + } +*/ +package xmpp // import "google.golang.org/appengine/xmpp" + +import ( + "errors" + "fmt" + "net/http" + + "golang.org/x/net/context" + + "google.golang.org/appengine" + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/xmpp" +) + +// Message represents an incoming chat message. +type Message struct { + // Sender is the JID of the sender. + // Optional for outgoing messages. + Sender string + + // To is the intended recipients of the message. + // Incoming messages will have exactly one element. + To []string + + // Body is the body of the message. + Body string + + // Type is the message type, per RFC 3921. + // It defaults to "chat". + Type string + + // RawXML is whether the body contains raw XML. + RawXML bool +} + +// Presence represents an outgoing presence update. +type Presence struct { + // Sender is the JID (optional). + Sender string + + // The intended recipient of the presence update. + To string + + // Type, per RFC 3921 (optional). Defaults to "available". + Type string + + // State of presence (optional). + // Valid values: "away", "chat", "xa", "dnd" (RFC 3921). + State string + + // Free text status message (optional). + Status string +} + +var ( + ErrPresenceUnavailable = errors.New("xmpp: presence unavailable") + ErrInvalidJID = errors.New("xmpp: invalid JID") +) + +// Handle arranges for f to be called for incoming XMPP messages. +// Only messages of type "chat" or "normal" will be handled. +func Handle(f func(c context.Context, m *Message)) { + http.HandleFunc("/_ah/xmpp/message/chat/", func(_ http.ResponseWriter, r *http.Request) { + f(appengine.NewContext(r), &Message{ + Sender: r.FormValue("from"), + To: []string{r.FormValue("to")}, + Body: r.FormValue("body"), + }) + }) +} + +// Send sends a message. +// If any failures occur with specific recipients, the error will be an appengine.MultiError. 
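+// For example (editor's illustration), per-recipient failures can be
+// unpacked:
+//
+//	if err := m.Send(c); err != nil {
+//		if me, ok := err.(appengine.MultiError); ok {
+//			// me[i] corresponds to m.To[i].
+//		}
+//	}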
+func (m *Message) Send(c context.Context) error { + req := &pb.XmppMessageRequest{ + Jid: m.To, + Body: &m.Body, + RawXml: &m.RawXML, + } + if m.Type != "" && m.Type != "chat" { + req.Type = &m.Type + } + if m.Sender != "" { + req.FromJid = &m.Sender + } + res := &pb.XmppMessageResponse{} + if err := internal.Call(c, "xmpp", "SendMessage", req, res); err != nil { + return err + } + + if len(res.Status) != len(req.Jid) { + return fmt.Errorf("xmpp: sent message to %d JIDs, but only got %d statuses back", len(req.Jid), len(res.Status)) + } + me, any := make(appengine.MultiError, len(req.Jid)), false + for i, st := range res.Status { + if st != pb.XmppMessageResponse_NO_ERROR { + me[i] = errors.New(st.String()) + any = true + } + } + if any { + return me + } + return nil +} + +// Invite sends an invitation. If the from address is an empty string +// the default (yourapp@appspot.com/bot) will be used. +func Invite(c context.Context, to, from string) error { + req := &pb.XmppInviteRequest{ + Jid: &to, + } + if from != "" { + req.FromJid = &from + } + res := &pb.XmppInviteResponse{} + return internal.Call(c, "xmpp", "SendInvite", req, res) +} + +// Send sends a presence update. +func (p *Presence) Send(c context.Context) error { + req := &pb.XmppSendPresenceRequest{ + Jid: &p.To, + } + if p.State != "" { + req.Show = &p.State + } + if p.Type != "" { + req.Type = &p.Type + } + if p.Sender != "" { + req.FromJid = &p.Sender + } + if p.Status != "" { + req.Status = &p.Status + } + res := &pb.XmppSendPresenceResponse{} + return internal.Call(c, "xmpp", "SendPresence", req, res) +} + +var presenceMap = map[pb.PresenceResponse_SHOW]string{ + pb.PresenceResponse_NORMAL: "", + pb.PresenceResponse_AWAY: "away", + pb.PresenceResponse_DO_NOT_DISTURB: "dnd", + pb.PresenceResponse_CHAT: "chat", + pb.PresenceResponse_EXTENDED_AWAY: "xa", +} + +// GetPresence retrieves a user's presence. +// If the from address is an empty string the default +// (yourapp@appspot.com/bot) will be used. +// Possible return values are "", "away", "dnd", "chat", "xa". +// ErrPresenceUnavailable is returned if the presence is unavailable. +func GetPresence(c context.Context, to string, from string) (string, error) { + req := &pb.PresenceRequest{ + Jid: &to, + } + if from != "" { + req.FromJid = &from + } + res := &pb.PresenceResponse{} + if err := internal.Call(c, "xmpp", "GetPresence", req, res); err != nil { + return "", err + } + if !*res.IsAvailable || res.Presence == nil { + return "", ErrPresenceUnavailable + } + presence, ok := presenceMap[*res.Presence] + if ok { + return presence, nil + } + return "", fmt.Errorf("xmpp: unknown presence %v", *res.Presence) +} + +// GetPresenceMulti retrieves multiple users' presence. +// If the from address is an empty string the default +// (yourapp@appspot.com/bot) will be used. +// Possible return values are "", "away", "dnd", "chat", "xa". 
+// If any presence is unavailable, an appengine.MultiError is returned.
+func GetPresenceMulti(c context.Context, to []string, from string) ([]string, error) {
+	req := &pb.BulkPresenceRequest{
+		Jid: to,
+	}
+	if from != "" {
+		req.FromJid = &from
+	}
+	res := &pb.BulkPresenceResponse{}
+
+	if err := internal.Call(c, "xmpp", "BulkGetPresence", req, res); err != nil {
+		return nil, err
+	}
+
+	presences := make([]string, 0, len(res.PresenceResponse))
+	errs := appengine.MultiError{}
+
+	addResult := func(presence string, err error) {
+		presences = append(presences, presence)
+		errs = append(errs, err)
+	}
+
+	anyErr := false
+	for _, subres := range res.PresenceResponse {
+		if !subres.GetValid() {
+			anyErr = true
+			addResult("", ErrInvalidJID)
+			continue
+		}
+		if !*subres.IsAvailable || subres.Presence == nil {
+			anyErr = true
+			addResult("", ErrPresenceUnavailable)
+			continue
+		}
+		presence, ok := presenceMap[*subres.Presence]
+		if ok {
+			addResult(presence, nil)
+		} else {
+			anyErr = true
+			addResult("", fmt.Errorf("xmpp: unknown presence %q", *subres.Presence))
+		}
+	}
+	if anyErr {
+		return presences, errs
+	}
+	return presences, nil
+}
+
+func init() {
+	internal.RegisterErrorCodeMap("xmpp", pb.XmppServiceError_ErrorCode_name)
+}
diff --git a/vendor/google.golang.org/appengine/xmpp/xmpp_test.go b/vendor/google.golang.org/appengine/xmpp/xmpp_test.go
new file mode 100644
index 0000000..c3030d3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/xmpp/xmpp_test.go
@@ -0,0 +1,173 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package xmpp
+
+import (
+	"fmt"
+	"reflect"
+	"testing"
+
+	"github.com/golang/protobuf/proto"
+
+	"google.golang.org/appengine"
+	"google.golang.org/appengine/internal/aetesting"
+	pb "google.golang.org/appengine/internal/xmpp"
+)
+
+func newPresenceResponse(isAvailable bool, presence pb.PresenceResponse_SHOW, valid bool) *pb.PresenceResponse {
+	return &pb.PresenceResponse{
+		IsAvailable: proto.Bool(isAvailable),
+		Presence:    presence.Enum(),
+		Valid:       proto.Bool(valid),
+	}
+}
+
+func setPresenceResponse(m *pb.PresenceResponse, isAvailable bool, presence pb.PresenceResponse_SHOW, valid bool) {
+	m.IsAvailable = &isAvailable
+	m.Presence = presence.Enum()
+	m.Valid = &valid
+}
+
+func TestGetPresence(t *testing.T) {
+	c := aetesting.FakeSingleContext(t, "xmpp", "GetPresence", func(in *pb.PresenceRequest, out *pb.PresenceResponse) error {
+		if jid := in.GetJid(); jid != "user@example.com" {
+			return fmt.Errorf("bad jid %q", jid)
+		}
+		setPresenceResponse(out, true, pb.PresenceResponse_CHAT, true)
+		return nil
+	})
+
+	presence, err := GetPresence(c, "user@example.com", "")
+	if err != nil {
+		t.Fatalf("GetPresence: %v", err)
+	}
+
+	if presence != "chat" {
+		t.Errorf("GetPresence: got %q, want %q", presence, "chat")
+	}
+}
+
+func TestGetPresenceMultiSingleJID(t *testing.T) {
+	c := aetesting.FakeSingleContext(t, "xmpp", "BulkGetPresence", func(in *pb.BulkPresenceRequest, out *pb.BulkPresenceResponse) error {
+		if !reflect.DeepEqual(in.Jid, []string{"user@example.com"}) {
+			return fmt.Errorf("bad request jids %#v", in.Jid)
+		}
+		out.PresenceResponse = []*pb.PresenceResponse{
+			newPresenceResponse(true, pb.PresenceResponse_NORMAL, true),
+		}
+		return nil
+	})
+
+	presence, err := GetPresenceMulti(c, []string{"user@example.com"}, "")
+	if err != nil {
+		t.Fatalf("GetPresenceMulti: %v", err)
+	}
+	if !reflect.DeepEqual(presence, []string{""}) {
[]string{""}) { + t.Errorf("GetPresenceMulti: got %s, want %s", presence, []string{""}) + } +} + +func TestGetPresenceMultiJID(t *testing.T) { + c := aetesting.FakeSingleContext(t, "xmpp", "BulkGetPresence", func(in *pb.BulkPresenceRequest, out *pb.BulkPresenceResponse) error { + if !reflect.DeepEqual(in.Jid, []string{"user@example.com", "user2@example.com"}) { + return fmt.Errorf("bad request jids %#v", in.Jid) + } + out.PresenceResponse = []*pb.PresenceResponse{ + newPresenceResponse(true, pb.PresenceResponse_NORMAL, true), + newPresenceResponse(true, pb.PresenceResponse_AWAY, true), + } + return nil + }) + + jids := []string{"user@example.com", "user2@example.com"} + presence, err := GetPresenceMulti(c, jids, "") + if err != nil { + t.Fatalf("GetPresenceMulti: %v", err) + } + want := []string{"", "away"} + if !reflect.DeepEqual(presence, want) { + t.Errorf("GetPresenceMulti: got %v, want %v", presence, want) + } +} + +func TestGetPresenceMultiFromJID(t *testing.T) { + c := aetesting.FakeSingleContext(t, "xmpp", "BulkGetPresence", func(in *pb.BulkPresenceRequest, out *pb.BulkPresenceResponse) error { + if !reflect.DeepEqual(in.Jid, []string{"user@example.com", "user2@example.com"}) { + return fmt.Errorf("bad request jids %#v", in.Jid) + } + if jid := in.GetFromJid(); jid != "bot@appspot.com" { + return fmt.Errorf("bad from jid %q", jid) + } + out.PresenceResponse = []*pb.PresenceResponse{ + newPresenceResponse(true, pb.PresenceResponse_NORMAL, true), + newPresenceResponse(true, pb.PresenceResponse_CHAT, true), + } + return nil + }) + + jids := []string{"user@example.com", "user2@example.com"} + presence, err := GetPresenceMulti(c, jids, "bot@appspot.com") + if err != nil { + t.Fatalf("GetPresenceMulti: %v", err) + } + want := []string{"", "chat"} + if !reflect.DeepEqual(presence, want) { + t.Errorf("GetPresenceMulti: got %v, want %v", presence, want) + } +} + +func TestGetPresenceMultiInvalid(t *testing.T) { + c := aetesting.FakeSingleContext(t, "xmpp", "BulkGetPresence", func(in *pb.BulkPresenceRequest, out *pb.BulkPresenceResponse) error { + if !reflect.DeepEqual(in.Jid, []string{"user@example.com", "user2@example.com"}) { + return fmt.Errorf("bad request jids %#v", in.Jid) + } + out.PresenceResponse = []*pb.PresenceResponse{ + newPresenceResponse(true, pb.PresenceResponse_EXTENDED_AWAY, true), + newPresenceResponse(true, pb.PresenceResponse_CHAT, false), + } + return nil + }) + + jids := []string{"user@example.com", "user2@example.com"} + presence, err := GetPresenceMulti(c, jids, "") + + wantErr := appengine.MultiError{nil, ErrInvalidJID} + if !reflect.DeepEqual(err, wantErr) { + t.Fatalf("GetPresenceMulti: got %#v, want %#v", err, wantErr) + } + + want := []string{"xa", ""} + if !reflect.DeepEqual(presence, want) { + t.Errorf("GetPresenceMulti: got %#v, want %#v", presence, want) + } +} + +func TestGetPresenceMultiUnavailable(t *testing.T) { + c := aetesting.FakeSingleContext(t, "xmpp", "BulkGetPresence", func(in *pb.BulkPresenceRequest, out *pb.BulkPresenceResponse) error { + if !reflect.DeepEqual(in.Jid, []string{"user@example.com", "user2@example.com"}) { + return fmt.Errorf("bad request jids %#v", in.Jid) + } + out.PresenceResponse = []*pb.PresenceResponse{ + newPresenceResponse(false, pb.PresenceResponse_AWAY, true), + newPresenceResponse(false, pb.PresenceResponse_DO_NOT_DISTURB, true), + } + return nil + }) + + jids := []string{"user@example.com", "user2@example.com"} + presence, err := GetPresenceMulti(c, jids, "") + + wantErr := appengine.MultiError{ + 
+		ErrPresenceUnavailable,
+		ErrPresenceUnavailable,
+	}
+	if !reflect.DeepEqual(err, wantErr) {
+		t.Fatalf("GetPresenceMulti: got %#v, want %#v", err, wantErr)
+	}
+	want := []string{"", ""}
+	if !reflect.DeepEqual(presence, want) {
+		t.Errorf("GetPresenceMulti: got %#v, want %#v", presence, want)
+	}
+}
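
Editor's note: a minimal sketch (not part of the patch) of how a caller might consume the two MultiError contracts above. Both Message.Send and GetPresenceMulti report per-recipient outcomes via an appengine.MultiError whose slots line up index-for-index with the input JIDs. The package name, the notifyAll/whoIsFree helpers, the JIDs, and the message body are all invented for illustration; a valid App Engine request context c is assumed.

	// Hedged usage sketch for the vendored xmpp package; helper names,
	// JIDs, and message body are illustrative assumptions only.
	package xmppexample

	import (
		"log"

		"golang.org/x/net/context"

		"google.golang.org/appengine"
		"google.golang.org/appengine/xmpp"
	)

	// notifyAll sends one message to several recipients. Per Send's
	// contract, a non-nil appengine.MultiError has one slot per JID in
	// m.To; a nil slot means that particular send succeeded.
	func notifyAll(c context.Context, jids []string) {
		m := &xmpp.Message{To: jids, Body: "deploy finished"}
		err := m.Send(c)
		if err == nil {
			return
		}
		if me, ok := err.(appengine.MultiError); ok {
			for i, e := range me {
				if e != nil {
					log.Printf("xmpp: send to %s failed: %v", jids[i], e)
				}
			}
			return
		}
		log.Printf("xmpp: send failed: %v", err) // transport-level failure
	}

	// whoIsFree keeps the JIDs whose presence is "" (normal) or "chat".
	// GetPresenceMulti returns partial results: when err is a MultiError,
	// the presences slice is still aligned index-for-index with jids.
	func whoIsFree(c context.Context, jids []string) []string {
		presences, err := xmpp.GetPresenceMulti(c, jids, "")
		me, isMulti := err.(appengine.MultiError)
		if err != nil && !isMulti {
			return nil // the RPC itself failed; no per-JID information
		}
		var free []string
		for i, p := range presences {
			if isMulti && me[i] != nil {
				continue // ErrInvalidJID, ErrPresenceUnavailable, or unknown
			}
			if p == "" || p == "chat" {
				free = append(free, jids[i])
			}
		}
		return free
	}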